Merge pull request #5344 from sbodagala/version-vector-prototype
Version vector prototype: merge apple/master into version vector branch
commit c059aad41a
@@ -1,4 +1,5 @@
# Build artifacts
/my_build/
/bin/
/lib/
/packages/
@@ -83,6 +84,7 @@ ipch/
compile_commands.json
flow/actorcompiler/obj
flow/coveragetool/obj
*.code-workspace

# IDE indexing (commonly used tools)
/compile_commands.json

@@ -16,7 +16,12 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
cmake_minimum_required(VERSION 3.13)
if(WIN32)
cmake_minimum_required(VERSION 3.15)
else()
cmake_minimum_required(VERSION 3.13)
endif()

project(foundationdb
VERSION 7.1.0
DESCRIPTION "FoundationDB is a scalable, fault-tolerant, ordered key-value store with full ACID transactions."
@@ -196,9 +201,9 @@ configure_file(${CMAKE_CURRENT_SOURCE_DIR}/fdbclient/BuildFlags.h.in ${CMAKE_CUR
if (CMAKE_EXPORT_COMPILE_COMMANDS AND WITH_PYTHON)
add_custom_command(
OUTPUT ${CMAKE_CURRENT_SOURCE_DIR}/compile_commands.json
COMMAND $<TARGET_FILE:Python::Interpreter> ${CMAKE_CURRENT_SOURCE_DIR}/build/gen_compile_db.py
COMMAND $<TARGET_FILE:Python::Interpreter> ${CMAKE_CURRENT_SOURCE_DIR}/contrib/gen_compile_db.py
ARGS -b ${CMAKE_CURRENT_BINARY_DIR} -s ${CMAKE_CURRENT_SOURCE_DIR} -o ${CMAKE_CURRENT_SOURCE_DIR}/compile_commands.json ${CMAKE_CURRENT_BINARY_DIR}/compile_commands.json
DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/build/gen_compile_db.py ${CMAKE_CURRENT_BINARY_DIR}/compile_commands.json
DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/contrib/gen_compile_db.py ${CMAKE_CURRENT_BINARY_DIR}/compile_commands.json
COMMENT "Build compile commands for IDE"
)
add_custom_target(processed_compile_commands ALL DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/compile_commands.json ${CMAKE_CURRENT_BINARY_DIR}/compile_commands.json)
@ -42,7 +42,7 @@ FDBLibTLSPolicy::FDBLibTLSPolicy(Reference<FDBLibTLSPlugin> plugin)
|
|||
key_data_set(false), verify_peers_set(false) {
|
||||
|
||||
if ((tls_cfg = tls_config_new()) == nullptr) {
|
||||
TraceEvent(SevError, "FDBLibTLSConfigError");
|
||||
TraceEvent(SevError, "FDBLibTLSConfigError").log();
|
||||
throw std::runtime_error("FDBLibTLSConfigError");
|
||||
}
|
||||
|
||||
|
@ -67,14 +67,14 @@ ITLSSession* FDBLibTLSPolicy::create_session(bool is_client,
|
|||
// servername, since this will be ignored - the servername should be
|
||||
// matched by the verify criteria instead.
|
||||
if (verify_peers_set && servername != nullptr) {
|
||||
TraceEvent(SevError, "FDBLibTLSVerifyPeersWithServerName");
|
||||
TraceEvent(SevError, "FDBLibTLSVerifyPeersWithServerName").log();
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
// If verify peers has not been set, then require a server name to
|
||||
// avoid an accidental lack of name validation.
|
||||
if (!verify_peers_set && servername == nullptr) {
|
||||
TraceEvent(SevError, "FDBLibTLSNoServerName");
|
||||
TraceEvent(SevError, "FDBLibTLSNoServerName").log();
|
||||
return nullptr;
|
||||
}
|
||||
}
|
||||
|
@ -123,18 +123,18 @@ struct stack_st_X509* FDBLibTLSPolicy::parse_cert_pem(const uint8_t* cert_pem, s
|
|||
if (cert_pem_len > INT_MAX)
|
||||
goto err;
|
||||
if ((bio = BIO_new_mem_buf((void*)cert_pem, cert_pem_len)) == nullptr) {
|
||||
TraceEvent(SevError, "FDBLibTLSOutOfMemory");
|
||||
TraceEvent(SevError, "FDBLibTLSOutOfMemory").log();
|
||||
goto err;
|
||||
}
|
||||
if ((certs = sk_X509_new_null()) == nullptr) {
|
||||
TraceEvent(SevError, "FDBLibTLSOutOfMemory");
|
||||
TraceEvent(SevError, "FDBLibTLSOutOfMemory").log();
|
||||
goto err;
|
||||
}
|
||||
|
||||
ERR_clear_error();
|
||||
while ((cert = PEM_read_bio_X509(bio, nullptr, password_cb, nullptr)) != nullptr) {
|
||||
if (!sk_X509_push(certs, cert)) {
|
||||
TraceEvent(SevError, "FDBLibTLSOutOfMemory");
|
||||
TraceEvent(SevError, "FDBLibTLSOutOfMemory").log();
|
||||
goto err;
|
||||
}
|
||||
}
|
||||
|
@ -150,7 +150,7 @@ struct stack_st_X509* FDBLibTLSPolicy::parse_cert_pem(const uint8_t* cert_pem, s
|
|||
}
|
||||
|
||||
if (sk_X509_num(certs) < 1) {
|
||||
TraceEvent(SevError, "FDBLibTLSNoCerts");
|
||||
TraceEvent(SevError, "FDBLibTLSNoCerts").log();
|
||||
goto err;
|
||||
}
|
||||
|
||||
|
@ -168,11 +168,11 @@ err:
|
|||
|
||||
bool FDBLibTLSPolicy::set_ca_data(const uint8_t* ca_data, int ca_len) {
|
||||
if (ca_data_set) {
|
||||
TraceEvent(SevError, "FDBLibTLSCAAlreadySet");
|
||||
TraceEvent(SevError, "FDBLibTLSCAAlreadySet").log();
|
||||
return false;
|
||||
}
|
||||
if (session_created) {
|
||||
TraceEvent(SevError, "FDBLibTLSPolicyAlreadyActive");
|
||||
TraceEvent(SevError, "FDBLibTLSPolicyAlreadyActive").log();
|
||||
return false;
|
||||
}
|
||||
|
||||
|
@ -194,11 +194,11 @@ bool FDBLibTLSPolicy::set_ca_data(const uint8_t* ca_data, int ca_len) {
|
|||
|
||||
bool FDBLibTLSPolicy::set_cert_data(const uint8_t* cert_data, int cert_len) {
|
||||
if (cert_data_set) {
|
||||
TraceEvent(SevError, "FDBLibTLSCertAlreadySet");
|
||||
TraceEvent(SevError, "FDBLibTLSCertAlreadySet").log();
|
||||
return false;
|
||||
}
|
||||
if (session_created) {
|
||||
TraceEvent(SevError, "FDBLibTLSPolicyAlreadyActive");
|
||||
TraceEvent(SevError, "FDBLibTLSPolicyAlreadyActive").log();
|
||||
return false;
|
||||
}
|
||||
|
||||
|
@ -218,11 +218,11 @@ bool FDBLibTLSPolicy::set_key_data(const uint8_t* key_data, int key_len, const c
|
|||
bool rc = false;
|
||||
|
||||
if (key_data_set) {
|
||||
TraceEvent(SevError, "FDBLibTLSKeyAlreadySet");
|
||||
TraceEvent(SevError, "FDBLibTLSKeyAlreadySet").log();
|
||||
goto err;
|
||||
}
|
||||
if (session_created) {
|
||||
TraceEvent(SevError, "FDBLibTLSPolicyAlreadyActive");
|
||||
TraceEvent(SevError, "FDBLibTLSPolicyAlreadyActive").log();
|
||||
goto err;
|
||||
}
|
||||
|
||||
|
@ -231,7 +231,7 @@ bool FDBLibTLSPolicy::set_key_data(const uint8_t* key_data, int key_len, const c
|
|||
long len;
|
||||
|
||||
if ((bio = BIO_new_mem_buf((void*)key_data, key_len)) == nullptr) {
|
||||
TraceEvent(SevError, "FDBLibTLSOutOfMemory");
|
||||
TraceEvent(SevError, "FDBLibTLSOutOfMemory").log();
|
||||
goto err;
|
||||
}
|
||||
ERR_clear_error();
|
||||
|
@ -241,7 +241,7 @@ bool FDBLibTLSPolicy::set_key_data(const uint8_t* key_data, int key_len, const c
|
|||
|
||||
if ((ERR_GET_LIB(errnum) == ERR_LIB_PEM && ERR_GET_REASON(errnum) == PEM_R_BAD_DECRYPT) ||
|
||||
(ERR_GET_LIB(errnum) == ERR_LIB_EVP && ERR_GET_REASON(errnum) == EVP_R_BAD_DECRYPT)) {
|
||||
TraceEvent(SevError, "FDBLibTLSIncorrectPassword");
|
||||
TraceEvent(SevError, "FDBLibTLSIncorrectPassword").log();
|
||||
} else {
|
||||
ERR_error_string_n(errnum, errbuf, sizeof(errbuf));
|
||||
TraceEvent(SevError, "FDBLibTLSPrivateKeyError").detail("LibcryptoErrorMessage", errbuf);
|
||||
|
@ -250,15 +250,15 @@ bool FDBLibTLSPolicy::set_key_data(const uint8_t* key_data, int key_len, const c
|
|||
}
|
||||
BIO_free(bio);
|
||||
if ((bio = BIO_new(BIO_s_mem())) == nullptr) {
|
||||
TraceEvent(SevError, "FDBLibTLSOutOfMemory");
|
||||
TraceEvent(SevError, "FDBLibTLSOutOfMemory").log();
|
||||
goto err;
|
||||
}
|
||||
if (!PEM_write_bio_PrivateKey(bio, key, nullptr, nullptr, 0, nullptr, nullptr)) {
|
||||
TraceEvent(SevError, "FDBLibTLSOutOfMemory");
|
||||
TraceEvent(SevError, "FDBLibTLSOutOfMemory").log();
|
||||
goto err;
|
||||
}
|
||||
if ((len = BIO_get_mem_data(bio, &data)) <= 0) {
|
||||
TraceEvent(SevError, "FDBLibTLSOutOfMemory");
|
||||
TraceEvent(SevError, "FDBLibTLSOutOfMemory").log();
|
||||
goto err;
|
||||
}
|
||||
if (tls_config_set_key_mem(tls_cfg, (const uint8_t*)data, len) == -1) {
|
||||
|
@ -283,16 +283,16 @@ err:
|
|||
|
||||
bool FDBLibTLSPolicy::set_verify_peers(int count, const uint8_t* verify_peers[], int verify_peers_len[]) {
|
||||
if (verify_peers_set) {
|
||||
TraceEvent(SevError, "FDBLibTLSVerifyPeersAlreadySet");
|
||||
TraceEvent(SevError, "FDBLibTLSVerifyPeersAlreadySet").log();
|
||||
return false;
|
||||
}
|
||||
if (session_created) {
|
||||
TraceEvent(SevError, "FDBLibTLSPolicyAlreadyActive");
|
||||
TraceEvent(SevError, "FDBLibTLSPolicyAlreadyActive").log();
|
||||
return false;
|
||||
}
|
||||
|
||||
if (count < 1) {
|
||||
TraceEvent(SevError, "FDBLibTLSNoVerifyPeers");
|
||||
TraceEvent(SevError, "FDBLibTLSNoVerifyPeers").log();
|
||||
return false;
|
||||
}
|
||||
|
||||
|
|
|
@ -73,7 +73,7 @@ FDBLibTLSSession::FDBLibTLSSession(Reference<FDBLibTLSPolicy> policy,
|
|||
|
||||
if (is_client) {
|
||||
if ((tls_ctx = tls_client()) == nullptr) {
|
||||
TraceEvent(SevError, "FDBLibTLSClientError", uid);
|
||||
TraceEvent(SevError, "FDBLibTLSClientError", uid).log();
|
||||
throw std::runtime_error("FDBLibTLSClientError");
|
||||
}
|
||||
if (tls_configure(tls_ctx, policy->tls_cfg) == -1) {
|
||||
|
@ -88,7 +88,7 @@ FDBLibTLSSession::FDBLibTLSSession(Reference<FDBLibTLSPolicy> policy,
|
|||
}
|
||||
} else {
|
||||
if ((tls_sctx = tls_server()) == nullptr) {
|
||||
TraceEvent(SevError, "FDBLibTLSServerError", uid);
|
||||
TraceEvent(SevError, "FDBLibTLSServerError", uid).log();
|
||||
throw std::runtime_error("FDBLibTLSServerError");
|
||||
}
|
||||
if (tls_configure(tls_sctx, policy->tls_cfg) == -1) {
|
||||
|
@ -250,7 +250,7 @@ std::tuple<bool, std::string> FDBLibTLSSession::check_verify(Reference<FDBLibTLS
|
|||
|
||||
// Verify the certificate.
|
||||
if ((store_ctx = X509_STORE_CTX_new()) == nullptr) {
|
||||
TraceEvent(SevError, "FDBLibTLSOutOfMemory", uid);
|
||||
TraceEvent(SevError, "FDBLibTLSOutOfMemory", uid).log();
|
||||
reason = "Out of memory";
|
||||
goto err;
|
||||
}
|
||||
|
@ -333,7 +333,7 @@ bool FDBLibTLSSession::verify_peer() {
|
|||
return true;
|
||||
|
||||
if ((cert_pem = tls_peer_cert_chain_pem(tls_ctx, &cert_pem_len)) == nullptr) {
|
||||
TraceEvent(SevError, "FDBLibTLSNoCertError", uid);
|
||||
TraceEvent(SevError, "FDBLibTLSNoCertError", uid).log();
|
||||
goto err;
|
||||
}
|
||||
if ((certs = policy->parse_cert_pem(cert_pem, cert_pem_len)) == nullptr)
|
||||
|
@ -388,14 +388,14 @@ int FDBLibTLSSession::handshake() {
|
|||
|
||||
int FDBLibTLSSession::read(uint8_t* data, int length) {
|
||||
if (!handshake_completed) {
|
||||
TraceEvent(SevError, "FDBLibTLSReadHandshakeError");
|
||||
TraceEvent(SevError, "FDBLibTLSReadHandshakeError").log();
|
||||
return FAILED;
|
||||
}
|
||||
|
||||
ssize_t n = tls_read(tls_ctx, data, length);
|
||||
if (n > 0) {
|
||||
if (n > INT_MAX) {
|
||||
TraceEvent(SevError, "FDBLibTLSReadOverflow");
|
||||
TraceEvent(SevError, "FDBLibTLSReadOverflow").log();
|
||||
return FAILED;
|
||||
}
|
||||
return (int)n;
|
||||
|
@ -415,14 +415,14 @@ int FDBLibTLSSession::read(uint8_t* data, int length) {
|
|||
|
||||
int FDBLibTLSSession::write(const uint8_t* data, int length) {
|
||||
if (!handshake_completed) {
|
||||
TraceEvent(SevError, "FDBLibTLSWriteHandshakeError", uid);
|
||||
TraceEvent(SevError, "FDBLibTLSWriteHandshakeError", uid).log();
|
||||
return FAILED;
|
||||
}
|
||||
|
||||
ssize_t n = tls_write(tls_ctx, data, length);
|
||||
if (n > 0) {
|
||||
if (n > INT_MAX) {
|
||||
TraceEvent(SevError, "FDBLibTLSWriteOverflow", uid);
|
||||
TraceEvent(SevError, "FDBLibTLSWriteOverflow", uid).log();
|
||||
return FAILED;
|
||||
}
|
||||
return (int)n;
|
||||
|
|
|
@@ -30,7 +30,9 @@ Developers interested in using FoundationDB can get started by downloading and i
Developers on an OS for which there is no binary package, or who would like
to start hacking on the code, can get started by compiling from source.

The official docker image for building is `foundationdb/foundationdb-build`. It has all dependencies installed. To build outside the official docker image you'll need at least these dependencies:
The official docker image for building is [`foundationdb/build`](https://hub.docker.com/r/foundationdb/build) which has all dependencies installed. The Docker image definitions used by FoundationDB team members can be found in the [dedicated repository](https://github.com/FoundationDB/fdb-build-support).

To build outside the official docker image you'll need at least these dependencies:

1. Install [CMake](https://cmake.org/) Version 3.13 or higher
1. Install [Mono](http://www.mono-project.com/download/stable/)
@@ -77,7 +79,7 @@ describe the actor compiler source file, not the post-processed output files,
and places the output file in the source directory. This file should then be
picked up automatically by any tooling.

Note that if building inside of the `foundationdb/foundationdb-build` docker
Note that if building inside of the `foundationdb/build` docker
image, the resulting paths will still be incorrect and require manual fixing.
One will wish to re-run `cmake` with `-DCMAKE_EXPORT_COMPILE_COMMANDS=OFF` to
prevent it from reverting the manual changes.
@@ -138,7 +140,7 @@ You should create a second build-directory which you will use for building and d
### Linux

There are no special requirements for Linux. A docker image can be pulled from
`foundationdb/foundationdb-build` that has all of FoundationDB's dependencies
`foundationdb/build` that has all of FoundationDB's dependencies
pre-installed, and is what the CI uses to build and test PRs.

```
@ -79,6 +79,7 @@ if(NOT WIN32)
|
|||
test/unit/fdb_api.hpp)
|
||||
|
||||
set(UNIT_TEST_VERSION_510_SRCS test/unit/unit_tests_version_510.cpp)
|
||||
set(TRACE_PARTIAL_FILE_SUFFIX_TEST_SRCS test/unit/trace_partial_file_suffix_test.cpp)
|
||||
|
||||
if(OPEN_FOR_IDE)
|
||||
add_library(fdb_c_performance_test OBJECT test/performance_test.c test/test.h)
|
||||
|
@ -88,6 +89,7 @@ if(NOT WIN32)
|
|||
add_library(fdb_c_setup_tests OBJECT test/unit/setup_tests.cpp)
|
||||
add_library(fdb_c_unit_tests OBJECT ${UNIT_TEST_SRCS})
|
||||
add_library(fdb_c_unit_tests_version_510 OBJECT ${UNIT_TEST_VERSION_510_SRCS})
|
||||
add_library(trace_partial_file_suffix_test OBJECT ${TRACE_PARTIAL_FILE_SUFFIX_TEST_SRCS})
|
||||
else()
|
||||
add_executable(fdb_c_performance_test test/performance_test.c test/test.h)
|
||||
add_executable(fdb_c_ryw_benchmark test/ryw_benchmark.c test/test.h)
|
||||
|
@ -96,6 +98,7 @@ if(NOT WIN32)
|
|||
add_executable(fdb_c_setup_tests test/unit/setup_tests.cpp)
|
||||
add_executable(fdb_c_unit_tests ${UNIT_TEST_SRCS})
|
||||
add_executable(fdb_c_unit_tests_version_510 ${UNIT_TEST_VERSION_510_SRCS})
|
||||
add_executable(trace_partial_file_suffix_test ${TRACE_PARTIAL_FILE_SUFFIX_TEST_SRCS})
|
||||
strip_debug_symbols(fdb_c_performance_test)
|
||||
strip_debug_symbols(fdb_c_ryw_benchmark)
|
||||
strip_debug_symbols(fdb_c_txn_size_test)
|
||||
|
@ -106,12 +109,14 @@ if(NOT WIN32)
|
|||
|
||||
add_dependencies(fdb_c_setup_tests doctest)
|
||||
add_dependencies(fdb_c_unit_tests doctest)
|
||||
add_dependencies(fdb_c_unit_tests_version_510 doctest)
|
||||
target_include_directories(fdb_c_setup_tests PUBLIC ${DOCTEST_INCLUDE_DIR})
|
||||
target_include_directories(fdb_c_unit_tests PUBLIC ${DOCTEST_INCLUDE_DIR})
|
||||
target_include_directories(fdb_c_unit_tests_version_510 PUBLIC ${DOCTEST_INCLUDE_DIR})
|
||||
target_link_libraries(fdb_c_setup_tests PRIVATE fdb_c Threads::Threads)
|
||||
target_link_libraries(fdb_c_unit_tests PRIVATE fdb_c Threads::Threads)
|
||||
target_link_libraries(fdb_c_unit_tests_version_510 PRIVATE fdb_c Threads::Threads)
|
||||
target_link_libraries(trace_partial_file_suffix_test PRIVATE fdb_c Threads::Threads)
|
||||
|
||||
# do not set RPATH for mako
|
||||
set_property(TARGET mako PROPERTY SKIP_BUILD_RPATH TRUE)
|
||||
|
@ -146,6 +151,11 @@ if(NOT WIN32)
|
|||
COMMAND $<TARGET_FILE:fdb_c_unit_tests_version_510>
|
||||
@CLUSTER_FILE@
|
||||
fdb)
|
||||
add_fdbclient_test(
|
||||
NAME trace_partial_file_suffix_test
|
||||
COMMAND $<TARGET_FILE:trace_partial_file_suffix_test>
|
||||
@CLUSTER_FILE@
|
||||
fdb)
|
||||
add_fdbclient_test(
|
||||
NAME fdb_c_external_client_unit_tests
|
||||
COMMAND $<TARGET_FILE:fdb_c_unit_tests>
|
||||
|
|
|
@ -162,7 +162,7 @@ extern "C" DLLEXPORT fdb_bool_t fdb_future_is_ready(FDBFuture* f) {
|
|||
return TSAVB(f)->isReady();
|
||||
}
|
||||
|
||||
class CAPICallback : public ThreadCallback {
|
||||
class CAPICallback final : public ThreadCallback {
|
||||
public:
|
||||
CAPICallback(void (*callbackf)(FDBFuture*, void*), FDBFuture* f, void* userdata)
|
||||
: callbackf(callbackf), f(f), userdata(userdata) {}
|
||||
|
|
|
@ -66,6 +66,7 @@ public:
|
|||
};
|
||||
|
||||
struct FDBPromise {
|
||||
virtual ~FDBPromise() = default;
|
||||
virtual void send(void*) = 0;
|
||||
};
|
||||
|
||||
|
|
|
@ -1056,12 +1056,12 @@ void* worker_thread(void* thread_args) {
|
|||
}
|
||||
|
||||
fprintf(debugme,
|
||||
"DEBUG: worker_id:%d (%d) thread_id:%d (%d) (tid:%d)\n",
|
||||
"DEBUG: worker_id:%d (%d) thread_id:%d (%d) (tid:%lld)\n",
|
||||
worker_id,
|
||||
args->num_processes,
|
||||
thread_id,
|
||||
args->num_threads,
|
||||
(unsigned int)pthread_self());
|
||||
(uint64_t)pthread_self());
|
||||
|
||||
if (args->tpsmax) {
|
||||
thread_tps = compute_thread_tps(args->tpsmax, worker_id, thread_id, args->num_processes, args->num_threads);
|
||||
|
|
|
@ -0,0 +1,111 @@
|
|||
/*
|
||||
* trace_partial_file_suffix_test.cpp
|
||||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include <fstream>
|
||||
#include <iostream>
|
||||
#include <random>
|
||||
#include <string>
|
||||
#include <thread>
|
||||
|
||||
#include "flow/Platform.h"
|
||||
|
||||
#define FDB_API_VERSION 710
|
||||
#include "foundationdb/fdb_c.h"
|
||||
|
||||
#undef NDEBUG
|
||||
#include <cassert>
|
||||
|
||||
void fdb_check(fdb_error_t e) {
|
||||
if (e) {
|
||||
std::cerr << fdb_get_error(e) << std::endl;
|
||||
std::abort();
|
||||
}
|
||||
}
|
||||
|
||||
void set_net_opt(FDBNetworkOption option, const std::string& value) {
|
||||
fdb_check(fdb_network_set_option(option, reinterpret_cast<const uint8_t*>(value.c_str()), value.size()));
|
||||
}
|
||||
|
||||
bool file_exists(const char* path) {
|
||||
FILE* f = fopen(path, "r");
|
||||
if (f) {
|
||||
fclose(f);
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
int main(int argc, char** argv) {
|
||||
fdb_check(fdb_select_api_version(710));
|
||||
|
||||
std::string file_identifier = "trace_partial_file_suffix_test" + std::to_string(std::random_device{}());
|
||||
std::string trace_partial_file_suffix = ".tmp";
|
||||
std::string simulated_stray_partial_file =
|
||||
"trace.127.0.0.1." + file_identifier + ".simulated.xml" + trace_partial_file_suffix;
|
||||
|
||||
// Simulate this process crashing previously by creating a ".tmp" file
|
||||
{ std::ofstream file{ simulated_stray_partial_file }; }
|
||||
|
||||
set_net_opt(FDBNetworkOption::FDB_NET_OPTION_TRACE_ENABLE, "");
|
||||
set_net_opt(FDBNetworkOption::FDB_NET_OPTION_TRACE_FILE_IDENTIFIER, file_identifier);
|
||||
set_net_opt(FDBNetworkOption::FDB_NET_OPTION_TRACE_PARTIAL_FILE_SUFFIX, trace_partial_file_suffix);
|
||||
|
||||
fdb_check(fdb_setup_network());
|
||||
std::thread network_thread{ &fdb_run_network };
|
||||
|
||||
// Apparently you need to open a database to initialize logging
|
||||
FDBDatabase* out;
|
||||
fdb_check(fdb_create_database(nullptr, &out));
|
||||
fdb_database_destroy(out);
|
||||
|
||||
// Eventually there's a new trace file for this test ending in .tmp
|
||||
std::string name;
|
||||
for (;;) {
|
||||
for (const auto& path : platform::listFiles(".")) {
|
||||
if (path.find(file_identifier) != std::string::npos && path.find(".simulated.") == std::string::npos) {
|
||||
assert(path.substr(path.size() - trace_partial_file_suffix.size()) == trace_partial_file_suffix);
|
||||
name = path;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (!name.empty()) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
fdb_check(fdb_stop_network());
|
||||
network_thread.join();
|
||||
|
||||
// After shutting down, the suffix is removed for both the simulated stray file and our new file
|
||||
if (!trace_partial_file_suffix.empty()) {
|
||||
assert(!file_exists(name.c_str()));
|
||||
assert(!file_exists(simulated_stray_partial_file.c_str()));
|
||||
}
|
||||
|
||||
auto new_name = name.substr(0, name.size() - trace_partial_file_suffix.size());
|
||||
auto new_stray_name =
|
||||
simulated_stray_partial_file.substr(0, simulated_stray_partial_file.size() - trace_partial_file_suffix.size());
|
||||
assert(file_exists(new_name.c_str()));
|
||||
assert(file_exists(new_stray_name.c_str()));
|
||||
remove(new_name.c_str());
|
||||
remove(new_stray_name.c_str());
|
||||
assert(!file_exists(new_name.c_str()));
|
||||
assert(!file_exists(new_stray_name.c_str()));
|
||||
}
|
|
@ -2177,6 +2177,81 @@ TEST_CASE("monitor_network_busyness") {
|
|||
CHECK(containsGreaterZero);
|
||||
}
|
||||
|
||||
// Commit a transaction and confirm it has not been reset
|
||||
TEST_CASE("commit_does_not_reset") {
|
||||
fdb::Transaction tr(db);
|
||||
fdb::Transaction tr2(db);
|
||||
|
||||
// Commit two transactions, one that will fail with conflict and the other
|
||||
// that will succeed. Ensure both transactions are not reset at the end.
|
||||
while (1) {
|
||||
fdb::Int64Future tr1GrvFuture = tr.get_read_version();
|
||||
fdb_error_t err = wait_future(tr1GrvFuture);
|
||||
if (err) {
|
||||
fdb::EmptyFuture tr1OnErrorFuture = tr.on_error(err);
|
||||
fdb_check(wait_future(tr1OnErrorFuture));
|
||||
continue;
|
||||
}
|
||||
|
||||
int64_t tr1StartVersion;
|
||||
CHECK(!tr1GrvFuture.get(&tr1StartVersion));
|
||||
|
||||
fdb::Int64Future tr2GrvFuture = tr2.get_read_version();
|
||||
err = wait_future(tr2GrvFuture);
|
||||
|
||||
if (err) {
|
||||
fdb::EmptyFuture tr2OnErrorFuture = tr2.on_error(err);
|
||||
fdb_check(wait_future(tr2OnErrorFuture));
|
||||
continue;
|
||||
}
|
||||
|
||||
int64_t tr2StartVersion;
|
||||
CHECK(!tr2GrvFuture.get(&tr2StartVersion));
|
||||
|
||||
tr.set(key("foo"), "bar");
|
||||
fdb::EmptyFuture tr1CommitFuture = tr.commit();
|
||||
err = wait_future(tr1CommitFuture);
|
||||
if (err) {
|
||||
fdb::EmptyFuture tr1OnErrorFuture = tr.on_error(err);
|
||||
fdb_check(wait_future(tr1OnErrorFuture));
|
||||
continue;
|
||||
}
|
||||
|
||||
fdb_check(tr2.add_conflict_range(key("foo"), strinc(key("foo")), FDB_CONFLICT_RANGE_TYPE_READ));
|
||||
tr2.set(key("foo"), "bar");
|
||||
fdb::EmptyFuture tr2CommitFuture = tr2.commit();
|
||||
err = wait_future(tr2CommitFuture);
|
||||
CHECK(err == 1020); // not_committed
|
||||
|
||||
fdb::Int64Future tr1GrvFuture2 = tr.get_read_version();
|
||||
err = wait_future(tr1GrvFuture2);
|
||||
if (err) {
|
||||
fdb::EmptyFuture tr1OnErrorFuture = tr.on_error(err);
|
||||
fdb_check(wait_future(tr1OnErrorFuture));
|
||||
continue;
|
||||
}
|
||||
|
||||
int64_t tr1EndVersion;
|
||||
CHECK(!tr1GrvFuture2.get(&tr1EndVersion));
|
||||
|
||||
fdb::Int64Future tr2GrvFuture2 = tr2.get_read_version();
|
||||
err = wait_future(tr2GrvFuture2);
|
||||
if (err) {
|
||||
fdb::EmptyFuture tr2OnErrorFuture = tr2.on_error(err);
|
||||
fdb_check(wait_future(tr2OnErrorFuture));
|
||||
continue;
|
||||
}
|
||||
|
||||
int64_t tr2EndVersion;
|
||||
CHECK(!tr2GrvFuture2.get(&tr2EndVersion));
|
||||
|
||||
// If we reset the transaction, then the read version will change
|
||||
CHECK(tr1StartVersion == tr1EndVersion);
|
||||
CHECK(tr2StartVersion == tr2EndVersion);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
int main(int argc, char** argv) {
|
||||
if (argc < 3) {
|
||||
std::cout << "Unit tests for the FoundationDB C API.\n"
|
||||
|
|
|
@ -36,8 +36,8 @@ const Subspace DirectoryLayer::DEFAULT_CONTENT_SUBSPACE = Subspace();
|
|||
const StringRef DirectoryLayer::PARTITION_LAYER = LiteralStringRef("partition");
|
||||
|
||||
DirectoryLayer::DirectoryLayer(Subspace nodeSubspace, Subspace contentSubspace, bool allowManualPrefixes)
|
||||
: nodeSubspace(nodeSubspace), contentSubspace(contentSubspace), allowManualPrefixes(allowManualPrefixes),
|
||||
rootNode(nodeSubspace.get(nodeSubspace.key())), allocator(rootNode.get(HIGH_CONTENTION_KEY)) {}
|
||||
: rootNode(nodeSubspace.get(nodeSubspace.key())), nodeSubspace(nodeSubspace), contentSubspace(contentSubspace),
|
||||
allocator(rootNode.get(HIGH_CONTENTION_KEY)), allowManualPrefixes(allowManualPrefixes) {}
|
||||
|
||||
Subspace DirectoryLayer::nodeWithPrefix(StringRef const& prefix) const {
|
||||
return nodeSubspace.get(prefix);
|
||||
|
|
|
@ -167,9 +167,9 @@ struct RangeResultRef : VectorRef<KeyValueRef> {
|
|||
|
||||
RangeResultRef() : more(false), readToBegin(false), readThroughEnd(false) {}
|
||||
RangeResultRef(Arena& p, const RangeResultRef& toCopy)
|
||||
: more(toCopy.more), readToBegin(toCopy.readToBegin), readThroughEnd(toCopy.readThroughEnd),
|
||||
: VectorRef<KeyValueRef>(p, toCopy), more(toCopy.more),
|
||||
readThrough(toCopy.readThrough.present() ? KeyRef(p, toCopy.readThrough.get()) : Optional<KeyRef>()),
|
||||
VectorRef<KeyValueRef>(p, toCopy) {}
|
||||
readToBegin(toCopy.readToBegin), readThroughEnd(toCopy.readThroughEnd) {}
|
||||
RangeResultRef(const VectorRef<KeyValueRef>& value, bool more, Optional<KeyRef> readThrough = Optional<KeyRef>())
|
||||
: VectorRef<KeyValueRef>(value), more(more), readThrough(readThrough), readToBegin(false), readThroughEnd(false) {
|
||||
}
|
||||
|
|
|
@ -19,12 +19,11 @@
|
|||
*/
|
||||
|
||||
#include "Tuple.h"
|
||||
#include <boost/static_assert.hpp>
|
||||
|
||||
namespace FDB {
|
||||
// The floating point operations depend on this using the IEEE 754 standard.
|
||||
BOOST_STATIC_ASSERT(std::numeric_limits<float>::is_iec559);
|
||||
BOOST_STATIC_ASSERT(std::numeric_limits<double>::is_iec559);
|
||||
static_assert(std::numeric_limits<float>::is_iec559);
|
||||
static_assert(std::numeric_limits<double>::is_iec559);
|
||||
|
||||
const size_t Uuid::SIZE = 16;
|
||||
|
||||
|
|
|
@ -308,7 +308,7 @@ if(NOT OPEN_FOR_IDE)
|
|||
if(RUN_JUNIT_TESTS)
|
||||
# Sets up the JUnit testing structure to run through ctest
|
||||
#
|
||||
# To add a new junit test, add the class to the JAVA_JUNIT_TESTS variable in `src/tests.cmake`. Note that if you run a Suite,
|
||||
# To add a new junit test, add the class to the JAVA_JUNIT_TESTS variable in `src/tests.cmake`. Note that if you run a Suite,
|
||||
# ctest will NOT display underlying details of the suite itself, so it's best to avoid junit suites in general. Also,
|
||||
# if you need a different runner other than JUnitCore, you'll have to modify this so be aware.
|
||||
#
|
||||
|
@ -316,8 +316,8 @@ if(NOT OPEN_FOR_IDE)
|
|||
#
|
||||
# ctest .
|
||||
#
|
||||
# from the ${BUILD_DIR}/bindings/java subdirectory.
|
||||
#
|
||||
# from the ${BUILD_DIR}/bindings/java subdirectory.
|
||||
#
|
||||
# Note: if you are running from ${BUILD_DIR}, additional tests of the native logic will be run. To avoid these, use
|
||||
#
|
||||
# ctest . -R java-unit
|
||||
|
@ -325,15 +325,15 @@ if(NOT OPEN_FOR_IDE)
|
|||
# ctest has lots of flexible command options, so be sure to refer to its documentation if you want to do something specific(documentation
|
||||
# can be found at https://cmake.org/cmake/help/v3.19/manual/ctest.1.html)
|
||||
|
||||
add_jar(fdb-junit SOURCES ${JAVA_JUNIT_TESTS} ${JUNIT_RESOURCES} INCLUDE_JARS fdb-java
|
||||
${CMAKE_BINARY_DIR}/packages/junit-jupiter-api-5.7.1.jar
|
||||
add_jar(fdb-junit SOURCES ${JAVA_JUNIT_TESTS} ${JUNIT_RESOURCES} INCLUDE_JARS fdb-java
|
||||
${CMAKE_BINARY_DIR}/packages/junit-jupiter-api-5.7.1.jar
|
||||
${CMAKE_BINARY_DIR}/packages/junit-jupiter-engine-5.7.1.jar
|
||||
${CMAKE_BINARY_DIR}/packages/junit-jupiter-params-5.7.1.jar
|
||||
${CMAKE_BINARY_DIR}/packages/opentest4j-1.2.0.jar
|
||||
${CMAKE_BINARY_DIR}/packages/apiguardian-api-1.1.1.jar
|
||||
)
|
||||
get_property(junit_jar_path TARGET fdb-junit PROPERTY JAR_FILE)
|
||||
|
||||
|
||||
add_test(NAME java-unit
|
||||
COMMAND ${Java_JAVA_EXECUTABLE}
|
||||
-classpath "${target_jar}:${junit_jar_path}:${JUNIT_CLASSPATH}"
|
||||
|
@ -346,12 +346,12 @@ if(NOT OPEN_FOR_IDE)
|
|||
if(RUN_JAVA_INTEGRATION_TESTS)
|
||||
# Set up the integration tests. These tests generally require a running database server to function properly. Most tests
|
||||
# should be written such that they can be run in parallel with other integration tests (e.g. try to use a unique key range for each test
|
||||
# whenever possible), because it's a reasonable assumption that a single server will be shared among multiple tests, and might do so
|
||||
# whenever possible), because it's a reasonable assumption that a single server will be shared among multiple tests, and might do so
|
||||
# concurrently.
|
||||
#
|
||||
# Integration tests are run through ctest the same way as unit tests, but their label is prefixed with the entry 'integration-'.
|
||||
# Note that most java integration tests will fail if they can't quickly connect to a running FDB instance(depending on how the test is written, anyway).
|
||||
# However, if you want to explicitly skip them, you can run
|
||||
# Note that most java integration tests will fail if they can't quickly connect to a running FDB instance(depending on how the test is written, anyway).
|
||||
# However, if you want to explicitly skip them, you can run
|
||||
#
|
||||
# `ctest -E integration`
|
||||
#
|
||||
|
@ -368,8 +368,8 @@ if(NOT OPEN_FOR_IDE)
|
|||
# empty, consider generating a random prefix for the keys you write, use
|
||||
# the directory layer with a unique path, etc.)
|
||||
#
|
||||
add_jar(fdb-integration SOURCES ${JAVA_INTEGRATION_TESTS} ${JAVA_INTEGRATION_RESOURCES} INCLUDE_JARS fdb-java
|
||||
${CMAKE_BINARY_DIR}/packages/junit-jupiter-api-5.7.1.jar
|
||||
add_jar(fdb-integration SOURCES ${JAVA_INTEGRATION_TESTS} ${JAVA_INTEGRATION_RESOURCES} INCLUDE_JARS fdb-java
|
||||
${CMAKE_BINARY_DIR}/packages/junit-jupiter-api-5.7.1.jar
|
||||
${CMAKE_BINARY_DIR}/packages/junit-jupiter-engine-5.7.1.jar
|
||||
${CMAKE_BINARY_DIR}/packages/junit-jupiter-params-5.7.1.jar
|
||||
${CMAKE_BINARY_DIR}/packages/opentest4j-1.2.0.jar
|
||||
|
@ -382,7 +382,14 @@ if(NOT OPEN_FOR_IDE)
|
|||
COMMAND ${Java_JAVA_EXECUTABLE}
|
||||
-classpath "${target_jar}:${integration_jar_path}:${JUNIT_CLASSPATH}"
|
||||
-Djava.library.path=${CMAKE_BINARY_DIR}/lib
|
||||
org.junit.platform.console.ConsoleLauncher "--details=summary" "--class-path=${integration_jar_path}" "--scan-classpath" "--disable-banner"
|
||||
org.junit.platform.console.ConsoleLauncher "--details=summary" "--class-path=${integration_jar_path}" "--scan-classpath" "--disable-banner" "-T MultiClient"
|
||||
)
|
||||
|
||||
add_multi_fdbclient_test(NAME java-multi-integration
|
||||
COMMAND ${Java_JAVA_EXECUTABLE}
|
||||
-classpath "${target_jar}:${integration_jar_path}:${JUNIT_CLASSPATH}"
|
||||
-Djava.library.path=${CMAKE_BINARY_DIR}/lib
|
||||
org.junit.platform.console.ConsoleLauncher "--details=summary" "--class-path=${integration_jar_path}" "--scan-classpath" "--disable-banner" "-t MultiClient"
|
||||
)
|
||||
|
||||
endif()
|
||||
|
|
|
@ -513,7 +513,7 @@ struct JVM {
|
|||
}
|
||||
};
|
||||
|
||||
struct JavaWorkload : FDBWorkload {
|
||||
struct JavaWorkload final : FDBWorkload {
|
||||
std::shared_ptr<JVM> jvm;
|
||||
FDBLogger& log;
|
||||
FDBWorkloadContext* context = nullptr;
|
||||
|
|
|
@@ -22,4 +22,19 @@ To skip integration tests, execute `ctest -E integration` from `${BUILD_DIR}/bin
To run _only_ integration tests, run `ctest -R integration` from `${BUILD_DIR}/bindings/java`.

There are lots of other useful `ctest` commands, which we don't need to get into here. For more information,
see the [https://cmake.org/cmake/help/v3.19/manual/ctest.1.html](ctest documentation).
see the [https://cmake.org/cmake/help/v3.19/manual/ctest.1.html](ctest documentation).

### Multi-Client tests
Multi-Client tests are integration tests that can only be executed when multiple clusters are running. To write a multi-client
test, do the following:

1. Tag all tests that require multiple clients with `@Tag("MultiClient")`
2. Ensure that your tests have the `MultiClientHelper` extension present, and registered as an extension
3. Ensure that your test class is in the JAVA_INTEGRATION_TESTS list in `test.cmake`

(see `BasicMultiClientIntegrationTest` for a good reference example, and the short sketch below)

It is important to note that it requires significant time to start and stop 3 separate clusters; if the underlying test takes a long time to run,
ctest will time out and kill the test. When that happens, there is no guarantee that the FDB clusters will be properly stopped! It is thus
in your best interest to ensure that all tests run in a relatively small amount of time, or have a longer timeout attached.
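As a rough illustration of those steps, a minimal multi-client test might look like the sketch below. The class name, keys, and values are invented for the example; `BasicMultiClientIntegrationTest` in this change is the authoritative reference.

```java
package com.apple.foundationdb;

import java.util.Collection;

import com.apple.foundationdb.tuple.Tuple;

import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Tag;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.RegisterExtension;

// Hypothetical example class; not part of this change.
public class ExampleMultiClientIntegrationTest {
    // Step 2: register the MultiClientHelper extension so cluster files are read
    // from FDB_CLUSTERS and the opened databases are closed after each test.
    @RegisterExtension
    public static final MultiClientHelper clientHelper = new MultiClientHelper();

    // Step 1: tag the test so it only runs when multiple clusters are available.
    @Test
    @Tag("MultiClient")
    void writeAndReadOnEveryCluster() throws Exception {
        FDB fdb = FDB.selectAPIVersion(630);
        Collection<Database> dbs = clientHelper.openDatabases(fdb);
        for (Database db : dbs) {
            // write a value on this cluster...
            db.run(tr -> {
                tr.set(Tuple.from("example-key").pack(), Tuple.from("example-value").pack());
                return null;
            });
            // ...and read it back to confirm the round trip
            String fetched = db.run(tr -> Tuple.fromBytes(tr.get(Tuple.from("example-key").pack()).join()).getString(0));
            Assertions.assertEquals("example-value", fetched, "value should round-trip on each cluster");
        }
    }
}
```

Step 3 is then just adding the new test class to the JAVA_INTEGRATION_TESTS list so ctest picks it up.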
@ -0,0 +1,69 @@
|
|||
/*
|
||||
* BasicMultiClientIntegrationTest
|
||||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package com.apple.foundationdb;
|
||||
|
||||
import java.util.Collection;
|
||||
import java.util.Random;
|
||||
|
||||
import com.apple.foundationdb.tuple.Tuple;
|
||||
|
||||
import org.junit.jupiter.api.Assertions;
|
||||
import org.junit.jupiter.api.Tag;
|
||||
import org.junit.jupiter.api.Test;
|
||||
import org.junit.jupiter.api.extension.RegisterExtension;
|
||||
|
||||
/**
|
||||
* Simple class to test multi-client logic.
|
||||
*
|
||||
* Note that all Multi-client-only tests _must_ be tagged with "MultiClient", which will ensure that they are excluded
|
||||
* from non-multi-threaded tests.
|
||||
*/
|
||||
public class BasicMultiClientIntegrationTest {
|
||||
@RegisterExtension public static final MultiClientHelper clientHelper = new MultiClientHelper();
|
||||
|
||||
@Test
|
||||
@Tag("MultiClient")
|
||||
void testMultiClientWritesAndReadsData() throws Exception {
|
||||
FDB fdb = FDB.selectAPIVersion(630);
|
||||
fdb.options().setKnob("min_trace_severity=5");
|
||||
|
||||
Collection<Database> dbs = clientHelper.openDatabases(fdb); // the clientHelper will close the databases for us
|
||||
System.out.print("Starting tests.");
|
||||
Random rand = new Random();
|
||||
for (int counter = 0; counter < 25; ++counter) {
|
||||
for (Database db : dbs) {
|
||||
String key = Integer.toString(rand.nextInt(100000000));
|
||||
String val = Integer.toString(rand.nextInt(100000000));
|
||||
|
||||
db.run(tr -> {
|
||||
tr.set(Tuple.from(key).pack(), Tuple.from(val).pack());
|
||||
return null;
|
||||
});
|
||||
|
||||
String fetchedVal = db.run(tr -> {
|
||||
byte[] result = tr.get(Tuple.from(key).pack()).join();
|
||||
return Tuple.fromBytes(result).getString(0);
|
||||
});
|
||||
Assertions.assertEquals(val, fetchedVal, "Wrong result!");
|
||||
}
|
||||
Thread.sleep(200);
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,201 @@
|
|||
/*
|
||||
* CycleMultiClientIntegrationTest
|
||||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package com.apple.foundationdb;
|
||||
|
||||
import java.util.Arrays;
|
||||
import java.util.ArrayList;
|
||||
import java.util.concurrent.ThreadLocalRandom;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Random;
|
||||
import com.apple.foundationdb.tuple.Tuple;
|
||||
|
||||
import org.junit.jupiter.api.Assertions;
|
||||
|
||||
/**
|
||||
* Setup: Generating a cycle 0 -> 1 -> 2 -> 3 -> 0, its length is 4
|
||||
* Process: randomly choose an element, then reverse the 2nd and 3rd elements, considering the chosen one as the 1st element.
|
||||
* Check: verify no element is lost or added, and they are still a cycle.
|
||||
*
|
||||
* This test is to verify the atomicity of transactions.
|
||||
*/
|
||||
public class CycleMultiClientIntegrationTest {
|
||||
public static final MultiClientHelper clientHelper = new MultiClientHelper();
|
||||
|
||||
// more write txn than validate txn, as parent thread waits only for validate txn.
|
||||
private static final int writeTxnCnt = 2000;
|
||||
private static final int validateTxnCnt = 1000;
|
||||
private static final int threadPerDB = 5;
|
||||
|
||||
private static final int cycleLength = 4;
|
||||
private static List<String> expected = new ArrayList<>(Arrays.asList("0", "1", "2", "3"));
|
||||
|
||||
public static void main(String[] args) throws Exception {
|
||||
FDB fdb = FDB.selectAPIVersion(710);
|
||||
setupThreads(fdb);
|
||||
Collection<Database> dbs = clientHelper.openDatabases(fdb); // the clientHelper will close the databases for us
|
||||
System.out.println("Starting tests");
|
||||
setup(dbs);
|
||||
System.out.println("Start processing and validating");
|
||||
process(dbs);
|
||||
check(dbs);
|
||||
System.out.println("Test finished");
|
||||
}
|
||||
|
||||
private static synchronized void setupThreads(FDB fdb) {
|
||||
int clientThreadsPerVersion = clientHelper.readClusterFromEnv().length;
|
||||
fdb.options().setClientThreadsPerVersion(clientThreadsPerVersion);
|
||||
System.out.printf("thread per version is %d\n", clientThreadsPerVersion);
|
||||
fdb.options().setExternalClientDirectory("/var/dynamic-conf/lib");
|
||||
fdb.options().setTraceEnable("/tmp");
|
||||
fdb.options().setKnob("min_trace_severity=5");
|
||||
}
|
||||
|
||||
private static void setup(Collection<Database> dbs) {
|
||||
// 0 -> 1 -> 2 -> 3 -> 0
|
||||
for (Database db : dbs) {
|
||||
db.run(tr -> {
|
||||
for (int k = 0; k < cycleLength; k++) {
|
||||
String key = Integer.toString(k);
|
||||
String value = Integer.toString((k + 1) % cycleLength);
|
||||
tr.set(Tuple.from(key).pack(), Tuple.from(value).pack());
|
||||
}
|
||||
return null;
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
private static void process(Collection<Database> dbs) {
|
||||
for (Database db : dbs) {
|
||||
for (int i = 0; i < threadPerDB; i++) {
|
||||
final Thread thread = new Thread(CycleWorkload.create(db));
|
||||
thread.start();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private static void check(Collection<Database> dbs) throws InterruptedException {
|
||||
final Map<Thread, CycleChecker> threadsToCheckers = new HashMap<>();
|
||||
for (Database db : dbs) {
|
||||
for (int i = 0; i < threadPerDB; i++) {
|
||||
final CycleChecker checker = new CycleChecker(db);
|
||||
final Thread thread = new Thread(checker);
|
||||
thread.start();
|
||||
threadsToCheckers.put(thread, checker);
|
||||
}
|
||||
}
|
||||
|
||||
for (Map.Entry<Thread, CycleChecker> entry : threadsToCheckers.entrySet()) {
|
||||
entry.getKey().join();
|
||||
final boolean succeed = entry.getValue().succeed();
|
||||
Assertions.assertTrue(succeed, "Cycle test failed");
|
||||
}
|
||||
}
|
||||
|
||||
public static class CycleWorkload implements Runnable {
|
||||
|
||||
private final Database db;
|
||||
|
||||
private CycleWorkload(Database db) {
|
||||
this.db = db;
|
||||
}
|
||||
|
||||
public static CycleWorkload create(Database db) {
|
||||
return new CycleWorkload(db);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void run() {
|
||||
for (int i = 0; i < writeTxnCnt; i++) {
|
||||
db.run(tr -> {
|
||||
final int k = ThreadLocalRandom.current().nextInt(cycleLength);
|
||||
final String key = Integer.toString(k);
|
||||
byte[] result1 = tr.get(Tuple.from(key).pack()).join();
|
||||
String value1 = Tuple.fromBytes(result1).getString(0);
|
||||
|
||||
byte[] result2 = tr.get(Tuple.from(value1).pack()).join();
|
||||
String value2 = Tuple.fromBytes(result2).getString(0);
|
||||
|
||||
byte[] result3 = tr.get(Tuple.from(value2).pack()).join();
|
||||
String value3 = Tuple.fromBytes(result3).getString(0);
|
||||
|
||||
byte[] result4 = tr.get(Tuple.from(value3).pack()).join();
|
||||
|
||||
tr.set(Tuple.from(key).pack(), Tuple.from(value2).pack());
|
||||
tr.set(Tuple.from(value2).pack(), Tuple.from(value1).pack());
|
||||
tr.set(Tuple.from(value1).pack(), Tuple.from(value3).pack());
|
||||
return null;
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public static class CycleChecker implements Runnable {
|
||||
private final Database db;
|
||||
private boolean succeed;
|
||||
|
||||
public CycleChecker(Database db) {
|
||||
this.db = db;
|
||||
this.succeed = true;
|
||||
}
|
||||
|
||||
public static CycleChecker create(Database db) {
|
||||
return new CycleChecker(db);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void run() {
|
||||
for (int i = 0; i < validateTxnCnt; i++) {
|
||||
db.run(tr -> {
|
||||
final int k = ThreadLocalRandom.current().nextInt(cycleLength);
|
||||
final String key = Integer.toString(k);
|
||||
byte[] result1 = tr.get(Tuple.from(key).pack()).join();
|
||||
String value1 = Tuple.fromBytes(result1).getString(0);
|
||||
|
||||
byte[] result2 = tr.get(Tuple.from(value1).pack()).join();
|
||||
String value2 = Tuple.fromBytes(result2).getString(0);
|
||||
|
||||
byte[] result3 = tr.get(Tuple.from(value2).pack()).join();
|
||||
String value3 = Tuple.fromBytes(result3).getString(0);
|
||||
|
||||
byte[] result4 = tr.get(Tuple.from(value3).pack()).join();
|
||||
String value4 = Tuple.fromBytes(result4).getString(0);
|
||||
|
||||
if (!key.equals(value4)) {
|
||||
succeed = false;
|
||||
}
|
||||
List<String> actual = new ArrayList<>(Arrays.asList(value1, value2, value3, value4));
|
||||
Collections.sort(actual);
|
||||
if (!expected.equals(actual)) {
|
||||
succeed = false;
|
||||
}
|
||||
return null;
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
public boolean succeed() {
|
||||
return succeed;
|
||||
}
|
||||
}
|
||||
}
|
|
@ -19,8 +19,6 @@
|
|||
*/
|
||||
package com.apple.foundationdb;
|
||||
|
||||
import java.nio.file.Path;
|
||||
import java.nio.file.Paths;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
|
|
|
@ -0,0 +1,82 @@
|
|||
/*
|
||||
* MultiClientHelper.java
|
||||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package com.apple.foundationdb;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
import org.junit.jupiter.api.extension.AfterEachCallback;
|
||||
import org.junit.jupiter.api.extension.BeforeAllCallback;
|
||||
import org.junit.jupiter.api.extension.ExtensionContext;
|
||||
|
||||
/**
|
||||
* Callback to help define a multi-client scenario and ensure that
|
||||
* the clients can be configured properly.
|
||||
*/
|
||||
public class MultiClientHelper implements BeforeAllCallback,AfterEachCallback{
|
||||
private String[] clusterFiles;
|
||||
private Collection<Database> openDatabases;
|
||||
|
||||
public static String[] readClusterFromEnv() {
|
||||
/*
|
||||
* Reads the cluster file lists from the ENV variable
|
||||
* FDB_CLUSTERS.
|
||||
*/
|
||||
String clusterFilesProp = System.getenv("FDB_CLUSTERS");
|
||||
if (clusterFilesProp == null) {
|
||||
throw new IllegalStateException("Missing FDB cluster connection file names");
|
||||
}
|
||||
|
||||
return clusterFilesProp.split(";");
|
||||
}
|
||||
|
||||
Collection<Database> openDatabases(FDB fdb){
|
||||
if(openDatabases!=null){
|
||||
return openDatabases;
|
||||
}
|
||||
if(clusterFiles==null){
|
||||
clusterFiles = readClusterFromEnv();
|
||||
}
|
||||
Collection<Database> dbs = new ArrayList<Database>();
|
||||
for (String arg : clusterFiles) {
|
||||
System.out.printf("Opening Cluster: %s\n", arg);
|
||||
dbs.add(fdb.open(arg));
|
||||
}
|
||||
|
||||
this.openDatabases = dbs;
|
||||
return dbs;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void beforeAll(ExtensionContext arg0) throws Exception {
|
||||
clusterFiles = readClusterFromEnv();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void afterEach(ExtensionContext arg0) throws Exception {
|
||||
//close any databases that have been opened
|
||||
if(openDatabases!=null){
|
||||
for(Database db : openDatabases){
|
||||
db.close();
|
||||
}
|
||||
}
|
||||
openDatabases = null;
|
||||
}
|
||||
|
||||
}
|
|
@ -0,0 +1,188 @@
|
|||
/*
|
||||
* RepeatableReadMultiThreadClientTest
|
||||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package com.apple.foundationdb;
|
||||
|
||||
import java.util.Collection;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
import com.apple.foundationdb.tuple.Tuple;
|
||||
|
||||
import org.junit.jupiter.api.Assertions;
|
||||
|
||||
/**
|
||||
* This test verifies that transactions have repeatable reads.
|
||||
* 1 First set initialValue to key.
|
||||
* 2 Have transactions read the key and verify the initialValue in a loop; if a transaction does not
|
||||
* see the initialValue as the value, it sets the flag to false.
|
||||
*
|
||||
* 3 Then have new transactions set the value and then read to verify the new value is set,
|
||||
* if it does not read the new value, set the flag to false.
|
||||
*
|
||||
* 4 Verify that old transactions have not finished when new transactions have finished,
|
||||
* then verify the old transactions do not have a false flag -- it means that the old transactions
|
||||
* are still seeing the initialValue even after the new transactions set it to a new value.
|
||||
*/
|
||||
public class RepeatableReadMultiThreadClientTest {
|
||||
public static final MultiClientHelper clientHelper = new MultiClientHelper();
|
||||
|
||||
private static final int oldValueReadCount = 30;
|
||||
private static final int threadPerDB = 5;
|
||||
|
||||
private static final String key = "foo";
|
||||
private static final String initialValue = "bar";
|
||||
private static final String newValue = "cool";
|
||||
private static final Map<Thread, OldValueReader> threadToOldValueReaders = new HashMap<>();
|
||||
|
||||
public static void main(String[] args) throws Exception {
|
||||
FDB fdb = FDB.selectAPIVersion(710);
|
||||
setupThreads(fdb);
|
||||
Collection<Database> dbs = clientHelper.openDatabases(fdb); // the clientHelper will close the databases for us
|
||||
System.out.println("Starting tests");
|
||||
setup(dbs);
|
||||
System.out.println("Start processing and validating");
|
||||
readOldValue(dbs);
|
||||
setNewValueAndRead(dbs);
|
||||
System.out.println("Test finished");
|
||||
}
|
||||
|
||||
private static synchronized void setupThreads(FDB fdb) {
|
||||
int clientThreadsPerVersion = clientHelper.readClusterFromEnv().length;
|
||||
fdb.options().setClientThreadsPerVersion(clientThreadsPerVersion);
|
||||
System.out.printf("thread per version is %d\n", clientThreadsPerVersion);
|
||||
fdb.options().setExternalClientDirectory("/var/dynamic-conf/lib");
|
||||
fdb.options().setTraceEnable("/tmp");
|
||||
fdb.options().setKnob("min_trace_severity=5");
|
||||
}
|
||||
|
||||
private static void setup(Collection<Database> dbs) {
|
||||
// 0 -> 1 -> 2 -> 3 -> 0
|
||||
for (Database db : dbs) {
|
||||
db.run(tr -> {
|
||||
tr.set(Tuple.from(key).pack(), Tuple.from(initialValue).pack());
|
||||
return null;
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
private static void readOldValue(Collection<Database> dbs) throws InterruptedException {
|
||||
for (Database db : dbs) {
|
||||
for (int i = 0; i < threadPerDB; i++) {
|
||||
final OldValueReader oldValueReader = new OldValueReader(db);
|
||||
final Thread thread = new Thread(OldValueReader.create(db));
|
||||
thread.start();
|
||||
threadToOldValueReaders.put(thread, oldValueReader);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private static void setNewValueAndRead(Collection<Database> dbs) throws InterruptedException {
|
||||
// threads running NewValueReader need to wait for threads to start first who run OldValueReader
|
||||
Thread.sleep(1000);
|
||||
final Map<Thread, NewValueReader> threads = new HashMap<>();
|
||||
for (Database db : dbs) {
|
||||
for (int i = 0; i < threadPerDB; i++) {
|
||||
final NewValueReader newValueReader = new NewValueReader(db);
|
||||
final Thread thread = new Thread(NewValueReader.create(db));
|
||||
thread.start();
|
||||
threads.put(thread, newValueReader);
|
||||
}
|
||||
}
|
||||
|
||||
for (Map.Entry<Thread, NewValueReader> entry : threads.entrySet()) {
|
||||
entry.getKey().join();
|
||||
Assertions.assertTrue(entry.getValue().succeed, "new value reader failed to read the correct value");
|
||||
}
|
||||
|
||||
for (Map.Entry<Thread, OldValueReader> entry : threadToOldValueReaders.entrySet()) {
|
||||
Assertions.assertTrue(entry.getKey().isAlive(), "Old value reader finished too soon, cannot verify repeatable read, succeed is " + entry.getValue().succeed);
|
||||
}
|
||||
|
||||
for (Map.Entry<Thread, OldValueReader> entry : threadToOldValueReaders.entrySet()) {
|
||||
entry.getKey().join();
|
||||
Assertions.assertTrue(entry.getValue().succeed, "old value reader failed to read the correct value");
|
||||
}
|
||||
}
|
||||
|
||||
public static class OldValueReader implements Runnable {
|
||||
|
||||
private final Database db;
|
||||
private boolean succeed;
|
||||
|
||||
private OldValueReader(Database db) {
|
||||
this.db = db;
|
||||
this.succeed = true;
|
||||
}
|
||||
|
||||
public static OldValueReader create(Database db) {
|
||||
return new OldValueReader(db);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void run() {
|
||||
db.run(tr -> {
|
||||
try {
|
||||
for (int i = 0; i < oldValueReadCount; i++) {
|
||||
byte[] result = tr.get(Tuple.from(key).pack()).join();
|
||||
String value = Tuple.fromBytes(result).getString(0);
|
||||
if (!initialValue.equals(value)) {
|
||||
succeed = false;
|
||||
break;
|
||||
}
|
||||
Thread.sleep(100);
|
||||
}
|
||||
}
|
||||
catch (Exception e) {
|
||||
succeed = false;
|
||||
}
|
||||
return null;
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
public static class NewValueReader implements Runnable {
|
||||
private final Database db;
|
||||
private boolean succeed;
|
||||
|
||||
public NewValueReader(Database db) {
|
||||
this.db = db;
|
||||
this.succeed = true;
|
||||
}
|
||||
|
||||
public static NewValueReader create(Database db) {
|
||||
return new NewValueReader(db);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void run() {
|
||||
db.run(tr -> {
|
||||
tr.set(Tuple.from(key).pack(), Tuple.from(newValue).pack());
|
||||
return null;
|
||||
});
|
||||
String value = db.run(tr -> {
|
||||
byte[] result = tr.get(Tuple.from(key).pack()).join();
|
||||
return Tuple.fromBytes(result).getString(0);
|
||||
});
|
||||
if (!newValue.equals(value)) {
|
||||
succeed = false;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,143 @@
|
|||
package com.apple.foundationdb;
|
||||
|
||||
import com.apple.foundationdb.tuple.Tuple;
|
||||
|
||||
import java.util.Collection;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
import java.util.concurrent.BlockingQueue;
|
||||
import java.util.concurrent.LinkedBlockingQueue;
|
||||
import java.util.concurrent.ThreadLocalRandom;
|
||||
|
||||
import org.junit.jupiter.api.Assertions;
|
||||
|
||||
/**
|
||||
* Each cluster has a queue; the producer writes a key and then sends a message to that queue in the JVM.
|
||||
* The consumer consumes the key by checking the existence of the key; if it does not find the key,
|
||||
* then the test would fail.
|
||||
*
|
||||
* This test is to verify the causal consistency of transactions for the multi-threaded client.
|
||||
*/
|
||||
public class SidebandMultiThreadClientTest {
|
||||
public static final MultiClientHelper clientHelper = new MultiClientHelper();
|
||||
|
||||
private static final Map<Database, BlockingQueue<String>> db2Queues = new HashMap<>();
|
||||
private static final int threadPerDB = 5;
|
||||
private static final int txnCnt = 1000;
|
||||
|
||||
public static void main(String[] args) throws Exception {
|
||||
FDB fdb = FDB.selectAPIVersion(710);
|
||||
setupThreads(fdb);
|
||||
Collection<Database> dbs = clientHelper.openDatabases(fdb); // the clientHelper will close the databases for us
|
||||
for (Database db : dbs) {
|
||||
db2Queues.put(db, new LinkedBlockingQueue<>());
|
||||
}
|
||||
System.out.println("Start processing and validating");
|
||||
process(dbs);
|
||||
check(dbs);
|
||||
System.out.println("Test finished");
|
||||
}
|
||||
|
||||
private static synchronized void setupThreads(FDB fdb) {
|
||||
int clientThreadsPerVersion = clientHelper.readClusterFromEnv().length;
|
||||
fdb.options().setClientThreadsPerVersion(clientThreadsPerVersion);
|
||||
System.out.printf("thread per version is %d\n", clientThreadsPerVersion);
|
||||
fdb.options().setExternalClientDirectory("/var/dynamic-conf/lib");
|
||||
fdb.options().setTraceEnable("/tmp");
|
||||
fdb.options().setKnob("min_trace_severity=5");
|
||||
}
|
||||
|
||||
private static void process(Collection<Database> dbs) {
|
||||
for (Database db : dbs) {
|
||||
for (int i = 0; i < threadPerDB; i++) {
|
||||
final Thread thread = new Thread(Producer.create(db, db2Queues.get(db)));
|
||||
thread.start();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private static void check(Collection<Database> dbs) throws InterruptedException {
|
||||
final Map<Thread, Consumer> threads2Consumers = new HashMap<>();
|
||||
for (Database db : dbs) {
|
||||
for (int i = 0; i < threadPerDB; i++) {
|
||||
final Consumer consumer = Consumer.create(db, db2Queues.get(db));
|
||||
final Thread thread = new Thread(consumer);
|
||||
thread.start();
|
||||
threads2Consumers.put(thread, consumer);
|
||||
}
|
||||
}
|
||||
|
||||
for (Map.Entry<Thread, Consumer> entry : threads2Consumers.entrySet()) {
|
||||
entry.getKey().join();
|
||||
final boolean succeed = entry.getValue().succeed;
|
||||
Assertions.assertTrue(succeed, "Sideband test failed");
|
||||
}
|
||||
}
|
||||
|
||||
public static class Producer implements Runnable {
|
||||
private final Database db;
|
||||
private final BlockingQueue<String> queue;
|
||||
|
||||
private Producer(Database db, BlockingQueue<String> queue) {
|
||||
this.db = db;
|
||||
this.queue = queue;
|
||||
}
|
||||
|
||||
public static Producer create(Database db, BlockingQueue<String> queue) {
|
||||
return new Producer(db, queue);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void run() {
|
||||
for (int i = 0; i < txnCnt; i++) {
|
||||
final long suffix = ThreadLocalRandom.current().nextLong();
|
||||
final String key = String.format("Sideband/Multithread/Test/%d", suffix);
|
||||
db.run(tr -> {
|
||||
tr.set(Tuple.from(key).pack(), Tuple.from("bar").pack());
|
||||
return null;
|
||||
});
|
||||
queue.offer(key);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public static class Consumer implements Runnable {
|
||||
private final Database db;
|
||||
private final BlockingQueue<String> queue;
|
||||
private boolean succeed;
|
||||
|
||||
private Consumer(Database db, BlockingQueue<String> queue) {
|
||||
this.db = db;
|
||||
this.queue = queue;
|
||||
this.succeed = true;
|
||||
}
|
||||
|
||||
public static Consumer create(Database db, BlockingQueue<String> queue) {
|
||||
return new Consumer(db, queue);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void run() {
|
||||
try {
|
||||
for (int i = 0; i < txnCnt && succeed; i++) {
|
||||
final String key = queue.take();
|
||||
db.run(tr -> {
|
||||
byte[] result = tr.get(Tuple.from(key).pack()).join();
|
||||
if (result == null) {
|
||||
System.out.println("FAILED to get key " + key + " from DB " + db);
|
||||
succeed = false;
|
||||
}
|
||||
if (!succeed) {
|
||||
return null;
|
||||
}
|
||||
String value = Tuple.fromBytes(result).getString(0);
|
||||
return null;
|
||||
});
|
||||
}
|
||||
} catch (InterruptedException e) {
|
||||
System.out.println("Get Exception in consumer: " + e);
|
||||
succeed = false;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
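For reference, the same producer/consumer causal-consistency check can be condensed into a short sketch using the Python bindings. This is illustrative only (the committed test above is the Java version); it assumes a reachable cluster, API version 710, and the documented fdb Python API (fdb.open, @fdb.transactional, fdb.tuple.pack), and the key layout is arbitrary.

import queue
import threading
import fdb

fdb.api_version(710)
db = fdb.open()
keys = queue.Queue()  # in-process queue, playing the role of the per-database BlockingQueue above

@fdb.transactional
def write_key(tr, key):
    tr[key] = b'bar'

@fdb.transactional
def check_key(tr, key):
    # Causal consistency: a key that was published only after its commit must be readable.
    assert tr[key] != None  # FDB values compare to None with ==/!=, not "is"

def producer(txn_cnt=1000):
    for i in range(txn_cnt):
        key = fdb.tuple.pack(('Sideband', 'Multithread', 'Test', i))
        write_key(db, key)  # commit the key first...
        keys.put(key)       # ...then hand it to the consumer

def consumer(txn_cnt=1000):
    for _ in range(txn_cnt):
        check_key(db, keys.get())

threads = [threading.Thread(target=producer), threading.Thread(target=consumer)]
for t in threads:
    t.start()
for t in threads:
    t.join()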
@ -165,9 +165,13 @@ public class KeySelector {
|
|||
}
|
||||
|
||||
/**
|
||||
* Returns the {@code or-equal} parameter of this {@code KeySelector}. For internal use.
|
||||
* Returns the orEqual parameter for this {@code KeySelector}. See the
|
||||
* {@link #KeySelector(byte[], boolean, int) KeySelector constructor}
|
||||
* for more details.
|
||||
*
|
||||
* @return the {@code or-equal} parameter of this {@code KeySelector}.
|
||||
*/
|
||||
boolean orEqual() {
|
||||
public boolean orEqual() {
|
||||
return orEqual;
|
||||
}
|
||||
|
||||
|
|
|
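The orEqual getter made public above is one of the three fields (key, orEqual, offset) that define a key selector. As a reminder of the semantics, here is a small hedged sketch using the Python bindings, which expose the same concept; it assumes a reachable cluster and the documented fdb.KeySelector helpers.

import fdb

fdb.api_version(710)
db = fdb.open()

# first_greater_or_equal(k) is KeySelector(k, or_equal=False, offset=1),
# i.e. "resolve to the first existing key >= k". The other helpers are
# last_less_than, last_less_or_equal and first_greater_than.
sel = fdb.KeySelector.first_greater_or_equal(b'apple')

@fdb.transactional
def resolve(tr, selector):
    # get_key() resolves a key selector against the current database contents.
    return tr.get_key(selector)

print(resolve(db, sel))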
@ -48,12 +48,17 @@ set(JUNIT_RESOURCES
|
|||
set(JAVA_INTEGRATION_TESTS
|
||||
src/integration/com/apple/foundationdb/DirectoryTest.java
|
||||
src/integration/com/apple/foundationdb/RangeQueryIntegrationTest.java
|
||||
src/integration/com/apple/foundationdb/BasicMultiClientIntegrationTest.java
|
||||
src/integration/com/apple/foundationdb/CycleMultiClientIntegrationTest.java
|
||||
src/integration/com/apple/foundationdb/SidebandMultiThreadClientTest.java
|
||||
src/integration/com/apple/foundationdb/RepeatableReadMultiThreadClientTest.java
|
||||
)
|
||||
|
||||
# Resources that are used in integration testing, but are not explicitly test files (JUnit rules,
|
||||
# utility classes, and so forth)
|
||||
set(JAVA_INTEGRATION_RESOURCES
|
||||
src/integration/com/apple/foundationdb/RequiresDatabase.java
|
||||
src/integration/com/apple/foundationdb/MultiClientHelper.java
|
||||
)
|
||||
|
||||
|
||||
|
|
|
@ -81,5 +81,15 @@ if (NOT WIN32 AND NOT OPEN_FOR_IDE)
|
|||
COMMAND ${CMAKE_SOURCE_DIR}/bindings/python/tests/fdbcli_tests.py
|
||||
${CMAKE_BINARY_DIR}/bin/fdbcli
|
||||
@CLUSTER_FILE@
|
||||
1
|
||||
)
|
||||
add_fdbclient_test(
|
||||
NAME multi_process_fdbcli_tests
|
||||
PROCESS_NUMBER 5
|
||||
TEST_TIMEOUT 120 # The test can take close to 1 minute sometimes; set the timeout to 2 minutes to be safe
|
||||
COMMAND ${CMAKE_SOURCE_DIR}/bindings/python/tests/fdbcli_tests.py
|
||||
${CMAKE_BINARY_DIR}/bin/fdbcli
|
||||
@CLUSTER_FILE@
|
||||
5
|
||||
)
|
||||
endif()
|
||||
|
|
|
@ -332,22 +332,128 @@ def transaction(logger):
|
|||
output7 = run_fdbcli_command('get', 'key')
|
||||
assert output7 == "`key': not found"
|
||||
|
||||
def get_fdb_process_addresses():
|
||||
# get all processes' network addresses
|
||||
output = run_fdbcli_command('kill')
|
||||
# except for the first line, each line is one process
|
||||
addresses = output.split('\n')[1:]
|
||||
assert len(addresses) == process_number
|
||||
return addresses
|
||||
|
||||
@enable_logging()
|
||||
def coordinators(logger):
|
||||
# we should only have one coordinator for now
|
||||
output1 = run_fdbcli_command('coordinators')
|
||||
assert len(output1.split('\n')) > 2
|
||||
cluster_description = output1.split('\n')[0].split(': ')[-1]
|
||||
logger.debug("Cluster description: {}".format(cluster_description))
|
||||
coordinators = output1.split('\n')[1].split(': ')[-1]
|
||||
# verify the coordinator
|
||||
coordinator_list = get_value_from_status_json(True, 'client', 'coordinators', 'coordinators')
|
||||
assert len(coordinator_list) == 1
|
||||
assert coordinator_list[0]['address'] == coordinators
|
||||
# verify the cluster description
|
||||
assert get_value_from_status_json(True, 'cluster', 'connection_string').startswith('{}:'.format(cluster_description))
|
||||
addresses = get_fdb_process_addresses()
|
||||
# set all 5 processes as coordinators and update the cluster description
|
||||
new_cluster_description = 'a_simple_description'
|
||||
run_fdbcli_command('coordinators', *addresses, 'description={}'.format(new_cluster_description))
|
||||
# verify now we have 5 coordinators and the description is updated
|
||||
output2 = run_fdbcli_command('coordinators')
|
||||
assert output2.split('\n')[0].split(': ')[-1] == new_cluster_description
|
||||
assert output2.split('\n')[1] == 'Cluster coordinators ({}): {}'.format(5, ','.join(addresses))
|
||||
# auto change should go back to 1 coordinator
|
||||
run_fdbcli_command('coordinators', 'auto')
|
||||
assert len(get_value_from_status_json(True, 'client', 'coordinators', 'coordinators')) == 1
|
||||
|
||||
@enable_logging()
|
||||
def exclude(logger):
|
||||
# get all processes' network addresses
|
||||
addresses = get_fdb_process_addresses()
|
||||
logger.debug("Cluster processes: {}".format(' '.join(addresses)))
|
||||
# There should be no excluded process for now
|
||||
no_excluded_process_output = 'There are currently no servers or localities excluded from the database.'
|
||||
output1 = run_fdbcli_command('exclude')
|
||||
assert no_excluded_process_output in output1
|
||||
# randomly pick one and exclude the process
|
||||
excluded_address = random.choice(addresses)
|
||||
# sometimes we need to retry the exclude
|
||||
while True:
|
||||
logger.debug("Excluding process: {}".format(excluded_address))
|
||||
error_message = run_fdbcli_command_and_get_error('exclude', excluded_address)
|
||||
if error_message == 'WARNING: {} is a coordinator!'.format(excluded_address):
|
||||
# excluding a coordinator prints this warning; verify the randomly selected process is the coordinator
|
||||
coordinator_list = get_value_from_status_json(True, 'client', 'coordinators', 'coordinators')
|
||||
assert len(coordinator_list) == 1
|
||||
assert coordinator_list[0]['address'] == excluded_address
|
||||
break
|
||||
elif not error_message:
|
||||
break
|
||||
else:
|
||||
logger.debug("Error message: {}\n".format(error_message))
|
||||
logger.debug("Retry exclude after 1 second")
|
||||
time.sleep(1)
|
||||
output2 = run_fdbcli_command('exclude')
|
||||
assert 'There are currently 1 servers or localities being excluded from the database' in output2
|
||||
assert excluded_address in output2
|
||||
run_fdbcli_command('include', excluded_address)
|
||||
# check the include is successful
|
||||
output4 = run_fdbcli_command('exclude')
|
||||
assert no_excluded_process_output in output4
|
||||
|
||||
# read the system key 'k'; the READ_SYSTEM_KEYS option needs to be enabled first
|
||||
def read_system_key(k):
|
||||
output = run_fdbcli_command('option', 'on', 'READ_SYSTEM_KEYS;', 'get', k)
|
||||
if 'is' not in output:
|
||||
# key not present
|
||||
return None
|
||||
_, value = output.split(' is ')
|
||||
return value
|
||||
|
||||
@enable_logging()
|
||||
def throttle(logger):
|
||||
# no throttled tags at the beginning
|
||||
no_throttle_tags_output = 'There are no throttled tags'
|
||||
assert run_fdbcli_command('throttle', 'list') == no_throttle_tags_output
|
||||
# test 'throttle enable auto'
|
||||
run_fdbcli_command('throttle', 'enable', 'auto')
|
||||
# verify the change is applied by reading the system key
|
||||
# not an elegant way, may change later
|
||||
enable_flag = read_system_key('\\xff\\x02/throttledTags/autoThrottlingEnabled')
|
||||
assert enable_flag == "`1'"
|
||||
run_fdbcli_command('throttle', 'disable', 'auto')
|
||||
enable_flag = read_system_key('\\xff\\x02/throttledTags/autoThrottlingEnabled')
|
||||
# verify disabled
|
||||
assert enable_flag == "`0'"
|
||||
# TODO : test manual throttling, not easy to do now
|
||||
|
||||
if __name__ == '__main__':
|
||||
# fdbcli_tests.py <path_to_fdbcli_binary> <path_to_fdb_cluster_file>
|
||||
assert len(sys.argv) == 3, "Please pass arguments: <path_to_fdbcli_binary> <path_to_fdb_cluster_file>"
|
||||
# fdbcli_tests.py <path_to_fdbcli_binary> <path_to_fdb_cluster_file> <process_number>
|
||||
assert len(sys.argv) == 4, "Please pass arguments: <path_to_fdbcli_binary> <path_to_fdb_cluster_file> <process_number>"
|
||||
# shell command template
|
||||
command_template = [sys.argv[1], '-C', sys.argv[2], '--exec']
|
||||
# tests for fdbcli commands
|
||||
# assertions will fail if fdbcli does not work as expected
|
||||
advanceversion()
|
||||
cache_range()
|
||||
consistencycheck()
|
||||
datadistribution()
|
||||
kill()
|
||||
lockAndUnlock()
|
||||
maintenance()
|
||||
setclass()
|
||||
suspend()
|
||||
transaction()
|
||||
process_number = int(sys.argv[3])
|
||||
if process_number == 1:
|
||||
# TODO: disabled for now, the change can leave the database unavailable
|
||||
#advanceversion()
|
||||
cache_range()
|
||||
consistencycheck()
|
||||
datadistribution()
|
||||
kill()
|
||||
lockAndUnlock()
|
||||
maintenance()
|
||||
setclass()
|
||||
suspend()
|
||||
transaction()
|
||||
throttle()
|
||||
else:
|
||||
assert process_number > 1, "Process number should be greater than 1"
|
||||
# the kill command, which is used to list processes, sometimes does not work as expected,
|
||||
# which makes the test flaky.
|
||||
# We need to figure out the reason and then re-enable these tests
|
||||
#coordinators()
|
||||
#exclude()
|
||||
|
||||
|
||||
|
|
|
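The new tests above call run_fdbcli_command(), run_fdbcli_command_and_get_error() and get_value_from_status_json(), which are defined earlier in fdbcli_tests.py and are not part of this hunk. A minimal sketch of what the first two helpers can look like, assuming the command_template built in __main__ (the cluster-file path below is a placeholder):

import subprocess

# command_template = [<path_to_fdbcli_binary>, '-C', <path_to_fdb_cluster_file>, '--exec']
command_template = ['fdbcli', '-C', 'fdb.cluster', '--exec']

def run_fdbcli_command(*args):
    # Join the sub-commands into a single --exec argument and return trimmed stdout.
    commands = command_template + [' '.join(args)]
    return subprocess.run(commands, stdout=subprocess.PIPE).stdout.decode('utf-8').strip()

def run_fdbcli_command_and_get_error(*args):
    # Same invocation, but capture and return what fdbcli wrote to stderr.
    commands = command_template + [' '.join(args)]
    return subprocess.run(commands, stderr=subprocess.PIPE).stderr.decode('utf-8').strip()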
@ -1,44 +0,0 @@
|
|||
FROM centos:6
|
||||
LABEL version=0.0.4
|
||||
|
||||
RUN yum install -y yum-utils
|
||||
RUN yum-config-manager --enable rhel-server-rhscl-7-rpms
|
||||
RUN yum -y install centos-release-scl
|
||||
RUN yum install -y devtoolset-7
|
||||
|
||||
# install cmake
|
||||
RUN curl -L https://github.com/Kitware/CMake/releases/download/v3.13.4/cmake-3.13.4-Linux-x86_64.tar.gz > /tmp/cmake.tar.gz &&\
|
||||
echo "563a39e0a7c7368f81bfa1c3aff8b590a0617cdfe51177ddc808f66cc0866c76 /tmp/cmake.tar.gz" > /tmp/cmake-sha.txt &&\
|
||||
sha256sum -c /tmp/cmake-sha.txt &&\
|
||||
cd /tmp && tar xf cmake.tar.gz && cp -r cmake-3.13.4-Linux-x86_64/* /usr/local/
|
||||
|
||||
# install boost
|
||||
RUN curl -L https://boostorg.jfrog.io/artifactory/main/release/1.67.0/source/boost_1_67_0.tar.bz2 > /tmp/boost.tar.bz2 &&\
|
||||
cd /tmp && echo "2684c972994ee57fc5632e03bf044746f6eb45d4920c343937a465fd67a5adba boost.tar.bz2" > boost-sha.txt &&\
|
||||
sha256sum -c boost-sha.txt && tar xf boost.tar.bz2 && cp -r boost_1_72_0/boost /usr/local/include/ &&\
|
||||
rm -rf boost.tar.bz2 boost_1_72_0
|
||||
|
||||
# install mono (for actorcompiler)
|
||||
RUN yum install -y epel-release
|
||||
RUN yum install -y mono-core
|
||||
|
||||
# install Java
|
||||
RUN yum install -y java-1.8.0-openjdk-devel
|
||||
|
||||
# install LibreSSL
|
||||
RUN curl https://ftp.openbsd.org/pub/OpenBSD/LibreSSL/libressl-2.8.2.tar.gz > /tmp/libressl.tar.gz &&\
|
||||
cd /tmp && echo "b8cb31e59f1294557bfc80f2a662969bc064e83006ceef0574e2553a1c254fd5 libressl.tar.gz" > libressl-sha.txt &&\
|
||||
sha256sum -c libressl-sha.txt && tar xf libressl.tar.gz &&\
|
||||
cd libressl-2.8.2 && cd /tmp/libressl-2.8.2 && scl enable devtoolset-7 -- ./configure --prefix=/usr/local/stow/libressl CFLAGS="-fPIC -O3" --prefix=/usr/local &&\
|
||||
cd /tmp/libressl-2.8.2 && scl enable devtoolset-7 -- make -j`nproc` install &&\
|
||||
rm -rf /tmp/libressl-2.8.2 /tmp/libressl.tar.gz
|
||||
|
||||
|
||||
# install dependencies for bindings and documentation
|
||||
# python 2.7 is required for the documentation
|
||||
RUN yum install -y rh-python36-python-devel rh-ruby24 golang python27
|
||||
|
||||
# install packaging tools
|
||||
RUN yum install -y rpm-build debbuild
|
||||
|
||||
CMD scl enable devtoolset-7 python27 rh-python36 rh-ruby24 -- bash
|
|
@ -1,279 +0,0 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
arguments_usage() {
|
||||
cat <<EOF
|
||||
usage: build.sh [-h] [commands]
|
||||
-h: print this help message and
|
||||
abort execution
|
||||
|
||||
Will execute the passed commands
|
||||
in the order they were passed
|
||||
EOF
|
||||
}
|
||||
|
||||
arguments_parse() {
|
||||
local __res=0
|
||||
while getopts ":h" opt
|
||||
do
|
||||
case ${opt} in
|
||||
h )
|
||||
arguments_usage
|
||||
__res=2
|
||||
break
|
||||
;;
|
||||
\? )
|
||||
echo "Unknown option ${opt}"
|
||||
arguments_usage
|
||||
__res=1
|
||||
break
|
||||
;;
|
||||
esac
|
||||
done
|
||||
shift $((OPTIND -1))
|
||||
commands=("$@")
|
||||
return ${__res}
|
||||
}
|
||||
|
||||
configure() {
|
||||
local __res=0
|
||||
for _ in 1
|
||||
do
|
||||
cmake ../foundationdb ${CMAKE_EXTRA_ARGS}
|
||||
__res=$?
|
||||
if [ ${__res} -ne 0 ]
|
||||
then
|
||||
break
|
||||
fi
|
||||
done
|
||||
return ${__res}
|
||||
}
|
||||
|
||||
build_fast() {
|
||||
local __res=0
|
||||
for _ in 1
|
||||
do
|
||||
make -j`nproc`
|
||||
__res=$?
|
||||
if [ ${__res} -ne 0 ]
|
||||
then
|
||||
break
|
||||
fi
|
||||
done
|
||||
return ${__res}
|
||||
}
|
||||
|
||||
build() {
|
||||
local __res=0
|
||||
for _ in 1
|
||||
do
|
||||
configure
|
||||
__res=$?
|
||||
if [ ${__res} -ne 0 ]
|
||||
then
|
||||
break
|
||||
fi
|
||||
build_fast
|
||||
__res=$?
|
||||
if [ ${__res} -ne 0 ]
|
||||
then
|
||||
break
|
||||
fi
|
||||
done
|
||||
return ${__res}
|
||||
}
|
||||
|
||||
package_fast() {
|
||||
local __res=0
|
||||
for _ in 1
|
||||
do
|
||||
make -j`nproc` packages
|
||||
cpack
|
||||
cpack -G RPM -D GENERATE_EL6=ON
|
||||
__res=$?
|
||||
if [ ${__res} -ne 0 ]
|
||||
then
|
||||
break
|
||||
fi
|
||||
done
|
||||
return ${__res}
|
||||
}
|
||||
|
||||
package() {
|
||||
local __res=0
|
||||
for _ in 1
|
||||
do
|
||||
build
|
||||
__res=$?
|
||||
if [ ${__res} -ne 0 ]
|
||||
then
|
||||
break
|
||||
fi
|
||||
package_fast
|
||||
__res=$?
|
||||
if [ ${__res} -ne 0 ]
|
||||
then
|
||||
break
|
||||
fi
|
||||
done
|
||||
return ${__res}
|
||||
}
|
||||
|
||||
rpm() {
|
||||
local __res=0
|
||||
for _ in 1
|
||||
do
|
||||
configure
|
||||
__res=$?
|
||||
if [ ${__res} -ne 0 ]
|
||||
then
|
||||
break
|
||||
fi
|
||||
build_fast
|
||||
__res=$?
|
||||
if [ ${__res} -ne 0 ]
|
||||
then
|
||||
break
|
||||
fi
|
||||
fakeroot cpack -G RPM -D GENERATE_EL6=ON
|
||||
fakeroot cpack -G RPM
|
||||
__res=$?
|
||||
if [ ${__res} -ne 0 ]
|
||||
then
|
||||
break
|
||||
fi
|
||||
done
|
||||
return ${__res}
|
||||
}
|
||||
|
||||
deb() {
|
||||
local __res=0
|
||||
for _ in 1
|
||||
do
|
||||
configure
|
||||
__res=$?
|
||||
if [ ${__res} -ne 0 ]
|
||||
then
|
||||
break
|
||||
fi
|
||||
build_fast
|
||||
__res=$?
|
||||
if [ ${__res} -ne 0 ]
|
||||
then
|
||||
break
|
||||
fi
|
||||
fakeroot cpack -G DEB
|
||||
__res=$?
|
||||
if [ ${__res} -ne 0 ]
|
||||
then
|
||||
break
|
||||
fi
|
||||
done
|
||||
return ${__res}
|
||||
}
|
||||
|
||||
test-fast() {
|
||||
local __res=0
|
||||
for _ in 1
|
||||
do
|
||||
ctest -j`nproc` ${CTEST_EXTRA_ARGS}
|
||||
__res=$?
|
||||
if [ ${__res} -ne 0 ]
|
||||
then
|
||||
break
|
||||
fi
|
||||
done
|
||||
return ${__res}
|
||||
}
|
||||
|
||||
test() {
|
||||
local __res=0
|
||||
for _ in 1
|
||||
do
|
||||
build
|
||||
__res=$?
|
||||
if [ ${__res} -ne 0 ]
|
||||
then
|
||||
break
|
||||
fi
|
||||
test-fast
|
||||
__res=$?
|
||||
if [ ${__res} -ne 0 ]
|
||||
then
|
||||
break
|
||||
fi
|
||||
done
|
||||
return ${__res}
|
||||
}
|
||||
|
||||
main() {
|
||||
local __res=0
|
||||
for _ in 1
|
||||
do
|
||||
arguments_parse "$@"
|
||||
__res=$?
|
||||
if [ ${__res} -ne 0 ]
|
||||
then
|
||||
if [ ${__res} -eq 2 ]
|
||||
then
|
||||
# in this case there was no error
|
||||
# We still want to exit the script
|
||||
__res=0
|
||||
fi
|
||||
break
|
||||
fi
|
||||
echo "Num commands ${#commands[@]}"
|
||||
for command in "${commands[@]}"
|
||||
do
|
||||
echo "Command: ${command}"
|
||||
case ${command} in
|
||||
configure )
|
||||
configure
|
||||
__res=$?
|
||||
;;
|
||||
build )
|
||||
build
|
||||
__res=$?
|
||||
;;
|
||||
build/fast )
|
||||
build_fast
|
||||
__res=$?
|
||||
;;
|
||||
package )
|
||||
package
|
||||
__res=$?
|
||||
;;
|
||||
package/fast )
|
||||
package_fast
|
||||
__res=$?
|
||||
;;
|
||||
rpm )
|
||||
rpm
|
||||
__res=$?
|
||||
;;
|
||||
deb )
|
||||
deb
|
||||
__res=$?
|
||||
;;
|
||||
test-fast)
|
||||
test-fast
|
||||
__res=$?
|
||||
;;
|
||||
test)
|
||||
test
|
||||
__res=$?
|
||||
;;
|
||||
* )
|
||||
echo "ERROR: Command not found ($command)"
|
||||
__res=1
|
||||
;;
|
||||
esac
|
||||
if [ ${__res} -ne 0 ]
|
||||
then
|
||||
break
|
||||
fi
|
||||
done
|
||||
done
|
||||
return ${__res}
|
||||
}
|
||||
|
||||
main "$@"
|
|
@ -1,3 +0,0 @@
|
|||
FROM centos:6
|
||||
|
||||
RUN yum install -y yum-utils upstart initscripts
|
|
@ -1,3 +0,0 @@
|
|||
FROM centos:7
|
||||
|
||||
RUN yum install -y yum-utils systemd sysvinit-tools
|
|
@ -1,3 +0,0 @@
|
|||
FROM ubuntu:16.04
|
||||
|
||||
RUN apt-get update && apt-get install -y systemd python
|
|
@ -1,65 +0,0 @@
|
|||
version: "3"
|
||||
|
||||
services:
|
||||
|
||||
common: &common
|
||||
image: foundationdb-build:0.0.4
|
||||
build:
|
||||
context: .
|
||||
dockerfile: Dockerfile
|
||||
|
||||
build-setup: &build-setup
|
||||
<<: *common
|
||||
depends_on: [common]
|
||||
#debuginfo builds need the build path to be longer than
|
||||
#the path where debuginfo sources are placed. Crazy, yes,
|
||||
#see the manual for CPACK_RPM_BUILD_SOURCE_DIRS_PREFIX.
|
||||
volumes:
|
||||
- ../..:/foundationdb/deep/directory/as/debuginfo/doesnt/work/otherwise/foundationdb
|
||||
- ${BUILDDIR}:/foundationdb/deep/directory/as/debuginfo/doesnt/work/otherwise/build
|
||||
working_dir: /foundationdb/deep/directory/as/debuginfo/doesnt/work/otherwise/build
|
||||
|
||||
configure: &configure
|
||||
<<: *build-setup
|
||||
command: scl enable devtoolset-7 rh-ruby24 rh-python36 python27 -- bash ../foundationdb/build/cmake/build.sh configure
|
||||
|
||||
build: &build
|
||||
<<: *build-setup
|
||||
command: scl enable devtoolset-7 rh-ruby24 rh-python36 python27 -- bash ../foundationdb/build/cmake/build.sh build
|
||||
|
||||
build-fast: &build-fast
|
||||
<<: *build-setup
|
||||
command: scl enable devtoolset-7 rh-ruby24 rh-python36 python27 -- bash ../foundationdb/build/cmake/build.sh build/fast
|
||||
|
||||
rpm: &rpm
|
||||
<<: *build-setup
|
||||
command: scl enable devtoolset-7 rh-ruby24 rh-python36 python27 -- bash ../foundationdb/build/cmake/build.sh rpm
|
||||
|
||||
deb: &deb
|
||||
<<: *build-setup
|
||||
command: scl enable devtoolset-7 rh-ruby24 rh-python36 python27 -- bash ../foundationdb/build/cmake/build.sh deb
|
||||
|
||||
package: &package
|
||||
<<: *build-setup
|
||||
command: scl enable devtoolset-7 rh-ruby24 rh-python36 python27 -- bash ../foundationdb/build/cmake/build.sh package
|
||||
|
||||
package-fast: &package-fast
|
||||
<<: *build-setup
|
||||
command: scl enable devtoolset-7 rh-ruby24 rh-python36 python27 -- bash ../foundationdb/build/cmake/build.sh package/fast
|
||||
|
||||
test-fast: &test-fast
|
||||
<<: *build-setup
|
||||
command: scl enable devtoolset-7 rh-ruby24 rh-python36 python27 -- bash ../foundationdb/build/cmake/build.sh test-fast
|
||||
|
||||
test: &test
|
||||
<<: *build-setup
|
||||
command: scl enable devtoolset-7 rh-ruby24 rh-python36 python27 -- bash ../foundationdb/build/cmake/build.sh test
|
||||
|
||||
snowflake-ci: &snowflake-ci
|
||||
<<: *build-setup
|
||||
command: scl enable devtoolset-7 rh-ruby24 rh-python36 python27 -- bash ../foundationdb/build/cmake/build.sh package test-fast
|
||||
|
||||
shell:
|
||||
<<: *build-setup
|
||||
command: scl enable devtoolset-7 rh-ruby24 rh-python36 python27 -- bash
|
||||
|
|
@ -1,17 +0,0 @@
|
|||
[centos6]
|
||||
name = fdb-centos6
|
||||
location = centos6-test
|
||||
packages = ^.*el6((?!debuginfo).)*\.rpm$
|
||||
format = rpm
|
||||
|
||||
[centos7]
|
||||
name = fdb-centos7
|
||||
location = centos7-test
|
||||
packages = ^.*el7((?!debuginfo).)*\.rpm$
|
||||
format = rpm
|
||||
|
||||
[ubuntu_16_04]
|
||||
name = fdb-debian
|
||||
location = debian-test
|
||||
packages = ^.*\.deb$
|
||||
format = deb
|
|
@ -1,32 +0,0 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
source_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
|
||||
|
||||
source ${source_dir}/modules/globals.sh
|
||||
source ${source_dir}/modules/util.sh
|
||||
source ${source_dir}/modules/deb.sh
|
||||
source ${source_dir}/modules/tests.sh
|
||||
source ${source_dir}/modules/test_args.sh
|
||||
|
||||
main() {
|
||||
local __res=0
|
||||
enterfun
|
||||
for _ in 1
|
||||
do
|
||||
test_args_parse "$@"
|
||||
__res=$?
|
||||
if [ ${__res} -eq 2 ]
|
||||
then
|
||||
__res=0
|
||||
break
|
||||
elif [ ${__res} -ne 0 ]
|
||||
then
|
||||
break
|
||||
fi
|
||||
tests_main
|
||||
done
|
||||
exitfun
|
||||
return ${__res}
|
||||
}
|
||||
|
||||
main "$@"
|
|
@ -1,5 +0,0 @@
|
|||
cmake_minimum_required(VERSION 2.8.0)
|
||||
project(fdb_c_app C)
|
||||
find_package(FoundationDB-Client REQUIRED)
|
||||
add_executable(app app.c)
|
||||
target_link_libraries(app PRIVATE fdb_c)
|
|
@ -1,7 +0,0 @@
|
|||
#define FDB_API_VERSION 710
|
||||
#include <foundationdb/fdb_c.h>
|
||||
|
||||
int main(int argc, char* argv[]) {
|
||||
fdb_select_api_version(710);
|
||||
return 0;
|
||||
}
|
|
@ -1,114 +0,0 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
if [ -z ${arguments_sh_included+x} ]
|
||||
then
|
||||
arguments_sh_included=1
|
||||
|
||||
source ${source_dir}/modules/util.sh
|
||||
|
||||
arguments_usage() {
|
||||
cat <<EOF
|
||||
usage: test_packages.sh [-h] [commands]
|
||||
-h: print this help message and
|
||||
abort execution
|
||||
-b DIR: set the fdb build directory
|
||||
(this is a required argument).
|
||||
-s DIR: Path to fdb source directory.
|
||||
-p STR: Colon-separated list of package
|
||||
file names (without path) to
|
||||
test.
|
||||
-c PATH: Path to a ini-file with the docker
|
||||
configuration
|
||||
-t TEST: One of DEB, RPM, ALL
|
||||
-n TESTS: Colon separated list of test names
|
||||
to run (will run all if this option
|
||||
is not set)
|
||||
-j NUM Number of threads the tester should
|
||||
run in parallel.
|
||||
-P STR Pruning strategy for docker container
|
||||
(Can be ALL|FAILED|SUCCEEDED|NONE)
|
||||
Defaults to "SUCCEEDED"
|
||||
|
||||
Will execute the passed commands
|
||||
in the order they were passed
|
||||
EOF
|
||||
}
|
||||
|
||||
arguments_parse() {
|
||||
local __res=0
|
||||
run_deb_tests=1
|
||||
run_rpm_tests=1
|
||||
docker_parallelism=1
|
||||
pruning_strategy=SUCCEEDED
|
||||
while getopts ":hb:s:p:c:t:n:j:P:" opt
|
||||
do
|
||||
case ${opt} in
|
||||
h )
|
||||
arguments_usage
|
||||
__res=2
|
||||
break
|
||||
;;
|
||||
b )
|
||||
fdb_build="${OPTARG}"
|
||||
;;
|
||||
s )
|
||||
fdb_source="${OPTARG}"
|
||||
;;
|
||||
p )
|
||||
fdb_packages="${OPTARG}"
|
||||
;;
|
||||
c )
|
||||
docker_ini="${OPTARG}"
|
||||
;;
|
||||
t )
|
||||
if [ "${OPTARG}" = "DEB" ]
|
||||
then
|
||||
run_rpm_tests=0
|
||||
elif [ "${OPTARG}" = "RPM" ]
|
||||
then
|
||||
run_deb_tests=0
|
||||
elif [ "${OPTARG}" != "ALL" ]
|
||||
then
|
||||
echo -e "${RED}No such test: ${OPTARG}${NC}"
|
||||
echo "Note: Currently known tests are: RPM, DEB, and ALL"
|
||||
exit 1
|
||||
fi
|
||||
;;
|
||||
n )
|
||||
tests_to_run="${OPTARG}"
|
||||
;;
|
||||
j )
|
||||
docker_parallelism="${OPTARG}"
|
||||
if [[ $docker_parallelism =~ "^[0-9]+$" ]]
|
||||
then
|
||||
echo -e "${RED}Error: -j expects a number, ${OPTARG}, is not a number" >&2
|
||||
__res=1
|
||||
break
|
||||
elif [ $docker_parallelism -lt 1 ]
|
||||
then
|
||||
echo -e "${RED}Error: -j ${OPTARG} makes no sense" >&2
|
||||
__res=1
|
||||
break
|
||||
fi
|
||||
;;
|
||||
P )
|
||||
pruning_strategy="${OPTARG}"
|
||||
if ! [[ "${pruning_strategy}" =~ ^(ALL|FAILED|SUCCEEDED|NONE)$ ]]
|
||||
then
|
||||
fail "Unknown pruning strategy ${pruning_strategy}"
|
||||
fi
|
||||
;;
|
||||
\? )
|
||||
curr_index="$((OPTIND-1))"
|
||||
echo "Unknown option ${@:${curr_index}:1}"
|
||||
arguments_usage
|
||||
__res=1
|
||||
break
|
||||
;;
|
||||
esac
|
||||
done
|
||||
shift $((OPTIND -1))
|
||||
commands=("$@")
|
||||
return ${__res}
|
||||
}
|
||||
fi
|
|
@ -1,123 +0,0 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
if [ -z ${config_sh_included+x} ]
|
||||
then
|
||||
config_sh_included=1
|
||||
|
||||
source ${source_dir}/modules/util.sh
|
||||
|
||||
config_load_vms() {
|
||||
local __res=0
|
||||
enterfun
|
||||
for _ in 1
|
||||
do
|
||||
if [ -z "${docker_ini+x}"]
|
||||
then
|
||||
docker_file="${source_dir}/../docker.ini"
|
||||
fi
|
||||
# parse the ini file and read it into an
|
||||
# associative array
|
||||
eval "$(awk -F ' *= *' '{ if ($1 ~ /^\[/) section=$1; else if ($1 !~ /^$/) printf "ini_%s%s=\47%s\47\n", $1, section, $2 }' ${docker_file})"
|
||||
vms=( "${!ini_name[@]}" )
|
||||
if [ $? -ne 0 ]
|
||||
then
|
||||
echo "ERROR: Could not parse config-file ${docker_file}"
|
||||
__res=1
|
||||
break
|
||||
fi
|
||||
done
|
||||
exitfun
|
||||
return ${__res}
|
||||
}
|
||||
|
||||
config_find_packages() {
|
||||
local __res=0
|
||||
enterfun
|
||||
for _ in 1
|
||||
do
|
||||
cd ${fdb_build}
|
||||
while read f
|
||||
do
|
||||
if [[ "${f}" =~ .*"clients".* || "${f}" =~ .*"server".* ]]
|
||||
then
|
||||
if [ -z ${fdb_packages+x} ]
|
||||
then
|
||||
fdb_packages="${f}"
|
||||
else
|
||||
fdb_packages="${fdb_packages}:${f}"
|
||||
fi
|
||||
fi
|
||||
done <<< "$(ls *.deb *.rpm)"
|
||||
if [ $? -ne 0 ]
|
||||
then
|
||||
__res=1
|
||||
break
|
||||
fi
|
||||
done
|
||||
exitfun
|
||||
return ${__res}
|
||||
}
|
||||
|
||||
get_fdb_source() {
|
||||
local __res=0
|
||||
enterfun
|
||||
cd ${source_dir}
|
||||
while true
|
||||
do
|
||||
if [ -d .git ]
|
||||
then
|
||||
# we found the root
|
||||
pwd
|
||||
break
|
||||
fi
|
||||
if [ `pwd` = "/" ]
|
||||
then
|
||||
__res=1
|
||||
break
|
||||
fi
|
||||
cd ..
|
||||
done
|
||||
exitfun
|
||||
return ${__res}
|
||||
}
|
||||
|
||||
fdb_build=0
|
||||
|
||||
config_verify() {
|
||||
local __res=0
|
||||
enterfun
|
||||
for _ in 1
|
||||
do
|
||||
if [ -z ${fdb_source+x} ]
|
||||
then
|
||||
fdb_source=`get_fdb_source`
|
||||
fi
|
||||
if [ ! -d "${fdb_build}" ]
|
||||
then
|
||||
__res=1
|
||||
echo "ERROR: Could not find fdb build dir: ${fdb_build}"
|
||||
echo " Either set the environment variable fdb_build or"
|
||||
echo " pass it with -b <PATH_TO_BUILD>"
|
||||
fi
|
||||
if [ ! -f "${fdb_source}/flow/Net2.actor.cpp" ]
|
||||
then
|
||||
__res=1
|
||||
echo "ERROR: ${fdb_source} does not appear to be a fdb source"
|
||||
echo " directory. Either pass it with -s or set"
|
||||
echo " the environment variable fdb_source."
|
||||
fi
|
||||
if [ ${__res} -ne 0 ]
|
||||
then
|
||||
break
|
||||
fi
|
||||
config_load_vms
|
||||
__res=$?
|
||||
if [ ${__res} -ne 0 ]
|
||||
then
|
||||
break
|
||||
fi
|
||||
done
|
||||
exitfun
|
||||
return ${__res}
|
||||
}
|
||||
fi
|
|
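For readers unfamiliar with the awk/eval trick in config_load_vms above: it turns each [section] of docker.ini into entries of the ini_name/ini_location/ini_packages/ini_format associative arrays. A rough equivalent expressed with Python's stdlib configparser (illustrative only) would be:

import configparser

def load_vms(path='docker.ini'):
    parser = configparser.ConfigParser()
    parser.read(path)
    vms = {}
    for section in parser.sections():
        vms[section] = {
            'name': parser.get(section, 'name'),          # docker image tag to build
            'location': parser.get(section, 'location'),  # directory containing the Dockerfile
            'packages': parser.get(section, 'packages'),  # regex selecting the packages to test
            'format': parser.get(section, 'format'),      # 'rpm' or 'deb'
        }
    return vms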
@ -1,40 +0,0 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
if [ -z "${deb_sh_included}" ]
|
||||
then
|
||||
deb_sh_included=1
|
||||
|
||||
source ${source_dir}/modules/util.sh
|
||||
|
||||
install_build_tools() {
|
||||
apt-get -y install cmake gcc
|
||||
}
|
||||
|
||||
install() {
|
||||
local __res=0
|
||||
enterfun
|
||||
echo "Install FoundationDB"
|
||||
cd /build/packages
|
||||
package_names=()
|
||||
for f in "${package_files[@]}"
|
||||
do
|
||||
package_name="$(dpkg -I ${f} | grep Package | sed 's/.*://')"
|
||||
package_names+=( "${package_name}" )
|
||||
done
|
||||
dpkg -i ${package_files[@]}
|
||||
apt-get -yf -o Dpkg::Options::="--force-confold" install
|
||||
__res=$?
|
||||
sleep 5
|
||||
exitfun
|
||||
return ${__res}
|
||||
}
|
||||
|
||||
uninstall() {
|
||||
local __res=0
|
||||
enterfun
|
||||
apt-get -y remove ${package_names[@]}
|
||||
__res=$?
|
||||
exitfun
|
||||
return ${__res}
|
||||
}
|
||||
fi
|
|
@ -1,186 +0,0 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
if [ -z "${docker_sh_included+x}" ]
|
||||
then
|
||||
docker_sh_included=1
|
||||
source ${source_dir}/modules/util.sh
|
||||
source ${source_dir}/modules/config.sh
|
||||
source ${source_dir}/modules/tests.sh
|
||||
|
||||
failed_tests=()
|
||||
|
||||
docker_ids=()
|
||||
docker_threads=()
|
||||
docker_logs=()
|
||||
docker_error_logs=()
|
||||
|
||||
docker_wait_any() {
|
||||
local __res=0
|
||||
enterfun
|
||||
while [ "${#docker_threads[@]}" -gt 0 ]
|
||||
do
|
||||
IFS=";" read -ra res <${pipe_file}
|
||||
docker_id=${res[0]}
|
||||
result=${res[1]}
|
||||
i=0
|
||||
for (( idx=0; idx<${#docker_ids[@]}; idx++ ))
|
||||
do
|
||||
if [ "${docker_id}" = "${docker_ids[idx]}" ]
|
||||
then
|
||||
i=idx
|
||||
break
|
||||
fi
|
||||
done
|
||||
if [ "${result}" -eq 0 ]
|
||||
then
|
||||
echo -e "${GREEN}Test succeeded: ${docker_threads[$i]}"
|
||||
echo -e "\tDocker-ID: ${docker_ids[$i]} "
|
||||
echo -e "\tLog-File: ${docker_logs[$i]}"
|
||||
echo -e "\tErr-File: ${docker_error_logs[$i]} ${NC}"
|
||||
else
|
||||
echo -e "${RED}Test FAILED: ${docker_threads[$i]}"
|
||||
echo -e "\tDocker-ID: ${docker_ids[$i]} "
|
||||
echo -e "\tLog-File: ${docker_logs[$i]}"
|
||||
echo -e "\tErr-File: ${docker_error_logs[$i]} ${NC}"
|
||||
failed_tests+=( "${docker_threads[$i]}" )
|
||||
fi
|
||||
n=$((i+1))
|
||||
docker_ids=( "${docker_ids[@]:0:$i}" "${docker_ids[@]:$n}" )
|
||||
docker_threads=( "${docker_threads[@]:0:$i}" "${docker_threads[@]:$n}" )
|
||||
docker_logs=( "${docker_logs[@]:0:$i}" "${docker_logs[@]:$n}" )
|
||||
docker_error_logs=( "${docker_error_logs[@]:0:$i}" "${docker_error_logs[@]:$n}" )
|
||||
break
|
||||
done
|
||||
exitfun
|
||||
return "${__res}"
|
||||
}
|
||||
|
||||
docker_wait_all() {
|
||||
local __res=0
|
||||
while [ "${#docker_threads[@]}" -gt 0 ]
|
||||
do
|
||||
docker_wait_any
|
||||
if [ "$?" -ne 0 ]
|
||||
then
|
||||
__res=1
|
||||
fi
|
||||
done
|
||||
return ${__res}
|
||||
}
|
||||
|
||||
docker_run() {
|
||||
local __res=0
|
||||
enterfun
|
||||
for _ in 1
|
||||
do
|
||||
echo "Testing the following:"
|
||||
echo "======================"
|
||||
for K in "${vms[@]}"
|
||||
do
|
||||
curr_packages=( $(cd ${fdb_build}/packages; ls | grep -P ${ini_packages[${K}]} ) )
|
||||
echo "Will test the following ${#curr_packages[@]} packages in docker-image ${K}:"
|
||||
for p in "${curr_packages[@]}"
|
||||
do
|
||||
echo " ${p}"
|
||||
done
|
||||
echo
|
||||
done
|
||||
log_dir="${fdb_build}/pkg_tester"
|
||||
pipe_file="${fdb_build}/pkg_tester.pipe"
|
||||
lock_file="${fdb_build}/pkg_tester.lock"
|
||||
if [ -p "${pipe_file}" ]
|
||||
then
|
||||
rm "${pipe_file}"
|
||||
successOr "Could not delete old pipe file"
|
||||
fi
|
||||
if [ -f "${lock_file}" ]
|
||||
then
|
||||
rm "${lock_file}"
|
||||
successOr "Could not delete old pipe file"
|
||||
fi
|
||||
touch "${lock_file}"
|
||||
successOr "Could not create lock file"
|
||||
mkfifo "${pipe_file}"
|
||||
successOr "Could not create pipe file"
|
||||
mkdir -p "${log_dir}"
|
||||
# setup the containers
|
||||
# TODO: shall we make this parallel as well?
|
||||
for vm in "${vms[@]}"
|
||||
do
|
||||
curr_name="${ini_name[$vm]}"
|
||||
curr_location="${ini_location[$vm]}"
|
||||
if [[ "$curr_location" = /* ]]
|
||||
then
|
||||
cd "${curr_location}"
|
||||
else
|
||||
cd ${source_dir}/../${curr_location}
|
||||
fi
|
||||
docker_buid_logs="${log_dir}/docker_build_${curr_name}"
|
||||
docker build . -t ${curr_name} 1> "${docker_buid_logs}.log" 2> "${docker_buid_logs}.err"
|
||||
successOr "Building Docker image ${curr_name} failed - see ${docker_buid_logs}.log and ${docker_buid_logs}.err"
|
||||
done
|
||||
if [ ! -z "${tests_to_run+x}"]
|
||||
then
|
||||
tests=()
|
||||
IFS=';' read -ra tests <<< "${tests_to_run}"
|
||||
fi
|
||||
for vm in "${vms[@]}"
|
||||
do
|
||||
curr_name="${ini_name[$vm]}"
|
||||
curr_format="${ini_format[$vm]}"
|
||||
curr_packages=( $(cd ${fdb_build}/packages; ls | grep -P ${ini_packages[${vm}]} ) )
|
||||
for curr_test in "${tests[@]}"
|
||||
do
|
||||
if [ "${#docker_ids[@]}" -ge "${docker_parallelism}" ]
|
||||
then
|
||||
docker_wait_any
|
||||
fi
|
||||
log_file="${log_dir}/${curr_name}_${curr_test}.log"
|
||||
err_file="${log_dir}/${curr_name}_${curr_test}.err"
|
||||
docker_id=$( docker run -d -v "${fdb_source}:/foundationdb"\
|
||||
-v "${fdb_build}:/build"\
|
||||
${curr_name} /sbin/init )
|
||||
echo "Starting Test ${curr_name}/${curr_test} Docker-ID: ${docker_id}"
|
||||
{
|
||||
docker exec "${docker_id}" bash \
|
||||
/foundationdb/build/cmake/package_tester/${curr_format}_tests.sh -n ${curr_test} ${curr_packages[@]}\
|
||||
2> ${err_file} 1> ${log_file}
|
||||
res=$?
|
||||
if [ "${pruning_strategy}" = "ALL" ]
|
||||
then
|
||||
docker kill "${docker_id}" > /dev/null
|
||||
elif [ "${res}" -eq 0 ] && [ "${pruning_strategy}" = "SUCCEEDED" ]
|
||||
then
|
||||
docker kill "${docker_id}" > /dev/null
|
||||
elif [ "${res}" -ne 0 ] && [ "${pruning_strategy}" = "FAILED" ]
|
||||
then
|
||||
docker kill "${docker_id}" > /dev/null
|
||||
fi
|
||||
flock "${lock_file}" echo "${docker_id};${res}" >> "${pipe_file}"
|
||||
} &
|
||||
docker_ids+=( "${docker_id}" )
|
||||
docker_threads+=( "${curr_name}/${curr_test}" )
|
||||
docker_logs+=( "${log_file}" )
|
||||
docker_error_logs+=( "${err_file}" )
|
||||
done
|
||||
done
|
||||
docker_wait_all
|
||||
rm ${pipe_file}
|
||||
if [ "${#failed_tests[@]}" -eq 0 ]
|
||||
then
|
||||
echo -e "${GREEN}SUCCESS${NC}"
|
||||
else
|
||||
echo -e "${RED}FAILURE"
|
||||
echo "The following tests failed:"
|
||||
for t in "${failed_tests[@]}"
|
||||
do
|
||||
echo " - ${t}"
|
||||
done
|
||||
echo -e "${NC}"
|
||||
__res=1
|
||||
fi
|
||||
done
|
||||
exitfun
|
||||
return "${__res}"
|
||||
}
|
||||
fi
|
|
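The docker_run/docker_wait_any pair above implements a small job scheduler: it starts one container per (image, test) pair, keeps at most docker_parallelism of them running, and collects pass/fail results through a named pipe guarded by flock. A rough, illustrative Python rendering of the same flow (the package arguments passed to the tester are omitted for brevity):

import subprocess
from concurrent.futures import ThreadPoolExecutor

def run_one(image, test, fdb_source, fdb_build, fmt):
    # Start a throwaway container with the source and build trees mounted.
    container = subprocess.run(
        ['docker', 'run', '-d',
         '-v', f'{fdb_source}:/foundationdb',
         '-v', f'{fdb_build}:/build',
         image, '/sbin/init'],
        stdout=subprocess.PIPE).stdout.decode().strip()
    # Run a single named test from the package tester inside the container.
    result = subprocess.run(
        ['docker', 'exec', container, 'bash',
         f'/foundationdb/build/cmake/package_tester/{fmt}_tests.sh', '-n', test])
    subprocess.run(['docker', 'kill', container], stdout=subprocess.DEVNULL)
    return (image, test, result.returncode)

def run_all(jobs, parallelism=1):
    # jobs: iterable of (image, test, fdb_source, fdb_build, fmt) tuples
    with ThreadPoolExecutor(max_workers=parallelism) as pool:
        return list(pool.map(lambda job: run_one(*job), jobs))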
@ -1,23 +0,0 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
# This module has to be included first and only once.
|
||||
# This is because of a limitation of older bash versions
|
||||
# that doesn't allow us to declare associative arrays
|
||||
# globally.
|
||||
|
||||
if [ -z "${global_sh_included+x}"]
|
||||
then
|
||||
global_sh_included=1
|
||||
else
|
||||
echo "global.sh can only be included once"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
declare -A ini_name
|
||||
declare -A ini_location
|
||||
declare -A ini_packages
|
||||
declare -A ini_format
|
||||
declare -A test_start_state
|
||||
declare -A test_exit_state
|
||||
declare -a tests
|
||||
declare -a vms
|
|
@ -1,45 +0,0 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
if [ -z "${rpm_sh_included}" ]
|
||||
then
|
||||
rpm_sh_included=1
|
||||
|
||||
source ${source_dir}/modules/util.sh
|
||||
|
||||
conf_save_extension=".rpmsave"
|
||||
|
||||
install_build_tools() {
|
||||
yum -y install cmake gcc
|
||||
}
|
||||
|
||||
install() {
|
||||
local __res=0
|
||||
enterfun
|
||||
cd /build/packages
|
||||
package_names=()
|
||||
for f in "${package_files[@]}"
|
||||
do
|
||||
package_names+=( "$(rpm -qp ${f})" )
|
||||
done
|
||||
yum install -y ${package_files[@]}
|
||||
__res=$?
|
||||
# give the server some time to come up
|
||||
sleep 5
|
||||
exitfun
|
||||
return ${__res}
|
||||
}
|
||||
|
||||
uninstall() {
|
||||
local __res=0
|
||||
enterfun
|
||||
if [ "$1" == "purge" ]
|
||||
then
|
||||
yum remove --purge -y ${package_names[@]}
|
||||
else
|
||||
yum remove -y ${package_names[@]}
|
||||
fi
|
||||
__res=$?
|
||||
exitfun
|
||||
return ${__res}
|
||||
}
|
||||
fi
|
|
@ -1,49 +0,0 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
if [ -z ${test_args_sh_included+x} ]
|
||||
then
|
||||
test_args_sh_included=1
|
||||
|
||||
source ${source_dir}/modules/util.sh
|
||||
|
||||
test_args_usage() {
|
||||
me=`basename "$0"`
|
||||
echo "usage: ${me} [-h] files..."
|
||||
cat <<EOF
|
||||
-n TEST: The name of the test to run
|
||||
|
||||
Will execute the passed commands
|
||||
in the order they were passed
|
||||
EOF
|
||||
}
|
||||
|
||||
test_args_parse() {
|
||||
local __res=0
|
||||
run_deb_tests=1
|
||||
run_rpm_tests=1
|
||||
while getopts ":hn:" opt
|
||||
do
|
||||
case ${opt} in
|
||||
h )
|
||||
test_args_usage
|
||||
__res=2
|
||||
break
|
||||
;;
|
||||
n )
|
||||
echo "test_name=${OPTARG}"
|
||||
test_name="${OPTARG}"
|
||||
;;
|
||||
\? )
|
||||
curr_index="$((OPTIND-1))"
|
||||
echo "Unknown option ${@:${curr_index}:1}"
|
||||
arguments_usage
|
||||
__res=1
|
||||
break
|
||||
;;
|
||||
esac
|
||||
done
|
||||
shift $((OPTIND -1))
|
||||
package_files=("$@")
|
||||
return ${__res}
|
||||
}
|
||||
fi
|
|
@ -1,64 +0,0 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
if [ -z "${testing_sh_included+x}" ]
|
||||
then
|
||||
testing_sh_included=1
|
||||
|
||||
source ${source_dir}/modules/util.sh
|
||||
|
||||
desired_state() {
|
||||
case $1 in
|
||||
CLEAN )
|
||||
:
|
||||
;;
|
||||
INSTALLED )
|
||||
install
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
tests_healthy() {
|
||||
enterfun
|
||||
local __res=0
|
||||
for _ in 1
|
||||
do
|
||||
cd /
|
||||
fdbcli --exec status
|
||||
if [ $? -ne 0 ]
|
||||
then
|
||||
return 1
|
||||
fi
|
||||
healthy="$(fdbcli --exec status | grep HEALTHY | wc -l)"
|
||||
if [ -z "${healthy}" ]
|
||||
then
|
||||
__res=1
|
||||
break
|
||||
fi
|
||||
done
|
||||
exitfun
|
||||
return ${__res}
|
||||
}
|
||||
|
||||
tests_clean() {
|
||||
uninstall purge
|
||||
successOr "FoundationDB was not uninstalled correctly"
|
||||
# systemd/initd are not running, so we have to kill manually here
|
||||
pidof fdbmonitor | xargs kill
|
||||
tests_clean_nouninstall
|
||||
rm -rf /etc/foundationdb
|
||||
rm -rf /var/lib/foundationdb
|
||||
rm -rf /var/log/foundationdb
|
||||
}
|
||||
|
||||
tests_main() {
|
||||
new_state="${test_start_state[${test_name}]}"
|
||||
echo "Setting desired state ${new_state} for ${test_name}"
|
||||
desired_state "${new_state}"
|
||||
${test_name}
|
||||
successOr "${test_name} Failed"
|
||||
echo "======================================================================="
|
||||
echo "Test $t successfully finished"
|
||||
echo "======================================================================="
|
||||
current_state="${test_exit_state[${test_name}]}"
|
||||
}
|
||||
fi
|
|
@ -1,143 +0,0 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
# In this file the tests are formulated which
|
||||
# should run in the docker container to test
|
||||
# whether the RPM and DEB packages work properly.
|
||||
#
|
||||
# In order to add a test, a user first has to
|
||||
# add the name of the test to the `tests` array
|
||||
# which is defined in this file.
|
||||
#
|
||||
# Then, she must define the state this test
|
||||
# expects the container to be in. To do that,
|
||||
# a value for the test has to be added to the
|
||||
# associative array `test_start_state`. Valid
|
||||
# values are:
|
||||
#
|
||||
# - INSTALLED: In this case, the test will be
|
||||
# started with a freshly installed FDB, but
|
||||
# no other changes were made to the container.
|
||||
# - CLEAN: This simply means that the container
|
||||
# will run a minimal version of the OS (as defined
|
||||
# in the corresponding Dockerfile)
|
||||
#
|
||||
# A test is then simply a bash function with the
|
||||
# same name as the test. It can use the predefined
|
||||
# bash functions `install` and `uninstall` to either
|
||||
# install or uninstall FDB on the container. The FDB
|
||||
# build directory can be found in `/build`, the
|
||||
# source code will be located in `/foundationdb`
|
||||
|
||||
if [ -z "${tests_sh_included+x}" ]
|
||||
then
|
||||
tests_sh_included=1
|
||||
|
||||
source ${source_dir}/modules/util.sh
|
||||
source ${source_dir}/modules/testing.sh
|
||||
|
||||
tests=( "fresh_install" "keep_config" )
|
||||
test_start_state=([fresh_install]=INSTALLED [keep_config]=CLEAN )
|
||||
|
||||
fresh_install() {
|
||||
tests_healthy
|
||||
successOr "Fresh installation is not clean"
|
||||
# test that we can read from and write to fdb
|
||||
cd /
|
||||
timeout 2 fdbcli --exec 'writemode on ; set foo bar'
|
||||
successOr "Cannot write to database"
|
||||
getresult="$(timeout 2 fdbcli --exec 'get foo')"
|
||||
successOr "Get on database failed"
|
||||
if [ "${getresult}" != "\`foo' is \`bar'" ]
|
||||
then
|
||||
fail "value was not set correctly"
|
||||
fi
|
||||
timeout 2 fdbcli --exec 'writemode on ; clear foo'
|
||||
successOr "Deleting value failed"
|
||||
getresult="$(timeout 2 fdbcli --exec 'get foo')"
|
||||
successOr "Get on database failed"
|
||||
if [ "${getresult}" != "\`foo': not found" ]
|
||||
then
|
||||
fail "value was not cleared correctly"
|
||||
fi
|
||||
PYTHON_TARGZ_NAME="$(ls /build/packages | grep 'foundationdb-[0-9.]*\.tar\.gz' | sed 's/\.tar\.gz$//')"
|
||||
tar -C /tmp -xvf /build/packages/${PYTHON_TARGZ_NAME}.tar.gz
|
||||
pushd /tmp/${PYTHON_TARGZ_NAME}
|
||||
python setup.py install
|
||||
successOr "Installing python bindings failed"
|
||||
popd
|
||||
python -c 'import fdb; fdb.api_version(710)'
|
||||
successOr "Loading python bindings failed"
|
||||
|
||||
# Test cmake and pkg-config integration: https://github.com/apple/foundationdb/issues/1483
|
||||
install_build_tools
|
||||
rm -rf build-fdb_c_app
|
||||
mkdir build-fdb_c_app
|
||||
pushd build-fdb_c_app
|
||||
cmake /foundationdb/build/cmake/package_tester/fdb_c_app && make
|
||||
successOr "FoundationDB-Client cmake integration failed"
|
||||
cc /foundationdb/build/cmake/package_tester/fdb_c_app/app.c `pkg-config --libs --cflags foundationdb-client`
|
||||
successOr "FoundationDB-Client pkg-config integration failed"
|
||||
popd
|
||||
}
|
||||
|
||||
keep_config() {
|
||||
mkdir /etc/foundationdb
|
||||
description=$(LC_CTYPE=C tr -dc A-Za-z0-9 < /dev/urandom | head -c 8)
|
||||
random_str=$(LC_CTYPE=C tr -dc A-Za-z0-9 < /dev/urandom | head -c 8)
|
||||
successOr "Could not create secret"
|
||||
echo $description:$random_str@127.0.0.1:4500 > /tmp/fdb.cluster
|
||||
successOr "Could not create fdb.cluster file"
|
||||
sed '/\[fdbserver.4500\]/a \[fdbserver.4501\]' /foundationdb/packaging/foundationdb.conf > /tmp/foundationdb.conf
|
||||
successOr "Could not change foundationdb.conf file"
|
||||
# we need to keep these files around for testing that the install didn't change them
|
||||
cp /tmp/fdb.cluster /etc/foundationdb/fdb.cluster
|
||||
cp /tmp/foundationdb.conf /etc/foundationdb/foundationdb.conf
|
||||
|
||||
install
|
||||
successOr "FoundationDB install failed"
|
||||
# make sure we are not in the build directory as there is an fdb.cluster file there
|
||||
echo "Configure new database - Install isn't supposed to do this for us"
|
||||
echo "as there was an existing configuration"
|
||||
cd /
|
||||
timeout 2 fdbcli --exec 'configure new single ssd'
|
||||
successOr "Couldn't configure new database"
|
||||
tests_healthy
|
||||
num_processes="$(timeout 2 fdbcli --exec 'status' | grep "FoundationDB processes" | sed -e 's/.*- //')"
|
||||
if [ "${num_processes}" -ne 2 ]
|
||||
then
|
||||
fail Number of processes incorrect after config change
|
||||
fi
|
||||
|
||||
differences="$(diff /tmp/fdb.cluster /etc/foundationdb/fdb.cluster)"
|
||||
if [ -n "${differences}" ]
|
||||
then
|
||||
fail Install changed configuration files
|
||||
fi
|
||||
differences="$(diff /tmp/foundationdb.conf /etc/foundationdb/foundationdb.conf)"
|
||||
if [ -n "${differences}" ]
|
||||
then
|
||||
fail Install changed configuration files
|
||||
fi
|
||||
|
||||
uninstall
|
||||
# make sure config didn't get deleted
|
||||
# RPM, however, renames the file on remove, so we need to check for this
|
||||
conffile="/etc/foundationdb/foundationdb.conf${conf_save_extension}"
|
||||
if [ ! -f /etc/foundationdb/fdb.cluster ] || [ ! -f "${conffile}" ]
|
||||
then
|
||||
fail "Uninstall removed configuration"
|
||||
fi
|
||||
differences="$(diff /tmp/foundationdb.conf ${conffile})"
|
||||
if [ -n "${differences}" ]
|
||||
then
|
||||
fail "${conffile} changed during remove"
|
||||
fi
|
||||
differences="$(diff /tmp/fdb.cluster /etc/foundationdb/fdb.cluster)"
|
||||
if [ -n "${differences}" ]
|
||||
then
|
||||
fail "/etc/foundationdb/fdb.cluster changed during remove"
|
||||
fi
|
||||
|
||||
return 0
|
||||
}
|
||||
fi
|
|
@ -1,40 +0,0 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
if [ -z ${util_sh_included+x} ]
|
||||
then
|
||||
util_sh_included=1
|
||||
|
||||
# for colored output
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[0;32m'
|
||||
YELLOW='\033[1;33m'
|
||||
NC='\033[0m' # No Color
|
||||
|
||||
|
||||
enterfun() {
|
||||
pushd . > /dev/null
|
||||
}
|
||||
|
||||
exitfun() {
|
||||
popd > /dev/null
|
||||
}
|
||||
|
||||
fail() {
|
||||
false
|
||||
successOr ${@:1}
|
||||
}
|
||||
|
||||
successOr() {
|
||||
local __res=$?
|
||||
if [ ${__res} -ne 0 ]
|
||||
then
|
||||
if [ "$#" -gt 1 ]
|
||||
then
|
||||
>&2 echo -e "${RED}${@:1} ${NC}"
|
||||
fi
|
||||
exit ${__res}
|
||||
fi
|
||||
return 0
|
||||
}
|
||||
|
||||
fi
|
|
@ -1,32 +0,0 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
source_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
|
||||
|
||||
source ${source_dir}/modules/globals.sh
|
||||
source ${source_dir}/modules/util.sh
|
||||
source ${source_dir}/modules/rpm.sh
|
||||
source ${source_dir}/modules/tests.sh
|
||||
source ${source_dir}/modules/test_args.sh
|
||||
|
||||
main() {
|
||||
local __res=0
|
||||
enterfun
|
||||
for _ in 1
|
||||
do
|
||||
test_args_parse "$@"
|
||||
__res=$?
|
||||
if [ ${__res} -eq 2 ]
|
||||
then
|
||||
__res=0
|
||||
break
|
||||
elif [ ${__res} -ne 0 ]
|
||||
then
|
||||
break
|
||||
fi
|
||||
tests_main
|
||||
done
|
||||
exitfun
|
||||
return ${__res}
|
||||
}
|
||||
|
||||
main "$@"
|
|
@ -1,35 +0,0 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
source_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
|
||||
|
||||
source ${source_dir}/modules/globals.sh
|
||||
source ${source_dir}/modules/config.sh
|
||||
source ${source_dir}/modules/util.sh
|
||||
source ${source_dir}/modules/arguments.sh
|
||||
source ${source_dir}/modules/docker.sh
|
||||
|
||||
main() {
|
||||
local __res=0
|
||||
enterfun
|
||||
for _ in 1
|
||||
do
|
||||
arguments_parse "$@"
|
||||
if [ $? -ne 0 ]
|
||||
then
|
||||
__res=1
|
||||
break
|
||||
fi
|
||||
config_verify
|
||||
if [ $? -ne 0 ]
|
||||
then
|
||||
__res=1
|
||||
break
|
||||
fi
|
||||
docker_run
|
||||
__res=$?
|
||||
done
|
||||
exitfun
|
||||
return ${__res}
|
||||
}
|
||||
|
||||
main "$@"
|
|
@ -1,105 +0,0 @@
|
|||
version: "3"
|
||||
|
||||
services:
|
||||
common: &common
|
||||
image: foundationdb/foundationdb-build:0.1.24
|
||||
|
||||
build-setup: &build-setup
|
||||
<<: *common
|
||||
depends_on: [common]
|
||||
volumes:
|
||||
- ..:/__this_is_some_very_long_name_dir_needed_to_fix_a_bug_with_debug_rpms__/foundationdb
|
||||
working_dir: /__this_is_some_very_long_name_dir_needed_to_fix_a_bug_with_debug_rpms__/foundationdb
|
||||
environment:
|
||||
- MAKEJOBS=1
|
||||
- USE_CCACHE=1
|
||||
- BUILD_DIR=./work
|
||||
|
||||
release-setup: &release-setup
|
||||
<<: *build-setup
|
||||
environment:
|
||||
- MAKEJOBS=1
|
||||
- USE_CCACHE=1
|
||||
- RELEASE=true
|
||||
- BUILD_DIR=./work
|
||||
|
||||
snapshot-setup: &snapshot-setup
|
||||
<<: *build-setup
|
||||
|
||||
build-docs:
|
||||
<<: *build-setup
|
||||
volumes:
|
||||
- ..:/foundationdb
|
||||
working_dir: /foundationdb
|
||||
command: scl enable devtoolset-8 python27 rh-python36 rh-ruby24 -- bash -c 'make -j "$${MAKEJOBS}" docpackage'
|
||||
|
||||
|
||||
release-packages: &release-packages
|
||||
<<: *release-setup
|
||||
command: scl enable devtoolset-8 rh-python36 rh-ruby24 -- bash -c 'make -j "$${MAKEJOBS}" packages'
|
||||
|
||||
snapshot-packages: &snapshot-packages
|
||||
<<: *build-setup
|
||||
command: scl enable devtoolset-8 rh-python36 rh-ruby24 -- bash -c 'make -j "$${MAKEJOBS}" packages'
|
||||
|
||||
prb-packages:
|
||||
<<: *snapshot-packages
|
||||
|
||||
|
||||
release-bindings: &release-bindings
|
||||
<<: *release-setup
|
||||
command: scl enable devtoolset-8 rh-python36 rh-ruby24 -- bash -c 'make -j "$${MAKEJOBS}" bindings'
|
||||
|
||||
snapshot-bindings: &snapshot-bindings
|
||||
<<: *build-setup
|
||||
command: scl enable devtoolset-8 rh-python36 rh-ruby24 -- bash -c 'make -j "$${MAKEJOBS}" bindings'
|
||||
|
||||
prb-bindings:
|
||||
<<: *snapshot-bindings
|
||||
|
||||
|
||||
snapshot-cmake: &snapshot-cmake
|
||||
<<: *build-setup
|
||||
command: scl enable devtoolset-8 rh-python36 rh-ruby24 -- bash -c 'mkdir -p "$${BUILD_DIR}" && cd "$${BUILD_DIR}" && cmake -G "Ninja" -DFDB_RELEASE=0 -DUSE_WERROR=ON /__this_is_some_very_long_name_dir_needed_to_fix_a_bug_with_debug_rpms__/foundationdb && ninja -v -j "$${MAKEJOBS}" "packages" "strip_targets" && cpack'
|
||||
|
||||
prb-cmake:
|
||||
<<: *snapshot-cmake
|
||||
|
||||
|
||||
snapshot-bindings-cmake: &snapshot-bindings-cmake
|
||||
<<: *build-setup
|
||||
command: scl enable devtoolset-8 rh-python36 rh-ruby24 -- bash -c 'mkdir -p "$${BUILD_DIR}" && cd "$${BUILD_DIR}" && cmake -G "Ninja" -DFDB_RELEASE=0 -DUSE_WERROR=ON /__this_is_some_very_long_name_dir_needed_to_fix_a_bug_with_debug_rpms__/foundationdb && ninja -v -j "$${MAKEJOBS}" "bindings/all"'
|
||||
|
||||
prb-bindings-cmake:
|
||||
<<: *snapshot-bindings-cmake
|
||||
|
||||
|
||||
snapshot-cmake: &snapshot-testpackages
|
||||
<<: *build-setup
|
||||
command: scl enable devtoolset-8 rh-python36 rh-ruby24 -- bash -c 'mkdir -p "$${BUILD_DIR}" && cd "$${BUILD_DIR}" && cmake -G "Ninja" -DFDB_RELEASE=0 /__this_is_some_very_long_name_dir_needed_to_fix_a_bug_with_debug_rpms__/foundationdb && ninja -v -j "$${MAKEJOBS}"'
|
||||
|
||||
prb-testpackages:
|
||||
<<: *snapshot-testpackages
|
||||
|
||||
|
||||
snapshot-ctest: &snapshot-ctest
|
||||
<<: *build-setup
|
||||
command: scl enable devtoolset-8 rh-python36 rh-ruby24 -- bash -c 'mkdir -p "$${BUILD_DIR}" && cd "$${BUILD_DIR}" && cmake -G "Ninja" -DFDB_RELEASE=1 -DUSE_WERROR=ON /__this_is_some_very_long_name_dir_needed_to_fix_a_bug_with_debug_rpms__/foundationdb && ninja -v -j "$${MAKEJOBS}" && ctest -j "$${MAKEJOBS}" --output-on-failure'
|
||||
|
||||
prb-ctest:
|
||||
<<: *snapshot-ctest
|
||||
|
||||
|
||||
snapshot-correctness: &snapshot-correctness
|
||||
<<: *build-setup
|
||||
command: scl enable devtoolset-8 rh-python36 rh-ruby24 -- bash -c 'mkdir -p "$${BUILD_DIR}" && cd "$${BUILD_DIR}" && cmake -G "Ninja" -DFDB_RELEASE=1 -DUSE_WERROR=ON /__this_is_some_very_long_name_dir_needed_to_fix_a_bug_with_debug_rpms__/foundationdb && ninja -v -j "$${MAKEJOBS}" && ctest -j "$${MAKEJOBS}" --output-on-failure'
|
||||
|
||||
prb-correctness:
|
||||
<<: *snapshot-correctness
|
||||
|
||||
|
||||
shell:
|
||||
<<: *build-setup
|
||||
volumes:
|
||||
- ..:/foundationdb
|
||||
entrypoint: /bin/bash
|
|
@ -1,290 +0,0 @@
|
|||
FROM centos:6
|
||||
|
||||
WORKDIR /tmp
|
||||
|
||||
RUN sed -i -e '/enabled/d' /etc/yum.repos.d/CentOS-Base.repo && \
|
||||
sed -i -e '/gpgcheck=1/a enabled=0' /etc/yum.repos.d/CentOS-Base.repo && \
|
||||
sed -i -n '/6.1/q;p' /etc/yum.repos.d/CentOS-Vault.repo && \
|
||||
sed -i -e "s/6\.0/$(cut -d\ -f3 /etc/redhat-release)/g" /etc/yum.repos.d/CentOS-Vault.repo && \
|
||||
sed -i -e 's/enabled=0/enabled=1/g' /etc/yum.repos.d/CentOS-Vault.repo && \
|
||||
yum install -y \
|
||||
centos-release-scl-rh \
|
||||
epel-release \
|
||||
scl-utils \
|
||||
yum-utils && \
|
||||
yum-config-manager --enable rhel-server-rhscl-7-rpms && \
|
||||
sed -i -e 's/#baseurl=/baseurl=/g' \
|
||||
-e 's/mirror.centos.org/vault.centos.org/g' \
|
||||
-e 's/mirrorlist=/#mirrorlist=/g' \
|
||||
/etc/yum.repos.d/CentOS-SCLo-scl-rh.repo && \
|
||||
yum install -y \
|
||||
binutils-devel \
|
||||
curl \
|
||||
debbuild \
|
||||
devtoolset-8 \
|
||||
devtoolset-8-libasan-devel \
|
||||
devtoolset-8-libtsan-devel \
|
||||
devtoolset-8-libubsan-devel \
|
||||
devtoolset-8-valgrind-devel \
|
||||
dos2unix \
|
||||
dpkg \
|
||||
gettext-devel \
|
||||
git \
|
||||
golang \
|
||||
java-1.8.0-openjdk-devel \
|
||||
libcurl-devel \
|
||||
libuuid-devel \
|
||||
libxslt \
|
||||
lz4 \
|
||||
lz4-devel \
|
||||
lz4-static \
|
||||
mono-devel \
|
||||
redhat-lsb-core \
|
||||
rpm-build \
|
||||
tcl-devel \
|
||||
unzip \
|
||||
wget \
|
||||
rh-python36 \
|
||||
rh-python36-python-devel \
|
||||
rh-ruby24 && \
|
||||
yum clean all && \
|
||||
rm -rf /var/cache/yum
|
||||
|
||||
# build/install autoconf -- same version installed by yum in centos7
|
||||
RUN curl -Ls http://ftp.gnu.org/gnu/autoconf/autoconf-2.69.tar.gz -o autoconf.tar.gz && \
|
||||
echo "954bd69b391edc12d6a4a51a2dd1476543da5c6bbf05a95b59dc0dd6fd4c2969 autoconf.tar.gz" > autoconf-sha.txt && \
|
||||
sha256sum -c autoconf-sha.txt && \
|
||||
mkdir autoconf && \
|
||||
tar --strip-components 1 --no-same-owner --directory autoconf -xf autoconf.tar.gz && \
|
||||
cd autoconf && \
|
||||
./configure && \
|
||||
make && \
|
||||
make install && \
|
||||
cd ../ && \
|
||||
rm -rf /tmp/*
|
||||
|
||||
# build/install automake -- same version installed by yum in centos7
|
||||
RUN curl -Ls http://ftp.gnu.org/gnu/automake/automake-1.13.4.tar.gz -o automake.tar.gz && \
|
||||
echo "4c93abc0bff54b296f41f92dd3aa1e73e554265a6f719df465574983ef6f878c automake.tar.gz" > automake-sha.txt && \
|
||||
sha256sum -c automake-sha.txt && \
|
||||
mkdir automake && \
|
||||
tar --strip-components 1 --no-same-owner --directory automake -xf automake.tar.gz && \
|
||||
cd automake && \
|
||||
./configure && \
|
||||
make && \
|
||||
make install && \
|
||||
cd ../ && \
|
||||
rm -rf /tmp/*
|
||||
|
||||
# build/install git
|
||||
RUN source /opt/rh/devtoolset-8/enable && \
|
||||
curl -Ls https://github.com/git/git/archive/v2.30.0.tar.gz -o git.tar.gz && \
|
||||
echo "8db4edd1a0a74ebf4b78aed3f9e25c8f2a7db3c00b1aaee94d1e9834fae24e61 git.tar.gz" > git-sha.txt && \
|
||||
sha256sum -c git-sha.txt && \
|
||||
mkdir git && \
|
||||
tar --strip-components 1 --no-same-owner --directory git -xf git.tar.gz && \
|
||||
cd git && \
|
||||
make configure && \
|
||||
./configure && \
|
||||
make && \
|
||||
make install && \
|
||||
cd ../ && \
|
||||
rm -rf /tmp/*
|
||||
|
||||
# build/install ninja
|
||||
RUN source /opt/rh/devtoolset-8/enable && \
|
||||
curl -Ls https://github.com/ninja-build/ninja/archive/v1.9.0.zip -o ninja.zip && \
|
||||
echo "8e2e654a418373f10c22e4cc9bdbe9baeca8527ace8d572e0b421e9d9b85b7ef ninja.zip" > ninja-sha.txt && \
|
||||
sha256sum -c ninja-sha.txt && \
|
||||
unzip ninja.zip && \
|
||||
cd ninja-1.9.0 && \
|
||||
./configure.py --bootstrap && \
|
||||
cp ninja /usr/bin && \
|
||||
cd .. && \
|
||||
rm -rf /tmp/*
|
||||
|
||||
# install cmake
|
||||
RUN curl -Ls https://github.com/Kitware/CMake/releases/download/v3.13.4/cmake-3.13.4-Linux-x86_64.tar.gz -o cmake.tar.gz && \
|
||||
echo "563a39e0a7c7368f81bfa1c3aff8b590a0617cdfe51177ddc808f66cc0866c76 cmake.tar.gz" > cmake-sha.txt && \
|
||||
sha256sum -c cmake-sha.txt && \
|
||||
mkdir cmake && \
|
||||
tar --strip-components 1 --no-same-owner --directory cmake -xf cmake.tar.gz && \
|
||||
cp -r cmake/* /usr/local/ && \
|
||||
rm -rf /tmp/*
|
||||
|
||||
# build/install LLVM
|
||||
RUN source /opt/rh/devtoolset-8/enable && \
|
||||
source /opt/rh/rh-python36/enable && \
|
||||
curl -Ls https://github.com/llvm/llvm-project/releases/download/llvmorg-10.0.0/llvm-project-10.0.0.tar.xz -o llvm.tar.xz && \
|
||||
echo "6287a85f4a6aeb07dbffe27847117fe311ada48005f2b00241b523fe7b60716e llvm.tar.xz" > llvm-sha.txt && \
|
||||
sha256sum -c llvm-sha.txt && \
|
||||
mkdir llvm-project && \
|
||||
tar --strip-components 1 --no-same-owner --directory llvm-project -xf llvm.tar.xz && \
|
||||
mkdir -p llvm-project/build && \
|
||||
cd llvm-project/build && \
|
||||
cmake \
|
||||
-DCMAKE_BUILD_TYPE=Release \
|
||||
-G Ninja \
|
||||
-DLLVM_INCLUDE_EXAMPLES=OFF \
|
||||
-DLLVM_INCLUDE_TESTS=OFF \
|
||||
-DLLVM_ENABLE_PROJECTS="clang;clang-tools-extra;compiler-rt;libcxx;libcxxabi;libunwind;lld;lldb" \
|
||||
-DLLVM_STATIC_LINK_CXX_STDLIB=ON \
|
||||
../llvm && \
|
||||
cmake --build . && \
|
||||
cmake --build . --target install && \
|
||||
cd ../.. && \
|
||||
rm -rf /tmp/*
|
||||
|
||||
# build/install openssl
|
||||
RUN source /opt/rh/devtoolset-8/enable && \
|
||||
curl -Ls https://www.openssl.org/source/openssl-1.1.1h.tar.gz -o openssl.tar.gz && \
|
||||
echo "5c9ca8774bd7b03e5784f26ae9e9e6d749c9da2438545077e6b3d755a06595d9 openssl.tar.gz" > openssl-sha.txt && \
|
||||
sha256sum -c openssl-sha.txt && \
|
||||
mkdir openssl && \
|
||||
tar --strip-components 1 --no-same-owner --directory openssl -xf openssl.tar.gz && \
|
||||
cd openssl && \
|
||||
./config CFLAGS="-fPIC -O3" --prefix=/usr/local && \
|
||||
make -j`nproc` && \
|
||||
make -j1 install && \
|
||||
ln -sv /usr/local/lib64/lib*.so.1.1 /usr/lib64/ && \
|
||||
cd .. && \
|
||||
rm -rf /tmp/*
|
||||
|
||||
# install rocksdb to /opt
|
||||
RUN curl -Ls https://github.com/facebook/rocksdb/archive/v6.10.1.tar.gz -o rocksdb.tar.gz && \
|
||||
echo "d573d2f15cdda883714f7e0bc87b814a8d4a53a82edde558f08f940e905541ee rocksdb.tar.gz" > rocksdb-sha.txt && \
|
||||
sha256sum -c rocksdb-sha.txt && \
|
||||
tar --directory /opt -xf rocksdb.tar.gz && \
|
||||
rm -rf /tmp/*
|
||||
|
||||
# install boost 1.67 to /opt
|
||||
RUN curl -Ls https://boostorg.jfrog.io/artifactory/main/release/1.67.0/source/boost_1_67_0.tar.bz2 -o boost_1_67_0.tar.bz2 && \
|
||||
echo "2684c972994ee57fc5632e03bf044746f6eb45d4920c343937a465fd67a5adba boost_1_67_0.tar.bz2" > boost-sha-67.txt && \
|
||||
sha256sum -c boost-sha-67.txt && \
|
||||
tar --no-same-owner --directory /opt -xjf boost_1_67_0.tar.bz2 && \
|
||||
rm -rf /opt/boost_1_67_0/libs && \
|
||||
rm -rf /tmp/*
|
||||
|
||||
# install boost 1.72 to /opt
|
||||
RUN source /opt/rh/devtoolset-8/enable && \
|
||||
curl -Ls https://boostorg.jfrog.io/artifactory/main/release/1.72.0/source/boost_1_72_0.tar.bz2 -o boost_1_72_0.tar.bz2 && \
|
||||
echo "59c9b274bc451cf91a9ba1dd2c7fdcaf5d60b1b3aa83f2c9fa143417cc660722 boost_1_72_0.tar.bz2" > boost-sha-72.txt && \
|
||||
sha256sum -c boost-sha-72.txt && \
|
||||
tar --no-same-owner --directory /opt -xjf boost_1_72_0.tar.bz2 && \
|
||||
cd /opt/boost_1_72_0 &&\
|
||||
./bootstrap.sh --with-libraries=context &&\
|
||||
./b2 link=static cxxflags=-std=c++14 --prefix=/opt/boost_1_72_0 install &&\
|
||||
rm -rf /opt/boost_1_72_0/libs && \
|
||||
rm -rf /tmp/*
|
||||
|
||||
# jemalloc (needed for FDB after 6.3)
|
||||
RUN source /opt/rh/devtoolset-8/enable && \
|
||||
curl -Ls https://github.com/jemalloc/jemalloc/releases/download/5.2.1/jemalloc-5.2.1.tar.bz2 -o jemalloc-5.2.1.tar.bz2 && \
|
||||
echo "34330e5ce276099e2e8950d9335db5a875689a4c6a56751ef3b1d8c537f887f6 jemalloc-5.2.1.tar.bz2" > jemalloc-sha.txt && \
|
||||
sha256sum -c jemalloc-sha.txt && \
|
||||
mkdir jemalloc && \
|
||||
tar --strip-components 1 --no-same-owner --no-same-permissions --directory jemalloc -xjf jemalloc-5.2.1.tar.bz2 && \
|
||||
cd jemalloc && \
|
||||
./configure --enable-static --disable-cxx && \
|
||||
make && \
|
||||
make install && \
|
||||
cd .. && \
|
||||
rm -rf /tmp/*
|
||||
|
||||
# Install CCACHE
|
||||
RUN source /opt/rh/devtoolset-8/enable && \
|
||||
curl -Ls https://github.com/ccache/ccache/releases/download/v4.0/ccache-4.0.tar.gz -o ccache.tar.gz && \
|
||||
echo "ac97af86679028ebc8555c99318352588ff50f515fc3a7f8ed21a8ad367e3d45 ccache.tar.gz" > ccache-sha256.txt && \
|
||||
sha256sum -c ccache-sha256.txt && \
|
||||
mkdir ccache &&\
|
||||
tar --strip-components 1 --no-same-owner --directory ccache -xf ccache.tar.gz && \
|
||||
mkdir build && \
|
||||
cd build && \
|
||||
cmake -G Ninja -DCMAKE_BUILD_TYPE=Release -DZSTD_FROM_INTERNET=ON ../ccache && \
|
||||
cmake --build . --target install && \
|
||||
cd .. && \
|
||||
rm -rf /tmp/*
|
||||
|
||||
# build/install toml
|
||||
RUN source /opt/rh/devtoolset-8/enable && \
|
||||
curl -Ls https://github.com/ToruNiina/toml11/archive/v3.4.0.tar.gz -o toml.tar.gz && \
|
||||
echo "bc6d733efd9216af8c119d8ac64a805578c79cc82b813e4d1d880ca128bd154d toml.tar.gz" > toml-sha256.txt && \
|
||||
sha256sum -c toml-sha256.txt && \
|
||||
mkdir toml && \
|
||||
tar --strip-components 1 --no-same-owner --directory toml -xf toml.tar.gz && \
|
||||
mkdir build && \
|
||||
cd build && \
|
||||
cmake -G Ninja -DCMAKE_BUILD_TYPE=Release -Dtoml11_BUILD_TEST=OFF ../toml && \
|
||||
cmake --build . --target install && \
|
||||
cd .. && \
|
||||
rm -rf /tmp/*
|
||||
|
||||
# download old fdbserver binaries
|
||||
ARG FDB_VERSION="6.2.29"
|
||||
RUN mkdir -p /opt/foundationdb/old && \
|
||||
curl -Ls https://www.foundationdb.org/downloads/misc/fdbservers-${FDB_VERSION}.tar.gz | \
|
||||
tar --no-same-owner --directory /opt/foundationdb/old -xz && \
|
||||
chmod +x /opt/foundationdb/old/* && \
|
||||
ln -sf /opt/foundationdb/old/fdbserver-${FDB_VERSION} /opt/foundationdb/old/fdbserver
|
||||
|
||||
# build/install distcc
|
||||
RUN source /opt/rh/devtoolset-8/enable && \
|
||||
source /opt/rh/rh-python36/enable && \
|
||||
curl -Ls https://github.com/distcc/distcc/archive/v3.3.5.tar.gz -o distcc.tar.gz && \
|
||||
echo "13a4b3ce49dfc853a3de550f6ccac583413946b3a2fa778ddf503a9edc8059b0 distcc.tar.gz" > distcc-sha256.txt && \
|
||||
sha256sum -c distcc-sha256.txt && \
|
||||
mkdir distcc && \
|
||||
tar --strip-components 1 --no-same-owner --directory distcc -xf distcc.tar.gz && \
|
||||
cd distcc && \
|
||||
./autogen.sh && \
|
||||
./configure && \
|
||||
make && \
|
||||
make install && \
|
||||
cd .. && \
|
||||
rm -rf /tmp/*
|
||||
|
||||
RUN curl -Ls https://github.com/manticoresoftware/manticoresearch/raw/master/misc/junit/ctest2junit.xsl -o /opt/ctest2junit.xsl
|
||||
|
||||
# # Setting this environment variable switches from OpenSSL to BoringSSL
|
||||
# ENV OPENSSL_ROOT_DIR=/opt/boringssl
|
||||
#
|
||||
# # install BoringSSL: TODO: They don't seem to have releases(?) I picked today's master SHA.
|
||||
# RUN cd /opt &&\
|
||||
# git clone https://boringssl.googlesource.com/boringssl &&\
|
||||
# cd boringssl &&\
|
||||
# git checkout e796cc65025982ed1fb9ef41b3f74e8115092816 &&\
|
||||
# mkdir build
|
||||
#
|
||||
# # ninja doesn't respect CXXFLAGS, and the boringssl CMakeLists doesn't expose an option to define __STDC_FORMAT_MACROS
|
||||
# # also, enable -fPIC.
|
||||
# # this is moderately uglier than creating a patchfile, but easier to maintain.
|
||||
# RUN cd /opt/boringssl &&\
|
||||
# for f in crypto/fipsmodule/rand/fork_detect_test.cc \
|
||||
# include/openssl/bn.h \
|
||||
# ssl/test/bssl_shim.cc ; do \
|
||||
# perl -p -i -e 's/#include <inttypes.h>/#define __STDC_FORMAT_MACROS 1\n#include <inttypes.h>/g;' $f ; \
|
||||
# done &&\
|
||||
# perl -p -i -e 's/-Werror/-Werror -fPIC/' CMakeLists.txt &&\
|
||||
# git diff
|
||||
#
|
||||
# RUN cd /opt/boringssl/build &&\
|
||||
# scl enable devtoolset-8 rh-python36 rh-ruby24 -- cmake -GNinja -DCMAKE_BUILD_TYPE=Release .. &&\
|
||||
# scl enable devtoolset-8 rh-python36 rh-ruby24 -- ninja &&\
|
||||
# ./ssl/ssl_test &&\
|
||||
# mkdir -p ../lib && cp crypto/libcrypto.a ssl/libssl.a ../lib
|
||||
#
|
||||
# # Localize time zone
|
||||
# ARG TIMEZONEINFO=America/Los_Angeles
|
||||
# RUN rm -f /etc/localtime && ln -s /usr/share/zoneinfo/${TIMEZONEINFO} /etc/localtime
|
||||
#
|
||||
# LABEL version=${IMAGE_TAG}
|
||||
# ENV DOCKER_IMAGEVER=${IMAGE_TAG}
|
||||
# ENV JAVA_HOME=/usr/lib/jvm/java-1.8.0
|
||||
# ENV CC=/opt/rh/devtoolset-8/root/usr/bin/gcc
|
||||
# ENV CXX=/opt/rh/devtoolset-8/root/usr/bin/g++
|
||||
#
|
||||
# ENV CCACHE_NOHASHDIR=true
|
||||
# ENV CCACHE_UMASK=0000
|
||||
# ENV CCACHE_SLOPPINESS="file_macro,time_macros,include_file_mtime,include_file_ctime,file_stat_matches"
|
||||
#
|
||||
# CMD scl enable devtoolset-8 rh-python36 rh-ruby24 -- bash
|
|
@ -1,84 +0,0 @@
|
|||
ARG REPOSITORY=foundationdb/build
|
||||
ARG VERSION=centos6-latest
|
||||
FROM ${REPOSITORY}:${VERSION}
|
||||
|
||||
# add vscode server
|
||||
RUN yum repolist && \
|
||||
yum -y install \
|
||||
bash-completion \
|
||||
byobu \
|
||||
cgdb \
|
||||
emacs-nox \
|
||||
jq \
|
||||
the_silver_searcher \
|
||||
tmux \
|
||||
tree \
|
||||
vim \
|
||||
zsh && \
|
||||
yum clean all && \
|
||||
rm -rf /var/cache/yum
|
||||
|
||||
WORKDIR /tmp
|
||||
RUN source /opt/rh/devtoolset-8/enable && \
|
||||
source /opt/rh/rh-python36/enable && \
|
||||
pip3 install \
|
||||
lxml \
|
||||
psutil \
|
||||
python-dateutil \
|
||||
subprocess32 && \
|
||||
mkdir fdb-joshua && \
|
||||
cd fdb-joshua && \
|
||||
git clone https://github.com/FoundationDB/fdb-joshua . && \
|
||||
pip3 install /tmp/fdb-joshua && \
|
||||
cd /tmp && \
|
||||
curl -Ls https://amazon-eks.s3.us-west-2.amazonaws.com/1.18.9/2020-11-02/bin/linux/amd64/kubectl -o kubectl && \
|
||||
echo "3dbe69e6deb35fbd6fec95b13d20ac1527544867ae56e3dae17e8c4d638b25b9 kubectl" > kubectl.txt && \
|
||||
sha256sum -c kubectl.txt && \
|
||||
mv kubectl /usr/local/bin/kubectl && \
|
||||
chmod 755 /usr/local/bin/kubectl && \
|
||||
curl https://awscli.amazonaws.com/awscli-exe-linux-x86_64-2.0.30.zip -o "awscliv2.zip" && \
|
||||
echo "7ee475f22c1b35cc9e53affbf96a9ffce91706e154a9441d0d39cbf8366b718e awscliv2.zip" > awscliv2.txt && \
|
||||
sha256sum -c awscliv2.txt && \
|
||||
unzip -qq awscliv2.zip && \
|
||||
./aws/install && \
|
||||
rm -rf /tmp/*
|
||||
|
||||
ARG FDB_VERSION="6.2.29"
|
||||
RUN mkdir -p /usr/lib/foundationdb/plugins && \
|
||||
curl -Ls https://www.foundationdb.org/downloads/misc/joshua_tls_library.tar.gz | \
|
||||
tar --strip-components=1 --no-same-owner --directory /usr/lib/foundationdb/plugins -xz && \
|
||||
ln -sf /usr/lib/foundationdb/plugins/FDBGnuTLS.so /usr/lib/foundationdb/plugins/fdb-libressl-plugin.so && \
|
||||
curl -Ls https://www.foundationdb.org/downloads/${FDB_VERSION}/linux/libfdb_c_${FDB_VERSION}.so -o /usr/lib64/libfdb_c_${FDB_VERSION}.so && \
|
||||
ln -sf /usr/lib64/libfdb_c_${FDB_VERSION}.so /usr/lib64/libfdb_c.so
|
||||
|
||||
WORKDIR /root
|
||||
RUN rm -f /root/anaconda-ks.cfg && \
|
||||
printf '%s\n' \
|
||||
'source /opt/rh/devtoolset-8/enable' \
|
||||
'source /opt/rh/rh-python36/enable' \
|
||||
'source /opt/rh/rh-ruby26/enable' \
|
||||
'' \
|
||||
'function cmk_ci() {' \
|
||||
' cmake -S ${HOME}/src/foundationdb -B ${HOME}/build_output -D USE_CCACHE=ON -D USE_WERROR=ON -D RocksDB_ROOT=/opt/rocksdb-6.10.1 -D RUN_JUNIT_TESTS=ON -D RUN_JAVA_INTEGRATION_TESTS=ON -G Ninja && \' \
|
||||
' ninja -v -C ${HOME}/build_output -j 84 all packages strip_targets' \
|
||||
'}' \
|
||||
'function cmk() {' \
|
||||
' cmake -S ${HOME}/src/foundationdb -B ${HOME}/build_output -D USE_CCACHE=ON -D USE_WERROR=ON -D RocksDB_ROOT=/opt/rocksdb-6.10.1 -D RUN_JUNIT_TESTS=ON -D RUN_JAVA_INTEGRATION_TESTS=ON -G Ninja && \' \
|
||||
' ninja -C ${HOME}/build_output -j 84' \
|
||||
'}' \
|
||||
'function ct() {' \
|
||||
' cd ${HOME}/build_output && ctest -j 32 --no-compress-output -T test --output-on-failure' \
|
||||
'}' \
|
||||
'function j() {' \
|
||||
' python3 -m joshua.joshua "${@}"' \
|
||||
'}' \
|
||||
'function jsd() {' \
|
||||
' j start --tarball $(find ${HOME}/build_output/packages -name correctness\*.tar.gz) "${@}"' \
|
||||
'}' \
|
||||
'' \
|
||||
'USER_BASHRC="$HOME/src/.bashrc.local"' \
|
||||
'if test -f "$USER_BASHRC"; then' \
|
||||
' source $USER_BASHRC' \
|
||||
'fi' \
|
||||
'' \
|
||||
>> .bashrc
|
|
@ -1,24 +0,0 @@
|
|||
ARG REPOSITORY=foundationdb/build
|
||||
ARG VERSION=centos6-latest
|
||||
FROM ${REPOSITORY}:${VERSION}
|
||||
|
||||
RUN useradd distcc && \
|
||||
source /opt/rh/devtoolset-8/enable && \
|
||||
source /opt/rh/rh-python36/enable && \
|
||||
update-distcc-symlinks
|
||||
|
||||
EXPOSE 3632
|
||||
EXPOSE 3633
|
||||
USER distcc
|
||||
ENV ALLOW 0.0.0.0/0
|
||||
|
||||
ENTRYPOINT distccd \
|
||||
--daemon \
|
||||
--enable-tcp-insecure \
|
||||
--no-detach \
|
||||
--port 3632 \
|
||||
--log-stderr \
|
||||
--log-level info \
|
||||
--listen 0.0.0.0 \
|
||||
--allow ${ALLOW} \
|
||||
--jobs `nproc`
|
|
@ -1,247 +0,0 @@
|
|||
FROM centos:7
|
||||
|
||||
WORKDIR /tmp
|
||||
COPY mono-project.com.rpmkey.pgp ./
|
||||
RUN rpmkeys --import mono-project.com.rpmkey.pgp && \
|
||||
curl -Ls https://download.mono-project.com/repo/centos7-stable.repo -o /etc/yum.repos.d/mono-centos7-stable.repo && \
|
||||
yum repolist && \
|
||||
yum install -y \
|
||||
centos-release-scl-rh \
|
||||
epel-release \
|
||||
scl-utils \
|
||||
yum-utils && \
|
||||
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo && \
|
||||
yum install -y \
|
||||
autoconf \
|
||||
automake \
|
||||
binutils-devel \
|
||||
curl \
|
||||
debbuild \
|
||||
devtoolset-8 \
|
||||
devtoolset-8-libasan-devel \
|
||||
devtoolset-8-libtsan-devel \
|
||||
devtoolset-8-libubsan-devel \
|
||||
devtoolset-8-systemtap-sdt-devel \
|
||||
docker-ce \
|
||||
dos2unix \
|
||||
dpkg \
|
||||
gettext-devel \
|
||||
git \
|
||||
golang \
|
||||
java-11-openjdk-devel \
|
||||
libcurl-devel \
|
||||
libuuid-devel \
|
||||
libxslt \
|
||||
lz4 \
|
||||
lz4-devel \
|
||||
lz4-static \
|
||||
mono-devel \
|
||||
redhat-lsb-core \
|
||||
rpm-build \
|
||||
tcl-devel \
|
||||
unzip \
|
||||
wget && \
|
||||
if [ "$(uname -p)" == "aarch64" ]; then \
|
||||
yum install -y \
|
||||
rh-python38 \
|
||||
rh-python38-python-devel \
|
||||
rh-ruby27; \
|
||||
else \
|
||||
yum install -y \
|
||||
rh-python36 \
|
||||
rh-python36-python-devel \
|
||||
rh-ruby26; \
|
||||
fi && \
|
||||
yum clean all && \
|
||||
rm -rf /var/cache/yum
|
||||
|
||||
# build/install git
|
||||
RUN source /opt/rh/devtoolset-8/enable && \
|
||||
curl -Ls https://github.com/git/git/archive/v2.30.0.tar.gz -o git.tar.gz && \
|
||||
echo "8db4edd1a0a74ebf4b78aed3f9e25c8f2a7db3c00b1aaee94d1e9834fae24e61 git.tar.gz" > git-sha.txt && \
|
||||
sha256sum -c git-sha.txt && \
|
||||
mkdir git && \
|
||||
tar --strip-components 1 --no-same-owner --directory git -xf git.tar.gz && \
|
||||
cd git && \
|
||||
make configure && \
|
||||
./configure && \
|
||||
make && \
|
||||
make install && \
|
||||
cd ../ && \
|
||||
rm -rf /tmp/*
|
||||
|
||||
# build/install ninja
|
||||
RUN source /opt/rh/devtoolset-8/enable && \
|
||||
curl -Ls https://github.com/ninja-build/ninja/archive/v1.9.0.zip -o ninja.zip && \
|
||||
echo "8e2e654a418373f10c22e4cc9bdbe9baeca8527ace8d572e0b421e9d9b85b7ef ninja.zip" > ninja-sha.txt && \
|
||||
sha256sum -c ninja-sha.txt && \
|
||||
unzip ninja.zip && \
|
||||
cd ninja-1.9.0 && \
|
||||
./configure.py --bootstrap && \
|
||||
cp ninja /usr/bin && \
|
||||
cd .. && \
|
||||
rm -rf /tmp/*
|
||||
|
||||
# install cmake
|
||||
RUN if [ "$(uname -p)" == "aarch64" ]; then \
|
||||
curl -Ls https://github.com/Kitware/CMake/releases/download/v3.19.6/cmake-3.19.6-Linux-aarch64.tar.gz -o cmake.tar.gz; \
|
||||
echo "69ec045c6993907a4f4a77349d0a0668f1bd3ce8bc5f6fbab6dc7a7e2ffc4f80 cmake.tar.gz" > cmake-sha.txt; \
|
||||
else \
|
||||
curl -Ls https://github.com/Kitware/CMake/releases/download/v3.13.4/cmake-3.13.4-Linux-x86_64.tar.gz -o cmake.tar.gz; \
|
||||
echo "563a39e0a7c7368f81bfa1c3aff8b590a0617cdfe51177ddc808f66cc0866c76 cmake.tar.gz" > cmake-sha.txt; \
|
||||
fi && \
|
||||
sha256sum -c cmake-sha.txt && \
|
||||
mkdir cmake && \
|
||||
tar --strip-components 1 --no-same-owner --directory cmake -xf cmake.tar.gz && \
|
||||
cp -r cmake/* /usr/local/ && \
|
||||
rm -rf /tmp/*
|
||||
|
||||
# build/install LLVM
|
||||
RUN source /opt/rh/devtoolset-8/enable && \
|
||||
curl -Ls https://github.com/llvm/llvm-project/releases/download/llvmorg-11.0.0/llvm-project-11.0.0.tar.xz -o llvm.tar.xz && \
|
||||
echo "b7b639fc675fa1c86dd6d0bc32267be9eb34451748d2efd03f674b773000e92b llvm.tar.xz" > llvm-sha.txt && \
|
||||
sha256sum -c llvm-sha.txt && \
|
||||
mkdir llvm-project && \
|
||||
tar --strip-components 1 --no-same-owner --directory llvm-project -xf llvm.tar.xz && \
|
||||
mkdir -p llvm-project/build && \
|
||||
cd llvm-project/build && \
|
||||
cmake \
|
||||
-DCMAKE_BUILD_TYPE=Release \
|
||||
-G Ninja \
|
||||
-DLLVM_INCLUDE_EXAMPLES=OFF \
|
||||
-DLLVM_INCLUDE_TESTS=OFF \
|
||||
-DLLVM_ENABLE_PROJECTS="clang;clang-tools-extra;compiler-rt;libcxx;libcxxabi;libunwind;lld;lldb" \
|
||||
-DLLVM_STATIC_LINK_CXX_STDLIB=ON \
|
||||
../llvm && \
|
||||
cmake --build . && \
|
||||
cmake --build . --target install && \
|
||||
cd ../.. && \
|
||||
rm -rf /tmp/*
|
||||
|
||||
# build/install openssl
|
||||
RUN source /opt/rh/devtoolset-8/enable && \
|
||||
curl -Ls https://www.openssl.org/source/openssl-1.1.1h.tar.gz -o openssl.tar.gz && \
|
||||
echo "5c9ca8774bd7b03e5784f26ae9e9e6d749c9da2438545077e6b3d755a06595d9 openssl.tar.gz" > openssl-sha.txt && \
|
||||
sha256sum -c openssl-sha.txt && \
|
||||
mkdir openssl && \
|
||||
tar --strip-components 1 --no-same-owner --directory openssl -xf openssl.tar.gz && \
|
||||
cd openssl && \
|
||||
./config CFLAGS="-fPIC -O3" --prefix=/usr/local && \
|
||||
make -j`nproc` && \
|
||||
make -j1 install && \
|
||||
ln -sv /usr/local/lib64/lib*.so.1.1 /usr/lib64/ && \
|
||||
cd .. && \
|
||||
rm -rf /tmp/*
|
||||
|
||||
# install rocksdb to /opt
|
||||
RUN curl -Ls https://github.com/facebook/rocksdb/archive/v6.10.1.tar.gz -o rocksdb.tar.gz && \
|
||||
echo "d573d2f15cdda883714f7e0bc87b814a8d4a53a82edde558f08f940e905541ee rocksdb.tar.gz" > rocksdb-sha.txt && \
|
||||
sha256sum -c rocksdb-sha.txt && \
|
||||
tar --directory /opt -xf rocksdb.tar.gz && \
|
||||
rm -rf /tmp/*
|
||||
|
||||
# install boost 1.67 to /opt
|
||||
RUN curl -Ls https://boostorg.jfrog.io/artifactory/main/release/1.67.0/source/boost_1_67_0.tar.bz2 -o boost_1_67_0.tar.bz2 && \
|
||||
echo "2684c972994ee57fc5632e03bf044746f6eb45d4920c343937a465fd67a5adba boost_1_67_0.tar.bz2" > boost-sha-67.txt && \
|
||||
sha256sum -c boost-sha-67.txt && \
|
||||
tar --no-same-owner --directory /opt -xjf boost_1_67_0.tar.bz2 && \
|
||||
rm -rf /opt/boost_1_67_0/libs && \
|
||||
rm -rf /tmp/*
|
||||
|
||||
# install boost 1.72 to /opt
|
||||
RUN source /opt/rh/devtoolset-8/enable && \
|
||||
curl -Ls https://boostorg.jfrog.io/artifactory/main/release/1.72.0/source/boost_1_72_0.tar.bz2 -o boost_1_72_0.tar.bz2 && \
|
||||
echo "59c9b274bc451cf91a9ba1dd2c7fdcaf5d60b1b3aa83f2c9fa143417cc660722 boost_1_72_0.tar.bz2" > boost-sha-72.txt && \
|
||||
sha256sum -c boost-sha-72.txt && \
|
||||
tar --no-same-owner --directory /opt -xjf boost_1_72_0.tar.bz2 && \
|
||||
cd /opt/boost_1_72_0 &&\
|
||||
./bootstrap.sh --with-libraries=context &&\
|
||||
./b2 link=static cxxflags=-std=c++14 --prefix=/opt/boost_1_72_0 install &&\
|
||||
rm -rf /opt/boost_1_72_0/libs && \
|
||||
rm -rf /tmp/*
|
||||
|
||||
# jemalloc (needed for FDB after 6.3)
|
||||
RUN source /opt/rh/devtoolset-8/enable && \
|
||||
curl -Ls https://github.com/jemalloc/jemalloc/releases/download/5.2.1/jemalloc-5.2.1.tar.bz2 -o jemalloc-5.2.1.tar.bz2 && \
|
||||
echo "34330e5ce276099e2e8950d9335db5a875689a4c6a56751ef3b1d8c537f887f6 jemalloc-5.2.1.tar.bz2" > jemalloc-sha.txt && \
|
||||
sha256sum -c jemalloc-sha.txt && \
|
||||
mkdir jemalloc && \
|
||||
tar --strip-components 1 --no-same-owner --no-same-permissions --directory jemalloc -xjf jemalloc-5.2.1.tar.bz2 && \
|
||||
cd jemalloc && \
|
||||
./configure --enable-static --disable-cxx && \
|
||||
make && \
|
||||
make install && \
|
||||
cd .. && \
|
||||
rm -rf /tmp/*
|
||||
|
||||
# Install CCACHE
|
||||
RUN source /opt/rh/devtoolset-8/enable && \
|
||||
curl -Ls https://github.com/ccache/ccache/releases/download/v4.0/ccache-4.0.tar.gz -o ccache.tar.gz && \
|
||||
echo "ac97af86679028ebc8555c99318352588ff50f515fc3a7f8ed21a8ad367e3d45 ccache.tar.gz" > ccache-sha256.txt && \
|
||||
sha256sum -c ccache-sha256.txt && \
|
||||
mkdir ccache &&\
|
||||
tar --strip-components 1 --no-same-owner --directory ccache -xf ccache.tar.gz && \
|
||||
mkdir build && \
|
||||
cd build && \
|
||||
cmake -G Ninja -DCMAKE_BUILD_TYPE=Release -DZSTD_FROM_INTERNET=ON ../ccache && \
|
||||
cmake --build . --target install && \
|
||||
cd .. && \
|
||||
rm -rf /tmp/*
|
||||
|
||||
# build/install toml
|
||||
RUN source /opt/rh/devtoolset-8/enable && \
|
||||
curl -Ls https://github.com/ToruNiina/toml11/archive/v3.4.0.tar.gz -o toml.tar.gz && \
|
||||
echo "bc6d733efd9216af8c119d8ac64a805578c79cc82b813e4d1d880ca128bd154d toml.tar.gz" > toml-sha256.txt && \
|
||||
sha256sum -c toml-sha256.txt && \
|
||||
mkdir toml && \
|
||||
tar --strip-components 1 --no-same-owner --directory toml -xf toml.tar.gz && \
|
||||
mkdir build && \
|
||||
cd build && \
|
||||
cmake -G Ninja -DCMAKE_BUILD_TYPE=Release -Dtoml11_BUILD_TEST=OFF ../toml && \
|
||||
cmake --build . --target install && \
|
||||
cd .. && \
|
||||
rm -rf /tmp/*
|
||||
|
||||
# download old fdbserver binaries
|
||||
ARG FDB_VERSION="6.2.29"
|
||||
RUN mkdir -p /opt/foundationdb/old && \
|
||||
curl -Ls https://www.foundationdb.org/downloads/misc/fdbservers-${FDB_VERSION}.tar.gz | \
|
||||
tar --no-same-owner --directory /opt/foundationdb/old -xz && \
|
||||
chmod +x /opt/foundationdb/old/* && \
|
||||
ln -sf /opt/foundationdb/old/fdbserver-${FDB_VERSION} /opt/foundationdb/old/fdbserver
|
||||
|
||||
# build/install distcc
|
||||
RUN source /opt/rh/devtoolset-8/enable && \
|
||||
if [ "$(uname -p)" == "aarch64" ]; then \
|
||||
source /opt/rh/rh-python38/enable; \
|
||||
else \
|
||||
source /opt/rh/rh-python36/enable; \
|
||||
fi && \
|
||||
curl -Ls https://github.com/distcc/distcc/archive/v3.3.5.tar.gz -o distcc.tar.gz && \
|
||||
echo "13a4b3ce49dfc853a3de550f6ccac583413946b3a2fa778ddf503a9edc8059b0 distcc.tar.gz" > distcc-sha256.txt && \
|
||||
sha256sum -c distcc-sha256.txt && \
|
||||
mkdir distcc && \
|
||||
tar --strip-components 1 --no-same-owner --directory distcc -xf distcc.tar.gz && \
|
||||
cd distcc && \
|
||||
./autogen.sh && \
|
||||
./configure && \
|
||||
make && \
|
||||
make install && \
|
||||
cd .. && \
|
||||
rm -rf /tmp/*
|
||||
|
||||
# valgrind
|
||||
RUN source /opt/rh/devtoolset-8/enable && \
|
||||
curl -Ls https://sourceware.org/pub/valgrind/valgrind-3.17.0.tar.bz2 -o valgrind-3.17.0.tar.bz2 && \
|
||||
echo "ad3aec668e813e40f238995f60796d9590eee64a16dff88421430630e69285a2 valgrind-3.17.0.tar.bz2" > valgrind-sha.txt && \
|
||||
sha256sum -c valgrind-sha.txt && \
|
||||
mkdir valgrind && \
|
||||
tar --strip-components 1 --no-same-owner --no-same-permissions --directory valgrind -xjf valgrind-3.17.0.tar.bz2 && \
|
||||
cd valgrind && \
|
||||
./configure && \
|
||||
make && \
|
||||
make install && \
|
||||
cd .. && \
|
||||
rm -rf /tmp/*
|
||||
|
||||
RUN curl -Ls https://github.com/manticoresoftware/manticoresearch/raw/master/misc/junit/ctest2junit.xsl -o /opt/ctest2junit.xsl
|
|
@ -1,40 +0,0 @@
|
|||
-----BEGIN PGP PUBLIC KEY BLOCK-----
|
||||
Version: SKS 1.1.6
|
||||
Comment: Hostname: sks.pod01.fleetstreetops.com
|
||||
|
||||
mQENBFPfqCcBCADctOzyTxfWvf40Nlb+AMkcJyb505WSbzhWU8yPmBNAJOnbwueMsTkNMHEO
|
||||
u8fGRNxRWj5o/Db1N7EoSQtK3OgFnBef8xquUyrzA1nJ2aPfUWX+bhTG1TwyrtLaOssFRz6z
|
||||
/h/ChUIFvt2VZCw+Yx4BiKi+tvgwrHTYB/Yf2J9+R/1O6949n6veFFRBfgPOL0djhvRqXzhv
|
||||
FjJkh4xhTaGVeOnRR3+YQkblmti2n6KYl0n2kNB40ujSqpTloSfnR5tmJpz00WoOA9MJBdvH
|
||||
txTTn8l6rVzXbm4mW9ZmB1kht/BgWaNLaIisW5AZSkQKer35wOWf0G7Gw+cWHq+I7W9pABEB
|
||||
AAG0OlhhbWFyaW4gUHVibGljIEplbmtpbnMgKGF1dG8tc2lnbmluZykgPHJlbGVuZ0B4YW1h
|
||||
cmluLmNvbT6JARwEEAECAAYFAlQIhKQACgkQyQ+cuQ4frQyc1wf+MCusJK4ANLWikbgiSSx1
|
||||
qMBveBlLKLEdCxYY+B9rc/pRDw448iBdd+nuSVdbRoqLgoN8gHbClboP+i22yw+mga0KASD7
|
||||
b1mpdYB0npR3H73zbYArn3qTV8s/yUXkIAEFUtj0yoEuv8KjO8P7nZJh8OuqqAupUVN0s3Kj
|
||||
ONqXqi6Ro3fvVEZWOUFZl/FmY5KmXlpcw+YwE5CaNhJ2WunrjFTDqynRU/LeoPEKuwyYvfo9
|
||||
37zJFCrpAUMTr/9QpEKmV61H7fEHA9oHq97FBwWfjOU0l2mrXt1zJ97xVd2DXxrZodlkiY6B
|
||||
76rhaT4ZhltY1E7WB2Z9WPfTe1Y6jz4fZ4kBHAQQAQgABgUCWEyoiAAKCRABFQplW72BAn/P
|
||||
CAC0GkRBR3JTmG8WGeQMLb/o6Gon9cxpLnKv1GgFbHSM7XYMe7ySh5zxORwFuECuJ5+qcA6c
|
||||
Ve/kJAV8rewLULL9yvHK3oK7R8zoVGbFVm+lyoxiaXpkkWg21Mb8IubiO+tA/dJc7hKQSpoI
|
||||
0+dmJNaNrTVwqj0tQ8e0OL9KvBOYwFbSe06bocSNPVmKCt0EOvpGcQfzFw5UEjJVkqFn/moU
|
||||
rSxj0YsJpwRXB1pOsBaQC6r9oCgUvxPf4H77U07+ImXzxRWInVPYFSXSiBA7p+hzvsikmZEl
|
||||
iIAia8mTteUF1GeK4kafUk6iZZUfBlCIb9sV4O9Vvv8W0VjK4Vg6O2UAiQE4BBMBAgAiBQJT
|
||||
36gnAhsDBgsJCAcDAgYVCAIJCgsEFgIDAQIeAQIXgAAKCRCmoZs409gx75DoB/9h5p8u1cUS
|
||||
y6Mp2PjjW398LJZaqWwaa2W/lcLEKN7oWTC5Yf5BEuVsO9270pVln9Cv7hiqcbC8kywk+sZv
|
||||
RsYO3uoTRwsmImc/7uaK382hey1A2hvkH5fYHmY/5Z/Z0bm/A0k0chhG2ycjWjZXYLZ96I0V
|
||||
U3ZBQBHoh3qRtgWq4yWTsCJBX+FKPBdmkIpgcPXQw+hak0mj2sILqjScRZT1Oe+WJsMNMaLa
|
||||
8dSdw+pPm8NM/VGLmO9iTTDApuAsRixpCYLdJY+ThGNrKe6xDswQo8gr3gbBkJi0wLRDP2Rz
|
||||
q7rD0TC2PxOaWOZ7hmyz+EhjLcjZhHNJTaa+NV0k8YAwuQENBFPfqCcBCACtc7HssC9S3PxJ
|
||||
m1youvGfYLhm+KzMO+gIoy7R32VXIZNxrkMYzaeerqSsMwxdhEjyOscT+rJbRGZ+9iPOGeh4
|
||||
AqZlzzOuxQ/Lg5h+2mGVXe0Avb+A2zC56mLSQCL3W8NjABUZdknnc1YIf9Dz05fy4jPEttNS
|
||||
y+Rzte0ITLH1Hy/PKBrlF5n+G1/86f3L5n1ZZXmV3vi+rXT/OyEh9xRS4usmR6kVh4o2XGlI
|
||||
zUrUjhZvb4lxrHfWgzKlWFoUSydaZDk7eikTKF692RiSSpLbDLW2sNOdzT2eqv2B8CJRF5sL
|
||||
bD6BB3dAbH7KfqKiCT3xcCZhNEZw+M+GcRO/HNbnABEBAAGJAR8EGAECAAkFAlPfqCcCGwwA
|
||||
CgkQpqGbONPYMe+sNQgAwjm9PJ45t7NBNTXn1zadoQQbPqz9qAlWiII0k+zzJCTTVqgyIXJY
|
||||
I6zdNiB/Oh1Xajs/T9z9tL54+LLqgtZKa0lzDOmcxn6Iujf3a1MFdYxKgaQtT2ADxAimuBoz
|
||||
3Y1ohxXgAs2+VISWYoPBI+UWhYqg11zq3uwpFIYQBRgkVydCxefCxY19okNp9FPC7KJPpJkO
|
||||
NgDAK693Y9mOZXSq+XeGhjy3Sxesl0PYLIfV33z+vCpc2o1dDA5wuycgfqupNQITkQm6gPOH
|
||||
1jLu8Vttm4fdEtVMcqkn8dJFomo3JW3qxI7IWwjbVRg10G8LGAuBbD6CA0dGSf8PkHFYv2Xs
|
||||
dQ==
|
||||
=MWcF
|
||||
-----END PGP PUBLIC KEY BLOCK-----
|
|
@ -1,113 +0,0 @@
|
|||
ARG REPOSITORY=foundationdb/build
|
||||
ARG VERSION=centos7-latest
|
||||
FROM ${REPOSITORY}:${VERSION}
|
||||
|
||||
# add vscode server
|
||||
RUN yum-config-manager --add-repo=https://copr.fedorainfracloud.org/coprs/carlwgeorge/ripgrep/repo/epel-7/carlwgeorge-ripgrep-epel-7.repo && \
|
||||
yum repolist && \
|
||||
yum -y install \
|
||||
bash-completion \
|
||||
byobu \
|
||||
cgdb \
|
||||
emacs-nox \
|
||||
fish \
|
||||
jq \
|
||||
ripgrep \
|
||||
the_silver_searcher \
|
||||
tmux \
|
||||
tree \
|
||||
vim \
|
||||
zsh && \
|
||||
yum clean all && \
|
||||
rm -rf /var/cache/yum
|
||||
|
||||
WORKDIR /tmp
|
||||
RUN source /opt/rh/devtoolset-8/enable && \
|
||||
source /opt/rh/rh-python36/enable && \
|
||||
pip3 install \
|
||||
lxml \
|
||||
psutil \
|
||||
python-dateutil \
|
||||
subprocess32 && \
|
||||
mkdir fdb-joshua && \
|
||||
cd fdb-joshua && \
|
||||
git clone https://github.com/FoundationDB/fdb-joshua . && \
|
||||
pip3 install /tmp/fdb-joshua && \
|
||||
cd /tmp && \
|
||||
curl -Ls https://amazon-eks.s3.us-west-2.amazonaws.com/1.18.9/2020-11-02/bin/linux/amd64/kubectl -o kubectl && \
|
||||
echo "3dbe69e6deb35fbd6fec95b13d20ac1527544867ae56e3dae17e8c4d638b25b9 kubectl" > kubectl.txt && \
|
||||
sha256sum -c kubectl.txt && \
|
||||
mv kubectl /usr/local/bin/kubectl && \
|
||||
chmod 755 /usr/local/bin/kubectl && \
|
||||
curl https://awscli.amazonaws.com/awscli-exe-linux-x86_64-2.0.30.zip -o "awscliv2.zip" && \
|
||||
echo "7ee475f22c1b35cc9e53affbf96a9ffce91706e154a9441d0d39cbf8366b718e awscliv2.zip" > awscliv2.txt && \
|
||||
sha256sum -c awscliv2.txt && \
|
||||
unzip -qq awscliv2.zip && \
|
||||
./aws/install && \
|
||||
rm -rf /tmp/*
|
||||
|
||||
ARG FDB_VERSION="6.2.29"
|
||||
RUN mkdir -p /usr/lib/foundationdb/plugins && \
|
||||
curl -Ls https://www.foundationdb.org/downloads/misc/joshua_tls_library.tar.gz | \
|
||||
tar --strip-components=1 --no-same-owner --directory /usr/lib/foundationdb/plugins -xz && \
|
||||
ln -sf /usr/lib/foundationdb/plugins/FDBGnuTLS.so /usr/lib/foundationdb/plugins/fdb-libressl-plugin.so && \
|
||||
curl -Ls https://www.foundationdb.org/downloads/${FDB_VERSION}/linux/libfdb_c_${FDB_VERSION}.so -o /usr/lib64/libfdb_c_${FDB_VERSION}.so && \
|
||||
ln -sf /usr/lib64/libfdb_c_${FDB_VERSION}.so /usr/lib64/libfdb_c.so
|
||||
|
||||
WORKDIR /root
|
||||
RUN curl -Ls https://update.code.visualstudio.com/latest/server-linux-x64/stable -o /tmp/vscode-server-linux-x64.tar.gz && \
|
||||
mkdir -p .vscode-server/bin/latest && \
|
||||
tar --strip-components 1 --no-same-owner --directory .vscode-server/bin/latest -xf /tmp/vscode-server-linux-x64.tar.gz && \
|
||||
touch .vscode-server/bin/latest/0 && \
|
||||
rm -rf /tmp/*
|
||||
RUN rm -f /root/anaconda-ks.cfg && \
|
||||
printf '%s\n' \
|
||||
'#!/usr/bin/env bash' \
|
||||
'set -Eeuo pipefail' \
|
||||
'' \
|
||||
'mkdir -p ~/.docker' \
|
||||
'cat > ~/.docker/config.json << EOF' \
|
||||
'{' \
|
||||
' "proxies":' \
|
||||
' {' \
|
||||
' "default":' \
|
||||
' {' \
|
||||
' "httpProxy": "${HTTP_PROXY}",' \
|
||||
' "httpsProxy": "${HTTPS_PROXY}",' \
|
||||
' "noProxy": "${NO_PROXY}"' \
|
||||
' }' \
|
||||
' }' \
|
||||
'}' \
|
||||
'EOF' \
|
||||
> docker_proxy.sh && \
|
||||
chmod 755 docker_proxy.sh && \
|
||||
printf '%s\n' \
|
||||
'source /opt/rh/devtoolset-8/enable' \
|
||||
'source /opt/rh/rh-python36/enable' \
|
||||
'source /opt/rh/rh-ruby26/enable' \
|
||||
'' \
|
||||
'function cmk_ci() {' \
|
||||
' cmake -S ${HOME}/src/foundationdb -B ${HOME}/build_output -D USE_CCACHE=ON -D USE_WERROR=ON -D RocksDB_ROOT=/opt/rocksdb-6.10.1 -D RUN_JUNIT_TESTS=ON -D RUN_JAVA_INTEGRATION_TESTS=ON -G Ninja && \' \
|
||||
' ninja -v -C ${HOME}/build_output -j 84 all packages strip_targets' \
|
||||
'}' \
|
||||
'function cmk() {' \
|
||||
' cmake -S ${HOME}/src/foundationdb -B ${HOME}/build_output -D USE_CCACHE=ON -D USE_WERROR=ON -D RocksDB_ROOT=/opt/rocksdb-6.10.1 -D RUN_JUNIT_TESTS=ON -D RUN_JAVA_INTEGRATION_TESTS=ON -G Ninja && \' \
|
||||
' ninja -C ${HOME}/build_output -j 84' \
|
||||
'}' \
|
||||
'function ct() {' \
|
||||
' cd ${HOME}/build_output && ctest -j 32 --no-compress-output -T test --output-on-failure' \
|
||||
'}' \
|
||||
'function j() {' \
|
||||
' python3 -m joshua.joshua "${@}"' \
|
||||
'}' \
|
||||
'function jsd() {' \
|
||||
' j start --tarball $(find ${HOME}/build_output/packages -name correctness\*.tar.gz) "${@}"' \
|
||||
'}' \
|
||||
'' \
|
||||
'USER_BASHRC="$HOME/src/.bashrc.local"' \
|
||||
'if test -f "$USER_BASHRC"; then' \
|
||||
' source $USER_BASHRC' \
|
||||
'fi' \
|
||||
'' \
|
||||
'bash ${HOME}/docker_proxy.sh' \
|
||||
>> .bashrc
|
|
@ -1,24 +0,0 @@
|
|||
ARG REPOSITORY=foundationdb/build
|
||||
ARG VERSION=centos7-latest
|
||||
FROM ${REPOSITORY}:${VERSION}
|
||||
|
||||
RUN useradd distcc && \
|
||||
source /opt/rh/devtoolset-8/enable && \
|
||||
source /opt/rh/rh-python36/enable && \
|
||||
update-distcc-symlinks
|
||||
|
||||
EXPOSE 3632
|
||||
EXPOSE 3633
|
||||
USER distcc
|
||||
ENV ALLOW 0.0.0.0/0
|
||||
|
||||
ENTRYPOINT distccd \
|
||||
--daemon \
|
||||
--enable-tcp-insecure \
|
||||
--no-detach \
|
||||
--port 3632 \
|
||||
--log-stderr \
|
||||
--log-level info \
|
||||
--listen 0.0.0.0 \
|
||||
--allow ${ALLOW} \
|
||||
--jobs `nproc`
|
|
@ -1,20 +0,0 @@
|
|||
ARG REPOSITORY=foundationdb/build
|
||||
ARG VERSION=centos7-latest
|
||||
FROM ${REPOSITORY}:${VERSION}
|
||||
|
||||
ENV YCSB_VERSION=ycsb-foundationdb-binding-0.17.0 \
|
||||
PATH=${PATH}:/usr/bin
|
||||
|
||||
RUN cd /opt \
|
||||
&& eval curl "-Ls https://github.com/brianfrankcooper/YCSB/releases/download/0.17.0/ycsb-foundationdb-binding-0.17.0.tar.gz" \
|
||||
| tar -xzvf -
|
||||
|
||||
RUN rm -Rf /opt/${YCSB_VERSION}/lib/fdb-java-5.2.5.jar
|
||||
|
||||
# COPY the appropriate fdb-java-.jar from packages
|
||||
# COPY binary RPM for foundationdb
|
||||
# Install Binary
|
||||
|
||||
WORKDIR "/opt/${YCSB_VERSION}"
|
||||
|
||||
ENTRYPOINT ["bin/ycsb.sh"]
|
|
@ -1,99 +0,0 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
set -e
|
||||
|
||||
# we first check whether the user is in the group docker
|
||||
user=$(id -un)
|
||||
DIR_UUID=$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 32 | head -n 1)
|
||||
group=$(id -gn)
|
||||
uid=$(id -u)
|
||||
gid=$(id -g)
|
||||
gids=( $(id -G) )
|
||||
groups=( $(id -Gn) )
|
||||
tmpdir="/tmp/fdb-docker-${DIR_UUID}"
|
||||
image=fdb-dev
|
||||
|
||||
pushd .
|
||||
mkdir ${tmpdir}
|
||||
cd ${tmpdir}
|
||||
|
||||
echo
|
||||
|
||||
cat <<EOF >> Dockerfile
|
||||
FROM foundationdb/foundationdb-dev:0.11.1
|
||||
RUN yum install -y sudo
|
||||
RUN echo '%sudo ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers
|
||||
RUN groupadd -g 1100 sudo
|
||||
EOF
|
||||
|
||||
num_groups=${#gids[@]}
|
||||
additional_groups="-G sudo"
|
||||
for ((i=0;i<num_groups;i++))
|
||||
do
|
||||
echo "RUN groupadd -g ${gids[$i]} ${groups[$i]} || true" >> Dockerfile
|
||||
if [ ${gids[i]} -ne ${gid} ]
|
||||
then
|
||||
additional_groups="${additional_groups},${gids[$i]}"
|
||||
fi
|
||||
done
|
||||
|
||||
cat <<EOF >> Dockerfile
|
||||
RUN useradd -u ${uid} -g ${gid} ${additional_groups} -m ${user}
|
||||
|
||||
USER ${user}
|
||||
CMD scl enable devtoolset-8 rh-python36 rh-ruby24 -- bash
|
||||
|
||||
EOF
|
||||
|
||||
echo "Created ${tmpdir}"
|
||||
echo "Buidling Docker container ${image}"
|
||||
sudo docker build -t ${image} .
|
||||
|
||||
popd
|
||||
|
||||
echo "Writing startup script"
|
||||
mkdir -p $HOME/bin
|
||||
cat <<EOF > $HOME/bin/fdb-dev
|
||||
#!/usr/bin/bash
|
||||
|
||||
if [ -d "\${CCACHE_DIR}" ]
|
||||
then
|
||||
args="-v \${CCACHE_DIR}:\${CCACHE_DIR}"
|
||||
args="\${args} -e CCACHE_DIR=\${CCACHE_DIR}"
|
||||
args="\${args} -e CCACHE_UMASK=\${CCACHE_UMASK}"
|
||||
ccache_args=\$args
|
||||
fi
|
||||
|
||||
if [ -t 1 ] ; then
|
||||
TERMINAL_ARGS=-it `# Run in interactive mode and simulate a TTY`
|
||||
else
|
||||
TERMINAL_ARGS=-i `# Run in interactive mode`
|
||||
fi
|
||||
|
||||
sudo docker run --rm `# delete (temporary) image after return` \\
|
||||
\${TERMINAL_ARGS} \\
|
||||
--privileged=true `# Run in privileged mode ` \\
|
||||
--cap-add=SYS_PTRACE \\
|
||||
--security-opt seccomp=unconfined \\
|
||||
-v "${HOME}:${HOME}" `# Mount home directory` \\
|
||||
-w="\$(pwd)" \\
|
||||
\${ccache_args} \\
|
||||
${image} "\$@"
|
||||
EOF
|
||||
|
||||
cat <<EOF > $HOME/bin/clangd
|
||||
#!/usr/bin/bash
|
||||
|
||||
fdb-dev scl enable devtoolset-8 rh-python36 rh-ruby24 -- clangd
|
||||
EOF
|
||||
|
||||
if [[ ":$PATH:" != *":$HOME/bin:"* ]]
|
||||
then
|
||||
echo "WARNING: $HOME/bin is not in your PATH!"
|
||||
echo -e "\tThis can cause problems with some scripts (like fdb-clangd)"
|
||||
fi
|
||||
chmod +x $HOME/bin/fdb-dev
|
||||
chmod +x $HOME/bin/clangd
|
||||
echo "To start the dev docker image run $HOME/bin/fdb-dev"
|
||||
echo "$HOME/bin/clangd can be used for IDE integration"
|
||||
echo "You can edit these files but be aware that this script will overwrite your changes if you rerun it"
|
|
@ -1,3 +0,0 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
cat $1 | grep '<PackageName>' | sed -e 's,^[^>]*>,,' -e 's,<.*,,'
|
|
@ -1,4 +0,0 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
cat $1 | grep '<Version>' | sed -e 's,^[^>]*>,,' -e 's,<.*,,'
|
||||
|
|
@ -1,63 +0,0 @@
|
|||
#!/usr/bin/env python
|
||||
import sys
|
||||
|
||||
|
||||
def main():
|
||||
if len(sys.argv) != 2:
|
||||
print("Usage: txt-to-toml.py [src.txt]")
|
||||
return 1
|
||||
|
||||
filename = sys.argv[1]
|
||||
|
||||
indent = " "
|
||||
in_workload = False
|
||||
first_test = False
|
||||
keys_before_test = False
|
||||
|
||||
for line in open(filename):
|
||||
k = ""
|
||||
v = ""
|
||||
|
||||
if line.strip().startswith(";"):
|
||||
print((indent if in_workload else "") + line.strip().replace(";", "#"))
|
||||
continue
|
||||
|
||||
if "=" in line:
|
||||
(k, v) = line.strip().split("=")
|
||||
(k, v) = (k.strip(), v.strip())
|
||||
|
||||
if k == "testTitle":
|
||||
first_test = True
|
||||
if in_workload:
|
||||
print("")
|
||||
in_workload = False
|
||||
if keys_before_test:
|
||||
print("")
|
||||
keys_before_test = False
|
||||
print("[[test]]")
|
||||
|
||||
if k == "testName":
|
||||
in_workload = True
|
||||
print("")
|
||||
print(indent + "[[test.workload]]")
|
||||
|
||||
if not first_test:
|
||||
keys_before_test = True
|
||||
|
||||
if v.startswith("."):
|
||||
v = "0" + v
|
||||
|
||||
if any(c.isalpha() or c in ["/", "!"] for c in v):
|
||||
if v != "true" and v != "false":
|
||||
v = "'" + v + "'"
|
||||
|
||||
if k == "buggify":
|
||||
print("buggify = " + ("true" if v == "'on'" else "false"))
|
||||
elif k:
|
||||
print((indent if in_workload else "") + k + " = " + v)
|
||||
|
||||
return 0
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
sys.exit(main())
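For illustration, here is the kind of conversion the script above performs; the test name and keys are hypothetical, chosen only to exercise the testTitle/testName/buggify branches. Given an old-style input such as:

testTitle=SwizzledCycleTest
buggify=on
testName=Cycle
transactionsPerSecond=1000.0

the script prints the equivalent TOML:

[[test]]
testTitle = 'SwizzledCycleTest'
buggify = true

    [[test.workload]]
    testName = 'Cycle'
    transactionsPerSecond = 1000.0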
|
|
@ -39,6 +39,9 @@ function(configure_testing)
|
|||
endfunction()
|
||||
|
||||
function(verify_testing)
|
||||
if(NOT ENABLE_SIMULATION_TESTS)
|
||||
return()
|
||||
endif()
|
||||
foreach(test_file IN LISTS fdb_test_files)
|
||||
message(SEND_ERROR "${test_file} found but it is not associated with a test")
|
||||
endforeach()
|
||||
|
@ -119,27 +122,30 @@ function(add_fdb_test)
|
|||
set(VALGRIND_OPTION "--use-valgrind")
|
||||
endif()
|
||||
list(TRANSFORM ADD_FDB_TEST_TEST_FILES PREPEND "${CMAKE_CURRENT_SOURCE_DIR}/")
|
||||
add_test(NAME ${test_name}
|
||||
COMMAND $<TARGET_FILE:Python::Interpreter> ${TestRunner}
|
||||
-n ${test_name}
|
||||
-b ${PROJECT_BINARY_DIR}
|
||||
-t ${test_type}
|
||||
-O ${OLD_FDBSERVER_BINARY}
|
||||
--crash
|
||||
--aggregate-traces ${TEST_AGGREGATE_TRACES}
|
||||
--log-format ${TEST_LOG_FORMAT}
|
||||
--keep-logs ${TEST_KEEP_LOGS}
|
||||
--keep-simdirs ${TEST_KEEP_SIMDIR}
|
||||
--seed ${SEED}
|
||||
--test-number ${assigned_id}
|
||||
${BUGGIFY_OPTION}
|
||||
${VALGRIND_OPTION}
|
||||
${ADD_FDB_TEST_TEST_FILES}
|
||||
WORKING_DIRECTORY ${PROJECT_BINARY_DIR})
|
||||
get_filename_component(test_dir_full ${first_file} DIRECTORY)
|
||||
if(NOT ${test_dir_full} STREQUAL "")
|
||||
get_filename_component(test_dir ${test_dir_full} NAME)
|
||||
set_tests_properties(${test_name} PROPERTIES TIMEOUT ${this_test_timeout} LABELS "${test_dir}")
|
||||
if (ENABLE_SIMULATION_TESTS)
|
||||
add_test(NAME ${test_name}
|
||||
COMMAND $<TARGET_FILE:Python::Interpreter> ${TestRunner}
|
||||
-n ${test_name}
|
||||
-b ${PROJECT_BINARY_DIR}
|
||||
-t ${test_type}
|
||||
-O ${OLD_FDBSERVER_BINARY}
|
||||
--crash
|
||||
--aggregate-traces ${TEST_AGGREGATE_TRACES}
|
||||
--log-format ${TEST_LOG_FORMAT}
|
||||
--keep-logs ${TEST_KEEP_LOGS}
|
||||
--keep-simdirs ${TEST_KEEP_SIMDIR}
|
||||
--seed ${SEED}
|
||||
--test-number ${assigned_id}
|
||||
${BUGGIFY_OPTION}
|
||||
${VALGRIND_OPTION}
|
||||
${ADD_FDB_TEST_TEST_FILES}
|
||||
WORKING_DIRECTORY ${PROJECT_BINARY_DIR})
|
||||
set_tests_properties("${test_name}" PROPERTIES ENVIRONMENT UBSAN_OPTIONS=print_stacktrace=1:halt_on_error=1)
|
||||
get_filename_component(test_dir_full ${first_file} DIRECTORY)
|
||||
if(NOT ${test_dir_full} STREQUAL "")
|
||||
get_filename_component(test_dir ${test_dir_full} NAME)
|
||||
set_tests_properties(${test_name} PROPERTIES TIMEOUT ${this_test_timeout} LABELS "${test_dir}")
|
||||
endif()
|
||||
endif()
|
||||
# set variables used for generating test packages
|
||||
set(TEST_NAMES ${TEST_NAMES} ${test_name} PARENT_SCOPE)
|
||||
|
@ -261,6 +267,14 @@ function(create_correctness_package)
|
|||
)
|
||||
add_custom_target(package_tests ALL DEPENDS ${tar_file})
|
||||
add_dependencies(package_tests strip_only_fdbserver TestHarness)
|
||||
set(unversioned_tar_file "${CMAKE_BINARY_DIR}/packages/correctness.tar.gz")
|
||||
add_custom_command(
|
||||
OUTPUT "${unversioned_tar_file}"
|
||||
DEPENDS "${tar_file}"
|
||||
COMMAND ${CMAKE_COMMAND} -E copy "${tar_file}" "${unversioned_tar_file}"
|
||||
COMMENT "Copy correctness package to ${unversioned_tar_file}")
|
||||
add_custom_target(package_tests_u DEPENDS "${unversioned_tar_file}")
|
||||
add_dependencies(package_tests_u package_tests)
|
||||
endfunction()
|
||||
|
||||
function(create_valgrind_correctness_package)
|
||||
|
@ -288,6 +302,14 @@ function(create_valgrind_correctness_package)
|
|||
)
|
||||
add_custom_target(package_valgrind_tests ALL DEPENDS ${tar_file})
|
||||
add_dependencies(package_valgrind_tests strip_only_fdbserver TestHarness)
|
||||
set(unversioned_tar_file "${CMAKE_BINARY_DIR}/packages/valgrind.tar.gz")
|
||||
add_custom_command(
|
||||
OUTPUT "${unversioned_tar_file}"
|
||||
DEPENDS "${tar_file}"
|
||||
COMMAND ${CMAKE_COMMAND} -E copy "${tar_file}" "${unversioned_tar_file}"
|
||||
COMMENT "Copy valgrind package to ${unversioned_tar_file}")
|
||||
add_custom_target(package_valgrind_tests_u DEPENDS "${unversioned_tar_file}")
|
||||
add_dependencies(package_valgrind_tests_u package_valgrind_tests)
|
||||
endif()
|
||||
endfunction()
|
||||
|
||||
|
@ -378,9 +400,10 @@ function(package_bindingtester)
|
|||
add_dependencies(bindingtester copy_bindingtester_binaries)
|
||||
endfunction()
|
||||
|
||||
# Creates a single cluster before running the specified command (usually a ctest test)
|
||||
function(add_fdbclient_test)
|
||||
set(options DISABLED ENABLED)
|
||||
set(oneValueArgs NAME)
|
||||
set(oneValueArgs NAME PROCESS_NUMBER TEST_TIMEOUT)
|
||||
set(multiValueArgs COMMAND)
|
||||
cmake_parse_arguments(T "${options}" "${oneValueArgs}" "${multiValueArgs}" "${ARGN}")
|
||||
if(OPEN_FOR_IDE)
|
||||
|
@ -396,12 +419,57 @@ function(add_fdbclient_test)
|
|||
message(FATAL_ERROR "COMMAND is a required argument for add_fdbclient_test")
|
||||
endif()
|
||||
message(STATUS "Adding Client test ${T_NAME}")
|
||||
add_test(NAME "${T_NAME}"
|
||||
COMMAND ${CMAKE_SOURCE_DIR}/tests/TestRunner/tmp_cluster.py
|
||||
if (T_PROCESS_NUMBER)
|
||||
add_test(NAME "${T_NAME}"
|
||||
COMMAND ${Python_EXECUTABLE} ${CMAKE_SOURCE_DIR}/tests/TestRunner/tmp_cluster.py
|
||||
--build-dir ${CMAKE_BINARY_DIR}
|
||||
--process-number ${T_PROCESS_NUMBER}
|
||||
--
|
||||
${T_COMMAND})
|
||||
else()
|
||||
add_test(NAME "${T_NAME}"
|
||||
COMMAND ${Python_EXECUTABLE} ${CMAKE_SOURCE_DIR}/tests/TestRunner/tmp_cluster.py
|
||||
--build-dir ${CMAKE_BINARY_DIR}
|
||||
--
|
||||
${T_COMMAND})
|
||||
set_tests_properties("${T_NAME}" PROPERTIES TIMEOUT 60)
|
||||
endif()
|
||||
if (T_TEST_TIMEOUT)
|
||||
set_tests_properties("${T_NAME}" PROPERTIES TIMEOUT ${T_TEST_TIMEOUT})
|
||||
else()
|
||||
# default timeout
|
||||
set_tests_properties("${T_NAME}" PROPERTIES TIMEOUT 60)
|
||||
endif()
|
||||
set_tests_properties("${T_NAME}" PROPERTIES ENVIRONMENT UBSAN_OPTIONS=print_stacktrace=1:halt_on_error=1)
|
||||
endfunction()
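As a usage sketch of the function above (the test name and script path are placeholders, not taken from this change), a caller would pass the new PROCESS_NUMBER and TEST_TIMEOUT arguments like so:

add_fdbclient_test(
  NAME example_single_cluster_test
  PROCESS_NUMBER 4
  TEST_TIMEOUT 120
  COMMAND ${Python_EXECUTABLE} ${CMAKE_SOURCE_DIR}/tests/example_client_test.py)

Omitting PROCESS_NUMBER falls back to the plain single-cluster add_test() branch, and omitting TEST_TIMEOUT leaves the default 60-second timeout in place.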
|
||||
|
||||
# Creates 3 distinct clusters before running the specified command.
|
||||
# This is useful for testing features that require multiple clusters (like the
|
||||
# multi-cluster FDB client)
|
||||
function(add_multi_fdbclient_test)
|
||||
set(options DISABLED ENABLED)
|
||||
set(oneValueArgs NAME)
|
||||
set(multiValueArgs COMMAND)
|
||||
cmake_parse_arguments(T "${options}" "${oneValueArgs}" "${multiValueArgs}" "${ARGN}")
|
||||
if(OPEN_FOR_IDE)
|
||||
return()
|
||||
endif()
|
||||
if(NOT T_ENABLED AND T_DISABLED)
|
||||
return()
|
||||
endif()
|
||||
if(NOT T_NAME)
|
||||
message(FATAL_ERROR "NAME is a required argument for add_multi_fdbclient_test")
|
||||
endif()
|
||||
if(NOT T_COMMAND)
|
||||
message(FATAL_ERROR "COMMAND is a required argument for add_multi_fdbclient_test")
|
||||
endif()
|
||||
message(STATUS "Adding Client test ${T_NAME}")
|
||||
add_test(NAME "${T_NAME}"
|
||||
COMMAND ${Python_EXECUTABLE} ${CMAKE_SOURCE_DIR}/tests/TestRunner/tmp_multi_cluster.py
|
||||
--build-dir ${CMAKE_BINARY_DIR}
|
||||
--clusters 3
|
||||
--
|
||||
${T_COMMAND})
|
||||
set_tests_properties("${T_NAME}" PROPERTIES TIMEOUT 60)
|
||||
endfunction()
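A corresponding sketch for the three-cluster variant (the script path is again a placeholder):

add_multi_fdbclient_test(
  NAME example_multi_cluster_test
  COMMAND ${Python_EXECUTABLE} ${CMAKE_SOURCE_DIR}/tests/example_multi_cluster_test.py)

Because tmp_multi_cluster.py is invoked with --clusters 3, the wrapped command runs against three independent clusters.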
|
||||
|
||||
function(add_java_test)
|
||||
|
|
|
@ -1,61 +1,73 @@
|
|||
function(compile_boost)
|
||||
|
||||
# Initialize function incoming parameters
|
||||
set(options)
|
||||
set(oneValueArgs TARGET)
|
||||
set(multiValueArgs BUILD_ARGS CXXFLAGS LDFLAGS)
|
||||
cmake_parse_arguments(MY "${options}" "${oneValueArgs}"
|
||||
cmake_parse_arguments(COMPILE_BOOST "${options}" "${oneValueArgs}"
|
||||
"${multiValueArgs}" ${ARGN} )
|
||||
# Configure the boost toolset to use
|
||||
set(BOOTSTRAP_ARGS "--with-libraries=context")
|
||||
set(B2_COMMAND "./b2")
|
||||
set(BOOST_COMPILER_FLAGS -fvisibility=hidden -fPIC -std=c++14 -w)
|
||||
|
||||
# Configure bootstrap command
|
||||
set(BOOTSTRAP_COMMAND "./bootstrap.sh")
|
||||
set(BOOTSTRAP_LIBRARIES "context")
|
||||
|
||||
set(BOOST_CXX_COMPILER "${CMAKE_CXX_COMPILER}")
|
||||
if(APPLE)
|
||||
set(BOOST_TOOLSET "clang-darwin")
|
||||
# this is to fix a weird macOS issue -- by default
|
||||
# cmake would otherwise pass a compiler that can't
|
||||
# compile boost
|
||||
set(BOOST_CXX_COMPILER "/usr/bin/clang++")
|
||||
elseif(CLANG)
|
||||
if(CLANG)
|
||||
set(BOOST_TOOLSET "clang")
|
||||
list(APPEND BOOTSTRAP_ARGS "${BOOTSTRAP_COMMAND} --with-toolset=clang")
|
||||
if(APPLE)
|
||||
# this is to fix a weird macOS issue -- by default
|
||||
# cmake would otherwise pass a compiler that can't
|
||||
# compile boost
|
||||
set(BOOST_CXX_COMPILER "/usr/bin/clang++")
|
||||
endif()
|
||||
else()
|
||||
set(BOOST_TOOLSET "gcc")
|
||||
endif()
|
||||
if(APPLE OR USE_LIBCXX)
|
||||
list(APPEND BOOST_COMPILER_FLAGS -stdlib=libc++)
|
||||
endif()
|
||||
set(BOOST_ADDITIONAL_COMPILE_OPTIOINS "")
|
||||
foreach(flag IN LISTS BOOST_COMPILER_FLAGS MY_CXXFLAGS)
|
||||
string(APPEND BOOST_ADDITIONAL_COMPILE_OPTIOINS "<cxxflags>${flag} ")
|
||||
endforeach()
|
||||
foreach(flag IN LISTS MY_LDFLAGS)
|
||||
string(APPEND BOOST_ADDITIONAL_COMPILE_OPTIOINS "<linkflags>${flag} ")
|
||||
endforeach()
|
||||
configure_file(${CMAKE_SOURCE_DIR}/cmake/user-config.jam.cmake ${CMAKE_BINARY_DIR}/user-config.jam)
|
||||
message(STATUS "Use ${BOOST_TOOLSET} to build boost")
|
||||
|
||||
# Configure b2 command
|
||||
set(B2_COMMAND "./b2")
|
||||
set(BOOST_COMPILER_FLAGS -fvisibility=hidden -fPIC -std=c++17 -w)
|
||||
set(BOOST_LINK_FLAGS "")
|
||||
if(APPLE OR CLANG OR USE_LIBCXX)
|
||||
list(APPEND BOOST_COMPILER_FLAGS -stdlib=libc++ -nostdlib++)
|
||||
list(APPEND BOOST_LINK_FLAGS -static-libgcc -lc++ -lc++abi)
|
||||
endif()
|
||||
|
||||
# Update the user-config.jam
|
||||
set(BOOST_ADDITIONAL_COMPILE_OPTIONS "")
|
||||
foreach(flag IN LISTS BOOST_COMPILER_FLAGS COMPILE_BOOST_CXXFLAGS)
|
||||
string(APPEND BOOST_ADDITIONAL_COMPILE_OPTIONS "<cxxflags>${flag} ")
|
||||
endforeach()
|
||||
#foreach(flag IN LISTS BOOST_LINK_FLAGS COMPILE_BOOST_LDFLAGS)
|
||||
# string(APPEND BOOST_ADDITIONAL_COMPILE_OPTIONS "<linkflags>${flag} ")
|
||||
#endforeach()
|
||||
configure_file(${CMAKE_SOURCE_DIR}/cmake/user-config.jam.cmake ${CMAKE_BINARY_DIR}/user-config.jam)
|
||||
set(USER_CONFIG_FLAG --user-config=${CMAKE_BINARY_DIR}/user-config.jam)
|
||||
|
||||
# Build boost
|
||||
include(ExternalProject)
|
||||
set(BOOST_INSTALL_DIR "${CMAKE_BINARY_DIR}/boost_install")
|
||||
ExternalProject_add("${MY_TARGET}Project"
|
||||
ExternalProject_add("${COMPILE_BOOST_TARGET}Project"
|
||||
URL "https://boostorg.jfrog.io/artifactory/main/release/1.72.0/source/boost_1_72_0.tar.bz2"
|
||||
URL_HASH SHA256=59c9b274bc451cf91a9ba1dd2c7fdcaf5d60b1b3aa83f2c9fa143417cc660722
|
||||
CONFIGURE_COMMAND ./bootstrap.sh ${BOOTSTRAP_ARGS}
|
||||
BUILD_COMMAND ${B2_COMMAND} link=static ${MY_BUILD_ARGS} --prefix=${BOOST_INSTALL_DIR} ${USER_CONFIG_FLAG} install
|
||||
CONFIGURE_COMMAND ${BOOTSTRAP_COMMAND} ${BOOTSTRAP_ARGS} --with-libraries=${BOOTSTRAP_LIBRARIES} --with-toolset=${BOOST_TOOLSET}
|
||||
BUILD_COMMAND ${B2_COMMAND} link=static ${COMPILE_BOOST_BUILD_ARGS} --prefix=${BOOST_INSTALL_DIR} ${USER_CONFIG_FLAG} install
|
||||
BUILD_IN_SOURCE ON
|
||||
INSTALL_COMMAND ""
|
||||
UPDATE_COMMAND ""
|
||||
BUILD_BYPRODUCTS "${BOOST_INSTALL_DIR}/boost/config.hpp"
|
||||
"${BOOST_INSTALL_DIR}/lib/libboost_context.a")
|
||||
|
||||
add_library(${MY_TARGET}_context STATIC IMPORTED)
|
||||
add_dependencies(${MY_TARGET}_context ${MY_TARGET}Project)
|
||||
set_target_properties(${MY_TARGET}_context PROPERTIES IMPORTED_LOCATION "${BOOST_INSTALL_DIR}/lib/libboost_context.a")
|
||||
add_library(${COMPILE_BOOST_TARGET}_context STATIC IMPORTED)
|
||||
add_dependencies(${COMPILE_BOOST_TARGET}_context ${COMPILE_BOOST_TARGET}Project)
|
||||
set_target_properties(${COMPILE_BOOST_TARGET}_context PROPERTIES IMPORTED_LOCATION "${BOOST_INSTALL_DIR}/lib/libboost_context.a")
|
||||
|
||||
add_library(${MY_TARGET} INTERFACE)
|
||||
target_include_directories(${MY_TARGET} SYSTEM INTERFACE ${BOOST_INSTALL_DIR}/include)
|
||||
target_link_libraries(${MY_TARGET} INTERFACE ${MY_TARGET}_context)
|
||||
endfunction()
|
||||
add_library(${COMPILE_BOOST_TARGET} INTERFACE)
|
||||
target_include_directories(${COMPILE_BOOST_TARGET} SYSTEM INTERFACE ${BOOST_INSTALL_DIR}/include)
|
||||
target_link_libraries(${COMPILE_BOOST_TARGET} INTERFACE ${COMPILE_BOOST_TARGET}_context)
|
||||
|
||||
endfunction(compile_boost)
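A hypothetical call to the reworked compile_boost() above, showing how the renamed COMPILE_BOOST_* variables map onto the keyword arguments (the target name and flag values are illustrative, not part of this diff):

compile_boost(TARGET boost_target
              BUILD_ARGS ${BOOST_EXTRA_B2_ARGS}
              CXXFLAGS -fvisibility=hidden
              LDFLAGS -static-libgcc)

TARGET names both the ExternalProject (boost_targetProject) and the INTERFACE library that consumers link against; CXXFLAGS feeds the generated user-config.jam, while the LDFLAGS pass-through is commented out at this point in the change.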
|
||||
|
||||
if(USE_SANITIZER)
|
||||
if(WIN32)
|
||||
|
@ -72,10 +84,20 @@ if(USE_SANITIZER)
|
|||
return()
|
||||
endif()
|
||||
|
||||
list(APPEND CMAKE_PREFIX_PATH /opt/boost_1_72_0)
|
||||
# since boost 1.72 boost installs cmake configs. We will enforce config mode
|
||||
set(Boost_USE_STATIC_LIBS ON)
|
||||
set(BOOST_HINT_PATHS /opt/boost_1_72_0)
|
||||
|
||||
# Clang and Gcc will have different name mangling for std::call_once, etc.
|
||||
if (UNIX AND CMAKE_CXX_COMPILER_ID MATCHES "Clang$")
|
||||
list(APPEND CMAKE_PREFIX_PATH /opt/boost_1_72_0_clang)
|
||||
set(BOOST_HINT_PATHS /opt/boost_1_72_0_clang)
|
||||
message(STATUS "Using Clang version of boost::context")
|
||||
else ()
|
||||
list(APPEND CMAKE_PREFIX_PATH /opt/boost_1_72_0)
|
||||
set(BOOST_HINT_PATHS /opt/boost_1_72_0)
|
||||
message(STATUS "Using g++ version of boost::context")
|
||||
endif ()
|
||||
|
||||
if(BOOST_ROOT)
|
||||
list(APPEND BOOST_HINT_PATHS ${BOOST_ROOT})
|
||||
endif()
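For context, a minimal sketch of how BOOST_HINT_PATHS would typically be consumed downstream of this hunk; the exact find_package call is not shown in this diff, so treat it as an assumption:

find_package(Boost 1.72 EXACT QUIET COMPONENTS context CONFIG PATHS ${BOOST_HINT_PATHS})
if(NOT Boost_FOUND)
  compile_boost(TARGET boost_target)  # fall back to building Boost from source
endif()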
|
||||
|
|
|
@ -36,8 +36,8 @@ if (RocksDB_FOUND)
|
|||
${BINARY_DIR}/librocksdb.a)
|
||||
else()
|
||||
ExternalProject_Add(rocksdb
|
||||
URL https://github.com/facebook/rocksdb/archive/v6.10.1.tar.gz
|
||||
URL_HASH SHA256=d573d2f15cdda883714f7e0bc87b814a8d4a53a82edde558f08f940e905541ee
|
||||
URL https://github.com/facebook/rocksdb/archive/v6.22.1.tar.gz
|
||||
URL_HASH SHA256=2df8f34a44eda182e22cf84dee7a14f17f55d305ff79c06fb3cd1e5f8831e00d
|
||||
CMAKE_ARGS -DUSE_RTTI=1 -DPORTABLE=${PORTABLE_ROCKSDB}
|
||||
-DCMAKE_CXX_STANDARD=${CMAKE_CXX_STANDARD}
|
||||
-DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
|
||||
|
|
|
@ -100,8 +100,7 @@ if(WIN32)
|
|||
endif()
|
||||
add_compile_options(/W0 /EHsc /bigobj $<$<CONFIG:Release>:/Zi> /MP /FC /Gm-)
|
||||
add_compile_definitions(NOMINMAX)
|
||||
set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /MT")
|
||||
set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /MTd")
|
||||
set(CMAKE_MSVC_RUNTIME_LIBRARY "MultiThreaded$<$<CONFIG:Debug>:Debug>")
|
||||
else()
|
||||
set(GCC NO)
|
||||
set(CLANG NO)
|
||||
|
@ -262,10 +261,6 @@ else()
|
|||
|
||||
if (CLANG)
|
||||
add_compile_options()
|
||||
# Clang has link errors unless `atomic` is specifically requested.
|
||||
if(NOT APPLE)
|
||||
#add_link_options(-latomic)
|
||||
endif()
|
||||
if (APPLE OR USE_LIBCXX)
|
||||
add_compile_options($<$<COMPILE_LANGUAGE:CXX>:-stdlib=libc++>)
|
||||
if (NOT APPLE)
|
||||
|
@ -286,25 +281,20 @@ else()
|
|||
-Wpessimizing-move
|
||||
-Woverloaded-virtual
|
||||
-Wshift-sign-overflow
|
||||
# Here's the current set of warnings we need to explicitly disable to compile warning-free with clang 10
|
||||
# Here's the current set of warnings we need to explicitly disable to compile warning-free with clang 11
|
||||
-Wno-comment
|
||||
-Wno-dangling-else
|
||||
-Wno-delete-non-virtual-dtor
|
||||
-Wno-format
|
||||
-Wno-mismatched-tags
|
||||
-Wno-missing-field-initializers
|
||||
-Wno-reorder
|
||||
-Wno-reorder-ctor
|
||||
-Wno-sign-compare
|
||||
-Wno-tautological-pointer-compare
|
||||
-Wno-undefined-var-template
|
||||
-Wno-tautological-pointer-compare
|
||||
-Wno-unknown-pragmas
|
||||
-Wno-unknown-warning-option
|
||||
-Wno-unused-function
|
||||
-Wno-unused-local-typedef
|
||||
-Wno-unused-parameter
|
||||
-Wno-self-assign
|
||||
)
|
||||
if (USE_CCACHE)
|
||||
add_compile_options(
|
||||
|
|
|
@ -1 +1 @@
|
|||
using @BOOST_TOOLSET@ : : @BOOST_CXX_COMPILER@ : @BOOST_ADDITIONAL_COMPILE_OPTIOINS@ ;
|
||||
using @BOOST_TOOLSET@ : : @BOOST_CXX_COMPILER@ : @BOOST_ADDITIONAL_COMPILE_OPTIONS@ ;
|
||||
|
|
|
@ -144,7 +144,9 @@ namespace SummarizeTest
|
|||
string oldBinaryFolder = (args.Length > 1) ? args[1] : Path.Combine("/opt", "joshua", "global_data", "oldBinaries");
|
||||
bool useValgrind = args.Length > 2 && args[2].ToLower() == "true";
|
||||
int maxTries = (args.Length > 3) ? int.Parse(args[3]) : 3;
|
||||
return Run(Path.Combine("bin", BINARY), "", "tests", "summary.xml", "error.xml", "tmp", oldBinaryFolder, useValgrind, maxTries, true, Path.Combine("/app", "deploy", "runtime", ".tls_5_1", PLUGIN));
|
||||
bool buggifyEnabled = (args.Length > 4) ? bool.Parse(args[4]) : true;
|
||||
bool faultInjectionEnabled = (args.Length > 5) ? bool.Parse(args[5]) : true;
|
||||
return Run(Path.Combine("bin", BINARY), "", "tests", "summary.xml", "error.xml", "tmp", oldBinaryFolder, useValgrind, maxTries, true, Path.Combine("/app", "deploy", "runtime", ".tls_5_1", PLUGIN), buggifyEnabled, faultInjectionEnabled);
|
||||
}
|
||||
catch(Exception e)
|
||||
{
|
||||
|
@ -240,10 +242,10 @@ namespace SummarizeTest
|
|||
}
|
||||
}
|
||||
|
||||
static int Run(string fdbserverName, string tlsPluginFile, string testFolder, string summaryFileName, string errorFileName, string runDir, string oldBinaryFolder, bool useValgrind, int maxTries, bool traceToStdout = false, string tlsPluginFile_5_1 = "")
|
||||
static int Run(string fdbserverName, string tlsPluginFile, string testFolder, string summaryFileName, string errorFileName, string runDir, string oldBinaryFolder, bool useValgrind, int maxTries, bool traceToStdout = false, string tlsPluginFile_5_1 = "", bool buggifyEnabled = true, bool faultInjectionEnabled = true)
|
||||
{
|
||||
int seed = random.Next(1000000000);
|
||||
bool buggify = random.NextDouble() < buggifyOnRatio;
|
||||
bool buggify = buggifyEnabled ? (random.NextDouble() < buggifyOnRatio) : false;
|
||||
string testFile = null;
|
||||
string testDir = "";
|
||||
string oldServerName = "";
|
||||
|
@ -353,11 +355,11 @@ namespace SummarizeTest
|
|||
bool useNewPlugin = (oldServerName == fdbserverName) || versionGreaterThanOrEqual(oldServerName.Split('-').Last(), "5.2.0");
|
||||
bool useToml = File.Exists(testFile + "-1.toml");
|
||||
string testFile1 = useToml ? testFile + "-1.toml" : testFile + "-1.txt";
|
||||
result = RunTest(firstServerName, useNewPlugin ? tlsPluginFile : tlsPluginFile_5_1, summaryFileName, errorFileName, seed, buggify, testFile1, runDir, uid, expectedUnseed, out unseed, out retryableError, logOnRetryableError, useValgrind, false, true, oldServerName, traceToStdout, noSim);
|
||||
result = RunTest(firstServerName, useNewPlugin ? tlsPluginFile : tlsPluginFile_5_1, summaryFileName, errorFileName, seed, buggify, testFile1, runDir, uid, expectedUnseed, out unseed, out retryableError, logOnRetryableError, useValgrind, false, true, oldServerName, traceToStdout, noSim, faultInjectionEnabled);
|
||||
if (result == 0)
|
||||
{
|
||||
string testFile2 = useToml ? testFile + "-2.toml" : testFile + "-2.txt";
|
||||
result = RunTest(secondServerName, tlsPluginFile, summaryFileName, errorFileName, seed+1, buggify, testFile2, runDir, uid, expectedUnseed, out unseed, out retryableError, logOnRetryableError, useValgrind, true, false, oldServerName, traceToStdout, noSim);
|
||||
result = RunTest(secondServerName, tlsPluginFile, summaryFileName, errorFileName, seed+1, buggify, testFile2, runDir, uid, expectedUnseed, out unseed, out retryableError, logOnRetryableError, useValgrind, true, false, oldServerName, traceToStdout, noSim, faultInjectionEnabled);
|
||||
}
|
||||
}
|
||||
else
|
||||
|
@ -365,13 +367,13 @@ namespace SummarizeTest
|
|||
int expectedUnseed = -1;
|
||||
if (!useValgrind && unseedCheck)
|
||||
{
|
||||
result = RunTest(fdbserverName, tlsPluginFile, null, null, seed, buggify, testFile, runDir, Guid.NewGuid().ToString(), -1, out expectedUnseed, out retryableError, logOnRetryableError, false, false, false, "", traceToStdout, noSim);
|
||||
result = RunTest(fdbserverName, tlsPluginFile, null, null, seed, buggify, testFile, runDir, Guid.NewGuid().ToString(), -1, out expectedUnseed, out retryableError, logOnRetryableError, false, false, false, "", traceToStdout, noSim, faultInjectionEnabled);
|
||||
}
|
||||
|
||||
if (!retryableError)
|
||||
{
|
||||
int unseed;
|
||||
result = RunTest(fdbserverName, tlsPluginFile, summaryFileName, errorFileName, seed, buggify, testFile, runDir, Guid.NewGuid().ToString(), expectedUnseed, out unseed, out retryableError, logOnRetryableError, useValgrind, false, false, "", traceToStdout, noSim);
|
||||
result = RunTest(fdbserverName, tlsPluginFile, summaryFileName, errorFileName, seed, buggify, testFile, runDir, Guid.NewGuid().ToString(), expectedUnseed, out unseed, out retryableError, logOnRetryableError, useValgrind, false, false, "", traceToStdout, noSim, faultInjectionEnabled);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -386,7 +388,7 @@ namespace SummarizeTest
|
|||
|
||||
private static int RunTest(string fdbserverName, string tlsPluginFile, string summaryFileName, string errorFileName, int seed,
|
||||
bool buggify, string testFile, string runDir, string uid, int expectedUnseed, out int unseed, out bool retryableError, bool logOnRetryableError, bool useValgrind, bool restarting = false,
|
||||
bool willRestart = false, string oldBinaryName = "", bool traceToStdout = false, bool noSim = false)
|
||||
bool willRestart = false, string oldBinaryName = "", bool traceToStdout = false, bool noSim = false, bool faultInjectionEnabled = true)
|
||||
{
|
||||
unseed = -1;
|
||||
|
||||
|
@ -407,7 +409,7 @@ namespace SummarizeTest
|
|||
Directory.CreateDirectory(tempPath);
|
||||
Directory.SetCurrentDirectory(tempPath);
|
||||
|
||||
if (!restarting) LogTestPlan(summaryFileName, testFile, seed, buggify, expectedUnseed != -1, uid, oldBinaryName);
|
||||
if (!restarting) LogTestPlan(summaryFileName, testFile, seed, buggify, expectedUnseed != -1, uid, faultInjectionEnabled, oldBinaryName);
|
||||
|
||||
string valgrindOutputFile = null;
|
||||
using (var process = new System.Diagnostics.Process())
|
||||
|
@ -422,15 +424,16 @@ namespace SummarizeTest
|
|||
process.StartInfo.RedirectStandardOutput = true;
|
||||
string role = (noSim) ? "test" : "simulation";
|
||||
var args = "";
|
||||
string faultInjectionArg = string.IsNullOrEmpty(oldBinaryName) ? string.Format("-fi {0}", faultInjectionEnabled ? "on" : "off") : "";
|
||||
if (willRestart && oldBinaryName.EndsWith("alpha6"))
|
||||
{
|
||||
args = string.Format("-Rs 1000000000 -r {0} {1} -s {2} -f \"{3}\" -b {4} {5} --crash",
|
||||
role, IsRunningOnMono() ? "" : "-q", seed, testFile, buggify ? "on" : "off", tlsPluginArg);
|
||||
args = string.Format("-Rs 1000000000 -r {0} {1} -s {2} -f \"{3}\" -b {4} {5} {6} --crash",
|
||||
role, IsRunningOnMono() ? "" : "-q", seed, testFile, buggify ? "on" : "off", faultInjectionArg, tlsPluginArg);
|
||||
}
|
||||
else
|
||||
{
|
||||
args = string.Format("-Rs 1GB -r {0} {1} -s {2} -f \"{3}\" -b {4} {5} --crash",
|
||||
role, IsRunningOnMono() ? "" : "-q", seed, testFile, buggify ? "on" : "off", tlsPluginArg);
|
||||
args = string.Format("-Rs 1GB -r {0} {1} -s {2} -f \"{3}\" -b {4} {5} {6} --crash",
|
||||
role, IsRunningOnMono() ? "" : "-q", seed, testFile, buggify ? "on" : "off", faultInjectionArg, tlsPluginArg);
|
||||
}
|
||||
if (restarting) args = args + " --restarting";
|
||||
if (useValgrind && !willRestart)
|
||||
|
@ -524,7 +527,7 @@ namespace SummarizeTest
|
|||
var xout = new XElement("UnableToKillProcess",
|
||||
new XAttribute("Severity", (int)Magnesium.Severity.SevWarnAlways));
|
||||
|
||||
AppendXmlMessageToSummary(summaryFileName, xout, traceToStdout, testFile, seed, buggify, expectedUnseed != -1, oldBinaryName);
|
||||
AppendXmlMessageToSummary(summaryFileName, xout, traceToStdout, testFile, seed, buggify, expectedUnseed != -1, oldBinaryName, faultInjectionEnabled);
|
||||
return 104;
|
||||
}
|
||||
}
|
||||
|
@ -536,7 +539,8 @@ namespace SummarizeTest
|
|||
consoleThread.Join();
|
||||
|
||||
var traceFiles = Directory.GetFiles(tempPath, "trace*.*").Where(s => s.EndsWith(".xml") || s.EndsWith(".json")).ToArray();
|
||||
if (traceFiles.Length == 0)
|
||||
// If the lack of traces is caused by the process failing, then the result will include its stderr
|
||||
if (process.ExitCode == 0 && traceFiles.Length == 0)
|
||||
{
|
||||
if (!traceToStdout)
|
||||
{
|
||||
|
@ -548,7 +552,7 @@ namespace SummarizeTest
|
|||
new XAttribute("Plugin", tlsPluginFile),
|
||||
new XAttribute("MachineName", System.Environment.MachineName));
|
||||
|
||||
AppendXmlMessageToSummary(summaryFileName, xout, traceToStdout, testFile, seed, buggify, expectedUnseed != -1, oldBinaryName);
|
||||
AppendXmlMessageToSummary(summaryFileName, xout, traceToStdout, testFile, seed, buggify, expectedUnseed != -1, oldBinaryName, faultInjectionEnabled);
|
||||
ok = useValgrind ? 0 : 103;
|
||||
}
|
||||
else
|
||||
|
@ -587,7 +591,7 @@ namespace SummarizeTest
|
|||
new XAttribute("Severity", (int)Magnesium.Severity.SevError),
|
||||
new XAttribute("ErrorMessage", e.Message));
|
||||
|
||||
AppendXmlMessageToSummary(summaryFileName, xout, traceToStdout, testFile, seed, buggify, expectedUnseed != -1, oldBinaryName);
|
||||
AppendXmlMessageToSummary(summaryFileName, xout, traceToStdout, testFile, seed, buggify, expectedUnseed != -1, oldBinaryName, faultInjectionEnabled);
|
||||
return 101;
|
||||
}
|
||||
finally
|
||||
|
@ -637,6 +641,15 @@ namespace SummarizeTest
|
|||
{
|
||||
if(!String.IsNullOrEmpty(errLine.Data))
|
||||
{
|
||||
if (errLine.Data.EndsWith("WARNING: ASan doesn't fully support makecontext/swapcontext functions and may produce false positives in some cases!")) {
|
||||
// When running ASAN we expect to see this message. Boost coroutine should be using the correct asan annotations so that it shouldn't produce any false positives.
|
||||
return;
|
||||
}
|
||||
if (errLine.Data.EndsWith("Warning: unimplemented fcntl command: 1036")) {
|
||||
// Valgrind produces this warning when F_SET_RW_HINT is used
|
||||
return;
|
||||
}
|
||||
|
||||
hasError = true;
|
||||
if(Errors.Count < maxErrors) {
|
||||
if(errLine.Data.Length > maxErrorLength) {
|
||||
|
@ -694,13 +707,14 @@ namespace SummarizeTest
|
|||
}
|
||||
}
|
||||
|
||||
static void LogTestPlan(string summaryFileName, string testFileName, int randomSeed, bool buggify, bool testDeterminism, string uid, string oldBinary="")
|
||||
static void LogTestPlan(string summaryFileName, string testFileName, int randomSeed, bool buggify, bool testDeterminism, string uid, bool faultInjectionEnabled, string oldBinary="")
|
||||
{
|
||||
var xout = new XElement("TestPlan",
|
||||
new XAttribute("TestUID", uid),
|
||||
new XAttribute("RandomSeed", randomSeed),
|
||||
new XAttribute("TestFile", testFileName),
|
||||
new XAttribute("BuggifyEnabled", buggify ? "1" : "0"),
|
||||
new XAttribute("FaultInjectionEnabled", faultInjectionEnabled ? "1" : "0"),
|
||||
new XAttribute("DeterminismCheck", testDeterminism ? "1" : "0"),
|
||||
new XAttribute("OldBinary", Path.GetFileName(oldBinary)));
|
||||
AppendToSummary(summaryFileName, xout);
|
||||
|
@ -790,6 +804,8 @@ namespace SummarizeTest
|
|||
new XAttribute("DeterminismCheck", expectedUnseed != -1 ? "1" : "0"),
|
||||
new XAttribute("OldBinary", Path.GetFileName(oldBinaryName)));
|
||||
testBeginFound = true;
|
||||
if (ev.DDetails.ContainsKey("FaultInjectionEnabled"))
|
||||
xout.Add(new XAttribute("FaultInjectionEnabled", ev.Details.FaultInjectionEnabled));
|
||||
}
|
||||
if (ev.Type == "Simulation")
|
||||
{
|
||||
|
@ -961,10 +977,6 @@ namespace SummarizeTest
|
|||
int stderrBytes = 0;
|
||||
foreach (string err in outputErrors)
|
||||
{
|
||||
if (err.EndsWith("WARNING: ASan doesn't fully support makecontext/swapcontext functions and may produce false positives in some cases!")) {
|
||||
// When running ASAN we expect to see this message. Boost coroutine should be using the correct asan annotations so that it shouldn't produce any false positives.
|
||||
continue;
|
||||
}
|
||||
if (stderrSeverity == (int)Magnesium.Severity.SevError)
|
||||
{
|
||||
error = true;
|
||||
|
@ -1229,7 +1241,7 @@ namespace SummarizeTest
|
|||
}
|
||||
|
||||
private static void AppendXmlMessageToSummary(string summaryFileName, XElement xout, bool traceToStdout = false, string testFile = null,
|
||||
int? seed = null, bool? buggify = null, bool? determinismCheck = null, string oldBinaryName = null)
|
||||
int? seed = null, bool? buggify = null, bool? determinismCheck = null, string oldBinaryName = null, bool? faultInjectionEnabled = null)
|
||||
{
|
||||
var test = new XElement("Test", xout);
|
||||
if(testFile != null)
|
||||
|
@ -1238,6 +1250,8 @@ namespace SummarizeTest
|
|||
test.Add(new XAttribute("RandomSeed", seed));
|
||||
if(buggify != null)
|
||||
test.Add(new XAttribute("BuggifyEnabled", buggify.Value ? "1" : "0"));
|
||||
if(faultInjectionEnabled != null)
|
||||
test.Add(new XAttribute("FaultInjectionEnabled", faultInjectionEnabled.Value ? "1" : "0"));
|
||||
if(determinismCheck != null)
|
||||
test.Add(new XAttribute("DeterminismCheck", determinismCheck.Value ? "1" : "0"));
|
||||
if(oldBinaryName != null)
|
||||
|
|
|
@ -352,7 +352,7 @@ API for random reads to the DiskQueue. That ability is now required for
|
|||
peeking, and thus, `IDiskQueue`'s API has been enhanced correspondingly:
|
||||
|
||||
``` CPP
|
||||
enum class CheckHashes { NO, YES };
|
||||
BOOLEAN_PARAM(CheckHashes);
|
||||
|
||||
class IDiskQueue {
|
||||
// ...
|
||||
|
@ -369,9 +369,9 @@ and not `(start, length)`.
|
|||
Spilled data, when using spill-by-value, was resistant to bitrot via data being
|
||||
checksummed internally within SQLite's B-tree. Now that reads can be done
|
||||
directly, the responsibility for verifying data integrity falls upon the
|
||||
DiskQueue. `CheckHashes::YES` will cause the DiskQueue to use the checksum in
|
||||
DiskQueue. `CheckHashes::TRUE` will cause the DiskQueue to use the checksum in
|
||||
each DiskQueue page to verify data integrity. If an externally maintained
|
||||
checksum exists to verify the returned data, then `CheckHashes::NO` can be
|
||||
checksum exists to verify the returned data, then `CheckHashes::FALSE` can be
|
||||
used to elide the checksumming. A page failing its checksum will cause the
|
||||
transaction log to die with an `io_error()`.
|
||||
|
||||
|
|
|
@ -1,4 +1,8 @@
|
|||
add_subdirectory(tutorial)
|
||||
if(WIN32)
|
||||
return()
|
||||
endif()
|
||||
|
||||
# build a virtualenv
|
||||
set(sphinx_dir ${CMAKE_CURRENT_SOURCE_DIR}/sphinx)
|
||||
set(venv_dir ${CMAKE_CURRENT_BINARY_DIR}/venv)
|
||||
|
|
|
@ -25,6 +25,8 @@ API version 700
|
|||
General
|
||||
-------
|
||||
|
||||
* Committing a transaction will no longer partially reset it. In particular, getting the read version from a transaction that has committed or failed to commit with an error will return the original read version.
|
||||
|
||||
Python bindings
|
||||
---------------
|
||||
|
||||
|
|
|
@ -0,0 +1,195 @@
|
|||
###################################################
|
||||
FDB HA Write Path: How a mutation travels in FDB HA
|
||||
###################################################
|
||||
|
||||
| Author: Meng Xu
|
||||
| Reviewer: Alex Miller, Jingyu Zhou, Lukas Joswiak, Trevor Clinkenbeard
|
||||
| Audience: FDB developers, SREs and expert users.
|
||||
|
||||
This document describes how a mutation is replicated and moved from proxy to storage servers (SS) in an FDB High Availability (HA) cluster. Historically, FDB HA is also called Fearless DR or Multi-region configuration.
|
||||
|
||||
To simplify the description, we assume the HA cluster has the following configuration:
|
||||
|
||||
* Replication factor = 3 for storage servers. It means each mutation is replicated to 3 storage servers in the primary datacenter (DC) and 3 SSes in the secondary DC.
|
||||
|
||||
* Replication factor = 3 for transaction logs (tLogs). It means each mutation is synchronously replicated to 3 primary tLogs and 1 satellite tLog.
|
||||
|
||||
* Satellite replication factor = 1 satellite single replication. It means each mutation must be synchronously replicated to 1 satellite tLog before it can be committed.
|
||||
|
||||
* The satellite replication factor can be configured with one or two satellites and single, double or triple replicas as described here. We typically use only 1 satellite single replica config.
|
||||
|
||||
* Only 1 satellite is configured in the primary DC.
|
||||
|
||||
We describe the background knowledge -- Sharding and Tag structure -- before we discuss how a mutation travels in an FDB HA cluster.
|
||||
|
||||
Sharding: Which shard goes to which servers?
|
||||
============================================
|
||||
|
||||
A shard is a continuous key range. FDB divides the entire keyspace into thousands of shards. A mutation’s key decides which shard it belongs to.
|
||||
|
||||
Shard-to-SS mapping is determined by the \xff/keyServers/ system keyspace. In the system keyspace, a shard’s begin key is used as the key, the shard’s end key is the next key, and the shard’s SSes are the value. For example, we have the following key-values in the system keyspace: \xff/keyServers/a=(SS1,SS2,SS3) , \xff/keyServers/b=(SS2,SS4,SS7) . It indicates: shard [a,b) will be saved on storage servers whose IDs are SS1, SS2, SS3; and shard [b, \xff\xff) will be saved on storage servers SS2, SS4, SS7.
|
||||
|
||||
SS-to-tag mapping is decided by the \xff/serverTag/ system keyspace. A tag is a whole number (i.e., a natural number or 0). Each SS is mapped to a tag and vice versa. We use tags to represent SSes in the transaction system to save space and speed up search, because tags are continuous, small numbers (described in the Tag structure section) while SS IDs are random 64-bit UIDs.
|
||||
|
||||
Shard-to-tLog mapping is decided by the shard-to-SS mapping and the tLog’s replication policy. We use an example to explain how it works. Assume a mutation is mapped to SS1, SS2, and SS5, whose tags are respectively 1, 2, and 5, and the system has four tLogs. We use a function to map a tag to a tLog index: f(tag) = tLogIndex, where f is a modular function in the FDB 6.2 and 6.3 implementations. In the example, the mutation’s assigned tLog indexes will be 1, 2, 1, calculated as each tag % 4. As you may notice, the three tags produce only two unique tLog indexes, which does not satisfy the tLog replication policy that requires 3 tLog replicas. The proxy will call the replication policy engine, selectReplicas(), to choose another tLog for the mutation.
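The snippet below is a minimal, illustrative sketch of the modular mapping described above; it is not the FDB implementation, and ``tLogIndexFor`` is a name invented for this example.

.. code-block:: cpp

    #include <set>
    #include <vector>

    // Sketch only: f(tag) = tag mod number_of_tLogs, as in the example above.
    int tLogIndexFor(int tag, int numTLogs) {
        return tag % numTLogs;
    }

    int main() {
        std::vector<int> ssTags = { 1, 2, 5 }; // tags of SS1, SS2, SS5
        int numTLogs = 4;
        std::set<int> tLogIndexes;
        for (int tag : ssTags)
            tLogIndexes.insert(tLogIndexFor(tag, numTLogs)); // yields {1, 2}
        // Only two unique tLog indexes, but the replication policy requires three,
        // so the proxy would ask the policy engine (selectReplicas()) for one more tLog.
        return 0;
    }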
|
||||
|
||||
|
||||
Tag structure
|
||||
=============
|
||||
|
||||
Tag is an overloaded term in FDB. In the early history of FDB, a tag was simply a number used in the SS-to-tag mapping. As FDB evolved, tags came to be used by different components for different purposes:
|
||||
|
||||
* As FDB evolved to HA, tags became used not only by primary tLogs but also by satellite tLogs, log routers, and remote tLogs;
|
||||
|
||||
* As FDB scaled and we worked to reduce recovery time, a special tag for the transaction state store (txnStateStore) was introduced;
|
||||
|
||||
* FDB also has transaction tags, which are used for transaction throttling, not for the tag-partitioned log system discussed in this article. See :ref:`transaction-tagging`
|
||||
|
||||
To distinguish the types of tags used for different purposes at different locations (primary DC or remote DC), we introduce the Tag structure, which has two fields:
|
||||
|
||||
* locality (int8_t): When it is a non-negative value, it identifies the DC the tag is used in. For example, if it is 0, the tag is used in the primary DC, and the tag’s id represents a storage server and is used by primary tLogs to index mutations by storage server. When it is negative, it identifies which type of tag this is. For example, if it is -2, it is a log router tag, and its id decides which log router the tagged mutation should be sent to. The definitions of all localities are in FDBTypes.h; search for tagLocalitySpecial in that file to find them.
|
||||
|
||||
* id (uint16_t): Once locality decides which FDB component the tag applies to, id decides which process of that component type will be used for the tagged mutation.
|
||||
|
||||
* FDB component in this context means (i) which DC’s tLogs, and (ii) which type of tLogs.
|
||||
|
||||
To simplify our discussion in the document, we use “tag.id” to represent a tag’s id, and “tag” for the Tag structure that has both locality and id. We represent a Tag as (locality, id).
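As a reference point, a minimal sketch of the Tag structure is shown below; the authoritative definition, along with the special locality constants, lives in FDBTypes.h.

.. code-block:: cpp

    #include <cstdint>

    // Sketch only; see FDBTypes.h for the real definition and tagLocalitySpecial.
    struct Tag {
        int8_t locality; // >= 0: a DC id (0 = primary, 1 = remote in our example);
                         //  < 0: a special tag class (e.g., -2 = log router)
        uint16_t id;     // which process of that component handles the tagged mutation
    };

    Tag primarySSTag{ 0, 5 };  // (0, 5): a storage server tag in the primary DC
    Tag logRouterTag{ -2, 3 }; // (-2, 3): a log router tag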
|
||||
|
||||
|
||||
|
||||
How does a mutation travel in FDB?
|
||||
==================================
|
||||
|
||||
To simplify the description, we ignore the batching mechanisms happening in each component in the data path that are used to improve the system’s performance.
|
||||
|
||||
Figure 1 illustrates how a mutation is routed inside FDB. The solid lines are asynchronous pull operations, while the dotted lines are synchronous push operations.
|
||||
|
||||
.. image:: images/FDB_ha_write_path.png
|
||||
|
||||
At Client
|
||||
---------
|
||||
|
||||
When an application creates a transaction and writes mutations, its FDB client sends the set of mutations to a proxy, say proxy 0. Now let’s focus on one of the normal mutations, say m1, whose key is in the normal keyspace.
|
||||
|
||||
At Proxy
|
||||
--------
|
||||
|
||||
**Sequencing.** *The proxy first asks the master for the commit version of this transaction batch*. The master acts as a sequencer for FDB transactions: it determines the order of transactions to commit by assigning a new commit version, paired with the last assigned commit version as the previous commit version. The transaction log system uses the [previous commit version, commit version] pair to determine its commit order, i.e., it only makes this transaction durable after the transaction with the previous commit version is made durable.
|
||||
|
||||
**Conflict checking.** *Proxy then checks if the transaction has conflicts* with others by sending mutations to resolvers. Resolvers check if there are conflicts among mutations in different transactions from different proxies. Suppose the mutation m1’s transaction passes conflict check and can be committed.
|
||||
|
||||
**Commit mutation messages.** *The proxy then commits the mutations to tLogs*. Each proxy has the shard-to-tag mapping. It assigns Tags (each with a locality and an id) to the mutation m1. In the HA cluster in FDB 6.2, the mutation has the following Tags (summarized in the sketch after this list):
|
||||
|
||||
* 3 tags for primary DC. Assume they are (0, 1), (0, 2), and (0,5). The tag ids are decided by which primary SSes will eventually save the mutation;
|
||||
|
||||
* 3 tags for remote DC. Assume they are (1, 3), (1, 6), (1, 10). The tag ids are decided by which remote SSes will eventually save the mutation;
|
||||
|
||||
* 1 tag for the log router. Assume it is (-2, 3), where -2 is the locality value for all log router tags. The tag id is randomly chosen by the proxy.
|
||||
|
||||
* No tag for satellite tLog. The "satellite TLog locality" -5 in the code is used when recruiting a satellite TLog to tell it that it is a satellite TLog. This causes the satellite TLog to only index log router tags (-2) and not bother indexing any of the >0 tags.
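Putting the example together, the full tag set the proxy would attach to m1 looks like the sketch below (reusing the Tag sketch from the Tag structure section; the values are the illustrative ones above, not output of real code).

.. code-block:: cpp

    #include <vector>

    // Sketch only: the tags assigned to mutation m1 in the running example.
    std::vector<Tag> m1Tags = {
        { 0, 1 }, { 0, 2 }, { 0, 5 },  // primary DC storage server tags
        { 1, 3 }, { 1, 6 }, { 1, 10 }, // remote DC storage server tags
        { -2, 3 }                      // log router tag (locality -2)
    };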
|
||||
|
||||
Why do we need log routers? Why can’t remote tLogs pull data directly from primary tLogs?
|
||||
|
||||
The main reason is to avoid shipping the mutation across the WAN multiple times. If we attached the remote SSes’ tags directly, the same mutation would cross the WAN 3 times. In contrast, the single log router tag reduces it to only 1 crossing.
|
||||
|
||||
Why do we randomly assign tag ids for satellite tLogs and log routers?
|
||||
|
||||
An alternative is to use remote SSes’ tags to decide which satellite tLog and log router a shard should always go to. We tried that approach before and compared its performance with randomly assigned tags. Evaluation showed that randomly assigning a mutation to satellite tLogs and log routers provides lower latency and higher throughput for these two types of logs. This is somewhat expected: when we randomly assign a mutation to a satellite tLog (and log router), we may assign mutations in the same shard to different satellite tLogs (and log routers). The randomness happens to balance load across the logs.
|
||||
|
||||
The proxy groups mutations with the same tag into messages and then synchronously pushes these mutation messages to tLogs based on the tags. The proxy cannot acknowledge that the transaction is committed until the messages have been made durable on all primary and satellite tLogs.
|
||||
|
||||
**Commit empty messages to tLogs.** When a proxy commits a tagged mutation message at version V1 to tLogs, it also has to commit an empty message at the same version V1 to the rest of tLogs. This makes sure every tLog has the same versions of messages, even though some messages are empty. This is a trick used in FDB to let all tLogs march at the same versions. The reason why FDB does the trick is that the master hands out segments of versions as 'from v1 to v2', and the TLogs need to be able to piece all of them back together into one consistent timeline. It may or may not be a good design decision, because a slow tLog can delay other tLogs of the same kind. We may want to revisit the design later.
|
||||
|
||||
|
||||
At primary tLogs and satellite tLogs
|
||||
------------------------------------
|
||||
|
||||
Once a tLog receives mutations pushed by proxies, it builds indexes for each tag’s mutations. Primary tLogs index both log router tags and the primary DC's SS tags. Satellite tLogs index only log router tags.
|
||||
|
||||
If a tLog’s mutations cannot be peeked and popped by its consumers (i.e., SSes and log routers) quickly enough, the tLog’s memory usage will increase. When buffered mutations exceed 1.5GB (configurable by a knob), their in-memory index is spilled into a “Tag,version->disk location” B-tree.
|
||||
|
||||
tLogs also maintain two properties:
|
||||
|
||||
* It will not make a mutation at version V1 durable until mutations before V1 have been made durable;
|
||||
|
||||
* It will not pop (i.e., delete) mutations at version V2, until mutations before V2 have been popped.
|
||||
|
||||
|
||||
At primary SS
|
||||
-------------
|
||||
|
||||
**Primary tLog of an SS.** Since an SS’s tag maps to exactly one tLog, that tLog has all mutations for the SS and is the primary tLog for the SS. When the SS peeks data from tLogs, it prefers to peek from its primary tLog. If the primary tLog crashes, the SS will contact the rest of the tLogs, ask for mutations with its tag, and merge them together. This complex merge operation is abstracted in the TagPartitionedLogSystem interface.
|
||||
|
||||
**Pulling data from tLogs.** Each SS in the primary DC keeps pulling mutations that carry the SS’s tag from tLogs. Once mutations before a version V1 are made durable on an SS, the SS pops the tag up to version V1 from *all* tLogs. The pop operation is an RPC to the tLogs through the TagPartitionedLogSystem interface.
|
||||
|
||||
Since the mutation m1 has three tags for primary SSes, the mutation will be made durable on three primary SSes. This marks the end of the mutation’s journey in the primary DC.
|
||||
|
||||
Now let’s look at how the mutation m1 is routed to the remote DC.
|
||||
|
||||
|
||||
At log router
|
||||
-------------
|
||||
|
||||
Log routers are consumers of satellite tLogs or primary tLogs, controlled by the knob LOG_ROUTER_PEEK_FROM_SATELLITES_PREFERRED. By default, the knob is configured for log routers to use satellite tLogs. This relationship is similar to that between primary SSes and primary tLogs.
|
||||
|
||||
Each log router tag is mapped to one log router. Each log router keeps pulling mutations, which have the log router’s tag, from satellite tLogs. The number of log router tags is always the same as the number of log routers, which is always some N multiple of the number of satellite logs. Each log router has a preferred satellite TLog that has all of its mutations, so in the normal steady state, each satellite should have N log routers peeking from it (and only it).
|
||||
|
||||
A log router buffers its mutations in memory and waits for the remote tLogs to peek and pop its data. If the buffered data cannot be popped by remote tLogs quickly enough, the log router’s memory usage will increase. To avoid running out of memory (OOM), a log router buffers only 5 seconds of mutations in memory; it pauses peeking data from satellite tLogs until its excess buffered mutations have been popped by remote tLogs.
|
||||
|
||||
|
||||
At remote tLogs
|
||||
---------------
|
||||
|
||||
Remote tLogs are consumers of log routers. Each remote tLog keeps pulling mutations, which have the remote tLog’s tag, from log routers. Because log router tags are randomly chosen for mutations, a remote tLog’s mutations can spread across all log routers. So each remote tLog must contact all log routers for its data and merge these mutations in increasing order of versions on the remote tLog.
|
||||
|
||||
Once a remote tLog collects and merges mutations from all log routers, it makes them durable on disk, indexes them based on their tags, and pops the mutations from the log routers.
|
||||
|
||||
Now the mutation m1 has arrived at the remote tLog, which is similar to when it arrived at the primary tLog.
|
||||
|
||||
|
||||
At remote SSes
|
||||
--------------
|
||||
|
||||
Similar to how primary SSes pull mutations from primary tLogs, each remote SS keeps pulling mutations that carry its tag from remote tLogs. Once a remote SS makes mutations up to a version V1 durable, the SS pops its tag up to version V1 from all remote tLogs.
|
||||
|
||||
|
||||
Implementation
|
||||
==============
|
||||
|
||||
* proxy assigns tags to a mutation:
|
||||
|
||||
https://github.com/apple/foundationdb/blob/7eabdf784a21bca102f84e7eaf14bafc54605dff/fdbserver/MasterProxyServer.actor.cpp#L1410
|
||||
|
||||
|
||||
Mutation Serialization (WiP)
|
||||
============================
|
||||
|
||||
This section will go into detail on how mutations are serialized as preparation for ingestion into the TagPartitionedLogSystem. This has also been covered at:
|
||||
|
||||
https://drive.google.com/file/d/1OaP5bqH2kst1VxD6RWj8h2cdr9rhhBHy/view
|
||||
|
||||
The proxy handles splitting transactions into their individual mutations. These mutations are then serialized and synchronously sent to multiple transaction logs.
|
||||
|
||||
The process starts in *commitBatch*. Eventually, *assignMutationsToStorageServers* is called to assign mutations to storage servers and serialize them. This function loops over each mutation in each transaction, determining the set of tags for the mutation (which storage servers it will be sent to), and then calling *LogPushData.writeTypedMessage* on the mutation.
|
||||
|
||||
The *LogPushData* class is used to hold serialized mutations on a per transaction log basis. Its *messagesWriter* field holds one *BinaryWriter* per transaction log.
|
||||
|
||||
*LogPushData.writeTypedMessage* is the function that serializes each mutation and writes it to the correct binary stream to be sent to the corresponding transaction log. Each serialized mutation contains additional metadata about the message, with the format:
|
||||
|
||||
.. image:: /images/serialized_mutation_metadata_format.png
|
||||
|
||||
* Message size: size of the message, in bytes, excluding the four bytes used for the message size
|
||||
|
||||
* Subsequence: integer value used for message ordering
|
||||
|
||||
* # of tags: integer value used to indicate the number of tags following
|
||||
|
||||
* Tag: serialized *Tag* object, repeated # of tags times for each location
|
||||
|
||||
Metadata takes up (10 + 3 * number_of_tags) bytes of each serialized mutation.
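To make the byte accounting concrete, here is a hedged sketch of a writer that produces this layout; ``append`` and ``writeMessageMetadata`` are hypothetical helpers for illustration, not the actual LogPushData/BinaryWriter code, and the individual field widths are inferred from the (10 + 3 * number_of_tags) total above.

.. code-block:: cpp

    #include <cstdint>
    #include <string>
    #include <vector>

    struct Tag { int8_t locality; uint16_t id; }; // 3 bytes when serialized

    // Hypothetical little-endian append helper, for illustration only.
    template <class T>
    void append(std::string& out, T v) {
        out.append(reinterpret_cast<const char*>(&v), sizeof(v));
    }

    void writeMessageMetadata(std::string& out, uint32_t messageSize,
                              uint32_t subsequence, const std::vector<Tag>& tags) {
        append(out, messageSize);           // 4 bytes, excludes these four bytes themselves
        append(out, subsequence);           // 4 bytes, message ordering
        append(out, uint16_t(tags.size())); // 2 bytes, number of tags that follow
        for (const Tag& t : tags) {         // 3 bytes per tag
            append(out, t.locality);
            append(out, t.id);
        }
        // Total metadata: 10 + 3 * tags.size() bytes, matching the text above.
    }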
|
||||
|
||||
There is an additional metadata message prepended to the list of mutations in certain circumstances. To assist with visibility efforts, transaction logs and storage servers need to be able to associate a mutation with the transaction it was part of. This allows individual transactions to be tracked as they travel throughout FDB. Thus, at the beginning of each transaction, a *SpanProtocolMessage* will be written to the message stream before the first mutation for each location. A *SpanProtocolMessage* is a separate message, similar to the *LogProtocolMessage*, which holds metadata about the transaction itself.
|
||||
|
||||
An example may work best to illustrate the serialization process. Assume a client submits a transaction consisting of two mutations, m1 and m2. The proxy determines that m1 should be sent to tlogs 1, 2, and 3, while m2 should be sent to tlogs 2, 3, and 4. When m1 is serialized, a *LogProtocolMessage* will be written to the message stream for tlogs 1, 2, and 3 before the serialized m1 is written. Next, when m2 is serialized, a *LogProtocolMessage* will only be written to tlog 4, because tlogs 2 and 3 have already had a *LogProtocolMessage* written to them *for the transaction*. When all mutations in a transaction have been written, the process starts over for the next transaction.
|
||||
|
||||
This allows all transaction logs to receive information about the transaction each mutation is a part of. Storage servers will pull this information when pulling mutations, allowing them to track transaction info as well.
|
Binary file not shown.
After Width: | Height: | Size: 790 KiB |
Binary file not shown.
After Width: | Height: | Size: 115 KiB |
|
@ -2,6 +2,28 @@
|
|||
Release Notes
|
||||
#############
|
||||
|
||||
6.3.19
|
||||
======
|
||||
* Add the ``trace_partial_file_suffix`` network option. This option will give unfinished trace files a special suffix to indicate they're not complete yet. When the trace file is complete, it is renamed to remove the suffix. `(PR #5330) <https://github.com/apple/foundationdb/pull/5330>`_
|
||||
|
||||
6.3.18
|
||||
======
|
||||
* The multi-version client API would not propagate errors that occurred when creating databases on external clients. This could result in invalid memory accesses. `(PR #5221) <https://github.com/apple/foundationdb/pull/5221>`_
|
||||
* Fixed a race between the multi-version client connecting to a cluster and destroying the database that could cause an assertion failure. `(PR #5221) <https://github.com/apple/foundationdb/pull/5221>`_
|
||||
* Added Mako latency measurements. `(PR #5255) <https://github.com/apple/foundationdb/pull/5255>`_
|
||||
* Fixed a bug introduced when porting the restore-inconsistent-snapshot feature from the 7.0 branch to the 6.3 branch. The parameter that controls whether to perform an inconsistent snapshot restore may instead be used to lock the database during restore. `(PR #5228) <https://github.com/apple/foundationdb/pull/5228>`_
|
||||
* Added SidebandMultiThreadClientTest, which validates causal consistency for multi-threaded client. `(PR #5173) <https://github.com/apple/foundationdb/pull/5173>`_
|
||||
|
||||
6.3.17
|
||||
======
|
||||
* Made readValuePrefix consistent regarding error messages. `(PR #5160) <https://github.com/apple/foundationdb/pull/5160>`_
|
||||
* Added ``TLogPopDetails`` trace event to tLog pop. `(PR #5134) <https://github.com/apple/foundationdb/pull/5134>`_
|
||||
* Added ``CommitBatchingEmptyMessageRatio`` metric to track the ratio of empty messages to tlogs. `(PR #5087) <https://github.com/apple/foundationdb/pull/5087>`_
|
||||
* Observability improvements in ProxyStats. `(PR #5046) <https://github.com/apple/foundationdb/pull/5046>`_
|
||||
* Added ``RecoveryInternal`` and ``ProxyReplies`` trace events to recovery_transaction step in recovery. `(PR #5038) <https://github.com/apple/foundationdb/pull/5038>`_
|
||||
* Multi-threaded client documentation improvements. `(PR #5033) <https://github.com/apple/foundationdb/pull/5033>`_
|
||||
* Added ``ClusterControllerWorkerFailed`` trace event when a worker is removed from cluster controller. `(PR #5035) <https://github.com/apple/foundationdb/pull/5035>`_
|
||||
* Added histograms for storage server write path components. `(PR #5019) <https://github.com/apple/foundationdb/pull/5019>`_
|
||||
|
||||
6.3.15
|
||||
======
|
||||
|
|
|
@ -28,6 +28,8 @@ Features
|
|||
* Added the Testing Storage Server (TSS), which allows FoundationDB to run an "untrusted" storage engine with identical workload to the current storage engine, with zero impact on durability or correctness, and minimal impact on performance. `(Documentation) <https://github.com/apple/foundationdb/blob/master/documentation/sphinx/source/tss.rst>`_ `(PR #4556) <https://github.com/apple/foundationdb/pull/4556>`_
|
||||
* Added perpetual storage wiggle that supports less impactful B-trees recreation and data migration. These will also be used for deploying the Testing Storage Server which compares 2 storage engines' results. See :ref:`Documentation <perpetual-storage-wiggle>` for details. `(PR #4838) <https://github.com/apple/foundationdb/pull/4838>`_
|
||||
* Improved the efficiency with which storage servers replicate data between themselves. `(PR #5017) <https://github.com/apple/foundationdb/pull/5017>`_
|
||||
* Added support to ``exclude command`` to exclude based on locality match. `(PR #5113) <https://github.com/apple/foundationdb/pull/5113>`_
|
||||
* Add the ``trace_partial_file_suffix`` network option. This option will give unfinished trace files a special suffix to indicate they're not complete yet. When the trace file is complete, it is renamed to remove the suffix. `(PR #5328) <https://github.com/apple/foundationdb/pull/5328>`_
|
||||
|
||||
Performance
|
||||
-----------
|
||||
|
@ -60,6 +62,8 @@ Fixes
|
|||
* Added a new pre-backup action when creating a backup. Backups can now either verify the range data is being saved to is empty before the backup begins (current behavior) or clear the range where data is being saved to. Fixes a ``restore_destination_not_empty`` failure after a backup retry due to ``commit_unknown_failure``. `(PR #4595) <https://github.com/apple/foundationdb/pull/4595>`_
|
||||
* When configured with ``usable_regions=2``, a cluster would not fail over to a region which contained only storage class processes. `(PR #4599) <https://github.com/apple/foundationdb/pull/4599>`_
|
||||
* If a restore is done using a prefix to remove and specific key ranges to restore, the key range boundaries must begin with the prefix to remove. `(PR #4684) <https://github.com/apple/foundationdb/pull/4684>`_
|
||||
* The multi-version client API would not propagate errors that occurred when creating databases on external clients. This could result in invalid memory accesses. `(PR #5220) <https://github.com/apple/foundationdb/pull/5220>`_
|
||||
* Fixed a race between the multi-version client connecting to a cluster and destroying the database that could cause an assertion failure. `(PR #5220) <https://github.com/apple/foundationdb/pull/5220>`_
|
||||
|
||||
Status
|
||||
------
|
||||
|
@ -89,6 +93,8 @@ Other Changes
|
|||
* The ``foundationdb`` service installed by the RPM packages will now automatically restart ``fdbmonitor`` after 60 seconds when it fails. `(PR #3841) <https://github.com/apple/foundationdb/pull/3841>`_
|
||||
* Capture output of forked snapshot processes in trace events. `(PR #4254) <https://github.com/apple/foundationdb/pull/4254/files>`_
|
||||
* Add ErrorKind field to Severity 40 trace events. `(PR #4741) <https://github.com/apple/foundationdb/pull/4741/files>`_
|
||||
* Added histograms for the storage server write path components. `(PR #5021) <https://github.com/apple/foundationdb/pull/5021/files>`_
|
||||
* Committing a transaction will no longer partially reset it as of API version 700. `(PR #5271) <https://github.com/apple/foundationdb/pull/5271/files>`_
|
||||
|
||||
Earlier release notes
|
||||
---------------------
|
||||
|
|
|
@ -1,7 +1,97 @@
|
|||
.. _request-tracing:
|
||||
|
||||
#########################
|
||||
Request Tracing Framework
|
||||
#########################
|
||||
###############
|
||||
Request Tracing
|
||||
###############
|
||||
|
||||
.. include:: guide-common.rst.inc
|
||||
The request tracing framework adds the ability to monitor transactions as they
|
||||
move through FoundationDB. Tracing provides a detailed view into where
|
||||
transactions spend time with data exported in near real-time, enabling fast
|
||||
performance debugging. The FoundationDB tracing framework is based on the
|
||||
`OpenTracing <https://opentracing.io/>`_ specification.
|
||||
|
||||
*Disambiguation:* :ref:`Trace files <administration-managing-trace-files>` are
|
||||
local log files containing debug and error output from a local ``fdbserver``
|
||||
binary. Request tracing produces similarly named *traces* which record the
|
||||
amount of time a transaction spent in a part of the system. This document uses
|
||||
the term tracing (or trace) to refer to these request traces, not local debug
|
||||
information, unless otherwise specified.
|
||||
|
||||
*Note*: Full request tracing capability requires at least ``TLogVersion::V6``.
|
||||
|
||||
==============
|
||||
Recording data
|
||||
==============
|
||||
|
||||
The request tracing framework produces no data by default. To enable collection
|
||||
of traces, specify the collection type using the ``--tracer`` command line
|
||||
option for ``fdbserver`` and the ``DISTRIBUTED_CLIENT_TRACER`` :ref:`network
|
||||
option <network-options-using-environment-variables>` for clients. Both client
|
||||
and server must have the same trace value set to perform correctly.
|
||||
|
||||
========================= ===============
|
||||
**Option** **Description**
|
||||
------------------------- ---------------
|
||||
none No tracing data is collected.
|
||||
file, logfile, log_file Write tracing data to FDB trace files, specified with ``--logdir``.
|
||||
network_lossy Send tracing data as UDP packets. Data is sent to ``localhost:8889``, but the default port can be changed by setting the ``TRACING_UDP_LISTENER_PORT`` knob. This option is useful if you have a log aggregation program to collect trace data.
|
||||
========================= ===============
|
||||
|
||||
-----------
|
||||
Data format
|
||||
-----------
|
||||
|
||||
Spans are the building blocks of traces. A span represents an operation in the
|
||||
life of a transaction, including the start and end timestamp and an operation.
|
||||
A collection of spans make up a trace, representing a single transaction. The
|
||||
tracing framework outputs individual spans, which can be reconstructed into
|
||||
traces through their parent relationships.
|
||||
|
||||
Trace data sent as UDP packets when using the ``network_lossy`` option is
|
||||
serialized using `MessagePack <https://msgpack.org>`_. To save on the amount of
|
||||
data sent, spans are serialized as an array of length 8 (if the span has one or
|
||||
more parents), or length 7 (if the span has no parents).
|
||||
|
||||
The fields of a span are specified below. The index at which the field appears
|
||||
in the serialized msgpack array is also specified, for those using the UDP
|
||||
collection format.
|
||||
|
||||
================== ========= ======== ===============
|
||||
**Field** **Index** **Type** **Description**
|
||||
------------------ --------- -------- ---------------
|
||||
Source IP:port 0 string The IP and port of the machine where the span originated.
|
||||
Trace ID 1 uint64 The 64-bit identifier of the trace. All spans in a trace share the same trace ID.
|
||||
Span ID 2 uint64 The 64-bit identifier of the span. All spans have a unique identifier.
|
||||
Start timestamp 3 double The timestamp when the operation represented by the span began.
|
||||
End timestamp 4 double The timestamp when the operation represented by the span ended.
|
||||
Operation name 5 string The name of the operation the span represents.
|
||||
Tags 6 map User defined tags, added manually to specify additional information.
|
||||
Parent span IDs 7 vector (Optional) A list of span IDs representing parents of this span.
|
||||
================== ========= ======== ===============
|
||||
|
||||
^^^^^^^^^^^^^^^^^^^^^
|
||||
Multiple parent spans
|
||||
^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
Unlike traditional distributed tracing frameworks, FoundationDB spans can have
|
||||
multiple parents. Because many FDB transactions are batched into a single
|
||||
transaction, to continue tracing the request, the batched transaction must
|
||||
treat all its component transactions as parents.
|
||||
|
||||
---------------
|
||||
Control options
|
||||
---------------
|
||||
|
||||
In addition to the command line parameter described above, tracing can be set
|
||||
at a database and transaction level.
|
||||
|
||||
Tracing can be globally disabled by setting the
|
||||
``distributed_transaction_trace_disable`` database option. It can be enabled by
|
||||
setting the ``distributed_transaction_trace_enable`` database option. If
|
||||
neither option is specified but a tracer option is set as described above,
|
||||
tracing will be enabled.
|
||||
|
||||
Tracing can be enabled or disabled for individual transactions. The special key
|
||||
space exposes an API to set a custom trace ID for a transaction, or to disable
|
||||
tracing for the transaction. See the special key space :ref:`tracing module
|
||||
documentation <special-key-space-tracing-module>` to learn more.
|
||||
|
|
|
@ -250,6 +250,8 @@ use the global configuration functions.
|
|||
|
||||
#. ``\xff\xff/global_config/<key> := <value>`` Read/write. Reading keys in the range will return a tuple decoded string representation of the value for the given key. Writing a value will update all processes in the cluster with the new key-value pair. Values must be written using the :ref:`api-python-tuple-layer`.
|
||||
|
||||
.. _special-key-space-tracing-module:
|
||||
|
||||
Tracing module
|
||||
--------------
|
||||
|
||||
|
|
|
@ -30,6 +30,8 @@ These documents explain the engineering design of FoundationDB, with detailed in
|
|||
|
||||
* :doc:`read-write-path` describes how FDB read and write path works.
|
||||
|
||||
* :doc:`ha-write-path` describes how FDB write path works in HA setting.
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 1
|
||||
:titlesonly:
|
||||
|
@ -48,3 +50,4 @@ These documents explain the engineering design of FoundationDB, with detailed in
|
|||
testing
|
||||
kv-architecture
|
||||
read-write-path
|
||||
ha-write-path
|
||||
|
|
|
@ -6,7 +6,7 @@ Visibility Documents
|
|||
|
||||
Curation of documents related to Visibility into FDB.
|
||||
|
||||
* :doc:`request-tracing` walks you through request-tracing framework.
|
||||
* :doc:`request-tracing` provides fine-grained visibility into the flow of transactions through the system.
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
|
|
|
@ -183,6 +183,7 @@ ACTOR Future<Void> echoServer() {
|
|||
req.reply.send(std::string(req.message.rbegin(), req.message.rend()));
|
||||
}
|
||||
when(state StreamRequest req = waitNext(echoServer.stream.getFuture())) {
|
||||
req.reply.setByteLimit(1024);
|
||||
state int i = 0;
|
||||
for (; i < 100; ++i) {
|
||||
wait(req.reply.onReady());
|
||||
|
|
|
@ -598,7 +598,7 @@ int main(int argc, char** argv) {
|
|||
Error::init();
|
||||
|
||||
StringRef url(param.container_url);
|
||||
setupNetwork(0, true);
|
||||
setupNetwork(0, UseMetrics::True);
|
||||
|
||||
TraceEvent::setNetworkThread();
|
||||
openTraceFile(NetworkAddress(), 10 << 20, 10 << 20, param.log_dir, "convert", param.trace_log_group);
|
||||
|
|
|
@ -41,6 +41,11 @@ enum {
|
|||
OPT_TRACE_LOG_GROUP,
|
||||
OPT_INPUT_FILE,
|
||||
OPT_BUILD_FLAGS,
|
||||
OPT_LIST_ONLY,
|
||||
OPT_KEY_PREFIX,
|
||||
OPT_HEX_KEY_PREFIX,
|
||||
OPT_BEGIN_VERSION_FILTER,
|
||||
OPT_END_VERSION_FILTER,
|
||||
OPT_HELP
|
||||
};
|
||||
|
||||
|
@ -62,6 +67,11 @@ CSimpleOpt::SOption gConverterOptions[] = { { OPT_CONTAINER, "-r", SO_REQ_SEP },
|
|||
TLS_OPTION_FLAGS
|
||||
#endif
|
||||
{ OPT_BUILD_FLAGS, "--build_flags", SO_NONE },
|
||||
{ OPT_LIST_ONLY, "--list_only", SO_NONE },
|
||||
{ OPT_KEY_PREFIX, "-k", SO_REQ_SEP },
|
||||
{ OPT_HEX_KEY_PREFIX, "--hex_prefix", SO_REQ_SEP },
|
||||
{ OPT_BEGIN_VERSION_FILTER, "--begin_version_filter", SO_REQ_SEP },
|
||||
{ OPT_END_VERSION_FILTER, "--end_version_filter", SO_REQ_SEP },
|
||||
{ OPT_HELP, "-?", SO_NONE },
|
||||
{ OPT_HELP, "-h", SO_NONE },
|
||||
{ OPT_HELP, "--help", SO_NONE },
|
||||
|
|
|
@ -19,14 +19,20 @@
|
|||
*/
|
||||
|
||||
#include <algorithm>
|
||||
#include <cstdlib>
|
||||
#include <iostream>
|
||||
#include <limits>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
#include "fdbbackup/BackupTLSConfig.h"
|
||||
#include "fdbclient/BackupAgent.actor.h"
|
||||
#include "fdbclient/BackupContainer.h"
|
||||
#include "fdbbackup/FileConverter.h"
|
||||
#include "fdbclient/CommitTransaction.h"
|
||||
#include "fdbclient/FDBTypes.h"
|
||||
#include "fdbclient/MutationList.h"
|
||||
#include "flow/IRandom.h"
|
||||
#include "flow/Trace.h"
|
||||
#include "flow/flow.h"
|
||||
#include "flow/serialize.h"
|
||||
|
@ -65,6 +71,14 @@ void printDecodeUsage() {
|
|||
TLS_HELP
|
||||
#endif
|
||||
" --build_flags Print build information and exit.\n"
|
||||
" --list_only Print file list and exit.\n"
|
||||
" -k KEY_PREFIX Use the prefix for filtering mutations\n"
|
||||
" --hex_prefix HEX_PREFIX\n"
|
||||
" The prefix specified in HEX format, e.g., \\x05\\x01.\n"
|
||||
" --begin_version_filter BEGIN_VERSION\n"
|
||||
" The version range's begin version (inclusive) for filtering.\n"
|
||||
" --end_version_filter END_VERSION\n"
|
||||
" The version range's end version (exclusive) for filtering.\n"
|
||||
"\n";
|
||||
return;
|
||||
}
|
||||
|
@ -76,9 +90,19 @@ void printBuildInformation() {
|
|||
struct DecodeParams {
|
||||
std::string container_url;
|
||||
std::string fileFilter; // only files match the filter will be decoded
|
||||
bool log_enabled = false;
|
||||
bool log_enabled = true;
|
||||
std::string log_dir, trace_format, trace_log_group;
|
||||
BackupTLSConfig tlsConfig;
|
||||
bool list_only = false;
|
||||
std::string prefix; // Key prefix for filtering
|
||||
Version beginVersionFilter = 0;
|
||||
Version endVersionFilter = std::numeric_limits<Version>::max();
|
||||
|
||||
// Returns if [begin, end) overlap with the filter range
|
||||
bool overlap(Version begin, Version end) const {
|
||||
// Filter [100, 200), [50,75) [200, 300)
|
||||
return !(begin >= endVersionFilter || end <= beginVersionFilter);
|
||||
}
|
||||
|
||||
std::string toString() {
|
||||
std::string s;
|
||||
|
@ -97,12 +121,69 @@ struct DecodeParams {
|
|||
s.append(" LogGroup:").append(trace_log_group);
|
||||
}
|
||||
}
|
||||
s.append(", list_only: ").append(list_only ? "true" : "false");
|
||||
if (beginVersionFilter != 0) {
|
||||
s.append(", beginVersionFilter: ").append(std::to_string(beginVersionFilter));
|
||||
}
|
||||
if (endVersionFilter < std::numeric_limits<Version>::max()) {
|
||||
s.append(", endVersionFilter: ").append(std::to_string(endVersionFilter));
|
||||
}
|
||||
if (!prefix.empty()) {
|
||||
s.append(", KeyPrefix: ").append(printable(KeyRef(prefix)));
|
||||
}
|
||||
return s;
|
||||
}
|
||||
|
||||
|
||||
};
|
||||
|
||||
// Decode an ASCII string, e.g., "\x15\x1b\x19\x04\xaf\x0c\x28\x0a",
|
||||
// into the binary string.
|
||||
std::string decode_hex_string(std::string line) {
|
||||
size_t i = 0;
|
||||
std::string ret;
|
||||
|
||||
while (i <= line.length()) {
|
||||
switch (line[i]) {
|
||||
case '\\':
|
||||
if (i + 2 > line.length()) {
|
||||
std::cerr << "Invalid hex string at: " << i << "\n";
|
||||
return ret;
|
||||
}
|
||||
switch (line[i + 1]) {
|
||||
char ent, save;
|
||||
case '"':
|
||||
case '\\':
|
||||
case ' ':
|
||||
case ';':
|
||||
line.erase(i, 1);
|
||||
break;
|
||||
case 'x':
|
||||
if (i + 4 > line.length()) {
|
||||
std::cerr << "Invalid hex string at: " << i << "\n";
|
||||
return ret;
|
||||
}
|
||||
char* pEnd;
|
||||
save = line[i + 4];
|
||||
line[i + 4] = 0;
|
||||
ent = char(strtoul(line.data() + i + 2, &pEnd, 16));
|
||||
if (*pEnd) {
|
||||
std::cerr << "Invalid hex string at: " << i << "\n";
|
||||
return ret;
|
||||
}
|
||||
line[i + 4] = save;
|
||||
line.replace(i, 4, 1, ent);
|
||||
break;
|
||||
default:
|
||||
std::cerr << "Invalid hex string at: " << i << "\n";
|
||||
return ret;
|
||||
}
|
||||
default:
|
||||
i++;
|
||||
}
|
||||
}
|
||||
|
||||
return line.substr(0, i);
|
||||
}
|
||||
|
||||
int parseDecodeCommandLine(DecodeParams* param, CSimpleOpt* args) {
|
||||
while (args->Next()) {
|
||||
auto lastError = args->LastError();
|
||||
|
@ -124,6 +205,26 @@ int parseDecodeCommandLine(DecodeParams* param, CSimpleOpt* args) {
|
|||
param->container_url = args->OptionArg();
|
||||
break;
|
||||
|
||||
case OPT_LIST_ONLY:
|
||||
param->list_only = true;
|
||||
break;
|
||||
|
||||
case OPT_KEY_PREFIX:
|
||||
param->prefix = args->OptionArg();
|
||||
break;
|
||||
|
||||
case OPT_HEX_KEY_PREFIX:
|
||||
param->prefix = decode_hex_string(args->OptionArg());
|
||||
break;
|
||||
|
||||
case OPT_BEGIN_VERSION_FILTER:
|
||||
param->beginVersionFilter = std::atoll(args->OptionArg());
|
||||
break;
|
||||
|
||||
case OPT_END_VERSION_FILTER:
|
||||
param->endVersionFilter = std::atoll(args->OptionArg());
|
||||
break;
|
||||
|
||||
case OPT_CRASHONERROR:
|
||||
g_crashOnError = true;
|
||||
break;
|
||||
|
@ -141,7 +242,7 @@ int parseDecodeCommandLine(DecodeParams* param, CSimpleOpt* args) {
|
|||
break;
|
||||
|
||||
case OPT_TRACE_FORMAT:
|
||||
if (!validateTraceFormat(args->OptionArg())) {
|
||||
if (!selectTraceFormatter(args->OptionArg())) {
|
||||
std::cerr << "ERROR: Unrecognized trace format " << args->OptionArg() << "\n";
|
||||
return FDB_EXIT_ERROR;
|
||||
}
|
||||
|
@ -202,78 +303,18 @@ void printLogFiles(std::string msg, const std::vector<LogFile>& files) {
|
|||
std::vector<LogFile> getRelevantLogFiles(const std::vector<LogFile>& files, const DecodeParams& params) {
|
||||
std::vector<LogFile> filtered;
|
||||
for (const auto& file : files) {
|
||||
if (file.fileName.find(params.fileFilter) != std::string::npos) {
|
||||
if (file.fileName.find(params.fileFilter) != std::string::npos &&
|
||||
params.overlap(file.beginVersion, file.endVersion + 1)) {
|
||||
filtered.push_back(file);
|
||||
}
|
||||
}
|
||||
return filtered;
|
||||
}
|
||||
|
||||
std::pair<Version, int32_t> decode_key(const StringRef& key) {
|
||||
ASSERT(key.size() == sizeof(uint8_t) + sizeof(Version) + sizeof(int32_t));
|
||||
|
||||
uint8_t hash;
|
||||
Version version;
|
||||
int32_t part;
|
||||
BinaryReader rd(key, Unversioned());
|
||||
rd >> hash >> version >> part;
|
||||
version = bigEndian64(version);
|
||||
part = bigEndian32(part);
|
||||
|
||||
int32_t v = version / CLIENT_KNOBS->LOG_RANGE_BLOCK_SIZE;
|
||||
ASSERT(((uint8_t)hashlittle(&v, sizeof(v), 0)) == hash);
|
||||
|
||||
return std::make_pair(version, part);
|
||||
}
|
||||
|
||||
// Decodes an encoded list of mutations in the format of:
|
||||
// [includeVersion:uint64_t][val_length:uint32_t][mutation_1][mutation_2]...[mutation_k],
|
||||
// where a mutation is encoded as:
|
||||
// [type:uint32_t][keyLength:uint32_t][valueLength:uint32_t][key][value]
|
||||
std::vector<MutationRef> decode_value(const StringRef& value) {
|
||||
StringRefReader reader(value, restore_corrupted_data());
|
||||
|
||||
reader.consume<uint64_t>(); // Consume the includeVersion
|
||||
uint32_t val_length = reader.consume<uint32_t>();
|
||||
if (val_length != value.size() - sizeof(uint64_t) - sizeof(uint32_t)) {
|
||||
TraceEvent(SevError, "ValueError")
|
||||
.detail("ValueLen", val_length)
|
||||
.detail("ValueSize", value.size())
|
||||
.detail("Value", printable(value));
|
||||
}
|
||||
|
||||
std::vector<MutationRef> mutations;
|
||||
while (1) {
|
||||
if (reader.eof())
|
||||
break;
|
||||
|
||||
// Deserialization of a MutationRef, which was packed by MutationListRef::push_back_deep()
|
||||
uint32_t type, p1len, p2len;
|
||||
type = reader.consume<uint32_t>();
|
||||
p1len = reader.consume<uint32_t>();
|
||||
p2len = reader.consume<uint32_t>();
|
||||
|
||||
const uint8_t* key = reader.consume(p1len);
|
||||
const uint8_t* val = reader.consume(p2len);
|
||||
|
||||
mutations.emplace_back((MutationRef::Type)type, StringRef(key, p1len), StringRef(val, p2len));
|
||||
}
|
||||
return mutations;
|
||||
}
|
||||
|
||||
struct VersionedMutations {
|
||||
Version version;
|
||||
std::vector<MutationRef> mutations;
|
||||
Arena arena; // The arena that contains the mutations.
|
||||
};
|
||||
|
||||
struct VersionedKVPart {
|
||||
Arena arena;
|
||||
Version version;
|
||||
int32_t part;
|
||||
StringRef kv;
|
||||
VersionedKVPart(Arena arena, Version version, int32_t part, StringRef kv)
|
||||
: arena(arena), version(version), part(part), kv(kv) {}
|
||||
std::string serializedMutations; // buffer that contains mutations
|
||||
};
|
||||
|
||||
/*
|
||||
|
@ -293,174 +334,66 @@ struct VersionedKVPart {
|
|||
* at any time this object might have two blocks of data in memory.
|
||||
*/
|
||||
class DecodeProgress {
|
||||
std::vector<VersionedKVPart> keyValues;
|
||||
std::vector<Standalone<VectorRef<KeyValueRef>>> blocks;
|
||||
std::unordered_map<Version, fileBackup::AccumulatedMutations> mutationBlocksByVersion;
|
||||
|
||||
public:
|
||||
DecodeProgress() = default;
|
||||
template <class U>
|
||||
DecodeProgress(const LogFile& file, U&& values) : file(file), keyValues(std::forward<U>(values)) {}
|
||||
DecodeProgress(const LogFile& file) : file(file) {}
|
||||
|
||||
// If there are no more mutations to pull from the file.
|
||||
// However, we could have unfinished version in the buffer when EOF is true,
|
||||
// which means we should look for data in the next file. The caller
|
||||
// should call getUnfinishedBuffer() to get these left data.
|
||||
bool finished() const { return (eof && keyValues.empty()) || (leftover && !keyValues.empty()); }
|
||||
|
||||
std::vector<VersionedKVPart>&& getUnfinishedBuffer() && { return std::move(keyValues); }
|
||||
|
||||
// Returns all mutations of the next version in a batch.
|
||||
Future<VersionedMutations> getNextBatch() { return getNextBatchImpl(this); }
|
||||
bool finished() const { return done; }
|
||||
|
||||
// Open and loads file into memory
|
||||
Future<Void> openFile(Reference<IBackupContainer> container) { return openFileImpl(this, container); }
|
||||
|
||||
// The following are private APIs:
|
||||
|
||||
// Returns true if value contains complete data.
|
||||
static bool isValueComplete(StringRef value) {
|
||||
StringRefReader reader(value, restore_corrupted_data());
|
||||
|
||||
reader.consume<uint64_t>(); // Consume the includeVersion
|
||||
uint32_t val_length = reader.consume<uint32_t>();
|
||||
return val_length == value.size() - sizeof(uint64_t) - sizeof(uint32_t);
|
||||
}

// PRECONDITION: finished() must return false before calling this function.
// Returns the next batch of mutations along with the arena backing it.
// Note the returned batch can be empty when the file has unfinished
// version batch data that are in the next file.
ACTOR static Future<VersionedMutations> getNextBatchImpl(DecodeProgress* self) {
ASSERT(!self->finished());
VersionedMutations getNextBatch() {
ASSERT(!finished());

loop {
if (self->keyValues.size() <= 1) {
// Try to decode another block when less than one left
wait(readAndDecodeFile(self));
}

const auto& kv = self->keyValues[0];
ASSERT(kv.part == 0);

// decode next versions, check if they are continuous parts
int idx = 1; // next kv pair in "keyValues"
int bufSize = kv.kv.size();
for (int lastPart = 0; idx < self->keyValues.size(); idx++, lastPart++) {
if (idx == self->keyValues.size())
break;

const auto& nextKV = self->keyValues[idx];
if (kv.version != nextKV.version) {
break;
}

if (lastPart + 1 != nextKV.part) {
TraceEvent("DecodeError").detail("Part1", lastPart).detail("Part2", nextKV.part);
throw restore_corrupted_data();
}
bufSize += nextKV.kv.size();
}

VersionedMutations m;
m.version = kv.version;
TraceEvent("Decode").detail("Version", m.version).detail("Idx", idx).detail("Q", self->keyValues.size());
StringRef value = kv.kv;
if (idx > 1) {
// Stitch parts into one and then decode one by one
Standalone<StringRef> buf = self->combineValues(idx, bufSize);
value = buf;
m.arena = buf.arena();
}
if (isValueComplete(value)) {
m.mutations = decode_value(value);
if (m.arena.getSize() == 0) {
m.arena = kv.arena;
}
self->keyValues.erase(self->keyValues.begin(), self->keyValues.begin() + idx);
return m;
} else if (!self->eof) {
// Read one more block, hopefully the missing part of the value can be found.
wait(readAndDecodeFile(self));
} else {
TraceEvent(SevWarn, "MissingValue").detail("Version", m.version);
self->leftover = true;
return m; // Empty mutations
VersionedMutations vms;
for (auto& [version, m] : mutationBlocksByVersion) {
if (m.isComplete()) {
vms.version = version;
std::vector<MutationRef> mutations = fileBackup::decodeMutationLogValue(m.serializedMutations);
TraceEvent("Decode").detail("Version", vms.version).detail("N", mutations.size());
vms.mutations.insert(vms.mutations.end(), mutations.begin(), mutations.end());
vms.serializedMutations = m.serializedMutations;
mutationBlocksByVersion.erase(version);
return vms;
}
}
}

// Returns a buffer which stitches first "idx" values into one.
// "len" MUST equal the summation of these values.
Standalone<StringRef> combineValues(const int idx, const int len) {
ASSERT(idx <= keyValues.size() && idx > 1);

Standalone<StringRef> buf = makeString(len);
int n = 0;
for (int i = 0; i < idx; i++) {
const auto& value = keyValues[i].kv;
memcpy(mutateString(buf) + n, value.begin(), value.size());
n += value.size();
}

ASSERT(n == len);
return buf;
}
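combineValues() exists because one version's serialized mutations can be split across several (version, part) key/value pairs. A rough standalone illustration of the reassembly, using hypothetical plain-string parts already sorted by part number:

#include <string>
#include <vector>

// Reassembly is a straight concatenation; the caller is expected to have verified
// that the part numbers are contiguous starting at 0 for this version.
std::string combinePartsExample(const std::vector<std::string>& parts) {
    size_t total = 0;
    for (const auto& p : parts)
        total += p.size();
    std::string combined;
    combined.reserve(total);
    for (const auto& p : parts)
        combined += p;
    return combined;
}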

// Decodes a block into KeyValueRef stored in "keyValues".
void decode_block(const Standalone<StringRef>& buf, int len) {
StringRef block(buf.begin(), len);
StringRefReader reader(block, restore_corrupted_data());

try {
// Read header, currently only decoding version BACKUP_AGENT_MLOG_VERSION
if (reader.consume<int32_t>() != BACKUP_AGENT_MLOG_VERSION)
throw restore_unsupported_file_version();

// Read k/v pairs. Block ends either at end of last value exactly or with 0xFF as first key len byte.
while (1) {
// If eof reached or first key len bytes is 0xFF then end of block was reached.
if (reader.eof() || *reader.rptr == 0xFF)
break;

// Read key and value. If anything throws then there is a problem.
uint32_t kLen = reader.consumeNetworkUInt32();
const uint8_t* k = reader.consume(kLen);
std::pair<Version, int32_t> version_part = decode_key(StringRef(k, kLen));
uint32_t vLen = reader.consumeNetworkUInt32();
const uint8_t* v = reader.consume(vLen);
TraceEvent(SevDecodeInfo, "Block")
.detail("KeySize", kLen)
.detail("valueSize", vLen)
.detail("Offset", reader.rptr - buf.begin())
.detail("Version", version_part.first)
.detail("Part", version_part.second);
keyValues.emplace_back(buf.arena(), version_part.first, version_part.second, StringRef(v, vLen));
}

// Make sure any remaining bytes in the block are 0xFF
for (auto b : reader.remainder()) {
if (b != 0xFF)
throw restore_corrupted_data_padding();
}

// The (version, part) in a block can be out of order, i.e., (3, 0)
// can be followed by (4, 0), and then (3, 1). So we need to sort them
// first by version, and then by part number.
std::sort(keyValues.begin(), keyValues.end(), [](const VersionedKVPart& a, const VersionedKVPart& b) {
return a.version == b.version ? a.part < b.part : a.version < b.version;
});
return;
} catch (Error& e) {
TraceEvent(SevWarn, "CorruptBlock").error(e).detail("Offset", reader.rptr - buf.begin());
throw;
// No complete versions
if (!mutationBlocksByVersion.empty()) {
TraceEvent(SevWarn, "UnfishedBlocks").detail("NumberOfVersions", mutationBlocksByVersion.size());
}
done = true;
return vms;
}
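The block layout that decode_block() walks (a 4-byte version header, then length-prefixed key/value records, then 0xFF padding) can be illustrated with a simplified parser. The record struct and big-endian helper below are assumptions for illustration, not the actual FoundationDB types, and the version-header check is omitted.

#include <cstdint>
#include <stdexcept>
#include <string>
#include <vector>

struct RawRecord {
    std::string key;
    std::string value;
};

// Reads a 32-bit big-endian (network order) length, as consumeNetworkUInt32 does above.
static uint32_t readBE32(const std::string& buf, size_t pos) {
    if (pos + 4 > buf.size())
        throw std::runtime_error("truncated length field");
    return (uint32_t(uint8_t(buf[pos])) << 24) | (uint32_t(uint8_t(buf[pos + 1])) << 16) |
           (uint32_t(uint8_t(buf[pos + 2])) << 8) | uint32_t(uint8_t(buf[pos + 3]));
}

// Simplified walk over one block: stop at end of data or at a 0xFF padding byte,
// then require that everything left over is 0xFF, as the real decoder does.
std::vector<RawRecord> parseBlockExample(const std::string& block) {
    std::vector<RawRecord> records;
    size_t pos = 0;
    while (pos < block.size() && uint8_t(block[pos]) != 0xFF) {
        uint32_t kLen = readBE32(block, pos);
        pos += 4;
        std::string key = block.substr(pos, kLen);
        pos += kLen;
        uint32_t vLen = readBE32(block, pos);
        pos += 4;
        std::string value = block.substr(pos, vLen);
        pos += vLen;
        records.push_back({ std::move(key), std::move(value) });
    }
    for (; pos < block.size(); ++pos) {
        if (uint8_t(block[pos]) != 0xFF)
            throw std::runtime_error("corrupted block padding");
    }
    return records;
}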

ACTOR static Future<Void> openFileImpl(DecodeProgress* self, Reference<IBackupContainer> container) {
Reference<IAsyncFile> fd = wait(container->readFile(self->file.fileName));
self->fd = fd;
wait(readAndDecodeFile(self));
while (!self->eof) {
wait(readAndDecodeFile(self));
}
return Void();
}

// Add chunks to mutationBlocksByVersion
void addBlockKVPairs(VectorRef<KeyValueRef> chunks) {
for (auto& kv : chunks) {
auto versionAndChunkNumber = fileBackup::decodeMutationLogKey(kv.key);
mutationBlocksByVersion[versionAndChunkNumber.first].addChunk(versionAndChunkNumber.second, kv);
}
}

// Reads a file block, decodes it into key/value pairs, and stores these pairs.
ACTOR static Future<Void> readAndDecodeFile(DecodeProgress* self) {
try {

@@ -470,17 +403,18 @@ public:
return Void();
}

state Standalone<StringRef> buf = makeString(len);
state int rLen = wait(self->fd->read(mutateString(buf), len, self->offset));
// Decode a file block into log_key and log_value chunks
Standalone<VectorRef<KeyValueRef>> chunks =
wait(fileBackup::decodeMutationLogFileBlock(self->fd, self->offset, len));
self->blocks.push_back(chunks);

TraceEvent("ReadFile")
.detail("Name", self->file.fileName)
.detail("Len", rLen)
.detail("Len", len)
.detail("Offset", self->offset);
if (rLen != len) {
throw restore_corrupted_data();
}
self->decode_block(buf, rLen);
self->offset += rLen;
self->addBlockKVPairs(chunks);
self->offset += len;

return Void();
} catch (Error& e) {
TraceEvent(SevWarn, "CorruptLogFileBlock")

@@ -496,12 +430,55 @@ public:
Reference<IAsyncFile> fd;
int64_t offset = 0;
bool eof = false;
bool leftover = false; // Done but has unfinished version batch data left
bool done = false;
};

ACTOR Future<Void> process_file(Reference<IBackupContainer> container, LogFile file, UID uid, DecodeParams params) {
if (file.fileSize == 0) {
TraceEvent("SkipEmptyFile", uid).detail("Name", file.fileName);
return Void();
}

state DecodeProgress progress(file);
wait(progress.openFile(container));
while (!progress.finished()) {
VersionedMutations vms = progress.getNextBatch();
if (vms.version < params.beginVersionFilter || vms.version >= params.endVersionFilter) {
TraceEvent("SkipVersion").detail("Version", vms.version);
continue;
}

int sub = 0;
for (const auto& m : vms.mutations) {
sub++; // sub sequence number starts at 1
bool print = params.prefix.empty(); // no filtering

if (!print) {
if (isSingleKeyMutation((MutationRef::Type)m.type)) {
print = m.param1.startsWith(StringRef(params.prefix));
} else if (m.type == MutationRef::ClearRange) {
KeyRange range(KeyRangeRef(m.param1, m.param2));
print = range.contains(StringRef(params.prefix));
} else {
ASSERT(false);
}
}
if (print) {
TraceEvent(format("Mutation_%llu_%d", vms.version, sub).c_str(), uid)
.detail("Version", vms.version)
.setMaxFieldLength(10000)
.detail("M", m.toString());
std::cout << vms.version << " " << m.toString() << "\n";
}
}
}
TraceEvent("ProcessFileDone", uid).detail("File", file.fileName);
return Void();
}
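To restate the filter used in process_file() in isolation: a mutation is printed when no prefix filter is set, when a single-key mutation's key starts with the prefix, or when a ClearRange covers the prefix key itself. A minimal sketch using plain strings; the half-open range comparison and the mutation shape are assumptions for illustration.

#include <string>

// Hypothetical, simplified mutation shape: single-key mutations carry only "key";
// clear-range mutations carry [rangeBegin, rangeEnd).
struct SimpleMutation {
    bool isClearRange = false;
    std::string key;        // for single-key mutations
    std::string rangeBegin; // for clear ranges
    std::string rangeEnd;
};

bool shouldPrint(const SimpleMutation& m, const std::string& prefix) {
    if (prefix.empty())
        return true; // no filtering
    if (!m.isClearRange)
        return m.key.compare(0, prefix.size(), prefix) == 0; // key starts with prefix
    return m.rangeBegin <= prefix && prefix < m.rangeEnd; // range contains the prefix key
}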

ACTOR Future<Void> decode_logs(DecodeParams params) {
state Reference<IBackupContainer> container = IBackupContainer::openContainer(params.container_url);

state UID uid = deterministicRandom()->randomUniqueID();
state BackupFileList listing = wait(container->dumpFileList());
// remove partitioned logs
listing.logs.erase(std::remove_if(listing.logs.begin(),

@@ -512,7 +489,8 @@ ACTOR Future<Void> decode_logs(DecodeParams params) {
}),
listing.logs.end());
std::sort(listing.logs.begin(), listing.logs.end());
TraceEvent("Container").detail("URL", params.container_url).detail("Logs", listing.logs.size());
TraceEvent("Container", uid).detail("URL", params.container_url).detail("Logs", listing.logs.size());
TraceEvent("DecodeParam", uid).setMaxFieldLength(100000).detail("Value", params.toString());

BackupDescription desc = wait(container->describeBackup());
std::cout << "\n" << desc.toString() << "\n";

@@ -520,26 +498,15 @@ ACTOR Future<Void> decode_logs(DecodeParams params) {
state std::vector<LogFile> logs = getRelevantLogFiles(listing.logs, params);
printLogFiles("Relevant files are: ", logs);

state int i = 0;
// Previous file's unfinished version data
state std::vector<VersionedKVPart> left;
for (; i < logs.size(); i++) {
if (logs[i].fileSize == 0)
continue;
if (params.list_only) return Void();

state DecodeProgress progress(logs[i], std::move(left));
wait(progress.openFile(container));
while (!progress.finished()) {
VersionedMutations vms = wait(progress.getNextBatch());
for (const auto& m : vms.mutations) {
std::cout << vms.version << " " << m.toString() << "\n";
}
}
left = std::move(progress).getUnfinishedBuffer();
if (!left.empty()) {
TraceEvent("UnfinishedFile").detail("File", logs[i].fileName).detail("Q", left.size());
}
state int idx = 0;
while (idx < logs.size()) {
TraceEvent("ProcessFile").detail("Name", logs[idx].fileName).detail("I", idx);
wait(process_file(container, logs[idx], uid, params));
idx++;
}
TraceEvent("DecodeDone", uid);
return Void();
}
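getRelevantLogFiles() is not shown in this hunk; a plausible sketch of what "relevant" means here is overlap with the [beginVersionFilter, endVersionFilter) window from DecodeParams. The field names below (a file's beginVersion/endVersion and the filter bounds) are assumptions based on the surrounding code, not the actual implementation.

#include <cstdint>
#include <vector>

struct LogFileInfo {
    int64_t beginVersion = 0;
    int64_t endVersion = 0;
};

// Keep only files whose version range can intersect the requested window.
std::vector<LogFileInfo> filterRelevantFiles(const std::vector<LogFileInfo>& all,
                                             int64_t beginFilter,
                                             int64_t endFilter) {
    std::vector<LogFileInfo> relevant;
    for (const auto& f : all) {
        // Skip files that end before the window starts or start at/after the window end.
        if (f.endVersion >= beginFilter && f.beginVersion < endFilter)
            relevant.push_back(f);
    }
    return relevant;
}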

@@ -564,6 +531,8 @@ int main(int argc, char** argv) {
}
if (!param.trace_format.empty()) {
setNetworkOption(FDBNetworkOptions::TRACE_FORMAT, StringRef(param.trace_format));
} else {
setNetworkOption(FDBNetworkOptions::TRACE_FORMAT, "json"_sr);
}
if (!param.trace_log_group.empty()) {
setNetworkOption(FDBNetworkOptions::TRACE_LOG_GROUP, StringRef(param.trace_log_group));

@@ -571,7 +540,7 @@ int main(int argc, char** argv) {
}

if (!param.tlsConfig.setupTLS()) {
TraceEvent(SevError, "TLSError");
TraceEvent(SevError, "TLSError").log();
throw tls_error();
}

@@ -579,15 +548,20 @@ int main(int argc, char** argv) {
Error::init();

StringRef url(param.container_url);
setupNetwork(0, true);
setupNetwork(0, UseMetrics::True);

TraceEvent::setNetworkThread();
openTraceFile(NetworkAddress(), 10 << 20, 10 << 20, param.log_dir, "decode", param.trace_log_group);
openTraceFile(NetworkAddress(), 10 << 20, 500 << 20, param.log_dir, "decode", param.trace_log_group);
param.tlsConfig.setupBlobCredentials();

auto f = stopAfter(decode_logs(param));

runNetwork();

flushTraceFileVoid();
fflush(stdout);
closeTraceFile();

return status;
} catch (Error& e) {
std::cerr << "ERROR: " << e.what() << "\n";
@ -133,6 +133,7 @@ enum {
|
|||
OPT_WAITFORDONE,
|
||||
OPT_BACKUPKEYS_FILTER,
|
||||
OPT_INCREMENTALONLY,
|
||||
OPT_ENCRYPTION_KEY_FILE,
|
||||
|
||||
// Backup Modify
|
||||
OPT_MOD_ACTIVE_INTERVAL,
|
||||
|
@ -259,6 +260,7 @@ CSimpleOpt::SOption g_rgBackupStartOptions[] = {
|
|||
{ OPT_KNOB, "--knob_", SO_REQ_SEP },
|
||||
{ OPT_BLOB_CREDENTIALS, "--blob_credentials", SO_REQ_SEP },
|
||||
{ OPT_INCREMENTALONLY, "--incremental", SO_NONE },
|
||||
{ OPT_ENCRYPTION_KEY_FILE, "--encryption_key_file", SO_REQ_SEP },
|
||||
#ifndef TLS_DISABLED
|
||||
TLS_OPTION_FLAGS
|
||||
#endif
|
||||
|
@ -697,6 +699,7 @@ CSimpleOpt::SOption g_rgRestoreOptions[] = {
|
|||
{ OPT_INCREMENTALONLY, "--incremental", SO_NONE },
|
||||
{ OPT_RESTORE_BEGIN_VERSION, "--begin_version", SO_REQ_SEP },
|
||||
{ OPT_RESTORE_INCONSISTENT_SNAPSHOT_ONLY, "--inconsistent_snapshot_only", SO_NONE },
|
||||
{ OPT_ENCRYPTION_KEY_FILE, "--encryption_key_file", SO_REQ_SEP },
|
||||
#ifndef TLS_DISABLED
|
||||
TLS_OPTION_FLAGS
|
||||
#endif
|
||||
|
@ -1089,6 +1092,8 @@ static void printBackupUsage(bool devhelp) {
|
|||
" Performs incremental backup without the base backup.\n"
|
||||
" This option indicates to the backup agent that it will only need to record the log files, "
|
||||
"and ignore the range files.\n");
|
||||
printf(" --encryption_key_file"
|
||||
" The AES-128-GCM key in the provided file is used for encrypting backup files.\n");
|
||||
#ifndef TLS_DISABLED
|
||||
printf(TLS_HELP);
|
||||
#endif
|
||||
|
@ -1162,6 +1167,8 @@ static void printRestoreUsage(bool devhelp) {
|
|||
" To be used in conjunction with incremental restore.\n"
|
||||
" Indicates to the backup agent to only begin replaying log files from a certain version, "
|
||||
"instead of the entire set.\n");
|
||||
printf(" --encryption_key_file"
|
||||
" The AES-128-GCM key in the provided file is used for decrypting backup files.\n");
|
||||
#ifndef TLS_DISABLED
|
||||
printf(TLS_HELP);
|
||||
#endif
|
||||
|
@ -1463,7 +1470,7 @@ ACTOR Future<std::string> getLayerStatus(Reference<ReadYourWritesTransaction> tr
|
|||
std::string id,
|
||||
ProgramExe exe,
|
||||
Database dest,
|
||||
bool snapshot = false) {
|
||||
Snapshot snapshot = Snapshot::False) {
|
||||
// This process will write a document that looks like this:
|
||||
// { backup : { $expires : {<subdoc>}, version: <version from approximately 30 seconds from now> }
|
||||
// so that the value under 'backup' will eventually expire to null and thus be ignored by
|
||||
|
@ -1639,7 +1646,7 @@ ACTOR Future<Void> cleanupStatus(Reference<ReadYourWritesTransaction> tr,
|
|||
std::string name,
|
||||
std::string id,
|
||||
int limit = 1) {
|
||||
state RangeResult docs = wait(tr->getRange(KeyRangeRef(rootKey, strinc(rootKey)), limit, true));
|
||||
state RangeResult docs = wait(tr->getRange(KeyRangeRef(rootKey, strinc(rootKey)), limit, Snapshot::True));
|
||||
state bool readMore = false;
|
||||
state int i;
|
||||
for (i = 0; i < docs.size(); ++i) {
|
||||
|
@ -1668,7 +1675,7 @@ ACTOR Future<Void> cleanupStatus(Reference<ReadYourWritesTransaction> tr,
|
|||
}
|
||||
if (readMore) {
|
||||
limit = 10000;
|
||||
RangeResult docs2 = wait(tr->getRange(KeyRangeRef(rootKey, strinc(rootKey)), limit, true));
|
||||
RangeResult docs2 = wait(tr->getRange(KeyRangeRef(rootKey, strinc(rootKey)), limit, Snapshot::True));
|
||||
docs = std::move(docs2);
|
||||
readMore = false;
|
||||
}
|
||||
|
@@ -1705,7 +1712,10 @@ ACTOR Future<json_spirit::mObject> getLayerStatus(Database src, std::string root

// Read layer status for this layer and get the total count of agent processes (instances) then adjust the poll delay
// based on that and BACKUP_AGGREGATE_POLL_RATE
ACTOR Future<Void> updateAgentPollRate(Database src, std::string rootKey, std::string name, double* pollDelay) {
ACTOR Future<Void> updateAgentPollRate(Database src,
std::string rootKey,
std::string name,
std::shared_ptr<double> pollDelay) {
loop {
try {
json_spirit::mObject status = wait(getLayerStatus(src, rootKey));
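The comment above describes the intent; the arithmetic is roughly that each of N agent instances should poll 1/N-th as often so the aggregate stays near BACKUP_AGGREGATE_POLL_RATE. A hedged sketch of that adjustment, with an assumed formula (the real actor may differ):

#include <memory>

// instanceCount: number of agent processes seen in the layer status document.
// aggregatePollRate: target polls per second across all instances combined (assumed > 0).
void adjustPollDelay(const std::shared_ptr<double>& pollDelay, int instanceCount, double aggregatePollRate) {
    if (instanceCount < 1)
        instanceCount = 1;
    *pollDelay = instanceCount / aggregatePollRate; // seconds this instance waits between polls
}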
@ -1727,7 +1737,7 @@ ACTOR Future<Void> updateAgentPollRate(Database src, std::string rootKey, std::s
|
|||
ACTOR Future<Void> statusUpdateActor(Database statusUpdateDest,
|
||||
std::string name,
|
||||
ProgramExe exe,
|
||||
double* pollDelay,
|
||||
std::shared_ptr<double> pollDelay,
|
||||
Database taskDest = Database(),
|
||||
std::string id = nondeterministicRandom()->randomUniqueID().toString()) {
|
||||
state std::string metaKey = layerStatusMetaPrefixRange.begin.toString() + "json/" + name;
|
||||
|
@ -1757,7 +1767,8 @@ ACTOR Future<Void> statusUpdateActor(Database statusUpdateDest,
|
|||
try {
|
||||
tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
|
||||
tr->setOption(FDBTransactionOptions::LOCK_AWARE);
|
||||
state Future<std::string> futureStatusDoc = getLayerStatus(tr, name, id, exe, taskDest, true);
|
||||
state Future<std::string> futureStatusDoc =
|
||||
getLayerStatus(tr, name, id, exe, taskDest, Snapshot::True);
|
||||
wait(cleanupStatus(tr, rootKey, name, id));
|
||||
std::string statusdoc = wait(futureStatusDoc);
|
||||
tr->set(instanceKey, statusdoc);
|
||||
|
@ -1774,7 +1785,7 @@ ACTOR Future<Void> statusUpdateActor(Database statusUpdateDest,
|
|||
|
||||
// Now that status was written at least once by this process (and hopefully others), start the poll rate
|
||||
// control updater if it wasn't started yet
|
||||
if (!pollRateUpdater.isValid() && pollDelay != nullptr)
|
||||
if (!pollRateUpdater.isValid())
|
||||
pollRateUpdater = updateAgentPollRate(statusUpdateDest, rootKey, name, pollDelay);
|
||||
} catch (Error& e) {
|
||||
TraceEvent(SevWarnAlways, "UnableToWriteStatus").error(e);
|
||||
|
@@ -1784,17 +1795,17 @@ ACTOR Future<Void> statusUpdateActor(Database statusUpdateDest,
}

ACTOR Future<Void> runDBAgent(Database src, Database dest) {
state double pollDelay = 1.0 / CLIENT_KNOBS->BACKUP_AGGREGATE_POLL_RATE;
state std::shared_ptr<double> pollDelay = std::make_shared<double>(1.0 / CLIENT_KNOBS->BACKUP_AGGREGATE_POLL_RATE);
std::string id = nondeterministicRandom()->randomUniqueID().toString();
state Future<Void> status = statusUpdateActor(src, "dr_backup", ProgramExe::DR_AGENT, &pollDelay, dest, id);
state Future<Void> status = statusUpdateActor(src, "dr_backup", ProgramExe::DR_AGENT, pollDelay, dest, id);
state Future<Void> status_other =
statusUpdateActor(dest, "dr_backup_dest", ProgramExe::DR_AGENT, &pollDelay, dest, id);
statusUpdateActor(dest, "dr_backup_dest", ProgramExe::DR_AGENT, pollDelay, dest, id);

state DatabaseBackupAgent backupAgent(src);

loop {
try {
wait(backupAgent.run(dest, &pollDelay, CLIENT_KNOBS->BACKUP_TASKS_PER_AGENT));
wait(backupAgent.run(dest, pollDelay, CLIENT_KNOBS->BACKUP_TASKS_PER_AGENT));
break;
} catch (Error& e) {
if (e.code() == error_code_operation_cancelled)

@@ -1811,14 +1822,14 @@ ACTOR Future<Void> runDBAgent(Database src, Database dest) {
}

ACTOR Future<Void> runAgent(Database db) {
state double pollDelay = 1.0 / CLIENT_KNOBS->BACKUP_AGGREGATE_POLL_RATE;
state Future<Void> status = statusUpdateActor(db, "backup", ProgramExe::AGENT, &pollDelay);
state std::shared_ptr<double> pollDelay = std::make_shared<double>(1.0 / CLIENT_KNOBS->BACKUP_AGGREGATE_POLL_RATE);
state Future<Void> status = statusUpdateActor(db, "backup", ProgramExe::AGENT, pollDelay);

state FileBackupAgent backupAgent;

loop {
try {
wait(backupAgent.run(db, &pollDelay, CLIENT_KNOBS->BACKUP_TASKS_PER_AGENT));
wait(backupAgent.run(db, pollDelay, CLIENT_KNOBS->BACKUP_TASKS_PER_AGENT));
break;
} catch (Error& e) {
if (e.code() == error_code_operation_cancelled)
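Why the switch from double* to std::shared_ptr<double>: the poll delay is mutated by the rate-updater actor while being read by the agent loops, and shared ownership keeps the value alive for whichever side outlives the other. A small standalone illustration, with plain functions standing in for the actors:

#include <iostream>
#include <memory>

// Stand-in for updateAgentPollRate: adjusts the shared delay.
void updater(const std::shared_ptr<double>& pollDelay) {
    *pollDelay = 2.5;
}

// Stand-in for one iteration of the agent loop: always sees the latest value.
void agentLoopIteration(const std::shared_ptr<double>& pollDelay) {
    std::cout << "sleeping for " << *pollDelay << "s before next poll\n";
}

int main() {
    auto pollDelay = std::make_shared<double>(1.0);
    agentLoopIteration(pollDelay); // 1.0
    updater(pollDelay);
    agentLoopIteration(pollDelay); // 2.5
    return 0;
}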
@ -1846,7 +1857,8 @@ ACTOR Future<Void> submitDBBackup(Database src,
|
|||
backupRanges.push_back_deep(backupRanges.arena(), normalKeys);
|
||||
}
|
||||
|
||||
wait(backupAgent.submitBackup(dest, KeyRef(tagName), backupRanges, false, StringRef(), StringRef(), true));
|
||||
wait(backupAgent.submitBackup(
|
||||
dest, KeyRef(tagName), backupRanges, StopWhenDone::False, StringRef(), StringRef(), LockDB::True));
|
||||
|
||||
// Check if a backup agent is running
|
||||
bool agentRunning = wait(backupAgent.checkActive(dest));
|
||||
|
@ -1890,10 +1902,10 @@ ACTOR Future<Void> submitBackup(Database db,
|
|||
Standalone<VectorRef<KeyRangeRef>> backupRanges,
|
||||
std::string tagName,
|
||||
bool dryRun,
|
||||
bool waitForCompletion,
|
||||
bool stopWhenDone,
|
||||
bool usePartitionedLog,
|
||||
bool incrementalBackupOnly) {
|
||||
WaitForComplete waitForCompletion,
|
||||
StopWhenDone stopWhenDone,
|
||||
UsePartitionedLog usePartitionedLog,
|
||||
IncrementalBackupOnly incrementalBackupOnly) {
|
||||
try {
|
||||
state FileBackupAgent backupAgent;
|
||||
|
||||
|
@ -1996,7 +2008,7 @@ ACTOR Future<Void> switchDBBackup(Database src,
|
|||
Database dest,
|
||||
Standalone<VectorRef<KeyRangeRef>> backupRanges,
|
||||
std::string tagName,
|
||||
bool forceAction) {
|
||||
ForceAction forceAction) {
|
||||
try {
|
||||
state DatabaseBackupAgent backupAgent(src);
|
||||
|
||||
|
@ -2046,7 +2058,7 @@ ACTOR Future<Void> statusDBBackup(Database src, Database dest, std::string tagNa
|
|||
return Void();
|
||||
}
|
||||
|
||||
ACTOR Future<Void> statusBackup(Database db, std::string tagName, bool showErrors, bool json) {
|
||||
ACTOR Future<Void> statusBackup(Database db, std::string tagName, ShowErrors showErrors, bool json) {
|
||||
try {
|
||||
state FileBackupAgent backupAgent;
|
||||
|
||||
|
@ -2063,11 +2075,15 @@ ACTOR Future<Void> statusBackup(Database db, std::string tagName, bool showError
|
|||
return Void();
|
||||
}
|
||||
|
||||
ACTOR Future<Void> abortDBBackup(Database src, Database dest, std::string tagName, bool partial, bool dstOnly) {
|
||||
ACTOR Future<Void> abortDBBackup(Database src,
|
||||
Database dest,
|
||||
std::string tagName,
|
||||
PartialBackup partial,
|
||||
DstOnly dstOnly) {
|
||||
try {
|
||||
state DatabaseBackupAgent backupAgent(src);
|
||||
|
||||
wait(backupAgent.abortBackup(dest, Key(tagName), partial, false, dstOnly));
|
||||
wait(backupAgent.abortBackup(dest, Key(tagName), partial, AbortOldBackup::False, dstOnly));
|
||||
wait(backupAgent.unlockBackup(dest, Key(tagName)));
|
||||
|
||||
printf("The DR on tag `%s' was successfully aborted.\n", printable(StringRef(tagName)).c_str());
|
||||
|
@ -2118,7 +2134,7 @@ ACTOR Future<Void> abortBackup(Database db, std::string tagName) {
|
|||
return Void();
|
||||
}
|
||||
|
||||
ACTOR Future<Void> cleanupMutations(Database db, bool deleteData) {
|
||||
ACTOR Future<Void> cleanupMutations(Database db, DeleteData deleteData) {
|
||||
try {
|
||||
wait(cleanupBackup(db, deleteData));
|
||||
} catch (Error& e) {
|
||||
|
@ -2131,7 +2147,7 @@ ACTOR Future<Void> cleanupMutations(Database db, bool deleteData) {
|
|||
return Void();
|
||||
}
|
||||
|
||||
ACTOR Future<Void> waitBackup(Database db, std::string tagName, bool stopWhenDone) {
|
||||
ACTOR Future<Void> waitBackup(Database db, std::string tagName, StopWhenDone stopWhenDone) {
|
||||
try {
|
||||
state FileBackupAgent backupAgent;
|
||||
|
||||
|
@ -2150,7 +2166,7 @@ ACTOR Future<Void> waitBackup(Database db, std::string tagName, bool stopWhenDon
|
|||
return Void();
|
||||
}
|
||||
|
||||
ACTOR Future<Void> discontinueBackup(Database db, std::string tagName, bool waitForCompletion) {
|
||||
ACTOR Future<Void> discontinueBackup(Database db, std::string tagName, WaitForComplete waitForCompletion) {
|
||||
try {
|
||||
state FileBackupAgent backupAgent;
|
||||
|
||||
|
@ -2220,7 +2236,9 @@ ACTOR Future<Void> changeDBBackupResumed(Database src, Database dest, bool pause
|
|||
return Void();
|
||||
}
|
||||
|
||||
Reference<IBackupContainer> openBackupContainer(const char* name, std::string destinationContainer) {
|
||||
Reference<IBackupContainer> openBackupContainer(const char* name,
|
||||
std::string destinationContainer,
|
||||
Optional<std::string> const& encryptionKeyFile = {}) {
|
||||
// Error, if no dest container was specified
|
||||
if (destinationContainer.empty()) {
|
||||
fprintf(stderr, "ERROR: No backup destination was specified.\n");
|
||||
|
@ -2230,7 +2248,7 @@ Reference<IBackupContainer> openBackupContainer(const char* name, std::string de
|
|||
|
||||
Reference<IBackupContainer> c;
|
||||
try {
|
||||
c = IBackupContainer::openContainer(destinationContainer);
|
||||
c = IBackupContainer::openContainer(destinationContainer, encryptionKeyFile);
|
||||
} catch (Error& e) {
|
||||
std::string msg = format("ERROR: '%s' on URL '%s'", e.what(), destinationContainer.c_str());
|
||||
if (e.code() == error_code_backup_invalid_url && !IBackupContainer::lastOpenError.empty()) {
|
||||
|
@ -2255,12 +2273,13 @@ ACTOR Future<Void> runRestore(Database db,
|
|||
Version targetVersion,
|
||||
std::string targetTimestamp,
|
||||
bool performRestore,
|
||||
bool verbose,
|
||||
bool waitForDone,
|
||||
Verbose verbose,
|
||||
WaitForComplete waitForDone,
|
||||
std::string addPrefix,
|
||||
std::string removePrefix,
|
||||
bool onlyAppyMutationLogs,
|
||||
bool inconsistentSnapshotOnly) {
|
||||
OnlyApplyMutationLogs onlyApplyMutationLogs,
|
||||
InconsistentSnapshotOnly inconsistentSnapshotOnly,
|
||||
Optional<std::string> encryptionKeyFile) {
|
||||
if (ranges.empty()) {
|
||||
ranges.push_back_deep(ranges.arena(), normalKeys);
|
||||
}
|
||||
|
@ -2296,7 +2315,8 @@ ACTOR Future<Void> runRestore(Database db,
|
|||
try {
|
||||
state FileBackupAgent backupAgent;
|
||||
|
||||
state Reference<IBackupContainer> bc = openBackupContainer(exeRestore.toString().c_str(), container);
|
||||
state Reference<IBackupContainer> bc =
|
||||
openBackupContainer(exeRestore.toString().c_str(), container, encryptionKeyFile);
|
||||
|
||||
// If targetVersion is unset then use the maximum restorable version from the backup description
|
||||
if (targetVersion == invalidVersion) {
|
||||
|
@ -2306,7 +2326,7 @@ ACTOR Future<Void> runRestore(Database db,
|
|||
|
||||
BackupDescription desc = wait(bc->describeBackup());
|
||||
|
||||
if (onlyAppyMutationLogs && desc.contiguousLogEnd.present()) {
|
||||
if (onlyApplyMutationLogs && desc.contiguousLogEnd.present()) {
|
||||
targetVersion = desc.contiguousLogEnd.get() - 1;
|
||||
} else if (desc.maxRestorableVersion.present()) {
|
||||
targetVersion = desc.maxRestorableVersion.get();
|
||||
|
@ -2330,10 +2350,11 @@ ACTOR Future<Void> runRestore(Database db,
|
|||
verbose,
|
||||
KeyRef(addPrefix),
|
||||
KeyRef(removePrefix),
|
||||
true,
|
||||
onlyAppyMutationLogs,
|
||||
LockDB::True,
|
||||
onlyApplyMutationLogs,
|
||||
inconsistentSnapshotOnly,
|
||||
beginVersion));
|
||||
beginVersion,
|
||||
encryptionKeyFile));
|
||||
|
||||
if (waitForDone && verbose) {
|
||||
// If restore is now complete then report version restored
|
||||
|
@ -2369,8 +2390,8 @@ ACTOR Future<Void> runFastRestoreTool(Database db,
|
|||
Standalone<VectorRef<KeyRangeRef>> ranges,
|
||||
Version dbVersion,
|
||||
bool performRestore,
|
||||
bool verbose,
|
||||
bool waitForDone) {
|
||||
Verbose verbose,
|
||||
WaitForComplete waitForDone) {
|
||||
try {
|
||||
state FileBackupAgent backupAgent;
|
||||
state Version restoreVersion = invalidVersion;
|
||||
|
@ -2413,7 +2434,7 @@ ACTOR Future<Void> runFastRestoreTool(Database db,
|
|||
ranges,
|
||||
KeyRef(container),
|
||||
dbVersion,
|
||||
true,
|
||||
LockDB::True,
|
||||
randomUID,
|
||||
LiteralStringRef(""),
|
||||
LiteralStringRef("")));
|
||||
|
@ -2512,7 +2533,8 @@ ACTOR Future<Void> expireBackupData(const char* name,
|
|||
Database db,
|
||||
bool force,
|
||||
Version restorableAfterVersion,
|
||||
std::string restorableAfterDatetime) {
|
||||
std::string restorableAfterDatetime,
|
||||
Optional<std::string> encryptionKeyFile) {
|
||||
if (!endDatetime.empty()) {
|
||||
Version v = wait(timeKeeperVersionFromDatetime(endDatetime, db));
|
||||
endVersion = v;
|
||||
|
@ -2531,7 +2553,7 @@ ACTOR Future<Void> expireBackupData(const char* name,
|
|||
}
|
||||
|
||||
try {
|
||||
Reference<IBackupContainer> c = openBackupContainer(name, destinationContainer);
|
||||
Reference<IBackupContainer> c = openBackupContainer(name, destinationContainer, encryptionKeyFile);
|
||||
|
||||
state IBackupContainer::ExpireProgress progress;
|
||||
state std::string lastProgress;
|
||||
|
@ -2613,9 +2635,10 @@ ACTOR Future<Void> describeBackup(const char* name,
|
|||
std::string destinationContainer,
|
||||
bool deep,
|
||||
Optional<Database> cx,
|
||||
bool json) {
|
||||
bool json,
|
||||
Optional<std::string> encryptionKeyFile) {
|
||||
try {
|
||||
Reference<IBackupContainer> c = openBackupContainer(name, destinationContainer);
|
||||
Reference<IBackupContainer> c = openBackupContainer(name, destinationContainer, encryptionKeyFile);
|
||||
state BackupDescription desc = wait(c->describeBackup(deep));
|
||||
if (cx.present())
|
||||
wait(desc.resolveVersionTimes(cx.get()));
|
||||
|
@ -2645,7 +2668,7 @@ ACTOR Future<Void> queryBackup(const char* name,
|
|||
Version restoreVersion,
|
||||
std::string originalClusterFile,
|
||||
std::string restoreTimestamp,
|
||||
bool verbose) {
|
||||
Verbose verbose) {
|
||||
state UID operationId = deterministicRandom()->randomUniqueID();
|
||||
state JsonBuilderObject result;
|
||||
state std::string errorMessage;
|
||||
|
@ -2838,7 +2861,7 @@ ACTOR Future<Void> modifyBackup(Database db, std::string tagName, BackupModifyOp
|
|||
}
|
||||
|
||||
state BackupConfig config(uidFlag.get().first);
|
||||
EBackupState s = wait(config.stateEnum().getOrThrow(tr, false, backup_invalid_info()));
|
||||
EBackupState s = wait(config.stateEnum().getOrThrow(tr, Snapshot::False, backup_invalid_info()));
|
||||
if (!FileBackupAgent::isRunnable(s)) {
|
||||
fprintf(stderr, "Backup on tag '%s' is not runnable.\n", tagName.c_str());
|
||||
throw backup_error();
|
||||
|
@ -2858,7 +2881,7 @@ ACTOR Future<Void> modifyBackup(Database db, std::string tagName, BackupModifyOp
|
|||
}
|
||||
|
||||
if (options.activeSnapshotIntervalSeconds.present()) {
|
||||
Version begin = wait(config.snapshotBeginVersion().getOrThrow(tr, false, backup_error()));
|
||||
Version begin = wait(config.snapshotBeginVersion().getOrThrow(tr, Snapshot::False, backup_error()));
|
||||
config.snapshotTargetEndVersion().set(tr,
|
||||
begin + ((int64_t)options.activeSnapshotIntervalSeconds.get() *
|
||||
CLIENT_KNOBS->CORE_VERSIONSPERSECOND));
|
||||
|
@@ -3244,13 +3267,13 @@ int main(int argc, char* argv[]) {
Version beginVersion = invalidVersion;
Version restoreVersion = invalidVersion;
std::string restoreTimestamp;
bool waitForDone = false;
bool stopWhenDone = true;
bool usePartitionedLog = false; // Set to true to use new backup system
bool incrementalBackupOnly = false;
bool onlyAppyMutationLogs = false;
bool inconsistentSnapshotOnly = false;
bool forceAction = false;
WaitForComplete waitForDone{ false };
StopWhenDone stopWhenDone{ true };
UsePartitionedLog usePartitionedLog{ false }; // Set to true to use new backup system
IncrementalBackupOnly incrementalBackupOnly{ false };
OnlyApplyMutationLogs onlyApplyMutationLogs{ false };
InconsistentSnapshotOnly inconsistentSnapshotOnly{ false };
ForceAction forceAction{ false };
bool trace = false;
bool quietDisplay = false;
bool dryRun = false;
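Replacing plain bool flags with named types such as WaitForComplete and StopWhenDone is what makes call sites like submitBackup(...) self-describing and argument-order safe. A generic sketch of one such wrapper; the real project generates these with a macro, so this standalone version only shows the idea.

// Minimal named-boolean wrapper: each flag becomes its own type, so two flags can
// never be swapped silently at a call site.
class StopWhenDoneFlag {
    bool value;

public:
    constexpr explicit StopWhenDoneFlag(bool v) : value(v) {}
    constexpr operator bool() const { return value; }
    void set(bool v) { value = v; }
};

// Usage mirroring the declarations above:
//   StopWhenDoneFlag stopWhenDone{ true };
//   stopWhenDone.set(false);
//   if (stopWhenDone) { /* ... */ }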
@ -3260,8 +3283,8 @@ int main(int argc, char* argv[]) {
|
|||
uint64_t traceRollSize = TRACE_DEFAULT_ROLL_SIZE;
|
||||
uint64_t traceMaxLogsSize = TRACE_DEFAULT_MAX_LOGS_SIZE;
|
||||
ESOError lastError;
|
||||
bool partial = true;
|
||||
bool dstOnly = false;
|
||||
PartialBackup partial{ true };
|
||||
DstOnly dstOnly{ false };
|
||||
LocalityData localities;
|
||||
uint64_t memLimit = 8LL << 30;
|
||||
Optional<uint64_t> ti;
|
||||
|
@ -3271,7 +3294,8 @@ int main(int argc, char* argv[]) {
|
|||
std::string restoreClusterFileDest;
|
||||
std::string restoreClusterFileOrig;
|
||||
bool jsonOutput = false;
|
||||
bool deleteData = false;
|
||||
DeleteData deleteData{ false };
|
||||
Optional<std::string> encryptionKeyFile;
|
||||
|
||||
BackupModifyOptions modifyOptions;
|
||||
|
||||
|
@ -3355,13 +3379,13 @@ int main(int argc, char* argv[]) {
|
|||
dryRun = true;
|
||||
break;
|
||||
case OPT_DELETE_DATA:
|
||||
deleteData = true;
|
||||
deleteData.set(true);
|
||||
break;
|
||||
case OPT_MIN_CLEANUP_SECONDS:
|
||||
knobs.emplace_back("min_cleanup_seconds", args->OptionArg());
|
||||
break;
|
||||
case OPT_FORCE:
|
||||
forceAction = true;
|
||||
forceAction.set(true);
|
||||
break;
|
||||
case OPT_TRACE:
|
||||
trace = true;
|
||||
|
@ -3441,10 +3465,10 @@ int main(int argc, char* argv[]) {
|
|||
sourceClusterFile = args->OptionArg();
|
||||
break;
|
||||
case OPT_CLEANUP:
|
||||
partial = false;
|
||||
partial.set(false);
|
||||
break;
|
||||
case OPT_DSTONLY:
|
||||
dstOnly = true;
|
||||
dstOnly.set(true);
|
||||
break;
|
||||
case OPT_KNOB: {
|
||||
std::string syn = args->OptionSyntax();
|
||||
|
@ -3503,17 +3527,20 @@ int main(int argc, char* argv[]) {
|
|||
modifyOptions.verifyUID = args->OptionArg();
|
||||
break;
|
||||
case OPT_WAITFORDONE:
|
||||
waitForDone = true;
|
||||
waitForDone.set(true);
|
||||
break;
|
||||
case OPT_NOSTOPWHENDONE:
|
||||
stopWhenDone = false;
|
||||
stopWhenDone.set(false);
|
||||
break;
|
||||
case OPT_USE_PARTITIONED_LOG:
|
||||
usePartitionedLog = true;
|
||||
usePartitionedLog.set(true);
|
||||
break;
|
||||
case OPT_INCREMENTALONLY:
|
||||
incrementalBackupOnly = true;
|
||||
onlyAppyMutationLogs = true;
|
||||
incrementalBackupOnly.set(true);
|
||||
onlyApplyMutationLogs.set(true);
|
||||
break;
|
||||
case OPT_ENCRYPTION_KEY_FILE:
|
||||
encryptionKeyFile = args->OptionArg();
|
||||
break;
|
||||
case OPT_RESTORECONTAINER:
|
||||
restoreContainer = args->OptionArg();
|
||||
|
@ -3565,7 +3592,7 @@ int main(int argc, char* argv[]) {
|
|||
break;
|
||||
}
|
||||
case OPT_RESTORE_INCONSISTENT_SNAPSHOT_ONLY: {
|
||||
inconsistentSnapshotOnly = true;
|
||||
inconsistentSnapshotOnly.set(true);
|
||||
break;
|
||||
}
|
||||
#ifdef _WIN32
|
||||
|
@ -3704,7 +3731,7 @@ int main(int argc, char* argv[]) {
|
|||
}
|
||||
}
|
||||
|
||||
IKnobCollection::setGlobalKnobCollection(IKnobCollection::Type::CLIENT, Randomize::NO, IsSimulated::NO);
|
||||
IKnobCollection::setGlobalKnobCollection(IKnobCollection::Type::CLIENT, Randomize::False, IsSimulated::False);
|
||||
auto& g_knobs = IKnobCollection::getMutableGlobalKnobCollection();
|
||||
for (const auto& [knobName, knobValueString] : knobs) {
|
||||
try {
|
||||
|
@ -3731,7 +3758,7 @@ int main(int argc, char* argv[]) {
|
|||
}
|
||||
|
||||
// Reinitialize knobs in order to update knobs that are dependent on explicitly set knobs
|
||||
g_knobs.initialize(Randomize::NO, IsSimulated::NO);
|
||||
g_knobs.initialize(Randomize::False, IsSimulated::False);
|
||||
|
||||
if (trace) {
|
||||
if (!traceLogGroup.empty())
|
||||
|
@ -3769,7 +3796,7 @@ int main(int argc, char* argv[]) {
|
|||
Reference<IBackupContainer> c;
|
||||
|
||||
try {
|
||||
setupNetwork(0, true);
|
||||
setupNetwork(0, UseMetrics::True);
|
||||
} catch (Error& e) {
|
||||
fprintf(stderr, "ERROR: %s\n", e.what());
|
||||
return FDB_EXIT_ERROR;
|
||||
|
@ -3813,7 +3840,7 @@ int main(int argc, char* argv[]) {
|
|||
}
|
||||
|
||||
try {
|
||||
db = Database::createDatabase(ccf, -1, true, localities);
|
||||
db = Database::createDatabase(ccf, -1, IsInternal::True, localities);
|
||||
} catch (Error& e) {
|
||||
fprintf(stderr, "ERROR: %s\n", e.what());
|
||||
fprintf(stderr, "ERROR: Unable to connect to cluster from `%s'\n", ccf->getFilename().c_str());
|
||||
|
@ -3833,7 +3860,7 @@ int main(int argc, char* argv[]) {
|
|||
}
|
||||
|
||||
try {
|
||||
sourceDb = Database::createDatabase(sourceCcf, -1, true, localities);
|
||||
sourceDb = Database::createDatabase(sourceCcf, -1, IsInternal::True, localities);
|
||||
} catch (Error& e) {
|
||||
fprintf(stderr, "ERROR: %s\n", e.what());
|
||||
fprintf(stderr, "ERROR: Unable to connect to cluster from `%s'\n", sourceCcf->getFilename().c_str());
|
||||
|
@ -3853,7 +3880,7 @@ int main(int argc, char* argv[]) {
|
|||
if (!initCluster())
|
||||
return FDB_EXIT_ERROR;
|
||||
// Test out the backup url to make sure it parses. Doesn't test to make sure it's actually writeable.
|
||||
openBackupContainer(argv[0], destinationContainer);
|
||||
openBackupContainer(argv[0], destinationContainer, encryptionKeyFile);
|
||||
f = stopAfter(submitBackup(db,
|
||||
destinationContainer,
|
||||
initialSnapshotIntervalSeconds,
|
||||
|
@ -3879,7 +3906,7 @@ int main(int argc, char* argv[]) {
|
|||
case BackupType::STATUS:
|
||||
if (!initCluster())
|
||||
return FDB_EXIT_ERROR;
|
||||
f = stopAfter(statusBackup(db, tagName, true, jsonOutput));
|
||||
f = stopAfter(statusBackup(db, tagName, ShowErrors::True, jsonOutput));
|
||||
break;
|
||||
|
||||
case BackupType::ABORT:
|
||||
|
@ -3932,7 +3959,8 @@ int main(int argc, char* argv[]) {
|
|||
db,
|
||||
forceAction,
|
||||
expireRestorableAfterVersion,
|
||||
expireRestorableAfterDatetime));
|
||||
expireRestorableAfterDatetime,
|
||||
encryptionKeyFile));
|
||||
break;
|
||||
|
||||
case BackupType::DELETE_BACKUP:
|
||||
|
@ -3952,7 +3980,8 @@ int main(int argc, char* argv[]) {
|
|||
destinationContainer,
|
||||
describeDeep,
|
||||
describeTimestamps ? Optional<Database>(db) : Optional<Database>(),
|
||||
jsonOutput));
|
||||
jsonOutput,
|
||||
encryptionKeyFile));
|
||||
break;
|
||||
|
||||
case BackupType::LIST:
|
||||
|
@ -3968,7 +3997,7 @@ int main(int argc, char* argv[]) {
|
|||
restoreVersion,
|
||||
restoreClusterFileOrig,
|
||||
restoreTimestamp,
|
||||
!quietDisplay));
|
||||
Verbose{ !quietDisplay }));
|
||||
break;
|
||||
|
||||
case BackupType::DUMP:
|
||||
|
@ -4029,15 +4058,16 @@ int main(int argc, char* argv[]) {
|
|||
restoreVersion,
|
||||
restoreTimestamp,
|
||||
!dryRun,
|
||||
!quietDisplay,
|
||||
Verbose{ !quietDisplay },
|
||||
waitForDone,
|
||||
addPrefix,
|
||||
removePrefix,
|
||||
onlyAppyMutationLogs,
|
||||
inconsistentSnapshotOnly));
|
||||
onlyApplyMutationLogs,
|
||||
inconsistentSnapshotOnly,
|
||||
encryptionKeyFile));
|
||||
break;
|
||||
case RestoreType::WAIT:
|
||||
f = stopAfter(success(ba.waitRestore(db, KeyRef(tagName), true)));
|
||||
f = stopAfter(success(ba.waitRestore(db, KeyRef(tagName), Verbose::True)));
|
||||
break;
|
||||
case RestoreType::ABORT:
|
||||
f = stopAfter(
|
||||
|
@ -4097,8 +4127,14 @@ int main(int argc, char* argv[]) {
|
|||
// TODO: We have not implemented the code commented out in this case
|
||||
switch (restoreType) {
|
||||
case RestoreType::START:
|
||||
f = stopAfter(runFastRestoreTool(
|
||||
db, tagName, restoreContainer, backupKeys, restoreVersion, !dryRun, !quietDisplay, waitForDone));
|
||||
f = stopAfter(runFastRestoreTool(db,
|
||||
tagName,
|
||||
restoreContainer,
|
||||
backupKeys,
|
||||
restoreVersion,
|
||||
!dryRun,
|
||||
Verbose{ !quietDisplay },
|
||||
waitForDone));
|
||||
break;
|
||||
case RestoreType::WAIT:
|
||||
printf("[TODO][ERROR] FastRestore does not support RESTORE_WAIT yet!\n");
|
||||
|
|
|
@@ -8,6 +8,7 @@ set(FDBCLI_SRCS
ForceRecoveryWithDataLossCommand.actor.cpp
MaintenanceCommand.actor.cpp
SnapshotCommand.actor.cpp
ThrottleCommand.actor.cpp
Util.cpp
linenoise/linenoise.h)
|
||||
|
||||
|
|
|
@ -0,0 +1,645 @@
|
|||
/*
|
||||
* ThrottleCommand.actor.cpp
|
||||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "fdbcli/fdbcli.actor.h"
|
||||
|
||||
#include "fdbclient/IClientApi.h"
|
||||
#include "fdbclient/TagThrottle.h"
|
||||
#include "fdbclient/Knobs.h"
|
||||
#include "fdbclient/SystemData.h"
|
||||
#include "fdbclient/CommitTransaction.h"
|
||||
|
||||
#include "flow/Arena.h"
|
||||
#include "flow/FastRef.h"
|
||||
#include "flow/ThreadHelper.actor.h"
|
||||
#include "flow/genericactors.actor.h"
|
||||
#include "flow/actorcompiler.h" // This must be the last #include.
|
||||
|
||||
namespace {

// Helper functions copied from TagThrottle.actor.cpp
// The only difference is that transactions are changed to go through MultiversionTransaction,
// instead of the native Transaction (i.e., RYWTransaction).

ACTOR Future<bool> getValidAutoEnabled(Reference<ITransaction> tr) {
state bool result;
loop {
Optional<Value> value = wait(safeThreadFutureToFuture(tr->get(tagThrottleAutoEnabledKey)));
if (!value.present()) {
tr->reset();
wait(delay(CLIENT_KNOBS->DEFAULT_BACKOFF));
continue;
} else if (value.get() == LiteralStringRef("1")) {
result = true;
} else if (value.get() == LiteralStringRef("0")) {
result = false;
} else {
TraceEvent(SevWarnAlways, "InvalidAutoTagThrottlingValue").detail("Value", value.get());
tr->reset();
wait(delay(CLIENT_KNOBS->DEFAULT_BACKOFF));
continue;
}
return result;
};
}
|
||||
|
||||
ACTOR Future<std::vector<TagThrottleInfo>> getThrottledTags(Reference<IDatabase> db,
|
||||
int limit,
|
||||
bool containsRecommend = false) {
|
||||
state Reference<ITransaction> tr = db->createTransaction();
|
||||
state bool reportAuto = containsRecommend;
|
||||
loop {
|
||||
tr->setOption(FDBTransactionOptions::READ_SYSTEM_KEYS);
|
||||
try {
|
||||
if (!containsRecommend) {
|
||||
wait(store(reportAuto, getValidAutoEnabled(tr)));
|
||||
}
|
||||
state ThreadFuture<RangeResult> f = tr->getRange(
|
||||
reportAuto ? tagThrottleKeys : KeyRangeRef(tagThrottleKeysPrefix, tagThrottleAutoKeysPrefix), limit);
|
||||
RangeResult throttles = wait(safeThreadFutureToFuture(f));
|
||||
std::vector<TagThrottleInfo> results;
|
||||
for (auto throttle : throttles) {
|
||||
results.push_back(TagThrottleInfo(TagThrottleKey::fromKey(throttle.key),
|
||||
TagThrottleValue::fromValue(throttle.value)));
|
||||
}
|
||||
return results;
|
||||
} catch (Error& e) {
|
||||
wait(safeThreadFutureToFuture(tr->onError(e)));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
ACTOR Future<std::vector<TagThrottleInfo>> getRecommendedTags(Reference<IDatabase> db, int limit) {
|
||||
state Reference<ITransaction> tr = db->createTransaction();
|
||||
loop {
|
||||
tr->setOption(FDBTransactionOptions::READ_SYSTEM_KEYS);
|
||||
try {
|
||||
bool enableAuto = wait(getValidAutoEnabled(tr));
|
||||
if (enableAuto) {
|
||||
return std::vector<TagThrottleInfo>();
|
||||
}
|
||||
state ThreadFuture<RangeResult> f =
|
||||
tr->getRange(KeyRangeRef(tagThrottleAutoKeysPrefix, tagThrottleKeys.end), limit);
|
||||
RangeResult throttles = wait(safeThreadFutureToFuture(f));
|
||||
std::vector<TagThrottleInfo> results;
|
||||
for (auto throttle : throttles) {
|
||||
results.push_back(TagThrottleInfo(TagThrottleKey::fromKey(throttle.key),
|
||||
TagThrottleValue::fromValue(throttle.value)));
|
||||
}
|
||||
return results;
|
||||
} catch (Error& e) {
|
||||
wait(safeThreadFutureToFuture(tr->onError(e)));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
ACTOR Future<Void> updateThrottleCount(Reference<ITransaction> tr, int64_t delta) {
|
||||
state ThreadFuture<Optional<Value>> countVal = tr->get(tagThrottleCountKey);
|
||||
state ThreadFuture<Optional<Value>> limitVal = tr->get(tagThrottleLimitKey);
|
||||
|
||||
wait(success(safeThreadFutureToFuture(countVal)) && success(safeThreadFutureToFuture(limitVal)));
|
||||
|
||||
int64_t count = 0;
|
||||
int64_t limit = 0;
|
||||
|
||||
if (countVal.get().present()) {
|
||||
BinaryReader reader(countVal.get().get(), Unversioned());
|
||||
reader >> count;
|
||||
}
|
||||
|
||||
if (limitVal.get().present()) {
|
||||
BinaryReader reader(limitVal.get().get(), Unversioned());
|
||||
reader >> limit;
|
||||
}
|
||||
|
||||
count += delta;
|
||||
|
||||
if (count > limit) {
|
||||
throw too_many_tag_throttles();
|
||||
}
|
||||
|
||||
BinaryWriter writer(Unversioned());
|
||||
writer << count;
|
||||
|
||||
tr->set(tagThrottleCountKey, writer.toValue());
|
||||
return Void();
|
||||
}
|
||||
|
||||
void signalThrottleChange(Reference<ITransaction> tr) {
|
||||
tr->atomicOp(
|
||||
tagThrottleSignalKey, LiteralStringRef("XXXXXXXXXX\x00\x00\x00\x00"), MutationRef::SetVersionstampedValue);
|
||||
}
|
||||
|
||||
ACTOR Future<Void> throttleTags(Reference<IDatabase> db,
|
||||
TagSet tags,
|
||||
double tpsRate,
|
||||
double initialDuration,
|
||||
TagThrottleType throttleType,
|
||||
TransactionPriority priority,
|
||||
Optional<double> expirationTime = Optional<double>(),
|
||||
Optional<TagThrottledReason> reason = Optional<TagThrottledReason>()) {
|
||||
state Reference<ITransaction> tr = db->createTransaction();
|
||||
state Key key = TagThrottleKey(tags, throttleType, priority).toKey();
|
||||
|
||||
ASSERT(initialDuration > 0);
|
||||
|
||||
if (throttleType == TagThrottleType::MANUAL) {
|
||||
reason = TagThrottledReason::MANUAL;
|
||||
}
|
||||
TagThrottleValue throttle(tpsRate,
|
||||
expirationTime.present() ? expirationTime.get() : 0,
|
||||
initialDuration,
|
||||
reason.present() ? reason.get() : TagThrottledReason::UNSET);
|
||||
BinaryWriter wr(IncludeVersion(ProtocolVersion::withTagThrottleValueReason()));
|
||||
wr << throttle;
|
||||
state Value value = wr.toValue();
|
||||
|
||||
loop {
|
||||
tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
|
||||
try {
|
||||
if (throttleType == TagThrottleType::MANUAL) {
|
||||
Optional<Value> oldThrottle = wait(safeThreadFutureToFuture(tr->get(key)));
|
||||
if (!oldThrottle.present()) {
|
||||
wait(updateThrottleCount(tr, 1));
|
||||
}
|
||||
}
|
||||
|
||||
tr->set(key, value);
|
||||
|
||||
if (throttleType == TagThrottleType::MANUAL) {
|
||||
signalThrottleChange(tr);
|
||||
}
|
||||
|
||||
wait(safeThreadFutureToFuture(tr->commit()));
|
||||
return Void();
|
||||
} catch (Error& e) {
|
||||
wait(safeThreadFutureToFuture(tr->onError(e)));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
ACTOR Future<bool> unthrottleTags(Reference<IDatabase> db,
|
||||
TagSet tags,
|
||||
Optional<TagThrottleType> throttleType,
|
||||
Optional<TransactionPriority> priority) {
|
||||
state Reference<ITransaction> tr = db->createTransaction();
|
||||
|
||||
state std::vector<Key> keys;
|
||||
for (auto p : allTransactionPriorities) {
|
||||
if (!priority.present() || priority.get() == p) {
|
||||
if (!throttleType.present() || throttleType.get() == TagThrottleType::AUTO) {
|
||||
keys.push_back(TagThrottleKey(tags, TagThrottleType::AUTO, p).toKey());
|
||||
}
|
||||
if (!throttleType.present() || throttleType.get() == TagThrottleType::MANUAL) {
|
||||
keys.push_back(TagThrottleKey(tags, TagThrottleType::MANUAL, p).toKey());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
state bool removed = false;
|
||||
|
||||
loop {
|
||||
tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
|
||||
try {
|
||||
state std::vector<Future<Optional<Value>>> values;
|
||||
values.reserve(keys.size());
|
||||
for (auto key : keys) {
|
||||
values.push_back(safeThreadFutureToFuture(tr->get(key)));
|
||||
}
|
||||
|
||||
wait(waitForAll(values));
|
||||
|
||||
int delta = 0;
|
||||
for (int i = 0; i < values.size(); ++i) {
|
||||
if (values[i].get().present()) {
|
||||
if (TagThrottleKey::fromKey(keys[i]).throttleType == TagThrottleType::MANUAL) {
|
||||
delta -= 1;
|
||||
}
|
||||
|
||||
tr->clear(keys[i]);
|
||||
|
||||
// Report that we are removing this tag if we ever see it present.
|
||||
// This protects us from getting confused if the transaction is maybe committed.
|
||||
// It's ok if someone else actually ends up removing this tag at the same time
|
||||
// and we aren't the ones to actually do it.
|
||||
removed = true;
|
||||
}
|
||||
}
|
||||
|
||||
if (delta != 0) {
|
||||
wait(updateThrottleCount(tr, delta));
|
||||
}
|
||||
if (removed) {
|
||||
signalThrottleChange(tr);
|
||||
wait(safeThreadFutureToFuture(tr->commit()));
|
||||
}
|
||||
|
||||
return removed;
|
||||
} catch (Error& e) {
|
||||
wait(safeThreadFutureToFuture(tr->onError(e)));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
ACTOR Future<Void> enableAuto(Reference<IDatabase> db, bool enabled) {
|
||||
state Reference<ITransaction> tr = db->createTransaction();
|
||||
|
||||
loop {
|
||||
tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
|
||||
try {
|
||||
Optional<Value> value = wait(safeThreadFutureToFuture(tr->get(tagThrottleAutoEnabledKey)));
|
||||
if (!value.present() || (enabled && value.get() != LiteralStringRef("1")) ||
|
||||
(!enabled && value.get() != LiteralStringRef("0"))) {
|
||||
tr->set(tagThrottleAutoEnabledKey, LiteralStringRef(enabled ? "1" : "0"));
|
||||
signalThrottleChange(tr);
|
||||
|
||||
wait(safeThreadFutureToFuture(tr->commit()));
|
||||
}
|
||||
return Void();
|
||||
} catch (Error& e) {
|
||||
wait(safeThreadFutureToFuture(tr->onError(e)));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
ACTOR Future<bool> unthrottleMatchingThrottles(Reference<IDatabase> db,
|
||||
KeyRef beginKey,
|
||||
KeyRef endKey,
|
||||
Optional<TransactionPriority> priority,
|
||||
bool onlyExpiredThrottles) {
|
||||
state Reference<ITransaction> tr = db->createTransaction();
|
||||
|
||||
state KeySelector begin = firstGreaterOrEqual(beginKey);
|
||||
state KeySelector end = firstGreaterOrEqual(endKey);
|
||||
|
||||
state bool removed = false;
|
||||
|
||||
loop {
|
||||
tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
|
||||
try {
|
||||
// holds memory of the RangeResult
|
||||
state ThreadFuture<RangeResult> f = tr->getRange(begin, end, 1000);
|
||||
state RangeResult tags = wait(safeThreadFutureToFuture(f));
|
||||
state uint64_t unthrottledTags = 0;
|
||||
uint64_t manualUnthrottledTags = 0;
|
||||
for (auto tag : tags) {
|
||||
if (onlyExpiredThrottles) {
|
||||
double expirationTime = TagThrottleValue::fromValue(tag.value).expirationTime;
|
||||
if (expirationTime == 0 || expirationTime > now()) {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
TagThrottleKey key = TagThrottleKey::fromKey(tag.key);
|
||||
if (priority.present() && key.priority != priority.get()) {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (key.throttleType == TagThrottleType::MANUAL) {
|
||||
++manualUnthrottledTags;
|
||||
}
|
||||
|
||||
removed = true;
|
||||
tr->clear(tag.key);
|
||||
unthrottledTags++;
|
||||
}
|
||||
|
||||
if (manualUnthrottledTags > 0) {
|
||||
wait(updateThrottleCount(tr, -manualUnthrottledTags));
|
||||
}
|
||||
|
||||
if (unthrottledTags > 0) {
|
||||
signalThrottleChange(tr);
|
||||
}
|
||||
|
||||
wait(safeThreadFutureToFuture(tr->commit()));
|
||||
|
||||
if (!tags.more) {
|
||||
return removed;
|
||||
}
|
||||
|
||||
ASSERT(tags.size() > 0);
|
||||
begin = KeySelector(firstGreaterThan(tags[tags.size() - 1].key), tags.arena());
|
||||
} catch (Error& e) {
|
||||
wait(safeThreadFutureToFuture(tr->onError(e)));
|
||||
}
|
||||
}
|
||||
}
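unthrottleMatchingThrottles() pages through the throttle keyspace 1000 keys at a time and, when getRange reports more data, resumes from just after the last key it saw. The same resumption pattern over an ordered container, shown standalone with illustrative names:

#include <map>
#include <string>
#include <vector>

// Visit every entry of an ordered map in fixed-size pages, resuming each page
// strictly after the last key of the previous page, like firstGreaterThan(lastKey).
std::vector<std::string> collectKeysInPages(const std::map<std::string, std::string>& kv, size_t pageSize) {
    std::vector<std::string> visited;
    if (pageSize == 0)
        return visited; // avoid an empty page that would never make progress
    auto it = kv.begin();
    while (it != kv.end()) {
        size_t taken = 0;
        std::string lastKey;
        for (; it != kv.end() && taken < pageSize; ++it, ++taken) {
            visited.push_back(it->first);
            lastKey = it->first;
        }
        // Resume the next page strictly after the last key seen.
        it = kv.upper_bound(lastKey);
    }
    return visited;
}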
|
||||
|
||||
Future<bool> unthrottleAll(Reference<IDatabase> db,
|
||||
Optional<TagThrottleType> tagThrottleType,
|
||||
Optional<TransactionPriority> priority) {
|
||||
KeyRef begin = tagThrottleKeys.begin;
|
||||
KeyRef end = tagThrottleKeys.end;
|
||||
|
||||
if (tagThrottleType.present() && tagThrottleType == TagThrottleType::AUTO) {
|
||||
begin = tagThrottleAutoKeysPrefix;
|
||||
} else if (tagThrottleType.present() && tagThrottleType == TagThrottleType::MANUAL) {
|
||||
end = tagThrottleAutoKeysPrefix;
|
||||
}
|
||||
|
||||
return unthrottleMatchingThrottles(db, begin, end, priority, false);
|
||||
}
|
||||
|
||||
} // namespace
|
||||
|
||||
namespace fdb_cli {
|
||||
|
||||
ACTOR Future<bool> throttleCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens) {
|
||||
|
||||
if (tokens.size() == 1) {
|
||||
printUsage(tokens[0]);
|
||||
return false;
|
||||
} else if (tokencmp(tokens[1], "list")) {
|
||||
if (tokens.size() > 4) {
|
||||
printf("Usage: throttle list [throttled|recommended|all] [LIMIT]\n");
|
||||
printf("\n");
|
||||
printf("Lists tags that are currently throttled.\n");
|
||||
printf("The default LIMIT is 100 tags.\n");
|
||||
return false;
|
||||
}
|
||||
|
||||
state bool reportThrottled = true;
|
||||
state bool reportRecommended = false;
|
||||
if (tokens.size() >= 3) {
|
||||
if (tokencmp(tokens[2], "recommended")) {
|
||||
reportThrottled = false;
|
||||
reportRecommended = true;
|
||||
} else if (tokencmp(tokens[2], "all")) {
|
||||
reportThrottled = true;
|
||||
reportRecommended = true;
|
||||
} else if (!tokencmp(tokens[2], "throttled")) {
|
||||
printf("ERROR: failed to parse `%s'.\n", printable(tokens[2]).c_str());
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
state int throttleListLimit = 100;
|
||||
if (tokens.size() >= 4) {
|
||||
char* end;
|
||||
throttleListLimit = std::strtol((const char*)tokens[3].begin(), &end, 10);
|
||||
if ((tokens.size() > 4 && !std::isspace(*end)) || (tokens.size() == 4 && *end != '\0')) {
|
||||
fprintf(stderr, "ERROR: failed to parse limit `%s'.\n", printable(tokens[3]).c_str());
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
state std::vector<TagThrottleInfo> tags;
|
||||
if (reportThrottled && reportRecommended) {
|
||||
wait(store(tags, getThrottledTags(db, throttleListLimit, true)));
|
||||
} else if (reportThrottled) {
|
||||
wait(store(tags, getThrottledTags(db, throttleListLimit)));
|
||||
} else if (reportRecommended) {
|
||||
wait(store(tags, getRecommendedTags(db, throttleListLimit)));
|
||||
}
|
||||
|
||||
bool anyLogged = false;
|
||||
for (auto itr = tags.begin(); itr != tags.end(); ++itr) {
|
||||
if (itr->expirationTime > now()) {
|
||||
if (!anyLogged) {
|
||||
printf("Throttled tags:\n\n");
|
||||
printf(" Rate (txn/s) | Expiration (s) | Priority | Type | Reason |Tag\n");
|
||||
printf(" --------------+----------------+-----------+--------+------------+------\n");
|
||||
|
||||
anyLogged = true;
|
||||
}
|
||||
|
||||
std::string reasonStr = "unset";
|
||||
if (itr->reason == TagThrottledReason::MANUAL) {
|
||||
reasonStr = "manual";
|
||||
} else if (itr->reason == TagThrottledReason::BUSY_WRITE) {
|
||||
reasonStr = "busy write";
|
||||
} else if (itr->reason == TagThrottledReason::BUSY_READ) {
|
||||
reasonStr = "busy read";
|
||||
}
|
||||
|
||||
printf(" %12d | %13ds | %9s | %6s | %10s |%s\n",
|
||||
(int)(itr->tpsRate),
|
||||
std::min((int)(itr->expirationTime - now()), (int)(itr->initialDuration)),
|
||||
transactionPriorityToString(itr->priority, false),
|
||||
itr->throttleType == TagThrottleType::AUTO ? "auto" : "manual",
|
||||
reasonStr.c_str(),
|
||||
itr->tag.toString().c_str());
|
||||
}
|
||||
}
|
||||
|
||||
if (tags.size() == throttleListLimit) {
|
||||
printf("\nThe tag limit `%d' was reached. Use the [LIMIT] argument to view additional tags.\n",
|
||||
throttleListLimit);
|
||||
printf("Usage: throttle list [LIMIT]\n");
|
||||
}
|
||||
if (!anyLogged) {
|
||||
printf("There are no %s tags\n", reportThrottled ? "throttled" : "recommended");
|
||||
}
|
||||
} else if (tokencmp(tokens[1], "on")) {
|
||||
if (tokens.size() < 4 || !tokencmp(tokens[2], "tag") || tokens.size() > 7) {
|
||||
printf("Usage: throttle on tag <TAG> [RATE] [DURATION] [PRIORITY]\n");
|
||||
printf("\n");
|
||||
printf("Enables throttling for transactions with the specified tag.\n");
|
||||
printf("An optional transactions per second rate can be specified (default 0).\n");
|
||||
printf("An optional duration can be specified, which must include a time suffix (s, m, h, "
|
||||
"d) (default 1h).\n");
|
||||
printf("An optional priority can be specified. Choices are `default', `immediate', and "
|
||||
"`batch' (default `default').\n");
|
||||
return false;
|
||||
}
|
||||
|
||||
double tpsRate = 0.0;
|
||||
uint64_t duration = 3600;
|
||||
TransactionPriority priority = TransactionPriority::DEFAULT;
|
||||
|
||||
if (tokens.size() >= 5) {
|
||||
char* end;
|
||||
tpsRate = std::strtod((const char*)tokens[4].begin(), &end);
|
||||
if ((tokens.size() > 5 && !std::isspace(*end)) || (tokens.size() == 5 && *end != '\0')) {
|
||||
fprintf(stderr, "ERROR: failed to parse rate `%s'.\n", printable(tokens[4]).c_str());
|
||||
return false;
|
||||
}
|
||||
if (tpsRate < 0) {
|
||||
fprintf(stderr, "ERROR: rate cannot be negative `%f'\n", tpsRate);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
if (tokens.size() == 6) {
|
||||
Optional<uint64_t> parsedDuration = parseDuration(tokens[5].toString());
|
||||
if (!parsedDuration.present()) {
|
||||
fprintf(stderr, "ERROR: failed to parse duration `%s'.\n", printable(tokens[5]).c_str());
|
||||
return false;
|
||||
}
|
||||
duration = parsedDuration.get();
|
||||
|
||||
if (duration == 0) {
|
||||
fprintf(stderr, "ERROR: throttle duration cannot be 0\n");
|
||||
return false;
|
||||
}
|
||||
}
|
||||
if (tokens.size() == 7) {
|
||||
if (tokens[6] == LiteralStringRef("default")) {
|
||||
priority = TransactionPriority::DEFAULT;
|
||||
} else if (tokens[6] == LiteralStringRef("immediate")) {
|
||||
priority = TransactionPriority::IMMEDIATE;
|
||||
} else if (tokens[6] == LiteralStringRef("batch")) {
|
||||
priority = TransactionPriority::BATCH;
|
||||
} else {
|
||||
fprintf(stderr,
|
||||
"ERROR: unrecognized priority `%s'. Must be one of `default',\n `immediate', "
|
||||
"or `batch'.\n",
|
||||
tokens[6].toString().c_str());
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
TagSet tags;
|
||||
tags.addTag(tokens[3]);
|
||||
|
||||
wait(throttleTags(db, tags, tpsRate, duration, TagThrottleType::MANUAL, priority));
|
||||
printf("Tag `%s' has been throttled\n", tokens[3].toString().c_str());
|
||||
} else if (tokencmp(tokens[1], "off")) {
|
||||
int nextIndex = 2;
|
||||
TagSet tags;
|
||||
bool throttleTypeSpecified = false;
|
||||
bool is_error = false;
|
||||
Optional<TagThrottleType> throttleType = TagThrottleType::MANUAL;
|
||||
Optional<TransactionPriority> priority;
|
||||
|
||||
if (tokens.size() == 2) {
|
||||
is_error = true;
|
||||
}
|
||||
|
||||
while (nextIndex < tokens.size() && !is_error) {
|
||||
if (tokencmp(tokens[nextIndex], "all")) {
|
||||
if (throttleTypeSpecified) {
|
||||
is_error = true;
|
||||
continue;
|
||||
}
|
||||
throttleTypeSpecified = true;
|
||||
throttleType = Optional<TagThrottleType>();
|
||||
++nextIndex;
|
||||
} else if (tokencmp(tokens[nextIndex], "auto")) {
|
||||
if (throttleTypeSpecified) {
|
||||
is_error = true;
|
||||
continue;
|
||||
}
|
||||
throttleTypeSpecified = true;
|
||||
throttleType = TagThrottleType::AUTO;
|
||||
++nextIndex;
|
||||
} else if (tokencmp(tokens[nextIndex], "manual")) {
|
||||
if (throttleTypeSpecified) {
|
||||
is_error = true;
|
||||
continue;
|
||||
}
|
||||
throttleTypeSpecified = true;
|
||||
throttleType = TagThrottleType::MANUAL;
|
||||
++nextIndex;
|
||||
} else if (tokencmp(tokens[nextIndex], "default")) {
|
||||
if (priority.present()) {
|
||||
is_error = true;
|
||||
continue;
|
||||
}
|
||||
priority = TransactionPriority::DEFAULT;
|
||||
++nextIndex;
|
||||
} else if (tokencmp(tokens[nextIndex], "immediate")) {
|
||||
if (priority.present()) {
|
||||
is_error = true;
|
||||
continue;
|
||||
}
|
||||
priority = TransactionPriority::IMMEDIATE;
|
||||
++nextIndex;
|
||||
} else if (tokencmp(tokens[nextIndex], "batch")) {
|
||||
if (priority.present()) {
|
||||
is_error = true;
|
||||
continue;
|
||||
}
|
||||
priority = TransactionPriority::BATCH;
|
||||
++nextIndex;
|
||||
} else if (tokencmp(tokens[nextIndex], "tag")) {
|
||||
if (tags.size() > 0 || nextIndex == tokens.size() - 1) {
|
||||
is_error = true;
|
||||
continue;
|
||||
}
|
||||
tags.addTag(tokens[nextIndex + 1]);
|
||||
nextIndex += 2;
|
||||
}
|
||||
}
|
||||
|
||||
if (!is_error) {
|
||||
state const char* throttleTypeString =
|
||||
!throttleType.present() ? "" : (throttleType.get() == TagThrottleType::AUTO ? "auto-" : "manually ");
|
||||
state std::string priorityString =
|
||||
priority.present() ? format(" at %s priority", transactionPriorityToString(priority.get(), false)) : "";
|
||||
|
||||
if (tags.size() > 0) {
|
||||
bool success = wait(unthrottleTags(db, tags, throttleType, priority));
|
||||
if (success) {
|
||||
printf("Unthrottled tag `%s'%s\n", tokens[3].toString().c_str(), priorityString.c_str());
|
||||
} else {
|
||||
printf("Tag `%s' was not %sthrottled%s\n",
|
||||
tokens[3].toString().c_str(),
|
||||
throttleTypeString,
|
||||
priorityString.c_str());
|
||||
}
|
||||
} else {
|
||||
bool unthrottled = wait(unthrottleAll(db, throttleType, priority));
|
||||
if (unthrottled) {
|
||||
printf("Unthrottled all %sthrottled tags%s\n", throttleTypeString, priorityString.c_str());
|
||||
} else {
|
||||
printf("There were no tags being %sthrottled%s\n", throttleTypeString, priorityString.c_str());
|
||||
}
|
||||
}
|
||||
} else {
|
||||
printf("Usage: throttle off [all|auto|manual] [tag <TAG>] [PRIORITY]\n");
|
||||
printf("\n");
|
||||
printf("Disables throttling for throttles matching the specified filters. At least one "
|
||||
"filter must be used.\n\n");
|
||||
printf("An optional qualifier `all', `auto', or `manual' can be used to specify the type "
|
||||
"of throttle\n");
|
||||
printf("affected. `all' targets all throttles, `auto' targets those created by the "
|
||||
"cluster, and\n");
|
||||
printf("`manual' targets those created manually (default `manual').\n\n");
|
||||
printf("The `tag' filter can be use to turn off only a specific tag.\n\n");
|
||||
printf("The priority filter can be used to turn off only throttles at specific priorities. "
|
||||
"Choices are\n");
|
||||
printf("`default', `immediate', or `batch'. By default, all priorities are targeted.\n");
|
||||
}
|
||||
} else if (tokencmp(tokens[1], "enable") || tokencmp(tokens[1], "disable")) {
|
||||
if (tokens.size() != 3 || !tokencmp(tokens[2], "auto")) {
|
||||
printf("Usage: throttle <enable|disable> auto\n");
|
||||
printf("\n");
|
||||
printf("Enables or disable automatic tag throttling.\n");
|
||||
return false;
|
||||
}
|
||||
state bool autoTagThrottlingEnabled = tokencmp(tokens[1], "enable");
|
||||
wait(enableAuto(db, autoTagThrottlingEnabled));
|
||||
printf("Automatic tag throttling has been %s\n", autoTagThrottlingEnabled ? "enabled" : "disabled");
|
||||
} else {
|
||||
printUsage(tokens[0]);
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
CommandFactory throttleFactory(
|
||||
"throttle",
|
||||
CommandHelp("throttle <on|off|enable auto|disable auto|list> [ARGS]",
|
||||
"view and control throttled tags",
|
||||
"Use `on' and `off' to manually throttle or unthrottle tags. Use `enable auto' or `disable auto' "
|
||||
"to enable or disable automatic tag throttling. Use `list' to print the list of throttled tags.\n"));
|
||||
} // namespace fdb_cli
|
|
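For orientation, the usage strings above translate into fdbcli invocations of the following shape (the tag name, rate, duration, and limit are placeholders):

    throttle list recommended 50
    throttle on tag web_frontend 100 2h batch
    throttle off auto tag web_frontend batch
    throttle disable auto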
@ -648,11 +648,6 @@ void initHelp() {
|
|||
"namespace for all the profiling-related commands.",
|
||||
"Different types support different actions. Run `profile` to get a list of "
|
||||
"types, and iteratively explore the help.\n");
|
||||
helpMap["throttle"] =
|
||||
CommandHelp("throttle <on|off|enable auto|disable auto|list> [ARGS]",
|
||||
"view and control throttled tags",
|
||||
"Use `on' and `off' to manually throttle or unthrottle tags. Use `enable auto' or `disable auto' "
|
||||
"to enable or disable automatic tag throttling. Use `list' to print the list of throttled tags.\n");
|
||||
helpMap["cache_range"] = CommandHelp(
|
||||
"cache_range <set|clear> <BEGINKEY> <ENDKEY>",
|
||||
"Mark a key range to add to or remove from storage caches.",
|
||||
|
@ -3151,7 +3146,7 @@ struct CLIOptions {
|
|||
}
|
||||
|
||||
// Reinitialize knobs in order to update knobs that are dependent on explicitly set knobs
|
||||
g_knobs.initialize(Randomize::NO, IsSimulated::NO);
|
||||
g_knobs.initialize(Randomize::False, IsSimulated::False);
|
||||
}
|
||||
|
||||
int processArg(CSimpleOpt& args) {
|
||||
|
@ -3322,7 +3317,7 @@ ACTOR Future<int> cli(CLIOptions opt, LineNoise* plinenoise) {
|
|||
TraceEvent::setNetworkThread();
|
||||
|
||||
try {
|
||||
db = Database::createDatabase(ccf, -1, false);
|
||||
db = Database::createDatabase(ccf, -1, IsInternal::False);
|
||||
if (!opt.exec.present()) {
|
||||
printf("Using cluster file `%s'.\n", ccf->getFilename().c_str());
|
||||
}
|
||||
|
@ -3960,6 +3955,7 @@ ACTOR Future<int> cli(CLIOptions opt, LineNoise* plinenoise) {
|
|||
is_error = true;
|
||||
continue;
|
||||
}
|
||||
wait(makeInterruptable(GlobalConfig::globalConfig().onInitialized()));
|
||||
if (tokencmp(tokens[2], "get")) {
|
||||
if (tokens.size() != 3) {
|
||||
fprintf(stderr, "ERROR: Addtional arguments to `get` are not supported.\n");
|
||||
|
@ -4494,300 +4490,12 @@ ACTOR Future<int> cli(CLIOptions opt, LineNoise* plinenoise) {
|
|||
}
|
||||
|
||||
if (tokencmp(tokens[0], "throttle")) {
|
||||
if (tokens.size() == 1) {
|
||||
printUsage(tokens[0]);
|
||||
bool _result = wait(throttleCommandActor(db2, tokens));
|
||||
if (!_result)
|
||||
is_error = true;
|
||||
continue;
|
||||
} else if (tokencmp(tokens[1], "list")) {
|
||||
if (tokens.size() > 4) {
|
||||
printf("Usage: throttle list [throttled|recommended|all] [LIMIT]\n");
|
||||
printf("\n");
|
||||
printf("Lists tags that are currently throttled.\n");
|
||||
printf("The default LIMIT is 100 tags.\n");
|
||||
is_error = true;
|
||||
continue;
|
||||
}
|
||||
|
||||
state bool reportThrottled = true;
|
||||
state bool reportRecommended = false;
|
||||
if (tokens.size() >= 3) {
|
||||
if (tokencmp(tokens[2], "recommended")) {
|
||||
reportThrottled = false;
|
||||
reportRecommended = true;
|
||||
} else if (tokencmp(tokens[2], "all")) {
|
||||
reportThrottled = true;
|
||||
reportRecommended = true;
|
||||
} else if (!tokencmp(tokens[2], "throttled")) {
|
||||
printf("ERROR: failed to parse `%s'.\n", printable(tokens[2]).c_str());
|
||||
is_error = true;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
state int throttleListLimit = 100;
|
||||
if (tokens.size() >= 4) {
|
||||
char* end;
|
||||
throttleListLimit = std::strtol((const char*)tokens[3].begin(), &end, 10);
|
||||
if ((tokens.size() > 4 && !std::isspace(*end)) || (tokens.size() == 4 && *end != '\0')) {
|
||||
fprintf(stderr, "ERROR: failed to parse limit `%s'.\n", printable(tokens[3]).c_str());
|
||||
is_error = true;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
state std::vector<TagThrottleInfo> tags;
|
||||
if (reportThrottled && reportRecommended) {
|
||||
wait(store(tags, ThrottleApi::getThrottledTags(db, throttleListLimit, true)));
|
||||
} else if (reportThrottled) {
|
||||
wait(store(tags, ThrottleApi::getThrottledTags(db, throttleListLimit)));
|
||||
} else if (reportRecommended) {
|
||||
wait(store(tags, ThrottleApi::getRecommendedTags(db, throttleListLimit)));
|
||||
}
|
||||
|
||||
bool anyLogged = false;
|
||||
for (auto itr = tags.begin(); itr != tags.end(); ++itr) {
|
||||
if (itr->expirationTime > now()) {
|
||||
if (!anyLogged) {
|
||||
printf("Throttled tags:\n\n");
|
||||
printf(" Rate (txn/s) | Expiration (s) | Priority | Type | Reason |Tag\n");
|
||||
printf(
|
||||
" --------------+----------------+-----------+--------+------------+------\n");
|
||||
|
||||
anyLogged = true;
|
||||
}
|
||||
|
||||
std::string reasonStr = "unset";
|
||||
if (itr->reason == TagThrottledReason::MANUAL) {
|
||||
reasonStr = "manual";
|
||||
} else if (itr->reason == TagThrottledReason::BUSY_WRITE) {
|
||||
reasonStr = "busy write";
|
||||
} else if (itr->reason == TagThrottledReason::BUSY_READ) {
|
||||
reasonStr = "busy read";
|
||||
}
|
||||
|
||||
printf(" %12d | %13ds | %9s | %6s | %10s |%s\n",
|
||||
(int)(itr->tpsRate),
|
||||
std::min((int)(itr->expirationTime - now()), (int)(itr->initialDuration)),
|
||||
transactionPriorityToString(itr->priority, false),
|
||||
itr->throttleType == TagThrottleType::AUTO ? "auto" : "manual",
|
||||
reasonStr.c_str(),
|
||||
itr->tag.toString().c_str());
|
||||
}
|
||||
}
|
||||
|
||||
if (tags.size() == throttleListLimit) {
|
||||
printf(
|
||||
"\nThe tag limit `%d' was reached. Use the [LIMIT] argument to view additional tags.\n",
|
||||
throttleListLimit);
|
||||
printf("Usage: throttle list [LIMIT]\n");
|
||||
}
|
||||
if (!anyLogged) {
|
||||
printf("There are no %s tags\n", reportThrottled ? "throttled" : "recommended");
|
||||
}
|
||||
} else if (tokencmp(tokens[1], "on")) {
|
||||
if (tokens.size() < 4 || !tokencmp(tokens[2], "tag") || tokens.size() > 7) {
|
||||
printf("Usage: throttle on tag <TAG> [RATE] [DURATION] [PRIORITY]\n");
|
||||
printf("\n");
|
||||
printf("Enables throttling for transactions with the specified tag.\n");
|
||||
printf("An optional transactions per second rate can be specified (default 0).\n");
|
||||
printf("An optional duration can be specified, which must include a time suffix (s, m, h, "
|
||||
"d) (default 1h).\n");
|
||||
printf("An optional priority can be specified. Choices are `default', `immediate', and "
|
||||
"`batch' (default `default').\n");
|
||||
is_error = true;
|
||||
continue;
|
||||
}
|
||||
|
||||
double tpsRate = 0.0;
|
||||
uint64_t duration = 3600;
|
||||
TransactionPriority priority = TransactionPriority::DEFAULT;
|
||||
|
||||
if (tokens.size() >= 5) {
|
||||
char* end;
|
||||
tpsRate = std::strtod((const char*)tokens[4].begin(), &end);
|
||||
if ((tokens.size() > 5 && !std::isspace(*end)) || (tokens.size() == 5 && *end != '\0')) {
|
||||
fprintf(stderr, "ERROR: failed to parse rate `%s'.\n", printable(tokens[4]).c_str());
|
||||
is_error = true;
|
||||
continue;
|
||||
}
|
||||
if (tpsRate < 0) {
|
||||
fprintf(stderr, "ERROR: rate cannot be negative `%f'\n", tpsRate);
|
||||
is_error = true;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
if (tokens.size() == 6) {
|
||||
Optional<uint64_t> parsedDuration = parseDuration(tokens[5].toString());
|
||||
if (!parsedDuration.present()) {
|
||||
fprintf(
|
||||
stderr, "ERROR: failed to parse duration `%s'.\n", printable(tokens[5]).c_str());
|
||||
is_error = true;
|
||||
continue;
|
||||
}
|
||||
duration = parsedDuration.get();
|
||||
|
||||
if (duration == 0) {
|
||||
fprintf(stderr, "ERROR: throttle duration cannot be 0\n");
|
||||
is_error = true;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
if (tokens.size() == 7) {
|
||||
if (tokens[6] == LiteralStringRef("default")) {
|
||||
priority = TransactionPriority::DEFAULT;
|
||||
} else if (tokens[6] == LiteralStringRef("immediate")) {
|
||||
priority = TransactionPriority::IMMEDIATE;
|
||||
} else if (tokens[6] == LiteralStringRef("batch")) {
|
||||
priority = TransactionPriority::BATCH;
|
||||
} else {
|
||||
fprintf(stderr,
|
||||
"ERROR: unrecognized priority `%s'. Must be one of `default',\n `immediate', "
|
||||
"or `batch'.\n",
|
||||
tokens[6].toString().c_str());
|
||||
is_error = true;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
TagSet tags;
|
||||
tags.addTag(tokens[3]);
|
||||
|
||||
wait(ThrottleApi::throttleTags(db, tags, tpsRate, duration, TagThrottleType::MANUAL, priority));
|
||||
printf("Tag `%s' has been throttled\n", tokens[3].toString().c_str());
|
||||
} else if (tokencmp(tokens[1], "off")) {
|
||||
int nextIndex = 2;
|
||||
TagSet tags;
|
||||
bool throttleTypeSpecified = false;
|
||||
Optional<TagThrottleType> throttleType = TagThrottleType::MANUAL;
|
||||
Optional<TransactionPriority> priority;
|
||||
|
||||
if (tokens.size() == 2) {
|
||||
is_error = true;
|
||||
}
|
||||
|
||||
while (nextIndex < tokens.size() && !is_error) {
|
||||
if (tokencmp(tokens[nextIndex], "all")) {
|
||||
if (throttleTypeSpecified) {
|
||||
is_error = true;
|
||||
continue;
|
||||
}
|
||||
throttleTypeSpecified = true;
|
||||
throttleType = Optional<TagThrottleType>();
|
||||
++nextIndex;
|
||||
} else if (tokencmp(tokens[nextIndex], "auto")) {
|
||||
if (throttleTypeSpecified) {
|
||||
is_error = true;
|
||||
continue;
|
||||
}
|
||||
throttleTypeSpecified = true;
|
||||
throttleType = TagThrottleType::AUTO;
|
||||
++nextIndex;
|
||||
} else if (tokencmp(tokens[nextIndex], "manual")) {
|
||||
if (throttleTypeSpecified) {
|
||||
is_error = true;
|
||||
continue;
|
||||
}
|
||||
throttleTypeSpecified = true;
|
||||
throttleType = TagThrottleType::MANUAL;
|
||||
++nextIndex;
|
||||
} else if (tokencmp(tokens[nextIndex], "default")) {
|
||||
if (priority.present()) {
|
||||
is_error = true;
|
||||
continue;
|
||||
}
|
||||
priority = TransactionPriority::DEFAULT;
|
||||
++nextIndex;
|
||||
} else if (tokencmp(tokens[nextIndex], "immediate")) {
|
||||
if (priority.present()) {
|
||||
is_error = true;
|
||||
continue;
|
||||
}
|
||||
priority = TransactionPriority::IMMEDIATE;
|
||||
++nextIndex;
|
||||
} else if (tokencmp(tokens[nextIndex], "batch")) {
|
||||
if (priority.present()) {
|
||||
is_error = true;
|
||||
continue;
|
||||
}
|
||||
priority = TransactionPriority::BATCH;
|
||||
++nextIndex;
|
||||
} else if (tokencmp(tokens[nextIndex], "tag")) {
|
||||
if (tags.size() > 0 || nextIndex == tokens.size() - 1) {
|
||||
is_error = true;
|
||||
continue;
|
||||
}
|
||||
tags.addTag(tokens[nextIndex + 1]);
|
||||
nextIndex += 2;
|
||||
}
|
||||
}
|
||||
|
||||
if (!is_error) {
|
||||
state const char* throttleTypeString =
|
||||
!throttleType.present()
|
||||
? ""
|
||||
: (throttleType.get() == TagThrottleType::AUTO ? "auto-" : "manually ");
|
||||
state std::string priorityString =
|
||||
priority.present()
|
||||
? format(" at %s priority", transactionPriorityToString(priority.get(), false))
|
||||
: "";
|
||||
|
||||
if (tags.size() > 0) {
|
||||
bool success = wait(ThrottleApi::unthrottleTags(db, tags, throttleType, priority));
|
||||
if (success) {
|
||||
printf("Unthrottled tag `%s'%s\n",
|
||||
tokens[3].toString().c_str(),
|
||||
priorityString.c_str());
|
||||
} else {
|
||||
printf("Tag `%s' was not %sthrottled%s\n",
|
||||
tokens[3].toString().c_str(),
|
||||
throttleTypeString,
|
||||
priorityString.c_str());
|
||||
}
|
||||
} else {
|
||||
bool unthrottled = wait(ThrottleApi::unthrottleAll(db, throttleType, priority));
|
||||
if (unthrottled) {
|
||||
printf("Unthrottled all %sthrottled tags%s\n",
|
||||
throttleTypeString,
|
||||
priorityString.c_str());
|
||||
} else {
|
||||
printf("There were no tags being %sthrottled%s\n",
|
||||
throttleTypeString,
|
||||
priorityString.c_str());
|
||||
}
|
||||
}
|
||||
} else {
|
||||
printf("Usage: throttle off [all|auto|manual] [tag <TAG>] [PRIORITY]\n");
|
||||
printf("\n");
|
||||
printf("Disables throttling for throttles matching the specified filters. At least one "
|
||||
"filter must be used.\n\n");
|
||||
printf("An optional qualifier `all', `auto', or `manual' can be used to specify the type "
|
||||
"of throttle\n");
|
||||
printf("affected. `all' targets all throttles, `auto' targets those created by the "
|
||||
"cluster, and\n");
|
||||
printf("`manual' targets those created manually (default `manual').\n\n");
|
||||
printf("The `tag' filter can be use to turn off only a specific tag.\n\n");
|
||||
printf("The priority filter can be used to turn off only throttles at specific priorities. "
|
||||
"Choices are\n");
|
||||
printf("`default', `immediate', or `batch'. By default, all priorities are targeted.\n");
|
||||
}
|
||||
} else if (tokencmp(tokens[1], "enable") || tokencmp(tokens[1], "disable")) {
|
||||
if (tokens.size() != 3 || !tokencmp(tokens[2], "auto")) {
|
||||
printf("Usage: throttle <enable|disable> auto\n");
|
||||
printf("\n");
|
||||
printf("Enables or disable automatic tag throttling.\n");
|
||||
is_error = true;
|
||||
continue;
|
||||
}
|
||||
state bool autoTagThrottlingEnabled = tokencmp(tokens[1], "enable");
|
||||
wait(ThrottleApi::enableAuto(db, autoTagThrottlingEnabled));
|
||||
printf("Automatic tag throttling has been %s\n",
|
||||
autoTagThrottlingEnabled ? "enabled" : "disabled");
|
||||
} else {
|
||||
printUsage(tokens[0]);
|
||||
is_error = true;
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
if (tokencmp(tokens[0], "cache_range")) {
|
||||
if (tokens.size() != 4) {
|
||||
printUsage(tokens[0]);
|
||||
|
@ -4924,7 +4632,7 @@ int main(int argc, char** argv) {
|
|||
|
||||
registerCrashHandler();
|
||||
|
||||
IKnobCollection::setGlobalKnobCollection(IKnobCollection::Type::CLIENT, Randomize::NO, IsSimulated::NO);
|
||||
IKnobCollection::setGlobalKnobCollection(IKnobCollection::Type::CLIENT, Randomize::False, IsSimulated::False);
|
||||
|
||||
#ifdef __unixish__
|
||||
struct sigaction act;
|
||||
|
|
|
@ -83,6 +83,8 @@ ACTOR Future<bool> forceRecoveryWithDataLossCommandActor(Reference<IDatabase> db
|
|||
ACTOR Future<bool> maintenanceCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens);
|
||||
// snapshot command
|
||||
ACTOR Future<bool> snapshotCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens);
|
||||
// throttle command
|
||||
ACTOR Future<bool> throttleCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens);
|
||||
|
||||
} // namespace fdb_cli
|
||||
|
||||
|
|
|
@ -256,7 +256,7 @@ public:
|
|||
m_concurrentUploads(bstore->knobs.concurrent_writes_per_file) {
|
||||
|
||||
// Add first part
|
||||
m_parts.push_back(Reference<Part>(new Part(1, m_bstore->knobs.multipart_min_part_size)));
|
||||
m_parts.push_back(makeReference<Part>(1, m_bstore->knobs.multipart_min_part_size));
|
||||
}
|
||||
};
|
||||
|
||||
|
|
|
@ -18,6 +18,8 @@
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include <atomic>
|
||||
|
||||
#include "fdbclient/AsyncTaskThread.h"
|
||||
#include "flow/UnitTest.h"
|
||||
#include "flow/actorcompiler.h" // This must be the last #include.
|
||||
|
@ -30,13 +32,22 @@ public:
|
|||
bool isTerminate() const override { return true; }
|
||||
};
|
||||
|
||||
ACTOR Future<Void> asyncTaskThreadClient(AsyncTaskThread* asyncTaskThread, int* sum, int count) {
|
||||
ACTOR Future<Void> asyncTaskThreadClient(AsyncTaskThread* asyncTaskThread, std::atomic<int> *sum, int count, int clientId, double meanSleep) {
|
||||
state int i = 0;
|
||||
state double randomSleep = 0.0;
|
||||
for (; i < count; ++i) {
|
||||
randomSleep = deterministicRandom()->random01() * 2 * meanSleep;
|
||||
wait(delay(randomSleep));
|
||||
wait(asyncTaskThread->execAsync([sum = sum] {
|
||||
++(*sum);
|
||||
sum->fetch_add(1);
|
||||
return Void();
|
||||
}));
|
||||
TraceEvent("AsyncTaskThreadIncrementedSum")
|
||||
.detail("Index", i)
|
||||
.detail("Sum", sum->load())
|
||||
.detail("ClientId", clientId)
|
||||
.detail("RandomSleep", randomSleep)
|
||||
.detail("MeanSleep", meanSleep);
|
||||
}
|
||||
return Void();
|
||||
}
|
||||
|
@ -51,7 +62,7 @@ AsyncTaskThread::~AsyncTaskThread() {
|
|||
bool wakeUp = false;
|
||||
{
|
||||
std::lock_guard<std::mutex> g(m);
|
||||
wakeUp = queue.push(std::make_shared<TerminateTask>());
|
||||
wakeUp = queue.push(std::make_unique<TerminateTask>());
|
||||
}
|
||||
if (wakeUp) {
|
||||
cv.notify_one();
|
||||
|
@ -61,7 +72,7 @@ AsyncTaskThread::~AsyncTaskThread() {
|
|||
|
||||
void AsyncTaskThread::run(AsyncTaskThread* self) {
|
||||
while (true) {
|
||||
std::shared_ptr<IAsyncTask> task;
|
||||
std::unique_ptr<IAsyncTask> task;
|
||||
{
|
||||
std::unique_lock<std::mutex> lk(self->m);
|
||||
self->cv.wait(lk, [self] { return !self->queue.canSleep(); });
|
||||
|
@ -75,14 +86,30 @@ void AsyncTaskThread::run(AsyncTaskThread* self) {
|
|||
}
|
||||
|
||||
TEST_CASE("/asynctaskthread/add") {
|
||||
state int sum = 0;
|
||||
state std::atomic<int> sum = 0;
|
||||
state AsyncTaskThread asyncTaskThread;
|
||||
state int numClients = 10;
|
||||
state int incrementsPerClient = 100;
|
||||
std::vector<Future<Void>> clients;
|
||||
clients.reserve(10);
|
||||
for (int i = 0; i < 10; ++i) {
|
||||
clients.push_back(asyncTaskThreadClient(&asyncTaskThread, &sum, 100));
|
||||
clients.reserve(numClients);
|
||||
for (int clientId = 0; clientId < numClients; ++clientId) {
|
||||
clients.push_back(asyncTaskThreadClient(&asyncTaskThread, &sum, incrementsPerClient, clientId, deterministicRandom()->random01() * 0.01));
|
||||
}
|
||||
wait(waitForAll(clients));
|
||||
ASSERT(sum == 1000);
|
||||
ASSERT_EQ(sum.load(), numClients * incrementsPerClient);
|
||||
return Void();
|
||||
}
|
||||
|
||||
TEST_CASE("/asynctaskthread/error") {
|
||||
state AsyncTaskThread asyncTaskThread;
|
||||
try {
|
||||
wait(asyncTaskThread.execAsync([]{
|
||||
throw operation_failed();
|
||||
return Void();
|
||||
}));
|
||||
ASSERT(false);
|
||||
} catch (Error &e) {
|
||||
ASSERT_EQ(e.code(), error_code_operation_failed);
|
||||
}
|
||||
return Void();
|
||||
}
|
||||
|
|
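A minimal usage sketch of the pattern these tests exercise, assuming the execAsync() shape shown above (a callable runs on the task thread and its result or error comes back as a Future); the actor name is illustrative:

	ACTOR Future<Void> runBlockingWorkOffNetworkThread(AsyncTaskThread* taskThread) {
		wait(taskThread->execAsync([] {
			// Blocking or thread-unsafe work belongs here, off the network thread.
			return Void();
		}));
		return Void();
	}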
|
@ -48,7 +48,7 @@ public:
|
|||
};
|
||||
|
||||
class AsyncTaskThread {
|
||||
ThreadSafeQueue<std::shared_ptr<IAsyncTask>> queue;
|
||||
ThreadSafeQueue<std::unique_ptr<IAsyncTask>> queue;
|
||||
std::condition_variable cv;
|
||||
std::mutex m;
|
||||
std::thread thread;
|
||||
|
@ -60,7 +60,7 @@ class AsyncTaskThread {
|
|||
bool wakeUp = false;
|
||||
{
|
||||
std::lock_guard<std::mutex> g(m);
|
||||
wakeUp = queue.push(std::make_shared<AsyncTask<F>>(func));
|
||||
wakeUp = queue.push(std::make_unique<AsyncTask<F>>(func));
|
||||
}
|
||||
if (wakeUp) {
|
||||
cv.notify_one();
|
||||
|
@ -88,6 +88,7 @@ public:
|
|||
auto funcResult = func();
|
||||
onMainThreadVoid([promise, funcResult] { promise.send(funcResult); }, nullptr, priority);
|
||||
} catch (Error& e) {
|
||||
TraceEvent("ErrorExecutingAsyncTask").error(e);
|
||||
onMainThreadVoid([promise, e] { promise.sendError(e); }, nullptr, priority);
|
||||
}
|
||||
});
|
||||
|
|
|
@ -36,6 +36,26 @@
|
|||
#include "fdbclient/BackupContainer.h"
|
||||
#include "flow/actorcompiler.h" // has to be last include
|
||||
|
||||
FDB_DECLARE_BOOLEAN_PARAM(LockDB);
|
||||
FDB_DECLARE_BOOLEAN_PARAM(UnlockDB);
|
||||
FDB_DECLARE_BOOLEAN_PARAM(StopWhenDone);
|
||||
FDB_DECLARE_BOOLEAN_PARAM(Verbose);
|
||||
FDB_DECLARE_BOOLEAN_PARAM(WaitForComplete);
|
||||
FDB_DECLARE_BOOLEAN_PARAM(ForceAction);
|
||||
FDB_DECLARE_BOOLEAN_PARAM(Terminator);
|
||||
FDB_DECLARE_BOOLEAN_PARAM(IncrementalBackupOnly);
|
||||
FDB_DECLARE_BOOLEAN_PARAM(UsePartitionedLog);
|
||||
FDB_DECLARE_BOOLEAN_PARAM(OnlyApplyMutationLogs);
|
||||
FDB_DECLARE_BOOLEAN_PARAM(InconsistentSnapshotOnly);
|
||||
FDB_DECLARE_BOOLEAN_PARAM(ShowErrors);
|
||||
FDB_DECLARE_BOOLEAN_PARAM(AbortOldBackup);
|
||||
FDB_DECLARE_BOOLEAN_PARAM(DstOnly); // TODO: More descriptive name?
|
||||
FDB_DECLARE_BOOLEAN_PARAM(WaitForDestUID);
|
||||
FDB_DECLARE_BOOLEAN_PARAM(CheckBackupUID);
|
||||
FDB_DECLARE_BOOLEAN_PARAM(DeleteData);
|
||||
FDB_DECLARE_BOOLEAN_PARAM(SetValidation);
|
||||
FDB_DECLARE_BOOLEAN_PARAM(PartialBackup);
|
||||
|
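These FDB_DECLARE_BOOLEAN_PARAM declarations replace bare bool arguments with named types so call sites read LockDB::True instead of an anonymous true. A minimal sketch of the idiom, not the actual macro expansion in flow:

	// Sketch of a strongly typed boolean parameter (illustrative type name).
	struct LockDBSketch {
		bool value = false;
		constexpr operator bool() const { return value; }
		static const LockDBSketch True;
		static const LockDBSketch False;
	};
	inline const LockDBSketch LockDBSketch::True{ true };
	inline const LockDBSketch LockDBSketch::False{ false };

	// Passing LockDBSketch::True documents intent where a bare `true` among
	// several adjacent bool arguments would not.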
||||
class BackupAgentBase : NonCopyable {
|
||||
public:
|
||||
// Time formatter for anything backup or restore related
|
||||
|
@ -65,6 +85,7 @@ public:
|
|||
static const Key keyConfigStopWhenDoneKey;
|
||||
static const Key keyStateStatus;
|
||||
static const Key keyStateStop;
|
||||
static const Key keyStateLogBeginVersion;
|
||||
static const Key keyLastUid;
|
||||
static const Key keyBeginKey;
|
||||
static const Key keyEndKey;
|
||||
|
@ -82,151 +103,26 @@ public:
|
|||
static const Key keySourceStates;
|
||||
static const Key keySourceTagName;
|
||||
|
||||
static const int logHeaderSize;
|
||||
static constexpr int logHeaderSize = 12;
|
||||
|
||||
// Convert the status text to an enumerated value
|
||||
static EnumState getState(std::string stateText) {
|
||||
auto enState = EnumState::STATE_ERRORED;
|
||||
|
||||
if (stateText.empty()) {
|
||||
enState = EnumState::STATE_NEVERRAN;
|
||||
}
|
||||
|
||||
else if (!stateText.compare("has been submitted")) {
|
||||
enState = EnumState::STATE_SUBMITTED;
|
||||
}
|
||||
|
||||
else if (!stateText.compare("has been started")) {
|
||||
enState = EnumState::STATE_RUNNING;
|
||||
}
|
||||
|
||||
else if (!stateText.compare("is differential")) {
|
||||
enState = EnumState::STATE_RUNNING_DIFFERENTIAL;
|
||||
}
|
||||
|
||||
else if (!stateText.compare("has been completed")) {
|
||||
enState = EnumState::STATE_COMPLETED;
|
||||
}
|
||||
|
||||
else if (!stateText.compare("has been aborted")) {
|
||||
enState = EnumState::STATE_ABORTED;
|
||||
}
|
||||
|
||||
else if (!stateText.compare("has been partially aborted")) {
|
||||
enState = EnumState::STATE_PARTIALLY_ABORTED;
|
||||
}
|
||||
|
||||
return enState;
|
||||
}
|
||||
static EnumState getState(std::string const& stateText);
|
||||
|
||||
// Convert the status enum to a text description
|
||||
static const char* getStateText(EnumState enState) {
|
||||
const char* stateText;
|
||||
|
||||
switch (enState) {
|
||||
case EnumState::STATE_ERRORED:
|
||||
stateText = "has errored";
|
||||
break;
|
||||
case EnumState::STATE_NEVERRAN:
|
||||
stateText = "has never been started";
|
||||
break;
|
||||
case EnumState::STATE_SUBMITTED:
|
||||
stateText = "has been submitted";
|
||||
break;
|
||||
case EnumState::STATE_RUNNING:
|
||||
stateText = "has been started";
|
||||
break;
|
||||
case EnumState::STATE_RUNNING_DIFFERENTIAL:
|
||||
stateText = "is differential";
|
||||
break;
|
||||
case EnumState::STATE_COMPLETED:
|
||||
stateText = "has been completed";
|
||||
break;
|
||||
case EnumState::STATE_ABORTED:
|
||||
stateText = "has been aborted";
|
||||
break;
|
||||
case EnumState::STATE_PARTIALLY_ABORTED:
|
||||
stateText = "has been partially aborted";
|
||||
break;
|
||||
default:
|
||||
stateText = "<undefined>";
|
||||
break;
|
||||
}
|
||||
|
||||
return stateText;
|
||||
}
|
||||
static const char* getStateText(EnumState enState);
|
||||
|
||||
// Convert the status enum to a name
|
||||
static const char* getStateName(EnumState enState) {
|
||||
const char* s;
|
||||
|
||||
switch (enState) {
|
||||
case EnumState::STATE_ERRORED:
|
||||
s = "Errored";
|
||||
break;
|
||||
case EnumState::STATE_NEVERRAN:
|
||||
s = "NeverRan";
|
||||
break;
|
||||
case EnumState::STATE_SUBMITTED:
|
||||
s = "Submitted";
|
||||
break;
|
||||
case EnumState::STATE_RUNNING:
|
||||
s = "Running";
|
||||
break;
|
||||
case EnumState::STATE_RUNNING_DIFFERENTIAL:
|
||||
s = "RunningDifferentially";
|
||||
break;
|
||||
case EnumState::STATE_COMPLETED:
|
||||
s = "Completed";
|
||||
break;
|
||||
case EnumState::STATE_ABORTED:
|
||||
s = "Aborted";
|
||||
break;
|
||||
case EnumState::STATE_PARTIALLY_ABORTED:
|
||||
s = "Aborting";
|
||||
break;
|
||||
default:
|
||||
s = "<undefined>";
|
||||
break;
|
||||
}
|
||||
|
||||
return s;
|
||||
}
|
||||
static const char* getStateName(EnumState enState);
|
||||
|
||||
// Determine if the specified state is runnable
|
||||
static bool isRunnable(EnumState enState) {
|
||||
bool isRunnable = false;
|
||||
static bool isRunnable(EnumState enState);
|
||||
|
||||
switch (enState) {
|
||||
case EnumState::STATE_SUBMITTED:
|
||||
case EnumState::STATE_RUNNING:
|
||||
case EnumState::STATE_RUNNING_DIFFERENTIAL:
|
||||
case EnumState::STATE_PARTIALLY_ABORTED:
|
||||
isRunnable = true;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
static KeyRef getDefaultTag() { return StringRef(defaultTagName); }
|
||||
|
||||
return isRunnable;
|
||||
}
|
||||
|
||||
static const KeyRef getDefaultTag() { return StringRef(defaultTagName); }
|
||||
|
||||
static const std::string getDefaultTagName() { return defaultTagName; }
|
||||
static std::string getDefaultTagName() { return defaultTagName; }
|
||||
|
||||
// This is only used for automatic backup name generation
|
||||
static Standalone<StringRef> getCurrentTime() {
|
||||
double t = now();
|
||||
time_t curTime = t;
|
||||
char buffer[128];
|
||||
struct tm* timeinfo;
|
||||
timeinfo = localtime(&curTime);
|
||||
strftime(buffer, 128, "%Y-%m-%d-%H-%M-%S", timeinfo);
|
||||
|
||||
std::string time(buffer);
|
||||
return StringRef(time + format(".%06d", (int)(1e6 * (t - curTime))));
|
||||
}
|
||||
static Standalone<StringRef> getCurrentTime();
|
||||
|
||||
protected:
|
||||
static const std::string defaultTagName;
|
||||
|
@ -249,7 +145,11 @@ public:
|
|||
|
||||
KeyBackedProperty<Key> lastBackupTimestamp() { return config.pack(LiteralStringRef(__FUNCTION__)); }
|
||||
|
||||
Future<Void> run(Database cx, double* pollDelay, int maxConcurrentTasks) {
|
||||
Future<Void> run(Database cx, double pollDelay, int maxConcurrentTasks) {
|
||||
return taskBucket->run(cx, futureBucket, std::make_shared<double const>(pollDelay), maxConcurrentTasks);
|
||||
}
|
||||
|
||||
Future<Void> run(Database cx, std::shared_ptr<double const> pollDelay, int maxConcurrentTasks) {
|
||||
return taskBucket->run(cx, futureBucket, pollDelay, maxConcurrentTasks);
|
||||
}
|
||||
|
||||
|
@ -260,13 +160,13 @@ public:
|
|||
static Key getPauseKey();
|
||||
|
||||
// parallel restore
|
||||
Future<Void> parallelRestoreFinish(Database cx, UID randomUID, bool unlockDB = true);
|
||||
Future<Void> parallelRestoreFinish(Database cx, UID randomUID, UnlockDB = UnlockDB::True);
|
||||
Future<Void> submitParallelRestore(Database cx,
|
||||
Key backupTag,
|
||||
Standalone<VectorRef<KeyRangeRef>> backupRanges,
|
||||
Key bcUrl,
|
||||
Version targetVersion,
|
||||
bool lockDB,
|
||||
LockDB lockDB,
|
||||
UID randomUID,
|
||||
Key addPrefix,
|
||||
Key removePrefix);
|
||||
|
@ -288,29 +188,31 @@ public:
|
|||
Key tagName,
|
||||
Key url,
|
||||
Standalone<VectorRef<KeyRangeRef>> ranges,
|
||||
bool waitForComplete = true,
|
||||
Version targetVersion = -1,
|
||||
bool verbose = true,
|
||||
WaitForComplete = WaitForComplete::True,
|
||||
Version targetVersion = ::invalidVersion,
|
||||
Verbose = Verbose::True,
|
||||
Key addPrefix = Key(),
|
||||
Key removePrefix = Key(),
|
||||
bool lockDB = true,
|
||||
bool onlyAppyMutationLogs = false,
|
||||
bool inconsistentSnapshotOnly = false,
|
||||
Version beginVersion = -1);
|
||||
LockDB = LockDB::True,
|
||||
OnlyApplyMutationLogs = OnlyApplyMutationLogs::False,
|
||||
InconsistentSnapshotOnly = InconsistentSnapshotOnly::False,
|
||||
Version beginVersion = ::invalidVersion,
|
||||
Optional<std::string> const& encryptionKeyFileName = {});
|
||||
Future<Version> restore(Database cx,
|
||||
Optional<Database> cxOrig,
|
||||
Key tagName,
|
||||
Key url,
|
||||
bool waitForComplete = true,
|
||||
Version targetVersion = -1,
|
||||
bool verbose = true,
|
||||
WaitForComplete waitForComplete = WaitForComplete::True,
|
||||
Version targetVersion = ::invalidVersion,
|
||||
Verbose verbose = Verbose::True,
|
||||
KeyRange range = normalKeys,
|
||||
Key addPrefix = Key(),
|
||||
Key removePrefix = Key(),
|
||||
bool lockDB = true,
|
||||
bool onlyAppyMutationLogs = false,
|
||||
bool inconsistentSnapshotOnly = false,
|
||||
Version beginVersion = -1) {
|
||||
LockDB lockDB = LockDB::True,
|
||||
OnlyApplyMutationLogs onlyApplyMutationLogs = OnlyApplyMutationLogs::False,
|
||||
InconsistentSnapshotOnly inconsistentSnapshotOnly = InconsistentSnapshotOnly::False,
|
||||
Version beginVersion = ::invalidVersion,
|
||||
Optional<std::string> const& encryptionKeyFileName = {}) {
|
||||
Standalone<VectorRef<KeyRangeRef>> rangeRef;
|
||||
rangeRef.push_back_deep(rangeRef.arena(), range);
|
||||
return restore(cx,
|
||||
|
@ -324,9 +226,10 @@ public:
|
|||
addPrefix,
|
||||
removePrefix,
|
||||
lockDB,
|
||||
onlyAppyMutationLogs,
|
||||
onlyApplyMutationLogs,
|
||||
inconsistentSnapshotOnly,
|
||||
beginVersion);
|
||||
beginVersion,
|
||||
encryptionKeyFileName);
|
||||
}
|
||||
Future<Version> atomicRestore(Database cx,
|
||||
Key tagName,
|
||||
|
@ -347,7 +250,7 @@ public:
|
|||
Future<ERestoreState> abortRestore(Database cx, Key tagName);
|
||||
|
||||
// Waits for a restore tag to reach a final (stable) state.
|
||||
Future<ERestoreState> waitRestore(Database cx, Key tagName, bool verbose);
|
||||
Future<ERestoreState> waitRestore(Database cx, Key tagName, Verbose);
|
||||
|
||||
// Get a string describing the status of a tag
|
||||
Future<std::string> restoreStatus(Reference<ReadYourWritesTransaction> tr, Key tagName);
|
||||
|
@ -362,20 +265,22 @@ public:
|
|||
Key outContainer,
|
||||
int initialSnapshotIntervalSeconds,
|
||||
int snapshotIntervalSeconds,
|
||||
std::string tagName,
|
||||
std::string const& tagName,
|
||||
Standalone<VectorRef<KeyRangeRef>> backupRanges,
|
||||
bool stopWhenDone = true,
|
||||
bool partitionedLog = false,
|
||||
bool incrementalBackupOnly = false);
|
||||
StopWhenDone = StopWhenDone::True,
|
||||
UsePartitionedLog = UsePartitionedLog::False,
|
||||
IncrementalBackupOnly = IncrementalBackupOnly::False,
|
||||
Optional<std::string> const& encryptionKeyFileName = {});
|
||||
Future<Void> submitBackup(Database cx,
|
||||
Key outContainer,
|
||||
int initialSnapshotIntervalSeconds,
|
||||
int snapshotIntervalSeconds,
|
||||
std::string tagName,
|
||||
std::string const& tagName,
|
||||
Standalone<VectorRef<KeyRangeRef>> backupRanges,
|
||||
bool stopWhenDone = true,
|
||||
bool partitionedLog = false,
|
||||
bool incrementalBackupOnly = false) {
|
||||
StopWhenDone stopWhenDone = StopWhenDone::True,
|
||||
UsePartitionedLog partitionedLog = UsePartitionedLog::False,
|
||||
IncrementalBackupOnly incrementalBackupOnly = IncrementalBackupOnly::False,
|
||||
Optional<std::string> const& encryptionKeyFileName = {}) {
|
||||
return runRYWTransactionFailIfLocked(cx, [=](Reference<ReadYourWritesTransaction> tr) {
|
||||
return submitBackup(tr,
|
||||
outContainer,
|
||||
|
@ -385,7 +290,8 @@ public:
|
|||
backupRanges,
|
||||
stopWhenDone,
|
||||
partitionedLog,
|
||||
incrementalBackupOnly);
|
||||
incrementalBackupOnly,
|
||||
encryptionKeyFileName);
|
||||
});
|
||||
}
|
||||
|
||||
|
@ -407,19 +313,19 @@ public:
|
|||
return runRYWTransaction(cx, [=](Reference<ReadYourWritesTransaction> tr) { return abortBackup(tr, tagName); });
|
||||
}
|
||||
|
||||
Future<std::string> getStatus(Database cx, bool showErrors, std::string tagName);
|
||||
Future<std::string> getStatus(Database cx, ShowErrors, std::string tagName);
|
||||
Future<std::string> getStatusJSON(Database cx, std::string tagName);
|
||||
|
||||
Future<Optional<Version>> getLastRestorable(Reference<ReadYourWritesTransaction> tr,
|
||||
Key tagName,
|
||||
bool snapshot = false);
|
||||
Snapshot = Snapshot::False);
|
||||
void setLastRestorable(Reference<ReadYourWritesTransaction> tr, Key tagName, Version version);
|
||||
|
||||
// stopWhenDone will return when the backup is stopped, if enabled. Otherwise, it
|
||||
// will return when the backup directory is restorable.
|
||||
Future<EnumState> waitBackup(Database cx,
|
||||
std::string tagName,
|
||||
bool stopWhenDone = true,
|
||||
StopWhenDone = StopWhenDone::True,
|
||||
Reference<IBackupContainer>* pContainer = nullptr,
|
||||
UID* pUID = nullptr);
|
||||
|
||||
|
@ -462,8 +368,8 @@ public:
|
|||
DatabaseBackupAgent(DatabaseBackupAgent&& r) noexcept
|
||||
: subspace(std::move(r.subspace)), states(std::move(r.states)), config(std::move(r.config)),
|
||||
errors(std::move(r.errors)), ranges(std::move(r.ranges)), tagNames(std::move(r.tagNames)),
|
||||
taskBucket(std::move(r.taskBucket)), futureBucket(std::move(r.futureBucket)),
|
||||
sourceStates(std::move(r.sourceStates)), sourceTagNames(std::move(r.sourceTagNames)) {}
|
||||
sourceStates(std::move(r.sourceStates)), sourceTagNames(std::move(r.sourceTagNames)),
|
||||
taskBucket(std::move(r.taskBucket)), futureBucket(std::move(r.futureBucket)) {}
|
||||
|
||||
void operator=(DatabaseBackupAgent&& r) noexcept {
|
||||
subspace = std::move(r.subspace);
|
||||
|
@ -478,7 +384,11 @@ public:
|
|||
sourceTagNames = std::move(r.sourceTagNames);
|
||||
}
|
||||
|
||||
Future<Void> run(Database cx, double* pollDelay, int maxConcurrentTasks) {
|
||||
Future<Void> run(Database cx, double pollDelay, int maxConcurrentTasks) {
|
||||
return taskBucket->run(cx, futureBucket, std::make_shared<double const>(pollDelay), maxConcurrentTasks);
|
||||
}
|
||||
|
||||
Future<Void> run(Database cx, std::shared_ptr<double const> pollDelay, int maxConcurrentTasks) {
|
||||
return taskBucket->run(cx, futureBucket, pollDelay, maxConcurrentTasks);
|
||||
}
|
||||
|
||||
|
@ -487,7 +397,7 @@ public:
|
|||
Standalone<VectorRef<KeyRangeRef>> backupRanges,
|
||||
Key addPrefix,
|
||||
Key removePrefix,
|
||||
bool forceAction = false);
|
||||
ForceAction = ForceAction::False);
|
||||
|
||||
Future<Void> unlockBackup(Reference<ReadYourWritesTransaction> tr, Key tagName);
|
||||
Future<Void> unlockBackup(Database cx, Key tagName) {
|
||||
|
@ -506,18 +416,18 @@ public:
|
|||
Future<Void> submitBackup(Reference<ReadYourWritesTransaction> tr,
|
||||
Key tagName,
|
||||
Standalone<VectorRef<KeyRangeRef>> backupRanges,
|
||||
bool stopWhenDone = true,
|
||||
StopWhenDone = StopWhenDone::True,
|
||||
Key addPrefix = StringRef(),
|
||||
Key removePrefix = StringRef(),
|
||||
bool lockDatabase = false,
|
||||
LockDB lockDatabase = LockDB::False,
|
||||
PreBackupAction backupAction = PreBackupAction::VERIFY);
|
||||
Future<Void> submitBackup(Database cx,
|
||||
Key tagName,
|
||||
Standalone<VectorRef<KeyRangeRef>> backupRanges,
|
||||
bool stopWhenDone = true,
|
||||
StopWhenDone stopWhenDone = StopWhenDone::True,
|
||||
Key addPrefix = StringRef(),
|
||||
Key removePrefix = StringRef(),
|
||||
bool lockDatabase = false,
|
||||
LockDB lockDatabase = LockDB::False,
|
||||
PreBackupAction backupAction = PreBackupAction::VERIFY) {
|
||||
return runRYWTransaction(cx, [=](Reference<ReadYourWritesTransaction> tr) {
|
||||
return submitBackup(
|
||||
|
@ -533,35 +443,36 @@ public:
|
|||
|
||||
Future<Void> abortBackup(Database cx,
|
||||
Key tagName,
|
||||
bool partial = false,
|
||||
bool abortOldBackup = false,
|
||||
bool dstOnly = false,
|
||||
bool waitForDestUID = false);
|
||||
PartialBackup = PartialBackup::False,
|
||||
AbortOldBackup = AbortOldBackup::False,
|
||||
DstOnly = DstOnly::False,
|
||||
WaitForDestUID = WaitForDestUID::False);
|
||||
|
||||
Future<std::string> getStatus(Database cx, int errorLimit, Key tagName);
|
||||
|
||||
Future<EnumState> getStateValue(Reference<ReadYourWritesTransaction> tr, UID logUid, bool snapshot = false);
|
||||
Future<EnumState> getStateValue(Reference<ReadYourWritesTransaction> tr, UID logUid, Snapshot = Snapshot::False);
|
||||
Future<EnumState> getStateValue(Database cx, UID logUid) {
|
||||
return runRYWTransaction(cx,
|
||||
[=](Reference<ReadYourWritesTransaction> tr) { return getStateValue(tr, logUid); });
|
||||
}
|
||||
|
||||
Future<UID> getDestUid(Reference<ReadYourWritesTransaction> tr, UID logUid, bool snapshot = false);
|
||||
Future<UID> getDestUid(Reference<ReadYourWritesTransaction> tr, UID logUid, Snapshot = Snapshot::False);
|
||||
Future<UID> getDestUid(Database cx, UID logUid) {
|
||||
return runRYWTransaction(cx, [=](Reference<ReadYourWritesTransaction> tr) { return getDestUid(tr, logUid); });
|
||||
}
|
||||
|
||||
Future<UID> getLogUid(Reference<ReadYourWritesTransaction> tr, Key tagName, bool snapshot = false);
|
||||
Future<UID> getLogUid(Reference<ReadYourWritesTransaction> tr, Key tagName, Snapshot = Snapshot::False);
|
||||
Future<UID> getLogUid(Database cx, Key tagName) {
|
||||
return runRYWTransaction(cx, [=](Reference<ReadYourWritesTransaction> tr) { return getLogUid(tr, tagName); });
|
||||
}
|
||||
|
||||
Future<int64_t> getRangeBytesWritten(Reference<ReadYourWritesTransaction> tr, UID logUid, bool snapshot = false);
|
||||
Future<int64_t> getLogBytesWritten(Reference<ReadYourWritesTransaction> tr, UID logUid, bool snapshot = false);
|
||||
|
||||
Future<int64_t> getRangeBytesWritten(Reference<ReadYourWritesTransaction> tr,
|
||||
UID logUid,
|
||||
Snapshot = Snapshot::False);
|
||||
Future<int64_t> getLogBytesWritten(Reference<ReadYourWritesTransaction> tr, UID logUid, Snapshot = Snapshot::False);
|
||||
// stopWhenDone will return when the backup is stopped, if enabled. Otherwise, it
|
||||
// will return when the backup directory is restorable.
|
||||
Future<EnumState> waitBackup(Database cx, Key tagName, bool stopWhenDone = true);
|
||||
Future<EnumState> waitBackup(Database cx, Key tagName, StopWhenDone = StopWhenDone::True);
|
||||
Future<EnumState> waitSubmitted(Database cx, Key tagName);
|
||||
Future<Void> waitUpgradeToLatestDrVersion(Database cx, Key tagName);
|
||||
|
||||
|
@ -619,7 +530,7 @@ Future<Void> eraseLogData(Reference<ReadYourWritesTransaction> tr,
|
|||
Key logUidValue,
|
||||
Key destUidValue,
|
||||
Optional<Version> endVersion = Optional<Version>(),
|
||||
bool checkBackupUid = false,
|
||||
CheckBackupUID = CheckBackupUID::False,
|
||||
Version backupUid = 0);
|
||||
Key getApplyKey(Version version, Key backupUid);
|
||||
Version getLogKeyVersion(Key key);
|
||||
|
@ -631,18 +542,18 @@ ACTOR Future<Void> readCommitted(Database cx,
|
|||
PromiseStream<RangeResultWithVersion> results,
|
||||
Reference<FlowLock> lock,
|
||||
KeyRangeRef range,
|
||||
bool terminator = true,
|
||||
bool systemAccess = false,
|
||||
bool lockAware = false);
|
||||
Terminator terminator = Terminator::True,
|
||||
AccessSystemKeys systemAccess = AccessSystemKeys::False,
|
||||
LockAware lockAware = LockAware::False);
|
||||
ACTOR Future<Void> readCommitted(Database cx,
|
||||
PromiseStream<RCGroup> results,
|
||||
Future<Void> active,
|
||||
Reference<FlowLock> lock,
|
||||
KeyRangeRef range,
|
||||
std::function<std::pair<uint64_t, uint32_t>(Key key)> groupBy,
|
||||
bool terminator = true,
|
||||
bool systemAccess = false,
|
||||
bool lockAware = false);
|
||||
Terminator terminator = Terminator::True,
|
||||
AccessSystemKeys systemAccess = AccessSystemKeys::False,
|
||||
LockAware lockAware = LockAware::False);
|
||||
ACTOR Future<Void> applyMutations(Database cx,
|
||||
Key uid,
|
||||
Key addPrefix,
|
||||
|
@ -652,7 +563,7 @@ ACTOR Future<Void> applyMutations(Database cx,
|
|||
RequestStream<CommitTransactionRequest> commit,
|
||||
NotifiedVersion* committedVersion,
|
||||
Reference<KeyRangeMap<Version>> keyVersion);
|
||||
ACTOR Future<Void> cleanupBackup(Database cx, bool deleteData);
|
||||
ACTOR Future<Void> cleanupBackup(Database cx, DeleteData deleteData);
|
||||
|
||||
using EBackupState = BackupAgentBase::EnumState;
|
||||
template <>
|
||||
|
@ -695,14 +606,15 @@ public:
|
|||
typedef KeyBackedMap<std::string, UidAndAbortedFlagT> TagMap;
|
||||
// Map of tagName to {UID, aborted_flag} located in the fileRestorePrefixRange keyspace.
|
||||
class TagUidMap : public KeyBackedMap<std::string, UidAndAbortedFlagT> {
|
||||
ACTOR static Future<std::vector<KeyBackedTag>> getAll_impl(TagUidMap* tagsMap,
|
||||
Reference<ReadYourWritesTransaction> tr,
|
||||
Snapshot snapshot);
|
||||
|
||||
public:
|
||||
TagUidMap(const StringRef& prefix) : TagMap(LiteralStringRef("tag->uid/").withPrefix(prefix)), prefix(prefix) {}
|
||||
|
||||
ACTOR static Future<std::vector<KeyBackedTag>> getAll_impl(TagUidMap* tagsMap,
|
||||
Reference<ReadYourWritesTransaction> tr,
|
||||
bool snapshot);
|
||||
|
||||
Future<std::vector<KeyBackedTag>> getAll(Reference<ReadYourWritesTransaction> tr, bool snapshot = false) {
|
||||
Future<std::vector<KeyBackedTag>> getAll(Reference<ReadYourWritesTransaction> tr,
|
||||
Snapshot snapshot = Snapshot::False) {
|
||||
return getAll_impl(this, tr, snapshot);
|
||||
}
|
||||
|
||||
|
@ -718,12 +630,12 @@ static inline KeyBackedTag makeBackupTag(std::string tagName) {
|
|||
}
|
||||
|
||||
static inline Future<std::vector<KeyBackedTag>> getAllRestoreTags(Reference<ReadYourWritesTransaction> tr,
|
||||
bool snapshot = false) {
|
||||
Snapshot snapshot = Snapshot::False) {
|
||||
return TagUidMap(fileRestorePrefixRange.begin).getAll(tr, snapshot);
|
||||
}
|
||||
|
||||
static inline Future<std::vector<KeyBackedTag>> getAllBackupTags(Reference<ReadYourWritesTransaction> tr,
|
||||
bool snapshot = false) {
|
||||
Snapshot snapshot = Snapshot::False) {
|
||||
return TagUidMap(fileBackupPrefixRange.begin).getAll(tr, snapshot);
|
||||
}
|
||||
|
||||
|
@ -738,7 +650,9 @@ public:
|
|||
|
||||
KeyBackedConfig(StringRef prefix, Reference<Task> task) : KeyBackedConfig(prefix, TaskParams.uid().get(task)) {}
|
||||
|
||||
Future<Void> toTask(Reference<ReadYourWritesTransaction> tr, Reference<Task> task, bool setValidation = true) {
|
||||
Future<Void> toTask(Reference<ReadYourWritesTransaction> tr,
|
||||
Reference<Task> task,
|
||||
SetValidation setValidation = SetValidation::True) {
|
||||
// Set the uid task parameter
|
||||
TaskParams.uid().set(task, uid);
|
||||
|
||||
|
@ -803,11 +717,22 @@ protected:
|
|||
|
||||
template <>
|
||||
inline Tuple Codec<Reference<IBackupContainer>>::pack(Reference<IBackupContainer> const& bc) {
|
||||
return Tuple().append(StringRef(bc->getURL()));
|
||||
Tuple tuple;
|
||||
tuple.append(StringRef(bc->getURL()));
|
||||
if (bc->getEncryptionKeyFileName().present()) {
|
||||
tuple.append(bc->getEncryptionKeyFileName().get());
|
||||
}
|
||||
return tuple;
|
||||
}
|
||||
template <>
|
||||
inline Reference<IBackupContainer> Codec<Reference<IBackupContainer>>::unpack(Tuple const& val) {
|
||||
return IBackupContainer::openContainer(val.getString(0).toString());
|
||||
ASSERT(val.size() == 1 || val.size() == 2);
|
||||
auto url = val.getString(0).toString();
|
||||
Optional<std::string> encryptionKeyFileName;
|
||||
if (val.size() == 2) {
|
||||
encryptionKeyFileName = val.getString(1).toString();
|
||||
}
|
||||
return IBackupContainer::openContainer(url, encryptionKeyFileName);
|
||||
}
|
||||
|
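The pack/unpack pair above stores the backup URL plus, only when present, the encryption key file name, and unpack accepts both the old one-element and the new two-element tuples. A rough standard-library sketch of that optional-field round trip (illustrative; the real code uses flow's Tuple and IBackupContainer):

	#include <optional>
	#include <string>
	#include <utility>
	#include <vector>

	using PackedContainer = std::vector<std::string>; // stand-in for Tuple

	PackedContainer packContainer(const std::string& url, const std::optional<std::string>& keyFile) {
		PackedContainer t{ url };
		if (keyFile) {
			t.push_back(*keyFile); // second element only when an encryption key file is set
		}
		return t;
	}

	std::pair<std::string, std::optional<std::string>> unpackContainer(const PackedContainer& t) {
		std::optional<std::string> keyFile;
		if (t.size() == 2) { // tolerate both encodings
			keyFile = t[1];
		}
		return { t[0], keyFile };
	}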
||||
class BackupConfig : public KeyBackedConfig {
|
||||
|
@ -1056,6 +981,11 @@ ACTOR Future<Standalone<VectorRef<KeyValueRef>>> decodeRangeFileBlock(Reference<
|
|||
int64_t offset,
|
||||
int len);
|
||||
|
||||
// Reads a mutation log block from file and parses into batch mutation blocks for further parsing.
|
||||
ACTOR Future<Standalone<VectorRef<KeyValueRef>>> decodeMutationLogFileBlock(Reference<IAsyncFile> file,
|
||||
int64_t offset,
|
||||
int len);
|
||||
|
||||
// Return a block of contiguous padding bytes "\0xff" for backup files, growing if needed.
|
||||
Value makePadding(int size);
|
||||
} // namespace fileBackup
|
||||
|
|
|
@ -26,6 +26,24 @@
|
|||
#include "flow/ActorCollection.h"
|
||||
#include "flow/actorcompiler.h" // has to be last include
|
||||
|
||||
FDB_DEFINE_BOOLEAN_PARAM(LockDB);
|
||||
FDB_DEFINE_BOOLEAN_PARAM(UnlockDB);
|
||||
FDB_DEFINE_BOOLEAN_PARAM(StopWhenDone);
|
||||
FDB_DEFINE_BOOLEAN_PARAM(Verbose);
|
||||
FDB_DEFINE_BOOLEAN_PARAM(WaitForComplete);
|
||||
FDB_DEFINE_BOOLEAN_PARAM(ForceAction);
|
||||
FDB_DEFINE_BOOLEAN_PARAM(Terminator);
|
||||
FDB_DEFINE_BOOLEAN_PARAM(UsePartitionedLog);
|
||||
FDB_DEFINE_BOOLEAN_PARAM(InconsistentSnapshotOnly);
|
||||
FDB_DEFINE_BOOLEAN_PARAM(ShowErrors);
|
||||
FDB_DEFINE_BOOLEAN_PARAM(AbortOldBackup);
|
||||
FDB_DEFINE_BOOLEAN_PARAM(DstOnly);
|
||||
FDB_DEFINE_BOOLEAN_PARAM(WaitForDestUID);
|
||||
FDB_DEFINE_BOOLEAN_PARAM(CheckBackupUID);
|
||||
FDB_DEFINE_BOOLEAN_PARAM(DeleteData);
|
||||
FDB_DEFINE_BOOLEAN_PARAM(SetValidation);
|
||||
FDB_DEFINE_BOOLEAN_PARAM(PartialBackup);
|
||||
|
||||
std::string BackupAgentBase::formatTime(int64_t epochs) {
|
||||
time_t curTime = (time_t)epochs;
|
||||
char buffer[30];
|
||||
|
@ -95,32 +113,33 @@ int64_t BackupAgentBase::parseTime(std::string timestamp) {
|
|||
return ts;
|
||||
}
|
||||
|
||||
const Key BackupAgentBase::keyFolderId = LiteralStringRef("config_folderid");
|
||||
const Key BackupAgentBase::keyBeginVersion = LiteralStringRef("beginVersion");
|
||||
const Key BackupAgentBase::keyEndVersion = LiteralStringRef("endVersion");
|
||||
const Key BackupAgentBase::keyPrevBeginVersion = LiteralStringRef("prevBeginVersion");
|
||||
const Key BackupAgentBase::keyConfigBackupTag = LiteralStringRef("config_backup_tag");
|
||||
const Key BackupAgentBase::keyConfigLogUid = LiteralStringRef("config_log_uid");
|
||||
const Key BackupAgentBase::keyConfigBackupRanges = LiteralStringRef("config_backup_ranges");
|
||||
const Key BackupAgentBase::keyConfigStopWhenDoneKey = LiteralStringRef("config_stop_when_done");
|
||||
const Key BackupAgentBase::keyStateStop = LiteralStringRef("state_stop");
|
||||
const Key BackupAgentBase::keyStateStatus = LiteralStringRef("state_status");
|
||||
const Key BackupAgentBase::keyLastUid = LiteralStringRef("last_uid");
|
||||
const Key BackupAgentBase::keyBeginKey = LiteralStringRef("beginKey");
|
||||
const Key BackupAgentBase::keyEndKey = LiteralStringRef("endKey");
|
||||
const Key BackupAgentBase::keyDrVersion = LiteralStringRef("drVersion");
|
||||
const Key BackupAgentBase::destUid = LiteralStringRef("destUid");
|
||||
const Key BackupAgentBase::backupStartVersion = LiteralStringRef("backupStartVersion");
|
||||
const Key BackupAgentBase::keyFolderId = "config_folderid"_sr;
|
||||
const Key BackupAgentBase::keyBeginVersion = "beginVersion"_sr;
|
||||
const Key BackupAgentBase::keyEndVersion = "endVersion"_sr;
|
||||
const Key BackupAgentBase::keyPrevBeginVersion = "prevBeginVersion"_sr;
|
||||
const Key BackupAgentBase::keyConfigBackupTag = "config_backup_tag"_sr;
|
||||
const Key BackupAgentBase::keyConfigLogUid = "config_log_uid"_sr;
|
||||
const Key BackupAgentBase::keyConfigBackupRanges = "config_backup_ranges"_sr;
|
||||
const Key BackupAgentBase::keyConfigStopWhenDoneKey = "config_stop_when_done"_sr;
|
||||
const Key BackupAgentBase::keyStateStop = "state_stop"_sr;
|
||||
const Key BackupAgentBase::keyStateStatus = "state_status"_sr;
|
||||
const Key BackupAgentBase::keyStateLogBeginVersion = "last_begin_version"_sr;
|
||||
const Key BackupAgentBase::keyLastUid = "last_uid"_sr;
|
||||
const Key BackupAgentBase::keyBeginKey = "beginKey"_sr;
|
||||
const Key BackupAgentBase::keyEndKey = "endKey"_sr;
|
||||
const Key BackupAgentBase::keyDrVersion = "drVersion"_sr;
|
||||
const Key BackupAgentBase::destUid = "destUid"_sr;
|
||||
const Key BackupAgentBase::backupStartVersion = "backupStartVersion"_sr;
|
||||
|
||||
const Key BackupAgentBase::keyTagName = LiteralStringRef("tagname");
|
||||
const Key BackupAgentBase::keyStates = LiteralStringRef("state");
|
||||
const Key BackupAgentBase::keyConfig = LiteralStringRef("config");
|
||||
const Key BackupAgentBase::keyErrors = LiteralStringRef("errors");
|
||||
const Key BackupAgentBase::keyRanges = LiteralStringRef("ranges");
|
||||
const Key BackupAgentBase::keyTasks = LiteralStringRef("tasks");
|
||||
const Key BackupAgentBase::keyFutures = LiteralStringRef("futures");
|
||||
const Key BackupAgentBase::keySourceStates = LiteralStringRef("source_states");
|
||||
const Key BackupAgentBase::keySourceTagName = LiteralStringRef("source_tagname");
|
||||
const Key BackupAgentBase::keyTagName = "tagname"_sr;
|
||||
const Key BackupAgentBase::keyStates = "state"_sr;
|
||||
const Key BackupAgentBase::keyConfig = "config"_sr;
|
||||
const Key BackupAgentBase::keyErrors = "errors"_sr;
|
||||
const Key BackupAgentBase::keyRanges = "ranges"_sr;
|
||||
const Key BackupAgentBase::keyTasks = "tasks"_sr;
|
||||
const Key BackupAgentBase::keyFutures = "futures"_sr;
|
||||
const Key BackupAgentBase::keySourceStates = "source_states"_sr;
|
||||
const Key BackupAgentBase::keySourceTagName = "source_tagname"_sr;
|
||||
|
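The constants above move from LiteralStringRef("...") to the "..."_sr literal suffix. For readers unfamiliar with the idiom, a user-defined literal of this kind is declared roughly as follows; the suffix and type names here are placeholders, and the real operator in flow returns a StringRef over the literal's bytes:

	#include <cstddef>

	struct StringRefSketch {
		const char* data;
		std::size_t size;
	};

	constexpr StringRefSketch operator""_srs(const char* str, std::size_t len) {
		return StringRefSketch{ str, len }; // length known at compile time, no strlen or copy
	}

	// constexpr auto kFolderId = "config_folderid"_srs;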
||||
bool copyParameter(Reference<Task> source, Reference<Task> dest, Key key) {
|
||||
if (source) {
|
||||
|
@ -374,9 +393,9 @@ ACTOR Future<Void> readCommitted(Database cx,
|
|||
PromiseStream<RangeResultWithVersion> results,
|
||||
Reference<FlowLock> lock,
|
||||
KeyRangeRef range,
|
||||
bool terminator,
|
||||
bool systemAccess,
|
||||
bool lockAware) {
|
||||
Terminator terminator,
|
||||
AccessSystemKeys systemAccess,
|
||||
LockAware lockAware) {
|
||||
state KeySelector begin = firstGreaterOrEqual(range.begin);
|
||||
state KeySelector end = firstGreaterOrEqual(range.end);
|
||||
state Transaction tr(cx);
|
||||
|
@ -450,9 +469,9 @@ ACTOR Future<Void> readCommitted(Database cx,
|
|||
Reference<FlowLock> lock,
|
||||
KeyRangeRef range,
|
||||
std::function<std::pair<uint64_t, uint32_t>(Key key)> groupBy,
|
||||
bool terminator,
|
||||
bool systemAccess,
|
||||
bool lockAware) {
|
||||
Terminator terminator,
|
||||
AccessSystemKeys systemAccess,
|
||||
LockAware lockAware) {
|
||||
state KeySelector nextKey = firstGreaterOrEqual(range.begin);
|
||||
state KeySelector end = firstGreaterOrEqual(range.end);
|
||||
|
||||
|
@ -559,7 +578,8 @@ Future<Void> readCommitted(Database cx,
|
|||
Reference<FlowLock> lock,
|
||||
KeyRangeRef range,
|
||||
std::function<std::pair<uint64_t, uint32_t>(Key key)> groupBy) {
|
||||
return readCommitted(cx, results, Void(), lock, range, groupBy, true, true, true);
|
||||
return readCommitted(
|
||||
cx, results, Void(), lock, range, groupBy, Terminator::True, AccessSystemKeys::True, LockAware::True);
|
||||
}
|
||||
|
||||
ACTOR Future<int> dumpData(Database cx,
|
||||
|
@@ -770,7 +790,7 @@ ACTOR static Future<Void> _eraseLogData(Reference<ReadYourWritesTransaction> tr,
Key logUidValue,
Key destUidValue,
Optional<Version> endVersion,
bool checkBackupUid,
CheckBackupUID checkBackupUid,
Version backupUid) {
state Key backupLatestVersionsPath = destUidValue.withPrefix(backupLatestVersionsPrefix);
state Key backupLatestVersionsKey = logUidValue.withPrefix(backupLatestVersionsPath);

@@ -898,7 +918,7 @@ Future<Void> eraseLogData(Reference<ReadYourWritesTransaction> tr,
Key logUidValue,
Key destUidValue,
Optional<Version> endVersion,
bool checkBackupUid,
CheckBackupUID checkBackupUid,
Version backupUid) {
return _eraseLogData(tr, logUidValue, destUidValue, endVersion, checkBackupUid, backupUid);
}

@@ -995,7 +1015,7 @@ ACTOR Future<Void> cleanupLogMutations(Database cx, Value destUidValue, bool del
}
}

ACTOR Future<Void> cleanupBackup(Database cx, bool deleteData) {
ACTOR Future<Void> cleanupBackup(Database cx, DeleteData deleteData) {
state Reference<ReadYourWritesTransaction> tr(new ReadYourWritesTransaction(cx));
loop {
try {

@@ -1014,3 +1034,124 @@ ACTOR Future<Void> cleanupBackup(Database cx, bool deleteData) {
}
}
}

// Convert the status text to an enumerated value
BackupAgentBase::EnumState BackupAgentBase::getState(std::string const& stateText) {
auto enState = EnumState::STATE_ERRORED;

if (stateText.empty()) {
enState = EnumState::STATE_NEVERRAN;
}

else if (!stateText.compare("has been submitted")) {
enState = EnumState::STATE_SUBMITTED;
}

else if (!stateText.compare("has been started")) {
enState = EnumState::STATE_RUNNING;
}

else if (!stateText.compare("is differential")) {
enState = EnumState::STATE_RUNNING_DIFFERENTIAL;
}

else if (!stateText.compare("has been completed")) {
enState = EnumState::STATE_COMPLETED;
}

else if (!stateText.compare("has been aborted")) {
enState = EnumState::STATE_ABORTED;
}

else if (!stateText.compare("has been partially aborted")) {
enState = EnumState::STATE_PARTIALLY_ABORTED;
}

return enState;
}

const char* BackupAgentBase::getStateText(EnumState enState) {
const char* stateText;

switch (enState) {
case EnumState::STATE_ERRORED:
stateText = "has errored";
break;
case EnumState::STATE_NEVERRAN:
stateText = "has never been started";
break;
case EnumState::STATE_SUBMITTED:
stateText = "has been submitted";
break;
case EnumState::STATE_RUNNING:
stateText = "has been started";
break;
case EnumState::STATE_RUNNING_DIFFERENTIAL:
stateText = "is differential";
break;
case EnumState::STATE_COMPLETED:
stateText = "has been completed";
break;
case EnumState::STATE_ABORTED:
stateText = "has been aborted";
break;
case EnumState::STATE_PARTIALLY_ABORTED:
stateText = "has been partially aborted";
break;
default:
stateText = "<undefined>";
break;
}

return stateText;
}

const char* BackupAgentBase::getStateName(EnumState enState) {
switch (enState) {
case EnumState::STATE_ERRORED:
return "Errored";
case EnumState::STATE_NEVERRAN:
return "NeverRan";
case EnumState::STATE_SUBMITTED:
return "Submitted";
break;
case EnumState::STATE_RUNNING:
return "Running";
case EnumState::STATE_RUNNING_DIFFERENTIAL:
return "RunningDifferentially";
case EnumState::STATE_COMPLETED:
return "Completed";
case EnumState::STATE_ABORTED:
return "Aborted";
case EnumState::STATE_PARTIALLY_ABORTED:
return "Aborting";
default:
return "<undefined>";
}
}

bool BackupAgentBase::isRunnable(EnumState enState) {
switch (enState) {
case EnumState::STATE_SUBMITTED:
case EnumState::STATE_RUNNING:
case EnumState::STATE_RUNNING_DIFFERENTIAL:
case EnumState::STATE_PARTIALLY_ABORTED:
return true;
default:
return false;
}
}

Standalone<StringRef> BackupAgentBase::getCurrentTime() {
double t = now();
time_t curTime = t;
char buffer[128];
struct tm* timeinfo;
timeinfo = localtime(&curTime);
strftime(buffer, 128, "%Y-%m-%d-%H-%M-%S", timeinfo);

std::string time(buffer);
return StringRef(time + format(".%06d", (int)(1e6 * (t - curTime))));
}

std::string const BackupAgentBase::defaultTagName = "default";
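getState() above is the inverse mapping of getStateText(): the human-readable status strings that the backup tooling prints are parsed back into EnumState values. A small usage sketch, with FDB's ASSERT macro used purely for illustration:

// Hypothetical round-trip check for the text/enum mapping shown above.
using EnumState = BackupAgentBase::EnumState;
ASSERT(BackupAgentBase::getState(BackupAgentBase::getStateText(EnumState::STATE_RUNNING)) ==
       EnumState::STATE_RUNNING);
ASSERT(BackupAgentBase::getState("") == EnumState::STATE_NEVERRAN); // empty text means never ran
ASSERT(!BackupAgentBase::isRunnable(EnumState::STATE_COMPLETED));   // completed backups are not runnable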
@@ -58,6 +58,7 @@ ACTOR Future<Void> appendStringRefWithLen(Reference<IBackupFile> file, Standalon
wait(file->append(s.begin(), s.size()));
return Void();
}

} // namespace IBackupFile_impl

Future<Void> IBackupFile::appendStringRefWithLen(Standalone<StringRef> s) {

@@ -253,7 +254,8 @@ std::vector<std::string> IBackupContainer::getURLFormats() {
}

// Get an IBackupContainer based on a container URL string
Reference<IBackupContainer> IBackupContainer::openContainer(const std::string& url) {
Reference<IBackupContainer> IBackupContainer::openContainer(const std::string& url,
Optional<std::string> const& encryptionKeyFileName) {
static std::map<std::string, Reference<IBackupContainer>> m_cache;

Reference<IBackupContainer>& r = m_cache[url];

@@ -262,9 +264,9 @@ Reference<IBackupContainer> IBackupContainer::openContainer(const std::string& u

try {
StringRef u(url);
if (u.startsWith(LiteralStringRef("file://"))) {
r = Reference<IBackupContainer>(new BackupContainerLocalDirectory(url));
} else if (u.startsWith(LiteralStringRef("blobstore://"))) {
if (u.startsWith("file://"_sr)) {
r = makeReference<BackupContainerLocalDirectory>(url, encryptionKeyFileName);
} else if (u.startsWith("blobstore://"_sr)) {
std::string resource;

// The URL parameters contain blobstore endpoint tunables as well as possible backup-specific options.

@@ -277,15 +279,16 @@ Reference<IBackupContainer> IBackupContainer::openContainer(const std::string& u
for (auto c : resource)
if (!isalnum(c) && c != '_' && c != '-' && c != '.' && c != '/')
throw backup_invalid_url();
r = Reference<IBackupContainer>(new BackupContainerS3BlobStore(bstore, resource, backupParams));
r = makeReference<BackupContainerS3BlobStore>(bstore, resource, backupParams, encryptionKeyFileName);
}
#ifdef BUILD_AZURE_BACKUP
else if (u.startsWith(LiteralStringRef("azure://"))) {
u.eat(LiteralStringRef("azure://"));
auto address = NetworkAddress::parse(u.eat(LiteralStringRef("/")).toString());
auto containerName = u.eat(LiteralStringRef("/")).toString();
auto accountName = u.eat(LiteralStringRef("/")).toString();
r = Reference<IBackupContainer>(new BackupContainerAzureBlobStore(address, containerName, accountName));
else if (u.startsWith("azure://"_sr)) {
u.eat("azure://"_sr);
auto accountName = u.eat("@"_sr).toString();
auto endpoint = u.eat("/"_sr).toString();
auto containerName = u.eat("/"_sr).toString();
r = makeReference<BackupContainerAzureBlobStore>(
endpoint, accountName, containerName, encryptionKeyFileName);
}
#endif
else {

@@ -293,6 +296,7 @@ Reference<IBackupContainer> IBackupContainer::openContainer(const std::string& u
throw backup_invalid_url();
}

r->encryptionKeyFileName = encryptionKeyFileName;
r->URL = url;
return r;
} catch (Error& e) {
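With this change the Azure URL is parsed as accountname@endpoint/container instead of an ip:port address, and every backend now receives the optional encryption key file name. A hedged usage sketch of the new overload; the account, endpoint, container, and key path below are made-up examples:

// Hypothetical call site for the updated openContainer(); names are illustrative only.
auto container = IBackupContainer::openContainer(
    "azure://myaccount@myendpoint.blob.core.windows.net/mycontainer/",
    Optional<std::string>("/etc/foundationdb/backup.key"));
// The key argument defaults to an empty Optional (see the header hunk further down),
// so existing unencrypted callers continue to compile unchanged.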
@@ -315,10 +319,10 @@ Reference<IBackupContainer> IBackupContainer::openContainer(const std::string& u
ACTOR Future<std::vector<std::string>> listContainers_impl(std::string baseURL) {
try {
StringRef u(baseURL);
if (u.startsWith(LiteralStringRef("file://"))) {
if (u.startsWith("file://"_sr)) {
std::vector<std::string> results = wait(BackupContainerLocalDirectory::listURLs(baseURL));
return results;
} else if (u.startsWith(LiteralStringRef("blobstore://"))) {
} else if (u.startsWith("blobstore://"_sr)) {
std::string resource;

S3BlobStoreEndpoint::ParametersT backupParams;

@@ -333,14 +337,14 @@ ACTOR Future<std::vector<std::string>> listContainers_impl(std::string baseURL)
}

// Create a dummy container to parse the backup-specific parameters from the URL and get a final bucket name
BackupContainerS3BlobStore dummy(bstore, "dummy", backupParams);
BackupContainerS3BlobStore dummy(bstore, "dummy", backupParams, {});

std::vector<std::string> results = wait(BackupContainerS3BlobStore::listURLs(bstore, dummy.getBucket()));
return results;
}
// TODO: Enable this when Azure backups are ready
/*
else if (u.startsWith(LiteralStringRef("azure://"))) {
else if (u.startsWith("azure://"_sr)) {
std::vector<std::string> results = wait(BackupContainerAzureBlobStore::listURLs(baseURL));
return results;
}

@@ -386,7 +390,7 @@ ACTOR Future<Version> timeKeeperVersionFromDatetime(std::string datetime, Databa
tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
tr->setOption(FDBTransactionOptions::LOCK_AWARE);
state std::vector<std::pair<int64_t, Version>> results =
wait(versionMap.getRange(tr, 0, time, 1, false, true));
wait(versionMap.getRange(tr, 0, time, 1, Snapshot::False, Reverse::True));
if (results.size() != 1) {
// No key less than time was found in the database
// Look for a key >= time.

@@ -425,7 +429,7 @@ ACTOR Future<Optional<int64_t>> timeKeeperEpochsFromVersion(Version v, Reference

// Find the highest time < mid
state std::vector<std::pair<int64_t, Version>> results =
wait(versionMap.getRange(tr, min, mid, 1, false, true));
wait(versionMap.getRange(tr, min, mid, 1, Snapshot::False, Reverse::True));

if (results.size() != 1) {
if (mid == min) {

@@ -293,16 +293,58 @@ public:
Version beginVersion = -1) = 0;

// Get an IBackupContainer based on a container spec string
static Reference<IBackupContainer> openContainer(const std::string& url);
static Reference<IBackupContainer> openContainer(const std::string& url,
const Optional<std::string>& encryptionKeyFileName = {});
static std::vector<std::string> getURLFormats();
static Future<std::vector<std::string>> listContainers(const std::string& baseURL);

std::string getURL() const { return URL; }
std::string const &getURL() const { return URL; }
Optional<std::string> const &getEncryptionKeyFileName() const { return encryptionKeyFileName; }

static std::string lastOpenError;

private:
std::string URL;
Optional<std::string> encryptionKeyFileName;
};

namespace fileBackup {
// Accumulates mutation log value chunks, as both a vector of chunks and as a combined chunk,
// in chunk order, and can check the chunk set for completion or intersection with a set
// of ranges.
struct AccumulatedMutations {
AccumulatedMutations() : lastChunkNumber(-1) {}

// Add a KV pair for this mutation chunk set
// It will be accumulated onto serializedMutations if the chunk number is
// the next expected value.
void addChunk(int chunkNumber, const KeyValueRef& kv);

// Returns true if both
// - 1 or more chunks were added to this set
// - The header of the first chunk contains a valid protocol version and a length
// that matches the bytes after the header in the combined value in serializedMutations
bool isComplete() const;

// Returns true if a complete chunk contains any MutationRefs which intersect with any
// range in ranges.
// It is undefined behavior to run this if isComplete() does not return true.
bool matchesAnyRange(const std::vector<KeyRange>& ranges) const;

std::vector<KeyValueRef> kvs;
std::string serializedMutations;
int lastChunkNumber;
};

// Decodes a mutation log key, which contains (hash, commitVersion, chunkNumber) and
// returns (commitVersion, chunkNumber)
std::pair<Version, int32_t> decodeMutationLogKey(const StringRef& key);

// Decodes an encoded list of mutations in the format of:
// [includeVersion:uint64_t][val_length:uint32_t][mutation_1][mutation_2]...[mutation_k],
// where a mutation is encoded as:
// [type:uint32_t][keyLength:uint32_t][valueLength:uint32_t][param1][param2]
std::vector<MutationRef> decodeMutationLogValue(const StringRef& value);
} // namespace fileBackup

#endif
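The new fileBackup declarations above describe the mutation log layout: one logical log value can be split across several KV chunks that share a commit version and carry increasing chunk numbers, and only a complete set can be decoded. A sketch of how the pieces are intended to fit together; chunkKVs and restoreRanges are hypothetical inputs, not names from the actual sources:

// Hypothetical driver: reassemble one version's chunks, then decode and filter the mutations.
fileBackup::AccumulatedMutations acc;
for (const KeyValueRef& kv : chunkKVs) { // chunkKVs: the KV pairs for one (hash, commitVersion) group
    std::pair<Version, int32_t> versionAndChunk = fileBackup::decodeMutationLogKey(kv.key);
    acc.addChunk(versionAndChunk.second, kv); // chunks must be added in increasing chunk-number order
}
if (acc.isComplete() && acc.matchesAnyRange(restoreRanges)) { // restoreRanges: std::vector<KeyRange>
    std::vector<MutationRef> mutations = fileBackup::decodeMutationLogValue(StringRef(acc.serializedMutations));
    // ... apply or inspect the decoded mutations ...
}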
@@ -19,37 +19,70 @@
*/

#include "fdbclient/BackupContainerAzureBlobStore.h"
#include "fdbrpc/AsyncFileEncrypted.h"
#include <future>

#include "flow/actorcompiler.h" // This must be the last #include.

namespace {

std::string const notFoundErrorCode = "404";

void printAzureError(std::string const& operationName, azure::storage_lite::storage_error const& err) {
printf("(%s) : Error from Azure SDK : %s (%s) : %s",
operationName.c_str(),
err.code_name.c_str(),
err.code.c_str(),
err.message.c_str());
}

template <class T>
T waitAzureFuture(std::future<azure::storage_lite::storage_outcome<T>>&& f, std::string const& operationName) {
auto outcome = f.get();
if (outcome.success()) {
return outcome.response();
} else {
printAzureError(operationName, outcome.error());
throw backup_error();
}
}

} // namespace

class BackupContainerAzureBlobStoreImpl {
public:
using AzureClient = azure::storage_lite::blob_client;

class ReadFile final : public IAsyncFile, ReferenceCounted<ReadFile> {
AsyncTaskThread& asyncTaskThread;
AsyncTaskThread* asyncTaskThread;
std::string containerName;
std::string blobName;
AzureClient* client;
std::shared_ptr<AzureClient> client;

public:
ReadFile(AsyncTaskThread& asyncTaskThread,
const std::string& containerName,
const std::string& blobName,
AzureClient* client)
: asyncTaskThread(asyncTaskThread), containerName(containerName), blobName(blobName), client(client) {}
std::shared_ptr<AzureClient> const& client)
: asyncTaskThread(&asyncTaskThread), containerName(containerName), blobName(blobName), client(client) {}

void addref() override { ReferenceCounted<ReadFile>::addref(); }
void delref() override { ReferenceCounted<ReadFile>::delref(); }
Future<int> read(void* data, int length, int64_t offset) {
return asyncTaskThread.execAsync([client = this->client,
containerName = this->containerName,
blobName = this->blobName,
data,
length,
offset] {
Future<int> read(void* data, int length, int64_t offset) override {
TraceEvent(SevDebug, "BCAzureBlobStoreRead")
.detail("Length", length)
.detail("Offset", offset)
.detail("ContainerName", containerName)
.detail("BlobName", blobName);
return asyncTaskThread->execAsync([client = this->client,
containerName = this->containerName,
blobName = this->blobName,
data,
length,
offset] {
std::ostringstream oss(std::ios::out | std::ios::binary);
client->download_blob_to_stream(containerName, blobName, offset, length, oss);
waitAzureFuture(client->download_blob_to_stream(containerName, blobName, offset, length, oss),
"download_blob_to_stream");
auto str = std::move(oss).str();
memcpy(data, str.c_str(), str.size());
return static_cast<int>(str.size());

@@ -60,19 +93,23 @@ public:
Future<Void> truncate(int64_t size) override { throw file_not_writable(); }
Future<Void> sync() override { throw file_not_writable(); }
Future<int64_t> size() const override {
return asyncTaskThread.execAsync([client = this->client,
containerName = this->containerName,
blobName = this->blobName] {
return static_cast<int64_t>(client->get_blob_properties(containerName, blobName).get().response().size);
});
TraceEvent(SevDebug, "BCAzureBlobStoreReadFileSize")
.detail("ContainerName", containerName)
.detail("BlobName", blobName);
return asyncTaskThread->execAsync(
[client = this->client, containerName = this->containerName, blobName = this->blobName] {
auto resp =
waitAzureFuture(client->get_blob_properties(containerName, blobName), "get_blob_properties");
return static_cast<int64_t>(resp.size);
});
}
std::string getFilename() const override { return blobName; }
int64_t debugFD() const override { return 0; }
};

class WriteFile final : public IAsyncFile, ReferenceCounted<WriteFile> {
AsyncTaskThread& asyncTaskThread;
AzureClient* client;
AsyncTaskThread* asyncTaskThread;
std::shared_ptr<AzureClient> client;
std::string containerName;
std::string blobName;
int64_t m_cursor{ 0 };

@@ -87,8 +124,8 @@ public:
WriteFile(AsyncTaskThread& asyncTaskThread,
const std::string& containerName,
const std::string& blobName,
AzureClient* client)
: asyncTaskThread(asyncTaskThread), containerName(containerName), blobName(blobName), client(client) {}
std::shared_ptr<AzureClient> const& client)
: asyncTaskThread(&asyncTaskThread), containerName(containerName), blobName(blobName), client(client) {}

void addref() override { ReferenceCounted<WriteFile>::addref(); }
void delref() override { ReferenceCounted<WriteFile>::delref(); }
@@ -113,22 +150,33 @@ public:
return Void();
}
Future<Void> sync() override {
TraceEvent(SevDebug, "BCAzureBlobStoreSync")
.detail("Length", buffer.size())
.detail("ContainerName", containerName)
.detail("BlobName", blobName);
auto movedBuffer = std::move(buffer);
buffer.clear();
return asyncTaskThread.execAsync([client = this->client,
containerName = this->containerName,
blobName = this->blobName,
buffer = std::move(movedBuffer)] {
std::istringstream iss(std::move(buffer));
auto resp = client->append_block_from_stream(containerName, blobName, iss).get();
return Void();
});
buffer = {};
if (!movedBuffer.empty()) {
return asyncTaskThread->execAsync([client = this->client,
containerName = this->containerName,
blobName = this->blobName,
buffer = std::move(movedBuffer)] {
std::istringstream iss(std::move(buffer));
waitAzureFuture(client->append_block_from_stream(containerName, blobName, iss),
"append_block_from_stream");
return Void();
});
}
return Void();
}
Future<int64_t> size() const override {
return asyncTaskThread.execAsync(
TraceEvent(SevDebug, "BCAzureBlobStoreSize")
.detail("ContainerName", containerName)
.detail("BlobName", blobName);
return asyncTaskThread->execAsync(
[client = this->client, containerName = this->containerName, blobName = this->blobName] {
auto resp = client->get_blob_properties(containerName, blobName).get().response();
ASSERT(resp.valid()); // TODO: Should instead throw here
auto resp =
waitAzureFuture(client->get_blob_properties(containerName, blobName), "get_blob_properties");
return static_cast<int64_t>(resp.size);
});
}

@@ -162,35 +210,53 @@ public:

static bool isDirectory(const std::string& blobName) { return blobName.size() && blobName.back() == '/'; }

// Hack to get around the fact that macros don't work inside actor functions
static Reference<IAsyncFile> encryptFile(Reference<IAsyncFile> const& f, AsyncFileEncrypted::Mode mode) {
Reference<IAsyncFile> result = f;
#if ENCRYPTION_ENABLED
result = makeReference<AsyncFileEncrypted>(result, mode);
#endif
return result;
}

ACTOR static Future<Reference<IAsyncFile>> readFile(BackupContainerAzureBlobStore* self, std::string fileName) {
bool exists = wait(self->blobExists(fileName));
if (!exists) {
throw file_not_found();
}
return Reference<IAsyncFile>(
new ReadFile(self->asyncTaskThread, self->containerName, fileName, self->client.get()));
Reference<IAsyncFile> f =
makeReference<ReadFile>(self->asyncTaskThread, self->containerName, fileName, self->client);
if (self->usesEncryption()) {
f = encryptFile(f, AsyncFileEncrypted::Mode::READ_ONLY);
}
return f;
}

ACTOR static Future<Reference<IBackupFile>> writeFile(BackupContainerAzureBlobStore* self, std::string fileName) {
TraceEvent(SevDebug, "BCAzureBlobStoreCreateWriteFile")
.detail("ContainerName", self->containerName)
.detail("FileName", fileName);
wait(self->asyncTaskThread.execAsync(
[client = self->client.get(), containerName = self->containerName, fileName = fileName] {
auto outcome = client->create_append_blob(containerName, fileName).get();
[client = self->client, containerName = self->containerName, fileName = fileName] {
waitAzureFuture(client->create_append_blob(containerName, fileName), "create_append_blob");
return Void();
}));
return Reference<IBackupFile>(
new BackupFile(fileName,
Reference<IAsyncFile>(new WriteFile(
self->asyncTaskThread, self->containerName, fileName, self->client.get()))));
Reference<IAsyncFile> f =
makeReference<WriteFile>(self->asyncTaskThread, self->containerName, fileName, self->client);
if (self->usesEncryption()) {
f = encryptFile(f, AsyncFileEncrypted::Mode::APPEND_ONLY);
}
return makeReference<BackupFile>(fileName, f);
}

static void listFiles(AzureClient* client,
static void listFiles(std::shared_ptr<AzureClient> const& client,
const std::string& containerName,
const std::string& path,
std::function<bool(std::string const&)> folderPathFilter,
BackupContainerFileSystem::FilesAndSizesT& result) {
auto resp = client->list_blobs_segmented(containerName, "/", "", path).get().response();
auto resp = waitAzureFuture(client->list_blobs_segmented(containerName, "/", "", path), "list_blobs_segmented");
for (const auto& blob : resp.blobs) {
if (isDirectory(blob.name) && folderPathFilter(blob.name)) {
if (isDirectory(blob.name) && (!folderPathFilter || folderPathFilter(blob.name))) {
listFiles(client, containerName, blob.name, folderPathFilter, result);
} else {
result.emplace_back(blob.name, blob.content_length);

@@ -204,8 +270,12 @@ public:
BackupContainerFileSystem::FilesAndSizesT files = wait(self->listFiles());
filesToDelete = files.size();
}
wait(self->asyncTaskThread.execAsync([containerName = self->containerName, client = self->client.get()] {
client->delete_container(containerName).wait();
TraceEvent(SevDebug, "BCAzureBlobStoreDeleteContainer")
.detail("FilesToDelete", filesToDelete)
.detail("ContainerName", self->containerName)
.detail("TrackNumDeleted", pNumDeleted != nullptr);
wait(self->asyncTaskThread.execAsync([containerName = self->containerName, client = self->client] {
waitAzureFuture(client->delete_container(containerName), "delete_container");
return Void();
}));
if (pNumDeleted) {
@@ -213,26 +283,45 @@ public:
}
return Void();
}

};

Future<bool> BackupContainerAzureBlobStore::blobExists(const std::string& fileName) {
return asyncTaskThread.execAsync(
[client = this->client.get(), containerName = this->containerName, fileName = fileName] {
auto resp = client->get_blob_properties(containerName, fileName).get().response();
return resp.valid();
});
TraceEvent(SevDebug, "BCAzureBlobStoreCheckExists")
.detail("FileName", fileName)
.detail("ContainerName", containerName);
return asyncTaskThread.execAsync([client = this->client, containerName = this->containerName, fileName = fileName] {
auto outcome = client->get_blob_properties(containerName, fileName).get();
if (outcome.success()) {
return true;
} else {
auto const& err = outcome.error();
if (err.code == notFoundErrorCode) {
return false;
} else {
printAzureError("get_blob_properties", err);
throw backup_error();
}
}
});
}

BackupContainerAzureBlobStore::BackupContainerAzureBlobStore(const NetworkAddress& address,
BackupContainerAzureBlobStore::BackupContainerAzureBlobStore(const std::string& endpoint,
const std::string& accountName,
const std::string& containerName)
const std::string& containerName,
const Optional<std::string>& encryptionKeyFileName)
: containerName(containerName) {
std::string accountKey = std::getenv("AZURE_KEY");

setEncryptionKey(encryptionKeyFileName);
const char* _accountKey = std::getenv("AZURE_KEY");
if (!_accountKey) {
TraceEvent(SevError, "EnvironmentVariableNotFound").detail("EnvVariable", "AZURE_KEY");
// TODO: More descriptive error?
throw backup_error();
}
std::string accountKey = _accountKey;
auto credential = std::make_shared<azure::storage_lite::shared_key_credential>(accountName, accountKey);
auto storageAccount = std::make_shared<azure::storage_lite::storage_account>(
accountName, credential, false, format("http://%s/%s", address.toString().c_str(), accountName.c_str()));

accountName, credential, true, format("https://%s", endpoint.c_str()));
client = std::make_unique<AzureClient>(storageAccount, 1);
}

@@ -244,15 +333,30 @@ void BackupContainerAzureBlobStore::delref() {
}

Future<Void> BackupContainerAzureBlobStore::create() {
return asyncTaskThread.execAsync([containerName = this->containerName, client = this->client.get()] {
client->create_container(containerName).wait();
return Void();
});
TraceEvent(SevDebug, "BCAzureBlobStoreCreateContainer").detail("ContainerName", containerName);
Future<Void> createContainerFuture =
asyncTaskThread.execAsync([containerName = this->containerName, client = this->client] {
waitAzureFuture(client->create_container(containerName), "create_container");
return Void();
});
Future<Void> encryptionSetupFuture = usesEncryption() ? encryptionSetupComplete() : Void();
return createContainerFuture && encryptionSetupFuture;
}
Future<bool> BackupContainerAzureBlobStore::exists() {
return asyncTaskThread.execAsync([containerName = this->containerName, client = this->client.get()] {
auto resp = client->get_container_properties(containerName).get().response();
return resp.valid();
TraceEvent(SevDebug, "BCAzureBlobStoreCheckContainerExists").detail("ContainerName", containerName);
return asyncTaskThread.execAsync([containerName = this->containerName, client = this->client] {
auto outcome = client->get_container_properties(containerName).get();
if (outcome.success()) {
return true;
} else {
auto const& err = outcome.error();
if (err.code == notFoundErrorCode) {
return false;
} else {
printAzureError("got_container_properties", err);
throw backup_error();
}
}
});
}

@@ -267,22 +371,23 @@ Future<Reference<IBackupFile>> BackupContainerAzureBlobStore::writeFile(const st
Future<BackupContainerFileSystem::FilesAndSizesT> BackupContainerAzureBlobStore::listFiles(
const std::string& path,
std::function<bool(std::string const&)> folderPathFilter) {
return asyncTaskThread.execAsync([client = this->client.get(),
containerName = this->containerName,
path = path,
folderPathFilter = folderPathFilter] {
FilesAndSizesT result;
BackupContainerAzureBlobStoreImpl::listFiles(client, containerName, path, folderPathFilter, result);
return result;
});
TraceEvent(SevDebug, "BCAzureBlobStoreListFiles").detail("ContainerName", containerName).detail("Path", path);
return asyncTaskThread.execAsync(
[client = this->client, containerName = this->containerName, path = path, folderPathFilter = folderPathFilter] {
FilesAndSizesT result;
BackupContainerAzureBlobStoreImpl::listFiles(client, containerName, path, folderPathFilter, result);
return result;
});
}

Future<Void> BackupContainerAzureBlobStore::deleteFile(const std::string& fileName) {
return asyncTaskThread.execAsync(
[containerName = this->containerName, fileName = fileName, client = client.get()]() {
client->delete_blob(containerName, fileName).wait();
return Void();
});
TraceEvent(SevDebug, "BCAzureBlobStoreDeleteFile")
.detail("ContainerName", containerName)
.detail("FileName", fileName);
return asyncTaskThread.execAsync([containerName = this->containerName, fileName = fileName, client = client]() {
client->delete_blob(containerName, fileName).wait();
return Void();
});
}

Future<Void> BackupContainerAzureBlobStore::deleteContainer(int* pNumDeleted) {

@@ -295,5 +400,5 @@ Future<std::vector<std::string>> BackupContainerAzureBlobStore::listURLs(const s
}

std::string BackupContainerAzureBlobStore::getURLFormat() {
return "azure://<ip>:<port>/<accountname>/<container>/<path_to_file>";
return "azure://<accountname>@<endpoint>/<container>/";
}
@@ -33,7 +33,7 @@ class BackupContainerAzureBlobStore final : public BackupContainerFileSystem,
ReferenceCounted<BackupContainerAzureBlobStore> {
using AzureClient = azure::storage_lite::blob_client;

std::unique_ptr<AzureClient> client;
std::shared_ptr<AzureClient> client;
std::string containerName;
AsyncTaskThread asyncTaskThread;

@@ -42,9 +42,10 @@ class BackupContainerAzureBlobStore final : public BackupContainerFileSystem,
friend class BackupContainerAzureBlobStoreImpl;

public:
BackupContainerAzureBlobStore(const NetworkAddress& address,
BackupContainerAzureBlobStore(const std::string& endpoint,
const std::string& accountName,
const std::string& containerName);
const std::string& containerName,
const Optional<std::string>& encryptionKeyFileName);

void addref() override;
void delref() override;