Merge remote-tracking branch 'upstream/master' into add-c-function-for-management-commands
This commit is contained in:
commit
aec0398fd0
|
@ -293,12 +293,12 @@ bool FDBLibTLSPolicy::set_verify_peers(int count, const uint8_t* verify_peers[],
|
|||
break;
|
||||
}
|
||||
if(split == start || verifyString[split-1] != '\\') {
|
||||
Reference<FDBLibTLSVerify> verify = Reference<FDBLibTLSVerify>(new FDBLibTLSVerify(verifyString.substr(start,split-start)));
|
||||
auto verify = makeReference<FDBLibTLSVerify>(verifyString.substr(start, split - start));
|
||||
verify_rules.push_back(verify);
|
||||
start = split+1;
|
||||
}
|
||||
}
|
||||
Reference<FDBLibTLSVerify> verify = Reference<FDBLibTLSVerify>(new FDBLibTLSVerify(verifyString.substr(start)));
|
||||
auto verify = makeReference<FDBLibTLSVerify>(verifyString.substr(start));
|
||||
verify_rules.push_back(verify);
|
||||
} catch ( const std::runtime_error& ) {
|
||||
verify_rules.clear();
|
||||
|
|
|
@ -66,7 +66,7 @@ static void logf(const char* event, void* uid, bool is_error, ...) {
|
|||
int FDBLibTLSVerifyTest::run() {
|
||||
Reference<FDBLibTLSVerify> verify;
|
||||
try {
|
||||
verify = Reference<FDBLibTLSVerify>(new FDBLibTLSVerify(input));
|
||||
verify = makeReference<FDBLibTLSVerify>(input);
|
||||
} catch ( const std::runtime_error& e ) {
|
||||
if (valid) {
|
||||
std::cerr << "FAIL: Verify test failed, but should have succeeded - '" << input << "'\n";
|
||||
|
@ -102,8 +102,8 @@ int FDBLibTLSVerifyTest::run() {
|
|||
}
|
||||
|
||||
static int policy_verify_test() {
|
||||
Reference<FDBLibTLSPlugin> plugin = Reference<FDBLibTLSPlugin>(new FDBLibTLSPlugin());
|
||||
Reference<FDBLibTLSPolicy> policy = Reference<FDBLibTLSPolicy>(new FDBLibTLSPolicy(plugin, (ITLSLogFunc)logf));
|
||||
auto plugin = makeReference<FDBLibTLSPlugin>();
|
||||
auto policy = makeReference<FDBLibTLSPolicy>(plugin, (ITLSLogFunc)logf);
|
||||
|
||||
const char *verify_peers[] = {
|
||||
"S.CN=abc",
|
||||
|
@ -116,9 +116,9 @@ static int policy_verify_test() {
|
|||
(int)strlen(verify_peers[2]),
|
||||
};
|
||||
Reference<FDBLibTLSVerify> verify_rules[] = {
|
||||
Reference<FDBLibTLSVerify>(new FDBLibTLSVerify(std::string(verify_peers[0], verify_peers_len[0]))),
|
||||
Reference<FDBLibTLSVerify>(new FDBLibTLSVerify(std::string(verify_peers[1], verify_peers_len[1]))),
|
||||
Reference<FDBLibTLSVerify>(new FDBLibTLSVerify(std::string(verify_peers[2], verify_peers_len[2]))),
|
||||
makeReference<FDBLibTLSVerify>(std::string(verify_peers[0], verify_peers_len[0])),
|
||||
makeReference<FDBLibTLSVerify>(std::string(verify_peers[1], verify_peers_len[1])),
|
||||
makeReference<FDBLibTLSVerify>(std::string(verify_peers[2], verify_peers_len[2])),
|
||||
};
|
||||
|
||||
if (!policy->set_verify_peers(3, (const uint8_t **)verify_peers, verify_peers_len)) {
|
||||
|
|
|
@ -123,9 +123,18 @@ fdb_error_t fdb_run_network() {
|
|||
CATCH_AND_RETURN( API->runNetwork(); );
|
||||
}
|
||||
|
||||
#ifdef ADDRESS_SANITIZER
|
||||
extern "C" void __lsan_do_leak_check();
|
||||
#endif
|
||||
|
||||
extern "C" DLLEXPORT
|
||||
fdb_error_t fdb_stop_network() {
|
||||
CATCH_AND_RETURN( API->stopNetwork(); );
|
||||
#ifdef ADDRESS_SANITIZER
|
||||
// fdb_stop_network intentionally leaks a bunch of memory, so let's do the
|
||||
// leak check before that so it's meaningful
|
||||
__lsan_do_leak_check();
|
||||
#endif
|
||||
CATCH_AND_RETURN(API->stopNetwork(););
|
||||
}
|
||||
|
||||
extern "C" DLLEXPORT
|
||||
|
|
|
@ -8,10 +8,10 @@ RUN yum install -y yum-utils &&\
|
|||
http://opensource.wandisco.com/centos/6/git/x86_64/wandisco-git-release-6-1.noarch.rpm &&\
|
||||
yum -y install devtoolset-8-8.1-1.el6 java-1.8.0-openjdk-devel \
|
||||
devtoolset-8-gcc-8.3.1 devtoolset-8-gcc-c++-8.3.1 \
|
||||
devtoolset-8-libubsan-devel devtoolset-8-valgrind-devel \
|
||||
devtoolset-8-libubsan-devel devtoolset-8-libasan-devel devtoolset-8-valgrind-devel \
|
||||
rh-python36-python-devel rh-ruby24 golang python27 rpm-build \
|
||||
mono-core debbuild python-pip dos2unix valgrind-devel ccache \
|
||||
distcc wget git &&\
|
||||
distcc wget git lz4 lz4-devel lz4-static &&\
|
||||
pip install boto3==1.1.1
|
||||
|
||||
USER root
|
||||
|
@ -61,8 +61,8 @@ RUN cd /opt/ && curl -L https://github.com/facebook/rocksdb/archive/v6.10.1.tar.
|
|||
ARG TIMEZONEINFO=America/Los_Angeles
|
||||
RUN rm -f /etc/localtime && ln -s /usr/share/zoneinfo/${TIMEZONEINFO} /etc/localtime
|
||||
|
||||
LABEL version=0.1.17
|
||||
ENV DOCKER_IMAGEVER=0.1.17
|
||||
LABEL version=0.1.19
|
||||
ENV DOCKER_IMAGEVER=0.1.19
|
||||
ENV JAVA_HOME=/usr/lib/jvm/java-1.8.0
|
||||
ENV CC=/opt/rh/devtoolset-8/root/usr/bin/gcc
|
||||
ENV CXX=/opt/rh/devtoolset-8/root/usr/bin/g++
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
FROM foundationdb/foundationdb-build:0.1.17
|
||||
FROM foundationdb/foundationdb-build:0.1.19
|
||||
|
||||
USER root
|
||||
|
||||
|
@ -50,8 +50,8 @@ RUN cp -iv /usr/local/bin/clang++ /usr/local/bin/clang++.deref &&\
|
|||
ldconfig &&\
|
||||
rm -rf /mnt/artifacts
|
||||
|
||||
LABEL version=0.11.9
|
||||
ENV DOCKER_IMAGEVER=0.11.9
|
||||
LABEL version=0.11.10
|
||||
ENV DOCKER_IMAGEVER=0.11.10
|
||||
|
||||
ENV CLANGCC=/usr/local/bin/clang.de8a65ef
|
||||
ENV CLANGCXX=/usr/local/bin/clang++.de8a65ef
|
||||
|
|
|
@ -2,7 +2,7 @@ version: "3"
|
|||
|
||||
services:
|
||||
common: &common
|
||||
image: foundationdb/foundationdb-build:0.1.17
|
||||
image: foundationdb/foundationdb-build:0.1.19
|
||||
|
||||
build-setup: &build-setup
|
||||
<<: *common
|
||||
|
|
|
@ -18,7 +18,7 @@ if (RocksDB_FOUND)
|
|||
-DWITH_CORE_TOOLS=OFF
|
||||
-DWITH_BENCHMARK_TOOLS=OFF
|
||||
-DWITH_BZ2=OFF
|
||||
-DWITH_LZ4=OFF
|
||||
-DWITH_LZ4=ON
|
||||
-DWITH_SNAPPY=OFF
|
||||
-DWITH_ZLIB=OFF
|
||||
-DWITH_ZSTD=OFF
|
||||
|
@ -45,7 +45,7 @@ else()
|
|||
-DWITH_CORE_TOOLS=OFF
|
||||
-DWITH_BENCHMARK_TOOLS=OFF
|
||||
-DWITH_BZ2=OFF
|
||||
-DWITH_LZ4=OFF
|
||||
-DWITH_LZ4=ON
|
||||
-DWITH_SNAPPY=OFF
|
||||
-DWITH_ZLIB=OFF
|
||||
-DWITH_ZSTD=OFF
|
||||
|
|
|
@ -106,10 +106,10 @@ endif()
|
|||
# RocksDB
|
||||
################################################################################
|
||||
|
||||
set(SSD_ROCKSDB_EXPERIMENTAL OFF CACHE BOOL "Build with experimental RocksDB support")
|
||||
set(SSD_ROCKSDB_EXPERIMENTAL ON CACHE BOOL "Build with experimental RocksDB support")
|
||||
# RocksDB is currently enabled by default for GCC but does not build with the latest
|
||||
# Clang.
|
||||
if (SSD_ROCKSDB_EXPERIMENTAL OR GCC)
|
||||
if (SSD_ROCKSDB_EXPERIMENTAL AND GCC)
|
||||
set(WITH_ROCKSDB_EXPERIMENTAL ON)
|
||||
else()
|
||||
set(WITH_ROCKSDB_EXPERIMENTAL OFF)
|
||||
|
|
|
@ -48,6 +48,10 @@ function(install_symlink)
|
|||
TO "../${rel_path}bin/${IN_FILE_NAME}"
|
||||
DESTINATION "usr/lib64/${IN_LINK_NAME}"
|
||||
COMPONENTS "${IN_COMPONENT}-deb")
|
||||
install_symlink_impl(
|
||||
TO "../${rel_path}local/bin/${IN_FILE_NAME}"
|
||||
DESTINATION "usr/lib64/${IN_LINK_NAME}"
|
||||
COMPONENTS "${IN_COMPONENT}-pm")
|
||||
elseif("${IN_LINK_DIR}" MATCHES "bin")
|
||||
install_symlink_impl(
|
||||
TO "../${rel_path}bin/${IN_FILE_NAME}"
|
||||
|
@ -59,6 +63,10 @@ function(install_symlink)
|
|||
COMPONENTS "${IN_COMPONENT}-el6"
|
||||
"${IN_COMPONENT}-el7"
|
||||
"${IN_COMPONENT}-deb")
|
||||
install_symlink_impl(
|
||||
TO "../${rel_path}/bin/${IN_FILE_NAME}"
|
||||
DESTINATION "usr/local/bin/${IN_LINK_NAME}"
|
||||
COMPONENTS "${IN_COMPONENT}-pm")
|
||||
elseif("${IN_LINK_DIR}" MATCHES "fdbmonitor")
|
||||
install_symlink_impl(
|
||||
TO "../../${rel_path}bin/${IN_FILE_NAME}"
|
||||
|
@ -70,6 +78,10 @@ function(install_symlink)
|
|||
COMPONENTS "${IN_COMPONENT}-el6"
|
||||
"${IN_COMPONENT}-el7"
|
||||
"${IN_COMPONENT}-deb")
|
||||
install_symlink_impl(
|
||||
TO "../../${rel_path}/bin/${IN_FILE_NAME}"
|
||||
DESTINATION "usr/local/lib/foundationdb/${IN_LINK_NAME}"
|
||||
COMPONENTS "${IN_COMPONENT}-pm")
|
||||
else()
|
||||
message(FATAL_ERROR "Unknown LINK_DIR ${IN_LINK_DIR}")
|
||||
endif()
|
||||
|
@ -109,7 +121,7 @@ set(install_destination_for_lib_tgz "lib")
|
|||
set(install_destination_for_lib_deb "usr/lib")
|
||||
set(install_destination_for_lib_el6 "usr/lib64")
|
||||
set(install_destination_for_lib_el7 "usr/lib64")
|
||||
set(install_destination_for_lib_pm "lib")
|
||||
set(install_destination_for_lib_pm "usr/local/lib")
|
||||
set(install_destination_for_fdbmonitor_tgz "sbin")
|
||||
set(install_destination_for_fdbmonitor_deb "usr/lib/foundationdb")
|
||||
set(install_destination_for_fdbmonitor_el6 "usr/lib/foundationdb")
|
||||
|
@ -129,12 +141,12 @@ set(install_destination_for_log_tgz "log/foundationdb")
|
|||
set(install_destination_for_log_deb "var/log/foundationdb")
|
||||
set(install_destination_for_log_el6 "var/log/foundationdb")
|
||||
set(install_destination_for_log_el7 "var/log/foundationdb")
|
||||
set(install_destination_for_log_pm "")
|
||||
set(install_destination_for_log_pm "usr/local/foundationdb/logs")
|
||||
set(install_destination_for_data_tgz "lib/foundationdb")
|
||||
set(install_destination_for_data_deb "var/lib/foundationdb/data")
|
||||
set(install_destination_for_data_el6 "var/lib/foundationdb/data")
|
||||
set(install_destination_for_data_el7 "var/lib/foundationdb/data")
|
||||
set(install_destination_for_data_pm "")
|
||||
set(install_destination_for_data_pm "usr/local/foundationdb/data")
|
||||
|
||||
set(generated_dir "${CMAKE_CURRENT_BINARY_DIR}/generated")
|
||||
function(fdb_configure_and_install)
|
||||
|
|
File diff suppressed because it is too large
Load Diff
|
@ -22,6 +22,8 @@ FoundationDB supports language bindings for application development using the or
|
|||
|
||||
* :doc:`api-version-upgrade-guide` contains information about upgrading client code to a new API version.
|
||||
|
||||
* :doc:`transaction-profiler-analyzer` contains information about enabling transaction profiling and analyzing.
|
||||
|
||||
* :doc:`known-limitations` describes both long-term design limitations of FoundationDB and short-term limitations applicable to the current version.
|
||||
|
||||
.. toctree::
|
||||
|
@ -38,4 +40,5 @@ FoundationDB supports language bindings for application development using the or
|
|||
api-general
|
||||
transaction-tagging
|
||||
known-limitations
|
||||
transaction-profiler-analyzer
|
||||
api-version-upgrade-guide
|
||||
|
|
|
@ -313,7 +313,7 @@ client
|
|||
|
||||
``profile client <get|set>``
|
||||
|
||||
Reads or sets parameters of client transaction sampling. Use ``get`` to list the current parameters, and ``set <RATE|default> <SIZE|default>`` to set them. ``RATE`` is the fraction of transactions to be sampled, and ``SIZE`` is the amount (in bytes) of sampled data to store in the database.
|
||||
Reads or sets parameters of client transaction sampling. Use ``get`` to list the current parameters, and ``set <RATE|default> <SIZE|default>`` to set them. ``RATE`` is the fraction of transactions to be sampled, and ``SIZE`` is the amount (in bytes) of sampled data to store in the database. For more information, see :doc:`transaction-profiler-analyzer`.
|
||||
|
||||
list
|
||||
^^^^
|
||||
|
|
|
@ -866,7 +866,7 @@ Some of this information is also available in ``\xff\xff/status/json``, but thes
|
|||
>>> for k, v in db.get_range_startswith('\xff\xff/metrics/health/'):
|
||||
... print(k, v)
|
||||
...
|
||||
('\xff\xff/metrics/health/aggregate', '{"batch_limited":false,"tps_limit":483988.66315011407,"worst_storage_durability_lag":5000001,"worst_storage_queue":2036,"worst_log_queue":300}')
|
||||
('\xff\xff/metrics/health/aggregate', '{"batch_limited":false,"limiting_storage_durability_lag":5000000,"limiting_storage_queue":1000,"tps_limit":483988.66315011407,"worst_storage_durability_lag":5000001,"worst_storage_queue":2036,"worst_log_queue":300}')
|
||||
('\xff\xff/metrics/health/log/e639a9ad0373367784cc550c615c469b', '{"log_queue":300}')
|
||||
('\xff\xff/metrics/health/storage/ab2ce4caf743c9c1ae57063629c6678a', '{"cpu_usage":2.398696781487125,"disk_usage":0.059995917598039405,"storage_durability_lag":5000001,"storage_queue":2036}')
|
||||
|
||||
|
@ -874,15 +874,17 @@ Some of this information is also available in ``\xff\xff/status/json``, but thes
|
|||
|
||||
Aggregate stats about cluster health. Reading this key alone is slightly cheaper than reading any of the per-process keys.
|
||||
|
||||
============================ ======== ===============
|
||||
**Field** **Type** **Description**
|
||||
---------------------------- -------- ---------------
|
||||
batch_limited boolean Whether or not the cluster is limiting batch priority transactions
|
||||
tps_limit number The rate at which normal priority transactions are allowed to start
|
||||
worst_storage_durability_lag number See the description for storage_durability_lag
|
||||
worst_storage_queue number See the description for storage_queue
|
||||
worst_log_queue number See the description for log_queue
|
||||
============================ ======== ===============
|
||||
=================================== ======== ===============
|
||||
**Field** **Type** **Description**
|
||||
----------------------------------- -------- ---------------
|
||||
batch_limited boolean Whether or not the cluster is limiting batch priority transactions
|
||||
limiting_storage_durability_lag number storage_durability_lag that ratekeeper is using to determing throttling (see the description for storage_durability_lag)
|
||||
limiting_storage_queue number storage_queue that ratekeeper is using to determing throttling (see the description for storage_queue)
|
||||
tps_limit number The rate at which normal priority transactions are allowed to start
|
||||
worst_storage_durability_lag number See the description for storage_durability_lag
|
||||
worst_storage_queue number See the description for storage_queue
|
||||
worst_log_queue number See the description for log_queue
|
||||
=================================== ======== ===============
|
||||
|
||||
``\xff\xff/metrics/health/log/<id>``
|
||||
|
||||
|
|
|
@ -2,6 +2,12 @@
|
|||
Release Notes
|
||||
#############
|
||||
|
||||
6.2.28
|
||||
======
|
||||
* Log detailed team collection information when median available space ratio of all teams is too low. `(PR #3912) <https://github.com/apple/foundationdb/pull/3912>`_
|
||||
* Bug fix, blob client did not support authentication key sizes over 64 bytes. `(PR #3964) <https://github.com/apple/foundationdb/pull/3964>`_
|
||||
|
||||
|
||||
6.2.27
|
||||
======
|
||||
* For clusters with a large number of shards, avoid slow tasks in the data distributor by adding yields to the shard map destruction. `(PR #3834) <https://github.com/apple/foundationdb/pull/3834>`_
|
||||
|
|
|
@ -2,8 +2,8 @@
|
|||
Release Notes
|
||||
#############
|
||||
|
||||
6.3.9
|
||||
=====
|
||||
6.3.10
|
||||
======
|
||||
|
||||
Features
|
||||
--------
|
||||
|
@ -63,6 +63,7 @@ Fixes
|
|||
* Fix an issue where ``fdbcli --exec 'exclude no_wait ...'`` would incorrectly report that processes can safely be removed from the cluster. [6.3.5] `(PR #3566) <https://github.com/apple/foundationdb/pull/3566>`_
|
||||
* Commit latencies could become large because of inaccurate compute estimates. [6.3.9] `(PR #3845) <https://github.com/apple/foundationdb/pull/3845>`_
|
||||
* Added a timeout on TLS handshakes to prevent them from hanging indefinitely. [6.3.9] `(PR #3850) <https://github.com/apple/foundationdb/pull/3850>`_
|
||||
* Bug fix, blob client did not support authentication key sizes over 64 bytes. `(PR #3964) <https://github.com/apple/foundationdb/pull/3964>`_
|
||||
|
||||
Status
|
||||
------
|
||||
|
@ -121,6 +122,8 @@ Fixes from previous versions
|
|||
* The 6.3.1 patch release includes all fixes from the patch releases 6.2.21 and 6.2.22. :doc:`(6.2 Release Notes) </release-notes/release-notes-620>`
|
||||
* The 6.3.3 patch release includes all fixes from the patch release 6.2.23. :doc:`(6.2 Release Notes) </release-notes/release-notes-620>`
|
||||
* The 6.3.5 patch release includes all fixes from the patch releases 6.2.24 and 6.2.25. :doc:`(6.2 Release Notes) </release-notes/release-notes-620>`
|
||||
* The 6.3.9 patch release includes all fixes from the patch releases 6.2.26. :doc:`(6.2 Release Notes) </release-notes/release-notes-620>`
|
||||
* The 6.3.10 patch release includes all fixes from the patch releases 6.2.27. :doc:`(6.2 Release Notes) </release-notes/release-notes-620>`
|
||||
|
||||
Fixes only impacting 6.3.0+
|
||||
---------------------------
|
||||
|
|
|
@ -0,0 +1,47 @@
|
|||
.. _transaction-profiler-analyzer:
|
||||
|
||||
###################################
|
||||
Transaction profiling and analyzing
|
||||
###################################
|
||||
|
||||
FoundationDB natively implements transaction profiling and analyzing. There are two ways to enable transaction profiling in FoundationDB. One is globally through the database, via ``fdbcli`` command which sets keys in the database and the clients pick it up.
|
||||
|
||||
``e.g. fdbcli> profile client set 0.01 100MB`` profiles 1% of transactions and maintains 100MB worth of history in the database.
|
||||
|
||||
Second way is through client side knobs ``CSI_SAMPLING_PROBABILITY`` and ``CSI_SIZE_LIMIT`` which have to be set at every client that you want to profile. Enabling transaction profiling through the database setting has higher precedence and overrides any client knob settings.
|
||||
|
||||
There are only two inputs for transaction profiling i.e. sampling rate and the size limit.
|
||||
|
||||
The transactions are sampled at the specified rate and all the events for that sampled transaction are recorded. Then at 30 second interval, the data for all the sampled transactions during that interval is flushed to the database. The sampled data is written into special key space ``“\xff\x02/fdbClientInfo/ - \xff\x02/fdbClientInfo0”``
|
||||
|
||||
The second part of transaction profiling involves deleting old sampled data to restrict the size. Retention is purely based on the input size limit. If the size of all the recorded data exceeds the input limit, then the old ones get deleted. But the limit is a soft limit, you could go over the limit temporarily.
|
||||
|
||||
There are many ways that this data can be exposed for analysis. One can imagine building a client that reads the data from the database and streams it to external tools such as Wavefront.
|
||||
|
||||
One such tool that’s available as part of open source FDB is a python script called ``transaction_profiling_analyzer.py`` that's available here on `GitHUb <https://github.com/apple/foundationdb/blob/master/contrib/transaction_profiling_analyzer/transaction_profiling_analyzer.py>`_. It reads the sampled data from the database and outputs it in a user friendly format. Currently it’s most useful in identifying hot key-ranges (for both reading and writing).
|
||||
|
||||
Prerequisites
|
||||
=============
|
||||
|
||||
* ``python3``
|
||||
* ``fdb python bindings`` - If you don't have the Python bindings installed, you can append $BUILDDIR/bindings/python to the PYTHONPATH environment variable, then you should be able to import fdb
|
||||
|
||||
Additional packages
|
||||
===================
|
||||
|
||||
* ``dateparser`` - for human date parsing
|
||||
* ``sortedcontainers`` - for estimating key range read/write density
|
||||
|
||||
Sample usage
|
||||
============
|
||||
|
||||
* ``$python3 transaction_profiling_analyzer.py --help`` - Shows the help message and exits
|
||||
|
||||
* ``python3 transaction_profiling_analyzer.py -C fdb.cluster --start-time "17:00 2020/07/07 PDT" --end-time "17:50 2020/07/07 PDT"`` - Analyzes and prints full information between a start and end time frame
|
||||
|
||||
Using filters:
|
||||
==============
|
||||
|
||||
* ``python3 ~/transaction_profiling_analyzer.py -C fdb.cluster --filter-get --start-time "17:00 2020/07/07 PDT" --end-time "17:50 2020/07/07 PDT"`` - Analyzes and prints information about gets between a start and end time frame
|
||||
|
||||
* ``python3 ~/transaction_profiling_analyzer.py -C fdb.cluster --filter-get --start-time "17:00 2020/07/07 PDT" --end-time "17:50 2020/07/07 PDT" --top-requests 5`` - Analyzes and prints information about top 5 keys for gets between a start and end time frame
|
|
@ -298,7 +298,7 @@ struct MutationFilesReadProgress : public ReferenceCounted<MutationFilesReadProg
|
|||
// Attempt decode the first few blocks of log files until beginVersion is consumed
|
||||
std::vector<Future<Void>> fileDecodes;
|
||||
for (int i = 0; i < asyncFiles.size(); i++) {
|
||||
Reference<FileProgress> fp(new FileProgress(asyncFiles[i].get(), i));
|
||||
auto fp = makeReference<FileProgress>(asyncFiles[i].get(), i);
|
||||
progress->fileProgress.push_back(fp);
|
||||
fileDecodes.push_back(
|
||||
decodeToVersion(fp, progress->beginVersion, progress->endVersion, progress->getLogFile(i)));
|
||||
|
|
|
@ -38,7 +38,7 @@
|
|||
#include "fdbclient/BackupContainer.h"
|
||||
#include "fdbclient/KeyBackedTypes.h"
|
||||
#include "fdbclient/RunTransaction.actor.h"
|
||||
#include "fdbclient/BlobStore.h"
|
||||
#include "fdbclient/S3BlobStore.h"
|
||||
#include "fdbclient/json_spirit/json_spirit_writer_template.h"
|
||||
|
||||
#include "flow/Platform.h"
|
||||
|
@ -1460,12 +1460,12 @@ ACTOR Future<std::string> getLayerStatus(Reference<ReadYourWritesTransaction> tr
|
|||
o.create("configured_workers") = CLIENT_KNOBS->BACKUP_TASKS_PER_AGENT;
|
||||
|
||||
if(exe == EXE_AGENT) {
|
||||
static BlobStoreEndpoint::Stats last_stats;
|
||||
static S3BlobStoreEndpoint::Stats last_stats;
|
||||
static double last_ts = 0;
|
||||
BlobStoreEndpoint::Stats current_stats = BlobStoreEndpoint::s_stats;
|
||||
S3BlobStoreEndpoint::Stats current_stats = S3BlobStoreEndpoint::s_stats;
|
||||
JSONDoc blobstats = o.create("blob_stats");
|
||||
blobstats.create("total") = current_stats.getJSON();
|
||||
BlobStoreEndpoint::Stats diff = current_stats - last_stats;
|
||||
S3BlobStoreEndpoint::Stats diff = current_stats - last_stats;
|
||||
json_spirit::mObject diffObj = diff.getJSON();
|
||||
if(last_ts > 0)
|
||||
diffObj["bytes_per_second"] = double(current_stats.bytes_sent - last_stats.bytes_sent) / (now() - last_ts);
|
||||
|
@ -3790,7 +3790,7 @@ int main(int argc, char* argv[]) {
|
|||
auto initCluster = [&](bool quiet = false) {
|
||||
auto resolvedClusterFile = ClusterConnectionFile::lookupClusterFileName(clusterFile);
|
||||
try {
|
||||
ccf = Reference<ClusterConnectionFile>(new ClusterConnectionFile(resolvedClusterFile.first));
|
||||
ccf = makeReference<ClusterConnectionFile>(resolvedClusterFile.first);
|
||||
}
|
||||
catch (Error& e) {
|
||||
if(!quiet)
|
||||
|
@ -3813,7 +3813,7 @@ int main(int argc, char* argv[]) {
|
|||
if(sourceClusterFile.size()) {
|
||||
auto resolvedSourceClusterFile = ClusterConnectionFile::lookupClusterFileName(sourceClusterFile);
|
||||
try {
|
||||
sourceCcf = Reference<ClusterConnectionFile>(new ClusterConnectionFile(resolvedSourceClusterFile.first));
|
||||
sourceCcf = makeReference<ClusterConnectionFile>(resolvedSourceClusterFile.first);
|
||||
}
|
||||
catch (Error& e) {
|
||||
fprintf(stderr, "%s\n", ClusterConnectionFile::getErrorString(resolvedSourceClusterFile, e).c_str());
|
||||
|
|
|
@ -2509,7 +2509,7 @@ ACTOR Future<bool> setClass( Database db, std::vector<StringRef> tokens ) {
|
|||
|
||||
Reference<ReadYourWritesTransaction> getTransaction(Database db, Reference<ReadYourWritesTransaction> &tr, FdbOptions *options, bool intrans) {
|
||||
if(!tr || !intrans) {
|
||||
tr = Reference<ReadYourWritesTransaction>(new ReadYourWritesTransaction(db));
|
||||
tr = makeReference<ReadYourWritesTransaction>(db);
|
||||
options->apply(tr);
|
||||
}
|
||||
|
||||
|
@ -3011,7 +3011,7 @@ ACTOR Future<int> cli(CLIOptions opt, LineNoise* plinenoise) {
|
|||
|
||||
state std::pair<std::string, bool> resolvedClusterFile = ClusterConnectionFile::lookupClusterFileName( opt.clusterFile );
|
||||
try {
|
||||
ccf = Reference<ClusterConnectionFile>( new ClusterConnectionFile( resolvedClusterFile.first ) );
|
||||
ccf = makeReference<ClusterConnectionFile>(resolvedClusterFile.first);
|
||||
} catch (Error& e) {
|
||||
fprintf(stderr, "%s\n", ClusterConnectionFile::getErrorString(resolvedClusterFile, e).c_str());
|
||||
return 1;
|
||||
|
@ -3472,7 +3472,7 @@ ACTOR Future<int> cli(CLIOptions opt, LineNoise* plinenoise) {
|
|||
LiteralStringRef("\xff\xff/worker_interfaces0")),
|
||||
CLIENT_KNOBS->TOO_MANY)));
|
||||
ASSERT(!kvs.more);
|
||||
Reference<FlowLock> connectLock(new FlowLock(CLIENT_KNOBS->CLI_CONNECT_PARALLELISM));
|
||||
auto connectLock = makeReference<FlowLock>(CLIENT_KNOBS->CLI_CONNECT_PARALLELISM);
|
||||
std::vector<Future<Void>> addInterfs;
|
||||
for( auto it : kvs ) {
|
||||
addInterfs.push_back(addInterface(&address_interface, connectLock, it));
|
||||
|
@ -3537,7 +3537,7 @@ ACTOR Future<int> cli(CLIOptions opt, LineNoise* plinenoise) {
|
|||
LiteralStringRef("\xff\xff/worker_interfaces0")),
|
||||
CLIENT_KNOBS->TOO_MANY)));
|
||||
ASSERT(!kvs.more);
|
||||
Reference<FlowLock> connectLock(new FlowLock(CLIENT_KNOBS->CLI_CONNECT_PARALLELISM));
|
||||
auto connectLock = makeReference<FlowLock>(CLIENT_KNOBS->CLI_CONNECT_PARALLELISM);
|
||||
std::vector<Future<Void>> addInterfs;
|
||||
for( auto it : kvs ) {
|
||||
addInterfs.push_back(addInterface(&address_interface, connectLock, it));
|
||||
|
@ -3875,7 +3875,7 @@ ACTOR Future<int> cli(CLIOptions opt, LineNoise* plinenoise) {
|
|||
LiteralStringRef("\xff\xff/worker_interfaces0")),
|
||||
CLIENT_KNOBS->TOO_MANY)));
|
||||
ASSERT(!kvs.more);
|
||||
Reference<FlowLock> connectLock(new FlowLock(CLIENT_KNOBS->CLI_CONNECT_PARALLELISM));
|
||||
auto connectLock = makeReference<FlowLock>(CLIENT_KNOBS->CLI_CONNECT_PARALLELISM);
|
||||
std::vector<Future<Void>> addInterfs;
|
||||
for( auto it : kvs ) {
|
||||
addInterfs.push_back(addInterface(&address_interface, connectLock, it));
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* AsyncFileBlobStore.actor.cpp
|
||||
* AsyncFileS3BlobStore.actor.cpp
|
||||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
|
@ -18,40 +18,37 @@
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "fdbclient/AsyncFileBlobStore.actor.h"
|
||||
#include "fdbclient/AsyncFileS3BlobStore.actor.h"
|
||||
#include "fdbrpc/AsyncFileReadAhead.actor.h"
|
||||
#include "flow/UnitTest.h"
|
||||
#include "flow/actorcompiler.h" // has to be last include
|
||||
|
||||
Future<int64_t> AsyncFileBlobStoreRead::size() const {
|
||||
if(!m_size.isValid())
|
||||
m_size = m_bstore->objectSize(m_bucket, m_object);
|
||||
Future<int64_t> AsyncFileS3BlobStoreRead::size() const {
|
||||
if (!m_size.isValid()) m_size = m_bstore->objectSize(m_bucket, m_object);
|
||||
return m_size;
|
||||
}
|
||||
|
||||
Future<int> AsyncFileBlobStoreRead::read( void *data, int length, int64_t offset ) {
|
||||
Future<int> AsyncFileS3BlobStoreRead::read(void* data, int length, int64_t offset) {
|
||||
return m_bstore->readObject(m_bucket, m_object, data, length, offset);
|
||||
}
|
||||
|
||||
|
||||
ACTOR Future<Void> sendStuff(int id, Reference<IRateControl> t, int bytes) {
|
||||
printf("Starting fake sender %d which will send send %d bytes.\n", id, bytes);
|
||||
state double ts = timer();
|
||||
state int total = 0;
|
||||
while(total < bytes) {
|
||||
state int r = std::min<int>(deterministicRandom()->randomInt(0,1000), bytes - total);
|
||||
while (total < bytes) {
|
||||
state int r = std::min<int>(deterministicRandom()->randomInt(0, 1000), bytes - total);
|
||||
wait(t->getAllowance(r));
|
||||
total += r;
|
||||
}
|
||||
double dur = timer() - ts;
|
||||
printf("Sender %d: Sent %d in %fs, %f/s\n", id, total, dur, total/dur);
|
||||
printf("Sender %d: Sent %d in %fs, %f/s\n", id, total, dur, total / dur);
|
||||
return Void();
|
||||
}
|
||||
|
||||
TEST_CASE("/backup/throttling") {
|
||||
// Test will not work in simulation.
|
||||
if(g_network->isSimulated())
|
||||
return Void();
|
||||
if (g_network->isSimulated()) return Void();
|
||||
|
||||
state int limit = 100000;
|
||||
state Reference<IRateControl> t(new SpeedLimit(limit, 1));
|
||||
|
@ -62,13 +59,18 @@ TEST_CASE("/backup/throttling") {
|
|||
state int total = 0;
|
||||
int s;
|
||||
s = 500000;
|
||||
f.push_back(sendStuff(id++, t, s)); total += s;
|
||||
f.push_back(sendStuff(id++, t, s)); total += s;
|
||||
f.push_back(sendStuff(id++, t, s));
|
||||
total += s;
|
||||
f.push_back(sendStuff(id++, t, s));
|
||||
total += s;
|
||||
s = 50000;
|
||||
f.push_back(sendStuff(id++, t, s)); total += s;
|
||||
f.push_back(sendStuff(id++, t, s)); total += s;
|
||||
f.push_back(sendStuff(id++, t, s));
|
||||
total += s;
|
||||
f.push_back(sendStuff(id++, t, s));
|
||||
total += s;
|
||||
s = 5000;
|
||||
f.push_back(sendStuff(id++, t, s)); total += s;
|
||||
f.push_back(sendStuff(id++, t, s));
|
||||
total += s;
|
||||
|
||||
wait(waitForAll(f));
|
||||
double dur = timer() - ts;
|
||||
|
@ -78,5 +80,3 @@ TEST_CASE("/backup/throttling") {
|
|||
|
||||
return Void();
|
||||
}
|
||||
|
||||
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* AsyncFileBlobStore.actor.h
|
||||
* AsyncFileS3BlobStore.actor.h
|
||||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
|
@ -20,12 +20,13 @@
|
|||
|
||||
#pragma once
|
||||
|
||||
// When actually compiled (NO_INTELLISENSE), include the generated version of this file. In intellisense use the source version.
|
||||
// When actually compiled (NO_INTELLISENSE), include the generated version of this file. In intellisense use the source
|
||||
// version.
|
||||
#if defined(NO_INTELLISENSE) && !defined(FDBRPC_ASYNCFILEBLOBSTORE_ACTOR_G_H)
|
||||
#define FDBRPC_ASYNCFILEBLOBSTORE_ACTOR_G_H
|
||||
#include "fdbclient/AsyncFileBlobStore.actor.g.h"
|
||||
#elif !defined(FDBRPC_ASYNCFILEBLOBSTORE_ACTOR_H)
|
||||
#define FDBRPC_ASYNCFILEBLOBSTORE_ACTOR_H
|
||||
#define FDBRPC_ASYNCFILEBLOBSTORE_ACTOR_G_H
|
||||
#include "fdbclient/AsyncFileS3BlobStore.actor.g.h"
|
||||
#elif !defined(FDBRPC_ASYNCFILES3BLOBSTORE_ACTOR_H)
|
||||
#define FDBRPC_ASYNCFILES3BLOBSTORE_ACTOR_H
|
||||
|
||||
#include <sstream>
|
||||
#include <time.h>
|
||||
|
@ -34,55 +35,54 @@
|
|||
#include "flow/serialize.h"
|
||||
#include "flow/Net2Packet.h"
|
||||
#include "fdbrpc/IRateControl.h"
|
||||
#include "fdbclient/BlobStore.h"
|
||||
#include "fdbclient/S3BlobStore.h"
|
||||
#include "fdbclient/md5/md5.h"
|
||||
#include "fdbclient/libb64/encode.h"
|
||||
#include "flow/actorcompiler.h" // This must be the last #include.
|
||||
#include "flow/actorcompiler.h" // This must be the last #include.
|
||||
|
||||
ACTOR template<typename T> static Future<T> joinErrorGroup(Future<T> f, Promise<Void> p) {
|
||||
ACTOR template <typename T>
|
||||
static Future<T> joinErrorGroup(Future<T> f, Promise<Void> p) {
|
||||
try {
|
||||
wait(success(f) || p.getFuture());
|
||||
return f.get();
|
||||
} catch(Error &e) {
|
||||
if(p.canBeSet())
|
||||
p.sendError(e);
|
||||
} catch (Error& e) {
|
||||
if (p.canBeSet()) p.sendError(e);
|
||||
throw;
|
||||
}
|
||||
}
|
||||
// This class represents a write-only file that lives in an S3-style blob store. It writes using the REST API,
|
||||
// using multi-part upload and beginning to transfer each part as soon as it is large enough.
|
||||
// All write operations file operations must be sequential and contiguous.
|
||||
// Limits on part sizes, upload speed, and concurrent uploads are taken from the BlobStoreEndpoint being used.
|
||||
class AsyncFileBlobStoreWrite : public IAsyncFile, public ReferenceCounted<AsyncFileBlobStoreWrite> {
|
||||
// Limits on part sizes, upload speed, and concurrent uploads are taken from the S3BlobStoreEndpoint being used.
|
||||
class AsyncFileS3BlobStoreWrite : public IAsyncFile, public ReferenceCounted<AsyncFileS3BlobStoreWrite> {
|
||||
public:
|
||||
virtual void addref() { ReferenceCounted<AsyncFileBlobStoreWrite>::addref(); }
|
||||
virtual void delref() { ReferenceCounted<AsyncFileBlobStoreWrite>::delref(); }
|
||||
virtual void addref() { ReferenceCounted<AsyncFileS3BlobStoreWrite>::addref(); }
|
||||
virtual void delref() { ReferenceCounted<AsyncFileS3BlobStoreWrite>::delref(); }
|
||||
|
||||
struct Part : ReferenceCounted<Part> {
|
||||
Part(int n, int minSize) : number(n), writer(content.getWriteBuffer(minSize), nullptr, Unversioned()), length(0) {
|
||||
Part(int n, int minSize)
|
||||
: number(n), writer(content.getWriteBuffer(minSize), nullptr, Unversioned()), length(0) {
|
||||
etag = std::string();
|
||||
::MD5_Init(&content_md5_buf);
|
||||
}
|
||||
virtual ~Part() {
|
||||
etag.cancel();
|
||||
}
|
||||
virtual ~Part() { etag.cancel(); }
|
||||
Future<std::string> etag;
|
||||
int number;
|
||||
UnsentPacketQueue content;
|
||||
std::string md5string;
|
||||
PacketWriter writer;
|
||||
int length;
|
||||
void write(const uint8_t *buf, int len) {
|
||||
void write(const uint8_t* buf, int len) {
|
||||
writer.serializeBytes(buf, len);
|
||||
::MD5_Update(&content_md5_buf, buf, len);
|
||||
length += len;
|
||||
}
|
||||
// MD5 sum can only be finalized once, further calls will do nothing so new writes will be reflected in the sum.
|
||||
void finalizeMD5() {
|
||||
if(md5string.empty()) {
|
||||
if (md5string.empty()) {
|
||||
std::string sumBytes;
|
||||
sumBytes.resize(16);
|
||||
::MD5_Final((unsigned char *)sumBytes.data(), &content_md5_buf);
|
||||
::MD5_Final((unsigned char*)sumBytes.data(), &content_md5_buf);
|
||||
md5string = base64::encoder::from_string(sumBytes);
|
||||
md5string.resize(md5string.size() - 1);
|
||||
}
|
||||
|
@ -94,71 +94,75 @@ public:
|
|||
|
||||
Future<int> read(void* data, int length, int64_t offset) override { throw file_not_readable(); }
|
||||
|
||||
ACTOR static Future<Void> write_impl(Reference<AsyncFileBlobStoreWrite> f, const uint8_t *data, int length) {
|
||||
state Part *p = f->m_parts.back().getPtr();
|
||||
// If this write will cause the part to cross the min part size boundary then write to the boundary and start a new part.
|
||||
while(p->length + length >= f->m_bstore->knobs.multipart_min_part_size) {
|
||||
ACTOR static Future<Void> write_impl(Reference<AsyncFileS3BlobStoreWrite> f, const uint8_t* data, int length) {
|
||||
state Part* p = f->m_parts.back().getPtr();
|
||||
// If this write will cause the part to cross the min part size boundary then write to the boundary and start a
|
||||
// new part.
|
||||
while (p->length + length >= f->m_bstore->knobs.multipart_min_part_size) {
|
||||
// Finish off this part
|
||||
int finishlen = f->m_bstore->knobs.multipart_min_part_size - p->length;
|
||||
p->write((const uint8_t *)data, finishlen);
|
||||
p->write((const uint8_t*)data, finishlen);
|
||||
|
||||
// Adjust source buffer args
|
||||
length -= finishlen;
|
||||
data = (const uint8_t *)data + finishlen;
|
||||
data = (const uint8_t*)data + finishlen;
|
||||
|
||||
// End current part (and start new one)
|
||||
wait(f->endCurrentPart(f.getPtr(), true));
|
||||
p = f->m_parts.back().getPtr();
|
||||
}
|
||||
|
||||
p->write((const uint8_t *)data, length);
|
||||
p->write((const uint8_t*)data, length);
|
||||
return Void();
|
||||
}
|
||||
|
||||
Future<Void> write(void const* data, int length, int64_t offset) override {
|
||||
if(offset != m_cursor)
|
||||
throw non_sequential_op();
|
||||
if (offset != m_cursor) throw non_sequential_op();
|
||||
m_cursor += length;
|
||||
|
||||
return m_error.getFuture() || write_impl(Reference<AsyncFileBlobStoreWrite>::addRef(this), (const uint8_t *)data, length);
|
||||
return m_error.getFuture() ||
|
||||
write_impl(Reference<AsyncFileS3BlobStoreWrite>::addRef(this), (const uint8_t*)data, length);
|
||||
}
|
||||
|
||||
Future<Void> truncate(int64_t size) override {
|
||||
if(size != m_cursor)
|
||||
return non_sequential_op();
|
||||
if (size != m_cursor) return non_sequential_op();
|
||||
return Void();
|
||||
}
|
||||
|
||||
ACTOR static Future<std::string> doPartUpload(AsyncFileBlobStoreWrite *f, Part *p) {
|
||||
ACTOR static Future<std::string> doPartUpload(AsyncFileS3BlobStoreWrite* f, Part* p) {
|
||||
p->finalizeMD5();
|
||||
std::string upload_id = wait(f->getUploadID());
|
||||
std::string etag = wait(f->m_bstore->uploadPart(f->m_bucket, f->m_object, upload_id, p->number, &p->content, p->length, p->md5string));
|
||||
std::string etag = wait(f->m_bstore->uploadPart(f->m_bucket, f->m_object, upload_id, p->number, &p->content,
|
||||
p->length, p->md5string));
|
||||
return etag;
|
||||
}
|
||||
|
||||
ACTOR static Future<Void> doFinishUpload(AsyncFileBlobStoreWrite* f) {
|
||||
ACTOR static Future<Void> doFinishUpload(AsyncFileS3BlobStoreWrite* f) {
|
||||
// If there is only 1 part then it has not yet been uploaded so just write the whole file at once.
|
||||
if(f->m_parts.size() == 1) {
|
||||
if (f->m_parts.size() == 1) {
|
||||
Reference<Part> part = f->m_parts.back();
|
||||
part->finalizeMD5();
|
||||
wait(f->m_bstore->writeEntireFileFromBuffer(f->m_bucket, f->m_object, &part->content, part->length, part->md5string));
|
||||
wait(f->m_bstore->writeEntireFileFromBuffer(f->m_bucket, f->m_object, &part->content, part->length,
|
||||
part->md5string));
|
||||
return Void();
|
||||
}
|
||||
|
||||
// There are at least 2 parts. End the last part (which could be empty)
|
||||
wait(f->endCurrentPart(f));
|
||||
|
||||
state BlobStoreEndpoint::MultiPartSetT partSet;
|
||||
state S3BlobStoreEndpoint::MultiPartSetT partSet;
|
||||
state std::vector<Reference<Part>>::iterator p;
|
||||
|
||||
// Wait for all the parts to be done to get their ETags, populate the partSet required to finish the object upload.
|
||||
for(p = f->m_parts.begin(); p != f->m_parts.end(); ++p) {
|
||||
// Wait for all the parts to be done to get their ETags, populate the partSet required to finish the object
|
||||
// upload.
|
||||
for (p = f->m_parts.begin(); p != f->m_parts.end(); ++p) {
|
||||
std::string tag = wait((*p)->etag);
|
||||
if((*p)->length > 0) // The last part might be empty and has to be omitted.
|
||||
if ((*p)->length > 0) // The last part might be empty and has to be omitted.
|
||||
partSet[(*p)->number] = tag;
|
||||
}
|
||||
|
||||
// No need to wait for the upload ID here because the above loop waited for all the parts and each part required the upload ID so it is ready
|
||||
// No need to wait for the upload ID here because the above loop waited for all the parts and each part required
|
||||
// the upload ID so it is ready
|
||||
wait(f->m_bstore->finishMultiPartUpload(f->m_bucket, f->m_object, f->m_upload_id.get(), partSet));
|
||||
|
||||
return Void();
|
||||
|
@ -167,43 +171,43 @@ public:
|
|||
// Ready once all data has been sent AND acknowledged from the remote side
|
||||
Future<Void> sync() override {
|
||||
// Only initiate the finish operation once, and also prevent further writing.
|
||||
if(!m_finished.isValid()) {
|
||||
if (!m_finished.isValid()) {
|
||||
m_finished = doFinishUpload(this);
|
||||
m_cursor = -1; // Cause future write attempts to fail
|
||||
m_cursor = -1; // Cause future write attempts to fail
|
||||
}
|
||||
|
||||
return m_finished;
|
||||
}
|
||||
|
||||
//
|
||||
// Flush can't really do what the caller would "want" for a blob store file. The caller would probably notionally want
|
||||
// all bytes written to be at least in transit to the blob store, but that is not very feasible. The blob store
|
||||
// has a minimum size requirement for all but the final part, and parts must be sent with a header that specifies
|
||||
// their size. So in the case of a write buffer that does not meet the part minimum size the part could be sent
|
||||
// but then if there is any more data written then that part needs to be sent again in its entirety. So a client
|
||||
// that calls flush often could generate far more blob store write traffic than they intend to.
|
||||
// Flush can't really do what the caller would "want" for a blob store file. The caller would probably notionally
|
||||
// want all bytes written to be at least in transit to the blob store, but that is not very feasible. The blob
|
||||
// store has a minimum size requirement for all but the final part, and parts must be sent with a header that
|
||||
// specifies their size. So in the case of a write buffer that does not meet the part minimum size the part could
|
||||
// be sent but then if there is any more data written then that part needs to be sent again in its entirety. So a
|
||||
// client that calls flush often could generate far more blob store write traffic than they intend to.
|
||||
Future<Void> flush() override { return Void(); }
|
||||
|
||||
Future<int64_t> size() const override { return m_cursor; }
|
||||
|
||||
Future<Void> readZeroCopy(void** data, int* length, int64_t offset) override {
|
||||
TraceEvent(SevError, "ReadZeroCopyNotSupported").detail("FileType", "BlobStoreWrite");
|
||||
TraceEvent(SevError, "ReadZeroCopyNotSupported").detail("FileType", "S3BlobStoreWrite");
|
||||
return platform_error();
|
||||
}
|
||||
void releaseZeroCopy(void* data, int length, int64_t offset) override {}
|
||||
|
||||
int64_t debugFD() const override { return -1; }
|
||||
|
||||
~AsyncFileBlobStoreWrite() override {
|
||||
~AsyncFileS3BlobStoreWrite() override {
|
||||
m_upload_id.cancel();
|
||||
m_finished.cancel();
|
||||
m_parts.clear(); // Contains futures
|
||||
m_parts.clear(); // Contains futures
|
||||
}
|
||||
|
||||
std::string getFilename() const override { return m_object; }
|
||||
|
||||
private:
|
||||
Reference<BlobStoreEndpoint> m_bstore;
|
||||
Reference<S3BlobStoreEndpoint> m_bstore;
|
||||
std::string m_bucket;
|
||||
std::string m_object;
|
||||
|
||||
|
@ -216,48 +220,46 @@ private:
|
|||
FlowLock m_concurrentUploads;
|
||||
|
||||
// End the current part and start uploading it, but also wait for a part to finish if too many are in transit.
|
||||
ACTOR static Future<Void> endCurrentPart(AsyncFileBlobStoreWrite *f, bool startNew = false) {
|
||||
if(f->m_parts.back()->length == 0)
|
||||
return Void();
|
||||
ACTOR static Future<Void> endCurrentPart(AsyncFileS3BlobStoreWrite* f, bool startNew = false) {
|
||||
if (f->m_parts.back()->length == 0) return Void();
|
||||
|
||||
// Wait for an upload slot to be available
|
||||
wait(f->m_concurrentUploads.take());
|
||||
|
||||
// Do the upload, and if it fails forward errors to m_error and also stop if anything else sends an error to m_error
|
||||
// Also, hold a releaser for the concurrent upload slot while all that is going on.
|
||||
// Do the upload, and if it fails forward errors to m_error and also stop if anything else sends an error to
|
||||
// m_error Also, hold a releaser for the concurrent upload slot while all that is going on.
|
||||
auto releaser = std::make_shared<FlowLock::Releaser>(f->m_concurrentUploads, 1);
|
||||
f->m_parts.back()->etag =
|
||||
holdWhile(std::move(releaser), joinErrorGroup(doPartUpload(f, f->m_parts.back().getPtr()), f->m_error));
|
||||
|
||||
// Make a new part to write to
|
||||
if(startNew)
|
||||
f->m_parts.push_back(Reference<Part>(new Part(f->m_parts.size() + 1, f->m_bstore->knobs.multipart_min_part_size)));
|
||||
if (startNew)
|
||||
f->m_parts.push_back(
|
||||
Reference<Part>(new Part(f->m_parts.size() + 1, f->m_bstore->knobs.multipart_min_part_size)));
|
||||
|
||||
return Void();
|
||||
}
|
||||
|
||||
Future<std::string> getUploadID() {
|
||||
if(!m_upload_id.isValid())
|
||||
m_upload_id = m_bstore->beginMultiPartUpload(m_bucket, m_object);
|
||||
if (!m_upload_id.isValid()) m_upload_id = m_bstore->beginMultiPartUpload(m_bucket, m_object);
|
||||
return m_upload_id;
|
||||
}
|
||||
|
||||
public:
|
||||
AsyncFileBlobStoreWrite(Reference<BlobStoreEndpoint> bstore, std::string bucket, std::string object)
|
||||
: m_bstore(bstore), m_bucket(bucket), m_object(object), m_cursor(0), m_concurrentUploads(bstore->knobs.concurrent_writes_per_file) {
|
||||
AsyncFileS3BlobStoreWrite(Reference<S3BlobStoreEndpoint> bstore, std::string bucket, std::string object)
|
||||
: m_bstore(bstore), m_bucket(bucket), m_object(object), m_cursor(0),
|
||||
m_concurrentUploads(bstore->knobs.concurrent_writes_per_file) {
|
||||
|
||||
// Add first part
|
||||
m_parts.push_back(Reference<Part>(new Part(1, m_bstore->knobs.multipart_min_part_size)));
|
||||
}
|
||||
|
||||
};
|
||||
|
||||
|
||||
// This class represents a read-only file that lives in an S3-style blob store. It reads using the REST API.
|
||||
class AsyncFileBlobStoreRead : public IAsyncFile, public ReferenceCounted<AsyncFileBlobStoreRead> {
|
||||
class AsyncFileS3BlobStoreRead : public IAsyncFile, public ReferenceCounted<AsyncFileS3BlobStoreRead> {
|
||||
public:
|
||||
virtual void addref() { ReferenceCounted<AsyncFileBlobStoreRead>::addref(); }
|
||||
virtual void delref() { ReferenceCounted<AsyncFileBlobStoreRead>::delref(); }
|
||||
virtual void addref() { ReferenceCounted<AsyncFileS3BlobStoreRead>::addref(); }
|
||||
virtual void delref() { ReferenceCounted<AsyncFileS3BlobStoreRead>::delref(); }
|
||||
|
||||
Future<int> read(void* data, int length, int64_t offset) override;
|
||||
|
||||
|
@ -270,7 +272,7 @@ public:
|
|||
Future<int64_t> size() const override;
|
||||
|
||||
Future<Void> readZeroCopy(void** data, int* length, int64_t offset) override {
|
||||
TraceEvent(SevError, "ReadZeroCopyNotSupported").detail("FileType", "BlobStoreRead");
|
||||
TraceEvent(SevError, "ReadZeroCopyNotSupported").detail("FileType", "S3BlobStoreRead");
|
||||
return platform_error();
|
||||
}
|
||||
void releaseZeroCopy(void* data, int length, int64_t offset) override {}
|
||||
|
@ -279,17 +281,15 @@ public:
|
|||
|
||||
std::string getFilename() const override { return m_object; }
|
||||
|
||||
virtual ~AsyncFileBlobStoreRead() {}
|
||||
virtual ~AsyncFileS3BlobStoreRead() {}
|
||||
|
||||
Reference<BlobStoreEndpoint> m_bstore;
|
||||
Reference<S3BlobStoreEndpoint> m_bstore;
|
||||
std::string m_bucket;
|
||||
std::string m_object;
|
||||
mutable Future<int64_t> m_size;
|
||||
|
||||
AsyncFileBlobStoreRead(Reference<BlobStoreEndpoint> bstore, std::string bucket, std::string object)
|
||||
: m_bstore(bstore), m_bucket(bucket), m_object(object) {
|
||||
}
|
||||
|
||||
AsyncFileS3BlobStoreRead(Reference<S3BlobStoreEndpoint> bstore, std::string bucket, std::string object)
|
||||
: m_bstore(bstore), m_bucket(bucket), m_object(object) {}
|
||||
};
|
||||
|
||||
#include "flow/unactorcompiler.h"
|
|
@ -0,0 +1,88 @@
|
|||
/*
|
||||
* AsyncTaskThread.actor.cpp
|
||||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "fdbclient/AsyncTaskThread.h"
|
||||
#include "flow/UnitTest.h"
|
||||
#include "flow/actorcompiler.h" // This must be the last #include.
|
||||
|
||||
namespace {
|
||||
|
||||
class TerminateTask final : public IAsyncTask {
|
||||
public:
|
||||
void operator()() override { ASSERT(false); }
|
||||
bool isTerminate() const override { return true; }
|
||||
};
|
||||
|
||||
ACTOR Future<Void> asyncTaskThreadClient(AsyncTaskThread* asyncTaskThread, int* sum, int count) {
|
||||
state int i = 0;
|
||||
for (; i < count; ++i) {
|
||||
wait(asyncTaskThread->execAsync([sum = sum] {
|
||||
++(*sum);
|
||||
return Void();
|
||||
}));
|
||||
}
|
||||
return Void();
|
||||
}
|
||||
|
||||
} // namespace
|
||||
|
||||
const double AsyncTaskThread::meanDelay = 0.01;
|
||||
|
||||
AsyncTaskThread::AsyncTaskThread() : thread([this] { run(this); }) {}
|
||||
|
||||
AsyncTaskThread::~AsyncTaskThread() {
|
||||
bool wakeUp = false;
|
||||
{
|
||||
std::lock_guard<std::mutex> g(m);
|
||||
wakeUp = queue.push(std::make_shared<TerminateTask>());
|
||||
}
|
||||
if (wakeUp) {
|
||||
cv.notify_one();
|
||||
}
|
||||
thread.join();
|
||||
}
|
||||
|
||||
void AsyncTaskThread::run(AsyncTaskThread* self) {
|
||||
while (true) {
|
||||
std::shared_ptr<IAsyncTask> task;
|
||||
{
|
||||
std::unique_lock<std::mutex> lk(self->m);
|
||||
self->cv.wait(lk, [self] { return !self->queue.canSleep(); });
|
||||
task = self->queue.pop().get();
|
||||
if (task->isTerminate()) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
(*task)();
|
||||
}
|
||||
}
|
||||
|
||||
TEST_CASE("/asynctaskthread/add") {
|
||||
state int sum = 0;
|
||||
state AsyncTaskThread asyncTaskThread;
|
||||
std::vector<Future<Void>> clients;
|
||||
clients.reserve(10);
|
||||
for (int i = 0; i < 10; ++i) {
|
||||
clients.push_back(asyncTaskThreadClient(&asyncTaskThread, &sum, 100));
|
||||
}
|
||||
wait(waitForAll(clients));
|
||||
ASSERT(sum == 1000);
|
||||
return Void();
|
||||
}
|
|
@ -0,0 +1,98 @@
|
|||
/*
|
||||
* AsyncTaskThread.h
|
||||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef __ASYNC_TASK_THREAD_H__
|
||||
#define __ASYNC_TASK_THREAD_H__
|
||||
|
||||
#include <thread>
|
||||
#include <memory>
|
||||
#include <mutex>
|
||||
|
||||
#include "flow/network.h"
|
||||
#include "flow/ThreadHelper.actor.h"
|
||||
#include "flow/ThreadSafeQueue.h"
|
||||
|
||||
class IAsyncTask {
|
||||
public:
|
||||
virtual void operator()() = 0;
|
||||
virtual ~IAsyncTask() = default;
|
||||
virtual bool isTerminate() const = 0;
|
||||
};
|
||||
|
||||
template <class F>
|
||||
class AsyncTask final : public IAsyncTask {
|
||||
F func;
|
||||
|
||||
public:
|
||||
AsyncTask(const F& func) : func(func) {}
|
||||
|
||||
void operator()() override { func(); }
|
||||
bool isTerminate() const override { return false; }
|
||||
};
|
||||
|
||||
class AsyncTaskThread {
|
||||
ThreadSafeQueue<std::shared_ptr<IAsyncTask>> queue;
|
||||
std::condition_variable cv;
|
||||
std::mutex m;
|
||||
std::thread thread;
|
||||
|
||||
static void run(AsyncTaskThread* self);
|
||||
|
||||
template <class F>
|
||||
void addTask(const F& func) {
|
||||
bool wakeUp = false;
|
||||
{
|
||||
std::lock_guard<std::mutex> g(m);
|
||||
wakeUp = queue.push(std::make_shared<AsyncTask<F>>(func));
|
||||
}
|
||||
if (wakeUp) {
|
||||
cv.notify_one();
|
||||
}
|
||||
}
|
||||
|
||||
static const double meanDelay;
|
||||
|
||||
public:
|
||||
AsyncTaskThread();
|
||||
|
||||
// Warning: This destructor can hang if a task hangs, so it is
|
||||
// up to the caller to prevent tasks from hanging indefinitely
|
||||
~AsyncTaskThread();
|
||||
|
||||
template <class F>
|
||||
auto execAsync(const F& func, TaskPriority priority = TaskPriority::DefaultOnMainThread)
|
||||
-> Future<decltype(func())> {
|
||||
if (g_network->isSimulated()) {
|
||||
return map(delayJittered(meanDelay), [func](Void _) { return func(); });
|
||||
}
|
||||
Promise<decltype(func())> promise;
|
||||
addTask([promise, func, priority] {
|
||||
try {
|
||||
auto funcResult = func();
|
||||
onMainThreadVoid([promise, funcResult] { promise.send(funcResult); }, nullptr, priority);
|
||||
} catch (Error& e) {
|
||||
onMainThreadVoid([promise, e] { promise.sendError(e); }, nullptr, priority);
|
||||
}
|
||||
});
|
||||
return promise.getFuture();
|
||||
}
|
||||
};
|
||||
|
||||
#endif
|
|
@ -269,6 +269,7 @@ public:
|
|||
|
||||
enum ERestoreState { UNITIALIZED = 0, QUEUED = 1, STARTING = 2, RUNNING = 3, COMPLETED = 4, ABORTED = 5 };
|
||||
static StringRef restoreStateText(ERestoreState id);
|
||||
static Key getPauseKey();
|
||||
|
||||
// parallel restore
|
||||
Future<Void> parallelRestoreFinish(Database cx, UID randomUID, bool unlockDB = true);
|
||||
|
@ -427,7 +428,8 @@ public:
|
|||
return runRYWTransaction(cx, [=](Reference<ReadYourWritesTransaction> tr){ return discontinueBackup(tr, tagName); });
|
||||
}
|
||||
|
||||
Future<Void> abortBackup(Database cx, Key tagName, bool partial = false, bool abortOldBackup = false, bool dstOnly = false);
|
||||
Future<Void> abortBackup(Database cx, Key tagName, bool partial = false, bool abortOldBackup = false,
|
||||
bool dstOnly = false, bool waitForDestUID = false);
|
||||
|
||||
Future<std::string> getStatus(Database cx, int errorLimit, Key tagName);
|
||||
|
||||
|
|
|
@ -638,7 +638,8 @@ ACTOR Future<Void> applyMutations(Database cx, Key uid, Key addPrefix, Key remov
|
|||
|
||||
for (int i = 0; i < ranges.size(); ++i) {
|
||||
results.push_back(PromiseStream<RCGroup>());
|
||||
locks.push_back(Reference<FlowLock>( new FlowLock(std::max(CLIENT_KNOBS->APPLY_MAX_LOCK_BYTES/ranges.size(), CLIENT_KNOBS->APPLY_MIN_LOCK_BYTES))));
|
||||
locks.push_back(makeReference<FlowLock>(
|
||||
std::max(CLIENT_KNOBS->APPLY_MAX_LOCK_BYTES / ranges.size(), CLIENT_KNOBS->APPLY_MIN_LOCK_BYTES)));
|
||||
rc.push_back(readCommitted(cx, results[i], locks[i], ranges[i], decodeBKMutationLogKey));
|
||||
}
|
||||
|
||||
|
|
File diff suppressed because it is too large
Load Diff
|
@ -18,8 +18,8 @@
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef FDBCLIENT_BackupContainer_H
|
||||
#define FDBCLIENT_BackupContainer_H
|
||||
#ifndef FDBCLIENT_BACKUP_CONTAINER_H
|
||||
#define FDBCLIENT_BACKUP_CONTAINER_H
|
||||
#pragma once
|
||||
|
||||
#include "flow/flow.h"
|
||||
|
@ -40,7 +40,7 @@ Future<Version> timeKeeperVersionFromDatetime(std::string const &datetime, Datab
|
|||
// TODO: Move the log file and range file format encoding/decoding stuff to this file and behind interfaces.
|
||||
class IBackupFile {
|
||||
public:
|
||||
IBackupFile(std::string fileName) : m_fileName(fileName), m_offset(0) {}
|
||||
IBackupFile(const std::string& fileName) : m_fileName(fileName), m_offset(0) {}
|
||||
virtual ~IBackupFile() {}
|
||||
// Backup files are append-only and cannot have more than 1 append outstanding at once.
|
||||
virtual Future<Void> append(const void *data, int len) = 0;
|
||||
|
@ -247,7 +247,7 @@ public:
|
|||
int64_t totalBytes) = 0;
|
||||
|
||||
// Open a file for read by name
|
||||
virtual Future<Reference<IAsyncFile>> readFile(std::string name) = 0;
|
||||
virtual Future<Reference<IAsyncFile>> readFile(const std::string& name) = 0;
|
||||
|
||||
// Returns the key ranges in the snapshot file. This is an expensive function
|
||||
// and should only be used in simulation for sanity check.
|
||||
|
@ -289,9 +289,9 @@ public:
|
|||
bool logsOnly = false, Version beginVersion = -1) = 0;
|
||||
|
||||
// Get an IBackupContainer based on a container spec string
|
||||
static Reference<IBackupContainer> openContainer(std::string url);
|
||||
static Reference<IBackupContainer> openContainer(const std::string& url);
|
||||
static std::vector<std::string> getURLFormats();
|
||||
static Future<std::vector<std::string>> listContainers(std::string baseURL);
|
||||
static Future<std::vector<std::string>> listContainers(const std::string& baseURL);
|
||||
|
||||
std::string getURL() const {
|
||||
return URL;
|
||||
|
@ -303,4 +303,4 @@ private:
|
|||
std::string URL;
|
||||
};
|
||||
|
||||
#endif
|
||||
#endif
|
||||
|
|
|
@ -0,0 +1,279 @@
|
|||
/*
|
||||
* BackupContainerAzureBlobStore.actor.cpp
|
||||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "fdbclient/BackupContainerAzureBlobStore.h"
|
||||
|
||||
#include "flow/actorcompiler.h" // This must be the last #include.
|
||||
|
||||
class BackupContainerAzureBlobStoreImpl {
|
||||
public:
|
||||
using AzureClient = azure::storage_lite::blob_client;
|
||||
|
||||
class ReadFile final : public IAsyncFile, ReferenceCounted<ReadFile> {
|
||||
AsyncTaskThread& asyncTaskThread;
|
||||
std::string containerName;
|
||||
std::string blobName;
|
||||
AzureClient* client;
|
||||
|
||||
public:
|
||||
ReadFile(AsyncTaskThread& asyncTaskThread, const std::string& containerName, const std::string& blobName,
|
||||
AzureClient* client)
|
||||
: asyncTaskThread(asyncTaskThread), containerName(containerName), blobName(blobName), client(client) {}
|
||||
|
||||
void addref() override { ReferenceCounted<ReadFile>::addref(); }
|
||||
void delref() override { ReferenceCounted<ReadFile>::delref(); }
|
||||
Future<int> read(void* data, int length, int64_t offset) {
|
||||
return asyncTaskThread.execAsync([client = this->client, containerName = this->containerName,
|
||||
blobName = this->blobName, data, length, offset] {
|
||||
std::ostringstream oss(std::ios::out | std::ios::binary);
|
||||
client->download_blob_to_stream(containerName, blobName, offset, length, oss);
|
||||
auto str = oss.str();
|
||||
memcpy(data, str.c_str(), str.size());
|
||||
return static_cast<int>(str.size());
|
||||
});
|
||||
}
|
||||
Future<Void> zeroRange(int64_t offset, int64_t length) override { throw file_not_writable(); }
|
||||
Future<Void> write(void const* data, int length, int64_t offset) override { throw file_not_writable(); }
|
||||
Future<Void> truncate(int64_t size) override { throw file_not_writable(); }
|
||||
Future<Void> sync() override { throw file_not_writable(); }
|
||||
Future<int64_t> size() const override {
|
||||
return asyncTaskThread.execAsync([client = this->client, containerName = this->containerName,
|
||||
blobName = this->blobName] {
|
||||
return static_cast<int64_t>(client->get_blob_properties(containerName, blobName).get().response().size);
|
||||
});
|
||||
}
|
||||
std::string getFilename() const override { return blobName; }
|
||||
int64_t debugFD() const override { return 0; }
|
||||
};
|
||||
|
||||
class WriteFile final : public IAsyncFile, ReferenceCounted<WriteFile> {
|
||||
AsyncTaskThread& asyncTaskThread;
|
||||
AzureClient* client;
|
||||
std::string containerName;
|
||||
std::string blobName;
|
||||
int64_t m_cursor{ 0 };
|
||||
// Ideally this buffer should not be a string, but
|
||||
// the Azure SDK only supports/tests uploading to append
|
||||
// blobs from a stringstream.
|
||||
std::string buffer;
|
||||
|
||||
static constexpr size_t bufferLimit = 1 << 20;
|
||||
|
||||
public:
|
||||
WriteFile(AsyncTaskThread& asyncTaskThread, const std::string& containerName, const std::string& blobName,
|
||||
AzureClient* client)
|
||||
: asyncTaskThread(asyncTaskThread), containerName(containerName), blobName(blobName), client(client) {}
|
||||
|
||||
void addref() override { ReferenceCounted<WriteFile>::addref(); }
|
||||
void delref() override { ReferenceCounted<WriteFile>::delref(); }
|
||||
Future<int> read(void* data, int length, int64_t offset) override { throw file_not_readable(); }
|
||||
Future<Void> write(void const* data, int length, int64_t offset) override {
|
||||
if (offset != m_cursor) {
|
||||
throw non_sequential_op();
|
||||
}
|
||||
m_cursor += length;
|
||||
auto p = static_cast<char const*>(data);
|
||||
buffer.insert(buffer.cend(), p, p + length);
|
||||
if (buffer.size() > bufferLimit) {
|
||||
return sync();
|
||||
} else {
|
||||
return Void();
|
||||
}
|
||||
}
|
||||
Future<Void> truncate(int64_t size) override {
|
||||
if (size != m_cursor) {
|
||||
throw non_sequential_op();
|
||||
}
|
||||
return Void();
|
||||
}
|
||||
Future<Void> sync() override {
|
||||
auto movedBuffer = std::move(buffer);
|
||||
buffer.clear();
|
||||
return asyncTaskThread.execAsync([client = this->client, containerName = this->containerName,
|
||||
blobName = this->blobName, buffer = std::move(movedBuffer)] {
|
||||
std::istringstream iss(std::move(buffer));
|
||||
auto resp = client->append_block_from_stream(containerName, blobName, iss).get();
|
||||
return Void();
|
||||
});
|
||||
}
|
||||
Future<int64_t> size() const override {
|
||||
return asyncTaskThread.execAsync(
|
||||
[client = this->client, containerName = this->containerName, blobName = this->blobName] {
|
||||
auto resp = client->get_blob_properties(containerName, blobName).get().response();
|
||||
ASSERT(resp.valid()); // TODO: Should instead throw here
|
||||
return static_cast<int64_t>(resp.size);
|
||||
});
|
||||
}
|
||||
std::string getFilename() const override { return blobName; }
|
||||
int64_t debugFD() const override { return -1; }
|
||||
};
|
||||
|
||||
class BackupFile final : public IBackupFile, ReferenceCounted<BackupFile> {
|
||||
Reference<IAsyncFile> m_file;
|
||||
|
||||
public:
|
||||
BackupFile(const std::string& fileName, Reference<IAsyncFile> file) : IBackupFile(fileName), m_file(file) {}
|
||||
Future<Void> append(const void* data, int len) override {
|
||||
Future<Void> r = m_file->write(data, len, m_offset);
|
||||
m_offset += len;
|
||||
return r;
|
||||
}
|
||||
Future<Void> finish() override {
|
||||
Reference<BackupFile> self = Reference<BackupFile>::addRef(this);
|
||||
return map(m_file->sync(), [=](Void _) {
|
||||
self->m_file.clear();
|
||||
return Void();
|
||||
});
|
||||
}
|
||||
void addref() override { ReferenceCounted<BackupFile>::addref(); }
|
||||
void delref() override { ReferenceCounted<BackupFile>::delref(); }
|
||||
};
|
||||
|
||||
static bool isDirectory(const std::string& blobName) { return blobName.size() && blobName.back() == '/'; }
|
||||
|
||||
ACTOR static Future<Reference<IAsyncFile>> readFile(BackupContainerAzureBlobStore* self, std::string fileName) {
|
||||
bool exists = wait(self->blobExists(fileName));
|
||||
if (!exists) {
|
||||
throw file_not_found();
|
||||
}
|
||||
return Reference<IAsyncFile>(
|
||||
new ReadFile(self->asyncTaskThread, self->containerName, fileName, self->client.get()));
|
||||
}
|
||||
|
||||
ACTOR static Future<Reference<IBackupFile>> writeFile(BackupContainerAzureBlobStore* self, std::string fileName) {
|
||||
wait(self->asyncTaskThread.execAsync(
|
||||
[client = self->client.get(), containerName = self->containerName, fileName = fileName] {
|
||||
auto outcome = client->create_append_blob(containerName, fileName).get();
|
||||
return Void();
|
||||
}));
|
||||
return Reference<IBackupFile>(
|
||||
new BackupFile(fileName, Reference<IAsyncFile>(new WriteFile(self->asyncTaskThread, self->containerName,
|
||||
fileName, self->client.get()))));
|
||||
}
|
||||
|
||||
// Recursively lists all blobs under `path`, appending (name, size) pairs to
// `result`. Subtrees are descended only when folderPathFilter accepts them.
static void listFiles(AzureClient* client, const std::string& containerName, const std::string& path,
                      std::function<bool(std::string const&)> folderPathFilter,
                      BackupContainerFileSystem::FilesAndSizesT& result) {
	auto resp = client->list_blobs_segmented(containerName, "/", "", path).get().response();
	for (const auto& blob : resp.blobs) {
		// The public listFiles() interface defaults folderPathFilter to nullptr;
		// invoking an empty std::function throws bad_function_call, so a null
		// filter must be treated as "accept every folder".
		if (isDirectory(blob.name) && (!folderPathFilter || folderPathFilter(blob.name))) {
			listFiles(client, containerName, blob.name, folderPathFilter, result);
		} else {
			result.emplace_back(blob.name, blob.content_length);
		}
	}
}
|
||||
|
||||
// Deletes the entire Azure container. If pNumDeleted is non-null it is
// incremented by the number of files the container held so callers can
// report progress.
ACTOR static Future<Void> deleteContainer(BackupContainerAzureBlobStore* self, int* pNumDeleted) {
	state int filesToDelete = 0;
	if (pNumDeleted) {
		// Only pay for a full listing when the caller asked for a count.
		BackupContainerFileSystem::FilesAndSizesT files = wait(self->listFiles());
		filesToDelete = files.size();
	}
	wait(self->asyncTaskThread.execAsync([containerName = self->containerName, client = self->client.get()] {
		client->delete_container(containerName).wait();
		return Void();
	}));
	if (pNumDeleted) {
		*pNumDeleted += filesToDelete;
	}
	return Void();
}
|
||||
};
|
||||
|
||||
// True iff the named blob currently exists in this backup's container.
// The Azure property query runs (and blocks) on the dedicated task thread.
Future<bool> BackupContainerAzureBlobStore::blobExists(const std::string& fileName) {
	return asyncTaskThread.execAsync(
	    [containerName = this->containerName, fileName = fileName, client = this->client.get()] {
		    auto properties = client->get_blob_properties(containerName, fileName).get().response();
		    return properties.valid();
	    });
}
|
||||
|
||||
// Builds an Azure blob client for the given endpoint/account/container.
// The account key is read from the AZURE_KEY environment variable.
BackupContainerAzureBlobStore::BackupContainerAzureBlobStore(const NetworkAddress& address,
                                                             const std::string& accountName,
                                                             const std::string& containerName)
  : containerName(containerName) {
	// std::getenv returns nullptr when the variable is unset; constructing a
	// std::string from a null pointer is undefined behavior, so fail with an
	// explicit error instead.
	const char* accountKeyEnv = std::getenv("AZURE_KEY");
	if (accountKeyEnv == nullptr) {
		IBackupContainer::lastOpenError = "The AZURE_KEY environment variable must be set";
		throw backup_invalid_url();
	}
	std::string accountKey = accountKeyEnv;

	auto credential = std::make_shared<azure::storage_lite::shared_key_credential>(accountName, accountKey);
	auto storageAccount = std::make_shared<azure::storage_lite::storage_account>(
	    accountName, credential, false, format("http://%s/%s", address.toString().c_str(), accountName.c_str()));

	// Single-threaded client; all calls are serialized through asyncTaskThread.
	client = std::make_unique<AzureClient>(storageAccount, 1);
}
|
||||
|
||||
void BackupContainerAzureBlobStore::addref() {
|
||||
return ReferenceCounted<BackupContainerAzureBlobStore>::addref();
|
||||
}
|
||||
void BackupContainerAzureBlobStore::delref() {
|
||||
return ReferenceCounted<BackupContainerAzureBlobStore>::delref();
|
||||
}
|
||||
|
||||
// Creates the Azure container; the blocking .wait() runs on the task thread.
Future<Void> BackupContainerAzureBlobStore::create() {
	return asyncTaskThread.execAsync([client = this->client.get(), containerName = this->containerName] {
		client->create_container(containerName).wait();
		return Void();
	});
}
|
||||
// True iff the container itself exists (its properties query is valid).
Future<bool> BackupContainerAzureBlobStore::exists() {
	return asyncTaskThread.execAsync([client = this->client.get(), containerName = this->containerName] {
		auto props = client->get_container_properties(containerName).get().response();
		return props.valid();
	});
}
|
||||
|
||||
// Public entry point; delegates to the friend impl class holding the actors.
Future<Reference<IAsyncFile>> BackupContainerAzureBlobStore::readFile(const std::string& fileName) {
	return BackupContainerAzureBlobStoreImpl::readFile(this, fileName);
}
|
||||
|
||||
// Public entry point; delegates to the friend impl class holding the actors.
Future<Reference<IBackupFile>> BackupContainerAzureBlobStore::writeFile(const std::string& fileName) {
	return BackupContainerAzureBlobStoreImpl::writeFile(this, fileName);
}
|
||||
|
||||
// Lists (fileName, size) pairs under `path`. All state is captured by value
// because the lambda executes later on the Azure task thread.
Future<BackupContainerFileSystem::FilesAndSizesT> BackupContainerAzureBlobStore::listFiles(
    const std::string& path, std::function<bool(std::string const&)> folderPathFilter) {
	return asyncTaskThread.execAsync([client = this->client.get(), containerName = this->containerName, path = path,
	                                  folderPathFilter = folderPathFilter] {
		FilesAndSizesT result;
		BackupContainerAzureBlobStoreImpl::listFiles(client, containerName, path, folderPathFilter, result);
		return result;
	});
}
|
||||
|
||||
// Deletes a single blob; the future completes when the Azure call finishes.
Future<Void> BackupContainerAzureBlobStore::deleteFile(const std::string& fileName) {
	return asyncTaskThread.execAsync(
	    [client = client.get(), containerName = this->containerName, fileName = fileName] {
		    client->delete_blob(containerName, fileName).wait();
		    return Void();
	    });
}
|
||||
|
||||
// Deletes the whole container; if pNumDeleted is non-null it is incremented
// by the number of files the container held.
Future<Void> BackupContainerAzureBlobStore::deleteContainer(int* pNumDeleted) {
	return BackupContainerAzureBlobStoreImpl::deleteContainer(this, pNumDeleted);
}
|
||||
|
||||
// Enumerating existing Azure backup URLs is not yet supported; returns an
// empty list unconditionally.
Future<std::vector<std::string>> BackupContainerAzureBlobStore::listURLs(const std::string& baseURL) {
	// TODO: Implement this
	return std::vector<std::string>{};
}
|
||||
|
||||
// Human-readable URL template shown in help/error text for this container type.
std::string BackupContainerAzureBlobStore::getURLFormat() {
	return "azure://<ip>:<port>/<accountname>/<container>/<path_to_file>";
}
|
|
@ -0,0 +1,71 @@
|
|||
/*
|
||||
* BackupContainerAzureBlobStore.h
|
||||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#if (!defined FDBCLIENT_BACKUP_CONTAINER_AZURE_BLOBSTORE_H) && (defined BUILD_AZURE_BACKUP)
|
||||
#define FDBCLIENT_BACKUP_CONTAINER_AZURE_BLOBSTORE_H
|
||||
#pragma once
|
||||
|
||||
#include "fdbclient/AsyncTaskThread.h"
|
||||
#include "fdbclient/BackupContainerFileSystem.h"
|
||||
|
||||
#include "storage_credential.h"
|
||||
#include "storage_account.h"
|
||||
#include "blob/blob_client.h"
|
||||
|
||||
// Backup container storing backup files as blobs in a single Azure container.
// All Azure SDK calls (which block) are funneled through asyncTaskThread so
// the FDB network thread is never blocked.
class BackupContainerAzureBlobStore final : public BackupContainerFileSystem,
                                            ReferenceCounted<BackupContainerAzureBlobStore> {
	using AzureClient = azure::storage_lite::blob_client;

	std::unique_ptr<AzureClient> client;
	std::string containerName;
	// Dedicated thread on which the blocking Azure SDK calls are executed.
	AsyncTaskThread asyncTaskThread;

	// True iff the named blob exists in this container.
	Future<bool> blobExists(const std::string& fileName);

	friend class BackupContainerAzureBlobStoreImpl;

public:
	// Reads the account key from the AZURE_KEY environment variable.
	BackupContainerAzureBlobStore(const NetworkAddress& address, const std::string& accountName,
	                              const std::string& containerName);

	void addref() override;
	void delref() override;

	Future<Void> create() override;

	Future<bool> exists() override;

	Future<Reference<IAsyncFile>> readFile(const std::string& fileName) override;

	Future<Reference<IBackupFile>> writeFile(const std::string& fileName) override;

	Future<FilesAndSizesT> listFiles(const std::string& path = "",
	                                 std::function<bool(std::string const&)> folderPathFilter = nullptr) override;

	Future<Void> deleteFile(const std::string& fileName) override;

	Future<Void> deleteContainer(int* pNumDeleted) override;

	// Not yet implemented; returns an empty list.
	static Future<std::vector<std::string>> listURLs(const std::string& baseURL);

	static std::string getURLFormat();
};
|
||||
|
||||
#endif
|
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,182 @@
|
|||
/*
|
||||
* BackupContainerFileSystem.h
|
||||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef FDBCLIENT_BACKUP_CONTAINER_FILESYSTEM_H
|
||||
#define FDBCLIENT_BACKUP_CONTAINER_FILESYSTEM_H
|
||||
#pragma once
|
||||
|
||||
#include "fdbclient/BackupContainer.h"
|
||||
#include "fdbclient/FDBTypes.h"
|
||||
#include "flow/Trace.h"
|
||||
|
||||
#include "fdbclient/BackupContainer.h"
|
||||
|
||||
/* BackupContainerFileSystem implements a backup container which stores files in a nested folder structure.
|
||||
* Inheritors must only define methods for writing, reading, deleting, sizing, and listing files.
|
||||
*
|
||||
* Snapshot manifests (a complete set of files constituting a database snapshot for the backup's target ranges)
|
||||
* are stored as JSON files at paths like
|
||||
* /snapshots/snapshot,minVersion,maxVersion,totalBytes
|
||||
*
|
||||
* Key range files for snapshots are stored at paths like
|
||||
* /kvranges/snapshot,startVersion/N/range,version,uid,blockSize
|
||||
* where startVersion is the version at which the backup snapshot execution began and N is a number
|
||||
* that is increased as key range files are generated over time (at varying rates) such that there
|
||||
* are around 5,000 key range files in each folder.
|
||||
*
|
||||
* Note that startVersion will NOT correspond to the minVersion of a snapshot manifest because
|
||||
* snapshot manifest min/max versions are based on the actual contained data and the first data
|
||||
* file written will be after the start version of the snapshot's execution.
|
||||
*
|
||||
* Log files are at file paths like
|
||||
* /plogs/.../log,startVersion,endVersion,UID,tagID-of-N,blocksize
|
||||
* /logs/.../log,startVersion,endVersion,UID,blockSize
|
||||
* where ... is a multi level path which sorts lexically into version order and results in approximately 1
|
||||
* unique folder per day containing about 5,000 files. Logs after FDB 6.3 are stored in "plogs"
|
||||
* directory and are partitioned according to tagIDs (0, 1, 2, ...) and the total number partitions is N.
|
||||
* Old backup logs FDB 6.2 and earlier are stored in "logs" directory and are not partitioned.
|
||||
* After FDB 6.3, users can choose to use the new partitioned logs or old logs.
|
||||
*
|
||||
*
|
||||
* BACKWARD COMPATIBILITY
|
||||
*
|
||||
* Prior to FDB version 6.0.16, key range files were stored using a different folder scheme. Newer versions
|
||||
* still support this scheme for all restore and backup management operations but key range files generated
|
||||
* by backup using version 6.0.16 or later use the scheme describe above.
|
||||
*
|
||||
* The old format stored key range files at paths like
|
||||
* /ranges/.../range,version,uid,blockSize
|
||||
* where ... is a multi level path which sorts lexically into version order and results in up to approximately
|
||||
* 900 unique folders per day. The number of files per folder depends on the configured snapshot rate and
|
||||
* database size and will vary from 1 to around 5,000.
|
||||
*/
|
||||
class BackupContainerFileSystem : public IBackupContainer {
public:
	// Reference counting is left to the concrete container implementations.
	void addref() override = 0;
	void delref() override = 0;

	BackupContainerFileSystem() {}
	virtual ~BackupContainerFileSystem() {}

	// Create the container
	Future<Void> create() override = 0;
	Future<bool> exists() override = 0;

	// Get a list of fileNames and their sizes in the container under the given path
	// Although not required, an implementation can avoid traversing unwanted subfolders
	// by calling folderPathFilter(absoluteFolderPath) and checking for a false return value.
	using FilesAndSizesT = std::vector<std::pair<std::string, int64_t>>;
	virtual Future<FilesAndSizesT> listFiles(const std::string& path = "",
	                                         std::function<bool(std::string const&)> folderPathFilter = nullptr) = 0;

	// Open a file for read by fileName
	Future<Reference<IAsyncFile>> readFile(const std::string& fileName) override = 0;

	// Open a file for write by fileName
	virtual Future<Reference<IBackupFile>> writeFile(const std::string& fileName) = 0;

	// Delete a file
	virtual Future<Void> deleteFile(const std::string& fileName) = 0;

	// Delete entire container. During the process, if pNumDeleted is not null it will be
	// updated with the count of deleted files so that progress can be seen.
	Future<Void> deleteContainer(int* pNumDeleted) override = 0;

	// Writes a (non-partitioned) mutation log file covering [beginVersion, endVersion].
	Future<Reference<IBackupFile>> writeLogFile(Version beginVersion, Version endVersion, int blockSize) final;

	// Writes a partitioned mutation log file for one of totalTags log router tags.
	Future<Reference<IBackupFile>> writeTaggedLogFile(Version beginVersion, Version endVersion, int blockSize,
	                                                  uint16_t tagId, int totalTags) final;

	Future<Reference<IBackupFile>> writeRangeFile(Version snapshotBeginVersion, int snapshotFileCount,
	                                              Version fileVersion, int blockSize) override;

	Future<std::pair<std::vector<RangeFile>, std::map<std::string, KeyRange>>> readKeyspaceSnapshot(
	    KeyspaceSnapshotFile snapshot);

	Future<Void> writeKeyspaceSnapshotFile(const std::vector<std::string>& fileNames,
	                                       const std::vector<std::pair<Key, Key>>& beginEndKeys,
	                                       int64_t totalBytes) final;

	// List log files, unsorted, which contain data at any version >= beginVersion and <= targetVersion.
	// "partitioned" flag indicates if new partitioned mutation logs or old logs should be listed.
	Future<std::vector<LogFile>> listLogFiles(Version beginVersion, Version targetVersion, bool partitioned);

	// List range files, unsorted, which contain data at or between beginVersion and endVersion
	// Note: The contents of each top level snapshot.N folder do not necessarily constitute a valid snapshot
	// and therefore listing files is not how RestoreSets are obtained.
	// Note: Snapshots partially written using FDB versions prior to 6.0.16 will have some range files stored
	// using the old folder scheme read by old_listRangeFiles
	Future<std::vector<RangeFile>> listRangeFiles(Version beginVersion, Version endVersion);

	// List snapshots which have been fully written, in sorted beginVersion order, which start before end and finish on
	// or after begin
	Future<std::vector<KeyspaceSnapshotFile>> listKeyspaceSnapshots(Version begin = 0,
	                                                                Version end = std::numeric_limits<Version>::max());

	Future<BackupFileList> dumpFileList(Version begin, Version end) override;

	// Uses the virtual methods to describe the backup contents
	Future<BackupDescription> describeBackup(bool deepScan, Version logStartVersionOverride) final;

	// Delete all data up to (but not including endVersion)
	Future<Void> expireData(Version expireEndVersion, bool force, ExpireProgress* progress,
	                        Version restorableBeginVersion) final;

	Future<KeyRange> getSnapshotFileKeyRange(const RangeFile& file) final;

	Future<Optional<RestorableFileSet>> getRestoreSet(Version targetVersion, VectorRef<KeyRangeRef> keyRangesFilter,
	                                                  bool logsOnly, Version beginVersion) final;

private:
	// A named Version value persisted as a small file under properties/<name>.
	struct VersionProperty {
		VersionProperty(Reference<BackupContainerFileSystem> bc, const std::string& name)
		  : bc(bc), path("properties/" + name) {}
		Reference<BackupContainerFileSystem> bc;
		std::string path;
		Future<Optional<Version>> get();
		Future<Void> set(Version v);
		Future<Void> clear();
	};

	// To avoid the need to scan the underlying filesystem in many cases, some important version boundaries are stored
	// in named files. These versions also indicate what version ranges are known to be deleted or partially deleted.
	//
	// The values below describe version ranges as follows:
	//                   0 - expiredEndVersion      All files in this range have been deleted
	//   expiredEndVersion - unreliableEndVersion   Some files in this range may have been deleted.
	//
	//     logBeginVersion - logEnd                 Log files are contiguous in this range and have NOT been deleted by
	//     fdbbackup logEnd - infinity              Files in this range may or may not exist yet
	//
	VersionProperty logBeginVersion();
	VersionProperty logEndVersion();
	VersionProperty expiredEndVersion();
	VersionProperty unreliableEndVersion();
	VersionProperty logType();

	// List range files, unsorted, which contain data at or between beginVersion and endVersion
	// NOTE: This reads the range file folder schema from FDB 6.0.15 and earlier and is provided for backward
	// compatibility
	Future<std::vector<RangeFile>> old_listRangeFiles(Version beginVersion, Version endVersion);

	friend class BackupContainerFileSystemImpl;
};
|
||||
|
||||
#endif
|
|
@ -0,0 +1,255 @@
|
|||
/*
|
||||
* BackupContainerLocalDirectory.actor.cpp
|
||||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "fdbclient/BackupContainerLocalDirectory.h"
|
||||
#include "fdbrpc/AsyncFileReadAhead.actor.h"
|
||||
#include "fdbrpc/IAsyncFile.h"
|
||||
#include "flow/Platform.actor.h"
|
||||
#include "flow/Platform.h"
|
||||
#include "fdbrpc/simulator.h"
|
||||
#include "flow/actorcompiler.h" // This must be the last #include.
|
||||
|
||||
namespace {
|
||||
|
||||
// IBackupFile for the local-directory container: data is written to a ".temp"
// file and atomically renamed to its final path when finish() completes.
class BackupFile final : public IBackupFile, ReferenceCounted<BackupFile> {
public:
	BackupFile(std::string fileName, Reference<IAsyncFile> file, std::string finalFullPath)
	  : IBackupFile(fileName), m_file(file), m_finalFullPath(finalFullPath) {}

	// Issues the write at the current offset and advances the offset
	// immediately; the returned future completes when the write does.
	// `override` added: this implements the IBackupFile virtual (as the other
	// container backends mark it).
	Future<Void> append(const void* data, int len) override {
		Future<Void> r = m_file->write(data, len, m_offset);
		m_offset += len;
		return r;
	}

	ACTOR static Future<Void> finish_impl(Reference<BackupFile> f) {
		wait(f->m_file->truncate(f->size())); // Some IAsyncFile implementations extend in whole block sizes.
		wait(f->m_file->sync());
		std::string name = f->m_file->getFilename();
		f->m_file.clear();
		// Atomically publish the completed file under its final name.
		renameFile(name, f->m_finalFullPath);
		return Void();
	}

	Future<Void> finish() override { return finish_impl(Reference<BackupFile>::addRef(this)); }

	void addref() override { return ReferenceCounted<BackupFile>::addref(); }
	void delref() override { return ReferenceCounted<BackupFile>::delref(); }

private:
	Reference<IAsyncFile> m_file;
	std::string m_finalFullPath;
};
|
||||
|
||||
// Recursively lists files under joinPath(m_path, path), returning paths
// relative to the container root together with their sizes.
ACTOR static Future<BackupContainerFileSystem::FilesAndSizesT> listFiles_impl(std::string path, std::string m_path) {
	state std::vector<std::string> files;
	wait(platform::findFilesRecursivelyAsync(joinPath(m_path, path), &files));

	BackupContainerFileSystem::FilesAndSizesT results;

	// Remove .lnk files from results, they are a side effect of a backup that was *read* during simulation. See
	// BackupContainerLocalDirectory::readFile() for more info on why they are created.
	if (g_network->isSimulated())
		files.erase(
		    std::remove_if(files.begin(), files.end(),
		                   [](std::string const& f) { return StringRef(f).endsWith(LiteralStringRef(".lnk")); }),
		    files.end());

	for (auto& f : files) {
		// Hide .part or .temp files.
		StringRef s(f);
		if (!s.endsWith(LiteralStringRef(".part")) && !s.endsWith(LiteralStringRef(".temp")))
			// +1 strips the path separator after the container root prefix.
			results.push_back({ f.substr(m_path.size() + 1), ::fileSize(f) });
	}

	return results;
}
|
||||
|
||||
} // namespace
|
||||
|
||||
void BackupContainerLocalDirectory::addref() {
|
||||
return ReferenceCounted<BackupContainerLocalDirectory>::addref();
|
||||
}
|
||||
void BackupContainerLocalDirectory::delref() {
|
||||
return ReferenceCounted<BackupContainerLocalDirectory>::delref();
|
||||
}
|
||||
|
||||
// Human-readable URL template shown in help/error text for this container type.
std::string BackupContainerLocalDirectory::getURLFormat() {
	return "file://</path/to/base/dir/>";
}
|
||||
|
||||
// Parses a file:// URL into the container's base directory path. Outside of
// simulation the path must already be absolute.
BackupContainerLocalDirectory::BackupContainerLocalDirectory(const std::string& url) {
	std::string path;
	if (url.find("file://") != 0) {
		// Only warns; parsing continues below.
		// NOTE(review): url.substr(7) will throw std::out_of_range if the URL is
		// shorter than 7 characters — confirm callers always pre-validate the scheme.
		TraceEvent(SevWarn, "BackupContainerLocalDirectory")
		    .detail("Description", "Invalid URL for BackupContainerLocalDirectory")
		    .detail("URL", url);
	}

	path = url.substr(7);
	// Remove trailing slashes on path
	path.erase(path.find_last_not_of("\\/") + 1);

	std::string absolutePath = abspath(path);

	if (!g_network->isSimulated() && path != absolutePath) {
		TraceEvent(SevWarn, "BackupContainerLocalDirectory")
		    .detail("Description", "Backup path must be absolute (e.g. file:///some/path)")
		    .detail("URL", url)
		    .detail("Path", path)
		    .detail("AbsolutePath", absolutePath);
		// throw io_error();
		IBackupContainer::lastOpenError =
		    format("Backup path '%s' must be the absolute path '%s'", path.c_str(), absolutePath.c_str());
		throw backup_invalid_url();
	}

	// Finalized path written to will be <path>/backup-<uid>
	m_path = path;
}
|
||||
|
||||
// Lists each immediate subdirectory of the URL's path as its own file:// URL.
// Mirrors the URL validation performed by the constructor.
Future<std::vector<std::string>> BackupContainerLocalDirectory::listURLs(const std::string& url) {
	std::string path;
	if (url.find("file://") != 0) {
		// Only warns; parsing continues below (see NOTE in the constructor).
		TraceEvent(SevWarn, "BackupContainerLocalDirectory")
		    .detail("Description", "Invalid URL for BackupContainerLocalDirectory")
		    .detail("URL", url);
	}

	path = url.substr(7);
	// Remove trailing slashes on path
	path.erase(path.find_last_not_of("\\/") + 1);

	if (!g_network->isSimulated() && path != abspath(path)) {
		TraceEvent(SevWarn, "BackupContainerLocalDirectory")
		    .detail("Description", "Backup path must be absolute (e.g. file:///some/path)")
		    .detail("URL", url)
		    .detail("Path", path);
		throw io_error();
	}
	std::vector<std::string> dirs = platform::listDirectories(path);
	std::vector<std::string> results;

	for (auto& r : dirs) {
		// Skip the self/parent directory entries.
		if (r == "." || r == "..") continue;
		results.push_back(std::string("file://") + joinPath(path, r));
	}

	return results;
}
|
||||
|
||||
// Intentionally a no-op. create() can be invoked by any process that merely
// handles the container URL (e.g. fdbbackup), and a "local directory"
// container only exists on the machine that accesses it. The directory is
// therefore created lazily before each file creation (in openFile()), which
// avoids leaving empty directories on machines that never write a backup.
Future<Void> BackupContainerLocalDirectory::create() {
	return Void();
}
|
||||
|
||||
// The container exists if its base directory exists.
Future<bool> BackupContainerLocalDirectory::exists() {
	return directoryExists(m_path);
}
|
||||
|
||||
// Opens a backup file for reading. In simulation, reads go through a
// per-process symlink and a randomized read-ahead cache to exercise varied
// I/O behavior deterministically.
Future<Reference<IAsyncFile>> BackupContainerLocalDirectory::readFile(const std::string& path) {
	int flags = IAsyncFile::OPEN_NO_AIO | IAsyncFile::OPEN_READONLY | IAsyncFile::OPEN_UNCACHED;
	// Simulation does not properly handle opening the same file from multiple machines using a shared filesystem,
	// so create a symbolic link to make each file opening appear to be unique. This could also work in production
	// but only if the source directory is writeable which shouldn't be required for a restore.
	std::string fullPath = joinPath(m_path, path);
#ifndef _WIN32
	if (g_network->isSimulated()) {
		if (!fileExists(fullPath)) {
			throw file_not_found();
		}

		if (g_simulator.getCurrentProcess()->uid == UID()) {
			TraceEvent(SevError, "BackupContainerReadFileOnUnsetProcessID");
		}
		// One symlink per simulated process UID so each open looks unique.
		std::string uniquePath = fullPath + "." + g_simulator.getCurrentProcess()->uid.toString() + ".lnk";
		unlink(uniquePath.c_str());
		ASSERT(symlink(basename(path).c_str(), uniquePath.c_str()) == 0);
		fullPath = uniquePath;
	}
	// Opening cached mode forces read/write mode at a lower level, overriding the readonly request. So cached mode
	// can't be used because backup files are read-only. Cached mode can only help during restore task retries handled
	// by the same process that failed the first task execution anyway, which is a very rare case.
#endif
	Future<Reference<IAsyncFile>> f = IAsyncFileSystem::filesystem()->open(fullPath, flags, 0644);

	if (g_network->isSimulated()) {
		int blockSize = 0;
		// Extract block size from the filename, if present
		size_t lastComma = path.find_last_of(',');
		if (lastComma != path.npos) {
			blockSize = atoi(path.substr(lastComma + 1).c_str());
		}
		if (blockSize <= 0) {
			// No usable block size in the name; pick one at random.
			blockSize = deterministicRandom()->randomInt(1e4, 1e6);
		}
		if (deterministicRandom()->random01() < .01) {
			// Occasionally shrink the block size to vary cache behavior.
			blockSize /= deterministicRandom()->randomInt(1, 3);
		}
		ASSERT(blockSize > 0);

		return map(f, [=](Reference<IAsyncFile> fr) {
			int readAhead = deterministicRandom()->randomInt(0, 3);
			int reads = deterministicRandom()->randomInt(1, 3);
			int cacheSize = deterministicRandom()->randomInt(0, 3);
			return Reference<IAsyncFile>(new AsyncFileReadAheadCache(fr, blockSize, readAhead, reads, cacheSize));
		});
	}

	return f;
}
|
||||
|
||||
// Opens a backup file for writing. Data goes into a uniquely-named ".temp"
// file; BackupFile::finish() renames it to its final path (see BackupFile).
Future<Reference<IBackupFile>> BackupContainerLocalDirectory::writeFile(const std::string& path) {
	int flags = IAsyncFile::OPEN_NO_AIO | IAsyncFile::OPEN_CREATE | IAsyncFile::OPEN_ATOMIC_WRITE_AND_CREATE |
	            IAsyncFile::OPEN_READWRITE;
	std::string fullPath = joinPath(m_path, path);
	// Ensure the parent directory exists (create() is deliberately a no-op).
	platform::createDirectory(parentDirectory(fullPath));
	std::string temp = fullPath + "." + deterministicRandom()->randomUniqueID().toString() + ".temp";
	Future<Reference<IAsyncFile>> f = IAsyncFileSystem::filesystem()->open(temp, flags, 0644);
	return map(f, [=](Reference<IAsyncFile> f) { return Reference<IBackupFile>(new BackupFile(path, f, fullPath)); });
}
|
||||
|
||||
// Deletes the file at `path` (relative to the container root) synchronously.
Future<Void> BackupContainerLocalDirectory::deleteFile(const std::string& path) {
	const std::string fullPath = joinPath(m_path, path);
	::deleteFile(fullPath);
	return Void();
}
|
||||
|
||||
// Lists files under `path`; the folder filter parameter is unused because the
// local implementation always traverses the whole subtree.
Future<BackupContainerFileSystem::FilesAndSizesT> BackupContainerLocalDirectory::listFiles(
    const std::string& path, std::function<bool(std::string const&)>) {
	return listFiles_impl(path, m_path);
}
|
||||
|
||||
// Recursively erases the container directory. If pNumDeleted is non-null it
// receives the number of entries deleted.
Future<Void> BackupContainerLocalDirectory::deleteContainer(int* pNumDeleted) {
	// In order to avoid deleting some random directory due to user error, first describe the backup
	// and make sure it has something in it.
	return map(describeBackup(false, invalidVersion), [=](BackupDescription const& desc) {
		// If the backup has no snapshots and no logs then it's probably not a valid backup
		if (desc.snapshots.size() == 0 && !desc.minLogBegin.present()) throw backup_invalid_url();

		int count = platform::eraseDirectoryRecursive(m_path);
		if (pNumDeleted != nullptr) *pNumDeleted = count;

		return Void();
	});
}
|
|
@ -0,0 +1,59 @@
|
|||
/*
|
||||
* BackupContainerLocalDirectory.h
|
||||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef FDBCLIENT_BACKUP_CONTAINER_LOCAL_DIRECTORY_H
|
||||
#define FDBCLIENT_BACKUP_CONTAINER_LOCAL_DIRECTORY_H
|
||||
#pragma once
|
||||
|
||||
#include "fdbclient/BackupContainerFileSystem.h"
|
||||
#include "flow/flow.h"
|
||||
|
||||
// Backup container rooted at a local filesystem directory (file:// URLs).
class BackupContainerLocalDirectory : public BackupContainerFileSystem,
                                      ReferenceCounted<BackupContainerLocalDirectory> {
public:
	void addref() final;
	void delref() final;

	static std::string getURLFormat();

	// Parses and validates a file:// URL; throws backup_invalid_url when the
	// path is not absolute (outside simulation).
	BackupContainerLocalDirectory(const std::string& url);

	// Lists each immediate subdirectory of the URL's path as a container URL.
	static Future<std::vector<std::string>> listURLs(const std::string& url);

	// Deliberately a no-op; directories are created lazily on first write.
	Future<Void> create() final;

	// The container exists if the folder it resides in exists
	Future<bool> exists() final;

	Future<Reference<IAsyncFile>> readFile(const std::string& path) final;

	Future<Reference<IBackupFile>> writeFile(const std::string& path) final;

	Future<Void> deleteFile(const std::string& path) final;

	Future<FilesAndSizesT> listFiles(const std::string& path, std::function<bool(std::string const&)>) final;

	Future<Void> deleteContainer(int* pNumDeleted) final;

private:
	// Absolute base directory of the container (no trailing slashes).
	std::string m_path;
};
|
||||
|
||||
#endif
|
|
@ -0,0 +1,204 @@
|
|||
/*
|
||||
* BackupContainerS3BlobStore.actor.cpp
|
||||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "fdbclient/AsyncFileS3BlobStore.actor.h"
|
||||
#include "fdbclient/BackupContainerS3BlobStore.h"
|
||||
#include "fdbrpc/AsyncFileReadAhead.actor.h"
|
||||
#include "flow/actorcompiler.h" // This must be the last #include.
|
||||
|
||||
class BackupContainerS3BlobStoreImpl {
|
||||
public:
|
||||
// Backup files to under a single folder prefix with subfolders for each named backup
|
||||
static const std::string DATAFOLDER;
|
||||
|
||||
// Indexfolder contains keys for which user-named backups exist. Backup names can contain an arbitrary
|
||||
// number of slashes so the backup names are kept in a separate folder tree from their actual data.
|
||||
static const std::string INDEXFOLDER;
|
||||
|
||||
ACTOR static Future<std::vector<std::string>> listURLs(Reference<S3BlobStoreEndpoint> bstore, std::string bucket) {
|
||||
state std::string basePath = INDEXFOLDER + '/';
|
||||
S3BlobStoreEndpoint::ListResult contents = wait(bstore->listObjects(bucket, basePath));
|
||||
std::vector<std::string> results;
|
||||
for (auto& f : contents.objects) {
|
||||
results.push_back(
|
||||
bstore->getResourceURL(f.name.substr(basePath.size()), format("bucket=%s", bucket.c_str())));
|
||||
}
|
||||
return results;
|
||||
}
|
||||
|
||||
class BackupFile : public IBackupFile, ReferenceCounted<BackupFile> {
|
||||
public:
|
||||
BackupFile(std::string fileName, Reference<IAsyncFile> file) : IBackupFile(fileName), m_file(file) {}
|
||||
|
||||
Future<Void> append(const void* data, int len) {
|
||||
Future<Void> r = m_file->write(data, len, m_offset);
|
||||
m_offset += len;
|
||||
return r;
|
||||
}
|
||||
|
||||
Future<Void> finish() {
|
||||
Reference<BackupFile> self = Reference<BackupFile>::addRef(this);
|
||||
return map(m_file->sync(), [=](Void _) {
|
||||
self->m_file.clear();
|
||||
return Void();
|
||||
});
|
||||
}
|
||||
|
||||
void addref() final { return ReferenceCounted<BackupFile>::addref(); }
|
||||
void delref() final { return ReferenceCounted<BackupFile>::delref(); }
|
||||
|
||||
private:
|
||||
Reference<IAsyncFile> m_file;
|
||||
};
|
||||
|
||||
ACTOR static Future<BackupContainerFileSystem::FilesAndSizesT> listFiles(
|
||||
Reference<BackupContainerS3BlobStore> bc, std::string path,
|
||||
std::function<bool(std::string const&)> pathFilter) {
|
||||
// pathFilter expects container based paths, so create a wrapper which converts a raw path
|
||||
// to a container path by removing the known backup name prefix.
|
||||
state int prefixTrim = bc->dataPath("").size();
|
||||
std::function<bool(std::string const&)> rawPathFilter = [=](const std::string& folderPath) {
|
||||
ASSERT(folderPath.size() >= prefixTrim);
|
||||
return pathFilter(folderPath.substr(prefixTrim));
|
||||
};
|
||||
|
||||
state S3BlobStoreEndpoint::ListResult result = wait(bc->m_bstore->listObjects(
|
||||
bc->m_bucket, bc->dataPath(path), '/', std::numeric_limits<int>::max(), rawPathFilter));
|
||||
BackupContainerFileSystem::FilesAndSizesT files;
|
||||
for (auto& o : result.objects) {
|
||||
ASSERT(o.name.size() >= prefixTrim);
|
||||
files.push_back({ o.name.substr(prefixTrim), o.size });
|
||||
}
|
||||
return files;
|
||||
}
|
||||
|
||||
ACTOR static Future<Void> create(Reference<BackupContainerS3BlobStore> bc) {
|
||||
wait(bc->m_bstore->createBucket(bc->m_bucket));
|
||||
|
||||
// Check/create the index entry
|
||||
bool exists = wait(bc->m_bstore->objectExists(bc->m_bucket, bc->indexEntry()));
|
||||
if (!exists) {
|
||||
wait(bc->m_bstore->writeEntireFile(bc->m_bucket, bc->indexEntry(), ""));
|
||||
}
|
||||
|
||||
return Void();
|
||||
}
|
||||
|
||||
ACTOR static Future<Void> deleteContainer(Reference<BackupContainerS3BlobStore> bc, int* pNumDeleted) {
|
||||
bool e = wait(bc->exists());
|
||||
if (!e) {
|
||||
TraceEvent(SevWarnAlways, "BackupContainerDoesNotExist").detail("URL", bc->getURL());
|
||||
throw backup_does_not_exist();
|
||||
}
|
||||
|
||||
// First delete everything under the data prefix in the bucket
|
||||
wait(bc->m_bstore->deleteRecursively(bc->m_bucket, bc->dataPath(""), pNumDeleted));
|
||||
|
||||
// Now that all files are deleted, delete the index entry
|
||||
wait(bc->m_bstore->deleteObject(bc->m_bucket, bc->indexEntry()));
|
||||
|
||||
return Void();
|
||||
}
|
||||
};
|
||||
|
||||
const std::string BackupContainerS3BlobStoreImpl::DATAFOLDER = "data";
|
||||
const std::string BackupContainerS3BlobStoreImpl::INDEXFOLDER = "backups";
|
||||
|
||||
std::string BackupContainerS3BlobStore::dataPath(const std::string& path) {
|
||||
return BackupContainerS3BlobStoreImpl::DATAFOLDER + "/" + m_name + "/" + path;
|
||||
}
|
||||
|
||||
// Get the path of the backups's index entry
|
||||
std::string BackupContainerS3BlobStore::indexEntry() {
|
||||
return BackupContainerS3BlobStoreImpl::INDEXFOLDER + "/" + m_name;
|
||||
}
|
||||
|
||||
BackupContainerS3BlobStore::BackupContainerS3BlobStore(Reference<S3BlobStoreEndpoint> bstore, const std::string& name,
|
||||
const S3BlobStoreEndpoint::ParametersT& params)
|
||||
: m_bstore(bstore), m_name(name), m_bucket("FDB_BACKUPS_V2") {
|
||||
|
||||
// Currently only one parameter is supported, "bucket"
|
||||
for (auto& kv : params) {
|
||||
if (kv.first == "bucket") {
|
||||
m_bucket = kv.second;
|
||||
continue;
|
||||
}
|
||||
TraceEvent(SevWarn, "BackupContainerS3BlobStoreInvalidParameter")
|
||||
.detail("Name", kv.first)
|
||||
.detail("Value", kv.second);
|
||||
IBackupContainer::lastOpenError = format("Unknown URL parameter: '%s'", kv.first.c_str());
|
||||
throw backup_invalid_url();
|
||||
}
|
||||
}
|
||||
|
||||
void BackupContainerS3BlobStore::addref() {
|
||||
return ReferenceCounted<BackupContainerS3BlobStore>::addref();
|
||||
}
|
||||
void BackupContainerS3BlobStore::delref() {
|
||||
return ReferenceCounted<BackupContainerS3BlobStore>::delref();
|
||||
}
|
||||
|
||||
std::string BackupContainerS3BlobStore::getURLFormat() {
|
||||
return S3BlobStoreEndpoint::getURLFormat(true) + " (Note: The 'bucket' parameter is required.)";
|
||||
}
|
||||
|
||||
Future<Reference<IAsyncFile>> BackupContainerS3BlobStore::readFile(const std::string& path) {
|
||||
return Reference<IAsyncFile>(new AsyncFileReadAheadCache(
|
||||
Reference<IAsyncFile>(new AsyncFileS3BlobStoreRead(m_bstore, m_bucket, dataPath(path))),
|
||||
m_bstore->knobs.read_block_size, m_bstore->knobs.read_ahead_blocks, m_bstore->knobs.concurrent_reads_per_file,
|
||||
m_bstore->knobs.read_cache_blocks_per_file));
|
||||
}
|
||||
|
||||
Future<std::vector<std::string>> BackupContainerS3BlobStore::listURLs(Reference<S3BlobStoreEndpoint> bstore,
|
||||
const std::string& bucket) {
|
||||
return BackupContainerS3BlobStoreImpl::listURLs(bstore, bucket);
|
||||
}
|
||||
|
||||
Future<Reference<IBackupFile>> BackupContainerS3BlobStore::writeFile(const std::string& path) {
|
||||
return Reference<IBackupFile>(new BackupContainerS3BlobStoreImpl::BackupFile(
|
||||
path, Reference<IAsyncFile>(new AsyncFileS3BlobStoreWrite(m_bstore, m_bucket, dataPath(path)))));
|
||||
}
|
||||
|
||||
Future<Void> BackupContainerS3BlobStore::deleteFile(const std::string& path) {
|
||||
return m_bstore->deleteObject(m_bucket, dataPath(path));
|
||||
}
|
||||
|
||||
Future<BackupContainerFileSystem::FilesAndSizesT> BackupContainerS3BlobStore::listFiles(
|
||||
const std::string& path, std::function<bool(std::string const&)> pathFilter) {
|
||||
return BackupContainerS3BlobStoreImpl::listFiles(Reference<BackupContainerS3BlobStore>::addRef(this), path,
|
||||
pathFilter);
|
||||
}
|
||||
|
||||
Future<Void> BackupContainerS3BlobStore::create() {
|
||||
return BackupContainerS3BlobStoreImpl::create(Reference<BackupContainerS3BlobStore>::addRef(this));
|
||||
}
|
||||
|
||||
Future<bool> BackupContainerS3BlobStore::exists() {
|
||||
return m_bstore->objectExists(m_bucket, indexEntry());
|
||||
}
|
||||
|
||||
Future<Void> BackupContainerS3BlobStore::deleteContainer(int* pNumDeleted) {
|
||||
return BackupContainerS3BlobStoreImpl::deleteContainer(Reference<BackupContainerS3BlobStore>::addRef(this),
|
||||
pNumDeleted);
|
||||
}
|
||||
|
||||
std::string BackupContainerS3BlobStore::getBucket() const {
|
||||
return m_bucket;
|
||||
}
|
|
@ -0,0 +1,72 @@
|
|||
/*
|
||||
* BackupContainerS3BlobStore.h
|
||||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef FDBCLIENT_BACKUP_CONTAINER_S3_BLOBSTORE_H
|
||||
#define FDBCLIENT_BACKUP_CONTAINER_S3_BLOBSTORE_H
|
||||
#pragma once
|
||||
|
||||
#include "fdbclient/AsyncFileS3BlobStore.actor.h"
|
||||
#include "fdbclient/BackupContainerFileSystem.h"
|
||||
|
||||
class BackupContainerS3BlobStore final : public BackupContainerFileSystem,
|
||||
ReferenceCounted<BackupContainerS3BlobStore> {
|
||||
Reference<S3BlobStoreEndpoint> m_bstore;
|
||||
std::string m_name;
|
||||
|
||||
// All backup data goes into a single bucket
|
||||
std::string m_bucket;
|
||||
|
||||
std::string dataPath(const std::string& path);
|
||||
|
||||
// Get the path of the backups's index entry
|
||||
std::string indexEntry();
|
||||
|
||||
friend class BackupContainerS3BlobStoreImpl;
|
||||
|
||||
public:
|
||||
BackupContainerS3BlobStore(Reference<S3BlobStoreEndpoint> bstore, const std::string& name,
|
||||
const S3BlobStoreEndpoint::ParametersT& params);
|
||||
|
||||
void addref() override;
|
||||
void delref() override;
|
||||
|
||||
static std::string getURLFormat();
|
||||
|
||||
Future<Reference<IAsyncFile>> readFile(const std::string& path) final;
|
||||
|
||||
static Future<std::vector<std::string>> listURLs(Reference<S3BlobStoreEndpoint> bstore, const std::string& bucket);
|
||||
|
||||
Future<Reference<IBackupFile>> writeFile(const std::string& path) final;
|
||||
|
||||
Future<Void> deleteFile(const std::string& path) final;
|
||||
|
||||
Future<FilesAndSizesT> listFiles(const std::string& path, std::function<bool(std::string const&)> pathFilter) final;
|
||||
|
||||
Future<Void> create() final;
|
||||
|
||||
// The container exists if the index entry in the blob bucket exists
|
||||
Future<bool> exists() final;
|
||||
|
||||
Future<Void> deleteContainer(int* pNumDeleted) final;
|
||||
|
||||
std::string getBucket() const;
|
||||
};
|
||||
|
||||
#endif
|
|
@ -222,7 +222,8 @@ Reference<BlobStoreEndpoint> BlobStoreEndpoint::fromString(std::string const &ur
|
|||
StringRef key = c.eat(":");
|
||||
StringRef secret = c.eat();
|
||||
|
||||
return Reference<BlobStoreEndpoint>(new BlobStoreEndpoint(host.toString(), service.toString(), key.toString(), secret.toString(), knobs, extraHeaders));
|
||||
return makeReference<BlobStoreEndpoint>(host.toString(), service.toString(), key.toString(), secret.toString(),
|
||||
knobs, extraHeaders);
|
||||
|
||||
} catch(std::string &err) {
|
||||
if(error != nullptr)
|
||||
|
@ -951,7 +952,12 @@ Future<std::vector<std::string>> BlobStoreEndpoint::listBuckets() {
|
|||
std::string BlobStoreEndpoint::hmac_sha1(std::string const &msg) {
|
||||
std::string key = secret;
|
||||
|
||||
// First pad the key to 64 bytes.
|
||||
// Hash key to shorten it if it is longer than SHA1 block size
|
||||
if(key.size() > 64) {
|
||||
key = SHA1::from_string(key);
|
||||
}
|
||||
|
||||
// Pad key up to SHA1 block size if needed
|
||||
key.append(64 - key.size(), '\0');
|
||||
|
||||
std::string kipad = key;
|
||||
|
|
|
@ -1,13 +1,20 @@
|
|||
set(FDBCLIENT_SRCS
|
||||
AsyncFileBlobStore.actor.cpp
|
||||
AsyncFileBlobStore.actor.h
|
||||
AsyncFileS3BlobStore.actor.cpp
|
||||
AsyncFileS3BlobStore.actor.h
|
||||
AsyncTaskThread.actor.cpp
|
||||
AsyncTaskThread.h
|
||||
Atomic.h
|
||||
AutoPublicAddress.cpp
|
||||
BackupAgent.actor.h
|
||||
BackupAgentBase.actor.cpp
|
||||
BackupContainer.actor.cpp
|
||||
BackupContainer.h
|
||||
BlobStore.actor.cpp
|
||||
BackupContainerFileSystem.actor.cpp
|
||||
BackupContainerFileSystem.h
|
||||
BackupContainerLocalDirectory.actor.cpp
|
||||
BackupContainerLocalDirectory.h
|
||||
BackupContainerS3BlobStore.actor.cpp
|
||||
BackupContainerS3BlobStore.h
|
||||
ClientLogEvents.h
|
||||
ClientWorkerInterface.h
|
||||
ClusterInterface.h
|
||||
|
@ -34,8 +41,6 @@ set(FDBCLIENT_SRCS
|
|||
ManagementAPI.actor.cpp
|
||||
ManagementAPI.actor.h
|
||||
CommitProxyInterface.h
|
||||
MetricLogger.actor.cpp
|
||||
MetricLogger.h
|
||||
MonitorLeader.actor.cpp
|
||||
MonitorLeader.h
|
||||
MultiVersionAssignmentVars.h
|
||||
|
@ -53,6 +58,7 @@ set(FDBCLIENT_SRCS
|
|||
RunTransaction.actor.h
|
||||
RYWIterator.cpp
|
||||
RYWIterator.h
|
||||
S3BlobStore.actor.cpp
|
||||
Schemas.cpp
|
||||
Schemas.h
|
||||
SnapshotCache.h
|
||||
|
@ -93,6 +99,46 @@ set(options_srcs ${CMAKE_CURRENT_BINARY_DIR}/FDBOptions.g.cpp)
|
|||
vexillographer_compile(TARGET fdboptions LANG cpp OUT ${CMAKE_CURRENT_BINARY_DIR}/FDBOptions.g
|
||||
OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/FDBOptions.g.h ${CMAKE_CURRENT_BINARY_DIR}/FDBOptions.g.cpp)
|
||||
|
||||
set(BUILD_AZURE_BACKUP OFF CACHE BOOL "Build Azure backup client")
|
||||
if(BUILD_AZURE_BACKUP)
|
||||
add_compile_definitions(BUILD_AZURE_BACKUP)
|
||||
set(FDBCLIENT_SRCS
|
||||
${FDBCLIENT_SRCS}
|
||||
BackupContainerAzureBlobStore.actor.cpp
|
||||
BackupContainerAzureBlobStore.h)
|
||||
|
||||
configure_file(azurestorage.cmake azurestorage-download/CMakeLists.txt)
|
||||
|
||||
execute_process(
|
||||
COMMAND ${CMAKE_COMMAND} -G "${CMAKE_GENERATOR}" .
|
||||
RESULT_VARIABLE results
|
||||
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/azurestorage-download
|
||||
)
|
||||
|
||||
if(results)
|
||||
message(FATAL_ERROR "Configuration step for AzureStorage has Failed. ${results}")
|
||||
endif()
|
||||
|
||||
execute_process(
|
||||
COMMAND ${CMAKE_COMMAND} --build . --config Release
|
||||
RESULT_VARIABLE results
|
||||
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/azurestorage-download
|
||||
)
|
||||
|
||||
if(results)
|
||||
message(FATAL_ERROR "Build step for AzureStorage has Failed. ${results}")
|
||||
endif()
|
||||
|
||||
add_subdirectory(
|
||||
${CMAKE_CURRENT_BINARY_DIR}/azurestorage-src
|
||||
${CMAKE_CURRENT_BINARY_DIR}/azurestorage-build
|
||||
)
|
||||
endif()
|
||||
|
||||
add_flow_target(STATIC_LIBRARY NAME fdbclient SRCS ${FDBCLIENT_SRCS} ADDL_SRCS ${options_srcs})
|
||||
add_dependencies(fdbclient fdboptions)
|
||||
target_link_libraries(fdbclient PUBLIC fdbrpc)
|
||||
if(BUILD_AZURE_BACKUP)
|
||||
target_link_libraries(fdbclient PUBLIC fdbrpc PRIVATE curl uuid azure-storage-lite)
|
||||
else()
|
||||
target_link_libraries(fdbclient PUBLIC fdbrpc)
|
||||
endif()
|
||||
|
|
|
@ -159,9 +159,9 @@ namespace dbBackup {
|
|||
|
||||
ACTOR static Future<Key> addTask(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> taskBucket, Reference<Task> parentTask, Key begin, Key end, TaskCompletionKey completionKey, Reference<TaskFuture> waitFor = Reference<TaskFuture>()) {
|
||||
Key doneKey = wait(completionKey.get(tr, taskBucket));
|
||||
Reference<Task> task(new Task(BackupRangeTaskFunc::name, BackupRangeTaskFunc::version, doneKey));
|
||||
auto task = makeReference<Task>(BackupRangeTaskFunc::name, BackupRangeTaskFunc::version, doneKey);
|
||||
|
||||
copyDefaultParameters(parentTask, task);
|
||||
copyDefaultParameters(parentTask, task);
|
||||
|
||||
task->params[BackupAgentBase::keyBeginKey] = begin;
|
||||
task->params[BackupAgentBase::keyEndKey] = end;
|
||||
|
@ -263,8 +263,8 @@ namespace dbBackup {
|
|||
|
||||
state int valueLoc = 0;
|
||||
state int committedValueLoc = 0;
|
||||
state Reference<ReadYourWritesTransaction> tr = Reference<ReadYourWritesTransaction>( new ReadYourWritesTransaction(cx) );
|
||||
loop{
|
||||
state Reference<ReadYourWritesTransaction> tr = makeReference<ReadYourWritesTransaction>(cx);
|
||||
loop{
|
||||
try {
|
||||
tr->reset();
|
||||
tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
|
||||
|
@ -447,9 +447,9 @@ namespace dbBackup {
|
|||
ACTOR static Future<Key> addTask(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> taskBucket, Reference<Task> parentTask, TaskCompletionKey completionKey, Reference<TaskFuture> waitFor = Reference<TaskFuture>()) {
|
||||
// After the BackupRangeTask completes, set the stop key which will stop the BackupLogsTask
|
||||
Key doneKey = wait(completionKey.get(tr, taskBucket));
|
||||
Reference<Task> task(new Task(FinishFullBackupTaskFunc::name, FinishFullBackupTaskFunc::version, doneKey));
|
||||
auto task = makeReference<Task>(FinishFullBackupTaskFunc::name, FinishFullBackupTaskFunc::version, doneKey);
|
||||
|
||||
copyDefaultParameters(parentTask, task);
|
||||
copyDefaultParameters(parentTask, task);
|
||||
|
||||
if (!waitFor) {
|
||||
return taskBucket->addTask(tr, task, parentTask->params[Task::reservedTaskParamValidKey], task->params[BackupAgentBase::keyFolderId]);
|
||||
|
@ -497,9 +497,9 @@ namespace dbBackup {
|
|||
|
||||
ACTOR static Future<Key> addTask(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> taskBucket, Reference<Task> parentTask, Version endVersion, TaskCompletionKey completionKey, Reference<TaskFuture> waitFor = Reference<TaskFuture>()) {
|
||||
Key doneKey = wait(completionKey.get(tr, taskBucket));
|
||||
Reference<Task> task(new Task(EraseLogRangeTaskFunc::name, EraseLogRangeTaskFunc::version, doneKey, 1));
|
||||
auto task = makeReference<Task>(EraseLogRangeTaskFunc::name, EraseLogRangeTaskFunc::version, doneKey, 1);
|
||||
|
||||
copyDefaultParameters(parentTask, task);
|
||||
copyDefaultParameters(parentTask, task);
|
||||
|
||||
task->params[DatabaseBackupAgent::keyBeginVersion] = BinaryWriter::toValue(1, Unversioned()); //FIXME: remove in 6.X, only needed for 5.2 backward compatibility
|
||||
task->params[DatabaseBackupAgent::keyEndVersion] = BinaryWriter::toValue(endVersion, Unversioned());
|
||||
|
@ -692,8 +692,8 @@ namespace dbBackup {
|
|||
|
||||
for (int j = results.size(); j < prefetchTo; j ++) {
|
||||
results.push_back(PromiseStream<RCGroup>());
|
||||
locks.push_back(Reference<FlowLock>(new FlowLock(CLIENT_KNOBS->COPY_LOG_READ_AHEAD_BYTES)));
|
||||
rc.push_back(readCommitted(taskBucket->src, results[j], Future<Void>(Void()), locks[j], ranges[j], decodeBKMutationLogKey, true, true, true));
|
||||
locks.push_back(makeReference<FlowLock>(CLIENT_KNOBS->COPY_LOG_READ_AHEAD_BYTES));
|
||||
rc.push_back(readCommitted(taskBucket->src, results[j], Future<Void>(Void()), locks[j], ranges[j], decodeBKMutationLogKey, true, true, true));
|
||||
}
|
||||
|
||||
// copy the range
|
||||
|
@ -731,9 +731,9 @@ namespace dbBackup {
|
|||
|
||||
ACTOR static Future<Key> addTask(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> taskBucket, Reference<Task> parentTask, Version beginVersion, Version endVersion, TaskCompletionKey completionKey, Reference<TaskFuture> waitFor = Reference<TaskFuture>()) {
|
||||
Key doneKey = wait(completionKey.get(tr, taskBucket));
|
||||
Reference<Task> task(new Task(CopyLogRangeTaskFunc::name, CopyLogRangeTaskFunc::version, doneKey, 1));
|
||||
auto task = makeReference<Task>(CopyLogRangeTaskFunc::name, CopyLogRangeTaskFunc::version, doneKey, 1);
|
||||
|
||||
copyDefaultParameters(parentTask, task);
|
||||
copyDefaultParameters(parentTask, task);
|
||||
|
||||
task->params[DatabaseBackupAgent::keyBeginVersion] = BinaryWriter::toValue(beginVersion, Unversioned());
|
||||
task->params[DatabaseBackupAgent::keyEndVersion] = BinaryWriter::toValue(endVersion, Unversioned());
|
||||
|
@ -852,9 +852,9 @@ namespace dbBackup {
|
|||
|
||||
ACTOR static Future<Key> addTask(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> taskBucket, Reference<Task> parentTask, Version prevBeginVersion, Version beginVersion, TaskCompletionKey completionKey, Reference<TaskFuture> waitFor = Reference<TaskFuture>()) {
|
||||
Key doneKey = wait(completionKey.get(tr, taskBucket));
|
||||
Reference<Task> task(new Task(CopyLogsTaskFunc::name, CopyLogsTaskFunc::version, doneKey, 1));
|
||||
auto task = makeReference<Task>(CopyLogsTaskFunc::name, CopyLogsTaskFunc::version, doneKey, 1);
|
||||
|
||||
copyDefaultParameters(parentTask, task);
|
||||
copyDefaultParameters(parentTask, task);
|
||||
task->params[BackupAgentBase::keyBeginVersion] = BinaryWriter::toValue(beginVersion, Unversioned());
|
||||
task->params[DatabaseBackupAgent::keyPrevBeginVersion] = BinaryWriter::toValue(prevBeginVersion, Unversioned());
|
||||
|
||||
|
@ -931,9 +931,10 @@ namespace dbBackup {
|
|||
|
||||
ACTOR static Future<Key> addTask(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> taskBucket, Reference<Task> parentTask, TaskCompletionKey completionKey, Reference<TaskFuture> waitFor = Reference<TaskFuture>()) {
|
||||
Key doneKey = wait(completionKey.get(tr, taskBucket));
|
||||
Reference<Task> task(new Task(FinishedFullBackupTaskFunc::name, FinishedFullBackupTaskFunc::version, doneKey));
|
||||
auto task =
|
||||
makeReference<Task>(FinishedFullBackupTaskFunc::name, FinishedFullBackupTaskFunc::version, doneKey);
|
||||
|
||||
copyDefaultParameters(parentTask, task);
|
||||
copyDefaultParameters(parentTask, task);
|
||||
|
||||
if (!waitFor) {
|
||||
return taskBucket->addTask(tr, task, parentTask->params[Task::reservedTaskParamValidKey], task->params[BackupAgentBase::keyFolderId]);
|
||||
|
@ -1032,9 +1033,9 @@ namespace dbBackup {
|
|||
|
||||
ACTOR static Future<Key> addTask(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> taskBucket, Reference<Task> parentTask, Version prevBeginVersion, Version beginVersion, TaskCompletionKey completionKey, Reference<TaskFuture> waitFor = Reference<TaskFuture>()) {
|
||||
Key doneKey = wait(completionKey.get(tr, taskBucket));
|
||||
Reference<Task> task(new Task(CopyDiffLogsTaskFunc::name, CopyDiffLogsTaskFunc::version, doneKey, 1));
|
||||
auto task = makeReference<Task>(CopyDiffLogsTaskFunc::name, CopyDiffLogsTaskFunc::version, doneKey, 1);
|
||||
|
||||
copyDefaultParameters(parentTask, task);
|
||||
copyDefaultParameters(parentTask, task);
|
||||
|
||||
task->params[DatabaseBackupAgent::keyBeginVersion] = BinaryWriter::toValue(beginVersion, Unversioned());
|
||||
task->params[DatabaseBackupAgent::keyPrevBeginVersion] = BinaryWriter::toValue(prevBeginVersion, Unversioned());
|
||||
|
@ -1210,9 +1211,10 @@ namespace dbBackup {
|
|||
|
||||
ACTOR static Future<Key> addTask(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> taskBucket, Reference<Task> parentTask, Version beginVersion, Version endVersion, TaskCompletionKey completionKey, Reference<TaskFuture> waitFor = Reference<TaskFuture>()) {
|
||||
Key doneKey = wait(completionKey.get(tr, taskBucket));
|
||||
Reference<Task> task(new Task(OldCopyLogRangeTaskFunc::name, OldCopyLogRangeTaskFunc::version, doneKey, 1));
|
||||
auto task =
|
||||
makeReference<Task>(OldCopyLogRangeTaskFunc::name, OldCopyLogRangeTaskFunc::version, doneKey, 1);
|
||||
|
||||
copyDefaultParameters(parentTask, task);
|
||||
copyDefaultParameters(parentTask, task);
|
||||
|
||||
task->params[DatabaseBackupAgent::keyBeginVersion] = BinaryWriter::toValue(beginVersion, Unversioned());
|
||||
task->params[DatabaseBackupAgent::keyEndVersion] = BinaryWriter::toValue(endVersion, Unversioned());
|
||||
|
@ -1289,9 +1291,9 @@ namespace dbBackup {
|
|||
|
||||
ACTOR static Future<Key> addTask(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> taskBucket, Reference<Task> parentTask, TaskCompletionKey completionKey, Reference<TaskFuture> waitFor = Reference<TaskFuture>()) {
|
||||
Key doneKey = wait(completionKey.get(tr, taskBucket));
|
||||
Reference<Task> task(new Task(AbortOldBackupTaskFunc::name, AbortOldBackupTaskFunc::version, doneKey, 1));
|
||||
auto task = makeReference<Task>(AbortOldBackupTaskFunc::name, AbortOldBackupTaskFunc::version, doneKey, 1);
|
||||
|
||||
copyDefaultParameters(parentTask, task);
|
||||
copyDefaultParameters(parentTask, task);
|
||||
|
||||
if (!waitFor) {
|
||||
return taskBucket->addTask(tr, task, parentTask->params[Task::reservedTaskParamValidKey], task->params[BackupAgentBase::keyFolderId]);
|
||||
|
@ -1498,9 +1500,9 @@ namespace dbBackup {
|
|||
|
||||
ACTOR static Future<Key> addTask(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> taskBucket, Reference<Task> parentTask, TaskCompletionKey completionKey, Reference<TaskFuture> waitFor = Reference<TaskFuture>()) {
|
||||
Key doneKey = wait(completionKey.get(tr, taskBucket));
|
||||
Reference<Task> task(new Task(BackupRestorableTaskFunc::name, BackupRestorableTaskFunc::version, doneKey));
|
||||
auto task = makeReference<Task>(BackupRestorableTaskFunc::name, BackupRestorableTaskFunc::version, doneKey);
|
||||
|
||||
copyDefaultParameters(parentTask, task);
|
||||
copyDefaultParameters(parentTask, task);
|
||||
|
||||
if (!waitFor) {
|
||||
return taskBucket->addTask(tr, task, parentTask->params[Task::reservedTaskParamValidKey], task->params[BackupAgentBase::keyFolderId]);
|
||||
|
@ -1597,6 +1599,7 @@ namespace dbBackup {
|
|||
wait(tr->commit());
|
||||
break;
|
||||
} catch (Error &e) {
|
||||
TraceEvent("SetDestUidOrBeginVersionError").error(e, true);
|
||||
wait(tr->onError(e));
|
||||
}
|
||||
}
|
||||
|
@ -1690,9 +1693,9 @@ namespace dbBackup {
|
|||
ACTOR static Future<Key> addTask(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> taskBucket, Key logUid, Key backupUid, Key keyAddPrefix, Key keyRemovePrefix, Key keyConfigBackupRanges, Key tagName, TaskCompletionKey completionKey, Reference<TaskFuture> waitFor = Reference<TaskFuture>(), bool databasesInSync=false)
|
||||
{
|
||||
Key doneKey = wait(completionKey.get(tr, taskBucket));
|
||||
Reference<Task> task(new Task(StartFullBackupTaskFunc::name, StartFullBackupTaskFunc::version, doneKey));
|
||||
auto task = makeReference<Task>(StartFullBackupTaskFunc::name, StartFullBackupTaskFunc::version, doneKey);
|
||||
|
||||
task->params[BackupAgentBase::keyFolderId] = backupUid;
|
||||
task->params[BackupAgentBase::keyFolderId] = backupUid;
|
||||
task->params[BackupAgentBase::keyConfigLogUid] = logUid;
|
||||
task->params[DatabaseBackupAgent::keyAddPrefix] = keyAddPrefix;
|
||||
task->params[DatabaseBackupAgent::keyRemovePrefix] = keyRemovePrefix;
|
||||
|
@ -2167,7 +2170,8 @@ public:
|
|||
return Void();
|
||||
}
|
||||
|
||||
ACTOR static Future<Void> abortBackup(DatabaseBackupAgent* backupAgent, Database cx, Key tagName, bool partial, bool abortOldBackup, bool dstOnly) {
|
||||
ACTOR static Future<Void> abortBackup(DatabaseBackupAgent* backupAgent, Database cx, Key tagName, bool partial,
|
||||
bool abortOldBackup, bool dstOnly, bool waitForDestUID) {
|
||||
state Reference<ReadYourWritesTransaction> tr(new ReadYourWritesTransaction(cx));
|
||||
state Key logUidValue, destUidValue;
|
||||
state UID logUid, destUid;
|
||||
|
@ -2187,14 +2191,19 @@ public:
|
|||
state Future<UID> destUidFuture = backupAgent->getDestUid(tr, logUid);
|
||||
wait(success(statusFuture) && success(destUidFuture));
|
||||
|
||||
UID destUid = destUidFuture.get();
|
||||
if (destUid.isValid()) {
|
||||
destUidValue = BinaryWriter::toValue(destUid, Unversioned());
|
||||
}
|
||||
EBackupState status = statusFuture.get();
|
||||
if (!backupAgent->isRunnable(status)) {
|
||||
throw backup_unneeded();
|
||||
}
|
||||
UID destUid = destUidFuture.get();
|
||||
if (destUid.isValid()) {
|
||||
destUidValue = BinaryWriter::toValue(destUid, Unversioned());
|
||||
} else if (destUidValue.size() == 0 && waitForDestUID) {
|
||||
// Give DR task a chance to update destUid to avoid the problem of
|
||||
// leftover version key. If we got an commit_unknown_result before,
|
||||
// reuse the previous destUidValue.
|
||||
throw not_committed();
|
||||
}
|
||||
|
||||
Optional<Value> _backupUid = wait(tr->get(backupAgent->states.get(logUidValue).pack(DatabaseBackupAgent::keyFolderId)));
|
||||
backupUid = _backupUid.get();
|
||||
|
@ -2215,11 +2224,12 @@ public:
|
|||
break;
|
||||
}
|
||||
catch (Error &e) {
|
||||
TraceEvent("DBA_AbortError").error(e, true);
|
||||
wait(tr->onError(e));
|
||||
}
|
||||
}
|
||||
|
||||
tr = Reference<ReadYourWritesTransaction>(new ReadYourWritesTransaction(cx));
|
||||
tr = makeReference<ReadYourWritesTransaction>(cx);
|
||||
loop {
|
||||
tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
|
||||
tr->setOption(FDBTransactionOptions::LOCK_AWARE);
|
||||
|
@ -2327,7 +2337,7 @@ public:
|
|||
}
|
||||
}
|
||||
|
||||
tr = Reference<ReadYourWritesTransaction>(new ReadYourWritesTransaction(cx));
|
||||
tr = makeReference<ReadYourWritesTransaction>(cx);
|
||||
loop {
|
||||
try {
|
||||
tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
|
||||
|
@ -2523,8 +2533,9 @@ Future<Void> DatabaseBackupAgent::discontinueBackup(Reference<ReadYourWritesTran
|
|||
return DatabaseBackupAgentImpl::discontinueBackup(this, tr, tagName);
|
||||
}
|
||||
|
||||
Future<Void> DatabaseBackupAgent::abortBackup(Database cx, Key tagName, bool partial, bool abortOldBackup, bool dstOnly){
|
||||
return DatabaseBackupAgentImpl::abortBackup(this, cx, tagName, partial, abortOldBackup, dstOnly);
|
||||
Future<Void> DatabaseBackupAgent::abortBackup(Database cx, Key tagName, bool partial, bool abortOldBackup, bool dstOnly,
|
||||
bool waitForDestUID) {
|
||||
return DatabaseBackupAgentImpl::abortBackup(this, cx, tagName, partial, abortOldBackup, dstOnly, waitForDestUID);
|
||||
}
|
||||
|
||||
Future<std::string> DatabaseBackupAgent::getStatus(Database cx, int errorLimit, Key tagName) {
|
||||
|
|
|
@ -997,7 +997,9 @@ struct HealthMetrics {
|
|||
};
|
||||
|
||||
int64_t worstStorageQueue;
|
||||
int64_t limitingStorageQueue;
|
||||
int64_t worstStorageDurabilityLag;
|
||||
int64_t limitingStorageDurabilityLag;
|
||||
int64_t worstTLogQueue;
|
||||
double tpsLimit;
|
||||
bool batchLimited;
|
||||
|
@ -1005,17 +1007,15 @@ struct HealthMetrics {
|
|||
std::map<UID, int64_t> tLogQueue;
|
||||
|
||||
HealthMetrics()
|
||||
: worstStorageQueue(0)
|
||||
, worstStorageDurabilityLag(0)
|
||||
, worstTLogQueue(0)
|
||||
, tpsLimit(0.0)
|
||||
, batchLimited(false)
|
||||
{}
|
||||
: worstStorageQueue(0), limitingStorageQueue(0), worstStorageDurabilityLag(0), limitingStorageDurabilityLag(0),
|
||||
worstTLogQueue(0), tpsLimit(0.0), batchLimited(false) {}
|
||||
|
||||
void update(const HealthMetrics& hm, bool detailedInput, bool detailedOutput)
|
||||
{
|
||||
worstStorageQueue = hm.worstStorageQueue;
|
||||
limitingStorageQueue = hm.limitingStorageQueue;
|
||||
worstStorageDurabilityLag = hm.worstStorageDurabilityLag;
|
||||
limitingStorageDurabilityLag = hm.limitingStorageDurabilityLag;
|
||||
worstTLogQueue = hm.worstTLogQueue;
|
||||
tpsLimit = hm.tpsLimit;
|
||||
batchLimited = hm.batchLimited;
|
||||
|
@ -1030,19 +1030,16 @@ struct HealthMetrics {
|
|||
}
|
||||
|
||||
bool operator==(HealthMetrics const& r) const {
|
||||
return (
|
||||
worstStorageQueue == r.worstStorageQueue &&
|
||||
worstStorageDurabilityLag == r.worstStorageDurabilityLag &&
|
||||
worstTLogQueue == r.worstTLogQueue &&
|
||||
storageStats == r.storageStats &&
|
||||
tLogQueue == r.tLogQueue &&
|
||||
batchLimited == r.batchLimited
|
||||
);
|
||||
return (worstStorageQueue == r.worstStorageQueue && limitingStorageQueue == r.limitingStorageQueue &&
|
||||
worstStorageDurabilityLag == r.worstStorageDurabilityLag &&
|
||||
limitingStorageDurabilityLag == r.limitingStorageDurabilityLag && worstTLogQueue == r.worstTLogQueue &&
|
||||
storageStats == r.storageStats && tLogQueue == r.tLogQueue && batchLimited == r.batchLimited);
|
||||
}
|
||||
|
||||
template <class Ar>
|
||||
void serialize(Ar& ar) {
|
||||
serializer(ar, worstStorageQueue, worstStorageDurabilityLag, worstTLogQueue, tpsLimit, batchLimited, storageStats, tLogQueue);
|
||||
serializer(ar, worstStorageQueue, worstStorageDurabilityLag, worstTLogQueue, tpsLimit, batchLimited,
|
||||
storageStats, tLogQueue, limitingStorageQueue, limitingStorageDurabilityLag);
|
||||
}
|
||||
};
|
||||
|
||||
|
|
|
@ -99,6 +99,11 @@ StringRef FileBackupAgent::restoreStateText(ERestoreState id) {
|
|||
}
|
||||
}
|
||||
|
||||
Key FileBackupAgent::getPauseKey() {
|
||||
FileBackupAgent backupAgent;
|
||||
return backupAgent.taskBucket->getPauseKey();
|
||||
}
|
||||
|
||||
template<> Tuple Codec<ERestoreState>::pack(ERestoreState const &val) { return Tuple().append(val); }
|
||||
template<> ERestoreState Codec<ERestoreState>::unpack(Tuple const &val) { return (ERestoreState)val.getInt(0); }
|
||||
|
||||
|
@ -1398,8 +1403,8 @@ namespace fileBackup {
|
|||
else {
|
||||
ASSERT(snapshotBatchSize.present());
|
||||
// Batch future key exists in the config so create future from it
|
||||
snapshotBatchFuture = Reference<TaskFuture>(new TaskFuture(futureBucket, snapshotBatchFutureKey.get()));
|
||||
}
|
||||
snapshotBatchFuture = makeReference<TaskFuture>(futureBucket, snapshotBatchFutureKey.get());
|
||||
}
|
||||
|
||||
break;
|
||||
} catch(Error &e) {
|
||||
|
|
|
@ -368,7 +368,7 @@ ClientCoordinators::ClientCoordinators( Key clusterKey, std::vector<NetworkAddre
|
|||
for (const auto& coord : coordinators) {
|
||||
clientLeaderServers.push_back( ClientLeaderRegInterface( coord ) );
|
||||
}
|
||||
ccf = Reference<ClusterConnectionFile>(new ClusterConnectionFile( ClusterConnectionString( coordinators, clusterKey ) ) );
|
||||
ccf = makeReference<ClusterConnectionFile>(ClusterConnectionString(coordinators, clusterKey));
|
||||
}
|
||||
|
||||
|
||||
|
@ -477,7 +477,8 @@ ACTOR Future<MonitorLeaderInfo> monitorLeaderOneGeneration( Reference<ClusterCon
|
|||
if (leader.present()) {
|
||||
if( leader.get().first.forward ) {
|
||||
TraceEvent("MonitorLeaderForwarding").detail("NewConnStr", leader.get().first.serializedInfo.toString()).detail("OldConnStr", info.intermediateConnFile->getConnectionString().toString());
|
||||
info.intermediateConnFile = Reference<ClusterConnectionFile>(new ClusterConnectionFile(connFile->getFilename(), ClusterConnectionString(leader.get().first.serializedInfo.toString())));
|
||||
info.intermediateConnFile = makeReference<ClusterConnectionFile>(
|
||||
connFile->getFilename(), ClusterConnectionString(leader.get().first.serializedInfo.toString()));
|
||||
return info;
|
||||
}
|
||||
if(connFile != info.intermediateConnFile) {
|
||||
|
@ -505,7 +506,7 @@ template <class LeaderInterface>
|
|||
Future<Void> monitorLeaderRemotely(Reference<ClusterConnectionFile> const& connFile,
|
||||
Reference<AsyncVar<Optional<LeaderInterface>>> const& outKnownLeader) {
|
||||
LeaderDeserializer<LeaderInterface> deserializer;
|
||||
Reference<AsyncVar<Value>> serializedInfo( new AsyncVar<Value> );
|
||||
auto serializedInfo = makeReference<AsyncVar<Value>>();
|
||||
Future<Void> m = monitorLeaderRemotelyInternal( connFile, serializedInfo );
|
||||
return m || deserializer( serializedInfo, outKnownLeader );
|
||||
}
|
||||
|
|
|
@ -100,7 +100,7 @@ template <class LeaderInterface>
|
|||
Future<Void> monitorLeader(Reference<ClusterConnectionFile> const& connFile,
|
||||
Reference<AsyncVar<Optional<LeaderInterface>>> const& outKnownLeader) {
|
||||
LeaderDeserializer<LeaderInterface> deserializer;
|
||||
Reference<AsyncVar<Value>> serializedInfo( new AsyncVar<Value> );
|
||||
auto serializedInfo = makeReference<AsyncVar<Value>>();
|
||||
Future<Void> m = monitorLeaderInternal( connFile, serializedInfo );
|
||||
return m || deserializer( serializedInfo, outKnownLeader );
|
||||
}
|
||||
|
|
|
@ -442,7 +442,7 @@ Reference<IDatabase> DLApi::createDatabase609(const char *clusterFilePath) {
|
|||
}));
|
||||
});
|
||||
|
||||
return Reference<DLDatabase>(new DLDatabase(api, dbFuture));
|
||||
return makeReference<DLDatabase>(api, dbFuture);
|
||||
}
|
||||
|
||||
Reference<IDatabase> DLApi::createDatabase(const char *clusterFilePath) {
|
||||
|
@ -918,7 +918,8 @@ void MultiVersionDatabase::DatabaseState::stateChanged() {
|
|||
|
||||
void MultiVersionDatabase::DatabaseState::addConnection(Reference<ClientInfo> client, std::string clusterFilePath) {
|
||||
clients.push_back(client);
|
||||
connectionAttempts.push_back(Reference<Connector>(new Connector(Reference<DatabaseState>::addRef(this), client, clusterFilePath)));
|
||||
connectionAttempts.push_back(
|
||||
makeReference<Connector>(Reference<DatabaseState>::addRef(this), client, clusterFilePath));
|
||||
}
|
||||
|
||||
void MultiVersionDatabase::DatabaseState::startConnections() {
|
||||
|
@ -985,7 +986,7 @@ Reference<ClientInfo> MultiVersionApi::getLocalClient() {
|
|||
|
||||
void MultiVersionApi::selectApiVersion(int apiVersion) {
|
||||
if(!localClient) {
|
||||
localClient = Reference<ClientInfo>(new ClientInfo(ThreadSafeApi::api));
|
||||
localClient = makeReference<ClientInfo>(ThreadSafeApi::api);
|
||||
}
|
||||
|
||||
if(this->apiVersion != 0 && this->apiVersion != apiVersion) {
|
||||
|
@ -1044,7 +1045,7 @@ void MultiVersionApi::addExternalLibrary(std::string path) {
|
|||
|
||||
if(externalClients.count(filename) == 0) {
|
||||
TraceEvent("AddingExternalClient").detail("LibraryPath", filename);
|
||||
externalClients[filename] = Reference<ClientInfo>(new ClientInfo(new DLApi(path), path));
|
||||
externalClients[filename] = makeReference<ClientInfo>(new DLApi(path), path);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1061,7 +1062,7 @@ void MultiVersionApi::addExternalLibraryDirectory(std::string path) {
|
|||
std::string lib = abspath(joinPath(path, filename));
|
||||
if(externalClients.count(filename) == 0) {
|
||||
TraceEvent("AddingExternalClient").detail("LibraryPath", filename);
|
||||
externalClients[filename] = Reference<ClientInfo>(new ClientInfo(new DLApi(lib), lib));
|
||||
externalClients[filename] = makeReference<ClientInfo>(new DLApi(lib), lib);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1764,7 +1765,7 @@ struct DLTest {
|
|||
static Reference<FdbCApi> getApi() {
|
||||
static Reference<FdbCApi> api;
|
||||
if(!api) {
|
||||
api = Reference<FdbCApi>(new FdbCApi());
|
||||
api = makeReference<FdbCApi>();
|
||||
|
||||
// Functions needed for DLSingleAssignmentVar
|
||||
api->futureSetCallback = [](FdbCApi::FDBFuture *f, FdbCApi::FDBCallback callback, void *callbackParameter) {
|
||||
|
|
|
@ -84,12 +84,6 @@ using std::pair;
|
|||
|
||||
namespace {
|
||||
|
||||
ACTOR template <class T, class Fun>
|
||||
Future<T> runAfter(Future<T> in, Fun func) {
|
||||
T res = wait(in);
|
||||
return func(res);
|
||||
}
|
||||
|
||||
template <class Interface, class Request>
|
||||
Future<REPLY_TYPE(Request)> loadBalance(
|
||||
DatabaseContext* ctx, const Reference<LocationInfo> alternatives, RequestStream<Request> Interface::*channel,
|
||||
|
@ -99,13 +93,14 @@ Future<REPLY_TYPE(Request)> loadBalance(
|
|||
if (alternatives->hasCaches) {
|
||||
return loadBalance(alternatives->locations(), channel, request, taskID, atMostOnce, model);
|
||||
}
|
||||
return runAfter(loadBalance(alternatives->locations(), channel, request, taskID, atMostOnce, model),
|
||||
[ctx](auto res) {
|
||||
if (res.cached) {
|
||||
ctx->updateCache.trigger();
|
||||
}
|
||||
return res;
|
||||
});
|
||||
return fmap(
|
||||
[ctx](auto const& res) {
|
||||
if (res.cached) {
|
||||
ctx->updateCache.trigger();
|
||||
}
|
||||
return res;
|
||||
},
|
||||
loadBalance(alternatives->locations(), channel, request, taskID, atMostOnce, model));
|
||||
}
|
||||
} // namespace
|
||||
|
||||
|
@ -521,9 +516,9 @@ void updateLocationCacheWithCaches(DatabaseContext* self, const std::map<UID, St
|
|||
}
|
||||
}
|
||||
for (const auto& p : added) {
|
||||
interfaces.emplace_back(Reference<ReferencedInterface<StorageServerInterface>>{new ReferencedInterface<StorageServerInterface>{p.second}});
|
||||
interfaces.push_back(makeReference<ReferencedInterface<StorageServerInterface>>(p.second));
|
||||
}
|
||||
iter->value() = Reference<LocationInfo>{ new LocationInfo(interfaces, true) };
|
||||
iter->value() = makeReference<LocationInfo>(interfaces, true);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -536,7 +531,7 @@ Reference<LocationInfo> addCaches(const Reference<LocationInfo>& loc,
|
|||
interfaces.emplace_back((*loc)[i]);
|
||||
}
|
||||
interfaces.insert(interfaces.end(), other.begin(), other.end());
|
||||
return Reference<LocationInfo>{ new LocationInfo{ interfaces, true } };
|
||||
return makeReference<LocationInfo>(interfaces, true);
|
||||
}
|
||||
|
||||
ACTOR Future<Void> updateCachedRanges(DatabaseContext* self, std::map<UID, StorageServerInterface>* cacheServers) {
|
||||
|
@ -556,8 +551,7 @@ ACTOR Future<Void> updateCachedRanges(DatabaseContext* self, std::map<UID, Stora
|
|||
std::vector<Reference<ReferencedInterface<StorageServerInterface>>> cacheInterfaces;
|
||||
cacheInterfaces.reserve(cacheServers->size());
|
||||
for (const auto& p : *cacheServers) {
|
||||
cacheInterfaces.emplace_back(Reference<ReferencedInterface<StorageServerInterface>>{
|
||||
new ReferencedInterface<StorageServerInterface>{ p.second } });
|
||||
cacheInterfaces.push_back(makeReference<ReferencedInterface<StorageServerInterface>>(p.second));
|
||||
}
|
||||
bool currCached = false;
|
||||
KeyRef begin, end;
|
||||
|
@ -766,7 +760,9 @@ static Standalone<RangeResultRef> healthMetricsToKVPairs(const HealthMetrics& me
|
|||
statsObj["batch_limited"] = metrics.batchLimited;
|
||||
statsObj["tps_limit"] = metrics.tpsLimit;
|
||||
statsObj["worst_storage_durability_lag"] = metrics.worstStorageDurabilityLag;
|
||||
statsObj["limiting_storage_durability_lag"] = metrics.limitingStorageDurabilityLag;
|
||||
statsObj["worst_storage_queue"] = metrics.worstStorageQueue;
|
||||
statsObj["limiting_storage_queue"] = metrics.limitingStorageQueue;
|
||||
statsObj["worst_log_queue"] = metrics.worstTLogQueue;
|
||||
std::string statsString =
|
||||
json_spirit::write_string(json_spirit::mValue(statsObj), json_spirit::Output_options::raw_utf8);
|
||||
|
@ -1109,7 +1105,7 @@ Reference<LocationInfo> DatabaseContext::setCachedLocation( const KeyRangeRef& k
|
|||
}
|
||||
|
||||
int maxEvictionAttempts = 100, attempts = 0;
|
||||
Reference<LocationInfo> loc = Reference<LocationInfo>( new LocationInfo(serverRefs) );
|
||||
auto loc = makeReference<LocationInfo>(serverRefs);
|
||||
while( locationCache.size() > locationCacheSize && attempts < maxEvictionAttempts) {
|
||||
TEST( true ); // NativeAPI storage server locationCache entry evicted
|
||||
attempts++;
|
||||
|
@ -1185,10 +1181,10 @@ void DatabaseContext::setOption( FDBDatabaseOptions::Option option, Optional<Str
|
|||
case FDBDatabaseOptions::MACHINE_ID:
|
||||
clientLocality = LocalityData( clientLocality.processId(), value.present() ? Standalone<StringRef>(value.get()) : Optional<Standalone<StringRef>>(), clientLocality.machineId(), clientLocality.dcId() );
|
||||
if (clientInfo->get().commitProxies.size())
|
||||
commitProxies = Reference<CommitProxyInfo>(new CommitProxyInfo(clientInfo->get().commitProxies, false));
|
||||
commitProxies = makeReference<CommitProxyInfo>(clientInfo->get().commitProxies, false);
|
||||
if( clientInfo->get().grvProxies.size() )
|
||||
grvProxies = Reference<GrvProxyInfo>( new GrvProxyInfo( clientInfo->get().grvProxies, true) );
|
||||
server_interf.clear();
|
||||
grvProxies = makeReference<GrvProxyInfo>(clientInfo->get().grvProxies, true);
|
||||
server_interf.clear();
|
||||
locationCache.insert( allKeys, Reference<LocationInfo>() );
|
||||
break;
|
||||
case FDBDatabaseOptions::MAX_WATCHES:
|
||||
|
@ -1197,10 +1193,10 @@ void DatabaseContext::setOption( FDBDatabaseOptions::Option option, Optional<Str
|
|||
case FDBDatabaseOptions::DATACENTER_ID:
|
||||
clientLocality = LocalityData(clientLocality.processId(), clientLocality.zoneId(), clientLocality.machineId(), value.present() ? Standalone<StringRef>(value.get()) : Optional<Standalone<StringRef>>());
|
||||
if (clientInfo->get().commitProxies.size())
|
||||
commitProxies = Reference<CommitProxyInfo>( new CommitProxyInfo(clientInfo->get().commitProxies, false));
|
||||
commitProxies = makeReference<CommitProxyInfo>(clientInfo->get().commitProxies, false);
|
||||
if( clientInfo->get().grvProxies.size() )
|
||||
grvProxies = Reference<GrvProxyInfo>( new GrvProxyInfo( clientInfo->get().grvProxies, true));
|
||||
server_interf.clear();
|
||||
grvProxies = makeReference<GrvProxyInfo>(clientInfo->get().grvProxies, true);
|
||||
server_interf.clear();
|
||||
locationCache.insert( allKeys, Reference<LocationInfo>() );
|
||||
break;
|
||||
case FDBDatabaseOptions::SNAPSHOT_RYW_ENABLE:
|
||||
|
@ -1341,8 +1337,8 @@ Database Database::createDatabase( Reference<ClusterConnectionFile> connFile, in
|
|||
|
||||
g_network->initTLS();
|
||||
|
||||
Reference<AsyncVar<ClientDBInfo>> clientInfo(new AsyncVar<ClientDBInfo>());
|
||||
Reference<AsyncVar<Reference<ClusterConnectionFile>>> connectionFile(new AsyncVar<Reference<ClusterConnectionFile>>());
|
||||
auto clientInfo = makeReference<AsyncVar<ClientDBInfo>>();
|
||||
auto connectionFile = makeReference<AsyncVar<Reference<ClusterConnectionFile>>>();
|
||||
connectionFile->set(connFile);
|
||||
Future<Void> clientInfoMonitor = monitorProxies(connectionFile, clientInfo, networkOptions.supportedVersions, StringRef(networkOptions.traceLogGroup));
|
||||
|
||||
|
@ -1585,11 +1581,11 @@ void DatabaseContext::updateProxies() {
|
|||
grvProxies.clear();
|
||||
bool commitProxyProvisional = false, grvProxyProvisional = false;
|
||||
if (clientInfo->get().commitProxies.size()) {
|
||||
commitProxies = Reference<CommitProxyInfo>(new CommitProxyInfo(clientInfo->get().commitProxies, false));
|
||||
commitProxies = makeReference<CommitProxyInfo>(clientInfo->get().commitProxies, false);
|
||||
commitProxyProvisional = clientInfo->get().commitProxies[0].provisional;
|
||||
}
|
||||
if (clientInfo->get().grvProxies.size()) {
|
||||
grvProxies = Reference<GrvProxyInfo>(new GrvProxyInfo(clientInfo->get().grvProxies, true));
|
||||
grvProxies = makeReference<GrvProxyInfo>(clientInfo->get().grvProxies, true);
|
||||
grvProxyProvisional = clientInfo->get().grvProxies[0].provisional;
|
||||
}
|
||||
if (clientInfo->get().commitProxies.size() && clientInfo->get().grvProxies.size()) {
|
||||
|
@ -2279,7 +2275,7 @@ ACTOR Future<Standalone<RangeResultRef>> getExactRange( Database cx, Version ver
|
|||
}
|
||||
|
||||
if (!more || locations[shard].first.empty()) {
|
||||
TEST(true);
|
||||
TEST(true); // getExactrange (!more || locations[shard].first.empty())
|
||||
if(shard == locations.size()-1) {
|
||||
const KeyRangeRef& range = locations[shard].first;
|
||||
KeyRef begin = reverse ? keys.begin : range.end;
|
||||
|
@ -3779,8 +3775,8 @@ void Transaction::setOption( FDBTransactionOptions::Option option, Optional<Stri
|
|||
}
|
||||
}
|
||||
else {
|
||||
trLogInfo = Reference<TransactionLogInfo>(new TransactionLogInfo(value.get().printable(), TransactionLogInfo::DONT_LOG));
|
||||
trLogInfo->maxFieldLength = options.maxTransactionLoggingFieldLength;
|
||||
trLogInfo = makeReference<TransactionLogInfo>(value.get().printable(), TransactionLogInfo::DONT_LOG);
|
||||
trLogInfo->maxFieldLength = options.maxTransactionLoggingFieldLength;
|
||||
}
|
||||
if (info.debugID.present()) {
|
||||
TraceEvent(SevInfo, "TransactionBeingTraced")
|
||||
|
@ -4462,7 +4458,6 @@ Future< StorageMetrics > Transaction::getStorageMetrics( KeyRange const& keys, i
|
|||
|
||||
ACTOR Future<Standalone<VectorRef<DDMetricsRef>>> waitDataDistributionMetricsList(Database cx, KeyRange keys,
|
||||
int shardLimit) {
|
||||
state Future<Void> clientTimeout = delay(5.0);
|
||||
loop {
|
||||
choose {
|
||||
when(wait(cx->onProxiesChanged())) {}
|
||||
|
@ -4474,7 +4469,6 @@ ACTOR Future<Standalone<VectorRef<DDMetricsRef>>> waitDataDistributionMetricsLis
|
|||
}
|
||||
return rep.get().storageMetricsList;
|
||||
}
|
||||
when(wait(clientTimeout)) { throw timed_out(); }
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -4604,7 +4598,7 @@ Reference<TransactionLogInfo> Transaction::createTrLogInfoProbabilistically(cons
|
|||
if(!cx->isError()) {
|
||||
double clientSamplingProbability = std::isinf(cx->clientInfo->get().clientTxnInfoSampleRate) ? CLIENT_KNOBS->CSI_SAMPLING_PROBABILITY : cx->clientInfo->get().clientTxnInfoSampleRate;
|
||||
if (((networkOptions.logClientInfo.present() && networkOptions.logClientInfo.get()) || BUGGIFY) && deterministicRandom()->random01() < clientSamplingProbability && (!g_network->isSimulated() || !g_simulator.speedUpSimulation)) {
|
||||
return Reference<TransactionLogInfo>(new TransactionLogInfo(TransactionLogInfo::DATABASE));
|
||||
return makeReference<TransactionLogInfo>(TransactionLogInfo::DATABASE);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -1228,7 +1228,7 @@ ACTOR Future<Standalone<RangeResultRef>> getWorkerInterfaces (Reference<ClusterC
|
|||
}
|
||||
|
||||
Future< Optional<Value> > ReadYourWritesTransaction::get( const Key& key, bool snapshot ) {
|
||||
TEST(true);
|
||||
TEST(true); // ReadYourWritesTransaction::get
|
||||
|
||||
if (getDatabase()->apiVersionAtLeast(630)) {
|
||||
if (specialKeys.contains(key)) {
|
||||
|
|
File diff suppressed because it is too large
Load Diff
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* BlobStore.h
|
||||
* S3BlobStore.h
|
||||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
|
@ -31,11 +31,11 @@
|
|||
|
||||
// Representation of all the things you need to connect to a blob store instance with some credentials.
|
||||
// Reference counted because a very large number of them could be needed.
|
||||
class BlobStoreEndpoint : public ReferenceCounted<BlobStoreEndpoint> {
|
||||
class S3BlobStoreEndpoint : public ReferenceCounted<S3BlobStoreEndpoint> {
|
||||
public:
|
||||
struct Stats {
|
||||
Stats() : requests_successful(0), requests_failed(0), bytes_sent(0) {}
|
||||
Stats operator-(const Stats &rhs);
|
||||
Stats operator-(const Stats& rhs);
|
||||
void clear() { memset(this, 0, sizeof(*this)); }
|
||||
json_spirit::mObject getJSON();
|
||||
|
||||
|
@ -48,29 +48,12 @@ public:
|
|||
|
||||
struct BlobKnobs {
|
||||
BlobKnobs();
|
||||
int secure_connection,
|
||||
connect_tries,
|
||||
connect_timeout,
|
||||
max_connection_life,
|
||||
request_tries,
|
||||
request_timeout_min,
|
||||
requests_per_second,
|
||||
list_requests_per_second,
|
||||
write_requests_per_second,
|
||||
read_requests_per_second,
|
||||
delete_requests_per_second,
|
||||
multipart_max_part_size,
|
||||
multipart_min_part_size,
|
||||
concurrent_requests,
|
||||
concurrent_uploads,
|
||||
concurrent_lists,
|
||||
concurrent_reads_per_file,
|
||||
concurrent_writes_per_file,
|
||||
read_block_size,
|
||||
read_ahead_blocks,
|
||||
read_cache_blocks_per_file,
|
||||
max_send_bytes_per_second,
|
||||
max_recv_bytes_per_second;
|
||||
int secure_connection, connect_tries, connect_timeout, max_connection_life, request_tries, request_timeout_min,
|
||||
requests_per_second, list_requests_per_second, write_requests_per_second, read_requests_per_second,
|
||||
delete_requests_per_second, multipart_max_part_size, multipart_min_part_size, concurrent_requests,
|
||||
concurrent_uploads, concurrent_lists, concurrent_reads_per_file, concurrent_writes_per_file,
|
||||
read_block_size, read_ahead_blocks, read_cache_blocks_per_file, max_send_bytes_per_second,
|
||||
max_recv_bytes_per_second;
|
||||
bool set(StringRef name, int value);
|
||||
std::string getURLParameters() const;
|
||||
static std::vector<std::string> getKnobDescriptions() {
|
||||
|
@ -79,8 +62,10 @@ public:
|
|||
"connect_tries (or ct) Number of times to try to connect for each request.",
|
||||
"connect_timeout (or cto) Number of seconds to wait for a connect request to succeed.",
|
||||
"max_connection_life (or mcl) Maximum number of seconds to use a single TCP connection.",
|
||||
"request_tries (or rt) Number of times to try each request until a parseable HTTP response other than 429 is received.",
|
||||
"request_timeout_min (or rtom) Number of seconds to wait for a request to succeed after a connection is established.",
|
||||
"request_tries (or rt) Number of times to try each request until a parseable HTTP "
|
||||
"response other than 429 is received.",
|
||||
"request_timeout_min (or rtom) Number of seconds to wait for a request to succeed after a "
|
||||
"connection is established.",
|
||||
"requests_per_second (or rps) Max number of requests to start per second.",
|
||||
"list_requests_per_second (or lrps) Max number of list requests to start per second.",
|
||||
"write_requests_per_second (or wrps) Max number of write requests to start per second.",
|
||||
|
@ -88,8 +73,10 @@ public:
|
|||
"delete_requests_per_second (or drps) Max number of delete requests to start per second.",
|
||||
"multipart_max_part_size (or maxps) Max part size for multipart uploads.",
|
||||
"multipart_min_part_size (or minps) Min part size for multipart uploads.",
|
||||
"concurrent_requests (or cr) Max number of total requests in progress at once, regardless of operation-specific concurrency limits.",
|
||||
"concurrent_uploads (or cu) Max concurrent uploads (part or whole) that can be in progress at once.",
|
||||
"concurrent_requests (or cr) Max number of total requests in progress at once, regardless of "
|
||||
"operation-specific concurrency limits.",
|
||||
"concurrent_uploads (or cu) Max concurrent uploads (part or whole) that can be in progress "
|
||||
"at once.",
|
||||
"concurrent_lists (or cl) Max concurrent list operations that can be in progress at once.",
|
||||
"concurrent_reads_per_file (or crps) Max concurrent reads in progress for any one file.",
|
||||
"concurrent_writes_per_file (or cwps) Max concurrent uploads in progress for any one file.",
|
||||
|
@ -97,43 +84,45 @@ public:
|
|||
"read_ahead_blocks (or rab) Number of blocks to read ahead of requested offset.",
|
||||
"read_cache_blocks_per_file (or rcb) Size of the read cache for a file in blocks.",
|
||||
"max_send_bytes_per_second (or sbps) Max send bytes per second for all requests combined.",
|
||||
"max_recv_bytes_per_second (or rbps) Max receive bytes per second for all requests combined (NOT YET USED)."
|
||||
"max_recv_bytes_per_second (or rbps) Max receive bytes per second for all requests combined (NOT YET "
|
||||
"USED)."
|
||||
};
|
||||
}
|
||||
};
|
||||
|
||||
BlobStoreEndpoint(std::string const &host, std::string service, std::string const &key, std::string const &secret, BlobKnobs const &knobs = BlobKnobs(), HTTP::Headers extraHeaders = HTTP::Headers())
|
||||
: host(host), service(service), key(key), secret(secret), lookupSecret(secret.empty()), knobs(knobs), extraHeaders(extraHeaders),
|
||||
requestRate(new SpeedLimit(knobs.requests_per_second, 1)),
|
||||
requestRateList(new SpeedLimit(knobs.list_requests_per_second, 1)),
|
||||
requestRateWrite(new SpeedLimit(knobs.write_requests_per_second, 1)),
|
||||
requestRateRead(new SpeedLimit(knobs.read_requests_per_second, 1)),
|
||||
requestRateDelete(new SpeedLimit(knobs.delete_requests_per_second, 1)),
|
||||
sendRate(new SpeedLimit(knobs.max_send_bytes_per_second, 1)),
|
||||
recvRate(new SpeedLimit(knobs.max_recv_bytes_per_second, 1)),
|
||||
concurrentRequests(knobs.concurrent_requests),
|
||||
concurrentUploads(knobs.concurrent_uploads),
|
||||
concurrentLists(knobs.concurrent_lists) {
|
||||
S3BlobStoreEndpoint(std::string const& host, std::string service, std::string const& key, std::string const& secret,
|
||||
BlobKnobs const& knobs = BlobKnobs(), HTTP::Headers extraHeaders = HTTP::Headers())
|
||||
: host(host), service(service), key(key), secret(secret), lookupSecret(secret.empty()), knobs(knobs),
|
||||
extraHeaders(extraHeaders), requestRate(new SpeedLimit(knobs.requests_per_second, 1)),
|
||||
requestRateList(new SpeedLimit(knobs.list_requests_per_second, 1)),
|
||||
requestRateWrite(new SpeedLimit(knobs.write_requests_per_second, 1)),
|
||||
requestRateRead(new SpeedLimit(knobs.read_requests_per_second, 1)),
|
||||
requestRateDelete(new SpeedLimit(knobs.delete_requests_per_second, 1)),
|
||||
sendRate(new SpeedLimit(knobs.max_send_bytes_per_second, 1)),
|
||||
recvRate(new SpeedLimit(knobs.max_recv_bytes_per_second, 1)), concurrentRequests(knobs.concurrent_requests),
|
||||
concurrentUploads(knobs.concurrent_uploads), concurrentLists(knobs.concurrent_lists) {
|
||||
|
||||
if(host.empty())
|
||||
throw connection_string_invalid();
|
||||
if (host.empty()) throw connection_string_invalid();
|
||||
}
|
||||
|
||||
static std::string getURLFormat(bool withResource = false) {
|
||||
const char *resource = "";
|
||||
if(withResource)
|
||||
resource = "<name>";
|
||||
return format("blobstore://<api_key>:<secret>@<host>[:<port>]/%s[?<param>=<value>[&<param>=<value>]...]", resource);
|
||||
const char* resource = "";
|
||||
if (withResource) resource = "<name>";
|
||||
return format("blobstore://<api_key>:<secret>@<host>[:<port>]/%s[?<param>=<value>[&<param>=<value>]...]",
|
||||
resource);
|
||||
}
|
||||
|
||||
typedef std::map<std::string, std::string> ParametersT;
|
||||
|
||||
// Parse url and return a BlobStoreEndpoint
|
||||
// If the url has parameters that BlobStoreEndpoint can't consume then an error will be thrown unless ignored_parameters is given in which case
|
||||
// the unconsumed parameters will be added to it.
|
||||
static Reference<BlobStoreEndpoint> fromString(std::string const &url, std::string *resourceFromURL = nullptr, std::string *error = nullptr, ParametersT *ignored_parameters = nullptr);
|
||||
// Parse url and return a S3BlobStoreEndpoint
|
||||
// If the url has parameters that S3BlobStoreEndpoint can't consume then an error will be thrown unless
|
||||
// ignored_parameters is given in which case the unconsumed parameters will be added to it.
|
||||
static Reference<S3BlobStoreEndpoint> fromString(std::string const& url, std::string* resourceFromURL = nullptr,
|
||||
std::string* error = nullptr,
|
||||
ParametersT* ignored_parameters = nullptr);
|
||||
|
||||
// Get a normalized version of this URL with the given resource and any non-default BlobKnob values as URL parameters in addition to the passed params string
|
||||
// Get a normalized version of this URL with the given resource and any non-default BlobKnob values as URL
|
||||
// parameters in addition to the passed params string
|
||||
std::string getResourceURL(std::string resource, std::string params);
|
||||
|
||||
struct ReusableConnection {
|
||||
|
@ -142,7 +131,7 @@ public:
|
|||
};
|
||||
std::queue<ReusableConnection> connectionPool;
|
||||
Future<ReusableConnection> connect();
|
||||
void returnConnection(ReusableConnection &conn);
|
||||
void returnConnection(ReusableConnection& conn);
|
||||
|
||||
std::string host;
|
||||
std::string service;
|
||||
|
@ -167,18 +156,21 @@ public:
|
|||
Future<Void> updateSecret();
|
||||
|
||||
// Calculates the authentication string from the secret key
|
||||
std::string hmac_sha1(std::string const &msg);
|
||||
std::string hmac_sha1(std::string const& msg);
|
||||
|
||||
// Sets headers needed for Authorization (including Date which will be overwritten if present)
|
||||
void setAuthHeaders(std::string const &verb, std::string const &resource, HTTP::Headers &headers);
|
||||
void setAuthHeaders(std::string const& verb, std::string const& resource, HTTP::Headers& headers);
|
||||
|
||||
// Prepend the HTTP request header to the given PacketBuffer, returning the new head of the buffer chain
|
||||
static PacketBuffer * writeRequestHeader(std::string const &request, HTTP::Headers const &headers, PacketBuffer *dest);
|
||||
static PacketBuffer* writeRequestHeader(std::string const& request, HTTP::Headers const& headers,
|
||||
PacketBuffer* dest);
|
||||
|
||||
// Do an HTTP request to the Blob Store, read the response. Handles authentication.
|
||||
// Every blob store interaction should ultimately go through this function
|
||||
|
||||
Future<Reference<HTTP::Response>> doRequest(std::string const &verb, std::string const &resource, const HTTP::Headers &headers, UnsentPacketQueue *pContent, int contentLen, std::set<unsigned int> successCodes);
|
||||
Future<Reference<HTTP::Response>> doRequest(std::string const& verb, std::string const& resource,
|
||||
const HTTP::Headers& headers, UnsentPacketQueue* pContent,
|
||||
int contentLen, std::set<unsigned int> successCodes);
|
||||
|
||||
struct ObjectInfo {
|
||||
std::string name;
|
||||
|
@ -192,51 +184,61 @@ public:
|
|||
|
||||
// Get bucket contents via a stream, since listing large buckets will take many serial blob requests
|
||||
// If a delimiter is passed then common prefixes will be read in parallel, recursively, depending on recurseFilter.
|
||||
// Recursefilter is a must be a function that takes a string and returns true if it passes. The default behavior is to assume true.
|
||||
Future<Void> listObjectsStream(std::string const &bucket, PromiseStream<ListResult> results, Optional<std::string> prefix = {}, Optional<char> delimiter = {}, int maxDepth = 0, std::function<bool(std::string const &)> recurseFilter = nullptr);
|
||||
// Recursefilter is a must be a function that takes a string and returns true if it passes. The default behavior is
|
||||
// to assume true.
|
||||
Future<Void> listObjectsStream(std::string const& bucket, PromiseStream<ListResult> results,
|
||||
Optional<std::string> prefix = {}, Optional<char> delimiter = {}, int maxDepth = 0,
|
||||
std::function<bool(std::string const&)> recurseFilter = nullptr);
|
||||
|
||||
// Get a list of the files in a bucket, see listObjectsStream for more argument detail.
|
||||
Future<ListResult> listObjects(std::string const &bucket, Optional<std::string> prefix = {}, Optional<char> delimiter = {}, int maxDepth = 0, std::function<bool(std::string const &)> recurseFilter = nullptr);
|
||||
Future<ListResult> listObjects(std::string const& bucket, Optional<std::string> prefix = {},
|
||||
Optional<char> delimiter = {}, int maxDepth = 0,
|
||||
std::function<bool(std::string const&)> recurseFilter = nullptr);
|
||||
|
||||
// Get a list of all buckets
|
||||
Future<std::vector<std::string>> listBuckets();
|
||||
|
||||
// Check if a bucket exists
|
||||
Future<bool> bucketExists(std::string const &bucket);
|
||||
Future<bool> bucketExists(std::string const& bucket);
|
||||
|
||||
// Check if an object exists in a bucket
|
||||
Future<bool> objectExists(std::string const &bucket, std::string const &object);
|
||||
Future<bool> objectExists(std::string const& bucket, std::string const& object);
|
||||
|
||||
// Get the size of an object in a bucket
|
||||
Future<int64_t> objectSize(std::string const &bucket, std::string const &object);
|
||||
Future<int64_t> objectSize(std::string const& bucket, std::string const& object);
|
||||
|
||||
// Read an arbitrary segment of an object
|
||||
Future<int> readObject(std::string const &bucket, std::string const &object, void *data, int length, int64_t offset);
|
||||
Future<int> readObject(std::string const& bucket, std::string const& object, void* data, int length,
|
||||
int64_t offset);
|
||||
|
||||
// Delete an object in a bucket
|
||||
Future<Void> deleteObject(std::string const &bucket, std::string const &object);
|
||||
Future<Void> deleteObject(std::string const& bucket, std::string const& object);
|
||||
|
||||
// Delete all objects in a bucket under a prefix. Note this is not atomic as blob store does not
|
||||
// support this operation directly. This method is just a convenience method that lists and deletes
|
||||
// all of the objects in the bucket under the given prefix.
|
||||
// Since it can take a while, if a pNumDeleted and/or pBytesDeleted are provided they will be incremented every time
|
||||
// a deletion of an object completes.
|
||||
Future<Void> deleteRecursively(std::string const &bucket, std::string prefix = "", int *pNumDeleted = nullptr, int64_t *pBytesDeleted = nullptr);
|
||||
Future<Void> deleteRecursively(std::string const& bucket, std::string prefix = "", int* pNumDeleted = nullptr,
|
||||
int64_t* pBytesDeleted = nullptr);
|
||||
|
||||
// Create a bucket if it does not already exists.
|
||||
Future<Void> createBucket(std::string const &bucket);
|
||||
Future<Void> createBucket(std::string const& bucket);
|
||||
|
||||
// Useful methods for working with tiny files
|
||||
Future<std::string> readEntireFile(std::string const &bucket, std::string const &object);
|
||||
Future<Void> writeEntireFile(std::string const &bucket, std::string const &object, std::string const &content);
|
||||
Future<Void> writeEntireFileFromBuffer(std::string const &bucket, std::string const &object, UnsentPacketQueue *pContent, int contentLen, std::string const &contentMD5);
|
||||
Future<std::string> readEntireFile(std::string const& bucket, std::string const& object);
|
||||
Future<Void> writeEntireFile(std::string const& bucket, std::string const& object, std::string const& content);
|
||||
Future<Void> writeEntireFileFromBuffer(std::string const& bucket, std::string const& object,
|
||||
UnsentPacketQueue* pContent, int contentLen, std::string const& contentMD5);
|
||||
|
||||
// MultiPart upload methods
|
||||
// Returns UploadID
|
||||
Future<std::string> beginMultiPartUpload(std::string const &bucket, std::string const &object);
|
||||
Future<std::string> beginMultiPartUpload(std::string const& bucket, std::string const& object);
|
||||
// Returns eTag
|
||||
Future<std::string> uploadPart(std::string const &bucket, std::string const &object, std::string const &uploadID, unsigned int partNumber, UnsentPacketQueue *pContent, int contentLen, std::string const &contentMD5);
|
||||
Future<std::string> uploadPart(std::string const& bucket, std::string const& object, std::string const& uploadID,
|
||||
unsigned int partNumber, UnsentPacketQueue* pContent, int contentLen,
|
||||
std::string const& contentMD5);
|
||||
typedef std::map<int, std::string> MultiPartSetT;
|
||||
Future<Void> finishMultiPartUpload(std::string const &bucket, std::string const &object, std::string const &uploadID, MultiPartSetT const &parts);
|
||||
Future<Void> finishMultiPartUpload(std::string const& bucket, std::string const& object,
|
||||
std::string const& uploadID, MultiPartSetT const& parts);
|
||||
};
|
||||
|
|
@ -939,6 +939,8 @@ const KeyRef JSONSchemas::storageHealthSchema = LiteralStringRef(R"""(
|
|||
const KeyRef JSONSchemas::aggregateHealthSchema = LiteralStringRef(R"""(
|
||||
{
|
||||
"batch_limited": false,
|
||||
"limiting_storage_durability_lag": 5050809,
|
||||
"limiting_storage_queue": 2030,
|
||||
"tps_limit": 457082.8105811302,
|
||||
"worst_storage_durability_lag": 5050809,
|
||||
"worst_storage_queue": 2030,
|
||||
|
|
|
@ -243,12 +243,12 @@ ACTOR Future<Standalone<RangeResultRef>> SpecialKeySpace::getRangeAggregationAct
|
|||
// Handle all corner cases like what RYW does
|
||||
// return if range inverted
|
||||
if (actualBeginOffset >= actualEndOffset && begin.getKey() >= end.getKey()) {
|
||||
TEST(true);
|
||||
TEST(true); // inverted range
|
||||
return RangeResultRef(false, false);
|
||||
}
|
||||
// If touches begin or end, return with readToBegin and readThroughEnd flags
|
||||
if (begin.getKey() == moduleBoundary.end || end.getKey() == moduleBoundary.begin) {
|
||||
TEST(true);
|
||||
TEST(true); // query touches begin or end
|
||||
return result;
|
||||
}
|
||||
state RangeMap<Key, SpecialKeyRangeReadImpl*, KeyRangeRef>::Ranges ranges =
|
||||
|
@ -540,25 +540,34 @@ Future<Standalone<RangeResultRef>> ConflictingKeysImpl::getRange(ReadYourWritesT
|
|||
}
|
||||
|
||||
ACTOR Future<Standalone<RangeResultRef>> ddMetricsGetRangeActor(ReadYourWritesTransaction* ryw, KeyRangeRef kr) {
|
||||
try {
|
||||
auto keys = kr.removePrefix(ddStatsRange.begin);
|
||||
Standalone<VectorRef<DDMetricsRef>> resultWithoutPrefix =
|
||||
wait(waitDataDistributionMetricsList(ryw->getDatabase(), keys, CLIENT_KNOBS->STORAGE_METRICS_SHARD_LIMIT));
|
||||
Standalone<RangeResultRef> result;
|
||||
for (const auto& ddMetricsRef : resultWithoutPrefix) {
|
||||
// each begin key is the previous end key, thus we only encode the begin key in the result
|
||||
KeyRef beginKey = ddMetricsRef.beginKey.withPrefix(ddStatsRange.begin, result.arena());
|
||||
// Use json string encoded in utf-8 to encode the values, easy for adding more fields in the future
|
||||
json_spirit::mObject statsObj;
|
||||
statsObj["shard_bytes"] = ddMetricsRef.shardBytes;
|
||||
std::string statsString =
|
||||
json_spirit::write_string(json_spirit::mValue(statsObj), json_spirit::Output_options::raw_utf8);
|
||||
ValueRef bytes(result.arena(), statsString);
|
||||
result.push_back(result.arena(), KeyValueRef(beginKey, bytes));
|
||||
loop {
|
||||
try {
|
||||
auto keys = kr.removePrefix(ddStatsRange.begin);
|
||||
Standalone<VectorRef<DDMetricsRef>> resultWithoutPrefix = wait(
|
||||
waitDataDistributionMetricsList(ryw->getDatabase(), keys, CLIENT_KNOBS->STORAGE_METRICS_SHARD_LIMIT));
|
||||
Standalone<RangeResultRef> result;
|
||||
for (const auto& ddMetricsRef : resultWithoutPrefix) {
|
||||
// each begin key is the previous end key, thus we only encode the begin key in the result
|
||||
KeyRef beginKey = ddMetricsRef.beginKey.withPrefix(ddStatsRange.begin, result.arena());
|
||||
// Use json string encoded in utf-8 to encode the values, easy for adding more fields in the future
|
||||
json_spirit::mObject statsObj;
|
||||
statsObj["shard_bytes"] = ddMetricsRef.shardBytes;
|
||||
std::string statsString =
|
||||
json_spirit::write_string(json_spirit::mValue(statsObj), json_spirit::Output_options::raw_utf8);
|
||||
ValueRef bytes(result.arena(), statsString);
|
||||
result.push_back(result.arena(), KeyValueRef(beginKey, bytes));
|
||||
}
|
||||
return result;
|
||||
} catch (Error& e) {
|
||||
state Error err(e);
|
||||
if (e.code() == error_code_operation_failed) {
|
||||
TraceEvent(SevWarnAlways, "DataDistributorNotPresent")
|
||||
.detail("Operation", "DDMetricsReqestThroughSpecialKeys");
|
||||
wait(delayJittered(FLOW_KNOBS->PREVENT_FAST_SPIN_DELAY));
|
||||
continue;
|
||||
}
|
||||
throw err;
|
||||
}
|
||||
return result;
|
||||
} catch (Error& e) {
|
||||
throw;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -558,7 +558,7 @@ ACTOR Future<Void> timeoutMonitorLeader(Database db) {
|
|||
Future<StatusObject> StatusClient::statusFetcher( Database db ) {
|
||||
db->lastStatusFetch = now();
|
||||
if(!db->statusClusterInterface) {
|
||||
db->statusClusterInterface = Reference<AsyncVar<Optional<ClusterInterface>>>(new AsyncVar<Optional<ClusterInterface>>);
|
||||
db->statusClusterInterface = makeReference<AsyncVar<Optional<ClusterInterface>>>();
|
||||
db->statusLeaderMon = timeoutMonitorLeader(db);
|
||||
}
|
||||
|
||||
|
|
|
@ -35,8 +35,10 @@ void TagSet::addTag(TransactionTagRef tag) {
|
|||
throw too_many_tags();
|
||||
}
|
||||
|
||||
auto result = tags.insert(TransactionTagRef(arena, tag));
|
||||
if(result.second) {
|
||||
TransactionTagRef tagRef(arena, tag);
|
||||
auto it = find(tags.begin(), tags.end(), tagRef);
|
||||
if (it == tags.end()) {
|
||||
tags.push_back(std::move(tagRef));
|
||||
bytes += tag.size();
|
||||
}
|
||||
}
|
||||
|
|
|
@ -40,7 +40,7 @@ typedef Standalone<TransactionTagRef> TransactionTag;
|
|||
|
||||
class TagSet {
|
||||
public:
|
||||
typedef std::set<TransactionTagRef>::const_iterator const_iterator;
|
||||
typedef std::vector<TransactionTagRef>::const_iterator const_iterator;
|
||||
|
||||
TagSet() : bytes(0) {}
|
||||
|
||||
|
@ -54,51 +54,35 @@ public:
|
|||
const_iterator end() const {
|
||||
return tags.end();
|
||||
}
|
||||
|
||||
void clear() {
|
||||
tags.clear();
|
||||
bytes = 0;
|
||||
}
|
||||
//private:
|
||||
Arena arena;
|
||||
std::set<TransactionTagRef> tags;
|
||||
size_t bytes;
|
||||
};
|
||||
|
||||
template <>
|
||||
struct dynamic_size_traits<TagSet> : std::true_type {
|
||||
// May be called multiple times during one serialization
|
||||
template <class Context>
|
||||
static size_t size(const TagSet& t, Context&) {
|
||||
return t.tags.size() + t.bytes;
|
||||
}
|
||||
|
||||
// Guaranteed to be called only once during serialization
|
||||
template <class Context>
|
||||
static void save(uint8_t* out, const TagSet& t, Context& c) {
|
||||
void save(uint8_t* out, Context& c) const {
|
||||
uint8_t *start = out;
|
||||
for (const auto& tag : t.tags) {
|
||||
for (const auto& tag : *this) {
|
||||
*(out++) = (uint8_t)tag.size();
|
||||
|
||||
std::copy(tag.begin(), tag.end(), out);
|
||||
out += tag.size();
|
||||
}
|
||||
|
||||
ASSERT((size_t)(out-start) == size(t, c));
|
||||
ASSERT((size_t)(out - start) == size() + bytes);
|
||||
}
|
||||
|
||||
// Context is an arbitrary type that is plumbed by reference throughout the
|
||||
// load call tree.
|
||||
template <class Context>
|
||||
static void load(const uint8_t* data, size_t size, TagSet& t, Context& context) {
|
||||
void load(const uint8_t* data, size_t size, Context& context) {
|
||||
//const uint8_t *start = data;
|
||||
const uint8_t *end = data + size;
|
||||
while(data < end) {
|
||||
uint8_t len = *(data++);
|
||||
TransactionTagRef tag(context.tryReadZeroCopy(data, len), len);
|
||||
// Tags are already deduplicated
|
||||
const auto& tag = tags.emplace_back(context.tryReadZeroCopy(data, len), len);
|
||||
data += len;
|
||||
|
||||
t.tags.insert(tag);
|
||||
t.bytes += tag.size();
|
||||
bytes += tag.size();
|
||||
}
|
||||
|
||||
ASSERT(data == end);
|
||||
|
@ -106,7 +90,41 @@ struct dynamic_size_traits<TagSet> : std::true_type {
|
|||
// Deserialized tag sets share the arena with the request that contained them
|
||||
// For this reason, persisting a TagSet that shares memory with other request
|
||||
// members should be done with caution.
|
||||
t.arena = context.arena();
|
||||
arena = context.arena();
|
||||
}
|
||||
|
||||
size_t getBytes() const { return bytes; }
|
||||
|
||||
const Arena& getArena() const { return arena; }
|
||||
|
||||
private:
|
||||
size_t bytes;
|
||||
Arena arena;
|
||||
// Currently there are never >= 256 tags, so
|
||||
// std::vector is faster than std::set. This may
|
||||
// change if we allow more tags in the future.
|
||||
std::vector<TransactionTagRef> tags;
|
||||
};
|
||||
|
||||
template <>
|
||||
struct dynamic_size_traits<TagSet> : std::true_type {
|
||||
// May be called multiple times during one serialization
|
||||
template <class Context>
|
||||
static size_t size(const TagSet& t, Context&) {
|
||||
return t.size() + t.getBytes();
|
||||
}
|
||||
|
||||
// Guaranteed to be called only once during serialization
|
||||
template <class Context>
|
||||
static void save(uint8_t* out, const TagSet& t, Context& c) {
|
||||
t.save(out, c);
|
||||
}
|
||||
|
||||
// Context is an arbitrary type that is plumbed by reference throughout the
|
||||
// load call tree.
|
||||
template <class Context>
|
||||
static void load(const uint8_t* data, size_t size, TagSet& t, Context& context) {
|
||||
t.load(data, size, context);
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -208,4 +226,4 @@ using PrioritizedTransactionTagMap = std::map<TransactionPriority, TransactionTa
|
|||
|
||||
template <class Value>
|
||||
using UIDTransactionTagMap = std::unordered_map<UID, TransactionTagMap<Value>>;
|
||||
#endif
|
||||
#endif
|
||||
|
|
|
@ -513,7 +513,7 @@ public:
|
|||
}
|
||||
|
||||
ACTOR static Future<Void> run(Database cx, Reference<TaskBucket> taskBucket, Reference<FutureBucket> futureBucket, double *pollDelay, int maxConcurrentTasks) {
|
||||
state Reference<AsyncVar<bool>> paused = Reference<AsyncVar<bool>>( new AsyncVar<bool>(true) );
|
||||
state Reference<AsyncVar<bool>> paused = makeReference<AsyncVar<bool>>(true);
|
||||
state Future<Void> watchPausedFuture = watchPaused(cx, taskBucket, paused);
|
||||
taskBucket->metricLogger = traceCounters("TaskBucketMetrics", taskBucket->dbgid, CLIENT_KNOBS->TASKBUCKET_LOGGING_DELAY, &taskBucket->cc);
|
||||
loop {
|
||||
|
@ -528,7 +528,7 @@ public:
|
|||
static Future<Standalone<StringRef>> addIdle(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> taskBucket) {
|
||||
taskBucket->setOptions(tr);
|
||||
|
||||
Reference<Task> newTask(new Task(IdleTaskFunc::name, IdleTaskFunc::version));
|
||||
auto newTask = makeReference<Task>(IdleTaskFunc::name, IdleTaskFunc::version);
|
||||
return taskBucket->addTask(tr, newTask);
|
||||
}
|
||||
|
||||
|
@ -991,7 +991,7 @@ Future<Void> FutureBucket::clear(Reference<ReadYourWritesTransaction> tr){
|
|||
Reference<TaskFuture> FutureBucket::future(Reference<ReadYourWritesTransaction> tr){
|
||||
setOptions(tr);
|
||||
|
||||
Reference<TaskFuture> taskFuture(new TaskFuture(Reference<FutureBucket>::addRef(this)));
|
||||
auto taskFuture = makeReference<TaskFuture>(Reference<FutureBucket>::addRef(this));
|
||||
taskFuture->addBlock(tr, StringRef());
|
||||
|
||||
return taskFuture;
|
||||
|
@ -1002,7 +1002,7 @@ Future<bool> FutureBucket::isEmpty(Reference<ReadYourWritesTransaction> tr) {
|
|||
}
|
||||
|
||||
Reference<TaskFuture> FutureBucket::unpack(Key key) {
|
||||
return Reference<TaskFuture>(new TaskFuture(Reference<FutureBucket>::addRef(this), key));
|
||||
return makeReference<TaskFuture>(Reference<FutureBucket>::addRef(this), key);
|
||||
}
|
||||
|
||||
class TaskFutureImpl {
|
||||
|
@ -1028,7 +1028,7 @@ public:
|
|||
for (int i = 0; i < vectorFuture.size(); ++i) {
|
||||
Key key = StringRef(deterministicRandom()->randomUniqueID().toString());
|
||||
taskFuture->addBlock(tr, key);
|
||||
Reference<Task> task(new Task());
|
||||
auto task = makeReference<Task>();
|
||||
task->params[Task::reservedTaskParamKeyType] = LiteralStringRef("UnblockFuture");
|
||||
task->params[Task::reservedTaskParamKeyFuture] = taskFuture->key;
|
||||
task->params[Task::reservedTaskParamKeyBlockID] = key;
|
||||
|
@ -1111,7 +1111,7 @@ public:
|
|||
// If we see a new task ID and the old one isn't empty then process the task accumulated so far and make a new task
|
||||
if(taskID.size() != 0 && taskID != lastTaskID) {
|
||||
actions.push_back(performAction(tr, taskBucket, taskFuture, task));
|
||||
task = Reference<Task>(new Task());
|
||||
task = makeReference<Task>();
|
||||
}
|
||||
task->params[key] = s.value;
|
||||
lastTaskID = taskID;
|
||||
|
|
|
@ -135,10 +135,10 @@ namespace PTreeImpl {
|
|||
// and should drop its reference count
|
||||
Reference<PTree<T>> r;
|
||||
if (which)
|
||||
r = Reference<PTree<T>>( new PTree<T>( node->priority, node->data, node->child(0, at), ptr, at ) );
|
||||
else
|
||||
r = Reference<PTree<T>>( new PTree<T>( node->priority, node->data, ptr, node->child(1, at), at ) );
|
||||
node->pointer[2].clear();
|
||||
r = makeReference<PTree<T>>(node->priority, node->data, node->child(0, at), ptr, at);
|
||||
else
|
||||
r = makeReference<PTree<T>>(node->priority, node->data, ptr, node->child(1, at), at);
|
||||
node->pointer[2].clear();
|
||||
return r;
|
||||
} else {
|
||||
if (node->updated)
|
||||
|
@ -150,10 +150,10 @@ namespace PTreeImpl {
|
|||
}
|
||||
if ( node->updated ) {
|
||||
if (which)
|
||||
return Reference<PTree<T>>( new PTree<T>( node->priority, node->data, node->child(0, at), ptr, at ) );
|
||||
else
|
||||
return Reference<PTree<T>>( new PTree<T>( node->priority, node->data, ptr, node->child(1, at), at ) );
|
||||
} else {
|
||||
return makeReference<PTree<T>>(node->priority, node->data, node->child(0, at), ptr, at);
|
||||
else
|
||||
return makeReference<PTree<T>>(node->priority, node->data, ptr, node->child(1, at), at);
|
||||
} else {
|
||||
node->lastUpdateVersion = at;
|
||||
node->replacedPointer = which;
|
||||
node->pointer[2] = ptr;
|
||||
|
@ -269,8 +269,8 @@ namespace PTreeImpl {
|
|||
template<class T>
|
||||
void insert(Reference<PTree<T>>& p, Version at, const T& x) {
|
||||
if (!p){
|
||||
p = Reference<PTree<T>>(new PTree<T>(x, at));
|
||||
} else {
|
||||
p = makeReference<PTree<T>>(x, at);
|
||||
} else {
|
||||
bool direction = !(x < p->data);
|
||||
Reference<PTree<T>> child = p->child(direction, at);
|
||||
insert(child, at, x);
|
||||
|
@ -425,8 +425,8 @@ namespace PTreeImpl {
|
|||
if (!left) return right;
|
||||
if (!right) return left;
|
||||
|
||||
Reference<PTree<T>> r = Reference<PTree<T>>(new PTree<T>(lastNode(left, at)->data, at));
|
||||
if (EXPENSIVE_VALIDATION) {
|
||||
Reference<PTree<T>> r = makeReference<PTree<T>>(lastNode(left, at)->data, at);
|
||||
if (EXPENSIVE_VALIDATION) {
|
||||
ASSERT( r->data < firstNode(right, at)->data);
|
||||
}
|
||||
Reference<PTree<T>> a = left;
|
||||
|
|
|
@ -628,7 +628,7 @@ private:
|
|||
bool end_conflict = it.is_conflict_range();
|
||||
bool end_unreadable = it.is_unreadable();
|
||||
|
||||
TEST( it.is_conflict_range() != lastConflicted );
|
||||
TEST( it.is_conflict_range() != lastConflicted ); // not last conflicted
|
||||
|
||||
it.tree.clear();
|
||||
|
||||
|
|
|
@ -0,0 +1,15 @@
|
|||
project(azurestorage-download)
|
||||
|
||||
include(ExternalProject)
|
||||
ExternalProject_Add(azurestorage
|
||||
GIT_REPOSITORY https://github.com/Azure/azure-storage-cpplite.git
|
||||
GIT_TAG 11e1f98b021446ef340f4886796899a6eb1ad9a5 # v0.3.0
|
||||
SOURCE_DIR "${CMAKE_CURRENT_BINARY_DIR}/azurestorage-src"
|
||||
BINARY_DIR "${CMAKE_CURRENT_BINARY_DIR}/azurestorage-build"
|
||||
CMAKE_ARGS "-DCMAKE_BUILD_TYPE=Release"
|
||||
CONFIGURE_COMMAND ""
|
||||
BUILD_COMMAND ""
|
||||
INSTALL_COMMAND ""
|
||||
TEST_COMMAND ""
|
||||
BUILD_BYPRODUCTS "${CMAKE_CURRENT_BINARY_DIR}/libazure-storage-lite.a"
|
||||
)
|
|
@ -20,8 +20,10 @@ endif()
|
|||
# Create a local sandbox for quick manual testing without simulator
|
||||
file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/sandbox/data)
|
||||
file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/sandbox/logs)
|
||||
configure_file(${CMAKE_SOURCE_DIR}/cmake/Sandbox.conf.cmake
|
||||
${CMAKE_BINARY_DIR}/sandbox/foundationdb.conf)
|
||||
if(NOT EXISTS ${CMAKE_BINARY_DIR}/sandbox/foundationdb.conf)
|
||||
configure_file(${CMAKE_SOURCE_DIR}/cmake/Sandbox.conf.cmake
|
||||
${CMAKE_BINARY_DIR}/sandbox/foundationdb.conf)
|
||||
endif()
|
||||
|
||||
# this is not portable on Windows - but fdbmonitor isn't built there anyways...
|
||||
add_custom_target(clean_sandbox
|
||||
|
|
|
@ -60,7 +60,8 @@ Future<Reference<IAsyncFile>> AsyncFileCached::open_impl( std::string filename,
|
|||
if(cacheItr == simulatorPageCaches.end()) {
|
||||
int64_t pageCacheSize4k = (BUGGIFY) ? FLOW_KNOBS->BUGGIFY_SIM_PAGE_CACHE_4K : FLOW_KNOBS->SIM_PAGE_CACHE_4K;
|
||||
int64_t pageCacheSize64k = (BUGGIFY) ? FLOW_KNOBS->BUGGIFY_SIM_PAGE_CACHE_64K : FLOW_KNOBS->SIM_PAGE_CACHE_64K;
|
||||
auto caches = std::make_pair(Reference<EvictablePageCache>(new EvictablePageCache(4096, pageCacheSize4k)), Reference<EvictablePageCache>(new EvictablePageCache(65536, pageCacheSize64k)));
|
||||
auto caches = std::make_pair(makeReference<EvictablePageCache>(4096, pageCacheSize4k),
|
||||
makeReference<EvictablePageCache>(65536, pageCacheSize64k));
|
||||
simulatorPageCaches[g_network->getLocalAddress()] = caches;
|
||||
pageCache = (flags & IAsyncFile::OPEN_LARGE_PAGES) ? caches.second : caches.first;
|
||||
}
|
||||
|
@ -69,10 +70,10 @@ Future<Reference<IAsyncFile>> AsyncFileCached::open_impl( std::string filename,
|
|||
}
|
||||
else {
|
||||
if(flags & IAsyncFile::OPEN_LARGE_PAGES) {
|
||||
if(!pc64k.present()) pc64k = Reference<EvictablePageCache>(new EvictablePageCache(65536, FLOW_KNOBS->PAGE_CACHE_64K));
|
||||
if (!pc64k.present()) pc64k = makeReference<EvictablePageCache>(65536, FLOW_KNOBS->PAGE_CACHE_64K);
|
||||
pageCache = pc64k.get();
|
||||
} else {
|
||||
if(!pc4k.present()) pc4k = Reference<EvictablePageCache>(new EvictablePageCache(4096, FLOW_KNOBS->PAGE_CACHE_4K));
|
||||
if (!pc4k.present()) pc4k = makeReference<EvictablePageCache>(4096, FLOW_KNOBS->PAGE_CACHE_4K);
|
||||
pageCache = pc4k.get();
|
||||
}
|
||||
}
|
||||
|
|
|
@ -172,11 +172,11 @@ public:
|
|||
|
||||
static Future<Void> async_fdatasync( int fd ) {
|
||||
// Used by AsyncFileKAIO, since kernel AIO doesn't really implement fdatasync yet
|
||||
return sync_impl( fd, Reference<ErrorInfo>(new ErrorInfo) );
|
||||
return sync_impl(fd, makeReference<ErrorInfo>());
|
||||
}
|
||||
static Future<Void> async_fsync( int fd ) {
|
||||
// Used by AsyncFileKAIO, since kernel AIO doesn't really implement fsync yet
|
||||
return sync_impl( fd, Reference<ErrorInfo>(new ErrorInfo), true );
|
||||
return sync_impl(fd, makeReference<ErrorInfo>(), true);
|
||||
}
|
||||
ACTOR static Future<Void> waitAndAtomicRename( Future<Void> fsync, std::string part_filename, std::string final_filename ) {
|
||||
// First wait for the data in the part file to be durable
|
||||
|
|
|
@ -74,9 +74,7 @@ public:
|
|||
when( wait(success( g_simulator.getCurrentProcess()->shutdownSignal.getFuture() )) ) {
|
||||
throw io_error().asInjectedFault();
|
||||
}
|
||||
when( Reference<IAsyncFile> f = wait( wrappedFile ) ) {
|
||||
return Reference<AsyncFileDetachable>( new AsyncFileDetachable(f) );
|
||||
}
|
||||
when(Reference<IAsyncFile> f = wait(wrappedFile)) { return makeReference<AsyncFileDetachable>(f); }
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -682,7 +682,7 @@ TEST_CASE("/flow/flow/yieldedFuture/progress")
|
|||
// Check that if check_yield always returns true, the yieldedFuture will do nothing immediately but will
|
||||
// get one thing done per "tick" (per delay(0) returning).
|
||||
|
||||
Reference<YieldMockNetwork> yn( new YieldMockNetwork );
|
||||
auto yn = makeReference<YieldMockNetwork>();
|
||||
|
||||
yn->nextYield = 0;
|
||||
|
||||
|
@ -717,7 +717,7 @@ TEST_CASE("/flow/flow/yieldedFuture/random")
|
|||
{
|
||||
// Check expectations about exactly how yieldedFuture responds to check_yield results
|
||||
|
||||
Reference<YieldMockNetwork> yn( new YieldMockNetwork );
|
||||
auto yn = makeReference<YieldMockNetwork>();
|
||||
|
||||
for(int r=0; r<100; r++) {
|
||||
Promise<Void> p;
|
||||
|
@ -765,7 +765,7 @@ TEST_CASE("/flow/perf/yieldedFuture")
|
|||
double start;
|
||||
int N = 1000000;
|
||||
|
||||
Reference<YieldMockNetwork> yn( new YieldMockNetwork );
|
||||
auto yn = makeReference<YieldMockNetwork>();
|
||||
|
||||
yn->nextYield = 2*N + 100;
|
||||
|
||||
|
|
|
@ -283,7 +283,7 @@ TransportData::TransportData(uint64_t transportId)
|
|||
transportId(transportId),
|
||||
numIncompatibleConnections(0)
|
||||
{
|
||||
degraded = Reference<AsyncVar<bool>>( new AsyncVar<bool>(false) );
|
||||
degraded = makeReference<AsyncVar<bool>>(false);
|
||||
pingLogger = pingLatencyLogger(this);
|
||||
}
|
||||
|
||||
|
@ -292,7 +292,7 @@ TransportData::TransportData(uint64_t transportId)
|
|||
|
||||
#pragma pack( push, 1 )
|
||||
struct ConnectPacket {
|
||||
// The value does not inclueds the size of `connectPacketLength` itself,
|
||||
// The value does not include the size of `connectPacketLength` itself,
|
||||
// but only the other fields of this structure.
|
||||
uint32_t connectPacketLength;
|
||||
ProtocolVersion protocolVersion; // Expect currentProtocolVersion
|
||||
|
@ -1192,7 +1192,7 @@ Reference<Peer> TransportData::getPeer( NetworkAddress const& address ) {
|
|||
Reference<Peer> TransportData::getOrOpenPeer( NetworkAddress const& address, bool startConnectionKeeper ) {
|
||||
auto peer = getPeer(address);
|
||||
if(!peer) {
|
||||
peer = Reference<Peer>( new Peer(this, address) );
|
||||
peer = makeReference<Peer>(this, address);
|
||||
if(startConnectionKeeper && !isLocalAddress(address)) {
|
||||
peer->connect = connectionKeeper(peer);
|
||||
}
|
||||
|
|
|
@ -26,7 +26,7 @@
|
|||
#include "flow/flow.h"
|
||||
|
||||
// All outstanding operations must be cancelled before the destructor of IAsyncFile is called.
|
||||
// The desirability of the above semantic is disputed. Some classes (AsyncFileBlobStore,
|
||||
// The desirability of the above semantic is disputed. Some classes (AsyncFileS3BlobStore,
|
||||
// AsyncFileCached) maintain references, while others (AsyncFileNonDurable) don't, and the comment
|
||||
// is unapplicable to some others as well (AsyncFileKAIO). It's safest to assume that all operations
|
||||
// must complete or cancel, but you should probably look at the file implementations you'll be using.
|
||||
|
|
|
@ -184,7 +184,7 @@ public:
|
|||
localitySet = itKeyValue->_resultset;
|
||||
}
|
||||
else {
|
||||
localitySet = Reference<LocalitySet>(new LocalitySet(*_localitygroup));
|
||||
localitySet = makeReference<LocalitySet>(*_localitygroup);
|
||||
_cachemisses ++;
|
||||
// If the key is not within the current key set, skip it because no items within
|
||||
// the current entry array has the key
|
||||
|
@ -213,7 +213,7 @@ public:
|
|||
|
||||
// This function is used to create an subset containing the specified entries
|
||||
Reference<LocalitySet> restrict(std::vector<LocalityEntry> const& entryArray) {
|
||||
Reference<LocalitySet> localitySet(new LocalitySet(*_localitygroup));
|
||||
auto localitySet = makeReference<LocalitySet>(*_localitygroup);
|
||||
for (auto& entry : entryArray) {
|
||||
localitySet->add(getRecordViaEntry(entry), *this);
|
||||
}
|
||||
|
@ -509,7 +509,7 @@ struct LocalityGroup : public LocalitySet {
|
|||
|
||||
LocalityEntry const& add(LocalityData const& data) {
|
||||
// _recordArray.size() is the new entry index for the new data
|
||||
Reference<LocalityRecord> record(new LocalityRecord(convertToAttribMap(data), _recordArray.size()));
|
||||
auto record = makeReference<LocalityRecord>(convertToAttribMap(data), _recordArray.size());
|
||||
_recordArray.push_back(record);
|
||||
return LocalitySet::add(record, *this);
|
||||
}
|
||||
|
@ -552,7 +552,7 @@ struct LocalityGroup : public LocalitySet {
|
|||
|
||||
// Convert locality data to sorted vector of int pairs
|
||||
Reference<KeyValueMap> convertToAttribMap(LocalityData const& data) {
|
||||
Reference<KeyValueMap> attribHashMap(new KeyValueMap);
|
||||
auto attribHashMap = makeReference<KeyValueMap>();
|
||||
for (auto& dataPair : data._data) {
|
||||
auto indexKey = keyIndex(dataPair.first);
|
||||
auto indexValue = valueIndex(dataPair.second);
|
||||
|
|
|
@ -45,7 +45,7 @@ double Counter::getRate() const {
|
|||
}
|
||||
|
||||
double Counter::getRoughness() const {
|
||||
double elapsed = now() - roughness_interval_start;
|
||||
double elapsed = last_event - roughness_interval_start;
|
||||
if(elapsed == 0) {
|
||||
return -1;
|
||||
}
|
||||
|
|
|
@ -63,7 +63,7 @@ Future<REPLY_TYPE(Req)> retryBrokenPromise( RequestStream<Req> to, Req request,
|
|||
throw;
|
||||
resetReply( request );
|
||||
wait( delayJittered(FLOW_KNOBS->PREVENT_FAST_SPIN_DELAY, taskID) );
|
||||
TEST(true); // retryBrokenPromise
|
||||
TEST(true); // retryBrokenPromise with taskID
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -19,8 +19,13 @@
|
|||
*/
|
||||
|
||||
#include <cinttypes>
|
||||
#include <deque>
|
||||
#include <memory>
|
||||
#include <vector>
|
||||
|
||||
#include "fdbrpc/simulator.h"
|
||||
#include "flow/ActorCollection.h"
|
||||
#include "flow/IRandom.h"
|
||||
#include "flow/IThreadPool.h"
|
||||
#include "flow/Util.h"
|
||||
#include "fdbrpc/IAsyncFile.h"
|
||||
|
@ -29,6 +34,8 @@
|
|||
#include "flow/crc32c.h"
|
||||
#include "fdbrpc/TraceFileIO.h"
|
||||
#include "flow/FaultInjection.h"
|
||||
#include "flow/flow.h"
|
||||
#include "flow/genericactors.actor.h"
|
||||
#include "flow/network.h"
|
||||
#include "flow/TLSConfig.actor.h"
|
||||
#include "fdbrpc/Net2FileSystem.h"
|
||||
|
@ -426,8 +433,10 @@ public:
|
|||
|
||||
static bool should_poll() { return false; }
|
||||
|
||||
ACTOR static Future<Reference<IAsyncFile>> open( std::string filename, int flags, int mode,
|
||||
Reference<DiskParameters> diskParameters = Reference<DiskParameters>(new DiskParameters(25000, 150000000)), bool delayOnWrite = true ) {
|
||||
ACTOR static Future<Reference<IAsyncFile>> open(
|
||||
std::string filename, int flags, int mode,
|
||||
Reference<DiskParameters> diskParameters = makeReference<DiskParameters>(25000, 150000000),
|
||||
bool delayOnWrite = true) {
|
||||
state ISimulator::ProcessInfo* currentProcess = g_simulator.getCurrentProcess();
|
||||
state TaskPriority currentTaskID = g_network->getCurrentTask();
|
||||
|
||||
|
@ -554,8 +563,8 @@ private:
|
|||
|
||||
debugFileCheck("SimpleFileRead", self->filename, data, offset, length);
|
||||
|
||||
INJECT_FAULT(io_timeout, "SimpleFile::read");
|
||||
INJECT_FAULT(io_error, "SimpleFile::read");
|
||||
INJECT_FAULT(io_timeout, "SimpleFile::read"); // SimpleFile::read io_timeout injected
|
||||
INJECT_FAULT(io_error, "SimpleFile::read"); // SimpleFile::read io_error injected
|
||||
|
||||
return read_bytes;
|
||||
}
|
||||
|
@ -592,8 +601,8 @@ private:
|
|||
|
||||
debugFileCheck("SimpleFileWrite", self->filename, (void*)data.begin(), offset, data.size());
|
||||
|
||||
INJECT_FAULT(io_timeout, "SimpleFile::write");
|
||||
INJECT_FAULT(io_error, "SimpleFile::write");
|
||||
INJECT_FAULT(io_timeout, "SimpleFile::write"); // SimpleFile::write inject io_timeout
|
||||
INJECT_FAULT(io_error, "SimpleFile::write"); // SimpleFile::write inject io_error
|
||||
|
||||
return Void();
|
||||
}
|
||||
|
@ -619,8 +628,8 @@ private:
|
|||
if (randLog)
|
||||
fprintf( randLog, "SFT2 %s %s %s\n", self->dbgId.shortString().c_str(), self->filename.c_str(), opId.shortString().c_str());
|
||||
|
||||
INJECT_FAULT( io_timeout, "SimpleFile::truncate" );
|
||||
INJECT_FAULT( io_error, "SimpleFile::truncate" );
|
||||
INJECT_FAULT( io_timeout, "SimpleFile::truncate" ); // SimpleFile::truncate inject io_timeout
|
||||
INJECT_FAULT( io_error, "SimpleFile::truncate" ); // SimpleFile::truncate inject io_error
|
||||
|
||||
return Void();
|
||||
}
|
||||
|
@ -652,8 +661,8 @@ private:
|
|||
if (randLog)
|
||||
fprintf( randLog, "SFC2 %s %s %s\n", self->dbgId.shortString().c_str(), self->filename.c_str(), opId.shortString().c_str());
|
||||
|
||||
INJECT_FAULT( io_timeout, "SimpleFile::sync" );
|
||||
INJECT_FAULT( io_error, "SimpleFile::sync" );
|
||||
INJECT_FAULT( io_timeout, "SimpleFile::sync" ); // SimpleFile::sync inject io_timeout
|
||||
INJECT_FAULT( io_error, "SimpleFile::sync" ); // SimpleFile::sync inject io_errot
|
||||
|
||||
return Void();
|
||||
}
|
||||
|
@ -673,7 +682,7 @@ private:
|
|||
|
||||
if (randLog)
|
||||
fprintf(randLog, "SFS2 %s %s %s %" PRId64 "\n", self->dbgId.shortString().c_str(), self->filename.c_str(), opId.shortString().c_str(), pos);
|
||||
INJECT_FAULT( io_error, "SimpleFile::size" );
|
||||
INJECT_FAULT( io_error, "SimpleFile::size" ); // SimpleFile::size inject io_error
|
||||
|
||||
return pos;
|
||||
}
|
||||
|
@ -793,8 +802,8 @@ public:
|
|||
return waitForProcessAndConnect( toAddr, this );
|
||||
}
|
||||
auto peerp = getProcessByAddress(toAddr);
|
||||
Reference<Sim2Conn> myc( new Sim2Conn( getCurrentProcess() ) );
|
||||
Reference<Sim2Conn> peerc( new Sim2Conn( peerp ) );
|
||||
auto myc = makeReference<Sim2Conn>(getCurrentProcess());
|
||||
auto peerc = makeReference<Sim2Conn>(peerp);
|
||||
|
||||
myc->connect(peerc, toAddr);
|
||||
IPAddress localIp;
|
||||
|
@ -811,7 +820,11 @@ public:
|
|||
((Sim2Listener*)peerp->getListener(toAddr).getPtr())->incomingConnection( 0.5*deterministicRandom()->random01(), Reference<IConnection>(peerc) );
|
||||
return onConnect( ::delay(0.5*deterministicRandom()->random01()), myc );
|
||||
}
|
||||
Future<std::vector<NetworkAddress>> resolveTCPEndpoint(std::string host, std::string service) override {
|
||||
|
||||
Future<Reference<IUDPSocket>> createUDPSocket(NetworkAddress toAddr) override;
|
||||
Future<Reference<IUDPSocket>> createUDPSocket(bool isV6 = false) override;
|
||||
|
||||
Future<std::vector<NetworkAddress>> resolveTCPEndpoint(std::string host, std::string service) override {
|
||||
throw lookup_failed();
|
||||
}
|
||||
ACTOR static Future<Reference<IConnection>> onConnect( Future<Void> ready, Reference<Sim2Conn> conn ) {
|
||||
|
@ -1434,7 +1447,7 @@ public:
|
|||
|
||||
// Check if any processes on machine are rebooting
|
||||
if ( processesOnMachine != processesPerMachine ) {
|
||||
TEST(true); //Attempted reboot, but the target did not have all of its processes running
|
||||
TEST(true); //Attempted reboot and kill, but the target did not have all of its processes running
|
||||
TraceEvent(SevWarn, "AbortedKill").detail("KillType", kt).detail("MachineId", machineId).detail("Reason", "Machine processes does not match number of processes per machine").detail("Processes", processesOnMachine).detail("ProcessesPerMachine", processesPerMachine).backtrace();
|
||||
if (ktFinal) *ktFinal = None;
|
||||
return false;
|
||||
|
@ -1545,12 +1558,12 @@ public:
|
|||
.detail("KilledDC", kt==ktMin);
|
||||
|
||||
TEST(kt != ktMin); // DataCenter kill was rejected by killMachine
|
||||
TEST((kt==ktMin) && (kt == RebootAndDelete)); // Resulted in a reboot and delete
|
||||
TEST((kt==ktMin) && (kt == Reboot)); // Resulted in a reboot
|
||||
TEST((kt==ktMin) && (kt == KillInstantly)); // Resulted in an instant kill
|
||||
TEST((kt==ktMin) && (kt == InjectFaults)); // Resulted in a kill by injecting faults
|
||||
TEST((kt==ktMin) && (kt != ktOrig)); // Kill request was downgraded
|
||||
TEST((kt==ktMin) && (kt == ktOrig)); // Requested kill was done
|
||||
TEST((kt==ktMin) && (kt == RebootAndDelete)); // Datacenter kill Resulted in a reboot and delete
|
||||
TEST((kt==ktMin) && (kt == Reboot)); // Datacenter kill Resulted in a reboot
|
||||
TEST((kt==ktMin) && (kt == KillInstantly)); // Datacenter kill Resulted in an instant kill
|
||||
TEST((kt==ktMin) && (kt == InjectFaults)); // Datacenter kill Resulted in a kill by injecting faults
|
||||
TEST((kt==ktMin) && (kt != ktOrig)); // Datacenter Kill request was downgraded
|
||||
TEST((kt==ktMin) && (kt == ktOrig)); // Datacenter kill - Requested kill was done
|
||||
|
||||
if (ktFinal) *ktFinal = ktMin;
|
||||
|
||||
|
@ -1725,6 +1738,202 @@ public:
|
|||
int yield_limit; // how many more times yield may return false before next returning true
|
||||
};
|
||||
|
||||
class UDPSimSocket : public IUDPSocket, ReferenceCounted<UDPSimSocket> {
|
||||
using Packet = std::shared_ptr<std::vector<uint8_t>>;
|
||||
UID id;
|
||||
ISimulator::ProcessInfo* process;
|
||||
Optional<NetworkAddress> peerAddress;
|
||||
Optional<ISimulator::ProcessInfo*> peerProcess;
|
||||
Optional<Reference<UDPSimSocket>> peerSocket;
|
||||
ActorCollection actors;
|
||||
Promise<Void> closed;
|
||||
std::deque<std::pair<NetworkAddress, Packet>> recvBuffer;
|
||||
AsyncVar<int64_t> writtenPackets;
|
||||
NetworkAddress _localAddress;
|
||||
bool randomDropPacket() {
|
||||
auto res = deterministicRandom()->random01() < .000001;
|
||||
TEST(res); // UDP packet drop
|
||||
return res;
|
||||
}
|
||||
|
||||
bool isClosed() const { return closed.getFuture().isReady(); }
|
||||
Future<Void> onClosed() const { return closed.getFuture(); }
|
||||
|
||||
ACTOR static Future<Void> cleanupPeerSocket(UDPSimSocket* self) {
|
||||
wait(self->peerSocket.get()->onClosed());
|
||||
self->peerSocket.reset();
|
||||
return Void();
|
||||
}
|
||||
|
||||
ACTOR static Future<Void> send(UDPSimSocket* self, Reference<UDPSimSocket> peerSocket, uint8_t const* begin,
|
||||
uint8_t const* end) {
|
||||
state Packet packet(std::make_shared<std::vector<uint8_t>>());
|
||||
packet->resize(end - begin);
|
||||
std::copy(begin, end, packet->begin());
|
||||
wait( delay( .002 * deterministicRandom()->random01() ) );
|
||||
peerSocket->recvBuffer.emplace_back(self->_localAddress, std::move(packet));
|
||||
peerSocket->writtenPackets.set(peerSocket->writtenPackets.get() + 1);
|
||||
return Void();
|
||||
}
|
||||
|
||||
ACTOR static Future<int> receiveFrom(UDPSimSocket* self, uint8_t* begin, uint8_t* end, NetworkAddress* sender) {
|
||||
state TaskPriority currentTaskID = g_sim2.getCurrentTask();
|
||||
wait(self->writtenPackets.onChange());
|
||||
wait(g_sim2.onProcess(self->process, currentTaskID));
|
||||
auto packet = self->recvBuffer.front().second;
|
||||
int sz = packet->size();
|
||||
ASSERT(sz <= end - begin);
|
||||
if (sender) {
|
||||
*sender = self->recvBuffer.front().first;
|
||||
}
|
||||
std::copy(packet->begin(), packet->end(), begin);
|
||||
self->recvBuffer.pop_front();
|
||||
return sz;
|
||||
}
|
||||
|
||||
public:
|
||||
UDPSimSocket(NetworkAddress const& localAddress, Optional<NetworkAddress> const& peerAddress)
|
||||
: id(deterministicRandom()->randomUniqueID()), process(g_simulator.getCurrentProcess()), peerAddress(peerAddress),
|
||||
actors(false), _localAddress(localAddress) {
|
||||
g_sim2.addressMap.emplace(_localAddress, process);
|
||||
process->boundUDPSockets.emplace(localAddress, this);
|
||||
}
|
||||
~UDPSimSocket() {
|
||||
if (!closed.getFuture().isReady()) {
|
||||
close();
|
||||
closed.send(Void());
|
||||
}
|
||||
actors.clear(true);
|
||||
}
|
||||
void close() override {
|
||||
process->boundUDPSockets.erase(_localAddress);
|
||||
g_sim2.addressMap.erase(_localAddress);
|
||||
}
|
||||
UID getDebugID() const override { return id; }
|
||||
void addref() override { ReferenceCounted<UDPSimSocket>::addref(); }
|
||||
void delref() override { ReferenceCounted<UDPSimSocket>::delref(); }
|
||||
|
||||
Future<int> send(uint8_t const* begin, uint8_t const* end) override {
|
||||
int sz = int(end - begin);
|
||||
auto res = fmap([sz](Void){ return sz; }, delay(0.0));
|
||||
ASSERT(sz <= IUDPSocket::MAX_PACKET_SIZE);
|
||||
ASSERT(peerAddress.present());
|
||||
if (!peerProcess.present()) {
|
||||
auto iter = g_sim2.addressMap.find(peerAddress.get());
|
||||
if (iter == g_sim2.addressMap.end()) {
|
||||
return res;
|
||||
}
|
||||
peerProcess = iter->second;
|
||||
}
|
||||
if (!peerSocket.present() || peerSocket.get()->isClosed()) {
|
||||
peerSocket.reset();
|
||||
auto iter = peerProcess.get()->boundUDPSockets.find(peerAddress.get());
|
||||
if (iter == peerProcess.get()->boundUDPSockets.end()) {
|
||||
return fmap([sz](Void){ return sz; }, delay(0.0));
|
||||
}
|
||||
peerSocket = iter->second.castTo<UDPSimSocket>();
|
||||
// the notation of leaking connections doesn't make much sense in the context of UDP
|
||||
// so we simply handle those in the simulator
|
||||
actors.add(cleanupPeerSocket(this));
|
||||
}
|
||||
if (randomDropPacket()) {
|
||||
return res;
|
||||
}
|
||||
actors.add(send(this, peerSocket.get(), begin, end));
|
||||
return res;
|
||||
}
|
||||
Future<int> sendTo(uint8_t const* begin, uint8_t const* end, NetworkAddress const& peer) override {
|
||||
int sz = int(end - begin);
|
||||
auto res = fmap([sz](Void){ return sz; }, delay(0.0));
|
||||
ASSERT(sz <= MAX_PACKET_SIZE);
|
||||
ISimulator::ProcessInfo* peerProcess = nullptr;
|
||||
Reference<UDPSimSocket> peerSocket;
|
||||
{
|
||||
auto iter = g_sim2.addressMap.find(peer);
|
||||
if (iter == g_sim2.addressMap.end()) {
|
||||
return res;
|
||||
}
|
||||
peerProcess = iter->second;
|
||||
}
|
||||
{
|
||||
auto iter = peerProcess->boundUDPSockets.find(peer);
|
||||
if (iter == peerProcess->boundUDPSockets.end()) {
|
||||
return res;
|
||||
}
|
||||
peerSocket = iter->second.castTo<UDPSimSocket>();
|
||||
}
|
||||
actors.add(send(this, peerSocket, begin, end));
|
||||
return res;
|
||||
}
|
||||
Future<int> receive(uint8_t* begin, uint8_t* end) override {
|
||||
return receiveFrom(begin, end, nullptr);
|
||||
}
|
||||
Future<int> receiveFrom(uint8_t* begin, uint8_t* end, NetworkAddress* sender) override {
|
||||
if (!recvBuffer.empty()) {
|
||||
auto buf = recvBuffer.front().second;
|
||||
if (sender) {
|
||||
*sender = recvBuffer.front().first;
|
||||
}
|
||||
int sz = buf->size();
|
||||
ASSERT(sz <= end - begin);
|
||||
std::copy(buf->begin(), buf->end(), begin);
|
||||
auto res = fmap([sz](Void){ return sz; }, delay(0.0));
|
||||
recvBuffer.pop_front();
|
||||
return res;
|
||||
}
|
||||
return receiveFrom(this, begin, end, sender);
|
||||
}
|
||||
void bind(NetworkAddress const& addr) override {
|
||||
g_sim2.addressMap.erase(_localAddress);
|
||||
process->boundUDPSockets.erase(_localAddress);
|
||||
process->boundUDPSockets.emplace(addr, Reference<UDPSimSocket>::addRef(this));
|
||||
_localAddress = addr;
|
||||
g_sim2.addressMap.emplace(_localAddress, process);
|
||||
}
|
||||
|
||||
NetworkAddress localAddress() const override {
|
||||
return _localAddress;
|
||||
}
|
||||
|
||||
};
|
||||
|
||||
Future<Reference<IUDPSocket>> Sim2::createUDPSocket(NetworkAddress toAddr) {
|
||||
NetworkAddress localAddress;
|
||||
auto process = g_simulator.getCurrentProcess();
|
||||
if (process->address.ip.isV6()) {
|
||||
IPAddress::IPAddressStore store = process->address.ip.toV6();
|
||||
uint16_t* ipParts = (uint16_t*)store.data();
|
||||
ipParts[7] += deterministicRandom()->randomInt(0, 256);
|
||||
localAddress.ip = IPAddress(store);
|
||||
} else {
|
||||
localAddress.ip = IPAddress(process->address.ip.toV4() + deterministicRandom()->randomInt(0, 256));
|
||||
}
|
||||
localAddress.port = deterministicRandom()->randomInt(40000, 60000);
|
||||
return Reference<IUDPSocket>(new UDPSimSocket(localAddress, toAddr));
|
||||
}
|
||||
|
||||
Future<Reference<IUDPSocket>> Sim2::createUDPSocket(bool isV6) {
|
||||
NetworkAddress localAddress;
|
||||
auto process = g_simulator.getCurrentProcess();
|
||||
if (process->address.ip.isV6() == isV6) {
|
||||
localAddress = process->address;
|
||||
} else {
|
||||
ASSERT(process->addresses.secondaryAddress.present() &&
|
||||
process->addresses.secondaryAddress.get().isV6() == isV6);
|
||||
localAddress = process->addresses.secondaryAddress.get();
|
||||
}
|
||||
if (localAddress.ip.isV6()) {
|
||||
IPAddress::IPAddressStore store = localAddress.ip.toV6();
|
||||
uint16_t* ipParts = (uint16_t*)store.data();
|
||||
ipParts[7] += deterministicRandom()->randomInt(0, 256);
|
||||
localAddress.ip = IPAddress(store);
|
||||
} else {
|
||||
localAddress.ip = IPAddress(localAddress.ip.toV4() + deterministicRandom()->randomInt(0, 256));
|
||||
}
|
||||
localAddress.port = deterministicRandom()->randomInt(40000, 60000);
|
||||
return Reference<IUDPSocket>(new UDPSimSocket(localAddress, Optional<NetworkAddress>{}));
|
||||
}
|
||||
|
||||
void startNewSimulator() {
|
||||
ASSERT( !g_network );
|
||||
g_network = g_pSimulator = new Sim2();
|
||||
|
@ -1843,7 +2052,8 @@ Future< Reference<class IAsyncFile> > Sim2FileSystem::open( std::string filename
|
|||
}
|
||||
// Simulated disk parameters are shared by the AsyncFileNonDurable and the underlying SimpleFile.
|
||||
// This way, they can both keep up with the time to start the next operation
|
||||
Reference<DiskParameters> diskParameters(new DiskParameters(FLOW_KNOBS->SIM_DISK_IOPS, FLOW_KNOBS->SIM_DISK_BANDWIDTH));
|
||||
auto diskParameters =
|
||||
makeReference<DiskParameters>(FLOW_KNOBS->SIM_DISK_IOPS, FLOW_KNOBS->SIM_DISK_BANDWIDTH);
|
||||
machineCache[actualFilename] = AsyncFileNonDurable::open(filename, actualFilename, SimpleFile::open(filename, flags, mode, diskParameters, false), diskParameters);
|
||||
}
|
||||
Future<Reference<IAsyncFile>> f = AsyncFileDetachable::open( machineCache[actualFilename] );
|
||||
|
|
|
@ -55,6 +55,7 @@ public:
|
|||
ProcessClass startingClass;
|
||||
TDMetricCollection tdmetrics;
|
||||
std::map<NetworkAddress, Reference<IListener>> listenerMap;
|
||||
std::map<NetworkAddress, Reference<IUDPSocket>> boundUDPSockets;
|
||||
bool failed;
|
||||
bool excluded;
|
||||
bool cleared;
|
||||
|
|
|
@ -32,7 +32,7 @@ Reference<StorageInfo> getStorageInfo(UID id, std::map<UID, Reference<StorageInf
|
|||
Reference<StorageInfo> storageInfo;
|
||||
auto cacheItr = storageCache->find(id);
|
||||
if(cacheItr == storageCache->end()) {
|
||||
storageInfo = Reference<StorageInfo>( new StorageInfo() );
|
||||
storageInfo = makeReference<StorageInfo>();
|
||||
storageInfo->tag = decodeServerTagValue( txnStateStore->readValue( serverTagKeyFor(id) ).get().get() );
|
||||
storageInfo->interf = decodeServerListValue( txnStateStore->readValue( serverListKeyFor(id) ).get().get() );
|
||||
(*storageCache)[id] = storageInfo;
|
||||
|
@ -55,12 +55,12 @@ void applyMetadataMutations(SpanID const& spanContext, UID const& dbgid, Arena&
|
|||
std::map<Tag, Version>* tag_popped, bool initialCommit) {
|
||||
//std::map<keyRef, vector<uint16_t>> cacheRangeInfo;
|
||||
std::map<KeyRef, MutationRef> cachedRangeInfo;
|
||||
if (toCommit) {
|
||||
toCommit->addTransactionInfo(spanContext);
|
||||
}
|
||||
|
||||
for (auto const& m : mutations) {
|
||||
//TraceEvent("MetadataMutation", dbgid).detail("M", m.toString());
|
||||
if (toCommit) {
|
||||
toCommit->addTransactionInfo(spanContext);
|
||||
}
|
||||
|
||||
if (m.param1.size() && m.param1[0] == systemKeys.begin[0] && m.type == MutationRef::SetValue) {
|
||||
if(m.param1.startsWith(keyServersPrefix)) {
|
||||
|
@ -127,7 +127,7 @@ void applyMetadataMutations(SpanID const& spanContext, UID const& dbgid, Arena&
|
|||
if(storageCache) {
|
||||
auto cacheItr = storageCache->find(id);
|
||||
if(cacheItr == storageCache->end()) {
|
||||
Reference<StorageInfo> storageInfo = Reference<StorageInfo>( new StorageInfo() );
|
||||
Reference<StorageInfo> storageInfo = makeReference<StorageInfo>();
|
||||
storageInfo->tag = tag;
|
||||
Optional<Key> interfKey = txnStateStore->readValue( serverListKeyFor(id) ).get();
|
||||
if(interfKey.present()) {
|
||||
|
@ -198,7 +198,7 @@ void applyMetadataMutations(SpanID const& spanContext, UID const& dbgid, Arena&
|
|||
|
||||
auto cacheItr = storageCache->find(id);
|
||||
if(cacheItr == storageCache->end()) {
|
||||
Reference<StorageInfo> storageInfo = Reference<StorageInfo>( new StorageInfo() );
|
||||
Reference<StorageInfo> storageInfo = makeReference<StorageInfo>();
|
||||
storageInfo->interf = interf;
|
||||
Optional<Key> tagKey = txnStateStore->readValue( serverTagKeyFor(id) ).get();
|
||||
if(tagKey.present()) {
|
||||
|
@ -221,7 +221,7 @@ void applyMetadataMutations(SpanID const& spanContext, UID const& dbgid, Arena&
|
|||
auto &p = (*uid_applyMutationsData)[uid];
|
||||
p.endVersion = BinaryReader::fromStringRef<Version>(m.param2, Unversioned());
|
||||
if(p.keyVersion == Reference<KeyRangeMap<Version>>())
|
||||
p.keyVersion = Reference<KeyRangeMap<Version>>( new KeyRangeMap<Version>() );
|
||||
p.keyVersion = makeReference<KeyRangeMap<Version>>();
|
||||
if(!p.worker.isValid() || p.worker.isReady()) {
|
||||
auto addPrefixValue = txnStateStore->readValue(uid.withPrefix(applyMutationsAddPrefixRange.begin)).get();
|
||||
auto removePrefixValue = txnStateStore->readValue(uid.withPrefix(applyMutationsRemovePrefixRange.begin)).get();
|
||||
|
@ -241,7 +241,7 @@ void applyMetadataMutations(SpanID const& spanContext, UID const& dbgid, Arena&
|
|||
Key k = m.param1.substr(applyMutationsKeyVersionMapRange.begin.size() + sizeof(UID));
|
||||
auto &p = (*uid_applyMutationsData)[uid];
|
||||
if(p.keyVersion == Reference<KeyRangeMap<Version>>())
|
||||
p.keyVersion = Reference<KeyRangeMap<Version>>( new KeyRangeMap<Version>() );
|
||||
p.keyVersion = makeReference<KeyRangeMap<Version>>();
|
||||
p.keyVersion->rawInsert( k, BinaryReader::fromStringRef<Version>(m.param2, Unversioned()) );
|
||||
}
|
||||
}
|
||||
|
@ -416,7 +416,7 @@ void applyMetadataMutations(SpanID const& spanContext, UID const& dbgid, Arena&
|
|||
if(uid == uid2) {
|
||||
auto &p = (*uid_applyMutationsData)[uid];
|
||||
if(p.keyVersion == Reference<KeyRangeMap<Version>>())
|
||||
p.keyVersion = Reference<KeyRangeMap<Version>>( new KeyRangeMap<Version>() );
|
||||
p.keyVersion = makeReference<KeyRangeMap<Version>>();
|
||||
p.keyVersion->rawErase( KeyRangeRef( m.param1.substr(applyMutationsKeyVersionMapRange.begin.size() + sizeof(UID)), m.param2.substr(applyMutationsKeyVersionMapRange.begin.size() + sizeof(UID))) );
|
||||
}
|
||||
}
|
||||
|
|
|
@ -25,6 +25,7 @@ set(FDBSERVER_SRCS
|
|||
FDBExecHelper.actor.h
|
||||
GrvProxyServer.actor.cpp
|
||||
IDiskQueue.h
|
||||
IKeyValueContainer.h
|
||||
IKeyValueStore.h
|
||||
IPager.h
|
||||
IVersionedStore.h
|
||||
|
@ -46,6 +47,8 @@ set(FDBSERVER_SRCS
|
|||
LogSystemDiskQueueAdapter.h
|
||||
LogSystemPeekCursor.actor.cpp
|
||||
MasterInterface.h
|
||||
MetricLogger.actor.cpp
|
||||
MetricLogger.h
|
||||
CommitProxyServer.actor.cpp
|
||||
masterserver.actor.cpp
|
||||
MutationTracking.h
|
||||
|
@ -63,6 +66,7 @@ set(FDBSERVER_SRCS
|
|||
pubsub.h
|
||||
QuietDatabase.actor.cpp
|
||||
QuietDatabase.h
|
||||
RadixTree.h
|
||||
Ratekeeper.actor.cpp
|
||||
RatekeeperInterface.h
|
||||
RecoveryState.h
|
||||
|
@ -123,6 +127,7 @@ set(FDBSERVER_SRCS
|
|||
workloads/BackupCorrectness.actor.cpp
|
||||
workloads/BackupAndParallelRestoreCorrectness.actor.cpp
|
||||
workloads/ParallelRestore.actor.cpp
|
||||
workloads/BackupToBlob.actor.cpp
|
||||
workloads/BackupToDBAbort.actor.cpp
|
||||
workloads/BackupToDBCorrectness.actor.cpp
|
||||
workloads/BackupToDBUpgrade.actor.cpp
|
||||
|
@ -183,6 +188,7 @@ set(FDBSERVER_SRCS
|
|||
workloads/RemoveServersSafely.actor.cpp
|
||||
workloads/ReportConflictingKeys.actor.cpp
|
||||
workloads/RestoreBackup.actor.cpp
|
||||
workloads/RestoreFromBlob.actor.cpp
|
||||
workloads/Rollback.actor.cpp
|
||||
workloads/RyowCorrectness.actor.cpp
|
||||
workloads/RYWDisable.actor.cpp
|
||||
|
@ -212,6 +218,7 @@ set(FDBSERVER_SRCS
|
|||
workloads/UnitPerf.actor.cpp
|
||||
workloads/UnitTests.actor.cpp
|
||||
workloads/Unreadable.actor.cpp
|
||||
workloads/UDPWorkload.actor.cpp
|
||||
workloads/VersionStamp.actor.cpp
|
||||
workloads/WatchAndWait.actor.cpp
|
||||
workloads/Watches.actor.cpp
|
||||
|
@ -237,6 +244,10 @@ if (WITH_ROCKSDB_EXPERIMENTAL)
|
|||
set(PORTABLE_ROCKSDB 1)
|
||||
|
||||
include(CompileRocksDB)
|
||||
# CompileRocksDB sets `lz4_LIBRARIES` to be the shared lib, we want to link
|
||||
# statically, so find the static library here.
|
||||
find_library(lz4_STATIC_LIBRARIES
|
||||
NAMES liblz4.a REQUIRED)
|
||||
endif()
|
||||
|
||||
# Suppress warnings in sqlite since it's third party
|
||||
|
@ -257,7 +268,7 @@ target_include_directories(fdbserver PRIVATE
|
|||
if (WITH_ROCKSDB_EXPERIMENTAL)
|
||||
add_dependencies(fdbserver rocksdb)
|
||||
target_include_directories(fdbserver PRIVATE ${ROCKSDB_INCLUDE_DIR})
|
||||
target_link_libraries(fdbserver PRIVATE fdbclient fdb_sqlite ${ROCKSDB_LIBRARIES})
|
||||
target_link_libraries(fdbserver PRIVATE fdbclient fdb_sqlite ${ROCKSDB_LIBRARIES} ${lz4_STATIC_LIBRARIES})
|
||||
else()
|
||||
target_link_libraries(fdbserver PRIVATE fdbclient fdb_sqlite)
|
||||
endif()
|
||||
|
|
|
@ -113,12 +113,13 @@ public:
|
|||
bool cachePopulated;
|
||||
std::map<NetworkAddress, std::pair<double, OpenDatabaseRequest>> clientStatus;
|
||||
|
||||
DBInfo() : masterRegistrationCount(0), recoveryStalled(false), forceRecovery(false), unfinishedRecoveries(0), logGenerations(0), cachePopulated(false),
|
||||
clientInfo( new AsyncVar<ClientDBInfo>( ClientDBInfo() ) ), dbInfoCount(0),
|
||||
serverInfo( new AsyncVar<ServerDBInfo>( ServerDBInfo() ) ),
|
||||
db( DatabaseContext::create( clientInfo, Future<Void>(), LocalityData(), true, TaskPriority::DefaultEndpoint, true ) ) // SOMEDAY: Locality!
|
||||
{
|
||||
}
|
||||
DBInfo()
|
||||
: masterRegistrationCount(0), recoveryStalled(false), forceRecovery(false), unfinishedRecoveries(0),
|
||||
logGenerations(0), cachePopulated(false), clientInfo(new AsyncVar<ClientDBInfo>()), dbInfoCount(0),
|
||||
serverInfo(new AsyncVar<ServerDBInfo>()),
|
||||
db(DatabaseContext::create(clientInfo, Future<Void>(), LocalityData(), true, TaskPriority::DefaultEndpoint,
|
||||
true)) // SOMEDAY: Locality!
|
||||
{}
|
||||
|
||||
void setDistributor(const DataDistributorInterface& interf) {
|
||||
auto newInfo = serverInfo->get();
|
||||
|
@ -1212,7 +1213,9 @@ public:
|
|||
for(auto& logSet : dbi.logSystemConfig.tLogs) {
|
||||
if(region.satelliteTLogPolicy.isValid() && logSet.isLocal && logSet.locality == tagLocalitySatellite) {
|
||||
oldSatelliteFallback = logSet.tLogPolicy->info() != region.satelliteTLogPolicy->info();
|
||||
ASSERT(!oldSatelliteFallback || logSet.tLogPolicy->info() == region.satelliteTLogPolicyFallback->info());
|
||||
ASSERT(!oldSatelliteFallback ||
|
||||
(region.satelliteTLogPolicyFallback.isValid() &&
|
||||
logSet.tLogPolicy->info() == region.satelliteTLogPolicyFallback->info()));
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
@ -1934,7 +1937,7 @@ ACTOR Future<Void> clusterRecruitFromConfiguration( ClusterControllerData* self,
|
|||
|
||||
ACTOR Future<Void> clusterRecruitRemoteFromConfiguration( ClusterControllerData* self, RecruitRemoteFromConfigurationRequest req ) {
|
||||
// At the moment this doesn't really need to be an actor (it always completes immediately)
|
||||
TEST(true); //ClusterController RecruitTLogsRequest
|
||||
TEST(true); //ClusterController RecruitTLogsRequest Remote
|
||||
loop {
|
||||
try {
|
||||
RecruitRemoteFromConfigurationReply rep = self->findRemoteWorkersForConfiguration( req );
|
||||
|
@ -2186,7 +2189,7 @@ void registerWorker( RegisterWorkerRequest req, ClusterControllerData *self ) {
|
|||
#define TIME_KEEPER_VERSION LiteralStringRef("1")
|
||||
|
||||
ACTOR Future<Void> timeKeeperSetVersion(ClusterControllerData *self) {
|
||||
state Reference<ReadYourWritesTransaction> tr = Reference<ReadYourWritesTransaction>(new ReadYourWritesTransaction(self->cx));
|
||||
state Reference<ReadYourWritesTransaction> tr = makeReference<ReadYourWritesTransaction>(self->cx);
|
||||
loop {
|
||||
try {
|
||||
tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
|
||||
|
@ -2214,7 +2217,7 @@ ACTOR Future<Void> timeKeeper(ClusterControllerData *self) {
|
|||
wait(timeKeeperSetVersion(self));
|
||||
|
||||
loop {
|
||||
state Reference<ReadYourWritesTransaction> tr = Reference<ReadYourWritesTransaction>(new ReadYourWritesTransaction(self->cx));
|
||||
state Reference<ReadYourWritesTransaction> tr = makeReference<ReadYourWritesTransaction>(self->cx);
|
||||
loop {
|
||||
try {
|
||||
if(!g_network->isSimulated()) {
|
||||
|
|
|
@ -547,7 +547,7 @@ ACTOR Future<Void> preresolutionProcessing(CommitBatchContext* self) {
|
|||
}
|
||||
|
||||
// Pre-resolution the commits
|
||||
TEST(pProxyCommitData->latestLocalCommitBatchResolving.get() < localBatchNumber - 1);
|
||||
TEST(pProxyCommitData->latestLocalCommitBatchResolving.get() < localBatchNumber - 1); // Wait for local batch
|
||||
wait(pProxyCommitData->latestLocalCommitBatchResolving.whenAtLeast(localBatchNumber - 1));
|
||||
self->releaseDelay = delay(
|
||||
std::min(SERVER_KNOBS->MAX_PROXY_COMPUTE,
|
||||
|
|
|
@ -239,7 +239,7 @@ struct MovableCoordinatedStateImpl {
|
|||
}
|
||||
// SOMEDAY: If moveState.mode == MovingFrom, read (without locking) old state and assert that it corresponds with our state and is ReallyTo(coordinators)
|
||||
if (moveState.mode == MovableValue::MaybeTo) {
|
||||
TEST(true);
|
||||
TEST(true); // Maybe moveto state
|
||||
ASSERT( moveState.other.present() );
|
||||
wait( self->moveTo( self, &self->cs, ClusterConnectionString( moveState.other.get().toString() ), moveState.value ) );
|
||||
}
|
||||
|
|
|
@ -276,11 +276,12 @@ ACTOR Future<Void> leaderRegister(LeaderElectionRegInterface interf, Key key) {
|
|||
state Future<Void> notifyCheck = delay(SERVER_KNOBS->NOTIFICATION_FULL_CLEAR_TIME / SERVER_KNOBS->MIN_NOTIFICATIONS);
|
||||
state ClientData clientData;
|
||||
state int clientCount = 0;
|
||||
state Reference<AsyncVar<bool>> hasConnectedClients = Reference<AsyncVar<bool>>( new AsyncVar<bool>(false) );
|
||||
state Reference<AsyncVar<bool>> hasConnectedClients = makeReference<AsyncVar<bool>>(false);
|
||||
state ActorCollection actors(false);
|
||||
state Future<Void> leaderMon;
|
||||
state AsyncVar<Value> leaderInterface;
|
||||
state Reference<AsyncVar<Optional<LeaderInfo>>> currentElectedLeader = Reference<AsyncVar<Optional<LeaderInfo>>>( new AsyncVar<Optional<LeaderInfo>>() );
|
||||
state Reference<AsyncVar<Optional<LeaderInfo>>> currentElectedLeader =
|
||||
makeReference<AsyncVar<Optional<LeaderInfo>>>();
|
||||
|
||||
loop choose {
|
||||
when ( OpenDatabaseCoordRequest req = waitNext( interf.openDatabase.getFuture() ) ) {
|
||||
|
|
|
@ -365,7 +365,7 @@ ACTOR Future<Reference<InitialDataDistribution>> getInitialDataDistribution(Data
|
|||
MoveKeysLock moveKeysLock,
|
||||
std::vector<Optional<Key>> remoteDcIds,
|
||||
const DDEnabledState* ddEnabledState) {
|
||||
state Reference<InitialDataDistribution> result = Reference<InitialDataDistribution>(new InitialDataDistribution);
|
||||
state Reference<InitialDataDistribution> result = makeReference<InitialDataDistribution>();
|
||||
state Key beginKey = allKeys.begin;
|
||||
|
||||
state bool succeeded;
|
||||
|
@ -622,6 +622,8 @@ struct DDTeamCollection : ReferenceCounted<DDTeamCollection> {
|
|||
int lowestUtilizationTeam;
|
||||
int highestUtilizationTeam;
|
||||
|
||||
AsyncTrigger printDetailedTeamsInfo;
|
||||
|
||||
void resetLocalitySet() {
|
||||
storageServerSet = Reference<LocalitySet>(new LocalityMap<UID>());
|
||||
LocalityMap<UID>* storageServerMap = (LocalityMap<UID>*) storageServerSet.getPtr();
|
||||
|
@ -801,6 +803,13 @@ struct DDTeamCollection : ReferenceCounted<DDTeamCollection> {
|
|||
} else {
|
||||
self->medianAvailableSpace = SERVER_KNOBS->MIN_AVAILABLE_SPACE_RATIO;
|
||||
}
|
||||
if (self->medianAvailableSpace < SERVER_KNOBS->TARGET_AVAILABLE_SPACE_RATIO) {
|
||||
TraceEvent(SevWarn, "DDTeamMedianAvailableSpaceTooSmall", self->distributorId)
|
||||
.detail("MedianAvailableSpaceRatio", self->medianAvailableSpace)
|
||||
.detail("TargetAvailableSpaceRatio", SERVER_KNOBS->TARGET_AVAILABLE_SPACE_RATIO)
|
||||
.detail("Primary", self->primary);
|
||||
self->printDetailedTeamsInfo.trigger();
|
||||
}
|
||||
}
|
||||
|
||||
bool foundSrc = false;
|
||||
|
@ -1252,7 +1261,7 @@ struct DDTeamCollection : ReferenceCounted<DDTeamCollection> {
|
|||
|
||||
void addTeam(const vector<Reference<TCServerInfo>>& newTeamServers, bool isInitialTeam,
|
||||
bool redundantTeam = false) {
|
||||
Reference<TCTeamInfo> teamInfo(new TCTeamInfo(newTeamServers));
|
||||
auto teamInfo = makeReference<TCTeamInfo>(newTeamServers);
|
||||
|
||||
// Move satisfiesPolicy to the end for performance benefit
|
||||
bool badTeam = redundantTeam || teamInfo->size() != configuration.storageTeamSize
|
||||
|
@ -1309,7 +1318,7 @@ struct DDTeamCollection : ReferenceCounted<DDTeamCollection> {
|
|||
|
||||
// Add a machine team specified by input machines
|
||||
Reference<TCMachineTeamInfo> addMachineTeam(vector<Reference<TCMachineInfo>> machines) {
|
||||
Reference<TCMachineTeamInfo> machineTeamInfo(new TCMachineTeamInfo(machines));
|
||||
auto machineTeamInfo = makeReference<TCMachineTeamInfo>(machines);
|
||||
machineTeams.push_back(machineTeamInfo);
|
||||
|
||||
// Assign machine teams to machine
|
||||
|
@ -2334,7 +2343,11 @@ struct DDTeamCollection : ReferenceCounted<DDTeamCollection> {
|
|||
allServers.push_back( newServer.id() );
|
||||
|
||||
TraceEvent("AddedStorageServer", distributorId).detail("ServerID", newServer.id()).detail("ProcessClass", processClass.toString()).detail("WaitFailureToken", newServer.waitFailure.getEndpoint().token).detail("Address", newServer.waitFailure.getEndpoint().getPrimaryAddress());
|
||||
auto &r = server_info[newServer.id()] = Reference<TCServerInfo>( new TCServerInfo( newServer, this, processClass, includedDCs.empty() || std::find(includedDCs.begin(), includedDCs.end(), newServer.locality.dcId()) != includedDCs.end(), storageServerSet ) );
|
||||
auto& r = server_info[newServer.id()] = makeReference<TCServerInfo>(
|
||||
newServer, this, processClass,
|
||||
includedDCs.empty() ||
|
||||
std::find(includedDCs.begin(), includedDCs.end(), newServer.locality.dcId()) != includedDCs.end(),
|
||||
storageServerSet);
|
||||
|
||||
// Establish the relation between server and machine
|
||||
checkAndCreateMachine(r);
|
||||
|
@ -2397,10 +2410,10 @@ struct DDTeamCollection : ReferenceCounted<DDTeamCollection> {
|
|||
Reference<TCMachineInfo> machineInfo;
|
||||
if (machine_info.find(machine_id) == machine_info.end()) {
|
||||
// uid is the first storage server process on the machine
|
||||
TEST(true);
|
||||
TEST(true); // First storage server in process on the machine
|
||||
// For each machine, store the first server's localityEntry into machineInfo for later use.
|
||||
LocalityEntry localityEntry = machineLocalityMap.add(locality, &server->id);
|
||||
machineInfo = Reference<TCMachineInfo>(new TCMachineInfo(server, localityEntry));
|
||||
machineInfo = makeReference<TCMachineInfo>(server, localityEntry);
|
||||
machine_info.insert(std::make_pair(machine_id, machineInfo));
|
||||
} else {
|
||||
machineInfo = machine_info.find(machine_id)->second;
|
||||
|
@ -2719,6 +2732,196 @@ ACTOR Future<Void> waitUntilHealthy(DDTeamCollection* self, double extraDelay =
|
|||
}
|
||||
}
|
||||
|
||||
// Take a snapshot of necessary data structures from `DDTeamCollection` and print them out with yields to avoid slow
|
||||
// task on the run loop.
|
||||
ACTOR Future<Void> printSnapshotTeamsInfo(Reference<DDTeamCollection> self) {
|
||||
state DatabaseConfiguration configuration;
|
||||
state std::map<UID, Reference<TCServerInfo>> server_info;
|
||||
state std::map<UID, ServerStatus> server_status;
|
||||
state vector<Reference<TCTeamInfo>> teams;
|
||||
state std::map<Standalone<StringRef>, Reference<TCMachineInfo>> machine_info;
|
||||
state std::vector<Reference<TCMachineTeamInfo>> machineTeams;
|
||||
// state std::vector<std::string> internedLocalityRecordKeyNameStrings;
|
||||
// state int machineLocalityMapEntryArraySize;
|
||||
// state std::vector<Reference<LocalityRecord>> machineLocalityMapRecordArray;
|
||||
state int traceEventsPrinted = 0;
|
||||
state std::vector<const UID*> serverIDs;
|
||||
state double lastPrintTime = 0;
|
||||
loop {
|
||||
wait(self->printDetailedTeamsInfo.onTrigger());
|
||||
if (now() - lastPrintTime < SERVER_KNOBS->DD_TEAMS_INFO_PRINT_INTERVAL) {
|
||||
continue;
|
||||
}
|
||||
lastPrintTime = now();
|
||||
|
||||
traceEventsPrinted = 0;
|
||||
|
||||
double snapshotStart = now();
|
||||
|
||||
configuration = self->configuration;
|
||||
server_info = self->server_info;
|
||||
teams = self->teams;
|
||||
machine_info = self->machine_info;
|
||||
machineTeams = self->machineTeams;
|
||||
// internedLocalityRecordKeyNameStrings = self->machineLocalityMap._keymap->_lookuparray;
|
||||
// machineLocalityMapEntryArraySize = self->machineLocalityMap.size();
|
||||
// machineLocalityMapRecordArray = self->machineLocalityMap.getRecordArray();
|
||||
std::vector<const UID*> _uids = self->machineLocalityMap.getObjects();
|
||||
serverIDs = _uids;
|
||||
|
||||
auto const& keys = self->server_status.getKeys();
|
||||
for (auto const& key : keys) {
|
||||
server_status.emplace(key, self->server_status.get(key));
|
||||
}
|
||||
|
||||
TraceEvent("DDPrintSnapshotTeasmInfo", self->distributorId)
|
||||
.detail("SnapshotSpeed", now() - snapshotStart)
|
||||
.detail("Primary", self->primary);
|
||||
|
||||
// Print to TraceEvents
|
||||
TraceEvent("DDConfig", self->distributorId)
|
||||
.detail("StorageTeamSize", configuration.storageTeamSize)
|
||||
.detail("DesiredTeamsPerServer", SERVER_KNOBS->DESIRED_TEAMS_PER_SERVER)
|
||||
.detail("MaxTeamsPerServer", SERVER_KNOBS->MAX_TEAMS_PER_SERVER)
|
||||
.detail("Primary", self->primary);
|
||||
|
||||
TraceEvent("ServerInfo", self->distributorId)
|
||||
.detail("Size", server_info.size())
|
||||
.detail("Primary", self->primary);
|
||||
state int i;
|
||||
state std::map<UID, Reference<TCServerInfo>>::iterator server = server_info.begin();
|
||||
for (i = 0; i < server_info.size(); i++) {
|
||||
TraceEvent("ServerInfo", self->distributorId)
|
||||
.detail("ServerInfoIndex", i)
|
||||
.detail("ServerID", server->first.toString())
|
||||
.detail("ServerTeamOwned", server->second->teams.size())
|
||||
.detail("MachineID", server->second->machine->machineID.contents().toString())
|
||||
.detail("Primary", self->primary);
|
||||
server++;
|
||||
if (++traceEventsPrinted % SERVER_KNOBS->DD_TEAMS_INFO_PRINT_YIELD_COUNT == 0) {
|
||||
wait(yield());
|
||||
}
|
||||
}
|
||||
|
||||
server = server_info.begin();
|
||||
for (i = 0; i < server_info.size(); i++) {
|
||||
const UID& uid = server->first;
|
||||
TraceEvent("ServerStatus", self->distributorId)
|
||||
.detail("ServerUID", uid)
|
||||
.detail("Healthy", !server_status.at(uid).isUnhealthy())
|
||||
.detail("MachineIsValid", server_info[uid]->machine.isValid())
|
||||
.detail("MachineTeamSize",
|
||||
server_info[uid]->machine.isValid() ? server_info[uid]->machine->machineTeams.size() : -1)
|
||||
.detail("Primary", self->primary);
|
||||
server++;
|
||||
if (++traceEventsPrinted % SERVER_KNOBS->DD_TEAMS_INFO_PRINT_YIELD_COUNT == 0) {
|
||||
wait(yield());
|
||||
}
|
||||
}
|
||||
|
||||
TraceEvent("ServerTeamInfo", self->distributorId).detail("Size", teams.size()).detail("Primary", self->primary);
|
||||
for (i = 0; i < teams.size(); i++) {
|
||||
const auto& team = teams[i];
|
||||
TraceEvent("ServerTeamInfo", self->distributorId)
|
||||
.detail("TeamIndex", i)
|
||||
.detail("Healthy", team->isHealthy())
|
||||
.detail("TeamSize", team->size())
|
||||
.detail("MemberIDs", team->getServerIDsStr())
|
||||
.detail("Primary", self->primary);
|
||||
if (++traceEventsPrinted % SERVER_KNOBS->DD_TEAMS_INFO_PRINT_YIELD_COUNT == 0) {
|
||||
wait(yield());
|
||||
}
|
||||
}
|
||||
|
||||
TraceEvent("MachineInfo", self->distributorId)
|
||||
.detail("Size", machine_info.size())
|
||||
.detail("Primary", self->primary);
|
||||
state std::map<Standalone<StringRef>, Reference<TCMachineInfo>>::iterator machine = machine_info.begin();
|
||||
state bool isMachineHealthy = false;
|
||||
for (i = 0; i < machine_info.size(); i++) {
|
||||
Reference<TCMachineInfo> _machine = machine->second;
|
||||
if (!_machine.isValid() || machine_info.find(_machine->machineID) == machine_info.end() ||
|
||||
_machine->serversOnMachine.empty()) {
|
||||
isMachineHealthy = false;
|
||||
}
|
||||
|
||||
// Healthy machine has at least one healthy server
|
||||
for (auto& server : _machine->serversOnMachine) {
|
||||
if (!server_status.at(server->id).isUnhealthy()) {
|
||||
isMachineHealthy = true;
|
||||
}
|
||||
}
|
||||
|
||||
isMachineHealthy = false;
|
||||
TraceEvent("MachineInfo", self->distributorId)
|
||||
.detail("MachineInfoIndex", i)
|
||||
.detail("Healthy", isMachineHealthy)
|
||||
.detail("MachineID", machine->first.contents().toString())
|
||||
.detail("MachineTeamOwned", machine->second->machineTeams.size())
|
||||
.detail("ServerNumOnMachine", machine->second->serversOnMachine.size())
|
||||
.detail("ServersID", machine->second->getServersIDStr())
|
||||
.detail("Primary", self->primary);
|
||||
machine++;
|
||||
if (++traceEventsPrinted % SERVER_KNOBS->DD_TEAMS_INFO_PRINT_YIELD_COUNT == 0) {
|
||||
wait(yield());
|
||||
}
|
||||
}
|
||||
|
||||
TraceEvent("MachineTeamInfo", self->distributorId)
|
||||
.detail("Size", machineTeams.size())
|
||||
.detail("Primary", self->primary);
|
||||
for (i = 0; i < machineTeams.size(); i++) {
|
||||
const auto& team = machineTeams[i];
|
||||
TraceEvent("MachineTeamInfo", self->distributorId)
|
||||
.detail("TeamIndex", i)
|
||||
.detail("MachineIDs", team->getMachineIDsStr())
|
||||
.detail("ServerTeams", team->serverTeams.size())
|
||||
.detail("Primary", self->primary);
|
||||
if (++traceEventsPrinted % SERVER_KNOBS->DD_TEAMS_INFO_PRINT_YIELD_COUNT == 0) {
|
||||
wait(yield());
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: re-enable the following logging or remove them.
|
||||
// TraceEvent("LocalityRecordKeyName", self->distributorId)
|
||||
// .detail("Size", internedLocalityRecordKeyNameStrings.size())
|
||||
// .detail("Primary", self->primary);
|
||||
// for (i = 0; i < internedLocalityRecordKeyNameStrings.size(); i++) {
|
||||
// TraceEvent("LocalityRecordKeyIndexName", self->distributorId)
|
||||
// .detail("KeyIndex", i)
|
||||
// .detail("KeyName", internedLocalityRecordKeyNameStrings[i])
|
||||
// .detail("Primary", self->primary);
|
||||
// if (++traceEventsPrinted % SERVER_KNOBS->DD_TEAMS_INFO_PRINT_YIELD_COUNT == 0) {
|
||||
// wait(yield());
|
||||
// }
|
||||
// }
|
||||
|
||||
// TraceEvent("MachineLocalityMap", self->distributorId)
|
||||
// .detail("Size", machineLocalityMapEntryArraySize)
|
||||
// .detail("Primary", self->primary);
|
||||
// for (i = 0; i < serverIDs.size(); i++) {
|
||||
// const auto& serverID = serverIDs[i];
|
||||
// Reference<LocalityRecord> record = machineLocalityMapRecordArray[i];
|
||||
// if (record.isValid()) {
|
||||
// TraceEvent("MachineLocalityMap", self->distributorId)
|
||||
// .detail("LocalityIndex", i)
|
||||
// .detail("UID", serverID->toString())
|
||||
// .detail("LocalityRecord", record->toString())
|
||||
// .detail("Primary", self->primary);
|
||||
// } else {
|
||||
// TraceEvent("MachineLocalityMap", self->distributorId)
|
||||
// .detail("LocalityIndex", i)
|
||||
// .detail("UID", serverID->toString())
|
||||
// .detail("LocalityRecord", "[NotFound]")
|
||||
// .detail("Primary", self->primary);
|
||||
// }
|
||||
// if (++traceEventsPrinted % SERVER_KNOBS->DD_TEAMS_INFO_PRINT_YIELD_COUNT == 0) {
|
||||
// wait(yield());
|
||||
// }
|
||||
// }
|
||||
}
|
||||
}
|
||||
|
||||
ACTOR Future<Void> removeBadTeams(DDTeamCollection* self) {
|
||||
wait(self->initialFailureReactionDelay);
|
||||
wait(waitUntilHealthy(self));
|
||||
|
@ -2851,7 +3054,7 @@ ACTOR Future<Void> machineTeamRemover(DDTeamCollection* self) {
|
|||
// in the serverTeams vector in the machine team.
|
||||
--teamIndex;
|
||||
self->addTeam(team->getServers(), true, true);
|
||||
TEST(true);
|
||||
TEST(true); // Removed machine team
|
||||
}
|
||||
|
||||
self->doBuildTeams = true;
|
||||
|
@ -2930,7 +3133,7 @@ ACTOR Future<Void> serverTeamRemover(DDTeamCollection* self) {
|
|||
bool foundTeam = self->removeTeam(st);
|
||||
ASSERT(foundTeam == true);
|
||||
self->addTeam(st->getServers(), true, true);
|
||||
TEST(true);
|
||||
TEST(true); // Marked team as a bad team
|
||||
|
||||
self->doBuildTeams = true;
|
||||
|
||||
|
@ -4677,7 +4880,7 @@ ACTOR Future<Void> dataDistribution(Reference<DataDistributorData> self,
|
|||
Reference<AsyncVar<bool>> anyZeroHealthyTeams;
|
||||
vector<Reference<AsyncVar<bool>>> zeroHealthyTeams;
|
||||
tcis.push_back(TeamCollectionInterface());
|
||||
zeroHealthyTeams.push_back(Reference<AsyncVar<bool>>( new AsyncVar<bool>(true) ));
|
||||
zeroHealthyTeams.push_back(makeReference<AsyncVar<bool>>(true));
|
||||
int storageTeamSize = configuration.storageTeamSize;
|
||||
|
||||
// Stored outside of data distribution tracker to avoid slow tasks
|
||||
|
@ -4689,8 +4892,8 @@ ACTOR Future<Void> dataDistribution(Reference<DataDistributorData> self,
|
|||
tcis.push_back(TeamCollectionInterface());
|
||||
storageTeamSize = 2*configuration.storageTeamSize;
|
||||
|
||||
zeroHealthyTeams.push_back( Reference<AsyncVar<bool>>( new AsyncVar<bool>(true) ) );
|
||||
anyZeroHealthyTeams = Reference<AsyncVar<bool>>( new AsyncVar<bool>(true) );
|
||||
zeroHealthyTeams.push_back(makeReference<AsyncVar<bool>>(true));
|
||||
anyZeroHealthyTeams = makeReference<AsyncVar<bool>>(true);
|
||||
actors.push_back( anyTrue(zeroHealthyTeams, anyZeroHealthyTeams) );
|
||||
} else {
|
||||
anyZeroHealthyTeams = zeroHealthyTeams[0];
|
||||
|
@ -4709,22 +4912,31 @@ ACTOR Future<Void> dataDistribution(Reference<DataDistributorData> self,
|
|||
"DDQueue", self->ddId, &normalDDQueueErrors()));
|
||||
|
||||
vector<DDTeamCollection*> teamCollectionsPtrs;
|
||||
primaryTeamCollection = Reference<DDTeamCollection>( new DDTeamCollection(cx, self->ddId, lock, output, shardsAffectedByTeamFailure, configuration, primaryDcId, configuration.usableRegions > 1 ? remoteDcIds : std::vector<Optional<Key>>(), readyToStart.getFuture(), zeroHealthyTeams[0], true, processingUnhealthy) );
|
||||
primaryTeamCollection = makeReference<DDTeamCollection>(
|
||||
cx, self->ddId, lock, output, shardsAffectedByTeamFailure, configuration, primaryDcId,
|
||||
configuration.usableRegions > 1 ? remoteDcIds : std::vector<Optional<Key>>(), readyToStart.getFuture(),
|
||||
zeroHealthyTeams[0], true, processingUnhealthy);
|
||||
teamCollectionsPtrs.push_back(primaryTeamCollection.getPtr());
|
||||
if (configuration.usableRegions > 1) {
|
||||
remoteTeamCollection = Reference<DDTeamCollection>( new DDTeamCollection(cx, self->ddId, lock, output, shardsAffectedByTeamFailure, configuration, remoteDcIds, Optional<std::vector<Optional<Key>>>(), readyToStart.getFuture() && remoteRecovered(self->dbInfo), zeroHealthyTeams[1], false, processingUnhealthy) );
|
||||
remoteTeamCollection = makeReference<DDTeamCollection>(
|
||||
cx, self->ddId, lock, output, shardsAffectedByTeamFailure, configuration, remoteDcIds,
|
||||
Optional<std::vector<Optional<Key>>>(), readyToStart.getFuture() && remoteRecovered(self->dbInfo),
|
||||
zeroHealthyTeams[1], false, processingUnhealthy);
|
||||
teamCollectionsPtrs.push_back(remoteTeamCollection.getPtr());
|
||||
remoteTeamCollection->teamCollections = teamCollectionsPtrs;
|
||||
actors.push_back(
|
||||
reportErrorsExcept(dataDistributionTeamCollection(remoteTeamCollection, initData, tcis[1],
|
||||
self->dbInfo, ddEnabledState),
|
||||
"DDTeamCollectionSecondary", self->ddId, &normalDDQueueErrors()));
|
||||
actors.push_back(printSnapshotTeamsInfo(remoteTeamCollection));
|
||||
}
|
||||
primaryTeamCollection->teamCollections = teamCollectionsPtrs;
|
||||
self->teamCollection = primaryTeamCollection.getPtr();
|
||||
actors.push_back(reportErrorsExcept(
|
||||
dataDistributionTeamCollection(primaryTeamCollection, initData, tcis[0], self->dbInfo, ddEnabledState),
|
||||
"DDTeamCollectionPrimary", self->ddId, &normalDDQueueErrors()));
|
||||
|
||||
actors.push_back(printSnapshotTeamsInfo(primaryTeamCollection));
|
||||
actors.push_back(yieldPromiseStream(output.getFuture(), input));
|
||||
|
||||
wait( waitForAll( actors ) );
|
||||
|
@ -4848,6 +5060,22 @@ ACTOR Future<Void> ddSnapCreateCore(DistributorSnapRequest snapReq, Reference<As
|
|||
TraceEvent("SnapDataDistributor_AfterSnapCoords")
|
||||
.detail("SnapPayload", snapReq.snapPayload)
|
||||
.detail("SnapUID", snapReq.snapUID);
|
||||
tr.reset();
|
||||
loop {
|
||||
try {
|
||||
tr.setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
|
||||
tr.setOption(FDBTransactionOptions::LOCK_AWARE);
|
||||
TraceEvent("SnapDataDistributor_ClearFlagAttempt")
|
||||
.detail("SnapPayload", snapReq.snapPayload)
|
||||
.detail("SnapUID", snapReq.snapUID);
|
||||
tr.clear(writeRecoveryKey);
|
||||
wait(tr.commit());
|
||||
break;
|
||||
} catch (Error& e) {
|
||||
TraceEvent("SnapDataDistributor_ClearFlagError").error(e);
|
||||
wait(tr.onError(e));
|
||||
}
|
||||
}
|
||||
} catch (Error& err) {
|
||||
state Error e = err;
|
||||
TraceEvent("SnapDataDistributor_SnapReqExit")
|
||||
|
@ -5108,31 +5336,17 @@ ACTOR Future<Void> dataDistributor(DataDistributorInterface di, Reference<AsyncV
|
|||
}
|
||||
|
||||
DDTeamCollection* testTeamCollection(int teamSize, Reference<IReplicationPolicy> policy, int processCount) {
|
||||
Database database = DatabaseContext::create(
|
||||
Reference<AsyncVar<ClientDBInfo>>(new AsyncVar<ClientDBInfo>()),
|
||||
Never(),
|
||||
LocalityData(),
|
||||
false
|
||||
);
|
||||
Database database =
|
||||
DatabaseContext::create(makeReference<AsyncVar<ClientDBInfo>>(), Never(), LocalityData(), false);
|
||||
|
||||
DatabaseConfiguration conf;
|
||||
conf.storageTeamSize = teamSize;
|
||||
conf.storagePolicy = policy;
|
||||
|
||||
DDTeamCollection* collection = new DDTeamCollection(
|
||||
database,
|
||||
UID(0, 0),
|
||||
MoveKeysLock(),
|
||||
PromiseStream<RelocateShard>(),
|
||||
Reference<ShardsAffectedByTeamFailure>(new ShardsAffectedByTeamFailure()),
|
||||
conf,
|
||||
{},
|
||||
{},
|
||||
Future<Void>(Void()),
|
||||
Reference<AsyncVar<bool>>( new AsyncVar<bool>(true) ),
|
||||
true,
|
||||
Reference<AsyncVar<bool>>( new AsyncVar<bool>(false) )
|
||||
);
|
||||
DDTeamCollection* collection =
|
||||
new DDTeamCollection(database, UID(0, 0), MoveKeysLock(), PromiseStream<RelocateShard>(),
|
||||
makeReference<ShardsAffectedByTeamFailure>(), conf, {}, {}, Future<Void>(Void()),
|
||||
makeReference<AsyncVar<bool>>(true), true, makeReference<AsyncVar<bool>>(false));
|
||||
|
||||
for (int id = 1; id <= processCount; ++id) {
|
||||
UID uid(id, 0);
|
||||
|
@ -5141,7 +5355,8 @@ DDTeamCollection* testTeamCollection(int teamSize, Reference<IReplicationPolicy>
|
|||
interface.locality.set(LiteralStringRef("machineid"), Standalone<StringRef>(std::to_string(id)));
|
||||
interface.locality.set(LiteralStringRef("zoneid"), Standalone<StringRef>(std::to_string(id % 5)));
|
||||
interface.locality.set(LiteralStringRef("data_hall"), Standalone<StringRef>(std::to_string(id % 3)));
|
||||
collection->server_info[uid] = Reference<TCServerInfo>(new TCServerInfo(interface, collection, ProcessClass(), true, collection->storageServerSet));
|
||||
collection->server_info[uid] =
|
||||
makeReference<TCServerInfo>(interface, collection, ProcessClass(), true, collection->storageServerSet);
|
||||
collection->server_status.set(uid, ServerStatus(false, false, interface.locality));
|
||||
collection->checkAndCreateMachine(collection->server_info[uid]);
|
||||
}
|
||||
|
@ -5150,8 +5365,8 @@ DDTeamCollection* testTeamCollection(int teamSize, Reference<IReplicationPolicy>
|
|||
}
|
||||
|
||||
DDTeamCollection* testMachineTeamCollection(int teamSize, Reference<IReplicationPolicy> policy, int processCount) {
|
||||
Database database = DatabaseContext::create(Reference<AsyncVar<ClientDBInfo>>(new AsyncVar<ClientDBInfo>()),
|
||||
Never(), LocalityData(), false);
|
||||
Database database =
|
||||
DatabaseContext::create(makeReference<AsyncVar<ClientDBInfo>>(), Never(), LocalityData(), false);
|
||||
|
||||
DatabaseConfiguration conf;
|
||||
conf.storageTeamSize = teamSize;
|
||||
|
@ -5159,10 +5374,8 @@ DDTeamCollection* testMachineTeamCollection(int teamSize, Reference<IReplication
|
|||
|
||||
DDTeamCollection* collection =
|
||||
new DDTeamCollection(database, UID(0, 0), MoveKeysLock(), PromiseStream<RelocateShard>(),
|
||||
Reference<ShardsAffectedByTeamFailure>(new ShardsAffectedByTeamFailure()), conf, {}, {},
|
||||
Future<Void>(Void()),
|
||||
Reference<AsyncVar<bool>>(new AsyncVar<bool>(true)), true,
|
||||
Reference<AsyncVar<bool>>(new AsyncVar<bool>(false)));
|
||||
makeReference<ShardsAffectedByTeamFailure>(), conf, {}, {}, Future<Void>(Void()),
|
||||
makeReference<AsyncVar<bool>>(true), true, makeReference<AsyncVar<bool>>(false));
|
||||
|
||||
for (int id = 1; id <= processCount; id++) {
|
||||
UID uid(id, 0);
|
||||
|
@ -5182,7 +5395,7 @@ DDTeamCollection* testMachineTeamCollection(int teamSize, Reference<IReplication
|
|||
interface.locality.set(LiteralStringRef("data_hall"), Standalone<StringRef>(std::to_string(data_hall_id)));
|
||||
interface.locality.set(LiteralStringRef("dcid"), Standalone<StringRef>(std::to_string(dc_id)));
|
||||
collection->server_info[uid] =
|
||||
Reference<TCServerInfo>(new TCServerInfo(interface, collection, ProcessClass(), true, collection->storageServerSet));
|
||||
makeReference<TCServerInfo>(interface, collection, ProcessClass(), true, collection->storageServerSet);
|
||||
|
||||
collection->server_status.set(uid, ServerStatus(false, false, interface.locality));
|
||||
}
|
||||
|
|
|
@ -717,7 +717,7 @@ void restartShardTrackers(DataDistributionTracker* self, KeyRangeRef keys, Optio
|
|||
continue;
|
||||
}
|
||||
|
||||
Reference<AsyncVar<Optional<ShardMetrics>>> shardMetrics(new AsyncVar<Optional<ShardMetrics>>());
|
||||
auto shardMetrics = makeReference<AsyncVar<Optional<ShardMetrics>>>();
|
||||
|
||||
// For the case where the new tracker will take over at the boundaries of current shard(s)
|
||||
// we can use the old size if it is available. This will be the case when merging shards.
|
||||
|
|
|
@ -241,7 +241,7 @@ public:
|
|||
|
||||
void setFile(Reference<IAsyncFile> f) {
|
||||
this->f = f;
|
||||
this->syncQueue = Reference<SyncQueue>( new SyncQueue(1, f) );
|
||||
this->syncQueue = makeReference<SyncQueue>(1, f);
|
||||
}
|
||||
};
|
||||
File files[2]; // After readFirstAndLastPages(), files[0] is logically before files[1] (pushes are always into files[1])
|
||||
|
|
|
@ -21,7 +21,7 @@
|
|||
#define IKEYVALUECONTAINER_H
|
||||
#pragma once
|
||||
|
||||
#include "IndexedSet.h"
|
||||
#include "flow/IndexedSet.h"
|
||||
|
||||
// Stored in the IndexedSets that hold the database.
|
||||
// Each KeyValueMapPair is 32 bytes, excluding arena memory.
|
|
@ -18,15 +18,15 @@
|
|||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "fdbserver/IKeyValueStore.h"
|
||||
#include "fdbserver/IDiskQueue.h"
|
||||
#include "flow/IKeyValueContainer.h"
|
||||
#include "flow/RadixTree.h"
|
||||
#include "flow/ActorCollection.h"
|
||||
#include "fdbclient/Notified.h"
|
||||
#include "fdbclient/SystemData.h"
|
||||
#include "flow/actorcompiler.h" // This must be the last #include.
|
||||
#include "fdbserver/DeltaTree.h"
|
||||
#include "fdbserver/IDiskQueue.h"
|
||||
#include "fdbserver/IKeyValueContainer.h"
|
||||
#include "fdbserver/IKeyValueStore.h"
|
||||
#include "fdbserver/RadixTree.h"
|
||||
#include "flow/ActorCollection.h"
|
||||
#include "flow/actorcompiler.h" // This must be the last #include.
|
||||
|
||||
#define OP_DISK_OVERHEAD (sizeof(OpHeader) + 1)
|
||||
|
||||
|
|
|
@ -1510,7 +1510,7 @@ private:
|
|||
Reference<ReadCursor> getCursor() {
|
||||
Reference<ReadCursor> cursor = *ppReadCursor;
|
||||
if (!cursor) {
|
||||
*ppReadCursor = cursor = Reference<ReadCursor>(new ReadCursor);
|
||||
*ppReadCursor = cursor = makeReference<ReadCursor>();
|
||||
cursor->init(conn);
|
||||
}
|
||||
return cursor;
|
||||
|
|
|
@ -243,6 +243,8 @@ void ServerKnobs::initialize(bool randomize, ClientKnobs* clientKnobs, bool isSi
|
|||
init( DD_SS_FAILURE_VERSIONLAG, 250000000 );
|
||||
init( DD_SS_ALLOWED_VERSIONLAG, 200000000 ); if( randomize && BUGGIFY ) { DD_SS_FAILURE_VERSIONLAG = deterministicRandom()->randomInt(15000000, 500000000); DD_SS_ALLOWED_VERSIONLAG = 0.75 * DD_SS_FAILURE_VERSIONLAG; }
|
||||
init( DD_SS_STUCK_TIME_LIMIT, 300.0 ); if( randomize && BUGGIFY ) { DD_SS_STUCK_TIME_LIMIT = 200.0 + deterministicRandom()->random01() * 100.0; }
|
||||
init( DD_TEAMS_INFO_PRINT_INTERVAL, 60 ); if( randomize && BUGGIFY ) DD_TEAMS_INFO_PRINT_INTERVAL = 10;
|
||||
init( DD_TEAMS_INFO_PRINT_YIELD_COUNT, 100 ); if( randomize && BUGGIFY ) DD_TEAMS_INFO_PRINT_YIELD_COUNT = deterministicRandom()->random01() * 1000 + 1;
|
||||
|
||||
// TeamRemover
|
||||
init( TR_FLAG_DISABLE_MACHINE_TEAM_REMOVER, false ); if( randomize && BUGGIFY ) TR_FLAG_DISABLE_MACHINE_TEAM_REMOVER = deterministicRandom()->random01() < 0.1 ? true : false; // false by default. disable the consistency check when it's true
|
||||
|
|
|
@ -191,6 +191,8 @@ public:
|
|||
int64_t DD_SS_FAILURE_VERSIONLAG; // Allowed SS version lag from the current read version before marking it as failed.
|
||||
int64_t DD_SS_ALLOWED_VERSIONLAG; // SS will be marked as healthy if it's version lag goes below this value.
|
||||
double DD_SS_STUCK_TIME_LIMIT; // If a storage server is not getting new versions for this amount of time, then it becomes undesired.
|
||||
int DD_TEAMS_INFO_PRINT_INTERVAL;
|
||||
int DD_TEAMS_INFO_PRINT_YIELD_COUNT;
|
||||
|
||||
// TeamRemover to remove redundant teams
|
||||
bool TR_FLAG_DISABLE_MACHINE_TEAM_REMOVER; // disable the machineTeamRemover actor
|
||||
|
|
|
@ -59,7 +59,7 @@ ACTOR template <class T> Future<Void> buggifyDelayedAsyncVar( Reference<AsyncVar
|
|||
|
||||
template <class T>
|
||||
Future<Void> buggifyDelayedAsyncVar( Reference<AsyncVar<T>> &var ) {
|
||||
Reference<AsyncVar<T>> in( new AsyncVar<T> );
|
||||
auto in = makeReference<AsyncVar<T>>();
|
||||
auto f = buggifyDelayedAsyncVar(in, var);
|
||||
var = in;
|
||||
return f;
|
||||
|
|
|
@ -60,7 +60,7 @@ Future<Void> tryBecomeLeader( ServerCoordinators const& coordinators,
|
|||
bool hasConnected,
|
||||
Reference<AsyncVar<ClusterControllerPriorityInfo>> const& asyncPriorityInfo)
|
||||
{
|
||||
Reference<AsyncVar<Value>> serializedInfo(new AsyncVar<Value>);
|
||||
auto serializedInfo = makeReference<AsyncVar<Value>>();
|
||||
Future<Void> m = tryBecomeLeaderInternal(coordinators, ObjectWriter::toValue(proposedInterface, IncludeVersion()),
|
||||
serializedInfo, hasConnected, asyncPriorityInfo);
|
||||
return m || asyncDeserialize(serializedInfo, outKnownLeader);
|
||||
|
|
|
@ -116,7 +116,7 @@ struct LogRouterData {
|
|||
|
||||
//only callable after getTagData returns a null reference
|
||||
Reference<TagData> createTagData(Tag tag, Version popped, Version knownCommittedVersion) {
|
||||
Reference<TagData> newTagData(new TagData(tag, popped, knownCommittedVersion));
|
||||
auto newTagData = makeReference<TagData>(tag, popped, knownCommittedVersion);
|
||||
tag_data[tag.id] = newTagData;
|
||||
return newTagData;
|
||||
}
|
||||
|
@ -138,7 +138,7 @@ struct LogRouterData {
|
|||
}
|
||||
}
|
||||
|
||||
eventCacheHolder = Reference<EventCacheHolder>( new EventCacheHolder(dbgid.shortString() + ".PeekLocation") );
|
||||
eventCacheHolder = makeReference<EventCacheHolder>(dbgid.shortString() + ".PeekLocation");
|
||||
|
||||
specialCounter(cc, "Version", [this](){ return this->version.get(); });
|
||||
specialCounter(cc, "MinPopped", [this](){ return this->minPopped.get(); });
|
||||
|
|
|
@ -877,7 +877,6 @@ struct LogPushData : NonCopyable {
|
|||
void addTransactionInfo(SpanID const& context) {
|
||||
TEST(!spanContext.isValid()); // addTransactionInfo with invalid SpanID
|
||||
spanContext = context;
|
||||
transactionSubseq = 0;
|
||||
writtenLocations.clear();
|
||||
}
|
||||
|
||||
|
@ -919,21 +918,30 @@ struct LogPushData : NonCopyable {
|
|||
|
||||
BinaryWriter bw(AssumeVersion(currentProtocolVersion));
|
||||
|
||||
// Metadata messages should be written before span information. If this
|
||||
// isn't a metadata message, make sure all locations have had
|
||||
// transaction info written to them. Mutations may have different sets
|
||||
// of tags, so it is necessary to check all tag locations each time a
|
||||
// mutation is written.
|
||||
// Metadata messages (currently LogProtocolMessage is the only metadata
|
||||
// message) should be written before span information. If this isn't a
|
||||
// metadata message, make sure all locations have had transaction info
|
||||
// written to them. Mutations may have different sets of tags, so it
|
||||
// is necessary to check all tag locations each time a mutation is
|
||||
// written.
|
||||
if (!metadataMessage) {
|
||||
// If span information hasn't been written for this transaction yet,
|
||||
// generate a subsequence value for the message.
|
||||
if (!transactionSubseq) {
|
||||
transactionSubseq = this->subsequence++;
|
||||
}
|
||||
|
||||
uint32_t subseq = this->subsequence++;
|
||||
bool updatedLocation = false;
|
||||
for (int loc : msg_locations) {
|
||||
writeTransactionInfo(loc);
|
||||
updatedLocation = writeTransactionInfo(loc, subseq) || updatedLocation;
|
||||
}
|
||||
// If this message doesn't write to any new locations, the
|
||||
// subsequence wasn't actually used and can be decremented.
|
||||
if (!updatedLocation) {
|
||||
this->subsequence--;
|
||||
TEST(true); // No new SpanContextMessage written to transaction logs
|
||||
ASSERT(this->subsequence > 0);
|
||||
}
|
||||
} else {
|
||||
// When writing a metadata message, make sure transaction state has
|
||||
// been reset. If you are running into this assertion, make sure
|
||||
// you are calling addTransactionInfo before each transaction.
|
||||
ASSERT(writtenLocations.size() == 0);
|
||||
}
|
||||
|
||||
uint32_t subseq = this->subsequence++;
|
||||
|
@ -975,33 +983,31 @@ private:
|
|||
// field.
|
||||
std::unordered_set<int> writtenLocations;
|
||||
uint32_t subsequence;
|
||||
// Store transaction subsequence separately, as multiple mutations may need
|
||||
// to write transaction info. This can happen if later mutations in a
|
||||
// transaction need to write to a different location than earlier
|
||||
// mutations.
|
||||
uint32_t transactionSubseq;
|
||||
SpanID spanContext;
|
||||
|
||||
// Writes transaction info to the message stream for the given location if
|
||||
// it has not already been written (for the current transaction).
|
||||
void writeTransactionInfo(int location) {
|
||||
if (!FLOW_KNOBS->WRITE_TRACING_ENABLED || logSystem->getTLogVersion() < TLogVersion::V6) {
|
||||
return;
|
||||
// Writes transaction info to the message stream at the given location if
|
||||
// it has not already been written (for the current transaction). Returns
|
||||
// true on a successful write, and false if the location has already been
|
||||
// written.
|
||||
bool writeTransactionInfo(int location, uint32_t subseq) {
|
||||
if (!FLOW_KNOBS->WRITE_TRACING_ENABLED || logSystem->getTLogVersion() < TLogVersion::V6 || writtenLocations.count(location) != 0) {
|
||||
return false;
|
||||
}
|
||||
if (writtenLocations.count(location) == 0) {
|
||||
writtenLocations.insert(location);
|
||||
|
||||
BinaryWriter& wr = messagesWriter[location];
|
||||
SpanContextMessage contextMessage(spanContext);
|
||||
TEST(true); // Wrote SpanContextMessage to a transaction log
|
||||
writtenLocations.insert(location);
|
||||
|
||||
int offset = wr.getLength();
|
||||
wr << uint32_t(0) << transactionSubseq << uint16_t(prev_tags.size());
|
||||
for(auto& tag : prev_tags)
|
||||
wr << tag;
|
||||
wr << contextMessage;
|
||||
int length = wr.getLength() - offset;
|
||||
*(uint32_t*)((uint8_t*)wr.getData() + offset) = length - sizeof(uint32_t);
|
||||
}
|
||||
BinaryWriter& wr = messagesWriter[location];
|
||||
SpanContextMessage contextMessage(spanContext);
|
||||
|
||||
int offset = wr.getLength();
|
||||
wr << uint32_t(0) << subseq << uint16_t(prev_tags.size());
|
||||
for(auto& tag : prev_tags)
|
||||
wr << tag;
|
||||
wr << contextMessage;
|
||||
int length = wr.getLength() - offset;
|
||||
*(uint32_t*)((uint8_t*)wr.getData() + offset) = length - sizeof(uint32_t);
|
||||
return true;
|
||||
}
|
||||
};
|
||||
|
||||
|
|
|
@ -48,7 +48,8 @@ ILogSystem::ServerPeekCursor::ServerPeekCursor( TLogPeekReply const& results, Lo
|
|||
}
|
||||
|
||||
Reference<ILogSystem::IPeekCursor> ILogSystem::ServerPeekCursor::cloneNoMore() {
|
||||
return Reference<ILogSystem::ServerPeekCursor>( new ILogSystem::ServerPeekCursor( results, messageVersion, end, messageAndTags, hasMsg, poppedVersion, tag ) );
|
||||
return makeReference<ILogSystem::ServerPeekCursor>(results, messageVersion, end, messageAndTags, hasMsg,
|
||||
poppedVersion, tag);
|
||||
}
|
||||
|
||||
void ILogSystem::ServerPeekCursor::setProtocolVersion( ProtocolVersion version ) {
|
||||
|
@ -351,7 +352,7 @@ ILogSystem::MergedPeekCursor::MergedPeekCursor( std::vector<Reference<AsyncVar<O
|
|||
bool parallelGetMore, std::vector< LocalityData > const& tLogLocalities, Reference<IReplicationPolicy> const tLogPolicy, int tLogReplicationFactor )
|
||||
: bestServer(bestServer), readQuorum(readQuorum), tag(tag), currentCursor(0), hasNextMessage(false), messageVersion(begin), randomID(deterministicRandom()->randomUniqueID()), tLogReplicationFactor(tLogReplicationFactor) {
|
||||
if(tLogPolicy) {
|
||||
logSet = Reference<LogSet>( new LogSet() );
|
||||
logSet = makeReference<LogSet>();
|
||||
logSet->tLogPolicy = tLogPolicy;
|
||||
logSet->tLogLocalities = tLogLocalities;
|
||||
filterLocalityDataForPolicy(logSet->tLogPolicy, &logSet->tLogLocalities);
|
||||
|
@ -359,7 +360,8 @@ ILogSystem::MergedPeekCursor::MergedPeekCursor( std::vector<Reference<AsyncVar<O
|
|||
}
|
||||
|
||||
for( int i = 0; i < logServers.size(); i++ ) {
|
||||
Reference<ILogSystem::ServerPeekCursor> cursor( new ILogSystem::ServerPeekCursor( logServers[i], tag, begin, end, bestServer >= 0, parallelGetMore ) );
|
||||
auto cursor = makeReference<ILogSystem::ServerPeekCursor>(logServers[i], tag, begin, end, bestServer >= 0,
|
||||
parallelGetMore);
|
||||
//TraceEvent("MPC_Starting", randomID).detail("Cursor", cursor->randomID).detail("End", end);
|
||||
serverCursors.push_back( cursor );
|
||||
}
|
||||
|
@ -378,7 +380,8 @@ Reference<ILogSystem::IPeekCursor> ILogSystem::MergedPeekCursor::cloneNoMore() {
|
|||
for( auto it : serverCursors ) {
|
||||
cursors.push_back(it->cloneNoMore());
|
||||
}
|
||||
return Reference<ILogSystem::MergedPeekCursor>( new ILogSystem::MergedPeekCursor( cursors, messageVersion, bestServer, readQuorum, nextVersion, logSet, tLogReplicationFactor ) );
|
||||
return makeReference<ILogSystem::MergedPeekCursor>(cursors, messageVersion, bestServer, readQuorum, nextVersion,
|
||||
logSet, tLogReplicationFactor);
|
||||
}
|
||||
|
||||
void ILogSystem::MergedPeekCursor::setProtocolVersion( ProtocolVersion version ) {
|
||||
|
@ -589,7 +592,8 @@ ILogSystem::SetPeekCursor::SetPeekCursor( std::vector<Reference<LogSet>> const&
|
|||
int maxServers = 0;
|
||||
for( int i = 0; i < logSets.size(); i++ ) {
|
||||
for( int j = 0; j < logSets[i]->logServers.size(); j++) {
|
||||
Reference<ILogSystem::ServerPeekCursor> cursor( new ILogSystem::ServerPeekCursor( logSets[i]->logServers[j], tag, begin, end, true, parallelGetMore ) );
|
||||
auto cursor = makeReference<ILogSystem::ServerPeekCursor>(logSets[i]->logServers[j], tag, begin, end, true,
|
||||
parallelGetMore);
|
||||
serverCursors[i].push_back( cursor );
|
||||
}
|
||||
maxServers = std::max<int>(maxServers, serverCursors[i].size());
|
||||
|
@ -616,7 +620,8 @@ Reference<ILogSystem::IPeekCursor> ILogSystem::SetPeekCursor::cloneNoMore() {
|
|||
cursors[i].push_back( serverCursors[i][j]->cloneNoMore() );
|
||||
}
|
||||
}
|
||||
return Reference<ILogSystem::SetPeekCursor>( new ILogSystem::SetPeekCursor( logSets, cursors, messageVersion, bestSet, bestServer, nextVersion, useBestSet ) );
|
||||
return makeReference<ILogSystem::SetPeekCursor>(logSets, cursors, messageVersion, bestSet, bestServer, nextVersion,
|
||||
useBestSet);
|
||||
}
|
||||
|
||||
void ILogSystem::SetPeekCursor::setProtocolVersion( ProtocolVersion version ) {
|
||||
|
@ -723,7 +728,7 @@ void ILogSystem::SetPeekCursor::updateMessage(int logIdx, bool usePolicy) {
|
|||
c->advanceTo(messageVersion);
|
||||
if( start <= messageVersion && messageVersion < c->version() ) {
|
||||
advancedPast = true;
|
||||
TEST(true); //Merge peek cursor advanced past desired sequence
|
||||
TEST(true); //Merge peek cursor with logIdx advanced past desired sequence
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -999,7 +1004,8 @@ ILogSystem::BufferedCursor::BufferedCursor( std::vector<Reference<AsyncVar<Optio
|
|||
messages.reserve(SERVER_KNOBS->DESIRED_OUTSTANDING_MESSAGES);
|
||||
cursorMessages.resize(logServers.size());
|
||||
for( int i = 0; i < logServers.size(); i++ ) {
|
||||
Reference<ILogSystem::ServerPeekCursor> cursor( new ILogSystem::ServerPeekCursor( logServers[i], tag, begin, end, false, parallelGetMore ) );
|
||||
auto cursor =
|
||||
makeReference<ILogSystem::ServerPeekCursor>(logServers[i], tag, begin, end, false, parallelGetMore);
|
||||
cursors.push_back( cursor );
|
||||
}
|
||||
}
|
||||
|
|
|
@ -24,6 +24,7 @@
|
|||
#include "fdbclient/DatabaseContext.h"
|
||||
#include "fdbclient/ReadYourWrites.h"
|
||||
#include "fdbclient/KeyBackedTypes.h"
|
||||
#include "fdbserver/MetricLogger.h"
|
||||
#include "flow/actorcompiler.h" // This must be the last #include.
|
||||
|
||||
struct MetricsRule {
|
|
@ -461,8 +461,8 @@ namespace oldTLog_4_6 {
|
|||
state Version stopVersion = logData->version.get();
|
||||
|
||||
TEST(true); // TLog stopped by recovering master
|
||||
TEST( logData->stopped );
|
||||
TEST( !logData->stopped );
|
||||
TEST( logData->stopped ); // LogData already stopped
|
||||
TEST( !logData->stopped ); // LogData not yet stopped
|
||||
|
||||
TraceEvent("TLogStop", logData->logId).detail("Ver", stopVersion).detail("IsStopped", logData->stopped).detail("QueueCommitted", logData->queueCommittedVersion.get());
|
||||
|
||||
|
@ -1005,7 +1005,7 @@ namespace oldTLog_4_6 {
|
|||
auto& sequenceData = trackerData.sequence_version[sequence+1];
|
||||
if(sequenceData.isSet()) {
|
||||
if(sequenceData.getFuture().get() != reply.end) {
|
||||
TEST(true); //tlog peek second attempt ended at a different version
|
||||
TEST(true); //tlog peek second attempt ended at a different version (2)
|
||||
req.reply.sendError(operation_obsolete());
|
||||
return Void();
|
||||
}
|
||||
|
|
|
@ -589,8 +589,8 @@ ACTOR Future<Void> tLogLock( TLogData* self, ReplyPromise< TLogLockResult > repl
|
|||
state Version stopVersion = logData->version.get();
|
||||
|
||||
TEST(true); // TLog stopped by recovering master
|
||||
TEST( logData->stopped );
|
||||
TEST( !logData->stopped );
|
||||
TEST( logData->stopped ); // logData already stopped
|
||||
TEST( !logData->stopped ); // logData not yet stopped
|
||||
|
||||
TraceEvent("TLogStop", logData->logId).detail("Ver", stopVersion).detail("IsStopped", logData->stopped).detail("QueueCommitted", logData->queueCommittedVersion.get());
|
||||
|
||||
|
@ -1295,7 +1295,7 @@ ACTOR Future<Void> tLogPeekMessages( TLogData* self, TLogPeekRequest req, Refere
|
|||
if(sequenceData.isSet()) {
|
||||
trackerData.duplicatePeeks++;
|
||||
if(sequenceData.getFuture().get().first != reply.end) {
|
||||
TEST(true); //tlog peek second attempt ended at a different version
|
||||
TEST(true); //tlog peek second attempt ended at a different version (2)
|
||||
req.reply.sendError(operation_obsolete());
|
||||
return Void();
|
||||
}
|
||||
|
|
|
@ -680,8 +680,8 @@ ACTOR Future<Void> tLogLock( TLogData* self, ReplyPromise< TLogLockResult > repl
|
|||
state Version stopVersion = logData->version.get();
|
||||
|
||||
TEST(true); // TLog stopped by recovering master
|
||||
TEST( logData->stopped );
|
||||
TEST( !logData->stopped );
|
||||
TEST( logData->stopped ); // logData already stopped
|
||||
TEST( !logData->stopped ); // logData not yet stopped
|
||||
|
||||
TraceEvent("TLogStop", logData->logId).detail("Ver", stopVersion).detail("IsStopped", logData->stopped).detail("QueueCommitted", logData->queueCommittedVersion.get());
|
||||
|
||||
|
@ -1689,7 +1689,7 @@ ACTOR Future<Void> tLogPeekMessages( TLogData* self, TLogPeekRequest req, Refere
|
|||
if(sequenceData.isSet()) {
|
||||
trackerData.duplicatePeeks++;
|
||||
if(sequenceData.getFuture().get().first != reply.end) {
|
||||
TEST(true); //tlog peek second attempt ended at a different version
|
||||
TEST(true); //tlog peek second attempt ended at a different version (2)
|
||||
req.reply.sendError(operation_obsolete());
|
||||
return Void();
|
||||
}
|
||||
|
|
|
@ -31,7 +31,8 @@
|
|||
#include <map>
|
||||
#include <stdexcept>
|
||||
|
||||
#include "Arena.h"
|
||||
#include "fdbserver/IKeyValueContainer.h"
|
||||
#include "flow/Arena.h"
|
||||
|
||||
// forward declaration
|
||||
const int LEAF_BYTE = -1;
|
|
@ -1118,7 +1118,9 @@ void updateRate(RatekeeperData* self, RatekeeperLimits* limits) {
|
|||
}
|
||||
|
||||
self->healthMetrics.worstStorageQueue = worstStorageQueueStorageServer;
|
||||
self->healthMetrics.limitingStorageQueue = limitingStorageQueueStorageServer;
|
||||
self->healthMetrics.worstStorageDurabilityLag = worstDurabilityLag;
|
||||
self->healthMetrics.limitingStorageDurabilityLag = limitingDurabilityLag;
|
||||
|
||||
double writeToReadLatencyLimit = 0;
|
||||
Version worstVersionLag = 0;
|
||||
|
|
|
@ -43,8 +43,7 @@ ACTOR static Future<Void> handleApplyToDBRequest(RestoreVersionBatchRequest req,
|
|||
void handleUpdateRateRequest(RestoreUpdateRateRequest req, Reference<RestoreApplierData> self);
|
||||
|
||||
ACTOR Future<Void> restoreApplierCore(RestoreApplierInterface applierInterf, int nodeIndex, Database cx) {
|
||||
state Reference<RestoreApplierData> self =
|
||||
Reference<RestoreApplierData>(new RestoreApplierData(applierInterf.id(), nodeIndex));
|
||||
state Reference<RestoreApplierData> self = makeReference<RestoreApplierData>(applierInterf.id(), nodeIndex);
|
||||
state ActorCollection actors(false);
|
||||
state Future<Void> exitRole = Never();
|
||||
|
||||
|
@ -774,4 +773,4 @@ Value applyAtomicOp(Optional<StringRef> existingValue, Value value, MutationRef:
|
|||
ASSERT(false);
|
||||
}
|
||||
return Value();
|
||||
}
|
||||
}
|
||||
|
|
|
@ -108,7 +108,7 @@ ACTOR Future<Void> startRestoreController(Reference<RestoreWorkerData> controlle
|
|||
ASSERT(controllerWorker.isValid());
|
||||
ASSERT(controllerWorker->controllerInterf.present());
|
||||
state Reference<RestoreControllerData> self =
|
||||
Reference<RestoreControllerData>(new RestoreControllerData(controllerWorker->controllerInterf.get().id()));
|
||||
makeReference<RestoreControllerData>(controllerWorker->controllerInterf.get().id());
|
||||
state Future<Void> error = actorCollection(self->addActor.getFuture());
|
||||
|
||||
try {
|
||||
|
@ -373,8 +373,8 @@ ACTOR static Future<Version> processRestoreRequest(Reference<RestoreControllerDa
|
|||
.detail("BatchSize", versionBatch->size)
|
||||
.detail("RunningVersionBatches", self->runningVersionBatches.get())
|
||||
.detail("VersionBatches", versionBatches.size());
|
||||
self->batch[batchIndex] = Reference<ControllerBatchData>(new ControllerBatchData());
|
||||
self->batchStatus[batchIndex] = Reference<ControllerBatchStatus>(new ControllerBatchStatus());
|
||||
self->batch[batchIndex] = makeReference<ControllerBatchData>();
|
||||
self->batchStatus[batchIndex] = makeReference<ControllerBatchStatus>();
|
||||
fBatches.push_back(distributeWorkloadPerVersionBatch(self, batchIndex, cx, request, *versionBatch));
|
||||
// Wait a bit to give the current version batch a head start from the next version batch
|
||||
wait(delay(SERVER_KNOBS->FASTRESTORE_VB_LAUNCH_DELAY));
|
||||
|
@ -1164,4 +1164,4 @@ ACTOR static Future<Void> checkRolesLiveness(Reference<RestoreControllerData> se
|
|||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -218,8 +218,7 @@ ACTOR Future<Void> dispatchRequests(Reference<RestoreLoaderData> self) {
|
|||
|
||||
ACTOR Future<Void> restoreLoaderCore(RestoreLoaderInterface loaderInterf, int nodeIndex, Database cx,
|
||||
RestoreControllerInterface ci) {
|
||||
state Reference<RestoreLoaderData> self =
|
||||
Reference<RestoreLoaderData>(new RestoreLoaderData(loaderInterf.id(), nodeIndex, ci));
|
||||
state Reference<RestoreLoaderData> self = makeReference<RestoreLoaderData>(loaderInterf.id(), nodeIndex, ci);
|
||||
state Future<Void> error = actorCollection(self->addActor.getFuture());
|
||||
state ActorCollection actors(false); // actors whose errors can be ignored
|
||||
state Future<Void> exitRole = Never();
|
||||
|
|
|
@ -188,7 +188,7 @@ struct RestoreLoaderData : RestoreRoleData, public ReferenceCounted<RestoreLoade
|
|||
nodeID = loaderInterfID;
|
||||
nodeIndex = assignedIndex;
|
||||
role = RestoreRole::Loader;
|
||||
hasPendingRequests = Reference<AsyncVar<bool>>(new AsyncVar<bool>(false));
|
||||
hasPendingRequests = makeReference<AsyncVar<bool>>(false);
|
||||
}
|
||||
|
||||
~RestoreLoaderData() = default;
|
||||
|
@ -216,8 +216,8 @@ struct RestoreLoaderData : RestoreRoleData, public ReferenceCounted<RestoreLoade
|
|||
|
||||
void initVersionBatch(int batchIndex) {
|
||||
TraceEvent("FastRestoreLoaderInitVersionBatch", nodeID).detail("BatchIndex", batchIndex);
|
||||
batch[batchIndex] = Reference<LoaderBatchData>(new LoaderBatchData(nodeID, batchIndex));
|
||||
status[batchIndex] = Reference<LoaderBatchStatus>(new LoaderBatchStatus());
|
||||
batch[batchIndex] = makeReference<LoaderBatchData>(nodeID, batchIndex);
|
||||
status[batchIndex] = makeReference<LoaderBatchStatus>();
|
||||
}
|
||||
|
||||
void resetPerRestoreRequest() {
|
||||
|
|
|
@ -340,10 +340,9 @@ ACTOR Future<Void> monitorleader(Reference<AsyncVar<RestoreWorkerInterface>> lea
|
|||
ACTOR Future<Void> _restoreWorker(Database cx, LocalityData locality) {
|
||||
state ActorCollection actors(false);
|
||||
state Future<Void> myWork = Never();
|
||||
state Reference<AsyncVar<RestoreWorkerInterface>> leader =
|
||||
Reference<AsyncVar<RestoreWorkerInterface>>(new AsyncVar<RestoreWorkerInterface>());
|
||||
state Reference<AsyncVar<RestoreWorkerInterface>> leader = makeReference<AsyncVar<RestoreWorkerInterface>>();
|
||||
state RestoreWorkerInterface myWorkerInterf;
|
||||
state Reference<RestoreWorkerData> self = Reference<RestoreWorkerData>(new RestoreWorkerData());
|
||||
state Reference<RestoreWorkerData> self = makeReference<RestoreWorkerData>();
|
||||
|
||||
myWorkerInterf.initEndpoints();
|
||||
self->workerID = myWorkerInterf.id();
|
||||
|
|
|
@ -92,7 +92,7 @@ ACTOR Future<Void> runDr( Reference<ClusterConnectionFile> connFile ) {
|
|||
if (g_simulator.drAgents == ISimulator::BackupToDB) {
|
||||
Database cx = Database::createDatabase(connFile, -1);
|
||||
|
||||
Reference<ClusterConnectionFile> extraFile(new ClusterConnectionFile(*g_simulator.extraDB));
|
||||
auto extraFile = makeReference<ClusterConnectionFile>(*g_simulator.extraDB);
|
||||
state Database extraDB = Database::createDatabase(extraFile, -1);
|
||||
|
||||
TraceEvent("StartingDrAgents").detail("ConnFile", connFile->getConnectionString().toString()).detail("ExtraString", extraFile->getConnectionString().toString());
|
||||
|
@ -267,10 +267,11 @@ ACTOR Future<ISimulator::KillType> simulatedFDBDRebooter(Reference<ClusterConnec
|
|||
|
||||
if(!useSeedFile) {
|
||||
writeFile(joinPath(*dataFolder, "fdb.cluster"), connStr.toString());
|
||||
connFile = Reference<ClusterConnectionFile>( new ClusterConnectionFile( joinPath( *dataFolder, "fdb.cluster" )));
|
||||
connFile = makeReference<ClusterConnectionFile>(joinPath(*dataFolder, "fdb.cluster"));
|
||||
}
|
||||
else {
|
||||
connFile = Reference<ClusterConnectionFile>( new ClusterConnectionFile( joinPath( *dataFolder, "fdb.cluster" ), connStr.toString() ) );
|
||||
connFile =
|
||||
makeReference<ClusterConnectionFile>(joinPath(*dataFolder, "fdb.cluster"), connStr.toString());
|
||||
}
|
||||
}
|
||||
else {
|
||||
|
@ -642,7 +643,7 @@ ACTOR Future<Void> restartSimulatedSystem(vector<Future<Void>>* systemActors, st
|
|||
// SOMEDAY: parse backup agent from test file
|
||||
systemActors->push_back(reportErrors(
|
||||
simulatedMachine(conn, ipAddrs, usingSSL, localities, processClass, baseFolder, true,
|
||||
i == useSeedForMachine, enableExtraDB ? AgentAddition : AgentNone,
|
||||
i == useSeedForMachine, AgentAddition,
|
||||
usingSSL && (listenersPerProcess == 1 || processClass == ProcessClass::TesterClass),
|
||||
whitelistBinPaths),
|
||||
processClass == ProcessClass::TesterClass ? "SimulatedTesterMachine" : "SimulatedMachine"));
|
||||
|
@ -755,7 +756,7 @@ void SimulationConfig::generateNormalConfig(int minimumReplication, int minimumR
|
|||
break;
|
||||
}
|
||||
case 3: {
|
||||
TEST(true); // Simulated cluster using radix-tree storage engine
|
||||
TEST(true); // Simulated cluster using redwood storage engine
|
||||
set_config("ssd-redwood-experimental");
|
||||
break;
|
||||
}
|
||||
|
@ -856,7 +857,7 @@ void SimulationConfig::generateNormalConfig(int minimumReplication, int minimumR
|
|||
int satellite_replication_type = deterministicRandom()->randomInt(0,3);
|
||||
switch (satellite_replication_type) {
|
||||
case 0: {
|
||||
TEST( true ); // Simulated cluster using no satellite redundancy mode
|
||||
TEST( true ); // Simulated cluster using no satellite redundancy mode (>4 datacenters)
|
||||
break;
|
||||
}
|
||||
case 1: {
|
||||
|
@ -883,7 +884,7 @@ void SimulationConfig::generateNormalConfig(int minimumReplication, int minimumR
|
|||
break;
|
||||
}
|
||||
case 1: {
|
||||
TEST( true ); // Simulated cluster using no satellite redundancy mode
|
||||
TEST( true ); // Simulated cluster using no satellite redundancy mode (<4 datacenters)
|
||||
break;
|
||||
}
|
||||
case 2: {
|
||||
|
@ -1137,8 +1138,8 @@ void setupSimulatedSystem(vector<Future<Void>>* systemActors, std::string baseFo
|
|||
|
||||
// Use IPv6 25% of the time
|
||||
bool useIPv6 = deterministicRandom()->random01() < 0.25;
|
||||
TEST( useIPv6 );
|
||||
TEST( !useIPv6 );
|
||||
TEST( useIPv6 ); // Use IPv6
|
||||
TEST( !useIPv6 ); // Use IPv4
|
||||
|
||||
vector<NetworkAddress> coordinatorAddresses;
|
||||
if(minimumRegions > 1) {
|
||||
|
@ -1423,10 +1424,9 @@ ACTOR void setupAndRun(std::string dataFolder, const char *testFile, bool reboot
|
|||
std::string clusterFileDir = joinPath( dataFolder, deterministicRandom()->randomUniqueID().toString() );
|
||||
platform::createDirectory( clusterFileDir );
|
||||
writeFile(joinPath(clusterFileDir, "fdb.cluster"), connFile.get().toString());
|
||||
wait(timeoutError(runTests(Reference<ClusterConnectionFile>(
|
||||
new ClusterConnectionFile(joinPath(clusterFileDir, "fdb.cluster"))),
|
||||
TEST_TYPE_FROM_FILE, TEST_ON_TESTERS, testerCount, testFile, startingConfiguration),
|
||||
isBuggifyEnabled(BuggifyType::General) ? 36000.0 : 5400.0));
|
||||
wait(timeoutError(runTests(makeReference<ClusterConnectionFile>(joinPath(clusterFileDir, "fdb.cluster")),
|
||||
TEST_TYPE_FROM_FILE, TEST_ON_TESTERS, testerCount, testFile, startingConfiguration),
|
||||
isBuggifyEnabled(BuggifyType::General) ? 36000.0 : 5400.0));
|
||||
} catch (Error& e) {
|
||||
TraceEvent(SevError, "SetupAndRunError").error(e);
|
||||
}
|
||||
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue