Merge branch 'master' of https://github.com/apple/foundationdb into stable_interfaces
commit 9c6c2704d0
@@ -0,0 +1,80 @@
+FROM centos:7
+
+RUN yum install -y centos-release-scl scl-utils
+RUN rpmkeys --import "http://pool.sks-keyservers.net/pks/lookup?op=get&search=0x3fa7e0328081bff6a14da29aa6a19b38d3d831ef"
+RUN curl https://download.mono-project.com/repo/centos7-stable.repo | tee /etc/yum.repos.d/mono-centos7-stable.repo
+RUN yum install -y curl rpm-build wget git unzip devtoolset-8 devtoolset-8-libubsan-devel devtoolset-8-valgrind-devel \
+    rh-ruby26 go-toolset-7 rh-git218 rh-python36-devel java-11-openjdk-devel.x86_64 mono-devel dos2unix dpkg rh-python36
+
+# install Ninja
+RUN cd /tmp && curl -L https://github.com/ninja-build/ninja/archive/v1.9.0.zip -o ninja.zip &&\
+    unzip ninja.zip && cd ninja-1.9.0 && scl enable devtoolset-8 -- ./configure.py --bootstrap && cp ninja /usr/bin &&\
+    cd .. && rm -rf ninja-1.9.0 ninja.zip
+
+# install cmake
+RUN curl -L https://github.com/Kitware/CMake/releases/download/v3.13.4/cmake-3.13.4-Linux-x86_64.tar.gz -o /tmp/cmake.tar.gz &&\
+    echo "563a39e0a7c7368f81bfa1c3aff8b590a0617cdfe51177ddc808f66cc0866c76  /tmp/cmake.tar.gz" > /tmp/cmake-sha.txt &&\
+    sha256sum -c /tmp/cmake-sha.txt &&\
+    cd /tmp && tar xf cmake.tar.gz &&\
+    cp -r cmake-3.13.4-Linux-x86_64/* /usr/local/ &&\
+    rm -rf cmake.tar.gz cmake-3.13.4-Linux-x86_64 cmake-sha.txt
+
+# install LLVM
+RUN curl -L https://github.com/llvm/llvm-project/releases/download/llvmorg-11.0.0/llvm-project-11.0.0.tar.xz > /tmp/llvm.tar.xz
+RUN cd tmp &&\
+    echo "b7b639fc675fa1c86dd6d0bc32267be9eb34451748d2efd03f674b773000e92b  llvm.tar.xz" > llvm-sha.txt &&\
+    sha256sum -c llvm-sha.txt
+RUN cd /tmp && tar xf llvm.tar.xz --no-same-owner
+RUN mkdir /tmp/build && cd /tmp/build &&\
+    scl enable devtoolset-8 -- cmake -G Ninja -DCMAKE_BUILD_TYPE=Release -DLLVM_INCLUDE_EXAMPLES=OFF -DLLVM_INCLUDE_TESTS=OFF \
+    -DLLVM_ENABLE_PROJECTS="clang;clang-tools-extra;compiler-rt;libcxx;libcxxabi;libunwind;lld;lldb"\
+    -DLLVM_STATIC_LINK_CXX_STDLIB=ON ../llvm-project-11.0.0/llvm
+RUN cd /tmp/build && cmake --build .
+RUN cd /tmp/build && cmake --build . --target install
+RUN rm -rf /tmp/*
+
+# install openssl
+RUN cd /tmp && curl -L https://www.openssl.org/source/openssl-1.1.1h.tar.gz -o openssl.tar.gz &&\
+    echo "5c9ca8774bd7b03e5784f26ae9e9e6d749c9da2438545077e6b3d755a06595d9  openssl.tar.gz" > openssl-sha.txt &&\
+    sha256sum -c openssl-sha.txt && tar -xzf openssl.tar.gz &&\
+    cd openssl-1.1.1h && scl enable devtoolset-8 -- ./config CFLAGS="-fPIC -O3" --prefix=/usr/local &&\
+    scl enable devtoolset-8 -- make -j`nproc` && scl enable devtoolset-8 -- make -j1 install &&\
+    ln -sv /usr/local/lib64/lib*.so.1.1 /usr/lib64/ &&\
+    cd /tmp/ && rm -rf /tmp/openssl-1.1.1h /tmp/openssl.tar.gz
+
+# install RocksDB
+RUN cd /opt/ && curl -L https://github.com/facebook/rocksdb/archive/v6.10.1.tar.gz -o rocksdb.tar.gz &&\
+    echo "d573d2f15cdda883714f7e0bc87b814a8d4a53a82edde558f08f940e905541ee  rocksdb.tar.gz" > rocksdb-sha.txt &&\
+    sha256sum -c rocksdb-sha.txt && tar xf rocksdb.tar.gz && rm -rf rocksdb.tar.gz rocksdb-sha.txt
+
+# install Boost
+# wget of bintray without forcing UTF-8 encoding results in 403 Forbidden
+RUN cd /opt/ &&\
+    curl -L https://dl.bintray.com/boostorg/release/1.67.0/source/boost_1_67_0.tar.bz2 -o boost_1_67_0.tar.bz2 &&\
+    echo "2684c972994ee57fc5632e03bf044746f6eb45d4920c343937a465fd67a5adba  boost_1_67_0.tar.bz2" > boost-sha-67.txt &&\
+    sha256sum -c boost-sha-67.txt &&\
+    tar -xjf boost_1_67_0.tar.bz2 &&\
+    rm -rf boost_1_67_0.tar.bz2 boost-sha-67.txt boost_1_67_0/libs &&\
+    curl -L https://dl.bintray.com/boostorg/release/1.72.0/source/boost_1_72_0.tar.bz2 -o boost_1_72_0.tar.bz2 &&\
+    echo "59c9b274bc451cf91a9ba1dd2c7fdcaf5d60b1b3aa83f2c9fa143417cc660722  boost_1_72_0.tar.bz2" > boost-sha-72.txt &&\
+    sha256sum -c boost-sha-72.txt &&\
+    tar -xjf boost_1_72_0.tar.bz2 &&\
+    rm -rf boost_1_72_0.tar.bz2 boost-sha-72.txt boost_1_72_0/libs
+
+
+# Install CCACHE
+RUN cd /tmp && curl -L https://github.com/ccache/ccache/releases/download/v4.0/ccache-4.0.tar.gz > ccache.tar.gz &&\
+    echo "ac97af86679028ebc8555c99318352588ff50f515fc3a7f8ed21a8ad367e3d45  ccache.tar.gz" > ccache-sha256.txt &&\
+    sha256sum -c ccache-sha256.txt &&\
+    tar xf ccache.tar.gz && rm -rf build && mkdir build && cd build && cmake -G Ninja -DCMAKE_BUILD_TYPE=Release -DZSTD_FROM_INTERNET=ON ../ccache-4.0 &&\
+    cmake --build . --target install && cd / && rm -rf tmp/build && rm -rf tmp/ccache-4.0
+
+# Install toml11
+RUN cd /tmp && curl -L https://github.com/ToruNiina/toml11/archive/v3.4.0.tar.gz > toml.tar.gz &&\
+    echo "bc6d733efd9216af8c119d8ac64a805578c79cc82b813e4d1d880ca128bd154d  toml.tar.gz" > toml-sha256.txt &&\
+    sha256sum -c toml-sha256.txt &&\
+    tar xf toml.tar.gz && rm -rf build && mkdir build && cd build && cmake -G Ninja -DCMAKE_BUILD_TYPE=Release -Dtoml11_BUILD_TEST=OFF ../toml11-3.4.0 &&\
+    cmake --build . --target install && cd / && rm -rf tmp/build && rm -rf tmp/toml11-3.4.0
+
+# do some cleanup
+RUN rm -rf /tmp/* && yum clean all && rm -rf /var/cache/yum
@@ -7,8 +7,10 @@ env_set(USE_VALGRIND_FOR_CTEST ${USE_VALGRIND} BOOL "Use valgrind for ctest")
 env_set(ALLOC_INSTRUMENTATION OFF BOOL "Instrument alloc")
 env_set(WITH_UNDODB OFF BOOL "Use rr or undodb")
 env_set(USE_ASAN OFF BOOL "Compile with address sanitizer")
+env_set(USE_GCOV OFF BOOL "Compile with gcov instrumentation")
+env_set(USE_MSAN OFF BOOL "Compile with memory sanitizer. To avoid false positives you need to dynamically link to a msan-instrumented libc++ and libc++abi, which you must compile separately. See https://github.com/google/sanitizers/wiki/MemorySanitizerLibcxxHowTo#instrumented-libc.")
+env_set(USE_TSAN OFF BOOL "Compile with thread sanitizer. It is recommended to dynamically link to a tsan-instrumented libc++ and libc++abi, which you can compile separately.")
 env_set(USE_UBSAN OFF BOOL "Compile with undefined behavior sanitizer")
-env_set(USE_TSAN OFF BOOL "Compile with thread sanitizer")
 env_set(FDB_RELEASE OFF BOOL "This is a building of a final release")
 env_set(USE_CCACHE OFF BOOL "Use ccache for compilation if available")
 env_set(RELATIVE_DEBUG_PATHS OFF BOOL "Use relative file paths in debug info")
@@ -27,6 +29,9 @@ endif()
 if(STATIC_LINK_LIBCXX AND USE_TSAN)
   message(FATAL_ERROR "Unsupported configuration: STATIC_LINK_LIBCXX doesn't work with tsan")
 endif()
+if(STATIC_LINK_LIBCXX AND USE_MSAN)
+  message(FATAL_ERROR "Unsupported configuration: STATIC_LINK_LIBCXX doesn't work with msan")
+endif()
 
 set(rel_debug_paths OFF)
 if(RELATIVE_DEBUG_PATHS)
@@ -163,10 +168,28 @@ else()
   if(USE_ASAN)
     add_compile_options(
       -fsanitize=address
-      -DUSE_SANITIZER)
-    set(CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} -fsanitize=address")
-    set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -fsanitize=address")
-    set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fsanitize=address ${CMAKE_THREAD_LIBS_INIT}")
+      -DUSE_SANITIZER
+      -DADDRESS_SANITIZER
+    )
+    add_link_options(-fsanitize=address)
   endif()
 
+  if(USE_MSAN)
+    if(NOT CLANG)
+      message(FATAL_ERROR "Unsupported configuration: USE_MSAN only works with Clang")
+    endif()
+    add_compile_options(
+      -fsanitize=memory
+      -fsanitize-memory-track-origins=2
+      -DUSE_SANITIZER
+      -DMEMORY_SANITIZER
+    )
+    add_link_options(-fsanitize=memory)
+  endif()
+
+  if(USE_GCOV)
+    add_compile_options(--coverage -DUSE_GCOV)
+    add_link_options(--coverage)
+  endif()
+
   if(USE_UBSAN)
@@ -174,19 +197,20 @@ else()
       -fsanitize=undefined
       # TODO(atn34) Re-enable -fsanitize=alignment once https://github.com/apple/foundationdb/issues/1434 is resolved
       -fno-sanitize=alignment
-      -DUSE_SANITIZER)
-    set(CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} -fsanitize=undefined")
-    set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -fsanitize=undefined")
-    set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fsanitize=undefined ${CMAKE_THREAD_LIBS_INIT}")
+      -DUSE_SANITIZER
+      -DUNDEFINED_BEHAVIOR_SANITIZER
+    )
+    add_link_options(-fsanitize=undefined)
   endif()
 
   if(USE_TSAN)
     add_compile_options(
       -fsanitize=thread
-      -DUSE_SANITIZER)
-    set(CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} -fsanitize=thread")
-    set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -fsanitize=thread")
-    set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fsanitize=thread ${CMAKE_THREAD_LIBS_INIT}")
+      -DUSE_SANITIZER
+      -DTHREAD_SANITIZER
+      -DDYNAMIC_ANNOTATIONS_EXTERNAL_IMPL=1
+    )
+    add_link_options(-fsanitize=thread)
   endif()
 
   if(PORTABLE_BINARY)
@@ -360,4 +384,3 @@ else()
   endif()
 endif()
 endif()
-
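Aside: the -DADDRESS_SANITIZER, -DMEMORY_SANITIZER, -DTHREAD_SANITIZER, and -DUNDEFINED_BEHAVIOR_SANITIZER definitions added above let C++ code branch per sanitizer. A minimal illustrative sketch of that consumption pattern (not code from this commit; the function name is hypothetical):

    // Sketch only: gate sanitizer-specific logic on the macros defined above.
    #include <cstddef>
    #ifdef MEMORY_SANITIZER
    #include <sanitizer/msan_interface.h>
    #endif

    void markExternallyInitialized(void* data, std::size_t size) {
    #ifdef MEMORY_SANITIZER
        // Tell MSan that memory filled by non-instrumented code is defined.
        __msan_unpoison(data, size);
    #endif
        (void)data;
        (void)size;
    }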
@@ -22,6 +22,7 @@ using System;
 using System.Collections.Generic;
 using System.Linq;
 using System.Text;
+using System.Text.RegularExpressions;
 using System.Threading;
 using System.Xml.Linq;
 using System.IO;
@@ -148,7 +149,7 @@ namespace SummarizeTest
             {
                 var xout = new XElement("TestHarnessError",
                     new XAttribute("Severity", (int)Magnesium.Severity.SevError),
-                    new XAttribute("ErrorMessage", e.Message));
+                    new XAttribute("ErrorMessage", e.ToString()));
 
                 AppendXmlMessageToSummary("summary.xml", xout, true);
                 throw;
@@ -214,6 +215,30 @@ namespace SummarizeTest
             return ((System.Collections.IStructuralComparable)version1).CompareTo(version2, System.Collections.Generic.Comparer<int>.Default) >= 0;
         }
 
+        static bool versionLessThan(string ver1, string ver2) {
+            return !versionGreaterThanOrEqual(ver1, ver2);
+        }
+
+        static string getFdbserverVersion(string fdbserverName) {
+            using (var process = new System.Diagnostics.Process())
+            {
+                process.StartInfo.UseShellExecute = false;
+                process.StartInfo.RedirectStandardOutput = true;
+                process.StartInfo.FileName = fdbserverName;
+                process.StartInfo.Arguments = "--version";
+                process.StartInfo.RedirectStandardError = true;
+
+                process.Start();
+                var output = process.StandardOutput.ReadToEnd();
+                // If the process finished successfully, we call the parameterless WaitForExit to ensure that output buffers get flushed
+                process.WaitForExit();
+
+                var match = Regex.Match(output, @"v(\d+\.\d+\.\d+)");
+                if (match.Groups.Count < 1) return "";
+                return match.Groups[1].Value;
+            }
+        }
+
         static int Run(string fdbserverName, string tlsPluginFile, string testFolder, string summaryFileName, string errorFileName, string runDir, string oldBinaryFolder, bool useValgrind, int maxTries, bool traceToStdout = false, string tlsPluginFile_5_1 = "")
         {
             int seed = random.Next(1000000000);
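Aside: versionGreaterThanOrEqual compares dotted version strings component-wise, and the new getFdbserverVersion shells out to "fdbserver --version" and extracts "vX.Y.Z". A rough C++ equivalent of the comparison, for illustration only (names are hypothetical):

    #include <sstream>
    #include <string>
    #include <vector>

    // Parse "6.2.15" into {6, 2, 15}.
    static std::vector<int> parseVersion(const std::string& v) {
        std::vector<int> parts;
        std::stringstream ss(v);
        std::string token;
        while (std::getline(ss, token, '.')) parts.push_back(std::stoi(token));
        return parts;
    }

    static bool versionGreaterThanOrEqual(const std::string& a, const std::string& b) {
        // std::vector's operator>= is lexicographic, matching the C# structural compare.
        return parseVersion(a) >= parseVersion(b);
    }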
@@ -264,16 +289,23 @@ namespace SummarizeTest
                 }
                 uniqueFiles = uniqueFileSet.ToArray();
                 testFile = random.Choice(uniqueFiles);
-                string oldBinaryVersionLowerBound = "0.0.0";
+                // The on-disk format changed in 4.0.0, and 5.x can't load files from 3.x.
+                string oldBinaryVersionLowerBound = "4.0.0";
                 string lastFolderName = Path.GetFileName(Path.GetDirectoryName(testFile));
                 if (lastFolderName.Contains("from_") || lastFolderName.Contains("to_")) // Only perform upgrade/downgrade tests from certain versions
                 {
                     oldBinaryVersionLowerBound = lastFolderName.Split('_').Last();
                 }
+                string oldBinaryVersionUpperBound = getFdbserverVersion(fdbserverName);
+                if (versionGreaterThanOrEqual("4.0.0", oldBinaryVersionUpperBound)) {
+                    // If the binary under test is from 3.x, then allow upgrade tests from 3.x binaries.
+                    oldBinaryVersionLowerBound = "0.0.0";
+                }
                 string[] currentBinary = { fdbserverName };
                 IEnumerable<string> oldBinaries = Array.FindAll(
                     Directory.GetFiles(oldBinaryFolder),
-                    x => versionGreaterThanOrEqual(Path.GetFileName(x).Split('-').Last(), oldBinaryVersionLowerBound));
+                    x => versionGreaterThanOrEqual(Path.GetFileName(x).Split('-').Last(), oldBinaryVersionLowerBound)
+                    && versionLessThan(Path.GetFileName(x).Split('-').Last(), oldBinaryVersionUpperBound));
                 oldBinaries = oldBinaries.Concat(currentBinary);
                 oldServerName = random.Choice(oldBinaries.ToList<string>());
             }
@@ -38,7 +38,7 @@
 #include "fdbclient/BackupContainer.h"
 #include "fdbclient/KeyBackedTypes.h"
 #include "fdbclient/RunTransaction.actor.h"
-#include "fdbclient/BlobStore.h"
+#include "fdbclient/S3BlobStore.h"
 #include "fdbclient/json_spirit/json_spirit_writer_template.h"
 
 #include "flow/Platform.h"
@@ -1460,12 +1460,12 @@ ACTOR Future<std::string> getLayerStatus(Reference<ReadYourWritesTransaction> tr
     o.create("configured_workers") = CLIENT_KNOBS->BACKUP_TASKS_PER_AGENT;
 
     if(exe == EXE_AGENT) {
-        static BlobStoreEndpoint::Stats last_stats;
+        static S3BlobStoreEndpoint::Stats last_stats;
         static double last_ts = 0;
-        BlobStoreEndpoint::Stats current_stats = BlobStoreEndpoint::s_stats;
+        S3BlobStoreEndpoint::Stats current_stats = S3BlobStoreEndpoint::s_stats;
         JSONDoc blobstats = o.create("blob_stats");
         blobstats.create("total") = current_stats.getJSON();
-        BlobStoreEndpoint::Stats diff = current_stats - last_stats;
+        S3BlobStoreEndpoint::Stats diff = current_stats - last_stats;
         json_spirit::mObject diffObj = diff.getJSON();
         if(last_ts > 0)
             diffObj["bytes_per_second"] = double(current_stats.bytes_sent - last_stats.bytes_sent) / (now() - last_ts);
@@ -1,5 +1,5 @@
 /*
- * AsyncFileBlobStore.actor.cpp
+ * AsyncFileS3BlobStore.actor.cpp
  *
  * This source file is part of the FoundationDB open source project
  *
@@ -18,40 +18,37 @@
  * limitations under the License.
  */
 
-#include "fdbclient/AsyncFileBlobStore.actor.h"
+#include "fdbclient/AsyncFileS3BlobStore.actor.h"
 #include "fdbrpc/AsyncFileReadAhead.actor.h"
 #include "flow/UnitTest.h"
 #include "flow/actorcompiler.h" // has to be last include
 
-Future<int64_t> AsyncFileBlobStoreRead::size() const {
-    if(!m_size.isValid())
-        m_size = m_bstore->objectSize(m_bucket, m_object);
+Future<int64_t> AsyncFileS3BlobStoreRead::size() const {
+    if (!m_size.isValid()) m_size = m_bstore->objectSize(m_bucket, m_object);
     return m_size;
 }
 
-Future<int> AsyncFileBlobStoreRead::read( void *data, int length, int64_t offset ) {
+Future<int> AsyncFileS3BlobStoreRead::read(void* data, int length, int64_t offset) {
     return m_bstore->readObject(m_bucket, m_object, data, length, offset);
 }
 
-
 ACTOR Future<Void> sendStuff(int id, Reference<IRateControl> t, int bytes) {
     printf("Starting fake sender %d which will send send %d bytes.\n", id, bytes);
     state double ts = timer();
     state int total = 0;
-    while(total < bytes) {
-        state int r = std::min<int>(deterministicRandom()->randomInt(0,1000), bytes - total);
+    while (total < bytes) {
+        state int r = std::min<int>(deterministicRandom()->randomInt(0, 1000), bytes - total);
         wait(t->getAllowance(r));
         total += r;
     }
     double dur = timer() - ts;
-    printf("Sender %d: Sent %d in %fs, %f/s\n", id, total, dur, total/dur);
+    printf("Sender %d: Sent %d in %fs, %f/s\n", id, total, dur, total / dur);
     return Void();
 }
 
 TEST_CASE("/backup/throttling") {
     // Test will not work in simulation.
-    if(g_network->isSimulated())
-        return Void();
+    if (g_network->isSimulated()) return Void();
 
     state int limit = 100000;
     state Reference<IRateControl> t(new SpeedLimit(limit, 1));
@@ -62,13 +59,18 @@ TEST_CASE("/backup/throttling") {
     state int total = 0;
     int s;
     s = 500000;
-    f.push_back(sendStuff(id++, t, s)); total += s;
-    f.push_back(sendStuff(id++, t, s)); total += s;
+    f.push_back(sendStuff(id++, t, s));
+    total += s;
+    f.push_back(sendStuff(id++, t, s));
+    total += s;
     s = 50000;
-    f.push_back(sendStuff(id++, t, s)); total += s;
-    f.push_back(sendStuff(id++, t, s)); total += s;
+    f.push_back(sendStuff(id++, t, s));
+    total += s;
+    f.push_back(sendStuff(id++, t, s));
+    total += s;
     s = 5000;
-    f.push_back(sendStuff(id++, t, s)); total += s;
+    f.push_back(sendStuff(id++, t, s));
+    total += s;
 
     wait(waitForAll(f));
     double dur = timer() - ts;
@@ -78,5 +80,3 @@ TEST_CASE("/backup/throttling") {
 
     return Void();
 }
-
-
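Aside: the throttling test above exercises the IRateControl/SpeedLimit pattern; each fake sender requests an allowance before consuming bandwidth. In isolation the pattern looks roughly like this (sketch; assumes the interfaces shown above, and must run inside an ACTOR):

    // Ask the limiter for budget before each send; getAllowance returns a
    // Future that becomes ready once `chunkBytes` fit in the current window.
    Reference<IRateControl> limiter(new SpeedLimit(100000 /* bytes */, 1 /* second window */));
    wait(limiter->getAllowance(chunkBytes));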
@@ -1,5 +1,5 @@
 /*
- * AsyncFileBlobStore.actor.h
+ * AsyncFileS3BlobStore.actor.h
  *
  * This source file is part of the FoundationDB open source project
  *
@@ -20,12 +20,13 @@
 
 #pragma once
 
-// When actually compiled (NO_INTELLISENSE), include the generated version of this file. In intellisense use the source version.
+// When actually compiled (NO_INTELLISENSE), include the generated version of this file. In intellisense use the source
+// version.
 #if defined(NO_INTELLISENSE) && !defined(FDBRPC_ASYNCFILEBLOBSTORE_ACTOR_G_H)
-#define FDBRPC_ASYNCFILEBLOBSTORE_ACTOR_G_H
-#include "fdbclient/AsyncFileBlobStore.actor.g.h"
-#elif !defined(FDBRPC_ASYNCFILEBLOBSTORE_ACTOR_H)
-#define FDBRPC_ASYNCFILEBLOBSTORE_ACTOR_H
+#define FDBRPC_ASYNCFILEBLOBSTORE_ACTOR_G_H
+#include "fdbclient/AsyncFileS3BlobStore.actor.g.h"
+#elif !defined(FDBRPC_ASYNCFILES3BLOBSTORE_ACTOR_H)
+#define FDBRPC_ASYNCFILES3BLOBSTORE_ACTOR_H
 
 #include <sstream>
 #include <time.h>
@@ -34,55 +35,54 @@
 #include "flow/serialize.h"
 #include "flow/Net2Packet.h"
 #include "fdbrpc/IRateControl.h"
-#include "fdbclient/BlobStore.h"
+#include "fdbclient/S3BlobStore.h"
 #include "fdbclient/md5/md5.h"
 #include "fdbclient/libb64/encode.h"
-#include "flow/actorcompiler.h" // This must be the last #include.
+#include "flow/actorcompiler.h" // This must be the last #include.
 
-ACTOR template<typename T> static Future<T> joinErrorGroup(Future<T> f, Promise<Void> p) {
+ACTOR template <typename T>
+static Future<T> joinErrorGroup(Future<T> f, Promise<Void> p) {
     try {
        wait(success(f) || p.getFuture());
        return f.get();
-    } catch(Error &e) {
-       if(p.canBeSet())
-           p.sendError(e);
+    } catch (Error& e) {
+       if (p.canBeSet()) p.sendError(e);
        throw;
    }
 }
 // This class represents a write-only file that lives in an S3-style blob store. It writes using the REST API,
 // using multi-part upload and beginning to transfer each part as soon as it is large enough.
 // All write operations file operations must be sequential and contiguous.
-// Limits on part sizes, upload speed, and concurrent uploads are taken from the BlobStoreEndpoint being used.
-class AsyncFileBlobStoreWrite : public IAsyncFile, public ReferenceCounted<AsyncFileBlobStoreWrite> {
+// Limits on part sizes, upload speed, and concurrent uploads are taken from the S3BlobStoreEndpoint being used.
+class AsyncFileS3BlobStoreWrite : public IAsyncFile, public ReferenceCounted<AsyncFileS3BlobStoreWrite> {
 public:
-    virtual void addref() { ReferenceCounted<AsyncFileBlobStoreWrite>::addref(); }
-    virtual void delref() { ReferenceCounted<AsyncFileBlobStoreWrite>::delref(); }
+    virtual void addref() { ReferenceCounted<AsyncFileS3BlobStoreWrite>::addref(); }
+    virtual void delref() { ReferenceCounted<AsyncFileS3BlobStoreWrite>::delref(); }
 
     struct Part : ReferenceCounted<Part> {
-        Part(int n, int minSize) : number(n), writer(content.getWriteBuffer(minSize), nullptr, Unversioned()), length(0) {
+        Part(int n, int minSize)
+          : number(n), writer(content.getWriteBuffer(minSize), nullptr, Unversioned()), length(0) {
            etag = std::string();
            ::MD5_Init(&content_md5_buf);
        }
-        virtual ~Part() {
-           etag.cancel();
-        }
+        virtual ~Part() { etag.cancel(); }
        Future<std::string> etag;
        int number;
        UnsentPacketQueue content;
        std::string md5string;
        PacketWriter writer;
        int length;
-        void write(const uint8_t *buf, int len) {
+        void write(const uint8_t* buf, int len) {
            writer.serializeBytes(buf, len);
            ::MD5_Update(&content_md5_buf, buf, len);
            length += len;
        }
        // MD5 sum can only be finalized once, further calls will do nothing so new writes will be reflected in the sum.
        void finalizeMD5() {
-           if(md5string.empty()) {
+           if (md5string.empty()) {
               std::string sumBytes;
               sumBytes.resize(16);
-              ::MD5_Final((unsigned char *)sumBytes.data(), &content_md5_buf);
+              ::MD5_Final((unsigned char*)sumBytes.data(), &content_md5_buf);
               md5string = base64::encoder::from_string(sumBytes);
               md5string.resize(md5string.size() - 1);
           }
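Aside: the joinErrorGroup helper above ties an individual future to a shared error Promise; the returned future fails as soon as either its own work or any sibling fails, and its own error is broadcast to the group. It is used later in this header roughly like so (excerpted pattern, simplified):

    // One failed part upload fails the whole file write, and vice versa:
    part->etag = joinErrorGroup(doPartUpload(f, part), f->m_error);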
@@ -94,71 +94,75 @@ public:
 
     Future<int> read(void* data, int length, int64_t offset) override { throw file_not_readable(); }
 
-    ACTOR static Future<Void> write_impl(Reference<AsyncFileBlobStoreWrite> f, const uint8_t *data, int length) {
-        state Part *p = f->m_parts.back().getPtr();
-        // If this write will cause the part to cross the min part size boundary then write to the boundary and start a new part.
-        while(p->length + length >= f->m_bstore->knobs.multipart_min_part_size) {
+    ACTOR static Future<Void> write_impl(Reference<AsyncFileS3BlobStoreWrite> f, const uint8_t* data, int length) {
+        state Part* p = f->m_parts.back().getPtr();
+        // If this write will cause the part to cross the min part size boundary then write to the boundary and start a
+        // new part.
+        while (p->length + length >= f->m_bstore->knobs.multipart_min_part_size) {
            // Finish off this part
            int finishlen = f->m_bstore->knobs.multipart_min_part_size - p->length;
-           p->write((const uint8_t *)data, finishlen);
+           p->write((const uint8_t*)data, finishlen);
 
            // Adjust source buffer args
            length -= finishlen;
-           data = (const uint8_t *)data + finishlen;
+           data = (const uint8_t*)data + finishlen;
 
            // End current part (and start new one)
            wait(f->endCurrentPart(f.getPtr(), true));
            p = f->m_parts.back().getPtr();
        }
 
-        p->write((const uint8_t *)data, length);
+        p->write((const uint8_t*)data, length);
        return Void();
    }
 
     Future<Void> write(void const* data, int length, int64_t offset) override {
-        if(offset != m_cursor)
-            throw non_sequential_op();
+        if (offset != m_cursor) throw non_sequential_op();
        m_cursor += length;
 
-        return m_error.getFuture() || write_impl(Reference<AsyncFileBlobStoreWrite>::addRef(this), (const uint8_t *)data, length);
+        return m_error.getFuture() ||
+               write_impl(Reference<AsyncFileS3BlobStoreWrite>::addRef(this), (const uint8_t*)data, length);
    }
 
     Future<Void> truncate(int64_t size) override {
-        if(size != m_cursor)
-            return non_sequential_op();
+        if (size != m_cursor) return non_sequential_op();
        return Void();
    }
 
-    ACTOR static Future<std::string> doPartUpload(AsyncFileBlobStoreWrite *f, Part *p) {
+    ACTOR static Future<std::string> doPartUpload(AsyncFileS3BlobStoreWrite* f, Part* p) {
        p->finalizeMD5();
        std::string upload_id = wait(f->getUploadID());
-        std::string etag = wait(f->m_bstore->uploadPart(f->m_bucket, f->m_object, upload_id, p->number, &p->content, p->length, p->md5string));
+        std::string etag = wait(f->m_bstore->uploadPart(f->m_bucket, f->m_object, upload_id, p->number, &p->content,
+                                                        p->length, p->md5string));
        return etag;
    }
 
-    ACTOR static Future<Void> doFinishUpload(AsyncFileBlobStoreWrite* f) {
+    ACTOR static Future<Void> doFinishUpload(AsyncFileS3BlobStoreWrite* f) {
        // If there is only 1 part then it has not yet been uploaded so just write the whole file at once.
-        if(f->m_parts.size() == 1) {
+        if (f->m_parts.size() == 1) {
           Reference<Part> part = f->m_parts.back();
           part->finalizeMD5();
-           wait(f->m_bstore->writeEntireFileFromBuffer(f->m_bucket, f->m_object, &part->content, part->length, part->md5string));
+           wait(f->m_bstore->writeEntireFileFromBuffer(f->m_bucket, f->m_object, &part->content, part->length,
+                                                       part->md5string));
           return Void();
       }
 
        // There are at least 2 parts. End the last part (which could be empty)
        wait(f->endCurrentPart(f));
 
-        state BlobStoreEndpoint::MultiPartSetT partSet;
+        state S3BlobStoreEndpoint::MultiPartSetT partSet;
        state std::vector<Reference<Part>>::iterator p;
 
-        // Wait for all the parts to be done to get their ETags, populate the partSet required to finish the object upload.
-        for(p = f->m_parts.begin(); p != f->m_parts.end(); ++p) {
+        // Wait for all the parts to be done to get their ETags, populate the partSet required to finish the object
+        // upload.
+        for (p = f->m_parts.begin(); p != f->m_parts.end(); ++p) {
           std::string tag = wait((*p)->etag);
-           if((*p)->length > 0) // The last part might be empty and has to be omitted.
+           if ((*p)->length > 0) // The last part might be empty and has to be omitted.
              partSet[(*p)->number] = tag;
       }
 
-        // No need to wait for the upload ID here because the above loop waited for all the parts and each part required the upload ID so it is ready
+        // No need to wait for the upload ID here because the above loop waited for all the parts and each part required
+        // the upload ID so it is ready
        wait(f->m_bstore->finishMultiPartUpload(f->m_bucket, f->m_object, f->m_upload_id.get(), partSet));
 
        return Void();
@@ -167,43 +171,43 @@ public:
     // Ready once all data has been sent AND acknowledged from the remote side
     Future<Void> sync() override {
        // Only initiate the finish operation once, and also prevent further writing.
-        if(!m_finished.isValid()) {
+        if (!m_finished.isValid()) {
           m_finished = doFinishUpload(this);
-           m_cursor = -1;  // Cause future write attempts to fail
+           m_cursor = -1; // Cause future write attempts to fail
       }
 
        return m_finished;
    }
 
-    //
-    // Flush can't really do what the caller would "want" for a blob store file. The caller would probably notionally want
-    // all bytes written to be at least in transit to the blob store, but that is not very feasible. The blob store
-    // has a minimum size requirement for all but the final part, and parts must be sent with a header that specifies
-    // their size. So in the case of a write buffer that does not meet the part minimum size the part could be sent
-    // but then if there is any more data written then that part needs to be sent again in its entirety. So a client
-    // that calls flush often could generate far more blob store write traffic than they intend to.
+    // Flush can't really do what the caller would "want" for a blob store file. The caller would probably notionally
+    // want all bytes written to be at least in transit to the blob store, but that is not very feasible. The blob
+    // store has a minimum size requirement for all but the final part, and parts must be sent with a header that
+    // specifies their size. So in the case of a write buffer that does not meet the part minimum size the part could
+    // be sent but then if there is any more data written then that part needs to be sent again in its entirety. So a
+    // client that calls flush often could generate far more blob store write traffic than they intend to.
     Future<Void> flush() override { return Void(); }
 
     Future<int64_t> size() const override { return m_cursor; }
 
     Future<Void> readZeroCopy(void** data, int* length, int64_t offset) override {
-        TraceEvent(SevError, "ReadZeroCopyNotSupported").detail("FileType", "BlobStoreWrite");
+        TraceEvent(SevError, "ReadZeroCopyNotSupported").detail("FileType", "S3BlobStoreWrite");
        return platform_error();
    }
     void releaseZeroCopy(void* data, int length, int64_t offset) override {}
 
     int64_t debugFD() const override { return -1; }
 
-    ~AsyncFileBlobStoreWrite() override {
+    ~AsyncFileS3BlobStoreWrite() override {
        m_upload_id.cancel();
        m_finished.cancel();
-        m_parts.clear();  // Contains futures
+        m_parts.clear(); // Contains futures
    }
 
     std::string getFilename() const override { return m_object; }
 
 private:
-    Reference<BlobStoreEndpoint> m_bstore;
+    Reference<S3BlobStoreEndpoint> m_bstore;
     std::string m_bucket;
     std::string m_object;
 
+
@@ -216,48 +220,46 @@ private:
     FlowLock m_concurrentUploads;
 
     // End the current part and start uploading it, but also wait for a part to finish if too many are in transit.
-    ACTOR static Future<Void> endCurrentPart(AsyncFileBlobStoreWrite *f, bool startNew = false) {
-        if(f->m_parts.back()->length == 0)
-            return Void();
+    ACTOR static Future<Void> endCurrentPart(AsyncFileS3BlobStoreWrite* f, bool startNew = false) {
+        if (f->m_parts.back()->length == 0) return Void();
 
        // Wait for an upload slot to be available
        wait(f->m_concurrentUploads.take());
 
-        // Do the upload, and if it fails forward errors to m_error and also stop if anything else sends an error to m_error
-        // Also, hold a releaser for the concurrent upload slot while all that is going on.
+        // Do the upload, and if it fails forward errors to m_error and also stop if anything else sends an error to
+        // m_error Also, hold a releaser for the concurrent upload slot while all that is going on.
        auto releaser = std::make_shared<FlowLock::Releaser>(f->m_concurrentUploads, 1);
        f->m_parts.back()->etag =
            holdWhile(std::move(releaser), joinErrorGroup(doPartUpload(f, f->m_parts.back().getPtr()), f->m_error));
 
        // Make a new part to write to
-        if(startNew)
-            f->m_parts.push_back(Reference<Part>(new Part(f->m_parts.size() + 1, f->m_bstore->knobs.multipart_min_part_size)));
+        if (startNew)
+            f->m_parts.push_back(
+                Reference<Part>(new Part(f->m_parts.size() + 1, f->m_bstore->knobs.multipart_min_part_size)));
 
        return Void();
    }
 
     Future<std::string> getUploadID() {
-        if(!m_upload_id.isValid())
-            m_upload_id = m_bstore->beginMultiPartUpload(m_bucket, m_object);
+        if (!m_upload_id.isValid()) m_upload_id = m_bstore->beginMultiPartUpload(m_bucket, m_object);
        return m_upload_id;
    }
 
 public:
-    AsyncFileBlobStoreWrite(Reference<BlobStoreEndpoint> bstore, std::string bucket, std::string object)
-      : m_bstore(bstore), m_bucket(bucket), m_object(object), m_cursor(0), m_concurrentUploads(bstore->knobs.concurrent_writes_per_file) {
+    AsyncFileS3BlobStoreWrite(Reference<S3BlobStoreEndpoint> bstore, std::string bucket, std::string object)
+      : m_bstore(bstore), m_bucket(bucket), m_object(object), m_cursor(0),
+        m_concurrentUploads(bstore->knobs.concurrent_writes_per_file) {
 
        // Add first part
        m_parts.push_back(Reference<Part>(new Part(1, m_bstore->knobs.multipart_min_part_size)));
    }
-
 };
 
-
 // This class represents a read-only file that lives in an S3-style blob store. It reads using the REST API.
-class AsyncFileBlobStoreRead : public IAsyncFile, public ReferenceCounted<AsyncFileBlobStoreRead> {
+class AsyncFileS3BlobStoreRead : public IAsyncFile, public ReferenceCounted<AsyncFileS3BlobStoreRead> {
 public:
-    virtual void addref() { ReferenceCounted<AsyncFileBlobStoreRead>::addref(); }
-    virtual void delref() { ReferenceCounted<AsyncFileBlobStoreRead>::delref(); }
+    virtual void addref() { ReferenceCounted<AsyncFileS3BlobStoreRead>::addref(); }
+    virtual void delref() { ReferenceCounted<AsyncFileS3BlobStoreRead>::delref(); }
 
     Future<int> read(void* data, int length, int64_t offset) override;
 
@@ -270,7 +272,7 @@ public:
     Future<int64_t> size() const override;
 
     Future<Void> readZeroCopy(void** data, int* length, int64_t offset) override {
-        TraceEvent(SevError, "ReadZeroCopyNotSupported").detail("FileType", "BlobStoreRead");
+        TraceEvent(SevError, "ReadZeroCopyNotSupported").detail("FileType", "S3BlobStoreRead");
        return platform_error();
    }
     void releaseZeroCopy(void* data, int length, int64_t offset) override {}
@@ -279,17 +281,15 @@ public:
 
     std::string getFilename() const override { return m_object; }
 
-    virtual ~AsyncFileBlobStoreRead() {}
+    virtual ~AsyncFileS3BlobStoreRead() {}
 
-    Reference<BlobStoreEndpoint> m_bstore;
+    Reference<S3BlobStoreEndpoint> m_bstore;
     std::string m_bucket;
     std::string m_object;
     mutable Future<int64_t> m_size;
 
-    AsyncFileBlobStoreRead(Reference<BlobStoreEndpoint> bstore, std::string bucket, std::string object)
-      : m_bstore(bstore), m_bucket(bucket), m_object(object) {
-    }
-
+    AsyncFileS3BlobStoreRead(Reference<S3BlobStoreEndpoint> bstore, std::string bucket, std::string object)
+      : m_bstore(bstore), m_bucket(bucket), m_object(object) {}
 };
 
 #include "flow/unactorcompiler.h"
@@ -0,0 +1,88 @@
+/*
+ * AsyncTaskThread.actor.cpp
+ *
+ * This source file is part of the FoundationDB open source project
+ *
+ * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "fdbclient/AsyncTaskThread.h"
+#include "flow/UnitTest.h"
+#include "flow/actorcompiler.h" // This must be the last #include.
+
+namespace {
+
+class TerminateTask final : public IAsyncTask {
+public:
+    void operator()() override { ASSERT(false); }
+    bool isTerminate() const override { return true; }
+};
+
+ACTOR Future<Void> asyncTaskThreadClient(AsyncTaskThread* asyncTaskThread, int* sum, int count) {
+    state int i = 0;
+    for (; i < count; ++i) {
+        wait(asyncTaskThread->execAsync([sum = sum] {
+            ++(*sum);
+            return Void();
+        }));
+    }
+    return Void();
+}
+
+} // namespace
+
+const double AsyncTaskThread::meanDelay = 0.01;
+
+AsyncTaskThread::AsyncTaskThread() : thread([this] { run(this); }) {}
+
+AsyncTaskThread::~AsyncTaskThread() {
+    bool wakeUp = false;
+    {
+        std::lock_guard<std::mutex> g(m);
+        wakeUp = queue.push(std::make_shared<TerminateTask>());
+    }
+    if (wakeUp) {
+        cv.notify_one();
+    }
+    thread.join();
+}
+
+void AsyncTaskThread::run(AsyncTaskThread* self) {
+    while (true) {
+        std::shared_ptr<IAsyncTask> task;
+        {
+            std::unique_lock<std::mutex> lk(self->m);
+            self->cv.wait(lk, [self] { return !self->queue.canSleep(); });
+            task = self->queue.pop().get();
+            if (task->isTerminate()) {
+                return;
+            }
+        }
+        (*task)();
+    }
+}
+
+TEST_CASE("/asynctaskthread/add") {
+    state int sum = 0;
+    state AsyncTaskThread asyncTaskThread;
+    std::vector<Future<Void>> clients;
+    clients.reserve(10);
+    for (int i = 0; i < 10; ++i) {
+        clients.push_back(asyncTaskThreadClient(&asyncTaskThread, &sum, 100));
+    }
+    wait(waitForAll(clients));
+    ASSERT(sum == 1000);
+    return Void();
+}
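Aside on the queue handshake above: ThreadSafeQueue::push reports whether the consumer may be sleeping, so producers (the destructor and addTask in the header) only signal the condition variable when needed, and do so after releasing the lock. The pattern in isolation (sketch, not new code from the commit):

    bool wakeUp;
    {
        std::lock_guard<std::mutex> g(m); // pairs with the consumer's unique_lock in run()
        wakeUp = queue.push(task);        // true if the consumer might be sleeping
    }
    if (wakeUp) cv.notify_one();          // notify outside the lock to avoid a wasted wakeup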
@@ -0,0 +1,98 @@
+/*
+ * AsyncTaskThread.h
+ *
+ * This source file is part of the FoundationDB open source project
+ *
+ * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __ASYNC_TASK_THREAD_H__
+#define __ASYNC_TASK_THREAD_H__
+
+#include <thread>
+#include <memory>
+#include <mutex>
+
+#include "flow/network.h"
+#include "flow/ThreadHelper.actor.h"
+#include "flow/ThreadSafeQueue.h"
+
+class IAsyncTask {
+public:
+    virtual void operator()() = 0;
+    virtual ~IAsyncTask() = default;
+    virtual bool isTerminate() const = 0;
+};
+
+template <class F>
+class AsyncTask final : public IAsyncTask {
+    F func;
+
+public:
+    AsyncTask(const F& func) : func(func) {}
+
+    void operator()() override { func(); }
+    bool isTerminate() const override { return false; }
+};
+
+class AsyncTaskThread {
+    ThreadSafeQueue<std::shared_ptr<IAsyncTask>> queue;
+    std::condition_variable cv;
+    std::mutex m;
+    std::thread thread;
+
+    static void run(AsyncTaskThread* self);
+
+    template <class F>
+    void addTask(const F& func) {
+        bool wakeUp = false;
+        {
+            std::lock_guard<std::mutex> g(m);
+            wakeUp = queue.push(std::make_shared<AsyncTask<F>>(func));
+        }
+        if (wakeUp) {
+            cv.notify_one();
+        }
+    }
+
+    static const double meanDelay;
+
+public:
+    AsyncTaskThread();
+
+    // Warning: This destructor can hang if a task hangs, so it is
+    // up to the caller to prevent tasks from hanging indefinitely
+    ~AsyncTaskThread();
+
+    template <class F>
+    auto execAsync(const F& func, TaskPriority priority = TaskPriority::DefaultOnMainThread)
+        -> Future<decltype(func())> {
+        if (g_network->isSimulated()) {
+            return map(delayJittered(meanDelay), [func](Void _) { return func(); });
+        }
+        Promise<decltype(func())> promise;
+        addTask([promise, func, priority] {
+            try {
+                auto funcResult = func();
+                onMainThreadVoid([promise, funcResult] { promise.send(funcResult); }, nullptr, priority);
+            } catch (Error& e) {
+                onMainThreadVoid([promise, e] { promise.sendError(e); }, nullptr, priority);
+            }
+        });
+        return promise.getFuture();
+    }
+};
+
+#endif
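Usage sketch for the header above (hypothetical call site): execAsync runs a blocking functor on the task thread and marshals the result back to the network thread as a Future; in simulation it degrades to a jittered delay.

    AsyncTaskThread taskThread;
    Future<int> f = taskThread.execAsync([] {
        // Blocking work happens off the network thread.
        return 42;
    });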
[File diff suppressed because it is too large]
@@ -18,8 +18,8 @@
  * limitations under the License.
  */
 
-#ifndef FDBCLIENT_BackupContainer_H
-#define FDBCLIENT_BackupContainer_H
+#ifndef FDBCLIENT_BACKUP_CONTAINER_H
+#define FDBCLIENT_BACKUP_CONTAINER_H
 #pragma once
 
 #include "flow/flow.h"
@@ -40,7 +40,7 @@ Future<Version> timeKeeperVersionFromDatetime(std::string const &datetime, Datab
 // TODO: Move the log file and range file format encoding/decoding stuff to this file and behind interfaces.
 class IBackupFile {
 public:
-    IBackupFile(std::string fileName) : m_fileName(fileName), m_offset(0) {}
+    IBackupFile(const std::string& fileName) : m_fileName(fileName), m_offset(0) {}
     virtual ~IBackupFile() {}
     // Backup files are append-only and cannot have more than 1 append outstanding at once.
     virtual Future<Void> append(const void *data, int len) = 0;
@@ -247,7 +247,7 @@ public:
                              int64_t totalBytes) = 0;
 
     // Open a file for read by name
-    virtual Future<Reference<IAsyncFile>> readFile(std::string name) = 0;
+    virtual Future<Reference<IAsyncFile>> readFile(const std::string& name) = 0;
 
     // Returns the key ranges in the snapshot file. This is an expensive function
     // and should only be used in simulation for sanity check.
@@ -289,9 +289,9 @@ public:
                               bool logsOnly = false, Version beginVersion = -1) = 0;
 
     // Get an IBackupContainer based on a container spec string
-    static Reference<IBackupContainer> openContainer(std::string url);
+    static Reference<IBackupContainer> openContainer(const std::string& url);
     static std::vector<std::string> getURLFormats();
-    static Future<std::vector<std::string>> listContainers(std::string baseURL);
+    static Future<std::vector<std::string>> listContainers(const std::string& baseURL);
 
     std::string getURL() const {
        return URL;
@@ -303,4 +303,4 @@ private:
     std::string URL;
 };
 
-#endif
+#endif
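Aside: the BackupContainer.h changes above are mechanical; string parameters move from pass-by-value to pass-by-const-reference, avoiding a copy at each call site. Illustration (not from the diff; names are hypothetical):

    void byValue(std::string url);        // may copy the argument on every call
    void byRef(const std::string& url);   // binds directly; no copy, no ownership change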
@@ -0,0 +1,279 @@
+/*
+ * BackupContainerAzureBlobStore.actor.cpp
+ *
+ * This source file is part of the FoundationDB open source project
+ *
+ * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "fdbclient/BackupContainerAzureBlobStore.h"
+
+#include "flow/actorcompiler.h" // This must be the last #include.
+
+class BackupContainerAzureBlobStoreImpl {
+public:
+    using AzureClient = azure::storage_lite::blob_client;
+
+    class ReadFile final : public IAsyncFile, ReferenceCounted<ReadFile> {
+        AsyncTaskThread& asyncTaskThread;
+        std::string containerName;
+        std::string blobName;
+        AzureClient* client;
+
+    public:
+        ReadFile(AsyncTaskThread& asyncTaskThread, const std::string& containerName, const std::string& blobName,
+                 AzureClient* client)
+          : asyncTaskThread(asyncTaskThread), containerName(containerName), blobName(blobName), client(client) {}
+
+        void addref() override { ReferenceCounted<ReadFile>::addref(); }
+        void delref() override { ReferenceCounted<ReadFile>::delref(); }
+        Future<int> read(void* data, int length, int64_t offset) {
+            return asyncTaskThread.execAsync([client = this->client, containerName = this->containerName,
+                                              blobName = this->blobName, data, length, offset] {
+                std::ostringstream oss(std::ios::out | std::ios::binary);
+                client->download_blob_to_stream(containerName, blobName, offset, length, oss);
+                auto str = oss.str();
+                memcpy(data, str.c_str(), str.size());
+                return static_cast<int>(str.size());
+            });
+        }
+        Future<Void> zeroRange(int64_t offset, int64_t length) override { throw file_not_writable(); }
+        Future<Void> write(void const* data, int length, int64_t offset) override { throw file_not_writable(); }
+        Future<Void> truncate(int64_t size) override { throw file_not_writable(); }
+        Future<Void> sync() override { throw file_not_writable(); }
+        Future<int64_t> size() const override {
+            return asyncTaskThread.execAsync([client = this->client, containerName = this->containerName,
+                                              blobName = this->blobName] {
+                return static_cast<int64_t>(client->get_blob_properties(containerName, blobName).get().response().size);
+            });
+        }
+        std::string getFilename() const override { return blobName; }
+        int64_t debugFD() const override { return 0; }
+    };
+
+    class WriteFile final : public IAsyncFile, ReferenceCounted<WriteFile> {
+        AsyncTaskThread& asyncTaskThread;
+        AzureClient* client;
+        std::string containerName;
+        std::string blobName;
+        int64_t m_cursor{ 0 };
+        // Ideally this buffer should not be a string, but
+        // the Azure SDK only supports/tests uploading to append
+        // blobs from a stringstream.
+        std::string buffer;
+
+        static constexpr size_t bufferLimit = 1 << 20;
+
+    public:
+        WriteFile(AsyncTaskThread& asyncTaskThread, const std::string& containerName, const std::string& blobName,
+                  AzureClient* client)
+          : asyncTaskThread(asyncTaskThread), containerName(containerName), blobName(blobName), client(client) {}
+
+        void addref() override { ReferenceCounted<WriteFile>::addref(); }
+        void delref() override { ReferenceCounted<WriteFile>::delref(); }
+        Future<int> read(void* data, int length, int64_t offset) override { throw file_not_readable(); }
+        Future<Void> write(void const* data, int length, int64_t offset) override {
+            if (offset != m_cursor) {
+                throw non_sequential_op();
+            }
+            m_cursor += length;
+            auto p = static_cast<char const*>(data);
+            buffer.insert(buffer.cend(), p, p + length);
+            if (buffer.size() > bufferLimit) {
+                return sync();
+            } else {
+                return Void();
+            }
+        }
+        Future<Void> truncate(int64_t size) override {
+            if (size != m_cursor) {
+                throw non_sequential_op();
+            }
+            return Void();
+        }
+        Future<Void> sync() override {
+            auto movedBuffer = std::move(buffer);
+            buffer.clear();
+            return asyncTaskThread.execAsync([client = this->client, containerName = this->containerName,
+                                              blobName = this->blobName, buffer = std::move(movedBuffer)] {
+                std::istringstream iss(std::move(buffer));
+                auto resp = client->append_block_from_stream(containerName, blobName, iss).get();
+                return Void();
+            });
+        }
+        Future<int64_t> size() const override {
+            return asyncTaskThread.execAsync(
+                [client = this->client, containerName = this->containerName, blobName = this->blobName] {
+                    auto resp = client->get_blob_properties(containerName, blobName).get().response();
+                    ASSERT(resp.valid()); // TODO: Should instead throw here
+                    return static_cast<int64_t>(resp.size);
+                });
+        }
+        std::string getFilename() const override { return blobName; }
+        int64_t debugFD() const override { return -1; }
+    };
+
+    class BackupFile final : public IBackupFile, ReferenceCounted<BackupFile> {
+        Reference<IAsyncFile> m_file;
+
+    public:
+        BackupFile(const std::string& fileName, Reference<IAsyncFile> file) : IBackupFile(fileName), m_file(file) {}
+        Future<Void> append(const void* data, int len) override {
+            Future<Void> r = m_file->write(data, len, m_offset);
+            m_offset += len;
+            return r;
+        }
+        Future<Void> finish() override {
+            Reference<BackupFile> self = Reference<BackupFile>::addRef(this);
+            return map(m_file->sync(), [=](Void _) {
+                self->m_file.clear();
+                return Void();
+            });
+        }
+        void addref() override { ReferenceCounted<BackupFile>::addref(); }
+        void delref() override { ReferenceCounted<BackupFile>::delref(); }
+    };
+
+    static bool isDirectory(const std::string& blobName) { return blobName.size() && blobName.back() == '/'; }
+
+    ACTOR static Future<Reference<IAsyncFile>> readFile(BackupContainerAzureBlobStore* self, std::string fileName) {
+        bool exists = wait(self->blobExists(fileName));
+        if (!exists) {
+            throw file_not_found();
+        }
+        return Reference<IAsyncFile>(
+            new ReadFile(self->asyncTaskThread, self->containerName, fileName, self->client.get()));
+    }
+
+    ACTOR static Future<Reference<IBackupFile>> writeFile(BackupContainerAzureBlobStore* self, std::string fileName) {
+        wait(self->asyncTaskThread.execAsync(
+            [client = self->client.get(), containerName = self->containerName, fileName = fileName] {
+                auto outcome = client->create_append_blob(containerName, fileName).get();
+                return Void();
+            }));
+        return Reference<IBackupFile>(
+            new BackupFile(fileName, Reference<IAsyncFile>(new WriteFile(self->asyncTaskThread, self->containerName,
+                                                                         fileName, self->client.get()))));
+    }
+
+    static void listFiles(AzureClient* client, const std::string& containerName, const std::string& path,
+                          std::function<bool(std::string const&)> folderPathFilter,
+                          BackupContainerFileSystem::FilesAndSizesT& result) {
+        auto resp = client->list_blobs_segmented(containerName, "/", "", path).get().response();
+        for (const auto& blob : resp.blobs) {
+            if (isDirectory(blob.name) && folderPathFilter(blob.name)) {
+                listFiles(client, containerName, blob.name, folderPathFilter, result);
+            } else {
+                result.emplace_back(blob.name, blob.content_length);
+            }
+        }
+    }
+
+    ACTOR static Future<Void> deleteContainer(BackupContainerAzureBlobStore* self, int* pNumDeleted) {
+        state int filesToDelete = 0;
+        if (pNumDeleted) {
+            BackupContainerFileSystem::FilesAndSizesT files = wait(self->listFiles());
+            filesToDelete = files.size();
+        }
+        wait(self->asyncTaskThread.execAsync([containerName = self->containerName, client = self->client.get()] {
+            client->delete_container(containerName).wait();
+            return Void();
+        }));
+        if (pNumDeleted) {
+            *pNumDeleted += filesToDelete;
+        }
+        return Void();
+    }
+};
+
+Future<bool> BackupContainerAzureBlobStore::blobExists(const std::string& fileName) {
+    return asyncTaskThread.execAsync(
+        [client = this->client.get(), containerName = this->containerName, fileName = fileName] {
+            auto resp = client->get_blob_properties(containerName, fileName).get().response();
+            return resp.valid();
+        });
+}
+
+BackupContainerAzureBlobStore::BackupContainerAzureBlobStore(const NetworkAddress& address,
+                                                             const std::string& accountName,
+                                                             const std::string& containerName)
+  : containerName(containerName) {
+    std::string accountKey = std::getenv("AZURE_KEY");
+
+    auto credential = std::make_shared<azure::storage_lite::shared_key_credential>(accountName, accountKey);
+    auto storageAccount = std::make_shared<azure::storage_lite::storage_account>(
+        accountName, credential, false, format("http://%s/%s", address.toString().c_str(), accountName.c_str()));
+
+    client = std::make_unique<AzureClient>(storageAccount, 1);
+}
+
+void BackupContainerAzureBlobStore::addref() {
+    return ReferenceCounted<BackupContainerAzureBlobStore>::addref();
+}
+void BackupContainerAzureBlobStore::delref() {
+    return ReferenceCounted<BackupContainerAzureBlobStore>::delref();
+}
+
+Future<Void> BackupContainerAzureBlobStore::create() {
+    return asyncTaskThread.execAsync([containerName = this->containerName, client = this->client.get()] {
+        client->create_container(containerName).wait();
+        return Void();
+    });
+}
+Future<bool> BackupContainerAzureBlobStore::exists() {
+    return asyncTaskThread.execAsync([containerName = this->containerName, client = this->client.get()] {
+        auto resp = client->get_container_properties(containerName).get().response();
+        return resp.valid();
+    });
+}
+
+Future<Reference<IAsyncFile>> BackupContainerAzureBlobStore::readFile(const std::string& fileName) {
+    return BackupContainerAzureBlobStoreImpl::readFile(this, fileName);
+}
+
+Future<Reference<IBackupFile>> BackupContainerAzureBlobStore::writeFile(const std::string& fileName) {
+    return BackupContainerAzureBlobStoreImpl::writeFile(this, fileName);
+}
+
+Future<BackupContainerFileSystem::FilesAndSizesT> BackupContainerAzureBlobStore::listFiles(
+    const std::string& path, std::function<bool(std::string const&)> folderPathFilter) {
+    return asyncTaskThread.execAsync([client = this->client.get(), containerName = this->containerName, path = path,
+                                      folderPathFilter = folderPathFilter] {
+        FilesAndSizesT result;
+        BackupContainerAzureBlobStoreImpl::listFiles(client, containerName, path, folderPathFilter, result);
+        return result;
+    });
+}
+
+Future<Void> BackupContainerAzureBlobStore::deleteFile(const std::string& fileName) {
+    return asyncTaskThread.execAsync(
+        [containerName = this->containerName, fileName = fileName, client = client.get()]() {
+            client->delete_blob(containerName, fileName).wait();
+            return Void();
+        });
+}
+
+Future<Void> BackupContainerAzureBlobStore::deleteContainer(int* pNumDeleted) {
+    return BackupContainerAzureBlobStoreImpl::deleteContainer(this, pNumDeleted);
+}
+
+Future<std::vector<std::string>> BackupContainerAzureBlobStore::listURLs(const std::string& baseURL) {
+    // TODO: Implement this
+    return std::vector<std::string>{};
+}
+
+std::string BackupContainerAzureBlobStore::getURLFormat() {
+    return "azure://<ip>:<port>/<accountname>/<container>/<path_to_file>";
+}
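Aside: given the URL format returned by getURLFormat() above, a backup container for this backend would be opened through the generic factory declared in BackupContainer.h. The address, account, and container below are made-up values for illustration:

    // Hypothetical example of "azure://<ip>:<port>/<accountname>/<container>/<path_to_file>":
    Reference<IBackupContainer> container =
        IBackupContainer::openContainer("azure://127.0.0.1:10000/devaccount/backups");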
@@ -0,0 +1,71 @@
+/*
+ * BackupContainerAzureBlobStore.h
+ *
+ * This source file is part of the FoundationDB open source project
+ *
+ * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if (!defined FDBCLIENT_BACKUP_CONTAINER_AZURE_BLOBSTORE_H) && (defined BUILD_AZURE_BACKUP)
+#define FDBCLIENT_BACKUP_CONTAINER_AZURE_BLOBSTORE_H
+#pragma once
+
+#include "fdbclient/AsyncTaskThread.h"
+#include "fdbclient/BackupContainerFileSystem.h"
+
+#include "storage_credential.h"
+#include "storage_account.h"
+#include "blob/blob_client.h"
+
+class BackupContainerAzureBlobStore final : public BackupContainerFileSystem,
+                                            ReferenceCounted<BackupContainerAzureBlobStore> {
+    using AzureClient = azure::storage_lite::blob_client;
+
+    std::unique_ptr<AzureClient> client;
+    std::string containerName;
+    AsyncTaskThread asyncTaskThread;
+
+    Future<bool> blobExists(const std::string& fileName);
+
+    friend class BackupContainerAzureBlobStoreImpl;
+
+public:
+    BackupContainerAzureBlobStore(const NetworkAddress& address, const std::string& accountName,
+                                  const std::string& containerName);
+
+    void addref() override;
+    void delref() override;
+
+    Future<Void> create() override;
+
+    Future<bool> exists() override;
+
+    Future<Reference<IAsyncFile>> readFile(const std::string& fileName) override;
+
+    Future<Reference<IBackupFile>> writeFile(const std::string& fileName) override;
+
+    Future<FilesAndSizesT> listFiles(const std::string& path = "",
+                                     std::function<bool(std::string const&)> folderPathFilter = nullptr) override;
+
+    Future<Void> deleteFile(const std::string& fileName) override;
+
+    Future<Void> deleteContainer(int* pNumDeleted) override;
+
+    static Future<std::vector<std::string>> listURLs(const std::string& baseURL);
+
+    static std::string getURLFormat();
+};
+
+#endif
File diff suppressed because it is too large
@ -0,0 +1,182 @@
/*
 * BackupContainerFileSystem.h
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef FDBCLIENT_BACKUP_CONTAINER_FILESYSTEM_H
#define FDBCLIENT_BACKUP_CONTAINER_FILESYSTEM_H
#pragma once

#include "fdbclient/BackupContainer.h"
#include "fdbclient/FDBTypes.h"
#include "flow/Trace.h"

/* BackupContainerFileSystem implements a backup container which stores files in a nested folder structure.
 * Inheritors need only define methods for writing, reading, deleting, sizing, and listing files.
 *
 * Snapshot manifests (a complete set of files constituting a database snapshot for the backup's target ranges)
 * are stored as JSON files at paths like
 *     /snapshots/snapshot,minVersion,maxVersion,totalBytes
 *
 * Key range files for snapshots are stored at paths like
 *     /kvranges/snapshot,startVersion/N/range,version,uid,blockSize
 * where startVersion is the version at which the backup snapshot execution began and N is a number
 * that is increased as key range files are generated over time (at varying rates) such that there
 * are around 5,000 key range files in each folder.
 *
 * Note that startVersion will NOT correspond to the minVersion of a snapshot manifest because
 * snapshot manifest min/max versions are based on the actual contained data and the first data
 * file written will be after the start version of the snapshot's execution.
 *
 * Log files are at file paths like
 *     /plogs/.../log,startVersion,endVersion,UID,tagID-of-N,blocksize
 *     /logs/.../log,startVersion,endVersion,UID,blockSize
 * where ... is a multi-level path which sorts lexically into version order and results in approximately 1
 * unique folder per day containing about 5,000 files. Logs after FDB 6.3 are stored in the "plogs"
 * directory and are partitioned according to tagIDs (0, 1, 2, ...), where the total number of partitions is N.
 * Old backup logs from FDB 6.2 and earlier are stored in the "logs" directory and are not partitioned.
 * After FDB 6.3, users can choose to use the new partitioned logs or the old logs.
 *
 *
 * BACKWARD COMPATIBILITY
 *
 * Prior to FDB version 6.0.16, key range files were stored using a different folder scheme. Newer versions
 * still support this scheme for all restore and backup management operations but key range files generated
 * by backup using version 6.0.16 or later use the scheme described above.
 *
 * The old format stored key range files at paths like
 *     /ranges/.../range,version,uid,blockSize
 * where ... is a multi-level path which sorts lexically into version order and results in up to approximately
 * 900 unique folders per day. The number of files per folder depends on the configured snapshot rate and
 * database size and will vary from 1 to around 5,000.
 */
class BackupContainerFileSystem : public IBackupContainer {
public:
    void addref() override = 0;
    void delref() override = 0;

    BackupContainerFileSystem() {}
    virtual ~BackupContainerFileSystem() {}

    // Create the container
    Future<Void> create() override = 0;
    Future<bool> exists() override = 0;

    // Get a list of fileNames and their sizes in the container under the given path
    // Although not required, an implementation can avoid traversing unwanted subfolders
    // by calling folderPathFilter(absoluteFolderPath) and checking for a false return value.
    using FilesAndSizesT = std::vector<std::pair<std::string, int64_t>>;
    virtual Future<FilesAndSizesT> listFiles(const std::string& path = "",
                                             std::function<bool(std::string const&)> folderPathFilter = nullptr) = 0;

    // Open a file for read by fileName
    Future<Reference<IAsyncFile>> readFile(const std::string& fileName) override = 0;

    // Open a file for write by fileName
    virtual Future<Reference<IBackupFile>> writeFile(const std::string& fileName) = 0;

    // Delete a file
    virtual Future<Void> deleteFile(const std::string& fileName) = 0;

    // Delete entire container. During the process, if pNumDeleted is not null it will be
    // updated with the count of deleted files so that progress can be seen.
    Future<Void> deleteContainer(int* pNumDeleted) override = 0;

    Future<Reference<IBackupFile>> writeLogFile(Version beginVersion, Version endVersion, int blockSize) final;

    Future<Reference<IBackupFile>> writeTaggedLogFile(Version beginVersion, Version endVersion, int blockSize,
                                                      uint16_t tagId, int totalTags) final;

    Future<Reference<IBackupFile>> writeRangeFile(Version snapshotBeginVersion, int snapshotFileCount,
                                                  Version fileVersion, int blockSize) override;

    Future<std::pair<std::vector<RangeFile>, std::map<std::string, KeyRange>>> readKeyspaceSnapshot(
        KeyspaceSnapshotFile snapshot);

    Future<Void> writeKeyspaceSnapshotFile(const std::vector<std::string>& fileNames,
                                           const std::vector<std::pair<Key, Key>>& beginEndKeys,
                                           int64_t totalBytes) final;

    // List log files, unsorted, which contain data at any version >= beginVersion and <= targetVersion.
    // The "partitioned" flag indicates whether new partitioned mutation logs or old logs should be listed.
    Future<std::vector<LogFile>> listLogFiles(Version beginVersion, Version targetVersion, bool partitioned);

    // List range files, unsorted, which contain data at or between beginVersion and endVersion
    // Note: The contents of each top level snapshot.N folder do not necessarily constitute a valid snapshot
    // and therefore listing files is not how RestoreSets are obtained.
    // Note: Snapshots partially written using FDB versions prior to 6.0.16 will have some range files stored
    // using the old folder scheme read by old_listRangeFiles
    Future<std::vector<RangeFile>> listRangeFiles(Version beginVersion, Version endVersion);

    // List snapshots which have been fully written, in sorted beginVersion order, which start before end and finish
    // on or after begin
    Future<std::vector<KeyspaceSnapshotFile>> listKeyspaceSnapshots(Version begin = 0,
                                                                    Version end = std::numeric_limits<Version>::max());

    Future<BackupFileList> dumpFileList(Version begin, Version end) override;

    // Uses the virtual methods to describe the backup contents
    Future<BackupDescription> describeBackup(bool deepScan, Version logStartVersionOverride) final;

    // Delete all data up to (but not including) endVersion
    Future<Void> expireData(Version expireEndVersion, bool force, ExpireProgress* progress,
                            Version restorableBeginVersion) final;

    Future<KeyRange> getSnapshotFileKeyRange(const RangeFile& file) final;

    Future<Optional<RestorableFileSet>> getRestoreSet(Version targetVersion, VectorRef<KeyRangeRef> keyRangesFilter,
                                                      bool logsOnly, Version beginVersion) final;

private:
    struct VersionProperty {
        VersionProperty(Reference<BackupContainerFileSystem> bc, const std::string& name)
          : bc(bc), path("properties/" + name) {}
        Reference<BackupContainerFileSystem> bc;
        std::string path;
        Future<Optional<Version>> get();
        Future<Void> set(Version v);
        Future<Void> clear();
    };

    // To avoid the need to scan the underlying filesystem in many cases, some important version boundaries are stored
    // in named files. These versions also indicate what version ranges are known to be deleted or partially deleted.
    //
    // The values below describe version ranges as follows:
    //                   0 - expiredEndVersion       All files in this range have been deleted
    //   expiredEndVersion - unreliableEndVersion    Some files in this range may have been deleted.
    //
    //     logBeginVersion - logEnd                  Log files are contiguous in this range and have NOT been
    //                                               deleted by fdbbackup
    //              logEnd - infinity                Files in this range may or may not exist yet
    //
    VersionProperty logBeginVersion();
    VersionProperty logEndVersion();
    VersionProperty expiredEndVersion();
    VersionProperty unreliableEndVersion();
    VersionProperty logType();

    // List range files, unsorted, which contain data at or between beginVersion and endVersion
    // NOTE: This reads the range file folder schema from FDB 6.0.15 and earlier and is provided for backward
    // compatibility
    Future<std::vector<RangeFile>> old_listRangeFiles(Version beginVersion, Version endVersion);

    friend class BackupContainerFileSystemImpl;
};

#endif
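
The comma-separated file name scheme documented in the header above lends itself to a tiny worked example. This sketch (the sample name and the helper are hypothetical, not part of this commit) pulls the version range out of a log file name of the form log,startVersion,endVersion,UID,blockSize.

#include <cstdint>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

int main() {
    // e.g. the basename of /logs/.../log,startVersion,endVersion,UID,blockSize
    std::string name = "log,1000,2000,0a1b2c,1048576"; // hypothetical
    std::vector<std::string> parts;
    std::stringstream ss(name);
    for (std::string tok; std::getline(ss, tok, ',');) parts.push_back(tok);
    int64_t beginVersion = std::stoll(parts[1]);
    int64_t endVersion = std::stoll(parts[2]);
    std::cout << "versions " << beginVersion << " - " << endVersion << "\n"; // versions 1000 - 2000
}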
@ -0,0 +1,255 @@
/*
 * BackupContainerLocalDirectory.actor.cpp
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "fdbclient/BackupContainerLocalDirectory.h"
#include "fdbrpc/AsyncFileReadAhead.actor.h"
#include "fdbrpc/IAsyncFile.h"
#include "flow/Platform.actor.h"
#include "flow/Platform.h"
#include "fdbrpc/simulator.h"
#include "flow/actorcompiler.h" // This must be the last #include.

namespace {

class BackupFile : public IBackupFile, ReferenceCounted<BackupFile> {
public:
    BackupFile(std::string fileName, Reference<IAsyncFile> file, std::string finalFullPath)
      : IBackupFile(fileName), m_file(file), m_finalFullPath(finalFullPath) {}

    Future<Void> append(const void* data, int len) {
        Future<Void> r = m_file->write(data, len, m_offset);
        m_offset += len;
        return r;
    }

    ACTOR static Future<Void> finish_impl(Reference<BackupFile> f) {
        wait(f->m_file->truncate(f->size())); // Some IAsyncFile implementations extend in whole block sizes.
        wait(f->m_file->sync());
        std::string name = f->m_file->getFilename();
        f->m_file.clear();
        renameFile(name, f->m_finalFullPath);
        return Void();
    }

    Future<Void> finish() { return finish_impl(Reference<BackupFile>::addRef(this)); }

    void addref() override { return ReferenceCounted<BackupFile>::addref(); }
    void delref() override { return ReferenceCounted<BackupFile>::delref(); }

private:
    Reference<IAsyncFile> m_file;
    std::string m_finalFullPath;
};

ACTOR static Future<BackupContainerFileSystem::FilesAndSizesT> listFiles_impl(std::string path, std::string m_path) {
    state std::vector<std::string> files;
    wait(platform::findFilesRecursivelyAsync(joinPath(m_path, path), &files));

    BackupContainerFileSystem::FilesAndSizesT results;

    // Remove .lnk files from results, they are a side effect of a backup that was *read* during simulation. See
    // readFile() below for more info on why they are created.
    if (g_network->isSimulated())
        files.erase(
            std::remove_if(files.begin(), files.end(),
                           [](std::string const& f) { return StringRef(f).endsWith(LiteralStringRef(".lnk")); }),
            files.end());

    for (auto& f : files) {
        // Hide .part or .temp files.
        StringRef s(f);
        if (!s.endsWith(LiteralStringRef(".part")) && !s.endsWith(LiteralStringRef(".temp")))
            results.push_back({ f.substr(m_path.size() + 1), ::fileSize(f) });
    }

    return results;
}

} // namespace

void BackupContainerLocalDirectory::addref() {
    return ReferenceCounted<BackupContainerLocalDirectory>::addref();
}
void BackupContainerLocalDirectory::delref() {
    return ReferenceCounted<BackupContainerLocalDirectory>::delref();
}

std::string BackupContainerLocalDirectory::getURLFormat() {
    return "file://</path/to/base/dir/>";
}

BackupContainerLocalDirectory::BackupContainerLocalDirectory(const std::string& url) {
    std::string path;
    if (url.find("file://") != 0) {
        TraceEvent(SevWarn, "BackupContainerLocalDirectory")
            .detail("Description", "Invalid URL for BackupContainerLocalDirectory")
            .detail("URL", url);
    }

    path = url.substr(7);
    // Remove trailing slashes on path
    path.erase(path.find_last_not_of("\\/") + 1);

    std::string absolutePath = abspath(path);

    if (!g_network->isSimulated() && path != absolutePath) {
        TraceEvent(SevWarn, "BackupContainerLocalDirectory")
            .detail("Description", "Backup path must be absolute (e.g. file:///some/path)")
            .detail("URL", url)
            .detail("Path", path)
            .detail("AbsolutePath", absolutePath);
        // throw io_error();
        IBackupContainer::lastOpenError =
            format("Backup path '%s' must be the absolute path '%s'", path.c_str(), absolutePath.c_str());
        throw backup_invalid_url();
    }

    // Finalized path written to will be <path>/backup-<uid>
    m_path = path;
}
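
The URL handling above follows a simple rule set: a file:// prefix, trailing slashes trimmed, and an already-absolute path. This self-contained sketch mirrors those rules under simplified assumptions (a plain leading-slash check stands in for the abspath() comparison, and the sample URL is hypothetical).

#include <iostream>
#include <stdexcept>
#include <string>

std::string parseFileURL(const std::string& url) {
    if (url.rfind("file://", 0) != 0) throw std::invalid_argument("not a file:// URL");
    std::string path = url.substr(7);
    path.erase(path.find_last_not_of("\\/") + 1); // drop trailing slashes
    if (path.empty() || path[0] != '/') throw std::invalid_argument("path must be absolute");
    return path;
}

int main() {
    std::cout << parseFileURL("file:///backups/cluster1///") << "\n"; // /backups/cluster1
}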
Future<std::vector<std::string>> BackupContainerLocalDirectory::listURLs(const std::string& url) {
    std::string path;
    if (url.find("file://") != 0) {
        TraceEvent(SevWarn, "BackupContainerLocalDirectory")
            .detail("Description", "Invalid URL for BackupContainerLocalDirectory")
            .detail("URL", url);
    }

    path = url.substr(7);
    // Remove trailing slashes on path
    path.erase(path.find_last_not_of("\\/") + 1);

    if (!g_network->isSimulated() && path != abspath(path)) {
        TraceEvent(SevWarn, "BackupContainerLocalDirectory")
            .detail("Description", "Backup path must be absolute (e.g. file:///some/path)")
            .detail("URL", url)
            .detail("Path", path);
        throw io_error();
    }
    std::vector<std::string> dirs = platform::listDirectories(path);
    std::vector<std::string> results;

    for (auto& r : dirs) {
        if (r == "." || r == "..") continue;
        results.push_back(std::string("file://") + joinPath(path, r));
    }

    return results;
}

Future<Void> BackupContainerLocalDirectory::create() {
    // Nothing should be done here because create() can be called by any process working with the container URL,
    // such as fdbbackup. Since "local directory" containers are by definition local to the machine they are
    // accessed from, the container's creation (in this case the creation of a directory) must be ensured prior to
    // every file creation, which is done in writeFile(). Creating the directory here would result in unnecessary
    // directories being created on machines that run fdbbackup but not agents.
    return Void();
}

Future<bool> BackupContainerLocalDirectory::exists() {
    return directoryExists(m_path);
}

Future<Reference<IAsyncFile>> BackupContainerLocalDirectory::readFile(const std::string& path) {
    int flags = IAsyncFile::OPEN_NO_AIO | IAsyncFile::OPEN_READONLY | IAsyncFile::OPEN_UNCACHED;
    // Simulation does not properly handle opening the same file from multiple machines using a shared filesystem,
    // so create a symbolic link to make each file opening appear to be unique. This could also work in production
    // but only if the source directory is writeable, which shouldn't be required for a restore.
    std::string fullPath = joinPath(m_path, path);
#ifndef _WIN32
    if (g_network->isSimulated()) {
        if (!fileExists(fullPath)) {
            throw file_not_found();
        }

        if (g_simulator.getCurrentProcess()->uid == UID()) {
            TraceEvent(SevError, "BackupContainerReadFileOnUnsetProcessID");
        }
        std::string uniquePath = fullPath + "." + g_simulator.getCurrentProcess()->uid.toString() + ".lnk";
        unlink(uniquePath.c_str());
        ASSERT(symlink(basename(path).c_str(), uniquePath.c_str()) == 0);
        fullPath = uniquePath;
    }
    // Opening cached mode forces read/write mode at a lower level, overriding the readonly request. So cached mode
    // can't be used because backup files are read-only. Cached mode can only help during restore task retries handled
    // by the same process that failed the first task execution anyway, which is a very rare case.
#endif
    Future<Reference<IAsyncFile>> f = IAsyncFileSystem::filesystem()->open(fullPath, flags, 0644);

    if (g_network->isSimulated()) {
        int blockSize = 0;
        // Extract block size from the filename, if present
        size_t lastComma = path.find_last_of(',');
        if (lastComma != path.npos) {
            blockSize = atoi(path.substr(lastComma + 1).c_str());
        }
        if (blockSize <= 0) {
            blockSize = deterministicRandom()->randomInt(1e4, 1e6);
        }
        if (deterministicRandom()->random01() < .01) {
            blockSize /= deterministicRandom()->randomInt(1, 3);
        }
        ASSERT(blockSize > 0);

        return map(f, [=](Reference<IAsyncFile> fr) {
            int readAhead = deterministicRandom()->randomInt(0, 3);
            int reads = deterministicRandom()->randomInt(1, 3);
            int cacheSize = deterministicRandom()->randomInt(0, 3);
            return Reference<IAsyncFile>(new AsyncFileReadAheadCache(fr, blockSize, readAhead, reads, cacheSize));
        });
    }

    return f;
}
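
The block-size recovery above relies on the naming scheme: the size is the field after the last comma in the file name. A minimal sketch of just that step (the sample names and the helper are hypothetical):

#include <cstdlib>
#include <iostream>
#include <string>

int blockSizeFromName(const std::string& path) {
    size_t lastComma = path.find_last_of(',');
    if (lastComma == std::string::npos) return 0; // no block size encoded
    return std::atoi(path.substr(lastComma + 1).c_str());
}

int main() {
    std::cout << blockSizeFromName("range,12345,9f8e7d,1048576") << "\n"; // 1048576
    std::cout << blockSizeFromName("no-size-here") << "\n";               // 0
}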
Future<Reference<IBackupFile>> BackupContainerLocalDirectory::writeFile(const std::string& path) {
    int flags = IAsyncFile::OPEN_NO_AIO | IAsyncFile::OPEN_CREATE | IAsyncFile::OPEN_ATOMIC_WRITE_AND_CREATE |
                IAsyncFile::OPEN_READWRITE;
    std::string fullPath = joinPath(m_path, path);
    platform::createDirectory(parentDirectory(fullPath));
    std::string temp = fullPath + "." + deterministicRandom()->randomUniqueID().toString() + ".temp";
    Future<Reference<IAsyncFile>> f = IAsyncFileSystem::filesystem()->open(temp, flags, 0644);
    return map(f, [=](Reference<IAsyncFile> f) { return Reference<IBackupFile>(new BackupFile(path, f, fullPath)); });
}

Future<Void> BackupContainerLocalDirectory::deleteFile(const std::string& path) {
    ::deleteFile(joinPath(m_path, path));
    return Void();
}

Future<BackupContainerFileSystem::FilesAndSizesT> BackupContainerLocalDirectory::listFiles(
    const std::string& path, std::function<bool(std::string const&)>) {
    return listFiles_impl(path, m_path);
}

Future<Void> BackupContainerLocalDirectory::deleteContainer(int* pNumDeleted) {
    // In order to avoid deleting some random directory due to user error, first describe the backup
    // and make sure it has something in it.
    return map(describeBackup(false, invalidVersion), [=](BackupDescription const& desc) {
        // If the backup has no snapshots and no logs then it's probably not a valid backup
        if (desc.snapshots.size() == 0 && !desc.minLogBegin.present()) throw backup_invalid_url();

        int count = platform::eraseDirectoryRecursive(m_path);
        if (pNumDeleted != nullptr) *pNumDeleted = count;

        return Void();
    });
}
@ -0,0 +1,59 @@
/*
 * BackupContainerLocalDirectory.h
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef FDBCLIENT_BACKUP_CONTAINER_LOCAL_DIRECTORY_H
#define FDBCLIENT_BACKUP_CONTAINER_LOCAL_DIRECTORY_H
#pragma once

#include "fdbclient/BackupContainerFileSystem.h"
#include "flow/flow.h"

class BackupContainerLocalDirectory : public BackupContainerFileSystem,
                                      ReferenceCounted<BackupContainerLocalDirectory> {
public:
    void addref() final;
    void delref() final;

    static std::string getURLFormat();

    BackupContainerLocalDirectory(const std::string& url);

    static Future<std::vector<std::string>> listURLs(const std::string& url);

    Future<Void> create() final;

    // The container exists if the folder it resides in exists
    Future<bool> exists() final;

    Future<Reference<IAsyncFile>> readFile(const std::string& path) final;

    Future<Reference<IBackupFile>> writeFile(const std::string& path) final;

    Future<Void> deleteFile(const std::string& path) final;

    Future<FilesAndSizesT> listFiles(const std::string& path, std::function<bool(std::string const&)>) final;

    Future<Void> deleteContainer(int* pNumDeleted) final;

private:
    std::string m_path;
};

#endif
@ -0,0 +1,204 @@
/*
 * BackupContainerS3BlobStore.actor.cpp
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "fdbclient/AsyncFileS3BlobStore.actor.h"
#include "fdbclient/BackupContainerS3BlobStore.h"
#include "fdbrpc/AsyncFileReadAhead.actor.h"
#include "flow/actorcompiler.h" // This must be the last #include.

class BackupContainerS3BlobStoreImpl {
public:
    // Backup files go under a single folder prefix, with subfolders for each named backup
    static const std::string DATAFOLDER;

    // Indexfolder contains keys for which user-named backups exist. Backup names can contain an arbitrary
    // number of slashes, so the backup names are kept in a separate folder tree from their actual data.
    static const std::string INDEXFOLDER;

    ACTOR static Future<std::vector<std::string>> listURLs(Reference<S3BlobStoreEndpoint> bstore, std::string bucket) {
        state std::string basePath = INDEXFOLDER + '/';
        S3BlobStoreEndpoint::ListResult contents = wait(bstore->listObjects(bucket, basePath));
        std::vector<std::string> results;
        for (auto& f : contents.objects) {
            results.push_back(
                bstore->getResourceURL(f.name.substr(basePath.size()), format("bucket=%s", bucket.c_str())));
        }
        return results;
    }

    class BackupFile : public IBackupFile, ReferenceCounted<BackupFile> {
    public:
        BackupFile(std::string fileName, Reference<IAsyncFile> file) : IBackupFile(fileName), m_file(file) {}

        Future<Void> append(const void* data, int len) {
            Future<Void> r = m_file->write(data, len, m_offset);
            m_offset += len;
            return r;
        }

        Future<Void> finish() {
            Reference<BackupFile> self = Reference<BackupFile>::addRef(this);
            return map(m_file->sync(), [=](Void _) {
                self->m_file.clear();
                return Void();
            });
        }

        void addref() final { return ReferenceCounted<BackupFile>::addref(); }
        void delref() final { return ReferenceCounted<BackupFile>::delref(); }

    private:
        Reference<IAsyncFile> m_file;
    };

    ACTOR static Future<BackupContainerFileSystem::FilesAndSizesT> listFiles(
        Reference<BackupContainerS3BlobStore> bc, std::string path,
        std::function<bool(std::string const&)> pathFilter) {
        // pathFilter expects container based paths, so create a wrapper which converts a raw path
        // to a container path by removing the known backup name prefix.
        state int prefixTrim = bc->dataPath("").size();
        std::function<bool(std::string const&)> rawPathFilter = [=](const std::string& folderPath) {
            ASSERT(folderPath.size() >= prefixTrim);
            return pathFilter(folderPath.substr(prefixTrim));
        };

        state S3BlobStoreEndpoint::ListResult result = wait(bc->m_bstore->listObjects(
            bc->m_bucket, bc->dataPath(path), '/', std::numeric_limits<int>::max(), rawPathFilter));
        BackupContainerFileSystem::FilesAndSizesT files;
        for (auto& o : result.objects) {
            ASSERT(o.name.size() >= prefixTrim);
            files.push_back({ o.name.substr(prefixTrim), o.size });
        }
        return files;
    }

    ACTOR static Future<Void> create(Reference<BackupContainerS3BlobStore> bc) {
        wait(bc->m_bstore->createBucket(bc->m_bucket));

        // Check/create the index entry
        bool exists = wait(bc->m_bstore->objectExists(bc->m_bucket, bc->indexEntry()));
        if (!exists) {
            wait(bc->m_bstore->writeEntireFile(bc->m_bucket, bc->indexEntry(), ""));
        }

        return Void();
    }

    ACTOR static Future<Void> deleteContainer(Reference<BackupContainerS3BlobStore> bc, int* pNumDeleted) {
        bool e = wait(bc->exists());
        if (!e) {
            TraceEvent(SevWarnAlways, "BackupContainerDoesNotExist").detail("URL", bc->getURL());
            throw backup_does_not_exist();
        }

        // First delete everything under the data prefix in the bucket
        wait(bc->m_bstore->deleteRecursively(bc->m_bucket, bc->dataPath(""), pNumDeleted));

        // Now that all files are deleted, delete the index entry
        wait(bc->m_bstore->deleteObject(bc->m_bucket, bc->indexEntry()));

        return Void();
    }
};

const std::string BackupContainerS3BlobStoreImpl::DATAFOLDER = "data";
const std::string BackupContainerS3BlobStoreImpl::INDEXFOLDER = "backups";

std::string BackupContainerS3BlobStore::dataPath(const std::string& path) {
    return BackupContainerS3BlobStoreImpl::DATAFOLDER + "/" + m_name + "/" + path;
}

// Get the path of the backup's index entry
std::string BackupContainerS3BlobStore::indexEntry() {
    return BackupContainerS3BlobStoreImpl::INDEXFOLDER + "/" + m_name;
}
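
Worked example of the two path schemes above for a backup named "nightly" (the name and file are hypothetical): data lives under the data/ prefix, while the index entry is a single key under backups/.

#include <iostream>
#include <string>

int main() {
    const std::string DATAFOLDER = "data", INDEXFOLDER = "backups", name = "nightly";
    std::string dataPath = DATAFOLDER + "/" + name + "/" + "snapshots/manifest";
    std::string indexEntry = INDEXFOLDER + "/" + name;
    std::cout << dataPath << "\n";   // data/nightly/snapshots/manifest
    std::cout << indexEntry << "\n"; // backups/nightly
}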
BackupContainerS3BlobStore::BackupContainerS3BlobStore(Reference<S3BlobStoreEndpoint> bstore, const std::string& name,
                                                       const S3BlobStoreEndpoint::ParametersT& params)
  : m_bstore(bstore), m_name(name), m_bucket("FDB_BACKUPS_V2") {

    // Currently only one parameter is supported, "bucket"
    for (auto& kv : params) {
        if (kv.first == "bucket") {
            m_bucket = kv.second;
            continue;
        }
        TraceEvent(SevWarn, "BackupContainerS3BlobStoreInvalidParameter")
            .detail("Name", kv.first)
            .detail("Value", kv.second);
        IBackupContainer::lastOpenError = format("Unknown URL parameter: '%s'", kv.first.c_str());
        throw backup_invalid_url();
    }
}
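
A minimal standalone sketch of the parameter loop above: accept "bucket", fall back to the default otherwise reject. The parameter map contents are hypothetical; this is not the real ParametersT type, just a std::map with the same shape.

#include <iostream>
#include <map>
#include <stdexcept>
#include <string>

int main() {
    std::map<std::string, std::string> params = { { "bucket", "my-backups" } };
    std::string bucket = "FDB_BACKUPS_V2"; // default used above
    for (auto& kv : params) {
        if (kv.first == "bucket") { bucket = kv.second; continue; }
        throw std::invalid_argument("Unknown URL parameter: '" + kv.first + "'");
    }
    std::cout << bucket << "\n"; // my-backups
}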
void BackupContainerS3BlobStore::addref() {
    return ReferenceCounted<BackupContainerS3BlobStore>::addref();
}
void BackupContainerS3BlobStore::delref() {
    return ReferenceCounted<BackupContainerS3BlobStore>::delref();
}

std::string BackupContainerS3BlobStore::getURLFormat() {
    return S3BlobStoreEndpoint::getURLFormat(true) + " (Note: The 'bucket' parameter is required.)";
}

Future<Reference<IAsyncFile>> BackupContainerS3BlobStore::readFile(const std::string& path) {
    return Reference<IAsyncFile>(new AsyncFileReadAheadCache(
        Reference<IAsyncFile>(new AsyncFileS3BlobStoreRead(m_bstore, m_bucket, dataPath(path))),
        m_bstore->knobs.read_block_size, m_bstore->knobs.read_ahead_blocks, m_bstore->knobs.concurrent_reads_per_file,
        m_bstore->knobs.read_cache_blocks_per_file));
}

Future<std::vector<std::string>> BackupContainerS3BlobStore::listURLs(Reference<S3BlobStoreEndpoint> bstore,
                                                                      const std::string& bucket) {
    return BackupContainerS3BlobStoreImpl::listURLs(bstore, bucket);
}

Future<Reference<IBackupFile>> BackupContainerS3BlobStore::writeFile(const std::string& path) {
    return Reference<IBackupFile>(new BackupContainerS3BlobStoreImpl::BackupFile(
        path, Reference<IAsyncFile>(new AsyncFileS3BlobStoreWrite(m_bstore, m_bucket, dataPath(path)))));
}

Future<Void> BackupContainerS3BlobStore::deleteFile(const std::string& path) {
    return m_bstore->deleteObject(m_bucket, dataPath(path));
}

Future<BackupContainerFileSystem::FilesAndSizesT> BackupContainerS3BlobStore::listFiles(
    const std::string& path, std::function<bool(std::string const&)> pathFilter) {
    return BackupContainerS3BlobStoreImpl::listFiles(Reference<BackupContainerS3BlobStore>::addRef(this), path,
                                                     pathFilter);
}

Future<Void> BackupContainerS3BlobStore::create() {
    return BackupContainerS3BlobStoreImpl::create(Reference<BackupContainerS3BlobStore>::addRef(this));
}

Future<bool> BackupContainerS3BlobStore::exists() {
    return m_bstore->objectExists(m_bucket, indexEntry());
}

Future<Void> BackupContainerS3BlobStore::deleteContainer(int* pNumDeleted) {
    return BackupContainerS3BlobStoreImpl::deleteContainer(Reference<BackupContainerS3BlobStore>::addRef(this),
                                                           pNumDeleted);
}

std::string BackupContainerS3BlobStore::getBucket() const {
    return m_bucket;
}
@ -0,0 +1,72 @@
/*
 * BackupContainerS3BlobStore.h
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef FDBCLIENT_BACKUP_CONTAINER_S3_BLOBSTORE_H
#define FDBCLIENT_BACKUP_CONTAINER_S3_BLOBSTORE_H
#pragma once

#include "fdbclient/AsyncFileS3BlobStore.actor.h"
#include "fdbclient/BackupContainerFileSystem.h"

class BackupContainerS3BlobStore final : public BackupContainerFileSystem,
                                         ReferenceCounted<BackupContainerS3BlobStore> {
    Reference<S3BlobStoreEndpoint> m_bstore;
    std::string m_name;

    // All backup data goes into a single bucket
    std::string m_bucket;

    std::string dataPath(const std::string& path);

    // Get the path of the backup's index entry
    std::string indexEntry();

    friend class BackupContainerS3BlobStoreImpl;

public:
    BackupContainerS3BlobStore(Reference<S3BlobStoreEndpoint> bstore, const std::string& name,
                               const S3BlobStoreEndpoint::ParametersT& params);

    void addref() override;
    void delref() override;

    static std::string getURLFormat();

    Future<Reference<IAsyncFile>> readFile(const std::string& path) final;

    static Future<std::vector<std::string>> listURLs(Reference<S3BlobStoreEndpoint> bstore, const std::string& bucket);

    Future<Reference<IBackupFile>> writeFile(const std::string& path) final;

    Future<Void> deleteFile(const std::string& path) final;

    Future<FilesAndSizesT> listFiles(const std::string& path, std::function<bool(std::string const&)> pathFilter) final;

    Future<Void> create() final;

    // The container exists if the index entry in the blob bucket exists
    Future<bool> exists() final;

    Future<Void> deleteContainer(int* pNumDeleted) final;

    std::string getBucket() const;
};

#endif
File diff suppressed because it is too large
@ -1,13 +1,20 @@
set(FDBCLIENT_SRCS
  AsyncFileBlobStore.actor.cpp
  AsyncFileBlobStore.actor.h
  AsyncFileS3BlobStore.actor.cpp
  AsyncFileS3BlobStore.actor.h
  AsyncTaskThread.actor.cpp
  AsyncTaskThread.h
  Atomic.h
  AutoPublicAddress.cpp
  BackupAgent.actor.h
  BackupAgentBase.actor.cpp
  BackupContainer.actor.cpp
  BackupContainer.h
  BlobStore.actor.cpp
  BackupContainerFileSystem.actor.cpp
  BackupContainerFileSystem.h
  BackupContainerLocalDirectory.actor.cpp
  BackupContainerLocalDirectory.h
  BackupContainerS3BlobStore.actor.cpp
  BackupContainerS3BlobStore.h
  ClientLogEvents.h
  ClientWorkerInterface.h
  ClusterInterface.h

@ -53,6 +60,7 @@ set(FDBCLIENT_SRCS
  RunTransaction.actor.h
  RYWIterator.cpp
  RYWIterator.h
  S3BlobStore.actor.cpp
  Schemas.cpp
  Schemas.h
  SnapshotCache.h

@ -93,6 +101,46 @@ set(options_srcs ${CMAKE_CURRENT_BINARY_DIR}/FDBOptions.g.cpp)
vexillographer_compile(TARGET fdboptions LANG cpp OUT ${CMAKE_CURRENT_BINARY_DIR}/FDBOptions.g
  OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/FDBOptions.g.h ${CMAKE_CURRENT_BINARY_DIR}/FDBOptions.g.cpp)

set(BUILD_AZURE_BACKUP OFF CACHE BOOL "Build Azure backup client")
if(BUILD_AZURE_BACKUP)
  add_compile_definitions(BUILD_AZURE_BACKUP)
  set(FDBCLIENT_SRCS
    ${FDBCLIENT_SRCS}
    BackupContainerAzureBlobStore.actor.cpp
    BackupContainerAzureBlobStore.h)

  configure_file(azurestorage.cmake azurestorage-download/CMakeLists.txt)

  execute_process(
    COMMAND ${CMAKE_COMMAND} -G "${CMAKE_GENERATOR}" .
    RESULT_VARIABLE results
    WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/azurestorage-download
  )

  if(results)
    message(FATAL_ERROR "Configuration step for AzureStorage has failed. ${results}")
  endif()

  execute_process(
    COMMAND ${CMAKE_COMMAND} --build . --config Release
    RESULT_VARIABLE results
    WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/azurestorage-download
  )

  if(results)
    message(FATAL_ERROR "Build step for AzureStorage has failed. ${results}")
  endif()

  add_subdirectory(
    ${CMAKE_CURRENT_BINARY_DIR}/azurestorage-src
    ${CMAKE_CURRENT_BINARY_DIR}/azurestorage-build
  )
endif()

add_flow_target(STATIC_LIBRARY NAME fdbclient SRCS ${FDBCLIENT_SRCS} ADDL_SRCS ${options_srcs})
add_dependencies(fdbclient fdboptions)
target_link_libraries(fdbclient PUBLIC fdbrpc)
if(BUILD_AZURE_BACKUP)
  target_link_libraries(fdbclient PUBLIC fdbrpc PRIVATE curl uuid azure-storage-lite)
else()
  target_link_libraries(fdbclient PUBLIC fdbrpc)
endif()
File diff suppressed because it is too large
@ -1,5 +1,5 @@
/*
 * BlobStore.h
 * S3BlobStore.h
 *
 * This source file is part of the FoundationDB open source project
 *

@ -31,11 +31,11 @@

// Representation of all the things you need to connect to a blob store instance with some credentials.
// Reference counted because a very large number of them could be needed.
class BlobStoreEndpoint : public ReferenceCounted<BlobStoreEndpoint> {
class S3BlobStoreEndpoint : public ReferenceCounted<S3BlobStoreEndpoint> {
public:
    struct Stats {
        Stats() : requests_successful(0), requests_failed(0), bytes_sent(0) {}
        Stats operator-(const Stats &rhs);
        Stats operator-(const Stats& rhs);
        void clear() { memset(this, 0, sizeof(*this)); }
        json_spirit::mObject getJSON();

@ -48,29 +48,12 @@ public:

    struct BlobKnobs {
        BlobKnobs();
        int secure_connection,
            connect_tries,
            connect_timeout,
            max_connection_life,
            request_tries,
            request_timeout_min,
            requests_per_second,
            list_requests_per_second,
            write_requests_per_second,
            read_requests_per_second,
            delete_requests_per_second,
            multipart_max_part_size,
            multipart_min_part_size,
            concurrent_requests,
            concurrent_uploads,
            concurrent_lists,
            concurrent_reads_per_file,
            concurrent_writes_per_file,
            read_block_size,
            read_ahead_blocks,
            read_cache_blocks_per_file,
            max_send_bytes_per_second,
            max_recv_bytes_per_second;
        int secure_connection, connect_tries, connect_timeout, max_connection_life, request_tries, request_timeout_min,
            requests_per_second, list_requests_per_second, write_requests_per_second, read_requests_per_second,
            delete_requests_per_second, multipart_max_part_size, multipart_min_part_size, concurrent_requests,
            concurrent_uploads, concurrent_lists, concurrent_reads_per_file, concurrent_writes_per_file,
            read_block_size, read_ahead_blocks, read_cache_blocks_per_file, max_send_bytes_per_second,
            max_recv_bytes_per_second;
        bool set(StringRef name, int value);
        std::string getURLParameters() const;
        static std::vector<std::string> getKnobDescriptions() {

@ -79,8 +62,10 @@ public:
            "connect_tries (or ct)                 Number of times to try to connect for each request.",
            "connect_timeout (or cto)              Number of seconds to wait for a connect request to succeed.",
            "max_connection_life (or mcl)          Maximum number of seconds to use a single TCP connection.",
            "request_tries (or rt)                 Number of times to try each request until a parseable HTTP response other than 429 is received.",
            "request_timeout_min (or rtom)         Number of seconds to wait for a request to succeed after a connection is established.",
            "request_tries (or rt)                 Number of times to try each request until a parseable HTTP "
            "response other than 429 is received.",
            "request_timeout_min (or rtom)         Number of seconds to wait for a request to succeed after a "
            "connection is established.",
            "requests_per_second (or rps)          Max number of requests to start per second.",
            "list_requests_per_second (or lrps)    Max number of list requests to start per second.",
            "write_requests_per_second (or wrps)   Max number of write requests to start per second.",

@ -88,8 +73,10 @@ public:
            "delete_requests_per_second (or drps)  Max number of delete requests to start per second.",
            "multipart_max_part_size (or maxps)    Max part size for multipart uploads.",
            "multipart_min_part_size (or minps)    Min part size for multipart uploads.",
            "concurrent_requests (or cr)           Max number of total requests in progress at once, regardless of operation-specific concurrency limits.",
            "concurrent_uploads (or cu)            Max concurrent uploads (part or whole) that can be in progress at once.",
            "concurrent_requests (or cr)           Max number of total requests in progress at once, regardless of "
            "operation-specific concurrency limits.",
            "concurrent_uploads (or cu)            Max concurrent uploads (part or whole) that can be in progress "
            "at once.",
            "concurrent_lists (or cl)              Max concurrent list operations that can be in progress at once.",
            "concurrent_reads_per_file (or crps)   Max concurrent reads in progress for any one file.",
            "concurrent_writes_per_file (or cwps)  Max concurrent uploads in progress for any one file.",

@ -97,43 +84,45 @@ public:
            "read_ahead_blocks (or rab)            Number of blocks to read ahead of requested offset.",
            "read_cache_blocks_per_file (or rcb)   Size of the read cache for a file in blocks.",
            "max_send_bytes_per_second (or sbps)   Max send bytes per second for all requests combined.",
            "max_recv_bytes_per_second (or rbps)   Max receive bytes per second for all requests combined (NOT YET USED)."
            "max_recv_bytes_per_second (or rbps)   Max receive bytes per second for all requests combined (NOT YET "
            "USED)."
        };
    }
};
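
The knob list above maps URL parameters like "?ct=10&rps=1000" onto BlobKnobs fields through set(name, value). A hedged, self-contained sketch of that dispatch (only two knobs shown, with hypothetical defaults; the real class covers the full list):

#include <iostream>
#include <string>

struct Knobs {
    int connect_tries = 4, requests_per_second = 200; // hypothetical defaults
    bool set(const std::string& name, int value) {
        // Each knob accepts its full name or its abbreviation from the list above.
        if (name == "connect_tries" || name == "ct") { connect_tries = value; return true; }
        if (name == "requests_per_second" || name == "rps") { requests_per_second = value; return true; }
        return false; // unknown knob
    }
};

int main() {
    Knobs k;
    k.set("ct", 10);
    k.set("rps", 1000);
    std::cout << k.connect_tries << " " << k.requests_per_second << "\n"; // 10 1000
}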
    BlobStoreEndpoint(std::string const &host, std::string service, std::string const &key, std::string const &secret, BlobKnobs const &knobs = BlobKnobs(), HTTP::Headers extraHeaders = HTTP::Headers())
      : host(host), service(service), key(key), secret(secret), lookupSecret(secret.empty()), knobs(knobs), extraHeaders(extraHeaders),
        requestRate(new SpeedLimit(knobs.requests_per_second, 1)),
        requestRateList(new SpeedLimit(knobs.list_requests_per_second, 1)),
        requestRateWrite(new SpeedLimit(knobs.write_requests_per_second, 1)),
        requestRateRead(new SpeedLimit(knobs.read_requests_per_second, 1)),
        requestRateDelete(new SpeedLimit(knobs.delete_requests_per_second, 1)),
        sendRate(new SpeedLimit(knobs.max_send_bytes_per_second, 1)),
        recvRate(new SpeedLimit(knobs.max_recv_bytes_per_second, 1)),
        concurrentRequests(knobs.concurrent_requests),
        concurrentUploads(knobs.concurrent_uploads),
        concurrentLists(knobs.concurrent_lists) {
    S3BlobStoreEndpoint(std::string const& host, std::string service, std::string const& key, std::string const& secret,
                        BlobKnobs const& knobs = BlobKnobs(), HTTP::Headers extraHeaders = HTTP::Headers())
      : host(host), service(service), key(key), secret(secret), lookupSecret(secret.empty()), knobs(knobs),
        extraHeaders(extraHeaders), requestRate(new SpeedLimit(knobs.requests_per_second, 1)),
        requestRateList(new SpeedLimit(knobs.list_requests_per_second, 1)),
        requestRateWrite(new SpeedLimit(knobs.write_requests_per_second, 1)),
        requestRateRead(new SpeedLimit(knobs.read_requests_per_second, 1)),
        requestRateDelete(new SpeedLimit(knobs.delete_requests_per_second, 1)),
        sendRate(new SpeedLimit(knobs.max_send_bytes_per_second, 1)),
        recvRate(new SpeedLimit(knobs.max_recv_bytes_per_second, 1)), concurrentRequests(knobs.concurrent_requests),
        concurrentUploads(knobs.concurrent_uploads), concurrentLists(knobs.concurrent_lists) {

        if(host.empty())
            throw connection_string_invalid();
        if (host.empty()) throw connection_string_invalid();
    }

    static std::string getURLFormat(bool withResource = false) {
        const char *resource = "";
        if(withResource)
            resource = "<name>";
        return format("blobstore://<api_key>:<secret>@<host>[:<port>]/%s[?<param>=<value>[&<param>=<value>]...]", resource);
        const char* resource = "";
        if (withResource) resource = "<name>";
        return format("blobstore://<api_key>:<secret>@<host>[:<port>]/%s[?<param>=<value>[&<param>=<value>]...]",
                      resource);
    }

    typedef std::map<std::string, std::string> ParametersT;

    // Parse url and return a BlobStoreEndpoint
    // If the url has parameters that BlobStoreEndpoint can't consume then an error will be thrown unless ignored_parameters is given in which case
    // the unconsumed parameters will be added to it.
    static Reference<BlobStoreEndpoint> fromString(std::string const &url, std::string *resourceFromURL = nullptr, std::string *error = nullptr, ParametersT *ignored_parameters = nullptr);
    // Parse url and return a S3BlobStoreEndpoint
    // If the url has parameters that S3BlobStoreEndpoint can't consume then an error will be thrown unless
    // ignored_parameters is given in which case the unconsumed parameters will be added to it.
    static Reference<S3BlobStoreEndpoint> fromString(std::string const& url, std::string* resourceFromURL = nullptr,
                                                     std::string* error = nullptr,
                                                     ParametersT* ignored_parameters = nullptr);

    // Get a normalized version of this URL with the given resource and any non-default BlobKnob values as URL parameters in addition to the passed params string
    // Get a normalized version of this URL with the given resource and any non-default BlobKnob values as URL
    // parameters in addition to the passed params string
    std::string getResourceURL(std::string resource, std::string params);

    struct ReusableConnection {

@ -142,7 +131,7 @@ public:
    };
    std::queue<ReusableConnection> connectionPool;
    Future<ReusableConnection> connect();
    void returnConnection(ReusableConnection &conn);
    void returnConnection(ReusableConnection& conn);

    std::string host;
    std::string service;

@ -167,18 +156,21 @@ public:
    Future<Void> updateSecret();

    // Calculates the authentication string from the secret key
    std::string hmac_sha1(std::string const &msg);
    std::string hmac_sha1(std::string const& msg);

    // Sets headers needed for Authorization (including Date which will be overwritten if present)
    void setAuthHeaders(std::string const &verb, std::string const &resource, HTTP::Headers &headers);
    void setAuthHeaders(std::string const& verb, std::string const& resource, HTTP::Headers& headers);

    // Prepend the HTTP request header to the given PacketBuffer, returning the new head of the buffer chain
    static PacketBuffer * writeRequestHeader(std::string const &request, HTTP::Headers const &headers, PacketBuffer *dest);
    static PacketBuffer* writeRequestHeader(std::string const& request, HTTP::Headers const& headers,
                                            PacketBuffer* dest);

    // Do an HTTP request to the Blob Store, read the response. Handles authentication.
    // Every blob store interaction should ultimately go through this function

    Future<Reference<HTTP::Response>> doRequest(std::string const &verb, std::string const &resource, const HTTP::Headers &headers, UnsentPacketQueue *pContent, int contentLen, std::set<unsigned int> successCodes);
    Future<Reference<HTTP::Response>> doRequest(std::string const& verb, std::string const& resource,
                                                const HTTP::Headers& headers, UnsentPacketQueue* pContent,
                                                int contentLen, std::set<unsigned int> successCodes);

    struct ObjectInfo {
        std::string name;

@ -192,51 +184,61 @@ public:

    // Get bucket contents via a stream, since listing large buckets will take many serial blob requests
    // If a delimiter is passed then common prefixes will be read in parallel, recursively, depending on recurseFilter.
    // Recursefilter is a must be a function that takes a string and returns true if it passes. The default behavior is to assume true.
    Future<Void> listObjectsStream(std::string const &bucket, PromiseStream<ListResult> results, Optional<std::string> prefix = {}, Optional<char> delimiter = {}, int maxDepth = 0, std::function<bool(std::string const &)> recurseFilter = nullptr);
    // recurseFilter must be a function that takes a string and returns true if it passes. The default behavior is
    // to assume true.
    Future<Void> listObjectsStream(std::string const& bucket, PromiseStream<ListResult> results,
                                   Optional<std::string> prefix = {}, Optional<char> delimiter = {}, int maxDepth = 0,
                                   std::function<bool(std::string const&)> recurseFilter = nullptr);

    // Get a list of the files in a bucket, see listObjectsStream for more argument detail.
    Future<ListResult> listObjects(std::string const &bucket, Optional<std::string> prefix = {}, Optional<char> delimiter = {}, int maxDepth = 0, std::function<bool(std::string const &)> recurseFilter = nullptr);
    Future<ListResult> listObjects(std::string const& bucket, Optional<std::string> prefix = {},
                                   Optional<char> delimiter = {}, int maxDepth = 0,
                                   std::function<bool(std::string const&)> recurseFilter = nullptr);
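
An illustrative sketch of the prefix/delimiter semantics behind the listing methods above, in the spirit of S3-style listings (the bucket contents are hypothetical): names under the prefix are returned as objects, and anything past the next delimiter is reported once as a common prefix.

#include <iostream>
#include <set>
#include <string>
#include <vector>

int main() {
    std::vector<std::string> bucket = { "data/a/1", "data/a/2", "data/b/1", "index/x" };
    std::string prefix = "data/";
    char delimiter = '/';
    std::set<std::string> commonPrefixes;
    std::vector<std::string> objects;
    for (auto& name : bucket) {
        if (name.rfind(prefix, 0) != 0) continue; // outside the prefix
        size_t d = name.find(delimiter, prefix.size());
        if (d == std::string::npos) objects.push_back(name);
        else commonPrefixes.insert(name.substr(0, d + 1)); // folded "subfolder"
    }
    for (auto& p : commonPrefixes) std::cout << "prefix: " << p << "\n"; // data/a/, data/b/
    for (auto& o : objects) std::cout << "object: " << o << "\n";
}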
    // Get a list of all buckets
    Future<std::vector<std::string>> listBuckets();

    // Check if a bucket exists
    Future<bool> bucketExists(std::string const &bucket);
    Future<bool> bucketExists(std::string const& bucket);

    // Check if an object exists in a bucket
    Future<bool> objectExists(std::string const &bucket, std::string const &object);
    Future<bool> objectExists(std::string const& bucket, std::string const& object);

    // Get the size of an object in a bucket
    Future<int64_t> objectSize(std::string const &bucket, std::string const &object);
    Future<int64_t> objectSize(std::string const& bucket, std::string const& object);

    // Read an arbitrary segment of an object
    Future<int> readObject(std::string const &bucket, std::string const &object, void *data, int length, int64_t offset);
    Future<int> readObject(std::string const& bucket, std::string const& object, void* data, int length,
                           int64_t offset);

    // Delete an object in a bucket
    Future<Void> deleteObject(std::string const &bucket, std::string const &object);
    Future<Void> deleteObject(std::string const& bucket, std::string const& object);

    // Delete all objects in a bucket under a prefix. Note this is not atomic as blob store does not
    // support this operation directly. This method is just a convenience method that lists and deletes
    // all of the objects in the bucket under the given prefix.
    // Since it can take a while, if pNumDeleted and/or pBytesDeleted are provided they will be incremented every time
    // a deletion of an object completes.
    Future<Void> deleteRecursively(std::string const &bucket, std::string prefix = "", int *pNumDeleted = nullptr, int64_t *pBytesDeleted = nullptr);
    Future<Void> deleteRecursively(std::string const& bucket, std::string prefix = "", int* pNumDeleted = nullptr,
                                   int64_t* pBytesDeleted = nullptr);

    // Create a bucket if it does not already exist.
    Future<Void> createBucket(std::string const &bucket);
    Future<Void> createBucket(std::string const& bucket);

    // Useful methods for working with tiny files
    Future<std::string> readEntireFile(std::string const &bucket, std::string const &object);
    Future<Void> writeEntireFile(std::string const &bucket, std::string const &object, std::string const &content);
    Future<Void> writeEntireFileFromBuffer(std::string const &bucket, std::string const &object, UnsentPacketQueue *pContent, int contentLen, std::string const &contentMD5);
    Future<std::string> readEntireFile(std::string const& bucket, std::string const& object);
    Future<Void> writeEntireFile(std::string const& bucket, std::string const& object, std::string const& content);
    Future<Void> writeEntireFileFromBuffer(std::string const& bucket, std::string const& object,
                                           UnsentPacketQueue* pContent, int contentLen, std::string const& contentMD5);

    // MultiPart upload methods
    // Returns UploadID
    Future<std::string> beginMultiPartUpload(std::string const &bucket, std::string const &object);
    Future<std::string> beginMultiPartUpload(std::string const& bucket, std::string const& object);
    // Returns eTag
    Future<std::string> uploadPart(std::string const &bucket, std::string const &object, std::string const &uploadID, unsigned int partNumber, UnsentPacketQueue *pContent, int contentLen, std::string const &contentMD5);
    Future<std::string> uploadPart(std::string const& bucket, std::string const& object, std::string const& uploadID,
                                   unsigned int partNumber, UnsentPacketQueue* pContent, int contentLen,
                                   std::string const& contentMD5);
    typedef std::map<int, std::string> MultiPartSetT;
    Future<Void> finishMultiPartUpload(std::string const &bucket, std::string const &object, std::string const &uploadID, MultiPartSetT const &parts);
    Future<Void> finishMultiPartUpload(std::string const& bucket, std::string const& object,
                                       std::string const& uploadID, MultiPartSetT const& parts);
};
|
||||
|
|
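The multipart methods above compose in a fixed order: begin, upload one or more parts, then finish with the collected eTags. A minimal usage sketch, assuming the enclosing endpoint type is named S3BlobStoreEndpoint (consistent with the S3BlobStore renames elsewhere in this commit) and that the caller already has a filled UnsentPacketQueue with its length and MD5:

ACTOR Future<Void> uploadOnePart(Reference<S3BlobStoreEndpoint> b, std::string bucket, std::string object,
                                 UnsentPacketQueue* part, int partLen, std::string partMD5) {
	wait(b->createBucket(bucket)); // harmless if the bucket already exists
	state std::string uploadID = wait(b->beginMultiPartUpload(bucket, object));
	std::string eTag = wait(b->uploadPart(bucket, object, uploadID, 1, part, partLen, partMD5));
	std::map<int, std::string> parts; // MultiPartSetT: part number -> eTag
	parts[1] = eTag;
	wait(b->finishMultiPartUpload(bucket, object, uploadID, parts));
	return Void();
}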
@@ -0,0 +1,15 @@
project(azurestorage-download)

include(ExternalProject)
ExternalProject_Add(azurestorage
  GIT_REPOSITORY    https://github.com/Azure/azure-storage-cpplite.git
  GIT_TAG           11e1f98b021446ef340f4886796899a6eb1ad9a5 # v0.3.0
  SOURCE_DIR        "${CMAKE_CURRENT_BINARY_DIR}/azurestorage-src"
  BINARY_DIR        "${CMAKE_CURRENT_BINARY_DIR}/azurestorage-build"
  CMAKE_ARGS        "-DCMAKE_BUILD_TYPE=Release"
  CONFIGURE_COMMAND ""
  BUILD_COMMAND     ""
  INSTALL_COMMAND   ""
  TEST_COMMAND      ""
  BUILD_BYPRODUCTS  "${CMAKE_CURRENT_BINARY_DIR}/libazure-storage-lite.a"
)
@@ -305,7 +305,7 @@ TransportData::TransportData(uint64_t transportId)

#pragma pack( push, 1 )
struct ConnectPacket {
-	// The value does not inclueds the size of `connectPacketLength` itself,
+	// The value does not include the size of `connectPacketLength` itself,
	// but only the other fields of this structure.
	uint32_t connectPacketLength;
	ProtocolVersion protocolVersion; // Expect currentProtocolVersion
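The corrected comment describes a length prefix that excludes its own size, which is easy to get wrong when serializing. A standalone sketch of the arithmetic, using a hypothetical ExamplePacket rather than the real ConnectPacket:

#include <cstdint>
#pragma pack(push, 1)
struct ExamplePacket {
	uint32_t length;          // counts only the bytes AFTER this field
	uint64_t protocolVersion; // stands in for ProtocolVersion
	uint64_t connectionId;
};
#pragma pack(pop)
static_assert(sizeof(ExamplePacket) == 20, "packed layout");
constexpr uint32_t examplePacketLength = sizeof(ExamplePacket) - sizeof(uint32_t); // 16, not 20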
@@ -26,7 +26,7 @@
#include "flow/flow.h"

// All outstanding operations must be cancelled before the destructor of IAsyncFile is called.
-// The desirability of the above semantic is disputed. Some classes (AsyncFileBlobStore,
+// The desirability of the above semantic is disputed. Some classes (AsyncFileS3BlobStore,
// AsyncFileCached) maintain references, while others (AsyncFileNonDurable) don't, and the comment
// is inapplicable to some others as well (AsyncFileKAIO). It's safest to assume that all operations
// must complete or cancel, but you should probably look at the file implementations you'll be using.
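Under the safest interpretation of that comment, an actor should hold its file reference as long as any of its operations are in flight; wait() (or actor cancellation, which cancels the pending Future) guarantees that. A hedged sketch, with the read() signature assumed to parallel the readObject() style above:

ACTOR Future<int> readAndHold(Reference<IAsyncFile> f, uint8_t* buf, int len, int64_t offset) {
	// The actor keeps f alive across the wait; if the actor is cancelled, the
	// Future from read() is cancelled with it, satisfying the comment's contract.
	int n = wait(f->read(buf, len, offset));
	return n;
}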
@@ -2127,7 +2127,7 @@ eio__statvfsat (int dirfd, const char *path, struct statvfs *buf)
static void ecb_noinline ecb_cold
etp_proc_init (void)
{
-#if HAVE_PRCTL_SET_NAME
+#if HAVE_PRCTL_SET_NAME && !defined(MEMORY_SANITIZER)
  /* provide a more sensible "thread name" */
  char name[16 + 1];
  const int namelen = sizeof (name) - 1;
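The macro being extended guards Linux's prctl(2) thread-naming call, which MemorySanitizer builds now skip entirely. The pattern in isolation, as a hedged sketch rather than eio's exact code:

#if defined(__linux__) && !defined(MEMORY_SANITIZER)
#include <sys/prctl.h>
static void set_thread_name(const char* name) {
	prctl(PR_SET_NAME, name, 0, 0, 0); /* kernel keeps at most 16 bytes incl. NUL */
}
#endif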
@@ -123,6 +123,7 @@ set(FDBSERVER_SRCS
	workloads/BackupCorrectness.actor.cpp
	workloads/BackupAndParallelRestoreCorrectness.actor.cpp
	workloads/ParallelRestore.actor.cpp
+	workloads/BackupToBlob.actor.cpp
	workloads/BackupToDBAbort.actor.cpp
	workloads/BackupToDBCorrectness.actor.cpp
	workloads/BackupToDBUpgrade.actor.cpp
@@ -184,6 +185,7 @@ set(FDBSERVER_SRCS
	workloads/RemoveServersSafely.actor.cpp
	workloads/ReportConflictingKeys.actor.cpp
	workloads/RestoreBackup.actor.cpp
+	workloads/RestoreFromBlob.actor.cpp
	workloads/Rollback.actor.cpp
	workloads/RyowCorrectness.actor.cpp
	workloads/RYWDisable.actor.cpp
@@ -464,6 +464,7 @@ ACTOR Future<Void> runWorkloadAsync( Database cx, WorkloadInterface workIface, T
			checkReq = req;
			if (!checkResult.present()) {
				try {
+					TraceEvent("TestChecking", workIface.id()).detail("Workload", workload->description());
					bool check = wait( timeoutError( workload->check(cx), workload->getCheckTimeout() ) );
					checkResult = CheckReply{ (!startResult.present() || !startResult.get().isError()) && check };
				} catch (Error& e) {
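timeoutError() (the flow helper used above) either returns the wrapped future's value or throws timed_out() once the deadline elapses. A minimal sketch of handling that explicitly:

ACTOR Future<bool> checkWithTimeout(Future<bool> check, double seconds) {
	try {
		bool ok = wait(timeoutError(check, seconds));
		return ok;
	} catch (Error& e) {
		if (e.code() == error_code_timed_out) return false; // treat a hung check as failure
		throw;
	}
}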
@@ -475,6 +476,7 @@ ACTOR Future<Void> runWorkloadAsync( Database cx, WorkloadInterface workIface, T
					    .detail("Workload", workload->description());
					//ok = false;
				}
+				TraceEvent("TestCheckComplete", workIface.id()).detail("Workload", workload->description());
			}

			sendResult( checkReq, checkResult );
@@ -387,7 +387,7 @@ struct BackupAndRestoreCorrectnessWorkload : TestWorkload {
			state Future<Void> cp = changePaused(cx, &backupAgent);
		}

-		// Increment the backup agent requets
+		// Increment the backup agent requests
		if (self->agentRequest) {
			BackupAndRestoreCorrectnessWorkload::backupAgentRequests ++;
		}
@@ -0,0 +1,65 @@
/*
 * BackupToBlob.actor.cpp
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "fdbrpc/simulator.h"
#include "fdbclient/BackupAgent.actor.h"
#include "fdbclient/BackupContainer.h"
#include "fdbserver/workloads/workloads.actor.h"
#include "flow/actorcompiler.h" // This must be the last #include.

struct BackupToBlobWorkload : TestWorkload {
	double backupAfter;
	Key backupTag;
	Standalone<StringRef> backupURL;
	int snapshotInterval = 100000;

	static constexpr const char* DESCRIPTION = "BackupToBlob";

	BackupToBlobWorkload(WorkloadContext const& wcx) : TestWorkload(wcx) {
		backupAfter = getOption(options, LiteralStringRef("backupAfter"), 10.0);
		backupTag = getOption(options, LiteralStringRef("backupTag"), BackupAgentBase::getDefaultTag());
		backupURL = getOption(options, LiteralStringRef("backupURL"), LiteralStringRef("http://0.0.0.0:10000"));
	}

	std::string description() const override { return DESCRIPTION; }

	Future<Void> setup(Database const& cx) override { return Void(); }

	ACTOR static Future<Void> _start(Database cx, BackupToBlobWorkload* self) {
		state FileBackupAgent backupAgent;
		state Standalone<VectorRef<KeyRangeRef>> backupRanges;
		backupRanges.push_back_deep(backupRanges.arena(), normalKeys);

		wait(delay(self->backupAfter));
		wait(backupAgent.submitBackup(cx, self->backupURL, self->snapshotInterval, self->backupTag.toString(),
		                              backupRanges));
		EBackupState backupStatus = wait(backupAgent.waitBackup(cx, self->backupTag.toString(), true));
		TraceEvent("BackupToBlob_BackupStatus").detail("Status", BackupAgentBase::getStateText(backupStatus));
		return Void();
	}

	Future<Void> start(Database const& cx) override { return clientId ? Void() : _start(cx, this); }

	Future<bool> check(Database const& cx) override { return true; }

	void getMetrics(std::vector<PerfMetric>& m) override {}
};

WorkloadFactory<BackupToBlobWorkload> BackupToBlobWorkloadFactory(BackupToBlobWorkload::DESCRIPTION);
@@ -30,6 +30,8 @@ struct RestoreBackupWorkload final : TestWorkload {

	FileBackupAgent backupAgent;
+	Reference<IBackupContainer> backupContainer;
	Future<Void> agentFuture;
+	double backupPollDelay = 1.0 / CLIENT_KNOBS->BACKUP_AGGREGATE_POLL_RATE;

	Standalone<StringRef> backupDir;
	Standalone<StringRef> tag;
@@ -72,7 +74,15 @@ struct RestoreBackupWorkload final : TestWorkload {
			    desc.contiguousLogEnd.present() ? desc.contiguousLogEnd.get() : invalidVersion)
			    .detail("TargetVersion", waitForVersion);
			if (desc.contiguousLogEnd.present() && desc.contiguousLogEnd.get() >= waitForVersion) {
-				wait(self->backupAgent.discontinueBackup(cx, self->tag));
+				try {
+					TraceEvent("DiscontinuingBackup");
+					wait(self->backupAgent.discontinueBackup(cx, self->tag));
+				} catch (Error& e) {
+					TraceEvent("ErrorDiscontinuingBackup").error(e);
+					if (e.code() != error_code_backup_unneeded) {
+						throw;
+					}
+				}
				return Void();
			}
			wait(delay(5.0));
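The added try/catch is the standard flow idiom for tolerating exactly one expected error code and rethrowing everything else. Extracted into a hedged standalone sketch (discontinueIgnoringUnneeded is hypothetical):

ACTOR Future<Void> discontinueIgnoringUnneeded(FileBackupAgent* agent, Database cx, Key tag) {
	try {
		wait(agent->discontinueBackup(cx, tag));
	} catch (Error& e) {
		if (e.code() != error_code_backup_unneeded) throw; // backup already stopped: fine
	}
	return Void();
}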
@@ -99,6 +109,7 @@ struct RestoreBackupWorkload final : TestWorkload {
	}

	ACTOR static Future<Void> _start(RestoreBackupWorkload* self, Database cx) {
+		self->agentFuture = self->backupAgent.run(cx, &self->backupPollDelay, CLIENT_KNOBS->SIM_BACKUP_TASKS_PER_AGENT);
		wait(delay(self->delayFor));
		wait(waitOnBackup(self, cx));
		wait(clearDatabase(cx));
@@ -0,0 +1,65 @@
/*
 * RestoreFromBlob.actor.cpp
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "fdbrpc/simulator.h"
#include "fdbclient/BackupAgent.actor.h"
#include "fdbclient/BackupContainer.h"
#include "fdbserver/workloads/workloads.actor.h"
#include "fdbserver/workloads/BulkSetup.actor.h"
#include "flow/actorcompiler.h" // This must be the last #include.

struct RestoreFromBlobWorkload : TestWorkload {
	double restoreAfter;
	Key backupTag;
	Standalone<StringRef> backupURL;
	bool waitForComplete;

	static constexpr const char* DESCRIPTION = "RestoreFromBlob";

	RestoreFromBlobWorkload(WorkloadContext const& wcx) : TestWorkload(wcx) {
		restoreAfter = getOption(options, LiteralStringRef("restoreAfter"), 10.0);
		backupTag = getOption(options, LiteralStringRef("backupTag"), BackupAgentBase::getDefaultTag());
		backupURL = getOption(options, LiteralStringRef("backupURL"), LiteralStringRef("http://0.0.0.0:10000"));
		waitForComplete = getOption(options, LiteralStringRef("waitForComplete"), true);
	}

	std::string description() const override { return DESCRIPTION; }

	Future<Void> setup(Database const& cx) override { return Void(); }

	ACTOR static Future<Void> _start(Database cx, RestoreFromBlobWorkload* self) {
		state FileBackupAgent backupAgent;
		state Standalone<VectorRef<KeyRangeRef>> restoreRanges;
		restoreRanges.push_back_deep(restoreRanges.arena(), normalKeys);

		wait(delay(self->restoreAfter));
		Version v =
		    wait(backupAgent.restore(cx, {}, self->backupTag, self->backupURL, restoreRanges, self->waitForComplete));
		return Void();
	}

	Future<Void> start(Database const& cx) override { return clientId ? Void() : _start(cx, this); }

	Future<bool> check(Database const& cx) override { return true; }

	void getMetrics(std::vector<PerfMetric>& m) override {}
};

WorkloadFactory<RestoreFromBlobWorkload> RestoreFromBlobWorkloadFactory(RestoreFromBlobWorkload::DESCRIPTION);
@@ -55,7 +55,7 @@ struct WriteDuringReadWorkload : TestWorkload {
		slowModeStart = getOption( options, LiteralStringRef("slowModeStart"), 1000.0 );
		numOps = getOption( options, LiteralStringRef("numOps"), 21 );
		rarelyCommit = getOption( options, LiteralStringRef("rarelyCommit"), false );
-		maximumTotalData = getOption( options, LiteralStringRef("maximumTotalData"), 7e6);
+		maximumTotalData = getOption( options, LiteralStringRef("maximumTotalData"), 3e6);
		minNode = getOption( options, LiteralStringRef("minNode"), 0);
		useSystemKeys = getOption( options, LiteralStringRef("useSystemKeys"), deterministicRandom()->random01() < 0.5);
		adjacentKeys = deterministicRandom()->random01() < 0.5;
@@ -2968,10 +2968,17 @@ extern "C" void criticalError(int exitCode, const char *type, const char *messag

extern void flushTraceFileVoid();

+#ifdef USE_GCOV
+extern "C" void __gcov_flush();
+#endif
+
extern "C" void flushAndExit(int exitCode) {
	flushTraceFileVoid();
	fflush(stdout);
	closeTraceFile();
+#ifdef USE_GCOV
+	__gcov_flush();
+#endif
#ifdef _WIN32
	// This function is documented as being asynchronous, but we suspect it might actually be synchronous in the
	// case that it is passed a handle to the current process. If not, then there may be cases where we escalate
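flushAndExit() can end the process without running atexit handlers, so a gcov build must write its coverage counters by hand, which is all the new USE_GCOV block does. The same idea as a hedged standalone sketch:

#include <unistd.h> // POSIX _exit
#ifdef USE_GCOV
extern "C" void __gcov_flush(); // gcc's coverage-flush hook (newer toolchains spell it __gcov_dump)
#endif
void flushCoverageAndExit(int code) {
#ifdef USE_GCOV
	__gcov_flush(); // persist .gcda files before the hard exit below skips atexit()
#endif
	_exit(code);
}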
@@ -3003,7 +3010,7 @@ ImageInfo getImageInfo(const void *symbol) {
	ImageInfo imageInfo;

#ifdef __linux__
-	link_map *linkMap;
+	link_map* linkMap = nullptr;
	int res = dladdr1(symbol, &info, (void**)&linkMap, RTLD_DL_LINKMAP);
#else
	int res = dladdr(symbol, &info);
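The nullptr initialization matters because glibc's dladdr1() returns 0 on failure and may leave the out-parameter untouched. A hedged sketch of the call in isolation (loadBaseOf is hypothetical):

#define _GNU_SOURCE
#include <dlfcn.h>
#include <link.h>
#include <cstdint>
static uintptr_t loadBaseOf(const void* symbol) {
	Dl_info info;
	link_map* lm = nullptr; // stays nullptr if dladdr1 fails
	if (dladdr1(symbol, &info, (void**)&lm, RTLD_DL_LINKMAP) == 0 || lm == nullptr) return 0;
	return (uintptr_t)lm->l_addr; // module load base
}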
@@ -26,7 +26,7 @@
#include <stdarg.h>
#include <cinttypes>

-#if (defined (__linux__) || defined (__FreeBSD__)) && defined(__AVX__)
+#if (defined(__linux__) || defined(__FreeBSD__)) && defined(__AVX__) && !defined(MEMORY_SANITIZER)
// For benchmarking; need a version of rte_memcpy that doesn't live in the same compilation unit as the test.
void * rte_memcpy_noinline(void *__restrict __dest, const void *__restrict __src, size_t __n) {
	return rte_memcpy(__dest, __src, __n);
@@ -41,7 +41,7 @@ __attribute__((visibility ("default"))) void *memcpy (void *__restrict __dest, c
void * rte_memcpy_noinline(void *__restrict __dest, const void *__restrict __src, size_t __n) {
	return memcpy(__dest, __src, __n);
}
-#endif // (defined (__linux__) || defined (__FreeBSD__)) && defined(__AVX__)
+#endif // (defined (__linux__) || defined (__FreeBSD__)) && defined(__AVX__) && !defined(MEMORY_SANITIZER)

INetwork *g_network = 0;
@@ -308,9 +308,8 @@ Future<U> mapAsync(Future<T> what, F actorFunc) {
}

//maps a vector of futures with an asynchronous function
-template<class T, class F>
-std::vector<Future<std::invoke_result_t<F, T>>> mapAsync(std::vector<Future<T>> const& what, F const& actorFunc)
-{
+template <class T, class F>
+auto mapAsync(std::vector<Future<T>> const& what, F const& actorFunc) {
	std::vector<std::invoke_result_t<F, T>> ret;
	ret.reserve(what.size());
	for (const auto& f : what) ret.push_back(mapAsync(f, actorFunc));
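The auto return type removes a restatement of the vector type that the body constructs anyway; the next hunk applies the same change to map(). The deduction pattern in a standalone, flow-free analogue (mapVec is hypothetical):

#include <type_traits>
#include <vector>
template <class T, class F>
auto mapVec(std::vector<T> const& in, F const& f) {
	std::vector<std::invoke_result_t<F, T>> out;
	out.reserve(in.size());
	for (auto const& x : in) out.push_back(f(x));
	return out; // deduced as std::vector<std::invoke_result_t<F, T>>
}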
@@ -367,9 +366,8 @@ Future<std::invoke_result_t<F, T>> map(Future<T> what, F func)
}

//maps a vector of futures
-template<class T, class F>
-std::vector<Future<std::invoke_result_t<F, T>>> map(std::vector<Future<T>> const& what, F const& func)
-{
+template <class T, class F>
+auto map(std::vector<Future<T>> const& what, F const& func) {
	std::vector<Future<std::invoke_result_t<F, T>>> ret;
	ret.reserve(what.size());
	for (const auto& f : what) ret.push_back(map(f, func));
@@ -443,9 +441,7 @@ Future<Void> asyncFilter( FutureStream<T> input, F actorPred, PromiseStream<T> o
	loop {
		try {
			choose {
-				when ( T nextInput = waitNext(input) ) {
-					futures.push_back( std::pair<T, Future<bool>>(nextInput, actorPred(nextInput)) );
-				}
+				when(T nextInput = waitNext(input)) { futures.emplace_back(nextInput, actorPred(nextInput)); }
				when ( bool pass = wait( futures.size() == 0 ? Never() : futures.front().second ) ) {
					if(pass) output.send(futures.front().first);
					futures.pop_front();
@@ -1309,7 +1305,8 @@ private:
	Promise<Void> broken_on_destruct;

	ACTOR static Future<Void> takeActor(FlowLock* lock, TaskPriority taskID, int64_t amount) {
-		state std::list<std::pair<Promise<Void>, int64_t>>::iterator it = lock->takers.insert(lock->takers.end(), std::make_pair(Promise<Void>(), amount));
+		state std::list<std::pair<Promise<Void>, int64_t>>::iterator it =
+		    lock->takers.emplace(lock->takers.end(), Promise<Void>(), amount);

		try {
			wait( it->first.getFuture() );
@@ -1366,7 +1363,7 @@ struct NotifiedInt {
	Future<Void> whenAtLeast( int64_t limit ) {
		if (val >= limit) return Void();
		Promise<Void> p;
-		waiting.push( std::make_pair(limit,p) );
+		waiting.emplace(limit, p);
		return p.getFuture();
	}

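The previous three hunks share one refactor: construct pair elements in place instead of via std::make_pair temporaries. Illustrated standalone:

#include <list>
#include <queue>
#include <utility>
void emplaceExamples() {
	std::list<std::pair<int, long>> takers;
	takers.emplace(takers.end(), 0, 42L); // was: insert(end(), std::make_pair(0, 42L))

	std::queue<std::pair<int, int>> waiting;
	waiting.emplace(1, 2);                // was: push(std::make_pair(1, 2))
}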
@@ -2926,7 +2926,7 @@ static class VDSOInitHelper {
/* Each function is empty and called (via a macro) only in debug mode.
   The arguments are captured by dynamic tools at runtime. */

-#if DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL == 0 && !defined(__native_client__) && !__has_feature(thread_sanitizer)
+#if DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL == 0 && !defined(__native_client__)

#if __has_feature(memory_sanitizer)
#include <sanitizer/msan_interface.h>
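__has_feature is a Clang builtin, so headers like this one that must also compile under GCC conventionally define a zero fallback before using it. A hedged sketch of the guard pattern (UNPOISON is a hypothetical macro):

#ifndef __has_feature
#define __has_feature(x) 0 // GCC and MSVC do not define __has_feature
#endif
#if __has_feature(memory_sanitizer)
#include <sanitizer/msan_interface.h>
#define UNPOISON(p, n) __msan_unpoison((p), (n))
#else
#define UNPOISON(p, n) ((void)0)
#endif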
@ -43,7 +43,6 @@ if(WITH_PYTHON)
|
|||
add_fdb_test(TEST_FILES BackupContainers.txt IGNORE)
|
||||
add_fdb_test(TEST_FILES BandwidthThrottle.txt IGNORE)
|
||||
add_fdb_test(TEST_FILES BigInsert.txt IGNORE)
|
||||
add_fdb_test(TEST_FILES BlobStore.txt IGNORE)
|
||||
add_fdb_test(TEST_FILES ConsistencyCheck.txt IGNORE)
|
||||
add_fdb_test(TEST_FILES DDMetricsExclude.txt IGNORE)
|
||||
add_fdb_test(TEST_FILES DataDistributionMetrics.txt IGNORE)
|
||||
|
@@ -76,6 +75,7 @@ if(WITH_PYTHON)
	add_fdb_test(TEST_FILES RedwoodPerfPrefixCompression.txt IGNORE)
	add_fdb_test(TEST_FILES RedwoodPerfSequentialInsert.txt IGNORE)
	add_fdb_test(TEST_FILES RocksDBTest.txt IGNORE)
+	add_fdb_test(TEST_FILES S3BlobStore.txt IGNORE)
	add_fdb_test(TEST_FILES SampleNoSimAttrition.txt IGNORE)
	if (NOT USE_UBSAN) # TODO re-enable in UBSAN after https://github.com/apple/foundationdb/issues/2410 is resolved
		add_fdb_test(TEST_FILES SimpleExternalTest.txt)
@@ -108,6 +108,7 @@ if(WITH_PYTHON)
	add_fdb_test(TEST_FILES fast/AtomicBackupToDBCorrectness.toml)
	add_fdb_test(TEST_FILES fast/AtomicOps.toml)
	add_fdb_test(TEST_FILES fast/AtomicOpsApiCorrectness.toml)
+	add_fdb_test(TEST_FILES fast/BackupBlobCorrectness.toml IGNORE)
	add_fdb_test(TEST_FILES fast/BackupCorrectness.toml)
	add_fdb_test(TEST_FILES fast/BackupCorrectnessClean.toml)
	add_fdb_test(TEST_FILES fast/BackupToDBCorrectness.toml)
@@ -200,8 +201,8 @@ if(WITH_PYTHON)
		TEST_FILES restarting/from_5.2.0/ClientTransactionProfilingCorrectness-1.txt
		restarting/from_5.2.0/ClientTransactionProfilingCorrectness-2.txt)
	add_fdb_test(
-		TEST_FILES restarting/from_7.0.0/UpgradeAndBackupRestore-1.txt
-		restarting/from_7.0.0/UpgradeAndBackupRestore-2.txt IGNORE)
+		TEST_FILES restarting/from_7.0.0/UpgradeAndBackupRestore-1.toml
+		restarting/from_7.0.0/UpgradeAndBackupRestore-2.toml)
	add_fdb_test(
		TEST_FILES restarting/to_6.3.5/CycleTestRestart-1.txt
		restarting/to_6.3.5/CycleTestRestart-2.txt)
@@ -0,0 +1,107 @@
[[test]]
testTitle = 'Cycle'
clearAfterTest = 'false'
simBackupAgents = 'BackupToFile'

    [[test.workload]]
    testName = 'Cycle'
    nodeCount = 3000
    testDuration = 10.0
    expectedRate = 0

    [[test.workload]]
    testName = 'RandomClogging'
    testDuration = 10.0

    [[test.workload]]
    testName = 'Rollback'
    meanDelay = 5.0
    testDuration = 10.0

    [[test.workload]]
    testName = 'Attrition'
    machinesToKill = 10
    machinesToLeave = 3
    reboot = true
    testDuration = 10.0

    [[test.workload]]
    testName = 'Attrition'
    machinesToKill = 10
    machinesToLeave = 3
    reboot = true
    testDuration = 10.0

[[test]]
testTitle = 'Backup'

    [[test.workload]]
    testName = 'BackupToBlob'
    backupAfter = 0.0
    backupTag = 'default'
    backupURL = 'azure://0.0.0.0:10000/devstoreaccount1/test_container/'

    [[test.workload]]
    testName = 'RandomClogging'
    testDuration = 10.0

    [[test.workload]]
    testName = 'Rollback'
    meanDelay = 5.0
    testDuration = 10.0

    [[test.workload]]
    testName = 'Attrition'
    machinesToKill = 10
    machinesToLeave = 3
    reboot = true
    testDuration = 10.0

    [[test.workload]]
    testName = 'Attrition'
    machinesToKill = 10
    machinesToLeave = 3
    reboot = true
    testDuration = 10.0

[[test]]
testTitle = 'Restore'
clearAfterTest = 'false'

    [[test.workload]]
    testName = 'RestoreFromBlob'
    restoreAfter = 0.0
    backupTag = 'default'
    backupURL = 'azure://0.0.0.0:10000/devstoreaccount1/test_container/'

    [[test.workload]]
    testName = 'RandomClogging'
    testDuration = 60.0

    [[test.workload]]
    testName = 'Rollback'
    meanDelay = 5.0
    testDuration = 10.0

    [[test.workload]]
    testName = 'Attrition'
    machinesToKill = 10
    machinesToLeave = 3
    reboot = true
    testDuration = 10.0

    [[test.workload]]
    testName = 'Attrition'
    machinesToKill = 10
    machinesToLeave = 3
    reboot = true
    testDuration = 10.0

[[test]]
testTitle = 'CycleCheck'
checkOnly = 'true'

    [[test.workload]]
    testName = 'Cycle'
    nodeCount = 3000
    expectedRate = 0
@@ -0,0 +1,55 @@
[[test]]
testTitle = 'SubmitBackup'
simBackupAgents = 'BackupToFile'
clearAfterTest = false
runConsistencyCheck = false

    [[test.workload]]
    testName = 'SubmitBackup'
    delayFor = 0
    stopWhenDone = false

[[test]]
testTitle = 'FirstCycleTest'
clearAfterTest = false
runConsistencyCheck = false

    [[test.workload]]
    testName = 'Cycle'
    nodeCount = 30000
    transactionsPerSecond = 2500.0
    testDuration = 30.0
    expectedRate = 0
    keyPrefix = 'BeforeRestart'

    [[test.workload]]
    testName = 'RandomClogging'
    testDuration = 90.0

    [[test.workload]]
    testName = 'Rollback'
    meanDelay = 90.0
    testDuration = 90.0

    [[test.workload]]
    testName = 'Attrition'
    machinesToKill = 10
    machinesToLeave = 3
    reboot = true
    testDuration = 90.0

    [[test.workload]]
    testName = 'Attrition'
    machinesToKill = 10
    machinesToLeave = 3
    reboot = true
    testDuration = 90.0

[[test]]
testTitle = 'SaveDatabase'
clearAfterTest = false

    [[test.workload]]
    testName = 'SaveAndKill'
    restartInfoLocation = 'simfdb/restartInfo.ini'
    testDuration = 30.0
@@ -1,43 +0,0 @@
testTitle=SubmitBackup
simBackupAgents=BackupToFile
clearAfterTest = false

testName=SubmitBackup
delayFor = 0
stopWhenDone = false

testTitle=FirstCycleTest
clearAfterTest=false

testName=Cycle
nodeCount = 30000
transactionsPerSecond = 2500.0
testDuration = 30.0
expectedRate = 0
keyPrefix=BeforeRestart

testName=RandomClogging
testDuration = 90.0

testName=Rollback
meanDelay = 90.0
testDuration = 90.0

testName=Attrition
machinesToKill = 10
machinesToLeave = 3
reboot = true
testDuration = 90.0

testName=Attrition
machinesToKill = 10
machinesToLeave = 3
reboot = true
testDuration = 90.0

testTitle=SaveDatabase
clearAfterTest = false

testName=SaveAndKill
restartInfoLocation=simfdb/restartInfo.ini
testDuration=30.0
@@ -0,0 +1,61 @@
[[test]]
testTitle = 'SecondCycleTest'
simBackupAgents = 'BackupToFile'
clearAfterTest = false
runConsistencyCheck = false

    [[test.workload]]
    testName = 'Cycle'
    nodeCount = 30000
    transactionsPerSecond = 2500.0
    testDuration = 30.0
    expectedRate = 0
    keyPrefix = 'AfterRestart'

    [[test.workload]]
    testName = 'RandomClogging'
    testDuration = 90.0

    [[test.workload]]
    testName = 'Rollback'
    meanDelay = 90.0
    testDuration = 90.0

    [[test.workload]]
    testName = 'Attrition'
    machinesToKill = 10
    machinesToLeave = 3
    reboot = true
    testDuration = 90.0

    [[test.workload]]
    testName = 'Attrition'
    machinesToKill = 10
    machinesToLeave = 3
    reboot = true
    testDuration = 90.0

[[test]]
testTitle = 'RestoreBackup'
simBackupAgents = 'BackupToFile'
clearAfterTest = false

    [[test.workload]]
    testName = 'RestoreBackup'
    tag = 'default'

[[test]]
testTitle = 'CheckCycles'
checkOnly = true

    [[test.workload]]
    testName = 'Cycle'
    nodeCount = 30000
    keyPrefix = 'AfterRestart'
    expectedRate = 0

    [[test.workload]]
    testName = 'Cycle'
    nodeCount = 30000
    keyPrefix = 'BeforeRestart'
    expectedRate = 0
@@ -1,56 +0,0 @@
testTitle=SecondCycleTest
simBackupAgents=BackupToFile
clearAfterTest=false

testName=Cycle
nodeCount = 30000
transactionsPerSecond = 2500.0
testDuration = 30.0
expectedRate = 0
keyPrefix=AfterRestart

testName=Cycle
nodeCount = 30000
transactionsPerSecond = 2500.0
testDuration = 30.0
expectedRate = 0
keyPrefix=BeforeRestart

testName=RandomClogging
testDuration = 90.0

testName=Rollback
meanDelay = 90.0
testDuration = 90.0

testName=Attrition
machinesToKill = 10
machinesToLeave = 3
reboot = true
testDuration = 90.0

testName=Attrition
machinesToKill = 10
machinesToLeave = 3
reboot = true
testDuration = 90.0

testTitle=RestoreBackup
simBackupAgents=BackupToFile
clearAfterTest=false

testName=RestoreBackup
tag=default

testTitle=CheckCycles
checkOnly=true

testName=Cycle
nodeCount=30000
keyPrefix=AfterRestart
expectedRate=0

testName=Cycle
nodeCount = 30000
keyPrefix=BeforeRestart
expectedRate = 0