Moved BackupContainerS3BlobStore into its own files
parent e15a268548
commit e1d58d3c66
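For context, a minimal usage sketch (not part of this commit): it shows how a blobstore URL resolves to the relocated BackupContainerS3BlobStore through the IBackupContainer factory touched below. The URL, credentials, backup name, and the helper openExampleContainer() are illustrative placeholders, not code from the repository.

#include "fdbclient/BackupContainer.h"

// Illustrative helper only; the URL values are placeholders.
Reference<IBackupContainer> openExampleContainer() {
	// The 'bucket' URL parameter is required for blobstore containers (see getURLFormat()).
	std::string url = "blobstore://key:secret@s3.example.com:443/my_backup?bucket=FDB_BACKUPS_V2";
	// openContainer() dispatches on the URL scheme; for "blobstore://" it now constructs a
	// BackupContainerS3BlobStore (declared in BackupContainerS3BlobStore.h) behind the
	// IBackupContainer interface.
	return IBackupContainer::openContainer(url);
}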
@@ -39,6 +39,7 @@
 #include "fdbclient/BackupContainerAzureBlobStore.h"
 #include "fdbclient/BackupContainerFileSystem.actor.h"
 #include "fdbclient/BackupContainerLocalDirectory.h"
+#include "fdbclient/BackupContainerS3BlobStore.h"
 #include "fdbclient/Status.h"
 #include "fdbclient/SystemData.h"
 #include "fdbclient/ReadYourWrites.h"
@@ -242,7 +243,7 @@ std::string IBackupContainer::lastOpenError;
 std::vector<std::string> IBackupContainer::getURLFormats() {
 	std::vector<std::string> formats;
 	formats.push_back(BackupContainerLocalDirectory::getURLFormat());
-	formats.push_back(BackupContainerBlobStore::getURLFormat());
+	formats.push_back(BackupContainerS3BlobStore::getURLFormat());
 	return formats;
 }
 
@@ -268,7 +269,7 @@ Reference<IBackupContainer> IBackupContainer::openContainer(std::string url) {
 			if (resource.empty()) throw backup_invalid_url();
 			for (auto c : resource)
 				if (!isalnum(c) && c != '_' && c != '-' && c != '.' && c != '/') throw backup_invalid_url();
-			r = Reference<IBackupContainer>(new BackupContainerBlobStore(bstore, resource, backupParams));
+			r = Reference<IBackupContainer>(new BackupContainerS3BlobStore(bstore, resource, backupParams));
 		} else if (u.startsWith(LiteralStringRef("http"))) {
 			r = Reference<IBackupContainer>(new BackupContainerAzureBlobStore());
 		} else {
@@ -314,9 +315,9 @@ ACTOR Future<std::vector<std::string>> listContainers_impl(std::string baseURL)
 			}
 
 			// Create a dummy container to parse the backup-specific parameters from the URL and get a final bucket name
-			BackupContainerBlobStore dummy(bstore, "dummy", backupParams);
+			BackupContainerS3BlobStore dummy(bstore, "dummy", backupParams);
 
-			std::vector<std::string> results = wait(BackupContainerBlobStore::listURLs(bstore, dummy.getBucket()));
+			std::vector<std::string> results = wait(BackupContainerS3BlobStore::listURLs(bstore, dummy.getBucket()));
 			return results;
 		} else {
 			IBackupContainer::lastOpenError = "invalid URL prefix";
@@ -1311,9 +1311,6 @@ BackupContainerFileSystem::VersionProperty BackupContainerFileSystem::logType()
 	return { Reference<BackupContainerFileSystem>::addRef(this), "mutation_log_type" };
 }
 
-const std::string BackupContainerBlobStore::DATAFOLDER = "data";
-const std::string BackupContainerBlobStore::INDEXFOLDER = "backups";
-
 namespace backup_test {
 
 int chooseFileSize(std::vector<int>& sizes) {
@@ -276,165 +276,5 @@ public:
 	VersionProperty logType();
 };
 
-class BackupContainerBlobStore final : public BackupContainerFileSystem, ReferenceCounted<BackupContainerBlobStore> {
-private:
-	// Backup files to under a single folder prefix with subfolders for each named backup
-	static const std::string DATAFOLDER;
-
-	// Indexfolder contains keys for which user-named backups exist. Backup names can contain an arbitrary
-	// number of slashes so the backup names are kept in a separate folder tree from their actual data.
-	static const std::string INDEXFOLDER;
-
-	Reference<BlobStoreEndpoint> m_bstore;
-	std::string m_name;
-
-	// All backup data goes into a single bucket
-	std::string m_bucket;
-
-	std::string dataPath(const std::string path) { return DATAFOLDER + "/" + m_name + "/" + path; }
-
-	// Get the path of the backups's index entry
-	std::string indexEntry() { return INDEXFOLDER + "/" + m_name; }
-
-public:
-	BackupContainerBlobStore(Reference<BlobStoreEndpoint> bstore, std::string name,
-	                         const BlobStoreEndpoint::ParametersT& params)
-	  : m_bstore(bstore), m_name(name), m_bucket("FDB_BACKUPS_V2") {
-
-		// Currently only one parameter is supported, "bucket"
-		for (auto& kv : params) {
-			if (kv.first == "bucket") {
-				m_bucket = kv.second;
-				continue;
-			}
-			TraceEvent(SevWarn, "BackupContainerBlobStoreInvalidParameter")
-			    .detail("Name", kv.first)
-			    .detail("Value", kv.second);
-			IBackupContainer::lastOpenError = format("Unknown URL parameter: '%s'", kv.first.c_str());
-			throw backup_invalid_url();
-		}
-	}
-
-	void addref() override { return ReferenceCounted<BackupContainerBlobStore>::addref(); }
-	void delref() override { return ReferenceCounted<BackupContainerBlobStore>::delref(); }
-
-	static std::string getURLFormat() {
-		return BlobStoreEndpoint::getURLFormat(true) + " (Note: The 'bucket' parameter is required.)";
-	}
-
-	Future<Reference<IAsyncFile>> readFile(std::string path) final {
-		return Reference<IAsyncFile>(new AsyncFileReadAheadCache(
-		    Reference<IAsyncFile>(new AsyncFileBlobStoreRead(m_bstore, m_bucket, dataPath(path))),
-		    m_bstore->knobs.read_block_size, m_bstore->knobs.read_ahead_blocks,
-		    m_bstore->knobs.concurrent_reads_per_file, m_bstore->knobs.read_cache_blocks_per_file));
-	}
-
-	ACTOR static Future<std::vector<std::string>> listURLs(Reference<BlobStoreEndpoint> bstore, std::string bucket) {
-		state std::string basePath = INDEXFOLDER + '/';
-		BlobStoreEndpoint::ListResult contents = wait(bstore->listObjects(bucket, basePath));
-		std::vector<std::string> results;
-		for (auto& f : contents.objects) {
-			results.push_back(
-			    bstore->getResourceURL(f.name.substr(basePath.size()), format("bucket=%s", bucket.c_str())));
-		}
-		return results;
-	}
-
-	class BackupFile : public IBackupFile, ReferenceCounted<BackupFile> {
-	public:
-		BackupFile(std::string fileName, Reference<IAsyncFile> file) : IBackupFile(fileName), m_file(file) {}
-
-		Future<Void> append(const void* data, int len) {
-			Future<Void> r = m_file->write(data, len, m_offset);
-			m_offset += len;
-			return r;
-		}
-
-		Future<Void> finish() {
-			Reference<BackupFile> self = Reference<BackupFile>::addRef(this);
-			return map(m_file->sync(), [=](Void _) {
-				self->m_file.clear();
-				return Void();
-			});
-		}
-
-		void addref() final { return ReferenceCounted<BackupFile>::addref(); }
-		void delref() final { return ReferenceCounted<BackupFile>::delref(); }
-
-	private:
-		Reference<IAsyncFile> m_file;
-	};
-
-	Future<Reference<IBackupFile>> writeFile(const std::string& path) final {
-		return Reference<IBackupFile>(new BackupFile(
-		    path, Reference<IAsyncFile>(new AsyncFileBlobStoreWrite(m_bstore, m_bucket, dataPath(path)))));
-	}
-
-	Future<Void> deleteFile(std::string path) final { return m_bstore->deleteObject(m_bucket, dataPath(path)); }
-
-	ACTOR static Future<FilesAndSizesT> listFiles_impl(Reference<BackupContainerBlobStore> bc, std::string path,
-	                                                   std::function<bool(std::string const&)> pathFilter) {
-		// pathFilter expects container based paths, so create a wrapper which converts a raw path
-		// to a container path by removing the known backup name prefix.
-		state int prefixTrim = bc->dataPath("").size();
-		std::function<bool(std::string const&)> rawPathFilter = [=](const std::string& folderPath) {
-			ASSERT(folderPath.size() >= prefixTrim);
-			return pathFilter(folderPath.substr(prefixTrim));
-		};
-
-		state BlobStoreEndpoint::ListResult result = wait(bc->m_bstore->listObjects(
-		    bc->m_bucket, bc->dataPath(path), '/', std::numeric_limits<int>::max(), rawPathFilter));
-		FilesAndSizesT files;
-		for (auto& o : result.objects) {
-			ASSERT(o.name.size() >= prefixTrim);
-			files.push_back({ o.name.substr(prefixTrim), o.size });
-		}
-		return files;
-	}
-
-	Future<FilesAndSizesT> listFiles(std::string path, std::function<bool(std::string const&)> pathFilter) final {
-		return listFiles_impl(Reference<BackupContainerBlobStore>::addRef(this), path, pathFilter);
-	}
-
-	ACTOR static Future<Void> create_impl(Reference<BackupContainerBlobStore> bc) {
-		wait(bc->m_bstore->createBucket(bc->m_bucket));
-
-		// Check/create the index entry
-		bool exists = wait(bc->m_bstore->objectExists(bc->m_bucket, bc->indexEntry()));
-		if (!exists) {
-			wait(bc->m_bstore->writeEntireFile(bc->m_bucket, bc->indexEntry(), ""));
-		}
-
-		return Void();
-	}
-
-	Future<Void> create() final { return create_impl(Reference<BackupContainerBlobStore>::addRef(this)); }
-
-	// The container exists if the index entry in the blob bucket exists
-	Future<bool> exists() final { return m_bstore->objectExists(m_bucket, indexEntry()); }
-
-	ACTOR static Future<Void> deleteContainer_impl(Reference<BackupContainerBlobStore> bc, int* pNumDeleted) {
-		bool e = wait(bc->exists());
-		if (!e) {
-			TraceEvent(SevWarnAlways, "BackupContainerDoesNotExist").detail("URL", bc->getURL());
-			throw backup_does_not_exist();
-		}
-
-		// First delete everything under the data prefix in the bucket
-		wait(bc->m_bstore->deleteRecursively(bc->m_bucket, bc->dataPath(""), pNumDeleted));
-
-		// Now that all files are deleted, delete the index entry
-		wait(bc->m_bstore->deleteObject(bc->m_bucket, bc->indexEntry()));
-
-		return Void();
-	}
-
-	Future<Void> deleteContainer(int* pNumDeleted) final {
-		return deleteContainer_impl(Reference<BackupContainerBlobStore>::addRef(this), pNumDeleted);
-	}
-
-	std::string getBucket() const { return m_bucket; }
-};
-
 #include "flow/unactorcompiler.h"
 #endif
@@ -0,0 +1,197 @@
+/*
+ * BackupContainerS3BlobStore.actor.cpp
+ *
+ * This source file is part of the FoundationDB open source project
+ *
+ * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "fdbclient/BackupContainerS3BlobStore.h"
+
+#include "flow/actorcompiler.h" // This must be the last #include.
+
+namespace {
+
+// Backup files to under a single folder prefix with subfolders for each named backup
+const std::string DATAFOLDER = "data";
+
+// Indexfolder contains keys for which user-named backups exist. Backup names can contain an arbitrary
+// number of slashes so the backup names are kept in a separate folder tree from their actual data.
+const std::string INDEXFOLDER = "backups";
+
+ACTOR static Future<std::vector<std::string>> listURLs_impl(Reference<BlobStoreEndpoint> bstore, std::string bucket) {
+	state std::string basePath = INDEXFOLDER + '/';
+	BlobStoreEndpoint::ListResult contents = wait(bstore->listObjects(bucket, basePath));
+	std::vector<std::string> results;
+	for (auto& f : contents.objects) {
+		results.push_back(bstore->getResourceURL(f.name.substr(basePath.size()), format("bucket=%s", bucket.c_str())));
+	}
+	return results;
+}
+
+class BackupFile : public IBackupFile, ReferenceCounted<BackupFile> {
+public:
+	BackupFile(std::string fileName, Reference<IAsyncFile> file) : IBackupFile(fileName), m_file(file) {}
+
+	Future<Void> append(const void* data, int len) {
+		Future<Void> r = m_file->write(data, len, m_offset);
+		m_offset += len;
+		return r;
+	}
+
+	Future<Void> finish() {
+		Reference<BackupFile> self = Reference<BackupFile>::addRef(this);
+		return map(m_file->sync(), [=](Void _) {
+			self->m_file.clear();
+			return Void();
+		});
+	}
+
+	void addref() final { return ReferenceCounted<BackupFile>::addref(); }
+	void delref() final { return ReferenceCounted<BackupFile>::delref(); }
+
+private:
+	Reference<IAsyncFile> m_file;
+};
+
+ACTOR static Future<BackupContainerFileSystem::FilesAndSizesT> listFiles_impl(
+    Reference<BackupContainerS3BlobStore> bc, std::string path, std::function<bool(std::string const&)> pathFilter) {
+	// pathFilter expects container based paths, so create a wrapper which converts a raw path
+	// to a container path by removing the known backup name prefix.
+	state int prefixTrim = bc->dataPath("").size();
+	std::function<bool(std::string const&)> rawPathFilter = [=](const std::string& folderPath) {
+		ASSERT(folderPath.size() >= prefixTrim);
+		return pathFilter(folderPath.substr(prefixTrim));
+	};
+
+	state BlobStoreEndpoint::ListResult result = wait(bc->m_bstore->listObjects(
+	    bc->m_bucket, bc->dataPath(path), '/', std::numeric_limits<int>::max(), rawPathFilter));
+	BackupContainerFileSystem::FilesAndSizesT files;
+	for (auto& o : result.objects) {
+		ASSERT(o.name.size() >= prefixTrim);
+		files.push_back({ o.name.substr(prefixTrim), o.size });
+	}
+	return files;
+}
+
+ACTOR static Future<Void> create_impl(Reference<BackupContainerS3BlobStore> bc) {
+	wait(bc->m_bstore->createBucket(bc->m_bucket));
+
+	// Check/create the index entry
+	bool exists = wait(bc->m_bstore->objectExists(bc->m_bucket, bc->indexEntry()));
+	if (!exists) {
+		wait(bc->m_bstore->writeEntireFile(bc->m_bucket, bc->indexEntry(), ""));
+	}
+
+	return Void();
+}
+
+ACTOR static Future<Void> deleteContainer_impl(Reference<BackupContainerS3BlobStore> bc, int* pNumDeleted) {
+	bool e = wait(bc->exists());
+	if (!e) {
+		TraceEvent(SevWarnAlways, "BackupContainerDoesNotExist").detail("URL", bc->getURL());
+		throw backup_does_not_exist();
+	}
+
+	// First delete everything under the data prefix in the bucket
+	wait(bc->m_bstore->deleteRecursively(bc->m_bucket, bc->dataPath(""), pNumDeleted));
+
+	// Now that all files are deleted, delete the index entry
+	wait(bc->m_bstore->deleteObject(bc->m_bucket, bc->indexEntry()));
+
+	return Void();
+}
+
+} // namespace
+
+std::string BackupContainerS3BlobStore::dataPath(const std::string path) {
+	return DATAFOLDER + "/" + m_name + "/" + path;
+}
+
+// Get the path of the backups's index entry
+std::string BackupContainerS3BlobStore::indexEntry() {
+	return INDEXFOLDER + "/" + m_name;
+}
+
+BackupContainerS3BlobStore::BackupContainerS3BlobStore(Reference<BlobStoreEndpoint> bstore, std::string name,
+                                                       const BlobStoreEndpoint::ParametersT& params)
+  : m_bstore(bstore), m_name(name), m_bucket("FDB_BACKUPS_V2") {
+
+	// Currently only one parameter is supported, "bucket"
+	for (auto& kv : params) {
+		if (kv.first == "bucket") {
+			m_bucket = kv.second;
+			continue;
+		}
+		TraceEvent(SevWarn, "BackupContainerS3BlobStoreInvalidParameter")
+		    .detail("Name", kv.first)
+		    .detail("Value", kv.second);
+		IBackupContainer::lastOpenError = format("Unknown URL parameter: '%s'", kv.first.c_str());
+		throw backup_invalid_url();
+	}
+}
+
+void BackupContainerS3BlobStore::addref() {
+	return ReferenceCounted<BackupContainerS3BlobStore>::addref();
+}
+void BackupContainerS3BlobStore::delref() {
+	return ReferenceCounted<BackupContainerS3BlobStore>::delref();
+}
+
+std::string BackupContainerS3BlobStore::getURLFormat() {
+	return BlobStoreEndpoint::getURLFormat(true) + " (Note: The 'bucket' parameter is required.)";
+}
+
+Future<Reference<IAsyncFile>> BackupContainerS3BlobStore::readFile(std::string path) {
+	return Reference<IAsyncFile>(new AsyncFileReadAheadCache(
+	    Reference<IAsyncFile>(new AsyncFileBlobStoreRead(m_bstore, m_bucket, dataPath(path))),
+	    m_bstore->knobs.read_block_size, m_bstore->knobs.read_ahead_blocks, m_bstore->knobs.concurrent_reads_per_file,
+	    m_bstore->knobs.read_cache_blocks_per_file));
+}
+
+Future<std::vector<std::string>> BackupContainerS3BlobStore::listURLs(Reference<BlobStoreEndpoint> bstore,
+                                                                      std::string bucket) {
+	return listURLs_impl(bstore, bucket);
+}
+
+Future<Reference<IBackupFile>> BackupContainerS3BlobStore::writeFile(const std::string& path) {
+	return Reference<IBackupFile>(
+	    new BackupFile(path, Reference<IAsyncFile>(new AsyncFileBlobStoreWrite(m_bstore, m_bucket, dataPath(path)))));
+}
+
+Future<Void> BackupContainerS3BlobStore::deleteFile(std::string path) {
+	return m_bstore->deleteObject(m_bucket, dataPath(path));
+}
+
+Future<BackupContainerFileSystem::FilesAndSizesT> BackupContainerS3BlobStore::listFiles(
+    std::string path, std::function<bool(std::string const&)> pathFilter) {
+	return listFiles_impl(Reference<BackupContainerS3BlobStore>::addRef(this), path, pathFilter);
+}
+
+Future<Void> BackupContainerS3BlobStore::create() {
+	return create_impl(Reference<BackupContainerS3BlobStore>::addRef(this));
+}
+
+Future<bool> BackupContainerS3BlobStore::exists() {
+	return m_bstore->objectExists(m_bucket, indexEntry());
+}
+
+Future<Void> BackupContainerS3BlobStore::deleteContainer(int* pNumDeleted) {
+	return deleteContainer_impl(Reference<BackupContainerS3BlobStore>::addRef(this), pNumDeleted);
+}
+
+std::string BackupContainerS3BlobStore::getBucket() const {
+	return m_bucket;
+}
@@ -0,0 +1,71 @@
+/*
+ * BackupContainerS3BlobStore.h
+ *
+ * This source file is part of the FoundationDB open source project
+ *
+ * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FDBCLIENT_BACKUP_CONTAINER_S3_BLOBSTORE_H
+#define FDBCLIENT_BACKUP_CONTAINER_S3_BLOBSTORE_H
+#pragma once
+
+#include "fdbclient/BackupContainerFileSystem.actor.h"
+
+class BackupContainerS3BlobStore final : public BackupContainerFileSystem,
+                                         ReferenceCounted<BackupContainerS3BlobStore> {
+public:
+	// TODO: Encapsulate these?
+	Reference<BlobStoreEndpoint> m_bstore;
+	std::string m_name;
+
+	// All backup data goes into a single bucket
+	std::string m_bucket;
+
+	std::string dataPath(const std::string path);
+
+	// Get the path of the backups's index entry
+	std::string indexEntry();
+
+public:
+	BackupContainerS3BlobStore(Reference<BlobStoreEndpoint> bstore, std::string name,
+	                           const BlobStoreEndpoint::ParametersT& params);
+
+	void addref() override;
+	void delref() override;
+
+	static std::string getURLFormat();
+
+	Future<Reference<IAsyncFile>> readFile(std::string path) final;
+
+	static Future<std::vector<std::string>> listURLs(Reference<BlobStoreEndpoint> bstore, std::string bucket);
+
+	Future<Reference<IBackupFile>> writeFile(const std::string& path) final;
+
+	Future<Void> deleteFile(std::string path) final;
+
+	Future<FilesAndSizesT> listFiles(std::string path, std::function<bool(std::string const&)> pathFilter) final;
+
+	Future<Void> create() final;
+
+	// The container exists if the index entry in the blob bucket exists
+	Future<bool> exists() final;
+
+	Future<Void> deleteContainer(int* pNumDeleted) final;
+
+	std::string getBucket() const;
+};
+
+#endif
@@ -14,6 +14,8 @@ set(FDBCLIENT_SRCS
   BackupContainerFileSystem.actor.h
   BackupContainerLocalDirectory.actor.cpp
   BackupContainerLocalDirectory.h
+  BackupContainerS3BlobStore.actor.cpp
+  BackupContainerS3BlobStore.h
   BlobStore.actor.cpp
   ClientLogEvents.h
   ClientWorkerInterface.h