Merge remote-tracking branch 'origin/master' into add-vectorref-clear

commit 03ce46bb5d
.clang-format
@@ -11,14 +11,14 @@ AllowAllParametersOfDeclarationOnNextLine: false
 AllowShortBlocksOnASingleLine: false
 AllowShortCaseLabelsOnASingleLine: false
 AllowShortFunctionsOnASingleLine: Inline
-AllowShortIfStatementsOnASingleLine: true
-AllowShortLoopsOnASingleLine: true
+AllowShortIfStatementsOnASingleLine: false
+AllowShortLoopsOnASingleLine: false
 AlwaysBreakAfterDefinitionReturnType: None
 AlwaysBreakAfterReturnType: None
 AlwaysBreakBeforeMultilineStrings: false
 AlwaysBreakTemplateDeclarations: true
-BinPackArguments: true
-BinPackParameters: true
+BinPackArguments: false
+BinPackParameters: false
 BreakBeforeBinaryOperators: None
 BreakBeforeBraces: Attach
 ColumnLimit: 120
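Note on the two BinPack settings: with them set to false, clang-format gives every argument and parameter its own line as soon as a call or declaration exceeds ColumnLimit, which is exactly the rewrapping visible throughout the FDBLibTLS hunks below. A minimal illustration (doWork is a hypothetical function):

```cpp
// BinPackParameters: true  - pack as many parameters per line as fit:
//   void doWork(int alpha, int beta, int gamma, int delta,
//               int epsilon, int zeta);
//
// BinPackParameters: false - once the declaration exceeds ColumnLimit: 120,
// each parameter goes on its own line:
//   void doWork(int alpha,
//               int beta,
//               int gamma,
//               int delta,
//               int epsilon,
//               int zeta);
```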
.gitignore
@@ -7,7 +7,7 @@ bindings/java/foundationdb-client*.jar
 bindings/java/foundationdb-tests*.jar
 bindings/java/fdb-java-*-sources.jar
 packaging/msi/FDBInstaller.msi
-
+builds/
 # Generated source, build, and packaging files
 *.g.cpp
 *.g.h
@@ -70,6 +70,8 @@ trace.*.xml
 *.user
 .idea/
 .project
+.projectile
+.dir-locals.el
 .pydevproject
 .vscode
 .vs/
@@ -85,6 +87,7 @@ flow/coveragetool/obj
 /compile_commands.json
 /.ccls-cache
 /.clangd
+/.cache

 # Temporary and user configuration files
 *~
@@ -95,3 +98,6 @@ flow/coveragetool/obj
 .DS_Store
 temp/
 /versions.target
+/compile_commands.json
+/.ccls-cache
+.clangd/
ACKNOWLEDGEMENTS
@@ -85,28 +85,6 @@ Steve Dekorte (libcoroutine)
 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

-Jean-loup Gailly, Mark Adler (zlib)
-Copyright (C) 1995-2013 Jean-loup Gailly and Mark Adler
-
-This software is provided 'as-is', without any express or implied
-warranty. In no event will the authors be held liable for any damages
-arising from the use of this software.
-
-Permission is granted to anyone to use this software for any purpose,
-including commercial applications, and to alter it and redistribute it
-freely, subject to the following restrictions:
-
-1. The origin of this software must not be misrepresented; you must not
-   claim that you wrote the original software. If you use this software
-   in a product, an acknowledgment in the product documentation would be
-   appreciated but is not required.
-2. Altered source versions must be plainly marked as such, and must not be
-   misrepresented as being the original software.
-3. This notice may not be removed or altered from any source distribution.
-
-Jean-loup Gailly        Mark Adler
-jloup@gzip.org          madler@alumni.caltech.edu
-
 The Go Authors (Go Tools)
 Copyright (c) 2009 The Go Authors. All rights reserved.

CMakeLists.txt
@@ -18,7 +18,7 @@
 # limitations under the License.
 cmake_minimum_required(VERSION 3.13)
 project(foundationdb
-  VERSION 7.0.0
+  VERSION 7.1.0
   DESCRIPTION "FoundationDB is a scalable, fault-tolerant, ordered key-value store with full ACID transactions."
   HOMEPAGE_URL "http://www.foundationdb.org/"
   LANGUAGES C CXX ASM)
@@ -72,7 +72,8 @@ add_custom_target(branch_file ALL DEPENDS ${CURR_BRANCH_FILE})
 execute_process(
   COMMAND git rev-parse HEAD
   WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}
-  OUTPUT_VARIABLE CURRENT_GIT_VERSION_WNL)
+  OUTPUT_VARIABLE CURRENT_GIT_VERSION_WNL
+  OUTPUT_STRIP_TRAILING_WHITESPACE)
 string(STRIP "${CURRENT_GIT_VERSION_WNL}" CURRENT_GIT_VERSION)
 message(STATUS "Current git version ${CURRENT_GIT_VERSION}")

@@ -164,6 +165,7 @@ endif()
 add_subdirectory(fdbbackup)
 add_subdirectory(contrib)
 add_subdirectory(tests)
+add_subdirectory(flowbench EXCLUDE_FROM_ALL)
 if(WITH_PYTHON)
   add_subdirectory(bindings)
 endif()
@@ -181,6 +183,12 @@ if(CMAKE_SYSTEM_NAME STREQUAL "FreeBSD")
   add_link_options(-lexecinfo)
 endif()

+################################################################################
+# Build information
+################################################################################
+
+configure_file(${CMAKE_CURRENT_SOURCE_DIR}/fdbclient/BuildFlags.h.in ${CMAKE_CURRENT_BINARY_DIR}/fdbclient/BuildFlags.h)
+
 ################################################################################
 # process compile commands for IDE
 ################################################################################
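Side note on the execute_process change: `git rev-parse HEAD` prints the hash with a trailing newline, and OUTPUT_STRIP_TRAILING_WHITESPACE makes CMake drop it at capture time, so the later string(STRIP ...) now operates on an already-clean value. A C++ sketch of the equivalent strip, purely for illustration:

```cpp
#include <string>

// Drop trailing spaces, tabs, and CR/LF - the same cleanup CMake applies with
// OUTPUT_STRIP_TRAILING_WHITESPACE to the captured `git rev-parse HEAD` output.
std::string stripTrailingWhitespace(std::string s) {
	size_t end = s.find_last_not_of(" \t\r\n");
	s.erase(end == std::string::npos ? 0 : end + 1);
	return s;
}
```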
CONTRIBUTING.md
@@ -32,11 +32,11 @@ We draw inspiration from the Apache Software Foundation's informal motto: ["comm

 The project technical lead is Evan Tschannen (ejt@apple.com).

-Members of the Apple FoundationDB team are part of the initial core committers helping review individual contributions; you'll see them commenting on your pull requests. Future committers to the open source project, and the process for adding individuals in this role will be formalized in the future.
+Members of the Apple FoundationDB team are part of the core committers helping review individual contributions; you'll see them commenting on your pull requests. As the FDB open source community has grown, some members of the community have consistently produced high quality code reviews and other significant contributions to FoundationDB. The project technical lead maintains a list of external committers that actively contribute in this way, and gives them permission to review and merge pull requests.

 ## Contributing
 ### Opening a Pull Request
-We love pull requests! For minor changes, feel free to open up a PR directly. For larger feature development and any changes that may require community discussion, we ask that you discuss your ideas on the [community forums](https://forums.foundationdb.org) prior to opening a PR, and then reference that thread within your PR comment.
+We love pull requests! For minor changes, feel free to open up a PR directly. For larger feature development and any changes that may require community discussion, we ask that you discuss your ideas on the [community forums](https://forums.foundationdb.org) prior to opening a PR, and then reference that thread within your PR comment. Please refer to [FoundationDB Commit Process](https://github.com/apple/foundationdb/wiki/FoundationDB-Commit-Process) for more detailed guidelines.

 CI will be run automatically for core committers, and for community PRs it will be initiated by the request of a core committer. Tests can also be run locally via `ctest`, and core committers can run additional validation on pull requests prior to merging them.

FDBLibTLS/FDBLibTLSPlugin.cpp
@@ -31,21 +31,20 @@ FDBLibTLSPlugin::FDBLibTLSPlugin() {
 	rc = tls_init();
 }

-FDBLibTLSPlugin::~FDBLibTLSPlugin() {
-}
+FDBLibTLSPlugin::~FDBLibTLSPlugin() {}

-ITLSPolicy *FDBLibTLSPlugin::create_policy() {
+ITLSPolicy* FDBLibTLSPlugin::create_policy() {
 	if (rc < 0) {
 		// Log the failure from tls_init during our constructor.
 		TraceEvent(SevError, "FDBLibTLSInitError").detail("LibTLSErrorMessage", "failed to initialize libtls");
-		return NULL;
+		return nullptr;
 	}
 	return new FDBLibTLSPolicy(Reference<FDBLibTLSPlugin>::addRef(this));
 }

-extern "C" BOOST_SYMBOL_EXPORT void *get_tls_plugin(const char *plugin_type_name_and_version) {
+extern "C" BOOST_SYMBOL_EXPORT void* get_tls_plugin(const char* plugin_type_name_and_version) {
 	if (strcmp(plugin_type_name_and_version, FDBLibTLSPlugin::get_plugin_type_name_and_version()) == 0) {
 		return new FDBLibTLSPlugin;
 	}
-	return NULL;
+	return nullptr;
 }
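The NULL-to-nullptr sweep that starts here and runs through the rest of the FDBLibTLS files is not purely cosmetic: nullptr has its own type (std::nullptr_t), so it can never be taken for an integer during overload resolution. A minimal sketch of the pitfall it removes (both overloads are hypothetical):

```cpp
#include <cstddef>

void take(int);   // (1)
void take(void*); // (2)

void demo() {
	// take(NULL) may call (1) or be ambiguous, because NULL is just an
	// integer constant on some implementations; nullptr cannot misfire.
	take(nullptr); // unambiguously calls (2)
}
```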
FDBLibTLS/FDBLibTLSPlugin.h
@@ -35,7 +35,7 @@ struct FDBLibTLSPlugin : ITLSPlugin, ReferenceCounted<FDBLibTLSPlugin> {
 	virtual void addref() { ReferenceCounted<FDBLibTLSPlugin>::addref(); }
 	virtual void delref() { ReferenceCounted<FDBLibTLSPlugin>::delref(); }

-	virtual ITLSPolicy *create_policy();
+	virtual ITLSPolicy* create_policy();

 	int rc;
 };
FDBLibTLS/FDBLibTLSPolicy.cpp
@@ -37,11 +37,11 @@
 #include <string.h>
 #include <limits.h>

-FDBLibTLSPolicy::FDBLibTLSPolicy(Reference<FDBLibTLSPlugin> plugin):
-	plugin(plugin), tls_cfg(NULL), roots(NULL), session_created(false), ca_data_set(false),
-	cert_data_set(false), key_data_set(false), verify_peers_set(false) {
+FDBLibTLSPolicy::FDBLibTLSPolicy(Reference<FDBLibTLSPlugin> plugin)
+  : plugin(plugin), tls_cfg(nullptr), roots(nullptr), session_created(false), ca_data_set(false), cert_data_set(false),
+    key_data_set(false), verify_peers_set(false) {

-	if ((tls_cfg = tls_config_new()) == NULL) {
+	if ((tls_cfg = tls_config_new()) == nullptr) {
 		TraceEvent(SevError, "FDBLibTLSConfigError");
 		throw std::runtime_error("FDBLibTLSConfigError");
 	}
@@ -55,39 +55,52 @@ FDBLibTLSPolicy::~FDBLibTLSPolicy() {
 	tls_config_free(tls_cfg);
 }

-ITLSSession* FDBLibTLSPolicy::create_session(bool is_client, const char* servername, TLSSendCallbackFunc send_func, void* send_ctx, TLSRecvCallbackFunc recv_func, void* recv_ctx, void* uid) {
+ITLSSession* FDBLibTLSPolicy::create_session(bool is_client,
+                                             const char* servername,
+                                             TLSSendCallbackFunc send_func,
+                                             void* send_ctx,
+                                             TLSRecvCallbackFunc recv_func,
+                                             void* recv_ctx,
+                                             void* uid) {
 	if (is_client) {
 		// If verify peers has been set then there is no point specifying a
 		// servername, since this will be ignored - the servername should be
 		// matched by the verify criteria instead.
-		if (verify_peers_set && servername != NULL) {
+		if (verify_peers_set && servername != nullptr) {
 			TraceEvent(SevError, "FDBLibTLSVerifyPeersWithServerName");
-			return NULL;
+			return nullptr;
 		}

 		// If verify peers has not been set, then require a server name to
 		// avoid an accidental lack of name validation.
-		if (!verify_peers_set && servername == NULL) {
+		if (!verify_peers_set && servername == nullptr) {
 			TraceEvent(SevError, "FDBLibTLSNoServerName");
-			return NULL;
+			return nullptr;
 		}
 	}

 	session_created = true;
 	try {
-		return new FDBLibTLSSession(Reference<FDBLibTLSPolicy>::addRef(this), is_client, servername, send_func, send_ctx, recv_func, recv_ctx, uid);
-	} catch ( ... ) {
-		return NULL;
+		return new FDBLibTLSSession(Reference<FDBLibTLSPolicy>::addRef(this),
+		                            is_client,
+		                            servername,
+		                            send_func,
+		                            send_ctx,
+		                            recv_func,
+		                            recv_ctx,
+		                            uid);
+	} catch (...) {
+		return nullptr;
 	}
 }

-static int password_cb(char *buf, int size, int rwflag, void *u) {
-	const char *password = (const char *)u;
+static int password_cb(char* buf, int size, int rwflag, void* u) {
+	const char* password = (const char*)u;
 	int plen;

 	if (size < 0)
 		return 0;
-	if (u == NULL)
+	if (u == nullptr)
 		return 0;

 	plen = strlen(password);
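For context on password_cb above and its use in set_key_data below: OpenSSL's PEM readers take a pem_password_cb and invoke it only when the input is encrypted; the callback fills buf and returns the password length, or a non-positive value to fail decryption. A self-contained sketch of that contract, assuming OpenSSL headers are available:

```cpp
#include <openssl/pem.h>
#include <cstring>

// Matches OpenSSL's pem_password_cb typedef: int (*)(char*, int, int, void*).
// Copies at most `size` bytes of the user-supplied password into `buf` and
// returns how many bytes were written.
static int fixed_password_cb(char* buf, int size, int /*rwflag*/, void* u) {
	const char* password = static_cast<const char*>(u);
	if (password == nullptr || size < 0)
		return 0;
	int plen = static_cast<int>(strlen(password));
	if (plen > size)
		return 0; // refusing beats silently truncating: decryption would fail anyway
	memcpy(buf, password, plen);
	return plen;
}

// Usage sketch:
//   EVP_PKEY* key = PEM_read_bio_PrivateKey(bio, nullptr, fixed_password_cb, (void*)"secret");
```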
@@ -102,24 +115,24 @@ static int password_cb(char *buf, int size, int rwflag, void *u) {
 }

 struct stack_st_X509* FDBLibTLSPolicy::parse_cert_pem(const uint8_t* cert_pem, size_t cert_pem_len) {
-	struct stack_st_X509 *certs = NULL;
-	X509 *cert = NULL;
-	BIO *bio = NULL;
+	struct stack_st_X509* certs = nullptr;
+	X509* cert = nullptr;
+	BIO* bio = nullptr;
 	int errnum;

 	if (cert_pem_len > INT_MAX)
 		goto err;
-	if ((bio = BIO_new_mem_buf((void *)cert_pem, cert_pem_len)) == NULL) {
+	if ((bio = BIO_new_mem_buf((void*)cert_pem, cert_pem_len)) == nullptr) {
 		TraceEvent(SevError, "FDBLibTLSOutOfMemory");
 		goto err;
 	}
-	if ((certs = sk_X509_new_null()) == NULL) {
+	if ((certs = sk_X509_new_null()) == nullptr) {
 		TraceEvent(SevError, "FDBLibTLSOutOfMemory");
 		goto err;
 	}

 	ERR_clear_error();
-	while ((cert = PEM_read_bio_X509(bio, NULL, password_cb, NULL)) != NULL) {
+	while ((cert = PEM_read_bio_X509(bio, nullptr, password_cb, nullptr)) != nullptr) {
 		if (!sk_X509_push(certs, cert)) {
 			TraceEvent(SevError, "FDBLibTLSOutOfMemory");
 			goto err;
@@ -145,12 +158,12 @@ struct stack_st_X509* FDBLibTLSPolicy::parse_cert_pem(const uint8_t* cert_pem, s

 	return certs;

- err:
+err:
 	sk_X509_pop_free(certs, X509_free);
 	X509_free(cert);
 	BIO_free(bio);

-	return NULL;
+	return nullptr;
 }

 bool FDBLibTLSPolicy::set_ca_data(const uint8_t* ca_data, int ca_len) {
@@ -166,7 +179,7 @@ bool FDBLibTLSPolicy::set_ca_data(const uint8_t* ca_data, int ca_len) {
 	if (ca_len < 0)
 		return false;
 	sk_X509_pop_free(roots, X509_free);
-	if ((roots = parse_cert_pem(ca_data, ca_len)) == NULL)
+	if ((roots = parse_cert_pem(ca_data, ca_len)) == nullptr)
 		return false;

 	if (tls_config_set_ca_mem(tls_cfg, ca_data, ca_len) == -1) {
@@ -200,8 +213,8 @@ bool FDBLibTLSPolicy::set_cert_data(const uint8_t* cert_data, int cert_len) {
 }

 bool FDBLibTLSPolicy::set_key_data(const uint8_t* key_data, int key_len, const char* password) {
-	EVP_PKEY *key = NULL;
-	BIO *bio = NULL;
+	EVP_PKEY* key = nullptr;
+	BIO* bio = nullptr;
 	bool rc = false;

 	if (key_data_set) {
@@ -213,21 +226,21 @@ bool FDBLibTLSPolicy::set_key_data(const uint8_t* key_data, int key_len, const c
 		goto err;
 	}

-	if (password != NULL) {
-		char *data;
+	if (password != nullptr) {
+		char* data;
 		long len;

-		if ((bio = BIO_new_mem_buf((void *)key_data, key_len)) == NULL) {
+		if ((bio = BIO_new_mem_buf((void*)key_data, key_len)) == nullptr) {
 			TraceEvent(SevError, "FDBLibTLSOutOfMemory");
 			goto err;
 		}
 		ERR_clear_error();
-		if ((key = PEM_read_bio_PrivateKey(bio, NULL, password_cb, (void *)password)) == NULL) {
+		if ((key = PEM_read_bio_PrivateKey(bio, nullptr, password_cb, (void*)password)) == nullptr) {
 			int errnum = ERR_peek_error();
 			char errbuf[256];

 			if ((ERR_GET_LIB(errnum) == ERR_LIB_PEM && ERR_GET_REASON(errnum) == PEM_R_BAD_DECRYPT) ||
-				(ERR_GET_LIB(errnum) == ERR_LIB_EVP && ERR_GET_REASON(errnum) == EVP_R_BAD_DECRYPT)) {
+			    (ERR_GET_LIB(errnum) == ERR_LIB_EVP && ERR_GET_REASON(errnum) == EVP_R_BAD_DECRYPT)) {
 				TraceEvent(SevError, "FDBLibTLSIncorrectPassword");
 			} else {
 				ERR_error_string_n(errnum, errbuf, sizeof(errbuf));
@@ -236,11 +249,11 @@ bool FDBLibTLSPolicy::set_key_data(const uint8_t* key_data, int key_len, const c
 			goto err;
 		}
 		BIO_free(bio);
-		if ((bio = BIO_new(BIO_s_mem())) == NULL) {
+		if ((bio = BIO_new(BIO_s_mem())) == nullptr) {
 			TraceEvent(SevError, "FDBLibTLSOutOfMemory");
 			goto err;
 		}
-		if (!PEM_write_bio_PrivateKey(bio, key, NULL, NULL, 0, NULL, NULL)) {
+		if (!PEM_write_bio_PrivateKey(bio, key, nullptr, nullptr, 0, nullptr, nullptr)) {
 			TraceEvent(SevError, "FDBLibTLSOutOfMemory");
 			goto err;
 		}
@@ -248,7 +261,7 @@ bool FDBLibTLSPolicy::set_key_data(const uint8_t* key_data, int key_len, const c
 			TraceEvent(SevError, "FDBLibTLSOutOfMemory");
 			goto err;
 		}
-		if (tls_config_set_key_mem(tls_cfg, (const uint8_t *)data, len) == -1) {
+		if (tls_config_set_key_mem(tls_cfg, (const uint8_t*)data, len) == -1) {
 			TraceEvent(SevError, "FDBLibTLSKeyError").detail("LibTLSErrorMessage", tls_config_error(tls_cfg));
 			goto err;
 		}
@@ -262,7 +275,7 @@ bool FDBLibTLSPolicy::set_key_data(const uint8_t* key_data, int key_len, const c
 	key_data_set = true;
 	rc = true;

- err:
+err:
 	BIO_free(bio);
 	EVP_PKEY_free(key);
 	return rc;
@@ -287,20 +300,20 @@ bool FDBLibTLSPolicy::set_verify_peers(int count, const uint8_t* verify_peers[],
 		try {
 			std::string verifyString((const char*)verify_peers[i], verify_peers_len[i]);
 			int start = 0;
-			while(start < verifyString.size()) {
+			while (start < verifyString.size()) {
 				int split = verifyString.find('|', start);
-				if(split == std::string::npos) {
+				if (split == std::string::npos) {
 					break;
 				}
-				if(split == start || verifyString[split-1] != '\\') {
-					Reference<FDBLibTLSVerify> verify = Reference<FDBLibTLSVerify>(new FDBLibTLSVerify(verifyString.substr(start,split-start)));
+				if (split == start || verifyString[split - 1] != '\\') {
+					auto verify = makeReference<FDBLibTLSVerify>(verifyString.substr(start, split - start));
 					verify_rules.push_back(verify);
-					start = split+1;
+					start = split + 1;
 				}
 			}
-			Reference<FDBLibTLSVerify> verify = Reference<FDBLibTLSVerify>(new FDBLibTLSVerify(verifyString.substr(start)));
+			auto verify = makeReference<FDBLibTLSVerify>(verifyString.substr(start));
 			verify_rules.push_back(verify);
-		} catch ( const std::runtime_error& ) {
+		} catch (const std::runtime_error&) {
 			verify_rules.clear();
 			std::string verifyString((const char*)verify_peers[i], verify_peers_len[i]);
 			TraceEvent(SevError, "FDBLibTLSVerifyPeersParseError").detail("Config", verifyString);
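The switch from Reference<T>(new T(...)) to makeReference<T>(...) above (and again in verify-test.cpp below) uses flow's factory helper for intrusively reference-counted types. A sketch of the shape of such a helper - not necessarily the exact flow implementation:

```cpp
#include <utility>

// Forwards constructor arguments and wraps the new object in Reference<T>,
// so call sites name the type once and never spell a raw `new`.
template <class T, class... Args>
Reference<T> makeReference(Args&&... args) {
	return Reference<T>(new T(std::forward<Args>(args)...));
}

// Before: Reference<FDBLibTLSVerify> v(new FDBLibTLSVerify(config));
// After:  auto v = makeReference<FDBLibTLSVerify>(config);
```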
FDBLibTLS/FDBLibTLSPolicy.h
@@ -32,7 +32,7 @@
 #include <string>
 #include <vector>

-struct FDBLibTLSPolicy: ITLSPolicy, ReferenceCounted<FDBLibTLSPolicy> {
+struct FDBLibTLSPolicy : ITLSPolicy, ReferenceCounted<FDBLibTLSPolicy> {
 	FDBLibTLSPolicy(Reference<FDBLibTLSPlugin> plugin);
 	virtual ~FDBLibTLSPolicy();

@@ -41,7 +41,13 @@ struct FDBLibTLSPolicy: ITLSPolicy, ReferenceCounted<FDBLibTLSPolicy> {

 	Reference<FDBLibTLSPlugin> plugin;

-	virtual ITLSSession* create_session(bool is_client, const char* servername, TLSSendCallbackFunc send_func, void* send_ctx, TLSRecvCallbackFunc recv_func, void* recv_ctx, void* uid);
+	virtual ITLSSession* create_session(bool is_client,
+	                                    const char* servername,
+	                                    TLSSendCallbackFunc send_func,
+	                                    void* send_ctx,
+	                                    TLSRecvCallbackFunc recv_func,
+	                                    void* recv_ctx,
+	                                    void* uid);

 	struct stack_st_X509* parse_cert_pem(const uint8_t* cert_pem, size_t cert_pem_len);
 	void parse_verify(std::string input);
FDBLibTLS/FDBLibTLSSession.cpp
@@ -36,11 +36,10 @@
 #include <string.h>
 #include <limits.h>

-static ssize_t tls_read_func(struct tls *ctx, void *buf, size_t buflen, void *cb_arg)
-{
-	FDBLibTLSSession *session = (FDBLibTLSSession *)cb_arg;
+static ssize_t tls_read_func(struct tls* ctx, void* buf, size_t buflen, void* cb_arg) {
+	FDBLibTLSSession* session = (FDBLibTLSSession*)cb_arg;

-	int rv = session->recv_func(session->recv_ctx, (uint8_t *)buf, buflen);
+	int rv = session->recv_func(session->recv_ctx, (uint8_t*)buf, buflen);
 	if (rv < 0)
 		return 0;
 	if (rv == 0)
@@ -48,11 +47,10 @@ static ssize_t tls_read_func(struct tls *ctx, void *buf, size_t buflen, void *cb
 	return (ssize_t)rv;
 }

-static ssize_t tls_write_func(struct tls *ctx, const void *buf, size_t buflen, void *cb_arg)
-{
-	FDBLibTLSSession *session = (FDBLibTLSSession *)cb_arg;
+static ssize_t tls_write_func(struct tls* ctx, const void* buf, size_t buflen, void* cb_arg) {
+	FDBLibTLSSession* session = (FDBLibTLSSession*)cb_arg;

-	int rv = session->send_func(session->send_ctx, (const uint8_t *)buf, buflen);
+	int rv = session->send_func(session->send_ctx, (const uint8_t*)buf, buflen);
 	if (rv < 0)
 		return 0;
 	if (rv == 0)
@@ -60,14 +58,21 @@ static ssize_t tls_write_func(struct tls *ctx, const void *buf, size_t buflen, v
 	return (ssize_t)rv;
 }

-FDBLibTLSSession::FDBLibTLSSession(Reference<FDBLibTLSPolicy> policy, bool is_client, const char* servername, TLSSendCallbackFunc send_func, void* send_ctx, TLSRecvCallbackFunc recv_func, void* recv_ctx, void* uidptr) :
-	tls_ctx(NULL), tls_sctx(NULL), is_client(is_client), policy(policy), send_func(send_func), send_ctx(send_ctx),
-	recv_func(recv_func), recv_ctx(recv_ctx), handshake_completed(false), lastVerifyFailureLogged(0.0) {
+FDBLibTLSSession::FDBLibTLSSession(Reference<FDBLibTLSPolicy> policy,
+                                   bool is_client,
+                                   const char* servername,
+                                   TLSSendCallbackFunc send_func,
+                                   void* send_ctx,
+                                   TLSRecvCallbackFunc recv_func,
+                                   void* recv_ctx,
+                                   void* uidptr)
+  : tls_ctx(nullptr), tls_sctx(nullptr), is_client(is_client), policy(policy), send_func(send_func), send_ctx(send_ctx),
+    recv_func(recv_func), recv_ctx(recv_ctx), handshake_completed(false), lastVerifyFailureLogged(0.0) {
 	if (uidptr)
-		uid = * (UID*) uidptr;
+		uid = *(UID*)uidptr;

 	if (is_client) {
-		if ((tls_ctx = tls_client()) == NULL) {
+		if ((tls_ctx = tls_client()) == nullptr) {
 			TraceEvent(SevError, "FDBLibTLSClientError", uid);
 			throw std::runtime_error("FDBLibTLSClientError");
 		}
@@ -82,7 +87,7 @@ FDBLibTLSSession::FDBLibTLSSession(Reference<FDBLibTLSPolicy> policy, bool is_cl
 			throw std::runtime_error("FDBLibTLSConnectError");
 		}
 	} else {
-		if ((tls_sctx = tls_server()) == NULL) {
+		if ((tls_sctx = tls_server()) == nullptr) {
 			TraceEvent(SevError, "FDBLibTLSServerError", uid);
 			throw std::runtime_error("FDBLibTLSServerError");
 		}
@@ -108,13 +113,13 @@ FDBLibTLSSession::~FDBLibTLSSession() {

 bool match_criteria_entry(const std::string& criteria, ASN1_STRING* entry, MatchType mt) {
 	bool rc = false;
-	ASN1_STRING* asn_criteria = NULL;
-	unsigned char* criteria_utf8 = NULL;
+	ASN1_STRING* asn_criteria = nullptr;
+	unsigned char* criteria_utf8 = nullptr;
 	int criteria_utf8_len = 0;
-	unsigned char* entry_utf8 = NULL;
+	unsigned char* entry_utf8 = nullptr;
 	int entry_utf8_len = 0;

-	if ((asn_criteria = ASN1_IA5STRING_new()) == NULL)
+	if ((asn_criteria = ASN1_IA5STRING_new()) == nullptr)
 		goto err;
 	if (ASN1_STRING_set(asn_criteria, criteria.c_str(), criteria.size()) != 1)
 		goto err;
@@ -123,12 +128,10 @@ bool match_criteria_entry(const std::string& criteria, ASN1_STRING* entry, Match
 	if ((entry_utf8_len = ASN1_STRING_to_UTF8(&entry_utf8, entry)) < 1)
 		goto err;
 	if (mt == MatchType::EXACT) {
-		if (criteria_utf8_len == entry_utf8_len &&
-			memcmp(criteria_utf8, entry_utf8, criteria_utf8_len) == 0)
+		if (criteria_utf8_len == entry_utf8_len && memcmp(criteria_utf8, entry_utf8, criteria_utf8_len) == 0)
 			rc = true;
 	} else if (mt == MatchType::PREFIX) {
-		if (criteria_utf8_len <= entry_utf8_len &&
-			memcmp(criteria_utf8, entry_utf8, criteria_utf8_len) == 0)
+		if (criteria_utf8_len <= entry_utf8_len && memcmp(criteria_utf8, entry_utf8, criteria_utf8_len) == 0)
 			rc = true;
 	} else if (mt == MatchType::SUFFIX) {
 		if (criteria_utf8_len <= entry_utf8_len &&
@@ -136,15 +139,15 @@ bool match_criteria_entry(const std::string& criteria, ASN1_STRING* entry, Match
 			rc = true;
 	}

- err:
+err:
 	ASN1_STRING_free(asn_criteria);
 	free(criteria_utf8);
 	free(entry_utf8);
 	return rc;
 }

-bool match_name_criteria(X509_NAME *name, NID nid, const std::string& criteria, MatchType mt) {
-	X509_NAME_ENTRY *name_entry;
+bool match_name_criteria(X509_NAME* name, NID nid, const std::string& criteria, MatchType mt) {
+	X509_NAME_ENTRY* name_entry;
 	int idx;

 	// If name does not exist, or has multiple of this RDN, refuse to proceed.
@@ -152,13 +155,13 @@ bool match_name_criteria(X509_NAME *name, NID nid, const std::string& criteria,
 		return false;
 	if (X509_NAME_get_index_by_NID(name, nid, idx) != -1)
 		return false;
-	if ((name_entry = X509_NAME_get_entry(name, idx)) == NULL)
+	if ((name_entry = X509_NAME_get_entry(name, idx)) == nullptr)
 		return false;

 	return match_criteria_entry(criteria, name_entry->value, mt);
 }

-bool match_extension_criteria(X509 *cert, NID nid, const std::string& value, MatchType mt) {
+bool match_extension_criteria(X509* cert, NID nid, const std::string& value, MatchType mt) {
 	if (nid != NID_subject_alt_name && nid != NID_issuer_alt_name) {
 		// I have no idea how other extensions work.
 		return false;
@@ -168,28 +171,27 @@ bool match_extension_criteria(X509 *cert, NID nid, const std::string& value, Mat
 		return false;
 	}
 	std::string value_gen = value.substr(0, pos);
-	std::string value_val = value.substr(pos+1, value.npos);
-	STACK_OF(GENERAL_NAME)* sans = reinterpret_cast<STACK_OF(GENERAL_NAME)*>(X509_get_ext_d2i(cert, nid, NULL, NULL));
-	if (sans == NULL) {
+	std::string value_val = value.substr(pos + 1, value.npos);
+	STACK_OF(GENERAL_NAME)* sans =
+	    reinterpret_cast<STACK_OF(GENERAL_NAME)*>(X509_get_ext_d2i(cert, nid, nullptr, nullptr));
+	if (sans == nullptr) {
 		return false;
 	}
-	int num_sans = sk_GENERAL_NAME_num( sans );
+	int num_sans = sk_GENERAL_NAME_num(sans);
 	bool rc = false;
-	for( int i = 0; i < num_sans && !rc; ++i ) {
-		GENERAL_NAME* altname = sk_GENERAL_NAME_value( sans, i );
+	for (int i = 0; i < num_sans && !rc; ++i) {
+		GENERAL_NAME* altname = sk_GENERAL_NAME_value(sans, i);
 		std::string matchable;
 		switch (altname->type) {
 		case GEN_OTHERNAME:
 			break;
 		case GEN_EMAIL:
-			if (value_gen == "EMAIL" &&
-				match_criteria_entry( value_val, altname->d.rfc822Name, mt)) {
+			if (value_gen == "EMAIL" && match_criteria_entry(value_val, altname->d.rfc822Name, mt)) {
 				rc = true;
 				break;
 			}
 		case GEN_DNS:
-			if (value_gen == "DNS" &&
-				match_criteria_entry( value_val, altname->d.dNSName, mt )) {
+			if (value_gen == "DNS" && match_criteria_entry(value_val, altname->d.dNSName, mt)) {
 				rc = true;
 				break;
 			}
@@ -198,14 +200,12 @@ bool match_extension_criteria(X509 *cert, NID nid, const std::string& value, Mat
 		case GEN_EDIPARTY:
 			break;
 		case GEN_URI:
-			if (value_gen == "URI" &&
-				match_criteria_entry( value_val, altname->d.uniformResourceIdentifier, mt )) {
+			if (value_gen == "URI" && match_criteria_entry(value_val, altname->d.uniformResourceIdentifier, mt)) {
 				rc = true;
 				break;
 			}
 		case GEN_IPADD:
-			if (value_gen == "IP" &&
-				match_criteria_entry( value_val, altname->d.iPAddress, mt )) {
+			if (value_gen == "IP" && match_criteria_entry(value_val, altname->d.iPAddress, mt)) {
 				rc = true;
 				break;
 			}
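All three MatchType comparisons in match_criteria_entry reduce to one memcmp over the UTF-8 bytes; only the length precondition and the starting offset differ. The same predicates, restated standalone on std::string for clarity:

```cpp
#include <cstring>
#include <string>

// EXACT: identical length and bytes.
bool matchExact(const std::string& criteria, const std::string& entry) {
	return criteria.size() == entry.size() && memcmp(criteria.data(), entry.data(), criteria.size()) == 0;
}

// PREFIX: criteria no longer than entry, compared at the front.
bool matchPrefix(const std::string& criteria, const std::string& entry) {
	return criteria.size() <= entry.size() && memcmp(criteria.data(), entry.data(), criteria.size()) == 0;
}

// SUFFIX: criteria no longer than entry, compared at the back.
bool matchSuffix(const std::string& criteria, const std::string& entry) {
	return criteria.size() <= entry.size() &&
	       memcmp(criteria.data(), entry.data() + (entry.size() - criteria.size()), criteria.size()) == 0;
}
```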
@@ -217,8 +217,13 @@ bool match_extension_criteria(X509 *cert, NID nid, const std::string& value, Mat
 	return rc;
 }

-bool match_criteria(X509* cert, X509_NAME* subject, NID nid, const std::string& criteria, MatchType mt, X509Location loc) {
-	switch(loc) {
+bool match_criteria(X509* cert,
+                    X509_NAME* subject,
+                    NID nid,
+                    const std::string& criteria,
+                    MatchType mt,
+                    X509Location loc) {
+	switch (loc) {
 	case X509Location::NAME: {
 		return match_name_criteria(subject, nid, criteria, mt);
 	}
@@ -230,11 +235,12 @@ bool match_criteria(X509* cert, X509_NAME* subject, NID nid, const std::string&
 		return false;
 	}

-std::tuple<bool,std::string> FDBLibTLSSession::check_verify(Reference<FDBLibTLSVerify> verify, struct stack_st_X509 *certs) {
-	X509_STORE_CTX *store_ctx = NULL;
+std::tuple<bool, std::string> FDBLibTLSSession::check_verify(Reference<FDBLibTLSVerify> verify,
+                                                             struct stack_st_X509* certs) {
+	X509_STORE_CTX* store_ctx = nullptr;
 	X509_NAME *subject, *issuer;
 	bool rc = false;
-	X509* cert = NULL;
+	X509* cert = nullptr;
 	// if returning false, give a reason string
 	std::string reason = "";

@@ -243,12 +249,12 @@ std::tuple<bool,std::string> FDBLibTLSSession::check_verify(Reference<FDBLibTLSV
 		return std::make_tuple(true, reason);

 	// Verify the certificate.
-	if ((store_ctx = X509_STORE_CTX_new()) == NULL) {
+	if ((store_ctx = X509_STORE_CTX_new()) == nullptr) {
 		TraceEvent(SevError, "FDBLibTLSOutOfMemory", uid);
 		reason = "Out of memory";
 		goto err;
 	}
-	if (!X509_STORE_CTX_init(store_ctx, NULL, sk_X509_value(certs, 0), certs)) {
+	if (!X509_STORE_CTX_init(store_ctx, nullptr, sk_X509_value(certs, 0), certs)) {
 		reason = "Store ctx init";
 		goto err;
 	}
@@ -257,31 +263,33 @@ std::tuple<bool,std::string> FDBLibTLSSession::check_verify(Reference<FDBLibTLSV
 	if (!verify->verify_time)
 		X509_VERIFY_PARAM_set_flags(X509_STORE_CTX_get0_param(store_ctx), X509_V_FLAG_NO_CHECK_TIME);
 	if (X509_verify_cert(store_ctx) <= 0) {
-		const char *errstr = X509_verify_cert_error_string(X509_STORE_CTX_get_error(store_ctx));
+		const char* errstr = X509_verify_cert_error_string(X509_STORE_CTX_get_error(store_ctx));
 		reason = "Verify cert error: " + std::string(errstr);
 		goto err;
 	}

 	// Check subject criteria.
 	cert = sk_X509_value(store_ctx->chain, 0);
-	if ((subject = X509_get_subject_name(cert)) == NULL) {
+	if ((subject = X509_get_subject_name(cert)) == nullptr) {
 		reason = "Cert subject error";
 		goto err;
 	}
-	for (auto &pair: verify->subject_criteria) {
-		if (!match_criteria(cert, subject, pair.first, pair.second.criteria, pair.second.match_type, pair.second.location)) {
+	for (auto& pair : verify->subject_criteria) {
+		if (!match_criteria(
+		        cert, subject, pair.first, pair.second.criteria, pair.second.match_type, pair.second.location)) {
 			reason = "Cert subject match failure";
 			goto err;
 		}
 	}

 	// Check issuer criteria.
-	if ((issuer = X509_get_issuer_name(cert)) == NULL) {
+	if ((issuer = X509_get_issuer_name(cert)) == nullptr) {
 		reason = "Cert issuer error";
 		goto err;
 	}
-	for (auto &pair: verify->issuer_criteria) {
-		if (!match_criteria(cert, issuer, pair.first, pair.second.criteria, pair.second.match_type, pair.second.location)) {
+	for (auto& pair : verify->issuer_criteria) {
+		if (!match_criteria(
+		        cert, issuer, pair.first, pair.second.criteria, pair.second.match_type, pair.second.location)) {
 			reason = "Cert issuer match failure";
 			goto err;
 		}
@@ -289,12 +297,13 @@ std::tuple<bool,std::string> FDBLibTLSSession::check_verify(Reference<FDBLibTLSV

 	// Check root criteria - this is the subject of the final certificate in the stack.
 	cert = sk_X509_value(store_ctx->chain, sk_X509_num(store_ctx->chain) - 1);
-	if ((subject = X509_get_subject_name(cert)) == NULL) {
+	if ((subject = X509_get_subject_name(cert)) == nullptr) {
 		reason = "Root subject error";
 		goto err;
 	}
-	for (auto &pair: verify->root_criteria) {
-		if (!match_criteria(cert, subject, pair.first, pair.second.criteria, pair.second.match_type, pair.second.location)) {
+	for (auto& pair : verify->root_criteria) {
+		if (!match_criteria(
+		        cert, subject, pair.first, pair.second.criteria, pair.second.match_type, pair.second.location)) {
 			reason = "Root subject match failure";
 			goto err;
 		}
@@ -303,15 +312,15 @@ std::tuple<bool,std::string> FDBLibTLSSession::check_verify(Reference<FDBLibTLSV
 	// If we got this far, everything checked out...
 	rc = true;

- err:
+err:
 	X509_STORE_CTX_free(store_ctx);

 	return std::make_tuple(rc, reason);
 }

 bool FDBLibTLSSession::verify_peer() {
-	struct stack_st_X509 *certs = NULL;
-	const uint8_t *cert_pem;
+	struct stack_st_X509* certs = nullptr;
+	const uint8_t* cert_pem;
 	size_t cert_pem_len;
 	bool rc = false;
 	std::set<std::string> verify_failure_reasons;
@@ -323,15 +332,15 @@ bool FDBLibTLSSession::verify_peer() {
 	if (policy->verify_rules.empty())
 		return true;

-	if ((cert_pem = tls_peer_cert_chain_pem(tls_ctx, &cert_pem_len)) == NULL) {
+	if ((cert_pem = tls_peer_cert_chain_pem(tls_ctx, &cert_pem_len)) == nullptr) {
 		TraceEvent(SevError, "FDBLibTLSNoCertError", uid);
 		goto err;
 	}
-	if ((certs = policy->parse_cert_pem(cert_pem, cert_pem_len)) == NULL)
+	if ((certs = policy->parse_cert_pem(cert_pem, cert_pem_len)) == nullptr)
 		goto err;

 	// Any matching rule is sufficient.
-	for (auto &verify_rule: policy->verify_rules) {
+	for (auto& verify_rule : policy->verify_rules) {
 		std::tie(verify_success, verify_failure_reason) = check_verify(verify_rule, certs);
 		if (verify_success) {
 			rc = true;
@@ -344,7 +353,7 @@ bool FDBLibTLSSession::verify_peer() {

 	if (!rc) {
 		// log the various failure reasons
-		if(now() - lastVerifyFailureLogged > 1.0) {
+		if (now() - lastVerifyFailureLogged > 1.0) {
 			for (std::string reason : verify_failure_reasons) {
 				lastVerifyFailureLogged = now();
 				TraceEvent("FDBLibTLSVerifyFailure", uid).suppressFor(1.0).detail("Reason", reason);
@@ -352,7 +361,7 @@ bool FDBLibTLSSession::verify_peer() {
 		}
 	}

- err:
+err:
 	sk_X509_pop_free(certs, X509_free);

 	return rc;
FDBLibTLS/FDBLibTLSSession.h
@@ -33,14 +33,21 @@
 #include <tls.h>

 struct FDBLibTLSSession : ITLSSession, ReferenceCounted<FDBLibTLSSession> {
-	FDBLibTLSSession(Reference<FDBLibTLSPolicy> policy, bool is_client, const char* servername, TLSSendCallbackFunc send_func, void* send_ctx, TLSRecvCallbackFunc recv_func, void* recv_ctx, void* uid);
+	FDBLibTLSSession(Reference<FDBLibTLSPolicy> policy,
+	                 bool is_client,
+	                 const char* servername,
+	                 TLSSendCallbackFunc send_func,
+	                 void* send_ctx,
+	                 TLSRecvCallbackFunc recv_func,
+	                 void* recv_ctx,
+	                 void* uid);
 	virtual ~FDBLibTLSSession();

 	virtual void addref() { ReferenceCounted<FDBLibTLSSession>::addref(); }
 	virtual void delref() { ReferenceCounted<FDBLibTLSSession>::delref(); }

 	bool verify_peer();
-	std::tuple<bool,std::string> check_verify(Reference<FDBLibTLSVerify> verify, struct stack_st_X509 *certs);
+	std::tuple<bool, std::string> check_verify(Reference<FDBLibTLSVerify> verify, struct stack_st_X509* certs);

 	virtual int handshake();
 	virtual int read(uint8_t* data, int length);
@@ -50,8 +57,8 @@ struct FDBLibTLSSession : ITLSSession, ReferenceCounted<FDBLibTLSSession> {

 	bool is_client;

-	struct tls *tls_ctx;
-	struct tls *tls_sctx;
+	struct tls* tls_ctx;
+	struct tls* tls_sctx;

 	TLSSendCallbackFunc send_func;
 	void* send_ctx;
FDBLibTLS/FDBLibTLSVerify.cpp
@@ -43,24 +43,24 @@ static int hexValue(char c) {
 static std::string de4514(std::string const& input, int start, int& out_end) {
 	std::string output;

-	if(input[start] == '#' || input[start] == ' ') {
+	if (input[start] == '#' || input[start] == ' ') {
 		out_end = start;
 		return output;
 	}

 	int space_count = 0;

-	for(int p = start; p < input.size();) {
-		switch(input[p]) {
+	for (int p = start; p < input.size();) {
+		switch (input[p]) {
 		case '\\': // Handle escaped sequence

 			// Backslash escaping nothing!
-			if(p == input.size() - 1) {
+			if (p == input.size() - 1) {
 				out_end = p;
 				goto FIN;
 			}

-			switch(input[p+1]) {
+			switch (input[p + 1]) {
 			case ' ':
 			case '"':
 			case '#':
@@ -72,24 +72,24 @@ static std::string de4514(std::string const& input, int start, int& out_end) {
 			case '>':
 			case '|':
 			case '\\':
-				output += input[p+1];
+				output += input[p + 1];
 				p += 2;
 				space_count = 0;
 				continue;

 			default:
 				// Backslash escaping pair of hex digits requires two characters
-				if(p == input.size() - 2) {
+				if (p == input.size() - 2) {
 					out_end = p;
 					goto FIN;
 				}

 				try {
-					output += hexValue(input[p+1]) * 16 + hexValue(input[p+2]);
+					output += hexValue(input[p + 1]) * 16 + hexValue(input[p + 2]);
 					p += 3;
 					space_count = 0;
 					continue;
-				} catch( ... ) {
+				} catch (...) {
 					out_end = p;
 					goto FIN;
 				}
@@ -109,7 +109,7 @@ static std::string de4514(std::string const& input, int start, int& out_end) {
 		default:
 			// Character is what it is
 			output += input[p];
-			if(input[p] == ' ')
+			if (input[p] == ' ')
 				space_count++;
 			else
 				space_count = 0;
@@ -119,7 +119,7 @@ static std::string de4514(std::string const& input, int start, int& out_end) {

 	out_end = input.size();

- FIN:
+FIN:
 	out_end -= space_count;
 	output.resize(output.size() - space_count);

@@ -128,16 +128,17 @@ static std::string de4514(std::string const& input, int start, int& out_end) {

 static std::pair<std::string, std::string> splitPair(std::string const& input, char c) {
 	int p = input.find_first_of(c);
-	if(p == input.npos) {
+	if (p == input.npos) {
 		throw std::runtime_error("splitPair");
 	}
-	return std::make_pair(input.substr(0, p), input.substr(p+1, input.size()));
+	return std::make_pair(input.substr(0, p), input.substr(p + 1, input.size()));
 }

 static NID abbrevToNID(std::string const& sn) {
 	NID nid = NID_undef;

-	if (sn == "C" || sn == "CN" || sn == "L" || sn == "ST" || sn == "O" || sn == "OU" || sn == "UID" || sn == "DC" || sn == "subjectAltName")
+	if (sn == "C" || sn == "CN" || sn == "L" || sn == "ST" || sn == "O" || sn == "OU" || sn == "UID" || sn == "DC" ||
+	    sn == "subjectAltName")
 		nid = OBJ_sn2nid(sn.c_str());
 	if (nid == NID_undef)
 		throw std::runtime_error("abbrevToNID");
@@ -147,7 +148,7 @@ static NID abbrevToNID(std::string const& sn) {

 static X509Location locationForNID(NID nid) {
 	const char* name = OBJ_nid2ln(nid);
-	if (name == NULL) {
+	if (name == nullptr) {
 		throw std::runtime_error("locationForNID");
 	}
 	if (strncmp(name, "X509v3", 6) == 0) {
@@ -158,13 +159,11 @@ static X509Location locationForNID(NID nid) {
 	}
 }

-FDBLibTLSVerify::FDBLibTLSVerify(std::string verify_config):
-	verify_cert(true), verify_time(true) {
+FDBLibTLSVerify::FDBLibTLSVerify(std::string verify_config) : verify_cert(true), verify_time(true) {
 	parse_verify(verify_config);
 }

-FDBLibTLSVerify::~FDBLibTLSVerify() {
-}
+FDBLibTLSVerify::~FDBLibTLSVerify() {}

 void FDBLibTLSVerify::parse_verify(std::string input) {
 	int s = 0;
@@ -176,8 +175,10 @@ void FDBLibTLSVerify::parse_verify(std::string input) {
 			throw std::runtime_error("parse_verify");

 		MatchType mt = MatchType::EXACT;
-		if (input[eq-1] == '>') mt = MatchType::PREFIX;
-		if (input[eq-1] == '<') mt = MatchType::SUFFIX;
+		if (input[eq - 1] == '>')
+			mt = MatchType::PREFIX;
+		if (input[eq - 1] == '<')
+			mt = MatchType::SUFFIX;
 		std::string term = input.substr(s, eq - s - (mt == MatchType::EXACT ? 0 : 1));

 		if (term.find("Check.") == 0) {
@@ -206,7 +207,7 @@ void FDBLibTLSVerify::parse_verify(std::string input) {

 			s = eq + 3;
 		} else {
-			std::map< int, Criteria >* criteria = &subject_criteria;
+			std::map<int, Criteria>* criteria = &subject_criteria;

 			if (term.find('.') != term.npos) {
 				auto scoped = splitPair(term, '.');
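Taken together, parse_verify, splitPair, and de4514 accept verify strings such as "I.C=US,C=US,S.O=XYZCorp\, LLC": comma-separated field=value terms, backslash escapes for literal characters or RFC 4514 style hex pairs, and a trailing > or < on the field selecting prefix/suffix matching. A minimal standalone sketch of just the hex-escape decoding (the real de4514 also handles single-character escapes and trailing-space trimming):

```cpp
#include <string>

// Decode "\HH" hex escapes as de4514 does: "CN=\61\62\63" -> "CN=abc".
std::string decodeHexEscapes(const std::string& in) {
	auto hex = [](char c) -> int {
		if (c >= '0' && c <= '9') return c - '0';
		if (c >= 'a' && c <= 'f') return c - 'a' + 10;
		if (c >= 'A' && c <= 'F') return c - 'A' + 10;
		return -1;
	};
	std::string out;
	for (size_t p = 0; p < in.size();) {
		int hi, lo;
		if (in[p] == '\\' && p + 2 < in.size() && (hi = hex(in[p + 1])) >= 0 && (lo = hex(in[p + 2])) >= 0) {
			out += static_cast<char>(hi * 16 + lo);
			p += 3;
		} else {
			out += in[p++];
		}
	}
	return out;
}
```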
FDBLibTLS/FDBLibTLSVerify.h
@@ -47,14 +47,10 @@ enum class X509Location {
 };

 struct Criteria {
-	Criteria( const std::string& s )
-		: criteria(s), match_type(MatchType::EXACT), location(X509Location::NAME) {}
-	Criteria( const std::string& s, MatchType mt )
-		: criteria(s), match_type(mt), location(X509Location::NAME) {}
-	Criteria( const std::string& s, X509Location loc)
-		: criteria(s), match_type(MatchType::EXACT), location(loc) {}
-	Criteria( const std::string& s, MatchType mt, X509Location loc)
-		: criteria(s), match_type(mt), location(loc) {}
+	Criteria(const std::string& s) : criteria(s), match_type(MatchType::EXACT), location(X509Location::NAME) {}
+	Criteria(const std::string& s, MatchType mt) : criteria(s), match_type(mt), location(X509Location::NAME) {}
+	Criteria(const std::string& s, X509Location loc) : criteria(s), match_type(MatchType::EXACT), location(loc) {}
+	Criteria(const std::string& s, MatchType mt, X509Location loc) : criteria(s), match_type(mt), location(loc) {}

 	std::string criteria;
 	MatchType match_type;
@@ -65,7 +61,7 @@ struct Criteria {
 	}
 };

-struct FDBLibTLSVerify: ReferenceCounted<FDBLibTLSVerify> {
+struct FDBLibTLSVerify : ReferenceCounted<FDBLibTLSVerify> {
 	FDBLibTLSVerify(std::string verify);
 	virtual ~FDBLibTLSVerify();

@@ -77,9 +73,9 @@ struct FDBLibTLSVerify: ReferenceCounted<FDBLibTLSVerify> {
 	bool verify_cert;
 	bool verify_time;

-	std::map< NID, Criteria > subject_criteria;
-	std::map< NID, Criteria > issuer_criteria;
-	std::map< NID, Criteria > root_criteria;
+	std::map<NID, Criteria> subject_criteria;
+	std::map<NID, Criteria> issuer_criteria;
+	std::map<NID, Criteria> root_criteria;
 };

 #endif /* FDB_LIBTLS_VERIFY_H */
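A short usage sketch of the compacted Criteria constructors, in the style the EXACT/PREFIX macros in verify-test.cpp expand to (the alt-name value is hypothetical):

```cpp
#include <map>

std::map<NID, Criteria> subject_criteria = {
	{ NID_countryName, Criteria("US") },                          // EXACT match in the X.509 NAME by default
	{ NID_organizationName, Criteria("XYZ", MatchType::PREFIX) }, // prefix match
	{ NID_subject_alt_name,
	  Criteria("DNS=host.example", MatchType::EXACT, X509Location::EXTENSION) }, // match in an extension
};
```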
File diff suppressed because it is too large.
FDBLibTLS/verify-test.cpp
@@ -33,11 +33,18 @@
 #include "FDBLibTLS/FDBLibTLSPolicy.h"

 struct FDBLibTLSVerifyTest {
-	FDBLibTLSVerifyTest(std::string input):
-		input(input), valid(false), verify_cert(true), verify_time(true), subject_criteria({}), issuer_criteria({}), root_criteria({}) {};
-	FDBLibTLSVerifyTest(std::string input, bool verify_cert, bool verify_time, std::map<int, Criteria> subject, std::map<int, Criteria> issuer, std::map<int, Criteria> root):
-		input(input), valid(true), verify_cert(verify_cert), verify_time(verify_time), subject_criteria(subject), issuer_criteria(issuer), root_criteria(root) {};
-	~FDBLibTLSVerifyTest() {};
+	FDBLibTLSVerifyTest(std::string input)
+	  : input(input), valid(false), verify_cert(true), verify_time(true), subject_criteria({}), issuer_criteria({}),
+	    root_criteria({}){};
+	FDBLibTLSVerifyTest(std::string input,
+	                    bool verify_cert,
+	                    bool verify_time,
+	                    std::map<int, Criteria> subject,
+	                    std::map<int, Criteria> issuer,
+	                    std::map<int, Criteria> root)
+	  : input(input), valid(true), verify_cert(verify_cert), verify_time(verify_time), subject_criteria(subject),
+	    issuer_criteria(issuer), root_criteria(root){};
+	~FDBLibTLSVerifyTest(){};

 	int run();

@@ -54,20 +61,21 @@ struct FDBLibTLSVerifyTest {

 static std::string criteriaToString(std::map<int, Criteria> const& criteria) {
 	std::string s;
-	for (auto &pair: criteria) {
-		s += "{" + std::to_string(pair.first) + ":(" + printable(pair.second.criteria) + ", " + boost::lexical_cast<std::string>((int)pair.second.match_type) + ", " + boost::lexical_cast<std::string>((int)pair.second.location) + ")}";
+	for (auto& pair : criteria) {
+		s += "{" + std::to_string(pair.first) + ":(" + printable(pair.second.criteria) + ", " +
+		     boost::lexical_cast<std::string>((int)pair.second.match_type) + ", " +
+		     boost::lexical_cast<std::string>((int)pair.second.location) + ")}";
 	}
 	return "{" + s + "}";
 }

-static void logf(const char* event, void* uid, bool is_error, ...) {
-}
+static void logf(const char* event, void* uid, bool is_error, ...) {}

 int FDBLibTLSVerifyTest::run() {
 	Reference<FDBLibTLSVerify> verify;
 	try {
-		verify = Reference<FDBLibTLSVerify>(new FDBLibTLSVerify(input));
-	} catch ( const std::runtime_error& e ) {
+		verify = makeReference<FDBLibTLSVerify>(input);
+	} catch (const std::runtime_error& e) {
 		if (valid) {
 			std::cerr << "FAIL: Verify test failed, but should have succeeded - '" << input << "'\n";
 			return 1;
@@ -87,25 +95,28 @@ int FDBLibTLSVerifyTest::run() {
 		return 1;
 	}
 	if (verify->subject_criteria != subject_criteria) {
-		std::cerr << "FAIL: Got subject criteria " << criteriaToString(verify->subject_criteria) << ", want " << criteriaToString(subject_criteria) << "\n";
+		std::cerr << "FAIL: Got subject criteria " << criteriaToString(verify->subject_criteria) << ", want "
+		          << criteriaToString(subject_criteria) << "\n";
 		return 1;
 	}
 	if (verify->issuer_criteria != issuer_criteria) {
-		std::cerr << "FAIL: Got issuer criteria " << criteriaToString(verify->issuer_criteria) << ", want " << criteriaToString(issuer_criteria) << "\n";
+		std::cerr << "FAIL: Got issuer criteria " << criteriaToString(verify->issuer_criteria) << ", want "
+		          << criteriaToString(issuer_criteria) << "\n";
 		return 1;
 	}
 	if (verify->root_criteria != root_criteria) {
-		std::cerr << "FAIL: Got root criteria " << criteriaToString(verify->root_criteria) << ", want " << criteriaToString(root_criteria) << "\n";
+		std::cerr << "FAIL: Got root criteria " << criteriaToString(verify->root_criteria) << ", want "
+		          << criteriaToString(root_criteria) << "\n";
 		return 1;
 	}
 	return 0;
 }

 static int policy_verify_test() {
-	Reference<FDBLibTLSPlugin> plugin = Reference<FDBLibTLSPlugin>(new FDBLibTLSPlugin());
-	Reference<FDBLibTLSPolicy> policy = Reference<FDBLibTLSPolicy>(new FDBLibTLSPolicy(plugin, (ITLSLogFunc)logf));
+	auto plugin = makeReference<FDBLibTLSPlugin>();
+	auto policy = makeReference<FDBLibTLSPolicy>(plugin, (ITLSLogFunc)logf);

-	const char *verify_peers[] = {
+	const char* verify_peers[] = {
 		"S.CN=abc",
 		"I.CN=def",
 		"R.CN=xyz,Check.Unexpired=0",
@@ -116,12 +127,12 @@ static int policy_verify_test() {
 		(int)strlen(verify_peers[2]),
 	};
 	Reference<FDBLibTLSVerify> verify_rules[] = {
-		Reference<FDBLibTLSVerify>(new FDBLibTLSVerify(std::string(verify_peers[0], verify_peers_len[0]))),
-		Reference<FDBLibTLSVerify>(new FDBLibTLSVerify(std::string(verify_peers[1], verify_peers_len[1]))),
-		Reference<FDBLibTLSVerify>(new FDBLibTLSVerify(std::string(verify_peers[2], verify_peers_len[2]))),
+		makeReference<FDBLibTLSVerify>(std::string(verify_peers[0], verify_peers_len[0])),
+		makeReference<FDBLibTLSVerify>(std::string(verify_peers[1], verify_peers_len[1])),
+		makeReference<FDBLibTLSVerify>(std::string(verify_peers[2], verify_peers_len[2])),
 	};

-	if (!policy->set_verify_peers(3, (const uint8_t **)verify_peers, verify_peers_len)) {
+	if (!policy->set_verify_peers(3, (const uint8_t**)verify_peers, verify_peers_len)) {
 		std::cerr << "FAIL: Policy verify test failed, but should have succeeded\n";
 		return 1;
 	}
@@ -131,25 +142,30 @@ static int policy_verify_test() {
 	}

 	int i = 0;
-	for (auto &verify_rule: policy->verify_rules) {
+	for (auto& verify_rule : policy->verify_rules) {
 		if (verify_rule->verify_cert != verify_rules[i]->verify_cert) {
-			std::cerr << "FAIL: Got verify cert " << verify_rule->verify_cert << ", want " << verify_rules[i]->verify_cert << "\n";
+			std::cerr << "FAIL: Got verify cert " << verify_rule->verify_cert << ", want "
+			          << verify_rules[i]->verify_cert << "\n";
 			return 1;
 		}
 		if (verify_rule->verify_time != verify_rules[i]->verify_time) {
-			std::cerr << "FAIL: Got verify time " << verify_rule->verify_time << ", want " << verify_rules[i]->verify_time << "\n";
+			std::cerr << "FAIL: Got verify time " << verify_rule->verify_time << ", want "
+			          << verify_rules[i]->verify_time << "\n";
 			return 1;
 		}
 		if (verify_rule->subject_criteria != verify_rules[i]->subject_criteria) {
-			std::cerr << "FAIL: Got subject criteria " << criteriaToString(verify_rule->subject_criteria) << ", want " << criteriaToString(verify_rules[i]->subject_criteria) << "\n";
+			std::cerr << "FAIL: Got subject criteria " << criteriaToString(verify_rule->subject_criteria) << ", want "
+			          << criteriaToString(verify_rules[i]->subject_criteria) << "\n";
 			return 1;
 		}
 		if (verify_rule->issuer_criteria != verify_rules[i]->issuer_criteria) {
-			std::cerr << "FAIL: Got issuer criteria " << criteriaToString(verify_rule->issuer_criteria) << ", want " << criteriaToString(verify_rules[i]->issuer_criteria) << "\n";
+			std::cerr << "FAIL: Got issuer criteria " << criteriaToString(verify_rule->issuer_criteria) << ", want "
+			          << criteriaToString(verify_rules[i]->issuer_criteria) << "\n";
 			return 1;
 		}
 		if (verify_rule->root_criteria != verify_rules[i]->root_criteria) {
-			std::cerr << "FAIL: Got root criteria " << criteriaToString(verify_rule->root_criteria) << ", want " << criteriaToString(verify_rules[i]->root_criteria) << "\n";
+			std::cerr << "FAIL: Got root criteria " << criteriaToString(verify_rule->root_criteria) << ", want "
+			          << criteriaToString(verify_rules[i]->root_criteria) << "\n";
 			return 1;
 		}
 		i++;
@@ -157,8 +173,7 @@ static int policy_verify_test() {
 	return 0;
 }

-int main(int argc, char **argv)
-{
+int main(int argc, char** argv) {
 	int failed = 0;

 #define EXACT(x) Criteria(x, MatchType::EXACT, X509Location::NAME)
@@ -173,29 +188,60 @@ int main(int argc, char **argv)
 		FDBLibTLSVerifyTest("Check.Unexpired=0", true, false, {}, {}, {}),
 		FDBLibTLSVerifyTest("Check.Valid=1,Check.Unexpired=0", true, false, {}, {}, {}),
 		FDBLibTLSVerifyTest("Check.Unexpired=0,Check.Valid=0", false, false, {}, {}, {}),
-		FDBLibTLSVerifyTest("Check.Unexpired=0,I.C=US,C=US,S.O=XYZCorp\\, LLC", true, false,
-			{{NID_countryName, EXACT("US")}, {NID_organizationName, EXACT("XYZCorp, LLC")}}, {{NID_countryName, EXACT("US")}}, {}),
-		FDBLibTLSVerifyTest("Check.Unexpired=0,I.C=US,C=US,S.O=XYZCorp\\= LLC", true, false,
-			{{NID_countryName, EXACT("US")}, {NID_organizationName, EXACT("XYZCorp= LLC")}}, {{NID_countryName, EXACT("US")}}, {}),
-		FDBLibTLSVerifyTest("Check.Unexpired=0,R.C=US,C=US,S.O=XYZCorp\\= LLC", true, false,
-			{{NID_countryName, EXACT("US")}, {NID_organizationName, EXACT("XYZCorp= LLC")}}, {}, {{NID_countryName, EXACT("US")}}),
-		FDBLibTLSVerifyTest("Check.Unexpired=0,I.C=US,C=US,S.O=XYZCorp=LLC", true, false,
-			{{NID_countryName, EXACT("US")}, {NID_organizationName, EXACT("XYZCorp=LLC")}}, {{NID_countryName, EXACT("US")}}, {}),
-		FDBLibTLSVerifyTest("I.C=US,C=US,Check.Unexpired=0,S.O=XYZCorp=LLC", true, false,
-			{{NID_countryName, EXACT("US")}, {NID_organizationName, EXACT("XYZCorp=LLC")}}, {{NID_countryName, EXACT("US")}}, {}),
-		FDBLibTLSVerifyTest("I.C=US,C=US,S.O=XYZCorp\\, LLC", true, true,
-			{{NID_countryName, EXACT("US")}, {NID_organizationName, EXACT("XYZCorp, LLC")}}, {{NID_countryName, EXACT("US")}}, {}),
-		FDBLibTLSVerifyTest("I.C=US,C=US,S.O=XYZCorp\\, LLC,R.CN=abc", true, true,
-			{{NID_countryName, EXACT("US")}, {NID_organizationName, EXACT("XYZCorp, LLC")}},
-			{{NID_countryName, EXACT("US")}},
-			{{NID_commonName, EXACT("abc")}}),
-		FDBLibTLSVerifyTest("C=\\,S=abc", true, true, {{NID_countryName, EXACT(",S=abc")}}, {}, {}),
-		FDBLibTLSVerifyTest("CN=\\61\\62\\63", true, true, {{NID_commonName, EXACT("abc")}}, {}, {}),
-		FDBLibTLSVerifyTest("CN=a\\62c", true, true, {{NID_commonName, EXACT("abc")}}, {}, {}),
-		FDBLibTLSVerifyTest("CN=a\\01c", true, true, {{NID_commonName, EXACT("a\001c")}}, {}, {}),
-		FDBLibTLSVerifyTest("S.subjectAltName=XYZCorp", true, true, {{NID_subject_alt_name, {"XYZCorp", MatchType::EXACT, X509Location::EXTENSION}}}, {}, {}),
-		FDBLibTLSVerifyTest("S.O>=XYZ", true, true, {{NID_organizationName, PREFIX("XYZ")}}, {}, {}),
-		FDBLibTLSVerifyTest("S.O<=LLC", true, true, {{NID_organizationName, SUFFIX("LLC")}}, {}, {}),
+		FDBLibTLSVerifyTest("Check.Unexpired=0,I.C=US,C=US,S.O=XYZCorp\\, LLC",
+		                    true,
+		                    false,
+		                    { { NID_countryName, EXACT("US") }, { NID_organizationName, EXACT("XYZCorp, LLC") } },
+		                    { { NID_countryName, EXACT("US") } },
+		                    {}),
+		FDBLibTLSVerifyTest("Check.Unexpired=0,I.C=US,C=US,S.O=XYZCorp\\= LLC",
+		                    true,
+		                    false,
+		                    { { NID_countryName, EXACT("US") }, { NID_organizationName, EXACT("XYZCorp= LLC") } },
+		                    { { NID_countryName, EXACT("US") } },
+		                    {}),
+		FDBLibTLSVerifyTest("Check.Unexpired=0,R.C=US,C=US,S.O=XYZCorp\\= LLC",
+		                    true,
+		                    false,
+		                    { { NID_countryName, EXACT("US") }, { NID_organizationName, EXACT("XYZCorp= LLC") } },
+		                    {},
+		                    { { NID_countryName, EXACT("US") } }),
+		FDBLibTLSVerifyTest("Check.Unexpired=0,I.C=US,C=US,S.O=XYZCorp=LLC",
+		                    true,
+		                    false,
+		                    { { NID_countryName, EXACT("US") }, { NID_organizationName, EXACT("XYZCorp=LLC") } },
+		                    { { NID_countryName, EXACT("US") } },
+		                    {}),
+		FDBLibTLSVerifyTest("I.C=US,C=US,Check.Unexpired=0,S.O=XYZCorp=LLC",
+		                    true,
+		                    false,
+		                    { { NID_countryName, EXACT("US") }, { NID_organizationName, EXACT("XYZCorp=LLC") } },
+		                    { { NID_countryName, EXACT("US") } },
+		                    {}),
+		FDBLibTLSVerifyTest("I.C=US,C=US,S.O=XYZCorp\\, LLC",
+		                    true,
+		                    true,
+		                    { { NID_countryName, EXACT("US") }, { NID_organizationName, EXACT("XYZCorp, LLC") } },
+		                    { { NID_countryName, EXACT("US") } },
+		                    {}),
+		FDBLibTLSVerifyTest("I.C=US,C=US,S.O=XYZCorp\\, LLC,R.CN=abc",
+		                    true,
+		                    true,
+		                    { { NID_countryName, EXACT("US") }, { NID_organizationName, EXACT("XYZCorp, LLC") } },
+		                    { { NID_countryName, EXACT("US") } },
+		                    { { NID_commonName, EXACT("abc") } }),
+		FDBLibTLSVerifyTest("C=\\,S=abc", true, true, { { NID_countryName, EXACT(",S=abc") } }, {}, {}),
+		FDBLibTLSVerifyTest("CN=\\61\\62\\63", true, true, { { NID_commonName, EXACT("abc") } }, {}, {}),
+		FDBLibTLSVerifyTest("CN=a\\62c", true, true, { { NID_commonName, EXACT("abc") } }, {}, {}),
+		FDBLibTLSVerifyTest("CN=a\\01c", true, true, { { NID_commonName, EXACT("a\001c") } }, {}, {}),
+		FDBLibTLSVerifyTest("S.subjectAltName=XYZCorp",
+		                    true,
+		                    true,
+		                    { { NID_subject_alt_name, { "XYZCorp", MatchType::EXACT, X509Location::EXTENSION } } },
+		                    {},
+		                    {}),
+		FDBLibTLSVerifyTest("S.O>=XYZ", true, true, { { NID_organizationName, PREFIX("XYZ") } }, {}, {}),
+		FDBLibTLSVerifyTest("S.O<=LLC", true, true, { { NID_organizationName, SUFFIX("LLC") } }, {}, {}),

 		// Invalid cases.
 		FDBLibTLSVerifyTest("Check.Invalid=0"),
@@ -212,7 +258,7 @@ int main(int argc, char **argv)
 #undef PREFIX
 #undef SUFFIX

-	for (auto &test: tests)
+	for (auto& test : tests)
 		failed |= test.run();

 	failed |= policy_verify_test();
@ -1,5 +1,7 @@
<img alt="FoundationDB logo" src="documentation/FDB_logo.png?raw=true" width="400">

FoundationDB is a distributed database designed to handle large volumes of structured data across clusters of commodity servers. It organizes data as an ordered key-value store and employs ACID transactions for all operations. It is especially well-suited for read/write workloads but also has excellent performance for write-intensive workloads. Users interact with the database using an API language binding.

To learn more about FoundationDB, visit [foundationdb.org](https://www.foundationdb.org/)
@ -155,11 +157,11 @@ The build under MacOS will work the same way as on Linux. To get boost and ninja
cmake -G Ninja <PATH_TO_FOUNDATIONDB_SOURCE>
```

To generate an installable package, you can use cpack:
To generate an installable package,

```sh
ninja
cpack -G productbuild
$SRCDIR/packaging/osx/buildpkg.sh . $SRCDIR
```

### Windows
@ -169,7 +171,7 @@ that Visual Studio is used to compile.

1. Install Visual Studio 2017 (Community Edition is tested)
1. Install cmake Version 3.12 or higher [CMake](https://cmake.org/)
1. Download version 1.72 of [Boost](https://dl.bintray.com/boostorg/release/1.72.0/source/boost_1_72_0.tar.bz2)
1. Download version 1.72 of [Boost](https://boostorg.jfrog.io/artifactory/main/release/1.72.0/source/boost_1_72_0.tar.bz2)
1. Unpack boost (you don't need to compile it)
1. Install [Mono](http://www.mono-project.com/download/stable/)
1. (Optional) Install a [JDK](http://www.oracle.com/technetwork/java/javase/downloads/index.html). FoundationDB currently builds with Java 8
@ -1,6 +1,6 @@
add_subdirectory(c)
if(NOT OPEN_FOR_IDE)
  # flow bindings currently doesn't support that
  add_subdirectory(c)
  add_subdirectory(flow)
endif()
add_subdirectory(python)
@ -26,7 +26,7 @@ sys.path[:0] = [os.path.join(os.path.dirname(__file__), '..', '..', 'bindings',

import util

FDB_API_VERSION = 700
FDB_API_VERSION = 710

LOGGING = {
    'version': 1,
@ -157,7 +157,7 @@ def choose_api_version(selected_api_version, tester_min_version, tester_max_vers
        api_version = min_version
    elif random.random() < 0.9:
        api_version = random.choice([v for v in [13, 14, 16, 21, 22, 23, 100, 200, 300, 400, 410, 420, 430,
                                     440, 450, 460, 500, 510, 520, 600, 610, 620, 630, 700] if v >= min_version and v <= max_version])
                                     440, 450, 460, 500, 510, 520, 600, 610, 620, 630, 700, 710] if v >= min_version and v <= max_version])
    else:
        api_version = random.randint(min_version, max_version)
@ -20,7 +20,7 @@

import os

MAX_API_VERSION = 700
MAX_API_VERSION = 710
COMMON_TYPES = ['null', 'bytes', 'string', 'int', 'uuid', 'bool', 'float', 'double', 'tuple']
ALL_TYPES = COMMON_TYPES + ['versionstamp']
@ -157,7 +157,7 @@ class ApiTest(Test):
        read_conflicts = ['READ_CONFLICT_RANGE', 'READ_CONFLICT_KEY']
        write_conflicts = ['WRITE_CONFLICT_RANGE', 'WRITE_CONFLICT_KEY', 'DISABLE_WRITE_CONFLICT']
        txn_sizes = ['GET_APPROXIMATE_SIZE']
        storage_metrics = ['GET_ESTIMATED_RANGE_SIZE']
        storage_metrics = ['GET_ESTIMATED_RANGE_SIZE', 'GET_RANGE_SPLIT_POINTS']

        op_choices += reads
        op_choices += mutations
@ -553,6 +553,23 @@ class ApiTest(Test):
                instructions.push_args(key1, key2)
                instructions.append(op)
                self.add_strings(1)
            elif op == 'GET_RANGE_SPLIT_POINTS':
                # Protect against inverted range and identical keys
                key1 = self.workspace.pack(self.random.random_tuple(1))
                key2 = self.workspace.pack(self.random.random_tuple(1))

                while key1 == key2:
                    key1 = self.workspace.pack(self.random.random_tuple(1))
                    key2 = self.workspace.pack(self.random.random_tuple(1))

                if key1 > key2:
                    key1, key2 = key2, key1

                # TODO: randomize chunkSize but should not exceed 100M(shard limit)
                chunkSize = 10000000  # 10M
                instructions.push_args(key1, key2, chunkSize)
                instructions.append(op)
                self.add_strings(1)

            else:
                assert False, 'Unknown operation: ' + op
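A sketch of what the new GET_RANGE_SPLIT_POINTS instruction ultimately drives through the C API; `fdb_transaction_get_range_split_points` and the `FDBKey` array getter appear in the `fdb_c.h` diff further down, and the 10M chunk size mirrors the tester's choice. Error handling is kept minimal:

```c
#define FDB_API_VERSION 710
#include <foundationdb/fdb_c.h>

/* Returns 0 on success; on success, split_keys[0..count-1] bound chunks of
 * roughly chunk_size bytes within [k1, k2). */
fdb_error_t print_split_points(FDBTransaction* tr,
                               const uint8_t* k1, int l1,
                               const uint8_t* k2, int l2) {
    FDBFuture* f = fdb_transaction_get_range_split_points(
        tr, k1, l1, k2, l2, 10000000 /* 10M, well under the 100M shard limit */);
    fdb_error_t err = fdb_future_block_until_ready(f);
    if (!err)
        err = fdb_future_get_error(f);
    if (!err) {
        FDBKey const* split_keys;
        int count;
        err = fdb_future_get_key_array(f, &split_keys, &count);
        /* consume split_keys here; the memory belongs to the future */
    }
    fdb_future_destroy(f);
    return err;
}
```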
@ -34,7 +34,7 @@ fdb.api_version(FDB_API_VERSION)


class ScriptedTest(Test):
    TEST_API_VERSION = 700
    TEST_API_VERSION = 710

    def __init__(self, subspace):
        super(ScriptedTest, self).__init__(subspace, ScriptedTest.TEST_API_VERSION, ScriptedTest.TEST_API_VERSION)
@ -71,17 +71,27 @@ if(NOT WIN32)
    test/mako/mako.h
    test/mako/utils.c
    test/mako/utils.h)
  add_subdirectory(test/unit/third_party)
  find_package(Threads REQUIRED)
  set(UNIT_TEST_SRCS
    test/unit/unit_tests.cpp
    test/unit/fdb_api.cpp
    test/unit/fdb_api.hpp)

  if(OPEN_FOR_IDE)
    add_library(fdb_c_performance_test OBJECT test/performance_test.c test/test.h)
    add_library(fdb_c_ryw_benchmark OBJECT test/ryw_benchmark.c test/test.h)
    add_library(fdb_c_txn_size_test OBJECT test/txn_size_test.c test/test.h)
    add_library(mako OBJECT ${MAKO_SRCS})
    add_library(fdb_c_setup_tests OBJECT test/unit/setup_tests.cpp)
    add_library(fdb_c_unit_tests OBJECT ${UNIT_TEST_SRCS})
  else()
    add_executable(fdb_c_performance_test test/performance_test.c test/test.h)
    add_executable(fdb_c_ryw_benchmark test/ryw_benchmark.c test/test.h)
    add_executable(fdb_c_txn_size_test test/txn_size_test.c test/test.h)
    add_executable(mako ${MAKO_SRCS})
    add_executable(fdb_c_setup_tests test/unit/setup_tests.cpp)
    add_executable(fdb_c_unit_tests ${UNIT_TEST_SRCS})
    strip_debug_symbols(fdb_c_performance_test)
    strip_debug_symbols(fdb_c_ryw_benchmark)
    strip_debug_symbols(fdb_c_txn_size_test)
@ -89,9 +99,49 @@ if(NOT WIN32)
  target_link_libraries(fdb_c_performance_test PRIVATE fdb_c)
  target_link_libraries(fdb_c_ryw_benchmark PRIVATE fdb_c)
  target_link_libraries(fdb_c_txn_size_test PRIVATE fdb_c)

  add_dependencies(fdb_c_setup_tests doctest)
  add_dependencies(fdb_c_unit_tests doctest)
  target_include_directories(fdb_c_setup_tests PUBLIC ${DOCTEST_INCLUDE_DIR})
  target_include_directories(fdb_c_unit_tests PUBLIC ${DOCTEST_INCLUDE_DIR})
  target_link_libraries(fdb_c_setup_tests PRIVATE fdb_c Threads::Threads)
  target_link_libraries(fdb_c_unit_tests PRIVATE fdb_c Threads::Threads)

  # do not set RPATH for mako
  set_property(TARGET mako PROPERTY SKIP_BUILD_RPATH TRUE)
  target_link_libraries(mako PRIVATE fdb_c)

  if(NOT OPEN_FOR_IDE)
    # Make sure that fdb_c.h is compatible with c90
    add_executable(fdb_c90_test test/fdb_c90_test.c)
    set_property(TARGET fdb_c90_test PROPERTY C_STANDARD 90)
    target_compile_options(fdb_c90_test PRIVATE -Wall -Wextra -Wpedantic -Werror)
    target_link_libraries(fdb_c90_test PRIVATE fdb_c)
  endif()

  add_custom_command(
    OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/libfdb_c.so
    COMMAND ${CMAKE_COMMAND} -E copy $<TARGET_FILE:fdb_c> ${CMAKE_CURRENT_BINARY_DIR}/libfdb_c.so
    DEPENDS fdb_c
    COMMENT "Copy libfdb_c to use as external client for test")
  add_custom_target(external_client DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/libfdb_c.so)
  add_dependencies(fdb_c_unit_tests external_client)

  add_fdbclient_test(
    NAME fdb_c_setup_tests
    COMMAND $<TARGET_FILE:fdb_c_setup_tests>)
  add_fdbclient_test(
    NAME fdb_c_unit_tests
    COMMAND $<TARGET_FILE:fdb_c_unit_tests>
    @CLUSTER_FILE@
    fdb)
  add_fdbclient_test(
    NAME fdb_c_external_client_unit_tests
    COMMAND $<TARGET_FILE:fdb_c_unit_tests>
    @CLUSTER_FILE@
    fdb
    ${CMAKE_CURRENT_BINARY_DIR}/libfdb_c.so
    )
endif()

set(c_workloads_srcs
@ -25,17 +25,17 @@

#include <Windows.h>

BOOL WINAPI DllMain( HINSTANCE dll, DWORD reason, LPVOID reserved ) {
BOOL WINAPI DllMain(HINSTANCE dll, DWORD reason, LPVOID reserved) {

    if (reason == DLL_THREAD_DETACH)
        releaseAllThreadMagazines();
    return TRUE;
}

#elif defined( __unixish__ )
#elif defined(__unixish__)

#ifdef __INTEL_COMPILER
#pragma warning ( disable:2415 )
#pragma warning(disable : 2415)
#endif

static pthread_key_t threadDestructorKey;

@ -45,13 +45,13 @@ static void threadDestructor(void*) {
}

void registerThread() {
    pthread_setspecific( threadDestructorKey, (const void*)1 );
    pthread_setspecific(threadDestructorKey, (const void*)1);
}

static int initThreadDestructorKey() {
    if (!pthread_key_create(&threadDestructorKey, &threadDestructor)) {
        registerThread();
        setFastAllocatorThreadInitFunction( &registerThread );
        setFastAllocatorThreadInitFunction(&registerThread);
    }

    return 0;
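The idiom above deserves a note: a pthread key's destructor only fires for threads that stored a non-NULL value under the key, which is what the `(const void*)1` marker arranges. A standalone sketch of the same pattern, independent of the allocator code:

```c
#include <pthread.h>
#include <stdio.h>

static pthread_key_t key;

static void on_thread_exit(void* v) {
    (void)v;
    puts("thread exiting; per-thread caches would be released here");
}

static void* worker(void* arg) {
    (void)arg;
    pthread_setspecific(key, (const void*)1); /* arm the destructor */
    return NULL;
}

int main(void) {
    pthread_t t;
    if (pthread_key_create(&key, &on_thread_exit))
        return 1;
    pthread_create(&t, NULL, &worker, NULL);
    pthread_join(t, NULL); /* the message prints during worker teardown */
    return 0;
}
```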
File diff suppressed because it is too large
@ -44,7 +44,8 @@ enum class FDBSeverity { Debug, Info, Warn, WarnAlways, Error };

class FDBLogger {
public:
    virtual void trace(FDBSeverity sev, const std::string& name,
    virtual void trace(FDBSeverity sev,
                       const std::string& name,
                       const std::vector<std::pair<std::string, std::string>>& details) = 0;
};
@ -22,16 +22,15 @@
#define FDB_C_H
#pragma once

#ifndef DLLEXPORT
#define DLLEXPORT
#endif

#if !defined(FDB_API_VERSION)
#error You must #define FDB_API_VERSION prior to including fdb_c.h (current version is 700)
#error You must #define FDB_API_VERSION prior to including fdb_c.h (current version is 710)
#elif FDB_API_VERSION < 13
#error API version no longer supported (upgrade to 13)
#elif FDB_API_VERSION > 700
#elif FDB_API_VERSION > 710
#error Requested API version requires a newer version of this header
#endif

@ -45,11 +44,15 @@
#define WARN_UNUSED_RESULT
#endif

// With default settings, gcc will not warn about unprototyped functions being called, so it
// is easy to erroneously call a function which is not available at FDB_API_VERSION and then
// get an error only at runtime. These macros ensure a compile error in such cases, and
// attempt to make the compile error slightly informative.
#define This_FoundationDB_API_function_is_removed_at_this_FDB_API_VERSION() [=====]
/*
 * With default settings, gcc will not warn about unprototyped functions being
 * called, so it is easy to erroneously call a function which is not available
 * at FDB_API_VERSION and then get an error only at runtime. These macros
 * ensure a compile error in such cases, and attempt to make the compile error
 * slightly informative.
 */
#define This_FoundationDB_API_function_is_removed_at_this_FDB_API_VERSION() \
	{ == == = }
#define FDB_REMOVED_FUNCTION This_FoundationDB_API_function_is_removed_at_this_FDB_API_VERSION(0)

#include <stdint.h>
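A minimal sketch of the contract those macros enforce: the client pins FDB_API_VERSION before the include, and anything removed at that level (the legacy cluster API further down this header, for instance) expands to FDB_REMOVED_FUNCTION and fails at compile time instead of at runtime:

```c
#define FDB_API_VERSION 710
#include <foundationdb/fdb_c.h>

int main(void) {
    /* fdb_create_cluster("fdb.cluster");  -- expands to FDB_REMOVED_FUNCTION
     * at version 710 and refuses to compile. */
    return fdb_select_api_version(710) ? 1 : 0;
}
```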
@ -60,301 +63,351 @@
extern "C" {
#endif

/* Pointers to these opaque types represent objects in the FDB API */
typedef struct FDB_future FDBFuture;
typedef struct FDB_database FDBDatabase;
typedef struct FDB_transaction FDBTransaction;
/* Pointers to these opaque types represent objects in the FDB API */
typedef struct FDB_future FDBFuture;
typedef struct FDB_database FDBDatabase;
typedef struct FDB_transaction FDBTransaction;

typedef int fdb_error_t;
typedef int fdb_bool_t;
typedef int fdb_error_t;
typedef int fdb_bool_t;

DLLEXPORT const char*
fdb_get_error( fdb_error_t code );
DLLEXPORT const char* fdb_get_error(fdb_error_t code);

DLLEXPORT fdb_bool_t
fdb_error_predicate( int predicate_test, fdb_error_t code );
DLLEXPORT fdb_bool_t fdb_error_predicate(int predicate_test, fdb_error_t code);

#define /* fdb_error_t */ fdb_select_api_version(v) fdb_select_api_version_impl(v, FDB_API_VERSION)
#define /* fdb_error_t */ fdb_select_api_version(v) fdb_select_api_version_impl(v, FDB_API_VERSION)

DLLEXPORT WARN_UNUSED_RESULT fdb_error_t
fdb_network_set_option( FDBNetworkOption option, uint8_t const* value,
                        int value_length );
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_network_set_option(FDBNetworkOption option,
                                                                uint8_t const* value,
                                                                int value_length);

#if FDB_API_VERSION >= 14
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_setup_network();
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_setup_network();
#endif

DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_run_network();
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_run_network();

DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_stop_network();
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_stop_network();

DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_add_network_thread_completion_hook(void (*hook)(void*), void *hook_parameter);
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_add_network_thread_completion_hook(void (*hook)(void*),
                                                                                void* hook_parameter);

#pragma pack(push, 4)
#if FDB_API_VERSION >= 700
typedef struct keyvalue {
	const uint8_t* key;
	int key_length;
	const uint8_t* value;
	int value_length;
} FDBKeyValue;
typedef struct key {
	const uint8_t* key;
	int key_length;
} FDBKey;
#if FDB_API_VERSION >= 710
typedef struct keyvalue {
	const uint8_t* key;
	int key_length;
	const uint8_t* value;
	int value_length;
} FDBKeyValue;
#else
typedef struct keyvalue {
	const void* key;
	int key_length;
	const void* value;
	int value_length;
} FDBKeyValue;
typedef struct keyvalue {
	const void* key;
	int key_length;
	const void* value;
	int value_length;
} FDBKeyValue;
#endif
#pragma pack(pop)

DLLEXPORT void fdb_future_cancel( FDBFuture* f );
DLLEXPORT void fdb_future_cancel(FDBFuture* f);

DLLEXPORT void fdb_future_release_memory( FDBFuture* f );
DLLEXPORT void fdb_future_release_memory(FDBFuture* f);

DLLEXPORT void fdb_future_destroy( FDBFuture* f );
DLLEXPORT void fdb_future_destroy(FDBFuture* f);

DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_future_block_until_ready( FDBFuture* f );
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_future_block_until_ready(FDBFuture* f);

DLLEXPORT fdb_bool_t fdb_future_is_ready( FDBFuture* f );
DLLEXPORT fdb_bool_t fdb_future_is_ready(FDBFuture* f);

typedef void (*FDBCallback)(FDBFuture* future, void* callback_parameter);
typedef void (*FDBCallback)(FDBFuture* future, void* callback_parameter);

DLLEXPORT WARN_UNUSED_RESULT fdb_error_t
fdb_future_set_callback( FDBFuture* f, FDBCallback callback,
                         void* callback_parameter );
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_future_set_callback(FDBFuture* f,
                                                                 FDBCallback callback,
                                                                 void* callback_parameter);

#if FDB_API_VERSION >= 23
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t
fdb_future_get_error( FDBFuture* f );
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_future_get_error(FDBFuture* f);
#endif

DLLEXPORT WARN_UNUSED_RESULT fdb_error_t
fdb_future_get_int64( FDBFuture* f, int64_t* out );
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_future_get_int64(FDBFuture* f, int64_t* out);

DLLEXPORT WARN_UNUSED_RESULT fdb_error_t
fdb_future_get_key( FDBFuture* f, uint8_t const** out_key,
                    int* out_key_length );
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_future_get_uint64(FDBFuture* f, uint64_t* out);

DLLEXPORT WARN_UNUSED_RESULT fdb_error_t
fdb_future_get_value( FDBFuture* f, fdb_bool_t *out_present,
                      uint8_t const** out_value,
                      int* out_value_length );
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_future_get_key(FDBFuture* f, uint8_t const** out_key, int* out_key_length);

DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_future_get_value(FDBFuture* f,
                                                              fdb_bool_t* out_present,
                                                              uint8_t const** out_value,
                                                              int* out_value_length);

#if FDB_API_VERSION >= 14
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t
fdb_future_get_keyvalue_array( FDBFuture* f, FDBKeyValue const** out_kv,
                               int* out_count, fdb_bool_t* out_more );
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_future_get_keyvalue_array(FDBFuture* f,
                                                                       FDBKeyValue const** out_kv,
                                                                       int* out_count,
                                                                       fdb_bool_t* out_more);
#endif
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_future_get_key_array(FDBFuture* f,
                                                                  FDBKey const** out_key_array,
                                                                  int* out_count);

DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_future_get_string_array(FDBFuture* f,
                                                                     const char*** out_strings,
                                                                     int* out_count);

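A hedged sketch of the callback path declared above, as an alternative to blocking on a future. It assumes a network thread is already running (fdb_setup_network plus fdb_run_network on its own thread) and keeps error handling minimal:

```c
#define FDB_API_VERSION 710
#include <foundationdb/fdb_c.h>

#include <stdio.h>

static void on_ready(FDBFuture* f, void* arg) {
    int64_t version = 0;
    fdb_error_t err = fdb_future_get_int64(f, &version);
    if (!err)
        printf("read version: %lld\n", (long long)version);
    fdb_future_destroy(f); /* callbacks own the future's cleanup here */
    (void)arg;
}

/* tr is an open FDBTransaction*; fires on_ready on the network thread. */
void request_read_version(FDBTransaction* tr) {
    FDBFuture* f = fdb_transaction_get_read_version(tr);
    if (fdb_future_set_callback(f, &on_ready, NULL) != 0)
        fdb_future_destroy(f);
}
```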
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_create_database(const char* cluster_file_path, FDBDatabase** out_database);

DLLEXPORT void fdb_database_destroy(FDBDatabase* d);

DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_database_set_option(FDBDatabase* d,
                                                                 FDBDatabaseOption option,
                                                                 uint8_t const* value,
                                                                 int value_length);

DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_database_create_transaction(FDBDatabase* d,
                                                                         FDBTransaction** out_transaction);

DLLEXPORT WARN_UNUSED_RESULT FDBFuture* fdb_database_reboot_worker(FDBDatabase* db,
                                                                   uint8_t const* address,
                                                                   int address_length,
                                                                   fdb_bool_t check,
                                                                   int duration);

DLLEXPORT WARN_UNUSED_RESULT FDBFuture* fdb_database_force_recovery_with_data_loss(FDBDatabase* db,
                                                                                   uint8_t const* dcid,
                                                                                   int dcid_length);

DLLEXPORT WARN_UNUSED_RESULT FDBFuture* fdb_database_create_snapshot(FDBDatabase* db,
                                                                     uint8_t const* uid,
                                                                     int uid_length,
                                                                     uint8_t const* snap_command,
                                                                     int snap_command_length);

DLLEXPORT WARN_UNUSED_RESULT double fdb_database_get_main_thread_busyness(FDBDatabase* db);

DLLEXPORT WARN_UNUSED_RESULT FDBFuture* fdb_database_get_server_protocol(FDBDatabase* db, uint64_t expected_version);

DLLEXPORT void fdb_transaction_destroy(FDBTransaction* tr);

DLLEXPORT void fdb_transaction_cancel(FDBTransaction* tr);

#if FDB_API_VERSION >= 14
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_transaction_set_option(FDBTransaction* tr,
                                                                    FDBTransactionOption option,
                                                                    uint8_t const* value,
                                                                    int value_length);
#endif

DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_future_get_string_array(FDBFuture* f,
                                                                     const char*** out_strings, int* out_count);
DLLEXPORT void fdb_transaction_set_read_version(FDBTransaction* tr, int64_t version);

DLLEXPORT WARN_UNUSED_RESULT fdb_error_t
fdb_create_database( const char* cluster_file_path, FDBDatabase** out_database );

DLLEXPORT void fdb_database_destroy( FDBDatabase* d );

DLLEXPORT WARN_UNUSED_RESULT fdb_error_t
fdb_database_set_option( FDBDatabase* d, FDBDatabaseOption option,
                         uint8_t const* value, int value_length );

DLLEXPORT WARN_UNUSED_RESULT fdb_error_t
fdb_database_create_transaction( FDBDatabase* d,
                                 FDBTransaction** out_transaction );

DLLEXPORT void fdb_transaction_destroy( FDBTransaction* tr);

DLLEXPORT void fdb_transaction_cancel( FDBTransaction* tr);
DLLEXPORT WARN_UNUSED_RESULT FDBFuture* fdb_transaction_get_read_version(FDBTransaction* tr);

#if FDB_API_VERSION >= 14
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t
fdb_transaction_set_option( FDBTransaction* tr, FDBTransactionOption option,
                            uint8_t const* value, int value_length );
#endif

DLLEXPORT void
fdb_transaction_set_read_version( FDBTransaction* tr, int64_t version );

DLLEXPORT WARN_UNUSED_RESULT FDBFuture* fdb_transaction_get_read_version( FDBTransaction* tr );

#if FDB_API_VERSION >= 14
DLLEXPORT WARN_UNUSED_RESULT FDBFuture*
fdb_transaction_get( FDBTransaction* tr, uint8_t const* key_name,
                     int key_name_length, fdb_bool_t snapshot );
DLLEXPORT WARN_UNUSED_RESULT FDBFuture* fdb_transaction_get(FDBTransaction* tr,
                                                            uint8_t const* key_name,
                                                            int key_name_length,
                                                            fdb_bool_t snapshot);
#endif

#if FDB_API_VERSION >= 14
DLLEXPORT WARN_UNUSED_RESULT FDBFuture*
fdb_transaction_get_key( FDBTransaction* tr, uint8_t const* key_name,
                         int key_name_length, fdb_bool_t or_equal,
                         int offset, fdb_bool_t snapshot );
DLLEXPORT WARN_UNUSED_RESULT FDBFuture* fdb_transaction_get_key(FDBTransaction* tr,
                                                                uint8_t const* key_name,
                                                                int key_name_length,
                                                                fdb_bool_t or_equal,
                                                                int offset,
                                                                fdb_bool_t snapshot);
#endif

DLLEXPORT WARN_UNUSED_RESULT FDBFuture*
fdb_transaction_get_addresses_for_key(FDBTransaction* tr, uint8_t const* key_name,
                                      int key_name_length);
DLLEXPORT WARN_UNUSED_RESULT FDBFuture* fdb_transaction_get_addresses_for_key(FDBTransaction* tr,
                                                                              uint8_t const* key_name,
                                                                              int key_name_length);

#if FDB_API_VERSION >= 14
DLLEXPORT WARN_UNUSED_RESULT FDBFuture* fdb_transaction_get_range(
    FDBTransaction* tr, uint8_t const* begin_key_name,
    int begin_key_name_length, fdb_bool_t begin_or_equal, int begin_offset,
    uint8_t const* end_key_name, int end_key_name_length,
    fdb_bool_t end_or_equal, int end_offset, int limit, int target_bytes,
    FDBStreamingMode mode, int iteration, fdb_bool_t snapshot,
    fdb_bool_t reverse );
DLLEXPORT WARN_UNUSED_RESULT FDBFuture* fdb_transaction_get_range(FDBTransaction* tr,
                                                                  uint8_t const* begin_key_name,
                                                                  int begin_key_name_length,
                                                                  fdb_bool_t begin_or_equal,
                                                                  int begin_offset,
                                                                  uint8_t const* end_key_name,
                                                                  int end_key_name_length,
                                                                  fdb_bool_t end_or_equal,
                                                                  int end_offset,
                                                                  int limit,
                                                                  int target_bytes,
                                                                  FDBStreamingMode mode,
                                                                  int iteration,
                                                                  fdb_bool_t snapshot,
                                                                  fdb_bool_t reverse);
#endif

DLLEXPORT void
fdb_transaction_set( FDBTransaction* tr, uint8_t const* key_name,
                     int key_name_length, uint8_t const* value,
                     int value_length );
DLLEXPORT void fdb_transaction_set(FDBTransaction* tr,
                                   uint8_t const* key_name,
                                   int key_name_length,
                                   uint8_t const* value,
                                   int value_length);

DLLEXPORT void
fdb_transaction_atomic_op( FDBTransaction* tr, uint8_t const* key_name,
                           int key_name_length, uint8_t const* param,
                           int param_length, FDBMutationType operation_type );
DLLEXPORT void fdb_transaction_atomic_op(FDBTransaction* tr,
                                         uint8_t const* key_name,
                                         int key_name_length,
                                         uint8_t const* param,
                                         int param_length,
                                         FDBMutationType operation_type);

DLLEXPORT void
fdb_transaction_clear( FDBTransaction* tr, uint8_t const* key_name,
                       int key_name_length );
DLLEXPORT void fdb_transaction_clear(FDBTransaction* tr, uint8_t const* key_name, int key_name_length);

DLLEXPORT void fdb_transaction_clear_range(
    FDBTransaction* tr, uint8_t const* begin_key_name,
    int begin_key_name_length, uint8_t const* end_key_name,
    int end_key_name_length );
DLLEXPORT void fdb_transaction_clear_range(FDBTransaction* tr,
                                           uint8_t const* begin_key_name,
                                           int begin_key_name_length,
                                           uint8_t const* end_key_name,
                                           int end_key_name_length);

DLLEXPORT WARN_UNUSED_RESULT FDBFuture* fdb_transaction_watch( FDBTransaction *tr,
                                                               uint8_t const* key_name,
                                                               int key_name_length);
DLLEXPORT WARN_UNUSED_RESULT FDBFuture* fdb_transaction_watch(FDBTransaction* tr,
                                                              uint8_t const* key_name,
                                                              int key_name_length);

DLLEXPORT WARN_UNUSED_RESULT FDBFuture* fdb_transaction_commit( FDBTransaction* tr );
DLLEXPORT WARN_UNUSED_RESULT FDBFuture* fdb_transaction_commit(FDBTransaction* tr);

DLLEXPORT WARN_UNUSED_RESULT fdb_error_t
fdb_transaction_get_committed_version( FDBTransaction* tr,
                                       int64_t* out_version );
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_transaction_get_committed_version(FDBTransaction* tr,
                                                                               int64_t* out_version);

// This function intentionally returns an FDBFuture instead of an integer directly,
// so that calling this API can see the effect of previous mutations on the transaction.
// Specifically, mutations are applied asynchronously by the main thread. In order to
// see them, this call has to be serviced by the main thread too.
DLLEXPORT WARN_UNUSED_RESULT FDBFuture*
fdb_transaction_get_approximate_size(FDBTransaction* tr);
/*
 * This function intentionally returns an FDBFuture instead of an integer
 * directly, so that calling this API can see the effect of previous
 * mutations on the transaction. Specifically, mutations are applied
 * asynchronously by the main thread. In order to see them, this call has to
 * be serviced by the main thread too.
 */
DLLEXPORT WARN_UNUSED_RESULT FDBFuture* fdb_transaction_get_approximate_size(FDBTransaction* tr);

DLLEXPORT WARN_UNUSED_RESULT FDBFuture* fdb_transaction_get_versionstamp( FDBTransaction* tr );
DLLEXPORT WARN_UNUSED_RESULT FDBFuture* fdb_transaction_get_versionstamp(FDBTransaction* tr);

DLLEXPORT WARN_UNUSED_RESULT FDBFuture*
fdb_transaction_on_error( FDBTransaction* tr, fdb_error_t error );
DLLEXPORT WARN_UNUSED_RESULT FDBFuture* fdb_transaction_on_error(FDBTransaction* tr, fdb_error_t error);

DLLEXPORT void fdb_transaction_reset( FDBTransaction* tr );
DLLEXPORT void fdb_transaction_reset(FDBTransaction* tr);

DLLEXPORT WARN_UNUSED_RESULT fdb_error_t
fdb_transaction_add_conflict_range(FDBTransaction *tr,
                                   uint8_t const* begin_key_name,
                                   int begin_key_name_length,
                                   uint8_t const* end_key_name,
                                   int end_key_name_length,
                                   FDBConflictRangeType type);
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_transaction_add_conflict_range(FDBTransaction* tr,
                                                                            uint8_t const* begin_key_name,
                                                                            int begin_key_name_length,
                                                                            uint8_t const* end_key_name,
                                                                            int end_key_name_length,
                                                                            FDBConflictRangeType type);

DLLEXPORT WARN_UNUSED_RESULT FDBFuture*
fdb_transaction_get_estimated_range_size_bytes( FDBTransaction* tr, uint8_t const* begin_key_name,
    int begin_key_name_length, uint8_t const* end_key_name, int end_key_name_length);
DLLEXPORT WARN_UNUSED_RESULT FDBFuture* fdb_transaction_get_estimated_range_size_bytes(FDBTransaction* tr,
                                                                                       uint8_t const* begin_key_name,
                                                                                       int begin_key_name_length,
                                                                                       uint8_t const* end_key_name,
                                                                                       int end_key_name_length);

#define FDB_KEYSEL_LAST_LESS_THAN(k, l) k, l, 0, 0
#define FDB_KEYSEL_LAST_LESS_OR_EQUAL(k, l) k, l, 1, 0
#define FDB_KEYSEL_FIRST_GREATER_THAN(k, l) k, l, 1, 1
#define FDB_KEYSEL_FIRST_GREATER_OR_EQUAL(k, l) k, l, 0, 1
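A small sketch of draining the estimated-size future declared just above; the byte count comes back through fdb_future_get_int64. This is illustration, not part of the header:

```c
#define FDB_API_VERSION 710
#include <foundationdb/fdb_c.h>

/* Returns 0 on success and writes the estimated byte size of [k1, k2). */
fdb_error_t estimated_bytes(FDBTransaction* tr,
                            const uint8_t* k1, int l1,
                            const uint8_t* k2, int l2,
                            int64_t* out_bytes) {
    FDBFuture* f = fdb_transaction_get_estimated_range_size_bytes(tr, k1, l1, k2, l2);
    fdb_error_t err = fdb_future_block_until_ready(f);
    if (!err)
        err = fdb_future_get_error(f);
    if (!err)
        err = fdb_future_get_int64(f, out_bytes);
    fdb_future_destroy(f);
    return err;
}
```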
DLLEXPORT WARN_UNUSED_RESULT FDBFuture* fdb_transaction_get_range_split_points(FDBTransaction* tr,
                                                                               uint8_t const* begin_key_name,
                                                                               int begin_key_name_length,
                                                                               uint8_t const* end_key_name,
                                                                               int end_key_name_length,
                                                                               int64_t chunk_size);

DLLEXPORT WARN_UNUSED_RESULT fdb_error_t
fdb_select_api_version_impl( int runtime_version, int header_version );
#define FDB_KEYSEL_LAST_LESS_THAN(k, l) k, l, 0, 0
#define FDB_KEYSEL_LAST_LESS_OR_EQUAL(k, l) k, l, 1, 0
#define FDB_KEYSEL_FIRST_GREATER_THAN(k, l) k, l, 1, 1
#define FDB_KEYSEL_FIRST_GREATER_OR_EQUAL(k, l) k, l, 0, 1

DLLEXPORT int fdb_get_max_api_version();
DLLEXPORT const char* fdb_get_client_version();
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_select_api_version_impl(int runtime_version, int header_version);

/* LEGACY API VERSIONS */
DLLEXPORT int fdb_get_max_api_version();
DLLEXPORT const char* fdb_get_client_version();

/* LEGACY API VERSIONS */

#if FDB_API_VERSION < 620
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t
fdb_future_get_version( FDBFuture* f, int64_t* out_version );
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_future_get_version(FDBFuture* f, int64_t* out_version);
#else
#define fdb_future_get_version(f, ov) FDB_REMOVED_FUNCTION
#define fdb_future_get_version(f, ov) FDB_REMOVED_FUNCTION
#endif

#if FDB_API_VERSION < 610 || defined FDB_INCLUDE_LEGACY_TYPES
typedef struct FDB_cluster FDBCluster;
typedef struct FDB_cluster FDBCluster;

typedef enum {
	// This option is only a placeholder for C compatibility and should not be used
	FDB_CLUSTER_OPTION_DUMMY_DO_NOT_USE=-1
} FDBClusterOption;
typedef enum {
	/* This option is only a placeholder for C compatibility and should not be used */
	FDB_CLUSTER_OPTION_DUMMY_DO_NOT_USE = -1
} FDBClusterOption;
#endif

#if FDB_API_VERSION < 610
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t
fdb_future_get_cluster( FDBFuture* f, FDBCluster** out_cluster );
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_future_get_cluster(FDBFuture* f, FDBCluster** out_cluster);

DLLEXPORT WARN_UNUSED_RESULT fdb_error_t
fdb_future_get_database( FDBFuture* f, FDBDatabase** out_database );
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_future_get_database(FDBFuture* f, FDBDatabase** out_database);

DLLEXPORT WARN_UNUSED_RESULT FDBFuture* fdb_create_cluster( const char* cluster_file_path );
DLLEXPORT WARN_UNUSED_RESULT FDBFuture* fdb_create_cluster(const char* cluster_file_path);

DLLEXPORT void fdb_cluster_destroy( FDBCluster* c );
DLLEXPORT void fdb_cluster_destroy(FDBCluster* c);

DLLEXPORT WARN_UNUSED_RESULT fdb_error_t
fdb_cluster_set_option( FDBCluster* c, FDBClusterOption option,
                        uint8_t const* value, int value_length );
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_cluster_set_option(FDBCluster* c,
                                                                FDBClusterOption option,
                                                                uint8_t const* value,
                                                                int value_length);

DLLEXPORT WARN_UNUSED_RESULT FDBFuture*
fdb_cluster_create_database( FDBCluster* c, uint8_t const* db_name,
                             int db_name_length );
DLLEXPORT WARN_UNUSED_RESULT FDBFuture* fdb_cluster_create_database(FDBCluster* c,
                                                                    uint8_t const* db_name,
                                                                    int db_name_length);
#else
#define fdb_future_get_cluster(f, oc) FDB_REMOVED_FUNCTION
#define fdb_future_get_database(f, od) FDB_REMOVED_FUNCTION
#define fdb_create_cluster(cfp) FDB_REMOVED_FUNCTION
#define fdb_cluster_destroy(c) FDB_REMOVED_FUNCTION
#define fdb_cluster_set_option(c, o, v, vl) FDB_REMOVED_FUNCTION
#define fdb_cluster_create_database(c, dn, dnl) FDB_REMOVED_FUNCTION
#define fdb_future_get_cluster(f, oc) FDB_REMOVED_FUNCTION
#define fdb_future_get_database(f, od) FDB_REMOVED_FUNCTION
#define fdb_create_cluster(cfp) FDB_REMOVED_FUNCTION
#define fdb_cluster_destroy(c) FDB_REMOVED_FUNCTION
#define fdb_cluster_set_option(c, o, v, vl) FDB_REMOVED_FUNCTION
#define fdb_cluster_create_database(c, dn, dnl) FDB_REMOVED_FUNCTION
#endif

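The FDB_KEYSEL_* macros above expand to four call arguments in place (key, length, or_equal, offset), so resolving the first key strictly after "apple" is one call; a short sketch:

```c
#define FDB_API_VERSION 710
#include <foundationdb/fdb_c.h>

FDBFuture* first_key_after_apple(FDBTransaction* tr) {
    const uint8_t k[] = "apple";
    /* FDB_KEYSEL_FIRST_GREATER_THAN(k, 5) expands to: k, 5, 1, 1 */
    return fdb_transaction_get_key(tr, FDB_KEYSEL_FIRST_GREATER_THAN(k, 5), 0 /* snapshot */);
}
```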
#if FDB_API_VERSION < 23
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t
fdb_future_get_error( FDBFuture* f,
                      const char** out_description /* = NULL */ );
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_future_get_error(FDBFuture* f, const char** out_description /* = NULL */);

DLLEXPORT fdb_bool_t fdb_future_is_error( FDBFuture* f );
DLLEXPORT fdb_bool_t fdb_future_is_error(FDBFuture* f);
#else
#define fdb_future_is_error(x) FDB_REMOVED_FUNCTION
#define fdb_future_is_error(x) FDB_REMOVED_FUNCTION
#endif

#if FDB_API_VERSION < 14
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_future_get_keyvalue_array(
    FDBFuture* f, FDBKeyValue const** out_kv, int* out_count );
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_future_get_keyvalue_array(FDBFuture* f,
                                                                       FDBKeyValue const** out_kv,
                                                                       int* out_count);

DLLEXPORT WARN_UNUSED_RESULT FDBFuture* fdb_transaction_get(
    FDBTransaction* tr, uint8_t const* key_name, int key_name_length );
DLLEXPORT WARN_UNUSED_RESULT FDBFuture* fdb_transaction_get(FDBTransaction* tr,
                                                            uint8_t const* key_name,
                                                            int key_name_length);

DLLEXPORT WARN_UNUSED_RESULT FDBFuture* fdb_transaction_get_key(
    FDBTransaction* tr, uint8_t const* key_name, int key_name_length,
    fdb_bool_t or_equal, int offset );
DLLEXPORT WARN_UNUSED_RESULT FDBFuture* fdb_transaction_get_key(FDBTransaction* tr,
                                                                uint8_t const* key_name,
                                                                int key_name_length,
                                                                fdb_bool_t or_equal,
                                                                int offset);

DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_setup_network( const char* local_address );
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_setup_network(const char* local_address);

DLLEXPORT void fdb_transaction_set_option(
    FDBTransaction* tr, FDBTransactionOption option );
DLLEXPORT void fdb_transaction_set_option(FDBTransaction* tr, FDBTransactionOption option);

DLLEXPORT WARN_UNUSED_RESULT FDBFuture* fdb_transaction_get_range(
    FDBTransaction* tr, uint8_t const* begin_key_name,
    int begin_key_name_length, uint8_t const* end_key_name,
    int end_key_name_length, int limit );
DLLEXPORT WARN_UNUSED_RESULT FDBFuture* fdb_transaction_get_range(FDBTransaction* tr,
                                                                  uint8_t const* begin_key_name,
                                                                  int begin_key_name_length,
                                                                  uint8_t const* end_key_name,
                                                                  int end_key_name_length,
                                                                  int limit);

DLLEXPORT WARN_UNUSED_RESULT FDBFuture* fdb_transaction_get_range_selector(
    FDBTransaction* tr, uint8_t const* begin_key_name,
    int begin_key_name_length, fdb_bool_t begin_or_equal,
    int begin_offset, uint8_t const* end_key_name,
    int end_key_name_length, fdb_bool_t end_or_equal, int end_offset,
    int limit );
DLLEXPORT WARN_UNUSED_RESULT FDBFuture* fdb_transaction_get_range_selector(FDBTransaction* tr,
                                                                           uint8_t const* begin_key_name,
                                                                           int begin_key_name_length,
                                                                           fdb_bool_t begin_or_equal,
                                                                           int begin_offset,
                                                                           uint8_t const* end_key_name,
                                                                           int end_key_name_length,
                                                                           fdb_bool_t end_or_equal,
                                                                           int end_offset,
                                                                           int limit);
#else
#define fdb_transaction_get_range_selector(tr,bkn,bknl,boe,bo,ekn,eknl,eoe,eo,lim) FDB_REMOVED_FUNCTION
#define fdb_transaction_get_range_selector(tr, bkn, bknl, boe, bo, ekn, eknl, eoe, eo, lim) FDB_REMOVED_FUNCTION
#endif

#ifdef __cplusplus
@ -0,0 +1,9 @@
#define FDB_API_VERSION 710
#include <foundationdb/fdb_c.h>

int main(int argc, char* argv[]) {
	(void)argc;
	(void)argv;
	fdb_select_api_version(710);
	return 0;
}
File diff suppressed because it is too large
@ -3,12 +3,13 @@
#pragma once

#ifndef FDB_API_VERSION
#define FDB_API_VERSION 700
#define FDB_API_VERSION 710
#endif

#include <foundationdb/fdb_c.h>
#include <pthread.h>
#include <sys/types.h>
#include <stdbool.h>
#if defined(__linux__)
#include <linux/limits.h>
#elif defined(__APPLE__)

@ -32,6 +33,8 @@
#define FDB_ERROR_ABORT -2
#define FDB_ERROR_CONFLICT -3

#define LAT_BLOCK_SIZE 511 /* size of each block to get detailed latency for each operation */

/* transaction specification */
enum Operations {
	OP_GETREADVERSION,

@ -47,6 +50,7 @@ enum Operations {
	OP_CLEARRANGE,
	OP_SETCLEARRANGE,
	OP_COMMIT,
	OP_TRANSACTION, /* pseudo-operation - cumulative time for the operation + commit */
	MAX_OP /* must be the last item */
};

@ -64,6 +68,7 @@ enum Arguments {
	ARG_VERSION,
	ARG_KNOBS,
	ARG_FLATBUFFERS,
	ARG_LOGGROUP,
	ARG_TRACE,
	ARG_TRACEPATH,
	ARG_TRACEFORMAT,

@ -71,7 +76,10 @@ enum Arguments {
	ARG_TPSMIN,
	ARG_TPSINTERVAL,
	ARG_TPSCHANGE,
	ARG_TXNTRACE
	ARG_TXNTRACE,
	ARG_TXNTAGGING,
	ARG_TXNTAGGINGPREFIX,
	ARG_STREAMING_MODE
};

enum TPSChangeTypes { TPS_SIN, TPS_SQUARE, TPS_PULSE };

@ -79,6 +87,8 @@ enum TPSChangeTypes { TPS_SIN, TPS_SQUARE, TPS_PULSE };
#define KEYPREFIX "mako"
#define KEYPREFIXLEN 4

#define TEMP_DATA_STORE "/tmp/makoTemp"

/* we set mako_txnspec_t and mako_args_t only once in the master process,
 * and won't be touched by child processes.
 */

@ -88,7 +98,9 @@ typedef struct {
	int ops[MAX_OP][3];
} mako_txnspec_t;

#define LOGGROUP_MAX 256
#define KNOB_MAX 256
#define TAGPREFIXLENGTH_MAX 8

/* benchmark parameters */
typedef struct {

@ -112,12 +124,16 @@ typedef struct {
	int verbose;
	mako_txnspec_t txnspec;
	char cluster_file[PATH_MAX];
	char log_group[LOGGROUP_MAX];
	int trace;
	char tracepath[PATH_MAX];
	int traceformat; /* 0 - XML, 1 - JSON */
	char knobs[KNOB_MAX];
	uint8_t flatbuffers;
	int txntrace;
	int txntagging;
	char txntagging_prefix[TAGPREFIXLENGTH_MAX];
	FDBStreamingMode streaming_mode;
} mako_args_t;

/* shared memory */

@ -129,8 +145,15 @@ typedef struct {
	int signal;
	int readycount;
	double throttle_factor;
	int stopcount;
} mako_shmhdr_t;

/* memory block allocated to each operation when collecting detailed latency */
typedef struct {
	uint64_t data[LAT_BLOCK_SIZE];
	void* next_block;
} lat_block_t;

typedef struct {
	uint64_t xacts;
	uint64_t conflicts;

@ -145,6 +168,7 @@ typedef struct {
/* per-process information */
typedef struct {
	int worker_id;
	pid_t parent_id;
	FDBDatabase* database;
	mako_args_t* args;
	mako_shmhdr_t* shm;

@ -153,6 +177,10 @@ typedef struct {
/* args for threads */
typedef struct {
	int thread_id;
	int elem_size[MAX_OP]; /* stores the multiple of LAT_BLOCK_SIZE to check the memory allocation of each operation */
	bool is_memory_allocated[MAX_OP]; /* flag specified for each operation, whether the memory was allocated to that
	                                     specific operation */
	lat_block_t* block[MAX_OP];
	process_info_t* process;
} thread_args_t;

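A hedged sketch of how the lat_block_t chain introduced above can grow: when one block's LAT_BLOCK_SIZE slots fill up, a fresh block is linked in through next_block. mako's own bookkeeping (elem_size, is_memory_allocated) is omitted, and record_latency is an invented helper name:

```c
#include <stdint.h>
#include <stdlib.h>

#define LAT_BLOCK_SIZE 511
typedef struct {
    uint64_t data[LAT_BLOCK_SIZE];
    void* next_block;
} lat_block_t;

/* Appends one sample; returns the block now holding the write cursor,
 * or NULL if a new block could not be allocated. */
static lat_block_t* record_latency(lat_block_t* tail, int* used, uint64_t sample_us) {
    if (*used == LAT_BLOCK_SIZE) {
        lat_block_t* next = calloc(1, sizeof(lat_block_t));
        if (!next)
            return NULL;
        tail->next_block = next;
        tail = next;
        *used = 0;
    }
    tail->data[(*used)++] = sample_us;
    return tail;
}
```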
@ -77,3 +77,58 @@ void genkey(char* str, int num, int rows, int len) {
	}
	str[len - 1] = '\0';
}

/* This is another sorting algorithm used to calculate latency parameters */
/* We moved from radix sort to quick sort to avoid extra space used in radix sort */

#if 0
uint64_t get_max(uint64_t arr[], int n) {
	uint64_t mx = arr[0];
	for (int i = 1; i < n; i++) {
		if (arr[i] > mx) {
			mx = arr[i];
		}
	}
	return mx;
}

void bucket_data(uint64_t arr[], int n, uint64_t exp) {
	// uint64_t output[n];
	int i, count[10] = { 0 };
	uint64_t* output = (uint64_t*)malloc(sizeof(uint64_t) * n);

	for (i = 0; i < n; i++) {
		count[(arr[i] / exp) % 10]++;
	}
	for (i = 1; i < 10; i++) {
		count[i] += count[i - 1];
	}
	for (i = n - 1; i >= 0; i--) {
		output[count[(arr[i] / exp) % 10] - 1] = arr[i];
		count[(arr[i] / exp) % 10]--;
	}
	for (i = 0; i < n; i++) {
		arr[i] = output[i];
	}
	free(output);
}

// The main function is to sort arr[] of size n using Radix Sort
void radix_sort(uint64_t* arr, int n) {
	// Find the maximum number to know number of digits
	uint64_t m = get_max(arr, n);
	for (uint64_t exp = 1; m / exp > 0; exp *= 10) bucket_data(arr, n, exp);
}
#endif

int compare(const void* a, const void* b) {
	const uint64_t* da = (const uint64_t*)a;
	const uint64_t* db = (const uint64_t*)b;

	return (*da > *db) - (*da < *db);
}

// The main function is to sort arr[] of size n using Quick Sort
void quick_sort(uint64_t* arr, int n) {
	qsort(arr, n, sizeof(uint64_t), compare);
}

@ -2,6 +2,8 @@
#define UTILS_H
#pragma once

#include <stdint.h>

/* uniform-distribution random */
/* return a uniform random number between low and high, both inclusive */
int urand(int low, int high);

@ -48,4 +50,15 @@ int digits(int num);
/* len is the buffer size, key length + null */
void genkey(char* str, int num, int rows, int len);

#if 0
// The main function is to sort arr[] of size n using Radix Sort
void radix_sort(uint64_t arr[], int n);
void bucket_data(uint64_t arr[], int n, uint64_t exp);
uint64_t get_max(uint64_t arr[], int n);
#endif

// The main function is to sort arr[] of size n using Quick Sort
void quick_sort(uint64_t arr[], int n);
int compare(const void* a, const void* b);

#endif /* UTILS_H */

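Once quick_sort above has ordered the samples, the latency parameters fall out by index with a nearest-rank lookup; a small sketch, with percentile an invented helper name:

```c
#include <stdint.h>

/* p in [0.0, 1.0]; sorted must be ascending, n > 0. */
uint64_t percentile(const uint64_t* sorted, int n, double p) {
    int idx = (int)(p * (n - 1)); /* nearest-rank index */
    return sorted[idx];
}

/* usage:
 *   quick_sort(samples, n);
 *   uint64_t p99 = percentile(samples, n, 0.99);
 */
```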
@ -31,38 +31,40 @@ int numKeys = 1000000;
int keySize = 16;
uint8_t** keys = NULL;
int valueSize = 100;
uint8_t *valueStr = NULL;
uint8_t* valueStr = NULL;

fdb_error_t waitError(FDBFuture *f) {
fdb_error_t waitError(FDBFuture* f) {
	fdb_error_t blockError = fdb_future_block_until_ready(f);
	if(!blockError) {
	if (!blockError) {
		return fdb_future_get_error(f);
	} else {
		return blockError;
	}
}

struct RunResult run(struct ResultSet *rs, FDBDatabase *db, struct RunResult (*func)(struct ResultSet*, FDBTransaction*)) {
	FDBTransaction *tr = NULL;
struct RunResult run(struct ResultSet* rs,
                     FDBDatabase* db,
                     struct RunResult (*func)(struct ResultSet*, FDBTransaction*)) {
	FDBTransaction* tr = NULL;
	fdb_error_t e = fdb_database_create_transaction(db, &tr);
	checkError(e, "create transaction", rs);

	while(1) {
	while (1) {
		struct RunResult r = func(rs, tr);
		e = r.e;
		if(!e) {
			FDBFuture *f = fdb_transaction_commit(tr);
		if (!e) {
			FDBFuture* f = fdb_transaction_commit(tr);
			e = waitError(f);
			fdb_future_destroy(f);
		}

		if(e) {
			FDBFuture *f = fdb_transaction_on_error(tr, e);
		if (e) {
			FDBFuture* f = fdb_transaction_on_error(tr, e);
			fdb_error_t retryE = waitError(f);
			fdb_future_destroy(f);
			if (retryE) {
				fdb_transaction_destroy(tr);
				return (struct RunResult) {0, retryE};
				return (struct RunResult){ 0, retryE };
			}
		} else {
			fdb_transaction_destroy(tr);

@ -73,19 +75,22 @@ struct RunResult run(struct ResultSet *rs, FDBDatabase *db, struct RunResult (*f
	return RES(0, 4100); // internal_error ; we should never get here
}

int runTest(struct RunResult (*testFxn)(struct ResultSet*, FDBTransaction*), FDBDatabase *db, struct ResultSet *rs, const char *kpiName) {
int runTest(struct RunResult (*testFxn)(struct ResultSet*, FDBTransaction*),
            FDBDatabase* db,
            struct ResultSet* rs,
            const char* kpiName) {
	int numRuns = 25;
	int *results = malloc(sizeof(int)*numRuns);
	int* results = malloc(sizeof(int) * numRuns);
	int i = 0;
	for(; i < numRuns; ++i) {
	for (; i < numRuns; ++i) {
		struct RunResult res = run(rs, db, testFxn);
		if(res.e) {
		if (res.e) {
			logError(res.e, kpiName, rs);
			free(results);
			return 0;
		}
		results[i] = res.res;
		if(results[i] < 0) {
		if (results[i] < 0) {
			free(results);
			return -1;
		}

@ -99,19 +104,22 @@ int runTest(struct RunResult (*testFxn)(struct ResultSet*, FDBTransaction*), FDB
	return result;
}

int runTestDb(struct RunResult (*testFxn)(struct ResultSet*, FDBDatabase*), FDBDatabase *db, struct ResultSet *rs, const char *kpiName) {
int runTestDb(struct RunResult (*testFxn)(struct ResultSet*, FDBDatabase*),
              FDBDatabase* db,
              struct ResultSet* rs,
              const char* kpiName) {
	int numRuns = 25;
	int *results = malloc(sizeof(int)*numRuns);
	int* results = malloc(sizeof(int) * numRuns);
	int i = 0;
	for(; i < numRuns; ++i) {
	for (; i < numRuns; ++i) {
		struct RunResult res = testFxn(rs, db);
		if(res.e) {
		if (res.e) {
			logError(res.e, kpiName, rs);
			free(results);
			return 0;
		}
		results[i] = res.res;
		if(results[i] < 0) {
		if (results[i] < 0) {
			free(results);
			return -1;
		}

@ -125,139 +133,144 @@ int runTestDb(struct RunResult (*testFxn)(struct ResultSet*, FDBDatabase*), FDBD
	return result;
}

struct RunResult clearAll(struct ResultSet *rs, FDBTransaction *tr) {
struct RunResult clearAll(struct ResultSet* rs, FDBTransaction* tr) {
	fdb_transaction_clear_range(tr, (uint8_t*)"", 0, (uint8_t*)"\xff", 1);
	return RES(0, 0);
}

uint32_t start = 0;
uint32_t stop = 0;
struct RunResult insertRange(struct ResultSet *rs, FDBTransaction *tr) {
struct RunResult insertRange(struct ResultSet* rs, FDBTransaction* tr) {
	int i;
	for(i = start; i < stop; i++) {
	for (i = start; i < stop; i++) {
		fdb_transaction_set(tr, keys[i], keySize, valueStr, valueSize);
	}
	return RES(0, 0);
}

void insertData(struct ResultSet *rs, FDBDatabase *db) {
void insertData(struct ResultSet* rs, FDBDatabase* db) {
	checkError(run(rs, db, &clearAll).e, "clearing database", rs);

	// TODO: Do this asynchronously.
	start = 0;
	while(start < numKeys) {
	while (start < numKeys) {
		stop = start + 1000;
		if(stop > numKeys) stop = numKeys;
		if (stop > numKeys)
			stop = numKeys;
		checkError(run(rs, db, &insertRange).e, "inserting data range", rs);
		start = stop;
	}
}

fdb_error_t setRetryLimit(struct ResultSet *rs, FDBTransaction *tr, uint64_t limit) {
fdb_error_t setRetryLimit(struct ResultSet* rs, FDBTransaction* tr, uint64_t limit) {
	return fdb_transaction_set_option(tr, FDB_TR_OPTION_RETRY_LIMIT, (const uint8_t*)&limit, sizeof(uint64_t));
}

uint32_t FUTURE_LATENCY_COUNT = 100000;
const char *FUTURE_LATENCY_KPI = "C future throughput (local client)";
struct RunResult futureLatency(struct ResultSet *rs, FDBTransaction *tr) {
const char* FUTURE_LATENCY_KPI = "C future throughput (local client)";
struct RunResult futureLatency(struct ResultSet* rs, FDBTransaction* tr) {
	fdb_error_t e = maybeLogError(setRetryLimit(rs, tr, 5), "setting retry limit", rs);
	if(e) return RES(0, e);
	if (e)
		return RES(0, e);

	FDBFuture *f = fdb_transaction_get_read_version(tr);
	FDBFuture* f = fdb_transaction_get_read_version(tr);
	e = waitError(f);
	fdb_future_destroy(f);
	maybeLogError(e, "getting initial read version", rs);
	if(e) return RES(0, e);
	if (e)
		return RES(0, e);

	double start = getTime();
	int i;
	for(i = 0; i < FUTURE_LATENCY_COUNT; i++) {
		FDBFuture *f = fdb_transaction_get_read_version(tr);
	for (i = 0; i < FUTURE_LATENCY_COUNT; i++) {
		FDBFuture* f = fdb_transaction_get_read_version(tr);
		e = waitError(f);
		fdb_future_destroy(f);
		maybeLogError(e, "getting read version", rs);
		if(e) return RES(0, e);
		if (e)
			return RES(0, e);
	}
	double end = getTime();

	return RES(FUTURE_LATENCY_COUNT/(end - start), 0);
	return RES(FUTURE_LATENCY_COUNT / (end - start), 0);
}

uint32_t CLEAR_COUNT = 100000;
const char *CLEAR_KPI = "C clear throughput (local client)";
struct RunResult clear(struct ResultSet *rs, FDBTransaction *tr) {
const char* CLEAR_KPI = "C clear throughput (local client)";
struct RunResult clear(struct ResultSet* rs, FDBTransaction* tr) {
	double start = getTime();
	int i;
	for(i = 0; i < CLEAR_COUNT; i++) {
	for (i = 0; i < CLEAR_COUNT; i++) {
		int k = ((uint64_t)rand()) % numKeys;
		fdb_transaction_clear(tr, keys[k], keySize);
	}
	double end = getTime();

	fdb_transaction_reset(tr); // Don't actually clear things.
	return RES(CLEAR_COUNT/(end - start), 0);
	return RES(CLEAR_COUNT / (end - start), 0);
}

uint32_t CLEAR_RANGE_COUNT = 100000;
const char *CLEAR_RANGE_KPI = "C clear range throughput (local client)";
struct RunResult clearRange(struct ResultSet *rs, FDBTransaction *tr) {
const char* CLEAR_RANGE_KPI = "C clear range throughput (local client)";
struct RunResult clearRange(struct ResultSet* rs, FDBTransaction* tr) {
	double start = getTime();
	int i;
	for(i = 0; i < CLEAR_RANGE_COUNT; i++) {
	for (i = 0; i < CLEAR_RANGE_COUNT; i++) {
		int k = ((uint64_t)rand()) % (numKeys - 1);
		fdb_transaction_clear_range(tr, keys[k], keySize, keys[k+1], keySize);
		fdb_transaction_clear_range(tr, keys[k], keySize, keys[k + 1], keySize);
	}
	double end = getTime();

	fdb_transaction_reset(tr); // Don't actually clear things.
	return RES(CLEAR_RANGE_COUNT/(end - start), 0);
	return RES(CLEAR_RANGE_COUNT / (end - start), 0);
}

uint32_t SET_COUNT = 100000;
const char *SET_KPI = "C set throughput (local client)";
struct RunResult set(struct ResultSet *rs, FDBTransaction *tr) {
const char* SET_KPI = "C set throughput (local client)";
struct RunResult set(struct ResultSet* rs, FDBTransaction* tr) {
	double start = getTime();
	int i;
	for(i = 0; i < SET_COUNT; i++) {
	for (i = 0; i < SET_COUNT; i++) {
		int k = ((uint64_t)rand()) % numKeys;
		fdb_transaction_set(tr, keys[k], keySize, valueStr, valueSize);
	}
	double end = getTime();

	fdb_transaction_reset(tr); // Don't actually set things.
	return RES(SET_COUNT/(end - start), 0);
	return RES(SET_COUNT / (end - start), 0);
}

uint32_t PARALLEL_GET_COUNT = 10000;
const char *PARALLEL_GET_KPI = "C parallel get throughput (local client)";
struct RunResult parallelGet(struct ResultSet *rs, FDBTransaction *tr) {
const char* PARALLEL_GET_KPI = "C parallel get throughput (local client)";
struct RunResult parallelGet(struct ResultSet* rs, FDBTransaction* tr) {
	fdb_error_t e = maybeLogError(setRetryLimit(rs, tr, 5), "setting retry limit", rs);
	if(e) return RES(0, e);
	if (e)
		return RES(0, e);

	FDBFuture **futures = (FDBFuture**)malloc((sizeof(FDBFuture*)) * PARALLEL_GET_COUNT);
	FDBFuture** futures = (FDBFuture**)malloc((sizeof(FDBFuture*)) * PARALLEL_GET_COUNT);

	double start = getTime();

	int i;
	for(i = 0; i < PARALLEL_GET_COUNT; i++) {
	for (i = 0; i < PARALLEL_GET_COUNT; i++) {
		int k = ((uint64_t)rand()) % numKeys;
		futures[i] = fdb_transaction_get(tr, keys[k], keySize, 0);
	}

	fdb_bool_t present;
	uint8_t const *outValue;
	uint8_t const* outValue;
	int outValueLength;

	for(i = 0; i < PARALLEL_GET_COUNT; i++) {
	for (i = 0; i < PARALLEL_GET_COUNT; i++) {
		e = maybeLogError(fdb_future_block_until_ready(futures[i]), "waiting for get future", rs);
		if(e) {
		if (e) {
			fdb_future_destroy(futures[i]);
			return RES(0, e);
		}

		e = maybeLogError(fdb_future_get_value(futures[i], &present, &outValue, &outValueLength), "getting future value", rs);
		if(e) {
		e = maybeLogError(
		    fdb_future_get_value(futures[i], &present, &outValue, &outValueLength), "getting future value", rs);
		if (e) {
			fdb_future_destroy(futures[i]);
			return RES(0, e);
		}

@ -268,39 +281,41 @@ struct RunResult parallelGet(struct ResultSet *rs, FDBTransaction *tr) {
	double end = getTime();

	free(futures);
	return RES(PARALLEL_GET_COUNT/(end - start), 0);
	return RES(PARALLEL_GET_COUNT / (end - start), 0);
}

uint32_t ALTERNATING_GET_SET_COUNT = 2000;
const char *ALTERNATING_GET_SET_KPI = "C alternating get set throughput (local client)";
struct RunResult alternatingGetSet(struct ResultSet *rs, FDBTransaction *tr) {
const char* ALTERNATING_GET_SET_KPI = "C alternating get set throughput (local client)";
struct RunResult alternatingGetSet(struct ResultSet* rs, FDBTransaction* tr) {
	fdb_error_t e = maybeLogError(setRetryLimit(rs, tr, 5), "setting retry limit", rs);
	if(e) return RES(0, e);
	if (e)
		return RES(0, e);

	FDBFuture **futures = (FDBFuture**)malloc((sizeof(FDBFuture*)) * ALTERNATING_GET_SET_COUNT);
	FDBFuture** futures = (FDBFuture**)malloc((sizeof(FDBFuture*)) * ALTERNATING_GET_SET_COUNT);

	double start = getTime();

	int i;
	for(i = 0; i < ALTERNATING_GET_SET_COUNT; i++) {
	for (i = 0; i < ALTERNATING_GET_SET_COUNT; i++) {
		int k = ((uint64_t)rand()) % numKeys;
		fdb_transaction_set(tr, keys[k], keySize, valueStr, valueSize);
		futures[i] = fdb_transaction_get(tr, keys[k], keySize, 0);
	}

	fdb_bool_t present;
	uint8_t const *outValue;
	uint8_t const* outValue;
	int outValueLength;

	for(i = 0; i < ALTERNATING_GET_SET_COUNT; i++) {
	for (i = 0; i < ALTERNATING_GET_SET_COUNT; i++) {
		e = maybeLogError(fdb_future_block_until_ready(futures[i]), "waiting for get future", rs);
		if(e) {
		if (e) {
			fdb_future_destroy(futures[i]);
			return RES(0, e);
		}

		e = maybeLogError(fdb_future_get_value(futures[i], &present, &outValue, &outValueLength), "getting future value", rs);
		if(e) {
		e = maybeLogError(
		    fdb_future_get_value(futures[i], &present, &outValue, &outValueLength), "getting future value", rs);
		if (e) {
			fdb_future_destroy(futures[i]);
			return RES(0, e);
		}

@ -311,38 +326,39 @@ struct RunResult alternatingGetSet(struct ResultSet *rs, FDBTransaction *tr) {
	double end = getTime();

	free(futures);
	return RES(ALTERNATING_GET_SET_COUNT/(end - start), 0);
	return RES(ALTERNATING_GET_SET_COUNT / (end - start), 0);
}

uint32_t SERIAL_GET_COUNT = 2000;
const char *SERIAL_GET_KPI = "C serial get throughput (local client)";
struct RunResult serialGet(struct ResultSet *rs, FDBTransaction *tr) {
const char* SERIAL_GET_KPI = "C serial get throughput (local client)";
struct RunResult serialGet(struct ResultSet* rs, FDBTransaction* tr) {
	fdb_error_t e = maybeLogError(setRetryLimit(rs, tr, 5), "setting retry limit", rs);
	if(e) return RES(0, e);
	if (e)
		return RES(0, e);

	int i;
	uint32_t *keyIndices = (uint32_t*)malloc((sizeof(uint32_t)) * SERIAL_GET_COUNT);
	uint32_t* keyIndices = (uint32_t*)malloc((sizeof(uint32_t)) * SERIAL_GET_COUNT);

	if(SERIAL_GET_COUNT > numKeys/2) {
		for(i = 0; i < SERIAL_GET_COUNT; i++) {
	if (SERIAL_GET_COUNT > numKeys / 2) {
		for (i = 0; i < SERIAL_GET_COUNT; i++) {
			keyIndices[i] = ((uint64_t)rand()) % numKeys;
		}
	} else {
		for(i = 0; i < SERIAL_GET_COUNT; i++) {
			while(1) {
		for (i = 0; i < SERIAL_GET_COUNT; i++) {
			while (1) {
				// Yes, this is a linear scan. This happens outside
				// the part we are measuring.
				uint32_t index = ((uint64_t)rand()) % numKeys;
				int j;
				fdb_bool_t found = 0;
				for(j = 0; j < i; j++) {
					if(keyIndices[j] == index) {
				for (j = 0; j < i; j++) {
					if (keyIndices[j] == index) {
						found = 1;
						break;
					}
				}

				if(!found) {
				if (!found) {
					keyIndices[i] = index;
					break;
				}

@ -353,13 +369,13 @@ struct RunResult serialGet(struct ResultSet *rs, FDBTransaction *tr) {
	double start = getTime();

	fdb_bool_t present;
	uint8_t const *outValue;
	uint8_t const* outValue;
	int outValueLength;

	for(i = 0; i < SERIAL_GET_COUNT; i++) {
		FDBFuture *f = fdb_transaction_get(tr, keys[keyIndices[i]], keySize, 0);
	for (i = 0; i < SERIAL_GET_COUNT; i++) {
		FDBFuture* f = fdb_transaction_get(tr, keys[keyIndices[i]], keySize, 0);
		fdb_error_t e = maybeLogError(fdb_future_block_until_ready(f), "getting key in serial", rs);
		if(e) {
		if (e) {
			free(keyIndices);
			fdb_future_destroy(f);
			return RES(0, e);

@ -367,7 +383,7 @@ struct RunResult serialGet(struct ResultSet *rs, FDBTransaction *tr) {

		e = maybeLogError(fdb_future_get_value(f, &present, &outValue, &outValueLength), "getting future value", rs);
		fdb_future_destroy(f);
		if(e) {
		if (e) {
			free(keyIndices);
			return RES(0, e);
		}

@ -376,66 +392,87 @@ struct RunResult serialGet(struct ResultSet *rs, FDBTransaction *tr) {
	double end = getTime();

	free(keyIndices);
	return RES(SERIAL_GET_COUNT/(end - start), 0);
	return RES(SERIAL_GET_COUNT / (end - start), 0);
}

uint32_t GET_RANGE_COUNT = 100000;
const char *GET_RANGE_KPI = "C get range throughput (local client)";
struct RunResult getRange(struct ResultSet *rs, FDBTransaction *tr) {
const char* GET_RANGE_KPI = "C get range throughput (local client)";
|
||||
struct RunResult getRange(struct ResultSet* rs, FDBTransaction* tr) {
|
||||
fdb_error_t e = maybeLogError(setRetryLimit(rs, tr, 5), "setting retry limit", rs);
|
||||
if(e) return RES(0, e);
|
||||
if (e)
|
||||
return RES(0, e);
|
||||
|
||||
uint32_t startKey = ((uint64_t)rand()) % (numKeys - GET_RANGE_COUNT - 1);
|
||||
|
||||
double start = getTime();
|
||||
|
||||
const FDBKeyValue *outKv;
|
||||
const FDBKeyValue* outKv;
|
||||
int outCount;
|
||||
fdb_bool_t outMore = 1;
|
||||
int totalOut = 0;
|
||||
int iteration = 0;
|
||||
|
||||
FDBFuture *f = fdb_transaction_get_range(tr,
|
||||
keys[startKey], keySize, 1, 0,
|
||||
keys[startKey + GET_RANGE_COUNT], keySize, 1, 0,
|
||||
0, 0,
|
||||
FDB_STREAMING_MODE_WANT_ALL, ++iteration, 0, 0);
|
||||
FDBFuture* f = fdb_transaction_get_range(tr,
|
||||
keys[startKey],
|
||||
keySize,
|
||||
1,
|
||||
0,
|
||||
keys[startKey + GET_RANGE_COUNT],
|
||||
keySize,
|
||||
1,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
FDB_STREAMING_MODE_WANT_ALL,
|
||||
++iteration,
|
||||
0,
|
||||
0);
|
||||
|
||||
while(outMore) {
|
||||
while (outMore) {
|
||||
e = maybeLogError(fdb_future_block_until_ready(f), "getting range", rs);
|
||||
if(e) {
|
||||
if (e) {
|
||||
fdb_future_destroy(f);
|
||||
return RES(0, e);
|
||||
}
|
||||
|
||||
e = maybeLogError(fdb_future_get_keyvalue_array(f, &outKv, &outCount, &outMore), "reading range array", rs);
|
||||
if(e) {
|
||||
if (e) {
|
||||
fdb_future_destroy(f);
|
||||
return RES(0, e);
|
||||
}
|
||||
|
||||
totalOut += outCount;
|
||||
|
||||
if(outMore) {
|
||||
FDBFuture *f2 = fdb_transaction_get_range(tr,
|
||||
outKv[outCount - 1].key, outKv[outCount - 1].key_length, 1, 1,
|
||||
keys[startKey + GET_RANGE_COUNT], keySize, 1, 0,
|
||||
0, 0,
|
||||
FDB_STREAMING_MODE_WANT_ALL, ++iteration, 0, 0);
|
||||
if (outMore) {
|
||||
FDBFuture* f2 = fdb_transaction_get_range(tr,
|
||||
outKv[outCount - 1].key,
|
||||
outKv[outCount - 1].key_length,
|
||||
1,
|
||||
1,
|
||||
keys[startKey + GET_RANGE_COUNT],
|
||||
keySize,
|
||||
1,
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
FDB_STREAMING_MODE_WANT_ALL,
|
||||
++iteration,
|
||||
0,
|
||||
0);
|
||||
fdb_future_destroy(f);
|
||||
f = f2;
|
||||
}
|
||||
}
|
||||
|
||||
if(totalOut != GET_RANGE_COUNT) {
|
||||
char *msg = (char*)malloc((sizeof(char)) * 200);
|
||||
if (totalOut != GET_RANGE_COUNT) {
|
||||
char* msg = (char*)malloc((sizeof(char)) * 200);
|
||||
sprintf(msg, "verifying out count (%d != %d)", totalOut, GET_RANGE_COUNT);
|
||||
logError(4100, msg, rs);
|
||||
free(msg);
|
||||
fdb_future_destroy(f);
|
||||
return RES(0, 4100);
|
||||
}
|
||||
if(outMore) {
|
||||
if (outMore) {
|
||||
logError(4100, "verifying no more in range", rs);
|
||||
fdb_future_destroy(f);
|
||||
return RES(0, 4100);
|
||||
|
@ -444,84 +481,84 @@ struct RunResult getRange(struct ResultSet *rs, FDBTransaction *tr) {
|
|||
|
||||
double end = getTime();
|
||||
|
||||
return RES(GET_RANGE_COUNT/(end - start), 0);
|
||||
return RES(GET_RANGE_COUNT / (end - start), 0);
|
||||
}
|
||||
|
||||
uint32_t GET_KEY_COUNT = 2000;
|
||||
const char *GET_KEY_KPI = "C get key throughput (local client)";
|
||||
struct RunResult getKey(struct ResultSet *rs, FDBTransaction *tr) {
|
||||
const char* GET_KEY_KPI = "C get key throughput (local client)";
|
||||
struct RunResult getKey(struct ResultSet* rs, FDBTransaction* tr) {
|
||||
fdb_error_t e = maybeLogError(setRetryLimit(rs, tr, 5), "setting retry limit", rs);
|
||||
if(e) return RES(0, e);
|
||||
if (e)
|
||||
return RES(0, e);
|
||||
|
||||
double start = getTime();
|
||||
|
||||
fdb_bool_t present;
|
||||
uint8_t const *outValue;
|
||||
uint8_t const* outValue;
|
||||
int outValueLength;
|
||||
|
||||
int i;
|
||||
for(i = 0; i < GET_KEY_COUNT; i++) {
|
||||
for (i = 0; i < GET_KEY_COUNT; i++) {
|
||||
int key = ((uint64_t)rand()) % numKeys;
|
||||
int offset = (((uint64_t)rand()) % 21) - 10;
|
||||
FDBFuture *f = fdb_transaction_get_key(tr, keys[key], keySize, 1, offset, 0);
|
||||
FDBFuture* f = fdb_transaction_get_key(tr, keys[key], keySize, 1, offset, 0);
|
||||
|
||||
e = maybeLogError(fdb_future_block_until_ready(f), "waiting for get key", rs);
|
||||
if(e) {
|
||||
if (e) {
|
||||
fdb_future_destroy(f);
|
||||
return RES(0, e);
|
||||
}
|
||||
|
||||
e = maybeLogError(fdb_future_get_value(f, &present, &outValue, &outValueLength), "getting future value", rs);
|
||||
fdb_future_destroy(f);
|
||||
if(e) {
|
||||
if (e) {
|
||||
return RES(0, e);
|
||||
}
|
||||
}
|
||||
|
||||
double end = getTime();
|
||||
|
||||
return RES(GET_KEY_COUNT/(end - start), 0);
|
||||
return RES(GET_KEY_COUNT / (end - start), 0);
|
||||
}
|
||||
|
||||
uint32_t GET_SINGLE_KEY_RANGE_COUNT = 2000;
|
||||
const char *GET_SINGLE_KEY_RANGE_KPI = "C get_single_key_range throughput (local client)";
|
||||
struct RunResult getSingleKeyRange(struct ResultSet *rs, FDBTransaction *tr) {
|
||||
const char* GET_SINGLE_KEY_RANGE_KPI = "C get_single_key_range throughput (local client)";
|
||||
struct RunResult getSingleKeyRange(struct ResultSet* rs, FDBTransaction* tr) {
|
||||
fdb_error_t e = maybeLogError(setRetryLimit(rs, tr, 5), "setting retry limit", rs);
|
||||
if(e) return RES(0, e);
|
||||
if (e)
|
||||
return RES(0, e);
|
||||
|
||||
double start = getTime();
|
||||
|
||||
const FDBKeyValue *outKv;
|
||||
const FDBKeyValue* outKv;
|
||||
int outCount;
|
||||
fdb_bool_t outMore;
|
||||
|
||||
int i;
|
||||
for(i = 0; i < GET_SINGLE_KEY_RANGE_COUNT; i++) {
|
||||
for (i = 0; i < GET_SINGLE_KEY_RANGE_COUNT; i++) {
|
||||
int key = ((uint64_t)rand()) % (numKeys - 1);
|
||||
FDBFuture *f = fdb_transaction_get_range(tr,
|
||||
keys[key], keySize, 1, 0,
|
||||
keys[key + 1], keySize, 1, 0,
|
||||
2, 0,
|
||||
FDB_STREAMING_MODE_EXACT, 1, 0, 0);
|
||||
FDBFuture* f = fdb_transaction_get_range(
|
||||
tr, keys[key], keySize, 1, 0, keys[key + 1], keySize, 1, 0, 2, 0, FDB_STREAMING_MODE_EXACT, 1, 0, 0);
|
||||
|
||||
e = maybeLogError(fdb_future_block_until_ready(f), "waiting for single key range", rs);
|
||||
if(e) {
|
||||
if (e) {
|
||||
fdb_future_destroy(f);
|
||||
return RES(0, e);
|
||||
}
|
||||
|
||||
e = maybeLogError(fdb_future_get_keyvalue_array(f, &outKv, &outCount, &outMore), "reading single key range array", rs);
|
||||
if(e) {
|
||||
e = maybeLogError(
|
||||
fdb_future_get_keyvalue_array(f, &outKv, &outCount, &outMore), "reading single key range array", rs);
|
||||
if (e) {
|
||||
fdb_future_destroy(f);
|
||||
return RES(0, e);
|
||||
}
|
||||
|
||||
if(outCount != 1) {
|
||||
if (outCount != 1) {
|
||||
logError(4100, "more than one key returned in single key range read", rs);
|
||||
fdb_future_destroy(f);
|
||||
return RES(0, 4100);
|
||||
}
|
||||
if(outMore) {
|
||||
if (outMore) {
|
||||
logError(4100, "more keys to read in single key range read", rs);
|
||||
fdb_future_destroy(f);
|
||||
return RES(0, 4100);
|
||||
|
@ -532,33 +569,34 @@ struct RunResult getSingleKeyRange(struct ResultSet *rs, FDBTransaction *tr) {
|
|||
|
||||
double end = getTime();
|
||||
|
||||
return RES(GET_SINGLE_KEY_RANGE_COUNT/(end - start), 0);
|
||||
return RES(GET_SINGLE_KEY_RANGE_COUNT / (end - start), 0);
|
||||
}
|
||||
|
||||
struct RunResult singleKey(struct ResultSet *rs, FDBTransaction *tr) {
|
||||
struct RunResult singleKey(struct ResultSet* rs, FDBTransaction* tr) {
|
||||
int k = ((uint64_t)rand()) % numKeys;
|
||||
fdb_transaction_set(tr, keys[k], keySize, valueStr, valueSize);
|
||||
return RES(0, 0);
|
||||
}
|
||||
|
||||
uint32_t WRITE_TRANSACTION_COUNT = 1000;
|
||||
const char *WRITE_TRANSACTION_KPI = "C write_transaction throughput (local client)";
|
||||
struct RunResult writeTransaction(struct ResultSet *rs, FDBDatabase *db) {
|
||||
const char* WRITE_TRANSACTION_KPI = "C write_transaction throughput (local client)";
|
||||
struct RunResult writeTransaction(struct ResultSet* rs, FDBDatabase* db) {
|
||||
double start = getTime();
|
||||
|
||||
int i;
|
||||
for(i = 0; i < WRITE_TRANSACTION_COUNT; i++) {
|
||||
for (i = 0; i < WRITE_TRANSACTION_COUNT; i++) {
|
||||
struct RunResult res = run(rs, db, &singleKey);
|
||||
if(res.e) return res;
|
||||
if (res.e)
|
||||
return res;
|
||||
}
|
||||
|
||||
double end = getTime();
|
||||
|
||||
return RES(WRITE_TRANSACTION_COUNT/(end - start), 0);
|
||||
return RES(WRITE_TRANSACTION_COUNT / (end - start), 0);
|
||||
}
|
||||
|
||||
void runTests(struct ResultSet *rs) {
|
||||
FDBDatabase *db = openDatabase(rs, &netThread);
|
||||
void runTests(struct ResultSet* rs) {
|
||||
FDBDatabase* db = openDatabase(rs, &netThread);
|
||||
|
||||
printf("Loading database...\n");
|
||||
insertData(rs, db);
|
||||
|
@ -600,15 +638,15 @@ void runTests(struct ResultSet *rs) {
|
|||
fdb_stop_network();
|
||||
}
|
||||
|
||||
int main(int argc, char **argv) {
|
||||
int main(int argc, char** argv) {
|
||||
srand(time(NULL));
|
||||
struct ResultSet *rs = newResultSet();
|
||||
checkError(fdb_select_api_version(700), "select API version", rs);
|
||||
struct ResultSet* rs = newResultSet();
|
||||
checkError(fdb_select_api_version(710), "select API version", rs);
|
||||
printf("Running performance test at client version: %s\n", fdb_get_client_version());
|
||||
|
||||
valueStr = (uint8_t*)malloc((sizeof(uint8_t))*valueSize);
|
||||
valueStr = (uint8_t*)malloc((sizeof(uint8_t)) * valueSize);
|
||||
int i;
|
||||
for(i = 0; i < valueSize; i++) {
|
||||
for (i = 0; i < valueSize; i++) {
|
||||
valueStr[i] = (uint8_t)'x';
|
||||
}
|
||||
|
||||
|
|
|
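The reformatting above leaves the benchmark's core pattern unchanged: every read is issued before any future is waited on, so the client can pipeline the whole batch. A minimal sketch of that pattern against the fdb_c API follows; it is not part of this commit, and the helper name and parameters are illustrative only.

#define FDB_API_VERSION 710
#include <foundationdb/fdb_c.h>
#include <stdlib.h>

// Issue `count` reads before waiting on any of them; the client batches the
// outstanding requests, which is what parallelGet measures.
fdb_error_t parallel_read(FDBTransaction* tr, uint8_t** keys, int keySize, int numKeys, int count) {
	FDBFuture** futures = (FDBFuture**)malloc(sizeof(FDBFuture*) * count);
	for (int i = 0; i < count; i++) {
		futures[i] = fdb_transaction_get(tr, keys[rand() % numKeys], keySize, 0);
	}
	fdb_error_t err = 0;
	for (int i = 0; i < count; i++) {
		// Drain the futures in issue order; once an error is seen, the
		// remaining futures are only destroyed (which also cancels them).
		fdb_bool_t present;
		const uint8_t* value;
		int valueLength;
		if (!err)
			err = fdb_future_block_until_ready(futures[i]);
		if (!err)
			err = fdb_future_get_value(futures[i], &present, &value, &valueLength);
		fdb_future_destroy(futures[i]);
	}
	free(futures);
	return err;
}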
@ -34,23 +34,26 @@ int numKeys = 10000;
int keySize = 16;
uint8_t** keys;

void insertData(FDBTransaction *tr) {
void insertData(FDBTransaction* tr) {
	fdb_transaction_clear_range(tr, (uint8_t*)"", 0, (uint8_t*)"\xff", 1);

	uint8_t *v = (uint8_t*)"foo";
	uint8_t* v = (uint8_t*)"foo";
	uint32_t i;
	for(i = 0; i <= numKeys; ++i) {
	for (i = 0; i <= numKeys; ++i) {
		fdb_transaction_set(tr, keys[i], keySize, v, 3);
	}
}

int runTest(int (*testFxn)(FDBTransaction*, struct ResultSet*), FDBTransaction *tr, struct ResultSet *rs, const char *kpiName) {
int runTest(int (*testFxn)(FDBTransaction*, struct ResultSet*),
            FDBTransaction* tr,
            struct ResultSet* rs,
            const char* kpiName) {
	int numRuns = 25;
	int *results = malloc(sizeof(int)*numRuns);
	int* results = malloc(sizeof(int) * numRuns);
	int i = 0;
	for(; i < numRuns; ++i) {
	for (; i < numRuns; ++i) {
		results[i] = testFxn(tr, rs);
		if(results[i] < 0) {
		if (results[i] < 0) {
			free(results);
			return -1;
		}

@ -64,17 +67,19 @@ int runTest(int (*testFxn)(FDBTransaction*, struct ResultSet*), FDBTransaction *
	return result;
}

int getSingle(FDBTransaction *tr, struct ResultSet *rs) {
int getSingle(FDBTransaction* tr, struct ResultSet* rs) {
	int present;
	uint8_t const *value;
	uint8_t const* value;
	int length;
	int i;

	double start = getTime();
	for(i = 0; i < numKeys; ++i) {
		FDBFuture *f = fdb_transaction_get(tr, keys[5001], keySize, 0);
		if(getError(fdb_future_block_until_ready(f), "GetSingle (block for get)", rs)) return -1;
		if(getError(fdb_future_get_value(f, &present, &value, &length), "GetSingle (get result)", rs)) return -1;
	for (i = 0; i < numKeys; ++i) {
		FDBFuture* f = fdb_transaction_get(tr, keys[5001], keySize, 0);
		if (getError(fdb_future_block_until_ready(f), "GetSingle (block for get)", rs))
			return -1;
		if (getError(fdb_future_get_value(f, &present, &value, &length), "GetSingle (get result)", rs))
			return -1;
		fdb_future_destroy(f);
	}
	double end = getTime();

@ -82,17 +87,19 @@ int getSingle(FDBTransaction *tr, struct ResultSet *rs) {
	return numKeys / (end - start);
}

int getManySequential(FDBTransaction *tr, struct ResultSet *rs) {
int getManySequential(FDBTransaction* tr, struct ResultSet* rs) {
	int present;
	uint8_t const *value;
	uint8_t const* value;
	int length;
	int i;

	double start = getTime();
	for(i = 0; i < numKeys; ++i) {
		FDBFuture *f = fdb_transaction_get(tr, keys[i], keySize, 0);
		if(getError(fdb_future_block_until_ready(f), "GetManySequential (block for get)", rs)) return -1;
		if(getError(fdb_future_get_value(f, &present, &value, &length), "GetManySequential (get result)", rs)) return -1;
	for (i = 0; i < numKeys; ++i) {
		FDBFuture* f = fdb_transaction_get(tr, keys[i], keySize, 0);
		if (getError(fdb_future_block_until_ready(f), "GetManySequential (block for get)", rs))
			return -1;
		if (getError(fdb_future_get_value(f, &present, &value, &length), "GetManySequential (get result)", rs))
			return -1;
		fdb_future_destroy(f);
	}
	double end = getTime();

@ -100,20 +107,30 @@ int getManySequential(FDBTransaction *tr, struct ResultSet *rs) {
	return numKeys / (end - start);
}

int getRangeBasic(FDBTransaction *tr, struct ResultSet *rs) {
int getRangeBasic(FDBTransaction* tr, struct ResultSet* rs) {
	int count;
	const FDBKeyValue *kvs;
	const FDBKeyValue* kvs;
	int more;
	int i;

	double start = getTime();
	for(i = 0; i < 100; ++i) {
		FDBFuture *f = fdb_transaction_get_range(tr, FDB_KEYSEL_LAST_LESS_OR_EQUAL(keys[0], keySize), FDB_KEYSEL_LAST_LESS_OR_EQUAL(keys[numKeys], keySize), numKeys, 0, 0, 1, 0, 0);
	for (i = 0; i < 100; ++i) {
		FDBFuture* f = fdb_transaction_get_range(tr,
		                                         FDB_KEYSEL_LAST_LESS_OR_EQUAL(keys[0], keySize),
		                                         FDB_KEYSEL_LAST_LESS_OR_EQUAL(keys[numKeys], keySize),
		                                         numKeys,
		                                         0,
		                                         0,
		                                         1,
		                                         0,
		                                         0);

		if(getError(fdb_future_block_until_ready(f), "GetRangeBasic (block for get range)", rs)) return -1;
		if(getError(fdb_future_get_keyvalue_array(f, &kvs, &count, &more), "GetRangeBasic (get range results)", rs)) return -1;
		if (getError(fdb_future_block_until_ready(f), "GetRangeBasic (block for get range)", rs))
			return -1;
		if (getError(fdb_future_get_keyvalue_array(f, &kvs, &count, &more), "GetRangeBasic (get range results)", rs))
			return -1;

		if(count != numKeys) {
		if (count != numKeys) {
			fprintf(stderr, "Bad count %d (expected %d)\n", count, numKeys);
			addError(rs, "GetRangeBasic bad count");
			return -1;

@ -124,26 +141,37 @@ int getRangeBasic(FDBTransaction *tr, struct ResultSet *rs) {
	return 100 * numKeys / (end - start);
}

int singleClearGetRange(FDBTransaction *tr, struct ResultSet *rs) {
int singleClearGetRange(FDBTransaction* tr, struct ResultSet* rs) {
	int count;
	const FDBKeyValue *kvs;
	const FDBKeyValue* kvs;
	int more;
	int i;

	for(i = 0; i < numKeys; i+=2) {
	for (i = 0; i < numKeys; i += 2) {
		fdb_transaction_clear(tr, keys[i], keySize);
	}

	double start = getTime();
	for(i = 0; i < 100; ++i) {
		FDBFuture *f = fdb_transaction_get_range(tr, FDB_KEYSEL_LAST_LESS_OR_EQUAL(keys[0], keySize), FDB_KEYSEL_LAST_LESS_OR_EQUAL(keys[numKeys], keySize), numKeys, 0, 0, 1, 0, 0);
	for (i = 0; i < 100; ++i) {
		FDBFuture* f = fdb_transaction_get_range(tr,
		                                         FDB_KEYSEL_LAST_LESS_OR_EQUAL(keys[0], keySize),
		                                         FDB_KEYSEL_LAST_LESS_OR_EQUAL(keys[numKeys], keySize),
		                                         numKeys,
		                                         0,
		                                         0,
		                                         1,
		                                         0,
		                                         0);

		if(getError(fdb_future_block_until_ready(f), "SingleClearGetRange (block for get range)", rs)) return -1;
		if(getError(fdb_future_get_keyvalue_array(f, &kvs, &count, &more), "SingleClearGetRange (get range results)", rs)) return -1;
		if (getError(fdb_future_block_until_ready(f), "SingleClearGetRange (block for get range)", rs))
			return -1;
		if (getError(
		        fdb_future_get_keyvalue_array(f, &kvs, &count, &more), "SingleClearGetRange (get range results)", rs))
			return -1;

		fdb_future_destroy(f);

		if(count != numKeys/2) {
		if (count != numKeys / 2) {
			fprintf(stderr, "Bad count %d (expected %d)\n", count, numKeys);
			addError(rs, "SingleClearGetRange bad count");
			return -1;

@ -155,27 +183,38 @@ int singleClearGetRange(FDBTransaction *tr, struct ResultSet *rs) {
	return 100 * numKeys / 2 / (end - start);
}

int clearRangeGetRange(FDBTransaction *tr, struct ResultSet *rs) {
int clearRangeGetRange(FDBTransaction* tr, struct ResultSet* rs) {
	int count;
	const FDBKeyValue *kvs;
	const FDBKeyValue* kvs;
	int more;
	int i;

	for(i = 0; i < numKeys; i+=4) {
		fdb_transaction_clear_range(tr, keys[i], keySize, keys[i+1], keySize);
	for (i = 0; i < numKeys; i += 4) {
		fdb_transaction_clear_range(tr, keys[i], keySize, keys[i + 1], keySize);
	}

	double start = getTime();
	for(i = 0; i < 100; ++i) {
		FDBFuture *f = fdb_transaction_get_range(tr, FDB_KEYSEL_LAST_LESS_OR_EQUAL(keys[0], keySize), FDB_KEYSEL_LAST_LESS_OR_EQUAL(keys[numKeys], keySize), numKeys, 0, 0, 1, 0, 0);
	for (i = 0; i < 100; ++i) {
		FDBFuture* f = fdb_transaction_get_range(tr,
		                                         FDB_KEYSEL_LAST_LESS_OR_EQUAL(keys[0], keySize),
		                                         FDB_KEYSEL_LAST_LESS_OR_EQUAL(keys[numKeys], keySize),
		                                         numKeys,
		                                         0,
		                                         0,
		                                         1,
		                                         0,
		                                         0);

		if(getError(fdb_future_block_until_ready(f), "ClearRangeGetRange (block for get range)", rs)) return -1;
		if(getError(fdb_future_get_keyvalue_array(f, &kvs, &count, &more), "ClearRangeGetRange (get range results)", rs)) return -1;
		if (getError(fdb_future_block_until_ready(f), "ClearRangeGetRange (block for get range)", rs))
			return -1;
		if (getError(
		        fdb_future_get_keyvalue_array(f, &kvs, &count, &more), "ClearRangeGetRange (get range results)", rs))
			return -1;

		fdb_future_destroy(f);

		if(count != numKeys*3/4) {
			fprintf(stderr, "Bad count %d (expected %d)\n", count, numKeys*3/4);
		if (count != numKeys * 3 / 4) {
			fprintf(stderr, "Bad count %d (expected %d)\n", count, numKeys * 3 / 4);
			addError(rs, "ClearRangeGetRange bad count");
			return -1;
		}

@ -186,13 +225,13 @@ int clearRangeGetRange(FDBTransaction *tr, struct ResultSet *rs) {
	return 100 * numKeys * 3 / 4 / (end - start);
}

int interleavedSetsGets(FDBTransaction *tr, struct ResultSet *rs) {
int interleavedSetsGets(FDBTransaction* tr, struct ResultSet* rs) {
	int present;
	uint8_t const *value;
	uint8_t const* value;
	int length;
	int i;

	uint8_t *k = (uint8_t*)"foo";
	uint8_t* k = (uint8_t*)"foo";
	uint8_t v[10];
	int num = 1;

@ -200,10 +239,12 @@ int interleavedSetsGets(FDBTransaction *tr, struct ResultSet *rs) {
	sprintf((char*)v, "%d", num);
	fdb_transaction_set(tr, k, 3, v, strlen((char*)v));

	for(i = 0; i < 10000; ++i) {
		FDBFuture *f = fdb_transaction_get(tr, k, 3, 0);
		if(getError(fdb_future_block_until_ready(f), "InterleavedSetsGets (block for get)", rs)) return -1;
		if(getError(fdb_future_get_value(f, &present, &value, &length), "InterleavedSetsGets (get result)", rs)) return -1;
	for (i = 0; i < 10000; ++i) {
		FDBFuture* f = fdb_transaction_get(tr, k, 3, 0);
		if (getError(fdb_future_block_until_ready(f), "InterleavedSetsGets (block for get)", rs))
			return -1;
		if (getError(fdb_future_get_value(f, &present, &value, &length), "InterleavedSetsGets (get result)", rs))
			return -1;
		fdb_future_destroy(f);

		sprintf((char*)v, "%d", ++num);

@ -214,13 +255,13 @@ int interleavedSetsGets(FDBTransaction *tr, struct ResultSet *rs) {
	return 10000 / (end - start);
}

void runTests(struct ResultSet *rs) {
	FDBDatabase *db = openDatabase(rs, &netThread);
void runTests(struct ResultSet* rs) {
	FDBDatabase* db = openDatabase(rs, &netThread);

	FDBTransaction *tr;
	FDBTransaction* tr;
	checkError(fdb_database_create_transaction(db, &tr), "create transaction", rs);

	FDBFuture *f = fdb_transaction_get_read_version(tr);
	FDBFuture* f = fdb_transaction_get_read_version(tr);
	checkError(fdb_future_block_until_ready(f), "block for read version", rs);

	int64_t version;

@ -241,10 +282,10 @@ void runTests(struct ResultSet *rs) {
	fdb_stop_network();
}

int main(int argc, char **argv) {
int main(int argc, char** argv) {
	srand(time(NULL));
	struct ResultSet *rs = newResultSet();
	checkError(fdb_select_api_version(700), "select API version", rs);
	struct ResultSet* rs = newResultSet();
	checkError(fdb_select_api_version(710), "select API version", rs);
	printf("Running RYW Benchmark test at client version: %s\n", fdb_get_client_version());

	keys = generateKeys(numKeys, keySize);

@ -255,4 +296,3 @@ int main(int argc, char **argv) {

	return 0;
}
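runTest() above reports the median of 25 throughput samples rather than the mean, which keeps a single slow outlier run from skewing the KPI. A small self-contained sketch of that aggregation (illustrative, not part of this commit):

#include <stdio.h>
#include <stdlib.h>

static int cmp_int(const void* a, const void* b) {
	return *(const int*)a - *(const int*)b;
}

// Report the median of repeated throughput samples, as runTest does with
// numRuns = 25; sorting then taking the middle element is all it needs.
int median_throughput(int* samples, int n) {
	qsort(samples, n, sizeof(int), cmp_int);
	return samples[n / 2];
}

int main() {
	int samples[5] = { 980, 1020, 640, 1010, 990 }; // ops/sec from 5 runs
	printf("median: %d ops/sec\n", median_throughput(samples, 5)); // prints 990
	return 0;
}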
@ -29,7 +29,7 @@
#include <inttypes.h>

#ifndef FDB_API_VERSION
#define FDB_API_VERSION 700
#define FDB_API_VERSION 710
#endif

#include <foundationdb/fdb_c.h>

@ -38,27 +38,27 @@
double getTime() {
	static struct timeval tv;
	gettimeofday(&tv, NULL);
	return tv.tv_usec/1000000.0 + tv.tv_sec;
	return tv.tv_usec / 1000000.0 + tv.tv_sec;
}

void writeKey(uint8_t **dest, int key, int keySize) {
	*dest = (uint8_t*)malloc((sizeof(uint8_t))*keySize);
void writeKey(uint8_t** dest, int key, int keySize) {
	*dest = (uint8_t*)malloc((sizeof(uint8_t)) * keySize);
	sprintf((char*)*dest, "%0*d", keySize, key);
}

uint8_t **generateKeys(int numKeys, int keySize) {
	uint8_t **keys = (uint8_t**)malloc(sizeof(uint8_t*)*(numKeys+1));
uint8_t** generateKeys(int numKeys, int keySize) {
	uint8_t** keys = (uint8_t**)malloc(sizeof(uint8_t*) * (numKeys + 1));

	uint32_t i;
	for(i = 0; i <= numKeys; ++i) {
	for (i = 0; i <= numKeys; ++i) {
		writeKey(keys + i, i, keySize);
	}

	return keys;
}
void freeKeys(uint8_t **keys, int numKeys) {
void freeKeys(uint8_t** keys, int numKeys) {
	uint32_t i;
	for(i = 0; i < numKeys; i++) {
	for (i = 0; i < numKeys; i++) {
		free(keys[i]);
	}
	free(keys);

@ -68,38 +68,39 @@ int cmpfunc(const void* a, const void* b) {
	return (*(int*)a - *(int*)b);
}

int median(int *values, int length) {
int median(int* values, int length) {
	qsort(values, length, sizeof(int), cmpfunc);
	return values[length/2];
	return values[length / 2];
}

struct RunResult {
	int res;
	fdb_error_t e;
};
#define RES(x, y) (struct RunResult) { x, y }
#define RES(x, y) \
	(struct RunResult) { x, y }

struct Kpi {
	const char *name;
	const char* name;
	int value;
	const char *units;
	const char* units;

	struct Kpi *next;
	struct Kpi* next;
};

struct Error {
	char *message;
	char* message;

	struct Error *next;
	struct Error* next;
};

struct ResultSet {
	struct Kpi *kpis;
	struct Error *errors;
	struct Kpi* kpis;
	struct Error* errors;
};

struct ResultSet* newResultSet() {
	struct ResultSet *rs = malloc(sizeof(struct ResultSet));
	struct ResultSet* rs = malloc(sizeof(struct ResultSet));

	rs->kpis = NULL;
	rs->errors = NULL;

@ -107,8 +108,8 @@ struct ResultSet* newResultSet() {
	return rs;
}

void addKpi(struct ResultSet *rs, const char *name, int value, const char *units) {
	struct Kpi *k = malloc(sizeof(struct Kpi));
void addKpi(struct ResultSet* rs, const char* name, int value, const char* units) {
	struct Kpi* k = malloc(sizeof(struct Kpi));
	k->name = name;
	k->value = value;
	k->units = units;

@ -116,20 +117,20 @@ void addKpi(struct ResultSet *rs, const char *name, int value, const char *units
	rs->kpis = k;
}

void addError(struct ResultSet *rs, const char *message) {
	struct Error *e = malloc(sizeof(struct Error));
	e->message = (char*)malloc(strlen(message)+1);
void addError(struct ResultSet* rs, const char* message) {
	struct Error* e = malloc(sizeof(struct Error));
	e->message = (char*)malloc(strlen(message) + 1);
	strcpy(e->message, message);
	e->next = rs->errors;
	rs->errors = e;
}

void writeResultSet(struct ResultSet *rs) {
void writeResultSet(struct ResultSet* rs) {
	uint64_t id = ((uint64_t)rand() << 32) + rand();
	char name[100];
	sprintf(name, "fdb-c_result-%" SCNu64 ".json", id);
	FILE *fp = fopen(name, "w");
	if(!fp) {
	FILE* fp = fopen(name, "w");
	if (!fp) {
		fprintf(stderr, "Could not open results file %s\n", name);
		exit(1);
	}

@ -137,10 +138,10 @@ void writeResultSet(struct ResultSet *rs) {
	fprintf(fp, "{\n");
	fprintf(fp, "\t\"kpis\": {\n");

	struct Kpi *k = rs->kpis;
	while(k != NULL) {
	struct Kpi* k = rs->kpis;
	while (k != NULL) {
		fprintf(fp, "\t\t\"%s\": { \"units\": \"%s\", \"value\": %d }", k->name, k->units, k->value);
		if(k->next != NULL) {
		if (k->next != NULL) {
			fprintf(fp, ",");
		}
		fprintf(fp, "\n");

@ -150,10 +151,10 @@ void writeResultSet(struct ResultSet *rs) {
	fprintf(fp, "\t},\n");
	fprintf(fp, "\t\"errors\": [\n");

	struct Error *e = rs->errors;
	while(e != NULL) {
	struct Error* e = rs->errors;
	while (e != NULL) {
		fprintf(fp, "\t\t\"%s\"", e->message);
		if(e->next != NULL) {
		if (e->next != NULL) {
			fprintf(fp, ",");
		}
		fprintf(fp, "\n");

@ -166,17 +167,17 @@ void writeResultSet(struct ResultSet *rs) {
	fclose(fp);
}

void freeResultSet(struct ResultSet *rs) {
	struct Kpi *k = rs->kpis;
	while(k != NULL) {
		struct Kpi *next = k->next;
void freeResultSet(struct ResultSet* rs) {
	struct Kpi* k = rs->kpis;
	while (k != NULL) {
		struct Kpi* next = k->next;
		free(k);
		k = next;
	}

	struct Error *e = rs->errors;
	while(e != NULL) {
		struct Error *next = e->next;
	struct Error* e = rs->errors;
	while (e != NULL) {
		struct Error* next = e->next;
		free(e->message);
		free(e);
		e = next;

@ -185,12 +186,12 @@ void freeResultSet(struct ResultSet *rs) {
	free(rs);
}

fdb_error_t getError(fdb_error_t err, const char* context, struct ResultSet *rs) {
	if(err) {
		char *msg = (char*)malloc(strlen(context) + 100);
fdb_error_t getError(fdb_error_t err, const char* context, struct ResultSet* rs) {
	if (err) {
		char* msg = (char*)malloc(strlen(context) + 100);
		sprintf(msg, "Error in %s: %s", context, fdb_get_error(err));
		fprintf(stderr, "%s\n", msg);
		if(rs != NULL) {
		if (rs != NULL) {
			addError(rs, msg);
		}

@ -200,9 +201,9 @@ fdb_error_t getError(fdb_error_t err, const char* context, struct ResultSet *rs)
	return err;
}

void checkError(fdb_error_t err, const char* context, struct ResultSet *rs) {
	if(getError(err, context, rs)) {
		if(rs != NULL) {
void checkError(fdb_error_t err, const char* context, struct ResultSet* rs) {
	if (getError(err, context, rs)) {
		if (rs != NULL) {
			writeResultSet(rs);
			freeResultSet(rs);
		}

@ -210,11 +211,11 @@ void checkError(fdb_error_t err, const char* context, struct ResultSet *rs) {
	}
}

fdb_error_t logError(fdb_error_t err, const char* context, struct ResultSet *rs) {
	char *msg = (char*)malloc(strlen(context) + 100);
fdb_error_t logError(fdb_error_t err, const char* context, struct ResultSet* rs) {
	char* msg = (char*)malloc(strlen(context) + 100);
	sprintf(msg, "Error in %s: %s", context, fdb_get_error(err));
	fprintf(stderr, "%s\n", msg);
	if(rs != NULL) {
	if (rs != NULL) {
		addError(rs, msg);
	}

@ -222,8 +223,8 @@ fdb_error_t logError(fdb_error_t err, const char* context, struct ResultSet *rs)
	return err;
}

fdb_error_t maybeLogError(fdb_error_t err, const char* context, struct ResultSet *rs) {
	if(err && !fdb_error_predicate( FDB_ERROR_PREDICATE_RETRYABLE, err ) ) {
fdb_error_t maybeLogError(fdb_error_t err, const char* context, struct ResultSet* rs) {
	if (err && !fdb_error_predicate(FDB_ERROR_PREDICATE_RETRYABLE, err)) {
		return logError(err, context, rs);
	}
	return err;

@ -234,11 +235,11 @@ void* runNetwork() {
	return NULL;
}

FDBDatabase* openDatabase(struct ResultSet *rs, pthread_t *netThread) {
FDBDatabase* openDatabase(struct ResultSet* rs, pthread_t* netThread) {
	checkError(fdb_setup_network(), "setup network", rs);
	pthread_create(netThread, NULL, (void*)(&runNetwork), NULL);

	FDBDatabase *db;
	FDBDatabase* db;
	checkError(fdb_create_database(NULL, &db), "create database", rs);

	return db;
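maybeLogError() above deliberately stays quiet on retryable errors so callers can hand them to fdb_transaction_on_error and try again. A sketch of the retry loop these helpers are designed to support (not part of this commit; the key and value literals are placeholders):

#define FDB_API_VERSION 710
#include <foundationdb/fdb_c.h>

// Commit with the standard FDB retry idiom: a retryable error (for example
// not_committed) is handed to fdb_transaction_on_error, which backs off and
// resets the transaction; a non-retryable error ends the loop.
fdb_error_t run_once_with_retry(FDBDatabase* db) {
	FDBTransaction* tr;
	fdb_error_t err = fdb_database_create_transaction(db, &tr);
	while (!err) {
		fdb_transaction_set(tr, (const uint8_t*)"k", 1, (const uint8_t*)"v", 1);
		FDBFuture* commit = fdb_transaction_commit(tr);
		err = fdb_future_block_until_ready(commit);
		if (!err)
			err = fdb_future_get_error(commit);
		fdb_future_destroy(commit);
		if (!err)
			break; // committed
		if (fdb_error_predicate(FDB_ERROR_PREDICATE_RETRYABLE, err)) {
			FDBFuture* onError = fdb_transaction_on_error(tr, err);
			err = fdb_future_block_until_ready(onError);
			if (!err)
				err = fdb_future_get_error(onError);
			fdb_future_destroy(onError);
			// err == 0 here means the transaction was reset; loop and retry.
		}
	}
	fdb_transaction_destroy(tr);
	return err;
}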
@ -31,14 +31,14 @@ pthread_t netThread;
const int numKeys = 100;
uint8_t** keys = NULL;

#define KEY_SIZE 16
#define KEY_SIZE 16
#define VALUE_SIZE 100
uint8_t valueStr[VALUE_SIZE];

fdb_error_t getSize(struct ResultSet* rs, FDBTransaction* tr, int64_t* out_size) {
	fdb_error_t e;
	FDBFuture* future = fdb_transaction_get_approximate_size(tr);

	e = maybeLogError(fdb_future_block_until_ready(future), "waiting for get future", rs);
	if (e) {
		fdb_future_destroy(future);

@ -55,11 +55,11 @@ fdb_error_t getSize(struct ResultSet* rs, FDBTransaction* tr, int64_t* out_size)
	return 0;
}

void runTests(struct ResultSet *rs) {
void runTests(struct ResultSet* rs) {
	int64_t sizes[numKeys];
	int i = 0, j = 0;
	FDBDatabase *db = openDatabase(rs, &netThread);
	FDBTransaction *tr = NULL;
	FDBDatabase* db = openDatabase(rs, &netThread);
	FDBTransaction* tr = NULL;
	fdb_error_t e = fdb_database_create_transaction(db, &tr);
	checkError(e, "create transaction", rs);
	memset(sizes, 0, numKeys * sizeof(uint32_t));

@ -82,7 +82,7 @@ void runTests(struct ResultSet *rs) {
	printf("size %d: %u\n", i, sizes[i]);
	i++;

	fdb_transaction_clear_range(tr, keys[i], KEY_SIZE, keys[i+1], KEY_SIZE);
	fdb_transaction_clear_range(tr, keys[i], KEY_SIZE, keys[i + 1], KEY_SIZE);
	e = getSize(rs, tr, sizes + i);
	checkError(e, "transaction get size", rs);
	printf("size %d: %u\n", i, sizes[i]);

@ -94,10 +94,10 @@ void runTests(struct ResultSet *rs) {
	printf("Test passed!\n");
}

int main(int argc, char **argv) {
int main(int argc, char** argv) {
	srand(time(NULL));
	struct ResultSet *rs = newResultSet();
	checkError(fdb_select_api_version(700), "select API version", rs);
	struct ResultSet* rs = newResultSet();
	checkError(fdb_select_api_version(710), "select API version", rs);
	printf("Running performance test at client version: %s\n", fdb_get_client_version());

	keys = generateKeys(numKeys, KEY_SIZE);
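getSize() above is a thin wrapper over fdb_transaction_get_approximate_size, whose result arrives as an int64 future. In isolation the call looks roughly like this (illustrative sketch, not part of the commit):

#define FDB_API_VERSION 710
#include <foundationdb/fdb_c.h>
#include <stdio.h>

// After a mutation, read the running size estimate for the transaction's
// mutations and read/write conflict ranges, as the test does in its loop.
fdb_error_t print_approximate_size(FDBTransaction* tr) {
	fdb_transaction_set(tr, (const uint8_t*)"key", 3, (const uint8_t*)"value", 5);
	FDBFuture* f = fdb_transaction_get_approximate_size(tr);
	fdb_error_t err = fdb_future_block_until_ready(f);
	int64_t size = 0;
	if (!err)
		err = fdb_future_get_int64(f, &size);
	fdb_future_destroy(f);
	if (!err)
		printf("approximate transaction size: %lld bytes\n", (long long)size);
	return err;
}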
@ -0,0 +1,239 @@
/*
 * fdb_api.cpp
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2020 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "fdb_api.hpp"

#include <iostream>

namespace fdb {

// Future

Future::~Future() {
	fdb_future_destroy(future_);
}

bool Future::is_ready() {
	return fdb_future_is_ready(future_);
}

[[nodiscard]] fdb_error_t Future::block_until_ready() {
	return fdb_future_block_until_ready(future_);
}

[[nodiscard]] fdb_error_t Future::set_callback(FDBCallback callback, void* callback_parameter) {
	return fdb_future_set_callback(future_, callback, callback_parameter);
}

[[nodiscard]] fdb_error_t Future::get_error() {
	return fdb_future_get_error(future_);
}

void Future::release_memory() {
	fdb_future_release_memory(future_);
}

void Future::cancel() {
	fdb_future_cancel(future_);
}

// Int64Future

[[nodiscard]] fdb_error_t Int64Future::get(int64_t* out) {
	return fdb_future_get_int64(future_, out);
}

// ValueFuture

[[nodiscard]] fdb_error_t ValueFuture::get(fdb_bool_t* out_present, const uint8_t** out_value, int* out_value_length) {
	return fdb_future_get_value(future_, out_present, out_value, out_value_length);
}

// KeyFuture

[[nodiscard]] fdb_error_t KeyFuture::get(const uint8_t** out_key, int* out_key_length) {
	return fdb_future_get_key(future_, out_key, out_key_length);
}

// StringArrayFuture

[[nodiscard]] fdb_error_t StringArrayFuture::get(const char*** out_strings, int* out_count) {
	return fdb_future_get_string_array(future_, out_strings, out_count);
}

// KeyValueArrayFuture

[[nodiscard]] fdb_error_t KeyValueArrayFuture::get(const FDBKeyValue** out_kv, int* out_count, fdb_bool_t* out_more) {
	return fdb_future_get_keyvalue_array(future_, out_kv, out_count, out_more);
}

// Database
Int64Future Database::reboot_worker(FDBDatabase* db,
                                    const uint8_t* address,
                                    int address_length,
                                    fdb_bool_t check,
                                    int duration) {
	return Int64Future(fdb_database_reboot_worker(db, address, address_length, check, duration));
}

EmptyFuture Database::force_recovery_with_data_loss(FDBDatabase* db, const uint8_t* dcid, int dcid_length) {
	return EmptyFuture(fdb_database_force_recovery_with_data_loss(db, dcid, dcid_length));
}

EmptyFuture Database::create_snapshot(FDBDatabase* db,
                                      const uint8_t* uid,
                                      int uid_length,
                                      const uint8_t* snap_command,
                                      int snap_command_length) {
	return EmptyFuture(fdb_database_create_snapshot(db, uid, uid_length, snap_command, snap_command_length));
}

// Transaction

Transaction::Transaction(FDBDatabase* db) {
	if (fdb_error_t err = fdb_database_create_transaction(db, &tr_)) {
		std::cerr << fdb_get_error(err) << std::endl;
		std::abort();
	}
}

Transaction::~Transaction() {
	fdb_transaction_destroy(tr_);
}

void Transaction::reset() {
	fdb_transaction_reset(tr_);
}

void Transaction::cancel() {
	fdb_transaction_cancel(tr_);
}

[[nodiscard]] fdb_error_t Transaction::set_option(FDBTransactionOption option, const uint8_t* value, int value_length) {
	return fdb_transaction_set_option(tr_, option, value, value_length);
}

void Transaction::set_read_version(int64_t version) {
	fdb_transaction_set_read_version(tr_, version);
}

Int64Future Transaction::get_read_version() {
	return Int64Future(fdb_transaction_get_read_version(tr_));
}

Int64Future Transaction::get_approximate_size() {
	return Int64Future(fdb_transaction_get_approximate_size(tr_));
}

KeyFuture Transaction::get_versionstamp() {
	return KeyFuture(fdb_transaction_get_versionstamp(tr_));
}

ValueFuture Transaction::get(std::string_view key, fdb_bool_t snapshot) {
	return ValueFuture(fdb_transaction_get(tr_, (const uint8_t*)key.data(), key.size(), snapshot));
}

KeyFuture Transaction::get_key(const uint8_t* key_name,
                               int key_name_length,
                               fdb_bool_t or_equal,
                               int offset,
                               fdb_bool_t snapshot) {
	return KeyFuture(fdb_transaction_get_key(tr_, key_name, key_name_length, or_equal, offset, snapshot));
}

StringArrayFuture Transaction::get_addresses_for_key(std::string_view key) {
	return StringArrayFuture(fdb_transaction_get_addresses_for_key(tr_, (const uint8_t*)key.data(), key.size()));
}

KeyValueArrayFuture Transaction::get_range(const uint8_t* begin_key_name,
                                           int begin_key_name_length,
                                           fdb_bool_t begin_or_equal,
                                           int begin_offset,
                                           const uint8_t* end_key_name,
                                           int end_key_name_length,
                                           fdb_bool_t end_or_equal,
                                           int end_offset,
                                           int limit,
                                           int target_bytes,
                                           FDBStreamingMode mode,
                                           int iteration,
                                           fdb_bool_t snapshot,
                                           fdb_bool_t reverse) {
	return KeyValueArrayFuture(fdb_transaction_get_range(tr_,
	                                                     begin_key_name,
	                                                     begin_key_name_length,
	                                                     begin_or_equal,
	                                                     begin_offset,
	                                                     end_key_name,
	                                                     end_key_name_length,
	                                                     end_or_equal,
	                                                     end_offset,
	                                                     limit,
	                                                     target_bytes,
	                                                     mode,
	                                                     iteration,
	                                                     snapshot,
	                                                     reverse));
}

EmptyFuture Transaction::watch(std::string_view key) {
	return EmptyFuture(fdb_transaction_watch(tr_, (const uint8_t*)key.data(), key.size()));
}

EmptyFuture Transaction::commit() {
	return EmptyFuture(fdb_transaction_commit(tr_));
}

EmptyFuture Transaction::on_error(fdb_error_t err) {
	return EmptyFuture(fdb_transaction_on_error(tr_, err));
}

void Transaction::clear(std::string_view key) {
	return fdb_transaction_clear(tr_, (const uint8_t*)key.data(), key.size());
}

void Transaction::clear_range(std::string_view begin_key, std::string_view end_key) {
	fdb_transaction_clear_range(
	    tr_, (const uint8_t*)begin_key.data(), begin_key.size(), (const uint8_t*)end_key.data(), end_key.size());
}

void Transaction::set(std::string_view key, std::string_view value) {
	fdb_transaction_set(tr_, (const uint8_t*)key.data(), key.size(), (const uint8_t*)value.data(), value.size());
}

void Transaction::atomic_op(std::string_view key,
                            const uint8_t* param,
                            int param_length,
                            FDBMutationType operationType) {
	return fdb_transaction_atomic_op(tr_, (const uint8_t*)key.data(), key.size(), param, param_length, operationType);
}

[[nodiscard]] fdb_error_t Transaction::get_committed_version(int64_t* out_version) {
	return fdb_transaction_get_committed_version(tr_, out_version);
}

fdb_error_t Transaction::add_conflict_range(std::string_view begin_key,
                                            std::string_view end_key,
                                            FDBConflictRangeType type) {
	return fdb_transaction_add_conflict_range(
	    tr_, (const uint8_t*)begin_key.data(), begin_key.size(), (const uint8_t*)end_key.data(), end_key.size(), type);
}

} // namespace fdb
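Putting these wrappers together, a read with typed futures and RAII cleanup looks like this; it is a sketch matching the usage shown in the header comment below, not code from the commit itself:

#include "fdb_api.hpp"
#include <cstdio>

// The RAII handles destroy the FDBFuture and FDBTransaction automatically,
// and each future subclass exposes a typed get() for its result kind.
void read_foo(FDBDatabase* db) {
	fdb::Transaction tr(db);
	fdb::ValueFuture f = tr.get("foo", /* snapshot */ 0);
	if (f.block_until_ready())
		return; // error paths elided in this sketch
	fdb_bool_t present;
	const uint8_t* value;
	int length;
	if (f.get(&present, &value, &length) == 0 && present)
		std::printf("foo = %.*s\n", length, (const char*)value);
} // ~ValueFuture and ~Transaction run here; no manual destroy calls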
@ -0,0 +1,256 @@
/*
 * fdb_api.hpp
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2020 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// A collection of C++ classes to wrap the C API to improve memory management
// and add types to futures. Using the old C API may look something like:
//
//   FDBTransaction *tr;
//   fdb_database_create_transaction(db, &tr);
//   FDBFuture *f = fdb_transaction_get(tr, (const uint8_t*)"foo", 3, true);
//   fdb_future_block_until_ready(f);
//   fdb_future_get_value(f, ...);
//   fdb_future_destroy(f);
//   fdb_transaction_destroy(tr);
//
// Using the wrapper classes defined here, it will instead look like:
//
//   fdb::Transaction tr(db);
//   fdb::ValueFuture f = tr.get((const uint8_t*)"foo", 3, true);
//   f.block_until_ready();
//   f.get_value(f, ...);
//

#pragma once

#define FDB_API_VERSION 710
#include <foundationdb/fdb_c.h>

#include <string>
#include <string_view>

namespace fdb {

// Wrapper parent class to manage memory of an FDBFuture pointer. Cleans up
// FDBFuture when this instance goes out of scope.
class Future {
public:
	virtual ~Future() = 0;

	// Wrapper around fdb_future_is_ready.
	bool is_ready();
	// Wrapper around fdb_future_block_until_ready.
	fdb_error_t block_until_ready();
	// Wrapper around fdb_future_set_callback.
	fdb_error_t set_callback(FDBCallback callback, void* callback_parameter);
	// Wrapper around fdb_future_get_error.
	fdb_error_t get_error();
	// Wrapper around fdb_future_release_memory.
	void release_memory();
	// Wrapper around fdb_future_cancel.
	void cancel();

	// Conversion operator to allow Future instances to work interchangeably as
	// an FDBFuture object.
	// operator FDBFuture* () const {
	//     return future_;
	// }

protected:
	Future(FDBFuture* f) : future_(f) {}
	FDBFuture* future_;
};

class Int64Future : public Future {
public:
	// Call this function instead of fdb_future_get_int64 when using the
	// Int64Future type. Its behavior is identical to fdb_future_get_int64.
	fdb_error_t get(int64_t* out);

private:
	friend class Transaction;
	friend class Database;
	Int64Future(FDBFuture* f) : Future(f) {}
};

class KeyFuture : public Future {
public:
	// Call this function instead of fdb_future_get_key when using the KeyFuture
	// type. Its behavior is identical to fdb_future_get_key.
	fdb_error_t get(const uint8_t** out_key, int* out_key_length);

private:
	friend class Transaction;
	KeyFuture(FDBFuture* f) : Future(f) {}
};

class ValueFuture : public Future {
public:
	// Call this function instead of fdb_future_get_value when using the
	// ValueFuture type. Its behavior is identical to fdb_future_get_value.
	fdb_error_t get(fdb_bool_t* out_present, const uint8_t** out_value, int* out_value_length);

private:
	friend class Transaction;
	ValueFuture(FDBFuture* f) : Future(f) {}
};

class StringArrayFuture : public Future {
public:
	// Call this function instead of fdb_future_get_string_array when using the
	// StringArrayFuture type. Its behavior is identical to
	// fdb_future_get_string_array.
	fdb_error_t get(const char*** out_strings, int* out_count);

private:
	friend class Transaction;
	StringArrayFuture(FDBFuture* f) : Future(f) {}
};

class KeyValueArrayFuture : public Future {
public:
	// Call this function instead of fdb_future_get_keyvalue_array when using
	// the KeyValueArrayFuture type. Its behavior is identical to
	// fdb_future_get_keyvalue_array.
	fdb_error_t get(const FDBKeyValue** out_kv, int* out_count, fdb_bool_t* out_more);

private:
	friend class Transaction;
	KeyValueArrayFuture(FDBFuture* f) : Future(f) {}
};

class EmptyFuture : public Future {
private:
	friend class Transaction;
	friend class Database;
	EmptyFuture(FDBFuture* f) : Future(f) {}
};

// Wrapper around FDBDatabase, providing database-level API
class Database final {
public:
	static Int64Future reboot_worker(FDBDatabase* db,
	                                 const uint8_t* address,
	                                 int address_length,
	                                 fdb_bool_t check,
	                                 int duration);
	static EmptyFuture force_recovery_with_data_loss(FDBDatabase* db, const uint8_t* dcid, int dcid_length);
	static EmptyFuture create_snapshot(FDBDatabase* db,
	                                   const uint8_t* uid,
	                                   int uid_length,
	                                   const uint8_t* snap_command,
	                                   int snap_command_length);
};

// Wrapper around FDBTransaction, providing the same set of calls as the C API.
// Handles cleanup of memory, removing the need to call
// fdb_transaction_destroy.
class Transaction final {
public:
	// Given an FDBDatabase, initializes a new transaction.
	Transaction(FDBDatabase* db);
	~Transaction();

	// Wrapper around fdb_transaction_reset.
	void reset();

	// Wrapper around fdb_transaction_cancel.
	void cancel();

	// Wrapper around fdb_transaction_set_option.
	fdb_error_t set_option(FDBTransactionOption option, const uint8_t* value, int value_length);

	// Wrapper around fdb_transaction_set_read_version.
	void set_read_version(int64_t version);

	// Returns a future which will be set to the transaction read version.
	Int64Future get_read_version();

	// Returns a future which will be set to the approximate transaction size so far.
	Int64Future get_approximate_size();

	// Returns a future which will be set to the versionstamp which was used by
	// any versionstamp operations in the transaction.
	KeyFuture get_versionstamp();

	// Returns a future which will be set to the value of `key` in the database.
	ValueFuture get(std::string_view key, fdb_bool_t snapshot);

	// Returns a future which will be set to the key in the database matching the
	// passed key selector.
	KeyFuture get_key(const uint8_t* key_name,
	                  int key_name_length,
	                  fdb_bool_t or_equal,
	                  int offset,
	                  fdb_bool_t snapshot);

	// Returns a future which will be set to an array of strings.
	StringArrayFuture get_addresses_for_key(std::string_view key);

	// Returns a future which will be set to an FDBKeyValue array.
	KeyValueArrayFuture get_range(const uint8_t* begin_key_name,
	                              int begin_key_name_length,
	                              fdb_bool_t begin_or_equal,
	                              int begin_offset,
	                              const uint8_t* end_key_name,
	                              int end_key_name_length,
	                              fdb_bool_t end_or_equal,
	                              int end_offset,
	                              int limit,
	                              int target_bytes,
	                              FDBStreamingMode mode,
	                              int iteration,
	                              fdb_bool_t snapshot,
	                              fdb_bool_t reverse);

	// Wrapper around fdb_transaction_watch. Returns a future representing an
	// empty value.
	EmptyFuture watch(std::string_view key);

	// Wrapper around fdb_transaction_commit. Returns a future representing an
	// empty value.
	EmptyFuture commit();

	// Wrapper around fdb_transaction_on_error. Returns a future representing an
	// empty value.
	EmptyFuture on_error(fdb_error_t err);

	// Wrapper around fdb_transaction_clear.
	void clear(std::string_view key);

	// Wrapper around fdb_transaction_clear_range.
	void clear_range(std::string_view begin_key, std::string_view end_key);

	// Wrapper around fdb_transaction_set.
	void set(std::string_view key, std::string_view value);

	// Wrapper around fdb_transaction_atomic_op.
	void atomic_op(std::string_view key, const uint8_t* param, int param_length, FDBMutationType operationType);

	// Wrapper around fdb_transaction_get_committed_version.
	fdb_error_t get_committed_version(int64_t* out_version);

	// Wrapper around fdb_transaction_add_conflict_range.
	fdb_error_t add_conflict_range(std::string_view begin_key, std::string_view end_key, FDBConflictRangeType type);

private:
	FDBTransaction* tr_;
};

} // namespace fdb
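The header shows individual calls but stops short of a full transaction loop. A hedged sketch of the usual FDB commit-retry idiom, expressed with only the wrapper calls declared above (not part of this commit; the key and value literals are placeholders):

#include "fdb_api.hpp"

// Commit with retry: on failure, hand the error to on_error(), which backs
// off and resets the transaction if the error is retryable.
fdb_error_t set_with_retry(FDBDatabase* db) {
	fdb::Transaction tr(db);
	while (true) {
		tr.set("hello", "world");
		fdb::EmptyFuture commit = tr.commit();
		fdb_error_t err = commit.block_until_ready();
		if (!err)
			err = commit.get_error();
		if (!err)
			return 0; // committed
		fdb::EmptyFuture retry = tr.on_error(err);
		if (fdb_error_t e2 = retry.block_until_ready())
			return e2;
		if (fdb_error_t e2 = retry.get_error())
			return e2; // non-retryable: give up
		// on_error resolved cleanly: the transaction was reset, so retry.
	}
}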
@ -0,0 +1,75 @@
/*
 * setup_tests.cpp
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2020 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Unit tests for API setup, network initialization functions from the FDB C API.

#define FDB_API_VERSION 710
#include <foundationdb/fdb_c.h>
#include <iostream>
#include <thread>

#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include "doctest.h"

void fdb_check(fdb_error_t e) {
	if (e) {
		std::cerr << fdb_get_error(e) << std::endl;
		std::abort();
	}
}

TEST_CASE("setup") {
	fdb_error_t err;
	// Version passed here must be <= FDB_API_VERSION
	err = fdb_select_api_version(9000);
	CHECK(err);

	// Select current API version
	fdb_check(fdb_select_api_version(710));

	// Error to call again after a successful return
	err = fdb_select_api_version(710);
	CHECK(err);

	CHECK(fdb_get_max_api_version() >= 710);

	fdb_check(fdb_setup_network());
	// Calling a second time should fail
	err = fdb_setup_network();
	CHECK(err);

	struct Context {
		bool called = false;
	};
	Context context;
	fdb_check(fdb_add_network_thread_completion_hook(
	    [](void* param) {
		    auto* context = static_cast<Context*>(param);
		    context->called = true;
	    },
	    &context));

	std::thread network_thread{ &fdb_run_network };

	CHECK(!context.called);
	fdb_check(fdb_stop_network());
	network_thread.join();
	CHECK(context.called);
}
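The test above pins down the single-shot semantics of client setup: fdb_select_api_version and fdb_setup_network may each succeed only once per process, and the network must run on its own thread until fdb_stop_network is called. Stripped of the doctest harness, the intended lifecycle looks like this (illustrative sketch):

#define FDB_API_VERSION 710
#include <foundationdb/fdb_c.h>
#include <thread>

int main() {
	// Fails if called twice, or with a version greater than FDB_API_VERSION.
	if (fdb_select_api_version(710))
		return 1;
	// Also single-shot: a second call returns an error.
	if (fdb_setup_network())
		return 1;
	std::thread network_thread{ &fdb_run_network };
	// ... application work against the client happens here ...
	if (fdb_stop_network())
		return 1;
	network_thread.join(); // fdb_run_network returns once the network stops
	return 0;
}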
@ -0,0 +1,18 @@
# Download doctest repo.
include(ExternalProject)
find_package(Git REQUIRED)

ExternalProject_Add(
  doctest
  PREFIX ${CMAKE_BINARY_DIR}/doctest
  GIT_REPOSITORY https://github.com/onqtam/doctest.git
  GIT_TAG 1c8da00c978c19e00a434b2b1f854fcffc9fba35 # v2.4.0
  TIMEOUT 10
  CONFIGURE_COMMAND ""
  BUILD_COMMAND ""
  INSTALL_COMMAND ""
  LOG_DOWNLOAD ON
)

ExternalProject_Get_Property(doctest source_dir)
set(DOCTEST_INCLUDE_DIR ${source_dir}/doctest CACHE INTERNAL "Path to include folder for doctest")
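The header fetched above is all a test binary needs; a minimal doctest sketch for orientation:

#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include "doctest.h"

TEST_CASE("doctest smoke test") {
    // CHECK records a failure but keeps running; REQUIRE would abort the test case.
    CHECK(1 + 1 == 2);
}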
File diff suppressed because it is too large
@ -18,7 +18,7 @@
 * limitations under the License.
 */

#define FDB_API_VERSION 700
#define FDB_API_VERSION 710
#include "foundationdb/fdb_c.h"
#undef DLLEXPORT
#include "workloads.h"
@ -104,7 +104,10 @@ struct SimpleWorkload : FDBWorkload {
        unsigned long from, to, lastTx = 0;
        std::unordered_map<State, ActorCallback> callbacks;

        PopulateActor(const Callback& promise,
                      SimpleWorkload& self,
                      FDBDatabase* db,
                      unsigned long from,
                      unsigned long to)
          : ActorBase(promise, self, db), from(from), to(to) {
            error = fdb_database_create_transaction(db, &tx);
@ -130,8 +133,11 @@ struct SimpleWorkload : FDBWorkload {
            for (; from < to && ops < self.insertsPerTx; ++ops, ++from) {
                std::string value = std::to_string(from);
                std::string key = KEY_PREFIX + value;
                fdb_transaction_set(tx,
                                    reinterpret_cast<const uint8_t*>(key.c_str()),
                                    key.size(),
                                    reinterpret_cast<const uint8_t*>(value.c_str()),
                                    value.size());
            }
            lastTx = ops;
            auto commit_future = fdb_transaction_commit(tx);
@ -154,7 +160,8 @@ struct SimpleWorkload : FDBWorkload {
                    run();
                },
                [this](fdb_error_t error) {
                    self.context->trace(FDBSeverity::Error,
                                        "AssertionFailure",
                                        { { "Reason", "tx.onError failed" },
                                          { "Error", std::string(fdb_get_error(error)) } });
                    self.success = false;
@ -230,7 +237,8 @@ struct SimpleWorkload : FDBWorkload {
                    get();
                },
                [this](fdb_error_t error) {
                    self.context->trace(FDBSeverity::Error,
                                        "AssertionFailure",
                                        { { "Reason", "tx.onError failed" },
                                          { "Error", std::string(fdb_get_error(error)) } });
                    self.success = false;
@ -258,10 +266,10 @@ struct SimpleWorkload : FDBWorkload {
        insertsPerTx = context->getOption("insertsPerTx", 100ul);
        opsPerTx = context->getOption("opsPerTx", 100ul);
        runFor = context->getOption("runFor", 10.0);
        auto err = fdb_select_api_version(700);
        auto err = fdb_select_api_version(710);
        if (err) {
            context->trace(
                FDBSeverity::Info, "SelectAPIVersionFailed", { { "Error", std::string(fdb_get_error(err)) } });
        }
        return true;
    }
@ -23,19 +23,19 @@
FDBWorkloadFactoryImpl::~FDBWorkloadFactoryImpl() {}

std::map<std::string, IFDBWorkloadFactory*>& FDBWorkloadFactoryImpl::factories() {
    static std::map<std::string, IFDBWorkloadFactory*> _factories;
    return _factories;
}

std::shared_ptr<FDBWorkload> FDBWorkloadFactoryImpl::create(const std::string& name) {
    auto res = factories().find(name);
    if (res == factories().end()) {
        return nullptr;
    }
    return res->second->create();
}

FDBWorkloadFactory* workloadFactory(FDBLogger*) {
    static FDBWorkloadFactoryImpl impl;
    return &impl;
}
@ -33,15 +33,11 @@ struct FDBWorkloadFactoryImpl : FDBWorkloadFactory {
    std::shared_ptr<FDBWorkload> create(const std::string& name) override;
};

template <class WorkloadType>
struct FDBWorkloadFactoryT : IFDBWorkloadFactory {
    explicit FDBWorkloadFactoryT(const std::string& name) { FDBWorkloadFactoryImpl::factories()[name] = this; }

    std::shared_ptr<FDBWorkload> create() override { return std::make_shared<WorkloadType>(); }
};

extern "C" DLLEXPORT FDBWorkloadFactory* workloadFactory(FDBLogger*);
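For orientation, a workload library registers itself by instantiating this factory template at namespace scope; the FDBWorkloadFactoryT constructor inserts the factory into FDBWorkloadFactoryImpl::factories() under the given name. A sketch with a hypothetical MyWorkload type:

struct MyWorkload : FDBWorkload {
    // ... override the FDBWorkload virtual interface ...
};

// Static registration: runs when the workload library is loaded.
FDBWorkloadFactoryT<MyWorkload> myWorkloadFactory("MyWorkload");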
@ -22,486 +22,541 @@
#include "DirectoryPartition.h"

namespace FDB {
const uint8_t DirectoryLayer::LITTLE_ENDIAN_LONG_ONE[8] = { 1, 0, 0, 0, 0, 0, 0, 0 };
const StringRef DirectoryLayer::HIGH_CONTENTION_KEY = LiteralStringRef("hca");
const StringRef DirectoryLayer::LAYER_KEY = LiteralStringRef("layer");
const StringRef DirectoryLayer::VERSION_KEY = LiteralStringRef("version");
const int64_t DirectoryLayer::SUB_DIR_KEY = 0;

const uint32_t DirectoryLayer::VERSION[3] = { 1, 0, 0 };

const StringRef DirectoryLayer::DEFAULT_NODE_SUBSPACE_PREFIX = LiteralStringRef("\xfe");
const Subspace DirectoryLayer::DEFAULT_NODE_SUBSPACE = Subspace(DEFAULT_NODE_SUBSPACE_PREFIX);
const Subspace DirectoryLayer::DEFAULT_CONTENT_SUBSPACE = Subspace();
const StringRef DirectoryLayer::PARTITION_LAYER = LiteralStringRef("partition");

DirectoryLayer::DirectoryLayer(Subspace nodeSubspace, Subspace contentSubspace, bool allowManualPrefixes)
  : nodeSubspace(nodeSubspace), contentSubspace(contentSubspace), allowManualPrefixes(allowManualPrefixes),
    rootNode(nodeSubspace.get(nodeSubspace.key())), allocator(rootNode.get(HIGH_CONTENTION_KEY)) {}

Subspace DirectoryLayer::nodeWithPrefix(StringRef const& prefix) const {
    return nodeSubspace.get(prefix);
}

template <class T>
Optional<Subspace> DirectoryLayer::nodeWithPrefix(Optional<T> const& prefix) const {
    if (!prefix.present()) {
        return Optional<Subspace>();
    }

    return nodeWithPrefix(prefix.get());
}

ACTOR Future<DirectoryLayer::Node> find(Reference<DirectoryLayer> dirLayer,
                                        Reference<Transaction> tr,
                                        IDirectory::Path path) {
    state int pathIndex = 0;
    state DirectoryLayer::Node node = DirectoryLayer::Node(dirLayer, dirLayer->rootNode, IDirectory::Path(), path);

    for (; pathIndex != path.size(); ++pathIndex) {
        ASSERT(node.subspace.present());
        Optional<FDBStandalone<ValueRef>> val =
            wait(tr->get(node.subspace.get().get(DirectoryLayer::SUB_DIR_KEY).get(path[pathIndex], true).key()));

        node.path.push_back(path[pathIndex]);
        node = DirectoryLayer::Node(dirLayer, dirLayer->nodeWithPrefix(val), node.path, path);

        DirectoryLayer::Node _node = wait(node.loadMetadata(tr));
        node = _node;

        if (!node.exists() || node.layer == DirectoryLayer::PARTITION_LAYER) {
            return node;
        }
    }

    if (!node.loadedMetadata) {
        DirectoryLayer::Node _node = wait(node.loadMetadata(tr));
        node = _node;
    }

    return node;
}

IDirectory::Path DirectoryLayer::toAbsolutePath(IDirectory::Path const& subpath) const {
    Path path;

    path.reserve(this->path.size() + subpath.size());
    path.insert(path.end(), this->path.begin(), this->path.end());
    path.insert(path.end(), subpath.begin(), subpath.end());

    return path;
}

Reference<DirectorySubspace> DirectoryLayer::contentsOfNode(Subspace const& node,
                                                            Path const& path,
                                                            Standalone<StringRef> const& layer) {
    Standalone<StringRef> prefix = nodeSubspace.unpack(node.key()).getString(0);

    if (layer == PARTITION_LAYER) {
        return Reference<DirectorySubspace>(
            new DirectoryPartition(toAbsolutePath(path), prefix, Reference<DirectoryLayer>::addRef(this)));
    } else {
        return Reference<DirectorySubspace>(
            new DirectorySubspace(toAbsolutePath(path), prefix, Reference<DirectoryLayer>::addRef(this), layer));
    }
}

Reference<DirectorySubspace> DirectoryLayer::openInternal(Standalone<StringRef> const& layer,
                                                          Node const& existingNode,
                                                          bool allowOpen) {
    if (!allowOpen) {
        throw directory_already_exists();
    }
    if (layer.size() > 0 && layer != existingNode.layer) {
        throw mismatched_layer();
    }

    return existingNode.getContents();
}

Future<Reference<DirectorySubspace>> DirectoryLayer::open(Reference<Transaction> const& tr,
                                                          Path const& path,
                                                          Standalone<StringRef> const& layer) {
    return createOrOpenInternal(tr, path, layer, Optional<Standalone<StringRef>>(), false, true);
}

void DirectoryLayer::initializeDirectory(Reference<Transaction> const& tr) const {
    tr->set(rootNode.pack(VERSION_KEY), StringRef((uint8_t*)VERSION, 12));
}

ACTOR Future<Void> checkVersionInternal(const DirectoryLayer* dirLayer, Reference<Transaction> tr, bool writeAccess) {
    Optional<FDBStandalone<ValueRef>> versionBytes =
        wait(tr->get(dirLayer->rootNode.pack(DirectoryLayer::VERSION_KEY)));

    if (!versionBytes.present()) {
        if (writeAccess) {
            dirLayer->initializeDirectory(tr);
        }
        return Void();
    } else {
        if (versionBytes.get().size() != 12) {
            throw invalid_directory_layer_metadata();
        }
        if (((uint32_t*)versionBytes.get().begin())[0] > DirectoryLayer::VERSION[0]) {
            throw incompatible_directory_version();
        } else if (((uint32_t*)versionBytes.get().begin())[1] > DirectoryLayer::VERSION[1] && writeAccess) {
            throw incompatible_directory_version();
        }
    }

    return Void();
}

Future<Void> DirectoryLayer::checkVersion(Reference<Transaction> const& tr, bool writeAccess) const {
    return checkVersionInternal(this, tr, writeAccess);
}

ACTOR Future<Standalone<StringRef>> getPrefix(Reference<DirectoryLayer> dirLayer,
                                              Reference<Transaction> tr,
                                              Optional<Standalone<StringRef>> prefix) {
    if (!prefix.present()) {
        Standalone<StringRef> allocated = wait(dirLayer->allocator.allocate(tr));
        state Standalone<StringRef> finalPrefix = allocated.withPrefix(dirLayer->contentSubspace.key());

        FDBStandalone<RangeResultRef> result = wait(tr->getRange(KeyRangeRef(finalPrefix, strinc(finalPrefix)), 1));

        if (result.size() > 0) {
            throw directory_prefix_not_empty();
        }

        return finalPrefix;
    }

    return prefix.get();
}

ACTOR Future<Optional<Subspace>> nodeContainingKey(Reference<DirectoryLayer> dirLayer,
                                                   Reference<Transaction> tr,
                                                   Standalone<StringRef> key,
                                                   bool snapshot) {
    if (key.startsWith(dirLayer->nodeSubspace.key())) {
        return dirLayer->rootNode;
    }

    KeyRange range = KeyRangeRef(dirLayer->nodeSubspace.range().begin, keyAfter(dirLayer->nodeSubspace.pack(key)));
    FDBStandalone<RangeResultRef> result = wait(tr->getRange(range, 1, snapshot, true));

    if (result.size() > 0) {
        Standalone<StringRef> prevPrefix = dirLayer->nodeSubspace.unpack(result[0].key).getString(0);
        if (key.startsWith(prevPrefix)) {
            return dirLayer->nodeWithPrefix(prevPrefix);
        }
    }

    return Optional<Subspace>();
}

ACTOR Future<bool> isPrefixFree(Reference<DirectoryLayer> dirLayer,
                                Reference<Transaction> tr,
                                Standalone<StringRef> prefix,
                                bool snapshot) {
    if (!prefix.size()) {
        return false;
    }

    Optional<Subspace> node = wait(nodeContainingKey(dirLayer, tr, prefix, snapshot));
    if (node.present()) {
        return false;
    }

    FDBStandalone<RangeResultRef> result = wait(tr->getRange(
        KeyRangeRef(dirLayer->nodeSubspace.pack(prefix), dirLayer->nodeSubspace.pack(strinc(prefix))), 1, snapshot));
    return !result.size();
}

ACTOR Future<Subspace> getParentNode(Reference<DirectoryLayer> dirLayer,
                                     Reference<Transaction> tr,
                                     IDirectory::Path path) {
    if (path.size() > 1) {
        Reference<DirectorySubspace> parent =
            wait(dirLayer->createOrOpenInternal(tr,
                                                IDirectory::Path(path.begin(), path.end() - 1),
                                                StringRef(),
                                                Optional<Standalone<StringRef>>(),
                                                true,
                                                true));
        return dirLayer->nodeWithPrefix(parent->key());
    } else {
        return dirLayer->rootNode;
    }
}

ACTOR Future<Reference<DirectorySubspace>> createInternal(Reference<DirectoryLayer> dirLayer,
                                                          Reference<Transaction> tr,
                                                          IDirectory::Path path,
                                                          Standalone<StringRef> layer,
                                                          Optional<Standalone<StringRef>> prefix,
                                                          bool allowCreate) {
    if (!allowCreate) {
        throw directory_does_not_exist();
    }

    wait(dirLayer->checkVersion(tr, true));

    state Standalone<StringRef> newPrefix = wait(getPrefix(dirLayer, tr, prefix));
    bool isFree = wait(isPrefixFree(dirLayer, tr, newPrefix, !prefix.present()));

    if (!isFree) {
        throw directory_prefix_in_use();
    }

    Subspace parentNode = wait(getParentNode(dirLayer, tr, path));
    Subspace node = dirLayer->nodeWithPrefix(newPrefix);

    tr->set(parentNode.get(DirectoryLayer::SUB_DIR_KEY).get(path.back(), true).key(), newPrefix);
    tr->set(node.get(DirectoryLayer::LAYER_KEY).key(), layer);
    return dirLayer->contentsOfNode(node, path, layer);
}

ACTOR Future<Reference<DirectorySubspace>> _createOrOpenInternal(Reference<DirectoryLayer> dirLayer,
                                                                 Reference<Transaction> tr,
                                                                 IDirectory::Path path,
                                                                 Standalone<StringRef> layer,
                                                                 Optional<Standalone<StringRef>> prefix,
                                                                 bool allowCreate,
                                                                 bool allowOpen) {
    ASSERT(!prefix.present() || allowCreate);
    wait(dirLayer->checkVersion(tr, false));

    if (prefix.present() && !dirLayer->allowManualPrefixes) {
        if (!dirLayer->getPath().size()) {
            throw manual_prefixes_not_enabled();
        } else {
            throw prefix_in_partition();
        }
    }

    if (!path.size()) {
        throw cannot_open_root_directory();
    }

    state DirectoryLayer::Node existingNode = wait(find(dirLayer, tr, path));
    if (existingNode.exists()) {
        if (existingNode.isInPartition()) {
            IDirectory::Path subpath = existingNode.getPartitionSubpath();
            Reference<DirectorySubspace> dirSpace =
                wait(existingNode.getContents()->getDirectoryLayer()->createOrOpenInternal(
                    tr, subpath, layer, prefix, allowCreate, allowOpen));
            return dirSpace;
        }
        return dirLayer->openInternal(layer, existingNode, allowOpen);
    } else {
        Reference<DirectorySubspace> dirSpace = wait(createInternal(dirLayer, tr, path, layer, prefix, allowCreate));
        return dirSpace;
    }
}

Future<Reference<DirectorySubspace>> DirectoryLayer::createOrOpenInternal(Reference<Transaction> const& tr,
                                                                          Path const& path,
                                                                          Standalone<StringRef> const& layer,
                                                                          Optional<Standalone<StringRef>> const& prefix,
                                                                          bool allowCreate,
                                                                          bool allowOpen) {
    return _createOrOpenInternal(
        Reference<DirectoryLayer>::addRef(this), tr, path, layer, prefix, allowCreate, allowOpen);
}

Future<Reference<DirectorySubspace>> DirectoryLayer::create(Reference<Transaction> const& tr,
                                                            Path const& path,
                                                            Standalone<StringRef> const& layer,
                                                            Optional<Standalone<StringRef>> const& prefix) {
    return createOrOpenInternal(tr, path, layer, prefix, true, false);
}

Future<Reference<DirectorySubspace>> DirectoryLayer::createOrOpen(Reference<Transaction> const& tr,
                                                                  Path const& path,
                                                                  Standalone<StringRef> const& layer) {
    return createOrOpenInternal(tr, path, layer, Optional<Standalone<StringRef>>(), true, true);
}

ACTOR Future<Standalone<VectorRef<StringRef>>> listInternal(Reference<DirectoryLayer> dirLayer,
                                                            Reference<Transaction> tr,
                                                            IDirectory::Path path) {
    wait(dirLayer->checkVersion(tr, false));

    state DirectoryLayer::Node node = wait(find(dirLayer, tr, path));

    if (!node.exists()) {
        throw directory_does_not_exist();
    }
    if (node.isInPartition(true)) {
        Standalone<VectorRef<StringRef>> partitionList =
            wait(node.getContents()->getDirectoryLayer()->list(tr, node.getPartitionSubpath()));
        return partitionList;
    }

    state Subspace subdir = node.subspace.get().get(DirectoryLayer::SUB_DIR_KEY);
    state Key begin = subdir.range().begin;
    state Standalone<VectorRef<StringRef>> subdirectories;

    loop {
        FDBStandalone<RangeResultRef> subdirRange = wait(tr->getRange(KeyRangeRef(begin, subdir.range().end)));

        for (int i = 0; i < subdirRange.size(); ++i) {
            subdirectories.push_back_deep(subdirectories.arena(), subdir.unpack(subdirRange[i].key).getString(0));
        }

        if (!subdirRange.more) {
            return subdirectories;
        }

        begin = keyAfter(subdirRange.back().key);
    }
}

Future<Standalone<VectorRef<StringRef>>> DirectoryLayer::list(Reference<Transaction> const& tr, Path const& path) {
    return listInternal(Reference<DirectoryLayer>::addRef(this), tr, path);
}

bool pathsEqual(IDirectory::Path const& path1,
                IDirectory::Path const& path2,
                size_t maxElementsToCheck = std::numeric_limits<size_t>::max()) {
    if (std::min(path1.size(), maxElementsToCheck) != std::min(path2.size(), maxElementsToCheck)) {
        return false;
    }
    for (int i = 0; i < path1.size() && i < maxElementsToCheck; ++i) {
        if (path1[i] != path2[i]) {
            return false;
        }
    }

    return true;
}

ACTOR Future<Void> removeFromParent(Reference<DirectoryLayer> dirLayer,
                                    Reference<Transaction> tr,
                                    IDirectory::Path path) {
    ASSERT(path.size() >= 1);
    DirectoryLayer::Node parentNode = wait(find(dirLayer, tr, IDirectory::Path(path.begin(), path.end() - 1)));
    if (parentNode.subspace.present()) {
        tr->clear(parentNode.subspace.get().get(DirectoryLayer::SUB_DIR_KEY).get(path.back(), true).key());
    }

    return Void();
}

ACTOR Future<Reference<DirectorySubspace>> moveInternal(Reference<DirectoryLayer> dirLayer,
                                                        Reference<Transaction> tr,
                                                        IDirectory::Path oldPath,
                                                        IDirectory::Path newPath) {
    wait(dirLayer->checkVersion(tr, true));

    if (oldPath.size() <= newPath.size()) {
        if (pathsEqual(oldPath, newPath, oldPath.size())) {
            throw invalid_destination_directory();
        }
    }

    std::vector<Future<DirectoryLayer::Node>> futures;
    futures.push_back(find(dirLayer, tr, oldPath));
    futures.push_back(find(dirLayer, tr, newPath));

    std::vector<DirectoryLayer::Node> nodes = wait(getAll(futures));

    state DirectoryLayer::Node oldNode = nodes[0];
    state DirectoryLayer::Node newNode = nodes[1];

    if (!oldNode.exists()) {
        throw directory_does_not_exist();
    }

    if (oldNode.isInPartition() || newNode.isInPartition()) {
        if (!oldNode.isInPartition() || !newNode.isInPartition() || !pathsEqual(oldNode.path, newNode.path)) {
            throw cannot_move_directory_between_partitions();
        }

        Reference<DirectorySubspace> partitionMove =
            wait(newNode.getContents()->move(tr, oldNode.getPartitionSubpath(), newNode.getPartitionSubpath()));
        return partitionMove;
    }

    if (newNode.exists() || newPath.empty()) {
        throw directory_already_exists();
    }

    DirectoryLayer::Node parentNode = wait(find(dirLayer, tr, IDirectory::Path(newPath.begin(), newPath.end() - 1)));
    if (!parentNode.exists()) {
        throw parent_directory_does_not_exist();
    }

    tr->set(parentNode.subspace.get().get(DirectoryLayer::SUB_DIR_KEY).get(newPath.back(), true).key(),
            dirLayer->nodeSubspace.unpack(oldNode.subspace.get().key()).getString(0));
    wait(removeFromParent(dirLayer, tr, oldPath));

    return dirLayer->contentsOfNode(oldNode.subspace.get(), newPath, oldNode.layer);
}

Future<Reference<DirectorySubspace>> DirectoryLayer::move(Reference<Transaction> const& tr,
                                                          Path const& oldPath,
                                                          Path const& newPath) {
    return moveInternal(Reference<DirectoryLayer>::addRef(this), tr, oldPath, newPath);
}

Future<Reference<DirectorySubspace>> DirectoryLayer::moveTo(Reference<Transaction> const& tr,
                                                            Path const& newAbsolutePath) {
    throw cannot_modify_root_directory();
}

Future<Void> removeRecursive(Reference<DirectoryLayer> const&, Reference<Transaction> const&, Subspace const&);
ACTOR Future<Void> removeRecursive(Reference<DirectoryLayer> dirLayer, Reference<Transaction> tr, Subspace nodeSub) {
    state Subspace subdir = nodeSub.get(DirectoryLayer::SUB_DIR_KEY);
    state Key begin = subdir.range().begin;
    state std::vector<Future<Void>> futures;

    loop {
        FDBStandalone<RangeResultRef> range = wait(tr->getRange(KeyRangeRef(begin, subdir.range().end)));
        for (int i = 0; i < range.size(); ++i) {
            Subspace subNode = dirLayer->nodeWithPrefix(range[i].value);
            futures.push_back(removeRecursive(dirLayer, tr, subNode));
        }

        if (!range.more) {
            break;
        }

        begin = keyAfter(range.back().key);
    }

    // waits are done concurrently
    wait(waitForAll(futures));

    Standalone<StringRef> nodePrefix = dirLayer->nodeSubspace.unpack(nodeSub.key()).getString(0);

    tr->clear(KeyRangeRef(nodePrefix, strinc(nodePrefix)));
    tr->clear(nodeSub.range());

    return Void();
}

Future<bool> removeInternal(Reference<DirectoryLayer> const&,
                            Reference<Transaction> const&,
                            IDirectory::Path const&,
                            bool const&);
ACTOR Future<bool> removeInternal(Reference<DirectoryLayer> dirLayer,
                                  Reference<Transaction> tr,
                                  IDirectory::Path path,
                                  bool failOnNonexistent) {
    wait(dirLayer->checkVersion(tr, true));

    if (path.empty()) {
        throw cannot_modify_root_directory();
    }

    state DirectoryLayer::Node node = wait(find(dirLayer, tr, path));

    if (!node.exists()) {
        if (failOnNonexistent) {
            throw directory_does_not_exist();
        } else {
            return false;
        }
    }

    if (node.isInPartition()) {
        bool recurse = wait(
            removeInternal(node.getContents()->getDirectoryLayer(), tr, node.getPartitionSubpath(), failOnNonexistent));
        return recurse;
    }

    state std::vector<Future<Void>> futures;
    futures.push_back(removeRecursive(dirLayer, tr, node.subspace.get()));
    futures.push_back(removeFromParent(dirLayer, tr, path));

    wait(waitForAll(futures));

    return true;
}

Future<Void> DirectoryLayer::remove(Reference<Transaction> const& tr, Path const& path) {
    return success(removeInternal(Reference<DirectoryLayer>::addRef(this), tr, path, true));
}

Future<bool> DirectoryLayer::removeIfExists(Reference<Transaction> const& tr, Path const& path) {
    return removeInternal(Reference<DirectoryLayer>::addRef(this), tr, path, false);
}

ACTOR Future<bool> existsInternal(Reference<DirectoryLayer> dirLayer,
                                  Reference<Transaction> tr,
                                  IDirectory::Path path) {
    wait(dirLayer->checkVersion(tr, false));

    DirectoryLayer::Node node = wait(find(dirLayer, tr, path));

    if (!node.exists()) {
        return false;
    }

    if (node.isInPartition()) {
        bool exists = wait(node.getContents()->getDirectoryLayer()->exists(tr, node.getPartitionSubpath()));
        return exists;
    }

    return true;
}

Future<bool> DirectoryLayer::exists(Reference<Transaction> const& tr, Path const& path) {
    return existsInternal(Reference<DirectoryLayer>::addRef(this), tr, path);
}

Reference<DirectoryLayer> DirectoryLayer::getDirectoryLayer() {
    return Reference<DirectoryLayer>::addRef(this);
}

const Standalone<StringRef> DirectoryLayer::getLayer() const {
    return StringRef();
}

const IDirectory::Path DirectoryLayer::getPath() const {
    return path;
}
} // namespace FDB
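A usage sketch of the directory layer API above, written as a flow ACTOR; the actor name, the path elements, and the written key are hypothetical, and a valid Reference<Transaction> is assumed:

ACTOR Future<Void> createUsersDirectory(Reference<DirectoryLayer> dir, Reference<Transaction> tr) {
    state IDirectory::Path path;
    path.push_back(LiteralStringRef("app"));
    path.push_back(LiteralStringRef("users"));

    // Allocates (or reopens) a short prefix for app/users via the high-contention allocator.
    Reference<DirectorySubspace> users = wait(dir->createOrOpen(tr, path));

    // Keys built through the returned subspace land under the allocated prefix.
    tr->set(users->get(LiteralStringRef("alice"), true).key(), LiteralStringRef(""));
    return Void();
}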
@ -28,84 +28,108 @@
#include "HighContentionAllocator.h"

namespace FDB {
class DirectoryLayer : public IDirectory {
public:
    DirectoryLayer(Subspace nodeSubspace = DEFAULT_NODE_SUBSPACE,
                   Subspace contentSubspace = DEFAULT_CONTENT_SUBSPACE,
                   bool allowManualPrefixes = false);

    Future<Reference<DirectorySubspace>> create(
        Reference<Transaction> const& tr,
        Path const& path,
        Standalone<StringRef> const& layer = Standalone<StringRef>(),
        Optional<Standalone<StringRef>> const& prefix = Optional<Standalone<StringRef>>());
    Future<Reference<DirectorySubspace>> open(Reference<Transaction> const& tr,
                                              Path const& path,
                                              Standalone<StringRef> const& layer = Standalone<StringRef>());
    Future<Reference<DirectorySubspace>> createOrOpen(Reference<Transaction> const& tr,
                                                      Path const& path,
                                                      Standalone<StringRef> const& layer = Standalone<StringRef>());

    Future<bool> exists(Reference<Transaction> const& tr, Path const& path = Path());
    Future<Standalone<VectorRef<StringRef>>> list(Reference<Transaction> const& tr, Path const& path = Path());

    Future<Reference<DirectorySubspace>> move(Reference<Transaction> const& tr,
                                              Path const& oldPath,
                                              Path const& newPath);
    Future<Reference<DirectorySubspace>> moveTo(Reference<Transaction> const& tr, Path const& newAbsolutePath);

    Future<Void> remove(Reference<Transaction> const& tr, Path const& path = Path());
    Future<bool> removeIfExists(Reference<Transaction> const& tr, Path const& path = Path());

    Reference<DirectoryLayer> getDirectoryLayer();
    const Standalone<StringRef> getLayer() const;
    const Path getPath() const;

    static const Subspace DEFAULT_NODE_SUBSPACE;
    static const Subspace DEFAULT_CONTENT_SUBSPACE;
    static const StringRef PARTITION_LAYER;

    // private:
    static const uint8_t LITTLE_ENDIAN_LONG_ONE[8];
    static const StringRef HIGH_CONTENTION_KEY;
    static const StringRef LAYER_KEY;
    static const StringRef VERSION_KEY;
    static const int64_t SUB_DIR_KEY;
    static const uint32_t VERSION[3];
    static const StringRef DEFAULT_NODE_SUBSPACE_PREFIX;

    struct Node {
        Node() {}
        Node(Reference<DirectoryLayer> const& directoryLayer,
             Optional<Subspace> const& subspace,
             Path const& path,
             Path const& targetPath);

        bool exists() const;

        Future<Node> loadMetadata(Reference<Transaction> tr);
        void ensureMetadataLoaded() const;

        bool isInPartition(bool includeEmptySubpath = false) const;
        Path getPartitionSubpath() const;
        Reference<DirectorySubspace> getContents() const;

        Reference<DirectoryLayer> directoryLayer;
        Optional<Subspace> subspace;
        Path path;
        Path targetPath;
        Standalone<StringRef> layer;

        bool loadedMetadata;
    };

    Reference<DirectorySubspace> openInternal(Standalone<StringRef> const& layer,
                                              Node const& existingNode,
                                              bool allowOpen);
    Future<Reference<DirectorySubspace>> createOrOpenInternal(Reference<Transaction> const& tr,
                                                              Path const& path,
                                                              Standalone<StringRef> const& layer,
                                                              Optional<Standalone<StringRef>> const& prefix,
                                                              bool allowCreate,
                                                              bool allowOpen);

    void initializeDirectory(Reference<Transaction> const& tr) const;
    Future<Void> checkVersion(Reference<Transaction> const& tr, bool writeAccess) const;

    template <class T>
    Optional<Subspace> nodeWithPrefix(Optional<T> const& prefix) const;
    Subspace nodeWithPrefix(StringRef const& prefix) const;

    Reference<DirectorySubspace> contentsOfNode(Subspace const& node,
                                                Path const& path,
                                                Standalone<StringRef> const& layer);

    Path toAbsolutePath(Path const& subpath) const;

    Subspace rootNode;
    Subspace nodeSubspace;
    Subspace contentSubspace;
    HighContentionAllocator allocator;
    bool allowManualPrefixes;

    Path path;
};
} // namespace FDB

#endif
@@ -28,34 +28,38 @@
#include "DirectoryLayer.h"

namespace FDB {
class DirectoryPartition : public DirectorySubspace {
public:
	DirectoryPartition(Path const& path, StringRef const& prefix, Reference<DirectoryLayer> parentDirectoryLayer)
	  : DirectorySubspace(path,
	                      prefix,
	                      Reference<DirectoryLayer>(new DirectoryLayer(
	                          Subspace(DirectoryLayer::DEFAULT_NODE_SUBSPACE_PREFIX.withPrefix(prefix)),
	                          Subspace(prefix))),
	                      DirectoryLayer::PARTITION_LAYER),
	    parentDirectoryLayer(parentDirectoryLayer) {
		this->directoryLayer->path = path;
	}
	virtual ~DirectoryPartition() {}

	virtual Key key() const { throw cannot_use_partition_as_subspace(); }
	virtual bool contains(KeyRef const& key) const { throw cannot_use_partition_as_subspace(); }

	virtual Key pack(Tuple const& tuple = Tuple()) const { throw cannot_use_partition_as_subspace(); }
	virtual Tuple unpack(KeyRef const& key) const { throw cannot_use_partition_as_subspace(); }
	virtual KeyRange range(Tuple const& tuple = Tuple()) const { throw cannot_use_partition_as_subspace(); }

	virtual Subspace subspace(Tuple const& tuple) const { throw cannot_use_partition_as_subspace(); }
	virtual Subspace get(Tuple const& tuple) const { throw cannot_use_partition_as_subspace(); }

protected:
	Reference<DirectoryLayer> parentDirectoryLayer;

	virtual Reference<DirectoryLayer> getDirectoryLayerForPath(Path const& path) const {
		return path.empty() ? parentDirectoryLayer : directoryLayer;
	}
};
} // namespace FDB

#endif

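// A minimal plain-C++ sketch of the pattern used by DirectoryPartition above
// (names here are hypothetical): the partition reuses the subspace interface
// but poisons direct key access, so its keyspace can only be reached through
// the directory layer.
#include <stdexcept>
#include <string>

struct SubspaceLike {
	virtual ~SubspaceLike() {}
	virtual std::string key() const { return prefix; }
	std::string prefix;
};

struct PartitionLike : SubspaceLike {
	// Any attempt to use the partition as a plain subspace fails loudly.
	std::string key() const override { throw std::logic_error("cannot use partition as subspace"); }
};
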
@@ -21,89 +21,100 @@
#include "DirectorySubspace.h"

namespace FDB {
DirectorySubspace::DirectorySubspace(Path const& path,
                                     StringRef const& prefix,
                                     Reference<DirectoryLayer> directoryLayer,
                                     Standalone<StringRef> const& layer)
  : Subspace(prefix), directoryLayer(directoryLayer), path(path), layer(layer) {}

Future<Reference<DirectorySubspace>> DirectorySubspace::create(Reference<Transaction> const& tr,
                                                               Path const& path,
                                                               Standalone<StringRef> const& layer,
                                                               Optional<Standalone<StringRef>> const& prefix) {
	return directoryLayer->create(tr, getPartitionSubpath(path), layer, prefix);
}

Future<Reference<DirectorySubspace>> DirectorySubspace::open(Reference<Transaction> const& tr,
                                                             Path const& path,
                                                             Standalone<StringRef> const& layer) {
	return directoryLayer->open(tr, getPartitionSubpath(path), layer);
}

Future<Reference<DirectorySubspace>> DirectorySubspace::createOrOpen(Reference<Transaction> const& tr,
                                                                     Path const& path,
                                                                     Standalone<StringRef> const& layer) {
	return directoryLayer->createOrOpen(tr, getPartitionSubpath(path), layer);
}

Future<bool> DirectorySubspace::exists(Reference<Transaction> const& tr, Path const& path) {
	Reference<DirectoryLayer> directoryLayer = getDirectoryLayerForPath(path);
	return directoryLayer->exists(tr, getPartitionSubpath(path, directoryLayer));
}

Future<Standalone<VectorRef<StringRef>>> DirectorySubspace::list(Reference<Transaction> const& tr, Path const& path) {
	return directoryLayer->list(tr, getPartitionSubpath(path));
}

Future<Reference<DirectorySubspace>> DirectorySubspace::move(Reference<Transaction> const& tr,
                                                             Path const& oldPath,
                                                             Path const& newPath) {
	return directoryLayer->move(tr, getPartitionSubpath(oldPath), getPartitionSubpath(newPath));
}

Future<Reference<DirectorySubspace>> DirectorySubspace::moveTo(Reference<Transaction> const& tr,
                                                               Path const& newAbsolutePath) {
	Reference<DirectoryLayer> directoryLayer = getDirectoryLayerForPath(Path());
	Path directoryLayerPath = directoryLayer->getPath();

	if (directoryLayerPath.size() > newAbsolutePath.size()) {
		return cannot_move_directory_between_partitions();
	}

	for (int i = 0; i < directoryLayerPath.size(); ++i) {
		if (directoryLayerPath[i] != newAbsolutePath[i]) {
			return cannot_move_directory_between_partitions();
		}
	}

	Path newRelativePath(newAbsolutePath.begin() + directoryLayerPath.size(), newAbsolutePath.end());
	return directoryLayer->move(tr, getPartitionSubpath(Path(), directoryLayer), newRelativePath);
}

Future<Void> DirectorySubspace::remove(Reference<Transaction> const& tr, Path const& path) {
	Reference<DirectoryLayer> directoryLayer = getDirectoryLayerForPath(path);
	return directoryLayer->remove(tr, getPartitionSubpath(path, directoryLayer));
}

Future<bool> DirectorySubspace::removeIfExists(Reference<Transaction> const& tr, Path const& path) {
	Reference<DirectoryLayer> directoryLayer = getDirectoryLayerForPath(path);
	return directoryLayer->removeIfExists(tr, getPartitionSubpath(path, directoryLayer));
}

Reference<DirectoryLayer> DirectorySubspace::getDirectoryLayer() {
	return directoryLayer;
}

const Standalone<StringRef> DirectorySubspace::getLayer() const {
	return layer;
}

const IDirectory::Path DirectorySubspace::getPath() const {
	return path;
}

IDirectory::Path DirectorySubspace::getPartitionSubpath(Path const& path,
                                                        Reference<DirectoryLayer> directoryLayer) const {
	if (!directoryLayer) {
		directoryLayer = this->directoryLayer;
	}

	Path newPath(this->path.begin() + directoryLayer->getPath().size(), this->path.end());
	newPath.insert(newPath.end(), path.begin(), path.end());

	return newPath;
}

Reference<DirectoryLayer> DirectorySubspace::getDirectoryLayerForPath(Path const& path) const {
	return directoryLayer;
}
} // namespace FDB

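// Plain-C++ sketch of what getPartitionSubpath() above computes: strip the
// directory layer's own path from the front of this subspace's path, then
// append the requested relative path. Vectors of strings stand in for
// IDirectory::Path; names are illustrative only.
#include <string>
#include <vector>

using PathSketch = std::vector<std::string>;

static PathSketch partitionSubpath(const PathSketch& subspacePath, const PathSketch& layerPath,
                                   const PathSketch& relative) {
	// subspacePath always begins with layerPath, so drop that shared prefix.
	PathSketch out(subspacePath.begin() + layerPath.size(), subspacePath.end());
	out.insert(out.end(), relative.begin(), relative.end());
	return out;
}
// partitionSubpath({"part", "app"}, {"part"}, {"users"}) yields {"app", "users"}.
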
@@ -28,39 +28,53 @@
#include "Subspace.h"

namespace FDB {
class DirectorySubspace : public IDirectory, public Subspace {
public:
	DirectorySubspace(Path const& path,
	                  StringRef const& prefix,
	                  Reference<DirectoryLayer> directorLayer,
	                  Standalone<StringRef> const& layer = Standalone<StringRef>());
	virtual ~DirectorySubspace() {}

	virtual Future<Reference<DirectorySubspace>> create(
	    Reference<Transaction> const& tr,
	    Path const& path,
	    Standalone<StringRef> const& layer = Standalone<StringRef>(),
	    Optional<Standalone<StringRef>> const& prefix = Optional<Standalone<StringRef>>());

	virtual Future<Reference<DirectorySubspace>> open(Reference<Transaction> const& tr,
	                                                  Path const& path,
	                                                  Standalone<StringRef> const& layer = Standalone<StringRef>());
	virtual Future<Reference<DirectorySubspace>> createOrOpen(
	    Reference<Transaction> const& tr,
	    Path const& path,
	    Standalone<StringRef> const& layer = Standalone<StringRef>());

	virtual Future<bool> exists(Reference<Transaction> const& tr, Path const& path = Path());
	virtual Future<Standalone<VectorRef<StringRef>>> list(Reference<Transaction> const& tr, Path const& path = Path());

	virtual Future<Reference<DirectorySubspace>> move(Reference<Transaction> const& tr,
	                                                  Path const& oldPath,
	                                                  Path const& newPath);
	virtual Future<Reference<DirectorySubspace>> moveTo(Reference<Transaction> const& tr, Path const& newAbsolutePath);

	virtual Future<Void> remove(Reference<Transaction> const& tr, Path const& path = Path());
	virtual Future<bool> removeIfExists(Reference<Transaction> const& tr, Path const& path = Path());

	virtual Reference<DirectoryLayer> getDirectoryLayer();
	virtual const Standalone<StringRef> getLayer() const;
	virtual const Path getPath() const;

protected:
	Reference<DirectoryLayer> directoryLayer;
	Path path;
	Standalone<StringRef> layer;

	virtual Path getPartitionSubpath(Path const& path,
	                                 Reference<DirectoryLayer> directoryLayer = Reference<DirectoryLayer>()) const;
	virtual Reference<DirectoryLayer> getDirectoryLayerForPath(Path const& path) const;
};
} // namespace FDB

#endif

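// A minimal usage sketch, assuming an ACTOR context with an open transaction;
// the directory name and values below are hypothetical, but the calls match
// the DirectorySubspace interface declared above.
ACTOR Future<Void> storeUserRecordSketch(Reference<Transaction> tr, Reference<DirectorySubspace> root) {
	state IDirectory::Path subdir;
	subdir.push_back(LiteralStringRef("users"));

	// createOrOpen() resolves (or allocates) the subdirectory and returns a
	// DirectorySubspace whose key prefix was assigned by the directory layer.
	state Reference<DirectorySubspace> users = wait(root->createOrOpen(tr, subdir));

	// Keys are then packed relative to that prefix.
	tr->set(users->pack(LiteralStringRef("alice")), LiteralStringRef("profile-v1"));
	return Void();
}
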
@@ -22,292 +22,304 @@
#define FDB_FLOW_LOANER_TYPES_H

namespace FDB {
typedef StringRef KeyRef;
typedef StringRef ValueRef;

typedef int64_t Version;

typedef Standalone<KeyRef> Key;
typedef Standalone<ValueRef> Value;

inline Key keyAfter(const KeyRef& key) {
	if (key == LiteralStringRef("\xff\xff"))
		return key;

	Standalone<StringRef> r;
	uint8_t* s = new (r.arena()) uint8_t[key.size() + 1];
	memcpy(s, key.begin(), key.size());
	s[key.size()] = 0;
	((StringRef&)r) = StringRef(s, key.size() + 1);
	return r;
}

inline KeyRef keyAfter(const KeyRef& key, Arena& arena) {
	if (key == LiteralStringRef("\xff\xff"))
		return key;
	uint8_t* t = new (arena) uint8_t[key.size() + 1];
	memcpy(t, key.begin(), key.size());
	t[key.size()] = 0;
	return KeyRef(t, key.size() + 1);
}

struct KeySelectorRef {
	KeyRef key; // Find the last item less than key
	bool orEqual; // (or equal to key, if this is true)
	int offset; // and then move forward this many items (or backward if negative)
	KeySelectorRef() {}
	KeySelectorRef(const KeyRef& key, bool orEqual, int offset) : key(key), orEqual(orEqual), offset(offset) {}

	KeySelectorRef(Arena& arena, const KeySelectorRef& copyFrom)
	  : key(arena, copyFrom.key), orEqual(copyFrom.orEqual), offset(copyFrom.offset) {}
	int expectedSize() const { return key.expectedSize(); }

	// std::string toString() const {
	//	if (offset > 0) {
	//		if (orEqual) return format("firstGreaterThan(%s)%+d", printable(key).c_str(), offset-1);
	//		else return format("firstGreaterOrEqual(%s)%+d", printable(key).c_str(), offset-1);
	//	} else {
	//		if (orEqual) return format("lastLessOrEqual(%s)%+d", printable(key).c_str(), offset);
	//		else return format("lastLessThan(%s)%+d", printable(key).c_str(), offset);
	//	}
	// }

	bool isBackward() const {
		return !orEqual && offset <= 0;
	} // True if the resolution of the KeySelector depends only on keys less than key
	bool isFirstGreaterOrEqual() const { return !orEqual && offset == 1; }
	bool isFirstGreaterThan() const { return orEqual && offset == 1; }
	bool isLastLessOrEqual() const { return orEqual && offset == 0; }

	// True iff, regardless of the contents of the database, lhs must resolve to a key > rhs
	bool isDefinitelyGreater(KeyRef const& k) { return offset >= 1 && (isFirstGreaterOrEqual() ? key > k : key >= k); }
	// True iff, regardless of the contents of the database, lhs must resolve to a key < rhs
	bool isDefinitelyLess(KeyRef const& k) { return offset <= 0 && (isLastLessOrEqual() ? key < k : key <= k); }

	template <class Ar>
	void serialize(Ar& ar) {
		serializer(ar, key, orEqual, offset);
	}
};
inline bool operator==(const KeySelectorRef& lhs, const KeySelectorRef& rhs) {
	return lhs.key == rhs.key && lhs.orEqual == rhs.orEqual && lhs.offset == rhs.offset;
}
inline KeySelectorRef lastLessThan(const KeyRef& k) {
	return KeySelectorRef(k, false, 0);
}
inline KeySelectorRef lastLessOrEqual(const KeyRef& k) {
	return KeySelectorRef(k, true, 0);
}
inline KeySelectorRef firstGreaterThan(const KeyRef& k) {
	return KeySelectorRef(k, true, +1);
}
inline KeySelectorRef firstGreaterOrEqual(const KeyRef& k) {
	return KeySelectorRef(k, false, +1);
}
inline KeySelectorRef operator+(const KeySelectorRef& s, int off) {
	return KeySelectorRef(s.key, s.orEqual, s.offset + off);
}
inline KeySelectorRef operator-(const KeySelectorRef& s, int off) {
	return KeySelectorRef(s.key, s.orEqual, s.offset - off);
}

typedef Standalone<KeySelectorRef> KeySelector;

struct KeyValueRef {
	KeyRef key;
	ValueRef value;
	KeyValueRef() {}
	KeyValueRef(const KeyRef& key, const ValueRef& value) : key(key), value(value) {}
	KeyValueRef(Arena& a, const KeyValueRef& copyFrom) : key(a, copyFrom.key), value(a, copyFrom.value) {}
	bool operator==(const KeyValueRef& r) const { return key == r.key && value == r.value; }

	int expectedSize() const { return key.expectedSize() + value.expectedSize(); }

	template <class Ar>
	force_inline void serialize(Ar& ar) {
		serializer(ar, key, value);
	}

	struct OrderByKey {
		bool operator()(KeyValueRef const& a, KeyValueRef const& b) const { return a.key < b.key; }
		template <class T>
		bool operator()(T const& a, KeyValueRef const& b) const {
			return a < b.key;
		}
		template <class T>
		bool operator()(KeyValueRef const& a, T const& b) const {
			return a.key < b;
		}
	};

	struct OrderByKeyBack {
		bool operator()(KeyValueRef const& a, KeyValueRef const& b) const { return a.key > b.key; }
		template <class T>
		bool operator()(T const& a, KeyValueRef const& b) const {
			return a > b.key;
		}
		template <class T>
		bool operator()(KeyValueRef const& a, T const& b) const {
			return a.key > b;
		}
	};
};

typedef Standalone<KeyValueRef> KeyValue;

struct RangeResultRef : VectorRef<KeyValueRef> {
	bool more; // True if (but not necessarily only if) values remain in the *key* range requested (possibly beyond the
	           // limits requested)
	           // False implies that no such values remain
	Optional<KeyRef> readThrough; // Only present when 'more' is true. When present, this value represents the end (or
	                              // beginning if reverse) of the range
	                              // which was read to produce these results. This is guaranteed to be less than the
	                              // requested range.
	bool readToBegin;
	bool readThroughEnd;

	RangeResultRef() : more(false), readToBegin(false), readThroughEnd(false) {}
	RangeResultRef(Arena& p, const RangeResultRef& toCopy)
	  : more(toCopy.more), readToBegin(toCopy.readToBegin), readThroughEnd(toCopy.readThroughEnd),
	    readThrough(toCopy.readThrough.present() ? KeyRef(p, toCopy.readThrough.get()) : Optional<KeyRef>()),
	    VectorRef<KeyValueRef>(p, toCopy) {}
	RangeResultRef(const VectorRef<KeyValueRef>& value, bool more, Optional<KeyRef> readThrough = Optional<KeyRef>())
	  : VectorRef<KeyValueRef>(value), more(more), readThrough(readThrough), readToBegin(false), readThroughEnd(false) {
	}
	RangeResultRef(bool readToBegin, bool readThroughEnd)
	  : more(false), readToBegin(readToBegin), readThroughEnd(readThroughEnd) {}

	template <class Ar>
	void serialize(Ar& ar) {
		serializer(ar, ((VectorRef<KeyValueRef>&)*this), more, readThrough, readToBegin, readThroughEnd);
	}
};

struct GetRangeLimits {
	enum { ROW_LIMIT_UNLIMITED = -1, BYTE_LIMIT_UNLIMITED = -1 };

	int rows;
	int minRows;
	int bytes;

	GetRangeLimits() : rows(ROW_LIMIT_UNLIMITED), minRows(1), bytes(BYTE_LIMIT_UNLIMITED) {}
	explicit GetRangeLimits(int rowLimit) : rows(rowLimit), minRows(1), bytes(BYTE_LIMIT_UNLIMITED) {}
	GetRangeLimits(int rowLimit, int byteLimit) : rows(rowLimit), minRows(1), bytes(byteLimit) {}

	void decrement(VectorRef<KeyValueRef> const& data);
	void decrement(KeyValueRef const& data);

	// True if either the row or byte limit has been reached
	bool isReached();

	// True if data would cause the row or byte limit to be reached
	bool reachedBy(VectorRef<KeyValueRef> const& data);

	bool hasByteLimit();
	bool hasRowLimit();

	bool hasSatisfiedMinRows();
	bool isValid() {
		return (rows >= 0 || rows == ROW_LIMIT_UNLIMITED) && (bytes >= 0 || bytes == BYTE_LIMIT_UNLIMITED) &&
		       minRows >= 0 && (minRows <= rows || rows == ROW_LIMIT_UNLIMITED);
	}
};

struct KeyRangeRef {
	const KeyRef begin, end;
	KeyRangeRef() {}
	KeyRangeRef(const KeyRef& begin, const KeyRef& end) : begin(begin), end(end) {
		if (begin > end) {
			throw inverted_range();
		}
	}
	KeyRangeRef(Arena& a, const KeyRangeRef& copyFrom) : begin(a, copyFrom.begin), end(a, copyFrom.end) {}
	bool operator==(const KeyRangeRef& r) const { return begin == r.begin && end == r.end; }
	bool operator!=(const KeyRangeRef& r) const { return begin != r.begin || end != r.end; }
	bool contains(const KeyRef& key) const { return begin <= key && key < end; }
	bool contains(const KeyRangeRef& keys) const { return begin <= keys.begin && keys.end <= end; }
	bool intersects(const KeyRangeRef& keys) const { return begin < keys.end && keys.begin < end; }
	bool empty() const { return begin == end; }

	Standalone<KeyRangeRef> withPrefix(const StringRef& prefix) const {
		return KeyRangeRef(begin.withPrefix(prefix), end.withPrefix(prefix));
	}

	const KeyRangeRef& operator=(const KeyRangeRef& rhs) {
		const_cast<KeyRef&>(begin) = rhs.begin;
		const_cast<KeyRef&>(end) = rhs.end;
		return *this;
	}

	int expectedSize() const { return begin.expectedSize() + end.expectedSize(); }

	template <class Ar>
	force_inline void serialize(Ar& ar) {
		serializer(ar, const_cast<KeyRef&>(begin), const_cast<KeyRef&>(end));
		if (begin > end) {
			throw inverted_range();
		};
	}

	struct ArbitraryOrder {
		bool operator()(KeyRangeRef const& a, KeyRangeRef const& b) const {
			if (a.begin < b.begin)
				return true;
			if (a.begin > b.begin)
				return false;
			return a.end < b.end;
		}
	};
};

inline KeyRangeRef operator&(const KeyRangeRef& lhs, const KeyRangeRef& rhs) {
	KeyRef b = std::max(lhs.begin, rhs.begin), e = std::min(lhs.end, rhs.end);
	if (e < b)
		return KeyRangeRef();
	return KeyRangeRef(b, e);
}

typedef Standalone<KeyRangeRef> KeyRange;

template <class T>
static std::string describe(T const& item) {
	return item.toString();
}
template <class K, class V>
static std::string describe(std::map<K, V> const& items, int max_items = -1) {
	if (!items.size())
		return "[no items]";

	std::string s;
	int count = 0;
	for (auto it = items.begin(); it != items.end(); it++) {
		if (++count > max_items && max_items >= 0)
			break;
		if (count > 1)
			s += ",";
		s += describe(it->first) + "=>" + describe(it->second);
	}
	return s;
}

template <class T>
static std::string describeList(T const& items, int max_items) {
	if (!items.size())
		return "[no items]";

	std::string s;
	int count = 0;
	for (auto const& item : items) {
		if (++count > max_items && max_items >= 0)
			break;
		if (count > 1)
			s += ",";
		s += describe(item);
	}
	return s;
}

template <class T>
static std::string describe(std::vector<T> const& items, int max_items = -1) {
	return describeList(items, max_items);
}

template <class T>
static std::string describe(std::set<T> const& items, int max_items = -1) {
	return describeList(items, max_items);
}

template <class T1, class T2>
static std::string describe(std::pair<T1, T2> const& pair) {
	return "first: " + describe(pair.first) + " second: " + describe(pair.second);
}
} // namespace FDB

#endif /* FDB_LOANER_TYPES_H */

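// A plain-C++ sketch (no FDB types) of how a KeySelector like the ones above
// resolves against a sorted key list: find the last key < the anchor (or <=
// when orEqual), then step by offset. Names here are illustrative only.
#include <algorithm>
#include <string>
#include <vector>

static int resolveSelector(const std::vector<std::string>& keys, const std::string& anchor, bool orEqual, int offset) {
	// Boundary: first key >= anchor (or > anchor when orEqual).
	auto it = orEqual ? std::upper_bound(keys.begin(), keys.end(), anchor)
	                  : std::lower_bound(keys.begin(), keys.end(), anchor);
	int base = int(it - keys.begin()) - 1; // index of the last key before the boundary
	return base + offset; // may land outside [0, keys.size()), just as in FDB
}

// resolveSelector(keys, k, false, +1) mirrors firstGreaterOrEqual(k);
// resolveSelector(keys, k, true, 0) mirrors lastLessOrEqual(k).
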
@@ -21,90 +21,90 @@
#include "HighContentionAllocator.h"

namespace FDB {
ACTOR Future<Standalone<StringRef>> _allocate(Reference<Transaction> tr, Subspace counters, Subspace recent) {
	state int64_t start = 0;
	state int64_t window = 0;

	loop {
		FDBStandalone<RangeResultRef> range = wait(tr->getRange(counters.range(), 1, true, true));

		if (range.size() > 0) {
			start = counters.unpack(range[0].key).getInt(0);
		}

		state bool windowAdvanced = false;
		loop {
			// if thread safety is needed, this should be locked {
			if (windowAdvanced) {
				tr->clear(KeyRangeRef(counters.key(), counters.get(start).key()));
				tr->setOption(FDBTransactionOption::FDB_TR_OPTION_NEXT_WRITE_NO_WRITE_CONFLICT_RANGE);
				tr->clear(KeyRangeRef(recent.key(), recent.get(start).key()));
			}

			int64_t inc = 1;
			tr->atomicOp(counters.get(start).key(), StringRef((uint8_t*)&inc, 8), FDB_MUTATION_TYPE_ADD);
			Future<Optional<FDBStandalone<ValueRef>>> countFuture = tr->get(counters.get(start).key(), true);
			// }

			Optional<FDBStandalone<ValueRef>> countValue = wait(countFuture);

			int64_t count = 0;
			if (countValue.present()) {
				if (countValue.get().size() != 8) {
					throw invalid_directory_layer_metadata();
				}
				count = *(int64_t*)countValue.get().begin();
			}

			window = HighContentionAllocator::windowSize(start);
			if (count * 2 < window) {
				break;
			}

			start += window;
			windowAdvanced = true;
		}

		loop {
			state int64_t candidate = deterministicRandom()->randomInt(start, start + window);

			// if thread safety is needed, this should be locked {
			state Future<FDBStandalone<RangeResultRef>> latestCounter = tr->getRange(counters.range(), 1, true, true);
			state Future<Optional<FDBStandalone<ValueRef>>> candidateValue = tr->get(recent.get(candidate).key());
			tr->setOption(FDBTransactionOption::FDB_TR_OPTION_NEXT_WRITE_NO_WRITE_CONFLICT_RANGE);
			tr->set(recent.get(candidate).key(), ValueRef());
			// }

			wait(success(latestCounter) && success(candidateValue));
			int64_t currentWindowStart = 0;
			if (latestCounter.get().size() > 0) {
				currentWindowStart = counters.unpack(latestCounter.get()[0].key).getInt(0);
			}

			if (currentWindowStart > start) {
				break;
			}

			if (!candidateValue.get().present()) {
				tr->addWriteConflictKey(recent.get(candidate).key());
				return Tuple().append(candidate).pack();
			}
		}
	}
}

Future<Standalone<StringRef>> HighContentionAllocator::allocate(Reference<Transaction> const& tr) const {
	return _allocate(tr, counters, recent);
}

int64_t HighContentionAllocator::windowSize(int64_t start) {
	if (start < 255) {
		return 64;
	}
	if (start < 65535) {
		return 1024;
	}

	return 8192;
}
} // namespace FDB

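// Sketch of the window progression used by _allocate() above: candidates are
// drawn at random from [start, start + windowSize(start)), and the window
// advances once the counter shows it is at least half full. This standalone
// program just walks the first few windows to show how the ranges grow.
#include <cstdint>
#include <cstdio>

static int64_t windowSizeSketch(int64_t start) {
	if (start < 255)
		return 64;
	if (start < 65535)
		return 1024;
	return 8192;
}

int main() {
	int64_t start = 0;
	for (int i = 0; i < 5; ++i) {
		int64_t window = windowSizeSketch(start);
		std::printf("window %d: [%lld, %lld)\n", i, (long long)start, (long long)(start + window));
		start += window; // what happens when a window fills up
	}
	return 0;
}
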
@@ -26,16 +26,17 @@
#include "Subspace.h"

namespace FDB {
class HighContentionAllocator {
public:
	HighContentionAllocator(Subspace subspace) : counters(subspace.get(0)), recent(subspace.get(1)) {}
	Future<Standalone<StringRef>> allocate(Reference<Transaction> const& tr) const;

	static int64_t windowSize(int64_t start);

private:
	Subspace counters;
	Subspace recent;
};
} // namespace FDB

#endif

@@ -27,34 +27,46 @@
#include "bindings/flow/fdb_flow.h"

namespace FDB {
class DirectoryLayer;
class DirectorySubspace;

class IDirectory : public ReferenceCounted<IDirectory> {
public:
	typedef std::vector<Standalone<StringRef>> Path;

	virtual Future<Reference<DirectorySubspace>> create(
	    Reference<Transaction> const& tr,
	    Path const& path,
	    Standalone<StringRef> const& layer = Standalone<StringRef>(),
	    Optional<Standalone<StringRef>> const& prefix = Optional<Standalone<StringRef>>()) = 0;

	virtual Future<Reference<DirectorySubspace>> open(Reference<Transaction> const& tr,
	                                                  Path const& path,
	                                                  Standalone<StringRef> const& layer = Standalone<StringRef>()) = 0;
	virtual Future<Reference<DirectorySubspace>> createOrOpen(
	    Reference<Transaction> const& tr,
	    Path const& path,
	    Standalone<StringRef> const& layer = Standalone<StringRef>()) = 0;

	virtual Future<bool> exists(Reference<Transaction> const& tr, Path const& path = Path()) = 0;
	virtual Future<Standalone<VectorRef<StringRef>>> list(Reference<Transaction> const& tr,
	                                                      Path const& path = Path()) = 0;

	virtual Future<Reference<DirectorySubspace>> move(Reference<Transaction> const& tr,
	                                                  Path const& oldPath,
	                                                  Path const& newPath) = 0;
	virtual Future<Reference<DirectorySubspace>> moveTo(Reference<Transaction> const& tr,
	                                                    Path const& newAbsolutePath) = 0;

	virtual Future<Void> remove(Reference<Transaction> const& tr, Path const& path = Path()) = 0;
	virtual Future<bool> removeIfExists(Reference<Transaction> const& tr, Path const& path = Path()) = 0;

	virtual Reference<DirectoryLayer> getDirectoryLayer() = 0;
	virtual const Standalone<StringRef> getLayer() const = 0;
	virtual const Path getPath() const = 0;

	virtual ~IDirectory(){};
};
} // namespace FDB

#endif

@@ -21,50 +21,49 @@
#include "DirectoryLayer.h"

namespace FDB {
DirectoryLayer::Node::Node(Reference<DirectoryLayer> const& directoryLayer,
                           Optional<Subspace> const& subspace,
                           IDirectory::Path const& path,
                           IDirectory::Path const& targetPath)
  : directoryLayer(directoryLayer), subspace(subspace), path(path), targetPath(targetPath), loadedMetadata(false) {}

bool DirectoryLayer::Node::exists() const {
	return subspace.present();
}

ACTOR Future<DirectoryLayer::Node> loadMetadata(DirectoryLayer::Node* n, Reference<Transaction> tr) {
	if (!n->exists()) {
		n->loadedMetadata = true;

		return *n;
	}

	Optional<FDBStandalone<ValueRef>> layer = wait(tr->get(n->subspace.get().pack(DirectoryLayer::LAYER_KEY)));

	n->layer = layer.present() ? layer.get() : Standalone<StringRef>();
	n->loadedMetadata = true;

	return *n;
}

// Calls to loadMetadata must keep the Node alive while the future is outstanding
Future<DirectoryLayer::Node> DirectoryLayer::Node::loadMetadata(Reference<Transaction> tr) {
	return FDB::loadMetadata(this, tr);
}

bool DirectoryLayer::Node::isInPartition(bool includeEmptySubpath) const {
	ASSERT(loadedMetadata);
	return exists() && layer == DirectoryLayer::PARTITION_LAYER &&
	       (includeEmptySubpath || targetPath.size() > path.size());
}

IDirectory::Path DirectoryLayer::Node::getPartitionSubpath() const {
	return Path(targetPath.begin() + path.size(), targetPath.end());
}

Reference<DirectorySubspace> DirectoryLayer::Node::getContents() const {
	ASSERT(exists());
	ASSERT(loadedMetadata);

	return directoryLayer->contentsOfNode(subspace.get(), path, layer);
}
} // namespace FDB

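// Plain-C++ sketch of the delegation idiom above: ACTOR functions cannot be
// member functions, so the method forwards `this` to a free function, and the
// caller must keep the object alive until the result is consumed. std::future
// stands in for flow's Future; all names here are hypothetical.
#include <future>

struct NodeSketch {
	bool loadedMetadata = false;
	std::future<NodeSketch> loadMetadata();
};

static NodeSketch loadMetadataSketch(NodeSketch* n) { // free function doing the real work
	n->loadedMetadata = true;
	return *n;
}

std::future<NodeSketch> NodeSketch::loadMetadata() {
	return std::async(std::launch::deferred, loadMetadataSketch, this);
}

int main() {
	NodeSketch n;                              // the caller keeps n alive...
	NodeSketch loaded = n.loadMetadata().get(); // ...until the future resolves
	return loaded.loadedMetadata ? 0 : 1;
}
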
@ -21,71 +21,72 @@
|
|||
#include "Subspace.h"
|
||||
|
||||
namespace FDB {
|
||||
Subspace::Subspace(Tuple const& tuple, StringRef const& rawPrefix){
|
||||
StringRef packed = tuple.pack();
|
||||
Subspace::Subspace(Tuple const& tuple, StringRef const& rawPrefix) {
|
||||
StringRef packed = tuple.pack();
|
||||
|
||||
this->rawPrefix.reserve(this->rawPrefix.arena(), rawPrefix.size() + packed.size());
|
||||
this->rawPrefix.append(this->rawPrefix.arena(), rawPrefix.begin(), rawPrefix.size());
|
||||
this->rawPrefix.append(this->rawPrefix.arena(), packed.begin(), packed.size());
|
||||
this->rawPrefix.reserve(this->rawPrefix.arena(), rawPrefix.size() + packed.size());
|
||||
this->rawPrefix.append(this->rawPrefix.arena(), rawPrefix.begin(), rawPrefix.size());
|
||||
this->rawPrefix.append(this->rawPrefix.arena(), packed.begin(), packed.size());
|
||||
}
|
||||
|
||||
Subspace::Subspace(Tuple const& tuple, Standalone<VectorRef<uint8_t>> const& rawPrefix) {
|
||||
this->rawPrefix.reserve(this->rawPrefix.arena(), rawPrefix.size() + tuple.pack().size());
|
||||
this->rawPrefix.append(this->rawPrefix.arena(), rawPrefix.begin(), rawPrefix.size());
|
||||
this->rawPrefix.append(this->rawPrefix.arena(), tuple.pack().begin(), tuple.pack().size());
|
||||
}
|
||||
|
||||
Subspace::Subspace(StringRef const& rawPrefix) {
|
||||
this->rawPrefix.append(this->rawPrefix.arena(), rawPrefix.begin(), rawPrefix.size());
|
||||
}
|
||||
|
||||
Subspace::~Subspace() {}
|
||||
|
||||
Key Subspace::key() const {
|
||||
return StringRef(rawPrefix.begin(), rawPrefix.size());
|
||||
}
|
||||
|
||||
Key Subspace::pack(const Tuple& tuple) const {
|
||||
return tuple.pack().withPrefix(StringRef(rawPrefix.begin(), rawPrefix.size()));
|
||||
}
|
||||
|
||||
Tuple Subspace::unpack(StringRef const& key) const {
|
||||
	if (!contains(key)) {
		throw key_not_in_subspace();
	}
	return Tuple::unpack(key.substr(rawPrefix.size()));
}

Subspace::Subspace(Tuple const& tuple, Standalone<VectorRef<uint8_t>> const& rawPrefix) {
	this->rawPrefix.reserve(this->rawPrefix.arena(), rawPrefix.size() + tuple.pack().size());
	this->rawPrefix.append(this->rawPrefix.arena(), rawPrefix.begin(), rawPrefix.size());
	this->rawPrefix.append(this->rawPrefix.arena(), tuple.pack().begin(), tuple.pack().size());
}

Subspace::Subspace(StringRef const& rawPrefix) {
	this->rawPrefix.append(this->rawPrefix.arena(), rawPrefix.begin(), rawPrefix.size());
}

Subspace::~Subspace() {}

Key Subspace::key() const {
	return StringRef(rawPrefix.begin(), rawPrefix.size());
}

Key Subspace::pack(const Tuple& tuple) const {
	return tuple.pack().withPrefix(StringRef(rawPrefix.begin(), rawPrefix.size()));
}

KeyRange Subspace::range(Tuple const& tuple) const {
	VectorRef<uint8_t> begin;
	VectorRef<uint8_t> end;

	KeyRange keyRange;

	begin.reserve(keyRange.arena(), rawPrefix.size() + tuple.pack().size() + 1);
	begin.append(keyRange.arena(), rawPrefix.begin(), rawPrefix.size());
	begin.append(keyRange.arena(), tuple.pack().begin(), tuple.pack().size());
	begin.push_back(keyRange.arena(), uint8_t('\x00'));

	end.reserve(keyRange.arena(), rawPrefix.size() + tuple.pack().size() + 1);
	end.append(keyRange.arena(), rawPrefix.begin(), rawPrefix.size());
	end.append(keyRange.arena(), tuple.pack().begin(), tuple.pack().size());
	end.push_back(keyRange.arena(), uint8_t('\xff'));

	// FIXME: test that this uses the keyRange arena and doesn't create another one
	keyRange.KeyRangeRef::operator=(
	    KeyRangeRef(StringRef(begin.begin(), begin.size()), StringRef(end.begin(), end.size())));
	return keyRange;
}

bool Subspace::contains(KeyRef const& key) const {
	return key.startsWith(StringRef(rawPrefix.begin(), rawPrefix.size()));
}

Subspace Subspace::subspace(Tuple const& tuple) const {
	return Subspace(tuple, rawPrefix);
}

Subspace Subspace::get(Tuple const& tuple) const {
	return subspace(tuple);
}

} // namespace FDB
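As a usage note for the Subspace implementation above, here is a minimal sketch of composing and decomposing keys; the subspace name and keys are illustrative, not part of this commit.

// Sketch only: pack()/unpack()/range() round trip (illustrative names).
void subspaceUsageSketch() {
	FDB::Subspace users(FDB::Tuple().append(LiteralStringRef("users")));

	// pack() prefixes the tuple encoding with the subspace's raw prefix.
	Key k = users.pack(FDB::Tuple().append(LiteralStringRef("alice")));

	// range() brackets exactly the keys that unpack() would accept.
	KeyRange all = users.range();
	ASSERT(users.contains(k));
	FDB::Tuple back = users.unpack(k); // recovers ("alice")
}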
@@ -28,65 +28,65 @@
#include "Tuple.h"

namespace FDB {
class Subspace {
public:
	Subspace(Tuple const& tuple = Tuple(), StringRef const& rawPrefix = StringRef());
	Subspace(StringRef const& rawPrefix);

	virtual ~Subspace();

	virtual Key key() const;
	virtual bool contains(KeyRef const& key) const;

	virtual Key pack(Tuple const& tuple = Tuple()) const;
	virtual Tuple unpack(KeyRef const& key) const;
	virtual KeyRange range(Tuple const& tuple = Tuple()) const;

	template <class T>
	Key pack(T const& item) const {
		Tuple t;
		t.append(item);
		return pack(t);
	}

	Key packNested(Tuple const& item) const {
		Tuple t;
		t.appendNested(item);
		return pack(t);
	}

	Key pack(StringRef const& item, bool utf8 = false) const {
		Tuple t;
		t.append(item, utf8);
		return pack(t);
	}

	virtual Subspace subspace(Tuple const& tuple) const;
	virtual Subspace get(Tuple const& tuple) const;

	template <class T>
	Subspace get(T const& item) const {
		Tuple t;
		t.append(item);
		return get(t);
	}

	Subspace getNested(Tuple const& item) const {
		Tuple t;
		t.appendNested(item);
		return get(t);
	}

	Subspace get(StringRef const& item, bool utf8 = false) const {
		Tuple t;
		t.append(item, utf8);
		return get(t);
	}

private:
	Subspace(Tuple const& tuple, Standalone<VectorRef<uint8_t>> const& rawPrefix);
	Standalone<VectorRef<uint8_t>> rawPrefix;
};
} // namespace FDB

#endif
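The templated pack()/get() overloads above simply wrap a single item in a one-element Tuple; a hedged sketch of the resulting call style follows (identifiers are illustrative).

// Sketch only: chaining get() to derive nested subspaces (illustrative names).
Key nestedKeySketch() {
	FDB::Subspace app(LiteralStringRef("app")); // raw-prefix constructor
	FDB::Subspace inbox = app.get(LiteralStringRef("inbox")).get((int64_t)42);
	return inbox.pack(LiteralStringRef("msg-1")); // prefix + "inbox" + 42 + "msg-1"
}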
File diff suppressed because it is too large
@@ -26,92 +26,93 @@
#include "bindings/flow/fdb_flow.h"

namespace FDB {
struct Uuid {
	const static size_t SIZE;

	Uuid(StringRef const& data);

	StringRef getData() const;

	// Comparisons
	bool operator==(Uuid const& other) const;
	bool operator!=(Uuid const& other) const;
	bool operator<(Uuid const& other) const;
	bool operator<=(Uuid const& other) const;
	bool operator>(Uuid const& other) const;
	bool operator>=(Uuid const& other) const;

private:
	Standalone<StringRef> data;
};

struct Tuple {
	Tuple() {}

	static Tuple unpack(StringRef const& str);

	Tuple& append(Tuple const& tuple);
	Tuple& append(StringRef const& str, bool utf8 = false);
	Tuple& append(int32_t);
	Tuple& append(int64_t);
	Tuple& append(bool);
	Tuple& append(float);
	Tuple& append(double);
	Tuple& append(Uuid);
	Tuple& appendNested(Tuple const&);
	Tuple& appendNull();

	StringRef pack() const { return StringRef(data.begin(), data.size()); }

	template <typename T>
	Tuple& operator<<(T const& t) {
		return append(t);
	}

	enum ElementType { NULL_TYPE, INT, BYTES, UTF8, BOOL, FLOAT, DOUBLE, UUID, NESTED };

	// this is number of elements, not length of data
	size_t size() const { return offsets.size(); }

	ElementType getType(size_t index) const;
	Standalone<StringRef> getString(size_t index) const;
	int64_t getInt(size_t index) const;
	bool getBool(size_t index) const;
	float getFloat(size_t index) const;
	double getDouble(size_t index) const;
	Uuid getUuid(size_t index) const;
	Tuple getNested(size_t index) const;

	KeyRange range(Tuple const& tuple = Tuple()) const;

	Tuple subTuple(size_t beginIndex, size_t endIndex = std::numeric_limits<size_t>::max()) const;

	// Comparisons
	bool operator==(Tuple const& other) const;
	bool operator!=(Tuple const& other) const;
	bool operator<(Tuple const& other) const;
	bool operator<=(Tuple const& other) const;
	bool operator>(Tuple const& other) const;
	bool operator>=(Tuple const& other) const;

private:
	static const uint8_t NULL_CODE;
	static const uint8_t BYTES_CODE;
	static const uint8_t STRING_CODE;
	static const uint8_t NESTED_CODE;
	static const uint8_t INT_ZERO_CODE;
	static const uint8_t POS_INT_END;
	static const uint8_t NEG_INT_START;
	static const uint8_t FLOAT_CODE;
	static const uint8_t DOUBLE_CODE;
	static const uint8_t FALSE_CODE;
	static const uint8_t TRUE_CODE;
	static const uint8_t UUID_CODE;

	Tuple(const StringRef& data);
	Tuple(Standalone<VectorRef<uint8_t>> data, std::vector<size_t> offsets);
	Standalone<VectorRef<uint8_t>> data;
	std::vector<size_t> offsets;
};
} // namespace FDB

#endif /* _FDB_TUPLE_H_ */
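A brief sketch of the Tuple API declared above, showing the order-preserving round trip; the values are illustrative, not from this commit.

// Sketch only: building, packing, and unpacking a Tuple (illustrative values).
void tupleRoundTripSketch() {
	FDB::Tuple t;
	t << LiteralStringRef("score") << (int64_t)100 << true; // operator<< forwards to append()

	FDB::Tuple round = FDB::Tuple::unpack(t.pack());
	ASSERT(round.size() == 3); // size() counts elements, not bytes
	ASSERT(round.getInt(1) == 100);
	ASSERT(round.getType(2) == FDB::Tuple::BOOL);
}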
@@ -20,6 +20,7 @@

#include "fdb_flow.h"

#include <cstdint>
#include <stdio.h>
#include <cinttypes>
@@ -36,41 +37,42 @@ THREAD_FUNC networkThread(void* fdb) {
}

ACTOR Future<Void> _test() {
	API* fdb = FDB::API::selectAPIVersion(710);
	auto db = fdb->createDatabase();
	state Reference<Transaction> tr = db->createTransaction();

	// tr->setVersion(1);

	Version ver = wait(tr->getReadVersion());
	printf("%" PRId64 "\n", ver);

	state std::vector<Future<Version>> versions;

	state double starttime = timer_monotonic();
	state int i;
	// for (i = 0; i < 100000; i++) {
	// 	Version v = wait( tr->getReadVersion() );
	// }
	for (i = 0; i < 100000; i++) {
		versions.push_back(tr->getReadVersion());
	}
	for (i = 0; i < 100000; i++) {
		Version v = wait(versions[i]);
	}
	// wait( waitForAllReady( versions ) );
	printf("Elapsed: %lf\n", timer_monotonic() - starttime);

	tr->set(LiteralStringRef("foo"), LiteralStringRef("bar"));

	Optional<FDBStandalone<ValueRef>> v = wait(tr->get(LiteralStringRef("foo")));
	if (v.present()) {
		printf("%s\n", v.get().toString().c_str());
	}

	FDBStandalone<RangeResultRef> r =
	    wait(tr->getRange(KeyRangeRef(LiteralStringRef("a"), LiteralStringRef("z")), 100));

	for (auto kv : r) {
		printf("%s is %s\n", kv.key.toString().c_str(), kv.value.toString().c_str());
	}
@@ -79,7 +81,7 @@ ACTOR Future<Void> _test() {
}

void fdb_flow_test() {
	API* fdb = FDB::API::selectAPIVersion(710);
	fdb->setupNetwork();
	startThread(networkThread, fdb);
@@ -94,354 +96,432 @@ void fdb_flow_test() {
	g_network->run();
}

// FDB object used by bindings
namespace FDB {
class DatabaseImpl : public Database, NonCopyable {
public:
	virtual ~DatabaseImpl() { fdb_database_destroy(db); }

	Reference<Transaction> createTransaction() override;
	void setDatabaseOption(FDBDatabaseOption option, Optional<StringRef> value = Optional<StringRef>()) override;
	Future<int64_t> rebootWorker(const StringRef& address, bool check = false, int duration = 0) override;
	Future<Void> forceRecoveryWithDataLoss(const StringRef& dcid) override;
	Future<Void> createSnapshot(const StringRef& uid, const StringRef& snap_command) override;

private:
	FDBDatabase* db;
	explicit DatabaseImpl(FDBDatabase* db) : db(db) {}

	friend class API;
};

class TransactionImpl : public Transaction, private NonCopyable, public FastAllocated<TransactionImpl> {
	friend class DatabaseImpl;

public:
	virtual ~TransactionImpl() {
		if (tr) {
			fdb_transaction_destroy(tr);
		}
	}

	void setReadVersion(Version v) override;
	Future<Version> getReadVersion() override;

	Future<Optional<FDBStandalone<ValueRef>>> get(const Key& key, bool snapshot = false) override;
	Future<FDBStandalone<KeyRef>> getKey(const KeySelector& key, bool snapshot = false) override;

	Future<Void> watch(const Key& key) override;

	using Transaction::getRange;
	Future<FDBStandalone<RangeResultRef>> getRange(const KeySelector& begin,
	                                               const KeySelector& end,
	                                               GetRangeLimits limits = GetRangeLimits(),
	                                               bool snapshot = false,
	                                               bool reverse = false,
	                                               FDBStreamingMode streamingMode = FDB_STREAMING_MODE_SERIAL) override;

	Future<int64_t> getEstimatedRangeSizeBytes(const KeyRange& keys) override;
	Future<FDBStandalone<VectorRef<KeyRef>>> getRangeSplitPoints(const KeyRange& range, int64_t chunkSize) override;

	void addReadConflictRange(KeyRangeRef const& keys) override;
	void addReadConflictKey(KeyRef const& key) override;
	void addWriteConflictRange(KeyRangeRef const& keys) override;
	void addWriteConflictKey(KeyRef const& key) override;

	void atomicOp(const KeyRef& key, const ValueRef& operand, FDBMutationType operationType) override;
	void set(const KeyRef& key, const ValueRef& value) override;
	void clear(const KeyRangeRef& range) override;
	void clear(const KeyRef& key) override;

	Future<Void> commit() override;
	Version getCommittedVersion() override;
	Future<FDBStandalone<StringRef>> getVersionstamp() override;

	void setOption(FDBTransactionOption option, Optional<StringRef> value = Optional<StringRef>()) override;

	Future<int64_t> getApproximateSize() override;
	Future<Void> onError(Error const& e) override;

	void cancel() override;
	void reset() override;

	TransactionImpl() : tr(nullptr) {}
	TransactionImpl(TransactionImpl&& r) noexcept {
		tr = r.tr;
		r.tr = nullptr;
	}
	TransactionImpl& operator=(TransactionImpl&& r) noexcept {
		tr = r.tr;
		r.tr = nullptr;
		return *this;
	}

private:
	FDBTransaction* tr;

	explicit TransactionImpl(FDBDatabase* db);
};

static inline void throw_on_error(fdb_error_t e) {
	if (e)
		throw Error(e);
}

void CFuture::blockUntilReady() {
	throw_on_error(fdb_future_block_until_ready(f));
}

void backToFutureCallback(FDBFuture* f, void* data) {
	g_network->onMainThread(Promise<Void>((SAV<Void>*)data),
	                        TaskPriority::DefaultOnMainThread); // SOMEDAY: think about this priority
}

// backToFuture<Type>( FDBFuture*, (FDBFuture* -> Type) ) -> Future<Type>
// Takes an FDBFuture (from the alien client world, with callbacks potentially firing on an alien thread)
// and converts it into a Future<T> (with callbacks working on this thread, cancellation etc).
// You must pass as the second parameter a function which takes a ready FDBFuture* and returns a value of Type
ACTOR template <class T, class Function>
static Future<T> backToFuture(FDBFuture* _f, Function convertValue) {
	state Reference<CFuture> f(new CFuture(_f));

	Promise<Void> ready;
	Future<Void> onReady = ready.getFuture();

	throw_on_error(fdb_future_set_callback(f->f, backToFutureCallback, ready.extractRawPointer()));
	wait(onReady);

	return convertValue(f);
}

void API::setNetworkOption(FDBNetworkOption option, Optional<StringRef> value) {
	if (value.present())
		throw_on_error(fdb_network_set_option(option, value.get().begin(), value.get().size()));
	else
		throw_on_error(fdb_network_set_option(option, nullptr, 0));
}

API* API::instance = nullptr;
API::API(int version) : version(version) {}

API* API::selectAPIVersion(int apiVersion) {
	if (API::instance) {
		if (apiVersion != API::instance->version) {
			throw api_version_already_set();
		} else {
			return API::instance;
		}
	}

	if (apiVersion < 500 || apiVersion > FDB_API_VERSION) {
		throw api_version_not_supported();
	}

	throw_on_error(fdb_select_api_version_impl(apiVersion, FDB_API_VERSION));

	API::instance = new API(apiVersion);
	return API::instance;
}

bool API::isAPIVersionSelected() {
	return API::instance != nullptr;
}

API* API::getInstance() {
	if (API::instance == nullptr) {
		throw api_version_unset();
	} else {
		return API::instance;
	}
}

void API::setupNetwork() {
	throw_on_error(fdb_setup_network());
}

void API::runNetwork() {
	throw_on_error(fdb_run_network());
}

void API::stopNetwork() {
	throw_on_error(fdb_stop_network());
}

bool API::evaluatePredicate(FDBErrorPredicate pred, Error const& e) {
	return fdb_error_predicate(pred, e.code());
}

Reference<Database> API::createDatabase(std::string const& connFilename) {
	FDBDatabase* db;
	throw_on_error(fdb_create_database(connFilename.c_str(), &db));
	return Reference<Database>(new DatabaseImpl(db));
}

int API::getAPIVersion() const {
	return version;
}

Reference<Transaction> DatabaseImpl::createTransaction() {
	return Reference<Transaction>(new TransactionImpl(db));
}

void DatabaseImpl::setDatabaseOption(FDBDatabaseOption option, Optional<StringRef> value) {
	if (value.present())
		throw_on_error(fdb_database_set_option(db, option, value.get().begin(), value.get().size()));
	else
		throw_on_error(fdb_database_set_option(db, option, nullptr, 0));
}

Future<int64_t> DatabaseImpl::rebootWorker(const StringRef& address, bool check, int duration) {
	return backToFuture<int64_t>(fdb_database_reboot_worker(db, address.begin(), address.size(), check, duration),
	                             [](Reference<CFuture> f) {
		                             int64_t res;

		                             throw_on_error(fdb_future_get_int64(f->f, &res));

		                             return res;
	                             });
}

Future<Void> DatabaseImpl::forceRecoveryWithDataLoss(const StringRef& dcid) {
	return backToFuture<Void>(fdb_database_force_recovery_with_data_loss(db, dcid.begin(), dcid.size()),
	                          [](Reference<CFuture> f) {
		                          throw_on_error(fdb_future_get_error(f->f));
		                          return Void();
	                          });
}

Future<Void> DatabaseImpl::createSnapshot(const StringRef& uid, const StringRef& snap_command) {
	return backToFuture<Void>(
	    fdb_database_create_snapshot(db, uid.begin(), uid.size(), snap_command.begin(), snap_command.size()),
	    [](Reference<CFuture> f) {
		    throw_on_error(fdb_future_get_error(f->f));
		    return Void();
	    });
}

TransactionImpl::TransactionImpl(FDBDatabase* db) {
	throw_on_error(fdb_database_create_transaction(db, &tr));
}

void TransactionImpl::setReadVersion(Version v) {
	fdb_transaction_set_read_version(tr, v);
}

Future<Version> TransactionImpl::getReadVersion() {
	return backToFuture<Version>(fdb_transaction_get_read_version(tr), [](Reference<CFuture> f) {
		Version value;

		throw_on_error(fdb_future_get_int64(f->f, &value));

		return value;
	});
}

Future<Optional<FDBStandalone<ValueRef>>> TransactionImpl::get(const Key& key, bool snapshot) {
	return backToFuture<Optional<FDBStandalone<ValueRef>>>(
	    fdb_transaction_get(tr, key.begin(), key.size(), snapshot), [](Reference<CFuture> f) {
		    fdb_bool_t present;
		    uint8_t const* value;
		    int value_length;

		    throw_on_error(fdb_future_get_value(f->f, &present, &value, &value_length));

		    if (present) {
			    return Optional<FDBStandalone<ValueRef>>(FDBStandalone<ValueRef>(f, ValueRef(value, value_length)));
		    } else {
			    return Optional<FDBStandalone<ValueRef>>();
		    }
	    });
}

Future<Void> TransactionImpl::watch(const Key& key) {
	return backToFuture<Void>(fdb_transaction_watch(tr, key.begin(), key.size()), [](Reference<CFuture> f) {
		throw_on_error(fdb_future_get_error(f->f));
		return Void();
	});
}

Future<FDBStandalone<KeyRef>> TransactionImpl::getKey(const KeySelector& key, bool snapshot) {
	return backToFuture<FDBStandalone<KeyRef>>(
	    fdb_transaction_get_key(tr, key.key.begin(), key.key.size(), key.orEqual, key.offset, snapshot),
	    [](Reference<CFuture> f) {
		    uint8_t const* key;
		    int key_length;

		    throw_on_error(fdb_future_get_key(f->f, &key, &key_length));

		    return FDBStandalone<KeyRef>(f, KeyRef(key, key_length));
	    });
}

Future<FDBStandalone<RangeResultRef>> TransactionImpl::getRange(const KeySelector& begin,
                                                                const KeySelector& end,
                                                                GetRangeLimits limits,
                                                                bool snapshot,
                                                                bool reverse,
                                                                FDBStreamingMode streamingMode) {
	// FIXME: iteration
	return backToFuture<FDBStandalone<RangeResultRef>>(
	    fdb_transaction_get_range(tr,
	                              begin.key.begin(),
	                              begin.key.size(),
	                              begin.orEqual,
	                              begin.offset,
	                              end.key.begin(),
	                              end.key.size(),
	                              end.orEqual,
	                              end.offset,
	                              limits.rows,
	                              limits.bytes,
	                              streamingMode,
	                              1,
	                              snapshot,
	                              reverse),
	    [](Reference<CFuture> f) {
		    FDBKeyValue const* kv;
		    int count;
		    fdb_bool_t more;

		    throw_on_error(fdb_future_get_keyvalue_array(f->f, &kv, &count, &more));

		    return FDBStandalone<RangeResultRef>(f,
		                                         RangeResultRef(VectorRef<KeyValueRef>((KeyValueRef*)kv, count), more));
	    });
}

Future<int64_t> TransactionImpl::getEstimatedRangeSizeBytes(const KeyRange& keys) {
	return backToFuture<int64_t>(fdb_transaction_get_estimated_range_size_bytes(
	                                 tr, keys.begin.begin(), keys.begin.size(), keys.end.begin(), keys.end.size()),
	                             [](Reference<CFuture> f) {
		                             int64_t bytes;
		                             throw_on_error(fdb_future_get_int64(f->f, &bytes));
		                             return bytes;
	                             });
}

Future<FDBStandalone<VectorRef<KeyRef>>> TransactionImpl::getRangeSplitPoints(const KeyRange& range,
                                                                              int64_t chunkSize) {
	return backToFuture<FDBStandalone<VectorRef<KeyRef>>>(
	    fdb_transaction_get_range_split_points(
	        tr, range.begin.begin(), range.begin.size(), range.end.begin(), range.end.size(), chunkSize),
	    [](Reference<CFuture> f) {
		    FDBKey const* ks;
		    int count;
		    throw_on_error(fdb_future_get_key_array(f->f, &ks, &count));

		    return FDBStandalone<VectorRef<KeyRef>>(f, VectorRef<KeyRef>((KeyRef*)ks, count));
	    });
}

void TransactionImpl::addReadConflictRange(KeyRangeRef const& keys) {
	throw_on_error(fdb_transaction_add_conflict_range(
	    tr, keys.begin.begin(), keys.begin.size(), keys.end.begin(), keys.end.size(), FDB_CONFLICT_RANGE_TYPE_READ));
}

void TransactionImpl::addReadConflictKey(KeyRef const& key) {
	return addReadConflictRange(KeyRange(KeyRangeRef(key, keyAfter(key))));
}

void TransactionImpl::addWriteConflictRange(KeyRangeRef const& keys) {
	throw_on_error(fdb_transaction_add_conflict_range(
	    tr, keys.begin.begin(), keys.begin.size(), keys.end.begin(), keys.end.size(), FDB_CONFLICT_RANGE_TYPE_WRITE));
}

void TransactionImpl::addWriteConflictKey(KeyRef const& key) {
	return addWriteConflictRange(KeyRange(KeyRangeRef(key, keyAfter(key))));
}

void TransactionImpl::atomicOp(const KeyRef& key, const ValueRef& operand, FDBMutationType operationType) {
	fdb_transaction_atomic_op(tr, key.begin(), key.size(), operand.begin(), operand.size(), operationType);
}

void TransactionImpl::set(const KeyRef& key, const ValueRef& value) {
	fdb_transaction_set(tr, key.begin(), key.size(), value.begin(), value.size());
}

void TransactionImpl::clear(const KeyRangeRef& range) {
	fdb_transaction_clear_range(tr, range.begin.begin(), range.begin.size(), range.end.begin(), range.end.size());
}

void TransactionImpl::clear(const KeyRef& key) {
	fdb_transaction_clear(tr, key.begin(), key.size());
}

Future<Void> TransactionImpl::commit() {
	return backToFuture<Void>(fdb_transaction_commit(tr), [](Reference<CFuture> f) {
		throw_on_error(fdb_future_get_error(f->f));
		return Void();
	});
}

Version TransactionImpl::getCommittedVersion() {
	Version v;

	throw_on_error(fdb_transaction_get_committed_version(tr, &v));
	return v;
}

Future<FDBStandalone<StringRef>> TransactionImpl::getVersionstamp() {
	return backToFuture<FDBStandalone<KeyRef>>(fdb_transaction_get_versionstamp(tr), [](Reference<CFuture> f) {
		uint8_t const* key;
		int key_length;

		throw_on_error(fdb_future_get_key(f->f, &key, &key_length));

		return FDBStandalone<StringRef>(f, StringRef(key, key_length));
	});
}

void TransactionImpl::setOption(FDBTransactionOption option, Optional<StringRef> value) {
	if (value.present()) {
		throw_on_error(fdb_transaction_set_option(tr, option, value.get().begin(), value.get().size()));
	} else {
		throw_on_error(fdb_transaction_set_option(tr, option, nullptr, 0));
	}
}

Future<int64_t> TransactionImpl::getApproximateSize() {
	return backToFuture<int64_t>(fdb_transaction_get_approximate_size(tr), [](Reference<CFuture> f) {
		int64_t size = 0;
		throw_on_error(fdb_future_get_int64(f->f, &size));
		return size;
	});
}

Future<Void> TransactionImpl::onError(Error const& e) {
	return backToFuture<Void>(fdb_transaction_on_error(tr, e.code()), [](Reference<CFuture> f) {
		throw_on_error(fdb_future_get_error(f->f));
		return Void();
	});
}

void TransactionImpl::cancel() {
	fdb_transaction_cancel(tr);
}

void TransactionImpl::reset() {
	fdb_transaction_reset(tr);
}

} // namespace FDB
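For orientation, a minimal sketch of the client bootstrap these implementations imply, mirroring fdb_flow_test() above; error handling is elided and the sketch is not part of this commit.

// Sketch only: select the API version, start the network, write one key.
ACTOR Future<Void> bootstrapSketch() {
	API* fdb = FDB::API::selectAPIVersion(710); // must happen before any other call
	fdb->setupNetwork();
	startThread(networkThread, fdb); // runs fdb_run_network() on its own thread

	state Reference<Database> db = fdb->createDatabase(); // default cluster file
	state Reference<Transaction> tr = db->createTransaction();
	tr->set(LiteralStringRef("hello"), LiteralStringRef("world"));
	wait(tr->commit()); // real callers loop on tr->onError(e) for retryable errors
	return Void();
}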
@@ -23,130 +23,150 @@

#include <flow/flow.h>

#define FDB_API_VERSION 710
#include <bindings/c/foundationdb/fdb_c.h>
#undef DLLEXPORT

#include "FDBLoanerTypes.h"

namespace FDB {
struct CFuture : NonCopyable, ReferenceCounted<CFuture>, FastAllocated<CFuture> {
	CFuture() : f(nullptr) {}
	explicit CFuture(FDBFuture* f) : f(f) {}
	~CFuture() {
		if (f) {
			fdb_future_destroy(f);
		}
	}

	void blockUntilReady();

	FDBFuture* f;
};

template <class T>
class FDBStandalone : public T {
public:
	FDBStandalone() {}
	FDBStandalone(Reference<CFuture> f, T const& t) : T(t), f(f) {}
	FDBStandalone(FDBStandalone const& o) : T((T const&)o), f(o.f) {}

private:
	Reference<CFuture> f;
};

class ReadTransaction : public ReferenceCounted<ReadTransaction> {
public:
	virtual ~ReadTransaction(){};
	virtual void setReadVersion(Version v) = 0;
	virtual Future<Version> getReadVersion() = 0;

	virtual Future<Optional<FDBStandalone<ValueRef>>> get(const Key& key, bool snapshot = false) = 0;
	virtual Future<FDBStandalone<KeyRef>> getKey(const KeySelector& key, bool snapshot = false) = 0;
	virtual Future<Void> watch(const Key& key) = 0;

	virtual Future<FDBStandalone<RangeResultRef>> getRange(
	    const KeySelector& begin,
	    const KeySelector& end,
	    GetRangeLimits limits = GetRangeLimits(),
	    bool snapshot = false,
	    bool reverse = false,
	    FDBStreamingMode streamingMode = FDB_STREAMING_MODE_SERIAL) = 0;
	virtual Future<FDBStandalone<RangeResultRef>> getRange(const KeySelector& begin,
	                                                       const KeySelector& end,
	                                                       int limit,
	                                                       bool snapshot = false,
	                                                       bool reverse = false,
	                                                       FDBStreamingMode streamingMode = FDB_STREAMING_MODE_SERIAL) {
		return getRange(begin, end, GetRangeLimits(limit), snapshot, reverse, streamingMode);
	}
	virtual Future<FDBStandalone<RangeResultRef>> getRange(const KeyRange& keys,
	                                                       int limit,
	                                                       bool snapshot = false,
	                                                       bool reverse = false,
	                                                       FDBStreamingMode streamingMode = FDB_STREAMING_MODE_SERIAL) {
		return getRange(KeySelector(firstGreaterOrEqual(keys.begin), keys.arena()),
		                KeySelector(firstGreaterOrEqual(keys.end), keys.arena()),
		                limit,
		                snapshot,
		                reverse,
		                streamingMode);
	}
	virtual Future<FDBStandalone<RangeResultRef>> getRange(const KeyRange& keys,
	                                                       GetRangeLimits limits = GetRangeLimits(),
	                                                       bool snapshot = false,
	                                                       bool reverse = false,
	                                                       FDBStreamingMode streamingMode = FDB_STREAMING_MODE_SERIAL) {
		return getRange(KeySelector(firstGreaterOrEqual(keys.begin), keys.arena()),
		                KeySelector(firstGreaterOrEqual(keys.end), keys.arena()),
		                limits,
		                snapshot,
		                reverse,
		                streamingMode);
	}

	virtual Future<int64_t> getEstimatedRangeSizeBytes(const KeyRange& keys) = 0;
	virtual Future<FDBStandalone<VectorRef<KeyRef>>> getRangeSplitPoints(const KeyRange& range, int64_t chunkSize) = 0;

	virtual void addReadConflictRange(KeyRangeRef const& keys) = 0;
	virtual void addReadConflictKey(KeyRef const& key) = 0;

	virtual void setOption(FDBTransactionOption option, Optional<StringRef> value = Optional<StringRef>()) = 0;

	virtual Future<Void> onError(Error const& e) = 0;

	virtual void cancel() = 0;
	virtual void reset() = 0;
};

class Transaction : public ReadTransaction {
public:
	virtual void addWriteConflictRange(KeyRangeRef const& keys) = 0;
	virtual void addWriteConflictKey(KeyRef const& key) = 0;

	virtual void atomicOp(const KeyRef& key, const ValueRef& operand, FDBMutationType operationType) = 0;
	virtual void set(const KeyRef& key, const ValueRef& value) = 0;
	virtual void clear(const KeyRangeRef& range) = 0;
	virtual void clear(const KeyRef& key) = 0;

	virtual Future<Void> commit() = 0;
	virtual Version getCommittedVersion() = 0;
	virtual Future<int64_t> getApproximateSize() = 0;
	virtual Future<FDBStandalone<StringRef>> getVersionstamp() = 0;
};

class Database : public ReferenceCounted<Database> {
public:
	virtual ~Database(){};
	virtual Reference<Transaction> createTransaction() = 0;
	virtual void setDatabaseOption(FDBDatabaseOption option, Optional<StringRef> value = Optional<StringRef>()) = 0;
	virtual Future<int64_t> rebootWorker(const StringRef& address, bool check = false, int duration = 0) = 0;
	virtual Future<Void> forceRecoveryWithDataLoss(const StringRef& dcid) = 0;
	virtual Future<Void> createSnapshot(const StringRef& uid, const StringRef& snap_command) = 0;
};

class API {
public:
	static API* selectAPIVersion(int apiVersion);
	static API* getInstance();
	static bool isAPIVersionSelected();

	void setNetworkOption(FDBNetworkOption option, Optional<StringRef> value = Optional<StringRef>());

	void setupNetwork();
	void runNetwork();
	void stopNetwork();

	Reference<Database> createDatabase(std::string const& connFilename = "");

	bool evaluatePredicate(FDBErrorPredicate pred, Error const& e);
	int getAPIVersion() const;

private:
	static API* instance;

	API(int version);
	int version;
};
} // namespace FDB
#endif // FDB_FLOW_FDB_FLOW_H
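A hedged sketch of the KeyRange convenience overload declared above, which reduces to firstGreaterOrEqual selectors; the key names are illustrative, not from this commit.

// Sketch only: scanning a key range via the ReadTransaction convenience overload.
ACTOR Future<Void> scanRangeSketch(Reference<ReadTransaction> tr) {
	// Equivalent to getRange(firstGreaterOrEqual("a"), firstGreaterOrEqual("b"), GetRangeLimits(100))
	FDBStandalone<RangeResultRef> rr =
	    wait(tr->getRange(KeyRangeRef(LiteralStringRef("a"), LiteralStringRef("b")), 100));
	for (auto kv : rr) {
		printf("%s -> %s\n", kv.key.printable().c_str(), kv.value.printable().c_str());
	}
	return Void();
}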
@@ -19,14 +19,14 @@
 */

#include "Tester.actor.h"
#include "flow/actorcompiler.h" // This must be the last #include.

using namespace FDB;

ACTOR Future<std::vector<Tuple>> popTuples(Reference<FlowTesterData> data, int count = 1) {
	state std::vector<Tuple> tuples;

	while (tuples.size() < count) {
		Standalone<StringRef> sizeStr = wait(data->stack.pop()[0].value);
		int size = Tuple::unpack(sizeStr).getInt(0);

@@ -34,7 +34,7 @@ ACTOR Future<std::vector<Tuple>> popTuples(Reference<FlowTesterData> data, int c
	state Tuple tuple;

	state int index;
	for (index = 0; index < tupleItems.size(); ++index) {
		Standalone<StringRef> itemStr = wait(tupleItems[index].value);
		tuple.append(Tuple::unpack(itemStr));
	}

@@ -54,9 +54,9 @@ ACTOR Future<std::vector<IDirectory::Path>> popPaths(Reference<FlowTesterData> d
	std::vector<Tuple> tuples = wait(popTuples(data, count));

	std::vector<IDirectory::Path> paths;
	for (auto& tuple : tuples) {
		IDirectory::Path path;
		for (int i = 0; i < tuple.size(); ++i) {
			path.push_back(tuple.getString(i));
		}

@@ -74,9 +74,9 @@ ACTOR Future<IDirectory::Path> popPath(Reference<FlowTesterData> data) {
std::string pathToString(IDirectory::Path const& path) {
	std::string str;
	str += "[";
	for (int i = 0; i < path.size(); ++i) {
		str += path[i].toString();
		if (i < path.size() - 1) {
			str += ", ";
		}
	}

@@ -86,21 +86,21 @@ std::string pathToString(IDirectory::Path const& path) {

IDirectory::Path combinePaths(IDirectory::Path const& path1, IDirectory::Path const& path2) {
	IDirectory::Path outPath(path1.begin(), path1.end());
	for (auto p : path2) {
		outPath.push_back(p);
	}

	return outPath;
}

void logOp(std::string message, bool force = false) {
	if (LOG_OPS || force) {
		printf("%s\n", message.c_str());
		fflush(stdout);
	}
}

// DIRECTORY_CREATE_SUBSPACE
struct DirectoryCreateSubspaceFunc : InstructionFunc {
	static const char* name;

@@ -108,7 +108,8 @@ struct DirectoryCreateSubspaceFunc : InstructionFunc {
	state Tuple path = wait(popTuple(data));
	Tuple rawPrefix = wait(data->stack.waitAndPop());

	logOp(format(
	    "Created subspace at %s: %s", tupleToString(path).c_str(), rawPrefix.getString(0).printable().c_str()));
	data->directoryData.push(new Subspace(path, rawPrefix.getString(0)));
	return Void();
}

@@ -116,7 +117,7 @@ struct DirectoryCreateSubspaceFunc : InstructionFunc {
const char* DirectoryCreateSubspaceFunc::name = "DIRECTORY_CREATE_SUBSPACE";
REGISTER_INSTRUCTION_FUNC(DirectoryCreateSubspaceFunc);

// DIRECTORY_CREATE_LAYER
struct DirectoryCreateLayerFunc : InstructionFunc {
	static const char* name;

@@ -127,15 +128,21 @@ struct DirectoryCreateLayerFunc : InstructionFunc {
	int index2 = args[1].getInt(0);
	bool allowManualPrefixes = args[2].getInt(0) != 0;

	if (!data->directoryData.directoryList[index1].valid() || !data->directoryData.directoryList[index2].valid()) {
		logOp("Create directory layer: None");
		data->directoryData.push();
	} else {
		Subspace* nodeSubspace = data->directoryData.directoryList[index1].subspace.get();
		Subspace* contentSubspace = data->directoryData.directoryList[index2].subspace.get();
		logOp(format("Create directory layer: node_subspace (%d) = %s, content_subspace (%d) = %s, "
		             "allow_manual_prefixes = %d",
		             index1,
		             nodeSubspace->key().printable().c_str(),
		             index2,
		             nodeSubspace->key().printable().c_str(),
		             allowManualPrefixes));
		data->directoryData.push(
		    Reference<IDirectory>(new DirectoryLayer(*nodeSubspace, *contentSubspace, allowManualPrefixes)));
	}

	return Void();

@@ -144,7 +151,7 @@ struct DirectoryCreateLayerFunc : InstructionFunc {
const char* DirectoryCreateLayerFunc::name = "DIRECTORY_CREATE_LAYER";
REGISTER_INSTRUCTION_FUNC(DirectoryCreateLayerFunc);

// DIRECTORY_CHANGE
struct DirectoryChangeFunc : InstructionFunc {
	static const char* name;

@@ -153,13 +160,17 @@ struct DirectoryChangeFunc : InstructionFunc {
	data->directoryData.directoryListIndex = index.getInt(0);
	ASSERT(data->directoryData.directoryListIndex < data->directoryData.directoryList.size());

	if (!data->directoryData.directoryList[data->directoryData.directoryListIndex].valid()) {
		data->directoryData.directoryListIndex = data->directoryData.directoryErrorIndex;
	}

	if (LOG_DIRS) {
		DirectoryOrSubspace d = data->directoryData.directoryList[data->directoryData.directoryListIndex];
		printf("Changed directory to %d (%s @\'%s\')\n",
		       data->directoryData.directoryListIndex,
		       d.typeString().c_str(),
		       d.directory.present() ? pathToString(d.directory.get()->getPath()).c_str()
		                             : d.subspace.get()->key().printable().c_str());
		fflush(stdout);
	}

@@ -169,7 +180,7 @@ struct DirectoryChangeFunc : InstructionFunc {
const char* DirectoryChangeFunc::name = "DIRECTORY_CHANGE";
REGISTER_INSTRUCTION_FUNC(DirectoryChangeFunc);

// DIRECTORY_SET_ERROR_INDEX
struct DirectorySetErrorIndexFunc : InstructionFunc {
	static const char* name;

@@ -183,7 +194,7 @@ struct DirectorySetErrorIndexFunc : InstructionFunc {
const char* DirectorySetErrorIndexFunc::name = "DIRECTORY_SET_ERROR_INDEX";
REGISTER_INSTRUCTION_FUNC(DirectorySetErrorIndexFunc);

// DIRECTORY_CREATE_OR_OPEN
struct DirectoryCreateOrOpenFunc : InstructionFunc {
	static const char* name;

@@ -193,11 +204,12 @@ struct DirectoryCreateOrOpenFunc : InstructionFunc {
	Standalone<StringRef> layer = layerTuple.getType(0) == Tuple::NULL_TYPE ? StringRef() : layerTuple.getString(0);

	Reference<IDirectory> directory = data->directoryData.directory();
|
||||
logOp(format("create_or_open %s: layer=%s", pathToString(combinePaths(directory->getPath(), path)).c_str(), layer.printable().c_str()));
|
||||
logOp(format("create_or_open %s: layer=%s",
|
||||
pathToString(combinePaths(directory->getPath(), path)).c_str(),
|
||||
layer.printable().c_str()));
|
||||
|
||||
Reference<DirectorySubspace> dirSubspace = wait(executeMutation(instruction, [this, directory, layer] () {
|
||||
return directory->createOrOpen(instruction->tr, path, layer);
|
||||
}));
|
||||
Reference<DirectorySubspace> dirSubspace = wait(executeMutation(
|
||||
instruction, [this, directory, layer]() { return directory->createOrOpen(instruction->tr, path, layer); }));
|
||||
|
||||
data->directoryData.push(dirSubspace);
|
||||
|
||||
|
@ -207,7 +219,7 @@ struct DirectoryCreateOrOpenFunc : InstructionFunc {
|
|||
const char* DirectoryCreateOrOpenFunc::name = "DIRECTORY_CREATE_OR_OPEN";
|
||||
REGISTER_INSTRUCTION_FUNC(DirectoryCreateOrOpenFunc);
|
||||
|
||||
//DIRECTORY_CREATE
|
||||
// DIRECTORY_CREATE
|
||||
struct DirectoryCreateFunc : InstructionFunc {
|
||||
static const char* name;
|
||||
|
||||
|
@ -215,14 +227,19 @@ struct DirectoryCreateFunc : InstructionFunc {
|
|||
state IDirectory::Path path = wait(popPath(data));
|
||||
std::vector<Tuple> args = wait(data->stack.waitAndPop(2));
|
||||
Standalone<StringRef> layer = args[0].getType(0) == Tuple::NULL_TYPE ? StringRef() : args[0].getString(0);
|
||||
Optional<Standalone<StringRef>> prefix = args[1].getType(0) == Tuple::NULL_TYPE ? Optional<Standalone<StringRef>>() : args[1].getString(0);
|
||||
Optional<Standalone<StringRef>> prefix =
|
||||
args[1].getType(0) == Tuple::NULL_TYPE ? Optional<Standalone<StringRef>>() : args[1].getString(0);
|
||||
|
||||
Reference<IDirectory> directory = data->directoryData.directory();
|
||||
logOp(format("create %s: layer=%s, prefix=%s", pathToString(combinePaths(directory->getPath(), path)).c_str(), layer.printable().c_str(), prefix.present() ? prefix.get().printable().c_str() : "<not present>"));
|
||||
logOp(format("create %s: layer=%s, prefix=%s",
|
||||
pathToString(combinePaths(directory->getPath(), path)).c_str(),
|
||||
layer.printable().c_str(),
|
||||
prefix.present() ? prefix.get().printable().c_str() : "<not present>"));
|
||||
|
||||
Reference<DirectorySubspace> dirSubspace = wait(executeMutation(instruction, [this, directory, layer, prefix] () {
|
||||
return directory->create(instruction->tr, path, layer, prefix);
|
||||
}));
|
||||
Reference<DirectorySubspace> dirSubspace =
|
||||
wait(executeMutation(instruction, [this, directory, layer, prefix]() {
|
||||
return directory->create(instruction->tr, path, layer, prefix);
|
||||
}));
|
||||
|
||||
data->directoryData.push(dirSubspace);
|
||||
|
||||
|
@ -232,7 +249,7 @@ struct DirectoryCreateFunc : InstructionFunc {
|
|||
const char* DirectoryCreateFunc::name = "DIRECTORY_CREATE";
|
||||
REGISTER_INSTRUCTION_FUNC(DirectoryCreateFunc);
|
||||
|
||||
//DIRECTORY_OPEN
|
||||
// DIRECTORY_OPEN
|
||||
struct DirectoryOpenFunc : InstructionFunc {
|
||||
static const char* name;
|
||||
|
||||
|
@ -242,7 +259,9 @@ struct DirectoryOpenFunc : InstructionFunc {
|
|||
Standalone<StringRef> layer = layerTuple.getType(0) == Tuple::NULL_TYPE ? StringRef() : layerTuple.getString(0);
|
||||
|
||||
Reference<IDirectory> directory = data->directoryData.directory();
|
||||
logOp(format("open %s: layer=%s", pathToString(combinePaths(directory->getPath(), path)).c_str(), layer.printable().c_str()));
|
||||
logOp(format("open %s: layer=%s",
|
||||
pathToString(combinePaths(directory->getPath(), path)).c_str(),
|
||||
layer.printable().c_str()));
|
||||
Reference<DirectorySubspace> dirSubspace = wait(directory->open(instruction->tr, path, layer));
|
||||
data->directoryData.push(dirSubspace);
|
||||
|
||||
|
@ -252,7 +271,7 @@ struct DirectoryOpenFunc : InstructionFunc {
|
|||
const char* DirectoryOpenFunc::name = "DIRECTORY_OPEN";
|
||||
REGISTER_INSTRUCTION_FUNC(DirectoryOpenFunc);
|
||||
|
||||
//DIRECTORY_MOVE
|
||||
// DIRECTORY_MOVE
|
||||
struct DirectoryMoveFunc : InstructionFunc {
|
||||
static const char* name;
|
||||
|
||||
|
@ -260,11 +279,12 @@ struct DirectoryMoveFunc : InstructionFunc {
|
|||
std::vector<IDirectory::Path> paths = wait(popPaths(data, 2));
|
||||
|
||||
Reference<IDirectory> directory = data->directoryData.directory();
|
||||
logOp(format("move %s to %s", pathToString(combinePaths(directory->getPath(), paths[0])).c_str(), pathToString(combinePaths(directory->getPath(), paths[1])).c_str()));
|
||||
logOp(format("move %s to %s",
|
||||
pathToString(combinePaths(directory->getPath(), paths[0])).c_str(),
|
||||
pathToString(combinePaths(directory->getPath(), paths[1])).c_str()));
|
||||
|
||||
Reference<DirectorySubspace> dirSubspace = wait(executeMutation(instruction, [this, directory, paths] () {
|
||||
return directory->move(instruction->tr, paths[0], paths[1]);
|
||||
}));
|
||||
Reference<DirectorySubspace> dirSubspace = wait(executeMutation(
|
||||
instruction, [this, directory, paths]() { return directory->move(instruction->tr, paths[0], paths[1]); }));
|
||||
|
||||
data->directoryData.push(dirSubspace);
|
||||
|
||||
|
@ -274,7 +294,7 @@ struct DirectoryMoveFunc : InstructionFunc {
|
|||
const char* DirectoryMoveFunc::name = "DIRECTORY_MOVE";
|
||||
REGISTER_INSTRUCTION_FUNC(DirectoryMoveFunc);
|
||||
|
||||
//DIRECTORY_MOVE_TO
|
||||
// DIRECTORY_MOVE_TO
|
||||
struct DirectoryMoveToFunc : InstructionFunc {
|
||||
static const char* name;
|
||||
|
||||
|
@ -284,9 +304,8 @@ struct DirectoryMoveToFunc : InstructionFunc {
|
|||
Reference<IDirectory> directory = data->directoryData.directory();
|
||||
logOp(format("move %s to %s", pathToString(directory->getPath()).c_str(), pathToString(path).c_str()));
|
||||
|
||||
Reference<DirectorySubspace> dirSubspace = wait(executeMutation(instruction, [this, directory, path] () {
|
||||
return directory->moveTo(instruction->tr, path);
|
||||
}));
|
||||
Reference<DirectorySubspace> dirSubspace = wait(executeMutation(
|
||||
instruction, [this, directory, path]() { return directory->moveTo(instruction->tr, path); }));
|
||||
|
||||
data->directoryData.push(dirSubspace);
|
||||
|
||||
|
@ -296,27 +315,22 @@ struct DirectoryMoveToFunc : InstructionFunc {
|
|||
const char* DirectoryMoveToFunc::name = "DIRECTORY_MOVE_TO";
|
||||
REGISTER_INSTRUCTION_FUNC(DirectoryMoveToFunc);
|
||||
|
||||
//DIRECTORY_REMOVE
|
||||
// DIRECTORY_REMOVE
|
||||
struct DirectoryRemoveFunc : InstructionFunc {
|
||||
static const char* name;
|
||||
|
||||
ACTOR static Future<Void> call(Reference<FlowTesterData> data, Reference<InstructionData> instruction) {
|
||||
Tuple count = wait(data->stack.waitAndPop());
|
||||
state Reference<IDirectory> directory = data->directoryData.directory();
|
||||
if(count.getInt(0) == 0) {
|
||||
if (count.getInt(0) == 0) {
|
||||
logOp(format("remove %s", pathToString(directory->getPath()).c_str()));
|
||||
|
||||
wait(executeMutation(instruction, [this] () {
|
||||
return directory->remove(instruction->tr);
|
||||
}));
|
||||
}
|
||||
else {
|
||||
wait(executeMutation(instruction, [this]() { return directory->remove(instruction->tr); }));
|
||||
} else {
|
||||
IDirectory::Path path = wait(popPath(data));
|
||||
logOp(format("remove %s", pathToString(combinePaths(directory->getPath(), path)).c_str()));
|
||||
|
||||
wait(executeMutation(instruction, [this, path] () {
|
||||
return directory->remove(instruction->tr, path);
|
||||
}));
|
||||
wait(executeMutation(instruction, [this, path]() { return directory->remove(instruction->tr, path); }));
|
||||
}
|
||||
|
||||
return Void();
|
||||
|
@ -325,27 +339,24 @@ struct DirectoryRemoveFunc : InstructionFunc {
|
|||
const char* DirectoryRemoveFunc::name = "DIRECTORY_REMOVE";
|
||||
REGISTER_INSTRUCTION_FUNC(DirectoryRemoveFunc);
|
||||
|
||||
//DIRECTORY_REMOVE_IF_EXISTS
|
||||
// DIRECTORY_REMOVE_IF_EXISTS
|
||||
struct DirectoryRemoveIfExistsFunc : InstructionFunc {
|
||||
static const char* name;
|
||||
|
||||
ACTOR static Future<Void> call(Reference<FlowTesterData> data, Reference<InstructionData> instruction) {
|
||||
Tuple count = wait(data->stack.waitAndPop());
|
||||
state Reference<IDirectory> directory = data->directoryData.directory();
|
||||
if(count.getInt(0) == 0) {
|
||||
if (count.getInt(0) == 0) {
|
||||
logOp(format("remove_if_exists %s", pathToString(directory->getPath()).c_str()));
|
||||
|
||||
wait(success(executeMutation(instruction, [this] () {
|
||||
return directory->removeIfExists(instruction->tr);
|
||||
})));
|
||||
}
|
||||
else {
|
||||
wait(
|
||||
success(executeMutation(instruction, [this]() { return directory->removeIfExists(instruction->tr); })));
|
||||
} else {
|
||||
IDirectory::Path path = wait(popPath(data));
|
||||
logOp(format("remove_if_exists %s", pathToString(combinePaths(directory->getPath(), path)).c_str()));
|
||||
|
||||
wait(success(executeMutation(instruction, [this, path] () {
|
||||
return directory->removeIfExists(instruction->tr, path);
|
||||
})));
|
||||
wait(success(executeMutation(instruction,
|
||||
[this, path]() { return directory->removeIfExists(instruction->tr, path); })));
|
||||
}
|
||||
|
||||
return Void();
|
||||
|
@ -354,7 +365,7 @@ struct DirectoryRemoveIfExistsFunc : InstructionFunc {
|
|||
const char* DirectoryRemoveIfExistsFunc::name = "DIRECTORY_REMOVE_IF_EXISTS";
|
||||
REGISTER_INSTRUCTION_FUNC(DirectoryRemoveIfExistsFunc);
|
||||
|
||||
//DIRECTORY_LIST
|
||||
// DIRECTORY_LIST
|
||||
struct DirectoryListFunc : InstructionFunc {
|
||||
static const char* name;
|
||||
|
||||
|
@ -362,12 +373,11 @@ struct DirectoryListFunc : InstructionFunc {
|
|||
Tuple count = wait(data->stack.waitAndPop());
|
||||
state Reference<IDirectory> directory = data->directoryData.directory();
|
||||
state Standalone<VectorRef<StringRef>> subdirs;
|
||||
if(count.getInt(0) == 0) {
|
||||
if (count.getInt(0) == 0) {
|
||||
logOp(format("list %s", pathToString(directory->getPath()).c_str()));
|
||||
Standalone<VectorRef<StringRef>> _subdirs = wait(directory->list(instruction->tr));
|
||||
subdirs = _subdirs;
|
||||
}
|
||||
else {
|
||||
} else {
|
||||
IDirectory::Path path = wait(popPath(data));
|
||||
logOp(format("list %s", pathToString(combinePaths(directory->getPath(), path)).c_str()));
|
||||
Standalone<VectorRef<StringRef>> _subdirs = wait(directory->list(instruction->tr, path));
|
||||
|
@ -375,7 +385,7 @@ struct DirectoryListFunc : InstructionFunc {
|
|||
}
|
||||
|
||||
Tuple subdirTuple;
|
||||
for(auto &sd : subdirs) {
|
||||
for (auto& sd : subdirs) {
|
||||
subdirTuple.append(sd, true);
|
||||
}
|
||||
|
||||
|
@ -386,7 +396,7 @@ struct DirectoryListFunc : InstructionFunc {
|
|||
const char* DirectoryListFunc::name = "DIRECTORY_LIST";
|
||||
REGISTER_INSTRUCTION_FUNC(DirectoryListFunc);
|
||||
|
||||
//DIRECTORY_EXISTS
|
||||
// DIRECTORY_EXISTS
|
||||
struct DirectoryExistsFunc : InstructionFunc {
|
||||
static const char* name;
|
||||
|
||||
|
@ -394,12 +404,11 @@ struct DirectoryExistsFunc : InstructionFunc {
|
|||
Tuple count = wait(data->stack.waitAndPop());
|
||||
state Reference<IDirectory> directory = data->directoryData.directory();
|
||||
state bool result;
|
||||
if(count.getInt(0) == 0) {
|
||||
if (count.getInt(0) == 0) {
|
||||
bool _result = wait(directory->exists(instruction->tr));
|
||||
result = _result;
|
||||
logOp(format("exists %s: %d", pathToString(directory->getPath()).c_str(), result));
|
||||
}
|
||||
else {
|
||||
} else {
|
||||
state IDirectory::Path path = wait(popPath(data));
|
||||
bool _result = wait(directory->exists(instruction->tr, path));
|
||||
result = _result;
|
||||
|
@ -413,7 +422,7 @@ struct DirectoryExistsFunc : InstructionFunc {
|
|||
const char* DirectoryExistsFunc::name = "DIRECTORY_EXISTS";
|
||||
REGISTER_INSTRUCTION_FUNC(DirectoryExistsFunc);
|
||||
|
||||
//DIRECTORY_PACK_KEY
|
||||
// DIRECTORY_PACK_KEY
|
||||
struct DirectoryPackKeyFunc : InstructionFunc {
|
||||
static const char* name;
|
||||
|
||||
|
@ -427,17 +436,19 @@ struct DirectoryPackKeyFunc : InstructionFunc {
|
|||
const char* DirectoryPackKeyFunc::name = "DIRECTORY_PACK_KEY";
|
||||
REGISTER_INSTRUCTION_FUNC(DirectoryPackKeyFunc);
|
||||
|
||||
//DIRECTORY_UNPACK_KEY
|
||||
// DIRECTORY_UNPACK_KEY
|
||||
struct DirectoryUnpackKeyFunc : InstructionFunc {
|
||||
static const char* name;
|
||||
|
||||
ACTOR static Future<Void> call(Reference<FlowTesterData> data, Reference<InstructionData> instruction) {
|
||||
Tuple key = wait(data->stack.waitAndPop());
|
||||
Subspace *subspace = data->directoryData.subspace();
|
||||
logOp(format("Unpack %s in subspace with prefix %s", key.getString(0).printable().c_str(), subspace->key().printable().c_str()));
|
||||
Subspace* subspace = data->directoryData.subspace();
|
||||
logOp(format("Unpack %s in subspace with prefix %s",
|
||||
key.getString(0).printable().c_str(),
|
||||
subspace->key().printable().c_str()));
|
||||
Tuple tuple = subspace->unpack(key.getString(0));
|
||||
for(int i = 0; i < tuple.size(); ++i) {
|
||||
data->stack.push(tuple.subTuple(i, i+1).pack());
|
||||
for (int i = 0; i < tuple.size(); ++i) {
|
||||
data->stack.push(tuple.subTuple(i, i + 1).pack());
|
||||
}
|
||||
|
||||
return Void();
|
||||
|
@ -446,7 +457,7 @@ struct DirectoryUnpackKeyFunc : InstructionFunc {
|
|||
const char* DirectoryUnpackKeyFunc::name = "DIRECTORY_UNPACK_KEY";
|
||||
REGISTER_INSTRUCTION_FUNC(DirectoryUnpackKeyFunc);
|
||||
|
||||
//DIRECTORY_RANGE
|
||||
// DIRECTORY_RANGE
|
||||
struct DirectoryRangeFunc : InstructionFunc {
|
||||
static const char* name;
|
||||
|
||||
|
@ -462,7 +473,7 @@ struct DirectoryRangeFunc : InstructionFunc {
|
|||
const char* DirectoryRangeFunc::name = "DIRECTORY_RANGE";
|
||||
REGISTER_INSTRUCTION_FUNC(DirectoryRangeFunc);
|
||||
|
||||
//DIRECTORY_CONTAINS
|
||||
// DIRECTORY_CONTAINS
|
||||
struct DirectoryContainsFunc : InstructionFunc {
|
||||
static const char* name;
|
||||
|
||||
|
@ -477,15 +488,15 @@ struct DirectoryContainsFunc : InstructionFunc {
|
|||
const char* DirectoryContainsFunc::name = "DIRECTORY_CONTAINS";
|
||||
REGISTER_INSTRUCTION_FUNC(DirectoryContainsFunc);
|
||||
|
||||
//DIRECTORY_OPEN_SUBSPACE
|
||||
// DIRECTORY_OPEN_SUBSPACE
|
||||
struct DirectoryOpenSubspaceFunc : InstructionFunc {
|
||||
static const char* name;
|
||||
|
||||
ACTOR static Future<Void> call(Reference<FlowTesterData> data, Reference<InstructionData> instruction) {
|
||||
Tuple tuple = wait(popTuple(data));
|
||||
Subspace *subspace = data->directoryData.subspace();
|
||||
Subspace* subspace = data->directoryData.subspace();
|
||||
logOp(format("open_subspace %s (at %s)", tupleToString(tuple).c_str(), subspace->key().printable().c_str()));
|
||||
Subspace *child = new Subspace(subspace->subspace(tuple));
|
||||
Subspace* child = new Subspace(subspace->subspace(tuple));
|
||||
data->directoryData.push(child);
|
||||
|
||||
return Void();
|
||||
|
@ -494,7 +505,7 @@ struct DirectoryOpenSubspaceFunc : InstructionFunc {
|
|||
const char* DirectoryOpenSubspaceFunc::name = "DIRECTORY_OPEN_SUBSPACE";
|
||||
REGISTER_INSTRUCTION_FUNC(DirectoryOpenSubspaceFunc);
|
||||
|
||||
//DIRECTORY_LOG_SUBSPACE
|
||||
// DIRECTORY_LOG_SUBSPACE
|
||||
struct DirectoryLogSubspaceFunc : InstructionFunc {
|
||||
static const char* name;
|
||||
|
||||
|
@ -510,7 +521,7 @@ struct DirectoryLogSubspaceFunc : InstructionFunc {
|
|||
const char* DirectoryLogSubspaceFunc::name = "DIRECTORY_LOG_SUBSPACE";
|
||||
REGISTER_INSTRUCTION_FUNC(DirectoryLogSubspaceFunc);
|
||||
|
||||
//DIRECTORY_LOG_DIRECTORY
|
||||
// DIRECTORY_LOG_DIRECTORY
|
||||
struct DirectoryLogDirectoryFunc : InstructionFunc {
|
||||
static const char* name;
|
||||
|
||||
|
@ -520,22 +531,23 @@ struct DirectoryLogDirectoryFunc : InstructionFunc {
|
|||
state bool exists = wait(directory->exists(instruction->tr));
|
||||
|
||||
state Tuple childrenTuple;
|
||||
if(exists) {
|
||||
if (exists) {
|
||||
Standalone<VectorRef<StringRef>> children = wait(directory->list(instruction->tr));
|
||||
for(auto &c : children) {
|
||||
for (auto& c : children) {
|
||||
childrenTuple.append(c, true);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Subspace logSubspace(Tuple().append(data->directoryData.directoryListIndex), prefix.getString(0));
|
||||
|
||||
Tuple pathTuple;
|
||||
for(auto &p : directory->getPath()) {
|
||||
for (auto& p : directory->getPath()) {
|
||||
pathTuple.append(p, true);
|
||||
}
|
||||
|
||||
instruction->tr->set(logSubspace.pack(LiteralStringRef("path"), true), pathTuple.pack());
|
||||
instruction->tr->set(logSubspace.pack(LiteralStringRef("layer"), true), Tuple().append(directory->getLayer()).pack());
|
||||
instruction->tr->set(logSubspace.pack(LiteralStringRef("layer"), true),
|
||||
Tuple().append(directory->getLayer()).pack());
|
||||
instruction->tr->set(logSubspace.pack(LiteralStringRef("exists"), true), Tuple().append(exists ? 1 : 0).pack());
|
||||
instruction->tr->set(logSubspace.pack(LiteralStringRef("children"), true), childrenTuple.pack());
|
||||
|
||||
|
@ -545,13 +557,13 @@ struct DirectoryLogDirectoryFunc : InstructionFunc {
|
|||
const char* DirectoryLogDirectoryFunc::name = "DIRECTORY_LOG_DIRECTORY";
|
||||
REGISTER_INSTRUCTION_FUNC(DirectoryLogDirectoryFunc);
|
||||
|
||||
//DIRECTORY_STRIP_PREFIX
|
||||
// DIRECTORY_STRIP_PREFIX
|
||||
struct DirectoryStripPrefixFunc : InstructionFunc {
|
||||
static const char* name;
|
||||
|
||||
ACTOR static Future<Void> call(Reference<FlowTesterData> data, Reference<InstructionData> instruction) {
|
||||
Tuple str = wait(data->stack.waitAndPop());
|
||||
Subspace *subspace = data->directoryData.subspace();
|
||||
Subspace* subspace = data->directoryData.subspace();
|
||||
ASSERT(str.getString(0).startsWith(subspace->key()));
|
||||
data->stack.pushTuple(str.getString(0).substr(subspace->key().size()));
|
||||
return Void();
|
||||
|
@ -559,4 +571,3 @@ struct DirectoryStripPrefixFunc : InstructionFunc {
|
|||
};
|
||||
const char* DirectoryStripPrefixFunc::name = "DIRECTORY_STRIP_PREFIX";
|
||||
REGISTER_INSTRUCTION_FUNC(DirectoryStripPrefixFunc);
|
||||
|
||||
|
|
File diff suppressed because it is too large
@@ -18,21 +18,24 @@
 * limitations under the License.
 */

-// When actually compiled (NO_INTELLISENSE), include the generated version of this file. In intellisense use the source version.
+// When actually compiled (NO_INTELLISENSE), include the generated version of this file. In intellisense use the source
+// version.
 #if defined(NO_INTELLISENSE) && !defined(FDB_FLOW_TESTER_TESTER_ACTOR_G_H)
-#define FDB_FLOW_TESTER_TESTER_ACTOR_G_H
-#include "Tester.actor.g.h"
+#define FDB_FLOW_TESTER_TESTER_ACTOR_G_H
+#include "Tester.actor.g.h"
 #elif !defined(FDB_FLOW_TESTER_TESTER_ACTOR_H)
-#define FDB_FLOW_TESTER_TESTER_ACTOR_H
+#define FDB_FLOW_TESTER_TESTER_ACTOR_H

 #pragma once

 #include <utility>

 #include "flow/IDispatched.h"
 #include "bindings/flow/fdb_flow.h"
 #include "bindings/flow/IDirectory.h"
 #include "bindings/flow/Subspace.h"
 #include "bindings/flow/DirectoryLayer.h"
-#include "flow/actorcompiler.h" // This must be the last #include.
+#include "flow/actorcompiler.h" // This must be the last #include.

 constexpr bool LOG_ALL = false;
 constexpr bool LOG_INSTRUCTIONS = LOG_ALL || false;

@@ -54,19 +57,13 @@ struct FlowTesterStack {
 uint32_t index;
 std::vector<StackItem> data;

-void push(Future<Standalone<StringRef>> value) {
-data.push_back(StackItem(index, value));
-}
+void push(Future<Standalone<StringRef>> value) { data.push_back(StackItem(index, value)); }

-void push(Standalone<StringRef> value) {
-push(Future<Standalone<StringRef>>(value));
-}
+void push(Standalone<StringRef> value) { push(Future<Standalone<StringRef>>(value)); }

-void push(const StackItem& item) {
-data.push_back(item);
-}
+void push(const StackItem& item) { data.push_back(item); }

-void pushTuple(StringRef value, bool utf8=false) {
+void pushTuple(StringRef value, bool utf8 = false) {
 FDB::Tuple t;
 t.append(value, utf8);
 data.push_back(StackItem(index, t.pack()));

@@ -86,10 +83,10 @@ struct FlowTesterStack {
 items.push_back(data.back());
 data.pop_back();
 count--;
 }
 }
 return items;
 }

 Future<std::vector<FDB::Tuple>> waitAndPop(int count);
 Future<FDB::Tuple> waitAndPop();

@@ -99,33 +96,31 @@ struct FlowTesterStack {
 data.push_back(data.back());
 }

-void clear() {
-data.clear();
-}
+void clear() { data.clear(); }
 };

 struct InstructionData : public ReferenceCounted<InstructionData> {
 bool isDatabase;
-bool isSnapshot;
+bool isSnapshot;
 StringRef instruction;
 Reference<FDB::Transaction> tr;

 InstructionData(bool _isDatabase, bool _isSnapshot, StringRef _instruction, Reference<FDB::Transaction> _tr)
-: isDatabase(_isDatabase)
-, isSnapshot(_isSnapshot)
-, instruction(_instruction)
-, tr(_tr) {}
+: isDatabase(_isDatabase), isSnapshot(_isSnapshot), instruction(_instruction), tr(_tr) {}
 };

 struct FlowTesterData;

-struct InstructionFunc : IDispatched<InstructionFunc, std::string, std::function<Future<Void>(Reference<FlowTesterData> data, Reference<InstructionData> instruction)>> {
+struct InstructionFunc
+    : IDispatched<InstructionFunc,
+                  std::string,
+                  std::function<Future<Void>(Reference<FlowTesterData> data, Reference<InstructionData> instruction)>> {
 static Future<Void> call(std::string op, Reference<FlowTesterData> data, Reference<InstructionData> instruction) {
 ASSERT(data);
 ASSERT(instruction);

 auto it = dispatches().find(op);
-if(it == dispatches().end()) {
+if (it == dispatches().end()) {
 fprintf(stderr, "Unrecognized instruction: %s\n", op.c_str());
 ASSERT(false);
 }

@@ -141,24 +136,20 @@ struct DirectoryOrSubspace {

 DirectoryOrSubspace() {}
 DirectoryOrSubspace(Reference<FDB::IDirectory> directory) : directory(directory) {}
-DirectoryOrSubspace(FDB::Subspace *subspace) : subspace(subspace) {}
-DirectoryOrSubspace(Reference<FDB::DirectorySubspace> dirSubspace) : directory(dirSubspace), subspace(dirSubspace.getPtr()) {}
+DirectoryOrSubspace(FDB::Subspace* subspace) : subspace(subspace) {}
+DirectoryOrSubspace(Reference<FDB::DirectorySubspace> dirSubspace)
+    : directory(dirSubspace), subspace(dirSubspace.getPtr()) {}

-bool valid() {
-return directory.present() || subspace.present();
-}
+bool valid() { return directory.present() || subspace.present(); }

 std::string typeString() {
-if(directory.present() && subspace.present()) {
+if (directory.present() && subspace.present()) {
 return "DirectorySubspace";
-}
-else if(directory.present()) {
-return "IDirectory";
-}
-else if(subspace.present()) {
+} else if (directory.present()) {
+return "IDirectory";
+} else if (subspace.present()) {
 return "Subspace";
-}
-else {
+} else {
 return "InvalidDirectory";
 }
 }

@@ -169,10 +160,10 @@ struct DirectoryTesterData {
 int directoryListIndex;
 int directoryErrorIndex;

-Reference<FDB::IDirectory> directory() {
+Reference<FDB::IDirectory> directory() {
 ASSERT(directoryListIndex < directoryList.size());
 ASSERT(directoryList[directoryListIndex].directory.present());
-return directoryList[directoryListIndex].directory.get();
+return directoryList[directoryListIndex].directory.get();
 }

 FDB::Subspace* subspace() {

@@ -188,8 +179,8 @@ struct DirectoryTesterData {
 template <class T>
 void push(T item) {
 directoryList.push_back(DirectoryOrSubspace(item));
-if(LOG_DIRS) {
-printf("Pushed %s at %lu\n", directoryList.back().typeString().c_str(), directoryList.size()-1);
+if (LOG_DIRS) {
+printf("Pushed %s at %lu\n", directoryList.back().typeString().c_str(), directoryList.size() - 1);
 fflush(stdout);
 }
 }

@@ -198,7 +189,7 @@ struct DirectoryTesterData {
 };

 struct FlowTesterData : public ReferenceCounted<FlowTesterData> {
-FDB::API *api;
+FDB::API* api;
 Reference<FDB::Database> db;
 Standalone<FDB::RangeResultRef> instructions;
 Standalone<StringRef> trName;

@@ -209,31 +200,28 @@ struct FlowTesterData : public ReferenceCounted<FlowTesterData> {
 std::vector<Future<Void>> subThreads;

 Future<Void> processInstruction(Reference<InstructionData> instruction) {
-return InstructionFunc::call(instruction->instruction.toString(), Reference<FlowTesterData>::addRef(this), instruction);
+return InstructionFunc::call(
+    instruction->instruction.toString(), Reference<FlowTesterData>::addRef(this), instruction);
 }

-FlowTesterData(FDB::API *api) {
-this->api = api;
-}
+FlowTesterData(FDB::API* api) { this->api = api; }
 };

 std::string tupleToString(FDB::Tuple const& tuple);

 ACTOR template <class F>
-Future<decltype(fake<F>()().getValue())> executeMutation(Reference<InstructionData> instruction, F func) {
+Future<decltype(std::declval<F>()().getValue())> executeMutation(Reference<InstructionData> instruction, F func) {
 loop {
 try {
-state decltype(fake<F>()().getValue()) result = wait(func());
-if(instruction->isDatabase) {
+state decltype(std::declval<F>()().getValue()) result = wait(func());
+if (instruction->isDatabase) {
 wait(instruction->tr->commit());
 }
 return result;
-}
-catch(Error &e) {
-if(instruction->isDatabase) {
+} catch (Error& e) {
+if (instruction->isDatabase) {
 wait(instruction->tr->onError(e));
-}
-else {
+} else {
 throw;
 }
 }
@@ -9,7 +9,7 @@ This package requires:
 - [Mono](http://www.mono-project.com/) (macOS or Linux) or [Visual Studio](https://www.visualstudio.com/) (Windows) (build-time only)
 - FoundationDB C API 2.0.x-6.1.x (part of the [FoundationDB client packages](https://apple.github.io/foundationdb/downloads.html#c))

-Use of this package requires the selection of a FoundationDB API version at runtime. This package currently supports FoundationDB API versions 200-700.
+Use of this package requires the selection of a FoundationDB API version at runtime. This package currently supports FoundationDB API versions 200-710.

 To install this package, you can run the "fdb-go-install.sh" script (for versions 5.0.x and greater):

@@ -579,6 +579,17 @@ func (sm *StackMachine) processInst(idx int, inst tuple.Tuple) {
 if e != nil {
 panic(e)
 }
+case op == "GET_RANGE_SPLIT_POINTS":
+r := sm.popKeyRange()
+chunkSize := sm.waitAndPop().item.(int64)
+_, e := rt.ReadTransact(func(rtr fdb.ReadTransaction) (interface{}, error) {
+_ = rtr.GetRangeSplitPoints(r, chunkSize).MustGet()
+sm.store(idx, []byte("GOT_RANGE_SPLIT_POINTS"))
+return nil, nil
+})
+if e != nil {
+panic(e)
+}
 case op == "COMMIT":
 sm.store(idx, sm.currentTransaction().Commit())
 case op == "RESET":
@@ -22,7 +22,7 @@
 package fdb

-// #define FDB_API_VERSION 700
+// #define FDB_API_VERSION 710
 // #include <foundationdb/fdb_c.h>
 import "C"

@@ -22,7 +22,7 @@
 package fdb

-// #define FDB_API_VERSION 700
+// #define FDB_API_VERSION 710
 // #include <foundationdb/fdb_c.h>
 import "C"

@@ -46,7 +46,7 @@ A basic interaction with the FoundationDB API is demonstrated below:
 func main() {
 // Different API versions may expose different runtime behaviors.
-fdb.MustAPIVersion(700)
+fdb.MustAPIVersion(710)

 // Open the default database from the system cluster
 db := fdb.MustOpenDefault()

@@ -22,7 +22,7 @@
 package fdb

-// #define FDB_API_VERSION 700
+// #define FDB_API_VERSION 710
 // #include <foundationdb/fdb_c.h>
 import "C"

@@ -22,7 +22,7 @@
 package fdb

-// #define FDB_API_VERSION 700
+// #define FDB_API_VERSION 710
 // #include <foundationdb/fdb_c.h>
 // #include <stdlib.h>
 import "C"

@@ -108,7 +108,7 @@ func (opt NetworkOptions) setOpt(code int, param []byte) error {
 // library, an error will be returned. APIVersion must be called prior to any
 // other functions in the fdb package.
 //
-// Currently, this package supports API versions 200 through 700.
+// Currently, this package supports API versions 200 through 710.
 //
 // Warning: When using the multi-version client API, setting an API version that
 // is not supported by a particular client library will prevent that client from

@@ -116,7 +116,7 @@ func (opt NetworkOptions) setOpt(code int, param []byte) error {
 // the API version of your application after upgrading your client until the
 // cluster has also been upgraded.
 func APIVersion(version int) error {
-headerVersion := 700
+headerVersion := 710

 networkMutex.Lock()
 defer networkMutex.Unlock()

@@ -128,7 +128,7 @@ func APIVersion(version int) error {
 return errAPIVersionAlreadySet
 }

-if version < 200 || version > 700 {
+if version < 200 || version > 710 {
 return errAPIVersionNotSupported
 }

@@ -32,7 +32,7 @@ import (
 func ExampleOpenDefault() {
 var e error

-e = fdb.APIVersion(700)
+e = fdb.APIVersion(710)
 if e != nil {
 fmt.Printf("Unable to set API version: %v\n", e)
 return

@@ -52,7 +52,7 @@ func ExampleOpenDefault() {
 }

 func TestVersionstamp(t *testing.T) {
-fdb.MustAPIVersion(700)
+fdb.MustAPIVersion(710)
 db := fdb.MustOpenDefault()

 setVs := func(t fdb.Transactor, key fdb.Key) (fdb.FutureKey, error) {

@@ -98,7 +98,7 @@ func TestVersionstamp(t *testing.T) {
 }

 func ExampleTransactor() {
-fdb.MustAPIVersion(700)
+fdb.MustAPIVersion(710)
 db := fdb.MustOpenDefault()

 setOne := func(t fdb.Transactor, key fdb.Key, value []byte) error {

@@ -149,7 +149,7 @@ func ExampleTransactor() {
 }

 func ExampleReadTransactor() {
-fdb.MustAPIVersion(700)
+fdb.MustAPIVersion(710)
 db := fdb.MustOpenDefault()

 getOne := func(rt fdb.ReadTransactor, key fdb.Key) ([]byte, error) {

@@ -202,7 +202,7 @@ func ExampleReadTransactor() {
 }

 func ExamplePrefixRange() {
-fdb.MustAPIVersion(700)
+fdb.MustAPIVersion(710)
 db := fdb.MustOpenDefault()

 tr, e := db.CreateTransaction()

@@ -241,7 +241,7 @@ func ExamplePrefixRange() {
 }

 func ExampleRangeIterator() {
-fdb.MustAPIVersion(700)
+fdb.MustAPIVersion(710)
 db := fdb.MustOpenDefault()

 tr, e := db.CreateTransaction()

@@ -23,7 +23,7 @@
 package fdb

 // #cgo LDFLAGS: -lfdb_c -lm
-// #define FDB_API_VERSION 700
+// #define FDB_API_VERSION 710
 // #include <foundationdb/fdb_c.h>
 // #include <string.h>
 //

@@ -306,6 +306,57 @@ func (f *futureKeyValueArray) Get() ([]KeyValue, bool, error) {
 return ret, (more != 0), nil
 }

+// FutureKeyArray represents the asynchronous result of a function
+// that returns an array of keys. FutureKeyArray is a lightweight object
+// that may be efficiently copied, and is safe for concurrent use by multiple goroutines.
+type FutureKeyArray interface {
+
+// Get returns an array of keys or an error if the asynchronous operation
+// associated with this future did not successfully complete. The current
+// goroutine will be blocked until the future is ready.
+Get() ([]Key, error)
+
+// MustGet returns an array of keys, or panics if the asynchronous operations
+// associated with this future did not successfully complete. The current goroutine
+// will be blocked until the future is ready.
+MustGet() []Key
+}
+
+type futureKeyArray struct {
+*future
+}
+
+func (f *futureKeyArray) Get() ([]Key, error) {
+defer runtime.KeepAlive(f.future)
+
+f.BlockUntilReady()
+
+var ks *C.FDBKey
+var count C.int
+
+if err := C.fdb_future_get_key_array(f.ptr, &ks, &count); err != 0 {
+return nil, Error{int(err)}
+}
+
+ret := make([]Key, int(count))
+
+for i := 0; i < int(count); i++ {
+kptr := unsafe.Pointer(uintptr(unsafe.Pointer(ks)) + uintptr(i*12))
+
+ret[i] = stringRefToSlice(kptr)
+}
+
+return ret, nil
+}
+
+func (f *futureKeyArray) MustGet() []Key {
+val, err := f.Get()
+if err != nil {
+panic(err)
+}
+return val
+}
+
 // FutureInt64 represents the asynchronous result of a function that returns a
 // database version. FutureInt64 is a lightweight object that may be efficiently
 // copied, and is safe for concurrent use by multiple goroutines.
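The FutureKeyArray added above follows the same Get/MustGet contract as the package's other future types. A minimal consumption sketch (not part of this diff; assumes a reachable cluster, API version 710, and the GetRangeSplitPoints method introduced later in this change):

	package main

	import (
		"fmt"

		"github.com/apple/foundationdb/bindings/go/src/fdb"
	)

	func main() {
		fdb.MustAPIVersion(710)
		db := fdb.MustOpenDefault()

		_, e := db.ReadTransact(func(rtr fdb.ReadTransaction) (interface{}, error) {
			// GetRangeSplitPoints returns a FutureKeyArray immediately; Get blocks
			// until the result is ready and surfaces any error, while MustGet
			// panics instead, matching the other Future types in this file.
			r := fdb.KeyRange{Begin: fdb.Key(""), End: fdb.Key{0xFF}}
			splits, err := rtr.GetRangeSplitPoints(r, 10000000).Get()
			if err != nil {
				return nil, err
			}
			fmt.Printf("%d split points returned\n", len(splits))
			return nil, nil
		})
		if e != nil {
			panic(e)
		}
	}
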
@@ -220,11 +220,18 @@ func (o NetworkOptions) SetExternalClientDirectory(param string) error {
 return o.setOpt(63, []byte(param))
 }

-// Prevents connections through the local client, allowing only connections through externally loaded client libraries. Intended primarily for testing.
+// Prevents connections through the local client, allowing only connections through externally loaded client libraries.
 func (o NetworkOptions) SetDisableLocalClient() error {
 return o.setOpt(64, nil)
 }

+// Spawns multiple worker threads for each version of the client that is loaded. Setting this to a number greater than one implies disable_local_client.
+//
+// Parameter: Number of client threads to be spawned. Each cluster will be serviced by a single client thread.
+func (o NetworkOptions) SetClientThreadsPerVersion(param int64) error {
+return o.setOpt(65, int64ToBytes(param))
+}
+
 // Disables logging of client statistics, such as sampled transaction activity.
 func (o NetworkOptions) SetDisableClientStatisticsLogging() error {
 return o.setOpt(70, nil)

@@ -521,6 +528,25 @@ func (o TransactionOptions) SetReportConflictingKeys() error {
 return o.setOpt(712, nil)
 }

+// By default, the special key space will only allow users to read from exactly one module (a subspace in the special key space). Use this option to allow reading from zero or more modules. Users who set this option should be prepared for new modules, which may have different behaviors than the modules they're currently reading. For example, a new module might block or return an error.
+func (o TransactionOptions) SetSpecialKeySpaceRelaxed() error {
+return o.setOpt(713, nil)
+}
+
+// Adds a tag to the transaction that can be used to apply manual targeted throttling. At most 5 tags can be set on a transaction.
+//
+// Parameter: String identifier used to associated this transaction with a throttling group. Must not exceed 16 characters.
+func (o TransactionOptions) SetTag(param string) error {
+return o.setOpt(800, []byte(param))
+}
+
+// Adds a tag to the transaction that can be used to apply manual or automatic targeted throttling. At most 5 tags can be set on a transaction.
+//
+// Parameter: String identifier used to associated this transaction with a throttling group. Must not exceed 16 characters.
+func (o TransactionOptions) SetAutoThrottleTag(param string) error {
+return o.setOpt(801, []byte(param))
+}
+
 type StreamingMode int

 const (
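The new TransactionOptions above are applied per transaction via Options(). A hedged sketch of tagging a transaction for targeted throttling (the tag name and key are illustrative, not from the commit; assumes a running cluster):

	package main

	import "github.com/apple/foundationdb/bindings/go/src/fdb"

	func main() {
		fdb.MustAPIVersion(710)
		db := fdb.MustOpenDefault()

		_, e := db.Transact(func(tr fdb.Transaction) (interface{}, error) {
			// At most 5 tags per transaction, each at most 16 characters; auto
			// throttle tags opt the transaction into automatic targeted throttling.
			if err := tr.Options().SetAutoThrottleTag("batch-load"); err != nil {
				return nil, err
			}
			tr.Set(fdb.Key("example-key"), []byte("example-value"))
			return nil, nil
		})
		if e != nil {
			panic(e)
		}
	}
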
@@ -22,7 +22,7 @@
 package fdb

-// #define FDB_API_VERSION 700
+// #define FDB_API_VERSION 710
 // #include <foundationdb/fdb_c.h>
 import "C"

@@ -87,6 +87,8 @@ func (s Snapshot) GetDatabase() Database {
 return s.transaction.db
 }

+// GetEstimatedRangeSizeBytes returns an estimate for the number of bytes
+// stored in the given range.
 func (s Snapshot) GetEstimatedRangeSizeBytes(r ExactRange) FutureInt64 {
 beginKey, endKey := r.FDBRangeKeys()
 return s.getEstimatedRangeSizeBytes(

@@ -94,3 +96,15 @@ func (s Snapshot) GetEstimatedRangeSizeBytes(r ExactRange) FutureInt64 {
 endKey.FDBKey(),
 )
 }
+
+// GetRangeSplitPoints returns a list of keys that can split the given range
+// into (roughly) equally sized chunks based on chunkSize.
+// Note: the returned split points contain the start key and end key of the given range.
+func (s Snapshot) GetRangeSplitPoints(r ExactRange, chunkSize int64) FutureKeyArray {
+beginKey, endKey := r.FDBRangeKeys()
+return s.getRangeSplitPoints(
+beginKey.FDBKey(),
+endKey.FDBKey(),
+chunkSize,
+)
+}

@@ -78,8 +78,9 @@ type Subspace interface {
 // FoundationDB keys (corresponding to the prefix of this Subspace).
 fdb.KeyConvertible

-// All Subspaces implement fdb.ExactRange and fdb.Range, and describe all
-// keys logically in this Subspace.
+// All Subspaces implement fdb.ExactRange and fdb.Range, and describe all
+// keys strictly within the subspace that encode tuples. Specifically,
+// this will include all keys in [prefix + '\x00', prefix + '\xff').
 fdb.ExactRange
 }
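The clarified Subspace range semantics above have a visible consequence: the bare prefix key itself falls outside the subspace's range. A small sketch illustrating that boundary (illustrative keys; assumes a running cluster):

	package main

	import (
		"fmt"

		"github.com/apple/foundationdb/bindings/go/src/fdb"
		"github.com/apple/foundationdb/bindings/go/src/fdb/subspace"
		"github.com/apple/foundationdb/bindings/go/src/fdb/tuple"
	)

	func main() {
		fdb.MustAPIVersion(710)
		db := fdb.MustOpenDefault()
		sub := subspace.Sub("app", "users")

		_, e := db.Transact(func(tr fdb.Transaction) (interface{}, error) {
			tr.Set(sub.Pack(tuple.Tuple{"alice"}), []byte("1"))
			// The raw prefix is not a tuple-encoded key, so it is outside
			// [prefix + '\x00', prefix + '\xff') and will not be returned below.
			tr.Set(fdb.Key(sub.Bytes()), []byte("prefix-only"))
			kvs := tr.GetRange(sub, fdb.RangeOptions{}).GetSliceOrPanic()
			fmt.Printf("%d key(s) inside the subspace range\n", len(kvs))
			return nil, nil
		})
		if e != nil {
			panic(e)
		}
	}
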
@@ -22,7 +22,7 @@
 package fdb

-// #define FDB_API_VERSION 700
+// #define FDB_API_VERSION 710
 // #include <foundationdb/fdb_c.h>
 import "C"

@@ -40,6 +40,7 @@ type ReadTransaction interface {
 GetDatabase() Database
 Snapshot() Snapshot
 GetEstimatedRangeSizeBytes(r ExactRange) FutureInt64
+GetRangeSplitPoints(r ExactRange, chunkSize int64) FutureKeyArray

 ReadTransactor
 }

@@ -318,8 +319,14 @@ func (t *transaction) getEstimatedRangeSizeBytes(beginKey Key, endKey Key) Futur
 }
 }

-// GetEstimatedRangeSizeBytes will get an estimate for the number of bytes
+// GetEstimatedRangeSizeBytes returns an estimate for the number of bytes
 // stored in the given range.
+// Note: the estimated size is calculated based on the sampling done by FDB server. The sampling
+// algorithm works roughly in this way: the larger the key-value pair is, the more likely it would
+// be sampled and the more accurate its sampled size would be. And due to
+// that reason it is recommended to use this API to query against large ranges for accuracy considerations.
+// For a rough reference, if the returned size is larger than 3MB, one can consider the size to be
+// accurate.
 func (t Transaction) GetEstimatedRangeSizeBytes(r ExactRange) FutureInt64 {
 beginKey, endKey := r.FDBRangeKeys()
 return t.getEstimatedRangeSizeBytes(

@@ -328,6 +335,31 @@ func (t Transaction) GetEstimatedRangeSizeBytes(r ExactRange) FutureInt64 {
 )
 }

+func (t *transaction) getRangeSplitPoints(beginKey Key, endKey Key, chunkSize int64) FutureKeyArray {
+return &futureKeyArray{
+future: newFuture(C.fdb_transaction_get_range_split_points(
+t.ptr,
+byteSliceToPtr(beginKey),
+C.int(len(beginKey)),
+byteSliceToPtr(endKey),
+C.int(len(endKey)),
+C.int64_t(chunkSize),
+)),
+}
+}
+
+// GetRangeSplitPoints returns a list of keys that can split the given range
+// into (roughly) equally sized chunks based on chunkSize.
+// Note: the returned split points contain the start key and end key of the given range.
+func (t Transaction) GetRangeSplitPoints(r ExactRange, chunkSize int64) FutureKeyArray {
+beginKey, endKey := r.FDBRangeKeys()
+return t.getRangeSplitPoints(
+beginKey.FDBKey(),
+endKey.FDBKey(),
+chunkSize,
+)
+}
+
 func (t *transaction) getReadVersion() FutureInt64 {
 return &futureInt64{
 future: newFuture(C.fdb_transaction_get_read_version(t.ptr)),
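Taken together, the estimate and split-point APIs support chunked range processing: estimate the size first, then ask for boundaries at a chunk size. A usage sketch (not from the commit; assumes a reachable cluster and keys in the chosen range):

	package main

	import (
		"fmt"

		"github.com/apple/foundationdb/bindings/go/src/fdb"
	)

	func main() {
		fdb.MustAPIVersion(710)
		db := fdb.MustOpenDefault()

		_, e := db.ReadTransact(func(rtr fdb.ReadTransaction) (interface{}, error) {
			r := fdb.KeyRange{Begin: fdb.Key("a"), End: fdb.Key("z")}

			// Sampling-based estimate; per the note above it is only expected
			// to be accurate for large ranges (roughly 3MB and up).
			size := rtr.GetEstimatedRangeSizeBytes(r).MustGet()

			// Boundaries for ~10MB chunks; the returned keys include the
			// range's own begin and end keys, so n keys describe n-1 chunks.
			splits := rtr.GetRangeSplitPoints(r, 10*1024*1024).MustGet()

			fmt.Printf("~%d bytes across %d chunk(s)\n", size, len(splits)-1)
			return nil, nil
		})
		if e != nil {
			panic(e)
		}
	}
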
@@ -0,0 +1,66 @@
+Language: Java
+# BasedOnStyle: Mozilla
+AccessModifierOffset: -4
+AlignAfterOpenBracket: Align
+AlignConsecutiveAssignments: false
+AlignConsecutiveDeclarations: false
+AlignEscapedNewlinesLeft: false
+AlignOperands: true
+AlignTrailingComments: false
+AllowAllParametersOfDeclarationOnNextLine: false
+AllowShortBlocksOnASingleLine: false
+AllowShortCaseLabelsOnASingleLine: false
+AllowShortFunctionsOnASingleLine: Inline
+AllowShortIfStatementsOnASingleLine: true
+AllowShortLoopsOnASingleLine: true
+AlwaysBreakAfterDefinitionReturnType: None
+AlwaysBreakAfterReturnType: None
+AlwaysBreakBeforeMultilineStrings: false
+AlwaysBreakTemplateDeclarations: true
+BinPackArguments: true
+BinPackParameters: true
+BreakBeforeBinaryOperators: None
+BreakBeforeBraces: Attach
+ColumnLimit: 120
+CommentPragmas: '^ IWYU pragma:|^ clang-format: ignore$|^TraceEvent'
+ConstructorInitializerAllOnOneLineOrOnePerLine: false
+ConstructorInitializerIndentWidth: 2
+ContinuationIndentWidth: 4
+Cpp11BracedListStyle: false
+DerivePointerAlignment: false
+DisableFormat: false
+ExperimentalAutoDetectBinPacking: false
+ForEachMacros: [ foreach, Q_FOREACH, BOOST_FOREACH ]
+IndentCaseLabels: false
+IndentWidth: 4
+IndentWrappedFunctionNames: false
+KeepEmptyLinesAtTheStartOfBlocks: true
+MacroBlockBegin: ''
+MacroBlockEnd: ''
+MaxEmptyLinesToKeep: 1
+NamespaceIndentation: None
+ObjCBlockIndentWidth: 2
+ObjCSpaceAfterProperty: true
+ObjCSpaceBeforeProtocolList: false
+PenaltyBreakBeforeFirstCallParameter: 19
+PenaltyBreakComment: 300
+PenaltyBreakFirstLessLess: 120
+PenaltyBreakString: 1000
+PenaltyExcessCharacter: 1000000
+PenaltyReturnTypeOnItsOwnLine: 200
+PointerAlignment: Left
+ReflowComments: true
+SortIncludes: false
+SpaceAfterCStyleCast: false
+SpaceBeforeAssignmentOperators: true
+SpaceBeforeParens: ControlStatements
+SpaceInEmptyParentheses: false
+SpacesBeforeTrailingComments: 1
+SpacesInAngles: false
+SpacesInContainerLiterals: true
+SpacesInCStyleCastParentheses: false
+SpacesInParentheses: false
+SpacesInSquareBrackets: false
+Standard: Cpp11
+TabWidth: 4
+UseTab: ForIndentation
@ -1,3 +1,7 @@
|
|||
set(RUN_JAVA_TESTS ON CACHE BOOL "Run Java unit tests")
|
||||
set(RUN_JUNIT_TESTS OFF CACHE BOOL "Compile and run junit tests")
|
||||
set(RUN_JAVA_INTEGRATION_TESTS OFF CACHE BOOL "Compile and run integration tests")
|
||||
|
||||
set(JAVA_BINDING_SRCS
|
||||
src/main/com/apple/foundationdb/async/AsyncIterable.java
|
||||
src/main/com/apple/foundationdb/async/AsyncIterator.java
|
||||
|
@ -22,11 +26,14 @@ set(JAVA_BINDING_SRCS
|
|||
src/main/com/apple/foundationdb/directory/NoSuchDirectoryException.java
|
||||
src/main/com/apple/foundationdb/directory/package-info.java
|
||||
src/main/com/apple/foundationdb/directory/PathUtil.java
|
||||
src/main/com/apple/foundationdb/DirectBufferIterator.java
|
||||
src/main/com/apple/foundationdb/DirectBufferPool.java
|
||||
src/main/com/apple/foundationdb/FDB.java
|
||||
src/main/com/apple/foundationdb/FDBDatabase.java
|
||||
src/main/com/apple/foundationdb/FDBTransaction.java
|
||||
src/main/com/apple/foundationdb/FutureInt64.java
|
||||
src/main/com/apple/foundationdb/FutureKey.java
|
||||
src/main/com/apple/foundationdb/FutureKeyArray.java
|
||||
src/main/com/apple/foundationdb/FutureResult.java
|
||||
src/main/com/apple/foundationdb/FutureResults.java
|
||||
src/main/com/apple/foundationdb/FutureStrings.java
|
||||
|
@ -42,6 +49,7 @@ set(JAVA_BINDING_SRCS
|
|||
src/main/com/apple/foundationdb/package-info.java
|
||||
src/main/com/apple/foundationdb/Range.java
|
||||
src/main/com/apple/foundationdb/RangeQuery.java
|
||||
src/main/com/apple/foundationdb/KeyArrayResult.java
|
||||
src/main/com/apple/foundationdb/RangeResult.java
|
||||
src/main/com/apple/foundationdb/RangeResultInfo.java
|
||||
src/main/com/apple/foundationdb/RangeResultSummary.java
|
||||
|
@ -51,6 +59,8 @@ set(JAVA_BINDING_SRCS
|
|||
src/main/com/apple/foundationdb/subspace/Subspace.java
|
||||
src/main/com/apple/foundationdb/Transaction.java
|
||||
src/main/com/apple/foundationdb/TransactionContext.java
|
||||
src/main/com/apple/foundationdb/EventKeeper.java
|
||||
src/main/com/apple/foundationdb/MapEventKeeper.java
|
||||
src/main/com/apple/foundationdb/testing/AbstractWorkload.java
|
||||
src/main/com/apple/foundationdb/testing/WorkloadContext.java
|
||||
src/main/com/apple/foundationdb/testing/Promise.java
|
||||
|
@ -74,7 +84,6 @@ set(JAVA_TESTS_SRCS
|
|||
src/test/com/apple/foundationdb/test/ContinuousSample.java
|
||||
src/test/com/apple/foundationdb/test/DirectoryExtension.java
|
||||
src/test/com/apple/foundationdb/test/DirectoryOperation.java
|
||||
src/test/com/apple/foundationdb/test/DirectoryTest.java
|
||||
src/test/com/apple/foundationdb/test/DirectoryUtil.java
|
||||
src/test/com/apple/foundationdb/test/Example.java
|
||||
src/test/com/apple/foundationdb/test/Instruction.java
|
||||
|
@ -100,6 +109,8 @@ set(JAVA_TESTS_SRCS
|
|||
src/test/com/apple/foundationdb/test/WatchTest.java
|
||||
src/test/com/apple/foundationdb/test/WhileTrueTest.java)
|
||||
|
||||
include(src/tests.cmake)
|
||||
|
||||
set(GENERATED_JAVA_DIR ${CMAKE_CURRENT_BINARY_DIR}/src/main/com/apple/foundationdb)
|
||||
file(MAKE_DIRECTORY ${GENERATED_JAVA_DIR})
|
||||
|
||||
|
@ -130,8 +141,6 @@ endif()
|
|||
target_include_directories(fdb_java PRIVATE ${JNI_INCLUDE_DIRS})
|
||||
# libfdb_java.so is loaded by fdb-java.jar and doesn't need to depened on jvm shared libraries.
|
||||
target_link_libraries(fdb_java PRIVATE fdb_c)
|
||||
set_target_properties(fdb_java PROPERTIES
|
||||
LIBRARY_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/lib/${SYSTEM_NAME}/amd64/)
|
||||
if(APPLE)
|
||||
set_target_properties(fdb_java PROPERTIES SUFFIX ".jnilib")
|
||||
endif()
|
||||
|
@ -171,12 +180,6 @@ add_jar(fdb-java ${JAVA_BINDING_SRCS} ${GENERATED_JAVA_FILES} ${CMAKE_SOURCE_DIR
|
|||
OUTPUT_DIR ${PROJECT_BINARY_DIR}/lib VERSION ${CMAKE_PROJECT_VERSION} MANIFEST ${MANIFEST_FILE})
|
||||
add_dependencies(fdb-java fdb_java_options fdb_java)
|
||||
|
||||
# TODO[mpilman]: The java RPM will require some more effort (mostly on debian). However,
|
||||
# most people will use the fat-jar, so it is not clear how high this priority is.
|
||||
|
||||
#install_jar(fdb-java DESTINATION ${FDB_SHARE_DIR}/java COMPONENT java)
|
||||
#install(TARGETS fdb_java DESTINATION ${FDB_LIB_DIR} COMPONENT java)
|
||||
|
||||
if(NOT OPEN_FOR_IDE)
|
||||
set(FAT_JAR_BINARIES "NOTFOUND" CACHE STRING
|
||||
"Path of a directory structure with libraries to include in fat jar (a lib directory)")
|
||||
|
@ -212,7 +215,11 @@ if(NOT OPEN_FOR_IDE)
|
|||
elseif(APPLE)
|
||||
set(lib_destination "osx/x86_64")
|
||||
else()
|
||||
set(lib_destination "linux/amd64")
|
||||
if(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64")
|
||||
set(lib_destination "linux/aarch64")
|
||||
else()
|
||||
set(lib_destination "linux/amd64")
|
||||
endif()
|
||||
endif()
|
||||
set(lib_destination "${unpack_dir}/lib/${lib_destination}")
|
||||
set(jni_package "${CMAKE_BINARY_DIR}/packages/lib")
|
||||
|
@ -250,4 +257,128 @@ if(NOT OPEN_FOR_IDE)
|
|||
add_dependencies(fat-jar fdb-java)
|
||||
add_dependencies(fat-jar copy_lib)
|
||||
add_dependencies(packages fat-jar)
|
||||
|
||||
set(TEST_CP ${tests_jar} ${target_jar})
|
||||
|
||||
if(RUN_JUNIT_TESTS OR RUN_JAVA_INTEGRATION_TESTS)
|
||||
if (USE_SANITIZER)
|
||||
message(WARNING "Cannot run java tests with sanitizer builds")
|
||||
return()
|
||||
endif()
|
||||
# We use Junit libraries for both JUnit and integration testing structures, so download in either case
|
||||
# https://search.maven.org/remotecontent?filepath=org/junit/jupiter/junit-jupiter-engine/5.7.1/junit-jupiter-engine-5.7.1.jar
|
||||
file(DOWNLOAD "https://search.maven.org/remotecontent?filepath=org/junit/jupiter/junit-jupiter-engine/5.7.1/junit-jupiter-engine-5.7.1.jar"
|
||||
${CMAKE_BINARY_DIR}/packages/junit-jupiter-engine-5.7.1.jar
|
||||
EXPECTED_HASH SHA256=56616c9350b3624f76cffef6b24ce7bb222915bfd5688f96d3cf4cef34f077cb)
|
||||
# https://search.maven.org/remotecontent?filepath=org/junit/jupiter/junit-jupiter-api/5.7.1/junit-jupiter-api-5.7.1.jar
|
||||
file(DOWNLOAD "https://search.maven.org/remotecontent?filepath=org/junit/jupiter/junit-jupiter-api/5.7.1/junit-jupiter-api-5.7.1.jar"
|
||||
${CMAKE_BINARY_DIR}/packages/junit-jupiter-api-5.7.1.jar
|
||||
EXPECTED_HASH SHA256=ce7b985bc469e2625759a4ebc45533c70581a05a348278c1d6408e9b2e35e314)
|
||||
file(DOWNLOAD "https://search.maven.org/remotecontent?filepath=org/junit/jupiter/junit-jupiter-params/5.7.1/junit-jupiter-params-5.7.1.jar"
|
||||
${CMAKE_BINARY_DIR}/packages/junit-jupiter-params-5.7.1.jar
|
||||
EXPECTED_HASH SHA256=8effdd7f8a4ba5558b568184dee08008b2443c86c673ef81de5861fbc7ef0613)
|
||||
file(DOWNLOAD "https://search.maven.org/remotecontent?filepath=org/junit/platform/junit-platform-commons/1.7.1/junit-platform-commons-1.7.1.jar"
|
||||
${CMAKE_BINARY_DIR}/packages/junit-platform-commons-1.7.1.jar
|
||||
EXPECTED_HASH SHA256=7c546be86864718fbaceb79fa84ff1d3a516500fc428f1b21d061c2e0fbc5a4b)
|
||||
file(DOWNLOAD "https://search.maven.org/remotecontent?filepath=org/junit/platform/junit-platform-engine/1.7.1/junit-platform-engine-1.7.1.jar"
|
||||
${CMAKE_BINARY_DIR}/packages/junit-platform-engine-1.7.1.jar
|
||||
EXPECTED_HASH SHA256=37df5a9cd6dbc1f754ba2b46f96b8874a83660e1796bf38c738f022dcf86c23f)
|
||||
file(DOWNLOAD "https://search.maven.org/remotecontent?filepath=org/junit/platform/junit-platform-launcher/1.7.1/junit-platform-launcher-1.7.1.jar"
|
||||
${CMAKE_BINARY_DIR}/packages/junit-platform-launcher-1.7.1.jar
|
||||
EXPECTED_HASH SHA256=3122ac6fb284bc50e3afe46419fc977f94d580e9d3d1ea58805d200b510a99ee)
|
||||
file(DOWNLOAD "https://search.maven.org/remotecontent?filepath=org/junit/platform/junit-platform-console/1.7.1/junit-platform-console-1.7.1.jar"
|
||||
${CMAKE_BINARY_DIR}/packages/junit-platform-console-1.7.1.jar
|
||||
EXPECTED_HASH SHA256=11ed48fcdfcea32f2fa98872db7ecba2d49d178f76493e7a149a2242363ad12e)
|
||||
file(DOWNLOAD "https://search.maven.org/remotecontent?filepath=org/apiguardian/apiguardian-api/1.1.1/apiguardian-api-1.1.1.jar"
|
||||
${CMAKE_BINARY_DIR}/packages/apiguardian-api-1.1.1.jar
|
||||
EXPECTED_HASH SHA256=fc68f0d28633caccf3980fdf1e99628fba9c49424ee56dc685cd8b4d2a9fefde)
|
||||
file(DOWNLOAD "https://search.maven.org/remotecontent?filepath=org/opentest4j/opentest4j/1.2.0/opentest4j-1.2.0.jar"
|
||||
${CMAKE_BINARY_DIR}/packages/opentest4j-1.2.0.jar)
|
||||
|
||||
set(JUNIT_CLASSPATH "${CMAKE_BINARY_DIR}/packages/junit-jupiter-api-5.7.1.jar:${CMAKE_BINARY_DIR}/packages/junit-jupiter-engine-5.7.1.jar:${CMAKE_BINARY_DIR}/packages/junit-jupiter-params-5.7.1.jar:${CMAKE_BINARY_DIR}/packages/opentest4j-1.2.0.jar:${CMAKE_BINARY_DIR}/packages/apiguardian-api-1.1.1.jar")
|
||||
set(JUNIT_CLASSPATH "${JUNIT_CLASSPATH}:${CMAKE_BINARY_DIR}/packages/junit-platform-console-1.7.1.jar:${CMAKE_BINARY_DIR}/packages/junit-platform-commons-1.7.1.jar")
|
||||
set(JUNIT_CLASSPATH "${JUNIT_CLASSPATH}:${CMAKE_BINARY_DIR}/packages/junit-platform-engine-1.7.1.jar:${CMAKE_BINARY_DIR}/packages/junit-platform-launcher-1.7.1.jar")
|
||||
endif()

if(RUN_JUNIT_TESTS)
  # Sets up the JUnit testing structure to run through ctest
  #
  # To add a new junit test, add the class to the JAVA_JUNIT_TESTS variable in `src/tests.cmake`. Note that if you run a Suite,
  # ctest will NOT display underlying details of the suite itself, so it's best to avoid junit suites in general. Also,
  # if you need a runner other than JUnitCore, you'll have to modify this, so be aware.
  #
  # To run tests (once built), run:
  #
  # ctest .
  #
  # from the ${BUILD_DIR}/bindings/java subdirectory.
  #
  # Note: if you are running from ${BUILD_DIR}, additional tests of the native logic will be run. To avoid these, use
  #
  # ctest . -R java-unit
  #
  # ctest has lots of flexible command options, so be sure to refer to its documentation if you want to do something specific
  # (documentation can be found at https://cmake.org/cmake/help/v3.19/manual/ctest.1.html)

  add_jar(fdb-junit SOURCES ${JAVA_JUNIT_TESTS} ${JUNIT_RESOURCES} INCLUDE_JARS fdb-java
          ${CMAKE_BINARY_DIR}/packages/junit-jupiter-api-5.7.1.jar
          ${CMAKE_BINARY_DIR}/packages/junit-jupiter-engine-5.7.1.jar
          ${CMAKE_BINARY_DIR}/packages/junit-jupiter-params-5.7.1.jar
          ${CMAKE_BINARY_DIR}/packages/opentest4j-1.2.0.jar
          ${CMAKE_BINARY_DIR}/packages/apiguardian-api-1.1.1.jar
  )
  get_property(junit_jar_path TARGET fdb-junit PROPERTY JAR_FILE)

  add_test(NAME java-unit
           COMMAND ${Java_JAVA_EXECUTABLE}
                   -classpath "${target_jar}:${junit_jar_path}:${JUNIT_CLASSPATH}"
                   -Djava.library.path=${CMAKE_BINARY_DIR}/lib
                   org.junit.platform.console.ConsoleLauncher "--details=summary" "--class-path=${junit_jar_path}" "--scan-classpath" "--disable-banner"
  )
endif()

if(RUN_JAVA_INTEGRATION_TESTS)
  # Set up the integration tests. These tests generally require a running database server to function properly. Most tests
  # should be written such that they can be run in parallel with other integration tests (e.g. try to use a unique key range for each test
  # whenever possible), because it's a reasonable assumption that a single server will be shared among multiple tests, and might do so
  # concurrently.
  #
  # Integration tests are run through ctest the same way as unit tests, but their label is prefixed with the entry 'integration-'.
  # Note that most java integration tests will fail if they can't quickly connect to a running FDB instance (depending on how the test is written, anyway).
  # However, if you want to explicitly skip them, you can run
  #
  # `ctest -E integration`
  #
  # To run only integration tests, execute
  #
  # `ctest -R integration`
  #
  # (Note: both of these commands are assumed to be running from the ${BUILD_DIR}/bindings/java directory).
  #
  # To add an integration test, add the relative class file path to the JAVA_INTEGRATION_TESTS variable in `src/tests.cmake`
  #
  # All integration tests share the same fdb cluster, so you should design
  # your test with that in mind (e.g. don't depend on the database being
  # empty, consider generating a random prefix for the keys you write, use
  # the directory layer with a unique path, etc.)
  #
  add_jar(fdb-integration SOURCES ${JAVA_INTEGRATION_TESTS} ${JAVA_INTEGRATION_RESOURCES} INCLUDE_JARS fdb-java
          ${CMAKE_BINARY_DIR}/packages/junit-jupiter-api-5.7.1.jar
          ${CMAKE_BINARY_DIR}/packages/junit-jupiter-engine-5.7.1.jar
          ${CMAKE_BINARY_DIR}/packages/junit-jupiter-params-5.7.1.jar
          ${CMAKE_BINARY_DIR}/packages/opentest4j-1.2.0.jar
          ${CMAKE_BINARY_DIR}/packages/apiguardian-api-1.1.1.jar)
  get_property(integration_jar_path TARGET fdb-integration PROPERTY JAR_FILE)

  # add_fdbclient_test will set FDB_CLUSTER_FILE if it's not set already
  add_fdbclient_test(NAME java-integration
                     COMMAND ${Java_JAVA_EXECUTABLE}
                             -classpath "${target_jar}:${integration_jar_path}:${JUNIT_CLASSPATH}"
                             -Djava.library.path=${CMAKE_BINARY_DIR}/lib
                             org.junit.platform.console.ConsoleLauncher "--details=summary" "--class-path=${integration_jar_path}" "--scan-classpath" "--disable-banner"
  )
endif()
endif()

@@ -19,7 +19,7 @@
  */

 #include <foundationdb/ClientWorkload.h>
-#define FDB_API_VERSION 700
+#define FDB_API_VERSION 710
 #include <foundationdb/fdb_c.h>

 #include <jni.h>
@@ -75,6 +75,9 @@ void printTrace(JNIEnv* env, jclass, jlong logger, jint severity, jstring messag
 		sev = FDBSeverity::Warn;
 	} else if (severity < 40) {
 		sev = FDBSeverity::WarnAlways;
+	} else {
+		assert(false);
+		std::abort();
 	}
 	log->trace(sev, msg, detailsMap);
 	if (isCopy) {
@@ -277,7 +280,8 @@ struct JVM {
 		w.name = name;
 		w.signature = sig;
 		w.fnPtr = std::get<2>(t);
-		log->trace(info, "PreparedNativeMethod",
+		log->trace(info,
+		           "PreparedNativeMethod",
 		           { { "Name", w.name },
 		             { "Signature", w.signature },
 		             { "Ptr", std::to_string(reinterpret_cast<uintptr_t>(w.fnPtr)) } });
@@ -359,7 +363,8 @@ struct JVM {
 			{ "getOption", "(JLjava/lang/String;Z)Z", reinterpret_cast<void*>(&getOptionBool) },
 			{ "getOption", "(JLjava/lang/String;J)J", reinterpret_cast<void*>(&getOptionLong) },
 			{ "getOption", "(JLjava/lang/String;D)D", reinterpret_cast<void*>(&getOptionDouble) },
-			{ "getOption", "(JLjava/lang/String;Ljava/lang/String;)Ljava/lang/String;",
+			{ "getOption",
+			  "(JLjava/lang/String;Ljava/lang/String;)Ljava/lang/String;",
 			  reinterpret_cast<void*>(&getOptionString) },
 			{ "getClientID", "(J)I", reinterpret_cast<void*>(&getClientID) },
 			{ "getClientCount", "(J)I", reinterpret_cast<void*>(&getClientCount) },
@@ -370,7 +375,7 @@ struct JVM {
 		jmethodID selectMethod =
 		    env->GetStaticMethodID(fdbClass, "selectAPIVersion", "(I)Lcom/apple/foundationdb/FDB;");
 		checkException();
-		auto fdbInstance = env->CallStaticObjectMethod(fdbClass, selectMethod, jint(700));
+		auto fdbInstance = env->CallStaticObjectMethod(fdbClass, selectMethod, jint(710));
 		checkException();
 		env->CallObjectMethod(fdbInstance, getMethod(fdbClass, "disableShutdownHook", "()V"));
 		checkException();
@@ -388,7 +393,8 @@ struct JVM {
 		auto impl = env->GetLongField(res, field);
 		checkException();
 		if (impl != jContext) {
-			log->trace(error, "ContextNotCorrect",
+			log->trace(error,
+			           "ContextNotCorrect",
 			           { { "Expected", std::to_string(jContext) }, { "Impl", std::to_string(impl) } });
 			std::terminate();
 		}
@@ -468,14 +474,16 @@ struct JVM {
 	}

 	jobject createDatabase(jobject workload, FDBDatabase* db) {
-		auto executor =
-		    env->CallObjectMethod(workload, getMethod(getClass("com/apple/foundationdb/testing/AbstractWorkload"),
-		                                              "getExecutor", "()Ljava/util/concurrent/Executor;"));
+		auto executor = env->CallObjectMethod(workload,
+		                                      getMethod(getClass("com/apple/foundationdb/testing/AbstractWorkload"),
+		                                                "getExecutor",
+		                                                "()Ljava/util/concurrent/Executor;"));
 		auto databaseClass = getClass("com/apple/foundationdb/FDBDatabase");
 		jlong databasePtr = reinterpret_cast<jlong>(db);
-		jobject javaDatabase =
-		    env->NewObject(databaseClass, getMethod(databaseClass, "<init>", "(JLjava/util/concurrent/Executor;)V"),
-		                   databasePtr, executor);
+		jobject javaDatabase = env->NewObject(databaseClass,
+		                                      getMethod(databaseClass, "<init>", "(JLjava/util/concurrent/Executor;)V"),
+		                                      databasePtr,
+		                                      executor);
 		env->DeleteLocalRef(executor);
 		return javaDatabase;
 	}
@@ -488,9 +496,10 @@
 			jPromise = createPromise(std::move(promise));
 			env->CallVoidMethod(
 			    workload,
-			    getMethod(clazz, method,
-			              "(Lcom/apple/foundationdb/Database;Lcom/apple/foundationdb/testing/Promise;)V"),
-			    jdb, jPromise);
+			    getMethod(
+			        clazz, method, "(Lcom/apple/foundationdb/Database;Lcom/apple/foundationdb/testing/Promise;)V"),
+			    jdb,
+			    jPromise);
 			env->DeleteLocalRef(jdb);
 			env->DeleteLocalRef(jPromise);
 			jPromise = nullptr;
@@ -512,7 +521,7 @@ struct JavaWorkload : FDBWorkload {
 	bool failed = false;
 	jobject workload = nullptr;
 	JavaWorkload(const std::shared_ptr<JVM>& jvm, FDBLogger& log, const std::string& name)
-	    : jvm(jvm), log(log), name(name) {
+	  : jvm(jvm), log(log), name(name) {
 		boost::replace_all(this->name, ".", "/");
 	}
 	~JavaWorkload() {
@@ -585,9 +594,7 @@ struct JavaWorkload : FDBWorkload {
 			log.trace(error, "CheckFailedWithJNIError", { { "Error", e.toString() }, { "Location", e.location() } });
 		}
 	}
-	void getMetrics(std::vector<FDBPerfMetric>& out) const override {
-		jvm->getMetrics(workload, name, out);
-	}
+	void getMetrics(std::vector<FDBPerfMetric>& out) const override { jvm->getMetrics(workload, name, out); }
 };

 struct JavaWorkloadFactory : FDBWorkloadFactory {

@@ -31,7 +31,7 @@ make packages
 #### Multi-Platform Jar-File

 If you want to create a jar file that can run on more than one supported
-architecture (the offical one supports MacOS, Linux, and Windows), you can do
+architecture (the official one supports MacOS, Linux, and Windows), you can do
 that by executing the following steps:

 1. Create a directory called `lib` somewhere on your file system.

(File diff suppressed because it is too large.)

@@ -0,0 +1,25 @@
Adding JUnit tests
===

For java development, it's often useful to use JUnit for testing due to the excellent tooling support.

To add a new unit test, do the following:

1. Write your test (a minimal sketch is shown below).
2. Add the test path to `tests.cmake`, using the relative path starting at `src` (for example, `src/junit/com/apple/foundationdb/tuple/ArrayUtilTests.java` will add the `ArrayUtilTests` test file).
3. re-run the build (both `cmake` and `make/xcode/ninja`)
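
A minimal unit test might look like the following sketch (the class and package names here are hypothetical, purely for illustration; only classes listed in `tests.cmake` are actually built):

```java
package com.apple.foundationdb.tuple;

import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;

// Hypothetical example: a self-contained test with no database dependency.
class ExampleUnitTest {
	@Test
	void additionWorks() throws Exception {
		Assertions.assertEquals(4, 2 + 2, "arithmetic is broken!");
	}
}
```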

To add a new integration test:

1. Write the test, using JUnit (a sketch follows this list).
2. Add the test path to `tests.cmake`, using the relative path starting at `src` (i.e. `src/integration/com/apple/foundationdb/DirectoryTest.java`).
3. re-run the build (both `cmake` and `make/xcode/ninja`)
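
An integration test is written the same way, but is usually annotated with `@ExtendWith(RequiresDatabase.class)` so that it is skipped when no cluster is reachable (this is the pattern `DirectoryTest` uses). The class name below is hypothetical:

```java
package com.apple.foundationdb;

import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;

// Hypothetical example: requires a running FDB cluster, found via the default cluster file.
@ExtendWith(RequiresDatabase.class)
class ExampleIntegrationTest {
	private static final FDB fdb = FDB.selectAPIVersion(710);

	@Test
	void canReadWhatWasWritten() throws Exception {
		try (Database db = fdb.open()) {
			db.run(tr -> {
				tr.set("exampleKey".getBytes(), "exampleValue".getBytes());
				return null;
			});
			byte[] value = db.run(tr -> tr.get("exampleKey".getBytes()).join());
			Assertions.assertArrayEquals("exampleValue".getBytes(), value, "incorrect value!");
		}
	}
}
```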

To run all unit and integration tests, execute `ctest .` from `${BUILD_DIR}/bindings/java`.

To skip integration tests, execute `ctest -E integration` from `${BUILD_DIR}/bindings/java`.

To run _only_ integration tests, run `ctest -R integration` from `${BUILD_DIR}/bindings/java`.

There are lots of other useful `ctest` commands, which we don't need to get into here. For more information,
see the [ctest documentation](https://cmake.org/cmake/help/v3.19/manual/ctest.1.html).

@@ -0,0 +1,214 @@
/*
 * DirectoryTest.java
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.apple.foundationdb;

import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.CompletionException;

import com.apple.foundationdb.directory.DirectoryAlreadyExistsException;
import com.apple.foundationdb.directory.DirectoryLayer;
import com.apple.foundationdb.directory.DirectorySubspace;
import com.apple.foundationdb.directory.NoSuchDirectoryException;

import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;

/**
 * Integration tests for directory logic in FDB. This test requires a running
 * FDB instance to work properly; if one cannot be detected, all tests will be
 * skipped.
 */
@ExtendWith(RequiresDatabase.class)
class DirectoryTest {
	private static final FDB fdb = FDB.selectAPIVersion(710);

	@Test
	void testCanCreateDirectory() throws Exception {
		/*
		 * Simple test to make sure we can create a directory
		 */
		final DirectoryLayer dir = new DirectoryLayer();
		try (Database db = fdb.open()) {

			db.run(tr -> {
				List<String> path = new ArrayList<>();
				path.add("toCreate");
				try {
					DirectorySubspace foo = dir.create(tr, path).join();
					Assertions.assertIterableEquals(path, foo.getPath(), "Incorrect path");
					// make sure it exists
					Assertions.assertTrue(foo.exists(tr).join(), "does not exist even though it's been created!");
				} finally {
					// remove the directory
					dir.remove(tr, path).join();
				}
				return null;
			});
		}
	}

	@Test
	void testCanCreateSubDirectory() throws Exception {
		/*
		 * Test that we can create a subdirectory safely
		 */
		final DirectoryLayer dir = new DirectoryLayer();
		try (Database db = fdb.open()) {

			db.run(tr -> {
				List<String> path = new ArrayList<>();
				path.add("foo");
				try {
					DirectorySubspace foo = dir.create(tr, path).join();
					Assertions.assertIterableEquals(path, foo.getPath(), "Incorrect path");
					// make sure it exists
					Assertions.assertTrue(foo.exists(tr).join(), "does not exist even though it's been created!");

					path.add("bar");
					DirectorySubspace bar = dir.create(tr, path).join();
					Assertions.assertIterableEquals(path, bar.getPath(), "incorrect path");
					Assertions.assertTrue(bar.exists(tr).join(), "does not exist even though it's been created!");
				} finally {
					// remove the directory
					dir.remove(tr, path).join();
				}
				return null;
			});
		}
	}

	@Test
	void testCanMoveSubDirectory() throws Exception {
		/*
		 * Make sure that we can move a subdirectory correctly
		 */
		final DirectoryLayer dir = new DirectoryLayer();
		try (Database db = fdb.open()) {

			db.run(tr -> {
				List<String> path = new ArrayList<>();
				path.add("src");
				try {
					DirectorySubspace foo = dir.create(tr, path).join();
					Assertions.assertIterableEquals(path, foo.getPath(), "Incorrect path");
					// make sure it exists
					Assertions.assertTrue(foo.exists(tr).join(), "does not exist even though it's been created!");

					path.add("bar");
					DirectorySubspace bar = dir.create(tr, path).join();
					Assertions.assertIterableEquals(path, bar.getPath(), "incorrect path");
					Assertions.assertTrue(bar.exists(tr).join(), "does not exist even though it's been created!");

					DirectorySubspace boo = dir.create(tr, Arrays.asList("dest")).join();
					Assertions.assertIterableEquals(Arrays.asList("dest"), boo.getPath(), "incorrect path");
					Assertions.assertTrue(boo.exists(tr).join(), "does not exist even though it's been created!");

					// move the subdirectory and see if it works
					DirectorySubspace newBar = bar.moveTo(tr, Arrays.asList("dest", "bar")).join();
					Assertions.assertIterableEquals(Arrays.asList("dest", "bar"), newBar.getPath(), "incorrect path");
					Assertions.assertTrue(newBar.exists(tr).join(), "does not exist even though it's been created!");
					Assertions.assertFalse(bar.exists(tr).join(), "Still exists in old location!");

				} finally {
					// remove the directory
					dir.remove(tr, Arrays.asList("src")).join();
					try {
						dir.remove(tr, Arrays.asList("dest")).join();
					} catch (CompletionException ce) {
						Throwable t = ce.getCause();
						if (!(t instanceof NoSuchDirectoryException)) {
							throw ce;
						}
					}
				}
				return null;
			});
		}
	}

	@Test
	void testCannotCreateDirectoryTwice() throws Exception {
		/*
		 * Shouldn't be able to create the directory twice--make sure it throws the
		 * proper error if we try.
		 */
		final DirectoryLayer dir = new DirectoryLayer();

		try (Database db = fdb.open()) {

			db.run(tr -> {
				List<String> path = new ArrayList<>();
				path.add("foo");
				try {
					DirectorySubspace foo = dir.createOrOpen(tr, path).join();
					Assertions.assertEquals(path, foo.getPath(), "Incorrect path");
					// make sure it exists
					Assertions.assertTrue(foo.exists(tr).join(), "does not exist even though it's been created!");

					// now try to create it again
					try {
						DirectorySubspace foo2 = dir.create(tr, path).join();
						Assertions.fail("Was able to create a directory twice");
					} catch (DirectoryAlreadyExistsException expected) {
					} catch (CompletionException ce) {
						Throwable t = ce.getCause();
						if (!(t instanceof DirectoryAlreadyExistsException)) {
							throw ce;
						}
					}

				} finally {
					// remove the directory
					dir.remove(tr, path).join();
				}
				return null;
			});
		}
	}

	@Test
	void testCannotRemoveNonexistingDirectory() throws Exception {
		/*
		 * can't remove a directory that's not there--should throw a
		 * NoSuchDirectoryException
		 */
		final DirectoryLayer dir = new DirectoryLayer();

		try (Database db = fdb.open()) {
			db.run(tr -> {
				try {
					dir.remove(tr, Arrays.asList("doesnotexist")).join();
				} catch (CompletionException ce) {
					Throwable t = ce.getCause();
					if (!(t instanceof NoSuchDirectoryException)) {
						throw ce;
					}
				}
				return null;
			});
		}
	}
}

@@ -0,0 +1,189 @@
/*
 * RangeQueryIntegrationTest.java
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.apple.foundationdb;

import java.util.Iterator;
import java.util.Map;
import java.util.NavigableMap;
import java.util.Random;
import java.util.TreeMap;

import com.apple.foundationdb.async.AsyncIterable;
import com.apple.foundationdb.async.AsyncIterator;
import com.apple.foundationdb.tuple.ByteArrayUtil;

import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;

/**
 * Integration tests around Range Queries. This requires a running FDB instance to work properly;
 * all tests will be skipped if it can't connect to a running instance relatively quickly.
 */
@ExtendWith(RequiresDatabase.class)
class RangeQueryIntegrationTest {
	private static final FDB fdb = FDB.selectAPIVersion(710);

	@BeforeEach
	@AfterEach
	void clearDatabase() throws Exception {
		/*
		 * Empty the database before and after each run, just in case
		 */
		try (Database db = fdb.open()) {
			db.run(tr -> {
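				// Note: Range.startsWith(new byte[] { 0x00 }) covers only keys whose
				// first byte is 0x00; keys with other leading bytes are not touched here.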
				tr.clear(Range.startsWith(new byte[] { (byte)0x00 }));
				return null;
			});
		}
	}

	private void loadData(Database db, Map<byte[], byte[]> dataToLoad) {
		db.run(tr -> {
			for (Map.Entry<byte[], byte[]> entry : dataToLoad.entrySet()) {
				tr.set(entry.getKey(), entry.getValue());
			}
			return null;
		});
	}

	@Test
	public void canGetRowWithKeySelector() throws Exception {
		Random rand = new Random();
		byte[] key = new byte[128];
		byte[] value = new byte[128];
		rand.nextBytes(key);
		key[0] = (byte)0xEE;
		rand.nextBytes(value);

		NavigableMap<byte[], byte[]> data = new TreeMap<>(ByteArrayUtil.comparator());
		data.put(key, value);
		try (Database db = fdb.open()) {
			loadData(db, data);
			db.run(tr -> {
				byte[] actualValue = tr.get(key).join();
				Assertions.assertNotNull(actualValue, "Missing key!");
				Assertions.assertArrayEquals(value, actualValue, "incorrect value!");

				KeySelector start = KeySelector.firstGreaterOrEqual(new byte[] { key[0] });
				KeySelector end = KeySelector.firstGreaterOrEqual(ByteArrayUtil.strinc(start.getKey()));
				AsyncIterable<KeyValue> kvIterable = tr.getRange(start, end);
				AsyncIterator<KeyValue> kvs = kvIterable.iterator();

				Assertions.assertTrue(kvs.hasNext(), "Did not return a record!");
				KeyValue n = kvs.next();
				Assertions.assertArrayEquals(key, n.getKey(), "Did not return a key correctly!");
				Assertions.assertArrayEquals(value, n.getValue(), "Did not return the correct value!");

				return null;
			});
		}
	}

	@Test
	void rangeQueryReturnsResults() throws Exception {
		/*
		 * A quick test that if you insert a record, then do a range query which includes
		 * the record, it'll be returned
		 */
		try (Database db = fdb.open()) {
			db.run(tr -> {
				tr.set("vcount".getBytes(), "zz".getBytes());
				return null;
			});

			db.run(tr -> {
				AsyncIterable<KeyValue> kvs = tr.getRange("v".getBytes(), "y".getBytes());
				int cnt = 0;
				for (KeyValue kv : kvs) {
					Assertions.assertArrayEquals("vcount".getBytes(), kv.getKey(), "Incorrect key returned!");
					Assertions.assertArrayEquals("zz".getBytes(), kv.getValue(), "Incorrect value returned!");
					cnt++;
				}
				Assertions.assertEquals(1, cnt, "Incorrect number of KeyValues returned");

				return null;
			});
		}
	}

	@Test
	void rangeQueryReturnsEmptyOutsideRange() throws Exception {
		/*
		 * A quick test that if you insert a record, then do a range query which does
		 * not include the record, it won't be returned
		 */
		try (Database db = fdb.open()) {
			db.run(tr -> {
				tr.set("rangeEmpty".getBytes(), "zz".getBytes());
				return null;
			});

			db.run(tr -> {
				AsyncIterator<KeyValue> kvs = tr.getRange("b".getBytes(), "c".getBytes()).iterator();
				if (kvs.hasNext()) {
					Assertions.fail("Found kvs when it really shouldn't: returned key = " +
					                ByteArrayUtil.printable(kvs.next().getKey()));
				}

				return null;
			});
		}
	}

	@Test
	void rangeQueryOverMultipleRows() throws Exception {
		/*
		 * Make sure that you can return multiple rows if you ask for it.
		 * Hopefully this is large enough to force multiple batches
		 */
		int numRows = 100;
		Map<byte[], byte[]> expectedKvs = new TreeMap<>(ByteArrayUtil.comparator());
		try (Database db = fdb.open()) {
			db.run(tr -> {
				for (int i = 0; i < numRows; i++) {
					byte[] key = ("multiRow" + i).getBytes();
					byte[] value = ("multiValue" + i).getBytes();
					tr.set(key, value);
					expectedKvs.put(key, value);
				}
				return null;
			});

			db.run(tr -> {
				Iterator<KeyValue> kvs = tr.getRange("multi".getBytes(), "multj".getBytes()).iterator();
				Iterator<Map.Entry<byte[], byte[]>> expectedKvIter = expectedKvs.entrySet().iterator();
				while (expectedKvIter.hasNext()) {
					Assertions.assertTrue(kvs.hasNext(), "iterator ended too early");
					KeyValue actualKv = kvs.next();
					Map.Entry<byte[], byte[]> expected = expectedKvIter.next();

					Assertions.assertArrayEquals(expected.getKey(), actualKv.getKey(), "Incorrect key!");
					Assertions.assertArrayEquals(expected.getValue(), actualKv.getValue(), "Incorrect value!");
				}
				Assertions.assertFalse(kvs.hasNext(), "Iterator returned too much data");

				return null;
			});
		}
	}
}

@@ -0,0 +1,101 @@
/*
 * RequiresDatabase.java
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.apple.foundationdb;

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.extension.BeforeAllCallback;
import org.junit.jupiter.api.extension.ConditionEvaluationResult;
import org.junit.jupiter.api.extension.ExecutionCondition;
import org.junit.jupiter.api.extension.ExtensionContext;
import org.opentest4j.TestAbortedException;

/**
 * Extension to make it easy to write integration tests that only work when a running
 * database is detected and connectable using the default cluster file. Use this
 * via {@code @ExtendWith(RequiresDatabase.class)} on any integration test that
 * requires a running database.
 *
 * This will attempt to connect to an FDB instance and perform a basic
 * operation. If it can do so quickly, then it will go ahead and run the
 * underlying test statement. If it cannot perform a basic operation against the
 * running DB, then it will throw an error and fail all tests.
 *
 * There is a second safety valve--you can also set the system property
 * `run.integration.tests` to false. If it's set, then all tests will just be
 * skipped outright, without trying to connect. This is useful for when you know you won't
 * be running a server and you don't want to deal with spurious test failures.
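 *
 * For example (the test class name below is hypothetical; DirectoryTest follows
 * the same pattern):
 * <pre>{@code
 * @ExtendWith(RequiresDatabase.class)
 * class MyIntegrationTest { ... }
 * }</pre>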
 */
public class RequiresDatabase implements ExecutionCondition, BeforeAllCallback {

	public static boolean canRunIntegrationTest() {
		String prop = System.getProperty("run.integration.tests");
		if (prop == null) {
			return true;
		}
		return Boolean.parseBoolean(prop);
	}

	@Override
	public ConditionEvaluationResult evaluateExecutionCondition(ExtensionContext context) {
		if (canRunIntegrationTest()) {
			return ConditionEvaluationResult.enabled("Database is running");
		} else {
			return ConditionEvaluationResult.disabled("Database is not running");
		}
	}

	@Override
	public void beforeAll(ExtensionContext context) throws Exception {
		/*
		 * This is in place to validate that a database is actually running. If it can't connect
		 * within a pretty short timeout, then the tests automatically fail.
		 *
		 * This is in place mainly to fail-fast in the event of bad configurations; specifically, if the property
		 * is set to true (or absent), but a backing server isn't actually running. When that happens, this check avoids
		 * a long hang-time while waiting for the first database connection to finally timeout (which could take a
		 * while, based on empirical observation)
		 *
		 * Note that JUnit will only call this method _after_ calling evaluateExecutionCondition(), so we can safely
		 * assume that if we are here, then canRunIntegrationTest() is returning true and we don't have to bother
		 * checking it.
		 */
		try (Database db = FDB.selectAPIVersion(710).open()) {
			db.run(tr -> {
				CompletableFuture<byte[]> future = tr.get("test".getBytes());

				try {
					return future.get(100, TimeUnit.MILLISECONDS);
				} catch (TimeoutException te) {
					Assertions.fail("Test " + context.getDisplayName() +
					                " failed to start: cannot connect to the database within the timeout");
					return null; // should never happen
				} catch (InterruptedException e) {
					throw new TestAbortedException("Interrupted during setup, skipping test");
				} catch (ExecutionException e) {
					throw new RuntimeException(e.getCause());
				}
			});
		}
	}
}

@@ -0,0 +1,127 @@
/*
 * EventKeeperTest.java
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.apple.foundationdb;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import com.apple.foundationdb.EventKeeper.Events;
import com.apple.foundationdb.async.AsyncIterator;
import com.apple.foundationdb.tuple.ByteArrayUtil;

import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Disabled;
import org.junit.jupiter.api.Test;

/**
 * Basic tests for the Transaction Timer logic.
 *
 * These tests don't check for a whole lot; they just verify that
 * instrumentation works as expected for specific patterns.
 */
class EventKeeperTest {

	@Test
	@Disabled("Ignored because ctest will actually add the library and cause this to segfault")
	void testSetVersion() throws Exception {

		EventKeeper timer = new MapEventKeeper();

		try (FDBTransaction txn = new FDBTransaction(1, null, null, timer)) {
			Assertions.assertThrows(UnsatisfiedLinkError.class,
			                        () -> { txn.setReadVersion(1L); }, "Test should call a bad native method");
			long jniCalls = timer.getCount(Events.JNI_CALL);

			Assertions.assertEquals(1L, jniCalls, "Unexpected number of JNI calls:");
		} catch (UnsatisfiedLinkError ignored) {
			// this is necessary to prevent an exception being thrown at close time
		}
	}

	@Test
	@Disabled("Ignored because ctest will actually add the library and cause this to segfault")
	void testGetReadVersion() throws Exception {
		EventKeeper timer = new MapEventKeeper();

		try (FDBTransaction txn = new FDBTransaction(1, null, null, timer)) {
			Assertions.assertThrows(UnsatisfiedLinkError.class,
			                        () -> { txn.getReadVersion(); }, "Test should call a bad native method");
			long jniCalls = timer.getCount(Events.JNI_CALL);

			Assertions.assertEquals(1L, jniCalls, "Unexpected number of JNI calls:");
		} catch (UnsatisfiedLinkError ignored) {
			// required to prevent an extra exception being thrown at close time
		}
	}

	@Test
	void testGetRangeRecordsFetches() throws Exception {
		EventKeeper timer = new MapEventKeeper();
		List<KeyValue> testKvs = Arrays.asList(new KeyValue("hello".getBytes(), "goodbye".getBytes()));

		FDBTransaction txn = new FakeFDBTransaction(testKvs, 1L, null, null);

		RangeQuery query = new RangeQuery(txn, true, KeySelector.firstGreaterOrEqual(new byte[] { 0x00 }),
		                                  KeySelector.firstGreaterOrEqual(new byte[] { (byte)0xFF }), -1, false,
		                                  StreamingMode.ITERATOR, timer);
		AsyncIterator<KeyValue> iter = query.iterator();

		List<KeyValue> iteratedItems = new ArrayList<>();
		while (iter.hasNext()) {
			iteratedItems.add(iter.next());
		}

		// basic verification that we got back what we expected to get back.
		Assertions.assertEquals(testKvs.size(), iteratedItems.size(), "Incorrect iterated list, size incorrect.");

		int expectedByteSize = 0;
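		// Each entry is counted as key + value bytes plus 4 bytes apiece; the +4
		// presumably mirrors a per-item length field in how BYTES_FETCHED is
		// accounted (an assumption inferred from this test's expectations).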
		for (KeyValue expected : testKvs) {
			byte[] eKey = expected.getKey();
			byte[] eVal = expected.getValue();
			expectedByteSize += eKey.length + 4;
			expectedByteSize += eVal.length + 4;
			boolean found = false;
			for (KeyValue actual : iteratedItems) {
				byte[] aKey = actual.getKey();
				byte[] aVal = actual.getValue();
				if (ByteArrayUtil.compareTo(eKey, 0, eKey.length, aKey, 0, aKey.length) == 0) {
					int cmp = ByteArrayUtil.compareTo(eVal, 0, eVal.length, aVal, 0, aVal.length);
					Assertions.assertEquals(0, cmp, "Incorrect value returned");
					found = true;
					break;
				}
			}

			Assertions.assertTrue(found, "missing key!");
		}

		// now check the timer and see if it recorded any events
		Assertions.assertEquals(1, timer.getCount(Events.RANGE_QUERY_FETCHES), "Unexpected number of chunk fetches");
		Assertions.assertEquals(testKvs.size(), timer.getCount(Events.RANGE_QUERY_RECORDS_FETCHED),
		                        "Unexpected number of tuples fetched");
		Assertions.assertEquals(expectedByteSize, timer.getCount(Events.BYTES_FETCHED),
		                        "Incorrect number of bytes fetched");
	}

}

@@ -0,0 +1,50 @@
/*
 * FDBLibraryRule.java
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.apple.foundationdb;

import org.junit.jupiter.api.extension.BeforeAllCallback;
import org.junit.jupiter.api.extension.ExtensionContext;

/**
 * A Rule that pre-loads the native library on demand, for testing purposes.
 * This is mainly for convenience to avoid needing to worry about which version
 * number is correct and whatnot. It will fail to work if the native libraries
 * are not available for any reason.
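 *
 * One way to hook it up (the field name below is hypothetical) is JUnit 5's
 * programmatic extension registration:
 * <pre>{@code
 * @RegisterExtension
 * static FDBLibraryRule fdbLib = FDBLibraryRule.current();
 * }</pre>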
 */
public class FDBLibraryRule implements BeforeAllCallback {
	private final int apiVersion;

	// because FDB is a singleton (currently), this isn't a super-useful cache,
	// but it does make life slightly easier, so we'll keep it around
	private FDB instance;

	public FDBLibraryRule(int apiVersion) { this.apiVersion = apiVersion; }

	public static FDBLibraryRule current() { return new FDBLibraryRule(710); }

	public static FDBLibraryRule v63() { return new FDBLibraryRule(630); }

	public FDB get() { return instance; }

	@Override
	public void beforeAll(ExtensionContext arg0) throws Exception {
		instance = FDB.selectAPIVersion(apiVersion);
	}
}

@@ -0,0 +1,162 @@
/*
 * FakeFDBTransaction.java
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.apple.foundationdb;

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.NavigableMap;
import java.util.TreeMap;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Executor;

import com.apple.foundationdb.tuple.ByteArrayUtil;

/**
 * A convenience class that makes it easier to construct a mock FDBTransaction.
 * This class does no native library calls: instead, it seeks to mimic (in a
 * simplistic way) what the FDB native API promises. The intent is to make it
 * easier to unit test specific java code without requiring a running server or
 * needing to test the entire C library as well.
 *
 * Note that this is a bit of a work-in-progress. The idea here is to use this
 * where handy, and discover (in the process) which native calls are being made,
 * then modify this class as appropriate to avoid them.
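 *
 * Typical construction (mirroring how EventKeeperTest builds one):
 * <pre>{@code
 * FDBTransaction txn = new FakeFDBTransaction(testKvs, 1L, null, null);
 * }</pre>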
 */
public class FakeFDBTransaction extends FDBTransaction {
	private final NavigableMap<byte[], byte[]> backingData;
	private final Executor executor;

	private int numRangeCalls = 0;

	protected FakeFDBTransaction(long cPtr, Database database, Executor executor) {
		super(cPtr, database, executor);
		this.backingData = new TreeMap<>(ByteArrayUtil.comparator());
		this.executor = executor;
	}

	public FakeFDBTransaction(Map<byte[], byte[]> backingData, long cPtr, Database db, Executor executor) {
		this(cPtr, db, executor);
		this.backingData.putAll(backingData);
	}

	public FakeFDBTransaction(Collection<Map.Entry<byte[], byte[]>> backingData, long cPtr, Database db,
	                          Executor executor) {
		this(cPtr, db, executor);

		for (Map.Entry<byte[], byte[]> entry : backingData) {
			this.backingData.put(entry.getKey(), entry.getValue());
		}
	}

	public FakeFDBTransaction(List<KeyValue> backingData, long cPtr, Database db, Executor executor) {
		this(cPtr, db, executor);

		for (KeyValue entry : backingData) {
			this.backingData.put(entry.getKey(), entry.getValue());
		}
	}

	@Override
	public CompletableFuture<byte[]> get(byte[] key) {
		return CompletableFuture.completedFuture(this.backingData.get(key));
	}

	/**
	 * @return the number of times getRange_internal() was called. Useful for
	 *         checking underlying behavior.
	 */
	public int getNumRangeCalls() { return numRangeCalls; }

	@Override
	protected FutureResults getRange_internal(KeySelector begin, KeySelector end, int rowLimit, int targetBytes,
	                                          int streamingMode, int iteration, boolean isSnapshot, boolean reverse) {
		numRangeCalls++;
		// TODO this is probably not correct for all KeySelector instances--we'll want to match with real behavior
		NavigableMap<byte[], byte[]> range =
		    backingData.subMap(begin.getKey(), begin.orEqual(), end.getKey(), end.orEqual());
		if (reverse) {
			// reverse the order of the scan
			range = range.descendingMap();
		}

		// holder variable so that we can pass the range to the results function safely
		final NavigableMap<byte[], byte[]> retMap = range;
		FutureResults fr = new FutureResults(-1L, false, executor, null) {
			@Override
			protected void registerMarshalCallback(Executor executor) {
				// no-op
			}

			@Override
			protected RangeResultInfo getIfDone_internal(long cPtr) throws FDBException {
				return new RangeResultInfo(this);
			}

			@Override
			public RangeResult getResults() {
				List<KeyValue> kvs = new ArrayList<>();
				boolean more = false;
				int rowCount = 0;
				int sizeBytes = 0;
				for (Map.Entry<byte[], byte[]> kvEntry : retMap.entrySet()) {
					kvs.add(new KeyValue(kvEntry.getKey(), kvEntry.getValue()));
					rowCount++;
					if (rowLimit > 0 && rowCount == rowLimit) {
						more = true;
						break;
					}
					sizeBytes += kvEntry.getKey().length + kvEntry.getValue().length;
					if (targetBytes > 0 && sizeBytes >= targetBytes) {
						more = true;
						break;
					}
				}

				return new RangeResult(kvs, more);
			}

			@Override
			public void close() {
				// no-op
			}
		};

		fr.complete(new RangeResultInfo(fr));
		return fr;
	}

	@Override
	protected void closeInternal(long cPtr) {
		// no-op
	}

	@Override
	public void close() {
		// no-op
	}

	@Override
	protected void finalize() throws Throwable {
		// no-op
	}
}

@@ -0,0 +1,272 @@
/*
 * RangeQueryTest.java
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.apple.foundationdb;

import java.util.AbstractMap;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.NavigableMap;
import java.util.TreeMap;
import java.util.concurrent.Executor;

import com.apple.foundationdb.async.AsyncIterable;
import com.apple.foundationdb.tuple.ByteArrayUtil;

import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.EnumSource;

/**
 * Tests around the Range Query logic.
 *
 * These tests do _not_ require a running FDB server to function. Instead, we
 * are operating on a good-faith "The underlying native library is correct"
 * functionality. For end-to-end tests which require a running server, see the
 * src/tests source folder.
 */
class RangeQueryTest {
	private static final Executor EXECUTOR = new Executor() {
		@Override
		public void execute(Runnable command) {
			command.run();
		}
	};

	private static FDBDatabase makeFakeDatabase(List<Map.Entry<byte[], byte[]>> data) {
		return new FDBDatabase(1, EXECUTOR) {
			// the choice of 3 is arbitrary, just trying to make sure it's unique(ish) in case we
			// need to test that uniqueness later.
			private long txnCounter = 3;

			@Override
			public Transaction createTransaction() {
				long tId = txnCounter;
				txnCounter++;
				return new FakeFDBTransaction(data, tId, this, EXECUTOR);
			}

			@Override
			protected void finalize() throws Throwable {
				// no-op
			}

			@Override
			public void close() {
				// no-op
			}
		};
	}

	@ParameterizedTest
	@EnumSource(StreamingMode.class)
	void testRangeScansWorkWithoutRowLimit(StreamingMode mode) throws Exception {
		/*
		 * Test that the Range scan will return all the rows without the row limit.
		 */
		List<Map.Entry<byte[], byte[]>> data = new ArrayList<>();
		for (int i = 0; i < 10; i++) {
			data.add(new AbstractMap.SimpleEntry<>(("apple" + i).getBytes(), ("crunchy" + i).getBytes()));
		}

		try (Database db = makeFakeDatabase(data)) {
			try (Transaction tr = db.createTransaction()) {
				byte[] val = tr.get("apple4".getBytes()).join();
				Assertions.assertNotNull(val, "Missing entry for 'apple4'!");
				Assertions.assertArrayEquals(val, "crunchy4".getBytes(), "incorrect entry for 'apple4'!");

				// now do a range scan on the whole data set
				AsyncIterable<KeyValue> iter = tr.getRange("a".getBytes(), "b".getBytes(), 0, false, mode);
				List<KeyValue> kvs = iter.asList().join();
				for (Map.Entry<byte[], byte[]> entry : data) {
					boolean found = false;
					for (KeyValue actualKv : kvs) {
						if (ByteArrayUtil.compareTo(entry.getKey(), 0, entry.getKey().length, actualKv.getKey(), 0,
						                            actualKv.getKey().length) == 0) {
							String errorMsg =
							    String.format("Incorrect value for key '%s'; Expected: <%s>, Actual: <%s>",
							                  new String(entry.getKey()), new String(entry.getValue()),
							                  new String(actualKv.getValue()));
							Assertions.assertEquals(
							    0,
							    ByteArrayUtil.compareTo(entry.getValue(), 0, entry.getValue().length,
							                            actualKv.getValue(), 0, actualKv.getValue().length),
							    errorMsg);
							found = true;
							break;
						}
					}
					Assertions.assertTrue(found, "Did not find key '" + new String(entry.getKey()) + "'");
				}
			}
		}
	}

	@ParameterizedTest
	@EnumSource(StreamingMode.class)
	void testRangeScansWorkWithRowLimit(StreamingMode mode) throws Exception {
		/*
		 * Basic test to make sure that we don't ask for too many records or return too
		 * much data when exercising the row limit
		 */
		List<Map.Entry<byte[], byte[]>> data = new ArrayList<>();
		for (int i = 0; i < 10; i++) {
			data.add(new AbstractMap.SimpleEntry<>(("apple" + i).getBytes(), ("crunchy" + i).getBytes()));
		}

		try (Database db = makeFakeDatabase(data)) {
			try (Transaction tr = db.createTransaction()) {
				byte[] val = tr.get("apple4".getBytes()).join();
				Assertions.assertNotNull(val, "Missing entry for 'apple4'!");
				Assertions.assertArrayEquals(val, "crunchy4".getBytes(), "incorrect entry for 'apple4'!");

				// now do a range scan on the whole data set
				int limit = 3;
				AsyncIterable<KeyValue> iter = tr.getRange("a".getBytes(), "b".getBytes(), limit, false, mode);
				List<KeyValue> kvs = iter.asList().join();
				Assertions.assertEquals(limit, kvs.size(), "incorrect number of kvs returned!");
				int cnt = 0;
				for (Map.Entry<byte[], byte[]> entry : data) {
					boolean found = false;
					for (KeyValue actualKv : kvs) {
						if (ByteArrayUtil.compareTo(entry.getKey(), 0, entry.getKey().length, actualKv.getKey(), 0,
						                            actualKv.getKey().length) == 0) {
							String errorMsg = String.format("Incorrect value for key '%s'; Expected: <%s>, Actual: <%s>",
							                                new String(entry.getKey()), new String(entry.getValue()),
							                                new String(actualKv.getValue()));
							Assertions.assertEquals(
							    0,
							    ByteArrayUtil.compareTo(entry.getValue(), 0, entry.getValue().length,
							                            actualKv.getValue(), 0, actualKv.getValue().length),
							    errorMsg);
							found = true;
							break;
						}
					}
					Assertions.assertTrue(found, "Did not find key '" + new String(entry.getKey()) + "'");
					cnt++;
					if (cnt == limit) {
						break;
					}
				}

				Assertions.assertEquals(1, ((FakeFDBTransaction)tr).getNumRangeCalls(),
				                        "Did not do the correct number of range requests");
			}
		}
	}

	@ParameterizedTest
	@EnumSource(StreamingMode.class)
	void testRangeScansWorkWithoutRowLimitReversed(StreamingMode mode) throws Exception {
		/*
		 * Test that the Range scan will return all the rows without the row limit.
		 */
		List<Map.Entry<byte[], byte[]>> data = new ArrayList<>();
		for (int i = 0; i < 10; i++) {
			data.add(new AbstractMap.SimpleEntry<>(("apple" + i).getBytes(), ("crunchy" + i).getBytes()));
		}

		try (Database db = makeFakeDatabase(data)) {
			try (Transaction tr = db.createTransaction()) {
				byte[] val = tr.get("apple4".getBytes()).join();
				Assertions.assertNotNull(val, "Missing entry for 'apple4'!");
				Assertions.assertArrayEquals(val, "crunchy4".getBytes(), "incorrect entry for 'apple4'!");

				// now do a range scan on the whole data set
				AsyncIterable<KeyValue> iter = tr.getRange("a".getBytes(), "b".getBytes(), 0, true, mode);
				List<KeyValue> kvs = iter.asList().join();
				for (Map.Entry<byte[], byte[]> entry : data) {
					boolean found = false;
					for (KeyValue actualKv : kvs) {
						if (ByteArrayUtil.compareTo(entry.getKey(), 0, entry.getKey().length, actualKv.getKey(), 0,
						                            actualKv.getKey().length) == 0) {
							String errorMsg = String.format("Incorrect value for key '%s'; Expected: <%s>, Actual: <%s>",
							                                new String(entry.getKey()), new String(entry.getValue()),
							                                new String(actualKv.getValue()));
							Assertions.assertEquals(
							    0,
							    ByteArrayUtil.compareTo(entry.getValue(), 0, entry.getValue().length,
							                            actualKv.getValue(), 0, actualKv.getValue().length),
							    errorMsg);
							found = true;
							break;
						}
					}
					Assertions.assertTrue(found, "Did not find key '" + new String(entry.getKey()) + "'");
				}
			}
		}
	}

	@ParameterizedTest
	@EnumSource(StreamingMode.class)
	void testRangeScansWorkWithRowLimitReversed(StreamingMode mode) throws Exception {
		/*
		 * Basic test to make sure that we don't ask for too many records or return too
		 * much data when exercising the row limit
		 */
		NavigableMap<byte[], byte[]> data = new TreeMap<>(ByteArrayUtil.comparator());
		for (int i = 0; i < 10; i++) {
			data.put(("apple" + i).getBytes(), ("crunchy" + i).getBytes());
		}

		try (Database db = makeFakeDatabase(new ArrayList<>(data.entrySet()))) {
			try (Transaction tr = db.createTransaction()) {
				byte[] val = tr.get("apple4".getBytes()).join();
				Assertions.assertNotNull(val, "Missing entry for 'apple4'!");
				Assertions.assertArrayEquals(val, "crunchy4".getBytes(), "incorrect entry for 'apple4'!");

				// now do a range scan on the whole data set
				int limit = 3;
				AsyncIterable<KeyValue> iter = tr.getRange("a".getBytes(), "b".getBytes(), limit, true, mode);
				List<KeyValue> kvs = iter.asList().join();
				Assertions.assertEquals(limit, kvs.size(), "incorrect number of kvs returned!");
				int cnt = 0;
				for (Map.Entry<byte[], byte[]> entry : data.descendingMap().entrySet()) {
					boolean found = false;
					for (KeyValue actualKv : kvs) {
						if (ByteArrayUtil.compareTo(entry.getKey(), 0, entry.getKey().length, actualKv.getKey(), 0,
						                            actualKv.getKey().length) == 0) {
							String errorMsg = String.format("Incorrect value for key '%s'; Expected: <%s>, Actual: <%s>",
							                                new String(entry.getKey()), new String(entry.getValue()),
							                                new String(actualKv.getValue()));
							Assertions.assertEquals(
							    0,
							    ByteArrayUtil.compareTo(entry.getValue(), 0, entry.getValue().length,
							                            actualKv.getValue(), 0, actualKv.getValue().length),
							    errorMsg);
							found = true;
							break;
						}
					}
					Assertions.assertTrue(found, "Did not find key '" + new String(entry.getKey()) + "'");
					cnt++;
					if (cnt == limit) {
						break;
					}
				}

				Assertions.assertEquals(1, ((FakeFDBTransaction)tr).getNumRangeCalls(),
				                        "Did not do the correct number of range requests");
			}
		}
	}
}

@ -0,0 +1,109 @@
|
|||
/*
|
||||
* ArrayUtilSortTests.java
|
||||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package com.apple.foundationdb.tuple;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.Random;
|
||||
|
||||
import org.junit.jupiter.api.Assertions;
|
||||
import org.junit.jupiter.api.BeforeAll;
|
||||
import org.junit.jupiter.api.Test;
|
||||
|
||||
/**
|
||||
* Tests relating to sorting
|
||||
*/
|
||||
class ArrayUtilSortTest {
|
||||
|
||||
private static final int SAMPLE_COUNT = 100000;
|
||||
private static final int SAMPLE_MAX_SIZE = 2048;
|
||||
private static List<byte[]> unsafe;
|
||||
private static List<byte[]> java;
|
||||
|
||||
@BeforeAll
|
||||
static void initTestClass() {
|
||||
unsafe = new ArrayList<>(SAMPLE_COUNT);
|
||||
java = new ArrayList<>(SAMPLE_COUNT);
|
||||
Random random = new Random();
        for (int i = 0; i <= SAMPLE_COUNT; i++) {
            byte[] addition = new byte[random.nextInt(SAMPLE_MAX_SIZE)];
            random.nextBytes(addition);
            unsafe.add(addition);
            java.add(addition);
        }
    }

    @Test
    void testUnsafeSortSorts() throws Exception {
        /*
         * We want to test whether or not our comparator works, but that's hard to do
         * with a byte[] comparator because there isn't a canonical comparator to work
         * with, so any direct comparison would be written here and just have a
         * potential for breaking. To avoid that, we just compare our two different
         * implementations and make sure that they agree
         */
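        // (differential testing: a disagreement between the two sort orders would flag
        // a bug in one of the comparator implementations without needing a reference)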
        // sort it using unsafe logic
        Collections.sort(unsafe, FastByteComparisons.lexicographicalComparerUnsafeImpl());
        Collections.sort(java, FastByteComparisons.lexicographicalComparerJavaImpl());

        Assertions.assertEquals(java.size(), unsafe.size(), "unsafe and java comparators disagree");
        for (int i = 0; i < java.size(); i++) {
            Assertions.assertArrayEquals(java.get(i), unsafe.get(i), "[pos " + i + "]: comparators disagree");
        }
    }

    @Test
    void testUnsafeComparison() {
        for (int i = 0; i < unsafe.size(); i++) {
            Assertions.assertEquals(
                0, FastByteComparisons.lexicographicalComparerUnsafeImpl().compare(unsafe.get(i), java.get(i)));
        }
    }

    @Test
    void testJavaComparison() {
        for (int i = 0; i < unsafe.size(); i++) {
            Assertions.assertEquals(
                0, FastByteComparisons.lexicographicalComparerJavaImpl().compare(unsafe.get(i), java.get(i)));
        }
    }

    @Test
    void testUnsafeComparisonWithOffset() {
        for (int i = 0; i < unsafe.size(); i++) {
            if (unsafe.get(i).length > 5)
                Assertions.assertEquals(
                    0, FastByteComparisons.lexicographicalComparerUnsafeImpl().compareTo(
                           unsafe.get(i), 4, unsafe.get(i).length - 4, java.get(i), 4, java.get(i).length - 4));
        }
    }

    @Test
    void testJavaComparisonWithOffset() {
        for (int i = 0; i < unsafe.size(); i++) {
            if (unsafe.get(i).length > 5)
                Assertions.assertEquals(
                    0, FastByteComparisons.lexicographicalComparerJavaImpl().compareTo(
                           unsafe.get(i), 4, unsafe.get(i).length - 4, java.get(i), 4, java.get(i).length - 4));
        }
    }
}
@ -0,0 +1,402 @@
/*
 * ArrayUtilTests.java
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.apple.foundationdb.tuple;

import static org.junit.jupiter.api.Assertions.assertArrayEquals;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.fail;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Disabled;
import org.junit.jupiter.api.Test;

/**
 * @author Ben
 *
 */
class ArrayUtilTest {

    /**
     * Test method for {@link ByteArrayUtil#join(byte[], java.util.List)}.
     */
    @Test
    void testJoinByteArrayListOfbyte() {
        byte[] a = new byte[] { 'a', 'b', 'c' };
        byte[] b = new byte[] { 'd', 'e', 'f' };

        List<byte[]> parts = new ArrayList<byte[]>();
        parts.add(a);
        parts.add(b);
        parts.add(new byte[] {});
        byte[] result = new byte[] { 'a', 'b', 'c', 'z', 'd', 'e', 'f', 'z' };
        assertArrayEquals(result, ByteArrayUtil.join(new byte[] { 'z' }, parts));

        parts = new ArrayList<byte[]>();
        parts.add(new byte[] {});
        parts.add(a);
        parts.add(b);
        result = new byte[] { 'z', 'a', 'b', 'c', 'z', 'd', 'e', 'f' };
        assertArrayEquals(result, ByteArrayUtil.join(new byte[] { 'z' }, parts));

        parts = new ArrayList<byte[]>();
        parts.add(new byte[] {});
        parts.add(new byte[] {});
        parts.add(a);
        parts.add(b);
        result = new byte[] { 'z', 'z', 'a', 'b', 'c', 'z', 'd', 'e', 'f' };
        assertArrayEquals(result, ByteArrayUtil.join(new byte[] { 'z' }, parts));

        parts = new ArrayList<byte[]>();
        parts.add(a);
        parts.add(new byte[] {});
        parts.add(new byte[] {});
        parts.add(b);
        result = new byte[] { 'a', 'b', 'c', 'z', 'z', 'z', 'd', 'e', 'f' };
        assertArrayEquals(result, ByteArrayUtil.join(new byte[] { 'z' }, parts));

        parts = new ArrayList<byte[]>();
        parts.add(a);
        parts.add(b);
        parts.add(new byte[] { 'b' });
        result = new byte[] { 'a', 'b', 'c', 'z', 'd', 'e', 'f', 'z', 'b' };
        assertArrayEquals(result, ByteArrayUtil.join(new byte[] { 'z' }, parts));

        parts = new ArrayList<byte[]>();
        parts.add(new byte[] {});
        parts.add(new byte[] {});
        parts.add(new byte[] {});
        result = new byte[] { 'z', 'z' };
        assertArrayEquals(result, ByteArrayUtil.join(new byte[] { 'z' }, parts));

        parts = new ArrayList<byte[]>();
        parts.add(new byte[] {});
        parts.add(new byte[] {});
        parts.add(new byte[] {});
        result = new byte[] {};
        assertArrayEquals(result, ByteArrayUtil.join(null, parts));
    }

    /**
     * Test method for {@link ByteArrayUtil#join(byte[][])}.
     */
    @Test
    void testJoinByteArrayArray() {
        byte[] a = new byte[] { 'a', 'b', 'c' };
        byte[] b = new byte[] { 'd', 'e', 'f' };

        List<byte[]> parts = new ArrayList<byte[]>();
        parts.add(a);
        parts.add(b);
        parts.add(new byte[] {});
        byte[] result = new byte[] { 'a', 'b', 'c', 'd', 'e', 'f' };
        assertArrayEquals(result, ByteArrayUtil.join(parts.toArray(new byte[][] {})));

        parts = new ArrayList<byte[]>();
        parts.add(new byte[] {});
        parts.add(a);
        parts.add(b);
        result = new byte[] { 'a', 'b', 'c', 'd', 'e', 'f' };
        assertArrayEquals(result, ByteArrayUtil.join(parts.toArray(new byte[][] {})));

        parts = new ArrayList<byte[]>();
        parts.add(new byte[] {});
        parts.add(new byte[] {});
        parts.add(new byte[] {});
        parts.add(a);
        parts.add(b);
        parts.add(new byte[] {});
        parts.add(new byte[] {});
        parts.add(new byte[] { 'b' });
        parts.add(new byte[] {});
        parts.add(new byte[] {});
        parts.add(new byte[] {});
        parts.add(new byte[] {});
        result = new byte[] { 'a', 'b', 'c', 'd', 'e', 'f', 'b' };
        assertArrayEquals(result, ByteArrayUtil.join(parts.toArray(new byte[][] {})));

        parts = new ArrayList<byte[]>();
        parts.add(a);
        parts.add(b);
        parts.add(new byte[] { 'b' });
        result = new byte[] { 'a', 'b', 'c', 'd', 'e', 'f', 'b' };
        assertArrayEquals(result, ByteArrayUtil.join(parts.toArray(new byte[][] {})));

        // Self-referential, with conversion to array
        parts = new ArrayList<byte[]>();
        parts.add(new byte[] {});
        parts.add(a);
        parts.add(b);
        parts.add(new byte[] {});
        assertArrayEquals(ByteArrayUtil.join(a, b), ByteArrayUtil.join(parts.toArray(new byte[][] {})));

        // Test exception on null elements
        boolean isError = false;
        try {
            ByteArrayUtil.join(a, b, null);
        } catch (Exception e) {
            isError = true;
        } finally {
            assertTrue(isError);
        }
    }

    /**
     * Test method for {@link ByteArrayUtil#regionEquals(byte[], int, byte[])}.
     */
    @Test
    void testRegionEquals() {
        byte[] src = new byte[] { 'a', (byte)12, (byte)255, 'n', 'm', 'z', 'k' };
        assertTrue(ByteArrayUtil.regionEquals(src, 3, new byte[] { 'n', 'm' }));

        assertFalse(ByteArrayUtil.regionEquals(src, 2, new byte[] { 'n', 'm' }));

        assertTrue(ByteArrayUtil.regionEquals(null, 0, null));

        assertFalse(ByteArrayUtil.regionEquals(src, 0, null));
    }

    /**
     * Test method for {@link ByteArrayUtil#replace(byte[], byte[], byte[])}.
     */
    @Test
    void testReplace() {
        byte[] a = new byte[] { 'a', 'b', 'c' };
        byte[] b = new byte[] { 'd', 'e', 'f' };

        byte[] src = ByteArrayUtil.join(a, b, a, b);
        byte[] result = new byte[] { 'z', 'd', 'e', 'f', 'z', 'd', 'e', 'f' };
        assertArrayEquals(result, ByteArrayUtil.replace(src, a, new byte[] { 'z' }));

        src = ByteArrayUtil.join(a, b, a, b);
        assertArrayEquals(ByteArrayUtil.join(b, b), ByteArrayUtil.replace(src, a, new byte[] {}));

        src = ByteArrayUtil.join(a, b, a, b);
        assertArrayEquals(ByteArrayUtil.join(a, a), ByteArrayUtil.replace(src, b, new byte[] {}));

        src = ByteArrayUtil.join(a, a, a);
        assertArrayEquals(new byte[] {}, ByteArrayUtil.replace(src, a, new byte[] {}));
    }

    /**
     * Test method for {@link ByteArrayUtil#split(byte[], byte[])}.
     */
    @Test
    void testSplit() {
        byte[] a = new byte[] { 'a', 'b', 'c' };
        byte[] b = new byte[] { 'd', 'e', 'f' };

        byte[] src = ByteArrayUtil.join(a, b, a, b, a);
        List<byte[]> parts = ByteArrayUtil.split(src, b);
        assertEquals(parts.size(), 3);
        for (byte[] p : parts) {
            assertArrayEquals(a, p);
        }

        src = ByteArrayUtil.join(b, a, b, a, b, a);
        parts = ByteArrayUtil.split(src, b);
        assertEquals(parts.size(), 4);
        int counter = 0;
        for (byte[] p : parts) {
            if (counter++ == 0)
                assertArrayEquals(new byte[] {}, p);
            else
                assertArrayEquals(a, p);
        }

        src = ByteArrayUtil.join(a, b, a, b, a, b);
        parts = ByteArrayUtil.split(src, b);
        assertEquals(parts.size(), 4);
        counter = 0;
        for (byte[] p : parts) {
            if (counter++ < 3)
                assertArrayEquals(a, p);
            else
                assertArrayEquals(new byte[] {}, p);
        }

        // Multiple ending delimiters
        src = ByteArrayUtil.join(a, b, a, b, a, b, b, b);
        parts = ByteArrayUtil.split(src, b);
        assertEquals(parts.size(), 6);
        counter = 0;
        for (byte[] p : parts) {
            if (counter++ < 3)
                assertArrayEquals(a, p);
            else
                assertArrayEquals(new byte[] {}, p);
        }
    }

    /**
     * Test method for
     * {@link ByteArrayUtil#bisectLeft(java.math.BigInteger[], java.math.BigInteger)}.
     */
    @Test
    @Disabled("not implemented")
    void testBisectLeft() {
        fail("Not yet implemented");
    }

    /**
     * Test method for {@link ByteArrayUtil#compareUnsigned(byte[], byte[])}.
     */
    @Test
    @Disabled("not implemented")
    void testCompare() {
        fail("Not yet implemented");
    }

    /**
     * Test method for {@link ByteArrayUtil#findNext(byte[], byte, int)}.
     */
    @Test
    @Disabled("not implemented")
    void testFindNext() {
        fail("Not yet implemented");
    }

    /**
     * Test method for
     * {@link ByteArrayUtil#findTerminator(byte[], byte, byte, int)}.
     */
    @Test
    @Disabled("not implemented")
    void testFindTerminator() {
        fail("Not yet implemented");
    }

    /**
     * Test method for {@link ByteArrayUtil#copyOfRange(byte[], int, int)}.
     */
    @Test
    @Disabled("not implemented")
    void testCopyOfRange() {
        fail("Not yet implemented");
    }

    /**
     * Test method for {@link ByteArrayUtil#strinc(byte[])}.
     */
    @Test
    @Disabled("not implemented")
    void testStrinc() {
        fail("Not yet implemented");
    }

    /**
     * Test method for {@link ByteArrayUtil#printable(byte[])}.
     */
    @Test
    @Disabled("not implemented")
    void testPrintable() {
        fail("Not yet implemented");
    }

    @Test
    void cannotReplaceNullBytes() throws Exception {
        Assertions.assertThrows(NullPointerException.class, () -> {
            ByteArrayUtil.replace(null, 0, 1, new byte[] { 0x00 }, new byte[] { 0x00, (byte)0xFF });
        });
    }

    @Test
    void cannotReplaceWithNegativeOffset() throws Exception {
        Assertions.assertThrows(IllegalArgumentException.class, () -> {
            ByteArrayUtil.replace(new byte[] { 0x00, 0x01 }, -1, 2, new byte[] { 0x00 },
                                  new byte[] { 0x00, (byte)0xFF });
        });
    }

    @Test
    void cannotReplaceWithNegativeLength() throws Exception {
        Assertions.assertThrows(IllegalArgumentException.class, () -> {
            ByteArrayUtil.replace(new byte[] { 0x00, 0x01 }, 1, -1, new byte[] { 0x00 },
                                  new byte[] { 0x00, (byte)0xFF });
        });
    }

    @Test
    void cannotReplaceWithOffsetAfterEndOfArray() throws Exception {
        Assertions.assertThrows(IllegalArgumentException.class, () -> {
            ByteArrayUtil.replace(new byte[] { 0x00, 0x01 }, 3, 2, new byte[] { 0x00 },
                                  new byte[] { 0x00, (byte)0xFF });
        });
    }

    @Test
    void cannotReplaceWithLengthAfterEndOfArray() throws Exception {
        Assertions.assertThrows(IllegalArgumentException.class, () -> {
            ByteArrayUtil.replace(new byte[] { 0x00, 0x01 }, 1, 2, new byte[] { 0x00 },
                                  new byte[] { 0x00, (byte)0xFF });
        });
    }

    @Test
    void replaceWorks() throws Exception {
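        // entries are grouped in quadruples: { src, pattern, replacement, expected };
        // the loop below consumes them four at a time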
        List<byte[]> arrays = Arrays.asList(
            new byte[] { 0x01, 0x02, 0x01, 0x02 }, new byte[] { 0x01, 0x02 }, new byte[] { 0x03, 0x04 },
            new byte[] { 0x03, 0x04, 0x03, 0x04 }, new byte[] { 0x01, 0x02, 0x01, 0x02 }, new byte[] { 0x01, 0x02 },
            new byte[] { 0x03 }, new byte[] { 0x03, 0x03 }, new byte[] { 0x01, 0x02, 0x01, 0x02 },
            new byte[] { 0x01, 0x02 }, new byte[] { 0x03, 0x04, 0x05 },
            new byte[] { 0x03, 0x04, 0x05, 0x03, 0x04, 0x05 }, new byte[] { 0x00, 0x01, 0x02, 0x00, 0x01, 0x02, 0x00 },
            new byte[] { 0x01, 0x02 }, new byte[] { 0x03, 0x04, 0x05 },
            new byte[] { 0x00, 0x03, 0x04, 0x05, 0x00, 0x03, 0x04, 0x05, 0x00 }, new byte[] { 0x01, 0x01, 0x01, 0x01 },
            new byte[] { 0x01, 0x02 }, new byte[] { 0x03, 0x04 }, new byte[] { 0x01, 0x01, 0x01, 0x01 },
            new byte[] { 0x01, 0x01, 0x01, 0x01 }, new byte[] { 0x01, 0x02 }, new byte[] { 0x03 },
            new byte[] { 0x01, 0x01, 0x01, 0x01 }, new byte[] { 0x01, 0x01, 0x01, 0x01 }, new byte[] { 0x01, 0x02 },
            new byte[] { 0x03, 0x04, 0x05 }, new byte[] { 0x01, 0x01, 0x01, 0x01 },
            new byte[] { 0x01, 0x01, 0x01, 0x01, 0x01 }, new byte[] { 0x01, 0x01 }, new byte[] { 0x03, 0x04, 0x05 },
            new byte[] { 0x03, 0x04, 0x05, 0x03, 0x04, 0x05, 0x01 }, new byte[] { 0x01, 0x01, 0x01, 0x01, 0x01 },
            new byte[] { 0x01, 0x01 }, new byte[] { 0x03, 0x04 }, new byte[] { 0x03, 0x04, 0x03, 0x04, 0x01 },
            new byte[] { 0x01, 0x01, 0x01, 0x01, 0x01 }, new byte[] { 0x01, 0x01 }, new byte[] { 0x03 },
            new byte[] { 0x03, 0x03, 0x01 }, new byte[] { 0x01, 0x02, 0x01, 0x02 }, new byte[] { 0x01, 0x02 }, null,
            new byte[0], new byte[] { 0x01, 0x02, 0x01, 0x02 }, new byte[] { 0x01, 0x02 }, new byte[0], new byte[0],
            new byte[] { 0x01, 0x02, 0x01, 0x02 }, null, new byte[] { 0x04 }, new byte[] { 0x01, 0x02, 0x01, 0x02 },
            new byte[] { 0x01, 0x02, 0x01, 0x02 }, new byte[0], new byte[] { 0x04 },
            new byte[] { 0x01, 0x02, 0x01, 0x02 }, null, new byte[] { 0x01, 0x02 }, new byte[] { 0x04 }, null);
        for (int i = 0; i < arrays.size(); i += 4) {
            byte[] src = arrays.get(i);
            byte[] pattern = arrays.get(i + 1);
            byte[] replacement = arrays.get(i + 2);
            byte[] expectedResults = arrays.get(i + 3);
            byte[] results = ByteArrayUtil.replace(src, pattern, replacement);
            String errorMsg = String.format(
                "results <%s> did not match expected results <%s> when replacing <%s> with <%s> in <%s>",
                ByteArrayUtil.printable(results), ByteArrayUtil.printable(expectedResults),
                ByteArrayUtil.printable(pattern), ByteArrayUtil.printable(replacement), ByteArrayUtil.printable(src));

            Assertions.assertArrayEquals(expectedResults, results, errorMsg);
            if (src != null) {
                Assertions.assertTrue(
                    src != results,
                    String.format("src and results array are pointer-equal when replacing <%s> with <%s> in <%s>",
                                  ByteArrayUtil.printable(pattern), ByteArrayUtil.printable(replacement),
                                  ByteArrayUtil.printable(src)));
            }
        }
    }
}
@ -1,361 +0,0 @@
/*
 * ArrayUtilTests.java
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.apple.foundationdb.tuple;

import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

import java.util.ArrayList;
import java.util.List;

import org.junit.Test;

/**
 * @author Ben
 *
 */
public class ArrayUtilTests {

    /**
     * Test method for {@link ByteArrayUtil#join(byte[], java.util.List)}.
     */
    @Test
    public void testJoinByteArrayListOfbyte() {
        byte[] a = new byte[] {'a', 'b', 'c'};
        byte[] b = new byte[] {'d', 'e', 'f'};

        List<byte[]> parts = new ArrayList<byte[]>();
        parts.add(a);
        parts.add(b);
        parts.add(new byte[] {});
        byte[] result = new byte[] {'a', 'b', 'c', 'z', 'd', 'e', 'f', 'z'};
        assertArrayEquals(result, ByteArrayUtil.join(new byte[] {'z'}, parts));

        parts = new ArrayList<byte[]>();
        parts.add(new byte[] {});
        parts.add(a);
        parts.add(b);
        result = new byte[] {'z', 'a', 'b', 'c', 'z', 'd', 'e', 'f'};
        assertArrayEquals(result, ByteArrayUtil.join(new byte[] {'z'}, parts));

        parts = new ArrayList<byte[]>();
        parts.add(new byte[] {});
        parts.add(new byte[] {});
        parts.add(a);
        parts.add(b);
        result = new byte[] {'z', 'z', 'a', 'b', 'c', 'z', 'd', 'e', 'f'};
        assertArrayEquals(result, ByteArrayUtil.join(new byte[] {'z'}, parts));

        parts = new ArrayList<byte[]>();
        parts.add(a);
        parts.add(new byte[] {});
        parts.add(new byte[] {});
        parts.add(b);
        result = new byte[] {'a', 'b', 'c', 'z', 'z', 'z', 'd', 'e', 'f'};
        assertArrayEquals(result, ByteArrayUtil.join(new byte[] {'z'}, parts));

        parts = new ArrayList<byte[]>();
        parts.add(a);
        parts.add(b);
        parts.add(new byte[] {'b'});
        result = new byte[] {'a', 'b', 'c', 'z', 'd', 'e', 'f', 'z', 'b'};
        assertArrayEquals(result, ByteArrayUtil.join(new byte[] {'z'}, parts));

        parts = new ArrayList<byte[]>();
        parts.add(new byte[] {});
        parts.add(new byte[] {});
        parts.add(new byte[] {});
        result = new byte[] {'z', 'z'};
        assertArrayEquals(result, ByteArrayUtil.join(new byte[] {'z'}, parts));

        parts = new ArrayList<byte[]>();
        parts.add(new byte[] {});
        parts.add(new byte[] {});
        parts.add(new byte[] {});
        result = new byte[] {};
        assertArrayEquals(result, ByteArrayUtil.join(null, parts));
    }

    /**
     * Test method for {@link ByteArrayUtil#join(byte[][])}.
     */
    @Test
    public void testJoinByteArrayArray() {
        byte[] a = new byte[] {'a', 'b', 'c'};
        byte[] b = new byte[] {'d', 'e', 'f'};

        List<byte[]> parts = new ArrayList<byte[]>();
        parts.add(a);
        parts.add(b);
        parts.add(new byte[] {});
        byte[] result = new byte[] {'a', 'b', 'c', 'd', 'e', 'f'};
        assertArrayEquals(result, ByteArrayUtil.join(parts.toArray(new byte[][]{})));

        parts = new ArrayList<byte[]>();
        parts.add(new byte[] {});
        parts.add(a);
        parts.add(b);
        result = new byte[] {'a', 'b', 'c', 'd', 'e', 'f'};
        assertArrayEquals(result, ByteArrayUtil.join(parts.toArray(new byte[][]{})));

        parts = new ArrayList<byte[]>();
        parts.add(new byte[] {});
        parts.add(new byte[] {});
        parts.add(new byte[] {});
        parts.add(a);
        parts.add(b);
        parts.add(new byte[] {});
        parts.add(new byte[] {});
        parts.add(new byte[] {'b'});
        parts.add(new byte[] {});
        parts.add(new byte[] {});
        parts.add(new byte[] {});
        parts.add(new byte[] {});
        result = new byte[] {'a', 'b', 'c', 'd', 'e', 'f', 'b'};
        assertArrayEquals(result, ByteArrayUtil.join(parts.toArray(new byte[][]{})));

        parts = new ArrayList<byte[]>();
        parts.add(a);
        parts.add(b);
        parts.add(new byte[] {'b'});
        result = new byte[] {'a', 'b', 'c', 'd', 'e', 'f', 'b'};
        assertArrayEquals(result, ByteArrayUtil.join(parts.toArray(new byte[][]{})));

        // Self-referential, with conversion to array
        parts = new ArrayList<byte[]>();
        parts.add(new byte[] {});
        parts.add(a);
        parts.add(b);
        parts.add(new byte[] {});
        assertArrayEquals(ByteArrayUtil.join(a, b), ByteArrayUtil.join(parts.toArray(new byte[][]{})));

        // Test exception on null elements
        boolean isError = false;
        try {
            ByteArrayUtil.join(a, b, null);
        } catch(Exception e) {
            isError = true;
        } finally {
            assertTrue(isError);
        }
    }

    /**
     * Test method for {@link ByteArrayUtil#regionEquals(byte[], int, byte[])}.
     */
    @Test
    public void testRegionEquals() {
        byte[] src = new byte[] {'a', (byte)12, (byte)255, 'n', 'm', 'z', 'k'};
        assertTrue(ByteArrayUtil.regionEquals(src, 3, new byte[] { 'n', 'm' }));

        assertFalse(ByteArrayUtil.regionEquals(src, 2, new byte[] { 'n', 'm' }));

        assertTrue(ByteArrayUtil.regionEquals(null, 0, null));

        assertFalse(ByteArrayUtil.regionEquals(src, 0, null));
    }

    /**
     * Test method for {@link ByteArrayUtil#replace(byte[], byte[], byte[])}.
     */
    @Test
    public void testReplace() {
        byte[] a = new byte[] {'a', 'b', 'c'};
        byte[] b = new byte[] {'d', 'e', 'f'};

        byte[] src = ByteArrayUtil.join(a, b, a, b);
        byte[] result = new byte[] {'z', 'd', 'e', 'f', 'z', 'd', 'e', 'f'};
        assertArrayEquals(result, ByteArrayUtil.replace(src, a, new byte[] {'z'}));

        src = ByteArrayUtil.join(a, b, a, b);
        assertArrayEquals(ByteArrayUtil.join(b, b), ByteArrayUtil.replace(src, a, new byte[] {}));

        src = ByteArrayUtil.join(a, b, a, b);
        assertArrayEquals(ByteArrayUtil.join(a, a), ByteArrayUtil.replace(src, b, new byte[] {}));

        src = ByteArrayUtil.join(a, a, a);
        assertArrayEquals(new byte[] {}, ByteArrayUtil.replace(src, a, new byte[] {}));
    }

    /**
     * Test method for {@link ByteArrayUtil#split(byte[], byte[])}.
     */
    @Test
    public void testSplit() {
        byte[] a = new byte[] {'a', 'b', 'c'};
        byte[] b = new byte[] {'d', 'e', 'f'};

        byte[] src = ByteArrayUtil.join(a, b, a, b, a);
        List<byte[]> parts = ByteArrayUtil.split(src, b);
        assertEquals(parts.size(), 3);
        for(byte[] p : parts) {
            assertArrayEquals(a, p);
        }

        src = ByteArrayUtil.join(b, a, b, a, b, a);
        parts = ByteArrayUtil.split(src, b);
        assertEquals(parts.size(), 4);
        int counter = 0;
        for(byte[] p : parts) {
            if(counter++ == 0)
                assertArrayEquals(new byte[]{}, p);
            else
                assertArrayEquals(a, p);
        }

        src = ByteArrayUtil.join(a, b, a, b, a, b);
        parts = ByteArrayUtil.split(src, b);
        assertEquals(parts.size(), 4);
        counter = 0;
        for(byte[] p : parts) {
            if(counter++ < 3)
                assertArrayEquals(a, p);
            else
                assertArrayEquals(new byte[]{}, p);
        }

        // Multiple ending delimiters
        src = ByteArrayUtil.join(a, b, a, b, a, b, b, b);
        parts = ByteArrayUtil.split(src, b);
        assertEquals(parts.size(), 6);
        counter = 0;
        for(byte[] p : parts) {
            if(counter++ < 3)
                assertArrayEquals(a, p);
            else
                assertArrayEquals(new byte[]{}, p);
        }
    }

    /**
     * Test method for {@link ByteArrayUtil#bisectLeft(java.math.BigInteger[], java.math.BigInteger)}.
     */
    @Test
    public void testBisectLeft() {
        fail("Not yet implemented");
    }

    /**
     * Test method for {@link ByteArrayUtil#compareUnsigned(byte[], byte[])}.
     */
    @Test
    public void testCompare() {
        fail("Not yet implemented");
    }

    /**
     * Test method for {@link ByteArrayUtil#findNext(byte[], byte, int)}.
     */
    @Test
    public void testFindNext() {
        fail("Not yet implemented");
    }

    /**
     * Test method for {@link ByteArrayUtil#findTerminator(byte[], byte, byte, int)}.
     */
    @Test
    public void testFindTerminator() {
        fail("Not yet implemented");
    }

    /**
     * Test method for {@link ByteArrayUtil#copyOfRange(byte[], int, int)}.
     */
    @Test
    public void testCopyOfRange() {
        fail("Not yet implemented");
    }

    /**
     * Test method for {@link ByteArrayUtil#strinc(byte[])}.
     */
    @Test
    public void testStrinc() {
        fail("Not yet implemented");
    }

    /**
     * Test method for {@link ByteArrayUtil#printable(byte[])}.
     */
    @Test
    public void testPrintable() {
        fail("Not yet implemented");
    }

    private static final int SAMPLE_COUNT = 1000000;
    private static final int SAMPLE_MAX_SIZE = 2048;
    private List<byte[]> unsafe;
    private List<byte[]> java;
    @Before
    public void init() {
        unsafe = new ArrayList(SAMPLE_COUNT);
        java = new ArrayList(SAMPLE_COUNT);
        Random random = new Random();
        for (int i = 0; i <= SAMPLE_COUNT; i++) {
            byte[] addition = new byte[random.nextInt(SAMPLE_MAX_SIZE)];
            random.nextBytes(addition);
            unsafe.add(addition);
            java.add(addition);
        }
    }

    @Test
    public void testComparatorSort() {
        Collections.sort(unsafe, FastByteComparisons.lexicographicalComparerUnsafeImpl());
        Collections.sort(java, FastByteComparisons.lexicographicalComparerJavaImpl());
        Assert.assertTrue(unsafe.equals(java));
    }

    @Test
    public void testUnsafeComparison() {
        for (int i =0; i< SAMPLE_COUNT; i++) {
            Assert.assertEquals(FastByteComparisons.lexicographicalComparerUnsafeImpl().compare(unsafe.get(i), java.get(i)), 0);
        }
    }

    @Test
    public void testJavaComparison() {
        for (int i =0; i< SAMPLE_COUNT; i++) {
            Assert.assertEquals(FastByteComparisons.lexicographicalComparerJavaImpl().compare(unsafe.get(i), java.get(i)), 0);
        }
    }

    @Test
    public void testUnsafeComparisonWithOffet() {
        for (int i =0; i< SAMPLE_COUNT; i++) {
            if (unsafe.get(i).length > 5)
                Assert.assertEquals(FastByteComparisons.lexicographicalComparerUnsafeImpl().compareTo(unsafe.get(i), 4, unsafe.get(i).length - 4, java.get(i), 4, java.get(i).length - 4), 0);
        }
    }

    @Test
    public void testJavaComparisonWithOffset() {
        for (int i =0; i< SAMPLE_COUNT; i++) {
            if (unsafe.get(i).length > 5)
                Assert.assertEquals(FastByteComparisons.lexicographicalComparerJavaImpl().compareTo(unsafe.get(i), 4, unsafe.get(i).length - 4, java.get(i), 4, java.get(i).length - 4), 0);
        }
    }

}
@ -0,0 +1,42 @@
package com.apple.foundationdb.tuple;

import java.nio.charset.Charset;

import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;

public class ByteArrayUtilTest {

    @Test
    void printableWorksForAllByteValues() {
        // Quick test to make sure that no bytes are unprintable
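        // 2 * (Byte.MAX_VALUE + 1) == 256, i.e. one entry for every possible byte value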
        byte[] bytes = new byte[2 * ((int)Byte.MAX_VALUE + 1)];
        for (int i = 0; i < bytes.length; i++) {
            bytes[i] = (byte)(i & 0xff);
        }

        String value = ByteArrayUtil.printable(bytes);
        String expected = "\\x00\\x01\\x02\\x03\\x04\\x05\\x06\\x07\\x08\\x09\\x0a\\x0b\\x0c\\x0d\\x0e\\x0f\\x10\\x11\\x12\\x13\\x14\\x15\\x16\\x17\\x18\\x19\\x1a\\x1b\\x1c\\x1d\\x1e\\x1f !\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\\\]^_`abcdefghijklmnopqrstuvwxyz{|}~\\x7f\\x80\\x81\\x82\\x83\\x84\\x85\\x86\\x87\\x88\\x89\\x8a\\x8b\\x8c\\x8d\\x8e\\x8f\\x90\\x91\\x92\\x93\\x94\\x95\\x96\\x97\\x98\\x99\\x9a\\x9b\\x9c\\x9d\\x9e\\x9f\\xa0\\xa1\\xa2\\xa3\\xa4\\xa5\\xa6\\xa7\\xa8\\xa9\\xaa\\xab\\xac\\xad\\xae\\xaf\\xb0\\xb1\\xb2\\xb3\\xb4\\xb5\\xb6\\xb7\\xb8\\xb9\\xba\\xbb\\xbc\\xbd\\xbe\\xbf\\xc0\\xc1\\xc2\\xc3\\xc4\\xc5\\xc6\\xc7\\xc8\\xc9\\xca\\xcb\\xcc\\xcd\\xce\\xcf\\xd0\\xd1\\xd2\\xd3\\xd4\\xd5\\xd6\\xd7\\xd8\\xd9\\xda\\xdb\\xdc\\xdd\\xde\\xdf\\xe0\\xe1\\xe2\\xe3\\xe4\\xe5\\xe6\\xe7\\xe8\\xe9\\xea\\xeb\\xec\\xed\\xee\\xef\\xf0\\xf1\\xf2\\xf3\\xf4\\xf5\\xf6\\xf7\\xf8\\xf9\\xfa\\xfb\\xfc\\xfd\\xfe\\xff";
        Assertions.assertEquals(expected, value, "Incorrect printable string");
    }

    @Test
    void printableWorksForAsciiStrings() {
        char[] asciiChars = new char[] {
            '!', '"', '#', '$', '%', '&', '\'', '(', ')', '*', '+', ',', '~', '.', '/',
            '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ':', ';', '<', '?', '@',
            'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z',
            '[', '\\', ']', '^', '_', '`',
            'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '{', '|', '}', '~', (char)127
        };

        for (int i = 0; i < asciiChars.length; i++) {
            String substring = new String(asciiChars, 0, i);
            byte[] asciiBytes = substring.getBytes(Charset.forName("UTF-8"));

            String printable = ByteArrayUtil.printable(asciiBytes);
            String expected = substring.replace("\\", "\\\\");
            Assertions.assertEquals(expected, printable, "Incorrect printable string");
        }
    }
}
@ -0,0 +1,110 @@
/*
 * TupleComparisonTest.java
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.apple.foundationdb.tuple;

import java.math.BigInteger;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.UUID;
import java.util.stream.Stream;

import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.MethodSource;

/**
 * Unit tests for comparisons of tuple objects.
 */
class TupleComparisonTest {

    static List<Tuple> comparisons = Arrays.asList(
        Tuple.from(0L), Tuple.from(BigInteger.ZERO), Tuple.from(1L), Tuple.from(BigInteger.ONE), Tuple.from(-1L),
        Tuple.from(BigInteger.ONE.negate()), Tuple.from(Long.MAX_VALUE), Tuple.from(Long.MIN_VALUE),
        Tuple.from(BigInteger.valueOf(Long.MIN_VALUE).subtract(BigInteger.ONE)),
        Tuple.from(BigInteger.valueOf(Long.MIN_VALUE).shiftLeft(1)), Tuple.from(-0.0f), Tuple.from(0.0f),
        Tuple.from(-0.0), Tuple.from(0.0), Tuple.from(Float.NEGATIVE_INFINITY), Tuple.from(Double.NEGATIVE_INFINITY),
        Tuple.from(Float.NaN), Tuple.from(Double.NaN),
        Tuple.from(Float.intBitsToFloat(Float.floatToIntBits(Float.NaN) + 1)),
        Tuple.from(Double.longBitsToDouble(Double.doubleToLongBits(Double.NaN) + 1)),
        Tuple.from(Float.intBitsToFloat(Float.floatToIntBits(Float.NaN) + 2)),
        Tuple.from(Double.longBitsToDouble(Double.doubleToLongBits(Double.NaN) + 2)),
        Tuple.from(Float.intBitsToFloat(Float.floatToIntBits(Float.NaN) ^ Integer.MIN_VALUE)),
        Tuple.from(Double.longBitsToDouble(Double.doubleToLongBits(Double.NaN) ^ Long.MIN_VALUE)),
        Tuple.from(Float.intBitsToFloat(Float.floatToIntBits(Float.NaN) ^ Integer.MIN_VALUE + 1)),
        Tuple.from(Double.longBitsToDouble(Double.doubleToLongBits(Double.NaN) ^ Long.MIN_VALUE + 1)),
        Tuple.from(Float.POSITIVE_INFINITY), Tuple.from(Double.POSITIVE_INFINITY), Tuple.from((Object) new byte[0]),
        Tuple.from((Object) new byte[] { 0x00 }), Tuple.from((Object) new byte[] { 0x00, (byte)0xFF }),
        Tuple.from((Object) new byte[] { 0x7f }), Tuple.from((Object) new byte[] { (byte)0x80 }),
        Tuple.from(null, new byte[0]), Tuple.from(null, new byte[] { 0x00 }),
        Tuple.from(null, new byte[] { 0x00, (byte)0xFF }), Tuple.from(null, new byte[] { 0x7f }),
        Tuple.from(null, new byte[] { (byte)0x80 }), Tuple.from(Tuple.from(null, new byte[0])),
        Tuple.from(Tuple.from(null, new byte[] { 0x00 })),
        Tuple.from(Tuple.from(null, new byte[] { 0x00, (byte)0xFF })),
        Tuple.from(Tuple.from(null, new byte[] { 0x7f })), Tuple.from(Tuple.from(null, new byte[] { (byte)0x80 })),
        Tuple.from("a"), Tuple.from("\u03bc\u03ac\u03b8\u03b7\u03bc\u03b1"),
        Tuple.from("\u03bc\u03b1\u0301\u03b8\u03b7\u03bc\u03b1"), Tuple.from("\u4e2d\u6587"),
        Tuple.from("\u4e2d\u570B"), Tuple.from("\ud83d\udd25"), Tuple.from("\ud83e\udd6f"), Tuple.from("a\ud83d\udd25"),
        Tuple.from("\ufb49"), Tuple.from("\ud83d\udd25\ufb49"), Tuple.from(new UUID(-1, 0)),
        Tuple.from(new UUID(-1, -1)), Tuple.from(new UUID(1, -1)), Tuple.from(new UUID(1, 1)), Tuple.from(false),
        Tuple.from(true), Tuple.from(Arrays.asList(0, 1, 2)), Tuple.from(Arrays.asList(0, 1), "hello"),
        Tuple.from(Arrays.asList(0, 1), "help"),
        Tuple.from(Versionstamp.complete(
            new byte[] { 0x0a, (byte)0xbb, (byte)0xcc, (byte)0xdd, (byte)0xee, (byte)0xFF, 0x00, 0x01, 0x02, 0x03 })),
        Tuple.from(Versionstamp.complete(new byte[] { (byte)0xaa, (byte)0xbb, (byte)0xcc, (byte)0xdd, (byte)0xee,
                                                      (byte)0xFF, 0x00, 0x01, 0x02, 0x03 })),
        Tuple.from(Versionstamp.complete(new byte[] { (byte)0xaa, (byte)0xbb, (byte)0xcc, (byte)0xdd, (byte)0xee,
                                                      (byte)0xFF, 0x00, 0x01, 0x02, 0x03 },
                                         1)),
        Tuple.from(Versionstamp.complete(new byte[] { (byte)0xaa, (byte)0xbb, (byte)0xcc, (byte)0xdd, (byte)0xee,
                                                      (byte)0xFF, 0x00, 0x01, 0x02, 0x03 },
                                         0xa101)),
        Tuple.from(Versionstamp.complete(new byte[] { (byte)0xaa, (byte)0xbb, (byte)0xcc, (byte)0xdd, (byte)0xee,
                                                      (byte)0xFF, 0x00, 0x01, 0x02, 0x03 },
                                         65535)));
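
    // (note: the intBitsToFloat/longBitsToDouble entries above construct distinct NaN
    // bit patterns; the tuple encoding orders floats by their transformed raw bits, so
    // even "equal" NaNs are expected to sort consistently in both comparison paths)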

    static Stream<Arguments> cartesianProvider() {
        List<Arguments> args = new ArrayList<>(comparisons.size() * comparisons.size());
        for (Tuple l : comparisons) {
            for (Tuple r : comparisons) {
                args.add(Arguments.of(l, r));
            }
        }
        return args.stream();
    }

    @ParameterizedTest
    @MethodSource("cartesianProvider")
    void testCanCompare(Tuple l, Tuple r) {
        /*
         * Verify that both implementations of the comparator compare the same way
         */
        Tuple t1copy = Tuple.fromList(l.getItems());
        Tuple t2copy = Tuple.fromList(r.getItems());
        int semanticComparison = t1copy.compareTo(t2copy);
        int byteComparison = ByteArrayUtil.compareUnsigned(l.pack(), r.pack());
        String errorMsg = String.format("tuple l and r comparisons mismatched; semantic: <%d>, byte: <%d>",
                                        semanticComparison, byteComparison);
        Assertions.assertEquals(Integer.signum(semanticComparison), Integer.signum(byteComparison), errorMsg);
        int implicitByteComparison = l.compareTo(r);
        Assertions.assertEquals(Integer.signum(semanticComparison), Integer.signum(implicitByteComparison), errorMsg);
    }
}
@ -0,0 +1,542 @@
/*
 * TupleEncodingTest.java
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.apple.foundationdb.tuple;

import java.lang.reflect.Field;
import java.math.BigInteger;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.security.AccessController;
import java.security.PrivilegedExceptionAction;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.UUID;
import java.util.stream.Stream;

import com.apple.foundationdb.FDB;
import com.apple.foundationdb.FDBLibraryRule;
import com.apple.foundationdb.subspace.Subspace;

import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Assumptions;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.RegisterExtension;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.MethodSource;

/**
 * Tests around packing, versionstamps, and assorted encoding-related stuff for
 * tuples.
 *
 * This class should be used when adding/modifying tests that involve the "packing" logic,
 * while {@link TupleSerializationTest} should be used to test "serialization" behavior. Granted,
 * that distinction is pretty arbitrary and not really clear, but the main motivation for separating the two
 * classes is just to avoid having a single ludicrously large test file, so it's probably not the end of the world
 * if this rule isn't perfectly followed.
 */
class TuplePackingTest {
    private static final byte FF = (byte)0xff;
    @RegisterExtension static final FDBLibraryRule fdbLib = FDBLibraryRule.current();

    static final List<Tuple> baseTuples =
        Arrays.asList(new Tuple(), Tuple.from(), Tuple.from((Object)null), Tuple.from("prefix"),
                      Tuple.from("prefix", null), Tuple.from(new UUID(100, 1000)),
                      Tuple.from(Versionstamp.incomplete(1)), Tuple.from(Tuple.from(Versionstamp.incomplete(2))),
                      Tuple.from(Collections.singletonList(Versionstamp.incomplete(3))));

    static final List<Object> items = Arrays.asList(
        null, 1066L, BigInteger.valueOf(1066), BigInteger.valueOf(Long.MAX_VALUE).add(BigInteger.ONE), -3.14f, 2.71828,
        new byte[] { 0x01, 0x02, 0x03 }, new byte[] { 0x01, 0x00, 0x02, 0x00, 0x03 }, "hello there", "hell\0 there",
        "\ud83d\udd25", "\ufb14", false, true, Float.NaN, Float.intBitsToFloat(Integer.MAX_VALUE), Double.NaN,
        Double.longBitsToDouble(Long.MAX_VALUE),
        Versionstamp.complete(new byte[] { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09 }, 100),
        Versionstamp.incomplete(4), new UUID(-1, 1), Tuple.from((Object)null), Tuple.from("suffix", "tuple"),
        Tuple.from("s\0ffix", "tuple"), Arrays.asList("suffix", "tuple"), Arrays.asList("suffix", null, "tuple"),
        Tuple.from("suffix", null, "tuple"), Tuple.from("suffix", Versionstamp.incomplete(4), "tuple"),
        Arrays.asList("suffix", Arrays.asList("inner", Versionstamp.incomplete(5), "tuple"), "tuple"));
|
||||
|
||||
static final Stream<Arguments> baseAddCartesianProduct() {
|
||||
return baseTuples.stream().flatMap((left) -> {
|
||||
Stream<Object> oStream = items.stream();
|
||||
return items.stream().map((right) -> { return Arguments.of(left, right); });
|
||||
});
|
||||
}
|
||||
|
||||
static List<Tuple> twoIncomplete() {
|
||||
return Arrays.asList(Tuple.from(Versionstamp.incomplete(1), Versionstamp.incomplete(2)),
|
||||
Tuple.from(Tuple.from(Versionstamp.incomplete(3)), Tuple.from(Versionstamp.incomplete(4))),
|
||||
new Tuple().add(Versionstamp.incomplete()).add(Versionstamp.incomplete()),
|
||||
new Tuple().add(Versionstamp.incomplete()).add(3L).add(Versionstamp.incomplete()),
|
||||
Tuple.from(Tuple.from(Versionstamp.incomplete()), "dummy_string")
|
||||
.add(Tuple.from(Versionstamp.incomplete())),
|
||||
Tuple.from(Arrays.asList(Versionstamp.incomplete(), "dummy_string"))
|
||||
.add(Tuple.from(Versionstamp.incomplete())),
|
||||
Tuple.from(Tuple.from(Versionstamp.incomplete()), "dummy_string")
|
||||
.add(Collections.singletonList(Versionstamp.incomplete())));
|
||||
}
|
||||
|
||||
static final List<byte[]> malformedSequences() {
|
||||
return Arrays.asList(
|
||||
new byte[] { 0x01, (byte)0xde, (byte)0xad, (byte)0xc0, (byte)0xde }, // no termination
|
||||
// character for
|
||||
// byte array
|
||||
new byte[] { 0x01, (byte)0xde, (byte)0xad, 0x00, FF, (byte)0xc0, (byte)0xde }, // no termination
|
||||
// character but null
|
||||
// in middle
|
||||
new byte[] { 0x02, 'h', 'e', 'l', 'l', 'o' }, // no termination character for string
|
||||
new byte[] { 0x02, 'h', 'e', 'l', 0x00, FF, 'l', 'o' }, // no termination character but null in the
|
||||
// middle
|
||||
new byte[] { 0x02, 'u', 't', 'f', 0x08, (byte)0x80, 0x00 }, // invalid utf-8 code point start character
|
||||
new byte[] { 0x02, 'u', 't', 'f', 0x08, (byte)0xc0, 0x01, 0x00 }, // invalid utf-8 code point second
|
||||
// character
|
||||
// invalid utf-8 (corresponds to high surrogate \ud83d)
|
||||
new byte[] { 0x02, 'u', 't', 'f', 0x10, (byte)0xed, (byte)0xa0, (byte)0xbd, (byte)0x00 },
|
||||
// invalid utf-8 (corresponds to low surrogate \udd25)
|
||||
new byte[] { 0x02, 'u', 't', 'f', 0x10, (byte)0xed, (byte)0xb4, (byte)0xa5, (byte)0x00 },
|
||||
// invalid utf-8 (corresponds to \ud83d\udd25 which *is* valid utf-16, but not
|
||||
// encoded like that)
|
||||
new byte[] { 0x02, 'u', 't', 'f', 0x10, (byte)0xed, (byte)0xa0, (byte)0xbd, (byte)0xed, (byte)0xb4,
|
||||
(byte)0xa5, (byte)0x00 },
|
||||
new byte[] { 0x05, 0x02, 'h', 'e', 'l', 'l', 'o', 0x00 }, // no termination character for nested tuple
|
||||
// no termination character for nested tuple but null in the middle
|
||||
new byte[] { 0x05, 0x02, 'h', 'e', 'l', 'l', 'o', 0x00, 0x00, FF, 0x02, 't', 'h', 'e', 'r', 'e', 0x00 },
|
||||
new byte[] { 0x16, 0x01 }, // integer truncation
|
||||
new byte[] { 0x12, 0x01 }, // integer truncation
|
||||
new byte[] { 0x1d, 0x09, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08 }, // integer truncation
|
||||
new byte[] { 0x0b, 0x09 ^ FF, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08 }, // integer truncation
|
||||
new byte[] { 0x20, 0x01, 0x02, 0x03 }, // float truncation
|
||||
new byte[] { 0x21, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 }, // double truncation
|
||||
// UUID truncation
|
||||
new byte[] { 0x30, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e },
|
||||
// versionstamp truncation
|
||||
new byte[] { 0x33, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b },
|
||||
new byte[] { FF } // unknown
|
||||
// start
|
||||
// code
|
||||
);
|
||||
}
|
||||
|
||||
static final List<byte[]> wellFormedSequences() {
|
||||
return Arrays.asList(Tuple.from((Object) new byte[] { 0x01, 0x02 }).pack(), Tuple.from("hello").pack(),
|
||||
Tuple.from("hell\0").pack(), Tuple.from(1066L).pack(), Tuple.from(-1066L).pack(),
|
||||
Tuple.from(BigInteger.ONE.shiftLeft(Long.SIZE + 1)).pack(),
|
||||
Tuple.from(BigInteger.ONE.shiftLeft(Long.SIZE + 1).negate()).pack(),
|
||||
Tuple.from(-3.14f).pack(), Tuple.from(2.71828).pack(),
|
||||
Tuple.from(new UUID(1066L, 1415L)).pack(),
|
||||
Tuple
|
||||
.from(Versionstamp.fromBytes(new byte[] { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
|
||||
0x08, 0x09, 0x0a, 0x0b, 0x0c }))
|
||||
.pack());
|
||||
}
|
||||
|
||||
@Test
|
||||
void testEmptyTuple() throws Exception {
|
||||
Tuple t = new Tuple();
|
||||
Assertions.assertTrue(t.isEmpty(), "Empty tuple is not empty");
|
||||
Assertions.assertEquals(0, t.getPackedSize(), "empty tuple packed size is not 0");
|
||||
Assertions.assertEquals(0, t.pack().length, "empty tuple is not packed to the empty byte string");
|
||||
}
|
||||
|
||||
@ParameterizedTest
|
||||
@MethodSource("baseAddCartesianProduct")
|
||||
void packedSizeMatches(Tuple baseTuple, Object newItem) throws Exception {
|
||||
Tuple newItemTuple = Tuple.from(newItem);
|
||||
Tuple mergedWithAddAll = baseTuple.addAll(newItemTuple);
|
||||
Tuple addedTuple = baseTuple.addObject(newItem);
|
||||
Tuple listTuple = baseTuple.addAll(Collections.singletonList(newItem));
|
||||
|
||||
Assertions.assertEquals(baseTuple.getPackedSize() + newItemTuple.getPackedSize(),
|
||||
mergedWithAddAll.getPackedSize(), "Packed sizes aren't correct for addAll(Tuple)");
|
||||
Assertions.assertEquals(baseTuple.getPackedSize() + newItemTuple.getPackedSize(), listTuple.getPackedSize(),
|
||||
"Packed sizes aren't correct for addAll(Collection)");
|
||||
Assertions.assertEquals(baseTuple.getPackedSize() + newItemTuple.getPackedSize(), addedTuple.getPackedSize(),
|
||||
"Packed sizes aren't correct for addObject()");
|
||||
}
|
||||
|
||||
@ParameterizedTest
|
||||
@MethodSource("baseAddCartesianProduct")
|
||||
void cannotPackIncorrectlyWithNoIncompleteVersionstamp(Tuple baseTuple, Object newItem) throws Exception {
|
||||
Tuple newItemTuple = Tuple.from(newItem);
|
||||
Assumptions.assumeTrue(!baseTuple.hasIncompleteVersionstamp(),
|
||||
"Skipping because baseTuple has an incomplete versionstamp");
|
||||
Assumptions.assumeTrue(!newItemTuple.hasIncompleteVersionstamp(),
|
||||
"Skipping because newItem has an incomplete versionstamp");
|
||||
|
||||
Tuple mergedWithAddAll = baseTuple.addAll(newItemTuple);
|
||||
Tuple addedTuple = baseTuple.addObject(newItem);
|
||||
Tuple listTuple = baseTuple.addAll(Collections.singletonList(newItem));
|
||||
|
||||
for (Tuple t : Arrays.asList(mergedWithAddAll, addedTuple, listTuple)) {
|
||||
try {
|
||||
t.packWithVersionstamp();
|
||||
Assertions.fail("able to pack tuple with incomplete versionstamp using packWithVersionstamp");
|
||||
} catch (IllegalArgumentException expected) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ParameterizedTest
|
||||
@MethodSource("baseAddCartesianProduct")
|
||||
void cannotPackIncorrectlyWithAddedItemIncompleteVersionstamp(Tuple baseTuple, Object newItem) throws Exception {
|
||||
Tuple newItemTuple = Tuple.from(newItem);
|
||||
Assumptions.assumeTrue(!baseTuple.hasIncompleteVersionstamp(),
|
||||
"Skipping because baseTuple has an incomplete versionstamp");
|
||||
Assumptions.assumeTrue(newItemTuple.hasIncompleteVersionstamp(),
|
||||
"Skipping because newItem has an incomplete versionstamp");
|
||||
|
||||
Tuple mergedWithAddAll = baseTuple.addAll(newItemTuple);
|
||||
Tuple addedTuple = baseTuple.addObject(newItem);
|
||||
Tuple listTuple = baseTuple.addAll(Collections.singletonList(newItem));
|
||||
|
||||
for (Tuple t : Arrays.asList(mergedWithAddAll, addedTuple, listTuple)) {
|
||||
try {
|
||||
t.pack();
|
||||
Assertions.fail("able to pack tuple with incomplete versionstamp using packWithVersionstamp");
|
||||
} catch (IllegalArgumentException expected) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ParameterizedTest
|
||||
@MethodSource("baseAddCartesianProduct")
|
||||
void cannotPackIncorrectlyWithBaseTupleIncompleteVersionstamp(Tuple baseTuple, Object newItem) throws Exception {
|
||||
Tuple newItemTuple = Tuple.from(newItem);
|
||||
Assumptions.assumeTrue(baseTuple.hasIncompleteVersionstamp(),
|
||||
"Skipping because baseTuple has an incomplete versionstamp");
|
||||
Assumptions.assumeTrue(!newItemTuple.hasIncompleteVersionstamp(),
|
||||
"Skipping because newItem has an incomplete versionstamp");
|
||||
|
||||
Tuple mergedWithAddAll = baseTuple.addAll(newItemTuple);
|
||||
Tuple addedTuple = baseTuple.addObject(newItem);
|
||||
Tuple listTuple = baseTuple.addAll(Collections.singletonList(newItem));
|
||||
|
||||
for (Tuple t : Arrays.asList(mergedWithAddAll, addedTuple, listTuple)) {
|
||||
try {
|
||||
t.pack();
|
||||
Assertions.fail("able to pack tuple with incomplete versionstamp using packWithVersionstamp");
|
||||
} catch (IllegalArgumentException expected) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ParameterizedTest
|
||||
@MethodSource("baseAddCartesianProduct")
|
||||
void cannotPackIncorrectlyWithOnlyIncompleteVersionstamp(Tuple baseTuple, Object newItem) throws Exception {
|
||||
Tuple newItemTuple = Tuple.from(newItem);
|
||||
Assumptions.assumeTrue(baseTuple.hasIncompleteVersionstamp(),
|
||||
"Skipping because baseTuple has an incomplete versionstamp");
|
||||
Assumptions.assumeTrue(newItemTuple.hasIncompleteVersionstamp(),
|
||||
"Skipping because newItem does not have an incomplete versionstamp");
|
||||
|
||||
Tuple mergedWithAddAll = baseTuple.addAll(newItemTuple);
|
||||
Tuple addedTuple = baseTuple.addObject(newItem);
|
||||
Tuple listTuple = baseTuple.addAll(Collections.singletonList(newItem));
|
||||
|
||||
for (Tuple t : Arrays.asList(mergedWithAddAll, addedTuple, listTuple)) {
|
||||
try {
|
||||
t.pack();
|
||||
Assertions.fail("able to pack tuple with incomplete versionstamp using packWithVersionstamp");
|
||||
} catch (IllegalArgumentException expected) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ParameterizedTest
|
||||
@MethodSource("baseAddCartesianProduct")
|
||||
void canAddMethodsFromStream(Tuple baseTuple, Object newItem) throws Exception {
|
||||
Tuple freshTuple = Tuple.fromStream(Stream.concat(baseTuple.stream(), Stream.of(newItem)));
|
||||
Assertions.assertEquals(baseTuple.size() + 1, freshTuple.size(), "Incorrect tuple size after stream concat");
|
||||
}
|
||||
|
||||
@ParameterizedTest
|
||||
@MethodSource("baseAddCartesianProduct")
|
||||
void canEncodeAddedItemsWithCompleteVersionstamps(Tuple baseTuple, Object toAdd) throws Exception {
|
||||
Tuple newTuple = Tuple.from(toAdd);
|
||||
// skip this test if we don't fit the appropriate category
|
||||
Assumptions.assumeTrue(!baseTuple.hasIncompleteVersionstamp(), "baseTuple has incomplete versionstamp");
|
||||
Assumptions.assumeTrue(!newTuple.hasIncompleteVersionstamp(), "addingTuple has incomplete versionstamp");
|
||||
|
||||
byte[] concatPacked = ByteArrayUtil.join(baseTuple.pack(), newTuple.pack());
|
||||
byte[] prefixPacked = newTuple.pack(baseTuple.pack());
|
||||
byte[] streamPacked = Tuple.fromStream(Stream.concat(baseTuple.stream(), Stream.of(toAdd))).pack();
|
||||
byte[] tupleAddedPacked = baseTuple.addAll(newTuple).pack();
|
||||
byte[] listAddedPacked = baseTuple.addAll(Arrays.asList(toAdd)).pack();
|
||||
|
||||
Assertions.assertArrayEquals(concatPacked, prefixPacked, "concatPacked != prefixPacked!");
|
||||
Assertions.assertArrayEquals(prefixPacked, streamPacked, "prefixPacked != streamPacked!");
|
||||
Assertions.assertArrayEquals(streamPacked, tupleAddedPacked, "streamPacked != tupleAddedPacked!");
|
||||
Assertions.assertArrayEquals(tupleAddedPacked, listAddedPacked, "tupleAddedPacked != listAddedPacked!");
|
||||
}
|
||||
|
||||
@ParameterizedTest
|
||||
@MethodSource("baseAddCartesianProduct")
|
||||
void cannotPackItemsWithCompleteVersionstamps(Tuple baseTuple, Object toAdd) throws Exception {
|
||||
Tuple newTuple = Tuple.from(toAdd);
|
||||
// skip this test if we don't fit the appropriate category
|
||||
Assumptions.assumeTrue(!baseTuple.hasIncompleteVersionstamp(), "baseTuple has incomplete versionstamp");
|
||||
Assumptions.assumeTrue(!newTuple.hasIncompleteVersionstamp(), "addingTuple has incomplete versionstamp");
|
||||
|
||||
Tuple streamTuple = Tuple.fromStream(Stream.concat(baseTuple.stream(), Stream.of(toAdd)));
|
||||
Tuple aAllTuple = baseTuple.addAll(newTuple);
|
||||
Tuple addAllCollTuple = baseTuple.addAll(Arrays.asList(toAdd));
|
||||
Tuple addObjectTuple = baseTuple.addObject(toAdd);
|
||||
List<Tuple> allTuples = Arrays.asList(streamTuple, aAllTuple, addAllCollTuple, addObjectTuple);
|
||||
|
||||
for (Tuple t : allTuples) {
|
||||
Assertions.assertThrows(IllegalArgumentException.class, () -> { t.packWithVersionstamp(); });
|
||||
}
|
||||
}
|
||||
|
||||
@ParameterizedTest
|
||||
@MethodSource("baseAddCartesianProduct")
|
||||
void canEncodeAddedItemsWithIncompleteVersionstamps(Tuple baseTuple, Object toAdd) throws Exception {
|
||||
Tuple newTuple = Tuple.from(toAdd);
|
||||
// skip this test if we don't fit the appropriate category
|
||||
Assumptions.assumeTrue(!baseTuple.hasIncompleteVersionstamp(), "baseTuple has incomplete versionstamp");
|
||||
Assumptions.assumeTrue(newTuple.hasIncompleteVersionstamp(), "addingTuple has incomplete versionstamp");
|
||||
|
||||
byte[] prefixPacked = newTuple.packWithVersionstamp(baseTuple.pack());
|
||||
byte[] streamPacked =
|
||||
Tuple.fromStream(Stream.concat(baseTuple.stream(), Stream.of(toAdd))).packWithVersionstamp();
|
||||
byte[] tupleAddedPacked = baseTuple.addAll(newTuple).packWithVersionstamp();
|
||||
byte[] listAddedPacked = baseTuple.addAll(Arrays.asList(toAdd)).packWithVersionstamp();
|
||||
|
||||
Assertions.assertArrayEquals(prefixPacked, streamPacked, "prefixPacked != streamPacked!");
|
||||
Assertions.assertArrayEquals(streamPacked, tupleAddedPacked, "streamPacked != tupleAddedPacked!");
|
||||
Assertions.assertArrayEquals(tupleAddedPacked, listAddedPacked, "tupleAddedPacked != listAddedPacked!");
|
||||
}

    @ParameterizedTest
    @MethodSource("baseAddCartesianProduct")
    void cannotPackItemsWithCompleteVersionstampsForNewItem(Tuple baseTuple, Object toAdd) throws Exception {
        Tuple newTuple = Tuple.from(toAdd);
        // skip this test if we don't fit the appropriate category
        Assumptions.assumeTrue(!baseTuple.hasIncompleteVersionstamp(), "baseTuple has incomplete versionstamp");
        Assumptions.assumeTrue(newTuple.hasIncompleteVersionstamp(), "addingTuple has incomplete versionstamp");

        Tuple streamTuple = Tuple.fromStream(Stream.concat(baseTuple.stream(), Stream.of(toAdd)));
        Tuple addAllTuple = baseTuple.addAll(newTuple);
        Tuple addAllCollTuple = baseTuple.addAll(Arrays.asList(toAdd));
        Tuple addObjectTuple = baseTuple.addObject(toAdd);
        List<Tuple> allTuples = Arrays.asList(streamTuple, addAllTuple, addAllCollTuple, addObjectTuple);

        for (Tuple t : allTuples) {
            try {
                t.pack();
                Assertions.fail("was able to pack a tuple with incomplete versionstamps");
            } catch (IllegalArgumentException expected) {
            }
        }
    }

    @ParameterizedTest
    @MethodSource("baseAddCartesianProduct")
    void canEncodeAddedItemsWithIncompleteTupleVersionstamps(Tuple baseTuple, Object toAdd) throws Exception {
        Tuple newTuple = Tuple.from(toAdd);
        // skip this test if we don't fit the appropriate category
        Assumptions.assumeTrue(baseTuple.hasIncompleteVersionstamp(), "baseTuple has incomplete versionstamp");
        Assumptions.assumeTrue(!newTuple.hasIncompleteVersionstamp(), "addingTuple has incomplete versionstamp");

        byte[] prefixPacked = baseTuple.addObject(toAdd).packWithVersionstamp();
        byte[] streamPacked =
            Tuple.fromStream(Stream.concat(baseTuple.stream(), Stream.of(toAdd))).packWithVersionstamp();
        byte[] tupleAddedPacked = baseTuple.addAll(newTuple).packWithVersionstamp();
        byte[] listAddedPacked = baseTuple.addAll(Arrays.asList(toAdd)).packWithVersionstamp();

        Assertions.assertArrayEquals(prefixPacked, streamPacked, "prefixPacked != streamPacked!");
        Assertions.assertArrayEquals(streamPacked, tupleAddedPacked, "streamPacked != tupleAddedPacked!");
        Assertions.assertArrayEquals(tupleAddedPacked, listAddedPacked, "tupleAddedPacked != listAddedPacked!");
    }

    @ParameterizedTest
    @MethodSource("baseAddCartesianProduct")
    void cannotPackItemsWithCompleteVersionstampsForBaseTuple(Tuple baseTuple, Object toAdd) throws Exception {
        Tuple newTuple = Tuple.from(toAdd);
        // skip this test if we don't fit the appropriate category
        Assumptions.assumeTrue(baseTuple.hasIncompleteVersionstamp(), "baseTuple has incomplete versionstamp");
        Assumptions.assumeTrue(!newTuple.hasIncompleteVersionstamp(), "addingTuple has incomplete versionstamp");

        Tuple streamTuple = Tuple.fromStream(Stream.concat(baseTuple.stream(), Stream.of(toAdd)));
        Tuple addAllTuple = baseTuple.addAll(newTuple);
        Tuple addAllCollTuple = baseTuple.addAll(Arrays.asList(toAdd));
        Tuple addObjectTuple = baseTuple.addObject(toAdd);
        List<Tuple> allTuples = Arrays.asList(streamTuple, addAllTuple, addAllCollTuple, addObjectTuple);

        for (Tuple t : allTuples) {
            try {
                t.pack();
                Assertions.fail("was able to pack a tuple with incomplete versionstamps");
            } catch (IllegalArgumentException expected) {
            }
        }
    }

    @Test
    void testIncompleteVersionstamps() throws Exception {
        Assumptions.assumeTrue(FDB.instance().getAPIVersion() > 520, "Skipping test because version is too old");

        // this is a tricky case where there are two tuples with identical
        // representations but different semantics.
        byte[] arr = new byte[0x0100fe];
        Arrays.fill(arr, (byte)0x7f); // the actual value doesn't matter, as long as it's not zero
        Tuple t1 = Tuple.from(arr, Versionstamp.complete(new byte[] { FF, FF, FF, FF, FF, FF, FF, FF, FF, FF }),
                              new byte[] { 0x01, 0x01 });
        Tuple t2 = Tuple.from(arr, Versionstamp.incomplete());
        Assertions.assertNotEquals(t1, t2, "tuples " + t1 + " and " + t2 + " compared equal");

        byte[] bytes1 = t1.pack();
        byte[] bytes2 = t2.packWithVersionstamp();
        Assertions.assertArrayEquals(bytes1, bytes2,
                                     "tuples " + t1 + " and " + t2 + " did not have matching representations");
        Assertions.assertNotEquals(t1, t2,
                                   "tuples " + t1 + " and " + t2 + " compared equal with memoized packed representations");
    }

    @Test
    void testPositionInformationAdjustmentForIncompleteVersionstamp() throws Exception {
        // make sure position information adjustment works
        Tuple t3 = Tuple.from(Versionstamp.incomplete(1));
        Assertions.assertEquals(1 + Versionstamp.LENGTH + Integer.BYTES, t3.getPackedSize(),
                                "incomplete versionstamp has incorrect packed size: " + t3.getPackedSize());

        byte[] bytes3 = t3.packWithVersionstamp();
        Assertions.assertEquals(1,
                                ByteBuffer.wrap(bytes3, bytes3.length - Integer.BYTES, Integer.BYTES)
                                    .order(ByteOrder.LITTLE_ENDIAN)
                                    .getInt(),
                                "incomplete versionstamp has incorrect position");
        Assertions.assertEquals(Tuple.from(Versionstamp.incomplete(1)),
                                Tuple.fromBytes(bytes3, 0, bytes3.length - Integer.BYTES),
                                "unpacked bytes did not match");

        Subspace subspace = new Subspace(Tuple.from("prefix"));
        byte[] bytes4 = subspace.packWithVersionstamp(t3);
        Assertions.assertEquals(1 + subspace.getKey().length,
                                ByteBuffer.wrap(bytes4, bytes4.length - Integer.BYTES, Integer.BYTES)
                                    .order(ByteOrder.LITTLE_ENDIAN)
                                    .getInt(),
                                "incomplete versionstamp has incorrect position with prefix");
        Assertions.assertEquals(Tuple.from("prefix", Versionstamp.incomplete(1)),
                                Tuple.fromBytes(bytes4, 0, bytes4.length - Integer.BYTES),
                                "unpacked bytes with subspace did not match");

        try {
            // At this point, the representation is cached, so an easy bug would be to have
            // it return the already serialized value
            t3.pack();
            Assertions.fail("was able to pack a tuple with an incomplete versionstamp");
        } catch (IllegalArgumentException expected) {
            // eat
        }
    }

    @ParameterizedTest
    @MethodSource("twoIncomplete")
    void testTwoIncompleteVersionstamps(Tuple t) {
        Assertions.assertTrue(t.hasIncompleteVersionstamp(), "tuple doesn't think it has incomplete versionstamps");
        Assertions.assertTrue(t.getPackedSize() >= 2 * (1 + Versionstamp.LENGTH + Integer.BYTES),
                              "tuple packed size " + t.getPackedSize() + " is smaller than expected");

        try {
            t.pack();
            Assertions.fail("no error thrown when packing a tuple with incomplete versionstamps");
        } catch (IllegalArgumentException expected) {
        }

        try {
            t.packWithVersionstamp();
            Assertions.fail("no error thrown when packing with versionstamp with two incomplete versionstamps");
        } catch (IllegalArgumentException expected) {
        }
    }

    @ParameterizedTest
    @MethodSource("malformedSequences")
    void cantUnpackMalformedSequences(byte[] sequence) {
        try {
            Tuple t = Tuple.fromBytes(sequence);
            Assertions.fail("Able to unpack " + ByteArrayUtil.printable(sequence) + " into " + t);
        } catch (IllegalArgumentException expected) {
        }
    }

    @ParameterizedTest
    @MethodSource("wellFormedSequences")
    void cantUnpackSequencesWithoutLastCharacter(byte[] sequence) throws Exception {
        Assertions.assertThrows(IllegalArgumentException.class,
                                () -> Tuple.fromBytes(sequence, 0, sequence.length - 1),
                                String.format("Able to unpack <%s> without last character", ByteArrayUtil.printable(sequence)));
    }

    @Test
    void malformedStrings() throws Exception {
        // Malformed when packing
        List<String> strings = Arrays.asList("\ud83d", // high surrogate without low (end of string)
                                             "\ud83da", // high surrogate without low (not end of string)
                                             "\ud83d\ud8ed", // two high surrogates
                                             "\udd25", // low surrogate without high (start of string)
                                             "\udd26\udd6f", // two low surrogates
                                             "a\udd25", // low surrogate without high (not start of string)
                                             "a\udd25\udd6e", // two low surrogates (not start of string)
                                             "a\udd25\udd6f", // two low surrogates (not start of string)
                                             "\ud33d\udd25\udd25" // high surrogate followed by two low surrogates
        );

        // Verify that none of these can be packed
        for (String s : strings) {
            Tuple t = Tuple.from(s);
            try {
                t.getPackedSize();
                Assertions.fail("able to get packed size of malformed string " + ByteArrayUtil.printable(s.getBytes()));
            } catch (IllegalArgumentException expected) {
            }
            try {
                t.pack();
                Assertions.fail("able to pack malformed string " + ByteArrayUtil.printable(s.getBytes()));
            } catch (IllegalArgumentException expected) {
            }
            try {
                // Modify the memoized packed size to match what it would be if naively packed.
                // This checks that the validation logic invoked right before packing works,
                // but getting that code path to execute means modifying the tuple's internal
                // state, hence the reflection.
                Field f = Tuple.class.getDeclaredField("memoizedPackedSize");
                AccessController.doPrivileged((PrivilegedExceptionAction<Void>)() -> {
                    if (!f.isAccessible()) {
                        f.setAccessible(true);
                    }
                    f.setInt(t, 2 + s.getBytes("UTF-8").length);
                    return null;
                });
                t.pack();
                Assertions.fail("able to pack malformed string");
            } catch (IllegalArgumentException expected) {
                // eat
            }
        }
    }
}
@ -0,0 +1,369 @@
/*
 * TupleSerializationTest.java
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.apple.foundationdb.tuple;

import java.math.BigInteger;
import java.nio.BufferOverflowException;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.UUID;

import com.apple.foundationdb.FDBLibraryRule;

import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.RegisterExtension;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.MethodSource;

/**
 * Tests for serializing and deserializing Tuples.
 *
 * This class should hold tests that exercise the serialization logic, while
 * {@link TuplePackingTest} should be used to test packing behavior. Granted, that
 * distinction is fairly arbitrary; the main motivation for separating the two classes
 * is simply to avoid a single ludicrously large test file, so it is not the end of
 * the world if this rule isn't followed perfectly.
 */
class TupleSerializationTest {
    @RegisterExtension static final FDBLibraryRule fdbLib = FDBLibraryRule.current();

    private static final byte FF = (byte)0xff;

    private static class TupleSerialization {
        private final Tuple tuple;
        private final byte[] serialization;

        TupleSerialization(Tuple tuple, byte[] serialization) {
            this.tuple = tuple;
            this.serialization = serialization;
        }
    }

    static Collection<TupleSerialization> serializedForms() {
        return Arrays.asList(
            new TupleSerialization(Tuple.from(), new byte[0]),
            new TupleSerialization(Tuple.from(0L), new byte[] { 0x14 }),
            new TupleSerialization(Tuple.from(BigInteger.ZERO), new byte[] { 0x14 }),
            new TupleSerialization(Tuple.from(1L), new byte[] { 0x15, 0x01 }),
            new TupleSerialization(Tuple.from(BigInteger.ONE), new byte[] { 0x15, 0x01 }),
            new TupleSerialization(Tuple.from(-1L), new byte[] { 0x13, FF - 1 }),
            new TupleSerialization(Tuple.from(BigInteger.ONE.negate()), new byte[] { 0x13, FF - 1 }),
            new TupleSerialization(Tuple.from(255L), new byte[] { 0x15, FF }),
            new TupleSerialization(Tuple.from(BigInteger.valueOf(255)), new byte[] { 0x15, FF }),
            new TupleSerialization(Tuple.from(-255L), new byte[] { 0x13, 0x00 }),
            new TupleSerialization(Tuple.from(BigInteger.valueOf(-255)), new byte[] { 0x13, 0x00 }),
            new TupleSerialization(Tuple.from(256L), new byte[] { 0x16, 0x01, 0x00 }),
            new TupleSerialization(Tuple.from(BigInteger.valueOf(256)), new byte[] { 0x16, 0x01, 0x00 }),
            new TupleSerialization(Tuple.from(-256L), new byte[] { 0x12, FF - 1, FF }),
            new TupleSerialization(Tuple.from(BigInteger.valueOf(-256)), new byte[] { 0x12, FF - 1, FF }),
            new TupleSerialization(Tuple.from(65536), new byte[] { 0x17, 0x01, 0x00, 0x00 }),
            new TupleSerialization(Tuple.from(-65536), new byte[] { 0x11, FF - 1, FF, FF }),
            new TupleSerialization(Tuple.from(Long.MAX_VALUE), new byte[] { 0x1C, 0x7f, FF, FF, FF, FF, FF, FF, FF }),
            new TupleSerialization(Tuple.from(BigInteger.valueOf(Long.MAX_VALUE)),
                                   new byte[] { 0x1C, 0x7f, FF, FF, FF, FF, FF, FF, FF }),
            new TupleSerialization(Tuple.from(BigInteger.valueOf(Long.MAX_VALUE).add(BigInteger.ONE)),
                                   new byte[] { 0x1C, (byte)0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }),
            new TupleSerialization(Tuple.from(BigInteger.ONE.shiftLeft(64).subtract(BigInteger.ONE)),
                                   new byte[] { 0x1C, FF, FF, FF, FF, FF, FF, FF, FF }),
            new TupleSerialization(Tuple.from(BigInteger.ONE.shiftLeft(64)),
                                   new byte[] { 0x1D, 0x09, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }),
            new TupleSerialization(Tuple.from(-((1L << 32) - 1)), new byte[] { 0x10, 0x00, 0x00, 0x00, 0x00 }),
            new TupleSerialization(Tuple.from(BigInteger.ONE.shiftLeft(32).subtract(BigInteger.ONE).negate()),
                                   new byte[] { 0x10, 0x00, 0x00, 0x00, 0x00 }),
            new TupleSerialization(Tuple.from(Long.MIN_VALUE + 2),
                                   new byte[] { 0x0C, (byte)0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01 }),
            new TupleSerialization(Tuple.from(Long.MIN_VALUE + 1),
                                   new byte[] { 0x0C, (byte)0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }),
            new TupleSerialization(Tuple.from(BigInteger.valueOf(Long.MIN_VALUE).add(BigInteger.ONE)),
                                   new byte[] { 0x0C, (byte)0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }),
            new TupleSerialization(Tuple.from(Long.MIN_VALUE), new byte[] { 0x0C, 0x7f, FF, FF, FF, FF, FF, FF, FF }),
            new TupleSerialization(Tuple.from(BigInteger.valueOf(Long.MIN_VALUE)),
                                   new byte[] { 0x0C, 0x7f, FF, FF, FF, FF, FF, FF, FF }),
            new TupleSerialization(Tuple.from(BigInteger.valueOf(Long.MIN_VALUE).subtract(BigInteger.ONE)),
                                   new byte[] { 0x0C, 0x7f, FF, FF, FF, FF, FF, FF, FF - 1 }),
            new TupleSerialization(Tuple.from(BigInteger.ONE.shiftLeft(64).subtract(BigInteger.ONE).negate()),
                                   new byte[] { 0x0C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }),
            new TupleSerialization(Tuple.from(3.14f), new byte[] { 0x20, (byte)0xc0, 0x48, (byte)0xf5, (byte)0xc3 }),
            new TupleSerialization(Tuple.from(-3.14f),
                                   new byte[] { 0x20, (byte)0x3f, (byte)0xb7, (byte)0x0a, (byte)0x3c }),
            new TupleSerialization(Tuple.from(3.14),
                                   new byte[] { 0x21, (byte)0xc0, (byte)0x09, (byte)0x1e, (byte)0xb8,
                                                (byte)0x51, (byte)0xeb, (byte)0x85, (byte)0x1f }),
            new TupleSerialization(Tuple.from(-3.14),
                                   new byte[] { 0x21, (byte)0x3f, (byte)0xf6, (byte)0xe1, (byte)0x47,
                                                (byte)0xae, (byte)0x14, (byte)0x7a, (byte)0xe0 }),
            new TupleSerialization(Tuple.from(0.0f), new byte[] { 0x20, (byte)0x80, 0x00, 0x00, 0x00 }),
            new TupleSerialization(Tuple.from(-0.0f), new byte[] { 0x20, 0x7f, FF, FF, FF }),
            new TupleSerialization(Tuple.from(0.0),
                                   new byte[] { 0x21, (byte)0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }),
            new TupleSerialization(Tuple.from(-0.0), new byte[] { 0x21, 0x7f, FF, FF, FF, FF, FF, FF, FF }),
            new TupleSerialization(Tuple.from(Float.POSITIVE_INFINITY),
                                   new byte[] { 0x20, FF, (byte)0x80, 0x00, 0x00 }),
            new TupleSerialization(Tuple.from(Float.NEGATIVE_INFINITY), new byte[] { 0x20, 0x00, 0x7f, FF, FF }),
            new TupleSerialization(Tuple.from(Double.POSITIVE_INFINITY),
                                   new byte[] { 0x21, FF, (byte)0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }),
            new TupleSerialization(Tuple.from(Double.NEGATIVE_INFINITY),
                                   new byte[] { 0x21, 0x00, 0x0f, FF, FF, FF, FF, FF, FF }),
            new TupleSerialization(Tuple.from(Float.intBitsToFloat(Integer.MAX_VALUE)),
                                   new byte[] { 0x20, FF, FF, FF, FF }),
            new TupleSerialization(Tuple.from(Double.longBitsToDouble(Long.MAX_VALUE)),
                                   new byte[] { 0x21, FF, FF, FF, FF, FF, FF, FF, FF }),
            new TupleSerialization(Tuple.from(Float.intBitsToFloat(~0)), new byte[] { 0x20, 0x00, 0x00, 0x00, 0x00 }),
            new TupleSerialization(Tuple.from(Double.longBitsToDouble(~0L)),
                                   new byte[] { 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }),
            new TupleSerialization(Tuple.from((Object) new byte[0]), new byte[] { 0x01, 0x00 }),
            new TupleSerialization(Tuple.from((Object) new byte[] { 0x01, 0x02, 0x03 }),
                                   new byte[] { 0x01, 0x01, 0x02, 0x03, 0x00 }),
            new TupleSerialization(Tuple.from((Object) new byte[] { 0x00, 0x00, 0x00, 0x04 }),
                                   new byte[] { 0x01, 0x00, FF, 0x00, FF, 0x00, FF, 0x04, 0x00 }),
            new TupleSerialization(Tuple.from(""), new byte[] { 0x02, 0x00 }),
            new TupleSerialization(Tuple.from("hello"), new byte[] { 0x02, 'h', 'e', 'l', 'l', 'o', 0x00 }),
            new TupleSerialization(Tuple.from("\u4e2d\u6587"),
                                   new byte[] { 0x02, (byte)0xe4, (byte)0xb8, (byte)0xad, (byte)0xe6, (byte)0x96,
                                                (byte)0x87, 0x00 }), // Chinese (three bytes per code point)
            new TupleSerialization(Tuple.from("\u03bc\u03ac\u03b8\u03b7\u03bc\u03b1"),
                                   new byte[] { 0x02, (byte)0xce, (byte)0xbc, (byte)0xce, (byte)0xac, (byte)0xce,
                                                (byte)0xb8, (byte)0xce, (byte)0xb7, (byte)0xce, (byte)0xbc, (byte)0xce,
                                                (byte)0xb1, 0x00 }), // Greek (two bytes per code point)
            new TupleSerialization(Tuple.from(new String(new int[] { 0x1f525 }, 0, 1)),
                                   new byte[] { 0x02, (byte)0xf0, (byte)0x9f, (byte)0x94, (byte)0xa5,
                                                0x00 }), // fire emoji as a Unicode code point
            new TupleSerialization(Tuple.from("\ud83d\udd25"),
                                   new byte[] { 0x02, (byte)0xf0, (byte)0x9f, (byte)0x94,
                                                (byte)0xa5, 0x00 }), // fire emoji in UTF-16
            new TupleSerialization(Tuple.from("\ud83e\udd6f"),
                                   new byte[] { 0x02, (byte)0xf0, (byte)0x9f, (byte)0xa5, (byte)0xaf, 0x00 }), // bagel emoji in UTF-16
            new TupleSerialization(Tuple.from(new String(new int[] { 0x1f9a5 }, 0, 1)),
                                   new byte[] { 0x02, (byte)0xf0, (byte)0x9f, (byte)0xa6, (byte)0xa5,
                                                0x00 }), // currently unassigned code point (will be sloth)
            new TupleSerialization(Tuple.from("\ud83e\udda5"),
                                   new byte[] { 0x02, (byte)0xf0, (byte)0x9f, (byte)0xa6, (byte)0xa5,
                                                0x00 }), // currently unassigned code point (will be sloth)
            new TupleSerialization(Tuple.from(new String(new int[] { 0x10FFFF }, 0, 1)),
                                   new byte[] { 0x02, (byte)0xf4, (byte)0x8f, (byte)0xbf, (byte)0xbf, 0x00 }), // maximum Unicode code point
            new TupleSerialization(Tuple.from("\udbff\udfff"),
                                   new byte[] { 0x02, (byte)0xf4, (byte)0x8f, (byte)0xbf, (byte)0xbf, 0x00 }), // maximum Unicode code point
            new TupleSerialization(Tuple.from(Tuple.from((Object)null)), new byte[] { 0x05, 0x00, FF, 0x00 }),
            new TupleSerialization(Tuple.from(Tuple.from(null, "hello")),
                                   new byte[] { 0x05, 0x00, FF, 0x02, 'h', 'e', 'l', 'l', 'o', 0x00, 0x00 }),
            new TupleSerialization(Tuple.from(Arrays.asList(null, "hello")),
                                   new byte[] { 0x05, 0x00, FF, 0x02, 'h', 'e', 'l', 'l', 'o', 0x00, 0x00 }),
            new TupleSerialization(Tuple.from(Tuple.from(null, "hell\0")),
                                   new byte[] { 0x05, 0x00, FF, 0x02, 'h', 'e', 'l', 'l', 0x00, FF, 0x00, 0x00 }),
            new TupleSerialization(Tuple.from(Arrays.asList(null, "hell\0")),
                                   new byte[] { 0x05, 0x00, FF, 0x02, 'h', 'e', 'l', 'l', 0x00, FF, 0x00, 0x00 }),
            new TupleSerialization(Tuple.from(Tuple.from((Object)null), "hello"),
                                   new byte[] { 0x05, 0x00, FF, 0x00, 0x02, 'h', 'e', 'l', 'l', 'o', 0x00 }),
            new TupleSerialization(Tuple.from(Tuple.from((Object)null), "hello", new byte[] { 0x01, 0x00 }, new byte[0]),
                                   new byte[] { 0x05, 0x00, FF, 0x00, 0x02, 'h', 'e', 'l', 'l', 'o', 0x00, 0x01, 0x01, 0x00, FF, 0x00,
                                                0x01, 0x00 }),
            new TupleSerialization(Tuple.from(new UUID(0xba5eba11, 0x5ca1ab1e)),
                                   new byte[] { 0x30, FF, FF, FF, FF, (byte)0xba, 0x5e, (byte)0xba, 0x11, 0x00, 0x00,
                                                0x00, 0x00, 0x5c, (byte)0xa1, (byte)0xab, 0x1e }),
            new TupleSerialization(Tuple.from(false), new byte[] { 0x26 }),
            new TupleSerialization(Tuple.from(true), new byte[] { 0x27 }),
            new TupleSerialization(Tuple.from((short)0x3019), new byte[] { 0x16, 0x30, 0x19 }),
            new TupleSerialization(Tuple.from((byte)0x03), new byte[] { 0x15, 0x03 }),
            new TupleSerialization(Tuple.from(Versionstamp.complete(new byte[] { (byte)0xaa, (byte)0xbb, (byte)0xcc, (byte)0xdd,
                                                                                 (byte)0xee, FF, 0x00, 0x01, 0x02, 0x03 })),
                                   new byte[] { 0x33, (byte)0xaa, (byte)0xbb, (byte)0xcc, (byte)0xdd, (byte)0xee, FF, 0x00, 0x01, 0x02,
                                                0x03, 0x00, 0x00 }),
            new TupleSerialization(Tuple.from(Versionstamp.complete(
                                       new byte[] { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a }, 657)),
                                   new byte[] { 0x33, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x02, (byte)0x91 }));
    }
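
    // One worked example from the table above (explanatory note, not part of the suite):
    // Tuple.from(-1L) packs as { 0x13, 0xfe }. The type code 0x13 marks a negative integer
    // with a one-byte payload, and the payload is stored as the value plus 0xff
    // (-1 + 0xff = 0xfe), so that packed tuples sort bytewise in numeric order.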

    @ParameterizedTest
    @MethodSource("serializedForms")
    void testSerializationPackedSize(TupleSerialization serialization) throws Exception {
        Assertions.assertEquals(serialization.serialization.length, serialization.tuple.getPackedSize(),
                                "Incorrect packed size for tuple <" + serialization.tuple + ">");
    }

    @ParameterizedTest
    @MethodSource("serializedForms")
    void testSerializationPacking(TupleSerialization serialization) throws Exception {
        byte[] packed = serialization.tuple.pack();
        Assertions.assertArrayEquals(serialization.serialization, packed,
                                     "Incorrect packing for tuple <" + serialization.tuple + ">");
    }

    @ParameterizedTest
    @MethodSource("serializedForms")
    void testSerializationDePacking(TupleSerialization serialization) throws Exception {
        Tuple depacked = Tuple.fromItems(Tuple.fromBytes(serialization.serialization).getItems());

        Assertions.assertEquals(serialization.tuple, depacked, "Incorrect tuple after unpacking and deserialization");
    }

    /* Error handling tests on packing */

    static final List<Tuple> offsetAndLengthTuples =
        Arrays.asList(new Tuple(), Tuple.from((Object)null), Tuple.from(null, new byte[] { 0x10, 0x66 }),
                      Tuple.from("dummy_string"), Tuple.from(1066L));
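
    // The tests below concatenate all of the fixture tuples into one Tuple via
    // reduce(new Tuple(), Tuple::addAll), pack the result, and then slice the packed
    // bytes at known offsets to exercise the (byte[], offset, length) overload of fromBytes.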

    @Test
    void testFromBytesWithNegativeLengthFails() throws Exception {
        Tuple allTuples = offsetAndLengthTuples.stream().reduce(new Tuple(), Tuple::addAll);
        byte[] allTupleBytes = allTuples.pack();

        // should throw
        Assertions.assertThrows(IllegalArgumentException.class, () -> { Tuple.fromBytes(allTupleBytes, 0, -1); });
    }

    @Test
    void testFromBytesWithTooLargeOffsetFails() throws Exception {
        Tuple allTuples = offsetAndLengthTuples.stream().reduce(new Tuple(), Tuple::addAll);
        byte[] allTupleBytes = allTuples.pack();

        // should throw
        Assertions.assertThrows(IllegalArgumentException.class,
                                () -> { Tuple.fromBytes(allTupleBytes, allTupleBytes.length + 1, 4); });
    }

    @Test
    void testFromBytesWithTooLargeLengthFails() throws Exception {
        Tuple allTuples = offsetAndLengthTuples.stream().reduce(new Tuple(), Tuple::addAll);
        byte[] allTupleBytes = allTuples.pack();

        // should throw
        Assertions.assertThrows(IllegalArgumentException.class,
                                () -> { Tuple.fromBytes(allTupleBytes, 0, allTupleBytes.length + 1); });
    }

    @Test
    void testNotAbleToExceedArrayLengthInFromBytes() throws Exception {
        Tuple allTuples = offsetAndLengthTuples.stream().reduce(new Tuple(), Tuple::addAll);
        byte[] allTupleBytes = allTuples.pack();

        // should throw
        Assertions.assertThrows(IllegalArgumentException.class, () -> {
            Tuple.fromBytes(allTupleBytes, allTupleBytes.length / 2, allTupleBytes.length / 2 + 2);
        });
    }

    @Test
    void testEmptyAtEndTuple() throws Exception {
        Tuple allTuples = offsetAndLengthTuples.stream().reduce(new Tuple(), Tuple::addAll);
        byte[] allTupleBytes = allTuples.pack();

        // An offset equal to the length of the array is allowed, but essentially only a
        // zero-length read is possible there.
        Tuple emptyAtEndTuple = Tuple.fromBytes(allTupleBytes, allTupleBytes.length, 0);
        Assertions.assertTrue(emptyAtEndTuple.isEmpty(), "tuple with no bytes is not empty");
    }

    @Test
    void testFromBytesWithNegativeOffsetFails() throws Exception {
        Tuple allTuples = offsetAndLengthTuples.stream().reduce(new Tuple(), Tuple::addAll);
        byte[] allTupleBytes = allTuples.pack();

        // should throw
        Assertions.assertThrows(IllegalArgumentException.class, () -> { Tuple.fromBytes(allTupleBytes, -1, 4); });
    }

    @Test
    void testUnpackedTupleMatchesSerializedTuple() throws Exception {
        Tuple allTuples = offsetAndLengthTuples.stream().reduce(new Tuple(), Tuple::addAll);
        byte[] allTupleBytes = allTuples.pack();

        // Unpack each tuple individually using its packed length
        int offset = 0;
        for (Tuple t : offsetAndLengthTuples) {
            int length = t.getPackedSize();
            Tuple unpacked = Tuple.fromBytes(allTupleBytes, offset, length);
            Assertions.assertEquals(t, unpacked,
                                    "unpacked tuple " + unpacked + " does not match serialized tuple " + t);
            offset += length;
        }
    }

    @Test
    void testUnpackedTupleMatchesCombinedTuple() throws Exception {
        List<Tuple> tuples = offsetAndLengthTuples;
        Tuple allTuples = tuples.stream().reduce(new Tuple(), Tuple::addAll);
        byte[] allTupleBytes = allTuples.pack();

        // Unpack successive pairs of tuples.
        int offset = 0;
        for (int i = 0; i < tuples.size() - 1; i++) {
            Tuple combinedTuple = tuples.get(i).addAll(tuples.get(i + 1));
            Tuple unpacked = Tuple.fromBytes(allTupleBytes, offset, combinedTuple.getPackedSize());
            Assertions.assertEquals(combinedTuple, unpacked,
                                    "unpacked tuple " + unpacked + " does not match combined tuple " + combinedTuple);
            offset += tuples.get(i).getPackedSize();
        }
    }

    @Test
    void testPackIntoBuffer() throws Exception {
        Tuple t = Tuple.from("hello", 3.14f, "world");

        ByteBuffer buffer = ByteBuffer.allocate("hello".length() + 2 + Float.BYTES + 1 + "world".length() + 2);
        t.packInto(buffer);
        Assertions.assertArrayEquals(buffer.array(), t.pack(), "buffer and tuple do not match");

        buffer = ByteBuffer.allocate(t.getPackedSize() + 2);
        buffer.order(ByteOrder.LITTLE_ENDIAN);
        t.packInto(buffer);
        Assertions.assertArrayEquals(ByteArrayUtil.join(t.pack(), new byte[] { 0x00, 0x00 }), buffer.array(),
                                     "buffer and tuple do not match");
        Assertions.assertEquals(ByteOrder.LITTLE_ENDIAN, buffer.order(), "byte order changed");

        buffer = ByteBuffer.allocate(t.getPackedSize() + 2);
        buffer.put((byte)0x01).put((byte)0x02);
        t.packInto(buffer);
        Assertions.assertArrayEquals(t.pack(new byte[] { 0x01, 0x02 }), buffer.array(),
                                     "buffer and tuple do not match");

        buffer = ByteBuffer.allocate(t.getPackedSize() - 1);
        try {
            t.packInto(buffer);
            Assertions.fail("able to pack into buffer that was too small");
        } catch (BufferOverflowException expected) {
        }

        Tuple tCopy = Tuple.fromItems(t.getItems()); // remove memoized stuff
        buffer = ByteBuffer.allocate(t.getPackedSize() - 1);
        try {
            tCopy.packInto(buffer);
            Assertions.fail("able to pack into buffer that was too small");
        } catch (BufferOverflowException expected) {
        }

        Tuple tWithIncomplete = Tuple.from(Versionstamp.incomplete(3));
        buffer = ByteBuffer.allocate(tWithIncomplete.getPackedSize());
        try {
            tWithIncomplete.packInto(buffer);
            Assertions.fail("able to pack incomplete versionstamp into buffer");
        } catch (IllegalArgumentException expected) {
        }

        Assertions.assertEquals(0, buffer.arrayOffset(),
                                "offset changed after unsuccessful pack with incomplete versionstamp");
    }
}
@ -62,6 +62,17 @@ public interface Database extends AutoCloseable, TransactionContext {
     */
    Transaction createTransaction(Executor e);

    /**
     * Creates a {@link Transaction} that operates on this {@code Database} with the given {@link Executor}
     * for asynchronous callbacks.
     *
     * @param e the {@link Executor} to use when executing asynchronous callbacks for the database
     * @param eventKeeper the {@link EventKeeper} to use when tracking instrumented calls for the transaction.
     *
     * @return a newly created {@code Transaction} that reads from and writes to this {@code Database}.
     */
    Transaction createTransaction(Executor e, EventKeeper eventKeeper);
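
    // A hedged usage sketch (the executor and the EventKeeper implementation below are
    // illustrative assumptions, not part of this interface):
    //
    //   Executor executor = Executors.newSingleThreadExecutor();
    //   EventKeeper events = new MapEventKeeper(); // assumed EventKeeper implementation
    //   try (Transaction tr = db.createTransaction(executor, events)) {
    //       tr.set("hello".getBytes(), "world".getBytes());
    //       tr.commit().join();
    //   }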

    /**
     * Returns a set of options that can be set on a {@code Database}
     *
@ -69,6 +80,15 @@ public interface Database extends AutoCloseable, TransactionContext {
     */
    DatabaseOptions options();

    /**
     * Returns a value that indicates the saturation of the client.
     * <br>
     * <b>Note:</b> By default, this value is updated once per second.
     *
     * @return a value where 0 indicates that the client is idle and 1 (or larger) indicates that the client is saturated.
     */
    double getMainThreadBusyness();
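
    // A hedged monitoring sketch (the threshold and logging below are illustrative):
    //
    //   double busyness = db.getMainThreadBusyness();
    //   if (busyness >= 0.95) {
    //       // The client network thread is nearly saturated; consider spreading load
    //       // across more client processes or reducing request volume.
    //       System.err.println("FDB client busyness: " + busyness);
    //   }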

    /**
     * Runs a read-only transactional function against this {@code Database} with retry logic.
     * {@link Function#apply(Object) apply(ReadTransaction)} will be called on the
@ -0,0 +1,108 @@
/*
 * DirectBufferIterator.java
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2015-2020 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.apple.foundationdb;

import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.util.Iterator;
import java.util.NoSuchElementException;

/**
 * Holds the direct buffer that is shared with the JNI wrapper. A typical usage looks like this:
 *
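 * <pre>
 * // Sketch only (the real caller lives in the native read path; buffer acquisition is assumed):
 * try (DirectBufferIterator it = new DirectBufferIterator(DirectBufferPool.getInstance().poll())) {
 *     // ... native code fills the buffer with a serialized range-read result ...
 *     it.readResultsSummary();
 *     while (it.hasNext()) {
 *         KeyValue kv = it.next();
 *     }
 * }
 * </pre>
 *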
 * The serialization format of the result is:
 * [int keyCount, boolean more, ListOf<(int keyLen, int valueLen, byte[] key, byte[] value)>]
 */
class DirectBufferIterator implements Iterator<KeyValue>, AutoCloseable {
    private ByteBuffer byteBuffer;
    private int current = 0;
    private int keyCount = -1;
    private boolean more = false;

    public DirectBufferIterator(ByteBuffer buffer) {
        byteBuffer = buffer;
        byteBuffer.order(ByteOrder.nativeOrder());
    }

    @Override
    public void close() {
        if (byteBuffer != null) {
            DirectBufferPool.getInstance().add(byteBuffer);
            byteBuffer = null;
        }
    }

    public boolean hasResultReady() {
        return keyCount > -1;
    }

    @Override
    public boolean hasNext() {
        assert (hasResultReady());
        return current < keyCount;
    }

    @Override
    public KeyValue next() {
        assert (hasResultReady()); // Must only be called once the result is ready.
        if (!hasNext()) {
            throw new NoSuchElementException();
        }

        final int keyLen = byteBuffer.getInt();
        final int valueLen = byteBuffer.getInt();
        byte[] key = new byte[keyLen];
        byteBuffer.get(key);

        byte[] value = new byte[valueLen];
        byteBuffer.get(value);

        current += 1;
        return new KeyValue(key, value);
    }

    public ByteBuffer getBuffer() {
        return byteBuffer;
    }

    public int count() {
        assert (hasResultReady());
        return keyCount;
    }

    public boolean hasMore() {
        assert (hasResultReady());
        return more;
    }

    public int currentIndex() {
        return current;
    }

    public void readResultsSummary() {
        byteBuffer.rewind();
        byteBuffer.position(0);

        keyCount = byteBuffer.getInt();
        more = byteBuffer.getInt() > 0;
    }
}
@ -0,0 +1,89 @@
/*
 * DirectBufferPool.java
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2015-2020 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.apple.foundationdb;

import java.nio.ByteBuffer;
import java.util.concurrent.ArrayBlockingQueue;

/**
 * A singleton that manages a pool of direct {@link ByteBuffer}s shared by
 * {@link DirectBufferIterator} instances. It is the responsibility of the
 * caller to return each borrowed buffer to the pool.
 */
class DirectBufferPool {
    static final DirectBufferPool __instance = new DirectBufferPool();

    // When tuning this, make sure that the size of the buffer
    // is always greater than the maximum KV size allowed by FDB.
    // The current limits are:
    // 10kB for key + 100kB for value + 1 int for count + 1 int for more + 2 ints for the KV sizes
    public static final int MIN_BUFFER_SIZE = (10 + 100) * 1000 + Integer.BYTES * 4;
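    // i.e. 110,000 bytes of payload headroom plus 4 * Integer.BYTES = 16 bytes of framing,
    // for a total of 110,016 bytes.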

    private static final int DEFAULT_NUM_BUFFERS = 128;
    private static final int DEFAULT_BUFFER_SIZE = 1024 * 512;

    private ArrayBlockingQueue<ByteBuffer> buffers;
    private int currentBufferCapacity;

    public DirectBufferPool() {
        resize(DEFAULT_NUM_BUFFERS, DEFAULT_BUFFER_SIZE);
    }

    public static DirectBufferPool getInstance() {
        return __instance;
    }

    /**
     * Resizes the buffer pool to the given capacity and buffer size. Throws an
     * {@link OutOfMemoryError} if it is unable to allocate the requested buffers.
     */
    public synchronized void resize(int newPoolSize, int bufferSize) {
        if (bufferSize < MIN_BUFFER_SIZE) {
            throw new IllegalArgumentException("'bufferSize' must be at least: " + MIN_BUFFER_SIZE + " bytes");
        }
        buffers = new ArrayBlockingQueue<>(newPoolSize);
        currentBufferCapacity = bufferSize;
        while (buffers.size() < newPoolSize) {
            ByteBuffer buffer = ByteBuffer.allocateDirect(bufferSize);
            buffers.add(buffer);
        }
    }

    /**
     * Requests a direct {@link ByteBuffer} from the pool. Returns {@code null} if the pool is empty.
     */
    public synchronized ByteBuffer poll() {
        return buffers.poll();
    }
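
    // A hedged borrow/return sketch (the calling code below is illustrative; in the
    // bindings the borrower is the native read path, and DirectBufferIterator.close()
    // returns the buffer via add()):
    //
    //   ByteBuffer buf = DirectBufferPool.getInstance().poll();
    //   if (buf != null) {
    //       try {
    //           buf.clear();
    //           // ... hand buf to the native reader ...
    //       } finally {
    //           DirectBufferPool.getInstance().add(buf); // always return the buffer
    //       }
    //   }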

    /**
     * Returns a direct {@link ByteBuffer} that was previously borrowed from the pool.
     */
    public synchronized void add(ByteBuffer buffer) {
        if (buffer.capacity() != currentBufferCapacity) {
            // This can happen when resize() was called while requests were outstanding;
            // older buffers are simply discarded as they are returned.
            return;
        }

        buffers.offer(buffer);
    }
}
Some files were not shown because too many files have changed in this diff.