Merge remote-tracking branch 'upstream/master' into spilled-only-peek

This commit is contained in: commit 51fd42a4d2

@ -457,3 +457,30 @@ John W. Wilkinson (JSON Spirit)
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+SHIBUKAWA Yoshiki (sphinxcontrib-rubydomain)
+Copyright (c) 2010 by SHIBUKAWA Yoshiki.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+* Redistributions of source code must retain the above copyright
+  notice, this list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright
+  notice, this list of conditions and the following disclaimer in the
+  documentation and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@ -26,7 +26,7 @@ project(foundationdb
set(CMAKE_MODULE_PATH "${CMAKE_MODULE_PATH};${PROJECT_SOURCE_DIR}/cmake")
message (STATUS "${PROJECT_SOURCE_DIR} ${PROJECT_BINARY_DIR}")
if("${PROJECT_SOURCE_DIR}" STREQUAL "${PROJECT_BINARY_DIR}")
-  message(FATAL_ERROR "In-source builds are forbidden, unsupported, and stupid!!")
+  message(FATAL_ERROR "In-source builds are forbidden")
endif()

if(NOT CMAKE_BUILD_TYPE AND NOT CMAKE_CONFIGURATION_TYPES)
@ -196,7 +196,7 @@ if (CMAKE_EXPORT_COMPILE_COMMANDS)
  DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/build/gen_compile_db.py ${CMAKE_CURRENT_BINARY_DIR}/compile_commands.json
  COMMENT "Build compile commands for IDE"
)
-add_custom_target(procossed_compile_commands ALL DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/compile_commands.json ${CMAKE_CURRENT_BINARY_DIR}/compile_commands.json)
+add_custom_target(processed_compile_commands ALL DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/compile_commands.json ${CMAKE_CURRENT_BINARY_DIR}/compile_commands.json)
endif()

################################################################################
@ -30,7 +30,7 @@ At project launch, FoundationDB has a light governance structure. The intention

We draw inspiration from the Apache Software Foundation's informal motto: ["community over code"](https://blogs.apache.org/foundation/entry/asf_15_community_over_code), and their emphasis on meritocratic rules. You'll also observe that some initial community structure is [inspired by the Swift community](https://swift.org/community/#community-structure).

-The project technical lead is Ben Collins (bbc@apple.com).
+The project technical lead is Evan Tschannen (ejt@apple.com).

Members of the Apple FoundationDB team are part of the initial core committers helping review individual contributions; you'll see them commenting on your pull requests. Future committers to the open source project, and the process for adding individuals in this role will be formalized in the future.
@ -38,7 +38,7 @@ Members of the Apple FoundationDB team are part of the initial core committers h
### Opening a Pull Request
We love pull requests! For minor changes, feel free to open up a PR directly. For larger feature development and any changes that may require community discussion, we ask that you discuss your ideas on the [community forums](https://forums.foundationdb.org) prior to opening a PR, and then reference that thread within your PR comment.

-Upon release, no community CI is available. Tests can be run locally, and core committers will validate pull requests prior to merging them.
+CI will be run automatically for core committers, and for community PRs it will be initiated by the request of a core committer. Tests can also be run locally via `ctest`, and core committers can run additional validation on pull requests prior to merging them.
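
A minimal sketch of the local run this describes (the build-directory name and parallelism are illustrative, not project requirements):

```shell
# From an existing CMake build directory, run the registered test suite.
cd build
ctest -j 8 --output-on-failure
```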

### Reporting issues
Please refer to the section below on [using GitHub issues and the community forums](#using-github-issues-and-community-forums) for more info.
@ -65,4 +65,4 @@ GitHub Issues should be used for tracking tasks. If you know the specific code t
* Implementing an agreed upon feature: *GitHub Issues*

### Project and Development Updates
Stay connected to the project and the community! For project and community updates, follow the [FoundationDB project blog](https://www.foundationdb.org/blog/). Development announcements will be made via the community forums' [dev-announce](https://forums.foundationdb.org/c/development/dev-announce) section.
@ -6,8 +6,7 @@ set(SRCS
  FDBLibTLSSession.cpp
  FDBLibTLSSession.h
  FDBLibTLSVerify.cpp
-  FDBLibTLSVerify.h
-  ReferenceCounted.h)
+  FDBLibTLSVerify.h)

add_library(FDBLibTLS ${SRCS})
-target_link_libraries(FDBLibTLS PUBLIC LibreSSL boost_target)
+target_link_libraries(FDBLibTLS PUBLIC LibreSSL boost_target PRIVATE flow)
@ -1,5 +1,5 @@
<?xml version="1.0" encoding="utf-8"?>
-<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+<Project DefaultTargets="Build" ToolsVersion="14.1" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
  <ItemGroup Label="ProjectConfigurations">
    <ProjectConfiguration Include="Debug|X64">
      <Configuration>Debug</Configuration>
@ -23,11 +23,11 @@
  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|X64'" Label="Configuration">
    <ConfigurationType>StaticLibrary</ConfigurationType>
    <CharacterSet>MultiByte</CharacterSet>
-    <PlatformToolset>v140_xp</PlatformToolset>
+    <PlatformToolset>v141</PlatformToolset>
  </PropertyGroup>
  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|X64'" Label="Configuration">
    <ConfigurationType>StaticLibrary</ConfigurationType>
    <CharacterSet>MultiByte</CharacterSet>
-    <PlatformToolset>v140_xp</PlatformToolset>
+    <PlatformToolset>v141</PlatformToolset>
  </PropertyGroup>
</Project>
@ -24,7 +24,7 @@
#pragma once

#include "fdbrpc/ITLSPlugin.h"
-#include "ReferenceCounted.h"
+#include "flow/FastRef.h"

#include <tls.h>

@ -24,7 +24,7 @@
#pragma once

#include "fdbrpc/ITLSPlugin.h"
-#include "ReferenceCounted.h"
+#include "flow/FastRef.h"

#include "FDBLibTLS/FDBLibTLSPlugin.h"
#include "FDBLibTLS/FDBLibTLSVerify.h"
@ -19,6 +19,8 @@
 */

#include "FDBLibTLS/FDBLibTLSSession.h"

+#include "flow/flow.h"
+#include "flow/Trace.h"

#include <openssl/bio.h>
@ -60,7 +62,7 @@ static ssize_t tls_write_func(struct tls *ctx, const void *buf, size_t buflen, v

FDBLibTLSSession::FDBLibTLSSession(Reference<FDBLibTLSPolicy> policy, bool is_client, const char* servername, TLSSendCallbackFunc send_func, void* send_ctx, TLSRecvCallbackFunc recv_func, void* recv_ctx, void* uidptr) :
  tls_ctx(NULL), tls_sctx(NULL), is_client(is_client), policy(policy), send_func(send_func), send_ctx(send_ctx),
-  recv_func(recv_func), recv_ctx(recv_ctx), handshake_completed(false) {
+  recv_func(recv_func), recv_ctx(recv_ctx), handshake_completed(false), lastVerifyFailureLogged(0.0) {
  if (uidptr)
    uid = * (UID*) uidptr;

@ -342,8 +344,11 @@ bool FDBLibTLSSession::verify_peer() {

  if (!rc) {
    // log the various failure reasons
-    for (std::string reason : verify_failure_reasons) {
-      TraceEvent("FDBLibTLSVerifyFailure", uid).detail("Reason", reason).suppressFor(1.0);
+    if(now() - lastVerifyFailureLogged > 1.0) {
+      for (std::string reason : verify_failure_reasons) {
+        lastVerifyFailureLogged = now();
+        TraceEvent("FDBLibTLSVerifyFailure", uid).detail("Reason", reason);
+      }
    }
  }

@ -24,7 +24,7 @@
#pragma once

#include "fdbrpc/ITLSPlugin.h"
-#include "ReferenceCounted.h"
+#include "flow/FastRef.h"

#include "FDBLibTLS/FDBLibTLSPolicy.h"
#include "FDBLibTLS/FDBLibTLSVerify.h"
@ -61,6 +61,7 @@ struct FDBLibTLSSession : ITLSSession, ReferenceCounted<FDBLibTLSSession> {
  bool handshake_completed;

  UID uid;
+  double lastVerifyFailureLogged;
};

#endif /* FDB_LIBTLS_SESSION_H */
@ -25,7 +25,7 @@

#include <stdint.h>

-#include "ReferenceCounted.h"
+#include "flow/FastRef.h"

#include <map>
#include <string>
@ -1,108 +0,0 @@
/*
 * ReferenceCounted.h
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef FDB_REFERENCE_COUNTED_H
#define FDB_REFERENCE_COUNTED_H

#pragma once

#include <stdlib.h>

template <class T>
struct ReferenceCounted {
  void addref() { ++referenceCount; }
  void delref() { if (--referenceCount == 0) { delete (T*)this; } }

  ReferenceCounted() : referenceCount(1) {}

private:
  ReferenceCounted(const ReferenceCounted&) = delete;
  void operator=(const ReferenceCounted&) = delete;
  int32_t referenceCount;
};

template <class P>
void addref(P* ptr) { ptr->addref(); }
template <class P>
void delref(P* ptr) { ptr->delref(); }

template <class P>
struct Reference {
  Reference() : ptr(NULL) {}
  explicit Reference( P* ptr ) : ptr(ptr) {}
  static Reference<P> addRef( P* ptr ) { ptr->addref(); return Reference(ptr); }

  Reference(const Reference& r) : ptr(r.getPtr()) { if (ptr) addref(ptr); }
  Reference(Reference && r) : ptr(r.getPtr()) { r.ptr = NULL; }

  template <class Q>
  Reference(const Reference<Q>& r) : ptr(r.getPtr()) { if (ptr) addref(ptr); }
  template <class Q>
  Reference(Reference<Q> && r) : ptr(r.getPtr()) { r.setPtrUnsafe(NULL); }

  ~Reference() { if (ptr) delref(ptr); }
  Reference& operator=(const Reference& r) {
    P* oldPtr = ptr;
    P* newPtr = r.ptr;
    if (oldPtr != newPtr) {
      if (newPtr) addref(newPtr);
      ptr = newPtr;
      if (oldPtr) delref(oldPtr);
    }
    return *this;
  }
  Reference& operator=(Reference&& r) {
    P* oldPtr = ptr;
    P* newPtr = r.ptr;
    if (oldPtr != newPtr) {
      r.ptr = NULL;
      ptr = newPtr;
      if (oldPtr) delref(oldPtr);
    }
    return *this;
  }

  void clear() {
    P* oldPtr = ptr;
    if (oldPtr) {
      ptr = NULL;
      delref(oldPtr);
    }
  }

  P* operator->() const { return ptr; }
  P& operator*() const { return *ptr; }
  P* getPtr() const { return ptr; }

  void setPtrUnsafe( P* p ) { ptr = p; }

  P* extractPtr() { auto *p = ptr; ptr = NULL; return p; }

  bool boolean_test() const { return ptr != 0; }
private:
  P *ptr;
};

template <class P>
bool operator==( const Reference<P>& lhs, const Reference<P>& rhs ) {
  return lhs.getPtr() == rhs.getPtr();
}

#endif /* FDB_REFERENCE_COUNTED_H */
@ -23,6 +23,6 @@
FDBLibTLS_BUILD_SOURCES +=


-FDBLibTLS_CFLAGS := -fPIC -I/usr/local/include -I$(BOOSTDIR) -I. -DUSE_UCONTEXT
+FDBLibTLS_CFLAGS := -fPIC -I/usr/local/include -isystem$(BOOSTDIR) -I. -DUSE_UCONTEXT

lib/libFDBLibTLS.a: bin/coverage.FDBLibTLS.xml
@ -31,7 +31,6 @@
#include <boost/circular_buffer.hpp>

#include "fdbrpc/ITLSPlugin.h"
-#include "ReferenceCounted.h"

#include "FDBLibTLS/FDBLibTLSPlugin.h"

@ -28,7 +28,6 @@
#include <openssl/objects.h>

#include "fdbrpc/ITLSPlugin.h"
-#include "ReferenceCounted.h"

#include "FDBLibTLS/FDBLibTLSPlugin.h"
#include "FDBLibTLS/FDBLibTLSPolicy.h"
@ -53,25 +52,6 @@ struct FDBLibTLSVerifyTest {
  std::map<int, Criteria> root_criteria;
};

-static std::string printable( std::string const& val ) {
-  static char const digits[] = "0123456789ABCDEF";
-  std::string s;
-
-  for ( int i = 0; i < val.size(); i++ ) {
-    uint8_t b = val[i];
-    if (b >= 32 && b < 127 && b != '\\')
-      s += (char)b;
-    else if (b == '\\')
-      s += "\\\\";
-    else {
-      s += "\\x";
-      s += digits[(b >> 4) & 15];
-      s += digits[b & 15];
-    }
-  }
-  return s;
-}
-
static std::string criteriaToString(std::map<int, Criteria> const& criteria) {
  std::string s;
  for (auto &pair: criteria) {
Makefile
@ -1,28 +1,27 @@
export
PLATFORM := $(shell uname)
ARCH := $(shell uname -m)
-ifeq ("$(wildcard /etc/centos-release)", "")
-  LIBSTDCPP_HACK = 1
-else
-  LIBSTDCPP_HACK = 0
-endif

TOPDIR := $(shell pwd)

+# Allow custom libc++ hack for Ubuntu
+ifeq ("$(wildcard /etc/centos-release)", "")
+  LIBSTDCPP_HACK ?= 1
+endif

ifeq ($(ARCH),x86_64)
  ARCH := x64
else
  $(error Not prepared to compile on $(ARCH))
endif

-MONO := $(shell which mono)
+MONO := $(shell which mono 2>/dev/null)
ifeq ($(MONO),)
  MONO := /usr/bin/mono
endif

-MCS := $(shell which mcs)
+MCS := $(shell which mcs 2>/dev/null)
ifeq ($(MCS),)
-  MCS := $(shell which dmcs)
+  MCS := $(shell which dmcs 2>/dev/null)
endif
ifeq ($(MCS),)
  MCS := /usr/bin/mcs
@ -56,8 +55,8 @@ else ifeq ($(PLATFORM),Darwin)
  CC := /usr/bin/clang
  CXX := /usr/bin/clang

-  CFLAGS += -mmacosx-version-min=10.7 -stdlib=libc++
-  CXXFLAGS += -mmacosx-version-min=10.7 -std=c++11 -stdlib=libc++ -msse4.2 -Wno-undefined-var-template -Wno-unknown-warning-option
+  CFLAGS += -mmacosx-version-min=10.14 -stdlib=libc++
+  CXXFLAGS += -mmacosx-version-min=10.14 -std=c++17 -stdlib=libc++ -msse4.2 -Wno-undefined-var-template -Wno-unknown-warning-option

  .LIBPATTERNS := lib%.dylib lib%.a

@ -70,7 +69,7 @@ else
endif
BOOSTDIR ?= ${BOOST_BASEDIR}/${BOOST_BASENAME}

-CCACHE := $(shell which ccache)
+CCACHE := $(shell which ccache 2>/dev/null)
ifneq ($(CCACHE),)
  CCACHE_CC := $(CCACHE) $(CC)
  CCACHE_CXX := $(CCACHE) $(CXX)
README.md
@ -25,52 +25,16 @@ Developers interested in using the FoundationDB store for an application can get

### Compiling from source

-Developers on a OS for which there is no binary package, or who would like to start hacking on the code can get started by compiling from source.
+Developers on an OS for which there is no binary package, or who would like
+to start hacking on the code, can get started by compiling from source.

Currently there are two build systems: a collection of Makefiles and a
-CMake-based. Both of them should work for most users and CMake will eventually
-become the only build system available.
-
-## Makefile
-
-#### MacOS
-
-1. Check out this repo on your Mac.
-1. Install the Xcode command-line tools.
-1. Download version 1.67.0 of [Boost](https://sourceforge.net/projects/boost/files/boost/1.67.0/).
-1. Set the `BOOSTDIR` environment variable to the location containing this boost installation.
-1. Install [Mono](http://www.mono-project.com/download/stable/).
-1. Install a [JDK](http://www.oracle.com/technetwork/java/javase/downloads/index.html). FoundationDB currently builds with Java 8.
-1. Navigate to the directory where you checked out the foundationdb repo.
-1. Run `make`.
-
-#### Linux
-
-1. Install [Docker](https://www.docker.com/).
-1. Check out the foundationdb repo.
-1. Run the docker image interactively [Docker Run](https://docs.docker.com/engine/reference/run/#general-form) with the directory containing the foundationdb repo mounted [Docker Mounts](https://docs.docker.com/storage/volumes/).
-
-    ```shell
-    docker run -it -v '/local/dir/path/foundationdb:/docker/dir/path/foundationdb' foundationdb/foundationdb-build:latest
-    ```
-
-1. Navigate to the container's mounted directory which contains the foundationdb repo.
-
-    ```shell
-    cd /docker/dir/path/foundationdb
-    ```
-
-1. Run `make`.
-
-This will build the fdbserver binary and the python bindings. If you want to build our other bindings, you will need to install a runtime for the language whose binding you want to build. Each binding has an `.mk` file which provides specific targets for that binding.
+CMake-based build system. Both of them should currently work for most users,
+and CMake should be the preferred choice as it will eventually become the only
+build system available.

## CMake

FoundationDB is currently in the process of migrating the build system to cmake.
The CMake build system is currently used by several developers. However, most of
the testing and packaging infrastructure still uses the old VisualStudio+Make
based build system.

To build with CMake, generally the following is required (works on Linux and
Mac OS - for Windows see below):
@ -109,7 +73,7 @@ necessary dependencies. After each successful cmake run, cmake will tell you
which language bindings it is going to build.


-### Generating compile_commands.json
+### Generating `compile_commands.json`

CMake can build a compilation database for you. However, the default generated
one is not too useful as it operates on the generated files. When running make,
@ -120,15 +84,27 @@ directory. This can then be used for tools like
code-completion and code navigation in flow. It is not yet perfect (it will show
a few errors) but we are constantly working on improving the development experience.

+CMake will not produce a `compile_commands.json` by default; you must pass
+`-DCMAKE_EXPORT_COMPILE_COMMANDS=ON`. This also enables the target
+`processed_compile_commands`, which rewrites `compile_commands.json` to
+describe the actor compiler source file, not the post-processed output files,
+and places the output file in the source directory. This file should then be
+picked up automatically by any tooling.
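
A minimal sketch of that workflow (directory and source paths are illustrative):

```shell
# Configure with the compilation database export enabled; the
# processed_compile_commands target then rewrites compile_commands.json
# to point at the actor-compiler sources and copies it into the source tree.
mkdir build && cd build
cmake -DCMAKE_EXPORT_COMPILE_COMMANDS=ON /path/to/foundationdb
make processed_compile_commands
```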

+Note that if building inside of the `foundationdb/foundationdb-build` docker
+image, the resulting paths will still be incorrect and require manual fixing.
+One will wish to re-run `cmake` with `-DCMAKE_EXPORT_COMPILE_COMMANDS=OFF` to
+prevent it from reverting the manual changes.

### Using IDEs

CMake has built-in support for a number of popular IDEs. However, because flow
files are precompiled with the actor compiler, an IDE will not be very useful as
a user will only be presented with the generated code - which is not what she
wants to edit and get IDE features for.

The good news is that it is possible to generate project files for editing
-flow with a supported IDE. There is a cmake option called `OPEN_FOR_IDE` which
+flow with a supported IDE. There is a CMake option called `OPEN_FOR_IDE` which
will generate a project which can be opened in an IDE for editing. You won't be
able to build this project, but you will be able to edit the files and get most
edit and navigation features your IDE supports.
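
A hedged sketch of how that option is passed (the separate build-directory name is illustrative):

```shell
# Generate an IDE-only project tree; it is meant for editing and
# navigation, not for producing binaries.
mkdir ide-build && cd ide-build
cmake -DOPEN_FOR_IDE=ON /path/to/foundationdb
```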
@ -145,8 +121,9 @@ You should create a second build-directory which you will use for building

### Linux

-There are no special requirements for Linux. However, we are currently working
-on a Docker-based build as well.
+There are no special requirements for Linux. A docker image can be pulled from
+`foundationdb/foundationdb-build` that has all of FoundationDB's dependencies
+pre-installed, and is what the CI uses to build and test PRs.
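
For example, the image can be fetched directly (the `latest` tag is an assumption; pin a specific tag if you need reproducibility):

```shell
# Pull the dependency-complete build image referred to above.
docker pull foundationdb/foundationdb-build:latest
```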

If you want to create a package you have to tell cmake what platform it is for.
And then you can build by simply calling `cpack`. So for debian, call:
@ -224,3 +201,38 @@ will automatically find it and build with TLS support.

If you installed WIX before running `cmake` you should find the
`FDBInstaller.msi` in your build directory under `packaging/msi`.

+## Makefile
+
+#### MacOS
+
+1. Check out this repo on your Mac.
+1. Install the Xcode command-line tools.
+1. Download version 1.67.0 of [Boost](https://sourceforge.net/projects/boost/files/boost/1.67.0/).
+1. Set the `BOOSTDIR` environment variable to the location containing this boost installation (a condensed sketch follows these steps).
+1. Install [Mono](http://www.mono-project.com/download/stable/).
+1. Install a [JDK](http://www.oracle.com/technetwork/java/javase/downloads/index.html). FoundationDB currently builds with Java 8.
+1. Navigate to the directory where you checked out the foundationdb repo.
+1. Run `make`.
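
A condensed sketch of the Boost and `make` steps above (all paths are illustrative):

```shell
# Assumes Boost 1.67.0 was unpacked under /opt; adjust to your layout.
export BOOSTDIR=/opt/boost_1_67_0
cd /path/to/foundationdb
make
```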
+
+#### Linux
+
+1. Install [Docker](https://www.docker.com/).
+1. Check out the foundationdb repo.
+1. Run the docker image interactively [Docker Run](https://docs.docker.com/engine/reference/run/#general-form) with the directory containing the foundationdb repo mounted [Docker Mounts](https://docs.docker.com/storage/volumes/).
+
+    ```shell
+    docker run -it -v '/local/dir/path/foundationdb:/docker/dir/path/foundationdb' foundationdb/foundationdb-build:latest
+    ```
+
+1. Run `$ scl enable devtoolset-8 python27 rh-python36 rh-ruby24 -- bash` within the running container. This enables a more modern compiler, which is required to build FoundationDB.
+1. Navigate to the container's mounted directory which contains the foundationdb repo.
+
+    ```shell
+    cd /docker/dir/path/foundationdb
+    ```
+
+1. Run `make`.
+
+This will build the fdbserver binary and the python bindings. If you want to build our other bindings, you will need to install a runtime for the language whose binding you want to build. Each binding has an `.mk` file which provides specific targets for that binding.
@ -49,17 +49,30 @@ endif()

# The tests don't build on windows
if(NOT WIN32)
  set(MAKO_SRCS
    test/mako/mako.c
    test/mako/mako.h
    test/mako/utils.c
    test/mako/utils.h
    test/mako/zipf.c
    test/mako/zipf.h)

  if(OPEN_FOR_IDE)
    add_library(fdb_c_performance_test OBJECT test/performance_test.c test/test.h)
    add_library(fdb_c_ryw_benchmark OBJECT test/ryw_benchmark.c test/test.h)
    add_library(mako OBJECT ${MAKO_SRCS})
  else()
    add_executable(fdb_c_performance_test test/performance_test.c test/test.h)
    add_executable(fdb_c_ryw_benchmark test/ryw_benchmark.c test/test.h)
    add_executable(mako ${MAKO_SRCS})
    strip_debug_symbols(fdb_c_performance_test)
    strip_debug_symbols(fdb_c_ryw_benchmark)
  endif()
  target_link_libraries(fdb_c_performance_test PRIVATE fdb_c)
  target_link_libraries(fdb_c_ryw_benchmark PRIVATE fdb_c)
  # do not set RPATH for mako
  set_property(TARGET mako PROPERTY SKIP_BUILD_RPATH TRUE)
  target_link_libraries(mako PRIVATE fdb_c)
endif()

# TODO: re-enable once the old vcxproj-based build system is removed.
@ -1,5 +1,5 @@
<?xml version="1.0" encoding="utf-8"?>
-<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+<Project DefaultTargets="Build" ToolsVersion="14.1" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
  <PropertyGroup Condition="'$(Release)' != 'true' ">
  </PropertyGroup>
  <PropertyGroup Condition="'$(Release)' == 'true' ">
@ -38,14 +38,14 @@
    <ConfigurationType>DynamicLibrary</ConfigurationType>
    <UseDebugLibraries>true</UseDebugLibraries>
    <CharacterSet>MultiByte</CharacterSet>
-    <PlatformToolset>v140_xp</PlatformToolset>
+    <PlatformToolset>v141</PlatformToolset>
  </PropertyGroup>
  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
    <ConfigurationType>DynamicLibrary</ConfigurationType>
    <UseDebugLibraries>false</UseDebugLibraries>
    <WholeProgramOptimization>true</WholeProgramOptimization>
    <CharacterSet>MultiByte</CharacterSet>
-    <PlatformToolset>v140_xp</PlatformToolset>
+    <PlatformToolset>v141</PlatformToolset>
  </PropertyGroup>
  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
  <ImportGroup Label="ExtensionSettings">
@ -83,6 +83,7 @@ FOR /F "tokens=1" %%i in ('hg.exe id') do copy /Y "$(TargetPath)" "$(TargetPath)
      <PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=_WIN32_WINNT_WS03;BOOST_ALL_NO_LIB;WINVER=_WIN32_WINNT_WS03;NTDDI_VERSION=NTDDI_WS03;_DEBUG;_HAS_ITERATOR_DEBUGGING=0;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
      <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
      <AdditionalOptions>/bigobj @..\..\flow/no_intellisense.opt %(AdditionalOptions)</AdditionalOptions>
+      <LanguageStandard>stdcpp17</LanguageStandard>
    </ClCompile>
    <Link>
      <GenerateDebugInformation>true</GenerateDebugInformation>
@ -98,6 +99,7 @@ FOR /F "tokens=1" %%i in ('hg.exe id') do copy /Y "$(TargetPath)" "$(TargetPath)
      <PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=_WIN32_WINNT_WS03;BOOST_ALL_NO_LIB;WINVER=_WIN32_WINNT_WS03;NTDDI_VERSION=NTDDI_WS03;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
      <RuntimeLibrary>MultiThreaded</RuntimeLibrary>
      <AdditionalOptions>/bigobj @..\..\flow/no_intellisense.opt %(AdditionalOptions)</AdditionalOptions>
+      <LanguageStandard>stdcpp17</LanguageStandard>
    </ClCompile>
    <Link>
      <GenerateDebugInformation>true</GenerateDebugInformation>
@ -24,15 +24,21 @@ fdb_c_CFLAGS := $(fdbclient_CFLAGS)
fdb_c_LDFLAGS := $(fdbrpc_LDFLAGS)
fdb_c_LIBS := lib/libfdbclient.a lib/libfdbrpc.a lib/libflow.a $(FDB_TLS_LIB)
fdb_c_STATIC_LIBS := $(TLS_LIBS)
-fdb_c_tests_LIBS := -Llib -lfdb_c
+fdb_c_tests_LIBS := -Llib -lfdb_c -lstdc++
fdb_c_tests_HEADERS := -Ibindings/c

CLEAN_TARGETS += fdb_c_tests_clean

ifeq ($(PLATFORM),linux)
  fdb_c_LDFLAGS += -Wl,--version-script=bindings/c/fdb_c.map -static-libgcc -Wl,-z,nodelete -lm -lpthread -lrt -ldl
-  ifeq ($(LIBSTDCPP_HACK),1)
-    fdb_c_LIBS += lib/libstdc++.a
+  # Link our custom libstdc++ statically in Ubuntu, if hacking
+  ifeq ("$(wildcard /etc/centos-release)", "")
+    ifeq ($(LIBSTDCPP_HACK),1)
+      fdb_c_LIBS += lib/libstdc++.a
+    endif
+  # Link stdc++ statically in Centos, if not hacking
+  else
+    fdb_c_STATIC_LIBS += -static-libstdc++
  endif
  fdb_c_tests_LIBS += -lpthread
endif
@ -86,11 +92,11 @@ bindings/c/foundationdb/fdb_c_options.g.h: bin/vexillographer.exe fdbclient/vexi

bin/fdb_c_performance_test: bindings/c/test/performance_test.c bindings/c/test/test.h fdb_c
	@echo "Compiling fdb_c_performance_test"
	@$(CC) $(CFLAGS) $(fdb_c_tests_HEADERS) -o $@ bindings/c/test/performance_test.c $(fdb_c_tests_LIBS)

bin/fdb_c_ryw_benchmark: bindings/c/test/ryw_benchmark.c bindings/c/test/test.h fdb_c
	@echo "Compiling fdb_c_ryw_benchmark"
	@$(CC) $(CFLAGS) $(fdb_c_tests_HEADERS) -o $@ bindings/c/test/ryw_benchmark.c $(fdb_c_tests_LIBS)

packages/fdb-c-tests-$(VERSION)-$(PLATFORM).tar.gz: bin/fdb_c_performance_test bin/fdb_c_ryw_benchmark
	@echo "Packaging $@"
File diff suppressed because it is too large
@ -0,0 +1,140 @@
#ifndef MAKO_H
#define MAKO_H
#pragma once

#ifndef FDB_API_VERSION
#define FDB_API_VERSION 610
#endif

#include <foundationdb/fdb_c.h>
#include <pthread.h>
#include <sys/types.h>
#if defined(__linux__)
#include <linux/limits.h>
#elif defined(__APPLE__)
#include <sys/syslimits.h>
#else
#include <limits.h>
#endif

#define DEFAULT_RETRY_COUNT 3

#define VERBOSE_NONE 0
#define VERBOSE_DEFAULT 1
#define VERBOSE_ANNOYING 2
#define VERBOSE_DEBUG 3

#define MODE_INVALID -1
#define MODE_CLEAN 0
#define MODE_BUILD 1
#define MODE_RUN 2

/* we set mako_txn_t and mako_args_t only once in the master process,
 * and they won't be touched by child processes.
 */

/* transaction specification */
#define OP_GETREADVERSION 0
#define OP_GET 1
#define OP_GETRANGE 2
#define OP_SGET 3
#define OP_SGETRANGE 4
#define OP_UPDATE 5
#define OP_INSERT 6
#define OP_INSERTRANGE 7
#define OP_CLEAR 8
#define OP_SETCLEAR 9
#define OP_CLEARRANGE 10
#define OP_SETCLEARRANGE 11
#define OP_COMMIT 12
#define MAX_OP 13 /* update this when adding a new operation */

#define OP_COUNT 0
#define OP_RANGE 1
#define OP_REVERSE 2

/* for arguments */
#define ARG_KEYLEN 1
#define ARG_VALLEN 2
#define ARG_TPS 3
#define ARG_COMMITGET 4
#define ARG_SAMPLING 5
#define ARG_VERSION 6
#define ARG_KNOBS 7
#define ARG_FLATBUFFERS 8
#define ARG_TRACE 9
#define ARG_TRACEPATH 10

#define KEYPREFIX "mako"
#define KEYPREFIXLEN 4

typedef struct {
  /* for each operation, it stores "count", "range" and "reverse" */
  int ops[MAX_OP][3];
} mako_txnspec_t;

#define KNOB_MAX 256

/* benchmark parameters */
typedef struct {
  int json;
  int num_processes;
  int num_threads;
  int mode;
  int rows; /* is 2 billion enough? */
  int seconds;
  int iteration;
  int tps;
  int sampling;
  int key_length;
  int value_length;
  int zipf;
  int commit_get;
  int verbose;
  mako_txnspec_t txnspec;
  char cluster_file[PATH_MAX];
  int trace;
  char tracepath[PATH_MAX];
  char knobs[KNOB_MAX];
  uint8_t flatbuffers;
} mako_args_t;

/* shared memory */
#define SIGNAL_RED 0
#define SIGNAL_GREEN 1
#define SIGNAL_OFF 2

typedef struct {
  int signal;
  int readycount;
} mako_shmhdr_t;

typedef struct {
  uint64_t xacts;
  uint64_t conflicts;
  uint64_t ops[MAX_OP];
  uint64_t errors[MAX_OP];
  uint64_t latency_samples[MAX_OP];
  uint64_t latency_us_total[MAX_OP];
  uint64_t latency_us_min[MAX_OP];
  uint64_t latency_us_max[MAX_OP];
} mako_stats_t;

/* per-process information */
typedef struct {
  int worker_id;
  FDBDatabase *database;
  mako_args_t *args;
  mako_shmhdr_t *shm;
} process_info_t;

/* args for threads */
typedef struct {
  int thread_id;
  process_info_t *process;
} thread_args_t;

/* process type */
typedef enum { proc_master = 0, proc_worker, proc_stats } proc_type_t;

#endif /* MAKO_H */
@ -0,0 +1,160 @@
##############
mako Benchmark
##############

| mako (named after a small, but very fast shark) is a micro-benchmark for FoundationDB
| which is designed to be very light and flexible
| so that you can stress a particular part of a FoundationDB cluster without introducing unnecessary overhead.


How to Build
============
| ``mako`` gets built automatically when you build FoundationDB.
| To build ``mako`` manually, simply build the ``mako`` target in the FoundationDB build directory.
| e.g. If you're using Unix Makefiles
| ``make mako``


Architecture
============
- mako is a stand-alone program written in C,
  which communicates with FoundationDB using the C binding API (``libfdb_c.so``)
- It creates one master process, and one or more worker processes (multi-process)
- Each worker process creates one or more threads (multi-thread)
- All threads within the same process share the same network thread


Data Specification
==================
- Key has a fixed prefix + sequential number + padding (e.g. ``mako000000xxxxxx``)
- Value is a random string (e.g. ``;+)Rf?H.DS&UmZpf``)


Arguments
=========
- | ``--mode <mode>``
  | One of the following modes must be specified. (Required)
  | - ``clean``: Clean up existing data
  | - ``build``: Populate data
  | - ``run``: Run the benchmark

- | ``-c | --cluster <cluster file>``
  | FDB cluster file (Required)

- | ``-p | --procs <procs>``
  | Number of worker processes (Default: 1)

- | ``-t | --threads <threads>``
  | Number of threads per worker process (Default: 1)

- | ``-r | --rows <rows>``
  | Number of rows populated (Default: 10000)

- | ``-s | --seconds <seconds>``
  | Test duration in seconds (Default: 30)
  | This option cannot be set with ``--iteration``.

- | ``-i | --iteration <iters>``
  | Specify the number of operations to be executed.
  | This option cannot be set with ``--seconds``.

- | ``--tps <tps>``
  | Target total transactions-per-second (TPS) of all worker processes/threads
  | (Default: Unset / Unthrottled)

- | ``--keylen <num>``
  | Key string length in bytes (Default and Minimum: 16)

- | ``--vallen <num>``
  | Value string length in bytes (Default and Minimum: 16)

- | ``-x | --transaction <string>``
  | Transaction specification, described in detail in the following section. (Default: ``g10``)

- | ``-z | --zipf``
  | Generate a skewed workload based on Zipf distribution (Default: Unset = Uniform)

- | ``--sampling <num>``
  | Sampling rate (1 sample / <num> ops) for latency stats

- | ``--trace``
  | Enable tracing. The trace file will be created in the current directory.

- | ``--tracepath <path>``
  | Enable tracing and set the trace file path.

- | ``--knobs <knobs>``
  | Set client knobs

- | ``--flatbuffers``
  | Enable flatbuffers

- | ``--commitget``
  | Force commit for read-only transactions

- | ``-v | --verbose <level>``
  | Set verbose level (Default: 1)
  | - 0 – Minimal
  | - 1 – Default
  | - 2 – Annoying
  | - 3 – Very Annoying (a.k.a. DEBUG)


Transaction Specification
=========================
| A transaction may contain multiple operations of multiple types.
| You can specify multiple operations for one operation type by specifying "Count".
| For RANGE operations, "Range" needs to be specified in addition to "Count".
| Every transaction is committed unless it contains only GET / GET RANGE operations.

Operation Types
---------------
- ``g`` – GET
- ``gr`` – GET RANGE
- ``sg`` – Snapshot GET
- ``sgr`` – Snapshot GET RANGE
- ``u`` – Update (= GET followed by SET)
- ``i`` – Insert (= SET with a new key)
- ``ir`` – Insert Range (Sequential)
- ``c`` – CLEAR
- ``sc`` – SET & CLEAR
- ``cr`` – CLEAR RANGE
- ``scr`` – SET & CLEAR RANGE
- ``grv`` – GetReadVersion()

Format
------
| One operation type is defined as ``<Type><Count>`` or ``<Type><Count>:<Range>``.
| When Count is omitted, it's equivalent to setting it to 1. (e.g. ``g`` is equivalent to ``g1``)
| Multiple operation types can be concatenated. (e.g. ``g9u1`` = 9 GETs and 1 update)

Transaction Specification Examples
----------------------------------
- | 100 GETs (No Commit)
  | ``g100``

- | 10 GET RANGE with Range of 50 (No Commit)
  | ``gr10:50``

- | 90 GETs and 10 Updates (Committed)
  | ``g90u10``

- | 80 GETs, 10 Updates and 10 Inserts (Committed)
  | ``g80u10i10``


Execution Examples
==================

Preparation
-----------
- Start the FoundationDB cluster and create a database
- Set ``LD_LIBRARY_PATH`` to point to a proper ``libfdb_c.so``
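
A minimal environment sketch for the preparation steps above (the library path is illustrative; point it at wherever your ``libfdb_c.so`` lives)::

  export LD_LIBRARY_PATH=/usr/lib/foundationdb:$LD_LIBRARY_PATH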

Build
-----
``mako --cluster /etc/foundationdb/fdb.cluster --mode build --rows 1000000 --procs 4``

Run
---
``mako --cluster /etc/foundationdb/fdb.cluster --mode run --rows 1000000 --procs 2 --threads 8 --transaction "g8ui" --seconds 60 --tps 1000``
@ -0,0 +1,81 @@
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "utils.h"
#include "mako.h"

/* uniform-distribution random */
int urand(int low, int high) {
  double r = rand() / (1.0 + RAND_MAX);
  int range = high - low + 1;
  return (int)((r * range) + low);
}

/* random string */
/* len is the buffer size, must include null */
void randstr(char *str, int len) {
  int i;
  for (i = 0; i < len-1; i++) {
    str[i] = '!' + urand(0, 'z'-'!'); /* generate a char from '!' to 'z' */
  }
  str[len-1] = '\0';
}

/* random numeric string */
/* len is the buffer size, must include null */
void randnumstr(char *str, int len) {
  int i;
  for (i = 0; i < len-1; i++) {
    str[i] = '0' + urand(0, 9); /* generate a char from '0' to '9' */
  }
  str[len-1] = '\0';
}

/* return the first key to be inserted */
int insert_begin(int rows, int p_idx, int t_idx, int total_p, int total_t) {
  double interval = (double)rows / total_p / total_t;
  return (int)(round(interval * ((p_idx * total_t) + t_idx)));
}

/* return the last key to be inserted */
int insert_end(int rows, int p_idx, int t_idx, int total_p, int total_t) {
  double interval = (double)rows / total_p / total_t;
  return (int)(round(interval * ((p_idx * total_t) + t_idx + 1) - 1));
}

/* divide val equally among threads */
int compute_thread_portion(int val, int p_idx, int t_idx, int total_p, int total_t) {
  int interval = val / total_p / total_t;
  int remaining = val - (interval * total_p * total_t);
  if ((p_idx * total_t + t_idx) < remaining) {
    return interval+1;
  } else if (interval == 0) {
    return -1;
  }
  /* else */
  return interval;
}

/* number of digits */
int digits(int num) {
  int digits = 0;
  while (num > 0) {
    num /= 10;
    digits++;
  }
  return digits;
}


/* generate a key for a given key number */
/* len is the buffer size, key length + null */
void genkey(char *str, int num, int rows, int len) {
  int i;
  int rowdigit = digits(rows);
  sprintf(str, KEYPREFIX "%0.*d", rowdigit, num);
  for (i = (KEYPREFIXLEN + rowdigit); i < len-1; i++) {
    str[i] = 'x';
  }
  str[len-1] = '\0';
}
@ -0,0 +1,52 @@
#ifndef UTILS_H
#define UTILS_H
#pragma once

/* uniform-distribution random */
/* return a uniform random number between low and high, both inclusive */
int urand(int low, int high);

/* write a random string of length (len-1) to the memory pointed to by str,
 * with a null-termination character at str[len-1].
 */
void randstr(char *str, int len);

/* write a random numeric string of length (len-1) to the memory pointed to by str,
 * with a null-termination character at str[len-1].
 */
void randnumstr(char *str, int len);

/* given the total number of rows to be inserted,
 * the worker process index p_idx and the thread index t_idx (both 0-based),
 * and the total number of processes, total_p, and threads, total_t,
 * returns the first row number assigned to this partition.
 */
int insert_begin(int rows, int p_idx, int t_idx, int total_p, int total_t);

/* similar to insert_begin, insert_end returns the last row number */
int insert_end(int rows, int p_idx, int t_idx, int total_p, int total_t);

/* divide a value equally among threads */
int compute_thread_portion(int val, int p_idx, int t_idx, int total_p,
                           int total_t);

/* similar to insert_begin/end, compute_thread_tps computes
 * the per-thread target TPS for a given configuration.
 */
#define compute_thread_tps(val, p_idx, t_idx, total_p, total_t) \
  compute_thread_portion(val, p_idx, t_idx, total_p, total_t)

/* similar to compute_thread_tps,
 * compute_thread_iters computes the number of iterations.
 */
#define compute_thread_iters(val, p_idx, t_idx, total_p, total_t) \
  compute_thread_portion(val, p_idx, t_idx, total_p, total_t)

/* get the number of digits */
int digits(int num);

/* generate a key for a given key number */
/* len is the buffer size, key length + null */
void genkey(char *str, int num, int rows, int len);

#endif /* UTILS_H */
@ -0,0 +1,132 @@
/*
 * zipfian distribution copied from YCSB
 * https://github.com/brianfrankcooper/YCSB
 */

#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include "zipf.h"

/* global static */
static int items;
static int base;
static double zipfianconstant;
static double alpha, zetan, eta, theta, zeta2theta;
static int countforzeta;
static int allowitemcountdecrease = 0;

/* declarations */
double zetastatic2(int st, int n, double theta, double initialsum);
double zeta2(int st, int n, double theta_val, double initialsum);
double zetastatic(int n, double theta);
double zeta(int n, double theta_val);


double rand_double() {
  return (double)rand() / (double)RAND_MAX;
}


int next_int(int itemcount) {
  double u, uz;
  int ret;

  if (itemcount != countforzeta) {
    zetan = zeta2(countforzeta, itemcount, theta, zetan);
  } else if ((itemcount < countforzeta) && (allowitemcountdecrease)) {
    zetan = zeta(itemcount, theta);
  }
  eta = (1 - pow(2.0 / items, 1 - theta)) / (1 - zeta2theta / zetan);

  u = rand_double();
  uz = u * zetan;

  if (uz < 1.0) {
    return base;
  }

  if (uz < 1.0 + pow(0.5, theta)) {
    return base + 1;
  }

  ret = base + (int)(itemcount * pow(eta * u - eta + 1, alpha));
  return ret;
}

int zipfian_next() {
  return next_int(items);
}

double zetastatic2(int st, int n, double theta, double initialsum) {
  int i;
  double sum = initialsum;
  for (i = st; i < n; i++) {
    sum += 1 / pow(i + 1, theta);
  }
  return sum;
}

double zeta2(int st, int n, double theta_val, double initialsum) {
  countforzeta = n;
  return zetastatic2(st, n, theta_val, initialsum);
}

double zetastatic(int n, double theta) {
  return zetastatic2(0, n, theta, 0);
}

double zeta(int n, double theta_val) {
  countforzeta = n;
  return zetastatic(n, theta_val);
}

void zipfian_generator4(int min, int max, double _zipfianconstant, double _zetan) {
  items = max - min + 1;
  base = min;
  zipfianconstant = _zipfianconstant;

  theta = zipfianconstant;
  zeta2theta = zeta(2, theta);
  alpha = 1.0 / (1.0 - theta);
  zetan = _zetan;
  countforzeta = items;
  eta = (1 - pow(2.0 / items, 1 - theta)) / (1 - zeta2theta / zetan);

  zipfian_next();
}

void zipfian_generator3(int min, int max, double zipfianconstant) {
  zipfian_generator4(min, max, zipfianconstant, zetastatic(max - min + 1, zipfianconstant));
}

void zipfian_generator2(int min, int max) {
  zipfian_generator3(min, max, ZIPFIAN_CONSTANT);
}

void zipfian_generator(int items) {
  zipfian_generator2(0, items - 1);
}


#if 0 /* test */
int main() {
  int i = 0;
  int histogram[1000] = { 0 };

  srand(time(0));

  zipfian_generator(1000);

  for (i = 0; i < 1000000; i++) {
    int val = zipfian_next();
    //printf("%d\n", val);
    histogram[val]++;
  }

  for (i = 0; i < 1000; i++) {
    printf("%d\n", histogram[i]);
  }
}
#endif
@ -0,0 +1,15 @@
/*
 * zipfian distribution copied from YCSB
 * https://github.com/brianfrankcooper/YCSB
 */

#ifndef ZIPF_H
#define ZIPF_H
#pragma once

#define ZIPFIAN_CONSTANT 0.99

void zipfian_generator(int items);
int zipfian_next();

#endif /* ZIPF_H */
@ -258,8 +258,6 @@ namespace FDB {

  typedef Standalone<KeyRangeRef> KeyRange;

-  std::string printable( const StringRef& val );
-
  template <class T>
  static std::string describe(T const& item) {
    return item.toString();
@ -66,7 +66,7 @@ namespace FDB {
  }

  loop {
-    state int64_t candidate = g_random->randomInt(start, start + window);
+    state int64_t candidate = deterministicRandom()->randomInt(start, start + window);

    // if thread safety is needed, this should be locked {
    state Future<FDBStandalone<RangeResultRef>> latestCounter = tr->getRange(counters.range(), 1, true, true);
@ -81,12 +81,6 @@ void fdb_flow_test() {
  fdb->setupNetwork();
  startThread(networkThread, fdb);

-  int randomSeed = platform::getRandomSeed();
-
-  g_random = new DeterministicRandom(randomSeed);
-  g_nondeterministic_random = new DeterministicRandom(platform::getRandomSeed());
-  g_debug_random = new DeterministicRandom(platform::getRandomSeed());
-
  g_network = newNet2( false );

  openTraceFile(NetworkAddress(), 1000000, 1000000, ".");
@ -428,16 +422,4 @@
  void TransactionImpl::reset() {
    fdb_transaction_reset( tr );
  }

-  std::string printable( const StringRef& val ) {
-    std::string s;
-    for(int i=0; i<val.size(); i++) {
-      uint8_t b = val[i];
-      if (b >= 32 && b < 127 && b != '\\') s += (char)b;
-      else if (b == '\\') s += "\\\\";
-      else s += format("\\x%02x", b);
-    }
-    return s;
-  }
-
}
@ -1,5 +1,5 @@
<?xml version="1.0" encoding="utf-8"?>
-<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+<Project DefaultTargets="Build" ToolsVersion="14.1" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
  <PropertyGroup Condition="'$(Release)' != 'true' ">
  </PropertyGroup>
  <PropertyGroup Condition="'$(Release)' == 'true' ">
@ -63,12 +63,12 @@
  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|X64'" Label="Configuration">
    <ConfigurationType>StaticLibrary</ConfigurationType>
    <CharacterSet>MultiByte</CharacterSet>
-    <PlatformToolset>v140_xp</PlatformToolset>
+    <PlatformToolset>v141</PlatformToolset>
  </PropertyGroup>
  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|X64'" Label="Configuration">
    <ConfigurationType>StaticLibrary</ConfigurationType>
    <CharacterSet>MultiByte</CharacterSet>
-    <PlatformToolset>v140_xp</PlatformToolset>
+    <PlatformToolset>v141</PlatformToolset>
  </PropertyGroup>
  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
  <ImportGroup Label="ExtensionSettings">
@ -99,6 +99,7 @@
      <AdditionalIncludeDirectories>%(AdditionalIncludeDirectories);..\c</AdditionalIncludeDirectories>
      <MultiProcessorCompilation>true</MultiProcessorCompilation>
      <AdditionalOptions>/bigobj @..\..\flow/no_intellisense.opt %(AdditionalOptions)</AdditionalOptions>
+      <LanguageStandard>stdcpp17</LanguageStandard>
    </ClCompile>
    <Link>
      <SubSystem>Console</SubSystem>
@ -126,6 +127,7 @@
      <MultiProcessorCompilation>true</MultiProcessorCompilation>
      <FavorSizeOrSpeed>Speed</FavorSizeOrSpeed>
      <MinimalRebuild>false</MinimalRebuild>
+      <LanguageStandard>stdcpp17</LanguageStandard>
    </ClCompile>
    <Link>
      <SubSystem>Console</SubSystem>
@ -107,7 +107,7 @@ struct DirectoryCreateSubspaceFunc : InstructionFunc {
  state Tuple path = wait(popTuple(data));
  Tuple rawPrefix = wait(data->stack.waitAndPop());

-  logOp(format("Created subspace at %s: %s", tupleToString(path).c_str(), printable(rawPrefix.getString(0)).c_str()));
+  logOp(format("Created subspace at %s: %s", tupleToString(path).c_str(), rawPrefix.getString(0).printable().c_str()));
  data->directoryData.push(new Subspace(path, rawPrefix.getString(0)));
  return Void();
}
@ -133,7 +133,7 @@ struct DirectoryCreateLayerFunc : InstructionFunc {
  else {
    Subspace* nodeSubspace = data->directoryData.directoryList[index1].subspace.get();
    Subspace* contentSubspace = data->directoryData.directoryList[index2].subspace.get();
-    logOp(format("Create directory layer: node_subspace (%d) = %s, content_subspace (%d) = %s, allow_manual_prefixes = %d", index1, printable(nodeSubspace->key()).c_str(), index2, printable(nodeSubspace->key()).c_str(), allowManualPrefixes));
+    logOp(format("Create directory layer: node_subspace (%d) = %s, content_subspace (%d) = %s, allow_manual_prefixes = %d", index1, nodeSubspace->key().printable().c_str(), index2, nodeSubspace->key().printable().c_str(), allowManualPrefixes));
    data->directoryData.push(Reference<IDirectory>(new DirectoryLayer(*nodeSubspace, *contentSubspace, allowManualPrefixes)));
  }

@ -158,7 +158,7 @@ struct DirectoryChangeFunc : InstructionFunc {

  if(LOG_DIRS) {
    DirectoryOrSubspace d = data->directoryData.directoryList[data->directoryData.directoryListIndex];
-    printf("Changed directory to %d (%s @\'%s\')\n", data->directoryData.directoryListIndex, d.typeString().c_str(), d.directory.present() ? pathToString(d.directory.get()->getPath()).c_str() : printable(d.subspace.get()->key()).c_str());
+    printf("Changed directory to %d (%s @\'%s\')\n", data->directoryData.directoryListIndex, d.typeString().c_str(), d.directory.present() ? pathToString(d.directory.get()->getPath()).c_str() : d.subspace.get()->key().printable().c_str());
    fflush(stdout);
  }

@ -192,7 +192,7 @@ struct DirectoryCreateOrOpenFunc : InstructionFunc {
  Standalone<StringRef> layer = layerTuple.getType(0) == Tuple::NULL_TYPE ? StringRef() : layerTuple.getString(0);

  Reference<IDirectory> directory = data->directoryData.directory();
-  logOp(format("create_or_open %s: layer=%s", pathToString(combinePaths(directory->getPath(), path)).c_str(), printable(layer).c_str()));
+  logOp(format("create_or_open %s: layer=%s", pathToString(combinePaths(directory->getPath(), path)).c_str(), layer.printable().c_str()));

  Reference<DirectorySubspace> dirSubspace = wait(executeMutation(instruction, [this, directory, layer] () {
    return directory->createOrOpen(instruction->tr, path, layer);
@ -217,7 +217,7 @@ struct DirectoryCreateFunc : InstructionFunc {
  Optional<Standalone<StringRef>> prefix = args[1].getType(0) == Tuple::NULL_TYPE ? Optional<Standalone<StringRef>>() : args[1].getString(0);

  Reference<IDirectory> directory = data->directoryData.directory();
-  logOp(format("create %s: layer=%s, prefix=%s", pathToString(combinePaths(directory->getPath(), path)).c_str(), printable(layer).c_str(), prefix.present() ? printable(prefix.get()).c_str() : "<not present>"));
+  logOp(format("create %s: layer=%s, prefix=%s", pathToString(combinePaths(directory->getPath(), path)).c_str(), layer.printable().c_str(), prefix.present() ? prefix.get().printable().c_str() : "<not present>"));

  Reference<DirectorySubspace> dirSubspace = wait(executeMutation(instruction, [this, directory, layer, prefix] () {
    return directory->create(instruction->tr, path, layer, prefix);
@ -241,7 +241,7 @@ struct DirectoryOpenFunc : InstructionFunc {
|
|||
Standalone<StringRef> layer = layerTuple.getType(0) == Tuple::NULL_TYPE ? StringRef() : layerTuple.getString(0);
|
||||
|
||||
Reference<IDirectory> directory = data->directoryData.directory();
|
||||
logOp(format("open %s: layer=%s", pathToString(combinePaths(directory->getPath(), path)).c_str(), printable(layer).c_str()));
|
||||
logOp(format("open %s: layer=%s", pathToString(combinePaths(directory->getPath(), path)).c_str(), layer.printable().c_str()));
|
||||
Reference<DirectorySubspace> dirSubspace = wait(directory->open(instruction->tr, path, layer));
|
||||
data->directoryData.push(dirSubspace);
|
||||
|
||||
|
@ -433,7 +433,7 @@ struct DirectoryUnpackKeyFunc : InstructionFunc {
|
|||
ACTOR static Future<Void> call(Reference<FlowTesterData> data, Reference<InstructionData> instruction) {
|
||||
Tuple key = wait(data->stack.waitAndPop());
|
||||
Subspace *subspace = data->directoryData.subspace();
|
||||
logOp(format("Unpack %s in subspace with prefix %s", printable(key.getString(0)).c_str(), printable(subspace->key()).c_str()));
|
||||
logOp(format("Unpack %s in subspace with prefix %s", key.getString(0).printable().c_str(), subspace->key().printable().c_str()));
|
||||
Tuple tuple = subspace->unpack(key.getString(0));
|
||||
for(int i = 0; i < tuple.size(); ++i) {
|
||||
data->stack.push(tuple.subTuple(i, i+1).pack());
|
||||
|
@ -483,7 +483,7 @@ struct DirectoryOpenSubspaceFunc : InstructionFunc {
|
|||
ACTOR static Future<Void> call(Reference<FlowTesterData> data, Reference<InstructionData> instruction) {
|
||||
Tuple tuple = wait(popTuple(data));
|
||||
Subspace *subspace = data->directoryData.subspace();
|
||||
logOp(format("open_subspace %s (at %s)", tupleToString(tuple).c_str(), printable(subspace->key()).c_str()));
|
||||
logOp(format("open_subspace %s (at %s)", tupleToString(tuple).c_str(), subspace->key().printable().c_str()));
|
||||
Subspace *child = new Subspace(subspace->subspace(tuple));
|
||||
data->directoryData.push(child);
|
||||
|
||||
|
|
|
@ -97,7 +97,7 @@ std::string tupleToString(Tuple const& tuple) {
		if(type == Tuple::UTF8) {
			str += "u";
		}
		str += "\'" + printable(tuple.getString(i)) + "\'";
		str += "\'" + tuple.getString(i).printable() + "\'";
	}
	else if(type == Tuple::INT) {
		str += format("%ld", tuple.getInt(i));

@ -220,9 +220,9 @@ ACTOR static Future<Void> debugPrintRange(Reference<Transaction> tr, std::string

	Standalone<RangeResultRef> results = wait(getRange(tr, KeyRange(KeyRangeRef(subspace + '\x00', subspace + '\xff'))));
	printf("==================================================DB:%s:%s, count:%d\n", msg.c_str(),
		printable(subspace).c_str(), results.size());
		StringRef(subspace).printable().c_str(), results.size());
	for (auto & s : results) {
		printf("=====key:%s, value:%s\n", printable(StringRef(s.key)).c_str(), printable(StringRef(s.value)).c_str());
		printf("=====key:%s, value:%s\n", StringRef(s.key).printable().c_str(), StringRef(s.value).printable().c_str());
	}

	return Void();

@ -1030,7 +1030,7 @@ struct TuplePackFunc : InstructionFunc {
		for (; i < items1.size(); ++i) {
			Standalone<StringRef> str = wait(items1[i].value);
			Tuple itemTuple = Tuple::unpack(str);
			if(g_random->coinflip()) {
			if(deterministicRandom()->coinflip()) {
				Tuple::ElementType type = itemTuple.getType(0);
				if(type == Tuple::NULL_TYPE) {
					tuple.appendNull();

@ -1119,7 +1119,7 @@ struct TupleRangeFunc : InstructionFunc {
		for (; i < items1.size(); ++i) {
			Standalone<StringRef> str = wait(items1[i].value);
			Tuple itemTuple = Tuple::unpack(str);
			if(g_random->coinflip()) {
			if(deterministicRandom()->coinflip()) {
				Tuple::ElementType type = itemTuple.getType(0);
				if(type == Tuple::NULL_TYPE) {
					tuple.appendNull();

@ -1791,7 +1791,7 @@ ACTOR void _test_versionstamp() {

	ASSERT(trVersion.compare(dbVersion) == 0);

	fprintf(stderr, "%s\n", printable(trVersion).c_str());
	fprintf(stderr, "%s\n", trVersion.printable().c_str());

	g_network->stop();
}

@ -1809,8 +1809,7 @@ int main( int argc, char** argv ) {
	try {
		platformInit();
		registerCrashHandler();
		g_random = new DeterministicRandom(1);
		g_nondeterministic_random = new DeterministicRandom(platform::getRandomSeed());
		setThreadLocalDeterministicRandomSeed(1);

		// Get arguments
		if (argc < 3) {

@ -1,5 +1,5 @@
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<Project DefaultTargets="Build" ToolsVersion="14.1" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
  <Import Project="$(SolutionDir)versions.target" />
  <PropertyGroup Condition="'$(Release)' != 'true' ">
    <PreReleaseDecoration>-PRERELEASE</PreReleaseDecoration>

@ -35,14 +35,14 @@
    <ConfigurationType>Application</ConfigurationType>
    <UseDebugLibraries>true</UseDebugLibraries>
    <CharacterSet>MultiByte</CharacterSet>
    <PlatformToolset>v140_xp</PlatformToolset>
    <PlatformToolset>v141</PlatformToolset>
  </PropertyGroup>
  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|X64'" Label="Configuration">
    <ConfigurationType>Application</ConfigurationType>
    <UseDebugLibraries>false</UseDebugLibraries>
    <WholeProgramOptimization>false</WholeProgramOptimization>
    <CharacterSet>MultiByte</CharacterSet>
    <PlatformToolset>v140_xp</PlatformToolset>
    <PlatformToolset>v141</PlatformToolset>
  </PropertyGroup>
  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
  <ImportGroup Label="ExtensionSettings">

@ -69,6 +69,7 @@
  <ItemDefinitionGroup>
    <ClCompile>
      <PreprocessorDefinitions>FDB_VT_VERSION="$(Version)$(PreReleaseDecoration)";FDB_VT_PACKAGE_NAME="$(PackageName)";%(PreprocessorDefinitions)</PreprocessorDefinitions>
      <LanguageStandard>stdcpp17</LanguageStandard>
    </ClCompile>
  </ItemDefinitionGroup>
  <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|X64'">

@ -82,6 +83,7 @@
      <MinimalRebuild>false</MinimalRebuild>
      <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
      <AdditionalOptions> @../../../flow/no_intellisense.opt %(AdditionalOptions)</AdditionalOptions>
      <LanguageStandard>stdcpp17</LanguageStandard>
    </ClCompile>
    <Link>
      <SubSystem>Console</SubSystem>

@ -107,6 +109,7 @@
      <BufferSecurityCheck>false</BufferSecurityCheck>
      <EnableEnhancedInstructionSet>StreamingSIMDExtensions2</EnableEnhancedInstructionSet>
      <AdditionalOptions> @../flow/no_intellisense.opt %(AdditionalOptions)</AdditionalOptions>
      <LanguageStandard>stdcpp17</LanguageStandard>
    </ClCompile>
    <Link>
      <SubSystem>Console</SubSystem>

@ -35,8 +35,7 @@ _fdb_flow_tester_clean:
	@rm -rf bindings/flow/bin

ifeq ($(PLATFORM),linux)
  fdb_flow_tester_LIBS += -ldl -lpthread -lrt
  fdb_flow_tester_LDFLAGS += -static-libstdc++ -static-libgcc
  fdb_flow_tester_LDFLAGS += -static-libstdc++ -static-libgcc -ldl -lpthread -lrt -lm
else ifeq ($(PLATFORM),osx)
  fdb_flow_tester_LDFLAGS += -lc++
endif

@ -5,7 +5,7 @@ fdb-go

This package requires:

- Go 1.1+ with CGO enabled
- Go 1.11+ with CGO enabled
- [Mono](http://www.mono-project.com/) (macOS or Linux) or [Visual Studio](https://www.visualstudio.com/) (Windows) (build-time only)
- FoundationDB C API 2.0.x-6.1.x (part of the [FoundationDB client packages](https://apple.github.io/foundationdb/downloads.html#c))

@ -22,10 +22,8 @@

package fdb

/*
 #define FDB_API_VERSION 610
 #include <foundationdb/fdb_c.h>
*/
// #define FDB_API_VERSION 610
// #include <foundationdb/fdb_c.h>
import "C"

// Deprecated: Use OpenDatabase or OpenDefault to obtain a database handle directly

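This hunk (and the matching ones in the files below) rewrites the cgo preamble from a `/* ... */` block into `//` line comments. Both forms are equivalent to cgo: whatever comment sits immediately above `import "C"` is the preamble. A minimal sketch of the line-comment form, with an illustrative C function that is not part of the bindings:

```go
package main

// #include <stdio.h>
// static void greet() { puts("hello from the cgo preamble"); }
import "C"

func main() {
	C.greet() // calls the C function declared in the preamble comment
}
```
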
@ -22,10 +22,8 @@

package fdb

/*
 #define FDB_API_VERSION 610
 #include <foundationdb/fdb_c.h>
*/
// #define FDB_API_VERSION 610
// #include <foundationdb/fdb_c.h>
import "C"

import (

@ -41,6 +41,7 @@ package directory

import (
	"errors"

	"github.com/apple/foundationdb/bindings/go/src/fdb"
	"github.com/apple/foundationdb/bindings/go/src/fdb/subspace"
)

@ -54,6 +55,18 @@ const (
	_MICROVERSION int32 = 0
)

var (
	// ErrDirAlreadyExists is returned when trying to create a directory while it already exists.
	ErrDirAlreadyExists = errors.New("the directory already exists")

	// ErrDirNotExists is returned when opening or listing a directory that does not exist.
	ErrDirNotExists = errors.New("the directory does not exist")

	// ErrParentDirDoesNotExist is returned when opening a directory and one or more
	// parent directories in the path do not exist.
	ErrParentDirDoesNotExist = errors.New("the parent directory does not exist")
)

// Directory represents a subspace of keys in a FoundationDB database,
// identified by a hierarchical path.
type Directory interface {

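With the exported sentinel values above, callers can compare errors directly instead of matching on error strings. A minimal sketch, not part of this patch (the directory path is illustrative):

```go
package main

import (
	"log"

	"github.com/apple/foundationdb/bindings/go/src/fdb"
	"github.com/apple/foundationdb/bindings/go/src/fdb/directory"
)

func main() {
	fdb.MustAPIVersion(610)
	db := fdb.MustOpenDefault()

	// Open fails when the directory is missing; the sentinel error makes
	// the "create it on first use" pattern explicit.
	dir, err := directory.Open(db, []string{"app", "users"}, nil)
	if err == directory.ErrDirNotExists {
		dir, err = directory.Create(db, []string{"app", "users"}, nil)
	}
	if err != nil {
		log.Fatal(err)
	}
	_ = dir
}
```
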
@ -69,8 +82,9 @@ type Directory interface {
	CreateOrOpen(t fdb.Transactor, path []string, layer []byte) (DirectorySubspace, error)

	// Open opens the directory specified by path (relative to this Directory),
	// and returns the directory and its contents as a DirectorySubspace (or an
	// error if the directory does not exist).
	// and returns the directory and its contents as a DirectorySubspace (or ErrDirNotExists
	// error if the directory does not exist, or ErrParentDirDoesNotExist if one of the parent
	// directories in the path does not exist).
	//
	// If the byte slice layer is specified, it is compared against the layer
	// specified when the directory was created, and an error is returned if

@ -79,7 +93,7 @@ type Directory interface {

	// Create creates a directory specified by path (relative to this
	// Directory), and returns the directory and its contents as a
	// DirectorySubspace (or an error if the directory already exists).
	// DirectorySubspace (or ErrDirAlreadyExists if the directory already exists).
	//
	// If the byte slice layer is specified, it is recorded as the layer and
	// will be checked when opening the directory in the future.

@ -99,7 +99,7 @@ func (dl directoryLayer) createOrOpen(rtr fdb.ReadTransaction, tr *fdb.Transacti
	}

	if !allowOpen {
		return nil, errors.New("the directory already exists")
		return nil, ErrDirAlreadyExists
	}

	if layer != nil {

@ -112,7 +112,7 @@ func (dl directoryLayer) createOrOpen(rtr fdb.ReadTransaction, tr *fdb.Transacti
	}

	if !allowCreate {
		return nil, errors.New("the directory does not exist")
		return nil, ErrDirNotExists
	}

	if e := dl.checkVersion(rtr, tr); e != nil {

@ -161,7 +161,7 @@ func (dl directoryLayer) createOrOpen(rtr fdb.ReadTransaction, tr *fdb.Transacti
	}

	if parentNode == nil {
		return nil, errors.New("the parent directory does not exist")
		return nil, ErrParentDirDoesNotExist
	}

	node := dl.nodeWithPrefix(prefix)

@ -254,7 +254,7 @@ func (dl directoryLayer) List(rt fdb.ReadTransactor, path []string) ([]string, e

	node := dl.find(rtr, path).prefetchMetadata(rtr)
	if !node.exists() {
		return nil, errors.New("the directory does not exist")
		return nil, ErrDirNotExists
	}

	if node.isInPartition(nil, true) {

@ -22,10 +22,8 @@

package fdb

/*
 #define FDB_API_VERSION 610
 #include <foundationdb/fdb_c.h>
*/
// #define FDB_API_VERSION 610
// #include <foundationdb/fdb_c.h>
import "C"

import (

@ -22,11 +22,9 @@

package fdb

/*
 #define FDB_API_VERSION 610
 #include <foundationdb/fdb_c.h>
 #include <stdlib.h>
*/
// #define FDB_API_VERSION 610
// #include <foundationdb/fdb_c.h>
// #include <stdlib.h>
import "C"

import (

|
|||
|
||||
package fdb
|
||||
|
||||
/*
|
||||
#cgo LDFLAGS: -lfdb_c -lm
|
||||
#define FDB_API_VERSION 610
|
||||
#include <foundationdb/fdb_c.h>
|
||||
#include <string.h>
|
||||
|
||||
extern void unlockMutex(void*);
|
||||
|
||||
void go_callback(FDBFuture* f, void* m) {
|
||||
unlockMutex(m);
|
||||
}
|
||||
|
||||
void go_set_callback(void* f, void* m) {
|
||||
fdb_future_set_callback(f, (FDBCallback)&go_callback, m);
|
||||
}
|
||||
*/
|
||||
// #cgo LDFLAGS: -lfdb_c -lm
|
||||
// #define FDB_API_VERSION 610
|
||||
// #include <foundationdb/fdb_c.h>
|
||||
// #include <string.h>
|
||||
//
|
||||
// extern void unlockMutex(void*);
|
||||
//
|
||||
// void go_callback(FDBFuture* f, void* m) {
|
||||
// unlockMutex(m);
|
||||
// }
|
||||
//
|
||||
// void go_set_callback(void* f, void* m) {
|
||||
// fdb_future_set_callback(f, (FDBCallback)&go_callback, m);
|
||||
// }
|
||||
import "C"
|
||||
|
||||
import (
|
||||
|
@ -100,15 +98,18 @@ func fdb_future_block_until_ready(f *C.FDBFuture) {
	m.Lock()
}

func (f future) BlockUntilReady() {
func (f *future) BlockUntilReady() {
	defer runtime.KeepAlive(f)
	fdb_future_block_until_ready(f.ptr)
}

func (f future) IsReady() bool {
func (f *future) IsReady() bool {
	defer runtime.KeepAlive(f)
	return C.fdb_future_is_ready(f.ptr) != 0
}

func (f future) Cancel() {
func (f *future) Cancel() {
	defer runtime.KeepAlive(f)
	C.fdb_future_cancel(f.ptr)
}

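The pointer receivers plus `defer runtime.KeepAlive(f)` above pin the wrapper for the duration of the C call: without the KeepAlive, the garbage collector may run the wrapper's finalizer (destroying the underlying C future) after the last Go-visible use of `f`, even while `f.ptr` is still being read on the C side. A minimal sketch of that pattern with illustrative names, not the bindings' real types:

```go
package main

import (
	"fmt"
	"runtime"
)

// resource stands in for a C-side object (e.g. an FDBFuture pointer).
type resource struct{ freed bool }

type wrapper struct{ res *resource }

func (w *wrapper) use() {
	// Pin w until this call returns; otherwise the finalizer below could
	// free w.res while the "C call" is still using it.
	defer runtime.KeepAlive(w)
	fmt.Println("resource freed?", w.res.freed)
}

func main() {
	w := &wrapper{res: &resource{}}
	runtime.SetFinalizer(w, func(w *wrapper) { w.res.freed = true })
	w.use()
}
```
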
@ -140,6 +141,8 @@ type futureByteSlice struct {

func (f *futureByteSlice) Get() ([]byte, error) {
	f.o.Do(func() {
		defer runtime.KeepAlive(f.future)

		var present C.fdb_bool_t
		var value *C.uint8_t
		var length C.int

@ -195,6 +198,8 @@ type futureKey struct {

func (f *futureKey) Get() (Key, error) {
	f.o.Do(func() {
		defer runtime.KeepAlive(f.future)

		var value *C.uint8_t
		var length C.int

@ -241,7 +246,9 @@ type futureNil struct {
	*future
}

func (f futureNil) Get() error {
func (f *futureNil) Get() error {
	defer runtime.KeepAlive(f.future)

	f.BlockUntilReady()
	if err := C.fdb_future_get_error(f.ptr); err != 0 {
		return Error{int(err)}

@ -250,7 +257,7 @@ func (f futureNil) Get() error {
	return nil
}

func (f futureNil) MustGet() {
func (f *futureNil) MustGet() {
	if err := f.Get(); err != nil {
		panic(err)
	}

@ -272,7 +279,9 @@ func stringRefToSlice(ptr unsafe.Pointer) []byte {
	return C.GoBytes(src, size)
}

func (f futureKeyValueArray) Get() ([]KeyValue, bool, error) {
func (f *futureKeyValueArray) Get() ([]KeyValue, bool, error) {
	defer runtime.KeepAlive(f.future)

	f.BlockUntilReady()

	var kvs *C.FDBKeyValue

@ -316,17 +325,20 @@ type futureInt64 struct {
	*future
}

func (f futureInt64) Get() (int64, error) {
func (f *futureInt64) Get() (int64, error) {
	defer runtime.KeepAlive(f.future)

	f.BlockUntilReady()

	var ver C.int64_t
	if err := C.fdb_future_get_version(f.ptr, &ver); err != 0 {
		return 0, Error{int(err)}
	}

	return int64(ver), nil
}

func (f futureInt64) MustGet() int64 {
func (f *futureInt64) MustGet() int64 {
	val, err := f.Get()
	if err != nil {
		panic(err)

@ -356,7 +368,9 @@ type futureStringSlice struct {
	*future
}

func (f futureStringSlice) Get() ([]string, error) {
func (f *futureStringSlice) Get() ([]string, error) {
	defer runtime.KeepAlive(f.future)

	f.BlockUntilReady()

	var strings **C.char

@ -375,7 +389,7 @@ func (f futureStringSlice) Get() ([]string, error) {
	return ret, nil
}

func (f futureStringSlice) MustGet() []string {
func (f *futureStringSlice) MustGet() []string {
	val, err := f.Get()
	if err != nil {
		panic(err)

@ -46,6 +46,13 @@ func (o NetworkOptions) SetLocalAddress(param string) error {
	return o.setOpt(10, []byte(param))
}

// enable the object serializer for network communication
//
// Parameter: 0 is false, every other value is true
func (o NetworkOptions) SetUseObjectSerializer(param int64) error {
	return o.setOpt(11, int64ToBytes(param))
}

// Deprecated
//
// Parameter: path to cluster file

@ -444,7 +451,7 @@ const (
	// Infrequently used. The client has passed a specific row limit and wants
	// that many rows delivered in a single batch. Because of iterator operation
	// in client drivers make request batches transparent to the user, consider
	// ``WANT_ALL`` StreamingMode instead. A row limit must be specified if this
	// “WANT_ALL“ StreamingMode instead. A row limit must be specified if this
	// mode is used.
	StreamingModeExact StreamingMode = 1

@ -561,15 +568,15 @@ type ErrorPredicate int

const (

	// Returns ``true`` if the error indicates the operations in the
	// transactions should be retried because of transient error.
	// Returns “true“ if the error indicates the operations in the transactions
	// should be retried because of transient error.
	ErrorPredicateRetryable ErrorPredicate = 50000

	// Returns ``true`` if the error indicates the transaction may have
	// succeeded, though not in a way the system can verify.
	// Returns “true“ if the error indicates the transaction may have succeeded,
	// though not in a way the system can verify.
	ErrorPredicateMaybeCommitted ErrorPredicate = 50001

	// Returns ``true`` if the error indicates the transaction has not
	// committed, though in a way that can be retried.
	// Returns “true“ if the error indicates the transaction has not committed,
	// though in a way that can be retried.
	ErrorPredicateRetryableNotCommitted ErrorPredicate = 50002
)

@ -22,10 +22,8 @@

package fdb

/*
 #define FDB_API_VERSION 610
 #include <foundationdb/fdb_c.h>
*/
// #define FDB_API_VERSION 610
// #include <foundationdb/fdb_c.h>
import "C"

import (

@ -22,10 +22,8 @@

package fdb

/*
 #define FDB_API_VERSION 610
 #include <foundationdb/fdb_c.h>
*/
// #define FDB_API_VERSION 610
// #include <foundationdb/fdb_c.h>
import "C"

// A ReadTransaction can asynchronously read from a FoundationDB

@ -28,14 +28,8 @@
#define JNI_NULL nullptr

#if defined(__GNUG__)
#define thread_local __thread
// TODO: figure out why the default definition suppresses visibility
#undef JNIEXPORT
#define JNIEXPORT __attribute__ ((visibility ("default")))
#elif defined(_MSC_VER)
#define thread_local __declspec(thread)
#else
#error Missing thread local storage
#endif

static JavaVM* g_jvm = nullptr;

@ -1,5 +1,5 @@
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<Project DefaultTargets="Build" ToolsVersion="14.1" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
  <Import Project="$(SolutionDir)versions.target" />
  <PropertyGroup Condition="'$(Release)' != 'true' ">
    <PreReleaseDecoration>-PRERELEASE</PreReleaseDecoration>

@ -28,14 +28,14 @@
    <ConfigurationType>DynamicLibrary</ConfigurationType>
    <UseDebugLibraries>true</UseDebugLibraries>
    <CharacterSet>MultiByte</CharacterSet>
    <PlatformToolset>v140_xp</PlatformToolset>
    <PlatformToolset>v141</PlatformToolset>
  </PropertyGroup>
  <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
    <ConfigurationType>DynamicLibrary</ConfigurationType>
    <UseDebugLibraries>false</UseDebugLibraries>
    <WholeProgramOptimization>true</WholeProgramOptimization>
    <CharacterSet>MultiByte</CharacterSet>
    <PlatformToolset>v140_xp</PlatformToolset>
    <PlatformToolset>v141</PlatformToolset>
  </PropertyGroup>
  <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
  <ImportGroup Label="ExtensionSettings">

@ -63,6 +63,7 @@
      <PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=_WIN32_WINNT_WS03;BOOST_ALL_NO_LIB;WINVER=_WIN32_WINNT_WS03;NTDDI_VERSION=NTDDI_WS03;_DEBUG;_HAS_ITERATOR_DEBUGGING=0;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
      <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
      <AdditionalOptions>/bigobj "@$(SolutionDir)flow/no_intellisense.opt" %(AdditionalOptions)</AdditionalOptions>
      <LanguageStandard>stdcpp17</LanguageStandard>
    </ClCompile>
    <Link>
      <SubSystem>Windows</SubSystem>

@ -78,6 +79,7 @@
      <PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=_WIN32_WINNT_WS03;BOOST_ALL_NO_LIB;WINVER=_WIN32_WINNT_WS03;NTDDI_VERSION=NTDDI_WS03;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
      <RuntimeLibrary>MultiThreaded</RuntimeLibrary>
      <AdditionalOptions>/bigobj "@$(SolutionDir)flow/no_intellisense.opt" %(AdditionalOptions)</AdditionalOptions>
      <LanguageStandard>stdcpp17</LanguageStandard>
    </ClCompile>
    <Link>
      <EnableCOMDATFolding>true</EnableCOMDATFolding>

@ -1,14 +1,16 @@
FROM centos:6
LABEL version=0.1.2
LABEL version=0.1.5
ENV DOCKER_IMAGEVER=0.1.5

# Install dependencies for developer tools, bindings,\
# documentation, actorcompiler, and packaging tools\
RUN yum install -y yum-utils &&\
	yum-config-manager --enable rhel-server-rhscl-7-rpms &&\
	yum -y install centos-release-scl epel-release &&\
	yum -y install devtoolset-7 mono-core java-1.8.0-openjdk-devel \
		rh-python36-python-devel rh-ruby24 golang python27 \
		rpm-build debbuild python-pip npm ccache distcc &&\
	yum -y install devtoolset-8 java-1.8.0-openjdk-devel \
		rh-python36-python-devel devtoolset-8-valgrind-devel \
		mono-core rh-ruby24 golang python27 rpm-build debbuild \
		python-pip npm dos2unix valgrind-devel ccache &&\
	pip install boto3==1.1.1

USER root

@ -35,8 +37,9 @@ RUN curl -L https://github.com/Kitware/CMake/releases/download/v3.13.4/cmake-3.1
RUN curl -L https://ftp.openbsd.org/pub/OpenBSD/LibreSSL/libressl-2.8.2.tar.gz > /tmp/libressl.tar.gz &&\
	cd /tmp && echo "b8cb31e59f1294557bfc80f2a662969bc064e83006ceef0574e2553a1c254fd5 libressl.tar.gz" > libressl-sha.txt &&\
	sha256sum -c libressl-sha.txt && tar xf libressl.tar.gz &&\
	cd libressl-2.8.2 && cd /tmp/libressl-2.8.2 && scl enable devtoolset-7 -- ./configure --prefix=/usr/local/stow/libressl CFLAGS="-fPIC -O3" --prefix=/usr/local &&\
	cd /tmp/libressl-2.8.2 && scl enable devtoolset-7 -- make -j`nproc` install &&\
	cd libressl-2.8.2 && cd /tmp/libressl-2.8.2 && scl enable devtoolset-8 -- ./configure --prefix=/usr/local/stow/libressl CFLAGS="-fPIC -O3" --prefix=/usr/local &&\
	cd /tmp/libressl-2.8.2 && scl enable devtoolset-8 -- make -j`nproc` install &&\
	rm -rf /tmp/libressl-2.8.2 /tmp/libressl.tar.gz

CMD scl enable devtoolset-7 python27 rh-python36 rh-ruby24 -- bash
ENV JAVA_HOME=/usr/lib/jvm/java-1.8.0
CMD scl enable devtoolset-8 python27 rh-python36 rh-ruby24 -- bash

@ -2,7 +2,7 @@ version: "3"
|
|||
|
||||
services:
|
||||
common: &common
|
||||
image: foundationdb/foundationdb-build:0.1.2
|
||||
image: foundationdb/foundationdb-build:0.1.5
|
||||
|
||||
build-setup: &build-setup
|
||||
<<: *common
|
||||
|
@ -12,12 +12,14 @@ services:
|
|||
working_dir: /__this_is_some_very_long_name_dir_needed_to_fix_a_bug_with_debug_rpms__/foundationdb
|
||||
environment:
|
||||
- MAKEJOBS=1
|
||||
- USE_CCACHE=1
|
||||
- BUILD_DIR=./work
|
||||
|
||||
release-setup: &release-setup
|
||||
<<: *build-setup
|
||||
environment:
|
||||
- MAKEJOBS=1
|
||||
- USE_CCACHE=1
|
||||
- RELEASE=true
|
||||
- BUILD_DIR=./work
|
||||
|
||||
|
@ -26,16 +28,19 @@ services:
|
|||
|
||||
build-docs:
|
||||
<<: *build-setup
|
||||
command: scl enable devtoolset-7 python27 rh-python36 rh-ruby24 -- bash -c 'mkdir -p "$${BUILD_DIR}" && cd "$${BUILD_DIR}" && cmake /__this_is_some_very_long_name_dir_needed_to_fix_a_bug_with_debug_rpms__/foundationdb && make -j "$${MAKEJOBS}" package_html'
|
||||
volumes:
|
||||
- ..:/foundationdb
|
||||
working_dir: /foundationdb
|
||||
command: scl enable devtoolset-8 python27 rh-python36 rh-ruby24 -- bash -c 'make -j "$${MAKEJOBS}" docpackage'
|
||||
|
||||
|
||||
release-packages: &release-packages
|
||||
<<: *release-setup
|
||||
command: scl enable devtoolset-7 python27 rh-python36 rh-ruby24 -- bash -c 'mkdir -p "$${BUILD_DIR}" && cd "$${BUILD_DIR}" && cmake -DFDB_RELEASE=1 -DVALGRIND=0 /__this_is_some_very_long_name_dir_needed_to_fix_a_bug_with_debug_rpms__/foundationdb && make -j "$${MAKEJOBS}" packages preinstall && cpack'
|
||||
command: scl enable devtoolset-8 python27 rh-python36 rh-ruby24 -- bash -c 'make -j "$${MAKEJOBS}" packages'
|
||||
|
||||
snapshot-packages: &snapshot-packages
|
||||
<<: *build-setup
|
||||
command: scl enable devtoolset-7 python27 rh-python36 rh-ruby24 -- bash -c 'mkdir -p "$${BUILD_DIR}" && cd "$${BUILD_DIR}" && cmake -DFDB_RELEASE=0 -DVALGRIND=0 /__this_is_some_very_long_name_dir_needed_to_fix_a_bug_with_debug_rpms__/foundationdb && make -j "$${MAKEJOBS}" packages preinstall && cpack'
|
||||
command: scl enable devtoolset-8 python27 rh-python36 rh-ruby24 -- bash -c 'make -j "$${MAKEJOBS}" packages'
|
||||
|
||||
prb-packages:
|
||||
<<: *snapshot-packages
|
||||
|
@ -43,11 +48,11 @@ services:
|
|||
|
||||
release-bindings: &release-bindings
|
||||
<<: *release-setup
|
||||
command: bash -c 'make -j "$${MAKEJOBS}" bindings'
|
||||
command: scl enable devtoolset-8 python27 rh-python36 rh-ruby24 -- bash -c 'make -j "$${MAKEJOBS}" bindings'
|
||||
|
||||
snapshot-bindings: &snapshot-bindings
|
||||
<<: *build-setup
|
||||
command: scl enable devtoolset-7 python27 rh-python36 rh-ruby24 -- bash -c 'mkdir -p "$${BUILD_DIR}" && cd "$${BUILD_DIR}" && cmake -DFDB_RELEASE=0 /__this_is_some_very_long_name_dir_needed_to_fix_a_bug_with_debug_rpms__/foundationdb && make -j "$${MAKEJOBS}" python_binding'
|
||||
command: scl enable devtoolset-8 python27 rh-python36 rh-ruby24 -- bash -c 'make -j "$${MAKEJOBS}" bindings'
|
||||
|
||||
prb-bindings:
|
||||
<<: *snapshot-bindings
|
||||
|
@ -55,7 +60,7 @@ services:
|
|||
|
||||
snapshot-cmake: &snapshot-cmake
|
||||
<<: *build-setup
|
||||
command: scl enable devtoolset-7 python27 rh-python36 rh-ruby24 -- bash -c 'mkdir -p "$${BUILD_DIR}" && cd "$${BUILD_DIR}" && cmake -DFDB_RELEASE=0 -DVALGRIND=0 /__this_is_some_very_long_name_dir_needed_to_fix_a_bug_with_debug_rpms__/foundationdb && make -j "$${MAKEJOBS}" packages preinstall && cpack'
|
||||
command: scl enable devtoolset-8 python27 rh-python36 rh-ruby24 -- bash -c 'mkdir -p "$${BUILD_DIR}" && cd "$${BUILD_DIR}" && cmake -DFDB_RELEASE=0 -DVALGRIND=0 /__this_is_some_very_long_name_dir_needed_to_fix_a_bug_with_debug_rpms__/foundationdb && make -j "$${MAKEJOBS}" packages preinstall && cpack'
|
||||
|
||||
prb-cmake:
|
||||
<<: *snapshot-cmake
|
||||
|
@ -63,7 +68,7 @@ services:
|
|||
|
||||
snapshot-ctest: &snapshot-ctest
|
||||
<<: *build-setup
|
||||
command: scl enable devtoolset-7 python27 rh-python36 rh-ruby24 -- bash -c 'mkdir -p "$${BUILD_DIR}" && cd "$${BUILD_DIR}" && cmake -DFDB_RELEASE=1 /__this_is_some_very_long_name_dir_needed_to_fix_a_bug_with_debug_rpms__/foundationdb && make -j "$${MAKEJOBS}" && ctest -L fast -j "$${MAKEJOBS}" --output-on-failure'
|
||||
command: scl enable devtoolset-8 python27 rh-python36 rh-ruby24 -- bash -c 'mkdir -p "$${BUILD_DIR}" && cd "$${BUILD_DIR}" && cmake -DFDB_RELEASE=1 /__this_is_some_very_long_name_dir_needed_to_fix_a_bug_with_debug_rpms__/foundationdb && make -j "$${MAKEJOBS}" && ctest -L fast -j "$${MAKEJOBS}" --output-on-failure'
|
||||
|
||||
prb-ctest:
|
||||
<<: *snapshot-ctest
|
||||
|
@ -71,7 +76,7 @@ services:
|
|||
|
||||
snapshot-correctness: &snapshot-correctness
|
||||
<<: *build-setup
|
||||
command: scl enable devtoolset-7 python27 rh-python36 rh-ruby24 -- bash -c 'mkdir -p "$${BUILD_DIR}" && cd "$${BUILD_DIR}" && cmake -DFDB_RELEASE=1 /__this_is_some_very_long_name_dir_needed_to_fix_a_bug_with_debug_rpms__/foundationdb && make -j "$${MAKEJOBS}" && ctest -j "$${MAKEJOBS}" --output-on-failure
|
||||
command: scl enable devtoolset-8 python27 rh-python36 rh-ruby24 -- bash -c 'mkdir -p "$${BUILD_DIR}" && cd "$${BUILD_DIR}" && cmake -DFDB_RELEASE=1 /__this_is_some_very_long_name_dir_needed_to_fix_a_bug_with_debug_rpms__/foundationdb && make -j "$${MAKEJOBS}" && ctest -j "$${MAKEJOBS}" --output-on-failure
|
||||
|
||||
prb-correctness:
|
||||
<<: *snapshot-correctness
|
||||
|
|
|
@ -13,7 +13,7 @@ elseif(CPACK_GENERATOR MATCHES "DEB")
  set(CPACK_COMPONENTS_ALL clients-deb server-deb)
  set(CPACK_RESOURCE_FILE_README ${CMAKE_SOURCE_DIR}/README.md)
  set(CPACK_RESOURCE_FILE_LICENSE ${CMAKE_SOURCE_DIR}/LICENSE)
elseif(CPACK_GENERATOR MATCHES "PackageMaker")
elseif(CPACK_GENERATOR MATCHES "productbuild")
  set(CPACK_PACKAGING_INSTALL_PREFIX "/")
  set(CPACK_COMPONENTS_ALL clients-pm server-pm)
  set(CPACK_STRIP_FILES TRUE)

@ -21,10 +21,11 @@ elseif(CPACK_GENERATOR MATCHES "PackageMaker")
  set(CPACK_POSTFLIGHT_SERVER_SCRIPT ${CMAKE_SOURCE_DIR}/packaging/osx/scripts-server/postinstall)
  set(CPACK_POSTFLIGHT_CLIENTS_SCRIPT ${CMAKE_SOURCE_DIR}/packaging/osx/scripts-server/preinstall)
  # Commenting out this readme file until it works within packaging
  # set(CPACK_RESOURCE_FILE_README ${CMAKE_SOURCE_DIR}/packaging/osx/resources/conclusion.rtf)
  set(CPACK_RESOURCE_FILE_README ${CMAKE_SOURCE_DIR}/packaging/osx/resources/conclusion.rtf)
  set(CPACK_PRODUCTBUILD_RESOURCES_DIR ${CMAKE_SOURCE_DIR}/packaging/osx/resources)
  # Changing the path of this file as CMAKE_BINARY_DIR does not seem to be defined
  set(CPACK_RESOURCE_FILE_LICENSE License.txt)
  set(CPACK_RESOURCE_FILE_LICENSE ${CMAKE_BINARY_DIR}/License.txt)
  set(CPACK_PACKAGE_FILE_NAME "foundationdb-${PROJECT_VERSION}.${CURRENT_GIT_VERSION}${prerelease_string}")
elseif(CPACK_GENERATOR MATCHES "TGZ")
  set(CPACK_STRIP_FILES TRUE)
  set(CPACK_COMPONENTS_ALL clients-tgz server-tgz)

@ -1,10 +1,13 @@
set(USE_GPERFTOOLS OFF CACHE BOOL "Use gperfools for profiling")
set(PORTABLE_BINARY OFF CACHE BOOL "Create a binary that runs on older OS versions")
set(USE_VALGRIND OFF CACHE BOOL "Compile for valgrind usage")
set(USE_GOLD_LINKER OFF CACHE BOOL "Use gold linker")
set(ALLOC_INSTRUMENTATION OFF CACHE BOOL "Instrument alloc")
set(WITH_UNDODB OFF CACHE BOOL "Use rr or undodb")
set(USE_ASAN OFF CACHE BOOL "Compile with address sanitizer")
set(FDB_RELEASE OFF CACHE BOOL "This is a building of a final release")
set(USE_LD "LD" CACHE STRING "The linker to use for building: can be LD (system default, default choice), GOLD, or LLD")
set(USE_LIBCXX OFF CACHE BOOL "Use libc++")
set(USE_CCACHE OFF CACHE BOOL "Use ccache for compilation if available")

if(USE_GPERFTOOLS)
  find_package(Gperftools REQUIRED)

@ -43,11 +46,26 @@ else()
  add_definitions(-DUSE_UCONTEXT)
endif()

if ((NOT USE_CCACHE) AND (NOT "$ENV{USE_CCACHE}" STREQUAL ""))
  string(TOUPPER "$ENV{USE_CCACHE}" USE_CCACHEENV)
  if (("${USE_CCACHEENV}" STREQUAL "ON") OR ("${USE_CCACHEENV}" STREQUAL "1") OR ("${USE_CCACHEENV}" STREQUAL "YES"))
    set(USE_CCACHE ON)
  endif()
endif()
if (USE_CCACHE)
  FIND_PROGRAM(CCACHE_FOUND "ccache")
  if(CCACHE_FOUND)
    set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE ccache)
    set_property(GLOBAL PROPERTY RULE_LAUNCH_LINK ccache)
  else()
    message(SEND_ERROR "CCACHE is ON, but ccache was not found")
  endif()
endif()

include(CheckFunctionExists)
set(CMAKE_REQUIRED_INCLUDES stdlib.h malloc.h)
set(CMAKE_REQUIRED_LIBRARIES c)


if(WIN32)
  # see: https://docs.microsoft.com/en-us/windows/desktop/WinProg/using-the-windows-headers
  # this sets the windows target version to Windows 7

@ -55,11 +73,6 @@ if(WIN32)
  add_compile_options(/W3 /EHsc /std:c++17 /bigobj $<$<CONFIG:Release>:/Zi> /MP)
  add_compile_definitions(_WIN32_WINNT=${WINDOWS_TARGET} BOOST_ALL_NO_LIB)
else()
  if(USE_GOLD_LINKER)
    set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fuse-ld=gold -Wl,--disable-new-dtags")
    set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fuse-ld=gold -Wl,--disable-new-dtags")
  endif()

  set(GCC NO)
  set(CLANG NO)
  if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang" OR "${CMAKE_CXX_COMPILER_ID}" STREQUAL "AppleClang")

@ -70,17 +83,36 @@ else()
    set(GCC YES)
  endif()

  # check linker flags.
  if ((NOT (USE_LD STREQUAL "LD")) AND (NOT (USE_LD STREQUAL "GOLD")) AND (NOT (USE_LD STREQUAL "LLD")))
    message (FATAL_ERROR "USE_LD must be set to LD, GOLD, or LLD!")
  endif()

  # if USE_LD=LD, then we don't do anything, defaulting to whatever system
  # linker is available (e.g. binutils doesn't normally exist on macOS, so this
  # implies the default xcode linker, and other distros may choose others by
  # default).

  if(USE_LD STREQUAL "GOLD")
    set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fuse-ld=gold -Wl,--disable-new-dtags")
    set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fuse-ld=gold -Wl,--disable-new-dtags")
  endif()

  if(USE_LD STREQUAL "LLD")
    set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fuse-ld=lld -Wl,--disable-new-dtags")
    set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fuse-ld=lld -Wl,--disable-new-dtags")
  endif()

  # we always compile with debug symbols. CPack will strip them out
  # and create a debuginfo rpm
  add_compile_options(-ggdb)
  set(USE_ASAN OFF CACHE BOOL "Compile with address sanitizer")
  add_compile_options(-ggdb -fno-omit-frame-pointer)
  if(USE_ASAN)
    add_compile_options(
      -fno-omit-frame-pointer -fsanitize=address
      -fsanitize=address
      -DUSE_ASAN)
    set(CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} -fno-omit-frame-pointer -fsanitize=address")
    set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -fno-omit-frame-pointer -fsanitize=address")
    set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fno-omit-frame-pointer -fsanitize=address ${CMAKE_THREAD_LIBS_INIT}")
    set(CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} -fsanitize=address")
    set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -fsanitize=address")
    set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fsanitize=address ${CMAKE_THREAD_LIBS_INIT}")
  endif()

  if(PORTABLE_BINARY)

@ -100,8 +132,12 @@ else()
    add_compile_options(-DVALGRIND -DUSE_VALGRIND)
  endif()
  if (CLANG)
    if (APPLE)
      add_compile_options(-stdlib=libc++)
    if (APPLE OR USE_LIBCXX)
      add_compile_options($<$<COMPILE_LANGUAGE:CXX>:-stdlib=libc++>)
      add_compile_definitions(WITH_LIBCXX)
      if (NOT APPLE)
        add_link_options(-stdlib=libc++ -lc++abi -Wl,-build-id=sha1)
      endif()
    endif()
    add_compile_options(
      -Wno-unknown-warning-option

@ -1,5 +1,13 @@
set(FORCE_ALL_COMPONENTS OFF CACHE BOOL "Fails cmake if not all dependencies are found")

################################################################################
# Valgrind
################################################################################

if(USE_VALGRIND)
  find_package(Valgrind REQUIRED)
endif()

################################################################################
# LibreSSL
################################################################################

@ -0,0 +1,13 @@
find_path(VALGRIND_INCLUDE_DIR
  NAMES
    valgrind.h
  PATH_SUFFIXES include valgrind)

find_package_handle_standard_args(Valgrind
  REQUIRED_VARS VALGRIND_INCLUDE_DIR
  FAIL_MESSAGE "Could not find Valgrind header files, try set the path to the Valgrind headers in the variable Valgrind_ROOT")

if(VALGRIND_FOUND)
  add_library(Valgrind INTERFACE)
  target_include_directories(Valgrind INTERFACE "${VALGRIND_INCLUDE_DIR}")
endif()

@ -147,14 +147,15 @@ function(fdb_install)
endfunction()

if(APPLE)
  set(CPACK_GENERATOR TGZ PackageMaker)
  set(CPACK_GENERATOR TGZ productbuild)
else()
  set(CPACK_GENERATOR RPM DEB TGZ)
endif()


set(CPACK_PACKAGE_CHECKSUM SHA256)
set(CPACK_PROJECT_CONFIG_FILE "${CMAKE_SOURCE_DIR}/cmake/CPackConfig.cmake")
configure_file("${CMAKE_SOURCE_DIR}/cmake/CPackConfig.cmake" "${CMAKE_BINARY_DIR}/packaging/CPackConfig.cmake")
set(CPACK_PROJECT_CONFIG_FILE "${CMAKE_BINARY_DIR}/packaging/CPackConfig.cmake")

################################################################################
# Version information

@ -332,7 +333,7 @@ set(CPACK_DEBIAN_SERVER-DEB_PACKAGE_CONTROL_EXTRA
# MacOS configuration
################################################################################

if(NOT WIN32)
if(APPLE)
  install(PROGRAMS ${CMAKE_SOURCE_DIR}/packaging/osx/uninstall-FoundationDB.sh
    DESTINATION "usr/local/foundationdb"
    COMPONENT clients-pm)

@ -0,0 +1,91 @@
## FDB Backup Data Format

### Introduction
This document describes the data format of the files generated by the FoundationDB (FDB) backup procedure.
The readers who may benefit from this document are:
* those who make changes to the current backup or restore procedure;
* those who write tools to digest the backup data for analytical purposes;
* those who want to understand the internals of how backup and restore work.

The description of the backup data format is based on FDB 5.2 to FDB 6.1. The backup data format may (although unlikely) change after FDB 6.1.


### Files generated by backup
The backup procedure generates two types of files: range files and log files.
* A range file describes key-value pairs in a range at the version when the backup process takes a snapshot of the range. Different range files have data for different ranges at different versions.
* A log file describes the mutations taken from a version v<sub>1</sub> to v<sub>2</sub> during the backup procedure.

With the key-value pairs in the range files and the mutations in the log files, the restore procedure can restore the database into a consistent state at a user-provided version v<sub>k</sub> if the backup data is claimed by the restore as restorable at v<sub>k</sub>. (The details of determining if a set of backup data is restorable at a version are out of scope of this document and can be found at [backup.md](https://github.com/xumengpanda/foundationdb/blob/cd873831ecd18653c5bf459d6f72d14a99b619c4/design/backup.md).)


### Filename conventions
The backup files will be saved in a directory (i.e., url) specified by users. Under the directory, the range files are in the `snapshots` folder. The log files are in the `logs` folder.

The convention of the range filename is `snapshots/snapshot,beginVersion,beginVersion,blockSize`, where `beginVersion` is the version when the key-values in the range file are recorded, and `blockSize` is the size of data blocks in the range file.

The convention of the log filename is `logs/versionPrefix/log,beginVersion,endVersion,randomUID,blockSize`, where the `versionPrefix` is a 2-level path (`x/y`) under which `beginVersion` should go, such that `x/y/*` contains (10^smallestBucket) possible versions; the `randomUID` is a random UID; the `beginVersion` and `endVersion` are the version range (left inclusive, right exclusive) when the mutations are recorded; and the `blockSize` is the data block size in the log file.

We will use an example to explain what each field in the range and log filename means.
Suppose under the backup directory, we have a range file `snapshots/snapshot,78994177,78994177,97` and a log file `logs/0000/0000/log,78655645,98655645,149a0bdfedecafa2f648219d5eba816e,1048576`.
The range file’s filename tells us that all key-value pairs decoded from the file are the key-value pairs in the DB at version `78994177`. The data block size is `97` bytes.
The log file’s filename tells us that the mutations in the log file were the mutations in the DB during the version range `[78655645, 98655645)`, and the data block size is `1048576` bytes.


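A hedged sketch of splitting a log filename from the convention above into its fields (the helper code is ours, not part of the backup tooling):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	name := "logs/0000/0000/log,78655645,98655645,149a0bdfedecafa2f648219d5eba816e,1048576"
	base := name[strings.LastIndex(name, "/")+1:]
	f := strings.Split(base, ",")
	// f[0]="log", f[1]=beginVersion, f[2]=endVersion, f[3]=randomUID, f[4]=blockSize
	fmt.Printf("versions [%s, %s), uid=%s, blockSize=%s\n", f[1], f[2], f[3], f[4])
}
```
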
### Data format in a range file
A range file can have one to many data blocks. Each data block has a set of key-value pairs.
A data block is encoded as follows: `Header startKey k1v1 k2v2 Padding`.


Example:

The client code writes keys in this sequence:
a c d e f g h i j z
The backup procedure records the key-value pairs in the database into range file.

H = header P = padding a...z = keys v = value | = block boundary

Encoded file: H a cv dv ev P | H e ev fv gv hv P | H h hv iv jv z
Decoded in blocks yields:
Block 1: range [a, e) with kv pairs cv, dv
Block 2: range [e, h) with kv pairs ev, fv, gv
Block 3: range [h, z) with kv pairs hv, iv, jv

NOTE: All blocks except for the final block will have one last value which will not be used. This isn't actually a waste, since if the next KV pair wouldn't fit within the block after that value, the space after the final key up to the next 1MB boundary would just be padding anyway.

The code related to how a range file is written is in the `struct RangeFileWriter` in `namespace fileBackup`.

The code that decodes a range block is in `ACTOR Future<Standalone<VectorRef<KeyValueRef>>> decodeRangeFileBlock(Reference<IAsyncFile> file, int64_t offset, int len)`.


### Data format in a log file
A log file can have one to many data blocks.
Each block is encoded as `Header, [Param1, Param2]... padding`.
The first 32 bits in `Param1` and `Param2` specify the lengths of `Param1` and `Param2`.
`Param1` specifies the version when the mutations happened;
`Param2` encodes the group of mutations that happened at that version.

Note that if the group of mutations is bigger than the block size, the mutation group will be split across multiple data blocks.
For example, we may get `[Param1, Param2_part0]`, `[Param1, Param2_part1]`. By concatenating `Param2_part0` and `Param2_part1`, we can get the group of all mutations that happened at the version specified in `Param1`.

The encoding format for `Param1` is as follows:
`hashValue|commitVersion|part`,
where `hashValue` is the hash of the commitVersion, `commitVersion` is the version when the mutations in `Param2`(s) are taken, and `part` is the part number in case we need to concatenate the `Param2` parts to get the group of all mutations.
`hashValue` takes 8 bits, `commitVersion` takes 64 bits, and `part` takes 32 bits.

Note that in case of concatenating the partial groups of mutations in `Param2` to get the full group of all mutations, the part numbers should be continuous.

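A hedged sketch of decoding `Param1` under the field widths above (8-bit hashValue, 64-bit commitVersion, 32-bit part), assuming big-endian integers as described in the Endianness section at the end of this document:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func decodeParam1(p []byte) (hash byte, commitVersion uint64, part uint32) {
	hash = p[0]
	commitVersion = binary.BigEndian.Uint64(p[1:9])
	part = binary.BigEndian.Uint32(p[9:13])
	return
}

func main() {
	p := make([]byte, 13)
	p[0] = 0xab // hash of the commit version (illustrative value)
	binary.BigEndian.PutUint64(p[1:9], 78655645)
	binary.BigEndian.PutUint32(p[9:13], 0)
	fmt.Println(decodeParam1(p))
}
```
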
The encoding format for the group of mutations, which is `Param2` or the concatenated `Param2` parts in case of a partial group of mutations in a block, is as follows:
`length_of_the_mutation_group | encoded_mutation_1 | … | encoded_mutation_k`.
Each `encoded_mutation_i` is encoded as
`type|kLen|vLen|Key|Value`,
where `type` is the mutation type (such as Set or Clear), `kLen` and `vLen` are the lengths of the key and the value in the mutation, and `Key` and `Value` are the serialized values of the key and value in the mutation.

The code related to how a log file is written is in the `struct LogFileWriter` in `namespace fileBackup`.

The code that decodes a mutation block is in `ACTOR Future<Standalone<VectorRef<KeyValueRef>>> decodeLogFileBlock(Reference<IAsyncFile> file, int64_t offset, int len)`.

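A hedged sketch of walking one mutation group in this layout. The 32-bit widths of `type`, `kLen`, and `vLen` are an assumption (the document does not state them explicitly), and big-endian byte order follows the Endianness section below; the authoritative decoder is `decodeLogFileBlock`:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// decodeMutations walks `length | type | kLen | vLen | Key | Value`
// records, assuming each integer is a 32-bit big-endian value.
func decodeMutations(group []byte) {
	total := binary.BigEndian.Uint32(group[:4])
	body := group[4 : 4+total]
	for off := uint32(0); off < total; {
		typ := binary.BigEndian.Uint32(body[off : off+4])
		kLen := binary.BigEndian.Uint32(body[off+4 : off+8])
		vLen := binary.BigEndian.Uint32(body[off+8 : off+12])
		key := body[off+12 : off+12+kLen]
		value := body[off+12+kLen : off+12+kLen+vLen]
		fmt.Println(typ, string(key), string(value))
		off += 12 + kLen + vLen
	}
}

func main() {
	// One Set-style mutation ("k" -> "v"); the type code 0 is illustrative.
	buf := make([]byte, 4+12+1+1)
	binary.BigEndian.PutUint32(buf[0:4], 14)  // group length: 12 + 1 + 1
	binary.BigEndian.PutUint32(buf[4:8], 0)   // type (illustrative)
	binary.BigEndian.PutUint32(buf[8:12], 1)  // kLen
	binary.BigEndian.PutUint32(buf[12:16], 1) // vLen
	buf[16], buf[17] = 'k', 'v'
	decodeMutations(buf)
}
```
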
### Endianness
When the restore procedure decodes a serialized integer from the backup file, it needs to convert the serialized value from big endian to little endian.

The reason is as follows: when the backup procedure transfers the data to the remote blob store, the backup data is encoded in big endian. However, FoundationDB currently only runs on little endian machines. The endianness affects the interpretation of an integer, so we must perform the endianness conversion.

@ -43,7 +43,7 @@ function(add_documentation_target)
  add_custom_command(
    OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/${target}_done
    COMMAND ${CMAKE_COMMAND} -E make_directory ${out_dir} &&
      ${python_command} ${SPHINX_COMMAND} -b ${target}
      ${python_command} ${SPHINX_COMMAND} -W -b ${target}
      -d ${doctree} -c ${sphinx_dir}
      ${sphinx_dir}/source
      ${CMAKE_CURRENT_BINARY_DIR}/${target} &&

@ -70,7 +70,7 @@ buildsphinx:
		cd $(BUILDDIR); \
		curl -OL $(VENV_URL); \
		tar zxvf $(VENV_VERSION).tar.gz; \
		./$(VENV_VERSION)/virtualenv.py venv; \
		python2 ./$(VENV_VERSION)/virtualenv.py venv; \
	fi
	. $(VENVDIR)/bin/activate && \
	cp .pip.conf $(VENVDIR)/pip.conf && \

@ -87,7 +87,7 @@ cleanvirtualenv:
	rm -rf $(VENVDIR)

html: buildsphinx cleanhtml
	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
	$(SPHINXBUILD) -W -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
	@echo
	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."

@ -32,11 +32,11 @@ extensions = [
    'sphinx.ext.ifconfig',
    'brokenrole',
    'relativelink',
    'sphinxcontrib.rubydomain'
    'rubydomain',
]

# Add any paths that contain templates here, relative to this directory.
templates_path = [sys.prefix + '/_templates']
templates_path = []

# The suffix of source filenames.
source_suffix = '.rst'

@ -143,7 +143,7 @@ html_title = 'FoundationDB ' + version
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = [sys.prefix + '/_static']
html_static_path = []

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.

@ -0,0 +1,714 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
sphinx.domains.ruby
|
||||
~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
The Ruby domain.
|
||||
|
||||
:copyright: Copyright 2010 by SHIBUKAWA Yoshiki
|
||||
:license: BSD
|
||||
|
||||
Copyright (c) 2010 by SHIBUKAWA Yoshiki.
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
|
||||
* Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in the
|
||||
documentation and/or other materials provided with the distribution.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
"""
|
||||
|
||||
import re
|
||||
|
||||
from docutils import nodes
|
||||
from docutils.parsers.rst import directives, Directive
|
||||
|
||||
from sphinx import addnodes
|
||||
from sphinx.roles import XRefRole
|
||||
from sphinx.locale import l_, _
|
||||
from sphinx.domains import Domain, ObjType, Index
|
||||
from sphinx.directives import ObjectDescription
|
||||
from sphinx.util.nodes import make_refnode
|
||||
from sphinx.util.docfields import Field, GroupedField, TypedField
|
||||
|
||||
|
||||
# REs for Ruby signatures
|
||||
rb_sig_re = re.compile(
|
||||
r'''^ ([\w.]*\.)? # class name(s)
|
||||
(\$?\w+\??!?) \s* # thing name
|
||||
(?: \((.*)\) # optional: arguments
|
||||
(?:\s* -> \s* (.*))? # return annotation
|
||||
)? $ # and nothing more
|
||||
''', re.VERBOSE)
|
||||
|
||||
rb_paramlist_re = re.compile(r'([\[\],])') # split at '[', ']' and ','
|
||||
|
||||
separators = {
|
||||
'method':'#', 'attr_reader':'#', 'attr_writer':'#', 'attr_accessor':'#',
|
||||
'function':'.', 'classmethod':'.', 'class':'::', 'module':'::',
|
||||
'global':'', 'const':'::'}
|
||||
|
||||
rb_separator = re.compile(r"(?:\w+)?(?:::)?(?:\.)?(?:#)?")
|
||||
|
||||
|
||||
def ruby_rsplit(fullname):
|
||||
items = [item for item in rb_separator.findall(fullname)]
|
||||
return ''.join(items[:-2]), items[-1]
|
||||
|
||||
|
||||
class RubyObject(ObjectDescription):
|
||||
"""
|
||||
Description of a general Ruby object.
|
||||
"""
|
||||
option_spec = {
|
||||
'noindex': directives.flag,
|
||||
'module': directives.unchanged,
|
||||
}
|
||||
|
||||
doc_field_types = [
|
||||
TypedField('parameter', label=l_('Parameters'),
|
||||
names=('param', 'parameter', 'arg', 'argument'),
|
||||
typerolename='obj', typenames=('paramtype', 'type')),
|
||||
TypedField('variable', label=l_('Variables'), rolename='obj',
|
||||
names=('var', 'ivar', 'cvar'),
|
||||
typerolename='obj', typenames=('vartype',)),
|
||||
GroupedField('exceptions', label=l_('Raises'), rolename='exc',
|
||||
names=('raises', 'raise', 'exception', 'except'),
|
||||
can_collapse=True),
|
||||
Field('returnvalue', label=l_('Returns'), has_arg=False,
|
||||
names=('returns', 'return')),
|
||||
Field('returntype', label=l_('Return type'), has_arg=False,
|
||||
names=('rtype',)),
|
||||
]
|
||||
|
||||
    def get_signature_prefix(self, sig):
        """
        May return a prefix to put before the object name in the signature.
        """
        return ''

    def needs_arglist(self):
        """
        May return true if an empty argument list is to be generated even if
        the document contains none.
        """
        return False

    def handle_signature(self, sig, signode):
        """
        Transform a Ruby signature into RST nodes.
        Returns (fully qualified name of the thing, classname if any).

        If inside a class, the current class name is handled intelligently:
        * it is stripped from the displayed name if present
        * it is added to the full name (return value) if not present
        """
        m = rb_sig_re.match(sig)
        if m is None:
            raise ValueError
        name_prefix, name, arglist, retann = m.groups()
        if not name_prefix:
            name_prefix = ""
        # determine module and class name (if applicable), as well as full name
        modname = self.options.get(
            'module', self.env.temp_data.get('rb:module'))
        classname = self.env.temp_data.get('rb:class')
        if self.objtype == 'global':
            add_module = False
            modname = None
            classname = None
            fullname = name
        elif classname:
            add_module = False
            if name_prefix and name_prefix.startswith(classname):
                fullname = name_prefix + name
                # class name is given again in the signature
                name_prefix = name_prefix[len(classname):].lstrip('.')
            else:
                separator = separators[self.objtype]
                fullname = classname + separator + name_prefix + name
        else:
            add_module = True
            if name_prefix:
                classname = name_prefix.rstrip('.')
                fullname = name_prefix + name
            else:
                classname = ''
                fullname = name

        signode['module'] = modname
        signode['class'] = self.class_name = classname
        signode['fullname'] = fullname

        sig_prefix = self.get_signature_prefix(sig)
        if sig_prefix:
            signode += addnodes.desc_annotation(sig_prefix, sig_prefix)

        if name_prefix:
            signode += addnodes.desc_addname(name_prefix, name_prefix)
        # exceptions are a special case, since they are documented in the
        # 'exceptions' module.
        elif add_module and self.env.config.add_module_names:
            if self.objtype == 'global':
                nodetext = ''
                signode += addnodes.desc_addname(nodetext, nodetext)
            else:
                modname = self.options.get(
                    'module', self.env.temp_data.get('rb:module'))
                if modname and modname != 'exceptions':
                    nodetext = modname + separators[self.objtype]
                    signode += addnodes.desc_addname(nodetext, nodetext)

        signode += addnodes.desc_name(name, name)
        if not arglist:
            if self.needs_arglist():
                # for callables, add an empty parameter list
                signode += addnodes.desc_parameterlist()
            if retann:
                signode += addnodes.desc_returns(retann, retann)
            return fullname, name_prefix
        signode += addnodes.desc_parameterlist()

        stack = [signode[-1]]
        for token in rb_paramlist_re.split(arglist):
            if token == '[':
                opt = addnodes.desc_optional()
                stack[-1] += opt
                stack.append(opt)
            elif token == ']':
                try:
                    stack.pop()
                except IndexError:
                    raise ValueError
            elif not token or token == ',' or token.isspace():
                pass
            else:
                token = token.strip()
                stack[-1] += addnodes.desc_parameter(token, token)
        if len(stack) != 1:
            raise ValueError
        if retann:
            signode += addnodes.desc_returns(retann, retann)
        return fullname, name_prefix

    def get_index_text(self, modname, name):
        """
        Return the text for the index entry of the object.
        """
        raise NotImplementedError('must be implemented in subclasses')

    def _is_class_member(self):
        return self.objtype.endswith('method') or self.objtype.startswith('attr')

    def add_target_and_index(self, name_cls, sig, signode):
        if self.objtype == 'global':
            modname = ''
        else:
            modname = self.options.get(
                'module', self.env.temp_data.get('rb:module'))
        separator = separators[self.objtype]
        if self._is_class_member():
            if signode['class']:
                prefix = modname and modname + '::' or ''
            else:
                prefix = modname and modname + separator or ''
        else:
            prefix = modname and modname + separator or ''
        fullname = prefix + name_cls[0]
        # note target
        if fullname not in self.state.document.ids:
            signode['names'].append(fullname)
            signode['ids'].append(fullname)
            signode['first'] = (not self.names)
            self.state.document.note_explicit_target(signode)
            objects = self.env.domaindata['rb']['objects']
            if fullname in objects:
                self.env.warn(
                    self.env.docname,
                    'duplicate object description of %s, ' % fullname +
                    'other instance in ' +
                    self.env.doc2path(objects[fullname][0]),
                    self.lineno)
            objects[fullname] = (self.env.docname, self.objtype)

        indextext = self.get_index_text(modname, name_cls)
        if indextext:
            self.indexnode['entries'].append(('single', indextext,
                                              fullname, fullname, None))

    def before_content(self):
        # needed for automatic qualification of members (reset in subclasses)
        self.clsname_set = False

    def after_content(self):
        if self.clsname_set:
            self.env.temp_data['rb:class'] = None


class RubyModulelevel(RubyObject):
    """
    Description of an object on module level (functions, data).
    """

    def needs_arglist(self):
        return self.objtype == 'function'

    def get_index_text(self, modname, name_cls):
        if self.objtype == 'function':
            if not modname:
                return _('%s() (global function)') % name_cls[0]
            return _('%s() (module function in %s)') % (name_cls[0], modname)
        else:
            return ''


class RubyGloballevel(RubyObject):
    """
    Description of an object at global level (global variables).
"""
|
||||
|
||||
def get_index_text(self, modname, name_cls):
|
||||
if self.objtype == 'global':
|
||||
return _('%s (global variable)') % name_cls[0]
|
||||
else:
|
||||
return ''
|
||||
|
||||
|
||||
class RubyEverywhere(RubyObject):
|
||||
"""
|
||||
Description of a class member (methods, attributes).
|
||||
"""
|
||||
|
||||
def needs_arglist(self):
|
||||
return self.objtype == 'method'
|
||||
|
||||
def get_index_text(self, modname, name_cls):
|
||||
name, cls = name_cls
|
||||
add_modules = self.env.config.add_module_names
|
||||
if self.objtype == 'method':
|
||||
try:
|
||||
clsname, methname = ruby_rsplit(name)
|
||||
except ValueError:
|
||||
if modname:
|
||||
return _('%s() (in module %s)') % (name, modname)
|
||||
else:
|
||||
return '%s()' % name
|
||||
if modname and add_modules:
|
||||
return _('%s() (%s::%s method)') % (methname, modname,
|
||||
clsname)
|
||||
else:
|
||||
return _('%s() (%s method)') % (methname, clsname)
|
||||
else:
|
||||
return ''
|
||||
|
||||
|
||||
class RubyClasslike(RubyObject):
|
||||
"""
|
||||
Description of a class-like object (classes, exceptions).
|
||||
"""
|
||||
|
||||
def get_signature_prefix(self, sig):
|
||||
return self.objtype + ' '
|
||||
|
||||
def get_index_text(self, modname, name_cls):
|
||||
if self.objtype == 'class':
|
||||
if not modname:
|
||||
return _('%s (class)') % name_cls[0]
|
||||
return _('%s (class in %s)') % (name_cls[0], modname)
|
||||
elif self.objtype == 'exception':
|
||||
return name_cls[0]
|
||||
else:
|
||||
return ''
|
||||
|
||||
def before_content(self):
|
||||
RubyObject.before_content(self)
|
||||
if self.names:
|
||||
self.env.temp_data['rb:class'] = self.names[0][0]
|
||||
self.clsname_set = True
|
||||
|
||||
|
||||
class RubyClassmember(RubyObject):
|
||||
"""
|
||||
Description of a class member (methods, attributes).
|
||||
"""
|
||||
|
||||
def needs_arglist(self):
|
||||
return self.objtype.endswith('method')
|
||||
|
||||
def get_signature_prefix(self, sig):
|
||||
if self.objtype == 'classmethod':
|
||||
return "classmethod %s." % self.class_name
|
||||
elif self.objtype == 'attr_reader':
|
||||
return "attribute [R] "
|
||||
elif self.objtype == 'attr_writer':
|
||||
return "attribute [W] "
|
||||
elif self.objtype == 'attr_accessor':
|
||||
return "attribute [R/W] "
|
||||
return ''
|
||||
|
||||
def get_index_text(self, modname, name_cls):
|
||||
name, cls = name_cls
|
||||
add_modules = self.env.config.add_module_names
|
||||
if self.objtype == 'classmethod':
|
||||
try:
|
||||
clsname, methname = ruby_rsplit(name)
|
||||
except ValueError:
|
||||
return '%s()' % name
|
||||
if modname:
|
||||
return _('%s() (%s.%s class method)') % (methname, modname,
|
||||
clsname)
|
||||
else:
|
||||
return _('%s() (%s class method)') % (methname, clsname)
|
||||
elif self.objtype.startswith('attr'):
|
||||
try:
|
||||
clsname, attrname = ruby_rsplit(name)
|
||||
except ValueError:
|
||||
return name
|
||||
if modname and add_modules:
|
||||
return _('%s (%s.%s attribute)') % (attrname, modname, clsname)
|
||||
else:
|
||||
return _('%s (%s attribute)') % (attrname, clsname)
|
||||
else:
|
||||
return ''
|
||||
|
||||
def before_content(self):
|
||||
RubyObject.before_content(self)
|
||||
lastname = self.names and self.names[-1][1]
|
||||
if lastname and not self.env.temp_data.get('rb:class'):
|
||||
self.env.temp_data['rb:class'] = lastname.strip('.')
|
||||
self.clsname_set = True
|
||||
|
||||
|
||||
class RubyModule(Directive):
|
||||
"""
|
||||
Directive to mark description of a new module.
|
||||
"""
|
||||
|
||||
has_content = False
|
||||
required_arguments = 1
|
||||
optional_arguments = 0
|
||||
final_argument_whitespace = False
|
||||
option_spec = {
|
||||
'platform': lambda x: x,
|
||||
'synopsis': lambda x: x,
|
||||
'noindex': directives.flag,
|
||||
'deprecated': directives.flag,
|
||||
}
|
||||
|
||||
def run(self):
|
||||
env = self.state.document.settings.env
|
||||
modname = self.arguments[0].strip()
|
||||
noindex = 'noindex' in self.options
|
||||
env.temp_data['rb:module'] = modname
|
||||
env.domaindata['rb']['modules'][modname] = \
|
||||
(env.docname, self.options.get('synopsis', ''),
|
||||
self.options.get('platform', ''), 'deprecated' in self.options)
|
||||
targetnode = nodes.target('', '', ids=['module-' + modname], ismod=True)
|
||||
self.state.document.note_explicit_target(targetnode)
|
||||
ret = [targetnode]
|
||||
# XXX this behavior of the module directive is a mess...
|
||||
if 'platform' in self.options:
|
||||
platform = self.options['platform']
|
||||
node = nodes.paragraph()
|
||||
node += nodes.emphasis('', _('Platforms: '))
|
||||
node += nodes.Text(platform, platform)
|
||||
ret.append(node)
|
||||
# the synopsis isn't printed; in fact, it is only used in the
|
||||
# modindex currently
|
||||
if not noindex:
|
||||
indextext = _('%s (module)') % modname
|
||||
inode = addnodes.index(entries=[('single', indextext,
|
||||
'module-' + modname, modname, None)])
|
||||
ret.append(inode)
|
||||
return ret
|
||||
|
||||
|
||||
class RubyCurrentModule(Directive):
|
||||
"""
|
||||
This directive is just to tell Sphinx that we're documenting
|
||||
stuff in module foo, but links to module foo won't lead here.
|
||||
"""
|
||||
|
||||
has_content = False
|
||||
required_arguments = 1
|
||||
optional_arguments = 0
|
||||
final_argument_whitespace = False
|
||||
option_spec = {}
|
||||
|
||||
def run(self):
|
||||
env = self.state.document.settings.env
|
||||
modname = self.arguments[0].strip()
|
||||
if modname == 'None':
|
||||
env.temp_data['rb:module'] = None
|
||||
else:
|
||||
env.temp_data['rb:module'] = modname
|
||||
return []
|
||||
|
||||
|
||||
class RubyXRefRole(XRefRole):
|
||||
def process_link(self, env, refnode, has_explicit_title, title, target):
|
||||
if not has_explicit_title:
|
||||
title = title.lstrip('.') # only has a meaning for the target
|
||||
title = title.lstrip('#')
|
||||
if title.startswith("::"):
|
||||
title = title[2:]
|
||||
target = target.lstrip('~') # only has a meaning for the title
|
||||
# if the first character is a tilde, don't display the module/class
|
||||
# parts of the contents
|
||||
if title[0:1] == '~':
|
||||
m = re.search(r"(?:\.)?(?:#)?(?:::)?(.*)\Z", title)
|
||||
if m:
|
||||
title = m.group(1)
|
||||
if not title.startswith("$"):
|
||||
refnode['rb:module'] = env.temp_data.get('rb:module')
|
||||
refnode['rb:class'] = env.temp_data.get('rb:class')
|
||||
# if the first character is a dot, search more specific namespaces first
|
||||
# else search builtins first
|
||||
if target[0:1] == '.':
|
||||
target = target[1:]
|
||||
refnode['refspecific'] = True
|
||||
return title, target
|
||||
|
||||
|
||||
class RubyModuleIndex(Index):
|
||||
"""
|
||||
Index subclass to provide the Ruby module index.
|
||||
"""
|
||||
|
||||
name = 'modindex'
|
||||
localname = l_('Ruby Module Index')
|
||||
shortname = l_('modules')
|
||||
|
||||
def generate(self, docnames=None):
|
||||
content = {}
|
||||
# list of prefixes to ignore
|
||||
ignores = self.domain.env.config['modindex_common_prefix']
|
||||
ignores = sorted(ignores, key=len, reverse=True)
|
||||
# list of all modules, sorted by module name
|
||||
modules = sorted(self.domain.data['modules'].iteritems(),
|
||||
key=lambda x: x[0].lower())
|
||||
        # sort out collapsible modules
        prev_modname = ''
        num_toplevels = 0
        for modname, (docname, synopsis, platforms, deprecated) in modules:
            if docnames and docname not in docnames:
                continue

            for ignore in ignores:
                if modname.startswith(ignore):
                    modname = modname[len(ignore):]
                    stripped = ignore
                    break
            else:
                stripped = ''

            # we stripped the whole module name?
            if not modname:
                modname, stripped = stripped, ''

            entries = content.setdefault(modname[0].lower(), [])

            package = modname.split('::')[0]
            if package != modname:
                # it's a submodule
                if prev_modname == package:
                    # first submodule - make parent a group head
                    entries[-1][1] = 1
                elif not prev_modname.startswith(package):
                    # submodule without parent in list, add dummy entry
                    entries.append([stripped + package, 1, '', '', '', '', ''])
                subtype = 2
            else:
                num_toplevels += 1
                subtype = 0

            qualifier = deprecated and _('Deprecated') or ''
            entries.append([stripped + modname, subtype, docname,
                            'module-' + stripped + modname, platforms,
                            qualifier, synopsis])
            prev_modname = modname

        # apply heuristics when to collapse modindex at page load:
        # only collapse if number of toplevel modules is larger than
        # number of submodules
        collapse = len(modules) - num_toplevels < num_toplevels

        # sort by first letter
        content = sorted(content.iteritems())

        return content, collapse


class RubyDomain(Domain):
    """Ruby language domain."""
    name = 'rb'
    label = 'Ruby'
    object_types = {
        'function': ObjType(l_('function'), 'func', 'obj'),
        'global': ObjType(l_('global variable'), 'global', 'obj'),
        'method': ObjType(l_('method'), 'meth', 'obj'),
        'class': ObjType(l_('class'), 'class', 'obj'),
        'exception': ObjType(l_('exception'), 'exc', 'obj'),
        'classmethod': ObjType(l_('class method'), 'meth', 'obj'),
        'attr_reader': ObjType(l_('attribute'), 'attr', 'obj'),
        'attr_writer': ObjType(l_('attribute'), 'attr', 'obj'),
        'attr_accessor': ObjType(l_('attribute'), 'attr', 'obj'),
        'const': ObjType(l_('const'), 'const', 'obj'),
        'module': ObjType(l_('module'), 'mod', 'obj'),
    }

    directives = {
        'function': RubyModulelevel,
        'global': RubyGloballevel,
        'method': RubyEverywhere,
        'const': RubyEverywhere,
        'class': RubyClasslike,
        'exception': RubyClasslike,
        'classmethod': RubyClassmember,
        'attr_reader': RubyClassmember,
        'attr_writer': RubyClassmember,
        'attr_accessor': RubyClassmember,
        'module': RubyModule,
        'currentmodule': RubyCurrentModule,
    }

    roles = {
        'func': RubyXRefRole(fix_parens=False),
        'global': RubyXRefRole(),
        'class': RubyXRefRole(),
        'exc': RubyXRefRole(),
        'meth': RubyXRefRole(fix_parens=False),
        'attr': RubyXRefRole(),
        'const': RubyXRefRole(),
        'mod': RubyXRefRole(),
        'obj': RubyXRefRole(),
    }
    initial_data = {
        'objects': {},  # fullname -> docname, objtype
        'modules': {},  # modname -> docname, synopsis, platform, deprecated
    }
    indices = [
        RubyModuleIndex,
    ]

    def clear_doc(self, docname):
        for fullname, (fn, _) in self.data['objects'].items():
            if fn == docname:
                del self.data['objects'][fullname]
        for modname, (fn, _, _, _) in self.data['modules'].items():
            if fn == docname:
                del self.data['modules'][modname]

    def find_obj(self, env, modname, classname, name, type, searchorder=0):
        """
        Find a Ruby object for "name", perhaps using the given module and/or
        classname.
        """
        # skip parens
        if name[-2:] == '()':
            name = name[:-2]

        if not name:
            return None, None

        objects = self.data['objects']

        newname = None
        if searchorder == 1:
            if modname and classname and \
                    modname + '::' + classname + '#' + name in objects:
                newname = modname + '::' + classname + '#' + name
            elif modname and classname and \
                    modname + '::' + classname + '.' + name in objects:
                newname = modname + '::' + classname + '.' + name
            elif modname and modname + '::' + name in objects:
                newname = modname + '::' + name
            elif modname and modname + '#' + name in objects:
                newname = modname + '#' + name
            elif modname and modname + '.' + name in objects:
                newname = modname + '.' + name
            elif classname and classname + '.' + name in objects:
                newname = classname + '.' + name
            elif classname and classname + '#' + name in objects:
                newname = classname + '#' + name
            elif name in objects:
                newname = name
        else:
            if name in objects:
                newname = name
            elif classname and classname + '.' + name in objects:
                newname = classname + '.' + name
            elif classname and classname + '#' + name in objects:
                newname = classname + '#' + name
            elif modname and modname + '::' + name in objects:
                newname = modname + '::' + name
            elif modname and modname + '#' + name in objects:
                newname = modname + '#' + name
            elif modname and modname + '.' + name in objects:
                newname = modname + '.' + name
            elif modname and classname and \
                    modname + '::' + classname + '#' + name in objects:
                newname = modname + '::' + classname + '#' + name
            elif modname and classname and \
                    modname + '::' + classname + '.' + name in objects:
                newname = modname + '::' + classname + '.' + name
            # special case: object methods
            elif type in ('func', 'meth') and '.' not in name and \
                    'object.' + name in objects:
                newname = 'object.' + name
        if newname is None:
            return None, None
        return newname, objects[newname]

    def resolve_xref(self, env, fromdocname, builder,
                     typ, target, node, contnode):
        if (typ == 'mod' or
                typ == 'obj' and target in self.data['modules']):
            docname, synopsis, platform, deprecated = \
                self.data['modules'].get(target, ('', '', '', ''))
            if not docname:
                return None
            else:
                title = '%s%s%s' % ((platform and '(%s) ' % platform),
                                    synopsis,
                                    (deprecated and ' (deprecated)' or ''))
                return make_refnode(builder, fromdocname, docname,
                                    'module-' + target, contnode, title)
        else:
            modname = node.get('rb:module')
            clsname = node.get('rb:class')
            searchorder = node.hasattr('refspecific') and 1 or 0
            name, obj = self.find_obj(env, modname, clsname,
                                      target, typ, searchorder)
            if not obj:
                return None
            else:
                return make_refnode(builder, fromdocname, obj[0], name,
                                    contnode, name)

    def get_objects(self):
        for modname, info in self.data['modules'].iteritems():
            yield (modname, modname, 'module', info[0], 'module-' + modname, 0)
        for refname, (docname, type) in self.data['objects'].iteritems():
            yield (refname, refname, type, docname, refname, 1)


def setup(app):
    app.add_domain(RubyDomain)

@@ -1,5 +1,4 @@
--index-url https://pypi.python.org/simple
sphinx==1.5.6
sphinx-bootstrap-theme==0.4.8
pygments-style-solarized
sphinxcontrib-rubydomain==0.1dev-20100804
pygments-style-solarized

@@ -232,7 +232,7 @@ The procedures for adding and removing machines can be combined into a recipe fo
Converting an existing cluster to use TLS
=========================================

A FoundationDB cluster has the option of supporting :doc:`Transport Layer Security (TLS) <tls>`. To enable TLS on an existing, non-TLS cluster, see :ref:`Converting a running cluster <converting-existing-cluster>`.
A FoundationDB cluster has the option of supporting :doc:`Transport Layer Security (TLS) <tls>`. To enable TLS on an existing, non-TLS cluster, see :ref:`Converting a running cluster <converting-existing-cluster-after-6.1>`.

.. _administration-monitoring-cluster-status:

@@ -37,12 +37,22 @@
.. |node-subspace| replace:: FIXME
.. |content-subspace| replace:: FIXME
.. |allow-manual-prefixes| replace:: FIXME
.. |retry-limit-transaction-option| replace:: FIXME
.. |timeout-transaction-option| replace:: FIXME
.. |max-retry-delay-transaction-option| replace:: FIXME
.. |snapshot-ryw-enable-transaction-option| replace:: FIXME
.. |snapshot-ryw-disable-transaction-option| replace:: FIXME
.. |snapshot-ryw-enable-database-option| replace:: FIXME
.. |snapshot-ryw-disable-database-option| replace:: FIXME
.. |retry-limit-database-option| replace:: FIXME
.. |max-retry-delay-database-option| replace:: FIXME
.. |timeout-database-option| replace:: FIXME

.. include:: api-common.rst.inc

.. |future-warning| replace:: :data:`future` must represent a result of the appropriate type (i.e. must have been returned by a function documented as returning this type), or the results are undefined.
.. |future-warning| replace:: ``future`` must represent a result of the appropriate type (i.e. must have been returned by a function documented as returning this type), or the results are undefined.

.. |future-get-return1| replace:: Returns zero if :data:`future` is ready and not in an error state, and a non-zero :ref:`error code <developer-guide-error-codes>` otherwise
.. |future-get-return1| replace:: Returns zero if ``future`` is ready and not in an error state, and a non-zero :ref:`error code <developer-guide-error-codes>` otherwise

.. |future-get-return2| replace:: (in which case the value of any out parameter is undefined)

@@ -74,9 +84,9 @@

.. |snapshot| replace:: Non-zero if this is a :ref:`snapshot read <snapshots>`.

.. |sets-and-clears1| replace:: Modify the database snapshot represented by :data:`transaction`
.. |sets-and-clears1| replace:: Modify the database snapshot represented by ``transaction``

.. |sets-and-clears2| replace:: The modification affects the actual database only if :data:`transaction` is later committed with :func:`fdb_transaction_commit()`.
.. |sets-and-clears2| replace:: The modification affects the actual database only if ``transaction`` is later committed with :func:`fdb_transaction_commit()`.

=====
C API

@@ -105,7 +115,7 @@ The FoundationDB C bindings are provided as a shared object which may be linked
Linux
-----

When linking against ``libfdb_c.so``, you must also link against ``libm``, ``libpthread`` and ``librt``. These dependencies will be resolved by the dynamic linker when using this API via :func:`dlopen()` or an FFI.
When linking against ``libfdb_c.so``, you must also link against ``libm``, ``libpthread`` and ``librt``. These dependencies will be resolved by the dynamic linker when using this API via ``dlopen()`` or an FFI.

macOS
--------

@@ -115,37 +125,37 @@ When linking against ``libfdb_c.dylib``, no additional libraries are required.
API versioning
==============

Prior to including ``fdb_c.h``, you must define the :macro:`FDB_API_VERSION` macro. This, together with the :func:`fdb_select_api_version()` function, allows programs written against an older version of the API to compile and run with newer versions of the C library. The current version of the FoundationDB C API is |api-version|. ::
Prior to including ``fdb_c.h``, you must define the ``FDB_API_VERSION`` macro. This, together with the :func:`fdb_select_api_version()` function, allows programs written against an older version of the API to compile and run with newer versions of the C library. The current version of the FoundationDB C API is |api-version|. ::

    #define FDB_API_VERSION 610
    #include <foundationdb/fdb_c.h>

.. function:: fdb_error_t fdb_select_api_version(int version)

    Must be called before any other API functions. :data:`version` must be less than or equal to :macro:`FDB_API_VERSION` (and should almost always be equal).
    Must be called before any other API functions. ``version`` must be less than or equal to ``FDB_API_VERSION`` (and should almost always be equal).

    Language bindings implemented in C which themselves expose API versioning will usually pass the version requested by the application, instead of always passing :macro:`FDB_API_VERSION`.
    Language bindings implemented in C which themselves expose API versioning will usually pass the version requested by the application, instead of always passing ``FDB_API_VERSION``.

    Passing a version less than :macro:`FDB_API_VERSION` will cause the API to behave as it did in the older version.
    Passing a version less than ``FDB_API_VERSION`` will cause the API to behave as it did in the older version.

    It is an error to call this function after it has returned successfully. It is not thread safe, and if called from more than one thread simultaneously its behavior is undefined.

    .. note:: This is actually implemented as a macro. If you are accessing this API via :func:`dlopen()` or an FFI, you will need to use :func:`fdb_select_api_version_impl()`.
    .. note:: This is actually implemented as a macro. If you are accessing this API via ``dlopen()`` or an FFI, you will need to use :func:`fdb_select_api_version_impl()`.

    .. warning:: |api-version-multi-version-warning|

.. function:: fdb_error_t fdb_select_api_version_impl(int runtime_version, int header_version)

    This is the actual entry point called by the :func:`fdb_select_api_version` macro. It should never be called directly from C, but if you are accessing this API via :func:`dlopen()` or an FFI, you will need to use it. ``fdb_select_api_version(v)`` is equivalent to ``fdb_select_api_version_impl(v, FDB_API_VERSION)``.
    This is the actual entry point called by the :func:`fdb_select_api_version` macro. It should never be called directly from C, but if you are accessing this API via ``dlopen()`` or an FFI, you will need to use it. ``fdb_select_api_version(v)`` is equivalent to ``fdb_select_api_version_impl(v, FDB_API_VERSION)``.

    It is an error to call this function after it has returned successfully. It is not thread safe, and if called from more than one thread simultaneously its behavior is undefined.

    :data:`runtime_version`
        The version of run-time behavior the API is requested to provide. Must be less than or equal to :data:`header_version`, and should almost always be equal.
    ``runtime_version``
        The version of run-time behavior the API is requested to provide. Must be less than or equal to ``header_version``, and should almost always be equal.

        Language bindings which themselves expose API versioning will usually pass the version requested by the application.

    :data:`header_version`
    ``header_version``
        The version of the ABI (application binary interface) that the calling code expects to find in the shared library. If you are using an FFI, this *must* correspond to the version of the API you are using as a reference (currently |api-version|). For example, the number of arguments that a function takes may be affected by this value, and an incorrect value is unlikely to yield success.

    .. warning:: |api-version-multi-version-warning|
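
Putting this together, a minimal startup sketch might look like the following (a sketch only; error handling is abbreviated, and :func:`fdb_get_error()` is used to render the error code as a string)::

    #define FDB_API_VERSION 610
    #include <foundationdb/fdb_c.h>
    #include <stdio.h>

    int main(void) {
        /* Must be the first fdb_* call in the process. */
        fdb_error_t err = fdb_select_api_version(FDB_API_VERSION);
        if (err) {
            fprintf(stderr, "fdb_select_api_version: %s\n", fdb_get_error(err));
            return 1;
        }
        /* ... continue with fdb_setup_network() and fdb_run_network() ... */
        return 0;
    }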

@@ -263,7 +273,7 @@ See :ref:`developer-guide-programming-with-futures` for further (language-indepe

.. type:: FDBCallback

    A pointer to a function which takes :type:`FDBFuture*` and :type:`void*` and returns :type:`void`.
    A pointer to a function which takes :type:`FDBFuture*` and ``void*`` and returns ``void``.
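
    As an illustration, a callback of this type registered with :func:`fdb_future_set_callback()` could be sketched as follows (``on_ready`` and ``register_callback`` are hypothetical names)::

        /* Matches FDBCallback: takes FDBFuture* and void*, returns void. It may
           be invoked on the network thread, so it should do minimal work. */
        void on_ready(FDBFuture* future, void* user_data) {
            fdb_error_t err = fdb_future_get_error(future);
            if (err) {
                /* record the error for the requesting thread to handle */
            }
        }

        void register_callback(FDBFuture* f) {
            fdb_future_set_callback(f, on_ready, NULL);
        }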
.. function:: void fdb_future_release_memory(FDBFuture* future)
|
||||
|
||||
|

@@ -279,13 +289,13 @@ See :ref:`developer-guide-programming-with-futures` for further (language-indepe

.. function:: fdb_error_t fdb_future_get_version(FDBFuture* future, int64_t* out_version)

    Extracts a value of type version from an :type:`FDBFuture` into a caller-provided variable of type :type:`int64_t`. |future-warning|
    Extracts a version from an :type:`FDBFuture` into a caller-provided variable of type ``int64_t``. |future-warning|

    |future-get-return1| |future-get-return2|.

.. function:: fdb_error_t fdb_future_get_key(FDBFuture* future, uint8_t const** out_key, int* out_key_length)

    Extracts a value of type key from an :type:`FDBFuture` into caller-provided variables of type :type:`uint8_t*` (a pointer to the beginning of the key) and :type:`int` (the length of the key). |future-warning|
    Extracts a key from an :type:`FDBFuture` into caller-provided variables of type ``uint8_t*`` (a pointer to the beginning of the key) and ``int`` (the length of the key). |future-warning|

    |future-get-return1| |future-get-return2|.

@@ -297,13 +307,13 @@ See :ref:`developer-guide-programming-with-futures` for further (language-indepe

    |future-get-return1| |future-get-return2|.

    :data:`*out_present`
    ``*out_present``
        Set to non-zero if (and only if) the requested value was present in the database. (If zero, the other outputs are meaningless.)

    :data:`*out_value`
    ``*out_value``
        Set to point to the first byte of the value.

    :data:`*out_value_length`
    ``*out_value_length``
        Set to the length of the value (in bytes).

    |future-memory-mine|
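
    A sketch of unpacking such a future (assuming ``f`` was returned by :func:`fdb_transaction_get()`; error handling abbreviated)::

        fdb_bool_t present;
        uint8_t const* value;
        int value_length;

        fdb_future_block_until_ready(f);  /* wait until the result is available */
        fdb_error_t err = fdb_future_get_value(f, &present, &value, &value_length);
        if (!err && present) {
            /* `value` points at value_length bytes owned by the future;
               copy them out before destroying it. */
        }
        fdb_future_destroy(f);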

@@ -314,10 +324,10 @@ See :ref:`developer-guide-programming-with-futures` for further (language-indepe

    |future-get-return1| |future-get-return2|.

    :data:`*out_strings`
    ``*out_strings``
        Set to point to the first string in the array.

    :data:`*out_count`
    ``*out_count``
        Set to the number of strings in the array.

    |future-memory-mine|

@@ -328,13 +338,13 @@ See :ref:`developer-guide-programming-with-futures` for further (language-indepe

    |future-get-return1| |future-get-return2|.

    :data:`*out_kv`
    ``*out_kv``
        Set to point to the first :type:`FDBKeyValue` object in the array.

    :data:`*out_count`
    ``*out_count``
        Set to the number of :type:`FDBKeyValue` objects in the array.

    :data:`*out_more`
    ``*out_more``
        Set to true if (but not necessarily only if) values remain in the *key* range requested (possibly beyond the limits requested).

    |future-memory-mine|
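
    For instance, iterating over the returned array might be sketched as (``f`` assumed to come from :func:`fdb_transaction_get_range()`)::

        FDBKeyValue const* kv;
        int count;
        fdb_bool_t more;

        fdb_future_block_until_ready(f);
        if (!fdb_future_get_keyvalue_array(f, &kv, &count, &more)) {
            for (int i = 0; i < count; ++i) {
                /* kv[i].key/kv[i].key_length and kv[i].value/kv[i].value_length
                   stay valid until fdb_future_destroy(f) is called. */
            }
        }
        fdb_future_destroy(f);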

@@ -350,17 +360,17 @@ See :ref:`developer-guide-programming-with-futures` for further (language-indepe
        int value_length;
    } FDBKeyValue;

    :data:`key`
    ``key``
        A pointer to a key.

    :data:`key_length`
        The length of the key pointed to by :data:`key`.
    ``key_length``
        The length of the key pointed to by ``key``.

    :data:`value`
    ``value``
        A pointer to a value.

    :data:`value_length`
        The length of the value pointed to by :data:`value`.
    ``value_length``
        The length of the value pointed to by ``value``.

Database
========

@@ -375,10 +385,10 @@ An |database-blurb1| Modifications to a database are performed via transactions.

    Creates a new database connected to the specified cluster. The caller assumes ownership of the :type:`FDBDatabase` object and must destroy it with :func:`fdb_database_destroy()`.

    :data:`cluster_file_path`
    ``cluster_file_path``
        A NULL-terminated string giving a local path of a :ref:`cluster file <foundationdb-cluster-file>` (often called 'fdb.cluster') which contains connection information for the FoundationDB cluster. If cluster_file_path is NULL or an empty string, then a :ref:`default cluster file <default-cluster-file>` will be used.

    :data:`*out_database`
    ``*out_database``
        Set to point to the newly created :type:`FDBDatabase`.
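
    A sketch of opening a database with the default cluster file (error handling abbreviated)::

        FDBDatabase* db = NULL;
        /* Passing NULL selects the default cluster file. */
        fdb_error_t err = fdb_create_database(NULL, &db);
        if (err) {
            fprintf(stderr, "fdb_create_database: %s\n", fdb_get_error(err));
        } else {
            /* ... create transactions on db ..., then: */
            fdb_database_destroy(db);
        }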

.. function:: void fdb_database_destroy(FDBDatabase* database)

@@ -397,7 +407,7 @@ An |database-blurb1| Modifications to a database are performed via transactions.

    Creates a new transaction on the given database. The caller assumes ownership of the :type:`FDBTransaction` object and must destroy it with :func:`fdb_transaction_destroy()`.

    :data:`*out_transaction`
    ``*out_transaction``
        Set to point to the newly created :type:`FDBTransaction`.

Transaction

@@ -439,75 +449,75 @@ Applications must provide error handling and an appropriate retry loop around th

.. function:: FDBFuture* fdb_transaction_get(FDBTransaction* transaction, uint8_t const* key_name, int key_name_length, fdb_bool_t snapshot)

    Reads a value from the database snapshot represented by :data:`transaction`.
    Reads a value from the database snapshot represented by ``transaction``.

    |future-return0| the value of :data:`key_name` in the database. |future-return1| call :func:`fdb_future_get_value()` to extract the value, |future-return2|
    |future-return0| the value of ``key_name`` in the database. |future-return1| call :func:`fdb_future_get_value()` to extract the value, |future-return2|

    See :func:`fdb_future_get_value()` to see exactly how results are unpacked. If :data:`key_name` is not present in the database, the result is not an error, but a zero for :data:`*out_present` returned from that function.
    See :func:`fdb_future_get_value()` to see exactly how results are unpacked. If ``key_name`` is not present in the database, the result is not an error, but a zero for ``*out_present`` returned from that function.

    :data:`key_name`
    ``key_name``
        A pointer to the name of the key to be looked up in the database. |no-null|

    :data:`key_name_length`
        |length-of| :data:`key_name`.
    ``key_name_length``
        |length-of| ``key_name``.

    :data:`snapshot`
    ``snapshot``
        |snapshot|

.. function:: FDBFuture* fdb_transaction_get_key(FDBTransaction* transaction, uint8_t const* key_name, int key_name_length, fdb_bool_t or_equal, int offset, fdb_bool_t snapshot)

    Resolves a :ref:`key selector <key-selectors>` against the keys in the database snapshot represented by :data:`transaction`.
    Resolves a :ref:`key selector <key-selectors>` against the keys in the database snapshot represented by ``transaction``.

    |future-return0| the key in the database matching the :ref:`key selector <key-selectors>`. |future-return1| call :func:`fdb_future_get_key()` to extract the key, |future-return2|

    :data:`key_name`, :data:`key_name_length`, :data:`or_equal`, :data:`offset`
    ``key_name``, ``key_name_length``, ``or_equal``, ``offset``
        The four components of a :ref:`key selector <key-selectors>`.

    :data:`snapshot`
    ``snapshot``
        |snapshot|

.. function:: FDBFuture* fdb_transaction_get_addresses_for_key(FDBTransaction* transaction, uint8_t const* key_name, int key_name_length)

    Returns a list of public network addresses as strings, one for each of the storage servers responsible for storing :data:`key_name` and its associated value.
    Returns a list of public network addresses as strings, one for each of the storage servers responsible for storing ``key_name`` and its associated value.

    |future-return0| an array of strings. |future-return1| call :func:`fdb_future_get_string_array()` to extract the string array, |future-return2|

    :data:`key_name`
    ``key_name``
        A pointer to the name of the key whose location is to be queried.

    :data:`key_name_length`
        |length-of| :data:`key_name`.
    ``key_name_length``
        |length-of| ``key_name``.

.. |range-limited-by| replace:: If this limit was reached before the end of the specified range, then the :data:`*more` return of :func:`fdb_future_get_keyvalue_array()` will be set to a non-zero value.
.. |range-limited-by| replace:: If this limit was reached before the end of the specified range, then the ``*more`` return of :func:`fdb_future_get_keyvalue_array()` will be set to a non-zero value.

.. function:: FDBFuture* fdb_transaction_get_range(FDBTransaction* transaction, uint8_t const* begin_key_name, int begin_key_name_length, fdb_bool_t begin_or_equal, int begin_offset, uint8_t const* end_key_name, int end_key_name_length, fdb_bool_t end_or_equal, int end_offset, int limit, int target_bytes, FDBStreamingMode mode, int iteration, fdb_bool_t snapshot, fdb_bool_t reverse)

    Reads all key-value pairs in the database snapshot represented by :data:`transaction` (potentially limited by :data:`limit`, :data:`target_bytes`, or :data:`mode`) which have a key lexicographically greater than or equal to the key resolved by the begin :ref:`key selector <key-selectors>` and lexicographically less than the key resolved by the end :ref:`key selector <key-selectors>`.
    Reads all key-value pairs in the database snapshot represented by ``transaction`` (potentially limited by ``limit``, ``target_bytes``, or ``mode``) which have a key lexicographically greater than or equal to the key resolved by the begin :ref:`key selector <key-selectors>` and lexicographically less than the key resolved by the end :ref:`key selector <key-selectors>`.

    |future-return0| an :type:`FDBKeyValue` array. |future-return1| call :func:`fdb_future_get_keyvalue_array()` to extract the key-value array, |future-return2|

    :data:`begin_key_name`, :data:`begin_key_name_length`, :data:`begin_or_equal`, :data:`begin_offset`
    ``begin_key_name``, ``begin_key_name_length``, ``begin_or_equal``, ``begin_offset``
        The four components of a :ref:`key selector <key-selectors>` describing the beginning of the range.

    :data:`end_key_name`, :data:`end_key_name_length`, :data:`end_or_equal`, :data:`end_offset`
    ``end_key_name``, ``end_key_name_length``, ``end_or_equal``, ``end_offset``
        The four components of a :ref:`key selector <key-selectors>` describing the end of the range.

    :data:`limit`
    ``limit``
        If non-zero, indicates the maximum number of key-value pairs to return. |range-limited-by|

    :data:`target_bytes`
    ``target_bytes``
        If non-zero, indicates a (soft) cap on the combined number of bytes of keys and values to return. |range-limited-by|

    :data:`mode`
    ``mode``
        One of the :type:`FDBStreamingMode` values indicating how the caller would like the data in the range returned.

    :data:`iteration`
        If :data:`mode` is :data:`FDB_STREAMING_MODE_ITERATOR`, this parameter should start at 1 and be incremented by 1 for each successive call while reading this range. In all other cases it is ignored.
    ``iteration``
        If ``mode`` is ``FDB_STREAMING_MODE_ITERATOR``, this parameter should start at 1 and be incremented by 1 for each successive call while reading this range. In all other cases it is ignored.

    :data:`snapshot`
    ``snapshot``
        |snapshot|

    :data:`reverse`
    ``reverse``

        If non-zero, key-value pairs will be returned in reverse lexicographical order beginning at the end of the range.

@@ -515,31 +525,31 @@ Applications must provide error handling and an appropriate retry loop around th

    An enumeration of available streaming modes to be passed to :func:`fdb_transaction_get_range()`.

    :data:`FDB_STREAMING_MODE_ITERATOR`
    ``FDB_STREAMING_MODE_ITERATOR``

        The caller is implementing an iterator (most likely in a binding to a higher level language). The amount of data returned depends on the value of the :data:`iteration` parameter to :func:`fdb_transaction_get_range()`.
        The caller is implementing an iterator (most likely in a binding to a higher level language). The amount of data returned depends on the value of the ``iteration`` parameter to :func:`fdb_transaction_get_range()`.

    :data:`FDB_STREAMING_MODE_SMALL`
    ``FDB_STREAMING_MODE_SMALL``

        Data is returned in small batches (not much more expensive than reading individual key-value pairs).

    :data:`FDB_STREAMING_MODE_MEDIUM`
    ``FDB_STREAMING_MODE_MEDIUM``

        Data is returned in batches between _SMALL and _LARGE.

    :data:`FDB_STREAMING_MODE_LARGE`
    ``FDB_STREAMING_MODE_LARGE``

        Data is returned in batches large enough to be, in a high-concurrency environment, nearly as efficient as possible. If the caller does not need the entire range, some disk and network bandwidth may be wasted. The batch size may still be too small to allow a single client to get high throughput from the database.

    :data:`FDB_STREAMING_MODE_SERIAL`
    ``FDB_STREAMING_MODE_SERIAL``

        Data is returned in batches large enough that an individual client can get reasonable read bandwidth from the database. If the caller does not need the entire range, considerable disk and network bandwidth may be wasted.

    :data:`FDB_STREAMING_MODE_WANT_ALL`
    ``FDB_STREAMING_MODE_WANT_ALL``

        The caller intends to consume the entire range and would like it all transferred as early as possible.

    :data:`FDB_STREAMING_MODE_EXACT`
    ``FDB_STREAMING_MODE_EXACT``

        The caller has passed a specific row limit and wants that many rows delivered in a single batch.
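
    As an illustration, a single ``WANT_ALL`` range read over the keys in ``["a", "b")`` might be issued as follows (a sketch; ``tr`` is assumed to be a valid :type:`FDBTransaction*`, and the ``FDB_KEYSEL_FIRST_GREATER_OR_EQUAL`` key-selector convenience macro is provided by ``fdb_c.h``)::

        FDBFuture* f = fdb_transaction_get_range(
            tr,
            FDB_KEYSEL_FIRST_GREATER_OR_EQUAL((uint8_t const*)"a", 1), /* begin */
            FDB_KEYSEL_FIRST_GREATER_OR_EQUAL((uint8_t const*)"b", 1), /* end   */
            0,                           /* limit: no row limit                 */
            0,                           /* target_bytes: no byte limit         */
            FDB_STREAMING_MODE_WANT_ALL, /* transfer the whole range eagerly    */
            0,                           /* iteration: only used in ITERATOR    */
            0,                           /* snapshot: an ordinary read          */
            0);                          /* reverse: ascending order            */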

@@ -549,17 +559,17 @@ Applications must provide error handling and an appropriate retry loop around th

    |sets-and-clears2|

    :data:`key_name`
    ``key_name``
        A pointer to the name of the key to be inserted into the database. |no-null|

    :data:`key_name_length`
        |length-of| :data:`key_name`.
    ``key_name_length``
        |length-of| ``key_name``.

    :data:`value`
    ``value``
        A pointer to the value to be inserted into the database. |no-null|

    :data:`value_length`
        |length-of| :data:`value`.
    ``value_length``
        |length-of| ``value``.

.. function:: void fdb_transaction_clear(FDBTransaction* transaction, uint8_t const* key_name, int key_name_length)

@@ -567,11 +577,11 @@ Applications must provide error handling and an appropriate retry loop around th

    |sets-and-clears2|

    :data:`key_name`
    ``key_name``
        A pointer to the name of the key to be removed from the database. |no-null|

    :data:`key_name_length`
        |length-of| :data:`key_name`.
    ``key_name_length``
        |length-of| ``key_name``.
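
    For example, a blind write and a delete within one transaction might be sketched as (``tr`` assumed valid; the key and value literals are illustrative)::

        fdb_transaction_set(tr,
                            (uint8_t const*)"color", 5,  /* key_name, key_name_length */
                            (uint8_t const*)"blue", 4);  /* value, value_length       */
        fdb_transaction_clear(tr,
                              (uint8_t const*)"stale-key", 9);
        /* Neither call can fail; the effects become durable only on a
           successful fdb_transaction_commit(). */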

.. function:: void fdb_transaction_clear_range(FDBTransaction* transaction, uint8_t const* begin_key_name, int begin_key_name_length, uint8_t const* end_key_name, int end_key_name_length)

@@ -579,17 +589,17 @@ Applications must provide error handling and an appropriate retry loop around th

    |sets-and-clears2|

    :data:`begin_key_name`
    ``begin_key_name``
        A pointer to the name of the key specifying the beginning of the range to clear. |no-null|

    :data:`begin_key_name_length`
        |length-of| :data:`begin_key_name`.
    ``begin_key_name_length``
        |length-of| ``begin_key_name``.

    :data:`end_key_name`
    ``end_key_name``
        A pointer to the name of the key specifying the end of the range to clear. |no-null|

    :data:`end_key_name_length`
        |length-of| :data:`end_key_name_length`.
    ``end_key_name_length``
        |length-of| ``end_key_name``.

.. function:: void fdb_transaction_atomic_op(FDBTransaction* transaction, uint8_t const* key_name, int key_name_length, uint8_t const* param, int param_length, FDBMutationType operationType)

@@ -605,64 +615,64 @@ Applications must provide error handling and an appropriate retry loop around th

    |sets-and-clears2|

    :data:`key_name`
    ``key_name``
        A pointer to the name of the key whose value is to be mutated.

    :data:`key_name_length`
        |length-of| :data:`key_name`.
    ``key_name_length``
        |length-of| ``key_name``.

    :data:`param`
        A pointer to the parameter with which the atomic operation will mutate the value associated with :data:`key_name`.
    ``param``
        A pointer to the parameter with which the atomic operation will mutate the value associated with ``key_name``.

    :data:`param_length`
        |length-of| :data:`param`.
    ``param_length``
        |length-of| ``param``.

    :data:`operation_type`
    ``operation_type``
        One of the :type:`FDBMutationType` values indicating which operation should be performed.

.. type:: FDBMutationType

    An enumeration of available opcodes to be passed to :func:`fdb_transaction_atomic_op()`

    :data:`FDB_MUTATION_TYPE_ADD`
    ``FDB_MUTATION_TYPE_ADD``

        |atomic-add1|

        |atomic-add2|
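
        For example, incrementing a 64-bit counter could be sketched as follows (``tr`` and the key name are illustrative; the 8-byte parameter encodes the addend in little-endian order)::

            /* The addend 1 as an 8-byte little-endian integer. */
            uint8_t const one[8] = { 1, 0, 0, 0, 0, 0, 0, 0 };
            fdb_transaction_atomic_op(tr,
                                      (uint8_t const*)"counter", 7, /* key_name, key_name_length */
                                      one, sizeof(one),             /* param, param_length       */
                                      FDB_MUTATION_TYPE_ADD);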

    :data:`FDB_MUTATION_TYPE_AND`
    ``FDB_MUTATION_TYPE_AND``

        |atomic-and|

    :data:`FDB_MUTATION_TYPE_OR`
    ``FDB_MUTATION_TYPE_OR``

        |atomic-or|

    :data:`FDB_MUTATION_TYPE_XOR`
    ``FDB_MUTATION_TYPE_XOR``

        |atomic-xor|

    :data:`FDB_MUTATION_TYPE_MAX`
    ``FDB_MUTATION_TYPE_MAX``

        |atomic-max1|

        |atomic-max-min|

    :data:`FDB_MUTATION_TYPE_BYTE_MAX`
    ``FDB_MUTATION_TYPE_BYTE_MAX``

        |atomic-byte-max|

    :data:`FDB_MUTATION_TYPE_MIN`
    ``FDB_MUTATION_TYPE_MIN``

        |atomic-min1|

        |atomic-max-min|

    :data:`FDB_MUTATION_TYPE_BYTE_MIN`
    ``FDB_MUTATION_TYPE_BYTE_MIN``

        |atomic-byte-min|

    :data:`FDB_MUTATION_TYPE_SET_VERSIONSTAMPED_KEY`
    ``FDB_MUTATION_TYPE_SET_VERSIONSTAMPED_KEY``

        |atomic-set-versionstamped-key-1|

@@ -674,7 +684,7 @@ Applications must provide error handling and an appropriate retry loop around th

    .. warning :: |atomic-versionstamps-tuple-warning-key|

    :data:`FDB_MUTATION_TYPE_SET_VERSIONSTAMPED_VALUE`
    ``FDB_MUTATION_TYPE_SET_VERSIONSTAMPED_VALUE``

        |atomic-set-versionstamped-value|

@@ -686,7 +696,7 @@ Applications must provide error handling and an appropriate retry loop around th

.. function:: FDBFuture* fdb_transaction_commit(FDBTransaction* transaction)

    Attempts to commit the sets and clears previously applied to the database snapshot represented by :data:`transaction` to the actual database. The commit may or may not succeed -- in particular, if a conflicting transaction previously committed, then the commit must fail in order to preserve transactional isolation. If the commit does succeed, the transaction is durably committed to the database and all subsequently started transactions will observe its effects.
    Attempts to commit the sets and clears previously applied to the database snapshot represented by ``transaction`` to the actual database. The commit may or may not succeed -- in particular, if a conflicting transaction previously committed, then the commit must fail in order to preserve transactional isolation. If the commit does succeed, the transaction is durably committed to the database and all subsequently started transactions will observe its effects.

    It is not necessary to commit a read-only transaction -- you can simply call :func:`fdb_transaction_destroy()`.
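
    A sketch of the retry loop this implies, built from :func:`fdb_transaction_commit()` and :func:`fdb_transaction_on_error()` (documented below); ``run_once`` is a hypothetical helper name::

        fdb_error_t run_once(FDBTransaction* tr) {
            for (;;) {
                /* ... perform reads, sets, and clears on tr ... */
                FDBFuture* cf = fdb_transaction_commit(tr);
                fdb_future_block_until_ready(cf);
                fdb_error_t err = fdb_future_get_error(cf);
                fdb_future_destroy(cf);
                if (!err)
                    return 0;              /* committed successfully */
                FDBFuture* ef = fdb_transaction_on_error(tr, err);
                fdb_future_block_until_ready(ef);
                fdb_error_t retry_err = fdb_future_get_error(ef);
                fdb_future_destroy(ef);
                if (retry_err)
                    return retry_err;      /* non-retryable; report to caller */
                /* otherwise the transaction was reset: loop and retry */
            }
        }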

@@ -700,7 +710,7 @@ Applications must provide error handling and an appropriate retry loop around th

.. function:: fdb_error_t fdb_transaction_get_committed_version(FDBTransaction* transaction, int64_t* out_version)

    Retrieves the database version number at which a given transaction was committed. :func:`fdb_transaction_commit()` must have been called on :data:`transaction` and the resulting future must be ready and not an error before this function is called, or the behavior is undefined. Read-only transactions do not modify the database when committed and will have a committed version of -1. Keep in mind that a transaction which reads keys and then sets them to their current values may be optimized to a read-only transaction.
    Retrieves the database version number at which a given transaction was committed. :func:`fdb_transaction_commit()` must have been called on ``transaction`` and the resulting future must be ready and not an error before this function is called, or the behavior is undefined. Read-only transactions do not modify the database when committed and will have a committed version of -1. Keep in mind that a transaction which reads keys and then sets them to their current values may be optimized to a read-only transaction.

    Note that database versions are not necessarily unique to a given transaction and so cannot be used to determine in what order two transactions completed. The only use for this function is to manually enforce causal consistency when calling :func:`fdb_transaction_set_read_version()` on another subsequent transaction.

@@ -726,11 +736,11 @@ Applications must provide error handling and an appropriate retry loop around th

    |transaction-watch-limit-blurb|

    :data:`key_name`
    ``key_name``
        A pointer to the name of the key to watch. |no-null|

    :data:`key_name_length`
        |length-of| :data:`key_name`.
    ``key_name_length``
        |length-of| ``key_name``.
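
    For instance, a watch is typically armed inside a transaction and waited on after a successful commit (a sketch; key name illustrative)::

        FDBFuture* w = fdb_transaction_watch(tr, (uint8_t const*)"config", 6);
        /* ... commit tr; the watch becomes active once the commit succeeds ... */
        fdb_future_block_until_ready(w);  /* returns when the key's value changes */
        fdb_future_destroy(w);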

.. function:: FDBFuture* fdb_transaction_on_error(FDBTransaction* transaction, fdb_error_t error)

@@ -751,7 +761,7 @@ Applications must provide error handling and an appropriate retry loop around th

.. function:: void fdb_transaction_reset(FDBTransaction* transaction)

    Reset :data:`transaction` to its initial state. This is similar to calling :func:`fdb_transaction_destroy()` followed by :func:`fdb_database_create_transaction()`. It is not necessary to call :func:`fdb_transaction_reset()` when handling an error with :func:`fdb_transaction_on_error()` since the transaction has already been reset.
    Reset ``transaction`` to its initial state. This is similar to calling :func:`fdb_transaction_destroy()` followed by :func:`fdb_database_create_transaction()`. It is not necessary to call :func:`fdb_transaction_reset()` when handling an error with :func:`fdb_transaction_on_error()` since the transaction has already been reset.

.. function:: void fdb_transaction_cancel(FDBTransaction* transaction)

@@ -769,30 +779,30 @@ Applications must provide error handling and an appropriate retry loop around th

    .. note:: |conflict-range-note|

    :data:`begin_key_name`
    ``begin_key_name``
        A pointer to the name of the key specifying the beginning of the conflict range. |no-null|

    :data:`begin_key_name_length`
        |length-of| :data:`begin_key_name`.
    ``begin_key_name_length``
        |length-of| ``begin_key_name``.

    :data:`end_key_name`
    ``end_key_name``
        A pointer to the name of the key specifying the end of the conflict range. |no-null|

    :data:`end_key_name_length`
        |length-of| :data:`end_key_name_length`.
    ``end_key_name_length``
        |length-of| ``end_key_name``.

    :data:`type`
    ``type``
        One of the :type:`FDBConflictRangeType` values indicating what type of conflict range is being set.

.. type:: FDBConflictRangeType

    An enumeration of available conflict range types to be passed to :func:`fdb_transaction_add_conflict_range()`.

    :data:`FDB_CONFLICT_RANGE_TYPE_READ`
    ``FDB_CONFLICT_RANGE_TYPE_READ``

        |add-read-conflict-range-blurb|

    :data:`FDB_CONFLICT_RANGE_TYPE_WRITE`
    ``FDB_CONFLICT_RANGE_TYPE_WRITE``

        |add-write-conflict-range-blurb|

@@ -45,10 +45,10 @@
    Snapshot reads selectively relax FoundationDB's isolation property, reducing :ref:`conflicts <developer-guide-transaction-conflicts>` but making it harder to reason about concurrency.

.. |snapshot-blurb2| replace::
    By default, FoundationDB transactions guarantee :ref:`serializable isolation <ACID>`, resulting in a state that is *as if* transactions were executed one at a time, even if they were executed concurrently. Serializability has little performance cost when there are few :ref:`conflicts <developer-guide-transaction-conflicts>` but can be expensive when there are many. FoundationDB therefore also permits individual reads within a transaction to be done as snapshot reads.
    By default, FoundationDB transactions guarantee :ref:`strictly serializable isolation <ACID>`, resulting in a state that is *as if* transactions were executed one at a time, even if they were executed concurrently. Serializability has little performance cost when there are few :ref:`conflicts <developer-guide-transaction-conflicts>` but can be expensive when there are many. FoundationDB therefore also permits individual reads within a transaction to be done as snapshot reads.

.. |snapshot-blurb3| replace::
    Snapshot reads differ from ordinary (serializable) reads by permitting the values they read to be modified by concurrent transactions, whereas serializable reads cause conflicts in that case. Like serializable reads, snapshot reads see the effects of prior writes in the same transaction. For more information on the use of snapshot reads, see :ref:`snapshot isolation`.
    Snapshot reads differ from ordinary (strictly serializable) reads by permitting the values they read to be modified by concurrent transactions, whereas strictly serializable reads cause conflicts in that case. Like strictly serializable reads, snapshot reads see the effects of prior writes in the same transaction. For more information on the use of snapshot reads, see :ref:`snapshot isolation`.

.. |snapshot-blurb4| replace::
    Snapshot reads also interact with transaction commit a little differently than normal reads. If a snapshot read is outstanding when transaction commit is called, that read will immediately return an error. (Normally, transaction commit will wait until outstanding reads return before committing.)

@@ -94,7 +94,7 @@
    By combining these logical steps into a single, read-free operation, FoundationDB can guarantee that the transaction will not conflict due to the operation. This makes atomic operations ideal for operating on keys that are frequently modified. A common example is the use of a key-value pair as a counter.

.. |atomic-ops-warning| replace::
    If a transaction uses both an atomic operation and a serializable read on the same key, the benefits of using the atomic operation (for both conflict checking and performance) are lost.
    If a transaction uses both an atomic operation and a strictly serializable read on the same key, the benefits of using the atomic operation (for both conflict checking and performance) are lost.

.. |atomic-add1| replace::
    Performs an addition of little-endian integers. If the existing value in the database is not present or shorter than ``param``, it is first extended to the length of ``param`` with zero bytes. If ``param`` is shorter than the existing value in the database, the existing value is truncated to match the length of ``param``.

@@ -194,7 +194,7 @@

.. |conflict-range-note| replace::

    Most applications will use the serializable isolation that transactions provide by default and will not need to manipulate conflict ranges.
    Most applications will use the strictly serializable isolation that transactions provide by default and will not need to manipulate conflict ranges.

.. |conflict-range-blurb| replace::

@@ -539,7 +539,7 @@ Snapshot reads

.. method:: Transaction.snapshot.get_read_version()

    Identical to :meth:`Transaction.get_read_version` (since snapshot and serializable reads use the same read version).
    Identical to :meth:`Transaction.get_read_version` (since snapshot and strictly serializable reads use the same read version).

Writing data

@@ -26,7 +26,6 @@
.. |snapshot-ryw-disable-database-option| replace:: :meth:`Database.options.set_snapshot_ryw_disable`
.. |future-type-string| replace:: a :class:`Future`
.. |read-your-writes-disable-option| replace:: :meth:`Transaction.options.set_read_your_writes_disable`
.. |read-your-writes-disable-option| replace:: :meth:`Transaction.options.set_read_your_writes_disable`
.. |retry-limit-transaction-option| replace:: :meth:`Transaction.options.set_retry_limit`
.. |timeout-transaction-option| replace:: :meth:`Transaction.options.set_timeout`
.. |max-retry-delay-transaction-option| replace:: :meth:`Transaction.options.set_max_retry_delay`

@@ -498,7 +497,7 @@ Snapshot reads

.. method:: Transaction.snapshot.get_read_version() -> Version

    Identical to :meth:`Transaction.get_read_version` (since snapshot and serializable reads use the same read version).
    Identical to :meth:`Transaction.get_read_version` (since snapshot and strictly serializable reads use the same read version).

Writing data
------------
@@ -238,7 +238,7 @@ The ``start`` subcommand is used to start a backup. If there is already a backu
 .. program:: fdbbackup modify

 ``modify``
----------
+----------

 The ``modify`` subcommand is used to modify parameters of a running backup. All specified changes are made in a single transaction.
@@ -22,7 +22,7 @@ What does choosing Availability mean?

 Let's consider an **AP** database. In such a database, reads and writes would always succeed, even when network connectivity is unavailable between nodes. If possible, these would certainly seem like desirable properties!

-However, the downside is stark. Imagine a simple distributed database consisting of two nodes and a network partition making them unable communicate. To be Available, each of the two nodes must continue to accept writes from clients.
+However, the downside is stark. Imagine a simple distributed database consisting of two nodes and a network partition making them unable to communicate. To be Available, each of the two nodes must continue to accept writes from clients.

 .. figure:: /images/AP_Partition.png
@@ -2,6 +2,48 @@
 Client Testing
 ###############

+###################################
+Testing Error Handling with Buggify
+###################################
+
+FoundationDB clients need to handle errors correctly. Incorrect error handling can lead to many bugs - in the worst case it can
+lead to a corrupted database. Because of this, it is important that an application or layer author properly tests their
+application during failure scenarios. But this is non-trivial. In a development environment, cluster failures are very
+unlikely, and it is therefore possible that certain types of exceptions are never tested in a controlled environment.
+
+The simplest way of testing for these kinds of errors is a mechanism called ``Buggify``. If this option is enabled
+in the client, the client will randomly throw errors that an application might see in a production environment. Enabling this
+option in testing will greatly improve the probability that error handling is tested properly.
+
+Options to Control Buggify
+==========================
+
+There are four network options to control the buggify behavior. By default, buggify is disabled (as it will behave in a way
+that is not desirable in a production environment). The options to control buggify are:
+
+- ``buggify_enable``
+  This option takes no argument and will enable buggify.
+- ``buggify_disable``
+  This can be used to disable buggify again.
+- ``client_buggify_section_activated_probability`` (default ``25``)
+  A number between 0 and 100 - the percent probability that a given buggify section will be activated.
+- ``client_buggify_section_fired_probability`` (default ``25``)
+  A number between 0 and 100 - the percent probability that an activated section fires each time it is executed.
+
+Buggify works by first enabling sections in the code that are only executed with a certain probability. Generally,
+these code sections will simply introduce a synthetic error.
+
+When a section is passed for the first time, the client library will decide randomly whether that code section will be enabled
+or not. It will be enabled with a probability of ``client_buggify_section_activated_probability``.
+
+Whenever the client executes a buggify-enabled code block, it does so only randomly, to make sure that a certain
+exception doesn't always fire. The probability of executing such a section is ``client_buggify_section_fired_probability``.
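A hypothetical sketch of enabling these options from the Python binding, assuming the binding exposes them as network options on ``fdb.options`` (the exact setter names here are an assumption)::

    import fdb
    fdb.api_version(610)

    # Network options must be set before the network starts, i.e. before fdb.open().
    fdb.options.set_client_buggify_enable()
    fdb.options.set_client_buggify_section_activated_probability(25)
    fdb.options.set_client_buggify_section_fired_probability(25)

    db = fdb.open()  # subsequent reads and writes may now raise synthetic errors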
+################################
+Simulation and Cluster Workloads
+################################
+
+FoundationDB comes with its own testing framework. Tests are implemented as workloads. A workload is nothing more than a class
+that gets called by server processes running the ``tester`` role. Additionally, an ``fdbserver`` process can run a simulator that
+simulates a full fdb cluster with several machines and different configurations in one process. This simulator can run the same
@@ -530,7 +530,7 @@ The second feature is the ability to add one or more synchronous replicas of the

 An example configuration would be four total datacenters, two on the east coast, two on the west coast, with a preference for fast write latencies from the west coast. One datacenter on each coast would be sized to store a full copy of the data. The second datacenter on each coast would only have a few FoundationDB processes.

-While everything is healthy, writes need to be made durable in both west coast datacenters before a commit can succeed. The geographic proximity of the two datacenters minimizes the additional commit latency. Reads can be served from either region, and clients can get data from whichever region is closer. Getting a read version from the each coast region will still require communicating with a west coast datacenter. Clients can cache read versions if they can tolerate reading stale data to avoid waiting on read versions.
+While everything is healthy, writes need to be made durable in both west coast datacenters before a commit can succeed. The geographic proximity of the two datacenters minimizes the additional commit latency. Reads can be served from either region, and clients can get data from whichever region is closer. Getting a read version from the east coast region will still require communicating with a west coast datacenter. Clients can cache read versions if they can tolerate reading stale data to avoid waiting on read versions.

 If either west coast datacenter fails, the last few mutations will be propagated from the remaining west coast datacenter to the east coast. At this point, FoundationDB will start accepting commits on the east coast. Once the west coast comes back online, the system will automatically start copying all the data that was committed to the east coast back to the west coast replica. Once the west coast has caught up, the system will automatically switch back to accepting writes from the west coast again.
@@ -615,7 +615,7 @@ The number of replicas in each region is controlled by redundancy level. For exa
 Asymmetric configurations
 -------------------------

-The fact that satellite policies are configured per region allows for asymmetric configurations. For example, FoudnationDB can have a three datacenter setup where there are two datacenters on the west coast (WC1, WC2) and one datacenter on the east coast (EC1). The west coast region can be set as the preferred active region by setting the priority of its primary datacenter higher than the east coast datacenter. The west coast region should have a satellite policy configured, so that when it is active, FoundationDB is making mutations durable in both west coast datacenters. In the rare event that one of the west coast datacenter have failed, FoundationDB will fail over to the east coast datacenter. Because this region does not a satellite datacenter, the mutations will only be made durable in one datacenter while the transaction subsystem is located here. However this is justifiable because the region will only be active if a datacenter has already been lost.
+The fact that satellite policies are configured per region allows for asymmetric configurations. For example, FoundationDB can have a three datacenter setup where there are two datacenters on the west coast (WC1, WC2) and one datacenter on the east coast (EC1). The west coast region can be set as the preferred active region by setting the priority of its primary datacenter higher than the east coast datacenter. The west coast region should have a satellite policy configured, so that when it is active, FoundationDB is making mutations durable in both west coast datacenters. In the rare event that one of the west coast datacenters has failed, FoundationDB will fail over to the east coast datacenter. Because this region does not have a satellite datacenter, the mutations will only be made durable in one datacenter while the transaction subsystem is located here. However, this is justifiable because the region will only be active if a datacenter has already been lost.

 This is the region configuration that implements the example::
@@ -669,7 +669,7 @@ To configure an existing database to regions, do the following steps:

 4. Configure ``usable_regions=2``. This will cause the cluster to start copying data between the regions.

-5. Watch ``status`` and wait until data movement is complete. This will mean signal that the remote datacenter has a full replica of all of the data in the database.
+5. Watch ``status`` and wait until data movement is complete. This will signal that the remote datacenter has a full replica of all of the data in the database.

 6. Change the region configuration to have a non-negative priority for the primary datacenters in both regions. This will enable automatic failover between regions.
@@ -680,7 +680,7 @@ When a primary datacenter fails, the cluster will go into a degraded state. It w

 .. warning:: While a datacenter has failed, the maximum write throughput of the cluster will be roughly 1/3 of normal performance. This is because the transaction logs need to store all of the mutations being committed, so that once the other datacenter comes back online, it can replay history to catch back up.

-To drop the dead datacenter do the follow steps:
+To drop the dead datacenter do the following steps:

 1. Configure the region configuration so that the dead datacenter has a negative priority.
@@ -27,4 +27,4 @@ Consistency model

 Consistency models serve to define the guarantees the database provides about when concurrent writes become visible to readers. Consistency models fall along a spectrum, depending on the strength of the guarantees. In general, stronger consistency models make reasoning about the database easier and speed development. For example, *causal* consistency guarantees that readers see all previously committed writes. *Eventual* consistency guarantees only that readers see writes after "sufficient time". Eventual consistency is the model used in many of the first-generation NoSQL systems.

-FoundationDB provides the strongest possible consistency model, `sequential consistency <http://en.wikipedia.org/wiki/Sequential_consistency>`_ (closely related to `serializability <http://en.wikipedia.org/wiki/Serializability>`_ from the database literature), providing the greatest possible ease of development.
+FoundationDB provides strict serializability, the strongest possible consistency model, to provide the greatest possible ease of development.
@@ -39,6 +39,16 @@
 .. |node-subspace| replace:: FIXME
 .. |content-subspace| replace:: FIXME
 .. |allow-manual-prefixes| replace:: FIXME
+.. |retry-limit-transaction-option| replace:: FIXME
+.. |timeout-transaction-option| replace:: FIXME
+.. |max-retry-delay-transaction-option| replace:: FIXME
+.. |snapshot-ryw-enable-transaction-option| replace:: FIXME
+.. |snapshot-ryw-disable-transaction-option| replace:: FIXME
+.. |snapshot-ryw-enable-database-option| replace:: FIXME
+.. |snapshot-ryw-disable-database-option| replace:: FIXME
+.. |retry-limit-database-option| replace:: FIXME
+.. |max-retry-delay-database-option| replace:: FIXME
+.. |timeout-database-option| replace:: FIXME

 .. include:: api-common.rst.inc
@@ -39,6 +39,16 @@
 .. |node-subspace| replace:: FIXME
 .. |content-subspace| replace:: FIXME
 .. |allow-manual-prefixes| replace:: FIXME
+.. |retry-limit-transaction-option| replace:: FIXME
+.. |timeout-transaction-option| replace:: FIXME
+.. |max-retry-delay-transaction-option| replace:: FIXME
+.. |snapshot-ryw-enable-transaction-option| replace:: FIXME
+.. |snapshot-ryw-disable-transaction-option| replace:: FIXME
+.. |snapshot-ryw-enable-database-option| replace:: FIXME
+.. |snapshot-ryw-disable-database-option| replace:: FIXME
+.. |retry-limit-database-option| replace:: FIXME
+.. |max-retry-delay-database-option| replace:: FIXME
+.. |timeout-database-option| replace:: FIXME

 .. include:: api-common.rst.inc
@@ -321,7 +331,7 @@ Transaction basics
 Transactions in FoundationDB
 ----------------------------

-FoundationDB provides concurrency control via transactions, allowing multiple clients to concurrently read and write data in the database with strong guarantees about how they affect each other. Specifically, FoundationDB provides global, ACID transactions with serializable isolation using optimistic concurrency.
+FoundationDB provides concurrency control via transactions, allowing multiple clients to concurrently read and write data in the database with strong guarantees about how they affect each other. Specifically, FoundationDB provides global, ACID transactions with strict serializability using optimistic concurrency.

 All reads and modifications of key-value pairs in FoundationDB are done within the context of a transaction. A transaction is a small unit of work that is both reliably performed and logically independent of other transactions.
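To make this concrete, here is a minimal sketch of a transaction using the Python binding (the key layout and the assumption that values hold ASCII-encoded integers are illustrative, not from the original docs)::

    import fdb
    fdb.api_version(610)
    db = fdb.open()

    @fdb.transactional
    def transfer(tr, src, dst, amount):
        # All reads and writes below form one ACID transaction; on a
        # conflict, the @fdb.transactional decorator retries the function.
        src_balance = int(tr[src]) - amount
        dst_balance = int(tr[dst]) + amount
        tr[src] = str(src_balance).encode('ascii')
        tr[dst] = str(dst_balance).encode('ascii')

    transfer(db, b'acct/alice', b'acct/bob', 10)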
@@ -575,20 +585,20 @@ The following example illustrates both techniques. Together, they make a transac
 Conflict ranges
 ---------------

-By default, FoundationDB transactions guarantee :ref:`serializable isolation <ACID>`, which results in a state that *could* have been produced by executing transactions one at a time, even though they may actually have been executed concurrently. FoundationDB maintains serializable isolation by detecting conflicts among concurrent transactions and allowing only a non-conflicting subset of them to succeed. Two concurrent transactions conflict if the first to commit writes a value that the second reads. In this case, the second transaction will fail. Clients will usually retry failed transactions.
+By default, FoundationDB transactions guarantee :ref:`strict serializability <ACID>`, which results in a state that *could* have been produced by executing transactions one at a time, even though they may actually have been executed concurrently. FoundationDB maintains strict serializability by detecting conflicts among concurrent transactions and allowing only a non-conflicting subset of them to succeed. Two concurrent transactions conflict if the first to commit writes a value that the second reads. In this case, the second transaction will fail. Clients will usually retry failed transactions.

-To detect conflicts, FoundationDB tracks the ranges of keys each transaction reads and writes. While most applications will use the serializable isolation that transactions provide by default, FoundationDB also provides several API features that manipulate conflict ranges to allow more precise control.
+To detect conflicts, FoundationDB tracks the ranges of keys each transaction reads and writes. While most applications will use the strictly serializable isolation that transactions provide by default, FoundationDB also provides several API features that manipulate conflict ranges to allow more precise control.

 Conflicts can be *avoided*, reducing isolation, in two ways:

-* Instead of ordinary (serializable) reads, you can perform :ref:`snapshot reads <snapshot isolation>`, which do not add read conflict ranges.
+* Instead of ordinary (strictly serializable) reads, you can perform :ref:`snapshot reads <snapshot isolation>`, which do not add read conflict ranges.
 * You can use :ref:`transaction options <api-python-transaction-options>` to disable conflict ranges for writes.

 Conflicts can be *created*, increasing isolation, by :ref:`explicitly adding <api-python-conflict-ranges>` read or write conflict ranges.

 .. note:: *add read conflict range* behaves as if the client is reading the range. This means *add read conflict range* will not add conflict ranges for keys that have been written earlier in the same transaction. This is the intended behavior, as it allows users to compose transactions together without introducing unnecessary conflicts.

-For example, suppose you have a transactional function that increments a set of counters using atomic addition. :ref:`developer-guide-atomic-operations` do not add read conflict ranges and so cannot cause the transaction in which they occur to fail. Most of the time, this is exactly what we want. However, suppose there is another transaction that (infrequently) resets one or more counters, and our contract requires that we must advance all specified counters in unison. We want to guarantee that if a counter is reset during an incrementing transaction, then the incrementing transaction will conflict. We can selectively add read conflicts ranges for this purpose::
+For example, suppose you have a transactional function that increments a set of counters using atomic addition. :ref:`developer-guide-atomic-operations` do not add read conflict ranges and so cannot cause the transaction in which they occur to fail. More precisely, the read version for an atomic operation is the same as the transaction's commit version, and thus no conflicting write from another transaction could be serialized between the read and write of the key. Most of the time, this is exactly what we want. However, suppose there is another transaction that (infrequently) resets one or more counters, and our contract requires that we must advance all specified counters in unison. We want to guarantee that if a counter is reset during an incrementing transaction, then the incrementing transaction will conflict. We can selectively add read conflict ranges for this purpose::

    @fdb.transactional
    def guarded_increment(tr, counters):
@@ -603,9 +613,9 @@ Snapshot reads

 |snapshot-blurb1|

-The serializable isolation that transactions maintain by default has little performance cost when there are few conflicts but can be expensive when there are many. FoundationDB therefore also permits individual reads within a transaction to be done as snapshot reads. Snapshot reads differ from ordinary (serializable) reads by permitting the values they read to be modified by concurrent transactions, whereas serializable reads cause conflicts in that case.
+The strictly serializable isolation that transactions maintain by default has little performance cost when there are few conflicts but can be expensive when there are many. FoundationDB therefore also permits individual reads within a transaction to be done as snapshot reads. Snapshot reads differ from ordinary (strictly serializable) reads by permitting the values they read to be modified by concurrent transactions, whereas strictly serializable reads cause conflicts in that case.

-Consider a transaction which needs to remove and return an arbitrary value from a small range of keys. The simplest implementation (using serializable isolation) would be::
+Consider a transaction which needs to remove and return an arbitrary value from a small range of keys. The simplest implementation (using strictly serializable isolation) would be::

    @fdb.transactional
    def remove_one(tr, range):
@@ -626,7 +636,7 @@ Unfortunately, if a concurrent transaction happens to insert a new key anywhere

 This transaction accomplishes the same task but won't conflict with the insert of a key elsewhere in the range. It will only conflict with a modification to the key it actually returns.

-By default, snapshot reads see the effects of prior writes in the same transaction. (This read-your-writes behavior is the same as for ordinary, serializable reads.) Read-your-writes allows transactional functions (such as the above example) to be easily composed within a single transaction because each function will see the writes of previously invoked functions.
+By default, snapshot reads see the effects of prior writes in the same transaction. (This read-your-writes behavior is the same as for ordinary, strictly serializable reads.) Read-your-writes allows transactional functions (such as the above example) to be easily composed within a single transaction because each function will see the writes of previously invoked functions.

 .. note::
     The default read-your-writes behavior of snapshot reads is well-suited to the large majority of use cases. In less frequent cases, you may want to read from only a single version of the database. This behavior can be achieved through the appropriate :ref:`transaction options <api-python-snapshot-ryw>`. Transaction options are an advanced feature of the API and should be used with caution.
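As a small illustration of the distinction between the two kinds of reads, here is a hedged Python sketch (the key names are hypothetical)::

    @fdb.transactional
    def observe(tr, watched_key, counter_key):
        # Snapshot read: adds no read conflict range, so a concurrent write
        # to watched_key will not cause this transaction to conflict.
        observed = tr.snapshot.get(watched_key)
        # Ordinary (strictly serializable) read: adds a read conflict range.
        current = tr.get(counter_key)
        return observed, current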
@@ -643,7 +653,7 @@ Using snapshot reads is appropriate when the following conditions all hold:

 * A particular read of frequently written values causes too many conflicts.
 * There isn't an easy way to reduce conflicts by splitting up data more granularly.
-* Any necessary invariants can be validated with added conflict ranges or more narrowly targeted serializable reads.
+* Any necessary invariants can be validated with added conflict ranges or more narrowly targeted strictly serializable reads.

 Transaction cancellation
 ------------------------
@@ -10,38 +10,38 @@ macOS

 The macOS installation package is supported on macOS 10.7+. It includes the client and (optionally) the server.

-* `FoundationDB-6.1.6.pkg <https://www.foundationdb.org/downloads/6.1.6/macOS/installers/FoundationDB-6.1.6.pkg>`_
+* `FoundationDB-6.1.10.pkg <https://www.foundationdb.org/downloads/6.1.10/macOS/installers/FoundationDB-6.1.10.pkg>`_

 Ubuntu
 ------

 The Ubuntu packages are supported on 64-bit Ubuntu 12.04+, but beware of the Linux kernel bug in Ubuntu 12.x.

-* `foundationdb-clients-6.1.6-1_amd64.deb <https://www.foundationdb.org/downloads/6.1.6/ubuntu/installers/foundationdb-clients_6.1.6-1_amd64.deb>`_
-* `foundationdb-server-6.1.6-1_amd64.deb <https://www.foundationdb.org/downloads/6.1.6/ubuntu/installers/foundationdb-server_6.1.6-1_amd64.deb>`_ (depends on the clients package)
+* `foundationdb-clients-6.1.10-1_amd64.deb <https://www.foundationdb.org/downloads/6.1.10/ubuntu/installers/foundationdb-clients_6.1.10-1_amd64.deb>`_
+* `foundationdb-server-6.1.10-1_amd64.deb <https://www.foundationdb.org/downloads/6.1.10/ubuntu/installers/foundationdb-server_6.1.10-1_amd64.deb>`_ (depends on the clients package)

 RHEL/CentOS EL6
 ---------------

 The RHEL/CentOS EL6 packages are supported on 64-bit RHEL/CentOS 6.x.

-* `foundationdb-clients-6.1.6-1.el6.x86_64.rpm <https://www.foundationdb.org/downloads/6.1.6/rhel6/installers/foundationdb-clients-6.1.6-1.el6.x86_64.rpm>`_
-* `foundationdb-server-6.1.6-1.el6.x86_64.rpm <https://www.foundationdb.org/downloads/6.1.6/rhel6/installers/foundationdb-server-6.1.6-1.el6.x86_64.rpm>`_ (depends on the clients package)
+* `foundationdb-clients-6.1.10-1.el6.x86_64.rpm <https://www.foundationdb.org/downloads/6.1.10/rhel6/installers/foundationdb-clients-6.1.10-1.el6.x86_64.rpm>`_
+* `foundationdb-server-6.1.10-1.el6.x86_64.rpm <https://www.foundationdb.org/downloads/6.1.10/rhel6/installers/foundationdb-server-6.1.10-1.el6.x86_64.rpm>`_ (depends on the clients package)

 RHEL/CentOS EL7
 ---------------

 The RHEL/CentOS EL7 packages are supported on 64-bit RHEL/CentOS 7.x.

-* `foundationdb-clients-6.1.6-1.el7.x86_64.rpm <https://www.foundationdb.org/downloads/6.1.6/rhel7/installers/foundationdb-clients-6.1.6-1.el7.x86_64.rpm>`_
-* `foundationdb-server-6.1.6-1.el7.x86_64.rpm <https://www.foundationdb.org/downloads/6.1.6/rhel7/installers/foundationdb-server-6.1.6-1.el7.x86_64.rpm>`_ (depends on the clients package)
+* `foundationdb-clients-6.1.10-1.el7.x86_64.rpm <https://www.foundationdb.org/downloads/6.1.10/rhel7/installers/foundationdb-clients-6.1.10-1.el7.x86_64.rpm>`_
+* `foundationdb-server-6.1.10-1.el7.x86_64.rpm <https://www.foundationdb.org/downloads/6.1.10/rhel7/installers/foundationdb-server-6.1.10-1.el7.x86_64.rpm>`_ (depends on the clients package)

 Windows
 -------

 The Windows installer is supported on 64-bit Windows XP and later. It includes the client and (optionally) the server.

-* `foundationdb-6.1.6-x64.msi <https://www.foundationdb.org/downloads/6.1.6/windows/installers/foundationdb-6.1.6-x64.msi>`_
+* `foundationdb-6.1.10-x64.msi <https://www.foundationdb.org/downloads/6.1.10/windows/installers/foundationdb-6.1.10-x64.msi>`_

 API Language Bindings
 =====================
@@ -58,20 +58,20 @@ On macOS and Windows, the FoundationDB Python API bindings are installed as part

 If you need to use the FoundationDB Python API from other Python installations or paths, download the Python package:

-* `foundationdb-6.1.6.tar.gz <https://www.foundationdb.org/downloads/6.1.6/bindings/python/foundationdb-6.1.6.tar.gz>`_
+* `foundationdb-6.1.10.tar.gz <https://www.foundationdb.org/downloads/6.1.10/bindings/python/foundationdb-6.1.10.tar.gz>`_

 Ruby 1.9.3/2.0.0+
 -----------------

-* `fdb-6.1.6.gem <https://www.foundationdb.org/downloads/6.1.6/bindings/ruby/fdb-6.1.6.gem>`_
+* `fdb-6.1.10.gem <https://www.foundationdb.org/downloads/6.1.10/bindings/ruby/fdb-6.1.10.gem>`_

 Java 8+
 -------

-* `fdb-java-6.1.6.jar <https://www.foundationdb.org/downloads/6.1.6/bindings/java/fdb-java-6.1.6.jar>`_
-* `fdb-java-6.1.6-javadoc.jar <https://www.foundationdb.org/downloads/6.1.6/bindings/java/fdb-java-6.1.6-javadoc.jar>`_
+* `fdb-java-6.1.10.jar <https://www.foundationdb.org/downloads/6.1.10/bindings/java/fdb-java-6.1.10.jar>`_
+* `fdb-java-6.1.10-javadoc.jar <https://www.foundationdb.org/downloads/6.1.10/bindings/java/fdb-java-6.1.10-javadoc.jar>`_

-Go 1.1+
--------
+Go 1.11+
+--------

 The FoundationDB Go package is available on `GitHub <https://github.com/apple/foundationdb/tree/master/bindings/go>`_.
@@ -30,7 +30,7 @@ FoundationDB stores each piece of data on multiple servers. If a server containi
 Ordered Key-Value API
 ---------------------

-Simple can be powerful. FoundationDB uses an ordered key-value data model (and richer data models are exposed via :doc:`layers <layer-concept>`. Each "row" within the database consists of a key that is used to reference the row and a value which stores data associated with the key. No specific format for the keys or values is required; they are simply binary data. Because keys are kept in lexicographical (sorted) order, ranges of key-value pairs can be read efficiently.
+Simple can be powerful. FoundationDB uses an ordered key-value data model (and richer data models are exposed via :doc:`layers <layer-concept>`). Each "row" within the database consists of a key that is used to reference the row and a value which stores data associated with the key. No specific format for the keys or values is required; they are simply binary data. Because keys are kept in lexicographical (sorted) order, ranges of key-value pairs can be read efficiently.
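To illustrate why ordering matters, here is a hedged Python sketch of an efficient range read (the tuple-encoded key layout is an assumption for the example)::

    @fdb.transactional
    def readings_for_sensor(tr, sensor_id):
        # Keys sharing a prefix are stored adjacently, so one ordered range
        # request retrieves all of them; no per-key lookups are needed.
        prefix = fdb.tuple.pack(('reading', sensor_id))
        return [(fdb.tuple.unpack(k), v) for k, v in tr.get_range_startswith(prefix)]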
 Watches
 -------
@@ -40,7 +40,7 @@ Promises and futures can be used within a single process, but their real strength
 wait()
 ------

-At the point when a receiver holding a ``Future<T>`` needs the ``T`` to continue computation, it invokes the ``wait()`` statement with the ``Future<T>`` as its parameter. The ``wait()`` statement allows the calling actor to pause execution until the value of the future is set, returning a value of type ``T`` During the wait, other actors can continue execution, providing asynchronous concurrency within a single process.
+At the point when a receiver holding a ``Future<T>`` needs the ``T`` to continue computation, it invokes the ``wait()`` statement with the ``Future<T>`` as its parameter. The ``wait()`` statement allows the calling actor to pause execution until the value of the future is set, returning a value of type ``T``. During the wait, other actors can continue execution, providing asynchronous concurrency within a single process.

 ACTOR
 -----
@@ -154,5 +154,5 @@ Some preprocessor definitions will not fix all issues though. When programming f
     foo([x]() { x->bar(); })
 }

-- state variables in don't follow the normal scoping rules. So in flow a state variable can be defined in a inner scope and later it can be used in the outer scope. In order to not break compilation in IDE-mode, always define state variables in the outermost scope they will be used.
+- state variables in flow don't follow the normal scoping rules. So in flow a state variable can be defined in an inner scope and later used in the outer scope. In order to not break compilation in IDE-mode, always define state variables in the outermost scope in which they will be used.
@@ -1,6 +1,7 @@
 .. -*- mode: rst; -*-

 .. |json-status-format| replace::
+    .. code-block:: javascript

        "cluster":{
           "layers":{
              "_valid":true,
@@ -42,12 +42,9 @@ JSON format

 The following format informally describes the JSON containing the status data. The possible values of ``<name_string>`` and ``<description_string>`` are described in :ref:`mr-status-message`. The format is representative: *any field can be missing at any time*, depending on the database state. Clients should be prepared to flexibly handle format variations resulting from different database states.

-.. code-block:: javascript
+.. include:: mr-status-json-schemas.rst.inc

-.. node:: |json-status-format|
-
-.. mr-status-message:
+.. _mr-status-message:

 Message components
 ------------------
@@ -96,7 +93,7 @@ cluster.processes.<process>.messages incorrect_cluster_file_contents Clus
 cluster.processes.<process>.messages io_error                             <error> occurred in <subsystem>
 cluster.processes.<process>.messages platform_error                       <error> occurred in <subsystem>
 cluster.processes.<process>.messages process_error                        <error> occurred in <subsystem>
-==================================== =============================== ==========================================================================================================================================================================
+==================================== ==================================== ==========================================================================================================================================================================

 The JSON path ``cluster.recovery_state``, when it exists, is an Object containing at least ``"name"`` and ``"description"``. The possible values for those fields are in the following table:
@@ -144,9 +144,9 @@ Bindings
 * C API calls made on the network thread could be reordered with calls made from other threads. [6.0.2] `(Issue #518) <https://github.com/apple/foundationdb/issues/518>`_
 * The TLS_PLUGIN option is now a no-op and has been deprecated. [6.0.10] `(PR #710) <https://github.com/apple/foundationdb/pull/710>`_
 * Java: the `Versionstamp::getUserVersion() </javadoc/com/apple/foundationdb/tuple/Versionstamp.html#getUserVersion-->`_ method did not handle user versions greater than ``0x00FF`` due to operator precedence errors. [6.0.11] `(Issue #761) <https://github.com/apple/foundationdb/issues/761>`_
-* Python: bindings didn't work with Python 3.7 because of the new `async` keyword. [6.0.13] `(Issue #830) <https://github.com/apple/foundationdb/issues/830>`_
-* Go: `PrefixRange` didn't correctly return an error if it failed to generate the range. [6.0.15] `(PR #878) <https://github.com/apple/foundationdb/pull/878>`_
-* Go: Add Tuple layer support for `uint`, `uint64`, and `*big.Int` integers up to 255 bytes. Integer values will be decoded into the first of `int64`, `uint64`, or `*big.Int` in which they fit. `(PR #915) <https://github.com/apple/foundationdb/pull/915>`_ [6.0.15]
+* Python: bindings didn't work with Python 3.7 because of the new ``async`` keyword. [6.0.13] `(Issue #830) <https://github.com/apple/foundationdb/issues/830>`_
+* Go: ``PrefixRange`` didn't correctly return an error if it failed to generate the range. [6.0.15] `(PR #878) <https://github.com/apple/foundationdb/pull/878>`_
+* Go: Add Tuple layer support for ``uint``, ``uint64``, and ``*big.Int`` integers up to 255 bytes. Integer values will be decoded into the first of ``int64``, ``uint64``, or ``*big.Int`` in which they fit. `(PR #915) <https://github.com/apple/foundationdb/pull/915>`_ [6.0.15]
 * Ruby: Add Tuple layer support for integers up to 255 bytes. `(PR #915) <https://github.com/apple/foundationdb/pull/915>`_ [6.0.15]
 * Python: bindings didn't work with Python 3.7 because of the new ``async`` keyword. [6.0.13] `(Issue #830) <https://github.com/apple/foundationdb/issues/830>`_
 * Go: ``PrefixRange`` didn't correctly return an error if it failed to generate the range. [6.0.15] `(PR #878) <https://github.com/apple/foundationdb/pull/878>`_
@@ -2,7 +2,29 @@
 Release Notes
 #############

-6.1.6
+6.1.10
 ======

+Performance
+-----------
+
+* Improved the recovery speed of storage servers with a large amount of data. `(PR #1700) <https://github.com/apple/foundationdb/pull/1700>`_
+
+Fixes
+-----
+
+* The ``fdbrestore`` commands ``abort``, ``wait``, and ``status`` would use a default cluster file instead of the destination cluster file argument. `(PR #1701) <https://github.com/apple/foundationdb/pull/1701>`_
+
+6.1.9
+=====
+
+Fixes
+-----
+
+* Sometimes a minority of coordinators would not converge to the leader. `(PR #1649) <https://github.com/apple/foundationdb/pull/1649>`_
+* HTTP responses indicating a server-side error are no longer expected to contain a ResponseID header. `(PR #1651) <https://github.com/apple/foundationdb/pull/1651>`_
+
 6.1.8
 =====

 Features
@@ -20,7 +42,7 @@ Features
 * Separated data distribution from the master into its own role. `(PR #1062) <https://github.com/apple/foundationdb/pull/1062>`_
 * Separated ratekeeper from the master into its own role. `(PR #1176) <https://github.com/apple/foundationdb/pull/1176>`_
 * Added a ``CompareAndClear`` atomic op that clears a key if its value matches the supplied value. `(PR #1105) <https://github.com/apple/foundationdb/pull/1105>`_
-* Added support for IPv6. `(PR #1176) <https://github.com/apple/foundationdb/pull/1178>`_
+* Added support for IPv6. `(PR #1178) <https://github.com/apple/foundationdb/pull/1178>`_
 * FDB can now simultaneously listen to TLS and unencrypted ports to facilitate smoother migration to and from TLS. `(PR #1157) <https://github.com/apple/foundationdb/pull/1157>`_
 * Added ``DISABLE_POSIX_KERNEL_AIO`` knob to fallback to libeio instead of kernel async I/O (KAIO) for systems that do not support KAIO or O_DIRECT flag. `(PR #1283) <https://github.com/apple/foundationdb/pull/1283>`_
 * Added support for configuring the cluster to use the primary and remote DC's as satellites. `(PR #1320) <https://github.com/apple/foundationdb/pull/1320>`_
@@ -50,6 +72,7 @@ Performance
 * Increase the rate that deleted pages are made available for reuse in the SQLite storage engine. Rename and add knobs to provide more control over this process. [6.1.3] `(PR #1485) <https://github.com/apple/foundationdb/pull/1485>`_
 * SQLite page files now grow and shrink in chunks based on a knob which defaults to an effective chunk size of 100MB. [6.1.4] `(PR #1482) <https://github.com/apple/foundationdb/pull/1482>`_ `(PR #1499) <https://github.com/apple/foundationdb/pull/1499>`_
 * Reduced the rate at which data is moved between servers, to reduce the impact a failure has on cluster performance. [6.1.4] `(PR #1499) <https://github.com/apple/foundationdb/pull/1499>`_
+* Avoid closing saturated network connections which have not received ping packets. [6.1.7] `(PR #1601) <https://github.com/apple/foundationdb/pull/1601>`_

 Fixes
 -----
@@ -76,6 +99,7 @@ Fixes
 * The ``configure`` command in ``fdbcli`` returned successfully even when the configuration was not changed for some error types. [6.1.4] `(PR #1509) <https://github.com/apple/foundationdb/pull/1509>`_
 * Safety protections in the ``configure`` command in ``fdbcli`` would trigger spuriously when changing between ``three_datacenter`` replication and a region configuration. [6.1.4] `(PR #1509) <https://github.com/apple/foundationdb/pull/1509>`_
 * Status could report an incorrect reason for ongoing data movement. [6.1.5] `(PR #1544) <https://github.com/apple/foundationdb/pull/1544>`_
+* Storage servers were considered failed as soon as they were rebooted, instead of waiting to see if they rejoin the cluster. [6.1.8] `(PR #1618) <https://github.com/apple/foundationdb/pull/1618>`_

 Status
 ------
@@ -127,6 +151,8 @@ Fixes only impacting 6.1.0+
 * Memory tracking trace events could cause the program to crash when called from inside a trace event. [6.1.5] `(PR #1541) <https://github.com/apple/foundationdb/pull/1541>`_
 * TLogs will replace a large file with an empty file rather than doing a large truncate operation. [6.1.5] `(PR #1545) <https://github.com/apple/foundationdb/pull/1545>`_
 * Fix PR #1545 to work on Windows and Linux. [6.1.6] `(PR #1556) <https://github.com/apple/foundationdb/pull/1556>`_
+* Adding a read conflict range for the metadata version key no longer requires read access to the system keys. [6.1.6] `(PR #1556) <https://github.com/apple/foundationdb/pull/1556>`_
+* The TLog's disk queue files would grow indefinitely after a storage server was removed from the cluster. [6.1.8] `(PR #1617) <https://github.com/apple/foundationdb/pull/1617>`_

 Earlier release notes
 ---------------------
@@ -148,4 +174,4 @@ Earlier release notes
 * :doc:`Beta 2 (API Version 22) </old-release-notes/release-notes-022>`
 * :doc:`Beta 1 (API Version 21) </old-release-notes/release-notes-021>`
 * :doc:`Alpha 6 (API Version 16) </old-release-notes/release-notes-016>`
-* :doc:`Alpha 5 (API Version 14) </old-release-notes/release-notes-014>`
+* :doc:`Alpha 5 (API Version 14) </old-release-notes/release-notes-014>`
@@ -20,6 +20,9 @@ Status
 Bindings
 --------

+* Go: The Go bindings now require Go version 1.11 or later.
+* Go: Fix issue with finalizers running too early that could lead to undefined behavior. `(PR #1451) <https://github.com/apple/foundationdb/pull/1451>`_.

 Other Changes
 -------------
@@ -1,6 +1,4 @@
-.. default-domain:: rb
 .. highlight:: ruby
-.. module:: FDB

 ################
 Time-Series Data
@@ -95,7 +93,7 @@ Ordering and Transactions

 FoundationDB’s ability to let you structure your data in different ways, keep track of metrics, and search it with varying granularity is a direct result of two key features of our key-value store: global ordering and ACID transactions. And as you’ve seen from the code included above, the direct impact of these properties is simpler application code and overall faster development.

-Global ordering makes a big difference if you’re attempting to process significant amounts of sequential information because the database can retrieve that information quickly and efficiently. So rather than having to package your data into a single database object or broadcast a request for many individual data elements that correspond to a given range of application data (e.g. time0, time1, time2, . . ., timen), a globally ordered storage system, like FoundationDB, can generate a single range request to the database for the matching data. And internally, FoundationDB can further optimize requests by knowing which data resides on which machines, so there’s no need to broadcast the data request to all machines in the cluster.
+Global ordering makes a big difference if you’re attempting to process significant amounts of sequential information because the database can retrieve that information quickly and efficiently. So rather than having to package your data into a single database object or broadcast a request for many individual data elements that correspond to a given range of application data (e.g. time0, time1, time2, . . ., timeN), a globally ordered storage system, like FoundationDB, can generate a single range request to the database for the matching data. And internally, FoundationDB can further optimize requests by knowing which data resides on which machines, so there’s no need to broadcast the data request to all machines in the cluster.

 Global indexing also makes a huge difference in terms of application complexity and database efficiency. Many non-relational databases provide node-specific indexing and secondary indexing, but if you wanted global indexes, you would have to build those at the application level to ensure the index and related data get updated atomically.
@@ -144,7 +144,7 @@ Parameters and client bindings
 ------------------------------

 Automatic TLS certificate refresh
---------------------------------
+---------------------------------

 The TLS certificate will be automatically refreshed on a configurable cadence. The server will inspect the CA, certificate, and key files in the specified locations periodically, and will begin using the new versions if the following criteria are met:
@@ -351,4 +351,4 @@ A verification string of::
 Would pass, and:

 * Require that the Subject has a Subject Alternative Name extension, which has one or more members of type DNS that begins with the value ``prod.``.
-* Require that the Subject has a Subject Alternative Name extension, which has one or more members of type DNS that ends with the value ``.com``.
+* Require that the Subject has a Subject Alternative Name extension, which has one or more members of type DNS that ends with the value ``.org``.
@@ -75,4 +75,4 @@ Transactions guarantee the *durability* of writes (the "D" in ACID). This guaran
 Transactions are the future of NoSQL
 ====================================

-As NoSQL databases become more broadly used for a wide variety of purposes, more applications built on them employ non-trivial concurrency from multiple clients. Without adequate concurrency control, all the traditional problems of concurrency re-emerge and create a significant burden for application developers. ACID transactions simplify concurrency for developers by providing serializable operations that can be composed to properly engineer application software. If you're building an application that needs to be scalable and you don't have transactions, you will eventually be burned. Fortunately, the scalability, fault-tolerance, and performance of NoSQL databases are still achievable with transactions. The choice to use transactions is ultimately not a matter of fundamental tradeoffs but of sound engineering. As the technology matures, transactions will form a foundational capability for future NoSQL databases.
+As NoSQL databases become more broadly used for a wide variety of purposes, more applications built on them employ non-trivial concurrency from multiple clients. Without adequate concurrency control, all the traditional problems of concurrency re-emerge and create a significant burden for application developers. ACID transactions simplify concurrency for developers by providing strictly serializable operations that can be composed to properly engineer application software. If you're building an application that needs to be scalable and you don't have transactions, you will eventually be burned. Fortunately, the scalability, fault-tolerance, and performance of NoSQL databases are still achievable with transactions. The choice to use transactions is ultimately not a matter of fundamental tradeoffs but of sound engineering. As the technology matures, transactions will form a foundational capability for future NoSQL databases.
@@ -1450,7 +1450,7 @@ ACTOR Future<Void> updateAgentPollRate(Database src, std::string rootKey, std::s
 }

 ACTOR Future<Void> statusUpdateActor(Database statusUpdateDest, std::string name, enumProgramExe exe, double *pollDelay, Database taskDest = Database(),
-                                     std::string id = g_nondeterministic_random->randomUniqueID().toString()) {
+                                     std::string id = nondeterministicRandom()->randomUniqueID().toString()) {
 	state std::string metaKey = layerStatusMetaPrefixRange.begin.toString() + "json/" + name;
 	state std::string rootKey = backupStatusPrefixRange.begin.toString() + name + "/json";
 	state std::string instanceKey = rootKey + "/" + "agent-" + id;
@@ -1491,7 +1491,7 @@ ACTOR Future<Void> statusUpdateActor(Database statusUpdateDest, std::string name
 		}
 	}

-	wait(delay(CLIENT_KNOBS->BACKUP_STATUS_DELAY * ( ( 1.0 - CLIENT_KNOBS->BACKUP_STATUS_JITTER ) + 2 * g_random->random01() * CLIENT_KNOBS->BACKUP_STATUS_JITTER )));
+	wait(delay(CLIENT_KNOBS->BACKUP_STATUS_DELAY * ( ( 1.0 - CLIENT_KNOBS->BACKUP_STATUS_JITTER ) + 2 * deterministicRandom()->random01() * CLIENT_KNOBS->BACKUP_STATUS_JITTER )));

 	// Now that status was written at least once by this process (and hopefully others), start the poll rate control updater if it wasn't started yet
 	if(!pollRateUpdater.isValid() && pollDelay != nullptr)
@@ -1506,7 +1506,7 @@ ACTOR Future<Void> statusUpdateActor(Database statusUpdateDest, std::string name

 ACTOR Future<Void> runDBAgent(Database src, Database dest) {
 	state double pollDelay = 1.0 / CLIENT_KNOBS->BACKUP_AGGREGATE_POLL_RATE;
-	std::string id = g_nondeterministic_random->randomUniqueID().toString();
+	std::string id = nondeterministicRandom()->randomUniqueID().toString();
 	state Future<Void> status = statusUpdateActor(src, "dr_backup", EXE_DR_AGENT, &pollDelay, dest, id);
 	state Future<Void> status_other = statusUpdateActor(dest, "dr_backup_dest", EXE_DR_AGENT, &pollDelay, dest, id);
@@ -1947,7 +1947,7 @@ Reference<IBackupContainer> openBackupContainer(const char *name, std::string de
 	return c;
 }

-ACTOR Future<Void> runRestore(std::string destClusterFile, std::string originalClusterFile, std::string tagName, std::string container, Standalone<VectorRef<KeyRangeRef>> ranges, Version targetVersion, std::string targetTimestamp, bool performRestore, bool verbose, bool waitForDone, std::string addPrefix, std::string removePrefix) {
+ACTOR Future<Void> runRestore(Database db, std::string originalClusterFile, std::string tagName, std::string container, Standalone<VectorRef<KeyRangeRef>> ranges, Version targetVersion, std::string targetTimestamp, bool performRestore, bool verbose, bool waitForDone, std::string addPrefix, std::string removePrefix) {
 	if(ranges.empty()) {
 		ranges.push_back_deep(ranges.arena(), normalKeys);
 	}
@@ -1957,16 +1957,6 @@ ACTOR Future<Void> runRestore(std::string destClusterFile, std::string originalC
 		throw restore_error();
 	}

-	if(destClusterFile.empty()) {
-		fprintf(stderr, "Restore destination cluster file must be specified explicitly.\n");
-		throw restore_error();
-	}
-
-	if(!fileExists(destClusterFile)) {
-		fprintf(stderr, "Restore destination cluster file '%s' does not exist.\n", destClusterFile.c_str());
-		throw restore_error();
-	}
-
 	state Optional<Database> origDb;

 	// Resolve targetTimestamp if given
@@ -1988,7 +1978,6 @@ ACTOR Future<Void> runRestore(std::string destClusterFile, std::string originalC
 	}

 	try {
-		state Database db = Database::createDatabase(destClusterFile, Database::API_VERSION_LATEST);
 		state FileBackupAgent backupAgent;

 		state Reference<IBackupContainer> bc = openBackupContainer(exeRestore.toString().c_str(), container);
@@ -3404,9 +3393,26 @@ int main(int argc, char* argv[]) {
 				return FDB_EXIT_ERROR;
 			}

+			if(restoreClusterFileDest.empty()) {
+				fprintf(stderr, "Restore destination cluster file must be specified explicitly.\n");
+				return FDB_EXIT_ERROR;
+			}
+
+			if(!fileExists(restoreClusterFileDest)) {
+				fprintf(stderr, "Restore destination cluster file '%s' does not exist.\n", restoreClusterFileDest.c_str());
+				return FDB_EXIT_ERROR;
+			}
+
+			try {
+				db = Database::createDatabase(restoreClusterFileDest, Database::API_VERSION_LATEST);
+			} catch(Error &e) {
+				fprintf(stderr, "Restore destination cluster file '%s' invalid: %s\n", restoreClusterFileDest.c_str(), e.what());
+				return FDB_EXIT_ERROR;
+			}
+
 			switch(restoreType) {
 				case RESTORE_START:
-					f = stopAfter( runRestore(restoreClusterFileDest, restoreClusterFileOrig, tagName, restoreContainer, backupKeys, restoreVersion, restoreTimestamp, !dryRun, !quietDisplay, waitForDone, addPrefix, removePrefix) );
+					f = stopAfter( runRestore(db, restoreClusterFileOrig, tagName, restoreContainer, backupKeys, restoreVersion, restoreTimestamp, !dryRun, !quietDisplay, waitForDone, addPrefix, removePrefix) );
 					break;
 				case RESTORE_WAIT:
 					f = stopAfter( success(ba.waitRestore(db, KeyRef(tagName), true)) );
@@ -1,5 +1,5 @@
 <?xml version="1.0" encoding="utf-8"?>
-<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+<Project DefaultTargets="Build" ToolsVersion="14.1" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
   <Import Project="$(SolutionDir)versions.target" />
   <PropertyGroup Condition="'$(Release)' != 'true' ">
     <PreReleaseDecoration>-PRERELEASE</PreReleaseDecoration>
@@ -37,12 +37,12 @@
   <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|X64'" Label="Configuration">
     <ConfigurationType>Application</ConfigurationType>
     <CharacterSet>MultiByte</CharacterSet>
-    <PlatformToolset>v140_xp</PlatformToolset>
+    <PlatformToolset>v141</PlatformToolset>
   </PropertyGroup>
   <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|X64'" Label="Configuration">
     <ConfigurationType>Application</ConfigurationType>
     <CharacterSet>MultiByte</CharacterSet>
-    <PlatformToolset>v140_xp</PlatformToolset>
+    <PlatformToolset>v141</PlatformToolset>
   </PropertyGroup>
   <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
   <ImportGroup Label="ExtensionSettings">
@@ -66,6 +66,7 @@
     </Lib>
     <ClCompile>
       <PreprocessorDefinitions>FDB_VT_VERSION="$(Version)$(PreReleaseDecoration)";FDB_VT_PACKAGE_NAME="$(PackageName)";%(PreprocessorDefinitions)</PreprocessorDefinitions>
+      <LanguageStandard>stdcpp17</LanguageStandard>
     </ClCompile>
   </ItemDefinitionGroup>
   <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|X64'">
@@ -82,6 +83,7 @@
       <AdditionalIncludeDirectories>%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
       <MultiProcessorCompilation>true</MultiProcessorCompilation>
       <AdditionalOptions>/bigobj @../flow/no_intellisense.opt %(AdditionalOptions)</AdditionalOptions>
+      <LanguageStandard>stdcpp17</LanguageStandard>
     </ClCompile>
     <Link>
       <SubSystem>Console</SubSystem>
@@ -107,6 +109,7 @@
       <FavorSizeOrSpeed>Speed</FavorSizeOrSpeed>
       <BufferSecurityCheck>false</BufferSecurityCheck>
       <MinimalRebuild>false</MinimalRebuild>
+      <LanguageStandard>stdcpp17</LanguageStandard>
     </ClCompile>
     <Link>
       <SubSystem>Console</SubSystem>
@@ -470,6 +470,10 @@ void initHelp() {
 		"include all|<ADDRESS>*",
 		"permit previously-excluded servers to rejoin the database",
 		"If `all' is specified, the excluded servers list is cleared.\n\nFor each IP address or IP:port pair in <ADDRESS>*, removes any matching exclusions from the excluded servers list. (A specified IP will match all IP:* exclusion entries)");
+	helpMap["snapshot"] = CommandHelp(
+		"snapshot <BINARY-PATH>:<ARG1=VAL1>,<ARG2=VAL2>,...",
+		"snapshot the database",
+		"invokes binary provided in binary-path with the arg,value pairs on TLog, Storage and Coordinators nodes. UID is a reserved ARG key.");
 	helpMap["setclass"] = CommandHelp(
 		"setclass <ADDRESS> <unset|storage|transaction|default>",
 		"change the class of a process",
@@ -2121,6 +2125,11 @@ ACTOR Future<bool> exclude( Database db, std::vector<StringRef> tokens, Referenc
 	return false;
 }

+ACTOR Future<bool> createSnapshot(Database db, StringRef snapCmd) {
+	wait(makeInterruptable(mgmtSnapCreate(db, snapCmd)));
+	return false;
+}
+
 ACTOR Future<bool> setClass( Database db, std::vector<StringRef> tokens ) {
 	if( tokens.size() == 1 ) {
 		vector<ProcessData> _workers = wait( makeInterruptable(getWorkers(db)) );
@@ -2540,7 +2549,7 @@ ACTOR Future<int> cli(CLIOptions opt, LineNoise* plinenoise) {
 		warn = checkStatus( timeWarning(5.0, "\nWARNING: Long delay (Ctrl-C to interrupt)\n"), ccf );

 		try {
-			state UID randomID = g_random->randomUniqueID();
+			state UID randomID = deterministicRandom()->randomUniqueID();
 			TraceEvent(SevInfo, "CLICommandLog", randomID).detail("Command", line);

 			bool malformed, partial;
@ -2720,6 +2729,17 @@ ACTOR Future<int> cli(CLIOptions opt, LineNoise* plinenoise) {
|
|||
continue;
|
||||
}
|
||||
|
||||
if (tokencmp(tokens[0], "snapshot")) {
|
||||
if (tokens.size() != 2) {
|
||||
printUsage(tokens[0]);
|
||||
is_error = true;
|
||||
} else {
|
||||
bool err = wait(createSnapshot(db, tokens[1]));
|
||||
if (err) is_error = true;
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
if (tokencmp(tokens[0], "setclass")) {
|
||||
if (tokens.size() != 3 && tokens.size() != 1) {
|
||||
printUsage(tokens[0]);
|
||||
|
|
|
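
The fdbcli changes above wire up a new `snapshot` command in three coordinated pieces: a CommandHelp entry, a `createSnapshot` actor that forwards to `mgmtSnapCreate`, and a token-dispatch branch that validates the argument count before running. A minimal stand-alone sketch of that dispatch shape follows; the names (CommandHelp, runSnapshot) are illustrative stand-ins, not the FDB originals:

    // Hedged sketch of fdbcli's command dispatch: help-table entry plus a
    // token-count check before invoking the command implementation.
    #include <cstdio>
    #include <map>
    #include <string>
    #include <vector>

    struct CommandHelp {
        std::string usage;
        std::string shortDesc;
    };

    static std::map<std::string, CommandHelp> helpMap;

    static bool runSnapshot(const std::string& args) {
        // Placeholder for the real work (fdbcli calls into mgmtSnapCreate).
        std::printf("would snapshot with args: %s\n", args.c_str());
        return false; // false == no error, matching the diff's convention
    }

    int main() {
        helpMap["snapshot"] = { "snapshot <BINARY-PATH>:<ARG1=VAL1>,...", "snapshot the database" };

        std::vector<std::string> tokens = { "snapshot", "/path/to/snap.sh:UID=reserved" };
        bool is_error = false;
        if (tokens[0] == "snapshot") {
            if (tokens.size() != 2) {
                std::printf("Usage: %s\n", helpMap["snapshot"].usage.c_str());
                is_error = true;
            } else {
                is_error = runSnapshot(tokens[1]);
            }
        }
        return is_error ? 1 : 0;
    }
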
@@ -1,5 +1,5 @@
 <?xml version="1.0" encoding="utf-8"?>
-<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+<Project DefaultTargets="Build" ToolsVersion="14.1" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
   <Import Project="$(SolutionDir)versions.target" />
   <PropertyGroup Condition="'$(Release)' != 'true' ">
     <PreReleaseDecoration>-PRERELEASE</PreReleaseDecoration>
@@ -39,14 +39,14 @@
     <ConfigurationType>Application</ConfigurationType>
     <UseDebugLibraries>true</UseDebugLibraries>
     <CharacterSet>MultiByte</CharacterSet>
-    <PlatformToolset>v140_xp</PlatformToolset>
+    <PlatformToolset>v141</PlatformToolset>
   </PropertyGroup>
   <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|X64'" Label="Configuration">
     <ConfigurationType>Application</ConfigurationType>
     <UseDebugLibraries>false</UseDebugLibraries>
     <WholeProgramOptimization>false</WholeProgramOptimization>
     <CharacterSet>MultiByte</CharacterSet>
-    <PlatformToolset>v140_xp</PlatformToolset>
+    <PlatformToolset>v141</PlatformToolset>
   </PropertyGroup>
   <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
   <ImportGroup Label="ExtensionSettings">
@@ -73,6 +73,7 @@
   <ItemDefinitionGroup>
     <ClCompile>
       <PreprocessorDefinitions>FDB_VT_VERSION="$(Version)$(PreReleaseDecoration)";FDB_VT_PACKAGE_NAME="$(PackageName)";%(PreprocessorDefinitions)</PreprocessorDefinitions>
+      <LanguageStandard>stdcpp17</LanguageStandard>
     </ClCompile>
   </ItemDefinitionGroup>
   <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|X64'">
@@ -87,6 +88,7 @@
       <MinimalRebuild>false</MinimalRebuild>
       <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
       <AdditionalOptions> @../flow/no_intellisense.opt %(AdditionalOptions)</AdditionalOptions>
+      <LanguageStandard>stdcpp17</LanguageStandard>
     </ClCompile>
     <Link>
       <SubSystem>Console</SubSystem>
@@ -113,6 +115,7 @@
       <BufferSecurityCheck>false</BufferSecurityCheck>
       <EnableEnhancedInstructionSet>StreamingSIMDExtensions2</EnableEnhancedInstructionSet>
       <AdditionalOptions> @../flow/no_intellisense.opt %(AdditionalOptions)</AdditionalOptions>
+      <LanguageStandard>stdcpp17</LanguageStandard>
     </ClCompile>
     <Link>
       <SubSystem>Console</SubSystem>

@@ -39,7 +39,7 @@ ACTOR Future<Void> sendStuff(int id, Reference<IRateControl> t, int bytes) {
     state double ts = timer();
     state int total = 0;
     while(total < bytes) {
-        state int r = std::min<int>(g_random->randomInt(0,1000), bytes - total);
+        state int r = std::min<int>(deterministicRandom()->randomInt(0,1000), bytes - total);
         wait(t->getAllowance(r));
         total += r;
     }
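
Most hunks in this merge are a mechanical rename from the global `g_random` pointer to a `deterministicRandom()` accessor. The point of an accessor over a raw global is that the shared engine can be constructed lazily from an explicit seed, so a simulation run is replayable. A hedged sketch of that pattern follows; it is illustrative only, and flow's real IRandom/DeterministicRandom classes differ:

    // Minimal sketch of a shared, explicitly seeded random source behind an
    // accessor function (my construction, not flow's implementation).
    #include <cstdint>
    #include <random>

    class DeterministicRandom {
        std::mt19937_64 engine;
    public:
        explicit DeterministicRandom(uint64_t seed) : engine(seed) {}
        double random01() { return std::uniform_real_distribution<double>(0.0, 1.0)(engine); }
        int randomInt(int min, int maxPlusOne) {
            return std::uniform_int_distribution<int>(min, maxPlusOne - 1)(engine);
        }
        bool coinflip() { return random01() < 0.5; }
    };

    // One shared instance; in practice the seed would come from configuration.
    DeterministicRandom* deterministicRandom() {
        static DeterministicRandom rng(/*seed=*/1);
        return &rng;
    }
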
@@ -429,7 +429,7 @@ ACTOR Future<Void> readCommitted(Database cx, PromiseStream<RangeResultWithVersi
             values.resize(values.arena(), values.size() / 2);
             values.more = true;
             // Half of the time wait for this tr to expire so that the next read is at a different version
-            if(g_random->random01() < 0.5)
+            if(deterministicRandom()->random01() < 0.5)
                 wait(delay(6.0));
         }
 
@@ -488,7 +488,7 @@ ACTOR Future<Void> readCommitted(Database cx, PromiseStream<RCGroup> results, Fu
             rangevalue.resize(rangevalue.arena(), rangevalue.size() / 2);
             rangevalue.more = true;
             // Half of the time wait for this tr to expire so that the next read is at a different version
-            if(g_random->random01() < 0.5)
+            if(deterministicRandom()->random01() < 0.5)
                 wait(delay(6.0));
         }
 
@ -334,14 +334,14 @@ public:
|
|||
}
|
||||
|
||||
Future<Reference<IBackupFile>> writeLogFile(Version beginVersion, Version endVersion, int blockSize) {
|
||||
return writeFile(logVersionFolderString(beginVersion) + format("log,%lld,%lld,%s,%d", beginVersion, endVersion, g_random->randomUniqueID().toString().c_str(), blockSize));
|
||||
return writeFile(logVersionFolderString(beginVersion) + format("log,%lld,%lld,%s,%d", beginVersion, endVersion, deterministicRandom()->randomUniqueID().toString().c_str(), blockSize));
|
||||
}
|
||||
|
||||
Future<Reference<IBackupFile>> writeRangeFile(Version snapshotBeginVersion, int snapshotFileCount, Version fileVersion, int blockSize) {
|
||||
std::string fileName = format("range,%" PRId64 ",%s,%d", fileVersion, g_random->randomUniqueID().toString().c_str(), blockSize);
|
||||
std::string fileName = format("range,%" PRId64 ",%s,%d", fileVersion, deterministicRandom()->randomUniqueID().toString().c_str(), blockSize);
|
||||
|
||||
// In order to test backward compatibility in simulation, sometimes write to the old path format
|
||||
if(g_network->isSimulated() && g_random->coinflip()) {
|
||||
if(g_network->isSimulated() && deterministicRandom()->coinflip()) {
|
||||
return writeFile(old_rangeVersionFolderString(fileVersion) + fileName);
|
||||
}
|
||||
|
||||
|
@ -1224,7 +1224,7 @@ public:
|
|||
if(g_network->isSimulated()) {
|
||||
if(!fileExists(fullPath))
|
||||
throw file_not_found();
|
||||
std::string uniquePath = fullPath + "." + g_random->randomUniqueID().toString() + ".lnk";
|
||||
std::string uniquePath = fullPath + "." + deterministicRandom()->randomUniqueID().toString() + ".lnk";
|
||||
unlink(uniquePath.c_str());
|
||||
ASSERT(symlink(basename(path).c_str(), uniquePath.c_str()) == 0);
|
||||
fullPath = uniquePath = uniquePath;
|
||||
|
@ -1243,16 +1243,16 @@ public:
|
|||
blockSize = atoi(path.substr(lastComma + 1).c_str());
|
||||
}
|
||||
if(blockSize <= 0) {
|
||||
blockSize = g_random->randomInt(1e4, 1e6);
|
||||
blockSize = deterministicRandom()->randomInt(1e4, 1e6);
|
||||
}
|
||||
if(g_random->random01() < .01) {
|
||||
blockSize /= g_random->randomInt(1, 3);
|
||||
if(deterministicRandom()->random01() < .01) {
|
||||
blockSize /= deterministicRandom()->randomInt(1, 3);
|
||||
}
|
||||
|
||||
return map(f, [=](Reference<IAsyncFile> fr) {
|
||||
int readAhead = g_random->randomInt(0, 3);
|
||||
int reads = g_random->randomInt(1, 3);
|
||||
int cacheSize = g_random->randomInt(0, 3);
|
||||
int readAhead = deterministicRandom()->randomInt(0, 3);
|
||||
int reads = deterministicRandom()->randomInt(1, 3);
|
||||
int cacheSize = deterministicRandom()->randomInt(0, 3);
|
||||
return Reference<IAsyncFile>(new AsyncFileReadAheadCache(fr, blockSize, readAhead, reads, cacheSize));
|
||||
});
|
||||
}
|
||||
|
@ -1295,7 +1295,7 @@ public:
|
|||
int flags = IAsyncFile::OPEN_NO_AIO | IAsyncFile::OPEN_CREATE | IAsyncFile::OPEN_ATOMIC_WRITE_AND_CREATE | IAsyncFile::OPEN_READWRITE;
|
||||
std::string fullPath = joinPath(m_path, path);
|
||||
platform::createDirectory(parentDirectory(fullPath));
|
||||
std::string temp = fullPath + "." + g_random->randomUniqueID().toString() + ".temp";
|
||||
std::string temp = fullPath + "." + deterministicRandom()->randomUniqueID().toString() + ".temp";
|
||||
Future<Reference<IAsyncFile>> f = IAsyncFileSystem::filesystem()->open(temp, flags, 0644);
|
||||
return map(f, [=](Reference<IAsyncFile> f) {
|
||||
return Reference<IBackupFile>(new BackupFile(path, f, fullPath));
|
||||
|
@ -1730,7 +1730,7 @@ ACTOR Future<Void> writeAndVerifyFile(Reference<IBackupContainer> c, Reference<I
|
|||
if(size > 0) {
|
||||
content = makeString(size);
|
||||
for(int i = 0; i < content.size(); ++i)
|
||||
mutateString(content)[i] = (uint8_t)g_random->randomInt(0, 256);
|
||||
mutateString(content)[i] = (uint8_t)deterministicRandom()->randomInt(0, 256);
|
||||
|
||||
wait(f->append(content.begin(), content.size()));
|
||||
}
|
||||
|
@ -1749,7 +1749,7 @@ ACTOR Future<Void> writeAndVerifyFile(Reference<IBackupContainer> c, Reference<I
|
|||
|
||||
// Randomly advance version by up to 1 second of versions
|
||||
Version nextVersion(Version v) {
|
||||
int64_t increment = g_random->randomInt64(1, CLIENT_KNOBS->CORE_VERSIONSPERSECOND);
|
||||
int64_t increment = deterministicRandom()->randomInt64(1, CLIENT_KNOBS->CORE_VERSIONSPERSECOND);
|
||||
return v + increment;
|
||||
}
|
||||
|
||||
|
@ -1773,20 +1773,20 @@ ACTOR Future<Void> testBackupContainer(std::string url) {
|
|||
state std::map<Version, int64_t> snapshotSizes;
|
||||
state int nRangeFiles = 0;
|
||||
state std::map<Version, std::string> logs;
|
||||
state Version v = g_random->randomInt64(0, std::numeric_limits<Version>::max() / 2);
|
||||
state Version v = deterministicRandom()->randomInt64(0, std::numeric_limits<Version>::max() / 2);
|
||||
|
||||
// List of sizes to use to test edge cases on underlying file implementations
|
||||
state std::vector<int> fileSizes = {0, 10000000, 5000005};
|
||||
|
||||
loop {
|
||||
state Version logStart = v;
|
||||
state int kvfiles = g_random->randomInt(0, 3);
|
||||
state int kvfiles = deterministicRandom()->randomInt(0, 3);
|
||||
|
||||
while(kvfiles > 0) {
|
||||
if(snapshots.empty()) {
|
||||
snapshots[v] = {};
|
||||
snapshotSizes[v] = 0;
|
||||
if(g_random->coinflip()) {
|
||||
if(deterministicRandom()->coinflip()) {
|
||||
v = nextVersion(v);
|
||||
}
|
||||
}
|
||||
|
@ -1799,7 +1799,7 @@ ACTOR Future<Void> testBackupContainer(std::string url) {
|
|||
snapshotSizes.rbegin()->second += size;
|
||||
writes.push_back(writeAndVerifyFile(c, range, size));
|
||||
|
||||
if(g_random->random01() < .2) {
|
||||
if(deterministicRandom()->random01() < .2) {
|
||||
writes.push_back(c->writeKeyspaceSnapshotFile(snapshots.rbegin()->second, snapshotSizes.rbegin()->second));
|
||||
snapshots[v] = {};
|
||||
snapshotSizes[v] = 0;
|
||||
|
@ -1809,7 +1809,7 @@ ACTOR Future<Void> testBackupContainer(std::string url) {
|
|||
--kvfiles;
|
||||
}
|
||||
|
||||
if(logStart == v || g_random->coinflip()) {
|
||||
if(logStart == v || deterministicRandom()->coinflip()) {
|
||||
v = nextVersion(v);
|
||||
}
|
||||
state Reference<IBackupFile> log = wait(c->writeLogFile(logStart, v, 10));
|
||||
|
@ -1818,7 +1818,7 @@ ACTOR Future<Void> testBackupContainer(std::string url) {
|
|||
writes.push_back(writeAndVerifyFile(c, log, size));
|
||||
|
||||
// Randomly stop after a snapshot has finished and all manually seeded file sizes have been used.
|
||||
if(fileSizes.empty() && !snapshots.empty() && snapshots.rbegin()->second.empty() && g_random->random01() < .2) {
|
||||
if(fileSizes.empty() && !snapshots.empty() && snapshots.rbegin()->second.empty() && deterministicRandom()->random01() < .2) {
|
||||
snapshots.erase(snapshots.rbegin()->first);
|
||||
break;
|
||||
}
|
||||
|
@ -1918,7 +1918,7 @@ TEST_CASE("/backup/containers_list") {
|
|||
TEST_CASE("/backup/time") {
|
||||
// test formatTime()
|
||||
for(int i = 0; i < 1000; ++i) {
|
||||
int64_t ts = g_random->randomInt64(0, std::numeric_limits<int32_t>::max());
|
||||
int64_t ts = deterministicRandom()->randomInt64(0, std::numeric_limits<int32_t>::max());
|
||||
ASSERT(BackupAgentBase::parseTime(BackupAgentBase::formatTime(ts)) == ts);
|
||||
}
|
||||
|
||||
|
|
|
@ -42,6 +42,15 @@ struct ClusterInterface {
|
|||
UID id() const { return openDatabase.getEndpoint().token; }
|
||||
NetworkAddress address() const { return openDatabase.getEndpoint().getPrimaryAddress(); }
|
||||
|
||||
bool hasMessage() {
|
||||
return openDatabase.getFuture().isReady() ||
|
||||
failureMonitoring.getFuture().isReady() ||
|
||||
databaseStatus.getFuture().isReady() ||
|
||||
ping.getFuture().isReady() ||
|
||||
getClientWorkers.getFuture().isReady() ||
|
||||
forceRecovery.getFuture().isReady();
|
||||
}
|
||||
|
||||
void initEndpoints() {
|
||||
openDatabase.getEndpoint( TaskClusterController );
|
||||
failureMonitoring.getEndpoint( TaskFailureMonitor );
|
||||
|
|
|
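
The new `hasMessage()` simply ORs together the readiness of each request stream's next future, so a caller can tell whether any work is pending without consuming it. A sketch of the same idea with standard futures follows; flow futures expose `isReady()` directly, and `wait_for(0s)` is the closest std equivalent:

    // Report whether any of a set of pending channels has a value ready,
    // without consuming it. Illustrative analogue, not flow code.
    #include <chrono>
    #include <future>
    #include <vector>

    bool anyReady(const std::vector<std::shared_future<int>>& channels) {
        for (const auto& f : channels) {
            if (f.valid() &&
                f.wait_for(std::chrono::seconds(0)) == std::future_status::ready)
                return true;  // at least one message is waiting
        }
        return false;
    }
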
@ -44,7 +44,8 @@ static const char* typeString[] = { "SetValue",
|
|||
"ByteMax",
|
||||
"MinV2",
|
||||
"AndV2",
|
||||
"CompareAndClear" };
|
||||
"CompareAndClear",
|
||||
"Exec" };
|
||||
|
||||
struct MutationRef {
|
||||
static const int OVERHEAD_BYTES = 12; //12 is the size of Header in MutationList entries
|
||||
|
@ -70,6 +71,9 @@ struct MutationRef {
|
|||
MinV2,
|
||||
AndV2,
|
||||
CompareAndClear,
|
||||
// ExecOp is always set with FIRST_IN_BATCH option to quickly identify
|
||||
// the op in a transaction batch while parsing it in TLog
|
||||
Exec,
|
||||
MAX_ATOMIC_OP
|
||||
};
|
||||
// This is stored this way for serialization purposes.
|
||||
|
|
|
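
The new `Exec` mutation type has to be added in two places that must stay index-aligned: the `typeString[]` table and the enum. A `static_assert` is a cheap compile-time guard against the two drifting apart; the sketch below is my addition, not something in the FDB source, and uses an abbreviated enum:

    // Keep a name table and its enum in lockstep (hedged sketch).
    enum Type { SetValue, CompareAndClear, Exec, MAX_ATOMIC_OP };  // abbreviated

    static const char* typeString[] = { "SetValue", "CompareAndClear", "Exec" };

    static_assert(sizeof(typeString) / sizeof(typeString[0]) == MAX_ATOMIC_OP,
                  "typeString[] must have one entry per mutation type");
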
@ -735,7 +735,7 @@ namespace dbBackup {
|
|||
Optional<Value> stopValue = wait(fStopValue);
|
||||
state Version stopVersionData = stopValue.present() ? BinaryReader::fromStringRef<Version>(stopValue.get(), Unversioned()) : -1;
|
||||
|
||||
if(endVersion - beginVersion > g_random->randomInt64(0, CLIENT_KNOBS->BACKUP_VERSION_DELAY)) {
|
||||
if(endVersion - beginVersion > deterministicRandom()->randomInt64(0, CLIENT_KNOBS->BACKUP_VERSION_DELAY)) {
|
||||
TraceEvent("DBA_CopyLogs").detail("BeginVersion", beginVersion).detail("ApplyVersion", applyVersion).detail("EndVersion", endVersion).detail("StopVersionData", stopVersionData).detail("LogUID", task->params[BackupAgentBase::keyConfigLogUid]);
|
||||
}
|
||||
|
||||
|
@ -939,7 +939,7 @@ namespace dbBackup {
|
|||
tr->set(task->params[BackupAgentBase::keyConfigLogUid].withPrefix(applyMutationsEndRange.begin), BinaryWriter::toValue(beginVersion, Unversioned()));
|
||||
Optional<Value> stopWhenDone = wait(fStopWhenDone);
|
||||
|
||||
if(endVersion - beginVersion > g_random->randomInt64(0, CLIENT_KNOBS->BACKUP_VERSION_DELAY)) {
|
||||
if(endVersion - beginVersion > deterministicRandom()->randomInt64(0, CLIENT_KNOBS->BACKUP_VERSION_DELAY)) {
|
||||
TraceEvent("DBA_CopyDiffLogs").detail("BeginVersion", beginVersion).detail("EndVersion", endVersion).detail("LogUID", task->params[BackupAgentBase::keyConfigLogUid]);
|
||||
}
|
||||
|
||||
|
@ -1478,7 +1478,7 @@ namespace dbBackup {
|
|||
if (existingDestUidValue.present()) {
|
||||
destUidValue = existingDestUidValue.get();
|
||||
} else {
|
||||
destUidValue = BinaryWriter::toValue(g_random->randomUniqueID(), Unversioned());
|
||||
destUidValue = BinaryWriter::toValue(deterministicRandom()->randomUniqueID(), Unversioned());
|
||||
srcTr->set(destUidLookupPath, destUidValue);
|
||||
}
|
||||
}
|
||||
|
@ -1830,7 +1830,7 @@ public:
|
|||
}
|
||||
|
||||
ACTOR static Future<Void> submitBackup(DatabaseBackupAgent* backupAgent, Reference<ReadYourWritesTransaction> tr, Key tagName, Standalone<VectorRef<KeyRangeRef>> backupRanges, bool stopWhenDone, Key addPrefix, Key removePrefix, bool lockDB, bool databasesInSync) {
|
||||
state UID logUid = g_random->randomUniqueID();
|
||||
state UID logUid = deterministicRandom()->randomUniqueID();
|
||||
state Key logUidValue = BinaryWriter::toValue(logUid, Unversioned());
|
||||
state UID logUidCurrent = wait(backupAgent->getLogUid(tr, tagName));
|
||||
|
||||
|
@ -1960,7 +1960,7 @@ public:
|
|||
checkAtomicSwitchOverConfig(srcStatus, destStatus, tagName);
|
||||
}
|
||||
|
||||
state UID logUid = g_random->randomUniqueID();
|
||||
state UID logUid = deterministicRandom()->randomUniqueID();
|
||||
state Key logUidValue = BinaryWriter::toValue(logUid, Unversioned());
|
||||
state UID logUidCurrent = wait(drAgent.getLogUid(backupAgent->taskBucket->src, tagName));
|
||||
|
||||
|
|
|
@ -60,7 +60,7 @@ public:
|
|||
|
||||
Database clone() const { return Database(new DatabaseContext( cluster, clientInfo, clientInfoMonitor, dbId, taskID, clientLocality, enableLocalityLoadBalance, lockAware, apiVersion )); }
|
||||
|
||||
pair<KeyRange,Reference<LocationInfo>> getCachedLocation( const KeyRef&, bool isBackward = false );
|
||||
std::pair<KeyRange,Reference<LocationInfo>> getCachedLocation( const KeyRef&, bool isBackward = false );
|
||||
bool getCachedLocations( const KeyRangeRef&, vector<std::pair<KeyRange,Reference<LocationInfo>>>&, int limit, bool reverse );
|
||||
Reference<LocationInfo> setCachedLocation( const KeyRangeRef&, const vector<struct StorageServerInterface>& );
|
||||
void invalidateCache( const KeyRef&, bool isBackward = false );
|
||||
|
@ -148,6 +148,7 @@ public:
|
|||
int64_t transactionsMaybeCommitted;
|
||||
int64_t transactionsResourceConstrained;
|
||||
int64_t transactionsProcessBehind;
|
||||
int64_t transactionWaitsForFullRecovery;
|
||||
ContinuousSample<double> latencies, readLatencies, commitLatencies, GRVLatencies, mutationsPerCommit, bytesPerCommit;
|
||||
|
||||
int outstandingWatches;
|
||||
|
|
|
@ -21,11 +21,14 @@
|
|||
#ifndef FDBCLIENT_FDBTYPES_H
|
||||
#define FDBCLIENT_FDBTYPES_H
|
||||
|
||||
#include <algorithm>
|
||||
#include <set>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
#include "flow/flow.h"
|
||||
#include "fdbclient/Knobs.h"
|
||||
|
||||
using std::vector;
|
||||
using std::pair;
|
||||
typedef int64_t Version;
|
||||
typedef uint64_t LogEpoch;
|
||||
typedef uint64_t Sequence;
|
||||
|
@ -761,7 +764,7 @@ static bool addressExcluded( std::set<AddressExclusion> const& exclusions, Netwo
|
|||
struct ClusterControllerPriorityInfo {
|
||||
enum DCFitness { FitnessPrimary, FitnessRemote, FitnessPreferred, FitnessUnknown, FitnessBad }; //cannot be larger than 7 because of leader election mask
|
||||
|
||||
static DCFitness calculateDCFitness(Optional<Key> const& dcId, vector<Optional<Key>> const& dcPriority) {
|
||||
static DCFitness calculateDCFitness(Optional<Key> const& dcId, std::vector<Optional<Key>> const& dcPriority) {
|
||||
if(!dcPriority.size()) {
|
||||
return FitnessUnknown;
|
||||
} else if(dcPriority.size() == 1) {
|
||||
|
|
|
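
This header drops its `using std::vector;` and `using std::pair;` declarations, qualifies the affected call sites with `std::`, and adds the standard includes it now needs explicitly. The reason is that a using-declaration in a widely included header leaks into every translation unit and can collide with other names; a small compile-only illustration, under assumed names:

    // Why header-level using-declarations are hazardous (illustrative only).
    #include <vector>
    using std::vector;   // imagine this in a shared header: every includer sees it

    namespace mylib { template <class T> class vector {}; }
    using namespace mylib;

    int main() {
        // vector<int> v;  // error if uncommented: ambiguous between
        //                 // std::vector and mylib::vector
        return 0;
    }
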
@ -143,7 +143,7 @@ ACTOR Future<Void> failureMonitorClientLoop(
|
|||
double slowThreshold = .200 + waitfor + FLOW_KNOBS->MAX_BUGGIFIED_DELAY;
|
||||
double warnAlwaysThreshold = CLIENT_KNOBS->FAILURE_MIN_DELAY/2;
|
||||
|
||||
if (elapsed > slowThreshold && g_random->random01() < elapsed / warnAlwaysThreshold) {
|
||||
if (elapsed > slowThreshold && deterministicRandom()->random01() < elapsed / warnAlwaysThreshold) {
|
||||
TraceEvent(elapsed > warnAlwaysThreshold ? SevWarnAlways : SevWarn, "FailureMonitorClientSlow").detail("Elapsed", elapsed).detail("Expected", waitfor);
|
||||
}
|
||||
|
||||
|
@ -167,6 +167,11 @@ ACTOR Future<Void> failureMonitorClientLoop(
|
|||
}
|
||||
|
||||
ACTOR Future<Void> failureMonitorClient( Reference<AsyncVar<Optional<struct ClusterInterface>>> ci, bool trackMyStatus ) {
|
||||
TraceEvent("FailureMonitorStart").detail("IsClient", FlowTransport::transport().isClient());
|
||||
if (FlowTransport::transport().isClient()) {
|
||||
wait(Never());
|
||||
}
|
||||
|
||||
state SimpleFailureMonitor* monitor = static_cast<SimpleFailureMonitor*>( &IFailureMonitor::failureMonitor() );
|
||||
state Reference<FailureMonitorClientState> fmState = Reference<FailureMonitorClientState>(new FailureMonitorClientState());
|
||||
auto localAddr = g_network->getLocalAddresses();
|
||||
|
|
|
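
The guard added above parks the actor forever on client processes via `wait(Never())` rather than returning, so anything waiting on the returned future never proceeds to the server-only monitoring loop. A plain-function analogue of "block forever" follows, purely illustrative:

    // Park a code path permanently, as wait(Never()) does (hedged sketch).
    #include <condition_variable>
    #include <mutex>

    void failureMonitorClientSketch(bool isClient) {
        if (isClient) {
            std::mutex m;
            std::condition_variable cv;
            std::unique_lock<std::mutex> lk(m);
            cv.wait(lk, [] { return false; });  // condition never satisfied: never wakes
        }
        // ... server-side monitoring loop would follow ...
    }

    int main() { failureMonitorClientSketch(false); }  // false: guard skipped, returns
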
@@ -1171,7 +1171,7 @@ namespace fileBackup {
             // Start writing a new file after verifying this task should keep running as of a new read version (which must be >= outVersion)
             outVersion = values.second;
             // block size must be at least large enough for 3 max size keys and 2 max size values + overhead so 250k conservatively.
-            state int blockSize = BUGGIFY ? g_random->randomInt(250e3, 4e6) : CLIENT_KNOBS->BACKUP_RANGEFILE_BLOCK_SIZE;
+            state int blockSize = BUGGIFY ? deterministicRandom()->randomInt(250e3, 4e6) : CLIENT_KNOBS->BACKUP_RANGEFILE_BLOCK_SIZE;
             state Version snapshotBeginVersion;
             state int64_t snapshotRangeFileCount;
 
@@ -1600,7 +1600,7 @@ namespace fileBackup {
             state std::vector<KeyRange> rangesToAdd;
 
             // Limit number of tasks added per transaction
-            int taskBatchSize = BUGGIFY ? g_random->randomInt(1, countShardsToDispatch + 1) : CLIENT_KNOBS->BACKUP_DISPATCH_ADDTASK_SIZE;
+            int taskBatchSize = BUGGIFY ? deterministicRandom()->randomInt(1, countShardsToDispatch + 1) : CLIENT_KNOBS->BACKUP_DISPATCH_ADDTASK_SIZE;
             int added = 0;
 
             while(countShardsToDispatch > 0 && added < taskBatchSize && shardMap.size() > 0) {
@@ -1696,7 +1696,7 @@ namespace fileBackup {
                     Version scheduledVersion = invalidVersion;
                     // If the next dispatch version is in the future, choose a random version at which to start the new task.
                     if(nextDispatchVersion > recentReadVersion)
-                        scheduledVersion = recentReadVersion + g_random->random01() * (nextDispatchVersion - recentReadVersion);
+                        scheduledVersion = recentReadVersion + deterministicRandom()->random01() * (nextDispatchVersion - recentReadVersion);
 
                     // Range tasks during the initial snapshot should run at a higher priority
                     int priority = latestSnapshotEndVersion.present() ? 0 : 1;
@@ -1862,7 +1862,7 @@ namespace fileBackup {
             }
 
             // Block size must be at least large enough for 1 max size key, 1 max size value, and overhead, so conservatively 125k.
-            state int blockSize = BUGGIFY ? g_random->randomInt(125e3, 4e6) : CLIENT_KNOBS->BACKUP_LOGFILE_BLOCK_SIZE;
+            state int blockSize = BUGGIFY ? deterministicRandom()->randomInt(125e3, 4e6) : CLIENT_KNOBS->BACKUP_LOGFILE_BLOCK_SIZE;
             state Reference<IBackupFile> outFile = wait(bc->writeLogFile(beginVersion, endVersion, blockSize));
             state LogFileWriter logFile(outFile, blockSize);
 
@@ -2631,7 +2631,7 @@ namespace fileBackup {
 
         state int start = 0;
         state int end = data.size();
-        state int dataSizeLimit = BUGGIFY ? g_random->randomInt(256 * 1024, 10e6) : CLIENT_KNOBS->RESTORE_WRITE_TX_SIZE;
+        state int dataSizeLimit = BUGGIFY ? deterministicRandom()->randomInt(256 * 1024, 10e6) : CLIENT_KNOBS->RESTORE_WRITE_TX_SIZE;
 
         tr->reset();
         loop{
@@ -2818,7 +2818,7 @@ namespace fileBackup {
 
         state int start = 0;
         state int end = data.size();
-        state int dataSizeLimit = BUGGIFY ? g_random->randomInt(256 * 1024, 10e6) : CLIENT_KNOBS->RESTORE_WRITE_TX_SIZE;
+        state int dataSizeLimit = BUGGIFY ? deterministicRandom()->randomInt(256 * 1024, 10e6) : CLIENT_KNOBS->RESTORE_WRITE_TX_SIZE;
 
         tr->reset();
         loop {
@@ -3578,7 +3578,7 @@ public:
             prevConfig.clear(tr);
         }
 
-        state BackupConfig config(g_random->randomUniqueID());
+        state BackupConfig config(deterministicRandom()->randomUniqueID());
        state UID uid = config.getUid();
 
         // This check will ensure that current backupUid is later than the last backup Uid
@@ -3631,7 +3631,7 @@ public:
                 if (existingDestUidValue.present()) {
                     destUidValue = existingDestUidValue.get();
                 } else {
-                    destUidValue = BinaryWriter::toValue(g_random->randomUniqueID(), Unversioned());
+                    destUidValue = BinaryWriter::toValue(deterministicRandom()->randomUniqueID(), Unversioned());
                     tr->set(destUidLookupPath, destUidValue);
                 }
             }
@@ -3922,6 +3922,8 @@ public:
             doc.setKey("Tag", tag.tagName);
 
             if(uidAndAbortedFlag.present()) {
+                doc.setKey("UID", uidAndAbortedFlag.get().first.toString());
+
                 state BackupConfig config(uidAndAbortedFlag.get().first);
 
                 state EBackupState backupState = wait(config.stateEnum().getD(tr, false, EBackupState::STATE_NEVERRAN));
@@ -4288,7 +4290,7 @@ public:
         //Lock src, record commit version
         state Transaction tr(cx);
         state Version commitVersion;
-        state UID randomUid = g_random->randomUniqueID();
+        state UID randomUid = deterministicRandom()->randomUniqueID();
         loop {
             try {
                 // We must get a commit version so add a conflict range that won't likely cause conflicts
@@ -4368,7 +4370,7 @@ const int BackupAgentBase::logHeaderSize = 12;
 const int FileBackupAgent::dataFooterSize = 20;
 
 Future<Version> FileBackupAgent::restore(Database cx, Optional<Database> cxOrig, Key tagName, Key url, Standalone<VectorRef<KeyRangeRef>> ranges, bool waitForComplete, Version targetVersion, bool verbose, Key addPrefix, Key removePrefix, bool lockDB) {
-    return FileBackupAgentImpl::restore(this, cx, cxOrig, tagName, url, ranges, waitForComplete, targetVersion, verbose, addPrefix, removePrefix, lockDB, g_random->randomUniqueID());
+    return FileBackupAgentImpl::restore(this, cx, cxOrig, tagName, url, ranges, waitForComplete, targetVersion, verbose, addPrefix, removePrefix, lockDB, deterministicRandom()->randomUniqueID());
 }
 
 Future<Version> FileBackupAgent::atomicRestore(Database cx, Key tagName, Standalone<VectorRef<KeyRangeRef>> ranges, Key addPrefix, Key removePrefix) {
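
Several hunks above use the `BUGGIFY` pattern: in simulation, a tuned constant is occasionally replaced by a random value to exercise edge cases that the production default never hits. A hedged sketch of the shape, with a plain bool standing in for flow's BUGGIFY macro and a locally seeded engine standing in for the simulator's random source:

    // Occasionally randomize a tuned constant under test (illustrative).
    #include <cstdio>
    #include <random>

    std::mt19937_64 rng(1);  // deterministic seed, as in simulation

    int pickBlockSize(bool buggify, int tunedDefault) {
        if (buggify) {
            // random block size in [250k, 4M), mirroring the range-file hunk
            return std::uniform_int_distribution<int>(250000, 3999999)(rng);
        }
        return tunedDefault;
    }

    int main() { std::printf("%d\n", pickBlockSize(true, 1000000)); }
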
@@ -323,7 +323,7 @@ namespace HTTP {
     try {
         state std::string requestID;
         if(!requestIDHeader.empty()) {
-            requestID = g_random->randomUniqueID().toString();
+            requestID = deterministicRandom()->randomUniqueID().toString();
             requestID = requestID.insert(20, "-");
             requestID = requestID.insert(16, "-");
             requestID = requestID.insert(12, "-");
@@ -392,17 +392,17 @@ namespace HTTP {
                 responseID = iid->second;
             }
             event.detail("RequestIDReceived", responseID);
-            if(requestID != responseID) {
+
+            // If the response code is 5xx (server error) then a response ID is not expected
+            // so a missing id will be ignored but a mismatching id will still be an error.
+            bool serverError = r->code >= 500 && r->code < 600;
+
+            // If request/response IDs do not match and either this is not a server error
+            // or it is but the response ID is not empty then log an error.
+            if(requestID != responseID && (!serverError || !responseID.empty()) ) {
                 err = http_bad_request_id();
 
-                // Log a non-debug a error
-                Severity sev = SevError;
-                // If the response code is 5xx (server error) and the responseID is empty then just warn
-                if(responseID.empty() && r->code >= 500 && r->code < 600) {
-                    sev = SevWarnAlways;
-                }
-
-                TraceEvent(sev, "HTTPRequestFailedIDMismatch")
+                TraceEvent(SevError, "HTTPRequestFailedIDMismatch")
                     .detail("DebugID", conn->getDebugID())
                     .detail("RemoteAddress", conn->getPeerAddress())
                     .detail("Verb", verb)
@@ -433,6 +433,7 @@ namespace HTTP {
             return r;
         } catch(Error &e) {
             double elapsed = timer() - send_start;
+            // A bad_request_id error would have already been logged in verbose mode before err is thrown above.
             if(CLIENT_KNOBS->HTTP_VERBOSE_LEVEL > 0 && e.code() != error_code_http_bad_request_id) {
                 printf("[%s] HTTP *ERROR*=%s early=%d, time=%fs %s %s contentLen=%d [%d out]\n",
                     conn->getDebugID().toString().c_str(), e.name(), earlyResponse, elapsed, verb.c_str(), resource.c_str(), contentLen, total_sent);
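
The reworked check above changes the policy: a 5xx response may legitimately omit the request-ID header (the server never processed the request), but a response that echoes the wrong ID is always an error. The decision reduces to one pure predicate, verified below against each case; the function name is mine, not FDB's:

    // The request/response ID policy from the hunk above, as a pure function.
    #include <cassert>
    #include <string>

    bool idMismatch(const std::string& requestID, const std::string& responseID, int code) {
        bool serverError = code >= 500 && code < 600;
        return requestID != responseID && (!serverError || !responseID.empty());
    }

    int main() {
        assert(!idMismatch("abc", "abc", 200));  // match: fine
        assert(idMismatch("abc", "xyz", 200));   // mismatch: error
        assert(!idMismatch("abc", "", 503));     // missing ID on 5xx: tolerated
        assert(idMismatch("abc", "xyz", 503));   // wrong ID on 5xx: still an error
        assert(idMismatch("abc", "", 200));      // missing ID on success: error
    }
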
@@ -44,10 +44,10 @@ ClientKnobs::ClientKnobs(bool randomize) {
 
     // wrong_shard_server sometimes comes from the only nonfailed server, so we need to avoid a fast spin
 
-    init( WRONG_SHARD_SERVER_DELAY, .01 ); if( randomize && BUGGIFY ) WRONG_SHARD_SERVER_DELAY = g_random->random01(); // FLOW_KNOBS->PREVENT_FAST_SPIN_DELAY; // SOMEDAY: This delay can limit performance of retrieving data when the cache is mostly wrong (e.g. dumping the database after a test)
-    init( FUTURE_VERSION_RETRY_DELAY, .01 ); if( randomize && BUGGIFY ) FUTURE_VERSION_RETRY_DELAY = g_random->random01();// FLOW_KNOBS->PREVENT_FAST_SPIN_DELAY;
+    init( WRONG_SHARD_SERVER_DELAY, .01 ); if( randomize && BUGGIFY ) WRONG_SHARD_SERVER_DELAY = deterministicRandom()->random01(); // FLOW_KNOBS->PREVENT_FAST_SPIN_DELAY; // SOMEDAY: This delay can limit performance of retrieving data when the cache is mostly wrong (e.g. dumping the database after a test)
+    init( FUTURE_VERSION_RETRY_DELAY, .01 ); if( randomize && BUGGIFY ) FUTURE_VERSION_RETRY_DELAY = deterministicRandom()->random01();// FLOW_KNOBS->PREVENT_FAST_SPIN_DELAY;
     init( REPLY_BYTE_LIMIT, 80000 );
-    init( DEFAULT_BACKOFF, .01 ); if( randomize && BUGGIFY ) DEFAULT_BACKOFF = g_random->random01();
+    init( DEFAULT_BACKOFF, .01 ); if( randomize && BUGGIFY ) DEFAULT_BACKOFF = deterministicRandom()->random01();
     init( DEFAULT_MAX_BACKOFF, 1.0 );
     init( BACKOFF_GROWTH_RATE, 2.0 );
     init( RESOURCE_CONSTRAINED_MAX_BACKOFF, 30.0 );
@@ -185,8 +185,8 @@ ClientKnobs::ClientKnobs(bool randomize) {
     init(CSI_SAMPLING_PROBABILITY, -1.0);
     init(CSI_SIZE_LIMIT, std::numeric_limits<int64_t>::max());
     if (randomize && BUGGIFY) {
-        CSI_SAMPLING_PROBABILITY = g_random->random01() / 10; // rand range 0 - 0.1
-        CSI_SIZE_LIMIT = g_random->randomInt(1024 * 1024, 100 * 1024 * 1024); // 1 MB - 100 MB
+        CSI_SAMPLING_PROBABILITY = deterministicRandom()->random01() / 10; // rand range 0 - 0.1
+        CSI_SIZE_LIMIT = deterministicRandom()->randomInt(1024 * 1024, 100 * 1024 * 1024); // 1 MB - 100 MB
     }
     init(CSI_STATUS_DELAY, 10.0 );
 
@@ -194,5 +194,5 @@ ClientKnobs::ClientKnobs(bool randomize) {
     init( CONSISTENCY_CHECK_ONE_ROUND_TARGET_COMPLETION_TIME, 7 * 24 * 60 * 60 ); // 7 days
 
     // TLS related
-    init( CHECK_CONNECTED_COORDINATOR_NUM_DELAY, 1.0 ); if( randomize && BUGGIFY ) CHECK_CONNECTED_COORDINATOR_NUM_DELAY = g_random->random01() * 60.0; // In seconds
+    init( CHECK_CONNECTED_COORDINATOR_NUM_DELAY, 1.0 ); if( randomize && BUGGIFY ) CHECK_CONNECTED_COORDINATOR_NUM_DELAY = deterministicRandom()->random01() * 60.0; // In seconds
 }
@@ -298,14 +298,14 @@ ACTOR Future<ConfigurationResult::Type> changeConfig( Database cx, std::map<std:
     std::string initKey = configKeysPrefix.toString() + "initialized";
     state bool creating = m.count( initKey ) != 0;
     if (creating) {
-        m[initIdKey.toString()] = g_random->randomUniqueID().toString();
+        m[initIdKey.toString()] = deterministicRandom()->randomUniqueID().toString();
         if (!isCompleteConfiguration(m)) {
             return ConfigurationResult::INCOMPLETE_CONFIGURATION;
         }
     }
 
-    state Future<Void> tooLong = delay(4.5);
-    state Key versionKey = BinaryWriter::toValue(g_random->randomUniqueID(),Unversioned());
+    state Future<Void> tooLong = delay(60);
+    state Key versionKey = BinaryWriter::toValue(deterministicRandom()->randomUniqueID(),Unversioned());
     state bool oldReplicationUsesDcId = false;
     loop {
         try {
@@ -763,7 +763,7 @@ ConfigureAutoResult parseConfig( StatusObject const& status ) {
 
 ACTOR Future<ConfigurationResult::Type> autoConfig( Database cx, ConfigureAutoResult conf ) {
     state Transaction tr(cx);
-    state Key versionKey = BinaryWriter::toValue(g_random->randomUniqueID(),Unversioned());
+    state Key versionKey = BinaryWriter::toValue(deterministicRandom()->randomUniqueID(),Unversioned());
 
     if(!conf.address_class.size())
         return ConfigurationResult::INCOMPLETE_CONFIGURATION; //FIXME: correct return type
@@ -790,7 +790,7 @@ ACTOR Future<ConfigurationResult::Type> autoConfig( Database cx, ConfigureAutoRe
             }
 
             if(conf.address_class.size())
-                tr.set(processClassChangeKey, g_random->randomUniqueID().toString());
+                tr.set(processClassChangeKey, deterministicRandom()->randomUniqueID().toString());
 
             if(conf.auto_logs != conf.old_logs)
                 tr.set(configKeysPrefix.toString() + "auto_logs", format("%d", conf.auto_logs));
@@ -950,7 +950,7 @@ ACTOR Future<CoordinatorsResult::Type> changeQuorum( Database cx, Reference<IQuo
         if ( old.coordinators() == desiredCoordinators && old.clusterKeyName() == newName)
             return retries ? CoordinatorsResult::SUCCESS : CoordinatorsResult::SAME_NETWORK_ADDRESSES;
 
-        state ClusterConnectionString conn( desiredCoordinators, StringRef( newName + ':' + g_random->randomAlphaNumeric( 32 ) ) );
+        state ClusterConnectionString conn( desiredCoordinators, StringRef( newName + ':' + deterministicRandom()->randomAlphaNumeric( 32 ) ) );
 
         if(g_network->isSimulated()) {
             for(int i = 0; i < (desiredCoordinators.size()/2)+1; i++) {
@@ -1124,7 +1124,7 @@ struct AutoQuorumChange : IQuorumChange {
 
     void addDesiredWorkers(vector<NetworkAddress>& chosen, const vector<ProcessData>& workers, int desiredCount, const std::set<AddressExclusion>& excluded) {
         vector<ProcessData> remainingWorkers(workers);
-        g_random->randomShuffle(remainingWorkers);
+        deterministicRandom()->randomShuffle(remainingWorkers);
 
         std::partition(remainingWorkers.begin(), remainingWorkers.end(), [](const ProcessData& data) { return (data.processClass == ProcessClass::CoordinatorClass); });
 
@@ -1197,8 +1197,8 @@ Reference<IQuorumChange> autoQuorumChange( int desired ) { return Reference<IQuo
 
 ACTOR Future<Void> excludeServers( Database cx, vector<AddressExclusion> servers ) {
     state Transaction tr(cx);
-    state Key versionKey = BinaryWriter::toValue(g_random->randomUniqueID(),Unversioned());
-    state std::string excludeVersionKey = g_random->randomUniqueID().toString();
+    state Key versionKey = BinaryWriter::toValue(deterministicRandom()->randomUniqueID(),Unversioned());
+    state std::string excludeVersionKey = deterministicRandom()->randomUniqueID().toString();
 
     loop {
         try {
@@ -1227,8 +1227,8 @@ ACTOR Future<Void> includeServers( Database cx, vector<AddressExclusion> servers
 ACTOR Future<Void> includeServers( Database cx, vector<AddressExclusion> servers ) {
     state bool includeAll = false;
     state Transaction tr(cx);
-    state Key versionKey = BinaryWriter::toValue(g_random->randomUniqueID(),Unversioned());
-    state std::string excludeVersionKey = g_random->randomUniqueID().toString();
+    state Key versionKey = BinaryWriter::toValue(deterministicRandom()->randomUniqueID(),Unversioned());
+    state std::string excludeVersionKey = deterministicRandom()->randomUniqueID().toString();
 
     loop {
         try {
@@ -1301,7 +1301,7 @@ ACTOR Future<Void> setClass( Database cx, AddressExclusion server, ProcessClass
     }
 
     if(foundChange)
-        tr.set(processClassChangeKey, g_random->randomUniqueID().toString());
+        tr.set(processClassChangeKey, deterministicRandom()->randomUniqueID().toString());
 
     wait( tr.commit() );
     return Void();
@@ -1474,6 +1474,29 @@ ACTOR Future<Void> waitForExcludedServers( Database cx, vector<AddressExclusion>
     }
 }
 
+ACTOR Future<Void> mgmtSnapCreate(Database cx, StringRef snapCmd) {
+    state int retryCount = 0;
+
+    loop {
+        state UID snapUID = deterministicRandom()->randomUniqueID();
+        try {
+            wait(snapCreate(cx, snapCmd, snapUID));
+            printf("Snapshots tagged with UID: %s, check logs for status\n", snapUID.toString().c_str());
+            TraceEvent("SnapCreateSucceeded").detail("snapUID", snapUID);
+            break;
+        } catch (Error& e) {
+            ++retryCount;
+            TraceEvent(retryCount > 3 ? SevWarn : SevInfo, "SnapCreateFailed").error(e);
+            if (retryCount > 3) {
+                fprintf(stderr, "Snapshot create failed, %d (%s)."
+                        " Please cleanup any instance level snapshots created.\n", e.code(), e.what());
+                throw;
+            }
+        }
+    }
+    return Void();
+}
+
 ACTOR Future<Void> waitForFullReplication( Database cx ) {
     state ReadYourWritesTransaction tr(cx);
     loop {
@@ -1851,7 +1874,7 @@ TEST_CASE("/ManagementAPI/AutoQuorumChange/checkLocality") {
         workers.push_back(data);
     }
 
-    auto noAssignIndex = g_random->randomInt(0, workers.size());
+    auto noAssignIndex = deterministicRandom()->randomInt(0, workers.size());
     workers[noAssignIndex].processClass._class = ProcessClass::CoordinatorClass;
 
     change.addDesiredWorkers(chosen, workers, 5, excluded);
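
The new `mgmtSnapCreate` actor retries the snapshot with a fresh UID on each attempt, logs quietly at first, and only surfaces the failure to the caller after the retry budget is spent. The control-flow shape, as a hedged stand-alone sketch (trySnap and SnapError are stand-ins for snapCreate and flow's Error type):

    // Bounded retry with a fresh attempt ID per try and escalating severity.
    #include <cstdio>
    #include <stdexcept>

    struct SnapError : std::runtime_error { using std::runtime_error::runtime_error; };

    bool trySnap(int attempt) { return attempt >= 2; }  // pretend the third try works

    void mgmtSnapCreateSketch() {
        int retryCount = 0;
        for (;;) {
            int snapUID = retryCount;  // stands in for deterministicRandom()->randomUniqueID()
            if (trySnap(snapUID)) {
                std::printf("Snapshots tagged with UID: %d\n", snapUID);
                break;
            }
            ++retryCount;
            std::fprintf(stderr, "%s: snapshot attempt %d failed\n",
                         retryCount > 3 ? "WARN" : "INFO", retryCount);
            if (retryCount > 3)
                throw SnapError("snapshot create failed; clean up instance-level snapshots");
        }
    }

    int main() { mgmtSnapCreateSketch(); }
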