Merge branch 'release-6.1' into merge-release-6.1-into-master

# Conflicts:
#	documentation/sphinx/source/release-notes.rst
#	fdbserver/QuietDatabase.actor.cpp
#	versions.target
A.J. Beamon 2019-05-23 09:28:45 -07:00
commit d29c7e4c9b
35 changed files with 278 additions and 339 deletions

View File

@ -26,7 +26,7 @@ project(foundationdb
set(CMAKE_MODULE_PATH "${CMAKE_MODULE_PATH};${PROJECT_SOURCE_DIR}/cmake")
message (STATUS "${PROJECT_SOURCE_DIR} ${PROJECT_BINARY_DIR}")
if("${PROJECT_SOURCE_DIR}" STREQUAL "${PROJECT_BINARY_DIR}")
message(FATAL_ERROR "In-source builds are forbidden, unsupported, and stupid!!")
message(FATAL_ERROR "In-source builds are forbidden")
endif()
if(NOT CMAKE_BUILD_TYPE AND NOT CMAKE_CONFIGURATION_TYPES)

View File

@ -6,8 +6,7 @@ set(SRCS
FDBLibTLSSession.cpp
FDBLibTLSSession.h
FDBLibTLSVerify.cpp
FDBLibTLSVerify.h
ReferenceCounted.h)
FDBLibTLSVerify.h)
add_library(FDBLibTLS ${SRCS})
target_link_libraries(FDBLibTLS PUBLIC LibreSSL boost_target)
target_link_libraries(FDBLibTLS PUBLIC LibreSSL boost_target PRIVATE flow)

View File

@ -24,7 +24,7 @@
#pragma once
#include "fdbrpc/ITLSPlugin.h"
#include "ReferenceCounted.h"
#include "flow/FastRef.h"
#include <tls.h>

View File

@ -24,7 +24,7 @@
#pragma once
#include "fdbrpc/ITLSPlugin.h"
#include "ReferenceCounted.h"
#include "flow/FastRef.h"
#include "FDBLibTLS/FDBLibTLSPlugin.h"
#include "FDBLibTLS/FDBLibTLSVerify.h"

View File

@ -19,6 +19,8 @@
*/
#include "FDBLibTLS/FDBLibTLSSession.h"
#include "flow/flow.h"
#include "flow/Trace.h"
#include <openssl/bio.h>
@ -60,7 +62,7 @@ static ssize_t tls_write_func(struct tls *ctx, const void *buf, size_t buflen, v
FDBLibTLSSession::FDBLibTLSSession(Reference<FDBLibTLSPolicy> policy, bool is_client, const char* servername, TLSSendCallbackFunc send_func, void* send_ctx, TLSRecvCallbackFunc recv_func, void* recv_ctx, void* uidptr) :
tls_ctx(NULL), tls_sctx(NULL), is_client(is_client), policy(policy), send_func(send_func), send_ctx(send_ctx),
recv_func(recv_func), recv_ctx(recv_ctx), handshake_completed(false) {
recv_func(recv_func), recv_ctx(recv_ctx), handshake_completed(false), lastVerifyFailureLogged(0.0) {
if (uidptr)
uid = * (UID*) uidptr;
@ -342,8 +344,11 @@ bool FDBLibTLSSession::verify_peer() {
if (!rc) {
// log the various failure reasons
for (std::string reason : verify_failure_reasons) {
TraceEvent("FDBLibTLSVerifyFailure", uid).suppressFor(1.0).detail("Reason", reason);
if(now() - lastVerifyFailureLogged > 1.0) {
for (std::string reason : verify_failure_reasons) {
lastVerifyFailureLogged = now();
TraceEvent("FDBLibTLSVerifyFailure", uid).detail("Reason", reason);
}
}
}
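For context on the change above: the previous code relied on TraceEvent's suppressFor() to rate-limit each event individually, while the new code tracks a lastVerifyFailureLogged timestamp so that all failure reasons from one verification attempt are logged together at most once per second. A minimal sketch of that throttling pattern in isolation (the names, clock source, and logging sink here are assumptions, not the FDB implementation):

#include <chrono>
#include <iostream>
#include <string>
#include <vector>

static double nowSeconds() {
    using namespace std::chrono;
    return duration<double>(steady_clock::now().time_since_epoch()).count();
}

struct ThrottledVerifyLogger {
    double lastLogged = 0.0;    // time of the last emitted batch
    double minInterval = 1.0;   // at most one batch per second

    void logFailures(const std::vector<std::string>& reasons) {
        if (nowSeconds() - lastLogged > minInterval) {
            lastLogged = nowSeconds();
            // Emit every reason from this verification attempt as a single batch.
            for (const std::string& reason : reasons)
                std::cerr << "TLSVerifyFailure: " << reason << "\n";
        }
    }
};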

View File

@ -24,7 +24,7 @@
#pragma once
#include "fdbrpc/ITLSPlugin.h"
#include "ReferenceCounted.h"
#include "flow/FastRef.h"
#include "FDBLibTLS/FDBLibTLSPolicy.h"
#include "FDBLibTLS/FDBLibTLSVerify.h"
@ -61,6 +61,7 @@ struct FDBLibTLSSession : ITLSSession, ReferenceCounted<FDBLibTLSSession> {
bool handshake_completed;
UID uid;
double lastVerifyFailureLogged;
};
#endif /* FDB_LIBTLS_SESSION_H */

View File

@ -25,7 +25,7 @@
#include <stdint.h>
#include "ReferenceCounted.h"
#include "flow/FastRef.h"
#include <map>
#include <string>

View File

@ -1,108 +0,0 @@
/*
* ReferenceCounted.h
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FDB_REFERENCE_COUNTED_H
#define FDB_REFERENCE_COUNTED_H
#pragma once
#include <stdlib.h>
template <class T>
struct ReferenceCounted {
void addref() { ++referenceCount; }
void delref() { if (--referenceCount == 0) { delete (T*)this; } }
ReferenceCounted() : referenceCount(1) {}
private:
ReferenceCounted(const ReferenceCounted&) = delete;
void operator=(const ReferenceCounted&) = delete;
int32_t referenceCount;
};
template <class P>
void addref(P* ptr) { ptr->addref(); }
template <class P>
void delref(P* ptr) { ptr->delref(); }
template <class P>
struct Reference {
Reference() : ptr(NULL) {}
explicit Reference( P* ptr ) : ptr(ptr) {}
static Reference<P> addRef( P* ptr ) { ptr->addref(); return Reference(ptr); }
Reference(const Reference& r) : ptr(r.getPtr()) { if (ptr) addref(ptr); }
Reference(Reference && r) : ptr(r.getPtr()) { r.ptr = NULL; }
template <class Q>
Reference(const Reference<Q>& r) : ptr(r.getPtr()) { if (ptr) addref(ptr); }
template <class Q>
Reference(Reference<Q> && r) : ptr(r.getPtr()) { r.setPtrUnsafe(NULL); }
~Reference() { if (ptr) delref(ptr); }
Reference& operator=(const Reference& r) {
P* oldPtr = ptr;
P* newPtr = r.ptr;
if (oldPtr != newPtr) {
if (newPtr) addref(newPtr);
ptr = newPtr;
if (oldPtr) delref(oldPtr);
}
return *this;
}
Reference& operator=(Reference&& r) {
P* oldPtr = ptr;
P* newPtr = r.ptr;
if (oldPtr != newPtr) {
r.ptr = NULL;
ptr = newPtr;
if (oldPtr) delref(oldPtr);
}
return *this;
}
void clear() {
P* oldPtr = ptr;
if (oldPtr) {
ptr = NULL;
delref(oldPtr);
}
}
P* operator->() const { return ptr; }
P& operator*() const { return *ptr; }
P* getPtr() const { return ptr; }
void setPtrUnsafe( P* p ) { ptr = p; }
P* extractPtr() { auto *p = ptr; ptr = NULL; return p; }
bool boolean_test() const { return ptr != 0; }
private:
P *ptr;
};
template <class P>
bool operator==( const Reference<P>& lhs, const Reference<P>& rhs ) {
return lhs.getPtr() == rhs.getPtr();
}
#endif /* FDB_REFERENCE_COUNTED_H */
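The deleted header above implements intrusive reference counting: a type derives from ReferenceCounted<T>, starts with a count of one, and is handed around via Reference<P> handles; this commit points callers at the equivalent flow/FastRef.h instead. A minimal usage sketch against the definitions shown above, using a hypothetical Connection type:

struct Connection : ReferenceCounted<Connection> {
    explicit Connection(int fd) : fd(fd) {}
    int fd;
};

void example() {
    // The constructor leaves the count at 1, so the raw pointer is adopted
    // directly rather than add-ref'd.
    Reference<Connection> a(new Connection(42));
    Reference<Connection> b = a;   // copy: count becomes 2
    b.clear();                     // count drops back to 1
}                                  // `a` leaves scope: count reaches 0 and the Connection is deleted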

View File

@ -31,7 +31,6 @@
#include <boost/circular_buffer.hpp>
#include "fdbrpc/ITLSPlugin.h"
#include "ReferenceCounted.h"
#include "FDBLibTLS/FDBLibTLSPlugin.h"

View File

@ -28,7 +28,6 @@
#include <openssl/objects.h>
#include "fdbrpc/ITLSPlugin.h"
#include "ReferenceCounted.h"
#include "FDBLibTLS/FDBLibTLSPlugin.h"
#include "FDBLibTLS/FDBLibTLSPolicy.h"

View File

@ -36,7 +36,7 @@ extensions = [
]
# Add any paths that contain templates here, relative to this directory.
templates_path = [sys.prefix + '/_templates']
templates_path = []
# The suffix of source filenames.
source_suffix = '.rst'
@ -143,7 +143,7 @@ html_title = 'FoundationDB ' + version
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = [sys.prefix + '/_static']
html_static_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.

View File

@ -232,7 +232,7 @@ The procedures for adding and removing machines can be combined into a recipe fo
Converting an existing cluster to use TLS
=========================================
A FoundationDB cluster has the option of supporting :doc:`Transport Layer Security (TLS) <tls>`. To enable TLS on an existing, non-TLS cluster, see :ref:`Converting a running cluster <converting-existing-cluster>`.
A FoundationDB cluster has the option of supporting :doc:`Transport Layer Security (TLS) <tls>`. To enable TLS on an existing, non-TLS cluster, see :ref:`Converting a running cluster <converting-existing-cluster-after-6.1>`.
.. _administration-monitoring-cluster-status:

View File

@ -37,12 +37,22 @@
.. |node-subspace| replace:: FIXME
.. |content-subspace| replace:: FIXME
.. |allow-manual-prefixes| replace:: FIXME
.. |retry-limit-transaction-option| replace:: FIXME
.. |timeout-transaction-option| replace:: FIXME
.. |max-retry-delay-transaction-option| replace:: FIXME
.. |snapshot-ryw-enable-transaction-option| replace:: FIXME
.. |snapshot-ryw-disable-transaction-option| replace:: FIXME
.. |snapshot-ryw-enable-database-option| replace:: FIXME
.. |snapshot-ryw-disable-database-option| replace:: FIXME
.. |retry-limit-database-option| replace:: FIXME
.. |max-retry-delay-database-option| replace:: FIXME
.. |timeout-database-option| replace:: FIXME
.. include:: api-common.rst.inc
.. |future-warning| replace:: :data:`future` must represent a result of the appropriate type (i.e. must have been returned by a function documented as returning this type), or the results are undefined.
.. |future-warning| replace:: ``future`` must represent a result of the appropriate type (i.e. must have been returned by a function documented as returning this type), or the results are undefined.
.. |future-get-return1| replace:: Returns zero if :data:`future` is ready and not in an error state, and a non-zero :ref:`error code <developer-guide-error-codes>` otherwise
.. |future-get-return1| replace:: Returns zero if ``future`` is ready and not in an error state, and a non-zero :ref:`error code <developer-guide-error-codes>` otherwise
.. |future-get-return2| replace:: (in which case the value of any out parameter is undefined)
@ -74,9 +84,9 @@
.. |snapshot| replace:: Non-zero if this is a :ref:`snapshot read <snapshots>`.
.. |sets-and-clears1| replace:: Modify the database snapshot represented by :data:`transaction`
.. |sets-and-clears1| replace:: Modify the database snapshot represented by ``transaction``
.. |sets-and-clears2| replace:: The modification affects the actual database only if :data:`transaction` is later committed with :func:`fdb_transaction_commit()`.
.. |sets-and-clears2| replace:: The modification affects the actual database only if ``transaction`` is later committed with :func:`fdb_transaction_commit()`.
=====
C API
@ -105,7 +115,7 @@ The FoundationDB C bindings are provided as a shared object which may be linked
Linux
-----
When linking against ``libfdb_c.so``, you must also link against ``libm``, ``libpthread`` and ``librt``. These dependencies will be resolved by the dynamic linker when using this API via :func:`dlopen()` or an FFI.
When linking against ``libfdb_c.so``, you must also link against ``libm``, ``libpthread`` and ``librt``. These dependencies will be resolved by the dynamic linker when using this API via ``dlopen()`` or an FFI.
macOS
--------
@ -115,37 +125,37 @@ When linking against ``libfdb_c.dylib``, no additional libraries are required.
API versioning
==============
Prior to including ``fdb_c.h``, you must define the :macro:`FDB_API_VERSION` macro. This, together with the :func:`fdb_select_api_version()` function, allows programs written against an older version of the API to compile and run with newer versions of the C library. The current version of the FoundationDB C API is |api-version|. ::
Prior to including ``fdb_c.h``, you must define the ``FDB_API_VERSION`` macro. This, together with the :func:`fdb_select_api_version()` function, allows programs written against an older version of the API to compile and run with newer versions of the C library. The current version of the FoundationDB C API is |api-version|. ::
#define FDB_API_VERSION 610
#include <foundationdb/fdb_c.h>
.. function:: fdb_error_t fdb_select_api_version(int version)
Must be called before any other API functions. :data:`version` must be less than or equal to :macro:`FDB_API_VERSION` (and should almost always be equal).
Must be called before any other API functions. ``version`` must be less than or equal to ``FDB_API_VERSION`` (and should almost always be equal).
Language bindings implemented in C which themselves expose API versioning will usually pass the version requested by the application, instead of always passing :macro:`FDB_API_VERSION`.
Language bindings implemented in C which themselves expose API versioning will usually pass the version requested by the application, instead of always passing ``FDB_API_VERSION``.
Passing a version less than :macro:`FDB_API_VERSION` will cause the API to behave as it did in the older version.
Passing a version less than ``FDB_API_VERSION`` will cause the API to behave as it did in the older version.
It is an error to call this function after it has returned successfully. It is not thread safe, and if called from more than one thread simultaneously its behavior is undefined.
.. note:: This is actually implemented as a macro. If you are accessing this API via :func:`dlopen()` or an FFI, you will need to use :func:`fdb_select_api_version_impl()`.
.. note:: This is actually implemented as a macro. If you are accessing this API via ``dlopen()`` or an FFI, you will need to use :func:`fdb_select_api_version_impl()`.
.. warning:: |api-version-multi-version-warning|
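A minimal sketch of the selection call this section describes, with a simple check of the result (the fprintf reaction is only an illustration; fdb_get_error() turns the code into a printable message):

#define FDB_API_VERSION 610
#include <foundationdb/fdb_c.h>
#include <cstdio>

int init_fdb_api() {
    fdb_error_t err = fdb_select_api_version(FDB_API_VERSION);
    if (err) {
        // Commonly means the loaded client library is older than the requested version.
        std::fprintf(stderr, "fdb_select_api_version failed: %s\n", fdb_get_error(err));
        return -1;
    }
    return 0;
}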
.. function:: fdb_error_t fdb_select_api_version_impl(int runtime_version, int header_version)
This is the actual entry point called by the :func:`fdb_select_api_version` macro. It should never be called directly from C, but if you are accessing this API via :func:`dlopen()` or an FFI, you will need to use it. ``fdb_select_api_version(v)`` is equivalent to ``fdb_select_api_version_impl(v, FDB_API_VERSION)``.
This is the actual entry point called by the :func:`fdb_select_api_version` macro. It should never be called directly from C, but if you are accessing this API via ``dlopen()`` or an FFI, you will need to use it. ``fdb_select_api_version(v)`` is equivalent to ``fdb_select_api_version_impl(v, FDB_API_VERSION)``.
It is an error to call this function after it has returned successfully. It is not thread safe, and if called from more than one thread simultaneously its behavior is undefined.
:data:`runtime_version`
The version of run-time behavior the API is requested to provide. Must be less than or equal to :data:`header_version`, and should almost always be equal.
``runtime_version``
The version of run-time behavior the API is requested to provide. Must be less than or equal to ``header_version``, and should almost always be equal.
Language bindings which themselves expose API versioning will usually pass the version requested by the application.
:data:`header_version`
``header_version``
The version of the ABI (application binary interface) that the calling code expects to find in the shared library. If you are using an FFI, this *must* correspond to the version of the API you are using as a reference (currently |api-version|). For example, the number of arguments that a function takes may be affected by this value, and an incorrect value is unlikely to yield success.
.. warning:: |api-version-multi-version-warning|
@ -263,7 +273,7 @@ See :ref:`developer-guide-programming-with-futures` for further (language-indepe
.. type:: FDBCallback
A pointer to a function which takes :type:`FDBFuture*` and :type:`void*` and returns :type:`void`.
A pointer to a function which takes :type:`FDBFuture*` and ``void*`` and returns ``void``.
.. function:: void fdb_future_release_memory(FDBFuture* future)
@ -279,13 +289,13 @@ See :ref:`developer-guide-programming-with-futures` for further (language-indepe
.. function:: fdb_error_t fdb_future_get_version(FDBFuture* future, int64_t* out_version)
Extracts a value of type version from an :type:`FDBFuture` into a caller-provided variable of type :type:`int64_t`. |future-warning|
Extracts a version from an :type:`FDBFuture` into a caller-provided variable of type ``int64_t``. |future-warning|
|future-get-return1| |future-get-return2|.
.. function:: fdb_error_t fdb_future_get_key(FDBFuture* future, uint8_t const** out_key, int* out_key_length)
Extracts a value of type key from an :type:`FDBFuture` into caller-provided variables of type :type:`uint8_t*` (a pointer to the beginning of the key) and :type:`int` (the length of the key). |future-warning|
Extracts a key from an :type:`FDBFuture` into caller-provided variables of type ``uint8_t*`` (a pointer to the beginning of the key) and ``int`` (the length of the key). |future-warning|
|future-get-return1| |future-get-return2|.
@ -297,13 +307,13 @@ See :ref:`developer-guide-programming-with-futures` for further (language-indepe
|future-get-return1| |future-get-return2|.
:data:`*out_present`
``*out_present``
Set to non-zero if (and only if) the requested value was present in the database. (If zero, the other outputs are meaningless.)
:data:`*out_value`
``*out_value``
Set to point to the first byte of the value.
:data:`*out_value_length`
``*out_value_length``
Set to the length of the value (in bytes).
|future-memory-mine|
@ -314,10 +324,10 @@ See :ref:`developer-guide-programming-with-futures` for further (language-indepe
|future-get-return1| |future-get-return2|.
:data:`*out_strings`
``*out_strings``
Set to point to the first string in the array.
:data:`*out_count`
``*out_count``
Set to the number of strings in the array.
|future-memory-mine|
@ -328,13 +338,13 @@ See :ref:`developer-guide-programming-with-futures` for further (language-indepe
|future-get-return1| |future-get-return2|.
:data:`*out_kv`
``*out_kv``
Set to point to the first :type:`FDBKeyValue` object in the array.
:data:`*out_count`
``*out_count``
Set to the number of :type:`FDBKeyValue` objects in the array.
:data:`*out_more`
``*out_more``
Set to true if (but not necessarily only if) values remain in the *key* range requested (possibly beyond the limits requested).
|future-memory-mine|
@ -350,17 +360,17 @@ See :ref:`developer-guide-programming-with-futures` for further (language-indepe
int value_length;
} FDBKeyValue;
:data:`key`
``key``
A pointer to a key.
:data:`key_length`
The length of the key pointed to by :data:`key`.
``key_length``
The length of the key pointed to by ``key``.
:data:`value`
``value``
A pointer to a value.
:data:`value_length`
The length of the value pointed to by :data:`value`.
``value_length``
The length of the value pointed to by ``value``.
Database
========
@ -375,10 +385,10 @@ An |database-blurb1| Modifications to a database are performed via transactions.
Creates a new database connected to the specified cluster. The caller assumes ownership of the :type:`FDBDatabase` object and must destroy it with :func:`fdb_database_destroy()`.
:data:`cluster_file_path`
``cluster_file_path``
A NULL-terminated string giving a local path of a :ref:`cluster file <foundationdb-cluster-file>` (often called 'fdb.cluster') which contains connection information for the FoundationDB cluster. If cluster_file_path is NULL or an empty string, then a :ref:`default cluster file <default-cluster-file>` will be used.
:data:`*out_database`
``*out_database``
Set to point to the newly created :type:`FDBDatabase`.
.. function:: void fdb_database_destroy(FDBDatabase* database)
@ -397,7 +407,7 @@ An |database-blurb1| Modifications to a database are performed via transactions.
Creates a new transaction on the given database. The caller assumes ownership of the :type:`FDBTransaction` object and must destroy it with :func:`fdb_transaction_destroy()`.
:data:`*out_transaction`
``*out_transaction``
Set to point to the newly created :type:`FDBTransaction`.
Transaction
@ -439,75 +449,75 @@ Applications must provide error handling and an appropriate retry loop around th
.. function:: FDBFuture* fdb_transaction_get(FDBTransaction* transaction, uint8_t const* key_name, int key_name_length, fdb_bool_t snapshot)
Reads a value from the database snapshot represented by :data:`transaction`.
Reads a value from the database snapshot represented by ``transaction``.
|future-return0| the value of :data:`key_name` in the database. |future-return1| call :func:`fdb_future_get_value()` to extract the value, |future-return2|
|future-return0| the value of ``key_name`` in the database. |future-return1| call :func:`fdb_future_get_value()` to extract the value, |future-return2|
See :func:`fdb_future_get_value()` to see exactly how results are unpacked. If :data:`key_name` is not present in the database, the result is not an error, but a zero for :data:`*out_present` returned from that function.
See :func:`fdb_future_get_value()` to see exactly how results are unpacked. If ``key_name`` is not present in the database, the result is not an error, but a zero for ``*out_present`` returned from that function.
:data:`key_name`
``key_name``
A pointer to the name of the key to be looked up in the database. |no-null|
:data:`key_name_length`
|length-of| :data:`key_name`.
``key_name_length``
|length-of| ``key_name``.
:data:`snapshot`
``snapshot``
|snapshot|
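Combining fdb_transaction_get() with fdb_future_get_value() as described above gives a minimal read path. A hedged sketch, assuming the network thread is already running, `tr` is a live transaction, and the key name is a placeholder (the usual retry loop is omitted here):

#define FDB_API_VERSION 610
#include <foundationdb/fdb_c.h>

fdb_error_t read_example(FDBTransaction* tr) {
    const uint8_t key[] = "example-key";                        // placeholder key
    FDBFuture* f = fdb_transaction_get(tr, key, (int)(sizeof(key) - 1), 0 /* not a snapshot read */);

    fdb_error_t err = fdb_future_block_until_ready(f);          // simple blocking wait
    if (!err) err = fdb_future_get_error(f);
    if (!err) {
        fdb_bool_t present;
        const uint8_t* value;
        int value_length;
        err = fdb_future_get_value(f, &present, &value, &value_length);
        // `value` is only valid until the future is destroyed; copy it if needed.
    }
    fdb_future_destroy(f);
    return err;
}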
.. function:: FDBFuture* fdb_transaction_get_key(FDBTransaction* transaction, uint8_t const* key_name, int key_name_length, fdb_bool_t or_equal, int offset, fdb_bool_t snapshot)
Resolves a :ref:`key selector <key-selectors>` against the keys in the database snapshot represented by :data:`transaction`.
Resolves a :ref:`key selector <key-selectors>` against the keys in the database snapshot represented by ``transaction``.
|future-return0| the key in the database matching the :ref:`key selector <key-selectors>`. |future-return1| call :func:`fdb_future_get_key()` to extract the key, |future-return2|
:data:`key_name`, :data:`key_name_length`, :data:`or_equal`, :data:`offset`
``key_name``, ``key_name_length``, ``or_equal``, ``offset``
The four components of a :ref:`key selector <key-selectors>`.
:data:`snapshot`
``snapshot``
|snapshot|
.. function:: FDBFuture* fdb_transaction_get_addresses_for_key(FDBTransaction* transaction, uint8_t const* key_name, int key_name_length)
Returns a list of public network addresses as strings, one for each of the storage servers responsible for storing :data:`key_name` and its associated value.
Returns a list of public network addresses as strings, one for each of the storage servers responsible for storing ``key_name`` and its associated value.
|future-return0| an array of strings. |future-return1| call :func:`fdb_future_get_string_array()` to extract the string array, |future-return2|
:data:`key_name`
``key_name``
A pointer to the name of the key whose location is to be queried.
:data:`key_name_length`
|length-of| :data:`key_name`.
``key_name_length``
|length-of| ``key_name``.
.. |range-limited-by| replace:: If this limit was reached before the end of the specified range, then the :data:`*more` return of :func:`fdb_future_get_keyvalue_array()` will be set to a non-zero value.
.. |range-limited-by| replace:: If this limit was reached before the end of the specified range, then the ``*more`` return of :func:`fdb_future_get_keyvalue_array()` will be set to a non-zero value.
.. function:: FDBFuture* fdb_transaction_get_range(FDBTransaction* transaction, uint8_t const* begin_key_name, int begin_key_name_length, fdb_bool_t begin_or_equal, int begin_offset, uint8_t const* end_key_name, int end_key_name_length, fdb_bool_t end_or_equal, int end_offset, int limit, int target_bytes, FDBStreamingMode mode, int iteration, fdb_bool_t snapshot, fdb_bool_t reverse)
Reads all key-value pairs in the database snapshot represented by :data:`transaction` (potentially limited by :data:`limit`, :data:`target_bytes`, or :data:`mode`) which have a key lexicographically greater than or equal to the key resolved by the begin :ref:`key selector <key-selectors>` and lexicographically less than the key resolved by the end :ref:`key selector <key-selectors>`.
Reads all key-value pairs in the database snapshot represented by ``transaction`` (potentially limited by :data:`limit`, :data:`target_bytes`, or :data:`mode`) which have a key lexicographically greater than or equal to the key resolved by the begin :ref:`key selector <key-selectors>` and lexicographically less than the key resolved by the end :ref:`key selector <key-selectors>`.
|future-return0| an :type:`FDBKeyValue` array. |future-return1| call :func:`fdb_future_get_keyvalue_array()` to extract the key-value array, |future-return2|
:data:`begin_key_name`, :data:`begin_key_name_length`, :data:`begin_or_equal`, :data:`begin_offset`
``begin_key_name``, :data:`begin_key_name_length`, :data:`begin_or_equal`, :data:`begin_offset`
The four components of a :ref:`key selector <key-selectors>` describing the beginning of the range.
:data:`end_key_name`, :data:`end_key_name_length`, :data:`end_or_equal`, :data:`end_offset`
``end_key_name``, :data:`end_key_name_length`, :data:`end_or_equal`, :data:`end_offset`
The four components of a :ref:`key selector <key-selectors>` describing the end of the range.
:data:`limit`
``limit``
If non-zero, indicates the maximum number of key-value pairs to return. |range-limited-by|
:data:`target_bytes`
``target_bytes``
If non-zero, indicates a (soft) cap on the combined number of bytes of keys and values to return. |range-limited-by|
:data:`mode`
``mode``
One of the :type:`FDBStreamingMode` values indicating how the caller would like the data in the range returned.
:data:`iteration`
If :data:`mode` is :data:`FDB_STREAMING_MODE_ITERATOR`, this parameter should start at 1 and be incremented by 1 for each successive call while reading this range. In all other cases it is ignored.
``iteration``
If ``mode`` is :data:`FDB_STREAMING_MODE_ITERATOR`, this parameter should start at 1 and be incremented by 1 for each successive call while reading this range. In all other cases it is ignored.
:data:`snapshot`
``snapshot``
|snapshot|
:data:`reverse`
``reverse``
If non-zero, key-value pairs will be returned in reverse lexicographical order beginning at the end of the range.
@ -515,31 +525,31 @@ Applications must provide error handling and an appropriate retry loop around th
An enumeration of available streaming modes to be passed to :func:`fdb_transaction_get_range()`.
:data:`FDB_STREAMING_MODE_ITERATOR`
``FDB_STREAMING_MODE_ITERATOR``
The caller is implementing an iterator (most likely in a binding to a higher level language). The amount of data returned depends on the value of the :data:`iteration` parameter to :func:`fdb_transaction_get_range()`.
The caller is implementing an iterator (most likely in a binding to a higher level language). The amount of data returned depends on the value of the ``iteration`` parameter to :func:`fdb_transaction_get_range()`.
:data:`FDB_STREAMING_MODE_SMALL`
``FDB_STREAMING_MODE_SMALL``
Data is returned in small batches (not much more expensive than reading individual key-value pairs).
:data:`FDB_STREAMING_MODE_MEDIUM`
``FDB_STREAMING_MODE_MEDIUM``
Data is returned in batches between _SMALL and _LARGE.
:data:`FDB_STREAMING_MODE_LARGE`
``FDB_STREAMING_MODE_LARGE``
Data is returned in batches large enough to be, in a high-concurrency environment, nearly as efficient as possible. If the caller does not need the entire range, some disk and network bandwidth may be wasted. The batch size may still be too small to allow a single client to get high throughput from the database.
:data:`FDB_STREAMING_MODE_SERIAL`
``FDB_STREAMING_MODE_SERIAL``
Data is returned in batches large enough that an individual client can get reasonable read bandwidth from the database. If the caller does not need the entire range, considerable disk and network bandwidth may be wasted.
:data:`FDB_STREAMING_MODE_WANT_ALL`
``FDB_STREAMING_MODE_WANT_ALL``
The caller intends to consume the entire range and would like it all transferred as early as possible.
:data:`FDB_STREAMING_MODE_EXACT`
``FDB_STREAMING_MODE_EXACT``
The caller has passed a specific row limit and wants that many rows delivered in a single batch.
@ -549,17 +559,17 @@ Applications must provide error handling and an appropriate retry loop around th
|sets-and-clears2|
:data:`key_name`
``key_name``
A pointer to the name of the key to be inserted into the database. |no-null|
:data:`key_name_length`
|length-of| :data:`key_name`.
``key_name_length``
|length-of| ``key_name``.
:data:`value`
``value``
A pointer to the value to be inserted into the database. |no-null|
:data:`value_length`
|length-of| :data:`value`.
``value_length``
|length-of| ``value``.
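A one-function sketch of the write described above (key and value are placeholder byte strings; the mutation only becomes durable once the transaction commits):

#define FDB_API_VERSION 610
#include <foundationdb/fdb_c.h>

void set_example(FDBTransaction* tr) {
    const uint8_t key[]   = "example-key";      // placeholder
    const uint8_t value[] = "example-value";    // placeholder
    // Lengths exclude the terminating NUL; keys and values are arbitrary byte strings.
    fdb_transaction_set(tr, key, (int)(sizeof(key) - 1), value, (int)(sizeof(value) - 1));
}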
.. function:: void fdb_transaction_clear(FDBTransaction* transaction, uint8_t const* key_name, int key_name_length)
@ -567,11 +577,11 @@ Applications must provide error handling and an appropriate retry loop around th
|sets-and-clears2|
:data:`key_name`
``key_name``
A pointer to the name of the key to be removed from the database. |no-null|
:data:`key_name_length`
|length-of| :data:`key_name`.
``key_name_length``
|length-of| ``key_name``.
.. function:: void fdb_transaction_clear_range(FDBTransaction* transaction, uint8_t const* begin_key_name, int begin_key_name_length, uint8_t const* end_key_name, int end_key_name_length)
@ -579,17 +589,17 @@ Applications must provide error handling and an appropriate retry loop around th
|sets-and-clears2|
:data:`begin_key_name`
``begin_key_name``
A pointer to the name of the key specifying the beginning of the range to clear. |no-null|
:data:`begin_key_name_length`
|length-of| :data:`begin_key_name`.
``begin_key_name_length``
|length-of| ``begin_key_name``.
:data:`end_key_name`
``end_key_name``
A pointer to the name of the key specifying the end of the range to clear. |no-null|
:data:`end_key_name_length`
|length-of| :data:`end_key_name_length`.
``end_key_name_length``
|length-of| ``end_key_name_length``.
.. function:: void fdb_transaction_atomic_op(FDBTransaction* transaction, uint8_t const* key_name, int key_name_length, uint8_t const* param, int param_length, FDBMutationType operationType)
@ -605,64 +615,64 @@ Applications must provide error handling and an appropriate retry loop around th
|sets-and-clears2|
:data:`key_name`
``key_name``
A pointer to the name of the key whose value is to be mutated.
:data:`key_name_length`
|length-of| :data:`key_name`.
``key_name_length``
|length-of| ``key_name``.
:data:`param`
A pointer to the parameter with which the atomic operation will mutate the value associated with :data:`key_name`.
``param``
A pointer to the parameter with which the atomic operation will mutate the value associated with ``key_name``.
:data:`param_length`
|length-of| :data:`param`.
``param_length``
|length-of| ``param``.
:data:`operation_type`
``operation_type``
One of the :type:`FDBMutationType` values indicating which operation should be performed.
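As a concrete instance of one of the mutation types listed below, an ADD operation interprets its parameter as a little-endian integer. A hedged sketch (the key name is a placeholder, and the memcpy assumes a little-endian host):

#define FDB_API_VERSION 610
#include <foundationdb/fdb_c.h>
#include <cstdint>
#include <cstring>

void increment_counter(FDBTransaction* tr) {
    const uint8_t key[] = "counter-key";        // placeholder
    int64_t one = 1;
    uint8_t param[sizeof(one)];
    std::memcpy(param, &one, sizeof(one));      // little-endian encoding on little-endian hosts
    fdb_transaction_atomic_op(tr, key, (int)(sizeof(key) - 1),
                              param, (int)sizeof(param), FDB_MUTATION_TYPE_ADD);
}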
.. type:: FDBMutationType
An enumeration of available opcodes to be passed to :func:`fdb_transaction_atomic_op()`
:data:`FDB_MUTATION_TYPE_ADD`
``FDB_MUTATION_TYPE_ADD``
|atomic-add1|
|atomic-add2|
:data:`FDB_MUTATION_TYPE_AND`
``FDB_MUTATION_TYPE_AND``
|atomic-and|
:data:`FDB_MUTATION_TYPE_OR`
``FDB_MUTATION_TYPE_OR``
|atomic-or|
:data:`FDB_MUTATION_TYPE_XOR`
``FDB_MUTATION_TYPE_XOR``
|atomic-xor|
:data:`FDB_MUTATION_TYPE_MAX`
``FDB_MUTATION_TYPE_MAX``
|atomic-max1|
|atomic-max-min|
:data:`FDB_MUTATION_TYPE_BYTE_MAX`
``FDB_MUTATION_TYPE_BYTE_MAX``
|atomic-byte-max|
:data:`FDB_MUTATION_TYPE_MIN`
``FDB_MUTATION_TYPE_MIN``
|atomic-min1|
|atomic-max-min|
:data:`FDB_MUTATION_TYPE_BYTE_MIN`
``FDB_MUTATION_TYPE_BYTE_MIN``
|atomic-byte-min|
:data:`FDB_MUTATION_TYPE_SET_VERSIONSTAMPED_KEY`
``FDB_MUTATION_TYPE_SET_VERSIONSTAMPED_KEY``
|atomic-set-versionstamped-key-1|
@ -674,7 +684,7 @@ Applications must provide error handling and an appropriate retry loop around th
.. warning :: |atomic-versionstamps-tuple-warning-key|
:data:`FDB_MUTATION_TYPE_SET_VERSIONSTAMPED_VALUE`
``FDB_MUTATION_TYPE_SET_VERSIONSTAMPED_VALUE``
|atomic-set-versionstamped-value|
@ -686,7 +696,7 @@ Applications must provide error handling and an appropriate retry loop around th
.. function:: FDBFuture* fdb_transaction_commit(FDBTransaction* transaction)
Attempts to commit the sets and clears previously applied to the database snapshot represented by :data:`transaction` to the actual database. The commit may or may not succeed -- in particular, if a conflicting transaction previously committed, then the commit must fail in order to preserve transactional isolation. If the commit does succeed, the transaction is durably committed to the database and all subsequently started transactions will observe its effects.
Attempts to commit the sets and clears previously applied to the database snapshot represented by ``transaction`` to the actual database. The commit may or may not succeed -- in particular, if a conflicting transaction previously committed, then the commit must fail in order to preserve transactional isolation. If the commit does succeed, the transaction is durably committed to the database and all subsequently started transactions will observe its effects.
It is not necessary to commit a read-only transaction -- you can simply call :func:`fdb_transaction_destroy()`.
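The commit documentation above defers to "an appropriate retry loop"; a minimal sketch of the conventional loop built from fdb_transaction_commit() and fdb_transaction_on_error() follows. `run_body` stands in for the application's reads and writes and is an assumption of this sketch, not part of the C API:

#define FDB_API_VERSION 610
#include <foundationdb/fdb_c.h>

static fdb_error_t wait_and_check(FDBFuture* f) {
    fdb_error_t err = fdb_future_block_until_ready(f);
    if (!err) err = fdb_future_get_error(f);
    return err;
}

fdb_error_t run_transaction(FDBDatabase* db, fdb_error_t (*run_body)(FDBTransaction*)) {
    FDBTransaction* tr = nullptr;
    fdb_error_t err = fdb_database_create_transaction(db, &tr);
    while (!err) {
        err = run_body(tr);
        if (!err) {
            FDBFuture* commit = fdb_transaction_commit(tr);
            err = wait_and_check(commit);
            fdb_future_destroy(commit);
            if (!err) break;                    // committed successfully
        }
        // For retryable errors, on_error backs off and resets the transaction;
        // for non-retryable errors the returned future is set to that error.
        FDBFuture* retry = fdb_transaction_on_error(tr, err);
        err = wait_and_check(retry);
        fdb_future_destroy(retry);
    }
    if (tr) fdb_transaction_destroy(tr);
    return err;
}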
@ -700,7 +710,7 @@ Applications must provide error handling and an appropriate retry loop around th
.. function:: fdb_error_t fdb_transaction_get_committed_version(FDBTransaction* transaction, int64_t* out_version)
Retrieves the database version number at which a given transaction was committed. :func:`fdb_transaction_commit()` must have been called on :data:`transaction` and the resulting future must be ready and not an error before this function is called, or the behavior is undefined. Read-only transactions do not modify the database when committed and will have a committed version of -1. Keep in mind that a transaction which reads keys and then sets them to their current values may be optimized to a read-only transaction.
Retrieves the database version number at which a given transaction was committed. :func:`fdb_transaction_commit()` must have been called on ``transaction`` and the resulting future must be ready and not an error before this function is called, or the behavior is undefined. Read-only transactions do not modify the database when committed and will have a committed version of -1. Keep in mind that a transaction which reads keys and then sets them to their current values may be optimized to a read-only transaction.
Note that database versions are not necessarily unique to a given transaction and so cannot be used to determine in what order two transactions completed. The only use for this function is to manually enforce causal consistency when calling :func:`fdb_transaction_set_read_version()` on another subsequent transaction.
@ -726,11 +736,11 @@ Applications must provide error handling and an appropriate retry loop around th
|transaction-watch-limit-blurb|
:data:`key_name`
``key_name``
A pointer to the name of the key to watch. |no-null|
:data:`key_name_length`
|length-of| :data:`key_name`.
``key_name_length``
|length-of| ``key_name``.
.. function:: FDBFuture* fdb_transaction_on_error(FDBTransaction* transaction, fdb_error_t error)
@ -751,7 +761,7 @@ Applications must provide error handling and an appropriate retry loop around th
.. function:: void fdb_transaction_reset(FDBTransaction* transaction)
Reset :data:`transaction` to its initial state. This is similar to calling :func:`fdb_transaction_destroy()` followed by :func:`fdb_database_create_transaction()`. It is not necessary to call :func:`fdb_transaction_reset()` when handling an error with :func:`fdb_transaction_on_error()` since the transaction has already been reset.
Reset ``transaction`` to its initial state. This is similar to calling :func:`fdb_transaction_destroy()` followed by :func:`fdb_database_create_transaction()`. It is not necessary to call :func:`fdb_transaction_reset()` when handling an error with :func:`fdb_transaction_on_error()` since the transaction has already been reset.
.. function:: void fdb_transaction_cancel(FDBTransaction* transaction)
@ -769,30 +779,30 @@ Applications must provide error handling and an appropriate retry loop around th
.. note:: |conflict-range-note|
:data:`begin_key_name`
``begin_key_name``
A pointer to the name of the key specifying the beginning of the conflict range. |no-null|
:data:`begin_key_name_length`
|length-of| :data:`begin_key_name`.
``begin_key_name_length``
|length-of| ``begin_key_name``.
:data:`end_key_name`
``end_key_name``
A pointer to the name of the key specifying the end of the conflict range. |no-null|
:data:`end_key_name_length`
|length-of| :data:`end_key_name_length`.
``end_key_name_length``
|length-of| ``end_key_name_length``.
:data:`type`
``type``
One of the :type:`FDBConflictRangeType` values indicating what type of conflict range is being set.
.. type:: FDBConflictRangeType
An enumeration of available conflict range types to be passed to :func:`fdb_transaction_add_conflict_range()`.
:data:`FDB_CONFLICT_RANGE_TYPE_READ`
``FDB_CONFLICT_RANGE_TYPE_READ``
|add-read-conflict-range-blurb|
:data:`FDB_CONFLICT_RANGE_TYPE_WRITE`
``FDB_CONFLICT_RANGE_TYPE_WRITE``
|add-write-conflict-range-blurb|

View File

@ -26,7 +26,6 @@
.. |snapshot-ryw-disable-database-option| replace:: :meth:`Database.options.set_snapshot_ryw_disable`
.. |future-type-string| replace:: a :class:`Future`
.. |read-your-writes-disable-option| replace:: :meth:`Transaction.options.set_read_your_writes_disable`
.. |read-your-writes-disable-option| replace:: :meth:`Transaction.options.set_read_your_writes_disable`
.. |retry-limit-transaction-option| replace:: :meth:`Transaction.options.set_retry_limit`
.. |timeout-transaction-option| replace:: :meth:`Transaction.options.set_timeout`
.. |max-retry-delay-transaction-option| replace:: :meth:`Transaction.options.set_max_retry_delay`

View File

@ -238,7 +238,7 @@ The ``start`` subcommand is used to start a backup. If there is already a backu
.. program:: fdbbackup modify
``modify``
---------
----------
The ``modify`` subcommand is used to modify parameters of a running backup. All specified changes are made in a single transaction.

View File

@ -39,6 +39,16 @@
.. |node-subspace| replace:: FIXME
.. |content-subspace| replace:: FIXME
.. |allow-manual-prefixes| replace:: FIXME
.. |retry-limit-transaction-option| replace:: FIXME
.. |timeout-transaction-option| replace:: FIXME
.. |max-retry-delay-transaction-option| replace:: FIXME
.. |snapshot-ryw-enable-transaction-option| replace:: FIXME
.. |snapshot-ryw-disable-transaction-option| replace:: FIXME
.. |snapshot-ryw-enable-database-option| replace:: FIXME
.. |snapshot-ryw-disable-database-option| replace:: FIXME
.. |retry-limit-database-option| replace:: FIXME
.. |max-retry-delay-database-option| replace:: FIXME
.. |timeout-database-option| replace:: FIXME
.. include:: api-common.rst.inc

View File

@ -39,6 +39,16 @@
.. |node-subspace| replace:: FIXME
.. |content-subspace| replace:: FIXME
.. |allow-manual-prefixes| replace:: FIXME
.. |retry-limit-transaction-option| replace:: FIXME
.. |timeout-transaction-option| replace:: FIXME
.. |max-retry-delay-transaction-option| replace:: FIXME
.. |snapshot-ryw-enable-transaction-option| replace:: FIXME
.. |snapshot-ryw-disable-transaction-option| replace:: FIXME
.. |snapshot-ryw-enable-database-option| replace:: FIXME
.. |snapshot-ryw-disable-database-option| replace:: FIXME
.. |retry-limit-database-option| replace:: FIXME
.. |max-retry-delay-database-option| replace:: FIXME
.. |timeout-database-option| replace:: FIXME
.. include:: api-common.rst.inc

View File

@ -10,38 +10,38 @@ macOS
The macOS installation package is supported on macOS 10.7+. It includes the client and (optionally) the server.
* `FoundationDB-6.1.7.pkg <https://www.foundationdb.org/downloads/6.1.7/macOS/installers/FoundationDB-6.1.7.pkg>`_
* `FoundationDB-6.1.8.pkg <https://www.foundationdb.org/downloads/6.1.8/macOS/installers/FoundationDB-6.1.8.pkg>`_
Ubuntu
------
The Ubuntu packages are supported on 64-bit Ubuntu 12.04+, but beware of the Linux kernel bug in Ubuntu 12.x.
* `foundationdb-clients-6.1.7-1_amd64.deb <https://www.foundationdb.org/downloads/6.1.7/ubuntu/installers/foundationdb-clients_6.1.7-1_amd64.deb>`_
* `foundationdb-server-6.1.7-1_amd64.deb <https://www.foundationdb.org/downloads/6.1.7/ubuntu/installers/foundationdb-server_6.1.7-1_amd64.deb>`_ (depends on the clients package)
* `foundationdb-clients-6.1.8-1_amd64.deb <https://www.foundationdb.org/downloads/6.1.8/ubuntu/installers/foundationdb-clients_6.1.8-1_amd64.deb>`_
* `foundationdb-server-6.1.8-1_amd64.deb <https://www.foundationdb.org/downloads/6.1.8/ubuntu/installers/foundationdb-server_6.1.8-1_amd64.deb>`_ (depends on the clients package)
RHEL/CentOS EL6
---------------
The RHEL/CentOS EL6 packages are supported on 64-bit RHEL/CentOS 6.x.
* `foundationdb-clients-6.1.7-1.el6.x86_64.rpm <https://www.foundationdb.org/downloads/6.1.7/rhel6/installers/foundationdb-clients-6.1.7-1.el6.x86_64.rpm>`_
* `foundationdb-server-6.1.7-1.el6.x86_64.rpm <https://www.foundationdb.org/downloads/6.1.7/rhel6/installers/foundationdb-server-6.1.7-1.el6.x86_64.rpm>`_ (depends on the clients package)
* `foundationdb-clients-6.1.8-1.el6.x86_64.rpm <https://www.foundationdb.org/downloads/6.1.8/rhel6/installers/foundationdb-clients-6.1.8-1.el6.x86_64.rpm>`_
* `foundationdb-server-6.1.8-1.el6.x86_64.rpm <https://www.foundationdb.org/downloads/6.1.8/rhel6/installers/foundationdb-server-6.1.8-1.el6.x86_64.rpm>`_ (depends on the clients package)
RHEL/CentOS EL7
---------------
The RHEL/CentOS EL7 packages are supported on 64-bit RHEL/CentOS 7.x.
* `foundationdb-clients-6.1.7-1.el7.x86_64.rpm <https://www.foundationdb.org/downloads/6.1.7/rhel7/installers/foundationdb-clients-6.1.7-1.el7.x86_64.rpm>`_
* `foundationdb-server-6.1.7-1.el7.x86_64.rpm <https://www.foundationdb.org/downloads/6.1.7/rhel7/installers/foundationdb-server-6.1.7-1.el7.x86_64.rpm>`_ (depends on the clients package)
* `foundationdb-clients-6.1.8-1.el7.x86_64.rpm <https://www.foundationdb.org/downloads/6.1.8/rhel7/installers/foundationdb-clients-6.1.8-1.el7.x86_64.rpm>`_
* `foundationdb-server-6.1.8-1.el7.x86_64.rpm <https://www.foundationdb.org/downloads/6.1.8/rhel7/installers/foundationdb-server-6.1.8-1.el7.x86_64.rpm>`_ (depends on the clients package)
Windows
-------
The Windows installer is supported on 64-bit Windows XP and later. It includes the client and (optionally) the server.
* `foundationdb-6.1.7-x64.msi <https://www.foundationdb.org/downloads/6.1.7/windows/installers/foundationdb-6.1.7-x64.msi>`_
* `foundationdb-6.1.8-x64.msi <https://www.foundationdb.org/downloads/6.1.8/windows/installers/foundationdb-6.1.8-x64.msi>`_
API Language Bindings
=====================
@ -58,18 +58,18 @@ On macOS and Windows, the FoundationDB Python API bindings are installed as part
If you need to use the FoundationDB Python API from other Python installations or paths, download the Python package:
* `foundationdb-6.1.7.tar.gz <https://www.foundationdb.org/downloads/6.1.7/bindings/python/foundationdb-6.1.7.tar.gz>`_
* `foundationdb-6.1.8.tar.gz <https://www.foundationdb.org/downloads/6.1.8/bindings/python/foundationdb-6.1.8.tar.gz>`_
Ruby 1.9.3/2.0.0+
-----------------
* `fdb-6.1.7.gem <https://www.foundationdb.org/downloads/6.1.7/bindings/ruby/fdb-6.1.7.gem>`_
* `fdb-6.1.8.gem <https://www.foundationdb.org/downloads/6.1.8/bindings/ruby/fdb-6.1.8.gem>`_
Java 8+
-------
* `fdb-java-6.1.7.jar <https://www.foundationdb.org/downloads/6.1.7/bindings/java/fdb-java-6.1.7.jar>`_
* `fdb-java-6.1.7-javadoc.jar <https://www.foundationdb.org/downloads/6.1.7/bindings/java/fdb-java-6.1.7-javadoc.jar>`_
* `fdb-java-6.1.8.jar <https://www.foundationdb.org/downloads/6.1.8/bindings/java/fdb-java-6.1.8.jar>`_
* `fdb-java-6.1.8-javadoc.jar <https://www.foundationdb.org/downloads/6.1.8/bindings/java/fdb-java-6.1.8-javadoc.jar>`_
Go 1.11+
-------

View File

@ -1,6 +1,7 @@
.. -*- mode: rst; -*-
.. |json-status-format| replace::
.. code-block:: javascript
"cluster":{
"layers":{
"_valid":true,

View File

@ -42,12 +42,9 @@ JSON format
The following format informally describes the JSON containing the status data. The possible values of ``<name_string>`` and ``<description_string>`` are described in :ref:`mr-status-message`. The format is representative: *any field can be missing at any time*, depending on the database state. Clients should be prepared to flexibly handle format variations resulting from different database states.
.. code-block:: javascript
.. include:: mr-status-json-schemas.rst.inc
.. node:: |json-status-format|
.. mr-status-message:
.. _mr-status-message:
Message components
------------------
@ -96,7 +93,7 @@ cluster.processes.<process>.messages incorrect_cluster_file_contents Clus
cluster.processes.<process>.messages io_error <error> occured in <subsystem>
cluster.processes.<process>.messages platform_error <error> occured in <subsystem>
cluster.processes.<process>.messages process_error <error> occured in <subsystem>
==================================== =============================== =================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================
==================================== ==================================== =================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================
The JSON path ``cluster.recovery_state``, when it exists, is an Object containing at least ``"name"`` and ``"description"``. The possible values for those fields are in the following table:

View File

@ -144,9 +144,9 @@ Bindings
* C API calls made on the network thread could be reordered with calls made from other threads. [6.0.2] `(Issue #518) <https://github.com/apple/foundationdb/issues/518>`_
* The TLS_PLUGIN option is now a no-op and has been deprecated. [6.0.10] `(PR #710) <https://github.com/apple/foundationdb/pull/710>`_
* Java: the `Versionstamp::getUserVersion() </javadoc/com/apple/foundationdb/tuple/Versionstamp.html#getUserVersion-->`_ method did not handle user versions greater than ``0x00FF`` due to operator precedence errors. [6.0.11] `(Issue #761) <https://github.com/apple/foundationdb/issues/761>`_
* Python: bindings didn't work with Python 3.7 because of the new `async` keyword. [6.0.13] `(Issue #830) <https://github.com/apple/foundationdb/issues/830>`_
* Go: `PrefixRange` didn't correctly return an error if it failed to generate the range. [6.0.15] `(PR #878) <https://github.com/apple/foundationdb/pull/878>`_
* Go: Add Tuple layer support for `uint`, `uint64`, and `*big.Int` integers up to 255 bytes. Integer values will be decoded into the first of `int64`, `uint64`, or `*big.Int` in which they fit. `(PR #915) <https://github.com/apple/foundationdb/pull/915>`_ [6.0.15]
* Python: bindings didn't work with Python 3.7 because of the new ``async`` keyword. [6.0.13] `(Issue #830) <https://github.com/apple/foundationdb/issues/830>`_
* Go: ``PrefixRange`` didn't correctly return an error if it failed to generate the range. [6.0.15] `(PR #878) <https://github.com/apple/foundationdb/pull/878>`_
* Go: Add Tuple layer support for ``uint``, ``uint64``, and ``*big.Int`` integers up to 255 bytes. Integer values will be decoded into the first of ``int64``, ``uint64``, or ``*big.Int`` in which they fit. `(PR #915) <https://github.com/apple/foundationdb/pull/915>`_ [6.0.15]
* Ruby: Add Tuple layer support for integers up to 255 bytes. `(PR #915) <https://github.com/apple/foundationdb/pull/915>`_ [6.0.15]
* Python: bindings didn't work with Python 3.7 because of the new ``async`` keyword. [6.0.13] `(Issue #830) <https://github.com/apple/foundationdb/issues/830>`_
* Go: ``PrefixRange`` didn't correctly return an error if it failed to generate the range. [6.0.15] `(PR #878) <https://github.com/apple/foundationdb/pull/878>`_

View File

@ -2,7 +2,7 @@
Release Notes
#############
6.1.7
6.1.8
=====
Features
@ -20,7 +20,7 @@ Features
* Separated data distribution from the master into its own role. `(PR #1062) <https://github.com/apple/foundationdb/pull/1062>`_
* Separated ratekeeper from the master into its own role. `(PR #1176) <https://github.com/apple/foundationdb/pull/1176>`_
* Added a ``CompareAndClear`` atomic op that clears a key if its value matches the supplied value. `(PR #1105) <https://github.com/apple/foundationdb/pull/1105>`_
* Added support for IPv6. `(PR #1176) <https://github.com/apple/foundationdb/pull/1178>`_
* Added support for IPv6. `(PR #1178) <https://github.com/apple/foundationdb/pull/1178>`_
* FDB can now simultaneously listen to TLS and unencrypted ports to facilitate smoother migration to and from TLS. `(PR #1157) <https://github.com/apple/foundationdb/pull/1157>`_
* Added ``DISABLE_POSIX_KERNEL_AIO`` knob to fallback to libeio instead of kernel async I/O (KAIO) for systems that do not support KAIO or O_DIRECT flag. `(PR #1283) <https://github.com/apple/foundationdb/pull/1283>`_
* Added support for configuring the cluster to use the primary and remote DC's as satellites. `(PR #1320) <https://github.com/apple/foundationdb/pull/1320>`_
@ -77,6 +77,7 @@ Fixes
* The ``configure`` command in ``fdbcli`` returned successfully even when the configuration was not changed for some error types. [6.1.4] `(PR #1509) <https://github.com/apple/foundationdb/pull/1509>`_
* Safety protections in the ``configure`` command in ``fdbcli`` would trigger spuriously when changing between ``three_datacenter`` replication and a region configuration. [6.1.4] `(PR #1509) <https://github.com/apple/foundationdb/pull/1509>`_
* Status could report an incorrect reason for ongoing data movement. [6.1.5] `(PR #1544) <https://github.com/apple/foundationdb/pull/1544>`_
* Storage servers were considered failed as soon as they were rebooted, instead of waiting to see if they rejoin the cluster. [6.1.8] `(PR #1618) <https://github.com/apple/foundationdb/pull/1618>`_
Status
------
@ -129,6 +130,7 @@ Fixes only impacting 6.1.0+
* TLogs will replace a large file with an empty file rather than doing a large truncate operation. [6.1.5] `(PR #1545) <https://github.com/apple/foundationdb/pull/1545>`_
* Fix PR #1545 to work on Windows and Linux. [6.1.6] `(PR #1556) <https://github.com/apple/foundationdb/pull/1556>`_
* Adding a read conflict range for the metadata version key no longer requires read access to the system keys. [6.1.6] `(PR #1556) <https://github.com/apple/foundationdb/pull/1556>`_
* The TLog's disk queue files would grow indefinitely after a storage server was removed from the cluster. [6.1.8] `(PR #1617) <https://github.com/apple/foundationdb/pull/1617>`_
Earlier release notes
---------------------
@ -150,4 +152,4 @@ Earlier release notes
* :doc:`Beta 2 (API Version 22) </old-release-notes/release-notes-022>`
* :doc:`Beta 1 (API Version 21) </old-release-notes/release-notes-021>`
* :doc:`Alpha 6 (API Version 16) </old-release-notes/release-notes-016>`
* :doc:`Alpha 5 (API Version 14) </old-release-notes/release-notes-014>`
* :doc:`Alpha 5 (API Version 14) </old-release-notes/release-notes-014>`

View File

@ -1,6 +1,4 @@
.. default-domain:: rb
.. highlight:: ruby
.. module:: FDB
################
Time-Series Data
@ -95,7 +93,7 @@ Ordering and Transactions
FoundationDB's ability to let you structure your data in different ways, keep track of metrics, and search it with varying granularity is a direct result of two key features of our key-value store: global ordering and ACID transactions. And as you've seen from the code included above, the direct impact of these properties is simpler application code and overall faster development.
Global ordering makes a big difference if you're attempting to process significant amounts of sequential information because the database can retrieve that information quickly and efficiently. So rather than having to package your data into a single database object or broadcast a request for many individual data elements that correspond to a given range of application data (e.g. time0, time1, time2, . . ., timen), a globally ordered storage system, like FoundationDB, can generate a single range request to the database for the matching data. And internally, FoundationDB can further optimize requests by knowing which data resides on which machines, so there's no need to broadcast the data request to all machines in the cluster.
Global ordering makes a big difference if you're attempting to process significant amounts of sequential information because the database can retrieve that information quickly and efficiently. So rather than having to package your data into a single database object or broadcast a request for many individual data elements that correspond to a given range of application data (e.g. time0, time1, time2, . . ., timeN), a globally ordered storage system, like FoundationDB, can generate a single range request to the database for the matching data. And internally, FoundationDB can further optimize requests by knowing which data resides on which machines, so there's no need to broadcast the data request to all machines in the cluster.
Global indexing also makes a huge difference in terms of application complexity and database efficiency. Many non-relational databases provide node-specific indexing and secondary indexing, but if you wanted global indexes, you would have to build those at the application level to ensure the index and related data get updated atomically.

View File

@ -144,7 +144,7 @@ Parameters and client bindings
------------------------------
Automatic TLS certificate refresh
------------------------------
---------------------------------
The TLS certificate will be automatically refreshed on a configurable cadence. The server will inspect the CA, certificate, and key files in the specified locations periodically, and will begin using the new versions if the following criteria are met:

View File

@ -2730,9 +2730,11 @@ ACTOR Future<Void> waitHealthyZoneChange( DDTeamCollection* self ) {
tr.setOption(FDBTransactionOptions::READ_SYSTEM_KEYS);
tr.setOption(FDBTransactionOptions::LOCK_AWARE);
Optional<Value> val = wait(tr.get(healthyZoneKey));
state Future<Void> healthyZoneTimeout = Never();
if(val.present()) {
auto p = decodeHealthyZoneValue(val.get());
if(p.second > tr.getReadVersion().get()) {
healthyZoneTimeout = delay((p.second - tr.getReadVersion().get())/(double)SERVER_KNOBS->VERSIONS_PER_SECOND);
self->healthyZone.set(p.first);
} else {
self->healthyZone.set(Optional<Key>());
@ -2740,9 +2742,10 @@ ACTOR Future<Void> waitHealthyZoneChange( DDTeamCollection* self ) {
} else {
self->healthyZone.set(Optional<Key>());
}
state Future<Void> watchFuture = tr.watch(healthyZoneKey);
wait(tr.commit());
wait(watchFuture);
wait(watchFuture || healthyZoneTimeout);
tr.reset();
} catch(Error& e) {
wait( tr.onError(e) );
@ -2822,24 +2825,15 @@ ACTOR Future<Void> storageServerFailureTracker(
if( status->isFailed )
self->restartRecruiting.trigger();
state double startTime = now();
Future<Void> healthChanged = Never();
if(status->isFailed) {
ASSERT(!inHealthyZone);
healthChanged = IFailureMonitor::failureMonitor().onStateEqual( interf.waitFailure.getEndpoint(), FailureStatus(false));
} else if(!inHealthyZone) {
healthChanged = waitFailureClient(interf.waitFailure, SERVER_KNOBS->DATA_DISTRIBUTION_FAILURE_REACTION_TIME, 0, TaskDataDistribution);
healthChanged = waitFailureClientStrict(interf.waitFailure, SERVER_KNOBS->DATA_DISTRIBUTION_FAILURE_REACTION_TIME, TaskDataDistribution);
}
choose {
when ( wait(healthChanged) ) {
double elapsed = now() - startTime;
if(!status->isFailed && elapsed < SERVER_KNOBS->DATA_DISTRIBUTION_FAILURE_REACTION_TIME) {
wait(delay(SERVER_KNOBS->DATA_DISTRIBUTION_FAILURE_REACTION_TIME - elapsed));
if(!IFailureMonitor::failureMonitor().getState( interf.waitFailure.getEndpoint() ).isFailed()) {
continue;
}
}
status->isFailed = !status->isFailed;
if(!status->isFailed && !server->teams.size()) {
self->doBuildTeams = true;

View File

@ -436,6 +436,7 @@ struct LogData : NonCopyable, public ReferenceCounted<LogData> {
specialCounter(cc, "PersistentDataVersion", [this](){ return this->persistentDataVersion; });
specialCounter(cc, "PersistentDataDurableVersion", [this](){ return this->persistentDataDurableVersion; });
specialCounter(cc, "KnownCommittedVersion", [this](){ return this->knownCommittedVersion; });
specialCounter(cc, "QueuePoppedVersion", [this](){ return this->persistentDataDurableVersion; });
specialCounter(cc, "SharedBytesInput", [tLogData](){ return tLogData->bytesInput; });
specialCounter(cc, "SharedBytesDurable", [tLogData](){ return tLogData->bytesDurable; });
specialCounter(cc, "SharedOverheadBytesInput", [tLogData](){ return tLogData->overheadBytesInput; });

View File

@ -29,6 +29,7 @@
#include "fdbserver/ServerDBInfo.h"
#include "fdbserver/Status.h"
#include "fdbclient/ManagementAPI.actor.h"
#include <boost/lexical_cast.hpp>
#include "flow/actorcompiler.h" // This must be the last #include.
ACTOR Future<vector<WorkerDetails>> getWorkers( Reference<AsyncVar<ServerDBInfo>> dbInfo, int flags = 0 ) {
@ -96,8 +97,7 @@ ACTOR Future<int64_t> getDataInFlight( Database cx, WorkerInterface distributorW
TraceEvent("DataInFlight").detail("Stage", "ContactingDataDistributor");
TraceEventFields md = wait( timeoutError(distributorWorker.eventLogRequest.getReply(
EventLogRequest( LiteralStringRef("TotalDataInFlight") ) ), 1.0 ) );
int64_t dataInFlight;
sscanf(md.getValue("TotalBytes").c_str(), "%" SCNd64, &dataInFlight);
int64_t dataInFlight = boost::lexical_cast<int64_t>(md.getValue("TotalBytes"));
return dataInFlight;
} catch( Error &e ) {
TraceEvent("QuietDatabaseFailure", distributorWorker.id()).error(e).detail("Reason", "Failed to extract DataInFlight");
@ -125,8 +125,16 @@ int64_t getQueueSize( const TraceEventFields& md ) {
return inputBytes - durableBytes;
}
//Computes the popped version lag for tlogs
int64_t getPoppedVersionLag( const TraceEventFields& md ) {
int64_t persistentDataDurableVersion = boost::lexical_cast<int64_t>(md.getValue("PersistentDataDurableVersion"));
int64_t queuePoppedVersion = boost::lexical_cast<int64_t>(md.getValue("QueuePoppedVersion"));
return persistentDataDurableVersion - queuePoppedVersion;
}
// This is not robust in the face of a TLog failure
ACTOR Future<int64_t> getMaxTLogQueueSize( Database cx, Reference<AsyncVar<ServerDBInfo>> dbInfo ) {
ACTOR Future<std::pair<int64_t,int64_t>> getTLogQueueInfo( Database cx, Reference<AsyncVar<ServerDBInfo>> dbInfo ) {
TraceEvent("MaxTLogQueueSize").detail("Stage", "ContactingLogs");
state std::vector<WorkerDetails> workers = wait(getWorkers(dbInfo));
@ -151,17 +159,19 @@ ACTOR Future<int64_t> getMaxTLogQueueSize( Database cx, Reference<AsyncVar<Serve
TraceEvent("MaxTLogQueueSize").detail("Stage", "ComputingMax").detail("MessageCount", messages.size());
state int64_t maxQueueSize = 0;
state int64_t maxPoppedVersionLag = 0;
state int i = 0;
for(; i < messages.size(); i++) {
try {
maxQueueSize = std::max( maxQueueSize, getQueueSize( messages[i].get() ) );
maxPoppedVersionLag = std::max( maxPoppedVersionLag, getPoppedVersionLag( messages[i].get() ) );
} catch( Error &e ) {
TraceEvent("QuietDatabaseFailure").detail("Reason", "Failed to extract MaxTLogQueue").detail("Tlog", tlogs[i].id());
throw;
}
}
return maxQueueSize;
return std::make_pair( maxQueueSize, maxPoppedVersionLag );
}
ACTOR Future<vector<StorageServerInterface>> getStorageServers( Database cx, bool use_system_priority = false) {
@ -239,12 +249,10 @@ ACTOR Future<int64_t> getDataDistributionQueueSize( Database cx, WorkerInterface
TraceEvent("DataDistributionQueueSize").detail("Stage", "GotString");
int64_t inQueue;
sscanf(movingDataMessage.getValue("InQueue").c_str(), "%" SCNd64, &inQueue);
int64_t inQueue = boost::lexical_cast<int64_t>(movingDataMessage.getValue("InQueue"));
if(reportInFlight) {
int64_t inFlight;
sscanf(movingDataMessage.getValue("InFlight").c_str(), "%" SCNd64, &inFlight);
int64_t inFlight = boost::lexical_cast<int64_t>(movingDataMessage.getValue("InFlight"));
inQueue += inFlight;
}
@ -275,23 +283,13 @@ ACTOR Future<bool> getTeamCollectionValid(Database cx, WorkerInterface dataDistr
TraceEvent("GetTeamCollectionValid").detail("Stage", "GotString");
int64_t currentTeamNumber;
int64_t desiredTeamNumber;
int64_t maxTeamNumber;
int64_t currentMachineTeamNumber;
int64_t healthyMachineTeamCount;
int64_t desiredMachineTeamNumber;
int64_t maxMachineTeamNumber;
sscanf(teamCollectionInfoMessage.getValue("CurrentTeamNumber").c_str(), "%" SCNd64, &currentTeamNumber);
sscanf(teamCollectionInfoMessage.getValue("DesiredTeamNumber").c_str(), "%" SCNd64, &desiredTeamNumber);
sscanf(teamCollectionInfoMessage.getValue("MaxTeamNumber").c_str(), "%" SCNd64, &maxTeamNumber);
sscanf(teamCollectionInfoMessage.getValue("CurrentMachineTeamNumber").c_str(), "%" SCNd64,
&currentMachineTeamNumber);
sscanf(teamCollectionInfoMessage.getValue("CurrentHealthyMachineTeamNumber").c_str(), "%" SCNd64,
&healthyMachineTeamCount);
sscanf(teamCollectionInfoMessage.getValue("DesiredMachineTeams").c_str(), "%" SCNd64,
&desiredMachineTeamNumber);
sscanf(teamCollectionInfoMessage.getValue("MaxMachineTeams").c_str(), "%" SCNd64, &maxMachineTeamNumber);
int64_t currentTeamNumber = boost::lexical_cast<int64_t>(teamCollectionInfoMessage.getValue("CurrentTeamNumber"));
int64_t desiredTeamNumber = boost::lexical_cast<int64_t>(teamCollectionInfoMessage.getValue("DesiredTeamNumber"));
int64_t maxTeamNumber = boost::lexical_cast<int64_t>(teamCollectionInfoMessage.getValue("MaxTeamNumber"));
int64_t currentMachineTeamNumber = boost::lexical_cast<int64_t>(teamCollectionInfoMessage.getValue("CurrentMachineTeamNumber"));
int64_t healthyMachineTeamCount = boost::lexical_cast<int64_t>(teamCollectionInfoMessage.getValue("CurrentHealthyMachineTeamNumber"));
int64_t desiredMachineTeamNumber = boost::lexical_cast<int64_t>(teamCollectionInfoMessage.getValue("DesiredMachineTeams"));
int64_t maxMachineTeamNumber = boost::lexical_cast<int64_t>(teamCollectionInfoMessage.getValue("MaxMachineTeams"));
// Team number is always valid when we disable teamRemover. This avoids false positive in simulation test
if (SERVER_KNOBS->TR_FLAG_DISABLE_TEAM_REMOVER) {
@ -398,7 +396,7 @@ ACTOR Future<Void> reconfigureAfter(Database cx, double time, Reference<AsyncVar
}
ACTOR Future<Void> waitForQuietDatabase( Database cx, Reference<AsyncVar<ServerDBInfo>> dbInfo, std::string phase, int64_t dataInFlightGate = 2e6,
int64_t maxTLogQueueGate = 5e6, int64_t maxStorageServerQueueGate = 5e6, int64_t maxDataDistributionQueueSize = 0 ) {
int64_t maxTLogQueueGate = 5e6, int64_t maxStorageServerQueueGate = 5e6, int64_t maxDataDistributionQueueSize = 0, int64_t maxPoppedVersionLag = 30e6 ) {
state Future<Void> reconfig = reconfigureAfter(cx, 100 + (g_random->random01()*100), dbInfo, "QuietDatabase");
TraceEvent(("QuietDatabase" + phase + "Begin").c_str());
@ -418,26 +416,28 @@ ACTOR Future<Void> waitForQuietDatabase( Database cx, Reference<AsyncVar<ServerD
TraceEvent("QuietDatabaseGotDataDistributor", distributorUID).detail("Locality", distributorWorker.locality.toString());
state Future<int64_t> dataInFlight = getDataInFlight( cx, distributorWorker);
state Future<int64_t> tLogQueueSize = getMaxTLogQueueSize( cx, dbInfo );
state Future<std::pair<int64_t,int64_t>> tLogQueueInfo = getTLogQueueInfo( cx, dbInfo );
state Future<int64_t> dataDistributionQueueSize = getDataDistributionQueueSize( cx, distributorWorker, dataInFlightGate == 0);
state Future<bool> teamCollectionValid = getTeamCollectionValid(cx, distributorWorker);
state Future<int64_t> storageQueueSize = getMaxStorageServerQueueSize( cx, dbInfo );
state Future<bool> dataDistributionActive = getDataDistributionActive( cx, distributorWorker );
state Future<bool> storageServersRecruiting = getStorageServersRecruiting ( cx, distributorWorker, distributorUID );
wait(success(dataInFlight) && success(tLogQueueSize) && success(dataDistributionQueueSize) &&
wait(success(dataInFlight) && success(tLogQueueInfo) && success(dataDistributionQueueSize) &&
success(teamCollectionValid) && success(storageQueueSize) && success(dataDistributionActive) &&
success(storageServersRecruiting));
TraceEvent(("QuietDatabase" + phase).c_str())
.detail("DataInFlight", dataInFlight.get())
.detail("MaxTLogQueueSize", tLogQueueSize.get())
.detail("DataDistributionQueueSize", dataDistributionQueueSize.get())
.detail("TeamCollectionValid", teamCollectionValid.get())
.detail("MaxStorageQueueSize", storageQueueSize.get())
.detail("DataDistributionActive", dataDistributionActive.get())
.detail("StorageServersRecruiting", storageServersRecruiting.get());
if (dataInFlight.get() > dataInFlightGate || tLogQueueSize.get() > maxTLogQueueGate ||
TraceEvent(("QuietDatabase" + phase).c_str())
.detail("DataInFlight", dataInFlight.get())
.detail("MaxTLogQueueSize", tLogQueueInfo.get().first)
.detail("MaxTLogPoppedVersionLag", tLogQueueInfo.get().second)
.detail("DataDistributionQueueSize", dataDistributionQueueSize.get())
.detail("TeamCollectionValid", teamCollectionValid.get())
.detail("MaxStorageQueueSize", storageQueueSize.get())
.detail("DataDistributionActive", dataDistributionActive.get())
.detail("StorageServersRecruiting", storageServersRecruiting.get());
if (dataInFlight.get() > dataInFlightGate || tLogQueueInfo.get().first > maxTLogQueueGate || tLogQueueInfo.get().second > maxPoppedVersionLag ||
dataDistributionQueueSize.get() > maxDataDistributionQueueSize ||
storageQueueSize.get() > maxStorageServerQueueGate || dataDistributionActive.get() == false ||
storageServersRecruiting.get() == true || teamCollectionValid.get() == false) {
@ -470,6 +470,6 @@ ACTOR Future<Void> waitForQuietDatabase( Database cx, Reference<AsyncVar<ServerD
}
Future<Void> quietDatabase( Database const& cx, Reference<AsyncVar<ServerDBInfo>> const& dbInfo, std::string phase, int64_t dataInFlightGate,
int64_t maxTLogQueueGate, int64_t maxStorageServerQueueGate, int64_t maxDataDistributionQueueSize ) {
return waitForQuietDatabase(cx, dbInfo, phase, dataInFlightGate, maxTLogQueueGate, maxStorageServerQueueGate, maxDataDistributionQueueSize);
int64_t maxTLogQueueGate, int64_t maxStorageServerQueueGate, int64_t maxDataDistributionQueueSize, int64_t maxPoppedVersionLag ) {
return waitForQuietDatabase(cx, dbInfo, phase, dataInFlightGate, maxTLogQueueGate, maxStorageServerQueueGate, maxDataDistributionQueueSize, maxPoppedVersionLag);
}

View File

@ -29,7 +29,7 @@
#include "flow/actorcompiler.h"
Future<int64_t> getDataInFlight( Database const& cx, Reference<AsyncVar<struct ServerDBInfo>> const& );
Future<int64_t> getMaxTLogQueueSize( Database const& cx, Reference<AsyncVar<struct ServerDBInfo>> const& );
Future<std::pair<int64_t,int64_t>> getTLogQueueInfo( Database const& cx, Reference<AsyncVar<struct ServerDBInfo>> const& );
Future<int64_t> getMaxStorageServerQueueSize( Database const& cx, Reference<AsyncVar<struct ServerDBInfo>> const& );
Future<int64_t> getDataDistributionQueueSize( Database const &cx, Reference<AsyncVar<struct ServerDBInfo>> const&, bool const& reportInFlight );
Future<bool> getTeamCollectionValid(Database const& cx, WorkerInterface const&);

View File

@ -423,6 +423,7 @@ struct LogData : NonCopyable, public ReferenceCounted<LogData> {
NotifiedVersion version, queueCommittedVersion;
Version queueCommittingVersion;
Version knownCommittedVersion, durableKnownCommittedVersion, minKnownCommittedVersion;
Version queuePoppedVersion;
Deque<std::pair<Version, Standalone<VectorRef<uint8_t>>>> messageBlocks;
std::vector<std::vector<Reference<TagData>>> tag_data; //tag.locality | tag.id
@ -476,7 +477,7 @@ struct LogData : NonCopyable, public ReferenceCounted<LogData> {
explicit LogData(TLogData* tLogData, TLogInterface interf, Tag remoteTag, bool isPrimary, int logRouterTags, UID recruitmentID, uint64_t protocolVersion, std::vector<Tag> tags) : tLogData(tLogData), knownCommittedVersion(0), logId(interf.id()),
cc("TLog", interf.id().toString()), bytesInput("BytesInput", cc), bytesDurable("BytesDurable", cc), remoteTag(remoteTag), isPrimary(isPrimary), logRouterTags(logRouterTags), recruitmentID(recruitmentID), protocolVersion(protocolVersion),
logSystem(new AsyncVar<Reference<ILogSystem>>()), logRouterPoppedVersion(0), durableKnownCommittedVersion(0), minKnownCommittedVersion(0), allTags(tags.begin(), tags.end()), terminated(tLogData->terminated.getFuture()),
logSystem(new AsyncVar<Reference<ILogSystem>>()), logRouterPoppedVersion(0), durableKnownCommittedVersion(0), minKnownCommittedVersion(0), queuePoppedVersion(0), allTags(tags.begin(), tags.end()), terminated(tLogData->terminated.getFuture()),
// These are initialized differently on init() or recovery
recoveryCount(), stopped(false), initialized(false), queueCommittingVersion(0), newPersistentDataVersion(invalidVersion), unrecoveredBefore(1), recoveredAt(1), unpoppedRecoveredTags(0),
logRouterPopToVersion(0), locality(tagLocalityInvalid)
@ -493,6 +494,7 @@ struct LogData : NonCopyable, public ReferenceCounted<LogData> {
specialCounter(cc, "PersistentDataVersion", [this](){ return this->persistentDataVersion; });
specialCounter(cc, "PersistentDataDurableVersion", [this](){ return this->persistentDataDurableVersion; });
specialCounter(cc, "KnownCommittedVersion", [this](){ return this->knownCommittedVersion; });
specialCounter(cc, "QueuePoppedVersion", [this](){ return this->queuePoppedVersion; });
specialCounter(cc, "SharedBytesInput", [tLogData](){ return tLogData->bytesInput; });
specialCounter(cc, "SharedBytesDurable", [tLogData](){ return tLogData->bytesDurable; });
specialCounter(cc, "SharedOverheadBytesInput", [tLogData](){ return tLogData->overheadBytesInput; });
@ -633,23 +635,15 @@ void updatePersistentPopped( TLogData* self, Reference<LogData> logData, Referen
}
ACTOR Future<Void> updatePoppedLocation( TLogData* self, Reference<LogData> logData, Reference<LogData::TagData> data ) {
// txsTag is spilled by value, so by definition, its poppable location is always up to the persistentDataVersion.
// txsTag is spilled by value, so we do not need to track its popped location.
if (data->tag == txsTag) {
auto locationIter = logData->versionLocation.lower_bound(std::max<Version>(data->popped, logData->persistentDataVersion));
if (locationIter != logData->versionLocation.end()) {
data->poppedLocation = locationIter->value.first;
} else {
// We have no data, so whatever our previous value was is better than anything new we know how
// to assign. Ideally, we'd use the most recent commit location, but that's surprisingly
// difficult to track.
}
return Void();
}
if (!data->requiresPoppedLocationUpdate) return Void();
data->requiresPoppedLocationUpdate = false;
if (data->popped < logData->persistentDataVersion) {
if (data->popped <= logData->persistentDataVersion) {
// Recover the next needed location in the Disk Queue from the index.
Standalone<VectorRef<KeyValueRef>> kvrefs = wait(
self->persistentData->readRange(KeyRangeRef(
@ -702,13 +696,19 @@ ACTOR Future<Void> popDiskQueue( TLogData* self, Reference<LogData> logData ) {
}
wait(waitForAll(updates));
auto lastItem = logData->versionLocation.lastItem();
IDiskQueue::location minLocation = lastItem == logData->versionLocation.end() ? 0 : lastItem->value.second;
IDiskQueue::location minLocation = 0;
Version minVersion = 0;
auto locationIter = logData->versionLocation.lower_bound(logData->persistentDataVersion);
if (locationIter != logData->versionLocation.end()) {
minLocation = locationIter->value.first;
minVersion = locationIter->key;
}
for(int tagLocality = 0; tagLocality < logData->tag_data.size(); tagLocality++) {
for(int tagId = 0; tagId < logData->tag_data[tagLocality].size(); tagId++) {
Reference<LogData::TagData> tagData = logData->tag_data[tagLocality][tagId];
if (tagData) {
if (tagData && tagData->tag != txsTag && !tagData->nothingPersistent) {
minLocation = std::min(minLocation, tagData->poppedLocation);
minVersion = std::min(minVersion, tagData->popped);
}
}
}
@ -721,6 +721,7 @@ ACTOR Future<Void> popDiskQueue( TLogData* self, Reference<LogData> logData ) {
lastCommittedLocation = locationIter->value.first;
}
self->persistentQueue->pop( std::min(minLocation, lastCommittedLocation) );
logData->queuePoppedVersion = std::max(logData->queuePoppedVersion, minVersion);
}
return Void();
@ -747,6 +748,7 @@ ACTOR Future<Void> updatePersistentData( TLogData* self, Reference<LogData> logD
for(tagId = 0; tagId < logData->tag_data[tagLocality].size(); tagId++) {
state Reference<LogData::TagData> tagData = logData->tag_data[tagLocality][tagId];
if(tagData) {
wait(tagData->eraseMessagesBefore( tagData->popped, self, logData, TaskUpdateStorage ));
state Version currentVersion = 0;
// Clear recently popped versions from persistentData if necessary
updatePersistentPopped( self, logData, tagData );

View File

@ -55,6 +55,16 @@ ACTOR Future<Void> waitFailureClient(RequestStream<ReplyPromise<Void>> waitFailu
}
}
ACTOR Future<Void> waitFailureClientStrict(RequestStream<ReplyPromise<Void>> waitFailure, double failureReactionTime, int taskID){
loop {
wait(waitFailureClient(waitFailure, 0, 0, taskID));
wait(delay(failureReactionTime, taskID) || IFailureMonitor::failureMonitor().onStateEqual( waitFailure.getEndpoint(), FailureStatus(false)));
if(IFailureMonitor::failureMonitor().getState( waitFailure.getEndpoint() ).isFailed()) {
return Void();
}
}
}
ACTOR Future<Void> waitFailureTracker(RequestStream<ReplyPromise<Void>> waitFailure, Reference<AsyncVar<bool>> failed, double reactionTime, double reactionSlope, int taskID){
loop {
try {

View File

@ -28,6 +28,9 @@ Future<Void> waitFailureServer(const FutureStream<ReplyPromise<Void>>& waitFailu
Future<Void> waitFailureClient(const RequestStream<ReplyPromise<Void>>& waitFailure,
double const& failureReactionTime=0, double const& failureReactionSlope=0, int const& taskID=TaskDefaultEndpoint);
// Talks to a wait failure server; returns Void on failure. The reaction time is always waited before failure is reported.
Future<Void> waitFailureClientStrict(const RequestStream<ReplyPromise<Void>>& waitFailure, double const& failureReactionTime=0, int const& taskID=TaskDefaultEndpoint);
// talks to a wait failure server, updates failed to be true or false based on failure status.
Future<Void> waitFailureTracker(const RequestStream<ReplyPromise<Void>>& waitFailure, Reference<AsyncVar<bool>> const& failed,
double const& failureReactionTime=0, double const& failureReactionSlope=0, int const& taskID=TaskDefaultEndpoint);

View File

@ -219,13 +219,19 @@ struct ConsistencyCheckWorkload : TestWorkload
}
//Check that nothing is in the TLog queues
int64_t maxTLogQueueSize = wait(getMaxTLogQueueSize(cx, self->dbInfo));
if(maxTLogQueueSize > 1e5) // FIXME: Should be zero?
std::pair<int64_t,int64_t> maxTLogQueueInfo = wait(getTLogQueueInfo(cx, self->dbInfo));
if(maxTLogQueueInfo.first > 1e5) // FIXME: Should be zero?
{
TraceEvent("ConsistencyCheck_NonZeroTLogQueue").detail("MaxQueueSize", maxTLogQueueSize);
TraceEvent("ConsistencyCheck_NonZeroTLogQueue").detail("MaxQueueSize", maxTLogQueueInfo.first);
self->testFailure("Non-zero tlog queue size");
}
if(maxTLogQueueInfo.second > 30e6)
{
TraceEvent("ConsistencyCheck_PoppedVersionLag").detail("PoppedVersionLag", maxTLogQueueInfo.second);
self->testFailure("large popped version lag");
}
//Check that nothing is in the storage server queues
try
{

View File

@ -214,7 +214,7 @@ double testKeyToDouble(const KeyRef& p, const KeyRef& prefix);
ACTOR Future<Void> databaseWarmer(Database cx);
Future<Void> quietDatabase( Database const& cx, Reference<AsyncVar<struct ServerDBInfo>> const&, std::string phase, int64_t dataInFlightGate = 2e6, int64_t maxTLogQueueGate = 5e6,
int64_t maxStorageServerQueueGate = 5e6, int64_t maxDataDistributionQueueSize = 0);
int64_t maxStorageServerQueueGate = 5e6, int64_t maxDataDistributionQueueSize = 0, int64_t maxPoppedVersionLag = 30e6);
#include "flow/unactorcompiler.h"

View File

@ -1098,6 +1098,7 @@ std::string TraceEventFields::getValue(std::string key) const {
}
else {
TraceEvent ev(SevWarn, "TraceEventFieldNotFound");
ev.suppressFor(1.0);
if(tryGetValue("Type", value)) {
ev.detail("Event", value);
}

View File

@ -32,7 +32,7 @@
<Wix xmlns='http://schemas.microsoft.com/wix/2006/wi'>
<Product Name='$(var.Title)'
Id='{B440CAB2-F9C8-4185-9863-E8A7E7587FED}'
Id='{311BF306-11DD-487C-B2BC-D2A1D85DFCA3}'
UpgradeCode='{A95EA002-686E-4164-8356-C715B7F8B1C8}'
Version='$(var.Version)'
Manufacturer='$(var.Manufacturer)'