Merge branch 'master' into features/sqlite-crc32c
This commit is contained in:
commit
53f8ba499c
|
@ -26,7 +26,7 @@ project(foundationdb
|
|||
set(CMAKE_MODULE_PATH "${CMAKE_MODULE_PATH};${PROJECT_SOURCE_DIR}/cmake")
|
||||
message (STATUS "${PROJECT_SOURCE_DIR} ${PROJECT_BINARY_DIR}")
|
||||
if("${PROJECT_SOURCE_DIR}" STREQUAL "${PROJECT_BINARY_DIR}")
|
||||
message(FATAL_ERROR "In-source builds are forbidden, unsupported, and stupid!!")
|
||||
message(FATAL_ERROR "In-source builds are forbidden")
|
||||
endif()
|
||||
|
||||
if(NOT CMAKE_BUILD_TYPE AND NOT CMAKE_CONFIGURATION_TYPES)
|
||||
|
@ -196,7 +196,7 @@ if (CMAKE_EXPORT_COMPILE_COMMANDS)
|
|||
DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/build/gen_compile_db.py ${CMAKE_CURRENT_BINARY_DIR}/compile_commands.json
|
||||
COMMENT "Build compile commands for IDE"
|
||||
)
|
||||
add_custom_target(procossed_compile_commands ALL DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/compile_commands.json ${CMAKE_CURRENT_BINARY_DIR}/compile_commands.json)
|
||||
add_custom_target(processed_compile_commands ALL DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/compile_commands.json ${CMAKE_CURRENT_BINARY_DIR}/compile_commands.json)
|
||||
endif()
|
||||
|
||||
################################################################################
|
||||
|
|
|
@ -6,8 +6,7 @@ set(SRCS
|
|||
FDBLibTLSSession.cpp
|
||||
FDBLibTLSSession.h
|
||||
FDBLibTLSVerify.cpp
|
||||
FDBLibTLSVerify.h
|
||||
ReferenceCounted.h)
|
||||
FDBLibTLSVerify.h)
|
||||
|
||||
add_library(FDBLibTLS ${SRCS})
|
||||
target_link_libraries(FDBLibTLS PUBLIC LibreSSL boost_target)
|
||||
target_link_libraries(FDBLibTLS PUBLIC LibreSSL boost_target PRIVATE flow)
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
<?xml version="1.0" encoding="utf-8"?>
|
||||
<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
|
||||
<Project DefaultTargets="Build" ToolsVersion="14.1" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
|
||||
<ItemGroup Label="ProjectConfigurations">
|
||||
<ProjectConfiguration Include="Debug|X64">
|
||||
<Configuration>Debug</Configuration>
|
||||
|
@ -23,11 +23,11 @@
|
|||
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|X64'" Label="Configuration">
|
||||
<ConfigurationType>StaticLibrary</ConfigurationType>
|
||||
<CharacterSet>MultiByte</CharacterSet>
|
||||
<PlatformToolset>v140_xp</PlatformToolset>
|
||||
<PlatformToolset>v141</PlatformToolset>
|
||||
</PropertyGroup>
|
||||
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|X64'" Label="Configuration">
|
||||
<ConfigurationType>StaticLibrary</ConfigurationType>
|
||||
<CharacterSet>MultiByte</CharacterSet>
|
||||
<PlatformToolset>v140_xp</PlatformToolset>
|
||||
<PlatformToolset>v141</PlatformToolset>
|
||||
</PropertyGroup>
|
||||
</Project>
|
||||
|
|
|
@ -24,7 +24,7 @@
|
|||
#pragma once
|
||||
|
||||
#include "fdbrpc/ITLSPlugin.h"
|
||||
#include "ReferenceCounted.h"
|
||||
#include "flow/FastRef.h"
|
||||
|
||||
#include <tls.h>
|
||||
|
||||
|
|
|
@ -24,7 +24,7 @@
|
|||
#pragma once
|
||||
|
||||
#include "fdbrpc/ITLSPlugin.h"
|
||||
#include "ReferenceCounted.h"
|
||||
#include "flow/FastRef.h"
|
||||
|
||||
#include "FDBLibTLS/FDBLibTLSPlugin.h"
|
||||
#include "FDBLibTLS/FDBLibTLSVerify.h"
|
||||
|
|
|
@ -19,6 +19,8 @@
|
|||
*/
|
||||
|
||||
#include "FDBLibTLS/FDBLibTLSSession.h"
|
||||
|
||||
#include "flow/flow.h"
|
||||
#include "flow/Trace.h"
|
||||
|
||||
#include <openssl/bio.h>
|
||||
|
@ -60,7 +62,7 @@ static ssize_t tls_write_func(struct tls *ctx, const void *buf, size_t buflen, v
|
|||
|
||||
FDBLibTLSSession::FDBLibTLSSession(Reference<FDBLibTLSPolicy> policy, bool is_client, const char* servername, TLSSendCallbackFunc send_func, void* send_ctx, TLSRecvCallbackFunc recv_func, void* recv_ctx, void* uidptr) :
|
||||
tls_ctx(NULL), tls_sctx(NULL), is_client(is_client), policy(policy), send_func(send_func), send_ctx(send_ctx),
|
||||
recv_func(recv_func), recv_ctx(recv_ctx), handshake_completed(false) {
|
||||
recv_func(recv_func), recv_ctx(recv_ctx), handshake_completed(false), lastVerifyFailureLogged(0.0) {
|
||||
if (uidptr)
|
||||
uid = * (UID*) uidptr;
|
||||
|
||||
|
@ -342,8 +344,11 @@ bool FDBLibTLSSession::verify_peer() {
|
|||
|
||||
if (!rc) {
|
||||
// log the various failure reasons
|
||||
for (std::string reason : verify_failure_reasons) {
|
||||
TraceEvent("FDBLibTLSVerifyFailure", uid).detail("Reason", reason).suppressFor(1.0);
|
||||
if(now() - lastVerifyFailureLogged > 1.0) {
|
||||
for (std::string reason : verify_failure_reasons) {
|
||||
lastVerifyFailureLogged = now();
|
||||
TraceEvent("FDBLibTLSVerifyFailure", uid).detail("Reason", reason);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -24,7 +24,7 @@
|
|||
#pragma once
|
||||
|
||||
#include "fdbrpc/ITLSPlugin.h"
|
||||
#include "ReferenceCounted.h"
|
||||
#include "flow/FastRef.h"
|
||||
|
||||
#include "FDBLibTLS/FDBLibTLSPolicy.h"
|
||||
#include "FDBLibTLS/FDBLibTLSVerify.h"
|
||||
|
@ -61,6 +61,7 @@ struct FDBLibTLSSession : ITLSSession, ReferenceCounted<FDBLibTLSSession> {
|
|||
bool handshake_completed;
|
||||
|
||||
UID uid;
|
||||
double lastVerifyFailureLogged;
|
||||
};
|
||||
|
||||
#endif /* FDB_LIBTLS_SESSION_H */
|
||||
|
|
|
@ -25,7 +25,7 @@
|
|||
|
||||
#include <stdint.h>
|
||||
|
||||
#include "ReferenceCounted.h"
|
||||
#include "flow/FastRef.h"
|
||||
|
||||
#include <map>
|
||||
#include <string>
|
||||
|
|
|
@ -1,108 +0,0 @@
|
|||
/*
|
||||
* ReferenceCounted.h
|
||||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef FDB_REFERENCE_COUNTED_H
|
||||
#define FDB_REFERENCE_COUNTED_H
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <stdlib.h>
|
||||
|
||||
template <class T>
|
||||
struct ReferenceCounted {
|
||||
void addref() { ++referenceCount; }
|
||||
void delref() { if (--referenceCount == 0) { delete (T*)this; } }
|
||||
|
||||
ReferenceCounted() : referenceCount(1) {}
|
||||
|
||||
private:
|
||||
ReferenceCounted(const ReferenceCounted&) = delete;
|
||||
void operator=(const ReferenceCounted&) = delete;
|
||||
int32_t referenceCount;
|
||||
};
|
||||
|
||||
template <class P>
|
||||
void addref(P* ptr) { ptr->addref(); }
|
||||
template <class P>
|
||||
void delref(P* ptr) { ptr->delref(); }
|
||||
|
||||
template <class P>
|
||||
struct Reference {
|
||||
Reference() : ptr(NULL) {}
|
||||
explicit Reference( P* ptr ) : ptr(ptr) {}
|
||||
static Reference<P> addRef( P* ptr ) { ptr->addref(); return Reference(ptr); }
|
||||
|
||||
Reference(const Reference& r) : ptr(r.getPtr()) { if (ptr) addref(ptr); }
|
||||
Reference(Reference && r) : ptr(r.getPtr()) { r.ptr = NULL; }
|
||||
|
||||
template <class Q>
|
||||
Reference(const Reference<Q>& r) : ptr(r.getPtr()) { if (ptr) addref(ptr); }
|
||||
template <class Q>
|
||||
Reference(Reference<Q> && r) : ptr(r.getPtr()) { r.setPtrUnsafe(NULL); }
|
||||
|
||||
~Reference() { if (ptr) delref(ptr); }
|
||||
Reference& operator=(const Reference& r) {
|
||||
P* oldPtr = ptr;
|
||||
P* newPtr = r.ptr;
|
||||
if (oldPtr != newPtr) {
|
||||
if (newPtr) addref(newPtr);
|
||||
ptr = newPtr;
|
||||
if (oldPtr) delref(oldPtr);
|
||||
}
|
||||
return *this;
|
||||
}
|
||||
Reference& operator=(Reference&& r) {
|
||||
P* oldPtr = ptr;
|
||||
P* newPtr = r.ptr;
|
||||
if (oldPtr != newPtr) {
|
||||
r.ptr = NULL;
|
||||
ptr = newPtr;
|
||||
if (oldPtr) delref(oldPtr);
|
||||
}
|
||||
return *this;
|
||||
}
|
||||
|
||||
void clear() {
|
||||
P* oldPtr = ptr;
|
||||
if (oldPtr) {
|
||||
ptr = NULL;
|
||||
delref(oldPtr);
|
||||
}
|
||||
}
|
||||
|
||||
P* operator->() const { return ptr; }
|
||||
P& operator*() const { return *ptr; }
|
||||
P* getPtr() const { return ptr; }
|
||||
|
||||
void setPtrUnsafe( P* p ) { ptr = p; }
|
||||
|
||||
P* extractPtr() { auto *p = ptr; ptr = NULL; return p; }
|
||||
|
||||
bool boolean_test() const { return ptr != 0; }
|
||||
private:
|
||||
P *ptr;
|
||||
};
|
||||
|
||||
template <class P>
|
||||
bool operator==( const Reference<P>& lhs, const Reference<P>& rhs ) {
|
||||
return lhs.getPtr() == rhs.getPtr();
|
||||
}
|
||||
|
||||
#endif /* FDB_REFERENCE_COUNTED_H */
|
|
@ -31,7 +31,6 @@
|
|||
#include <boost/circular_buffer.hpp>
|
||||
|
||||
#include "fdbrpc/ITLSPlugin.h"
|
||||
#include "ReferenceCounted.h"
|
||||
|
||||
#include "FDBLibTLS/FDBLibTLSPlugin.h"
|
||||
|
||||
|
|
|
@ -28,7 +28,6 @@
|
|||
#include <openssl/objects.h>
|
||||
|
||||
#include "fdbrpc/ITLSPlugin.h"
|
||||
#include "ReferenceCounted.h"
|
||||
|
||||
#include "FDBLibTLS/FDBLibTLSPlugin.h"
|
||||
#include "FDBLibTLS/FDBLibTLSPolicy.h"
|
||||
|
|
23
Makefile
23
Makefile
|
@ -1,28 +1,27 @@
|
|||
export
|
||||
PLATFORM := $(shell uname)
|
||||
ARCH := $(shell uname -m)
|
||||
ifeq ("$(wildcard /etc/centos-release)", "")
|
||||
LIBSTDCPP_HACK = 1
|
||||
else
|
||||
LIBSTDCPP_HACK = 0
|
||||
endif
|
||||
|
||||
TOPDIR := $(shell pwd)
|
||||
|
||||
# Allow custom libc++ hack for Ubuntu
|
||||
ifeq ("$(wildcard /etc/centos-release)", "")
|
||||
LIBSTDCPP_HACK ?= 1
|
||||
endif
|
||||
|
||||
ifeq ($(ARCH),x86_64)
|
||||
ARCH := x64
|
||||
else
|
||||
$(error Not prepared to compile on $(ARCH))
|
||||
endif
|
||||
|
||||
MONO := $(shell which mono)
|
||||
MONO := $(shell which mono 2>/dev/null)
|
||||
ifeq ($(MONO),)
|
||||
MONO := /usr/bin/mono
|
||||
endif
|
||||
|
||||
MCS := $(shell which mcs)
|
||||
MCS := $(shell which mcs 2>/dev/null)
|
||||
ifeq ($(MCS),)
|
||||
MCS := $(shell which dmcs)
|
||||
MCS := $(shell which dmcs 2>/dev/null)
|
||||
endif
|
||||
ifeq ($(MCS),)
|
||||
MCS := /usr/bin/mcs
|
||||
|
@ -56,8 +55,8 @@ else ifeq ($(PLATFORM),Darwin)
|
|||
CC := /usr/bin/clang
|
||||
CXX := /usr/bin/clang
|
||||
|
||||
CFLAGS += -mmacosx-version-min=10.7 -stdlib=libc++
|
||||
CXXFLAGS += -mmacosx-version-min=10.7 -std=c++11 -stdlib=libc++ -msse4.2 -Wno-undefined-var-template -Wno-unknown-warning-option
|
||||
CFLAGS += -mmacosx-version-min=10.14 -stdlib=libc++
|
||||
CXXFLAGS += -mmacosx-version-min=10.14 -std=c++17 -stdlib=libc++ -msse4.2 -Wno-undefined-var-template -Wno-unknown-warning-option
|
||||
|
||||
.LIBPATTERNS := lib%.dylib lib%.a
|
||||
|
||||
|
@ -70,7 +69,7 @@ else
|
|||
endif
|
||||
BOOSTDIR ?= ${BOOST_BASEDIR}/${BOOST_BASENAME}
|
||||
|
||||
CCACHE := $(shell which ccache)
|
||||
CCACHE := $(shell which ccache 2>/dev/null)
|
||||
ifneq ($(CCACHE),)
|
||||
CCACHE_CC := $(CCACHE) $(CC)
|
||||
CCACHE_CXX := $(CCACHE) $(CXX)
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
<?xml version="1.0" encoding="utf-8"?>
|
||||
<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
|
||||
<Project DefaultTargets="Build" ToolsVersion="14.1" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
|
||||
<PropertyGroup Condition="'$(Release)' != 'true' ">
|
||||
</PropertyGroup>
|
||||
<PropertyGroup Condition="'$(Release)' == 'true' ">
|
||||
|
@ -38,14 +38,14 @@
|
|||
<ConfigurationType>DynamicLibrary</ConfigurationType>
|
||||
<UseDebugLibraries>true</UseDebugLibraries>
|
||||
<CharacterSet>MultiByte</CharacterSet>
|
||||
<PlatformToolset>v140_xp</PlatformToolset>
|
||||
<PlatformToolset>v141</PlatformToolset>
|
||||
</PropertyGroup>
|
||||
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
|
||||
<ConfigurationType>DynamicLibrary</ConfigurationType>
|
||||
<UseDebugLibraries>false</UseDebugLibraries>
|
||||
<WholeProgramOptimization>true</WholeProgramOptimization>
|
||||
<CharacterSet>MultiByte</CharacterSet>
|
||||
<PlatformToolset>v140_xp</PlatformToolset>
|
||||
<PlatformToolset>v141</PlatformToolset>
|
||||
</PropertyGroup>
|
||||
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
|
||||
<ImportGroup Label="ExtensionSettings">
|
||||
|
@ -83,6 +83,7 @@ FOR /F "tokens=1" %%i in ('hg.exe id') do copy /Y "$(TargetPath)" "$(TargetPath)
|
|||
<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=_WIN32_WINNT_WS03;BOOST_ALL_NO_LIB;WINVER=_WIN32_WINNT_WS03;NTDDI_VERSION=NTDDI_WS03;_DEBUG;_HAS_ITERATOR_DEBUGGING=0;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
|
||||
<RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
|
||||
<AdditionalOptions>/bigobj @..\..\flow/no_intellisense.opt %(AdditionalOptions)</AdditionalOptions>
|
||||
<LanguageStandard>stdcpp17</LanguageStandard>
|
||||
</ClCompile>
|
||||
<Link>
|
||||
<GenerateDebugInformation>true</GenerateDebugInformation>
|
||||
|
@ -98,6 +99,7 @@ FOR /F "tokens=1" %%i in ('hg.exe id') do copy /Y "$(TargetPath)" "$(TargetPath)
|
|||
<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=_WIN32_WINNT_WS03;BOOST_ALL_NO_LIB;WINVER=_WIN32_WINNT_WS03;NTDDI_VERSION=NTDDI_WS03;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
|
||||
<RuntimeLibrary>MultiThreaded</RuntimeLibrary>
|
||||
<AdditionalOptions>/bigobj @..\..\flow/no_intellisense.opt %(AdditionalOptions)</AdditionalOptions>
|
||||
<LanguageStandard>stdcpp17</LanguageStandard>
|
||||
</ClCompile>
|
||||
<Link>
|
||||
<GenerateDebugInformation>true</GenerateDebugInformation>
|
||||
|
|
|
@ -24,15 +24,21 @@ fdb_c_CFLAGS := $(fdbclient_CFLAGS)
|
|||
fdb_c_LDFLAGS := $(fdbrpc_LDFLAGS)
|
||||
fdb_c_LIBS := lib/libfdbclient.a lib/libfdbrpc.a lib/libflow.a $(FDB_TLS_LIB)
|
||||
fdb_c_STATIC_LIBS := $(TLS_LIBS)
|
||||
fdb_c_tests_LIBS := -Llib -lfdb_c
|
||||
fdb_c_tests_LIBS := -Llib -lfdb_c -lstdc++
|
||||
fdb_c_tests_HEADERS := -Ibindings/c
|
||||
|
||||
CLEAN_TARGETS += fdb_c_tests_clean
|
||||
|
||||
ifeq ($(PLATFORM),linux)
|
||||
fdb_c_LDFLAGS += -Wl,--version-script=bindings/c/fdb_c.map -static-libgcc -Wl,-z,nodelete -lm -lpthread -lrt -ldl
|
||||
ifeq ($(LIBSTDCPP_HACK),1)
|
||||
fdb_c_LIBS += lib/libstdc++.a
|
||||
# Link our custom libstdc++ statically in Ubuntu, if hacking
|
||||
ifeq ("$(wildcard /etc/centos-release)", "")
|
||||
ifeq ($(LIBSTDCPP_HACK),1)
|
||||
fdb_c_LIBS += lib/libstdc++.a
|
||||
endif
|
||||
# Link stdc++ statically in Centos, if not hacking
|
||||
else
|
||||
fdb_c_STATIC_LIBS += -static-libstdc++
|
||||
endif
|
||||
fdb_c_tests_LIBS += -lpthread
|
||||
endif
|
||||
|
@ -86,11 +92,11 @@ bindings/c/foundationdb/fdb_c_options.g.h: bin/vexillographer.exe fdbclient/vexi
|
|||
|
||||
bin/fdb_c_performance_test: bindings/c/test/performance_test.c bindings/c/test/test.h fdb_c
|
||||
@echo "Compiling fdb_c_performance_test"
|
||||
@$(CC) $(CFLAGS) $(fdb_c_tests_HEADERS) -o $@ bindings/c/test/performance_test.c $(fdb_c_tests_LIBS)
|
||||
@$(CC) $(CFLAGS) $(fdb_c_tests_HEADERS) -o $@ bindings/c/test/performance_test.c $(fdb_c_tests_LIBS)
|
||||
|
||||
bin/fdb_c_ryw_benchmark: bindings/c/test/ryw_benchmark.c bindings/c/test/test.h fdb_c
|
||||
@echo "Compiling fdb_c_ryw_benchmark"
|
||||
@$(CC) $(CFLAGS) $(fdb_c_tests_HEADERS) -o $@ bindings/c/test/ryw_benchmark.c $(fdb_c_tests_LIBS)
|
||||
@$(CC) $(CFLAGS) $(fdb_c_tests_HEADERS) -o $@ bindings/c/test/ryw_benchmark.c $(fdb_c_tests_LIBS)
|
||||
|
||||
packages/fdb-c-tests-$(VERSION)-$(PLATFORM).tar.gz: bin/fdb_c_performance_test bin/fdb_c_ryw_benchmark
|
||||
@echo "Packaging $@"
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
<?xml version="1.0" encoding="utf-8"?>
|
||||
<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
|
||||
<Project DefaultTargets="Build" ToolsVersion="14.1" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
|
||||
<PropertyGroup Condition="'$(Release)' != 'true' ">
|
||||
</PropertyGroup>
|
||||
<PropertyGroup Condition="'$(Release)' == 'true' ">
|
||||
|
@ -63,12 +63,12 @@
|
|||
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|X64'" Label="Configuration">
|
||||
<ConfigurationType>StaticLibrary</ConfigurationType>
|
||||
<CharacterSet>MultiByte</CharacterSet>
|
||||
<PlatformToolset>v140_xp</PlatformToolset>
|
||||
<PlatformToolset>v141</PlatformToolset>
|
||||
</PropertyGroup>
|
||||
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|X64'" Label="Configuration">
|
||||
<ConfigurationType>StaticLibrary</ConfigurationType>
|
||||
<CharacterSet>MultiByte</CharacterSet>
|
||||
<PlatformToolset>v140_xp</PlatformToolset>
|
||||
<PlatformToolset>v141</PlatformToolset>
|
||||
</PropertyGroup>
|
||||
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
|
||||
<ImportGroup Label="ExtensionSettings">
|
||||
|
@ -99,6 +99,7 @@
|
|||
<AdditionalIncludeDirectories>%(AdditionalIncludeDirectories);..\c</AdditionalIncludeDirectories>
|
||||
<MultiProcessorCompilation>true</MultiProcessorCompilation>
|
||||
<AdditionalOptions>/bigobj @..\..\flow/no_intellisense.opt %(AdditionalOptions)</AdditionalOptions>
|
||||
<LanguageStandard>stdcpp17</LanguageStandard>
|
||||
</ClCompile>
|
||||
<Link>
|
||||
<SubSystem>Console</SubSystem>
|
||||
|
@ -126,6 +127,7 @@
|
|||
<MultiProcessorCompilation>true</MultiProcessorCompilation>
|
||||
<FavorSizeOrSpeed>Speed</FavorSizeOrSpeed>
|
||||
<MinimalRebuild>false</MinimalRebuild>
|
||||
<LanguageStandard>stdcpp17</LanguageStandard>
|
||||
</ClCompile>
|
||||
<Link>
|
||||
<SubSystem>Console</SubSystem>
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
<?xml version="1.0" encoding="utf-8"?>
|
||||
<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
|
||||
<Project DefaultTargets="Build" ToolsVersion="14.1" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
|
||||
<Import Project="$(SolutionDir)versions.target" />
|
||||
<PropertyGroup Condition="'$(Release)' != 'true' ">
|
||||
<PreReleaseDecoration>-PRERELEASE</PreReleaseDecoration>
|
||||
|
@ -35,14 +35,14 @@
|
|||
<ConfigurationType>Application</ConfigurationType>
|
||||
<UseDebugLibraries>true</UseDebugLibraries>
|
||||
<CharacterSet>MultiByte</CharacterSet>
|
||||
<PlatformToolset>v140_xp</PlatformToolset>
|
||||
<PlatformToolset>v141</PlatformToolset>
|
||||
</PropertyGroup>
|
||||
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|X64'" Label="Configuration">
|
||||
<ConfigurationType>Application</ConfigurationType>
|
||||
<UseDebugLibraries>false</UseDebugLibraries>
|
||||
<WholeProgramOptimization>false</WholeProgramOptimization>
|
||||
<CharacterSet>MultiByte</CharacterSet>
|
||||
<PlatformToolset>v140_xp</PlatformToolset>
|
||||
<PlatformToolset>v141</PlatformToolset>
|
||||
</PropertyGroup>
|
||||
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
|
||||
<ImportGroup Label="ExtensionSettings">
|
||||
|
@ -69,6 +69,7 @@
|
|||
<ItemDefinitionGroup>
|
||||
<ClCompile>
|
||||
<PreprocessorDefinitions>FDB_VT_VERSION="$(Version)$(PreReleaseDecoration)";FDB_VT_PACKAGE_NAME="$(PackageName)";%(PreprocessorDefinitions)</PreprocessorDefinitions>
|
||||
<LanguageStandard>stdcpp17</LanguageStandard>
|
||||
</ClCompile>
|
||||
</ItemDefinitionGroup>
|
||||
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|X64'">
|
||||
|
@ -82,6 +83,7 @@
|
|||
<MinimalRebuild>false</MinimalRebuild>
|
||||
<RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
|
||||
<AdditionalOptions> @../../../flow/no_intellisense.opt %(AdditionalOptions)</AdditionalOptions>
|
||||
<LanguageStandard>stdcpp17</LanguageStandard>
|
||||
</ClCompile>
|
||||
<Link>
|
||||
<SubSystem>Console</SubSystem>
|
||||
|
@ -107,6 +109,7 @@
|
|||
<BufferSecurityCheck>false</BufferSecurityCheck>
|
||||
<EnableEnhancedInstructionSet>StreamingSIMDExtensions2</EnableEnhancedInstructionSet>
|
||||
<AdditionalOptions> @../flow/no_intellisense.opt %(AdditionalOptions)</AdditionalOptions>
|
||||
<LanguageStandard>stdcpp17</LanguageStandard>
|
||||
</ClCompile>
|
||||
<Link>
|
||||
<SubSystem>Console</SubSystem>
|
||||
|
|
|
@ -35,8 +35,7 @@ _fdb_flow_tester_clean:
|
|||
@rm -rf bindings/flow/bin
|
||||
|
||||
ifeq ($(PLATFORM),linux)
|
||||
fdb_flow_tester_LIBS += -ldl -lpthread -lrt
|
||||
fdb_flow_tester_LDFLAGS += -static-libstdc++ -static-libgcc
|
||||
fdb_flow_tester_LDFLAGS += -static-libstdc++ -static-libgcc -ldl -lpthread -lrt -lm
|
||||
else ifeq ($(PLATFORM),osx)
|
||||
fdb_flow_tester_LDFLAGS += -lc++
|
||||
endif
|
||||
|
|
|
@ -5,7 +5,7 @@ fdb-go
|
|||
|
||||
This package requires:
|
||||
|
||||
- Go 1.1+ with CGO enabled
|
||||
- Go 1.11+ with CGO enabled
|
||||
- [Mono](http://www.mono-project.com/) (macOS or Linux) or [Visual Studio](https://www.visualstudio.com/) (Windows) (build-time only)
|
||||
- FoundationDB C API 2.0.x-6.1.x (part of the [FoundationDB client packages](https://apple.github.io/foundationdb/downloads.html#c))
|
||||
|
||||
|
|
|
@ -22,10 +22,8 @@
|
|||
|
||||
package fdb
|
||||
|
||||
/*
|
||||
#define FDB_API_VERSION 610
|
||||
#include <foundationdb/fdb_c.h>
|
||||
*/
|
||||
// #define FDB_API_VERSION 610
|
||||
// #include <foundationdb/fdb_c.h>
|
||||
import "C"
|
||||
|
||||
// Deprecated: Use OpenDatabase or OpenDefault to obtain a database handle directly
|
||||
|
|
|
@ -22,10 +22,8 @@
|
|||
|
||||
package fdb
|
||||
|
||||
/*
|
||||
#define FDB_API_VERSION 610
|
||||
#include <foundationdb/fdb_c.h>
|
||||
*/
|
||||
// #define FDB_API_VERSION 610
|
||||
// #include <foundationdb/fdb_c.h>
|
||||
import "C"
|
||||
|
||||
import (
|
||||
|
|
|
@ -22,10 +22,8 @@
|
|||
|
||||
package fdb
|
||||
|
||||
/*
|
||||
#define FDB_API_VERSION 610
|
||||
#include <foundationdb/fdb_c.h>
|
||||
*/
|
||||
// #define FDB_API_VERSION 610
|
||||
// #include <foundationdb/fdb_c.h>
|
||||
import "C"
|
||||
|
||||
import (
|
||||
|
|
|
@ -22,11 +22,9 @@
|
|||
|
||||
package fdb
|
||||
|
||||
/*
|
||||
#define FDB_API_VERSION 610
|
||||
#include <foundationdb/fdb_c.h>
|
||||
#include <stdlib.h>
|
||||
*/
|
||||
// #define FDB_API_VERSION 610
|
||||
// #include <foundationdb/fdb_c.h>
|
||||
// #include <stdlib.h>
|
||||
import "C"
|
||||
|
||||
import (
|
||||
|
|
|
@ -22,22 +22,20 @@
|
|||
|
||||
package fdb
|
||||
|
||||
/*
|
||||
#cgo LDFLAGS: -lfdb_c -lm
|
||||
#define FDB_API_VERSION 610
|
||||
#include <foundationdb/fdb_c.h>
|
||||
#include <string.h>
|
||||
|
||||
extern void unlockMutex(void*);
|
||||
|
||||
void go_callback(FDBFuture* f, void* m) {
|
||||
unlockMutex(m);
|
||||
}
|
||||
|
||||
void go_set_callback(void* f, void* m) {
|
||||
fdb_future_set_callback(f, (FDBCallback)&go_callback, m);
|
||||
}
|
||||
*/
|
||||
// #cgo LDFLAGS: -lfdb_c -lm
|
||||
// #define FDB_API_VERSION 610
|
||||
// #include <foundationdb/fdb_c.h>
|
||||
// #include <string.h>
|
||||
//
|
||||
// extern void unlockMutex(void*);
|
||||
//
|
||||
// void go_callback(FDBFuture* f, void* m) {
|
||||
// unlockMutex(m);
|
||||
// }
|
||||
//
|
||||
// void go_set_callback(void* f, void* m) {
|
||||
// fdb_future_set_callback(f, (FDBCallback)&go_callback, m);
|
||||
// }
|
||||
import "C"
|
||||
|
||||
import (
|
||||
|
@ -100,15 +98,18 @@ func fdb_future_block_until_ready(f *C.FDBFuture) {
|
|||
m.Lock()
|
||||
}
|
||||
|
||||
func (f future) BlockUntilReady() {
|
||||
func (f *future) BlockUntilReady() {
|
||||
defer runtime.KeepAlive(f)
|
||||
fdb_future_block_until_ready(f.ptr)
|
||||
}
|
||||
|
||||
func (f future) IsReady() bool {
|
||||
func (f *future) IsReady() bool {
|
||||
defer runtime.KeepAlive(f)
|
||||
return C.fdb_future_is_ready(f.ptr) != 0
|
||||
}
|
||||
|
||||
func (f future) Cancel() {
|
||||
func (f *future) Cancel() {
|
||||
defer runtime.KeepAlive(f)
|
||||
C.fdb_future_cancel(f.ptr)
|
||||
}
|
||||
|
||||
|
@ -140,6 +141,8 @@ type futureByteSlice struct {
|
|||
|
||||
func (f *futureByteSlice) Get() ([]byte, error) {
|
||||
f.o.Do(func() {
|
||||
defer runtime.KeepAlive(f.future)
|
||||
|
||||
var present C.fdb_bool_t
|
||||
var value *C.uint8_t
|
||||
var length C.int
|
||||
|
@ -195,6 +198,8 @@ type futureKey struct {
|
|||
|
||||
func (f *futureKey) Get() (Key, error) {
|
||||
f.o.Do(func() {
|
||||
defer runtime.KeepAlive(f.future)
|
||||
|
||||
var value *C.uint8_t
|
||||
var length C.int
|
||||
|
||||
|
@ -241,7 +246,9 @@ type futureNil struct {
|
|||
*future
|
||||
}
|
||||
|
||||
func (f futureNil) Get() error {
|
||||
func (f *futureNil) Get() error {
|
||||
defer runtime.KeepAlive(f.future)
|
||||
|
||||
f.BlockUntilReady()
|
||||
if err := C.fdb_future_get_error(f.ptr); err != 0 {
|
||||
return Error{int(err)}
|
||||
|
@ -250,7 +257,7 @@ func (f futureNil) Get() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (f futureNil) MustGet() {
|
||||
func (f *futureNil) MustGet() {
|
||||
if err := f.Get(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
@ -272,7 +279,9 @@ func stringRefToSlice(ptr unsafe.Pointer) []byte {
|
|||
return C.GoBytes(src, size)
|
||||
}
|
||||
|
||||
func (f futureKeyValueArray) Get() ([]KeyValue, bool, error) {
|
||||
func (f *futureKeyValueArray) Get() ([]KeyValue, bool, error) {
|
||||
defer runtime.KeepAlive(f.future)
|
||||
|
||||
f.BlockUntilReady()
|
||||
|
||||
var kvs *C.FDBKeyValue
|
||||
|
@ -316,17 +325,20 @@ type futureInt64 struct {
|
|||
*future
|
||||
}
|
||||
|
||||
func (f futureInt64) Get() (int64, error) {
|
||||
func (f *futureInt64) Get() (int64, error) {
|
||||
defer runtime.KeepAlive(f.future)
|
||||
|
||||
f.BlockUntilReady()
|
||||
|
||||
var ver C.int64_t
|
||||
if err := C.fdb_future_get_version(f.ptr, &ver); err != 0 {
|
||||
return 0, Error{int(err)}
|
||||
}
|
||||
|
||||
return int64(ver), nil
|
||||
}
|
||||
|
||||
func (f futureInt64) MustGet() int64 {
|
||||
func (f *futureInt64) MustGet() int64 {
|
||||
val, err := f.Get()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
|
@ -356,7 +368,9 @@ type futureStringSlice struct {
|
|||
*future
|
||||
}
|
||||
|
||||
func (f futureStringSlice) Get() ([]string, error) {
|
||||
func (f *futureStringSlice) Get() ([]string, error) {
|
||||
defer runtime.KeepAlive(f.future)
|
||||
|
||||
f.BlockUntilReady()
|
||||
|
||||
var strings **C.char
|
||||
|
@ -375,7 +389,7 @@ func (f futureStringSlice) Get() ([]string, error) {
|
|||
return ret, nil
|
||||
}
|
||||
|
||||
func (f futureStringSlice) MustGet() []string {
|
||||
func (f *futureStringSlice) MustGet() []string {
|
||||
val, err := f.Get()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
|
|
|
@ -46,6 +46,13 @@ func (o NetworkOptions) SetLocalAddress(param string) error {
|
|||
return o.setOpt(10, []byte(param))
|
||||
}
|
||||
|
||||
// enable the object serializer for network communication
|
||||
//
|
||||
// Parameter: 0 is false, every other value is true
|
||||
func (o NetworkOptions) SetUseObjectSerializer(param int64) error {
|
||||
return o.setOpt(11, int64ToBytes(param))
|
||||
}
|
||||
|
||||
// Deprecated
|
||||
//
|
||||
// Parameter: path to cluster file
|
||||
|
@ -444,7 +451,7 @@ const (
|
|||
// Infrequently used. The client has passed a specific row limit and wants
|
||||
// that many rows delivered in a single batch. Because of iterator operation
|
||||
// in client drivers make request batches transparent to the user, consider
|
||||
// ``WANT_ALL`` StreamingMode instead. A row limit must be specified if this
|
||||
// “WANT_ALL“ StreamingMode instead. A row limit must be specified if this
|
||||
// mode is used.
|
||||
StreamingModeExact StreamingMode = 1
|
||||
|
||||
|
@ -561,15 +568,15 @@ type ErrorPredicate int
|
|||
|
||||
const (
|
||||
|
||||
// Returns ``true`` if the error indicates the operations in the
|
||||
// transactions should be retried because of transient error.
|
||||
// Returns “true“ if the error indicates the operations in the transactions
|
||||
// should be retried because of transient error.
|
||||
ErrorPredicateRetryable ErrorPredicate = 50000
|
||||
|
||||
// Returns ``true`` if the error indicates the transaction may have
|
||||
// succeeded, though not in a way the system can verify.
|
||||
// Returns “true“ if the error indicates the transaction may have succeeded,
|
||||
// though not in a way the system can verify.
|
||||
ErrorPredicateMaybeCommitted ErrorPredicate = 50001
|
||||
|
||||
// Returns ``true`` if the error indicates the transaction has not
|
||||
// committed, though in a way that can be retried.
|
||||
// Returns “true“ if the error indicates the transaction has not committed,
|
||||
// though in a way that can be retried.
|
||||
ErrorPredicateRetryableNotCommitted ErrorPredicate = 50002
|
||||
)
|
||||
|
|
|
@ -22,10 +22,8 @@
|
|||
|
||||
package fdb
|
||||
|
||||
/*
|
||||
#define FDB_API_VERSION 610
|
||||
#include <foundationdb/fdb_c.h>
|
||||
*/
|
||||
// #define FDB_API_VERSION 610
|
||||
// #include <foundationdb/fdb_c.h>
|
||||
import "C"
|
||||
|
||||
import (
|
||||
|
|
|
@ -22,10 +22,8 @@
|
|||
|
||||
package fdb
|
||||
|
||||
/*
|
||||
#define FDB_API_VERSION 610
|
||||
#include <foundationdb/fdb_c.h>
|
||||
*/
|
||||
// #define FDB_API_VERSION 610
|
||||
// #include <foundationdb/fdb_c.h>
|
||||
import "C"
|
||||
|
||||
// A ReadTransaction can asynchronously read from a FoundationDB
|
||||
|
|
|
@ -28,14 +28,8 @@
|
|||
#define JNI_NULL nullptr
|
||||
|
||||
#if defined(__GNUG__)
|
||||
#define thread_local __thread
|
||||
// TODO: figure out why the default definition suppresses visibility
|
||||
#undef JNIEXPORT
|
||||
#define JNIEXPORT __attribute__ ((visibility ("default")))
|
||||
#elif defined(_MSC_VER)
|
||||
#define thread_local __declspec(thread)
|
||||
#else
|
||||
#error Missing thread local storage
|
||||
#endif
|
||||
|
||||
static JavaVM* g_jvm = nullptr;
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
<?xml version="1.0" encoding="utf-8"?>
|
||||
<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
|
||||
<Project DefaultTargets="Build" ToolsVersion="14.1" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
|
||||
<Import Project="$(SolutionDir)versions.target" />
|
||||
<PropertyGroup Condition="'$(Release)' != 'true' ">
|
||||
<PreReleaseDecoration>-PRERELEASE</PreReleaseDecoration>
|
||||
|
@ -28,14 +28,14 @@
|
|||
<ConfigurationType>DynamicLibrary</ConfigurationType>
|
||||
<UseDebugLibraries>true</UseDebugLibraries>
|
||||
<CharacterSet>MultiByte</CharacterSet>
|
||||
<PlatformToolset>v140_xp</PlatformToolset>
|
||||
<PlatformToolset>v141</PlatformToolset>
|
||||
</PropertyGroup>
|
||||
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
|
||||
<ConfigurationType>DynamicLibrary</ConfigurationType>
|
||||
<UseDebugLibraries>false</UseDebugLibraries>
|
||||
<WholeProgramOptimization>true</WholeProgramOptimization>
|
||||
<CharacterSet>MultiByte</CharacterSet>
|
||||
<PlatformToolset>v140_xp</PlatformToolset>
|
||||
<PlatformToolset>v141</PlatformToolset>
|
||||
</PropertyGroup>
|
||||
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
|
||||
<ImportGroup Label="ExtensionSettings">
|
||||
|
@ -63,6 +63,7 @@
|
|||
<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=_WIN32_WINNT_WS03;BOOST_ALL_NO_LIB;WINVER=_WIN32_WINNT_WS03;NTDDI_VERSION=NTDDI_WS03;_DEBUG;_HAS_ITERATOR_DEBUGGING=0;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
|
||||
<RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
|
||||
<AdditionalOptions>/bigobj "@$(SolutionDir)flow/no_intellisense.opt" %(AdditionalOptions)</AdditionalOptions>
|
||||
<LanguageStandard>stdcpp17</LanguageStandard>
|
||||
</ClCompile>
|
||||
<Link>
|
||||
<SubSystem>Windows</SubSystem>
|
||||
|
@ -78,6 +79,7 @@
|
|||
<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=_WIN32_WINNT_WS03;BOOST_ALL_NO_LIB;WINVER=_WIN32_WINNT_WS03;NTDDI_VERSION=NTDDI_WS03;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
|
||||
<RuntimeLibrary>MultiThreaded</RuntimeLibrary>
|
||||
<AdditionalOptions>/bigobj "@$(SolutionDir)flow/no_intellisense.opt" %(AdditionalOptions)</AdditionalOptions>
|
||||
<LanguageStandard>stdcpp17</LanguageStandard>
|
||||
</ClCompile>
|
||||
<Link>
|
||||
<EnableCOMDATFolding>true</EnableCOMDATFolding>
|
||||
|
|
|
@ -1,14 +1,16 @@
|
|||
FROM centos:6
|
||||
LABEL version=0.1.2
|
||||
LABEL version=0.1.4
|
||||
ENV DOCKER_IMAGEVER=0.1.4
|
||||
|
||||
# Install dependencies for developer tools, bindings,\
|
||||
# documentation, actorcompiler, and packaging tools\
|
||||
RUN yum install -y yum-utils &&\
|
||||
yum-config-manager --enable rhel-server-rhscl-7-rpms &&\
|
||||
yum -y install centos-release-scl epel-release &&\
|
||||
yum -y install devtoolset-7 mono-core java-1.8.0-openjdk-devel \
|
||||
rh-python36-python-devel rh-ruby24 golang python27 \
|
||||
rpm-build debbuild python-pip npm ccache distcc &&\
|
||||
yum -y install devtoolset-8 java-1.8.0-openjdk-devel \
|
||||
rh-python36-python-devel devtoolset-8-valgrind-devel \
|
||||
mono-core rh-ruby24 golang python27 rpm-build debbuild \
|
||||
python-pip npm dos2unix valgrind-devel &&\
|
||||
pip install boto3==1.1.1
|
||||
|
||||
USER root
|
||||
|
@ -35,8 +37,9 @@ RUN curl -L https://github.com/Kitware/CMake/releases/download/v3.13.4/cmake-3.1
|
|||
RUN curl -L https://ftp.openbsd.org/pub/OpenBSD/LibreSSL/libressl-2.8.2.tar.gz > /tmp/libressl.tar.gz &&\
|
||||
cd /tmp && echo "b8cb31e59f1294557bfc80f2a662969bc064e83006ceef0574e2553a1c254fd5 libressl.tar.gz" > libressl-sha.txt &&\
|
||||
sha256sum -c libressl-sha.txt && tar xf libressl.tar.gz &&\
|
||||
cd libressl-2.8.2 && cd /tmp/libressl-2.8.2 && scl enable devtoolset-7 -- ./configure --prefix=/usr/local/stow/libressl CFLAGS="-fPIC -O3" --prefix=/usr/local &&\
|
||||
cd /tmp/libressl-2.8.2 && scl enable devtoolset-7 -- make -j`nproc` install &&\
|
||||
cd libressl-2.8.2 && cd /tmp/libressl-2.8.2 && scl enable devtoolset-8 -- ./configure --prefix=/usr/local/stow/libressl CFLAGS="-fPIC -O3" --prefix=/usr/local &&\
|
||||
cd /tmp/libressl-2.8.2 && scl enable devtoolset-8 -- make -j`nproc` install &&\
|
||||
rm -rf /tmp/libressl-2.8.2 /tmp/libressl.tar.gz
|
||||
|
||||
CMD scl enable devtoolset-7 python27 rh-python36 rh-ruby24 -- bash
|
||||
ENV JAVA_HOME=/usr/lib/jvm/java-1.8.0
|
||||
CMD scl enable devtoolset-8 python27 rh-python36 rh-ruby24 -- bash
|
||||
|
|
|
@ -2,7 +2,7 @@ version: "3"
|
|||
|
||||
services:
|
||||
common: &common
|
||||
image: foundationdb/foundationdb-build:0.1.2
|
||||
image: foundationdb/foundationdb-build:0.1.4
|
||||
|
||||
build-setup: &build-setup
|
||||
<<: *common
|
||||
|
@ -26,16 +26,19 @@ services:
|
|||
|
||||
build-docs:
|
||||
<<: *build-setup
|
||||
command: scl enable devtoolset-7 python27 rh-python36 rh-ruby24 -- bash -c 'mkdir -p "$${BUILD_DIR}" && cd "$${BUILD_DIR}" && cmake /__this_is_some_very_long_name_dir_needed_to_fix_a_bug_with_debug_rpms__/foundationdb && make -j "$${MAKEJOBS}" package_html'
|
||||
volumes:
|
||||
- ..:/foundationdb
|
||||
working_dir: /foundationdb
|
||||
command: scl enable devtoolset-8 python27 rh-python36 rh-ruby24 -- bash -c 'make -j "$${MAKEJOBS}" docpackage'
|
||||
|
||||
|
||||
release-packages: &release-packages
|
||||
<<: *release-setup
|
||||
command: scl enable devtoolset-7 python27 rh-python36 rh-ruby24 -- bash -c 'mkdir -p "$${BUILD_DIR}" && cd "$${BUILD_DIR}" && cmake -DFDB_RELEASE=1 -DVALGRIND=0 /__this_is_some_very_long_name_dir_needed_to_fix_a_bug_with_debug_rpms__/foundationdb && make -j "$${MAKEJOBS}" packages preinstall && cpack'
|
||||
command: scl enable devtoolset-8 python27 rh-python36 rh-ruby24 -- bash -c 'make -j "$${MAKEJOBS}" packages'
|
||||
|
||||
snapshot-packages: &snapshot-packages
|
||||
<<: *build-setup
|
||||
command: scl enable devtoolset-7 python27 rh-python36 rh-ruby24 -- bash -c 'mkdir -p "$${BUILD_DIR}" && cd "$${BUILD_DIR}" && cmake -DFDB_RELEASE=0 -DVALGRIND=0 /__this_is_some_very_long_name_dir_needed_to_fix_a_bug_with_debug_rpms__/foundationdb && make -j "$${MAKEJOBS}" packages preinstall && cpack'
|
||||
command: scl enable devtoolset-8 python27 rh-python36 rh-ruby24 -- bash -c 'make -j "$${MAKEJOBS}" packages'
|
||||
|
||||
prb-packages:
|
||||
<<: *snapshot-packages
|
||||
|
@ -43,11 +46,11 @@ services:
|
|||
|
||||
release-bindings: &release-bindings
|
||||
<<: *release-setup
|
||||
command: bash -c 'make -j "$${MAKEJOBS}" bindings'
|
||||
command: scl enable devtoolset-8 python27 rh-python36 rh-ruby24 -- bash -c 'make -j "$${MAKEJOBS}" bindings'
|
||||
|
||||
snapshot-bindings: &snapshot-bindings
|
||||
<<: *build-setup
|
||||
command: scl enable devtoolset-7 python27 rh-python36 rh-ruby24 -- bash -c 'mkdir -p "$${BUILD_DIR}" && cd "$${BUILD_DIR}" && cmake -DFDB_RELEASE=0 /__this_is_some_very_long_name_dir_needed_to_fix_a_bug_with_debug_rpms__/foundationdb && make -j "$${MAKEJOBS}" python_binding'
|
||||
command: scl enable devtoolset-8 python27 rh-python36 rh-ruby24 -- bash -c 'make -j "$${MAKEJOBS}" bindings'
|
||||
|
||||
prb-bindings:
|
||||
<<: *snapshot-bindings
|
||||
|
@ -55,7 +58,7 @@ services:
|
|||
|
||||
snapshot-cmake: &snapshot-cmake
|
||||
<<: *build-setup
|
||||
command: scl enable devtoolset-7 python27 rh-python36 rh-ruby24 -- bash -c 'mkdir -p "$${BUILD_DIR}" && cd "$${BUILD_DIR}" && cmake -DFDB_RELEASE=0 -DVALGRIND=0 /__this_is_some_very_long_name_dir_needed_to_fix_a_bug_with_debug_rpms__/foundationdb && make -j "$${MAKEJOBS}" packages preinstall && cpack'
|
||||
command: scl enable devtoolset-8 python27 rh-python36 rh-ruby24 -- bash -c 'mkdir -p "$${BUILD_DIR}" && cd "$${BUILD_DIR}" && cmake -DFDB_RELEASE=0 -DVALGRIND=0 /__this_is_some_very_long_name_dir_needed_to_fix_a_bug_with_debug_rpms__/foundationdb && make -j "$${MAKEJOBS}" packages preinstall && cpack'
|
||||
|
||||
prb-cmake:
|
||||
<<: *snapshot-cmake
|
||||
|
@ -63,7 +66,7 @@ services:
|
|||
|
||||
snapshot-ctest: &snapshot-ctest
|
||||
<<: *build-setup
|
||||
command: scl enable devtoolset-7 python27 rh-python36 rh-ruby24 -- bash -c 'mkdir -p "$${BUILD_DIR}" && cd "$${BUILD_DIR}" && cmake -DFDB_RELEASE=1 /__this_is_some_very_long_name_dir_needed_to_fix_a_bug_with_debug_rpms__/foundationdb && make -j "$${MAKEJOBS}" && ctest -L fast -j "$${MAKEJOBS}" --output-on-failure'
|
||||
command: scl enable devtoolset-8 python27 rh-python36 rh-ruby24 -- bash -c 'mkdir -p "$${BUILD_DIR}" && cd "$${BUILD_DIR}" && cmake -DFDB_RELEASE=1 /__this_is_some_very_long_name_dir_needed_to_fix_a_bug_with_debug_rpms__/foundationdb && make -j "$${MAKEJOBS}" && ctest -L fast -j "$${MAKEJOBS}" --output-on-failure'
|
||||
|
||||
prb-ctest:
|
||||
<<: *snapshot-ctest
|
||||
|
@ -71,7 +74,7 @@ services:
|
|||
|
||||
snapshot-correctness: &snapshot-correctness
|
||||
<<: *build-setup
|
||||
command: scl enable devtoolset-7 python27 rh-python36 rh-ruby24 -- bash -c 'mkdir -p "$${BUILD_DIR}" && cd "$${BUILD_DIR}" && cmake -DFDB_RELEASE=1 /__this_is_some_very_long_name_dir_needed_to_fix_a_bug_with_debug_rpms__/foundationdb && make -j "$${MAKEJOBS}" && ctest -j "$${MAKEJOBS}" --output-on-failure
|
||||
command: scl enable devtoolset-8 python27 rh-python36 rh-ruby24 -- bash -c 'mkdir -p "$${BUILD_DIR}" && cd "$${BUILD_DIR}" && cmake -DFDB_RELEASE=1 /__this_is_some_very_long_name_dir_needed_to_fix_a_bug_with_debug_rpms__/foundationdb && make -j "$${MAKEJOBS}" && ctest -j "$${MAKEJOBS}" --output-on-failure
|
||||
|
||||
prb-correctness:
|
||||
<<: *snapshot-correctness
|
||||
|
|
|
@ -1,10 +1,11 @@
|
|||
set(USE_GPERFTOOLS OFF CACHE BOOL "Use gperfools for profiling")
|
||||
set(PORTABLE_BINARY OFF CACHE BOOL "Create a binary that runs on older OS versions")
|
||||
set(USE_VALGRIND OFF CACHE BOOL "Compile for valgrind usage")
|
||||
set(USE_GOLD_LINKER OFF CACHE BOOL "Use gold linker")
|
||||
set(ALLOC_INSTRUMENTATION OFF CACHE BOOL "Instrument alloc")
|
||||
set(WITH_UNDODB OFF CACHE BOOL "Use rr or undodb")
|
||||
set(USE_ASAN OFF CACHE BOOL "Compile with address sanitizer")
|
||||
set(FDB_RELEASE OFF CACHE BOOL "This is a building of a final release")
|
||||
set(USE_LD "LD" CACHE STRING "The linker to use for building: can be LD (system default, default choice), GOLD, or LLD")
|
||||
|
||||
if(USE_GPERFTOOLS)
|
||||
find_package(Gperftools REQUIRED)
|
||||
|
@ -47,7 +48,6 @@ include(CheckFunctionExists)
|
|||
set(CMAKE_REQUIRED_INCLUDES stdlib.h malloc.h)
|
||||
set(CMAKE_REQUIRED_LIBRARIES c)
|
||||
|
||||
|
||||
if(WIN32)
|
||||
# see: https://docs.microsoft.com/en-us/windows/desktop/WinProg/using-the-windows-headers
|
||||
# this sets the windows target version to Windows 7
|
||||
|
@ -55,10 +55,6 @@ if(WIN32)
|
|||
add_compile_options(/W3 /EHsc /std:c++17 /bigobj $<$<CONFIG:Release>:/Zi> /MP)
|
||||
add_compile_definitions(_WIN32_WINNT=${WINDOWS_TARGET} BOOST_ALL_NO_LIB)
|
||||
else()
|
||||
if(USE_GOLD_LINKER)
|
||||
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fuse-ld=gold -Wl,--disable-new-dtags")
|
||||
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fuse-ld=gold -Wl,--disable-new-dtags")
|
||||
endif()
|
||||
|
||||
set(GCC NO)
|
||||
set(CLANG NO)
|
||||
|
@ -70,10 +66,29 @@ else()
|
|||
set(GCC YES)
|
||||
endif()
|
||||
|
||||
# check linker flags.
|
||||
if ((NOT (USE_LD STREQUAL "LD")) AND (NOT (USE_LD STREQUAL "GOLD")) AND (NOT (USE_LD STREQUAL "LLD")))
|
||||
message (FATAL_ERROR "USE_LD must be set to LD, GOLD, or LLD!")
|
||||
endif()
|
||||
|
||||
# if USE_LD=LD, then we don't do anything, defaulting to whatever system
|
||||
# linker is available (e.g. binutils doesn't normally exist on macOS, so this
|
||||
# implies the default xcode linker, and other distros may choose others by
|
||||
# default).
|
||||
|
||||
if(USE_LD STREQUAL "GOLD")
|
||||
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fuse-ld=gold -Wl,--disable-new-dtags")
|
||||
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fuse-ld=gold -Wl,--disable-new-dtags")
|
||||
endif()
|
||||
|
||||
if(USE_LD STREQUAL "LLD")
|
||||
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fuse-ld=lld -Wl,--disable-new-dtags")
|
||||
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fuse-ld=lld -Wl,--disable-new-dtags")
|
||||
endif()
|
||||
|
||||
# we always compile with debug symbols. CPack will strip them out
|
||||
# and create a debuginfo rpm
|
||||
add_compile_options(-ggdb)
|
||||
set(USE_ASAN OFF CACHE BOOL "Compile with address sanitizer")
|
||||
if(USE_ASAN)
|
||||
add_compile_options(
|
||||
-fno-omit-frame-pointer -fsanitize=address
|
||||
|
|
|
@ -70,7 +70,7 @@ buildsphinx:
|
|||
cd $(BUILDDIR); \
|
||||
curl -OL $(VENV_URL); \
|
||||
tar zxvf $(VENV_VERSION).tar.gz; \
|
||||
./$(VENV_VERSION)/virtualenv.py venv; \
|
||||
python2 ./$(VENV_VERSION)/virtualenv.py venv; \
|
||||
fi
|
||||
. $(VENVDIR)/bin/activate && \
|
||||
cp .pip.conf $(VENVDIR)/pip.conf && \
|
||||
|
|
|
@ -36,7 +36,7 @@ extensions = [
|
|||
]
|
||||
|
||||
# Add any paths that contain templates here, relative to this directory.
|
||||
templates_path = [sys.prefix + '/_templates']
|
||||
templates_path = []
|
||||
|
||||
# The suffix of source filenames.
|
||||
source_suffix = '.rst'
|
||||
|
@ -143,7 +143,7 @@ html_title = 'FoundationDB ' + version
|
|||
# Add any paths that contain custom static files (such as style sheets) here,
|
||||
# relative to this directory. They are copied after the builtin static files,
|
||||
# so a file named "default.css" will overwrite the builtin "default.css".
|
||||
html_static_path = [sys.prefix + '/_static']
|
||||
html_static_path = []
|
||||
|
||||
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
|
||||
# using the given strftime format.
|
||||
|
|
|
@ -232,7 +232,7 @@ The procedures for adding and removing machines can be combined into a recipe fo
|
|||
Converting an existing cluster to use TLS
|
||||
=========================================
|
||||
|
||||
A FoundationDB cluster has the option of supporting :doc:`Transport Layer Security (TLS) <tls>`. To enable TLS on an existing, non-TLS cluster, see :ref:`Converting a running cluster <converting-existing-cluster>`.
|
||||
A FoundationDB cluster has the option of supporting :doc:`Transport Layer Security (TLS) <tls>`. To enable TLS on an existing, non-TLS cluster, see :ref:`Converting a running cluster <converting-existing-cluster-after-6.1>`.
|
||||
|
||||
.. _administration-monitoring-cluster-status:
|
||||
|
||||
|
|
|
@ -37,12 +37,22 @@
|
|||
.. |node-subspace| replace:: FIXME
|
||||
.. |content-subspace| replace:: FIXME
|
||||
.. |allow-manual-prefixes| replace:: FIXME
|
||||
.. |retry-limit-transaction-option| replace:: FIXME
|
||||
.. |timeout-transaction-option| replace:: FIXME
|
||||
.. |max-retry-delay-transaction-option| replace:: FIXME
|
||||
.. |snapshot-ryw-enable-transaction-option| replace:: FIXME
|
||||
.. |snapshot-ryw-disable-transaction-option| replace:: FIXME
|
||||
.. |snapshot-ryw-enable-database-option| replace:: FIXME
|
||||
.. |snapshot-ryw-disable-database-option| replace:: FIXME
|
||||
.. |retry-limit-database-option| replace:: FIXME
|
||||
.. |max-retry-delay-database-option| replace:: FIXME
|
||||
.. |timeout-database-option| replace:: FIXME
|
||||
|
||||
.. include:: api-common.rst.inc
|
||||
|
||||
.. |future-warning| replace:: :data:`future` must represent a result of the appropriate type (i.e. must have been returned by a function documented as returning this type), or the results are undefined.
|
||||
.. |future-warning| replace:: ``future`` must represent a result of the appropriate type (i.e. must have been returned by a function documented as returning this type), or the results are undefined.
|
||||
|
||||
.. |future-get-return1| replace:: Returns zero if :data:`future` is ready and not in an error state, and a non-zero :ref:`error code <developer-guide-error-codes>` otherwise
|
||||
.. |future-get-return1| replace:: Returns zero if ``future`` is ready and not in an error state, and a non-zero :ref:`error code <developer-guide-error-codes>` otherwise
|
||||
|
||||
.. |future-get-return2| replace:: (in which case the value of any out parameter is undefined)
|
||||
|
||||
|
@ -74,9 +84,9 @@
|
|||
|
||||
.. |snapshot| replace:: Non-zero if this is a :ref:`snapshot read <snapshots>`.
|
||||
|
||||
.. |sets-and-clears1| replace:: Modify the database snapshot represented by :data:`transaction`
|
||||
.. |sets-and-clears1| replace:: Modify the database snapshot represented by ``transaction``
|
||||
|
||||
.. |sets-and-clears2| replace:: The modification affects the actual database only if :data:`transaction` is later committed with :func:`fdb_transaction_commit()`.
|
||||
.. |sets-and-clears2| replace:: The modification affects the actual database only if ``transaction`` is later committed with :func:`fdb_transaction_commit()`.
|
||||
|
||||
=====
|
||||
C API
|
||||
|
@ -105,7 +115,7 @@ The FoundationDB C bindings are provided as a shared object which may be linked
|
|||
Linux
|
||||
-----
|
||||
|
||||
When linking against ``libfdb_c.so``, you must also link against ``libm``, ``libpthread`` and ``librt``. These dependencies will be resolved by the dynamic linker when using this API via :func:`dlopen()` or an FFI.
|
||||
When linking against ``libfdb_c.so``, you must also link against ``libm``, ``libpthread`` and ``librt``. These dependencies will be resolved by the dynamic linker when using this API via ``dlopen()`` or an FFI.
|
||||
|
||||
macOS
|
||||
--------
|
||||
|
@ -115,37 +125,37 @@ When linking against ``libfdb_c.dylib``, no additional libraries are required.
|
|||
API versioning
|
||||
==============
|
||||
|
||||
Prior to including ``fdb_c.h``, you must define the :macro:`FDB_API_VERSION` macro. This, together with the :func:`fdb_select_api_version()` function, allows programs written against an older version of the API to compile and run with newer versions of the C library. The current version of the FoundationDB C API is |api-version|. ::
|
||||
Prior to including ``fdb_c.h``, you must define the ``FDB_API_VERSION`` macro. This, together with the :func:`fdb_select_api_version()` function, allows programs written against an older version of the API to compile and run with newer versions of the C library. The current version of the FoundationDB C API is |api-version|. ::
|
||||
|
||||
#define FDB_API_VERSION 610
|
||||
#include <foundationdb/fdb_c.h>
|
||||
|
||||
.. function:: fdb_error_t fdb_select_api_version(int version)
|
||||
|
||||
Must be called before any other API functions. :data:`version` must be less than or equal to :macro:`FDB_API_VERSION` (and should almost always be equal).
|
||||
Must be called before any other API functions. ``version`` must be less than or equal to ``FDB_API_VERSION`` (and should almost always be equal).
|
||||
|
||||
Language bindings implemented in C which themselves expose API versioning will usually pass the version requested by the application, instead of always passing :macro:`FDB_API_VERSION`.
|
||||
Language bindings implemented in C which themselves expose API versioning will usually pass the version requested by the application, instead of always passing ``FDB_API_VERSION``.
|
||||
|
||||
Passing a version less than :macro:`FDB_API_VERSION` will cause the API to behave as it did in the older version.
|
||||
Passing a version less than ``FDB_API_VERSION`` will cause the API to behave as it did in the older version.
|
||||
|
||||
It is an error to call this function after it has returned successfully. It is not thread safe, and if called from more than one thread simultaneously its behavior is undefined.
|
||||
|
||||
.. note:: This is actually implemented as a macro. If you are accessing this API via :func:`dlopen()` or an FFI, you will need to use :func:`fdb_select_api_version_impl()`.
|
||||
.. note:: This is actually implemented as a macro. If you are accessing this API via ``dlopen()`` or an FFI, you will need to use :func:`fdb_select_api_version_impl()`.
|
||||
|
||||
.. warning:: |api-version-multi-version-warning|
|
||||
|
||||
.. function:: fdb_error_t fdb_select_api_version_impl(int runtime_version, int header_version)
|
||||
|
||||
This is the actual entry point called by the :func:`fdb_select_api_version` macro. It should never be called directly from C, but if you are accessing this API via :func:`dlopen()` or an FFI, you will need to use it. ``fdb_select_api_version(v)`` is equivalent to ``fdb_select_api_version_impl(v, FDB_API_VERSION)``.
|
||||
This is the actual entry point called by the :func:`fdb_select_api_version` macro. It should never be called directly from C, but if you are accessing this API via ``dlopen()`` or an FFI, you will need to use it. ``fdb_select_api_version(v)`` is equivalent to ``fdb_select_api_version_impl(v, FDB_API_VERSION)``.
|
||||
|
||||
It is an error to call this function after it has returned successfully. It is not thread safe, and if called from more than one thread simultaneously its behavior is undefined.
|
||||
|
||||
:data:`runtime_version`
|
||||
The version of run-time behavior the API is requested to provide. Must be less than or equal to :data:`header_version`, and should almost always be equal.
|
||||
``runtime_version``
|
||||
The version of run-time behavior the API is requested to provide. Must be less than or equal to ``header_version``, and should almost always be equal.
|
||||
|
||||
Language bindings which themselves expose API versioning will usually pass the version requested by the application.
|
||||
|
||||
:data:`header_version`
|
||||
``header_version``
|
||||
The version of the ABI (application binary interface) that the calling code expects to find in the shared library. If you are using an FFI, this *must* correspond to the version of the API you are using as a reference (currently |api-version|). For example, the number of arguments that a function takes may be affected by this value, and an incorrect value is unlikely to yield success.
|
||||
|
||||
.. warning:: |api-version-multi-version-warning|
|
||||
|
@ -263,7 +273,7 @@ See :ref:`developer-guide-programming-with-futures` for further (language-indepe
|
|||
|
||||
.. type:: FDBCallback
|
||||
|
||||
A pointer to a function which takes :type:`FDBFuture*` and :type:`void*` and returns :type:`void`.
|
||||
A pointer to a function which takes :type:`FDBFuture*` and ``void*`` and returns ``void``.
|
||||
|
||||
.. function:: void fdb_future_release_memory(FDBFuture* future)
|
||||
|
||||
|
@ -279,13 +289,13 @@ See :ref:`developer-guide-programming-with-futures` for further (language-indepe
|
|||
|
||||
.. function:: fdb_error_t fdb_future_get_version(FDBFuture* future, int64_t* out_version)
|
||||
|
||||
Extracts a value of type version from an :type:`FDBFuture` into a caller-provided variable of type :type:`int64_t`. |future-warning|
|
||||
Extracts a version from an :type:`FDBFuture` into a caller-provided variable of type ``int64_t``. |future-warning|
|
||||
|
||||
|future-get-return1| |future-get-return2|.
|
||||
|
||||
.. function:: fdb_error_t fdb_future_get_key(FDBFuture* future, uint8_t const** out_key, int* out_key_length)
|
||||
|
||||
Extracts a value of type key from an :type:`FDBFuture` into caller-provided variables of type :type:`uint8_t*` (a pointer to the beginning of the key) and :type:`int` (the length of the key). |future-warning|
|
||||
Extracts a key from an :type:`FDBFuture` into caller-provided variables of type ``uint8_t*`` (a pointer to the beginning of the key) and ``int`` (the length of the key). |future-warning|
|
||||
|
||||
|future-get-return1| |future-get-return2|.
|
||||
|
||||
|
@ -297,13 +307,13 @@ See :ref:`developer-guide-programming-with-futures` for further (language-indepe
|
|||
|
||||
|future-get-return1| |future-get-return2|.
|
||||
|
||||
:data:`*out_present`
|
||||
``*out_present``
|
||||
Set to non-zero if (and only if) the requested value was present in the database. (If zero, the other outputs are meaningless.)
|
||||
|
||||
:data:`*out_value`
|
||||
``*out_value``
|
||||
Set to point to the first byte of the value.
|
||||
|
||||
:data:`*out_value_length`
|
||||
``*out_value_length``
|
||||
Set to the length of the value (in bytes).
|
||||
|
||||
|future-memory-mine|
|
||||
|
@ -314,10 +324,10 @@ See :ref:`developer-guide-programming-with-futures` for further (language-indepe
|
|||
|
||||
|future-get-return1| |future-get-return2|.
|
||||
|
||||
:data:`*out_strings`
|
||||
``*out_strings``
|
||||
Set to point to the first string in the array.
|
||||
|
||||
:data:`*out_count`
|
||||
``*out_count``
|
||||
Set to the number of strings in the array.
|
||||
|
||||
|future-memory-mine|
|
||||
|
@ -328,13 +338,13 @@ See :ref:`developer-guide-programming-with-futures` for further (language-indepe
|
|||
|
||||
|future-get-return1| |future-get-return2|.
|
||||
|
||||
:data:`*out_kv`
|
||||
``*out_kv``
|
||||
Set to point to the first :type:`FDBKeyValue` object in the array.
|
||||
|
||||
:data:`*out_count`
|
||||
``*out_count``
|
||||
Set to the number of :type:`FDBKeyValue` objects in the array.
|
||||
|
||||
:data:`*out_more`
|
||||
``*out_more``
|
||||
Set to true if (but not necessarily only if) values remain in the *key* range requested (possibly beyond the limits requested).
|
||||
|
||||
|future-memory-mine|
|
||||
|
@ -350,17 +360,17 @@ See :ref:`developer-guide-programming-with-futures` for further (language-indepe
|
|||
int value_length;
|
||||
} FDBKeyValue;
|
||||
|
||||
:data:`key`
|
||||
``key``
|
||||
A pointer to a key.
|
||||
|
||||
:data:`key_length`
|
||||
The length of the key pointed to by :data:`key`.
|
||||
``key_length``
|
||||
The length of the key pointed to by ``key``.
|
||||
|
||||
:data:`value`
|
||||
``value``
|
||||
A pointer to a value.
|
||||
|
||||
:data:`value_length`
|
||||
The length of the value pointed to by :data:`value`.
|
||||
``value_length``
|
||||
The length of the value pointed to by ``value``.
|
||||
|
||||
Database
|
||||
========
|
||||
|
@ -375,10 +385,10 @@ An |database-blurb1| Modifications to a database are performed via transactions.
|
|||
|
||||
Creates a new database connected the specified cluster. The caller assumes ownership of the :type:`FDBDatabase` object and must destroy it with :func:`fdb_database_destroy()`.
|
||||
|
||||
:data:`cluster_file_path`
|
||||
``cluster_file_path``
|
||||
A NULL-terminated string giving a local path of a :ref:`cluster file <foundationdb-cluster-file>` (often called 'fdb.cluster') which contains connection information for the FoundationDB cluster. If cluster_file_path is NULL or an empty string, then a :ref:`default cluster file <default-cluster-file>` will be used.
|
||||
|
||||
:data:`*out_database`
|
||||
``*out_database``
|
||||
Set to point to the newly created :type:`FDBDatabase`.
|
||||
|
||||
.. function:: void fdb_database_destroy(FDBDatabase* database)
|
||||
|
@ -397,7 +407,7 @@ An |database-blurb1| Modifications to a database are performed via transactions.
|
|||
|
||||
Creates a new transaction on the given database. The caller assumes ownership of the :type:`FDBTransaction` object and must destroy it with :func:`fdb_transaction_destroy()`.
|
||||
|
||||
:data:`*out_transaction`
|
||||
``*out_transaction``
|
||||
Set to point to the newly created :type:`FDBTransaction`.
|
||||
|
||||
Transaction
|
||||
|
@ -439,75 +449,75 @@ Applications must provide error handling and an appropriate retry loop around th
|
|||
|
||||
.. function:: FDBFuture* fdb_transaction_get(FDBTransaction* transaction, uint8_t const* key_name, int key_name_length, fdb_bool_t snapshot)
|
||||
|
||||
Reads a value from the database snapshot represented by :data:`transaction`.
|
||||
Reads a value from the database snapshot represented by ``transaction``.
|
||||
|
||||
|future-return0| the value of :data:`key_name` in the database. |future-return1| call :func:`fdb_future_get_value()` to extract the value, |future-return2|
|
||||
|future-return0| the value of ``key_name`` in the database. |future-return1| call :func:`fdb_future_get_value()` to extract the value, |future-return2|
|
||||
|
||||
See :func:`fdb_future_get_value()` to see exactly how results are unpacked. If :data:`key_name` is not present in the database, the result is not an error, but a zero for :data:`*out_present` returned from that function.
|
||||
See :func:`fdb_future_get_value()` to see exactly how results are unpacked. If ``key_name`` is not present in the database, the result is not an error, but a zero for ``*out_present`` returned from that function.
|
||||
|
||||
:data:`key_name`
|
||||
``key_name``
|
||||
A pointer to the name of the key to be looked up in the database. |no-null|
|
||||
|
||||
:data:`key_name_length`
|
||||
|length-of| :data:`key_name`.
|
||||
``key_name_length``
|
||||
|length-of| ``key_name``.
|
||||
|
||||
:data:`snapshot`
|
||||
``snapshot``
|
||||
|snapshot|
|
||||
|
||||
.. function:: FDBFuture* fdb_transaction_get_key(FDBTransaction* transaction, uint8_t const* key_name, int key_name_length, fdb_bool_t or_equal, int offset, fdb_bool_t snapshot)
|
||||
|
||||
Resolves a :ref:`key selector <key-selectors>` against the keys in the database snapshot represented by :data:`transaction`.
|
||||
Resolves a :ref:`key selector <key-selectors>` against the keys in the database snapshot represented by ``transaction``.
|
||||
|
||||
|future-return0| the key in the database matching the :ref:`key selector <key-selectors>`. |future-return1| call :func:`fdb_future_get_key()` to extract the key, |future-return2|
|
||||
|
||||
:data:`key_name`, :data:`key_name_length`, :data:`or_equal`, :data:`offset`
|
||||
``key_name``, ``key_name_length``, ``or_equal``, ``offset``
|
||||
The four components of a :ref:`key selector <key-selectors>`.
|
||||
|
||||
:data:`snapshot`
|
||||
``snapshot``
|
||||
|snapshot|
|
||||
|
||||
.. function:: FDBFuture* fdb_transaction_get_addresses_for_key(FDBTransaction* transaction, uint8_t const* key_name, int key_name_length)
|
||||
|
||||
Returns a list of public network addresses as strings, one for each of the storage servers responsible for storing :data:`key_name` and its associated value.
|
||||
Returns a list of public network addresses as strings, one for each of the storage servers responsible for storing ``key_name`` and its associated value.
|
||||
|
||||
|future-return0| an array of strings. |future-return1| call :func:`fdb_future_get_string_array()` to extract the string array, |future-return2|
|
||||
|
||||
:data:`key_name`
|
||||
``key_name``
|
||||
A pointer to the name of the key whose location is to be queried.
|
||||
|
||||
:data:`key_name_length`
|
||||
|length-of| :data:`key_name`.
|
||||
``key_name_length``
|
||||
|length-of| ``key_name``.
|
||||
|
||||
.. |range-limited-by| replace:: If this limit was reached before the end of the specified range, then the :data:`*more` return of :func:`fdb_future_get_keyvalue_array()` will be set to a non-zero value.
|
||||
.. |range-limited-by| replace:: If this limit was reached before the end of the specified range, then the ``*more`` return of :func:`fdb_future_get_keyvalue_array()` will be set to a non-zero value.
|
||||
|
||||
.. function:: FDBFuture* fdb_transaction_get_range(FDBTransaction* transaction, uint8_t const* begin_key_name, int begin_key_name_length, fdb_bool_t begin_or_equal, int begin_offset, uint8_t const* end_key_name, int end_key_name_length, fdb_bool_t end_or_equal, int end_offset, int limit, int target_bytes, FDBStreamingMode mode, int iteration, fdb_bool_t snapshot, fdb_bool_t reverse)
|
||||
|
||||
Reads all key-value pairs in the database snapshot represented by :data:`transaction` (potentially limited by :data:`limit`, :data:`target_bytes`, or :data:`mode`) which have a key lexicographically greater than or equal to the key resolved by the begin :ref:`key selector <key-selectors>` and lexicographically less than the key resolved by the end :ref:`key selector <key-selectors>`.
|
||||
Reads all key-value pairs in the database snapshot represented by ``transaction`` (potentially limited by :data:`limit`, :data:`target_bytes`, or :data:`mode`) which have a key lexicographically greater than or equal to the key resolved by the begin :ref:`key selector <key-selectors>` and lexicographically less than the key resolved by the end :ref:`key selector <key-selectors>`.
|
||||
|
||||
|future-return0| an :type:`FDBKeyValue` array. |future-return1| call :func:`fdb_future_get_keyvalue_array()` to extract the key-value array, |future-return2|
|
||||
|
||||
:data:`begin_key_name`, :data:`begin_key_name_length`, :data:`begin_or_equal`, :data:`begin_offset`
|
||||
``begin_key_name``, :data:`begin_key_name_length`, :data:`begin_or_equal`, :data:`begin_offset`
|
||||
The four components of a :ref:`key selector <key-selectors>` describing the beginning of the range.
|
||||
|
||||
:data:`end_key_name`, :data:`end_key_name_length`, :data:`end_or_equal`, :data:`end_offset`
|
||||
``end_key_name``, :data:`end_key_name_length`, :data:`end_or_equal`, :data:`end_offset`
|
||||
The four components of a :ref:`key selector <key-selectors>` describing the end of the range.
|
||||
|
||||
:data:`limit`
|
||||
``limit``
|
||||
If non-zero, indicates the maximum number of key-value pairs to return. |range-limited-by|
|
||||
|
||||
:data:`target_bytes`
|
||||
``target_bytes``
|
||||
If non-zero, indicates a (soft) cap on the combined number of bytes of keys and values to return. |range-limited-by|
|
||||
|
||||
:data:`mode`
|
||||
``mode``
|
||||
One of the :type:`FDBStreamingMode` values indicating how the caller would like the data in the range returned.
|
||||
|
||||
:data:`iteration`
|
||||
If :data:`mode` is :data:`FDB_STREAMING_MODE_ITERATOR`, this parameter should start at 1 and be incremented by 1 for each successive call while reading this range. In all other cases it is ignored.
|
||||
``iteration``
|
||||
If ``mode`` is :data:`FDB_STREAMING_MODE_ITERATOR`, this parameter should start at 1 and be incremented by 1 for each successive call while reading this range. In all other cases it is ignored.
|
||||
|
||||
:data:`snapshot`
|
||||
``snapshot``
|
||||
|snapshot|
|
||||
|
||||
:data:`reverse`
|
||||
``reverse``
|
||||
|
||||
If non-zero, key-value pairs will be returned in reverse lexicographical order beginning at the end of the range.
|
||||
|
||||
|
@ -515,31 +525,31 @@ Applications must provide error handling and an appropriate retry loop around th
|
|||
|
||||
An enumeration of available streaming modes to be passed to :func:`fdb_transaction_get_range()`.
|
||||
|
||||
:data:`FDB_STREAMING_MODE_ITERATOR`
|
||||
``FDB_STREAMING_MODE_ITERATOR``
|
||||
|
||||
The caller is implementing an iterator (most likely in a binding to a higher level language). The amount of data returned depends on the value of the :data:`iteration` parameter to :func:`fdb_transaction_get_range()`.
|
||||
The caller is implementing an iterator (most likely in a binding to a higher level language). The amount of data returned depends on the value of the ``iteration`` parameter to :func:`fdb_transaction_get_range()`.
|
||||
|
||||
:data:`FDB_STREAMING_MODE_SMALL`
|
||||
``FDB_STREAMING_MODE_SMALL``
|
||||
|
||||
Data is returned in small batches (not much more expensive than reading individual key-value pairs).
|
||||
|
||||
:data:`FDB_STREAMING_MODE_MEDIUM`
|
||||
``FDB_STREAMING_MODE_MEDIUM``
|
||||
|
||||
Data is returned in batches between _SMALL and _LARGE.
|
||||
|
||||
:data:`FDB_STREAMING_MODE_LARGE`
|
||||
``FDB_STREAMING_MODE_LARGE``
|
||||
|
||||
Data is returned in batches large enough to be, in a high-concurrency environment, nearly as efficient as possible. If the caller does not need the entire range, some disk and network bandwidth may be wasted. The batch size may still be too small to allow a single client to get high throughput from the database.
|
||||
|
||||
:data:`FDB_STREAMING_MODE_SERIAL`
|
||||
``FDB_STREAMING_MODE_SERIAL``
|
||||
|
||||
Data is returned in batches large enough that an individual client can get reasonable read bandwidth from the database. If the caller does not need the entire range, considerable disk and network bandwidth may be wasted.
|
||||
|
||||
:data:`FDB_STREAMING_MODE_WANT_ALL`
|
||||
``FDB_STREAMING_MODE_WANT_ALL``
|
||||
|
||||
The caller intends to consume the entire range and would like it all transferred as early as possible.
|
||||
|
||||
:data:`FDB_STREAMING_MODE_EXACT`
|
||||
``FDB_STREAMING_MODE_EXACT``
|
||||
|
||||
The caller has passed a specific row limit and wants that many rows delivered in a single batch.
|
||||
|
||||
|
@ -549,17 +559,17 @@ Applications must provide error handling and an appropriate retry loop around th
|
|||
|
||||
|sets-and-clears2|
|
||||
|
||||
:data:`key_name`
|
||||
``key_name``
|
||||
A pointer to the name of the key to be inserted into the database. |no-null|
|
||||
|
||||
:data:`key_name_length`
|
||||
|length-of| :data:`key_name`.
|
||||
``key_name_length``
|
||||
|length-of| ``key_name``.
|
||||
|
||||
:data:`value`
|
||||
``value``
|
||||
A pointer to the value to be inserted into the database. |no-null|
|
||||
|
||||
:data:`value_length`
|
||||
|length-of| :data:`value`.
|
||||
``value_length``
|
||||
|length-of| ``value``.
|
||||
|
||||
.. function:: void fdb_transaction_clear(FDBTransaction* transaction, uint8_t const* key_name, int key_name_length)
|
||||
|
||||
|
@ -567,11 +577,11 @@ Applications must provide error handling and an appropriate retry loop around th
|
|||
|
||||
|sets-and-clears2|
|
||||
|
||||
:data:`key_name`
|
||||
``key_name``
|
||||
A pointer to the name of the key to be removed from the database. |no-null|
|
||||
|
||||
:data:`key_name_length`
|
||||
|length-of| :data:`key_name`.
|
||||
``key_name_length``
|
||||
|length-of| ``key_name``.
|
||||
|
||||
.. function:: void fdb_transaction_clear_range(FDBTransaction* transaction, uint8_t const* begin_key_name, int begin_key_name_length, uint8_t const* end_key_name, int end_key_name_length)
|
||||
|
||||
|
@ -579,17 +589,17 @@ Applications must provide error handling and an appropriate retry loop around th
|
|||
|
||||
|sets-and-clears2|
|
||||
|
||||
:data:`begin_key_name`
|
||||
``begin_key_name``
|
||||
A pointer to the name of the key specifying the beginning of the range to clear. |no-null|
|
||||
|
||||
:data:`begin_key_name_length`
|
||||
|length-of| :data:`begin_key_name`.
|
||||
``begin_key_name_length``
|
||||
|length-of| ``begin_key_name``.
|
||||
|
||||
:data:`end_key_name`
|
||||
``end_key_name``
|
||||
A pointer to the name of the key specifying the end of the range to clear. |no-null|
|
||||
|
||||
:data:`end_key_name_length`
|
||||
|length-of| :data:`end_key_name_length`.
|
||||
``end_key_name_length``
|
||||
|length-of| ``end_key_name``.
|
||||
|
||||
.. function:: void fdb_transaction_atomic_op(FDBTransaction* transaction, uint8_t const* key_name, int key_name_length, uint8_t const* param, int param_length, FDBMutationType operationType)
|
||||
|
||||
|
@ -605,64 +615,64 @@ Applications must provide error handling and an appropriate retry loop around th
|
|||
|
||||
|sets-and-clears2|
|
||||
|
||||
:data:`key_name`
|
||||
``key_name``
|
||||
A pointer to the name of the key whose value is to be mutated.
|
||||
|
||||
:data:`key_name_length`
|
||||
|length-of| :data:`key_name`.
|
||||
``key_name_length``
|
||||
|length-of| ``key_name``.
|
||||
|
||||
:data:`param`
|
||||
A pointer to the parameter with which the atomic operation will mutate the value associated with :data:`key_name`.
|
||||
``param``
|
||||
A pointer to the parameter with which the atomic operation will mutate the value associated with ``key_name``.
|
||||
|
||||
:data:`param_length`
|
||||
|length-of| :data:`param`.
|
||||
``param_length``
|
||||
|length-of| ``param``.
|
||||
|
||||
:data:`operation_type`
|
||||
``operation_type``
|
||||
One of the :type:`FDBMutationType` values indicating which operation should be performed.
|
||||
|
||||
.. type:: FDBMutationType
|
||||
|
||||
An enumeration of available opcodes to be passed to :func:`fdb_transaction_atomic_op()`
|
||||
|
||||
:data:`FDB_MUTATION_TYPE_ADD`
|
||||
``FDB_MUTATION_TYPE_ADD``
|
||||
|
||||
|atomic-add1|
|
||||
|
||||
|atomic-add2|
|
||||
|
||||
:data:`FDB_MUTATION_TYPE_AND`
|
||||
``FDB_MUTATION_TYPE_AND``
|
||||
|
||||
|atomic-and|
|
||||
|
||||
:data:`FDB_MUTATION_TYPE_OR`
|
||||
``FDB_MUTATION_TYPE_OR``
|
||||
|
||||
|atomic-or|
|
||||
|
||||
:data:`FDB_MUTATION_TYPE_XOR`
|
||||
``FDB_MUTATION_TYPE_XOR``
|
||||
|
||||
|atomic-xor|
|
||||
|
||||
:data:`FDB_MUTATION_TYPE_MAX`
|
||||
``FDB_MUTATION_TYPE_MAX``
|
||||
|
||||
|atomic-max1|
|
||||
|
||||
|atomic-max-min|
|
||||
|
||||
:data:`FDB_MUTATION_TYPE_BYTE_MAX`
|
||||
``FDB_MUTATION_TYPE_BYTE_MAX``
|
||||
|
||||
|atomic-byte-max|
|
||||
|
||||
:data:`FDB_MUTATION_TYPE_MIN`
|
||||
``FDB_MUTATION_TYPE_MIN``
|
||||
|
||||
|atomic-min1|
|
||||
|
||||
|atomic-max-min|
|
||||
|
||||
:data:`FDB_MUTATION_TYPE_BYTE_MIN`
|
||||
``FDB_MUTATION_TYPE_BYTE_MIN``
|
||||
|
||||
|atomic-byte-min|
|
||||
|
||||
:data:`FDB_MUTATION_TYPE_SET_VERSIONSTAMPED_KEY`
|
||||
``FDB_MUTATION_TYPE_SET_VERSIONSTAMPED_KEY``
|
||||
|
||||
|atomic-set-versionstamped-key-1|
|
||||
|
||||
|
@ -674,7 +684,7 @@ Applications must provide error handling and an appropriate retry loop around th
|
|||
|
||||
.. warning :: |atomic-versionstamps-tuple-warning-key|
|
||||
|
||||
:data:`FDB_MUTATION_TYPE_SET_VERSIONSTAMPED_VALUE`
|
||||
``FDB_MUTATION_TYPE_SET_VERSIONSTAMPED_VALUE``
|
||||
|
||||
|atomic-set-versionstamped-value|
|
||||
|
||||
|
@ -686,7 +696,7 @@ Applications must provide error handling and an appropriate retry loop around th
|
|||
|
||||
.. function:: FDBFuture* fdb_transaction_commit(FDBTransaction* transaction)
|
||||
|
||||
Attempts to commit the sets and clears previously applied to the database snapshot represented by :data:`transaction` to the actual database. The commit may or may not succeed -- in particular, if a conflicting transaction previously committed, then the commit must fail in order to preserve transactional isolation. If the commit does succeed, the transaction is durably committed to the database and all subsequently started transactions will observe its effects.
|
||||
Attempts to commit the sets and clears previously applied to the database snapshot represented by ``transaction`` to the actual database. The commit may or may not succeed -- in particular, if a conflicting transaction previously committed, then the commit must fail in order to preserve transactional isolation. If the commit does succeed, the transaction is durably committed to the database and all subsequently started transactions will observe its effects.
|
||||
|
||||
It is not necessary to commit a read-only transaction -- you can simply call :func:`fdb_transaction_destroy()`.
|
||||
|
||||
|
@ -700,7 +710,7 @@ Applications must provide error handling and an appropriate retry loop around th
|
|||
|
||||
.. function:: fdb_error_t fdb_transaction_get_committed_version(FDBTransaction* transaction, int64_t* out_version)
|
||||
|
||||
Retrieves the database version number at which a given transaction was committed. :func:`fdb_transaction_commit()` must have been called on :data:`transaction` and the resulting future must be ready and not an error before this function is called, or the behavior is undefined. Read-only transactions do not modify the database when committed and will have a committed version of -1. Keep in mind that a transaction which reads keys and then sets them to their current values may be optimized to a read-only transaction.
|
||||
Retrieves the database version number at which a given transaction was committed. :func:`fdb_transaction_commit()` must have been called on ``transaction`` and the resulting future must be ready and not an error before this function is called, or the behavior is undefined. Read-only transactions do not modify the database when committed and will have a committed version of -1. Keep in mind that a transaction which reads keys and then sets them to their current values may be optimized to a read-only transaction.
|
||||
|
||||
Note that database versions are not necessarily unique to a given transaction and so cannot be used to determine in what order two transactions completed. The only use for this function is to manually enforce causal consistency when calling :func:`fdb_transaction_set_read_version()` on another subsequent transaction.
|
||||
|
||||
|
@ -726,11 +736,11 @@ Applications must provide error handling and an appropriate retry loop around th
|
|||
|
||||
|transaction-watch-limit-blurb|
|
||||
|
||||
:data:`key_name`
|
||||
``key_name``
|
||||
A pointer to the name of the key to watch. |no-null|
|
||||
|
||||
:data:`key_name_length`
|
||||
|length-of| :data:`key_name`.
|
||||
``key_name_length``
|
||||
|length-of| ``key_name``.
|
||||
|
||||
|
||||
.. function:: FDBFuture* fdb_transaction_on_error(FDBTransaction* transaction, fdb_error_t error)
|
||||
|
@ -751,7 +761,7 @@ Applications must provide error handling and an appropriate retry loop around th
|
|||
|
||||
.. function:: void fdb_transaction_reset(FDBTransaction* transaction)
|
||||
|
||||
Reset :data:`transaction` to its initial state. This is similar to calling :func:`fdb_transaction_destroy()` followed by :func:`fdb_database_create_transaction()`. It is not necessary to call :func:`fdb_transaction_reset()` when handling an error with :func:`fdb_transaction_on_error()` since the transaction has already been reset.
|
||||
Reset ``transaction`` to its initial state. This is similar to calling :func:`fdb_transaction_destroy()` followed by :func:`fdb_database_create_transaction()`. It is not necessary to call :func:`fdb_transaction_reset()` when handling an error with :func:`fdb_transaction_on_error()` since the transaction has already been reset.
|
||||
|
||||
.. function:: void fdb_transaction_cancel(FDBTransaction* transaction)
|
||||
|
||||
|
@ -769,30 +779,30 @@ Applications must provide error handling and an appropriate retry loop around th
|
|||
|
||||
.. note:: |conflict-range-note|
|
||||
|
||||
:data:`begin_key_name`
|
||||
``begin_key_name``
|
||||
A pointer to the name of the key specifying the beginning of the conflict range. |no-null|
|
||||
|
||||
:data:`begin_key_name_length`
|
||||
|length-of| :data:`begin_key_name`.
|
||||
``begin_key_name_length``
|
||||
|length-of| ``begin_key_name``.
|
||||
|
||||
:data:`end_key_name`
|
||||
``end_key_name``
|
||||
A pointer to the name of the key specifying the end of the conflict range. |no-null|
|
||||
|
||||
:data:`end_key_name_length`
|
||||
|length-of| :data:`end_key_name_length`.
|
||||
``end_key_name_length``
|
||||
|length-of| ``end_key_name``.
|
||||
|
||||
:data:`type`
|
||||
``type``
|
||||
One of the :type:`FDBConflictRangeType` values indicating what type of conflict range is being set.
|
||||
|
||||
.. type:: FDBConflictRangeType
|
||||
|
||||
An enumeration of available conflict range types to be passed to :func:`fdb_transaction_add_conflict_range()`.
|
||||
|
||||
:data:`FDB_CONFLICT_RANGE_TYPE_READ`
|
||||
``FDB_CONFLICT_RANGE_TYPE_READ``
|
||||
|
||||
|add-read-conflict-range-blurb|
|
||||
|
||||
:data:`FDB_CONFLICT_RANGE_TYPE_WRITE`
|
||||
``FDB_CONFLICT_RANGE_TYPE_WRITE``
|
||||
|
||||
|add-write-conflict-range-blurb|
|
||||
|
||||
|
|
|
@ -26,7 +26,6 @@
|
|||
.. |snapshot-ryw-disable-database-option| replace:: :meth:`Database.options.set_snapshot_ryw_disable`
|
||||
.. |future-type-string| replace:: a :class:`Future`
|
||||
.. |read-your-writes-disable-option| replace:: :meth:`Transaction.options.set_read_your_writes_disable`
|
||||
.. |read-your-writes-disable-option| replace:: :meth:`Transaction.options.set_read_your_writes_disable`
|
||||
.. |retry-limit-transaction-option| replace:: :meth:`Transaction.options.set_retry_limit`
|
||||
.. |timeout-transaction-option| replace:: :meth:`Transaction.options.set_timeout`
|
||||
.. |max-retry-delay-transaction-option| replace:: :meth:`Transaction.options.set_max_retry_delay`
|
||||
|
|
|
@ -238,7 +238,7 @@ The ``start`` subcommand is used to start a backup. If there is already a backu
|
|||
.. program:: fdbbackup modify
|
||||
|
||||
``modify``
|
||||
---------
|
||||
----------
|
||||
|
||||
The ``modify`` subcommand is used to modify parameters of a running backup. All specified changes are made in a single transaction.
|
||||
|
||||
|
|
|
@ -39,6 +39,16 @@
|
|||
.. |node-subspace| replace:: FIXME
|
||||
.. |content-subspace| replace:: FIXME
|
||||
.. |allow-manual-prefixes| replace:: FIXME
|
||||
.. |retry-limit-transaction-option| replace:: FIXME
|
||||
.. |timeout-transaction-option| replace:: FIXME
|
||||
.. |max-retry-delay-transaction-option| replace:: FIXME
|
||||
.. |snapshot-ryw-enable-transaction-option| replace:: FIXME
|
||||
.. |snapshot-ryw-disable-transaction-option| replace:: FIXME
|
||||
.. |snapshot-ryw-enable-database-option| replace:: FIXME
|
||||
.. |snapshot-ryw-disable-database-option| replace:: FIXME
|
||||
.. |retry-limit-database-option| replace:: FIXME
|
||||
.. |max-retry-delay-database-option| replace:: FIXME
|
||||
.. |timeout-database-option| replace:: FIXME
|
||||
|
||||
.. include:: api-common.rst.inc
|
||||
|
||||
|
|
|
@ -39,6 +39,16 @@
|
|||
.. |node-subspace| replace:: FIXME
|
||||
.. |content-subspace| replace:: FIXME
|
||||
.. |allow-manual-prefixes| replace:: FIXME
|
||||
.. |retry-limit-transaction-option| replace:: FIXME
|
||||
.. |timeout-transaction-option| replace:: FIXME
|
||||
.. |max-retry-delay-transaction-option| replace:: FIXME
|
||||
.. |snapshot-ryw-enable-transaction-option| replace:: FIXME
|
||||
.. |snapshot-ryw-disable-transaction-option| replace:: FIXME
|
||||
.. |snapshot-ryw-enable-database-option| replace:: FIXME
|
||||
.. |snapshot-ryw-disable-database-option| replace:: FIXME
|
||||
.. |retry-limit-database-option| replace:: FIXME
|
||||
.. |max-retry-delay-database-option| replace:: FIXME
|
||||
.. |timeout-database-option| replace:: FIXME
|
||||
|
||||
.. include:: api-common.rst.inc
|
||||
|
||||
|
|
|
@ -10,38 +10,38 @@ macOS
|
|||
|
||||
The macOS installation package is supported on macOS 10.7+. It includes the client and (optionally) the server.
|
||||
|
||||
* `FoundationDB-6.1.6.pkg <https://www.foundationdb.org/downloads/6.1.6/macOS/installers/FoundationDB-6.1.6.pkg>`_
|
||||
* `FoundationDB-6.1.8.pkg <https://www.foundationdb.org/downloads/6.1.8/macOS/installers/FoundationDB-6.1.8.pkg>`_
|
||||
|
||||
Ubuntu
|
||||
------
|
||||
|
||||
The Ubuntu packages are supported on 64-bit Ubuntu 12.04+, but beware of the Linux kernel bug in Ubuntu 12.x.
|
||||
|
||||
* `foundationdb-clients-6.1.6-1_amd64.deb <https://www.foundationdb.org/downloads/6.1.6/ubuntu/installers/foundationdb-clients_6.1.6-1_amd64.deb>`_
|
||||
* `foundationdb-server-6.1.6-1_amd64.deb <https://www.foundationdb.org/downloads/6.1.6/ubuntu/installers/foundationdb-server_6.1.6-1_amd64.deb>`_ (depends on the clients package)
|
||||
* `foundationdb-clients-6.1.8-1_amd64.deb <https://www.foundationdb.org/downloads/6.1.8/ubuntu/installers/foundationdb-clients_6.1.8-1_amd64.deb>`_
|
||||
* `foundationdb-server-6.1.8-1_amd64.deb <https://www.foundationdb.org/downloads/6.1.8/ubuntu/installers/foundationdb-server_6.1.8-1_amd64.deb>`_ (depends on the clients package)
|
||||
|
||||
RHEL/CentOS EL6
|
||||
---------------
|
||||
|
||||
The RHEL/CentOS EL6 packages are supported on 64-bit RHEL/CentOS 6.x.
|
||||
|
||||
* `foundationdb-clients-6.1.6-1.el6.x86_64.rpm <https://www.foundationdb.org/downloads/6.1.6/rhel6/installers/foundationdb-clients-6.1.6-1.el6.x86_64.rpm>`_
|
||||
* `foundationdb-server-6.1.6-1.el6.x86_64.rpm <https://www.foundationdb.org/downloads/6.1.6/rhel6/installers/foundationdb-server-6.1.6-1.el6.x86_64.rpm>`_ (depends on the clients package)
|
||||
* `foundationdb-clients-6.1.8-1.el6.x86_64.rpm <https://www.foundationdb.org/downloads/6.1.8/rhel6/installers/foundationdb-clients-6.1.8-1.el6.x86_64.rpm>`_
|
||||
* `foundationdb-server-6.1.8-1.el6.x86_64.rpm <https://www.foundationdb.org/downloads/6.1.8/rhel6/installers/foundationdb-server-6.1.8-1.el6.x86_64.rpm>`_ (depends on the clients package)
|
||||
|
||||
RHEL/CentOS EL7
|
||||
---------------
|
||||
|
||||
The RHEL/CentOS EL7 packages are supported on 64-bit RHEL/CentOS 7.x.
|
||||
|
||||
* `foundationdb-clients-6.1.6-1.el7.x86_64.rpm <https://www.foundationdb.org/downloads/6.1.6/rhel7/installers/foundationdb-clients-6.1.6-1.el7.x86_64.rpm>`_
|
||||
* `foundationdb-server-6.1.6-1.el7.x86_64.rpm <https://www.foundationdb.org/downloads/6.1.6/rhel7/installers/foundationdb-server-6.1.6-1.el7.x86_64.rpm>`_ (depends on the clients package)
|
||||
* `foundationdb-clients-6.1.8-1.el7.x86_64.rpm <https://www.foundationdb.org/downloads/6.1.8/rhel7/installers/foundationdb-clients-6.1.8-1.el7.x86_64.rpm>`_
|
||||
* `foundationdb-server-6.1.8-1.el7.x86_64.rpm <https://www.foundationdb.org/downloads/6.1.8/rhel7/installers/foundationdb-server-6.1.8-1.el7.x86_64.rpm>`_ (depends on the clients package)
|
||||
|
||||
Windows
|
||||
-------
|
||||
|
||||
The Windows installer is supported on 64-bit Windows XP and later. It includes the client and (optionally) the server.
|
||||
|
||||
* `foundationdb-6.1.6-x64.msi <https://www.foundationdb.org/downloads/6.1.6/windows/installers/foundationdb-6.1.6-x64.msi>`_
|
||||
* `foundationdb-6.1.8-x64.msi <https://www.foundationdb.org/downloads/6.1.8/windows/installers/foundationdb-6.1.8-x64.msi>`_
|
||||
|
||||
API Language Bindings
|
||||
=====================
|
||||
|
@ -58,20 +58,20 @@ On macOS and Windows, the FoundationDB Python API bindings are installed as part
|
|||
|
||||
If you need to use the FoundationDB Python API from other Python installations or paths, download the Python package:
|
||||
|
||||
* `foundationdb-6.1.6.tar.gz <https://www.foundationdb.org/downloads/6.1.6/bindings/python/foundationdb-6.1.6.tar.gz>`_
|
||||
* `foundationdb-6.1.8.tar.gz <https://www.foundationdb.org/downloads/6.1.8/bindings/python/foundationdb-6.1.8.tar.gz>`_
|
||||
|
||||
Ruby 1.9.3/2.0.0+
|
||||
-----------------
|
||||
|
||||
* `fdb-6.1.6.gem <https://www.foundationdb.org/downloads/6.1.6/bindings/ruby/fdb-6.1.6.gem>`_
|
||||
* `fdb-6.1.8.gem <https://www.foundationdb.org/downloads/6.1.8/bindings/ruby/fdb-6.1.8.gem>`_
|
||||
|
||||
Java 8+
|
||||
-------
|
||||
|
||||
* `fdb-java-6.1.6.jar <https://www.foundationdb.org/downloads/6.1.6/bindings/java/fdb-java-6.1.6.jar>`_
|
||||
* `fdb-java-6.1.6-javadoc.jar <https://www.foundationdb.org/downloads/6.1.6/bindings/java/fdb-java-6.1.6-javadoc.jar>`_
|
||||
* `fdb-java-6.1.8.jar <https://www.foundationdb.org/downloads/6.1.8/bindings/java/fdb-java-6.1.8.jar>`_
|
||||
* `fdb-java-6.1.8-javadoc.jar <https://www.foundationdb.org/downloads/6.1.8/bindings/java/fdb-java-6.1.8-javadoc.jar>`_
|
||||
|
||||
Go 1.1+
|
||||
-------
|
||||
Go 1.11+
|
||||
--------
|
||||
|
||||
The FoundationDB Go package is available on `GitHub <https://github.com/apple/foundationdb/tree/master/bindings/go>`_.
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
.. -*- mode: rst; -*-
|
||||
|
||||
.. |json-status-format| replace::
|
||||
.. code-block:: javascript
|
||||
|
||||
"cluster":{
|
||||
"layers":{
|
||||
"_valid":true,
|
||||
|
|
|
@ -42,12 +42,9 @@ JSON format
|
|||
|
||||
The following format informally describes the JSON containing the status data. The possible values of ``<name_string>`` and ``<description_string>`` are described in :ref:`mr-status-message`. The format is representative: *any field can be missing at any time*, depending on the database state. Clients should be prepared to flexibly handle format variations resulting from different database states.
|
||||
|
||||
.. code-block:: javascript
|
||||
.. include:: mr-status-json-schemas.rst.inc
|
||||
|
||||
.. note:: |json-status-format|
|
||||
|
||||
.. mr-status-message:
|
||||
.. _mr-status-message:
|
||||
|
||||
Message components
|
||||
------------------
|
||||
|
@ -96,7 +93,7 @@ cluster.processes.<process>.messages incorrect_cluster_file_contents Clus
|
|||
cluster.processes.<process>.messages io_error <error> occurred in <subsystem>
|
||||
cluster.processes.<process>.messages platform_error <error> occurred in <subsystem>
|
||||
cluster.processes.<process>.messages process_error <error> occurred in <subsystem>
|
||||
==================================== =============================== =================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================
|
||||
==================================== ==================================== =================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================
|
||||
|
||||
The JSON path ``cluster.recovery_state``, when it exists, is an Object containing at least ``"name"`` and ``"description"``. The possible values for those fields are in the following table:
|
||||
|
||||
|
|
|
@ -144,9 +144,9 @@ Bindings
|
|||
* C API calls made on the network thread could be reordered with calls made from other threads. [6.0.2] `(Issue #518) <https://github.com/apple/foundationdb/issues/518>`_
|
||||
* The TLS_PLUGIN option is now a no-op and has been deprecated. [6.0.10] `(PR #710) <https://github.com/apple/foundationdb/pull/710>`_
|
||||
* Java: the `Versionstamp::getUserVersion() </javadoc/com/apple/foundationdb/tuple/Versionstamp.html#getUserVersion-->`_ method did not handle user versions greater than ``0x00FF`` due to operator precedence errors. [6.0.11] `(Issue #761) <https://github.com/apple/foundationdb/issues/761>`_
|
||||
* Python: bindings didn't work with Python 3.7 because of the new `async` keyword. [6.0.13] `(Issue #830) <https://github.com/apple/foundationdb/issues/830>`_
|
||||
* Go: `PrefixRange` didn't correctly return an error if it failed to generate the range. [6.0.15] `(PR #878) <https://github.com/apple/foundationdb/pull/878>`_
|
||||
* Go: Add Tuple layer support for `uint`, `uint64`, and `*big.Int` integers up to 255 bytes. Integer values will be decoded into the first of `int64`, `uint64`, or `*big.Int` in which they fit. `(PR #915) <https://github.com/apple/foundationdb/pull/915>`_ [6.0.15]
|
||||
* Python: bindings didn't work with Python 3.7 because of the new ``async`` keyword. [6.0.13] `(Issue #830) <https://github.com/apple/foundationdb/issues/830>`_
|
||||
* Go: ``PrefixRange`` didn't correctly return an error if it failed to generate the range. [6.0.15] `(PR #878) <https://github.com/apple/foundationdb/pull/878>`_
|
||||
* Go: Add Tuple layer support for ``uint``, ``uint64``, and ``*big.Int`` integers up to 255 bytes. Integer values will be decoded into the first of ``int64``, ``uint64``, or ``*big.Int`` in which they fit. `(PR #915) <https://github.com/apple/foundationdb/pull/915>`_ [6.0.15]
|
||||
* Ruby: Add Tuple layer support for integers up to 255 bytes. `(PR #915) <https://github.com/apple/foundationdb/pull/915>`_ [6.0.15]
|
||||
* Python: bindings didn't work with Python 3.7 because of the new ``async`` keyword. [6.0.13] `(Issue #830) <https://github.com/apple/foundationdb/issues/830>`_
|
||||
* Go: ``PrefixRange`` didn't correctly return an error if it failed to generate the range. [6.0.15] `(PR #878) <https://github.com/apple/foundationdb/pull/878>`_
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
Release Notes
|
||||
#############
|
||||
|
||||
6.1.6
|
||||
6.1.8
|
||||
=====
|
||||
|
||||
Features
|
||||
|
@ -20,7 +20,7 @@ Features
|
|||
* Separated data distribution from the master into its own role. `(PR #1062) <https://github.com/apple/foundationdb/pull/1062>`_
|
||||
* Separated ratekeeper from the master into its own role. `(PR #1176) <https://github.com/apple/foundationdb/pull/1176>`_
|
||||
* Added a ``CompareAndClear`` atomic op that clears a key if its value matches the supplied value. `(PR #1105) <https://github.com/apple/foundationdb/pull/1105>`_
|
||||
* Added support for IPv6. `(PR #1176) <https://github.com/apple/foundationdb/pull/1178>`_
|
||||
* Added support for IPv6. `(PR #1178) <https://github.com/apple/foundationdb/pull/1178>`_
|
||||
* FDB can now simultaneously listen to TLS and unencrypted ports to facilitate smoother migration to and from TLS. `(PR #1157) <https://github.com/apple/foundationdb/pull/1157>`_
|
||||
* Added ``DISABLE_POSIX_KERNEL_AIO`` knob to fallback to libeio instead of kernel async I/O (KAIO) for systems that do not support KAIO or O_DIRECT flag. `(PR #1283) <https://github.com/apple/foundationdb/pull/1283>`_
|
||||
* Added support for configuring the cluster to use the primary and remote DC's as satellites. `(PR #1320) <https://github.com/apple/foundationdb/pull/1320>`_
|
||||
|
@ -50,6 +50,7 @@ Performance
|
|||
* Increase the rate that deleted pages are made available for reuse in the SQLite storage engine. Rename and add knobs to provide more control over this process. [6.1.3] `(PR #1485) <https://github.com/apple/foundationdb/pull/1485>`_
|
||||
* SQLite page files now grow and shrink in chunks based on a knob which defaults to an effective chunk size of 100MB. [6.1.4] `(PR #1482) <https://github.com/apple/foundationdb/pull/1482>`_ `(PR #1499) <https://github.com/apple/foundationdb/pull/1499>`_
|
||||
* Reduced the rate at which data is moved between servers, to reduce the impact a failure has on cluster performance. [6.1.4] `(PR #1499) <https://github.com/apple/foundationdb/pull/1499>`_
|
||||
* Avoid closing saturated network connections which have not received ping packets. [6.1.7] `(PR #1601) <https://github.com/apple/foundationdb/pull/1601>`_
|
||||
|
||||
Fixes
|
||||
-----
|
||||
|
@ -76,6 +77,7 @@ Fixes
|
|||
* The ``configure`` command in ``fdbcli`` returned successfully even when the configuration was not changed for some error types. [6.1.4] `(PR #1509) <https://github.com/apple/foundationdb/pull/1509>`_
|
||||
* Safety protections in the ``configure`` command in ``fdbcli`` would trigger spuriously when changing between ``three_datacenter`` replication and a region configuration. [6.1.4] `(PR #1509) <https://github.com/apple/foundationdb/pull/1509>`_
|
||||
* Status could report an incorrect reason for ongoing data movement. [6.1.5] `(PR #1544) <https://github.com/apple/foundationdb/pull/1544>`_
|
||||
* Storage servers were considered failed as soon as they were rebooted, instead of waiting to see if they rejoin the cluster. [6.1.8] `(PR #1618) <https://github.com/apple/foundationdb/pull/1618>`_
|
||||
|
||||
Status
|
||||
------
|
||||
|
@ -127,6 +129,8 @@ Fixes only impacting 6.1.0+
|
|||
* Memory tracking trace events could cause the program to crash when called from inside a trace event. [6.1.5] `(PR #1541) <https://github.com/apple/foundationdb/pull/1541>`_
|
||||
* TLogs will replace a large file with an empty file rather than doing a large truncate operation. [6.1.5] `(PR #1545) <https://github.com/apple/foundationdb/pull/1545>`_
|
||||
* Fix PR #1545 to work on Windows and Linux. [6.1.6] `(PR #1556) <https://github.com/apple/foundationdb/pull/1556>`_
|
||||
* Adding a read conflict range for the metadata version key no longer requires read access to the system keys. [6.1.6] `(PR #1556) <https://github.com/apple/foundationdb/pull/1556>`_
|
||||
* The TLog's disk queue files would grow indefinitely after a storage server was removed from the cluster. [6.1.8] `(PR #1617) <https://github.com/apple/foundationdb/pull/1617>`_
|
||||
|
||||
Earlier release notes
|
||||
---------------------
|
||||
|
@ -148,4 +152,4 @@ Earlier release notes
|
|||
* :doc:`Beta 2 (API Version 22) </old-release-notes/release-notes-022>`
|
||||
* :doc:`Beta 1 (API Version 21) </old-release-notes/release-notes-021>`
|
||||
* :doc:`Alpha 6 (API Version 16) </old-release-notes/release-notes-016>`
|
||||
* :doc:`Alpha 5 (API Version 14) </old-release-notes/release-notes-014>`
|
||||
* :doc:`Alpha 5 (API Version 14) </old-release-notes/release-notes-014>`
|
||||
|
|
|
@ -20,6 +20,9 @@ Status
|
|||
Bindings
|
||||
--------
|
||||
|
||||
* Go: The Go bindings now require Go version 1.11 or later.
|
||||
* Go: Fix issue with finalizers running too early that could lead to undefined behavior. `(PR #1451) <https://github.com/apple/foundationdb/pull/1451>`_.
|
||||
|
||||
Other Changes
|
||||
-------------
|
||||
|
||||
|
|
|
@ -1,6 +1,4 @@
|
|||
.. default-domain:: rb
|
||||
.. highlight:: ruby
|
||||
.. module:: FDB
|
||||
|
||||
################
|
||||
Time-Series Data
|
||||
|
@ -95,7 +93,7 @@ Ordering and Transactions
|
|||
|
||||
FoundationDB’s ability to let you structure your data in different ways, keep track of metrics, and search it with varying granularity is a direct result of two key features of our key-value store: global ordering and ACID transactions. And as you’ve seen from the code included above, the direct impact of these properties is simpler application code and overall faster development.
|
||||
|
||||
Global ordering makes a big difference if you’re attempting to process significant amounts of sequential information because the database can retrieve that information quickly and efficiently. So rather than having to package your data into a single database object or broadcast a request for many individual data elements that correspond to a given range of application data (e.g. time0, time1, time2, . . ., timen), a globally ordered storage system, like FoundationDB, can generate a single range request to the database for the matching data. And internally, FoundationDB can further optimize requests by knowing which data resides on which machines, so there’s no need to broadcast the data request to all machines in the cluster.
|
||||
Global ordering makes a big difference if you’re attempting to process significant amounts of sequential information because the database can retrieve that information quickly and efficiently. So rather than having to package your data into a single database object or broadcast a request for many individual data elements that correspond to a given range of application data (e.g. time0, time1, time2, . . ., timeN), a globally ordered storage system, like FoundationDB, can generate a single range request to the database for the matching data. And internally, FoundationDB can further optimize requests by knowing which data resides on which machines, so there’s no need to broadcast the data request to all machines in the cluster.
|
||||
|
||||
Global indexing also makes a huge difference in terms of application complexity and database efficiency. Many non-relational databases provide node-specific indexing and secondary indexing, but if you wanted global indexes, you would have to build those at the application level to ensure the index and related data get updated atomically.
|
||||
|
||||
|
|
|
@ -144,7 +144,7 @@ Parameters and client bindings
|
|||
------------------------------
|
||||
|
||||
Automatic TLS certificate refresh
|
||||
------------------------------
|
||||
---------------------------------
|
||||
|
||||
The TLS certificate will be automatically refreshed on a configurable cadence. The server will inspect the CA, certificate, and key files in the specified locations periodically, and will begin using the new versions if following criterion were met:
|
||||
|
||||
|
@ -351,4 +351,4 @@ A verification string of::
|
|||
Would pass, and:
|
||||
|
||||
* Require that the Subject has a Subject Alternative Name extension, which has one or more members of type DNS that begins with the value ``prod.``.
|
||||
* Require that the Subject has a Subject Alternative Name extension, which has one or more members of type DNS that ends with the value ``.com``.
|
||||
* Require that the Subject has a Subject Alternative Name extension, which has one or more members of type DNS that ends with the value ``.org``.
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
<?xml version="1.0" encoding="utf-8"?>
|
||||
<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
|
||||
<Project DefaultTargets="Build" ToolsVersion="14.1" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
|
||||
<Import Project="$(SolutionDir)versions.target" />
|
||||
<PropertyGroup Condition="'$(Release)' != 'true' ">
|
||||
<PreReleaseDecoration>-PRERELEASE</PreReleaseDecoration>
|
||||
|
@ -37,12 +37,12 @@
|
|||
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|X64'" Label="Configuration">
|
||||
<ConfigurationType>Application</ConfigurationType>
|
||||
<CharacterSet>MultiByte</CharacterSet>
|
||||
<PlatformToolset>v140_xp</PlatformToolset>
|
||||
<PlatformToolset>v141</PlatformToolset>
|
||||
</PropertyGroup>
|
||||
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|X64'" Label="Configuration">
|
||||
<ConfigurationType>Application</ConfigurationType>
|
||||
<CharacterSet>MultiByte</CharacterSet>
|
||||
<PlatformToolset>v140_xp</PlatformToolset>
|
||||
<PlatformToolset>v141</PlatformToolset>
|
||||
</PropertyGroup>
|
||||
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
|
||||
<ImportGroup Label="ExtensionSettings">
|
||||
|
@ -66,6 +66,7 @@
|
|||
</Lib>
|
||||
<ClCompile>
|
||||
<PreprocessorDefinitions>FDB_VT_VERSION="$(Version)$(PreReleaseDecoration)";FDB_VT_PACKAGE_NAME="$(PackageName)";%(PreprocessorDefinitions)</PreprocessorDefinitions>
|
||||
<LanguageStandard>stdcpp17</LanguageStandard>
|
||||
</ClCompile>
|
||||
</ItemDefinitionGroup>
|
||||
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|X64'">
|
||||
|
@ -82,6 +83,7 @@
|
|||
<AdditionalIncludeDirectories>%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
|
||||
<MultiProcessorCompilation>true</MultiProcessorCompilation>
|
||||
<AdditionalOptions>/bigobj @../flow/no_intellisense.opt %(AdditionalOptions)</AdditionalOptions>
|
||||
<LanguageStandard>stdcpp17</LanguageStandard>
|
||||
</ClCompile>
|
||||
<Link>
|
||||
<SubSystem>Console</SubSystem>
|
||||
|
@ -107,6 +109,7 @@
|
|||
<FavorSizeOrSpeed>Speed</FavorSizeOrSpeed>
|
||||
<BufferSecurityCheck>false</BufferSecurityCheck>
|
||||
<MinimalRebuild>false</MinimalRebuild>
|
||||
<LanguageStandard>stdcpp17</LanguageStandard>
|
||||
</ClCompile>
|
||||
<Link>
|
||||
<SubSystem>Console</SubSystem>
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
<?xml version="1.0" encoding="utf-8"?>
|
||||
<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
|
||||
<Project DefaultTargets="Build" ToolsVersion="14.1" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
|
||||
<Import Project="$(SolutionDir)versions.target" />
|
||||
<PropertyGroup Condition="'$(Release)' != 'true' ">
|
||||
<PreReleaseDecoration>-PRERELEASE</PreReleaseDecoration>
|
||||
|
@ -39,14 +39,14 @@
|
|||
<ConfigurationType>Application</ConfigurationType>
|
||||
<UseDebugLibraries>true</UseDebugLibraries>
|
||||
<CharacterSet>MultiByte</CharacterSet>
|
||||
<PlatformToolset>v140_xp</PlatformToolset>
|
||||
<PlatformToolset>v141</PlatformToolset>
|
||||
</PropertyGroup>
|
||||
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|X64'" Label="Configuration">
|
||||
<ConfigurationType>Application</ConfigurationType>
|
||||
<UseDebugLibraries>false</UseDebugLibraries>
|
||||
<WholeProgramOptimization>false</WholeProgramOptimization>
|
||||
<CharacterSet>MultiByte</CharacterSet>
|
||||
<PlatformToolset>v140_xp</PlatformToolset>
|
||||
<PlatformToolset>v141</PlatformToolset>
|
||||
</PropertyGroup>
|
||||
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
|
||||
<ImportGroup Label="ExtensionSettings">
|
||||
|
@ -73,6 +73,7 @@
|
|||
<ItemDefinitionGroup>
|
||||
<ClCompile>
|
||||
<PreprocessorDefinitions>FDB_VT_VERSION="$(Version)$(PreReleaseDecoration)";FDB_VT_PACKAGE_NAME="$(PackageName)";%(PreprocessorDefinitions)</PreprocessorDefinitions>
|
||||
<LanguageStandard>stdcpp17</LanguageStandard>
|
||||
</ClCompile>
|
||||
</ItemDefinitionGroup>
|
||||
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|X64'">
|
||||
|
@ -87,6 +88,7 @@
|
|||
<MinimalRebuild>false</MinimalRebuild>
|
||||
<RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
|
||||
<AdditionalOptions> @../flow/no_intellisense.opt %(AdditionalOptions)</AdditionalOptions>
|
||||
<LanguageStandard>stdcpp17</LanguageStandard>
|
||||
</ClCompile>
|
||||
<Link>
|
||||
<SubSystem>Console</SubSystem>
|
||||
|
@ -113,6 +115,7 @@
|
|||
<BufferSecurityCheck>false</BufferSecurityCheck>
|
||||
<EnableEnhancedInstructionSet>StreamingSIMDExtensions2</EnableEnhancedInstructionSet>
|
||||
<AdditionalOptions> @../flow/no_intellisense.opt %(AdditionalOptions)</AdditionalOptions>
|
||||
<LanguageStandard>stdcpp17</LanguageStandard>
|
||||
</ClCompile>
|
||||
<Link>
|
||||
<SubSystem>Console</SubSystem>
|
||||
|
|
|
@ -60,7 +60,7 @@ public:
|
|||
|
||||
Database clone() const { return Database(new DatabaseContext( cluster, clientInfo, clientInfoMonitor, dbId, taskID, clientLocality, enableLocalityLoadBalance, lockAware, apiVersion )); }
|
||||
|
||||
pair<KeyRange,Reference<LocationInfo>> getCachedLocation( const KeyRef&, bool isBackward = false );
|
||||
std::pair<KeyRange,Reference<LocationInfo>> getCachedLocation( const KeyRef&, bool isBackward = false );
|
||||
bool getCachedLocations( const KeyRangeRef&, vector<std::pair<KeyRange,Reference<LocationInfo>>>&, int limit, bool reverse );
|
||||
Reference<LocationInfo> setCachedLocation( const KeyRangeRef&, const vector<struct StorageServerInterface>& );
|
||||
void invalidateCache( const KeyRef&, bool isBackward = false );
|
||||
|
|
|
@ -21,11 +21,14 @@
|
|||
#ifndef FDBCLIENT_FDBTYPES_H
|
||||
#define FDBCLIENT_FDBTYPES_H
|
||||
|
||||
#include <algorithm>
|
||||
#include <set>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
#include "flow/flow.h"
|
||||
#include "fdbclient/Knobs.h"
|
||||
|
||||
using std::vector;
|
||||
using std::pair;
|
||||
typedef int64_t Version;
|
||||
typedef uint64_t LogEpoch;
|
||||
typedef uint64_t Sequence;
|
||||
|
@ -761,7 +764,7 @@ static bool addressExcluded( std::set<AddressExclusion> const& exclusions, Netwo
|
|||
struct ClusterControllerPriorityInfo {
|
||||
enum DCFitness { FitnessPrimary, FitnessRemote, FitnessPreferred, FitnessUnknown, FitnessBad }; //cannot be larger than 7 because of leader election mask
|
||||
|
||||
static DCFitness calculateDCFitness(Optional<Key> const& dcId, vector<Optional<Key>> const& dcPriority) {
|
||||
static DCFitness calculateDCFitness(Optional<Key> const& dcId, std::vector<Optional<Key>> const& dcPriority) {
|
||||
if(!dcPriority.size()) {
|
||||
return FitnessUnknown;
|
||||
} else if(dcPriority.size() == 1) {
|
||||
|
|
|
@ -3922,6 +3922,8 @@ public:
|
|||
doc.setKey("Tag", tag.tagName);
|
||||
|
||||
if(uidAndAbortedFlag.present()) {
|
||||
doc.setKey("UID", uidAndAbortedFlag.get().first.toString());
|
||||
|
||||
state BackupConfig config(uidAndAbortedFlag.get().first);
|
||||
|
||||
state EBackupState backupState = wait(config.stateEnum().getD(tr, false, EBackupState::STATE_NEVERRAN));
|
||||
|
|
|
@ -304,7 +304,7 @@ ACTOR Future<ConfigurationResult::Type> changeConfig( Database cx, std::map<std:
|
|||
}
|
||||
}
|
||||
|
||||
state Future<Void> tooLong = delay(4.5);
|
||||
state Future<Void> tooLong = delay(60);
|
||||
state Key versionKey = BinaryWriter::toValue(g_random->randomUniqueID(),Unversioned());
|
||||
state bool oldReplicationUsesDcId = false;
|
||||
loop {
|
||||
|
|
|
@ -23,6 +23,9 @@
|
|||
#define FDBCLIENT_MASTERPROXYINTERFACE_H
|
||||
#pragma once
|
||||
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
#include "fdbclient/FDBTypes.h"
|
||||
#include "fdbclient/StorageServerInterface.h"
|
||||
#include "fdbclient/CommitTransaction.h"
|
||||
|
@ -168,7 +171,7 @@ struct GetReadVersionRequest : TimedRequest {
|
|||
struct GetKeyServerLocationsReply {
|
||||
constexpr static FileIdentifier file_identifier = 10636023;
|
||||
Arena arena;
|
||||
vector<pair<KeyRangeRef, vector<StorageServerInterface>>> results;
|
||||
std::vector<std::pair<KeyRangeRef, vector<StorageServerInterface>>> results;
|
||||
|
||||
template <class Ar>
|
||||
void serialize(Ar& ar) {
|
||||
|
@ -213,7 +216,7 @@ struct GetStorageServerRejoinInfoReply {
|
|||
Tag tag;
|
||||
Optional<Tag> newTag;
|
||||
bool newLocality;
|
||||
vector<pair<Version, Tag>> history;
|
||||
std::vector<std::pair<Version, Tag>> history;
|
||||
|
||||
template <class Ar>
|
||||
void serialize(Ar& ar) {
|
||||
|
|
|
@ -62,6 +62,7 @@ extern const char* getHGVersion();
|
|||
using std::make_pair;
|
||||
using std::max;
|
||||
using std::min;
|
||||
using std::pair;
|
||||
|
||||
NetworkOptions networkOptions;
|
||||
Reference<TLSOptions> tlsOptions;
|
||||
|
|
|
@ -1344,7 +1344,7 @@ void ReadYourWritesTransaction::addReadConflictRange( KeyRangeRef const& keys )
|
|||
}
|
||||
|
||||
if (tr.apiVersionAtLeast(300)) {
|
||||
if (keys.begin > getMaxReadKey() || keys.end > getMaxReadKey()) {
|
||||
if ((keys.begin > getMaxReadKey() || keys.end > getMaxReadKey()) && (keys.begin != metadataVersionKey || keys.end != metadataVersionKeyEnd)) {
|
||||
throw key_outside_legal_range();
|
||||
}
|
||||
}
|
||||
|
|
|
@ -587,6 +587,7 @@ const KeyRef maxUIDKey = LiteralStringRef("\xff\xff\xff\xff\xff\xff\xff\xff\xff\
|
|||
|
||||
const KeyRef databaseLockedKey = LiteralStringRef("\xff/dbLocked");
|
||||
const KeyRef metadataVersionKey = LiteralStringRef("\xff/metadataVersion");
|
||||
const KeyRef metadataVersionKeyEnd = LiteralStringRef("\xff/metadataVersion\x00");
|
||||
const KeyRef metadataVersionRequiredValue = LiteralStringRef("\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00");
|
||||
const KeyRef mustContainSystemMutationsKey = LiteralStringRef("\xff/mustContainSystemMutations");
|
||||
|
||||
|
@ -624,3 +625,8 @@ std::pair<Key,Version> decodeHealthyZoneValue( ValueRef const& value) {
|
|||
reader >> version;
|
||||
return std::make_pair(zoneId, version);
|
||||
}
|
||||
|
||||
const KeyRangeRef testOnlyTxnStateStorePrefixRange(
|
||||
LiteralStringRef("\xff/TESTONLYtxnStateStore/"),
|
||||
LiteralStringRef("\xff/TESTONLYtxnStateStore0")
|
||||
);
|
||||
|
|
|
@ -266,6 +266,7 @@ extern const KeyRef maxUIDKey;
|
|||
|
||||
extern const KeyRef databaseLockedKey;
|
||||
extern const KeyRef metadataVersionKey;
|
||||
extern const KeyRef metadataVersionKeyEnd;
|
||||
extern const KeyRef metadataVersionRequiredValue;
|
||||
extern const KeyRef mustContainSystemMutationsKey;
|
||||
|
||||
|
@ -282,4 +283,8 @@ extern const KeyRef healthyZoneKey;
|
|||
const Value healthyZoneValue( StringRef const& zoneId, Version version );
|
||||
std::pair<Key,Version> decodeHealthyZoneValue( ValueRef const& );
|
||||
|
||||
// All mutations done to this range are blindly copied into txnStateStore.
|
||||
// Used to create artifically large txnStateStore instances in testing.
|
||||
extern const KeyRangeRef testOnlyTxnStateStorePrefixRange;
|
||||
|
||||
#endif
|
||||
|
|
|
@ -483,7 +483,8 @@ public:
|
|||
return r->second;
|
||||
}
|
||||
|
||||
static const int overheadPerItem = 128*4;
|
||||
// For each item in the versioned map, 4 PTree nodes are potentially allocated:
|
||||
static const int overheadPerItem = NextFastAllocatedSize<sizeof(PTreeT)>::Result*4;
|
||||
struct iterator;
|
||||
|
||||
VersionedMap() : oldestVersion(0), latestVersion(0) {
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
<?xml version="1.0" encoding="utf-8"?>
|
||||
<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
|
||||
<Project DefaultTargets="Build" ToolsVersion="14.1" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
|
||||
<Import Project="$(SolutionDir)versions.target" />
|
||||
<PropertyGroup Condition="'$(Release)' != 'true' ">
|
||||
<PreReleaseDecoration>-PRERELEASE</PreReleaseDecoration>
|
||||
|
@ -149,12 +149,12 @@
|
|||
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|X64'" Label="Configuration">
|
||||
<ConfigurationType>StaticLibrary</ConfigurationType>
|
||||
<CharacterSet>MultiByte</CharacterSet>
|
||||
<PlatformToolset>v140_xp</PlatformToolset>
|
||||
<PlatformToolset>v141</PlatformToolset>
|
||||
</PropertyGroup>
|
||||
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|X64'" Label="Configuration">
|
||||
<ConfigurationType>StaticLibrary</ConfigurationType>
|
||||
<CharacterSet>MultiByte</CharacterSet>
|
||||
<PlatformToolset>v140_xp</PlatformToolset>
|
||||
<PlatformToolset>v141</PlatformToolset>
|
||||
</PropertyGroup>
|
||||
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
|
||||
<ImportGroup Label="ExtensionSettings">
|
||||
|
@ -174,6 +174,7 @@
|
|||
<ItemDefinitionGroup>
|
||||
<ClCompile>
|
||||
<PreprocessorDefinitions>FDB_VT_VERSION="$(Version)$(PreReleaseDecoration)";FDB_VT_PACKAGE_NAME="$(PackageName)";%(PreprocessorDefinitions)</PreprocessorDefinitions>
|
||||
<LanguageStandard>stdcpp17</LanguageStandard>
|
||||
</ClCompile>
|
||||
</ItemDefinitionGroup>
|
||||
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|X64'">
|
||||
|
@ -190,6 +191,7 @@
|
|||
<AdditionalIncludeDirectories>%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
|
||||
<MultiProcessorCompilation>true</MultiProcessorCompilation>
|
||||
<AdditionalOptions>/bigobj @../flow/no_intellisense.opt %(AdditionalOptions)</AdditionalOptions>
|
||||
<LanguageStandard>stdcpp17</LanguageStandard>
|
||||
</ClCompile>
|
||||
<Link>
|
||||
<SubSystem>Console</SubSystem>
|
||||
|
@ -217,6 +219,7 @@
|
|||
<MultiProcessorCompilation>true</MultiProcessorCompilation>
|
||||
<FavorSizeOrSpeed>Speed</FavorSizeOrSpeed>
|
||||
<MinimalRebuild>false</MinimalRebuild>
|
||||
<LanguageStandard>stdcpp17</LanguageStandard>
|
||||
</ClCompile>
|
||||
<Link>
|
||||
<SubSystem>Console</SubSystem>
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
<?xml version="1.0" encoding="utf-8"?>
|
||||
<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
|
||||
<Project DefaultTargets="Build" ToolsVersion="14.1" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
|
||||
<ItemGroup Label="ProjectConfigurations">
|
||||
<ProjectConfiguration Include="Debug|x64">
|
||||
<Configuration>Debug</Configuration>
|
||||
|
@ -19,14 +19,14 @@
|
|||
<ConfigurationType>Application</ConfigurationType>
|
||||
<UseDebugLibraries>true</UseDebugLibraries>
|
||||
<CharacterSet>MultiByte</CharacterSet>
|
||||
<PlatformToolset>v140_xp</PlatformToolset>
|
||||
<PlatformToolset>v141</PlatformToolset>
|
||||
</PropertyGroup>
|
||||
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
|
||||
<ConfigurationType>Application</ConfigurationType>
|
||||
<UseDebugLibraries>false</UseDebugLibraries>
|
||||
<WholeProgramOptimization>true</WholeProgramOptimization>
|
||||
<CharacterSet>MultiByte</CharacterSet>
|
||||
<PlatformToolset>v140_xp</PlatformToolset>
|
||||
<PlatformToolset>v141</PlatformToolset>
|
||||
</PropertyGroup>
|
||||
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
|
||||
<ImportGroup Label="ExtensionSettings">
|
||||
|
@ -48,6 +48,7 @@
|
|||
<ClCompile>
|
||||
<WarningLevel>Level3</WarningLevel>
|
||||
<Optimization>Disabled</Optimization>
|
||||
<LanguageStandard>stdcpp17</LanguageStandard>
|
||||
</ClCompile>
|
||||
<Link>
|
||||
<GenerateDebugInformation>true</GenerateDebugInformation>
|
||||
|
@ -59,6 +60,7 @@
|
|||
<Optimization>MaxSpeed</Optimization>
|
||||
<FunctionLevelLinking>true</FunctionLevelLinking>
|
||||
<IntrinsicFunctions>true</IntrinsicFunctions>
|
||||
<LanguageStandard>stdcpp17</LanguageStandard>
|
||||
</ClCompile>
|
||||
<Link>
|
||||
<GenerateDebugInformation>true</GenerateDebugInformation>
|
||||
|
|
|
@ -33,10 +33,15 @@ EvictablePage::~EvictablePage() {
|
|||
else
|
||||
aligned_free(data);
|
||||
}
|
||||
if (index > -1) {
|
||||
pageCache->pages[index] = pageCache->pages.back();
|
||||
pageCache->pages[index]->index = index;
|
||||
pageCache->pages.pop_back();
|
||||
if (EvictablePageCache::RANDOM == pageCache->cacheEvictionType) {
|
||||
if (index > -1) {
|
||||
pageCache->pages[index] = pageCache->pages.back();
|
||||
pageCache->pages[index]->index = index;
|
||||
pageCache->pages.pop_back();
|
||||
}
|
||||
} else {
|
||||
// remove it from the LRU
|
||||
pageCache->lruPages.erase(EvictablePageCache::List::s_iterator_to(*this));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -97,6 +102,8 @@ Future<Void> AsyncFileCached::read_write_impl( AsyncFileCached* self, void* data
|
|||
if ( p == self->pages.end() ) {
|
||||
AFCPage* page = new AFCPage( self, pageOffset );
|
||||
p = self->pages.insert( std::make_pair(pageOffset, page) ).first;
|
||||
} else {
|
||||
self->pageCache->updateHit(p->second);
|
||||
}
|
||||
|
||||
int bytesInPage = std::min(self->pageCache->pageSize - offsetInPage, remaining);
|
||||
|
@ -133,6 +140,8 @@ Future<Void> AsyncFileCached::readZeroCopy( void** data, int* length, int64_t of
|
|||
if ( p == pages.end() ) {
|
||||
AFCPage* page = new AFCPage( this, offset );
|
||||
p = pages.insert( std::make_pair(offset, page) ).first;
|
||||
} else {
|
||||
p->second->pageCache->updateHit(p->second);
|
||||
}
|
||||
|
||||
*data = p->second->data;
|
||||
|
|
|
@ -27,6 +27,8 @@
|
|||
#elif !defined(FLOW_ASYNCFILECACHED_ACTOR_H)
|
||||
#define FLOW_ASYNCFILECACHED_ACTOR_H
|
||||
|
||||
#include <boost/intrusive/list.hpp>
|
||||
|
||||
#include "flow/flow.h"
|
||||
#include "fdbrpc/IAsyncFile.h"
|
||||
#include "flow/Knobs.h"
|
||||
|
@ -34,10 +36,12 @@
|
|||
#include "flow/network.h"
|
||||
#include "flow/actorcompiler.h" // This must be the last #include.
|
||||
|
||||
namespace bi = boost::intrusive;
|
||||
struct EvictablePage {
|
||||
void* data;
|
||||
int index;
|
||||
class Reference<struct EvictablePageCache> pageCache;
|
||||
bi::list_member_hook<> member_hook;
|
||||
|
||||
virtual bool evict() = 0; // true if page was evicted, false if it isn't immediately evictable (but will be evicted regardless if possible)
|
||||
|
||||
|
@ -46,30 +50,86 @@ struct EvictablePage {
|
|||
};
|
||||
|
||||
struct EvictablePageCache : ReferenceCounted<EvictablePageCache> {
|
||||
EvictablePageCache() : pageSize(0), maxPages(0) {}
|
||||
explicit EvictablePageCache(int pageSize, int64_t maxSize) : pageSize(pageSize), maxPages(maxSize / pageSize) {}
|
||||
using List = bi::list< EvictablePage, bi::member_hook< EvictablePage, bi::list_member_hook<>, &EvictablePage::member_hook>>;
|
||||
enum CacheEvictionType { RANDOM = 0, LRU = 1 };
|
||||
|
||||
static CacheEvictionType evictionPolicyStringToEnum(const std::string &policy) {
|
||||
std::string cep = policy;
|
||||
std::transform(cep.begin(), cep.end(), cep.begin(), ::tolower);
|
||||
if (cep != "random" && cep != "lru")
|
||||
throw invalid_cache_eviction_policy();
|
||||
|
||||
if (cep == "random")
|
||||
return RANDOM;
|
||||
return LRU;
|
||||
}
|
||||
|
||||
EvictablePageCache() : pageSize(0), maxPages(0), cacheEvictionType(RANDOM) {}
|
||||
|
||||
explicit EvictablePageCache(int pageSize, int64_t maxSize) : pageSize(pageSize), maxPages(maxSize / pageSize), cacheEvictionType(evictionPolicyStringToEnum(FLOW_KNOBS->CACHE_EVICTION_POLICY)) {
|
||||
cacheHits.init(LiteralStringRef("EvictablePageCache.CacheHits"));
|
||||
cacheMisses.init(LiteralStringRef("EvictablePageCache.CacheMisses"));
|
||||
cacheEvictions.init(LiteralStringRef("EvictablePageCache.CacheEvictions"));
|
||||
}
|
||||
|
||||
void allocate(EvictablePage* page) {
|
||||
try_evict();
|
||||
try_evict();
|
||||
page->data = pageSize == 4096 ? FastAllocator<4096>::allocate() : aligned_alloc(4096,pageSize);
|
||||
page->index = pages.size();
|
||||
pages.push_back(page);
|
||||
if (RANDOM == cacheEvictionType) {
|
||||
page->index = pages.size();
|
||||
pages.push_back(page);
|
||||
} else {
|
||||
lruPages.push_back(*page); // new page is considered the most recently used (placed at LRU tail)
|
||||
}
|
||||
++cacheMisses;
|
||||
}
|
||||
|
||||
void updateHit(EvictablePage* page) {
|
||||
if (RANDOM != cacheEvictionType) {
|
||||
// on a hit, update page's location in the LRU so that it's most recent (tail)
|
||||
lruPages.erase(List::s_iterator_to(*page));
|
||||
lruPages.push_back(*page);
|
||||
}
|
||||
++cacheHits;
|
||||
}
|
||||
|
||||
void try_evict() {
|
||||
if (pages.size() >= (uint64_t)maxPages && !pages.empty()) {
|
||||
for (int i = 0; i < FLOW_KNOBS->MAX_EVICT_ATTEMPTS; i++) { // If we don't manage to evict anything, just go ahead and exceed the cache limit
|
||||
int toEvict = g_random->randomInt(0, pages.size());
|
||||
if (pages[toEvict]->evict())
|
||||
break;
|
||||
if (RANDOM == cacheEvictionType) {
|
||||
if (pages.size() >= (uint64_t)maxPages && !pages.empty()) {
|
||||
for (int i = 0; i < FLOW_KNOBS->MAX_EVICT_ATTEMPTS; i++) { // If we don't manage to evict anything, just go ahead and exceed the cache limit
|
||||
int toEvict = g_random->randomInt(0, pages.size());
|
||||
if (pages[toEvict]->evict()) {
|
||||
++cacheEvictions;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// For now, LRU is the only other CACHE_EVICTION option
|
||||
if (lruPages.size() >= (uint64_t)maxPages) {
|
||||
int i = 0;
|
||||
// try the least recently used pages first (starting at head of the LRU list)
|
||||
for (List::iterator it = lruPages.begin();
|
||||
it != lruPages.end() && i < FLOW_KNOBS->MAX_EVICT_ATTEMPTS;
|
||||
++it, ++i) { // If we don't manage to evict anything, just go ahead and exceed the cache limit
|
||||
if (it->evict()) {
|
||||
++cacheEvictions;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
std::vector<EvictablePage*> pages;
|
||||
List lruPages;
|
||||
int pageSize;
|
||||
int64_t maxPages;
|
||||
Int64MetricHandle cacheHits;
|
||||
Int64MetricHandle cacheMisses;
|
||||
Int64MetricHandle cacheEvictions;
|
||||
const CacheEvictionType cacheEvictionType;
|
||||
};
|
||||
|
||||
struct OpenFileInfo : NonCopyable {
|
||||
|
|
|
@ -294,16 +294,18 @@ struct Peer : NonCopyable {
|
|||
ReliablePacketList reliable;
|
||||
AsyncTrigger dataToSend; // Triggered when unsent.empty() becomes false
|
||||
Future<Void> connect;
|
||||
AsyncTrigger incompatibleDataRead;
|
||||
AsyncTrigger resetPing;
|
||||
bool compatible;
|
||||
bool outgoingConnectionIdle; // We don't actually have a connection open and aren't trying to open one because we don't have anything to send
|
||||
double lastConnectTime;
|
||||
double reconnectionDelay;
|
||||
int peerReferences;
|
||||
bool incompatibleProtocolVersionNewer;
|
||||
int64_t bytesReceived;
|
||||
|
||||
explicit Peer( TransportData* transport, NetworkAddress const& destination )
|
||||
: transport(transport), destination(destination), outgoingConnectionIdle(false), lastConnectTime(0.0), reconnectionDelay(FLOW_KNOBS->INITIAL_RECONNECTION_TIME), compatible(true), incompatibleProtocolVersionNewer(false), peerReferences(-1)
|
||||
: transport(transport), destination(destination), outgoingConnectionIdle(false), lastConnectTime(0.0), reconnectionDelay(FLOW_KNOBS->INITIAL_RECONNECTION_TIME),
|
||||
compatible(true), incompatibleProtocolVersionNewer(false), peerReferences(-1), bytesReceived(0)
|
||||
{
|
||||
connect = connectionKeeper(this);
|
||||
}
|
||||
|
@ -401,11 +403,23 @@ struct Peer : NonCopyable {
|
|||
|
||||
state ReplyPromise<Void> reply;
|
||||
FlowTransport::transport().sendUnreliable( SerializeSource<ReplyPromise<Void>>(reply), remotePing.getEndpoint() );
|
||||
|
||||
choose {
|
||||
when (wait( delay( FLOW_KNOBS->CONNECTION_MONITOR_TIMEOUT ) )) { TraceEvent("ConnectionTimeout").suppressFor(1.0).detail("WithAddr", peer->destination); throw connection_failed(); }
|
||||
when (wait( reply.getFuture() )) {}
|
||||
when (wait( peer->incompatibleDataRead.onTrigger())) {}
|
||||
state int64_t startingBytes = peer->bytesReceived;
|
||||
loop {
|
||||
choose {
|
||||
when (wait( delay( FLOW_KNOBS->CONNECTION_MONITOR_TIMEOUT ) )) {
|
||||
if(startingBytes == peer->bytesReceived) {
|
||||
TraceEvent("ConnectionTimeout").suppressFor(1.0).detail("WithAddr", peer->destination);
|
||||
throw connection_failed();
|
||||
}
|
||||
startingBytes = peer->bytesReceived;
|
||||
}
|
||||
when (wait( reply.getFuture() )) {
|
||||
break;
|
||||
}
|
||||
when (wait( peer->resetPing.onTrigger())) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -719,6 +733,9 @@ ACTOR static Future<Void> connectionReader(
|
|||
}
|
||||
|
||||
int readBytes = conn->read( unprocessed_end, buffer_end );
|
||||
if(peer) {
|
||||
peer->bytesReceived += readBytes;
|
||||
}
|
||||
if (!readBytes) break;
|
||||
state bool readWillBlock = readBytes != readAllBytes;
|
||||
unprocessed_end += readBytes;
|
||||
|
@ -818,7 +835,7 @@ ACTOR static Future<Void> connectionReader(
|
|||
}
|
||||
else if(!expectConnectPacket) {
|
||||
unprocessed_begin = unprocessed_end;
|
||||
peer->incompatibleDataRead.trigger();
|
||||
peer->resetPing.trigger();
|
||||
}
|
||||
|
||||
if (readWillBlock)
|
||||
|
@ -985,7 +1002,7 @@ void FlowTransport::removePeerReference( const Endpoint& endpoint, NetworkMessag
|
|||
.detail("Token", endpoint.token);
|
||||
}
|
||||
if(peer->peerReferences == 0 && peer->reliable.empty() && peer->unsent.empty()) {
|
||||
peer->incompatibleDataRead.trigger();
|
||||
peer->resetPing.trigger();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
<?xml version="1.0" encoding="utf-8"?>
|
||||
<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
|
||||
<Project DefaultTargets="Build" ToolsVersion="14.1" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
|
||||
<ItemGroup Label="ProjectConfigurations">
|
||||
<ProjectConfiguration Include="Debug|X64">
|
||||
<Configuration>Debug</Configuration>
|
||||
|
@ -138,12 +138,12 @@
|
|||
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|X64'" Label="Configuration">
|
||||
<ConfigurationType>StaticLibrary</ConfigurationType>
|
||||
<CharacterSet>MultiByte</CharacterSet>
|
||||
<PlatformToolset>v140_xp</PlatformToolset>
|
||||
<PlatformToolset>v141</PlatformToolset>
|
||||
</PropertyGroup>
|
||||
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|X64'" Label="Configuration">
|
||||
<ConfigurationType>StaticLibrary</ConfigurationType>
|
||||
<CharacterSet>MultiByte</CharacterSet>
|
||||
<PlatformToolset>v140_xp</PlatformToolset>
|
||||
<PlatformToolset>v141</PlatformToolset>
|
||||
</PropertyGroup>
|
||||
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
|
||||
<ImportGroup Label="ExtensionSettings">
|
||||
|
@ -181,6 +181,7 @@
|
|||
<AdditionalIncludeDirectories>%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
|
||||
<MultiProcessorCompilation>true</MultiProcessorCompilation>
|
||||
<AdditionalOptions>/bigobj @../flow/no_intellisense.opt %(AdditionalOptions)</AdditionalOptions>
|
||||
<LanguageStandard>stdcpp17</LanguageStandard>
|
||||
</ClCompile>
|
||||
<Link>
|
||||
<SubSystem>Console</SubSystem>
|
||||
|
@ -209,6 +210,7 @@
|
|||
<MultiProcessorCompilation>true</MultiProcessorCompilation>
|
||||
<FavorSizeOrSpeed>Speed</FavorSizeOrSpeed>
|
||||
<MinimalRebuild>false</MinimalRebuild>
|
||||
<LanguageStandard>stdcpp17</LanguageStandard>
|
||||
</ClCompile>
|
||||
<Link>
|
||||
<SubSystem>Console</SubSystem>
|
||||
|
|
|
@ -174,7 +174,7 @@ SimClogging g_clogging;
|
|||
|
||||
struct Sim2Conn : IConnection, ReferenceCounted<Sim2Conn> {
|
||||
Sim2Conn( ISimulator::ProcessInfo* process )
|
||||
: process(process), dbgid( g_random->randomUniqueID() ), opened(false), closedByCaller(false)
|
||||
: process(process), dbgid( g_random->randomUniqueID() ), opened(false), closedByCaller(false), stopReceive(Never())
|
||||
{
|
||||
pipes = sender(this) && receiver(this);
|
||||
}
|
||||
|
@ -209,6 +209,7 @@ struct Sim2Conn : IConnection, ReferenceCounted<Sim2Conn> {
|
|||
|
||||
void peerClosed() {
|
||||
leakedConnectionTracker = trackLeakedConnection(this);
|
||||
stopReceive = delay(1.0);
|
||||
}
|
||||
|
||||
// Reads as many bytes as possible from the read buffer into [begin,end) and returns the number of bytes read (might be 0)
|
||||
|
@ -285,6 +286,7 @@ private:
|
|||
Future<Void> leakedConnectionTracker;
|
||||
|
||||
Future<Void> pipes;
|
||||
Future<Void> stopReceive;
|
||||
|
||||
int availableSendBufferForPeer() const { return sendBufSize - (writtenBytes.get() - receivedBytes.get()); } // SOMEDAY: acknowledgedBytes instead of receivedBytes
|
||||
|
||||
|
@ -317,6 +319,9 @@ private:
|
|||
ASSERT( g_simulator.getCurrentProcess() == self->process );
|
||||
wait( delay( g_clogging.getRecvDelay( self->process->address, self->peerProcess->address ) ) );
|
||||
ASSERT( g_simulator.getCurrentProcess() == self->process );
|
||||
if(self->stopReceive.isReady()) {
|
||||
wait(Future<Void>(Never()));
|
||||
}
|
||||
self->receivedBytes.set( pos );
|
||||
wait( Future<Void>(Void()) ); // Prior notification can delete self and cancel this actor
|
||||
ASSERT( g_simulator.getCurrentProcess() == self->process );
|
||||
|
|
|
@ -179,7 +179,7 @@ static void applyMetadataMutations(UID const& dbgid, Arena &arena, VectorRef<Mut
|
|||
}
|
||||
}
|
||||
} else if( m.param1 == databaseLockedKey || m.param1 == metadataVersionKey || m.param1 == mustContainSystemMutationsKey || m.param1.startsWith(applyMutationsBeginRange.begin) ||
|
||||
m.param1.startsWith(applyMutationsAddPrefixRange.begin) || m.param1.startsWith(applyMutationsRemovePrefixRange.begin) || m.param1.startsWith(tagLocalityListPrefix) || m.param1.startsWith(serverTagHistoryPrefix) ) {
|
||||
m.param1.startsWith(applyMutationsAddPrefixRange.begin) || m.param1.startsWith(applyMutationsRemovePrefixRange.begin) || m.param1.startsWith(tagLocalityListPrefix) || m.param1.startsWith(serverTagHistoryPrefix) || m.param1.startsWith(testOnlyTxnStateStorePrefixRange.begin) ) {
|
||||
if(!initialCommit) txnStateStore->set(KeyValueRef(m.param1, m.param2));
|
||||
}
|
||||
else if (m.param1.startsWith(applyMutationsEndRange.begin)) {
|
||||
|
@ -352,6 +352,9 @@ static void applyMetadataMutations(UID const& dbgid, Arena &arena, VectorRef<Mut
|
|||
if (range.contains(mustContainSystemMutationsKey)) {
|
||||
if(!initialCommit) txnStateStore->clear(singleKeyRange(mustContainSystemMutationsKey));
|
||||
}
|
||||
if (range.intersects(testOnlyTxnStateStorePrefixRange)) {
|
||||
if(!initialCommit) txnStateStore->clear(range & testOnlyTxnStateStorePrefixRange);
|
||||
}
|
||||
if(range.intersects(applyMutationsEndRange)) {
|
||||
KeyRangeRef commonEndRange(range & applyMutationsEndRange);
|
||||
if(!initialCommit) txnStateStore->clear(commonEndRange);
|
||||
|
|
|
@ -22,6 +22,9 @@
|
|||
#define CONFLICTSET_H
|
||||
#pragma once
|
||||
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
#include "fdbclient/CommitTransaction.h"
|
||||
|
||||
struct ConflictSet;
|
||||
|
@ -40,23 +43,23 @@ struct ConflictBatch {
|
|||
};
|
||||
|
||||
void addTransaction( const CommitTransactionRef& transaction );
|
||||
void detectConflicts(Version now, Version newOldestVersion, vector<int>& nonConflicting, vector<int>* tooOldTransactions = NULL);
|
||||
void GetTooOldTransactions(vector<int>& tooOldTransactions);
|
||||
void detectConflicts(Version now, Version newOldestVersion, std::vector<int>& nonConflicting, std::vector<int>* tooOldTransactions = NULL);
|
||||
void GetTooOldTransactions(std::vector<int>& tooOldTransactions);
|
||||
|
||||
private:
|
||||
ConflictSet* cs;
|
||||
Standalone< VectorRef< struct TransactionInfo* > > transactionInfo;
|
||||
vector<struct KeyInfo> points;
|
||||
std::vector<struct KeyInfo> points;
|
||||
int transactionCount;
|
||||
vector< pair<StringRef,StringRef> > combinedWriteConflictRanges;
|
||||
vector< struct ReadConflictRange > combinedReadConflictRanges;
|
||||
std::vector< std::pair<StringRef,StringRef> > combinedWriteConflictRanges;
|
||||
std::vector< struct ReadConflictRange > combinedReadConflictRanges;
|
||||
bool* transactionConflictStatus;
|
||||
|
||||
void checkIntraBatchConflicts();
|
||||
void combineWriteConflictRanges();
|
||||
void checkReadConflictRanges();
|
||||
void mergeWriteConflictRanges(Version now);
|
||||
void addConflictRanges(Version now, vector< pair<StringRef,StringRef> >::iterator begin, vector< pair<StringRef,StringRef> >::iterator end, class SkipList* part);
|
||||
void addConflictRanges(Version now, std::vector< std::pair<StringRef,StringRef> >::iterator begin, std::vector< std::pair<StringRef,StringRef> >::iterator end, class SkipList* part);
|
||||
};
|
||||
|
||||
#endif
|
|
@ -2730,9 +2730,11 @@ ACTOR Future<Void> waitHealthyZoneChange( DDTeamCollection* self ) {
|
|||
tr.setOption(FDBTransactionOptions::READ_SYSTEM_KEYS);
|
||||
tr.setOption(FDBTransactionOptions::LOCK_AWARE);
|
||||
Optional<Value> val = wait(tr.get(healthyZoneKey));
|
||||
state Future<Void> healthyZoneTimeout = Never();
|
||||
if(val.present()) {
|
||||
auto p = decodeHealthyZoneValue(val.get());
|
||||
if(p.second > tr.getReadVersion().get()) {
|
||||
healthyZoneTimeout = delay((p.second - tr.getReadVersion().get())/(double)SERVER_KNOBS->VERSIONS_PER_SECOND);
|
||||
self->healthyZone.set(p.first);
|
||||
} else {
|
||||
self->healthyZone.set(Optional<Key>());
|
||||
|
@ -2740,9 +2742,10 @@ ACTOR Future<Void> waitHealthyZoneChange( DDTeamCollection* self ) {
|
|||
} else {
|
||||
self->healthyZone.set(Optional<Key>());
|
||||
}
|
||||
|
||||
state Future<Void> watchFuture = tr.watch(healthyZoneKey);
|
||||
wait(tr.commit());
|
||||
wait(watchFuture);
|
||||
wait(watchFuture || healthyZoneTimeout);
|
||||
tr.reset();
|
||||
} catch(Error& e) {
|
||||
wait( tr.onError(e) );
|
||||
|
@ -2822,24 +2825,15 @@ ACTOR Future<Void> storageServerFailureTracker(
|
|||
if( status->isFailed )
|
||||
self->restartRecruiting.trigger();
|
||||
|
||||
state double startTime = now();
|
||||
Future<Void> healthChanged = Never();
|
||||
if(status->isFailed) {
|
||||
ASSERT(!inHealthyZone);
|
||||
healthChanged = IFailureMonitor::failureMonitor().onStateEqual( interf.waitFailure.getEndpoint(), FailureStatus(false));
|
||||
} else if(!inHealthyZone) {
|
||||
healthChanged = waitFailureClient(interf.waitFailure, SERVER_KNOBS->DATA_DISTRIBUTION_FAILURE_REACTION_TIME, 0, TaskDataDistribution);
|
||||
healthChanged = waitFailureClientStrict(interf.waitFailure, SERVER_KNOBS->DATA_DISTRIBUTION_FAILURE_REACTION_TIME, TaskDataDistribution);
|
||||
}
|
||||
choose {
|
||||
when ( wait(healthChanged) ) {
|
||||
double elapsed = now() - startTime;
|
||||
if(!status->isFailed && elapsed < SERVER_KNOBS->DATA_DISTRIBUTION_FAILURE_REACTION_TIME) {
|
||||
wait(delay(SERVER_KNOBS->DATA_DISTRIBUTION_FAILURE_REACTION_TIME - elapsed));
|
||||
if(!IFailureMonitor::failureMonitor().getState( interf.waitFailure.getEndpoint() ).isFailed()) {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
status->isFailed = !status->isFailed;
|
||||
if(!status->isFailed && !server->teams.size()) {
|
||||
self->doBuildTeams = true;
|
||||
|
|
|
@ -204,7 +204,7 @@ public:
|
|||
return tag.id % logServers.size();
|
||||
}
|
||||
|
||||
void updateLocalitySet( vector<LocalityData> const& localities ) {
|
||||
void updateLocalitySet( std::vector<LocalityData> const& localities ) {
|
||||
LocalityMap<int>* logServerMap;
|
||||
|
||||
logServerSet = Reference<LocalitySet>(new LocalityMap<int>());
|
||||
|
@ -418,7 +418,7 @@ struct ILogSystem {
|
|||
|
||||
struct MergedPeekCursor : IPeekCursor, ReferenceCounted<MergedPeekCursor> {
|
||||
Reference<LogSet> logSet;
|
||||
vector< Reference<IPeekCursor> > serverCursors;
|
||||
std::vector< Reference<IPeekCursor> > serverCursors;
|
||||
std::vector<LocalityEntry> locations;
|
||||
std::vector< std::pair<LogMessageVersion, int> > sortedVersions;
|
||||
Tag tag;
|
||||
|
@ -429,9 +429,9 @@ struct ILogSystem {
|
|||
UID randomID;
|
||||
int tLogReplicationFactor;
|
||||
|
||||
MergedPeekCursor( vector< Reference<ILogSystem::IPeekCursor> > const& serverCursors, Version begin );
|
||||
MergedPeekCursor( std::vector< Reference<ILogSystem::IPeekCursor> > const& serverCursors, Version begin );
|
||||
MergedPeekCursor( std::vector<Reference<AsyncVar<OptionalInterface<TLogInterface>>>> const& logServers, int bestServer, int readQuorum, Tag tag, Version begin, Version end, bool parallelGetMore, std::vector<LocalityData> const& tLogLocalities, Reference<IReplicationPolicy> const tLogPolicy, int tLogReplicationFactor );
|
||||
MergedPeekCursor( vector< Reference<IPeekCursor> > const& serverCursors, LogMessageVersion const& messageVersion, int bestServer, int readQuorum, Optional<LogMessageVersion> nextVersion, Reference<LogSet> logSet, int tLogReplicationFactor );
|
||||
MergedPeekCursor( std::vector< Reference<IPeekCursor> > const& serverCursors, LogMessageVersion const& messageVersion, int bestServer, int readQuorum, Optional<LogMessageVersion> nextVersion, Reference<LogSet> logSet, int tLogReplicationFactor );
|
||||
|
||||
virtual Reference<IPeekCursor> cloneNoMore();
|
||||
virtual void setProtocolVersion( uint64_t version );
|
||||
|
@ -636,7 +636,7 @@ struct ILogSystem {
|
|||
virtual Reference<IPeekCursor> peek( UID dbgid, Version begin, Optional<Version> end, std::vector<Tag> tags, bool parallelGetMore = false ) = 0;
|
||||
// Same contract as peek(), but for a set of tags
|
||||
|
||||
virtual Reference<IPeekCursor> peekSingle( UID dbgid, Version begin, Tag tag, vector<pair<Version,Tag>> history = vector<pair<Version,Tag>>() ) = 0;
|
||||
virtual Reference<IPeekCursor> peekSingle( UID dbgid, Version begin, Tag tag, std::vector<std::pair<Version,Tag>> history = std::vector<std::pair<Version,Tag>>() ) = 0;
|
||||
// Same contract as peek(), but blocks until the preferred log server(s) for the given tag are available (and is correspondingly less expensive)
|
||||
|
||||
virtual Reference<IPeekCursor> peekLogRouter( UID dbgid, Version begin, Tag tag ) = 0;
|
||||
|
@ -690,7 +690,7 @@ struct ILogSystem {
|
|||
virtual Future<Void> onLogSystemConfigChange() = 0;
|
||||
// Returns when the log system configuration has changed due to a tlog rejoin.
|
||||
|
||||
virtual void getPushLocations( std::vector<Tag> const& tags, vector<int>& locations ) = 0;
|
||||
virtual void getPushLocations( std::vector<Tag> const& tags, std::vector<int>& locations ) = 0;
|
||||
|
||||
virtual bool hasRemoteLogs() = 0;
|
||||
|
||||
|
@ -807,10 +807,10 @@ struct LogPushData : NonCopyable {
|
|||
|
||||
private:
|
||||
Reference<ILogSystem> logSystem;
|
||||
vector<Tag> next_message_tags;
|
||||
vector<Tag> prev_tags;
|
||||
vector<BinaryWriter> messagesWriter;
|
||||
vector<int> msg_locations;
|
||||
std::vector<Tag> next_message_tags;
|
||||
std::vector<Tag> prev_tags;
|
||||
std::vector<BinaryWriter> messagesWriter;
|
||||
std::vector<int> msg_locations;
|
||||
uint32_t subsequence;
|
||||
};
|
||||
|
||||
|
|
|
@ -436,6 +436,7 @@ struct LogData : NonCopyable, public ReferenceCounted<LogData> {
|
|||
specialCounter(cc, "PersistentDataVersion", [this](){ return this->persistentDataVersion; });
|
||||
specialCounter(cc, "PersistentDataDurableVersion", [this](){ return this->persistentDataDurableVersion; });
|
||||
specialCounter(cc, "KnownCommittedVersion", [this](){ return this->knownCommittedVersion; });
|
||||
specialCounter(cc, "QueuePoppedVersion", [this](){ return this->persistentDataDurableVersion; });
|
||||
specialCounter(cc, "SharedBytesInput", [tLogData](){ return tLogData->bytesInput; });
|
||||
specialCounter(cc, "SharedBytesDurable", [tLogData](){ return tLogData->bytesDurable; });
|
||||
specialCounter(cc, "SharedOverheadBytesInput", [tLogData](){ return tLogData->overheadBytesInput; });
|
||||
|
|
|
@ -29,6 +29,7 @@
|
|||
#include "fdbserver/ServerDBInfo.h"
|
||||
#include "fdbserver/Status.h"
|
||||
#include "fdbclient/ManagementAPI.actor.h"
|
||||
#include <boost/lexical_cast.hpp>
|
||||
#include "flow/actorcompiler.h" // This must be the last #include.
|
||||
|
||||
ACTOR Future<vector<WorkerDetails>> getWorkers( Reference<AsyncVar<ServerDBInfo>> dbInfo, int flags = 0 ) {
|
||||
|
@ -96,8 +97,7 @@ ACTOR Future<int64_t> getDataInFlight( Database cx, WorkerInterface distributorW
|
|||
TraceEvent("DataInFlight").detail("Stage", "ContactingDataDistributor");
|
||||
TraceEventFields md = wait( timeoutError(distributorWorker.eventLogRequest.getReply(
|
||||
EventLogRequest( LiteralStringRef("TotalDataInFlight") ) ), 1.0 ) );
|
||||
int64_t dataInFlight;
|
||||
sscanf(md.getValue("TotalBytes").c_str(), "%" SCNd64, &dataInFlight);
|
||||
int64_t dataInFlight = boost::lexical_cast<int64_t>(md.getValue("TotalBytes"));
|
||||
return dataInFlight;
|
||||
} catch( Error &e ) {
|
||||
TraceEvent("QuietDatabaseFailure", distributorWorker.id()).error(e).detail("Reason", "Failed to extract DataInFlight");
|
||||
|
@ -125,8 +125,16 @@ int64_t getQueueSize( const TraceEventFields& md ) {
|
|||
return inputBytes - durableBytes;
|
||||
}
|
||||
|
||||
//Computes the popped version lag for tlogs
|
||||
int64_t getPoppedVersionLag( const TraceEventFields& md ) {
|
||||
int64_t persistentDataDurableVersion = boost::lexical_cast<int64_t>(md.getValue("PersistentDataDurableVersion"));
|
||||
int64_t queuePoppedVersion = boost::lexical_cast<int64_t>(md.getValue("QueuePoppedVersion"));
|
||||
|
||||
return persistentDataDurableVersion - queuePoppedVersion;
|
||||
}
|
||||
|
||||
// This is not robust in the face of a TLog failure
|
||||
ACTOR Future<int64_t> getMaxTLogQueueSize( Database cx, Reference<AsyncVar<ServerDBInfo>> dbInfo ) {
|
||||
ACTOR Future<std::pair<int64_t,int64_t>> getTLogQueueInfo( Database cx, Reference<AsyncVar<ServerDBInfo>> dbInfo ) {
|
||||
TraceEvent("MaxTLogQueueSize").detail("Stage", "ContactingLogs");
|
||||
|
||||
state std::vector<WorkerDetails> workers = wait(getWorkers(dbInfo));
|
||||
|
@ -151,17 +159,19 @@ ACTOR Future<int64_t> getMaxTLogQueueSize( Database cx, Reference<AsyncVar<Serve
|
|||
TraceEvent("MaxTLogQueueSize").detail("Stage", "ComputingMax").detail("MessageCount", messages.size());
|
||||
|
||||
state int64_t maxQueueSize = 0;
|
||||
state int64_t maxPoppedVersionLag = 0;
|
||||
state int i = 0;
|
||||
for(; i < messages.size(); i++) {
|
||||
try {
|
||||
maxQueueSize = std::max( maxQueueSize, getQueueSize( messages[i].get() ) );
|
||||
maxPoppedVersionLag = std::max( maxPoppedVersionLag, getPoppedVersionLag( messages[i].get() ) );
|
||||
} catch( Error &e ) {
|
||||
TraceEvent("QuietDatabaseFailure").detail("Reason", "Failed to extract MaxTLogQueue").detail("Tlog", tlogs[i].id());
|
||||
throw;
|
||||
}
|
||||
}
|
||||
|
||||
return maxQueueSize;
|
||||
return std::make_pair( maxQueueSize, maxPoppedVersionLag );
|
||||
}
|
||||
|
||||
ACTOR Future<vector<StorageServerInterface>> getStorageServers( Database cx, bool use_system_priority = false) {
|
||||
|
@ -239,12 +249,10 @@ ACTOR Future<int64_t> getDataDistributionQueueSize( Database cx, WorkerInterface
|
|||
|
||||
TraceEvent("DataDistributionQueueSize").detail("Stage", "GotString");
|
||||
|
||||
int64_t inQueue;
|
||||
sscanf(movingDataMessage.getValue("InQueue").c_str(), "%" SCNd64, &inQueue);
|
||||
int64_t inQueue = boost::lexical_cast<int64_t>(movingDataMessage.getValue("InQueue"));
|
||||
|
||||
if(reportInFlight) {
|
||||
int64_t inFlight;
|
||||
sscanf(movingDataMessage.getValue("InFlight").c_str(), "%" SCNd64, &inFlight);
|
||||
int64_t inFlight = boost::lexical_cast<int64_t>(movingDataMessage.getValue("InFlight"));
|
||||
inQueue += inFlight;
|
||||
}
|
||||
|
||||
|
@ -275,23 +283,13 @@ ACTOR Future<bool> getTeamCollectionValid(Database cx, WorkerInterface dataDistr
|
|||
|
||||
TraceEvent("GetTeamCollectionValid").detail("Stage", "GotString");
|
||||
|
||||
int64_t currentTeamNumber;
|
||||
int64_t desiredTeamNumber;
|
||||
int64_t maxTeamNumber;
|
||||
int64_t currentMachineTeamNumber;
|
||||
int64_t healthyMachineTeamCount;
|
||||
int64_t desiredMachineTeamNumber;
|
||||
int64_t maxMachineTeamNumber;
|
||||
sscanf(teamCollectionInfoMessage.getValue("CurrentTeamNumber").c_str(), "%" SCNd64, ¤tTeamNumber);
|
||||
sscanf(teamCollectionInfoMessage.getValue("DesiredTeamNumber").c_str(), "%" SCNd64, &desiredTeamNumber);
|
||||
sscanf(teamCollectionInfoMessage.getValue("MaxTeamNumber").c_str(), "%" SCNd64, &maxTeamNumber);
|
||||
sscanf(teamCollectionInfoMessage.getValue("CurrentMachineTeamNumber").c_str(), "%" SCNd64,
|
||||
¤tMachineTeamNumber);
|
||||
sscanf(teamCollectionInfoMessage.getValue("CurrentHealthyMachineTeamNumber").c_str(), "%" SCNd64,
|
||||
&healthyMachineTeamCount);
|
||||
sscanf(teamCollectionInfoMessage.getValue("DesiredMachineTeams").c_str(), "%" SCNd64,
|
||||
&desiredMachineTeamNumber);
|
||||
sscanf(teamCollectionInfoMessage.getValue("MaxMachineTeams").c_str(), "%" SCNd64, &maxMachineTeamNumber);
|
||||
int64_t currentTeamNumber = boost::lexical_cast<int64_t>(teamCollectionInfoMessage.getValue("CurrentTeamNumber"));
|
||||
int64_t desiredTeamNumber = boost::lexical_cast<int64_t>(teamCollectionInfoMessage.getValue("DesiredTeamNumber"));
|
||||
int64_t maxTeamNumber = boost::lexical_cast<int64_t>(teamCollectionInfoMessage.getValue("MaxTeamNumber"));
|
||||
int64_t currentMachineTeamNumber = boost::lexical_cast<int64_t>(teamCollectionInfoMessage.getValue("CurrentMachineTeamNumber"));
|
||||
int64_t healthyMachineTeamCount = boost::lexical_cast<int64_t>(teamCollectionInfoMessage.getValue("CurrentHealthyMachineTeamNumber"));
|
||||
int64_t desiredMachineTeamNumber = boost::lexical_cast<int64_t>(teamCollectionInfoMessage.getValue("DesiredMachineTeams"));
|
||||
int64_t maxMachineTeamNumber = boost::lexical_cast<int64_t>(teamCollectionInfoMessage.getValue("MaxMachineTeams"));
|
||||
|
||||
// Team number is always valid when we disable teamRemover. This avoids false positive in simulation test
|
||||
if (SERVER_KNOBS->TR_FLAG_DISABLE_TEAM_REMOVER) {
|
||||
|
@ -398,7 +396,7 @@ ACTOR Future<Void> reconfigureAfter(Database cx, double time, Reference<AsyncVar
|
|||
}
|
||||
|
||||
ACTOR Future<Void> waitForQuietDatabase( Database cx, Reference<AsyncVar<ServerDBInfo>> dbInfo, std::string phase, int64_t dataInFlightGate = 2e6,
|
||||
int64_t maxTLogQueueGate = 5e6, int64_t maxStorageServerQueueGate = 5e6, int64_t maxDataDistributionQueueSize = 0 ) {
|
||||
int64_t maxTLogQueueGate = 5e6, int64_t maxStorageServerQueueGate = 5e6, int64_t maxDataDistributionQueueSize = 0, int64_t maxPoppedVersionLag = 30e6 ) {
|
||||
state Future<Void> reconfig = reconfigureAfter(cx, 100 + (g_random->random01()*100), dbInfo, "QuietDatabase");
|
||||
|
||||
TraceEvent(("QuietDatabase" + phase + "Begin").c_str());
|
||||
|
@ -418,26 +416,28 @@ ACTOR Future<Void> waitForQuietDatabase( Database cx, Reference<AsyncVar<ServerD
|
|||
TraceEvent("QuietDatabaseGotDataDistributor", distributorUID).detail("Locality", distributorWorker.locality.toString());
|
||||
|
||||
state Future<int64_t> dataInFlight = getDataInFlight( cx, distributorWorker);
|
||||
state Future<int64_t> tLogQueueSize = getMaxTLogQueueSize( cx, dbInfo );
|
||||
state Future<std::pair<int64_t,int64_t>> tLogQueueInfo = getTLogQueueInfo( cx, dbInfo );
|
||||
state Future<int64_t> dataDistributionQueueSize = getDataDistributionQueueSize( cx, distributorWorker, dataInFlightGate == 0);
|
||||
state Future<bool> teamCollectionValid = getTeamCollectionValid(cx, distributorWorker);
|
||||
state Future<int64_t> storageQueueSize = getMaxStorageServerQueueSize( cx, dbInfo );
|
||||
state Future<bool> dataDistributionActive = getDataDistributionActive( cx, distributorWorker );
|
||||
state Future<bool> storageServersRecruiting = getStorageServersRecruiting ( cx, distributorWorker, distributorUID );
|
||||
|
||||
wait(success(dataInFlight) && success(tLogQueueSize) && success(dataDistributionQueueSize) &&
|
||||
wait(success(dataInFlight) && success(tLogQueueInfo) && success(dataDistributionQueueSize) &&
|
||||
success(teamCollectionValid) && success(storageQueueSize) && success(dataDistributionActive) &&
|
||||
success(storageServersRecruiting));
|
||||
TraceEvent(("QuietDatabase" + phase).c_str())
|
||||
.detail("DataInFlight", dataInFlight.get())
|
||||
.detail("MaxTLogQueueSize", tLogQueueSize.get())
|
||||
.detail("DataDistributionQueueSize", dataDistributionQueueSize.get())
|
||||
.detail("TeamCollectionValid", teamCollectionValid.get())
|
||||
.detail("MaxStorageQueueSize", storageQueueSize.get())
|
||||
.detail("DataDistributionActive", dataDistributionActive.get())
|
||||
.detail("StorageServersRecruiting", storageServersRecruiting.get());
|
||||
|
||||
if (dataInFlight.get() > dataInFlightGate || tLogQueueSize.get() > maxTLogQueueGate ||
|
||||
TraceEvent(("QuietDatabase" + phase).c_str())
|
||||
.detail("DataInFlight", dataInFlight.get())
|
||||
.detail("MaxTLogQueueSize", tLogQueueInfo.get().first)
|
||||
.detail("MaxTLogPoppedVersionLag", tLogQueueInfo.get().second)
|
||||
.detail("DataDistributionQueueSize", dataDistributionQueueSize.get())
|
||||
.detail("TeamCollectionValid", teamCollectionValid.get())
|
||||
.detail("MaxStorageQueueSize", storageQueueSize.get())
|
||||
.detail("DataDistributionActive", dataDistributionActive.get())
|
||||
.detail("StorageServersRecruiting", storageServersRecruiting.get());
|
||||
|
||||
if (dataInFlight.get() > dataInFlightGate || tLogQueueInfo.get().first > maxTLogQueueGate || tLogQueueInfo.get().second > maxPoppedVersionLag ||
|
||||
dataDistributionQueueSize.get() > maxDataDistributionQueueSize ||
|
||||
storageQueueSize.get() > maxStorageServerQueueGate || dataDistributionActive.get() == false ||
|
||||
storageServersRecruiting.get() == true || teamCollectionValid.get() == false) {
|
||||
|
@ -470,6 +470,6 @@ ACTOR Future<Void> waitForQuietDatabase( Database cx, Reference<AsyncVar<ServerD
|
|||
}
|
||||
|
||||
Future<Void> quietDatabase( Database const& cx, Reference<AsyncVar<ServerDBInfo>> const& dbInfo, std::string phase, int64_t dataInFlightGate,
|
||||
int64_t maxTLogQueueGate, int64_t maxStorageServerQueueGate, int64_t maxDataDistributionQueueSize ) {
|
||||
return waitForQuietDatabase(cx, dbInfo, phase, dataInFlightGate, maxTLogQueueGate, maxStorageServerQueueGate, maxDataDistributionQueueSize);
|
||||
int64_t maxTLogQueueGate, int64_t maxStorageServerQueueGate, int64_t maxDataDistributionQueueSize, int64_t maxPoppedVersionLag ) {
|
||||
return waitForQuietDatabase(cx, dbInfo, phase, dataInFlightGate, maxTLogQueueGate, maxStorageServerQueueGate, maxDataDistributionQueueSize, maxPoppedVersionLag);
|
||||
}
|
||||
|
|
|
@ -29,7 +29,7 @@
|
|||
#include "flow/actorcompiler.h"
|
||||
|
||||
Future<int64_t> getDataInFlight( Database const& cx, Reference<AsyncVar<struct ServerDBInfo>> const& );
|
||||
Future<int64_t> getMaxTLogQueueSize( Database const& cx, Reference<AsyncVar<struct ServerDBInfo>> const& );
|
||||
Future<std::pair<int64_t,int64_t>> getTLogQueueInfo( Database const& cx, Reference<AsyncVar<struct ServerDBInfo>> const& );
|
||||
Future<int64_t> getMaxStorageServerQueueSize( Database const& cx, Reference<AsyncVar<struct ServerDBInfo>> const& );
|
||||
Future<int64_t> getDataDistributionQueueSize( Database const &cx, Reference<AsyncVar<struct ServerDBInfo>> const&, bool const& reportInFlight );
|
||||
Future<bool> getTeamCollectionValid(Database const& cx, WorkerInterface const&);
|
||||
|
|
|
@ -24,6 +24,7 @@
|
|||
#include <algorithm>
|
||||
#include <numeric>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
/*
|
||||
#ifdef __GNUG__
|
||||
|
@ -43,9 +44,8 @@
|
|||
|
||||
using std::min;
|
||||
using std::max;
|
||||
using std::make_pair;
|
||||
|
||||
static vector<PerfDoubleCounter*> skc;
|
||||
static std::vector<PerfDoubleCounter*> skc;
|
||||
|
||||
static thread_local uint32_t g_seed = 0;
|
||||
|
||||
|
@ -199,7 +199,7 @@ bool operator == (const KeyInfo& lhs, const KeyInfo& rhs ) {
|
|||
return !(lhs<rhs || rhs<lhs);
|
||||
}
|
||||
|
||||
void swapSort(vector<KeyInfo>& points, int a, int b){
|
||||
void swapSort(std::vector<KeyInfo>& points, int a, int b){
|
||||
if (points[b] < points[a]){
|
||||
KeyInfo temp;
|
||||
temp = points[a];
|
||||
|
@ -208,7 +208,7 @@ void swapSort(vector<KeyInfo>& points, int a, int b){
|
|||
}
|
||||
}
|
||||
|
||||
void smallSort(vector<KeyInfo>& points, int start, int N){
|
||||
void smallSort(std::vector<KeyInfo>& points, int start, int N){
|
||||
for (int i=1;i<N;i++)
|
||||
for (int j=i;j>0;j-=2)
|
||||
swapSort(points, start+j-1, start+j);
|
||||
|
@ -224,12 +224,12 @@ struct SortTask {
|
|||
SortTask(int begin, int size, int character) : begin(begin), size(size), character(character) {}
|
||||
};
|
||||
|
||||
void sortPoints(vector<KeyInfo>& points){
|
||||
vector<SortTask> tasks;
|
||||
vector<KeyInfo> newPoints;
|
||||
vector<int> counts;
|
||||
void sortPoints(std::vector<KeyInfo>& points){
|
||||
std::vector<SortTask> tasks;
|
||||
std::vector<KeyInfo> newPoints;
|
||||
std::vector<int> counts;
|
||||
|
||||
tasks.push_back( SortTask(0, points.size(), 0) );
|
||||
tasks.emplace_back(0, points.size(), 0);
|
||||
|
||||
while (tasks.size()){
|
||||
SortTask st = tasks.back();
|
||||
|
@ -259,7 +259,7 @@ void sortPoints(vector<KeyInfo>& points){
|
|||
for(int i=0;i<counts.size();i++){
|
||||
int temp = counts[i];
|
||||
if (temp > 1)
|
||||
tasks.push_back(SortTask(st.begin+total, temp, st.character+1));
|
||||
tasks.emplace_back(st.begin+total, temp, st.character+1);
|
||||
counts[i] = total;
|
||||
total += temp;
|
||||
}
|
||||
|
@ -569,7 +569,7 @@ public:
|
|||
}
|
||||
|
||||
void concatenate( SkipList* input, int count ) {
|
||||
vector<Finger> ends( count-1 );
|
||||
std::vector<Finger> ends( count-1 );
|
||||
for(int i=0; i<ends.size(); i++)
|
||||
input[i].getEnd( ends[i] );
|
||||
|
||||
|
@ -948,9 +948,9 @@ struct ConflictSet {
|
|||
SkipList versionHistory;
|
||||
Key removalKey;
|
||||
Version oldestVersion;
|
||||
vector<PAction> worker_nextAction;
|
||||
vector<Event*> worker_ready;
|
||||
vector<Event*> worker_finished;
|
||||
std::vector<PAction> worker_nextAction;
|
||||
std::vector<Event*> worker_ready;
|
||||
std::vector<Event*> worker_finished;
|
||||
};
|
||||
|
||||
ConflictSet* newConflictSet() { return new ConflictSet; }
|
||||
|
@ -989,18 +989,18 @@ void ConflictBatch::addTransaction( const CommitTransactionRef& tr ) {
|
|||
info->readRanges.resize( arena, tr.read_conflict_ranges.size() );
|
||||
info->writeRanges.resize( arena, tr.write_conflict_ranges.size() );
|
||||
|
||||
vector<KeyInfo> &points = this->points;
|
||||
std::vector<KeyInfo>& points = this->points;
|
||||
for(int r=0; r<tr.read_conflict_ranges.size(); r++) {
|
||||
const KeyRangeRef& range = tr.read_conflict_ranges[r];
|
||||
points.push_back( KeyInfo( range.begin, false, true, false, t, &info->readRanges[r].first ) );
|
||||
points.emplace_back(range.begin, false, true, false, t, &info->readRanges[r].first);
|
||||
//points.back().keyEnd = StringRef(buf,range.second);
|
||||
points.push_back( KeyInfo( range.end, false, false, false, t, &info->readRanges[r].second ) );
|
||||
combinedReadConflictRanges.push_back( ReadConflictRange( range.begin, range.end, tr.read_snapshot, t ) );
|
||||
points.emplace_back(range.end, false, false, false, t, &info->readRanges[r].second);
|
||||
combinedReadConflictRanges.emplace_back(range.begin, range.end, tr.read_snapshot, t);
|
||||
}
|
||||
for(int r=0; r<tr.write_conflict_ranges.size(); r++) {
|
||||
const KeyRangeRef& range = tr.write_conflict_ranges[r];
|
||||
points.push_back( KeyInfo( range.begin, false, true, true, t, &info->writeRanges[r].first ) );
|
||||
points.push_back( KeyInfo( range.end, false, false, true, t, &info->writeRanges[r].second ) );
|
||||
points.emplace_back(range.begin, false, true, true, t, &info->writeRanges[r].first);
|
||||
points.emplace_back(range.end, false, false, true, t, &info->writeRanges[r].second);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1008,7 +1008,7 @@ void ConflictBatch::addTransaction( const CommitTransactionRef& tr ) {
|
|||
}
|
||||
|
||||
class MiniConflictSet2 : NonCopyable {
|
||||
vector<bool> values;
|
||||
std::vector<bool> values;
|
||||
public:
|
||||
explicit MiniConflictSet2( int size ) {
|
||||
values.assign( size, false );
|
||||
|
@ -1028,21 +1028,21 @@ public:
|
|||
class MiniConflictSet : NonCopyable {
|
||||
typedef uint64_t wordType;
|
||||
enum { bucketShift = 6, bucketMask=sizeof(wordType)*8-1 };
|
||||
vector<wordType> values; // undefined when andValues is true for a range of values
|
||||
vector<wordType> orValues;
|
||||
vector<wordType> andValues;
|
||||
std::vector<wordType> values; // undefined when andValues is true for a range of values
|
||||
std::vector<wordType> orValues;
|
||||
std::vector<wordType> andValues;
|
||||
MiniConflictSet2 debug; // SOMEDAY: Test on big ranges, eliminate this
|
||||
|
||||
uint64_t bitMask(unsigned int bit){ // computes results for bit%word
|
||||
return (((wordType)1) << ( bit & bucketMask )); // '&' unnecesary?
|
||||
}
|
||||
void setNthBit(vector<wordType> &v, const unsigned int bit){
|
||||
void setNthBit(std::vector<wordType>& v, const unsigned int bit){
|
||||
v[bit>>bucketShift] |= bitMask(bit);
|
||||
}
|
||||
void clearNthBit(vector<wordType> &v, const unsigned int bit){
|
||||
void clearNthBit(std::vector<wordType>& v, const unsigned int bit){
|
||||
v[bit>>bucketShift] &= ~(bitMask(bit));
|
||||
}
|
||||
bool getNthBit(const vector<wordType> &v, const unsigned int bit){
|
||||
bool getNthBit(const std::vector<wordType>& v, const unsigned int bit){
|
||||
return (v[bit>>bucketShift] & bitMask(bit)) != 0;
|
||||
}
|
||||
int wordsForNBits(unsigned int bits){
|
||||
|
@ -1060,7 +1060,7 @@ class MiniConflictSet : NonCopyable {
|
|||
return (b&bucketMask) ? lowBits(b) : -1;
|
||||
}
|
||||
|
||||
void setBits(vector<wordType> &v, int bitBegin, int bitEnd, bool fillMiddle){
|
||||
void setBits(std::vector<wordType>& v, int bitBegin, int bitEnd, bool fillMiddle){
|
||||
if (bitBegin >= bitEnd) return;
|
||||
int beginWord = bitBegin>>bucketShift;
|
||||
int lastWord = ((bitEnd+bucketMask) >> bucketShift) - 1;
|
||||
|
@ -1075,7 +1075,7 @@ class MiniConflictSet : NonCopyable {
|
|||
}
|
||||
}
|
||||
|
||||
bool orBits(vector<wordType> &v, int bitBegin, int bitEnd, bool getMiddle) {
|
||||
bool orBits(std::vector<wordType>& v, int bitBegin, int bitEnd, bool getMiddle) {
|
||||
if (bitBegin >= bitEnd) return false;
|
||||
int beginWord = bitBegin >> bucketShift;
|
||||
int lastWord = ((bitEnd+bucketMask) >> bucketShift) - 1;
|
||||
|
@ -1152,7 +1152,7 @@ void ConflictBatch::checkIntraBatchConflicts() {
|
|||
}
|
||||
}
|
||||
|
||||
void ConflictBatch::GetTooOldTransactions(vector<int>& tooOldTransactions) {
|
||||
void ConflictBatch::GetTooOldTransactions(std::vector<int>& tooOldTransactions) {
|
||||
for (int i = 0; i<transactionInfo.size(); i++) {
|
||||
if (transactionInfo[i]->tooOld) {
|
||||
tooOldTransactions.push_back(i);
|
||||
|
@ -1160,7 +1160,7 @@ void ConflictBatch::GetTooOldTransactions(vector<int>& tooOldTransactions) {
|
|||
}
|
||||
}
|
||||
|
||||
void ConflictBatch::detectConflicts(Version now, Version newOldestVersion, vector<int>& nonConflicting, vector<int>* tooOldTransactions) {
|
||||
void ConflictBatch::detectConflicts(Version now, Version newOldestVersion, std::vector<int>& nonConflicting, std::vector<int>* tooOldTransactions) {
|
||||
double t = timer();
|
||||
sortPoints( points );
|
||||
//std::sort( combinedReadConflictRanges.begin(), combinedReadConflictRanges.end() );
|
||||
|
@ -1232,7 +1232,7 @@ DISABLE_ZERO_DIVISION_FLAG
|
|||
}
|
||||
}
|
||||
|
||||
void ConflictBatch::addConflictRanges(Version now, vector< pair<StringRef,StringRef> >::iterator begin, vector< pair<StringRef,StringRef> >::iterator end,SkipList* part) {
|
||||
void ConflictBatch::addConflictRanges(Version now, std::vector< std::pair<StringRef,StringRef> >::iterator begin, std::vector< std::pair<StringRef,StringRef> >::iterator end,SkipList* part) {
|
||||
int count = end-begin;
|
||||
#if 0
|
||||
//for(auto w = begin; w != end; ++w)
|
||||
|
@ -1262,16 +1262,16 @@ void ConflictBatch::mergeWriteConflictRanges(Version now) {
|
|||
return;
|
||||
|
||||
if (PARALLEL_THREAD_COUNT) {
|
||||
vector<SkipList> parts;
|
||||
std::vector<SkipList> parts;
|
||||
for (int i = 0; i < PARALLEL_THREAD_COUNT; i++)
|
||||
parts.push_back(SkipList());
|
||||
parts.emplace_back();
|
||||
|
||||
vector<StringRef> splits( parts.size()-1 );
|
||||
std::vector<StringRef> splits( parts.size()-1 );
|
||||
for(int s=0; s<splits.size(); s++)
|
||||
splits[s] = combinedWriteConflictRanges[ (s+1)*combinedWriteConflictRanges.size()/parts.size() ].first;
|
||||
|
||||
cs->versionHistory.partition( splits.size() ? &splits[0] : NULL, splits.size(), &parts[0] );
|
||||
vector<double> tstart(PARALLEL_THREAD_COUNT), tend(PARALLEL_THREAD_COUNT);
|
||||
std::vector<double> tstart(PARALLEL_THREAD_COUNT), tend(PARALLEL_THREAD_COUNT);
|
||||
Event done[PARALLEL_THREAD_COUNT ? PARALLEL_THREAD_COUNT : 1];
|
||||
double before = timer();
|
||||
for(int t=0; t<parts.size(); t++) {
|
||||
|
@ -1325,8 +1325,8 @@ void ConflictBatch::combineWriteConflictRanges()
|
|||
if (point.write && !transactionConflictStatus[ point.transaction ]) {
|
||||
if (point.begin) {
|
||||
activeWriteCount++;
|
||||
if (activeWriteCount == 1)
|
||||
combinedWriteConflictRanges.push_back( make_pair( point.key, KeyRef() ) );
|
||||
if (activeWriteCount == 1)
|
||||
combinedWriteConflictRanges.emplace_back(point.key, KeyRef());
|
||||
} else /*if (point.end)*/ {
|
||||
activeWriteCount--;
|
||||
if (activeWriteCount == 0)
|
||||
|
@ -1431,8 +1431,8 @@ void skipListTest() {
|
|||
Arena testDataArena;
|
||||
VectorRef< VectorRef<KeyRangeRef> > testData;
|
||||
testData.resize(testDataArena, 500);
|
||||
vector<vector<uint8_t>> success( testData.size() );
|
||||
vector<vector<uint8_t>> success2( testData.size() );
|
||||
std::vector<std::vector<uint8_t>> success( testData.size() );
|
||||
std::vector<std::vector<uint8_t>> success2( testData.size() );
|
||||
for(int i=0; i<testData.size(); i++) {
|
||||
testData[i].resize(testDataArena, 5000);
|
||||
success[i].assign( testData[i].size(), false );
|
||||
|
@ -1454,10 +1454,10 @@ void skipListTest() {
|
|||
int cranges = 0, tcount = 0;
|
||||
|
||||
start = timer();
|
||||
vector<vector<int>> nonConflict( testData.size() );
|
||||
std::vector<std::vector<int>> nonConflict( testData.size() );
|
||||
for(int i=0; i<testData.size(); i++) {
|
||||
Arena buf;
|
||||
vector<CommitTransactionRef> trs;
|
||||
std::vector<CommitTransactionRef> trs;
|
||||
double t = timer();
|
||||
for(int j=0; j+readCount+writeCount<=testData[i].size(); j+=readCount+writeCount) {
|
||||
CommitTransactionRef tr;
|
||||
|
|
|
@ -423,6 +423,7 @@ struct LogData : NonCopyable, public ReferenceCounted<LogData> {
|
|||
NotifiedVersion version, queueCommittedVersion;
|
||||
Version queueCommittingVersion;
|
||||
Version knownCommittedVersion, durableKnownCommittedVersion, minKnownCommittedVersion;
|
||||
Version queuePoppedVersion;
|
||||
|
||||
Deque<std::pair<Version, Standalone<VectorRef<uint8_t>>>> messageBlocks;
|
||||
std::vector<std::vector<Reference<TagData>>> tag_data; //tag.locality | tag.id
|
||||
|
@ -476,7 +477,7 @@ struct LogData : NonCopyable, public ReferenceCounted<LogData> {
|
|||
|
||||
explicit LogData(TLogData* tLogData, TLogInterface interf, Tag remoteTag, bool isPrimary, int logRouterTags, UID recruitmentID, uint64_t protocolVersion, std::vector<Tag> tags) : tLogData(tLogData), knownCommittedVersion(0), logId(interf.id()),
|
||||
cc("TLog", interf.id().toString()), bytesInput("BytesInput", cc), bytesDurable("BytesDurable", cc), remoteTag(remoteTag), isPrimary(isPrimary), logRouterTags(logRouterTags), recruitmentID(recruitmentID), protocolVersion(protocolVersion),
|
||||
logSystem(new AsyncVar<Reference<ILogSystem>>()), logRouterPoppedVersion(0), durableKnownCommittedVersion(0), minKnownCommittedVersion(0), allTags(tags.begin(), tags.end()), terminated(tLogData->terminated.getFuture()),
|
||||
logSystem(new AsyncVar<Reference<ILogSystem>>()), logRouterPoppedVersion(0), durableKnownCommittedVersion(0), minKnownCommittedVersion(0), queuePoppedVersion(0), allTags(tags.begin(), tags.end()), terminated(tLogData->terminated.getFuture()),
|
||||
// These are initialized differently on init() or recovery
|
||||
recoveryCount(), stopped(false), initialized(false), queueCommittingVersion(0), newPersistentDataVersion(invalidVersion), unrecoveredBefore(1), recoveredAt(1), unpoppedRecoveredTags(0),
|
||||
logRouterPopToVersion(0), locality(tagLocalityInvalid)
|
||||
|
@ -493,6 +494,7 @@ struct LogData : NonCopyable, public ReferenceCounted<LogData> {
|
|||
specialCounter(cc, "PersistentDataVersion", [this](){ return this->persistentDataVersion; });
|
||||
specialCounter(cc, "PersistentDataDurableVersion", [this](){ return this->persistentDataDurableVersion; });
|
||||
specialCounter(cc, "KnownCommittedVersion", [this](){ return this->knownCommittedVersion; });
|
||||
specialCounter(cc, "QueuePoppedVersion", [this](){ return this->queuePoppedVersion; });
|
||||
specialCounter(cc, "SharedBytesInput", [tLogData](){ return tLogData->bytesInput; });
|
||||
specialCounter(cc, "SharedBytesDurable", [tLogData](){ return tLogData->bytesDurable; });
|
||||
specialCounter(cc, "SharedOverheadBytesInput", [tLogData](){ return tLogData->overheadBytesInput; });
|
||||
|
@ -633,23 +635,15 @@ void updatePersistentPopped( TLogData* self, Reference<LogData> logData, Referen
|
|||
}
|
||||
|
||||
ACTOR Future<Void> updatePoppedLocation( TLogData* self, Reference<LogData> logData, Reference<LogData::TagData> data ) {
|
||||
// txsTag is spilled by value, so by definition, its poppable location is always up to the persistentDataVersion.
|
||||
// txsTag is spilled by value, so we do not need to track its popped location.
|
||||
if (data->tag == txsTag) {
|
||||
auto locationIter = logData->versionLocation.lower_bound(std::max<Version>(data->popped, logData->persistentDataVersion));
|
||||
if (locationIter != logData->versionLocation.end()) {
|
||||
data->poppedLocation = locationIter->value.first;
|
||||
} else {
|
||||
// We have no data, so whatever our previous value was is better than anything new we know how
|
||||
// to assign. Ideally, we'd use the most recent commit location, but that's surprisingly
|
||||
// difficult to track.
|
||||
}
|
||||
return Void();
|
||||
}
|
||||
|
||||
if (!data->requiresPoppedLocationUpdate) return Void();
|
||||
data->requiresPoppedLocationUpdate = false;
|
||||
|
||||
if (data->popped < logData->persistentDataVersion) {
|
||||
if (data->popped <= logData->persistentDataVersion) {
|
||||
// Recover the next needed location in the Disk Queue from the index.
|
||||
Standalone<VectorRef<KeyValueRef>> kvrefs = wait(
|
||||
self->persistentData->readRange(KeyRangeRef(
|
||||
|
@ -702,13 +696,19 @@ ACTOR Future<Void> popDiskQueue( TLogData* self, Reference<LogData> logData ) {
|
|||
}
|
||||
wait(waitForAll(updates));
|
||||
|
||||
auto lastItem = logData->versionLocation.lastItem();
|
||||
IDiskQueue::location minLocation = lastItem == logData->versionLocation.end() ? 0 : lastItem->value.second;
|
||||
IDiskQueue::location minLocation = 0;
|
||||
Version minVersion = 0;
|
||||
auto locationIter = logData->versionLocation.lower_bound(logData->persistentDataVersion);
|
||||
if (locationIter != logData->versionLocation.end()) {
|
||||
minLocation = locationIter->value.first;
|
||||
minVersion = locationIter->key;
|
||||
}
|
||||
for(int tagLocality = 0; tagLocality < logData->tag_data.size(); tagLocality++) {
|
||||
for(int tagId = 0; tagId < logData->tag_data[tagLocality].size(); tagId++) {
|
||||
Reference<LogData::TagData> tagData = logData->tag_data[tagLocality][tagId];
|
||||
if (tagData) {
|
||||
if (tagData && tagData->tag != txsTag && !tagData->nothingPersistent) {
|
||||
minLocation = std::min(minLocation, tagData->poppedLocation);
|
||||
minVersion = std::min(minVersion, tagData->popped);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -721,6 +721,7 @@ ACTOR Future<Void> popDiskQueue( TLogData* self, Reference<LogData> logData ) {
|
|||
lastCommittedLocation = locationIter->value.first;
|
||||
}
|
||||
self->persistentQueue->pop( std::min(minLocation, lastCommittedLocation) );
|
||||
logData->queuePoppedVersion = std::max(logData->queuePoppedVersion, minVersion);
|
||||
}
|
||||
|
||||
return Void();
|
||||
|
@ -747,6 +748,7 @@ ACTOR Future<Void> updatePersistentData( TLogData* self, Reference<LogData> logD
|
|||
for(tagId = 0; tagId < logData->tag_data[tagLocality].size(); tagId++) {
|
||||
state Reference<LogData::TagData> tagData = logData->tag_data[tagLocality][tagId];
|
||||
if(tagData) {
|
||||
wait(tagData->eraseMessagesBefore( tagData->popped, self, logData, TaskUpdateStorage ));
|
||||
state Version currentVersion = 0;
|
||||
// Clear recently popped versions from persistentData if necessary
|
||||
updatePersistentPopped( self, logData, tagData );
|
||||
|
|
|
@ -767,7 +767,7 @@ struct TagPartitionedLogSystem : ILogSystem, ReferenceCounted<TagPartitionedLogS
|
|||
}
|
||||
}
|
||||
|
||||
virtual Reference<IPeekCursor> peekSingle( UID dbgid, Version begin, Tag tag, vector<pair<Version,Tag>> history ) {
|
||||
virtual Reference<IPeekCursor> peekSingle( UID dbgid, Version begin, Tag tag, std::vector<std::pair<Version,Tag>> history ) {
|
||||
while(history.size() && begin >= history.back().first) {
|
||||
history.pop_back();
|
||||
}
|
||||
|
|
|
@ -55,6 +55,16 @@ ACTOR Future<Void> waitFailureClient(RequestStream<ReplyPromise<Void>> waitFailu
|
|||
}
|
||||
}
|
||||
|
||||
ACTOR Future<Void> waitFailureClientStrict(RequestStream<ReplyPromise<Void>> waitFailure, double failureReactionTime, int taskID){
|
||||
loop {
|
||||
wait(waitFailureClient(waitFailure, 0, 0, taskID));
|
||||
wait(delay(failureReactionTime, taskID) || IFailureMonitor::failureMonitor().onStateEqual( waitFailure.getEndpoint(), FailureStatus(false)));
|
||||
if(IFailureMonitor::failureMonitor().getState( waitFailure.getEndpoint() ).isFailed()) {
|
||||
return Void();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
ACTOR Future<Void> waitFailureTracker(RequestStream<ReplyPromise<Void>> waitFailure, Reference<AsyncVar<bool>> failed, double reactionTime, double reactionSlope, int taskID){
|
||||
loop {
|
||||
try {
|
||||
|
|
|
@ -28,6 +28,9 @@ Future<Void> waitFailureServer(const FutureStream<ReplyPromise<Void>>& waitFailu
|
|||
Future<Void> waitFailureClient(const RequestStream<ReplyPromise<Void>>& waitFailure,
|
||||
double const& failureReactionTime=0, double const& failureReactionSlope=0, int const& taskID=TaskDefaultEndpoint);
|
||||
|
||||
// talks to a wait failure server, returns Void on failure, reaction time is always waited
|
||||
Future<Void> waitFailureClientStrict(const RequestStream<ReplyPromise<Void>>& waitFailure, double const& failureReactionTime=0, int const& taskID=TaskDefaultEndpoint);
|
||||
|
||||
// talks to a wait failure server, updates failed to be true or false based on failure status.
|
||||
Future<Void> waitFailureTracker(const RequestStream<ReplyPromise<Void>>& waitFailure, Reference<AsyncVar<bool>> const& failed,
|
||||
double const& failureReactionTime=0, double const& failureReactionSlope=0, int const& taskID=TaskDefaultEndpoint);
|
||||
|
|
|
@ -54,6 +54,7 @@
|
|||
#include "fdbrpc/TLSConnection.h"
|
||||
#include "fdbrpc/Net2FileSystem.h"
|
||||
#include "fdbrpc/Platform.h"
|
||||
#include "fdbrpc/AsyncFileCached.actor.h"
|
||||
#include "fdbserver/CoroFlow.h"
|
||||
#include "flow/SignalSafeUnwind.h"
|
||||
#if defined(CMAKE_BUILD) || !defined(WIN32)
|
||||
|
@ -105,8 +106,8 @@ CSimpleOpt::SOption g_rgOptions[] = {
|
|||
{ OPT_MAXLOGS, "--maxlogs", SO_REQ_SEP },
|
||||
{ OPT_MAXLOGSSIZE, "--maxlogssize", SO_REQ_SEP },
|
||||
{ OPT_LOGGROUP, "--loggroup", SO_REQ_SEP },
|
||||
#ifdef _WIN32
|
||||
{ OPT_PARENTPID, "--parentpid", SO_REQ_SEP },
|
||||
#ifdef _WIN32
|
||||
{ OPT_NEWCONSOLE, "-n", SO_NONE },
|
||||
{ OPT_NEWCONSOLE, "--newconsole", SO_NONE },
|
||||
{ OPT_NOBOX, "-q", SO_NONE },
|
||||
|
@ -504,6 +505,15 @@ void parentWatcher(void *parentHandle) {
|
|||
criticalError( FDB_EXIT_SUCCESS, "ParentProcessExited", "Parent process exited" );
|
||||
TraceEvent(SevError, "ParentProcessWaitFailed").detail("RetCode", signal).GetLastError();
|
||||
}
|
||||
#else
|
||||
void* parentWatcher(void *arg) {
|
||||
int *parent_pid = (int*) arg;
|
||||
while(1) {
|
||||
sleep(1);
|
||||
if(getppid() != *parent_pid)
|
||||
criticalError( FDB_EXIT_SUCCESS, "ParentProcessExited", "Parent process exited" );
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
static void printVersion() {
|
||||
|
@ -1166,6 +1176,14 @@ int main(int argc, char* argv[]) {
|
|||
case OPT_NOBOX:
|
||||
SetErrorMode(SetErrorMode(0) | SEM_NOGPFAULTERRORBOX);
|
||||
break;
|
||||
#else
|
||||
case OPT_PARENTPID: {
|
||||
auto pid_str = args.OptionArg();
|
||||
int *parent_pid = new(int);
|
||||
*parent_pid = atoi(pid_str);
|
||||
startThread(&parentWatcher, parent_pid);
|
||||
break;
|
||||
}
|
||||
#endif
|
||||
case OPT_TESTFILE:
|
||||
testFile = args.OptionArg();
|
||||
|
@ -1449,6 +1467,9 @@ int main(int argc, char* argv[]) {
|
|||
}
|
||||
if (!serverKnobs->setKnob("server_mem_limit", std::to_string(memLimit))) ASSERT(false);
|
||||
|
||||
// evictionPolicyStringToEnum will throw an exception if the string is not recognized as a valid
|
||||
EvictablePageCache::evictionPolicyStringToEnum(flowKnobs->CACHE_EVICTION_POLICY);
|
||||
|
||||
if (role == SkipListTest) {
|
||||
skipListTest();
|
||||
flushAndExit(FDB_EXIT_SUCCESS);
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
<?xml version="1.0" encoding="utf-8"?>
|
||||
<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
|
||||
<Project DefaultTargets="Build" ToolsVersion="14.1" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
|
||||
<Import Project="$(SolutionDir)versions.target" />
|
||||
<PropertyGroup Condition="'$(Release)' != 'true' ">
|
||||
<PreReleaseDecoration>-PRERELEASE</PreReleaseDecoration>
|
||||
|
@ -240,12 +240,12 @@
|
|||
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|X64'" Label="Configuration">
|
||||
<ConfigurationType>Application</ConfigurationType>
|
||||
<CharacterSet>MultiByte</CharacterSet>
|
||||
<PlatformToolset>v140_xp</PlatformToolset>
|
||||
<PlatformToolset>v141</PlatformToolset>
|
||||
</PropertyGroup>
|
||||
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|X64'" Label="Configuration">
|
||||
<ConfigurationType>Application</ConfigurationType>
|
||||
<CharacterSet>MultiByte</CharacterSet>
|
||||
<PlatformToolset>v140_xp</PlatformToolset>
|
||||
<PlatformToolset>v141</PlatformToolset>
|
||||
</PropertyGroup>
|
||||
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
|
||||
<ImportGroup Label="ExtensionSettings">
|
||||
|
@ -269,6 +269,7 @@
|
|||
</Lib>
|
||||
<ClCompile>
|
||||
<PreprocessorDefinitions>FDB_VT_VERSION="$(Version)$(PreReleaseDecoration)";FDB_VT_PACKAGE_NAME="$(PackageName)";%(PreprocessorDefinitions)</PreprocessorDefinitions>
|
||||
<LanguageStandard>stdcpp17</LanguageStandard>
|
||||
</ClCompile>
|
||||
</ItemDefinitionGroup>
|
||||
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|X64'">
|
||||
|
@ -286,6 +287,7 @@
|
|||
<MultiProcessorCompilation>true</MultiProcessorCompilation>
|
||||
<AdditionalOptions>/bigobj @../flow/no_intellisense.opt %(AdditionalOptions)</AdditionalOptions>
|
||||
<PreprocessToFile>false</PreprocessToFile>
|
||||
<LanguageStandard>stdcpp17</LanguageStandard>
|
||||
</ClCompile>
|
||||
<Link>
|
||||
<SubSystem>Console</SubSystem>
|
||||
|
@ -311,6 +313,7 @@
|
|||
<FavorSizeOrSpeed>Speed</FavorSizeOrSpeed>
|
||||
<BufferSecurityCheck>false</BufferSecurityCheck>
|
||||
<MinimalRebuild>false</MinimalRebuild>
|
||||
<LanguageStandard>stdcpp17</LanguageStandard>
|
||||
</ClCompile>
|
||||
<Link>
|
||||
<SubSystem>Console</SubSystem>
|
||||
|
|
|
@ -429,12 +429,12 @@ ACTOR Future<std::vector<Message>> _listInboxMessages(Database cx, uint64_t inbo
|
|||
//printf(" -> cached message %016llx from feed %016llx\n", messageId, feed);
|
||||
if(messageId >= cursor) {
|
||||
//printf(" -> entering message %016llx from feed %016llx\n", messageId, feed);
|
||||
feedLatest.insert(pair<MessageId, Feed>(messageId, feed));
|
||||
feedLatest.emplace(messageId, feed);
|
||||
} else {
|
||||
// replace this with the first message older than the cursor
|
||||
MessageId mId = wait(getFeedLatestAtOrAfter(&tr, feed, cursor));
|
||||
if(mId) {
|
||||
feedLatest.insert(pair<MessageId, Feed>(mId, feed));
|
||||
feedLatest.emplace(mId, feed);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -465,7 +465,7 @@ ACTOR Future<std::vector<Message>> _listInboxMessages(Database cx, uint64_t inbo
|
|||
|
||||
MessageId nextMessage = wait(getFeedLatestAtOrAfter(&tr, f, id + 1));
|
||||
if(nextMessage) {
|
||||
feedLatest.insert(pair<MessageId, Feed>(nextMessage, f));
|
||||
feedLatest.emplace(nextMessage, f);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -52,6 +52,7 @@
|
|||
#include "flow/TDMetric.actor.h"
|
||||
#include "flow/actorcompiler.h" // This must be the last #include.
|
||||
|
||||
using std::pair;
|
||||
using std::make_pair;
|
||||
|
||||
#pragma region Data Structures
|
||||
|
@ -199,11 +200,11 @@ private:
|
|||
};
|
||||
|
||||
struct UpdateEagerReadInfo {
|
||||
vector<KeyRef> keyBegin;
|
||||
vector<Key> keyEnd; // these are for ClearRange
|
||||
std::vector<KeyRef> keyBegin;
|
||||
std::vector<Key> keyEnd; // these are for ClearRange
|
||||
|
||||
vector<pair<KeyRef, int>> keys;
|
||||
vector<Optional<Value>> value;
|
||||
std::vector<std::pair<KeyRef, int>> keys;
|
||||
std::vector<Optional<Value>> value;
|
||||
|
||||
Arena arena;
|
||||
|
||||
|
@ -223,13 +224,13 @@ struct UpdateEagerReadInfo {
|
|||
// CompareAndClear is likely to be used after another atomic operation on same key.
|
||||
keys.back().second = std::max(keys.back().second, m.param2.size() + 1);
|
||||
} else {
|
||||
keys.push_back(pair<KeyRef, int>(m.param1, m.param2.size() + 1));
|
||||
keys.emplace_back(m.param1, m.param2.size() + 1);
|
||||
}
|
||||
} else if ((m.type == MutationRef::AppendIfFits) || (m.type == MutationRef::ByteMin) ||
|
||||
(m.type == MutationRef::ByteMax))
|
||||
keys.push_back(pair<KeyRef, int>(m.param1, CLIENT_KNOBS->VALUE_SIZE_LIMIT));
|
||||
keys.emplace_back(m.param1, CLIENT_KNOBS->VALUE_SIZE_LIMIT);
|
||||
else if (isAtomicOp((MutationRef::Type) m.type))
|
||||
keys.push_back(pair<KeyRef, int>(m.param1, m.param2.size()));
|
||||
keys.emplace_back(m.param1, m.param2.size());
|
||||
}
|
||||
|
||||
void finishKeyBegin() {
|
||||
|
@ -2239,8 +2240,8 @@ void changeServerKeys( StorageServer* data, const KeyRangeRef& keys, bool nowAss
|
|||
// version. The latter depends on data->newestAvailableVersion, so loop over the ranges of that.
|
||||
// SOMEDAY: Could this just use shards? Then we could explicitly do the removeDataRange here when an adding/transferred shard is cancelled
|
||||
auto vr = data->newestAvailableVersion.intersectingRanges(keys);
|
||||
vector<std::pair<KeyRange,Version>> changeNewestAvailable;
|
||||
vector<KeyRange> removeRanges;
|
||||
std::vector<std::pair<KeyRange,Version>> changeNewestAvailable;
|
||||
std::vector<KeyRange> removeRanges;
|
||||
for (auto r = vr.begin(); r != vr.end(); ++r) {
|
||||
KeyRangeRef range = keys & r->range();
|
||||
bool dataAvailable = r->value()==latestVersion || r->value() >= version;
|
||||
|
@ -2255,7 +2256,7 @@ void changeServerKeys( StorageServer* data, const KeyRangeRef& keys, bool nowAss
|
|||
if (dataAvailable) {
|
||||
ASSERT( r->value() == latestVersion); // Not that we care, but this used to be checked instead of dataAvailable
|
||||
ASSERT( data->mutableData().getLatestVersion() > version || context == CSK_RESTORE );
|
||||
changeNewestAvailable.push_back(make_pair(range, version));
|
||||
changeNewestAvailable.emplace_back(range, version);
|
||||
removeRanges.push_back( range );
|
||||
}
|
||||
data->addShard( ShardInfo::newNotAssigned(range) );
|
||||
|
@ -2263,7 +2264,7 @@ void changeServerKeys( StorageServer* data, const KeyRangeRef& keys, bool nowAss
|
|||
} else if (!dataAvailable) {
|
||||
// SOMEDAY: Avoid restarting adding/transferred shards
|
||||
if (version==0){ // bypass fetchkeys; shard is known empty at version 0
|
||||
changeNewestAvailable.push_back(make_pair(range, latestVersion));
|
||||
changeNewestAvailable.emplace_back(range, latestVersion);
|
||||
data->addShard( ShardInfo::newReadWrite(range, data) );
|
||||
setAvailableStatus(data, range, true);
|
||||
} else {
|
||||
|
@ -2272,7 +2273,7 @@ void changeServerKeys( StorageServer* data, const KeyRangeRef& keys, bool nowAss
|
|||
data->addShard( ShardInfo::newAdding(data, range) );
|
||||
}
|
||||
} else {
|
||||
changeNewestAvailable.push_back(make_pair(range, latestVersion));
|
||||
changeNewestAvailable.emplace_back(range, latestVersion);
|
||||
data->addShard( ShardInfo::newReadWrite(range, data) );
|
||||
}
|
||||
}
|
||||
|
@ -2438,7 +2439,7 @@ private:
|
|||
rollback( data, rollbackVersion, currentVersion );
|
||||
}
|
||||
|
||||
data->recoveryVersionSkips.push_back(std::make_pair(rollbackVersion, currentVersion - rollbackVersion));
|
||||
data->recoveryVersionSkips.emplace_back(rollbackVersion, currentVersion - rollbackVersion);
|
||||
} else if (m.type == MutationRef::SetValue && m.param1 == killStoragePrivateKey) {
|
||||
throw worker_removed();
|
||||
} else if ((m.type == MutationRef::SetValue || m.type == MutationRef::ClearRange) && m.param1.substr(1).startsWith(serverTagPrefix)) {
|
||||
|
@ -3694,7 +3695,7 @@ void versionedMapTest() {
|
|||
printf("SS Ptree node is %zu bytes\n", sizeof( StorageServer::VersionedData::PTreeT ) );
|
||||
|
||||
const int NSIZE = sizeof(VersionedMap<int,int>::PTreeT);
|
||||
const int ASIZE = NSIZE<=64 ? 64 : NextPowerOfTwo<NSIZE>::Result;
|
||||
const int ASIZE = NSIZE<=64 ? 64 : NextFastAllocatedSize<NSIZE>::Result;
|
||||
|
||||
auto before = FastAllocator< ASIZE >::getTotalMemory();
|
||||
|
||||
|
|
|
@ -17,6 +17,8 @@
|
|||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
#include "fdbserver/workloads/workloads.actor.h"
|
||||
#include "flow/ActorCollection.h"
|
||||
|
@ -75,18 +77,18 @@ struct IOLog {
|
|||
ProcessLog issueR, completionR, durationR;
|
||||
ProcessLog issueW, completionW, durationW;
|
||||
|
||||
vector<pair<std::string, ProcessLog*> > logs;
|
||||
std::vector<std::pair<std::string, ProcessLog*> > logs;
|
||||
|
||||
IOLog(){
|
||||
logs.push_back(std::make_pair("issue", &issue));
|
||||
logs.push_back(std::make_pair("completion", &completion));
|
||||
logs.push_back(std::make_pair("duration", &duration));
|
||||
logs.push_back(std::make_pair("issueR", &issueR));
|
||||
logs.push_back(std::make_pair("completionR", &completionR));
|
||||
logs.push_back(std::make_pair("durationR", &durationR));
|
||||
logs.push_back(std::make_pair("issueW", &issueW));
|
||||
logs.push_back(std::make_pair("completionW", &completionW));
|
||||
logs.push_back(std::make_pair("durationW", &durationW));
|
||||
logs.emplace_back("issue", &issue);
|
||||
logs.emplace_back("completion", &completion);
|
||||
logs.emplace_back("duration", &duration);
|
||||
logs.emplace_back("issueR", &issueR);
|
||||
logs.emplace_back("completionR", &completionR);
|
||||
logs.emplace_back("durationR", &durationR);
|
||||
logs.emplace_back("issueW", &issueW);
|
||||
logs.emplace_back("completionW", &completionW);
|
||||
logs.emplace_back("durationW", &durationW);
|
||||
|
||||
duration.logLatency = true;
|
||||
durationR.logLatency = true;
|
||||
|
@ -138,10 +140,10 @@ struct IOLog {
|
|||
struct AsyncFileReadWorkload : public AsyncFileWorkload
|
||||
{
|
||||
//Buffers used to store what is being read or written
|
||||
vector<Reference<AsyncFileBuffer> > readBuffers;
|
||||
std::vector<Reference<AsyncFileBuffer> > readBuffers;
|
||||
|
||||
//The futures for the asynchronous read operations
|
||||
vector<Future<int> > readFutures;
|
||||
std::vector<Future<int> > readFutures;
|
||||
|
||||
//Number of reads to perform in parallel. Read tests are performed only if this is greater than zero
|
||||
int numParallelReads;
|
||||
|
@ -280,7 +282,7 @@ struct AsyncFileReadWorkload : public AsyncFileWorkload
|
|||
if (self->unbatched) {
|
||||
self->ioLog = new IOLog();
|
||||
|
||||
vector<Future<Void>> readers;
|
||||
std::vector<Future<Void>> readers;
|
||||
|
||||
for(int i=0; i<self->numParallelReads; i++)
|
||||
readers.push_back( readLoop(self, i, self->fixedRate / self->numParallelReads) );
|
||||
|
@ -333,14 +335,12 @@ struct AsyncFileReadWorkload : public AsyncFileWorkload
|
|||
}
|
||||
}
|
||||
|
||||
virtual void getMetrics(vector<PerfMetric>& m)
|
||||
{
|
||||
if(enabled)
|
||||
{
|
||||
m.push_back(PerfMetric("Bytes read/sec", bytesRead.getValue() / testDuration, false));
|
||||
m.push_back(PerfMetric("Average CPU Utilization (Percentage)", averageCpuUtilization * 100, false));
|
||||
virtual void getMetrics(std::vector<PerfMetric>& m) {
|
||||
if (enabled) {
|
||||
m.emplace_back("Bytes read/sec", bytesRead.getValue() / testDuration, false);
|
||||
m.emplace_back("Average CPU Utilization (Percentage)", averageCpuUtilization * 100, false);
|
||||
}
|
||||
}
|
||||
};
|
||||
};
|
||||
|
||||
WorkloadFactory<AsyncFileReadWorkload> AsyncFileReadWorkloadFactory("AsyncFileRead");
|
||||
|
|
|
@ -29,6 +29,9 @@
|
|||
#define FDBSERVER_BULK_SETUP_ACTOR_H
|
||||
|
||||
#include <string>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
#include "fdbclient/NativeAPI.actor.h"
|
||||
#include "fdbserver/workloads/workloads.actor.h"
|
||||
#include "fdbserver/ServerDBInfo.h"
|
||||
|
@ -102,14 +105,14 @@ Future<uint64_t> setupRange( Database cx, T* workload, uint64_t begin, uint64_t
|
|||
}
|
||||
|
||||
ACTOR template<class T>
|
||||
Future<uint64_t> setupRangeWorker( Database cx, T* workload, vector<std::pair<uint64_t,uint64_t>>* jobs, double maxKeyInsertRate, int keySaveIncrement, int actorId) {
|
||||
Future<uint64_t> setupRangeWorker( Database cx, T* workload, std::vector<std::pair<uint64_t,uint64_t>>* jobs, double maxKeyInsertRate, int keySaveIncrement, int actorId) {
|
||||
state double nextStart;
|
||||
state uint64_t loadedRanges = 0;
|
||||
state int lastStoredKeysLoaded = 0;
|
||||
state uint64_t keysLoaded = 0;
|
||||
state uint64_t bytesStored = 0;
|
||||
while (jobs->size()){
|
||||
state pair<uint64_t, uint64_t> job = jobs->back();
|
||||
state std::pair<uint64_t, uint64_t> job = jobs->back();
|
||||
jobs->pop_back();
|
||||
nextStart = now() + (job.second-job.first)/maxKeyInsertRate;
|
||||
uint64_t numBytes = wait( setupRange(cx, workload, job.first, job.second) );
|
||||
|
@ -152,7 +155,7 @@ Future<uint64_t> setupRangeWorker( Database cx, T* workload, vector<std::pair<ui
|
|||
//to reach that mark. Returns a vector of times (in seconds) corresponding to the counts in the countsOfInterest vector.
|
||||
|
||||
//Expects countsOfInterest to be sorted in ascending order
|
||||
ACTOR static Future<vector<pair<uint64_t, double> > > trackInsertionCount(Database cx, vector<uint64_t> countsOfInterest, double checkInterval)
|
||||
ACTOR static Future<std::vector<std::pair<uint64_t, double> > > trackInsertionCount(Database cx, std::vector<uint64_t> countsOfInterest, double checkInterval)
|
||||
{
|
||||
state KeyRange keyPrefix = KeyRangeRef(std::string("keycount"), std::string("keycount") + char(255));
|
||||
state KeyRange bytesPrefix = KeyRangeRef(std::string("bytesstored"), std::string("bytesstored") + char(255));
|
||||
|
@ -160,7 +163,7 @@ ACTOR static Future<vector<pair<uint64_t, double> > > trackInsertionCount(Databa
|
|||
state uint64_t lastInsertionCount = 0;
|
||||
state int currentCountIndex = 0;
|
||||
|
||||
state vector<pair<uint64_t, double> > countInsertionRates;
|
||||
state std::vector<std::pair<uint64_t, double> > countInsertionRates;
|
||||
|
||||
state double startTime = now();
|
||||
|
||||
|
@ -184,7 +187,7 @@ ACTOR static Future<vector<pair<uint64_t, double> > > trackInsertionCount(Databa
|
|||
bytesInserted += *(uint64_t*)bytes[i].value.begin();
|
||||
|
||||
while(currentCountIndex < countsOfInterest.size() && countsOfInterest[currentCountIndex] > lastInsertionCount && countsOfInterest[currentCountIndex] <= numInserted)
|
||||
countInsertionRates.push_back(pair<uint64_t, double>(countsOfInterest[currentCountIndex++], bytesInserted / (now() - startTime)));
|
||||
countInsertionRates.emplace_back(countsOfInterest[currentCountIndex++], bytesInserted / (now() - startTime));
|
||||
|
||||
lastInsertionCount = numInserted;
|
||||
wait(delay(checkInterval));
|
||||
|
@ -198,13 +201,16 @@ ACTOR static Future<vector<pair<uint64_t, double> > > trackInsertionCount(Databa
|
|||
return countInsertionRates;
|
||||
}
|
||||
|
||||
ACTOR template<class T>
|
||||
Future<Void> bulkSetup( Database cx, T* workload, uint64_t nodeCount, Promise<double> setupTime,
|
||||
bool valuesInconsequential = false, double postSetupWarming = 0.0, double maxKeyInsertRate = 1e12,
|
||||
vector<uint64_t> insertionCountsToMeasure = vector<uint64_t>(), Promise<vector<pair<uint64_t, double> > > ratesAtKeyCounts = Promise<vector<pair<uint64_t, double> > >(),
|
||||
int keySaveIncrement = 0, double keyCheckInterval = 0.1 ) {
|
||||
ACTOR template <class T>
|
||||
Future<Void> bulkSetup(Database cx, T* workload, uint64_t nodeCount, Promise<double> setupTime,
|
||||
bool valuesInconsequential = false, double postSetupWarming = 0.0,
|
||||
double maxKeyInsertRate = 1e12,
|
||||
std::vector<uint64_t> insertionCountsToMeasure = std::vector<uint64_t>(),
|
||||
Promise<std::vector<std::pair<uint64_t, double>>> ratesAtKeyCounts =
|
||||
Promise<std::vector<std::pair<uint64_t, double>>>(),
|
||||
int keySaveIncrement = 0, double keyCheckInterval = 0.1) {
|
||||
|
||||
state vector<pair<uint64_t,uint64_t>> jobs;
|
||||
state std::vector<std::pair<uint64_t,uint64_t>> jobs;
|
||||
state uint64_t startNode = (nodeCount * workload->clientId) / workload->clientCount;
|
||||
state uint64_t endNode = (nodeCount * (workload->clientId+1)) / workload->clientCount;
|
||||
|
||||
|
@ -226,7 +232,7 @@ Future<Void> bulkSetup( Database cx, T* workload, uint64_t nodeCount, Promise<do
|
|||
.detail("End", endNode)
|
||||
.detail("CheckMethod", "SimpleValueSize");
|
||||
setupTime.send(0.0);
|
||||
ratesAtKeyCounts.send(vector<pair<uint64_t, double> >());
|
||||
ratesAtKeyCounts.send(std::vector<std::pair<uint64_t, double> >());
|
||||
return Void();
|
||||
} else {
|
||||
TraceEvent("BulkRangeNotFound")
|
||||
|
@ -257,14 +263,14 @@ Future<Void> bulkSetup( Database cx, T* workload, uint64_t nodeCount, Promise<do
|
|||
|
||||
// create a random vector of range-create jobs
|
||||
for(uint64_t n=startNode; n<endNode; n+=BULK_SETUP_RANGE_SIZE)
|
||||
jobs.push_back( std::make_pair( n, std::min(endNode, n+BULK_SETUP_RANGE_SIZE) ) );
|
||||
jobs.emplace_back(n, std::min(endNode, n+BULK_SETUP_RANGE_SIZE));
|
||||
g_random->randomShuffle(jobs);
|
||||
|
||||
// fire up the workers and wait for them to eat all the jobs
|
||||
double maxWorkerInsertRate = maxKeyInsertRate / BULK_SETUP_WORKERS / workload->clientCount;
|
||||
state vector<Future<uint64_t>> fs;
|
||||
state std::vector<Future<uint64_t>> fs;
|
||||
|
||||
state Future<vector<pair<uint64_t, double> > > insertionTimes = vector<pair<uint64_t, double> >();
|
||||
state Future<std::vector<std::pair<uint64_t, double> > > insertionTimes = std::vector<std::pair<uint64_t, double>>();
|
||||
|
||||
if(insertionCountsToMeasure.size() > 0)
|
||||
{
|
||||
|
|
|
@ -219,13 +219,19 @@ struct ConsistencyCheckWorkload : TestWorkload
|
|||
}
|
||||
|
||||
//Check that nothing is in the TLog queues
|
||||
int64_t maxTLogQueueSize = wait(getMaxTLogQueueSize(cx, self->dbInfo));
|
||||
if(maxTLogQueueSize > 1e5) // FIXME: Should be zero?
|
||||
std::pair<int64_t,int64_t> maxTLogQueueInfo = wait(getTLogQueueInfo(cx, self->dbInfo));
|
||||
if(maxTLogQueueInfo.first > 1e5) // FIXME: Should be zero?
|
||||
{
|
||||
TraceEvent("ConsistencyCheck_NonZeroTLogQueue").detail("MaxQueueSize", maxTLogQueueSize);
|
||||
TraceEvent("ConsistencyCheck_NonZeroTLogQueue").detail("MaxQueueSize", maxTLogQueueInfo.first);
|
||||
self->testFailure("Non-zero tlog queue size");
|
||||
}
|
||||
|
||||
if(maxTLogQueueInfo.second > 30e6)
|
||||
{
|
||||
TraceEvent("ConsistencyCheck_PoppedVersionLag").detail("PoppedVersionLag", maxTLogQueueInfo.second);
|
||||
self->testFailure("large popped version lag");
|
||||
}
|
||||
|
||||
//Check that nothing is in the storage server queues
|
||||
try
|
||||
{
|
||||
|
@ -268,11 +274,11 @@ struct ConsistencyCheckWorkload : TestWorkload
|
|||
}
|
||||
|
||||
//Get a list of key servers; verify that the TLogs and master all agree about who the key servers are
|
||||
state Promise<vector<pair<KeyRange, vector<StorageServerInterface>>>> keyServerPromise;
|
||||
state Promise<std::vector<std::pair<KeyRange, std::vector<StorageServerInterface>>>> keyServerPromise;
|
||||
bool keyServerResult = wait(self->getKeyServers(cx, self, keyServerPromise));
|
||||
if(keyServerResult)
|
||||
{
|
||||
state vector<pair<KeyRange, vector<StorageServerInterface>>> keyServers = keyServerPromise.getFuture().get();
|
||||
state std::vector<std::pair<KeyRange, vector<StorageServerInterface>>> keyServers = keyServerPromise.getFuture().get();
|
||||
|
||||
//Get the locations of all the shards in the database
|
||||
state Promise<Standalone<VectorRef<KeyValueRef>>> keyLocationPromise;
|
||||
|
@ -323,9 +329,9 @@ struct ConsistencyCheckWorkload : TestWorkload
|
|||
//Get a list of storage servers from the master and compares them with the TLogs.
|
||||
//If this is a quiescent check, then each master proxy needs to respond, otherwise only one needs to respond.
|
||||
//Returns false if there is a failure (in this case, keyServersPromise will never be set)
|
||||
ACTOR Future<bool> getKeyServers(Database cx, ConsistencyCheckWorkload *self, Promise<vector<pair<KeyRange, vector<StorageServerInterface>>>> keyServersPromise)
|
||||
ACTOR Future<bool> getKeyServers(Database cx, ConsistencyCheckWorkload *self, Promise<std::vector<std::pair<KeyRange, vector<StorageServerInterface>>>> keyServersPromise)
|
||||
{
|
||||
state vector<pair<KeyRange, vector<StorageServerInterface>>> keyServers;
|
||||
state std::vector<std::pair<KeyRange, vector<StorageServerInterface>>> keyServers;
|
||||
|
||||
//Try getting key server locations from the master proxies
|
||||
state vector<Future<ErrorOr<GetKeyServerLocationsReply>>> keyServerLocationFutures;
|
||||
|
@ -382,7 +388,7 @@ struct ConsistencyCheckWorkload : TestWorkload
|
|||
|
||||
//Retrieves the locations of all shards in the database
|
||||
//Returns false if there is a failure (in this case, keyLocationPromise will never be set)
|
||||
ACTOR Future<bool> getKeyLocations(Database cx, vector<pair<KeyRange, vector<StorageServerInterface>>> shards, ConsistencyCheckWorkload *self, Promise<Standalone<VectorRef<KeyValueRef>>> keyLocationPromise)
|
||||
ACTOR Future<bool> getKeyLocations(Database cx, std::vector<std::pair<KeyRange, vector<StorageServerInterface>>> shards, ConsistencyCheckWorkload *self, Promise<Standalone<VectorRef<KeyValueRef>>> keyLocationPromise)
|
||||
{
|
||||
state Standalone<VectorRef<KeyValueRef>> keyLocations;
|
||||
state Key beginKey = allKeys.begin.withPrefix(keyServersPrefix);
|
||||
|
|
|
@ -17,6 +17,7 @@
|
|||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
#include <vector>
|
||||
|
||||
#include "fdbrpc/ContinuousSample.h"
|
||||
#include "fdbclient/NativeAPI.actor.h"
|
||||
|
@ -33,7 +34,7 @@ struct QueuePushWorkload : TestWorkload {
|
|||
std::string valueString;
|
||||
Key endingKey, startingKey;
|
||||
|
||||
vector<Future<Void>> clients;
|
||||
std::vector<Future<Void>> clients;
|
||||
PerfIntCounter transactions, retries;
|
||||
ContinuousSample<double> commitLatencies, GRVLatencies;
|
||||
|
||||
|
@ -58,25 +59,25 @@ struct QueuePushWorkload : TestWorkload {
|
|||
|
||||
virtual Future<bool> check( Database const& cx ) { return true; }
|
||||
|
||||
virtual void getMetrics( vector<PerfMetric>& m ) {
|
||||
virtual void getMetrics(std::vector<PerfMetric>& m ) {
|
||||
double duration = testDuration;
|
||||
int writes = transactions.getValue();
|
||||
m.push_back( PerfMetric( "Measured Duration", duration, true ) );
|
||||
m.push_back( PerfMetric( "Operations/sec", writes / duration, false ) );
|
||||
m.emplace_back("Measured Duration", duration, true);
|
||||
m.emplace_back("Operations/sec", writes / duration, false);
|
||||
m.push_back( transactions.getMetric() );
|
||||
m.push_back( retries.getMetric() );
|
||||
|
||||
m.push_back( PerfMetric( "Mean GRV Latency (ms)", 1000 * GRVLatencies.mean(), true ) );
|
||||
m.push_back( PerfMetric( "Median GRV Latency (ms, averaged)", 1000 * GRVLatencies.median(), true ) );
|
||||
m.push_back( PerfMetric( "90% GRV Latency (ms, averaged)", 1000 * GRVLatencies.percentile( 0.90 ), true ) );
|
||||
m.push_back( PerfMetric( "98% GRV Latency (ms, averaged)", 1000 * GRVLatencies.percentile( 0.98 ), true ) );
|
||||
m.emplace_back("Mean GRV Latency (ms)", 1000 * GRVLatencies.mean(), true);
|
||||
m.emplace_back("Median GRV Latency (ms, averaged)", 1000 * GRVLatencies.median(), true);
|
||||
m.emplace_back("90% GRV Latency (ms, averaged)", 1000 * GRVLatencies.percentile( 0.90 ), true);
|
||||
m.emplace_back("98% GRV Latency (ms, averaged)", 1000 * GRVLatencies.percentile( 0.98 ), true);
|
||||
|
||||
m.push_back( PerfMetric( "Mean Commit Latency (ms)", 1000 * commitLatencies.mean(), true ) );
|
||||
m.push_back( PerfMetric( "Median Commit Latency (ms, averaged)", 1000 * commitLatencies.median(), true ) );
|
||||
m.push_back( PerfMetric( "90% Commit Latency (ms, averaged)", 1000 * commitLatencies.percentile( 0.90 ), true ) );
|
||||
m.push_back( PerfMetric( "98% Commit Latency (ms, averaged)", 1000 * commitLatencies.percentile( 0.98 ), true ) );
|
||||
m.emplace_back("Mean Commit Latency (ms)", 1000 * commitLatencies.mean(), true);
|
||||
m.emplace_back("Median Commit Latency (ms, averaged)", 1000 * commitLatencies.median(), true);
|
||||
m.emplace_back("90% Commit Latency (ms, averaged)", 1000 * commitLatencies.percentile( 0.90 ), true);
|
||||
m.emplace_back("98% Commit Latency (ms, averaged)", 1000 * commitLatencies.percentile( 0.98 ), true);
|
||||
|
||||
m.push_back( PerfMetric( "Bytes written/sec", (writes * (keyBytes + valueBytes)) / duration, false ) );
|
||||
m.emplace_back("Bytes written/sec", (writes * (keyBytes + valueBytes)) / duration, false);
|
||||
}
|
||||
|
||||
static Key keyForIndex( int base, int offset ) { return StringRef( format( "%08x%08x", base, offset ) ); }
|
||||
|
@ -127,7 +128,7 @@ struct QueuePushWorkload : TestWorkload {
|
|||
lastKey = self->endingKey;
|
||||
}
|
||||
|
||||
pair<int, int> unpacked = valuesForKey( lastKey );
|
||||
std::pair<int, int> unpacked = valuesForKey( lastKey );
|
||||
|
||||
if( self->forward )
|
||||
tr.set( keyForIndex( unpacked.first + unpacked.second, g_random->randomInt(1, 1000) ),
|
||||
|
|
|
@ -19,6 +19,8 @@
|
|||
*/
|
||||
|
||||
#include <boost/lexical_cast.hpp>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
#include "fdbrpc/ContinuousSample.h"
|
||||
#include "fdbclient/NativeAPI.actor.h"
|
||||
|
@ -106,15 +108,15 @@ struct ReadWriteWorkload : KVWorkload {
|
|||
EventMetricHandle<TransactionFailureMetric> transactionFailureMetric;
|
||||
EventMetricHandle<ReadMetric> readMetric;
|
||||
|
||||
vector<Future<Void>> clients;
|
||||
std::vector<Future<Void>> clients;
|
||||
PerfIntCounter aTransactions, bTransactions, retries;
|
||||
ContinuousSample<double> latencies, readLatencies, commitLatencies, GRVLatencies, fullReadLatencies;
|
||||
double readLatencyTotal; int readLatencyCount;
|
||||
|
||||
vector<uint64_t> insertionCountsToMeasure;
|
||||
vector<pair<uint64_t, double> > ratesAtKeyCounts;
|
||||
std::vector<uint64_t> insertionCountsToMeasure;
|
||||
std::vector<std::pair<uint64_t, double> > ratesAtKeyCounts;
|
||||
|
||||
vector<PerfMetric> periodicMetrics;
|
||||
std::vector<PerfMetric> periodicMetrics;
|
||||
|
||||
bool doSetup;
|
||||
|
||||
|
@ -193,9 +195,9 @@ struct ReadWriteWorkload : KVWorkload {
|
|||
ASSERT((keyForIndex(a, false) <= keyForIndex(b, false)));
|
||||
}
|
||||
|
||||
vector<std::string> insertionCountsToMeasureString = getOption(options, LiteralStringRef("insertionCountsToMeasure"), vector<std::string>());
|
||||
for(int i = 0; i < insertionCountsToMeasureString.size(); i++)
|
||||
{
|
||||
std::vector<std::string> insertionCountsToMeasureString =
|
||||
getOption(options, LiteralStringRef("insertionCountsToMeasure"), std::vector<std::string>());
|
||||
for (int i = 0; i < insertionCountsToMeasureString.size(); i++) {
|
||||
try
|
||||
{
|
||||
uint64_t count = boost::lexical_cast<uint64_t>(insertionCountsToMeasureString[i]);
|
||||
|
@ -225,7 +227,7 @@ struct ReadWriteWorkload : KVWorkload {
|
|||
ACTOR static Future<bool> traceDumpWorkers( Reference<AsyncVar<ServerDBInfo>> db ) {
|
||||
try {
|
||||
loop {
|
||||
ErrorOr<vector<WorkerDetails>> workerList = wait( db->get().clusterInterface.getWorkers.tryGetReply( GetWorkersRequest() ) );
|
||||
ErrorOr<std::vector<WorkerDetails>> workerList = wait( db->get().clusterInterface.getWorkers.tryGetReply( GetWorkersRequest() ) );
|
||||
if( workerList.present() ) {
|
||||
std::vector<Future<ErrorOr<Void>>> dumpRequests;
|
||||
for( int i = 0; i < workerList.get().size(); i++)
|
||||
|
@ -254,53 +256,53 @@ struct ReadWriteWorkload : KVWorkload {
|
|||
return true;
|
||||
}
|
||||
|
||||
virtual void getMetrics( vector<PerfMetric>& m ) {
|
||||
virtual void getMetrics(std::vector<PerfMetric>& m) {
|
||||
double duration = metricsDuration;
|
||||
int reads = (aTransactions.getValue() * readsPerTransactionA) + (bTransactions.getValue() * readsPerTransactionB);
|
||||
int writes = (aTransactions.getValue() * writesPerTransactionA) + (bTransactions.getValue() * writesPerTransactionB);
|
||||
m.push_back( PerfMetric( "Measured Duration", duration, true ) );
|
||||
m.push_back( PerfMetric( "Transactions/sec", (aTransactions.getValue() + bTransactions.getValue()) / duration, false ) );
|
||||
m.push_back( PerfMetric( "Operations/sec", ( ( reads + writes ) / duration ), false ) );
|
||||
m.emplace_back("Measured Duration", duration, true);
|
||||
m.emplace_back("Transactions/sec", (aTransactions.getValue() + bTransactions.getValue()) / duration, false);
|
||||
m.emplace_back("Operations/sec", ( ( reads + writes ) / duration ), false);
|
||||
m.push_back( aTransactions.getMetric() );
|
||||
m.push_back( bTransactions.getMetric() );
|
||||
m.push_back( retries.getMetric() );
|
||||
m.push_back( PerfMetric( "Mean load time (seconds)", loadTime, true ) );
|
||||
m.push_back( PerfMetric( "Read rows", reads, false ) );
|
||||
m.push_back( PerfMetric( "Write rows", writes, false ) );
|
||||
m.emplace_back("Mean load time (seconds)", loadTime, true);
|
||||
m.emplace_back("Read rows", reads, false);
|
||||
m.emplace_back("Write rows", writes, false);
|
||||
|
||||
if(!rampUpLoad) {
|
||||
m.push_back(PerfMetric("Mean Latency (ms)", 1000 * latencies.mean(), true));
|
||||
m.push_back(PerfMetric("Median Latency (ms, averaged)", 1000 * latencies.median(), true));
|
||||
m.push_back(PerfMetric("90% Latency (ms, averaged)", 1000 * latencies.percentile(0.90), true));
|
||||
m.push_back(PerfMetric("98% Latency (ms, averaged)", 1000 * latencies.percentile(0.98), true));
|
||||
m.push_back(PerfMetric("Max Latency (ms, averaged)", 1000 * latencies.max(), true));
|
||||
m.emplace_back("Mean Latency (ms)", 1000 * latencies.mean(), true);
|
||||
m.emplace_back("Median Latency (ms, averaged)", 1000 * latencies.median(), true);
|
||||
m.emplace_back("90% Latency (ms, averaged)", 1000 * latencies.percentile(0.90), true);
|
||||
m.emplace_back("98% Latency (ms, averaged)", 1000 * latencies.percentile(0.98), true);
|
||||
m.emplace_back("Max Latency (ms, averaged)", 1000 * latencies.max(), true);
|
||||
|
||||
m.push_back(PerfMetric("Mean Row Read Latency (ms)", 1000 * readLatencies.mean(), true));
|
||||
m.push_back(PerfMetric("Median Row Read Latency (ms, averaged)", 1000 * readLatencies.median(), true));
|
||||
m.push_back(PerfMetric("Max Row Read Latency (ms, averaged)", 1000 * readLatencies.max(), true));
|
||||
m.emplace_back("Mean Row Read Latency (ms)", 1000 * readLatencies.mean(), true);
|
||||
m.emplace_back("Median Row Read Latency (ms, averaged)", 1000 * readLatencies.median(), true);
|
||||
m.emplace_back("Max Row Read Latency (ms, averaged)", 1000 * readLatencies.max(), true);
|
||||
|
||||
m.push_back(PerfMetric("Mean Total Read Latency (ms)", 1000 * fullReadLatencies.mean(), true));
|
||||
m.push_back(PerfMetric("Median Total Read Latency (ms, averaged)", 1000 * fullReadLatencies.median(), true));
|
||||
m.push_back(PerfMetric("Max Total Latency (ms, averaged)", 1000 * fullReadLatencies.max(), true));
|
||||
m.emplace_back("Mean Total Read Latency (ms)", 1000 * fullReadLatencies.mean(), true);
|
||||
m.emplace_back("Median Total Read Latency (ms, averaged)", 1000 * fullReadLatencies.median(), true);
|
||||
m.emplace_back("Max Total Latency (ms, averaged)", 1000 * fullReadLatencies.max(), true);
|
||||
|
||||
m.push_back(PerfMetric("Mean GRV Latency (ms)", 1000 * GRVLatencies.mean(), true));
|
||||
m.push_back(PerfMetric("Median GRV Latency (ms, averaged)", 1000 * GRVLatencies.median(), true));
|
||||
m.push_back(PerfMetric("Max GRV Latency (ms, averaged)", 1000 * GRVLatencies.max(), true));
|
||||
m.emplace_back("Mean GRV Latency (ms)", 1000 * GRVLatencies.mean(), true);
|
||||
m.emplace_back("Median GRV Latency (ms, averaged)", 1000 * GRVLatencies.median(), true);
|
||||
m.emplace_back("Max GRV Latency (ms, averaged)", 1000 * GRVLatencies.max(), true);
|
||||
|
||||
m.push_back(PerfMetric("Mean Commit Latency (ms)", 1000 * commitLatencies.mean(), true));
|
||||
m.push_back(PerfMetric("Median Commit Latency (ms, averaged)", 1000 * commitLatencies.median(), true));
|
||||
m.push_back(PerfMetric("Max Commit Latency (ms, averaged)", 1000 * commitLatencies.max(), true));
|
||||
m.emplace_back("Mean Commit Latency (ms)", 1000 * commitLatencies.mean(), true);
|
||||
m.emplace_back("Median Commit Latency (ms, averaged)", 1000 * commitLatencies.median(), true);
|
||||
m.emplace_back("Max Commit Latency (ms, averaged)", 1000 * commitLatencies.max(), true);
|
||||
}
|
||||
|
||||
m.push_back( PerfMetric( "Read rows/sec", reads / duration, false ) );
|
||||
m.push_back( PerfMetric( "Write rows/sec", writes / duration, false ) );
|
||||
m.push_back( PerfMetric( "Bytes read/sec", (reads * (keyBytes + (minValueBytes+maxValueBytes)*0.5)) / duration, false ) );
|
||||
m.push_back( PerfMetric( "Bytes written/sec", (writes * (keyBytes + (minValueBytes+maxValueBytes)*0.5)) / duration, false ) );
|
||||
m.emplace_back("Read rows/sec", reads / duration, false);
|
||||
m.emplace_back("Write rows/sec", writes / duration, false);
|
||||
m.emplace_back("Bytes read/sec", (reads * (keyBytes + (minValueBytes+maxValueBytes)*0.5)) / duration, false);
|
||||
m.emplace_back("Bytes written/sec", (writes * (keyBytes + (minValueBytes+maxValueBytes)*0.5)) / duration, false);
|
||||
m.insert(m.end(), periodicMetrics.begin(), periodicMetrics.end());
|
||||
|
||||
vector<pair<uint64_t, double> >::iterator ratesItr = ratesAtKeyCounts.begin();
|
||||
std::vector<std::pair<uint64_t, double> >::iterator ratesItr = ratesAtKeyCounts.begin();
|
||||
for(; ratesItr != ratesAtKeyCounts.end(); ratesItr++)
|
||||
m.push_back(PerfMetric(format("%ld keys imported bytes/sec", ratesItr->first), ratesItr->second, false));
|
||||
m.emplace_back(format("%ld keys imported bytes/sec", ratesItr->first), ratesItr->second, false);
|
||||
}
|
||||
|
||||
Value randomValue() { return StringRef( (uint8_t*)valueString.c_str(), g_random->randomInt(minValueBytes, maxValueBytes+1) ); }
|
||||
|
@ -336,40 +338,40 @@ struct ReadWriteWorkload : KVWorkload {
|
|||
bool recordEnd = self->shouldRecord( now() );
|
||||
if( recordBegin && recordEnd ) {
|
||||
std::string ts = format("T=%04.0fs:", elapsed);
|
||||
self->periodicMetrics.push_back( PerfMetric( ts + "Operations/sec", (ops-last_ops)/self->periodicLoggingInterval, false ) );
|
||||
self->periodicMetrics.emplace_back(ts + "Operations/sec", (ops-last_ops)/self->periodicLoggingInterval, false);
|
||||
|
||||
//if(self->rampUpLoad) {
|
||||
self->periodicMetrics.push_back(PerfMetric(ts + "Mean Latency (ms)", 1000 * self->latencies.mean(), true));
|
||||
self->periodicMetrics.push_back(PerfMetric(ts + "Median Latency (ms, averaged)", 1000 * self->latencies.median(), true));
|
||||
self->periodicMetrics.push_back(PerfMetric(ts + "5% Latency (ms, averaged)", 1000 * self->latencies.percentile(.05), true));
|
||||
self->periodicMetrics.push_back(PerfMetric(ts + "95% Latency (ms, averaged)", 1000 * self->latencies.percentile(.95), true));
|
||||
self->periodicMetrics.emplace_back(ts + "Mean Latency (ms)", 1000 * self->latencies.mean(), true);
|
||||
self->periodicMetrics.emplace_back(ts + "Median Latency (ms, averaged)", 1000 * self->latencies.median(), true);
|
||||
self->periodicMetrics.emplace_back(ts + "5% Latency (ms, averaged)", 1000 * self->latencies.percentile(.05), true);
|
||||
self->periodicMetrics.emplace_back(ts + "95% Latency (ms, averaged)", 1000 * self->latencies.percentile(.95), true);
|
||||
|
||||
self->periodicMetrics.push_back(PerfMetric(ts + "Mean Row Read Latency (ms)", 1000 * self->readLatencies.mean(), true));
|
||||
self->periodicMetrics.push_back(PerfMetric(ts + "Median Row Read Latency (ms, averaged)", 1000 * self->readLatencies.median(), true));
|
||||
self->periodicMetrics.push_back(PerfMetric(ts + "5% Row Read Latency (ms, averaged)", 1000 * self->readLatencies.percentile(.05), true));
|
||||
self->periodicMetrics.push_back(PerfMetric(ts + "95% Row Read Latency (ms, averaged)", 1000 * self->readLatencies.percentile(.95), true));
|
||||
self->periodicMetrics.emplace_back(ts + "Mean Row Read Latency (ms)", 1000 * self->readLatencies.mean(), true);
|
||||
self->periodicMetrics.emplace_back(ts + "Median Row Read Latency (ms, averaged)", 1000 * self->readLatencies.median(), true);
|
||||
self->periodicMetrics.emplace_back(ts + "5% Row Read Latency (ms, averaged)", 1000 * self->readLatencies.percentile(.05), true);
|
||||
self->periodicMetrics.emplace_back(ts + "95% Row Read Latency (ms, averaged)", 1000 * self->readLatencies.percentile(.95), true);
|
||||
|
||||
self->periodicMetrics.push_back(PerfMetric(ts + "Mean Total Read Latency (ms)", 1000 * self->fullReadLatencies.mean(), true));
|
||||
self->periodicMetrics.push_back(PerfMetric(ts + "Median Total Read Latency (ms, averaged)", 1000 * self->fullReadLatencies.median(), true));
|
||||
self->periodicMetrics.push_back(PerfMetric(ts + "5% Total Read Latency (ms, averaged)", 1000 * self->fullReadLatencies.percentile(.05), true));
|
||||
self->periodicMetrics.push_back(PerfMetric(ts + "95% Total Read Latency (ms, averaged)", 1000 * self->fullReadLatencies.percentile(.95), true));
|
||||
self->periodicMetrics.emplace_back(ts + "Mean Total Read Latency (ms)", 1000 * self->fullReadLatencies.mean(), true);
|
||||
self->periodicMetrics.emplace_back(ts + "Median Total Read Latency (ms, averaged)", 1000 * self->fullReadLatencies.median(), true);
|
||||
self->periodicMetrics.emplace_back(ts + "5% Total Read Latency (ms, averaged)", 1000 * self->fullReadLatencies.percentile(.05), true);
|
||||
self->periodicMetrics.emplace_back(ts + "95% Total Read Latency (ms, averaged)", 1000 * self->fullReadLatencies.percentile(.95), true);
|
||||
|
||||
self->periodicMetrics.push_back(PerfMetric(ts + "Mean GRV Latency (ms)", 1000 * self->GRVLatencies.mean(), true));
|
||||
self->periodicMetrics.push_back(PerfMetric(ts + "Median GRV Latency (ms, averaged)", 1000 * self->GRVLatencies.median(), true));
|
||||
self->periodicMetrics.push_back(PerfMetric(ts + "5% GRV Latency (ms, averaged)", 1000 * self->GRVLatencies.percentile(.05), true));
|
||||
self->periodicMetrics.push_back(PerfMetric(ts + "95% GRV Latency (ms, averaged)", 1000 * self->GRVLatencies.percentile(.95), true));
|
||||
self->periodicMetrics.emplace_back(ts + "Mean GRV Latency (ms)", 1000 * self->GRVLatencies.mean(), true);
|
||||
self->periodicMetrics.emplace_back(ts + "Median GRV Latency (ms, averaged)", 1000 * self->GRVLatencies.median(), true);
|
||||
self->periodicMetrics.emplace_back(ts + "5% GRV Latency (ms, averaged)", 1000 * self->GRVLatencies.percentile(.05), true);
|
||||
self->periodicMetrics.emplace_back(ts + "95% GRV Latency (ms, averaged)", 1000 * self->GRVLatencies.percentile(.95), true);
|
||||
|
||||
self->periodicMetrics.push_back(PerfMetric(ts + "Mean Commit Latency (ms)", 1000 * self->commitLatencies.mean(), true));
|
||||
self->periodicMetrics.push_back(PerfMetric(ts + "Median Commit Latency (ms, averaged)", 1000 * self->commitLatencies.median(), true));
|
||||
self->periodicMetrics.push_back(PerfMetric(ts + "5% Commit Latency (ms, averaged)", 1000 * self->commitLatencies.percentile(.05), true));
|
||||
self->periodicMetrics.push_back(PerfMetric(ts + "95% Commit Latency (ms, averaged)", 1000 * self->commitLatencies.percentile(.95), true));
|
||||
self->periodicMetrics.emplace_back(ts + "Mean Commit Latency (ms)", 1000 * self->commitLatencies.mean(), true);
|
||||
self->periodicMetrics.emplace_back(ts + "Median Commit Latency (ms, averaged)", 1000 * self->commitLatencies.median(), true);
|
||||
self->periodicMetrics.emplace_back(ts + "5% Commit Latency (ms, averaged)", 1000 * self->commitLatencies.percentile(.05), true);
|
||||
self->periodicMetrics.emplace_back(ts + "95% Commit Latency (ms, averaged)", 1000 * self->commitLatencies.percentile(.95), true);
|
||||
//}
|
||||
|
||||
self->periodicMetrics.push_back(PerfMetric(ts + "Max Latency (ms, averaged)", 1000 * self->latencies.max(), true));
|
||||
self->periodicMetrics.push_back(PerfMetric(ts + "Max Row Read Latency (ms, averaged)", 1000 * self->readLatencies.max(), true));
|
||||
self->periodicMetrics.push_back(PerfMetric(ts + "Max Total Read Latency (ms, averaged)", 1000 * self->fullReadLatencies.max(), true));
|
||||
self->periodicMetrics.push_back(PerfMetric(ts + "Max GRV Latency (ms, averaged)", 1000 * self->GRVLatencies.max(), true));
|
||||
self->periodicMetrics.push_back(PerfMetric(ts + "Max Commit Latency (ms, averaged)", 1000 * self->commitLatencies.max(), true));
|
||||
self->periodicMetrics.emplace_back(ts + "Max Latency (ms, averaged)", 1000 * self->latencies.max(), true);
|
||||
self->periodicMetrics.emplace_back(ts + "Max Row Read Latency (ms, averaged)", 1000 * self->readLatencies.max(), true);
|
||||
self->periodicMetrics.emplace_back(ts + "Max Total Read Latency (ms, averaged)", 1000 * self->fullReadLatencies.max(), true);
|
||||
self->periodicMetrics.emplace_back(ts + "Max GRV Latency (ms, averaged)", 1000 * self->GRVLatencies.max(), true);
|
||||
self->periodicMetrics.emplace_back(ts + "Max Commit Latency (ms, averaged)", 1000 * self->commitLatencies.max(), true);
|
||||
}
|
||||
last_ops = ops;
|
||||
|
||||
|
@ -450,7 +452,7 @@ struct ReadWriteWorkload : KVWorkload {
|
|||
return Void();
|
||||
|
||||
state Promise<double> loadTime;
|
||||
state Promise<vector<pair<uint64_t, double> > > ratesAtKeyCounts;
|
||||
state Promise<std::vector<std::pair<uint64_t, double> > > ratesAtKeyCounts;
|
||||
|
||||
wait( bulkSetup( cx, self, self->nodeCount, loadTime, self->insertionCountsToMeasure.empty(), self->warmingDelay, self->maxInsertRate,
|
||||
self->insertionCountsToMeasure, ratesAtKeyCounts ) );
|
||||
|
@ -481,7 +483,7 @@ struct ReadWriteWorkload : KVWorkload {
|
|||
|
||||
wait( delay( std::max(0.1, 1.0 - (now() - startTime) ) ) );
|
||||
|
||||
vector<Future<Void>> clients;
|
||||
std::vector<Future<Void>> clients;
|
||||
if(self->enableReadLatencyLogging)
|
||||
clients.push_back(tracePeriodically(self));
|
||||
|
||||
|
@ -554,9 +556,9 @@ struct ReadWriteWorkload : KVWorkload {
|
|||
state double tstart = now();
|
||||
state bool aTransaction = g_random->random01() > (self->rampTransactionType ? self->sweepAlpha(startTime) : self->alpha);
|
||||
|
||||
state vector<int64_t> keys;
|
||||
state vector<Value> values;
|
||||
state vector<KeyRange> extra_ranges;
|
||||
state std::vector<int64_t> keys;
|
||||
state std::vector<Value> values;
|
||||
state std::vector<KeyRange> extra_ranges;
|
||||
int reads = aTransaction ? self->readsPerTransactionA : self->readsPerTransactionB;
|
||||
state int writes = aTransaction ? self->writesPerTransactionA : self->writesPerTransactionB;
|
||||
state int extra_read_conflict_ranges = writes ? self->extraReadConflictRangesPerTransaction : 0;
|
||||
|
|
|
@ -33,7 +33,7 @@ struct WriteBandwidthWorkload : KVWorkload {
|
|||
double testDuration, warmingDelay, loadTime, maxInsertRate;
|
||||
std::string valueString;
|
||||
|
||||
vector<Future<Void>> clients;
|
||||
std::vector<Future<Void>> clients;
|
||||
PerfIntCounter transactions, retries;
|
||||
ContinuousSample<double> commitLatencies, GRVLatencies;
|
||||
|
||||
|
@ -56,29 +56,29 @@ struct WriteBandwidthWorkload : KVWorkload {
|
|||
|
||||
virtual Future<bool> check( Database const& cx ) { return true; }
|
||||
|
||||
virtual void getMetrics( vector<PerfMetric>& m ) {
|
||||
virtual void getMetrics( std::vector<PerfMetric>& m ) {
|
||||
double duration = testDuration;
|
||||
int writes = transactions.getValue() * keysPerTransaction;
|
||||
m.push_back( PerfMetric( "Measured Duration", duration, true ) );
|
||||
m.push_back( PerfMetric( "Transactions/sec", transactions.getValue() / duration, false ) );
|
||||
m.push_back( PerfMetric( "Operations/sec", writes / duration, false ) );
|
||||
m.push_back( transactions.getMetric() );
|
||||
m.push_back( retries.getMetric() );
|
||||
m.push_back( PerfMetric( "Mean load time (seconds)", loadTime, true ) );
|
||||
m.push_back( PerfMetric( "Write rows", writes, false ) );
|
||||
m.emplace_back("Measured Duration", duration, true);
|
||||
m.emplace_back("Transactions/sec", transactions.getValue() / duration, false);
|
||||
m.emplace_back("Operations/sec", writes / duration, false);
|
||||
m.push_back(transactions.getMetric());
|
||||
m.push_back(retries.getMetric());
|
||||
m.emplace_back("Mean load time (seconds)", loadTime, true);
|
||||
m.emplace_back("Write rows", writes, false);
|
||||
|
||||
m.push_back( PerfMetric( "Mean GRV Latency (ms)", 1000 * GRVLatencies.mean(), true ) );
|
||||
m.push_back( PerfMetric( "Median GRV Latency (ms, averaged)", 1000 * GRVLatencies.median(), true ) );
|
||||
m.push_back( PerfMetric( "90% GRV Latency (ms, averaged)", 1000 * GRVLatencies.percentile( 0.90 ), true ) );
|
||||
m.push_back( PerfMetric( "98% GRV Latency (ms, averaged)", 1000 * GRVLatencies.percentile( 0.98 ), true ) );
|
||||
m.emplace_back("Mean GRV Latency (ms)", 1000 * GRVLatencies.mean(), true);
|
||||
m.emplace_back("Median GRV Latency (ms, averaged)", 1000 * GRVLatencies.median(), true);
|
||||
m.emplace_back("90% GRV Latency (ms, averaged)", 1000 * GRVLatencies.percentile( 0.90 ), true);
|
||||
m.emplace_back("98% GRV Latency (ms, averaged)", 1000 * GRVLatencies.percentile( 0.98 ), true);
|
||||
|
||||
m.push_back( PerfMetric( "Mean Commit Latency (ms)", 1000 * commitLatencies.mean(), true ) );
|
||||
m.push_back( PerfMetric( "Median Commit Latency (ms, averaged)", 1000 * commitLatencies.median(), true ) );
|
||||
m.push_back( PerfMetric( "90% Commit Latency (ms, averaged)", 1000 * commitLatencies.percentile( 0.90 ), true ) );
|
||||
m.push_back( PerfMetric( "98% Commit Latency (ms, averaged)", 1000 * commitLatencies.percentile( 0.98 ), true ) );
|
||||
m.emplace_back("Mean Commit Latency (ms)", 1000 * commitLatencies.mean(), true);
|
||||
m.emplace_back("Median Commit Latency (ms, averaged)", 1000 * commitLatencies.median(), true);
|
||||
m.emplace_back("90% Commit Latency (ms, averaged)", 1000 * commitLatencies.percentile( 0.90 ), true);
|
||||
m.emplace_back("98% Commit Latency (ms, averaged)", 1000 * commitLatencies.percentile( 0.98 ), true);
|
||||
|
||||
m.push_back( PerfMetric( "Write rows/sec", writes / duration, false ) );
|
||||
m.push_back( PerfMetric( "Bytes written/sec", (writes * (keyBytes + (minValueBytes+maxValueBytes)*0.5)) / duration, false ) );
|
||||
m.emplace_back("Write rows/sec", writes / duration, false);
|
||||
m.emplace_back("Bytes written/sec", (writes * (keyBytes + (minValueBytes+maxValueBytes)*0.5)) / duration, false);
|
||||
}
|
||||
|
||||
Value randomValue() { return StringRef( (uint8_t*)valueString.c_str(), g_random->randomInt(minValueBytes, maxValueBytes+1) ); }
|
||||
|
@ -89,7 +89,7 @@ struct WriteBandwidthWorkload : KVWorkload {
|
|||
|
||||
ACTOR Future<Void> _setup( Database cx, WriteBandwidthWorkload *self ) {
|
||||
state Promise<double> loadTime;
|
||||
state Promise<vector<pair<uint64_t, double> > > ratesAtKeyCounts;
|
||||
state Promise<std::vector<std::pair<uint64_t, double> > > ratesAtKeyCounts;
|
||||
|
||||
wait( bulkSetup( cx, self, self->nodeCount, loadTime, true, self->warmingDelay, self->maxInsertRate ) );
|
||||
self->loadTime = loadTime.getFuture().get();
|
||||
|
|
|
@ -214,7 +214,7 @@ double testKeyToDouble(const KeyRef& p, const KeyRef& prefix);
|
|||
ACTOR Future<Void> databaseWarmer(Database cx);
|
||||
|
||||
Future<Void> quietDatabase( Database const& cx, Reference<AsyncVar<struct ServerDBInfo>> const&, std::string phase, int64_t dataInFlightGate = 2e6, int64_t maxTLogQueueGate = 5e6,
|
||||
int64_t maxStorageServerQueueGate = 5e6, int64_t maxDataDistributionQueueSize = 0);
|
||||
int64_t maxStorageServerQueueGate = 5e6, int64_t maxDataDistributionQueueSize = 0, int64_t maxPoppedVersionLag = 30e6);
|
||||
|
||||
|
||||
#include "flow/unactorcompiler.h"
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
<?xml version="1.0" encoding="utf-8"?>
|
||||
<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
|
||||
<Project DefaultTargets="Build" ToolsVersion="14.1" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
|
||||
<Import Project="$(SolutionDir)versions.target" />
|
||||
<PropertyGroup Condition="'$(Release)' != 'true' ">
|
||||
<PreReleaseDecoration>-PRERELEASE</PreReleaseDecoration>
|
||||
|
@ -28,14 +28,14 @@
|
|||
<ConfigurationType>Application</ConfigurationType>
|
||||
<UseDebugLibraries>true</UseDebugLibraries>
|
||||
<CharacterSet>MultiByte</CharacterSet>
|
||||
<PlatformToolset>v140_xp</PlatformToolset>
|
||||
<PlatformToolset>v141</PlatformToolset>
|
||||
</PropertyGroup>
|
||||
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
|
||||
<ConfigurationType>Application</ConfigurationType>
|
||||
<UseDebugLibraries>false</UseDebugLibraries>
|
||||
<WholeProgramOptimization>true</WholeProgramOptimization>
|
||||
<CharacterSet>MultiByte</CharacterSet>
|
||||
<PlatformToolset>v140_xp</PlatformToolset>
|
||||
<PlatformToolset>v141</PlatformToolset>
|
||||
</PropertyGroup>
|
||||
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
|
||||
<ImportGroup Label="ExtensionSettings">
|
||||
|
@ -52,6 +52,7 @@
|
|||
<ItemDefinitionGroup>
|
||||
<ClCompile>
|
||||
<PreprocessorDefinitions>FDB_VT_VERSION="$(Version)$(PreReleaseDecoration)";%(PreprocessorDefinitions)</PreprocessorDefinitions>
|
||||
<LanguageStandard>stdcpp17</LanguageStandard>
|
||||
</ClCompile>
|
||||
</ItemDefinitionGroup>
|
||||
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
|
||||
|
@ -60,6 +61,7 @@
|
|||
<Optimization>Disabled</Optimization>
|
||||
<RuntimeLibrary>MultiThreaded</RuntimeLibrary>
|
||||
<PreprocessorDefinitions>_WIN32_WINNT=0x0502;WINVER=0x0502;BOOST_ALL_NO_LIB;NTDDI_VERSION=0x05020000;_MBCS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
|
||||
<LanguageStandard>stdcpp17</LanguageStandard>
|
||||
</ClCompile>
|
||||
<Link>
|
||||
<GenerateDebugInformation>true</GenerateDebugInformation>
|
||||
|
@ -74,6 +76,7 @@
|
|||
<IntrinsicFunctions>true</IntrinsicFunctions>
|
||||
<RuntimeLibrary>MultiThreaded</RuntimeLibrary>
|
||||
<PreprocessorDefinitions>_WIN32_WINNT=0x0502;WINVER=0x0502;BOOST_ALL_NO_LIB;NTDDI_VERSION=0x05020000;_MBCS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
|
||||
<LanguageStandard>stdcpp17</LanguageStandard>
|
||||
</ClCompile>
|
||||
<Link>
|
||||
<GenerateDebugInformation>true</GenerateDebugInformation>
|
||||
|
|
|
@ -76,74 +76,3 @@ struct Traceable<std::pair<T, U>> {
|
|||
return result;
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
TEST_CASE("/flow/TraceEvent") {
|
||||
state unsigned i;
|
||||
state double startTime;
|
||||
state std::vector<std::string> strings;
|
||||
state std::vector<int> keyIdx;
|
||||
state std::vector<int> pairRnd;
|
||||
state std::vector<int> num;
|
||||
state std::vector<double> doub;
|
||||
state std::vector<int> strIdx;
|
||||
strings.reserve(10000);
|
||||
keyIdx.reserve(1e6);
|
||||
pairRnd.reserve(1e6);
|
||||
num.reserve(1e6);
|
||||
doub.reserve(1e6);
|
||||
strIdx.reserve(1e6);
|
||||
for (i = 0; i < 100; ++i) {
|
||||
for (int j = 0; j < 100; ++j) {
|
||||
strings.emplace_back(g_random->randomAlphaNumeric(g_random->randomInt(1, 30)));
|
||||
}
|
||||
wait(delay(0));
|
||||
}
|
||||
for (i = 0; i < 1e6; ++i) {
|
||||
keyIdx.emplace_back(g_random->randomInt(0, strings.size()));
|
||||
pairRnd.emplace_back(g_random->randomInt(-1000, 1000));
|
||||
num.emplace_back(g_random->randomInt(0, 1000));
|
||||
doub.emplace_back(g_random->random01());
|
||||
strIdx.emplace_back(g_random->randomInt(0, strings.size()));
|
||||
}
|
||||
TraceEvent("PairsFilled")
|
||||
.detail("MemoryUsage", getMemoryUsage());
|
||||
printf("Sleeping for 20 seconds - attach perf now to PID %d\n", getpid());
|
||||
wait(delay(20));
|
||||
printf("Done sleeping\n");
|
||||
startTime = g_network->now();
|
||||
for (i = 0; i < 100000; ++i) {
|
||||
for (unsigned j = 0; j < 100; ++j) {
|
||||
int idx = (i+1)*j % keyIdx.size();
|
||||
StringRef key(strings[keyIdx[idx]]);
|
||||
auto p = std::make_pair(key, pairRnd[idx]);
|
||||
TraceEvent("TestTraceLineNoDebug")
|
||||
.detail("Num", num[idx])
|
||||
.detail("Double", doub[idx])
|
||||
.detail("Str", strings[strIdx[idx]])
|
||||
.detail("Pair", p);
|
||||
}
|
||||
wait(delay(0));
|
||||
}
|
||||
TraceEvent("TraceDuration")
|
||||
.detail("Duration", g_network->now() - startTime);
|
||||
startTime = g_network->now();
|
||||
for (i = 0; i < 1000000; ++i) {
|
||||
for (unsigned j = 0; j < 100; ++j) {
|
||||
int idx = (i+1)*j % keyIdx.size();
|
||||
StringRef key(strings[keyIdx[idx]]);
|
||||
auto p = std::make_pair(key, pairRnd[idx]);
|
||||
TraceEvent(SevDebug, "TestTraceLineDebug")
|
||||
.detail("Num", num[idx])
|
||||
.detail("Double", doub[idx])
|
||||
.detail("Str", strings[strIdx[idx]])
|
||||
.detail("Pair", p);
|
||||
}
|
||||
wait(delay(0));
|
||||
}
|
||||
TraceEvent("TraceDuration")
|
||||
.detail("Duration", g_network->now() - startTime);
|
||||
printf("benchmark done\n");
|
||||
wait(delay(10));
|
||||
return Void();
|
||||
}
|
||||
|
|
|
@ -239,14 +239,15 @@ static int64_t getSizeCode(int i) {
|
|||
case 16: return 1;
|
||||
case 32: return 2;
|
||||
case 64: return 3;
|
||||
case 128: return 4;
|
||||
case 256: return 5;
|
||||
case 512: return 6;
|
||||
case 1024: return 7;
|
||||
case 2048: return 8;
|
||||
case 4096: return 9;
|
||||
case 8192: return 10;
|
||||
default: return 11;
|
||||
case 96: return 4;
|
||||
case 128: return 5;
|
||||
case 256: return 6;
|
||||
case 512: return 7;
|
||||
case 1024: return 8;
|
||||
case 2048: return 9;
|
||||
case 4096: return 10;
|
||||
case 8192: return 11;
|
||||
default: return 12;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
@ -496,6 +497,7 @@ void releaseAllThreadMagazines() {
|
|||
FastAllocator<16>::releaseThreadMagazines();
|
||||
FastAllocator<32>::releaseThreadMagazines();
|
||||
FastAllocator<64>::releaseThreadMagazines();
|
||||
FastAllocator<96>::releaseThreadMagazines();
|
||||
FastAllocator<128>::releaseThreadMagazines();
|
||||
FastAllocator<256>::releaseThreadMagazines();
|
||||
FastAllocator<512>::releaseThreadMagazines();
|
||||
|
@ -511,6 +513,7 @@ int64_t getTotalUnusedAllocatedMemory() {
|
|||
unusedMemory += FastAllocator<16>::getApproximateMemoryUnused();
|
||||
unusedMemory += FastAllocator<32>::getApproximateMemoryUnused();
|
||||
unusedMemory += FastAllocator<64>::getApproximateMemoryUnused();
|
||||
unusedMemory += FastAllocator<96>::getApproximateMemoryUnused();
|
||||
unusedMemory += FastAllocator<128>::getApproximateMemoryUnused();
|
||||
unusedMemory += FastAllocator<256>::getApproximateMemoryUnused();
|
||||
unusedMemory += FastAllocator<512>::getApproximateMemoryUnused();
|
||||
|
@ -525,6 +528,7 @@ int64_t getTotalUnusedAllocatedMemory() {
|
|||
template class FastAllocator<16>;
|
||||
template class FastAllocator<32>;
|
||||
template class FastAllocator<64>;
|
||||
template class FastAllocator<96>;
|
||||
template class FastAllocator<128>;
|
||||
template class FastAllocator<256>;
|
||||
template class FastAllocator<512>;
|
||||
|
|
|
@ -158,7 +158,7 @@ int64_t getTotalUnusedAllocatedMemory();
|
|||
void setFastAllocatorThreadInitFunction( void (*)() ); // The given function will be called at least once in each thread that allocates from a FastAllocator. Currently just one such function is tracked.
|
||||
|
||||
template<int X>
|
||||
class NextPowerOfTwo {
|
||||
class NextFastAllocatedSize {
|
||||
static const int A = X-1;
|
||||
static const int B = A | (A>>1);
|
||||
static const int C = B | (B>>2);
|
||||
|
@ -166,7 +166,7 @@ class NextPowerOfTwo {
|
|||
static const int E = D | (D>>8);
|
||||
static const int F = E | (E>>16);
|
||||
public:
|
||||
static const int Result = F+1;
|
||||
static const int Result = (X > 64 && X <= 96) ? 96 : F+1;
|
||||
};
|
||||
|
||||
template <class Object>
|
||||
|
@ -175,13 +175,13 @@ public:
|
|||
static void* operator new(size_t s) {
|
||||
if (s != sizeof(Object)) abort();
|
||||
INSTRUMENT_ALLOCATE(typeid(Object).name());
|
||||
void* p = FastAllocator<sizeof(Object)<=64 ? 64 : NextPowerOfTwo<sizeof(Object)>::Result>::allocate();
|
||||
void* p = FastAllocator<sizeof(Object)<=64 ? 64 : NextFastAllocatedSize<sizeof(Object)>::Result>::allocate();
|
||||
return p;
|
||||
}
|
||||
|
||||
static void operator delete(void* s) {
|
||||
INSTRUMENT_RELEASE(typeid(Object).name());
|
||||
FastAllocator<sizeof(Object)<=64 ? 64 : NextPowerOfTwo<sizeof(Object)>::Result>::release(s);
|
||||
FastAllocator<sizeof(Object)<=64 ? 64 : NextFastAllocatedSize<sizeof(Object)>::Result>::release(s);
|
||||
}
|
||||
// Redefine placement new so you can still use it
|
||||
static void* operator new( size_t, void* p ) { return p; }
|
||||
|
@ -192,6 +192,7 @@ static void* allocateFast(int size) {
|
|||
if (size <= 16) return FastAllocator<16>::allocate();
|
||||
if (size <= 32) return FastAllocator<32>::allocate();
|
||||
if (size <= 64) return FastAllocator<64>::allocate();
|
||||
if (size <= 96) return FastAllocator<96>::allocate();
|
||||
if (size <= 128) return FastAllocator<128>::allocate();
|
||||
if (size <= 256) return FastAllocator<256>::allocate();
|
||||
if (size <= 512) return FastAllocator<512>::allocate();
|
||||
|
@ -202,6 +203,7 @@ static void freeFast(int size, void* ptr) {
|
|||
if (size <= 16) return FastAllocator<16>::release(ptr);
|
||||
if (size <= 32) return FastAllocator<32>::release(ptr);
|
||||
if (size <= 64) return FastAllocator<64>::release(ptr);
|
||||
if (size <= 96) return FastAllocator<96>::release(ptr);
|
||||
if (size <= 128) return FastAllocator<128>::release(ptr);
|
||||
if (size <= 256) return FastAllocator<256>::release(ptr);
|
||||
if (size <= 512) return FastAllocator<512>::release(ptr);
|
||||
|
|
|
@ -78,6 +78,7 @@ FlowKnobs::FlowKnobs(bool randomize, bool isSimulated) {
|
|||
init( BUGGIFY_SIM_PAGE_CACHE_4K, 1e6 );
|
||||
init( BUGGIFY_SIM_PAGE_CACHE_64K, 1e6 );
|
||||
init( MAX_EVICT_ATTEMPTS, 100 ); if( randomize && BUGGIFY ) MAX_EVICT_ATTEMPTS = 2;
|
||||
init( CACHE_EVICTION_POLICY, "random" );
|
||||
init( PAGE_CACHE_TRUNCATE_LOOKUP_FRACTION, 0.1 ); if( randomize && BUGGIFY ) PAGE_CACHE_TRUNCATE_LOOKUP_FRACTION = 0.0; else if( randomize && BUGGIFY ) PAGE_CACHE_TRUNCATE_LOOKUP_FRACTION = 1.0;
|
||||
|
||||
//AsyncFileKAIO
|
||||
|
|
|
@ -93,6 +93,7 @@ public:
|
|||
int64_t SIM_PAGE_CACHE_64K;
|
||||
int64_t BUGGIFY_SIM_PAGE_CACHE_4K;
|
||||
int64_t BUGGIFY_SIM_PAGE_CACHE_64K;
|
||||
std::string CACHE_EVICTION_POLICY; // for now, "random", "lru", are supported
|
||||
int MAX_EVICT_ATTEMPTS;
|
||||
double PAGE_CACHE_TRUNCATE_LOOKUP_FRACTION;
|
||||
double TOO_MANY_CONNECTIONS_CLOSED_RESET_DELAY;
|
||||
|
|
|
@ -195,5 +195,3 @@ struct union_like_traits<std::variant<Alternatives...>> : std::true_type {
|
|||
member = a;
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
|
|
|
@ -2477,6 +2477,7 @@ void outOfMemory() {
|
|||
TRACEALLOCATOR(16);
|
||||
TRACEALLOCATOR(32);
|
||||
TRACEALLOCATOR(64);
|
||||
TRACEALLOCATOR(96);
|
||||
TRACEALLOCATOR(128);
|
||||
TRACEALLOCATOR(256);
|
||||
TRACEALLOCATOR(512);
|
||||
|
|
|
@ -79,23 +79,6 @@
|
|||
#define DISABLE_ZERO_DIVISION_FLAG _Pragma("GCC diagnostic ignored \"-Wdiv-by-zero\"")
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Thread-local storage (but keep in mind any platform-specific
|
||||
* restrictions on where this is valid and/or ignored).
|
||||
*
|
||||
* http://en.wikipedia.org/wiki/Thread-local_storage
|
||||
*
|
||||
* SOMEDAY: Intel C++ compiler uses g++ syntax on Linux and MSC syntax
|
||||
* on Windows.
|
||||
*/
|
||||
#if defined(__GNUG__)
|
||||
#define thread_local __thread
|
||||
#elif defined(_MSC_VER)
|
||||
#define thread_local __declspec(thread)
|
||||
#else
|
||||
#error Missing thread local storage
|
||||
#endif
|
||||
|
||||
#if defined(__GNUG__)
|
||||
#define force_inline inline __attribute__((__always_inline__))
|
||||
#elif defined(_MSC_VER)
|
||||
|
|
|
@ -89,6 +89,9 @@ SystemStatistics customSystemMonitor(std::string eventName, StatisticsState *sta
|
|||
.detail("CachePageReadsMerged", netData.countFileCachePageReadsMerged - statState->networkState.countFileCachePageReadsMerged)
|
||||
.detail("CacheWrites", netData.countFileCacheWrites - statState->networkState.countFileCacheWrites)
|
||||
.detail("CacheReads", netData.countFileCacheReads - statState->networkState.countFileCacheReads)
|
||||
.detail("CacheHits", netData.countFilePageCacheHits - statState->networkState.countFilePageCacheHits)
|
||||
.detail("CacheMisses", netData.countFilePageCacheMisses - statState->networkState.countFilePageCacheMisses)
|
||||
.detail("CacheEvictions", netData.countFilePageCacheEvictions - statState->networkState.countFilePageCacheEvictions)
|
||||
.detail("ZoneID", machineState.zoneId)
|
||||
.detail("MachineID", machineState.machineId)
|
||||
.detail("AIOSubmitCount", netData.countAIOSubmit - statState->networkState.countAIOSubmit)
|
||||
|
@ -105,6 +108,7 @@ SystemStatistics customSystemMonitor(std::string eventName, StatisticsState *sta
|
|||
.DETAILALLOCATORMEMUSAGE(16)
|
||||
.DETAILALLOCATORMEMUSAGE(32)
|
||||
.DETAILALLOCATORMEMUSAGE(64)
|
||||
.DETAILALLOCATORMEMUSAGE(96)
|
||||
.DETAILALLOCATORMEMUSAGE(128)
|
||||
.DETAILALLOCATORMEMUSAGE(256)
|
||||
.DETAILALLOCATORMEMUSAGE(512)
|
||||
|
@ -256,6 +260,7 @@ SystemStatistics customSystemMonitor(std::string eventName, StatisticsState *sta
|
|||
TRACEALLOCATOR(16);
|
||||
TRACEALLOCATOR(32);
|
||||
TRACEALLOCATOR(64);
|
||||
TRACEALLOCATOR(96);
|
||||
TRACEALLOCATOR(128);
|
||||
TRACEALLOCATOR(256);
|
||||
TRACEALLOCATOR(512);
|
||||
|
|
|
@ -74,6 +74,9 @@ struct NetworkData {
|
|||
int64_t countFileCachePageReadsMerged;
|
||||
int64_t countFileCacheFinds;
|
||||
int64_t countFileCacheReadBytes;
|
||||
int64_t countFilePageCacheHits;
|
||||
int64_t countFilePageCacheMisses;
|
||||
int64_t countFilePageCacheEvictions;
|
||||
int64_t countConnEstablished;
|
||||
int64_t countConnClosedWithError;
|
||||
int64_t countConnClosedWithoutError;
|
||||
|
@ -121,6 +124,9 @@ struct NetworkData {
|
|||
countFileCachePageReadsMerged = getValue(LiteralStringRef("AsyncFile.CountCachePageReadsMerged"));
|
||||
countFileCacheFinds = getValue(LiteralStringRef("AsyncFile.CountCacheFinds"));
|
||||
countFileCacheReadBytes = getValue(LiteralStringRef("AsyncFile.CountCacheReadBytes"));
|
||||
countFilePageCacheHits = getValue(LiteralStringRef("EvictablePageCache.CacheHits"));
|
||||
countFilePageCacheMisses = getValue(LiteralStringRef("EvictablePageCache.CacheMisses"));
|
||||
countFilePageCacheEvictions = getValue(LiteralStringRef("EvictablePageCache.CacheEvictions"));
|
||||
}
|
||||
};
|
||||
|
||||
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue