Merge branch 'master' into thread-safe-random-number-generation

# Conflicts:
#	fdbclient/ManagementAPI.actor.cpp
#	fdbrpc/AsyncFileCached.actor.h
#	fdbrpc/genericactors.actor.cpp
#	fdbrpc/sim2.actor.cpp
#	fdbserver/DiskQueue.actor.cpp
#	fdbserver/workloads/BulkSetup.actor.h
#	flow/ActorCollection.actor.cpp
#	flow/Net2.actor.cpp
#	flow/Trace.cpp
#	flow/flow.cpp
This commit is contained in:
A.J. Beamon 2019-05-23 08:35:47 -07:00
commit 603721e125
144 changed files with 4520 additions and 1250 deletions

View File

@ -196,7 +196,7 @@ if (CMAKE_EXPORT_COMPILE_COMMANDS)
DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/build/gen_compile_db.py ${CMAKE_CURRENT_BINARY_DIR}/compile_commands.json
COMMENT "Build compile commands for IDE"
)
add_custom_target(procossed_compile_commands ALL DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/compile_commands.json ${CMAKE_CURRENT_BINARY_DIR}/compile_commands.json)
add_custom_target(processed_compile_commands ALL DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/compile_commands.json ${CMAKE_CURRENT_BINARY_DIR}/compile_commands.json)
endif()
################################################################################

View File

@ -1,5 +1,5 @@
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<Project DefaultTargets="Build" ToolsVersion="14.1" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup Label="ProjectConfigurations">
<ProjectConfiguration Include="Debug|X64">
<Configuration>Debug</Configuration>
@ -23,11 +23,11 @@
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|X64'" Label="Configuration">
<ConfigurationType>StaticLibrary</ConfigurationType>
<CharacterSet>MultiByte</CharacterSet>
<PlatformToolset>v140_xp</PlatformToolset>
<PlatformToolset>v141</PlatformToolset>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|X64'" Label="Configuration">
<ConfigurationType>StaticLibrary</ConfigurationType>
<CharacterSet>MultiByte</CharacterSet>
<PlatformToolset>v140_xp</PlatformToolset>
<PlatformToolset>v141</PlatformToolset>
</PropertyGroup>
</Project>

View File

@ -243,11 +243,11 @@ std::tuple<bool,std::string> FDBLibTLSSession::check_verify(Reference<FDBLibTLSV
// Verify the certificate.
if ((store_ctx = X509_STORE_CTX_new()) == NULL) {
TraceEvent(SevError, "FDBLibTLSOutOfMemory", uid);
reason = "FDBLibTLSOutOfMemory";
reason = "Out of memory";
goto err;
}
if (!X509_STORE_CTX_init(store_ctx, NULL, sk_X509_value(certs, 0), certs)) {
reason = "FDBLibTLSStoreCtxInit";
reason = "Store ctx init";
goto err;
}
X509_STORE_CTX_trusted_stack(store_ctx, policy->roots);
@ -256,31 +256,31 @@ std::tuple<bool,std::string> FDBLibTLSSession::check_verify(Reference<FDBLibTLSV
X509_VERIFY_PARAM_set_flags(X509_STORE_CTX_get0_param(store_ctx), X509_V_FLAG_NO_CHECK_TIME);
if (X509_verify_cert(store_ctx) <= 0) {
const char *errstr = X509_verify_cert_error_string(X509_STORE_CTX_get_error(store_ctx));
reason = "FDBLibTLSVerifyCert VerifyError " + std::string(errstr);
reason = "Verify cert error: " + std::string(errstr);
goto err;
}
// Check subject criteria.
cert = sk_X509_value(store_ctx->chain, 0);
if ((subject = X509_get_subject_name(cert)) == NULL) {
reason = "FDBLibTLSCertSubjectError";
reason = "Cert subject error";
goto err;
}
for (auto &pair: verify->subject_criteria) {
if (!match_criteria(cert, subject, pair.first, pair.second.criteria, pair.second.match_type, pair.second.location)) {
reason = "FDBLibTLSCertSubjectMatchFailure";
reason = "Cert subject match failure";
goto err;
}
}
// Check issuer criteria.
if ((issuer = X509_get_issuer_name(cert)) == NULL) {
reason = "FDBLibTLSCertIssuerError";
reason = "Cert issuer error";
goto err;
}
for (auto &pair: verify->issuer_criteria) {
if (!match_criteria(cert, issuer, pair.first, pair.second.criteria, pair.second.match_type, pair.second.location)) {
reason = "FDBLibTLSCertIssuerMatchFailure";
reason = "Cert issuer match failure";
goto err;
}
}
@ -288,12 +288,12 @@ std::tuple<bool,std::string> FDBLibTLSSession::check_verify(Reference<FDBLibTLSV
// Check root criteria - this is the subject of the final certificate in the stack.
cert = sk_X509_value(store_ctx->chain, sk_X509_num(store_ctx->chain) - 1);
if ((subject = X509_get_subject_name(cert)) == NULL) {
reason = "FDBLibTLSRootSubjectError";
reason = "Root subject error";
goto err;
}
for (auto &pair: verify->root_criteria) {
if (!match_criteria(cert, subject, pair.first, pair.second.criteria, pair.second.match_type, pair.second.location)) {
reason = "FDBLibTLSRootSubjectMatchFailure";
reason = "Root subject match failure";
goto err;
}
}
@ -343,7 +343,7 @@ bool FDBLibTLSSession::verify_peer() {
if (!rc) {
// log the various failure reasons
for (std::string reason : verify_failure_reasons) {
TraceEvent(reason.c_str(), uid).suppressFor(1.0);
TraceEvent("FDBLibTLSVerifyFailure", uid).suppressFor(1.0).detail("Reason", reason);
}
}

View File

@ -1,28 +1,27 @@
export
PLATFORM := $(shell uname)
ARCH := $(shell uname -m)
ifeq ("$(wildcard /etc/centos-release)", "")
LIBSTDCPP_HACK = 1
else
LIBSTDCPP_HACK = 0
endif
TOPDIR := $(shell pwd)
# Allow custom libc++ hack for Ubuntu
ifeq ("$(wildcard /etc/centos-release)", "")
LIBSTDCPP_HACK ?= 1
endif
ifeq ($(ARCH),x86_64)
ARCH := x64
else
$(error Not prepared to compile on $(ARCH))
endif
MONO := $(shell which mono)
MONO := $(shell which mono 2>/dev/null)
ifeq ($(MONO),)
MONO := /usr/bin/mono
endif
MCS := $(shell which mcs)
MCS := $(shell which mcs 2>/dev/null)
ifeq ($(MCS),)
MCS := $(shell which dmcs)
MCS := $(shell which dmcs 2>/dev/null)
endif
ifeq ($(MCS),)
MCS := /usr/bin/mcs
@ -43,7 +42,7 @@ ifeq ($(PLATFORM),Linux)
CC ?= gcc
CXX ?= g++
CXXFLAGS += -std=c++0x
CXXFLAGS += -std=c++17
BOOST_BASEDIR ?= /opt
TLS_LIBDIR ?= /usr/local/lib
@ -56,8 +55,8 @@ else ifeq ($(PLATFORM),Darwin)
CC := /usr/bin/clang
CXX := /usr/bin/clang
CFLAGS += -mmacosx-version-min=10.7 -stdlib=libc++
CXXFLAGS += -mmacosx-version-min=10.7 -std=c++11 -stdlib=libc++ -msse4.2 -Wno-undefined-var-template -Wno-unknown-warning-option
CFLAGS += -mmacosx-version-min=10.14 -stdlib=libc++
CXXFLAGS += -mmacosx-version-min=10.14 -std=c++17 -stdlib=libc++ -msse4.2 -Wno-undefined-var-template -Wno-unknown-warning-option
.LIBPATTERNS := lib%.dylib lib%.a
@ -70,7 +69,7 @@ else
endif
BOOSTDIR ?= ${BOOST_BASEDIR}/${BOOST_BASENAME}
CCACHE := $(shell which ccache)
CCACHE := $(shell which ccache 2>/dev/null)
ifneq ($(CCACHE),)
CCACHE_CC := $(CCACHE) $(CC)
CCACHE_CXX := $(CCACHE) $(CXX)

View File

@ -1,5 +1,5 @@
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<Project DefaultTargets="Build" ToolsVersion="14.1" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<PropertyGroup Condition="'$(Release)' != 'true' ">
</PropertyGroup>
<PropertyGroup Condition="'$(Release)' == 'true' ">
@ -38,14 +38,14 @@
<ConfigurationType>DynamicLibrary</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<CharacterSet>MultiByte</CharacterSet>
<PlatformToolset>v140_xp</PlatformToolset>
<PlatformToolset>v141</PlatformToolset>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
<ConfigurationType>DynamicLibrary</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>MultiByte</CharacterSet>
<PlatformToolset>v140_xp</PlatformToolset>
<PlatformToolset>v141</PlatformToolset>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
<ImportGroup Label="ExtensionSettings">
@ -83,6 +83,7 @@ FOR /F "tokens=1" %%i in ('hg.exe id') do copy /Y "$(TargetPath)" "$(TargetPath)
<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=_WIN32_WINNT_WS03;BOOST_ALL_NO_LIB;WINVER=_WIN32_WINNT_WS03;NTDDI_VERSION=NTDDI_WS03;_DEBUG;_HAS_ITERATOR_DEBUGGING=0;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
<AdditionalOptions>/bigobj @..\..\flow/no_intellisense.opt %(AdditionalOptions)</AdditionalOptions>
<LanguageStandard>stdcpp17</LanguageStandard>
</ClCompile>
<Link>
<GenerateDebugInformation>true</GenerateDebugInformation>
@ -98,6 +99,7 @@ FOR /F "tokens=1" %%i in ('hg.exe id') do copy /Y "$(TargetPath)" "$(TargetPath)
<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=_WIN32_WINNT_WS03;BOOST_ALL_NO_LIB;WINVER=_WIN32_WINNT_WS03;NTDDI_VERSION=NTDDI_WS03;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<RuntimeLibrary>MultiThreaded</RuntimeLibrary>
<AdditionalOptions>/bigobj @..\..\flow/no_intellisense.opt %(AdditionalOptions)</AdditionalOptions>
<LanguageStandard>stdcpp17</LanguageStandard>
</ClCompile>
<Link>
<GenerateDebugInformation>true</GenerateDebugInformation>

View File

@ -24,15 +24,21 @@ fdb_c_CFLAGS := $(fdbclient_CFLAGS)
fdb_c_LDFLAGS := $(fdbrpc_LDFLAGS)
fdb_c_LIBS := lib/libfdbclient.a lib/libfdbrpc.a lib/libflow.a $(FDB_TLS_LIB)
fdb_c_STATIC_LIBS := $(TLS_LIBS)
fdb_c_tests_LIBS := -Llib -lfdb_c
fdb_c_tests_LIBS := -Llib -lfdb_c -lstdc++
fdb_c_tests_HEADERS := -Ibindings/c
CLEAN_TARGETS += fdb_c_tests_clean
ifeq ($(PLATFORM),linux)
fdb_c_LDFLAGS += -Wl,--version-script=bindings/c/fdb_c.map -static-libgcc -Wl,-z,nodelete -lm -lpthread -lrt -ldl
ifeq ($(LIBSTDCPP_HACK),1)
fdb_c_LIBS += lib/libstdc++.a
# Link our custom libstdc++ statically in Ubuntu, if hacking
ifeq ("$(wildcard /etc/centos-release)", "")
ifeq ($(LIBSTDCPP_HACK),1)
fdb_c_LIBS += lib/libstdc++.a
endif
# Link stdc++ statically in Centos, if not hacking
else
fdb_c_STATIC_LIBS += -static-libstdc++
endif
fdb_c_tests_LIBS += -lpthread
endif
@ -86,11 +92,11 @@ bindings/c/foundationdb/fdb_c_options.g.h: bin/vexillographer.exe fdbclient/vexi
bin/fdb_c_performance_test: bindings/c/test/performance_test.c bindings/c/test/test.h fdb_c
@echo "Compiling fdb_c_performance_test"
@$(CC) $(CFLAGS) $(fdb_c_tests_HEADERS) -o $@ bindings/c/test/performance_test.c $(fdb_c_tests_LIBS)
@$(CC) $(CFLAGS) $(fdb_c_tests_HEADERS) -o $@ bindings/c/test/performance_test.c $(fdb_c_tests_LIBS)
bin/fdb_c_ryw_benchmark: bindings/c/test/ryw_benchmark.c bindings/c/test/test.h fdb_c
@echo "Compiling fdb_c_ryw_benchmark"
@$(CC) $(CFLAGS) $(fdb_c_tests_HEADERS) -o $@ bindings/c/test/ryw_benchmark.c $(fdb_c_tests_LIBS)
@$(CC) $(CFLAGS) $(fdb_c_tests_HEADERS) -o $@ bindings/c/test/ryw_benchmark.c $(fdb_c_tests_LIBS)
packages/fdb-c-tests-$(VERSION)-$(PLATFORM).tar.gz: bin/fdb_c_performance_test bin/fdb_c_ryw_benchmark
@echo "Packaging $@"

View File

@ -1,5 +1,5 @@
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<Project DefaultTargets="Build" ToolsVersion="14.1" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<PropertyGroup Condition="'$(Release)' != 'true' ">
</PropertyGroup>
<PropertyGroup Condition="'$(Release)' == 'true' ">
@ -63,12 +63,12 @@
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|X64'" Label="Configuration">
<ConfigurationType>StaticLibrary</ConfigurationType>
<CharacterSet>MultiByte</CharacterSet>
<PlatformToolset>v140_xp</PlatformToolset>
<PlatformToolset>v141</PlatformToolset>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|X64'" Label="Configuration">
<ConfigurationType>StaticLibrary</ConfigurationType>
<CharacterSet>MultiByte</CharacterSet>
<PlatformToolset>v140_xp</PlatformToolset>
<PlatformToolset>v141</PlatformToolset>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
<ImportGroup Label="ExtensionSettings">
@ -99,6 +99,7 @@
<AdditionalIncludeDirectories>%(AdditionalIncludeDirectories);..\c</AdditionalIncludeDirectories>
<MultiProcessorCompilation>true</MultiProcessorCompilation>
<AdditionalOptions>/bigobj @..\..\flow/no_intellisense.opt %(AdditionalOptions)</AdditionalOptions>
<LanguageStandard>stdcpp17</LanguageStandard>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
@ -126,6 +127,7 @@
<MultiProcessorCompilation>true</MultiProcessorCompilation>
<FavorSizeOrSpeed>Speed</FavorSizeOrSpeed>
<MinimalRebuild>false</MinimalRebuild>
<LanguageStandard>stdcpp17</LanguageStandard>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>

View File

@ -1,5 +1,5 @@
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<Project DefaultTargets="Build" ToolsVersion="14.1" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<Import Project="$(SolutionDir)versions.target" />
<PropertyGroup Condition="'$(Release)' != 'true' ">
<PreReleaseDecoration>-PRERELEASE</PreReleaseDecoration>
@ -35,14 +35,14 @@
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<CharacterSet>MultiByte</CharacterSet>
<PlatformToolset>v140_xp</PlatformToolset>
<PlatformToolset>v141</PlatformToolset>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|X64'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<WholeProgramOptimization>false</WholeProgramOptimization>
<CharacterSet>MultiByte</CharacterSet>
<PlatformToolset>v140_xp</PlatformToolset>
<PlatformToolset>v141</PlatformToolset>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
<ImportGroup Label="ExtensionSettings">
@ -69,6 +69,7 @@
<ItemDefinitionGroup>
<ClCompile>
<PreprocessorDefinitions>FDB_VT_VERSION="$(Version)$(PreReleaseDecoration)";FDB_VT_PACKAGE_NAME="$(PackageName)";%(PreprocessorDefinitions)</PreprocessorDefinitions>
<LanguageStandard>stdcpp17</LanguageStandard>
</ClCompile>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|X64'">
@ -82,6 +83,7 @@
<MinimalRebuild>false</MinimalRebuild>
<RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
<AdditionalOptions> @../../../flow/no_intellisense.opt %(AdditionalOptions)</AdditionalOptions>
<LanguageStandard>stdcpp17</LanguageStandard>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
@ -107,6 +109,7 @@
<BufferSecurityCheck>false</BufferSecurityCheck>
<EnableEnhancedInstructionSet>StreamingSIMDExtensions2</EnableEnhancedInstructionSet>
<AdditionalOptions> @../flow/no_intellisense.opt %(AdditionalOptions)</AdditionalOptions>
<LanguageStandard>stdcpp17</LanguageStandard>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>

View File

@ -35,8 +35,7 @@ _fdb_flow_tester_clean:
@rm -rf bindings/flow/bin
ifeq ($(PLATFORM),linux)
fdb_flow_tester_LIBS += -ldl -lpthread -lrt
fdb_flow_tester_LDFLAGS += -static-libstdc++ -static-libgcc
fdb_flow_tester_LDFLAGS += -static-libstdc++ -static-libgcc -ldl -lpthread -lrt -lm
else ifeq ($(PLATFORM),osx)
fdb_flow_tester_LDFLAGS += -lc++
endif

View File

@ -5,7 +5,7 @@ fdb-go
This package requires:
- Go 1.1+ with CGO enabled
- Go 1.11+ with CGO enabled
- [Mono](http://www.mono-project.com/) (macOS or Linux) or [Visual Studio](https://www.visualstudio.com/) (Windows) (build-time only)
- FoundationDB C API 2.0.x-6.1.x (part of the [FoundationDB client packages](https://apple.github.io/foundationdb/downloads.html#c))

View File

@ -0,0 +1,41 @@
package bench
import (
"bytes"
"encoding/binary"
"testing"
)
var result []byte
// Benchmark_Int64ToBytesBuffer measures encoding an int64 into its 8-byte
// little-endian form via bytes.Buffer + binary.Write — the allocation-heavy
// approach this commit replaces elsewhere in the bindings.
func Benchmark_Int64ToBytesBuffer(b *testing.B) {
b.ReportAllocs()
// Keep the last encoding in a local so the loop body is not dead code.
var r []byte
for n := 0; n < b.N; n++ {
// A fresh buffer every iteration: at least one heap allocation per encode.
buf := new(bytes.Buffer)
if err := binary.Write(buf, binary.LittleEndian, int64(n)); err != nil {
b.Error("failed to write int64:", err)
}
b.SetBytes(int64(buf.Len()))
r = buf.Bytes()
}
// Publish to the package-level sink so the compiler cannot elide the work.
result = r
}
// Benchmark_Int64ToBytesPut measures the same int64-to-bytes encoding using a
// fixed 8-byte slice and binary.LittleEndian.PutUint64 — the cheaper approach
// the bindings switch to in this commit.
func Benchmark_Int64ToBytesPut(b *testing.B) {
b.ReportAllocs()
// Keep the last encoding in a local so the loop body is not dead code.
var r []byte
for n := 0; n < b.N; n++ {
buf := make([]byte, 8)
binary.LittleEndian.PutUint64(buf, uint64(n))
b.SetBytes(int64(len(buf)))
r = buf
}
// Publish to the package-level sink so the compiler cannot elide the work.
result = r
}

View File

@ -66,11 +66,7 @@ func writeOptBytes(w io.Writer, receiver string, function string, opt Option) {
func writeOptInt(w io.Writer, receiver string, function string, opt Option) {
fmt.Fprintf(w, `func (o %s) %s(param int64) error {
b, e := int64ToBytes(param)
if e != nil {
return e
}
return o.setOpt(%d, b)
return o.setOpt(%d, int64ToBytes(param))
}
`, receiver, function, opt.Code)
}
@ -205,16 +201,13 @@ func main() {
package fdb
import (
"bytes"
"encoding/binary"
)
func int64ToBytes(i int64) ([]byte, error) {
buf := new(bytes.Buffer)
if e := binary.Write(buf, binary.LittleEndian, i); e != nil {
return nil, e
}
return buf.Bytes(), nil
func int64ToBytes(i int64) []byte {
buf := make([]byte, 8)
binary.LittleEndian.PutUint64(buf, uint64(i))
return buf
}
`)

View File

@ -22,10 +22,8 @@
package fdb
/*
#define FDB_API_VERSION 610
#include <foundationdb/fdb_c.h>
*/
// #define FDB_API_VERSION 610
// #include <foundationdb/fdb_c.h>
import "C"
// Deprecated: Use OpenDatabase or OpenDefault to obtain a database handle directly

View File

@ -22,10 +22,8 @@
package fdb
/*
#define FDB_API_VERSION 610
#include <foundationdb/fdb_c.h>
*/
// #define FDB_API_VERSION 610
// #include <foundationdb/fdb_c.h>
import "C"
import (
@ -216,7 +214,10 @@ func (d Database) LocalityGetBoundaryKeys(er ExactRange, limit int, readVersion
tr.Options().SetLockAware()
bk, ek := er.FDBRangeKeys()
ffer := KeyRange{append(Key("\xFF/keyServers/"), bk.FDBKey()...), append(Key("\xFF/keyServers/"), ek.FDBKey()...)}
ffer := KeyRange{
append(Key("\xFF/keyServers/"), bk.FDBKey()...),
append(Key("\xFF/keyServers/"), ek.FDBKey()...),
}
kvs, e := tr.Snapshot().GetRange(ffer, RangeOptions{Limit: limit}).GetSliceWithError()
if e != nil {

View File

@ -22,10 +22,8 @@
package fdb
/*
#define FDB_API_VERSION 610
#include <foundationdb/fdb_c.h>
*/
// #define FDB_API_VERSION 610
// #include <foundationdb/fdb_c.h>
import "C"
import (

View File

@ -22,11 +22,9 @@
package fdb
/*
#define FDB_API_VERSION 610
#include <foundationdb/fdb_c.h>
#include <stdlib.h>
*/
// #define FDB_API_VERSION 610
// #include <foundationdb/fdb_c.h>
// #include <stdlib.h>
import "C"
import (
@ -139,7 +137,10 @@ func APIVersion(version int) error {
if e == 2203 {
maxSupportedVersion := C.fdb_get_max_api_version()
if headerVersion > int(maxSupportedVersion) {
return fmt.Errorf("This version of the FoundationDB Go binding is not supported by the installed FoundationDB C library. The binding requires a library that supports API version %d, but the installed library supports a maximum version of %d.", headerVersion, maxSupportedVersion)
return fmt.Errorf("This version of the FoundationDB Go binding is "+
"not supported by the installed FoundationDB C library. "+
"The binding requires a library that supports API version %d, "+
"but the installed library supports a maximum version of %d.", headerVersion, maxSupportedVersion)
}
return fmt.Errorf("API version %d is not supported by the installed FoundationDB C library.", version)
}

View File

@ -22,22 +22,20 @@
package fdb
/*
#cgo LDFLAGS: -lfdb_c -lm
#define FDB_API_VERSION 610
#include <foundationdb/fdb_c.h>
#include <string.h>
extern void unlockMutex(void*);
void go_callback(FDBFuture* f, void* m) {
unlockMutex(m);
}
void go_set_callback(void* f, void* m) {
fdb_future_set_callback(f, (FDBCallback)&go_callback, m);
}
*/
// #cgo LDFLAGS: -lfdb_c -lm
// #define FDB_API_VERSION 610
// #include <foundationdb/fdb_c.h>
// #include <string.h>
//
// extern void unlockMutex(void*);
//
// void go_callback(FDBFuture* f, void* m) {
// unlockMutex(m);
// }
//
// void go_set_callback(void* f, void* m) {
// fdb_future_set_callback(f, (FDBCallback)&go_callback, m);
// }
import "C"
import (
@ -100,15 +98,18 @@ func fdb_future_block_until_ready(f *C.FDBFuture) {
m.Lock()
}
func (f future) BlockUntilReady() {
func (f *future) BlockUntilReady() {
defer runtime.KeepAlive(f)
fdb_future_block_until_ready(f.ptr)
}
func (f future) IsReady() bool {
func (f *future) IsReady() bool {
defer runtime.KeepAlive(f)
return C.fdb_future_is_ready(f.ptr) != 0
}
func (f future) Cancel() {
func (f *future) Cancel() {
defer runtime.KeepAlive(f)
C.fdb_future_cancel(f.ptr)
}
@ -140,6 +141,8 @@ type futureByteSlice struct {
func (f *futureByteSlice) Get() ([]byte, error) {
f.o.Do(func() {
defer runtime.KeepAlive(f.future)
var present C.fdb_bool_t
var value *C.uint8_t
var length C.int
@ -195,6 +198,8 @@ type futureKey struct {
func (f *futureKey) Get() (Key, error) {
f.o.Do(func() {
defer runtime.KeepAlive(f.future)
var value *C.uint8_t
var length C.int
@ -241,7 +246,9 @@ type futureNil struct {
*future
}
func (f futureNil) Get() error {
func (f *futureNil) Get() error {
defer runtime.KeepAlive(f.future)
f.BlockUntilReady()
if err := C.fdb_future_get_error(f.ptr); err != 0 {
return Error{int(err)}
@ -250,7 +257,7 @@ func (f futureNil) Get() error {
return nil
}
func (f futureNil) MustGet() {
func (f *futureNil) MustGet() {
if err := f.Get(); err != nil {
panic(err)
}
@ -272,7 +279,9 @@ func stringRefToSlice(ptr unsafe.Pointer) []byte {
return C.GoBytes(src, size)
}
func (f futureKeyValueArray) Get() ([]KeyValue, bool, error) {
func (f *futureKeyValueArray) Get() ([]KeyValue, bool, error) {
defer runtime.KeepAlive(f.future)
f.BlockUntilReady()
var kvs *C.FDBKeyValue
@ -316,17 +325,20 @@ type futureInt64 struct {
*future
}
func (f futureInt64) Get() (int64, error) {
func (f *futureInt64) Get() (int64, error) {
defer runtime.KeepAlive(f.future)
f.BlockUntilReady()
var ver C.int64_t
if err := C.fdb_future_get_version(f.ptr, &ver); err != 0 {
return 0, Error{int(err)}
}
return int64(ver), nil
}
func (f futureInt64) MustGet() int64 {
func (f *futureInt64) MustGet() int64 {
val, err := f.Get()
if err != nil {
panic(err)
@ -356,7 +368,9 @@ type futureStringSlice struct {
*future
}
func (f futureStringSlice) Get() ([]string, error) {
func (f *futureStringSlice) Get() ([]string, error) {
defer runtime.KeepAlive(f.future)
f.BlockUntilReady()
var strings **C.char
@ -375,7 +389,7 @@ func (f futureStringSlice) Get() ([]string, error) {
return ret, nil
}
func (f futureStringSlice) MustGet() []string {
func (f *futureStringSlice) MustGet() []string {
val, err := f.Get()
if err != nil {
panic(err)

View File

@ -30,16 +30,13 @@
package fdb
import (
"bytes"
"encoding/binary"
)
func int64ToBytes(i int64) ([]byte, error) {
buf := new(bytes.Buffer)
if e := binary.Write(buf, binary.LittleEndian, i); e != nil {
return nil, e
}
return buf.Bytes(), nil
// int64ToBytes encodes i as 8 little-endian bytes. The fixed-size
// PutUint64 path cannot fail, so — unlike the earlier binary.Write
// implementation shown above — no error value is returned.
func int64ToBytes(i int64) []byte {
buf := make([]byte, 8)
binary.LittleEndian.PutUint64(buf, uint64(i))
return buf
}
// Deprecated
@ -49,6 +46,13 @@ func (o NetworkOptions) SetLocalAddress(param string) error {
return o.setOpt(10, []byte(param))
}
// enable the object serializer for network communication
//
// Parameter: 0 is false, every other value is true
func (o NetworkOptions) SetUseObjectSerializer(param int64) error {
return o.setOpt(11, int64ToBytes(param))
}
// Deprecated
//
// Parameter: path to cluster file
@ -67,22 +71,14 @@ func (o NetworkOptions) SetTraceEnable(param string) error {
//
// Parameter: max size of a single trace output file
func (o NetworkOptions) SetTraceRollSize(param int64) error {
b, e := int64ToBytes(param)
if e != nil {
return e
}
return o.setOpt(31, b)
return o.setOpt(31, int64ToBytes(param))
}
// Sets the maximum size of all the trace output files put together. This value should be in the range ``[0, INT64_MAX]``. If the value is set to 0, there is no limit on the total size of the files. The default is a maximum size of 104,857,600 bytes. If the default roll size is used, this means that a maximum of 10 trace files will be written at a time.
//
// Parameter: max total size of trace files
func (o NetworkOptions) SetTraceMaxLogsSize(param int64) error {
b, e := int64ToBytes(param)
if e != nil {
return e
}
return o.setOpt(32, b)
return o.setOpt(32, int64ToBytes(param))
}
// Sets the 'LogGroup' attribute with the specified value for all events in the trace output files. The default log group is 'default'.
@ -162,22 +158,14 @@ func (o NetworkOptions) SetBuggifyDisable() error {
//
// Parameter: probability expressed as a percentage between 0 and 100
func (o NetworkOptions) SetBuggifySectionActivatedProbability(param int64) error {
b, e := int64ToBytes(param)
if e != nil {
return e
}
return o.setOpt(50, b)
return o.setOpt(50, int64ToBytes(param))
}
// Set the probability of an active BUGGIFY section being fired
//
// Parameter: probability expressed as a percentage between 0 and 100
func (o NetworkOptions) SetBuggifySectionFiredProbability(param int64) error {
b, e := int64ToBytes(param)
if e != nil {
return e
}
return o.setOpt(51, b)
return o.setOpt(51, int64ToBytes(param))
}
// Set the ca bundle
@ -244,22 +232,14 @@ func (o NetworkOptions) SetEnableSlowTaskProfiling() error {
//
// Parameter: Max location cache entries
func (o DatabaseOptions) SetLocationCacheSize(param int64) error {
b, e := int64ToBytes(param)
if e != nil {
return e
}
return o.setOpt(10, b)
return o.setOpt(10, int64ToBytes(param))
}
// Set the maximum number of watches allowed to be outstanding on a database connection. Increasing this number could result in increased resource usage. Reducing this number will not cancel any outstanding watches. Defaults to 10000 and cannot be larger than 1000000.
//
// Parameter: Max outstanding watches
func (o DatabaseOptions) SetMaxWatches(param int64) error {
b, e := int64ToBytes(param)
if e != nil {
return e
}
return o.setOpt(20, b)
return o.setOpt(20, int64ToBytes(param))
}
// Specify the machine ID that was passed to fdbserver processes running on the same machine as this client, for better location-aware load balancing.
@ -280,33 +260,21 @@ func (o DatabaseOptions) SetDatacenterId(param string) error {
//
// Parameter: value in milliseconds of timeout
func (o DatabaseOptions) SetTransactionTimeout(param int64) error {
b, e := int64ToBytes(param)
if e != nil {
return e
}
return o.setOpt(500, b)
return o.setOpt(500, int64ToBytes(param))
}
// Set a timeout in milliseconds which, when elapsed, will cause a transaction automatically to be cancelled. This sets the ``retry_limit`` option of each transaction created by this database. See the transaction option description for more information.
//
// Parameter: number of times to retry
func (o DatabaseOptions) SetTransactionRetryLimit(param int64) error {
b, e := int64ToBytes(param)
if e != nil {
return e
}
return o.setOpt(501, b)
return o.setOpt(501, int64ToBytes(param))
}
// Set the maximum amount of backoff delay incurred in the call to ``onError`` if the error is retryable. This sets the ``max_retry_delay`` option of each transaction created by this database. See the transaction option description for more information.
//
// Parameter: value in milliseconds of maximum delay
func (o DatabaseOptions) SetTransactionMaxRetryDelay(param int64) error {
b, e := int64ToBytes(param)
if e != nil {
return e
}
return o.setOpt(502, b)
return o.setOpt(502, int64ToBytes(param))
}
// Snapshot read operations will see the results of writes done in the same transaction. This is the default behavior.
@ -417,33 +385,21 @@ func (o TransactionOptions) SetLogTransaction() error {
//
// Parameter: value in milliseconds of timeout
func (o TransactionOptions) SetTimeout(param int64) error {
b, e := int64ToBytes(param)
if e != nil {
return e
}
return o.setOpt(500, b)
return o.setOpt(500, int64ToBytes(param))
}
// Set a maximum number of retries after which additional calls to ``onError`` will throw the most recently seen error code. Valid parameter values are ``[-1, INT_MAX]``. If set to -1, will disable the retry limit. Prior to API version 610, like all other transaction options, the retry limit must be reset after a call to ``onError``. If the API version is 610 or greater, the retry limit is not reset after an ``onError`` call. Note that at all API versions, it is safe and legal to set the retry limit each time the transaction begins, so most code written assuming the older behavior can be upgraded to the newer behavior without requiring any modification, and the caller is not required to implement special logic in retry loops to only conditionally set this option.
//
// Parameter: number of times to retry
func (o TransactionOptions) SetRetryLimit(param int64) error {
b, e := int64ToBytes(param)
if e != nil {
return e
}
return o.setOpt(501, b)
return o.setOpt(501, int64ToBytes(param))
}
// Set the maximum amount of backoff delay incurred in the call to ``onError`` if the error is retryable. Defaults to 1000 ms. Valid parameter values are ``[0, INT_MAX]``. If the maximum retry delay is less than the current retry delay of the transaction, then the current retry delay will be clamped to the maximum retry delay. Prior to API version 610, like all other transaction options, the maximum retry delay must be reset after a call to ``onError``. If the API version is 610 or greater, the retry limit is not reset after an ``onError`` call. Note that at all API versions, it is safe and legal to set the maximum retry delay each time the transaction begins, so most code written assuming the older behavior can be upgraded to the newer behavior without requiring any modification, and the caller is not required to implement special logic in retry loops to only conditionally set this option.
//
// Parameter: value in milliseconds of maximum delay
func (o TransactionOptions) SetMaxRetryDelay(param int64) error {
b, e := int64ToBytes(param)
if e != nil {
return e
}
return o.setOpt(502, b)
return o.setOpt(502, int64ToBytes(param))
}
// Snapshot read operations will see the results of writes done in the same transaction. This is the default behavior.
@ -495,7 +451,7 @@ const (
// Infrequently used. The client has passed a specific row limit and wants
// that many rows delivered in a single batch. Because of iterator operation
// in client drivers make request batches transparent to the user, consider
// ``WANT_ALL`` StreamingMode instead. A row limit must be specified if this
// ``WANT_ALL`` StreamingMode instead. A row limit must be specified if this
// mode is used.
StreamingModeExact StreamingMode = 1
@ -612,15 +568,15 @@ type ErrorPredicate int
const (
// Returns ``true`` if the error indicates the operations in the
// transactions should be retried because of transient error.
// Returns ``true`` if the error indicates the operations in the transactions
// should be retried because of transient error.
ErrorPredicateRetryable ErrorPredicate = 50000
// Returns ``true`` if the error indicates the transaction may have
// succeeded, though not in a way the system can verify.
// Returns ``true`` if the error indicates the transaction may have succeeded,
// though not in a way the system can verify.
ErrorPredicateMaybeCommitted ErrorPredicate = 50001
// Returns ``true`` if the error indicates the transaction has not
// committed, though in a way that can be retried.
// Returns ``true`` if the error indicates the transaction has not committed,
// though in a way that can be retried.
ErrorPredicateRetryableNotCommitted ErrorPredicate = 50002
)

View File

@ -22,10 +22,8 @@
package fdb
/*
#define FDB_API_VERSION 610
#include <foundationdb/fdb_c.h>
*/
// #define FDB_API_VERSION 610
// #include <foundationdb/fdb_c.h>
import "C"
import (

View File

@ -22,10 +22,8 @@
package fdb
/*
#define FDB_API_VERSION 610
#include <foundationdb/fdb_c.h>
*/
// #define FDB_API_VERSION 610
// #include <foundationdb/fdb_c.h>
import "C"
// A ReadTransaction can asynchronously read from a FoundationDB
@ -184,7 +182,9 @@ func (t Transaction) Snapshot() Snapshot {
// Typical code will not use OnError directly. (Database).Transact uses
// OnError internally to implement a correct retry loop.
func (t Transaction) OnError(e Error) FutureNil {
return &futureNil{newFuture(C.fdb_transaction_on_error(t.ptr, C.fdb_error_t(e.Code)))}
return &futureNil{
future: newFuture(C.fdb_transaction_on_error(t.ptr, C.fdb_error_t(e.Code))),
}
}
// Commit attempts to commit the modifications made in the transaction to the
@ -198,7 +198,9 @@ func (t Transaction) OnError(e Error) FutureNil {
// see
// https://apple.github.io/foundationdb/developer-guide.html#transactions-with-unknown-results.
func (t Transaction) Commit() FutureNil {
return &futureNil{newFuture(C.fdb_transaction_commit(t.ptr))}
return &futureNil{
future: newFuture(C.fdb_transaction_commit(t.ptr)),
}
}
// Watch creates a watch and returns a FutureNil that will become ready when the
@ -232,11 +234,20 @@ func (t Transaction) Commit() FutureNil {
// cancelled by calling (FutureNil).Cancel on its returned future.
func (t Transaction) Watch(key KeyConvertible) FutureNil {
kb := key.FDBKey()
return &futureNil{newFuture(C.fdb_transaction_watch(t.ptr, byteSliceToPtr(kb), C.int(len(kb))))}
return &futureNil{
future: newFuture(C.fdb_transaction_watch(t.ptr, byteSliceToPtr(kb), C.int(len(kb)))),
}
}
func (t *transaction) get(key []byte, snapshot int) FutureByteSlice {
return &futureByteSlice{future: newFuture(C.fdb_transaction_get(t.ptr, byteSliceToPtr(key), C.int(len(key)), C.fdb_bool_t(snapshot)))}
return &futureByteSlice{
future: newFuture(C.fdb_transaction_get(
t.ptr,
byteSliceToPtr(key),
C.int(len(key)),
C.fdb_bool_t(snapshot),
)),
}
}
// Get returns the (future) value associated with the specified key. The read is
@ -253,7 +264,24 @@ func (t *transaction) doGetRange(r Range, options RangeOptions, snapshot bool, i
bkey := bsel.Key.FDBKey()
ekey := esel.Key.FDBKey()
return futureKeyValueArray{newFuture(C.fdb_transaction_get_range(t.ptr, byteSliceToPtr(bkey), C.int(len(bkey)), C.fdb_bool_t(boolToInt(bsel.OrEqual)), C.int(bsel.Offset), byteSliceToPtr(ekey), C.int(len(ekey)), C.fdb_bool_t(boolToInt(esel.OrEqual)), C.int(esel.Offset), C.int(options.Limit), C.int(0), C.FDBStreamingMode(options.Mode-1), C.int(iteration), C.fdb_bool_t(boolToInt(snapshot)), C.fdb_bool_t(boolToInt(options.Reverse))))}
return futureKeyValueArray{
future: newFuture(C.fdb_transaction_get_range(
t.ptr,
byteSliceToPtr(bkey),
C.int(len(bkey)),
C.fdb_bool_t(boolToInt(bsel.OrEqual)),
C.int(bsel.Offset),
byteSliceToPtr(ekey),
C.int(len(ekey)),
C.fdb_bool_t(boolToInt(esel.OrEqual)),
C.int(esel.Offset),
C.int(options.Limit),
C.int(0),
C.FDBStreamingMode(options.Mode-1),
C.int(iteration),
C.fdb_bool_t(boolToInt(snapshot)),
C.fdb_bool_t(boolToInt(options.Reverse)),
))}
}
func (t *transaction) getRange(r Range, options RangeOptions, snapshot bool) RangeResult {
@ -278,7 +306,9 @@ func (t Transaction) GetRange(r Range, options RangeOptions) RangeResult {
}
func (t *transaction) getReadVersion() FutureInt64 {
return &futureInt64{newFuture(C.fdb_transaction_get_read_version(t.ptr))}
return &futureInt64{
future: newFuture(C.fdb_transaction_get_read_version(t.ptr)),
}
}
// (Infrequently used) GetReadVersion returns the (future) transaction read version. The read is
@ -358,7 +388,16 @@ func boolToInt(b bool) int {
func (t *transaction) getKey(sel KeySelector, snapshot int) FutureKey {
key := sel.Key.FDBKey()
return &futureKey{future: newFuture(C.fdb_transaction_get_key(t.ptr, byteSliceToPtr(key), C.int(len(key)), C.fdb_bool_t(boolToInt(sel.OrEqual)), C.int(sel.Offset), C.fdb_bool_t(snapshot)))}
return &futureKey{
future: newFuture(C.fdb_transaction_get_key(
t.ptr,
byteSliceToPtr(key),
C.int(len(key)),
C.fdb_bool_t(boolToInt(sel.OrEqual)),
C.int(sel.Offset),
C.fdb_bool_t(snapshot),
)),
}
}
// GetKey returns the future key referenced by the provided key selector. The
@ -375,14 +414,28 @@ func (t Transaction) GetKey(sel Selectable) FutureKey {
}
func (t Transaction) atomicOp(key []byte, param []byte, code int) {
C.fdb_transaction_atomic_op(t.ptr, byteSliceToPtr(key), C.int(len(key)), byteSliceToPtr(param), C.int(len(param)), C.FDBMutationType(code))
C.fdb_transaction_atomic_op(
t.ptr,
byteSliceToPtr(key),
C.int(len(key)),
byteSliceToPtr(param),
C.int(len(param)),
C.FDBMutationType(code),
)
}
func addConflictRange(t *transaction, er ExactRange, crtype conflictRangeType) error {
begin, end := er.FDBRangeKeys()
bkb := begin.FDBKey()
ekb := end.FDBKey()
if err := C.fdb_transaction_add_conflict_range(t.ptr, byteSliceToPtr(bkb), C.int(len(bkb)), byteSliceToPtr(ekb), C.int(len(ekb)), C.FDBConflictRangeType(crtype)); err != 0 {
if err := C.fdb_transaction_add_conflict_range(
t.ptr,
byteSliceToPtr(bkb),
C.int(len(bkb)),
byteSliceToPtr(ekb),
C.int(len(ekb)),
C.FDBConflictRangeType(crtype),
); err != 0 {
return Error{int(err)}
}
@ -414,7 +467,11 @@ func copyAndAppend(orig []byte, b byte) []byte {
// For more information on conflict ranges, see
// https://apple.github.io/foundationdb/developer-guide.html#conflict-ranges.
func (t Transaction) AddReadConflictKey(key KeyConvertible) error {
return addConflictRange(t.transaction, KeyRange{key, Key(copyAndAppend(key.FDBKey(), 0x00))}, conflictRangeTypeRead)
return addConflictRange(
t.transaction,
KeyRange{key, Key(copyAndAppend(key.FDBKey(), 0x00))},
conflictRangeTypeRead,
)
}
// AddWriteConflictRange adds a range of keys to the transactions write
@ -435,7 +492,11 @@ func (t Transaction) AddWriteConflictRange(er ExactRange) error {
// For more information on conflict ranges, see
// https://apple.github.io/foundationdb/developer-guide.html#conflict-ranges.
func (t Transaction) AddWriteConflictKey(key KeyConvertible) error {
return addConflictRange(t.transaction, KeyRange{key, Key(copyAndAppend(key.FDBKey(), 0x00))}, conflictRangeTypeWrite)
return addConflictRange(
t.transaction,
KeyRange{key, Key(copyAndAppend(key.FDBKey(), 0x00))},
conflictRangeTypeWrite,
)
}
// Options returns a TransactionOptions instance suitable for setting options
@ -446,7 +507,13 @@ func (t Transaction) Options() TransactionOptions {
func localityGetAddressesForKey(t *transaction, key KeyConvertible) FutureStringSlice {
kb := key.FDBKey()
return &futureStringSlice{newFuture(C.fdb_transaction_get_addresses_for_key(t.ptr, byteSliceToPtr(kb), C.int(len(kb))))}
return &futureStringSlice{
future: newFuture(C.fdb_transaction_get_addresses_for_key(
t.ptr,
byteSliceToPtr(kb),
C.int(len(kb)),
)),
}
}
// LocalityGetAddressesForKey returns the (future) public network addresses of

View File

@ -1,5 +1,5 @@
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<Project DefaultTargets="Build" ToolsVersion="14.1" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<Import Project="$(SolutionDir)versions.target" />
<PropertyGroup Condition="'$(Release)' != 'true' ">
<PreReleaseDecoration>-PRERELEASE</PreReleaseDecoration>
@ -28,14 +28,14 @@
<ConfigurationType>DynamicLibrary</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<CharacterSet>MultiByte</CharacterSet>
<PlatformToolset>v140_xp</PlatformToolset>
<PlatformToolset>v141</PlatformToolset>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
<ConfigurationType>DynamicLibrary</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>MultiByte</CharacterSet>
<PlatformToolset>v140_xp</PlatformToolset>
<PlatformToolset>v141</PlatformToolset>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
<ImportGroup Label="ExtensionSettings">
@ -63,6 +63,7 @@
<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=_WIN32_WINNT_WS03;BOOST_ALL_NO_LIB;WINVER=_WIN32_WINNT_WS03;NTDDI_VERSION=NTDDI_WS03;_DEBUG;_HAS_ITERATOR_DEBUGGING=0;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
<AdditionalOptions>/bigobj "@$(SolutionDir)flow/no_intellisense.opt" %(AdditionalOptions)</AdditionalOptions>
<LanguageStandard>stdcpp17</LanguageStandard>
</ClCompile>
<Link>
<SubSystem>Windows</SubSystem>
@ -78,6 +79,7 @@
<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=_WIN32_WINNT_WS03;BOOST_ALL_NO_LIB;WINVER=_WIN32_WINNT_WS03;NTDDI_VERSION=NTDDI_WS03;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<RuntimeLibrary>MultiThreaded</RuntimeLibrary>
<AdditionalOptions>/bigobj "@$(SolutionDir)flow/no_intellisense.opt" %(AdditionalOptions)</AdditionalOptions>
<LanguageStandard>stdcpp17</LanguageStandard>
</ClCompile>
<Link>
<EnableCOMDATFolding>true</EnableCOMDATFolding>

View File

@ -1,14 +1,16 @@
FROM centos:6
LABEL version=0.1.2
LABEL version=0.1.4
ENV DOCKER_IMAGEVER=0.1.4
# Install dependencies for developer tools, bindings,\
# documentation, actorcompiler, and packaging tools\
RUN yum install -y yum-utils &&\
yum-config-manager --enable rhel-server-rhscl-7-rpms &&\
yum -y install centos-release-scl epel-release &&\
yum -y install devtoolset-7 mono-core java-1.8.0-openjdk-devel \
rh-python36-python-devel rh-ruby24 golang python27 \
rpm-build debbuild python-pip npm ccache distcc &&\
yum -y install devtoolset-8 java-1.8.0-openjdk-devel \
rh-python36-python-devel devtoolset-8-valgrind-devel \
mono-core rh-ruby24 golang python27 rpm-build debbuild \
python-pip npm dos2unix valgrind-devel &&\
pip install boto3==1.1.1
USER root
@ -35,8 +37,9 @@ RUN curl -L https://github.com/Kitware/CMake/releases/download/v3.13.4/cmake-3.1
RUN curl -L https://ftp.openbsd.org/pub/OpenBSD/LibreSSL/libressl-2.8.2.tar.gz > /tmp/libressl.tar.gz &&\
cd /tmp && echo "b8cb31e59f1294557bfc80f2a662969bc064e83006ceef0574e2553a1c254fd5 libressl.tar.gz" > libressl-sha.txt &&\
sha256sum -c libressl-sha.txt && tar xf libressl.tar.gz &&\
cd libressl-2.8.2 && cd /tmp/libressl-2.8.2 && scl enable devtoolset-7 -- ./configure --prefix=/usr/local/stow/libressl CFLAGS="-fPIC -O3" --prefix=/usr/local &&\
cd /tmp/libressl-2.8.2 && scl enable devtoolset-7 -- make -j`nproc` install &&\
cd libressl-2.8.2 && cd /tmp/libressl-2.8.2 && scl enable devtoolset-8 -- ./configure --prefix=/usr/local/stow/libressl CFLAGS="-fPIC -O3" --prefix=/usr/local &&\
cd /tmp/libressl-2.8.2 && scl enable devtoolset-8 -- make -j`nproc` install &&\
rm -rf /tmp/libressl-2.8.2 /tmp/libressl.tar.gz
CMD scl enable devtoolset-7 python27 rh-python36 rh-ruby24 -- bash
ENV JAVA_HOME=/usr/lib/jvm/java-1.8.0
CMD scl enable devtoolset-8 python27 rh-python36 rh-ruby24 -- bash

View File

@ -2,7 +2,7 @@ version: "3"
services:
common: &common
image: foundationdb/foundationdb-build:0.1.2
image: foundationdb/foundationdb-build:0.1.4
build-setup: &build-setup
<<: *common
@ -26,16 +26,19 @@ services:
build-docs:
<<: *build-setup
command: scl enable devtoolset-7 python27 rh-python36 rh-ruby24 -- bash -c 'mkdir -p "$${BUILD_DIR}" && cd "$${BUILD_DIR}" && cmake /__this_is_some_very_long_name_dir_needed_to_fix_a_bug_with_debug_rpms__/foundationdb && make -j "$${MAKEJOBS}" package_html'
volumes:
- ..:/foundationdb
working_dir: /foundationdb
command: scl enable devtoolset-8 python27 rh-python36 rh-ruby24 -- bash -c 'make -j "$${MAKEJOBS}" docpackage'
release-packages: &release-packages
<<: *release-setup
command: scl enable devtoolset-7 python27 rh-python36 rh-ruby24 -- bash -c 'mkdir -p "$${BUILD_DIR}" && cd "$${BUILD_DIR}" && cmake -DFDB_RELEASE=1 -DVALGRIND=0 /__this_is_some_very_long_name_dir_needed_to_fix_a_bug_with_debug_rpms__/foundationdb && make -j "$${MAKEJOBS}" packages preinstall && cpack'
command: scl enable devtoolset-8 python27 rh-python36 rh-ruby24 -- bash -c 'make -j "$${MAKEJOBS}" packages'
snapshot-packages: &snapshot-packages
<<: *build-setup
command: scl enable devtoolset-7 python27 rh-python36 rh-ruby24 -- bash -c 'mkdir -p "$${BUILD_DIR}" && cd "$${BUILD_DIR}" && cmake -DFDB_RELEASE=0 -DVALGRIND=0 /__this_is_some_very_long_name_dir_needed_to_fix_a_bug_with_debug_rpms__/foundationdb && make -j "$${MAKEJOBS}" packages preinstall && cpack'
command: scl enable devtoolset-8 python27 rh-python36 rh-ruby24 -- bash -c 'make -j "$${MAKEJOBS}" packages'
prb-packages:
<<: *snapshot-packages
@ -43,11 +46,11 @@ services:
release-bindings: &release-bindings
<<: *release-setup
command: bash -c 'make -j "$${MAKEJOBS}" bindings'
command: scl enable devtoolset-8 python27 rh-python36 rh-ruby24 -- bash -c 'make -j "$${MAKEJOBS}" bindings'
snapshot-bindings: &snapshot-bindings
<<: *build-setup
command: scl enable devtoolset-7 python27 rh-python36 rh-ruby24 -- bash -c 'mkdir -p "$${BUILD_DIR}" && cd "$${BUILD_DIR}" && cmake -DFDB_RELEASE=0 /__this_is_some_very_long_name_dir_needed_to_fix_a_bug_with_debug_rpms__/foundationdb && make -j "$${MAKEJOBS}" python_binding'
command: scl enable devtoolset-8 python27 rh-python36 rh-ruby24 -- bash -c 'make -j "$${MAKEJOBS}" bindings'
prb-bindings:
<<: *snapshot-bindings
@ -55,7 +58,7 @@ services:
snapshot-cmake: &snapshot-cmake
<<: *build-setup
command: scl enable devtoolset-7 python27 rh-python36 rh-ruby24 -- bash -c 'mkdir -p "$${BUILD_DIR}" && cd "$${BUILD_DIR}" && cmake -DFDB_RELEASE=0 -DVALGRIND=0 /__this_is_some_very_long_name_dir_needed_to_fix_a_bug_with_debug_rpms__/foundationdb && make -j "$${MAKEJOBS}" packages preinstall && cpack'
command: scl enable devtoolset-8 python27 rh-python36 rh-ruby24 -- bash -c 'mkdir -p "$${BUILD_DIR}" && cd "$${BUILD_DIR}" && cmake -DFDB_RELEASE=0 -DVALGRIND=0 /__this_is_some_very_long_name_dir_needed_to_fix_a_bug_with_debug_rpms__/foundationdb && make -j "$${MAKEJOBS}" packages preinstall && cpack'
prb-cmake:
<<: *snapshot-cmake
@ -63,7 +66,7 @@ services:
snapshot-ctest: &snapshot-ctest
<<: *build-setup
command: scl enable devtoolset-7 python27 rh-python36 rh-ruby24 -- bash -c 'mkdir -p "$${BUILD_DIR}" && cd "$${BUILD_DIR}" && cmake -DFDB_RELEASE=1 /__this_is_some_very_long_name_dir_needed_to_fix_a_bug_with_debug_rpms__/foundationdb && make -j "$${MAKEJOBS}" && ctest -L fast -j "$${MAKEJOBS}" --output-on-failure'
command: scl enable devtoolset-8 python27 rh-python36 rh-ruby24 -- bash -c 'mkdir -p "$${BUILD_DIR}" && cd "$${BUILD_DIR}" && cmake -DFDB_RELEASE=1 /__this_is_some_very_long_name_dir_needed_to_fix_a_bug_with_debug_rpms__/foundationdb && make -j "$${MAKEJOBS}" && ctest -L fast -j "$${MAKEJOBS}" --output-on-failure'
prb-ctest:
<<: *snapshot-ctest
@ -71,7 +74,7 @@ services:
snapshot-correctness: &snapshot-correctness
<<: *build-setup
command: scl enable devtoolset-7 python27 rh-python36 rh-ruby24 -- bash -c 'mkdir -p "$${BUILD_DIR}" && cd "$${BUILD_DIR}" && cmake -DFDB_RELEASE=1 /__this_is_some_very_long_name_dir_needed_to_fix_a_bug_with_debug_rpms__/foundationdb && make -j "$${MAKEJOBS}" && ctest -j "$${MAKEJOBS}" --output-on-failure'
command: scl enable devtoolset-8 python27 rh-python36 rh-ruby24 -- bash -c 'mkdir -p "$${BUILD_DIR}" && cd "$${BUILD_DIR}" && cmake -DFDB_RELEASE=1 /__this_is_some_very_long_name_dir_needed_to_fix_a_bug_with_debug_rpms__/foundationdb && make -j "$${MAKEJOBS}" && ctest -j "$${MAKEJOBS}" --output-on-failure'
prb-correctness:
<<: *snapshot-correctness

View File

@ -1,10 +1,11 @@
set(USE_GPERFTOOLS OFF CACHE BOOL "Use gperfools for profiling")
set(PORTABLE_BINARY OFF CACHE BOOL "Create a binary that runs on older OS versions")
set(USE_VALGRIND OFF CACHE BOOL "Compile for valgrind usage")
set(USE_GOLD_LINKER OFF CACHE BOOL "Use gold linker")
set(ALLOC_INSTRUMENTATION OFF CACHE BOOL "Instrument alloc")
set(WITH_UNDODB OFF CACHE BOOL "Use rr or undodb")
set(USE_ASAN OFF CACHE BOOL "Compile with address sanitizer")
set(FDB_RELEASE OFF CACHE BOOL "This is a building of a final release")
set(USE_LD "LD" CACHE STRING "The linker to use for building: can be LD (system default, default choice), GOLD, or LLD")
if(USE_GPERFTOOLS)
find_package(Gperftools REQUIRED)
@ -47,18 +48,13 @@ include(CheckFunctionExists)
set(CMAKE_REQUIRED_INCLUDES stdlib.h malloc.h)
set(CMAKE_REQUIRED_LIBRARIES c)
if(WIN32)
# see: https://docs.microsoft.com/en-us/windows/desktop/WinProg/using-the-windows-headers
# this sets the windows target version to Windows 7
set(WINDOWS_TARGET 0x0601)
add_compile_options(/W3 /EHsc /std:c++14 /bigobj $<$<CONFIG:Release>:/Zi> /MP)
add_compile_options(/W3 /EHsc /std:c++17 /bigobj $<$<CONFIG:Release>:/Zi> /MP)
add_compile_definitions(_WIN32_WINNT=${WINDOWS_TARGET} BOOST_ALL_NO_LIB)
else()
if(USE_GOLD_LINKER)
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fuse-ld=gold -Wl,--disable-new-dtags")
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fuse-ld=gold -Wl,--disable-new-dtags")
endif()
set(GCC NO)
set(CLANG NO)
@ -70,10 +66,29 @@ else()
set(GCC YES)
endif()
# check linker flags.
if ((NOT (USE_LD STREQUAL "LD")) AND (NOT (USE_LD STREQUAL "GOLD")) AND (NOT (USE_LD STREQUAL "LLD")))
message (FATAL_ERROR "USE_LD must be set to LD, GOLD, or LLD!")
endif()
# if USE_LD=LD, then we don't do anything, defaulting to whatever system
# linker is available (e.g. binutils doesn't normally exist on macOS, so this
# implies the default xcode linker, and other distros may choose others by
# default).
if(USE_LD STREQUAL "GOLD")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fuse-ld=gold -Wl,--disable-new-dtags")
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fuse-ld=gold -Wl,--disable-new-dtags")
endif()
if(USE_LD STREQUAL "LLD")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fuse-ld=lld -Wl,--disable-new-dtags")
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fuse-ld=lld -Wl,--disable-new-dtags")
endif()
# we always compile with debug symbols. CPack will strip them out
# and create a debuginfo rpm
add_compile_options(-ggdb)
set(USE_ASAN OFF CACHE BOOL "Compile with address sanitizer")
if(USE_ASAN)
add_compile_options(
-fno-omit-frame-pointer -fsanitize=address
@ -95,7 +110,7 @@ else()
-mmmx
-mavx
-msse4.2)
add_compile_options($<$<COMPILE_LANGUAGE:CXX>:-std=c++11>)
add_compile_options($<$<COMPILE_LANGUAGE:CXX>:-std=c++17>)
if (USE_VALGRIND)
add_compile_options(-DVALGRIND -DUSE_VALGRIND)
endif()

View File

@ -70,7 +70,7 @@ buildsphinx:
cd $(BUILDDIR); \
curl -OL $(VENV_URL); \
tar zxvf $(VENV_VERSION).tar.gz; \
./$(VENV_VERSION)/virtualenv.py venv; \
python2 ./$(VENV_VERSION)/virtualenv.py venv; \
fi
. $(VENVDIR)/bin/activate && \
cp .pip.conf $(VENVDIR)/pip.conf && \

View File

@ -10,38 +10,38 @@ macOS
The macOS installation package is supported on macOS 10.7+. It includes the client and (optionally) the server.
* `FoundationDB-6.1.5.pkg <https://www.foundationdb.org/downloads/6.1.5/macOS/installers/FoundationDB-6.1.5.pkg>`_
* `FoundationDB-6.1.7.pkg <https://www.foundationdb.org/downloads/6.1.7/macOS/installers/FoundationDB-6.1.7.pkg>`_
Ubuntu
------
The Ubuntu packages are supported on 64-bit Ubuntu 12.04+, but beware of the Linux kernel bug in Ubuntu 12.x.
* `foundationdb-clients-6.1.5-1_amd64.deb <https://www.foundationdb.org/downloads/6.1.5/ubuntu/installers/foundationdb-clients_6.1.5-1_amd64.deb>`_
* `foundationdb-server-6.1.5-1_amd64.deb <https://www.foundationdb.org/downloads/6.1.5/ubuntu/installers/foundationdb-server_6.1.5-1_amd64.deb>`_ (depends on the clients package)
* `foundationdb-clients-6.1.7-1_amd64.deb <https://www.foundationdb.org/downloads/6.1.7/ubuntu/installers/foundationdb-clients_6.1.7-1_amd64.deb>`_
* `foundationdb-server-6.1.7-1_amd64.deb <https://www.foundationdb.org/downloads/6.1.7/ubuntu/installers/foundationdb-server_6.1.7-1_amd64.deb>`_ (depends on the clients package)
RHEL/CentOS EL6
---------------
The RHEL/CentOS EL6 packages are supported on 64-bit RHEL/CentOS 6.x.
* `foundationdb-clients-6.1.5-1.el6.x86_64.rpm <https://www.foundationdb.org/downloads/6.1.5/rhel6/installers/foundationdb-clients-6.1.5-1.el6.x86_64.rpm>`_
* `foundationdb-server-6.1.5-1.el6.x86_64.rpm <https://www.foundationdb.org/downloads/6.1.5/rhel6/installers/foundationdb-server-6.1.5-1.el6.x86_64.rpm>`_ (depends on the clients package)
* `foundationdb-clients-6.1.7-1.el6.x86_64.rpm <https://www.foundationdb.org/downloads/6.1.7/rhel6/installers/foundationdb-clients-6.1.7-1.el6.x86_64.rpm>`_
* `foundationdb-server-6.1.7-1.el6.x86_64.rpm <https://www.foundationdb.org/downloads/6.1.7/rhel6/installers/foundationdb-server-6.1.7-1.el6.x86_64.rpm>`_ (depends on the clients package)
RHEL/CentOS EL7
---------------
The RHEL/CentOS EL7 packages are supported on 64-bit RHEL/CentOS 7.x.
* `foundationdb-clients-6.1.5-1.el7.x86_64.rpm <https://www.foundationdb.org/downloads/6.1.5/rhel7/installers/foundationdb-clients-6.1.5-1.el7.x86_64.rpm>`_
* `foundationdb-server-6.1.5-1.el7.x86_64.rpm <https://www.foundationdb.org/downloads/6.1.5/rhel7/installers/foundationdb-server-6.1.5-1.el7.x86_64.rpm>`_ (depends on the clients package)
* `foundationdb-clients-6.1.7-1.el7.x86_64.rpm <https://www.foundationdb.org/downloads/6.1.7/rhel7/installers/foundationdb-clients-6.1.7-1.el7.x86_64.rpm>`_
* `foundationdb-server-6.1.7-1.el7.x86_64.rpm <https://www.foundationdb.org/downloads/6.1.7/rhel7/installers/foundationdb-server-6.1.7-1.el7.x86_64.rpm>`_ (depends on the clients package)
Windows
-------
The Windows installer is supported on 64-bit Windows XP and later. It includes the client and (optionally) the server.
* `foundationdb-6.1.5-x64.msi <https://www.foundationdb.org/downloads/6.1.5/windows/installers/foundationdb-6.1.5-x64.msi>`_
* `foundationdb-6.1.7-x64.msi <https://www.foundationdb.org/downloads/6.1.7/windows/installers/foundationdb-6.1.7-x64.msi>`_
API Language Bindings
=====================
@ -58,20 +58,20 @@ On macOS and Windows, the FoundationDB Python API bindings are installed as part
If you need to use the FoundationDB Python API from other Python installations or paths, download the Python package:
* `foundationdb-6.1.5.tar.gz <https://www.foundationdb.org/downloads/6.1.5/bindings/python/foundationdb-6.1.5.tar.gz>`_
* `foundationdb-6.1.7.tar.gz <https://www.foundationdb.org/downloads/6.1.7/bindings/python/foundationdb-6.1.7.tar.gz>`_
Ruby 1.9.3/2.0.0+
-----------------
* `fdb-6.1.5.gem <https://www.foundationdb.org/downloads/6.1.5/bindings/ruby/fdb-6.1.5.gem>`_
* `fdb-6.1.7.gem <https://www.foundationdb.org/downloads/6.1.7/bindings/ruby/fdb-6.1.7.gem>`_
Java 8+
-------
* `fdb-java-6.1.5.jar <https://www.foundationdb.org/downloads/6.1.5/bindings/java/fdb-java-6.1.5.jar>`_
* `fdb-java-6.1.5-javadoc.jar <https://www.foundationdb.org/downloads/6.1.5/bindings/java/fdb-java-6.1.5-javadoc.jar>`_
* `fdb-java-6.1.7.jar <https://www.foundationdb.org/downloads/6.1.7/bindings/java/fdb-java-6.1.7.jar>`_
* `fdb-java-6.1.7-javadoc.jar <https://www.foundationdb.org/downloads/6.1.7/bindings/java/fdb-java-6.1.7-javadoc.jar>`_
Go 1.1+
Go 1.11+
-------
The FoundationDB Go package is available on `GitHub <https://github.com/apple/foundationdb/tree/master/bindings/go>`_.

View File

@ -2,7 +2,7 @@
Release Notes
#############
6.1.5
6.1.7
=====
Features
@ -50,6 +50,7 @@ Performance
* Increase the rate that deleted pages are made available for reuse in the SQLite storage engine. Rename and add knobs to provide more control over this process. [6.1.3] `(PR #1485) <https://github.com/apple/foundationdb/pull/1485>`_
* SQLite page files now grow and shrink in chunks based on a knob which defaults to an effective chunk size of 100MB. [6.1.4] `(PR #1482) <https://github.com/apple/foundationdb/pull/1482>`_ `(PR #1499) <https://github.com/apple/foundationdb/pull/1499>`_
* Reduced the rate at which data is moved between servers, to reduce the impact a failure has on cluster performance. [6.1.4] `(PR #1499) <https://github.com/apple/foundationdb/pull/1499>`_
* Avoid closing saturated network connections which have not received ping packets. [6.1.7] `(PR #1601) <https://github.com/apple/foundationdb/pull/1601>`_
Fixes
-----
@ -126,6 +127,8 @@ Fixes only impacting 6.1.0+
* The transaction log spill-by-reference policy could read too much data from disk. [6.1.5] `(PR #1527) <https://github.com/apple/foundationdb/pull/1527>`_
* Memory tracking trace events could cause the program to crash when called from inside a trace event. [6.1.5] `(PR #1541) <https://github.com/apple/foundationdb/pull/1541>`_
* TLogs will replace a large file with an empty file rather than doing a large truncate operation. [6.1.5] `(PR #1545) <https://github.com/apple/foundationdb/pull/1545>`_
* Fix PR #1545 to work on Windows and Linux. [6.1.6] `(PR #1556) <https://github.com/apple/foundationdb/pull/1556>`_
* Adding a read conflict range for the metadata version key no longer requires read access to the system keys. [6.1.6] `(PR #1556) <https://github.com/apple/foundationdb/pull/1556>`_
Earlier release notes
---------------------

View File

@ -20,6 +20,9 @@ Status
Bindings
--------
* Go: The Go bindings now require Go version 1.11 or later.
* Go: Fix issue with finalizers running too early that could lead to undefined behavior. `(PR #1451) <https://github.com/apple/foundationdb/pull/1451>`_.
Other Changes
-------------

View File

@ -351,4 +351,4 @@ A verification string of::
Would pass, and:
* Require that the Subject has a Subject Alternative Name extension, which has one or more members of type DNS that begins with the value ``prod.``.
* Require that the Subject has a Subject Alternative Name extension, which has one or more members of type DNS that ends with the value ``.com``.
* Require that the Subject has a Subject Alternative Name extension, which has one or more members of type DNS that ends with the value ``.org``.

View File

@ -1,5 +1,5 @@
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<Project DefaultTargets="Build" ToolsVersion="14.1" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<Import Project="$(SolutionDir)versions.target" />
<PropertyGroup Condition="'$(Release)' != 'true' ">
<PreReleaseDecoration>-PRERELEASE</PreReleaseDecoration>
@ -37,12 +37,12 @@
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|X64'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<CharacterSet>MultiByte</CharacterSet>
<PlatformToolset>v140_xp</PlatformToolset>
<PlatformToolset>v141</PlatformToolset>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|X64'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<CharacterSet>MultiByte</CharacterSet>
<PlatformToolset>v140_xp</PlatformToolset>
<PlatformToolset>v141</PlatformToolset>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
<ImportGroup Label="ExtensionSettings">
@ -66,6 +66,7 @@
</Lib>
<ClCompile>
<PreprocessorDefinitions>FDB_VT_VERSION="$(Version)$(PreReleaseDecoration)";FDB_VT_PACKAGE_NAME="$(PackageName)";%(PreprocessorDefinitions)</PreprocessorDefinitions>
<LanguageStandard>stdcpp17</LanguageStandard>
</ClCompile>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|X64'">
@ -82,6 +83,7 @@
<AdditionalIncludeDirectories>%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<MultiProcessorCompilation>true</MultiProcessorCompilation>
<AdditionalOptions>/bigobj @../flow/no_intellisense.opt %(AdditionalOptions)</AdditionalOptions>
<LanguageStandard>stdcpp17</LanguageStandard>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
@ -107,6 +109,7 @@
<FavorSizeOrSpeed>Speed</FavorSizeOrSpeed>
<BufferSecurityCheck>false</BufferSecurityCheck>
<MinimalRebuild>false</MinimalRebuild>
<LanguageStandard>stdcpp17</LanguageStandard>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>

View File

@ -1,5 +1,5 @@
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<Project DefaultTargets="Build" ToolsVersion="14.1" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<Import Project="$(SolutionDir)versions.target" />
<PropertyGroup Condition="'$(Release)' != 'true' ">
<PreReleaseDecoration>-PRERELEASE</PreReleaseDecoration>
@ -39,14 +39,14 @@
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<CharacterSet>MultiByte</CharacterSet>
<PlatformToolset>v140_xp</PlatformToolset>
<PlatformToolset>v141</PlatformToolset>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|X64'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<WholeProgramOptimization>false</WholeProgramOptimization>
<CharacterSet>MultiByte</CharacterSet>
<PlatformToolset>v140_xp</PlatformToolset>
<PlatformToolset>v141</PlatformToolset>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
<ImportGroup Label="ExtensionSettings">
@ -73,6 +73,7 @@
<ItemDefinitionGroup>
<ClCompile>
<PreprocessorDefinitions>FDB_VT_VERSION="$(Version)$(PreReleaseDecoration)";FDB_VT_PACKAGE_NAME="$(PackageName)";%(PreprocessorDefinitions)</PreprocessorDefinitions>
<LanguageStandard>stdcpp17</LanguageStandard>
</ClCompile>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|X64'">
@ -87,6 +88,7 @@
<MinimalRebuild>false</MinimalRebuild>
<RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
<AdditionalOptions> @../flow/no_intellisense.opt %(AdditionalOptions)</AdditionalOptions>
<LanguageStandard>stdcpp17</LanguageStandard>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
@ -113,6 +115,7 @@
<BufferSecurityCheck>false</BufferSecurityCheck>
<EnableEnhancedInstructionSet>StreamingSIMDExtensions2</EnableEnhancedInstructionSet>
<AdditionalOptions> @../flow/no_intellisense.opt %(AdditionalOptions)</AdditionalOptions>
<LanguageStandard>stdcpp17</LanguageStandard>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>

View File

@ -27,6 +27,7 @@
// ClientDBInfo is all the information needed by a database client to access the database
// It is returned (and kept up to date) by the OpenDatabaseRequest interface of ClusterInterface
struct ClientDBInfo {
constexpr static FileIdentifier file_identifier = 5355080;
UID id; // Changes each time anything else changes
vector< MasterProxyInterface > proxies;
double clientTxnInfoSampleRate;
@ -38,7 +39,9 @@ struct ClientDBInfo {
template <class Archive>
void serialize(Archive& ar) {
ASSERT( ar.protocolVersion() >= 0x0FDB00A200040001LL );
if constexpr (!is_fb_function<Archive>) {
ASSERT(ar.protocolVersion() >= 0x0FDB00A200040001LL);
}
serializer(ar, proxies, id, clientTxnInfoSampleRate, clientTxnInfoSizeLimit);
}
};

View File

@ -30,6 +30,7 @@
// Streams from WorkerInterface that are safe and useful to call from a client.
// A ClientWorkerInterface is embedded as the first element of a WorkerInterface.
struct ClientWorkerInterface {
constexpr static FileIdentifier file_identifier = 12418152;
RequestStream< struct RebootRequest > reboot;
RequestStream< struct ProfilerRequest > profiler;
@ -45,6 +46,7 @@ struct ClientWorkerInterface {
};
struct RebootRequest {
constexpr static FileIdentifier file_identifier = 11913957;
bool deleteData;
bool checkData;
@ -57,6 +59,7 @@ struct RebootRequest {
};
struct ProfilerRequest {
constexpr static FileIdentifier file_identifier = 15437862;
ReplyPromise<Void> reply;
enum class Type : std::int8_t {

View File

@ -29,6 +29,7 @@
#include "fdbclient/ClientWorkerInterface.h"
struct ClusterInterface {
constexpr static FileIdentifier file_identifier = 15888863;
RequestStream< struct OpenDatabaseRequest > openDatabase;
RequestStream< struct FailureMonitoringRequest > failureMonitoring;
RequestStream< struct StatusRequest > databaseStatus;
@ -56,6 +57,23 @@ struct ClusterInterface {
}
};
struct ClusterControllerClientInterface {
constexpr static FileIdentifier file_identifier = 14997695;
ClusterInterface clientInterface;
bool operator==(ClusterControllerClientInterface const& r) const {
return clientInterface.id() == r.clientInterface.id();
}
bool operator!=(ClusterControllerClientInterface const& r) const {
return clientInterface.id() != r.clientInterface.id();
}
template <class Ar>
void serialize(Ar& ar) {
serializer(ar, clientInterface);
}
};
struct ClientVersionRef {
StringRef clientVersion;
StringRef sourceVersion;
@ -113,6 +131,7 @@ struct ClientVersionRef {
};
struct OpenDatabaseRequest {
constexpr static FileIdentifier file_identifier = 2799502;
// Sent by the native API to the cluster controller to open a database and track client
// info changes. Returns immediately if the current client info id is different from
// knownClientInfoID; otherwise returns when it next changes (or perhaps after a long interval)
@ -126,12 +145,16 @@ struct OpenDatabaseRequest {
template <class Ar>
void serialize(Ar& ar) {
ASSERT( ar.protocolVersion() >= 0x0FDB00A400040001LL );
serializer(ar, issues, supportedVersions, connectedCoordinatorsNum, traceLogGroup, knownClientInfoID, reply, arena);
if constexpr (!is_fb_function<Ar>) {
ASSERT(ar.protocolVersion() >= 0x0FDB00A400040001LL);
}
serializer(ar, issues, supportedVersions, connectedCoordinatorsNum, traceLogGroup, knownClientInfoID, reply,
arena);
}
};
struct SystemFailureStatus {
constexpr static FileIdentifier file_identifier = 3194108;
NetworkAddressList addresses;
FailureStatus status;
@ -144,6 +167,21 @@ struct SystemFailureStatus {
}
};
struct FailureMonitoringReply {
constexpr static FileIdentifier file_identifier = 6820325;
VectorRef< SystemFailureStatus > changes;
Version failureInformationVersion;
bool allOthersFailed; // If true, changes are relative to all servers being failed, otherwise to the version given in the request
int clientRequestIntervalMS, // after this many milliseconds, send another request
considerServerFailedTimeoutMS; // after this many additional milliseconds, consider the ClusterController itself to be failed
Arena arena;
template <class Ar>
void serialize(Ar& ar) {
serializer(ar, changes, failureInformationVersion, allOthersFailed, clientRequestIntervalMS, considerServerFailedTimeoutMS, arena);
}
};
struct FailureMonitoringRequest {
// Sent by all participants to the cluster controller reply.clientRequestIntervalMS
// ms after receiving the previous reply.
@ -155,6 +193,7 @@ struct FailureMonitoringRequest {
// The failureInformationVersion returned in reply should be passed back to the
// next request to facilitate delta compression of the failure information.
constexpr static FileIdentifier file_identifier = 5867851;
Optional<FailureStatus> senderStatus;
Version failureInformationVersion;
NetworkAddressList addresses;
@ -166,30 +205,8 @@ struct FailureMonitoringRequest {
}
};
struct FailureMonitoringReply {
VectorRef< SystemFailureStatus > changes;
Version failureInformationVersion;
bool allOthersFailed; // If true, changes are relative to all servers being failed, otherwise to the version given in the request
int clientRequestIntervalMS, // after this many milliseconds, send another request
considerServerFailedTimeoutMS; // after this many additional milliseconds, consider the ClusterController itself to be failed
Arena arena;
template <class Ar>
void serialize(Ar& ar) {
serializer(ar, changes, failureInformationVersion, allOthersFailed, clientRequestIntervalMS, considerServerFailedTimeoutMS, arena);
}
};
struct StatusRequest {
ReplyPromise< struct StatusReply > reply;
template <class Ar>
void serialize(Ar& ar) {
serializer(ar, reply);
}
};
struct StatusReply {
constexpr static FileIdentifier file_identifier = 9980504;
StatusObject statusObj;
std::string statusStr;
@ -214,7 +231,18 @@ struct StatusReply {
}
};
struct StatusRequest {
constexpr static FileIdentifier file_identifier = 14419140;
ReplyPromise< struct StatusReply > reply;
template <class Ar>
void serialize(Ar& ar) {
serializer(ar, reply);
}
};
struct GetClientWorkersRequest {
constexpr static FileIdentifier file_identifier = 10771791;
ReplyPromise<vector<ClientWorkerInterface>> reply;
GetClientWorkersRequest() {}
@ -226,6 +254,7 @@ struct GetClientWorkersRequest {
};
struct ForceRecoveryRequest {
constexpr static FileIdentifier file_identifier = 14821350;
Key dcId;
ReplyPromise<Void> reply;

View File

@ -91,6 +91,7 @@ private:
};
struct LeaderInfo {
constexpr static FileIdentifier file_identifier = 8338794;
UID changeID;
static const uint64_t mask = ~(127ll << 57);
Value serializedInfo;
@ -126,6 +127,7 @@ struct LeaderInfo {
};
struct GetLeaderRequest {
constexpr static FileIdentifier file_identifier = 214727;
Key key;
UID knownLeader;
ReplyPromise< Optional<LeaderInfo> > reply;

View File

@ -60,7 +60,7 @@ public:
Database clone() const { return Database(new DatabaseContext( cluster, clientInfo, clientInfoMonitor, dbId, taskID, clientLocality, enableLocalityLoadBalance, lockAware, apiVersion )); }
pair<KeyRange,Reference<LocationInfo>> getCachedLocation( const KeyRef&, bool isBackward = false );
std::pair<KeyRange,Reference<LocationInfo>> getCachedLocation( const KeyRef&, bool isBackward = false );
bool getCachedLocations( const KeyRangeRef&, vector<std::pair<KeyRange,Reference<LocationInfo>>>&, int limit, bool reverse );
Reference<LocationInfo> setCachedLocation( const KeyRangeRef&, const vector<struct StorageServerInterface>& );
void invalidateCache( const KeyRef&, bool isBackward = false );

View File

@ -21,11 +21,14 @@
#ifndef FDBCLIENT_FDBTYPES_H
#define FDBCLIENT_FDBTYPES_H
#include <algorithm>
#include <set>
#include <string>
#include <vector>
#include "flow/flow.h"
#include "fdbclient/Knobs.h"
using std::vector;
using std::pair;
typedef int64_t Version;
typedef uint64_t LogEpoch;
typedef uint64_t Sequence;
@ -73,6 +76,32 @@ struct Tag {
template <class Ar> void load( Ar& ar, Tag& tag ) { tag.serialize_unversioned(ar); }
template <class Ar> void save( Ar& ar, Tag const& tag ) { const_cast<Tag&>(tag).serialize_unversioned(ar); }
template <>
struct struct_like_traits<Tag> : std::true_type {
using Member = Tag;
using types = pack<uint16_t, int8_t>;
template <int i>
static const index_t<i, types>& get(const Member& m) {
if constexpr (i == 0) {
return m.id;
} else {
static_assert(i == 1);
return m.locality;
}
}
template <int i, class Type>
static const void assign(Member& m, const Type& t) {
if constexpr (i == 0) {
m.id = t;
} else {
static_assert(i == 1);
m.locality = t;
}
}
};
static const Tag invalidTag {tagLocalitySpecial, 0};
static const Tag txsTag {tagLocalitySpecial, 1};
@ -523,6 +552,7 @@ struct Traceable<RangeResultRef> : std::true_type {
};
struct KeyValueStoreType {
constexpr static FileIdentifier file_identifier = 6560359;
// These enumerated values are stored in the database configuration, so can NEVER be changed. Only add new ones just before END.
enum StoreType {
SSD_BTREE_V1,
@ -734,7 +764,7 @@ static bool addressExcluded( std::set<AddressExclusion> const& exclusions, Netwo
struct ClusterControllerPriorityInfo {
enum DCFitness { FitnessPrimary, FitnessRemote, FitnessPreferred, FitnessUnknown, FitnessBad }; //cannot be larger than 7 because of leader election mask
static DCFitness calculateDCFitness(Optional<Key> const& dcId, vector<Optional<Key>> const& dcPriority) {
static DCFitness calculateDCFitness(Optional<Key> const& dcId, std::vector<Optional<Key>> const& dcPriority) {
if(!dcPriority.size()) {
return FitnessUnknown;
} else if(dcPriority.size() == 1) {

View File

@ -3922,6 +3922,8 @@ public:
doc.setKey("Tag", tag.tagName);
if(uidAndAbortedFlag.present()) {
doc.setKey("UID", uidAndAbortedFlag.get().first.toString());
state BackupConfig config(uidAndAbortedFlag.get().first);
state EBackupState backupState = wait(config.stateEnum().getD(tr, false, EBackupState::STATE_NEVERRAN));

View File

@ -304,7 +304,7 @@ ACTOR Future<ConfigurationResult::Type> changeConfig( Database cx, std::map<std:
}
}
state Future<Void> tooLong = delay(4.5);
state Future<Void> tooLong = delay(60);
state Key versionKey = BinaryWriter::toValue(deterministicRandom()->randomUniqueID(),Unversioned());
state bool oldReplicationUsesDcId = false;
loop {

View File

@ -23,6 +23,9 @@
#define FDBCLIENT_MASTERPROXYINTERFACE_H
#pragma once
#include <utility>
#include <vector>
#include "fdbclient/FDBTypes.h"
#include "fdbclient/StorageServerInterface.h"
#include "fdbclient/CommitTransaction.h"
@ -30,6 +33,7 @@
#include "flow/Stats.h"
struct MasterProxyInterface {
constexpr static FileIdentifier file_identifier = 8954922;
enum { LocationAwareLoadBalance = 1 };
enum { AlwaysFresh = 1 };
@ -71,6 +75,7 @@ struct MasterProxyInterface {
};
struct CommitID {
constexpr static FileIdentifier file_identifier = 14254927;
Version version; // returns invalidVersion if transaction conflicts
uint16_t txnBatchId;
Optional<Value> metadataVersion;
@ -85,6 +90,7 @@ struct CommitID {
};
struct CommitTransactionRequest : TimedRequest {
constexpr static FileIdentifier file_identifier = 93948;
enum {
FLAG_IS_LOCK_AWARE = 0x1,
FLAG_FIRST_IN_BATCH = 0x2
@ -121,6 +127,7 @@ static inline int getBytes( CommitTransactionRequest const& r ) {
}
struct GetReadVersionReply {
constexpr static FileIdentifier file_identifier = 15709388;
Version version;
bool locked;
Optional<Value> metadataVersion;
@ -132,6 +139,7 @@ struct GetReadVersionReply {
};
struct GetReadVersionRequest : TimedRequest {
constexpr static FileIdentifier file_identifier = 838566;
enum {
PRIORITY_SYSTEM_IMMEDIATE = 15 << 24, // Highest possible priority, always executed even if writes are otherwise blocked
PRIORITY_DEFAULT = 8 << 24,
@ -161,8 +169,9 @@ struct GetReadVersionRequest : TimedRequest {
};
struct GetKeyServerLocationsReply {
constexpr static FileIdentifier file_identifier = 10636023;
Arena arena;
vector<pair<KeyRangeRef, vector<StorageServerInterface>>> results;
std::vector<std::pair<KeyRangeRef, vector<StorageServerInterface>>> results;
template <class Ar>
void serialize(Ar& ar) {
@ -171,6 +180,7 @@ struct GetKeyServerLocationsReply {
};
struct GetKeyServerLocationsRequest {
constexpr static FileIdentifier file_identifier = 9144680;
Arena arena;
KeyRef begin;
Optional<KeyRef> end;
@ -188,6 +198,7 @@ struct GetKeyServerLocationsRequest {
};
struct GetRawCommittedVersionRequest {
constexpr static FileIdentifier file_identifier = 12954034;
Optional<UID> debugID;
ReplyPromise<GetReadVersionReply> reply;
@ -200,11 +211,12 @@ struct GetRawCommittedVersionRequest {
};
struct GetStorageServerRejoinInfoReply {
constexpr static FileIdentifier file_identifier = 9469225;
Version version;
Tag tag;
Optional<Tag> newTag;
bool newLocality;
vector<pair<Version, Tag>> history;
std::vector<std::pair<Version, Tag>> history;
template <class Ar>
void serialize(Ar& ar) {
@ -213,6 +225,7 @@ struct GetStorageServerRejoinInfoReply {
};
struct GetStorageServerRejoinInfoRequest {
constexpr static FileIdentifier file_identifier = 994279;
UID id;
Optional<Value> dcId;
ReplyPromise< GetStorageServerRejoinInfoReply > reply;
@ -227,6 +240,7 @@ struct GetStorageServerRejoinInfoRequest {
};
struct TxnStateRequest {
constexpr static FileIdentifier file_identifier = 15250781;
Arena arena;
VectorRef<KeyValueRef> data;
Sequence sequence;
@ -239,22 +253,9 @@ struct TxnStateRequest {
}
};
struct GetHealthMetricsRequest
{
ReplyPromise<struct GetHealthMetricsReply> reply;
bool detailed;
explicit GetHealthMetricsRequest(bool detailed = false) : detailed(detailed) {}
template <class Ar>
void serialize(Ar& ar)
{
serializer(ar, reply, detailed);
}
};
struct GetHealthMetricsReply
{
constexpr static FileIdentifier file_identifier = 11544290;
Standalone<StringRef> serialized;
HealthMetrics healthMetrics;
@ -282,4 +283,19 @@ struct GetHealthMetricsReply
}
};
struct GetHealthMetricsRequest
{
constexpr static FileIdentifier file_identifier = 11403900;
ReplyPromise<struct GetHealthMetricsReply> reply;
bool detailed;
explicit GetHealthMetricsRequest(bool detailed = false) : detailed(detailed) {}
template <class Ar>
void serialize(Ar& ar)
{
serializer(ar, reply, detailed);
}
};
#endif

View File

@ -233,6 +233,56 @@ TEST_CASE("/fdbclient/MonitorLeader/parseConnectionString/basic") {
return Void();
}
TEST_CASE("/flow/FlatBuffers/LeaderInfo") {
{
LeaderInfo in;
LeaderInfo out;
in.forward = g_random->coinflip();
in.changeID = g_random->randomUniqueID();
{
std::string rndString(g_random->randomInt(10, 400), 'x');
for (auto& c : rndString) {
c = g_random->randomAlphaNumeric();
}
in.serializedInfo = rndString;
}
ObjectWriter writer;
writer.serialize(in);
Standalone<StringRef> copy = writer.toStringRef();
ArenaObjectReader reader(copy.arena(), copy);
reader.deserialize(out);
ASSERT(in.forward == out.forward);
ASSERT(in.changeID == out.changeID);
ASSERT(in.serializedInfo == out.serializedInfo);
}
LeaderInfo leaderInfo;
leaderInfo.forward = g_random->coinflip();
leaderInfo.changeID = g_random->randomUniqueID();
{
std::string rndString(g_random->randomInt(10, 400), 'x');
for (auto& c : rndString) {
c = g_random->randomAlphaNumeric();
}
leaderInfo.serializedInfo = rndString;
}
ErrorOr<EnsureTable<Optional<LeaderInfo>>> objIn(leaderInfo);
ErrorOr<EnsureTable<Optional<LeaderInfo>>> objOut;
Standalone<StringRef> copy;
ObjectWriter writer;
writer.serialize(objIn);
copy = writer.toStringRef();
ArenaObjectReader reader(copy.arena(), copy);
reader.deserialize(objOut);
ASSERT(!objOut.isError());
ASSERT(objOut.get().asUnderlyingType().present());
LeaderInfo outLeader = objOut.get().asUnderlyingType().get();
ASSERT(outLeader.changeID == leaderInfo.changeID);
ASSERT(outLeader.forward == leaderInfo.forward);
ASSERT(outLeader.serializedInfo == leaderInfo.serializedInfo);
return Void();
}
TEST_CASE("/fdbclient/MonitorLeader/parseConnectionString/fuzz") {
// For a static connection string, add in fuzzed comments and whitespace
// SOMEDAY: create a series of random connection strings, rather than the one we started with
@ -463,3 +513,22 @@ ACTOR Future<Void> monitorLeaderInternal( Reference<ClusterConnectionFile> connF
}
}
ACTOR Future<Void> asyncDeserializeClusterInterface(Reference<AsyncVar<Value>> serializedInfo,
Reference<AsyncVar<Optional<ClusterInterface>>> outKnownLeader) {
state Reference<AsyncVar<Optional<ClusterControllerClientInterface>>> knownLeader(
new AsyncVar<Optional<ClusterControllerClientInterface>>{});
state Future<Void> deserializer = asyncDeserialize(serializedInfo, knownLeader, g_network->useObjectSerializer());
loop {
choose {
when(wait(deserializer)) { UNSTOPPABLE_ASSERT(false); }
when(wait(knownLeader->onChange())) {
if (knownLeader->get().present()) {
outKnownLeader->set(knownLeader->get().get().clientInterface);
} else {
outKnownLeader->set(Optional<ClusterInterface>{});
}
}
}
}
}

View File

@ -24,6 +24,7 @@
#include "fdbclient/FDBTypes.h"
#include "fdbclient/CoordinationInterface.h"
#include "fdbclient/ClusterInterface.h"
#define CLUSTER_FILE_ENV_VAR_NAME "FDB_CLUSTER_FILE"
@ -40,12 +41,34 @@ Future<Void> monitorLeader( Reference<ClusterConnectionFile> const& connFile, Re
Future<Void> monitorLeaderInternal( Reference<ClusterConnectionFile> const& connFile, Reference<AsyncVar<Value>> const& outSerializedLeaderInfo, Reference<AsyncVar<int>> const& connectedCoordinatorsNum );
template <class LeaderInterface>
Future<Void> monitorLeader( Reference<ClusterConnectionFile> const& connFile, Reference<AsyncVar<Optional<LeaderInterface>>> const& outKnownLeader, Reference<AsyncVar<int>> connectedCoordinatorsNum ) {
struct LeaderDeserializer {
Future<Void> operator()(const Reference<AsyncVar<Value>>& serializedInfo,
const Reference<AsyncVar<Optional<LeaderInterface>>>& outKnownLeader) {
return asyncDeserialize(serializedInfo, outKnownLeader, g_network->useObjectSerializer());
}
};
Future<Void> asyncDeserializeClusterInterface(const Reference<AsyncVar<Value>>& serializedInfo,
const Reference<AsyncVar<Optional<ClusterInterface>>>& outKnownLeader);
template <>
struct LeaderDeserializer<ClusterInterface> {
Future<Void> operator()(const Reference<AsyncVar<Value>>& serializedInfo,
const Reference<AsyncVar<Optional<ClusterInterface>>>& outKnownLeader) {
return asyncDeserializeClusterInterface(serializedInfo, outKnownLeader);
}
};
template <class LeaderInterface>
Future<Void> monitorLeader(Reference<ClusterConnectionFile> const& connFile,
Reference<AsyncVar<Optional<LeaderInterface>>> const& outKnownLeader,
Reference<AsyncVar<int>> connectedCoordinatorsNum) {
LeaderDeserializer<LeaderInterface> deserializer;
Reference<AsyncVar<Value>> serializedInfo( new AsyncVar<Value> );
Future<Void> m = monitorLeaderInternal( connFile, serializedInfo, connectedCoordinatorsNum );
return m || asyncDeserialize( serializedInfo, outKnownLeader );
return m || deserializer( serializedInfo, outKnownLeader );
}
#pragma endregion
#endif
#endif

View File

@ -61,6 +61,7 @@ extern const char* getHGVersion();
using std::make_pair;
using std::max;
using std::min;
using std::pair;
NetworkOptions networkOptions;
Reference<TLSOptions> tlsOptions;
@ -868,6 +869,9 @@ Future<Void> Cluster::onConnected() {
void setNetworkOption(FDBNetworkOptions::Option option, Optional<StringRef> value) {
switch(option) {
// SOMEDAY: If the network is already started, should these three throw an error?
case FDBNetworkOptions::USE_OBJECT_SERIALIZER:
networkOptions.useObjectSerializer = extractIntOption(value) != 0;
break;
case FDBNetworkOptions::TRACE_ENABLE:
networkOptions.traceDirectory = value.present() ? value.get().toString() : "";
break;
@ -1005,7 +1009,7 @@ void setupNetwork(uint64_t transportId, bool useMetrics) {
if (!networkOptions.logClientInfo.present())
networkOptions.logClientInfo = true;
g_network = newNet2(false, useMetrics || networkOptions.traceDirectory.present());
g_network = newNet2(false, useMetrics || networkOptions.traceDirectory.present(), networkOptions.useObjectSerializer);
FlowTransport::createInstance(transportId);
Net2FileSystem::newFileSystem();

View File

@ -56,12 +56,13 @@ struct NetworkOptions {
Optional<bool> logClientInfo;
Standalone<VectorRef<ClientVersionRef>> supportedVersions;
bool slowTaskProfilingEnabled;
bool useObjectSerializer;
// The default values, TRACE_DEFAULT_ROLL_SIZE and TRACE_DEFAULT_MAX_LOGS_SIZE are located in Trace.h.
NetworkOptions()
: localAddress(""), clusterFile(""), traceDirectory(Optional<std::string>()),
traceRollSize(TRACE_DEFAULT_ROLL_SIZE), traceMaxLogsSize(TRACE_DEFAULT_MAX_LOGS_SIZE), traceLogGroup("default"),
traceFormat("xml"), slowTaskProfilingEnabled(false) {}
traceFormat("xml"), slowTaskProfilingEnabled(false), useObjectSerializer(false) {}
};
class Database {

View File

@ -1344,7 +1344,7 @@ void ReadYourWritesTransaction::addReadConflictRange( KeyRangeRef const& keys )
}
if (tr.apiVersionAtLeast(300)) {
if (keys.begin > getMaxReadKey() || keys.end > getMaxReadKey()) {
if ((keys.begin > getMaxReadKey() || keys.end > getMaxReadKey()) && (keys.begin != metadataVersionKey || keys.end != metadataVersionKeyEnd)) {
throw key_outside_legal_range();
}
}

View File

@ -30,11 +30,8 @@
#include "flow/Stats.h"
struct StorageServerInterface {
enum {
BUSY_ALLOWED = 0,
BUSY_FORCE = 1,
BUSY_LOCAL = 2
};
constexpr static FileIdentifier file_identifier = 15302073;
enum { BUSY_ALLOWED = 0, BUSY_FORCE = 1, BUSY_LOCAL = 2 };
enum { LocationAwareLoadBalance = 1 };
enum { AlwaysFresh = 0 };
@ -69,11 +66,16 @@ struct StorageServerInterface {
void serialize( Ar& ar ) {
// StorageServerInterface is persisted in the database and in the tLog's data structures, so changes here have to be
// versioned carefully!
serializer(ar, uniqueID, locality, getVersion, getValue, getKey, getKeyValues, getShardState, waitMetrics,
splitMetrics, getPhysicalMetrics, waitFailure, getQueuingMetrics, getKeyValueStoreType);
if( ar.protocolVersion() >= 0x0FDB00A200090001LL )
serializer(ar, watchValue);
if constexpr (!is_fb_function<Ar>) {
serializer(ar, uniqueID, locality, getVersion, getValue, getKey, getKeyValues, getShardState, waitMetrics,
splitMetrics, getPhysicalMetrics, waitFailure, getQueuingMetrics, getKeyValueStoreType);
if (ar.protocolVersion() >= 0x0FDB00A200090001LL) serializer(ar, watchValue);
} else {
serializer(ar, uniqueID, locality, getVersion, getValue, getKey, getKeyValues, getShardState, waitMetrics,
splitMetrics, getPhysicalMetrics, waitFailure, getQueuingMetrics, getKeyValueStoreType,
watchValue);
}
}
bool operator == (StorageServerInterface const& s) const { return uniqueID == s.uniqueID; }
bool operator < (StorageServerInterface const& s) const { return uniqueID < s.uniqueID; }
@ -109,6 +111,7 @@ struct ServerCacheInfo {
};
struct GetValueReply : public LoadBalancedReply {
constexpr static FileIdentifier file_identifier = 1378929;
Optional<Value> value;
GetValueReply() {}
@ -121,6 +124,7 @@ struct GetValueReply : public LoadBalancedReply {
};
struct GetValueRequest : TimedRequest {
constexpr static FileIdentifier file_identifier = 8454530;
Key key;
Version version;
Optional<UID> debugID;
@ -136,6 +140,7 @@ struct GetValueRequest : TimedRequest {
};
struct WatchValueRequest {
constexpr static FileIdentifier file_identifier = 14747733;
Key key;
Optional<Value> value;
Version version;
@ -152,6 +157,7 @@ struct WatchValueRequest {
};
struct GetKeyValuesReply : public LoadBalancedReply {
constexpr static FileIdentifier file_identifier = 1783066;
Arena arena;
VectorRef<KeyValueRef> data;
Version version; // useful when latestVersion was requested
@ -164,6 +170,7 @@ struct GetKeyValuesReply : public LoadBalancedReply {
};
struct GetKeyValuesRequest : TimedRequest {
constexpr static FileIdentifier file_identifier = 6795746;
Arena arena;
KeySelectorRef begin, end;
Version version; // or latestVersion
@ -180,6 +187,7 @@ struct GetKeyValuesRequest : TimedRequest {
};
struct GetKeyReply : public LoadBalancedReply {
constexpr static FileIdentifier file_identifier = 11226513;
KeySelector sel;
GetKeyReply() {}
@ -192,6 +200,7 @@ struct GetKeyReply : public LoadBalancedReply {
};
struct GetKeyRequest : TimedRequest {
constexpr static FileIdentifier file_identifier = 10457870;
Arena arena;
KeySelectorRef sel;
Version version; // or latestVersion
@ -207,6 +216,7 @@ struct GetKeyRequest : TimedRequest {
};
struct GetShardStateRequest {
constexpr static FileIdentifier file_identifier = 15860168;
enum waitMode {
NO_WAIT = 0,
FETCHING = 1,
@ -226,6 +236,7 @@ struct GetShardStateRequest {
};
struct StorageMetrics {
constexpr static FileIdentifier file_identifier = 13622226;
int64_t bytes; // total storage
int64_t bytesPerKSecond; // network bandwidth (average over 10s)
int64_t iosPerKSecond;
@ -278,6 +289,7 @@ struct StorageMetrics {
struct WaitMetricsRequest {
// Waits for any of the given minimum or maximum metrics to be exceeded, and then returns the current values
// Send a reversed range for min, max to receive an immediate report
constexpr static FileIdentifier file_identifier = 1795961;
Arena arena;
KeyRangeRef keys;
StorageMetrics min, max;
@ -296,6 +308,7 @@ struct WaitMetricsRequest {
};
struct SplitMetricsReply {
constexpr static FileIdentifier file_identifier = 11530792;
Standalone<VectorRef<KeyRef>> splits;
StorageMetrics used;
@ -306,6 +319,7 @@ struct SplitMetricsReply {
};
struct SplitMetricsRequest {
constexpr static FileIdentifier file_identifier = 10463876;
Arena arena;
KeyRangeRef keys;
StorageMetrics limits;
@ -324,6 +338,7 @@ struct SplitMetricsRequest {
};
struct GetPhysicalMetricsReply {
constexpr static FileIdentifier file_identifier = 15491478;
StorageMetrics load;
StorageMetrics free;
StorageMetrics capacity;
@ -335,6 +350,7 @@ struct GetPhysicalMetricsReply {
};
struct GetPhysicalMetricsRequest {
constexpr static FileIdentifier file_identifier = 13290999;
ReplyPromise<GetPhysicalMetricsReply> reply;
template <class Ar>
@ -343,17 +359,8 @@ struct GetPhysicalMetricsRequest {
}
};
struct StorageQueuingMetricsRequest {
// SOMEDAY: Send threshold value to avoid polling faster than the information changes?
ReplyPromise<struct StorageQueuingMetricsReply> reply;
template <class Ar>
void serialize(Ar& ar) {
serializer(ar, reply);
}
};
struct StorageQueuingMetricsReply {
constexpr static FileIdentifier file_identifier = 7633366;
double localTime;
int64_t instanceID; // changes if bytesDurable and bytesInput reset
int64_t bytesDurable, bytesInput;
@ -369,4 +376,15 @@ struct StorageQueuingMetricsReply {
}
};
struct StorageQueuingMetricsRequest {
// SOMEDAY: Send threshold value to avoid polling faster than the information changes?
constexpr static FileIdentifier file_identifier = 3978640;
ReplyPromise<struct StorageQueuingMetricsReply> reply;
template <class Ar>
void serialize(Ar& ar) {
serializer(ar, reply);
}
};
#endif

View File

@ -587,6 +587,7 @@ const KeyRef maxUIDKey = LiteralStringRef("\xff\xff\xff\xff\xff\xff\xff\xff\xff\
const KeyRef databaseLockedKey = LiteralStringRef("\xff/dbLocked");
const KeyRef metadataVersionKey = LiteralStringRef("\xff/metadataVersion");
const KeyRef metadataVersionKeyEnd = LiteralStringRef("\xff/metadataVersion\x00");
const KeyRef metadataVersionRequiredValue = LiteralStringRef("\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00");
const KeyRef mustContainSystemMutationsKey = LiteralStringRef("\xff/mustContainSystemMutations");

View File

@ -266,6 +266,7 @@ extern const KeyRef maxUIDKey;
extern const KeyRef databaseLockedKey;
extern const KeyRef metadataVersionKey;
extern const KeyRef metadataVersionKeyEnd;
extern const KeyRef metadataVersionRequiredValue;
extern const KeyRef mustContainSystemMutationsKey;

View File

@ -483,7 +483,8 @@ public:
return r->second;
}
static const int overheadPerItem = 128*4;
// For each item in the versioned map, 4 PTree nodes are potentially allocated:
static const int overheadPerItem = NextFastAllocatedSize<sizeof(PTreeT)>::Result*4;
struct iterator;
VersionedMap() : oldestVersion(0), latestVersion(0) {

View File

@ -1,5 +1,5 @@
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<Project DefaultTargets="Build" ToolsVersion="14.1" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<Import Project="$(SolutionDir)versions.target" />
<PropertyGroup Condition="'$(Release)' != 'true' ">
<PreReleaseDecoration>-PRERELEASE</PreReleaseDecoration>
@ -149,12 +149,12 @@
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|X64'" Label="Configuration">
<ConfigurationType>StaticLibrary</ConfigurationType>
<CharacterSet>MultiByte</CharacterSet>
<PlatformToolset>v140_xp</PlatformToolset>
<PlatformToolset>v141</PlatformToolset>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|X64'" Label="Configuration">
<ConfigurationType>StaticLibrary</ConfigurationType>
<CharacterSet>MultiByte</CharacterSet>
<PlatformToolset>v140_xp</PlatformToolset>
<PlatformToolset>v141</PlatformToolset>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
<ImportGroup Label="ExtensionSettings">
@ -174,6 +174,7 @@
<ItemDefinitionGroup>
<ClCompile>
<PreprocessorDefinitions>FDB_VT_VERSION="$(Version)$(PreReleaseDecoration)";FDB_VT_PACKAGE_NAME="$(PackageName)";%(PreprocessorDefinitions)</PreprocessorDefinitions>
<LanguageStandard>stdcpp17</LanguageStandard>
</ClCompile>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|X64'">
@ -190,6 +191,7 @@
<AdditionalIncludeDirectories>%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<MultiProcessorCompilation>true</MultiProcessorCompilation>
<AdditionalOptions>/bigobj @../flow/no_intellisense.opt %(AdditionalOptions)</AdditionalOptions>
<LanguageStandard>stdcpp17</LanguageStandard>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
@ -217,6 +219,7 @@
<MultiProcessorCompilation>true</MultiProcessorCompilation>
<FavorSizeOrSpeed>Speed</FavorSizeOrSpeed>
<MinimalRebuild>false</MinimalRebuild>
<LanguageStandard>stdcpp17</LanguageStandard>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>

View File

@ -33,6 +33,9 @@ description is not currently required but encouraged.
<Option name="local_address" code="10"
paramType="String" paramDescription="IP:PORT"
description="Deprecated"/>
<Option name="use_object_serializer" code="11"
paramType="Int" paramDescription="0 is false, every other value is true"
description="enable the object serializer for network communication"/>
<Option name="cluster_file" code="20"
paramType="String" paramDescription="path to cluster file"
description="Deprecated"/>

View File

@ -324,7 +324,7 @@ public:
#endif
/** Strict less ordering by name of key only */
struct KeyOrder : std::binary_function<Entry, Entry, bool> {
struct KeyOrder {
bool operator()(const Entry & lhs, const Entry & rhs) const {
const static SI_STRLESS isLess = SI_STRLESS();
return isLess(lhs.pItem, rhs.pItem);
@ -332,7 +332,7 @@ public:
};
/** Strict less ordering by order, and then name of key */
struct LoadOrder : std::binary_function<Entry, Entry, bool> {
struct LoadOrder {
bool operator()(const Entry & lhs, const Entry & rhs) const {
if (lhs.nOrder != rhs.nOrder) {
return lhs.nOrder < rhs.nOrder;

View File

@ -1,5 +1,5 @@
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<Project DefaultTargets="Build" ToolsVersion="14.1" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup Label="ProjectConfigurations">
<ProjectConfiguration Include="Debug|x64">
<Configuration>Debug</Configuration>
@ -19,14 +19,14 @@
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<CharacterSet>MultiByte</CharacterSet>
<PlatformToolset>v140_xp</PlatformToolset>
<PlatformToolset>v141</PlatformToolset>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>MultiByte</CharacterSet>
<PlatformToolset>v140_xp</PlatformToolset>
<PlatformToolset>v141</PlatformToolset>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
<ImportGroup Label="ExtensionSettings">
@ -48,6 +48,7 @@
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<LanguageStandard>stdcpp17</LanguageStandard>
</ClCompile>
<Link>
<GenerateDebugInformation>true</GenerateDebugInformation>
@ -59,6 +60,7 @@
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<LanguageStandard>stdcpp17</LanguageStandard>
</ClCompile>
<Link>
<GenerateDebugInformation>true</GenerateDebugInformation>

View File

@ -33,10 +33,15 @@ EvictablePage::~EvictablePage() {
else
aligned_free(data);
}
if (index > -1) {
pageCache->pages[index] = pageCache->pages.back();
pageCache->pages[index]->index = index;
pageCache->pages.pop_back();
if (EvictablePageCache::RANDOM == pageCache->cacheEvictionType) {
if (index > -1) {
pageCache->pages[index] = pageCache->pages.back();
pageCache->pages[index]->index = index;
pageCache->pages.pop_back();
}
} else {
// remove it from the LRU
pageCache->lruPages.erase(EvictablePageCache::List::s_iterator_to(*this));
}
}
@ -97,6 +102,8 @@ Future<Void> AsyncFileCached::read_write_impl( AsyncFileCached* self, void* data
if ( p == self->pages.end() ) {
AFCPage* page = new AFCPage( self, pageOffset );
p = self->pages.insert( std::make_pair(pageOffset, page) ).first;
} else {
self->pageCache->updateHit(p->second);
}
int bytesInPage = std::min(self->pageCache->pageSize - offsetInPage, remaining);
@ -133,6 +140,8 @@ Future<Void> AsyncFileCached::readZeroCopy( void** data, int* length, int64_t of
if ( p == pages.end() ) {
AFCPage* page = new AFCPage( this, offset );
p = pages.insert( std::make_pair(offset, page) ).first;
} else {
p->second->pageCache->updateHit(p->second);
}
*data = p->second->data;

View File

@ -27,6 +27,8 @@
#elif !defined(FLOW_ASYNCFILECACHED_ACTOR_H)
#define FLOW_ASYNCFILECACHED_ACTOR_H
#include <boost/intrusive/list.hpp>
#include "flow/flow.h"
#include "fdbrpc/IAsyncFile.h"
#include "flow/Knobs.h"
@ -34,10 +36,12 @@
#include "flow/network.h"
#include "flow/actorcompiler.h" // This must be the last #include.
namespace bi = boost::intrusive;
struct EvictablePage {
void* data;
int index;
class Reference<struct EvictablePageCache> pageCache;
bi::list_member_hook<> member_hook;
virtual bool evict() = 0; // true if page was evicted, false if it isn't immediately evictable (but will be evicted regardless if possible)
@ -46,30 +50,86 @@ struct EvictablePage {
};
struct EvictablePageCache : ReferenceCounted<EvictablePageCache> {
EvictablePageCache() : pageSize(0), maxPages(0) {}
explicit EvictablePageCache(int pageSize, int64_t maxSize) : pageSize(pageSize), maxPages(maxSize / pageSize) {}
using List = bi::list< EvictablePage, bi::member_hook< EvictablePage, bi::list_member_hook<>, &EvictablePage::member_hook>>;
enum CacheEvictionType { RANDOM = 0, LRU = 1 };
static CacheEvictionType evictionPolicyStringToEnum(const std::string &policy) {
std::string cep = policy;
std::transform(cep.begin(), cep.end(), cep.begin(), ::tolower);
if (cep != "random" && cep != "lru")
throw invalid_cache_eviction_policy();
if (cep == "random")
return RANDOM;
return LRU;
}
EvictablePageCache() : pageSize(0), maxPages(0), cacheEvictionType(RANDOM) {}
explicit EvictablePageCache(int pageSize, int64_t maxSize) : pageSize(pageSize), maxPages(maxSize / pageSize), cacheEvictionType(evictionPolicyStringToEnum(FLOW_KNOBS->CACHE_EVICTION_POLICY)) {
cacheHits.init(LiteralStringRef("EvictablePageCache.CacheHits"));
cacheMisses.init(LiteralStringRef("EvictablePageCache.CacheMisses"));
cacheEvictions.init(LiteralStringRef("EvictablePageCache.CacheEvictions"));
}
void allocate(EvictablePage* page) {
try_evict();
try_evict();
page->data = pageSize == 4096 ? FastAllocator<4096>::allocate() : aligned_alloc(4096,pageSize);
page->index = pages.size();
pages.push_back(page);
if (RANDOM == cacheEvictionType) {
page->index = pages.size();
pages.push_back(page);
} else {
lruPages.push_back(*page); // new page is considered the most recently used (placed at LRU tail)
}
++cacheMisses;
}
void updateHit(EvictablePage* page) {
if (RANDOM != cacheEvictionType) {
// on a hit, update page's location in the LRU so that it's most recent (tail)
lruPages.erase(List::s_iterator_to(*page));
lruPages.push_back(*page);
}
++cacheHits;
}
void try_evict() {
if (pages.size() >= (uint64_t)maxPages && !pages.empty()) {
for (int i = 0; i < FLOW_KNOBS->MAX_EVICT_ATTEMPTS; i++) { // If we don't manage to evict anything, just go ahead and exceed the cache limit
int toEvict = deterministicRandom()->randomInt(0, pages.size());
if (pages[toEvict]->evict())
break;
if (RANDOM == cacheEvictionType) {
if (pages.size() >= (uint64_t)maxPages && !pages.empty()) {
for (int i = 0; i < FLOW_KNOBS->MAX_EVICT_ATTEMPTS; i++) { // If we don't manage to evict anything, just go ahead and exceed the cache limit
int toEvict = deterministicRandom()->randomInt(0, pages.size());
if (pages[toEvict]->evict()) {
++cacheEvictions;
break;
}
}
}
} else {
// For now, LRU is the only other CACHE_EVICTION option
if (lruPages.size() >= (uint64_t)maxPages) {
int i = 0;
// try the least recently used pages first (starting at head of the LRU list)
for (List::iterator it = lruPages.begin();
it != lruPages.end() && i < FLOW_KNOBS->MAX_EVICT_ATTEMPTS;
++it, ++i) { // If we don't manage to evict anything, just go ahead and exceed the cache limit
if (it->evict()) {
++cacheEvictions;
break;
}
}
}
}
}
std::vector<EvictablePage*> pages;
List lruPages;
int pageSize;
int64_t maxPages;
Int64MetricHandle cacheHits;
Int64MetricHandle cacheMisses;
Int64MetricHandle cacheEvictions;
const CacheEvictionType cacheEvictionType;
};
struct OpenFileInfo : NonCopyable {

View File

@ -265,7 +265,7 @@ public:
result = fallocate( fd, 0, 0, size);
if (result != 0) {
int fallocateErrCode = errno;
TraceEvent("AsyncFileKAIOAllocateError").detail("Fd",fd).detail("Filename", filename).GetLastError();
TraceEvent("AsyncFileKAIOAllocateError").detail("Fd",fd).detail("Filename", filename).detail("Size", size).GetLastError();
if ( fallocateErrCode == EOPNOTSUPP ) {
// Mark fallocate as unsupported. Try again with truncate.
ctx.fallocateSupported = false;

View File

@ -200,6 +200,7 @@ struct YieldMockNetwork : INetwork, ReferenceCounted<YieldMockNetwork> {
virtual void run() { return baseNetwork->run(); }
virtual void getDiskBytes(std::string const& directory, int64_t& free, int64_t& total) { return baseNetwork->getDiskBytes(directory,free,total); }
virtual bool isAddressOnThisHost(NetworkAddress const& addr) { return baseNetwork->isAddressOnThisHost(addr); }
virtual bool useObjectSerializer() const { return baseNetwork->useObjectSerializer(); }
};
struct NonserializableThing {};

View File

@ -25,6 +25,7 @@
#include "flow/Net2Packet.h"
#include "flow/ActorCollection.h"
#include "flow/TDMetric.actor.h"
#include "flow/ObjectSerializer.h"
#include "fdbrpc/FailureMonitor.h"
#include "fdbrpc/crc32c.h"
#include "fdbrpc/simulator.h"
@ -126,6 +127,12 @@ struct EndpointNotFoundReceiver : NetworkMessageReceiver {
Endpoint e; reader >> e;
IFailureMonitor::failureMonitor().endpointNotFound(e);
}
virtual void receive(ArenaObjectReader& reader) {
Endpoint e;
reader.deserialize(e);
IFailureMonitor::failureMonitor().endpointNotFound(e);
}
};
struct PingReceiver : NetworkMessageReceiver {
@ -138,6 +145,11 @@ struct PingReceiver : NetworkMessageReceiver {
ReplyPromise<Void> reply; reader >> reply;
reply.send(Void());
}
virtual void receive(ArenaObjectReader& reader) {
ReplyPromise<Void> reply;
reader.deserialize(reply);
reply.send(Void());
}
};
class TransportData {
@ -282,16 +294,18 @@ struct Peer : NonCopyable {
ReliablePacketList reliable;
AsyncTrigger dataToSend; // Triggered when unsent.empty() becomes false
Future<Void> connect;
AsyncTrigger incompatibleDataRead;
AsyncTrigger resetPing;
bool compatible;
bool outgoingConnectionIdle; // We don't actually have a connection open and aren't trying to open one because we don't have anything to send
double lastConnectTime;
double reconnectionDelay;
int peerReferences;
bool incompatibleProtocolVersionNewer;
int64_t bytesReceived;
explicit Peer( TransportData* transport, NetworkAddress const& destination )
: transport(transport), destination(destination), outgoingConnectionIdle(false), lastConnectTime(0.0), reconnectionDelay(FLOW_KNOBS->INITIAL_RECONNECTION_TIME), compatible(true), incompatibleProtocolVersionNewer(false), peerReferences(-1)
: transport(transport), destination(destination), outgoingConnectionIdle(false), lastConnectTime(0.0), reconnectionDelay(FLOW_KNOBS->INITIAL_RECONNECTION_TIME),
compatible(true), incompatibleProtocolVersionNewer(false), peerReferences(-1), bytesReceived(0)
{
connect = connectionKeeper(this);
}
@ -317,7 +331,8 @@ struct Peer : NonCopyable {
}
pkt.connectPacketLength = sizeof(pkt) - sizeof(pkt.connectPacketLength);
pkt.protocolVersion = currentProtocolVersion;
pkt.protocolVersion =
g_network->useObjectSerializer() ? addObjectSerializerFlag(currentProtocolVersion) : currentProtocolVersion;
pkt.connectionId = transport->transportId;
PacketBuffer* pb_first = new PacketBuffer;
@ -371,7 +386,6 @@ struct Peer : NonCopyable {
// Send an (ignored) packet to make sure that, if our outgoing connection died before the peer made this connection attempt,
// we eventually find out that our connection is dead, close it, and then respond to the next connection reattempt from peer.
//sendPacket( self, SerializeSourceRaw(StringRef()), Endpoint(peer->address(), TOKEN_IGNORE_PACKET), false );
}
}
@ -389,11 +403,23 @@ struct Peer : NonCopyable {
state ReplyPromise<Void> reply;
FlowTransport::transport().sendUnreliable( SerializeSource<ReplyPromise<Void>>(reply), remotePing.getEndpoint() );
choose {
when (wait( delay( FLOW_KNOBS->CONNECTION_MONITOR_TIMEOUT ) )) { TraceEvent("ConnectionTimeout").suppressFor(1.0).detail("WithAddr", peer->destination); throw connection_failed(); }
when (wait( reply.getFuture() )) {}
when (wait( peer->incompatibleDataRead.onTrigger())) {}
state int64_t startingBytes = peer->bytesReceived;
loop {
choose {
when (wait( delay( FLOW_KNOBS->CONNECTION_MONITOR_TIMEOUT ) )) {
if(startingBytes == peer->bytesReceived) {
TraceEvent("ConnectionTimeout").suppressFor(1.0).detail("WithAddr", peer->destination);
throw connection_failed();
}
startingBytes = peer->bytesReceived;
}
when (wait( reply.getFuture() )) {
break;
}
when (wait( peer->resetPing.onTrigger())) {
break;
}
}
}
}
}
@ -529,7 +555,7 @@ TransportData::~TransportData() {
}
}
ACTOR static void deliver( TransportData* self, Endpoint destination, ArenaReader reader, bool inReadSocket ) {
ACTOR static void deliver(TransportData* self, Endpoint destination, ArenaReader reader, bool inReadSocket) {
int priority = self->endpoints.getPriority(destination.token);
if (priority < TaskReadSocket || !inReadSocket) {
wait( delay(0, priority) );
@ -541,8 +567,15 @@ ACTOR static void deliver( TransportData* self, Endpoint destination, ArenaReade
if (receiver) {
try {
g_currentDeliveryPeerAddress = destination.addresses;
receiver->receive( reader );
g_currentDeliveryPeerAddress = {NetworkAddress()};
if (g_network->useObjectSerializer()) {
StringRef data = reader.arenaReadAll();
ASSERT(data.size() > 8);
ArenaObjectReader objReader(reader.arena(), reader.arenaReadAll());
receiver->receive(objReader);
} else {
receiver->receive(reader);
}
g_currentDeliveryPeerAddress = { NetworkAddress() };
} catch (Error& e) {
g_currentDeliveryPeerAddress = {NetworkAddress()};
TraceEvent(SevError, "ReceiverError").error(e).detail("Token", destination.token.toString()).detail("Peer", destination.getPrimaryAddress());
@ -561,7 +594,8 @@ ACTOR static void deliver( TransportData* self, Endpoint destination, ArenaReade
g_network->setCurrentTask( TaskReadSocket );
}
static void scanPackets( TransportData* transport, uint8_t*& unprocessed_begin, uint8_t* e, Arena& arena, NetworkAddress const& peerAddress, uint64_t peerProtocolVersion ) {
static void scanPackets(TransportData* transport, uint8_t*& unprocessed_begin, uint8_t* e, Arena& arena,
NetworkAddress const& peerAddress, uint64_t peerProtocolVersion) {
// Find each complete packet in the given byte range and queue a ready task to deliver it.
// Remove the complete packets from the range by increasing unprocessed_begin.
// There won't be more than 64K of data plus one packet, so this shouldn't take a long time.
@ -633,8 +667,9 @@ static void scanPackets( TransportData* transport, uint8_t*& unprocessed_begin,
#if VALGRIND
VALGRIND_CHECK_MEM_IS_DEFINED(p, packetLen);
#endif
ArenaReader reader( arena, StringRef(p, packetLen), AssumeVersion(peerProtocolVersion) );
UID token; reader >> token;
ArenaReader reader(arena, StringRef(p, packetLen), AssumeVersion(currentProtocolVersion));
UID token;
reader >> token;
++transport->countPacketsReceived;
@ -649,7 +684,8 @@ static void scanPackets( TransportData* transport, uint8_t*& unprocessed_begin,
transport->warnAlwaysForLargePacket = false;
}
deliver( transport, Endpoint( {peerAddress}, token ), std::move(reader), true );
ASSERT(!reader.empty());
deliver(transport, Endpoint({ peerAddress }, token), std::move(reader), true);
unprocessed_begin = p = p + packetLen;
}
@ -673,7 +709,7 @@ ACTOR static Future<Void> connectionReader(
state bool incompatiblePeerCounted = false;
state bool incompatibleProtocolVersionNewer = false;
state NetworkAddress peerAddress;
state uint64_t peerProtocolVersion = 0;
state uint64_t peerProtocolVersion;
peerAddress = conn->getPeerAddress();
if (peer == nullptr) {
@ -697,6 +733,9 @@ ACTOR static Future<Void> connectionReader(
}
int readBytes = conn->read( unprocessed_end, buffer_end );
if(peer) {
peer->bytesReceived += readBytes;
}
if (!readBytes) break;
state bool readWillBlock = readBytes != readAllBytes;
unprocessed_end += readBytes;
@ -711,7 +750,8 @@ ACTOR static Future<Void> connectionReader(
serializer(pktReader, pkt);
uint64_t connectionId = pkt.connectionId;
if( (pkt.protocolVersion & compatibleProtocolVersionMask) != (currentProtocolVersion & compatibleProtocolVersionMask) ) {
if(g_network->useObjectSerializer() != hasObjectSerializerFlag(pkt.protocolVersion) ||
(removeFlags(pkt.protocolVersion) & compatibleProtocolVersionMask) != (currentProtocolVersion & compatibleProtocolVersionMask)) {
incompatibleProtocolVersionNewer = pkt.protocolVersion > currentProtocolVersion;
NetworkAddress addr = pkt.canonicalRemotePort
? NetworkAddress(pkt.canonicalRemoteIp(), pkt.canonicalRemotePort)
@ -748,7 +788,8 @@ ACTOR static Future<Void> connectionReader(
TraceEvent("ConnectionEstablished", conn->getDebugID())
.suppressFor(1.0)
.detail("Peer", conn->getPeerAddress())
.detail("ConnectionId", connectionId);
.detail("ConnectionId", connectionId)
.detail("UseObjectSerializer", false);
}
if(connectionId > 1) {
@ -757,8 +798,8 @@ ACTOR static Future<Void> connectionReader(
unprocessed_begin += connectPacketSize;
expectConnectPacket = false;
peerProtocolVersion = protocolVersion;
if (peer != nullptr) {
peerProtocolVersion = protocolVersion;
// Outgoing connection; port information should be what we expect
TraceEvent("ConnectedOutgoing")
.suppressFor(1.0)
@ -770,7 +811,9 @@ ACTOR static Future<Void> connectionReader(
incompatiblePeerCounted = true;
}
ASSERT( pkt.canonicalRemotePort == peerAddress.port );
onConnected.send(peer);
} else {
peerProtocolVersion = protocolVersion;
if (pkt.canonicalRemotePort) {
peerAddress = NetworkAddress(pkt.canonicalRemoteIp(), pkt.canonicalRemotePort, true,
peerAddress.isTLS());
@ -792,7 +835,7 @@ ACTOR static Future<Void> connectionReader(
}
else if(!expectConnectPacket) {
unprocessed_begin = unprocessed_end;
peer->incompatibleDataRead.trigger();
peer->resetPing.trigger();
}
if (readWillBlock)
@ -959,7 +1002,7 @@ void FlowTransport::removePeerReference( const Endpoint& endpoint, NetworkMessag
.detail("Token", endpoint.token);
}
if(peer->peerReferences == 0 && peer->reliable.empty() && peer->unsent.empty()) {
peer->incompatibleDataRead.trigger();
peer->resetPing.trigger();
}
}
}
@ -993,14 +1036,22 @@ static PacketID sendPacket( TransportData* self, ISerializeSource const& what, c
TEST(true); // "Loopback" delivery
// SOMEDAY: Would it be better to avoid (de)serialization by doing this check in flow?
BinaryWriter wr( AssumeVersion(currentProtocolVersion) );
what.serializeBinaryWriter(wr);
Standalone<StringRef> copy = wr.toValue();
Standalone<StringRef> copy;
if (g_network->useObjectSerializer()) {
ObjectWriter wr;
what.serializeObjectWriter(wr);
copy = wr.toStringRef();
} else {
BinaryWriter wr( AssumeVersion(currentProtocolVersion) );
what.serializeBinaryWriter(wr);
copy = wr.toValue();
}
#if VALGRIND
VALGRIND_CHECK_MEM_IS_DEFINED(copy.begin(), copy.size());
#endif
deliver( self, destination, ArenaReader(copy.arena(), copy, AssumeVersion(currentProtocolVersion)), false );
ASSERT(copy.size() > 0);
deliver(self, destination, ArenaReader(copy.arena(), copy, AssumeVersion(currentProtocolVersion)), false);
return (PacketID)NULL;
} else {
@ -1039,7 +1090,7 @@ static PacketID sendPacket( TransportData* self, ISerializeSource const& what, c
wr.writeAhead(packetInfoSize , &packetInfoBuffer);
wr << destination.token;
what.serializePacketWriter(wr);
what.serializePacketWriter(wr, g_network->useObjectSerializer());
pb = wr.finish();
len = wr.size() - packetInfoSize;

View File

@ -24,12 +24,14 @@
#include <algorithm>
#include "flow/network.h"
#include "flow/FileIdentifier.h"
#pragma pack(push, 4)
class Endpoint {
public:
// Endpoint represents a particular service (e.g. a serialized Promise<T> or PromiseStream<T>)
// An endpoint is either "local" (used for receiving data) or "remote" (used for sending data)
constexpr static FileIdentifier file_identifier = 10618805;
typedef UID Token;
NetworkAddressList addresses;
Token token;
@ -72,22 +74,31 @@ public:
template <class Ar>
void serialize(Ar& ar) {
if (ar.isDeserializing && ar.protocolVersion() < 0x0FDB00B061020001LL) {
addresses.secondaryAddress = Optional<NetworkAddress>();
serializer(ar, addresses.address, token);
} else {
if constexpr (is_fb_function<Ar>) {
serializer(ar, addresses, token);
if (ar.isDeserializing) {
if constexpr (Ar::isDeserializing) {
choosePrimaryAddress();
}
} else {
if (ar.isDeserializing && ar.protocolVersion() < 0x0FDB00B061020001LL) {
addresses.secondaryAddress = Optional<NetworkAddress>();
serializer(ar, addresses.address, token);
} else {
serializer(ar, addresses, token);
if (ar.isDeserializing) {
choosePrimaryAddress();
}
}
}
}
};
#pragma pack(pop)
class ArenaObjectReader;
class NetworkMessageReceiver {
public:
virtual void receive( ArenaReader& ) = 0;
virtual void receive(ArenaObjectReader&) = 0;
virtual bool isStream() const { return false; }
};

View File

@ -25,6 +25,7 @@
#include "flow/flow.h"
struct ProcessClass {
constexpr static FileIdentifier file_identifier = 6697257;
// This enum is stored in restartInfo.ini for upgrade tests, so be very careful about changing the existing items!
enum ClassType { UnsetClass, StorageClass, TransactionClass, ResolutionClass, TesterClass, ProxyClass, MasterClass, StatelessClass, LogClass, ClusterControllerClass, LogRouterClass, DataDistributorClass, CoordinatorClass, RatekeeperClass, InvalidClass = -1 };
enum Fitness { BestFit, GoodFit, UnsetFit, OkayFit, WorstFit, ExcludeFit, NeverAssign }; //cannot be larger than 7 because of leader election mask
@ -193,39 +194,40 @@ public:
void serialize(Ar& ar) {
// Locality is persisted in the database inside StorageServerInterface, so changes here have to be
// versioned carefully!
if (ar.protocolVersion() >= 0x0FDB00A446020001LL) {
Standalone<StringRef> key;
Optional<Standalone<StringRef>> value;
uint64_t mapSize = (uint64_t)_data.size();
serializer(ar, mapSize);
if (ar.isDeserializing) {
for (size_t i = 0; i < mapSize; i++) {
serializer(ar, key, value);
_data[key] = value;
if constexpr (is_fb_function<Ar>) {
serializer(ar, _data);
} else {
if (ar.protocolVersion() >= 0x0FDB00A446020001LL) {
Standalone<StringRef> key;
Optional<Standalone<StringRef>> value;
uint64_t mapSize = (uint64_t)_data.size();
serializer(ar, mapSize);
if (ar.isDeserializing) {
for (size_t i = 0; i < mapSize; i++) {
serializer(ar, key, value);
_data[key] = value;
}
} else {
for (auto it = _data.begin(); it != _data.end(); it++) {
key = it->first;
value = it->second;
serializer(ar, key, value);
}
}
}
else {
for (auto it = _data.begin(); it != _data.end(); it++) {
key = it->first;
value = it->second;
serializer(ar, key, value);
}
}
}
else {
ASSERT(ar.isDeserializing);
UID zoneId, dcId, processId;
serializer(ar, zoneId, dcId);
set(keyZoneId, Standalone<StringRef>(zoneId.toString()));
set(keyDcId, Standalone<StringRef>(dcId.toString()));
} else {
ASSERT(ar.isDeserializing);
UID zoneId, dcId, processId;
serializer(ar, zoneId, dcId);
set(keyZoneId, Standalone<StringRef>(zoneId.toString()));
set(keyDcId, Standalone<StringRef>(dcId.toString()));
if (ar.protocolVersion() >= 0x0FDB00A340000001LL) {
serializer(ar, processId);
set(keyProcessId, Standalone<StringRef>(processId.toString()));
}
else {
int _machineClass = ProcessClass::UnsetClass;
serializer(ar, _machineClass);
if (ar.protocolVersion() >= 0x0FDB00A340000001LL) {
serializer(ar, processId);
set(keyProcessId, Standalone<StringRef>(processId.toString()));
} else {
int _machineClass = ProcessClass::UnsetClass;
serializer(ar, _machineClass);
}
}
}
}

View File

@ -29,6 +29,7 @@
using std::vector;
struct PerfMetric {
constexpr static FileIdentifier file_identifier = 5980618;
PerfMetric() : m_name(""), m_value(0), m_averaged(false), m_format_code( "%.3g" ) {}
PerfMetric( std::string name, double value, bool averaged ) : m_name(name), m_value(value), m_averaged(averaged), m_format_code( "%.3g" ) {}
PerfMetric( std::string name, double value, bool averaged, std::string format_code ) : m_name(name), m_value(value), m_averaged(averaged), m_format_code(format_code) {}

View File

@ -142,6 +142,8 @@ PolicyAcross::PolicyAcross(int count, std::string const& attribKey, Reference<IR
return;
}
PolicyAcross::PolicyAcross() : _policy(new PolicyOne()) {}
PolicyAcross::~PolicyAcross()
{
return;

View File

@ -29,148 +29,148 @@ template <class Ar>
void serializeReplicationPolicy(Ar& ar, Reference<IReplicationPolicy>& policy);
extern void testReplicationPolicy(int nTests);
struct IReplicationPolicy : public ReferenceCounted<IReplicationPolicy> {
IReplicationPolicy() {}
virtual ~IReplicationPolicy() {}
virtual std::string name() const = 0;
virtual std::string info() const = 0;
virtual void addref() { ReferenceCounted<IReplicationPolicy>::addref(); }
virtual void delref() { ReferenceCounted<IReplicationPolicy>::delref(); }
virtual int maxResults() const = 0;
virtual int depth() const = 0;
virtual bool selectReplicas(
Reference<LocalitySet> & fromServers,
std::vector<LocalityEntry> const& alsoServers,
std::vector<LocalityEntry> & results ) = 0;
virtual void traceLocalityRecords(Reference<LocalitySet> const& fromServers);
virtual void traceOneLocalityRecord(Reference<LocalityRecord> record, Reference<LocalitySet> const& fromServers);
virtual bool validate(
std::vector<LocalityEntry> const& solutionSet,
Reference<LocalitySet> const& fromServers ) const = 0;
IReplicationPolicy() {}
virtual ~IReplicationPolicy() {}
virtual std::string name() const = 0;
virtual std::string info() const = 0;
virtual void addref() { ReferenceCounted<IReplicationPolicy>::addref(); }
virtual void delref() { ReferenceCounted<IReplicationPolicy>::delref(); }
virtual int maxResults() const = 0;
virtual int depth() const = 0;
virtual bool selectReplicas(Reference<LocalitySet>& fromServers, std::vector<LocalityEntry> const& alsoServers,
std::vector<LocalityEntry>& results) = 0;
virtual void traceLocalityRecords(Reference<LocalitySet> const& fromServers);
virtual void traceOneLocalityRecord(Reference<LocalityRecord> record, Reference<LocalitySet> const& fromServers);
virtual bool validate(std::vector<LocalityEntry> const& solutionSet,
Reference<LocalitySet> const& fromServers) const = 0;
bool operator == ( const IReplicationPolicy& r ) const { return info() == r.info(); }
bool operator != ( const IReplicationPolicy& r ) const { return info() != r.info(); }
bool operator==(const IReplicationPolicy& r) const { return info() == r.info(); }
bool operator!=(const IReplicationPolicy& r) const { return info() != r.info(); }
template <class Ar>
void serialize(Ar& ar) {
Reference<IReplicationPolicy> refThis(this);
serializeReplicationPolicy(ar, refThis);
refThis->delref_no_destroy();
}
template <class Ar>
void serialize(Ar& ar) {
static_assert(!is_fb_function<Ar>);
Reference<IReplicationPolicy> refThis(this);
serializeReplicationPolicy(ar, refThis);
refThis->delref_no_destroy();
}
virtual void deserializationDone() = 0;
// Utility functions
bool selectReplicas(
Reference<LocalitySet> & fromServers,
std::vector<LocalityEntry> & results );
bool validate(
Reference<LocalitySet> const& solutionSet ) const;
bool validateFull(
bool solved,
std::vector<LocalityEntry> const& solutionSet,
std::vector<LocalityEntry> const& alsoServers,
Reference<LocalitySet> const& fromServers );
// Utility functions
bool selectReplicas(Reference<LocalitySet>& fromServers, std::vector<LocalityEntry>& results);
bool validate(Reference<LocalitySet> const& solutionSet) const;
bool validateFull(bool solved, std::vector<LocalityEntry> const& solutionSet,
std::vector<LocalityEntry> const& alsoServers, Reference<LocalitySet> const& fromServers);
// Returns a set of the attributes that this policy uses in selection and validation.
std::set<std::string> attributeKeys() const
{ std::set<std::string> keys; this->attributeKeys(&keys); return keys; }
virtual void attributeKeys(std::set<std::string>*) const = 0;
// Returns a set of the attributes that this policy uses in selection and validation.
std::set<std::string> attributeKeys() const {
std::set<std::string> keys;
this->attributeKeys(&keys);
return keys;
}
virtual void attributeKeys(std::set<std::string>*) const = 0;
};
template <class Archive>
inline void load( Archive& ar, Reference<IReplicationPolicy>& value ) {
inline void load(Archive& ar, Reference<IReplicationPolicy>& value) {
bool present;
ar >> present;
if (present) {
serializeReplicationPolicy(ar, value);
}
else {
} else {
value.clear();
}
}
template <class Archive>
inline void save( Archive& ar, const Reference<IReplicationPolicy>& value ) {
inline void save(Archive& ar, const Reference<IReplicationPolicy>& value) {
bool present = (value.getPtr() != nullptr);
ar << present;
if (present) {
serializeReplicationPolicy(ar, (Reference<IReplicationPolicy>&) value);
serializeReplicationPolicy(ar, (Reference<IReplicationPolicy>&)value);
}
}
struct PolicyOne : IReplicationPolicy, public ReferenceCounted<PolicyOne> {
PolicyOne() {};
virtual ~PolicyOne() {};
PolicyOne(){};
explicit PolicyOne(const PolicyOne& o) {}
virtual ~PolicyOne(){};
virtual std::string name() const { return "One"; }
virtual std::string info() const { return "1"; }
virtual int maxResults() const { return 1; }
virtual int depth() const { return 1; }
virtual bool validate(
std::vector<LocalityEntry> const& solutionSet,
Reference<LocalitySet> const& fromServers ) const;
virtual bool selectReplicas(
Reference<LocalitySet> & fromServers,
std::vector<LocalityEntry> const& alsoServers,
std::vector<LocalityEntry> & results );
virtual bool validate(std::vector<LocalityEntry> const& solutionSet,
Reference<LocalitySet> const& fromServers) const;
virtual bool selectReplicas(Reference<LocalitySet>& fromServers, std::vector<LocalityEntry> const& alsoServers,
std::vector<LocalityEntry>& results);
template <class Ar>
void serialize(Ar& ar) {
static_assert(!is_fb_function<Ar>);
}
virtual void deserializationDone() {}
virtual void attributeKeys(std::set<std::string>* set) const override { return; }
};
struct PolicyAcross : IReplicationPolicy, public ReferenceCounted<PolicyAcross> {
friend struct serializable_traits<PolicyAcross*>;
PolicyAcross(int count, std::string const& attribKey, Reference<IReplicationPolicy> const policy);
explicit PolicyAcross();
explicit PolicyAcross(const PolicyAcross& other) : PolicyAcross(other._count, other._attribKey, other._policy) {}
virtual ~PolicyAcross();
virtual std::string name() const { return "Across"; }
virtual std::string info() const
{ return format("%s^%d x ", _attribKey.c_str(), _count) + _policy->info(); }
virtual std::string info() const { return format("%s^%d x ", _attribKey.c_str(), _count) + _policy->info(); }
virtual int maxResults() const { return _count * _policy->maxResults(); }
virtual int depth() const { return 1 + _policy->depth(); }
virtual bool validate(
std::vector<LocalityEntry> const& solutionSet,
Reference<LocalitySet> const& fromServers ) const;
virtual bool selectReplicas(
Reference<LocalitySet> & fromServers,
std::vector<LocalityEntry> const& alsoServers,
std::vector<LocalityEntry> & results );
virtual int depth() const { return 1 + _policy->depth(); }
virtual bool validate(std::vector<LocalityEntry> const& solutionSet, Reference<LocalitySet> const& fromServers) const;
virtual bool selectReplicas(Reference<LocalitySet>& fromServers, std::vector<LocalityEntry> const& alsoServers,
std::vector<LocalityEntry>& results);
template <class Ar>
void serialize(Ar& ar) {
static_assert(!is_fb_function<Ar>);
serializer(ar, _attribKey, _count);
serializeReplicationPolicy(ar, _policy);
}
static bool compareAddedResults(const std::pair<int, int>& rhs, const std::pair<int, int>& lhs)
{ return (rhs.first < lhs.first) || (!(lhs.first < rhs.first) && (rhs.second < lhs.second)); }
virtual void deserializationDone() {}
virtual void attributeKeys(std::set<std::string> *set) const override
{ set->insert(_attribKey); _policy->attributeKeys(set); }
static bool compareAddedResults(const std::pair<int, int>& rhs, const std::pair<int, int>& lhs) {
return (rhs.first < lhs.first) || (!(lhs.first < rhs.first) && (rhs.second < lhs.second));
}
virtual void attributeKeys(std::set<std::string>* set) const override {
set->insert(_attribKey);
_policy->attributeKeys(set);
}
protected:
int _count;
std::string _attribKey;
Reference<IReplicationPolicy> _policy;
int _count;
std::string _attribKey;
Reference<IReplicationPolicy> _policy;
// Cache temporary members
std::vector<AttribValue> _usedValues;
std::vector<LocalityEntry> _newResults;
Reference<LocalitySet> _selected;
VectorRef<std::pair<int,int>> _addedResults;
Arena _arena;
std::vector<AttribValue> _usedValues;
std::vector<LocalityEntry> _newResults;
Reference<LocalitySet> _selected;
VectorRef<std::pair<int, int>> _addedResults;
Arena _arena;
};
struct PolicyAnd : IReplicationPolicy, public ReferenceCounted<PolicyAnd> {
PolicyAnd(std::vector<Reference<IReplicationPolicy>> policies): _policies(policies), _sortedPolicies(policies)
{
friend struct serializable_traits<PolicyAnd*>;
PolicyAnd(std::vector<Reference<IReplicationPolicy>> policies) : _policies(policies), _sortedPolicies(policies) {
// Sort the policy array
std::sort(_sortedPolicies.begin(), _sortedPolicies.end(), PolicyAnd::comparePolicy);
}
explicit PolicyAnd(const PolicyAnd& other) : _policies(other._policies), _sortedPolicies(other._sortedPolicies) {}
explicit PolicyAnd() {}
virtual ~PolicyAnd() {}
virtual std::string name() const { return "And"; }
virtual std::string info() const {
std::string infoText;
std::string infoText;
for (auto& policy : _policies) {
infoText += ((infoText.length()) ? " & (" : "(") + policy->info() + ")";
infoText += ((infoText.length()) ? " & (" : "(") + policy->info() + ")";
}
if (_policies.size()) infoText = "(" + infoText + ")";
return infoText;
@ -192,91 +192,107 @@ struct PolicyAnd : IReplicationPolicy, public ReferenceCounted<PolicyAnd> {
}
return depthMax;
}
virtual bool validate(
std::vector<LocalityEntry> const& solutionSet,
Reference<LocalitySet> const& fromServers ) const;
virtual bool validate(std::vector<LocalityEntry> const& solutionSet,
Reference<LocalitySet> const& fromServers) const;
virtual bool selectReplicas(
Reference<LocalitySet> & fromServers,
std::vector<LocalityEntry> const& alsoServers,
std::vector<LocalityEntry> & results );
virtual bool selectReplicas(Reference<LocalitySet>& fromServers, std::vector<LocalityEntry> const& alsoServers,
std::vector<LocalityEntry>& results);
static bool comparePolicy(const Reference<IReplicationPolicy>& rhs, const Reference<IReplicationPolicy>& lhs)
{ return (lhs->maxResults() < rhs->maxResults()) || (!(rhs->maxResults() < lhs->maxResults()) && (lhs->depth() < rhs->depth())); }
static bool comparePolicy(const Reference<IReplicationPolicy>& rhs, const Reference<IReplicationPolicy>& lhs) {
return (lhs->maxResults() < rhs->maxResults()) ||
(!(rhs->maxResults() < lhs->maxResults()) && (lhs->depth() < rhs->depth()));
}
template <class Ar>
void serialize(Ar& ar) {
static_assert(!is_fb_function<Ar>);
int count = _policies.size();
serializer(ar, count);
_policies.resize(count);
for(int i = 0; i < count; i++) {
for (int i = 0; i < count; i++) {
serializeReplicationPolicy(ar, _policies[i]);
}
if(Ar::isDeserializing) {
if (Ar::isDeserializing) {
_sortedPolicies = _policies;
std::sort(_sortedPolicies.begin(), _sortedPolicies.end(), PolicyAnd::comparePolicy);
}
}
virtual void attributeKeys(std::set<std::string> *set) const override
{ for (const Reference<IReplicationPolicy>& r : _policies) { r->attributeKeys(set); } }
virtual void deserializationDone() {
_sortedPolicies = _policies;
std::sort(_sortedPolicies.begin(), _sortedPolicies.end(), PolicyAnd::comparePolicy);
}
virtual void attributeKeys(std::set<std::string>* set) const override {
for (const Reference<IReplicationPolicy>& r : _policies) {
r->attributeKeys(set);
}
}
protected:
std::vector<Reference<IReplicationPolicy>> _policies;
std::vector<Reference<IReplicationPolicy>> _sortedPolicies;
std::vector<Reference<IReplicationPolicy>> _policies;
std::vector<Reference<IReplicationPolicy>> _sortedPolicies;
};
extern int testReplication();
template <class Ar>
void serializeReplicationPolicy(Ar& ar, Reference<IReplicationPolicy>& policy) {
if(Ar::isDeserializing) {
if (Ar::isDeserializing) {
StringRef name;
serializer(ar, name);
if(name == LiteralStringRef("One")) {
if (name == LiteralStringRef("One")) {
PolicyOne* pointer = new PolicyOne();
pointer->serialize(ar);
policy = Reference<IReplicationPolicy>(pointer);
}
else if(name == LiteralStringRef("Across")) {
} else if (name == LiteralStringRef("Across")) {
PolicyAcross* pointer = new PolicyAcross(0, "", Reference<IReplicationPolicy>());
pointer->serialize(ar);
policy = Reference<IReplicationPolicy>(pointer);
}
else if(name == LiteralStringRef("And")) {
PolicyAnd* pointer = new PolicyAnd({});
} else if (name == LiteralStringRef("And")) {
PolicyAnd* pointer = new PolicyAnd{};
pointer->serialize(ar);
policy = Reference<IReplicationPolicy>(pointer);
}
else if(name == LiteralStringRef("None")) {
} else if (name == LiteralStringRef("None")) {
policy = Reference<IReplicationPolicy>();
} else {
TraceEvent(SevError, "SerializingInvalidPolicyType").detail("PolicyName", name);
}
else {
TraceEvent(SevError, "SerializingInvalidPolicyType")
.detail("PolicyName", name);
}
}
else {
} else {
std::string name = policy ? policy->name() : "None";
Standalone<StringRef> nameRef = StringRef(name);
serializer(ar, nameRef);
if(name == "One") {
if (name == "One") {
((PolicyOne*)policy.getPtr())->serialize(ar);
}
else if(name == "Across") {
} else if (name == "Across") {
((PolicyAcross*)policy.getPtr())->serialize(ar);
}
else if(name == "And") {
} else if (name == "And") {
((PolicyAnd*)policy.getPtr())->serialize(ar);
}
else if(name == "None") {}
else {
TraceEvent(SevError, "SerializingInvalidPolicyType")
.detail("PolicyName", name);
} else if (name == "None") {
} else {
TraceEvent(SevError, "SerializingInvalidPolicyType").detail("PolicyName", name);
}
}
}
// Flatbuffers dynamic-size traits for replication policies: a policy is stored as an
// opaque, version-prefixed binary blob produced by the legacy BinaryWriter/BinaryReader
// serialization path (serializeReplicationPolicy).
template <>
struct dynamic_size_traits<Reference<IReplicationPolicy>> : std::true_type {
// Serializes `value` into a freshly allocated buffer and returns (owned pointer, length).
// The const_cast is needed because serializeReplicationPolicy takes a non-const reference
// (it serves both save and load); the save path does not modify the policy.
static WriteRawMemory save(const Reference<IReplicationPolicy>& value) {
BinaryWriter writer(IncludeVersion());
serializeReplicationPolicy(writer, const_cast<Reference<IReplicationPolicy>&>(value));
std::unique_ptr<uint8_t[]> memory(new uint8_t[writer.getLength()]);
memcpy(memory.get(), writer.getData(), writer.getLength());
return std::make_pair<OwnershipErasedPtr<const uint8_t>, size_t>(ownedPtr(const_cast<const uint8_t*>(memory.release())), writer.getLength());
}

// Context is an arbitrary type that is plumbed by reference throughout the
// load call tree.
// Reconstructs the policy from the `sz`-byte blob at `buf`; the Context is unused here.
template <class Context>
static void load(const uint8_t* buf, size_t sz, Reference<IReplicationPolicy>& value, Context&) {
StringRef str(buf, sz);
BinaryReader reader(str, IncludeVersion());
serializeReplicationPolicy(reader, value);
}
};
#endif

View File

@ -23,6 +23,7 @@
#pragma once
#include "flow/flow.h"
#include "flow/serialize.h"
#include "fdbrpc/FlowTransport.h" // NetworkMessageReceiver Endpoint
#include "fdbrpc/FailureMonitor.h"
#include "fdbrpc/networksender.actor.h"
@ -92,16 +93,27 @@ struct NetSAV : SAV<T>, FlowReceiver, FastAllocated<NetSAV<T>> {
SAV<T>::sendErrorAndDelPromiseRef(error);
}
}
// Object-serializer receive path for a networked reply: deserializes an
// ErrorOr<EnsureTable<T>> and fulfills (or errors) the underlying SAV.
// If the value has already been set, the message is dropped.
virtual void receive(ArenaObjectReader& reader) {
if (!SAV<T>::canBeSet()) return;
// Hold a promise reference while delivering; the sendAndDelPromiseRef /
// sendErrorAndDelPromiseRef calls release it (presumably keeping `this`
// alive across delivery — confirm against SAV's refcounting contract).
this->addPromiseRef();
ErrorOr<EnsureTable<T>> message;
reader.deserialize(message);
if (message.isError()) {
SAV<T>::sendErrorAndDelPromiseRef(message.getError());
} else {
SAV<T>::sendAndDelPromiseRef(message.get().asUnderlyingType());
}
}
};
template <class T>
class ReplyPromise sealed
class ReplyPromise sealed : public ComposedIdentifier<T, 0x2>
{
public:
template <class U>
void send(U && value) const {
void send(U&& value) const {
sav->send(std::forward<U>(value));
}
template <class E>
@ -163,6 +175,22 @@ void load(Ar& ar, ReplyPromise<T>& value) {
networkSender(value.getFuture(), endpoint);
}
// Object-serializer traits for ReplyPromise<T>.
// On the wire a reply promise is represented only by its endpoint token (a UID):
// - serializing: write the token of the promise's endpoint.
// - deserializing: resolve the token to a local endpoint, rebind the promise to it,
//   and spawn networkSender so the eventual result is sent back over that endpoint.
template <class T>
struct serializable_traits<ReplyPromise<T>> : std::true_type {
template<class Archiver>
static void serialize(Archiver& ar, ReplyPromise<T>& p) {
if constexpr (Archiver::isDeserializing) {
UID token;
serializer(ar, token);
auto endpoint = FlowTransport::transport().loadedEndpoint(token);
p = ReplyPromise<T>(endpoint);
networkSender(p.getFuture(), endpoint);
} else {
const auto& ep = p.getEndpoint().token;
serializer(ar, ep);
}
}
};
// Identity overload: lets generic request-handling code extract "the reply promise"
// uniformly; when handed a bare ReplyPromise it is simply returned unchanged.
template <class Reply>
ReplyPromise<Reply> const& getReplyPromise(ReplyPromise<Reply> const& p) { return p; }
@ -210,6 +238,13 @@ struct NetNotifiedQueue : NotifiedQueue<T>, FlowReceiver, FastAllocated<NetNotif
this->send(std::move(message));
this->delPromiseRef();
}
// Object-serializer receive path for a networked stream: deserializes one T
// and pushes it into the notified queue.
virtual void receive(ArenaObjectReader& reader) {
// Bracket delivery with a promise reference (presumably to keep `this`
// alive while send() runs — confirm against NotifiedQueue's refcounting).
this->addPromiseRef();
T message;
reader.deserialize(message);
this->send(std::move(message));
this->delPromiseRef();
}
virtual bool isStream() const { return true; }
};
@ -379,5 +414,21 @@ void load(Ar& ar, RequestStream<T>& value) {
value = RequestStream<T>(endpoint);
}
// Object-serializer traits for RequestStream<T>.
// Unlike ReplyPromise (token only), a request stream serializes its full Endpoint:
// - deserializing: read the Endpoint and rebind the stream to it.
// - serializing: write the Endpoint; asserts the endpoint has a valid public
//   address, since a stream without one cannot be reached by the peer.
template <class T>
struct serializable_traits<RequestStream<T>> : std::true_type {
template <class Archiver>
static void serialize(Archiver& ar, RequestStream<T>& stream) {
if constexpr (Archiver::isDeserializing) {
Endpoint endpoint;
serializer(ar, endpoint);
stream = RequestStream<T>(endpoint);
} else {
const auto& ep = stream.getEndpoint();
serializer(ar, ep);
UNSTOPPABLE_ASSERT(ep.getPrimaryAddress().isValid()); // No serializing PromiseStreams on a client with no public address
}
}
};
#endif
#include "fdbrpc/genericactors.actor.h"

View File

@ -1,5 +1,5 @@
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<Project DefaultTargets="Build" ToolsVersion="14.1" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup Label="ProjectConfigurations">
<ProjectConfiguration Include="Debug|X64">
<Configuration>Debug</Configuration>
@ -138,12 +138,12 @@
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|X64'" Label="Configuration">
<ConfigurationType>StaticLibrary</ConfigurationType>
<CharacterSet>MultiByte</CharacterSet>
<PlatformToolset>v140_xp</PlatformToolset>
<PlatformToolset>v141</PlatformToolset>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|X64'" Label="Configuration">
<ConfigurationType>StaticLibrary</ConfigurationType>
<CharacterSet>MultiByte</CharacterSet>
<PlatformToolset>v140_xp</PlatformToolset>
<PlatformToolset>v141</PlatformToolset>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
<ImportGroup Label="ExtensionSettings">
@ -181,6 +181,7 @@
<AdditionalIncludeDirectories>%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<MultiProcessorCompilation>true</MultiProcessorCompilation>
<AdditionalOptions>/bigobj @../flow/no_intellisense.opt %(AdditionalOptions)</AdditionalOptions>
<LanguageStandard>stdcpp17</LanguageStandard>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
@ -209,6 +210,7 @@
<MultiProcessorCompilation>true</MultiProcessorCompilation>
<FavorSizeOrSpeed>Speed</FavorSizeOrSpeed>
<MinimalRebuild>false</MinimalRebuild>
<LanguageStandard>stdcpp17</LanguageStandard>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>

View File

@ -24,11 +24,6 @@
#include "fdbrpc/simulator.h"
#include "flow/actorcompiler.h"
// Simulation-only actor: re-sends `data` unreliably to `destination` after a random
// delay in [0, MAX_DELIVER_DUPLICATE_DELAY), modeling duplicate packet delivery.
// NOTE(review): this diff removes the function; documented as it stood.
ACTOR void simDeliverDuplicate( Standalone<StringRef> data, Endpoint destination ) {
wait( delay( deterministicRandom()->random01() * FLOW_KNOBS->MAX_DELIVER_DUPLICATE_DELAY ) );
FlowTransport::transport().sendUnreliable( SerializeSourceRaw(data), destination );
}
ACTOR Future<Void> disableConnectionFailuresAfter( double time, std::string context ) {
wait( delay(time) );

View File

@ -31,14 +31,22 @@
#include "flow/actorcompiler.h" // This must be the last #include.
ACTOR template <class T>
void networkSender( Future<T> input, Endpoint endpoint ) {
void networkSender(Future<T> input, Endpoint endpoint) {
try {
T value = wait( input );
FlowTransport::transport().sendUnreliable( SerializeBoolAnd<T>(true, value), endpoint, false );
T value = wait(input);
if (g_network->useObjectSerializer()) {
FlowTransport::transport().sendUnreliable(SerializeSource<ErrorOr<EnsureTable<T>>>(value), endpoint);
} else {
FlowTransport::transport().sendUnreliable(SerializeBoolAnd<T>(true, value), endpoint, false);
}
} catch (Error& err) {
//if (err.code() == error_code_broken_promise) return;
ASSERT( err.code() != error_code_actor_cancelled );
FlowTransport::transport().sendUnreliable( SerializeBoolAnd<Error>(false, err), endpoint, false );
// if (err.code() == error_code_broken_promise) return;
ASSERT(err.code() != error_code_actor_cancelled);
if (g_network->useObjectSerializer()) {
FlowTransport::transport().sendUnreliable(SerializeSource<ErrorOr<EnsureTable<T>>>(err), endpoint);
} else {
FlowTransport::transport().sendUnreliable(SerializeBoolAnd<Error>(false, err), endpoint, false);
}
}
}
#include "flow/unactorcompiler.h"

View File

@ -174,7 +174,7 @@ SimClogging g_clogging;
struct Sim2Conn : IConnection, ReferenceCounted<Sim2Conn> {
Sim2Conn( ISimulator::ProcessInfo* process )
: process(process), dbgid( deterministicRandom()->randomUniqueID() ), opened(false), closedByCaller(false)
: process(process), dbgid( deterministicRandom()->randomUniqueID() ), opened(false), closedByCaller(false), stopReceive(Never())
{
pipes = sender(this) && receiver(this);
}
@ -209,6 +209,7 @@ struct Sim2Conn : IConnection, ReferenceCounted<Sim2Conn> {
void peerClosed() {
leakedConnectionTracker = trackLeakedConnection(this);
stopReceive = delay(1.0);
}
// Reads as many bytes as possible from the read buffer into [begin,end) and returns the number of bytes read (might be 0)
@ -285,6 +286,7 @@ private:
Future<Void> leakedConnectionTracker;
Future<Void> pipes;
Future<Void> stopReceive;
int availableSendBufferForPeer() const { return sendBufSize - (writtenBytes.get() - receivedBytes.get()); } // SOMEDAY: acknowledgedBytes instead of receivedBytes
@ -317,6 +319,9 @@ private:
ASSERT( g_simulator.getCurrentProcess() == self->process );
wait( delay( g_clogging.getRecvDelay( self->process->address, self->peerProcess->address ) ) );
ASSERT( g_simulator.getCurrentProcess() == self->process );
if(self->stopReceive.isReady()) {
wait(Future<Void>(Never()));
}
self->receivedBytes.set( pos );
wait( Future<Void>(Void()) ); // Prior notification can delete self and cancel this actor
ASSERT( g_simulator.getCurrentProcess() == self->process );
@ -603,11 +608,16 @@ private:
if (randLog)
fprintf( randLog, "SFT1 %s %s %s %" PRId64 "\n", self->dbgId.shortString().c_str(), self->filename.c_str(), opId.shortString().c_str(), size );
if (size == 0) {
// KAIO will return EINVAL, as len==0 is an error.
throw io_error();
}
if(self->delayOnWrite)
wait( waitUntilDiskReady( self->diskParameters, 0 ) );
if( _chsize( self->h, (long) size ) == -1 ) {
TraceEvent(SevWarn, "SimpleFileIOError").detail("Location", 6);
TraceEvent(SevWarn, "SimpleFileIOError").detail("Location", 6).detail("Filename", self->filename).detail("Size", size).detail("Fd", self->h).GetLastError();
throw io_error();
}
@ -1073,6 +1083,10 @@ public:
return primaryTLogsDead || primaryProcessesDead.validate(storagePolicy);
}
virtual bool useObjectSerializer() const {
return net2->useObjectSerializer();
}
// The following function will determine if the specified configuration of available and dead processes can allow the cluster to survive
virtual bool canKillProcesses(std::vector<ProcessInfo*> const& availableProcesses, std::vector<ProcessInfo*> const& deadProcesses, KillType kt, KillType* newKillType) const
{
@ -1565,10 +1579,10 @@ public:
machines.erase(machineId);
}
Sim2() : time(0.0), taskCount(0), yielded(false), yield_limit(0), currentTaskID(-1) {
Sim2(bool objSerializer) : time(0.0), taskCount(0), yielded(false), yield_limit(0), currentTaskID(-1) {
// Not letting currentProcess be NULL eliminates some annoying special cases
currentProcess = new ProcessInfo( "NoMachine", LocalityData(Optional<Standalone<StringRef>>(), StringRef(), StringRef(), StringRef()), ProcessClass(), {NetworkAddress()}, this, "", "" );
g_network = net2 = newNet2(false, true);
currentProcess = new ProcessInfo("NoMachine", LocalityData(Optional<Standalone<StringRef>>(), StringRef(), StringRef(), StringRef()), ProcessClass(), {NetworkAddress()}, this, "", "");
g_network = net2 = newNet2(false, true, objSerializer);
Net2FileSystem::newFileSystem();
check_yield(0);
}
@ -1673,9 +1687,9 @@ public:
int yield_limit; // how many more times yield may return false before next returning true
};
void startNewSimulator() {
void startNewSimulator(bool objSerializer) {
ASSERT( !g_network );
g_network = g_pSimulator = new Sim2();
g_network = g_pSimulator = new Sim2(objSerializer);
g_simulator.connectionFailuresDisableDuration = deterministicRandom()->random01() < 0.5 ? 0 : 1e6;
}

View File

@ -69,10 +69,11 @@ public:
ProcessInfo(const char* name, LocalityData locality, ProcessClass startingClass, NetworkAddressList addresses,
INetworkConnections *net, const char* dataFolder, const char* coordinationFolder )
: name(name), locality(locality), startingClass(startingClass), addresses(addresses), address(addresses.address), dataFolder(dataFolder),
network(net), coordinationFolder(coordinationFolder), failed(false), excluded(false), cpuTicks(0),
rebooting(false), fault_injection_p1(0), fault_injection_p2(0),
fault_injection_r(0), machine(0), cleared(false) {}
: name(name), locality(locality), startingClass(startingClass),
addresses(addresses), address(addresses.address), dataFolder(dataFolder),
network(net), coordinationFolder(coordinationFolder), failed(false), excluded(false), cpuTicks(0),
rebooting(false), fault_injection_p1(0), fault_injection_p2(0),
fault_injection_r(0), machine(0), cleared(false) {}
Future<KillType> onShutdown() { return shutdownSignal.getFuture(); }
@ -154,6 +155,7 @@ public:
virtual bool isAvailable() const = 0;
virtual bool datacenterDead(Optional<Standalone<StringRef>> dcId) const = 0;
virtual void displayWorkers() const;
virtual bool useObjectSerializer() const = 0;
virtual void addRole(NetworkAddress const& address, std::string const& role) {
roleAddresses[address][role] ++;
@ -325,7 +327,7 @@ private:
extern ISimulator* g_pSimulator;
#define g_simulator (*g_pSimulator)
void startNewSimulator();
void startNewSimulator(bool useObjectSerializer);
//Parameters used to simulate disk performance
struct DiskParameters : ReferenceCounted<DiskParameters> {

View File

@ -171,7 +171,7 @@ set(FDBSERVER_SRCS
workloads/WriteBandwidth.actor.cpp
workloads/WriteDuringRead.actor.cpp)
set(SQLITE_SRCS
add_library(fdb_sqlite STATIC
sqlite/btree.h
sqlite/hash.h
sqlite/sqlite3.h
@ -180,12 +180,10 @@ set(SQLITE_SRCS
sqlite/sqliteLimit.h
sqlite/sqlite3.amalgamation.c)
add_library(sqlite ${SQLITE_SRCS})
target_compile_definitions(sqlite PRIVATE $<$<CONFIG:Debug>:NDEBUG>)
# Suppress warnings in sqlite since it's third party
if(NOT WIN32)
target_compile_options(sqlite BEFORE PRIVATE -w) # disable warnings for third party
target_compile_definitions(fdb_sqlite PRIVATE $<$<CONFIG:Debug>:NDEBUG>)
target_compile_options(fdb_sqlite BEFORE PRIVATE -w) # disable warnings for third party
endif()
set(java_workload_docstring "Build the Java workloads (makes fdbserver link against JNI)")
@ -200,7 +198,7 @@ add_flow_target(EXECUTABLE NAME fdbserver SRCS ${FDBSERVER_SRCS})
target_include_directories(fdbserver PRIVATE
${CMAKE_CURRENT_BINARY_DIR}/workloads
${CMAKE_CURRENT_SOURCE_DIR}/workloads)
target_link_libraries(fdbserver PRIVATE fdbclient sqlite)
target_link_libraries(fdbserver PRIVATE fdbclient fdb_sqlite)
if(WITH_JAVA_WORKLOAD)
if(NOT JNI_FOUND)
message(SEND_ERROR "Trying to build Java workload but couldn't find JNI")

View File

@ -35,6 +35,8 @@
// This interface and its serialization depend on slicing, since the client will deserialize only the first part of this structure
struct ClusterControllerFullInterface {
constexpr static FileIdentifier file_identifier =
ClusterControllerClientInterface::file_identifier;
ClusterInterface clientInterface;
RequestStream< struct RecruitFromConfigurationRequest > recruitFromConfiguration;
RequestStream< struct RecruitRemoteFromConfigurationRequest > recruitRemoteFromConfiguration;
@ -60,29 +62,17 @@ struct ClusterControllerFullInterface {
}
template <class Ar>
void serialize( Ar& ar ) {
ASSERT( ar.protocolVersion() >= 0x0FDB00A200040001LL );
serializer(ar, clientInterface, recruitFromConfiguration, recruitRemoteFromConfiguration, recruitStorage, registerWorker, getWorkers, registerMaster, getServerDBInfo);
}
};
struct RecruitFromConfigurationRequest {
DatabaseConfiguration configuration;
bool recruitSeedServers;
int maxOldLogRouters;
ReplyPromise< struct RecruitFromConfigurationReply > reply;
RecruitFromConfigurationRequest() {}
explicit RecruitFromConfigurationRequest(DatabaseConfiguration const& configuration, bool recruitSeedServers, int maxOldLogRouters)
: configuration(configuration), recruitSeedServers(recruitSeedServers), maxOldLogRouters(maxOldLogRouters) {}
template <class Ar>
void serialize( Ar& ar ) {
serializer(ar, configuration, recruitSeedServers, maxOldLogRouters, reply);
void serialize(Ar& ar) {
if constexpr (!is_fb_function<Ar>) {
ASSERT(ar.protocolVersion() >= 0x0FDB00A200040001LL);
}
serializer(ar, clientInterface, recruitFromConfiguration, recruitRemoteFromConfiguration, recruitStorage,
registerWorker, getWorkers, registerMaster, getServerDBInfo);
}
};
struct RecruitFromConfigurationReply {
constexpr static FileIdentifier file_identifier = 2224085;
vector<WorkerInterface> tLogs;
vector<WorkerInterface> satelliteTLogs;
vector<WorkerInterface> proxies;
@ -100,7 +90,36 @@ struct RecruitFromConfigurationReply {
}
};
// Cluster-controller request: recruit workers for the given database configuration.
// Wire struct — member order in serialize() is the network contract; do not reorder.
struct RecruitFromConfigurationRequest {
constexpr static FileIdentifier file_identifier = 2023046;
// Desired database configuration to recruit against.
DatabaseConfiguration configuration;
// Whether seed storage servers should also be recruited.
bool recruitSeedServers;
// Maximum number of old log routers to place (semantics owned by the recruiter).
int maxOldLogRouters;
// Fulfilled with the recruited worker set.
ReplyPromise< struct RecruitFromConfigurationReply > reply;

RecruitFromConfigurationRequest() {}
explicit RecruitFromConfigurationRequest(DatabaseConfiguration const& configuration, bool recruitSeedServers, int maxOldLogRouters)
: configuration(configuration), recruitSeedServers(recruitSeedServers), maxOldLogRouters(maxOldLogRouters) {}

template <class Ar>
void serialize( Ar& ar ) {
serializer(ar, configuration, recruitSeedServers, maxOldLogRouters, reply);
}
};
// Reply to RecruitRemoteFromConfigurationRequest: the workers recruited for the
// remote region. Wire struct — serializer order is the network contract.
struct RecruitRemoteFromConfigurationReply {
constexpr static FileIdentifier file_identifier = 9091392;
// Workers selected as remote transaction logs.
vector<WorkerInterface> remoteTLogs;
// Workers selected as log routers.
vector<WorkerInterface> logRouters;

template <class Ar>
void serialize( Ar& ar ) {
serializer(ar, remoteTLogs, logRouters);
}
};
struct RecruitRemoteFromConfigurationRequest {
constexpr static FileIdentifier file_identifier = 3235995;
DatabaseConfiguration configuration;
Optional<Key> dcId;
int logRouterCount;
@ -116,17 +135,8 @@ struct RecruitRemoteFromConfigurationRequest {
}
};
struct RecruitRemoteFromConfigurationReply {
vector<WorkerInterface> remoteTLogs;
vector<WorkerInterface> logRouters;
template <class Ar>
void serialize( Ar& ar ) {
serializer(ar, remoteTLogs, logRouters);
}
};
struct RecruitStorageReply {
constexpr static FileIdentifier file_identifier = 15877089;
WorkerInterface worker;
ProcessClass processClass;
@ -137,6 +147,7 @@ struct RecruitStorageReply {
};
struct RecruitStorageRequest {
constexpr static FileIdentifier file_identifier = 905920;
std::vector<Optional<Standalone<StringRef>>> excludeMachines; //< Don't recruit any of these machines
std::vector<AddressExclusion> excludeAddresses; //< Don't recruit any of these addresses
std::vector<Optional<Standalone<StringRef>>> includeDCs;
@ -150,6 +161,7 @@ struct RecruitStorageRequest {
};
struct RegisterWorkerReply {
constexpr static FileIdentifier file_identifier = 16475696;
ProcessClass processClass;
ClusterControllerPriorityInfo priorityInfo;
@ -163,6 +175,7 @@ struct RegisterWorkerReply {
};
struct RegisterWorkerRequest {
constexpr static FileIdentifier file_identifier = 14332605;
WorkerInterface wi;
ProcessClass initialClass;
ProcessClass processClass;
@ -184,6 +197,7 @@ struct RegisterWorkerRequest {
};
struct GetWorkersRequest {
constexpr static FileIdentifier file_identifier = 1254174;
enum { TESTER_CLASS_ONLY = 0x1, NON_EXCLUDED_PROCESSES_ONLY = 0x2 };
int flags;
@ -199,6 +213,7 @@ struct GetWorkersRequest {
};
struct RegisterMasterRequest {
constexpr static FileIdentifier file_identifier = 10773445;
UID id;
LocalityData mi;
LogSystemConfig logSystemConfig;
@ -215,22 +230,13 @@ struct RegisterMasterRequest {
RegisterMasterRequest() {}
template <class Ar>
void serialize( Ar& ar ) {
ASSERT( ar.protocolVersion() >= 0x0FDB00A200040001LL );
serializer(ar, id, mi, logSystemConfig, proxies, resolvers, recoveryCount, registrationCount, configuration, priorCommittedLogServers, recoveryState, recoveryStalled, reply);
}
};
struct GetServerDBInfoRequest {
UID knownServerInfoID;
Standalone<VectorRef<StringRef>> issues;
std::vector<NetworkAddress> incompatiblePeers;
ReplyPromise< struct ServerDBInfo > reply;
template <class Ar>
void serialize(Ar& ar) {
serializer(ar, knownServerInfoID, issues, incompatiblePeers, reply);
if constexpr (!is_fb_function<Ar>) {
ASSERT(ar.protocolVersion() >= 0x0FDB00A200040001LL);
}
serializer(ar, id, mi, logSystemConfig, proxies, resolvers, recoveryCount, registrationCount, configuration,
priorCommittedLogServers, recoveryState, recoveryStalled, reply);
}
};

View File

@ -22,6 +22,9 @@
#define CONFLICTSET_H
#pragma once
#include <utility>
#include <vector>
#include "fdbclient/CommitTransaction.h"
struct ConflictSet;
@ -40,23 +43,23 @@ struct ConflictBatch {
};
void addTransaction( const CommitTransactionRef& transaction );
void detectConflicts(Version now, Version newOldestVersion, vector<int>& nonConflicting, vector<int>* tooOldTransactions = NULL);
void GetTooOldTransactions(vector<int>& tooOldTransactions);
void detectConflicts(Version now, Version newOldestVersion, std::vector<int>& nonConflicting, std::vector<int>* tooOldTransactions = NULL);
void GetTooOldTransactions(std::vector<int>& tooOldTransactions);
private:
ConflictSet* cs;
Standalone< VectorRef< struct TransactionInfo* > > transactionInfo;
vector<struct KeyInfo> points;
std::vector<struct KeyInfo> points;
int transactionCount;
vector< pair<StringRef,StringRef> > combinedWriteConflictRanges;
vector< struct ReadConflictRange > combinedReadConflictRanges;
std::vector< std::pair<StringRef,StringRef> > combinedWriteConflictRanges;
std::vector< struct ReadConflictRange > combinedReadConflictRanges;
bool* transactionConflictStatus;
void checkIntraBatchConflicts();
void combineWriteConflictRanges();
void checkReadConflictRanges();
void mergeWriteConflictRanges(Version now);
void addConflictRanges(Version now, vector< pair<StringRef,StringRef> >::iterator begin, vector< pair<StringRef,StringRef> >::iterator end, class SkipList* part);
void addConflictRanges(Version now, std::vector< std::pair<StringRef,StringRef> >::iterator begin, std::vector< std::pair<StringRef,StringRef> >::iterator end, class SkipList* part);
};
#endif

View File

@ -25,6 +25,7 @@
#include "fdbclient/CoordinationInterface.h"
struct GenerationRegInterface {
constexpr static FileIdentifier file_identifier = 16726744;
RequestStream< struct GenerationRegReadRequest > read;
RequestStream< struct GenerationRegWriteRequest > write;
@ -52,6 +53,7 @@ struct GenerationRegInterface {
};
struct UniqueGeneration {
constexpr static FileIdentifier file_identifier = 16684234;
uint64_t generation;
UID uid;
UniqueGeneration() : generation(0) {}
@ -70,7 +72,20 @@ struct UniqueGeneration {
}
};
// Reply to GenerationRegReadRequest: the register's current value (if any) together
// with its generations. Wire struct — serializer order is the network contract.
struct GenerationRegReadReply {
constexpr static FileIdentifier file_identifier = 12623609;
// Stored value, absent if the key has never been written.
Optional<Value> value;
// gen / rgen: unique generations associated with the register state
// (write vs. read generation — confirm exact semantics with GenerationRegInterface).
UniqueGeneration gen, rgen;
GenerationRegReadReply() {}
GenerationRegReadReply( Optional<Value> value, UniqueGeneration gen, UniqueGeneration rgen ) : value(value), gen(gen), rgen(rgen) {}
template <class Ar>
void serialize(Ar& ar) {
serializer(ar, value, gen, rgen);
}
};
struct GenerationRegReadRequest {
constexpr static FileIdentifier file_identifier = 8975311;
Key key;
UniqueGeneration gen;
ReplyPromise<struct GenerationRegReadReply> reply;
@ -82,18 +97,8 @@ struct GenerationRegReadRequest {
}
};
struct GenerationRegReadReply {
Optional<Value> value;
UniqueGeneration gen, rgen;
GenerationRegReadReply() {}
GenerationRegReadReply( Optional<Value> value, UniqueGeneration gen, UniqueGeneration rgen ) : value(value), gen(gen), rgen(rgen) {}
template <class Ar>
void serialize(Ar& ar) {
serializer(ar, value, gen, rgen);
}
};
struct GenerationRegWriteRequest {
constexpr static FileIdentifier file_identifier = 3521510;
KeyValue kv;
UniqueGeneration gen;
ReplyPromise< UniqueGeneration > reply;
@ -116,6 +121,7 @@ struct LeaderElectionRegInterface : ClientLeaderRegInterface {
};
struct CandidacyRequest {
constexpr static FileIdentifier file_identifier = 14473958;
Key key;
LeaderInfo myInfo;
UID knownLeader, prevChangeID;
@ -131,6 +137,7 @@ struct CandidacyRequest {
};
struct LeaderHeartbeatRequest {
constexpr static FileIdentifier file_identifier = 9495992;
Key key;
LeaderInfo myInfo;
UID prevChangeID;
@ -146,6 +153,7 @@ struct LeaderHeartbeatRequest {
};
struct ForwardRequest {
constexpr static FileIdentifier file_identifier = 13570359;
Key key;
Value conn; // a cluster connection string
ReplyPromise<Void> reply;

View File

@ -25,6 +25,7 @@
#include "fdbrpc/Locality.h"
struct DataDistributorInterface {
constexpr static FileIdentifier file_identifier = 12383874;
RequestStream<ReplyPromise<Void>> waitFailure;
RequestStream<struct HaltDataDistributorRequest> haltDataDistributor;
struct LocalityData locality;
@ -49,6 +50,7 @@ struct DataDistributorInterface {
};
struct HaltDataDistributorRequest {
constexpr static FileIdentifier file_identifier = 1904127;
UID requesterID;
ReplyPromise<Void> reply;

View File

@ -164,7 +164,7 @@ public:
readyToPush(Void()), fileSizeWarningLimit(fileSizeWarningLimit), lastCommit(Void()), isFirstCommit(true)
{
if (BUGGIFY)
fileExtensionBytes = 1<<10 * deterministicRandom()->randomSkewedUInt32( 1, 40<<10 );
fileExtensionBytes = _PAGE_SIZE * deterministicRandom()->randomSkewedUInt32( 1, 10<<10 );
if (BUGGIFY)
fileShrinkBytes = _PAGE_SIZE * deterministicRandom()->randomSkewedUInt32( 1, 10<<10 );
files[0].dbgFilename = filename(0);
@ -283,21 +283,29 @@ public:
TraceEvent("DiskQueueReplaceTruncateEnded").detail("Filename", file->getFilename());
}
#if defined(_WIN32)
ACTOR static Future<Reference<IAsyncFile>> replaceFile(Reference<IAsyncFile> toReplace) {
// Windows doesn't support a rename over an open file.
wait( toReplace->truncate(4<<10) );
return toReplace;
}
#else
ACTOR static Future<Reference<IAsyncFile>> replaceFile(Reference<IAsyncFile> toReplace) {
incrementalTruncate( toReplace );
Reference<IAsyncFile> _replacement = wait( IAsyncFileSystem::filesystem()->open( toReplace->getFilename(), IAsyncFile::OPEN_ATOMIC_WRITE_AND_CREATE | IAsyncFile::OPEN_READWRITE | IAsyncFile::OPEN_UNCACHED | IAsyncFile::OPEN_UNBUFFERED | IAsyncFile::OPEN_LOCK, 0 ) );
Reference<IAsyncFile> _replacement = wait( IAsyncFileSystem::filesystem()->open( toReplace->getFilename(), IAsyncFile::OPEN_ATOMIC_WRITE_AND_CREATE | IAsyncFile::OPEN_CREATE | IAsyncFile::OPEN_READWRITE | IAsyncFile::OPEN_UNCACHED | IAsyncFile::OPEN_UNBUFFERED | IAsyncFile::OPEN_LOCK, 0600 ) );
state Reference<IAsyncFile> replacement = _replacement;
wait( replacement->sync() );
return replacement;
}
#endif
Future<Void> push(StringRef pageData, vector<Reference<SyncQueue>>* toSync) {
Future<Future<Void>> push(StringRef pageData, vector<Reference<SyncQueue>>* toSync) {
return push( this, pageData, toSync );
}
ACTOR static Future<Void> push(RawDiskQueue_TwoFiles* self, StringRef pageData, vector<Reference<SyncQueue>>* toSync) {
ACTOR static Future<Future<Void>> push(RawDiskQueue_TwoFiles* self, StringRef pageData, vector<Reference<SyncQueue>>* toSync) {
// Write the given data to the queue files, swapping or extending them if necessary.
// Don't do any syncs, but push the modified file(s) onto toSync.
ASSERT( self->readingFile == 2 );
@ -325,21 +333,27 @@ public:
std::swap(self->firstPages[0], self->firstPages[1]);
self->files[1].popped = 0;
self->writingPos = 0;
*self->firstPages[1] = *(const Page*)pageData.begin();
const int64_t activeDataVolume = pageCeiling(self->files[0].size - self->files[0].popped + self->fileExtensionBytes + self->fileShrinkBytes);
const int64_t desiredMaxFileSize = std::max( activeDataVolume, SERVER_KNOBS->TLOG_HARD_LIMIT_BYTES * 2 );
if (self->files[1].size > desiredMaxFileSize) {
const int64_t desiredMaxFileSize = pageCeiling( std::max( activeDataVolume, SERVER_KNOBS->TLOG_HARD_LIMIT_BYTES * 2 ) );
const bool frivolouslyTruncate = BUGGIFY_WITH_PROB(0.1);
if (self->files[1].size > desiredMaxFileSize || frivolouslyTruncate) {
// Either shrink self->files[1] to the size of self->files[0], or chop off fileShrinkBytes
int64_t maxShrink = std::max( pageFloor(self->files[1].size - desiredMaxFileSize), self->fileShrinkBytes );
if (maxShrink / SERVER_KNOBS->DISK_QUEUE_FILE_EXTENSION_BYTES >
SERVER_KNOBS->DISK_QUEUE_MAX_TRUNCATE_EXTENTS) {
int64_t maxShrink = pageFloor( std::max( self->files[1].size - desiredMaxFileSize, self->fileShrinkBytes ) );
if ((maxShrink > SERVER_KNOBS->DISK_QUEUE_MAX_TRUNCATE_BYTES) ||
(frivolouslyTruncate && g_random->random01() < 0.3)) {
TEST(true); // Replacing DiskQueue file
TraceEvent("DiskQueueReplaceFile", self->dbgid).detail("Filename", self->files[1].f->getFilename()).detail("OldFileSize", self->files[1].size).detail("ElidedTruncateSize", maxShrink);
Reference<IAsyncFile> newFile = wait( replaceFile(self->files[1].f) );
self->files[1].setFile(newFile);
self->files[1].size = 0;
waitfor.push_back( self->files[1].f->truncate( self->fileExtensionBytes ) );
self->files[1].size = self->fileExtensionBytes;
} else {
self->files[1].size -= maxShrink;
const int64_t startingSize = self->files[1].size;
self->files[1].size -= std::min(maxShrink, self->files[1].size);
self->files[1].size = std::max(self->files[1].size, self->fileExtensionBytes);
TraceEvent("DiskQueueTruncate", self->dbgid).detail("Filename", self->files[1].f->getFilename()).detail("OldFileSize", startingSize).detail("NewFileSize", self->files[1].size);
waitfor.push_back( self->files[1].f->truncate( self->files[1].size ) );
}
}
@ -355,9 +369,8 @@ public:
TraceEvent(SevWarnAlways, "DiskQueueFileTooLarge", self->dbgid).suppressFor(1.0).detail("Filename", self->filename(1)).detail("Size", self->files[1].size);
}
}
}
if (self->writingPos == 0) {
} else if (self->writingPos == 0) {
// If this is the first write to a brand new disk queue file.
*self->firstPages[1] = *(const Page*)pageData.begin();
}
@ -368,8 +381,7 @@ public:
waitfor.push_back( self->files[1].f->write( pageData.begin(), pageData.size(), self->writingPos ) );
self->writingPos += pageData.size();
wait( waitForAll(waitfor) );
return Void();
return waitForAll(waitfor);
}
ACTOR static UNCANCELLABLE Future<Void> pushAndCommit(RawDiskQueue_TwoFiles* self, StringRef pageData, StringBuffer* pageMem, uint64_t poppedPages) {
@ -396,11 +408,11 @@ public:
TEST( pageData.size() > sizeof(Page) ); // push more than one page of data
Future<Void> pushed = self->push( pageData, &syncFiles );
Future<Void> pushed = wait( self->push( pageData, &syncFiles ) );
pushing.send(Void());
wait( pushed );
ASSERT( syncFiles.size() >= 1 && syncFiles.size() <= 2 );
TEST(2==syncFiles.size()); // push spans both files
wait( pushed );
delete pageMem;
pageMem = 0;

View File

@ -20,6 +20,7 @@
#define SQLITE_THREADSAFE 0 // also in sqlite3.amalgamation.c!
#include "fdbrpc/crc32c.h"
#include "fdbserver/IKeyValueStore.h"
#include "fdbserver/CoroFlow.h"
#include "fdbserver/Knobs.h"
@ -90,32 +91,46 @@ struct PageChecksumCodec {
char *pData = (char *)data;
int dataLen = pageLen - sizeof(SumType);
SumType sum;
SumType *pSumInPage = (SumType *)(pData + dataLen);
// Write sum directly to page or to sum variable based on mode
SumType *sumOut = write ? pSumInPage : &sum;
sumOut->part1 = pageNumber; //DO NOT CHANGE
sumOut->part2 = 0x5ca1ab1e;
hashlittle2(pData, dataLen, &sumOut->part1, &sumOut->part2);
// Verify if not in write mode
if(!write && sum != *pSumInPage) {
if(!silent)
TraceEvent (SevError, "SQLitePageChecksumFailure")
.error(checksum_failed())
.detail("CodecPageSize", pageSize)
.detail("CodecReserveSize", reserveSize)
.detail("Filename", filename)
.detail("PageNumber", pageNumber)
.detail("PageSize", pageLen)
.detail("ChecksumInPage", pSumInPage->toString())
.detail("ChecksumCalculated", sum.toString());
return false;
if (write) {
// Always write a hashlittle2 checksum for new pages
pSumInPage->part1 = pageNumber; // DO NOT CHANGE
pSumInPage->part2 = 0x5ca1ab1e;
hashlittle2(pData, dataLen, &pSumInPage->part1, &pSumInPage->part2);
return true;
}
return true;
SumType sum;
if (pSumInPage->part1 == 0) {
// part1 being 0 indicates with high probability that a CRC32 checksum
// was used, so check that first. If this checksum fails, there is still
// some chance the page was written with hashlittle2, so fall back to checking
// hashlittle2
sum.part1 = 0;
sum.part2 = crc32c_append(0xfdbeefdb, static_cast<uint8_t*>(data), dataLen);
if (sum == *pSumInPage) return true;
}
SumType hashLittle2Sum;
hashLittle2Sum.part1 = pageNumber; // DO NOT CHANGE
hashLittle2Sum.part2 = 0x5ca1ab1e;
hashlittle2(pData, dataLen, &hashLittle2Sum.part1, &hashLittle2Sum.part2);
if (hashLittle2Sum == *pSumInPage) return true;
if (!silent) {
TraceEvent trEvent(SevError, "SQLitePageChecksumFailure");
trEvent.error(checksum_failed())
.detail("CodecPageSize", pageSize)
.detail("CodecReserveSize", reserveSize)
.detail("Filename", filename)
.detail("PageNumber", pageNumber)
.detail("PageSize", pageLen)
.detail("ChecksumInPage", pSumInPage->toString())
.detail("ChecksumCalculatedHL2", hashLittle2Sum.toString());
if (pSumInPage->part1 == 0) trEvent.detail("ChecksumCalculatedCRC", sum.toString());
}
return false;
}
static void * codec(void *vpSelf, void *data, Pgno pageNumber, int op) {

View File

@ -75,7 +75,7 @@ ServerKnobs::ServerKnobs(bool randomize, ClientKnobs* clientKnobs) {
init( TLOG_SPILL_REFERENCE_MAX_BYTES_PER_BATCH, 16<<10 ); if ( randomize && BUGGIFY ) TLOG_SPILL_REFERENCE_MAX_BYTES_PER_BATCH = 500;
init( DISK_QUEUE_FILE_EXTENSION_BYTES, 10<<20 ); // BUGGIFYd per file within the DiskQueue
init( DISK_QUEUE_FILE_SHRINK_BYTES, 100<<20 ); // BUGGIFYd per file within the DiskQueue
init( DISK_QUEUE_MAX_TRUNCATE_EXTENTS, 1<<10 ); if ( randomize && BUGGIFY ) DISK_QUEUE_MAX_TRUNCATE_EXTENTS = 0;
init( DISK_QUEUE_MAX_TRUNCATE_BYTES, 2<<30 ); if ( randomize && BUGGIFY ) DISK_QUEUE_MAX_TRUNCATE_BYTES = 0;
init( TLOG_DEGRADED_DELAY_COUNT, 5 );
init( TLOG_DEGRADED_DURATION, 5.0 );

View File

@ -79,7 +79,7 @@ public:
int64_t TLOG_SPILL_REFERENCE_MAX_BYTES_PER_BATCH;
int64_t DISK_QUEUE_FILE_EXTENSION_BYTES; // When we grow the disk queue, by how many bytes should it grow?
int64_t DISK_QUEUE_FILE_SHRINK_BYTES; // When we shrink the disk queue, by how many bytes should it shrink?
int DISK_QUEUE_MAX_TRUNCATE_EXTENTS;
int DISK_QUEUE_MAX_TRUNCATE_BYTES; // A truncate larger than this will cause the file to be replaced instead.
int TLOG_DEGRADED_DELAY_COUNT;
double TLOG_DEGRADED_DURATION;

View File

@ -75,8 +75,10 @@ ACTOR Future<Void> changeLeaderCoordinators( ServerCoordinators coordinators, Va
return Void();
}
ACTOR Future<Void> tryBecomeLeaderInternal( ServerCoordinators coordinators, Value proposedSerializedInterface, Reference<AsyncVar<Value>> outSerializedLeader, bool hasConnected, Reference<AsyncVar<ClusterControllerPriorityInfo>> asyncPriorityInfo ) {
state Reference<AsyncVar<vector<Optional<LeaderInfo>>>> nominees( new AsyncVar<vector<Optional<LeaderInfo>>>() );
ACTOR Future<Void> tryBecomeLeaderInternal(ServerCoordinators coordinators, Value proposedSerializedInterface,
Reference<AsyncVar<Value>> outSerializedLeader, bool hasConnected,
Reference<AsyncVar<ClusterControllerPriorityInfo>> asyncPriorityInfo) {
state Reference<AsyncVar<vector<Optional<LeaderInfo>>>> nominees(new AsyncVar<vector<Optional<LeaderInfo>>>());
state LeaderInfo myInfo;
state Future<Void> candidacies;
state bool iAmLeader = false;

View File

@ -24,6 +24,7 @@
#include "fdbrpc/fdbrpc.h"
#include "fdbrpc/Locality.h"
#include "fdbclient/FDBTypes.h"
class ServerCoordinators;
@ -57,9 +58,12 @@ Future<Void> tryBecomeLeader( ServerCoordinators const& coordinators,
bool hasConnected,
Reference<AsyncVar<ClusterControllerPriorityInfo>> const& asyncPriorityInfo)
{
Reference<AsyncVar<Value>> serializedInfo( new AsyncVar<Value> );
Future<Void> m = tryBecomeLeaderInternal( coordinators, BinaryWriter::toValue(proposedInterface, IncludeVersion()), serializedInfo, hasConnected, asyncPriorityInfo );
return m || asyncDeserialize( serializedInfo, outKnownLeader );
Reference<AsyncVar<Value>> serializedInfo(new AsyncVar<Value>);
Future<Void> m = tryBecomeLeaderInternal(
coordinators,
g_network->useObjectSerializer() ? ObjectWriter::toValue(proposedInterface) : BinaryWriter::toValue(proposedInterface, IncludeVersion()),
serializedInfo, hasConnected, asyncPriorityInfo);
return m || asyncDeserialize(serializedInfo, outKnownLeader, g_network->useObjectSerializer());
}
#pragma endregion

View File

@ -204,7 +204,7 @@ public:
return tag.id % logServers.size();
}
void updateLocalitySet( vector<LocalityData> const& localities ) {
void updateLocalitySet( std::vector<LocalityData> const& localities ) {
LocalityMap<int>* logServerMap;
logServerSet = Reference<LocalitySet>(new LocalityMap<int>());
@ -418,7 +418,7 @@ struct ILogSystem {
struct MergedPeekCursor : IPeekCursor, ReferenceCounted<MergedPeekCursor> {
Reference<LogSet> logSet;
vector< Reference<IPeekCursor> > serverCursors;
std::vector< Reference<IPeekCursor> > serverCursors;
std::vector<LocalityEntry> locations;
std::vector< std::pair<LogMessageVersion, int> > sortedVersions;
Tag tag;
@ -429,9 +429,9 @@ struct ILogSystem {
UID randomID;
int tLogReplicationFactor;
MergedPeekCursor( vector< Reference<ILogSystem::IPeekCursor> > const& serverCursors, Version begin );
MergedPeekCursor( std::vector< Reference<ILogSystem::IPeekCursor> > const& serverCursors, Version begin );
MergedPeekCursor( std::vector<Reference<AsyncVar<OptionalInterface<TLogInterface>>>> const& logServers, int bestServer, int readQuorum, Tag tag, Version begin, Version end, bool parallelGetMore, std::vector<LocalityData> const& tLogLocalities, Reference<IReplicationPolicy> const tLogPolicy, int tLogReplicationFactor );
MergedPeekCursor( vector< Reference<IPeekCursor> > const& serverCursors, LogMessageVersion const& messageVersion, int bestServer, int readQuorum, Optional<LogMessageVersion> nextVersion, Reference<LogSet> logSet, int tLogReplicationFactor );
MergedPeekCursor( std::vector< Reference<IPeekCursor> > const& serverCursors, LogMessageVersion const& messageVersion, int bestServer, int readQuorum, Optional<LogMessageVersion> nextVersion, Reference<LogSet> logSet, int tLogReplicationFactor );
virtual Reference<IPeekCursor> cloneNoMore();
virtual void setProtocolVersion( uint64_t version );
@ -636,7 +636,7 @@ struct ILogSystem {
virtual Reference<IPeekCursor> peek( UID dbgid, Version begin, Optional<Version> end, std::vector<Tag> tags, bool parallelGetMore = false ) = 0;
// Same contract as peek(), but for a set of tags
virtual Reference<IPeekCursor> peekSingle( UID dbgid, Version begin, Tag tag, vector<pair<Version,Tag>> history = vector<pair<Version,Tag>>() ) = 0;
virtual Reference<IPeekCursor> peekSingle( UID dbgid, Version begin, Tag tag, std::vector<std::pair<Version,Tag>> history = std::vector<std::pair<Version,Tag>>() ) = 0;
// Same contract as peek(), but blocks until the preferred log server(s) for the given tag are available (and is correspondingly less expensive)
virtual Reference<IPeekCursor> peekLogRouter( UID dbgid, Version begin, Tag tag ) = 0;
@ -690,7 +690,7 @@ struct ILogSystem {
virtual Future<Void> onLogSystemConfigChange() = 0;
// Returns when the log system configuration has changed due to a tlog rejoin.
virtual void getPushLocations( std::vector<Tag> const& tags, vector<int>& locations ) = 0;
virtual void getPushLocations( std::vector<Tag> const& tags, std::vector<int>& locations ) = 0;
virtual bool hasRemoteLogs() = 0;
@ -807,10 +807,10 @@ struct LogPushData : NonCopyable {
private:
Reference<ILogSystem> logSystem;
vector<Tag> next_message_tags;
vector<Tag> prev_tags;
vector<BinaryWriter> messagesWriter;
vector<int> msg_locations;
std::vector<Tag> next_message_tags;
std::vector<Tag> prev_tags;
std::vector<BinaryWriter> messagesWriter;
std::vector<int> msg_locations;
uint32_t subsequence;
};

View File

@ -28,6 +28,7 @@
template <class Interface>
struct OptionalInterface {
friend class serializable_traits<OptionalInterface<Interface>>;
// Represents an interface with a known id() and possibly known actual endpoints.
// For example, an OptionalInterface<TLogInterface> represents a particular tlog by id, which you might or might not presently know how to communicate with
@ -58,7 +59,27 @@ protected:
class LogSet;
struct OldLogData;
template <class Interface>
struct serializable_traits<OptionalInterface<Interface>> : std::true_type {
template <class Archiver>
static void serialize(Archiver& ar, OptionalInterface<Interface>& m) {
if constexpr (!Archiver::isDeserializing) {
if (m.iface.present()) {
m.ident = m.iface.get().id();
}
}
::serializer(ar, m.iface, m.ident);
if constexpr (Archiver::isDeserializing) {
if (m.iface.present()) {
m.ident = m.iface.get().id();
}
}
}
};
struct TLogSet {
constexpr static FileIdentifier file_identifier = 6302317;
std::vector<OptionalInterface<TLogInterface>> tLogs;
std::vector<OptionalInterface<TLogInterface>> logRouters;
int32_t tLogWriteAntiQuorum, tLogReplicationFactor;
@ -116,17 +137,23 @@ struct TLogSet {
template <class Ar>
void serialize( Ar& ar ) {
serializer(ar, tLogs, logRouters, tLogWriteAntiQuorum, tLogReplicationFactor, tLogPolicy, tLogLocalities, isLocal, locality, startVersion, satelliteTagLocations);
if (ar.isDeserializing && ar.protocolVersion() < 0x0FDB00B061030001LL) {
tLogVersion = TLogVersion::V2;
if constexpr (is_fb_function<Ar>) {
serializer(ar, tLogs, logRouters, tLogWriteAntiQuorum, tLogReplicationFactor, tLogPolicy, tLogLocalities,
isLocal, locality, startVersion, satelliteTagLocations, tLogVersion);
} else {
serializer(ar, tLogVersion);
serializer(ar, tLogs, logRouters, tLogWriteAntiQuorum, tLogReplicationFactor, tLogPolicy, tLogLocalities, isLocal, locality, startVersion, satelliteTagLocations);
if (ar.isDeserializing && ar.protocolVersion() < 0x0FDB00B061030001LL) {
tLogVersion = TLogVersion::V2;
} else {
serializer(ar, tLogVersion);
}
ASSERT(tLogPolicy.getPtr() == nullptr || tLogVersion != TLogVersion::UNSET);
}
ASSERT(tLogPolicy.getPtr() == nullptr || tLogVersion != TLogVersion::UNSET);
}
};
struct OldTLogConf {
constexpr static FileIdentifier file_identifier = 16233772;
std::vector<TLogSet> tLogs;
Version epochEnd;
int32_t logRouterTags;
@ -136,7 +163,7 @@ struct OldTLogConf {
explicit OldTLogConf(const OldLogData&);
std::string toString() const {
return format("end: %d tags: %d %s", epochEnd, logRouterTags, describe(tLogs).c_str());
return format("end: %d tags: %d %s", epochEnd, logRouterTags, describe(tLogs).c_str());
}
bool operator == ( const OldTLogConf& rhs ) const {
@ -168,6 +195,7 @@ enum class LogSystemType {
BINARY_SERIALIZABLE(LogSystemType);
struct LogSystemConfig {
constexpr static FileIdentifier file_identifier = 16360847;
LogSystemType logSystemType;
std::vector<TLogSet> tLogs;
int32_t logRouterTags;

View File

@ -31,6 +31,7 @@
typedef uint64_t DBRecoveryCount;
struct MasterInterface {
constexpr static FileIdentifier file_identifier = 5979145;
LocalityData locality;
RequestStream< ReplyPromise<Void> > waitFailure;
RequestStream< struct TLogRejoinRequest > tlogRejoin; // sent by tlog (whether or not rebooted) to communicate with a new master
@ -42,7 +43,9 @@ struct MasterInterface {
UID id() const { return changeCoordinators.getEndpoint().token; }
template <class Archive>
void serialize(Archive& ar) {
ASSERT( ar.protocolVersion() >= 0x0FDB00A200040001LL );
if constexpr (!is_fb_function<Archive>) {
ASSERT( ar.protocolVersion() >= 0x0FDB00A200040001LL );
}
serializer(ar, locality, waitFailure, tlogRejoin, changeCoordinators, getCommitVersion);
}
@ -52,6 +55,7 @@ struct MasterInterface {
};
struct TLogRejoinRequest {
constexpr static FileIdentifier file_identifier = 15692200;
TLogInterface myInterface;
ReplyPromise<bool> reply; // false means someone else registered, so we should re-register. true means this master is recovered, so don't send again to the same master.
@ -64,6 +68,7 @@ struct TLogRejoinRequest {
};
struct ChangeCoordinatorsRequest {
constexpr static FileIdentifier file_identifier = 13605416;
Standalone<StringRef> newConnectionString;
ReplyPromise<Void> reply; // normally throws even on success!
@ -77,6 +82,7 @@ struct ChangeCoordinatorsRequest {
};
struct ResolverMoveRef {
constexpr static FileIdentifier file_identifier = 11945475;
KeyRangeRef range;
int dest;
@ -102,6 +108,7 @@ struct ResolverMoveRef {
};
struct GetCommitVersionReply {
constexpr static FileIdentifier file_identifier = 3568822;
Standalone<VectorRef<ResolverMoveRef>> resolverChanges;
Version resolverChangesVersion;
Version version;
@ -118,6 +125,7 @@ struct GetCommitVersionReply {
};
struct GetCommitVersionRequest {
constexpr static FileIdentifier file_identifier = 16683181;
uint64_t requestNum;
uint64_t mostRecentProcessedRequestNum;
UID requestingProxy;

View File

@ -24,6 +24,7 @@
#include "fdbclient/FDBTypes.h"
#include "fdbrpc/fdbrpc.h"
#include "flow/FileIdentifier.h"
struct NetworkTestInterface {
RequestStream< struct NetworkTestRequest > test;
@ -32,7 +33,19 @@ struct NetworkTestInterface {
NetworkTestInterface( INetwork* local );
};
struct NetworkTestReply {
constexpr static FileIdentifier file_identifier = 14465374;
Value value;
NetworkTestReply() {}
NetworkTestReply( Value value ) : value(value) {}
template <class Ar>
void serialize(Ar& ar) {
serializer(ar, value);
}
};
struct NetworkTestRequest {
constexpr static FileIdentifier file_identifier = 4146513;
Key key;
uint32_t replySize;
ReplyPromise<struct NetworkTestReply> reply;
@ -44,16 +57,6 @@ struct NetworkTestRequest {
}
};
struct NetworkTestReply {
Value value;
NetworkTestReply() {}
NetworkTestReply( Value value ) : value(value) {}
template <class Ar>
void serialize(Ar& ar) {
serializer(ar, value);
}
};
Future<Void> networkTestServer();
Future<Void> networkTestClient( std:: string const& testServers );

View File

@ -26,6 +26,7 @@
#include "fdbrpc/Locality.h"
struct RatekeeperInterface {
constexpr static FileIdentifier file_identifier = 5983305;
RequestStream<ReplyPromise<Void>> waitFailure;
RequestStream<struct GetRateInfoRequest> getRateInfo;
RequestStream<struct HaltRatekeeperRequest> haltRatekeeper;
@ -51,7 +52,21 @@ struct RatekeeperInterface {
}
};
struct GetRateInfoReply {
constexpr static FileIdentifier file_identifier = 7845006;
double transactionRate;
double batchTransactionRate;
double leaseDuration;
HealthMetrics healthMetrics;
template <class Ar>
void serialize(Ar& ar) {
serializer(ar, transactionRate, batchTransactionRate, leaseDuration, healthMetrics);
}
};
struct GetRateInfoRequest {
constexpr static FileIdentifier file_identifier = 9068521;
UID requesterID;
int64_t totalReleasedTransactions;
int64_t batchReleasedTransactions;
@ -68,19 +83,8 @@ struct GetRateInfoRequest {
}
};
struct GetRateInfoReply {
double transactionRate;
double batchTransactionRate;
double leaseDuration;
HealthMetrics healthMetrics;
template <class Ar>
void serialize(Ar& ar) {
serializer(ar, transactionRate, batchTransactionRate, leaseDuration, healthMetrics);
}
};
struct HaltRatekeeperRequest {
constexpr static FileIdentifier file_identifier = 6997218;
UID requesterID;
ReplyPromise<Void> reply;

View File

@ -25,6 +25,7 @@
#include "fdbclient/FDBTypes.h"
struct ResolverInterface {
constexpr static FileIdentifier file_identifier = 1755944;
enum { LocationAwareLoadBalance = 1 };
enum { AlwaysFresh = 1 };
@ -54,6 +55,7 @@ struct ResolverInterface {
};
struct StateTransactionRef {
constexpr static FileIdentifier file_identifier = 6150271;
StateTransactionRef() {}
StateTransactionRef(const bool committed, VectorRef<MutationRef> const& mutations) : committed(committed), mutations(mutations) {}
StateTransactionRef(Arena &p, const StateTransactionRef &toCopy) : committed(toCopy.committed), mutations(p, toCopy.mutations) {}
@ -70,6 +72,7 @@ struct StateTransactionRef {
};
struct ResolveTransactionBatchReply {
constexpr static FileIdentifier file_identifier = 15472264;
Arena arena;
VectorRef<uint8_t> committed;
Optional<UID> debugID;
@ -83,6 +86,7 @@ struct ResolveTransactionBatchReply {
};
struct ResolveTransactionBatchRequest {
constexpr static FileIdentifier file_identifier = 16462858;
Arena arena;
Version prevVersion;
@ -100,6 +104,7 @@ struct ResolveTransactionBatchRequest {
};
struct ResolutionMetricsRequest {
constexpr static FileIdentifier file_identifier = 11663527;
ReplyPromise<int64_t> reply;
template <class Archive>
@ -109,6 +114,7 @@ struct ResolutionMetricsRequest {
};
struct ResolutionSplitReply {
constexpr static FileIdentifier file_identifier = 12137765;
Key key;
int64_t used;
template <class Archive>
@ -119,6 +125,7 @@ struct ResolutionSplitReply {
};
struct ResolutionSplitRequest {
constexpr static FileIdentifier file_identifier = 167535;
KeyRange range;
int64_t offset;
bool front;

View File

@ -28,6 +28,7 @@
#include "fdbrpc/Locality.h"
struct RestoreInterface {
constexpr static FileIdentifier file_identifier = 13398189;
RequestStream< struct TestRequest > test;
bool operator == (RestoreInterface const& r) const { return id() == r.id(); }
@ -45,7 +46,21 @@ struct RestoreInterface {
}
};
struct TestReply {
constexpr static FileIdentifier file_identifier = 12075719;
int replyData;
TestReply() : replyData(0) {}
explicit TestReply(int replyData) : replyData(replyData) {}
template <class Ar>
void serialize(Ar& ar) {
serializer(ar, replyData);
}
};
struct TestRequest {
constexpr static FileIdentifier file_identifier = 14404487;
int testData;
ReplyPromise< struct TestReply > reply;
@ -58,18 +73,6 @@ struct TestRequest {
}
};
struct TestReply {
int replyData;
TestReply() : replyData(0) {}
explicit TestReply(int replyData) : replyData(replyData) {}
template <class Ar>
void serialize(Ar& ar) {
serializer(ar, replyData);
}
};
Future<Void> restoreWorker(Reference<ClusterConnectionFile> const& ccf, LocalityData const& locality);
#endif

View File

@ -31,6 +31,7 @@
#include "fdbserver/LatencyBandConfig.h"
struct ServerDBInfo {
constexpr static FileIdentifier file_identifier = 13838807;
// This structure contains transient information which is broadcast to all workers for a database,
// permitting them to communicate with each other. It is not available to the client. This mechanism
// (see GetServerDBInfoRequest) is closely parallel to OpenDatabaseRequest for the client.
@ -61,4 +62,17 @@ struct ServerDBInfo {
}
};
struct GetServerDBInfoRequest {
constexpr static FileIdentifier file_identifier = 9467438;
UID knownServerInfoID;
Standalone<VectorRef<StringRef>> issues;
std::vector<NetworkAddress> incompatiblePeers;
ReplyPromise< struct ServerDBInfo > reply;
template <class Ar>
void serialize(Ar& ar) {
serializer(ar, knownServerInfoID, issues, incompatiblePeers, reply);
}
};
#endif

View File

@ -211,8 +211,11 @@ ACTOR Future<ISimulator::KillType> simulatedFDBDRebooter(Reference<ClusterConnec
wait( delay( waitTime ) );
state ISimulator::ProcessInfo *process = g_simulator.newProcess( "Server", ip, port, listenPerProcess, localities, processClass, dataFolder->c_str(), coordFolder->c_str() );
wait( g_simulator.onProcess(process, TaskDefaultYield) ); // Now switch execution to the process on which we will run
state ISimulator::ProcessInfo* process =
g_simulator.newProcess("Server", ip, port, listenPerProcess, localities, processClass, dataFolder->c_str(),
coordFolder->c_str());
wait(g_simulator.onProcess(process,
TaskDefaultYield)); // Now switch execution to the process on which we will run
state Future<ISimulator::KillType> onShutdown = process->onShutdown();
try {
@ -600,9 +603,10 @@ IPAddress makeIPAddressForSim(bool isIPv6, std::array<int, 4> parts) {
#include "fdbclient/MonitorLeader.h"
ACTOR Future<Void> restartSimulatedSystem(
vector<Future<Void>> *systemActors, std::string baseFolder, int* pTesterCount,
Optional<ClusterConnectionString> *pConnString, Standalone<StringRef> *pStartingConfiguration, Reference<TLSOptions> tlsOptions, int extraDB) {
ACTOR Future<Void> restartSimulatedSystem(vector<Future<Void>>* systemActors, std::string baseFolder, int* pTesterCount,
Optional<ClusterConnectionString>* pConnString,
Standalone<StringRef>* pStartingConfiguration,
Reference<TLSOptions> tlsOptions, int extraDB) {
CSimpleIni ini;
ini.SetUnicode();
ini.LoadFile(joinPath(baseFolder, "restartInfo.ini").c_str());
@ -697,9 +701,11 @@ ACTOR Future<Void> restartSimulatedSystem(
localities.set(LiteralStringRef("data_hall"), dcUID);
// SOMEDAY: parse backup agent from test file
systemActors->push_back( reportErrors( simulatedMachine(
conn, ipAddrs, usingSSL, tlsOptions, localities, processClass, baseFolder, true, i == useSeedForMachine, enableExtraDB, usingSSL && (listenersPerProcess == 1 || processClass == ProcessClass::TesterClass) ),
processClass == ProcessClass::TesterClass ? "SimulatedTesterMachine" : "SimulatedMachine") );
systemActors->push_back(reportErrors(
simulatedMachine(conn, ipAddrs, usingSSL, tlsOptions, localities, processClass, baseFolder, true,
i == useSeedForMachine, enableExtraDB,
usingSSL && (listenersPerProcess == 1 || processClass == ProcessClass::TesterClass)),
processClass == ProcessClass::TesterClass ? "SimulatedTesterMachine" : "SimulatedMachine"));
}
g_simulator.desiredCoordinators = desiredCoordinators;
@ -1276,7 +1282,7 @@ void setupSimulatedSystem(vector<Future<Void>>* systemActors, std::string baseFo
LocalityData localities(Optional<Standalone<StringRef>>(), zoneId, machineId, dcUID);
localities.set(LiteralStringRef("data_hall"), dcUID);
systemActors->push_back(reportErrors(simulatedMachine(conn, ips, sslEnabled, tlsOptions,
localities, processClass, baseFolder, false, machine == useSeedForMachine, true, sslOnly ), "SimulatedMachine"));
localities, processClass, baseFolder, false, machine == useSeedForMachine, true, sslOnly), "SimulatedMachine"));
if (extraDB && g_simulator.extraDB->toString() != conn.toString()) {
std::vector<IPAddress> extraIps;
@ -1290,7 +1296,7 @@ void setupSimulatedSystem(vector<Future<Void>>* systemActors, std::string baseFo
localities.set(LiteralStringRef("data_hall"), dcUID);
systemActors->push_back(reportErrors(simulatedMachine(*g_simulator.extraDB, extraIps, sslEnabled, tlsOptions,
localities,
processClass, baseFolder, false, machine == useSeedForMachine, false, sslOnly ), "SimulatedMachine"));
processClass, baseFolder, false, machine == useSeedForMachine, false, sslOnly), "SimulatedMachine"));
}
assignedMachines++;
@ -1318,7 +1324,7 @@ void setupSimulatedSystem(vector<Future<Void>>* systemActors, std::string baseFo
systemActors->push_back( reportErrors( simulatedMachine(
conn, ips, sslEnabled, tlsOptions,
localities, ProcessClass(ProcessClass::TesterClass, ProcessClass::CommandLineSource),
baseFolder, false, i == useSeedForMachine, false, sslEnabled ),
baseFolder, false, i == useSeedForMachine, false, sslEnabled),
"SimulatedTesterMachine") );
}
*pStartingConfiguration = startingConfigString;
@ -1374,7 +1380,7 @@ void checkExtraDB(const char *testFile, int &extraDB, int &minimumReplication, i
ifs.close();
}
ACTOR void setupAndRun(std::string dataFolder, const char *testFile, bool rebooting, Reference<TLSOptions> tlsOptions ) {
ACTOR void setupAndRun(std::string dataFolder, const char *testFile, bool rebooting, Reference<TLSOptions> tlsOptions) {
state vector<Future<Void>> systemActors;
state Optional<ClusterConnectionString> connFile;
state Standalone<StringRef> startingConfiguration;
@ -1404,9 +1410,10 @@ ACTOR void setupAndRun(std::string dataFolder, const char *testFile, bool reboot
try {
//systemActors.push_back( startSystemMonitor(dataFolder) );
if (rebooting) {
wait( timeoutError( restartSimulatedSystem( &systemActors, dataFolder, &testerCount, &connFile, &startingConfiguration, tlsOptions, extraDB), 100.0 ) );
}
else {
wait(timeoutError(restartSimulatedSystem(&systemActors, dataFolder, &testerCount, &connFile,
&startingConfiguration, tlsOptions, extraDB),
100.0));
} else {
g_expect_full_pointermap = 1;
setupSimulatedSystem(&systemActors, dataFolder, &testerCount, &connFile, &startingConfiguration, extraDB,
minimumReplication, minimumRegions, tlsOptions);

View File

@ -24,6 +24,7 @@
#include <algorithm>
#include <numeric>
#include <string>
#include <vector>
/*
#ifdef __GNUG__
@ -43,9 +44,8 @@
using std::min;
using std::max;
using std::make_pair;
static vector<PerfDoubleCounter*> skc;
static std::vector<PerfDoubleCounter*> skc;
static thread_local uint32_t g_seed = 0;
@ -199,7 +199,7 @@ bool operator == (const KeyInfo& lhs, const KeyInfo& rhs ) {
return !(lhs<rhs || rhs<lhs);
}
void swapSort(vector<KeyInfo>& points, int a, int b){
void swapSort(std::vector<KeyInfo>& points, int a, int b){
if (points[b] < points[a]){
KeyInfo temp;
temp = points[a];
@ -208,7 +208,7 @@ void swapSort(vector<KeyInfo>& points, int a, int b){
}
}
void smallSort(vector<KeyInfo>& points, int start, int N){
void smallSort(std::vector<KeyInfo>& points, int start, int N){
for (int i=1;i<N;i++)
for (int j=i;j>0;j-=2)
swapSort(points, start+j-1, start+j);
@ -224,12 +224,12 @@ struct SortTask {
SortTask(int begin, int size, int character) : begin(begin), size(size), character(character) {}
};
void sortPoints(vector<KeyInfo>& points){
vector<SortTask> tasks;
vector<KeyInfo> newPoints;
vector<int> counts;
void sortPoints(std::vector<KeyInfo>& points){
std::vector<SortTask> tasks;
std::vector<KeyInfo> newPoints;
std::vector<int> counts;
tasks.push_back( SortTask(0, points.size(), 0) );
tasks.emplace_back(0, points.size(), 0);
while (tasks.size()){
SortTask st = tasks.back();
@ -259,7 +259,7 @@ void sortPoints(vector<KeyInfo>& points){
for(int i=0;i<counts.size();i++){
int temp = counts[i];
if (temp > 1)
tasks.push_back(SortTask(st.begin+total, temp, st.character+1));
tasks.emplace_back(st.begin+total, temp, st.character+1);
counts[i] = total;
total += temp;
}
@ -569,7 +569,7 @@ public:
}
void concatenate( SkipList* input, int count ) {
vector<Finger> ends( count-1 );
std::vector<Finger> ends( count-1 );
for(int i=0; i<ends.size(); i++)
input[i].getEnd( ends[i] );
@ -948,9 +948,9 @@ struct ConflictSet {
SkipList versionHistory;
Key removalKey;
Version oldestVersion;
vector<PAction> worker_nextAction;
vector<Event*> worker_ready;
vector<Event*> worker_finished;
std::vector<PAction> worker_nextAction;
std::vector<Event*> worker_ready;
std::vector<Event*> worker_finished;
};
ConflictSet* newConflictSet() { return new ConflictSet; }
@ -989,18 +989,18 @@ void ConflictBatch::addTransaction( const CommitTransactionRef& tr ) {
info->readRanges.resize( arena, tr.read_conflict_ranges.size() );
info->writeRanges.resize( arena, tr.write_conflict_ranges.size() );
vector<KeyInfo> &points = this->points;
std::vector<KeyInfo>& points = this->points;
for(int r=0; r<tr.read_conflict_ranges.size(); r++) {
const KeyRangeRef& range = tr.read_conflict_ranges[r];
points.push_back( KeyInfo( range.begin, false, true, false, t, &info->readRanges[r].first ) );
points.emplace_back(range.begin, false, true, false, t, &info->readRanges[r].first);
//points.back().keyEnd = StringRef(buf,range.second);
points.push_back( KeyInfo( range.end, false, false, false, t, &info->readRanges[r].second ) );
combinedReadConflictRanges.push_back( ReadConflictRange( range.begin, range.end, tr.read_snapshot, t ) );
points.emplace_back(range.end, false, false, false, t, &info->readRanges[r].second);
combinedReadConflictRanges.emplace_back(range.begin, range.end, tr.read_snapshot, t);
}
for(int r=0; r<tr.write_conflict_ranges.size(); r++) {
const KeyRangeRef& range = tr.write_conflict_ranges[r];
points.push_back( KeyInfo( range.begin, false, true, true, t, &info->writeRanges[r].first ) );
points.push_back( KeyInfo( range.end, false, false, true, t, &info->writeRanges[r].second ) );
points.emplace_back(range.begin, false, true, true, t, &info->writeRanges[r].first);
points.emplace_back(range.end, false, false, true, t, &info->writeRanges[r].second);
}
}
@ -1008,7 +1008,7 @@ void ConflictBatch::addTransaction( const CommitTransactionRef& tr ) {
}
class MiniConflictSet2 : NonCopyable {
vector<bool> values;
std::vector<bool> values;
public:
explicit MiniConflictSet2( int size ) {
values.assign( size, false );
@ -1028,21 +1028,21 @@ public:
class MiniConflictSet : NonCopyable {
typedef uint64_t wordType;
enum { bucketShift = 6, bucketMask=sizeof(wordType)*8-1 };
vector<wordType> values; // undefined when andValues is true for a range of values
vector<wordType> orValues;
vector<wordType> andValues;
std::vector<wordType> values; // undefined when andValues is true for a range of values
std::vector<wordType> orValues;
std::vector<wordType> andValues;
MiniConflictSet2 debug; // SOMEDAY: Test on big ranges, eliminate this
uint64_t bitMask(unsigned int bit){ // computes results for bit%word
return (((wordType)1) << ( bit & bucketMask )); // '&' unnecesary?
}
void setNthBit(vector<wordType> &v, const unsigned int bit){
void setNthBit(std::vector<wordType>& v, const unsigned int bit){
v[bit>>bucketShift] |= bitMask(bit);
}
void clearNthBit(vector<wordType> &v, const unsigned int bit){
void clearNthBit(std::vector<wordType>& v, const unsigned int bit){
v[bit>>bucketShift] &= ~(bitMask(bit));
}
bool getNthBit(const vector<wordType> &v, const unsigned int bit){
bool getNthBit(const std::vector<wordType>& v, const unsigned int bit){
return (v[bit>>bucketShift] & bitMask(bit)) != 0;
}
int wordsForNBits(unsigned int bits){
@ -1060,7 +1060,7 @@ class MiniConflictSet : NonCopyable {
return (b&bucketMask) ? lowBits(b) : -1;
}
void setBits(vector<wordType> &v, int bitBegin, int bitEnd, bool fillMiddle){
void setBits(std::vector<wordType>& v, int bitBegin, int bitEnd, bool fillMiddle){
if (bitBegin >= bitEnd) return;
int beginWord = bitBegin>>bucketShift;
int lastWord = ((bitEnd+bucketMask) >> bucketShift) - 1;
@ -1075,7 +1075,7 @@ class MiniConflictSet : NonCopyable {
}
}
bool orBits(vector<wordType> &v, int bitBegin, int bitEnd, bool getMiddle) {
bool orBits(std::vector<wordType>& v, int bitBegin, int bitEnd, bool getMiddle) {
if (bitBegin >= bitEnd) return false;
int beginWord = bitBegin >> bucketShift;
int lastWord = ((bitEnd+bucketMask) >> bucketShift) - 1;
@ -1152,7 +1152,7 @@ void ConflictBatch::checkIntraBatchConflicts() {
}
}
void ConflictBatch::GetTooOldTransactions(vector<int>& tooOldTransactions) {
void ConflictBatch::GetTooOldTransactions(std::vector<int>& tooOldTransactions) {
for (int i = 0; i<transactionInfo.size(); i++) {
if (transactionInfo[i]->tooOld) {
tooOldTransactions.push_back(i);
@ -1160,7 +1160,7 @@ void ConflictBatch::GetTooOldTransactions(vector<int>& tooOldTransactions) {
}
}
void ConflictBatch::detectConflicts(Version now, Version newOldestVersion, vector<int>& nonConflicting, vector<int>* tooOldTransactions) {
void ConflictBatch::detectConflicts(Version now, Version newOldestVersion, std::vector<int>& nonConflicting, std::vector<int>* tooOldTransactions) {
double t = timer();
sortPoints( points );
//std::sort( combinedReadConflictRanges.begin(), combinedReadConflictRanges.end() );
@ -1232,7 +1232,7 @@ DISABLE_ZERO_DIVISION_FLAG
}
}
void ConflictBatch::addConflictRanges(Version now, vector< pair<StringRef,StringRef> >::iterator begin, vector< pair<StringRef,StringRef> >::iterator end,SkipList* part) {
void ConflictBatch::addConflictRanges(Version now, std::vector< std::pair<StringRef,StringRef> >::iterator begin, std::vector< std::pair<StringRef,StringRef> >::iterator end,SkipList* part) {
int count = end-begin;
#if 0
//for(auto w = begin; w != end; ++w)
@ -1262,16 +1262,16 @@ void ConflictBatch::mergeWriteConflictRanges(Version now) {
return;
if (PARALLEL_THREAD_COUNT) {
vector<SkipList> parts;
std::vector<SkipList> parts;
for (int i = 0; i < PARALLEL_THREAD_COUNT; i++)
parts.push_back(SkipList());
parts.emplace_back();
vector<StringRef> splits( parts.size()-1 );
std::vector<StringRef> splits( parts.size()-1 );
for(int s=0; s<splits.size(); s++)
splits[s] = combinedWriteConflictRanges[ (s+1)*combinedWriteConflictRanges.size()/parts.size() ].first;
cs->versionHistory.partition( splits.size() ? &splits[0] : NULL, splits.size(), &parts[0] );
vector<double> tstart(PARALLEL_THREAD_COUNT), tend(PARALLEL_THREAD_COUNT);
std::vector<double> tstart(PARALLEL_THREAD_COUNT), tend(PARALLEL_THREAD_COUNT);
Event done[PARALLEL_THREAD_COUNT ? PARALLEL_THREAD_COUNT : 1];
double before = timer();
for(int t=0; t<parts.size(); t++) {
@ -1325,8 +1325,8 @@ void ConflictBatch::combineWriteConflictRanges()
if (point.write && !transactionConflictStatus[ point.transaction ]) {
if (point.begin) {
activeWriteCount++;
if (activeWriteCount == 1)
combinedWriteConflictRanges.push_back( make_pair( point.key, KeyRef() ) );
if (activeWriteCount == 1)
combinedWriteConflictRanges.emplace_back(point.key, KeyRef());
} else /*if (point.end)*/ {
activeWriteCount--;
if (activeWriteCount == 0)
@ -1431,8 +1431,8 @@ void skipListTest() {
Arena testDataArena;
VectorRef< VectorRef<KeyRangeRef> > testData;
testData.resize(testDataArena, 500);
vector<vector<uint8_t>> success( testData.size() );
vector<vector<uint8_t>> success2( testData.size() );
std::vector<std::vector<uint8_t>> success( testData.size() );
std::vector<std::vector<uint8_t>> success2( testData.size() );
for(int i=0; i<testData.size(); i++) {
testData[i].resize(testDataArena, 5000);
success[i].assign( testData[i].size(), false );
@ -1454,10 +1454,10 @@ void skipListTest() {
int cranges = 0, tcount = 0;
start = timer();
vector<vector<int>> nonConflict( testData.size() );
std::vector<std::vector<int>> nonConflict( testData.size() );
for(int i=0; i<testData.size(); i++) {
Arena buf;
vector<CommitTransactionRef> trs;
std::vector<CommitTransactionRef> trs;
double t = timer();
for(int j=0; j+readCount+writeCount<=testData[i].size(); j+=readCount+writeCount) {
CommitTransactionRef tr;

View File

@ -29,6 +29,7 @@
#include <iterator>
struct TLogInterface {
constexpr static FileIdentifier file_identifier = 16308510;
enum { LocationAwareLoadBalance = 1 };
enum { AlwaysFresh = 1 };
@ -64,13 +65,16 @@ struct TLogInterface {
template <class Ar>
void serialize( Ar& ar ) {
ASSERT(ar.isDeserializing || uniqueID != UID());
if constexpr (!is_fb_function<Ar>) {
ASSERT(ar.isDeserializing || uniqueID != UID());
}
serializer(ar, uniqueID, sharedTLogID, locality, peekMessages, popMessages
, commit, lock, getQueuingMetrics, confirmRunning, waitFailure, recoveryFinished);
}
};
struct TLogRecoveryFinishedRequest {
constexpr static FileIdentifier file_identifier = 8818668;
ReplyPromise<Void> reply;
TLogRecoveryFinishedRequest() {}
@ -82,6 +86,7 @@ struct TLogRecoveryFinishedRequest {
};
struct TLogLockResult {
constexpr static FileIdentifier file_identifier = 11822027;
Version end;
Version knownCommittedVersion;
@ -92,6 +97,7 @@ struct TLogLockResult {
};
struct TLogConfirmRunningRequest {
constexpr static FileIdentifier file_identifier = 10929130;
Optional<UID> debugID;
ReplyPromise<Void> reply;
@ -136,6 +142,7 @@ struct VerUpdateRef {
};
struct TLogPeekReply {
constexpr static FileIdentifier file_identifier = 11365689;
Arena arena;
StringRef messages;
Version end;
@ -151,6 +158,7 @@ struct TLogPeekReply {
};
struct TLogPeekRequest {
constexpr static FileIdentifier file_identifier = 11001131;
Arena arena;
Version begin;
Tag tag;
@ -168,6 +176,7 @@ struct TLogPeekRequest {
};
struct TLogPopRequest {
constexpr static FileIdentifier file_identifier = 5556423;
Arena arena;
Version to;
Version durableKnownCommittedVersion;
@ -201,6 +210,7 @@ struct TagMessagesRef {
};
struct TLogCommitRequest {
constexpr static FileIdentifier file_identifier = 4022206;
Arena arena;
Version prevVersion, version, knownCommittedVersion, minKnownCommittedVersion;
@ -218,16 +228,8 @@ struct TLogCommitRequest {
}
};
struct TLogQueuingMetricsRequest {
ReplyPromise<struct TLogQueuingMetricsReply> reply;
template <class Ar>
void serialize(Ar& ar) {
serializer(ar, reply);
}
};
struct TLogQueuingMetricsReply {
constexpr static FileIdentifier file_identifier = 12206626;
double localTime;
int64_t instanceID; // changes if bytesDurable and bytesInput reset
int64_t bytesDurable, bytesInput;
@ -240,4 +242,14 @@ struct TLogQueuingMetricsReply {
}
};
struct TLogQueuingMetricsRequest {
constexpr static FileIdentifier file_identifier = 7798476;
ReplyPromise<struct TLogQueuingMetricsReply> reply;
template <class Ar>
void serialize(Ar& ar) {
serializer(ar, reply);
}
};
#endif

View File

@ -767,7 +767,7 @@ struct TagPartitionedLogSystem : ILogSystem, ReferenceCounted<TagPartitionedLogS
}
}
virtual Reference<IPeekCursor> peekSingle( UID dbgid, Version begin, Tag tag, vector<pair<Version,Tag>> history ) {
virtual Reference<IPeekCursor> peekSingle( UID dbgid, Version begin, Tag tag, std::vector<std::pair<Version,Tag>> history ) {
while(history.size() && begin >= history.back().first) {
history.pop_back();
}

View File

@ -33,6 +33,7 @@
#include "flow/actorcompiler.h" // has to be last include
struct WorkloadInterface {
constexpr static FileIdentifier file_identifier = 4454551;
RequestStream<ReplyPromise<Void>> setup;
RequestStream<ReplyPromise<Void>> start;
RequestStream<ReplyPromise<bool>> check;
@ -48,6 +49,7 @@ struct WorkloadInterface {
};
struct WorkloadRequest {
constexpr static FileIdentifier file_identifier = 8121024;
Arena arena;
StringRef title;
int timeout;
@ -79,6 +81,7 @@ struct WorkloadRequest {
};
struct TesterInterface {
constexpr static FileIdentifier file_identifier = 4465210;
RequestStream<WorkloadRequest> recruitments;
UID id() const { return recruitments.getEndpoint().token; }

View File

@ -41,6 +41,7 @@
#define DUMPTOKEN( name ) TraceEvent("DumpToken", recruited.id()).detail("Name", #name).detail("Token", name.getEndpoint().token)
struct WorkerInterface {
constexpr static FileIdentifier file_identifier = 14712718;
ClientWorkerInterface clientInterface;
LocalityData locality;
RequestStream< struct InitializeTLogRequest > tLog;
@ -75,6 +76,7 @@ struct WorkerInterface {
};
struct WorkerDetails {
constexpr static FileIdentifier file_identifier = 9973980;
WorkerInterface interf;
ProcessClass processClass;
bool degraded;
@ -89,6 +91,7 @@ struct WorkerDetails {
};
struct InitializeTLogRequest {
constexpr static FileIdentifier file_identifier = 15604392;
UID recruitmentID;
LogSystemConfig recoverFrom;
Version recoverAt;
@ -116,6 +119,7 @@ struct InitializeTLogRequest {
};
struct InitializeLogRouterRequest {
constexpr static FileIdentifier file_identifier = 2976228;
uint64_t recoveryCount;
Tag routerTag;
Version startVersion;
@ -132,6 +136,7 @@ struct InitializeLogRouterRequest {
// FIXME: Rename to InitializeMasterRequest, etc
struct RecruitMasterRequest {
constexpr static FileIdentifier file_identifier = 12684574;
Arena arena;
LifetimeToken lifetime;
bool forceRecovery;
@ -139,12 +144,15 @@ struct RecruitMasterRequest {
template <class Ar>
void serialize(Ar& ar) {
ASSERT( ar.protocolVersion() >= 0x0FDB00A200040001LL );
if constexpr (!is_fb_function<Ar>) {
ASSERT(ar.protocolVersion() >= 0x0FDB00A200040001LL);
}
serializer(ar, lifetime, forceRecovery, reply, arena);
}
};
struct InitializeMasterProxyRequest {
constexpr static FileIdentifier file_identifier = 10344153;
MasterInterface master;
uint64_t recoveryCount;
Version recoveryTransactionVersion;
@ -158,6 +166,7 @@ struct InitializeMasterProxyRequest {
};
struct InitializeDataDistributorRequest {
constexpr static FileIdentifier file_identifier = 8858952;
UID reqId;
ReplyPromise<DataDistributorInterface> reply;
@ -170,6 +179,7 @@ struct InitializeDataDistributorRequest {
};
struct InitializeRatekeeperRequest {
constexpr static FileIdentifier file_identifier = 6416816;
UID reqId;
ReplyPromise<RatekeeperInterface> reply;
@ -182,6 +192,7 @@ struct InitializeRatekeeperRequest {
};
struct InitializeResolverRequest {
constexpr static FileIdentifier file_identifier = 7413317;
uint64_t recoveryCount;
int proxyCount;
int resolverCount;
@ -194,6 +205,7 @@ struct InitializeResolverRequest {
};
struct InitializeStorageReply {
constexpr static FileIdentifier file_identifier = 10390645;
StorageServerInterface interf;
Version addedVersion;
@ -204,6 +216,7 @@ struct InitializeStorageReply {
};
struct InitializeStorageRequest {
constexpr static FileIdentifier file_identifier = 16665642;
Tag seedTag; //< If this server will be passed to seedShardServers, this will be a tag, otherwise it is invalidTag
UID reqId;
UID interfaceId;
@ -217,6 +230,7 @@ struct InitializeStorageRequest {
};
struct TraceBatchDumpRequest {
constexpr static FileIdentifier file_identifier = 8184121;
ReplyPromise<Void> reply;
template <class Ar>
@ -226,6 +240,7 @@ struct TraceBatchDumpRequest {
};
struct LoadedReply {
constexpr static FileIdentifier file_identifier = 9956350;
Standalone<StringRef> payload;
UID id;
@ -236,6 +251,7 @@ struct LoadedReply {
};
struct LoadedPingRequest {
constexpr static FileIdentifier file_identifier = 4590979;
UID id;
bool loadReply;
Standalone<StringRef> payload;
@ -248,6 +264,7 @@ struct LoadedPingRequest {
};
struct CoordinationPingMessage {
constexpr static FileIdentifier file_identifier = 9982747;
UID clusterControllerId;
int64_t timeStep;
@ -261,6 +278,7 @@ struct CoordinationPingMessage {
};
struct SetMetricsLogRateRequest {
constexpr static FileIdentifier file_identifier = 4245995;
uint32_t metricsLogsPerSecond;
SetMetricsLogRateRequest() : metricsLogsPerSecond( 1 ) {}
@ -273,6 +291,7 @@ struct SetMetricsLogRateRequest {
};
struct EventLogRequest {
constexpr static FileIdentifier file_identifier = 122319;
bool getLastError;
Standalone<StringRef> eventName;
ReplyPromise< TraceEventFields > reply;
@ -307,6 +326,7 @@ struct DebugEntryRef {
};
struct DiskStoreRequest {
constexpr static FileIdentifier file_identifier = 1986262;
bool includePartialStores;
ReplyPromise<Standalone<VectorRef<UID>>> reply;

View File

@ -54,6 +54,7 @@
#include "fdbrpc/TLSConnection.h"
#include "fdbrpc/Net2FileSystem.h"
#include "fdbrpc/Platform.h"
#include "fdbrpc/AsyncFileCached.actor.h"
#include "fdbserver/CoroFlow.h"
#include "flow/SignalSafeUnwind.h"
#if defined(CMAKE_BUILD) || !defined(WIN32)
@ -79,82 +80,84 @@
enum {
OPT_CONNFILE, OPT_SEEDCONNFILE, OPT_SEEDCONNSTRING, OPT_ROLE, OPT_LISTEN, OPT_PUBLICADDR, OPT_DATAFOLDER, OPT_LOGFOLDER, OPT_PARENTPID, OPT_NEWCONSOLE, OPT_NOBOX, OPT_TESTFILE, OPT_RESTARTING, OPT_RANDOMSEED, OPT_KEY, OPT_MEMLIMIT, OPT_STORAGEMEMLIMIT, OPT_MACHINEID, OPT_DCID, OPT_MACHINE_CLASS, OPT_BUGGIFY, OPT_VERSION, OPT_CRASHONERROR, OPT_HELP, OPT_NETWORKIMPL, OPT_NOBUFSTDOUT, OPT_BUFSTDOUTERR, OPT_TRACECLOCK, OPT_NUMTESTERS, OPT_DEVHELP, OPT_ROLLSIZE, OPT_MAXLOGS, OPT_MAXLOGSSIZE, OPT_KNOB, OPT_TESTSERVERS, OPT_TEST_ON_SERVERS, OPT_METRICSCONNFILE, OPT_METRICSPREFIX,
OPT_LOGGROUP, OPT_LOCALITY, OPT_IO_TRUST_SECONDS, OPT_IO_TRUST_WARN_ONLY, OPT_FILESYSTEM, OPT_PROFILER_RSS_SIZE, OPT_KVFILE, OPT_TRACE_FORMAT };
OPT_LOGGROUP, OPT_LOCALITY, OPT_IO_TRUST_SECONDS, OPT_IO_TRUST_WARN_ONLY, OPT_FILESYSTEM, OPT_PROFILER_RSS_SIZE, OPT_KVFILE, OPT_TRACE_FORMAT, OPT_USE_OBJECT_SERIALIZER };
CSimpleOpt::SOption g_rgOptions[] = {
{ OPT_CONNFILE, "-C", SO_REQ_SEP },
{ OPT_CONNFILE, "--cluster_file", SO_REQ_SEP },
{ OPT_SEEDCONNFILE, "--seed_cluster_file", SO_REQ_SEP },
{ OPT_SEEDCONNSTRING, "--seed_connection_string", SO_REQ_SEP },
{ OPT_ROLE, "-r", SO_REQ_SEP },
{ OPT_ROLE, "--role", SO_REQ_SEP },
{ OPT_PUBLICADDR, "-p", SO_REQ_SEP },
{ OPT_PUBLICADDR, "--public_address", SO_REQ_SEP },
{ OPT_LISTEN, "-l", SO_REQ_SEP },
{ OPT_LISTEN, "--listen_address", SO_REQ_SEP },
{ OPT_CONNFILE, "-C", SO_REQ_SEP },
{ OPT_CONNFILE, "--cluster_file", SO_REQ_SEP },
{ OPT_SEEDCONNFILE, "--seed_cluster_file", SO_REQ_SEP },
{ OPT_SEEDCONNSTRING, "--seed_connection_string", SO_REQ_SEP },
{ OPT_ROLE, "-r", SO_REQ_SEP },
{ OPT_ROLE, "--role", SO_REQ_SEP },
{ OPT_PUBLICADDR, "-p", SO_REQ_SEP },
{ OPT_PUBLICADDR, "--public_address", SO_REQ_SEP },
{ OPT_LISTEN, "-l", SO_REQ_SEP },
{ OPT_LISTEN, "--listen_address", SO_REQ_SEP },
#ifdef __linux__
{ OPT_FILESYSTEM, "--data_filesystem", SO_REQ_SEP },
{ OPT_PROFILER_RSS_SIZE, "--rsssize", SO_REQ_SEP },
#endif
{ OPT_DATAFOLDER, "-d", SO_REQ_SEP },
{ OPT_DATAFOLDER, "--datadir", SO_REQ_SEP },
{ OPT_LOGFOLDER, "-L", SO_REQ_SEP },
{ OPT_LOGFOLDER, "--logdir", SO_REQ_SEP },
{ OPT_ROLLSIZE, "-Rs", SO_REQ_SEP },
{ OPT_ROLLSIZE, "--logsize", SO_REQ_SEP },
{ OPT_MAXLOGS, "--maxlogs", SO_REQ_SEP },
{ OPT_MAXLOGSSIZE, "--maxlogssize", SO_REQ_SEP },
{ OPT_LOGGROUP, "--loggroup", SO_REQ_SEP },
{ OPT_DATAFOLDER, "-d", SO_REQ_SEP },
{ OPT_DATAFOLDER, "--datadir", SO_REQ_SEP },
{ OPT_LOGFOLDER, "-L", SO_REQ_SEP },
{ OPT_LOGFOLDER, "--logdir", SO_REQ_SEP },
{ OPT_ROLLSIZE, "-Rs", SO_REQ_SEP },
{ OPT_ROLLSIZE, "--logsize", SO_REQ_SEP },
{ OPT_MAXLOGS, "--maxlogs", SO_REQ_SEP },
{ OPT_MAXLOGSSIZE, "--maxlogssize", SO_REQ_SEP },
{ OPT_LOGGROUP, "--loggroup", SO_REQ_SEP },
{ OPT_PARENTPID, "--parentpid", SO_REQ_SEP },
#ifdef _WIN32
{ OPT_PARENTPID, "--parentpid", SO_REQ_SEP },
{ OPT_NEWCONSOLE, "-n", SO_NONE },
{ OPT_NEWCONSOLE, "--newconsole", SO_NONE },
{ OPT_NOBOX, "-q", SO_NONE },
{ OPT_NOBOX, "--no_dialog", SO_NONE },
{ OPT_NEWCONSOLE, "-n", SO_NONE },
{ OPT_NEWCONSOLE, "--newconsole", SO_NONE },
{ OPT_NOBOX, "-q", SO_NONE },
{ OPT_NOBOX, "--no_dialog", SO_NONE },
#endif
{ OPT_KVFILE, "--kvfile", SO_REQ_SEP },
{ OPT_TESTFILE, "-f", SO_REQ_SEP },
{ OPT_TESTFILE, "--testfile", SO_REQ_SEP },
{ OPT_RESTARTING, "-R", SO_NONE },
{ OPT_RESTARTING, "--restarting", SO_NONE },
{ OPT_RANDOMSEED, "-s", SO_REQ_SEP },
{ OPT_RANDOMSEED, "--seed", SO_REQ_SEP },
{ OPT_KEY, "-k", SO_REQ_SEP },
{ OPT_KEY, "--key", SO_REQ_SEP },
{ OPT_MEMLIMIT, "-m", SO_REQ_SEP },
{ OPT_MEMLIMIT, "--memory", SO_REQ_SEP },
{ OPT_STORAGEMEMLIMIT, "-M", SO_REQ_SEP },
{ OPT_STORAGEMEMLIMIT, "--storage_memory", SO_REQ_SEP },
{ OPT_MACHINEID, "-i", SO_REQ_SEP },
{ OPT_MACHINEID, "--machine_id", SO_REQ_SEP },
{ OPT_DCID, "-a", SO_REQ_SEP },
{ OPT_DCID, "--datacenter_id", SO_REQ_SEP },
{ OPT_MACHINE_CLASS, "-c", SO_REQ_SEP },
{ OPT_MACHINE_CLASS, "--class", SO_REQ_SEP },
{ OPT_BUGGIFY, "-b", SO_REQ_SEP },
{ OPT_BUGGIFY, "--buggify", SO_REQ_SEP },
{ OPT_VERSION, "-v", SO_NONE },
{ OPT_VERSION, "--version", SO_NONE },
{ OPT_CRASHONERROR, "--crash", SO_NONE },
{ OPT_NETWORKIMPL, "-N", SO_REQ_SEP },
{ OPT_NETWORKIMPL, "--network", SO_REQ_SEP },
{ OPT_NOBUFSTDOUT, "--unbufferedout", SO_NONE },
{ OPT_BUFSTDOUTERR, "--bufferedout", SO_NONE },
{ OPT_TRACECLOCK, "--traceclock", SO_REQ_SEP },
{ OPT_NUMTESTERS, "--num_testers", SO_REQ_SEP },
{ OPT_HELP, "-?", SO_NONE },
{ OPT_HELP, "-h", SO_NONE },
{ OPT_HELP, "--help", SO_NONE },
{ OPT_DEVHELP, "--dev-help", SO_NONE },
{ OPT_KNOB, "--knob_", SO_REQ_SEP },
{ OPT_LOCALITY, "--locality_", SO_REQ_SEP },
{ OPT_TESTSERVERS, "--testservers", SO_REQ_SEP },
{ OPT_TEST_ON_SERVERS, "--testonservers", SO_NONE },
{ OPT_METRICSCONNFILE, "--metrics_cluster", SO_REQ_SEP },
{ OPT_METRICSPREFIX, "--metrics_prefix", SO_REQ_SEP },
{ OPT_IO_TRUST_SECONDS, "--io_trust_seconds", SO_REQ_SEP },
{ OPT_IO_TRUST_WARN_ONLY, "--io_trust_warn_only", SO_NONE },
{ OPT_TRACE_FORMAT , "--trace_format", SO_REQ_SEP },
{ OPT_KVFILE, "--kvfile", SO_REQ_SEP },
{ OPT_TESTFILE, "-f", SO_REQ_SEP },
{ OPT_TESTFILE, "--testfile", SO_REQ_SEP },
{ OPT_RESTARTING, "-R", SO_NONE },
{ OPT_RESTARTING, "--restarting", SO_NONE },
{ OPT_RANDOMSEED, "-s", SO_REQ_SEP },
{ OPT_RANDOMSEED, "--seed", SO_REQ_SEP },
{ OPT_KEY, "-k", SO_REQ_SEP },
{ OPT_KEY, "--key", SO_REQ_SEP },
{ OPT_MEMLIMIT, "-m", SO_REQ_SEP },
{ OPT_MEMLIMIT, "--memory", SO_REQ_SEP },
{ OPT_STORAGEMEMLIMIT, "-M", SO_REQ_SEP },
{ OPT_STORAGEMEMLIMIT, "--storage_memory", SO_REQ_SEP },
{ OPT_MACHINEID, "-i", SO_REQ_SEP },
{ OPT_MACHINEID, "--machine_id", SO_REQ_SEP },
{ OPT_DCID, "-a", SO_REQ_SEP },
{ OPT_DCID, "--datacenter_id", SO_REQ_SEP },
{ OPT_MACHINE_CLASS, "-c", SO_REQ_SEP },
{ OPT_MACHINE_CLASS, "--class", SO_REQ_SEP },
{ OPT_BUGGIFY, "-b", SO_REQ_SEP },
{ OPT_BUGGIFY, "--buggify", SO_REQ_SEP },
{ OPT_VERSION, "-v", SO_NONE },
{ OPT_VERSION, "--version", SO_NONE },
{ OPT_CRASHONERROR, "--crash", SO_NONE },
{ OPT_NETWORKIMPL, "-N", SO_REQ_SEP },
{ OPT_NETWORKIMPL, "--network", SO_REQ_SEP },
{ OPT_NOBUFSTDOUT, "--unbufferedout", SO_NONE },
{ OPT_BUFSTDOUTERR, "--bufferedout", SO_NONE },
{ OPT_TRACECLOCK, "--traceclock", SO_REQ_SEP },
{ OPT_NUMTESTERS, "--num_testers", SO_REQ_SEP },
{ OPT_HELP, "-?", SO_NONE },
{ OPT_HELP, "-h", SO_NONE },
{ OPT_HELP, "--help", SO_NONE },
{ OPT_DEVHELP, "--dev-help", SO_NONE },
{ OPT_KNOB, "--knob_", SO_REQ_SEP },
{ OPT_LOCALITY, "--locality_", SO_REQ_SEP },
{ OPT_TESTSERVERS, "--testservers", SO_REQ_SEP },
{ OPT_TEST_ON_SERVERS, "--testonservers", SO_NONE },
{ OPT_METRICSCONNFILE, "--metrics_cluster", SO_REQ_SEP },
{ OPT_METRICSPREFIX, "--metrics_prefix", SO_REQ_SEP },
{ OPT_IO_TRUST_SECONDS, "--io_trust_seconds", SO_REQ_SEP },
{ OPT_IO_TRUST_WARN_ONLY, "--io_trust_warn_only", SO_NONE },
{ OPT_TRACE_FORMAT , "--trace_format", SO_REQ_SEP },
{ OPT_USE_OBJECT_SERIALIZER, "-S", SO_REQ_SEP },
{ OPT_USE_OBJECT_SERIALIZER, "--object-serializer", SO_REQ_SEP },
#ifndef TLS_DISABLED
TLS_OPTION_FLAGS
@ -501,6 +504,15 @@ void parentWatcher(void *parentHandle) {
criticalError( FDB_EXIT_SUCCESS, "ParentProcessExited", "Parent process exited" );
TraceEvent(SevError, "ParentProcessWaitFailed").detail("RetCode", signal).GetLastError();
}
#else
void* parentWatcher(void *arg) {
int *parent_pid = (int*) arg;
while(1) {
sleep(1);
if(getppid() != *parent_pid)
criticalError( FDB_EXIT_SUCCESS, "ParentProcessExited", "Parent process exited" );
}
}
#endif
static void printVersion() {
@ -566,6 +578,10 @@ static void printUsage( const char *name, bool devhelp ) {
" Machine class (valid options are storage, transaction,\n"
" resolution, proxy, master, test, unset, stateless, log, router,\n"
" and cluster_controller).\n");
printf(" -S ON|OFF, --object-serializer ON|OFF\n"
" Use object serializer for sending messages. The object serializer\n"
" is currently a beta feature and it allows fdb processes to talk to\n"
" each other even if they don't have the same version\n");
#ifndef TLS_DISABLED
printf(TLS_HELP);
#endif
@ -923,6 +939,7 @@ int main(int argc, char* argv[]) {
double fileIoTimeout = 0.0;
bool fileIoWarnOnly = false;
uint64_t rsssize = -1;
bool useObjectSerializer = false;
if( argc == 1 ) {
printUsage(argv[0], false);
@ -1158,6 +1175,14 @@ int main(int argc, char* argv[]) {
case OPT_NOBOX:
SetErrorMode(SetErrorMode(0) | SEM_NOGPFAULTERRORBOX);
break;
#else
case OPT_PARENTPID: {
auto pid_str = args.OptionArg();
int *parent_pid = new(int);
*parent_pid = atoi(pid_str);
startThread(&parentWatcher, parent_pid);
break;
}
#endif
case OPT_TESTFILE:
testFile = args.OptionArg();
@ -1259,6 +1284,21 @@ int main(int argc, char* argv[]) {
fprintf(stderr, "WARNING: Unrecognized trace format `%s'\n", args.OptionArg());
}
break;
case OPT_USE_OBJECT_SERIALIZER:
{
std::string s = args.OptionArg();
std::transform(s.begin(), s.end(), s.begin(), ::tolower);
if (s == "on" || s == "true" || s == "1") {
useObjectSerializer = true;
} else if (s == "off" || s == "false" || s == "0") {
useObjectSerializer = false;
} else {
fprintf(stderr, "ERROR: Could not parse object serializer option: `%s'\n", s.c_str());
printHelpTeaser(argv[0]);
flushAndExit(FDB_EXIT_ERROR);
}
break;
}
#ifndef TLS_DISABLED
case TLSOptions::OPT_TLS_PLUGIN:
args.OptionArg();
@ -1414,6 +1454,9 @@ int main(int argc, char* argv[]) {
}
if (!serverKnobs->setKnob("server_mem_limit", std::to_string(memLimit))) ASSERT(false);
// evictionPolicyStringToEnum will throw an exception if the string is not recognized as a valid
EvictablePageCache::evictionPolicyStringToEnum(flowKnobs->CACHE_EVICTION_POLICY);
if (role == SkipListTest) {
skipListTest();
flushAndExit(FDB_EXIT_SUCCESS);
@ -1469,10 +1512,10 @@ int main(int argc, char* argv[]) {
if (role == Simulation || role == CreateTemplateDatabase) {
//startOldSimulator();
startNewSimulator();
startNewSimulator(useObjectSerializer);
openTraceFile(NetworkAddress(), rollsize, maxLogsSize, logFolder, "trace", logGroup);
} else {
g_network = newNet2(useThreadPool, true);
g_network = newNet2(useThreadPool, true, useObjectSerializer);
FlowTransport::createInstance(1);
const bool expectsPublicAddress = (role == FDBD || role == NetworkTestServer || role == Restore);

View File

@ -1,5 +1,5 @@
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<Project DefaultTargets="Build" ToolsVersion="14.1" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<Import Project="$(SolutionDir)versions.target" />
<PropertyGroup Condition="'$(Release)' != 'true' ">
<PreReleaseDecoration>-PRERELEASE</PreReleaseDecoration>
@ -240,12 +240,12 @@
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|X64'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<CharacterSet>MultiByte</CharacterSet>
<PlatformToolset>v140_xp</PlatformToolset>
<PlatformToolset>v141</PlatformToolset>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|X64'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<CharacterSet>MultiByte</CharacterSet>
<PlatformToolset>v140_xp</PlatformToolset>
<PlatformToolset>v141</PlatformToolset>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
<ImportGroup Label="ExtensionSettings">
@ -269,6 +269,7 @@
</Lib>
<ClCompile>
<PreprocessorDefinitions>FDB_VT_VERSION="$(Version)$(PreReleaseDecoration)";FDB_VT_PACKAGE_NAME="$(PackageName)";%(PreprocessorDefinitions)</PreprocessorDefinitions>
<LanguageStandard>stdcpp17</LanguageStandard>
</ClCompile>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|X64'">
@ -286,6 +287,7 @@
<MultiProcessorCompilation>true</MultiProcessorCompilation>
<AdditionalOptions>/bigobj @../flow/no_intellisense.opt %(AdditionalOptions)</AdditionalOptions>
<PreprocessToFile>false</PreprocessToFile>
<LanguageStandard>stdcpp17</LanguageStandard>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
@ -311,6 +313,7 @@
<FavorSizeOrSpeed>Speed</FavorSizeOrSpeed>
<BufferSecurityCheck>false</BufferSecurityCheck>
<MinimalRebuild>false</MinimalRebuild>
<LanguageStandard>stdcpp17</LanguageStandard>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>

View File

@ -429,12 +429,12 @@ ACTOR Future<std::vector<Message>> _listInboxMessages(Database cx, uint64_t inbo
//printf(" -> cached message %016llx from feed %016llx\n", messageId, feed);
if(messageId >= cursor) {
//printf(" -> entering message %016llx from feed %016llx\n", messageId, feed);
feedLatest.insert(pair<MessageId, Feed>(messageId, feed));
feedLatest.emplace(messageId, feed);
} else {
// replace this with the first message older than the cursor
MessageId mId = wait(getFeedLatestAtOrAfter(&tr, feed, cursor));
if(mId) {
feedLatest.insert(pair<MessageId, Feed>(mId, feed));
feedLatest.emplace(mId, feed);
}
}
}
@ -465,7 +465,7 @@ ACTOR Future<std::vector<Message>> _listInboxMessages(Database cx, uint64_t inbo
MessageId nextMessage = wait(getFeedLatestAtOrAfter(&tr, f, id + 1));
if(nextMessage) {
feedLatest.insert(pair<MessageId, Feed>(nextMessage, f));
feedLatest.emplace(nextMessage, f);
}
}

View File

@ -15,6 +15,9 @@
*/
#ifndef _BTREE_H_
#define _BTREE_H_
#ifndef NDEBUG
#define NDEBUG
#endif
/* TODO: This definition is just included so other modules compile. It
** needs to be revisited.

View File

@ -1,3 +1,6 @@
#ifndef NDEBUG
#define NDEBUG
#endif
#ifndef NDEBUG
#define SQLITE_DEBUG 1
#endif

View File

@ -52,6 +52,7 @@
#include "flow/TDMetric.actor.h"
#include "flow/actorcompiler.h" // This must be the last #include.
using std::pair;
using std::make_pair;
#pragma region Data Structures
@ -199,11 +200,11 @@ private:
};
struct UpdateEagerReadInfo {
vector<KeyRef> keyBegin;
vector<Key> keyEnd; // these are for ClearRange
std::vector<KeyRef> keyBegin;
std::vector<Key> keyEnd; // these are for ClearRange
vector<pair<KeyRef, int>> keys;
vector<Optional<Value>> value;
std::vector<std::pair<KeyRef, int>> keys;
std::vector<Optional<Value>> value;
Arena arena;
@ -223,13 +224,13 @@ struct UpdateEagerReadInfo {
// CompareAndClear is likely to be used after another atomic operation on same key.
keys.back().second = std::max(keys.back().second, m.param2.size() + 1);
} else {
keys.push_back(pair<KeyRef, int>(m.param1, m.param2.size() + 1));
keys.emplace_back(m.param1, m.param2.size() + 1);
}
} else if ((m.type == MutationRef::AppendIfFits) || (m.type == MutationRef::ByteMin) ||
(m.type == MutationRef::ByteMax))
keys.push_back(pair<KeyRef, int>(m.param1, CLIENT_KNOBS->VALUE_SIZE_LIMIT));
keys.emplace_back(m.param1, CLIENT_KNOBS->VALUE_SIZE_LIMIT);
else if (isAtomicOp((MutationRef::Type) m.type))
keys.push_back(pair<KeyRef, int>(m.param1, m.param2.size()));
keys.emplace_back(m.param1, m.param2.size());
}
void finishKeyBegin() {
@ -2239,8 +2240,8 @@ void changeServerKeys( StorageServer* data, const KeyRangeRef& keys, bool nowAss
// version. The latter depends on data->newestAvailableVersion, so loop over the ranges of that.
// SOMEDAY: Could this just use shards? Then we could explicitly do the removeDataRange here when an adding/transferred shard is cancelled
auto vr = data->newestAvailableVersion.intersectingRanges(keys);
vector<std::pair<KeyRange,Version>> changeNewestAvailable;
vector<KeyRange> removeRanges;
std::vector<std::pair<KeyRange,Version>> changeNewestAvailable;
std::vector<KeyRange> removeRanges;
for (auto r = vr.begin(); r != vr.end(); ++r) {
KeyRangeRef range = keys & r->range();
bool dataAvailable = r->value()==latestVersion || r->value() >= version;
@ -2255,7 +2256,7 @@ void changeServerKeys( StorageServer* data, const KeyRangeRef& keys, bool nowAss
if (dataAvailable) {
ASSERT( r->value() == latestVersion); // Not that we care, but this used to be checked instead of dataAvailable
ASSERT( data->mutableData().getLatestVersion() > version || context == CSK_RESTORE );
changeNewestAvailable.push_back(make_pair(range, version));
changeNewestAvailable.emplace_back(range, version);
removeRanges.push_back( range );
}
data->addShard( ShardInfo::newNotAssigned(range) );
@ -2263,7 +2264,7 @@ void changeServerKeys( StorageServer* data, const KeyRangeRef& keys, bool nowAss
} else if (!dataAvailable) {
// SOMEDAY: Avoid restarting adding/transferred shards
if (version==0){ // bypass fetchkeys; shard is known empty at version 0
changeNewestAvailable.push_back(make_pair(range, latestVersion));
changeNewestAvailable.emplace_back(range, latestVersion);
data->addShard( ShardInfo::newReadWrite(range, data) );
setAvailableStatus(data, range, true);
} else {
@ -2272,7 +2273,7 @@ void changeServerKeys( StorageServer* data, const KeyRangeRef& keys, bool nowAss
data->addShard( ShardInfo::newAdding(data, range) );
}
} else {
changeNewestAvailable.push_back(make_pair(range, latestVersion));
changeNewestAvailable.emplace_back(range, latestVersion);
data->addShard( ShardInfo::newReadWrite(range, data) );
}
}
@ -2438,7 +2439,7 @@ private:
rollback( data, rollbackVersion, currentVersion );
}
data->recoveryVersionSkips.push_back(std::make_pair(rollbackVersion, currentVersion - rollbackVersion));
data->recoveryVersionSkips.emplace_back(rollbackVersion, currentVersion - rollbackVersion);
} else if (m.type == MutationRef::SetValue && m.param1 == killStoragePrivateKey) {
throw worker_removed();
} else if ((m.type == MutationRef::SetValue || m.type == MutationRef::ClearRange) && m.param1.substr(1).startsWith(serverTagPrefix)) {
@ -3694,7 +3695,7 @@ void versionedMapTest() {
printf("SS Ptree node is %zu bytes\n", sizeof( StorageServer::VersionedData::PTreeT ) );
const int NSIZE = sizeof(VersionedMap<int,int>::PTreeT);
const int ASIZE = NSIZE<=64 ? 64 : NextPowerOfTwo<NSIZE>::Result;
const int ASIZE = NSIZE<=64 ? 64 : NextFastAllocatedSize<NSIZE>::Result;
auto before = FastAllocator< ASIZE >::getTotalMemory();

Some files were not shown because too many files have changed in this diff Show More