Merge branch 'master' into ryanworl/remove-finalizers

This commit is contained in:
Vishesh Yadav 2020-04-16 01:01:00 -07:00 committed by GitHub
commit b6b9f13016
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
640 changed files with 55608 additions and 25498 deletions

10
.gitignore vendored
View File

@ -31,8 +31,10 @@ bindings/ruby/lib/fdboptions.rb
bindings/ruby/fdb.gemspec
fdbclient/vexillographer/obj/
fdbrpc/hgVersion*.h
fdbrpc/SourceVersion*.h
fdbrpc/libeio/config.h
flow/hgVersion*.h
flow/SourceVersion*.h
generated.mk
versions.h
packaging/msi/FDBInstaller.wix*
@ -79,6 +81,11 @@ compile_commands.json
flow/actorcompiler/obj
flow/coveragetool/obj
# IDE indexing (commonly used tools)
/compile_commands.json
/.ccls-cache
/.clangd
# Temporary and user configuration files
*~
*.orig
@ -87,5 +94,4 @@ flow/coveragetool/obj
.envrc
.DS_Store
temp/
/compile_commands.json
/.ccls-cache
/versions.target

View File

@ -27,11 +27,6 @@ Mark Adler, Robert Važan (CRC-32C [Castagnoli] for C++ and .NET)
3. This notice may not be removed or altered from any source distribution.
Steven J. Bethard (argparse.py from https://code.google.com/p/argparse/)
argparse is licensed under the Python license, see:
https://code.google.com/p/argparse/source/browse/LICENSE.txt and
https://code.google.com/p/argparse/source/browse/doc/source/Python-License.txt
Russ Cox (asm.S from libcoroutine)
This software was developed as part of a project at MIT.
@ -484,3 +479,29 @@ SHIBUKAWA Yoshiki (sphinxcontrib-rubydomain)
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Armon Dadgar (ART)
Copyright (c) 2012, Armon Dadgar
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the organization nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL ARMON DADGAR BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@ -16,9 +16,9 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
cmake_minimum_required(VERSION 3.12)
cmake_minimum_required(VERSION 3.13)
project(foundationdb
VERSION 6.1.0
VERSION 6.3.0
DESCRIPTION "FoundationDB is a scalable, fault-tolerant, ordered key-value store with full ACID transactions."
HOMEPAGE_URL "http://www.foundationdb.org/"
LANGUAGES C CXX ASM)
@ -29,18 +29,23 @@ if("${PROJECT_SOURCE_DIR}" STREQUAL "${PROJECT_BINARY_DIR}")
message(FATAL_ERROR "In-source builds are forbidden")
endif()
set(OPEN_FOR_IDE OFF CACHE BOOL "Open this in an IDE (won't compile/link)")
if(NOT CMAKE_BUILD_TYPE AND NOT CMAKE_CONFIGURATION_TYPES)
message(STATUS "Setting build type to 'Release' as none was specified")
set(CMAKE_BUILD_TYPE Release CACHE STRING "Choose the type of build" FORCE)
set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS "Debug" "Release"
"MinSizeRel" "RelWithDebInfo")
if (OPEN_FOR_IDE)
message(STATUS "Defaulting build type to 'Debug' for OPEN_FOR_IDE")
set(CMAKE_BUILD_TYPE Debug CACHE STRING "Choose the type of build" FORCE)
else()
message(STATUS "Setting build type to 'Release' as none was specified")
set(CMAKE_BUILD_TYPE Release CACHE STRING "Choose the type of build" FORCE)
set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS "Debug" "Release"
"MinSizeRel" "RelWithDebInfo")
endif()
endif()
set(EXECUTABLE_OUTPUT_PATH ${PROJECT_BINARY_DIR}/bin)
set(LIBRARY_OUTPUT_PATH ${PROJECT_BINARY_DIR}/lib)
set(OPEN_FOR_IDE OFF CACHE BOOL "Open this in an IDE (won't compile/link)")
################################################################################
# Packages used for bindings
################################################################################
@ -75,26 +80,10 @@ message(STATUS "Current git version ${CURRENT_GIT_VERSION}")
# Version information
################################################################################
set(USE_VERSIONS_TARGET OFF CACHE BOOL "Use the deprecated versions.target file")
if(USE_VERSIONS_TARGET)
add_custom_target(version_file ALL DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/versions.target)
execute_process(
COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/build/get_version.sh ${CMAKE_CURRENT_SOURCE_DIR}/versions.target
OUTPUT_VARIABLE FDB_VERSION_WNL)
execute_process(
COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/build/get_package_name.sh ${CMAKE_CURRENT_SOURCE_DIR}/versions.target
OUTPUT_VARIABLE FDB_PACKAGE_NAME_WNL)
string(STRIP "${FDB_VERSION_WNL}" FDB_VERSION)
string(STRIP "${FDB_PACKAGE_NAME_WNL}" FDB_PACKAGE_NAME)
set(FDB_VERSION_PLAIN ${FDB_VERSION})
if(NOT FDB_RELEASE)
set(FDB_VERSION "${FDB_VERSION}-PRERELEASE")
endif()
else()
set(FDB_PACKAGE_NAME "${PROJECT_VERSION_MAJOR}.${PROJECT_VERSION_MINOR}")
set(FDB_VERSION ${PROJECT_VERSION})
set(FDB_VERSION_PLAIN ${FDB_VERSION})
endif()
set(FDB_PACKAGE_NAME "${PROJECT_VERSION_MAJOR}.${PROJECT_VERSION_MINOR}")
set(FDB_VERSION ${PROJECT_VERSION})
set(FDB_VERSION_PLAIN ${FDB_VERSION})
configure_file(${CMAKE_SOURCE_DIR}/versions.target.cmake ${CMAKE_SOURCE_DIR}/versions.target)
message(STATUS "FDB version is ${FDB_VERSION}")
message(STATUS "FDB package name is ${FDB_PACKAGE_NAME}")
@ -158,9 +147,6 @@ set(SEED "0x${SEED_}" CACHE STRING "Random seed for testing")
################################################################################
include(CompileBoost)
if(WITH_TLS)
add_subdirectory(FDBLibTLS)
endif()
add_subdirectory(flow)
add_subdirectory(fdbrpc)
add_subdirectory(fdbclient)
@ -171,9 +157,12 @@ if(NOT WIN32)
else()
add_subdirectory(fdbservice)
endif()
add_subdirectory(bindings)
add_subdirectory(fdbbackup)
add_subdirectory(contrib)
add_subdirectory(tests)
if(WITH_PYTHON)
add_subdirectory(bindings)
endif()
if(WITH_DOCUMENTATION)
add_subdirectory(documentation)
endif()
@ -188,13 +177,13 @@ endif()
# process compile commands for IDE
################################################################################
if (CMAKE_EXPORT_COMPILE_COMMANDS)
if (CMAKE_EXPORT_COMPILE_COMMANDS AND WITH_PYTHON)
add_custom_command(
OUTPUT ${CMAKE_CURRENT_SOURCE_DIR}/compile_commands.json
COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/build/gen_compile_db.py
ARGS -b ${CMAKE_CURRENT_BINARY_DIR} -s ${CMAKE_CURRENT_SOURCE_DIR} -o ${CMAKE_CURRENT_SOURCE_DIR}/compile_commands.json ${CMAKE_CURRENT_BINARY_DIR}/compile_commands.json
DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/build/gen_compile_db.py ${CMAKE_CURRENT_BINARY_DIR}/compile_commands.json
COMMENT "Build compile commands for IDE"
OUTPUT ${CMAKE_CURRENT_SOURCE_DIR}/compile_commands.json
COMMAND $<TARGET_FILE:Python::Interpreter> ${CMAKE_CURRENT_SOURCE_DIR}/build/gen_compile_db.py
ARGS -b ${CMAKE_CURRENT_BINARY_DIR} -s ${CMAKE_CURRENT_SOURCE_DIR} -o ${CMAKE_CURRENT_SOURCE_DIR}/compile_commands.json ${CMAKE_CURRENT_BINARY_DIR}/compile_commands.json
DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/build/gen_compile_db.py ${CMAKE_CURRENT_BINARY_DIR}/compile_commands.json
COMMENT "Build compile commands for IDE"
)
add_custom_target(processed_compile_commands ALL DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/compile_commands.json ${CMAKE_CURRENT_BINARY_DIR}/compile_commands.json)
endif()

View File

@ -8,5 +8,5 @@ set(SRCS
FDBLibTLSVerify.cpp
FDBLibTLSVerify.h)
add_library(FDBLibTLS ${SRCS})
add_library(FDBLibTLS STATIC ${SRCS})
target_link_libraries(FDBLibTLS PUBLIC LibreSSL boost_target PRIVATE flow)

View File

@ -1,33 +0,0 @@
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="14.1" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup Label="ProjectConfigurations">
<ProjectConfiguration Include="Debug|X64">
<Configuration>Debug</Configuration>
<Platform>X64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|X64">
<Configuration>Release</Configuration>
<Platform>X64</Platform>
</ProjectConfiguration>
</ItemGroup>
<ItemGroup>
<ClInclude Include="FDBLibTLSPlugin.h" />
<ClCompile Include="FDBLibTLSPlugin.cpp" />
<ClInclude Include="FDBLibTLSPolicy.h" />
<ClCompile Include="FDBLibTLSPolicy.cpp" />
<ClInclude Include="FDBLibTLSVerify.h" />
<ClCompile Include="FDBLibTLSVerify.cpp" />
<ClInclude Include="FDBLibTLSSession.h" />
<ClCompile Include="FDBLibTLSSession.cpp" />
</ItemGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|X64'" Label="Configuration">
<ConfigurationType>StaticLibrary</ConfigurationType>
<CharacterSet>MultiByte</CharacterSet>
<PlatformToolset>v141</PlatformToolset>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|X64'" Label="Configuration">
<ConfigurationType>StaticLibrary</ConfigurationType>
<CharacterSet>MultiByte</CharacterSet>
<PlatformToolset>v141</PlatformToolset>
</PropertyGroup>
</Project>

View File

@ -300,7 +300,7 @@ bool FDBLibTLSPolicy::set_verify_peers(int count, const uint8_t* verify_peers[],
}
Reference<FDBLibTLSVerify> verify = Reference<FDBLibTLSVerify>(new FDBLibTLSVerify(verifyString.substr(start)));
verify_rules.push_back(verify);
} catch ( const std::runtime_error& e ) {
} catch ( const std::runtime_error& ) {
verify_rules.clear();
std::string verifyString((const char*)verify_peers[i], verify_peers_len[i]);
TraceEvent(SevError, "FDBLibTLSVerifyPeersParseError").detail("Config", verifyString);

View File

@ -347,7 +347,7 @@ bool FDBLibTLSSession::verify_peer() {
if(now() - lastVerifyFailureLogged > 1.0) {
for (std::string reason : verify_failure_reasons) {
lastVerifyFailureLogged = now();
TraceEvent("FDBLibTLSVerifyFailure", uid).detail("Reason", reason);
TraceEvent("FDBLibTLSVerifyFailure", uid).suppressFor(1.0).detail("Reason", reason);
}
}
}

View File

@ -1,109 +0,0 @@
PROJECTPATH = $(dir $(realpath $(firstword $(MAKEFILE_LIST))))
PLUGINPATH = $(PROJECTPATH)/$(PLUGIN)
CFLAGS ?= -O2 -g
CXXFLAGS ?= -std=c++0x
CFLAGS += -I/usr/local/include -I../flow -I../fdbrpc
LDFLAGS += -L/usr/local/lib
LIBS += -ltls -lssl -lcrypto
PLATFORM := $(shell uname)
ifneq ($(PLATFORM),Darwin)
PLATFORM := $(shell uname -o)
endif
ifeq ($(PLATFORM),Cygwin)
HOST := x86_64-w64-mingw32
CC := $(HOST)-gcc
CXX := $(HOST)-g++
STRIP := $(HOST)-strip --strip-all
DYEXT = dll
PLUGINPATH = $(PLUGIN)
LIBS += -static-libstdc++ -static-libgcc
LIBS += -lws2_32
LINK_LDFLAGS = -shared
LINK_LDFLAGS += -Wl,-soname,$(PLUGIN)
LINK_LDFLAGS += -Wl,--version-script=FDBLibTLS.map
LINK_LDFLAGS += -Wl,-Bstatic $(LIBS) -Wl,-Bdynamic
else ifeq ($(PLATFORM),Darwin)
CC := clang
CXX := clang++
STRIP := strip -S -x
CFLAGS += -fPIC
DYEXT = dylib
vpath %.a /usr/local/lib
.LIBPATTERNS = lib%.a lib%.dylib lib%.so
LINK_LDFLAGS = -shared
LINK_LDFLAGS += -Wl,-exported_symbols_list,FDBLibTLS.symbols
LINK_LDFLAGS += -Wl,-dylib_install_name,$(PLUGIN)
LINK_LDFLAGS += $(LIBS)
else ifeq ($(PLATFORM),GNU/Linux)
CC := clang
CXX := clang++
STRIP := strip --strip-all
CFLAGS += -fPIC
DYEXT = so
LIBS += -static-libstdc++ -static-libgcc -lrt
LINK_LDFLAGS = -shared
LINK_LDFLAGS += -Wl,-soname,$(PLUGIN)
LINK_LDFLAGS += -Wl,--version-script=FDBLibTLS.map
LINK_LDFLAGS += -Wl,-Bstatic $(LIBS) -Wl,-Bdynamic
else
$(error Unknown platform $(PLATFORM))
endif
PLUGIN := FDBLibTLS.$(DYEXT)
OBJECTS := FDBLibTLSPlugin.o FDBLibTLSPolicy.o FDBLibTLSSession.o FDBLibTLSVerify.o
LINKLINE := $(CXXFLAGS) $(CFLAGS) $(LDFLAGS) $(OBJECTS) $(LINK_LDFLAGS) -o $(PLUGIN)
all: $(PLUGIN)
build-depends-linux:
apt install clang make libboost-dev
clean:
@rm -f *.o *.d $(PLUGIN) plugin-test verify-test
@rm -rf *.dSYM
DEPS := $(patsubst %.o,%.d,$(OBJECTS))
-include $(DEPS)
$(OBJECTS): %.o: %.cpp Makefile
@echo "Compiling $<"
@$(CXX) $(CXXFLAGS) $(CFLAGS) $(INCLUDES) -c $< -o $@ -MD -MP
$(PLUGIN): $(OBJECTS) Makefile
@echo "Linking $@"
@$(CXX) $(LINKLINE)
@echo "Stripping $@"
@$(STRIP) $@
test: test-plugin test-verify
test-plugin: plugin-test.cpp $(PLUGIN) Makefile
@echo "Compiling plugin-test"
@$(CXX) $(CXXFLAGS) $(CFLAGS) plugin-test.cpp -ldl -o plugin-test
@echo "Running plugin-test..."
@$(PROJECTPATH)/plugin-test $(PLUGINPATH)
test-verify: verify-test.cpp $(OBJECTS) Makefile
@echo "Compiling verify-test"
@$(CXX) $(CXXFLAGS) $(CFLAGS) $(LDFLAGS) $(OBJECTS) verify-test.cpp $(LIBS) -o verify-test
@echo "Running verify-test..."
@$(PROJECTPATH)/verify-test

View File

@ -1,28 +0,0 @@
#
# local.mk
#
# This source file is part of the FoundationDB open source project
#
# Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- mode: makefile; -*-
FDBLibTLS_BUILD_SOURCES +=
FDBLibTLS_CFLAGS := -fPIC -I/usr/local/include -isystem$(BOOSTDIR) -I. -DUSE_UCONTEXT
lib/libFDBLibTLS.a: bin/coverage.FDBLibTLS.xml

242
Makefile
View File

@ -1,242 +0,0 @@
export
PLATFORM := $(shell uname)
ARCH := $(shell uname -m)
TOPDIR := $(shell pwd)
# Allow custom libc++ hack for Ubuntu
ifeq ("$(wildcard /etc/centos-release)", "")
LIBSTDCPP_HACK ?= 1
endif
ifeq ($(ARCH),x86_64)
ARCH := x64
else
$(error Not prepared to compile on $(ARCH))
endif
MONO := $(shell which mono 2>/dev/null)
ifeq ($(MONO),)
MONO := /usr/bin/mono
endif
MCS := $(shell which mcs 2>/dev/null)
ifeq ($(MCS),)
MCS := $(shell which dmcs 2>/dev/null)
endif
ifeq ($(MCS),)
MCS := /usr/bin/mcs
endif
CFLAGS := -Werror -Wno-error=format -fPIC -DNO_INTELLISENSE -fvisibility=hidden -DNDEBUG=1 -Wreturn-type -fno-omit-frame-pointer
ifeq ($(RELEASE),true)
CFLAGS += -DFDB_CLEAN_BUILD
endif
ifeq ($(NIGHTLY),true)
CFLAGS += -DFDB_CLEAN_BUILD
endif
BOOST_BASENAME ?= boost_1_67_0
ifeq ($(PLATFORM),Linux)
PLATFORM := linux
CC ?= gcc
CXX ?= g++
CXXFLAGS += -std=c++17
BOOST_BASEDIR ?= /opt
TLS_LIBDIR ?= /usr/local/lib
DLEXT := so
java_DLEXT := so
TARGET_LIBC_VERSION ?= 2.11
else ifeq ($(PLATFORM),Darwin)
PLATFORM := osx
CC := /usr/bin/clang
CXX := /usr/bin/clang
CFLAGS += -mmacosx-version-min=10.14 -stdlib=libc++
CXXFLAGS += -mmacosx-version-min=10.14 -std=c++17 -stdlib=libc++ -msse4.2 -Wno-undefined-var-template -Wno-unknown-warning-option
.LIBPATTERNS := lib%.dylib lib%.a
BOOST_BASEDIR ?= ${HOME}
TLS_LIBDIR ?= /usr/local/lib
DLEXT := dylib
java_DLEXT := jnilib
else
$(error Not prepared to compile on platform $(PLATFORM))
endif
BOOSTDIR ?= ${BOOST_BASEDIR}/${BOOST_BASENAME}
CCACHE := $(shell which ccache 2>/dev/null)
ifneq ($(CCACHE),)
CCACHE_CC := $(CCACHE) $(CC)
CCACHE_CXX := $(CCACHE) $(CXX)
else
CCACHE_CC := $(CC)
CCACHE_CXX := $(CXX)
endif
# Default variables don't get pushed into the environment, but scripts in build/
# rely on the existence of CC in the environment.
ifeq ($(origin CC), default)
CC := $(CC)
endif
ACTORCOMPILER := bin/actorcompiler.exe
# UNSTRIPPED := 1
# Normal optimization level
CFLAGS += -O2
# Or turn off optimization entirely
# CFLAGS += -O0
# Debugging symbols are a good thing (and harmless, since we keep them
# in external debug files)
CFLAGS += -g
# valgrind-compatibile builds are enabled by uncommenting lines in valgind.mk
# Define the TLS compilation and link variables
ifdef TLS_DISABLED
CFLAGS += -DTLS_DISABLED
FDB_TLS_LIB :=
TLS_LIBS :=
else
FDB_TLS_LIB := lib/libFDBLibTLS.a
TLS_LIBS += $(addprefix $(TLS_LIBDIR)/,libtls.a libssl.a libcrypto.a)
endif
CXXFLAGS += -Wno-deprecated -DBOOST_ERROR_CODE_HEADER_ONLY -DBOOST_SYSTEM_NO_DEPRECATED
LDFLAGS :=
LIBS :=
STATIC_LIBS :=
# Add library search paths (that aren't -Llib) to the VPATH
VPATH += $(addprefix :,$(filter-out lib,$(patsubst -L%,%,$(filter -L%,$(LDFLAGS)))))
CS_PROJECTS := flow/actorcompiler flow/coveragetool fdbclient/vexillographer
CPP_PROJECTS := flow fdbrpc fdbclient fdbbackup fdbserver fdbcli bindings/c bindings/java fdbmonitor bindings/flow/tester bindings/flow
ifndef TLS_DISABLED
CPP_PROJECTS += FDBLibTLS
endif
OTHER_PROJECTS := bindings/python bindings/ruby bindings/go
CS_MK_GENERATED := $(CS_PROJECTS:=/generated.mk)
CPP_MK_GENERATED := $(CPP_PROJECTS:=/generated.mk)
MK_GENERATED := $(CS_MK_GENERATED) $(CPP_MK_GENERATED)
# build/valgrind.mk needs to be included before any _MK_GENERATED (which in turn includes local.mk)
MK_INCLUDE := build/scver.mk build/valgrind.mk $(CS_MK_GENERATED) $(CPP_MK_GENERATED) $(OTHER_PROJECTS:=/include.mk) build/packages.mk
ALL_MAKEFILES := Makefile $(MK_INCLUDE) $(patsubst %/generated.mk,%/local.mk,$(MK_GENERATED))
TARGETS =
.PHONY: clean all Makefiles
default: fdbserver fdbbackup fdbcli fdb_c fdb_python fdb_python_sdist
all: $(CS_PROJECTS) $(CPP_PROJECTS) $(OTHER_PROJECTS)
# These are always defined and ready to use. Any target that uses them and needs them up to date
# should depend on versions.target
VERSION := $(shell cat versions.target | grep '<Version>' | sed -e 's,^[^>]*>,,' -e 's,<.*,,')
PACKAGE_NAME := $(shell cat versions.target | grep '<PackageName>' | sed -e 's,^[^>]*>,,' -e 's,<.*,,')
versions.h: Makefile versions.target
@rm -f $@
ifeq ($(RELEASE),true)
@echo "#define FDB_VT_VERSION \"$(VERSION)\"" >> $@
else
@echo "#define FDB_VT_VERSION \"$(VERSION)-PRERELEASE\"" >> $@
endif
@echo "#define FDB_VT_PACKAGE_NAME \"$(PACKAGE_NAME)\"" >> $@
bindings: fdb_c fdb_python fdb_ruby fdb_java fdb_flow fdb_flow_tester fdb_go fdb_go_tester fdb_c_tests
Makefiles: $(MK_GENERATED)
$(CS_MK_GENERATED): build/csprojtom4.py build/csproj.mk Makefile
@echo "Creating $@"
@python build/csprojtom4.py $(@D)/*.csproj | m4 -DGENDIR="$(@D)" -DGENNAME=`basename $(@D)/*.csproj .csproj` - build/csproj.mk > $(@D)/generated.mk
$(CPP_MK_GENERATED): build/vcxprojtom4.py build/vcxproj.mk Makefile
@echo "Creating $@"
@python build/vcxprojtom4.py $(@D)/*.vcxproj | m4 -DGENDIR="$(@D)" -DGENNAME=`basename $(@D)/*.vcxproj .vcxproj` - build/vcxproj.mk > $(@D)/generated.mk
DEPSDIR := .deps
OBJDIR := .objs
CMDDIR := .cmds
COMPILE_COMMANDS_JSONS := $(addprefix $(CMDDIR)/,$(addsuffix /compile_commands.json,${CPP_PROJECTS}))
compile_commands.json: build/concatinate_jsons.py ${COMPILE_COMMANDS_JSONS}
@build/concatinate_jsons.py ${COMPILE_COMMANDS_JSONS}
include $(MK_INCLUDE)
clean: $(CLEAN_TARGETS) docpreview_clean
@echo "Cleaning toplevel"
@rm -rf $(OBJDIR)
@rm -rf $(DEPSDIR)
@rm -rf lib/
@rm -rf bin/coverage.*.xml
@rm -rf $(CMDDIR) compile_commands.json
@find . -name "*.g.cpp" -exec rm -f {} \; -or -name "*.g.h" -exec rm -f {} \;
targets:
@echo "Available targets:"
@for i in $(sort $(TARGETS)); do echo " $$i" ; done
@echo "Append _clean to clean specific target."
lib/libstdc++.a: $(shell $(CC) -print-file-name=libstdc++_pic.a)
@echo "Frobnicating $@"
@mkdir -p lib
@rm -rf .libstdc++
@mkdir .libstdc++
@(cd .libstdc++ && ar x $<)
@for i in .libstdc++/*.o ; do \
nm $$i | grep -q \@ || continue ; \
nm $$i | awk '$$3 ~ /@@/ { COPY = $$3; sub(/@@.*/, "", COPY); print $$3, COPY; }' > .libstdc++/replacements ; \
objcopy --redefine-syms=.libstdc++/replacements $$i $$i.new && mv $$i.new $$i ; \
rm .libstdc++/replacements ; \
nm $$i | awk '$$3 ~ /@/ { print $$3; }' > .libstdc++/deletes ; \
objcopy --strip-symbols=.libstdc++/deletes $$i $$i.new && mv $$i.new $$i ; \
rm .libstdc++/deletes ; \
done
@ar rcs $@ .libstdc++/*.o
@rm -r .libstdc++
docpreview: javadoc
@echo "Generating docpreview"
@TARGETS= $(MAKE) -C documentation docpreview
docpreview_clean:
@echo "Cleaning docpreview"
@CLEAN_TARGETS= $(MAKE) -C documentation -s --no-print-directory docpreview_clean
packages/foundationdb-docs-$(VERSION).tar.gz: FORCE javadoc
@echo "Packaging documentation"
@TARGETS= $(MAKE) -C documentation docpackage
@mkdir -p packages
@rm -f packages/foundationdb-docs-$(VERSION).tar.gz
@cp documentation/sphinx/.dist/foundationdb-docs-$(VERSION).tar.gz packages/foundationdb-docs-$(VERSION).tar.gz
docpackage: packages/foundationdb-docs-$(VERSION).tar.gz
FORCE:
.SECONDEXPANSION:
bin/coverage.%.xml: bin/coveragetool.exe $$(%_ALL_SOURCES)
@echo "Creating $@"
@$(MONO) bin/coveragetool.exe $@ $(filter-out $<,$^) >/dev/null
$(CPP_MK_GENERATED): $$(@D)/*.vcxproj
$(CS_MK_GENERATED): $$(@D)/*.csproj

View File

@ -33,13 +33,17 @@ CMake-based build system. Both of them should currently work for most users,
and CMake should be the preferred choice as it will eventually become the only
build system available.
If compiling for local development, please set `-DUSE_WERROR=ON` in
cmake. Our CI compiles with `-Werror` on, so this way you'll find out about
compiler warnings that break the build earlier.
## CMake
To build with CMake, generally the following is required (works on Linux and
Mac OS - for Windows see below):
1. Check out this repository.
1. Install cmake Version 3.12 or higher [CMake](https://cmake.org/)
1. Install cmake Version 3.13 or higher [CMake](https://cmake.org/)
1. Download version 1.67 of [Boost](https://sourceforge.net/projects/boost/files/boost/1.67.0/).
1. Unpack boost (you don't need to compile it)
1. Install [Mono](http://www.mono-project.com/download/stable/).
@ -47,17 +51,17 @@ Mac OS - for Windows see below):
1. Create a build directory (you can have the build directory anywhere you
like): `mkdir build`
1. `cd build`
1. `cmake -DBOOST_ROOT=<PATH_TO_BOOST> <PATH_TO_FOUNDATIONDB_DIRECTORY>`
1. `make`
1. `cmake -GNinja -DBOOST_ROOT=<PATH_TO_BOOST> <PATH_TO_FOUNDATIONDB_DIRECTORY>`
1. `ninja`
CMake will try to find its dependencies. However, for LibreSSL this can be often
problematic (especially if OpenSSL is installed as well). For that we recommend
passing the argument `-DLibreSSL_ROOT` to cmake. So, for example, if you
LibreSSL is installed under /usr/local/libressl-2.8.3, you should call cmake like
LibreSSL is installed under `/usr/local/libressl-2.8.3`, you should call cmake like
this:
```
cmake -DLibreSSL_ROOT=/usr/local/libressl-2.8.3/ ../foundationdb
cmake -GNinja -DLibreSSL_ROOT=/usr/local/libressl-2.8.3/ ../foundationdb
```
FoundationDB will build just fine without LibreSSL, however, the resulting
@ -129,31 +133,31 @@ If you want to create a package you have to tell cmake what platform it is for.
And then you can build by simply calling `cpack`. So for debian, call:
```
cmake -DINSTALL_LAYOUT=DEB <FDB_SOURCE_DIR>
make
cpack
cmake -GNinja <FDB_SOURCE_DIR>
ninja
cpack -G DEB
```
For RPM simply replace `DEB` with `RPM`.
### MacOS
The build under MacOS will work the same way as on Linux. To get LibreSSL and boost you
can use [Homebrew](https://brew.sh/). LibreSSL will not be installed in
`/usr/local` instead it will stay in `/usr/local/Cellar`. So the cmake command
will look something like this:
The build under MacOS will work the same way as on Linux. To get LibreSSL,
boost, and ninja you can use [Homebrew](https://brew.sh/). LibreSSL will not be
installed in `/usr/local` instead it will stay in `/usr/local/Cellar`. So the
cmake command will look something like this:
```sh
cmake -DLibreSSL_ROOT=/usr/local/Cellar/libressl/2.8.3 <PATH_TO_FOUNDATIONDB_SOURCE>
cmake -GNinja -DLibreSSL_ROOT=/usr/local/Cellar/libressl/2.8.3 <PATH_TO_FOUNDATIONDB_SOURCE>
```
To generate a installable package, you have to call CMake with the corresponding
arguments and then use cpack to generate the package:
```sh
cmake -DINSTALL_LAYOUT=OSX <FDB_SOURCE_DIR>
make
cpack
cmake -GNinja <FDB_SOURCE_DIR>
ninja
cpack -G productbuild
```
### Windows
@ -202,7 +206,7 @@ will automatically find it and build with TLS support.
If you installed WIX before running `cmake` you should find the
`FDBInstaller.msi` in your build directory under `packaging/msi`.
## Makefile
## Makefile (Deprecated - all users should transition to using cmake)
#### MacOS
@ -219,7 +223,7 @@ If you installed WIX before running `cmake` you should find the
1. Install [Docker](https://www.docker.com/).
1. Check out the foundationdb repo.
1. Run the docker image interactively [Docker Run](https://docs.docker.com/engine/reference/run/#general-form) with the directory containing the foundationdb repo mounted [Docker Mounts](https://docs.docker.com/storage/volumes/).
1. Run the docker image interactively with [Docker Run](https://docs.docker.com/engine/reference/run/#general-form), and with the directory containing the foundationdb repo mounted via [Docker Mounts](https://docs.docker.com/storage/volumes/).
```shell
docker run -it -v '/local/dir/path/foundationdb:/docker/dir/path/foundationdb' foundationdb/foundationdb-build:latest

View File

@ -13,3 +13,6 @@ endif()
if(WITH_RUBY)
add_subdirectory(ruby)
endif()
if(NOT WIN32 AND NOT OPEN_FOR_IDE)
package_bindingtester()
endif()

View File

@ -26,7 +26,7 @@ sys.path[:0] = [os.path.join(os.path.dirname(__file__), '..', '..', 'bindings',
import util
FDB_API_VERSION = 620
FDB_API_VERSION = 630
LOGGING = {
'version': 1,

View File

@ -1,4 +1,4 @@
#!/usr/bin/env python
#!/usr/bin/env python3
#
# bindingtester.py
#
@ -38,15 +38,13 @@ from functools import reduce
sys.path[:0] = [os.path.join(os.path.dirname(__file__), '..')]
import bindingtester
from bindingtester import FDB_API_VERSION
from bindingtester import Result
from bindingtester import util
from bindingtester.tests import Test, InstructionSet
from known_testers import Tester
from bindingtester.known_testers import Tester
import fdb
import fdb.tuple
@ -110,9 +108,10 @@ class ResultSet(object):
# Fill in 'None' values for testers that didn't produce a result and generate an output string describing the results
all_results = {i: results[i] if i in results else None for i in range(len(self.tester_results))}
result_str = '\n'.join([' %-*s - %s' % (name_length, self.tester_results.keys()[i], r) for i, r in all_results.items()])
result_keys = list(self.tester_results.keys())
result_str = '\n'.join([' %-*s - %s' % (name_length, result_keys[i], r) for i, r in all_results.items()])
result_list = results.values()
result_list = list(results.values())
# If any of our results matches the global error filter, we ignore the result
if any(r.matches_global_error_filter(self.specification) for r in result_list):
@ -158,7 +157,7 @@ def choose_api_version(selected_api_version, tester_min_version, tester_max_vers
api_version = min_version
elif random.random() < 0.9:
api_version = random.choice([v for v in [13, 14, 16, 21, 22, 23, 100, 200, 300, 400, 410, 420, 430,
440, 450, 460, 500, 510, 520, 600, 610, 620] if v >= min_version and v <= max_version])
440, 450, 460, 500, 510, 520, 600, 610, 620, 630] if v >= min_version and v <= max_version])
else:
api_version = random.randint(min_version, max_version)
@ -200,7 +199,7 @@ class TestRunner(object):
raise Exception('Not all testers support concurrency')
# Test types should be intersection of all tester supported types
self.args.types = reduce(lambda t1, t2: filter(t1.__contains__, t2), map(lambda tester: tester.types, self.testers))
self.args.types = list(reduce(lambda t1, t2: filter(t1.__contains__, t2), map(lambda tester: tester.types, self.testers)))
self.args.no_directory_snapshot_ops = self.args.no_directory_snapshot_ops or any([not tester.directory_snapshot_ops_enabled for tester in self.testers])
@ -264,19 +263,19 @@ class TestRunner(object):
if self.args.concurrency == 1:
self.test.setup(self.args)
test_instructions = {fdb.Subspace((self.args.instruction_prefix,)): self.test.generate(self.args, 0)}
test_instructions = {fdb.Subspace((bytes(self.args.instruction_prefix, 'utf-8'),)): self.test.generate(self.args, 0)}
else:
test_instructions = {}
main_thread = InstructionSet()
for i in range(self.args.concurrency):
# thread_spec = fdb.Subspace(('thread_spec', i))
thread_spec = 'thread_spec%d' % i
thread_spec = b'thread_spec%d' % i
main_thread.push_args(thread_spec)
main_thread.append('START_THREAD')
self.test.setup(self.args)
test_instructions[fdb.Subspace((thread_spec,))] = self.test.generate(self.args, i)
test_instructions[fdb.Subspace((self.args.instruction_prefix,))] = main_thread
test_instructions[fdb.Subspace((bytes(self.args.instruction_prefix, 'utf-8'),))] = main_thread
return test_instructions

View File

@ -20,7 +20,7 @@
import os
MAX_API_VERSION = 620
MAX_API_VERSION = 630
COMMON_TYPES = ['null', 'bytes', 'string', 'int', 'uuid', 'bool', 'float', 'double', 'tuple']
ALL_TYPES = COMMON_TYPES + ['versionstamp']

View File

@ -25,7 +25,8 @@ BREAKONERROR="${BREAKONERROR:-0}"
RUNSCRIPTS="${RUNSCRIPTS:-1}"
RUNTESTS="${RUNTESTS:-1}"
RANDOMTEST="${RANDOMTEST:-0}"
BINDINGTESTS="${BINDINGTESTS:-python python3 java java_async ruby go flow}"
# BINDINGTESTS="${BINDINGTESTS:-python python3 java java_async ruby go flow}"
BINDINGTESTS="${BINDINGTESTS:-python python3 java java_async go flow}"
LOGLEVEL="${LOGLEVEL:-INFO}"
_BINDINGTESTS=(${BINDINGTESTS})
DISABLEDTESTS=()
@ -186,7 +187,7 @@ function runScriptedTest()
else
local test="${1}"
if ! runCommand "Scripting ${test} ..." 'python' '-u' "${TESTFILE}" "${test}" --test-name scripted --logging-level "${LOGLEVEL}"
if ! runCommand "Scripting ${test} ..." 'python3' '-u' "${TESTFILE}" "${test}" --test-name scripted --logging-level "${LOGLEVEL}"
then
let status="${status} + 1"
fi
@ -211,25 +212,25 @@ function runTest()
fi
# API
if ([[ "${TESTINDEX}" -eq 0 ]] || [[ "${TESTINDEX}" -eq "${TESTTOTAL}" ]]) && ([[ "${BREAKONERROR}" -eq 0 ]] || [[ "${status}" -eq 0 ]]) && ! runCommand " ${TESTTYPES[0]}" 'python' '-u' "${TESTFILE}" "${test}" --test-name api --compare --num-ops "${OPERATIONS}" --logging-level "${LOGLEVEL}"
if ([[ "${TESTINDEX}" -eq 0 ]] || [[ "${TESTINDEX}" -eq "${TESTTOTAL}" ]]) && ([[ "${BREAKONERROR}" -eq 0 ]] || [[ "${status}" -eq 0 ]]) && ! runCommand " ${TESTTYPES[0]}" 'python3' '-u' "${TESTFILE}" "${test}" --test-name api --compare --num-ops "${OPERATIONS}" --logging-level "${LOGLEVEL}"
then
let status="${status} + 1"
fi
# Concurrent API
if ([[ "${TESTINDEX}" -eq 1 ]] || [[ "${TESTINDEX}" -eq "${TESTTOTAL}" ]]) && ([[ "${BREAKONERROR}" -eq 0 ]] || [[ "${status}" -eq 0 ]]) && ! runCommand " ${TESTTYPES[1]}" 'python' '-u' "${TESTFILE}" "${test}" --test-name api --concurrency "${CONCURRENCY}" --num-ops "${OPERATIONS}" --logging-level "${LOGLEVEL}"
if ([[ "${TESTINDEX}" -eq 1 ]] || [[ "${TESTINDEX}" -eq "${TESTTOTAL}" ]]) && ([[ "${BREAKONERROR}" -eq 0 ]] || [[ "${status}" -eq 0 ]]) && ! runCommand " ${TESTTYPES[1]}" 'python3' '-u' "${TESTFILE}" "${test}" --test-name api --concurrency "${CONCURRENCY}" --num-ops "${OPERATIONS}" --logging-level "${LOGLEVEL}"
then
let status="${status} + 1"
fi
# Directory
if ([[ "${TESTINDEX}" -eq 2 ]] || [[ "${TESTINDEX}" -eq "${TESTTOTAL}" ]]) && ([[ "${BREAKONERROR}" -eq 0 ]] || [[ "${status}" -eq 0 ]]) && ! runCommand " ${TESTTYPES[2]}" 'python' '-u' "${TESTFILE}" "${test}" --test-name directory --compare --num-ops "${OPERATIONS}" --logging-level "${LOGLEVEL}"
if ([[ "${TESTINDEX}" -eq 2 ]] || [[ "${TESTINDEX}" -eq "${TESTTOTAL}" ]]) && ([[ "${BREAKONERROR}" -eq 0 ]] || [[ "${status}" -eq 0 ]]) && ! runCommand " ${TESTTYPES[2]}" 'python3' '-u' "${TESTFILE}" "${test}" --test-name directory --compare --num-ops "${OPERATIONS}" --logging-level "${LOGLEVEL}"
then
let status="${status} + 1"
fi
# Directory HCA
if ([[ "${TESTINDEX}" -eq 3 ]] || [[ "${TESTINDEX}" -eq "${TESTTOTAL}" ]]) && ([[ "${BREAKONERROR}" -eq 0 ]] || [[ "${status}" -eq 0 ]]) && ! runCommand " ${TESTTYPES[3]}" 'python' '-u' "${TESTFILE}" "${test}" --test-name directory_hca --concurrency "${CONCURRENCY}" --num-ops "${HCAOPERATIONS}" --logging-level "${LOGLEVEL}"
if ([[ "${TESTINDEX}" -eq 3 ]] || [[ "${TESTINDEX}" -eq "${TESTTOTAL}" ]]) && ([[ "${BREAKONERROR}" -eq 0 ]] || [[ "${status}" -eq 0 ]]) && ! runCommand " ${TESTTYPES[3]}" 'python3' '-u' "${TESTFILE}" "${test}" --test-name directory_hca --concurrency "${CONCURRENCY}" --num-ops "${HCAOPERATIONS}" --logging-level "${LOGLEVEL}"
then
let status="${status} + 1"
fi

View File

@ -164,6 +164,13 @@ futures must apply the following rules to the result:
database using the get() method. May optionally push a future onto the
stack.
#### GET_ESTIMATED_RANGE_SIZE
Pops the top two items off of the stack as BEGIN_KEY and END_KEY to
construct a key range. Then call the `getEstimatedRangeSize` API of
the language binding. Make sure the API returns without error. Finally
push the string "GOT_ESTIMATED_RANGE_SIZE" onto the stack.
#### GET_KEY (_SNAPSHOT, _DATABASE)
Pops the top four items off of the stack as KEY, OR_EQUAL, OFFSET, PREFIX

View File

@ -37,8 +37,8 @@ class ResultSpecification(object):
self.ordering_index = ordering_index
if global_error_filter is not None:
error_str = '|'.join(['%d' % e for e in global_error_filter])
self.error_regex = re.compile(r'\x01+ERROR\x00\xff*\x01' + error_str + r'\x00')
error_str = b'|'.join([b'%d' % e for e in global_error_filter])
self.error_regex = re.compile(rb'\x01+ERROR\x00\xff*\x01' + error_str + rb'\x00')
else:
self.error_regex = None
@ -90,7 +90,7 @@ class Test(object):
def versionstamp_value(self, raw_bytes, version_pos=0):
if hasattr(self, 'api_version') and self.api_version < 520:
if version_pos != 0:
raise ValueError("unable to set non-zero version position before 520 in values")
raise ValueError('unable to set non-zero version position before 520 in values')
return raw_bytes
else:
return raw_bytes + struct.pack('<L', version_pos)
@ -109,7 +109,7 @@ class Instruction(object):
def __init__(self, operation):
self.operation = operation
self.argument = None
self.value = fdb.tuple.pack((unicode(self.operation),))
self.value = fdb.tuple.pack((self.operation,))
def to_value(self):
return self.value
@ -125,7 +125,7 @@ class PushInstruction(Instruction):
def __init__(self, argument):
self.operation = 'PUSH'
self.argument = argument
self.value = fdb.tuple.pack((unicode("PUSH"), argument))
self.value = fdb.tuple.pack(('PUSH', argument))
def __str__(self):
return '%s %s' % (self.operation, self.argument)

View File

@ -157,6 +157,7 @@ class ApiTest(Test):
read_conflicts = ['READ_CONFLICT_RANGE', 'READ_CONFLICT_KEY']
write_conflicts = ['WRITE_CONFLICT_RANGE', 'WRITE_CONFLICT_KEY', 'DISABLE_WRITE_CONFLICT']
txn_sizes = ['GET_APPROXIMATE_SIZE']
storage_metrics = ['GET_ESTIMATED_RANGE_SIZE']
op_choices += reads
op_choices += mutations
@ -170,9 +171,10 @@ class ApiTest(Test):
op_choices += write_conflicts
op_choices += resets
op_choices += txn_sizes
op_choices += storage_metrics
idempotent_atomic_ops = [u'BIT_AND', u'BIT_OR', u'MAX', u'MIN', u'BYTE_MIN', u'BYTE_MAX']
atomic_ops = idempotent_atomic_ops + [u'ADD', u'BIT_XOR', u'APPEND_IF_FITS']
idempotent_atomic_ops = ['BIT_AND', 'BIT_OR', 'MAX', 'MIN', 'BYTE_MIN', 'BYTE_MAX']
atomic_ops = idempotent_atomic_ops + ['ADD', 'BIT_XOR', 'APPEND_IF_FITS']
if args.concurrency > 1:
self.max_keys = random.randint(100, 1000)
@ -357,26 +359,26 @@ class ApiTest(Test):
split = random.randint(0, 70)
prefix = self.random.random_string(20 + split)
if prefix.endswith('\xff'):
if prefix.endswith(b'\xff'):
# Necessary to make sure that the SET_VERSIONSTAMPED_VALUE check
# correctly finds where the version is supposed to fit in.
prefix += '\x00'
prefix += b'\x00'
suffix = self.random.random_string(70 - split)
rand_str2 = prefix + fdb.tuple.Versionstamp._UNSET_TR_VERSION + suffix
key3 = self.versionstamped_keys.pack() + rand_str2
index = len(self.versionstamped_keys.pack()) + len(prefix)
key3 = self.versionstamp_key(key3, index)
instructions.push_args(u'SET_VERSIONSTAMPED_VALUE',
instructions.push_args('SET_VERSIONSTAMPED_VALUE',
key1,
self.versionstamp_value(fdb.tuple.Versionstamp._UNSET_TR_VERSION + rand_str2))
instructions.append('ATOMIC_OP')
if args.api_version >= 520:
instructions.push_args(u'SET_VERSIONSTAMPED_VALUE', key2, self.versionstamp_value(rand_str2, len(prefix)))
instructions.push_args('SET_VERSIONSTAMPED_VALUE', key2, self.versionstamp_value(rand_str2, len(prefix)))
instructions.append('ATOMIC_OP')
instructions.push_args(u'SET_VERSIONSTAMPED_KEY', key3, rand_str1)
instructions.push_args('SET_VERSIONSTAMPED_KEY', key3, rand_str1)
instructions.append('ATOMIC_OP')
self.can_use_key_selectors = False
@ -467,17 +469,17 @@ class ApiTest(Test):
instructions.push_args(rand_str)
test_util.to_front(instructions, 1)
instructions.push_args(u'SET_VERSIONSTAMPED_KEY')
instructions.push_args('SET_VERSIONSTAMPED_KEY')
instructions.append('ATOMIC_OP')
if self.api_version >= 520:
version_value_key_2 = self.versionstamped_values_2.pack((rand_str,))
versionstamped_value = self.versionstamp_value(fdb.tuple.pack(tup), first_incomplete - len(prefix))
instructions.push_args(u'SET_VERSIONSTAMPED_VALUE', version_value_key_2, versionstamped_value)
instructions.push_args('SET_VERSIONSTAMPED_VALUE', version_value_key_2, versionstamped_value)
instructions.append('ATOMIC_OP')
version_value_key = self.versionstamped_values.pack((rand_str,))
instructions.push_args(u'SET_VERSIONSTAMPED_VALUE', version_value_key,
instructions.push_args('SET_VERSIONSTAMPED_VALUE', version_value_key,
self.versionstamp_value(fdb.tuple.Versionstamp._UNSET_TR_VERSION + fdb.tuple.pack(tup)))
instructions.append('ATOMIC_OP')
self.can_use_key_selectors = False
@ -500,8 +502,8 @@ class ApiTest(Test):
# Use SUB to test if integers are correctly unpacked
elif op == 'SUB':
a = self.random.random_int() / 2
b = self.random.random_int() / 2
a = self.random.random_int() // 2
b = self.random.random_int() // 2
instructions.push_args(0, a, b)
instructions.append(op)
instructions.push_args(1)
@ -536,6 +538,21 @@ class ApiTest(Test):
instructions.push_args(d)
instructions.append(op)
self.add_strings(1)
elif op == 'GET_ESTIMATED_RANGE_SIZE':
# Protect against inverted range and identical keys
key1 = self.workspace.pack(self.random.random_tuple(1))
key2 = self.workspace.pack(self.random.random_tuple(1))
while key1 == key2:
key1 = self.workspace.pack(self.random.random_tuple(1))
key2 = self.workspace.pack(self.random.random_tuple(1))
if key1 > key2:
key1, key2 = key2, key1
instructions.push_args(key1, key2)
instructions.append(op)
self.add_strings(1)
else:
assert False, 'Unknown operation: ' + op
@ -566,7 +583,7 @@ class ApiTest(Test):
next_begin = None
incorrect_versionstamps = 0
for k, v in tr.get_range(begin_key, self.versionstamped_values.range().stop, limit=limit):
next_begin = k + '\x00'
next_begin = k + b'\x00'
random_id = self.versionstamped_values.unpack(k)[0]
versioned_value = v[10:].replace(fdb.tuple.Versionstamp._UNSET_TR_VERSION, v[:10], 1)
@ -602,6 +619,6 @@ class ApiTest(Test):
def get_result_specifications(self):
return [
ResultSpecification(self.workspace, global_error_filter=[1007, 1021]),
ResultSpecification(self.stack_subspace, key_start_index=1, ordering_index=1, global_error_filter=[1007, 1021])
ResultSpecification(self.workspace, global_error_filter=[1007, 1009, 1021]),
ResultSpecification(self.stack_subspace, key_start_index=1, ordering_index=1, global_error_filter=[1007, 1009, 1021])
]

View File

@ -52,15 +52,15 @@ class DirectoryTest(Test):
self.dir_list.append(child)
self.dir_index = directory_util.DEFAULT_DIRECTORY_INDEX
def generate_layer(self):
def generate_layer(self, allow_partition=True):
if random.random() < 0.7:
return ''
return b''
else:
choice = random.randint(0, 3)
if choice == 0:
return 'partition'
if choice == 0 and allow_partition:
return b'partition'
elif choice == 1:
return 'test_layer'
return b'test_layer'
else:
return self.random.random_string(random.randint(0, 5))
@ -98,7 +98,7 @@ class DirectoryTest(Test):
instructions.append('NEW_TRANSACTION')
default_path = unicode('default%d' % self.next_path)
default_path = 'default%d' % self.next_path
self.next_path += 1
self.dir_list = directory_util.setup_directories(instructions, default_path, self.random)
self.root = self.dir_list[0]
@ -114,7 +114,7 @@ class DirectoryTest(Test):
instructions.push_args(layer)
instructions.push_args(*test_util.with_length(path))
instructions.append('DIRECTORY_OPEN')
self.dir_list.append(self.root.add_child(path, DirectoryStateTreeNode(True, True, has_known_prefix=False, is_partition=(layer=='partition'))))
self.dir_list.append(self.root.add_child(path, DirectoryStateTreeNode(True, True, has_known_prefix=False, is_partition=(layer==b'partition'))))
# print('%d. Selected %s, dir=%s, dir_id=%s, has_known_prefix=%s, dir_list_len=%d' \
# % (len(instructions), 'DIRECTORY_OPEN', repr(self.dir_index), self.dir_list[-1].dir_id, False, len(self.dir_list)-1))
@ -184,7 +184,9 @@ class DirectoryTest(Test):
test_util.blocking_commit(instructions)
path = generate_path()
op_args = test_util.with_length(path) + (self.generate_layer(),)
# Partitions that use the high-contention allocator can result in non-determinism if they fail to commit,
# so we disallow them in comparison tests
op_args = test_util.with_length(path) + (self.generate_layer(allow_partition=args.concurrency>1),)
directory_util.push_instruction_and_record_prefix(instructions, op, op_args, path, len(self.dir_list), self.random, self.prefix_log)
if not op.endswith('_DATABASE') and args.concurrency == 1:
@ -199,7 +201,7 @@ class DirectoryTest(Test):
elif root_op == 'DIRECTORY_CREATE':
layer = self.generate_layer()
is_partition = layer == 'partition'
is_partition = layer == b'partition'
prefix = generate_prefix(require_unique=is_partition and args.concurrency==1, is_partition=is_partition, min_length=0)
@ -256,7 +258,7 @@ class DirectoryTest(Test):
self.dir_list.append(dir_entry.add_child(new_path, child_entry))
# Make sure that the default directory subspace still exists after moving the specified directory
if dir_entry.state.is_directory and not dir_entry.state.is_subspace and old_path == (u'',):
if dir_entry.state.is_directory and not dir_entry.state.is_subspace and old_path == ('',):
self.ensure_default_directory_subspace(instructions, default_path)
elif root_op == 'DIRECTORY_MOVE_TO':
@ -291,7 +293,7 @@ class DirectoryTest(Test):
dir_entry.delete(path)
# Make sure that the default directory subspace still exists after removing the specified directory
if path == () or (dir_entry.state.is_directory and not dir_entry.state.is_subspace and path == (u'',)):
if path == () or (dir_entry.state.is_directory and not dir_entry.state.is_subspace and path == ('',)):
self.ensure_default_directory_subspace(instructions, default_path)
elif root_op == 'DIRECTORY_LIST' or root_op == 'DIRECTORY_EXISTS':
@ -378,7 +380,7 @@ class DirectoryTest(Test):
def get_result_specifications(self):
return [
ResultSpecification(self.stack_subspace, key_start_index=1, ordering_index=1, global_error_filter=[1007, 1021]),
ResultSpecification(self.stack_subspace, key_start_index=1, ordering_index=1, global_error_filter=[1007, 1009, 1021]),
ResultSpecification(self.directory_log, ordering_index=0),
ResultSpecification(self.subspace_log, ordering_index=0)
]
@ -392,15 +394,15 @@ def generate_path(min_length=0):
path = ()
for i in range(length):
if random.random() < 0.05:
path = path + (u'',)
path = path + ('',)
else:
path = path + (random.choice([u'1', u'2', u'3']),)
path = path + (random.choice(['1', '2', '3']),)
return path
def generate_prefix(require_unique=False, is_partition=False, min_length=1):
fixed_prefix = 'abcdefg'
fixed_prefix = b'abcdefg'
if not require_unique and min_length == 0 and random.random() < 0.8:
return None
elif require_unique or is_partition or min_length > len(fixed_prefix) or random.random() < 0.5:
@ -409,13 +411,13 @@ def generate_prefix(require_unique=False, is_partition=False, min_length=1):
length = random.randint(min_length, min_length+5)
if length == 0:
return ''
return b''
if not is_partition:
first = chr(random.randint(ord('\x1d'), 255) % 255)
return first + ''.join(chr(random.randrange(0, 256)) for i in range(0, length - 1))
first = random.randint(ord('\x1d'), 255) % 255
return bytes([first] + [random.randrange(0, 256) for i in range(0, length - 1)])
else:
return ''.join(chr(random.randrange(ord('\x02'), ord('\x14'))) for i in range(0, length))
return bytes([random.randrange(ord('\x02'), ord('\x14')) for i in range(0, length)])
else:
prefix = fixed_prefix
generated = prefix[0:random.randrange(min_length, len(prefix))]

View File

@ -40,7 +40,7 @@ class DirectoryHcaTest(Test):
def setup(self, args):
self.random = test_util.RandomGenerator(args.max_int_bits, args.api_version, args.types)
self.transactions = ['tr%d' % i for i in range(3)] # SOMEDAY: parameterize this number?
self.transactions = [b'tr%d' % i for i in range(3)] # SOMEDAY: parameterize this number?
self.barrier_num = 0
self.max_directories_per_transaction = 30
@ -59,7 +59,7 @@ class DirectoryHcaTest(Test):
def barrier(self, instructions, thread_number, thread_ending=False):
if not thread_ending:
instructions.push_args(self.coordination[(self.barrier_num + 1)][thread_number].key(), '')
instructions.push_args(self.coordination[(self.barrier_num + 1)][thread_number].key(), b'')
instructions.append('SET_DATABASE')
instructions.append('WAIT_FUTURE')
@ -76,7 +76,7 @@ class DirectoryHcaTest(Test):
instructions.append('NEW_TRANSACTION')
default_path = unicode('default%d' % self.next_path)
default_path = 'default%d' % self.next_path
self.next_path += 1
dir_list = directory_util.setup_directories(instructions, default_path, self.random)
num_dirs = len(dir_list)
@ -102,7 +102,7 @@ class DirectoryHcaTest(Test):
for i in range(num_directories):
path = (self.random.random_unicode_str(16),)
op_args = test_util.with_length(path) + ('', None)
op_args = test_util.with_length(path) + (b'', None)
directory_util.push_instruction_and_record_prefix(instructions, 'DIRECTORY_CREATE',
op_args, path, num_dirs, self.random, self.prefix_log)
num_dirs += 1
@ -121,7 +121,7 @@ class DirectoryHcaTest(Test):
def pre_run(self, tr, args):
if args.concurrency > 1:
for i in range(args.concurrency):
tr[self.coordination[0][i]] = ''
tr[self.coordination[0][i]] = b''
def validate(self, db, args):
errors = []

View File

@ -249,7 +249,7 @@ def run_test():
# Test moving an entry
assert not entry.state.has_known_prefix
assert not entry.state.is_subspace
assert entry.state.children.keys() == ['1']
assert list(entry.state.children.keys()) == ['1']
for e in all_entries:
validate_dir(e, root)

View File

@ -32,25 +32,25 @@ from bindingtester.tests.directory_state_tree import DirectoryStateTreeNode
fdb.api_version(FDB_API_VERSION)
DEFAULT_DIRECTORY_INDEX = 4
DEFAULT_DIRECTORY_PREFIX = 'default'
DIRECTORY_ERROR_STRING = 'DIRECTORY_ERROR'
DEFAULT_DIRECTORY_PREFIX = b'default'
DIRECTORY_ERROR_STRING = b'DIRECTORY_ERROR'
def setup_directories(instructions, default_path, random):
# Clients start with the default directory layer in the directory list
DirectoryStateTreeNode.reset()
dir_list = [DirectoryStateTreeNode.get_layer('\xfe')]
dir_list = [DirectoryStateTreeNode.get_layer(b'\xfe')]
instructions.push_args(0, '\xfe')
instructions.push_args(0, b'\xfe')
instructions.append('DIRECTORY_CREATE_SUBSPACE')
dir_list.append(DirectoryStateTreeNode(False, True))
instructions.push_args(0, '')
instructions.push_args(0, b'')
instructions.append('DIRECTORY_CREATE_SUBSPACE')
dir_list.append(DirectoryStateTreeNode(False, True))
instructions.push_args(1, 2, 1)
instructions.append('DIRECTORY_CREATE_LAYER')
dir_list.append(DirectoryStateTreeNode.get_layer('\xfe'))
dir_list.append(DirectoryStateTreeNode.get_layer(b'\xfe'))
create_default_directory_subspace(instructions, default_path, random)
dir_list.append(dir_list[0].add_child((default_path,), DirectoryStateTreeNode(True, True, has_known_prefix=True)))
@ -67,7 +67,7 @@ def create_default_directory_subspace(instructions, path, random):
instructions.push_args(3)
instructions.append('DIRECTORY_CHANGE')
prefix = random.random_string(16)
instructions.push_args(1, path, '', '%s-%s' % (DEFAULT_DIRECTORY_PREFIX, prefix))
instructions.push_args(1, path, b'', b'%s-%s' % (DEFAULT_DIRECTORY_PREFIX, prefix))
instructions.append('DIRECTORY_CREATE_DATABASE')
instructions.push_args(DEFAULT_DIRECTORY_INDEX)
@ -88,14 +88,14 @@ def push_instruction_and_record_prefix(instructions, op, op_args, path, dir_inde
instructions.push_args(dir_index)
instructions.append('DIRECTORY_CHANGE')
instructions.push_args(1, '', random.random_string(16), '')
instructions.push_args(1, b'', random.random_string(16), b'')
instructions.append('DIRECTORY_PACK_KEY')
test_util.to_front(instructions, 3) # move the existence result up to the front of the stack
t = util.subspace_to_tuple(subspace)
instructions.push_args(len(t) + 3, *t)
instructions.append('TUPLE_PACK') # subspace[<exists>][<packed_key>][random.random_string(16)] = ''
instructions.append('TUPLE_PACK') # subspace[<exists>][<packed_key>][random.random_string(16)] = b''
instructions.append('SET')
instructions.push_args(DEFAULT_DIRECTORY_INDEX)
@ -128,7 +128,7 @@ def check_for_duplicate_prefixes(db, subspace):
def validate_hca_state(db):
hca = fdb.Subspace(('\xfe', 'hca'), '\xfe')
hca = fdb.Subspace((b'\xfe', b'hca'), b'\xfe')
counters = hca[0]
recent = hca[1]

View File

@ -34,7 +34,7 @@ fdb.api_version(FDB_API_VERSION)
class ScriptedTest(Test):
TEST_API_VERSION = 620
TEST_API_VERSION = 630
def __init__(self, subspace):
super(ScriptedTest, self).__init__(subspace, ScriptedTest.TEST_API_VERSION, ScriptedTest.TEST_API_VERSION)
@ -62,20 +62,20 @@ class ScriptedTest(Test):
test_instructions = ThreadedInstructionSet()
main_thread = test_instructions.create_thread()
foo = [self.workspace.pack(('foo%d' % i,)) for i in range(0, 6)]
foo = [self.workspace.pack((b'foo%d' % i,)) for i in range(0, 6)]
main_thread.append('NEW_TRANSACTION')
main_thread.push_args(1020)
main_thread.append('ON_ERROR')
self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')
self.add_result(main_thread, args, b'RESULT_NOT_PRESENT')
main_thread.append('GET_READ_VERSION')
main_thread.push_args(foo[1], 'bar')
main_thread.push_args(foo[1], b'bar')
main_thread.append('SET')
main_thread.push_args(foo[1])
main_thread.append('GET')
self.add_result(main_thread, args, 'bar')
self.add_result(main_thread, args, b'bar')
test_util.blocking_commit(main_thread)
self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')
self.add_result(main_thread, args, b'RESULT_NOT_PRESENT')
main_thread.push_args(2000)
main_thread.append('ON_ERROR')
@ -91,39 +91,39 @@ class ScriptedTest(Test):
main_thread.append('DUP')
main_thread.append('DUP')
main_thread.append('GET')
self.add_result(main_thread, args, 'bar')
self.add_result(main_thread, args, b'bar')
main_thread.append('CLEAR')
main_thread.append('GET_SNAPSHOT')
self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')
self.add_result(main_thread, args, b'RESULT_NOT_PRESENT')
main_thread.push_args(foo[1])
main_thread.append('GET_DATABASE')
self.add_result(main_thread, args, 'bar')
self.add_result(main_thread, args, b'bar')
test_util.blocking_commit(main_thread)
self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')
self.add_result(main_thread, args, b'RESULT_NOT_PRESENT')
main_thread.append('SET_READ_VERSION')
main_thread.push_args(foo[1])
main_thread.append('DUP')
main_thread.append('GET')
self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')
self.add_result(main_thread, args, b'RESULT_NOT_PRESENT')
main_thread.append('CLEAR')
test_util.blocking_commit(main_thread)
self.add_result(main_thread, args, test_util.error_string(1020))
main_thread.push_args(foo[1])
main_thread.append('GET_SNAPSHOT')
self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')
self.add_result(main_thread, args, b'RESULT_NOT_PRESENT')
main_thread.push_args(foo[1])
main_thread.append('CLEAR')
main_thread.append('COMMIT')
main_thread.append('WAIT_FUTURE')
self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')
self.add_result(main_thread, args, b'RESULT_NOT_PRESENT')
main_thread.append('GET_COMMITTED_VERSION')
main_thread.append('RESET')
main_thread.append('EMPTY_STACK')
main_thread.append('NEW_TRANSACTION')
main_thread.push_args(1, 'bar', foo[1], foo[2], 'bar2', foo[3], 'bar3', foo[4], 'bar4', foo[5], 'bar5')
main_thread.push_args(1, b'bar', foo[1], foo[2], b'bar2', foo[3], b'bar3', foo[4], b'bar4', foo[5], b'bar5')
main_thread.append('SWAP')
main_thread.append('SET')
main_thread.append('SET')
@ -131,112 +131,112 @@ class ScriptedTest(Test):
main_thread.append('SET')
main_thread.append('SET_DATABASE')
test_util.blocking_commit(main_thread)
self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')
self.add_result(main_thread, args, b'RESULT_NOT_PRESENT')
main_thread.append('SET_READ_VERSION')
main_thread.push_args(foo[2])
main_thread.append('GET')
self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')
self.add_result(main_thread, args, b'RESULT_NOT_PRESENT')
main_thread.append('NEW_TRANSACTION')
main_thread.push_args('', 0, -1, '')
main_thread.push_args(b'', 0, -1, b'')
main_thread.append('GET_KEY')
self.add_result(main_thread, args, '')
self.add_result(main_thread, args, b'')
main_thread.append('NEW_TRANSACTION')
main_thread.append('GET_READ_VERSION_SNAPSHOT')
main_thread.push_args('random', foo[1], foo[3], 0, 1, 1)
main_thread.push_args(b'random', foo[1], foo[3], 0, 1, 1)
main_thread.append('POP')
main_thread.append('GET_RANGE')
self.add_result(main_thread, args, fdb.tuple.pack((foo[2], 'bar2', foo[1], 'bar')))
self.add_result(main_thread, args, fdb.tuple.pack((foo[2], b'bar2', foo[1], b'bar')))
main_thread.push_args(foo[1], foo[3], 1, 1, 0)
main_thread.append('GET_RANGE_SNAPSHOT')
self.add_result(main_thread, args, fdb.tuple.pack((foo[2], 'bar2')))
self.add_result(main_thread, args, fdb.tuple.pack((foo[2], b'bar2')))
main_thread.push_args(foo[1], foo[3], 0, 0, 4)
main_thread.append('GET_RANGE_DATABASE')
self.add_result(main_thread, args, fdb.tuple.pack((foo[1], 'bar', foo[2], 'bar2')))
self.add_result(main_thread, args, fdb.tuple.pack((foo[1], b'bar', foo[2], b'bar2')))
test_util.blocking_commit(main_thread)
self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')
self.add_result(main_thread, args, b'RESULT_NOT_PRESENT')
main_thread.push_args(foo[3], foo[5])
main_thread.append('CLEAR_RANGE')
main_thread.push_args(foo[1], 0, 3, '')
main_thread.push_args(foo[1], 0, 3, b'')
main_thread.append('GET_KEY')
self.add_result(main_thread, args, foo[5])
main_thread.push_args(foo[1], 1, 2, '')
main_thread.push_args(foo[1], 1, 2, b'')
main_thread.append('GET_KEY_SNAPSHOT')
self.add_result(main_thread, args, foo[5])
main_thread.push_args(foo[5], 0, -2, '')
main_thread.push_args(foo[5], 0, -2, b'')
main_thread.append('GET_KEY_DATABASE')
self.add_result(main_thread, args, foo[2])
main_thread.push_args(self.workspace.key(), 2, 0, 2)
main_thread.append('GET_RANGE_STARTS_WITH')
self.add_result(main_thread, args, fdb.tuple.pack((foo[1], 'bar', foo[2], 'bar2')))
self.add_result(main_thread, args, fdb.tuple.pack((foo[1], b'bar', foo[2], b'bar2')))
main_thread.push_args(self.workspace.key(), 4, 0, 3)
main_thread.append('GET_RANGE_STARTS_WITH_SNAPSHOT')
self.add_result(main_thread, args, fdb.tuple.pack((foo[1], 'bar', foo[2], 'bar2', foo[5], 'bar5')))
self.add_result(main_thread, args, fdb.tuple.pack((foo[1], b'bar', foo[2], b'bar2', foo[5], b'bar5')))
main_thread.push_args(self.workspace.key(), 3, 1, -1)
main_thread.append('GET_RANGE_STARTS_WITH_DATABASE')
self.add_result(main_thread, args, fdb.tuple.pack((foo[5], 'bar5', foo[4], 'bar4', foo[3], 'bar3')))
main_thread.push_args(foo[1], 0, 1, foo[1], 0, 3, 0, 0, -1, '')
self.add_result(main_thread, args, fdb.tuple.pack((foo[5], b'bar5', foo[4], b'bar4', foo[3], b'bar3')))
main_thread.push_args(foo[1], 0, 1, foo[1], 0, 3, 0, 0, -1, b'')
main_thread.append('GET_RANGE_SELECTOR')
self.add_result(main_thread, args, fdb.tuple.pack((foo[1], 'bar', foo[2], 'bar2')))
main_thread.push_args(foo[1], 1, 0, foo[1], 1, 3, 0, 0, -1, '')
self.add_result(main_thread, args, fdb.tuple.pack((foo[1], b'bar', foo[2], b'bar2')))
main_thread.push_args(foo[1], 1, 0, foo[1], 1, 3, 0, 0, -1, b'')
main_thread.append('GET_RANGE_SELECTOR_SNAPSHOT')
self.add_result(main_thread, args, fdb.tuple.pack((foo[1], 'bar', foo[2], 'bar2', foo[5], 'bar5')))
main_thread.push_args(foo[1], 0, 1, foo[1], 1, 3, 0, 0, -1, '')
self.add_result(main_thread, args, fdb.tuple.pack((foo[1], b'bar', foo[2], b'bar2', foo[5], b'bar5')))
main_thread.push_args(foo[1], 0, 1, foo[1], 1, 3, 0, 0, -1, b'')
main_thread.append('GET_RANGE_SELECTOR_DATABASE')
self.add_result(main_thread, args, fdb.tuple.pack((foo[1], 'bar', foo[2], 'bar2', foo[3], 'bar3')))
self.add_result(main_thread, args, fdb.tuple.pack((foo[1], b'bar', foo[2], b'bar2', foo[3], b'bar3')))
test_util.blocking_commit(main_thread)
self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')
self.add_result(main_thread, args, b'RESULT_NOT_PRESENT')
main_thread.push_args(self.workspace.key())
main_thread.append('CLEAR_RANGE_STARTS_WITH')
main_thread.push_args(self.workspace.key(), 0, 0, -1)
main_thread.append('GET_RANGE_STARTS_WITH')
self.add_result(main_thread, args, '')
self.add_result(main_thread, args, b'')
test_util.blocking_commit(main_thread)
self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')
self.add_result(main_thread, args, b'RESULT_NOT_PRESENT')
main_thread.append('SET_READ_VERSION')
main_thread.push_args(foo[1])
main_thread.append('GET')
self.add_result(main_thread, args, 'bar')
self.add_result(main_thread, args, b'bar')
test_util.blocking_commit(main_thread)
self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')
self.add_result(main_thread, args, b'RESULT_NOT_PRESENT')
main_thread.push_args(foo[1], 'bar', foo[2], 'bar2', foo[3], 'bar3', foo[4], 'bar4', foo[5], 'bar5')
main_thread.push_args(foo[1], b'bar', foo[2], b'bar2', foo[3], b'bar3', foo[4], b'bar4', foo[5], b'bar5')
main_thread.append('SET')
main_thread.append('SET')
main_thread.append('SET')
main_thread.append('SET')
main_thread.append('SET')
test_util.blocking_commit(main_thread)
self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')
self.add_result(main_thread, args, b'RESULT_NOT_PRESENT')
main_thread.push_args(foo[2])
main_thread.append('CLEAR_DATABASE')
main_thread.append('WAIT_FUTURE')
main_thread.push_args(self.workspace.key(), 0, 0, -1)
main_thread.append('GET_RANGE_STARTS_WITH_DATABASE')
self.add_result(main_thread, args, fdb.tuple.pack((foo[1], 'bar', foo[3], 'bar3', foo[4], 'bar4', foo[5], 'bar5')))
self.add_result(main_thread, args, fdb.tuple.pack((foo[1], b'bar', foo[3], b'bar3', foo[4], b'bar4', foo[5], b'bar5')))
main_thread.push_args(foo[3], foo[5])
main_thread.append('CLEAR_RANGE_DATABASE')
main_thread.append('WAIT_FUTURE')
main_thread.push_args(self.workspace.key(), 0, 0, -1)
main_thread.append('GET_RANGE_STARTS_WITH_DATABASE')
self.add_result(main_thread, args, fdb.tuple.pack((foo[1], 'bar', foo[5], 'bar5')))
self.add_result(main_thread, args, fdb.tuple.pack((foo[1], b'bar', foo[5], b'bar5')))
main_thread.push_args(self.workspace.key())
main_thread.append('CLEAR_RANGE_STARTS_WITH_DATABASE')
main_thread.append('WAIT_FUTURE')
main_thread.push_args(self.workspace.key(), 0, 0, -1)
main_thread.append('GET_RANGE_STARTS_WITH_DATABASE')
self.add_result(main_thread, args, '')
self.add_result(main_thread, args, b'')
test_util.blocking_commit(main_thread)
self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')
self.add_result(main_thread, args, b'RESULT_NOT_PRESENT')
main_thread.append('NEW_TRANSACTION')
main_thread.push_args(foo[1], foo[5], 0, 0, 0)
@ -250,7 +250,7 @@ class ScriptedTest(Test):
self.append_range_test(main_thread, args, 1000, 8)
main_thread.append('EMPTY_STACK')
tup = (0, 'foo', -1093, u'unicode\u9348test', 0xffffffff + 100, 'bar\x00\xff')
tup = (0, b'foo', -1093, 'unicode\u9348test', 0xffffffff + 100, b'bar\x00\xff')
main_thread.push_args(*test_util.with_length(tup))
main_thread.append('TUPLE_PACK')
main_thread.append('DUP')
@ -272,58 +272,58 @@ class ScriptedTest(Test):
self.add_result(main_thread, args, rng.stop)
self.add_result(main_thread, args, rng.start)
stampKey = 'stampedXXXXXXXXXXsuffix'
stampKeyIndex = stampKey.find('XXXXXXXXXX')
main_thread.push_args(u'SET_VERSIONSTAMPED_KEY', self.versionstamp_key(stampKey, stampKeyIndex), 'stampedBar')
stampKey = b'stampedXXXXXXXXXXsuffix'
stampKeyIndex = stampKey.find(b'XXXXXXXXXX')
main_thread.push_args('SET_VERSIONSTAMPED_KEY', self.versionstamp_key(stampKey, stampKeyIndex), b'stampedBar')
main_thread.append('ATOMIC_OP')
main_thread.push_args(u'SET_VERSIONSTAMPED_VALUE', 'stampedValue', self.versionstamp_value('XXXXXXXXXX'))
main_thread.push_args('SET_VERSIONSTAMPED_VALUE', b'stampedValue', self.versionstamp_value(b'XXXXXXXXXX'))
main_thread.append('ATOMIC_OP')
if self.api_version >= 520:
stampValue = 'stampedXXXXXXXXXXsuffix'
stampValueIndex = stampValue.find('XXXXXXXXXX')
main_thread.push_args(u'SET_VERSIONSTAMPED_VALUE', 'stampedValue2', self.versionstamp_value(stampValue, stampValueIndex))
stampValue = b'stampedXXXXXXXXXXsuffix'
stampValueIndex = stampValue.find(b'XXXXXXXXXX')
main_thread.push_args('SET_VERSIONSTAMPED_VALUE', b'stampedValue2', self.versionstamp_value(stampValue, stampValueIndex))
main_thread.append('ATOMIC_OP')
main_thread.push_args('suffix')
main_thread.push_args(b'suffix')
main_thread.append('GET_VERSIONSTAMP')
test_util.blocking_commit(main_thread)
self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')
main_thread.push_args('stamped')
self.add_result(main_thread, args, b'RESULT_NOT_PRESENT')
main_thread.push_args(b'stamped')
main_thread.append('CONCAT')
main_thread.append('CONCAT')
main_thread.append('GET')
self.add_result(main_thread, args, 'stampedBar')
self.add_result(main_thread, args, b'stampedBar')
main_thread.push_args('stampedValue', 'suffix')
main_thread.push_args(b'stampedValue', b'suffix')
main_thread.append('GET')
main_thread.push_args('stamped')
main_thread.push_args(b'stamped')
main_thread.append('CONCAT')
main_thread.append('CONCAT')
main_thread.append('GET')
self.add_result(main_thread, args, 'stampedBar')
self.add_result(main_thread, args, b'stampedBar')
if self.api_version >= 520:
main_thread.push_args('stampedValue2')
main_thread.push_args(b'stampedValue2')
main_thread.append('GET')
main_thread.append('GET')
self.add_result(main_thread, args, 'stampedBar')
self.add_result(main_thread, args, b'stampedBar')
main_thread.append('GET_VERSIONSTAMP')
test_util.blocking_commit(main_thread)
self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')
self.add_result(main_thread, args, b'RESULT_NOT_PRESENT')
self.add_result(main_thread, args, test_util.error_string(2021))
main_thread.push_args('sentinel')
main_thread.push_args(b'sentinel')
main_thread.append('UNIT_TESTS')
self.add_result(main_thread, args, 'sentinel')
self.add_result(main_thread, args, b'sentinel')
if not args.no_threads:
wait_key = 'waitKey'
wait_key = b'waitKey'
# threads = [self.thread_subspace[i] for i in range(0, 2)]
threads = ['thread_spec%d' % i for i in range(0, 2)]
threads = [b'thread_spec%d' % i for i in range(0, 2)]
for thread_spec in threads:
main_thread.push_args(self.workspace.pack((wait_key, thread_spec)), '')
main_thread.push_args(self.workspace.pack((wait_key, thread_spec)), b'')
main_thread.append('SET_DATABASE')
main_thread.append('WAIT_FUTURE')
@ -333,7 +333,7 @@ class ScriptedTest(Test):
main_thread.append('START_THREAD')
thread = test_instructions.create_thread(fdb.Subspace((thread_spec,)))
thread.append('NEW_TRANSACTION')
thread.push_args(foo[1], foo[1], 'bar%s' % thread_spec, self.workspace.pack(
thread.push_args(foo[1], foo[1], b'bar%s' % thread_spec, self.workspace.pack(
(wait_key, thread_spec)), self.workspace.pack((wait_key, thread_spec)))
thread.append('GET')
thread.append('POP')
@ -348,7 +348,7 @@ class ScriptedTest(Test):
thread.append('NEW_TRANSACTION')
thread.push_args(foo[1])
thread.append('GET')
self.add_result(thread, args, 'barthread_spec0', 'barthread_spec1')
self.add_result(thread, args, b'barthread_spec0', b'barthread_spec1')
main_thread.append('EMPTY_STACK')
# if len(main_thread) > args.num_ops:
@ -358,7 +358,7 @@ class ScriptedTest(Test):
def get_result_specifications(self):
return [
ResultSpecification(self.results_subspace, ordering_index=0, global_error_filter=[1007, 1021])
ResultSpecification(self.results_subspace, ordering_index=0, global_error_filter=[1007, 1009, 1021])
]
def get_expected_results(self):
@ -372,7 +372,7 @@ class ScriptedTest(Test):
kvpairs = []
for i in range(0, num_pairs * 2):
kvpairs.append(self.workspace.pack(('foo', ''.join(chr(random.randint(0, 254)) for i in range(0, kv_length)))))
kvpairs.append(self.workspace.pack((b'foo', bytes([random.randint(0, 254) for i in range(0, kv_length)]))))
kvpairs = list(set(kvpairs))
if len(kvpairs) % 2 == 1:
@ -380,24 +380,24 @@ class ScriptedTest(Test):
kvpairs.sort()
instructions.push_args(*kvpairs)
for i in range(0, len(kvpairs) / 2):
for i in range(0, len(kvpairs) // 2):
instructions.append('SET')
if i % 100 == 99:
test_util.blocking_commit(instructions)
self.add_result(instructions, args, 'RESULT_NOT_PRESENT')
self.add_result(instructions, args, b'RESULT_NOT_PRESENT')
foo_range = self.workspace.range(('foo',))
foo_range = self.workspace.range((b'foo',))
instructions.push_args(foo_range.start, foo_range.stop, 0, 0, -1)
instructions.append('GET_RANGE')
self.add_result(instructions, args, fdb.tuple.pack(tuple(kvpairs)))
instructions.push_args(self.workspace.key(), 0, 0, -1)
instructions.append('GET_RANGE_STARTS_WITH')
self.add_result(instructions, args, fdb.tuple.pack(tuple(kvpairs)))
instructions.push_args(foo_range.start, 0, 1, foo_range.stop, 0, 1, 0, 0, -1, '')
instructions.push_args(foo_range.start, 0, 1, foo_range.stop, 0, 1, 0, 0, -1, b'')
instructions.append('GET_RANGE_SELECTOR')
self.add_result(instructions, args, fdb.tuple.pack(tuple(kvpairs)))
test_util.blocking_commit(instructions)
self.add_result(instructions, args, 'RESULT_NOT_PRESENT')
self.add_result(instructions, args, b'RESULT_NOT_PRESENT')
def add_result(self, instructions, args, *values):
key = self.results_subspace.pack((len(self.results),))

View File

@ -36,10 +36,10 @@ class RandomGenerator(object):
def __init__(self, max_int_bits=64, api_version=FDB_API_VERSION, types=COMMON_TYPES):
self.max_int_bits = max_int_bits
self.api_version = api_version
self.types = types
self.types = list(types)
def random_unicode_str(self, length):
return u''.join(self.random_unicode_char() for i in range(0, length))
return ''.join(self.random_unicode_char() for i in range(0, length))
def random_int(self):
num_bits = random.randint(0, self.max_int_bits) # This way, we test small numbers with higher probability
@ -123,7 +123,7 @@ class RandomGenerator(object):
smaller_size = random.randint(1, len(to_add))
tuples.append(to_add[:smaller_size])
else:
non_empty = filter(lambda (_, x): (isinstance(x, list) or isinstance(x, tuple)) and len(x) > 0, enumerate(to_add))
non_empty = [x for x in enumerate(to_add) if (isinstance(x[1], list) or isinstance(x[1], tuple)) and len(x[1]) > 0]
if len(non_empty) > 0 and random.random() < 0.25:
# Add a smaller list to test prefixes of nested structures.
idx, choice = random.choice(non_empty)
@ -153,24 +153,24 @@ class RandomGenerator(object):
def random_string(self, length):
if length == 0:
return ''
return b''
return chr(random.randint(0, 254)) + ''.join(chr(random.randint(0, 255)) for i in range(0, length - 1))
return bytes([random.randint(0, 254)] + [random.randint(0, 255) for i in range(0, length - 1)])
def random_unicode_char(self):
while True:
if random.random() < 0.05:
# Choose one of these special character sequences.
specials = [u'\U0001f4a9', u'\U0001f63c', u'\U0001f3f3\ufe0f\u200d\U0001f308', u'\U0001f1f5\U0001f1f2', u'\uf8ff',
u'\U0002a2b2', u'\u05e9\u05dc\u05d5\u05dd']
specials = ['\U0001f4a9', '\U0001f63c', '\U0001f3f3\ufe0f\u200d\U0001f308', '\U0001f1f5\U0001f1f2', '\uf8ff',
'\U0002a2b2', '\u05e9\u05dc\u05d5\u05dd']
return random.choice(specials)
c = random.randint(0, 0xffff)
if unicodedata.category(unichr(c))[0] in 'LMNPSZ':
return unichr(c)
if unicodedata.category(chr(c))[0] in 'LMNPSZ':
return chr(c)
def error_string(error_code):
return fdb.tuple.pack(('ERROR', str(error_code)))
return fdb.tuple.pack((b'ERROR', bytes(str(error_code), 'utf-8')))
def blocking_commit(instructions):

View File

@ -86,6 +86,6 @@ class TupleTest(Test):
def get_result_specifications(self):
return [
ResultSpecification(self.workspace, global_error_filter=[1007, 1021]),
ResultSpecification(self.stack_subspace, key_start_index=1, ordering_index=1, global_error_filter=[1007, 1021]),
ResultSpecification(self.workspace, global_error_filter=[1007, 1009, 1021]),
ResultSpecification(self.stack_subspace, key_start_index=1, ordering_index=1, global_error_filter=[1007, 1009, 1021]),
]

View File

@ -29,15 +29,15 @@ import fdb
def initialize_logger_level(logging_level):
logger = get_logger()
assert logging_level in ["DEBUG", "INFO", "WARNING", "ERROR"]
assert logging_level in ['DEBUG', 'INFO', 'WARNING', 'ERROR']
if logging_level == "DEBUG":
if logging_level == 'DEBUG':
logger.setLevel(logging.DEBUG)
elif logging_level == "INFO":
elif logging_level == 'INFO':
logger.setLevel(logging.INFO)
elif logging_level == "WARNING":
elif logging_level == 'WARNING':
logger.setLevel(logging.WARNING)
elif logging_level == "ERROR":
elif logging_level == 'ERROR':
logger.setLevel(logging.ERROR)
@ -49,7 +49,7 @@ def get_logger():
def signal_number_to_name(signal_num):
name = []
for key in signal.__dict__.keys():
if key.startswith("SIG") and getattr(signal, key) == signal_num:
if key.startswith('SIG') and getattr(signal, key) == signal_num:
name.append(key)
if len(name) == 1:
return name[0]

View File

@ -38,6 +38,21 @@ else()
endif()
add_dependencies(fdb_c fdb_c_generated fdb_c_options)
target_link_libraries(fdb_c PUBLIC $<BUILD_INTERFACE:fdbclient>)
# Platform-specific symbol-visibility / linker setup for the fdb_c shared library.
if(APPLE)
# macOS: the Mach-O linker takes an explicit list of exported symbols.
# Generate that list from the public header with symbolify.py.
set(symbols ${CMAKE_CURRENT_BINARY_DIR}/fdb_c.symbols)
add_custom_command(OUTPUT ${symbols}
COMMAND $<TARGET_FILE:Python::Interpreter> ${CMAKE_CURRENT_SOURCE_DIR}/symbolify.py
${CMAKE_CURRENT_SOURCE_DIR}/foundationdb/fdb_c.h
${symbols}
DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/symbolify.py ${CMAKE_CURRENT_SOURCE_DIR}/foundationdb/fdb_c.h
COMMENT "Generate exported_symbols_list")
# Wrap the generated file in a target so fdb_c can depend on it being built first.
add_custom_target(exported_symbols_list DEPENDS ${symbols})
add_dependencies(fdb_c exported_symbols_list)
# -no_weak_exports + -exported_symbols_list restricts the dylib's exports
# to exactly the generated list.
target_link_options(fdb_c PRIVATE "LINKER:-no_weak_exports,-exported_symbols_list,${symbols}")
elseif(WIN32)
# Windows: exports are controlled elsewhere (DLLEXPORT annotations); nothing to add here.
else()
# Other Unix (GNU ld): use the version script to limit exports;
# -z,nodelete keeps the library resident once loaded (presumably because it
# spawns threads that must outlive dlclose — TODO confirm).
target_link_options(fdb_c PRIVATE "LINKER:--version-script=${CMAKE_CURRENT_SOURCE_DIR}/fdb_c.map,-z,nodelete")
endif()
target_include_directories(fdb_c PUBLIC
$<BUILD_INTERFACE:${CMAKE_CURRENT_BINARY_DIR}>
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}>
@ -53,9 +68,7 @@ if(NOT WIN32)
test/mako/mako.c
test/mako/mako.h
test/mako/utils.c
test/mako/utils.h
test/mako/zipf.c
test/mako/zipf.h)
test/mako/utils.h)
if(OPEN_FOR_IDE)
add_library(fdb_c_performance_test OBJECT test/performance_test.c test/test.h)

View File

@ -34,6 +34,10 @@ BOOL WINAPI DllMain( HINSTANCE dll, DWORD reason, LPVOID reserved ) {
#elif defined( __unixish__ )
#ifdef __INTEL_COMPILER
#pragma warning ( disable:2415 )
#endif
static pthread_key_t threadDestructorKey;
static void threadDestructor(void*) {
@ -57,4 +61,4 @@ static int threadDestructorKeyInit = initThreadDestructorKey();
#else
#error Port me!
#endif
#endif

View File

@ -18,7 +18,7 @@
* limitations under the License.
*/
#define FDB_API_VERSION 620
#define FDB_API_VERSION 630
#define FDB_INCLUDE_LEGACY_TYPES
#include "fdbclient/MultiVersionTransaction.h"
@ -44,8 +44,9 @@ int g_api_version = 0;
// Legacy (pre API version 610)
#define CLUSTER(c) ((char*)c)
/*
* While we could just use the MultiVersionApi instance directly, this #define allows us to swap in any other IClientApi instance (e.g. from ThreadSafeApi)
/*
* While we could just use the MultiVersionApi instance directly, this #define allows us to swap in any other IClientApi
* instance (e.g. from ThreadSafeApi)
*/
#define API ((IClientApi*)MultiVersionApi::api)
@ -74,12 +75,10 @@ fdb_bool_t fdb_error_predicate( int predicate_test, fdb_error_t code ) {
code == error_code_cluster_version_changed;
}
if(predicate_test == FDBErrorPredicates::RETRYABLE_NOT_COMMITTED) {
return code == error_code_not_committed ||
code == error_code_transaction_too_old ||
code == error_code_future_version ||
code == error_code_database_locked ||
code == error_code_proxy_memory_limit_exceeded ||
code == error_code_process_behind;
return code == error_code_not_committed || code == error_code_transaction_too_old ||
code == error_code_future_version || code == error_code_database_locked ||
code == error_code_proxy_memory_limit_exceeded || code == error_code_batch_transaction_throttled ||
code == error_code_process_behind;
}
return false;
}
@ -107,12 +106,10 @@ fdb_error_t fdb_network_set_option( FDBNetworkOption option,
API->setNetworkOption( (FDBNetworkOptions::Option)option, value ? StringRef( value, value_length ) : Optional<StringRef>() ); );
}
extern "C"
fdb_error_t fdb_setup_network_impl() {
CATCH_AND_RETURN( API->setupNetwork(); );
}
extern "C"
fdb_error_t fdb_setup_network_v13( const char* localAddress ) {
fdb_error_t errorCode = fdb_network_set_option( FDB_NET_OPTION_LOCAL_ADDRESS, (uint8_t const*)localAddress, strlen(localAddress) );
if(errorCode != 0)
@ -159,7 +156,6 @@ fdb_error_t fdb_future_block_until_ready( FDBFuture* f ) {
CATCH_AND_RETURN( TSAVB(f)->blockUntilReady(); );
}
extern "C" DLLEXPORT
fdb_bool_t fdb_future_is_error_v22( FDBFuture* f ) {
return TSAVB(f)->isError();
}
@ -200,12 +196,10 @@ fdb_error_t fdb_future_set_callback( FDBFuture* f,
CATCH_AND_RETURN( TSAVB(f)->callOrSetAsCallback( cb, ignore, 0 ); );
}
extern "C" DLLEXPORT
fdb_error_t fdb_future_get_error_impl( FDBFuture* f ) {
return TSAVB(f)->getErrorCode();
}
extern "C" DLLEXPORT
fdb_error_t fdb_future_get_error_v22( FDBFuture* f, const char** description ) {
if ( !( TSAVB(f)->isError() ) )
return error_code_future_not_error;
@ -214,7 +208,6 @@ fdb_error_t fdb_future_get_error_v22( FDBFuture* f, const char** description ) {
return TSAVB(f)->error.code();
}
extern "C" DLLEXPORT
fdb_error_t fdb_future_get_version_v619( FDBFuture* f, int64_t* out_version ) {
CATCH_AND_RETURN( *out_version = TSAV(Version, f)->get(); );
}
@ -233,14 +226,12 @@ fdb_error_t fdb_future_get_key( FDBFuture* f, uint8_t const** out_key,
*out_key_length = key.size(); );
}
extern "C" DLLEXPORT
fdb_error_t fdb_future_get_cluster_v609( FDBFuture* f, FDBCluster** out_cluster ) {
CATCH_AND_RETURN(
*out_cluster = (FDBCluster*)
( (TSAV( char*, f )->get() ) ); );
}
extern "C" DLLEXPORT
fdb_error_t fdb_future_get_database_v609( FDBFuture* f, FDBDatabase** out_database ) {
CATCH_AND_RETURN(
*out_database = (FDBDatabase*)
@ -259,7 +250,6 @@ fdb_error_t fdb_future_get_value( FDBFuture* f, fdb_bool_t* out_present,
} );
}
extern "C"
fdb_error_t fdb_future_get_keyvalue_array_impl(
FDBFuture* f, FDBKeyValue const** out_kv,
int* out_count, fdb_bool_t* out_more )
@ -271,7 +261,6 @@ fdb_error_t fdb_future_get_keyvalue_array_impl(
*out_more = rrr.more; );
}
extern "C"
fdb_error_t fdb_future_get_keyvalue_array_v13(
FDBFuture* f, FDBKeyValue const** out_kv, int* out_count)
{
@ -281,7 +270,7 @@ fdb_error_t fdb_future_get_keyvalue_array_v13(
*out_count = rrr.size(); );
}
extern "C"
extern "C" DLLEXPORT
fdb_error_t fdb_future_get_string_array(
FDBFuture* f, const char*** out_strings, int* out_count)
{
@ -292,7 +281,6 @@ fdb_error_t fdb_future_get_string_array(
);
}
extern "C" DLLEXPORT
FDBFuture* fdb_create_cluster_v609( const char* cluster_file_path ) {
char *path;
if(cluster_file_path) {
@ -306,7 +294,6 @@ FDBFuture* fdb_create_cluster_v609( const char* cluster_file_path ) {
return (FDBFuture*)ThreadFuture<char*>(path).extractPtr();
}
extern "C" DLLEXPORT
fdb_error_t fdb_cluster_set_option_v609( FDBCluster* c,
FDBClusterOption option,
uint8_t const* value,
@ -316,12 +303,19 @@ fdb_error_t fdb_cluster_set_option_v609( FDBCluster* c,
return error_code_success;
}
extern "C" DLLEXPORT
void fdb_cluster_destroy_v609( FDBCluster* c ) {
CATCH_AND_DIE( delete[] CLUSTER(c); );
}
extern "C" DLLEXPORT
// This exists so that fdb_cluster_create_database doesn't need to call the public symbol fdb_create_database.
// If it does and this is an external client loaded though the multi-version API, then it may inadvertently call
// the version of the function in the primary library if it was loaded into the global symbols.
fdb_error_t fdb_create_database_impl( const char* cluster_file_path, FDBDatabase** out_database ) {
CATCH_AND_RETURN(
*out_database = (FDBDatabase*)API->createDatabase( cluster_file_path ? cluster_file_path : "" ).extractPtr();
);
}
FDBFuture* fdb_cluster_create_database_v609( FDBCluster* c, uint8_t const* db_name,
int db_name_length )
{
@ -330,7 +324,7 @@ FDBFuture* fdb_cluster_create_database_v609( FDBCluster* c, uint8_t const* db_na
}
FDBDatabase *db;
fdb_error_t err = fdb_create_database(CLUSTER(c), &db);
fdb_error_t err = fdb_create_database_impl(CLUSTER(c), &db);
if(err) {
return (FDBFuture*)ThreadFuture<Reference<IDatabase>>(Error(err)).extractPtr();
}
@ -340,9 +334,7 @@ FDBFuture* fdb_cluster_create_database_v609( FDBCluster* c, uint8_t const* db_na
extern "C" DLLEXPORT
fdb_error_t fdb_create_database( const char* cluster_file_path, FDBDatabase** out_database ) {
CATCH_AND_RETURN(
*out_database = (FDBDatabase*)API->createDatabase( cluster_file_path ? cluster_file_path : "" ).extractPtr();
);
return fdb_create_database_impl( cluster_file_path, out_database );
}
extern "C" DLLEXPORT
@ -394,21 +386,18 @@ FDBFuture* fdb_transaction_get_read_version( FDBTransaction* tr ) {
return (FDBFuture*)( TXN(tr)->getReadVersion().extractPtr() );
}
extern "C"
FDBFuture* fdb_transaction_get_impl( FDBTransaction* tr, uint8_t const* key_name,
int key_name_length, fdb_bool_t snapshot ) {
return (FDBFuture*)
( TXN(tr)->get( KeyRef( key_name, key_name_length ), snapshot ).extractPtr() );
}
extern "C"
FDBFuture* fdb_transaction_get_v13( FDBTransaction* tr, uint8_t const* key_name,
int key_name_length )
{
return fdb_transaction_get_impl( tr, key_name, key_name_length, 0 );
}
extern "C"
FDBFuture* fdb_transaction_get_key_impl( FDBTransaction* tr, uint8_t const* key_name,
int key_name_length, fdb_bool_t or_equal,
int offset, fdb_bool_t snapshot ) {
@ -419,7 +408,6 @@ FDBFuture* fdb_transaction_get_key_impl( FDBTransaction* tr, uint8_t const* key_
snapshot ).extractPtr() );
}
extern "C"
FDBFuture* fdb_transaction_get_key_v13( FDBTransaction* tr, uint8_t const* key_name,
int key_name_length, fdb_bool_t or_equal,
int offset ) {
@ -427,14 +415,13 @@ FDBFuture* fdb_transaction_get_key_v13( FDBTransaction* tr, uint8_t const* key_n
or_equal, offset, false );
}
extern "C"
extern "C" DLLEXPORT
FDBFuture* fdb_transaction_get_addresses_for_key( FDBTransaction* tr, uint8_t const* key_name,
int key_name_length ){
return (FDBFuture*)( TXN(tr)->getAddressesForKey( KeyRef(key_name, key_name_length) ).extractPtr() );
}
extern "C"
FDBFuture* fdb_transaction_get_range_impl(
FDBTransaction* tr, uint8_t const* begin_key_name,
int begin_key_name_length, fdb_bool_t begin_or_equal, int begin_offset,
@ -505,7 +492,6 @@ FDBFuture* fdb_transaction_get_range_impl(
snapshot, reverse ).extractPtr() );
}
extern "C"
FDBFuture* fdb_transaction_get_range_selector_v13(
FDBTransaction* tr, uint8_t const* begin_key_name, int begin_key_name_length,
fdb_bool_t begin_or_equal, int begin_offset, uint8_t const* end_key_name,
@ -517,7 +503,6 @@ FDBFuture* fdb_transaction_get_range_selector_v13(
limit, 0, FDB_STREAMING_MODE_EXACT, 0, false, false);
}
extern "C"
FDBFuture* fdb_transaction_get_range_v13(
FDBTransaction* tr, uint8_t const* begin_key_name, int begin_key_name_length,
uint8_t const* end_key_name, int end_key_name_length, int limit )
@ -600,7 +585,6 @@ FDBFuture* fdb_transaction_get_versionstamp( FDBTransaction* tr )
return (FDBFuture*)(TXN(tr)->getVersionstamp().extractPtr());
}
extern "C"
fdb_error_t fdb_transaction_set_option_impl( FDBTransaction* tr,
FDBTransactionOption option,
uint8_t const* value,
@ -610,7 +594,6 @@ fdb_error_t fdb_transaction_set_option_impl( FDBTransaction* tr,
TXN(tr)->setOption( (FDBTransactionOptions::Option)option, value ? StringRef( value, value_length ) : Optional<StringRef>() ); );
}
extern "C"
void fdb_transaction_set_option_v13( FDBTransaction* tr,
FDBTransactionOption option )
{
@ -644,6 +627,13 @@ fdb_error_t fdb_transaction_add_conflict_range( FDBTransaction*tr, uint8_t const
}
extern "C" DLLEXPORT
FDBFuture* fdb_transaction_get_estimated_range_size_bytes( FDBTransaction* tr, uint8_t const* begin_key_name,
int begin_key_name_length, uint8_t const* end_key_name, int end_key_name_length ) {
KeyRangeRef range(KeyRef(begin_key_name, begin_key_name_length), KeyRef(end_key_name, end_key_name_length));
return (FDBFuture*)(TXN(tr)->getEstimatedRangeSizeBytes(range).extractPtr());
}
#include "fdb_c_function_pointers.g.h"
#define FDB_API_CHANGED(func, ver) if (header_version < ver) fdb_api_ptr_##func = (void*)&(func##_v##ver##_PREV); else if (fdb_api_ptr_##func == (void*)&fdb_api_ptr_unimpl) fdb_api_ptr_##func = (void*)&(func##_impl);
@ -680,6 +670,10 @@ fdb_error_t fdb_select_api_version_impl( int runtime_version, int header_version
// Versioned API changes -- descending order by version (new changes at top)
// FDB_API_CHANGED( function, ver ) means there is a new implementation as of ver, and a function function_(ver-1) is the old implementation
// FDB_API_REMOVED( function, ver ) means the function was removed as of ver, and function_(ver-1) is the old implementation
//
// WARNING: use caution when implementing removed functions by calling public API functions. This can lead to undesired behavior when
// using the multi-version API. Instead, it is better to have both the removed and public functions call an internal implementation function.
// See fdb_create_database_impl for an example.
FDB_API_REMOVED( fdb_future_get_version, 620 );
FDB_API_REMOVED( fdb_create_cluster, 610 );
FDB_API_REMOVED( fdb_cluster_create_database, 610 );

View File

@ -1,126 +0,0 @@
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="14.1" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<PropertyGroup Condition="'$(Release)' != 'true' ">
</PropertyGroup>
<PropertyGroup Condition="'$(Release)' == 'true' ">
<PreprocessorDefinitions>FDB_CLEAN_BUILD;%(PreprocessorDefinitions)</PreprocessorDefinitions>
</PropertyGroup>
<ItemGroup Label="ProjectConfigurations">
<ProjectConfiguration Include="Debug|x64">
<Configuration>Debug</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|x64">
<Configuration>Release</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
</ItemGroup>
<ItemGroup>
<ClInclude Include="fdb_c_function_pointers.g.h" />
<ClInclude Include="foundationdb\fdb_c.h" />
</ItemGroup>
<ItemGroup>
<ClCompile Include="fdb_c.cpp" />
<ClCompile Include="ThreadCleanup.cpp" />
</ItemGroup>
<ItemGroup>
<MASM Include="fdb_c.g.asm" />
</ItemGroup>
<ItemGroup>
<None Include="generate_asm.py" />
</ItemGroup>
<PropertyGroup Label="Globals">
<ProjectGuid>{CACB2C8E-3E55-4309-A411-2A9C56C6C1CB}</ProjectGuid>
<RootNamespace>c</RootNamespace>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
<ConfigurationType>DynamicLibrary</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<CharacterSet>MultiByte</CharacterSet>
<PlatformToolset>v141</PlatformToolset>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
<ConfigurationType>DynamicLibrary</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>MultiByte</CharacterSet>
<PlatformToolset>v141</PlatformToolset>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
<ImportGroup Label="ExtensionSettings">
<Import Project="$(VCTargetsPath)\BuildCustomizations\masm.props" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<PropertyGroup Label="UserMacros" />
<ItemDefinitionGroup>
<PreBuildEvent>
</PreBuildEvent>
<PostBuildEvent>
<Command>
FOR /F "tokens=1" %%i in ('hg.exe id') do copy /Y "$(TargetPath)" "$(TargetPath)-%%i"</Command>
</PostBuildEvent>
</ItemDefinitionGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<IncludePath>..\..\;C:\Program Files\boost_1_67_0;$(IncludePath)</IncludePath>
<OutDir>$(SolutionDir)bin\$(Configuration)\</OutDir>
<IntDir>$(SystemDrive)\temp\msvcfdb\$(Platform)$(Configuration)\$(MSBuildProjectName)\</IntDir>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<OutDir>$(SolutionDir)bin\$(Configuration)\</OutDir>
<IntDir>$(SystemDrive)\temp\msvcfdb\$(Platform)$(Configuration)\$(MSBuildProjectName)\</IntDir>
<IncludePath>..\..\;C:\Program Files\boost_1_67_0;$(IncludePath)</IncludePath>
</PropertyGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=_WIN32_WINNT_WS03;BOOST_ALL_NO_LIB;WINVER=_WIN32_WINNT_WS03;NTDDI_VERSION=NTDDI_WS03;_DEBUG;_HAS_ITERATOR_DEBUGGING=0;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
<AdditionalOptions>/bigobj @..\..\flow/no_intellisense.opt %(AdditionalOptions)</AdditionalOptions>
<LanguageStandard>stdcpp17</LanguageStandard>
</ClCompile>
<Link>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalDependencies>$(SolutionDir)bin\$(Configuration)\fdbclient.lib;%(AdditionalDependencies)</AdditionalDependencies>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=_WIN32_WINNT_WS03;BOOST_ALL_NO_LIB;WINVER=_WIN32_WINNT_WS03;NTDDI_VERSION=NTDDI_WS03;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<RuntimeLibrary>MultiThreaded</RuntimeLibrary>
<AdditionalOptions>/bigobj @..\..\flow/no_intellisense.opt %(AdditionalOptions)</AdditionalOptions>
<LanguageStandard>stdcpp17</LanguageStandard>
</ClCompile>
<Link>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
<AdditionalDependencies>$(SolutionDir)bin\$(Configuration)\fdbclient.lib;%(AdditionalDependencies)</AdditionalDependencies>
</Link>
</ItemDefinitionGroup>
<PropertyGroup>
<CustomBuildBeforeTargets>_MASM;ClCompile</CustomBuildBeforeTargets>
</PropertyGroup>
<ItemDefinitionGroup>
<CustomBuildStep>
<Command>c:\Python27\python.exe "$(ProjectDir)/generate_asm.py" windows "$(ProjectDir)/fdb_c.cpp" "$(ProjectDir)/fdb_c.g.asm" "$(ProjectDir)/fdb_c_function_pointers.g.h"</Command>
<Message>Generating API trampolines</Message>
<Outputs>$(ProjectDir)/fdb_c_function_pointers.g.h;$(ProjectDir)/fdb_c.g.asm</Outputs>
<Inputs>$(ProjectDir)/fdb_c.cpp;$(ProjectDir)/generate_asm.py</Inputs>
</CustomBuildStep>
</ItemDefinitionGroup>
<ImportGroup Label="ExtensionTargets">
<Import Project="$(VCTargetsPath)\BuildCustomizations\masm.targets" />
</ImportGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
</Project>

View File

@ -28,10 +28,10 @@
#endif
#if !defined(FDB_API_VERSION)
#error You must #define FDB_API_VERSION prior to including fdb_c.h (current version is 620)
#error You must #define FDB_API_VERSION prior to including fdb_c.h (current version is 630)
#elif FDB_API_VERSION < 13
#error API version no longer supported (upgrade to 13)
#elif FDB_API_VERSION > 620
#elif FDB_API_VERSION > 630
#error Requested API version requires a newer version of this header
#endif
@ -91,12 +91,21 @@ extern "C" {
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_add_network_thread_completion_hook(void (*hook)(void*), void *hook_parameter);
#pragma pack(push, 4)
#if FDB_API_VERSION >= 630
typedef struct keyvalue {
const uint8_t* key;
int key_length;
const uint8_t* value;
int value_length;
} FDBKeyValue;
#else
typedef struct keyvalue {
const void* key;
int key_length;
const void* value;
int value_length;
} FDBKeyValue;
#endif
#pragma pack(pop)
DLLEXPORT void fdb_future_cancel( FDBFuture* f );
@ -120,11 +129,6 @@ extern "C" {
fdb_future_get_error( FDBFuture* f );
#endif
#if FDB_API_VERSION < 620
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t
fdb_future_get_version( FDBFuture* f, int64_t* out_version );
#endif
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t
fdb_future_get_int64( FDBFuture* f, int64_t* out );
@ -252,6 +256,10 @@ extern "C" {
int end_key_name_length,
FDBConflictRangeType type);
DLLEXPORT WARN_UNUSED_RESULT FDBFuture*
fdb_transaction_get_estimated_range_size_bytes( FDBTransaction* tr, uint8_t const* begin_key_name,
int begin_key_name_length, uint8_t const* end_key_name, int end_key_name_length);
#define FDB_KEYSEL_LAST_LESS_THAN(k, l) k, l, 0, 0
#define FDB_KEYSEL_LAST_LESS_OR_EQUAL(k, l) k, l, 1, 0
#define FDB_KEYSEL_FIRST_GREATER_THAN(k, l) k, l, 1, 1
@ -265,6 +273,13 @@ extern "C" {
/* LEGACY API VERSIONS */
#if FDB_API_VERSION < 620
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t
fdb_future_get_version( FDBFuture* f, int64_t* out_version );
#else
#define fdb_future_get_version(f, ov) FDB_REMOVED_FUNCTION
#endif
#if FDB_API_VERSION < 610 || defined FDB_INCLUDE_LEGACY_TYPES
typedef struct FDB_cluster FDBCluster;
@ -292,6 +307,13 @@ extern "C" {
DLLEXPORT WARN_UNUSED_RESULT FDBFuture*
fdb_cluster_create_database( FDBCluster* c, uint8_t const* db_name,
int db_name_length );
#else
#define fdb_future_get_cluster(f, oc) FDB_REMOVED_FUNCTION
#define fdb_future_get_database(f, od) FDB_REMOVED_FUNCTION
#define fdb_create_cluster(cfp) FDB_REMOVED_FUNCTION
#define fdb_cluster_destroy(c) FDB_REMOVED_FUNCTION
#define fdb_cluster_set_option(c, o, v, vl) FDB_REMOVED_FUNCTION
#define fdb_cluster_create_database(c, dn, dnl) FDB_REMOVED_FUNCTION
#endif
#if FDB_API_VERSION < 23

View File

@ -1,113 +0,0 @@
#
# local.mk
#
# This source file is part of the FoundationDB open source project
#
# Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- mode: makefile; -*-
fdb_c_CFLAGS := $(fdbclient_CFLAGS)
fdb_c_LDFLAGS := $(fdbrpc_LDFLAGS)
fdb_c_LIBS := lib/libfdbclient.a lib/libfdbrpc.a lib/libflow.a $(FDB_TLS_LIB)
fdb_c_STATIC_LIBS := $(TLS_LIBS)
fdb_c_tests_LIBS := -Llib -lfdb_c -lstdc++
fdb_c_tests_HEADERS := -Ibindings/c
CLEAN_TARGETS += fdb_c_tests_clean
ifeq ($(PLATFORM),linux)
fdb_c_LDFLAGS += -Wl,--version-script=bindings/c/fdb_c.map -static-libgcc -Wl,-z,nodelete -lm -lpthread -lrt -ldl
# Link our custom libstdc++ statically in Ubuntu, if hacking
ifeq ("$(wildcard /etc/centos-release)", "")
ifeq ($(LIBSTDCPP_HACK),1)
fdb_c_LIBS += lib/libstdc++.a
endif
# Link stdc++ statically in Centos, if not hacking
else
fdb_c_STATIC_LIBS += -static-libstdc++
endif
fdb_c_tests_LIBS += -lpthread
endif
ifeq ($(PLATFORM),osx)
fdb_c_LDFLAGS += -lc++ -Xlinker -exported_symbols_list -Xlinker bindings/c/fdb_c.symbols
fdb_c_tests_LIBS += -lpthread
lib/libfdb_c.dylib: bindings/c/fdb_c.symbols
bindings/c/fdb_c.symbols: bindings/c/foundationdb/fdb_c.h $(ALL_MAKEFILES)
@awk '{sub(/^[ \t]+/, "");} /^#/ {next;} /DLLEXPORT\ .*[^ ]\(/ {sub(/\(.*/, ""); print "_" $$NF; next;} /DLLEXPORT/ { DLLEXPORT=1; next;} DLLEXPORT==1 {sub(/\(.*/, ""); print "_" $$0; DLLEXPORT=0}' $< | sort | uniq > $@
fdb_c_clean: fdb_c_symbols_clean
fdb_c_symbols_clean:
@rm -f bindings/c/fdb_c.symbols
fdb_javac_release: lib/libfdb_c.$(DLEXT)
mkdir -p lib
rm -f lib/libfdb_c.$(java_DLEXT)-*
cp lib/libfdb_c.$(DLEXT) lib/libfdb_c.$(DLEXT)-$(VERSION_ID)
cp lib/libfdb_c.$(DLEXT)-debug lib/libfdb_c.$(DLEXT)-debug-$(VERSION_ID)
fdb_javac_release_clean:
rm -f lib/libfdb_c.$(DLEXT)-*
rm -f lib/libfdb_c.$(javac_DLEXT)-*
# OS X needs to put its java lib in packages
packages: fdb_javac_lib_package
fdb_javac_lib_package: lib/libfdb_c.dylib
mkdir -p packages
cp lib/libfdb_c.$(DLEXT) packages/libfdb_c.$(DLEXT)-$(VERSION_ID)
cp lib/libfdb_c.$(DLEXT)-debug packages/libfdb_c.$(DLEXT)-debug-$(VERSION_ID)
endif
fdb_c_GENERATED_SOURCES += bindings/c/foundationdb/fdb_c_options.g.h bindings/c/fdb_c.g.S bindings/c/fdb_c_function_pointers.g.h
bindings/c/%.g.S bindings/c/%_function_pointers.g.h: bindings/c/%.cpp bindings/c/generate_asm.py $(ALL_MAKEFILES)
@echo "Scanning $<"
@bindings/c/generate_asm.py $(PLATFORM) bindings/c/fdb_c.cpp bindings/c/fdb_c.g.S bindings/c/fdb_c_function_pointers.g.h
.PRECIOUS: bindings/c/fdb_c_function_pointers.g.h
fdb_c_BUILD_SOURCES += bindings/c/fdb_c.g.S
bindings/c/foundationdb/fdb_c_options.g.h: bin/vexillographer.exe fdbclient/vexillographer/fdb.options $(ALL_MAKEFILES)
@echo "Building $@"
@$(MONO) bin/vexillographer.exe fdbclient/vexillographer/fdb.options c $@
bin/fdb_c_performance_test: bindings/c/test/performance_test.c bindings/c/test/test.h fdb_c
@echo "Compiling fdb_c_performance_test"
@$(CC) $(CFLAGS) $(fdb_c_tests_HEADERS) -o $@ bindings/c/test/performance_test.c $(fdb_c_tests_LIBS)
bin/fdb_c_ryw_benchmark: bindings/c/test/ryw_benchmark.c bindings/c/test/test.h fdb_c
@echo "Compiling fdb_c_ryw_benchmark"
@$(CC) $(CFLAGS) $(fdb_c_tests_HEADERS) -o $@ bindings/c/test/ryw_benchmark.c $(fdb_c_tests_LIBS)
packages/fdb-c-tests-$(VERSION)-$(PLATFORM).tar.gz: bin/fdb_c_performance_test bin/fdb_c_ryw_benchmark
@echo "Packaging $@"
@rm -rf packages/fdb-c-tests-$(VERSION)-$(PLATFORM)
@mkdir -p packages/fdb-c-tests-$(VERSION)-$(PLATFORM)/bin
@cp bin/fdb_c_performance_test packages/fdb-c-tests-$(VERSION)-$(PLATFORM)/bin
@cp bin/fdb_c_ryw_benchmark packages/fdb-c-tests-$(VERSION)-$(PLATFORM)/bin
@tar -C packages -czvf $@ fdb-c-tests-$(VERSION)-$(PLATFORM) > /dev/null
@rm -rf packages/fdb-c-tests-$(VERSION)-$(PLATFORM)
fdb_c_tests: packages/fdb-c-tests-$(VERSION)-$(PLATFORM).tar.gz
fdb_c_tests_clean:
@rm -f packages/fdb-c-tests-$(VERSION)-$(PLATFORM).tar.gz

10
bindings/c/symbolify.py Normal file
View File

@ -0,0 +1,10 @@
# Generate the exported_symbols_list consumed by the macOS linker: scan an
# fdb_c.h-style header for DLLEXPORT declarations and write one Mach-O
# symbol name ('_' + C identifier) per line.
import re
import sys

# Captures the fdb_* identifier immediately preceding the '(' of a
# DLLEXPORT'd declaration. '[^(]*' matches anything but '(' (including
# newlines), so return type and qualifiers between DLLEXPORT and the name
# are skipped, even when the declaration wraps across lines.
_DLLEXPORT_RE = re.compile('DLLEXPORT[^(]*(fdb_[^(]*)[(]')


def extract_symbols(header_text):
    """Return the sorted, de-duplicated exported symbol names for header_text.

    header_text: full text of a C header that marks exported functions
        with DLLEXPORT.
    Returns a list of strings, each the captured fdb_* name with the
    Mach-O leading underscore prepended.
    """
    return sorted(set('_' + m.group(1) for m in _DLLEXPORT_RE.finditer(header_text)))


if __name__ == '__main__':
    # Usage: symbolify.py <path to fdb_c.h> <output symbols file>
    # (unpacking raises ValueError if the argument count is wrong)
    (fdb_c_h, symbols_file) = sys.argv[1:]
    with open(fdb_c_h, 'r') as f:
        symbols = extract_symbols(f.read())
    with open(symbols_file, 'w') as f:
        f.write('\n'.join(symbols))
        f.write('\n')  # terminate the last line with a newline

3262
bindings/c/test/mako/mako.c Executable file → Normal file

File diff suppressed because it is too large Load Diff

160
bindings/c/test/mako/mako.h Executable file → Normal file
View File

@ -3,7 +3,7 @@
#pragma once
#ifndef FDB_API_VERSION
#define FDB_API_VERSION 620
#define FDB_API_VERSION 630
#endif
#include <foundationdb/fdb_c.h>
@ -17,8 +17,6 @@
#include <limits.h>
#endif
#define DEFAULT_RETRY_COUNT 3
#define VERBOSE_NONE 0
#define VERBOSE_DEFAULT 1
#define VERBOSE_ANNOYING 2
@ -29,74 +27,97 @@
#define MODE_BUILD 1
#define MODE_RUN 2
/* we set mako_txn_t and mako_args_t only once in the master process,
* and won't be touched by child processes.
*/
#define FDB_SUCCESS 0
#define FDB_ERROR_RETRY -1
#define FDB_ERROR_ABORT -2
#define FDB_ERROR_CONFLICT -3
/* transaction specification */
#define OP_GETREADVERSION 0
#define OP_GET 1
#define OP_GETRANGE 2
#define OP_SGET 3
#define OP_SGETRANGE 4
#define OP_UPDATE 5
#define OP_INSERT 6
#define OP_INSERTRANGE 7
#define OP_CLEAR 8
#define OP_SETCLEAR 9
#define OP_CLEARRANGE 10
#define OP_SETCLEARRANGE 11
#define OP_COMMIT 12
#define MAX_OP 13 /* update this when adding a new operation */
enum Operations {
OP_GETREADVERSION,
OP_GET,
OP_GETRANGE,
OP_SGET,
OP_SGETRANGE,
OP_UPDATE,
OP_INSERT,
OP_INSERTRANGE,
OP_CLEAR,
OP_SETCLEAR,
OP_CLEARRANGE,
OP_SETCLEARRANGE,
OP_COMMIT,
MAX_OP /* must be the last item */
};
#define OP_COUNT 0
#define OP_RANGE 1
#define OP_REVERSE 2
/* for arguments */
#define ARG_KEYLEN 1
#define ARG_VALLEN 2
#define ARG_TPS 3
#define ARG_COMMITGET 4
#define ARG_SAMPLING 5
#define ARG_VERSION 6
#define ARG_KNOBS 7
#define ARG_FLATBUFFERS 8
#define ARG_TRACE 9
#define ARG_TRACEPATH 10
/* for long arguments */
enum Arguments {
ARG_KEYLEN,
ARG_VALLEN,
ARG_TPS,
ARG_COMMITGET,
ARG_SAMPLING,
ARG_VERSION,
ARG_KNOBS,
ARG_FLATBUFFERS,
ARG_TRACE,
ARG_TRACEPATH,
ARG_TRACEFORMAT,
ARG_TPSMAX,
ARG_TPSMIN,
ARG_TPSINTERVAL,
ARG_TPSCHANGE,
ARG_TXNTRACE
};
enum TPSChangeTypes { TPS_SIN, TPS_SQUARE, TPS_PULSE };
#define KEYPREFIX "mako"
#define KEYPREFIXLEN 4
/* we set mako_txnspec_t and mako_args_t only once in the master process,
* and won't be touched by child processes.
*/
typedef struct {
/* for each operation, it stores "count", "range" and "reverse" */
int ops[MAX_OP][3];
/* for each operation, it stores "count", "range" and "reverse" */
int ops[MAX_OP][3];
} mako_txnspec_t;
#define KNOB_MAX 256
/* benchmark parameters */
typedef struct {
int json;
int num_processes;
int num_threads;
int mode;
int rows; /* is 2 billion enough? */
int seconds;
int iteration;
int tps;
int sampling;
int key_length;
int value_length;
int zipf;
int commit_get;
int verbose;
mako_txnspec_t txnspec;
char cluster_file[PATH_MAX];
int trace;
char tracepath[PATH_MAX];
char knobs[KNOB_MAX];
uint8_t flatbuffers;
int api_version;
int json;
int num_processes;
int num_threads;
int mode;
int rows; /* is 2 billion enough? */
int seconds;
int iteration;
int tpsmax;
int tpsmin;
int tpsinterval;
int tpschange;
int sampling;
int key_length;
int value_length;
int zipf;
int commit_get;
int verbose;
mako_txnspec_t txnspec;
char cluster_file[PATH_MAX];
int trace;
char tracepath[PATH_MAX];
int traceformat; /* 0 - XML, 1 - JSON */
char knobs[KNOB_MAX];
uint8_t flatbuffers;
int txntrace;
} mako_args_t;
/* shared memory */
@ -105,33 +126,34 @@ typedef struct {
#define SIGNAL_OFF 2
typedef struct {
int signal;
int readycount;
int signal;
int readycount;
double throttle_factor;
} mako_shmhdr_t;
typedef struct {
uint64_t xacts;
uint64_t conflicts;
uint64_t ops[MAX_OP];
uint64_t errors[MAX_OP];
uint64_t latency_samples[MAX_OP];
uint64_t latency_us_total[MAX_OP];
uint64_t latency_us_min[MAX_OP];
uint64_t latency_us_max[MAX_OP];
uint64_t xacts;
uint64_t conflicts;
uint64_t ops[MAX_OP];
uint64_t errors[MAX_OP];
uint64_t latency_samples[MAX_OP];
uint64_t latency_us_total[MAX_OP];
uint64_t latency_us_min[MAX_OP];
uint64_t latency_us_max[MAX_OP];
} mako_stats_t;
/* per-process information */
typedef struct {
int worker_id;
FDBDatabase *database;
mako_args_t *args;
mako_shmhdr_t *shm;
int worker_id;
FDBDatabase* database;
mako_args_t* args;
mako_shmhdr_t* shm;
} process_info_t;
/* args for threads */
typedef struct {
int thread_id;
process_info_t *process;
int thread_id;
process_info_t* process;
} thread_args_t;
/* process type */

View File

@ -1,27 +1,27 @@
##############
mako Benchmark
🦈 Mako Benchmark
##############
| mako (named after a small, but very fast shark) is a micro-benchmark for FoundationDB
| Mako (named after a very fast shark) is a micro-benchmark for FoundationDB
| which is designed to be very light and flexible
| so that you can stress a particular part of an FoundationDB cluster without introducing unnecessary overhead.
How to Build
============
| ``mako`` gets build automatically when you build FoundationDB.
| ``mako`` gets built automatically when you build FoundationDB.
| To build ``mako`` manually, simply build ``mako`` target in the FoundationDB build directory.
| e.g. If you're using Unix Makefiles
| e.g. If you're using Unix Makefiles, type:
| ``make mako``
Architecture
============
- mako is a stand-alone program written in C,
which communicates to FoundationDB using C binding API (``libfdb_c.so``)
- It creates one master process, and one or more worker processes (multi-process)
- Each worker process creates one or more multiple threads (multi-thread)
- All threads within the same process share the same network thread
which communicates to FoundationDB using C API (via ``libfdb_c.so``)
- It creates one master process, one stats emitter process, and one or more worker processes (multi-process)
- Each worker process creates one FDB network thread, and one or more worker threads (multi-thread)
- All worker threads within the same process share the same network thread
Data Specification
@ -32,7 +32,7 @@ Data Specification
Arguments
=========
- | ``--mode <mode>``
- | ``-m | --mode <mode>``
| One of the following modes must be specified. (Required)
| - ``clean``: Clean up existing data
| - ``build``: Populate data
@ -41,6 +41,9 @@ Arguments
- | ``-c | --cluster <cluster file>``
| FDB cluster file (Required)
- | ``-a | --api_version <api_version>``
| FDB API version to use (Default: Latest)
- | ``-p | --procs <procs>``
| Number of worker processes (Default: 1)
@ -48,7 +51,7 @@ Arguments
| Number of threads per worker process (Default: 1)
- | ``-r | --rows <rows>``
| Number of rows populated (Default: 10000)
| Number of rows initially populated (Default: 100000)
- | ``-s | --seconds <seconds>``
| Test duration in seconds (Default: 30)
@ -58,12 +61,23 @@ Arguments
| Specify the number of operations to be executed.
| This option cannot be set with ``--seconds``.
- | ``--tps <tps>``
| Target total transaction-per-second (TPS) of all worker processes/threads
- | ``--tps|--tpsmax <tps>``
| Target total transaction-per-second (TPS) of all worker processes/threads.
| When --tpsmin is also specified, this defines the upper-bound TPS.
| (Default: Unset / Unthrottled)
- | ``--tpsmin <tps>``
| Target total lower-bound TPS of all worker processes/threads
| (Default: Unset / Unthrottled)
- | ``--tpsinterval <seconds>``
| Time period TPS oscillates between --tpsmax and --tpsmin (Default: 10)
- | ``--tpschange <sin|square|pulse>``
| Shape of the TPS change (Default: sin)
- | ``--keylen <num>``
| Key string length in bytes (Default and Minimum: 16)
| Key string length in bytes (Default and Minimum: 32)
- | ``--vallen <num>``
| Value string length in bytes (Default and Minimum: 16)
@ -75,22 +89,19 @@ Arguments
| Generate a skewed workload based on Zipf distribution (Default: Unset = Uniform)
- | ``--sampling <num>``
| Sampling rate (1 sample / <num> ops) for latency stats
| Sampling rate (1 sample / <num> ops) for latency stats (Default: 1000)
- | ``--trace``
| Enable tracing. The trace file will be created in the current directory.
| Enable tracing. The trace file will be created in the current directory. (Default: Unset)
- | ``--tracepath <path>``
| Enable tracing and set the trace file path.
- | ``--knobs <knobs>``
| Set client knobs
- | ``--flatbuffers``
| Enable flatbuffers
| Set client knobs (comma-separated)
- | ``--commitget``
| Force commit for read-only transactions
| Force commit for read-only transactions (Default: Unset)
- | ``-v | --verbose <level>``
| Set verbose level (Default: 1)
@ -102,10 +113,10 @@ Arguments
Transaction Specification
=========================
| A transaction may contain multiple operations of multiple types.
| A transaction may contain multiple operations of various types.
| You can specify multiple operations for one operation type by specifying "Count".
| For RANGE operations, "Range" needs to be specified in addition to "Count".
| Every transaction is committed unless it contains only GET / GET RANGE operations.
| For RANGE operations, the "Range" needs to be specified in addition to "Count".
| Every transaction is committed unless the transaction is read-only.
Operation Types
---------------
@ -126,21 +137,22 @@ Format
------
| One operation type is defined as ``<Type><Count>`` or ``<Type><Count>:<Range>``.
| When Count is omitted, it's equivalent to setting it to 1. (e.g. ``g`` is equivalent to ``g1``)
| Multiple operation types can be concatenated. (e.g. ``g9u1`` = 9 GETs and 1 update)
| Multiple operation types within the same transaction can be concatenated. (e.g. ``g9u1`` = 9 GETs and 1 update)
Transaction Specification Examples
----------------------------------
- | 100 GETs (No Commit)
- | 100 GETs (Non-committed)
| ``g100``
- | 10 GET RANGE with Range of 50 (No Commit)
- | 10 GET RANGE with Range of 50 (Non-committed)
| ``gr10:50``
- | 90 GETs and 10 Updates (Committed)
| ``g90u10``
- | 80 GETs, 10 Updates and 10 Inserts (Committed)
| ``g90u10i10``
- | 70 GETs, 10 Updates and 10 Inserts (Committed)
| ``g70u10i10``
| This is 80-20.
Execution Examples
@ -149,12 +161,14 @@ Execution Examples
Preparation
-----------
- Start the FoundationDB cluster and create a database
- Set LD_LIBRARY_PATH pointing to a proper ``libfdb_c.so``
- Set ``LD_LIBRARY_PATH`` environment variable pointing to a proper ``libfdb_c.so`` shared library
Build
-----
Populate Initial Database
-------------------------
``mako --cluster /etc/foundationdb/fdb.cluster --mode build --rows 1000000 --procs 4``
Note: You may be able to speed up the data population by increasing the number of processes or threads.
Run
---
Run a mixed workload with a total of 8 threads for 60 seconds, keeping the throughput limited to 1000 TPS.
``mako --cluster /etc/foundationdb/fdb.cluster --mode run --rows 1000000 --procs 2 --threads 8 --transaction "g8ui" --seconds 60 --tps 1000``

92
bindings/c/test/mako/utils.c Executable file → Normal file
View File

@ -1,81 +1,79 @@
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "utils.h"
#include "mako.h"
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
/* uniform-distribution random */
int urand(int low, int high) {
double r = rand() / (1.0 + RAND_MAX);
int range = high - low + 1;
return (int)((r * range) + low);
double r = rand() / (1.0 + RAND_MAX);
int range = high - low + 1;
return (int)((r * range) + low);
}
/* random string */
/* len is the buffer size, must include null */
void randstr(char *str, int len) {
int i;
for (i = 0; i < len-1; i++) {
str[i] = '!' + urand(0, 'z'-'!'); /* generate a char from '!' to 'z' */
}
str[len-1] = '\0';
void randstr(char* str, int len) {
int i;
for (i = 0; i < len - 1; i++) {
str[i] = '!' + urand(0, 'z' - '!'); /* generate a char from '!' to 'z' */
}
str[len - 1] = '\0';
}
/* random numeric string */
/* len is the buffer size, must include null */
void randnumstr(char *str, int len) {
int i;
for (i = 0; i < len-1; i++) {
str[i] = '0' + urand(0, 9); /* generate a char from '0' to '9' */
}
str[len-1] = '\0';
void randnumstr(char* str, int len) {
int i;
for (i = 0; i < len - 1; i++) {
str[i] = '0' + urand(0, 9); /* generate a char from '0' to '9' */
}
str[len - 1] = '\0';
}
/* return the first key to be inserted */
int insert_begin(int rows, int p_idx, int t_idx, int total_p, int total_t) {
double interval = (double)rows / total_p / total_t;
return (int)(round(interval * ((p_idx * total_t) + t_idx)));
double interval = (double)rows / total_p / total_t;
return (int)(round(interval * ((p_idx * total_t) + t_idx)));
}
/* return the last key to be inserted */
int insert_end(int rows, int p_idx, int t_idx, int total_p, int total_t) {
double interval = (double)rows / total_p / total_t;
return (int)(round(interval * ((p_idx * total_t) + t_idx + 1) - 1));
double interval = (double)rows / total_p / total_t;
return (int)(round(interval * ((p_idx * total_t) + t_idx + 1) - 1));
}
/* divide val equally among threads */
int compute_thread_portion(int val, int p_idx, int t_idx, int total_p, int total_t) {
int interval = val / total_p / total_t;
int remaining = val - (interval * total_p * total_t);
if ((p_idx * total_t + t_idx) < remaining) {
return interval+1;
} else if (interval == 0) {
return -1;
}
/* else */
return interval;
int interval = val / total_p / total_t;
int remaining = val - (interval * total_p * total_t);
if ((p_idx * total_t + t_idx) < remaining) {
return interval + 1;
} else if (interval == 0) {
return -1;
}
/* else */
return interval;
}
/* number of digits */
int digits(int num) {
int digits = 0;
while (num > 0) {
num /= 10;
digits++;
}
return digits;
int digits = 0;
while (num > 0) {
num /= 10;
digits++;
}
return digits;
}
/* generate a key for a given key number */
/* len is the buffer size, key length + null */
void genkey(char *str, int num, int rows, int len) {
int i;
int rowdigit = digits(rows);
sprintf(str, KEYPREFIX "%0.*d", rowdigit, num);
for (i = (KEYPREFIXLEN + rowdigit); i < len-1; i++) {
str[i] = 'x';
}
str[len-1] = '\0';
void genkey(char* str, int num, int rows, int len) {
int i;
int rowdigit = digits(rows);
sprintf(str, KEYPREFIX "%0.*d", rowdigit, num);
for (i = (KEYPREFIXLEN + rowdigit); i < len - 1; i++) {
str[i] = 'x';
}
str[len - 1] = '\0';
}

17
bindings/c/test/mako/utils.h Executable file → Normal file
View File

@ -9,12 +9,12 @@ int urand(int low, int high);
/* write a random string of the length of (len-1) to memory pointed by str
* with a null-termination character at str[len-1].
*/
void randstr(char *str, int len);
void randstr(char* str, int len);
/* write a random numeric string of the length of (len-1) to memory pointed by str
* with a null-termination character at str[len-1].
*/
void randnumstr(char *str, int len);
void randnumstr(char* str, int len);
/* given the total number of rows to be inserted,
* the worker process index p_idx and the thread index t_idx (both 0-based),
@ -27,26 +27,25 @@ int insert_begin(int rows, int p_idx, int t_idx, int total_p, int total_t);
int insert_end(int rows, int p_idx, int t_idx, int total_p, int total_t);
/* divide a value equally among threads */
int compute_thread_portion(int val, int p_idx, int t_idx, int total_p,
int total_t);
int compute_thread_portion(int val, int p_idx, int t_idx, int total_p, int total_t);
/* similar to insert_begin/end, compute_thread_tps computes
* the per-thread target TPS for given configuration.
*/
#define compute_thread_tps(val, p_idx, t_idx, total_p, total_t) \
compute_thread_portion(val, p_idx, t_idx, total_p, total_t)
#define compute_thread_tps(val, p_idx, t_idx, total_p, total_t) \
compute_thread_portion(val, p_idx, t_idx, total_p, total_t)
/* similar to compute_thread_tps,
* compute_thread_iters computes the number of iterations.
*/
#define compute_thread_iters(val, p_idx, t_idx, total_p, total_t) \
compute_thread_portion(val, p_idx, t_idx, total_p, total_t)
#define compute_thread_iters(val, p_idx, t_idx, total_p, total_t) \
compute_thread_portion(val, p_idx, t_idx, total_p, total_t)
/* get the number of digits */
int digits(int num);
/* generate a key for a given key number */
/* len is the buffer size, key length + null */
void genkey(char *str, int num, int rows, int len);
void genkey(char* str, int num, int rows, int len);
#endif /* UTILS_H */

View File

@ -603,7 +603,7 @@ void runTests(struct ResultSet *rs) {
int main(int argc, char **argv) {
srand(time(NULL));
struct ResultSet *rs = newResultSet();
checkError(fdb_select_api_version(620), "select API version", rs);
checkError(fdb_select_api_version(630), "select API version", rs);
printf("Running performance test at client version: %s\n", fdb_get_client_version());
valueStr = (uint8_t*)malloc((sizeof(uint8_t))*valueSize);

View File

@ -244,7 +244,7 @@ void runTests(struct ResultSet *rs) {
int main(int argc, char **argv) {
srand(time(NULL));
struct ResultSet *rs = newResultSet();
checkError(fdb_select_api_version(620), "select API version", rs);
checkError(fdb_select_api_version(630), "select API version", rs);
printf("Running RYW Benchmark test at client version: %s\n", fdb_get_client_version());
keys = generateKeys(numKeys, keySize);

View File

@ -29,7 +29,7 @@
#include <inttypes.h>
#ifndef FDB_API_VERSION
#define FDB_API_VERSION 620
#define FDB_API_VERSION 630
#endif
#include <foundationdb/fdb_c.h>
@ -236,7 +236,7 @@ void* runNetwork() {
FDBDatabase* openDatabase(struct ResultSet *rs, pthread_t *netThread) {
checkError(fdb_setup_network(), "setup network", rs);
pthread_create(netThread, NULL, &runNetwork, NULL);
pthread_create(netThread, NULL, (void*)(&runNetwork), NULL);
FDBDatabase *db;
checkError(fdb_create_database(NULL, &db), "create database", rs);

View File

@ -97,7 +97,7 @@ void runTests(struct ResultSet *rs) {
int main(int argc, char **argv) {
srand(time(NULL));
struct ResultSet *rs = newResultSet();
checkError(fdb_select_api_version(620), "select API version", rs);
checkError(fdb_select_api_version(630), "select API version", rs);
printf("Running performance test at client version: %s\n", fdb_get_client_version());
keys = generateKeys(numKeys, KEY_SIZE);

View File

@ -18,7 +18,7 @@
* limitations under the License.
*/
#define FDB_API_VERSION 620
#define FDB_API_VERSION 630
#include "foundationdb/fdb_c.h"
#undef DLLEXPORT
#include "workloads.h"
@ -258,7 +258,7 @@ struct SimpleWorkload : FDBWorkload {
insertsPerTx = context->getOption("insertsPerTx", 100ul);
opsPerTx = context->getOption("opsPerTx", 100ul);
runFor = context->getOption("runFor", 10.0);
auto err = fdb_select_api_version(620);
auto err = fdb_select_api_version(630);
if (err) {
context->trace(FDBSeverity::Info, "SelectAPIVersionFailed",
{ { "Error", std::string(fdb_get_error(err)) } });

View File

@ -16,7 +16,7 @@ set(SRCS
fdb_flow.actor.cpp
fdb_flow.h)
add_flow_target(NAME fdb_flow SRCS ${SRCS} STATIC_LIBRARY)
add_flow_target(STATIC_LIBRARY NAME fdb_flow SRCS ${SRCS})
target_link_libraries(fdb_flow PUBLIC fdb_c)
add_subdirectory(tester)
@ -29,10 +29,16 @@ if(NOT OPEN_FOR_IDE)
endif()
endforeach()
if(NOT FDB_RELEASE)
set(prerelease_string "-PRERELEASE")
else()
set(prerelease_string "")
endif()
file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/packages)
file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/packages)
set(package_dir ${CMAKE_CURRENT_BINARY_DIR}/packages/fdb-flow-${CMAKE_PROJECT_VERSION})
set(tar_file ${CMAKE_BINARY_DIR}/packages/fdb-flow-${CMAKE_PROJECT_VERSION}.tar.gz)
set(tar_file ${CMAKE_BINARY_DIR}/packages/fdb-flow-${CMAKE_PROJECT_VERSION}${prerelease_string}-x86_64.tar.gz)
add_custom_command(OUTPUT ${tar_file}
COMMAND
${CMAKE_COMMAND} -E make_directory ${package_dir} &&

View File

@ -25,6 +25,7 @@
#include "flow/DeterministicRandom.h"
#include "flow/SystemMonitor.h"
#include "flow/TLSConfig.actor.h"
#include "flow/actorcompiler.h" // This must be the last #include.
using namespace FDB;
@ -35,7 +36,7 @@ THREAD_FUNC networkThread(void* fdb) {
}
ACTOR Future<Void> _test() {
API *fdb = FDB::API::selectAPIVersion(620);
API *fdb = FDB::API::selectAPIVersion(630);
auto db = fdb->createDatabase();
state Reference<Transaction> tr = db->createTransaction();
@ -78,11 +79,11 @@ ACTOR Future<Void> _test() {
}
void fdb_flow_test() {
API *fdb = FDB::API::selectAPIVersion(620);
API *fdb = FDB::API::selectAPIVersion(630);
fdb->setupNetwork();
startThread(networkThread, fdb);
g_network = newNet2( false );
g_network = newNet2(TLSConfig());
openTraceFile(NetworkAddress(), 1000000, 1000000, ".");
systemMonitor();
@ -131,6 +132,8 @@ namespace FDB {
GetRangeLimits limits = GetRangeLimits(), bool snapshot = false,
bool reverse = false,
FDBStreamingMode streamingMode = FDB_STREAMING_MODE_SERIAL) override;
Future<int64_t> getEstimatedRangeSizeBytes(const KeyRange& keys) override;
void addReadConflictRange(KeyRangeRef const& keys) override;
void addReadConflictKey(KeyRef const& key) override;
@ -345,6 +348,14 @@ namespace FDB {
} );
}
Future<int64_t> TransactionImpl::getEstimatedRangeSizeBytes(const KeyRange& keys) {
return backToFuture<int64_t>(fdb_transaction_get_estimated_range_size_bytes(tr, keys.begin.begin(), keys.begin.size(), keys.end.begin(), keys.end.size()), [](Reference<CFuture> f) {
int64_t bytes;
throw_on_error(fdb_future_get_int64(f->f, &bytes));
return bytes;
});
}
void TransactionImpl::addReadConflictRange(KeyRangeRef const& keys) {
throw_on_error( fdb_transaction_add_conflict_range( tr, keys.begin.begin(), keys.begin.size(), keys.end.begin(), keys.end.size(), FDB_CONFLICT_RANGE_TYPE_READ ) );
}

View File

@ -23,7 +23,7 @@
#include <flow/flow.h>
#define FDB_API_VERSION 620
#define FDB_API_VERSION 630
#include <bindings/c/foundationdb/fdb_c.h>
#undef DLLEXPORT
@ -89,6 +89,8 @@ namespace FDB {
streamingMode);
}
virtual Future<int64_t> getEstimatedRangeSizeBytes(const KeyRange& keys) = 0;
virtual void addReadConflictRange(KeyRangeRef const& keys) = 0;
virtual void addReadConflictKey(KeyRef const& key) = 0;

View File

@ -1,152 +0,0 @@
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="14.1" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<PropertyGroup Condition="'$(Release)' != 'true' ">
</PropertyGroup>
<PropertyGroup Condition="'$(Release)' == 'true' ">
<PreprocessorDefinitions>FDB_CLEAN_BUILD;%(PreprocessorDefinitions)</PreprocessorDefinitions>
</PropertyGroup>
<ItemGroup Label="ProjectConfigurations">
<ProjectConfiguration Include="Debug|X64">
<Configuration>Debug</Configuration>
<Platform>X64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|X64">
<Configuration>Release</Configuration>
<Platform>X64</Platform>
</ProjectConfiguration>
</ItemGroup>
<ItemGroup>
<ActorCompiler Include="fdb_flow.actor.cpp" />
</ItemGroup>
<ItemGroup>
<ClInclude Include="DirectoryPartition.h" />
<ClInclude Include="FDBLoanerTypes.h" />
<ClInclude Include="fdb_flow.h" />
<ClInclude Include="Tuple.h" />
</ItemGroup>
<ItemGroup>
<ClCompile Include="Tuple.cpp" />
<ClInclude Include="IDirectory.h" />
<ClInclude Include="Subspace.h" />
<ClCompile Include="Subspace.cpp" />
</ItemGroup>
<ItemGroup>
<ClInclude Include="HighContentionAllocator.h" />
<ActorCompiler Include="HighContentionAllocator.actor.cpp" />
</ItemGroup>
<ItemGroup>
<ClInclude Include="DirectoryLayer.h" />
<ActorCompiler Include="DirectoryLayer.actor.cpp" />
</ItemGroup>
<ItemGroup>
<ClInclude Include="DirectorySubspace.h" />
<ClCompile Include="DirectorySubspace.cpp" />
</ItemGroup>
<ItemGroup>
<ActorCompiler Include="Node.actor.cpp" />
</ItemGroup>
<ItemGroup>
<None Include="no_intellisense.opt" />
</ItemGroup>
<PropertyGroup Label="Globals">
<ProjectGUID>{2BA0A5E2-EB4C-4A32-948C-CBAABD77AF87}</ProjectGUID>
<TargetFrameworkVersion>v4.5.2</TargetFrameworkVersion>
<Keyword>Win32Proj</Keyword>
<RootNamespace>fdb_flow</RootNamespace>
</PropertyGroup>
<PropertyGroup>
<OutDir>$(SolutionDir)bin\$(Configuration)\</OutDir>
<IntDir>$(SystemDrive)\temp\msvcfdb\$(Platform)$(Configuration)\$(MSBuildProjectName)\</IntDir>
<BuildLogFile>$(IntDir)\$(MSBuildProjectName).log</BuildLogFile>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|X64'" Label="Configuration">
<ConfigurationType>StaticLibrary</ConfigurationType>
<CharacterSet>MultiByte</CharacterSet>
<PlatformToolset>v141</PlatformToolset>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|X64'" Label="Configuration">
<ConfigurationType>StaticLibrary</ConfigurationType>
<CharacterSet>MultiByte</CharacterSet>
<PlatformToolset>v141</PlatformToolset>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
<ImportGroup Label="ExtensionSettings">
</ImportGroup>
<ImportGroup Label="PropertySheets">
<Import Project="$(LocalAppData)\Microsoft\VisualStudio\10.0\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(LocalAppData)\Microsoft\VisualStudio\10.0\Microsoft.Cpp.$(Platform).user.props')" />
</ImportGroup>
<PropertyGroup Label="UserMacros" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|X64'">
<LinkIncremental>true</LinkIncremental>
<IncludePath>..\..\;C:\Program Files\boost_1_67_0;$(IncludePath)</IncludePath>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|X64'">
<LinkIncremental>false</LinkIncremental>
<IncludePath>..\..\;C:\Program Files\boost_1_67_0;$(IncludePath)</IncludePath>
</PropertyGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|X64'">
<ClCompile>
<PrecompiledHeader>
</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<MinimalRebuild>false</MinimalRebuild>
<DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
<Optimization>Disabled</Optimization>
<BasicRuntimeChecks>EnableFastChecks</BasicRuntimeChecks>
<RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=0x0502;WINVER=0x0502;BOOST_ALL_NO_LIB;NTDDI_VERSION=0x05020000;_DEBUG;_HAS_ITERATOR_DEBUGGING=0;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>%(AdditionalIncludeDirectories);..\c</AdditionalIncludeDirectories>
<MultiProcessorCompilation>true</MultiProcessorCompilation>
<AdditionalOptions>/bigobj @..\..\flow/no_intellisense.opt %(AdditionalOptions)</AdditionalOptions>
<LanguageStandard>stdcpp17</LanguageStandard>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalDependencies>Advapi32.lib</AdditionalDependencies>
</Link>
<Lib>
<AdditionalDependencies>$(TargetDir)flow.lib</AdditionalDependencies>
</Lib>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|X64'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<PrecompiledHeader>
</PrecompiledHeader>
<DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
<Optimization>Full</Optimization>
<RuntimeLibrary>MultiThreaded</RuntimeLibrary>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=0x0502;WINVER=0x0502;BOOST_ALL_NO_LIB;NTDDI_VERSION=0x05020000;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>%(AdditionalIncludeDirectories);..\c</AdditionalIncludeDirectories>
<EnableEnhancedInstructionSet>NotSet</EnableEnhancedInstructionSet>
<EnablePREfast>false</EnablePREfast>
<AdditionalOptions>/bigobj @..\..\flow/no_intellisense.opt %(AdditionalOptions)</AdditionalOptions>
<MultiProcessorCompilation>true</MultiProcessorCompilation>
<FavorSizeOrSpeed>Speed</FavorSizeOrSpeed>
<MinimalRebuild>false</MinimalRebuild>
<LanguageStandard>stdcpp17</LanguageStandard>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>false</EnableCOMDATFolding>
<OptimizeReferences>false</OptimizeReferences>
<LinkTimeCodeGeneration>Default</LinkTimeCodeGeneration>
<AdditionalDependencies>Advapi32.lib</AdditionalDependencies>
<AdditionalOptions>/LTCG %(AdditionalOptions)</AdditionalOptions>
</Link>
<Lib>
<AdditionalDependencies>$(TargetDir)flow.lib</AdditionalDependencies>
</Lib>
</ItemDefinitionGroup>
<ImportGroup Label="ExtensionTargets">
<Import Project="..\..\flow\actorcompiler\ActorCompiler.targets" />
</ImportGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
<Target Name="MyPreCompileSteps" AfterTargets="CLCompile">
<Exec Command="&quot;$(SolutionDir)bin\$(Configuration)\coveragetool.exe&quot; &quot;$(OutDir)coverage.$(TargetName).xml&quot; @(ActorCompiler -> '%(RelativeDir)%(Filename)%(Extension)', ' ') @(CLInclude -> '%(RelativeDir)%(Filename)%(Extension)', ' ') @(CLCompile -> '%(RelativeDir)%(Filename)%(Extension)', ' ')" />
</Target>
</Project>

View File

@ -1,44 +0,0 @@
#
# local.mk
#
# This source file is part of the FoundationDB open source project
#
# Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- mode: makefile; -*-
fdb_flow_CFLAGS := -Ibindings/c $(fdbrpc_CFLAGS)
fdb_flow_LDFLAGS := -Llib -lfdb_c $(fdbrpc_LDFLAGS)
fdb_flow_LIBS :=
packages/fdb-flow-$(FLOWVER)-$(PLATFORM)-$(ARCH).tar.gz: fdb_flow
@echo "Packaging fdb_flow"
@rm -rf packages/fdb-flow-$(FLOWVER)-$(PLATFORM)-$(ARCH)
@mkdir -p packages/fdb-flow-$(FLOWVER)-$(PLATFORM)-$(ARCH)/lib packages/fdb-flow-$(FLOWVER)-$(PLATFORM)-$(ARCH)/include/bindings/flow packages/fdb-flow-$(FLOWVER)-$(PLATFORM)-$(ARCH)/include/bindings/c/foundationdb
@cp lib/libfdb_flow.a packages/fdb-flow-$(FLOWVER)-$(PLATFORM)-$(ARCH)/lib
@find bindings/flow -name '*.h' -not -path 'bindings/flow/tester/*' -exec cp {} packages/fdb-flow-$(FLOWVER)-$(PLATFORM)-$(ARCH)/include/bindings/flow \;
@find bindings/c/foundationdb -name '*.h' -exec cp {} packages/fdb-flow-$(FLOWVER)-$(PLATFORM)-$(ARCH)/include/bindings/c/foundationdb \;
@tar czf packages/fdb-flow-$(FLOWVER)-$(PLATFORM)-$(ARCH).tar.gz -C packages fdb-flow-$(FLOWVER)-$(PLATFORM)-$(ARCH)
@rm -rf packages/fdb-flow-$(FLOWVER)-$(PLATFORM)-$(ARCH)
FDB_FLOW: packages/fdb-flow-$(FLOWVER)-$(PLATFORM)-$(ARCH).tar.gz
FDB_FLOW_clean:
@echo "Cleaning fdb_flow package"
@rm -rf packages/fdb-flow-*.tar.gz
packages: FDB_FLOW
packages_clean: FDB_FLOW_clean

View File

@ -2,5 +2,5 @@ set(TEST_SRCS
DirectoryTester.actor.cpp
Tester.actor.cpp
Tester.actor.h)
add_flow_target(NAME fdb_flow_tester EXECUTABLE SRCS ${TEST_SRCS})
add_flow_target(EXECUTABLE NAME fdb_flow_tester SRCS ${TEST_SRCS})
target_link_libraries(fdb_flow_tester fdb_flow)

View File

@ -28,6 +28,7 @@
#include "bindings/flow/FDBLoanerTypes.h"
#include "fdbrpc/fdbrpc.h"
#include "flow/DeterministicRandom.h"
#include "flow/TLSConfig.actor.h"
#include "flow/actorcompiler.h" // This must be the last #include.
// Otherwise we have to type setupNetwork(), FDB::open(), etc.
@ -216,19 +217,19 @@ ACTOR Future< Standalone<RangeResultRef> > getRange(Reference<Transaction> tr, K
}
}
ACTOR static Future<Void> debugPrintRange(Reference<Transaction> tr, std::string subspace, std::string msg) {
if (!tr)
return Void();
Standalone<RangeResultRef> results = wait(getRange(tr, KeyRange(KeyRangeRef(subspace + '\x00', subspace + '\xff'))));
printf("==================================================DB:%s:%s, count:%d\n", msg.c_str(),
StringRef(subspace).printable().c_str(), results.size());
for (auto & s : results) {
printf("=====key:%s, value:%s\n", StringRef(s.key).printable().c_str(), StringRef(s.value).printable().c_str());
}
return Void();
}
//ACTOR static Future<Void> debugPrintRange(Reference<Transaction> tr, std::string subspace, std::string msg) {
// if (!tr)
// return Void();
//
// Standalone<RangeResultRef> results = wait(getRange(tr, KeyRange(KeyRangeRef(subspace + '\x00', subspace + '\xff'))));
// printf("==================================================DB:%s:%s, count:%d\n", msg.c_str(),
// StringRef(subspace).printable().c_str(), results.size());
// for (auto & s : results) {
// printf("=====key:%s, value:%s\n", StringRef(s.key).printable().c_str(), StringRef(s.value).printable().c_str());
// }
//
// return Void();
//}
ACTOR Future<Void> stackSub(FlowTesterStack* stack) {
if (stack->data.size() < 2)
@ -429,9 +430,8 @@ struct LogStackFunc : InstructionFunc {
wait(logStack(data, entries, prefix));
entries.clear();
}
wait(logStack(data, entries, prefix));
}
wait(logStack(data, entries, prefix));
return Void();
}
@ -638,6 +638,29 @@ struct GetFunc : InstructionFunc {
const char* GetFunc::name = "GET";
REGISTER_INSTRUCTION_FUNC(GetFunc);
struct GetEstimatedRangeSize : InstructionFunc {
static const char* name;
ACTOR static Future<Void> call(Reference<FlowTesterData> data, Reference<InstructionData> instruction) {
state std::vector<StackItem> items = data->stack.pop(2);
if (items.size() != 2)
return Void();
Standalone<StringRef> s1 = wait(items[0].value);
state Standalone<StringRef> beginKey = Tuple::unpack(s1).getString(0);
Standalone<StringRef> s2 = wait(items[1].value);
state Standalone<StringRef> endKey = Tuple::unpack(s2).getString(0);
Future<int64_t> fsize = instruction->tr->getEstimatedRangeSizeBytes(KeyRangeRef(beginKey, endKey));
int64_t size = wait(fsize);
data->stack.pushTuple(LiteralStringRef("GOT_ESTIMATED_RANGE_SIZE"));
return Void();
}
};
const char* GetEstimatedRangeSize::name = "GET_ESTIMATED_RANGE_SIZE";
REGISTER_INSTRUCTION_FUNC(GetEstimatedRangeSize);
struct GetKeyFunc : InstructionFunc {
static const char* name;
@ -1583,6 +1606,7 @@ struct UnitTestsFunc : InstructionFunc {
data->db->setDatabaseOption(FDBDatabaseOption::FDB_DB_OPTION_TRANSACTION_RETRY_LIMIT, Optional<StringRef>(StringRef((const uint8_t*)&retryLimit, 8)));
data->db->setDatabaseOption(FDBDatabaseOption::FDB_DB_OPTION_TRANSACTION_RETRY_LIMIT, Optional<StringRef>(StringRef((const uint8_t*)&noRetryLimit, 8)));
data->db->setDatabaseOption(FDBDatabaseOption::FDB_DB_OPTION_TRANSACTION_CAUSAL_READ_RISKY);
data->db->setDatabaseOption(FDBDatabaseOption::FDB_DB_OPTION_TRANSACTION_INCLUDE_PORT_IN_ADDRESS);
state Reference<Transaction> tr = data->db->createTransaction();
tr->setOption(FDBTransactionOption::FDB_TR_OPTION_PRIORITY_SYSTEM_IMMEDIATE);
@ -1601,6 +1625,8 @@ struct UnitTestsFunc : InstructionFunc {
tr->setOption(FDBTransactionOption::FDB_TR_OPTION_TRANSACTION_LOGGING_ENABLE, Optional<StringRef>(LiteralStringRef("my_transaction")));
tr->setOption(FDBTransactionOption::FDB_TR_OPTION_READ_LOCK_AWARE);
tr->setOption(FDBTransactionOption::FDB_TR_OPTION_LOCK_AWARE);
tr->setOption(FDBTransactionOption::FDB_TR_OPTION_INCLUDE_PORT_IN_ADDRESS);
tr->setOption(FDBTransactionOption::FDB_TR_OPTION_REPORT_CONFLICTING_KEYS);
Optional<FDBStandalone<ValueRef> > _ = wait(tr->get(LiteralStringRef("\xff")));
tr->cancel();
@ -1746,7 +1772,7 @@ ACTOR void startTest(std::string clusterFilename, StringRef prefix, int apiVersi
populateOpsThatCreateDirectories(); // FIXME
// This is "our" network
g_network = newNet2(false);
g_network = newNet2(TLSConfig());
ASSERT(!API::isAPIVersionSelected());
try {
@ -1789,9 +1815,9 @@ ACTOR void startTest(std::string clusterFilename, StringRef prefix, int apiVersi
ACTOR void _test_versionstamp() {
try {
g_network = newNet2(false);
g_network = newNet2(TLSConfig());
API *fdb = FDB::API::selectAPIVersion(620);
API *fdb = FDB::API::selectAPIVersion(630);
fdb->setupNetwork();
startThread(networkThread, fdb);

View File

@ -1,131 +0,0 @@
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="14.1" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<Import Project="$(SolutionDir)versions.target" />
<PropertyGroup Condition="'$(Release)' != 'true' ">
<PreReleaseDecoration>-PRERELEASE</PreReleaseDecoration>
</PropertyGroup>
<PropertyGroup Condition="'$(Release)' == 'true' ">
<PreReleaseDecoration>
</PreReleaseDecoration>
</PropertyGroup>
<ItemGroup Label="ProjectConfigurations">
<ProjectConfiguration Include="Debug|X64">
<Configuration>Debug</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|X64">
<Configuration>Release</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
</ItemGroup>
<ItemGroup>
<ActorCompiler Include="Tester.actor.h" />
</ItemGroup>
<ItemGroup>
<ActorCompiler Include="DirectoryTester.actor.cpp" />
<ActorCompiler Include="Tester.actor.cpp" />
</ItemGroup>
<PropertyGroup Label="Globals">
<ProjectGuid>{086EB89C-CDBD-4ABE-8296-5CA224244C80}</ProjectGuid>
<Keyword>Win32Proj</Keyword>
<RootNamespace>fdb_flow_tester</RootNamespace>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|X64'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<CharacterSet>MultiByte</CharacterSet>
<PlatformToolset>v141</PlatformToolset>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|X64'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<WholeProgramOptimization>false</WholeProgramOptimization>
<CharacterSet>MultiByte</CharacterSet>
<PlatformToolset>v141</PlatformToolset>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
<ImportGroup Label="ExtensionSettings">
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|X64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|X64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<PropertyGroup Label="UserMacros" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|X64'">
<LinkIncremental>true</LinkIncremental>
<OutDir>$(SolutionDir)bin\$(Configuration)\</OutDir>
<IntDir>$(SystemDrive)\temp\msvcfdb\$(Platform)$(Configuration)\$(MSBuildProjectName)\</IntDir>
<IncludePath>$(IncludePath);../../../;C:\Program Files\boost_1_67_0</IncludePath>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|X64'">
<LinkIncremental>false</LinkIncremental>
<OutDir>$(SolutionDir)bin\$(Configuration)\</OutDir>
<IntDir>$(SystemDrive)\temp\msvcfdb\$(Platform)$(Configuration)\$(MSBuildProjectName)\</IntDir>
<IncludePath>$(IncludePath);../;C:\Program Files\boost_1_67_0</IncludePath>
</PropertyGroup>
<ItemDefinitionGroup>
<ClCompile>
<PreprocessorDefinitions>FDB_VT_VERSION="$(Version)$(PreReleaseDecoration)";FDB_VT_PACKAGE_NAME="$(PackageName)";%(PreprocessorDefinitions)</PreprocessorDefinitions>
<LanguageStandard>stdcpp17</LanguageStandard>
</ClCompile>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|X64'">
<ClCompile>
<PrecompiledHeader>
</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=_WIN32_WINNT_WS03;BOOST_ALL_NO_LIB;WINVER=_WIN32_WINNT_WS03;NTDDI_VERSION=NTDDI_WS03;BOOST_ALL_NO_LIB;_DEBUG;_HAS_ITERATOR_DEBUGGING=0;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<MultiProcessorCompilation>true</MultiProcessorCompilation>
<MinimalRebuild>false</MinimalRebuild>
<RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
<AdditionalOptions> @../../../flow/no_intellisense.opt %(AdditionalOptions)</AdditionalOptions>
<LanguageStandard>stdcpp17</LanguageStandard>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalDependencies>$(SolutionDir)bin\$(Configuration)\fdbclient.lib;$(SolutionDir)bin\$(Configuration)\fdb_c.lib;$(SolutionDir)bin\$(Configuration)\fdb_flow.lib;Advapi32.lib</AdditionalDependencies>
</Link>
<PreBuildEvent>
<Command>
</Command>
</PreBuildEvent>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|X64'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<PrecompiledHeader>
</PrecompiledHeader>
<Optimization>Full</Optimization>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=_WIN32_WINNT_WS03;WINVER=_WIN32_WINNT_WS03;NTDDI_VERSION=NTDDI_WS03;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<MultiProcessorCompilation>true</MultiProcessorCompilation>
<FavorSizeOrSpeed>Speed</FavorSizeOrSpeed>
<RuntimeLibrary>MultiThreaded</RuntimeLibrary>
<BufferSecurityCheck>false</BufferSecurityCheck>
<EnableEnhancedInstructionSet>StreamingSIMDExtensions2</EnableEnhancedInstructionSet>
<AdditionalOptions> @../flow/no_intellisense.opt %(AdditionalOptions)</AdditionalOptions>
<LanguageStandard>stdcpp17</LanguageStandard>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>false</EnableCOMDATFolding>
<OptimizeReferences>false</OptimizeReferences>
<AdditionalDependencies>$(SolutionDir)bin\$(Configuration)\fdbclient.lib;$(SolutionDir)bin\$(Configuration)\fdb_c.lib;$(SolutionDir)bin\$(Configuration)\fdb_flow.lib;Advapi32.lib</AdditionalDependencies>
<LinkTimeCodeGeneration>Default</LinkTimeCodeGeneration>
</Link>
<PreBuildEvent>
<Command>
</Command>
</PreBuildEvent>
</ItemDefinitionGroup>
<ImportGroup Label="ExtensionTargets">
<Import Project="..\..\..\flow\actorcompiler\ActorCompiler.targets" />
</ImportGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
</Project>

View File

@ -1,8 +0,0 @@
<?xml version="1.0" encoding="utf-8"?>
<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup>
<ActorCompiler Include="DirectoryTester.actor.cpp" />
<ActorCompiler Include="Tester.actor.cpp" />
<ActorCompiler Include="Tester.actor.h" />
</ItemGroup>
</Project>

View File

@ -1,41 +0,0 @@
#
# local.mk
#
# This source file is part of the FoundationDB open source project
#
# Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- mode: makefile; -*-
fdb_flow_tester_CFLAGS := -Ibindings/c $(fdbrpc_CFLAGS)
fdb_flow_tester_LDFLAGS := -Llib $(fdbrpc_LDFLAGS) -lfdb_c
fdb_flow_tester_LIBS := lib/libfdb_flow.a lib/libflow.a lib/libfdb_c.$(DLEXT)
fdb_flow_tester: lib/libfdb_c.$(DLEXT)
@mkdir -p bindings/flow/bin
@rm -f bindings/flow/bin/fdb_flow_tester
@cp bin/fdb_flow_tester bindings/flow/bin/fdb_flow_tester
fdb_flow_tester_clean: _fdb_flow_tester_clean
_fdb_flow_tester_clean:
@rm -rf bindings/flow/bin
ifeq ($(PLATFORM),linux)
fdb_flow_tester_LDFLAGS += -static-libstdc++ -static-libgcc -ldl -lpthread -lrt -lm
else ifeq ($(PLATFORM),osx)
fdb_flow_tester_LDFLAGS += -lc++
endif

View File

@ -9,7 +9,7 @@ This package requires:
- [Mono](http://www.mono-project.com/) (macOS or Linux) or [Visual Studio](https://www.visualstudio.com/) (Windows) (build-time only)
- FoundationDB C API 2.0.x-6.1.x (part of the [FoundationDB client packages](https://apple.github.io/foundationdb/downloads.html#c))
Use of this package requires the selection of a FoundationDB API version at runtime. This package currently supports FoundationDB API versions 200-620.
Use of this package requires the selection of a FoundationDB API version at runtime. This package currently supports FoundationDB API versions 200-630.
To install this package, you can run the "fdb-go-install.sh" script (for versions 5.0.x and greater):

View File

@ -1,103 +0,0 @@
#
# include.mk
#
# This source file is part of the FoundationDB open source project
#
# Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
TARGETS += fdb_go fdb_go_tester
CLEAN_TARGETS += fdb_go_clean fdb_go_tester_clean
GOPATH := $(CURDIR)/bindings/go/build
GO_IMPORT_PATH := github.com/apple/foundationdb/bindings/go/src
GO_DEST := $(GOPATH)/src/$(GO_IMPORT_PATH)
.PHONY: fdb_go fdb_go_path fdb_go_fmt fdb_go_fmt_check fdb_go_tester fdb_go_tester_clean
# We only override if the environment didn't set it (this is used by
# the fdbwebsite documentation build process)
GODOC_DIR ?= bindings/go
CGO_CFLAGS := -I$(CURDIR)/bindings/c
CGO_LDFLAGS := -L$(CURDIR)/lib
ifeq ($(PLATFORM),linux)
GOPLATFORM := linux_amd64
else ifeq ($(PLATFORM),osx)
GOPLATFORM := darwin_amd64
else
$(error Not prepared to compile on platform $(PLATFORM))
endif
GO_PACKAGE_OUTDIR := $(GOPATH)/pkg/$(GOPLATFORM)/$(GO_IMPORT_PATH)
GO_PACKAGES := fdb fdb/tuple fdb/subspace fdb/directory
GO_PACKAGE_OBJECTS := $(addprefix $(GO_PACKAGE_OUTDIR)/,$(GO_PACKAGES:=.a))
GO_GEN := $(CURDIR)/bindings/go/src/fdb/generated.go
GO_SRC := $(shell find $(CURDIR)/bindings/go/src -name '*.go') $(GO_GEN)
fdb_go: $(GO_PACKAGE_OBJECTS) $(GO_SRC) fdb_go_fmt_check
fdb_go_fmt: $(GO_SRC)
@echo "Formatting fdb_go"
@gofmt -w $(GO_SRC)
fdb_go_fmt_check: $(GO_SRC)
@echo "Checking fdb_go"
@bash -c 'fmtoutstr=$$(gofmt -l $(GO_SRC)) ; if [[ -n "$${fmtoutstr}" ]] ; then echo "Detected go formatting violations for the following files:" ; echo "$${fmtoutstr}" ; echo "Try running: make fdb_go_fmt"; exit 1 ; fi'
$(GO_DEST)/.stamp: $(GO_SRC)
@echo "Creating fdb_go_path"
@mkdir -p $(GO_DEST)
@cp -r bindings/go/src/* $(GO_DEST)
@touch $(GO_DEST)/.stamp
fdb_go_path: $(GO_DEST)/.stamp
fdb_go_clean:
@echo "Cleaning fdb_go"
@rm -rf $(GOPATH)
fdb_go_tester: $(GOPATH)/bin/_stacktester
fdb_go_tester_clean:
@echo "Cleaning fdb_go_tester"
@rm -rf $(GOPATH)/bin
$(GOPATH)/bin/_stacktester: $(GO_DEST)/.stamp $(GO_SRC) $(GO_PACKAGE_OBJECTS)
@echo "Compiling $(basename $(notdir $@))"
@go install $(GO_IMPORT_PATH)/_stacktester
$(GO_PACKAGE_OUTDIR)/fdb/tuple.a: $(GO_DEST)/.stamp $(GO_SRC) $(GO_PACKAGE_OUTDIR)/fdb.a
@echo "Compiling fdb/tuple"
@go install $(GO_IMPORT_PATH)/fdb/tuple
$(GO_PACKAGE_OUTDIR)/fdb/subspace.a: $(GO_DEST)/.stamp $(GO_SRC) $(GO_PACKAGE_OUTDIR)/fdb.a $(GO_PACKAGE_OUTDIR)/fdb/tuple.a
@echo "Compiling fdb/subspace"
@go install $(GO_IMPORT_PATH)/fdb/subspace
$(GO_PACKAGE_OUTDIR)/fdb/directory.a: $(GO_DEST)/.stamp $(GO_SRC) $(GO_PACKAGE_OUTDIR)/fdb.a $(GO_PACKAGE_OUTDIR)/fdb/tuple.a $(GO_PACKAGE_OUTDIR)/fdb/subspace.a
@echo "Compiling fdb/directory"
@go install $(GO_IMPORT_PATH)/fdb/directory
$(GO_PACKAGE_OUTDIR)/fdb.a: $(GO_DEST)/.stamp lib/libfdb_c.$(DLEXT) $(GO_SRC)
@echo "Compiling fdb"
@go install $(GO_IMPORT_PATH)/fdb
$(GO_GEN): bindings/go/src/_util/translate_fdb_options.go fdbclient/vexillographer/fdb.options
@echo "Building $@"
@go run bindings/go/src/_util/translate_fdb_options.go < fdbclient/vexillographer/fdb.options > $@

View File

@ -569,6 +569,16 @@ func (sm *StackMachine) processInst(idx int, inst tuple.Tuple) {
}
sm.store(idx, res.(fdb.FutureByteSlice))
case op == "GET_ESTIMATED_RANGE_SIZE":
r := sm.popKeyRange()
_, e := rt.ReadTransact(func(rtr fdb.ReadTransaction) (interface{}, error) {
_ = rtr.GetEstimatedRangeSizeBytes(r).MustGet()
sm.store(idx, []byte("GOT_ESTIMATED_RANGE_SIZE"))
return nil, nil
})
if e != nil {
panic(e)
}
case op == "COMMIT":
sm.store(idx, sm.currentTransaction().Commit())
case op == "RESET":
@ -805,6 +815,7 @@ func (sm *StackMachine) processInst(idx int, inst tuple.Tuple) {
db.Options().SetTransactionRetryLimit(10)
db.Options().SetTransactionRetryLimit(-1)
db.Options().SetTransactionCausalReadRisky()
db.Options().SetTransactionIncludePortInAddress()
if !fdb.IsAPIVersionSelected() {
log.Fatal("API version should be selected")
@ -850,6 +861,7 @@ func (sm *StackMachine) processInst(idx int, inst tuple.Tuple) {
tr.Options().SetLogTransaction()
tr.Options().SetReadLockAware()
tr.Options().SetLockAware()
tr.Options().SetIncludePortInAddress()
return tr.Get(fdb.Key("\xff")).MustGet(), nil
})

View File

@ -22,11 +22,11 @@
package fdb
// #define FDB_API_VERSION 620
// #define FDB_API_VERSION 630
// #include <foundationdb/fdb_c.h>
import "C"
// Deprecated: Use OpenDatabase or OpenDefault to obtain a database handle directly
// Deprecated: Use OpenDatabase or OpenDefault to obtain a database handle directly.
// Cluster is a handle to a FoundationDB cluster. Cluster is a lightweight
// object that may be efficiently copied, and is safe for concurrent use by
// multiple goroutines.
@ -34,7 +34,7 @@ type Cluster struct {
clusterFileName string
}
// Deprecated: Use OpenDatabase or OpenDefault to obtain a database handle directly
// Deprecated: Use OpenDatabase or OpenDefault to obtain a database handle directly.
// OpenDatabase returns a database handle from the FoundationDB cluster.
//
// The database name must be []byte("DB").

View File

@ -22,7 +22,7 @@
package fdb
// #define FDB_API_VERSION 620
// #define FDB_API_VERSION 630
// #include <foundationdb/fdb_c.h>
import "C"

View File

@ -45,6 +45,10 @@ func (dp directoryPartition) Pack(t tuple.Tuple) fdb.Key {
panic("cannot pack keys using the root of a directory partition")
}
func (dp directoryPartition) PackWithVersionstamp(t tuple.Tuple) (fdb.Key, error) {
panic("cannot pack keys using the root of a directory partition")
}
func (dp directoryPartition) Unpack(k fdb.KeyConvertible) (tuple.Tuple, error) {
panic("cannot unpack keys using the root of a directory partition")
}

View File

@ -46,7 +46,7 @@ A basic interaction with the FoundationDB API is demonstrated below:
func main() {
// Different API versions may expose different runtime behaviors.
fdb.MustAPIVersion(620)
fdb.MustAPIVersion(630)
// Open the default database from the system cluster
db := fdb.MustOpenDefault()
@ -139,6 +139,16 @@ error. The above example may be rewritten as:
return []string{valueOne, valueTwo}, nil
})
MustGet returns nil (which is different from empty slice []byte{}), when the
key doesn't exist, and hence non-existence can be checked as follows:
val := tr.Get(fdb.Key("foobar")).MustGet()
if val == nil {
fmt.Println("foobar does not exist.")
} else {
fmt.Println("foobar exists.")
}
Any panic that occurs during execution of the caller-provided function will be
recovered by the (Database).Transact method. If the error is an FDB Error, it
will either result in a retry of the function or be returned by Transact. If the
@ -203,7 +213,8 @@ operands to atomic operations in this API must be provided as appropriately
encoded byte slices. To convert a Go type to a byte slice, see the binary
package.
The current atomic operations in this API are Add, BitAnd, BitOr, BitXor, Max, Min,
SetVersionstampedKey, SetVersionstampedValue (all methods on Transaction).
The current atomic operations in this API are Add, BitAnd, BitOr, BitXor,
CompareAndClear, Max, Min, SetVersionstampedKey, SetVersionstampedValue
(all methods on Transaction).
*/
package fdb

View File

@ -22,7 +22,7 @@
package fdb
// #define FDB_API_VERSION 620
// #define FDB_API_VERSION 630
// #include <foundationdb/fdb_c.h>
import "C"

View File

@ -22,7 +22,7 @@
package fdb
// #define FDB_API_VERSION 620
// #define FDB_API_VERSION 630
// #include <foundationdb/fdb_c.h>
// #include <stdlib.h>
import "C"
@ -108,7 +108,7 @@ func (opt NetworkOptions) setOpt(code int, param []byte) error {
// library, an error will be returned. APIVersion must be called prior to any
// other functions in the fdb package.
//
// Currently, this package supports API versions 200 through 620.
// Currently, this package supports API versions 200 through 630.
//
// Warning: When using the multi-version client API, setting an API version that
// is not supported by a particular client library will prevent that client from
@ -116,7 +116,7 @@ func (opt NetworkOptions) setOpt(code int, param []byte) error {
// the API version of your application after upgrading your client until the
// cluster has also been upgraded.
func APIVersion(version int) error {
headerVersion := 620
headerVersion := 630
networkMutex.Lock()
defer networkMutex.Unlock()
@ -128,7 +128,7 @@ func APIVersion(version int) error {
return errAPIVersionAlreadySet
}
if version < 200 || version > 620 {
if version < 200 || version > 630 {
return errAPIVersionNotSupported
}
@ -236,8 +236,12 @@ func StartNetwork() error {
const DefaultClusterFile string = ""
// OpenDefault returns a database handle to the FoundationDB cluster identified
// by the DefaultClusterFile on the current machine. The FoundationDB client
// networking engine will be initialized first, if necessary.
// by the DefaultClusterFile on the current machine.
//
// A single client can use this function multiple times to connect to different
// clusters simultaneously, with each invocation requiring its own cluster file.
// To connect to multiple clusters running at different, incompatible versions,
// the multi-version client API must be used.
func OpenDefault() (Database, error) {
return OpenDatabase(DefaultClusterFile)
}
@ -254,6 +258,11 @@ func MustOpenDefault() Database {
// Open returns a database handle to the FoundationDB cluster identified
// by the provided cluster file and database name.
//
// A single client can use this function multiple times to connect to different
// clusters simultaneously, with each invocation requiring its own cluster file.
// To connect to multiple clusters running at different, incompatible versions,
// the multi-version client API must be used.
func OpenDatabase(clusterFile string) (Database, error) {
networkMutex.Lock()
defer networkMutex.Unlock()
@ -283,6 +292,8 @@ func OpenDatabase(clusterFile string) (Database, error) {
return db, nil
}
// MustOpenDatabase is like OpenDatabase but panics if the default database cannot
// be opened.
func MustOpenDatabase(clusterFile string) Database {
db, err := OpenDatabase(clusterFile)
if err != nil {
@ -291,7 +302,7 @@ func MustOpenDatabase(clusterFile string) Database {
return db
}
// Deprecated: Use OpenDatabase instead
// Deprecated: Use OpenDatabase instead.
// The database name must be []byte("DB").
func Open(clusterFile string, dbName []byte) (Database, error) {
if bytes.Compare(dbName, []byte("DB")) != 0 {
@ -300,7 +311,7 @@ func Open(clusterFile string, dbName []byte) (Database, error) {
return OpenDatabase(clusterFile)
}
// Deprecated: Use MustOpenDatabase instead
// Deprecated: Use MustOpenDatabase instead.
// MustOpen is like Open but panics if the database cannot be opened.
func MustOpen(clusterFile string, dbName []byte) Database {
db, err := Open(clusterFile, dbName)

View File

@ -0,0 +1,5 @@
package fdb
//#cgo CFLAGS: -I/usr/local/include/
//#cgo LDFLAGS: -L/usr/local/lib/
import "C"

View File

@ -0,0 +1,5 @@
package fdb
//#cgo CFLAGS: -I"C:/Program Files/foundationdb/include"
//#cgo LDFLAGS: -L"C:/Program Files/foundationdb/bin" -lfdb_c
import "C"

View File

@ -23,7 +23,7 @@
package fdb
// #cgo LDFLAGS: -lfdb_c -lm
// #define FDB_API_VERSION 620
// #define FDB_API_VERSION 630
// #include <foundationdb/fdb_c.h>
// #include <string.h>
//
@ -77,6 +77,7 @@ func newFuture(ptr *C.FDBFuture) *future {
return &future{ptr}
}
// Note: This function guarantees the callback will be executed **at most once**.
func fdb_future_block_until_ready(f *C.FDBFuture) {
if C.fdb_future_is_ready(f) != 0 {
return

View File

@ -46,13 +46,6 @@ func (o NetworkOptions) SetLocalAddress(param string) error {
return o.setOpt(10, []byte(param))
}
// enable the object serializer for network communication
//
// Parameter: 0 is false, every other value is true
func (o NetworkOptions) SetUseObjectSerializer(param int64) error {
return o.setOpt(11, int64ToBytes(param))
}
// Deprecated
//
// Parameter: path to cluster file
@ -95,6 +88,13 @@ func (o NetworkOptions) SetTraceFormat(param string) error {
return o.setOpt(34, []byte(param))
}
// Select clock source for trace files. now (default) or realtime are supported.
//
// Parameter: Trace clock source
func (o NetworkOptions) SetTraceClockSource(param string) error {
return o.setOpt(35, []byte(param))
}
// Set internal tuning or debugging knobs
//
// Parameter: knob_name=knob_value
@ -325,12 +325,22 @@ func (o DatabaseOptions) SetTransactionSizeLimit(param int64) error {
return o.setOpt(503, int64ToBytes(param))
}
// The read version will be committed, and usually will be the latest committed, but might not be the latest committed in the event of a simultaneous fault and misbehaving clock.
func (o DatabaseOptions) SetTransactionCausalReadRisky() error {
return o.setOpt(504, nil)
}
// Addresses returned by get_addresses_for_key include the port when enabled. As of api version 700, this option is enabled by default and setting this has no effect.
func (o DatabaseOptions) SetTransactionIncludePortInAddress() error {
return o.setOpt(505, nil)
}
// The transaction, if not self-conflicting, may be committed a second time after commit succeeds, in the event of a fault
func (o TransactionOptions) SetCausalWriteRisky() error {
return o.setOpt(10, nil)
}
// The read version will be committed, and usually will be the latest committed, but might not be the latest committed in the event of a fault or partition
// The read version will be committed, and usually will be the latest committed, but might not be the latest committed in the event of a simultaneous fault and misbehaving clock.
func (o TransactionOptions) SetCausalReadRisky() error {
return o.setOpt(20, nil)
}
@ -340,6 +350,11 @@ func (o TransactionOptions) SetCausalReadDisable() error {
return o.setOpt(21, nil)
}
// Addresses returned by get_addresses_for_key include the port when enabled. As of api version 700, this option is enabled by default and setting this has no effect.
func (o TransactionOptions) SetIncludePortInAddress() error {
return o.setOpt(23, nil)
}
// The next write performed on this transaction will not generate a write conflict range. As a result, other transactions which read the key(s) being modified by the next write will not conflict with this transaction. Care needs to be taken when using this option on a transaction that is shared between multiple threads. When setting this option, write conflict ranges will be disabled on the next write operation, regardless of what thread it is on.
func (o TransactionOptions) SetNextWriteNoWriteConflictRange() error {
return o.setOpt(30, nil)
@ -497,7 +512,8 @@ const (
// small portion of data is transferred to the client initially (in order to
// minimize costs if the client doesn't read the entire range), and as the
// caller iterates over more items in the range larger batches will be
// transferred in order to minimize latency.
// transferred in order to minimize latency. After enough iterations, the
// iterator mode will eventually reach the same byte limit as ``WANT_ALL``
StreamingModeIterator StreamingMode = 0
// Infrequently used. The client has passed a specific row limit and wants

View File

@ -22,7 +22,7 @@
package fdb
// #define FDB_API_VERSION 620
// #define FDB_API_VERSION 630
// #include <foundationdb/fdb_c.h>
import "C"
@ -55,7 +55,8 @@ type RangeOptions struct {
// Reverse indicates that the read should be performed in lexicographic
// (false) or reverse lexicographic (true) order. When Reverse is true and
// Limit is non-zero, the last Limit key-value pairs in the range are
// returned.
// returned. Reading ranges in reverse is supported natively by the
// database and should have minimal extra cost.
Reverse bool
}

View File

@ -86,3 +86,11 @@ func (s Snapshot) GetReadVersion() FutureInt64 {
func (s Snapshot) GetDatabase() Database {
return s.transaction.db
}
func (s Snapshot) GetEstimatedRangeSizeBytes(r ExactRange) FutureInt64 {
beginKey, endKey := r.FDBRangeKeys()
return s.getEstimatedRangeSizeBytes(
beginKey.FDBKey(),
endKey.FDBKey(),
)
}

View File

@ -54,6 +54,15 @@ type Subspace interface {
// Subspace prepended.
Pack(t tuple.Tuple) fdb.Key
// PackWithVersionstamp returns the key encoding the specified tuple in
// the subspace so that it may be used as the key in fdb.Transaction's
// SetVersionstampedKey() method. The passed tuple must contain exactly
// one incomplete tuple.Versionstamp instance or the method will return
// with an error. The behavior here is the same as if one used the
// tuple.PackWithVersionstamp() method to appropriately pack together this
// subspace and the passed tuple.
PackWithVersionstamp(t tuple.Tuple) (fdb.Key, error)
// Unpack returns the Tuple encoded by the given key with the prefix of this
// Subspace removed. Unpack will return an error if the key is not in this
// Subspace or does not encode a well-formed Tuple.
@ -108,6 +117,10 @@ func (s subspace) Pack(t tuple.Tuple) fdb.Key {
return fdb.Key(concat(s.b, t.Pack()...))
}
func (s subspace) PackWithVersionstamp(t tuple.Tuple) (fdb.Key, error) {
return t.PackWithVersionstamp(s.b)
}
func (s subspace) Unpack(k fdb.KeyConvertible) (tuple.Tuple, error) {
key := k.FDBKey()
if !bytes.HasPrefix(key, s.b) {

View File

@ -22,7 +22,7 @@
package fdb
// #define FDB_API_VERSION 620
// #define FDB_API_VERSION 630
// #include <foundationdb/fdb_c.h>
import "C"
import "sync"
@ -40,6 +40,7 @@ type ReadTransaction interface {
GetReadVersion() FutureInt64
GetDatabase() Database
Snapshot() Snapshot
GetEstimatedRangeSizeBytes(r ExactRange) FutureInt64
ReadTransactor
}
@ -309,6 +310,28 @@ func (t Transaction) GetRange(r Range, options RangeOptions) RangeResult {
return t.getRange(r, options, false)
}
func (t *transaction) getEstimatedRangeSizeBytes(beginKey Key, endKey Key) FutureInt64 {
return &futureInt64{
future: newFuture(C.fdb_transaction_get_estimated_range_size_bytes(
t.ptr,
byteSliceToPtr(beginKey),
C.int(len(beginKey)),
byteSliceToPtr(endKey),
C.int(len(endKey)),
)),
}
}
// GetEstimatedRangeSizeBytes will get an estimate for the number of bytes
// stored in the given range.
func (t Transaction) GetEstimatedRangeSizeBytes(r ExactRange) FutureInt64 {
beginKey, endKey := r.FDBRangeKeys()
return t.getEstimatedRangeSizeBytes(
beginKey.FDBKey(),
endKey.FDBKey(),
)
}
func (t *transaction) getReadVersion() FutureInt64 {
return &futureInt64{
future: newFuture(C.fdb_transaction_get_read_version(t.ptr)),
@ -341,6 +364,11 @@ func (t Transaction) Clear(key KeyConvertible) {
// ClearRange removes all keys k such that begin <= k < end, and their
// associated values. ClearRange returns immediately, having modified the
// snapshot of the database represented by the transaction.
// Range clears are efficient with FoundationDB -- clearing large amounts of data
// will be fast. However, this will not immediately free up disk -
// data for the deleted range is cleaned up in the background.
// For purposes of computing the transaction size, only the begin and end keys of a clear range are counted.
// The size of the data stored in the range does not count against the transaction size limit.
func (t Transaction) ClearRange(er ExactRange) {
begin, end := er.FDBRangeKeys()
bkb := begin.FDBKey()

View File

@ -169,8 +169,6 @@ file(WRITE ${MANIFEST_FILE} ${MANIFEST_TEXT})
add_jar(fdb-java ${JAVA_BINDING_SRCS} ${GENERATED_JAVA_FILES} ${CMAKE_SOURCE_DIR}/LICENSE
OUTPUT_DIR ${PROJECT_BINARY_DIR}/lib VERSION ${CMAKE_PROJECT_VERSION} MANIFEST ${MANIFEST_FILE})
add_dependencies(fdb-java fdb_java_options fdb_java)
add_jar(foundationdb-tests SOURCES ${JAVA_TESTS_SRCS} INCLUDE_JARS fdb-java)
add_dependencies(foundationdb-tests fdb_java_options)
# TODO[mpilman]: The java RPM will require some more effort (mostly on debian). However,
# most people will use the fat-jar, so it is not clear how high this priority is.
@ -215,20 +213,38 @@ if(NOT OPEN_FOR_IDE)
else()
set(lib_destination "linux/amd64")
endif()
set(lib_destination "${unpack_dir}/lib/${lib_destination}")
set(lib_destination "${unpack_dir}/lib/${lib_destination}")
set(jni_package "${CMAKE_BINARY_DIR}/packages/lib")
file(MAKE_DIRECTORY ${lib_destination})
file(MAKE_DIRECTORY ${jni_package})
add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/lib_copied
COMMAND ${CMAKE_COMMAND} -E copy $<TARGET_FILE:fdb_java> ${lib_destination} &&
${CMAKE_COMMAND} -E copy $<TARGET_FILE:fdb_java> ${jni_package} &&
${CMAKE_COMMAND} -E touch ${CMAKE_CURRENT_BINARY_DIR}/lib_copied
COMMENT "Copy jni library for fat jar")
add_custom_target(copy_lib DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/lib_copied)
add_dependencies(copy_lib unpack_jar)
set(target_jar ${jar_destination}/fdb-java-${CMAKE_PROJECT_VERSION}.jar)
if(NOT FDB_RELEASE)
set(prerelease_string "-PRERELEASE")
else()
set(prerelease_string "")
endif()
set(target_jar ${jar_destination}/fdb-java-${CMAKE_PROJECT_VERSION}${prerelease_string}.jar)
add_custom_command(OUTPUT ${target_jar}
COMMAND ${Java_JAR_EXECUTABLE} cfm ${target_jar} ${unpack_dir}/META-INF/MANIFEST.MF .
WORKING_DIRECTORY ${unpack_dir}
DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/lib_copied
COMMENT "Build ${jar_destination}/fdb-java-${CMAKE_PROJECT_VERSION}.jar")
COMMENT "Build ${target_jar}")
add_jar(foundationdb-tests SOURCES ${JAVA_TESTS_SRCS} INCLUDE_JARS fdb-java)
add_dependencies(foundationdb-tests fdb_java_options)
set(tests_jar ${jar_destination}/fdb-java-${CMAKE_PROJECT_VERSION}${prerelease_string}-tests.jar)
add_custom_command(OUTPUT ${tests_jar}
COMMAND ${CMAKE_COMMAND} -E copy foundationdb-tests.jar "${tests_jar}"
WORKING_DIRECTORY .
DEPENDS foundationdb-tests
COMMENT "Build ${tests_jar}")
add_custom_target(fdb-java-tests ALL DEPENDS ${tests_jar})
add_dependencies(fdb-java-tests foundationdb-tests)
add_custom_target(fat-jar ALL DEPENDS ${target_jar})
add_dependencies(fat-jar fdb-java)
add_dependencies(fat-jar copy_lib)

View File

@ -19,7 +19,7 @@
*/
#include <foundationdb/ClientWorkload.h>
#define FDB_API_VERSION 620
#define FDB_API_VERSION 630
#include <foundationdb/fdb_c.h>
#include <jni.h>
@ -368,9 +368,11 @@ struct JVM {
{ { "send", "(JZ)V", reinterpret_cast<void*>(&promiseSend) } });
auto fdbClass = getClass("com/apple/foundationdb/FDB");
jmethodID selectMethod =
env->GetStaticMethodID(fdbClass, "selectAPIVersion", "(IZ)Lcom/apple/foundationdb/FDB;");
env->GetStaticMethodID(fdbClass, "selectAPIVersion", "(I)Lcom/apple/foundationdb/FDB;");
checkException();
env->CallStaticObjectMethod(fdbClass, selectMethod, jint(620), jboolean(false));
auto fdbInstance = env->CallStaticObjectMethod(fdbClass, selectMethod, jint(630));
checkException();
env->CallObjectMethod(fdbInstance, getMethod(fdbClass, "disableShutdownHook", "()V"));
checkException();
}

View File

@ -21,7 +21,7 @@
#include <jni.h>
#include <string.h>
#define FDB_API_VERSION 620
#define FDB_API_VERSION 630
#include <foundationdb/fdb_c.h>
@ -646,6 +646,35 @@ JNIEXPORT jlong JNICALL Java_com_apple_foundationdb_FDBTransaction_Transaction_1
return (jlong)f;
}
// JNI binding for FDBTransaction.Transaction_getEstimatedRangeSizeBytes:
// wraps fdb_transaction_get_estimated_range_size_bytes and hands the
// resulting FDBFuture* back to Java as an opaque jlong handle (the Java side
// owns and eventually destroys the future).
JNIEXPORT jlong JNICALL Java_com_apple_foundationdb_FDBTransaction_Transaction_1getEstimatedRangeSizeBytes(JNIEnv *jenv, jobject, jlong tPtr,
		jbyteArray beginKeyBytes, jbyteArray endKeyBytes) {
	// Null transaction pointer or key arrays become a Java exception, not a crash.
	if( !tPtr || !beginKeyBytes || !endKeyBytes) {
		throwParamNotNull(jenv);
		return 0;
	}
	FDBTransaction *tr = (FDBTransaction *)tPtr;
	// Pin (or copy) the begin-key bytes; can fail, e.g. under memory pressure.
	uint8_t *startKey = (uint8_t *)jenv->GetByteArrayElements( beginKeyBytes, JNI_NULL );
	if(!startKey) {
		if( !jenv->ExceptionOccurred() )
			throwRuntimeEx( jenv, "Error getting handle to native resources" );
		return 0;
	}
	uint8_t *endKey = (uint8_t *)jenv->GetByteArrayElements(endKeyBytes, JNI_NULL);
	if (!endKey) {
		// Release the already-pinned begin key before bailing out.
		jenv->ReleaseByteArrayElements( beginKeyBytes, (jbyte *)startKey, JNI_ABORT );
		if( !jenv->ExceptionOccurred() )
			throwRuntimeEx( jenv, "Error getting handle to native resources" );
		return 0;
	}
	FDBFuture *f = fdb_transaction_get_estimated_range_size_bytes( tr, startKey, jenv->GetArrayLength( beginKeyBytes ), endKey, jenv->GetArrayLength( endKeyBytes ) );
	// JNI_ABORT: the key buffers were only read, so discard without copy-back.
	jenv->ReleaseByteArrayElements( beginKeyBytes, (jbyte *)startKey, JNI_ABORT );
	jenv->ReleaseByteArrayElements( endKeyBytes, (jbyte *)endKey, JNI_ABORT );
	return (jlong)f;
}
JNIEXPORT void JNICALL Java_com_apple_foundationdb_FDBTransaction_Transaction_1set(JNIEnv *jenv, jobject, jlong tPtr, jbyteArray keyBytes, jbyteArray valueBytes) {
if( !tPtr || !keyBytes || !valueBytes ) {
throwParamNotNull(jenv);

View File

@ -1,104 +0,0 @@
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="14.1" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<Import Project="$(SolutionDir)versions.target" />
<PropertyGroup Condition="'$(Release)' != 'true' ">
<PreReleaseDecoration>-PRERELEASE</PreReleaseDecoration>
</PropertyGroup>
<PropertyGroup Condition="'$(Release)' == 'true' ">
<PreprocessorDefinitions>FDB_CLEAN_BUILD;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<PreReleaseDecoration>
</PreReleaseDecoration>
</PropertyGroup>
<ItemGroup Label="ProjectConfigurations">
<ProjectConfiguration Include="Debug|x64">
<Configuration>Debug</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|x64">
<Configuration>Release</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
</ItemGroup>
<PropertyGroup Label="Globals">
<ProjectGuid>{9617584C-22E8-4272-934F-733F378BF6AE}</ProjectGuid>
<RootNamespace>java</RootNamespace>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
<ConfigurationType>DynamicLibrary</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<CharacterSet>MultiByte</CharacterSet>
<PlatformToolset>v141</PlatformToolset>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
<ConfigurationType>DynamicLibrary</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>MultiByte</CharacterSet>
<PlatformToolset>v141</PlatformToolset>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
<ImportGroup Label="ExtensionSettings">
</ImportGroup>
<ImportGroup Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<PropertyGroup Label="UserMacros" />
<PropertyGroup>
<IncludePath>..\..\;C:\Program Files\Java\jdk6\include\win32;C:\Program Files\Java\jdk6\include;C:\Program Files\boost_1_67_0;$(IncludePath)</IncludePath>
<OutDir>$(SolutionDir)bin\$(Configuration)\</OutDir>
<IntDir>$(SystemDrive)\temp\msvcfdb\$(Platform)$(Configuration)\$(MSBuildProjectName)\</IntDir>
</PropertyGroup>
<ItemDefinitionGroup>
<Link>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalDependencies>$(SolutionDir)bin\$(Configuration)\fdb_c.lib;%(AdditionalDependencies)</AdditionalDependencies>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<AdditionalIncludeDirectories>%(AdditionalIncludeDirectories);$(SolutionDir)bindings\c</AdditionalIncludeDirectories>
<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=_WIN32_WINNT_WS03;BOOST_ALL_NO_LIB;WINVER=_WIN32_WINNT_WS03;NTDDI_VERSION=NTDDI_WS03;_DEBUG;_HAS_ITERATOR_DEBUGGING=0;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
<AdditionalOptions>/bigobj "@$(SolutionDir)flow/no_intellisense.opt" %(AdditionalOptions)</AdditionalOptions>
<LanguageStandard>stdcpp17</LanguageStandard>
</ClCompile>
<Link>
<SubSystem>Windows</SubSystem>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<AdditionalIncludeDirectories>%(AdditionalIncludeDirectories);$(SolutionDir)bindings\c</AdditionalIncludeDirectories>
<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=_WIN32_WINNT_WS03;BOOST_ALL_NO_LIB;WINVER=_WIN32_WINNT_WS03;NTDDI_VERSION=NTDDI_WS03;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<RuntimeLibrary>MultiThreaded</RuntimeLibrary>
<AdditionalOptions>/bigobj "@$(SolutionDir)flow/no_intellisense.opt" %(AdditionalOptions)</AdditionalOptions>
<LanguageStandard>stdcpp17</LanguageStandard>
</ClCompile>
<Link>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
<SubSystem>Windows</SubSystem>
</Link>
</ItemDefinitionGroup>
<ItemGroup>
<ClCompile Include="fdbJNI.cpp" />
</ItemGroup>
<ImportGroup Label="ExtensionTargets" />
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
<Target Name="AfterClean">
<ItemGroup>
<FilesToDelete Include="$(OutDir)fdb_java.dll-*">
<Visible>false</Visible>
</FilesToDelete>
</ItemGroup>
<Message Text="Cleaning old dll files" Importance="high" />
<Delete Files="@(FilesToDelete)" />
</Target>
</Project>

View File

@ -1,222 +0,0 @@
#
# local.mk
#
# This source file is part of the FoundationDB open source project
#
# Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- mode: makefile; -*-
fdb_java_LDFLAGS := -Llib
fdb_java_CFLAGS := $(fdbclient_CFLAGS) -Ibindings/c
# We only override if the environment didn't set it (this is used by
# the fdbwebsite documentation build process)
JAVADOC_DIR ?= bindings/java
fdb_java_LIBS := lib/libfdb_c.$(DLEXT)
ifeq ($(RELEASE),true)
JARVER = $(VERSION)
APPLEJARVER = $(VERSION)
else
JARVER = $(VERSION)-PRERELEASE
APPLEJARVER = $(VERSION)-SNAPSHOT
endif
ifeq ($(PLATFORM),linux)
JAVA_HOME ?= /usr/lib/jvm/java-8-openjdk-amd64
fdb_java_CFLAGS += -I$(JAVA_HOME)/include -I$(JAVA_HOME)/include/linux
fdb_java_LDFLAGS += -static-libgcc
java_ARCH := amd64
else ifeq ($(PLATFORM),osx)
JAVA_HOME ?= $(shell /usr/libexec/java_home)
fdb_java_CFLAGS += -I$(JAVA_HOME)/include -I$(JAVA_HOME)/include/darwin
java_ARCH := x86_64
endif
JAVA_GENERATED_SOURCES := bindings/java/src/main/com/apple/foundationdb/NetworkOptions.java bindings/java/src/main/com/apple/foundationdb/DatabaseOptions.java bindings/java/src/main/com/apple/foundationdb/TransactionOptions.java bindings/java/src/main/com/apple/foundationdb/StreamingMode.java bindings/java/src/main/com/apple/foundationdb/ConflictRangeType.java bindings/java/src/main/com/apple/foundationdb/MutationType.java bindings/java/src/main/com/apple/foundationdb/FDBException.java
JAVA_SOURCES := $(JAVA_GENERATED_SOURCES) bindings/java/src/main/com/apple/foundationdb/*.java bindings/java/src/main/com/apple/foundationdb/async/*.java bindings/java/src/main/com/apple/foundationdb/tuple/*.java bindings/java/src/main/com/apple/foundationdb/directory/*.java bindings/java/src/main/com/apple/foundationdb/subspace/*.java bindings/java/src/test/com/apple/foundationdb/test/*.java
fdb_java: bindings/java/foundationdb-client.jar bindings/java/foundationdb-tests.jar
bindings/java/foundationdb-tests.jar: bindings/java/.classstamp
@echo "Building $@"
@jar cf $@ -C bindings/java/classes/test com/apple/foundationdb
bindings/java/foundationdb-client.jar: bindings/java/.classstamp lib/libfdb_java.$(DLEXT)
@echo "Building $@"
@rm -rf bindings/java/classes/main/lib/$(PLATFORM)/$(java_ARCH)
@mkdir -p bindings/java/classes/main/lib/$(PLATFORM)/$(java_ARCH)
@cp lib/libfdb_java.$(DLEXT) bindings/java/classes/main/lib/$(PLATFORM)/$(java_ARCH)/libfdb_java.$(java_DLEXT)
@jar cf $@ -C bindings/java/classes/main com/apple/foundationdb -C bindings/java/classes/main lib
fdb_java_jar_clean:
@rm -rf $(JAVA_GENERATED_SOURCES)
@rm -rf bindings/java/classes
@rm -f bindings/java/foundationdb-client.jar bindings/java/foundationdb-tests.jar bindings/java/.classstamp
# Redefinition of a target already defined in generated.mk, but it's "okay" and the way things were done before.
fdb_java_clean: fdb_java_jar_clean
bindings/java/src/main/com/apple/foundationdb/StreamingMode.java: bin/vexillographer.exe fdbclient/vexillographer/fdb.options
@echo "Building Java options"
@$(MONO) bin/vexillographer.exe fdbclient/vexillographer/fdb.options java $(@D)
bindings/java/src/main/com/apple/foundationdb/MutationType.java: bindings/java/src/main/com/apple/foundationdb/StreamingMode.java
@true
bindings/java/src/main/com/apple/foundationdb/ConflictRangeType.java: bindings/java/src/main/com/apple/foundationdb/StreamingMode.java
@true
bindings/java/src/main/com/apple/foundationdb/FDBException.java: bindings/java/src/main/com/apple/foundationdb/StreamingMode.java
@true
bindings/java/src/main/com/apple/foundationdb/%Options.java: bindings/java/src/main/com/apple/foundationdb/StreamingMode.java
@true
bindings/java/src/main/overview.html: bindings/java/src/main/overview.html.in $(ALL_MAKEFILES) versions.target
@m4 -DVERSION=$(VERSION) $< > $@
bindings/java/.classstamp: $(JAVA_SOURCES)
@echo "Compiling Java source"
@rm -rf bindings/java/classes
@mkdir -p bindings/java/classes/main
@mkdir -p bindings/java/classes/test
@$(JAVAC) $(JAVAFLAGS) -d bindings/java/classes/main bindings/java/src/main/com/apple/foundationdb/*.java bindings/java/src/main/com/apple/foundationdb/async/*.java bindings/java/src/main/com/apple/foundationdb/tuple/*.java bindings/java/src/main/com/apple/foundationdb/directory/*.java bindings/java/src/main/com/apple/foundationdb/subspace/*.java
@$(JAVAC) $(JAVAFLAGS) -cp bindings/java/classes/main -d bindings/java/classes/test bindings/java/src/test/com/apple/foundationdb/test/*.java
@echo timestamp > bindings/java/.classstamp
javadoc: $(JAVA_SOURCES) bindings/java/src/main/overview.html
@echo "Generating Javadocs"
@mkdir -p $(JAVADOC_DIR)/javadoc/
@javadoc -quiet -public -notimestamp -source 1.8 -sourcepath bindings/java/src/main \
-overview bindings/java/src/main/overview.html -d $(JAVADOC_DIR)/javadoc/ \
-windowtitle "FoundationDB Java Client API" \
-doctitle "FoundationDB Java Client API" \
-link "http://docs.oracle.com/javase/8/docs/api" \
com.apple.foundationdb com.apple.foundationdb.async com.apple.foundationdb.tuple com.apple.foundationdb.directory com.apple.foundationdb.subspace
javadoc_clean:
@rm -rf $(JAVADOC_DIR)/javadoc
@rm -f bindings/java/src/main/overview.html
ifeq ($(PLATFORM),linux)
# We only need javadoc from one source
TARGETS += javadoc
CLEAN_TARGETS += javadoc_clean
# _release builds the lib on macOS and the jars (including the macOS lib) on Linux
TARGETS += fdb_java_release
CLEAN_TARGETS += fdb_java_release_clean
ifneq ($(FATJAR),)
packages/fdb-java-$(JARVER).jar: $(MAC_OBJ_JAVA) $(WINDOWS_OBJ_JAVA)
endif
bindings/java/pom.xml: bindings/java/pom.xml.in $(ALL_MAKEFILES) versions.target
@echo "Generating $@"
@m4 -DVERSION=$(JARVER) -DNAME=fdb-java $< > $@
bindings/java/fdb-java-$(APPLEJARVER).pom: bindings/java/pom.xml
@echo "Copying $@"
sed -e 's/-PRERELEASE/-SNAPSHOT/g' bindings/java/pom.xml > "$@"
packages/fdb-java-$(JARVER).jar: fdb_java versions.target
@echo "Building $@"
@rm -f $@
@rm -rf packages/jar_regular
@mkdir -p packages/jar_regular
@cd packages/jar_regular && unzip -qq $(TOPDIR)/bindings/java/foundationdb-client.jar
ifneq ($(FATJAR),)
@mkdir -p packages/jar_regular/lib/windows/amd64
@mkdir -p packages/jar_regular/lib/osx/x86_64
@cp $(MAC_OBJ_JAVA) packages/jar_regular/lib/osx/x86_64/libfdb_java.jnilib
@cp $(WINDOWS_OBJ_JAVA) packages/jar_regular/lib/windows/amd64/fdb_java.dll
endif
@cd packages/jar_regular && jar cf $(TOPDIR)/$@ *
@rm -r packages/jar_regular
@cd bindings && jar uf $(TOPDIR)/$@ ../LICENSE
packages/fdb-java-$(JARVER)-tests.jar: fdb_java versions.target
@echo "Building $@"
@rm -f $@
@cp $(TOPDIR)/bindings/java/foundationdb-tests.jar packages/fdb-java-$(JARVER)-tests.jar
packages/fdb-java-$(JARVER)-sources.jar: $(JAVA_GENERATED_SOURCES) versions.target
@echo "Building $@"
@rm -f $@
@jar cf $(TOPDIR)/$@ -C bindings/java/src/main com/apple/foundationdb
packages/fdb-java-$(JARVER)-javadoc.jar: javadoc versions.target
@echo "Building $@"
@rm -f $@
@cd $(JAVADOC_DIR)/javadoc/ && jar cf $(TOPDIR)/$@ *
@cd bindings && jar uf $(TOPDIR)/$@ ../LICENSE
packages/fdb-java-$(JARVER)-bundle.jar: packages/fdb-java-$(JARVER).jar packages/fdb-java-$(JARVER)-javadoc.jar packages/fdb-java-$(JARVER)-sources.jar bindings/java/pom.xml bindings/java/fdb-java-$(APPLEJARVER).pom versions.target
@echo "Building $@"
@rm -f $@
@rm -rf packages/bundle_regular
@mkdir -p packages/bundle_regular
@cp packages/fdb-java-$(JARVER).jar packages/fdb-java-$(JARVER)-javadoc.jar packages/fdb-java-$(JARVER)-sources.jar bindings/java/fdb-java-$(APPLEJARVER).pom packages/bundle_regular
@cp bindings/java/pom.xml packages/bundle_regular/pom.xml
@cd packages/bundle_regular && jar cf $(TOPDIR)/$@ *
@rm -rf packages/bundle_regular
fdb_java_release: packages/fdb-java-$(JARVER)-bundle.jar packages/fdb-java-$(JARVER)-tests.jar
fdb_java_release_clean:
@echo "Cleaning Java release"
@rm -f packages/fdb-java-*.jar packages/fdb-java-*-sources.jar bindings/java/pom.xml bindings/java/fdb-java-$(APPLEJARVER).pom
# Linux is where we build all the java packages
packages: fdb_java_release
packages_clean: fdb_java_release_clean
ifneq ($(FATJAR),)
MAC_OBJ_JAVA := lib/libfdb_java.jnilib-$(VERSION_ID)
WINDOWS_OBJ_JAVA := lib/fdb_java.dll-$(VERSION_ID)
endif
else ifeq ($(PLATFORM),osx)
TARGETS += fdb_java_release
CLEAN_TARGETS += fdb_java_release_clean
fdb_java_release: lib/libfdb_java.$(DLEXT)
@mkdir -p lib
@rm -f lib/libfdb_java.$(java_DLEXT)-*
@cp lib/libfdb_java.$(DLEXT) lib/libfdb_java.$(java_DLEXT)-$(VERSION_ID)
@cp lib/libfdb_java.$(DLEXT)-debug lib/libfdb_java.$(java_DLEXT)-debug-$(VERSION_ID)
fdb_java_release_clean:
@rm -f lib/libfdb_java.$(DLEXT)-*
@rm -f lib/libfdb_java.$(java_DLEXT)-*
# macOS needs to put its java lib in packages
packages: fdb_java_lib_package
fdb_java_lib_package: fdb_java_release
mkdir -p packages
cp lib/libfdb_java.$(java_DLEXT)-$(VERSION_ID) packages
cp lib/libfdb_java.$(java_DLEXT)-debug-$(VERSION_ID) packages
endif

View File

@ -30,12 +30,12 @@ import java.util.concurrent.atomic.AtomicInteger;
/**
* The starting point for accessing FoundationDB.
* <br>
* <h3>Setting API version</h3>
* <h2>Setting API version</h2>
* The FoundationDB API is accessed with a call to {@link #selectAPIVersion(int)}.
* This call is required before using any other part of the API. The call allows
* an error to be thrown at this point to prevent client code from accessing a later library
* with incorrect assumptions from the current version. The API version documented here is version
* {@code 620}.<br><br>
* {@code 630}.<br><br>
* FoundationDB encapsulates multiple versions of its interface by requiring
* the client to explicitly specify the version of the API it uses. The purpose
* of this design is to allow you to upgrade the server, client libraries, or
@ -49,11 +49,11 @@ import java.util.concurrent.atomic.AtomicInteger;
* being used to connect to the cluster. In particular, you should not advance
* the API version of your application after upgrading your client until the
* cluster has also been upgraded.<br>
* <h3>Getting a database</h3>
* <h2>Getting a database</h2>
* Once the API version has been set, the easiest way to get a {@link Database} object to use is
* to call {@link #open}.
* <br>
* <h3>Client networking</h3>
* <h2>Client networking</h2>
* The network is started either implicitly with a call to a variant of {@link #open()}
* or started explicitly with a call to {@link #startNetwork()}.
* <br>
@ -85,6 +85,8 @@ public class FDB {
private volatile boolean netStarted = false;
private volatile boolean netStopped = false;
volatile boolean warnOnUnclosed = true;
private boolean useShutdownHook = true;
private Thread shutdownHook;
private final Semaphore netRunning = new Semaphore(1);
private final NetworkOptions options;
@ -104,15 +106,8 @@ public class FDB {
* Called only once to create the FDB singleton.
*/
private FDB(int apiVersion) {
this(apiVersion, true);
}
private FDB(int apiVersion, boolean controlRuntime) {
this.apiVersion = apiVersion;
options = new NetworkOptions(this::Network_setOption);
if (controlRuntime) {
Runtime.getRuntime().addShutdownHook(new Thread(this::stopNetwork));
}
}
/**
@ -167,9 +162,9 @@ public class FDB {
* object.<br><br>
*
* Warning: When using the multi-version client API, setting an API version that
* is not supported by a particular client library will prevent that client from
* is not supported by a particular client library will prevent that client from
* being used to connect to the cluster. In particular, you should not advance
* the API version of your application after upgrading your client until the
* the API version of your application after upgrading your client until the
* cluster has also been upgraded.
*
* @param version the API version required
@ -177,13 +172,6 @@ public class FDB {
* @return the FoundationDB API object
*/
public static FDB selectAPIVersion(final int version) throws FDBException {
return selectAPIVersion(version, true);
}
/**
This function is called from C++ if the VM is controlled directly from FDB
*/
private static synchronized FDB selectAPIVersion(final int version, boolean controlRuntime) throws FDBException {
if(singleton != null) {
if(version != singleton.getAPIVersion()) {
throw new IllegalArgumentException(
@ -193,13 +181,30 @@ public class FDB {
}
if(version < 510)
throw new IllegalArgumentException("API version not supported (minimum 510)");
if(version > 620)
throw new IllegalArgumentException("API version not supported (maximum 620)");
if(version > 630)
throw new IllegalArgumentException("API version not supported (maximum 630)");
Select_API_version(version);
FDB fdb = new FDB(version, controlRuntime);
singleton = new FDB(version);
return singleton = fdb;
return singleton;
}
/**
 * Disables the shutdown hook that stops the network thread upon process shutdown. This is useful
 * if you need to run your own shutdown hook that uses the FDB instance and you need to avoid race
 * conditions with the default shutdown hook. A replacement shutdown hook should stop the network
 * thread manually by calling {@link #stopNetwork}.
 */
public synchronized void disableShutdownHook() {
	useShutdownHook = false;
	if(shutdownHook != null) {
		// If this method was called after the network thread started and the shutdown hook
		// was already installed, deregister that hook now.
		Runtime.getRuntime().removeShutdownHook(shutdownHook);
		// Release the thread reference so it can be garbage collected.
		shutdownHook = null;
	}
}
/**
@ -292,9 +297,14 @@ public class FDB {
}
/**
* Initializes networking, connects with the
* <a href="/foundationdb/administration.html#default-cluster-file" target="_blank">default fdb.cluster file</a>,
* and opens the database.
* Initializes networking if required and connects to the cluster specified by the
* <a href="/foundationdb/administration.html#default-cluster-file" target="_blank">default fdb.cluster file</a>.<br>
* <br>
* A single client can use this function multiple times to connect to different
* clusters simultaneously, with each invocation requiring its own cluster file.
* To connect to multiple clusters running at different, incompatible versions,
* the <a href="/foundationdb/api-general.html#multi-version-client-api" target="_blank">multi-version client API</a>
* must be used.
*
* @return a {@code CompletableFuture} that will be set to a FoundationDB {@link Database}
*/
@ -303,8 +313,13 @@ public class FDB {
}
/**
* Initializes networking, connects to the cluster specified by {@code clusterFilePath}
* and opens the database.
* Initializes networking if required and connects to the cluster specified by {@code clusterFilePath}.<br>
* <br>
* A single client can use this function multiple times to connect to different
* clusters simultaneously, with each invocation requiring its own cluster file.
* To connect to multiple clusters running at different, incompatible versions,
* the <a href="/foundationdb/api-general.html#multi-version-client-api" target="_blank">multi-version client API</a>
* must be used.
*
* @param clusterFilePath the
* <a href="/foundationdb/administration.html#foundationdb-cluster-file" target="_blank">cluster file</a>
@ -319,8 +334,13 @@ public class FDB {
}
/**
* Initializes networking, connects to the cluster specified by {@code clusterFilePath}
* and opens the database.
* Initializes networking if required and connects to the cluster specified by {@code clusterFilePath}.<br>
* <br>
* A single client can use this function multiple times to connect to different
* clusters simultaneously, with each invocation requiring its own cluster file.
* To connect to multiple clusters running at different, incompatible versions,
* the <a href="/foundationdb/api-general.html#multi-version-client-api" target="_blank">multi-version client API</a>
* must be used.
*
* @param clusterFilePath the
* <a href="/foundationdb/administration.html#foundationdb-cluster-file" target="_blank">cluster file</a>
@ -390,6 +410,11 @@ public class FDB {
if(netStarted) {
return;
}
if(useShutdownHook) {
// Register shutdown hook that stops network thread if user did not opt out
shutdownHook = new Thread(this::stopNetwork, "fdb-shutdown-hook");
Runtime.getRuntime().addShutdownHook(shutdownHook);
}
Network_setup();
netStarted = true;
@ -482,4 +507,4 @@ public class FDB {
private native boolean Error_predicate(int predicate, int code);
private native long Database_create(String clusterFilePath) throws FDBException;
}
}

View File

@ -70,6 +70,16 @@ class FDBTransaction extends NativeObjectWrapper implements Transaction, OptionC
return getKey_internal(selector, true);
}
@Override
public CompletableFuture<Long> getEstimatedRangeSizeBytes(byte[] begin, byte[] end) {
	// Delegate to the enclosing FDBTransaction's implementation.
	return FDBTransaction.this.getEstimatedRangeSizeBytes(begin, end);
}

@Override
public CompletableFuture<Long> getEstimatedRangeSizeBytes(Range range) {
	// Delegate to the enclosing FDBTransaction's implementation.
	return FDBTransaction.this.getEstimatedRangeSizeBytes(range);
}
///////////////////
// getRange -> KeySelectors
///////////////////
@ -257,6 +267,21 @@ class FDBTransaction extends NativeObjectWrapper implements Transaction, OptionC
}
}
@Override
public CompletableFuture<Long> getEstimatedRangeSizeBytes(byte[] begin, byte[] end) {
	// Hold the pointer read lock so the native transaction cannot be freed
	// while the JNI call is in flight.
	pointerReadLock.lock();
	try {
		return new FutureInt64(Transaction_getEstimatedRangeSizeBytes(getPtr(), begin, end), executor);
	} finally {
		pointerReadLock.unlock();
	}
}

@Override
public CompletableFuture<Long> getEstimatedRangeSizeBytes(Range range) {
	// Convenience overload: unpack the Range into its begin/end byte keys.
	return this.getEstimatedRangeSizeBytes(range.begin, range.end);
}
///////////////////
// getRange -> KeySelectors
///////////////////
@ -659,4 +684,5 @@ class FDBTransaction extends NativeObjectWrapper implements Transaction, OptionC
private native long Transaction_watch(long ptr, byte[] key) throws FDBException;
private native void Transaction_cancel(long cPtr);
private native long Transaction_getKeyLocations(long cPtr, byte[] key);
private native long Transaction_getEstimatedRangeSizeBytes(long cPtr, byte[] keyBegin, byte[] keyEnd);
}

View File

@ -44,6 +44,9 @@ abstract class NativeFuture<T> extends CompletableFuture<T> implements AutoClose
//
// Since this must be called from a constructor, we assume that close
// cannot be called concurrently.
//
// Note: This function guarantees the callback will be executed **at most once**.
//
protected void registerMarshalCallback(Executor executor) {
if(cPtr != 0) {
Future_registerCallback(cPtr, () -> executor.execute(this::marshalWhenDone));

View File

@ -184,7 +184,9 @@ public interface ReadTransaction extends ReadTransactionContext {
* <i>first</i> keys in the range. Pass {@link #ROW_LIMIT_UNLIMITED} if this query
* should not limit the number of results. If {@code reverse} is {@code true} rows
* will be limited starting at the end of the range.
* @param reverse return results starting at the end of the range in reverse order
* @param reverse return results starting at the end of the range in reverse order.
* Reading ranges in reverse is supported natively by the database and should
* have minimal extra cost.
*
* @return a handle to access the results of the asynchronous call
*/
@ -205,11 +207,22 @@ public interface ReadTransaction extends ReadTransactionContext {
* <i>first</i> keys in the range. Pass {@link #ROW_LIMIT_UNLIMITED} if this query
* should not limit the number of results. If {@code reverse} is {@code true} rows
* will be limited starting at the end of the range.
* @param reverse return results starting at the end of the range in reverse order
* @param reverse return results starting at the end of the range in reverse order.
* Reading ranges in reverse is supported natively by the database and should
* have minimal extra cost.
* @param mode provide a hint about how the results are to be used. This
* can provide speed improvements or efficiency gains based on the caller's
* knowledge of the upcoming access pattern.
*
* <p>
* When converting the result of this query to a list using {@link AsyncIterable#asList()} with the {@code ITERATOR} streaming
* mode, the query is automatically modified to fetch results in larger batches. This is done because it is
* known in advance that the {@link AsyncIterable#asList()} function will fetch all results in the range. If a limit is specified,
* the {@code EXACT} streaming mode will be used, and otherwise it will use {@code WANT_ALL}.
*
* To achieve comparable performance when iterating over an entire range without using {@link AsyncIterable#asList()}, the same
* streaming mode would need to be used.
* </p>
* @return a handle to access the results of the asynchronous call
*/
AsyncIterable<KeyValue> getRange(KeySelector begin, KeySelector end,
@ -263,7 +276,9 @@ public interface ReadTransaction extends ReadTransactionContext {
* <i>first</i> keys in the range. Pass {@link #ROW_LIMIT_UNLIMITED} if this query
* should not limit the number of results. If {@code reverse} is {@code true} rows
* will be limited starting at the end of the range.
* @param reverse return results starting at the end of the range in reverse order
* @param reverse return results starting at the end of the range in reverse order.
* Reading ranges in reverse is supported natively by the database and should
* have minimal extra cost.
*
* @return a handle to access the results of the asynchronous call
*/
@ -284,11 +299,22 @@ public interface ReadTransaction extends ReadTransactionContext {
* <i>first</i> keys in the range. Pass {@link #ROW_LIMIT_UNLIMITED} if this query
* should not limit the number of results. If {@code reverse} is {@code true} rows
* will be limited starting at the end of the range.
* @param reverse return results starting at the end of the range in reverse order
* @param reverse return results starting at the end of the range in reverse order.
* Reading ranges in reverse is supported natively by the database and should
* have minimal extra cost.
* @param mode provide a hint about how the results are to be used. This
* can provide speed improvements or efficiency gains based on the caller's
* knowledge of the upcoming access pattern.
*
* <p>
* When converting the result of this query to a list using {@link AsyncIterable#asList()} with the {@code ITERATOR} streaming
* mode, the query is automatically modified to fetch results in larger batches. This is done because it is
* known in advance that the {@link AsyncIterable#asList()} function will fetch all results in the range. If a limit is specified,
* the {@code EXACT} streaming mode will be used, and otherwise it will use {@code WANT_ALL}.
*
* To achieve comparable performance when iterating over an entire range without using {@link AsyncIterable#asList()}, the same
* streaming mode would need to be used.
* </p>
* @return a handle to access the results of the asynchronous call
*/
AsyncIterable<KeyValue> getRange(byte[] begin, byte[] end,
@ -351,7 +377,9 @@ public interface ReadTransaction extends ReadTransactionContext {
* <i>first</i> keys in the range. Pass {@link #ROW_LIMIT_UNLIMITED} if this query
* should not limit the number of results. If {@code reverse} is {@code true} rows
* will be limited starting at the end of the range.
* @param reverse return results starting at the end of the range in reverse order
* @param reverse return results starting at the end of the range in reverse order.
* Reading ranges in reverse is supported natively by the database and should
* have minimal extra cost.
*
* @return a handle to access the results of the asynchronous call
*/
@ -375,16 +403,47 @@ public interface ReadTransaction extends ReadTransactionContext {
* <i>first</i> keys in the range. Pass {@link #ROW_LIMIT_UNLIMITED} if this query
* should not limit the number of results. If {@code reverse} is {@code true} rows
* will be limited starting at the end of the range.
* @param reverse return results starting at the end of the range in reverse order
* @param reverse return results starting at the end of the range in reverse order.
* Reading ranges in reverse is supported natively by the database and should
* have minimal extra cost.
* @param mode provide a hint about how the results are to be used. This
* can provide speed improvements or efficiency gains based on the caller's
* knowledge of the upcoming access pattern.
*
* <p>
* When converting the result of this query to a list using {@link AsyncIterable#asList()} with the {@code ITERATOR} streaming
* mode, the query is automatically modified to fetch results in larger batches. This is done because it is
* known in advance that the {@link AsyncIterable#asList()} function will fetch all results in the range. If a limit is specified,
* the {@code EXACT} streaming mode will be used, and otherwise it will use {@code WANT_ALL}.
*
* To achieve comparable performance when iterating over an entire range without using {@link AsyncIterable#asList()}, the same
* streaming mode would need to be used.
* </p>
* @return a handle to access the results of the asynchronous call
*/
AsyncIterable<KeyValue> getRange(Range range,
int limit, boolean reverse, StreamingMode mode);
/**
* Gets an estimate for the number of bytes stored in the given range.
*
* @param begin the beginning of the range (inclusive)
* @param end the end of the range (exclusive)
*
* @return a handle to access the results of the asynchronous call
*/
CompletableFuture<Long> getEstimatedRangeSizeBytes(byte[] begin, byte[] end);
/**
* Gets an estimate for the number of bytes stored in the given range.
*
* @param range the range of the keys
*
* @return a handle to access the results of the asynchronous call
*/
CompletableFuture<Long> getEstimatedRangeSizeBytes(Range range);
/**
* Returns a set of options that can be set on a {@code Transaction}
*

View File

@ -157,7 +157,13 @@ public interface Transaction extends AutoCloseable, ReadTransaction, Transaction
* exclusive; that is, the key (if one exists) that is specified as the end
* of the range will NOT be cleared as part of this operation. Range clears are
* efficient with FoundationDB -- clearing large amounts of data will be fast.
* However, this will not immediately free up disk - data for the deleted range
* is cleaned up in the background.
* This will not affect the database until {@link #commit} is called.
* <br>
* For purposes of computing the transaction size, only the begin and end keys of a clear range are counted.
* The size of the data stored in the range does not count against the transaction size limit.
*
*
* @param range the range of keys to clear
*

View File

@ -817,9 +817,9 @@ public class DirectoryLayer implements Directory {
private static long unpackLittleEndian(byte[] bytes) {
assert bytes.length == 8;
int value = 0;
long value = 0;
for(int i = 0; i < 8; ++i) {
value += (bytes[i] << (i * 8));
value += (Byte.toUnsignedLong(bytes[i]) << (i * 8));
}
return value;
}

View File

@ -359,6 +359,21 @@ public class ByteArrayUtil {
return copy;
}
/**
* Computes the key that would sort immediately after {@code key}.
* {@code key} must be non-null.
*
* @param key byte array for which next key is to be computed
*
* @return a newly created byte array that would sort immediately after {@code key}
*/
public static byte[] keyAfter(byte[] key) {
byte[] copy = new byte[key.length + 1];
System.arraycopy(key, 0, copy, 0, key.length);
copy[key.length] = 0x0;
return copy;
}
/**
* Get a copy of an array, with all matching characters stripped from trailing edge.
* @param input array to copy. Must not be null.

View File

@ -39,7 +39,7 @@ import com.apple.foundationdb.Range;
* the same order in which they would sort in FoundationDB. {@code Tuple}s sort
* first by the first element, then by the second, etc. This makes the tuple layer
* ideal for building a variety of higher-level data models.<br>
* <h3>Types</h3>
* <h2>Types</h2>
* A {@code Tuple} can
* contain byte arrays ({@code byte[]}), {@link String}s, {@link Number}s, {@link UUID}s,
* {@code boolean}s, {@link List}s, {@link Versionstamp}s, other {@code Tuple}s, and {@code null}.
@ -50,7 +50,7 @@ import com.apple.foundationdb.Range;
* a {@code long} integral value, so the range will be constrained to
* [{@code -2^63}, {@code 2^63-1}]. Note that for numbers outside this range the way that Java
* truncates integral values may yield unexpected results.<br>
* <h3>{@code null} values</h3>
* <h2>{@code null} values</h2>
* The FoundationDB tuple specification has a special type-code for {@code None}; {@code nil}; or,
* as Java would understand it, {@code null}.
* The behavior of the layer in the presence of {@code null} varies by type with the intention

View File

@ -2,7 +2,7 @@
<BODY>
This documents the client API for using FoundationDB from Java.<br>
<br>
<h3>Installation</h3>
<h1>Installation</h1>
FoundationDB's Java bindings rely on native libraries that are installed as part of the
FoundationDB client binaries installation (see
<a href="/foundationdb/api-general.html#installing-client-binaries" target="_blank">
@ -10,10 +10,10 @@ Installing FoundationDB client binaries</a>). The JAR can be downloaded from
<a href="https://www.foundationdb.org/download/">our website</a>
and then added to your classpath.<br>
<br>
<h3>Getting started</h3>
<h1>Getting started</h1>
To start using FoundationDB from Java, create an instance of the
{@link com.apple.foundationdb.FDB FoundationDB API interface} with the version of the
API that you want to use (this release of the FoundationDB Java API supports versions between {@code 510} and {@code 620}).
API that you want to use (this release of the FoundationDB Java API supports versions between {@code 510} and {@code 630}).
With this API object you can then open {@link com.apple.foundationdb.Cluster Cluster}s and
{@link com.apple.foundationdb.Database Database}s and start using
{@link com.apple.foundationdb.Transaction Transaction}s.
@ -29,7 +29,7 @@ import com.apple.foundationdb.tuple.Tuple;
public class Example {
public static void main(String[] args) {
FDB fdb = FDB.selectAPIVersion(620);
FDB fdb = FDB.selectAPIVersion(630);
try(Database db = fdb.open()) {
// Run an operation on the database
@ -50,7 +50,7 @@ public class Example {
}
}
</pre>
<h3>FoundationDB {@link com.apple.foundationdb.tuple Tuple API}</h3>
<h1>FoundationDB {@link com.apple.foundationdb.tuple Tuple API}</h1>
The {@link com.apple.foundationdb.tuple Tuple API} is provided with the core Java API for FoundationDB.
This layer is provided in some form in all official language bindings. It enables
cross-language support for storing and retrieving typed data from the
@ -60,7 +60,7 @@ binary data that FoundationDB supports. And, just as importantly, data packed in
and <a href="/foundationdb/data-modeling.html#data-modeling-tuples">general Tuple documentation</a>
for information about how Tuples sort and can be used to efficiently model data.
<br>
<h3>FoundationDB {@link com.apple.foundationdb.directory Directory API}</h3>
<h1>FoundationDB {@link com.apple.foundationdb.directory Directory API}</h1>
The {@link com.apple.foundationdb.directory Directory API} is provided with the core
Java API for FoundationDB. This layer is provided in some form in all official
language bindings. The FoundationDB API provides directories as a tool for

View File

@ -27,7 +27,7 @@ import com.apple.foundationdb.Database;
import com.apple.foundationdb.FDB;
public abstract class AbstractTester {
public static final int API_VERSION = 620;
public static final int API_VERSION = 630;
protected static final int NUM_RUNS = 25;
protected static final Charset ASCII = Charset.forName("ASCII");

View File

@ -223,6 +223,12 @@ public class AsyncStackTester {
inst.push(inst.readTcx.readAsync(readTr -> readTr.get((byte[]) param)));
});
}
else if (op == StackOperation.GET_ESTIMATED_RANGE_SIZE) {
List<Object> params = inst.popParams(2).join();
return inst.readTr.getEstimatedRangeSizeBytes((byte[])params.get(0), (byte[])params.get(1)).thenAcceptAsync(size -> {
inst.push("GOT_ESTIMATED_RANGE_SIZE".getBytes());
}, FDB.DEFAULT_EXECUTOR);
}
else if(op == StackOperation.GET_RANGE) {
return inst.popParams(5).thenComposeAsync(params -> {
int limit = StackUtils.getInt(params.get(2));
@ -495,6 +501,7 @@ public class AsyncStackTester {
db.options().setTransactionRetryLimit(10);
db.options().setTransactionRetryLimit(-1);
db.options().setTransactionCausalReadRisky();
db.options().setTransactionIncludePortInAddress();
tr.options().setPrioritySystemImmediate();
tr.options().setPriorityBatch();
@ -512,6 +519,7 @@ public class AsyncStackTester {
tr.options().setLogTransaction();
tr.options().setReadLockAware();
tr.options().setLockAware();
tr.options().setIncludePortInAddress();
if(!(new FDBException("Fake", 1020)).isRetryable() ||
(new FDBException("Fake", 10)).isRetryable())
@ -665,10 +673,7 @@ public class AsyncStackTester {
};
if(operations == null || ++currentOp == operations.size()) {
Transaction tr = db.createTransaction();
return tr.getRange(nextKey, endKey, 1000).asList()
.whenComplete((x, t) -> tr.close())
return db.readAsync(readTr -> readTr.getRange(nextKey, endKey, 1000).asList())
.thenComposeAsync(next -> {
if(next.size() < 1) {
//System.out.println("No key found after: " + ByteArrayUtil.printable(nextKey.getKey()));

View File

@ -33,7 +33,7 @@ public class BlockingBenchmark {
private static final int PARALLEL = 100;
public static void main(String[] args) throws InterruptedException {
FDB fdb = FDB.selectAPIVersion(620);
FDB fdb = FDB.selectAPIVersion(630);
// The cluster file DOES NOT need to be valid, although it must exist.
// This is because the database is never really contacted in this test.

View File

@ -48,7 +48,7 @@ public class ConcurrentGetSetGet {
}
public static void main(String[] args) {
try(Database database = FDB.selectAPIVersion(620).open()) {
try(Database database = FDB.selectAPIVersion(630).open()) {
new ConcurrentGetSetGet().apply(database);
}
}

View File

@ -33,7 +33,7 @@ import com.apple.foundationdb.directory.DirectorySubspace;
public class DirectoryTest {
public static void main(String[] args) throws Exception {
try {
FDB fdb = FDB.selectAPIVersion(620);
FDB fdb = FDB.selectAPIVersion(630);
try(Database db = fdb.open()) {
runTests(db);
}

View File

@ -26,7 +26,7 @@ import com.apple.foundationdb.tuple.Tuple;
public class Example {
public static void main(String[] args) {
FDB fdb = FDB.selectAPIVersion(620);
FDB fdb = FDB.selectAPIVersion(630);
try(Database db = fdb.open()) {
// Run an operation on the database

View File

@ -31,7 +31,7 @@ public class IterableTest {
public static void main(String[] args) throws InterruptedException {
final int reps = 1000;
try {
FDB fdb = FDB.selectAPIVersion(620);
FDB fdb = FDB.selectAPIVersion(630);
try(Database db = fdb.open()) {
runTests(reps, db);
}

View File

@ -34,7 +34,7 @@ import com.apple.foundationdb.tuple.ByteArrayUtil;
public class LocalityTests {
public static void main(String[] args) {
FDB fdb = FDB.selectAPIVersion(620);
FDB fdb = FDB.selectAPIVersion(630);
try(Database database = fdb.open(args[0])) {
try(Transaction tr = database.createTransaction()) {
String[] keyAddresses = LocalityUtil.getAddressesForKey(tr, "a".getBytes()).join();

View File

@ -43,7 +43,7 @@ public class ParallelRandomScan {
private static final int PARALLELISM_STEP = 5;
public static void main(String[] args) throws InterruptedException {
FDB api = FDB.selectAPIVersion(620);
FDB api = FDB.selectAPIVersion(630);
try(Database database = api.open(args[0])) {
for(int i = PARALLELISM_MIN; i <= PARALLELISM_MAX; i += PARALLELISM_STEP) {
runTest(database, i, ROWS, DURATION_MS);

View File

@ -34,7 +34,7 @@ import com.apple.foundationdb.Transaction;
import com.apple.foundationdb.async.AsyncIterable;
public class RangeTest {
private static final int API_VERSION = 620;
private static final int API_VERSION = 630;
public static void main(String[] args) {
System.out.println("About to use version " + API_VERSION);

View File

@ -34,7 +34,7 @@ public class SerialInsertion {
private static final int NODES = 1000000;
public static void main(String[] args) {
FDB api = FDB.selectAPIVersion(620);
FDB api = FDB.selectAPIVersion(630);
try(Database database = api.open()) {
long start = System.currentTimeMillis();

View File

@ -39,7 +39,7 @@ public class SerialIteration {
private static final int THREAD_COUNT = 1;
public static void main(String[] args) throws InterruptedException {
FDB api = FDB.selectAPIVersion(620);
FDB api = FDB.selectAPIVersion(630);
try(Database database = api.open(args[0])) {
for(int i = 1; i <= THREAD_COUNT; i++) {
runThreadedTest(database, i);

View File

@ -30,7 +30,7 @@ public class SerialTest {
public static void main(String[] args) throws InterruptedException {
final int reps = 1000;
try {
FDB fdb = FDB.selectAPIVersion(620);
FDB fdb = FDB.selectAPIVersion(630);
try(Database db = fdb.open()) {
runTests(reps, db);
}

View File

@ -39,7 +39,7 @@ public class SnapshotTransactionTest {
private static final Subspace SUBSPACE = new Subspace(Tuple.from("test", "conflict_ranges"));
public static void main(String[] args) {
FDB fdb = FDB.selectAPIVersion(620);
FDB fdb = FDB.selectAPIVersion(630);
try(Database db = fdb.open()) {
snapshotReadShouldNotConflict(db);
snapshotShouldNotAddConflictRange(db);

View File

@ -56,6 +56,7 @@ enum StackOperation {
GET_COMMITTED_VERSION,
GET_APPROXIMATE_SIZE,
GET_VERSIONSTAMP,
GET_ESTIMATED_RANGE_SIZE,
SET_READ_VERSION,
ON_ERROR,
SUB,

Some files were not shown because too many files have changed in this diff Show More