Merge branch 'release-6.3' into release-6-2-2020-10-23

This commit is contained in:
Russell Sears 2020-10-23 12:24:48 -07:00 committed by GitHub
commit 92a5178b4a
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
665 changed files with 66121 additions and 22449 deletions

10
.gitignore vendored
View File

@ -31,8 +31,10 @@ bindings/ruby/lib/fdboptions.rb
bindings/ruby/fdb.gemspec
fdbclient/vexillographer/obj/
fdbrpc/hgVersion*.h
fdbrpc/SourceVersion*.h
fdbrpc/libeio/config.h
flow/hgVersion*.h
flow/SourceVersion*.h
generated.mk
versions.h
packaging/msi/FDBInstaller.wix*
@ -79,6 +81,11 @@ compile_commands.json
flow/actorcompiler/obj
flow/coveragetool/obj
# IDE indexing (commonly used tools)
/compile_commands.json
/.ccls-cache
/.clangd
# Temporary and user configuration files
*~
*.orig
@ -87,5 +94,4 @@ flow/coveragetool/obj
.envrc
.DS_Store
temp/
/compile_commands.json
/.ccls-cache
/versions.target

View File

@ -27,11 +27,6 @@ Mark Adler, Robert Važan (CRC-32C [Castagnoli] for C++ and .NET)
3. This notice may not be removed or altered from any source distribution.
Steven J. Bethard (argparse.py from https://code.google.com/p/argparse/)
argparse is licensed under the Python license, see:
https://code.google.com/p/argparse/source/browse/LICENSE.txt and
https://code.google.com/p/argparse/source/browse/doc/source/Python-License.txt
Russ Cox (asm.S from libcoroutine)
This software was developed as part of a project at MIT.
@ -484,3 +479,60 @@ SHIBUKAWA Yoshiki (sphinxcontrib-rubydomain)
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Armon Dadgar (ART)
Copyright (c) 2012, Armon Dadgar
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the organization nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL ARMON DADGAR BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Copyright (C) 2009 The Guava Authors
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License
is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
or implied. See the License for the specific language governing permissions and limitations under
the License.
sse2neon Authors (sse2neon)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@ -16,9 +16,9 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
cmake_minimum_required(VERSION 3.12)
cmake_minimum_required(VERSION 3.13)
project(foundationdb
VERSION 6.2.28
VERSION 6.3.10
DESCRIPTION "FoundationDB is a scalable, fault-tolerant, ordered key-value store with full ACID transactions."
HOMEPAGE_URL "http://www.foundationdb.org/"
LANGUAGES C CXX ASM)
@ -29,18 +29,23 @@ if("${PROJECT_SOURCE_DIR}" STREQUAL "${PROJECT_BINARY_DIR}")
message(FATAL_ERROR "In-source builds are forbidden")
endif()
set(OPEN_FOR_IDE OFF CACHE BOOL "Open this in an IDE (won't compile/link)")
if(NOT CMAKE_BUILD_TYPE AND NOT CMAKE_CONFIGURATION_TYPES)
message(STATUS "Setting build type to 'Release' as none was specified")
set(CMAKE_BUILD_TYPE Release CACHE STRING "Choose the type of build" FORCE)
set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS "Debug" "Release"
"MinSizeRel" "RelWithDebInfo")
if (OPEN_FOR_IDE)
message(STATUS "Defaulting build type to 'Debug' for OPEN_FOR_IDE")
set(CMAKE_BUILD_TYPE Debug CACHE STRING "Choose the type of build" FORCE)
else()
message(STATUS "Setting build type to 'Release' as none was specified")
set(CMAKE_BUILD_TYPE Release CACHE STRING "Choose the type of build" FORCE)
set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS "Debug" "Release"
"MinSizeRel" "RelWithDebInfo")
endif()
endif()
set(EXECUTABLE_OUTPUT_PATH ${PROJECT_BINARY_DIR}/bin)
set(LIBRARY_OUTPUT_PATH ${PROJECT_BINARY_DIR}/lib)
set(OPEN_FOR_IDE OFF CACHE BOOL "Open this in an IDE (won't compile/link)")
################################################################################
# Packages used for bindings
################################################################################
@ -75,42 +80,10 @@ message(STATUS "Current git version ${CURRENT_GIT_VERSION}")
# Version information
################################################################################
if(NOT WIN32)
add_custom_target(version_file ALL DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/versions.target)
execute_process(
COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/build/get_version.sh ${CMAKE_CURRENT_SOURCE_DIR}/versions.target
OUTPUT_VARIABLE FDB_VERSION_WNL)
execute_process(
COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/build/get_package_name.sh ${CMAKE_CURRENT_SOURCE_DIR}/versions.target
OUTPUT_VARIABLE FDB_PACKAGE_NAME_WNL)
string(STRIP "${FDB_VERSION_WNL}" FDB_VERSION_TARGET_FILE)
string(STRIP "${FDB_PACKAGE_NAME_WNL}" FDB_PACKAGE_NAME_TARGET_FILE)
endif()
set(USE_VERSIONS_TARGET OFF CACHE BOOL "Use the deprecated versions.target file")
if(USE_VERSIONS_TARGET)
if (WIN32)
message(FATAL_ERROR "USE_VERSION_TARGET us not supported on Windows")
endif()
set(FDB_VERSION ${FDB_VERION_TARGET_FILE})
set(FDB_PACKAGE_NAME ${FDB_PACKAGE_NAME_TARGET_FILE})
set(FDB_VERSION_PLAIN ${FDB_VERSION})
else()
set(FDB_PACKAGE_NAME "${PROJECT_VERSION_MAJOR}.${PROJECT_VERSION_MINOR}")
set(FDB_VERSION ${PROJECT_VERSION})
set(FDB_VERSION_PLAIN ${FDB_VERSION})
if(NOT WIN32)
# we need to assert that the cmake version is in sync with the target version
if(NOT (FDB_VERSION STREQUAL FDB_VERSION_TARGET_FILE))
message(SEND_ERROR "The project version in cmake is set to ${FDB_VERSION},\
but versions.target has it at ${FDB_VERSION_TARGET_FILE}")
endif()
if(NOT (FDB_PACKAGE_NAME STREQUAL FDB_PACKAGE_NAME_TARGET_FILE))
message(SEND_ERROR "The package name in cmake is set to ${FDB_PACKAGE_NAME},\
but versions.target has it set to ${FDB_PACKAGE_NAME_TARGET_FILE}")
endif()
endif()
endif()
set(FDB_PACKAGE_NAME "${PROJECT_VERSION_MAJOR}.${PROJECT_VERSION_MINOR}")
set(FDB_VERSION ${PROJECT_VERSION})
set(FDB_VERSION_PLAIN ${FDB_VERSION})
configure_file(${CMAKE_SOURCE_DIR}/versions.target.cmake ${CMAKE_SOURCE_DIR}/versions.target)
message(STATUS "FDB version is ${FDB_VERSION}")
message(STATUS "FDB package name is ${FDB_PACKAGE_NAME}")
@ -173,6 +146,10 @@ set(SEED "0x${SEED_}" CACHE STRING "Random seed for testing")
# components
################################################################################
if(CMAKE_SYSTEM_NAME STREQUAL "FreeBSD")
include_directories(/usr/local/include)
endif()
include(CompileBoost)
add_subdirectory(flow)
add_subdirectory(fdbrpc)
@ -184,15 +161,15 @@ if(NOT WIN32)
else()
add_subdirectory(fdbservice)
endif()
add_subdirectory(fdbbackup)
add_subdirectory(contrib)
add_subdirectory(tests)
if(WITH_PYTHON)
add_subdirectory(bindings)
endif()
add_subdirectory(fdbbackup)
add_subdirectory(tests)
if(WITH_DOCUMENTATION)
add_subdirectory(documentation)
endif()
add_subdirectory(monitoring)
if(WIN32)
add_subdirectory(packaging/msi)
@ -200,6 +177,10 @@ else()
include(CPack)
endif()
if(CMAKE_SYSTEM_NAME STREQUAL "FreeBSD")
add_link_options(-lexecinfo)
endif()
################################################################################
# process compile commands for IDE
################################################################################

View File

@ -8,5 +8,5 @@ set(SRCS
FDBLibTLSVerify.cpp
FDBLibTLSVerify.h)
add_library(FDBLibTLS ${SRCS})
target_link_libraries(FDBLibTLS PUBLIC LibreSSL boost_target PRIVATE flow)
add_library(FDBLibTLS STATIC ${SRCS})
target_link_libraries(FDBLibTLS PUBLIC OpenSSL::SSL boost_target PRIVATE flow)

View File

@ -1,33 +0,0 @@
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="14.1" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup Label="ProjectConfigurations">
<ProjectConfiguration Include="Debug|X64">
<Configuration>Debug</Configuration>
<Platform>X64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|X64">
<Configuration>Release</Configuration>
<Platform>X64</Platform>
</ProjectConfiguration>
</ItemGroup>
<ItemGroup>
<ClInclude Include="FDBLibTLSPlugin.h" />
<ClCompile Include="FDBLibTLSPlugin.cpp" />
<ClInclude Include="FDBLibTLSPolicy.h" />
<ClCompile Include="FDBLibTLSPolicy.cpp" />
<ClInclude Include="FDBLibTLSVerify.h" />
<ClCompile Include="FDBLibTLSVerify.cpp" />
<ClInclude Include="FDBLibTLSSession.h" />
<ClCompile Include="FDBLibTLSSession.cpp" />
</ItemGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|X64'" Label="Configuration">
<ConfigurationType>StaticLibrary</ConfigurationType>
<CharacterSet>MultiByte</CharacterSet>
<PlatformToolset>v141</PlatformToolset>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|X64'" Label="Configuration">
<ConfigurationType>StaticLibrary</ConfigurationType>
<CharacterSet>MultiByte</CharacterSet>
<PlatformToolset>v141</PlatformToolset>
</PropertyGroup>
</Project>

View File

@ -300,7 +300,7 @@ bool FDBLibTLSPolicy::set_verify_peers(int count, const uint8_t* verify_peers[],
}
Reference<FDBLibTLSVerify> verify = Reference<FDBLibTLSVerify>(new FDBLibTLSVerify(verifyString.substr(start)));
verify_rules.push_back(verify);
} catch ( const std::runtime_error& e ) {
} catch ( const std::runtime_error& ) {
verify_rules.clear();
std::string verifyString((const char*)verify_peers[i], verify_peers_len[i]);
TraceEvent(SevError, "FDBLibTLSVerifyPeersParseError").detail("Config", verifyString);

View File

@ -1,109 +0,0 @@
PROJECTPATH = $(dir $(realpath $(firstword $(MAKEFILE_LIST))))
PLUGINPATH = $(PROJECTPATH)/$(PLUGIN)
CFLAGS ?= -O2 -g
CXXFLAGS ?= -std=c++0x
CFLAGS += -I/usr/local/include -I../flow -I../fdbrpc
LDFLAGS += -L/usr/local/lib
LIBS += -ltls -lssl -lcrypto
PLATFORM := $(shell uname)
ifneq ($(PLATFORM),Darwin)
PLATFORM := $(shell uname -o)
endif
ifeq ($(PLATFORM),Cygwin)
HOST := x86_64-w64-mingw32
CC := $(HOST)-gcc
CXX := $(HOST)-g++
STRIP := $(HOST)-strip --strip-all
DYEXT = dll
PLUGINPATH = $(PLUGIN)
LIBS += -static-libstdc++ -static-libgcc
LIBS += -lws2_32
LINK_LDFLAGS = -shared
LINK_LDFLAGS += -Wl,-soname,$(PLUGIN)
LINK_LDFLAGS += -Wl,--version-script=FDBLibTLS.map
LINK_LDFLAGS += -Wl,-Bstatic $(LIBS) -Wl,-Bdynamic
else ifeq ($(PLATFORM),Darwin)
CC := clang
CXX := clang++
STRIP := strip -S -x
CFLAGS += -fPIC
DYEXT = dylib
vpath %.a /usr/local/lib
.LIBPATTERNS = lib%.a lib%.dylib lib%.so
LINK_LDFLAGS = -shared
LINK_LDFLAGS += -Wl,-exported_symbols_list,FDBLibTLS.symbols
LINK_LDFLAGS += -Wl,-dylib_install_name,$(PLUGIN)
LINK_LDFLAGS += $(LIBS)
else ifeq ($(PLATFORM),GNU/Linux)
CC := clang
CXX := clang++
STRIP := strip --strip-all
CFLAGS += -fPIC
DYEXT = so
LIBS += -static-libstdc++ -static-libgcc -lrt
LINK_LDFLAGS = -shared
LINK_LDFLAGS += -Wl,-soname,$(PLUGIN)
LINK_LDFLAGS += -Wl,--version-script=FDBLibTLS.map
LINK_LDFLAGS += -Wl,-Bstatic $(LIBS) -Wl,-Bdynamic
else
$(error Unknown platform $(PLATFORM))
endif
PLUGIN := FDBLibTLS.$(DYEXT)
OBJECTS := FDBLibTLSPlugin.o FDBLibTLSPolicy.o FDBLibTLSSession.o FDBLibTLSVerify.o
LINKLINE := $(CXXFLAGS) $(CFLAGS) $(LDFLAGS) $(OBJECTS) $(LINK_LDFLAGS) -o $(PLUGIN)
all: $(PLUGIN)
build-depends-linux:
apt install clang make libboost-dev
clean:
@rm -f *.o *.d $(PLUGIN) plugin-test verify-test
@rm -rf *.dSYM
DEPS := $(patsubst %.o,%.d,$(OBJECTS))
-include $(DEPS)
$(OBJECTS): %.o: %.cpp Makefile
@echo "Compiling $<"
@$(CXX) $(CXXFLAGS) $(CFLAGS) $(INCLUDES) -c $< -o $@ -MD -MP
$(PLUGIN): $(OBJECTS) Makefile
@echo "Linking $@"
@$(CXX) $(LINKLINE)
@echo "Stripping $@"
@$(STRIP) $@
test: test-plugin test-verify
test-plugin: plugin-test.cpp $(PLUGIN) Makefile
@echo "Compiling plugin-test"
@$(CXX) $(CXXFLAGS) $(CFLAGS) plugin-test.cpp -ldl -o plugin-test
@echo "Running plugin-test..."
@$(PROJECTPATH)/plugin-test $(PLUGINPATH)
test-verify: verify-test.cpp $(OBJECTS) Makefile
@echo "Compiling verify-test"
@$(CXX) $(CXXFLAGS) $(CFLAGS) $(LDFLAGS) $(OBJECTS) verify-test.cpp $(LIBS) -o verify-test
@echo "Running verify-test..."
@$(PROJECTPATH)/verify-test

View File

@ -1,28 +0,0 @@
#
# local.mk
#
# This source file is part of the FoundationDB open source project
#
# Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- mode: makefile; -*-
FDBLibTLS_BUILD_SOURCES +=
FDBLibTLS_CFLAGS := -fPIC -I/usr/local/include -isystem$(BOOSTDIR) -I. -DUSE_UCONTEXT
lib/libFDBLibTLS.a: bin/coverage.FDBLibTLS.xml

243
Makefile
View File

@ -1,243 +0,0 @@
export
PLATFORM := $(shell uname)
ARCH := $(shell uname -m)
TOPDIR := $(shell pwd)
# Allow custom libc++ hack for Ubuntu
ifeq ("$(wildcard /etc/centos-release)", "")
LIBSTDCPP_HACK ?= 1
endif
ifeq ($(ARCH),x86_64)
ARCH := x64
else
$(error Not prepared to compile on $(ARCH))
endif
MONO := $(shell which mono 2>/dev/null)
ifeq ($(MONO),)
MONO := /usr/bin/mono
endif
MCS := $(shell which mcs 2>/dev/null)
ifeq ($(MCS),)
MCS := $(shell which dmcs 2>/dev/null)
endif
ifeq ($(MCS),)
MCS := /usr/bin/mcs
endif
CFLAGS := -Werror -Wno-error=format -fPIC -DNO_INTELLISENSE -fvisibility=hidden -DNDEBUG=1 -Wreturn-type -fno-omit-frame-pointer
ifeq ($(RELEASE),true)
CFLAGS += -DFDB_CLEAN_BUILD
endif
ifeq ($(NIGHTLY),true)
CFLAGS += -DFDB_CLEAN_BUILD
endif
BOOST_BASENAME ?= boost_1_67_0
ifeq ($(PLATFORM),Linux)
PLATFORM := linux
CC ?= gcc
CXX ?= g++
ifneq '' '$(findstring clang++,$(CXX))'
CXXFLAGS += -Wno-undefined-var-template -Wno-unknown-warning-option -Wno-unused-command-line-argument
endif
CXXFLAGS += -std=c++17
BOOST_BASEDIR ?= /opt
TLS_LIBDIR ?= /usr/local/lib64
DLEXT := so
java_DLEXT := so
TARGET_LIBC_VERSION ?= 2.11
else ifeq ($(PLATFORM),Darwin)
PLATFORM := osx
CC := /usr/bin/clang
CXX := /usr/bin/clang
CFLAGS += -mmacosx-version-min=10.14 -stdlib=libc++
CXXFLAGS += -mmacosx-version-min=10.14 -std=c++17 -stdlib=libc++ -msse4.2 -Wno-undefined-var-template -Wno-unknown-warning-option
.LIBPATTERNS := lib%.dylib lib%.a
BOOST_BASEDIR ?= ${HOME}
TLS_LIBDIR ?= /usr/local/lib64
DLEXT := dylib
java_DLEXT := jnilib
else
$(error Not prepared to compile on platform $(PLATFORM))
endif
BOOSTDIR ?= ${BOOST_BASEDIR}/${BOOST_BASENAME}
CCACHE := $(shell which ccache 2>/dev/null)
ifneq ($(CCACHE),)
CCACHE_CC := $(CCACHE) $(CC)
CCACHE_CXX := $(CCACHE) $(CXX)
else
CCACHE_CC := $(CC)
CCACHE_CXX := $(CXX)
endif
# Default variables don't get pushed into the environment, but scripts in build/
# rely on the existence of CC in the environment.
ifeq ($(origin CC), default)
CC := $(CC)
endif
ACTORCOMPILER := bin/actorcompiler.exe
# UNSTRIPPED := 1
# Normal optimization level
CFLAGS += -O2
# Or turn off optimization entirely
# CFLAGS += -O0
# Debugging symbols are a good thing (and harmless, since we keep them
# in external debug files)
CFLAGS += -g
# valgrind-compatibile builds are enabled by uncommenting lines in valgind.mk
# Define the TLS compilation and link variables
ifdef TLS_DISABLED
CFLAGS += -DTLS_DISABLED
FDB_TLS_LIB :=
TLS_LIBS :=
else
FDB_TLS_LIB :=
TLS_LIBS += $(addprefix $(TLS_LIBDIR)/,libssl.a libcrypto.a)
endif
CXXFLAGS += -Wno-deprecated -DBOOST_ERROR_CODE_HEADER_ONLY -DBOOST_SYSTEM_NO_DEPRECATED
LDFLAGS :=
LIBS :=
STATIC_LIBS :=
# Add library search paths (that aren't -Llib) to the VPATH
VPATH += $(addprefix :,$(filter-out lib,$(patsubst -L%,%,$(filter -L%,$(LDFLAGS)))))
CS_PROJECTS := flow/actorcompiler flow/coveragetool fdbclient/vexillographer
CPP_PROJECTS := flow fdbrpc fdbclient fdbbackup fdbserver fdbcli bindings/c bindings/java fdbmonitor bindings/flow/tester bindings/flow
OTHER_PROJECTS := bindings/python bindings/ruby bindings/go
CS_MK_GENERATED := $(CS_PROJECTS:=/generated.mk)
CPP_MK_GENERATED := $(CPP_PROJECTS:=/generated.mk)
MK_GENERATED := $(CS_MK_GENERATED) $(CPP_MK_GENERATED)
# build/valgrind.mk needs to be included before any _MK_GENERATED (which in turn includes local.mk)
MK_INCLUDE := build/scver.mk build/valgrind.mk $(CS_MK_GENERATED) $(CPP_MK_GENERATED) $(OTHER_PROJECTS:=/include.mk) build/packages.mk
ALL_MAKEFILES := Makefile $(MK_INCLUDE) $(patsubst %/generated.mk,%/local.mk,$(MK_GENERATED))
TARGETS =
.PHONY: clean all Makefiles
default: fdbserver fdbbackup fdbcli fdb_c fdb_python fdb_python_sdist
all: $(CS_PROJECTS) $(CPP_PROJECTS) $(OTHER_PROJECTS)
# These are always defined and ready to use. Any target that uses them and needs them up to date
# should depend on versions.target
VERSION := $(shell cat versions.target | grep '<Version>' | sed -e 's,^[^>]*>,,' -e 's,<.*,,')
PACKAGE_NAME := $(shell cat versions.target | grep '<PackageName>' | sed -e 's,^[^>]*>,,' -e 's,<.*,,')
versions.h: Makefile versions.target
@rm -f $@
ifeq ($(RELEASE),true)
@echo "#define FDB_VT_VERSION \"$(VERSION)\"" >> $@
else
@echo "#define FDB_VT_VERSION \"$(VERSION)-PRERELEASE\"" >> $@
endif
@echo "#define FDB_VT_PACKAGE_NAME \"$(PACKAGE_NAME)\"" >> $@
bindings: fdb_c fdb_python fdb_ruby fdb_java fdb_flow fdb_flow_tester fdb_go fdb_go_tester fdb_c_tests
Makefiles: $(MK_GENERATED)
$(CS_MK_GENERATED): build/csprojtom4.py build/csproj.mk Makefile
@echo "Creating $@"
@python build/csprojtom4.py $(@D)/*.csproj | m4 -DGENDIR="$(@D)" -DGENNAME=`basename $(@D)/*.csproj .csproj` - build/csproj.mk > $(@D)/generated.mk
$(CPP_MK_GENERATED): build/vcxprojtom4.py build/vcxproj.mk Makefile
@echo "Creating $@"
@python build/vcxprojtom4.py $(@D)/*.vcxproj | m4 -DGENDIR="$(@D)" -DGENNAME=`basename $(@D)/*.vcxproj .vcxproj` - build/vcxproj.mk > $(@D)/generated.mk
DEPSDIR := .deps
OBJDIR := .objs
CMDDIR := .cmds
COMPILE_COMMANDS_JSONS := $(addprefix $(CMDDIR)/,$(addsuffix /compile_commands.json,${CPP_PROJECTS}))
compile_commands.json: build/concatinate_jsons.py ${COMPILE_COMMANDS_JSONS}
@build/concatinate_jsons.py ${COMPILE_COMMANDS_JSONS}
include $(MK_INCLUDE)
clean: $(CLEAN_TARGETS) docpreview_clean
@echo "Cleaning toplevel"
@rm -rf $(OBJDIR)
@rm -rf $(DEPSDIR)
@rm -rf lib/
@rm -rf bin/coverage.*.xml
@rm -rf $(CMDDIR) compile_commands.json
@find . -name "*.g.cpp" -exec rm -f {} \; -or -name "*.g.h" -exec rm -f {} \;
targets:
@echo "Available targets:"
@for i in $(sort $(TARGETS)); do echo " $$i" ; done
@echo "Append _clean to clean specific target."
lib/libstdc++.a: $(shell $(CC) -print-file-name=libstdc++_pic.a)
@echo "Frobnicating $@"
@mkdir -p lib
@rm -rf .libstdc++
@mkdir .libstdc++
@(cd .libstdc++ && ar x $<)
@for i in .libstdc++/*.o ; do \
nm $$i | grep -q \@ || continue ; \
nm $$i | awk '$$3 ~ /@@/ { COPY = $$3; sub(/@@.*/, "", COPY); print $$3, COPY; }' > .libstdc++/replacements ; \
objcopy --redefine-syms=.libstdc++/replacements $$i $$i.new && mv $$i.new $$i ; \
rm .libstdc++/replacements ; \
nm $$i | awk '$$3 ~ /@/ { print $$3; }' > .libstdc++/deletes ; \
objcopy --strip-symbols=.libstdc++/deletes $$i $$i.new && mv $$i.new $$i ; \
rm .libstdc++/deletes ; \
done
@ar rcs $@ .libstdc++/*.o
@rm -r .libstdc++
docpreview: javadoc
@echo "Generating docpreview"
@TARGETS= $(MAKE) -C documentation docpreview
docpreview_clean:
@echo "Cleaning docpreview"
@CLEAN_TARGETS= $(MAKE) -C documentation -s --no-print-directory docpreview_clean
packages/foundationdb-docs-$(VERSION).tar.gz: FORCE javadoc
@echo "Packaging documentation"
@TARGETS= $(MAKE) -C documentation docpackage
@mkdir -p packages
@rm -f packages/foundationdb-docs-$(VERSION).tar.gz
@cp documentation/sphinx/.dist/foundationdb-docs-$(VERSION).tar.gz packages/foundationdb-docs-$(VERSION).tar.gz
docpackage: packages/foundationdb-docs-$(VERSION).tar.gz
FORCE:
.SECONDEXPANSION:
bin/coverage.%.xml: bin/coveragetool.exe $$(%_ALL_SOURCES)
@echo "Creating $@"
@$(MONO) bin/coveragetool.exe $@ $(filter-out $<,$^) >/dev/null
$(CPP_MK_GENERATED): $$(@D)/*.vcxproj
$(CS_MK_GENERATED): $$(@D)/*.csproj

163
README.md
View File

@ -20,7 +20,7 @@ Contributing to FoundationDB can be in contributions to the code base, sharing y
### Binary downloads
Developers interested in using the FoundationDB store for an application can get started easily by downloading and installing a binary package. Please see the [downloads page](https://www.foundationdb.org/download/) for a list of available packages.
Developers interested in using FoundationDB can get started by downloading and installing a binary package. Please see the [downloads page](https://www.foundationdb.org/download/) for a list of available packages.
### Compiling from source
@ -28,40 +28,24 @@ Developers interested in using the FoundationDB store for an application can get
Developers on an OS for which there is no binary package, or who would like
to start hacking on the code, can get started by compiling from source.
Currently there are two build systems: a collection of Makefiles and a
CMake-based build system. Both of them should currently work for most users,
and CMake should be the preferred choice as it will eventually become the only
build system available.
The official docker image for building is `foundationdb/foundationdb-build`. It has all dependencies installed. To build outside the official docker image you'll need at least these dependencies:
## CMake
1. Install cmake Version 3.13 or higher [CMake](https://cmake.org/)
1. Install [Mono](http://www.mono-project.com/download/stable/)
1. Install [Ninja](https://ninja-build.org/) (optional, but recommended)
To build with CMake, generally the following is required (works on Linux and
Mac OS - for Windows see below):
If compiling for local development, please set `-DUSE_WERROR=ON` in
cmake. Our CI compiles with `-Werror` on, so this way you'll find out about
compiler warnings that break the build earlier.
Once you have your dependencies, you can run cmake and then build:
1. Check out this repository.
1. Install cmake Version 3.12 or higher [CMake](https://cmake.org/)
1. Download version 1.67 of [Boost](https://sourceforge.net/projects/boost/files/boost/1.67.0/).
1. Unpack boost (you don't need to compile it)
1. Install [Mono](http://www.mono-project.com/download/stable/).
1. Install a [JDK](http://www.oracle.com/technetwork/java/javase/downloads/index.html). FoundationDB currently builds with Java 8.
1. Create a build directory (you can have the build directory anywhere you
like): `mkdir build`
1. `cd build`
1. `cmake -DBOOST_ROOT=<PATH_TO_BOOST> <PATH_TO_FOUNDATIONDB_DIRECTORY>`
1. `make`
CMake will try to find its dependencies. However, for LibreSSL this can be often
problematic (especially if OpenSSL is installed as well). For that we recommend
passing the argument `-DLibreSSL_ROOT` to cmake. So, for example, if you
LibreSSL is installed under /usr/local/libressl-2.8.3, you should call cmake like
this:
```
cmake -DLibreSSL_ROOT=/usr/local/libressl-2.8.3/ ../foundationdb
```
FoundationDB will build just fine without LibreSSL, however, the resulting
binaries won't support TLS connections.
like). There is currently a directory in the source tree called build, but you should not use it. See [#3098](https://github.com/apple/foundationdb/issues/3098)
1. `cd <PATH_TO_BUILD_DIRECTORY>`
1. `cmake -G Ninja <PATH_TO_FOUNDATIONDB_DIRECTORY>`
1. `ninja # If this crashes it probably ran out of memory. Try ninja -j1`
### Language Bindings
@ -116,8 +100,38 @@ create a XCode-project with the following command:
cmake -G Xcode -DOPEN_FOR_IDE=ON <FDB_SOURCE_DIRECTORY>
```
You should create a second build-directory which you will use for building
(probably with make or ninja) and debugging.
You should create a second build-directory which you will use for building and debugging.
#### FreeBSD
1. Check out this repo on your server.
1. Install compile-time dependencies from ports.
1. (Optional) Use tmpfs & ccache for significantly faster repeat builds
1. (Optional) Install a [JDK](https://www.freshports.org/java/openjdk8/)
for Java Bindings. FoundationDB currently builds with Java 8.
1. Navigate to the directory where you checked out the foundationdb
repo.
1. Build from source.
```shell
sudo pkg install -r FreeBSD \
shells/bash devel/cmake devel/ninja devel/ccache \
lang/mono lang/python3 \
devel/boost-libs devel/libeio \
security/openssl
mkdir .build && cd .build
cmake -G Ninja \
-DUSE_CCACHE=on \
-DDISABLE_TLS=off \
-DUSE_DTRACE=off \
..
ninja -j 10
# run fast tests
ctest -L fast
# run all tests
ctest --output-on-failure -v
```
### Linux
@ -125,35 +139,27 @@ There are no special requirements for Linux. A docker image can be pulled from
`foundationdb/foundationdb-build` that has all of FoundationDB's dependencies
pre-installed, and is what the CI uses to build and test PRs.
If you want to create a package you have to tell cmake what platform it is for.
And then you can build by simply calling `cpack`. So for debian, call:
```
cmake -DINSTALL_LAYOUT=DEB <FDB_SOURCE_DIR>
make
cpack
cmake -G Ninja <FDB_SOURCE_DIR>
ninja
cpack -G DEB
```
For RPM simply replace `DEB` with `RPM`.
### MacOS
The build under MacOS will work the same way as on Linux. To get LibreSSL and boost you
can use [Homebrew](https://brew.sh/). LibreSSL will not be installed in
`/usr/local` instead it will stay in `/usr/local/Cellar`. So the cmake command
will look something like this:
The build under MacOS will work the same way as on Linux. To get boost and ninja you can use [Homebrew](https://brew.sh/).
```sh
cmake -DLibreSSL_ROOT=/usr/local/Cellar/libressl/2.8.3 <PATH_TO_FOUNDATIONDB_SOURCE>
cmake -G Ninja <PATH_TO_FOUNDATIONDB_SOURCE>
```
To generate a installable package, you have to call CMake with the corresponding
arguments and then use cpack to generate the package:
To generate a installable package, you can use cpack:
```sh
cmake -DINSTALL_LAYOUT=OSX <FDB_SOURCE_DIR>
make
cpack
ninja
cpack -G productbuild
```
### Windows
@ -163,15 +169,15 @@ that Visual Studio is used to compile.
1. Install Visual Studio 2017 (Community Edition is tested)
1. Install cmake Version 3.12 or higher [CMake](https://cmake.org/)
1. Download version 1.67 of [Boost](https://sourceforge.net/projects/boost/files/boost/1.67.0/).
1. Download version 1.72 of [Boost](https://dl.bintray.com/boostorg/release/1.72.0/source/boost_1_72_0.tar.bz2)
1. Unpack boost (you don't need to compile it)
1. Install [Mono](http://www.mono-project.com/download/stable/).
1. Install a [JDK](http://www.oracle.com/technetwork/java/javase/downloads/index.html). FoundationDB currently builds with Java 8.
1. Install [Mono](http://www.mono-project.com/download/stable/)
1. (Optional) Install a [JDK](http://www.oracle.com/technetwork/java/javase/downloads/index.html). FoundationDB currently builds with Java 8
1. Set `JAVA_HOME` to the unpacked location and JAVA_COMPILE to
`$JAVA_HOME/bin/javac`.
1. Install [Python](https://www.python.org/downloads/) if it is not already installed by Visual Studio.
1. Install [Python](https://www.python.org/downloads/) if it is not already installed by Visual Studio
1. (Optional) Install [WIX](http://wixtoolset.org/). Without it Visual Studio
won't build the Windows installer.
won't build the Windows installer
1. Create a build directory (you can have the build directory anywhere you
like): `mkdir build`
1. `cd build`
@ -183,56 +189,7 @@ that Visual Studio is used to compile.
Studio will only know about the generated files. `msbuild` is located at
`c:\Program Files (x86)\MSBuild\14.0\Bin\MSBuild.exe` for Visual Studio 15.
If you want TLS support to be enabled under Windows you currently have to build
and install LibreSSL yourself as the newer LibreSSL versions are not provided
for download from the LibreSSL homepage. To build LibreSSL:
1. Download and unpack libressl (>= 2.8.2)
2. `cd libressl-2.8.2`
3. `mkdir build`
4. `cd build`
5. `cmake -G "Visual Studio 15 2017 Win64" ..`
6. Open the generated `LibreSSL.sln` in Visual Studio as administrator (this is
necessary for the install)
7. Build the `INSTALL` project in `Release` mode
This will install LibreSSL under `C:\Program Files\LibreSSL`. After that `cmake`
will automatically find it and build with TLS support.
If you installed WIX before running `cmake` you should find the
`FDBInstaller.msi` in your build directory under `packaging/msi`.
## Makefile
#### MacOS
1. Check out this repo on your Mac.
1. Install the Xcode command-line tools.
1. Download version 1.67.0 of [Boost](https://sourceforge.net/projects/boost/files/boost/1.67.0/).
1. Set the `BOOSTDIR` environment variable to the location containing this boost installation.
1. Install [Mono](http://www.mono-project.com/download/stable/).
1. Install a [JDK](http://www.oracle.com/technetwork/java/javase/downloads/index.html). FoundationDB currently builds with Java 8.
1. Navigate to the directory where you checked out the foundationdb repo.
1. Run `make`.
#### Linux
1. Install [Docker](https://www.docker.com/).
1. Check out the foundationdb repo.
1. Run the docker image interactively [Docker Run](https://docs.docker.com/engine/reference/run/#general-form) with the directory containing the foundationdb repo mounted [Docker Mounts](https://docs.docker.com/storage/volumes/).
```shell
docker run -it -v '/local/dir/path/foundationdb:/docker/dir/path/foundationdb' foundationdb/foundationdb-build:latest
```
1. Run `$ scl enable devtoolset-8 python27 rh-python36 rh-ruby24 -- bash` within the running container. This enables a more modern compiler, which is required to build FoundationDB.
1. Navigate to the container's mounted directory which contains the foundationdb repo.
```shell
cd /docker/dir/path/foundationdb
```
1. Run `make`.
This will build the fdbserver binary and the python bindings. If you want to build our other bindings, you will need to install a runtime for the language whose binding you want to build. Each binding has an `.mk` file which provides specific targets for that binding.
TODO: Re-add instructions for TLS support [#3022](https://github.com/apple/foundationdb/issues/3022)

View File

@ -13,3 +13,6 @@ endif()
if(WITH_RUBY)
add_subdirectory(ruby)
endif()
if(NOT WIN32 AND NOT OPEN_FOR_IDE)
package_bindingtester()
endif()

View File

@ -26,7 +26,7 @@ sys.path[:0] = [os.path.join(os.path.dirname(__file__), '..', '..', 'bindings',
import util
FDB_API_VERSION = 620
FDB_API_VERSION = 630
LOGGING = {
'version': 1,

View File

@ -1,4 +1,4 @@
#!/usr/bin/env python
#!/usr/bin/env python3
#
# bindingtester.py
#
@ -38,15 +38,13 @@ from functools import reduce
sys.path[:0] = [os.path.join(os.path.dirname(__file__), '..')]
import bindingtester
from bindingtester import FDB_API_VERSION
from bindingtester import Result
from bindingtester import util
from bindingtester.tests import Test, InstructionSet
from known_testers import Tester
from bindingtester.known_testers import Tester
import fdb
import fdb.tuple
@ -110,9 +108,10 @@ class ResultSet(object):
# Fill in 'None' values for testers that didn't produce a result and generate an output string describing the results
all_results = {i: results[i] if i in results else None for i in range(len(self.tester_results))}
result_str = '\n'.join([' %-*s - %s' % (name_length, self.tester_results.keys()[i], r) for i, r in all_results.items()])
result_keys = list(self.tester_results.keys())
result_str = '\n'.join([' %-*s - %s' % (name_length, result_keys[i], r) for i, r in all_results.items()])
result_list = results.values()
result_list = list(results.values())
# If any of our results matches the global error filter, we ignore the result
if any(r.matches_global_error_filter(self.specification) for r in result_list):
@ -158,7 +157,7 @@ def choose_api_version(selected_api_version, tester_min_version, tester_max_vers
api_version = min_version
elif random.random() < 0.9:
api_version = random.choice([v for v in [13, 14, 16, 21, 22, 23, 100, 200, 300, 400, 410, 420, 430,
440, 450, 460, 500, 510, 520, 600, 610, 620] if v >= min_version and v <= max_version])
440, 450, 460, 500, 510, 520, 600, 610, 620, 630] if v >= min_version and v <= max_version])
else:
api_version = random.randint(min_version, max_version)
@ -200,7 +199,7 @@ class TestRunner(object):
raise Exception('Not all testers support concurrency')
# Test types should be intersection of all tester supported types
self.args.types = reduce(lambda t1, t2: filter(t1.__contains__, t2), map(lambda tester: tester.types, self.testers))
self.args.types = list(reduce(lambda t1, t2: filter(t1.__contains__, t2), map(lambda tester: tester.types, self.testers)))
self.args.no_directory_snapshot_ops = self.args.no_directory_snapshot_ops or any([not tester.directory_snapshot_ops_enabled for tester in self.testers])
@ -264,19 +263,19 @@ class TestRunner(object):
if self.args.concurrency == 1:
self.test.setup(self.args)
test_instructions = {fdb.Subspace((self.args.instruction_prefix,)): self.test.generate(self.args, 0)}
test_instructions = {fdb.Subspace((bytes(self.args.instruction_prefix, 'utf-8'),)): self.test.generate(self.args, 0)}
else:
test_instructions = {}
main_thread = InstructionSet()
for i in range(self.args.concurrency):
# thread_spec = fdb.Subspace(('thread_spec', i))
thread_spec = 'thread_spec%d' % i
thread_spec = b'thread_spec%d' % i
main_thread.push_args(thread_spec)
main_thread.append('START_THREAD')
self.test.setup(self.args)
test_instructions[fdb.Subspace((thread_spec,))] = self.test.generate(self.args, i)
test_instructions[fdb.Subspace((self.args.instruction_prefix,))] = main_thread
test_instructions[fdb.Subspace((bytes(self.args.instruction_prefix, 'utf-8'),))] = main_thread
return test_instructions

View File

@ -20,7 +20,7 @@
import os
MAX_API_VERSION = 620
MAX_API_VERSION = 630
COMMON_TYPES = ['null', 'bytes', 'string', 'int', 'uuid', 'bool', 'float', 'double', 'tuple']
ALL_TYPES = COMMON_TYPES + ['versionstamp']

View File

@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
######################################################
#
# FoundationDB Binding Test Script
@ -25,7 +25,8 @@ BREAKONERROR="${BREAKONERROR:-0}"
RUNSCRIPTS="${RUNSCRIPTS:-1}"
RUNTESTS="${RUNTESTS:-1}"
RANDOMTEST="${RANDOMTEST:-0}"
BINDINGTESTS="${BINDINGTESTS:-python python3 java java_async ruby go flow}"
# BINDINGTESTS="${BINDINGTESTS:-python python3 java java_async ruby go flow}"
BINDINGTESTS="${BINDINGTESTS:-python python3 java java_async go flow}"
LOGLEVEL="${LOGLEVEL:-INFO}"
_BINDINGTESTS=(${BINDINGTESTS})
DISABLEDTESTS=()
@ -186,7 +187,7 @@ function runScriptedTest()
else
local test="${1}"
if ! runCommand "Scripting ${test} ..." 'python' '-u' "${TESTFILE}" "${test}" --test-name scripted --logging-level "${LOGLEVEL}"
if ! runCommand "Scripting ${test} ..." 'python3' '-u' "${TESTFILE}" "${test}" --test-name scripted --logging-level "${LOGLEVEL}"
then
let status="${status} + 1"
fi
@ -211,25 +212,25 @@ function runTest()
fi
# API
if ([[ "${TESTINDEX}" -eq 0 ]] || [[ "${TESTINDEX}" -eq "${TESTTOTAL}" ]]) && ([[ "${BREAKONERROR}" -eq 0 ]] || [[ "${status}" -eq 0 ]]) && ! runCommand " ${TESTTYPES[0]}" 'python' '-u' "${TESTFILE}" "${test}" --test-name api --compare --num-ops "${OPERATIONS}" --logging-level "${LOGLEVEL}"
if ([[ "${TESTINDEX}" -eq 0 ]] || [[ "${TESTINDEX}" -eq "${TESTTOTAL}" ]]) && ([[ "${BREAKONERROR}" -eq 0 ]] || [[ "${status}" -eq 0 ]]) && ! runCommand " ${TESTTYPES[0]}" 'python3' '-u' "${TESTFILE}" "${test}" --test-name api --compare --num-ops "${OPERATIONS}" --logging-level "${LOGLEVEL}"
then
let status="${status} + 1"
fi
# Concurrent API
if ([[ "${TESTINDEX}" -eq 1 ]] || [[ "${TESTINDEX}" -eq "${TESTTOTAL}" ]]) && ([[ "${BREAKONERROR}" -eq 0 ]] || [[ "${status}" -eq 0 ]]) && ! runCommand " ${TESTTYPES[1]}" 'python' '-u' "${TESTFILE}" "${test}" --test-name api --concurrency "${CONCURRENCY}" --num-ops "${OPERATIONS}" --logging-level "${LOGLEVEL}"
if ([[ "${TESTINDEX}" -eq 1 ]] || [[ "${TESTINDEX}" -eq "${TESTTOTAL}" ]]) && ([[ "${BREAKONERROR}" -eq 0 ]] || [[ "${status}" -eq 0 ]]) && ! runCommand " ${TESTTYPES[1]}" 'python3' '-u' "${TESTFILE}" "${test}" --test-name api --concurrency "${CONCURRENCY}" --num-ops "${OPERATIONS}" --logging-level "${LOGLEVEL}"
then
let status="${status} + 1"
fi
# Directory
if ([[ "${TESTINDEX}" -eq 2 ]] || [[ "${TESTINDEX}" -eq "${TESTTOTAL}" ]]) && ([[ "${BREAKONERROR}" -eq 0 ]] || [[ "${status}" -eq 0 ]]) && ! runCommand " ${TESTTYPES[2]}" 'python' '-u' "${TESTFILE}" "${test}" --test-name directory --compare --num-ops "${OPERATIONS}" --logging-level "${LOGLEVEL}"
if ([[ "${TESTINDEX}" -eq 2 ]] || [[ "${TESTINDEX}" -eq "${TESTTOTAL}" ]]) && ([[ "${BREAKONERROR}" -eq 0 ]] || [[ "${status}" -eq 0 ]]) && ! runCommand " ${TESTTYPES[2]}" 'python3' '-u' "${TESTFILE}" "${test}" --test-name directory --compare --num-ops "${OPERATIONS}" --logging-level "${LOGLEVEL}"
then
let status="${status} + 1"
fi
# Directory HCA
if ([[ "${TESTINDEX}" -eq 3 ]] || [[ "${TESTINDEX}" -eq "${TESTTOTAL}" ]]) && ([[ "${BREAKONERROR}" -eq 0 ]] || [[ "${status}" -eq 0 ]]) && ! runCommand " ${TESTTYPES[3]}" 'python' '-u' "${TESTFILE}" "${test}" --test-name directory_hca --concurrency "${CONCURRENCY}" --num-ops "${HCAOPERATIONS}" --logging-level "${LOGLEVEL}"
if ([[ "${TESTINDEX}" -eq 3 ]] || [[ "${TESTINDEX}" -eq "${TESTTOTAL}" ]]) && ([[ "${BREAKONERROR}" -eq 0 ]] || [[ "${status}" -eq 0 ]]) && ! runCommand " ${TESTTYPES[3]}" 'python3' '-u' "${TESTFILE}" "${test}" --test-name directory_hca --concurrency "${CONCURRENCY}" --num-ops "${HCAOPERATIONS}" --logging-level "${LOGLEVEL}"
then
let status="${status} + 1"
fi

View File

@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash
LOGGING_LEVEL=WARNING

View File

@ -164,6 +164,13 @@ futures must apply the following rules to the result:
database using the get() method. May optionally push a future onto the
stack.
#### GET_ESTIMATED_RANGE_SIZE
Pops the top two items off of the stack as BEGIN_KEY and END_KEY to
construct a key range. Then calls the `getEstimatedRangeSize` API of
the language binding and verifies that the API returns without error.
Finally pushes the string "GOT_ESTIMATED_RANGE_SIZE" onto the stack.
#### GET_KEY (_SNAPSHOT, _DATABASE)
Pops the top four items off of the stack as KEY, OR_EQUAL, OFFSET, PREFIX

View File

@ -37,8 +37,8 @@ class ResultSpecification(object):
self.ordering_index = ordering_index
if global_error_filter is not None:
error_str = '|'.join(['%d' % e for e in global_error_filter])
self.error_regex = re.compile(r'\x01+ERROR\x00\xff*\x01' + error_str + r'\x00')
error_str = b'|'.join([b'%d' % e for e in global_error_filter])
self.error_regex = re.compile(rb'\x01+ERROR\x00\xff*\x01' + error_str + rb'\x00')
else:
self.error_regex = None
@ -90,7 +90,7 @@ class Test(object):
def versionstamp_value(self, raw_bytes, version_pos=0):
if hasattr(self, 'api_version') and self.api_version < 520:
if version_pos != 0:
raise ValueError("unable to set non-zero version position before 520 in values")
raise ValueError('unable to set non-zero version position before 520 in values')
return raw_bytes
else:
return raw_bytes + struct.pack('<L', version_pos)
@ -109,7 +109,7 @@ class Instruction(object):
def __init__(self, operation):
self.operation = operation
self.argument = None
self.value = fdb.tuple.pack((unicode(self.operation),))
self.value = fdb.tuple.pack((self.operation,))
def to_value(self):
return self.value
@ -125,7 +125,7 @@ class PushInstruction(Instruction):
def __init__(self, argument):
self.operation = 'PUSH'
self.argument = argument
self.value = fdb.tuple.pack((unicode("PUSH"), argument))
self.value = fdb.tuple.pack(('PUSH', argument))
def __str__(self):
return '%s %s' % (self.operation, self.argument)

View File

@ -157,6 +157,7 @@ class ApiTest(Test):
read_conflicts = ['READ_CONFLICT_RANGE', 'READ_CONFLICT_KEY']
write_conflicts = ['WRITE_CONFLICT_RANGE', 'WRITE_CONFLICT_KEY', 'DISABLE_WRITE_CONFLICT']
txn_sizes = ['GET_APPROXIMATE_SIZE']
storage_metrics = ['GET_ESTIMATED_RANGE_SIZE']
op_choices += reads
op_choices += mutations
@ -170,9 +171,10 @@ class ApiTest(Test):
op_choices += write_conflicts
op_choices += resets
op_choices += txn_sizes
op_choices += storage_metrics
idempotent_atomic_ops = [u'BIT_AND', u'BIT_OR', u'MAX', u'MIN', u'BYTE_MIN', u'BYTE_MAX']
atomic_ops = idempotent_atomic_ops + [u'ADD', u'BIT_XOR', u'APPEND_IF_FITS']
idempotent_atomic_ops = ['BIT_AND', 'BIT_OR', 'MAX', 'MIN', 'BYTE_MIN', 'BYTE_MAX']
atomic_ops = idempotent_atomic_ops + ['ADD', 'BIT_XOR', 'APPEND_IF_FITS']
if args.concurrency > 1:
self.max_keys = random.randint(100, 1000)
@ -357,26 +359,26 @@ class ApiTest(Test):
split = random.randint(0, 70)
prefix = self.random.random_string(20 + split)
if prefix.endswith('\xff'):
if prefix.endswith(b'\xff'):
# Necessary to make sure that the SET_VERSIONSTAMPED_VALUE check
# correctly finds where the version is supposed to fit in.
prefix += '\x00'
prefix += b'\x00'
suffix = self.random.random_string(70 - split)
rand_str2 = prefix + fdb.tuple.Versionstamp._UNSET_TR_VERSION + suffix
key3 = self.versionstamped_keys.pack() + rand_str2
index = len(self.versionstamped_keys.pack()) + len(prefix)
key3 = self.versionstamp_key(key3, index)
instructions.push_args(u'SET_VERSIONSTAMPED_VALUE',
instructions.push_args('SET_VERSIONSTAMPED_VALUE',
key1,
self.versionstamp_value(fdb.tuple.Versionstamp._UNSET_TR_VERSION + rand_str2))
instructions.append('ATOMIC_OP')
if args.api_version >= 520:
instructions.push_args(u'SET_VERSIONSTAMPED_VALUE', key2, self.versionstamp_value(rand_str2, len(prefix)))
instructions.push_args('SET_VERSIONSTAMPED_VALUE', key2, self.versionstamp_value(rand_str2, len(prefix)))
instructions.append('ATOMIC_OP')
instructions.push_args(u'SET_VERSIONSTAMPED_KEY', key3, rand_str1)
instructions.push_args('SET_VERSIONSTAMPED_KEY', key3, rand_str1)
instructions.append('ATOMIC_OP')
self.can_use_key_selectors = False
@ -467,17 +469,17 @@ class ApiTest(Test):
instructions.push_args(rand_str)
test_util.to_front(instructions, 1)
instructions.push_args(u'SET_VERSIONSTAMPED_KEY')
instructions.push_args('SET_VERSIONSTAMPED_KEY')
instructions.append('ATOMIC_OP')
if self.api_version >= 520:
version_value_key_2 = self.versionstamped_values_2.pack((rand_str,))
versionstamped_value = self.versionstamp_value(fdb.tuple.pack(tup), first_incomplete - len(prefix))
instructions.push_args(u'SET_VERSIONSTAMPED_VALUE', version_value_key_2, versionstamped_value)
instructions.push_args('SET_VERSIONSTAMPED_VALUE', version_value_key_2, versionstamped_value)
instructions.append('ATOMIC_OP')
version_value_key = self.versionstamped_values.pack((rand_str,))
instructions.push_args(u'SET_VERSIONSTAMPED_VALUE', version_value_key,
instructions.push_args('SET_VERSIONSTAMPED_VALUE', version_value_key,
self.versionstamp_value(fdb.tuple.Versionstamp._UNSET_TR_VERSION + fdb.tuple.pack(tup)))
instructions.append('ATOMIC_OP')
self.can_use_key_selectors = False
@ -500,8 +502,8 @@ class ApiTest(Test):
# Use SUB to test if integers are correctly unpacked
elif op == 'SUB':
a = self.random.random_int() / 2
b = self.random.random_int() / 2
a = self.random.random_int() // 2
b = self.random.random_int() // 2
instructions.push_args(0, a, b)
instructions.append(op)
instructions.push_args(1)
@ -536,6 +538,21 @@ class ApiTest(Test):
instructions.push_args(d)
instructions.append(op)
self.add_strings(1)
elif op == 'GET_ESTIMATED_RANGE_SIZE':
# Protect against inverted range and identical keys
key1 = self.workspace.pack(self.random.random_tuple(1))
key2 = self.workspace.pack(self.random.random_tuple(1))
while key1 == key2:
key1 = self.workspace.pack(self.random.random_tuple(1))
key2 = self.workspace.pack(self.random.random_tuple(1))
if key1 > key2:
key1, key2 = key2, key1
instructions.push_args(key1, key2)
instructions.append(op)
self.add_strings(1)
else:
assert False, 'Unknown operation: ' + op
@ -566,7 +583,7 @@ class ApiTest(Test):
next_begin = None
incorrect_versionstamps = 0
for k, v in tr.get_range(begin_key, self.versionstamped_values.range().stop, limit=limit):
next_begin = k + '\x00'
next_begin = k + b'\x00'
random_id = self.versionstamped_values.unpack(k)[0]
versioned_value = v[10:].replace(fdb.tuple.Versionstamp._UNSET_TR_VERSION, v[:10], 1)
@ -602,6 +619,6 @@ class ApiTest(Test):
def get_result_specifications(self):
return [
ResultSpecification(self.workspace, global_error_filter=[1007, 1021]),
ResultSpecification(self.stack_subspace, key_start_index=1, ordering_index=1, global_error_filter=[1007, 1021])
ResultSpecification(self.workspace, global_error_filter=[1007, 1009, 1021]),
ResultSpecification(self.stack_subspace, key_start_index=1, ordering_index=1, global_error_filter=[1007, 1009, 1021])
]

View File

@ -52,15 +52,15 @@ class DirectoryTest(Test):
self.dir_list.append(child)
self.dir_index = directory_util.DEFAULT_DIRECTORY_INDEX
def generate_layer(self):
def generate_layer(self, allow_partition=True):
if random.random() < 0.7:
return ''
return b''
else:
choice = random.randint(0, 3)
if choice == 0:
return 'partition'
if choice == 0 and allow_partition:
return b'partition'
elif choice == 1:
return 'test_layer'
return b'test_layer'
else:
return self.random.random_string(random.randint(0, 5))
@ -98,7 +98,7 @@ class DirectoryTest(Test):
instructions.append('NEW_TRANSACTION')
default_path = unicode('default%d' % self.next_path)
default_path = 'default%d' % self.next_path
self.next_path += 1
self.dir_list = directory_util.setup_directories(instructions, default_path, self.random)
self.root = self.dir_list[0]
@ -114,7 +114,7 @@ class DirectoryTest(Test):
instructions.push_args(layer)
instructions.push_args(*test_util.with_length(path))
instructions.append('DIRECTORY_OPEN')
self.dir_list.append(self.root.add_child(path, DirectoryStateTreeNode(True, True, has_known_prefix=False, is_partition=(layer=='partition'))))
self.dir_list.append(self.root.add_child(path, DirectoryStateTreeNode(True, True, has_known_prefix=False, is_partition=(layer==b'partition'))))
# print('%d. Selected %s, dir=%s, dir_id=%s, has_known_prefix=%s, dir_list_len=%d' \
# % (len(instructions), 'DIRECTORY_OPEN', repr(self.dir_index), self.dir_list[-1].dir_id, False, len(self.dir_list)-1))
@ -184,7 +184,9 @@ class DirectoryTest(Test):
test_util.blocking_commit(instructions)
path = generate_path()
op_args = test_util.with_length(path) + (self.generate_layer(),)
# Partitions that use the high-contention allocator can result in non-determinism if they fail to commit,
# so we disallow them in comparison tests
op_args = test_util.with_length(path) + (self.generate_layer(allow_partition=args.concurrency>1),)
directory_util.push_instruction_and_record_prefix(instructions, op, op_args, path, len(self.dir_list), self.random, self.prefix_log)
if not op.endswith('_DATABASE') and args.concurrency == 1:
@ -199,7 +201,7 @@ class DirectoryTest(Test):
elif root_op == 'DIRECTORY_CREATE':
layer = self.generate_layer()
is_partition = layer == 'partition'
is_partition = layer == b'partition'
prefix = generate_prefix(require_unique=is_partition and args.concurrency==1, is_partition=is_partition, min_length=0)
@ -256,7 +258,7 @@ class DirectoryTest(Test):
self.dir_list.append(dir_entry.add_child(new_path, child_entry))
# Make sure that the default directory subspace still exists after moving the specified directory
if dir_entry.state.is_directory and not dir_entry.state.is_subspace and old_path == (u'',):
if dir_entry.state.is_directory and not dir_entry.state.is_subspace and old_path == ('',):
self.ensure_default_directory_subspace(instructions, default_path)
elif root_op == 'DIRECTORY_MOVE_TO':
@ -291,7 +293,7 @@ class DirectoryTest(Test):
dir_entry.delete(path)
# Make sure that the default directory subspace still exists after removing the specified directory
if path == () or (dir_entry.state.is_directory and not dir_entry.state.is_subspace and path == (u'',)):
if path == () or (dir_entry.state.is_directory and not dir_entry.state.is_subspace and path == ('',)):
self.ensure_default_directory_subspace(instructions, default_path)
elif root_op == 'DIRECTORY_LIST' or root_op == 'DIRECTORY_EXISTS':
@ -378,7 +380,7 @@ class DirectoryTest(Test):
def get_result_specifications(self):
return [
ResultSpecification(self.stack_subspace, key_start_index=1, ordering_index=1, global_error_filter=[1007, 1021]),
ResultSpecification(self.stack_subspace, key_start_index=1, ordering_index=1, global_error_filter=[1007, 1009, 1021]),
ResultSpecification(self.directory_log, ordering_index=0),
ResultSpecification(self.subspace_log, ordering_index=0)
]
@ -392,15 +394,15 @@ def generate_path(min_length=0):
path = ()
for i in range(length):
if random.random() < 0.05:
path = path + (u'',)
path = path + ('',)
else:
path = path + (random.choice([u'1', u'2', u'3']),)
path = path + (random.choice(['1', '2', '3']),)
return path
def generate_prefix(require_unique=False, is_partition=False, min_length=1):
fixed_prefix = 'abcdefg'
fixed_prefix = b'abcdefg'
if not require_unique and min_length == 0 and random.random() < 0.8:
return None
elif require_unique or is_partition or min_length > len(fixed_prefix) or random.random() < 0.5:
@ -409,13 +411,13 @@ def generate_prefix(require_unique=False, is_partition=False, min_length=1):
length = random.randint(min_length, min_length+5)
if length == 0:
return ''
return b''
if not is_partition:
first = chr(random.randint(ord('\x1d'), 255) % 255)
return first + ''.join(chr(random.randrange(0, 256)) for i in range(0, length - 1))
first = random.randint(ord('\x1d'), 255) % 255
return bytes([first] + [random.randrange(0, 256) for i in range(0, length - 1)])
else:
return ''.join(chr(random.randrange(ord('\x02'), ord('\x14'))) for i in range(0, length))
return bytes([random.randrange(ord('\x02'), ord('\x14')) for i in range(0, length)])
else:
prefix = fixed_prefix
generated = prefix[0:random.randrange(min_length, len(prefix))]

View File

@ -40,7 +40,7 @@ class DirectoryHcaTest(Test):
def setup(self, args):
self.random = test_util.RandomGenerator(args.max_int_bits, args.api_version, args.types)
self.transactions = ['tr%d' % i for i in range(3)] # SOMEDAY: parameterize this number?
self.transactions = [b'tr%d' % i for i in range(3)] # SOMEDAY: parameterize this number?
self.barrier_num = 0
self.max_directories_per_transaction = 30
@ -59,7 +59,7 @@ class DirectoryHcaTest(Test):
def barrier(self, instructions, thread_number, thread_ending=False):
if not thread_ending:
instructions.push_args(self.coordination[(self.barrier_num + 1)][thread_number].key(), '')
instructions.push_args(self.coordination[(self.barrier_num + 1)][thread_number].key(), b'')
instructions.append('SET_DATABASE')
instructions.append('WAIT_FUTURE')
@ -76,7 +76,7 @@ class DirectoryHcaTest(Test):
instructions.append('NEW_TRANSACTION')
default_path = unicode('default%d' % self.next_path)
default_path = 'default%d' % self.next_path
self.next_path += 1
dir_list = directory_util.setup_directories(instructions, default_path, self.random)
num_dirs = len(dir_list)
@ -102,7 +102,7 @@ class DirectoryHcaTest(Test):
for i in range(num_directories):
path = (self.random.random_unicode_str(16),)
op_args = test_util.with_length(path) + ('', None)
op_args = test_util.with_length(path) + (b'', None)
directory_util.push_instruction_and_record_prefix(instructions, 'DIRECTORY_CREATE',
op_args, path, num_dirs, self.random, self.prefix_log)
num_dirs += 1
@ -121,7 +121,7 @@ class DirectoryHcaTest(Test):
def pre_run(self, tr, args):
if args.concurrency > 1:
for i in range(args.concurrency):
tr[self.coordination[0][i]] = ''
tr[self.coordination[0][i]] = b''
def validate(self, db, args):
errors = []

View File

@ -249,7 +249,7 @@ def run_test():
# Test moving an entry
assert not entry.state.has_known_prefix
assert not entry.state.is_subspace
assert entry.state.children.keys() == ['1']
assert list(entry.state.children.keys()) == ['1']
for e in all_entries:
validate_dir(e, root)

View File

@ -32,25 +32,25 @@ from bindingtester.tests.directory_state_tree import DirectoryStateTreeNode
fdb.api_version(FDB_API_VERSION)
DEFAULT_DIRECTORY_INDEX = 4
DEFAULT_DIRECTORY_PREFIX = 'default'
DIRECTORY_ERROR_STRING = 'DIRECTORY_ERROR'
DEFAULT_DIRECTORY_PREFIX = b'default'
DIRECTORY_ERROR_STRING = b'DIRECTORY_ERROR'
def setup_directories(instructions, default_path, random):
# Clients start with the default directory layer in the directory list
DirectoryStateTreeNode.reset()
dir_list = [DirectoryStateTreeNode.get_layer('\xfe')]
dir_list = [DirectoryStateTreeNode.get_layer(b'\xfe')]
instructions.push_args(0, '\xfe')
instructions.push_args(0, b'\xfe')
instructions.append('DIRECTORY_CREATE_SUBSPACE')
dir_list.append(DirectoryStateTreeNode(False, True))
instructions.push_args(0, '')
instructions.push_args(0, b'')
instructions.append('DIRECTORY_CREATE_SUBSPACE')
dir_list.append(DirectoryStateTreeNode(False, True))
instructions.push_args(1, 2, 1)
instructions.append('DIRECTORY_CREATE_LAYER')
dir_list.append(DirectoryStateTreeNode.get_layer('\xfe'))
dir_list.append(DirectoryStateTreeNode.get_layer(b'\xfe'))
create_default_directory_subspace(instructions, default_path, random)
dir_list.append(dir_list[0].add_child((default_path,), DirectoryStateTreeNode(True, True, has_known_prefix=True)))
@ -67,7 +67,7 @@ def create_default_directory_subspace(instructions, path, random):
instructions.push_args(3)
instructions.append('DIRECTORY_CHANGE')
prefix = random.random_string(16)
instructions.push_args(1, path, '', '%s-%s' % (DEFAULT_DIRECTORY_PREFIX, prefix))
instructions.push_args(1, path, b'', b'%s-%s' % (DEFAULT_DIRECTORY_PREFIX, prefix))
instructions.append('DIRECTORY_CREATE_DATABASE')
instructions.push_args(DEFAULT_DIRECTORY_INDEX)
@ -88,14 +88,14 @@ def push_instruction_and_record_prefix(instructions, op, op_args, path, dir_inde
instructions.push_args(dir_index)
instructions.append('DIRECTORY_CHANGE')
instructions.push_args(1, '', random.random_string(16), '')
instructions.push_args(1, b'', random.random_string(16), b'')
instructions.append('DIRECTORY_PACK_KEY')
test_util.to_front(instructions, 3) # move the existence result up to the front of the stack
t = util.subspace_to_tuple(subspace)
instructions.push_args(len(t) + 3, *t)
instructions.append('TUPLE_PACK') # subspace[<exists>][<packed_key>][random.random_string(16)] = ''
instructions.append('TUPLE_PACK') # subspace[<exists>][<packed_key>][random.random_string(16)] = b''
instructions.append('SET')
instructions.push_args(DEFAULT_DIRECTORY_INDEX)
@ -128,7 +128,7 @@ def check_for_duplicate_prefixes(db, subspace):
def validate_hca_state(db):
hca = fdb.Subspace(('\xfe', 'hca'), '\xfe')
hca = fdb.Subspace((b'\xfe', b'hca'), b'\xfe')
counters = hca[0]
recent = hca[1]

View File

@ -34,7 +34,7 @@ fdb.api_version(FDB_API_VERSION)
class ScriptedTest(Test):
TEST_API_VERSION = 620
TEST_API_VERSION = 630
def __init__(self, subspace):
super(ScriptedTest, self).__init__(subspace, ScriptedTest.TEST_API_VERSION, ScriptedTest.TEST_API_VERSION)
@ -62,20 +62,20 @@ class ScriptedTest(Test):
test_instructions = ThreadedInstructionSet()
main_thread = test_instructions.create_thread()
foo = [self.workspace.pack(('foo%d' % i,)) for i in range(0, 6)]
foo = [self.workspace.pack((b'foo%d' % i,)) for i in range(0, 6)]
main_thread.append('NEW_TRANSACTION')
main_thread.push_args(1020)
main_thread.append('ON_ERROR')
self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')
self.add_result(main_thread, args, b'RESULT_NOT_PRESENT')
main_thread.append('GET_READ_VERSION')
main_thread.push_args(foo[1], 'bar')
main_thread.push_args(foo[1], b'bar')
main_thread.append('SET')
main_thread.push_args(foo[1])
main_thread.append('GET')
self.add_result(main_thread, args, 'bar')
self.add_result(main_thread, args, b'bar')
test_util.blocking_commit(main_thread)
self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')
self.add_result(main_thread, args, b'RESULT_NOT_PRESENT')
main_thread.push_args(2000)
main_thread.append('ON_ERROR')
@ -91,39 +91,39 @@ class ScriptedTest(Test):
main_thread.append('DUP')
main_thread.append('DUP')
main_thread.append('GET')
self.add_result(main_thread, args, 'bar')
self.add_result(main_thread, args, b'bar')
main_thread.append('CLEAR')
main_thread.append('GET_SNAPSHOT')
self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')
self.add_result(main_thread, args, b'RESULT_NOT_PRESENT')
main_thread.push_args(foo[1])
main_thread.append('GET_DATABASE')
self.add_result(main_thread, args, 'bar')
self.add_result(main_thread, args, b'bar')
test_util.blocking_commit(main_thread)
self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')
self.add_result(main_thread, args, b'RESULT_NOT_PRESENT')
main_thread.append('SET_READ_VERSION')
main_thread.push_args(foo[1])
main_thread.append('DUP')
main_thread.append('GET')
self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')
self.add_result(main_thread, args, b'RESULT_NOT_PRESENT')
main_thread.append('CLEAR')
test_util.blocking_commit(main_thread)
self.add_result(main_thread, args, test_util.error_string(1020))
main_thread.push_args(foo[1])
main_thread.append('GET_SNAPSHOT')
self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')
self.add_result(main_thread, args, b'RESULT_NOT_PRESENT')
main_thread.push_args(foo[1])
main_thread.append('CLEAR')
main_thread.append('COMMIT')
main_thread.append('WAIT_FUTURE')
self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')
self.add_result(main_thread, args, b'RESULT_NOT_PRESENT')
main_thread.append('GET_COMMITTED_VERSION')
main_thread.append('RESET')
main_thread.append('EMPTY_STACK')
main_thread.append('NEW_TRANSACTION')
main_thread.push_args(1, 'bar', foo[1], foo[2], 'bar2', foo[3], 'bar3', foo[4], 'bar4', foo[5], 'bar5')
main_thread.push_args(1, b'bar', foo[1], foo[2], b'bar2', foo[3], b'bar3', foo[4], b'bar4', foo[5], b'bar5')
main_thread.append('SWAP')
main_thread.append('SET')
main_thread.append('SET')
@ -131,112 +131,112 @@ class ScriptedTest(Test):
main_thread.append('SET')
main_thread.append('SET_DATABASE')
test_util.blocking_commit(main_thread)
self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')
self.add_result(main_thread, args, b'RESULT_NOT_PRESENT')
main_thread.append('SET_READ_VERSION')
main_thread.push_args(foo[2])
main_thread.append('GET')
self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')
self.add_result(main_thread, args, b'RESULT_NOT_PRESENT')
main_thread.append('NEW_TRANSACTION')
main_thread.push_args('', 0, -1, '')
main_thread.push_args(b'', 0, -1, b'')
main_thread.append('GET_KEY')
self.add_result(main_thread, args, '')
self.add_result(main_thread, args, b'')
main_thread.append('NEW_TRANSACTION')
main_thread.append('GET_READ_VERSION_SNAPSHOT')
main_thread.push_args('random', foo[1], foo[3], 0, 1, 1)
main_thread.push_args(b'random', foo[1], foo[3], 0, 1, 1)
main_thread.append('POP')
main_thread.append('GET_RANGE')
self.add_result(main_thread, args, fdb.tuple.pack((foo[2], 'bar2', foo[1], 'bar')))
self.add_result(main_thread, args, fdb.tuple.pack((foo[2], b'bar2', foo[1], b'bar')))
main_thread.push_args(foo[1], foo[3], 1, 1, 0)
main_thread.append('GET_RANGE_SNAPSHOT')
self.add_result(main_thread, args, fdb.tuple.pack((foo[2], 'bar2')))
self.add_result(main_thread, args, fdb.tuple.pack((foo[2], b'bar2')))
main_thread.push_args(foo[1], foo[3], 0, 0, 4)
main_thread.append('GET_RANGE_DATABASE')
self.add_result(main_thread, args, fdb.tuple.pack((foo[1], 'bar', foo[2], 'bar2')))
self.add_result(main_thread, args, fdb.tuple.pack((foo[1], b'bar', foo[2], b'bar2')))
test_util.blocking_commit(main_thread)
self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')
self.add_result(main_thread, args, b'RESULT_NOT_PRESENT')
main_thread.push_args(foo[3], foo[5])
main_thread.append('CLEAR_RANGE')
main_thread.push_args(foo[1], 0, 3, '')
main_thread.push_args(foo[1], 0, 3, b'')
main_thread.append('GET_KEY')
self.add_result(main_thread, args, foo[5])
main_thread.push_args(foo[1], 1, 2, '')
main_thread.push_args(foo[1], 1, 2, b'')
main_thread.append('GET_KEY_SNAPSHOT')
self.add_result(main_thread, args, foo[5])
main_thread.push_args(foo[5], 0, -2, '')
main_thread.push_args(foo[5], 0, -2, b'')
main_thread.append('GET_KEY_DATABASE')
self.add_result(main_thread, args, foo[2])
main_thread.push_args(self.workspace.key(), 2, 0, 2)
main_thread.append('GET_RANGE_STARTS_WITH')
self.add_result(main_thread, args, fdb.tuple.pack((foo[1], 'bar', foo[2], 'bar2')))
self.add_result(main_thread, args, fdb.tuple.pack((foo[1], b'bar', foo[2], b'bar2')))
main_thread.push_args(self.workspace.key(), 4, 0, 3)
main_thread.append('GET_RANGE_STARTS_WITH_SNAPSHOT')
self.add_result(main_thread, args, fdb.tuple.pack((foo[1], 'bar', foo[2], 'bar2', foo[5], 'bar5')))
self.add_result(main_thread, args, fdb.tuple.pack((foo[1], b'bar', foo[2], b'bar2', foo[5], b'bar5')))
main_thread.push_args(self.workspace.key(), 3, 1, -1)
main_thread.append('GET_RANGE_STARTS_WITH_DATABASE')
self.add_result(main_thread, args, fdb.tuple.pack((foo[5], 'bar5', foo[4], 'bar4', foo[3], 'bar3')))
main_thread.push_args(foo[1], 0, 1, foo[1], 0, 3, 0, 0, -1, '')
self.add_result(main_thread, args, fdb.tuple.pack((foo[5], b'bar5', foo[4], b'bar4', foo[3], b'bar3')))
main_thread.push_args(foo[1], 0, 1, foo[1], 0, 3, 0, 0, -1, b'')
main_thread.append('GET_RANGE_SELECTOR')
self.add_result(main_thread, args, fdb.tuple.pack((foo[1], 'bar', foo[2], 'bar2')))
main_thread.push_args(foo[1], 1, 0, foo[1], 1, 3, 0, 0, -1, '')
self.add_result(main_thread, args, fdb.tuple.pack((foo[1], b'bar', foo[2], b'bar2')))
main_thread.push_args(foo[1], 1, 0, foo[1], 1, 3, 0, 0, -1, b'')
main_thread.append('GET_RANGE_SELECTOR_SNAPSHOT')
self.add_result(main_thread, args, fdb.tuple.pack((foo[1], 'bar', foo[2], 'bar2', foo[5], 'bar5')))
main_thread.push_args(foo[1], 0, 1, foo[1], 1, 3, 0, 0, -1, '')
self.add_result(main_thread, args, fdb.tuple.pack((foo[1], b'bar', foo[2], b'bar2', foo[5], b'bar5')))
main_thread.push_args(foo[1], 0, 1, foo[1], 1, 3, 0, 0, -1, b'')
main_thread.append('GET_RANGE_SELECTOR_DATABASE')
self.add_result(main_thread, args, fdb.tuple.pack((foo[1], 'bar', foo[2], 'bar2', foo[3], 'bar3')))
self.add_result(main_thread, args, fdb.tuple.pack((foo[1], b'bar', foo[2], b'bar2', foo[3], b'bar3')))
test_util.blocking_commit(main_thread)
self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')
self.add_result(main_thread, args, b'RESULT_NOT_PRESENT')
main_thread.push_args(self.workspace.key())
main_thread.append('CLEAR_RANGE_STARTS_WITH')
main_thread.push_args(self.workspace.key(), 0, 0, -1)
main_thread.append('GET_RANGE_STARTS_WITH')
self.add_result(main_thread, args, '')
self.add_result(main_thread, args, b'')
test_util.blocking_commit(main_thread)
self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')
self.add_result(main_thread, args, b'RESULT_NOT_PRESENT')
main_thread.append('SET_READ_VERSION')
main_thread.push_args(foo[1])
main_thread.append('GET')
self.add_result(main_thread, args, 'bar')
self.add_result(main_thread, args, b'bar')
test_util.blocking_commit(main_thread)
self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')
self.add_result(main_thread, args, b'RESULT_NOT_PRESENT')
main_thread.push_args(foo[1], 'bar', foo[2], 'bar2', foo[3], 'bar3', foo[4], 'bar4', foo[5], 'bar5')
main_thread.push_args(foo[1], b'bar', foo[2], b'bar2', foo[3], b'bar3', foo[4], b'bar4', foo[5], b'bar5')
main_thread.append('SET')
main_thread.append('SET')
main_thread.append('SET')
main_thread.append('SET')
main_thread.append('SET')
test_util.blocking_commit(main_thread)
self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')
self.add_result(main_thread, args, b'RESULT_NOT_PRESENT')
main_thread.push_args(foo[2])
main_thread.append('CLEAR_DATABASE')
main_thread.append('WAIT_FUTURE')
main_thread.push_args(self.workspace.key(), 0, 0, -1)
main_thread.append('GET_RANGE_STARTS_WITH_DATABASE')
self.add_result(main_thread, args, fdb.tuple.pack((foo[1], 'bar', foo[3], 'bar3', foo[4], 'bar4', foo[5], 'bar5')))
self.add_result(main_thread, args, fdb.tuple.pack((foo[1], b'bar', foo[3], b'bar3', foo[4], b'bar4', foo[5], b'bar5')))
main_thread.push_args(foo[3], foo[5])
main_thread.append('CLEAR_RANGE_DATABASE')
main_thread.append('WAIT_FUTURE')
main_thread.push_args(self.workspace.key(), 0, 0, -1)
main_thread.append('GET_RANGE_STARTS_WITH_DATABASE')
self.add_result(main_thread, args, fdb.tuple.pack((foo[1], 'bar', foo[5], 'bar5')))
self.add_result(main_thread, args, fdb.tuple.pack((foo[1], b'bar', foo[5], b'bar5')))
main_thread.push_args(self.workspace.key())
main_thread.append('CLEAR_RANGE_STARTS_WITH_DATABASE')
main_thread.append('WAIT_FUTURE')
main_thread.push_args(self.workspace.key(), 0, 0, -1)
main_thread.append('GET_RANGE_STARTS_WITH_DATABASE')
self.add_result(main_thread, args, '')
self.add_result(main_thread, args, b'')
test_util.blocking_commit(main_thread)
self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')
self.add_result(main_thread, args, b'RESULT_NOT_PRESENT')
main_thread.append('NEW_TRANSACTION')
main_thread.push_args(foo[1], foo[5], 0, 0, 0)
@ -250,7 +250,7 @@ class ScriptedTest(Test):
self.append_range_test(main_thread, args, 1000, 8)
main_thread.append('EMPTY_STACK')
tup = (0, 'foo', -1093, u'unicode\u9348test', 0xffffffff + 100, 'bar\x00\xff')
tup = (0, b'foo', -1093, 'unicode\u9348test', 0xffffffff + 100, b'bar\x00\xff')
main_thread.push_args(*test_util.with_length(tup))
main_thread.append('TUPLE_PACK')
main_thread.append('DUP')
@ -272,58 +272,58 @@ class ScriptedTest(Test):
self.add_result(main_thread, args, rng.stop)
self.add_result(main_thread, args, rng.start)
stampKey = 'stampedXXXXXXXXXXsuffix'
stampKeyIndex = stampKey.find('XXXXXXXXXX')
main_thread.push_args(u'SET_VERSIONSTAMPED_KEY', self.versionstamp_key(stampKey, stampKeyIndex), 'stampedBar')
stampKey = b'stampedXXXXXXXXXXsuffix'
stampKeyIndex = stampKey.find(b'XXXXXXXXXX')
main_thread.push_args('SET_VERSIONSTAMPED_KEY', self.versionstamp_key(stampKey, stampKeyIndex), b'stampedBar')
main_thread.append('ATOMIC_OP')
main_thread.push_args(u'SET_VERSIONSTAMPED_VALUE', 'stampedValue', self.versionstamp_value('XXXXXXXXXX'))
main_thread.push_args('SET_VERSIONSTAMPED_VALUE', b'stampedValue', self.versionstamp_value(b'XXXXXXXXXX'))
main_thread.append('ATOMIC_OP')
if self.api_version >= 520:
stampValue = 'stampedXXXXXXXXXXsuffix'
stampValueIndex = stampValue.find('XXXXXXXXXX')
main_thread.push_args(u'SET_VERSIONSTAMPED_VALUE', 'stampedValue2', self.versionstamp_value(stampValue, stampValueIndex))
stampValue = b'stampedXXXXXXXXXXsuffix'
stampValueIndex = stampValue.find(b'XXXXXXXXXX')
main_thread.push_args('SET_VERSIONSTAMPED_VALUE', b'stampedValue2', self.versionstamp_value(stampValue, stampValueIndex))
main_thread.append('ATOMIC_OP')
main_thread.push_args('suffix')
main_thread.push_args(b'suffix')
main_thread.append('GET_VERSIONSTAMP')
test_util.blocking_commit(main_thread)
self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')
main_thread.push_args('stamped')
self.add_result(main_thread, args, b'RESULT_NOT_PRESENT')
main_thread.push_args(b'stamped')
main_thread.append('CONCAT')
main_thread.append('CONCAT')
main_thread.append('GET')
self.add_result(main_thread, args, 'stampedBar')
self.add_result(main_thread, args, b'stampedBar')
main_thread.push_args('stampedValue', 'suffix')
main_thread.push_args(b'stampedValue', b'suffix')
main_thread.append('GET')
main_thread.push_args('stamped')
main_thread.push_args(b'stamped')
main_thread.append('CONCAT')
main_thread.append('CONCAT')
main_thread.append('GET')
self.add_result(main_thread, args, 'stampedBar')
self.add_result(main_thread, args, b'stampedBar')
if self.api_version >= 520:
main_thread.push_args('stampedValue2')
main_thread.push_args(b'stampedValue2')
main_thread.append('GET')
main_thread.append('GET')
self.add_result(main_thread, args, 'stampedBar')
self.add_result(main_thread, args, b'stampedBar')
main_thread.append('GET_VERSIONSTAMP')
test_util.blocking_commit(main_thread)
self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')
self.add_result(main_thread, args, b'RESULT_NOT_PRESENT')
self.add_result(main_thread, args, test_util.error_string(2021))
main_thread.push_args('sentinel')
main_thread.push_args(b'sentinel')
main_thread.append('UNIT_TESTS')
self.add_result(main_thread, args, 'sentinel')
self.add_result(main_thread, args, b'sentinel')
if not args.no_threads:
wait_key = 'waitKey'
wait_key = b'waitKey'
# threads = [self.thread_subspace[i] for i in range(0, 2)]
threads = ['thread_spec%d' % i for i in range(0, 2)]
threads = [b'thread_spec%d' % i for i in range(0, 2)]
for thread_spec in threads:
main_thread.push_args(self.workspace.pack((wait_key, thread_spec)), '')
main_thread.push_args(self.workspace.pack((wait_key, thread_spec)), b'')
main_thread.append('SET_DATABASE')
main_thread.append('WAIT_FUTURE')
@ -333,7 +333,7 @@ class ScriptedTest(Test):
main_thread.append('START_THREAD')
thread = test_instructions.create_thread(fdb.Subspace((thread_spec,)))
thread.append('NEW_TRANSACTION')
thread.push_args(foo[1], foo[1], 'bar%s' % thread_spec, self.workspace.pack(
thread.push_args(foo[1], foo[1], b'bar%s' % thread_spec, self.workspace.pack(
(wait_key, thread_spec)), self.workspace.pack((wait_key, thread_spec)))
thread.append('GET')
thread.append('POP')
@ -348,7 +348,7 @@ class ScriptedTest(Test):
thread.append('NEW_TRANSACTION')
thread.push_args(foo[1])
thread.append('GET')
self.add_result(thread, args, 'barthread_spec0', 'barthread_spec1')
self.add_result(thread, args, b'barthread_spec0', b'barthread_spec1')
main_thread.append('EMPTY_STACK')
# if len(main_thread) > args.num_ops:
@ -358,7 +358,7 @@ class ScriptedTest(Test):
def get_result_specifications(self):
return [
ResultSpecification(self.results_subspace, ordering_index=0, global_error_filter=[1007, 1021])
ResultSpecification(self.results_subspace, ordering_index=0, global_error_filter=[1007, 1009, 1021])
]
def get_expected_results(self):
@ -372,7 +372,7 @@ class ScriptedTest(Test):
kvpairs = []
for i in range(0, num_pairs * 2):
kvpairs.append(self.workspace.pack(('foo', ''.join(chr(random.randint(0, 254)) for i in range(0, kv_length)))))
kvpairs.append(self.workspace.pack((b'foo', bytes([random.randint(0, 254) for i in range(0, kv_length)]))))
kvpairs = list(set(kvpairs))
if len(kvpairs) % 2 == 1:
@ -380,24 +380,24 @@ class ScriptedTest(Test):
kvpairs.sort()
instructions.push_args(*kvpairs)
for i in range(0, len(kvpairs) / 2):
for i in range(0, len(kvpairs) // 2):
instructions.append('SET')
if i % 100 == 99:
test_util.blocking_commit(instructions)
self.add_result(instructions, args, 'RESULT_NOT_PRESENT')
self.add_result(instructions, args, b'RESULT_NOT_PRESENT')
foo_range = self.workspace.range(('foo',))
foo_range = self.workspace.range((b'foo',))
instructions.push_args(foo_range.start, foo_range.stop, 0, 0, -1)
instructions.append('GET_RANGE')
self.add_result(instructions, args, fdb.tuple.pack(tuple(kvpairs)))
instructions.push_args(self.workspace.key(), 0, 0, -1)
instructions.append('GET_RANGE_STARTS_WITH')
self.add_result(instructions, args, fdb.tuple.pack(tuple(kvpairs)))
instructions.push_args(foo_range.start, 0, 1, foo_range.stop, 0, 1, 0, 0, -1, '')
instructions.push_args(foo_range.start, 0, 1, foo_range.stop, 0, 1, 0, 0, -1, b'')
instructions.append('GET_RANGE_SELECTOR')
self.add_result(instructions, args, fdb.tuple.pack(tuple(kvpairs)))
test_util.blocking_commit(instructions)
self.add_result(instructions, args, 'RESULT_NOT_PRESENT')
self.add_result(instructions, args, b'RESULT_NOT_PRESENT')
def add_result(self, instructions, args, *values):
key = self.results_subspace.pack((len(self.results),))

View File

@ -36,10 +36,10 @@ class RandomGenerator(object):
def __init__(self, max_int_bits=64, api_version=FDB_API_VERSION, types=COMMON_TYPES):
self.max_int_bits = max_int_bits
self.api_version = api_version
self.types = types
self.types = list(types)
def random_unicode_str(self, length):
return u''.join(self.random_unicode_char() for i in range(0, length))
return ''.join(self.random_unicode_char() for i in range(0, length))
def random_int(self):
num_bits = random.randint(0, self.max_int_bits) # This way, we test small numbers with higher probability
@ -123,7 +123,7 @@ class RandomGenerator(object):
smaller_size = random.randint(1, len(to_add))
tuples.append(to_add[:smaller_size])
else:
non_empty = filter(lambda (_, x): (isinstance(x, list) or isinstance(x, tuple)) and len(x) > 0, enumerate(to_add))
non_empty = [x for x in enumerate(to_add) if (isinstance(x[1], list) or isinstance(x[1], tuple)) and len(x[1]) > 0]
if len(non_empty) > 0 and random.random() < 0.25:
# Add a smaller list to test prefixes of nested structures.
idx, choice = random.choice(non_empty)
@ -153,24 +153,24 @@ class RandomGenerator(object):
def random_string(self, length):
if length == 0:
return ''
return b''
return chr(random.randint(0, 254)) + ''.join(chr(random.randint(0, 255)) for i in range(0, length - 1))
return bytes([random.randint(0, 254)] + [random.randint(0, 255) for i in range(0, length - 1)])
def random_unicode_char(self):
while True:
if random.random() < 0.05:
# Choose one of these special character sequences.
specials = [u'\U0001f4a9', u'\U0001f63c', u'\U0001f3f3\ufe0f\u200d\U0001f308', u'\U0001f1f5\U0001f1f2', u'\uf8ff',
u'\U0002a2b2', u'\u05e9\u05dc\u05d5\u05dd']
specials = ['\U0001f4a9', '\U0001f63c', '\U0001f3f3\ufe0f\u200d\U0001f308', '\U0001f1f5\U0001f1f2', '\uf8ff',
'\U0002a2b2', '\u05e9\u05dc\u05d5\u05dd']
return random.choice(specials)
c = random.randint(0, 0xffff)
if unicodedata.category(unichr(c))[0] in 'LMNPSZ':
return unichr(c)
if unicodedata.category(chr(c))[0] in 'LMNPSZ':
return chr(c)
def error_string(error_code):
return fdb.tuple.pack(('ERROR', str(error_code)))
return fdb.tuple.pack((b'ERROR', bytes(str(error_code), 'utf-8')))
def blocking_commit(instructions):

View File

@ -86,6 +86,6 @@ class TupleTest(Test):
def get_result_specifications(self):
return [
ResultSpecification(self.workspace, global_error_filter=[1007, 1021]),
ResultSpecification(self.stack_subspace, key_start_index=1, ordering_index=1, global_error_filter=[1007, 1021]),
ResultSpecification(self.workspace, global_error_filter=[1007, 1009, 1021]),
ResultSpecification(self.stack_subspace, key_start_index=1, ordering_index=1, global_error_filter=[1007, 1009, 1021]),
]

View File

@ -29,15 +29,15 @@ import fdb
def initialize_logger_level(logging_level):
logger = get_logger()
assert logging_level in ["DEBUG", "INFO", "WARNING", "ERROR"]
assert logging_level in ['DEBUG', 'INFO', 'WARNING', 'ERROR']
if logging_level == "DEBUG":
if logging_level == 'DEBUG':
logger.setLevel(logging.DEBUG)
elif logging_level == "INFO":
elif logging_level == 'INFO':
logger.setLevel(logging.INFO)
elif logging_level == "WARNING":
elif logging_level == 'WARNING':
logger.setLevel(logging.WARNING)
elif logging_level == "ERROR":
elif logging_level == 'ERROR':
logger.setLevel(logging.ERROR)
@ -49,7 +49,7 @@ def get_logger():
def signal_number_to_name(signal_num):
name = []
for key in signal.__dict__.keys():
if key.startswith("SIG") and getattr(signal, key) == signal_num:
if key.startswith('SIG') and getattr(signal, key) == signal_num:
name.append(key)
if len(name) == 1:
return name[0]

View File

@ -13,6 +13,8 @@ if(APPLE)
elseif(WIN32)
set(platform "windows")
set(asm_file ${CMAKE_CURRENT_BINARY_DIR}/fdb_c.g.asm)
elseif(CMAKE_SYSTEM_NAME MATCHES "Linux" AND CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64")
set(platform "linux-aarch64")
endif()
add_custom_command(OUTPUT ${asm_file} ${CMAKE_CURRENT_BINARY_DIR}/fdb_c_function_pointers.g.h
@ -68,9 +70,7 @@ if(NOT WIN32)
test/mako/mako.c
test/mako/mako.h
test/mako/utils.c
test/mako/utils.h
test/mako/zipf.c
test/mako/zipf.h)
test/mako/utils.h)
if(OPEN_FOR_IDE)
add_library(fdb_c_performance_test OBJECT test/performance_test.c test/test.h)

View File

@ -34,6 +34,10 @@ BOOL WINAPI DllMain( HINSTANCE dll, DWORD reason, LPVOID reserved ) {
#elif defined( __unixish__ )
#ifdef __INTEL_COMPILER
#pragma warning ( disable:2415 )
#endif
static pthread_key_t threadDestructorKey;
static void threadDestructor(void*) {
@ -57,4 +61,4 @@ static int threadDestructorKeyInit = initThreadDestructorKey();
#else
#error Port me!
#endif
#endif

View File

@ -18,7 +18,7 @@
* limitations under the License.
*/
#define FDB_API_VERSION 620
#define FDB_API_VERSION 630
#define FDB_INCLUDE_LEGACY_TYPES
#include "fdbclient/MultiVersionTransaction.h"
@ -44,8 +44,9 @@ int g_api_version = 0;
// Legacy (pre API version 610)
#define CLUSTER(c) ((char*)c)
/*
* While we could just use the MultiVersionApi instance directly, this #define allows us to swap in any other IClientApi instance (e.g. from ThreadSafeApi)
/*
* While we could just use the MultiVersionApi instance directly, this #define allows us to swap in any other IClientApi
* instance (e.g. from ThreadSafeApi)
*/
#define API ((IClientApi*)MultiVersionApi::api)
@ -74,12 +75,10 @@ fdb_bool_t fdb_error_predicate( int predicate_test, fdb_error_t code ) {
code == error_code_cluster_version_changed;
}
if(predicate_test == FDBErrorPredicates::RETRYABLE_NOT_COMMITTED) {
return code == error_code_not_committed ||
code == error_code_transaction_too_old ||
code == error_code_future_version ||
code == error_code_database_locked ||
code == error_code_proxy_memory_limit_exceeded ||
code == error_code_process_behind;
return code == error_code_not_committed || code == error_code_transaction_too_old ||
code == error_code_future_version || code == error_code_database_locked ||
code == error_code_proxy_memory_limit_exceeded || code == error_code_batch_transaction_throttled ||
code == error_code_process_behind || code == error_code_tag_throttled;
}
return false;
}
@ -450,7 +449,7 @@ FDBFuture* fdb_transaction_get_range_impl(
/* _ITERATOR mode maps to one of the known streaming modes
depending on iteration */
static const int mode_bytes_array[] = {CLIENT_KNOBS->BYTE_LIMIT_UNLIMITED, 256, 1000, 4096, 80000};
const int mode_bytes_array[] = { CLIENT_KNOBS->BYTE_LIMIT_UNLIMITED, 256, 1000, 4096, 80000 };
/* The progression used for FDB_STREAMING_MODE_ITERATOR.
Goes from small -> medium -> large. Then 1.5 * previous until serial. */
@ -628,6 +627,13 @@ fdb_error_t fdb_transaction_add_conflict_range( FDBTransaction*tr, uint8_t const
}
extern "C" DLLEXPORT
FDBFuture* fdb_transaction_get_estimated_range_size_bytes( FDBTransaction* tr, uint8_t const* begin_key_name,
int begin_key_name_length, uint8_t const* end_key_name, int end_key_name_length ) {
KeyRangeRef range(KeyRef(begin_key_name, begin_key_name_length), KeyRef(end_key_name, end_key_name_length));
return (FDBFuture*)(TXN(tr)->getEstimatedRangeSizeBytes(range).extractPtr());
}
#include "fdb_c_function_pointers.g.h"
#define FDB_API_CHANGED(func, ver) if (header_version < ver) fdb_api_ptr_##func = (void*)&(func##_v##ver##_PREV); else if (fdb_api_ptr_##func == (void*)&fdb_api_ptr_unimpl) fdb_api_ptr_##func = (void*)&(func##_impl);

View File

@ -1,126 +0,0 @@
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="14.1" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<PropertyGroup Condition="'$(Release)' != 'true' ">
</PropertyGroup>
<PropertyGroup Condition="'$(Release)' == 'true' ">
<PreprocessorDefinitions>FDB_CLEAN_BUILD;%(PreprocessorDefinitions)</PreprocessorDefinitions>
</PropertyGroup>
<ItemGroup Label="ProjectConfigurations">
<ProjectConfiguration Include="Debug|x64">
<Configuration>Debug</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|x64">
<Configuration>Release</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
</ItemGroup>
<ItemGroup>
<ClInclude Include="fdb_c_function_pointers.g.h" />
<ClInclude Include="foundationdb\fdb_c.h" />
</ItemGroup>
<ItemGroup>
<ClCompile Include="fdb_c.cpp" />
<ClCompile Include="ThreadCleanup.cpp" />
</ItemGroup>
<ItemGroup>
<MASM Include="fdb_c.g.asm" />
</ItemGroup>
<ItemGroup>
<None Include="generate_asm.py" />
</ItemGroup>
<PropertyGroup Label="Globals">
<ProjectGuid>{CACB2C8E-3E55-4309-A411-2A9C56C6C1CB}</ProjectGuid>
<RootNamespace>c</RootNamespace>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
<ConfigurationType>DynamicLibrary</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<CharacterSet>MultiByte</CharacterSet>
<PlatformToolset>v141</PlatformToolset>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
<ConfigurationType>DynamicLibrary</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>MultiByte</CharacterSet>
<PlatformToolset>v141</PlatformToolset>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
<ImportGroup Label="ExtensionSettings">
<Import Project="$(VCTargetsPath)\BuildCustomizations\masm.props" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<PropertyGroup Label="UserMacros" />
<ItemDefinitionGroup>
<PreBuildEvent>
</PreBuildEvent>
<PostBuildEvent>
<Command>
FOR /F "tokens=1" %%i in ('hg.exe id') do copy /Y "$(TargetPath)" "$(TargetPath)-%%i"</Command>
</PostBuildEvent>
</ItemDefinitionGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<IncludePath>..\..\;C:\Program Files\boost_1_67_0;$(IncludePath)</IncludePath>
<OutDir>$(SolutionDir)bin\$(Configuration)\</OutDir>
<IntDir>$(SystemDrive)\temp\msvcfdb\$(Platform)$(Configuration)\$(MSBuildProjectName)\</IntDir>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<OutDir>$(SolutionDir)bin\$(Configuration)\</OutDir>
<IntDir>$(SystemDrive)\temp\msvcfdb\$(Platform)$(Configuration)\$(MSBuildProjectName)\</IntDir>
<IncludePath>..\..\;C:\Program Files\boost_1_67_0;$(IncludePath)</IncludePath>
</PropertyGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=_WIN32_WINNT_WS03;BOOST_ALL_NO_LIB;WINVER=_WIN32_WINNT_WS03;NTDDI_VERSION=NTDDI_WS03;_DEBUG;_HAS_ITERATOR_DEBUGGING=0;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
<AdditionalOptions>/bigobj @..\..\flow/no_intellisense.opt %(AdditionalOptions)</AdditionalOptions>
<LanguageStandard>stdcpp17</LanguageStandard>
</ClCompile>
<Link>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalDependencies>$(SolutionDir)bin\$(Configuration)\fdbclient.lib;%(AdditionalDependencies)</AdditionalDependencies>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=_WIN32_WINNT_WS03;BOOST_ALL_NO_LIB;WINVER=_WIN32_WINNT_WS03;NTDDI_VERSION=NTDDI_WS03;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<RuntimeLibrary>MultiThreaded</RuntimeLibrary>
<AdditionalOptions>/bigobj @..\..\flow/no_intellisense.opt %(AdditionalOptions)</AdditionalOptions>
<LanguageStandard>stdcpp17</LanguageStandard>
</ClCompile>
<Link>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
<AdditionalDependencies>$(SolutionDir)bin\$(Configuration)\fdbclient.lib;%(AdditionalDependencies)</AdditionalDependencies>
</Link>
</ItemDefinitionGroup>
<PropertyGroup>
<CustomBuildBeforeTargets>_MASM;ClCompile</CustomBuildBeforeTargets>
</PropertyGroup>
<ItemDefinitionGroup>
<CustomBuildStep>
<Command>c:\Python27\python.exe "$(ProjectDir)/generate_asm.py" windows "$(ProjectDir)/fdb_c.cpp" "$(ProjectDir)/fdb_c.g.asm" "$(ProjectDir)/fdb_c_function_pointers.g.h"</Command>
<Message>Generating API trampolines</Message>
<Outputs>$(ProjectDir)/fdb_c_function_pointers.g.h;$(ProjectDir)/fdb_c.g.asm</Outputs>
<Inputs>$(ProjectDir)/fdb_c.cpp;$(ProjectDir)/generate_asm.py</Inputs>
</CustomBuildStep>
</ItemDefinitionGroup>
<ImportGroup Label="ExtensionTargets">
<Import Project="$(VCTargetsPath)\BuildCustomizations\masm.targets" />
</ImportGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
</Project>

View File

@ -28,10 +28,10 @@
#endif
#if !defined(FDB_API_VERSION)
#error You must #define FDB_API_VERSION prior to including fdb_c.h (current version is 620)
#error You must #define FDB_API_VERSION prior to including fdb_c.h (current version is 630)
#elif FDB_API_VERSION < 13
#error API version no longer supported (upgrade to 13)
#elif FDB_API_VERSION > 620
#elif FDB_API_VERSION > 630
#error Requested API version requires a newer version of this header
#endif
@ -91,12 +91,21 @@ extern "C" {
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_add_network_thread_completion_hook(void (*hook)(void*), void *hook_parameter);
#pragma pack(push, 4)
#if FDB_API_VERSION >= 630
typedef struct keyvalue {
const uint8_t* key;
int key_length;
const uint8_t* value;
int value_length;
} FDBKeyValue;
#else
typedef struct keyvalue {
const void* key;
int key_length;
const void* value;
int value_length;
} FDBKeyValue;
#endif
#pragma pack(pop)
DLLEXPORT void fdb_future_cancel( FDBFuture* f );
@ -247,6 +256,10 @@ extern "C" {
int end_key_name_length,
FDBConflictRangeType type);
DLLEXPORT WARN_UNUSED_RESULT FDBFuture*
fdb_transaction_get_estimated_range_size_bytes( FDBTransaction* tr, uint8_t const* begin_key_name,
int begin_key_name_length, uint8_t const* end_key_name, int end_key_name_length);
#define FDB_KEYSEL_LAST_LESS_THAN(k, l) k, l, 0, 0
#define FDB_KEYSEL_LAST_LESS_OR_EQUAL(k, l) k, l, 1, 0
#define FDB_KEYSEL_FIRST_GREATER_THAN(k, l) k, l, 1, 1

View File

@ -59,9 +59,10 @@ def write_windows_asm(asmfile, functions):
def write_unix_asm(asmfile, functions, prefix):
asmfile.write(".intel_syntax noprefix\n")
if platform != "linux-aarch64":
asmfile.write(".intel_syntax noprefix\n")
if platform == "linux":
if platform.startswith('linux') or platform == "freebsd":
asmfile.write("\n.data\n")
for f in functions:
asmfile.write("\t.extern fdb_api_ptr_%s\n" % f)
@ -73,10 +74,15 @@ def write_unix_asm(asmfile, functions, prefix):
for f in functions:
asmfile.write("\n.globl %s%s\n" % (prefix, f))
asmfile.write("%s%s:\n" % (prefix, f))
asmfile.write(
"\tmov r11, qword ptr [%sfdb_api_ptr_%s@GOTPCREL+rip]\n" % (prefix, f))
asmfile.write("\tmov r11, qword ptr [r11]\n")
asmfile.write("\tjmp r11\n")
if platform == "linux-aarch64":
asmfile.write("\tldr x16, =fdb_api_ptr_%s\n" % (f))
asmfile.write("\tldr x16, [x16]\n")
asmfile.write("\tbr x16\n")
else:
asmfile.write(
"\tmov r11, qword ptr [%sfdb_api_ptr_%s@GOTPCREL+rip]\n" % (prefix, f))
asmfile.write("\tmov r11, qword ptr [r11]\n")
asmfile.write("\tjmp r11\n")
with open(asm, 'w') as asmfile:
@ -86,7 +92,7 @@ with open(asm, 'w') as asmfile:
hfile.write(
"void fdb_api_ptr_removed() { fprintf(stderr, \"REMOVED FDB API FUNCTION\\n\"); abort(); }\n\n")
if platform == "linux":
if platform.startswith('linux'):
write_unix_asm(asmfile, functions, '')
elif platform == "osx":
write_unix_asm(asmfile, functions, '_')

View File

@ -1,113 +0,0 @@
#
# local.mk
#
# This source file is part of the FoundationDB open source project
#
# Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- mode: makefile; -*-
fdb_c_CFLAGS := $(fdbclient_CFLAGS)
fdb_c_LDFLAGS := $(fdbrpc_LDFLAGS)
fdb_c_LIBS := lib/libfdbclient.a lib/libfdbrpc.a lib/libflow.a $(FDB_TLS_LIB)
fdb_c_STATIC_LIBS := $(TLS_LIBS)
fdb_c_tests_LIBS := -Llib -lfdb_c -lstdc++
fdb_c_tests_HEADERS := -Ibindings/c
CLEAN_TARGETS += fdb_c_tests_clean
ifeq ($(PLATFORM),linux)
fdb_c_LDFLAGS += -Wl,--version-script=bindings/c/fdb_c.map -static-libgcc -Wl,-z,nodelete -lm -lpthread -lrt -ldl
# Link our custom libstdc++ statically in Ubuntu, if hacking
ifeq ("$(wildcard /etc/centos-release)", "")
ifeq ($(LIBSTDCPP_HACK),1)
fdb_c_LIBS += lib/libstdc++.a
endif
# Link stdc++ statically in Centos, if not hacking
else
fdb_c_STATIC_LIBS += -static-libstdc++
endif
fdb_c_tests_LIBS += -lpthread
endif
ifeq ($(PLATFORM),osx)
fdb_c_LDFLAGS += -lc++ -Xlinker -exported_symbols_list -Xlinker bindings/c/fdb_c.symbols
fdb_c_tests_LIBS += -lpthread
lib/libfdb_c.dylib: bindings/c/fdb_c.symbols
bindings/c/fdb_c.symbols: bindings/c/foundationdb/fdb_c.h $(ALL_MAKEFILES)
@awk '{sub(/^[ \t]+/, "");} /^#/ {next;} /DLLEXPORT\ .*[^ ]\(/ {sub(/\(.*/, ""); print "_" $$NF; next;} /DLLEXPORT/ { DLLEXPORT=1; next;} DLLEXPORT==1 {sub(/\(.*/, ""); print "_" $$0; DLLEXPORT=0}' $< | sort | uniq > $@
fdb_c_clean: fdb_c_symbols_clean
fdb_c_symbols_clean:
@rm -f bindings/c/fdb_c.symbols
fdb_javac_release: lib/libfdb_c.$(DLEXT)
mkdir -p lib
rm -f lib/libfdb_c.$(java_DLEXT)-*
cp lib/libfdb_c.$(DLEXT) lib/libfdb_c.$(DLEXT)-$(VERSION_ID)
cp lib/libfdb_c.$(DLEXT)-debug lib/libfdb_c.$(DLEXT)-debug-$(VERSION_ID)
fdb_javac_release_clean:
rm -f lib/libfdb_c.$(DLEXT)-*
rm -f lib/libfdb_c.$(javac_DLEXT)-*
# OS X needs to put its java lib in packages
packages: fdb_javac_lib_package
fdb_javac_lib_package: lib/libfdb_c.dylib
mkdir -p packages
cp lib/libfdb_c.$(DLEXT) packages/libfdb_c.$(DLEXT)-$(VERSION_ID)
cp lib/libfdb_c.$(DLEXT)-debug packages/libfdb_c.$(DLEXT)-debug-$(VERSION_ID)
endif
fdb_c_GENERATED_SOURCES += bindings/c/foundationdb/fdb_c_options.g.h bindings/c/fdb_c.g.S bindings/c/fdb_c_function_pointers.g.h
bindings/c/%.g.S bindings/c/%_function_pointers.g.h: bindings/c/%.cpp bindings/c/generate_asm.py $(ALL_MAKEFILES)
@echo "Scanning $<"
@bindings/c/generate_asm.py $(PLATFORM) bindings/c/fdb_c.cpp bindings/c/fdb_c.g.S bindings/c/fdb_c_function_pointers.g.h
.PRECIOUS: bindings/c/fdb_c_function_pointers.g.h
fdb_c_BUILD_SOURCES += bindings/c/fdb_c.g.S
bindings/c/foundationdb/fdb_c_options.g.h: bin/vexillographer.exe fdbclient/vexillographer/fdb.options $(ALL_MAKEFILES)
@echo "Building $@"
@$(MONO) bin/vexillographer.exe fdbclient/vexillographer/fdb.options c $@
bin/fdb_c_performance_test: bindings/c/test/performance_test.c bindings/c/test/test.h fdb_c
@echo "Compiling fdb_c_performance_test"
@$(CC) $(CFLAGS) $(fdb_c_tests_HEADERS) -o $@ -c bindings/c/test/performance_test.c
bin/fdb_c_ryw_benchmark: bindings/c/test/ryw_benchmark.c bindings/c/test/test.h fdb_c
@echo "Compiling fdb_c_ryw_benchmark"
@$(CC) $(CFLAGS) $(fdb_c_tests_HEADERS) -o $@ -c bindings/c/test/ryw_benchmark.c
packages/fdb-c-tests-$(VERSION)-$(PLATFORM).tar.gz: bin/fdb_c_performance_test bin/fdb_c_ryw_benchmark
@echo "Packaging $@"
@rm -rf packages/fdb-c-tests-$(VERSION)-$(PLATFORM)
@mkdir -p packages/fdb-c-tests-$(VERSION)-$(PLATFORM)/bin
@cp bin/fdb_c_performance_test packages/fdb-c-tests-$(VERSION)-$(PLATFORM)/bin
@cp bin/fdb_c_ryw_benchmark packages/fdb-c-tests-$(VERSION)-$(PLATFORM)/bin
@tar -C packages -czvf $@ fdb-c-tests-$(VERSION)-$(PLATFORM) > /dev/null
@rm -rf packages/fdb-c-tests-$(VERSION)-$(PLATFORM)
fdb_c_tests: packages/fdb-c-tests-$(VERSION)-$(PLATFORM).tar.gz
fdb_c_tests_clean:
@rm -f packages/fdb-c-tests-$(VERSION)-$(PLATFORM).tar.gz

3281
bindings/c/test/mako/mako.c Executable file → Normal file

File diff suppressed because it is too large Load Diff

160
bindings/c/test/mako/mako.h Executable file → Normal file
View File

@ -3,7 +3,7 @@
#pragma once
#ifndef FDB_API_VERSION
#define FDB_API_VERSION 620
#define FDB_API_VERSION 630
#endif
#include <foundationdb/fdb_c.h>
@ -17,8 +17,6 @@
#include <limits.h>
#endif
#define DEFAULT_RETRY_COUNT 3
#define VERBOSE_NONE 0
#define VERBOSE_DEFAULT 1
#define VERBOSE_ANNOYING 2
@ -29,74 +27,97 @@
#define MODE_BUILD 1
#define MODE_RUN 2
/* we set mako_txn_t and mako_args_t only once in the master process,
* and won't be touched by child processes.
*/
#define FDB_SUCCESS 0
#define FDB_ERROR_RETRY -1
#define FDB_ERROR_ABORT -2
#define FDB_ERROR_CONFLICT -3
/* transaction specification */
#define OP_GETREADVERSION 0
#define OP_GET 1
#define OP_GETRANGE 2
#define OP_SGET 3
#define OP_SGETRANGE 4
#define OP_UPDATE 5
#define OP_INSERT 6
#define OP_INSERTRANGE 7
#define OP_CLEAR 8
#define OP_SETCLEAR 9
#define OP_CLEARRANGE 10
#define OP_SETCLEARRANGE 11
#define OP_COMMIT 12
#define MAX_OP 13 /* update this when adding a new operation */
enum Operations {
OP_GETREADVERSION,
OP_GET,
OP_GETRANGE,
OP_SGET,
OP_SGETRANGE,
OP_UPDATE,
OP_INSERT,
OP_INSERTRANGE,
OP_CLEAR,
OP_SETCLEAR,
OP_CLEARRANGE,
OP_SETCLEARRANGE,
OP_COMMIT,
MAX_OP /* must be the last item */
};
#define OP_COUNT 0
#define OP_RANGE 1
#define OP_REVERSE 2
/* for arguments */
#define ARG_KEYLEN 1
#define ARG_VALLEN 2
#define ARG_TPS 3
#define ARG_COMMITGET 4
#define ARG_SAMPLING 5
#define ARG_VERSION 6
#define ARG_KNOBS 7
#define ARG_FLATBUFFERS 8
#define ARG_TRACE 9
#define ARG_TRACEPATH 10
/* for long arguments */
enum Arguments {
ARG_KEYLEN,
ARG_VALLEN,
ARG_TPS,
ARG_COMMITGET,
ARG_SAMPLING,
ARG_VERSION,
ARG_KNOBS,
ARG_FLATBUFFERS,
ARG_TRACE,
ARG_TRACEPATH,
ARG_TRACEFORMAT,
ARG_TPSMAX,
ARG_TPSMIN,
ARG_TPSINTERVAL,
ARG_TPSCHANGE,
ARG_TXNTRACE
};
enum TPSChangeTypes { TPS_SIN, TPS_SQUARE, TPS_PULSE };
#define KEYPREFIX "mako"
#define KEYPREFIXLEN 4
/* we set mako_txnspec_t and mako_args_t only once in the master process,
* and won't be touched by child processes.
*/
typedef struct {
/* for each operation, it stores "count", "range" and "reverse" */
int ops[MAX_OP][3];
/* for each operation, it stores "count", "range" and "reverse" */
int ops[MAX_OP][3];
} mako_txnspec_t;
#define KNOB_MAX 256
/* benchmark parameters */
typedef struct {
int json;
int num_processes;
int num_threads;
int mode;
int rows; /* is 2 billion enough? */
int seconds;
int iteration;
int tps;
int sampling;
int key_length;
int value_length;
int zipf;
int commit_get;
int verbose;
mako_txnspec_t txnspec;
char cluster_file[PATH_MAX];
int trace;
char tracepath[PATH_MAX];
char knobs[KNOB_MAX];
uint8_t flatbuffers;
int api_version;
int json;
int num_processes;
int num_threads;
int mode;
int rows; /* is 2 billion enough? */
int seconds;
int iteration;
int tpsmax;
int tpsmin;
int tpsinterval;
int tpschange;
int sampling;
int key_length;
int value_length;
int zipf;
int commit_get;
int verbose;
mako_txnspec_t txnspec;
char cluster_file[PATH_MAX];
int trace;
char tracepath[PATH_MAX];
int traceformat; /* 0 - XML, 1 - JSON */
char knobs[KNOB_MAX];
uint8_t flatbuffers;
int txntrace;
} mako_args_t;
/* shared memory */
@ -105,33 +126,34 @@ typedef struct {
#define SIGNAL_OFF 2
typedef struct {
int signal;
int readycount;
int signal;
int readycount;
double throttle_factor;
} mako_shmhdr_t;
typedef struct {
uint64_t xacts;
uint64_t conflicts;
uint64_t ops[MAX_OP];
uint64_t errors[MAX_OP];
uint64_t latency_samples[MAX_OP];
uint64_t latency_us_total[MAX_OP];
uint64_t latency_us_min[MAX_OP];
uint64_t latency_us_max[MAX_OP];
uint64_t xacts;
uint64_t conflicts;
uint64_t ops[MAX_OP];
uint64_t errors[MAX_OP];
uint64_t latency_samples[MAX_OP];
uint64_t latency_us_total[MAX_OP];
uint64_t latency_us_min[MAX_OP];
uint64_t latency_us_max[MAX_OP];
} mako_stats_t;
/* per-process information */
typedef struct {
int worker_id;
FDBDatabase *database;
mako_args_t *args;
mako_shmhdr_t *shm;
int worker_id;
FDBDatabase* database;
mako_args_t* args;
mako_shmhdr_t* shm;
} process_info_t;
/* args for threads */
typedef struct {
int thread_id;
process_info_t *process;
int thread_id;
process_info_t* process;
} thread_args_t;
/* process type */

View File

@ -1,27 +1,27 @@
##############
mako Benchmark
🦈 Mako Benchmark
##############
| mako (named after a small, but very fast shark) is a micro-benchmark for FoundationDB
| Mako (named after a very fast shark) is a micro-benchmark for FoundationDB
| which is designed to be very light and flexible
| so that you can stress a particular part of an FoundationDB cluster without introducing unnecessary overhead.
How to Build
============
| ``mako`` gets build automatically when you build FoundationDB.
| ``mako`` gets built automatically when you build FoundationDB.
| To build ``mako`` manually, simply build ``mako`` target in the FoundationDB build directory.
| e.g. If you're using Unix Makefiles
| e.g. If you're using Unix Makefiles, type:
| ``make mako``
Architecture
============
- mako is a stand-alone program written in C,
which communicates to FoundationDB using C binding API (``libfdb_c.so``)
- It creates one master process, and one or more worker processes (multi-process)
- Each worker process creates one or more multiple threads (multi-thread)
- All threads within the same process share the same network thread
which communicates to FoundationDB using C API (via ``libfdb_c.so``)
- It creates one master process, one stats emitter process, and one or more worker processes (multi-process)
- Each worker process creates one FDB network thread, and one or more worker threads (multi-thread)
- All worker threads within the same process share the same network thread
Data Specification
@ -32,7 +32,7 @@ Data Specification
Arguments
=========
- | ``--mode <mode>``
- | ``-m | --mode <mode>``
| One of the following modes must be specified. (Required)
| - ``clean``: Clean up existing data
| - ``build``: Populate data
@ -41,6 +41,9 @@ Arguments
- | ``-c | --cluster <cluster file>``
| FDB cluster file (Required)
- | ``-a | --api_version <api_version>``
| FDB API version to use (Default: Latest)
- | ``-p | --procs <procs>``
| Number of worker processes (Default: 1)
@ -48,7 +51,7 @@ Arguments
| Number of threads per worker process (Default: 1)
- | ``-r | --rows <rows>``
| Number of rows populated (Default: 10000)
| Number of rows initially populated (Default: 100000)
- | ``-s | --seconds <seconds>``
| Test duration in seconds (Default: 30)
@ -58,12 +61,23 @@ Arguments
| Specify the number of operations to be executed.
| This option cannot be set with ``--seconds``.
- | ``--tps <tps>``
| Target total transaction-per-second (TPS) of all worker processes/threads
- | ``--tps|--tpsmax <tps>``
| Target total transaction-per-second (TPS) of all worker processes/threads.
| When --tpsmin is also specified, this defines the upper-bound TPS.
| (Default: Unset / Unthrottled)
- | ``--tpsmin <tps>``
| Target total lower-bound TPS of all worker processes/threads
| (Default: Unset / Unthrottled)
- | ``--tpsinterval <seconds>``
| Time period TPS oscillates between --tpsmax and --tpsmin (Default: 10)
- | ``--tpschange <sin|square|pulse>``
| Shape of the TPS change (Default: sin)
- | ``--keylen <num>``
| Key string length in bytes (Default and Minimum: 16)
| Key string length in bytes (Default and Minimum: 32)
- | ``--vallen <num>``
| Value string length in bytes (Default and Minimum: 16)
@ -75,22 +89,19 @@ Arguments
| Generate a skewed workload based on Zipf distribution (Default: Unset = Uniform)
- | ``--sampling <num>``
| Sampling rate (1 sample / <num> ops) for latency stats
| Sampling rate (1 sample / <num> ops) for latency stats (Default: 1000)
- | ``--trace``
| Enable tracing. The trace file will be created in the current directory.
| Enable tracing. The trace file will be created in the current directory. (Default: Unset)
- | ``--tracepath <path>``
| Enable tracing and set the trace file path.
- | ``--knobs <knobs>``
| Set client knobs
- | ``--flatbuffers``
| Enable flatbuffers
| Set client knobs (comma-separated)
- | ``--commitget``
| Force commit for read-only transactions
| Force commit for read-only transactions (Default: Unset)
- | ``-v | --verbose <level>``
| Set verbose level (Default: 1)
@ -102,10 +113,10 @@ Arguments
Transaction Specification
=========================
| A transaction may contain multiple operations of multiple types.
| A transaction may contain multiple operations of various types.
| You can specify multiple operations for one operation type by specifying "Count".
| For RANGE operations, "Range" needs to be specified in addition to "Count".
| Every transaction is committed unless it contains only GET / GET RANGE operations.
| For RANGE operations, the "Range" needs to be specified in addition to "Count".
| Every transaction is committed unless the transaction is read-only.
Operation Types
---------------
@ -126,21 +137,22 @@ Format
------
| One operation type is defined as ``<Type><Count>`` or ``<Type><Count>:<Range>``.
| When Count is omitted, it's equivalent to setting it to 1. (e.g. ``g`` is equivalent to ``g1``)
| Multiple operation types can be concatenated. (e.g. ``g9u1`` = 9 GETs and 1 update)
| Multiple operation types within the same trancaction can be concatenated. (e.g. ``g9u1`` = 9 GETs and 1 update)
Transaction Specification Examples
----------------------------------
- | 100 GETs (No Commit)
- | 100 GETs (Non-commited)
| ``g100``
- | 10 GET RANGE with Range of 50 (No Commit)
- | 10 GET RANGE with Range of 50 (Non-commited)
| ``gr10:50``
- | 90 GETs and 10 Updates (Committed)
| ``g90u10``
- | 80 GETs, 10 Updates and 10 Inserts (Committed)
| ``g90u10i10``
- | 70 GETs, 10 Updates and 10 Inserts (Committed)
| ``g70u10i10``
| This is 80-20.
Execution Examples
@ -149,12 +161,14 @@ Execution Examples
Preparation
-----------
- Start the FoundationDB cluster and create a database
- Set LD_LIBRARY_PATH pointing to a proper ``libfdb_c.so``
- Set ``LD_LIBRARY_PATH`` environment variable pointing to a proper ``libfdb_c.so`` shared library
Build
-----
Populate Initial Database
-------------------------
``mako --cluster /etc/foundationdb/fdb.cluster --mode build --rows 1000000 --procs 4``
Note: You may be able to speed up the data population by increasing the number of processes or threads.
Run
---
Run a mixed workload with a total of 8 threads for 60 seconds, keeping the throughput limited to 1000 TPS.
``mako --cluster /etc/foundationdb/fdb.cluster --mode run --rows 1000000 --procs 2 --threads 8 --transaction "g8ui" --seconds 60 --tps 1000``

92
bindings/c/test/mako/utils.c Executable file → Normal file
View File

@ -1,81 +1,79 @@
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "utils.h"
#include "mako.h"
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
/* uniform-distribution random */
int urand(int low, int high) {
double r = rand() / (1.0 + RAND_MAX);
int range = high - low + 1;
return (int)((r * range) + low);
double r = rand() / (1.0 + RAND_MAX);
int range = high - low + 1;
return (int)((r * range) + low);
}
/* random string */
/* len is the buffer size, must include null */
void randstr(char *str, int len) {
int i;
for (i = 0; i < len-1; i++) {
str[i] = '!' + urand(0, 'z'-'!'); /* generage a char from '!' to 'z' */
}
str[len-1] = '\0';
void randstr(char* str, int len) {
int i;
for (i = 0; i < len - 1; i++) {
str[i] = '!' + urand(0, 'z' - '!'); /* generage a char from '!' to 'z' */
}
str[len - 1] = '\0';
}
/* random numeric string */
/* len is the buffer size, must include null */
void randnumstr(char *str, int len) {
int i;
for (i = 0; i < len-1; i++) {
str[i] = '0' + urand(0, 9); /* generage a char from '!' to 'z' */
}
str[len-1] = '\0';
void randnumstr(char* str, int len) {
int i;
for (i = 0; i < len - 1; i++) {
str[i] = '0' + urand(0, 9); /* generage a char from '!' to 'z' */
}
str[len - 1] = '\0';
}
/* return the first key to be inserted */
int insert_begin(int rows, int p_idx, int t_idx, int total_p, int total_t) {
double interval = (double)rows / total_p / total_t;
return (int)(round(interval * ((p_idx * total_t) + t_idx)));
double interval = (double)rows / total_p / total_t;
return (int)(round(interval * ((p_idx * total_t) + t_idx)));
}
/* return the last key to be inserted */
int insert_end(int rows, int p_idx, int t_idx, int total_p, int total_t) {
double interval = (double)rows / total_p / total_t;
return (int)(round(interval * ((p_idx * total_t) + t_idx + 1) - 1));
double interval = (double)rows / total_p / total_t;
return (int)(round(interval * ((p_idx * total_t) + t_idx + 1) - 1));
}
/* devide val equally among threads */
int compute_thread_portion(int val, int p_idx, int t_idx, int total_p, int total_t) {
int interval = val / total_p / total_t;
int remaining = val - (interval * total_p * total_t);
if ((p_idx * total_t + t_idx) < remaining) {
return interval+1;
} else if (interval == 0) {
return -1;
}
/* else */
return interval;
int interval = val / total_p / total_t;
int remaining = val - (interval * total_p * total_t);
if ((p_idx * total_t + t_idx) < remaining) {
return interval + 1;
} else if (interval == 0) {
return -1;
}
/* else */
return interval;
}
/* number of digits */
int digits(int num) {
int digits = 0;
while (num > 0) {
num /= 10;
digits++;
}
return digits;
int digits = 0;
while (num > 0) {
num /= 10;
digits++;
}
return digits;
}
/* generate a key for a given key number */
/* len is the buffer size, key length + null */
void genkey(char *str, int num, int rows, int len) {
int i;
int rowdigit = digits(rows);
sprintf(str, KEYPREFIX "%0.*d", rowdigit, num);
for (i = (KEYPREFIXLEN + rowdigit); i < len-1; i++) {
str[i] = 'x';
}
str[len-1] = '\0';
void genkey(char* str, int num, int rows, int len) {
int i;
int rowdigit = digits(rows);
sprintf(str, KEYPREFIX "%0.*d", rowdigit, num);
for (i = (KEYPREFIXLEN + rowdigit); i < len - 1; i++) {
str[i] = 'x';
}
str[len - 1] = '\0';
}

17
bindings/c/test/mako/utils.h Executable file → Normal file
View File

@ -9,12 +9,12 @@ int urand(int low, int high);
/* write a random string of the length of (len-1) to memory pointed by str
* with a null-termination character at str[len-1].
*/
void randstr(char *str, int len);
void randstr(char* str, int len);
/* write a random numeric string of the length of (len-1) to memory pointed by str
* with a null-termination character at str[len-1].
*/
void randnumstr(char *str, int len);
void randnumstr(char* str, int len);
/* given the total number of rows to be inserted,
* the worker process index p_idx and the thread index t_idx (both 0-based),
@ -27,26 +27,25 @@ int insert_begin(int rows, int p_idx, int t_idx, int total_p, int total_t);
int insert_end(int rows, int p_idx, int t_idx, int total_p, int total_t);
/* devide a value equally among threads */
int compute_thread_portion(int val, int p_idx, int t_idx, int total_p,
int total_t);
int compute_thread_portion(int val, int p_idx, int t_idx, int total_p, int total_t);
/* similar to insert_begin/end, compute_thread_tps computes
* the per-thread target TPS for given configuration.
*/
#define compute_thread_tps(val, p_idx, t_idx, total_p, total_t) \
compute_thread_portion(val, p_idx, t_idx, total_p, total_t)
#define compute_thread_tps(val, p_idx, t_idx, total_p, total_t) \
compute_thread_portion(val, p_idx, t_idx, total_p, total_t)
/* similar to compute_thread_tps,
* compute_thread_iters computs the number of iterations.
*/
#define compute_thread_iters(val, p_idx, t_idx, total_p, total_t) \
compute_thread_portion(val, p_idx, t_idx, total_p, total_t)
#define compute_thread_iters(val, p_idx, t_idx, total_p, total_t) \
compute_thread_portion(val, p_idx, t_idx, total_p, total_t)
/* get the number of digits */
int digits(int num);
/* generate a key for a given key number */
/* len is the buffer size, key length + null */
void genkey(char *str, int num, int rows, int len);
void genkey(char* str, int num, int rows, int len);
#endif /* UTILS_H */

View File

@ -603,7 +603,7 @@ void runTests(struct ResultSet *rs) {
int main(int argc, char **argv) {
srand(time(NULL));
struct ResultSet *rs = newResultSet();
checkError(fdb_select_api_version(620), "select API version", rs);
checkError(fdb_select_api_version(630), "select API version", rs);
printf("Running performance test at client version: %s\n", fdb_get_client_version());
valueStr = (uint8_t*)malloc((sizeof(uint8_t))*valueSize);

View File

@ -244,7 +244,7 @@ void runTests(struct ResultSet *rs) {
int main(int argc, char **argv) {
srand(time(NULL));
struct ResultSet *rs = newResultSet();
checkError(fdb_select_api_version(620), "select API version", rs);
checkError(fdb_select_api_version(630), "select API version", rs);
printf("Running RYW Benchmark test at client version: %s\n", fdb_get_client_version());
keys = generateKeys(numKeys, keySize);

View File

@ -29,7 +29,7 @@
#include <inttypes.h>
#ifndef FDB_API_VERSION
#define FDB_API_VERSION 620
#define FDB_API_VERSION 630
#endif
#include <foundationdb/fdb_c.h>
@ -236,7 +236,7 @@ void* runNetwork() {
FDBDatabase* openDatabase(struct ResultSet *rs, pthread_t *netThread) {
checkError(fdb_setup_network(), "setup network", rs);
pthread_create(netThread, NULL, &runNetwork, NULL);
pthread_create(netThread, NULL, (void*)(&runNetwork), NULL);
FDBDatabase *db;
checkError(fdb_create_database(NULL, &db), "create database", rs);

View File

@ -97,7 +97,7 @@ void runTests(struct ResultSet *rs) {
int main(int argc, char **argv) {
srand(time(NULL));
struct ResultSet *rs = newResultSet();
checkError(fdb_select_api_version(620), "select API version", rs);
checkError(fdb_select_api_version(630), "select API version", rs);
printf("Running performance test at client version: %s\n", fdb_get_client_version());
keys = generateKeys(numKeys, KEY_SIZE);

View File

@ -18,7 +18,7 @@
* limitations under the License.
*/
#define FDB_API_VERSION 620
#define FDB_API_VERSION 630
#include "foundationdb/fdb_c.h"
#undef DLLEXPORT
#include "workloads.h"
@ -258,7 +258,7 @@ struct SimpleWorkload : FDBWorkload {
insertsPerTx = context->getOption("insertsPerTx", 100ul);
opsPerTx = context->getOption("opsPerTx", 100ul);
runFor = context->getOption("runFor", 10.0);
auto err = fdb_select_api_version(620);
auto err = fdb_select_api_version(630);
if (err) {
context->trace(FDBSeverity::Info, "SelectAPIVersionFailed",
{ { "Error", std::string(fdb_get_error(err)) } });

View File

@ -18,6 +18,12 @@ set(SRCS
add_flow_target(STATIC_LIBRARY NAME fdb_flow SRCS ${SRCS})
target_link_libraries(fdb_flow PUBLIC fdb_c)
target_include_directories(fdb_flow PUBLIC
"${CMAKE_CURRENT_BINARY_DIR}"
"${CMAKE_CURRENT_SOURCE_DIR}"
"${CMAKE_CURRENT_SOURCE_DIR}/tester"
"${CMAKE_CURRENT_BINARY_DIR}/tester"
)
add_subdirectory(tester)

View File

@ -253,8 +253,8 @@ namespace FDB {
for(size_t i = 0; i < value.size(); i++) {
size_t offset = value.offsets[i];
size_t next_offset = (i+1 < value.offsets.size() ? value.offsets[i+1] : value.data.size());
ASSERT(offset < value.data.size());
ASSERT(next_offset <= value.data.size());
ASSERT_LT(offset, value.data.size());
ASSERT_LE(next_offset, value.data.size());
uint8_t code = value.data[offset];
if(code == NULL_CODE) {
data.push_back( data.arena(), NULL_CODE );
@ -363,7 +363,7 @@ namespace FDB {
int64_t swap;
bool neg = false;
ASSERT(offsets[index] < data.size());
ASSERT_LT(offsets[index], data.size());
uint8_t code = data[offsets[index]];
if(code < NEG_INT_START || code > POS_INT_END) {
throw invalid_tuple_data_type();
@ -392,7 +392,7 @@ namespace FDB {
if(index >= offsets.size()) {
throw invalid_tuple_index();
}
ASSERT(offsets[index] < data.size());
ASSERT_LT(offsets[index], data.size());
uint8_t code = data[offsets[index]];
if(code == FALSE_CODE) {
return false;
@ -407,7 +407,7 @@ namespace FDB {
if(index >= offsets.size()) {
throw invalid_tuple_index();
}
ASSERT(offsets[index] < data.size());
ASSERT_LT(offsets[index], data.size());
uint8_t code = data[offsets[index]];
if(code != FLOAT_CODE) {
throw invalid_tuple_data_type();
@ -415,7 +415,7 @@ namespace FDB {
float swap;
uint8_t* bytes = (uint8_t*)&swap;
ASSERT(offsets[index] + 1 + sizeof(float) <= data.size());
ASSERT_LE(offsets[index] + 1 + sizeof(float), data.size());
swap = *(float*)(data.begin() + offsets[index] + 1);
adjust_floating_point( bytes, sizeof(float), false );
@ -426,7 +426,7 @@ namespace FDB {
if(index >= offsets.size()) {
throw invalid_tuple_index();
}
ASSERT(offsets[index] < data.size());
ASSERT_LT(offsets[index], data.size());
uint8_t code = data[offsets[index]];
if(code != DOUBLE_CODE) {
throw invalid_tuple_data_type();
@ -434,7 +434,7 @@ namespace FDB {
double swap;
uint8_t* bytes = (uint8_t*)&swap;
ASSERT(offsets[index] + 1 + sizeof(double) <= data.size());
ASSERT_LE(offsets[index] + 1 + sizeof(double), data.size());
swap = *(double*)(data.begin() + offsets[index] + 1);
adjust_floating_point( bytes, sizeof(double), false );
@ -446,12 +446,12 @@ namespace FDB {
throw invalid_tuple_index();
}
size_t offset = offsets[index];
ASSERT(offset < data.size());
ASSERT_LT(offset, data.size());
uint8_t code = data[offset];
if(code != UUID_CODE) {
throw invalid_tuple_data_type();
}
ASSERT(offset + Uuid::SIZE + 1 <= data.size());
ASSERT_LE(offset + Uuid::SIZE + 1, data.size());
StringRef uuidData(data.begin() + offset + 1, Uuid::SIZE);
return Uuid(uuidData);
}
@ -461,15 +461,15 @@ namespace FDB {
throw invalid_tuple_index();
}
size_t offset = offsets[index];
ASSERT(offset < data.size());
ASSERT_LT(offset, data.size());
uint8_t code = data[offset];
if(code != NESTED_CODE) {
throw invalid_tuple_data_type();
}
size_t next_offset = (index + 1 < offsets.size() ? offsets[index+1] : data.size());
ASSERT(next_offset <= data.size());
ASSERT(data[next_offset - 1] == (uint8_t)0x00);
ASSERT_LE(next_offset, data.size());
ASSERT_EQ(data[next_offset - 1], (uint8_t)0x00);
Standalone<VectorRef<uint8_t>> dest;
dest.reserve(dest.arena(), next_offset - offset);
std::vector<size_t> dest_offsets;
@ -493,21 +493,21 @@ namespace FDB {
}
} else {
// A null object within the nested tuple.
ASSERT(i + 1 < next_offset - 1);
ASSERT(data[i+1] == 0xff);
ASSERT_LT(i + 1, next_offset - 1);
ASSERT_EQ(data[i+1], 0xff);
i += 2;
}
}
else if(code == BYTES_CODE || code == STRING_CODE) {
size_t next_i = find_string_terminator(data, i+1) + 1;
ASSERT(next_i <= next_offset - 1);
ASSERT_LE(next_i, next_offset - 1);
size_t length = next_i - i - 1;
dest.append(dest.arena(), data.begin() + i + 1, length);
i = next_i;
}
else if(code >= NEG_INT_START && code <= POS_INT_END) {
size_t int_size = abs(code - INT_ZERO_CODE);
ASSERT(i + int_size <= next_offset - 1);
ASSERT_LE(i + int_size, next_offset - 1);
dest.append(dest.arena(), data.begin() + i + 1, int_size);
i += int_size + 1;
}
@ -515,17 +515,17 @@ namespace FDB {
i += 1;
}
else if(code == UUID_CODE) {
ASSERT(i + 1 + Uuid::SIZE <= next_offset - 1);
ASSERT_LE(i + 1 + Uuid::SIZE, next_offset - 1);
dest.append(dest.arena(), data.begin() + i + 1, Uuid::SIZE);
i += Uuid::SIZE + 1;
}
else if(code == FLOAT_CODE) {
ASSERT(i + 1 + sizeof(float) <= next_offset - 1);
ASSERT_LE(i + 1 + sizeof(float), next_offset - 1);
dest.append(dest.arena(), data.begin() + i + 1, sizeof(float));
i += sizeof(float) + 1;
}
else if(code == DOUBLE_CODE) {
ASSERT(i + 1 + sizeof(double) <= next_offset - 1);
ASSERT_LE(i + 1 + sizeof(double), next_offset - 1);
dest.append(dest.arena(), data.begin() + i + 1, sizeof(double));
i += sizeof(double) + 1;
}

View File

@ -36,7 +36,7 @@ THREAD_FUNC networkThread(void* fdb) {
}
ACTOR Future<Void> _test() {
API *fdb = FDB::API::selectAPIVersion(620);
API *fdb = FDB::API::selectAPIVersion(630);
auto db = fdb->createDatabase();
state Reference<Transaction> tr = db->createTransaction();
@ -79,7 +79,7 @@ ACTOR Future<Void> _test() {
}
void fdb_flow_test() {
API *fdb = FDB::API::selectAPIVersion(620);
API *fdb = FDB::API::selectAPIVersion(630);
fdb->setupNetwork();
startThread(networkThread, fdb);
@ -132,6 +132,8 @@ namespace FDB {
GetRangeLimits limits = GetRangeLimits(), bool snapshot = false,
bool reverse = false,
FDBStreamingMode streamingMode = FDB_STREAMING_MODE_SERIAL) override;
Future<int64_t> getEstimatedRangeSizeBytes(const KeyRange& keys) override;
void addReadConflictRange(KeyRangeRef const& keys) override;
void addReadConflictKey(KeyRef const& key) override;
@ -346,6 +348,14 @@ namespace FDB {
} );
}
Future<int64_t> TransactionImpl::getEstimatedRangeSizeBytes(const KeyRange& keys) {
return backToFuture<int64_t>(fdb_transaction_get_estimated_range_size_bytes(tr, keys.begin.begin(), keys.begin.size(), keys.end.begin(), keys.end.size()), [](Reference<CFuture> f) {
int64_t bytes;
throw_on_error(fdb_future_get_int64(f->f, &bytes));
return bytes;
});
}
void TransactionImpl::addReadConflictRange(KeyRangeRef const& keys) {
throw_on_error( fdb_transaction_add_conflict_range( tr, keys.begin.begin(), keys.begin.size(), keys.end.begin(), keys.end.size(), FDB_CONFLICT_RANGE_TYPE_READ ) );
}

View File

@ -23,7 +23,7 @@
#include <flow/flow.h>
#define FDB_API_VERSION 620
#define FDB_API_VERSION 630
#include <bindings/c/foundationdb/fdb_c.h>
#undef DLLEXPORT
@ -89,6 +89,8 @@ namespace FDB {
streamingMode);
}
virtual Future<int64_t> getEstimatedRangeSizeBytes(const KeyRange& keys) = 0;
virtual void addReadConflictRange(KeyRangeRef const& keys) = 0;
virtual void addReadConflictKey(KeyRef const& key) = 0;

View File

@ -1,152 +0,0 @@
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="14.1" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<PropertyGroup Condition="'$(Release)' != 'true' ">
</PropertyGroup>
<PropertyGroup Condition="'$(Release)' == 'true' ">
<PreprocessorDefinitions>FDB_CLEAN_BUILD;%(PreprocessorDefinitions)</PreprocessorDefinitions>
</PropertyGroup>
<ItemGroup Label="ProjectConfigurations">
<ProjectConfiguration Include="Debug|X64">
<Configuration>Debug</Configuration>
<Platform>X64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|X64">
<Configuration>Release</Configuration>
<Platform>X64</Platform>
</ProjectConfiguration>
</ItemGroup>
<ItemGroup>
<ActorCompiler Include="fdb_flow.actor.cpp" />
</ItemGroup>
<ItemGroup>
<ClInclude Include="DirectoryPartition.h" />
<ClInclude Include="FDBLoanerTypes.h" />
<ClInclude Include="fdb_flow.h" />
<ClInclude Include="Tuple.h" />
</ItemGroup>
<ItemGroup>
<ClCompile Include="Tuple.cpp" />
<ClInclude Include="IDirectory.h" />
<ClInclude Include="Subspace.h" />
<ClCompile Include="Subspace.cpp" />
</ItemGroup>
<ItemGroup>
<ClInclude Include="HighContentionAllocator.h" />
<ActorCompiler Include="HighContentionAllocator.actor.cpp" />
</ItemGroup>
<ItemGroup>
<ClInclude Include="DirectoryLayer.h" />
<ActorCompiler Include="DirectoryLayer.actor.cpp" />
</ItemGroup>
<ItemGroup>
<ClInclude Include="DirectorySubspace.h" />
<ClCompile Include="DirectorySubspace.cpp" />
</ItemGroup>
<ItemGroup>
<ActorCompiler Include="Node.actor.cpp" />
</ItemGroup>
<ItemGroup>
<None Include="no_intellisense.opt" />
</ItemGroup>
<PropertyGroup Label="Globals">
<ProjectGUID>{2BA0A5E2-EB4C-4A32-948C-CBAABD77AF87}</ProjectGUID>
<TargetFrameworkVersion>v4.5.2</TargetFrameworkVersion>
<Keyword>Win32Proj</Keyword>
<RootNamespace>fdb_flow</RootNamespace>
</PropertyGroup>
<PropertyGroup>
<OutDir>$(SolutionDir)bin\$(Configuration)\</OutDir>
<IntDir>$(SystemDrive)\temp\msvcfdb\$(Platform)$(Configuration)\$(MSBuildProjectName)\</IntDir>
<BuildLogFile>$(IntDir)\$(MSBuildProjectName).log</BuildLogFile>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|X64'" Label="Configuration">
<ConfigurationType>StaticLibrary</ConfigurationType>
<CharacterSet>MultiByte</CharacterSet>
<PlatformToolset>v141</PlatformToolset>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|X64'" Label="Configuration">
<ConfigurationType>StaticLibrary</ConfigurationType>
<CharacterSet>MultiByte</CharacterSet>
<PlatformToolset>v141</PlatformToolset>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
<ImportGroup Label="ExtensionSettings">
</ImportGroup>
<ImportGroup Label="PropertySheets">
<Import Project="$(LocalAppData)\Microsoft\VisualStudio\10.0\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(LocalAppData)\Microsoft\VisualStudio\10.0\Microsoft.Cpp.$(Platform).user.props')" />
</ImportGroup>
<PropertyGroup Label="UserMacros" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|X64'">
<LinkIncremental>true</LinkIncremental>
<IncludePath>..\..\;C:\Program Files\boost_1_67_0;$(IncludePath)</IncludePath>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|X64'">
<LinkIncremental>false</LinkIncremental>
<IncludePath>..\..\;C:\Program Files\boost_1_67_0;$(IncludePath)</IncludePath>
</PropertyGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|X64'">
<ClCompile>
<PrecompiledHeader>
</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<MinimalRebuild>false</MinimalRebuild>
<DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
<Optimization>Disabled</Optimization>
<BasicRuntimeChecks>EnableFastChecks</BasicRuntimeChecks>
<RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=0x0502;WINVER=0x0502;BOOST_ALL_NO_LIB;NTDDI_VERSION=0x05020000;_DEBUG;_HAS_ITERATOR_DEBUGGING=0;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>%(AdditionalIncludeDirectories);..\c</AdditionalIncludeDirectories>
<MultiProcessorCompilation>true</MultiProcessorCompilation>
<AdditionalOptions>/bigobj @..\..\flow/no_intellisense.opt %(AdditionalOptions)</AdditionalOptions>
<LanguageStandard>stdcpp17</LanguageStandard>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalDependencies>Advapi32.lib</AdditionalDependencies>
</Link>
<Lib>
<AdditionalDependencies>$(TargetDir)flow.lib</AdditionalDependencies>
</Lib>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|X64'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<PrecompiledHeader>
</PrecompiledHeader>
<DebugInformationFormat>ProgramDatabase</DebugInformationFormat>
<Optimization>Full</Optimization>
<RuntimeLibrary>MultiThreaded</RuntimeLibrary>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=0x0502;WINVER=0x0502;BOOST_ALL_NO_LIB;NTDDI_VERSION=0x05020000;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>%(AdditionalIncludeDirectories);..\c</AdditionalIncludeDirectories>
<EnableEnhancedInstructionSet>NotSet</EnableEnhancedInstructionSet>
<EnablePREfast>false</EnablePREfast>
<AdditionalOptions>/bigobj @..\..\flow/no_intellisense.opt %(AdditionalOptions)</AdditionalOptions>
<MultiProcessorCompilation>true</MultiProcessorCompilation>
<FavorSizeOrSpeed>Speed</FavorSizeOrSpeed>
<MinimalRebuild>false</MinimalRebuild>
<LanguageStandard>stdcpp17</LanguageStandard>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>false</EnableCOMDATFolding>
<OptimizeReferences>false</OptimizeReferences>
<LinkTimeCodeGeneration>Default</LinkTimeCodeGeneration>
<AdditionalDependencies>Advapi32.lib</AdditionalDependencies>
<AdditionalOptions>/LTCG %(AdditionalOptions)</AdditionalOptions>
</Link>
<Lib>
<AdditionalDependencies>$(TargetDir)flow.lib</AdditionalDependencies>
</Lib>
</ItemDefinitionGroup>
<ImportGroup Label="ExtensionTargets">
<Import Project="..\..\flow\actorcompiler\ActorCompiler.targets" />
</ImportGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
<Target Name="MyPreCompileSteps" AfterTargets="CLCompile">
<Exec Command="&quot;$(SolutionDir)bin\$(Configuration)\coveragetool.exe&quot; &quot;$(OutDir)coverage.$(TargetName).xml&quot; @(ActorCompiler -> '%(RelativeDir)%(Filename)%(Extension)', ' ') @(CLInclude -> '%(RelativeDir)%(Filename)%(Extension)', ' ') @(CLCompile -> '%(RelativeDir)%(Filename)%(Extension)', ' ')" />
</Target>
</Project>

View File

@ -1,44 +0,0 @@
#
# local.mk
#
# This source file is part of the FoundationDB open source project
#
# Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- mode: makefile; -*-
fdb_flow_CFLAGS := -Ibindings/c $(fdbrpc_CFLAGS)
fdb_flow_LDFLAGS := -Llib -lfdb_c $(fdbrpc_LDFLAGS)
fdb_flow_LIBS :=
packages/fdb-flow-$(FLOWVER)-$(PLATFORM)-$(ARCH).tar.gz: fdb_flow
@echo "Packaging fdb_flow"
@rm -rf packages/fdb-flow-$(FLOWVER)-$(PLATFORM)-$(ARCH)
@mkdir -p packages/fdb-flow-$(FLOWVER)-$(PLATFORM)-$(ARCH)/lib packages/fdb-flow-$(FLOWVER)-$(PLATFORM)-$(ARCH)/include/bindings/flow packages/fdb-flow-$(FLOWVER)-$(PLATFORM)-$(ARCH)/include/bindings/c/foundationdb
@cp lib/libfdb_flow.a packages/fdb-flow-$(FLOWVER)-$(PLATFORM)-$(ARCH)/lib
@find bindings/flow -name '*.h' -not -path 'bindings/flow/tester/*' -exec cp {} packages/fdb-flow-$(FLOWVER)-$(PLATFORM)-$(ARCH)/include/bindings/flow \;
@find bindings/c/foundationdb -name '*.h' -exec cp {} packages/fdb-flow-$(FLOWVER)-$(PLATFORM)-$(ARCH)/include/bindings/c/foundationdb \;
@tar czf packages/fdb-flow-$(FLOWVER)-$(PLATFORM)-$(ARCH).tar.gz -C packages fdb-flow-$(FLOWVER)-$(PLATFORM)-$(ARCH)
@rm -rf packages/fdb-flow-$(FLOWVER)-$(PLATFORM)-$(ARCH)
FDB_FLOW: packages/fdb-flow-$(FLOWVER)-$(PLATFORM)-$(ARCH).tar.gz
FDB_FLOW_clean:
@echo "Cleaning fdb_flow package"
@rm -rf packages/fdb-flow-*.tar.gz
packages: FDB_FLOW
packages_clean: FDB_FLOW_clean

View File

@ -217,19 +217,19 @@ ACTOR Future< Standalone<RangeResultRef> > getRange(Reference<Transaction> tr, K
}
}
ACTOR static Future<Void> debugPrintRange(Reference<Transaction> tr, std::string subspace, std::string msg) {
if (!tr)
return Void();
Standalone<RangeResultRef> results = wait(getRange(tr, KeyRange(KeyRangeRef(subspace + '\x00', subspace + '\xff'))));
printf("==================================================DB:%s:%s, count:%d\n", msg.c_str(),
StringRef(subspace).printable().c_str(), results.size());
for (auto & s : results) {
printf("=====key:%s, value:%s\n", StringRef(s.key).printable().c_str(), StringRef(s.value).printable().c_str());
}
return Void();
}
//ACTOR static Future<Void> debugPrintRange(Reference<Transaction> tr, std::string subspace, std::string msg) {
// if (!tr)
// return Void();
//
// Standalone<RangeResultRef> results = wait(getRange(tr, KeyRange(KeyRangeRef(subspace + '\x00', subspace + '\xff'))));
// printf("==================================================DB:%s:%s, count:%d\n", msg.c_str(),
// StringRef(subspace).printable().c_str(), results.size());
// for (auto & s : results) {
// printf("=====key:%s, value:%s\n", StringRef(s.key).printable().c_str(), StringRef(s.value).printable().c_str());
// }
//
// return Void();
//}
ACTOR Future<Void> stackSub(FlowTesterStack* stack) {
if (stack->data.size() < 2)
@ -430,9 +430,8 @@ struct LogStackFunc : InstructionFunc {
wait(logStack(data, entries, prefix));
entries.clear();
}
wait(logStack(data, entries, prefix));
}
wait(logStack(data, entries, prefix));
return Void();
}
@ -639,6 +638,29 @@ struct GetFunc : InstructionFunc {
const char* GetFunc::name = "GET";
REGISTER_INSTRUCTION_FUNC(GetFunc);
struct GetEstimatedRangeSize : InstructionFunc {
static const char* name;
ACTOR static Future<Void> call(Reference<FlowTesterData> data, Reference<InstructionData> instruction) {
state std::vector<StackItem> items = data->stack.pop(2);
if (items.size() != 2)
return Void();
Standalone<StringRef> s1 = wait(items[0].value);
state Standalone<StringRef> beginKey = Tuple::unpack(s1).getString(0);
Standalone<StringRef> s2 = wait(items[1].value);
state Standalone<StringRef> endKey = Tuple::unpack(s2).getString(0);
Future<int64_t> fsize = instruction->tr->getEstimatedRangeSizeBytes(KeyRangeRef(beginKey, endKey));
int64_t size = wait(fsize);
data->stack.pushTuple(LiteralStringRef("GOT_ESTIMATED_RANGE_SIZE"));
return Void();
}
};
const char* GetEstimatedRangeSize::name = "GET_ESTIMATED_RANGE_SIZE";
REGISTER_INSTRUCTION_FUNC(GetEstimatedRangeSize);
struct GetKeyFunc : InstructionFunc {
static const char* name;
@ -1343,12 +1365,12 @@ const char* StartThreadFunc::name = "START_THREAD";
REGISTER_INSTRUCTION_FUNC(StartThreadFunc);
ACTOR template <class Function>
Future<decltype(fake<Function>()(Reference<ReadTransaction>()).getValue())> read(Reference<Database> db,
Function func) {
Future<decltype(std::declval<Function>()(Reference<ReadTransaction>()).getValue())> read(Reference<Database> db,
Function func) {
state Reference<ReadTransaction> tr = db->createTransaction();
loop {
try {
state decltype(fake<Function>()(Reference<ReadTransaction>()).getValue()) result = wait(func(tr));
state decltype(std::declval<Function>()(Reference<ReadTransaction>()).getValue()) result = wait(func(tr));
return result;
} catch (Error& e) {
wait(tr->onError(e));
@ -1604,6 +1626,7 @@ struct UnitTestsFunc : InstructionFunc {
tr->setOption(FDBTransactionOption::FDB_TR_OPTION_READ_LOCK_AWARE);
tr->setOption(FDBTransactionOption::FDB_TR_OPTION_LOCK_AWARE);
tr->setOption(FDBTransactionOption::FDB_TR_OPTION_INCLUDE_PORT_IN_ADDRESS);
tr->setOption(FDBTransactionOption::FDB_TR_OPTION_REPORT_CONFLICTING_KEYS);
Optional<FDBStandalone<ValueRef> > _ = wait(tr->get(LiteralStringRef("\xff")));
tr->cancel();
@ -1794,7 +1817,7 @@ ACTOR void _test_versionstamp() {
try {
g_network = newNet2(TLSConfig());
API *fdb = FDB::API::selectAPIVersion(620);
API *fdb = FDB::API::selectAPIVersion(630);
fdb->setupNetwork();
startThread(networkThread, fdb);

View File

@ -27,6 +27,8 @@
#pragma once
#include <utility>
#include "flow/IDispatched.h"
#include "bindings/flow/fdb_flow.h"
#include "bindings/flow/IDirectory.h"
@ -34,11 +36,11 @@
#include "bindings/flow/DirectoryLayer.h"
#include "flow/actorcompiler.h" // This must be the last #include.
#define LOG_ALL 0
#define LOG_INSTRUCTIONS LOG_ALL || 0
#define LOG_OPS LOG_ALL || 0
#define LOG_DIRS LOG_ALL || 0
#define LOG_ERRORS LOG_ALL || 0
constexpr bool LOG_ALL = false;
constexpr bool LOG_INSTRUCTIONS = LOG_ALL || false;
constexpr bool LOG_OPS = LOG_ALL || false;
constexpr bool LOG_DIRS = LOG_ALL || false;
constexpr bool LOG_ERRORS = LOG_ALL || false;
struct FlowTesterData;
@ -57,7 +59,7 @@ struct FlowTesterStack {
void push(Future<Standalone<StringRef>> value) {
data.push_back(StackItem(index, value));
}
void push(Standalone<StringRef> value) {
push(Future<Standalone<StringRef>>(value));
}
@ -86,10 +88,10 @@ struct FlowTesterStack {
items.push_back(data.back());
data.pop_back();
count--;
}
}
return items;
}
Future<std::vector<FDB::Tuple>> waitAndPop(int count);
Future<FDB::Tuple> waitAndPop();
@ -106,7 +108,7 @@ struct FlowTesterStack {
struct InstructionData : public ReferenceCounted<InstructionData> {
bool isDatabase;
bool isSnapshot;
bool isSnapshot;
StringRef instruction;
Reference<FDB::Transaction> tr;
@ -153,7 +155,7 @@ struct DirectoryOrSubspace {
return "DirectorySubspace";
}
else if(directory.present()) {
return "IDirectory";
return "IDirectory";
}
else if(subspace.present()) {
return "Subspace";
@ -169,10 +171,10 @@ struct DirectoryTesterData {
int directoryListIndex;
int directoryErrorIndex;
Reference<FDB::IDirectory> directory() {
Reference<FDB::IDirectory> directory() {
ASSERT(directoryListIndex < directoryList.size());
ASSERT(directoryList[directoryListIndex].directory.present());
return directoryList[directoryListIndex].directory.get();
return directoryList[directoryListIndex].directory.get();
}
FDB::Subspace* subspace() {
@ -220,10 +222,10 @@ struct FlowTesterData : public ReferenceCounted<FlowTesterData> {
std::string tupleToString(FDB::Tuple const& tuple);
ACTOR template <class F>
Future<decltype(fake<F>()().getValue())> executeMutation(Reference<InstructionData> instruction, F func) {
Future<decltype(std::declval<F>()().getValue())> executeMutation(Reference<InstructionData> instruction, F func) {
loop {
try {
state decltype(fake<F>()().getValue()) result = wait(func());
state decltype(std::declval<F>()().getValue()) result = wait(func());
if(instruction->isDatabase) {
wait(instruction->tr->commit());
}

View File

@ -1,131 +0,0 @@
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="14.1" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<Import Project="$(SolutionDir)versions.target" />
<PropertyGroup Condition="'$(Release)' != 'true' ">
<PreReleaseDecoration>-PRERELEASE</PreReleaseDecoration>
</PropertyGroup>
<PropertyGroup Condition="'$(Release)' == 'true' ">
<PreReleaseDecoration>
</PreReleaseDecoration>
</PropertyGroup>
<ItemGroup Label="ProjectConfigurations">
<ProjectConfiguration Include="Debug|X64">
<Configuration>Debug</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|X64">
<Configuration>Release</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
</ItemGroup>
<ItemGroup>
<ActorCompiler Include="Tester.actor.h" />
</ItemGroup>
<ItemGroup>
<ActorCompiler Include="DirectoryTester.actor.cpp" />
<ActorCompiler Include="Tester.actor.cpp" />
</ItemGroup>
<PropertyGroup Label="Globals">
<ProjectGuid>{086EB89C-CDBD-4ABE-8296-5CA224244C80}</ProjectGuid>
<Keyword>Win32Proj</Keyword>
<RootNamespace>fdb_flow_tester</RootNamespace>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|X64'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<CharacterSet>MultiByte</CharacterSet>
<PlatformToolset>v141</PlatformToolset>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|X64'" Label="Configuration">
<ConfigurationType>Application</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<WholeProgramOptimization>false</WholeProgramOptimization>
<CharacterSet>MultiByte</CharacterSet>
<PlatformToolset>v141</PlatformToolset>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
<ImportGroup Label="ExtensionSettings">
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug|X64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release|X64'" Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<PropertyGroup Label="UserMacros" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|X64'">
<LinkIncremental>true</LinkIncremental>
<OutDir>$(SolutionDir)bin\$(Configuration)\</OutDir>
<IntDir>$(SystemDrive)\temp\msvcfdb\$(Platform)$(Configuration)\$(MSBuildProjectName)\</IntDir>
<IncludePath>$(IncludePath);../../../;C:\Program Files\boost_1_67_0</IncludePath>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|X64'">
<LinkIncremental>false</LinkIncremental>
<OutDir>$(SolutionDir)bin\$(Configuration)\</OutDir>
<IntDir>$(SystemDrive)\temp\msvcfdb\$(Platform)$(Configuration)\$(MSBuildProjectName)\</IntDir>
<IncludePath>$(IncludePath);../;C:\Program Files\boost_1_67_0</IncludePath>
</PropertyGroup>
<ItemDefinitionGroup>
<ClCompile>
<PreprocessorDefinitions>FDB_VT_VERSION="$(Version)$(PreReleaseDecoration)";FDB_VT_PACKAGE_NAME="$(PackageName)";%(PreprocessorDefinitions)</PreprocessorDefinitions>
<LanguageStandard>stdcpp17</LanguageStandard>
</ClCompile>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|X64'">
<ClCompile>
<PrecompiledHeader>
</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=_WIN32_WINNT_WS03;BOOST_ALL_NO_LIB;WINVER=_WIN32_WINNT_WS03;NTDDI_VERSION=NTDDI_WS03;BOOST_ALL_NO_LIB;_DEBUG;_HAS_ITERATOR_DEBUGGING=0;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<MultiProcessorCompilation>true</MultiProcessorCompilation>
<MinimalRebuild>false</MinimalRebuild>
<RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
<AdditionalOptions> @../../../flow/no_intellisense.opt %(AdditionalOptions)</AdditionalOptions>
<LanguageStandard>stdcpp17</LanguageStandard>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalDependencies>$(SolutionDir)bin\$(Configuration)\fdbclient.lib;$(SolutionDir)bin\$(Configuration)\fdb_c.lib;$(SolutionDir)bin\$(Configuration)\fdb_flow.lib;Advapi32.lib</AdditionalDependencies>
</Link>
<PreBuildEvent>
<Command>
</Command>
</PreBuildEvent>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|X64'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<PrecompiledHeader>
</PrecompiledHeader>
<Optimization>Full</Optimization>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=_WIN32_WINNT_WS03;WINVER=_WIN32_WINNT_WS03;NTDDI_VERSION=NTDDI_WS03;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<MultiProcessorCompilation>true</MultiProcessorCompilation>
<FavorSizeOrSpeed>Speed</FavorSizeOrSpeed>
<RuntimeLibrary>MultiThreaded</RuntimeLibrary>
<BufferSecurityCheck>false</BufferSecurityCheck>
<EnableEnhancedInstructionSet>StreamingSIMDExtensions2</EnableEnhancedInstructionSet>
<AdditionalOptions> @../flow/no_intellisense.opt %(AdditionalOptions)</AdditionalOptions>
<LanguageStandard>stdcpp17</LanguageStandard>
</ClCompile>
<Link>
<SubSystem>Console</SubSystem>
<GenerateDebugInformation>true</GenerateDebugInformation>
<EnableCOMDATFolding>false</EnableCOMDATFolding>
<OptimizeReferences>false</OptimizeReferences>
<AdditionalDependencies>$(SolutionDir)bin\$(Configuration)\fdbclient.lib;$(SolutionDir)bin\$(Configuration)\fdb_c.lib;$(SolutionDir)bin\$(Configuration)\fdb_flow.lib;Advapi32.lib</AdditionalDependencies>
<LinkTimeCodeGeneration>Default</LinkTimeCodeGeneration>
</Link>
<PreBuildEvent>
<Command>
</Command>
</PreBuildEvent>
</ItemDefinitionGroup>
<ImportGroup Label="ExtensionTargets">
<Import Project="..\..\..\flow\actorcompiler\ActorCompiler.targets" />
</ImportGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
</Project>

View File

@ -1,8 +0,0 @@
<?xml version="1.0" encoding="utf-8"?>
<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup>
<ActorCompiler Include="DirectoryTester.actor.cpp" />
<ActorCompiler Include="Tester.actor.cpp" />
<ActorCompiler Include="Tester.actor.h" />
</ItemGroup>
</Project>

View File

@ -1,42 +0,0 @@
#
# local.mk
#
# This source file is part of the FoundationDB open source project
#
# Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- mode: makefile; -*-
fdb_flow_tester_CFLAGS := -Ibindings/c $(fdbrpc_CFLAGS)
fdb_flow_tester_LDFLAGS := -Llib $(fdbrpc_LDFLAGS) -lfdb_c
fdb_flow_tester_LIBS := lib/libfdb_flow.a lib/libflow.a lib/libfdb_c.$(DLEXT)
fdb_flow_tester_STATIC_LIBS := $(TLS_LIBS)
fdb_flow_tester: lib/libfdb_c.$(DLEXT)
@mkdir -p bindings/flow/bin
@rm -f bindings/flow/bin/fdb_flow_tester
@cp bin/fdb_flow_tester bindings/flow/bin/fdb_flow_tester
fdb_flow_tester_clean: _fdb_flow_tester_clean
_fdb_flow_tester_clean:
@rm -rf bindings/flow/bin
ifeq ($(PLATFORM),linux)
fdb_flow_tester_LDFLAGS += -static-libstdc++ -static-libgcc -ldl -lpthread -lrt -lm
else ifeq ($(PLATFORM),osx)
fdb_flow_tester_LDFLAGS += -lc++
endif

View File

@ -99,6 +99,8 @@ function(build_go_package)
endif()
add_custom_command(OUTPUT ${outfile}
COMMAND ${CMAKE_COMMAND} -E env ${go_env}
${GO_EXECUTABLE} get -d ${GO_IMPORT_PATH}/${BGP_PATH} &&
${CMAKE_COMMAND} -E env ${go_env}
${GO_EXECUTABLE} install ${GO_IMPORT_PATH}/${BGP_PATH}
DEPENDS ${fdb_options_file}
COMMENT "Building ${BGP_NAME}")

View File

@ -9,7 +9,7 @@ This package requires:
- [Mono](http://www.mono-project.com/) (macOS or Linux) or [Visual Studio](https://www.visualstudio.com/) (Windows) (build-time only)
- FoundationDB C API 2.0.x-6.1.x (part of the [FoundationDB client packages](https://apple.github.io/foundationdb/downloads.html#c))
Use of this package requires the selection of a FoundationDB API version at runtime. This package currently supports FoundationDB API versions 200-620.
Use of this package requires the selection of a FoundationDB API version at runtime. This package currently supports FoundationDB API versions 200-630.
To install this package, you can run the "fdb-go-install.sh" script (for versions 5.0.x and greater):

View File

@ -25,6 +25,9 @@ platform=$(uname)
if [[ "${platform}" == "Darwin" ]] ; then
FDBLIBDIR="${FDBLIBDIR:-/usr/local/lib}"
libfdbc="libfdb_c.dylib"
elif [[ "${platform}" == "FreeBSD" ]] ; then
FDBLIBDIR="${FDBLIBDIR:-/lib}"
libfdbc="libfdb_c.so"
elif [[ "${platform}" == "Linux" ]] ; then
libfdbc="libfdb_c.so"
custom_libdir="${FDBLIBDIR:-}"
@ -248,8 +251,11 @@ else
:
elif [[ "${status}" -eq 0 ]] ; then
echo "Building generated files."
if [[ "${platform}" == "FreeBSD" ]] ; then
cmd=( 'gmake' '-C' "${fdbdir}" 'bindings/c/foundationdb/fdb_c_options.g.h' )
else
cmd=( 'make' '-C' "${fdbdir}" 'bindings/c/foundationdb/fdb_c_options.g.h' )
fi
echo "${cmd[*]}"
if ! "${cmd[@]}" ; then
let status="${status} + 1"

View File

@ -2,3 +2,5 @@ module github.com/apple/foundationdb/bindings/go
// The FoundationDB go bindings currently have no external golang dependencies outside of
// the go standard library.
require golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 // indirect

View File

@ -1,103 +0,0 @@
#
# include.mk
#
# This source file is part of the FoundationDB open source project
#
# Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
TARGETS += fdb_go fdb_go_tester
CLEAN_TARGETS += fdb_go_clean fdb_go_tester_clean
GOPATH := $(CURDIR)/bindings/go/build
GO_IMPORT_PATH := github.com/apple/foundationdb/bindings/go/src
GO_DEST := $(GOPATH)/src/$(GO_IMPORT_PATH)
.PHONY: fdb_go fdb_go_path fdb_go_fmt fdb_go_fmt_check fdb_go_tester fdb_go_tester_clean
# We only override if the environment didn't set it (this is used by
# the fdbwebsite documentation build process)
GODOC_DIR ?= bindings/go
CGO_CFLAGS := -I$(CURDIR)/bindings/c
CGO_LDFLAGS := -L$(CURDIR)/lib
ifeq ($(PLATFORM),linux)
GOPLATFORM := linux_amd64
else ifeq ($(PLATFORM),osx)
GOPLATFORM := darwin_amd64
else
$(error Not prepared to compile on platform $(PLATFORM))
endif
GO_PACKAGE_OUTDIR := $(GOPATH)/pkg/$(GOPLATFORM)/$(GO_IMPORT_PATH)
GO_PACKAGES := fdb fdb/tuple fdb/subspace fdb/directory
GO_PACKAGE_OBJECTS := $(addprefix $(GO_PACKAGE_OUTDIR)/,$(GO_PACKAGES:=.a))
GO_GEN := $(CURDIR)/bindings/go/src/fdb/generated.go
GO_SRC := $(shell find $(CURDIR)/bindings/go/src -name '*.go') $(GO_GEN)
fdb_go: $(GO_PACKAGE_OBJECTS) $(GO_SRC) fdb_go_fmt_check
fdb_go_fmt: $(GO_SRC)
@echo "Formatting fdb_go"
@gofmt -w $(GO_SRC)
fdb_go_fmt_check: $(GO_SRC)
@echo "Checking fdb_go"
@bash -c 'fmtoutstr=$$(gofmt -l $(GO_SRC)) ; if [[ -n "$${fmtoutstr}" ]] ; then echo "Detected go formatting violations for the following files:" ; echo "$${fmtoutstr}" ; echo "Try running: make fdb_go_fmt"; exit 1 ; fi'
$(GO_DEST)/.stamp: $(GO_SRC)
@echo "Creating fdb_go_path"
@mkdir -p $(GO_DEST)
@cp -r bindings/go/src/* $(GO_DEST)
@touch $(GO_DEST)/.stamp
fdb_go_path: $(GO_DEST)/.stamp
fdb_go_clean:
@echo "Cleaning fdb_go"
@rm -rf $(GOPATH)
fdb_go_tester: $(GOPATH)/bin/_stacktester
fdb_go_tester_clean:
@echo "Cleaning fdb_go_tester"
@rm -rf $(GOPATH)/bin
$(GOPATH)/bin/_stacktester: $(GO_DEST)/.stamp $(GO_SRC) $(GO_PACKAGE_OBJECTS)
@echo "Compiling $(basename $(notdir $@))"
@go install $(GO_IMPORT_PATH)/_stacktester
$(GO_PACKAGE_OUTDIR)/fdb/tuple.a: $(GO_DEST)/.stamp $(GO_SRC) $(GO_PACKAGE_OUTDIR)/fdb.a
@echo "Compiling fdb/tuple"
@go install $(GO_IMPORT_PATH)/fdb/tuple
$(GO_PACKAGE_OUTDIR)/fdb/subspace.a: $(GO_DEST)/.stamp $(GO_SRC) $(GO_PACKAGE_OUTDIR)/fdb.a $(GO_PACKAGE_OUTDIR)/fdb/tuple.a
@echo "Compiling fdb/subspace"
@go install $(GO_IMPORT_PATH)/fdb/subspace
$(GO_PACKAGE_OUTDIR)/fdb/directory.a: $(GO_DEST)/.stamp $(GO_SRC) $(GO_PACKAGE_OUTDIR)/fdb.a $(GO_PACKAGE_OUTDIR)/fdb/tuple.a $(GO_PACKAGE_OUTDIR)/fdb/subspace.a
@echo "Compiling fdb/directory"
@go install $(GO_IMPORT_PATH)/fdb/directory
$(GO_PACKAGE_OUTDIR)/fdb.a: $(GO_DEST)/.stamp lib/libfdb_c.$(DLEXT) $(GO_SRC)
@echo "Compiling fdb"
@go install $(GO_IMPORT_PATH)/fdb
$(GO_GEN): bindings/go/src/_util/translate_fdb_options.go fdbclient/vexillographer/fdb.options
@echo "Building $@"
@go run bindings/go/src/_util/translate_fdb_options.go < fdbclient/vexillographer/fdb.options > $@

View File

@ -569,6 +569,16 @@ func (sm *StackMachine) processInst(idx int, inst tuple.Tuple) {
}
sm.store(idx, res.(fdb.FutureByteSlice))
case op == "GET_ESTIMATED_RANGE_SIZE":
r := sm.popKeyRange()
_, e := rt.ReadTransact(func(rtr fdb.ReadTransaction) (interface{}, error) {
_ = rtr.GetEstimatedRangeSizeBytes(r).MustGet()
sm.store(idx, []byte("GOT_ESTIMATED_RANGE_SIZE"))
return nil, nil
})
if e != nil {
panic(e)
}
case op == "COMMIT":
sm.store(idx, sm.currentTransaction().Commit())
case op == "RESET":

View File

@ -22,7 +22,7 @@
package fdb
// #define FDB_API_VERSION 620
// #define FDB_API_VERSION 630
// #include <foundationdb/fdb_c.h>
import "C"

View File

@ -22,12 +22,14 @@
package fdb
// #define FDB_API_VERSION 620
// #define FDB_API_VERSION 630
// #include <foundationdb/fdb_c.h>
import "C"
import (
"runtime"
"golang.org/x/xerrors"
)
// Database is a handle to a FoundationDB database. Database is a lightweight
@ -89,8 +91,10 @@ func retryable(wrapped func() (interface{}, error), onError func(Error) FutureNi
return
}
ep, ok := e.(Error)
if ok {
// Check if the error chain contains an
// fdb.Error
var ep Error
if xerrors.As(e, &ep) {
e = onError(ep).Get()
}
@ -113,6 +117,10 @@ func retryable(wrapped func() (interface{}, error), onError func(Error) FutureNi
// will recover a panicked Error and either retry the transaction or return the
// error.
//
// The transaction is retried if the error is or wraps a retryable Error.
// The error is unwrapped with the xerrors.Wrapper. See https://godoc.org/golang.org/x/xerrors#Wrapper
// for details.
//
// Do not return Future objects from the function provided to Transact. The
// Transaction created by Transact may be finalized at any point after Transact
// returns, resulting in the cancellation of any outstanding
@ -153,6 +161,10 @@ func (d Database) Transact(f func(Transaction) (interface{}, error)) (interface{
// MustGet. ReadTransact will recover a panicked Error and either retry the
// transaction or return the error.
//
// The transaction is retried if the error is or wraps a retryable Error.
// The error is unwrapped with the xerrors.Wrapper. See https://godoc.org/golang.org/x/xerrors#Wrapper
// for details.
//
// Do not return Future objects from the function provided to ReadTransact. The
// Transaction created by ReadTransact may be finalized at any point after
// ReadTransact returns, resulting in the cancellation of any outstanding

View File

@ -45,6 +45,10 @@ func (dp directoryPartition) Pack(t tuple.Tuple) fdb.Key {
panic("cannot pack keys using the root of a directory partition")
}
func (dp directoryPartition) PackWithVersionstamp(t tuple.Tuple) (fdb.Key, error) {
panic("cannot pack keys using the root of a directory partition")
}
func (dp directoryPartition) Unpack(k fdb.KeyConvertible) (tuple.Tuple, error) {
panic("cannot unpack keys using the root of a directory partition")
}

View File

@ -23,6 +23,8 @@
package directory
import (
"fmt"
"strings"
"github.com/apple/foundationdb/bindings/go/src/fdb"
"github.com/apple/foundationdb/bindings/go/src/fdb/subspace"
)
@ -43,6 +45,18 @@ type directorySubspace struct {
layer []byte
}
// String implements the fmt.Stringer interface and returns human-readable
// string representation of this object.
func (ds directorySubspace) String() string {
var path string
if len(ds.path) > 0 {
path = "(" + strings.Join(ds.path, ",") + ")"
} else {
path = "nil"
}
return fmt.Sprintf("DirectorySubspace(%s, %s)", path, fdb.Printable(ds.Bytes()))
}
func (d directorySubspace) CreateOrOpen(t fdb.Transactor, path []string, layer []byte) (DirectorySubspace, error) {
return d.dl.CreateOrOpen(t, d.dl.partitionSubpath(d.path, path), layer)
}

View File

@ -46,7 +46,7 @@ A basic interaction with the FoundationDB API is demonstrated below:
func main() {
// Different API versions may expose different runtime behaviors.
fdb.MustAPIVersion(620)
fdb.MustAPIVersion(630)
// Open the default database from the system cluster
db := fdb.MustOpenDefault()
@ -139,6 +139,16 @@ error. The above example may be rewritten as:
return []string{valueOne, valueTwo}, nil
})
MustGet returns nil (which is different from empty slice []byte{}), when the
key doesn't exist, and hence non-existence can be checked as follows:
val := tr.Get(fdb.Key("foobar")).MustGet()
if val == nil {
fmt.Println("foobar does not exist.")
} else {
fmt.Println("foobar exists.")
}
Any panic that occurs during execution of the caller-provided function will be
recovered by the (Database).Transact method. If the error is an FDB Error, it
will either result in a retry of the function or be returned by Transact. If the

View File

@ -22,7 +22,7 @@
package fdb
// #define FDB_API_VERSION 620
// #define FDB_API_VERSION 630
// #include <foundationdb/fdb_c.h>
import "C"

View File

@ -22,7 +22,7 @@
package fdb
// #define FDB_API_VERSION 620
// #define FDB_API_VERSION 630
// #include <foundationdb/fdb_c.h>
// #include <stdlib.h>
import "C"
@ -108,7 +108,7 @@ func (opt NetworkOptions) setOpt(code int, param []byte) error {
// library, an error will be returned. APIVersion must be called prior to any
// other functions in the fdb package.
//
// Currently, this package supports API versions 200 through 620.
// Currently, this package supports API versions 200 through 630.
//
// Warning: When using the multi-version client API, setting an API version that
// is not supported by a particular client library will prevent that client from
@ -116,7 +116,7 @@ func (opt NetworkOptions) setOpt(code int, param []byte) error {
// the API version of your application after upgrading your client until the
// cluster has also been upgraded.
func APIVersion(version int) error {
headerVersion := 620
headerVersion := 630
networkMutex.Lock()
defer networkMutex.Unlock()
@ -128,7 +128,7 @@ func APIVersion(version int) error {
return errAPIVersionAlreadySet
}
if version < 200 || version > 620 {
if version < 200 || version > 630 {
return errAPIVersionNotSupported
}

View File

@ -0,0 +1,5 @@
package fdb
//#cgo CFLAGS: -I/usr/local/include/
//#cgo LDFLAGS: -L/usr/local/lib/
import "C"

View File

@ -32,14 +32,14 @@ import (
func ExampleOpenDefault() {
var e error
e = fdb.APIVersion(400)
e = fdb.APIVersion(630)
if e != nil {
fmt.Printf("Unable to set API version: %v\n", e)
return
}
// OpenDefault opens the database described by the platform-specific default
// cluster file and the database name []byte("DB").
// cluster file
db, e := fdb.OpenDefault()
if e != nil {
fmt.Printf("Unable to open default database: %v\n", e)
@ -47,16 +47,18 @@ func ExampleOpenDefault() {
}
_ = db
// Output:
}
func ExampleVersionstamp(t *testing.T) {
fdb.MustAPIVersion(400)
func TestVersionstamp(t *testing.T) {
fdb.MustAPIVersion(630)
db := fdb.MustOpenDefault()
setVs := func(t fdb.Transactor, key fdb.Key) (fdb.FutureKey, error) {
fmt.Printf("setOne called with: %T\n", t)
ret, e := t.Transact(func(tr fdb.Transaction) (interface{}, error) {
tr.SetVersionstampedValue(key, []byte("blahblahbl"))
tr.SetVersionstampedValue(key, []byte("blahblahbl\x00\x00\x00\x00"))
return tr.GetVersionstamp(), nil
})
return ret.(fdb.FutureKey), e
@ -76,16 +78,27 @@ func ExampleVersionstamp(t *testing.T) {
var v []byte
var fvs fdb.FutureKey
var k fdb.Key
var e error
fvs, _ = setVs(db, fdb.Key("foo"))
v, _ = getOne(db, fdb.Key("foo"))
t.Log(v)
k, _ = fvs.Get()
fvs, e = setVs(db, fdb.Key("foo"))
if e != nil {
t.Errorf("setOne failed %v", e)
}
v, e = getOne(db, fdb.Key("foo"))
if e != nil {
t.Errorf("getOne failed %v", e)
}
t.Logf("getOne returned %s", v)
k, e = fvs.Get()
if e != nil {
t.Errorf("setOne wait failed %v", e)
}
t.Log(k)
t.Logf("setOne returned %s", k)
}
func ExampleTransactor() {
fdb.MustAPIVersion(400)
fdb.MustAPIVersion(630)
db := fdb.MustOpenDefault()
setOne := func(t fdb.Transactor, key fdb.Key, value []byte) error {
@ -136,7 +149,7 @@ func ExampleTransactor() {
}
func ExampleReadTransactor() {
fdb.MustAPIVersion(400)
fdb.MustAPIVersion(630)
db := fdb.MustOpenDefault()
getOne := func(rt fdb.ReadTransactor, key fdb.Key) ([]byte, error) {
@ -189,7 +202,7 @@ func ExampleReadTransactor() {
}
func ExamplePrefixRange() {
fdb.MustAPIVersion(400)
fdb.MustAPIVersion(630)
db := fdb.MustOpenDefault()
tr, e := db.CreateTransaction()
@ -228,7 +241,7 @@ func ExamplePrefixRange() {
}
func ExampleRangeIterator() {
fdb.MustAPIVersion(400)
fdb.MustAPIVersion(630)
db := fdb.MustOpenDefault()
tr, e := db.CreateTransaction()
@ -278,6 +291,8 @@ func TestKeyToString(t *testing.T) {
t.Errorf("got '%v', want '%v' at case %v", s, c.expect, i)
}
}
// Output:
}
func ExamplePrintable() {

View File

@ -0,0 +1,5 @@
package fdb
//#cgo CFLAGS: -I"C:/Program Files/foundationdb/include"
//#cgo LDFLAGS: -L"C:/Program Files/foundationdb/bin" -lfdb_c
import "C"

View File

@ -23,7 +23,7 @@
package fdb
// #cgo LDFLAGS: -lfdb_c -lm
// #define FDB_API_VERSION 620
// #define FDB_API_VERSION 630
// #include <foundationdb/fdb_c.h>
// #include <string.h>
//
@ -268,6 +268,7 @@ type futureKeyValueArray struct {
*future
}
//go:nocheckptr
func stringRefToSlice(ptr unsafe.Pointer) []byte {
size := *((*C.int)(unsafe.Pointer(uintptr(ptr) + 8)))

View File

@ -88,6 +88,20 @@ func (o NetworkOptions) SetTraceFormat(param string) error {
return o.setOpt(34, []byte(param))
}
// Select clock source for trace files. now (the default) or realtime are supported.
//
// Parameter: Trace clock source
func (o NetworkOptions) SetTraceClockSource(param string) error {
return o.setOpt(35, []byte(param))
}
// Once provided, this string will be used to replace the port/PID in the log file names.
//
// Parameter: The identifier that will be part of all trace file names
func (o NetworkOptions) SetTraceFileIdentifier(param string) error {
return o.setOpt(36, []byte(param))
}
// Set internal tuning or debugging knobs
//
// Parameter: knob_name=knob_value
@ -216,11 +230,16 @@ func (o NetworkOptions) SetDisableClientStatisticsLogging() error {
return o.setOpt(70, nil)
}
// Enables debugging feature to perform slow task profiling. Requires trace logging to be enabled. WARNING: this feature is not recommended for use in production.
// Deprecated
func (o NetworkOptions) SetEnableSlowTaskProfiling() error {
return o.setOpt(71, nil)
}
// Enables debugging feature to perform run loop profiling. Requires trace logging to be enabled. WARNING: this feature is not recommended for use in production.
func (o NetworkOptions) SetEnableRunLoopProfiling() error {
return o.setOpt(71, nil)
}
// Enable client buggify - will make requests randomly fail (intended for client testing)
func (o NetworkOptions) SetClientBuggifyEnable() error {
return o.setOpt(80, nil)
@ -323,7 +342,7 @@ func (o DatabaseOptions) SetTransactionCausalReadRisky() error {
return o.setOpt(504, nil)
}
// Addresses returned by get_addresses_for_key include the port when enabled. This will be enabled by default in api version 630, and this option will be deprecated.
// Addresses returned by get_addresses_for_key include the port when enabled. As of api version 630, this option is enabled by default and setting this has no effect.
func (o DatabaseOptions) SetTransactionIncludePortInAddress() error {
return o.setOpt(505, nil)
}
@ -343,7 +362,7 @@ func (o TransactionOptions) SetCausalReadDisable() error {
return o.setOpt(21, nil)
}
// Addresses returned by get_addresses_for_key include the port when enabled. This will be enabled by default in api version 630, and this option will be deprecated.
// Addresses returned by get_addresses_for_key include the port when enabled. As of api version 630, this option is enabled by default and setting this has no effect.
func (o TransactionOptions) SetIncludePortInAddress() error {
return o.setOpt(23, nil)
}
@ -434,6 +453,11 @@ func (o TransactionOptions) SetTransactionLoggingMaxFieldLength(param int64) err
return o.setOpt(405, int64ToBytes(param))
}
// Sets an identifier for server tracing of this transaction. When committed, this identifier triggers logging when each part of the transaction authority encounters it, which is helpful in diagnosing slowness in misbehaving clusters. The identifier is randomly generated. When there is also a debug_transaction_identifier, both IDs are logged together.
func (o TransactionOptions) SetServerRequestTracing() error {
return o.setOpt(406, nil)
}
// Set a timeout in milliseconds which, when elapsed, will cause the transaction automatically to be cancelled. Valid parameter values are ``[0, INT_MAX]``. If set to 0, will disable all timeouts. All pending and any future uses of the transaction will throw an exception. The transaction can be used again after it is reset. Prior to API version 610, like all other transaction options, the timeout must be reset after a call to ``onError``. If the API version is 610 or greater, the timeout is not reset after an ``onError`` call. This allows the user to specify a longer timeout on specific transactions than the default timeout specified through the ``transaction_timeout`` database option without the shorter database timeout cancelling transactions that encounter a retryable error. Note that at all API versions, it is safe and legal to set the timeout each time the transaction begins, so most code written assuming the older behavior can be upgraded to the newer behavior without requiring any modification, and the caller is not required to implement special logic in retry loops to only conditionally set this option.
//
// Parameter: value in milliseconds of timeout
@ -492,6 +516,11 @@ func (o TransactionOptions) SetUseProvisionalProxies() error {
return o.setOpt(711, nil)
}
// The transaction can retrieve keys that are conflicting with other transactions.
func (o TransactionOptions) SetReportConflictingKeys() error {
	return o.setOpt(712, nil) // option code 712, no parameter
}
type StreamingMode int
const (
@ -506,13 +535,13 @@ const (
// minimize costs if the client doesn't read the entire range), and as the
// caller iterates over more items in the range larger batches will be
// transferred in order to minimize latency. After enough iterations, the
// iterator mode will eventually reach the same byte limit as “WANT_ALL“
// iterator mode will eventually reach the same byte limit as ``WANT_ALL``
StreamingModeIterator StreamingMode = 0
// Infrequently used. The client has passed a specific row limit and wants
// that many rows delivered in a single batch. Because of iterator operation
// in client drivers make request batches transparent to the user, consider
// “WANT_ALL“ StreamingMode instead. A row limit must be specified if this
// ``WANT_ALL`` StreamingMode instead. A row limit must be specified if this
// mode is used.
StreamingModeExact StreamingMode = 1
@ -629,15 +658,15 @@ type ErrorPredicate int
const (
// Returns “true“ if the error indicates the operations in the transactions
// Returns ``true`` if the error indicates the operations in the transactions
// should be retried because of transient error.
ErrorPredicateRetryable ErrorPredicate = 50000
// Returns “true“ if the error indicates the transaction may have succeeded,
// Returns ``true`` if the error indicates the transaction may have succeeded,
// though not in a way the system can verify.
ErrorPredicateMaybeCommitted ErrorPredicate = 50001
// Returns “true“ if the error indicates the transaction has not committed,
// Returns ``true`` if the error indicates the transaction has not committed,
// though in a way that can be retried.
ErrorPredicateRetryableNotCommitted ErrorPredicate = 50002
)

View File

@ -22,7 +22,7 @@
package fdb
// #define FDB_API_VERSION 620
// #define FDB_API_VERSION 630
// #include <foundationdb/fdb_c.h>
import "C"

View File

@ -86,3 +86,11 @@ func (s Snapshot) GetReadVersion() FutureInt64 {
func (s Snapshot) GetDatabase() Database {
return s.transaction.db
}
// GetEstimatedRangeSizeBytes returns an estimate of the number of bytes
// stored in the given range, evaluated against this snapshot's transaction.
func (s Snapshot) GetEstimatedRangeSizeBytes(r ExactRange) FutureInt64 {
	begin, end := r.FDBRangeKeys()
	return s.getEstimatedRangeSizeBytes(begin.FDBKey(), end.FDBKey())
}

View File

@ -35,6 +35,8 @@ package subspace
import (
"bytes"
"errors"
"fmt"
"github.com/apple/foundationdb/bindings/go/src/fdb"
"github.com/apple/foundationdb/bindings/go/src/fdb/tuple"
)
@ -54,6 +56,15 @@ type Subspace interface {
// Subspace prepended.
Pack(t tuple.Tuple) fdb.Key
// PackWithVersionstamp returns the key encoding the specified tuple in
// the subspace so that it may be used as the key in fdb.Transaction's
// SetVersionstampedKey() method. The passed tuple must contain exactly
// one incomplete tuple.Versionstamp instance or the method will return
// with an error. The behavior here is the same as if one used the
// tuple.PackWithVersionstamp() method to appropriately pack together this
// subspace and the passed tuple.
PackWithVersionstamp(t tuple.Tuple) (fdb.Key, error)
// Unpack returns the Tuple encoded by the given key with the prefix of this
// Subspace removed. Unpack will return an error if the key is not in this
// Subspace or does not encode a well-formed Tuple.
@ -67,13 +78,14 @@ type Subspace interface {
// FoundationDB keys (corresponding to the prefix of this Subspace).
fdb.KeyConvertible
// All Subspaces implement fdb.ExactRange and fdb.Range, and describe all
// keys logically in this Subspace.
// All Subspaces implement fdb.ExactRange and fdb.Range, and describe all
// keys strictly within the subspace that encode tuples. Specifically,
// this will include all keys in [prefix + '\x00', prefix + '\xff').
fdb.ExactRange
}
type subspace struct {
b []byte
rawPrefix []byte
}
// AllKeys returns the Subspace corresponding to all keys in a FoundationDB
@ -96,36 +108,46 @@ func FromBytes(b []byte) Subspace {
return subspace{s}
}
// String implements the fmt.Stringer interface and returns the subspace as a
// human-readable byte string rendered via fdb.Printable.
func (s subspace) String() string {
	printablePrefix := fdb.Printable(s.rawPrefix)
	return fmt.Sprintf("Subspace(rawPrefix=%s)", printablePrefix)
}
// Sub returns a child Subspace whose prefix is this subspace's prefix followed
// by the tuple-encoding of the given elements.
func (s subspace) Sub(el ...tuple.TupleElement) Subspace {
	packed := tuple.Tuple(el).Pack()
	return subspace{concat(s.Bytes(), packed...)}
}
func (s subspace) Bytes() []byte {
return s.b
return s.rawPrefix
}
func (s subspace) Pack(t tuple.Tuple) fdb.Key {
return fdb.Key(concat(s.b, t.Pack()...))
return fdb.Key(concat(s.rawPrefix, t.Pack()...))
}
// PackWithVersionstamp packs the tuple with this subspace's raw prefix
// prepended, producing a key suitable for fdb.Transaction's
// SetVersionstampedKey method. Per the Subspace interface contract, the
// tuple must contain exactly one incomplete tuple.Versionstamp or an error
// is returned.
func (s subspace) PackWithVersionstamp(t tuple.Tuple) (fdb.Key, error) {
	return t.PackWithVersionstamp(s.rawPrefix)
}
func (s subspace) Unpack(k fdb.KeyConvertible) (tuple.Tuple, error) {
key := k.FDBKey()
if !bytes.HasPrefix(key, s.b) {
if !bytes.HasPrefix(key, s.rawPrefix) {
return nil, errors.New("key is not in subspace")
}
return tuple.Unpack(key[len(s.b):])
return tuple.Unpack(key[len(s.rawPrefix):])
}
func (s subspace) Contains(k fdb.KeyConvertible) bool {
return bytes.HasPrefix(k.FDBKey(), s.b)
return bytes.HasPrefix(k.FDBKey(), s.rawPrefix)
}
func (s subspace) FDBKey() fdb.Key {
return fdb.Key(s.b)
return fdb.Key(s.rawPrefix)
}
func (s subspace) FDBRangeKeys() (fdb.KeyConvertible, fdb.KeyConvertible) {
return fdb.Key(concat(s.b, 0x00)), fdb.Key(concat(s.b, 0xFF))
return fdb.Key(concat(s.rawPrefix, 0x00)), fdb.Key(concat(s.rawPrefix, 0xFF))
}
func (s subspace) FDBRangeKeySelectors() (fdb.Selectable, fdb.Selectable) {

View File

@ -0,0 +1,15 @@
package subspace
import (
"fmt"
"testing"
)
// TestSubspaceString verifies the human-readable rendering of a subspace prefix.
func TestSubspaceString(t *testing.T) {
	const want = "Subspace(rawPrefix=\\x01hello\\x00\\x02world\\x00\\x15*\\x15\\x99)"
	got := fmt.Sprint(Sub([]byte("hello"), "world", 42, 0x99))
	if got != want {
		t.Fatalf("printed subspace result differs, expected %v, got %v", want, got)
	}
}

View File

@ -22,7 +22,7 @@
package fdb
// #define FDB_API_VERSION 620
// #define FDB_API_VERSION 630
// #include <foundationdb/fdb_c.h>
import "C"
@ -39,6 +39,7 @@ type ReadTransaction interface {
GetReadVersion() FutureInt64
GetDatabase() Database
Snapshot() Snapshot
GetEstimatedRangeSizeBytes(r ExactRange) FutureInt64
ReadTransactor
}
@ -305,6 +306,34 @@ func (t Transaction) GetRange(r Range, options RangeOptions) RangeResult {
return t.getRange(r, options, false)
}
// getEstimatedRangeSizeBytes wraps the C API call
// fdb_transaction_get_estimated_range_size_bytes for the given key range.
func (t *transaction) getEstimatedRangeSizeBytes(beginKey Key, endKey Key) FutureInt64 {
	cFuture := C.fdb_transaction_get_estimated_range_size_bytes(
		t.ptr,
		byteSliceToPtr(beginKey),
		C.int(len(beginKey)),
		byteSliceToPtr(endKey),
		C.int(len(endKey)),
	)
	return &futureInt64{future: newFuture(cFuture)}
}
// GetEstimatedRangeSizeBytes returns an estimate of the number of bytes
// stored in the given range.
//
// Note: the estimate is derived from sampling done by the FDB server. The
// larger a key-value pair is, the more likely it is to be sampled and the
// more accurate its sampled size will be, so this API is best used against
// large ranges. As a rough guide, a returned size above 3MB can be
// considered accurate.
func (t Transaction) GetEstimatedRangeSizeBytes(r ExactRange) FutureInt64 {
	begin, end := r.FDBRangeKeys()
	return t.getEstimatedRangeSizeBytes(begin.FDBKey(), end.FDBKey())
}
func (t *transaction) getReadVersion() FutureInt64 {
return &futureInt64{
future: newFuture(C.fdb_transaction_get_read_version(t.ptr)),

View File

@ -43,6 +43,8 @@ import (
"fmt"
"math"
"math/big"
"strconv"
"strings"
"github.com/apple/foundationdb/bindings/go/src/fdb"
)
@ -66,6 +68,48 @@ type TupleElement interface{}
// packing T (modulo type normalization to []byte, uint64, and int64).
type Tuple []TupleElement
// String implements the fmt.Stringer interface, producing a human-readable
// rendering of this tuple. Most elements use their default Go representation.
func (tuple Tuple) String() string {
	var sb strings.Builder
	printTuple(tuple, &sb)
	return sb.String()
}
// printTuple writes a human-readable rendering of tuple into sb, recursing
// into nested tuples. Elements are comma-separated and wrapped in parentheses;
// strings are Go-quoted, byte slices are shown via fdb.Printable, and UUIDs
// and all other types fall back to their String()/default formatting.
func printTuple(tuple Tuple, sb *strings.Builder) {
	sb.WriteString("(")

	for i, t := range tuple {
		switch t := t.(type) {
		case Tuple:
			printTuple(t, sb)
		case nil:
			sb.WriteString("<nil>")
		case string:
			sb.WriteString(strconv.Quote(t))
		case UUID:
			sb.WriteString("UUID(")
			sb.WriteString(t.String())
			sb.WriteString(")")
		case []byte:
			sb.WriteString("b\"")
			sb.WriteString(fdb.Printable(t))
			sb.WriteString("\"")
		default:
			// For user-defined and standard types, use the standard Go
			// printer, which itself honors the Stringer interface.
			fmt.Fprintf(sb, "%v", t)
		}

		// Separator between elements (idiomatic gofmt form: no redundant
		// parentheses around the condition).
		if i < len(tuple)-1 {
			sb.WriteString(", ")
		}
	}

	sb.WriteString(")")
}
// UUID wraps a basic byte array as a UUID. We do not provide any special
// methods for accessing or generating the UUID, but as Go does not provide
// a built-in UUID type, this simple wrapper allows for other libraries
@ -73,6 +117,10 @@ type Tuple []TupleElement
// an instance of this type.
type UUID [16]byte
// String implements the fmt.Stringer interface, formatting the UUID as five
// dash-separated groups of lowercase hex digits (8-4-4-4-12 characters).
func (uuid UUID) String() string {
	return fmt.Sprintf("%x-%x-%x-%x-%x", uuid[0:4], uuid[4:6], uuid[6:8], uuid[8:10], uuid[10:])
}
// Versionstamp is struct for a FoundationDB verionstamp. Versionstamps are
// 12 bytes long composed of a 10 byte transaction version and a 2 byte user
// version. The transaction version is filled in at commit time and the user
@ -82,6 +130,11 @@ type Versionstamp struct {
UserVersion uint16
}
// String implements the fmt.Stringer interface, returning a human-readable
// rendering of this Versionstamp's transaction version bytes (via
// fdb.Printable) and its user version.
func (vs Versionstamp) String() string {
	return fmt.Sprintf("Versionstamp(%s, %d)", fdb.Printable(vs.TransactionVersion[:]), vs.UserVersion)
}
var incompleteTransactionVersion = [10]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}
const versionstampLength = 12

View File

@ -4,6 +4,7 @@ import (
"bytes"
"encoding/gob"
"flag"
"fmt"
"math/rand"
"os"
"testing"
@ -118,3 +119,38 @@ func BenchmarkTuplePacking(b *testing.B) {
})
}
}
// TestTupleString checks the human-readable rendering of tuples containing
// byte slices, strings, integers, nested tuples, booleans, UUIDs and
// Versionstamps. (Also normalizes the non-gofmt ":=[ ]struct" spacing.)
func TestTupleString(t *testing.T) {
	testCases := []struct {
		input    Tuple
		expected string
	}{
		{
			Tuple{[]byte("hello"), "world", 42, 0x99},
			"(b\"hello\", \"world\", 42, 153)",
		},
		{
			Tuple{nil, Tuple{"Ok", Tuple{1, 2}, "Go"}, 42, 0x99},
			"(<nil>, (\"Ok\", (1, 2), \"Go\"), 42, 153)",
		},
		{
			Tuple{"Bool", true, false},
			"(\"Bool\", true, false)",
		},
		{
			Tuple{"UUID", testUUID},
			"(\"UUID\", UUID(1100aabb-ccdd-eeff-1100-aabbccddeeff))",
		},
		{
			Tuple{"Versionstamp", Versionstamp{[10]byte{0, 0, 0, 0xaa, 0, 0xbb, 0, 0xcc, 0, 0xdd}, 620}},
			"(\"Versionstamp\", Versionstamp(\\x00\\x00\\x00\\xaa\\x00\\xbb\\x00\\xcc\\x00\\xdd, 620))",
		},
	}

	for _, testCase := range testCases {
		printed := fmt.Sprint(testCase.input)
		if printed != testCase.expected {
			t.Fatalf("printed tuple result differs, expected %v, got %v", testCase.expected, printed)
		}
	}
}

View File

@ -22,6 +22,8 @@ set(JAVA_BINDING_SRCS
src/main/com/apple/foundationdb/directory/NoSuchDirectoryException.java
src/main/com/apple/foundationdb/directory/package-info.java
src/main/com/apple/foundationdb/directory/PathUtil.java
src/main/com/apple/foundationdb/DirectBufferIterator.java
src/main/com/apple/foundationdb/DirectBufferPool.java
src/main/com/apple/foundationdb/FDB.java
src/main/com/apple/foundationdb/FDBDatabase.java
src/main/com/apple/foundationdb/FDBTransaction.java
@ -56,6 +58,7 @@ set(JAVA_BINDING_SRCS
src/main/com/apple/foundationdb/testing/Promise.java
src/main/com/apple/foundationdb/testing/PerfMetric.java
src/main/com/apple/foundationdb/tuple/ByteArrayUtil.java
src/main/com/apple/foundationdb/tuple/FastByteComparisons.java
src/main/com/apple/foundationdb/tuple/IterableComparator.java
src/main/com/apple/foundationdb/tuple/package-info.java
src/main/com/apple/foundationdb/tuple/StringUtil.java
@ -139,7 +142,7 @@ set_target_properties(java_workloads PROPERTIES
target_link_libraries(java_workloads PUBLIC fdb_c ${JNI_LIBRARIES})
target_include_directories(java_workloads PUBLIC ${JNI_INCLUDE_DIRS})
set(CMAKE_JAVA_COMPILE_FLAGS "-source" "1.8" "-target" "1.8")
set(CMAKE_JAVA_COMPILE_FLAGS "-source" "1.8" "-target" "1.8" "-XDignore.symbol.file")
set(CMAKE_JNI_TARGET TRUE)
# build a manifest file
@ -169,8 +172,6 @@ file(WRITE ${MANIFEST_FILE} ${MANIFEST_TEXT})
add_jar(fdb-java ${JAVA_BINDING_SRCS} ${GENERATED_JAVA_FILES} ${CMAKE_SOURCE_DIR}/LICENSE
OUTPUT_DIR ${PROJECT_BINARY_DIR}/lib VERSION ${CMAKE_PROJECT_VERSION} MANIFEST ${MANIFEST_FILE})
add_dependencies(fdb-java fdb_java_options fdb_java)
add_jar(foundationdb-tests SOURCES ${JAVA_TESTS_SRCS} INCLUDE_JARS fdb-java)
add_dependencies(foundationdb-tests fdb_java_options)
# TODO[mpilman]: The java RPM will require some more effort (mostly on debian). However,
# most people will use the fat-jar, so it is not clear how high this priority is.
@ -237,6 +238,16 @@ if(NOT OPEN_FOR_IDE)
WORKING_DIRECTORY ${unpack_dir}
DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/lib_copied
COMMENT "Build ${target_jar}")
add_jar(foundationdb-tests SOURCES ${JAVA_TESTS_SRCS} INCLUDE_JARS fdb-java)
add_dependencies(foundationdb-tests fdb_java_options)
set(tests_jar ${jar_destination}/fdb-java-${CMAKE_PROJECT_VERSION}${prerelease_string}-tests.jar)
add_custom_command(OUTPUT ${tests_jar}
COMMAND ${CMAKE_COMMAND} -E copy foundationdb-tests.jar "${tests_jar}"
WORKING_DIRECTORY .
DEPENDS foundationdb-tests
COMMENT "Build ${tests_jar}")
add_custom_target(fdb-java-tests ALL DEPENDS ${tests_jar})
add_dependencies(fdb-java-tests foundationdb-tests)
add_custom_target(fat-jar ALL DEPENDS ${target_jar})
add_dependencies(fat-jar fdb-java)
add_dependencies(fat-jar copy_lib)

View File

@ -19,7 +19,7 @@
*/
#include <foundationdb/ClientWorkload.h>
#define FDB_API_VERSION 620
#define FDB_API_VERSION 630
#include <foundationdb/fdb_c.h>
#include <jni.h>
@ -371,9 +371,11 @@ struct JVM {
{ { "send", "(JZ)V", reinterpret_cast<void*>(&promiseSend) } });
auto fdbClass = getClass("com/apple/foundationdb/FDB");
jmethodID selectMethod =
env->GetStaticMethodID(fdbClass, "selectAPIVersion", "(IZ)Lcom/apple/foundationdb/FDB;");
env->GetStaticMethodID(fdbClass, "selectAPIVersion", "(I)Lcom/apple/foundationdb/FDB;");
checkException();
env->CallStaticObjectMethod(fdbClass, selectMethod, jint(620), jboolean(false));
auto fdbInstance = env->CallStaticObjectMethod(fdbClass, selectMethod, jint(630));
checkException();
env->CallObjectMethod(fdbInstance, getMethod(fdbClass, "disableShutdownHook", "()V"));
checkException();
}

View File

@ -31,7 +31,7 @@ make packages
#### Multi-Platform Jar-File
If you want to create a jar file that can run on more than one supported
architecture (the offical one supports MacOS, Linux, and Windows), you can do
architecture (the official one supports MacOS, Linux, and Windows), you can do
that by executing the following steps:
1. Create a directory called `lib` somewhere on your file system.

View File

@ -21,7 +21,7 @@
#include <jni.h>
#include <string.h>
#define FDB_API_VERSION 620
#define FDB_API_VERSION 630
#include <foundationdb/fdb_c.h>
@ -36,6 +36,11 @@ static JavaVM* g_jvm = nullptr;
static thread_local JNIEnv* g_thread_jenv = nullptr; // Defined for the network thread once it is running, and for any thread that has called registerCallback
static thread_local jmethodID g_IFutureCallback_call_methodID = JNI_NULL;
static thread_local bool is_external = false;
static jclass range_result_summary_class;
static jclass range_result_class;
static jclass string_class;
static jmethodID range_result_init;
static jmethodID range_result_summary_init;
void detachIfExternalThread(void *ignore) {
if(is_external && g_thread_jenv != nullptr) {
@ -275,10 +280,9 @@ JNIEXPORT jobject JNICALL Java_com_apple_foundationdb_FutureStrings_FutureString
return JNI_NULL;
}
jclass str_clazz = jenv->FindClass("java/lang/String");
if( jenv->ExceptionOccurred() )
return JNI_NULL;
jobjectArray arr = jenv->NewObjectArray(count, str_clazz, JNI_NULL);
jobjectArray arr = jenv->NewObjectArray(count, string_class, JNI_NULL);
if( !arr ) {
if( !jenv->ExceptionOccurred() )
throwOutOfMem(jenv);
@ -301,58 +305,12 @@ JNIEXPORT jobject JNICALL Java_com_apple_foundationdb_FutureStrings_FutureString
return arr;
}
JNIEXPORT jobject JNICALL Java_com_apple_foundationdb_FutureResults_FutureResults_1getSummary(JNIEnv *jenv, jobject, jlong future) {
if( !future ) {
throwParamNotNull(jenv);
return JNI_NULL;
}
jclass resultCls = jenv->FindClass("com/apple/foundationdb/RangeResultSummary");
if( jenv->ExceptionOccurred() )
return JNI_NULL;
jmethodID resultCtorId = jenv->GetMethodID(resultCls, "<init>", "([BIZ)V");
if( jenv->ExceptionOccurred() )
return JNI_NULL;
FDBFuture *f = (FDBFuture *)future;
const FDBKeyValue *kvs;
int count;
fdb_bool_t more;
fdb_error_t err = fdb_future_get_keyvalue_array( f, &kvs, &count, &more );
if( err ) {
safeThrow( jenv, getThrowable( jenv, err ) );
return JNI_NULL;
}
jbyteArray lastKey = JNI_NULL;
if(count) {
lastKey = jenv->NewByteArray(kvs[count - 1].key_length);
if( !lastKey ) {
if( !jenv->ExceptionOccurred() )
throwOutOfMem(jenv);
return JNI_NULL;
}
jenv->SetByteArrayRegion(lastKey, 0, kvs[count - 1].key_length, (jbyte *)kvs[count - 1].key);
}
jobject result = jenv->NewObject(resultCls, resultCtorId, lastKey, count, (jboolean)more);
if( jenv->ExceptionOccurred() )
return JNI_NULL;
return result;
}
// SOMEDAY: explore doing this more efficiently with Direct ByteBuffers
JNIEXPORT jobject JNICALL Java_com_apple_foundationdb_FutureResults_FutureResults_1get(JNIEnv *jenv, jobject, jlong future) {
if( !future ) {
throwParamNotNull(jenv);
return JNI_NULL;
}
jclass resultCls = jenv->FindClass("com/apple/foundationdb/RangeResult");
jmethodID resultCtorId = jenv->GetMethodID(resultCls, "<init>", "([B[IZ)V");
FDBFuture *f = (FDBFuture *)future;
@ -414,7 +372,7 @@ JNIEXPORT jobject JNICALL Java_com_apple_foundationdb_FutureResults_FutureResult
jenv->ReleaseByteArrayElements(keyValueArray, (jbyte *)keyvalues_barr, 0);
jenv->ReleaseIntArrayElements(lengthArray, length_barr, 0);
jobject result = jenv->NewObject(resultCls, resultCtorId, keyValueArray, lengthArray, (jboolean)more);
jobject result = jenv->NewObject(range_result_class, range_result_init, keyValueArray, lengthArray, (jboolean)more);
if( jenv->ExceptionOccurred() )
return JNI_NULL;
@ -646,6 +604,97 @@ JNIEXPORT jlong JNICALL Java_com_apple_foundationdb_FDBTransaction_Transaction_1
return (jlong)f;
}
// Serializes the key/value results of `future` into the direct ByteBuffer
// `jbuffer` (capacity `bufferCapacity` bytes) using the layout:
//   [jint count][jint more] then, per pair, [jint keyLen][jint valLen][key bytes][value bytes]
// If the buffer cannot hold every pair, only the first pairs that fit are
// copied and `more` is forced to true so the Java side requests the rest.
JNIEXPORT void JNICALL Java_com_apple_foundationdb_FutureResults_FutureResults_1getDirect(
	JNIEnv* jenv, jobject, jlong future, jobject jbuffer, jint bufferCapacity) {
	if( !future ) {
		throwParamNotNull(jenv);
		return;
	}

	// Direct-buffer address; null means the buffer is not a direct buffer
	// (or native access failed).
	uint8_t* buffer = (uint8_t*)jenv->GetDirectBufferAddress(jbuffer);
	if (!buffer) {
		if (!jenv->ExceptionOccurred())
			throwRuntimeEx(jenv, "Error getting handle to native resources");
		return;
	}

	FDBFuture* f = (FDBFuture*)future;
	const FDBKeyValue *kvs;
	int count;
	fdb_bool_t more;
	fdb_error_t err = fdb_future_get_keyvalue_array( f, &kvs, &count, &more );
	if( err ) {
		safeThrow( jenv, getThrowable( jenv, err ) );
		return;
	}

	// Capacity for Metadata+Keys+Values
	//   => sizeof(jint) for total key/value pairs
	//   => sizeof(jint) to store more flag
	//   => sizeof(jint) to store key length per KV pair
	//   => sizeof(jint) to store value length per KV pair
	int totalCapacityNeeded = 2 * sizeof(jint);
	for(int i = 0; i < count; i++) {
		totalCapacityNeeded += kvs[i].key_length + kvs[i].value_length + 2*sizeof(jint);
		if (bufferCapacity < totalCapacityNeeded) {
			count = i; /* Only fit first `i` K/V pairs */
			more = true;
			break;
		}
	}

	int offset = 0;

	// First copy RangeResultSummary, i.e. [keyCount, more]
	memcpy(buffer + offset, &count, sizeof(jint));
	offset += sizeof(jint);

	memcpy(buffer + offset, &more, sizeof(jint));
	offset += sizeof(jint);

	// Then each pair: both lengths, followed by the raw key and value bytes.
	for (int i = 0; i < count; i++) {
		memcpy(buffer + offset, &kvs[i].key_length, sizeof(jint));
		memcpy(buffer + offset + sizeof(jint), &kvs[i].value_length, sizeof(jint));
		offset += 2 * sizeof(jint);

		memcpy(buffer + offset, kvs[i].key, kvs[i].key_length);
		offset += kvs[i].key_length;

		memcpy(buffer + offset, kvs[i].value, kvs[i].value_length);
		offset += kvs[i].value_length;
	}
}
// JNI bridge for Transaction.getEstimatedRangeSizeBytes: pins both key byte
// arrays, issues fdb_transaction_get_estimated_range_size_bytes, then releases
// the pins with JNI_ABORT (the keys are read-only, so no copy-back is needed).
// Returns the FDBFuture* as a jlong handle, or 0 after throwing on error.
JNIEXPORT jlong JNICALL Java_com_apple_foundationdb_FDBTransaction_Transaction_1getEstimatedRangeSizeBytes(JNIEnv *jenv, jobject, jlong tPtr,
		jbyteArray beginKeyBytes, jbyteArray endKeyBytes) {
	if( !tPtr || !beginKeyBytes || !endKeyBytes) {
		throwParamNotNull(jenv);
		return 0;
	}
	FDBTransaction *tr = (FDBTransaction *)tPtr;

	uint8_t *startKey = (uint8_t *)jenv->GetByteArrayElements( beginKeyBytes, JNI_NULL );
	if(!startKey) {
		if( !jenv->ExceptionOccurred() )
			throwRuntimeEx( jenv, "Error getting handle to native resources" );
		return 0;
	}

	uint8_t *endKey = (uint8_t *)jenv->GetByteArrayElements(endKeyBytes, JNI_NULL);
	if (!endKey) {
		// Release the already-pinned begin key before bailing out.
		jenv->ReleaseByteArrayElements( beginKeyBytes, (jbyte *)startKey, JNI_ABORT );
		if( !jenv->ExceptionOccurred() )
			throwRuntimeEx( jenv, "Error getting handle to native resources" );
		return 0;
	}

	FDBFuture *f = fdb_transaction_get_estimated_range_size_bytes( tr, startKey, jenv->GetArrayLength( beginKeyBytes ), endKey, jenv->GetArrayLength( endKeyBytes ) );
	jenv->ReleaseByteArrayElements( beginKeyBytes, (jbyte *)startKey, JNI_ABORT );
	jenv->ReleaseByteArrayElements( endKeyBytes, (jbyte *)endKey, JNI_ABORT );
	return (jlong)f;
}
JNIEXPORT void JNICALL Java_com_apple_foundationdb_FDBTransaction_Transaction_1set(JNIEnv *jenv, jobject, jlong tPtr, jbyteArray keyBytes, jbyteArray valueBytes) {
if( !tPtr || !keyBytes || !valueBytes ) {
throwParamNotNull(jenv);
@ -1013,8 +1062,43 @@ JNIEXPORT void JNICALL Java_com_apple_foundationdb_FDB_Network_1stop(JNIEnv *jen
}
// Called by the JVM when this native library is loaded. Caches the JavaVM
// pointer plus global references to frequently used classes and their
// constructors so hot result paths avoid repeated FindClass/GetMethodID
// lookups. Returns JNI_ERR on any lookup failure (a Java exception is then
// pending), which aborts the library load instead of crashing later on a
// null class/method handle.
jint JNI_OnLoad(JavaVM *vm, void *reserved) {
	JNIEnv *env;
	g_jvm = vm;
	if (vm->GetEnv((void**)&env, JNI_VERSION_1_6) != JNI_OK) {
		return JNI_ERR;
	}

	jclass local_range_result_class = env->FindClass("com/apple/foundationdb/RangeResult");
	if (!local_range_result_class) {
		return JNI_ERR;
	}
	range_result_init = env->GetMethodID(local_range_result_class, "<init>", "([B[IZ)V");
	if (!range_result_init) {
		return JNI_ERR;
	}
	range_result_class = (jclass) (env)->NewGlobalRef(local_range_result_class);

	jclass local_range_result_summary_class = env->FindClass("com/apple/foundationdb/RangeResultSummary");
	if (!local_range_result_summary_class) {
		return JNI_ERR;
	}
	range_result_summary_init = env->GetMethodID(local_range_result_summary_class, "<init>", "([BIZ)V");
	if (!range_result_summary_init) {
		return JNI_ERR;
	}
	range_result_summary_class = (jclass) (env)->NewGlobalRef(local_range_result_summary_class);

	jclass local_string_class = env->FindClass("java/lang/String");
	if (!local_string_class) {
		return JNI_ERR;
	}
	string_class = (jclass) (env)->NewGlobalRef(local_string_class);

	return JNI_VERSION_1_6;
}
// Invoked automatically once the classloader owning this native library is
// destroyed. Drops the global references cached by JNI_OnLoad so the garbage
// collector can reclaim the corresponding classes.
void JNI_OnUnload(JavaVM *vm, void *reserved) {
	JNIEnv* env;
	if (vm->GetEnv((void**)&env, JNI_VERSION_1_6) != JNI_OK) {
		return;
	}

	// Release each cached global reference, if it was ever created.
	if (range_result_summary_class != NULL) {
		env->DeleteGlobalRef(range_result_summary_class);
	}
	if (range_result_class != NULL) {
		env->DeleteGlobalRef(range_result_class);
	}
	if (string_class != NULL) {
		env->DeleteGlobalRef(string_class);
	}
}
#ifdef __cplusplus

View File

@ -1,104 +0,0 @@
<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="14.1" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<Import Project="$(SolutionDir)versions.target" />
<PropertyGroup Condition="'$(Release)' != 'true' ">
<PreReleaseDecoration>-PRERELEASE</PreReleaseDecoration>
</PropertyGroup>
<PropertyGroup Condition="'$(Release)' == 'true' ">
<PreprocessorDefinitions>FDB_CLEAN_BUILD;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<PreReleaseDecoration>
</PreReleaseDecoration>
</PropertyGroup>
<ItemGroup Label="ProjectConfigurations">
<ProjectConfiguration Include="Debug|x64">
<Configuration>Debug</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
<ProjectConfiguration Include="Release|x64">
<Configuration>Release</Configuration>
<Platform>x64</Platform>
</ProjectConfiguration>
</ItemGroup>
<PropertyGroup Label="Globals">
<ProjectGuid>{9617584C-22E8-4272-934F-733F378BF6AE}</ProjectGuid>
<RootNamespace>java</RootNamespace>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
<ConfigurationType>DynamicLibrary</ConfigurationType>
<UseDebugLibraries>true</UseDebugLibraries>
<CharacterSet>MultiByte</CharacterSet>
<PlatformToolset>v141</PlatformToolset>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
<ConfigurationType>DynamicLibrary</ConfigurationType>
<UseDebugLibraries>false</UseDebugLibraries>
<WholeProgramOptimization>true</WholeProgramOptimization>
<CharacterSet>MultiByte</CharacterSet>
<PlatformToolset>v141</PlatformToolset>
</PropertyGroup>
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
<ImportGroup Label="ExtensionSettings">
</ImportGroup>
<ImportGroup Label="PropertySheets">
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
</ImportGroup>
<PropertyGroup Label="UserMacros" />
<PropertyGroup>
<IncludePath>..\..\;C:\Program Files\Java\jdk6\include\win32;C:\Program Files\Java\jdk6\include;C:\Program Files\boost_1_67_0;$(IncludePath)</IncludePath>
<OutDir>$(SolutionDir)bin\$(Configuration)\</OutDir>
<IntDir>$(SystemDrive)\temp\msvcfdb\$(Platform)$(Configuration)\$(MSBuildProjectName)\</IntDir>
</PropertyGroup>
<ItemDefinitionGroup>
<Link>
<GenerateDebugInformation>true</GenerateDebugInformation>
<AdditionalDependencies>$(SolutionDir)bin\$(Configuration)\fdb_c.lib;%(AdditionalDependencies)</AdditionalDependencies>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<AdditionalIncludeDirectories>%(AdditionalIncludeDirectories);$(SolutionDir)bindings\c</AdditionalIncludeDirectories>
<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=_WIN32_WINNT_WS03;BOOST_ALL_NO_LIB;WINVER=_WIN32_WINNT_WS03;NTDDI_VERSION=NTDDI_WS03;_DEBUG;_HAS_ITERATOR_DEBUGGING=0;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
<AdditionalOptions>/bigobj "@$(SolutionDir)flow/no_intellisense.opt" %(AdditionalOptions)</AdditionalOptions>
<LanguageStandard>stdcpp17</LanguageStandard>
</ClCompile>
<Link>
<SubSystem>Windows</SubSystem>
</Link>
</ItemDefinitionGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<AdditionalIncludeDirectories>%(AdditionalIncludeDirectories);$(SolutionDir)bindings\c</AdditionalIncludeDirectories>
<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=_WIN32_WINNT_WS03;BOOST_ALL_NO_LIB;WINVER=_WIN32_WINNT_WS03;NTDDI_VERSION=NTDDI_WS03;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<RuntimeLibrary>MultiThreaded</RuntimeLibrary>
<AdditionalOptions>/bigobj "@$(SolutionDir)flow/no_intellisense.opt" %(AdditionalOptions)</AdditionalOptions>
<LanguageStandard>stdcpp17</LanguageStandard>
</ClCompile>
<Link>
<EnableCOMDATFolding>true</EnableCOMDATFolding>
<OptimizeReferences>true</OptimizeReferences>
<SubSystem>Windows</SubSystem>
</Link>
</ItemDefinitionGroup>
<ItemGroup>
<ClCompile Include="fdbJNI.cpp" />
</ItemGroup>
<ImportGroup Label="ExtensionTargets" />
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
<Target Name="AfterClean">
<ItemGroup>
<FilesToDelete Include="$(OutDir)fdb_java.dll-*">
<Visible>false</Visible>
</FilesToDelete>
</ItemGroup>
<Message Text="Cleaning old dll files" Importance="high" />
<Delete Files="@(FilesToDelete)" />
</Target>
</Project>

View File

@ -1,222 +0,0 @@
#
# local.mk
#
# This source file is part of the FoundationDB open source project
#
# Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- mode: makefile; -*-
fdb_java_LDFLAGS := -Llib
fdb_java_CFLAGS := $(fdbclient_CFLAGS) -Ibindings/c
# We only override if the environment didn't set it (this is used by
# the fdbwebsite documentation build process)
JAVADOC_DIR ?= bindings/java
fdb_java_LIBS := lib/libfdb_c.$(DLEXT)
ifeq ($(RELEASE),true)
JARVER = $(VERSION)
APPLEJARVER = $(VERSION)
else
JARVER = $(VERSION)-PRERELEASE
APPLEJARVER = $(VERSION)-SNAPSHOT
endif
ifeq ($(PLATFORM),linux)
JAVA_HOME ?= /usr/lib/jvm/java-8-openjdk-amd64
fdb_java_CFLAGS += -I$(JAVA_HOME)/include -I$(JAVA_HOME)/include/linux
fdb_java_LDFLAGS += -static-libgcc
java_ARCH := amd64
else ifeq ($(PLATFORM),osx)
JAVA_HOME ?= $(shell /usr/libexec/java_home)
fdb_java_CFLAGS += -I$(JAVA_HOME)/include -I$(JAVA_HOME)/include/darwin
java_ARCH := x86_64
endif
JAVA_GENERATED_SOURCES := bindings/java/src/main/com/apple/foundationdb/NetworkOptions.java bindings/java/src/main/com/apple/foundationdb/DatabaseOptions.java bindings/java/src/main/com/apple/foundationdb/TransactionOptions.java bindings/java/src/main/com/apple/foundationdb/StreamingMode.java bindings/java/src/main/com/apple/foundationdb/ConflictRangeType.java bindings/java/src/main/com/apple/foundationdb/MutationType.java bindings/java/src/main/com/apple/foundationdb/FDBException.java
JAVA_SOURCES := $(JAVA_GENERATED_SOURCES) bindings/java/src/main/com/apple/foundationdb/*.java bindings/java/src/main/com/apple/foundationdb/async/*.java bindings/java/src/main/com/apple/foundationdb/tuple/*.java bindings/java/src/main/com/apple/foundationdb/directory/*.java bindings/java/src/main/com/apple/foundationdb/subspace/*.java bindings/java/src/test/com/apple/foundationdb/test/*.java
fdb_java: bindings/java/foundationdb-client.jar bindings/java/foundationdb-tests.jar
bindings/java/foundationdb-tests.jar: bindings/java/.classstamp
@echo "Building $@"
@jar cf $@ -C bindings/java/classes/test com/apple/foundationdb
bindings/java/foundationdb-client.jar: bindings/java/.classstamp lib/libfdb_java.$(DLEXT)
@echo "Building $@"
@rm -rf bindings/java/classes/main/lib/$(PLATFORM)/$(java_ARCH)
@mkdir -p bindings/java/classes/main/lib/$(PLATFORM)/$(java_ARCH)
@cp lib/libfdb_java.$(DLEXT) bindings/java/classes/main/lib/$(PLATFORM)/$(java_ARCH)/libfdb_java.$(java_DLEXT)
@jar cf $@ -C bindings/java/classes/main com/apple/foundationdb -C bindings/java/classes/main lib
fdb_java_jar_clean:
@rm -rf $(JAVA_GENERATED_SOURCES)
@rm -rf bindings/java/classes
@rm -f bindings/java/foundationdb-client.jar bindings/java/foundationdb-tests.jar bindings/java/.classstamp
# Redefinition of a target already defined in generated.mk, but it's "okay" and the way things were done before.
fdb_java_clean: fdb_java_jar_clean
# A single vexillographer run writes its output into the target directory;
# StreamingMode.java stands in as the representative output, and the other
# generated files below depend on it with no-op (@true) recipes — presumably
# because the one invocation produces all of them. TODO confirm against
# vexillographer itself.
bindings/java/src/main/com/apple/foundationdb/StreamingMode.java: bin/vexillographer.exe fdbclient/vexillographer/fdb.options
@echo "Building Java options"
@$(MONO) bin/vexillographer.exe fdbclient/vexillographer/fdb.options java $(@D)
bindings/java/src/main/com/apple/foundationdb/MutationType.java: bindings/java/src/main/com/apple/foundationdb/StreamingMode.java
@true
bindings/java/src/main/com/apple/foundationdb/ConflictRangeType.java: bindings/java/src/main/com/apple/foundationdb/StreamingMode.java
@true
bindings/java/src/main/com/apple/foundationdb/FDBException.java: bindings/java/src/main/com/apple/foundationdb/StreamingMode.java
@true
# Pattern rule covering NetworkOptions/DatabaseOptions/TransactionOptions.
bindings/java/src/main/com/apple/foundationdb/%Options.java: bindings/java/src/main/com/apple/foundationdb/StreamingMode.java
@true
# Javadoc overview page is produced from a template with the release version
# substituted in by m4.
bindings/java/src/main/overview.html: bindings/java/src/main/overview.html.in $(ALL_MAKEFILES) versions.target
@m4 -DVERSION=$(VERSION) $< > $@
# Stamp file marking a successful compile of all main and test sources; main
# classes are compiled first so the test compile can put them on its classpath.
bindings/java/.classstamp: $(JAVA_SOURCES)
@echo "Compiling Java source"
@rm -rf bindings/java/classes
@mkdir -p bindings/java/classes/main
@mkdir -p bindings/java/classes/test
@$(JAVAC) $(JAVAFLAGS) -d bindings/java/classes/main bindings/java/src/main/com/apple/foundationdb/*.java bindings/java/src/main/com/apple/foundationdb/async/*.java bindings/java/src/main/com/apple/foundationdb/tuple/*.java bindings/java/src/main/com/apple/foundationdb/directory/*.java bindings/java/src/main/com/apple/foundationdb/subspace/*.java
@$(JAVAC) $(JAVAFLAGS) -cp bindings/java/classes/main -d bindings/java/classes/test bindings/java/src/test/com/apple/foundationdb/test/*.java
@echo timestamp > bindings/java/.classstamp
# Generates API documentation for the public packages into $(JAVADOC_DIR),
# linking standard-library references against the hosted Java 8 docs.
javadoc: $(JAVA_SOURCES) bindings/java/src/main/overview.html
@echo "Generating Javadocs"
@mkdir -p $(JAVADOC_DIR)/javadoc/
@javadoc -quiet -public -notimestamp -source 1.8 -sourcepath bindings/java/src/main \
-overview bindings/java/src/main/overview.html -d $(JAVADOC_DIR)/javadoc/ \
-windowtitle "FoundationDB Java Client API" \
-doctitle "FoundationDB Java Client API" \
-link "http://docs.oracle.com/javase/8/docs/api" \
com.apple.foundationdb com.apple.foundationdb.async com.apple.foundationdb.tuple com.apple.foundationdb.directory com.apple.foundationdb.subspace
# Removes the generated docs and the m4-expanded overview page.
javadoc_clean:
@rm -rf $(JAVADOC_DIR)/javadoc
@rm -f bindings/java/src/main/overview.html
# ---------------------------------------------------------------------------
# Platform-conditional release packaging: Linux builds all Java packages
# (optionally folding in prebuilt macOS/Windows native libs when FATJAR is
# set); macOS only publishes its versioned JNI dylib.
# ---------------------------------------------------------------------------
ifeq ($(PLATFORM),linux)
# We only need javadoc from one source
TARGETS += javadoc
CLEAN_TARGETS += javadoc_clean
# _release builds the lib on macOS and the jars (including the macOS lib) on Linux
TARGETS += fdb_java_release
CLEAN_TARGETS += fdb_java_release_clean
ifneq ($(FATJAR),)
# NOTE(review): make expands prerequisite lists immediately, but MAC_OBJ_JAVA /
# WINDOWS_OBJ_JAVA are only assigned further down in this section, so this
# dependency line may expand to nothing here — verify against the full
# Makefile whether the variables are also defined earlier via an include.
packages/fdb-java-$(JARVER).jar: $(MAC_OBJ_JAVA) $(WINDOWS_OBJ_JAVA)
endif
# pom.xml is generated from a template via m4; the Apple-versioned .pom is the
# same file with -PRERELEASE rewritten to -SNAPSHOT.
bindings/java/pom.xml: bindings/java/pom.xml.in $(ALL_MAKEFILES) versions.target
@echo "Generating $@"
@m4 -DVERSION=$(JARVER) -DNAME=fdb-java $< > $@
bindings/java/fdb-java-$(APPLEJARVER).pom: bindings/java/pom.xml
@echo "Copying $@"
# NOTE(review): unlike neighboring recipes this line has no '@' prefix, so the
# sed command is echoed during the build — confirm whether that is intentional.
sed -e 's/-PRERELEASE/-SNAPSHOT/g' bindings/java/pom.xml > "$@"
# Release jar: unpack foundationdb-client.jar, optionally add the macOS and
# Windows native libraries (FATJAR), repack, then append the LICENSE file.
packages/fdb-java-$(JARVER).jar: fdb_java versions.target
@echo "Building $@"
@rm -f $@
@rm -rf packages/jar_regular
@mkdir -p packages/jar_regular
@cd packages/jar_regular && unzip -qq $(TOPDIR)/bindings/java/foundationdb-client.jar
ifneq ($(FATJAR),)
@mkdir -p packages/jar_regular/lib/windows/amd64
@mkdir -p packages/jar_regular/lib/osx/x86_64
@cp $(MAC_OBJ_JAVA) packages/jar_regular/lib/osx/x86_64/libfdb_java.jnilib
@cp $(WINDOWS_OBJ_JAVA) packages/jar_regular/lib/windows/amd64/fdb_java.dll
endif
@cd packages/jar_regular && jar cf $(TOPDIR)/$@ *
@rm -r packages/jar_regular
@cd bindings && jar uf $(TOPDIR)/$@ ../LICENSE
# Tests jar is published as-is under the versioned name.
packages/fdb-java-$(JARVER)-tests.jar: fdb_java versions.target
@echo "Building $@"
@rm -f $@
@cp $(TOPDIR)/bindings/java/foundationdb-tests.jar packages/fdb-java-$(JARVER)-tests.jar
# Sources jar for Maven publication (main tree only).
packages/fdb-java-$(JARVER)-sources.jar: $(JAVA_GENERATED_SOURCES) versions.target
@echo "Building $@"
@rm -f $@
@jar cf $(TOPDIR)/$@ -C bindings/java/src/main com/apple/foundationdb
# Javadoc jar (LICENSE appended) for Maven publication.
packages/fdb-java-$(JARVER)-javadoc.jar: javadoc versions.target
@echo "Building $@"
@rm -f $@
@cd $(JAVADOC_DIR)/javadoc/ && jar cf $(TOPDIR)/$@ *
@cd bindings && jar uf $(TOPDIR)/$@ ../LICENSE
# Maven bundle: main/javadoc/sources jars plus both pom files in one archive.
packages/fdb-java-$(JARVER)-bundle.jar: packages/fdb-java-$(JARVER).jar packages/fdb-java-$(JARVER)-javadoc.jar packages/fdb-java-$(JARVER)-sources.jar bindings/java/pom.xml bindings/java/fdb-java-$(APPLEJARVER).pom versions.target
@echo "Building $@"
@rm -f $@
@rm -rf packages/bundle_regular
@mkdir -p packages/bundle_regular
@cp packages/fdb-java-$(JARVER).jar packages/fdb-java-$(JARVER)-javadoc.jar packages/fdb-java-$(JARVER)-sources.jar bindings/java/fdb-java-$(APPLEJARVER).pom packages/bundle_regular
@cp bindings/java/pom.xml packages/bundle_regular/pom.xml
@cd packages/bundle_regular && jar cf $(TOPDIR)/$@ *
@rm -rf packages/bundle_regular
fdb_java_release: packages/fdb-java-$(JARVER)-bundle.jar packages/fdb-java-$(JARVER)-tests.jar
fdb_java_release_clean:
@echo "Cleaning Java release"
@rm -f packages/fdb-java-*.jar packages/fdb-java-*-sources.jar bindings/java/pom.xml bindings/java/fdb-java-$(APPLEJARVER).pom
# Linux is where we build all the java packages
packages: fdb_java_release
packages_clean: fdb_java_release_clean
# Names of the prebuilt macOS/Windows JNI artifacts folded into the fat jar.
ifneq ($(FATJAR),)
MAC_OBJ_JAVA := lib/libfdb_java.jnilib-$(VERSION_ID)
WINDOWS_OBJ_JAVA := lib/fdb_java.dll-$(VERSION_ID)
endif
else ifeq ($(PLATFORM),osx)
TARGETS += fdb_java_release
CLEAN_TARGETS += fdb_java_release_clean
# On macOS the "release" is just the versioned JNI dylib plus its debug variant.
fdb_java_release: lib/libfdb_java.$(DLEXT)
@mkdir -p lib
@rm -f lib/libfdb_java.$(java_DLEXT)-*
@cp lib/libfdb_java.$(DLEXT) lib/libfdb_java.$(java_DLEXT)-$(VERSION_ID)
@cp lib/libfdb_java.$(DLEXT)-debug lib/libfdb_java.$(java_DLEXT)-debug-$(VERSION_ID)
fdb_java_release_clean:
@rm -f lib/libfdb_java.$(DLEXT)-*
@rm -f lib/libfdb_java.$(java_DLEXT)-*
# macOS needs to put its java lib in packages
packages: fdb_java_lib_package
fdb_java_lib_package: fdb_java_release
mkdir -p packages
cp lib/libfdb_java.$(java_DLEXT)-$(VERSION_ID) packages
cp lib/libfdb_java.$(java_DLEXT)-debug-$(VERSION_ID) packages
endif

View File

@ -304,4 +304,58 @@ public class ArrayUtilTests {
fail("Not yet implemented");
}
// Number of random byte[] samples generated for the comparer tests below.
private static final int SAMPLE_COUNT = 1000000;
// Upper bound (exclusive) on the length of each random sample.
private static final int SAMPLE_MAX_SIZE = 2048;
// Parallel lists populated in init(); both hold the SAME byte[] references so
// the "unsafe" and pure-Java comparer implementations can be cross-checked.
private List<byte[]> unsafe;
private List<byte[]> java;
@Before
public void init() {
	// Builds two parallel sample lists that share the same byte[] references,
	// so the two comparer implementations can be checked against each other.
	unsafe = new ArrayList<>(SAMPLE_COUNT);
	java = new ArrayList<>(SAMPLE_COUNT);
	Random random = new Random();
	// Fixed off-by-one: the loop previously ran with "i <= SAMPLE_COUNT",
	// creating SAMPLE_COUNT + 1 samples — inconsistent with the capacity hint
	// above and with the "i < SAMPLE_COUNT" loops in the test methods.
	for (int i = 0; i < SAMPLE_COUNT; i++) {
		byte[] addition = new byte[random.nextInt(SAMPLE_MAX_SIZE)];
		random.nextBytes(addition);
		unsafe.add(addition);
		java.add(addition);
	}
}
@Test
public void testComparatorSort() {
	// Sorting with either comparer implementation must impose the same order.
	final FastByteComparisons.Comparer<byte[]> viaUnsafe = FastByteComparisons.lexicographicalComparerUnsafeImpl();
	final FastByteComparisons.Comparer<byte[]> viaJava = FastByteComparisons.lexicographicalComparerJavaImpl();
	Collections.sort(unsafe, viaUnsafe);
	Collections.sort(java, viaJava);
	Assert.assertTrue(unsafe.equals(java));
}
@Test
public void testUnsafeComparison() {
	// Each index holds the same array reference in both lists, so the unsafe
	// comparer must report every pair as equal.
	final FastByteComparisons.Comparer<byte[]> cmp = FastByteComparisons.lexicographicalComparerUnsafeImpl();
	for (int idx = 0; idx < SAMPLE_COUNT; ++idx) {
		Assert.assertEquals(cmp.compare(unsafe.get(idx), java.get(idx)), 0);
	}
}
@Test
public void testJavaComparison() {
	// Same check as testUnsafeComparison, but for the pure-Java comparer.
	final FastByteComparisons.Comparer<byte[]> cmp = FastByteComparisons.lexicographicalComparerJavaImpl();
	for (int idx = 0; idx < SAMPLE_COUNT; ++idx) {
		Assert.assertEquals(cmp.compare(unsafe.get(idx), java.get(idx)), 0);
	}
}
@Test
public void testUnsafeComparisonWithOffet() {
	// NOTE(review): "Offet" is a typo for "Offset"; the name is kept unchanged
	// so external tooling/reports referring to it keep working.
	// Compares identical arrays starting at offset 4; samples too short to
	// have 4 bytes past the offset are skipped.
	final FastByteComparisons.Comparer<byte[]> cmp = FastByteComparisons.lexicographicalComparerUnsafeImpl();
	for (int idx = 0; idx < SAMPLE_COUNT; ++idx) {
		final byte[] left = unsafe.get(idx);
		final byte[] right = java.get(idx);
		if (left.length > 5) {
			Assert.assertEquals(cmp.compareTo(left, 4, left.length - 4, right, 4, right.length - 4), 0);
		}
	}
}
@Test
public void testJavaComparisonWithOffset() {
	// Offset-based variant of testJavaComparison: compares identical arrays
	// starting at offset 4, skipping samples that are too short.
	final FastByteComparisons.Comparer<byte[]> cmp = FastByteComparisons.lexicographicalComparerJavaImpl();
	for (int idx = 0; idx < SAMPLE_COUNT; ++idx) {
		final byte[] left = unsafe.get(idx);
		final byte[] right = java.get(idx);
		if (left.length > 5) {
			Assert.assertEquals(cmp.compareTo(left, 4, left.length - 4, right, 4, right.length - 4), 0);
		}
	}
}
}

View File

@ -0,0 +1,108 @@
/*
* DirectBufferIterator.java
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2015-2020 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.apple.foundationdb;
import java.io.Closeable;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.util.Iterator;
import java.util.NoSuchElementException;
/**
 * Holds the direct buffer that is shared with the JNI wrapper, and decodes
 * key-value pairs out of it.
 *
 * The serialization format of the result is:
 * [int keyCount, boolean more, ListOf&lt;(int keyLen, int valueLen, byte[] key, byte[] value)&gt;]
*/
// Iterates over key-value pairs decoded from a direct ByteBuffer filled by the
// JNI layer (see FutureResults_getDirect). The buffer is returned to the
// shared DirectBufferPool when the iterator is closed.
class DirectBufferIterator implements Iterator<KeyValue>, AutoCloseable {
private ByteBuffer byteBuffer;
// Index of the next key-value pair that next() will return.
private int current = 0;
// Number of pairs in the buffer; -1 until readResultsSummary() parses the header.
private int keyCount = -1;
// Whether the range read has more results beyond this batch.
private boolean more = false;
public DirectBufferIterator(ByteBuffer buffer) {
byteBuffer = buffer;
// The native writer uses the platform's byte order, so match it here.
byteBuffer.order(ByteOrder.nativeOrder());
}
@Override
public void close() {
// Return the buffer to the pool exactly once; subsequent calls are no-ops.
if (byteBuffer != null) {
DirectBufferPool.getInstance().add(byteBuffer);
byteBuffer = null;
}
}
// True once readResultsSummary() has decoded the [keyCount, more] header.
public boolean hasResultReady() {
return keyCount > -1;
}
@Override
public boolean hasNext() {
assert (hasResultReady());
return current < keyCount;
}
@Override
public KeyValue next() {
assert (hasResultReady()); // Must only be called once the result is ready.
if (!hasNext()) {
throw new NoSuchElementException();
}
// Each entry is serialized as (int keyLen, int valueLen, key bytes, value
// bytes); the getInt()/get() calls advance the buffer position, so entries
// must be consumed strictly in order.
final int keyLen = byteBuffer.getInt();
final int valueLen = byteBuffer.getInt();
byte[] key = new byte[keyLen];
byteBuffer.get(key);
byte[] value = new byte[valueLen];
byteBuffer.get(value);
current += 1;
return new KeyValue(key, value);
}
public ByteBuffer getBuffer() {
return byteBuffer;
}
// Total number of key-value pairs in this batch.
public int count() {
assert (hasResultReady());
return keyCount;
}
// Whether more results exist past this batch.
public boolean hasMore() {
assert (hasResultReady());
return more;
}
public int currentIndex() {
return current;
}
// Decodes the header at the start of the buffer (pair count and "more" flag).
// Must be called before any of the accessors above.
public void readResultsSummary() {
// rewind() already resets the position to 0; the explicit position(0) below
// is redundant but harmless.
byteBuffer.rewind();
byteBuffer.position(0);
keyCount = byteBuffer.getInt();
more = byteBuffer.getInt() > 0;
}
}

View File

@ -0,0 +1,89 @@
/*
* DirectBufferPool.java
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2015-2020 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.apple.foundationdb;
import java.nio.ByteBuffer;
import java.util.concurrent.ArrayBlockingQueue;
/**
 * A singleton that manages a pool of direct {@link java.nio.ByteBuffer}s, shared
 * by the {@link DirectBufferIterator} instances. It is the responsibility of the
 * user to return the borrowed buffers.
*/
class DirectBufferPool {
	static final DirectBufferPool __instance = new DirectBufferPool();

	// Every pooled buffer must be able to hold at least one maximum-size
	// key-value pair plus the serialized header: 10kB key + 100kB value + four
	// ints (key count, "more" flag, key length, value length).
	static public final int MIN_BUFFER_SIZE = (10 + 100) * 1000 + Integer.BYTES * 4;

	static private final int DEFAULT_NUM_BUFFERS = 128;
	static private final int DEFAULT_BUFFER_SIZE = 1024 * 512;

	// Current pool; replaced wholesale by resize().
	private ArrayBlockingQueue<ByteBuffer> buffers;
	// Capacity of buffers belonging to the current pool generation; used by
	// add() to reject buffers borrowed before a resize().
	private int currentBufferCapacity;

	public DirectBufferPool() {
		resize(DEFAULT_NUM_BUFFERS, DEFAULT_BUFFER_SIZE);
	}

	public static DirectBufferPool getInstance() {
		return __instance;
	}

	/**
	 * Replaces the pool with {@code newPoolSize} freshly allocated direct
	 * buffers of {@code bufferSize} bytes each. May throw an out-of-memory
	 * error if the requested direct memory cannot be allocated.
	 */
	public synchronized void resize(int newPoolSize, int bufferSize) {
		if (bufferSize < MIN_BUFFER_SIZE) {
			throw new IllegalArgumentException("'bufferSize' must be at-least: " + MIN_BUFFER_SIZE + " bytes");
		}
		buffers = new ArrayBlockingQueue<>(newPoolSize);
		currentBufferCapacity = bufferSize;
		for (int i = 0; i < newPoolSize; i++) {
			buffers.add(ByteBuffer.allocateDirect(bufferSize));
		}
	}

	/**
	 * Borrows a buffer from the pool; returns {@code null} when none are available.
	 */
	public synchronized ByteBuffer poll() {
		return buffers.poll();
	}

	/**
	 * Returns a previously borrowed buffer to the pool. Buffers whose capacity
	 * does not match the current configuration (i.e. borrowed before a
	 * {@link #resize}) are silently discarded.
	 */
	public synchronized void add(ByteBuffer buffer) {
		if (buffer.capacity() != currentBufferCapacity) {
			// Stale buffer allocated before a resize(); drop it rather than pool it.
			return;
		}
		buffers.offer(buffer);
	}
}

View File

@ -35,7 +35,7 @@ import java.util.concurrent.atomic.AtomicInteger;
* This call is required before using any other part of the API. The call allows
* an error to be thrown at this point to prevent client code from accessing a later library
* with incorrect assumptions from the current version. The API version documented here is version
* {@code 620}.<br><br>
* {@code 630}.<br><br>
* FoundationDB encapsulates multiple versions of its interface by requiring
* the client to explicitly specify the version of the API it uses. The purpose
* of this design is to allow you to upgrade the server, client libraries, or
@ -85,6 +85,10 @@ public class FDB {
private volatile boolean netStarted = false;
private volatile boolean netStopped = false;
volatile boolean warnOnUnclosed = true;
private boolean enableDirectBufferQueries = false;
private boolean useShutdownHook = true;
private Thread shutdownHook;
private final Semaphore netRunning = new Semaphore(1);
private final NetworkOptions options;
@ -104,15 +108,8 @@ public class FDB {
* Called only once to create the FDB singleton.
*/
private FDB(int apiVersion) {
this(apiVersion, true);
}
private FDB(int apiVersion, boolean controlRuntime) {
this.apiVersion = apiVersion;
options = new NetworkOptions(this::Network_setOption);
if (controlRuntime) {
Runtime.getRuntime().addShutdownHook(new Thread(this::stopNetwork));
}
}
/**
@ -167,9 +164,9 @@ public class FDB {
* object.<br><br>
*
* Warning: When using the multi-version client API, setting an API version that
* is not supported by a particular client library will prevent that client from
* is not supported by a particular client library will prevent that client from
* being used to connect to the cluster. In particular, you should not advance
* the API version of your application after upgrading your client until the
* the API version of your application after upgrading your client until the
* cluster has also been upgraded.
*
* @param version the API version required
@ -177,13 +174,6 @@ public class FDB {
* @return the FoundationDB API object
*/
public static FDB selectAPIVersion(final int version) throws FDBException {
return selectAPIVersion(version, true);
}
/**
This function is called from C++ if the VM is controlled directly from FDB
*/
private static synchronized FDB selectAPIVersion(final int version, boolean controlRuntime) throws FDBException {
if(singleton != null) {
if(version != singleton.getAPIVersion()) {
throw new IllegalArgumentException(
@ -193,13 +183,30 @@ public class FDB {
}
if(version < 510)
throw new IllegalArgumentException("API version not supported (minimum 510)");
if(version > 620)
throw new IllegalArgumentException("API version not supported (maximum 620)");
if(version > 630)
throw new IllegalArgumentException("API version not supported (maximum 630)");
Select_API_version(version);
FDB fdb = new FDB(version, controlRuntime);
singleton = new FDB(version);
return singleton = fdb;
return singleton;
}
/**
* Disables shutdown hook that stops network thread upon process shutdown. This is useful if you need to run
* your own shutdown hook that uses the FDB instance and you need to avoid race conditions
* with the default shutdown hook. Replacement shutdown hook should stop the network thread manually
* by calling {@link #stopNetwork}.
*/
public synchronized void disableShutdownHook() {
// Prevent startNetwork() from installing the hook later on.
useShutdownHook = false;
if(shutdownHook != null) {
// If this method was called after network thread started and shutdown hook was installed,
// remove this hook
// NOTE(review): Runtime.removeShutdownHook throws IllegalStateException if the
// JVM is already shutting down — confirm callers never race with shutdown.
Runtime.getRuntime().removeShutdownHook(shutdownHook);
// Release thread reference for GC
shutdownHook = null;
}
}
/**
@ -224,6 +231,35 @@ public class FDB {
return apiVersion;
}
/**
* Enables or disables use of DirectByteBuffers for getRange() queries.
*
* @param enabled Whether DirectByteBuffer should be used for getRange() queries.
*/
public void enableDirectBufferQuery(boolean enabled) {
enableDirectBufferQueries = enabled;
}
/**
* Determines whether {@code getRange()} queries can use {@code DirectByteBuffer} from
* {@link DirectBufferPool} to copy results.
*
* @return {@code true} if direct buffer queries have been enabled and {@code false} otherwise
*/
public boolean isDirectBufferQueriesEnabled() {
return enableDirectBufferQueries;
}
/**
* Resizes the DirectBufferPool with given parameters, which is used by getRange() requests.
*
* @param poolSize Number of buffers in pool
* @param bufferSize Size of each buffer in bytes
*/
public void resizeDirectBufferPool(int poolSize, int bufferSize) {
DirectBufferPool.getInstance().resize(poolSize, bufferSize);
}
/**
* Connects to the cluster specified by the
* <a href="/foundationdb/administration.html#default-cluster-file" target="_blank">default fdb.cluster file</a>.
@ -405,6 +441,11 @@ public class FDB {
if(netStarted) {
return;
}
if(useShutdownHook) {
// Register shutdown hook that stops network thread if user did not opt out
shutdownHook = new Thread(this::stopNetwork, "fdb-shutdown-hook");
Runtime.getRuntime().addShutdownHook(shutdownHook);
}
Network_setup();
netStarted = true;

View File

@ -25,6 +25,7 @@ import java.util.concurrent.CompletionException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executor;
import java.util.function.Function;
import java.nio.ByteBuffer;
import com.apple.foundationdb.async.AsyncIterable;
import com.apple.foundationdb.async.AsyncUtil;
@ -36,7 +37,6 @@ class FDBTransaction extends NativeObjectWrapper implements Transaction, OptionC
private final TransactionOptions options;
private boolean transactionOwner;
public final ReadTransaction snapshot;
class ReadSnapshot implements ReadTransaction {
@ -70,6 +70,16 @@ class FDBTransaction extends NativeObjectWrapper implements Transaction, OptionC
return getKey_internal(selector, true);
}
@Override
public CompletableFuture<Long> getEstimatedRangeSizeBytes(byte[] begin, byte[] end) {
return FDBTransaction.this.getEstimatedRangeSizeBytes(begin, end);
}
@Override
public CompletableFuture<Long> getEstimatedRangeSizeBytes(Range range) {
return FDBTransaction.this.getEstimatedRangeSizeBytes(range);
}
///////////////////
// getRange -> KeySelectors
///////////////////
@ -257,6 +267,21 @@ class FDBTransaction extends NativeObjectWrapper implements Transaction, OptionC
}
}
@Override
public CompletableFuture<Long> getEstimatedRangeSizeBytes(byte[] begin, byte[] end) {
// Hold the pointer read-lock while the JNI call is issued — presumably this
// guards the native transaction pointer's lifetime, matching the other
// native-call sites in this class. FutureInt64 wraps the returned native
// future handle.
pointerReadLock.lock();
try {
return new FutureInt64(Transaction_getEstimatedRangeSizeBytes(getPtr(), begin, end), executor);
} finally {
pointerReadLock.unlock();
}
}
@Override
public CompletableFuture<Long> getEstimatedRangeSizeBytes(Range range) {
// Convenience overload delegating to the (begin, end) form.
return this.getEstimatedRangeSizeBytes(range.begin, range.end);
}
///////////////////
// getRange -> KeySelectors
///////////////////
@ -344,10 +369,11 @@ class FDBTransaction extends NativeObjectWrapper implements Transaction, OptionC
" -- range get: (%s, %s) limit: %d, bytes: %d, mode: %d, iteration: %d, snap: %s, reverse %s",
begin.toString(), end.toString(), rowLimit, targetBytes, streamingMode,
iteration, Boolean.toString(isSnapshot), Boolean.toString(reverse)));*/
return new FutureResults(Transaction_getRange(
getPtr(), begin.getKey(), begin.orEqual(), begin.getOffset(),
end.getKey(), end.orEqual(), end.getOffset(), rowLimit, targetBytes,
streamingMode, iteration, isSnapshot, reverse), executor);
return new FutureResults(
Transaction_getRange(getPtr(), begin.getKey(), begin.orEqual(), begin.getOffset(),
end.getKey(), end.orEqual(), end.getOffset(), rowLimit, targetBytes,
streamingMode, iteration, isSnapshot, reverse),
FDB.instance().isDirectBufferQueriesEnabled(), executor);
} finally {
pointerReadLock.unlock();
}
@ -659,4 +685,5 @@ class FDBTransaction extends NativeObjectWrapper implements Transaction, OptionC
private native long Transaction_watch(long ptr, byte[] key) throws FDBException;
private native void Transaction_cancel(long cPtr);
private native long Transaction_getKeyLocations(long cPtr, byte[] key);
private native long Transaction_getEstimatedRangeSizeBytes(long cPtr, byte[] keyBegin, byte[] keyEnd);
}

View File

@ -20,12 +20,14 @@
package com.apple.foundationdb;
import java.nio.ByteBuffer;
import java.util.concurrent.Executor;
class FutureResults extends NativeFuture<RangeResultInfo> {
FutureResults(long cPtr, Executor executor) {
FutureResults(long cPtr, boolean enableDirectBufferQueries, Executor executor) {
super(cPtr);
registerMarshalCallback(executor);
this.enableDirectBufferQueries = enableDirectBufferQueries;
}
@Override
@ -44,26 +46,28 @@ class FutureResults extends NativeFuture<RangeResultInfo> {
return new RangeResultInfo(this);
}
public RangeResultSummary getSummary() {
try {
pointerReadLock.lock();
return FutureResults_getSummary(getPtr());
}
finally {
pointerReadLock.unlock();
}
}
public RangeResult getResults() {
ByteBuffer buffer = enableDirectBufferQueries
? DirectBufferPool.getInstance().poll()
: null;
try {
pointerReadLock.lock();
return FutureResults_get(getPtr());
}
finally {
if (buffer != null) {
try (DirectBufferIterator directIterator = new DirectBufferIterator(buffer)) {
FutureResults_getDirect(getPtr(), directIterator.getBuffer(), directIterator.getBuffer().capacity());
return new RangeResult(directIterator);
}
} else {
return FutureResults_get(getPtr());
}
} finally {
pointerReadLock.unlock();
}
}
private native RangeResultSummary FutureResults_getSummary(long ptr) throws FDBException;
private boolean enableDirectBufferQueries = false;
private native RangeResult FutureResults_get(long cPtr) throws FDBException;
private native void FutureResults_getDirect(long cPtr, ByteBuffer buffer, int capacity)
throws FDBException;
}

View File

@ -152,8 +152,6 @@ class RangeQuery implements AsyncIterable<KeyValue>, Iterable<KeyValue> {
@Override
public void accept(RangeResultInfo data, Throwable error) {
try {
final RangeResultSummary summary;
if(error != null) {
promise.completeExceptionally(error);
if(error instanceof Error) {
@ -163,7 +161,8 @@ class RangeQuery implements AsyncIterable<KeyValue>, Iterable<KeyValue> {
return;
}
summary = data.getSummary();
final RangeResult rangeResult = data.get();
final RangeResultSummary summary = rangeResult.getSummary();
if(summary.lastKey == null) {
promise.complete(Boolean.FALSE);
return;
@ -186,11 +185,11 @@ class RangeQuery implements AsyncIterable<KeyValue>, Iterable<KeyValue> {
// If this is the first fetch or the main chunk is exhausted
if(chunk == null || index == chunk.values.size()) {
nextChunk = null;
chunk = data.get();
chunk = rangeResult;
index = 0;
}
else {
nextChunk = data.get();
nextChunk = rangeResult;
}
}

View File

@ -51,4 +51,22 @@ class RangeResult {
}
this.more = more;
}
// Materializes a RangeResult by draining a DirectBufferIterator: reads the
// [keyCount, more] header first, then copies every key-value pair out of the
// shared direct buffer into heap-allocated KeyValue objects.
RangeResult(DirectBufferIterator iterator) {
iterator.readResultsSummary();
more = iterator.hasMore();
int count = iterator.count();
values = new ArrayList<KeyValue>(count);
for (int i = 0; i < count; ++i) {
values.add(iterator.next());
}
}
// Derives the summary from the already-materialized values; lastKey is null
// when the result is empty.
public RangeResultSummary getSummary() {
final int keyCount = values.size();
final byte[] lastKey = keyCount > 0 ? values.get(keyCount -1).getKey() : null;
return new RangeResultSummary(lastKey, keyCount, more);
}
}

View File

@ -21,10 +21,6 @@
package com.apple.foundationdb;
class RangeResultInfo {
RangeResultSummary getSummary() {
return f.getSummary();
}
RangeResult get() {
return f.getResults();
}

View File

@ -424,6 +424,37 @@ public interface ReadTransaction extends ReadTransactionContext {
AsyncIterable<KeyValue> getRange(Range range,
int limit, boolean reverse, StreamingMode mode);
/**
* Gets an estimate for the number of bytes stored in the given range.
* Note: the estimated size is calculated based on the sampling done by FDB server. The sampling
* algorithm works roughly in this way: the larger the key-value pair is, the more likely it would
* be sampled and the more accurate its sampled size would be. And due to
* that reason it is recommended to use this API to query against large ranges for accuracy considerations.
* For a rough reference, if the returned size is larger than 3MB, one can consider the size to be
* accurate.
*
* @param begin the beginning of the range (inclusive)
* @param end the end of the range (exclusive)
*
* @return a handle to access the results of the asynchronous call
*/
CompletableFuture<Long> getEstimatedRangeSizeBytes(byte[] begin, byte[] end);
/**
* Gets an estimate for the number of bytes stored in the given range.
* Note: the estimated size is calculated based on the sampling done by FDB server. The sampling
* algorithm works roughly in this way: the larger the key-value pair is, the more likely it would
* be sampled and the more accurate its sampled size would be. And due to
* that reason it is recommended to use this API to query against large ranges for accuracy considerations.
* For a rough reference, if the returned size is larger than 3MB, one can consider the size to be
* accurate.
* @param range the range of the keys
*
* @return a handle to access the results of the asynchronous call
*/
CompletableFuture<Long> getEstimatedRangeSizeBytes(Range range);
/**
* Returns a set of options that can be set on a {@code Transaction}
*

View File

@ -34,7 +34,8 @@ import com.apple.foundationdb.Transaction;
* {@link #printable(byte[])} for debugging non-text keys and values.
*
*/
public class ByteArrayUtil {
public class ByteArrayUtil extends FastByteComparisons {
private static final byte[] EMPTY_BYTES = new byte[0];
/**
* Joins a set of byte arrays into a larger array. The {@code interlude} is placed
@ -45,36 +46,46 @@ public class ByteArrayUtil {
* concatenated elements.
* @param parts the pieces to be joined. May be {@code null}, but does not allow
* for elements in the list to be {@code null}.
*
*
* @return a newly created concatenation of the input
*/
public static byte[] join(byte[] interlude, List<byte[]> parts) {
return interludeJoin(interlude, parts.toArray(new byte[0][]));
}
/**
* Joins a set of byte arrays into a larger array. The {@code interlude} is placed
* between each of the elements, but not at the beginning or end. In the case that
* the list is empty or {@code null}, a zero-length byte array will be returned.
*
* @param interlude can be {@code null} or zero length. Placed internally between
* concatenated elements.
* @param parts the pieces to be joined. May be {@code null}, but does not allow
* for elements in the array to be {@code null}.
*
* @return a newly created concatenation of the input
*/
public static byte[] interludeJoin(byte[] interlude, byte[][] parts) {
if(parts == null)
return new byte[0];
int partCount = parts.size();
int partCount = parts.length;
if(partCount == 0)
return new byte[0];
return EMPTY_BYTES;
if(interlude == null)
interlude = new byte[0];
interlude = EMPTY_BYTES;
int elementTotals = 0;
int interludeSize = interlude.length;
for(byte[] e : parts) {
elementTotals += e.length;
for (int i = 0; i < partCount; i++) {
elementTotals += parts[i].length;
}
byte[] dest = new byte[(interludeSize * (partCount - 1)) + elementTotals];
//System.out.println(" interlude -> " + ArrayUtils.printable(interlude));
int startByte = 0;
int index = 0;
for(byte[] part : parts) {
//System.out.println(" section -> " + ArrayUtils.printable(parts.get(i)));
int length = part.length;
for (int i = 0; i < partCount; i++) {
int length = parts[i].length;
if(length > 0) {
System.arraycopy(part, 0, dest, startByte, length);
System.arraycopy(parts[i], 0, dest, startByte, length);
startByte += length;
}
if(index < partCount - 1 && interludeSize > 0) {
@ -84,8 +95,6 @@ public class ByteArrayUtil {
}
index++;
}
//System.out.println(" complete -> " + ArrayUtils.printable(dest));
return dest;
}
@ -97,7 +106,7 @@ public class ByteArrayUtil {
* @return a newly created concatenation of the input
*/
public static byte[] join(byte[]... parts) {
return join(null, Arrays.asList(parts));
return interludeJoin(null, parts);
}
/**
@ -135,11 +144,7 @@ public class ByteArrayUtil {
if(src.length < start + pattern.length)
return false;
for(int i = 0; i < pattern.length; i++)
if(pattern[i] != src[start + i])
return false;
return true;
return compareTo(src, start, pattern.length, pattern, 0, pattern.length) == 0;
}
/**
@ -307,14 +312,7 @@ public class ByteArrayUtil {
* {@code r}.
*/
public static int compareUnsigned(byte[] l, byte[] r) {
for(int idx = 0; idx < l.length && idx < r.length; ++idx) {
if(l[idx] != r[idx]) {
return (l[idx] & 0xFF) < (r[idx] & 0xFF) ? -1 : 1;
}
}
if(l.length == r.length)
return 0;
return l.length < r.length ? -1 : 1;
return compareTo(l, 0, l.length, r, 0, r.length);
}
/**
@ -328,15 +326,11 @@ public class ByteArrayUtil {
* @return {@code true} if {@code array} starts with {@code prefix}
*/
public static boolean startsWith(byte[] array, byte[] prefix) {
// Short Circuit
if(array.length < prefix.length) {
return false;
}
for(int i = 0; i < prefix.length; ++i) {
if(prefix[i] != array[i]) {
return false;
}
}
return true;
return compareTo(array, 0, prefix.length, prefix, 0, prefix.length) == 0;
}
/**
@ -359,6 +353,21 @@ public class ByteArrayUtil {
return copy;
}
/**
* Computes the key that would sort immediately after {@code key}.
* {@code key} must be non-null.
*
* @param key byte array for which next key is to be computed
*
* @return a newly created byte array that would sort immediately after {@code key}
*/
public static byte[] keyAfter(byte[] key) {
byte[] copy = new byte[key.length + 1];
System.arraycopy(key, 0, copy, 0, key.length);
copy[key.length] = 0x0;
return copy;
}
/**
* Get a copy of an array, with all matching characters stripped from trailing edge.
* @param input array to copy. Must not be null.

View File

@ -0,0 +1,294 @@
/*
* FastByteComparisons.java
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.apple.foundationdb.tuple;
import java.lang.reflect.Field;
import java.nio.ByteOrder;
import java.security.AccessController;
import java.security.PrivilegedAction;
import java.util.Comparator;
import sun.misc.Unsafe;
/**
 * Utility code to do optimized byte-array comparison.
 * This is borrowed and slightly modified from Guava's UnsignedBytes
 * class to be able to compare arrays that start at non-zero offsets.
 *
 * <p>Bytes are compared as UNSIGNED values (0-255), which matches the
 * lexicographic ordering FoundationDB uses for keys.
 */
abstract class FastByteComparisons {

    // Mask used to promote a signed Java byte to its unsigned int value (0-255).
    private static final int UNSIGNED_MASK = 0xFF;

    /**
     * Lexicographically compare two byte arrays.
     *
     * @param buffer1 left operand, expected to not be null
     * @param buffer2 right operand, expected to not be null
     * @param offset1 Where to start comparing in the left buffer, expected to be &gt;= 0
     * @param offset2 Where to start comparing in the right buffer, expected to be &gt;= 0
     * @param length1 How much to compare from the left buffer, expected to be &gt;= 0
     * @param length2 How much to compare from the right buffer, expected to be &gt;= 0
     * @return 0 if equal, &lt; 0 if left is less than right, etc.
     */
    public static int compareTo(byte[] buffer1, int offset1, int length1,
            byte[] buffer2, int offset2, int length2) {
        // Delegates to whichever comparer was selected at class-load time
        // (Unsafe-based when available, pure Java otherwise).
        return LexicographicalComparerHolder.BEST_COMPARER.compareTo(
                buffer1, offset1, length1, buffer2, offset2, length2);
    }

    /**
     * Interface for both the java and unsafe comparators + offset based comparisons.
     * Extends {@link Comparator} so implementations can also be used to compare
     * whole arrays (offset 0, full length).
     * @param <T>
     */
    interface Comparer<T> extends Comparator<T> {
        /**
         * Lexicographically compare two byte arrays.
         *
         * @param buffer1 left operand
         * @param buffer2 right operand
         * @param offset1 Where to start comparing in the left buffer
         * @param offset2 Where to start comparing in the right buffer
         * @param length1 How much to compare from the left buffer
         * @param length2 How much to compare from the right buffer
         * @return 0 if equal, < 0 if left is less than right, etc.
         */
        abstract public int compareTo(T buffer1, int offset1, int length1,
                T buffer2, int offset2, int length2);
    }

    /**
     * Pure Java Comparer
     *
     * @return the byte-by-byte fallback comparer (always available)
     */
    static Comparer<byte[]> lexicographicalComparerJavaImpl() {
        return LexicographicalComparerHolder.PureJavaComparer.INSTANCE;
    }

    /**
     * Unsafe Comparer
     *
     * @return the word-at-a-time comparer; only safe to call when
     *         {@code sun.misc.Unsafe} is accessible (see getBestComparer)
     */
    static Comparer<byte[]> lexicographicalComparerUnsafeImpl() {
        return LexicographicalComparerHolder.UnsafeComparer.INSTANCE;
    }

    /**
     * Provides a lexicographical comparer implementation; either a Java
     * implementation or a faster implementation based on {@link Unsafe}.
     *
     * <p>Uses reflection to gracefully fall back to the Java implementation if
     * {@code Unsafe} isn't available.
     */
    private static class LexicographicalComparerHolder {
        // NOTE: this string must match the binary name of the nested enum below.
        // The Unsafe comparer is loaded reflectively by this exact name so that
        // its static initializer (which touches sun.misc.Unsafe) only runs when
        // we have decided to try it; renaming UnsafeComparer breaks the lookup.
        static final String UNSAFE_COMPARER_NAME =
                LexicographicalComparerHolder.class.getName() + "$UnsafeComparer";

        static final Comparer<byte[]> BEST_COMPARER = getBestComparer();

        /**
         * Returns the Unsafe-using Comparer, or falls back to the pure-Java
         * implementation if unable to do so.
         */
        static Comparer<byte[]> getBestComparer() {
            // Only attempt the word-based comparer on architectures known to
            // tolerate the unaligned 8-byte reads it performs.
            String arch = System.getProperty("os.arch");
            boolean unaligned = arch.equals("i386") || arch.equals("x86")
                    || arch.equals("amd64") || arch.equals("x86_64");
            if (!unaligned)
                return lexicographicalComparerJavaImpl();

            try {
                Class<?> theClass = Class.forName(UNSAFE_COMPARER_NAME);

                // yes, UnsafeComparer does implement Comparer<byte[]>
                @SuppressWarnings("unchecked")
                Comparer<byte[]> comparer =
                        (Comparer<byte[]>) theClass.getEnumConstants()[0];
                return comparer;
            } catch (Throwable t) { // ensure we really catch *everything*
                // Any failure (class init error from the Unsafe lookup, security
                // restrictions, etc.) degrades to the portable implementation.
                return lexicographicalComparerJavaImpl();
            }
        }

        /**
         * Java Comparer doing byte by byte comparisons
         *
         */
        enum PureJavaComparer implements Comparer<byte[]> {
            INSTANCE;

            /**
             *
             * CompareTo looking at two buffers.
             *
             * @param buffer1 left operand
             * @param buffer2 right operand
             * @param offset1 Where to start comparing in the left buffer
             * @param offset2 Where to start comparing in the right buffer
             * @param length1 How much to compare from the left buffer
             * @param length2 How much to compare from the right buffer
             * @return 0 if equal, < 0 if left is less than right, etc.
             */
            @Override
            public int compareTo(byte[] buffer1, int offset1, int length1,
                    byte[] buffer2, int offset2, int length2) {
                // Short circuit equal case
                if (buffer1 == buffer2 &&
                        offset1 == offset2 &&
                        length1 == length2) {
                    return 0;
                }
                int end1 = offset1 + length1;
                int end2 = offset2 + length2;
                // Compare unsigned byte values until the first mismatch.
                for (int i = offset1, j = offset2; i < end1 && j < end2; i++, j++) {
                    int a = (buffer1[i] & UNSIGNED_MASK);
                    int b = (buffer2[j] & UNSIGNED_MASK);
                    if (a != b) {
                        // Both a and b are in [0, 255], so this cannot overflow.
                        return a - b;
                    }
                }
                // Common prefix matched; the shorter input sorts first.
                return length1 - length2;
            }

            /**
             * Supports Comparator
             *
             * @param o1
             * @param o2
             * @return comparison
             */
            @Override
            public int compare(byte[] o1, byte[] o2) {
                return compareTo(o1, 0, o1.length, o2, 0, o2.length);
            }
        }

        /**
         *
         * Takes advantage of word based comparisons
         *
         */
        @SuppressWarnings("unused") // used via reflection
        enum UnsafeComparer implements Comparer<byte[]> {
            INSTANCE;

            static final Unsafe theUnsafe;

            /**
             * The offset to the first element in a byte array.
             */
            static final int BYTE_ARRAY_BASE_OFFSET;

            @Override
            public int compare(byte[] o1, byte[] o2) {
                return compareTo(o1, 0, o1.length, o2, 0, o2.length);
            }

            static {
                // Acquire sun.misc.Unsafe via its private static field rather
                // than Unsafe.getUnsafe(), which rejects non-bootclasspath callers.
                theUnsafe = (Unsafe) AccessController.doPrivileged(
                        (PrivilegedAction<Object>) () -> {
                            try {
                                Field f = Unsafe.class.getDeclaredField("theUnsafe");
                                f.setAccessible(true);
                                return f.get(null);
                            } catch (NoSuchFieldException e) {
                                // It doesn't matter what we throw;
                                // it's swallowed in getBestComparer().
                                throw new Error();
                            } catch (IllegalAccessException e) {
                                throw new Error();
                            }
                        });

                BYTE_ARRAY_BASE_OFFSET = theUnsafe.arrayBaseOffset(byte[].class);

                // sanity check - this should never fail
                if (theUnsafe.arrayIndexScale(byte[].class) != 1) {
                    throw new AssertionError();
                }
            }

            static final boolean LITTLE_ENDIAN =
                    ByteOrder.nativeOrder().equals(ByteOrder.LITTLE_ENDIAN);

            /**
             * Lexicographically compare two arrays.
             *
             * @param buffer1 left operand
             * @param buffer2 right operand
             * @param offset1 Where to start comparing in the left buffer
             * @param offset2 Where to start comparing in the right buffer
             * @param length1 How much to compare from the left buffer
             * @param length2 How much to compare from the right buffer
             * @return 0 if equal, < 0 if left is less than right, etc.
             */
            @Override
            public int compareTo(byte[] buffer1, int offset1, int length1,
                    byte[] buffer2, int offset2, int length2) {
                // Short circuit equal case
                if (buffer1 == buffer2 &&
                        offset1 == offset2 &&
                        length1 == length2) {
                    return 0;
                }
                final int stride = 8;
                final int minLength = Math.min(length1, length2);
                // Largest multiple of 8 not exceeding minLength; bytes past this
                // point are handled by the byte-at-a-time epilogue below.
                int strideLimit = minLength & ~(stride - 1);
                final long offset1Adj = offset1 + BYTE_ARRAY_BASE_OFFSET;
                final long offset2Adj = offset2 + BYTE_ARRAY_BASE_OFFSET;
                int i;

                /*
                 * Compare 8 bytes at a time. Benchmarking on x86 shows a stride of 8 bytes is no slower
                 * than 4 bytes even on 32-bit. On the other hand, it is substantially faster on 64-bit.
                 */
                for (i = 0; i < strideLimit; i += stride) {
                    long lw = theUnsafe.getLong(buffer1, offset1Adj + i);
                    long rw = theUnsafe.getLong(buffer2, offset2Adj + i);
                    if (lw != rw) {
                        if(!LITTLE_ENDIAN) {
                            // Big-endian: byte order matches significance order, so an
                            // unsigned 64-bit compare (signed compare after flipping the
                            // sign bit via +Long.MIN_VALUE) decides the result directly.
                            return ((lw + Long.MIN_VALUE) < (rw + Long.MIN_VALUE)) ? -1 : 1;
                        }

                        /*
                         * We want to compare only the first index where left[index] != right[index]. This
                         * corresponds to the least significant nonzero byte in lw ^ rw, since lw and rw are
                         * little-endian. Long.numberOfTrailingZeros(diff) tells us the least significant
                         * nonzero bit, and zeroing out the first three bits of L.nTZ gives us the shift to get
                         * that least significant nonzero byte. This comparison logic is based on UnsignedBytes
                         * comparator from guava v21
                         */
                        int n = Long.numberOfTrailingZeros(lw ^ rw) & ~0x7;
                        return ((int) ((lw >>> n) & UNSIGNED_MASK)) - ((int) ((rw >>> n) & UNSIGNED_MASK));
                    }
                }

                // The epilogue to cover the last (minLength % stride) elements.
                for (; i < minLength; i++) {
                    int a = (buffer1[offset1 + i] & UNSIGNED_MASK);
                    int b = (buffer2[offset2 + i] & UNSIGNED_MASK);
                    if (a != b) {
                        return a - b;
                    }
                }
                // All compared bytes equal; the shorter input sorts first.
                return length1 - length2;
            }
        }
    }
}

View File

@ -13,7 +13,7 @@ and then added to your classpath.<br>
<h1>Getting started</h1>
To start using FoundationDB from Java, create an instance of the
{@link com.apple.foundationdb.FDB FoundationDB API interface} with the version of the
API that you want to use (this release of the FoundationDB Java API supports versions between {@code 510} and {@code 620}).
API that you want to use (this release of the FoundationDB Java API supports versions between {@code 510} and {@code 630}).
With this API object you can then open {@link com.apple.foundationdb.Cluster Cluster}s and
{@link com.apple.foundationdb.Database Database}s and start using
{@link com.apple.foundationdb.Transaction Transaction}s.
@ -29,7 +29,7 @@ import com.apple.foundationdb.tuple.Tuple;
public class Example {
public static void main(String[] args) {
FDB fdb = FDB.selectAPIVersion(620);
FDB fdb = FDB.selectAPIVersion(630);
try(Database db = fdb.open()) {
// Run an operation on the database

View File

@ -27,7 +27,7 @@ import com.apple.foundationdb.Database;
import com.apple.foundationdb.FDB;
public abstract class AbstractTester {
public static final int API_VERSION = 620;
public static final int API_VERSION = 630;
protected static final int NUM_RUNS = 25;
protected static final Charset ASCII = Charset.forName("ASCII");

Some files were not shown because too many files have changed in this diff Show More