Merge branch 'main' of github.com:apple/foundationdb into getsizetenant

This commit is contained in:
Ankita Kejriwal 2022-10-11 12:01:36 -07:00
commit 11658c7135
744 changed files with 39248 additions and 13616 deletions

5
.flake8 Normal file
View File

@ -0,0 +1,5 @@
# flake8 configuration for the Python code in this repo.
# black is run alongside flake8 via pre-commit, so codes that conflict
# with black's output (E203, W503, E501) are ignored here.
[flake8]
# E203 whitespace-before-':' and W503 line-break-before-operator clash with
# black; F401/F403 relax unused/star-import checks; E711 permits `== None`.
ignore = E203, E266, E501, W503, F403, F401, E711
# Advisory only, since E501 (line too long) is ignored above.
max-line-length = 79
# McCabe cyclomatic-complexity threshold.
max-complexity = 18
# Error classes to report (includes flake8-bugbear's B/B9 codes if installed).
select = B,C,E,F,W,T4,B9

1
.gitignore vendored
View File

@ -64,6 +64,7 @@ packaging/msi/obj
simfdb
tests/oldBinaries
trace.*.xml
trace.*.json
.venv
# Editor files

9
.pre-commit-config.yaml Normal file
View File

@ -0,0 +1,9 @@
# pre-commit hook configuration (enable with `pre-commit install`).
# Each hook is pinned to an exact commit hash for reproducibility; the
# trailing `frozen:` comment records the corresponding release tag.
repos:
# black: Python code formatter.
- repo: https://github.com/psf/black
rev: 2018e667a6a36ee3fbfa8041cd36512f92f60d49 # frozen: 22.8.0
hooks:
- id: black
# flake8: Python linter; settings live in .flake8 at the repo root.
- repo: https://github.com/pycqa/flake8
rev: f8e1b317742036ff11ff86356fd2b68147e169f7 # frozen: 5.0.4
hooks:
- id: flake8

View File

@ -22,6 +22,11 @@ else()
cmake_minimum_required(VERSION 3.13)
endif()
# silence deprecation warnings in newer versions of cmake
if(POLICY CMP0135)
cmake_policy(SET CMP0135 NEW)
endif()
project(foundationdb
VERSION 7.2.0
DESCRIPTION "FoundationDB is a scalable, fault-tolerant, ordered key-value store with full ACID transactions."

View File

@ -38,6 +38,9 @@ We love pull requests! For minor changes, feel free to open up a PR directly. Fo
CI will be run automatically for core committers, and for community PRs it will be initiated by the request of a core committer. Tests can also be run locally via `ctest`, and core committers can run additional validation on pull requests prior to merging them.
### Python pre-commit
We use a pre-commit pipeline with black and flake8 to enforce Python coding best practices. Install pre-commit with ```pip install pre-commit```, then enable the hooks in your FoundationDB directory with ```pre-commit install```.
### Reporting issues
Please refer to the section below on [using GitHub issues and the community forums](#using-github-issues-and-community-forums) for more info.

View File

@ -34,6 +34,7 @@
#include <algorithm>
#include <exception>
#include <map>
#include <stdexcept>
#include <string>
#include <vector>

View File

@ -28,6 +28,7 @@
#include <algorithm>
#include <exception>
#include <cstring>
#include <stdexcept>
static int hexValue(char c) {
static char const digits[] = "0123456789ABCDEF";

View File

@ -49,6 +49,17 @@ from bindingtester.known_testers import Tester
import fdb
import fdb.tuple
# Known FDB API versions, one group per major release line. Used by
# choose_api_version() below to pick a random version within the
# [min_version, max_version] range supported by the testers.
API_VERSIONS = [
13, 14, 16, 21, 22, 23,
100, 200, 300,
400, 410, 420, 430, 440, 450, 460,
500, 510, 520,
600, 610, 620, 630,
700, 710, 720,
]
fdb.api_version(FDB_API_VERSION)
@ -156,8 +167,7 @@ def choose_api_version(selected_api_version, tester_min_version, tester_max_vers
elif random.random() < 0.7:
api_version = min_version
elif random.random() < 0.9:
api_version = random.choice([v for v in [13, 14, 16, 21, 22, 23, 100, 200, 300, 400, 410, 420, 430,
440, 450, 460, 500, 510, 520, 600, 610, 620, 630, 700, 710, 720] if v >= min_version and v <= max_version])
api_version = random.choice([v for v in API_VERSIONS if v >= min_version and v <= max_version])
else:
api_version = random.randint(min_version, max_version)

View File

@ -30,13 +30,13 @@ endif()
add_custom_command(OUTPUT ${asm_file} ${CMAKE_CURRENT_BINARY_DIR}/fdb_c_function_pointers.g.h
COMMAND $<TARGET_FILE:Python3::Interpreter> ${CMAKE_CURRENT_SOURCE_DIR}/generate_asm.py ${os} ${cpu}
${CMAKE_CURRENT_SOURCE_DIR}/fdb_c.cpp
${asm_file}
${CMAKE_CURRENT_BINARY_DIR}/fdb_c_function_pointers.g.h
${CMAKE_CURRENT_SOURCE_DIR}/fdb_c.cpp
${asm_file}
${CMAKE_CURRENT_BINARY_DIR}/fdb_c_function_pointers.g.h
DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/generate_asm.py ${CMAKE_CURRENT_SOURCE_DIR}/fdb_c.cpp
COMMENT "Generate C bindings")
add_custom_target(fdb_c_generated DEPENDS ${asm_file}
${CMAKE_CURRENT_BINARY_DIR}/fdb_c_function_pointers.g.h)
${CMAKE_CURRENT_BINARY_DIR}/fdb_c_function_pointers.g.h)
vexillographer_compile(TARGET fdb_c_options LANG c OUT ${CMAKE_CURRENT_BINARY_DIR}/foundationdb/fdb_c_options.g.h
OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/foundationdb/fdb_c_options.g.h)
@ -66,9 +66,9 @@ if(APPLE)
set(symbols ${CMAKE_CURRENT_BINARY_DIR}/fdb_c.symbols)
add_custom_command(OUTPUT ${symbols}
COMMAND $<TARGET_FILE:Python3::Interpreter> ${CMAKE_CURRENT_SOURCE_DIR}/symbolify.py
${CMAKE_CURRENT_SOURCE_DIR}/foundationdb/fdb_c.h
${CMAKE_CURRENT_SOURCE_DIR}/foundationdb/fdb_c_internal.h
${symbols}
${CMAKE_CURRENT_SOURCE_DIR}/foundationdb/fdb_c.h
${CMAKE_CURRENT_SOURCE_DIR}/foundationdb/fdb_c_internal.h
${symbols}
DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/symbolify.py ${CMAKE_CURRENT_SOURCE_DIR}/foundationdb/fdb_c.h ${CMAKE_CURRENT_SOURCE_DIR}/foundationdb/fdb_c_internal.h
COMMENT "Generate exported_symbols_list")
add_custom_target(exported_symbols_list DEPENDS ${symbols})
@ -76,7 +76,7 @@ if(APPLE)
target_link_options(fdb_c PRIVATE "LINKER:-no_weak_exports,-exported_symbols_list,${symbols}")
elseif(WIN32)
else()
if (NOT USE_UBSAN)
if(NOT USE_UBSAN)
# For ubsan we need to export type information for the vptr check to work.
# Otherwise we only want to export fdb symbols in the fdb c api.
target_link_options(fdb_c PRIVATE "LINKER:--version-script=${CMAKE_CURRENT_SOURCE_DIR}/fdb_c.map")
@ -127,9 +127,9 @@ if(NOT WIN32)
test/unit/fdb_api.hpp)
add_library(fdb_cpp INTERFACE test/fdb_api.hpp)
target_sources(fdb_cpp INTERFACE )
target_sources(fdb_cpp INTERFACE)
target_include_directories(fdb_cpp INTERFACE ${CMAKE_CURRENT_SOURCE_DIR}/test)
target_link_libraries(fdb_cpp INTERFACE fmt::fmt)
target_link_libraries(fdb_cpp INTERFACE fdb_c fmt::fmt)
set(API_TESTER_SRCS
test/apitester/fdb_c_api_tester.cpp
@ -139,6 +139,9 @@ if(NOT WIN32)
test/apitester/TesterTestSpec.cpp
test/apitester/TesterTestSpec.h
test/apitester/TesterBlobGranuleCorrectnessWorkload.cpp
test/apitester/TesterBlobGranuleErrorsWorkload.cpp
test/apitester/TesterBlobGranuleUtil.cpp
test/apitester/TesterBlobGranuleUtil.h
test/apitester/TesterCancelTransactionWorkload.cpp
test/apitester/TesterCorrectnessWorkload.cpp
test/apitester/TesterExampleWorkload.cpp
@ -154,7 +157,7 @@ if(NOT WIN32)
test/apitester/TesterWatchAndWaitWorkload.cpp
test/apitester/TesterWorkload.cpp
test/apitester/TesterWorkload.h
)
)
add_library(fdb_c_unit_tests_impl OBJECT ${UNIT_TEST_SRCS})
add_library(fdb_c_api_tester_impl OBJECT ${API_TESTER_SRCS})
@ -196,6 +199,9 @@ if(NOT WIN32)
target_include_directories(fdb_c_api_tester_impl PUBLIC ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}/foundationdb/ ${CMAKE_SOURCE_DIR}/flow/include ${CMAKE_BINARY_DIR}/flow/include)
target_link_libraries(fdb_c_api_tester_impl PRIVATE fdb_cpp toml11_target Threads::Threads fmt::fmt boost_target)
if(NOT APPLE)
target_link_libraries(fdb_c_api_tester_impl PRIVATE stdc++fs)
endif()
target_link_libraries(fdb_c_api_tester_impl PRIVATE SimpleOpt)
target_include_directories(fdb_c_unit_tests_impl PUBLIC ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR}/foundationdb/)
@ -222,208 +228,222 @@ if(NOT WIN32)
set(FDB_C_TARGET $<TARGET_OBJECTS:fdb_c>)
else()
set(FDB_C_TARGET $<TARGET_FILE:fdb_c>)
endif()
add_custom_command(
OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/libfdb_c_external.so
COMMAND ${CMAKE_COMMAND} -E copy ${FDB_C_TARGET} ${CMAKE_CURRENT_BINARY_DIR}/libfdb_c_external.so
DEPENDS fdb_c
COMMENT "Copy libfdb_c to use as external client for test")
add_custom_target(external_client DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/libfdb_c_external.so)
add_dependencies(fdb_c_unit_tests_impl external_client)
add_dependencies(disconnected_timeout_unit_tests external_client)
add_dependencies(fdb_c_api_tester_impl external_client)
add_custom_command(
OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/libfdb_c_external.so
COMMAND ${CMAKE_COMMAND} -E copy ${FDB_C_TARGET} ${CMAKE_CURRENT_BINARY_DIR}/libfdb_c_external.so
DEPENDS fdb_c
COMMENT "Copy libfdb_c to use as external client for test")
add_custom_target(external_client DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/libfdb_c_external.so)
add_dependencies(fdb_c_unit_tests_impl external_client)
add_dependencies(disconnected_timeout_unit_tests external_client)
add_dependencies(fdb_c_api_tester_impl external_client)
add_fdbclient_test(
NAME fdb_c_setup_tests
COMMAND $<TARGET_FILE:fdb_c_setup_tests>)
add_fdbclient_test(
NAME fdb_c_unit_tests
COMMAND $<TARGET_FILE:fdb_c_unit_tests>
@CLUSTER_FILE@
fdb)
add_fdbclient_test(
NAME fdb_c_unit_tests_version_510
COMMAND $<TARGET_FILE:fdb_c_unit_tests_version_510>
@CLUSTER_FILE@
fdb)
add_fdbclient_test(
NAME trace_partial_file_suffix_test
COMMAND $<TARGET_FILE:trace_partial_file_suffix_test>
@CLUSTER_FILE@
fdb)
add_fdbclient_test(
NAME fdb_c_external_client_unit_tests
COMMAND $<TARGET_FILE:fdb_c_unit_tests>
@CLUSTER_FILE@
fdb
${CMAKE_CURRENT_BINARY_DIR}/libfdb_c_external.so
)
add_unavailable_fdbclient_test(
NAME disconnected_timeout_unit_tests
COMMAND $<TARGET_FILE:disconnected_timeout_unit_tests>
@CLUSTER_FILE@
)
add_unavailable_fdbclient_test(
NAME disconnected_timeout_external_client_unit_tests
COMMAND $<TARGET_FILE:disconnected_timeout_unit_tests>
@CLUSTER_FILE@
${CMAKE_CURRENT_BINARY_DIR}/libfdb_c_external.so
)
add_fdbclient_test(
NAME fdb_c_api_tests
DISABLE_LOG_DUMP
COMMAND ${CMAKE_SOURCE_DIR}/bindings/c/test/apitester/run_c_api_tests.py
--cluster-file
@CLUSTER_FILE@
--tester-binary
$<TARGET_FILE:fdb_c_api_tester>
--external-client-library
${CMAKE_CURRENT_BINARY_DIR}/libfdb_c_external.so
--test-dir
${CMAKE_SOURCE_DIR}/bindings/c/test/apitester/tests
--tmp-dir
@TMP_DIR@
--log-dir
@LOG_DIR@
)
add_fdbclient_test(
NAME fdb_c_setup_tests
COMMAND $<TARGET_FILE:fdb_c_setup_tests>)
add_fdbclient_test(
NAME fdb_c_unit_tests
COMMAND $<TARGET_FILE:fdb_c_unit_tests>
@CLUSTER_FILE@
fdb)
add_fdbclient_test(
NAME fdb_c_unit_tests_version_510
COMMAND $<TARGET_FILE:fdb_c_unit_tests_version_510>
@CLUSTER_FILE@
fdb)
add_fdbclient_test(
NAME trace_partial_file_suffix_test
COMMAND $<TARGET_FILE:trace_partial_file_suffix_test>
@CLUSTER_FILE@
fdb)
add_fdbclient_test(
NAME fdb_c_external_client_unit_tests
COMMAND $<TARGET_FILE:fdb_c_unit_tests>
@CLUSTER_FILE@
fdb
${CMAKE_CURRENT_BINARY_DIR}/libfdb_c_external.so
)
add_unavailable_fdbclient_test(
NAME disconnected_timeout_unit_tests
COMMAND $<TARGET_FILE:disconnected_timeout_unit_tests>
@CLUSTER_FILE@
)
add_unavailable_fdbclient_test(
NAME disconnected_timeout_external_client_unit_tests
COMMAND $<TARGET_FILE:disconnected_timeout_unit_tests>
@CLUSTER_FILE@
${CMAKE_CURRENT_BINARY_DIR}/libfdb_c_external.so
)
add_fdbclient_test(
NAME fdb_c_api_tests
DISABLE_LOG_DUMP
COMMAND ${CMAKE_SOURCE_DIR}/bindings/c/test/apitester/run_c_api_tests.py
--cluster-file
@CLUSTER_FILE@
--tester-binary
$<TARGET_FILE:fdb_c_api_tester>
--external-client-library
${CMAKE_CURRENT_BINARY_DIR}/libfdb_c_external.so
--test-dir
${CMAKE_SOURCE_DIR}/bindings/c/test/apitester/tests
--tmp-dir
@TMP_DIR@
--log-dir
@LOG_DIR@
)
add_fdbclient_test(
NAME fdb_c_api_tests_blob_granule
DISABLE_LOG_DUMP
API_TEST_BLOB_GRANULES_ENABLED
COMMAND ${CMAKE_SOURCE_DIR}/bindings/c/test/apitester/run_c_api_tests.py
--cluster-file
@CLUSTER_FILE@
--tester-binary
$<TARGET_FILE:fdb_c_api_tester>
--external-client-library
${CMAKE_CURRENT_BINARY_DIR}/libfdb_c_external.so
--test-dir
${CMAKE_SOURCE_DIR}/bindings/c/test/apitester/blobgranuletests
--blob-granule-local-file-path
@DATA_DIR@/fdbblob/
--tmp-dir
@TMP_DIR@
--log-dir
@LOG_DIR@
)
add_fdbclient_test(
NAME fdb_c_api_tests_local_only
DISABLE_LOG_DUMP
COMMAND ${CMAKE_SOURCE_DIR}/bindings/c/test/apitester/run_c_api_tests.py
--cluster-file
@CLUSTER_FILE@
--tester-binary
$<TARGET_FILE:fdb_c_api_tester>
--test-dir
${CMAKE_SOURCE_DIR}/bindings/c/test/apitester/local_tests
--tmp-dir
@TMP_DIR@
--log-dir
@LOG_DIR@
)
add_fdbclient_test(
NAME fdb_c_api_tests_with_tls
DISABLE_LOG_DUMP
TLS_ENABLED
COMMAND ${CMAKE_SOURCE_DIR}/bindings/c/test/apitester/run_c_api_tests.py
--cluster-file
@CLUSTER_FILE@
--tester-binary
$<TARGET_FILE:fdb_c_api_tester>
--external-client-library
${CMAKE_CURRENT_BINARY_DIR}/libfdb_c_external.so
--test-dir
${CMAKE_SOURCE_DIR}/bindings/c/test/apitester/tests
--tmp-dir
@TMP_DIR@
--log-dir
@LOG_DIR@
--tls-cert-file
@CLIENT_CERT_FILE@
--tls-key-file
@CLIENT_KEY_FILE@
--tls-ca-file
@SERVER_CA_FILE@
)
add_fdbclient_test(
NAME fdb_c_api_tests_blob_granule
DISABLE_LOG_DUMP
API_TEST_BLOB_GRANULES_ENABLED
COMMAND ${CMAKE_SOURCE_DIR}/bindings/c/test/apitester/run_c_api_tests.py
--cluster-file
@CLUSTER_FILE@
--tester-binary
$<TARGET_FILE:fdb_c_api_tester>
--external-client-library
${CMAKE_CURRENT_BINARY_DIR}/libfdb_c_external.so
--test-dir
${CMAKE_SOURCE_DIR}/bindings/c/test/apitester/blobgranuletests
--blob-granule-local-file-path
@DATA_DIR@/fdbblob/
--tmp-dir
@TMP_DIR@
--log-dir
@LOG_DIR@
)
add_test(NAME fdb_c_upgrade_to_future_version
COMMAND ${CMAKE_SOURCE_DIR}/tests/TestRunner/upgrade_test.py
add_fdbclient_test(
NAME fdb_c_api_tests_with_tls
DISABLE_LOG_DUMP
TLS_ENABLED
COMMAND ${CMAKE_SOURCE_DIR}/bindings/c/test/apitester/run_c_api_tests.py
--cluster-file
@CLUSTER_FILE@
--tester-binary
$<TARGET_FILE:fdb_c_api_tester>
--external-client-library
${CMAKE_CURRENT_BINARY_DIR}/libfdb_c_external.so
--test-dir
${CMAKE_SOURCE_DIR}/bindings/c/test/apitester/tests
--tmp-dir
@TMP_DIR@
--log-dir
@LOG_DIR@
--tls-cert-file
@CLIENT_CERT_FILE@
--tls-key-file
@CLIENT_KEY_FILE@
--tls-ca-file
@SERVER_CA_FILE@
)
add_test(NAME fdb_c_upgrade_to_future_version
COMMAND ${CMAKE_SOURCE_DIR}/tests/TestRunner/upgrade_test.py
--build-dir ${CMAKE_BINARY_DIR}
--test-file ${CMAKE_SOURCE_DIR}/bindings/c/test/apitester/tests/upgrade/MixedApiWorkloadMultiThr.toml
--upgrade-path "7.2.0" "7.3.0" "7.2.0"
--process-number 3
)
set_tests_properties("fdb_c_upgrade_to_future_version" PROPERTIES ENVIRONMENT "${SANITIZER_OPTIONS}")
add_test(NAME fdb_c_upgrade_to_future_version_blob_granules
COMMAND ${CMAKE_SOURCE_DIR}/tests/TestRunner/upgrade_test.py
--build-dir ${CMAKE_BINARY_DIR}
--test-file ${CMAKE_SOURCE_DIR}/bindings/c/test/apitester/tests/upgrade/ApiBlobGranulesCorrectness.toml
--upgrade-path "7.2.0" "7.3.0" "7.2.0"
--blob-granules-enabled
--process-number 3
)
if(CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" AND NOT USE_SANITIZER)
add_test(NAME fdb_c_upgrade_single_threaded_630api
COMMAND ${CMAKE_SOURCE_DIR}/tests/TestRunner/upgrade_test.py
--build-dir ${CMAKE_BINARY_DIR}
--test-file ${CMAKE_SOURCE_DIR}/bindings/c/test/apitester/tests/upgrade/MixedApiWorkloadSingleThr.toml
--upgrade-path "6.3.23" "7.0.0" "7.1.9" "7.2.0"
--process-number 1
)
add_test(NAME fdb_c_upgrade_single_threaded_700api
COMMAND ${CMAKE_SOURCE_DIR}/tests/TestRunner/upgrade_test.py
--build-dir ${CMAKE_BINARY_DIR}
--test-file ${CMAKE_SOURCE_DIR}/bindings/c/test/apitester/tests/upgrade/MixedApiWorkloadSingleThr.toml
--upgrade-path "7.0.0" "7.1.9" "7.2.0"
--process-number 1
)
add_test(NAME fdb_c_upgrade_multi_threaded_630api
COMMAND ${CMAKE_SOURCE_DIR}/tests/TestRunner/upgrade_test.py
--build-dir ${CMAKE_BINARY_DIR}
--test-file ${CMAKE_SOURCE_DIR}/bindings/c/test/apitester/tests/upgrade/MixedApiWorkloadMultiThr.toml
--upgrade-path "7.2.0" "7.3.0" "7.2.0"
--upgrade-path "6.3.23" "7.0.0" "7.1.9" "7.2.0" "7.1.9"
--process-number 3
)
set_tests_properties("fdb_c_upgrade_to_future_version" PROPERTIES ENVIRONMENT "${SANITIZER_OPTIONS}")
)
if (0) # reenable after stabilizing the test
add_test(NAME fdb_c_upgrade_to_future_version_blob_granules
COMMAND ${CMAKE_SOURCE_DIR}/tests/TestRunner/upgrade_test.py
add_test(NAME fdb_c_upgrade_multi_threaded_700api
COMMAND ${CMAKE_SOURCE_DIR}/tests/TestRunner/upgrade_test.py
--build-dir ${CMAKE_BINARY_DIR}
--test-file ${CMAKE_SOURCE_DIR}/bindings/c/test/apitester/tests/upgrade/ApiBlobGranulesCorrectness.toml
--upgrade-path "7.2.0" "7.3.0" "7.2.0"
--blob-granules-enabled
--test-file ${CMAKE_SOURCE_DIR}/bindings/c/test/apitester/tests/upgrade/MixedApiWorkloadMultiThr.toml
--upgrade-path "7.0.0" "7.1.9" "7.2.0" "7.1.9"
--process-number 3
)
endif()
)
if(CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" AND NOT USE_SANITIZER)
add_test(NAME fdb_c_upgrade_single_threaded_630api
COMMAND ${CMAKE_SOURCE_DIR}/tests/TestRunner/upgrade_test.py
--build-dir ${CMAKE_BINARY_DIR}
--test-file ${CMAKE_SOURCE_DIR}/bindings/c/test/apitester/tests/upgrade/MixedApiWorkloadSingleThr.toml
--upgrade-path "6.3.23" "7.0.0" "7.1.9" "7.2.0"
--process-number 1
)
add_test(NAME fdb_c_upgrade_single_threaded_700api
COMMAND ${CMAKE_SOURCE_DIR}/tests/TestRunner/upgrade_test.py
--build-dir ${CMAKE_BINARY_DIR}
--test-file ${CMAKE_SOURCE_DIR}/bindings/c/test/apitester/tests/upgrade/MixedApiWorkloadSingleThr.toml
--upgrade-path "7.0.0" "7.1.9" "7.2.0"
--process-number 1
)
add_test(NAME fdb_c_upgrade_multi_threaded_630api
add_test(NAME fdb_c_upgrade_multi_threaded_710api
COMMAND ${CMAKE_SOURCE_DIR}/tests/TestRunner/upgrade_test.py
--build-dir ${CMAKE_BINARY_DIR}
--test-file ${CMAKE_SOURCE_DIR}/bindings/c/test/apitester/tests/upgrade/MixedApiWorkloadMultiThr.toml
--upgrade-path "6.3.23" "7.0.0" "7.1.9" "7.2.0" "7.1.9"
--process-number 3
)
--build-dir ${CMAKE_BINARY_DIR}
--test-file ${CMAKE_SOURCE_DIR}/bindings/c/test/apitester/tests/upgrade/MixedApiWorkloadMultiThr.toml
--upgrade-path "7.1.9" "7.2.0" "7.1.9"
--process-number 3
)
add_test(NAME fdb_c_upgrade_multi_threaded_700api
add_test(NAME fdb_c_cluster_wiggle
COMMAND ${CMAKE_SOURCE_DIR}/tests/TestRunner/upgrade_test.py
--build-dir ${CMAKE_BINARY_DIR}
--test-file ${CMAKE_SOURCE_DIR}/bindings/c/test/apitester/tests/upgrade/MixedApiWorkloadMultiThr.toml
--upgrade-path "7.0.0" "7.1.9" "7.2.0" "7.1.9"
--process-number 3
)
--build-dir ${CMAKE_BINARY_DIR}
--test-file ${CMAKE_SOURCE_DIR}/bindings/c/test/apitester/tests/upgrade/MixedApiWorkloadMultiThr.toml
--upgrade-path "7.2.0" "wiggle"
--disable-log-dump
--process-number 3
--redundancy double
)
add_test(NAME fdb_c_upgrade_multi_threaded_710api
add_test(NAME fdb_c_wiggle_and_upgrade_latest
COMMAND ${CMAKE_SOURCE_DIR}/tests/TestRunner/upgrade_test.py
--build-dir ${CMAKE_BINARY_DIR}
--test-file ${CMAKE_SOURCE_DIR}/bindings/c/test/apitester/tests/upgrade/MixedApiWorkloadMultiThr.toml
--upgrade-path "7.1.9" "7.2.0" "7.1.9"
--process-number 3
)
add_test(NAME fdb_c_cluster_wiggle
COMMAND ${CMAKE_SOURCE_DIR}/tests/TestRunner/upgrade_test.py
--build-dir ${CMAKE_BINARY_DIR}
--test-file ${CMAKE_SOURCE_DIR}/bindings/c/test/apitester/tests/upgrade/MixedApiWorkloadMultiThr.toml
--upgrade-path "7.2.0" "wiggle"
--disable-log-dump
--process-number 3
--redundancy double
)
add_test(NAME fdb_c_wiggle_and_upgrade_latest
COMMAND ${CMAKE_SOURCE_DIR}/tests/TestRunner/upgrade_test.py
--build-dir ${CMAKE_BINARY_DIR}
--test-file ${CMAKE_SOURCE_DIR}/bindings/c/test/apitester/tests/upgrade/MixedApiWorkloadMultiThr.toml
--upgrade-path "7.1.9" "wiggle" "7.2.0"
--disable-log-dump
--process-number 3
--redundancy double
--build-dir ${CMAKE_BINARY_DIR}
--test-file ${CMAKE_SOURCE_DIR}/bindings/c/test/apitester/tests/upgrade/MixedApiWorkloadMultiThr.toml
--upgrade-path "7.1.9" "wiggle" "7.2.0"
--disable-log-dump
--process-number 3
--redundancy double
)
add_test(NAME fdb_c_wiggle_and_upgrade_63
COMMAND ${CMAKE_SOURCE_DIR}/tests/TestRunner/upgrade_test.py
--build-dir ${CMAKE_BINARY_DIR}
--test-file ${CMAKE_SOURCE_DIR}/bindings/c/test/apitester/tests/upgrade/MixedApiWorkloadMultiThr.toml
--upgrade-path "6.3.24" "wiggle" "7.0.0"
--disable-log-dump
--process-number 3
--redundancy double
)
--build-dir ${CMAKE_BINARY_DIR}
--test-file ${CMAKE_SOURCE_DIR}/bindings/c/test/apitester/tests/upgrade/MixedApiWorkloadMultiThr.toml
--upgrade-path "6.3.24" "wiggle" "7.0.0"
--disable-log-dump
--process-number 3
--redundancy double
)
endif()
endif()
endif()
@ -442,12 +462,12 @@ set_target_properties(c_workloads PROPERTIES
LIBRARY_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/share/foundationdb")
target_link_libraries(c_workloads PUBLIC fdb_c)
if (NOT WIN32 AND NOT APPLE AND NOT OPEN_FOR_IDE)
if(NOT WIN32 AND NOT APPLE AND NOT OPEN_FOR_IDE)
target_link_options(c_workloads PRIVATE "LINKER:--version-script=${CMAKE_CURRENT_SOURCE_DIR}/external_workload.map,-z,nodelete")
endif()
# Generate shim library in Linux builds
if (OPEN_FOR_IDE)
if(OPEN_FOR_IDE)
add_library(fdb_c_shim OBJECT foundationdb/fdb_c_shim.h fdb_c_shim.cpp)
target_link_libraries(fdb_c_shim PUBLIC dl)
@ -479,14 +499,14 @@ elseif(NOT WIN32 AND NOT APPLE AND NOT USE_SANITIZER) # Linux Only, non-santizer
add_custom_command(OUTPUT ${SHIM_LIB_GEN_SRC}
COMMAND $<TARGET_FILE:Python3::Interpreter> ${IMPLIBSO_SRC_DIR}/implib-gen.py
--target ${CMAKE_SYSTEM_PROCESSOR}
--outdir ${SHIM_LIB_OUTPUT_DIR}
--dlopen-callback=fdb_shim_dlopen_callback
$<TARGET_FILE:fdb_c>
DEPENDS ${IMPLIBSO_SRC}
--target ${CMAKE_SYSTEM_PROCESSOR}
--outdir ${SHIM_LIB_OUTPUT_DIR}
--dlopen-callback=fdb_shim_dlopen_callback
$<TARGET_FILE:fdb_c>
DEPENDS ${IMPLIBSO_SRC} fdb_c
COMMENT "Generating source code for C shim library")
add_library(fdb_c_shim SHARED ${SHIM_LIB_GEN_SRC} foundationdb/fdb_c_shim.h fdb_c_shim.cpp)
add_library(fdb_c_shim STATIC ${SHIM_LIB_GEN_SRC} foundationdb/fdb_c_shim.h fdb_c_shim.cpp)
target_link_options(fdb_c_shim PRIVATE "LINKER:--version-script=${CMAKE_CURRENT_SOURCE_DIR}/fdb_c.map,-z,nodelete,-z,noexecstack")
target_link_libraries(fdb_c_shim PUBLIC dl)
target_include_directories(fdb_c_shim PUBLIC
@ -506,12 +526,12 @@ elseif(NOT WIN32 AND NOT APPLE AND NOT USE_SANITIZER) # Linux Only, non-santizer
add_test(NAME fdb_c_shim_library_tests
COMMAND $<TARGET_FILE:Python3::Interpreter> ${CMAKE_CURRENT_SOURCE_DIR}/test/fdb_c_shim_tests.py
--build-dir ${CMAKE_BINARY_DIR}
--unit-tests-bin $<TARGET_FILE:fdb_c_shim_unit_tests>
--api-tester-bin $<TARGET_FILE:fdb_c_shim_api_tester>
--shim-lib-tester-bin $<TARGET_FILE:fdb_c_shim_lib_tester>
--api-test-dir ${CMAKE_SOURCE_DIR}/bindings/c/test/apitester/tests
)
--build-dir ${CMAKE_BINARY_DIR}
--unit-tests-bin $<TARGET_FILE:fdb_c_shim_unit_tests>
--api-tester-bin $<TARGET_FILE:fdb_c_shim_api_tester>
--shim-lib-tester-bin $<TARGET_FILE:fdb_c_shim_lib_tester>
--api-test-dir ${CMAKE_SOURCE_DIR}/bindings/c/test/apitester/tests
)
endif() # End Linux only, non-sanitizer only
@ -560,16 +580,16 @@ fdb_install(
if(NOT WIN32 AND NOT APPLE AND NOT USE_SANITIZER) # Linux Only, non-sanitizer only
fdb_install(
FILES foundationdb/fdb_c_shim.h
DESTINATION include
DESTINATION_SUFFIX /foundationdb
COMPONENT clients)
fdb_install(
FILES foundationdb/fdb_c_shim.h
DESTINATION include
DESTINATION_SUFFIX /foundationdb
COMPONENT clients)
fdb_install(
TARGETS fdb_c_shim
EXPORT ${targets_export_name}
DESTINATION lib
COMPONENT clients)
fdb_install(
TARGETS fdb_c_shim
EXPORT ${targets_export_name}
DESTINATION lib
COMPONENT clients)
endif() # End Linux only, non-ubsan only

View File

@ -324,6 +324,15 @@ extern "C" DLLEXPORT fdb_error_t fdb_future_get_key_array(FDBFuture* f, FDBKey c
*out_count = na.size(););
}
// Extract a blob granule summary array from a completed future.
// On success, *out_ranges points at the summaries and *out_count holds the
// number of entries. The cast below assumes FDBGranuleSummary is
// layout-compatible with BlobGranuleSummaryRef.
// NOTE(review): *out_ranges presumably stays valid only while the future is
// alive, matching the other fdb_future_get_* accessors — confirm.
extern "C" DLLEXPORT fdb_error_t fdb_future_get_granule_summary_array(FDBFuture* f,
FDBGranuleSummary const** out_ranges,
int* out_count) {
// CATCH_AND_RETURN (macro defined elsewhere in this file) turns thrown
// errors into the returned fdb_error_t.
CATCH_AND_RETURN(Standalone<VectorRef<BlobGranuleSummaryRef>> na =
TSAV(Standalone<VectorRef<BlobGranuleSummaryRef>>, f)->get();
*out_ranges = (FDBGranuleSummary*)na.begin();
*out_count = na.size(););
}
// Destroy an FDBResult, cancelling the underlying computation and releasing
// its resources. Uses CATCH_AND_DIE (macro defined elsewhere in this file)
// because destruction has no error-return channel.
extern "C" DLLEXPORT void fdb_result_destroy(FDBResult* r) {
CATCH_AND_DIE(TSAVB(r)->cancel(););
}
@ -539,10 +548,14 @@ extern "C" DLLEXPORT WARN_UNUSED_RESULT FDBFuture* fdb_database_verify_blob_rang
uint8_t const* end_key_name,
int end_key_name_length,
int64_t version) {
Optional<Version> rv;
if (version != latestVersion) {
rv = version;
}
return (FDBFuture*)(DB(db)
->verifyBlobRange(KeyRangeRef(StringRef(begin_key_name, begin_key_name_length),
StringRef(end_key_name, end_key_name_length)),
version)
rv)
.extractPtr());
}
@ -943,6 +956,74 @@ extern "C" DLLEXPORT FDBResult* fdb_transaction_read_blob_granules(FDBTransactio
return (FDBResult*)(TXN(tr)->readBlobGranules(range, beginVersion, rv, context).extractPtr()););
}
// Asynchronous first half of the two-phase blob granule read: kicks off the
// read on the FDB network thread and returns a future to be handed to
// fdb_transaction_read_blob_granules_finish() once ready.
// readVersion == latestVersion is a sentinel meaning "no explicit read
// version" and maps to an empty Optional; readVersionOut receives the
// version actually used.
extern "C" DLLEXPORT FDBFuture* fdb_transaction_read_blob_granules_start(FDBTransaction* tr,
uint8_t const* begin_key_name,
int begin_key_name_length,
uint8_t const* end_key_name,
int end_key_name_length,
int64_t beginVersion,
int64_t readVersion,
int64_t* readVersionOut) {
// Translate the sentinel into an unset Optional<Version>.
Optional<Version> rv;
if (readVersion != latestVersion) {
rv = readVersion;
}
return (FDBFuture*)(TXN(tr)
->readBlobGranulesStart(KeyRangeRef(KeyRef(begin_key_name, begin_key_name_length),
KeyRef(end_key_name, end_key_name_length)),
beginVersion,
rv,
readVersionOut)
.extractPtr());
}
// Synchronous second half of the two-phase blob granule read: consumes the
// future produced by fdb_transaction_read_blob_granules_start() and
// materializes the results off the network thread, driving the caller's
// load callbacks in granule_context.
extern "C" DLLEXPORT FDBResult* fdb_transaction_read_blob_granules_finish(FDBTransaction* tr,
FDBFuture* f,
uint8_t const* begin_key_name,
int begin_key_name_length,
uint8_t const* end_key_name,
int end_key_name_length,
int64_t beginVersion,
int64_t readVersion,
FDBReadBlobGranuleContext* granule_context) {
// FIXME: better way to convert?
// Copy the C callback struct field-by-field into the internal C++ context.
ReadBlobGranuleContext context;
context.userContext = granule_context->userContext;
context.start_load_f = granule_context->start_load_f;
context.get_load_f = granule_context->get_load_f;
context.free_load_f = granule_context->free_load_f;
context.debugNoMaterialize = granule_context->debugNoMaterialize;
context.granuleParallelism = granule_context->granuleParallelism;
// Re-wrap the C future handle as a typed ThreadFuture over granule chunks.
ThreadFuture<Standalone<VectorRef<BlobGranuleChunkRef>>> startFuture(
TSAV(Standalone<VectorRef<BlobGranuleChunkRef>>, f));
return (FDBResult*)(TXN(tr)
->readBlobGranulesFinish(startFuture,
KeyRangeRef(KeyRef(begin_key_name, begin_key_name_length),
KeyRef(end_key_name, end_key_name_length)),
beginVersion,
readVersion,
context)
.extractPtr());
}
// Request blob granule summaries (key range plus snapshot/delta metadata)
// for the given key range, up to rangeLimit entries. The result future is
// read with fdb_future_get_granule_summary_array().
// summaryVersion == latestVersion is a sentinel meaning "no explicit
// version" and maps to an empty Optional.
extern "C" DLLEXPORT FDBFuture* fdb_transaction_summarize_blob_granules(FDBTransaction* tr,
uint8_t const* begin_key_name,
int begin_key_name_length,
uint8_t const* end_key_name,
int end_key_name_length,
int64_t summaryVersion,
int rangeLimit) {
// RETURN_FUTURE_ON_ERROR (macro defined elsewhere in this file) returns an
// error future of the given result type if the body throws.
RETURN_FUTURE_ON_ERROR(
Standalone<VectorRef<BlobGranuleSummaryRef>>,
KeyRangeRef range(KeyRef(begin_key_name, begin_key_name_length), KeyRef(end_key_name, end_key_name_length));
Optional<Version> sv;
if (summaryVersion != latestVersion) { sv = summaryVersion; }
return (FDBFuture*)(TXN(tr)->summarizeBlobGranules(range, sv, rangeLimit).extractPtr()););
}
#include "fdb_c_function_pointers.g.h"
#define FDB_API_CHANGED(func, ver) \

View File

@ -84,12 +84,12 @@ DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_network_set_option(FDBNetworkOption
int value_length);
#if FDB_API_VERSION >= 14
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_setup_network();
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_setup_network(void);
#endif
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_run_network();
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_run_network(void);
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_stop_network();
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_stop_network(void);
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_add_network_thread_completion_hook(void (*hook)(void*),
void* hook_parameter);
@ -179,6 +179,14 @@ typedef struct keyrange {
const uint8_t* end_key;
int end_key_length;
} FDBKeyRange;
/* Summary metadata for one blob granule: its key range plus version/size
 * figures for its snapshot and accumulated deltas. Declared inside the
 * packed-struct region (see the #pragma pack(pop) below); the C++ side
 * casts BlobGranuleSummaryRef arrays to this type, so the layouts must
 * stay in sync. */
typedef struct granulesummary {
FDBKeyRange key_range;
int64_t snapshot_version;
int64_t snapshot_size;
int64_t delta_version;
int64_t delta_size;
} FDBGranuleSummary;
#pragma pack(pop)
typedef struct readgranulecontext {
@ -264,6 +272,10 @@ DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_future_get_keyrange_array(FDBFuture
FDBKeyRange const** out_ranges,
int* out_count);
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_future_get_granule_summary_array(FDBFuture* f,
FDBGranuleSummary const** out_summaries,
int* out_count);
/* FDBResult is a synchronous computation result, as opposed to a future that is asynchronous. */
DLLEXPORT void fdb_result_destroy(FDBResult* r);
@ -521,6 +533,14 @@ DLLEXPORT WARN_UNUSED_RESULT FDBResult* fdb_transaction_read_blob_granules(FDBTr
int64_t readVersion,
FDBReadBlobGranuleContext granuleContext);
DLLEXPORT WARN_UNUSED_RESULT FDBFuture* fdb_transaction_summarize_blob_granules(FDBTransaction* tr,
uint8_t const* begin_key_name,
int begin_key_name_length,
uint8_t const* end_key_name,
int end_key_name_length,
int64_t summaryVersion,
int rangeLimit);
#define FDB_KEYSEL_LAST_LESS_THAN(k, l) k, l, 0, 0
#define FDB_KEYSEL_LAST_LESS_OR_EQUAL(k, l) k, l, 1, 0
#define FDB_KEYSEL_FIRST_GREATER_THAN(k, l) k, l, 1, 1
@ -528,8 +548,8 @@ DLLEXPORT WARN_UNUSED_RESULT FDBResult* fdb_transaction_read_blob_granules(FDBTr
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_select_api_version_impl(int runtime_version, int header_version);
DLLEXPORT int fdb_get_max_api_version();
DLLEXPORT const char* fdb_get_client_version();
DLLEXPORT int fdb_get_max_api_version(void);
DLLEXPORT const char* fdb_get_client_version(void);
/* LEGACY API VERSIONS */

View File

@ -51,6 +51,27 @@ DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_create_database_from_connection_str
DLLEXPORT void fdb_use_future_protocol_version();
// the logical read_blob_granules is broken out (at different points depending on the client type) into the asynchronous
// start() that happens on the fdb network thread, and synchronous finish() that happens off it
DLLEXPORT FDBFuture* fdb_transaction_read_blob_granules_start(FDBTransaction* tr,
uint8_t const* begin_key_name,
int begin_key_name_length,
uint8_t const* end_key_name,
int end_key_name_length,
int64_t beginVersion,
int64_t readVersion,
int64_t* readVersionOut);
DLLEXPORT FDBResult* fdb_transaction_read_blob_granules_finish(FDBTransaction* tr,
FDBFuture* f,
uint8_t const* begin_key_name,
int begin_key_name_length,
uint8_t const* end_key_name,
int end_key_name_length,
int64_t beginVersion,
int64_t readVersion,
FDBReadBlobGranuleContext* granuleContext);
#ifdef __cplusplus
}
#endif

View File

@ -41,6 +41,10 @@ ApiWorkload::ApiWorkload(const WorkloadConfig& config) : WorkloadBase(config) {
stopReceived = false;
checkingProgress = false;
apiVersion = config.apiVersion;
for (int i = 0; i < config.numTenants; ++i) {
tenants.push_back(fdb::ByteString(fdb::toBytesRef("tenant" + std::to_string(i))));
}
}
IWorkloadControlIfc* ApiWorkload::getControlIfc() {
@ -107,49 +111,57 @@ void ApiWorkload::randomOperation(TTaskFct cont) {
}
fdb::Key ApiWorkload::randomKeyName() {
return keyPrefix + Random::get().randomStringLowerCase(minKeyLength, maxKeyLength);
return keyPrefix + Random::get().randomByteStringLowerCase(minKeyLength, maxKeyLength);
}
fdb::Value ApiWorkload::randomValue() {
return Random::get().randomStringLowerCase(minValueLength, maxValueLength);
return Random::get().randomByteStringLowerCase(minValueLength, maxValueLength);
}
fdb::Key ApiWorkload::randomNotExistingKey() {
fdb::Key ApiWorkload::randomNotExistingKey(std::optional<int> tenantId) {
while (true) {
fdb::Key key = randomKeyName();
if (!store.exists(key)) {
if (!stores[tenantId].exists(key)) {
return key;
}
}
}
fdb::Key ApiWorkload::randomExistingKey() {
fdb::Key ApiWorkload::randomExistingKey(std::optional<int> tenantId) {
fdb::Key genKey = randomKeyName();
fdb::Key key = store.getKey(genKey, true, 1);
if (key != store.endKey()) {
fdb::Key key = stores[tenantId].getKey(genKey, true, 1);
if (key != stores[tenantId].endKey()) {
return key;
}
key = store.getKey(genKey, true, 0);
if (key != store.startKey()) {
key = stores[tenantId].getKey(genKey, true, 0);
if (key != stores[tenantId].startKey()) {
return key;
}
info("No existing key found, using a new random key.");
return genKey;
}
fdb::Key ApiWorkload::randomKey(double existingKeyRatio) {
fdb::Key ApiWorkload::randomKey(double existingKeyRatio, std::optional<int> tenantId) {
if (Random::get().randomBool(existingKeyRatio)) {
return randomExistingKey();
return randomExistingKey(tenantId);
} else {
return randomNotExistingKey();
return randomNotExistingKey(tenantId);
}
}
void ApiWorkload::populateDataTx(TTaskFct cont) {
std::optional<int> ApiWorkload::randomTenant() {
if (tenants.size() > 0) {
return Random::get().randomInt(0, tenants.size() - 1);
} else {
return {};
}
}
void ApiWorkload::populateDataTx(TTaskFct cont, std::optional<int> tenantId) {
int numKeys = maxKeysPerTransaction;
auto kvPairs = std::make_shared<std::vector<fdb::KeyValue>>();
for (int i = 0; i < numKeys; i++) {
kvPairs->push_back(fdb::KeyValue{ randomNotExistingKey(), randomValue() });
kvPairs->push_back(fdb::KeyValue{ randomNotExistingKey(tenantId), randomValue() });
}
execTransaction(
[kvPairs](auto ctx) {
@ -158,37 +170,89 @@ void ApiWorkload::populateDataTx(TTaskFct cont) {
}
ctx->commit();
},
[this, kvPairs, cont]() {
[this, tenantId, kvPairs, cont]() {
for (const fdb::KeyValue& kv : *kvPairs) {
store.set(kv.key, kv.value);
stores[tenantId].set(kv.key, kv.value);
}
schedule(cont);
});
},
getTenant(tenantId));
}
void ApiWorkload::clearTenantData(TTaskFct cont, std::optional<int> tenantId) {
execTransaction(
[this](auto ctx) {
ctx->tx().clearRange(keyPrefix, keyPrefix + fdb::Key(1, '\xff'));
ctx->commit();
},
[this, tenantId, cont]() {
if (tenantId && tenantId.value() < tenants.size() - 1) {
clearTenantData(cont, tenantId.value() + 1);
} else {
schedule(cont);
}
},
getTenant(tenantId));
}
void ApiWorkload::clearData(TTaskFct cont) {
execTransaction(
[this](auto ctx) {
// Make this self-conflicting, so that if we're retrying on timeouts
// once we get a successful commit all previous attempts are no
// longer in-flight.
ctx->tx().addReadConflictRange(keyPrefix, keyPrefix + fdb::Key(1, '\xff'));
ctx->tx().clearRange(keyPrefix, keyPrefix + fdb::Key(1, '\xff'));
ctx->commit();
},
[this, cont]() { schedule(cont); });
}
void ApiWorkload::populateData(TTaskFct cont) {
if (store.size() < initialSize) {
populateDataTx([this, cont]() { populateData(cont); });
} else {
void ApiWorkload::populateTenantData(TTaskFct cont, std::optional<int> tenantId) {
while (stores[tenantId].size() >= initialSize && tenantId && tenantId.value() < tenants.size()) {
++tenantId.value();
}
if (tenantId >= tenants.size() || stores[tenantId].size() >= initialSize) {
info("Data population completed");
schedule(cont);
} else {
populateDataTx([this, cont, tenantId]() { populateTenantData(cont, tenantId); }, tenantId);
}
}
void ApiWorkload::randomInsertOp(TTaskFct cont) {
void ApiWorkload::createTenants(TTaskFct cont) {
execTransaction(
[this](auto ctx) {
auto futures = std::make_shared<std::vector<fdb::Future>>();
for (auto tenant : tenants) {
futures->push_back(fdb::Tenant::getTenant(ctx->tx(), tenant));
}
ctx->continueAfterAll(*futures, [this, ctx, futures]() {
for (int i = 0; i < futures->size(); ++i) {
if (!(*futures)[i].get<fdb::future_var::ValueRef>()) {
fdb::Tenant::createTenant(ctx->tx(), tenants[i]);
}
}
ctx->commit();
});
},
[this, cont]() { schedule(cont); });
}
void ApiWorkload::populateData(TTaskFct cont) {
if (tenants.size() > 0) {
createTenants([this, cont]() { populateTenantData(cont, std::make_optional(0)); });
} else {
populateTenantData(cont, {});
}
}
void ApiWorkload::randomInsertOp(TTaskFct cont, std::optional<int> tenantId) {
int numKeys = Random::get().randomInt(1, maxKeysPerTransaction);
auto kvPairs = std::make_shared<std::vector<fdb::KeyValue>>();
for (int i = 0; i < numKeys; i++) {
kvPairs->push_back(fdb::KeyValue{ randomNotExistingKey(), randomValue() });
kvPairs->push_back(fdb::KeyValue{ randomNotExistingKey(tenantId), randomValue() });
}
execTransaction(
[kvPairs](auto ctx) {
@ -197,19 +261,20 @@ void ApiWorkload::randomInsertOp(TTaskFct cont) {
}
ctx->commit();
},
[this, kvPairs, cont]() {
[this, kvPairs, cont, tenantId]() {
for (const fdb::KeyValue& kv : *kvPairs) {
store.set(kv.key, kv.value);
stores[tenantId].set(kv.key, kv.value);
}
schedule(cont);
});
},
getTenant(tenantId));
}
void ApiWorkload::randomClearOp(TTaskFct cont) {
void ApiWorkload::randomClearOp(TTaskFct cont, std::optional<int> tenantId) {
int numKeys = Random::get().randomInt(1, maxKeysPerTransaction);
auto keys = std::make_shared<std::vector<fdb::Key>>();
for (int i = 0; i < numKeys; i++) {
keys->push_back(randomExistingKey());
keys->push_back(randomExistingKey(tenantId));
}
execTransaction(
[keys](auto ctx) {
@ -218,15 +283,16 @@ void ApiWorkload::randomClearOp(TTaskFct cont) {
}
ctx->commit();
},
[this, keys, cont]() {
[this, keys, cont, tenantId]() {
for (const auto& key : *keys) {
store.clear(key);
stores[tenantId].clear(key);
}
schedule(cont);
});
},
getTenant(tenantId));
}
void ApiWorkload::randomClearRangeOp(TTaskFct cont) {
void ApiWorkload::randomClearRangeOp(TTaskFct cont, std::optional<int> tenantId) {
fdb::Key begin = randomKeyName();
fdb::Key end = randomKeyName();
if (begin > end) {
@ -237,10 +303,19 @@ void ApiWorkload::randomClearRangeOp(TTaskFct cont) {
ctx->tx().clearRange(begin, end);
ctx->commit();
},
[this, begin, end, cont]() {
store.clear(begin, end);
[this, begin, end, cont, tenantId]() {
stores[tenantId].clear(begin, end);
schedule(cont);
});
},
getTenant(tenantId));
}
std::optional<fdb::BytesRef> ApiWorkload::getTenant(std::optional<int> tenantId) {
if (tenantId) {
return tenants[*tenantId];
} else {
return {};
}
}
} // namespace FdbApiTester

View File

@ -96,17 +96,23 @@ protected:
// Key prefix
fdb::Key keyPrefix;
// The number of tenants to configure in the cluster
std::vector<fdb::ByteString> tenants;
// In-memory store maintaining expected database state
KeyValueStore store;
std::unordered_map<std::optional<int>, KeyValueStore> stores;
ApiWorkload(const WorkloadConfig& config);
// Methods for generating random keys and values
fdb::Key randomKeyName();
fdb::Value randomValue();
fdb::Key randomNotExistingKey();
fdb::Key randomExistingKey();
fdb::Key randomKey(double existingKeyRatio);
fdb::Key randomNotExistingKey(std::optional<int> tenantId);
fdb::Key randomExistingKey(std::optional<int> tenantId);
fdb::Key randomKey(double existingKeyRatio, std::optional<int> tenantId);
// Chooses a random tenant from the available tenants (or an empty optional if tenants aren't used in the test)
std::optional<int> randomTenant();
// Generate initial random data for the workload
void populateData(TTaskFct cont);
@ -115,12 +121,18 @@ protected:
void clearData(TTaskFct cont);
// common operations
void randomInsertOp(TTaskFct cont);
void randomClearOp(TTaskFct cont);
void randomClearRangeOp(TTaskFct cont);
void randomInsertOp(TTaskFct cont, std::optional<int> tenantId);
void randomClearOp(TTaskFct cont, std::optional<int> tenantId);
void randomClearRangeOp(TTaskFct cont, std::optional<int> tenantId);
std::optional<fdb::BytesRef> getTenant(std::optional<int> tenantId);
private:
void populateDataTx(TTaskFct cont);
void populateDataTx(TTaskFct cont, std::optional<int> tenantId);
void populateTenantData(TTaskFct cont, std::optional<int> tenantId);
void createTenants(TTaskFct cont);
void clearTenantData(TTaskFct cont, std::optional<int> tenantId);
void randomOperations();
};

View File

@ -18,61 +18,13 @@
* limitations under the License.
*/
#include "TesterApiWorkload.h"
#include "TesterBlobGranuleUtil.h"
#include "TesterUtil.h"
#include <memory>
#include <fmt/format.h>
namespace FdbApiTester {
class TesterGranuleContext {
public:
std::unordered_map<int64_t, uint8_t*> loadsInProgress;
int64_t nextId = 0;
std::string basePath;
~TesterGranuleContext() {
// if there was an error or not all loads finished, delete data
for (auto& it : loadsInProgress) {
uint8_t* dataToFree = it.second;
delete[] dataToFree;
}
}
};
static int64_t granule_start_load(const char* filename,
int filenameLength,
int64_t offset,
int64_t length,
int64_t fullFileLength,
void* context) {
TesterGranuleContext* ctx = (TesterGranuleContext*)context;
int64_t loadId = ctx->nextId++;
uint8_t* buffer = new uint8_t[length];
std::ifstream fin(ctx->basePath + std::string(filename, filenameLength), std::ios::in | std::ios::binary);
fin.seekg(offset);
fin.read((char*)buffer, length);
ctx->loadsInProgress.insert({ loadId, buffer });
return loadId;
}
static uint8_t* granule_get_load(int64_t loadId, void* context) {
TesterGranuleContext* ctx = (TesterGranuleContext*)context;
return ctx->loadsInProgress.at(loadId);
}
static void granule_free_load(int64_t loadId, void* context) {
TesterGranuleContext* ctx = (TesterGranuleContext*)context;
auto it = ctx->loadsInProgress.find(loadId);
uint8_t* dataToFree = it->second;
delete[] dataToFree;
ctx->loadsInProgress.erase(it);
}
class ApiBlobGranuleCorrectnessWorkload : public ApiWorkload {
public:
ApiBlobGranuleCorrectnessWorkload(const WorkloadConfig& config) : ApiWorkload(config) {
@ -83,34 +35,39 @@ public:
}
private:
enum OpType { OP_INSERT, OP_CLEAR, OP_CLEAR_RANGE, OP_READ, OP_GET_RANGES, OP_LAST = OP_GET_RANGES };
// FIXME: use other new blob granule apis!
enum OpType {
OP_INSERT,
OP_CLEAR,
OP_CLEAR_RANGE,
OP_READ,
OP_GET_GRANULES,
OP_SUMMARIZE,
OP_GET_BLOB_RANGES,
OP_VERIFY,
OP_LAST = OP_VERIFY
};
std::vector<OpType> excludedOpTypes;
// Allow reads at the start to get blob_granule_transaction_too_old if BG data isn't initialized yet
// FIXME: should still guarantee a read succeeds eventually somehow
bool seenReadSuccess = false;
void randomReadOp(TTaskFct cont) {
void randomReadOp(TTaskFct cont, std::optional<int> tenantId) {
fdb::Key begin = randomKeyName();
fdb::Key end = randomKeyName();
auto results = std::make_shared<std::vector<fdb::KeyValue>>();
auto tooOld = std::make_shared<bool>(false);
if (begin > end) {
std::swap(begin, end);
}
auto results = std::make_shared<std::vector<fdb::KeyValue>>();
auto tooOld = std::make_shared<bool>(false);
execTransaction(
[this, begin, end, results, tooOld](auto ctx) {
ctx->tx().setOption(FDB_TR_OPTION_READ_YOUR_WRITES_DISABLE);
TesterGranuleContext testerContext;
testerContext.basePath = ctx->getBGBasePath();
fdb::native::FDBReadBlobGranuleContext granuleContext;
granuleContext.userContext = &testerContext;
granuleContext.debugNoMaterialize = false;
granuleContext.granuleParallelism = 1;
granuleContext.start_load_f = &granule_start_load;
granuleContext.get_load_f = &granule_get_load;
granuleContext.free_load_f = &granule_free_load;
TesterGranuleContext testerContext(ctx->getBGBasePath());
fdb::native::FDBReadBlobGranuleContext granuleContext = createGranuleContext(&testerContext);
fdb::Result res = ctx->tx().readBlobGranules(
begin, end, 0 /* beginVersion */, -2 /* latest read version */, granuleContext);
@ -135,9 +92,10 @@ private:
ctx->done();
}
},
[this, begin, end, results, tooOld, cont]() {
[this, begin, end, results, tooOld, cont, tenantId]() {
if (!*tooOld) {
std::vector<fdb::KeyValue> expected = store.getRange(begin, end, store.size(), false);
std::vector<fdb::KeyValue> expected =
stores[tenantId].getRange(begin, end, stores[tenantId].size(), false);
if (results->size() != expected.size()) {
error(fmt::format("randomReadOp result size mismatch. expected: {} actual: {}",
expected.size(),
@ -168,16 +126,18 @@ private:
}
}
schedule(cont);
});
},
getTenant(tenantId));
}
void randomGetRangesOp(TTaskFct cont) {
void randomGetGranulesOp(TTaskFct cont, std::optional<int> tenantId) {
fdb::Key begin = randomKeyName();
fdb::Key end = randomKeyName();
auto results = std::make_shared<std::vector<fdb::KeyRange>>();
if (begin > end) {
std::swap(begin, end);
}
auto results = std::make_shared<std::vector<fdb::KeyRange>>();
execTransaction(
[begin, end, results](auto ctx) {
fdb::Future f = ctx->tx().getBlobGranuleRanges(begin, end, 1000).eraseType();
@ -190,46 +150,180 @@ private:
true);
},
[this, begin, end, results, cont]() {
if (seenReadSuccess) {
ASSERT(results->size() > 0);
ASSERT(results->front().beginKey <= begin);
ASSERT(results->back().endKey >= end);
}
this->validateRanges(results, begin, end, seenReadSuccess);
schedule(cont);
},
getTenant(tenantId));
}
void randomSummarizeOp(TTaskFct cont, std::optional<int> tenantId) {
if (!seenReadSuccess) {
// tester can't handle this throwing bg_txn_too_old, so just don't call it unless we have already seen a
// read success
schedule(cont);
return;
}
fdb::Key begin = randomKeyName();
fdb::Key end = randomKeyName();
if (begin > end) {
std::swap(begin, end);
}
auto results = std::make_shared<std::vector<fdb::GranuleSummary>>();
execTransaction(
[begin, end, results](auto ctx) {
fdb::Future f = ctx->tx().summarizeBlobGranules(begin, end, -2 /*latest version*/, 1000).eraseType();
ctx->continueAfter(
f,
[ctx, f, results]() {
*results = copyGranuleSummaryArray(f.get<fdb::future_var::GranuleSummaryRefArray>());
ctx->done();
},
true);
},
[this, begin, end, results, cont]() {
ASSERT(results->size() > 0);
ASSERT(results->front().keyRange.beginKey <= begin);
ASSERT(results->back().keyRange.endKey >= end);
for (int i = 0; i < results->size(); i++) {
// no empty or inverted ranges
ASSERT((*results)[i].beginKey < (*results)[i].endKey);
// TODO: could do validation of subsequent calls and ensure snapshot version never decreases
ASSERT((*results)[i].keyRange.beginKey < (*results)[i].keyRange.endKey);
ASSERT((*results)[i].snapshotVersion <= (*results)[i].deltaVersion);
ASSERT((*results)[i].snapshotSize > 0);
ASSERT((*results)[i].deltaSize >= 0);
}
for (int i = 1; i < results->size(); i++) {
// ranges contain entire requested key range
ASSERT((*results)[i].beginKey == (*results)[i - 1].endKey);
ASSERT((*results)[i].keyRange.beginKey == (*results)[i - 1].keyRange.endKey);
}
schedule(cont);
});
},
getTenant(tenantId));
}
void validateRanges(std::shared_ptr<std::vector<fdb::KeyRange>> results,
fdb::Key begin,
fdb::Key end,
bool shouldBeRanges) {
if (shouldBeRanges) {
ASSERT(results->size() > 0);
ASSERT(results->front().beginKey <= begin);
ASSERT(results->back().endKey >= end);
}
for (int i = 0; i < results->size(); i++) {
// no empty or inverted ranges
if ((*results)[i].beginKey >= (*results)[i].endKey) {
error(fmt::format("Empty/inverted range [{0} - {1}) for getBlobGranuleRanges({2} - {3})",
fdb::toCharsRef((*results)[i].beginKey),
fdb::toCharsRef((*results)[i].endKey),
fdb::toCharsRef(begin),
fdb::toCharsRef(end)));
}
ASSERT((*results)[i].beginKey < (*results)[i].endKey);
}
for (int i = 1; i < results->size(); i++) {
// ranges contain entire requested key range
if ((*results)[i].beginKey != (*results)[i].endKey) {
error(fmt::format("Non-contiguous range [{0} - {1}) for getBlobGranuleRanges({2} - {3})",
fdb::toCharsRef((*results)[i].beginKey),
fdb::toCharsRef((*results)[i].endKey),
fdb::toCharsRef(begin),
fdb::toCharsRef(end)));
}
ASSERT((*results)[i].beginKey == (*results)[i - 1].endKey);
}
}
void randomGetBlobRangesOp(TTaskFct cont) {
fdb::Key begin = randomKeyName();
fdb::Key end = randomKeyName();
auto results = std::make_shared<std::vector<fdb::KeyRange>>();
if (begin > end) {
std::swap(begin, end);
}
execOperation(
[begin, end, results](auto ctx) {
fdb::Future f = ctx->db().listBlobbifiedRanges(begin, end, 1000).eraseType();
ctx->continueAfter(f, [ctx, f, results]() {
*results = copyKeyRangeArray(f.get<fdb::future_var::KeyRangeRefArray>());
ctx->done();
});
},
[this, begin, end, results, cont]() {
this->validateRanges(results, begin, end, seenReadSuccess);
schedule(cont);
},
/* failOnError = */ false);
}
void randomVerifyOp(TTaskFct cont) {
fdb::Key begin = randomKeyName();
fdb::Key end = randomKeyName();
if (begin > end) {
std::swap(begin, end);
}
auto verifyVersion = std::make_shared<int64_t>(false);
// info("Verify op starting");
execOperation(
[begin, end, verifyVersion](auto ctx) {
fdb::Future f = ctx->db().verifyBlobRange(begin, end, -2 /* latest version*/).eraseType();
ctx->continueAfter(f, [ctx, verifyVersion, f]() {
*verifyVersion = f.get<fdb::future_var::Int64>();
ctx->done();
});
},
[this, begin, end, verifyVersion, cont]() {
if (*verifyVersion == -1) {
ASSERT(!seenReadSuccess);
} else {
if (!seenReadSuccess) {
info("BlobGranuleCorrectness::randomVerifyOp first success");
}
seenReadSuccess = true;
}
// info(fmt::format("verify op done @ {}", *verifyVersion));
schedule(cont);
},
/* failOnError = */ false);
}
void randomOperation(TTaskFct cont) {
OpType txType = (store.size() == 0) ? OP_INSERT : (OpType)Random::get().randomInt(0, OP_LAST);
std::optional<int> tenantId = randomTenant();
OpType txType = (stores[tenantId].size() == 0) ? OP_INSERT : (OpType)Random::get().randomInt(0, OP_LAST);
while (std::count(excludedOpTypes.begin(), excludedOpTypes.end(), txType)) {
txType = (OpType)Random::get().randomInt(0, OP_LAST);
}
switch (txType) {
case OP_INSERT:
randomInsertOp(cont);
randomInsertOp(cont, tenantId);
break;
case OP_CLEAR:
randomClearOp(cont);
randomClearOp(cont, tenantId);
break;
case OP_CLEAR_RANGE:
randomClearRangeOp(cont);
randomClearRangeOp(cont, tenantId);
break;
case OP_READ:
randomReadOp(cont);
randomReadOp(cont, tenantId);
break;
case OP_GET_RANGES:
randomGetRangesOp(cont);
case OP_GET_GRANULES:
randomGetGranulesOp(cont, tenantId);
break;
case OP_SUMMARIZE:
randomSummarizeOp(cont, tenantId);
break;
case OP_GET_BLOB_RANGES:
randomGetBlobRangesOp(cont);
break;
case OP_VERIFY:
randomVerifyOp(cont);
break;
}
}

View File

@ -0,0 +1,316 @@
/*
* TesterBlobGranuleErrorsWorkload.cpp
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "TesterApiWorkload.h"
#include "TesterBlobGranuleUtil.h"
#include "TesterUtil.h"
#include <memory>
#include <fmt/format.h>
namespace FdbApiTester {
class BlobGranuleErrorsWorkload : public ApiWorkload {
public:
BlobGranuleErrorsWorkload(const WorkloadConfig& config) : ApiWorkload(config) {}
private:
enum OpType {
OP_READ_NO_MATERIALIZE,
OP_READ_FILE_LOAD_ERROR,
OP_READ_TOO_OLD,
OP_PURGE_UNALIGNED,
OP_BLOBBIFY_UNALIGNED,
OP_UNBLOBBIFY_UNALIGNED,
OP_CANCEL_GET_GRANULES,
OP_CANCEL_GET_RANGES,
OP_CANCEL_VERIFY,
OP_CANCEL_SUMMARIZE,
OP_CANCEL_BLOBBIFY,
OP_CANCEL_UNBLOBBIFY,
OP_CANCEL_PURGE,
OP_LAST = OP_CANCEL_PURGE
};
// could add summarize too old and verify too old as ops if desired but those are lower value
// Allow reads at the start to get blob_granule_transaction_too_old if BG data isn't initialized yet
// FIXME: should still guarantee a read succeeds eventually somehow
bool seenReadSuccess = false;
void doErrorOp(TTaskFct cont,
std::string basePathAddition,
bool doMaterialize,
int64_t readVersion,
fdb::native::fdb_error_t expectedError) {
fdb::Key begin = randomKeyName();
fdb::Key end = begin;
// [K - K) empty range will succeed read because there is trivially nothing to do, so don't do it
while (end == begin) {
end = randomKeyName();
}
if (begin > end) {
std::swap(begin, end);
}
execTransaction(
[this, begin, end, basePathAddition, doMaterialize, readVersion, expectedError](auto ctx) {
ctx->tx().setOption(FDB_TR_OPTION_READ_YOUR_WRITES_DISABLE);
TesterGranuleContext testerContext(ctx->getBGBasePath() + basePathAddition);
fdb::native::FDBReadBlobGranuleContext granuleContext = createGranuleContext(&testerContext);
granuleContext.debugNoMaterialize = !doMaterialize;
fdb::Result res =
ctx->tx().readBlobGranules(begin, end, 0 /* beginVersion */, readVersion, granuleContext);
auto out = fdb::Result::KeyValueRefArray{};
fdb::Error err = res.getKeyValueArrayNothrow(out);
if (err.code() == error_code_success) {
error(fmt::format("Operation succeeded in error test!"));
}
ASSERT(err.code() != error_code_success);
if (err.code() != expectedError) {
info(fmt::format("incorrect error. Expected {}, Got {}", expectedError, err.code()));
if (err.code() == error_code_blob_granule_transaction_too_old) {
ASSERT(!seenReadSuccess);
ctx->done();
} else {
ctx->onError(err);
}
} else {
if (err.code() != error_code_blob_granule_transaction_too_old) {
seenReadSuccess = true;
}
ctx->done();
}
},
[this, cont]() { schedule(cont); });
}
void randomOpReadNoMaterialize(TTaskFct cont) {
// ensure setting noMaterialize flag produces blob_granule_not_materialized
doErrorOp(cont, "", false, -2 /*latest read version */, error_code_blob_granule_not_materialized);
}
void randomOpReadFileLoadError(TTaskFct cont) {
// point to a file path that doesn't exist by adding an extra suffix
doErrorOp(cont, "extrapath/", true, -2 /*latest read version */, error_code_blob_granule_file_load_error);
}
void randomOpReadTooOld(TTaskFct cont) {
// read at a version (1) that should predate granule data
doErrorOp(cont, "", true, 1, error_code_blob_granule_transaction_too_old);
}
void randomPurgeUnalignedOp(TTaskFct cont) {
// blobbify/unblobbify need to be aligned to blob range boundaries, so this should always fail
fdb::Key begin = randomKeyName();
fdb::Key end = randomKeyName();
if (begin > end) {
std::swap(begin, end);
}
execOperation(
[this, begin, end](auto ctx) {
fdb::Future f = ctx->db().purgeBlobGranules(begin, end, -2, false).eraseType();
ctx->continueAfter(
f,
[this, ctx, f]() {
info(fmt::format("unaligned purge got {}", f.error().code()));
ASSERT(f.error().code() == error_code_unsupported_operation);
ctx->done();
},
true);
},
[this, cont]() { schedule(cont); });
}
void randomBlobbifyUnalignedOp(bool blobbify, TTaskFct cont) {
// blobbify/unblobbify need to be aligned to blob range boundaries, so this should always return false
fdb::Key begin = randomKeyName();
fdb::Key end = randomKeyName();
if (begin > end) {
std::swap(begin, end);
}
auto success = std::make_shared<bool>(false);
execOperation(
[begin, end, blobbify, success](auto ctx) {
fdb::Future f = blobbify ? ctx->db().blobbifyRange(begin, end).eraseType()
: ctx->db().unblobbifyRange(begin, end).eraseType();
ctx->continueAfter(
f,
[ctx, f, success]() {
*success = f.get<fdb::future_var::Bool>();
ctx->done();
},
true);
},
[this, cont, success]() {
ASSERT(!(*success));
schedule(cont);
});
}
void randomCancelGetGranulesOp(TTaskFct cont) {
fdb::Key begin = randomKeyName();
fdb::Key end = randomKeyName();
if (begin > end) {
std::swap(begin, end);
}
execTransaction(
[begin, end](auto ctx) {
fdb::Future f = ctx->tx().getBlobGranuleRanges(begin, end, 1000).eraseType();
ctx->done();
},
[this, cont]() { schedule(cont); });
}
void randomCancelGetRangesOp(TTaskFct cont) {
fdb::Key begin = randomKeyName();
fdb::Key end = randomKeyName();
if (begin > end) {
std::swap(begin, end);
}
execOperation(
[begin, end](auto ctx) {
fdb::Future f = ctx->db().listBlobbifiedRanges(begin, end, 1000).eraseType();
ctx->done();
},
[this, cont]() { schedule(cont); });
}
void randomCancelVerifyOp(TTaskFct cont) {
fdb::Key begin = randomKeyName();
fdb::Key end = randomKeyName();
if (begin > end) {
std::swap(begin, end);
}
execOperation(
[begin, end](auto ctx) {
fdb::Future f = ctx->db().verifyBlobRange(begin, end, -2 /* latest version*/).eraseType();
ctx->done();
},
[this, cont]() { schedule(cont); });
}
void randomCancelSummarizeOp(TTaskFct cont) {
fdb::Key begin = randomKeyName();
fdb::Key end = randomKeyName();
if (begin > end) {
std::swap(begin, end);
}
execTransaction(
[begin, end](auto ctx) {
fdb::Future f = ctx->tx().summarizeBlobGranules(begin, end, -2, 1000).eraseType();
ctx->done();
},
[this, cont]() { schedule(cont); });
}
void randomCancelBlobbifyOp(TTaskFct cont) {
fdb::Key begin = randomKeyName();
fdb::Key end = randomKeyName();
if (begin > end) {
std::swap(begin, end);
}
execOperation(
[begin, end](auto ctx) {
fdb::Future f = ctx->db().blobbifyRange(begin, end).eraseType();
ctx->done();
},
[this, cont]() { schedule(cont); });
}
void randomCancelUnblobbifyOp(TTaskFct cont) {
fdb::Key begin = randomKeyName();
fdb::Key end = randomKeyName();
if (begin > end) {
std::swap(begin, end);
}
execOperation(
[begin, end](auto ctx) {
fdb::Future f = ctx->db().unblobbifyRange(begin, end).eraseType();
ctx->done();
},
[this, cont]() { schedule(cont); });
}
void randomCancelPurgeOp(TTaskFct cont) {
fdb::Key begin = randomKeyName();
fdb::Key end = randomKeyName();
if (begin > end) {
std::swap(begin, end);
}
execOperation(
[begin, end](auto ctx) {
fdb::Future f = ctx->db().purgeBlobGranules(begin, end, -2, false).eraseType();
ctx->done();
},
[this, cont]() { schedule(cont); });
}
void randomOperation(TTaskFct cont) override {
OpType txType = (OpType)Random::get().randomInt(0, OP_LAST);
switch (txType) {
case OP_READ_NO_MATERIALIZE:
randomOpReadNoMaterialize(cont);
break;
case OP_READ_FILE_LOAD_ERROR:
randomOpReadFileLoadError(cont);
break;
case OP_READ_TOO_OLD:
randomOpReadTooOld(cont);
break;
case OP_PURGE_UNALIGNED:
// gets the correct error but it doesn't propagate properly in the test
// randomPurgeUnalignedOp(cont);
break;
case OP_BLOBBIFY_UNALIGNED:
randomBlobbifyUnalignedOp(true, cont);
break;
case OP_UNBLOBBIFY_UNALIGNED:
randomBlobbifyUnalignedOp(false, cont);
break;
case OP_CANCEL_GET_GRANULES:
randomCancelGetGranulesOp(cont);
break;
case OP_CANCEL_GET_RANGES:
randomCancelGetRangesOp(cont);
break;
case OP_CANCEL_VERIFY:
randomCancelVerifyOp(cont);
break;
case OP_CANCEL_SUMMARIZE:
randomCancelSummarizeOp(cont);
break;
case OP_CANCEL_BLOBBIFY:
randomCancelBlobbifyOp(cont);
break;
case OP_CANCEL_UNBLOBBIFY:
randomCancelUnblobbifyOp(cont);
break;
case OP_CANCEL_PURGE:
randomCancelPurgeOp(cont);
break;
}
}
};
WorkloadFactory<BlobGranuleErrorsWorkload> BlobGranuleErrorsWorkloadFactory("BlobGranuleErrors");
} // namespace FdbApiTester

View File

@ -0,0 +1,80 @@
/*
* TesterBlobGranuleUtil.cpp
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "TesterBlobGranuleUtil.h"
#include "TesterUtil.h"
#include <fstream>
namespace FdbApiTester {
// FIXME: avoid duplicating this between files!
static int64_t granule_start_load(const char* filename,
int filenameLength,
int64_t offset,
int64_t length,
int64_t fullFileLength,
void* context) {
TesterGranuleContext* ctx = (TesterGranuleContext*)context;
int64_t loadId = ctx->nextId++;
uint8_t* buffer = new uint8_t[length];
std::ifstream fin(ctx->basePath + std::string(filename, filenameLength), std::ios::in | std::ios::binary);
if (fin.fail()) {
delete[] buffer;
buffer = nullptr;
} else {
fin.seekg(offset);
fin.read((char*)buffer, length);
}
ctx->loadsInProgress.insert({ loadId, buffer });
return loadId;
}
static uint8_t* granule_get_load(int64_t loadId, void* context) {
TesterGranuleContext* ctx = (TesterGranuleContext*)context;
return ctx->loadsInProgress.at(loadId);
}
static void granule_free_load(int64_t loadId, void* context) {
TesterGranuleContext* ctx = (TesterGranuleContext*)context;
auto it = ctx->loadsInProgress.find(loadId);
uint8_t* dataToFree = it->second;
delete[] dataToFree;
ctx->loadsInProgress.erase(it);
}
fdb::native::FDBReadBlobGranuleContext createGranuleContext(const TesterGranuleContext* testerContext) {
fdb::native::FDBReadBlobGranuleContext granuleContext;
granuleContext.userContext = (void*)testerContext;
granuleContext.debugNoMaterialize = false;
granuleContext.granuleParallelism = 1 + Random::get().randomInt(0, 3);
granuleContext.start_load_f = &granule_start_load;
granuleContext.get_load_f = &granule_get_load;
granuleContext.free_load_f = &granule_free_load;
return granuleContext;
}
} // namespace FdbApiTester

View File

@ -0,0 +1,49 @@
/*
* TesterBlobGranuleUtil.h
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#ifndef APITESTER_BLOBGRANULE_UTIL_H
#define APITESTER_BLOBGRANULE_UTIL_H
#include "TesterUtil.h"
#include "test/fdb_api.hpp"
#include <unordered_map>
namespace FdbApiTester {
class TesterGranuleContext {
public:
std::unordered_map<int64_t, uint8_t*> loadsInProgress;
std::string basePath;
int64_t nextId;
TesterGranuleContext(const std::string& basePath) : basePath(basePath), nextId(0) {}
~TesterGranuleContext() {
// this should now never happen with proper memory management
ASSERT(loadsInProgress.empty());
}
};
fdb::native::FDBReadBlobGranuleContext createGranuleContext(const TesterGranuleContext* testerContext);
} // namespace FdbApiTester
#endif

View File

@ -31,11 +31,11 @@ private:
enum OpType { OP_CANCEL_GET, OP_CANCEL_AFTER_FIRST_GET, OP_LAST = OP_CANCEL_AFTER_FIRST_GET };
// Start multiple concurrent gets and cancel the transaction
void randomCancelGetTx(TTaskFct cont) {
void randomCancelGetTx(TTaskFct cont, std::optional<int> tenantId) {
int numKeys = Random::get().randomInt(1, maxKeysPerTransaction);
auto keys = std::make_shared<std::vector<fdb::Key>>();
for (int i = 0; i < numKeys; i++) {
keys->push_back(randomKey(readExistingKeysRatio));
keys->push_back(randomKey(readExistingKeysRatio, tenantId));
}
execTransaction(
[keys](auto ctx) {
@ -45,25 +45,26 @@ private:
}
ctx->done();
},
[this, cont]() { schedule(cont); });
[this, cont]() { schedule(cont); },
getTenant(tenantId));
}
// Start multiple concurrent gets and cancel the transaction after the first get returns
void randomCancelAfterFirstResTx(TTaskFct cont) {
void randomCancelAfterFirstResTx(TTaskFct cont, std::optional<int> tenantId) {
int numKeys = Random::get().randomInt(1, maxKeysPerTransaction);
auto keys = std::make_shared<std::vector<fdb::Key>>();
for (int i = 0; i < numKeys; i++) {
keys->push_back(randomKey(readExistingKeysRatio));
keys->push_back(randomKey(readExistingKeysRatio, tenantId));
}
execTransaction(
[this, keys](auto ctx) {
[this, keys, tenantId](auto ctx) {
std::vector<fdb::Future> futures;
for (const auto& key : *keys) {
futures.push_back(ctx->tx().get(key, false).eraseType());
}
for (int i = 0; i < keys->size(); i++) {
fdb::Future f = futures[i];
auto expectedVal = store.get((*keys)[i]);
auto expectedVal = stores[tenantId].get((*keys)[i]);
ctx->continueAfter(f, [expectedVal, f, this, ctx]() {
auto val = f.get<fdb::future_var::ValueRef>();
if (expectedVal != val) {
@ -75,17 +76,20 @@ private:
});
}
},
[this, cont]() { schedule(cont); });
[this, cont]() { schedule(cont); },
getTenant(tenantId));
}
void randomOperation(TTaskFct cont) override {
std::optional<int> tenantId = randomTenant();
OpType txType = (OpType)Random::get().randomInt(0, OP_LAST);
switch (txType) {
case OP_CANCEL_GET:
randomCancelGetTx(cont);
randomCancelGetTx(cont, tenantId);
break;
case OP_CANCEL_AFTER_FIRST_GET:
randomCancelAfterFirstResTx(cont);
randomCancelAfterFirstResTx(cont, tenantId);
break;
}
}

View File

@ -41,11 +41,11 @@ private:
OP_LAST = OP_COMMIT_READ
};
void randomCommitReadOp(TTaskFct cont) {
void randomCommitReadOp(TTaskFct cont, std::optional<int> tenantId) {
int numKeys = Random::get().randomInt(1, maxKeysPerTransaction);
auto kvPairs = std::make_shared<std::vector<fdb::KeyValue>>();
for (int i = 0; i < numKeys; i++) {
kvPairs->push_back(fdb::KeyValue{ randomKey(readExistingKeysRatio), randomValue() });
kvPairs->push_back(fdb::KeyValue{ randomKey(readExistingKeysRatio, tenantId), randomValue() });
}
execTransaction(
[kvPairs](auto ctx) {
@ -54,9 +54,9 @@ private:
}
ctx->commit();
},
[this, kvPairs, cont]() {
[this, kvPairs, cont, tenantId]() {
for (const fdb::KeyValue& kv : *kvPairs) {
store.set(kv.key, kv.value);
stores[tenantId].set(kv.key, kv.value);
}
auto results = std::make_shared<std::vector<std::optional<fdb::Value>>>();
execTransaction(
@ -78,10 +78,10 @@ private:
ctx->done();
});
},
[this, kvPairs, results, cont]() {
[this, kvPairs, results, cont, tenantId]() {
ASSERT(results->size() == kvPairs->size());
for (int i = 0; i < kvPairs->size(); i++) {
auto expected = store.get((*kvPairs)[i].key);
auto expected = stores[tenantId].get((*kvPairs)[i].key);
auto actual = (*results)[i];
if (actual != expected) {
error(
@ -93,16 +93,18 @@ private:
}
}
schedule(cont);
});
});
},
getTenant(tenantId));
},
getTenant(tenantId));
}
void randomGetOp(TTaskFct cont) {
void randomGetOp(TTaskFct cont, std::optional<int> tenantId) {
int numKeys = Random::get().randomInt(1, maxKeysPerTransaction);
auto keys = std::make_shared<std::vector<fdb::Key>>();
auto results = std::make_shared<std::vector<std::optional<fdb::Value>>>();
for (int i = 0; i < numKeys; i++) {
keys->push_back(randomKey(readExistingKeysRatio));
keys->push_back(randomKey(readExistingKeysRatio, tenantId));
}
execTransaction(
[keys, results](auto ctx) {
@ -119,10 +121,10 @@ private:
ctx->done();
});
},
[this, keys, results, cont]() {
[this, keys, results, cont, tenantId]() {
ASSERT(results->size() == keys->size());
for (int i = 0; i < keys->size(); i++) {
auto expected = store.get((*keys)[i]);
auto expected = stores[tenantId].get((*keys)[i]);
if ((*results)[i] != expected) {
error(fmt::format("randomGetOp mismatch. key: {} expected: {:.80} actual: {:.80}",
fdb::toCharsRef((*keys)[i]),
@ -131,16 +133,17 @@ private:
}
}
schedule(cont);
});
},
getTenant(tenantId));
}
void randomGetKeyOp(TTaskFct cont) {
void randomGetKeyOp(TTaskFct cont, std::optional<int> tenantId) {
int numKeys = Random::get().randomInt(1, maxKeysPerTransaction);
auto keysWithSelectors = std::make_shared<std::vector<std::pair<fdb::Key, fdb::KeySelector>>>();
auto results = std::make_shared<std::vector<fdb::Key>>();
keysWithSelectors->reserve(numKeys);
for (int i = 0; i < numKeys; i++) {
auto key = randomKey(readExistingKeysRatio);
auto key = randomKey(readExistingKeysRatio, tenantId);
fdb::KeySelector selector;
selector.keyLength = key.size();
selector.orEqual = Random::get().randomBool(0.5);
@ -169,20 +172,20 @@ private:
ctx->done();
});
},
[this, keysWithSelectors, results, cont]() {
[this, keysWithSelectors, results, cont, tenantId]() {
ASSERT(results->size() == keysWithSelectors->size());
for (int i = 0; i < keysWithSelectors->size(); i++) {
auto const& key = (*keysWithSelectors)[i].first;
auto const& selector = (*keysWithSelectors)[i].second;
auto expected = store.getKey(key, selector.orEqual, selector.offset);
auto expected = stores[tenantId].getKey(key, selector.orEqual, selector.offset);
auto actual = (*results)[i];
// Local store only contains data for the current client, while fdb contains data from multiple
// clients. If getKey returned a key outside of the range for the current client, adjust the result
// to match what would be expected in the local store.
if (actual.substr(0, keyPrefix.size()) < keyPrefix) {
actual = store.startKey();
actual = stores[tenantId].startKey();
} else if ((*results)[i].substr(0, keyPrefix.size()) > keyPrefix) {
actual = store.endKey();
actual = stores[tenantId].endKey();
}
if (actual != expected) {
error(fmt::format("randomGetKeyOp mismatch. key: {}, orEqual: {}, offset: {}, expected: {} "
@ -195,37 +198,38 @@ private:
}
}
schedule(cont);
});
},
getTenant(tenantId));
}
void getRangeLoop(std::shared_ptr<ITransactionContext> ctx,
fdb::KeySelector begin,
fdb::KeySelector end,
fdb::Key endKey,
std::shared_ptr<std::vector<fdb::KeyValue>> results) {
auto f = ctx->tx().getRange(begin,
end,
fdb::key_select::firstGreaterOrEqual(endKey),
0 /*limit*/,
0 /*target_bytes*/,
FDB_STREAMING_MODE_WANT_ALL,
0 /*iteration*/,
false /*snapshot*/,
false /*reverse*/);
ctx->continueAfter(f, [this, ctx, f, end, results]() {
ctx->continueAfter(f, [this, ctx, f, endKey, results]() {
auto out = copyKeyValueArray(f.get());
results->insert(results->end(), out.first.begin(), out.first.end());
const bool more = out.second;
if (more) {
// Fetch the remaining results.
getRangeLoop(ctx, fdb::key_select::firstGreaterThan(results->back().key), end, results);
getRangeLoop(ctx, fdb::key_select::firstGreaterThan(results->back().key), endKey, results);
} else {
ctx->done();
}
});
}
void randomGetRangeOp(TTaskFct cont) {
auto begin = randomKey(readExistingKeysRatio);
auto end = randomKey(readExistingKeysRatio);
void randomGetRangeOp(TTaskFct cont, std::optional<int> tenantId) {
auto begin = randomKey(readExistingKeysRatio, tenantId);
auto end = randomKey(readExistingKeysRatio, tenantId);
auto results = std::make_shared<std::vector<fdb::KeyValue>>();
execTransaction(
@ -233,13 +237,10 @@ private:
// Clear the results vector, in case the transaction is retried.
results->clear();
getRangeLoop(ctx,
fdb::key_select::firstGreaterOrEqual(begin),
fdb::key_select::firstGreaterOrEqual(end),
results);
getRangeLoop(ctx, fdb::key_select::firstGreaterOrEqual(begin), end, results);
},
[this, begin, end, results, cont]() {
auto expected = store.getRange(begin, end, results->size() + 10, false);
[this, begin, end, results, cont, tenantId]() {
auto expected = stores[tenantId].getRange(begin, end, results->size() + 10, false);
if (results->size() != expected.size()) {
error(fmt::format("randomGetRangeOp mismatch. expected {} keys, actual {} keys",
expected.size(),
@ -260,32 +261,35 @@ private:
}
}
schedule(cont);
});
},
getTenant(tenantId));
}
void randomOperation(TTaskFct cont) {
OpType txType = (store.size() == 0) ? OP_INSERT : (OpType)Random::get().randomInt(0, OP_LAST);
std::optional<int> tenantId = randomTenant();
OpType txType = (stores[tenantId].size() == 0) ? OP_INSERT : (OpType)Random::get().randomInt(0, OP_LAST);
switch (txType) {
case OP_INSERT:
randomInsertOp(cont);
randomInsertOp(cont, tenantId);
break;
case OP_GET:
randomGetOp(cont);
randomGetOp(cont, tenantId);
break;
case OP_GET_KEY:
randomGetKeyOp(cont);
randomGetKeyOp(cont, tenantId);
break;
case OP_CLEAR:
randomClearOp(cont);
randomClearOp(cont, tenantId);
break;
case OP_GET_RANGE:
randomGetRangeOp(cont);
randomGetRangeOp(cont, tenantId);
break;
case OP_CLEAR_RANGE:
randomClearRangeOp(cont);
randomClearRangeOp(cont, tenantId);
break;
case OP_COMMIT_READ:
randomCommitReadOp(cont);
randomCommitReadOp(cont, tenantId);
break;
}
}

View File

@ -35,8 +35,8 @@ public:
void start() override { setAndGet(NO_OP_TASK); }
void setAndGet(TTaskFct cont) {
fdb::Key key = keyPrefix + random.randomStringLowerCase(10, 100);
fdb::Value value = random.randomStringLowerCase(10, 1000);
fdb::Key key = keyPrefix + random.randomByteStringLowerCase(10, 100);
fdb::Value value = random.randomByteStringLowerCase(10, 1000);
execTransaction(
[key, value](auto ctx) {
ctx->tx().set(key, value);

View File

@ -49,6 +49,7 @@ public:
int numClientThreads;
int numDatabases;
int numClients;
int numTenants = -1;
int statsIntervalMs = 0;
std::vector<std::pair<std::string, std::string>> knobs;
TestSpec testSpec;

View File

@ -65,6 +65,10 @@ std::unordered_map<std::string, std::function<void(const std::string& value, Tes
[](const std::string& value, TestSpec* spec) { //
spec->databasePerTransaction = (value == "true");
} },
{ "tamperClusterFile",
[](const std::string& value, TestSpec* spec) { //
spec->tamperClusterFile = (value == "true");
} },
{ "minFdbThreads",
[](const std::string& value, TestSpec* spec) { //
processIntOption(value, "minFdbThreads", spec->minFdbThreads, 1, 1000);
@ -96,6 +100,18 @@ std::unordered_map<std::string, std::function<void(const std::string& value, Tes
{ "maxClients",
[](const std::string& value, TestSpec* spec) { //
processIntOption(value, "maxClients", spec->maxClients, 1, 1000);
} },
{ "disableClientBypass",
[](const std::string& value, TestSpec* spec) { //
spec->disableClientBypass = (value == "true");
} },
{ "minTenants",
[](const std::string& value, TestSpec* spec) { //
processIntOption(value, "minTenants", spec->minTenants, 1, 1000);
} },
{ "maxTenants",
[](const std::string& value, TestSpec* spec) { //
processIntOption(value, "maxTenants", spec->maxTenants, 1, 1000);
} }
};

View File

@ -58,6 +58,9 @@ struct TestSpec {
// Execute each transaction in a separate database instance
bool databasePerTransaction = false;
// Test tampering the cluster file
bool tamperClusterFile = false;
// Size of the FDB client thread pool (a random number in the [min,max] range)
int minFdbThreads = 1;
int maxFdbThreads = 1;
@ -75,6 +78,13 @@ struct TestSpec {
int minClients = 1;
int maxClients = 10;
// Disable the ability to bypass the MVC API, for
// cases when there are no external clients
bool disableClientBypass = false;
// Number of tenants (a random number in the [min,max] range)
int minTenants = 0;
int maxTenants = 0;
// List of workloads with their options
std::vector<WorkloadSpec> workloads;
};

View File

@ -23,25 +23,23 @@
#include "foundationdb/fdb_c_types.h"
#include "test/apitester/TesterScheduler.h"
#include "test/fdb_api.hpp"
#include <cstddef>
#include <memory>
#include <stdexcept>
#include <string>
#include <unordered_map>
#include <mutex>
#include <atomic>
#include <chrono>
#include <thread>
#include <fmt/format.h>
#include <filesystem>
namespace FdbApiTester {
constexpr int LONG_WAIT_TIME_US = 2000000;
constexpr int LARGE_NUMBER_OF_RETRIES = 10;
void TransactionActorBase::complete(fdb::Error err) {
error = err;
context = {};
}
void ITransactionContext::continueAfterAll(std::vector<fdb::Future> futures, TTaskFct cont) {
auto counter = std::make_shared<std::atomic<int>>(futures.size());
auto errorCode = std::make_shared<std::atomic<fdb::Error>>(fdb::Error::success());
@ -72,20 +70,44 @@ void ITransactionContext::continueAfterAll(std::vector<fdb::Future> futures, TTa
*/
class TransactionContextBase : public ITransactionContext {
public:
TransactionContextBase(fdb::Transaction tx,
std::shared_ptr<ITransactionActor> txActor,
TTaskFct cont,
TransactionContextBase(ITransactionExecutor* executor,
TOpStartFct startFct,
TOpContFct cont,
IScheduler* scheduler,
int retryLimit,
std::string bgBasePath)
: fdbTx(tx), txActor(txActor), contAfterDone(cont), scheduler(scheduler), retryLimit(retryLimit),
txState(TxState::IN_PROGRESS), commitCalled(false), bgBasePath(bgBasePath) {}
std::string bgBasePath,
std::optional<fdb::BytesRef> tenantName,
bool transactional)
: executor(executor), startFct(startFct), contAfterDone(cont), scheduler(scheduler), retryLimit(retryLimit),
txState(TxState::IN_PROGRESS), commitCalled(false), bgBasePath(bgBasePath), tenantName(tenantName),
transactional(transactional) {
databaseCreateErrorInjected = executor->getOptions().injectDatabaseCreateErrors &&
Random::get().randomBool(executor->getOptions().databaseCreateErrorRatio);
if (databaseCreateErrorInjected) {
fdbDb = fdb::Database(executor->getClusterFileForErrorInjection());
} else {
fdbDb = executor->selectDatabase();
}
if (transactional) {
if (tenantName) {
fdb::Tenant tenant = fdbDb.openTenant(*tenantName);
fdbTx = tenant.createTransaction();
} else {
fdbTx = fdbDb.createTransaction();
}
}
}
virtual ~TransactionContextBase() { ASSERT(txState == TxState::DONE); }
// A state machine:
// IN_PROGRESS -> (ON_ERROR -> IN_PROGRESS)* [-> ON_ERROR] -> DONE
enum class TxState { IN_PROGRESS, ON_ERROR, DONE };
fdb::Transaction tx() override { return fdbTx; }
fdb::Database db() override { return fdbDb.atomic_load(); }
fdb::Transaction tx() override { return fdbTx.atomic_load(); }
// Set a continuation to be executed when a future gets ready
void continueAfter(fdb::Future f, TTaskFct cont, bool retryOnError) override {
@ -94,6 +116,7 @@ public:
// Complete the transaction with a commit
void commit() override {
ASSERT(transactional);
std::unique_lock<std::mutex> lock(mutex);
if (txState != TxState::IN_PROGRESS) {
return;
@ -114,31 +137,79 @@ public:
}
txState = TxState::DONE;
lock.unlock();
// No need for lock from here on, because only one thread
// can enter DONE state and handle it
if (retriedErrors.size() >= LARGE_NUMBER_OF_RETRIES) {
fmt::print("Transaction succeeded after {} retries on errors: {}\n",
retriedErrors.size(),
fmt::join(retriedErrorCodes(), ", "));
}
// cancel transaction so that any pending operations on it
// fail gracefully
fdbTx.cancel();
txActor->complete(fdb::Error::success());
if (transactional) {
// cancel transaction so that any pending operations on it
// fail gracefully
fdbTx.cancel();
}
cleanUp();
contAfterDone();
ASSERT(txState == TxState::DONE);
contAfterDone(fdb::Error::success());
}
std::string getBGBasePath() override { return bgBasePath; }
virtual void onError(fdb::Error err) override {
std::unique_lock<std::mutex> lock(mutex);
if (txState != TxState::IN_PROGRESS) {
// Ignore further errors, if the transaction is in the error handing mode or completed
return;
}
txState = TxState::ON_ERROR;
lock.unlock();
// No need to hold the lock from here on, because ON_ERROR state is handled sequentially, and
// other callbacks are simply ignored while it stays in this state
if (!canRetry(err)) {
return;
}
ASSERT(!onErrorFuture);
if (databaseCreateErrorInjected && canBeInjectedDatabaseCreateError(err.code())) {
// Failed to create a database because of failure injection
// Restart by recreating the transaction in a valid database
recreateAndRestartTransaction();
} else if (transactional) {
onErrorArg = err;
onErrorFuture = tx().onError(err);
handleOnErrorFuture();
} else if (err.retryable()) {
restartTransaction();
} else {
transactionFailed(err);
}
}
protected:
virtual void doContinueAfter(fdb::Future f, TTaskFct cont, bool retryOnError) = 0;
virtual void handleOnErrorFuture() = 0;
// Clean up transaction state after completing the transaction
// Note that the object may live longer, because it is referenced
// by not yet triggered callbacks
virtual void cleanUp() {
void cleanUp() {
ASSERT(txState == TxState::DONE);
ASSERT(!onErrorFuture);
txActor = {};
cancelPendingFutures();
}
virtual void cancelPendingFutures() {}
bool canBeInjectedDatabaseCreateError(fdb::Error::CodeType errCode) {
return errCode == error_code_no_cluster_file_found || errCode == error_code_connection_string_invalid;
}
// Complete the transaction with an (unretriable) error
@ -150,9 +221,12 @@ protected:
}
txState = TxState::DONE;
lock.unlock();
txActor->complete(err);
// No need for lock from here on, because only one thread
// can enter DONE state and handle it
cleanUp();
contAfterDone();
contAfterDone(err);
}
// Handle result of an a transaction onError call
@ -163,14 +237,37 @@ protected:
if (err) {
transactionFailed(err);
} else {
std::unique_lock<std::mutex> lock(mutex);
txState = TxState::IN_PROGRESS;
commitCalled = false;
lock.unlock();
txActor->start();
restartTransaction();
}
}
void restartTransaction() {
ASSERT(txState == TxState::ON_ERROR);
cancelPendingFutures();
std::unique_lock<std::mutex> lock(mutex);
txState = TxState::IN_PROGRESS;
commitCalled = false;
lock.unlock();
startFct(shared_from_this());
}
void recreateAndRestartTransaction() {
auto thisRef = std::static_pointer_cast<TransactionContextBase>(shared_from_this());
scheduler->schedule([thisRef]() {
fdb::Database db = thisRef->executor->selectDatabase();
thisRef->fdbDb.atomic_store(db);
if (thisRef->transactional) {
if (thisRef->tenantName) {
fdb::Tenant tenant = db.openTenant(*thisRef->tenantName);
thisRef->fdbTx.atomic_store(tenant.createTransaction());
} else {
thisRef->fdbTx.atomic_store(db.createTransaction());
}
}
thisRef->restartTransaction();
});
}
// Checks if a transaction can be retried. Fails the transaction if the check fails
bool canRetry(fdb::Error lastErr) {
ASSERT(txState == TxState::ON_ERROR);
@ -196,44 +293,77 @@ protected:
return retriedErrorCodes;
}
// Pointer to the transaction executor interface
// Set in contructor, stays immutable
ITransactionExecutor* const executor;
// FDB database
// Provides a thread safe interface by itself (no need for mutex)
fdb::Database fdbDb;
// FDB transaction
// Provides a thread safe interface by itself (no need for mutex)
fdb::Transaction fdbTx;
// Actor implementing the transaction worklflow
std::shared_ptr<ITransactionActor> txActor;
// The function implementing the starting point of the transaction
// Set in constructor and reset on cleanup (no need for mutex)
TOpStartFct startFct;
// Mutex protecting access to shared mutable state
// Only the state that is accessible unter IN_PROGRESS state
// must be protected by mutex
std::mutex mutex;
// Continuation to be called after completion of the transaction
TTaskFct contAfterDone;
// Set in contructor, stays immutable
const TOpContFct contAfterDone;
// Reference to the scheduler
IScheduler* scheduler;
// Set in contructor, stays immutable
// Cannot be accessed in DONE state, workloads can be completed and the scheduler deleted
IScheduler* const scheduler;
// Retry limit
int retryLimit;
// Set in contructor, stays immutable
const int retryLimit;
// Transaction execution state
// Must be accessed under mutex
TxState txState;
// onError future used in ON_ERROR state
// onError future
// used only in ON_ERROR state (no need for mutex)
fdb::Future onErrorFuture;
// The error code on which onError was called
// used only in ON_ERROR state (no need for mutex)
fdb::Error onErrorArg;
// The time point of calling onError
// used only in ON_ERROR state (no need for mutex)
TimePoint onErrorCallTimePoint;
// Transaction is committed or being committed
// Must be accessed under mutex
bool commitCalled;
// A history of errors on which the transaction was retried
// used only in ON_ERROR and DONE states (no need for mutex)
std::vector<fdb::Error> retriedErrors;
// blob granule base path
std::string bgBasePath;
// Set in contructor, stays immutable
const std::string bgBasePath;
// Indicates if the database error was injected
// Accessed on initialization and in ON_ERROR state only (no need for mutex)
bool databaseCreateErrorInjected;
// The tenant that we will run this transaction in
const std::optional<fdb::BytesRef> tenantName;
// Specifies whether the operation is transactional
const bool transactional;
};
/**
@ -241,13 +371,16 @@ protected:
*/
class BlockingTransactionContext : public TransactionContextBase {
public:
BlockingTransactionContext(fdb::Transaction tx,
std::shared_ptr<ITransactionActor> txActor,
TTaskFct cont,
BlockingTransactionContext(ITransactionExecutor* executor,
TOpStartFct startFct,
TOpContFct cont,
IScheduler* scheduler,
int retryLimit,
std::string bgBasePath)
: TransactionContextBase(tx, txActor, cont, scheduler, retryLimit, bgBasePath) {}
std::string bgBasePath,
std::optional<fdb::BytesRef> tenantName,
bool transactional)
: TransactionContextBase(executor, startFct, cont, scheduler, retryLimit, bgBasePath, tenantName, transactional) {
}
protected:
void doContinueAfter(fdb::Future f, TTaskFct cont, bool retryOnError) override {
@ -288,22 +421,8 @@ protected:
onError(err);
}
virtual void onError(fdb::Error err) override {
std::unique_lock<std::mutex> lock(mutex);
if (txState != TxState::IN_PROGRESS) {
// Ignore further errors, if the transaction is in the error handing mode or completed
return;
}
txState = TxState::ON_ERROR;
lock.unlock();
if (!canRetry(err)) {
return;
}
ASSERT(!onErrorFuture);
onErrorFuture = fdbTx.onError(err);
onErrorArg = err;
virtual void handleOnErrorFuture() override {
ASSERT(txState == TxState::ON_ERROR);
auto start = timeNow();
fdb::Error err2 = onErrorFuture.blockUntilReady();
@ -330,13 +449,16 @@ protected:
*/
class AsyncTransactionContext : public TransactionContextBase {
public:
AsyncTransactionContext(fdb::Transaction tx,
std::shared_ptr<ITransactionActor> txActor,
TTaskFct cont,
AsyncTransactionContext(ITransactionExecutor* executor,
TOpStartFct startFct,
TOpContFct cont,
IScheduler* scheduler,
int retryLimit,
std::string bgBasePath)
: TransactionContextBase(tx, txActor, cont, scheduler, retryLimit, bgBasePath) {}
std::string bgBasePath,
std::optional<fdb::BytesRef> tenantName,
bool transactional)
: TransactionContextBase(executor, startFct, cont, scheduler, retryLimit, bgBasePath, tenantName, transactional) {
}
protected:
void doContinueAfter(fdb::Future f, TTaskFct cont, bool retryOnError) override {
@ -344,7 +466,7 @@ protected:
if (txState != TxState::IN_PROGRESS) {
return;
}
callbackMap[f] = CallbackInfo{ f, cont, shared_from_this(), retryOnError, timeNow() };
callbackMap[f] = CallbackInfo{ f, cont, shared_from_this(), retryOnError, timeNow(), false };
lock.unlock();
try {
f.then([this](fdb::Future f) { futureReadyCallback(f, this); });
@ -383,7 +505,6 @@ protected:
if (txState != TxState::IN_PROGRESS) {
return;
}
lock.unlock();
fdb::Error err = f.error();
auto waitTimeUs = timeElapsedInUs(cbInfo.startTime, endTime);
if (waitTimeUs > LONG_WAIT_TIME_US) {
@ -392,32 +513,23 @@ protected:
err.code(),
err.what());
}
if (err.code() == error_code_transaction_cancelled) {
if (err.code() == error_code_transaction_cancelled || cbInfo.cancelled) {
return;
}
if (err.code() == error_code_success || !cbInfo.retryOnError) {
scheduler->schedule(cbInfo.cont);
return;
}
// We keep lock until here to prevent transitions from the IN_PROGRESS state
// which could possibly lead to completion of the workload and destruction
// of the scheduler
lock.unlock();
onError(err);
}
virtual void onError(fdb::Error err) override {
std::unique_lock<std::mutex> lock(mutex);
if (txState != TxState::IN_PROGRESS) {
// Ignore further errors, if the transaction is in the error handing mode or completed
return;
}
txState = TxState::ON_ERROR;
lock.unlock();
virtual void handleOnErrorFuture() override {
ASSERT(txState == TxState::ON_ERROR);
if (!canRetry(err)) {
return;
}
ASSERT(!onErrorFuture);
onErrorArg = err;
onErrorFuture = tx().onError(err);
onErrorCallTimePoint = timeNow();
onErrorThisRef = std::static_pointer_cast<AsyncTransactionContext>(shared_from_this());
try {
@ -457,17 +569,17 @@ protected:
scheduler->schedule([thisRef]() { thisRef->handleOnErrorResult(); });
}
void cleanUp() override {
TransactionContextBase::cleanUp();
void cancelPendingFutures() override {
// Cancel all pending operations
// Note that the callbacks of the cancelled futures will still be called
std::unique_lock<std::mutex> lock(mutex);
std::vector<fdb::Future> futures;
for (auto& iter : callbackMap) {
iter.second.cancelled = true;
futures.push_back(iter.second.future);
}
lock.unlock();
for (auto& f : futures) {
f.cancel();
}
@ -487,12 +599,16 @@ protected:
std::shared_ptr<ITransactionContext> thisRef;
bool retryOnError;
TimePoint startTime;
bool cancelled;
};
// Map for keeping track of future waits and holding necessary object references
// It can be accessed at any time when callbacks are triggered, so it mus always
// be mutex protected
std::unordered_map<fdb::Future, CallbackInfo> callbackMap;
// Holding reference to this for onError future C callback
// Accessed only in ON_ERROR state (no need for mutex)
std::shared_ptr<AsyncTransactionContext> onErrorThisRef;
};
@ -503,30 +619,98 @@ class TransactionExecutorBase : public ITransactionExecutor {
public:
TransactionExecutorBase(const TransactionExecutorOptions& options) : options(options), scheduler(nullptr) {}
~TransactionExecutorBase() {
if (tamperClusterFileThread.joinable()) {
tamperClusterFileThread.join();
}
}
void init(IScheduler* scheduler, const char* clusterFile, const std::string& bgBasePath) override {
this->scheduler = scheduler;
this->clusterFile = clusterFile;
this->bgBasePath = bgBasePath;
ASSERT(!options.tmpDir.empty());
emptyClusterFile.create(options.tmpDir, "fdbempty.cluster");
invalidClusterFile.create(options.tmpDir, "fdbinvalid.cluster");
invalidClusterFile.write(Random().get().randomStringLowerCase<std::string>(1, 100));
emptyListClusterFile.create(options.tmpDir, "fdbemptylist.cluster");
emptyListClusterFile.write(fmt::format("{}:{}@",
Random().get().randomStringLowerCase<std::string>(3, 8),
Random().get().randomStringLowerCase<std::string>(1, 100)));
if (options.tamperClusterFile) {
tamperedClusterFile.create(options.tmpDir, "fdb.cluster");
originalClusterFile = clusterFile;
this->clusterFile = tamperedClusterFile.getFileName();
// begin with a valid cluster file, but with non existing address
tamperedClusterFile.write(fmt::format("{}:{}@192.168.{}.{}:{}",
Random().get().randomStringLowerCase<std::string>(3, 8),
Random().get().randomStringLowerCase<std::string>(1, 100),
Random().get().randomInt(1, 254),
Random().get().randomInt(1, 254),
Random().get().randomInt(2000, 10000)));
tamperClusterFileThread = std::thread([this]() {
std::this_thread::sleep_for(std::chrono::seconds(2));
// now write an invalid connection string
tamperedClusterFile.write(fmt::format("{}:{}@",
Random().get().randomStringLowerCase<std::string>(3, 8),
Random().get().randomStringLowerCase<std::string>(1, 100)));
std::this_thread::sleep_for(std::chrono::seconds(2));
// finally use correct cluster file contents
std::filesystem::copy_file(std::filesystem::path(originalClusterFile),
std::filesystem::path(tamperedClusterFile.getFileName()),
std::filesystem::copy_options::overwrite_existing);
});
}
}
protected:
// Execute the transaction on the given database instance
void executeOnDatabase(fdb::Database db, std::shared_ptr<ITransactionActor> txActor, TTaskFct cont) {
const TransactionExecutorOptions& getOptions() override { return options; }
void execute(TOpStartFct startFct,
TOpContFct cont,
std::optional<fdb::BytesRef> tenantName,
bool transactional) override {
try {
fdb::Transaction tx = db.createTransaction();
std::shared_ptr<ITransactionContext> ctx;
if (options.blockOnFutures) {
ctx = std::make_shared<BlockingTransactionContext>(
tx, txActor, cont, scheduler, options.transactionRetryLimit, bgBasePath);
ctx = std::make_shared<BlockingTransactionContext>(this,
startFct,
cont,
scheduler,
options.transactionRetryLimit,
bgBasePath,
tenantName,
transactional);
} else {
ctx = std::make_shared<AsyncTransactionContext>(
tx, txActor, cont, scheduler, options.transactionRetryLimit, bgBasePath);
ctx = std::make_shared<AsyncTransactionContext>(this,
startFct,
cont,
scheduler,
options.transactionRetryLimit,
bgBasePath,
tenantName,
transactional);
}
txActor->init(ctx);
txActor->start();
startFct(ctx);
} catch (...) {
txActor->complete(fdb::Error(error_code_operation_failed));
cont();
cont(fdb::Error(error_code_operation_failed));
}
}
std::string getClusterFileForErrorInjection() override {
switch (Random::get().randomInt(0, 3)) {
case 0:
return fmt::format("{}{}", "not-existing-file", Random::get().randomStringLowerCase<std::string>(0, 2));
case 1:
return emptyClusterFile.getFileName();
case 2:
return invalidClusterFile.getFileName();
default: // case 3
return emptyListClusterFile.getFileName();
}
}
@ -535,6 +719,12 @@ protected:
std::string bgBasePath;
std::string clusterFile;
IScheduler* scheduler;
TmpFile emptyClusterFile;
TmpFile invalidClusterFile;
TmpFile emptyListClusterFile;
TmpFile tamperedClusterFile;
std::thread tamperClusterFileThread;
std::string originalClusterFile;
};
/**
@ -549,19 +739,19 @@ public:
void init(IScheduler* scheduler, const char* clusterFile, const std::string& bgBasePath) override {
TransactionExecutorBase::init(scheduler, clusterFile, bgBasePath);
for (int i = 0; i < options.numDatabases; i++) {
fdb::Database db(clusterFile);
fdb::Database db(this->clusterFile);
databases.push_back(db);
}
}
void execute(std::shared_ptr<ITransactionActor> txActor, TTaskFct cont) override {
fdb::Database selectDatabase() override {
int idx = Random::get().randomInt(0, options.numDatabases - 1);
executeOnDatabase(databases[idx], txActor, cont);
return databases[idx];
}
private:
void release() { databases.clear(); }
private:
std::vector<fdb::Database> databases;
};
@ -572,10 +762,7 @@ class DBPerTransactionExecutor : public TransactionExecutorBase {
public:
DBPerTransactionExecutor(const TransactionExecutorOptions& options) : TransactionExecutorBase(options) {}
void execute(std::shared_ptr<ITransactionActor> txActor, TTaskFct cont) override {
fdb::Database db(clusterFile.c_str());
executeOnDatabase(db, txActor, cont);
}
fdb::Database selectDatabase() override { return fdb::Database(clusterFile.c_str()); }
};
std::unique_ptr<ITransactionExecutor> createTransactionExecutor(const TransactionExecutorOptions& options) {

View File

@ -38,6 +38,9 @@ class ITransactionContext : public std::enable_shared_from_this<ITransactionCont
public:
virtual ~ITransactionContext() {}
// Current FDB database
virtual fdb::Database db() = 0;
// Current FDB transaction
virtual fdb::Transaction tx() = 0;
@ -62,57 +65,11 @@ public:
virtual void continueAfterAll(std::vector<fdb::Future> futures, TTaskFct cont);
};
/**
* Interface of an actor object implementing a concrete transaction
*/
class ITransactionActor {
public:
virtual ~ITransactionActor() {}
// Type of the lambda functions implementing a database operation
using TOpStartFct = std::function<void(std::shared_ptr<ITransactionContext>)>;
// Initialize with the given transaction context
virtual void init(std::shared_ptr<ITransactionContext> ctx) = 0;
// Start execution of the transaction, also called on retries
virtual void start() = 0;
// Transaction completion result (error_code_success in case of success)
virtual fdb::Error getError() = 0;
// Notification about the completion of the transaction
virtual void complete(fdb::Error err) = 0;
};
/**
* A helper base class for transaction actors
*/
class TransactionActorBase : public ITransactionActor {
public:
void init(std::shared_ptr<ITransactionContext> ctx) override { context = ctx; }
fdb::Error getError() override { return error; }
void complete(fdb::Error err) override;
protected:
std::shared_ptr<ITransactionContext> ctx() { return context; }
private:
std::shared_ptr<ITransactionContext> context;
fdb::Error error = fdb::Error::success();
};
// Type of the lambda functions implementing a transaction
using TTxStartFct = std::function<void(std::shared_ptr<ITransactionContext>)>;
/**
* A wrapper class for transactions implemented by lambda functions
*/
class TransactionFct : public TransactionActorBase {
public:
TransactionFct(TTxStartFct startFct) : startFct(startFct) {}
void start() override { startFct(this->ctx()); }
private:
TTxStartFct startFct;
};
// Type of the lambda functions implementing a database operation
using TOpContFct = std::function<void(fdb::Error)>;
/**
* Configuration of transaction execution mode
@ -124,11 +81,27 @@ struct TransactionExecutorOptions {
// Create each transaction in a separate database instance
bool databasePerTransaction = false;
// Enable injection of database create errors
bool injectDatabaseCreateErrors = false;
// Test tampering cluster file contents
bool tamperClusterFile = false;
// The probability of injected database create errors
// Used if injectDatabaseCreateErrors = true
double databaseCreateErrorRatio = 0.1;
// The size of the database instance pool
int numDatabases = 1;
// The number of tenants to create in the cluster. If 0, no tenants are used.
int numTenants = 0;
// Maximum number of retries per transaction (0 - unlimited)
int transactionRetryLimit = 0;
// Temporary directory
std::string tmpDir;
};
/**
@ -140,7 +113,13 @@ class ITransactionExecutor {
public:
virtual ~ITransactionExecutor() {}
virtual void init(IScheduler* sched, const char* clusterFile, const std::string& bgBasePath) = 0;
virtual void execute(std::shared_ptr<ITransactionActor> tx, TTaskFct cont) = 0;
virtual void execute(TOpStartFct start,
TOpContFct cont,
std::optional<fdb::BytesRef> tenantName,
bool transactional) = 0;
virtual fdb::Database selectDatabase() = 0;
virtual std::string getClusterFileForErrorInjection() = 0;
virtual const TransactionExecutorOptions& getOptions() = 0;
};
// Create a transaction executor for the given options

View File

@ -23,6 +23,9 @@
#include <algorithm>
#include <ctype.h>
#include <chrono>
#include <filesystem>
#include <fstream>
#include <string>
namespace FdbApiTester {
@ -46,16 +49,6 @@ Random& Random::get() {
return random;
}
// Produce a random string of lowercase latin letters; its length is drawn
// uniformly from the inclusive range [minLength, maxLength].
fdb::ByteString Random::randomStringLowerCase(int minLength, int maxLength) {
	const int len = randomInt(minLength, maxLength);
	fdb::ByteString result;
	result.reserve(len);
	for (int remaining = len; remaining > 0; --remaining) {
		result += (char)randomInt('a', 'z');
	}
	return result;
}
// Return true with probability trueRatio: draw a uniform double in [0, 1]
// and compare it against the requested ratio.
bool Random::randomBool(double trueRatio) {
	std::uniform_real_distribution<double> dist(0.0, 1.0);
	return dist(random) <= trueRatio;
}
@ -106,4 +99,52 @@ KeyRangeArray copyKeyRangeArray(fdb::future_var::KeyRangeRefArray::Type array) {
return out;
};
// Deep-copy a native granule summary array (pointer + count pair extracted from
// a future) into an owning std::vector of fdb::GranuleSummary, so the data
// stays valid after the originating future is destroyed.
GranuleSummaryArray copyGranuleSummaryArray(fdb::future_var::GranuleSummaryRefArray::Type array) {
	auto& [in_summaries, in_count] = array;
	GranuleSummaryArray out;
	// Reserve up front: the element count is known, avoid repeated reallocation.
	out.reserve(in_count);
	for (int i = 0; i < in_count; ++i) {
		fdb::native::FDBGranuleSummary nativeSummary = *in_summaries++;
		fdb::GranuleSummary summary(nativeSummary);
		out.push_back(summary);
	}
	return out;
}
// Destructor: best-effort removal of the temporary file, if one was ever created.
TmpFile::~TmpFile() {
	if (filename.empty()) {
		return;
	}
	remove();
}
// Pick an unused file name of the form "<dir>/<prefix>-<6 random lowercase chars>"
// and create an empty file at that path. Throws TesterError if the file cannot
// be created.
// NOTE(review): the exists() check and the subsequent creation are not atomic
// (TOCTOU); two concurrent callers could race on the same name — presumably
// acceptable for a test harness, but confirm if used elsewhere.
void TmpFile::create(std::string_view dir, std::string_view prefix) {
	while (true) {
		filename = fmt::format("{}/{}-{}", dir, prefix, Random::get().randomStringLowerCase<std::string>(6, 6));
		if (!std::filesystem::exists(std::filesystem::path(filename))) {
			break;
		}
	}
	// Create an empty tmp file
	std::fstream tmpFile(filename, std::fstream::out);
	if (!tmpFile.good()) {
		throw TesterError(fmt::format("Failed to create temporary file {}\n", filename));
	}
}
// Overwrite the temporary file with the given bytes (binary mode, truncating
// any previous contents). Throws TesterError if the file cannot be opened or
// if the write does not complete successfully.
void TmpFile::write(std::string_view data) {
	std::ofstream ofs(filename, std::fstream::out | std::fstream::binary);
	if (!ofs.good()) {
		throw TesterError(fmt::format("Failed to write to the temporary file {}\n", filename));
	}
	ofs.write(data.data(), static_cast<std::streamsize>(data.size()));
	// Close flushes buffered data; check the stream afterwards — the original
	// code never verified the write, so errors (e.g. disk full) went unnoticed.
	ofs.close();
	if (!ofs.good()) {
		throw TesterError(fmt::format("Failed to write to the temporary file {}\n", filename));
	}
}
// Delete the file from disk. Failure is only logged to stderr, not thrown,
// since this is also invoked from the destructor where throwing would be unsafe.
void TmpFile::remove() {
	if (!std::filesystem::remove(std::filesystem::path(filename))) {
		fmt::print(stderr, "Failed to remove file {}\n", filename);
	}
}
} // namespace FdbApiTester

View File

@ -66,7 +66,20 @@ public:
int randomInt(int min, int max);
fdb::ByteString randomStringLowerCase(int minLength, int maxLength);
// Generate a random string of lowercase latin letters ('a'..'z') whose length
// is drawn uniformly from [minLength, maxLength]. StringType may be any
// std::basic_string-like type supporting reserve() and operator+=(char).
template <class StringType>
StringType randomStringLowerCase(int minLength, int maxLength) {
	int length = randomInt(minLength, maxLength);
	StringType str;
	str.reserve(length);
	for (int i = 0; i < length; i++) {
		str += (char)randomInt('a', 'z');
	}
	return str;
}
// Convenience wrapper: random lowercase string as an fdb::ByteString
// (std::basic_string<uint8_t>).
fdb::ByteString randomByteStringLowerCase(int minLength, int maxLength) {
	return randomStringLowerCase<fdb::ByteString>(minLength, maxLength);
}
bool randomBool(double trueRatio);
@ -120,6 +133,9 @@ KeyValueArray copyKeyValueArray(fdb::future_var::KeyValueRefArray::Type array);
using KeyRangeArray = std::vector<fdb::KeyRange>;
KeyRangeArray copyKeyRangeArray(fdb::future_var::KeyRangeRefArray::Type array);
using GranuleSummaryArray = std::vector<fdb::GranuleSummary>;
GranuleSummaryArray copyGranuleSummaryArray(fdb::future_var::GranuleSummaryRefArray::Type array);
static_assert(__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__, "Do not support non-little-endian systems");
// Converts a little-endian encoded number into an integral type.
@ -139,6 +155,19 @@ static fdb::ByteString toByteString(T value) {
return output;
}
// RAII helper for a temporary file: the file is deleted when the object is
// destroyed (if one was created).
struct TmpFile {
public:
	~TmpFile();
	// Choose an unused name "<dir>/<prefix>-<random>" and create an empty file there.
	void create(std::string_view dir, std::string_view prefix);
	// Overwrite the file contents with the given bytes.
	void write(std::string_view data);
	// Delete the file from disk; failures are logged, not thrown.
	void remove();
	const std::string& getFileName() const { return filename; }
private:
	// Path of the created file; empty until create() succeeds.
	std::string filename;
};
} // namespace FdbApiTester
#endif

View File

@ -80,13 +80,14 @@ bool WorkloadConfig::getBoolOption(const std::string& name, bool defaultVal) con
WorkloadBase::WorkloadBase(const WorkloadConfig& config)
: manager(nullptr), tasksScheduled(0), numErrors(0), clientId(config.clientId), numClients(config.numClients),
failed(false), numTxCompleted(0) {
failed(false), numTxCompleted(0), numTxStarted(0), inProgress(false) {
maxErrors = config.getIntOption("maxErrors", 10);
workloadId = fmt::format("{}{}", config.name, clientId);
}
void WorkloadBase::init(WorkloadManager* manager) {
this->manager = manager;
inProgress = true;
}
void WorkloadBase::printStats() {
@ -94,6 +95,7 @@ void WorkloadBase::printStats() {
}
void WorkloadBase::schedule(TTaskFct task) {
ASSERT(inProgress);
if (failed) {
return;
}
@ -104,28 +106,49 @@ void WorkloadBase::schedule(TTaskFct task) {
});
}
void WorkloadBase::execTransaction(std::shared_ptr<ITransactionActor> tx, TTaskFct cont, bool failOnError) {
void WorkloadBase::execTransaction(TOpStartFct startFct,
TTaskFct cont,
std::optional<fdb::BytesRef> tenant,
bool failOnError) {
doExecute(startFct, cont, tenant, failOnError, true);
}
// Execute a non-transactional database operation within the workload
void WorkloadBase::execOperation(TOpStartFct startFct, TTaskFct cont, bool failOnError) {
doExecute(startFct, cont, {}, failOnError, false);
}
void WorkloadBase::doExecute(TOpStartFct startFct,
TTaskFct cont,
std::optional<fdb::BytesRef> tenant,
bool failOnError,
bool transactional) {
ASSERT(inProgress);
if (failed) {
return;
}
tasksScheduled++;
manager->txExecutor->execute(tx, [this, tx, cont, failOnError]() {
numTxCompleted++;
fdb::Error err = tx->getError();
if (err.code() == error_code_success) {
cont();
} else {
std::string msg = fmt::format("Transaction failed with error: {} ({})", err.code(), err.what());
if (failOnError) {
error(msg);
failed = true;
} else {
info(msg);
cont();
}
}
scheduledTaskDone();
});
numTxStarted++;
manager->txExecutor->execute(
startFct,
[this, startFct, cont, failOnError](fdb::Error err) {
numTxCompleted++;
if (err.code() == error_code_success) {
cont();
} else {
std::string msg = fmt::format("Transaction failed with error: {} ({})", err.code(), err.what());
if (failOnError) {
error(msg);
failed = true;
} else {
info(msg);
cont();
}
}
scheduledTaskDone();
},
tenant,
transactional);
}
void WorkloadBase::info(const std::string& msg) {
@ -143,11 +166,13 @@ void WorkloadBase::error(const std::string& msg) {
void WorkloadBase::scheduledTaskDone() {
if (--tasksScheduled == 0) {
inProgress = false;
if (numErrors > 0) {
error(fmt::format("Workload failed with {} errors", numErrors.load()));
} else {
info("Workload successfully completed");
}
ASSERT(numTxStarted == numTxCompleted);
manager->workloadDone(this, numErrors > 0);
}
}

View File

@ -82,6 +82,9 @@ struct WorkloadConfig {
// Total number of clients
int numClients;
// Number of Tenants
int numTenants;
// Selected FDB API version
int apiVersion;
@ -116,12 +119,13 @@ protected:
void schedule(TTaskFct task);
// Execute a transaction within the workload
void execTransaction(std::shared_ptr<ITransactionActor> tx, TTaskFct cont, bool failOnError = true);
void execTransaction(TOpStartFct startFct,
TTaskFct cont,
std::optional<fdb::BytesRef> tenant = std::optional<fdb::BytesRef>(),
bool failOnError = true);
// Execute a transaction within the workload, a convenience method for a tranasaction defined by a lambda function
void execTransaction(TTxStartFct start, TTaskFct cont, bool failOnError = true) {
execTransaction(std::make_shared<TransactionFct>(start), cont, failOnError);
}
// Execute a non-transactional database operation within the workload
void execOperation(TOpStartFct startFct, TTaskFct cont, bool failOnError = true);
// Log an error message, increase error counter
void error(const std::string& msg);
@ -135,6 +139,12 @@ protected:
private:
WorkloadManager* manager;
void doExecute(TOpStartFct startFct,
TTaskFct cont,
std::optional<fdb::BytesRef> tenant,
bool failOnError,
bool transactional);
// Decrease scheduled task counter, notify the workload manager
// that the task is done if no more tasks schedule
void scheduledTaskDone();
@ -164,6 +174,12 @@ protected:
// Number of completed transactions
std::atomic<int> numTxCompleted;
// Number of started transactions
std::atomic<int> numTxStarted;
// Workload is in progress (intialized, but not completed)
std::atomic<bool> inProgress;
};
// Workload manager

View File

@ -0,0 +1,22 @@
# Stress configuration: exercise blob granule error handling with multiple FDB
# network threads, multiple databases, and multiple client threads.
[[test]]
title = 'Blob Granule Errors Multi Threaded'
multiThreaded = true
buggify = true
minFdbThreads = 2
maxFdbThreads = 8
minDatabases = 2
maxDatabases = 8
minClientThreads = 2
maxClientThreads = 8
minClients = 2
maxClients = 8

# Workload generating random blob-granule reads over keys/values in the ranges below.
[[test.workload]]
name = 'BlobGranuleErrors'
minKeyLength = 1
maxKeyLength = 64
minValueLength = 1
maxValueLength = 1000
maxKeysPerTransaction = 50
initialSize = 100
numRandomOperations = 100

View File

@ -0,0 +1,22 @@
[[test]]
title = 'Blob Granule Errors Multi Threaded'
multiThreaded = true
buggify = true
minFdbThreads = 2
maxFdbThreads = 8
minDatabases = 2
maxDatabases = 8
minClientThreads = 2
maxClientThreads = 8
minClients = 2
maxClients = 8
[[test.workload]]
name = 'BlobGranuleErrors'
minKeyLength = 1
maxKeyLength = 64
minValueLength = 1
maxValueLength = 1000
maxKeysPerTransaction = 50
initialSize = 100
numRandomOperations = 100

View File

@ -0,0 +1,15 @@
# Single-threaded variant of the blob granule errors test (no FDB multithreading).
[[test]]
title = 'Blob Granule Errors Single Threaded'
minClients = 1
maxClients = 3
multiThreaded = false

# Workload generating random blob-granule reads over keys/values in the ranges below.
[[test.workload]]
name = 'BlobGranuleErrors'
minKeyLength = 1
maxKeyLength = 64
minValueLength = 1
maxValueLength = 1000
maxKeysPerTransaction = 50
initialSize = 100
numRandomOperations = 100

View File

@ -36,6 +36,8 @@ namespace FdbApiTester {
namespace {
#define API_VERSION_CLIENT_TMP_DIR 720
enum TesterOptionId {
OPT_CONNFILE,
OPT_HELP,
@ -285,7 +287,7 @@ void fdb_check(fdb::Error e) {
}
void applyNetworkOptions(TesterOptions& options) {
if (!options.tmpDir.empty() && options.apiVersion >= 720) {
if (!options.tmpDir.empty() && options.apiVersion >= API_VERSION_CLIENT_TMP_DIR) {
fdb::network::setOption(FDBNetworkOption::FDB_NET_OPTION_CLIENT_TMP_DIR, options.tmpDir);
}
if (!options.externalClientLibrary.empty()) {
@ -320,6 +322,10 @@ void applyNetworkOptions(TesterOptions& options) {
fdb::network::setOption(FDBNetworkOption::FDB_NET_OPTION_CLIENT_BUGGIFY_ENABLE);
}
if (options.testSpec.disableClientBypass && options.apiVersion >= 720) {
fdb::network::setOption(FDBNetworkOption::FDB_NET_OPTION_DISABLE_CLIENT_BYPASS);
}
if (options.trace) {
fdb::network::setOption(FDBNetworkOption::FDB_NET_OPTION_TRACE_ENABLE, options.traceDir);
fdb::network::setOption(FDBNetworkOption::FDB_NET_OPTION_TRACE_FORMAT, options.traceFormat);
@ -350,6 +356,12 @@ void randomizeOptions(TesterOptions& options) {
options.numClientThreads = random.randomInt(options.testSpec.minClientThreads, options.testSpec.maxClientThreads);
options.numDatabases = random.randomInt(options.testSpec.minDatabases, options.testSpec.maxDatabases);
options.numClients = random.randomInt(options.testSpec.minClients, options.testSpec.maxClients);
// Choose a random number of tenants. If a test is configured to allow 0 tenants, then use 0 tenants half the time.
if (options.testSpec.maxTenants >= options.testSpec.minTenants &&
(options.testSpec.minTenants > 0 || random.randomBool(0.5))) {
options.numTenants = random.randomInt(options.testSpec.minTenants, options.testSpec.maxTenants);
}
}
bool runWorkloads(TesterOptions& options) {
@ -358,7 +370,12 @@ bool runWorkloads(TesterOptions& options) {
txExecOptions.blockOnFutures = options.testSpec.blockOnFutures;
txExecOptions.numDatabases = options.numDatabases;
txExecOptions.databasePerTransaction = options.testSpec.databasePerTransaction;
// 7.1 and older releases crash on database create errors
txExecOptions.injectDatabaseCreateErrors = options.testSpec.buggify && options.apiVersion > 710;
txExecOptions.transactionRetryLimit = options.transactionRetryLimit;
txExecOptions.tmpDir = options.tmpDir.empty() ? std::string("/tmp") : options.tmpDir;
txExecOptions.tamperClusterFile = options.testSpec.tamperClusterFile;
txExecOptions.numTenants = options.numTenants;
std::vector<std::shared_ptr<IWorkload>> workloads;
workloads.reserve(options.testSpec.workloads.size() * options.numClients);
@ -370,6 +387,7 @@ bool runWorkloads(TesterOptions& options) {
config.options = workloadSpec.options;
config.clientId = i;
config.numClients = options.numClients;
config.numTenants = options.numTenants;
config.apiVersion = options.apiVersion;
std::shared_ptr<IWorkload> workload = IWorkloadFactory::create(workloadSpec.name, config);
if (!workload) {

View File

@ -0,0 +1,29 @@
[[test]]
title = 'API Correctness Single Threaded'
minClients = 1
maxClients = 3
minDatabases = 1
maxDatabases = 3
multiThreaded = false
disableClientBypass = true
[[test.workload]]
name = 'ApiCorrectness'
minKeyLength = 1
maxKeyLength = 64
minValueLength = 1
maxValueLength = 1000
maxKeysPerTransaction = 50
initialSize = 100
numRandomOperations = 100
readExistingKeysRatio = 0.9
[[test.workload]]
name = 'AtomicOpsCorrectness'
initialSize = 0
numRandomOperations = 100
[[test.workload]]
name = 'WatchAndWait'
initialSize = 0
numRandomOperations = 10

View File

@ -0,0 +1,21 @@
[[test]]
title = 'Multi-tenant API Correctness Multi Threaded'
multiThreaded = true
buggify = true
minFdbThreads = 2
maxFdbThreads = 8
minClients = 2
maxClients = 8
minTenants = 2
maxTenants = 5
[[test.workload]]
name = 'ApiCorrectness'
minKeyLength = 1
maxKeyLength = 64
minValueLength = 1
maxValueLength = 1000
maxKeysPerTransaction = 5
initialSize = 100
numRandomOperations = 200
readExistingKeysRatio = 0.9

View File

@ -0,0 +1,24 @@
[[test]]
title = 'Test tampering the cluster file'
multiThreaded = true
buggify = true
tamperClusterFile = true
minFdbThreads = 2
maxFdbThreads = 4
minDatabases = 2
maxDatabases = 4
minClientThreads = 2
maxClientThreads = 4
minClients = 2
maxClients = 4
[[test.workload]]
name = 'ApiCorrectness'
minKeyLength = 1
maxKeyLength = 64
minValueLength = 1
maxValueLength = 1000
maxKeysPerTransaction = 50
initialSize = 100
numRandomOperations = 100
readExistingKeysRatio = 0.9

View File

@ -44,7 +44,7 @@ int main(int argc, char** argv) {
if (argc != 2) {
printf("Usage: %s <cluster_file>", argv[0]);
}
fdb_check(fdb_select_api_version(720));
fdb_check(fdb_select_api_version(FDB_API_VERSION));
fdb_check(fdb_setup_network());
std::thread network_thread{ &fdb_run_network };

View File

@ -46,6 +46,8 @@ namespace native {
#include <foundationdb/fdb_c.h>
}
#define TENANT_API_VERSION_GUARD 720
using ByteString = std::basic_string<uint8_t>;
using BytesRef = std::basic_string_view<uint8_t>;
using CharsRef = std::string_view;
@ -62,6 +64,22 @@ struct KeyRange {
Key beginKey;
Key endKey;
};
// Owning C++ copy of a native FDBGranuleSummary: the granule's key range plus
// snapshot/delta version and size metadata.
struct GranuleSummary {
	KeyRange keyRange;
	int64_t snapshotVersion;
	int64_t snapshotSize;
	int64_t deltaVersion;
	int64_t deltaSize;

	// Deep-copies the key range out of the native struct so the summary stays
	// valid after the originating future/memory is released.
	GranuleSummary(const native::FDBGranuleSummary& nativeSummary) {
		keyRange.beginKey = fdb::Key(nativeSummary.key_range.begin_key, nativeSummary.key_range.begin_key_length);
		keyRange.endKey = fdb::Key(nativeSummary.key_range.end_key, nativeSummary.key_range.end_key_length);
		snapshotVersion = nativeSummary.snapshot_version;
		snapshotSize = nativeSummary.snapshot_size;
		deltaVersion = nativeSummary.delta_version;
		deltaSize = nativeSummary.delta_size;
	}
};
inline uint8_t const* toBytePtr(char const* ptr) noexcept {
return reinterpret_cast<uint8_t const*>(ptr);
@ -137,6 +155,13 @@ struct None {
struct Type {};
static Error extract(native::FDBFuture*, Type&) noexcept { return Error(0); }
};
// future_var extractor for futures that resolve to a boolean value
// (via fdb_future_get_bool).
struct Bool {
	using Type = native::fdb_bool_t;
	static Error extract(native::FDBFuture* f, Type& out) noexcept {
		auto err = native::fdb_future_get_bool(f, &out);
		return Error(err);
	}
};
struct Int64 {
using Type = int64_t;
static Error extract(native::FDBFuture* f, Type& out) noexcept {
@ -200,6 +225,27 @@ struct KeyRangeRefArray {
}
};
// Non-owning view over a native FDBGranuleSummary; adds typed accessors for the
// granule's key range boundaries. Valid only while the backing future is alive.
struct GranuleSummaryRef : native::FDBGranuleSummary {
	fdb::KeyRef beginKey() const noexcept {
		return fdb::KeyRef(native::FDBGranuleSummary::key_range.begin_key,
		                   native::FDBGranuleSummary::key_range.begin_key_length);
	}
	fdb::KeyRef endKey() const noexcept {
		return fdb::KeyRef(native::FDBGranuleSummary::key_range.end_key,
		                   native::FDBGranuleSummary::key_range.end_key_length);
	}
};
// future_var extractor for futures resolving to an array of granule summaries:
// yields a (pointer, count) pair of non-owning GranuleSummaryRef views.
struct GranuleSummaryRefArray {
	using Type = std::tuple<GranuleSummaryRef const*, int>;
	static Error extract(native::FDBFuture* f, Type& out) noexcept {
		auto& [out_summaries, out_count] = out;
		// reinterpret_cast is safe: GranuleSummaryRef derives from FDBGranuleSummary
		// and adds no data members.
		auto err = native::fdb_future_get_granule_summary_array(
		    f, reinterpret_cast<const native::FDBGranuleSummary**>(&out_summaries), &out_count);
		return Error(err);
	}
};
} // namespace future_var
[[noreturn]] inline void throwError(std::string_view preamble, Error err) {
@ -310,6 +356,7 @@ public:
class Future {
protected:
friend class Transaction;
friend class Database;
friend std::hash<Future>;
std::shared_ptr<native::FDBFuture> f;
@ -468,6 +515,14 @@ public:
Transaction(const Transaction&) noexcept = default;
Transaction& operator=(const Transaction&) noexcept = default;
void atomic_store(Transaction other) { std::atomic_store(&tr, other.tr); }
// Atomically load the shared transaction handle (pairs with atomic_store),
// allowing the handle to be handed between threads safely.
Transaction atomic_load() {
	Transaction retVal;
	retVal.tr = std::atomic_load(&tr);
	return retVal;
}
bool valid() const noexcept { return tr != nullptr; }
explicit operator bool() const noexcept { return valid(); }
@ -573,6 +628,14 @@ public:
tr.get(), begin.data(), intSize(begin), end.data(), intSize(end), begin_version, read_version, context));
}
TypedFuture<future_var::GranuleSummaryRefArray> summarizeBlobGranules(KeyRef begin,
KeyRef end,
int64_t summaryVersion,
int rangeLimit) {
return native::fdb_transaction_summarize_blob_granules(
tr.get(), begin.data(), intSize(begin), end.data(), intSize(end), summaryVersion, rangeLimit);
}
TypedFuture<future_var::None> watch(KeyRef key) {
return native::fdb_transaction_watch(tr.get(), key.data(), intSize(key));
}
@ -599,6 +662,13 @@ public:
void clearRange(KeyRef begin, KeyRef end) {
native::fdb_transaction_clear_range(tr.get(), begin.data(), intSize(begin), end.data(), intSize(end));
}
void addReadConflictRange(KeyRef begin, KeyRef end) {
if (auto err = Error(native::fdb_transaction_add_conflict_range(
tr.get(), begin.data(), intSize(begin), end.data(), intSize(end), FDB_CONFLICT_RANGE_TYPE_READ))) {
throwError("fdb_transaction_add_conflict_range returned error: ", err);
}
}
};
class Tenant final {
@ -621,6 +691,7 @@ public:
// Register a tenant by writing an empty value under the tenant management map
// prefix within the given transaction. The caller commits the transaction.
static void createTenant(Transaction tr, BytesRef name) {
	// Special-key-space writes plus lock-aware/raw access are required to touch
	// the \xff\xff management keyspace.
	tr.setOption(FDBTransactionOption::FDB_TR_OPTION_SPECIAL_KEY_SPACE_ENABLE_WRITES, BytesRef());
	tr.setOption(FDBTransactionOption::FDB_TR_OPTION_LOCK_AWARE, BytesRef());
	tr.setOption(FDBTransactionOption::FDB_TR_OPTION_RAW_ACCESS, BytesRef());
	tr.set(toBytesRef(fmt::format("{}{}", tenantManagementMapPrefix, toCharsRef(name))), BytesRef());
}
@ -662,6 +733,14 @@ public:
}
Database() noexcept : db(nullptr) {}
void atomic_store(Database other) { std::atomic_store(&db, other.db); }
Database atomic_load() {
Database retVal;
retVal.db = std::atomic_load(&db);
return retVal;
}
Error setOptionNothrow(FDBDatabaseOption option, int64_t value) noexcept {
return Error(native::fdb_database_set_option(
db.get(), option, reinterpret_cast<const uint8_t*>(&value), static_cast<int>(sizeof(value))));
@ -707,10 +786,50 @@ public:
throwError("Failed to create transaction: ", err);
return Transaction(tx_native);
}
TypedFuture<future_var::KeyRangeRefArray> listBlobbifiedRanges(KeyRef begin, KeyRef end, int rangeLimit) {
if (!db)
throw std::runtime_error("listBlobbifiedRanges from null database");
return native::fdb_database_list_blobbified_ranges(
db.get(), begin.data(), intSize(begin), end.data(), intSize(end), rangeLimit);
}
TypedFuture<future_var::Int64> verifyBlobRange(KeyRef begin, KeyRef end, int64_t version) {
if (!db)
throw std::runtime_error("verifyBlobRange from null database");
return native::fdb_database_verify_blob_range(
db.get(), begin.data(), intSize(begin), end.data(), intSize(end), version);
}
TypedFuture<future_var::Bool> blobbifyRange(KeyRef begin, KeyRef end) {
if (!db)
throw std::runtime_error("blobbifyRange from null database");
return native::fdb_database_blobbify_range(db.get(), begin.data(), intSize(begin), end.data(), intSize(end));
}
TypedFuture<future_var::Bool> unblobbifyRange(KeyRef begin, KeyRef end) {
if (!db)
throw std::runtime_error("unblobbifyRange from null database");
return native::fdb_database_unblobbify_range(db.get(), begin.data(), intSize(begin), end.data(), intSize(end));
}
TypedFuture<future_var::KeyRef> purgeBlobGranules(KeyRef begin, KeyRef end, int64_t version, bool force) {
if (!db)
throw std::runtime_error("purgeBlobGranules from null database");
native::fdb_bool_t forceBool = force;
return native::fdb_database_purge_blob_granules(
db.get(), begin.data(), intSize(begin), end.data(), intSize(end), version, forceBool);
}
// Wait for a previously-issued blob granule purge (identified by purgeKey)
// to complete. Throws if invoked on a null database handle.
TypedFuture<future_var::None> waitPurgeGranulesComplete(KeyRef purgeKey) {
	if (!db)
		// Fixed copy-paste bug: the message previously named purgeBlobGranules.
		throw std::runtime_error("waitPurgeGranulesComplete from null database");
	return native::fdb_database_wait_purge_granules_complete(db.get(), purgeKey.data(), intSize(purgeKey));
}
};
inline Error selectApiVersionNothrow(int version) {
if (version < 720) {
if (version < TENANT_API_VERSION_GUARD) {
Tenant::tenantManagementMapPrefix = "\xff\xff/management/tenant_map/";
}
return Error(native::fdb_select_api_version(version));
@ -723,7 +842,7 @@ inline void selectApiVersion(int version) {
}
inline Error selectApiVersionCappedNothrow(int version) {
if (version < 720) {
if (version < TENANT_API_VERSION_GUARD) {
Tenant::tenantManagementMapPrefix = "\xff\xff/management/tenant_map/";
}
return Error(

View File

@ -4,6 +4,6 @@
int main(int argc, char* argv[]) {
(void)argc;
(void)argv;
fdb_select_api_version(720);
fdb_select_api_version(FDB_API_VERSION);
return 0;
}

View File

@ -26,6 +26,9 @@
extern thread_local mako::Logger logr;
// FIXME: use the same implementation as the api tester! this implementation was from back when mako was written in C
// and is inferior.
namespace mako::blob_granules::local_file {
int64_t startLoad(const char* filename,

View File

@ -641,7 +641,7 @@ void runTests(struct ResultSet* rs) {
int main(int argc, char** argv) {
srand(time(NULL));
struct ResultSet* rs = newResultSet();
checkError(fdb_select_api_version(720), "select API version", rs);
checkError(fdb_select_api_version(FDB_API_VERSION), "select API version", rs);
printf("Running performance test at client version: %s\n", fdb_get_client_version());
valueStr = (uint8_t*)malloc((sizeof(uint8_t)) * valueSize);

View File

@ -285,7 +285,7 @@ void runTests(struct ResultSet* rs) {
int main(int argc, char** argv) {
srand(time(NULL));
struct ResultSet* rs = newResultSet();
checkError(fdb_select_api_version(720), "select API version", rs);
checkError(fdb_select_api_version(FDB_API_VERSION), "select API version", rs);
printf("Running RYW Benchmark test at client version: %s\n", fdb_get_client_version());
keys = generateKeys(numKeys, keySize);

View File

@ -97,7 +97,7 @@ void runTests(struct ResultSet* rs) {
int main(int argc, char** argv) {
srand(time(NULL));
struct ResultSet* rs = newResultSet();
checkError(fdb_select_api_version(720), "select API version", rs);
checkError(fdb_select_api_version(FDB_API_VERSION), "select API version", rs);
printf("Running performance test at client version: %s\n", fdb_get_client_version());
keys = generateKeys(numKeys, KEY_SIZE);

View File

@ -255,7 +255,7 @@ int main(int argc, char** argv) {
<< std::endl;
return 1;
}
fdb_check(fdb_select_api_version(720));
fdb_check(fdb_select_api_version(FDB_API_VERSION));
if (argc >= 3) {
std::string externalClientLibrary = argv[2];
if (externalClientLibrary.substr(0, 2) != "--") {

View File

@ -84,6 +84,12 @@ void Future::cancel() {
return fdb_future_get_keyrange_array(future_, out_keyranges, out_count);
}
// GranuleSummaryArrayFuture
// Extract the granule summary array from this resolved future.
// Thin wrapper over fdb_future_get_granule_summary_array with identical behavior.
[[nodiscard]] fdb_error_t GranuleSummaryArrayFuture::get(const FDBGranuleSummary** out_summaries, int* out_count) {
	return fdb_future_get_granule_summary_array(future_, out_summaries, out_count);
}
// KeyValueArrayFuture
[[nodiscard]] fdb_error_t KeyValueArrayFuture::get(const FDBKeyValue** out_kv, int* out_count, fdb_bool_t* out_more) {
@ -366,6 +372,7 @@ KeyRangeArrayFuture Transaction::get_blob_granule_ranges(std::string_view begin_
end_key.size(),
rangeLimit));
}
KeyValueArrayResult Transaction::read_blob_granules(std::string_view begin_key,
std::string_view end_key,
int64_t beginVersion,
@ -381,4 +388,17 @@ KeyValueArrayResult Transaction::read_blob_granules(std::string_view begin_key,
granuleContext));
}
// Request summaries (key range, snapshot/delta version and size) of the blob
// granules covering [begin_key, end_key) as of summary_version, returning at
// most rangeLimit entries. Wraps fdb_transaction_summarize_blob_granules.
GranuleSummaryArrayFuture Transaction::summarize_blob_granules(std::string_view begin_key,
                                                               std::string_view end_key,
                                                               int64_t summary_version,
                                                               int rangeLimit) {
	return GranuleSummaryArrayFuture(fdb_transaction_summarize_blob_granules(tr_,
	                                                                         (const uint8_t*)begin_key.data(),
	                                                                         begin_key.size(),
	                                                                         (const uint8_t*)end_key.data(),
	                                                                         end_key.size(),
	                                                                         summary_version,
	                                                                         rangeLimit));
}
} // namespace fdb

View File

@ -161,6 +161,18 @@ private:
KeyRangeArrayFuture(FDBFuture* f) : Future(f) {}
};
// Typed future wrapper for granule summary array results; constructed only by
// Transaction::summarize_blob_granules.
class GranuleSummaryArrayFuture : public Future {
public:
	// Call this function instead of fdb_future_get_granule_summary_array when using
	// the GranuleSummaryArrayFuture type. Its behavior is identical to
	// fdb_future_get_granule_summary_array.
	fdb_error_t get(const FDBGranuleSummary** out_summaries, int* out_count);

private:
	friend class Transaction;
	GranuleSummaryArrayFuture(FDBFuture* f) : Future(f) {}
};
class EmptyFuture : public Future {
private:
friend class Transaction;
@ -354,6 +366,10 @@ public:
int64_t beginVersion,
int64_t endVersion,
FDBReadBlobGranuleContext granule_context);
GranuleSummaryArrayFuture summarize_blob_granules(std::string_view begin_key,
std::string_view end_key,
int64_t summaryVersion,
int rangeLimit);
private:
FDBTransaction* tr_;

View File

@ -42,13 +42,13 @@ TEST_CASE("setup") {
CHECK(err);
// Select current API version
fdb_check(fdb_select_api_version(720));
fdb_check(fdb_select_api_version(FDB_API_VERSION));
// Error to call again after a successful return
err = fdb_select_api_version(720);
err = fdb_select_api_version(FDB_API_VERSION);
CHECK(err);
CHECK(fdb_get_max_api_version() >= 720);
CHECK(fdb_get_max_api_version() >= FDB_API_VERSION);
fdb_check(fdb_setup_network());
// Calling a second time should fail

View File

@ -53,7 +53,7 @@ bool file_exists(const char* path) {
}
int main(int argc, char** argv) {
fdb_check(fdb_select_api_version(720));
fdb_check(fdb_select_api_version(FDB_API_VERSION));
std::string file_identifier = "trace_partial_file_suffix_test" + std::to_string(std::random_device{}());
std::string trace_partial_file_suffix = ".tmp";

View File

@ -1001,7 +1001,7 @@ GetMappedRangeResult getMappedIndexEntries(int beginId,
TEST_CASE("versionstamp_unit_test") {
// a random 12 bytes long StringRef as a versionstamp
StringRef str = "\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12"_sr;
Versionstamp vs(str), vs2(str);
TupleVersionstamp vs(str), vs2(str);
ASSERT(vs == vs2);
ASSERT(vs.begin() != vs2.begin());
@ -1031,7 +1031,7 @@ TEST_CASE("versionstamp_unit_test") {
TEST_CASE("tuple_support_versionstamp") {
// a random 12 bytes long StringRef as a versionstamp
StringRef str = "\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12"_sr;
Versionstamp vs(str);
TupleVersionstamp vs(str);
const Tuple t = Tuple::makeTuple(prefix, RECORD, vs, "{K[3]}"_sr, "{...}"_sr);
ASSERT(t.getVersionstamp(2) == vs);
@ -1047,7 +1047,7 @@ TEST_CASE("tuple_fail_to_append_truncated_versionstamp") {
// a truncated 11 bytes long StringRef as a versionstamp
StringRef str = "\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11"_sr;
try {
Versionstamp truncatedVersionstamp(str);
TupleVersionstamp truncatedVersionstamp(str);
} catch (Error& e) {
return;
}
@ -1058,7 +1058,7 @@ TEST_CASE("tuple_fail_to_append_longer_versionstamp") {
// a longer than expected 13 bytes long StringRef as a versionstamp
StringRef str = "\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11"_sr;
try {
Versionstamp longerVersionstamp(str);
TupleVersionstamp longerVersionstamp(str);
} catch (Error& e) {
return;
}
@ -2761,6 +2761,7 @@ TEST_CASE("Blob Granule Functions") {
auto confValue =
get_value("\xff/conf/blob_granules_enabled", /* snapshot */ false, { FDB_TR_OPTION_READ_SYSTEM_KEYS });
if (!confValue.has_value() || confValue.value() != "1") {
// std::cout << "skipping blob granule test" << std::endl;
return;
}
@ -2817,7 +2818,6 @@ TEST_CASE("Blob Granule Functions") {
fdb::KeyValueArrayResult r =
tr.read_blob_granules(key("bg"), key("bh"), originalReadVersion, -2, granuleContext);
fdb_error_t err = r.get(&out_kv, &out_count, &out_more);
;
if (err && err != 2037 /* blob_granule_not_materialized */) {
fdb::EmptyFuture f2 = tr.on_error(err);
fdb_check(wait_future(f2));
@ -2865,6 +2865,10 @@ TEST_CASE("Blob Granule Functions") {
int out_count;
fdb_check(f.get(&out_kr, &out_count));
CHECK(std::string((const char*)out_kr[0].begin_key, out_kr[0].begin_key_length) <= key("bg"));
CHECK(std::string((const char*)out_kr[out_count - 1].end_key, out_kr[out_count - 1].end_key_length) >=
key("bh"));
CHECK(out_count >= 1);
// check key ranges are in order
for (int i = 0; i < out_count; i++) {
@ -2872,9 +2876,9 @@ TEST_CASE("Blob Granule Functions") {
CHECK(std::string((const char*)out_kr[i].begin_key, out_kr[i].begin_key_length) <
std::string((const char*)out_kr[i].end_key, out_kr[i].end_key_length));
}
// Ranges themselves are sorted
// Ranges themselves are sorted and contiguous
for (int i = 0; i < out_count - 1; i++) {
CHECK(std::string((const char*)out_kr[i].end_key, out_kr[i].end_key_length) <=
CHECK(std::string((const char*)out_kr[i].end_key, out_kr[i].end_key_length) ==
std::string((const char*)out_kr[i + 1].begin_key, out_kr[i + 1].begin_key_length));
}
@ -2900,7 +2904,6 @@ TEST_CASE("Blob Granule Functions") {
fdb_check(wait_future(waitPurgeFuture));
// re-read again at the purge version to make sure it is still valid
while (1) {
fdb_check(tr.set_option(FDB_TR_OPTION_READ_YOUR_WRITES_DISABLE, nullptr, 0));
fdb::KeyValueArrayResult r =
@ -2917,6 +2920,56 @@ TEST_CASE("Blob Granule Functions") {
tr.reset();
break;
}
// check granule summary
while (1) {
fdb::GranuleSummaryArrayFuture f = tr.summarize_blob_granules(key("bg"), key("bh"), originalReadVersion, 100);
fdb_error_t err = wait_future(f);
if (err) {
fdb::EmptyFuture f2 = tr.on_error(err);
fdb_check(wait_future(f2));
continue;
}
const FDBGranuleSummary* out_summaries;
int out_count;
fdb_check(f.get(&out_summaries, &out_count));
CHECK(out_count >= 1);
CHECK(out_count <= 100);
// check that ranges cover requested range
CHECK(std::string((const char*)out_summaries[0].key_range.begin_key,
out_summaries[0].key_range.begin_key_length) <= key("bg"));
CHECK(std::string((const char*)out_summaries[out_count - 1].key_range.end_key,
out_summaries[out_count - 1].key_range.end_key_length) >= key("bh"));
// check key ranges are in order
for (int i = 0; i < out_count; i++) {
// key range start < end
CHECK(std::string((const char*)out_summaries[i].key_range.begin_key,
out_summaries[i].key_range.begin_key_length) <
std::string((const char*)out_summaries[i].key_range.end_key,
out_summaries[i].key_range.end_key_length));
// sanity check versions and sizes
CHECK(out_summaries[i].snapshot_version <= originalReadVersion);
CHECK(out_summaries[i].delta_version <= originalReadVersion);
CHECK(out_summaries[i].snapshot_version <= out_summaries[i].delta_version);
CHECK(out_summaries[i].snapshot_size > 0);
CHECK(out_summaries[i].delta_size >= 0);
}
// Ranges themselves are sorted and contiguous
for (int i = 0; i < out_count - 1; i++) {
CHECK(std::string((const char*)out_summaries[i].key_range.end_key,
out_summaries[i].key_range.end_key_length) ==
std::string((const char*)out_summaries[i + 1].key_range.begin_key,
out_summaries[i + 1].key_range.begin_key_length));
}
tr.reset();
break;
}
}
int main(int argc, char** argv) {
@ -2926,7 +2979,7 @@ int main(int argc, char** argv) {
<< std::endl;
return 1;
}
fdb_check(fdb_select_api_version(720));
fdb_check(fdb_select_api_version(FDB_API_VERSION));
if (argc >= 4) {
std::string externalClientLibrary = argv[3];
if (externalClientLibrary.substr(0, 2) != "--") {

View File

@ -266,7 +266,7 @@ struct SimpleWorkload final : FDBWorkload {
insertsPerTx = context->getOption("insertsPerTx", 100ul);
opsPerTx = context->getOption("opsPerTx", 100ul);
runFor = context->getOption("runFor", 10.0);
auto err = fdb_select_api_version(720);
auto err = fdb_select_api_version(FDB_API_VERSION);
if (err) {
context->trace(
FDBSeverity::Info, "SelectAPIVersionFailed", { { "Error", std::string(fdb_get_error(err)) } });

View File

@ -23,17 +23,17 @@
namespace FDB {
const uint8_t DirectoryLayer::LITTLE_ENDIAN_LONG_ONE[8] = { 1, 0, 0, 0, 0, 0, 0, 0 };
const StringRef DirectoryLayer::HIGH_CONTENTION_KEY = LiteralStringRef("hca");
const StringRef DirectoryLayer::LAYER_KEY = LiteralStringRef("layer");
const StringRef DirectoryLayer::VERSION_KEY = LiteralStringRef("version");
const StringRef DirectoryLayer::HIGH_CONTENTION_KEY = "hca"_sr;
const StringRef DirectoryLayer::LAYER_KEY = "layer"_sr;
const StringRef DirectoryLayer::VERSION_KEY = "version"_sr;
const int64_t DirectoryLayer::SUB_DIR_KEY = 0;
const uint32_t DirectoryLayer::VERSION[3] = { 1, 0, 0 };
const StringRef DirectoryLayer::DEFAULT_NODE_SUBSPACE_PREFIX = LiteralStringRef("\xfe");
const StringRef DirectoryLayer::DEFAULT_NODE_SUBSPACE_PREFIX = "\xfe"_sr;
const Subspace DirectoryLayer::DEFAULT_NODE_SUBSPACE = Subspace(DEFAULT_NODE_SUBSPACE_PREFIX);
const Subspace DirectoryLayer::DEFAULT_CONTENT_SUBSPACE = Subspace();
const StringRef DirectoryLayer::PARTITION_LAYER = LiteralStringRef("partition");
const StringRef DirectoryLayer::PARTITION_LAYER = "partition"_sr;
DirectoryLayer::DirectoryLayer(Subspace nodeSubspace, Subspace contentSubspace, bool allowManualPrefixes)
: rootNode(nodeSubspace.get(nodeSubspace.key())), nodeSubspace(nodeSubspace), contentSubspace(contentSubspace),

View File

@ -31,7 +31,7 @@ typedef Standalone<KeyRef> Key;
typedef Standalone<ValueRef> Value;
inline Key keyAfter(const KeyRef& key) {
if (key == LiteralStringRef("\xff\xff"))
if (key == "\xff\xff"_sr)
return key;
Standalone<StringRef> r;
@ -43,7 +43,7 @@ inline Key keyAfter(const KeyRef& key) {
}
inline KeyRef keyAfter(const KeyRef& key, Arena& arena) {
if (key == LiteralStringRef("\xff\xff"))
if (key == "\xff\xff"_sr)
return key;
uint8_t* t = new (arena) uint8_t[key.size() + 1];
memcpy(t, key.begin(), key.size());

View File

@ -24,7 +24,9 @@
#pragma once
#include "bindings/flow/fdb_flow.h"
#include "fdbclient/Versionstamp.h"
#include "fdbclient/TupleVersionstamp.h"
typedef TupleVersionstamp Versionstamp;
namespace FDB {
struct Uuid {

View File

@ -38,7 +38,7 @@ THREAD_FUNC networkThread(void* fdb) {
}
ACTOR Future<Void> _test() {
API* fdb = FDB::API::selectAPIVersion(720);
API* fdb = FDB::API::selectAPIVersion(FDB_API_VERSION);
auto db = fdb->createDatabase();
state Reference<Transaction> tr = db->createTransaction();
@ -63,15 +63,14 @@ ACTOR Future<Void> _test() {
// wait( waitForAllReady( versions ) );
printf("Elapsed: %lf\n", timer_monotonic() - starttime);
tr->set(LiteralStringRef("foo"), LiteralStringRef("bar"));
tr->set("foo"_sr, "bar"_sr);
Optional<FDBStandalone<ValueRef>> v = wait(tr->get(LiteralStringRef("foo")));
Optional<FDBStandalone<ValueRef>> v = wait(tr->get("foo"_sr));
if (v.present()) {
printf("%s\n", v.get().toString().c_str());
}
FDBStandalone<RangeResultRef> r =
wait(tr->getRange(KeyRangeRef(LiteralStringRef("a"), LiteralStringRef("z")), 100));
FDBStandalone<RangeResultRef> r = wait(tr->getRange(KeyRangeRef("a"_sr, "z"_sr), 100));
for (auto kv : r) {
printf("%s is %s\n", kv.key.toString().c_str(), kv.value.toString().c_str());
@ -82,7 +81,7 @@ ACTOR Future<Void> _test() {
}
void fdb_flow_test() {
API* fdb = FDB::API::selectAPIVersion(720);
API* fdb = FDB::API::selectAPIVersion(FDB_API_VERSION);
fdb->setupNetwork();
startThread(networkThread, fdb);

View File

@ -545,11 +545,10 @@ struct DirectoryLogDirectoryFunc : InstructionFunc {
pathTuple.append(p, true);
}
instruction->tr->set(logSubspace.pack(LiteralStringRef("path"), true), pathTuple.pack());
instruction->tr->set(logSubspace.pack(LiteralStringRef("layer"), true),
Tuple().append(directory->getLayer()).pack());
instruction->tr->set(logSubspace.pack(LiteralStringRef("exists"), true), Tuple().append(exists ? 1 : 0).pack());
instruction->tr->set(logSubspace.pack(LiteralStringRef("children"), true), childrenTuple.pack());
instruction->tr->set(logSubspace.pack("path"_sr, true), pathTuple.pack());
instruction->tr->set(logSubspace.pack("layer"_sr, true), Tuple().append(directory->getLayer()).pack());
instruction->tr->set(logSubspace.pack("exists"_sr, true), Tuple().append(exists ? 1 : 0).pack());
instruction->tr->set(logSubspace.pack("children"_sr, true), childrenTuple.pack());
return Void();
}

View File

@ -470,12 +470,12 @@ ACTOR Future<Standalone<StringRef>> waitForVoid(Future<Void> f) {
try {
wait(f);
Tuple t;
t.append(LiteralStringRef("RESULT_NOT_PRESENT"));
t.append("RESULT_NOT_PRESENT"_sr);
return t.pack();
} catch (Error& e) {
// printf("FDBError1:%d\n", e.code());
Tuple t;
t.append(LiteralStringRef("ERROR"));
t.append("ERROR"_sr);
t.append(format("%d", e.code()));
// pack above as error string into another tuple
Tuple ret;
@ -493,7 +493,7 @@ ACTOR Future<Standalone<StringRef>> waitForValue(Future<FDBStandalone<KeyRef>> f
} catch (Error& e) {
// printf("FDBError2:%d\n", e.code());
Tuple t;
t.append(LiteralStringRef("ERROR"));
t.append("ERROR"_sr);
t.append(format("%d", e.code()));
// pack above as error string into another tuple
Tuple ret;
@ -509,7 +509,7 @@ ACTOR Future<Standalone<StringRef>> waitForValue(Future<Optional<FDBStandalone<V
if (value.present())
str = value.get();
else
str = LiteralStringRef("RESULT_NOT_PRESENT");
str = "RESULT_NOT_PRESENT"_sr;
Tuple t;
t.append(str);
@ -517,7 +517,7 @@ ACTOR Future<Standalone<StringRef>> waitForValue(Future<Optional<FDBStandalone<V
} catch (Error& e) {
// printf("FDBError3:%d\n", e.code());
Tuple t;
t.append(LiteralStringRef("ERROR"));
t.append("ERROR"_sr);
t.append(format("%d", e.code()));
// pack above as error string into another tuple
Tuple ret;
@ -543,7 +543,7 @@ ACTOR Future<Standalone<StringRef>> getKey(Future<FDBStandalone<KeyRef>> f, Stan
} catch (Error& e) {
// printf("FDBError4:%d\n", e.code());
Tuple t;
t.append(LiteralStringRef("ERROR"));
t.append("ERROR"_sr);
t.append(format("%d", e.code()));
// pack above as error string into another tuple
Tuple ret;
@ -670,7 +670,7 @@ struct GetEstimatedRangeSize : InstructionFunc {
state Standalone<StringRef> endKey = Tuple::unpack(s2).getString(0);
Future<int64_t> fsize = instruction->tr->getEstimatedRangeSizeBytes(KeyRangeRef(beginKey, endKey));
int64_t size = wait(fsize);
data->stack.pushTuple(LiteralStringRef("GOT_ESTIMATED_RANGE_SIZE"));
data->stack.pushTuple("GOT_ESTIMATED_RANGE_SIZE"_sr);
return Void();
}
@ -698,7 +698,7 @@ struct GetRangeSplitPoints : InstructionFunc {
Future<FDBStandalone<VectorRef<KeyRef>>> fsplitPoints =
instruction->tr->getRangeSplitPoints(KeyRangeRef(beginKey, endKey), chunkSize);
FDBStandalone<VectorRef<KeyRef>> splitPoints = wait(fsplitPoints);
data->stack.pushTuple(LiteralStringRef("GOT_RANGE_SPLIT_POINTS"));
data->stack.pushTuple("GOT_RANGE_SPLIT_POINTS"_sr);
return Void();
}
@ -743,7 +743,7 @@ struct GetReadVersionFunc : InstructionFunc {
ACTOR static Future<Void> call(Reference<FlowTesterData> data, Reference<InstructionData> instruction) {
Version v = wait(instruction->tr->getReadVersion());
data->lastVersion = v;
data->stack.pushTuple(LiteralStringRef("GOT_READ_VERSION"));
data->stack.pushTuple("GOT_READ_VERSION"_sr);
return Void();
}
};
@ -767,7 +767,7 @@ struct GetCommittedVersionFunc : InstructionFunc {
static Future<Void> call(Reference<FlowTesterData> const& data, Reference<InstructionData> const& instruction) {
data->lastVersion = instruction->tr->getCommittedVersion();
data->stack.pushTuple(LiteralStringRef("GOT_COMMITTED_VERSION"));
data->stack.pushTuple("GOT_COMMITTED_VERSION"_sr);
return Void();
}
};
@ -781,7 +781,7 @@ struct GetApproximateSizeFunc : InstructionFunc {
ACTOR static Future<Void> call(Reference<FlowTesterData> data, Reference<InstructionData> instruction) {
int64_t _ = wait(instruction->tr->getApproximateSize());
(void)_; // disable unused variable warning
data->stack.pushTuple(LiteralStringRef("GOT_APPROXIMATE_SIZE"));
data->stack.pushTuple("GOT_APPROXIMATE_SIZE"_sr);
return Void();
}
};
@ -1485,7 +1485,7 @@ struct ReadConflictKeyFunc : InstructionFunc {
// printf("=========READ_CONFLICT_KEY:%s\n", printable(key).c_str());
instruction->tr->addReadConflictKey(key);
data->stack.pushTuple(LiteralStringRef("SET_CONFLICT_KEY"));
data->stack.pushTuple("SET_CONFLICT_KEY"_sr);
return Void();
}
};
@ -1506,7 +1506,7 @@ struct WriteConflictKeyFunc : InstructionFunc {
// printf("=========WRITE_CONFLICT_KEY:%s\n", printable(key).c_str());
instruction->tr->addWriteConflictKey(key);
data->stack.pushTuple(LiteralStringRef("SET_CONFLICT_KEY"));
data->stack.pushTuple("SET_CONFLICT_KEY"_sr);
return Void();
}
};
@ -1529,7 +1529,7 @@ struct ReadConflictRangeFunc : InstructionFunc {
// printf("=========READ_CONFLICT_RANGE:%s:%s\n", printable(begin).c_str(), printable(end).c_str());
instruction->tr->addReadConflictRange(KeyRange(KeyRangeRef(begin, end)));
data->stack.pushTuple(LiteralStringRef("SET_CONFLICT_RANGE"));
data->stack.pushTuple("SET_CONFLICT_RANGE"_sr);
return Void();
}
};
@ -1553,7 +1553,7 @@ struct WriteConflictRangeFunc : InstructionFunc {
// printf("=========WRITE_CONFLICT_RANGE:%s:%s\n", printable(begin).c_str(), printable(end).c_str());
instruction->tr->addWriteConflictRange(KeyRange(KeyRangeRef(begin, end)));
data->stack.pushTuple(LiteralStringRef("SET_CONFLICT_RANGE"));
data->stack.pushTuple("SET_CONFLICT_RANGE"_sr);
return Void();
}
};
@ -1643,10 +1643,8 @@ struct UnitTestsFunc : InstructionFunc {
Optional<StringRef>(StringRef((const uint8_t*)&locationCacheSize, 8)));
data->db->setDatabaseOption(FDBDatabaseOption::FDB_DB_OPTION_MAX_WATCHES,
Optional<StringRef>(StringRef((const uint8_t*)&maxWatches, 8)));
data->db->setDatabaseOption(FDBDatabaseOption::FDB_DB_OPTION_DATACENTER_ID,
Optional<StringRef>(LiteralStringRef("dc_id")));
data->db->setDatabaseOption(FDBDatabaseOption::FDB_DB_OPTION_MACHINE_ID,
Optional<StringRef>(LiteralStringRef("machine_id")));
data->db->setDatabaseOption(FDBDatabaseOption::FDB_DB_OPTION_DATACENTER_ID, Optional<StringRef>("dc_id"_sr));
data->db->setDatabaseOption(FDBDatabaseOption::FDB_DB_OPTION_MACHINE_ID, Optional<StringRef>("machine_id"_sr));
data->db->setDatabaseOption(FDBDatabaseOption::FDB_DB_OPTION_SNAPSHOT_RYW_ENABLE);
data->db->setDatabaseOption(FDBDatabaseOption::FDB_DB_OPTION_SNAPSHOT_RYW_DISABLE);
data->db->setDatabaseOption(FDBDatabaseOption::FDB_DB_OPTION_TRANSACTION_LOGGING_MAX_FIELD_LENGTH,
@ -1685,13 +1683,13 @@ struct UnitTestsFunc : InstructionFunc {
Optional<StringRef>(StringRef((const uint8_t*)&maxRetryDelay, 8)));
tr->setOption(FDBTransactionOption::FDB_TR_OPTION_USED_DURING_COMMIT_PROTECTION_DISABLE);
tr->setOption(FDBTransactionOption::FDB_TR_OPTION_TRANSACTION_LOGGING_ENABLE,
Optional<StringRef>(LiteralStringRef("my_transaction")));
Optional<StringRef>("my_transaction"_sr));
tr->setOption(FDBTransactionOption::FDB_TR_OPTION_READ_LOCK_AWARE);
tr->setOption(FDBTransactionOption::FDB_TR_OPTION_LOCK_AWARE);
tr->setOption(FDBTransactionOption::FDB_TR_OPTION_INCLUDE_PORT_IN_ADDRESS);
tr->setOption(FDBTransactionOption::FDB_TR_OPTION_REPORT_CONFLICTING_KEYS);
Optional<FDBStandalone<ValueRef>> _ = wait(tr->get(LiteralStringRef("\xff")));
Optional<FDBStandalone<ValueRef>> _ = wait(tr->get("\xff"_sr));
tr->cancel();
return Void();
@ -1724,13 +1722,13 @@ ACTOR static Future<Void> doInstructions(Reference<FlowTesterData> data) {
Tuple opTuple = Tuple::unpack(data->instructions[idx].value);
state Standalone<StringRef> op = opTuple.getString(0);
state bool isDatabase = op.endsWith(LiteralStringRef("_DATABASE"));
state bool isSnapshot = op.endsWith(LiteralStringRef("_SNAPSHOT"));
state bool isDirectory = op.startsWith(LiteralStringRef("DIRECTORY_"));
state bool isDatabase = op.endsWith("_DATABASE"_sr);
state bool isSnapshot = op.endsWith("_SNAPSHOT"_sr);
state bool isDirectory = op.startsWith("DIRECTORY_"_sr);
try {
if (LOG_INSTRUCTIONS) {
if (op != LiteralStringRef("SWAP") && op != LiteralStringRef("PUSH")) {
if (op != "SWAP"_sr && op != "PUSH"_sr) {
printf("%zu. %s\n", idx, tupleToString(opTuple).c_str());
fflush(stdout);
}
@ -1773,7 +1771,7 @@ ACTOR static Future<Void> doInstructions(Reference<FlowTesterData> data) {
if (opsThatCreateDirectories.count(op.toString())) {
data->directoryData.directoryList.push_back(DirectoryOrSubspace());
}
data->stack.pushTuple(LiteralStringRef("DIRECTORY_ERROR"));
data->stack.pushTuple("DIRECTORY_ERROR"_sr);
} else {
data->stack.pushError(e.code());
}
@ -1873,7 +1871,7 @@ ACTOR void _test_versionstamp() {
try {
g_network = newNet2(TLSConfig());
API* fdb = FDB::API::selectAPIVersion(720);
API* fdb = FDB::API::selectAPIVersion(FDB_API_VERSION);
fdb->setupNetwork();
startThread(networkThread, fdb);
@ -1883,15 +1881,14 @@ ACTOR void _test_versionstamp() {
state Future<FDBStandalone<StringRef>> ftrVersion = tr->getVersionstamp();
tr->atomicOp(LiteralStringRef("foo"),
LiteralStringRef("blahblahbl\x00\x00\x00\x00"),
FDBMutationType::FDB_MUTATION_TYPE_SET_VERSIONSTAMPED_VALUE);
tr->atomicOp(
"foo"_sr, "blahblahbl\x00\x00\x00\x00"_sr, FDBMutationType::FDB_MUTATION_TYPE_SET_VERSIONSTAMPED_VALUE);
wait(tr->commit()); // should use retry loop
tr->reset();
Optional<FDBStandalone<StringRef>> optionalDbVersion = wait(tr->get(LiteralStringRef("foo")));
Optional<FDBStandalone<StringRef>> optionalDbVersion = wait(tr->get("foo"_sr));
state FDBStandalone<StringRef> dbVersion = optionalDbVersion.get();
FDBStandalone<StringRef> trVersion = wait(ftrVersion);

View File

@ -71,7 +71,7 @@ struct FlowTesterStack {
void pushError(int errorCode) {
FDB::Tuple t;
t.append(LiteralStringRef("ERROR"));
t.append("ERROR"_sr);
t.append(format("%d", errorCode));
// pack above as error string into another tuple
pushTuple(t.pack().toString());

View File

@ -128,7 +128,7 @@ func APIVersion(version int) error {
return errAPIVersionAlreadySet
}
if version < 200 || version > 720 {
if version < 200 || version > headerVersion {
return errAPIVersionNotSupported
}

View File

@ -29,10 +29,12 @@ import (
"github.com/apple/foundationdb/bindings/go/src/fdb"
)
const API_VERSION int = 720
func ExampleOpenDefault() {
var e error
e = fdb.APIVersion(720)
e = fdb.APIVersion(API_VERSION)
if e != nil {
fmt.Printf("Unable to set API version: %v\n", e)
return
@ -52,7 +54,7 @@ func ExampleOpenDefault() {
}
func TestVersionstamp(t *testing.T) {
fdb.MustAPIVersion(720)
fdb.MustAPIVersion(API_VERSION)
db := fdb.MustOpenDefault()
setVs := func(t fdb.Transactor, key fdb.Key) (fdb.FutureKey, error) {
@ -98,7 +100,7 @@ func TestVersionstamp(t *testing.T) {
}
func TestReadTransactionOptions(t *testing.T) {
fdb.MustAPIVersion(720)
fdb.MustAPIVersion(API_VERSION)
db := fdb.MustOpenDefault()
_, e := db.ReadTransact(func(rtr fdb.ReadTransaction) (interface{}, error) {
rtr.Options().SetAccessSystemKeys()
@ -110,7 +112,7 @@ func TestReadTransactionOptions(t *testing.T) {
}
func ExampleTransactor() {
fdb.MustAPIVersion(720)
fdb.MustAPIVersion(API_VERSION)
db := fdb.MustOpenDefault()
setOne := func(t fdb.Transactor, key fdb.Key, value []byte) error {
@ -161,7 +163,7 @@ func ExampleTransactor() {
}
func ExampleReadTransactor() {
fdb.MustAPIVersion(720)
fdb.MustAPIVersion(API_VERSION)
db := fdb.MustOpenDefault()
getOne := func(rt fdb.ReadTransactor, key fdb.Key) ([]byte, error) {
@ -214,7 +216,7 @@ func ExampleReadTransactor() {
}
func ExamplePrefixRange() {
fdb.MustAPIVersion(720)
fdb.MustAPIVersion(API_VERSION)
db := fdb.MustOpenDefault()
tr, e := db.CreateTransaction()
@ -253,7 +255,7 @@ func ExamplePrefixRange() {
}
func ExampleRangeIterator() {
fdb.MustAPIVersion(720)
fdb.MustAPIVersion(API_VERSION)
db := fdb.MustOpenDefault()
tr, e := db.CreateTransaction()

View File

@ -102,6 +102,11 @@ func (o NetworkOptions) SetTraceFileIdentifier(param string) error {
return o.setOpt(36, []byte(param))
}
// Use the same base trace file name for all client threads as it did before version 7.2. The current default behavior is to use distinct trace file names for client threads by including their version and thread index.
func (o NetworkOptions) SetTraceShareAmongClientThreads() error {
return o.setOpt(37, nil)
}
// Set file suffix for partially written log files.
//
// Parameter: Append this suffix to partially written log files. When a log file is complete, it is renamed to remove the suffix. No separator is added between the file and the suffix. If you want to add a file extension, you should include the separator - e.g. '.tmp' instead of 'tmp' to add the 'tmp' extension.
@ -261,6 +266,11 @@ func (o NetworkOptions) SetEnableRunLoopProfiling() error {
return o.setOpt(71, nil)
}
// Prevents the multi-version client API from being disabled, even if no external clients are configured. This option is required to use GRV caching.
func (o NetworkOptions) SetDisableClientBypass() error {
return o.setOpt(72, nil)
}
// Enable client buggify - will make requests randomly fail (intended for client testing)
func (o NetworkOptions) SetClientBuggifyEnable() error {
return o.setOpt(80, nil)
@ -617,11 +627,18 @@ func (o TransactionOptions) SetBypassUnreadable() error {
return o.setOpt(1100, nil)
}
// Allows this transaction to use cached GRV from the database context. Defaults to off. Upon first usage, starts a background updater to periodically update the cache to avoid stale read versions.
// Allows this transaction to use cached GRV from the database context. Defaults to off. Upon first usage, starts a background updater to periodically update the cache to avoid stale read versions. The disable_client_bypass option must also be set.
func (o TransactionOptions) SetUseGrvCache() error {
return o.setOpt(1101, nil)
}
// Attach given authorization token to the transaction such that subsequent tenant-aware requests are authorized
//
// Parameter: A JSON Web Token authorized to access data belonging to one or more tenants, indicated by 'tenants' claim of the token's payload.
func (o TransactionOptions) SetAuthorizationToken(param string) error {
return o.setOpt(2000, []byte(param))
}
type StreamingMode int
const (

View File

@ -140,20 +140,19 @@ vexillographer_compile(TARGET fdb_java_options LANG java OUT ${GENERATED_JAVA_DI
OUTPUT ${GENERATED_JAVA_FILES})
set(SYSTEM_NAME "linux")
if (APPLE)
if(APPLE)
set(SYSTEM_NAME "osx")
endif()
if(OPEN_FOR_IDE)
add_library(fdb_java OBJECT fdbJNI.cpp)
add_library(java_workloads OBJECT JavaWorkload.cpp)
else()
add_library(fdb_java SHARED fdbJNI.cpp)
add_library(java_workloads SHARED JavaWorkload.cpp)
target_link_libraries(java_workloads PRIVATE fdb_java_native)
endif()
if (NOT WIN32 AND NOT APPLE AND NOT OPEN_FOR_IDE)
if(NOT WIN32 AND NOT APPLE AND NOT OPEN_FOR_IDE)
target_link_options(java_workloads PRIVATE "LINKER:--version-script=${CMAKE_SOURCE_DIR}/bindings/c/external_workload.map,-z,nodelete")
endif()
@ -164,11 +163,13 @@ target_link_libraries(fdb_java PRIVATE fdb_java_native)
if(APPLE)
set_target_properties(fdb_java PROPERTIES SUFFIX ".jnilib")
endif()
set_target_properties(java_workloads PROPERTIES
LIBRARY_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/share/foundationdb")
target_link_libraries(java_workloads PUBLIC fdb_c ${JNI_LIBRARIES})
target_link_libraries(java_workloads PRIVATE flow) # mostly for boost
target_include_directories(java_workloads PUBLIC ${JNI_INCLUDE_DIRS})
if(NOT OPEN_FOR_IDE)
set_target_properties(java_workloads PROPERTIES
LIBRARY_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/share/foundationdb")
target_link_libraries(java_workloads PUBLIC fdb_c ${JNI_LIBRARIES})
target_link_libraries(java_workloads PRIVATE flow) # mostly for boost
target_include_directories(java_workloads PUBLIC ${JNI_INCLUDE_DIRS})
endif()
set(CMAKE_JAVA_COMPILE_FLAGS "-source" "1.8" "-target" "1.8" "-XDignore.symbol.file")
set(CMAKE_JNI_TARGET TRUE)
@ -240,18 +241,18 @@ if(NOT OPEN_FOR_IDE)
set(lib_destination "osx/x86_64")
endif()
else()
if(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64")
set(lib_destination "linux/aarch64")
elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "ppc64le")
set(lib_destination "linux/ppc64le")
else()
set(lib_destination "linux/amd64")
if(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64")
set(lib_destination "linux/aarch64")
elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "ppc64le")
set(lib_destination "linux/ppc64le")
else()
set(lib_destination "linux/amd64")
endif()
endif()
set(lib_destination "${unpack_dir}/lib/${lib_destination}")
set(jni_package "${CMAKE_BINARY_DIR}/packages/lib")
set(lib_destination "${unpack_dir}/lib/${lib_destination}")
set(jni_package "${CMAKE_BINARY_DIR}/packages/lib")
file(MAKE_DIRECTORY ${lib_destination})
file(MAKE_DIRECTORY ${jni_package})
file(MAKE_DIRECTORY ${jni_package})
add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/lib_copied
COMMAND ${CMAKE_COMMAND} -E copy $<TARGET_FILE:fdb_java> ${lib_destination} &&
${CMAKE_COMMAND} -E copy $<TARGET_FILE:fdb_java> ${jni_package} &&
@ -290,7 +291,7 @@ if(NOT OPEN_FOR_IDE)
set(TEST_CP ${tests_jar} ${target_jar})
if(RUN_JUNIT_TESTS OR RUN_JAVA_INTEGRATION_TESTS)
if (USE_SANITIZER)
if(USE_SANITIZER)
message(WARNING "Cannot run java tests with sanitizer builds")
return()
endif()
@ -299,7 +300,7 @@ if(NOT OPEN_FOR_IDE)
file(DOWNLOAD "https://search.maven.org/remotecontent?filepath=org/junit/jupiter/junit-jupiter-engine/5.7.1/junit-jupiter-engine-5.7.1.jar"
${CMAKE_BINARY_DIR}/packages/junit-jupiter-engine-5.7.1.jar
EXPECTED_HASH SHA256=56616c9350b3624f76cffef6b24ce7bb222915bfd5688f96d3cf4cef34f077cb)
# https://search.maven.org/remotecontent?filepath=org/junit/jupiter/junit-jupiter-api/5.7.1/junit-jupiter-api-5.7.1.jar
# https://search.maven.org/remotecontent?filepath=org/junit/jupiter/junit-jupiter-api/5.7.1/junit-jupiter-api-5.7.1.jar
file(DOWNLOAD "https://search.maven.org/remotecontent?filepath=org/junit/jupiter/junit-jupiter-api/5.7.1/junit-jupiter-api-5.7.1.jar"
${CMAKE_BINARY_DIR}/packages/junit-jupiter-api-5.7.1.jar
EXPECTED_HASH SHA256=ce7b985bc469e2625759a4ebc45533c70581a05a348278c1d6408e9b2e35e314)
@ -350,20 +351,20 @@ if(NOT OPEN_FOR_IDE)
# can be found at https://cmake.org/cmake/help/v3.19/manual/ctest.1.html)
add_jar(fdb-junit SOURCES ${JAVA_JUNIT_TESTS} ${JUNIT_RESOURCES} INCLUDE_JARS fdb-java
${CMAKE_BINARY_DIR}/packages/junit-jupiter-api-5.7.1.jar
${CMAKE_BINARY_DIR}/packages/junit-jupiter-engine-5.7.1.jar
${CMAKE_BINARY_DIR}/packages/junit-jupiter-params-5.7.1.jar
${CMAKE_BINARY_DIR}/packages/opentest4j-1.2.0.jar
${CMAKE_BINARY_DIR}/packages/apiguardian-api-1.1.1.jar
)
${CMAKE_BINARY_DIR}/packages/junit-jupiter-api-5.7.1.jar
${CMAKE_BINARY_DIR}/packages/junit-jupiter-engine-5.7.1.jar
${CMAKE_BINARY_DIR}/packages/junit-jupiter-params-5.7.1.jar
${CMAKE_BINARY_DIR}/packages/opentest4j-1.2.0.jar
${CMAKE_BINARY_DIR}/packages/apiguardian-api-1.1.1.jar
)
get_property(junit_jar_path TARGET fdb-junit PROPERTY JAR_FILE)
add_test(NAME java-unit
COMMAND ${Java_JAVA_EXECUTABLE}
-classpath "${target_jar}:${junit_jar_path}:${JUNIT_CLASSPATH}"
-Djava.library.path=${CMAKE_BINARY_DIR}/lib
org.junit.platform.console.ConsoleLauncher "--details=summary" "-class-path=${junit_jar_path}" "--scan-classpath" "--disable-banner"
)
-classpath "${target_jar}:${junit_jar_path}:${JUNIT_CLASSPATH}"
-Djava.library.path=${CMAKE_BINARY_DIR}/lib
org.junit.platform.console.ConsoleLauncher "--details=summary" "-class-path=${junit_jar_path}" "--scan-classpath" "--disable-banner"
)
endif()
@ -393,28 +394,28 @@ if(NOT OPEN_FOR_IDE)
# the directory layer with a unique path, etc.)
#
add_jar(fdb-integration SOURCES ${JAVA_INTEGRATION_TESTS} ${JAVA_INTEGRATION_RESOURCES} INCLUDE_JARS fdb-java
${CMAKE_BINARY_DIR}/packages/junit-jupiter-api-5.7.1.jar
${CMAKE_BINARY_DIR}/packages/junit-jupiter-engine-5.7.1.jar
${CMAKE_BINARY_DIR}/packages/junit-jupiter-params-5.7.1.jar
${CMAKE_BINARY_DIR}/packages/opentest4j-1.2.0.jar
${CMAKE_BINARY_DIR}/packages/apiguardian-api-1.1.1.jar)
${CMAKE_BINARY_DIR}/packages/junit-jupiter-api-5.7.1.jar
${CMAKE_BINARY_DIR}/packages/junit-jupiter-engine-5.7.1.jar
${CMAKE_BINARY_DIR}/packages/junit-jupiter-params-5.7.1.jar
${CMAKE_BINARY_DIR}/packages/opentest4j-1.2.0.jar
${CMAKE_BINARY_DIR}/packages/apiguardian-api-1.1.1.jar)
get_property(integration_jar_path TARGET fdb-integration PROPERTY JAR_FILE)
# add_fdbclient_test will set FDB_CLUSTER_FILE if it's not set already
add_fdbclient_test(NAME java-integration
COMMAND ${Java_JAVA_EXECUTABLE}
-classpath "${target_jar}:${integration_jar_path}:${JUNIT_CLASSPATH}"
-Djava.library.path=${CMAKE_BINARY_DIR}/lib
org.junit.platform.console.ConsoleLauncher "--details=summary" "--class-path=${integration_jar_path}" "--scan-classpath" "--disable-banner" "-T MultiClient"
)
COMMAND ${Java_JAVA_EXECUTABLE}
-classpath "${target_jar}:${integration_jar_path}:${JUNIT_CLASSPATH}"
-Djava.library.path=${CMAKE_BINARY_DIR}/lib
org.junit.platform.console.ConsoleLauncher "--details=summary" "--class-path=${integration_jar_path}" "--scan-classpath" "--disable-banner" "-T MultiClient"
)
add_multi_fdbclient_test(NAME java-multi-integration
COMMAND ${Java_JAVA_EXECUTABLE}
-classpath "${target_jar}:${integration_jar_path}:${JUNIT_CLASSPATH}"
-Djava.library.path=${CMAKE_BINARY_DIR}/lib
org.junit.platform.console.ConsoleLauncher "--details=summary" "--class-path=${integration_jar_path}" "--scan-classpath" "--disable-banner" "-t MultiClient"
)
COMMAND ${Java_JAVA_EXECUTABLE}
-classpath "${target_jar}:${integration_jar_path}:${JUNIT_CLASSPATH}"
-Djava.library.path=${CMAKE_BINARY_DIR}/lib
org.junit.platform.console.ConsoleLauncher "--details=summary" "--class-path=${integration_jar_path}" "--scan-classpath" "--disable-banner" "-t MultiClient"
)
endif()
endif()

View File

@ -379,7 +379,7 @@ struct JVM {
jmethodID selectMethod =
env->GetStaticMethodID(fdbClass, "selectAPIVersion", "(I)Lcom/apple/foundationdb/FDB;");
checkException();
auto fdbInstance = env->CallStaticObjectMethod(fdbClass, selectMethod, jint(720));
auto fdbInstance = env->CallStaticObjectMethod(fdbClass, selectMethod, jint(FDB_API_VERSION));
checkException();
env->CallObjectMethod(fdbInstance, getMethod(fdbClass, "disableShutdownHook", "()V"));
checkException();

View File

@ -1037,7 +1037,7 @@ JNIEXPORT jlong JNICALL Java_com_apple_foundationdb_FDBDatabase_Database_1verify
return 0;
}
FDBFuture* f = fdb_database_list_blobbified_ranges(
FDBFuture* f = fdb_database_verify_blob_range(
tr, startKey, jenv->GetArrayLength(beginKeyBytes), endKey, jenv->GetArrayLength(endKeyBytes), version);
jenv->ReleaseByteArrayElements(beginKeyBytes, (jbyte*)startKey, JNI_ABORT);
jenv->ReleaseByteArrayElements(endKeyBytes, (jbyte*)endKey, JNI_ABORT);

View File

@ -40,6 +40,8 @@ import org.junit.jupiter.api.Assertions;
* This test is to verify the atomicity of transactions.
*/
public class CycleMultiClientIntegrationTest {
public static final int API_VERSION = 720;
public static final MultiClientHelper clientHelper = new MultiClientHelper();
// more write txn than validate txn, as parent thread waits only for validate txn.
@ -51,7 +53,7 @@ public class CycleMultiClientIntegrationTest {
private static List<String> expected = new ArrayList<>(Arrays.asList("0", "1", "2", "3"));
public static void main(String[] args) throws Exception {
FDB fdb = FDB.selectAPIVersion(720);
FDB fdb = FDB.selectAPIVersion(API_VERSION);
setupThreads(fdb);
Collection<Database> dbs = clientHelper.openDatabases(fdb); // the clientHelper will close the databases for us
System.out.println("Starting tests");

View File

@ -40,7 +40,8 @@ import org.junit.jupiter.api.extension.ExtendWith;
*/
@ExtendWith(RequiresDatabase.class)
class DirectoryTest {
private static final FDB fdb = FDB.selectAPIVersion(720);
public static final int API_VERSION = 720;
private static final FDB fdb = FDB.selectAPIVersion(API_VERSION);
@Test
void testCanCreateDirectory() throws Exception {

View File

@ -41,7 +41,8 @@ import org.junit.jupiter.api.extension.ExtendWith;
@ExtendWith(RequiresDatabase.class)
class MappedRangeQueryIntegrationTest {
private static final FDB fdb = FDB.selectAPIVersion(720);
public static final int API_VERSION = 720;
private static final FDB fdb = FDB.selectAPIVersion(API_VERSION);
public String databaseArg = null;
private Database openFDB() { return fdb.open(databaseArg); }
@ -110,7 +111,7 @@ class MappedRangeQueryIntegrationTest {
boolean validate = true;
@Test
void comparePerformance() {
FDB fdb = FDB.selectAPIVersion(720);
FDB fdb = FDB.selectAPIVersion(API_VERSION);
try (Database db = openFDB()) {
insertRecordsWithIndexes(numRecords, db);
instrument(rangeQueryAndThenRangeQueries, "rangeQueryAndThenRangeQueries", db);

View File

@ -41,7 +41,8 @@ import org.junit.jupiter.api.extension.ExtendWith;
*/
@ExtendWith(RequiresDatabase.class)
class RangeQueryIntegrationTest {
private static final FDB fdb = FDB.selectAPIVersion(720);
public static final int API_VERSION = 720;
private static final FDB fdb = FDB.selectAPIVersion(API_VERSION);
@BeforeEach
@AfterEach

View File

@ -41,6 +41,8 @@ import org.junit.jupiter.api.Assertions;
* are still seeting the initialValue even after new transactions set them to a new value.
*/
public class RepeatableReadMultiThreadClientTest {
public static final int API_VERSION = 720;
public static final MultiClientHelper clientHelper = new MultiClientHelper();
private static final int oldValueReadCount = 30;
@ -52,7 +54,7 @@ public class RepeatableReadMultiThreadClientTest {
private static final Map<Thread, OldValueReader> threadToOldValueReaders = new HashMap<>();
public static void main(String[] args) throws Exception {
FDB fdb = FDB.selectAPIVersion(720);
FDB fdb = FDB.selectAPIVersion(API_VERSION);
setupThreads(fdb);
Collection<Database> dbs = clientHelper.openDatabases(fdb); // the clientHelper will close the databases for us
System.out.println("Starting tests");

View File

@ -47,6 +47,7 @@ import org.opentest4j.TestAbortedException;
* be running a server and you don't want to deal with spurious test failures.
*/
public class RequiresDatabase implements ExecutionCondition, BeforeAllCallback {
public static final int API_VERSION = 720;
public static boolean canRunIntegrationTest() {
String prop = System.getProperty("run.integration.tests");
@ -80,7 +81,7 @@ public class RequiresDatabase implements ExecutionCondition, BeforeAllCallback {
* assume that if we are here, then canRunIntegrationTest() is returning true and we don't have to bother
* checking it.
*/
try (Database db = FDB.selectAPIVersion(720).open()) {
try (Database db = FDB.selectAPIVersion(API_VERSION).open()) {
db.run(tr -> {
CompletableFuture<byte[]> future = tr.get("test".getBytes());

View File

@ -19,6 +19,8 @@ import org.junit.jupiter.api.Assertions;
* This test is to verify the causal consistency of transactions for mutli-threaded client.
*/
public class SidebandMultiThreadClientTest {
public static final int API_VERSION = 720;
public static final MultiClientHelper clientHelper = new MultiClientHelper();
private static final Map<Database, BlockingQueue<String>> db2Queues = new HashMap<>();
@ -26,7 +28,7 @@ public class SidebandMultiThreadClientTest {
private static final int txnCnt = 1000;
public static void main(String[] args) throws Exception {
FDB fdb = FDB.selectAPIVersion(720);
FDB fdb = FDB.selectAPIVersion(API_VERSION);
setupThreads(fdb);
Collection<Database> dbs = clientHelper.openDatabases(fdb); // the clientHelper will close the databases for us
for (Database db : dbs) {

View File

@ -29,6 +29,8 @@ import org.junit.jupiter.api.extension.ExtensionContext;
* are not available for any reason.
*/
public class FDBLibraryRule implements BeforeAllCallback {
public static final int CURRENT_API_VERSION = 720;
private final int apiVersion;
// because FDB is a singleton (currently), this isn't a super-useful cache,
@ -37,7 +39,7 @@ public class FDBLibraryRule implements BeforeAllCallback {
public FDBLibraryRule(int apiVersion) { this.apiVersion = apiVersion; }
public static FDBLibraryRule current() { return new FDBLibraryRule(720); }
public static FDBLibraryRule current() { return new FDBLibraryRule(CURRENT_API_VERSION); }
public static FDBLibraryRule v63() { return new FDBLibraryRule(630); }

View File

@ -161,6 +161,19 @@ public interface Database extends AutoCloseable, TransactionContext {
*/
double getMainThreadBusyness();
/**
* Runs {@link #purgeBlobGranules(Function)} on the default executor.
*
* @param beginKey start of the key range
* @param endKey end of the key range
* @param force if true delete all data, if not keep data >= purgeVersion
*
* @return the key to watch for purge complete
*/
default CompletableFuture<byte[]> purgeBlobGranules(byte[] beginKey, byte[] endKey, boolean force) {
return purgeBlobGranules(beginKey, endKey, -2, force, getExecutor());
}
/**
* Runs {@link #purgeBlobGranules(Function)} on the default executor.
*
@ -242,7 +255,7 @@ public interface Database extends AutoCloseable, TransactionContext {
}
/**
* Sets a range to be unblobbified in the database.
* Unsets a blobbified range in the database. The range must be aligned to known blob ranges.
*
* @param beginKey start of the key range
* @param endKey end of the key range
@ -260,7 +273,7 @@ public interface Database extends AutoCloseable, TransactionContext {
* @param rangeLimit batch size
* @param e the {@link Executor} to use for asynchronous callbacks
* @return a future with the list of blobbified ranges.
* @return a future with the list of blobbified ranges: [lastLessThan(beginKey), firstGreaterThanOrEqual(endKey)]
*/
default CompletableFuture<KeyRangeArrayResult> listBlobbifiedRanges(byte[] beginKey, byte[] endKey, int rangeLimit) {
return listBlobbifiedRanges(beginKey, endKey, rangeLimit, getExecutor());
@ -274,10 +287,22 @@ public interface Database extends AutoCloseable, TransactionContext {
* @param rangeLimit batch size
* @param e the {@link Executor} to use for asynchronous callbacks
* @return a future with the list of blobbified ranges.
* @return a future with the list of blobbified ranges: [lastLessThan(beginKey), firstGreaterThanOrEqual(endKey)]
*/
CompletableFuture<KeyRangeArrayResult> listBlobbifiedRanges(byte[] beginKey, byte[] endKey, int rangeLimit, Executor e);
/**
* Runs {@link #verifyBlobRange(Function)} on the default executor.
*
* @param beginKey start of the key range
* @param endKey end of the key range
*
* @return a future with the version of the last blob granule.
*/
default CompletableFuture<Long> verifyBlobRange(byte[] beginKey, byte[] endKey) {
return verifyBlobRange(beginKey, endKey, -2, getExecutor());
}
/**
* Runs {@link #verifyBlobRange(Function)} on the default executor.
*

View File

@ -191,11 +191,6 @@ public class FDB {
Select_API_version(version);
singleton = new FDB(version);
if (version < 720) {
TenantManagement.TENANT_MAP_PREFIX = ByteArrayUtil.join(new byte[] { (byte)255, (byte)255 },
"/management/tenant_map/".getBytes());
}
return singleton;
}

View File

@ -262,7 +262,7 @@ public class TenantManagement {
this.begin = ByteArrayUtil.join(TENANT_MAP_PREFIX, begin);
this.end = ByteArrayUtil.join(TENANT_MAP_PREFIX, end);
tr.options().setReadSystemKeys();
tr.options().setRawAccess();
tr.options().setLockAware();
firstGet = tr.getRange(this.begin, this.end, limit);

View File

@ -28,8 +28,10 @@ import com.apple.foundationdb.FDB;
import com.apple.foundationdb.tuple.Tuple;
public class Example {
public static final int apiVersion = 720;
public static void main(String[] args) {
FDB fdb = FDB.selectAPIVersion(720);
FDB fdb = FDB.selectAPIVersion(apiVersion);
try(Database db = fdb.open()) {
// Run an operation on the database

View File

@ -29,11 +29,13 @@ import com.apple.foundationdb.FDB;
import com.apple.foundationdb.Transaction;
public class BlockingBenchmark {
public static final int API_VERSION = 720;
private static final int REPS = 100000;
private static final int PARALLEL = 100;
public static void main(String[] args) throws InterruptedException {
FDB fdb = FDB.selectAPIVersion(720);
FDB fdb = FDB.selectAPIVersion(API_VERSION);
// The cluster file DOES NOT need to be valid, although it must exist.
// This is because the database is never really contacted in this test.

View File

@ -30,6 +30,8 @@ import com.apple.foundationdb.Database;
import com.apple.foundationdb.FDB;
public class ConcurrentGetSetGet {
public static final int API_VERSION = 720;
public static final Charset UTF8 = Charset.forName("UTF-8");
final Semaphore semaphore = new Semaphore(CONCURRENCY);
@ -48,7 +50,7 @@ public class ConcurrentGetSetGet {
}
public static void main(String[] args) {
try(Database database = FDB.selectAPIVersion(720).open()) {
try(Database database = FDB.selectAPIVersion(API_VERSION).open()) {
new ConcurrentGetSetGet().apply(database);
}
}

View File

@ -84,8 +84,8 @@ abstract class Context implements Runnable, AutoCloseable {
try {
executeOperations();
} catch(Throwable t) {
// EAT
t.printStackTrace();
System.exit(1);
}
while(children.size() > 0) {
//System.out.println("Shutting down...waiting on " + children.size() + " threads");
@ -147,10 +147,11 @@ abstract class Context implements Runnable, AutoCloseable {
private static synchronized boolean newTransaction(Database db, Optional<Tenant> tenant, String trName, boolean allowReplace) {
TransactionState oldState = transactionMap.get(trName);
if (oldState != null) {
releaseTransaction(oldState.transaction);
}
else if (!allowReplace) {
return false;
if (allowReplace) {
releaseTransaction(oldState.transaction);
} else {
return false;
}
}
TransactionState newState = new TransactionState(createTransaction(db, tenant), tenant);

View File

@ -25,8 +25,10 @@ import com.apple.foundationdb.FDB;
import com.apple.foundationdb.tuple.Tuple;
public class Example {
public static final int API_VERSION = 720;
public static void main(String[] args) {
FDB fdb = FDB.selectAPIVersion(720);
FDB fdb = FDB.selectAPIVersion(API_VERSION);
try(Database db = fdb.open()) {
// Run an operation on the database

View File

@ -28,10 +28,12 @@ import com.apple.foundationdb.KeyValue;
import com.apple.foundationdb.TransactionContext;
public class IterableTest {
public static final int API_VERSION = 720;
public static void main(String[] args) throws InterruptedException {
final int reps = 1000;
try {
FDB fdb = FDB.selectAPIVersion(720);
FDB fdb = FDB.selectAPIVersion(API_VERSION);
try(Database db = fdb.open()) {
runTests(reps, db);
}

View File

@ -32,9 +32,10 @@ import com.apple.foundationdb.async.AsyncUtil;
import com.apple.foundationdb.tuple.ByteArrayUtil;
public class LocalityTests {
public static final int API_VERSION = 720;
public static void main(String[] args) {
FDB fdb = FDB.selectAPIVersion(720);
FDB fdb = FDB.selectAPIVersion(API_VERSION);
try(Database database = fdb.open(args[0])) {
try(Transaction tr = database.createTransaction()) {
String[] keyAddresses = LocalityUtil.getAddressesForKey(tr, "a".getBytes()).join();

View File

@ -36,6 +36,8 @@ import com.apple.foundationdb.async.AsyncIterator;
import com.apple.foundationdb.tuple.ByteArrayUtil;
public class ParallelRandomScan {
public static final int API_VERSION = 720;
private static final int ROWS = 1000000;
private static final int DURATION_MS = 2000;
private static final int PARALLELISM_MIN = 10;
@ -43,7 +45,7 @@ public class ParallelRandomScan {
private static final int PARALLELISM_STEP = 5;
public static void main(String[] args) throws InterruptedException {
FDB api = FDB.selectAPIVersion(720);
FDB api = FDB.selectAPIVersion(API_VERSION);
try(Database database = api.open(args[0])) {
for(int i = PARALLELISM_MIN; i <= PARALLELISM_MAX; i += PARALLELISM_STEP) {
runTest(database, i, ROWS, DURATION_MS);

View File

@ -29,12 +29,14 @@ import com.apple.foundationdb.FDB;
import com.apple.foundationdb.Transaction;
public class SerialInsertion {
public static final int API_VERSION = 720;
private static final int THREAD_COUNT = 10;
private static final int BATCH_SIZE = 1000;
private static final int NODES = 1000000;
public static void main(String[] args) {
FDB api = FDB.selectAPIVersion(720);
FDB api = FDB.selectAPIVersion(API_VERSION);
try(Database database = api.open()) {
long start = System.currentTimeMillis();

View File

@ -34,12 +34,14 @@ import com.apple.foundationdb.Transaction;
import com.apple.foundationdb.async.AsyncIterable;
public class SerialIteration {
public static final int API_VERSION = 720;
private static final int ROWS = 1000000;
private static final int RUNS = 25;
private static final int THREAD_COUNT = 1;
public static void main(String[] args) throws InterruptedException {
FDB api = FDB.selectAPIVersion(720);
FDB api = FDB.selectAPIVersion(API_VERSION);
try(Database database = api.open(args[0])) {
for(int i = 1; i <= THREAD_COUNT; i++) {
runThreadedTest(database, i);

View File

@ -27,10 +27,12 @@ import com.apple.foundationdb.FDB;
import com.apple.foundationdb.TransactionContext;
public class SerialTest {
public static final int API_VERSION = 720;
public static void main(String[] args) throws InterruptedException {
final int reps = 1000;
try {
FDB fdb = FDB.selectAPIVersion(720);
FDB fdb = FDB.selectAPIVersion(API_VERSION);
try(Database db = fdb.open()) {
runTests(reps, db);
}

View File

@ -35,11 +35,13 @@ import com.apple.foundationdb.tuple.Tuple;
* Some tests regarding conflict ranges to make sure they do what we expect.
*/
public class SnapshotTransactionTest {
public static final int API_VERSION = 720;
private static final int CONFLICT_CODE = 1020;
private static final Subspace SUBSPACE = new Subspace(Tuple.from("test", "conflict_ranges"));
public static void main(String[] args) {
FDB fdb = FDB.selectAPIVersion(720);
FDB fdb = FDB.selectAPIVersion(API_VERSION);
try(Database db = fdb.open()) {
snapshotReadShouldNotConflict(db);
snapshotShouldNotAddConflictRange(db);

View File

@ -32,12 +32,14 @@ import com.apple.foundationdb.tuple.Tuple;
import com.apple.foundationdb.tuple.Versionstamp;
public class TupleTest {
public static final int API_VERSION = 720;
private static final byte FF = (byte)0xff;
public static void main(String[] args) throws NoSuchFieldException {
final int reps = 1000;
try {
FDB fdb = FDB.selectAPIVersion(720);
FDB fdb = FDB.selectAPIVersion(API_VERSION);
try(Database db = fdb.open()) {
runTests(reps, db);
}

View File

@ -31,8 +31,10 @@ import com.apple.foundationdb.tuple.Tuple;
import com.apple.foundationdb.tuple.Versionstamp;
public class VersionstampSmokeTest {
public static final int API_VERSION = 720;
public static void main(String[] args) {
FDB fdb = FDB.selectAPIVersion(720);
FDB fdb = FDB.selectAPIVersion(API_VERSION);
try(Database db = fdb.open()) {
db.run(tr -> {
tr.clear(Tuple.from("prefix").range());

View File

@ -32,9 +32,10 @@ import com.apple.foundationdb.FDBException;
import com.apple.foundationdb.Transaction;
public class WatchTest {
public static final int API_VERSION = 720;
public static void main(String[] args) {
FDB fdb = FDB.selectAPIVersion(720);
FDB fdb = FDB.selectAPIVersion(API_VERSION);
try(Database database = fdb.open(args[0])) {
database.options().setLocationCacheSize(42);
try(Transaction tr = database.createTransaction()) {

View File

@ -75,38 +75,3 @@ add_custom_command(OUTPUT ${package_file}
add_custom_target(python_package DEPENDS ${package_file})
add_dependencies(python_package python_binding)
add_dependencies(packages python_package)
if (NOT WIN32 AND NOT OPEN_FOR_IDE)
add_fdbclient_test(
NAME single_process_fdbcli_tests
COMMAND ${CMAKE_SOURCE_DIR}/bindings/python/tests/fdbcli_tests.py
${CMAKE_BINARY_DIR}
@CLUSTER_FILE@
)
add_fdbclient_test(
NAME multi_process_fdbcli_tests
PROCESS_NUMBER 5
COMMAND ${CMAKE_SOURCE_DIR}/bindings/python/tests/fdbcli_tests.py
${CMAKE_BINARY_DIR}
@CLUSTER_FILE@
5
)
if (TARGET external_client) # external_client copies fdb_c to bindings/c/libfdb_c_external.so
add_fdbclient_test(
NAME single_process_external_client_fdbcli_tests
COMMAND ${CMAKE_SOURCE_DIR}/bindings/python/tests/fdbcli_tests.py
${CMAKE_BINARY_DIR}
@CLUSTER_FILE@
--external-client-library ${CMAKE_BINARY_DIR}/bindings/c/libfdb_c_external.so
)
add_fdbclient_test(
NAME multi_process_external_client_fdbcli_tests
PROCESS_NUMBER 5
COMMAND ${CMAKE_SOURCE_DIR}/bindings/python/tests/fdbcli_tests.py
${CMAKE_BINARY_DIR}
@CLUSTER_FILE@
5
--external-client-library ${CMAKE_BINARY_DIR}/bindings/c/libfdb_c_external.so
)
endif()
endif()

View File

@ -100,10 +100,8 @@ def api_version(ver):
_add_symbols(fdb.impl, list)
if ver >= 710:
if ver >= 630:
import fdb.tenant_management
if ver < 720:
fdb.tenant_management._tenant_map_prefix = b'\xff\xff/management/tenant_map/'
if ver < 610:
globals()["init"] = getattr(fdb.impl, "init")

View File

@ -1359,7 +1359,7 @@ else:
except:
# The system python on OS X can't find the library installed to /usr/local/lib if SIP is enabled
# find_library does find the location in /usr/local/lib, so if the above fails fallback to using it
lib_path = ctypes.util.find_library(capi_name)
lib_path = ctypes.util.find_library("fdb_c")
if lib_path is not None:
try:
_capi = ctypes.CDLL(lib_path)

View File

@ -103,7 +103,7 @@ class FDBTenantList(object):
# JSON strings of the tenant metadata
@_impl.transactional
def _list_tenants_impl(tr, begin, end, limit):
tr.options.set_read_system_keys()
tr.options.set_raw_access()
begin_key = b'%s%s' % (_tenant_map_prefix, begin)
end_key = b'%s%s' % (_tenant_map_prefix, end)

View File

@ -198,16 +198,17 @@ function(stage_correctness_package)
set(src_dir "${src_dir}/")
string(SUBSTRING ${src_dir} ${dir_len} -1 dest_dir)
string(SUBSTRING ${file} ${dir_len} -1 rel_out_file)
set(out_file ${STAGE_OUT_DIR}/${rel_out_file})
set(out_file ${STAGE_OUT_DIR}/${rel_out_file})
list(APPEND external_files ${out_file})
add_custom_command(
add_custom_command(
OUTPUT ${out_file}
DEPENDS ${file}
COMMAND ${CMAKE_COMMAND} -E copy ${file} ${out_file}
COMMENT "Copying ${STAGE_CONTEXT} external file ${file}"
)
DEPENDS ${file}
COMMAND ${CMAKE_COMMAND} -E copy ${file} ${out_file}
COMMENT "Copying ${STAGE_CONTEXT} external file ${file}"
)
endforeach()
endforeach()
list(APPEND package_files ${STAGE_OUT_DIR}/bin/fdbserver
${STAGE_OUT_DIR}/bin/coverage.fdbserver.xml
${STAGE_OUT_DIR}/bin/coverage.fdbclient.xml
@ -217,6 +218,7 @@ function(stage_correctness_package)
${STAGE_OUT_DIR}/bin/TraceLogHelper.dll
${STAGE_OUT_DIR}/CMakeCache.txt
)
add_custom_command(
OUTPUT ${package_files}
DEPENDS ${CMAKE_BINARY_DIR}/CMakeCache.txt
@ -238,6 +240,20 @@ function(stage_correctness_package)
${STAGE_OUT_DIR}/bin
COMMENT "Copying files for ${STAGE_CONTEXT} package"
)
set(test_harness_dir "${CMAKE_SOURCE_DIR}/contrib/TestHarness2")
file(GLOB_RECURSE test_harness2_files RELATIVE "${test_harness_dir}" CONFIGURE_DEPENDS "${test_harness_dir}/*.py")
foreach(file IN LISTS test_harness2_files)
set(src_file "${test_harness_dir}/${file}")
set(out_file "${STAGE_OUT_DIR}/${file}")
get_filename_component(dir "${out_file}" DIRECTORY)
file(MAKE_DIRECTORY "${dir}")
add_custom_command(OUTPUT ${out_file}
COMMAND ${CMAKE_COMMAND} -E copy "${src_file}" "${out_file}"
DEPENDS "${src_file}")
list(APPEND package_files "${out_file}")
endforeach()
list(APPEND package_files ${test_files} ${external_files})
if(STAGE_OUT_FILES)
set(${STAGE_OUT_FILES} ${package_files} PARENT_SCOPE)
@ -449,7 +465,11 @@ function(add_fdbclient_test)
set_tests_properties("${T_NAME}" PROPERTIES TIMEOUT ${T_TEST_TIMEOUT})
else()
# default timeout
set_tests_properties("${T_NAME}" PROPERTIES TIMEOUT 300)
if(USE_SANITIZER)
set_tests_properties("${T_NAME}" PROPERTIES TIMEOUT 1200)
else()
set_tests_properties("${T_NAME}" PROPERTIES TIMEOUT 300)
endif()
endif()
set_tests_properties("${T_NAME}" PROPERTIES ENVIRONMENT "${SANITIZER_OPTIONS}")
endfunction()

View File

@ -36,7 +36,7 @@ function(compile_boost)
set(B2_COMMAND "./b2")
set(BOOST_COMPILER_FLAGS -fvisibility=hidden -fPIC -std=c++17 -w)
set(BOOST_LINK_FLAGS "")
if(APPLE OR CLANG OR ICX OR USE_LIBCXX)
if(APPLE OR ICX OR USE_LIBCXX)
list(APPEND BOOST_COMPILER_FLAGS -stdlib=libc++ -nostdlib++)
list(APPEND BOOST_LINK_FLAGS -lc++ -lc++abi)
if (NOT APPLE)
@ -57,19 +57,27 @@ function(compile_boost)
# Build boost
include(ExternalProject)
set(BOOST_INSTALL_DIR "${CMAKE_BINARY_DIR}/boost_install")
ExternalProject_add("${COMPILE_BOOST_TARGET}Project"
URL "https://boostorg.jfrog.io/artifactory/main/release/1.78.0/source/boost_1_78_0.tar.bz2"
URL_HASH SHA256=8681f175d4bdb26c52222665793eef08490d7758529330f98d3b29dd0735bccc
CONFIGURE_COMMAND ${BOOTSTRAP_COMMAND} ${BOOTSTRAP_ARGS} --with-libraries=${BOOTSTRAP_LIBRARIES} --with-toolset=${BOOST_TOOLSET}
BUILD_COMMAND ${B2_COMMAND} link=static ${COMPILE_BOOST_BUILD_ARGS} --prefix=${BOOST_INSTALL_DIR} ${USER_CONFIG_FLAG} install
BUILD_IN_SOURCE ON
INSTALL_COMMAND ""
UPDATE_COMMAND ""
BUILD_BYPRODUCTS "${BOOST_INSTALL_DIR}/boost/config.hpp"
"${BOOST_INSTALL_DIR}/lib/libboost_context.a"
"${BOOST_INSTALL_DIR}/lib/libboost_filesystem.a"
"${BOOST_INSTALL_DIR}/lib/libboost_iostreams.a")
URL "https://boostorg.jfrog.io/artifactory/main/release/1.78.0/source/boost_1_78_0.tar.bz2"
URL_HASH SHA256=8681f175d4bdb26c52222665793eef08490d7758529330f98d3b29dd0735bccc
CONFIGURE_COMMAND ${BOOTSTRAP_COMMAND}
${BOOTSTRAP_ARGS}
--with-libraries=${BOOTSTRAP_LIBRARIES}
--with-toolset=${BOOST_TOOLSET}
BUILD_COMMAND ${B2_COMMAND}
link=static
${COMPILE_BOOST_BUILD_ARGS}
--prefix=${BOOST_INSTALL_DIR}
${USER_CONFIG_FLAG} install
BUILD_IN_SOURCE ON
INSTALL_COMMAND ""
UPDATE_COMMAND ""
BUILD_BYPRODUCTS "${BOOST_INSTALL_DIR}/boost/config.hpp"
"${BOOST_INSTALL_DIR}/lib/libboost_context.a"
"${BOOST_INSTALL_DIR}/lib/libboost_filesystem.a"
"${BOOST_INSTALL_DIR}/lib/libboost_iostreams.a")
add_library(${COMPILE_BOOST_TARGET}_context STATIC IMPORTED)
add_dependencies(${COMPILE_BOOST_TARGET}_context ${COMPILE_BOOST_TARGET}Project)
@ -133,7 +141,7 @@ if(WIN32)
return()
endif()
find_package(Boost 1.78.0 EXACT QUIET COMPONENTS context filesystem CONFIG PATHS ${BOOST_HINT_PATHS})
find_package(Boost 1.78.0 EXACT QUIET COMPONENTS context filesystem iostreams CONFIG PATHS ${BOOST_HINT_PATHS})
set(FORCE_BOOST_BUILD OFF CACHE BOOL "Forces cmake to build boost and ignores any installed boost")
if(Boost_FOUND AND Boost_filesystem_FOUND AND Boost_context_FOUND AND Boost_iostreams_FOUND AND NOT FORCE_BOOST_BUILD)

View File

@ -4,30 +4,42 @@ find_package(RocksDB 6.27.3)
include(ExternalProject)
if (RocksDB_FOUND)
set(RocksDB_CMAKE_ARGS
-DUSE_RTTI=1
-DPORTABLE=${PORTABLE_ROCKSDB}
-DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
-DCMAKE_C_FLAGS=${CMAKE_C_FLAGS}
-DCMAKE_CXX_STANDARD=${CMAKE_CXX_STANDARD}
-DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
-DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS}
-DCMAKE_SHARED_LINKER_FLAGS=${CMAKE_SHARED_LINKER_FLAGS}
-DCMAKE_STATIC_LINKER_FLAGS=${CMAKE_STATIC_LINKER_FLAGS}
-DCMAKE_EXE_LINKER_FLAGS=${CMAKE_EXE_LINKER_FLAGS}
-DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}
-DFAIL_ON_WARNINGS=OFF
-DWITH_GFLAGS=OFF
-DWITH_TESTS=OFF
-DWITH_TOOLS=OFF
-DWITH_CORE_TOOLS=OFF
-DWITH_BENCHMARK_TOOLS=OFF
-DWITH_BZ2=OFF
-DWITH_LZ4=ON
-DWITH_SNAPPY=OFF
-DWITH_ZLIB=OFF
-DWITH_ZSTD=OFF
-DWITH_LIBURING=${WITH_LIBURING}
-DWITH_TSAN=${USE_TSAN}
-DWITH_ASAN=${USE_ASAN}
-DWITH_UBSAN=${USE_UBSAN}
-DROCKSDB_BUILD_SHARED=OFF
-DCMAKE_POSITION_INDEPENDENT_CODE=True
)
if(ROCKSDB_FOUND)
ExternalProject_Add(rocksdb
SOURCE_DIR "${RocksDB_ROOT}"
DOWNLOAD_COMMAND ""
CMAKE_ARGS -DUSE_RTTI=1 -DPORTABLE=${PORTABLE_ROCKSDB}
-DCMAKE_CXX_STANDARD=${CMAKE_CXX_STANDARD}
-DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
-DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}
-DWITH_GFLAGS=OFF
-DWITH_TESTS=OFF
-DWITH_TOOLS=OFF
-DWITH_CORE_TOOLS=OFF
-DWITH_BENCHMARK_TOOLS=OFF
-DWITH_BZ2=OFF
-DWITH_LZ4=ON
-DWITH_SNAPPY=OFF
-DWITH_ZLIB=OFF
-DWITH_ZSTD=OFF
-DWITH_LIBURING=${WITH_LIBURING}
-DWITH_TSAN=${USE_TSAN}
-DWITH_ASAN=${USE_ASAN}
-DWITH_UBSAN=${USE_UBSAN}
-DROCKSDB_BUILD_SHARED=OFF
-DCMAKE_POSITION_INDEPENDENT_CODE=True
CMAKE_ARGS ${RocksDB_CMAKE_ARGS}
BUILD_BYPRODUCTS <BINARY_DIR>/librocksdb.a
INSTALL_COMMAND ""
)
@ -37,28 +49,9 @@ if (RocksDB_FOUND)
${BINARY_DIR}/librocksdb.a)
else()
ExternalProject_Add(rocksdb
URL https://github.com/facebook/rocksdb/archive/refs/tags/v6.27.3.tar.gz
URL_HASH SHA256=ee29901749b9132692b26f0a6c1d693f47d1a9ed8e3771e60556afe80282bf58
CMAKE_ARGS -DUSE_RTTI=1 -DPORTABLE=${PORTABLE_ROCKSDB}
-DCMAKE_CXX_STANDARD=${CMAKE_CXX_STANDARD}
-DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
-DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}
-DWITH_GFLAGS=OFF
-DWITH_TESTS=OFF
-DWITH_TOOLS=OFF
-DWITH_CORE_TOOLS=OFF
-DWITH_BENCHMARK_TOOLS=OFF
-DWITH_BZ2=OFF
-DWITH_LZ4=ON
-DWITH_SNAPPY=OFF
-DWITH_ZLIB=OFF
-DWITH_ZSTD=OFF
-DWITH_LIBURING=${WITH_LIBURING}
-DWITH_TSAN=${USE_TSAN}
-DWITH_ASAN=${USE_ASAN}
-DWITH_UBSAN=${USE_UBSAN}
-DROCKSDB_BUILD_SHARED=OFF
-DCMAKE_POSITION_INDEPENDENT_CODE=True
URL https://github.com/facebook/rocksdb/archive/refs/tags/v6.27.3.tar.gz
URL_HASH SHA256=ee29901749b9132692b26f0a6c1d693f47d1a9ed8e3771e60556afe80282bf58
CMAKE_ARGS ${RocksDB_CMAKE_ARGS}
BUILD_BYPRODUCTS <BINARY_DIR>/librocksdb.a
INSTALL_COMMAND ""
)
@ -68,7 +61,7 @@ else()
${BINARY_DIR}/librocksdb.a)
ExternalProject_Get_Property(rocksdb SOURCE_DIR)
set (ROCKSDB_INCLUDE_DIR "${SOURCE_DIR}/include")
set(ROCKSDB_INCLUDE_DIR "${SOURCE_DIR}/include")
set(ROCKSDB_FOUND TRUE)
endif()

27
cmake/CompileZstd.cmake Normal file
View File

@ -0,0 +1,27 @@
# Compile zstd
function(compile_zstd)
include(FetchContent)
FetchContent_Declare(ZSTD
GIT_REPOSITORY https://github.com/facebook/zstd.git
GIT_TAG v1.5.2
SOURCE_SUBDIR "build/cmake"
)
FetchContent_GetProperties(ZSTD)
if (NOT zstd_POPULATED)
FetchContent_Populate(ZSTD)
add_subdirectory(${zstd_SOURCE_DIR}/build/cmake ${zstd_BINARY_DIR})
if (CLANG)
target_compile_options(zstd PRIVATE -Wno-array-bounds -Wno-tautological-compare)
target_compile_options(libzstd_static PRIVATE -Wno-array-bounds -Wno-tautological-compare)
target_compile_options(zstd-frugal PRIVATE -Wno-array-bounds -Wno-tautological-compare)
endif()
endif()
set(ZSTD_LIB_INCLUDE_DIR ${zstd_SOURCE_DIR}/lib CACHE INTERNAL ZSTD_LIB_INCLUDE_DIR)
endfunction(compile_zstd)

View File

@ -25,6 +25,7 @@ env_set(STATIC_LINK_LIBCXX "${_static_link_libcxx}" BOOL "Statically link libstd
env_set(TRACE_PC_GUARD_INSTRUMENTATION_LIB "" STRING "Path to a library containing an implementation for __sanitizer_cov_trace_pc_guard. See https://clang.llvm.org/docs/SanitizerCoverage.html for more info.")
env_set(PROFILE_INSTR_GENERATE OFF BOOL "If set, build FDB as an instrumentation build to generate profiles")
env_set(PROFILE_INSTR_USE "" STRING "If set, build FDB with profile")
env_set(FULL_DEBUG_SYMBOLS OFF BOOL "Generate full debug symbols")
set(USE_SANITIZER OFF)
if(USE_ASAN OR USE_VALGRIND OR USE_MSAN OR USE_TSAN OR USE_UBSAN)
@ -164,9 +165,20 @@ else()
set(SANITIZER_COMPILE_OPTIONS)
set(SANITIZER_LINK_OPTIONS)
# we always compile with debug symbols. CPack will strip them out
# we always compile with debug symbols. For release builds CPack will strip them out
# and create a debuginfo rpm
add_compile_options(-ggdb -fno-omit-frame-pointer)
add_compile_options(-fno-omit-frame-pointer -gz)
add_link_options(-gz)
if(FDB_RELEASE OR FULL_DEBUG_SYMBOLS OR CMAKE_BUILD_TYPE STREQUAL "Debug")
# Configure with FULL_DEBUG_SYMBOLS=ON to generate all symbols for debugging with gdb
# Also generating full debug symbols in release builds, because they are packaged
# separately and installed optionally
add_compile_options(-ggdb)
else()
# Generating minimal debug symbols by default. They are sufficient for testing purposes
add_compile_options(-ggdb1)
endif()
if(TRACE_PC_GUARD_INSTRUMENTATION_LIB)
add_compile_options(-fsanitize-coverage=trace-pc-guard)
link_libraries(${TRACE_PC_GUARD_INSTRUMENTATION_LIB})
@ -201,6 +213,8 @@ else()
-fsanitize=undefined
# TODO(atn34) Re-enable -fsanitize=alignment once https://github.com/apple/foundationdb/issues/1434 is resolved
-fno-sanitize=alignment
# https://github.com/apple/foundationdb/issues/7955
-fno-sanitize=function
-DBOOST_USE_UCONTEXT)
list(APPEND SANITIZER_LINK_OPTIONS -fsanitize=undefined)
endif()
@ -278,16 +292,35 @@ else()
#add_compile_options(-fno-builtin-memcpy)
if (CLANG OR ICX)
add_compile_options()
if (APPLE OR USE_LIBCXX)
add_compile_options($<$<COMPILE_LANGUAGE:CXX>:-stdlib=libc++>)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libc++")
if (NOT APPLE)
if (STATIC_LINK_LIBCXX)
add_link_options(-static-libgcc -nostdlib++ -Wl,-Bstatic -lc++ -lc++abi -Wl,-Bdynamic)
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -static-libgcc -nostdlib++ -Wl,-Bstatic -lc++ -lc++abi -Wl,-Bdynamic")
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -static-libgcc -nostdlib++ -Wl,-Bstatic -lc++ -lc++abi -Wl,-Bdynamic")
else()
# Make sure that libc++ can be found be the platform's loader, so that thing's like cmake's "try_run" work.
find_library(LIBCXX_SO_PATH c++ /usr/local/lib)
if (LIBCXX_SO_PATH)
get_filename_component(LIBCXX_SO_DIR ${LIBCXX_SO_PATH} DIRECTORY)
if (APPLE)
set(ENV{DYLD_LIBRARY_PATH} "$ENV{DYLD_LIBRARY_PATH}:${LIBCXX_SO_DIR}")
elseif(WIN32)
set(ENV{PATH} "$ENV{PATH};${LIBCXX_SO_DIR}")
else()
set(ENV{LD_LIBRARY_PATH} "$ENV{LD_LIBRARY_PATH}:${LIBCXX_SO_DIR}")
endif()
endif()
endif()
add_link_options(-stdlib=libc++ -Wl,-build-id=sha1)
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -stdlib=libc++ -Wl,-build-id=sha1")
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -stdlib=libc++ -Wl,-build-id=sha1")
endif()
endif()
if (NOT APPLE AND NOT USE_LIBCXX)
message(STATUS "Linking libatomic")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -latomic")
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -latomic")
endif()
if (OPEN_FOR_IDE)
add_compile_options(
-Wno-unknown-attributes)
@ -305,11 +338,19 @@ else()
-Wno-unknown-warning-option
-Wno-unused-parameter
-Wno-constant-logical-operand
# These need to be disabled for FDB's RocksDB storage server implementation
-Wno-deprecated-copy
-Wno-delete-non-abstract-non-virtual-dtor
-Wno-range-loop-construct
-Wno-reorder-ctor
# Needed for clang 13 (todo: Update above logic so that it figures out when to pass in -static-libstdc++ and when it will be ignored)
# When you remove this, you might need to move it back to the USE_CCACHE stanza. It was (only) there before I moved it here.
-Wno-unused-command-line-argument
)
if (USE_CCACHE)
add_compile_options(
-Wno-register
-Wno-unused-command-line-argument)
)
endif()
if (PROFILE_INSTR_GENERATE)
add_compile_options(-fprofile-instr-generate)

View File

@ -178,7 +178,7 @@ set(PORTABLE_ROCKSDB ON CACHE BOOL "Compile RocksDB in portable mode") # Set thi
set(WITH_LIBURING OFF CACHE BOOL "Build with liburing enabled") # Set this to ON to include liburing
# RocksDB is currently enabled by default for GCC but does not build with the latest
# Clang.
if (SSD_ROCKSDB_EXPERIMENTAL AND GCC)
if (SSD_ROCKSDB_EXPERIMENTAL AND NOT WIN32)
set(WITH_ROCKSDB_EXPERIMENTAL ON)
else()
set(WITH_ROCKSDB_EXPERIMENTAL OFF)
@ -200,6 +200,9 @@ else()
URL "https://github.com/ToruNiina/toml11/archive/v3.4.0.tar.gz"
URL_HASH SHA256=bc6d733efd9216af8c119d8ac64a805578c79cc82b813e4d1d880ca128bd154d
CMAKE_CACHE_ARGS
-DCMAKE_BUILD_TYPE:STRING=${CMAKE_BUILD_TYPE}
-DCMAKE_C_COMPILER:FILEPATH=${CMAKE_C_COMPILER}
-DCMAKE_CXX_COMPILER:FILEPATH=${CMAKE_CXX_COMPILER}
-DCMAKE_INSTALL_PREFIX:PATH=${CMAKE_CURRENT_BINARY_DIR}/toml11
-Dtoml11_BUILD_TEST:BOOL=OFF
BUILD_ALWAYS ON)

Some files were not shown because too many files have changed in this diff Show More