Merge remote-tracking branch 'origin/main' into change-data-hall
commit 77786f4fc6
@@ -3,7 +3,7 @@
 *
 * This source file is part of the FoundationDB open source project
 *
- * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
+ * Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -4,7 +4,7 @@
 #
 # This source file is part of the FoundationDB open source project
 #
-# Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
+# Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -202,6 +202,7 @@ class TestRunner(object):
        self.args.types = list(reduce(lambda t1, t2: filter(t1.__contains__, t2), map(lambda tester: tester.types, self.testers)))

        self.args.no_directory_snapshot_ops = self.args.no_directory_snapshot_ops or any([not tester.directory_snapshot_ops_enabled for tester in self.testers])
+       self.args.no_tenants = self.args.no_tenants or any([not tester.tenants_enabled for tester in self.testers])

    def print_test(self):
        test_instructions = self._generate_test()
@@ -282,6 +283,17 @@ class TestRunner(object):
    def _insert_instructions(self, test_instructions):
        util.get_logger().info('\nInserting test into database...')
        del self.db[:]
+
+       while True:
+           tr = self.db.create_transaction()
+           try:
+               tr.options.set_special_key_space_enable_writes()
+               del tr[b'\xff\xff/management/tenant_map/' : b'\xff\xff/management/tenant_map0']
+               tr.commit().wait()
+               break
+           except fdb.FDBError as e:
+               tr.on_error(e).wait()
+
        for subspace, thread in test_instructions.items():
            thread.insert_operations(self.db, subspace)

@@ -445,6 +457,8 @@ def parse_args(argv):

    parser.add_argument('--no-directory-snapshot-ops', action='store_true', help='Disables snapshot operations for directory instructions.')

+   parser.add_argument('--no-tenants', action='store_true', help='Disables tenant operations.')
+
    return parser.parse_args(argv)

@@ -26,7 +26,7 @@ ALL_TYPES = COMMON_TYPES + ['versionstamp']


 class Tester:
-    def __init__(self, name, cmd, max_int_bits=64, min_api_version=0, max_api_version=MAX_API_VERSION, threads_enabled=True, types=COMMON_TYPES, directory_snapshot_ops_enabled=True):
+    def __init__(self, name, cmd, max_int_bits=64, min_api_version=0, max_api_version=MAX_API_VERSION, threads_enabled=True, types=COMMON_TYPES, directory_snapshot_ops_enabled=True, tenants_enabled=False):
         self.name = name
         self.cmd = cmd
         self.max_int_bits = max_int_bits
@@ -35,6 +35,7 @@ class Tester:
         self.threads_enabled = threads_enabled
         self.types = types
         self.directory_snapshot_ops_enabled = directory_snapshot_ops_enabled
+        self.tenants_enabled = tenants_enabled

     def supports_api_version(self, api_version):
         return api_version >= self.min_api_version and api_version <= self.max_api_version
@@ -57,8 +58,8 @@ _java_cmd = 'java -ea -cp %s:%s com.apple.foundationdb.test.' % (

 # We could set min_api_version lower on some of these if the testers were updated to support them
 testers = {
-    'python': Tester('python', 'python ' + _absolute_path('python/tests/tester.py'), 2040, 23, MAX_API_VERSION, types=ALL_TYPES),
-    'python3': Tester('python3', 'python3 ' + _absolute_path('python/tests/tester.py'), 2040, 23, MAX_API_VERSION, types=ALL_TYPES),
+    'python': Tester('python', 'python ' + _absolute_path('python/tests/tester.py'), 2040, 23, MAX_API_VERSION, types=ALL_TYPES, tenants_enabled=True),
+    'python3': Tester('python3', 'python3 ' + _absolute_path('python/tests/tester.py'), 2040, 23, MAX_API_VERSION, types=ALL_TYPES, tenants_enabled=True),
     'ruby': Tester('ruby', _absolute_path('ruby/tests/tester.rb'), 2040, 23, MAX_API_VERSION),
     'java': Tester('java', _java_cmd + 'StackTester', 2040, 510, MAX_API_VERSION, types=ALL_TYPES),
     'java_async': Tester('java', _java_cmd + 'AsyncStackTester', 2040, 510, MAX_API_VERSION, types=ALL_TYPES),
@@ -0,0 +1,77 @@
Overview
--------

Tenant testing is an optional extension to the core binding tester that enables
testing of the tenant API. This testing is enabled by adding some additional
instructions and modifying the behavior of some existing instructions.

Additional State and Initialization
-----------------------------------

Your tester should store an additional piece of state tracking the active tenant
that is to be used to create transactions. This tenant must support an unset
state, in which case transactions will be created directly on the database.

New Instructions
----------------

The tenant API introduces some new operations:

#### TENANT_CREATE

Pops the top item off of the stack as TENANT_NAME. Creates a new tenant
in the database with the name TENANT_NAME. May optionally push a future
onto the stack.

#### TENANT_DELETE

Pops the top item off of the stack as TENANT_NAME. Deletes the tenant with
the name TENANT_NAME from the database. May optionally push a future onto
the stack.

#### TENANT_SET_ACTIVE

Pops the top item off of the stack as TENANT_NAME. Opens the tenant with
name TENANT_NAME and stores it as the active tenant.

#### TENANT_CLEAR_ACTIVE

Unsets the active tenant.

Updates to Existing Instructions
--------------------------------

Some existing operations in the binding tester will have slightly modified
behavior when tenants are enabled.

#### NEW_TRANSACTION

When creating a new transaction, the active tenant should be used. If no active
tenant is set, then the transaction should be created as normal using the
database.

#### _TENANT suffix

Similar to the _DATABASE suffix, an operation with the _TENANT suffix indicates
that the operation should be performed on the current active tenant object. If
there is no active tenant, then the operation should be performed on the database
as if _DATABASE was specified. In any case where the operation suffixed with
_DATABASE is allowed to push a future onto the stack, the same operation suffixed
with _TENANT is also allowed to push a future onto the stack.

If your binding does not support operations directly on a tenant object, you should
simulate it using an anonymous transaction. Remember that set and clear operations
must immediately commit (with appropriate retry behavior!).

Operations that can include the _TENANT suffix are:

GET_TENANT
GET_KEY_TENANT
GET_RANGE_TENANT
GET_RANGE_STARTS_WITH_TENANT
GET_RANGE_SELECTOR_TENANT
SET_TENANT
CLEAR_TENANT
CLEAR_RANGE_TENANT
CLEAR_RANGE_STARTS_WITH_TENANT
ATOMIC_OP_TENANT
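As a concrete illustration of the state described above, here is a minimal Python sketch of how a tester might track the active tenant and dispatch the new instructions. The class and method names are hypothetical, and the tenant-management helpers (`fdb.tenant_management.create_tenant`/`delete_tenant`, `Database.open_tenant`) are assumed to match the Python binding; an actual tester may create and delete tenants differently.

```python
import fdb

fdb.api_version(710)


class TenantState:
    """Sketch of the extra tester state; names are illustrative only."""

    def __init__(self, db):
        self.db = db
        self.active_tenant = None  # unset: transactions are created directly on the database

    def handle(self, inst, stack):
        if inst == 'TENANT_CREATE':
            # Assumed helper; a binding may instead write the management key directly.
            fdb.tenant_management.create_tenant(self.db, stack.pop())
        elif inst == 'TENANT_DELETE':
            fdb.tenant_management.delete_tenant(self.db, stack.pop())
        elif inst == 'TENANT_SET_ACTIVE':
            self.active_tenant = self.db.open_tenant(stack.pop())
        elif inst == 'TENANT_CLEAR_ACTIVE':
            self.active_tenant = None

    def new_transaction(self):
        # NEW_TRANSACTION: use the active tenant if set, otherwise the database.
        source = self.active_tenant if self.active_tenant is not None else self.db
        return source.create_transaction()
```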
@@ -58,6 +58,7 @@ class ApiTest(Test):
        self.outstanding_ops = []
        self.random = test_util.RandomGenerator(args.max_int_bits, args.api_version, args.types)
        self.api_version = args.api_version
+       self.allocated_tenants = set()

    def add_stack_items(self, num):
        self.stack_size += num
@@ -137,6 +138,12 @@ class ApiTest(Test):
            test_util.to_front(instructions, self.stack_size - read[0])
            instructions.append('WAIT_FUTURE')

+   def choose_tenant(self, new_tenant_probability):
+       if len(self.allocated_tenants) == 0 or random.random() < new_tenant_probability:
+           return self.random.random_string(random.randint(0, 30))
+       else:
+           return random.choice(list(self.allocated_tenants))
+
    def generate(self, args, thread_number):
        instructions = InstructionSet()

@@ -158,6 +165,7 @@ class ApiTest(Test):
        write_conflicts = ['WRITE_CONFLICT_RANGE', 'WRITE_CONFLICT_KEY', 'DISABLE_WRITE_CONFLICT']
        txn_sizes = ['GET_APPROXIMATE_SIZE']
        storage_metrics = ['GET_ESTIMATED_RANGE_SIZE', 'GET_RANGE_SPLIT_POINTS']
+       tenants = ['TENANT_CREATE', 'TENANT_DELETE', 'TENANT_SET_ACTIVE', 'TENANT_CLEAR_ACTIVE']

        op_choices += reads
        op_choices += mutations
@@ -173,6 +181,9 @@ class ApiTest(Test):
        op_choices += txn_sizes
        op_choices += storage_metrics

+       if not args.no_tenants:
+           op_choices += tenants
+
        idempotent_atomic_ops = ['BIT_AND', 'BIT_OR', 'MAX', 'MIN', 'BYTE_MIN', 'BYTE_MAX']
        atomic_ops = idempotent_atomic_ops + ['ADD', 'BIT_XOR', 'APPEND_IF_FITS']

@@ -195,7 +206,7 @@ class ApiTest(Test):

            # print 'Adding instruction %s at %d' % (op, index)

-           if args.concurrency == 1 and (op in database_mutations):
+           if args.concurrency == 1 and (op in database_mutations or op in ['TENANT_CREATE', 'TENANT_DELETE']):
                self.wait_for_reads(instructions)
                test_util.blocking_commit(instructions)
                self.can_get_commit_version = False
@@ -570,18 +581,39 @@ class ApiTest(Test):
                instructions.push_args(key1, key2, chunkSize)
                instructions.append(op)
                self.add_strings(1)

+           elif op == 'TENANT_CREATE':
+               tenant_name = self.choose_tenant(0.8)
+               self.allocated_tenants.add(tenant_name)
+               instructions.push_args(tenant_name)
+               instructions.append(op)
+               self.add_strings(1)
+           elif op == 'TENANT_DELETE':
+               tenant_name = self.choose_tenant(0.2)
+               if tenant_name in self.allocated_tenants:
+                   self.allocated_tenants.remove(tenant_name)
+               instructions.push_args(tenant_name)
+               instructions.append(op)
+               self.add_strings(1)
+           elif op == 'TENANT_SET_ACTIVE':
+               tenant_name = self.choose_tenant(0.8)
+               instructions.push_args(tenant_name)
+               instructions.append(op)
+           elif op == 'TENANT_CLEAR_ACTIVE':
+               instructions.append(op)
            else:
                assert False, 'Unknown operation: ' + op

            if read_performed and op not in database_reads:
                self.outstanding_ops.append((self.stack_size, len(instructions) - 1))

-           if args.concurrency == 1 and (op in database_reads or op in database_mutations):
+           if args.concurrency == 1 and (op in database_reads or op in database_mutations or op in ['TENANT_CREATE', 'TENANT_DELETE']):
                instructions.append('WAIT_FUTURE')

        instructions.begin_finalization()

+       if not args.no_tenants:
+           instructions.append('TENANT_CLEAR_ACTIVE')
+
        if args.concurrency == 1:
            self.wait_for_reads(instructions)
            test_util.blocking_commit(instructions)
@@ -1,7 +1,6 @@
 set(FDB_C_SRCS
   fdb_c.cpp
-  foundationdb/fdb_c.h
-  ThreadCleanup.cpp)
+  foundationdb/fdb_c.h)

 file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/foundationdb)

@@ -51,7 +50,7 @@ endif()
 add_dependencies(fdb_c fdb_c_generated fdb_c_options)
 add_dependencies(fdbclient fdb_c_options)
 add_dependencies(fdbclient_sampling fdb_c_options)
-target_link_libraries(fdb_c PUBLIC $<BUILD_INTERFACE:fdbclient>)
+target_link_libraries(fdb_c PRIVATE $<BUILD_INTERFACE:fdbclient>)
 if(APPLE)
   set(symbols ${CMAKE_CURRENT_BINARY_DIR}/fdb_c.symbols)
   add_custom_command(OUTPUT ${symbols}
@@ -76,10 +75,8 @@ if(WIN32)
   set_property(SOURCE ${asm_file} PROPERTY LANGUAGE ASM_MASM)
 endif()

-# The tests don't build on windows and ARM macs
-# doctest doesn't seem to compile on ARM macs, we should
-# check later whether this works
-if(NOT WIN32 AND NOT IS_ARM_MAC)
+# The tests don't build on windows
+if(NOT WIN32)
   set(MAKO_SRCS
     test/mako/mako.c
     test/mako/mako.h
@@ -94,11 +91,35 @@ if(NOT WIN32 AND NOT IS_ARM_MAC)

   set(UNIT_TEST_VERSION_510_SRCS test/unit/unit_tests_version_510.cpp)
   set(TRACE_PARTIAL_FILE_SUFFIX_TEST_SRCS test/unit/trace_partial_file_suffix_test.cpp)
-  set(DISCONNECTED_TIMEOUT_UNIT_TEST_SRCS
+  set(DISCONNECTED_TIMEOUT_UNIT_TEST_SRCS
     test/unit/disconnected_timeout_tests.cpp
     test/unit/fdb_api.cpp
     test/unit/fdb_api.hpp)

+  set(API_TESTER_SRCS
+    test/apitester/fdb_c_api_tester.cpp
+    test/apitester/TesterApiWorkload.cpp
+    test/apitester/TesterApiWorkload.h
+    test/apitester/TesterApiWrapper.cpp
+    test/apitester/TesterApiWrapper.h
+    test/apitester/TesterTestSpec.cpp
+    test/apitester/TesterTestSpec.h
+    test/apitester/TesterCancelTransactionWorkload.cpp
+    test/apitester/TesterCorrectnessWorkload.cpp
+    test/apitester/TesterKeyValueStore.cpp
+    test/apitester/TesterKeyValueStore.h
+    test/apitester/TesterOptions.h
+    test/apitester/TesterScheduler.cpp
+    test/apitester/TesterScheduler.h
+    test/apitester/TesterTransactionExecutor.cpp
+    test/apitester/TesterTransactionExecutor.h
+    test/apitester/TesterUtil.cpp
+    test/apitester/TesterUtil.h
+    test/apitester/TesterWorkload.cpp
+    test/apitester/TesterWorkload.h
+    ../../flow/SimpleOpt.h
+  )
+
   if(OPEN_FOR_IDE)
     add_library(fdb_c_performance_test OBJECT test/performance_test.c test/test.h)
     add_library(fdb_c_ryw_benchmark OBJECT test/ryw_benchmark.c test/test.h)
@@ -109,23 +130,28 @@ if(NOT WIN32 AND NOT IS_ARM_MAC)
     add_library(fdb_c_unit_tests_version_510 OBJECT ${UNIT_TEST_VERSION_510_SRCS})
     add_library(trace_partial_file_suffix_test OBJECT ${TRACE_PARTIAL_FILE_SUFFIX_TEST_SRCS})
     add_library(disconnected_timeout_unit_tests OBJECT ${DISCONNECTED_TIMEOUT_UNIT_TEST_SRCS})
+    add_library(fdb_c_api_tester OBJECT ${API_TESTER_SRCS})
   else()
     add_executable(fdb_c_performance_test test/performance_test.c test/test.h)
     add_executable(fdb_c_ryw_benchmark test/ryw_benchmark.c test/test.h)
     add_executable(fdb_c_txn_size_test test/txn_size_test.c test/test.h)
+    add_executable(fdb_c_client_memory_test test/client_memory_test.cpp test/unit/fdb_api.cpp test/unit/fdb_api.hpp)
     add_executable(mako ${MAKO_SRCS})
     add_executable(fdb_c_setup_tests test/unit/setup_tests.cpp)
     add_executable(fdb_c_unit_tests ${UNIT_TEST_SRCS})
     add_executable(fdb_c_unit_tests_version_510 ${UNIT_TEST_VERSION_510_SRCS})
     add_executable(trace_partial_file_suffix_test ${TRACE_PARTIAL_FILE_SUFFIX_TEST_SRCS})
     add_executable(disconnected_timeout_unit_tests ${DISCONNECTED_TIMEOUT_UNIT_TEST_SRCS})
+    add_executable(fdb_c_api_tester ${API_TESTER_SRCS})
     strip_debug_symbols(fdb_c_performance_test)
     strip_debug_symbols(fdb_c_ryw_benchmark)
     strip_debug_symbols(fdb_c_txn_size_test)
+    strip_debug_symbols(fdb_c_client_memory_test)
   endif()
-  target_link_libraries(fdb_c_performance_test PRIVATE fdb_c)
-  target_link_libraries(fdb_c_ryw_benchmark PRIVATE fdb_c)
-  target_link_libraries(fdb_c_txn_size_test PRIVATE fdb_c)
+  target_link_libraries(fdb_c_performance_test PRIVATE fdb_c Threads::Threads)
+  target_link_libraries(fdb_c_ryw_benchmark PRIVATE fdb_c Threads::Threads)
+  target_link_libraries(fdb_c_txn_size_test PRIVATE fdb_c Threads::Threads)
+  target_link_libraries(fdb_c_client_memory_test PRIVATE fdb_c Threads::Threads)

   add_dependencies(fdb_c_setup_tests doctest)
   add_dependencies(fdb_c_unit_tests doctest)
@@ -136,14 +162,20 @@ if(NOT WIN32 AND NOT IS_ARM_MAC)
   target_include_directories(fdb_c_unit_tests_version_510 PUBLIC ${DOCTEST_INCLUDE_DIR})
   target_include_directories(disconnected_timeout_unit_tests PUBLIC ${DOCTEST_INCLUDE_DIR})
   target_link_libraries(fdb_c_setup_tests PRIVATE fdb_c Threads::Threads)
-  target_link_libraries(fdb_c_unit_tests PRIVATE fdb_c Threads::Threads)
+  target_link_libraries(fdb_c_unit_tests PRIVATE fdb_c Threads::Threads fdbclient)
   target_link_libraries(fdb_c_unit_tests_version_510 PRIVATE fdb_c Threads::Threads)
-  target_link_libraries(trace_partial_file_suffix_test PRIVATE fdb_c Threads::Threads)
+  target_link_libraries(trace_partial_file_suffix_test PRIVATE fdb_c Threads::Threads flow)
   target_link_libraries(disconnected_timeout_unit_tests PRIVATE fdb_c Threads::Threads)

+  if(USE_SANITIZER)
+    target_link_libraries(fdb_c_api_tester PRIVATE fdb_c toml11_target Threads::Threads fmt::fmt boost_asan)
+  else()
+    target_link_libraries(fdb_c_api_tester PRIVATE fdb_c toml11_target Threads::Threads fmt::fmt boost_target)
+  endif()
+
   # do not set RPATH for mako
   set_property(TARGET mako PROPERTY SKIP_BUILD_RPATH TRUE)
-  target_link_libraries(mako PRIVATE fdb_c)
+  target_link_libraries(mako PRIVATE fdb_c fdbclient)

   if(NOT OPEN_FOR_IDE)
     # Make sure that fdb_c.h is compatible with c90
@@ -166,6 +198,7 @@ if(NOT WIN32 AND NOT IS_ARM_MAC)
   add_custom_target(external_client DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/libfdb_c_external.so)
   add_dependencies(fdb_c_unit_tests external_client)
   add_dependencies(disconnected_timeout_unit_tests external_client)
+  add_dependencies(fdb_c_api_tester external_client)

   add_fdbclient_test(
     NAME fdb_c_setup_tests
@@ -203,6 +236,19 @@ if(NOT WIN32 AND NOT IS_ARM_MAC)
     @CLUSTER_FILE@
     ${CMAKE_CURRENT_BINARY_DIR}/libfdb_c_external.so
   )
+  add_fdbclient_test(
+    NAME fdb_c_api_tests
+    DISABLE_LOG_DUMP
+    COMMAND ${CMAKE_SOURCE_DIR}/bindings/c/test/apitester/run_c_api_tests.py
+            --cluster-file
+            @CLUSTER_FILE@
+            --tester-binary
+            $<TARGET_FILE:fdb_c_api_tester>
+            --external-client-library
+            ${CMAKE_CURRENT_BINARY_DIR}/libfdb_c_external.so
+            --test-dir
+            ${CMAKE_SOURCE_DIR}/bindings/c/test/apitester/tests
+  )
 endif()

 set(c_workloads_srcs
@@ -1,64 +0,0 @@
/*
 * ThreadCleanup.cpp
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "flow/Platform.h"
#include "flow/FastAlloc.h"

#if defined(WIN32)

#include <Windows.h>

BOOL WINAPI DllMain(HINSTANCE dll, DWORD reason, LPVOID reserved) {

    if (reason == DLL_THREAD_DETACH)
        releaseAllThreadMagazines();
    return TRUE;
}

#elif defined(__unixish__)

#ifdef __INTEL_COMPILER
#pragma warning(disable : 2415)
#endif

static pthread_key_t threadDestructorKey;

static void threadDestructor(void*) {
    releaseAllThreadMagazines();
}

void registerThread() {
    pthread_setspecific(threadDestructorKey, (const void*)1);
}

static int initThreadDestructorKey() {
    if (!pthread_key_create(&threadDestructorKey, &threadDestructor)) {
        registerThread();
        setFastAllocatorThreadInitFunction(&registerThread);
    }

    return 0;
}

static int threadDestructorKeyInit = initThreadDestructorKey();

#else
#error Port me!
#endif
@@ -3,7 +3,7 @@
 *
 * This source file is part of the FoundationDB open source project
 *
- * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
+ * Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -37,12 +37,14 @@ int g_api_version = 0;
 *  FDBFuture -> ThreadSingleAssignmentVarBase
 *  FDBResult -> ThreadSingleAssignmentVarBase
 *  FDBDatabase -> IDatabase
+*  FDBTenant -> ITenant
 *  FDBTransaction -> ITransaction
 */
 #define TSAVB(f) ((ThreadSingleAssignmentVarBase*)(f))
 #define TSAV(T, f) ((ThreadSingleAssignmentVar<T>*)(f))

 #define DB(d) ((IDatabase*)d)
+#define TENANT(t) ((ITenant*)t)
 #define TXN(t) ((ITransaction*)t)

 // Legacy (pre API version 610)
@@ -76,7 +78,8 @@ extern "C" DLLEXPORT fdb_bool_t fdb_error_predicate(int predicate_test, fdb_erro
        return code == error_code_not_committed || code == error_code_transaction_too_old ||
               code == error_code_future_version || code == error_code_database_locked ||
               code == error_code_proxy_memory_limit_exceeded || code == error_code_batch_transaction_throttled ||
-              code == error_code_process_behind || code == error_code_tag_throttled;
+              code == error_code_process_behind || code == error_code_tag_throttled ||
+              code == error_code_unknown_tenant;
    }
    return false;
 }
@@ -280,6 +283,16 @@ fdb_error_t fdb_future_get_keyvalue_array_v13(FDBFuture* f, FDBKeyValue const**
                     *out_count = rrr.size(););
 }

+extern "C" DLLEXPORT fdb_error_t fdb_future_get_mappedkeyvalue_array(FDBFuture* f,
+                                                                     FDBMappedKeyValue const** out_kvm,
+                                                                     int* out_count,
+                                                                     fdb_bool_t* out_more) {
+	CATCH_AND_RETURN(Standalone<MappedRangeResultRef> rrr = TSAV(Standalone<MappedRangeResultRef>, f)->get();
+	                 *out_kvm = (FDBMappedKeyValue*)rrr.begin();
+	                 *out_count = rrr.size();
+	                 *out_more = rrr.more;);
+}
+
 extern "C" DLLEXPORT fdb_error_t fdb_future_get_string_array(FDBFuture* f, const char*** out_strings, int* out_count) {
	CATCH_AND_RETURN(Standalone<VectorRef<const char*>> na = TSAV(Standalone<VectorRef<const char*>>, f)->get();
	                 *out_strings = (const char**)na.begin();
@@ -375,6 +388,14 @@ extern "C" DLLEXPORT void fdb_database_destroy(FDBDatabase* d) {
	CATCH_AND_DIE(DB(d)->delref(););
 }

+extern "C" DLLEXPORT fdb_error_t fdb_database_open_tenant(FDBDatabase* d,
+                                                           uint8_t const* tenant_name,
+                                                           int tenant_name_length,
+                                                           FDBTenant** out_tenant) {
+	CATCH_AND_RETURN(*out_tenant =
+	                     (FDBTenant*)DB(d)->openTenant(TenantNameRef(tenant_name, tenant_name_length)).extractPtr(););
+}
+
 extern "C" DLLEXPORT fdb_error_t fdb_database_create_transaction(FDBDatabase* d, FDBTransaction** out_transaction) {
	CATCH_AND_RETURN(Reference<ITransaction> tr = DB(d)->createTransaction();
	                 if (g_api_version <= 15) tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
@@ -428,6 +449,17 @@ extern "C" DLLEXPORT FDBFuture* fdb_database_get_server_protocol(FDBDatabase* db
	    }).extractPtr());
 }

+extern "C" DLLEXPORT fdb_error_t fdb_tenant_create_transaction(FDBTenant* tenant, FDBTransaction** out_transaction) {
+	CATCH_AND_RETURN(*out_transaction = (FDBTransaction*)TENANT(tenant)->createTransaction().extractPtr(););
+}
+
+extern "C" DLLEXPORT void fdb_tenant_destroy(FDBTenant* tenant) {
+	try {
+		TENANT(tenant)->delref();
+	} catch (...) {
+	}
+}
+
 extern "C" DLLEXPORT void fdb_transaction_destroy(FDBTransaction* tr) {
	try {
		TXN(tr)->delref();
@@ -570,29 +602,29 @@ FDBFuture* fdb_transaction_get_range_impl(FDBTransaction* tr,
	        .extractPtr());
 }

-FDBFuture* fdb_transaction_get_range_and_flat_map_impl(FDBTransaction* tr,
-                                                        uint8_t const* begin_key_name,
-                                                        int begin_key_name_length,
-                                                        fdb_bool_t begin_or_equal,
-                                                        int begin_offset,
-                                                        uint8_t const* end_key_name,
-                                                        int end_key_name_length,
-                                                        fdb_bool_t end_or_equal,
-                                                        int end_offset,
-                                                        uint8_t const* mapper_name,
-                                                        int mapper_name_length,
-                                                        int limit,
-                                                        int target_bytes,
-                                                        FDBStreamingMode mode,
-                                                        int iteration,
-                                                        fdb_bool_t snapshot,
-                                                        fdb_bool_t reverse) {
+FDBFuture* fdb_transaction_get_mapped_range_impl(FDBTransaction* tr,
+                                                 uint8_t const* begin_key_name,
+                                                 int begin_key_name_length,
+                                                 fdb_bool_t begin_or_equal,
+                                                 int begin_offset,
+                                                 uint8_t const* end_key_name,
+                                                 int end_key_name_length,
+                                                 fdb_bool_t end_or_equal,
+                                                 int end_offset,
+                                                 uint8_t const* mapper_name,
+                                                 int mapper_name_length,
+                                                 int limit,
+                                                 int target_bytes,
+                                                 FDBStreamingMode mode,
+                                                 int iteration,
+                                                 fdb_bool_t snapshot,
+                                                 fdb_bool_t reverse) {
	FDBFuture* r = validate_and_update_parameters(limit, target_bytes, mode, iteration, reverse);
	if (r != nullptr)
		return r;
	return (
	    FDBFuture*)(TXN(tr)
-	                    ->getRangeAndFlatMap(
+	                    ->getMappedRange(
	                        KeySelectorRef(KeyRef(begin_key_name, begin_key_name_length), begin_or_equal, begin_offset),
	                        KeySelectorRef(KeyRef(end_key_name, end_key_name_length), end_or_equal, end_offset),
	                        StringRef(mapper_name, mapper_name_length),
@@ -603,23 +635,23 @@ FDBFuture* fdb_transaction_get_mapped_range_impl(FDBTransaction* tr,
 }

 // TODO: Support FDB_API_ADDED in generate_asm.py and then this can be replaced with fdb_api_ptr_unimpl.
-FDBFuture* fdb_transaction_get_range_and_flat_map_v699(FDBTransaction* tr,
-                                                        uint8_t const* begin_key_name,
-                                                        int begin_key_name_length,
-                                                        fdb_bool_t begin_or_equal,
-                                                        int begin_offset,
-                                                        uint8_t const* end_key_name,
-                                                        int end_key_name_length,
-                                                        fdb_bool_t end_or_equal,
-                                                        int end_offset,
-                                                        uint8_t const* mapper_name,
-                                                        int mapper_name_length,
-                                                        int limit,
-                                                        int target_bytes,
-                                                        FDBStreamingMode mode,
-                                                        int iteration,
-                                                        fdb_bool_t snapshot,
-                                                        fdb_bool_t reverse) {
+FDBFuture* fdb_transaction_get_mapped_range_v699(FDBTransaction* tr,
+                                                 uint8_t const* begin_key_name,
+                                                 int begin_key_name_length,
+                                                 fdb_bool_t begin_or_equal,
+                                                 int begin_offset,
+                                                 uint8_t const* end_key_name,
+                                                 int end_key_name_length,
+                                                 fdb_bool_t end_or_equal,
+                                                 int end_offset,
+                                                 uint8_t const* mapper_name,
+                                                 int mapper_name_length,
+                                                 int limit,
+                                                 int target_bytes,
+                                                 FDBStreamingMode mode,
+                                                 int iteration,
+                                                 fdb_bool_t snapshot,
+                                                 fdb_bool_t reverse) {
	fprintf(stderr, "UNIMPLEMENTED FDB API FUNCTION\n");
	abort();
 }
@@ -803,9 +835,10 @@ extern "C" DLLEXPORT FDBResult* fdb_transaction_read_blob_granules(FDBTransactio
	                 context.get_load_f = granule_context.get_load_f;
	                 context.free_load_f = granule_context.free_load_f;
	                 context.debugNoMaterialize = granule_context.debugNoMaterialize;
+	                 context.granuleParallelism = granule_context.granuleParallelism;

	                 Optional<Version> rv;
-	                 if (readVersion != invalidVersion) { rv = readVersion; }
+	                 if (readVersion != latestVersion) { rv = readVersion; }

	                 return (FDBResult*)(TXN(tr)->readBlobGranules(range, beginVersion, rv, context).extractPtr()););
 }
@@ -856,7 +889,7 @@ extern "C" DLLEXPORT fdb_error_t fdb_select_api_version_impl(int runtime_version
 // WARNING: use caution when implementing removed functions by calling public API functions. This can lead to
 // undesired behavior when using the multi-version API. Instead, it is better to have both the removed and public
 // functions call an internal implementation function. See fdb_create_database_impl for an example.
-FDB_API_CHANGED(fdb_transaction_get_range_and_flat_map, 700);
+FDB_API_CHANGED(fdb_transaction_get_mapped_range, 700);
 FDB_API_REMOVED(fdb_future_get_version, 620);
 FDB_API_REMOVED(fdb_create_cluster, 610);
 FDB_API_REMOVED(fdb_cluster_create_database, 610);
@@ -3,7 +3,7 @@
 *
 * This source file is part of the FoundationDB open source project
 *
- * Copyright 2013-2019 Apple Inc. and the FoundationDB project authors
+ * Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -3,7 +3,7 @@
 *
 * This source file is part of the FoundationDB open source project
 *
- * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
+ * Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -67,6 +67,7 @@ extern "C" {
 typedef struct FDB_future FDBFuture;
 typedef struct FDB_result FDBResult;
 typedef struct FDB_database FDBDatabase;
+typedef struct FDB_tenant FDBTenant;
 typedef struct FDB_transaction FDBTransaction;

 typedef int fdb_error_t;
@@ -113,6 +114,64 @@ typedef struct keyvalue {
	int value_length;
 } FDBKeyValue;
 #endif
+
+#pragma pack(pop)
+
+/* Memory layout of KeySelectorRef. */
+typedef struct keyselector {
+	FDBKey key;
+	/* orEqual and offset have not be tested in C binding. Just a placeholder. */
+	fdb_bool_t orEqual;
+	int offset;
+} FDBKeySelector;
+
+/* Memory layout of GetRangeReqAndResultRef. */
+typedef struct getrangereqandresult {
+	FDBKeySelector begin;
+	FDBKeySelector end;
+	FDBKeyValue* data;
+	int m_size, m_capacity;
+} FDBGetRangeReqAndResult;
+
+/* Memory layout of MappedKeyValueRef.
+
+Total 112 bytes
+- key (12 bytes)
+:74:8F:8E:5F:AE:7F:00:00
+:4A:00:00:00
+- value (12 bytes)
+:70:8F:8E:5F:AE:7F:00:00
+:00:00:00:00
+- begin selector (20 bytes)
+:30:8F:8E:5F:AE:7F:00:00
+:2D:00:00:00
+:00:7F:00:00
+:01:00:00:00
+- end selector (20 bytes)
+:EC:8E:8E:5F:AE:7F:00:00
+:2D:00:00:00
+:00:2B:3C:60
+:01:00:00:00
+- vector (16 bytes)
+:74:94:8E:5F:AE:7F:00:00
+:01:00:00:00
+:01:00:00:00
+- buffer (32 bytes)
+:00:20:D1:61:00:00:00:00
+:00:00:00:00:00:00:00:00
+:00:00:00:00:00:00:00:00
+:01:00:00:00:AE:7F:00:00
+*/
+typedef struct mappedkeyvalue {
+	FDBKey key;
+	FDBKey value;
+	/* It's complicated to map a std::variant to C. For now we assume the underlying requests are always getRange and
+	 * take the shortcut. */
+	FDBGetRangeReqAndResult getRange;
+	unsigned char buffer[32];
+} FDBMappedKeyValue;
+
+#pragma pack(push, 4)
 typedef struct keyrange {
	const uint8_t* begin_key;
	int begin_key_length;
@@ -126,7 +185,12 @@ typedef struct readgranulecontext {
	void* userContext;

	/* Returns a unique id for the load. Asynchronous to support queueing multiple in parallel. */
-	int64_t (*start_load_f)(const char* filename, int filenameLength, int64_t offset, int64_t length, void* context);
+	int64_t (*start_load_f)(const char* filename,
+	                        int filenameLength,
+	                        int64_t offset,
+	                        int64_t length,
+	                        int64_t fullFileLength,
+	                        void* context);

	/* Returns data for the load. Pass the loadId returned by start_load_f */
	uint8_t* (*get_load_f)(int64_t loadId, void* context);
@@ -137,6 +201,9 @@ typedef struct readgranulecontext {
	/* Set this to true for testing if you don't want to read the granule files,
	   just do the request to the blob workers */
	fdb_bool_t debugNoMaterialize;
+
+	/* Number of granules to load in parallel */
+	int granuleParallelism;
 } FDBReadBlobGranuleContext;

 DLLEXPORT void fdb_future_cancel(FDBFuture* f);
@@ -176,6 +243,12 @@ DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_future_get_keyvalue_array(FDBFuture
                                                                        int* out_count,
                                                                        fdb_bool_t* out_more);
 #endif

+DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_future_get_mappedkeyvalue_array(FDBFuture* f,
+                                                                             FDBMappedKeyValue const** out_kv,
+                                                                             int* out_count,
+                                                                             fdb_bool_t* out_more);
+
 DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_future_get_key_array(FDBFuture* f,
                                                                   FDBKey const** out_key_array,
                                                                   int* out_count);
@@ -207,6 +280,11 @@ DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_database_set_option(FDBDatabase* d,
                                                                  uint8_t const* value,
                                                                  int value_length);

+DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_database_open_tenant(FDBDatabase* d,
+                                                                  uint8_t const* tenant_name,
+                                                                  int tenant_name_length,
+                                                                  FDBTenant** out_tenant);
+
 DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_database_create_transaction(FDBDatabase* d,
                                                                          FDBTransaction** out_transaction);

@@ -230,6 +308,11 @@ DLLEXPORT WARN_UNUSED_RESULT double fdb_database_get_main_thread_busyness(FDBDat

 DLLEXPORT WARN_UNUSED_RESULT FDBFuture* fdb_database_get_server_protocol(FDBDatabase* db, uint64_t expected_version);

+DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_tenant_create_transaction(FDBTenant* tenant,
+                                                                       FDBTransaction** out_transaction);
+
+DLLEXPORT void fdb_tenant_destroy(FDBTenant* tenant);
+
 DLLEXPORT void fdb_transaction_destroy(FDBTransaction* tr);

 DLLEXPORT void fdb_transaction_cancel(FDBTransaction* tr);
@@ -283,23 +366,23 @@ DLLEXPORT WARN_UNUSED_RESULT FDBFuture* fdb_transaction_get_range(FDBTransaction
                                                                   fdb_bool_t reverse);
 #endif

-DLLEXPORT WARN_UNUSED_RESULT FDBFuture* fdb_transaction_get_range_and_flat_map(FDBTransaction* tr,
-                                                                               uint8_t const* begin_key_name,
-                                                                               int begin_key_name_length,
-                                                                               fdb_bool_t begin_or_equal,
-                                                                               int begin_offset,
-                                                                               uint8_t const* end_key_name,
-                                                                               int end_key_name_length,
-                                                                               fdb_bool_t end_or_equal,
-                                                                               int end_offset,
-                                                                               uint8_t const* mapper_name,
-                                                                               int mapper_name_length,
-                                                                               int limit,
-                                                                               int target_bytes,
-                                                                               FDBStreamingMode mode,
-                                                                               int iteration,
-                                                                               fdb_bool_t snapshot,
-                                                                               fdb_bool_t reverse);
+DLLEXPORT WARN_UNUSED_RESULT FDBFuture* fdb_transaction_get_mapped_range(FDBTransaction* tr,
+                                                                         uint8_t const* begin_key_name,
+                                                                         int begin_key_name_length,
+                                                                         fdb_bool_t begin_or_equal,
+                                                                         int begin_offset,
+                                                                         uint8_t const* end_key_name,
+                                                                         int end_key_name_length,
+                                                                         fdb_bool_t end_or_equal,
+                                                                         int end_offset,
+                                                                         uint8_t const* mapper_name,
+                                                                         int mapper_name_length,
+                                                                         int limit,
+                                                                         int target_bytes,
+                                                                         FDBStreamingMode mode,
+                                                                         int iteration,
+                                                                         fdb_bool_t snapshot,
+                                                                         fdb_bool_t reverse);

 DLLEXPORT void fdb_transaction_set(FDBTransaction* tr,
                                    uint8_t const* key_name,
@@ -372,7 +455,7 @@ DLLEXPORT WARN_UNUSED_RESULT FDBFuture* fdb_transaction_get_blob_granule_ranges(
                                                                                 uint8_t const* end_key_name,
                                                                                 int end_key_name_length);

-/* InvalidVersion (-1) for readVersion means get read version from transaction
+/* LatestVersion (-2) for readVersion means get read version from transaction
   Separated out as optional because BG reads can support longer-lived reads than normal FDB transactions */
 DLLEXPORT WARN_UNUSED_RESULT FDBResult* fdb_transaction_read_blob_granules(FDBTransaction* db,
                                                                            uint8_t const* begin_key_name,
@@ -0,0 +1,129 @@
/*
 * TesterApiWorkload.cpp
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "TesterApiWorkload.h"
#include "TesterUtil.h"
#include <fmt/format.h>

namespace FdbApiTester {

ApiWorkload::ApiWorkload(const WorkloadConfig& config) : WorkloadBase(config) {
    minKeyLength = config.getIntOption("minKeyLength", 1);
    maxKeyLength = config.getIntOption("maxKeyLength", 64);
    minValueLength = config.getIntOption("minValueLength", 1);
    maxValueLength = config.getIntOption("maxValueLength", 1000);
    maxKeysPerTransaction = config.getIntOption("maxKeysPerTransaction", 50);
    initialSize = config.getIntOption("initialSize", 1000);
    readExistingKeysRatio = config.getFloatOption("readExistingKeysRatio", 0.9);
    keyPrefix = fmt::format("{}/", workloadId);
}

void ApiWorkload::start() {
    schedule([this]() {
        // 1. Clear data
        clearData([this]() {
            // 2. Populate initial data
            populateData([this]() {
                // 3. Generate random workload
                runTests();
            });
        });
    });
}

std::string ApiWorkload::randomKeyName() {
    return keyPrefix + Random::get().randomStringLowerCase(minKeyLength, maxKeyLength);
}

std::string ApiWorkload::randomValue() {
    return Random::get().randomStringLowerCase(minValueLength, maxValueLength);
}

std::string ApiWorkload::randomNotExistingKey() {
    while (true) {
        std::string key = randomKeyName();
        if (!store.exists(key)) {
            return key;
        }
    }
}

std::string ApiWorkload::randomExistingKey() {
    std::string genKey = randomKeyName();
    std::string key = store.getKey(genKey, true, 1);
    if (key != store.endKey()) {
        return key;
    }
    key = store.getKey(genKey, true, 0);
    if (key != store.startKey()) {
        return key;
    }
    info("No existing key found, using a new random key.");
    return genKey;
}

std::string ApiWorkload::randomKey(double existingKeyRatio) {
    if (Random::get().randomBool(existingKeyRatio)) {
        return randomExistingKey();
    } else {
        return randomNotExistingKey();
    }
}

void ApiWorkload::populateDataTx(TTaskFct cont) {
    int numKeys = maxKeysPerTransaction;
    auto kvPairs = std::make_shared<std::vector<KeyValue>>();
    for (int i = 0; i < numKeys; i++) {
        kvPairs->push_back(KeyValue{ randomNotExistingKey(), randomValue() });
    }
    execTransaction(
        [kvPairs](auto ctx) {
            for (const KeyValue& kv : *kvPairs) {
                ctx->tx()->set(kv.key, kv.value);
            }
            ctx->commit();
        },
        [this, kvPairs, cont]() {
            for (const KeyValue& kv : *kvPairs) {
                store.set(kv.key, kv.value);
            }
            schedule(cont);
        });
}

void ApiWorkload::clearData(TTaskFct cont) {
    execTransaction(
        [this](auto ctx) {
            ctx->tx()->clearRange(keyPrefix, fmt::format("{}\xff", keyPrefix));
            ctx->commit();
        },
        [this, cont]() { schedule(cont); });
}

void ApiWorkload::populateData(TTaskFct cont) {
    if (store.size() < initialSize) {
        populateDataTx([this, cont]() { populateData(cont); });
    } else {
        info("Data population completed");
        schedule(cont);
    }
}

} // namespace FdbApiTester
@@ -0,0 +1,89 @@
/*
 * TesterApiWorkload.h
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef APITESTER_API_WORKLOAD_H
#define APITESTER_API_WORKLOAD_H

#include "TesterWorkload.h"
#include "TesterKeyValueStore.h"

namespace FdbApiTester {

/**
 * Base class for implementing API testing workloads.
 * Provides various helper methods and reusable configuration parameters
 */
class ApiWorkload : public WorkloadBase {
public:
    void start() override;

    // Method to be overridden to run specific tests
    virtual void runTests() = 0;

protected:
    // The minimum length of a key
    int minKeyLength;

    // The maximum length of a key
    int maxKeyLength;

    // The minimum length of a value
    int minValueLength;

    // The maximum length of a value
    int maxValueLength;

    // Maximum number of keys to be accessed by a transaction
    int maxKeysPerTransaction;

    // Initial data size (number of key-value pairs)
    int initialSize;

    // The ratio of reading existing keys
    double readExistingKeysRatio;

    // Key prefix
    std::string keyPrefix;

    // In-memory store maintaining expected database state
    KeyValueStore store;

    ApiWorkload(const WorkloadConfig& config);

    // Methods for generating random keys and values
    std::string randomKeyName();
    std::string randomValue();
    std::string randomNotExistingKey();
    std::string randomExistingKey();
    std::string randomKey(double existingKeyRatio);

    // Generate initial random data for the workload
    void populateData(TTaskFct cont);

    // Clear the data of the workload
    void clearData(TTaskFct cont);

private:
    void populateDataTx(TTaskFct cont);
};

} // namespace FdbApiTester

#endif
@@ -0,0 +1,124 @@
/*
 * TesterApiWrapper.cpp
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "TesterApiWrapper.h"
#include "TesterUtil.h"
#include <cstdint>
#include <fmt/format.h>

namespace FdbApiTester {

namespace {

void fdb_check(fdb_error_t e) {
    if (e) {
        fmt::print(stderr, "Unexpected error: %s\n", fdb_get_error(e));
        std::abort();
    }
}

} // namespace

Future::Future(FDBFuture* f) : future_(f, fdb_future_destroy) {}

void Future::reset() {
    future_.reset();
}

void Future::cancel() {
    ASSERT(future_);
    fdb_future_cancel(future_.get());
}

fdb_error_t Future::getError() const {
    ASSERT(future_);
    return fdb_future_get_error(future_.get());
}

std::optional<std::string> ValueFuture::getValue() const {
    ASSERT(future_);
    int out_present;
    const std::uint8_t* val;
    int vallen;
    fdb_check(fdb_future_get_value(future_.get(), &out_present, &val, &vallen));
    return out_present ? std::make_optional(std::string((const char*)val, vallen)) : std::nullopt;
}

// Given an FDBDatabase, initializes a new transaction.
Transaction::Transaction(FDBTransaction* tx) : tx_(tx, fdb_transaction_destroy) {}

ValueFuture Transaction::get(std::string_view key, fdb_bool_t snapshot) {
    ASSERT(tx_);
    return ValueFuture(fdb_transaction_get(tx_.get(), (const uint8_t*)key.data(), key.size(), snapshot));
}

void Transaction::set(std::string_view key, std::string_view value) {
    ASSERT(tx_);
    fdb_transaction_set(tx_.get(), (const uint8_t*)key.data(), key.size(), (const uint8_t*)value.data(), value.size());
}

void Transaction::clear(std::string_view key) {
    ASSERT(tx_);
    fdb_transaction_clear(tx_.get(), (const uint8_t*)key.data(), key.size());
}

void Transaction::clearRange(std::string_view begin, std::string_view end) {
    ASSERT(tx_);
    fdb_transaction_clear_range(
        tx_.get(), (const uint8_t*)begin.data(), begin.size(), (const uint8_t*)end.data(), end.size());
}

Future Transaction::commit() {
    ASSERT(tx_);
    return Future(fdb_transaction_commit(tx_.get()));
}

void Transaction::cancel() {
    ASSERT(tx_);
    fdb_transaction_cancel(tx_.get());
}

Future Transaction::onError(fdb_error_t err) {
    ASSERT(tx_);
    return Future(fdb_transaction_on_error(tx_.get(), err));
}

void Transaction::reset() {
    ASSERT(tx_);
    fdb_transaction_reset(tx_.get());
}

fdb_error_t Transaction::setOption(FDBTransactionOption option) {
    ASSERT(tx_);
    return fdb_transaction_set_option(tx_.get(), option, reinterpret_cast<const uint8_t*>(""), 0);
}

fdb_error_t FdbApi::setOption(FDBNetworkOption option, std::string_view value) {
    return fdb_network_set_option(option, reinterpret_cast<const uint8_t*>(value.data()), value.size());
}

fdb_error_t FdbApi::setOption(FDBNetworkOption option, int64_t value) {
    return fdb_network_set_option(option, reinterpret_cast<const uint8_t*>(&value), sizeof(value));
}

fdb_error_t FdbApi::setOption(FDBNetworkOption option) {
    return fdb_network_set_option(option, reinterpret_cast<const uint8_t*>(""), 0);
}

} // namespace FdbApiTester
@@ -0,0 +1,92 @@
/*
 * TesterApiWrapper.h
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#ifndef APITESTER_API_WRAPPER_H
#define APITESTER_API_WRAPPER_H

#include <string_view>
#include <optional>
#include <memory>

#define FDB_API_VERSION 710
#include "bindings/c/foundationdb/fdb_c.h"

#undef ERROR
#define ERROR(name, number, description) enum { error_code_##name = number };

#include "flow/error_definitions.h"

namespace FdbApiTester {

// Wrapper parent class to manage memory of an FDBFuture pointer. Cleans up
// FDBFuture when this instance goes out of scope.
class Future {
public:
    Future() = default;
    Future(FDBFuture* f);

    FDBFuture* fdbFuture() { return future_.get(); };

    fdb_error_t getError() const;
    explicit operator bool() const { return future_ != nullptr; };
    void reset();
    void cancel();

protected:
    std::shared_ptr<FDBFuture> future_;
};

class ValueFuture : public Future {
public:
    ValueFuture() = default;
    ValueFuture(FDBFuture* f) : Future(f) {}
    std::optional<std::string> getValue() const;
};

class Transaction {
public:
    Transaction() = default;
    Transaction(FDBTransaction* tx);
    ValueFuture get(std::string_view key, fdb_bool_t snapshot);
    void set(std::string_view key, std::string_view value);
    void clear(std::string_view key);
    void clearRange(std::string_view begin, std::string_view end);
    Future commit();
    void cancel();
    Future onError(fdb_error_t err);
    void reset();
    fdb_error_t setOption(FDBTransactionOption option);

private:
    std::shared_ptr<FDBTransaction> tx_;
};

class FdbApi {
public:
    static fdb_error_t setOption(FDBNetworkOption option, std::string_view value);
    static fdb_error_t setOption(FDBNetworkOption option, int64_t value);
    static fdb_error_t setOption(FDBNetworkOption option);
};

} // namespace FdbApiTester

#endif
@@ -0,0 +1,113 @@
/*
 * TesterCancelTransactionWorkload.cpp
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "TesterApiWorkload.h"
#include "TesterUtil.h"

namespace FdbApiTester {

class CancelTransactionWorkload : public ApiWorkload {
public:
    CancelTransactionWorkload(const WorkloadConfig& config) : ApiWorkload(config) {
        numRandomOperations = config.getIntOption("numRandomOperations", 1000);
        numOpLeft = numRandomOperations;
    }

    void runTests() override { randomOperations(); }

private:
    enum OpType { OP_CANCEL_GET, OP_CANCEL_AFTER_FIRST_GET, OP_LAST = OP_CANCEL_AFTER_FIRST_GET };

    // The number of operations to be executed
    int numRandomOperations;

    // Operations counter
    int numOpLeft;

    // Start multiple concurrent gets and cancel the transaction
    void randomCancelGetTx(TTaskFct cont) {
        int numKeys = Random::get().randomInt(1, maxKeysPerTransaction);
        auto keys = std::make_shared<std::vector<std::string>>();
        for (int i = 0; i < numKeys; i++) {
            keys->push_back(randomKey(readExistingKeysRatio));
        }
        execTransaction(
            [keys](auto ctx) {
                std::vector<Future> futures;
                for (const auto& key : *keys) {
                    futures.push_back(ctx->tx()->get(key, false));
                }
                ctx->done();
            },
            [this, cont]() { schedule(cont); });
    }

    // Start multiple concurrent gets and cancel the transaction after the first get returns
    void randomCancelAfterFirstResTx(TTaskFct cont) {
        int numKeys = Random::get().randomInt(1, maxKeysPerTransaction);
        auto keys = std::make_shared<std::vector<std::string>>();
        for (int i = 0; i < numKeys; i++) {
            keys->push_back(randomKey(readExistingKeysRatio));
        }
        execTransaction(
            [this, keys](auto ctx) {
                std::vector<ValueFuture> futures;
                for (const auto& key : *keys) {
                    futures.push_back(ctx->tx()->get(key, false));
                }
                for (int i = 0; i < keys->size(); i++) {
                    ValueFuture f = futures[i];
                    auto expectedVal = store.get((*keys)[i]);
                    ctx->continueAfter(f, [expectedVal, f, this, ctx]() {
                        auto val = f.getValue();
                        if (expectedVal != val) {
                            error(fmt::format(
                                "cancelAfterFirstResTx mismatch. expected: {:.80} actual: {:.80}", expectedVal, val));
                        }
                        ctx->done();
                    });
                }
            },
            [this, cont]() { schedule(cont); });
    }

    void randomOperation(TTaskFct cont) {
        OpType txType = (OpType)Random::get().randomInt(0, OP_LAST);
        switch (txType) {
        case OP_CANCEL_GET:
            randomCancelGetTx(cont);
            break;
        case OP_CANCEL_AFTER_FIRST_GET:
            randomCancelAfterFirstResTx(cont);
            break;
        }
    }

    void randomOperations() {
        if (numOpLeft == 0)
            return;

        numOpLeft--;
        randomOperation([this]() { randomOperations(); });
    }
};

WorkloadFactory<CancelTransactionWorkload> MiscTestWorkloadFactory("CancelTransaction");

} // namespace FdbApiTester
@ -0,0 +1,227 @@
|
|||
/*
|
||||
* TesterCorrectnessWorkload.cpp
|
||||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
#include "TesterApiWorkload.h"
|
||||
#include "TesterUtil.h"
|
||||
#include <memory>
|
||||
#include <fmt/format.h>
|
||||
|
||||
namespace FdbApiTester {
|
||||
|
||||
class ApiCorrectnessWorkload : public ApiWorkload {
|
||||
public:
|
||||
ApiCorrectnessWorkload(const WorkloadConfig& config) : ApiWorkload(config) {
|
||||
numRandomOperations = config.getIntOption("numRandomOperations", 1000);
|
||||
numOpLeft = numRandomOperations;
|
||||
}
|
||||
|
||||
void runTests() override { randomOperations(); }
|
||||
|
||||
private:
|
||||
enum OpType { OP_INSERT, OP_GET, OP_CLEAR, OP_CLEAR_RANGE, OP_COMMIT_READ, OP_LAST = OP_COMMIT_READ };
|
||||
|
||||
// The number of operations to be executed
|
||||
int numRandomOperations;
|
||||
|
||||
// Operations counter
|
||||
int numOpLeft;
|
||||
|
||||
void randomInsertOp(TTaskFct cont) {
|
||||
int numKeys = Random::get().randomInt(1, maxKeysPerTransaction);
|
||||
auto kvPairs = std::make_shared<std::vector<KeyValue>>();
|
||||
for (int i = 0; i < numKeys; i++) {
|
||||
kvPairs->push_back(KeyValue{ randomNotExistingKey(), randomValue() });
|
||||
}
|
||||
execTransaction(
|
||||
[kvPairs](auto ctx) {
|
||||
for (const KeyValue& kv : *kvPairs) {
|
||||
ctx->tx()->set(kv.key, kv.value);
|
||||
}
|
||||
ctx->commit();
|
||||
},
|
||||
[this, kvPairs, cont]() {
|
||||
for (const KeyValue& kv : *kvPairs) {
|
||||
store.set(kv.key, kv.value);
|
||||
}
|
||||
schedule(cont);
|
||||
});
|
||||
}
|
||||
|
||||
void randomCommitReadOp(TTaskFct cont) {
|
||||
int numKeys = Random::get().randomInt(1, maxKeysPerTransaction);
|
||||
auto kvPairs = std::make_shared<std::vector<KeyValue>>();
|
||||
for (int i = 0; i < numKeys; i++) {
|
||||
kvPairs->push_back(KeyValue{ randomKey(readExistingKeysRatio), randomValue() });
|
||||
}
|
||||
execTransaction(
|
||||
[kvPairs](auto ctx) {
|
||||
for (const KeyValue& kv : *kvPairs) {
|
||||
ctx->tx()->set(kv.key, kv.value);
|
||||
}
|
||||
ctx->commit();
|
||||
},
|
||||
[this, kvPairs, cont]() {
|
||||
for (const KeyValue& kv : *kvPairs) {
|
||||
store.set(kv.key, kv.value);
|
||||
}
|
||||
auto results = std::make_shared<std::vector<std::optional<std::string>>>();
|
||||
execTransaction(
|
||||
[kvPairs, results](auto ctx) {
|
||||
// TODO: Enable after merging with GRV caching
|
||||
// ctx->tx()->setOption(FDB_TR_OPTION_USE_GRV_CACHE);
|
||||
auto futures = std::make_shared<std::vector<Future>>();
|
||||
for (const auto& kv : *kvPairs) {
|
||||
futures->push_back(ctx->tx()->get(kv.key, false));
|
||||
}
|
||||
ctx->continueAfterAll(*futures, [ctx, futures, results]() {
|
||||
results->clear();
|
||||
for (auto& f : *futures) {
|
||||
results->push_back(((ValueFuture&)f).getValue());
|
||||
}
|
||||
ASSERT(results->size() == futures->size());
|
||||
ctx->done();
|
||||
});
|
||||
},
|
||||
[this, kvPairs, results, cont]() {
|
||||
ASSERT(results->size() == kvPairs->size());
|
||||
for (int i = 0; i < kvPairs->size(); i++) {
|
||||
auto expected = store.get((*kvPairs)[i].key);
|
||||
auto actual = (*results)[i];
|
||||
if (actual != expected) {
|
||||
error(
|
||||
fmt::format("randomCommitReadOp mismatch. key: {} expected: {:.80} actual: {:.80}",
|
||||
(*kvPairs)[i].key,
|
||||
expected,
|
||||
actual));
|
||||
ASSERT(false);
|
||||
}
|
||||
}
|
||||
schedule(cont);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
void randomGetOp(TTaskFct cont) {
|
||||
int numKeys = Random::get().randomInt(1, maxKeysPerTransaction);
|
||||
auto keys = std::make_shared<std::vector<std::string>>();
|
||||
auto results = std::make_shared<std::vector<std::optional<std::string>>>();
|
||||
for (int i = 0; i < numKeys; i++) {
|
||||
keys->push_back(randomKey(readExistingKeysRatio));
|
||||
}
|
||||
execTransaction(
|
||||
[keys, results](auto ctx) {
|
||||
auto futures = std::make_shared<std::vector<Future>>();
|
||||
for (const auto& key : *keys) {
|
||||
futures->push_back(ctx->tx()->get(key, false));
|
||||
}
|
||||
ctx->continueAfterAll(*futures, [ctx, futures, results]() {
|
||||
results->clear();
|
||||
for (auto& f : *futures) {
|
||||
results->push_back(((ValueFuture&)f).getValue());
|
||||
}
|
||||
ASSERT(results->size() == futures->size());
|
||||
ctx->done();
|
||||
});
|
||||
},
|
||||
[this, keys, results, cont]() {
|
||||
ASSERT(results->size() == keys->size());
|
||||
for (int i = 0; i < keys->size(); i++) {
|
||||
auto expected = store.get((*keys)[i]);
|
||||
if ((*results)[i] != expected) {
|
||||
error(fmt::format("randomGetOp mismatch. key: {} expected: {:.80} actual: {:.80}",
|
||||
(*keys)[i],
|
||||
expected,
|
||||
(*results)[i]));
|
||||
}
|
||||
}
|
||||
schedule(cont);
|
||||
});
|
||||
}
|
||||
|
||||
void randomClearOp(TTaskFct cont) {
|
||||
int numKeys = Random::get().randomInt(1, maxKeysPerTransaction);
|
||||
auto keys = std::make_shared<std::vector<std::string>>();
|
||||
for (int i = 0; i < numKeys; i++) {
|
||||
keys->push_back(randomExistingKey());
|
||||
}
|
||||
execTransaction(
|
||||
[keys](auto ctx) {
|
||||
for (const auto& key : *keys) {
|
||||
ctx->tx()->clear(key);
|
||||
}
|
||||
ctx->commit();
|
||||
},
|
||||
[this, keys, cont]() {
|
||||
for (const auto& key : *keys) {
|
||||
store.clear(key);
|
||||
}
|
||||
schedule(cont);
|
||||
});
|
||||
}
|
||||
|
||||
void randomClearRangeOp(TTaskFct cont) {
|
||||
std::string begin = randomKeyName();
|
||||
std::string end = randomKeyName();
|
||||
if (begin > end) {
|
||||
std::swap(begin, end);
|
||||
}
|
||||
execTransaction(
|
||||
[begin, end](auto ctx) {
|
||||
ctx->tx()->clearRange(begin, end);
|
||||
ctx->commit();
|
||||
},
|
||||
[this, begin, end, cont]() {
|
||||
store.clear(begin, end);
|
||||
schedule(cont);
|
||||
});
|
||||
}
|
||||
|
||||
void randomOperation(TTaskFct cont) {
|
||||
OpType txType = (store.size() == 0) ? OP_INSERT : (OpType)Random::get().randomInt(0, OP_LAST);
|
||||
switch (txType) {
|
||||
case OP_INSERT:
|
||||
randomInsertOp(cont);
|
||||
break;
|
||||
case OP_GET:
|
||||
randomGetOp(cont);
|
||||
break;
|
||||
case OP_CLEAR:
|
||||
randomClearOp(cont);
|
||||
break;
|
||||
case OP_CLEAR_RANGE:
|
||||
randomClearRangeOp(cont);
|
||||
break;
|
||||
case OP_COMMIT_READ:
|
||||
randomCommitReadOp(cont);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
void randomOperations() {
|
||||
if (numOpLeft == 0)
|
||||
return;
|
||||
|
||||
numOpLeft--;
|
||||
randomOperation([this]() { randomOperations(); });
|
||||
}
|
||||
};
|
||||
|
||||
WorkloadFactory<ApiCorrectnessWorkload> ApiCorrectnessWorkloadFactory("ApiCorrectness");
|
||||
|
||||
} // namespace FdbApiTester
|
|
@ -0,0 +1,167 @@
|
|||
/*
|
||||
* TesterKeyValueStore.cpp
|
||||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "TesterKeyValueStore.h"
|
||||
|
||||
namespace FdbApiTester {
|
||||
|
||||
// Get the value associated with a key
|
||||
std::optional<std::string> KeyValueStore::get(std::string_view key) const {
|
||||
std::unique_lock<std::mutex> lock(mutex);
|
||||
auto value = store.find(std::string(key));
|
||||
if (value != store.end())
|
||||
return value->second;
|
||||
else
|
||||
return std::optional<std::string>();
|
||||
}
|
||||
|
||||
// Checks if the key exists
|
||||
bool KeyValueStore::exists(std::string_view key) {
|
||||
std::unique_lock<std::mutex> lock(mutex);
|
||||
return (store.find(std::string(key)) != store.end());
|
||||
}
|
||||
|
||||
// Returns the key designated by a key selector
|
||||
std::string KeyValueStore::getKey(std::string_view keyName, bool orEqual, int offset) const {
|
||||
std::unique_lock<std::mutex> lock(mutex);
|
||||
// Begin by getting the start key referenced by the key selector
|
||||
std::map<std::string, std::string>::const_iterator mapItr = store.lower_bound(keyName);
|
||||
|
||||
// Update the iterator position if necessary based on the value of orEqual
|
||||
int count = 0;
|
||||
if (offset <= 0) {
|
||||
if (mapItr == store.end() || keyName != mapItr->first || !orEqual) {
|
||||
if (mapItr == store.begin())
|
||||
return startKey();
|
||||
|
||||
mapItr--;
|
||||
}
|
||||
} else {
|
||||
if (mapItr == store.end())
|
||||
return endKey();
|
||||
|
||||
if (keyName == mapItr->first && orEqual) {
|
||||
mapItr++;
|
||||
}
|
||||
|
||||
count++;
|
||||
}
|
||||
|
||||
// Increment the map iterator until the desired offset is reached
|
||||
for (; count < abs(offset); count++) {
|
||||
if (offset < 0) {
|
||||
if (mapItr == store.begin())
|
||||
break;
|
||||
|
||||
mapItr--;
|
||||
} else {
|
||||
if (mapItr == store.end())
|
||||
break;
|
||||
|
||||
mapItr++;
|
||||
}
|
||||
}
|
||||
|
||||
if (mapItr == store.end())
|
||||
return endKey();
|
||||
else if (count == abs(offset))
|
||||
return mapItr->first;
|
||||
else
|
||||
return startKey();
|
||||
}
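The key selector arithmetic above is easiest to follow with a concrete case. The following worked examples are illustrative only (hypothetical store contents, not taken from a test):

// Example: suppose the store contains the keys { "a", "b", "d" }.
//   getKey("b", orEqual=false, offset=1) -> "b"   (roughly firstGreaterOrEqual("b"))
//   getKey("b", orEqual=true,  offset=1) -> "d"   (roughly firstGreaterThan("b"))
//   getKey("c", orEqual=false, offset=0) -> "b"   (roughly lastLessThan("c"))
//   getKey("c", orEqual=true,  offset=0) -> "b"   (roughly lastLessOrEqual("c"))
// Offsets of larger magnitude keep stepping in the same direction, clamping to
// startKey() ("") at the front and endKey() ("\xff") at the back.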
|
||||
|
||||
// Gets a range of key-value pairs, returning a maximum of <limit> results
|
||||
std::vector<KeyValue> KeyValueStore::getRange(std::string_view begin,
|
||||
std::string_view end,
|
||||
int limit,
|
||||
bool reverse) const {
|
||||
std::unique_lock<std::mutex> lock(mutex);
|
||||
std::vector<KeyValue> results;
|
||||
if (!reverse) {
|
||||
std::map<std::string, std::string>::const_iterator mapItr = store.lower_bound(begin);
|
||||
|
||||
for (; mapItr != store.end() && mapItr->first < end && results.size() < limit; mapItr++)
|
||||
results.push_back(KeyValue{ mapItr->first, mapItr->second });
|
||||
}
|
||||
|
||||
// Reverse getRange queries are supported here, but not tested at this time, because reverse range
|
||||
// queries are currently disallowed by the database at the API level
|
||||
else {
|
||||
std::map<std::string, std::string>::const_iterator mapItr = store.lower_bound(end);
|
||||
if (mapItr == store.begin())
|
||||
return results;
|
||||
|
||||
for (--mapItr; mapItr->first >= begin && results.size() < abs(limit); mapItr--) {
|
||||
results.push_back(KeyValue{ mapItr->first, mapItr->second });
|
||||
if (mapItr == store.begin())
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
return results;
|
||||
}
|
||||
|
||||
// Stores a key-value pair in the database
|
||||
void KeyValueStore::set(std::string_view key, std::string_view value) {
|
||||
std::unique_lock<std::mutex> lock(mutex);
|
||||
store[std::string(key)] = value;
|
||||
}
|
||||
|
||||
// Removes a key from the database
|
||||
void KeyValueStore::clear(std::string_view key) {
|
||||
std::unique_lock<std::mutex> lock(mutex);
|
||||
auto iter = store.find(key);
|
||||
if (iter != store.end()) {
|
||||
store.erase(iter);
|
||||
}
|
||||
}
|
||||
|
||||
// Removes a range of keys from the database
|
||||
void KeyValueStore::clear(std::string_view begin, std::string_view end) {
|
||||
std::unique_lock<std::mutex> lock(mutex);
|
||||
store.erase(store.lower_bound(begin), store.lower_bound(end));
|
||||
}
|
||||
|
||||
// The number of keys in the database
|
||||
uint64_t KeyValueStore::size() const {
|
||||
std::unique_lock<std::mutex> lock(mutex);
|
||||
return store.size();
|
||||
}
|
||||
|
||||
// The first key in the database; returned by key selectors that choose a key off the front
|
||||
std::string KeyValueStore::startKey() const {
|
||||
return "";
|
||||
}
|
||||
|
||||
// The last key in the database; returned by key selectors that choose a key off the back
|
||||
std::string KeyValueStore::endKey() const {
|
||||
return "\xff";
|
||||
}
|
||||
|
||||
// Debugging function that prints all key-value pairs
|
||||
void KeyValueStore::printContents() const {
|
||||
std::unique_lock<std::mutex> lock(mutex);
|
||||
printf("Contents:\n");
|
||||
std::map<std::string, std::string>::const_iterator mapItr;
|
||||
for (mapItr = store.begin(); mapItr != store.end(); mapItr++)
|
||||
printf("%s=%s\n", mapItr->first.c_str(), mapItr->second.c_str());
|
||||
}
|
||||
|
||||
} // namespace FdbApiTester
|
|
@ -0,0 +1,83 @@
|
|||
/*
|
||||
* TesterKeyValueStore.h
|
||||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#ifndef APITESTER_KEY_VALUE_STORE_H
|
||||
#define APITESTER_KEY_VALUE_STORE_H
|
||||
|
||||
#include <map>
|
||||
#include <optional>
|
||||
#include <string>
|
||||
#include <string_view>
|
||||
#include <vector>
|
||||
#include <mutex>
|
||||
|
||||
namespace FdbApiTester {
|
||||
|
||||
struct KeyValue {
|
||||
std::string key;
|
||||
std::string value;
|
||||
};
|
||||
|
||||
class KeyValueStore {
|
||||
public:
|
||||
// Get the value associated with a key
|
||||
std::optional<std::string> get(std::string_view key) const;
|
||||
|
||||
// Checks if the key exists
|
||||
bool exists(std::string_view key);
|
||||
|
||||
// Returns the key designated by a key selector
|
||||
std::string getKey(std::string_view keyName, bool orEqual, int offset) const;
|
||||
|
||||
// Gets a range of key-value pairs, returning a maximum of <limit> results
|
||||
std::vector<KeyValue> getRange(std::string_view begin, std::string_view end, int limit, bool reverse) const;
|
||||
|
||||
// Stores a key-value pair in the database
|
||||
void set(std::string_view key, std::string_view value);
|
||||
|
||||
// Removes a key from the database
|
||||
void clear(std::string_view key);
|
||||
|
||||
// Removes a range of keys from the database
|
||||
void clear(std::string_view begin, std::string_view end);
|
||||
|
||||
// The number of keys in the database
|
||||
uint64_t size() const;
|
||||
|
||||
// The first key in the database; returned by key selectors that choose a key off the front
|
||||
std::string startKey() const;
|
||||
|
||||
// The last key in the database; returned by key selectors that choose a key off the back
|
||||
std::string endKey() const;
|
||||
|
||||
// Debugging function that prints all key-value pairs
|
||||
void printContents() const;
|
||||
|
||||
private:
|
||||
// A map holding the key-value pairs
|
||||
std::map<std::string, std::string, std::less<>> store;
|
||||
mutable std::mutex mutex;
|
||||
};
|
||||
|
||||
} // namespace FdbApiTester
|
||||
|
||||
#endif
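As a hedged usage sketch (hypothetical keys and values; assumes TesterKeyValueStore.h is included), this is roughly how the workloads use the in-memory store to mirror the expected database state:

void keyValueStoreSketch() {
	FdbApiTester::KeyValueStore store;
	store.set("prefix/k1", "v1");
	store.set("prefix/k2", "v2");

	// Point read: returns an empty optional for a missing key
	std::optional<std::string> v = store.get("prefix/k1");

	// Range read over [begin, end), at most 10 results, forward order
	std::vector<FdbApiTester::KeyValue> kvs = store.getRange("prefix/", "prefix0", 10, false);

	// Clearing the same range removes both keys again
	store.clear("prefix/", "prefix0");
}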
|
|
@ -0,0 +1,49 @@
|
|||
/*
|
||||
* TesterOptions.h
|
||||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#ifndef APITESTER_TESTER_OPTIONS_H
|
||||
#define APITESTER_TESTER_OPTIONS_H
|
||||
|
||||
#include "TesterTestSpec.h"
|
||||
|
||||
namespace FdbApiTester {
|
||||
|
||||
class TesterOptions {
|
||||
public:
|
||||
std::string clusterFile;
|
||||
bool trace = false;
|
||||
std::string traceDir;
|
||||
std::string traceFormat;
|
||||
std::string logGroup;
|
||||
std::string externalClientLibrary;
|
||||
std::string testFile;
|
||||
int numFdbThreads;
|
||||
int numClientThreads;
|
||||
int numDatabases;
|
||||
int numClients;
|
||||
std::vector<std::pair<std::string, std::string>> knobs;
|
||||
TestSpec testSpec;
|
||||
};
|
||||
|
||||
} // namespace FdbApiTester
|
||||
|
||||
#endif
|
|
@ -0,0 +1,67 @@
|
|||
/*
|
||||
* TesterScheduler.cpp
|
||||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "TesterScheduler.h"
|
||||
#include "TesterUtil.h"
|
||||
|
||||
#include <memory>
|
||||
#include <thread>
|
||||
#include <boost/asio.hpp>
|
||||
|
||||
using namespace boost::asio;
|
||||
|
||||
namespace FdbApiTester {
|
||||
|
||||
const TTaskFct NO_OP_TASK = []() {};
|
||||
|
||||
class AsioScheduler : public IScheduler {
|
||||
public:
|
||||
AsioScheduler(int numThreads) : numThreads(numThreads) {}
|
||||
|
||||
void start() override {
|
||||
work = require(io_ctx.get_executor(), execution::outstanding_work.tracked);
|
||||
for (int i = 0; i < numThreads; i++) {
|
||||
threads.emplace_back([this]() { io_ctx.run(); });
|
||||
}
|
||||
}
|
||||
|
||||
void schedule(TTaskFct task) override { post(io_ctx, task); }
|
||||
|
||||
void stop() override { work = any_io_executor(); }
|
||||
|
||||
void join() override {
|
||||
for (auto& th : threads) {
|
||||
th.join();
|
||||
}
|
||||
}
|
||||
|
||||
private:
|
||||
int numThreads;
|
||||
std::vector<std::thread> threads;
|
||||
io_context io_ctx;
|
||||
any_io_executor work;
|
||||
};
|
||||
|
||||
std::unique_ptr<IScheduler> createScheduler(int numThreads) {
|
||||
ASSERT(numThreads > 0 && numThreads <= 1000);
|
||||
return std::make_unique<AsioScheduler>(numThreads);
|
||||
}
|
||||
|
||||
} // namespace FdbApiTester
|
|
@ -0,0 +1,60 @@
|
|||
/*
|
||||
* TesterScheduler.h
|
||||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#ifndef APITESTER_SCHEDULER_H
|
||||
#define APITESTER_SCHEDULER_H
|
||||
|
||||
#include <functional>
|
||||
#include <memory>
|
||||
|
||||
namespace FdbApiTester {
|
||||
|
||||
using TTaskFct = std::function<void(void)>;
|
||||
|
||||
extern const TTaskFct NO_OP_TASK;
|
||||
|
||||
/**
|
||||
* Scheduler for asynchronous execution of tasks on a pool of threads
|
||||
*/
|
||||
class IScheduler {
|
||||
public:
|
||||
virtual ~IScheduler() {}
|
||||
|
||||
// Create scheduler threads and begin accepting tasks
|
||||
virtual void start() = 0;
|
||||
|
||||
// Schedule a task for asynchronous execution
|
||||
virtual void schedule(TTaskFct task) = 0;
|
||||
|
||||
// Gracefully stop the scheduler. Waits for already running tasks to finish
|
||||
virtual void stop() = 0;
|
||||
|
||||
// Join with all threads of the scheduler
|
||||
virtual void join() = 0;
|
||||
};
|
||||
|
||||
// Create a scheduler using the given number of threads
|
||||
std::unique_ptr<IScheduler> createScheduler(int numThreads);
|
||||
|
||||
} // namespace FdbApiTester
|
||||
|
||||
#endif
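A minimal, illustrative sketch of driving this interface (assumes TesterScheduler.h is included; the task bodies are made up):

#include <atomic>

void schedulerSketch() {
	std::unique_ptr<FdbApiTester::IScheduler> scheduler = FdbApiTester::createScheduler(4);
	scheduler->start();

	std::atomic<int> counter(0);
	for (int i = 0; i < 100; i++) {
		scheduler->schedule([&counter]() { counter++; });
	}

	scheduler->stop(); // let the workers wind down once the queued tasks complete
	scheduler->join(); // wait for all worker threads to exit
}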
|
|
@ -0,0 +1,169 @@
|
|||
/*
|
||||
* TesterTestSpec.cpp
|
||||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "TesterTestSpec.h"
|
||||
#include "TesterUtil.h"
|
||||
#include <toml.hpp>
|
||||
#include <fmt/format.h>
|
||||
#include <functional>
|
||||
|
||||
namespace FdbApiTester {
|
||||
|
||||
namespace {
|
||||
|
||||
void processIntOption(const std::string& value, const std::string& optionName, int& res, int minVal, int maxVal) {
|
||||
char* endptr;
|
||||
res = strtol(value.c_str(), &endptr, 10);
|
||||
if (*endptr != '\0') {
|
||||
throw TesterError(fmt::format("Invalid test file. Invalid value {} for {}", value, optionName));
|
||||
}
|
||||
if (res < minVal || res > maxVal) {
|
||||
throw TesterError(
|
||||
fmt::format("Invalid test file. Value for {} must be between {} and {}", optionName, minVal, maxVal));
|
||||
}
|
||||
}
|
||||
|
||||
std::unordered_map<std::string, std::function<void(const std::string& value, TestSpec* spec)>> testSpecTestKeys = {
|
||||
{ "title",
|
||||
[](const std::string& value, TestSpec* spec) { //
|
||||
spec->title = value;
|
||||
} },
|
||||
{ "apiVersion",
|
||||
[](const std::string& value, TestSpec* spec) { //
|
||||
processIntOption(value, "apiVersion", spec->apiVersion, 700, 710);
|
||||
} },
|
||||
{ "blockOnFutures",
|
||||
[](const std::string& value, TestSpec* spec) { //
|
||||
spec->blockOnFutures = (value == "true");
|
||||
} },
|
||||
{ "buggify",
|
||||
[](const std::string& value, TestSpec* spec) { //
|
||||
spec->buggify = (value == "true");
|
||||
} },
|
||||
{ "multiThreaded",
|
||||
[](const std::string& value, TestSpec* spec) { //
|
||||
spec->multiThreaded = (value == "true");
|
||||
} },
|
||||
{ "fdbCallbacksOnExternalThreads",
|
||||
[](const std::string& value, TestSpec* spec) { //
|
||||
spec->fdbCallbacksOnExternalThreads = (value == "true");
|
||||
} },
|
||||
{ "databasePerTransaction",
|
||||
[](const std::string& value, TestSpec* spec) { //
|
||||
spec->databasePerTransaction = (value == "true");
|
||||
} },
|
||||
{ "minFdbThreads",
|
||||
[](const std::string& value, TestSpec* spec) { //
|
||||
processIntOption(value, "minFdbThreads", spec->minFdbThreads, 1, 1000);
|
||||
} },
|
||||
{ "maxFdbThreads",
|
||||
[](const std::string& value, TestSpec* spec) { //
|
||||
processIntOption(value, "maxFdbThreads", spec->maxFdbThreads, 1, 1000);
|
||||
} },
|
||||
{ "minClientThreads",
|
||||
[](const std::string& value, TestSpec* spec) { //
|
||||
processIntOption(value, "minClientThreads", spec->minClientThreads, 1, 1000);
|
||||
} },
|
||||
{ "maxClientThreads",
|
||||
[](const std::string& value, TestSpec* spec) { //
|
||||
processIntOption(value, "maxClientThreads", spec->maxClientThreads, 1, 1000);
|
||||
} },
|
||||
{ "minDatabases",
|
||||
[](const std::string& value, TestSpec* spec) { //
|
||||
processIntOption(value, "minDatabases", spec->minDatabases, 1, 1000);
|
||||
} },
|
||||
{ "maxDatabases",
|
||||
[](const std::string& value, TestSpec* spec) { //
|
||||
processIntOption(value, "maxDatabases", spec->maxDatabases, 1, 1000);
|
||||
} },
|
||||
{ "minClients",
|
||||
[](const std::string& value, TestSpec* spec) { //
|
||||
processIntOption(value, "minClients", spec->minClients, 1, 1000);
|
||||
} },
|
||||
{ "maxClients",
|
||||
[](const std::string& value, TestSpec* spec) { //
|
||||
processIntOption(value, "maxClients", spec->maxClients, 1, 1000);
|
||||
} }
|
||||
};
|
||||
|
||||
template <typename T>
|
||||
std::string toml_to_string(const T& value) {
|
||||
// TOML formatting converts numbers to strings exactly as they appear in the file,
|
||||
// which matches the test spec format. However, strings are quoted, so we
|
||||
// must strip the quotes.
|
||||
if (value.type() == toml::value_t::string) {
|
||||
const std::string& formatted = toml::format(value);
|
||||
return formatted.substr(1, formatted.size() - 2);
|
||||
} else {
|
||||
return toml::format(value);
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace
|
||||
|
||||
TestSpec readTomlTestSpec(std::string fileName) {
|
||||
TestSpec spec;
|
||||
WorkloadSpec workloadSpec;
|
||||
|
||||
const toml::value& conf = toml::parse(fileName);
|
||||
|
||||
// Parse the [test] section; exactly one is expected
|
||||
const toml::array& tests = toml::find(conf, "test").as_array();
|
||||
if (tests.size() == 0) {
|
||||
throw TesterError("Invalid test file. No [test] section found");
|
||||
} else if (tests.size() > 1) {
|
||||
throw TesterError("Invalid test file. More than one [test] section found");
|
||||
}
|
||||
|
||||
const toml::value& test = tests[0];
|
||||
|
||||
// First handle all test-level settings
|
||||
for (const auto& [k, v] : test.as_table()) {
|
||||
if (k == "workload") {
|
||||
continue;
|
||||
}
|
||||
if (testSpecTestKeys.find(k) != testSpecTestKeys.end()) {
|
||||
testSpecTestKeys[k](toml_to_string(v), &spec);
|
||||
} else {
|
||||
throw TesterError(fmt::format(
|
||||
"Invalid test file. Unrecognized test parameter. Name: {}, value {}", k, toml_to_string(v)));
|
||||
}
|
||||
}
|
||||
|
||||
// And then copy the workload attributes to spec.options
|
||||
const toml::array& workloads = toml::find(test, "workload").as_array();
|
||||
for (const toml::value& workload : workloads) {
|
||||
workloadSpec = WorkloadSpec();
|
||||
auto& options = workloadSpec.options;
|
||||
for (const auto& [attrib, v] : workload.as_table()) {
|
||||
options[attrib] = toml_to_string(v);
|
||||
}
|
||||
auto itr = options.find("name");
|
||||
if (itr == options.end()) {
|
||||
throw TesterError("Invalid test file. Unspecified workload name.");
|
||||
}
|
||||
workloadSpec.name = itr->second;
|
||||
spec.workloads.push_back(workloadSpec);
|
||||
}
|
||||
|
||||
return spec;
|
||||
}
|
||||
|
||||
} // namespace FdbApiTester
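For orientation, a test file accepted by this parser looks roughly like the following (shown as a comment; the option names come from testSpecTestKeys and the workload options above, while the concrete values are invented for illustration):

// [[test]]
// title = 'API Correctness Single Threaded'
// apiVersion = 710
// multiThreaded = false
// minClients = 1
// maxClients = 3
//
//     [[test.workload]]
//     name = 'ApiCorrectness'
//     numRandomOperations = 100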
|
|
@ -0,0 +1,90 @@
|
|||
/*
|
||||
* TesterTestSpec.h
|
||||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#ifndef APITESTER_CONFIG_READER_H
|
||||
#define APITESTER_CONFIG_READER_H
|
||||
|
||||
#include <string>
|
||||
#include <unordered_map>
|
||||
#include <vector>
|
||||
|
||||
#define FDB_API_VERSION 710
|
||||
|
||||
namespace FdbApiTester {
|
||||
|
||||
/// Workload specification
|
||||
struct WorkloadSpec {
|
||||
std::string name;
|
||||
std::unordered_map<std::string, std::string> options;
|
||||
};
|
||||
|
||||
// Test specification loaded from a *.toml file
|
||||
struct TestSpec {
|
||||
// Title of the test
|
||||
std::string title;
|
||||
|
||||
// FDB API version, using the latest version by default
|
||||
int apiVersion = FDB_API_VERSION;
|
||||
|
||||
// Use blocking waits on futures instead of scheduling callbacks
|
||||
bool blockOnFutures = false;
|
||||
|
||||
// Use multi-threaded FDB client
|
||||
bool multiThreaded = false;
|
||||
|
||||
// Enable injection of errors in FDB client
|
||||
bool buggify = false;
|
||||
|
||||
// Execute future callbacks on the threads of the external FDB library
|
||||
// rather than on the main thread of the local FDB client library
|
||||
bool fdbCallbacksOnExternalThreads = false;
|
||||
|
||||
// Execute each transaction in a separate database instance
|
||||
bool databasePerTransaction = false;
|
||||
|
||||
// Size of the FDB client thread pool (a random number in the [min,max] range)
|
||||
int minFdbThreads = 1;
|
||||
int maxFdbThreads = 1;
|
||||
|
||||
// Size of the thread pool for test workloads (a random number in the [min,max] range)
|
||||
int minClientThreads = 1;
|
||||
int maxClientThreads = 1;
|
||||
|
||||
// Size of the database instance pool (a random number in the [min,max] range)
|
||||
// Each transaction is assigned randomly to one of the databases in the pool
|
||||
int minDatabases = 1;
|
||||
int maxDatabases = 1;
|
||||
|
||||
// Number of workload clients (a random number in the [min,max] range)
|
||||
int minClients = 1;
|
||||
int maxClients = 10;
|
||||
|
||||
// List of workloads with their options
|
||||
std::vector<WorkloadSpec> workloads;
|
||||
};
|
||||
|
||||
// Read the test specification from a *.toml file
|
||||
TestSpec readTomlTestSpec(std::string fileName);
|
||||
|
||||
} // namespace FdbApiTester
|
||||
|
||||
#endif
|
|
@ -0,0 +1,471 @@
|
|||
/*
|
||||
* TesterTransactionExecutor.cpp
|
||||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "TesterTransactionExecutor.h"
|
||||
#include "TesterUtil.h"
|
||||
#include "test/apitester/TesterScheduler.h"
|
||||
#include <memory>
|
||||
#include <unordered_map>
|
||||
#include <mutex>
|
||||
#include <atomic>
|
||||
#include <chrono>
|
||||
#include <thread>
|
||||
#include <fmt/format.h>
|
||||
|
||||
namespace FdbApiTester {
|
||||
|
||||
void TransactionActorBase::complete(fdb_error_t err) {
|
||||
error = err;
|
||||
context = {};
|
||||
}
|
||||
|
||||
void ITransactionContext::continueAfterAll(std::vector<Future> futures, TTaskFct cont) {
|
||||
auto counter = std::make_shared<std::atomic<int>>(futures.size());
|
||||
auto errorCode = std::make_shared<std::atomic<fdb_error_t>>(error_code_success);
|
||||
auto thisPtr = shared_from_this();
|
||||
for (auto& f : futures) {
|
||||
continueAfter(
|
||||
f,
|
||||
[thisPtr, f, counter, errorCode, cont]() {
|
||||
if (f.getError() != error_code_success) {
|
||||
(*errorCode) = f.getError();
|
||||
}
|
||||
if (--(*counter) == 0) {
|
||||
if (*errorCode == error_code_success) {
|
||||
// all futures successful -> continue
|
||||
cont();
|
||||
} else {
|
||||
// at least one future failed -> retry the transaction
|
||||
thisPtr->onError(*errorCode);
|
||||
}
|
||||
}
|
||||
},
|
||||
false);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Transaction context base class, containing reusable functionality
|
||||
*/
|
||||
class TransactionContextBase : public ITransactionContext {
|
||||
public:
|
||||
TransactionContextBase(FDBTransaction* tx,
|
||||
std::shared_ptr<ITransactionActor> txActor,
|
||||
TTaskFct cont,
|
||||
IScheduler* scheduler)
|
||||
: fdbTx(tx), txActor(txActor), contAfterDone(cont), scheduler(scheduler), txState(TxState::IN_PROGRESS) {}
|
||||
|
||||
// A state machine:
|
||||
// IN_PROGRESS -> (ON_ERROR -> IN_PROGRESS)* [-> ON_ERROR] -> DONE
|
||||
enum class TxState { IN_PROGRESS, ON_ERROR, DONE };
|
||||
|
||||
Transaction* tx() override { return &fdbTx; }
|
||||
|
||||
// Set a continuation to be executed when a future gets ready
|
||||
void continueAfter(Future f, TTaskFct cont, bool retryOnError) override { doContinueAfter(f, cont, retryOnError); }
|
||||
|
||||
// Complete the transaction with a commit
|
||||
void commit() override {
|
||||
std::unique_lock<std::mutex> lock(mutex);
|
||||
if (txState != TxState::IN_PROGRESS) {
|
||||
return;
|
||||
}
|
||||
lock.unlock();
|
||||
Future f = fdbTx.commit();
|
||||
auto thisRef = shared_from_this();
|
||||
doContinueAfter(
|
||||
f, [thisRef]() { thisRef->done(); }, true);
|
||||
}
|
||||
|
||||
// Complete the transaction without a commit (for read transactions)
|
||||
void done() override {
|
||||
std::unique_lock<std::mutex> lock(mutex);
|
||||
if (txState != TxState::IN_PROGRESS) {
|
||||
return;
|
||||
}
|
||||
txState = TxState::DONE;
|
||||
lock.unlock();
|
||||
// cancel transaction so that any pending operations on it
|
||||
// fail gracefully
|
||||
fdbTx.cancel();
|
||||
txActor->complete(error_code_success);
|
||||
cleanUp();
|
||||
contAfterDone();
|
||||
}
|
||||
|
||||
protected:
|
||||
virtual void doContinueAfter(Future f, TTaskFct cont, bool retryOnError) = 0;
|
||||
|
||||
// Clean up transaction state after completing the transaction
|
||||
// Note that the object may live longer, because it is still referenced
|
||||
// by callbacks that have not yet been triggered
|
||||
virtual void cleanUp() {
|
||||
ASSERT(txState == TxState::DONE);
|
||||
ASSERT(!onErrorFuture);
|
||||
txActor = {};
|
||||
}
|
||||
|
||||
// Complete the transaction with a non-retryable error
|
||||
void transactionFailed(fdb_error_t err) {
|
||||
ASSERT(err != error_code_success);
|
||||
std::unique_lock<std::mutex> lock(mutex);
|
||||
if (txState == TxState::DONE) {
|
||||
return;
|
||||
}
|
||||
txState = TxState::DONE;
|
||||
lock.unlock();
|
||||
txActor->complete(err);
|
||||
cleanUp();
|
||||
contAfterDone();
|
||||
}
|
||||
|
||||
// Handle the result of a transaction onError call
|
||||
void handleOnErrorResult() {
|
||||
ASSERT(txState == TxState::ON_ERROR);
|
||||
fdb_error_t err = onErrorFuture.getError();
|
||||
onErrorFuture = {};
|
||||
if (err) {
|
||||
transactionFailed(err);
|
||||
} else {
|
||||
std::unique_lock<std::mutex> lock(mutex);
|
||||
txState = TxState::IN_PROGRESS;
|
||||
lock.unlock();
|
||||
txActor->start();
|
||||
}
|
||||
}
|
||||
|
||||
// FDB transaction
|
||||
Transaction fdbTx;
|
||||
|
||||
// Actor implementing the transaction workflow
|
||||
std::shared_ptr<ITransactionActor> txActor;
|
||||
|
||||
// Mutex protecting access to shared mutable state
|
||||
std::mutex mutex;
|
||||
|
||||
// Continuation to be called after completion of the transaction
|
||||
TTaskFct contAfterDone;
|
||||
|
||||
// Reference to the scheduler
|
||||
IScheduler* scheduler;
|
||||
|
||||
// Transaction execution state
|
||||
TxState txState;
|
||||
|
||||
// onError future used in ON_ERROR state
|
||||
Future onErrorFuture;
|
||||
};
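The interplay of the states above and the two context implementations below can be summarized by this illustrative trace of a retried transaction (comments only, no new behaviour):

// Typical lifecycle of a retried transaction:
//   txActor->start()                  // IN_PROGRESS: the actor issues operations and eventually calls commit()
//   commit future fails (retryable)   // the continuation sees the error and calls onError(err)
//   onError(err)                      // ON_ERROR: waits on fdbTx.onError(err)
//   handleOnErrorResult(): success    // back to IN_PROGRESS, txActor->start() runs the actor again
//   commit future succeeds            // done(): DONE, txActor->complete(error_code_success), contAfterDone()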
|
||||
|
||||
/**
|
||||
* Transaction context using blocking waits to implement continuations on futures
|
||||
*/
|
||||
class BlockingTransactionContext : public TransactionContextBase {
|
||||
public:
|
||||
BlockingTransactionContext(FDBTransaction* tx,
|
||||
std::shared_ptr<ITransactionActor> txActor,
|
||||
TTaskFct cont,
|
||||
IScheduler* scheduler)
|
||||
: TransactionContextBase(tx, txActor, cont, scheduler) {}
|
||||
|
||||
protected:
|
||||
void doContinueAfter(Future f, TTaskFct cont, bool retryOnError) override {
|
||||
auto thisRef = std::static_pointer_cast<BlockingTransactionContext>(shared_from_this());
|
||||
scheduler->schedule(
|
||||
[thisRef, f, cont, retryOnError]() mutable { thisRef->blockingContinueAfter(f, cont, retryOnError); });
|
||||
}
|
||||
|
||||
void blockingContinueAfter(Future f, TTaskFct cont, bool retryOnError) {
|
||||
std::unique_lock<std::mutex> lock(mutex);
|
||||
if (txState != TxState::IN_PROGRESS) {
|
||||
return;
|
||||
}
|
||||
lock.unlock();
|
||||
fdb_error_t err = fdb_future_block_until_ready(f.fdbFuture());
|
||||
if (err) {
|
||||
transactionFailed(err);
|
||||
return;
|
||||
}
|
||||
err = f.getError();
|
||||
if (err == error_code_transaction_cancelled) {
|
||||
return;
|
||||
}
|
||||
if (err == error_code_success || !retryOnError) {
|
||||
scheduler->schedule([cont]() { cont(); });
|
||||
return;
|
||||
}
|
||||
|
||||
onError(err);
|
||||
}
|
||||
|
||||
virtual void onError(fdb_error_t err) override {
|
||||
std::unique_lock<std::mutex> lock(mutex);
|
||||
if (txState != TxState::IN_PROGRESS) {
|
||||
// Ignore further errors if the transaction is already in error handling mode or completed
|
||||
return;
|
||||
}
|
||||
txState = TxState::ON_ERROR;
|
||||
lock.unlock();
|
||||
|
||||
ASSERT(!onErrorFuture);
|
||||
onErrorFuture = fdbTx.onError(err);
|
||||
fdb_error_t err2 = fdb_future_block_until_ready(onErrorFuture.fdbFuture());
|
||||
if (err2) {
|
||||
transactionFailed(err2);
|
||||
return;
|
||||
}
|
||||
auto thisRef = std::static_pointer_cast<BlockingTransactionContext>(shared_from_this());
|
||||
scheduler->schedule([thisRef]() { thisRef->handleOnErrorResult(); });
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* Transaction context using callbacks to implement continuations on futures
|
||||
*/
|
||||
class AsyncTransactionContext : public TransactionContextBase {
|
||||
public:
|
||||
AsyncTransactionContext(FDBTransaction* tx,
|
||||
std::shared_ptr<ITransactionActor> txActor,
|
||||
TTaskFct cont,
|
||||
IScheduler* scheduler)
|
||||
: TransactionContextBase(tx, txActor, cont, scheduler) {}
|
||||
|
||||
protected:
|
||||
void doContinueAfter(Future f, TTaskFct cont, bool retryOnError) override {
|
||||
std::unique_lock<std::mutex> lock(mutex);
|
||||
if (txState != TxState::IN_PROGRESS) {
|
||||
return;
|
||||
}
|
||||
callbackMap[f.fdbFuture()] = CallbackInfo{ f, cont, shared_from_this(), retryOnError };
|
||||
lock.unlock();
|
||||
fdb_error_t err = fdb_future_set_callback(f.fdbFuture(), futureReadyCallback, this);
|
||||
if (err) {
|
||||
lock.lock();
|
||||
callbackMap.erase(f.fdbFuture());
|
||||
lock.unlock();
|
||||
transactionFailed(err);
|
||||
}
|
||||
}
|
||||
|
||||
static void futureReadyCallback(FDBFuture* f, void* param) {
|
||||
AsyncTransactionContext* txCtx = (AsyncTransactionContext*)param;
|
||||
txCtx->onFutureReady(f);
|
||||
}
|
||||
|
||||
void onFutureReady(FDBFuture* f) {
|
||||
injectRandomSleep();
|
||||
// Hold a reference to this to prevent it from being
|
||||
// destroyed before the mutex is released
|
||||
auto thisRef = shared_from_this();
|
||||
std::unique_lock<std::mutex> lock(mutex);
|
||||
auto iter = callbackMap.find(f);
|
||||
ASSERT(iter != callbackMap.end());
|
||||
CallbackInfo cbInfo = iter->second;
|
||||
callbackMap.erase(iter);
|
||||
if (txState != TxState::IN_PROGRESS) {
|
||||
return;
|
||||
}
|
||||
lock.unlock();
|
||||
fdb_error_t err = fdb_future_get_error(f);
|
||||
if (err == error_code_transaction_cancelled) {
|
||||
return;
|
||||
}
|
||||
if (err == error_code_success || !cbInfo.retryOnError) {
|
||||
scheduler->schedule(cbInfo.cont);
|
||||
return;
|
||||
}
|
||||
onError(err);
|
||||
}
|
||||
|
||||
virtual void onError(fdb_error_t err) override {
|
||||
std::unique_lock<std::mutex> lock(mutex);
|
||||
if (txState != TxState::IN_PROGRESS) {
|
||||
// Ignore further errors if the transaction is already in error handling mode or completed
|
||||
return;
|
||||
}
|
||||
txState = TxState::ON_ERROR;
|
||||
lock.unlock();
|
||||
|
||||
ASSERT(!onErrorFuture);
|
||||
onErrorFuture = tx()->onError(err);
|
||||
onErrorThisRef = std::static_pointer_cast<AsyncTransactionContext>(shared_from_this());
|
||||
fdb_error_t err2 = fdb_future_set_callback(onErrorFuture.fdbFuture(), onErrorReadyCallback, this);
|
||||
if (err2) {
|
||||
onErrorFuture = {};
|
||||
transactionFailed(err2);
|
||||
}
|
||||
}
|
||||
|
||||
static void onErrorReadyCallback(FDBFuture* f, void* param) {
|
||||
AsyncTransactionContext* txCtx = (AsyncTransactionContext*)param;
|
||||
txCtx->onErrorReady(f);
|
||||
}
|
||||
|
||||
void onErrorReady(FDBFuture* f) {
|
||||
injectRandomSleep();
|
||||
auto thisRef = onErrorThisRef;
|
||||
onErrorThisRef = {};
|
||||
scheduler->schedule([thisRef]() { thisRef->handleOnErrorResult(); });
|
||||
}
|
||||
|
||||
void cleanUp() override {
|
||||
TransactionContextBase::cleanUp();
|
||||
|
||||
// Cancel all pending operations
|
||||
// Note that the callbacks of the cancelled futures will still be called
|
||||
std::unique_lock<std::mutex> lock(mutex);
|
||||
std::vector<Future> futures;
|
||||
for (auto& iter : callbackMap) {
|
||||
futures.push_back(iter.second.future);
|
||||
}
|
||||
lock.unlock();
|
||||
for (auto& f : futures) {
|
||||
f.cancel();
|
||||
}
|
||||
}
|
||||
|
||||
// Inject a random sleep with a low probability
|
||||
void injectRandomSleep() {
|
||||
if (Random::get().randomBool(0.01)) {
|
||||
std::this_thread::sleep_for(std::chrono::milliseconds(Random::get().randomInt(1, 5)));
|
||||
}
|
||||
}
|
||||
|
||||
// Object references for a future callback
|
||||
struct CallbackInfo {
|
||||
Future future;
|
||||
TTaskFct cont;
|
||||
std::shared_ptr<ITransactionContext> thisRef;
|
||||
bool retryOnError;
|
||||
};
|
||||
|
||||
// Map for keeping track of future waits and holding necessary object references
|
||||
std::unordered_map<FDBFuture*, CallbackInfo> callbackMap;
|
||||
|
||||
// Holds a reference to this for the onError future C callback
|
||||
std::shared_ptr<AsyncTransactionContext> onErrorThisRef;
|
||||
};
|
||||
|
||||
/**
|
||||
* Transaction executor base class, containing reusable functionality
|
||||
*/
|
||||
class TransactionExecutorBase : public ITransactionExecutor {
|
||||
public:
|
||||
TransactionExecutorBase(const TransactionExecutorOptions& options) : options(options), scheduler(nullptr) {}
|
||||
|
||||
void init(IScheduler* scheduler, const char* clusterFile) override {
|
||||
this->scheduler = scheduler;
|
||||
this->clusterFile = clusterFile;
|
||||
}
|
||||
|
||||
protected:
|
||||
// Execute the transaction on the given database instance
|
||||
void executeOnDatabase(FDBDatabase* db, std::shared_ptr<ITransactionActor> txActor, TTaskFct cont) {
|
||||
FDBTransaction* tx;
|
||||
fdb_error_t err = fdb_database_create_transaction(db, &tx);
|
||||
if (err != error_code_success) {
|
||||
txActor->complete(err);
|
||||
cont();
|
||||
} else {
|
||||
std::shared_ptr<ITransactionContext> ctx;
|
||||
if (options.blockOnFutures) {
|
||||
ctx = std::make_shared<BlockingTransactionContext>(tx, txActor, cont, scheduler);
|
||||
} else {
|
||||
ctx = std::make_shared<AsyncTransactionContext>(tx, txActor, cont, scheduler);
|
||||
}
|
||||
txActor->init(ctx);
|
||||
txActor->start();
|
||||
}
|
||||
}
|
||||
|
||||
protected:
|
||||
TransactionExecutorOptions options;
|
||||
std::string clusterFile;
|
||||
IScheduler* scheduler;
|
||||
};
|
||||
|
||||
/**
|
||||
* Transaction executor load balancing transactions over a fixed pool of databases
|
||||
*/
|
||||
class DBPoolTransactionExecutor : public TransactionExecutorBase {
|
||||
public:
|
||||
DBPoolTransactionExecutor(const TransactionExecutorOptions& options) : TransactionExecutorBase(options) {}
|
||||
|
||||
~DBPoolTransactionExecutor() override { release(); }
|
||||
|
||||
void init(IScheduler* scheduler, const char* clusterFile) override {
|
||||
TransactionExecutorBase::init(scheduler, clusterFile);
|
||||
for (int i = 0; i < options.numDatabases; i++) {
|
||||
FDBDatabase* db;
|
||||
fdb_error_t err = fdb_create_database(clusterFile, &db);
|
||||
if (err != error_code_success) {
|
||||
throw TesterError(fmt::format("Failed to create database with the cluster file '{}'. Error: {}({})",
|
||||
clusterFile,
|
||||
err,
|
||||
fdb_get_error(err)));
|
||||
}
|
||||
databases.push_back(db);
|
||||
}
|
||||
}
|
||||
|
||||
void execute(std::shared_ptr<ITransactionActor> txActor, TTaskFct cont) override {
|
||||
int idx = Random::get().randomInt(0, options.numDatabases - 1);
|
||||
executeOnDatabase(databases[idx], txActor, cont);
|
||||
}
|
||||
|
||||
void release() {
|
||||
for (FDBDatabase* db : databases) {
|
||||
fdb_database_destroy(db);
|
||||
}
|
||||
}
|
||||
|
||||
private:
|
||||
std::vector<FDBDatabase*> databases;
|
||||
};
|
||||
|
||||
/**
|
||||
* Transaction executor executing each transaction on a separate database
|
||||
*/
|
||||
class DBPerTransactionExecutor : public TransactionExecutorBase {
|
||||
public:
|
||||
DBPerTransactionExecutor(const TransactionExecutorOptions& options) : TransactionExecutorBase(options) {}
|
||||
|
||||
void execute(std::shared_ptr<ITransactionActor> txActor, TTaskFct cont) override {
|
||||
FDBDatabase* db = nullptr;
|
||||
fdb_error_t err = fdb_create_database(clusterFile.c_str(), &db);
|
||||
if (err != error_code_success) {
|
||||
txActor->complete(err);
|
||||
cont();
|
||||
return;
|
||||
}
|
||||
executeOnDatabase(db, txActor, [cont, db]() {
|
||||
fdb_database_destroy(db);
|
||||
cont();
|
||||
});
|
||||
}
|
||||
};
|
||||
|
||||
std::unique_ptr<ITransactionExecutor> createTransactionExecutor(const TransactionExecutorOptions& options) {
|
||||
if (options.databasePerTransaction) {
|
||||
return std::make_unique<DBPerTransactionExecutor>(options);
|
||||
} else {
|
||||
return std::make_unique<DBPoolTransactionExecutor>(options);
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace FdbApiTester
|
|
@ -0,0 +1,145 @@
|
|||
/*
|
||||
* TesterTransactionExecutor.h
|
||||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#ifndef APITESTER_TRANSACTION_EXECUTOR_H
|
||||
#define APITESTER_TRANSACTION_EXECUTOR_H
|
||||
|
||||
#include "TesterOptions.h"
|
||||
#include "TesterApiWrapper.h"
|
||||
#include "TesterScheduler.h"
|
||||
#include <string_view>
|
||||
#include <memory>
|
||||
|
||||
namespace FdbApiTester {
|
||||
|
||||
/**
|
||||
* Interface to be used for implementation of a concrete transaction
|
||||
*/
|
||||
class ITransactionContext : public std::enable_shared_from_this<ITransactionContext> {
|
||||
public:
|
||||
virtual ~ITransactionContext() {}
|
||||
|
||||
// Current FDB transaction
|
||||
virtual Transaction* tx() = 0;
|
||||
|
||||
// Schedule a continuation to be executed when the future gets ready
|
||||
// retryOnError controls whether the transaction is retried in case of an error instead
|
||||
// of calling the continuation
|
||||
virtual void continueAfter(Future f, TTaskFct cont, bool retryOnError = true) = 0;
|
||||
|
||||
// Complete the transaction with a commit
|
||||
virtual void commit() = 0;
|
||||
|
||||
// retry transaction on error
|
||||
virtual void onError(fdb_error_t err) = 0;
|
||||
|
||||
// Mark the transaction as completed without committing it (for read transactions)
|
||||
virtual void done() = 0;
|
||||
|
||||
// A continuation to be executed when all of the given futures get ready
|
||||
virtual void continueAfterAll(std::vector<Future> futures, TTaskFct cont);
|
||||
};
|
||||
|
||||
/**
|
||||
* Interface of an actor object implementing a concrete transaction
|
||||
*/
|
||||
class ITransactionActor {
|
||||
public:
|
||||
virtual ~ITransactionActor() {}
|
||||
|
||||
// Initialize with the given transaction context
|
||||
virtual void init(std::shared_ptr<ITransactionContext> ctx) = 0;
|
||||
|
||||
// Start execution of the transaction, also called on retries
|
||||
virtual void start() = 0;
|
||||
|
||||
// Transaction completion result (error_code_success in case of success)
|
||||
virtual fdb_error_t getErrorCode() = 0;
|
||||
|
||||
// Notification about the completion of the transaction
|
||||
virtual void complete(fdb_error_t err) = 0;
|
||||
};
|
||||
|
||||
/**
|
||||
* A helper base class for transaction actors
|
||||
*/
|
||||
class TransactionActorBase : public ITransactionActor {
|
||||
public:
|
||||
void init(std::shared_ptr<ITransactionContext> ctx) override { context = ctx; }
|
||||
fdb_error_t getErrorCode() override { return error; }
|
||||
void complete(fdb_error_t err) override;
|
||||
|
||||
protected:
|
||||
std::shared_ptr<ITransactionContext> ctx() { return context; }
|
||||
|
||||
private:
|
||||
std::shared_ptr<ITransactionContext> context;
|
||||
fdb_error_t error = error_code_success;
|
||||
};
|
||||
|
||||
// Type of the lambda functions implementing a transaction
|
||||
using TTxStartFct = std::function<void(std::shared_ptr<ITransactionContext>)>;
|
||||
|
||||
/**
|
||||
* A wrapper class for transactions implemented by lambda functions
|
||||
*/
|
||||
class TransactionFct : public TransactionActorBase {
|
||||
public:
|
||||
TransactionFct(TTxStartFct startFct) : startFct(startFct) {}
|
||||
void start() override { startFct(this->ctx()); }
|
||||
|
||||
private:
|
||||
TTxStartFct startFct;
|
||||
};
|
||||
|
||||
/**
|
||||
* Configuration of transaction execution mode
|
||||
*/
|
||||
struct TransactionExecutorOptions {
|
||||
// Use blocking waits on futures
|
||||
bool blockOnFutures = false;
|
||||
|
||||
// Create each transaction in a separate database instance
|
||||
bool databasePerTransaction = false;
|
||||
|
||||
// The size of the database instance pool
|
||||
int numDatabases = 1;
|
||||
};
|
||||
|
||||
/**
|
||||
* Transaction executor provides an interface for executing transactions
|
||||
* It is responsible for instantiating FDB databases and transactions and managing their lifecycle
|
||||
* according to the provided options
|
||||
*/
|
||||
class ITransactionExecutor {
|
||||
public:
|
||||
virtual ~ITransactionExecutor() {}
|
||||
virtual void init(IScheduler* sched, const char* clusterFile) = 0;
|
||||
virtual void execute(std::shared_ptr<ITransactionActor> tx, TTaskFct cont) = 0;
|
||||
};
|
||||
|
||||
// Create a transaction executor for the given options
|
||||
std::unique_ptr<ITransactionExecutor> createTransactionExecutor(const TransactionExecutorOptions& options);
|
||||
|
||||
} // namespace FdbApiTester
|
||||
|
||||
#endif
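To show how these interfaces are meant to compose, here is a hedged end-to-end sketch. It is illustrative only: it assumes the tester headers above are included, an already initialized FDB network, and a valid cluster file path; executorSketch and the key "hello" are invented for the example.

void executorSketch(const char* clusterFile) {
	using namespace FdbApiTester;

	TransactionExecutorOptions options;
	options.blockOnFutures = false;
	options.numDatabases = 2;

	std::unique_ptr<IScheduler> scheduler = createScheduler(4);
	std::unique_ptr<ITransactionExecutor> executor = createTransactionExecutor(options);
	scheduler->start();
	executor->init(scheduler.get(), clusterFile);

	// A read-only transaction implemented as a lambda: read one key, then finish without committing
	auto txActor = std::make_shared<TransactionFct>([](std::shared_ptr<ITransactionContext> ctx) {
		ValueFuture f = ctx->tx()->get("hello", false);
		ctx->continueAfter(f, [ctx]() { ctx->done(); });
	});

	executor->execute(txActor, [&scheduler]() { scheduler->stop(); });
	scheduler->join();
}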
|
|
@ -0,0 +1,58 @@
|
|||
/*
|
||||
* TesterUtil.cpp
|
||||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "TesterUtil.h"
|
||||
#include <cstdio>
|
||||
|
||||
namespace FdbApiTester {
|
||||
|
||||
Random::Random() {
|
||||
std::random_device dev;
|
||||
random.seed(dev());
|
||||
}
|
||||
|
||||
int Random::randomInt(int min, int max) {
|
||||
return std::uniform_int_distribution<int>(min, max)(random);
|
||||
}
|
||||
|
||||
Random& Random::get() {
|
||||
static thread_local Random random;
|
||||
return random;
|
||||
}
|
||||
|
||||
std::string Random::randomStringLowerCase(int minLength, int maxLength) {
|
||||
int length = randomInt(minLength, maxLength);
|
||||
std::string str;
|
||||
str.reserve(length);
|
||||
for (int i = 0; i < length; i++) {
|
||||
str += (char)randomInt('a', 'z');
|
||||
}
|
||||
return str;
|
||||
}
|
||||
|
||||
bool Random::randomBool(double trueRatio) {
|
||||
return std::uniform_real_distribution<double>(0.0, 1.0)(random) <= trueRatio;
|
||||
}
|
||||
|
||||
void print_internal_error(const char* msg, const char* file, int line) {
|
||||
fprintf(stderr, "Assertion %s failed @ %s %d:\n", msg, file, line);
|
||||
}
|
||||
|
||||
} // namespace FdbApiTester
|
|
@ -0,0 +1,87 @@
|
|||
/*
|
||||
* TesterUtil.h
|
||||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#ifndef APITESTER_UTIL_H
|
||||
#define APITESTER_UTIL_H
|
||||
|
||||
#include <random>
|
||||
#include <ostream>
|
||||
#include <optional>
#include <stdexcept>
#include <string>
|
||||
#include <fmt/format.h>
|
||||
|
||||
namespace fmt {
|
||||
|
||||
template <typename T>
|
||||
struct formatter<std::optional<T>> : fmt::formatter<T> {
|
||||
|
||||
template <typename FormatContext>
|
||||
auto format(const std::optional<T>& opt, FormatContext& ctx) {
|
||||
if (opt) {
|
||||
fmt::formatter<T>::format(*opt, ctx);
|
||||
return ctx.out();
|
||||
}
|
||||
return fmt::format_to(ctx.out(), "<empty>");
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace fmt
|
||||
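// Usage sketch for the formatter specialization above (illustrative only):
//   fmt::format("{}", std::optional<int>(42));  // -> "42"
//   fmt::format("{}", std::optional<int>());    // -> "<empty>"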
|
||||
namespace FdbApiTester {
|
||||
|
||||
class Random {
|
||||
public:
|
||||
Random();
|
||||
|
||||
static Random& get();
|
||||
|
||||
int randomInt(int min, int max);
|
||||
|
||||
std::string randomStringLowerCase(int minLength, int maxLength);
|
||||
|
||||
bool randomBool(double trueRatio);
|
||||
|
||||
std::mt19937 random;
|
||||
};
|
||||
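// Usage sketch (illustrative): Random is accessed through the thread-local singleton get().
//   int n = Random::get().randomInt(1, 10);                      // uniform in [1, 10]
//   std::string s = Random::get().randomStringLowerCase(4, 8);   // 4-8 random 'a'-'z' chars
//   bool coin = Random::get().randomBool(0.5);                   // true with ~50% probability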
|
||||
class TesterError : public std::runtime_error {
|
||||
public:
|
||||
explicit TesterError(const char* message) : std::runtime_error(message) {}
|
||||
explicit TesterError(const std::string& message) : std::runtime_error(message) {}
|
||||
TesterError(const TesterError&) = default;
|
||||
TesterError& operator=(const TesterError&) = default;
|
||||
TesterError(TesterError&&) = default;
|
||||
TesterError& operator=(TesterError&&) = default;
|
||||
};
|
||||
|
||||
void print_internal_error(const char* msg, const char* file, int line);
|
||||
|
||||
#define ASSERT(condition) \
|
||||
do { \
|
||||
if (!(condition)) { \
|
||||
print_internal_error(#condition, __FILE__, __LINE__); \
|
||||
abort(); \
|
||||
} \
|
||||
} while (false) // Aborts instead of throwing, so it is also safe to use in destructors, where throwing exceptions is extremely dangerous
|
||||
|
||||
} // namespace FdbApiTester
|
||||
|
||||
#endif
|
|
@ -0,0 +1,184 @@
|
|||
/*
|
||||
* TesterWorkload.cpp
|
||||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "TesterWorkload.h"
|
||||
#include "TesterUtil.h"
|
||||
#include "test/apitester/TesterScheduler.h"
|
||||
#include <cstdlib>
|
||||
#include <memory>
|
||||
#include <fmt/format.h>
|
||||
#include <vector>
|
||||
|
||||
namespace FdbApiTester {
|
||||
|
||||
int WorkloadConfig::getIntOption(const std::string& name, int defaultVal) const {
|
||||
auto iter = options.find(name);
|
||||
if (iter == options.end()) {
|
||||
return defaultVal;
|
||||
} else {
|
||||
char* endptr;
|
||||
int intVal = strtol(iter->second.c_str(), &endptr, 10);
|
||||
if (*endptr != '\0') {
|
||||
throw TesterError(
|
||||
fmt::format("Invalid workload configuration. Invalid value {} for {}", iter->second, name));
|
||||
}
|
||||
return intVal;
|
||||
}
|
||||
}
|
||||
|
||||
double WorkloadConfig::getFloatOption(const std::string& name, double defaultVal) const {
|
||||
auto iter = options.find(name);
|
||||
if (iter == options.end()) {
|
||||
return defaultVal;
|
||||
} else {
|
||||
char* endptr;
|
||||
double floatVal = strtod(iter->second.c_str(), &endptr);
|
||||
if (*endptr != '\0') {
|
||||
throw TesterError(
|
||||
fmt::format("Invalid workload configuration. Invalid value {} for {}", iter->second, name));
|
||||
}
|
||||
return floatVal;
|
||||
}
|
||||
}
|
||||
|
||||
WorkloadBase::WorkloadBase(const WorkloadConfig& config)
|
||||
: manager(nullptr), tasksScheduled(0), numErrors(0), clientId(config.clientId), numClients(config.numClients),
|
||||
failed(false) {
|
||||
maxErrors = config.getIntOption("maxErrors", 10);
|
||||
workloadId = fmt::format("{}{}", config.name, clientId);
|
||||
}
|
||||
|
||||
void WorkloadBase::init(WorkloadManager* manager) {
|
||||
this->manager = manager;
|
||||
}
|
||||
|
||||
void WorkloadBase::schedule(TTaskFct task) {
|
||||
if (failed) {
|
||||
return;
|
||||
}
|
||||
tasksScheduled++;
|
||||
manager->scheduler->schedule([this, task]() {
|
||||
task();
|
||||
scheduledTaskDone();
|
||||
});
|
||||
}
|
||||
|
||||
void WorkloadBase::execTransaction(std::shared_ptr<ITransactionActor> tx, TTaskFct cont, bool failOnError) {
|
||||
if (failed) {
|
||||
return;
|
||||
}
|
||||
tasksScheduled++;
|
||||
manager->txExecutor->execute(tx, [this, tx, cont, failOnError]() {
|
||||
fdb_error_t err = tx->getErrorCode();
|
||||
if (err == error_code_success) {
|
||||
cont();
|
||||
} else {
|
||||
std::string msg = fmt::format("Transaction failed with error: {} ({}})", err, fdb_get_error(err));
|
||||
if (failOnError) {
|
||||
error(msg);
|
||||
failed = true;
|
||||
} else {
|
||||
info(msg);
|
||||
cont();
|
||||
}
|
||||
}
|
||||
scheduledTaskDone();
|
||||
});
|
||||
}
|
||||
|
||||
void WorkloadBase::info(const std::string& msg) {
|
||||
fmt::print(stderr, "[{}] {}\n", workloadId, msg);
|
||||
}
|
||||
|
||||
void WorkloadBase::error(const std::string& msg) {
|
||||
fmt::print(stderr, "[{}] ERROR: {}\n", workloadId, msg);
|
||||
numErrors++;
|
||||
if (numErrors > maxErrors && !failed) {
|
||||
fmt::print(stderr, "[{}] ERROR: Stopping workload after {} errors\n", workloadId, numErrors);
|
||||
failed = true;
|
||||
}
|
||||
}
|
||||
|
||||
void WorkloadBase::scheduledTaskDone() {
|
||||
if (--tasksScheduled == 0) {
|
||||
if (numErrors > 0) {
|
||||
error(fmt::format("Workload failed with {} errors", numErrors.load()));
|
||||
} else {
|
||||
info("Workload successfully completed");
|
||||
}
|
||||
manager->workloadDone(this, numErrors > 0);
|
||||
}
|
||||
}
|
||||
|
||||
void WorkloadManager::add(std::shared_ptr<IWorkload> workload, TTaskFct cont) {
|
||||
std::unique_lock<std::mutex> lock(mutex);
|
||||
workloads[workload.get()] = WorkloadInfo{ workload, cont };
|
||||
}
|
||||
|
||||
void WorkloadManager::run() {
|
||||
std::vector<std::shared_ptr<IWorkload>> initialWorkloads;
|
||||
for (auto iter : workloads) {
|
||||
initialWorkloads.push_back(iter.second.ref);
|
||||
}
|
||||
for (auto iter : initialWorkloads) {
|
||||
iter->init(this);
|
||||
}
|
||||
for (auto iter : initialWorkloads) {
|
||||
iter->start();
|
||||
}
|
||||
scheduler->join();
|
||||
if (failed()) {
|
||||
fmt::print(stderr, "{} workloads failed\n", numWorkloadsFailed);
|
||||
} else {
|
||||
fprintf(stderr, "All workloads succesfully completed\n");
|
||||
}
|
||||
}
|
||||
|
||||
void WorkloadManager::workloadDone(IWorkload* workload, bool failed) {
|
||||
std::unique_lock<std::mutex> lock(mutex);
|
||||
auto iter = workloads.find(workload);
|
||||
ASSERT(iter != workloads.end());
|
||||
lock.unlock();
|
||||
iter->second.cont();
|
||||
lock.lock();
|
||||
workloads.erase(iter);
|
||||
if (failed) {
|
||||
numWorkloadsFailed++;
|
||||
}
|
||||
bool done = workloads.empty();
|
||||
lock.unlock();
|
||||
if (done) {
|
||||
scheduler->stop();
|
||||
}
|
||||
}
|
||||
|
||||
std::shared_ptr<IWorkload> IWorkloadFactory::create(std::string const& name, const WorkloadConfig& config) {
|
||||
auto it = factories().find(name);
|
||||
if (it == factories().end())
|
||||
return {}; // or throw?
|
||||
return it->second->create(config);
|
||||
}
|
||||
|
||||
std::unordered_map<std::string, IWorkloadFactory*>& IWorkloadFactory::factories() {
|
||||
static std::unordered_map<std::string, IWorkloadFactory*> theFactories;
|
||||
return theFactories;
|
||||
}
|
||||
|
||||
} // namespace FdbApiTester
|
|
@ -0,0 +1,205 @@
|
|||
/*
|
||||
* TesterWorkload.h
|
||||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <memory>
|
||||
#ifndef APITESTER_WORKLOAD_H
|
||||
#define APITESTER_WORKLOAD_H
|
||||
|
||||
#include "TesterTransactionExecutor.h"
|
||||
#include "TesterUtil.h"
|
||||
#include <atomic>
|
||||
#include <unordered_map>
|
||||
#include <mutex>
|
||||
|
||||
namespace FdbApiTester {
|
||||
|
||||
class WorkloadManager;
|
||||
|
||||
// Workload interface
|
||||
class IWorkload {
|
||||
public:
|
||||
virtual ~IWorkload() {}
|
||||
|
||||
// Initialize the workload
|
||||
virtual void init(WorkloadManager* manager) = 0;
|
||||
|
||||
// Start executing the workload
|
||||
virtual void start() = 0;
|
||||
};
|
||||
|
||||
// Workload configuration
|
||||
struct WorkloadConfig {
|
||||
// Workload name
|
||||
std::string name;
|
||||
|
||||
// Client ID assigned to the workload (a number from 0 to numClients-1)
|
||||
int clientId;
|
||||
|
||||
// Total number of clients
|
||||
int numClients;
|
||||
|
||||
// Workload options as key-value pairs
|
||||
std::unordered_map<std::string, std::string> options;
|
||||
|
||||
// Get an option of a certain type by name. Throws an exception if the value is of a wrong type
|
||||
int getIntOption(const std::string& name, int defaultVal) const;
|
||||
double getFloatOption(const std::string& name, double defaultVal) const;
|
||||
};
|
||||
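// Usage sketch (illustrative; the option names are only examples taken from the TOML test
// specs): the typed getters parse the string values and fall back to the default when the
// option is absent.
//   int numOps = config.getIntOption("numRandomOperations", 100);
//   double ratio = config.getFloatOption("readExistingKeysRatio", 0.9);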
|
||||
// A base class for test workloads
|
||||
// Tracks if workload is active, notifies the workload manager when the workload completes
|
||||
class WorkloadBase : public IWorkload {
|
||||
public:
|
||||
WorkloadBase(const WorkloadConfig& config);
|
||||
|
||||
// Initialize the workload
|
||||
void init(WorkloadManager* manager) override;
|
||||
|
||||
protected:
|
||||
// Schedule a task as part of the workload
|
||||
void schedule(TTaskFct task);
|
||||
|
||||
// Execute a transaction within the workload
|
||||
void execTransaction(std::shared_ptr<ITransactionActor> tx, TTaskFct cont, bool failOnError = true);
|
||||
|
||||
// Execute a transaction within the workload; a convenience method for a transaction defined by a lambda function
|
||||
void execTransaction(TTxStartFct start, TTaskFct cont, bool failOnError = true) {
|
||||
execTransaction(std::make_shared<TransactionFct>(start), cont, failOnError);
|
||||
}
|
||||
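// Usage sketch inside a derived workload (illustrative; the operations performed through
// ctx are elided):
//   execTransaction(
//       [](std::shared_ptr<ITransactionContext> ctx) { /* reads/writes via ctx, then commit */ },
//       [this]() { info("transaction completed"); });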
|
||||
// Log an error message, increase error counter
|
||||
void error(const std::string& msg);
|
||||
|
||||
// Log an info message
|
||||
void info(const std::string& msg);
|
||||
|
||||
private:
|
||||
WorkloadManager* manager;
|
||||
|
||||
// Decrease scheduled task counter, notify the workload manager
|
||||
// that the workload is done if no more tasks are scheduled
|
||||
void scheduledTaskDone();
|
||||
|
||||
// Keep track of tasks scheduled by the workload
|
||||
// End workload when this number falls to 0
|
||||
std::atomic<int> tasksScheduled;
|
||||
|
||||
// Number of errors logged
|
||||
std::atomic<int> numErrors;
|
||||
|
||||
protected:
|
||||
// Client ID assigned to the workload (a number from 0 to numClients-1)
|
||||
int clientId;
|
||||
|
||||
// Total number of clients
|
||||
int numClients;
|
||||
|
||||
// The maximum number of errors before stopping the workload
|
||||
int maxErrors;
|
||||
|
||||
// Workload identifier, consisting of workload name and client ID
|
||||
std::string workloadId;
|
||||
|
||||
// Workload has failed; no further transactions or continuations will be scheduled by the workload
|
||||
std::atomic<bool> failed;
|
||||
};
|
||||
|
||||
// Workload manager
|
||||
// Keeps track of active workloads, stops the scheduler after all workloads complete
|
||||
class WorkloadManager {
|
||||
public:
|
||||
WorkloadManager(ITransactionExecutor* txExecutor, IScheduler* scheduler)
|
||||
: txExecutor(txExecutor), scheduler(scheduler), numWorkloadsFailed(0) {}
|
||||
|
||||
// Add a workload
|
||||
// The continuation task is executed after the workload completes (mainly useful for subworkloads)
|
||||
void add(std::shared_ptr<IWorkload> workload, TTaskFct cont = NO_OP_TASK);
|
||||
|
||||
// Run all workloads. Blocks until all workloads complete
|
||||
void run();
|
||||
|
||||
// True if at least one workload has failed
|
||||
bool failed() {
|
||||
std::unique_lock<std::mutex> lock(mutex);
|
||||
return numWorkloadsFailed > 0;
|
||||
}
|
||||
|
||||
private:
|
||||
friend WorkloadBase;
|
||||
|
||||
// Info about a running workload
|
||||
struct WorkloadInfo {
|
||||
// Reference to the workload for ownership
|
||||
std::shared_ptr<IWorkload> ref;
|
||||
// Continuation to be executed after completing the workload
|
||||
TTaskFct cont;
|
||||
};
|
||||
|
||||
// To be called by a workload to notify that it is done
|
||||
void workloadDone(IWorkload* workload, bool failed);
|
||||
|
||||
// Transaction executor to be used by the workloads
|
||||
ITransactionExecutor* txExecutor;
|
||||
|
||||
// A scheduler to be used by the workloads
|
||||
IScheduler* scheduler;
|
||||
|
||||
// Mutex protects access to workloads & numWorkloadsFailed
|
||||
std::mutex mutex;
|
||||
|
||||
// A map of currently running workloads
|
||||
std::unordered_map<IWorkload*, WorkloadInfo> workloads;
|
||||
|
||||
// Number of workloads failed
|
||||
int numWorkloadsFailed;
|
||||
};
|
||||
|
||||
// A workload factory
|
||||
struct IWorkloadFactory {
|
||||
// create a workload by name
|
||||
static std::shared_ptr<IWorkload> create(std::string const& name, const WorkloadConfig& config);
|
||||
|
||||
// a singleton registry of workload factories
|
||||
static std::unordered_map<std::string, IWorkloadFactory*>& factories();
|
||||
|
||||
// Interface to be implemented by a workload factory
|
||||
virtual ~IWorkloadFactory() = default;
|
||||
virtual std::shared_ptr<IWorkload> create(const WorkloadConfig& config) = 0;
|
||||
};
|
||||
|
||||
/**
|
||||
* A template for a workload factory for creating workloads of a certain type
|
||||
*
|
||||
* Declare a global instance of the factory for a workload type as follows:
|
||||
* WorkloadFactory<MyWorkload> MyWorkloadFactory("myWorkload");
|
||||
*/
|
||||
template <class WorkloadType>
|
||||
struct WorkloadFactory : IWorkloadFactory {
|
||||
WorkloadFactory(const char* name) { factories()[name] = this; }
|
||||
std::shared_ptr<IWorkload> create(const WorkloadConfig& config) override {
|
||||
return std::make_shared<WorkloadType>(config);
|
||||
}
|
||||
};
|
||||
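// Registration sketch (illustrative; MyWorkload and "myWorkload" are hypothetical names):
// a workload derived from WorkloadBase registers itself by defining a global factory
// instance. The name string is what IWorkloadFactory::create() looks up and what the
// 'name' field of a [[test.workload]] section in a TOML test spec refers to.
//
//   class MyWorkload : public WorkloadBase {
//   public:
//       MyWorkload(const WorkloadConfig& config) : WorkloadBase(config) {}
//       void start() override { /* schedule tasks / execTransaction(...) */ }
//   };
//   WorkloadFactory<MyWorkload> MyWorkloadFactory("myWorkload");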
|
||||
} // namespace FdbApiTester
|
||||
|
||||
#endif
|
|
@ -0,0 +1,284 @@
|
|||
/*
|
||||
* fdb_c_api_tester.cpp
|
||||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "TesterOptions.h"
|
||||
#include "TesterWorkload.h"
|
||||
#include "TesterScheduler.h"
|
||||
#include "TesterTransactionExecutor.h"
|
||||
#include "TesterTestSpec.h"
|
||||
#include "TesterUtil.h"
|
||||
#include "flow/SimpleOpt.h"
|
||||
#include "bindings/c/foundationdb/fdb_c.h"
|
||||
|
||||
#include <algorithm>
#include <memory>
#include <string_view>
|
||||
#include <stdexcept>
|
||||
#include <thread>
|
||||
#include <fmt/format.h>
|
||||
|
||||
namespace FdbApiTester {
|
||||
|
||||
namespace {
|
||||
|
||||
enum TesterOptionId {
|
||||
OPT_CONNFILE,
|
||||
OPT_HELP,
|
||||
OPT_TRACE,
|
||||
OPT_TRACE_DIR,
|
||||
OPT_LOGGROUP,
|
||||
OPT_TRACE_FORMAT,
|
||||
OPT_KNOB,
|
||||
OPT_EXTERNAL_CLIENT_LIBRARY,
|
||||
OPT_TEST_FILE
|
||||
};
|
||||
|
||||
CSimpleOpt::SOption TesterOptionDefs[] = //
|
||||
{ { OPT_CONNFILE, "-C", SO_REQ_SEP },
|
||||
{ OPT_CONNFILE, "--cluster-file", SO_REQ_SEP },
|
||||
{ OPT_TRACE, "--log", SO_NONE },
|
||||
{ OPT_TRACE_DIR, "--log-dir", SO_REQ_SEP },
|
||||
{ OPT_LOGGROUP, "--log-group", SO_REQ_SEP },
|
||||
{ OPT_HELP, "-h", SO_NONE },
|
||||
{ OPT_HELP, "--help", SO_NONE },
|
||||
{ OPT_TRACE_FORMAT, "--trace-format", SO_REQ_SEP },
|
||||
{ OPT_KNOB, "--knob-", SO_REQ_SEP },
|
||||
{ OPT_EXTERNAL_CLIENT_LIBRARY, "--external-client-library", SO_REQ_SEP },
|
||||
{ OPT_TEST_FILE, "-f", SO_REQ_SEP },
|
||||
{ OPT_TEST_FILE, "--test-file", SO_REQ_SEP },
|
||||
SO_END_OF_OPTIONS };
|
||||
|
||||
void printProgramUsage(const char* execName) {
|
||||
printf("usage: %s [OPTIONS]\n"
|
||||
"\n",
|
||||
execName);
|
||||
printf(" -C, --cluster-file FILE\n"
|
||||
" The path of a file containing the connection string for the\n"
|
||||
" FoundationDB cluster. The default is `fdb.cluster'\n"
|
||||
" --log Enables trace file logging for the CLI session.\n"
|
||||
" --log-dir PATH Specifes the output directory for trace files. If\n"
|
||||
" unspecified, defaults to the current directory. Has\n"
|
||||
" no effect unless --log is specified.\n"
|
||||
" --log-group LOG_GROUP\n"
|
||||
" Sets the LogGroup field with the specified value for all\n"
|
||||
" events in the trace output (defaults to `default').\n"
|
||||
" --trace-format FORMAT\n"
|
||||
" Select the format of the log files. xml (the default) and json\n"
|
||||
" are supported. Has no effect unless --log is specified.\n"
|
||||
" --knob-KNOBNAME KNOBVALUE\n"
|
||||
" Changes a knob option. KNOBNAME should be lowercase.\n"
|
||||
" --external-client-library FILE\n"
|
||||
" Path to the external client library.\n"
|
||||
" -f, --test-file FILE\n"
|
||||
" Test file to run.\n"
|
||||
" -h, --help Display this help and exit.\n");
|
||||
}
|
||||
|
||||
// Extracts the key for command line arguments that are specified with a prefix (e.g. --knob-).
|
||||
// This function converts any hyphens in the extracted key to underscores.
|
||||
bool extractPrefixedArgument(std::string prefix, const std::string& arg, std::string& res) {
|
||||
if (arg.size() <= prefix.size() || arg.find(prefix) != 0 ||
|
||||
(arg[prefix.size()] != '-' && arg[prefix.size()] != '_')) {
|
||||
return false;
|
||||
}
|
||||
|
||||
res = arg.substr(prefix.size() + 1);
|
||||
std::transform(res.begin(), res.end(), res.begin(), [](int c) { return c == '-' ? '_' : c; });
|
||||
return true;
|
||||
}
|
||||
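// Behavior sketch (illustrative; "--knob-max-log-size" is a hypothetical argument):
//   std::string key;
//   extractPrefixedArgument("--knob", "--knob-max-log-size", key);
//   // returns true, key == "max_log_size" (hyphens converted to underscores)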
|
||||
bool validateTraceFormat(std::string_view format) {
|
||||
return format == "xml" || format == "json";
|
||||
}
|
||||
|
||||
bool processArg(TesterOptions& options, const CSimpleOpt& args) {
|
||||
switch (args.OptionId()) {
|
||||
case OPT_CONNFILE:
|
||||
options.clusterFile = args.OptionArg();
|
||||
break;
|
||||
case OPT_TRACE:
|
||||
options.trace = true;
|
||||
break;
|
||||
case OPT_TRACE_DIR:
|
||||
options.traceDir = args.OptionArg();
|
||||
break;
|
||||
case OPT_LOGGROUP:
|
||||
options.logGroup = args.OptionArg();
|
||||
break;
|
||||
case OPT_TRACE_FORMAT:
|
||||
if (!validateTraceFormat(args.OptionArg())) {
|
||||
fmt::print(stderr, "ERROR: Unrecognized trace format `{}'\n", args.OptionArg());
|
||||
return false;
|
||||
}
|
||||
options.traceFormat = args.OptionArg();
|
||||
break;
|
||||
case OPT_KNOB: {
|
||||
std::string knobName;
|
||||
if (!extractPrefixedArgument("--knob", args.OptionSyntax(), knobName)) {
|
||||
fmt::print(stderr, "ERROR: unable to parse knob option '{}'\n", args.OptionSyntax());
|
||||
return false;
|
||||
}
|
||||
options.knobs.emplace_back(knobName, args.OptionArg());
|
||||
break;
|
||||
}
|
||||
case OPT_EXTERNAL_CLIENT_LIBRARY:
|
||||
options.externalClientLibrary = args.OptionArg();
|
||||
break;
|
||||
|
||||
case OPT_TEST_FILE:
|
||||
options.testFile = args.OptionArg();
|
||||
options.testSpec = readTomlTestSpec(options.testFile);
|
||||
break;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
bool parseArgs(TesterOptions& options, int argc, char** argv) {
|
||||
// declare our options parser, pass in the arguments from main
|
||||
// as well as our array of valid options.
|
||||
CSimpleOpt args(argc, argv, TesterOptionDefs);
|
||||
|
||||
// while there are arguments left to process
|
||||
while (args.Next()) {
|
||||
if (args.LastError() == SO_SUCCESS) {
|
||||
if (args.OptionId() == OPT_HELP) {
|
||||
printProgramUsage(argv[0]);
|
||||
return false;
|
||||
}
|
||||
if (!processArg(options, args)) {
|
||||
return false;
|
||||
}
|
||||
} else {
|
||||
fmt::print(stderr, "ERROR: Invalid argument: {}\n", args.OptionText());
|
||||
printProgramUsage(argv[0]);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
void fdb_check(fdb_error_t e) {
|
||||
if (e) {
|
||||
fmt::print(stderr, "Unexpected FDB error: {}({})\n", e, fdb_get_error(e));
|
||||
std::abort();
|
||||
}
|
||||
}
|
||||
|
||||
void applyNetworkOptions(TesterOptions& options) {
|
||||
if (!options.externalClientLibrary.empty()) {
|
||||
fdb_check(FdbApi::setOption(FDBNetworkOption::FDB_NET_OPTION_DISABLE_LOCAL_CLIENT));
|
||||
fdb_check(
|
||||
FdbApi::setOption(FDBNetworkOption::FDB_NET_OPTION_EXTERNAL_CLIENT_LIBRARY, options.externalClientLibrary));
|
||||
}
|
||||
|
||||
if (options.testSpec.multiThreaded) {
|
||||
fdb_check(
|
||||
FdbApi::setOption(FDBNetworkOption::FDB_NET_OPTION_CLIENT_THREADS_PER_VERSION, options.numFdbThreads));
|
||||
}
|
||||
|
||||
if (options.testSpec.fdbCallbacksOnExternalThreads) {
|
||||
fdb_check(FdbApi::setOption(FDBNetworkOption::FDB_NET_OPTION_CALLBACKS_ON_EXTERNAL_THREADS));
|
||||
}
|
||||
|
||||
if (options.testSpec.buggify) {
|
||||
fdb_check(FdbApi::setOption(FDBNetworkOption::FDB_NET_OPTION_CLIENT_BUGGIFY_ENABLE));
|
||||
}
|
||||
|
||||
if (options.trace) {
|
||||
fdb_check(FdbApi::setOption(FDBNetworkOption::FDB_NET_OPTION_TRACE_ENABLE, options.traceDir));
|
||||
fdb_check(FdbApi::setOption(FDBNetworkOption::FDB_NET_OPTION_TRACE_FORMAT, options.traceFormat));
|
||||
fdb_check(FdbApi::setOption(FDBNetworkOption::FDB_NET_OPTION_TRACE_LOG_GROUP, options.logGroup));
|
||||
}
|
||||
|
||||
for (auto knob : options.knobs) {
|
||||
fdb_check(FdbApi::setOption(FDBNetworkOption::FDB_NET_OPTION_KNOB,
|
||||
fmt::format("{}={}", knob.first.c_str(), knob.second.c_str())));
|
||||
}
|
||||
}
|
||||
|
||||
void randomizeOptions(TesterOptions& options) {
|
||||
Random& random = Random::get();
|
||||
options.numFdbThreads = random.randomInt(options.testSpec.minFdbThreads, options.testSpec.maxFdbThreads);
|
||||
options.numClientThreads = random.randomInt(options.testSpec.minClientThreads, options.testSpec.maxClientThreads);
|
||||
options.numDatabases = random.randomInt(options.testSpec.minDatabases, options.testSpec.maxDatabases);
|
||||
options.numClients = random.randomInt(options.testSpec.minClients, options.testSpec.maxClients);
|
||||
}
|
||||
|
||||
bool runWorkloads(TesterOptions& options) {
|
||||
TransactionExecutorOptions txExecOptions;
|
||||
txExecOptions.blockOnFutures = options.testSpec.blockOnFutures;
|
||||
txExecOptions.numDatabases = options.numDatabases;
|
||||
txExecOptions.databasePerTransaction = options.testSpec.databasePerTransaction;
|
||||
|
||||
std::unique_ptr<IScheduler> scheduler = createScheduler(options.numClientThreads);
|
||||
std::unique_ptr<ITransactionExecutor> txExecutor = createTransactionExecutor(txExecOptions);
|
||||
scheduler->start();
|
||||
txExecutor->init(scheduler.get(), options.clusterFile.c_str());
|
||||
|
||||
WorkloadManager workloadMgr(txExecutor.get(), scheduler.get());
|
||||
for (const auto& workloadSpec : options.testSpec.workloads) {
|
||||
for (int i = 0; i < options.numClients; i++) {
|
||||
WorkloadConfig config;
|
||||
config.name = workloadSpec.name;
|
||||
config.options = workloadSpec.options;
|
||||
config.clientId = i;
|
||||
config.numClients = options.numClients;
|
||||
std::shared_ptr<IWorkload> workload = IWorkloadFactory::create(workloadSpec.name, config);
|
||||
if (!workload) {
|
||||
throw TesterError(fmt::format("Unknown workload '{}'", workloadSpec.name));
|
||||
}
|
||||
workloadMgr.add(workload);
|
||||
}
|
||||
}
|
||||
|
||||
workloadMgr.run();
|
||||
return !workloadMgr.failed();
|
||||
}
|
||||
|
||||
} // namespace
|
||||
} // namespace FdbApiTester
|
||||
|
||||
using namespace FdbApiTester;
|
||||
|
||||
int main(int argc, char** argv) {
|
||||
int retCode = 0;
|
||||
try {
|
||||
TesterOptions options;
|
||||
if (!parseArgs(options, argc, argv)) {
|
||||
return 1;
|
||||
}
|
||||
randomizeOptions(options);
|
||||
|
||||
fdb_check(fdb_select_api_version(options.testSpec.apiVersion));
|
||||
applyNetworkOptions(options);
|
||||
fdb_check(fdb_setup_network());
|
||||
|
||||
std::thread network_thread{ &fdb_run_network };
|
||||
|
||||
if (!runWorkloads(options)) {
|
||||
retCode = 1;
|
||||
}
|
||||
|
||||
fdb_check(fdb_stop_network());
|
||||
network_thread.join();
|
||||
} catch (const std::runtime_error& err) {
|
||||
fmt::print(stderr, "ERROR: {}\n", err.what());
|
||||
retCode = 1;
|
||||
}
|
||||
return retCode;
|
||||
}
|
|
@ -0,0 +1,125 @@
|
|||
#!/usr/bin/env python3
|
||||
#
|
||||
# run_c_api_tests.py
|
||||
#
|
||||
# This source file is part of the FoundationDB open source project
|
||||
#
|
||||
# Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
import sys
|
||||
import subprocess
|
||||
import argparse
|
||||
import os
|
||||
from subprocess import Popen, TimeoutExpired
|
||||
import logging
|
||||
import signal
|
||||
|
||||
|
||||
def get_logger():
|
||||
return logging.getLogger('foundationdb.run_c_api_tests')
|
||||
|
||||
|
||||
def initialize_logger_level(logging_level):
|
||||
logger = get_logger()
|
||||
|
||||
assert logging_level in ['DEBUG', 'INFO', 'WARNING', 'ERROR']
|
||||
|
||||
logging.basicConfig(format='%(message)s')
|
||||
if logging_level == 'DEBUG':
|
||||
logger.setLevel(logging.DEBUG)
|
||||
elif logging_level == 'INFO':
|
||||
logger.setLevel(logging.INFO)
|
||||
elif logging_level == 'WARNING':
|
||||
logger.setLevel(logging.WARNING)
|
||||
elif logging_level == 'ERROR':
|
||||
logger.setLevel(logging.ERROR)
|
||||
|
||||
|
||||
def run_tester(args, test_file):
|
||||
cmd = [args.tester_binary, "--cluster-file",
|
||||
args.cluster_file, "--test-file", test_file]
|
||||
if args.external_client_library is not None:
|
||||
cmd += ["--external-client-library", args.external_client_library]
|
||||
|
||||
get_logger().info('\nRunning tester \'%s\'...' % ' '.join(cmd))
|
||||
proc = Popen(cmd, stdout=sys.stdout, stderr=sys.stderr)
|
||||
timed_out = False
|
||||
try:
|
||||
ret_code = proc.wait(args.timeout)
|
||||
except TimeoutExpired:
|
||||
proc.kill()
ret_code = 1
|
||||
timed_out = True
|
||||
except Exception as e:
|
||||
raise Exception('Unable to run tester (%s)' % e)
|
||||
|
||||
if ret_code != 0:
|
||||
if ret_code < 0:
|
||||
reason = signal.Signals(-ret_code).name
|
||||
else:
|
||||
reason = 'exit code: %d' % ret_code
|
||||
if timed_out:
|
||||
reason = 'timed out after %d seconds' % args.timeout
|
||||
ret_code = 1
|
||||
get_logger().error('\n\'%s\' did not complete successfully (%s)' %
|
||||
(cmd[0], reason))
|
||||
|
||||
get_logger().info('')
|
||||
return ret_code
|
||||
|
||||
|
||||
def run_tests(args):
|
||||
num_failed = 0
|
||||
test_files = [f for f in os.listdir(args.test_dir)
|
||||
if os.path.isfile(os.path.join(args.test_dir, f)) and f.endswith(".toml")]
|
||||
|
||||
for test_file in test_files:
|
||||
get_logger().info('=========================================================')
|
||||
get_logger().info('Running test %s' % test_file)
|
||||
get_logger().info('=========================================================')
|
||||
ret_code = run_tester(args, os.path.join(args.test_dir, test_file))
|
||||
if ret_code != 0:
|
||||
num_failed += 1
|
||||
|
||||
return num_failed
|
||||
|
||||
|
||||
def parse_args(argv):
|
||||
parser = argparse.ArgumentParser(description='FoundationDB C API Tester')
|
||||
|
||||
parser.add_argument('--cluster-file', type=str, default="fdb.cluster",
|
||||
help='The cluster file for the cluster being connected to. (default: fdb.cluster)')
|
||||
parser.add_argument('--tester-binary', type=str, default="fdb_c_api_tester",
|
||||
help='Path to the fdb_c_api_tester executable. (default: fdb_c_api_tester)')
|
||||
parser.add_argument('--external-client-library', type=str, default=None,
|
||||
help='Path to the external client library. (default: None)')
|
||||
parser.add_argument('--test-dir', type=str, default="./",
|
||||
help='Path to a directory with test definitions. (default: ./)')
|
||||
parser.add_argument('--timeout', type=int, default=300,
|
||||
help='The timeout in seconds for running each individual test. (default 300)')
|
||||
parser.add_argument('--logging-level', type=str, default='INFO',
|
||||
choices=['ERROR', 'WARNING', 'INFO', 'DEBUG'], help='Specifies the level of detail in the tester output (default=\'INFO\').')
|
||||
|
||||
return parser.parse_args(argv)
|
||||
|
||||
|
||||
def main(argv):
|
||||
args = parse_args(argv)
|
||||
initialize_logger_level(args.logging_level)
|
||||
return run_tests(args)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
sys.exit(main(sys.argv[1:]))
|
|
@ -0,0 +1,24 @@
|
|||
[[test]]
|
||||
title = 'Cancel Transaction with Blocking Waits'
|
||||
multiThreaded = true
|
||||
buggify = true
|
||||
blockOnFutures = true
|
||||
minFdbThreads = 2
|
||||
maxFdbThreads = 8
|
||||
minDatabases = 2
|
||||
maxDatabases = 8
|
||||
minClientThreads = 2
|
||||
maxClientThreads = 8
|
||||
minClients = 2
|
||||
maxClients = 8
|
||||
|
||||
[[test.workload]]
|
||||
name = 'CancelTransaction'
|
||||
minKeyLength = 1
|
||||
maxKeyLength = 64
|
||||
minValueLength = 1
|
||||
maxValueLength = 1000
|
||||
maxKeysPerTransaction = 50
|
||||
initialSize = 100
|
||||
numRandomOperations = 100
|
||||
readExistingKeysRatio = 0.9
|
|
@ -0,0 +1,23 @@
|
|||
[[test]]
|
||||
title = 'Cancel Transactions with Future Callbacks'
|
||||
multiThreaded = true
|
||||
buggify = true
|
||||
minFdbThreads = 2
|
||||
maxFdbThreads = 8
|
||||
minDatabases = 2
|
||||
maxDatabases = 8
|
||||
minClientThreads = 2
|
||||
maxClientThreads = 8
|
||||
minClients = 2
|
||||
maxClients = 8
|
||||
|
||||
[[test.workload]]
|
||||
name = 'CancelTransaction'
|
||||
minKeyLength = 1
|
||||
maxKeyLength = 64
|
||||
minValueLength = 1
|
||||
maxValueLength = 1000
|
||||
maxKeysPerTransaction = 50
|
||||
initialSize = 100
|
||||
numRandomOperations = 100
|
||||
readExistingKeysRatio = 0.9
|
|
@ -0,0 +1,24 @@
|
|||
[[test]]
|
||||
title = 'Cancel Transaction with Database per Transaction'
|
||||
multiThreaded = true
|
||||
buggify = true
|
||||
databasePerTransaction = true
|
||||
minFdbThreads = 2
|
||||
maxFdbThreads = 8
|
||||
minDatabases = 2
|
||||
maxDatabases = 8
|
||||
minClientThreads = 2
|
||||
maxClientThreads = 8
|
||||
minClients = 2
|
||||
maxClients = 8
|
||||
|
||||
[[test.workload]]
|
||||
name = 'CancelTransaction'
|
||||
minKeyLength = 1
|
||||
maxKeyLength = 64
|
||||
minValueLength = 1
|
||||
maxValueLength = 1000
|
||||
maxKeysPerTransaction = 50
|
||||
initialSize = 100
|
||||
numRandomOperations = 100
|
||||
readExistingKeysRatio = 0.9
|
|
@ -0,0 +1,25 @@
|
|||
[[test]]
|
||||
title = 'API Correctness Blocking'
|
||||
multiThreaded = true
|
||||
buggify = true
|
||||
blockOnFutures = true
|
||||
minFdbThreads = 2
|
||||
maxFdbThreads = 8
|
||||
minDatabases = 2
|
||||
maxDatabases = 8
|
||||
minClientThreads = 2
|
||||
maxClientThreads = 8
|
||||
minClients = 2
|
||||
maxClients = 8
|
||||
|
||||
|
||||
[[test.workload]]
|
||||
name = 'ApiCorrectness'
|
||||
minKeyLength = 1
|
||||
maxKeyLength = 64
|
||||
minValueLength = 1
|
||||
maxValueLength = 1000
|
||||
maxKeysPerTransaction = 50
|
||||
initialSize = 100
|
||||
numRandomOperations = 100
|
||||
readExistingKeysRatio = 0.9
|
|
@ -0,0 +1,24 @@
|
|||
[[test]]
|
||||
title = 'API Correctness Callbacks On External Threads'
|
||||
multiThreaded = true
|
||||
fdbCallbacksOnExternalThreads = true
|
||||
buggify = true
|
||||
minFdbThreads = 2
|
||||
maxFdbThreads = 8
|
||||
minDatabases = 2
|
||||
maxDatabases = 8
|
||||
minClientThreads = 2
|
||||
maxClientThreads = 8
|
||||
minClients = 2
|
||||
maxClients = 8
|
||||
|
||||
[[test.workload]]
|
||||
name = 'ApiCorrectness'
|
||||
minKeyLength = 1
|
||||
maxKeyLength = 64
|
||||
minValueLength = 1
|
||||
maxValueLength = 1000
|
||||
maxKeysPerTransaction = 50
|
||||
initialSize = 100
|
||||
numRandomOperations = 100
|
||||
readExistingKeysRatio = 0.9
|
|
@ -0,0 +1,24 @@
|
|||
[[test]]
|
||||
title = 'API Correctness Database Per Transaction'
|
||||
multiThreaded = true
|
||||
buggify = true
|
||||
databasePerTransaction = true
|
||||
minFdbThreads = 2
|
||||
maxFdbThreads = 8
|
||||
minDatabases = 2
|
||||
maxDatabases = 8
|
||||
minClientThreads = 2
|
||||
maxClientThreads = 8
|
||||
minClients = 2
|
||||
maxClients = 8
|
||||
|
||||
[[test.workload]]
|
||||
name = 'ApiCorrectness'
|
||||
minKeyLength = 1
|
||||
maxKeyLength = 64
|
||||
minValueLength = 1
|
||||
maxValueLength = 1000
|
||||
maxKeysPerTransaction = 50
|
||||
initialSize = 100
|
||||
numRandomOperations = 100
|
||||
readExistingKeysRatio = 0.9
|
|
@ -0,0 +1,23 @@
|
|||
[[test]]
|
||||
title = 'API Correctness Multi Threaded'
|
||||
multiThreaded = true
|
||||
buggify = true
|
||||
minFdbThreads = 2
|
||||
maxFdbThreads = 8
|
||||
minDatabases = 2
|
||||
maxDatabases = 8
|
||||
minClientThreads = 2
|
||||
maxClientThreads = 8
|
||||
minClients = 2
|
||||
maxClients = 8
|
||||
|
||||
[[test.workload]]
|
||||
name = 'ApiCorrectness'
|
||||
minKeyLength = 1
|
||||
maxKeyLength = 64
|
||||
minValueLength = 1
|
||||
maxValueLength = 1000
|
||||
maxKeysPerTransaction = 50
|
||||
initialSize = 100
|
||||
numRandomOperations = 100
|
||||
readExistingKeysRatio = 0.9
|
|
@ -0,0 +1,16 @@
|
|||
[[test]]
|
||||
title = 'API Correctness Single Threaded'
|
||||
minClients = 1
|
||||
maxClients = 3
|
||||
multiThreaded = false
|
||||
|
||||
[[test.workload]]
|
||||
name = 'ApiCorrectness'
|
||||
minKeyLength = 1
|
||||
maxKeyLength = 64
|
||||
minValueLength = 1
|
||||
maxValueLength = 1000
|
||||
maxKeysPerTransaction = 50
|
||||
initialSize = 100
|
||||
numRandomOperations = 100
|
||||
readExistingKeysRatio = 0.9
|
|
@ -0,0 +1,83 @@
|
|||
/*
|
||||
* client_memory_test.cpp
|
||||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#define FDB_API_VERSION 710
|
||||
#include <foundationdb/fdb_c.h>
|
||||
|
||||
#include "unit/fdb_api.hpp"
|
||||
|
||||
#include <thread>
|
||||
#include <iostream>
|
||||
#include <vector>
|
||||
|
||||
void fdb_check(fdb_error_t e) {
|
||||
if (e) {
|
||||
std::cerr << fdb_get_error(e) << std::endl;
|
||||
std::abort();
|
||||
}
|
||||
}
|
||||
|
||||
FDBDatabase* fdb_open_database(const char* clusterFile) {
|
||||
FDBDatabase* db;
|
||||
fdb_check(fdb_create_database(clusterFile, &db));
|
||||
return db;
|
||||
}
|
||||
|
||||
int main(int argc, char** argv) {
|
||||
if (argc != 2) {
|
||||
printf("Usage: %s <cluster_file>", argv[0]);
|
||||
}
|
||||
fdb_check(fdb_select_api_version(710));
|
||||
fdb_check(fdb_setup_network());
|
||||
std::thread network_thread{ &fdb_run_network };
|
||||
|
||||
fdb_check(
|
||||
fdb_network_set_option(FDBNetworkOption::FDB_NET_OPTION_TRACE_ENABLE, reinterpret_cast<const uint8_t*>(""), 0));
|
||||
fdb_check(fdb_network_set_option(
|
||||
FDBNetworkOption::FDB_NET_OPTION_TRACE_FORMAT, reinterpret_cast<const uint8_t*>("json"), 4));
|
||||
|
||||
// Use a bunch of memory from different client threads
|
||||
FDBDatabase* db = fdb_open_database(argv[1]);
|
||||
auto thread_func = [&]() {
|
||||
fdb::Transaction tr(db);
|
||||
for (int i = 0; i < 10000; ++i) {
|
||||
tr.set(std::to_string(i), std::string(i, '\x00'));
|
||||
}
|
||||
tr.cancel();
|
||||
};
|
||||
std::vector<std::thread> threads;
|
||||
constexpr auto kThreadCount = 64;
|
||||
for (int i = 0; i < kThreadCount; ++i) {
|
||||
threads.emplace_back(thread_func);
|
||||
}
|
||||
for (auto& thread : threads) {
|
||||
thread.join();
|
||||
}
|
||||
fdb_database_destroy(db);
|
||||
db = nullptr;
|
||||
|
||||
// Memory usage should go down now if the allocator is returning memory to the OS. It's expected that something is
|
||||
// externally monitoring the memory usage of this process during this sleep.
|
||||
using namespace std::chrono_literals;
|
||||
std::this_thread::sleep_for(10s);
|
||||
|
||||
fdb_check(fdb_stop_network());
|
||||
network_thread.join();
|
||||
}
|
|
@ -2,8 +2,8 @@
|
|||
#include <errno.h>
|
||||
#include <fcntl.h>
|
||||
#include <getopt.h>
|
||||
#include <inttypes.h>
|
||||
#include <math.h>
|
||||
#include <stdint.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
|
@ -585,6 +585,7 @@ int64_t granule_start_load(const char* filename,
|
|||
int filenameLength,
|
||||
int64_t offset,
|
||||
int64_t length,
|
||||
int64_t fullFileLength,
|
||||
void* userContext) {
|
||||
FILE* fp;
|
||||
char full_fname[PATH_MAX];
|
||||
|
@ -616,7 +617,7 @@ int64_t granule_start_load(const char* filename,
|
|||
// don't seek if offset == 0
|
||||
if (offset && fseek(fp, offset, SEEK_SET)) {
|
||||
// if fseek was non-zero, it failed
|
||||
fprintf(stderr, "ERROR: BG could not seek to %lld in file %s\n", offset, full_fname);
|
||||
fprintf(stderr, "ERROR: BG could not seek to %" PRId64 " in file %s\n", offset, full_fname);
|
||||
fclose(fp);
|
||||
return -1;
|
||||
}
|
||||
|
@ -626,7 +627,7 @@ int64_t granule_start_load(const char* filename,
|
|||
fclose(fp);
|
||||
|
||||
if (readSize != length) {
|
||||
fprintf(stderr, "ERROR: BG could not read %lld bytes from file: %s\n", length, full_fname);
|
||||
fprintf(stderr, "ERROR: BG could not read %" PRId64 " bytes from file: %s\n", length, full_fname);
|
||||
return -1;
|
||||
}
|
||||
|
||||
|
@ -637,7 +638,7 @@ int64_t granule_start_load(const char* filename,
|
|||
uint8_t* granule_get_load(int64_t loadId, void* userContext) {
|
||||
BGLocalFileContext* context = (BGLocalFileContext*)userContext;
|
||||
if (context->data_by_id[loadId] == 0) {
|
||||
fprintf(stderr, "ERROR: BG loadId invalid for get_load: %lld\n", loadId);
|
||||
fprintf(stderr, "ERROR: BG loadId invalid for get_load: %" PRId64 "\n", loadId);
|
||||
return 0;
|
||||
}
|
||||
return context->data_by_id[loadId];
|
||||
|
@ -646,7 +647,7 @@ uint8_t* granule_get_load(int64_t loadId, void* userContext) {
|
|||
void granule_free_load(int64_t loadId, void* userContext) {
|
||||
BGLocalFileContext* context = (BGLocalFileContext*)userContext;
|
||||
if (context->data_by_id[loadId] == 0) {
|
||||
fprintf(stderr, "ERROR: BG loadId invalid for free_load: %lld\n", loadId);
|
||||
fprintf(stderr, "ERROR: BG loadId invalid for free_load: %" PRId64 "\n", loadId);
|
||||
}
|
||||
free(context->data_by_id[loadId]);
|
||||
context->data_by_id[loadId] = 0;
|
||||
|
@ -682,6 +683,7 @@ int run_op_read_blob_granules(FDBTransaction* transaction,
|
|||
granuleContext.get_load_f = &granule_get_load;
|
||||
granuleContext.free_load_f = &granule_free_load;
|
||||
granuleContext.debugNoMaterialize = !doMaterialize;
|
||||
granuleContext.granuleParallelism = 2; // TODO make knob or setting for changing this?
|
||||
|
||||
r = fdb_transaction_read_blob_granules(transaction,
|
||||
(uint8_t*)keystr,
|
||||
|
@ -689,7 +691,7 @@ int run_op_read_blob_granules(FDBTransaction* transaction,
|
|||
(uint8_t*)keystr2,
|
||||
strlen(keystr2),
|
||||
0 /* beginVersion*/,
|
||||
-1, /* endVersion. -1 is use txn read version */
|
||||
-2, /* endVersion. -2 (latestVersion) is use txn read version */
|
||||
granuleContext);
|
||||
|
||||
free(fileContext.data_by_id);
|
||||
|
@ -1120,7 +1122,7 @@ int run_workload(FDBTransaction* transaction,
|
|||
if (tracetimer == dotrace) {
|
||||
fdb_error_t err;
|
||||
tracetimer = 0;
|
||||
snprintf(traceid, 32, "makotrace%019lld", total_xacts);
|
||||
snprintf(traceid, 32, "makotrace%019" PRId64, total_xacts);
|
||||
fprintf(debugme, "DEBUG: txn tracing %s\n", traceid);
|
||||
err = fdb_transaction_set_option(transaction,
|
||||
FDB_TR_OPTION_DEBUG_TRANSACTION_IDENTIFIER,
|
||||
|
@ -1284,7 +1286,7 @@ void* worker_thread(void* thread_args) {
|
|||
}
|
||||
|
||||
fprintf(debugme,
|
||||
"DEBUG: worker_id:%d (%d) thread_id:%d (%d) database_index:%lu (tid:%llu)\n",
|
||||
"DEBUG: worker_id:%d (%d) thread_id:%d (%d) database_index:%lu (tid:%" PRIu64 ")\n",
|
||||
worker_id,
|
||||
args->num_processes,
|
||||
thread_id,
|
||||
|
@ -1351,7 +1353,7 @@ void* worker_thread(void* thread_args) {
|
|||
char str2[1000];
|
||||
sprintf(str2, "%s%d", TEMP_DATA_STORE, *parent_id);
|
||||
rc = mkdir(str2, S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH);
|
||||
if (rc < 0) {
|
||||
if (rc < 0 && errno != EEXIST) {
|
||||
int ec = errno;
|
||||
fprintf(stderr, "Failed to make directory: %s because %s\n", str2, strerror(ec));
|
||||
goto failExit;
|
||||
|
@ -2251,9 +2253,9 @@ void print_stats(mako_args_t* args, mako_stats_t* stats, struct timespec* now, s
|
|||
for (op = 0; op < MAX_OP; op++) {
|
||||
if (args->txnspec.ops[op][OP_COUNT] > 0) {
|
||||
uint64_t ops_total_diff = ops_total[op] - ops_total_prev[op];
|
||||
printf("%" STR(STATS_FIELD_WIDTH) "llu ", ops_total_diff);
|
||||
printf("%" STR(STATS_FIELD_WIDTH) PRIu64 " ", ops_total_diff);
|
||||
if (fp) {
|
||||
fprintf(fp, "\"%s\": %llu,", get_ops_name(op), ops_total_diff);
|
||||
fprintf(fp, "\"%s\": %" PRIu64 ",", get_ops_name(op), ops_total_diff);
|
||||
}
|
||||
errors_diff[op] = errors_total[op] - errors_total_prev[op];
|
||||
print_err = (errors_diff[op] > 0);
|
||||
|
@ -2281,7 +2283,7 @@ void print_stats(mako_args_t* args, mako_stats_t* stats, struct timespec* now, s
|
|||
printf("%" STR(STATS_TITLE_WIDTH) "s ", "Errors");
|
||||
for (op = 0; op < MAX_OP; op++) {
|
||||
if (args->txnspec.ops[op][OP_COUNT] > 0) {
|
||||
printf("%" STR(STATS_FIELD_WIDTH) "llu ", errors_diff[op]);
|
||||
printf("%" STR(STATS_FIELD_WIDTH) PRIu64 " ", errors_diff[op]);
|
||||
if (fp) {
|
||||
fprintf(fp, ",\"errors\": %.2f", conflicts_diff);
|
||||
}
|
||||
|
@ -2430,10 +2432,10 @@ void print_report(mako_args_t* args,
|
|||
break;
|
||||
}
|
||||
}
|
||||
printf("Total Xacts: %8llu\n", totalxacts);
|
||||
printf("Total Conflicts: %8llu\n", conflicts);
|
||||
printf("Total Errors: %8llu\n", totalerrors);
|
||||
printf("Overall TPS: %8llu\n\n", totalxacts * 1000000000 / duration_nsec);
|
||||
printf("Total Xacts: %8" PRIu64 "\n", totalxacts);
|
||||
printf("Total Conflicts: %8" PRIu64 "\n", conflicts);
|
||||
printf("Total Errors: %8" PRIu64 "\n", totalerrors);
|
||||
printf("Overall TPS: %8" PRIu64 "\n\n", totalxacts * 1000000000 / duration_nsec);
|
||||
|
||||
if (fp) {
|
||||
fprintf(fp, "\"results\": {");
|
||||
|
@ -2441,10 +2443,10 @@ void print_report(mako_args_t* args,
|
|||
fprintf(fp, "\"totalProcesses\": %d,", args->num_processes);
|
||||
fprintf(fp, "\"totalThreads\": %d,", args->num_threads);
|
||||
fprintf(fp, "\"targetTPS\": %d,", args->tpsmax);
|
||||
fprintf(fp, "\"totalXacts\": %llu,", totalxacts);
|
||||
fprintf(fp, "\"totalConflicts\": %llu,", conflicts);
|
||||
fprintf(fp, "\"totalErrors\": %llu,", totalerrors);
|
||||
fprintf(fp, "\"overallTPS\": %llu,", totalxacts * 1000000000 / duration_nsec);
|
||||
fprintf(fp, "\"totalXacts\": %" PRIu64 ",", totalxacts);
|
||||
fprintf(fp, "\"totalConflicts\": %" PRIu64 ",", conflicts);
|
||||
fprintf(fp, "\"totalErrors\": %" PRIu64 ",", totalerrors);
|
||||
fprintf(fp, "\"overallTPS\": %" PRIu64 ",", totalxacts * 1000000000 / duration_nsec);
|
||||
}
|
||||
|
||||
/* per-op stats */
|
||||
|
@ -2457,14 +2459,14 @@ void print_report(mako_args_t* args,
|
|||
}
|
||||
for (op = 0; op < MAX_OP; op++) {
|
||||
if ((args->txnspec.ops[op][OP_COUNT] > 0 && op != OP_TRANSACTION) || op == OP_COMMIT) {
|
||||
printf("%" STR(STATS_FIELD_WIDTH) "llu ", ops_total[op]);
|
||||
printf("%" STR(STATS_FIELD_WIDTH) PRIu64 " ", ops_total[op]);
|
||||
if (fp) {
|
||||
if (first_op) {
|
||||
first_op = 0;
|
||||
} else {
|
||||
fprintf(fp, ",");
|
||||
}
|
||||
fprintf(fp, "\"%s\": %llu", get_ops_name(op), ops_total[op]);
|
||||
fprintf(fp, "\"%s\": %" PRIu64, get_ops_name(op), ops_total[op]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -2486,14 +2488,14 @@ void print_report(mako_args_t* args,
|
|||
first_op = 1;
|
||||
for (op = 0; op < MAX_OP; op++) {
|
||||
if (args->txnspec.ops[op][OP_COUNT] > 0 && op != OP_TRANSACTION) {
|
||||
printf("%" STR(STATS_FIELD_WIDTH) "llu ", errors_total[op]);
|
||||
printf("%" STR(STATS_FIELD_WIDTH) PRIu64 " ", errors_total[op]);
|
||||
if (fp) {
|
||||
if (first_op) {
|
||||
first_op = 0;
|
||||
} else {
|
||||
fprintf(fp, ",");
|
||||
}
|
||||
fprintf(fp, "\"%s\": %llu", get_ops_name(op), errors_total[op]);
|
||||
fprintf(fp, "\"%s\": %" PRIu64, get_ops_name(op), errors_total[op]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -2511,7 +2513,7 @@ void print_report(mako_args_t* args,
|
|||
for (op = 0; op < MAX_OP; op++) {
|
||||
if (args->txnspec.ops[op][OP_COUNT] > 0 || op == OP_TRANSACTION || op == OP_COMMIT) {
|
||||
if (lat_total[op]) {
|
||||
printf("%" STR(STATS_FIELD_WIDTH) "llu ", lat_samples[op]);
|
||||
printf("%" STR(STATS_FIELD_WIDTH) PRIu64 " ", lat_samples[op]);
|
||||
} else {
|
||||
printf("%" STR(STATS_FIELD_WIDTH) "s ", "N/A");
|
||||
}
|
||||
|
@ -2521,7 +2523,7 @@ void print_report(mako_args_t* args,
|
|||
} else {
|
||||
fprintf(fp, ",");
|
||||
}
|
||||
fprintf(fp, "\"%s\": %llu", get_ops_name(op), lat_samples[op]);
|
||||
fprintf(fp, "\"%s\": %" PRIu64, get_ops_name(op), lat_samples[op]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -2538,14 +2540,14 @@ void print_report(mako_args_t* args,
|
|||
if (lat_min[op] == -1) {
|
||||
printf("%" STR(STATS_FIELD_WIDTH) "s ", "N/A");
|
||||
} else {
|
||||
printf("%" STR(STATS_FIELD_WIDTH) "llu ", lat_min[op]);
|
||||
printf("%" STR(STATS_FIELD_WIDTH) PRIu64 " ", lat_min[op]);
|
||||
if (fp) {
|
||||
if (first_op) {
|
||||
first_op = 0;
|
||||
} else {
|
||||
fprintf(fp, ",");
|
||||
}
|
||||
fprintf(fp, "\"%s\": %llu", get_ops_name(op), lat_min[op]);
|
||||
fprintf(fp, "\"%s\": %" PRIu64, get_ops_name(op), lat_min[op]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -2561,14 +2563,14 @@ void print_report(mako_args_t* args,
|
|||
for (op = 0; op < MAX_OP; op++) {
|
||||
if (args->txnspec.ops[op][OP_COUNT] > 0 || op == OP_TRANSACTION || op == OP_COMMIT) {
|
||||
if (lat_total[op]) {
|
||||
printf("%" STR(STATS_FIELD_WIDTH) "llu ", lat_total[op] / lat_samples[op]);
|
||||
printf("%" STR(STATS_FIELD_WIDTH) PRIu64 " ", lat_total[op] / lat_samples[op]);
|
||||
if (fp) {
|
||||
if (first_op) {
|
||||
first_op = 0;
|
||||
} else {
|
||||
fprintf(fp, ",");
|
||||
}
|
||||
fprintf(fp, "\"%s\": %llu", get_ops_name(op), lat_total[op] / lat_samples[op]);
|
||||
fprintf(fp, "\"%s\": %" PRIu64, get_ops_name(op), lat_total[op] / lat_samples[op]);
|
||||
}
|
||||
} else {
|
||||
printf("%" STR(STATS_FIELD_WIDTH) "s ", "N/A");
|
||||
|
@ -2588,14 +2590,14 @@ void print_report(mako_args_t* args,
|
|||
if (lat_max[op] == 0) {
|
||||
printf("%" STR(STATS_FIELD_WIDTH) "s ", "N/A");
|
||||
} else {
|
||||
printf("%" STR(STATS_FIELD_WIDTH) "llu ", lat_max[op]);
|
||||
printf("%" STR(STATS_FIELD_WIDTH) PRIu64 " ", lat_max[op]);
|
||||
if (fp) {
|
||||
if (first_op) {
|
||||
first_op = 0;
|
||||
} else {
|
||||
fprintf(fp, ",");
|
||||
}
|
||||
fprintf(fp, "\"%s\": %llu", get_ops_name(op), lat_max[op]);
|
||||
fprintf(fp, "\"%s\": %" PRIu64, get_ops_name(op), lat_max[op]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -2646,14 +2648,14 @@ void print_report(mako_args_t* args,
|
|||
} else {
|
||||
median = (dataPoints[op][num_points[op] / 2] + dataPoints[op][num_points[op] / 2 - 1]) >> 1;
|
||||
}
|
||||
printf("%" STR(STATS_FIELD_WIDTH) "llu ", median);
|
||||
printf("%" STR(STATS_FIELD_WIDTH) PRIu64 " ", median);
|
||||
if (fp) {
|
||||
if (first_op) {
|
||||
first_op = 0;
|
||||
} else {
|
||||
fprintf(fp, ",");
|
||||
}
|
||||
fprintf(fp, "\"%s\": %llu", get_ops_name(op), median);
|
||||
fprintf(fp, "\"%s\": %" PRIu64, get_ops_name(op), median);
|
||||
}
|
||||
} else {
|
||||
printf("%" STR(STATS_FIELD_WIDTH) "s ", "N/A");
|
||||
|
@ -2676,14 +2678,14 @@ void print_report(mako_args_t* args,
|
|||
}
|
||||
if (lat_total[op]) {
|
||||
point_95pct = ((float)(num_points[op]) * 0.95) - 1;
|
||||
printf("%" STR(STATS_FIELD_WIDTH) "llu ", dataPoints[op][point_95pct]);
|
||||
printf("%" STR(STATS_FIELD_WIDTH) PRIu64 " ", dataPoints[op][point_95pct]);
|
||||
if (fp) {
|
||||
if (first_op) {
|
||||
first_op = 0;
|
||||
} else {
|
||||
fprintf(fp, ",");
|
||||
}
|
||||
fprintf(fp, "\"%s\": %llu", get_ops_name(op), dataPoints[op][point_95pct]);
|
||||
fprintf(fp, "\"%s\": %" PRIu64, get_ops_name(op), dataPoints[op][point_95pct]);
|
||||
}
|
||||
} else {
|
||||
printf("%" STR(STATS_FIELD_WIDTH) "s ", "N/A");
|
||||
|
@ -2706,14 +2708,14 @@ void print_report(mako_args_t* args,
|
|||
}
|
||||
if (lat_total[op]) {
|
||||
point_99pct = ((float)(num_points[op]) * 0.99) - 1;
|
||||
printf("%" STR(STATS_FIELD_WIDTH) "llu ", dataPoints[op][point_99pct]);
|
||||
printf("%" STR(STATS_FIELD_WIDTH) PRIu64 " ", dataPoints[op][point_99pct]);
|
||||
if (fp) {
|
||||
if (first_op) {
|
||||
first_op = 0;
|
||||
} else {
|
||||
fprintf(fp, ",");
|
||||
}
|
||||
fprintf(fp, "\"%s\": %llu", get_ops_name(op), dataPoints[op][point_99pct]);
|
||||
fprintf(fp, "\"%s\": %" PRIu64, get_ops_name(op), dataPoints[op][point_99pct]);
|
||||
}
|
||||
} else {
|
||||
printf("%" STR(STATS_FIELD_WIDTH) "s ", "N/A");
|
||||
|
@ -2736,14 +2738,14 @@ void print_report(mako_args_t* args,
|
|||
}
|
||||
if (lat_total[op]) {
|
||||
point_99_9pct = ((float)(num_points[op]) * 0.999) - 1;
|
||||
printf("%" STR(STATS_FIELD_WIDTH) "llu ", dataPoints[op][point_99_9pct]);
|
||||
printf("%" STR(STATS_FIELD_WIDTH) PRIu64 " ", dataPoints[op][point_99_9pct]);
|
||||
if (fp) {
|
||||
if (first_op) {
|
||||
first_op = 0;
|
||||
} else {
|
||||
fprintf(fp, ",");
|
||||
}
|
||||
fprintf(fp, "\"%s\": %llu", get_ops_name(op), dataPoints[op][point_99_9pct]);
|
||||
fprintf(fp, "\"%s\": %" PRIu64, get_ops_name(op), dataPoints[op][point_99_9pct]);
|
||||
}
|
||||
} else {
|
||||
printf("%" STR(STATS_FIELD_WIDTH) "s ", "N/A");
|
||||
|
|
|
@ -149,7 +149,7 @@ Format
|
|||
------
|
||||
| One operation type is defined as ``<Type><Count>`` or ``<Type><Count>:<Range>``.
|
||||
| When Count is omitted, it's equivalent to setting it to 1. (e.g. ``g`` is equivalent to ``g1``)
|
||||
| Multiple operation types within the same trancaction can be concatenated. (e.g. ``g9u1`` = 9 GETs and 1 update)
|
||||
| Multiple operation types within the same transaction can be concatenated. (e.g. ``g9u1`` = 9 GETs and 1 update)
|
||||
|
||||
Transaction Specification Examples
|
||||
----------------------------------
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
|
||||
* Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
|
|
|
@ -67,25 +67,25 @@ void runTests(struct ResultSet* rs) {
|
|||
fdb_transaction_set(tr, keys[i], KEY_SIZE, valueStr, VALUE_SIZE);
|
||||
e = getSize(rs, tr, sizes + i);
|
||||
checkError(e, "transaction get size", rs);
|
||||
printf("size %d: %lld\n", i, sizes[i]);
|
||||
printf("size %d: %" PRId64 "\n", i, sizes[i]);
|
||||
i++;
|
||||
|
||||
fdb_transaction_set(tr, keys[i], KEY_SIZE, valueStr, VALUE_SIZE);
|
||||
e = getSize(rs, tr, sizes + i);
|
||||
checkError(e, "transaction get size", rs);
|
||||
printf("size %d: %lld\n", i, sizes[i]);
|
||||
printf("size %d: %" PRId64 "\n", i, sizes[i]);
|
||||
i++;
|
||||
|
||||
fdb_transaction_clear(tr, keys[i], KEY_SIZE);
|
||||
e = getSize(rs, tr, sizes + i);
|
||||
checkError(e, "transaction get size", rs);
|
||||
printf("size %d: %lld\n", i, sizes[i]);
|
||||
printf("size %d: %" PRId64 "\n", i, sizes[i]);
|
||||
i++;
|
||||
|
||||
fdb_transaction_clear_range(tr, keys[i], KEY_SIZE, keys[i + 1], KEY_SIZE);
|
||||
e = getSize(rs, tr, sizes + i);
|
||||
checkError(e, "transaction get size", rs);
|
||||
printf("size %d: %lld\n", i, sizes[i]);
|
||||
printf("size %d: %" PRId64 "\n", i, sizes[i]);
|
||||
i++;
|
||||
|
||||
for (j = 0; j + 1 < i; j++) {
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
|
||||
* Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2020 Apple Inc. and the FoundationDB project authors
|
||||
* Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
|
@ -90,6 +90,14 @@ void Future::cancel() {
|
|||
return fdb_future_get_keyvalue_array(future_, out_kv, out_count, out_more);
|
||||
}
|
||||
|
||||
// MappedKeyValueArrayFuture
|
||||
|
||||
[[nodiscard]] fdb_error_t MappedKeyValueArrayFuture::get(const FDBMappedKeyValue** out_kv,
|
||||
int* out_count,
|
||||
fdb_bool_t* out_more) {
|
||||
return fdb_future_get_mappedkeyvalue_array(future_, out_kv, out_count, out_more);
|
||||
}
|
||||
|
||||
// Result
|
||||
|
||||
Result::~Result() {
|
||||
|
@ -122,8 +130,15 @@ EmptyFuture Database::create_snapshot(FDBDatabase* db,
|
|||
return EmptyFuture(fdb_database_create_snapshot(db, uid, uid_length, snap_command, snap_command_length));
|
||||
}
|
||||
|
||||
// Transaction
|
||||
// Tenant
|
||||
Tenant::Tenant(FDBDatabase* db, const uint8_t* name, int name_length) {
|
||||
if (fdb_error_t err = fdb_database_open_tenant(db, name, name_length, &tenant)) {
|
||||
std::cerr << fdb_get_error(err) << std::endl;
|
||||
std::abort();
|
||||
}
|
||||
}
|
||||
|
||||
// Transaction
|
||||
Transaction::Transaction(FDBDatabase* db) {
|
||||
if (fdb_error_t err = fdb_database_create_transaction(db, &tr_)) {
|
||||
std::cerr << fdb_get_error(err) << std::endl;
|
||||
|
@ -131,6 +146,13 @@ Transaction::Transaction(FDBDatabase* db) {
|
|||
}
|
||||
}
|
||||
|
||||
Transaction::Transaction(Tenant tenant) {
|
||||
if (fdb_error_t err = fdb_tenant_create_transaction(tenant.tenant, &tr_)) {
|
||||
std::cerr << fdb_get_error(err) << std::endl;
|
||||
std::abort();
|
||||
}
|
||||
}
|
||||
|
||||
Transaction::~Transaction() {
|
||||
fdb_transaction_destroy(tr_);
|
||||
}
|
||||
|
@ -210,7 +232,7 @@ KeyValueArrayFuture Transaction::get_range(const uint8_t* begin_key_name,
|
|||
reverse));
|
||||
}
|
||||
|
||||
KeyValueArrayFuture Transaction::get_range_and_flat_map(const uint8_t* begin_key_name,
|
||||
MappedKeyValueArrayFuture Transaction::get_mapped_range(const uint8_t* begin_key_name,
|
||||
int begin_key_name_length,
|
||||
fdb_bool_t begin_or_equal,
|
||||
int begin_offset,
|
||||
|
@ -226,7 +248,7 @@ KeyValueArrayFuture Transaction::get_range_and_flat_map(const uint8_t* begin_key
|
|||
int iteration,
|
||||
fdb_bool_t snapshot,
|
||||
fdb_bool_t reverse) {
|
||||
return KeyValueArrayFuture(fdb_transaction_get_range_and_flat_map(tr_,
|
||||
return MappedKeyValueArrayFuture(fdb_transaction_get_mapped_range(tr_,
|
||||
begin_key_name,
|
||||
begin_key_name_length,
|
||||
begin_or_equal,
|
||||
|
|
|
@ -135,6 +135,18 @@ private:
|
|||
KeyValueArrayFuture(FDBFuture* f) : Future(f) {}
|
||||
};
|
||||
|
||||
class MappedKeyValueArrayFuture : public Future {
|
||||
public:
|
||||
// Call this function instead of fdb_future_get_mappedkeyvalue_array when using
|
||||
// the MappedKeyValueArrayFuture type. Its behavior is identical to
|
||||
// fdb_future_get_mappedkeyvalue_array.
|
||||
fdb_error_t get(const FDBMappedKeyValue** out_kv, int* out_count, fdb_bool_t* out_more);
|
||||
|
||||
private:
|
||||
friend class Transaction;
|
||||
MappedKeyValueArrayFuture(FDBFuture* f) : Future(f) {}
|
||||
};
|
||||
|
||||
class KeyRangeArrayFuture : public Future {
|
||||
public:
|
||||
// Call this function instead of fdb_future_get_keyrange_array when using
|
||||
|
@ -191,6 +203,15 @@ public:
|
|||
int snap_command_length);
|
||||
};
|
||||
|
||||
class Tenant final {
|
||||
public:
|
||||
Tenant(FDBDatabase* db, const uint8_t* name, int name_length);
|
||||
|
||||
private:
|
||||
friend class Transaction;
|
||||
FDBTenant* tenant;
|
||||
};
|
||||
|
||||
// Wrapper around FDBTransaction, providing the same set of calls as the C API.
|
||||
// Handles cleanup of memory, removing the need to call
|
||||
// fdb_transaction_destroy.
|
||||
|
@ -198,6 +219,7 @@ class Transaction final {
|
|||
public:
|
||||
// Given an FDBDatabase, initializes a new transaction.
|
||||
Transaction(FDBDatabase* db);
|
||||
Transaction(Tenant tenant);
|
||||
~Transaction();
|
||||
|
||||
// Wrapper around fdb_transaction_reset.
|
||||
|
@ -254,7 +276,7 @@ public:
|
|||
|
||||
// WARNING: This feature is considered experimental at this time. It is only allowed when using snapshot isolation
|
||||
// AND disabling read-your-writes. Returns a future which will be set to an FDBKeyValue array.
|
||||
KeyValueArrayFuture get_range_and_flat_map(const uint8_t* begin_key_name,
|
||||
MappedKeyValueArrayFuture get_mapped_range(const uint8_t* begin_key_name,
|
||||
int begin_key_name_length,
|
||||
fdb_bool_t begin_or_equal,
|
||||
int begin_offset,
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2020 Apple Inc. and the FoundationDB project authors
|
||||
* Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
|
|
|
@ -6,7 +6,7 @@ ExternalProject_Add(
doctest
PREFIX ${CMAKE_BINARY_DIR}/doctest
GIT_REPOSITORY https://github.com/onqtam/doctest.git
GIT_TAG 1c8da00c978c19e00a434b2b1f854fcffc9fba35 # v2.4.0
GIT_TAG 8424be522357e68d8c6178375546bb0cf9d5f6b3 # v2.4.1
TIMEOUT 10
CONFIGURE_COMMAND ""
BUILD_COMMAND ""
@ -3,7 +3,7 @@
|
|||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
|
||||
* Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2020 Apple Inc. and the FoundationDB project authors
|
||||
* Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
|
@ -173,6 +173,20 @@ struct GetRangeResult {
fdb_error_t err;
};

struct GetMappedRangeResult {
std::vector<std::tuple<std::string, // key
std::string, // value
std::string, // begin
std::string, // end
std::vector<std::pair<std::string, std::string>> // range results
>>
mkvs;
// True if values remain in the key range requested.
bool more;
// Set to a non-zero value if an error occurred during the transaction.
fdb_error_t err;
};

// Helper function to get a range of kv pairs. Returns a GetRangeResult struct
// containing the results of the range read. Caller is responsible for checking
// error on failure and retrying if necessary.
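
A sketch of how the GetMappedRangeResult struct above might be consumed; only the struct itself comes from the diff, while the function, the printing, and <iostream> are assumptions for illustration:

#include <iostream>

void printMappedRangeResult(const GetMappedRangeResult& result) {
    if (result.err) {
        return; // the tests below instead retry via Transaction::on_error
    }
    for (const auto& [key, value, begin, end, range_results] : result.mkvs) {
        // key/value are the matched (index) entry; begin/end bound the secondary
        // range read for it; range_results holds that range's key-value pairs.
        std::cout << key << " -> " << range_results.size() << " underlying entries\n";
        for (const auto& [k, v] : range_results) {
            std::cout << "  " << k << " = " << v << '\n';
        }
    }
}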
@ -225,7 +239,11 @@ GetRangeResult get_range(fdb::Transaction& tr,
|
|||
return GetRangeResult{ results, out_more != 0, 0 };
|
||||
}
|
||||
|
||||
GetRangeResult get_range_and_flat_map(fdb::Transaction& tr,
|
||||
static inline std::string extractString(FDBKey key) {
|
||||
return std::string((const char*)key.key, key.key_length);
|
||||
}
|
||||
|
||||
GetMappedRangeResult get_mapped_range(fdb::Transaction& tr,
|
||||
const uint8_t* begin_key_name,
|
||||
int begin_key_name_length,
|
||||
fdb_bool_t begin_or_equal,
|
||||
|
@ -242,7 +260,7 @@ GetRangeResult get_range_and_flat_map(fdb::Transaction& tr,
|
|||
int iteration,
|
||||
fdb_bool_t snapshot,
|
||||
fdb_bool_t reverse) {
|
||||
fdb::KeyValueArrayFuture f1 = tr.get_range_and_flat_map(begin_key_name,
|
||||
fdb::MappedKeyValueArrayFuture f1 = tr.get_mapped_range(begin_key_name,
|
||||
begin_key_name_length,
|
||||
begin_or_equal,
|
||||
begin_offset,
|
||||
|
@ -261,21 +279,41 @@ GetRangeResult get_range_and_flat_map(fdb::Transaction& tr,
|
|||
|
||||
fdb_error_t err = wait_future(f1);
|
||||
if (err) {
|
||||
return GetRangeResult{ {}, false, err };
|
||||
return GetMappedRangeResult{ {}, false, err };
|
||||
}
|
||||
|
||||
const FDBKeyValue* out_kv;
|
||||
const FDBMappedKeyValue* out_mkv;
|
||||
int out_count;
|
||||
fdb_bool_t out_more;
|
||||
fdb_check(f1.get(&out_kv, &out_count, &out_more));
|
||||
|
||||
std::vector<std::pair<std::string, std::string>> results;
|
||||
fdb_check(f1.get(&out_mkv, &out_count, &out_more));
|
||||
|
||||
GetMappedRangeResult result;
|
||||
result.more = (out_more != 0);
|
||||
result.err = 0;
|
||||
|
||||
// std::cout << "out_count:" << out_count << " out_more:" << out_more << " out_mkv:" << (void*)out_mkv <<
|
||||
// std::endl;
|
||||
|
||||
for (int i = 0; i < out_count; ++i) {
|
||||
std::string key((const char*)out_kv[i].key, out_kv[i].key_length);
|
||||
std::string value((const char*)out_kv[i].value, out_kv[i].value_length);
|
||||
results.emplace_back(key, value);
|
||||
FDBMappedKeyValue mkv = out_mkv[i];
|
||||
auto key = extractString(mkv.key);
|
||||
auto value = extractString(mkv.value);
|
||||
auto begin = extractString(mkv.getRange.begin.key);
|
||||
auto end = extractString(mkv.getRange.end.key);
|
||||
// std::cout << "key:" << key << " value:" << value << " begin:" << begin << " end:" << end << std::endl;
|
||||
|
||||
std::vector<std::pair<std::string, std::string>> range_results;
|
||||
for (int i = 0; i < mkv.getRange.m_size; ++i) {
|
||||
const auto& kv = mkv.getRange.data[i];
|
||||
std::string k((const char*)kv.key, kv.key_length);
|
||||
std::string v((const char*)kv.value, kv.value_length);
|
||||
range_results.emplace_back(k, v);
|
||||
// std::cout << "[" << i << "]" << k << " -> " << v << std::endl;
|
||||
}
|
||||
result.mkvs.emplace_back(key, value, begin, end, range_results);
|
||||
}
|
||||
return GetRangeResult{ results, out_more != 0, 0 };
|
||||
return result;
|
||||
}
|
||||
|
||||
// Clears all data in the database.
|
||||
|
@ -888,32 +926,35 @@ static Value dataOfRecord(const int i) {
|
|||
static std::string indexEntryKey(const int i) {
|
||||
return Tuple().append(StringRef(prefix)).append(INDEX).append(indexKey(i)).append(primaryKey(i)).pack().toString();
|
||||
}
|
||||
static std::string recordKey(const int i) {
|
||||
return Tuple().append(prefix).append(RECORD).append(primaryKey(i)).pack().toString();
|
||||
static std::string recordKey(const int i, const int split) {
|
||||
return Tuple().append(prefix).append(RECORD).append(primaryKey(i)).append(split).pack().toString();
|
||||
}
|
||||
static std::string recordValue(const int i) {
|
||||
return Tuple().append(dataOfRecord(i)).pack().toString();
|
||||
static std::string recordValue(const int i, const int split) {
|
||||
return Tuple().append(dataOfRecord(i)).append(split).pack().toString();
|
||||
}
|
||||
|
||||
const static int SPLIT_SIZE = 3;
|
||||
std::map<std::string, std::string> fillInRecords(int n) {
|
||||
// Note: The user requested `prefix` should be added as the first element of the tuple that forms the key, rather
|
||||
// than the prefix of the key. So we don't use key() or create_data() in this test.
|
||||
std::map<std::string, std::string> data;
|
||||
for (int i = 0; i < n; i++) {
|
||||
data[indexEntryKey(i)] = EMPTY;
|
||||
data[recordKey(i)] = recordValue(i);
|
||||
for (int split = 0; split < SPLIT_SIZE; split++) {
|
||||
data[recordKey(i, split)] = recordValue(i, split);
|
||||
}
|
||||
}
|
||||
insert_data(db, data);
|
||||
return data;
|
||||
}
|
||||
|
||||
GetRangeResult getIndexEntriesAndMap(int beginId, int endId, fdb::Transaction& tr) {
|
||||
GetMappedRangeResult getMappedIndexEntries(int beginId, int endId, fdb::Transaction& tr) {
|
||||
std::string indexEntryKeyBegin = indexEntryKey(beginId);
|
||||
std::string indexEntryKeyEnd = indexEntryKey(endId);
|
||||
|
||||
std::string mapper = Tuple().append(prefix).append(RECORD).append("{K[3]}"_sr).pack().toString();
|
||||
std::string mapper = Tuple().append(prefix).append(RECORD).append("{K[3]}"_sr).append("{...}"_sr).pack().toString();
|
||||
|
||||
return get_range_and_flat_map(
|
||||
return get_mapped_range(
|
||||
tr,
|
||||
FDB_KEYSEL_FIRST_GREATER_OR_EQUAL((const uint8_t*)indexEntryKeyBegin.c_str(), indexEntryKeyBegin.size()),
|
||||
FDB_KEYSEL_FIRST_GREATER_OR_EQUAL((const uint8_t*)indexEntryKeyEnd.c_str(), indexEntryKeyEnd.size()),
|
||||
|
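A note on the mapper tuple built above, written as a comment sketch; this is an assumed reading of the mapped-range substitution rules, which the diff itself does not spell out:

// Assumed substitution semantics (0-based indexing):
//   index entry key = (prefix, INDEX, indexKey(i), primaryKey(i))  -> "{K[3]}" == primaryKey(i)
//   mapper          = (prefix, RECORD, "{K[3]}", "{...}")
// Each matched index entry therefore triggers a secondary prefix range read over
// Tuple(prefix, RECORD, primaryKey(i)), i.e. all SPLIT_SIZE record splits, because the
// trailing "{...}" requests a range read rather than a single point lookup.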
@ -923,20 +964,20 @@ GetRangeResult getIndexEntriesAndMap(int beginId, int endId, fdb::Transaction& t
|
|||
/* target_bytes */ 0,
|
||||
/* FDBStreamingMode */ FDB_STREAMING_MODE_WANT_ALL,
|
||||
/* iteration */ 0,
|
||||
/* snapshot */ true,
|
||||
/* snapshot */ false,
|
||||
/* reverse */ 0);
|
||||
}
|
||||
|
||||
TEST_CASE("fdb_transaction_get_range_and_flat_map") {
|
||||
fillInRecords(20);
|
||||
TEST_CASE("fdb_transaction_get_mapped_range") {
|
||||
const int TOTAL_RECORDS = 20;
|
||||
fillInRecords(TOTAL_RECORDS);
|
||||
|
||||
fdb::Transaction tr(db);
|
||||
// get_range_and_flat_map is only support without RYW. This is a must!!!
|
||||
fdb_check(tr.set_option(FDB_TR_OPTION_READ_YOUR_WRITES_DISABLE, nullptr, 0));
|
||||
// RYW should be enabled.
|
||||
while (1) {
|
||||
int beginId = 1;
|
||||
int endId = 19;
|
||||
auto result = getIndexEntriesAndMap(beginId, endId, tr);
|
||||
auto result = getMappedIndexEntries(beginId, endId, tr);
|
||||
|
||||
if (result.err) {
|
||||
fdb::EmptyFuture f1 = tr.on_error(result.err);
|
||||
|
@ -945,32 +986,30 @@ TEST_CASE("fdb_transaction_get_range_and_flat_map") {
|
|||
}
|
||||
|
||||
int expectSize = endId - beginId;
|
||||
CHECK(result.kvs.size() == expectSize);
|
||||
CHECK(result.mkvs.size() == expectSize);
|
||||
CHECK(!result.more);
|
||||
|
||||
int id = beginId;
|
||||
for (int i = 0; i < result.kvs.size(); i++, id++) {
|
||||
const auto& [key, value] = result.kvs[i];
|
||||
CHECK(recordKey(id).compare(key) == 0);
|
||||
CHECK(recordValue(id).compare(value) == 0);
|
||||
for (int i = 0; i < expectSize; i++, id++) {
|
||||
const auto& [key, value, begin, end, range_results] = result.mkvs[i];
|
||||
CHECK(indexEntryKey(id).compare(key) == 0);
|
||||
CHECK(EMPTY.compare(value) == 0);
|
||||
CHECK(range_results.size() == SPLIT_SIZE);
|
||||
for (int split = 0; split < SPLIT_SIZE; split++) {
|
||||
auto& [k, v] = range_results[split];
|
||||
CHECK(recordKey(id, split).compare(k) == 0);
|
||||
CHECK(recordValue(id, split).compare(v) == 0);
|
||||
}
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
TEST_CASE("fdb_transaction_get_range_and_flat_map get_key_values_and_map_has_more") {
|
||||
fillInRecords(2000);
|
||||
fdb::Transaction tr(db);
|
||||
fdb_check(tr.set_option(FDB_TR_OPTION_READ_YOUR_WRITES_DISABLE, nullptr, 0));
|
||||
auto result = getIndexEntriesAndMap(100, 1900, tr);
|
||||
CHECK(result.err == error_code_get_key_values_and_map_has_more);
|
||||
}
|
||||
|
||||
TEST_CASE("fdb_transaction_get_range_and_flat_map_restricted_to_snapshot") {
|
||||
TEST_CASE("fdb_transaction_get_mapped_range_restricted_to_serializable") {
|
||||
std::string mapper = Tuple().append(prefix).append(RECORD).append("{K[3]}"_sr).pack().toString();
|
||||
fdb::Transaction tr(db);
|
||||
fdb_check(tr.set_option(FDB_TR_OPTION_READ_YOUR_WRITES_DISABLE, nullptr, 0));
|
||||
auto result = get_range_and_flat_map(
|
||||
auto result = get_mapped_range(
|
||||
tr,
|
||||
FDB_KEYSEL_FIRST_GREATER_OR_EQUAL((const uint8_t*)indexEntryKey(0).c_str(), indexEntryKey(0).size()),
|
||||
FDB_KEYSEL_FIRST_GREATER_THAN((const uint8_t*)indexEntryKey(1).c_str(), indexEntryKey(1).size()),
|
||||
|
@ -980,16 +1019,16 @@ TEST_CASE("fdb_transaction_get_range_and_flat_map_restricted_to_snapshot") {
|
|||
/* target_bytes */ 0,
|
||||
/* FDBStreamingMode */ FDB_STREAMING_MODE_WANT_ALL,
|
||||
/* iteration */ 0,
|
||||
/* snapshot */ false, // Set snapshot to false
|
||||
/* snapshot */ true, // Set snapshot to true
|
||||
/* reverse */ 0);
|
||||
ASSERT(result.err == error_code_client_invalid_operation);
|
||||
ASSERT(result.err == error_code_unsupported_operation);
|
||||
}
|
||||
|
||||
TEST_CASE("fdb_transaction_get_range_and_flat_map_restricted_to_ryw_disable") {
|
||||
TEST_CASE("fdb_transaction_get_mapped_range_restricted_to_ryw_enable") {
|
||||
std::string mapper = Tuple().append(prefix).append(RECORD).append("{K[3]}"_sr).pack().toString();
|
||||
fdb::Transaction tr(db);
|
||||
// Not set FDB_TR_OPTION_READ_YOUR_WRITES_DISABLE.
|
||||
auto result = get_range_and_flat_map(
|
||||
fdb_check(tr.set_option(FDB_TR_OPTION_READ_YOUR_WRITES_DISABLE, nullptr, 0)); // Not disable RYW
|
||||
auto result = get_mapped_range(
|
||||
tr,
|
||||
FDB_KEYSEL_FIRST_GREATER_OR_EQUAL((const uint8_t*)indexEntryKey(0).c_str(), indexEntryKey(0).size()),
|
||||
FDB_KEYSEL_FIRST_GREATER_THAN((const uint8_t*)indexEntryKey(1).c_str(), indexEntryKey(1).size()),
|
||||
|
@ -1001,7 +1040,7 @@ TEST_CASE("fdb_transaction_get_range_and_flat_map_restricted_to_ryw_disable") {
|
|||
/* iteration */ 0,
|
||||
/* snapshot */ true,
|
||||
/* reverse */ 0);
|
||||
ASSERT(result.err == error_code_client_invalid_operation);
|
||||
ASSERT(result.err == error_code_unsupported_operation);
|
||||
}
|
||||
|
||||
TEST_CASE("fdb_transaction_get_range reverse") {
|
||||
|
@ -2358,6 +2397,114 @@ TEST_CASE("commit_does_not_reset") {
|
|||
}
|
||||
}
|
||||
|
||||
TEST_CASE("Fast alloc thread cleanup") {
|
||||
// Try to cause an OOM if thread cleanup doesn't work
|
||||
for (int i = 0; i < 50000; ++i) {
|
||||
auto thread = std::thread([]() {
|
||||
fdb::Transaction tr(db);
|
||||
for (int s = 0; s < 11; ++s) {
|
||||
tr.set(key("foo"), std::string(8 << s, '\x00'));
|
||||
}
|
||||
});
|
||||
thread.join();
|
||||
}
|
||||
}
|
||||
|
||||
TEST_CASE("Tenant create, access, and delete") {
|
||||
std::string tenantName = "tenant";
|
||||
std::string testKey = "foo";
|
||||
std::string testValue = "bar";
|
||||
|
||||
fdb::Transaction tr(db);
|
||||
while (1) {
|
||||
fdb_check(tr.set_option(FDB_TR_OPTION_SPECIAL_KEY_SPACE_ENABLE_WRITES, nullptr, 0));
|
||||
tr.set("\xff\xff/management/tenant_map/" + tenantName, "");
|
||||
fdb::EmptyFuture commitFuture = tr.commit();
|
||||
fdb_error_t err = wait_future(commitFuture);
|
||||
if (err) {
|
||||
fdb::EmptyFuture f = tr.on_error(err);
|
||||
fdb_check(wait_future(f));
|
||||
continue;
|
||||
}
|
||||
tr.reset();
|
||||
break;
|
||||
}
|
||||
|
||||
fdb::Tenant tenant(db, reinterpret_cast<const uint8_t*>(tenantName.c_str()), tenantName.size());
|
||||
fdb::Transaction tr2(tenant);
|
||||
|
||||
while (1) {
|
||||
tr2.set(testKey, testValue);
|
||||
fdb::EmptyFuture commitFuture = tr2.commit();
|
||||
fdb_error_t err = wait_future(commitFuture);
|
||||
if (err) {
|
||||
fdb::EmptyFuture f = tr2.on_error(err);
|
||||
fdb_check(wait_future(f));
|
||||
continue;
|
||||
}
|
||||
tr2.reset();
|
||||
break;
|
||||
}
|
||||
|
||||
while (1) {
|
||||
fdb::ValueFuture f1 = tr2.get(testKey, false);
|
||||
fdb_error_t err = wait_future(f1);
|
||||
if (err) {
|
||||
fdb::EmptyFuture f2 = tr.on_error(err);
|
||||
fdb_check(wait_future(f2));
|
||||
continue;
|
||||
}
|
||||
|
||||
int out_present;
|
||||
char* val;
|
||||
int vallen;
|
||||
fdb_check(f1.get(&out_present, (const uint8_t**)&val, &vallen));
|
||||
CHECK(out_present == 1);
|
||||
CHECK(vallen == testValue.size());
|
||||
CHECK(testValue == val);
|
||||
|
||||
tr2.clear(testKey);
|
||||
fdb::EmptyFuture commitFuture = tr2.commit();
|
||||
err = wait_future(commitFuture);
|
||||
if (err) {
|
||||
fdb::EmptyFuture f = tr2.on_error(err);
|
||||
fdb_check(wait_future(f));
|
||||
continue;
|
||||
}
|
||||
|
||||
tr2.reset();
|
||||
break;
|
||||
}
|
||||
|
||||
while (1) {
|
||||
fdb_check(tr.set_option(FDB_TR_OPTION_SPECIAL_KEY_SPACE_ENABLE_WRITES, nullptr, 0));
|
||||
tr.clear("\xff\xff/management/tenant_map/" + tenantName);
|
||||
fdb::EmptyFuture commitFuture = tr.commit();
|
||||
fdb_error_t err = wait_future(commitFuture);
|
||||
if (err) {
|
||||
fdb::EmptyFuture f = tr.on_error(err);
|
||||
fdb_check(wait_future(f));
|
||||
continue;
|
||||
}
|
||||
tr.reset();
|
||||
break;
|
||||
}
|
||||
|
||||
while (1) {
|
||||
fdb::ValueFuture f1 = tr2.get(testKey, false);
|
||||
fdb_error_t err = wait_future(f1);
|
||||
if (err == error_code_tenant_not_found) {
|
||||
tr2.reset();
|
||||
break;
|
||||
}
|
||||
if (err) {
|
||||
fdb::EmptyFuture f2 = tr.on_error(err);
|
||||
fdb_check(wait_future(f2));
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
int main(int argc, char** argv) {
|
||||
if (argc < 3) {
|
||||
std::cout << "Unit tests for the FoundationDB C API.\n"
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
|
||||
* Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
|
|
|
@ -1,9 +1,9 @@
|
|||
/*
|
||||
* workloads.h
|
||||
* SimpleWorkload.cpp
|
||||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2019 Apple Inc. and the FoundationDB project authors
|
||||
* Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
|
|
|
@ -1,9 +1,9 @@
|
|||
/*
|
||||
* workloads.h
|
||||
* workloads.cpp
|
||||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2019 Apple Inc. and the FoundationDB project authors
|
||||
* Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2019 Apple Inc. and the FoundationDB project authors
|
||||
* Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
|
|
|
@ -18,6 +18,7 @@ set(SRCS
|
|||
|
||||
add_flow_target(STATIC_LIBRARY NAME fdb_flow SRCS ${SRCS})
|
||||
target_link_libraries(fdb_flow PUBLIC fdb_c)
|
||||
target_link_libraries(fdb_flow PUBLIC fdbclient)
|
||||
target_include_directories(fdb_flow PUBLIC
|
||||
"${CMAKE_CURRENT_BINARY_DIR}"
|
||||
"${CMAKE_CURRENT_SOURCE_DIR}"
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
|
||||
* Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
|
||||
* Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
|
||||
* Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
|
||||
* Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
|
||||
* Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
|
||||
* Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
|
@ -161,7 +161,7 @@ struct RangeResultRef : VectorRef<KeyValueRef> {
// False implies that no such values remain
Optional<KeyRef> readThrough; // Only present when 'more' is true. When present, this value represent the end (or
// beginning if reverse) of the range
// which was read to produce these results. This is guarenteed to be less than the requested range.
// which was read to produce these results. This is guaranteed to be less than the requested range.
bool readToBegin;
bool readThroughEnd;
@ -3,7 +3,7 @@
|
|||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
|
||||
* Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
|
||||
* Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
|
||||
* Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
|
||||
* Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
|
||||
* Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
|
||||
* Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
|
||||
* Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
|
||||
* Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
|
||||
* Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
|
@ -24,7 +24,7 @@
|
|||
#include <stdio.h>
|
||||
#include <cinttypes>
|
||||
|
||||
#include "contrib/fmt-8.0.1/include/fmt/format.h"
|
||||
#include "contrib/fmt-8.1.1/include/fmt/format.h"
|
||||
#include "flow/DeterministicRandom.h"
|
||||
#include "flow/SystemMonitor.h"
|
||||
#include "flow/TLSConfig.actor.h"
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
|
||||
* Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
|
||||
* Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
|
||||
* Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
|
||||
* Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
|
|
|
@ -448,16 +448,21 @@ func (o TransactionOptions) SetInitializeNewDatabase() error {
|
return o.setOpt(300, nil)
}

// Allows this transaction to read and modify system keys (those that start with the byte 0xFF)
// Allows this transaction to read and modify system keys (those that start with the byte 0xFF). Implies raw_access.
func (o TransactionOptions) SetAccessSystemKeys() error {
return o.setOpt(301, nil)
}

// Allows this transaction to read system keys (those that start with the byte 0xFF)
// Allows this transaction to read system keys (those that start with the byte 0xFF). Implies raw_access.
func (o TransactionOptions) SetReadSystemKeys() error {
return o.setOpt(302, nil)
}

// Allows this transaction to access the raw key-space when tenant mode is on.
func (o TransactionOptions) SetRawAccess() error {
return o.setOpt(303, nil)
}

// Not yet implemented.
func (o TransactionOptions) SetDebugRetryLogging(param string) error {
return o.setOpt(401, []byte(param))
@ -598,6 +603,11 @@ func (o TransactionOptions) SetBypassUnreadable() error {
return o.setOpt(1100, nil)
}

// Allows this transaction to use cached GRV from the database context. Defaults to off. Upon first usage, starts a background updater to periodically update the cache to avoid stale read versions.
func (o TransactionOptions) SetUseGrvCache() error {
return o.setOpt(1101, nil)
}

type StreamingMode int

const (
@ -27,6 +27,8 @@ set(JAVA_BINDING_SRCS
|
|||
src/main/com/apple/foundationdb/directory/package-info.java
|
||||
src/main/com/apple/foundationdb/directory/PathUtil.java
|
||||
src/main/com/apple/foundationdb/DirectBufferIterator.java
|
||||
src/main/com/apple/foundationdb/RangeResultDirectBufferIterator.java
|
||||
src/main/com/apple/foundationdb/MappedRangeResultDirectBufferIterator.java
|
||||
src/main/com/apple/foundationdb/DirectBufferPool.java
|
||||
src/main/com/apple/foundationdb/FDB.java
|
||||
src/main/com/apple/foundationdb/FDBDatabase.java
|
||||
|
@ -36,11 +38,13 @@ set(JAVA_BINDING_SRCS
|
|||
src/main/com/apple/foundationdb/FutureKeyArray.java
|
||||
src/main/com/apple/foundationdb/FutureResult.java
|
||||
src/main/com/apple/foundationdb/FutureResults.java
|
||||
src/main/com/apple/foundationdb/FutureMappedResults.java
|
||||
src/main/com/apple/foundationdb/FutureStrings.java
|
||||
src/main/com/apple/foundationdb/FutureVoid.java
|
||||
src/main/com/apple/foundationdb/JNIUtil.java
|
||||
src/main/com/apple/foundationdb/KeySelector.java
|
||||
src/main/com/apple/foundationdb/KeyValue.java
|
||||
src/main/com/apple/foundationdb/MappedKeyValue.java
|
||||
src/main/com/apple/foundationdb/LocalityUtil.java
|
||||
src/main/com/apple/foundationdb/NativeFuture.java
|
||||
src/main/com/apple/foundationdb/NativeObjectWrapper.java
|
||||
|
@ -49,9 +53,12 @@ set(JAVA_BINDING_SRCS
|
|||
src/main/com/apple/foundationdb/package-info.java
|
||||
src/main/com/apple/foundationdb/Range.java
|
||||
src/main/com/apple/foundationdb/RangeQuery.java
|
||||
src/main/com/apple/foundationdb/MappedRangeQuery.java
|
||||
src/main/com/apple/foundationdb/KeyArrayResult.java
|
||||
src/main/com/apple/foundationdb/RangeResult.java
|
||||
src/main/com/apple/foundationdb/MappedRangeResult.java
|
||||
src/main/com/apple/foundationdb/RangeResultInfo.java
|
||||
src/main/com/apple/foundationdb/MappedRangeResultInfo.java
|
||||
src/main/com/apple/foundationdb/RangeResultSummary.java
|
||||
src/main/com/apple/foundationdb/ReadTransaction.java
|
||||
src/main/com/apple/foundationdb/ReadTransactionContext.java
|
||||
|
@ -154,6 +161,7 @@ endif()
|
|||
set_target_properties(java_workloads PROPERTIES
|
||||
LIBRARY_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/share/foundationdb")
|
||||
target_link_libraries(java_workloads PUBLIC fdb_c ${JNI_LIBRARIES})
|
||||
target_link_libraries(java_workloads PRIVATE flow) # mostly for boost
|
||||
target_include_directories(java_workloads PUBLIC ${JNI_INCLUDE_DIRS})
|
||||
|
||||
set(CMAKE_JAVA_COMPILE_FLAGS "-source" "1.8" "-target" "1.8" "-XDignore.symbol.file")
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2019 Apple Inc. and the FoundationDB project authors
|
||||
* Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
|
||||
* Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
|
@ -20,6 +20,7 @@
|
|||
|
||||
#include <jni.h>
|
||||
#include <string.h>
|
||||
#include <functional>
|
||||
|
||||
#include "com_apple_foundationdb_FDB.h"
|
||||
#include "com_apple_foundationdb_FDBDatabase.h"
|
||||
|
@ -50,10 +51,14 @@ static thread_local jmethodID g_IFutureCallback_call_methodID = JNI_NULL;
|
|||
static thread_local bool is_external = false;
|
||||
static jclass range_result_summary_class;
|
||||
static jclass range_result_class;
|
||||
static jclass mapped_range_result_class;
|
||||
static jclass mapped_key_value_class;
|
||||
static jclass string_class;
|
||||
static jclass key_array_result_class;
|
||||
static jmethodID key_array_result_init;
|
||||
static jmethodID range_result_init;
|
||||
static jmethodID mapped_range_result_init;
|
||||
static jmethodID mapped_key_value_from_bytes;
|
||||
static jmethodID range_result_summary_init;
|
||||
|
||||
void detachIfExternalThread(void* ignore) {
|
||||
|
@ -478,6 +483,127 @@ JNIEXPORT jobject JNICALL Java_com_apple_foundationdb_FutureResults_FutureResult
|
|||
return result;
|
||||
}
|
||||
|
||||
class ExecuteOnLeave {
|
||||
std::function<void()> func;
|
||||
|
||||
public:
|
||||
explicit ExecuteOnLeave(std::function<void()> func) : func(func) {}
|
||||
~ExecuteOnLeave() { func(); }
|
||||
};
|
||||
|
||||
void cpBytesAndLengthInner(uint8_t*& pByte, jint*& pLength, const uint8_t* data, const int& length) {
|
||||
*pLength = length;
|
||||
pLength++;
|
||||
|
||||
memcpy(pByte, data, length);
|
||||
pByte += length;
|
||||
}
|
||||
|
||||
void cpBytesAndLength(uint8_t*& pByte, jint*& pLength, const FDBKey& key) {
|
||||
cpBytesAndLengthInner(pByte, pLength, key.key, key.key_length);
|
||||
}
|
||||
|
||||
JNIEXPORT jobject JNICALL Java_com_apple_foundationdb_FutureMappedResults_FutureMappedResults_1get(JNIEnv* jenv,
|
||||
jobject,
|
||||
jlong future) {
|
||||
if (!future) {
|
||||
throwParamNotNull(jenv);
|
||||
return JNI_NULL;
|
||||
}
|
||||
|
||||
FDBFuture* f = (FDBFuture*)future;
|
||||
|
||||
const FDBMappedKeyValue* kvms;
|
||||
int count;
|
||||
fdb_bool_t more;
|
||||
fdb_error_t err = fdb_future_get_mappedkeyvalue_array(f, &kvms, &count, &more);
|
||||
if (err) {
|
||||
safeThrow(jenv, getThrowable(jenv, err));
|
||||
return JNI_NULL;
|
||||
}
|
||||
|
||||
jobjectArray mrr_values = jenv->NewObjectArray(count, mapped_key_value_class, NULL);
|
||||
if (!mrr_values) {
|
||||
if (!jenv->ExceptionOccurred())
|
||||
throwOutOfMem(jenv);
|
||||
return JNI_NULL;
|
||||
}
|
||||
|
||||
for (int i = 0; i < count; i++) {
|
||||
FDBMappedKeyValue kvm = kvms[i];
|
||||
int kvm_count = kvm.getRange.m_size;
|
||||
|
||||
const int totalLengths = 4 + kvm_count * 2;
|
||||
|
||||
int totalBytes = kvm.key.key_length + kvm.value.key_length + kvm.getRange.begin.key.key_length +
|
||||
kvm.getRange.end.key.key_length;
|
||||
for (int i = 0; i < kvm_count; i++) {
|
||||
auto kv = kvm.getRange.data[i];
|
||||
totalBytes += kv.key_length + kv.value_length;
|
||||
}
|
||||
|
||||
jbyteArray bytesArray = jenv->NewByteArray(totalBytes);
|
||||
if (!bytesArray) {
|
||||
if (!jenv->ExceptionOccurred())
|
||||
throwOutOfMem(jenv);
|
||||
return JNI_NULL;
|
||||
}
|
||||
|
||||
jintArray lengthArray = jenv->NewIntArray(totalLengths);
|
||||
if (!lengthArray) {
|
||||
if (!jenv->ExceptionOccurred())
|
||||
throwOutOfMem(jenv);
|
||||
return JNI_NULL;
|
||||
}
|
||||
|
||||
uint8_t* bytes_barr = (uint8_t*)jenv->GetByteArrayElements(bytesArray, JNI_NULL);
|
||||
if (!bytes_barr) {
|
||||
throwRuntimeEx(jenv, "Error getting handle to native resources");
|
||||
return JNI_NULL;
|
||||
}
|
||||
{
|
||||
ExecuteOnLeave e([&]() { jenv->ReleaseByteArrayElements(bytesArray, (jbyte*)bytes_barr, 0); });
|
||||
|
||||
jint* length_barr = jenv->GetIntArrayElements(lengthArray, JNI_NULL);
|
||||
if (!length_barr) {
|
||||
if (!jenv->ExceptionOccurred())
|
||||
throwOutOfMem(jenv);
|
||||
return JNI_NULL;
|
||||
}
|
||||
{
|
||||
ExecuteOnLeave e([&]() { jenv->ReleaseIntArrayElements(lengthArray, length_barr, 0); });
|
||||
|
||||
uint8_t* pByte = bytes_barr;
|
||||
jint* pLength = length_barr;
|
||||
|
||||
cpBytesAndLength(pByte, pLength, kvm.key);
|
||||
cpBytesAndLength(pByte, pLength, kvm.value);
|
||||
cpBytesAndLength(pByte, pLength, kvm.getRange.begin.key);
|
||||
cpBytesAndLength(pByte, pLength, kvm.getRange.end.key);
|
||||
for (int kvm_i = 0; kvm_i < kvm_count; kvm_i++) {
|
||||
auto kv = kvm.getRange.data[kvm_i];
|
||||
cpBytesAndLengthInner(pByte, pLength, kv.key, kv.key_length);
|
||||
cpBytesAndLengthInner(pByte, pLength, kv.value, kv.value_length);
|
||||
}
|
||||
}
|
||||
}
|
||||
// After native arrays are released
|
||||
jobject mkv = jenv->CallStaticObjectMethod(
|
||||
mapped_key_value_class, mapped_key_value_from_bytes, (jbyteArray)bytesArray, (jintArray)lengthArray);
|
||||
if (jenv->ExceptionOccurred())
|
||||
return JNI_NULL;
|
||||
jenv->SetObjectArrayElement(mrr_values, i, mkv);
|
||||
if (jenv->ExceptionOccurred())
|
||||
return JNI_NULL;
|
||||
}
|
||||
|
||||
jobject mrr = jenv->NewObject(mapped_range_result_class, mapped_range_result_init, mrr_values, (jboolean)more);
|
||||
if (jenv->ExceptionOccurred())
|
||||
return JNI_NULL;
|
||||
|
||||
return mrr;
|
||||
}
|
||||
|
||||
// SOMEDAY: explore doing this more efficiently with Direct ByteBuffers
|
||||
JNIEXPORT jbyteArray JNICALL Java_com_apple_foundationdb_FutureResult_FutureResult_1get(JNIEnv* jenv,
|
||||
jobject,
|
||||
|
@ -767,23 +893,22 @@ JNIEXPORT jlong JNICALL Java_com_apple_foundationdb_FDBTransaction_Transaction_1
|
|||
return (jlong)f;
|
||||
}
|
||||
|
||||
JNIEXPORT jlong JNICALL
|
||||
Java_com_apple_foundationdb_FDBTransaction_Transaction_1getRangeAndFlatMap(JNIEnv* jenv,
|
||||
jobject,
|
||||
jlong tPtr,
|
||||
jbyteArray keyBeginBytes,
|
||||
jboolean orEqualBegin,
|
||||
jint offsetBegin,
|
||||
jbyteArray keyEndBytes,
|
||||
jboolean orEqualEnd,
|
||||
jint offsetEnd,
|
||||
jbyteArray mapperBytes,
|
||||
jint rowLimit,
|
||||
jint targetBytes,
|
||||
jint streamingMode,
|
||||
jint iteration,
|
||||
jboolean snapshot,
|
||||
jboolean reverse) {
|
||||
JNIEXPORT jlong JNICALL Java_com_apple_foundationdb_FDBTransaction_Transaction_1getMappedRange(JNIEnv* jenv,
|
||||
jobject,
|
||||
jlong tPtr,
|
||||
jbyteArray keyBeginBytes,
|
||||
jboolean orEqualBegin,
|
||||
jint offsetBegin,
|
||||
jbyteArray keyEndBytes,
|
||||
jboolean orEqualEnd,
|
||||
jint offsetEnd,
|
||||
jbyteArray mapperBytes,
|
||||
jint rowLimit,
|
||||
jint targetBytes,
|
||||
jint streamingMode,
|
||||
jint iteration,
|
||||
jboolean snapshot,
|
||||
jboolean reverse) {
|
||||
if (!tPtr || !keyBeginBytes || !keyEndBytes || !mapperBytes) {
|
||||
throwParamNotNull(jenv);
|
||||
return 0;
|
||||
|
@ -814,23 +939,23 @@ Java_com_apple_foundationdb_FDBTransaction_Transaction_1getRangeAndFlatMap(JNIEn
|
|||
return 0;
|
||||
}
|
||||
|
||||
FDBFuture* f = fdb_transaction_get_range_and_flat_map(tr,
|
||||
barrBegin,
|
||||
jenv->GetArrayLength(keyBeginBytes),
|
||||
orEqualBegin,
|
||||
offsetBegin,
|
||||
barrEnd,
|
||||
jenv->GetArrayLength(keyEndBytes),
|
||||
orEqualEnd,
|
||||
offsetEnd,
|
||||
barrMapper,
|
||||
jenv->GetArrayLength(mapperBytes),
|
||||
rowLimit,
|
||||
targetBytes,
|
||||
(FDBStreamingMode)streamingMode,
|
||||
iteration,
|
||||
snapshot,
|
||||
reverse);
|
||||
FDBFuture* f = fdb_transaction_get_mapped_range(tr,
|
||||
barrBegin,
|
||||
jenv->GetArrayLength(keyBeginBytes),
|
||||
orEqualBegin,
|
||||
offsetBegin,
|
||||
barrEnd,
|
||||
jenv->GetArrayLength(keyEndBytes),
|
||||
orEqualEnd,
|
||||
offsetEnd,
|
||||
barrMapper,
|
||||
jenv->GetArrayLength(mapperBytes),
|
||||
rowLimit,
|
||||
targetBytes,
|
||||
(FDBStreamingMode)streamingMode,
|
||||
iteration,
|
||||
snapshot,
|
||||
reverse);
|
||||
jenv->ReleaseByteArrayElements(keyBeginBytes, (jbyte*)barrBegin, JNI_ABORT);
|
||||
jenv->ReleaseByteArrayElements(keyEndBytes, (jbyte*)barrEnd, JNI_ABORT);
|
||||
jenv->ReleaseByteArrayElements(mapperBytes, (jbyte*)barrMapper, JNI_ABORT);
|
||||
|
@ -842,7 +967,6 @@ JNIEXPORT void JNICALL Java_com_apple_foundationdb_FutureResults_FutureResults_1
|
|||
jlong future,
|
||||
jobject jbuffer,
|
||||
jint bufferCapacity) {
|
||||
|
||||
if (!future) {
|
||||
throwParamNotNull(jenv);
|
||||
return;
|
||||
|
@ -902,6 +1026,92 @@ JNIEXPORT void JNICALL Java_com_apple_foundationdb_FutureResults_FutureResults_1
|
|||
}
|
||||
}
|
||||
|
||||
void memcpyStringInner(uint8_t* buffer, int& offset, const uint8_t* data, const int& length) {
|
||||
memcpy(buffer + offset, &length, sizeof(jint));
|
||||
offset += sizeof(jint);
|
||||
memcpy(buffer + offset, data, length);
|
||||
offset += length;
|
||||
}
|
||||
|
||||
void memcpyString(uint8_t* buffer, int& offset, const FDBKey& key) {
|
||||
memcpyStringInner(buffer, offset, key.key, key.key_length);
|
||||
}
|
||||
|
||||
JNIEXPORT void JNICALL
|
||||
Java_com_apple_foundationdb_FutureMappedResults_FutureMappedResults_1getDirect(JNIEnv* jenv,
|
||||
jobject,
|
||||
jlong future,
|
||||
jobject jbuffer,
|
||||
jint bufferCapacity) {
|
||||
|
||||
if (!future) {
|
||||
throwParamNotNull(jenv);
|
||||
return;
|
||||
}
|
||||
|
||||
uint8_t* buffer = (uint8_t*)jenv->GetDirectBufferAddress(jbuffer);
|
||||
if (!buffer) {
|
||||
if (!jenv->ExceptionOccurred())
|
||||
throwRuntimeEx(jenv, "Error getting handle to native resources");
|
||||
return;
|
||||
}
|
||||
|
||||
FDBFuture* f = (FDBFuture*)future;
|
||||
const FDBMappedKeyValue* kvms;
|
||||
int count;
|
||||
fdb_bool_t more;
|
||||
fdb_error_t err = fdb_future_get_mappedkeyvalue_array(f, &kvms, &count, &more);
|
||||
if (err) {
|
||||
safeThrow(jenv, getThrowable(jenv, err));
|
||||
return;
|
||||
}
|
||||
|
||||
int totalCapacityNeeded = 2 * sizeof(jint);
|
||||
for (int i = 0; i < count; i++) {
|
||||
const FDBMappedKeyValue& kvm = kvms[i];
|
||||
totalCapacityNeeded += kvm.key.key_length + kvm.value.key_length + kvm.getRange.begin.key.key_length +
|
||||
kvm.getRange.end.key.key_length +
|
||||
5 * sizeof(jint); // Besides the 4 lengths above, also one for kvm_count.
|
||||
int kvm_count = kvm.getRange.m_size;
|
||||
for (int i = 0; i < kvm_count; i++) {
|
||||
auto kv = kvm.getRange.data[i];
|
||||
totalCapacityNeeded += kv.key_length + kv.value_length + 2 * sizeof(jint);
|
||||
}
|
||||
if (bufferCapacity < totalCapacityNeeded) {
|
||||
count = i; /* Only fit first `i` K/V pairs */
|
||||
more = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
int offset = 0;
|
||||
|
||||
// First copy RangeResultSummary, i.e. [keyCount, more]
|
||||
memcpy(buffer + offset, &count, sizeof(jint));
|
||||
offset += sizeof(jint);
|
||||
|
||||
memcpy(buffer + offset, &more, sizeof(jint));
|
||||
offset += sizeof(jint);
|
||||
|
||||
for (int i = 0; i < count; i++) {
|
||||
const FDBMappedKeyValue& kvm = kvms[i];
|
||||
memcpyString(buffer, offset, kvm.key);
|
||||
memcpyString(buffer, offset, kvm.value);
|
||||
memcpyString(buffer, offset, kvm.getRange.begin.key);
|
||||
memcpyString(buffer, offset, kvm.getRange.end.key);
|
||||
|
||||
int kvm_count = kvm.getRange.m_size;
|
||||
memcpy(buffer + offset, &kvm_count, sizeof(jint));
|
||||
offset += sizeof(jint);
|
||||
|
||||
for (int i = 0; i < kvm_count; i++) {
|
||||
auto kv = kvm.getRange.data[i];
|
||||
memcpyStringInner(buffer, offset, kv.key, kv.key_length);
|
||||
memcpyStringInner(buffer, offset, kv.value, kv.value_length);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
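For readability, the byte layout that FutureMappedResults_getDirect writes above can be summarized as follows; this is reconstructed from the memcpy calls in the hunk, not an official format description:

// [jint count][jint more]                        // RangeResultSummary header
// repeated count times:
//   [jint len][bytes] key                        // index entry key
//   [jint len][bytes] value
//   [jint len][bytes] rangeBegin
//   [jint len][bytes] rangeEnd
//   [jint kvm_count]                             // number of underlying range pairs
//   repeated kvm_count times:
//     [jint len][bytes] key
//     [jint len][bytes] value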
JNIEXPORT jlong JNICALL
|
||||
Java_com_apple_foundationdb_FDBTransaction_Transaction_1getEstimatedRangeSizeBytes(JNIEnv* jenv,
|
||||
jobject,
|
||||
|
@ -1396,6 +1606,16 @@ jint JNI_OnLoad(JavaVM* vm, void* reserved) {
|
|||
range_result_init = env->GetMethodID(local_range_result_class, "<init>", "([B[IZ)V");
|
||||
range_result_class = (jclass)(env)->NewGlobalRef(local_range_result_class);
|
||||
|
||||
jclass local_mapped_range_result_class = env->FindClass("com/apple/foundationdb/MappedRangeResult");
|
||||
mapped_range_result_init =
|
||||
env->GetMethodID(local_mapped_range_result_class, "<init>", "([Lcom/apple/foundationdb/MappedKeyValue;Z)V");
|
||||
mapped_range_result_class = (jclass)(env)->NewGlobalRef(local_mapped_range_result_class);
|
||||
|
||||
jclass local_mapped_key_value_class = env->FindClass("com/apple/foundationdb/MappedKeyValue");
|
||||
mapped_key_value_from_bytes = env->GetStaticMethodID(
|
||||
local_mapped_key_value_class, "fromBytes", "([B[I)Lcom/apple/foundationdb/MappedKeyValue;");
|
||||
mapped_key_value_class = (jclass)(env)->NewGlobalRef(local_mapped_key_value_class);
|
||||
|
||||
jclass local_key_array_result_class = env->FindClass("com/apple/foundationdb/KeyArrayResult");
|
||||
key_array_result_init = env->GetMethodID(local_key_array_result_class, "<init>", "([B[I)V");
|
||||
key_array_result_class = (jclass)(env)->NewGlobalRef(local_key_array_result_class);
|
||||
|
@ -1424,6 +1644,12 @@ void JNI_OnUnload(JavaVM* vm, void* reserved) {
|
|||
if (range_result_class != JNI_NULL) {
|
||||
env->DeleteGlobalRef(range_result_class);
|
||||
}
|
||||
if (mapped_range_result_class != JNI_NULL) {
|
||||
env->DeleteGlobalRef(mapped_range_result_class);
|
||||
}
|
||||
if (mapped_key_value_class != JNI_NULL) {
|
||||
env->DeleteGlobalRef(mapped_key_value_class);
|
||||
}
|
||||
if (string_class != JNI_NULL) {
|
||||
env->DeleteGlobalRef(string_class);
|
||||
}
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* RangeAndFlatMapQueryIntegrationTest.java
|
||||
* MappedRangeQueryIntegrationTest.java
|
||||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
|
@ -40,7 +40,7 @@ import org.junit.jupiter.api.Test;
|
|||
import org.junit.jupiter.api.extension.ExtendWith;
|
||||
|
||||
@ExtendWith(RequiresDatabase.class)
|
||||
class RangeAndFlatMapQueryIntegrationTest {
|
||||
class MappedRangeQueryIntegrationTest {
|
||||
private static final FDB fdb = FDB.selectAPIVersion(710);
|
||||
public String databaseArg = null;
|
||||
private Database openFDB() { return fdb.open(databaseArg); }
|
||||
|
@ -67,16 +67,27 @@ class RangeAndFlatMapQueryIntegrationTest {
|
|||
static private String indexKey(int i) { return String.format("index-key-of-record-%08d", i); }
|
||||
static private String dataOfRecord(int i) { return String.format("data-of-record-%08d", i); }
|
||||
|
||||
static byte[] MAPPER = Tuple.from(PREFIX, RECORD, "{K[3]}").pack();
|
||||
static byte[] MAPPER = Tuple.from(PREFIX, RECORD, "{K[3]}", "{...}").pack();
|
||||
static int SPLIT_SIZE = 3;
|
||||
|
||||
static private byte[] indexEntryKey(final int i) {
|
||||
return Tuple.from(PREFIX, INDEX, indexKey(i), primaryKey(i)).pack();
|
||||
}
|
||||
static private byte[] recordKey(final int i) { return Tuple.from(PREFIX, RECORD, primaryKey(i)).pack(); }
|
||||
static private byte[] recordValue(final int i) { return Tuple.from(dataOfRecord(i)).pack(); }
|
||||
static private byte[] recordKeyPrefix(final int i) {
|
||||
return Tuple.from(PREFIX, RECORD, primaryKey(i)).pack();
|
||||
}
|
||||
static private byte[] recordKey(final int i, final int split) {
|
||||
return Tuple.from(PREFIX, RECORD, primaryKey(i), split).pack();
|
||||
}
|
||||
static private byte[] recordValue(final int i, final int split) {
|
||||
return Tuple.from(dataOfRecord(i), split).pack();
|
||||
}
|
||||
|
||||
static private void insertRecordWithIndex(final Transaction tr, final int i) {
|
||||
tr.set(indexEntryKey(i), EMPTY);
|
||||
tr.set(recordKey(i), recordValue(i));
|
||||
for (int split = 0; split < SPLIT_SIZE; split++) {
|
||||
tr.set(recordKey(i, split), recordValue(i, split));
|
||||
}
|
||||
}
|
||||
|
||||
private static String getArgFromEnv() {
|
||||
|
@ -86,7 +97,7 @@ class RangeAndFlatMapQueryIntegrationTest {
|
|||
return cluster;
|
||||
}
|
||||
public static void main(String[] args) throws Exception {
|
||||
final RangeAndFlatMapQueryIntegrationTest test = new RangeAndFlatMapQueryIntegrationTest();
|
||||
final MappedRangeQueryIntegrationTest test = new MappedRangeQueryIntegrationTest();
|
||||
test.databaseArg = getArgFromEnv();
|
||||
test.clearDatabase();
|
||||
test.comparePerformance();
|
||||
|
@ -94,21 +105,21 @@ class RangeAndFlatMapQueryIntegrationTest {
|
|||
}
|
||||
|
||||
int numRecords = 10000;
|
||||
int numQueries = 10000;
|
||||
int numQueries = 1;
|
||||
int numRecordsPerQuery = 100;
|
||||
boolean validate = false;
|
||||
boolean validate = true;
|
||||
@Test
|
||||
void comparePerformance() {
|
||||
FDB fdb = FDB.selectAPIVersion(710);
|
||||
try (Database db = openFDB()) {
|
||||
insertRecordsWithIndexes(numRecords, db);
|
||||
instrument(rangeQueryAndGet, "rangeQueryAndGet", db);
|
||||
instrument(rangeQueryAndFlatMap, "rangeQueryAndFlatMap", db);
|
||||
instrument(rangeQueryAndThenRangeQueries, "rangeQueryAndThenRangeQueries", db);
|
||||
instrument(mappedRangeQuery, "mappedRangeQuery", db);
|
||||
}
|
||||
}
|
||||
|
||||
private void instrument(final RangeQueryWithIndex query, final String name, final Database db) {
|
||||
System.out.printf("Starting %s (numQueries:%d, numRecordsPerQuery:%d)\n", name, numQueries, numRecordsPerQuery);
|
||||
System.out.printf("Starting %s (numQueries:%d, numRecordsPerQuery:%d, validation:%s)\n", name, numQueries, numRecordsPerQuery, validate ? "on" : "off");
|
||||
long startTime = System.currentTimeMillis();
|
||||
for (int queryId = 0; queryId < numQueries; queryId++) {
|
||||
int begin = ThreadLocalRandom.current().nextInt(numRecords - numRecordsPerQuery);
|
||||
|
@ -140,7 +151,7 @@ class RangeAndFlatMapQueryIntegrationTest {
|
|||
void run(int begin, int end, Database db);
|
||||
}
|
||||
|
||||
RangeQueryWithIndex rangeQueryAndGet = (int begin, int end, Database db) -> db.run(tr -> {
|
||||
RangeQueryWithIndex rangeQueryAndThenRangeQueries = (int begin, int end, Database db) -> db.run(tr -> {
|
||||
try {
|
||||
List<KeyValue> kvs = tr.getRange(KeySelector.firstGreaterOrEqual(indexEntryKey(begin)),
|
||||
KeySelector.firstGreaterOrEqual(indexEntryKey(end)),
|
||||
|
@ -150,22 +161,25 @@ class RangeAndFlatMapQueryIntegrationTest {
|
|||
Assertions.assertEquals(end - begin, kvs.size());
|
||||
|
||||
// Get the records of each index entry IN PARALLEL.
|
||||
List<CompletableFuture<byte[]>> resultFutures = new ArrayList<>();
|
||||
List<CompletableFuture<List<KeyValue>>> resultFutures = new ArrayList<>();
|
||||
// In reality, we need to get the record key by parsing the index entry key. But considering this is a
|
||||
// performance test, we just ignore the returned key and simply generate it from recordKey.
|
||||
for (int id = begin; id < end; id++) {
|
||||
resultFutures.add(tr.get(recordKey(id)));
|
||||
resultFutures.add(tr.getRange(Range.startsWith(recordKeyPrefix(id)),
|
||||
ReadTransaction.ROW_LIMIT_UNLIMITED, false, StreamingMode.WANT_ALL).asList());
|
||||
}
|
||||
AsyncUtil.whenAll(resultFutures).get();
|
||||
|
||||
if (validate) {
|
||||
final Iterator<KeyValue> indexes = kvs.iterator();
|
||||
final Iterator<CompletableFuture<byte[]>> records = resultFutures.iterator();
|
||||
final Iterator<CompletableFuture<List<KeyValue>>> records = resultFutures.iterator();
|
||||
for (int id = begin; id < end; id++) {
|
||||
Assertions.assertTrue(indexes.hasNext());
|
||||
assertByteArrayEquals(indexEntryKey(id), indexes.next().getKey());
|
||||
|
||||
Assertions.assertTrue(records.hasNext());
|
||||
assertByteArrayEquals(recordValue(id), records.next().get());
|
||||
List<KeyValue> rangeResult = records.next().get();
|
||||
validateRangeResult(id, rangeResult);
|
||||
}
|
||||
Assertions.assertFalse(indexes.hasNext());
|
||||
Assertions.assertFalse(records.hasNext());
|
||||
|
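For contrast with the single-request mapped query added below, the pattern above is a client-side join: one extra getRange() per index entry, resolved in parallel. A minimal sketch of that fan-out, reusing the test's own helpers (recordKeyPrefix) and a transaction tr:

    // one secondary range read per index entry, issued concurrently
    List<CompletableFuture<List<KeyValue>>> lookups = new ArrayList<>();
    for (int id = begin; id < end; id++) {
        lookups.add(tr.getRange(Range.startsWith(recordKeyPrefix(id))).asList());
    }
    AsyncUtil.whenAll(lookups).join(); // all record reads complete before validation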
@ -176,23 +190,32 @@ class RangeAndFlatMapQueryIntegrationTest {
|
|||
return null;
|
||||
});
|
||||
|
||||
RangeQueryWithIndex rangeQueryAndFlatMap = (int begin, int end, Database db) -> db.run(tr -> {
|
||||
RangeQueryWithIndex mappedRangeQuery = (int begin, int end, Database db) -> db.run(tr -> {
|
||||
try {
|
||||
tr.options().setReadYourWritesDisable();
|
||||
List<KeyValue> kvs =
|
||||
tr.snapshot()
|
||||
.getRangeAndFlatMap(KeySelector.firstGreaterOrEqual(indexEntryKey(begin)),
|
||||
KeySelector.firstGreaterOrEqual(indexEntryKey(end)), MAPPER,
|
||||
ReadTransaction.ROW_LIMIT_UNLIMITED, false, StreamingMode.WANT_ALL)
|
||||
List<MappedKeyValue> kvs =
|
||||
tr.getMappedRange(KeySelector.firstGreaterOrEqual(indexEntryKey(begin)),
|
||||
KeySelector.firstGreaterOrEqual(indexEntryKey(end)), MAPPER,
|
||||
ReadTransaction.ROW_LIMIT_UNLIMITED, false, StreamingMode.WANT_ALL)
|
||||
.asList()
|
||||
.get();
|
||||
Assertions.assertEquals(end - begin, kvs.size());
|
||||
|
||||
if (validate) {
|
||||
final Iterator<KeyValue> results = kvs.iterator();
|
||||
final Iterator<MappedKeyValue> results = kvs.iterator();
|
||||
for (int id = begin; id < end; id++) {
|
||||
Assertions.assertTrue(results.hasNext());
|
||||
assertByteArrayEquals(recordValue(id), results.next().getValue());
|
||||
MappedKeyValue mappedKeyValue = results.next();
|
||||
assertByteArrayEquals(indexEntryKey(id), mappedKeyValue.getKey());
|
||||
assertByteArrayEquals(EMPTY, mappedKeyValue.getValue());
|
||||
assertByteArrayEquals(indexEntryKey(id), mappedKeyValue.getKey());
|
||||
|
||||
byte[] prefix = recordKeyPrefix(id);
|
||||
assertByteArrayEquals(prefix, mappedKeyValue.getRangeBegin());
|
||||
prefix[prefix.length - 1] = (byte)0x01;
|
||||
assertByteArrayEquals(prefix, mappedKeyValue.getRangeEnd());
|
||||
|
||||
List<KeyValue> rangeResult = mappedKeyValue.getRangeResult();
|
||||
validateRangeResult(id, rangeResult);
|
||||
}
|
||||
Assertions.assertFalse(results.hasNext());
|
||||
}
|
||||
|
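The rangeBegin/rangeEnd check above relies on the tuple-encoded prefix ending in 0x00, so bumping the last byte to 0x01 gives the exclusive end of the secondary range. The same bounds can be derived with Range.startsWith, as in this sketch (test helpers assumed):

    byte[] prefix = recordKeyPrefix(id);
    Range expected = Range.startsWith(prefix); // [prefix, strinc(prefix)), i.e. last 0x00 bumped to 0x01
    assertByteArrayEquals(expected.begin, mappedKeyValue.getRangeBegin());
    assertByteArrayEquals(expected.end, mappedKeyValue.getRangeEnd());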
@ -202,55 +225,16 @@ class RangeAndFlatMapQueryIntegrationTest {
|
|||
return null;
|
||||
});
|
||||
|
||||
void validateRangeResult(int id, List<KeyValue> rangeResult) {
|
||||
Assertions.assertEquals(SPLIT_SIZE, rangeResult.size());
|
||||
for (int split = 0; split < SPLIT_SIZE; split++) {
|
||||
KeyValue keyValue = rangeResult.get(split);
|
||||
assertByteArrayEquals(recordKey(id, split), keyValue.getKey());
|
||||
assertByteArrayEquals(recordValue(id, split), keyValue.getValue());
|
||||
}
|
||||
}
|
||||
|
||||
void assertByteArrayEquals(byte[] expected, byte[] actual) {
|
||||
Assertions.assertEquals(ByteArrayUtil.printable(expected), ByteArrayUtil.printable(actual));
|
||||
}
|
||||
|
||||
@Test
|
||||
void rangeAndFlatMapQueryOverMultipleRows() throws Exception {
|
||||
try (Database db = openFDB()) {
|
||||
insertRecordsWithIndexes(3, db);
|
||||
|
||||
List<byte[]> expected_data_of_records = new ArrayList<>();
|
||||
for (int i = 0; i <= 1; i++) {
|
||||
expected_data_of_records.add(recordValue(i));
|
||||
}
|
||||
|
||||
db.run(tr -> {
|
||||
// getRangeAndFlatMap is only supported without RYW (read-your-writes). This is a must!
|
||||
tr.options().setReadYourWritesDisable();
|
||||
|
||||
// getRangeAndFlatMap is only supported with snapshot.
|
||||
Iterator<KeyValue> kvs =
|
||||
tr.snapshot()
|
||||
.getRangeAndFlatMap(KeySelector.firstGreaterOrEqual(indexEntryKey(0)),
|
||||
KeySelector.firstGreaterThan(indexEntryKey(1)), MAPPER,
|
||||
ReadTransaction.ROW_LIMIT_UNLIMITED, false, StreamingMode.WANT_ALL)
|
||||
.iterator();
|
||||
Iterator<byte[]> expected_data_of_records_iter = expected_data_of_records.iterator();
|
||||
while (expected_data_of_records_iter.hasNext()) {
|
||||
Assertions.assertTrue(kvs.hasNext(), "iterator ended too early");
|
||||
KeyValue kv = kvs.next();
|
||||
byte[] actual_data_of_record = kv.getValue();
|
||||
byte[] expected_data_of_record = expected_data_of_records_iter.next();
|
||||
|
||||
// System.out.println("result key:" + ByteArrayUtil.printable(kv.getKey()) + " value:" +
|
||||
// ByteArrayUtil.printable(kv.getValue())); Output:
|
||||
// result
|
||||
// key:\x02prefix\x00\x02INDEX\x00\x02index-key-of-record-0\x00\x02primary-key-of-record-0\x00
|
||||
// value:\x02data-of-record-0\x00
|
||||
// result
|
||||
// key:\x02prefix\x00\x02INDEX\x00\x02index-key-of-record-1\x00\x02primary-key-of-record-1\x00
|
||||
// value:\x02data-of-record-1\x00
|
||||
|
||||
// For now, we don't guarantee what the returned keys mean.
|
||||
Assertions.assertArrayEquals(expected_data_of_record, actual_data_of_record,
|
||||
"Incorrect data of record!");
|
||||
}
|
||||
Assertions.assertFalse(kvs.hasNext(), "Iterator returned too much data");
|
||||
|
||||
return null;
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
|
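Taken together, the new test exercises the one-request flow: a single getMappedRange call returns each index entry plus the secondary range it expands to. A condensed usage sketch, assuming the test's MAPPER constant and indexEntryKey helper:

    List<MappedKeyValue> entries = db.run(tr -> {
        try {
            return tr.getMappedRange(KeySelector.firstGreaterOrEqual(indexEntryKey(0)),
                                     KeySelector.firstGreaterThan(indexEntryKey(1)), MAPPER,
                                     ReadTransaction.ROW_LIMIT_UNLIMITED, false, StreamingMode.WANT_ALL)
                     .asList()
                     .get();
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    });
    for (MappedKeyValue entry : entries) {
        List<KeyValue> records = entry.getRangeResult(); // the records behind this index entry
    }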
@ -89,8 +89,6 @@ public class FakeFDBTransaction extends FDBTransaction {
|
|||
|
||||
@Override
|
||||
protected FutureResults getRange_internal(KeySelector begin, KeySelector end,
|
||||
// TODO: map is not supported in FakeFDBTransaction yet.
|
||||
byte[] mapper, // Nullable
|
||||
int rowLimit, int targetBytes, int streamingMode, int iteration,
|
||||
boolean isSnapshot, boolean reverse) {
|
||||
numRangeCalls++;
|
||||
|
|
|
@ -32,11 +32,11 @@ import java.util.NoSuchElementException;
|
|||
* The serialization format of result is =>
|
||||
* [int keyCount, boolean more, ListOf<(int keyLen, int valueLen, byte[] key, byte[] value)>]
|
||||
*/
|
||||
class DirectBufferIterator implements Iterator<KeyValue>, AutoCloseable {
|
||||
private ByteBuffer byteBuffer;
|
||||
private int current = 0;
|
||||
private int keyCount = -1;
|
||||
private boolean more = false;
|
||||
abstract class DirectBufferIterator implements AutoCloseable {
|
||||
protected ByteBuffer byteBuffer;
|
||||
protected int current = 0;
|
||||
protected int keyCount = -1;
|
||||
protected boolean more = false;
|
||||
|
||||
public DirectBufferIterator(ByteBuffer buffer) {
|
||||
byteBuffer = buffer;
|
||||
|
@ -55,31 +55,11 @@ class DirectBufferIterator implements Iterator<KeyValue>, AutoCloseable {
|
|||
return keyCount > -1;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean hasNext() {
|
||||
assert (hasResultReady());
|
||||
return current < keyCount;
|
||||
}
|
||||
|
||||
@Override
|
||||
public KeyValue next() {
|
||||
assert (hasResultReady()); // Must only be called once the result is ready.
|
||||
if (!hasNext()) {
|
||||
throw new NoSuchElementException();
|
||||
}
|
||||
|
||||
final int keyLen = byteBuffer.getInt();
|
||||
final int valueLen = byteBuffer.getInt();
|
||||
byte[] key = new byte[keyLen];
|
||||
byteBuffer.get(key);
|
||||
|
||||
byte[] value = new byte[valueLen];
|
||||
byteBuffer.get(value);
|
||||
|
||||
current += 1;
|
||||
return new KeyValue(key, value);
|
||||
}
|
||||
|
||||
public ByteBuffer getBuffer() {
|
||||
return byteBuffer;
|
||||
}
|
||||
|
|
|
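With next() removed here, decoding moves into the concrete subclasses. A minimal sketch of the per-entry decode that the plain key/value subclass (RangeResultDirectBufferIterator, shown further down) is expected to perform, following the layout documented above:

    // layout per entry: int keyLen, int valueLen, byte[] key, byte[] value
    KeyValue readEntry(ByteBuffer byteBuffer) {
        final int keyLen = byteBuffer.getInt();
        final int valueLen = byteBuffer.getInt();
        byte[] key = new byte[keyLen];
        byteBuffer.get(key);
        byte[] value = new byte[valueLen];
        byteBuffer.get(value);
        return new KeyValue(key, value);
    }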
@ -92,12 +92,10 @@ class FDBTransaction extends NativeObjectWrapper implements Transaction, OptionC
|
|||
}
|
||||
|
||||
@Override
|
||||
public AsyncIterable<KeyValue> getRangeAndFlatMap(KeySelector begin, KeySelector end, byte[] mapper, int limit,
|
||||
boolean reverse, StreamingMode mode) {
|
||||
if (mapper == null) {
|
||||
throw new IllegalArgumentException("Mapper must be non-null");
|
||||
}
|
||||
return new RangeQuery(FDBTransaction.this, true, begin, end, mapper, limit, reverse, mode, eventKeeper);
|
||||
public AsyncIterable<MappedKeyValue> getMappedRange(KeySelector begin, KeySelector end, byte[] mapper,
|
||||
int limit, boolean reverse, StreamingMode mode) {
|
||||
|
||||
throw new UnsupportedOperationException("getMappedRange is only supported in serializable");
|
||||
}
|
||||
|
||||
///////////////////
|
||||
|
@ -348,9 +346,12 @@ class FDBTransaction extends NativeObjectWrapper implements Transaction, OptionC
|
|||
}
|
||||
|
||||
@Override
|
||||
public AsyncIterable<KeyValue> getRangeAndFlatMap(KeySelector begin, KeySelector end, byte[] mapper, int limit,
|
||||
boolean reverse, StreamingMode mode) {
|
||||
throw new UnsupportedOperationException("getRangeAndFlatMap is only supported in snapshot");
|
||||
public AsyncIterable<MappedKeyValue> getMappedRange(KeySelector begin, KeySelector end, byte[] mapper,
|
||||
int limit, boolean reverse, StreamingMode mode) {
|
||||
if (mapper == null) {
|
||||
throw new IllegalArgumentException("Mapper must be non-null");
|
||||
}
|
||||
return new MappedRangeQuery(FDBTransaction.this, false, begin, end, mapper, limit, reverse, mode, eventKeeper);
|
||||
}
|
||||
|
||||
///////////////////
|
||||
|
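The two hunks above split the behavior: the snapshot view now rejects mapped ranges, while the serializable transaction builds a MappedRangeQuery. A hedged sketch of what callers see as a result (begin, end and mapper assumed from context):

    db.run(tr -> {
        // serializable read: allowed, returns an AsyncIterable<MappedKeyValue>
        tr.getMappedRange(begin, end, mapper, ReadTransaction.ROW_LIMIT_UNLIMITED, false, StreamingMode.WANT_ALL);
        try {
            // snapshot read: rejected in this revision
            tr.snapshot().getMappedRange(begin, end, mapper, ReadTransaction.ROW_LIMIT_UNLIMITED, false, StreamingMode.WANT_ALL);
        } catch (UnsupportedOperationException expected) {
            // "getMappedRange is only supported in serializable"
        }
        return null;
    });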
@ -431,7 +432,6 @@ class FDBTransaction extends NativeObjectWrapper implements Transaction, OptionC
|
|||
|
||||
// Users of this function must close the returned FutureResults when finished
|
||||
protected FutureResults getRange_internal(KeySelector begin, KeySelector end,
|
||||
byte[] mapper, // Nullable
|
||||
int rowLimit, int targetBytes, int streamingMode, int iteration,
|
||||
boolean isSnapshot, boolean reverse) {
|
||||
if (eventKeeper != null) {
|
||||
|
@ -443,14 +443,33 @@ class FDBTransaction extends NativeObjectWrapper implements Transaction, OptionC
|
|||
" -- range get: (%s, %s) limit: %d, bytes: %d, mode: %d, iteration: %d, snap: %s, reverse %s",
|
||||
begin.toString(), end.toString(), rowLimit, targetBytes, streamingMode,
|
||||
iteration, Boolean.toString(isSnapshot), Boolean.toString(reverse)));*/
|
||||
return new FutureResults(
|
||||
mapper == null
|
||||
? Transaction_getRange(getPtr(), begin.getKey(), begin.orEqual(), begin.getOffset(), end.getKey(),
|
||||
end.orEqual(), end.getOffset(), rowLimit, targetBytes, streamingMode,
|
||||
iteration, isSnapshot, reverse)
|
||||
: Transaction_getRangeAndFlatMap(getPtr(), begin.getKey(), begin.orEqual(), begin.getOffset(),
|
||||
end.getKey(), end.orEqual(), end.getOffset(), mapper, rowLimit,
|
||||
targetBytes, streamingMode, iteration, isSnapshot, reverse),
|
||||
return new FutureResults(Transaction_getRange(getPtr(), begin.getKey(), begin.orEqual(), begin.getOffset(),
|
||||
end.getKey(), end.orEqual(), end.getOffset(), rowLimit,
|
||||
targetBytes, streamingMode, iteration, isSnapshot, reverse),
|
||||
FDB.instance().isDirectBufferQueriesEnabled(), executor, eventKeeper);
|
||||
} finally {
|
||||
pointerReadLock.unlock();
|
||||
}
|
||||
}
|
||||
|
||||
// Users of this function must close the returned FutureResults when finished
|
||||
protected FutureMappedResults getMappedRange_internal(KeySelector begin, KeySelector end,
|
||||
byte[] mapper, // Nullable
|
||||
int rowLimit, int targetBytes, int streamingMode,
|
||||
int iteration, boolean isSnapshot, boolean reverse) {
|
||||
if (eventKeeper != null) {
|
||||
eventKeeper.increment(Events.JNI_CALL);
|
||||
}
|
||||
pointerReadLock.lock();
|
||||
try {
|
||||
/*System.out.println(String.format(
|
||||
" -- range get: (%s, %s) limit: %d, bytes: %d, mode: %d, iteration: %d, snap: %s, reverse %s",
|
||||
begin.toString(), end.toString(), rowLimit, targetBytes, streamingMode,
|
||||
iteration, Boolean.toString(isSnapshot), Boolean.toString(reverse)));*/
|
||||
return new FutureMappedResults(
|
||||
Transaction_getMappedRange(getPtr(), begin.getKey(), begin.orEqual(), begin.getOffset(),
|
||||
end.getKey(), end.orEqual(), end.getOffset(), mapper, rowLimit,
|
||||
targetBytes, streamingMode, iteration, isSnapshot, reverse),
|
||||
FDB.instance().isDirectBufferQueriesEnabled(), executor, eventKeeper);
|
||||
} finally {
|
||||
pointerReadLock.unlock();
|
||||
|
@ -790,7 +809,7 @@ class FDBTransaction extends NativeObjectWrapper implements Transaction, OptionC
|
|||
byte[] keyEnd, boolean orEqualEnd, int offsetEnd,
|
||||
int rowLimit, int targetBytes, int streamingMode, int iteration,
|
||||
boolean isSnapshot, boolean reverse);
|
||||
private native long Transaction_getRangeAndFlatMap(long cPtr, byte[] keyBegin, boolean orEqualBegin,
|
||||
private native long Transaction_getMappedRange(long cPtr, byte[] keyBegin, boolean orEqualBegin,
|
||||
int offsetBegin, byte[] keyEnd, boolean orEqualEnd,
|
||||
int offsetEnd,
|
||||
byte[] mapper, // Nonnull
|
||||
|
|
|
@ -0,0 +1,87 @@
|
|||
/*
|
||||
* FutureMappedResults.java
|
||||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package com.apple.foundationdb;
|
||||
|
||||
import java.nio.ByteBuffer;
|
||||
import java.util.concurrent.Executor;
|
||||
|
||||
import com.apple.foundationdb.EventKeeper.Events;
|
||||
|
||||
class FutureMappedResults extends NativeFuture<MappedRangeResultInfo> {
|
||||
private final EventKeeper eventKeeper;
|
||||
FutureMappedResults(long cPtr, boolean enableDirectBufferQueries, Executor executor, EventKeeper eventKeeper) {
|
||||
super(cPtr);
|
||||
registerMarshalCallback(executor);
|
||||
this.enableDirectBufferQueries = enableDirectBufferQueries;
|
||||
this.eventKeeper = eventKeeper;
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void postMarshal(MappedRangeResultInfo rri) {
|
||||
// We can't close because this class actually marshals on-demand
|
||||
}
|
||||
|
||||
@Override
|
||||
protected MappedRangeResultInfo getIfDone_internal(long cPtr) throws FDBException {
|
||||
if (eventKeeper != null) {
|
||||
eventKeeper.increment(Events.JNI_CALL);
|
||||
}
|
||||
FDBException err = Future_getError(cPtr);
|
||||
|
||||
if (err != null && !err.isSuccess()) {
|
||||
throw err;
|
||||
}
|
||||
|
||||
return new MappedRangeResultInfo(this);
|
||||
}
|
||||
|
||||
public MappedRangeResult getResults() {
|
||||
ByteBuffer buffer = enableDirectBufferQueries ? DirectBufferPool.getInstance().poll() : null;
|
||||
if (buffer != null && eventKeeper != null) {
|
||||
eventKeeper.increment(Events.RANGE_QUERY_DIRECT_BUFFER_HIT);
|
||||
eventKeeper.increment(Events.JNI_CALL);
|
||||
} else if (eventKeeper != null) {
|
||||
eventKeeper.increment(Events.RANGE_QUERY_DIRECT_BUFFER_MISS);
|
||||
eventKeeper.increment(Events.JNI_CALL);
|
||||
}
|
||||
|
||||
try {
|
||||
pointerReadLock.lock();
|
||||
if (buffer != null) {
|
||||
try (MappedRangeResultDirectBufferIterator directIterator =
|
||||
new MappedRangeResultDirectBufferIterator(buffer)) {
|
||||
FutureMappedResults_getDirect(getPtr(), directIterator.getBuffer(),
|
||||
directIterator.getBuffer().capacity());
|
||||
return new MappedRangeResult(directIterator);
|
||||
}
|
||||
} else {
|
||||
return FutureMappedResults_get(getPtr());
|
||||
}
|
||||
} finally {
|
||||
pointerReadLock.unlock();
|
||||
}
|
||||
}
|
||||
|
||||
private boolean enableDirectBufferQueries = false;
|
||||
|
||||
private native MappedRangeResult FutureMappedResults_get(long cPtr) throws FDBException;
|
||||
private native void FutureMappedResults_getDirect(long cPtr, ByteBuffer buffer, int capacity) throws FDBException;
|
||||
}
|
|
@ -66,7 +66,7 @@ class FutureResults extends NativeFuture<RangeResultInfo> {
|
|||
try {
|
||||
pointerReadLock.lock();
|
||||
if (buffer != null) {
|
||||
try (DirectBufferIterator directIterator = new DirectBufferIterator(buffer)) {
|
||||
try (RangeResultDirectBufferIterator directIterator = new RangeResultDirectBufferIterator(buffer)) {
|
||||
FutureResults_getDirect(getPtr(), directIterator.getBuffer(), directIterator.getBuffer().capacity());
|
||||
return new RangeResult(directIterator);
|
||||
}
|
||||
|
|
|
@ -20,6 +20,8 @@
|
|||
|
||||
package com.apple.foundationdb;
|
||||
|
||||
import com.apple.foundationdb.tuple.ByteArrayUtil;
|
||||
|
||||
import java.util.Arrays;
|
||||
|
||||
/**
|
||||
|
@ -77,4 +79,13 @@ public class KeyValue {
|
|||
public int hashCode() {
|
||||
return 17 + (37 * Arrays.hashCode(key) + Arrays.hashCode(value));
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
final StringBuilder sb = new StringBuilder("KeyValue{");
|
||||
sb.append("key=").append(ByteArrayUtil.printable(key));
|
||||
sb.append(", value=").append(ByteArrayUtil.printable(value));
|
||||
sb.append('}');
|
||||
return sb.toString();
|
||||
}
|
||||
}
|
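The new toString() goes through ByteArrayUtil.printable, which keeps printable ASCII and escapes other bytes as \xNN, so keys and values are safe to log. Illustrative only:

    KeyValue kv = new KeyValue("hello".getBytes(), new byte[] { 0x00, 0x01 });
    System.out.println(kv); // prints something like: KeyValue{key=hello, value=\x00\x01}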
|
@ -0,0 +1,96 @@
|
|||
/*
|
||||
* MappedKeyValue.java
|
||||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package com.apple.foundationdb;
|
||||
|
||||
import com.apple.foundationdb.tuple.ByteArrayUtil;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
import java.util.Objects;
|
||||
|
||||
public class MappedKeyValue extends KeyValue {
|
||||
private final byte[] rangeBegin;
|
||||
private final byte[] rangeEnd;
|
||||
private final List<KeyValue> rangeResult;
|
||||
|
||||
MappedKeyValue(byte[] key, byte[] value, byte[] rangeBegin, byte[] rangeEnd, List<KeyValue> rangeResult) {
|
||||
super(key, value);
|
||||
this.rangeBegin = rangeBegin;
|
||||
this.rangeEnd = rangeEnd;
|
||||
this.rangeResult = rangeResult;
|
||||
}
|
||||
|
||||
public byte[] getRangeBegin() { return rangeBegin; }
|
||||
|
||||
public byte[] getRangeEnd() { return rangeEnd; }
|
||||
|
||||
public List<KeyValue> getRangeResult() { return rangeResult; }
|
||||
|
||||
public static MappedKeyValue fromBytes(byte[] bytes, int[] lengths) {
|
||||
// Lengths include: key, value, rangeBegin, rangeEnd, count * (underlying key, underlying value)
|
||||
if (lengths.length < 4) {
|
||||
throw new IllegalArgumentException("There needs to be at least 4 lengths to cover the metadata");
|
||||
}
|
||||
|
||||
Offset offset = new Offset();
|
||||
byte[] key = takeBytes(offset, bytes, lengths);
|
||||
byte[] value = takeBytes(offset, bytes, lengths);
|
||||
byte[] rangeBegin = takeBytes(offset, bytes, lengths);
|
||||
byte[] rangeEnd = takeBytes(offset, bytes, lengths);
|
||||
|
||||
if ((lengths.length - 4) % 2 != 0) {
|
||||
throw new IllegalArgumentException("There needs to be an even number of lengths!");
|
||||
}
|
||||
int count = (lengths.length - 4) / 2;
|
||||
List<KeyValue> rangeResult = new ArrayList<>(count);
|
||||
for (int i = 0; i < count; i++) {
|
||||
byte[] k = takeBytes(offset, bytes, lengths);
|
||||
byte[] v = takeBytes(offset, bytes, lengths);
|
||||
rangeResult.add(new KeyValue(k, v));
|
||||
}
|
||||
return new MappedKeyValue(key, value, rangeBegin, rangeEnd, rangeResult);
|
||||
}
|
||||
|
||||
static class Offset {
|
||||
int bytes = 0;
|
||||
int lengths = 0;
|
||||
}
|
||||
|
||||
static byte[] takeBytes(Offset offset, byte[] bytes, int[] lengths) {
|
||||
int len = lengths[offset.lengths];
|
||||
byte[] b = new byte[len];
|
||||
System.arraycopy(bytes, offset.bytes, b, 0, len);
|
||||
offset.lengths++;
|
||||
offset.bytes += len;
|
||||
return b;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
final StringBuilder sb = new StringBuilder("MappedKeyValue{");
|
||||
sb.append("rangeBegin=").append(ByteArrayUtil.printable(rangeBegin));
|
||||
sb.append(", rangeEnd=").append(ByteArrayUtil.printable(rangeEnd));
|
||||
sb.append(", rangeResult=").append(rangeResult);
|
||||
sb.append('}');
|
||||
return super.toString() + "->" + sb.toString();
|
||||
}
|
||||
}
|
|
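fromBytes consumes one flat byte array plus a parallel lengths array: key, value, rangeBegin, rangeEnd, then (underlying key, underlying value) pairs. A minimal round-trip sketch with illustrative data (the variable names are not from the diff):

    byte[] key = { 1 }, value = {}, rangeBegin = { 2 }, rangeEnd = { 3 }, k0 = { 4 }, v0 = { 5, 6 };
    byte[] bytes = ByteArrayUtil.join(key, value, rangeBegin, rangeEnd, k0, v0);
    int[] lengths = { key.length, value.length, rangeBegin.length, rangeEnd.length, k0.length, v0.length };
    MappedKeyValue mkv = MappedKeyValue.fromBytes(bytes, lengths);
    // mkv.getRangeResult() now holds a single KeyValue(k0, v0)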
@ -0,0 +1,333 @@
|
|||
/*
|
||||
* RangeQuery.java
|
||||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package com.apple.foundationdb;
|
||||
|
||||
import com.apple.foundationdb.EventKeeper.Events;
|
||||
import com.apple.foundationdb.async.AsyncIterable;
|
||||
import com.apple.foundationdb.async.AsyncIterator;
|
||||
import com.apple.foundationdb.async.AsyncUtil;
|
||||
|
||||
import java.util.List;
|
||||
import java.util.NoSuchElementException;
|
||||
import java.util.concurrent.CancellationException;
|
||||
import java.util.concurrent.CompletableFuture;
|
||||
import java.util.function.BiConsumer;
|
||||
|
||||
// TODO: Share code with RangeQuery?
|
||||
/**
|
||||
* Represents a query against FoundationDB for a range of keys. The
|
||||
* result of this query can be iterated over in a blocking fashion with a call to
|
||||
* {@link #iterator()} (as specified by {@link Iterable}).
|
||||
* If the calling program uses an asynchronous paradigm, a non-blocking
|
||||
* {@link AsyncIterator} is returned from {@link #iterator()}. Both of these
|
||||
* constructions will not begin to query the database until the first call to
|
||||
* {@code hasNext()}. As the query uses its {@link Transaction} of origin to fetch
|
||||
* all the data, the use of this query object must not span more than a few seconds.
|
||||
*
|
||||
* <br><br><b>NOTE:</b> although resulting {@code Iterator}s do support the {@code remove()}
|
||||
* operation, the remove is not durable until {@code commit()} on the {@code Transaction}
|
||||
* that yielded this query returns <code>true</code>.
|
||||
*/
|
||||
class MappedRangeQuery implements AsyncIterable<MappedKeyValue> {
|
||||
private final FDBTransaction tr;
|
||||
private final KeySelector begin;
|
||||
private final KeySelector end;
|
||||
private final byte[] mapper; // Nonnull
|
||||
private final boolean snapshot;
|
||||
private final int rowLimit;
|
||||
private final boolean reverse;
|
||||
private final StreamingMode streamingMode;
|
||||
private final EventKeeper eventKeeper;
|
||||
|
||||
MappedRangeQuery(FDBTransaction transaction, boolean isSnapshot, KeySelector begin, KeySelector end, byte[] mapper,
|
||||
int rowLimit, boolean reverse, StreamingMode streamingMode, EventKeeper eventKeeper) {
|
||||
this.tr = transaction;
|
||||
this.begin = begin;
|
||||
this.end = end;
|
||||
this.mapper = mapper;
|
||||
this.snapshot = isSnapshot;
|
||||
this.rowLimit = rowLimit;
|
||||
this.reverse = reverse;
|
||||
this.streamingMode = streamingMode;
|
||||
this.eventKeeper = eventKeeper;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns all the results from the range requested as a {@code List}. If there were no
|
||||
* limits on the original query and there is a large amount of data in the database
|
||||
* this call could use a very large amount of memory.
|
||||
*
|
||||
* @return a {@code CompletableFuture} that will be set to the contents of the database
|
||||
* constrained by the query parameters.
|
||||
*/
|
||||
@Override
|
||||
public CompletableFuture<List<MappedKeyValue>> asList() {
|
||||
StreamingMode mode = this.streamingMode;
|
||||
if (mode == StreamingMode.ITERATOR) mode = (this.rowLimit == 0) ? StreamingMode.WANT_ALL : StreamingMode.EXACT;
|
||||
|
||||
// if the streaming mode is EXACT, try and grab things as one chunk
|
||||
if (mode == StreamingMode.EXACT) {
|
||||
|
||||
FutureMappedResults range =
|
||||
tr.getMappedRange_internal(this.begin, this.end, this.mapper, this.rowLimit, 0,
|
||||
StreamingMode.EXACT.code(), 1, this.snapshot, this.reverse);
|
||||
return range.thenApply(result -> result.get().values).whenComplete((result, e) -> range.close());
|
||||
}
|
||||
|
||||
// If the streaming mode is not EXACT, simply collect the results of an
|
||||
// iteration into a list
|
||||
return AsyncUtil.collect(
|
||||
new MappedRangeQuery(tr, snapshot, begin, end, mapper, rowLimit, reverse, mode, eventKeeper),
|
||||
tr.getExecutor());
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns an {@code Iterator} over the results of this query against FoundationDB.
|
||||
*
|
||||
* @return an {@code Iterator} over type {@code MappedKeyValue}.
|
||||
*/
|
||||
@Override
|
||||
public AsyncRangeIterator iterator() {
|
||||
return new AsyncRangeIterator(this.rowLimit, this.reverse, this.streamingMode);
|
||||
}
|
||||
|
||||
private class AsyncRangeIterator implements AsyncIterator<MappedKeyValue> {
|
||||
// immutable aspects of this iterator
|
||||
private final boolean rowsLimited;
|
||||
private final boolean reverse;
|
||||
private final StreamingMode streamingMode;
|
||||
|
||||
// There is the chance for parallelism in the two "chunks" for fetched data
|
||||
private MappedRangeResult chunk = null;
|
||||
private MappedRangeResult nextChunk = null;
|
||||
private boolean fetchOutstanding = false;
|
||||
private byte[] prevKey = null;
|
||||
private int index = 0;
|
||||
private int iteration = 0;
|
||||
private KeySelector begin;
|
||||
private KeySelector end;
|
||||
|
||||
private int rowsRemaining;
|
||||
|
||||
private FutureMappedResults fetchingChunk;
|
||||
private CompletableFuture<Boolean> nextFuture;
|
||||
private boolean isCancelled = false;
|
||||
|
||||
private AsyncRangeIterator(int rowLimit, boolean reverse, StreamingMode streamingMode) {
|
||||
this.begin = MappedRangeQuery.this.begin;
|
||||
this.end = MappedRangeQuery.this.end;
|
||||
this.rowsLimited = rowLimit != 0;
|
||||
this.rowsRemaining = rowLimit;
|
||||
this.reverse = reverse;
|
||||
this.streamingMode = streamingMode;
|
||||
|
||||
startNextFetch();
|
||||
}
|
||||
|
||||
private synchronized boolean mainChunkIsTheLast() { return !chunk.more || (rowsLimited && rowsRemaining < 1); }
|
||||
|
||||
class FetchComplete implements BiConsumer<MappedRangeResultInfo, Throwable> {
|
||||
final FutureMappedResults fetchingChunk;
|
||||
final CompletableFuture<Boolean> promise;
|
||||
|
||||
FetchComplete(FutureMappedResults fetch, CompletableFuture<Boolean> promise) {
|
||||
this.fetchingChunk = fetch;
|
||||
this.promise = promise;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void accept(MappedRangeResultInfo data, Throwable error) {
|
||||
try {
|
||||
if (error != null) {
|
||||
if (eventKeeper != null) {
|
||||
eventKeeper.increment(Events.RANGE_QUERY_CHUNK_FAILED);
|
||||
}
|
||||
promise.completeExceptionally(error);
|
||||
if (error instanceof Error) {
|
||||
throw(Error) error;
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
final MappedRangeResult rangeResult = data.get();
|
||||
final RangeResultSummary summary = rangeResult.getSummary();
|
||||
if (summary.lastKey == null) {
|
||||
promise.complete(Boolean.FALSE);
|
||||
return;
|
||||
}
|
||||
|
||||
synchronized (MappedRangeQuery.AsyncRangeIterator.this) {
|
||||
fetchOutstanding = false;
|
||||
|
||||
// adjust the total number of rows we should ever fetch
|
||||
rowsRemaining -= summary.keyCount;
|
||||
|
||||
// set up the next fetch
|
||||
if (reverse) {
|
||||
end = KeySelector.firstGreaterOrEqual(summary.lastKey);
|
||||
} else {
|
||||
begin = KeySelector.firstGreaterThan(summary.lastKey);
|
||||
}
|
||||
|
||||
// If this is the first fetch or the main chunk is exhausted
|
||||
if (chunk == null || index == chunk.values.size()) {
|
||||
nextChunk = null;
|
||||
chunk = rangeResult;
|
||||
index = 0;
|
||||
} else {
|
||||
nextChunk = rangeResult;
|
||||
}
|
||||
}
|
||||
|
||||
promise.complete(Boolean.TRUE);
|
||||
} finally {
|
||||
fetchingChunk.close();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private synchronized void startNextFetch() {
|
||||
if (fetchOutstanding)
|
||||
throw new IllegalStateException("Reentrant call not allowed"); // This cannot be called reentrantly
|
||||
if (isCancelled) return;
|
||||
|
||||
if (chunk != null && mainChunkIsTheLast()) return;
|
||||
|
||||
fetchOutstanding = true;
|
||||
nextChunk = null;
|
||||
|
||||
nextFuture = new CompletableFuture<>();
|
||||
final long sTime = System.nanoTime();
|
||||
fetchingChunk = tr.getMappedRange_internal(begin, end, mapper, rowsLimited ? rowsRemaining : 0, 0,
|
||||
streamingMode.code(), ++iteration, snapshot, reverse);
|
||||
|
||||
BiConsumer<MappedRangeResultInfo, Throwable> cons = new FetchComplete(fetchingChunk, nextFuture);
|
||||
if (eventKeeper != null) {
|
||||
eventKeeper.increment(Events.RANGE_QUERY_FETCHES);
|
||||
cons = cons.andThen((r, t) -> {
|
||||
eventKeeper.timeNanos(Events.RANGE_QUERY_FETCH_TIME_NANOS, System.nanoTime() - sTime);
|
||||
});
|
||||
}
|
||||
|
||||
fetchingChunk.whenComplete(cons);
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized CompletableFuture<Boolean> onHasNext() {
|
||||
if (isCancelled) throw new CancellationException();
|
||||
|
||||
// This will only happen before the first fetch has completed
|
||||
if (chunk == null) {
|
||||
return nextFuture;
|
||||
}
|
||||
|
||||
// We have a chunk and are still working though it
|
||||
if (index < chunk.values.size()) {
|
||||
return AsyncUtil.READY_TRUE;
|
||||
}
|
||||
|
||||
// If we are at the end of the current chunk there is either:
|
||||
// - no more data -or-
|
||||
// - we are already fetching the next block
|
||||
return mainChunkIsTheLast() ? AsyncUtil.READY_FALSE : nextFuture;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean hasNext() {
|
||||
return onHasNext().join();
|
||||
}
|
||||
|
||||
@Override
|
||||
public MappedKeyValue next() {
|
||||
CompletableFuture<Boolean> nextFuture;
|
||||
synchronized (this) {
|
||||
if (isCancelled) throw new CancellationException();
|
||||
|
||||
// at least the first chunk has been fetched and there is at least one
|
||||
// available result
|
||||
if (chunk != null && index < chunk.values.size()) {
|
||||
// If this is the first call to next() on a chunk, then we will want to
|
||||
// start fetching the data for the next block
|
||||
boolean initialNext = index == 0;
|
||||
|
||||
MappedKeyValue result = chunk.values.get(index);
|
||||
prevKey = result.getKey();
|
||||
index++;
|
||||
|
||||
if (eventKeeper != null) {
|
||||
// We record the BYTES_FETCHED here, rather than at a lower level,
|
||||
// because some parts of the construction of a MappedRangeResult occur underneath
|
||||
// the JNI boundary, and we don't want to pass the eventKeeper down there
|
||||
// (note: account for the length fields as well when recording the bytes
|
||||
// fetched)
|
||||
eventKeeper.count(Events.BYTES_FETCHED, result.getKey().length + result.getValue().length + 8);
|
||||
eventKeeper.increment(Events.RANGE_QUERY_RECORDS_FETCHED);
|
||||
}
|
||||
|
||||
// If this is the first call to next() on a chunk there cannot
|
||||
// be another waiting, since we could not have issued a request
|
||||
assert (!(initialNext && nextChunk != null));
|
||||
|
||||
// we are at the end of the current chunk and there is more to be had already
|
||||
if (index == chunk.values.size() && nextChunk != null) {
|
||||
index = 0;
|
||||
chunk = nextChunk;
|
||||
nextChunk = null;
|
||||
}
|
||||
|
||||
if (initialNext) {
|
||||
startNextFetch();
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
nextFuture = onHasNext();
|
||||
}
|
||||
|
||||
// If there was no result ready then we need to wait on the future
|
||||
// and return the proper result, throwing if there are no more elements
|
||||
return nextFuture
|
||||
.thenApply(hasNext -> {
|
||||
if (hasNext) {
|
||||
return next();
|
||||
}
|
||||
throw new NoSuchElementException();
|
||||
})
|
||||
.join();
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized void remove() {
|
||||
if (prevKey == null) throw new IllegalStateException("No value has been fetched from database");
|
||||
|
||||
tr.clear(prevKey);
|
||||
}
|
||||
|
||||
@Override
|
||||
public synchronized void cancel() {
|
||||
isCancelled = true;
|
||||
nextFuture.cancel(true);
|
||||
fetchingChunk.cancel(true);
|
||||
}
|
||||
}
|
||||
}
|
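Beyond asList(), the iterator path streams chunks lazily: each call to hasNext()/next() may trigger the next fetch via startNextFetch(). A consumption sketch, assuming a serializable transaction tr and a tuple-packed mapper:

    AsyncIterator<MappedKeyValue> it =
        tr.getMappedRange(begin, end, mapper, ReadTransaction.ROW_LIMIT_UNLIMITED, false, StreamingMode.ITERATOR)
          .iterator();
    while (it.hasNext()) {              // blocks on onHasNext() until the next chunk arrives
        MappedKeyValue mkv = it.next();
        handle(mkv.getKey(), mkv.getRangeResult()); // handle() is a placeholder
    }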
|
@ -0,0 +1,64 @@
|
|||
/*
|
||||
* MappedRangeResult.java
|
||||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package com.apple.foundationdb;
|
||||
|
||||
import com.apple.foundationdb.tuple.ByteArrayUtil;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
|
||||
class MappedRangeResult {
|
||||
final List<MappedKeyValue> values;
|
||||
final boolean more;
|
||||
|
||||
public MappedRangeResult(MappedKeyValue[] values, boolean more) {
|
||||
this.values = Arrays.asList(values);
|
||||
this.more = more;
|
||||
}
|
||||
|
||||
MappedRangeResult(MappedRangeResultDirectBufferIterator iterator) {
|
||||
iterator.readResultsSummary();
|
||||
more = iterator.hasMore();
|
||||
|
||||
int count = iterator.count();
|
||||
values = new ArrayList<>(count);
|
||||
|
||||
for (int i = 0; i < count; ++i) {
|
||||
values.add(iterator.next());
|
||||
}
|
||||
}
|
||||
|
||||
public RangeResultSummary getSummary() {
|
||||
final int keyCount = values.size();
|
||||
final byte[] lastKey = keyCount > 0 ? values.get(keyCount - 1).getKey() : null;
|
||||
return new RangeResultSummary(lastKey, keyCount, more);
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
final StringBuilder sb = new StringBuilder("MappedRangeResult{");
|
||||
sb.append("values=").append(values);
|
||||
sb.append(", more=").append(more);
|
||||
sb.append('}');
|
||||
return sb.toString();
|
||||
}
|
||||
}
|
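getSummary() is what the iterator's FetchComplete callback uses to decide whether and where to continue. A simplified sketch of that continuation for a forward scan (field access assumes same-package code, as above):

    RangeResultSummary summary = rangeResult.getSummary();
    if (summary.lastKey != null && rangeResult.more) {
        begin = KeySelector.firstGreaterThan(summary.lastKey); // next chunk starts just past the last mapped key
    } // otherwise the scan is complete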
|
@ -0,0 +1,71 @@
|
|||
/*
|
||||
* MappedRangeResultDirectBufferIterator.java
|
||||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2015-2022 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package com.apple.foundationdb;
|
||||
|
||||
import java.io.Closeable;
|
||||
import java.nio.ByteBuffer;
|
||||
import java.nio.ByteOrder;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import java.util.NoSuchElementException;
|
||||
|
||||
/**
|
||||
* Holds the direct buffer that is shared with JNI wrapper.
|
||||
*/
|
||||
class MappedRangeResultDirectBufferIterator extends DirectBufferIterator implements Iterator<KeyValue> {
|
||||
|
||||
MappedRangeResultDirectBufferIterator(ByteBuffer buffer) { super(buffer); }
|
||||
|
||||
@Override
|
||||
public boolean hasNext() {
|
||||
return super.hasNext();
|
||||
}
|
||||
|
||||
@Override
|
||||
public MappedKeyValue next() {
|
||||
assert (hasResultReady()); // Must only be called once the result is ready.
|
||||
if (!hasNext()) {
|
||||
throw new NoSuchElementException();
|
||||
}
|
||||
|
||||
final byte[] key = getString();
|
||||
final byte[] value = getString();
|
||||
final byte[] rangeBegin = getString();
|
||||
final byte[] rangeEnd = getString();
|
||||
final int rangeResultSize = byteBuffer.getInt();
|
||||
List<KeyValue> rangeResult = new ArrayList<>(rangeResultSize);
|
||||
for (int i = 0; i < rangeResultSize; i++) {
|
||||
final byte[] k = getString();
|
||||
final byte[] v = getString();
|
||||
rangeResult.add(new KeyValue(k, v));
|
||||
}
|
||||
current += 1;
|
||||
return new MappedKeyValue(key, value, rangeBegin, rangeEnd, rangeResult);
|
||||
}
|
||||
|
||||
private byte[] getString() {
|
||||
final int len = byteBuffer.getInt();
|
||||
byte[] s = new byte[len];
|
||||
byteBuffer.get(s);
|
||||
return s;
|
||||
}
|
||||
}
|