Merge branch 'apple-main' into ssupdateb4registration
commit 3cd69743b9
@@ -3,7 +3,7 @@
 *
 * This source file is part of the FoundationDB open source project
 *
- * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
+ * Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.

@@ -4,7 +4,7 @@
 #
 # This source file is part of the FoundationDB open source project
 #
-# Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
+# Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -202,6 +202,7 @@ class TestRunner(object):
         self.args.types = list(reduce(lambda t1, t2: filter(t1.__contains__, t2), map(lambda tester: tester.types, self.testers)))

         self.args.no_directory_snapshot_ops = self.args.no_directory_snapshot_ops or any([not tester.directory_snapshot_ops_enabled for tester in self.testers])
+        self.args.no_tenants = self.args.no_tenants or any([not tester.tenants_enabled for tester in self.testers])

     def print_test(self):
         test_instructions = self._generate_test()
@@ -282,6 +283,17 @@ class TestRunner(object):
     def _insert_instructions(self, test_instructions):
         util.get_logger().info('\nInserting test into database...')
         del self.db[:]
+
+        while True:
+            tr = self.db.create_transaction()
+            try:
+                tr.options.set_special_key_space_enable_writes()
+                del tr[b'\xff\xff/management/tenant_map/' : b'\xff\xff/management/tenant_map0']
+                tr.commit().wait()
+                break
+            except fdb.FDBError as e:
+                tr.on_error(e).wait()
+
         for subspace, thread in test_instructions.items():
             thread.insert_operations(self.db, subspace)

@@ -445,6 +457,8 @@ def parse_args(argv):

     parser.add_argument('--no-directory-snapshot-ops', action='store_true', help='Disables snapshot operations for directory instructions.')

+    parser.add_argument('--no-tenants', action='store_true', help='Disables tenant operations.')
+
     return parser.parse_args(argv)

@@ -26,7 +26,7 @@ ALL_TYPES = COMMON_TYPES + ['versionstamp']


 class Tester:
-    def __init__(self, name, cmd, max_int_bits=64, min_api_version=0, max_api_version=MAX_API_VERSION, threads_enabled=True, types=COMMON_TYPES, directory_snapshot_ops_enabled=True):
+    def __init__(self, name, cmd, max_int_bits=64, min_api_version=0, max_api_version=MAX_API_VERSION, threads_enabled=True, types=COMMON_TYPES, directory_snapshot_ops_enabled=True, tenants_enabled=False):
         self.name = name
         self.cmd = cmd
         self.max_int_bits = max_int_bits
@@ -35,6 +35,7 @@ class Tester:
         self.threads_enabled = threads_enabled
         self.types = types
         self.directory_snapshot_ops_enabled = directory_snapshot_ops_enabled
+        self.tenants_enabled = tenants_enabled

     def supports_api_version(self, api_version):
         return api_version >= self.min_api_version and api_version <= self.max_api_version
@@ -57,8 +58,8 @@ _java_cmd = 'java -ea -cp %s:%s com.apple.foundationdb.test.' % (

 # We could set min_api_version lower on some of these if the testers were updated to support them
 testers = {
-    'python': Tester('python', 'python ' + _absolute_path('python/tests/tester.py'), 2040, 23, MAX_API_VERSION, types=ALL_TYPES),
-    'python3': Tester('python3', 'python3 ' + _absolute_path('python/tests/tester.py'), 2040, 23, MAX_API_VERSION, types=ALL_TYPES),
+    'python': Tester('python', 'python ' + _absolute_path('python/tests/tester.py'), 2040, 23, MAX_API_VERSION, types=ALL_TYPES, tenants_enabled=True),
+    'python3': Tester('python3', 'python3 ' + _absolute_path('python/tests/tester.py'), 2040, 23, MAX_API_VERSION, types=ALL_TYPES, tenants_enabled=True),
     'ruby': Tester('ruby', _absolute_path('ruby/tests/tester.rb'), 2040, 23, MAX_API_VERSION),
     'java': Tester('java', _java_cmd + 'StackTester', 2040, 510, MAX_API_VERSION, types=ALL_TYPES),
     'java_async': Tester('java', _java_cmd + 'AsyncStackTester', 2040, 510, MAX_API_VERSION, types=ALL_TYPES),
@@ -0,0 +1,77 @@
+Overview
+--------
+
+Tenant testing is an optional extension to the core binding tester that enables
+testing of the tenant API. This testing is enabled by adding some additional
+instructions and modifying the behavior of some existing instructions.
+
+Additional State and Initialization
+-----------------------------------
+
+Your tester should store an additional piece of state tracking the active tenant
+that is to be used to create transactions. This tenant must support an unset
+state, in which case transactions will be created directly on the database.
+
+New Instructions
+----------------
+
+The tenant API introduces some new operations:
+
+#### TENANT_CREATE
+
+Pops the top item off of the stack as TENANT_NAME. Creates a new tenant
+in the database with the name TENANT_NAME. May optionally push a future
+onto the stack.
+
+#### TENANT_DELETE
+
+Pops the top item off of the stack as TENANT_NAME. Deletes the tenant with
+the name TENANT_NAME from the database. May optionally push a future onto
+the stack.
+
+#### TENANT_SET_ACTIVE
+
+Pops the top item off of the stack as TENANT_NAME. Opens the tenant with
+name TENANT_NAME and stores it as the active tenant.
+
+#### TENANT_CLEAR_ACTIVE
+
+Unsets the active tenant.
+
+Updates to Existing Instructions
+--------------------------------
+
+Some existing operations in the binding tester will have slightly modified
+behavior when tenants are enabled.
+
+#### NEW_TRANSACTION
+
+When creating a new transaction, the active tenant should be used. If no active
+tenant is set, then the transaction should be created as normal using the
+database.
+
+#### _TENANT suffix
+
+Similar to the _DATABASE suffix, an operation with the _TENANT suffix indicates
+that the operation should be performed on the current active tenant object. If
+there is no active tenant, then the operation should be performed on the database
+as if _DATABASE was specified. In any case where the operation suffixed with
+_DATABASE is allowed to push a future onto the stack, the same operation suffixed
+with _TENANT is also allowed to push a future onto the stack.
+
+If your binding does not support operations directly on a tenant object, you should
+simulate it using an anonymous transaction. Remember that set and clear operations
+must immediately commit (with appropriate retry behavior!).
+
+Operations that can include the _TENANT suffix are:
+
+    GET_TENANT
+    GET_KEY_TENANT
+    GET_RANGE_TENANT
+    GET_RANGE_STARTS_WITH_TENANT
+    GET_RANGE_SELECTOR_TENANT
+    SET_TENANT
+    CLEAR_TENANT
+    CLEAR_RANGE_TENANT
+    CLEAR_RANGE_STARTS_WITH_TENANT
+    ATOMIC_OP_TENANT
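For orientation only (this sketch is not part of the patch): one plausible way a stack-machine tester could wire up the new instructions, assuming a hypothetical tester object with a `self.stack` list and a `self.db` handle, and assuming the binding exposes an `open_tenant(name)` method on the database. Tenant creation and deletion go through the `\xff\xff/management/tenant_map/` special key space, mirroring what the test runner and the C unit test in this same diff do.

```python
import fdb


class TenantInstructionMixin:
    active_tenant = None  # unset state: operate directly on the database

    def _write_tenant_map_key(self, name, delete=False):
        # Create or delete a tenant through the management special key space,
        # as the binding tester runner does when clearing the tenant map.
        @fdb.transactional
        def run(tr):
            tr.options.set_special_key_space_enable_writes()
            key = b'\xff\xff/management/tenant_map/' + name
            if delete:
                del tr[key]
            else:
                tr[key] = b''
        run(self.db)

    def execute_tenant_op(self, op):
        if op == 'TENANT_CREATE':
            self._write_tenant_map_key(self.stack.pop())
        elif op == 'TENANT_DELETE':
            self._write_tenant_map_key(self.stack.pop(), delete=True)
        elif op == 'TENANT_SET_ACTIVE':
            self.active_tenant = self.db.open_tenant(self.stack.pop())  # assumed API
        elif op == 'TENANT_CLEAR_ACTIVE':
            self.active_tenant = None

    def new_transaction(self):
        # NEW_TRANSACTION: prefer the active tenant, else fall back to the database.
        creator = self.active_tenant if self.active_tenant is not None else self.db
        return creator.create_transaction()
```

The fallback in `new_transaction` is exactly the NEW_TRANSACTION behavior described in the spec above: an unset active tenant means the transaction is created directly on the database.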
@@ -58,6 +58,7 @@ class ApiTest(Test):
         self.outstanding_ops = []
         self.random = test_util.RandomGenerator(args.max_int_bits, args.api_version, args.types)
         self.api_version = args.api_version
+        self.allocated_tenants = set()

     def add_stack_items(self, num):
         self.stack_size += num
@@ -137,6 +138,12 @@ class ApiTest(Test):
             test_util.to_front(instructions, self.stack_size - read[0])
             instructions.append('WAIT_FUTURE')

+    def choose_tenant(self, new_tenant_probability):
+        if len(self.allocated_tenants) == 0 or random.random() < new_tenant_probability:
+            return self.random.random_string(random.randint(0, 30))
+        else:
+            return random.choice(list(self.allocated_tenants))
+
     def generate(self, args, thread_number):
         instructions = InstructionSet()

@@ -158,6 +165,7 @@ class ApiTest(Test):
         write_conflicts = ['WRITE_CONFLICT_RANGE', 'WRITE_CONFLICT_KEY', 'DISABLE_WRITE_CONFLICT']
         txn_sizes = ['GET_APPROXIMATE_SIZE']
         storage_metrics = ['GET_ESTIMATED_RANGE_SIZE', 'GET_RANGE_SPLIT_POINTS']
+        tenants = ['TENANT_CREATE', 'TENANT_DELETE', 'TENANT_SET_ACTIVE', 'TENANT_CLEAR_ACTIVE']

         op_choices += reads
         op_choices += mutations
@@ -173,6 +181,9 @@ class ApiTest(Test):
         op_choices += txn_sizes
         op_choices += storage_metrics

+        if not args.no_tenants:
+            op_choices += tenants
+
         idempotent_atomic_ops = ['BIT_AND', 'BIT_OR', 'MAX', 'MIN', 'BYTE_MIN', 'BYTE_MAX']
         atomic_ops = idempotent_atomic_ops + ['ADD', 'BIT_XOR', 'APPEND_IF_FITS']

@@ -195,7 +206,7 @@ class ApiTest(Test):

             # print 'Adding instruction %s at %d' % (op, index)

-            if args.concurrency == 1 and (op in database_mutations):
+            if args.concurrency == 1 and (op in database_mutations or op in ['TENANT_CREATE', 'TENANT_DELETE']):
                 self.wait_for_reads(instructions)
                 test_util.blocking_commit(instructions)
                 self.can_get_commit_version = False
@@ -570,18 +581,39 @@ class ApiTest(Test):
                 instructions.push_args(key1, key2, chunkSize)
                 instructions.append(op)
                 self.add_strings(1)

+            elif op == 'TENANT_CREATE':
+                tenant_name = self.choose_tenant(0.8)
+                self.allocated_tenants.add(tenant_name)
+                instructions.push_args(tenant_name)
+                instructions.append(op)
+                self.add_strings(1)
+            elif op == 'TENANT_DELETE':
+                tenant_name = self.choose_tenant(0.2)
+                if tenant_name in self.allocated_tenants:
+                    self.allocated_tenants.remove(tenant_name)
+                instructions.push_args(tenant_name)
+                instructions.append(op)
+                self.add_strings(1)
+            elif op == 'TENANT_SET_ACTIVE':
+                tenant_name = self.choose_tenant(0.8)
+                instructions.push_args(tenant_name)
+                instructions.append(op)
+            elif op == 'TENANT_CLEAR_ACTIVE':
+                instructions.append(op)
             else:
                 assert False, 'Unknown operation: ' + op

             if read_performed and op not in database_reads:
                 self.outstanding_ops.append((self.stack_size, len(instructions) - 1))

-            if args.concurrency == 1 and (op in database_reads or op in database_mutations):
+            if args.concurrency == 1 and (op in database_reads or op in database_mutations or op in ['TENANT_CREATE', 'TENANT_DELETE']):
                 instructions.append('WAIT_FUTURE')

         instructions.begin_finalization()

+        if not args.no_tenants:
+            instructions.append('TENANT_CLEAR_ACTIVE')
+
         if args.concurrency == 1:
             self.wait_for_reads(instructions)
             test_util.blocking_commit(instructions)
@@ -135,6 +135,7 @@ if(NOT WIN32)
   add_executable(fdb_c_performance_test test/performance_test.c test/test.h)
   add_executable(fdb_c_ryw_benchmark test/ryw_benchmark.c test/test.h)
   add_executable(fdb_c_txn_size_test test/txn_size_test.c test/test.h)
+  add_executable(fdb_c_client_memory_test test/client_memory_test.cpp test/unit/fdb_api.cpp test/unit/fdb_api.hpp)
   add_executable(mako ${MAKO_SRCS})
   add_executable(fdb_c_setup_tests test/unit/setup_tests.cpp)
   add_executable(fdb_c_unit_tests ${UNIT_TEST_SRCS})
@@ -145,10 +146,12 @@ if(NOT WIN32)
     strip_debug_symbols(fdb_c_performance_test)
     strip_debug_symbols(fdb_c_ryw_benchmark)
     strip_debug_symbols(fdb_c_txn_size_test)
+    strip_debug_symbols(fdb_c_client_memory_test)
   endif()
   target_link_libraries(fdb_c_performance_test PRIVATE fdb_c Threads::Threads)
   target_link_libraries(fdb_c_ryw_benchmark PRIVATE fdb_c Threads::Threads)
   target_link_libraries(fdb_c_txn_size_test PRIVATE fdb_c Threads::Threads)
+  target_link_libraries(fdb_c_client_memory_test PRIVATE fdb_c Threads::Threads)

   add_dependencies(fdb_c_setup_tests doctest)
   add_dependencies(fdb_c_unit_tests doctest)
@@ -3,7 +3,7 @@
 *
 * This source file is part of the FoundationDB open source project
 *
- * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
+ * Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -835,9 +835,10 @@ extern "C" DLLEXPORT FDBResult* fdb_transaction_read_blob_granules(FDBTransactio
     context.get_load_f = granule_context.get_load_f;
     context.free_load_f = granule_context.free_load_f;
     context.debugNoMaterialize = granule_context.debugNoMaterialize;
+    context.granuleParallelism = granule_context.granuleParallelism;

     Optional<Version> rv;
-    if (readVersion != invalidVersion) { rv = readVersion; }
+    if (readVersion != latestVersion) { rv = readVersion; }

     return (FDBResult*)(TXN(tr)->readBlobGranules(range, beginVersion, rv, context).extractPtr()););
 }
@@ -3,7 +3,7 @@
 *
 * This source file is part of the FoundationDB open source project
 *
- * Copyright 2013-2019 Apple Inc. and the FoundationDB project authors
+ * Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.

@@ -3,7 +3,7 @@
 *
 * This source file is part of the FoundationDB open source project
 *
- * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
+ * Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -185,7 +185,12 @@ typedef struct readgranulecontext {
     void* userContext;

     /* Returns a unique id for the load. Asynchronous to support queueing multiple in parallel. */
-    int64_t (*start_load_f)(const char* filename, int filenameLength, int64_t offset, int64_t length, void* context);
+    int64_t (*start_load_f)(const char* filename,
+                            int filenameLength,
+                            int64_t offset,
+                            int64_t length,
+                            int64_t fullFileLength,
+                            void* context);

     /* Returns data for the load. Pass the loadId returned by start_load_f */
     uint8_t* (*get_load_f)(int64_t loadId, void* context);
@@ -196,6 +201,9 @@ typedef struct readgranulecontext {
     /* Set this to true for testing if you don't want to read the granule files,
        just do the request to the blob workers */
     fdb_bool_t debugNoMaterialize;
+
+    /* Number of granules to load in parallel */
+    int granuleParallelism;
 } FDBReadBlobGranuleContext;

 DLLEXPORT void fdb_future_cancel(FDBFuture* f);
@@ -441,15 +449,15 @@ DLLEXPORT WARN_UNUSED_RESULT FDBFuture* fdb_transaction_get_range_split_points(F
                                                                                 int end_key_name_length,
                                                                                 int64_t chunk_size);

-DLLEXPORT WARN_UNUSED_RESULT FDBFuture* fdb_transaction_get_blob_granule_ranges(FDBTransaction* db,
+DLLEXPORT WARN_UNUSED_RESULT FDBFuture* fdb_transaction_get_blob_granule_ranges(FDBTransaction* tr,
                                                                                  uint8_t const* begin_key_name,
                                                                                  int begin_key_name_length,
                                                                                  uint8_t const* end_key_name,
                                                                                  int end_key_name_length);

-/* InvalidVersion (-1) for readVersion means get read version from transaction
+/* LatestVersion (-2) for readVersion means get read version from transaction
    Separated out as optional because BG reads can support longer-lived reads than normal FDB transactions */
-DLLEXPORT WARN_UNUSED_RESULT FDBResult* fdb_transaction_read_blob_granules(FDBTransaction* db,
+DLLEXPORT WARN_UNUSED_RESULT FDBResult* fdb_transaction_read_blob_granules(FDBTransaction* tr,
                                                                             uint8_t const* begin_key_name,
                                                                             int begin_key_name_length,
                                                                             uint8_t const* end_key_name,
@@ -0,0 +1,83 @@
+/*
+ * client_memory_test.cpp
+ *
+ * This source file is part of the FoundationDB open source project
+ *
+ * Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define FDB_API_VERSION 710
+#include <foundationdb/fdb_c.h>
+
+#include "unit/fdb_api.hpp"
+
+#include <thread>
+#include <iostream>
+#include <vector>
+
+void fdb_check(fdb_error_t e) {
+    if (e) {
+        std::cerr << fdb_get_error(e) << std::endl;
+        std::abort();
+    }
+}
+
+FDBDatabase* fdb_open_database(const char* clusterFile) {
+    FDBDatabase* db;
+    fdb_check(fdb_create_database(clusterFile, &db));
+    return db;
+}
+
+int main(int argc, char** argv) {
+    if (argc != 2) {
+        printf("Usage: %s <cluster_file>", argv[0]);
+    }
+    fdb_check(fdb_select_api_version(710));
+    fdb_check(fdb_setup_network());
+    std::thread network_thread{ &fdb_run_network };
+
+    fdb_check(
+        fdb_network_set_option(FDBNetworkOption::FDB_NET_OPTION_TRACE_ENABLE, reinterpret_cast<const uint8_t*>(""), 0));
+    fdb_check(fdb_network_set_option(
+        FDBNetworkOption::FDB_NET_OPTION_TRACE_FORMAT, reinterpret_cast<const uint8_t*>("json"), 4));
+
+    // Use a bunch of memory from different client threads
+    FDBDatabase* db = fdb_open_database(argv[1]);
+    auto thread_func = [&]() {
+        fdb::Transaction tr(db);
+        for (int i = 0; i < 10000; ++i) {
+            tr.set(std::to_string(i), std::string(i, '\x00'));
+        }
+        tr.cancel();
+    };
+    std::vector<std::thread> threads;
+    constexpr auto kThreadCount = 64;
+    for (int i = 0; i < kThreadCount; ++i) {
+        threads.emplace_back(thread_func);
+    }
+    for (auto& thread : threads) {
+        thread.join();
+    }
+    fdb_database_destroy(db);
+    db = nullptr;
+
+    // Memory usage should go down now if the allocator is returning memory to the OS. It's expected that something is
+    // externally monitoring the memory usage of this process during this sleep.
+    using namespace std::chrono_literals;
+    std::this_thread::sleep_for(10s);
+
+    fdb_check(fdb_stop_network());
+    network_thread.join();
+}
@@ -585,6 +585,7 @@ int64_t granule_start_load(const char* filename,
                            int filenameLength,
                            int64_t offset,
                            int64_t length,
+                           int64_t fullFileLength,
                            void* userContext) {
     FILE* fp;
     char full_fname[PATH_MAX];
@@ -682,6 +683,7 @@ int run_op_read_blob_granules(FDBTransaction* transaction,
     granuleContext.get_load_f = &granule_get_load;
     granuleContext.free_load_f = &granule_free_load;
     granuleContext.debugNoMaterialize = !doMaterialize;
+    granuleContext.granuleParallelism = 2; // TODO make knob or setting for changing this?

     r = fdb_transaction_read_blob_granules(transaction,
                                            (uint8_t*)keystr,
@@ -689,7 +691,7 @@ int run_op_read_blob_granules(FDBTransaction* transaction,
                                            (uint8_t*)keystr2,
                                            strlen(keystr2),
                                            0 /* beginVersion*/,
-                                           -1, /* endVersion. -1 is use txn read version */
+                                           -2, /* endVersion. -2 (latestVersion) is use txn read version */
                                            granuleContext);

     free(fileContext.data_by_id);
@@ -3,7 +3,7 @@
 *
 * This source file is part of the FoundationDB open source project
 *
- * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
+ * Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.

@@ -3,7 +3,7 @@
 *
 * This source file is part of the FoundationDB open source project
 *
- * Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
+ * Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.

@@ -3,7 +3,7 @@
 *
 * This source file is part of the FoundationDB open source project
 *
- * Copyright 2013-2020 Apple Inc. and the FoundationDB project authors
+ * Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -130,8 +130,15 @@ EmptyFuture Database::create_snapshot(FDBDatabase* db,
     return EmptyFuture(fdb_database_create_snapshot(db, uid, uid_length, snap_command, snap_command_length));
 }

-// Transaction
+// Tenant
+Tenant::Tenant(FDBDatabase* db, const uint8_t* name, int name_length) {
+    if (fdb_error_t err = fdb_database_open_tenant(db, name, name_length, &tenant)) {
+        std::cerr << fdb_get_error(err) << std::endl;
+        std::abort();
+    }
+}
+
+// Transaction
 Transaction::Transaction(FDBDatabase* db) {
     if (fdb_error_t err = fdb_database_create_transaction(db, &tr_)) {
         std::cerr << fdb_get_error(err) << std::endl;
@@ -139,6 +146,13 @@ Transaction::Transaction(FDBDatabase* db) {
     }
 }

+Transaction::Transaction(Tenant tenant) {
+    if (fdb_error_t err = fdb_tenant_create_transaction(tenant.tenant, &tr_)) {
+        std::cerr << fdb_get_error(err) << std::endl;
+        std::abort();
+    }
+}
+
 Transaction::~Transaction() {
     fdb_transaction_destroy(tr_);
 }
@@ -203,6 +203,15 @@ public:
                                     int snap_command_length);
 };

+class Tenant final {
+public:
+    Tenant(FDBDatabase* db, const uint8_t* name, int name_length);
+
+private:
+    friend class Transaction;
+    FDBTenant* tenant;
+};
+
 // Wrapper around FDBTransaction, providing the same set of calls as the C API.
 // Handles cleanup of memory, removing the need to call
 // fdb_transaction_destroy.
@@ -210,6 +219,7 @@ class Transaction final {
 public:
     // Given an FDBDatabase, initializes a new transaction.
     Transaction(FDBDatabase* db);
+    Transaction(Tenant tenant);
     ~Transaction();

     // Wrapper around fdb_transaction_reset.
@@ -3,7 +3,7 @@
 *
 * This source file is part of the FoundationDB open source project
 *
- * Copyright 2013-2020 Apple Inc. and the FoundationDB project authors
+ * Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.

@@ -3,7 +3,7 @@
 *
 * This source file is part of the FoundationDB open source project
 *
- * Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
+ * Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.

@@ -3,7 +3,7 @@
 *
 * This source file is part of the FoundationDB open source project
 *
- * Copyright 2013-2020 Apple Inc. and the FoundationDB project authors
+ * Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -20,6 +20,7 @@

 // Unit tests for the FoundationDB C API.

+#include "fdb_c_options.g.h"
 #define FDB_API_VERSION 710
 #include <foundationdb/fdb_c.h>
 #include <assert.h>
@@ -2410,6 +2411,279 @@ TEST_CASE("Fast alloc thread cleanup") {
     }
 }

+TEST_CASE("Tenant create, access, and delete") {
+    std::string tenantName = "tenant";
+    std::string testKey = "foo";
+    std::string testValue = "bar";
+
+    fdb::Transaction tr(db);
+    while (1) {
+        fdb_check(tr.set_option(FDB_TR_OPTION_SPECIAL_KEY_SPACE_ENABLE_WRITES, nullptr, 0));
+        tr.set("\xff\xff/management/tenant_map/" + tenantName, "");
+        fdb::EmptyFuture commitFuture = tr.commit();
+        fdb_error_t err = wait_future(commitFuture);
+        if (err) {
+            fdb::EmptyFuture f = tr.on_error(err);
+            fdb_check(wait_future(f));
+            continue;
+        }
+        tr.reset();
+        break;
+    }
+
+    while (1) {
+        StringRef begin = "\xff\xff/management/tenant_map/"_sr;
+        StringRef end = "\xff\xff/management/tenant_map0"_sr;
+
+        fdb_check(tr.set_option(FDB_TR_OPTION_SPECIAL_KEY_SPACE_ENABLE_WRITES, nullptr, 0));
+        fdb::KeyValueArrayFuture f = tr.get_range(FDB_KEYSEL_FIRST_GREATER_OR_EQUAL(begin.begin(), begin.size()),
+                                                  FDB_KEYSEL_FIRST_GREATER_OR_EQUAL(end.begin(), end.size()),
+                                                  /* limit */ 0,
+                                                  /* target_bytes */ 0,
+                                                  /* FDBStreamingMode */ FDB_STREAMING_MODE_WANT_ALL,
+                                                  /* iteration */ 0,
+                                                  /* snapshot */ false,
+                                                  /* reverse */ 0);
+
+        fdb_error_t err = wait_future(f);
+        if (err) {
+            fdb::EmptyFuture f2 = tr.on_error(err);
+            fdb_check(wait_future(f2));
+            continue;
+        }
+
+        FDBKeyValue const* outKv;
+        int outCount;
+        int outMore;
+        fdb_check(f.get(&outKv, &outCount, &outMore));
+        CHECK(outCount == 1);
+        CHECK(StringRef(outKv->key, outKv->key_length) == StringRef(tenantName).withPrefix(begin));
+
+        tr.reset();
+        break;
+    }
+
+    fdb::Tenant tenant(db, reinterpret_cast<const uint8_t*>(tenantName.c_str()), tenantName.size());
+    fdb::Transaction tr2(tenant);
+
+    while (1) {
+        tr2.set(testKey, testValue);
+        fdb::EmptyFuture commitFuture = tr2.commit();
+        fdb_error_t err = wait_future(commitFuture);
+        if (err) {
+            fdb::EmptyFuture f = tr2.on_error(err);
+            fdb_check(wait_future(f));
+            continue;
+        }
+        tr2.reset();
+        break;
+    }
+
+    while (1) {
+        fdb::ValueFuture f1 = tr2.get(testKey, false);
+        fdb_error_t err = wait_future(f1);
+        if (err) {
+            fdb::EmptyFuture f2 = tr.on_error(err);
+            fdb_check(wait_future(f2));
+            continue;
+        }
+
+        int out_present;
+        char* val;
+        int vallen;
+        fdb_check(f1.get(&out_present, (const uint8_t**)&val, &vallen));
+        CHECK(out_present == 1);
+        CHECK(vallen == testValue.size());
+        CHECK(testValue == val);
+
+        tr2.clear(testKey);
+        fdb::EmptyFuture commitFuture = tr2.commit();
+        err = wait_future(commitFuture);
+        if (err) {
+            fdb::EmptyFuture f = tr2.on_error(err);
+            fdb_check(wait_future(f));
+            continue;
+        }
+
+        tr2.reset();
+        break;
+    }
+
+    while (1) {
+        fdb_check(tr.set_option(FDB_TR_OPTION_SPECIAL_KEY_SPACE_ENABLE_WRITES, nullptr, 0));
+        tr.clear("\xff\xff/management/tenant_map/" + tenantName);
+        fdb::EmptyFuture commitFuture = tr.commit();
+        fdb_error_t err = wait_future(commitFuture);
+        if (err) {
+            fdb::EmptyFuture f = tr.on_error(err);
+            fdb_check(wait_future(f));
+            continue;
+        }
+        tr.reset();
+        break;
+    }
+
+    while (1) {
+        fdb::ValueFuture f1 = tr2.get(testKey, false);
+        fdb_error_t err = wait_future(f1);
+        if (err == error_code_tenant_not_found) {
+            tr2.reset();
+            break;
+        }
+        if (err) {
+            fdb::EmptyFuture f2 = tr.on_error(err);
+            fdb_check(wait_future(f2));
+            continue;
+        }
+    }
+}
+
+int64_t granule_start_load_fail(const char* filename,
+                                int filenameLength,
+                                int64_t offset,
+                                int64_t length,
+                                int64_t fullFileLength,
+                                void* userContext) {
+    CHECK(false);
+    return -1;
+}
+
+uint8_t* granule_get_load_fail(int64_t loadId, void* userContext) {
+    CHECK(false);
+    return nullptr;
+}
+
+void granule_free_load_fail(int64_t loadId, void* userContext) {
+    CHECK(false);
+}
+
+TEST_CASE("Blob Granule Functions") {
+    auto confValue =
+        get_value("\xff/conf/blob_granules_enabled", /* snapshot */ false, { FDB_TR_OPTION_READ_SYSTEM_KEYS });
+    if (!confValue.has_value() || confValue.value() != "1") {
+        return;
+    }
+
+    // write some data
+
+    insert_data(db, create_data({ { "bg1", "a" }, { "bg2", "b" }, { "bg3", "c" } }));
+
+    // because wiring up files is non-trivial, just test the calls complete with the expected no_materialize error
+    FDBReadBlobGranuleContext granuleContext;
+    granuleContext.userContext = nullptr;
+    granuleContext.start_load_f = &granule_start_load_fail;
+    granuleContext.get_load_f = &granule_get_load_fail;
+    granuleContext.free_load_f = &granule_free_load_fail;
+    granuleContext.debugNoMaterialize = true;
+    granuleContext.granuleParallelism = 1;
+
+    // dummy values
+    FDBKeyValue const* out_kv;
+    int out_count;
+    int out_more;
+
+    fdb::Transaction tr(db);
+    int64_t originalReadVersion = -1;
+
+    // test no materialize gets error but completes, save read version
+    while (1) {
+        fdb_check(tr.set_option(FDB_TR_OPTION_READ_YOUR_WRITES_DISABLE, nullptr, 0));
+        // -2 is latest version
+        fdb::KeyValueArrayResult r = tr.read_blob_granules(key("bg"), key("bh"), 0, -2, granuleContext);
+        fdb_error_t err = r.get(&out_kv, &out_count, &out_more);
+        if (err && err != 2037 /* blob_granule_not_materialized */) {
+            fdb::EmptyFuture f2 = tr.on_error(err);
+            fdb_check(wait_future(f2));
+            continue;
+        }
+
+        CHECK(err == 2037 /* blob_granule_not_materialized */);
+
+        // If read done, save read version. Should have already used read version so this shouldn't error
+        fdb::Int64Future grvFuture = tr.get_read_version();
+        fdb_error_t grvErr = wait_future(grvFuture);
+        CHECK(!grvErr);
+        CHECK(!grvFuture.get(&originalReadVersion));
+
+        CHECK(originalReadVersion > 0);
+
+        tr.reset();
+        break;
+    }
+
+    // test with begin version > 0
+    while (1) {
+        fdb_check(tr.set_option(FDB_TR_OPTION_READ_YOUR_WRITES_DISABLE, nullptr, 0));
+        // -2 is latest version, read version should be >= originalReadVersion
+        fdb::KeyValueArrayResult r =
+            tr.read_blob_granules(key("bg"), key("bh"), originalReadVersion, -2, granuleContext);
+        fdb_error_t err = r.get(&out_kv, &out_count, &out_more);
+        ;
+        if (err && err != 2037 /* blob_granule_not_materialized */) {
+            fdb::EmptyFuture f2 = tr.on_error(err);
+            fdb_check(wait_future(f2));
+            continue;
+        }
+
+        CHECK(err == 2037 /* blob_granule_not_materialized */);
+
+        tr.reset();
+        break;
+    }
+
+    // test with prior read version completes after delay larger than normal MVC window
+    // TODO: should we not do this?
+    std::this_thread::sleep_for(std::chrono::milliseconds(6000));
+    while (1) {
+        fdb_check(tr.set_option(FDB_TR_OPTION_READ_YOUR_WRITES_DISABLE, nullptr, 0));
+        fdb::KeyValueArrayResult r =
+            tr.read_blob_granules(key("bg"), key("bh"), 0, originalReadVersion, granuleContext);
+        fdb_error_t err = r.get(&out_kv, &out_count, &out_more);
+        if (err && err != 2037 /* blob_granule_not_materialized */) {
+            fdb::EmptyFuture f2 = tr.on_error(err);
+            fdb_check(wait_future(f2));
+            continue;
+        }
+
+        CHECK(err == 2037 /* blob_granule_not_materialized */);
+
+        tr.reset();
+        break;
+    }
+
+    // test ranges
+
+    while (1) {
+        fdb::KeyRangeArrayFuture f = tr.get_blob_granule_ranges(key("bg"), key("bh"));
+        fdb_error_t err = wait_future(f);
+        if (err) {
+            fdb::EmptyFuture f2 = tr.on_error(err);
+            fdb_check(wait_future(f2));
+            continue;
+        }
+
+        const FDBKeyRange* out_kr;
+        int out_count;
+        fdb_check(f.get(&out_kr, &out_count));
+
+        CHECK(out_count >= 1);
+        // check key ranges are in order
+        for (int i = 0; i < out_count; i++) {
+            // key range start < end
+            CHECK(std::string((const char*)out_kr[i].begin_key, out_kr[i].begin_key_length) <
+                  std::string((const char*)out_kr[i].end_key, out_kr[i].end_key_length));
+        }
+        // Ranges themselves are sorted
+        for (int i = 0; i < out_count - 1; i++) {
+            CHECK(std::string((const char*)out_kr[i].end_key, out_kr[i].end_key_length) <=
+                  std::string((const char*)out_kr[i + 1].begin_key, out_kr[i + 1].begin_key_length));
+        }
+
+        tr.reset();
+        break;
+    }
+}
+
 int main(int argc, char** argv) {
     if (argc < 3) {
         std::cout << "Unit tests for the FoundationDB C API.\n"
@@ -3,7 +3,7 @@
 *
 * This source file is part of the FoundationDB open source project
 *
- * Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
+ * Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.

@@ -3,7 +3,7 @@
 *
 * This source file is part of the FoundationDB open source project
 *
- * Copyright 2013-2019 Apple Inc. and the FoundationDB project authors
+ * Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.

@@ -3,7 +3,7 @@
 *
 * This source file is part of the FoundationDB open source project
 *
- * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
+ * Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -88,6 +88,7 @@ def api_version(ver):
         'predicates',
         'Future',
         'Database',
+        'Tenant',
         'Transaction',
         'KeyValue',
         'KeySelector',
@@ -34,6 +34,7 @@ import traceback

 import fdb
 from fdb import six
+from fdb.tuple import pack, unpack

 _network_thread = None
 _network_thread_reentrant_lock = threading.RLock()
@@ -198,9 +199,10 @@ def transactional(*tr_args, **tr_kwargs):
     one of two actions, depending on the type of the parameter passed
     to the function at call time.

-    If given a Database, a Transaction will be created and passed into
-    the wrapped code in place of the Database. After the function is
-    complete, the newly created transaction will be committed.
+    If given a Database or Tenant, a Transaction will be created and
+    passed into the wrapped code in place of the Database or Tenant.
+    After the function is complete, the newly created transaction
+    will be committed.

     It is important to note that the wrapped method may be called
     multiple times in the event of a commit failure, until the commit
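A hypothetical usage sketch of the updated decorator contract (not part of the patch): it assumes the Database object gained an `open_tenant(name)` helper returning a `Tenant`, so check the binding you are using before copying it.

```python
import fdb
fdb.api_version(710)


@fdb.transactional
def set_greeting(tr, value):
    # The decorator hands this function a Transaction regardless of whether the
    # caller passed a Database or a Tenant.
    tr[b'greeting'] = value


db = fdb.open()
tenant = db.open_tenant(b'my-tenant')  # assumed helper; names are bytes or tuples

set_greeting(db, b'hello from the database')    # runs against the full keyspace
set_greeting(tenant, b'hello from the tenant')  # same function, scoped to the tenant
```

The point of the docstring change is exactly this reuse: a function written once against `tr` can be retried transparently against either transaction creator.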
@@ -943,128 +945,114 @@ class FormerFuture(_FDBBase):
         except:
             pass


-class Database(_FDBBase):
-    def __init__(self, dpointer):
-        self.dpointer = dpointer
-        self.options = _DatabaseOptions(self)
-
-    def __del__(self):
-        # print('Destroying database 0x%x' % self.dpointer)
-        self.capi.fdb_database_destroy(self.dpointer)
-
+class _TransactionCreator(_FDBBase):
     def get(self, key):
-        return Database.__database_getitem(self, key)
+        return _TransactionCreator.__creator_getitem(self, key)

     def __getitem__(self, key):
         if isinstance(key, slice):
             return self.get_range(key.start, key.stop, reverse=(key.step == -1))
-        return Database.__database_getitem(self, key)
+        return _TransactionCreator.__creator_getitem(self, key)

     def get_key(self, key_selector):
-        return Database.__database_get_key(self, key_selector)
+        return _TransactionCreator.__creator_get_key(self, key_selector)

     def get_range(self, begin, end, limit=0, reverse=False, streaming_mode=StreamingMode.want_all):
-        return Database.__database_get_range(self, begin, end, limit, reverse, streaming_mode)
+        return _TransactionCreator.__creator_get_range(self, begin, end, limit, reverse, streaming_mode)

     def get_range_startswith(self, prefix, *args, **kwargs):
-        return Database.__database_get_range_startswith(self, prefix, *args, **kwargs)
+        return _TransactionCreator.__creator_get_range_startswith(self, prefix, *args, **kwargs)

     def set(self, key, value):
-        Database.__database_setitem(self, key, value)
+        _TransactionCreator.__creator_setitem(self, key, value)

     def __setitem__(self, key, value):
-        Database.__database_setitem(self, key, value)
+        _TransactionCreator.__creator_setitem(self, key, value)

     def clear(self, key):
-        Database.__database_delitem(self, key)
+        _TransactionCreator.__creator_delitem(self, key)

     def clear_range(self, begin, end):
-        Database.__database_delitem(self, slice(begin, end))
+        _TransactionCreator.__creator_delitem(self, slice(begin, end))

     def __delitem__(self, key_or_slice):
-        Database.__database_delitem(self, key_or_slice)
+        _TransactionCreator.__creator_delitem(self, key_or_slice)

     def clear_range_startswith(self, prefix):
-        Database.__database_clear_range_startswith(self, prefix)
+        _TransactionCreator.__creator_clear_range_startswith(self, prefix)

     def get_and_watch(self, key):
-        return Database.__database_get_and_watch(self, key)
+        return _TransactionCreator.__creator_get_and_watch(self, key)

     def set_and_watch(self, key, value):
-        return Database.__database_set_and_watch(self, key, value)
+        return _TransactionCreator.__creator_set_and_watch(self, key, value)

     def clear_and_watch(self, key):
-        return Database.__database_clear_and_watch(self, key)
+        return _TransactionCreator.__creator_clear_and_watch(self, key)

     def create_transaction(self):
-        pointer = ctypes.c_void_p()
-        self.capi.fdb_database_create_transaction(self.dpointer, ctypes.byref(pointer))
-        return Transaction(pointer.value, self)
-
-    def _set_option(self, option, param, length):
-        self.capi.fdb_database_set_option(self.dpointer, option, param, length)
+        pass

     def _atomic_operation(self, opcode, key, param):
-        Database.__database_atomic_operation(self, opcode, key, param)
+        _TransactionCreator.__creator_atomic_operation(self, opcode, key, param)

     #### Transaction implementations ####
     @staticmethod
     @transactional
-    def __database_getitem(tr, key):
+    def __creator_getitem(tr, key):
         return tr[key].value

     @staticmethod
     @transactional
-    def __database_get_key(tr, key_selector):
+    def __creator_get_key(tr, key_selector):
         return tr.get_key(key_selector).value

     @staticmethod
     @transactional
-    def __database_get_range(tr, begin, end, limit, reverse, streaming_mode):
+    def __creator_get_range(tr, begin, end, limit, reverse, streaming_mode):
         return tr.get_range(begin, end, limit, reverse, streaming_mode).to_list()

     @staticmethod
     @transactional
-    def __database_get_range_startswith(tr, prefix, *args, **kwargs):
+    def __creator_get_range_startswith(tr, prefix, *args, **kwargs):
         return tr.get_range_startswith(prefix, *args, **kwargs).to_list()

     @staticmethod
     @transactional
-    def __database_setitem(tr, key, value):
+    def __creator_setitem(tr, key, value):
         tr[key] = value

     @staticmethod
     @transactional
-    def __database_clear_range_startswith(tr, prefix):
+    def __creator_clear_range_startswith(tr, prefix):
         tr.clear_range_startswith(prefix)

     @staticmethod
     @transactional
-    def __database_get_and_watch(tr, key):
+    def __creator_get_and_watch(tr, key):
         v = tr.get(key)
         return v, tr.watch(key)

     @staticmethod
     @transactional
-    def __database_set_and_watch(tr, key, value):
+    def __creator_set_and_watch(tr, key, value):
         tr.set(key, value)
         return tr.watch(key)

     @staticmethod
     @transactional
-    def __database_clear_and_watch(tr, key):
+    def __creator_clear_and_watch(tr, key):
         del tr[key]
         return tr.watch(key)

     @staticmethod
     @transactional
-    def __database_delitem(tr, key_or_slice):
+    def __creator_delitem(tr, key_or_slice):
         del tr[key_or_slice]

     @staticmethod
     @transactional
-    def __database_atomic_operation(tr, opcode, key, param):
+    def __creator_atomic_operation(tr, opcode, key, param):
         tr._atomic_operation(opcode, key, param)

     # Asynchronous transactions

@@ -1074,11 +1062,11 @@ class Database(_FDBBase):
     From = asyncio.From
     coroutine = asyncio.coroutine

-    class Database:
+    class TransactionCreator:
         @staticmethod
         @transactional
         @coroutine
-        def __database_getitem(tr, key):
+        def __creator_getitem(tr, key):
             # raise Return(( yield From( tr[key] ) ))
             raise Return(tr[key])
             yield None

@@ -1086,26 +1074,26 @@ class Database(_FDBBase):
         @staticmethod
         @transactional
         @coroutine
-        def __database_get_key(tr, key_selector):
+        def __creator_get_key(tr, key_selector):
             raise Return(tr.get_key(key_selector))
             yield None

         @staticmethod
         @transactional
         @coroutine
-        def __database_get_range(tr, begin, end, limit, reverse, streaming_mode):
+        def __creator_get_range(tr, begin, end, limit, reverse, streaming_mode):
             raise Return((yield From(tr.get_range(begin, end, limit, reverse, streaming_mode).to_list())))

         @staticmethod
         @transactional
         @coroutine
-        def __database_get_range_startswith(tr, prefix, *args, **kwargs):
+        def __creator_get_range_startswith(tr, prefix, *args, **kwargs):
             raise Return((yield From(tr.get_range_startswith(prefix, *args, **kwargs).to_list())))

         @staticmethod
         @transactional
         @coroutine
-        def __database_setitem(tr, key, value):
+        def __creator_setitem(tr, key, value):
             tr[key] = value
             raise Return()
             yield None

@@ -1113,7 +1101,7 @@ class Database(_FDBBase):
         @staticmethod
         @transactional
         @coroutine
-        def __database_clear_range_startswith(tr, prefix):
+        def __creator_clear_range_startswith(tr, prefix):
             tr.clear_range_startswith(prefix)
             raise Return()
             yield None

@@ -1121,7 +1109,7 @@ class Database(_FDBBase):
         @staticmethod
         @transactional
         @coroutine
-        def __database_get_and_watch(tr, key):
+        def __creator_get_and_watch(tr, key):
             v = tr.get(key)
             raise Return(v, tr.watch(key))
             yield None

@@ -1129,7 +1117,7 @@ class Database(_FDBBase):
         @staticmethod
         @transactional
         @coroutine
-        def __database_set_and_watch(tr, key, value):
+        def __creator_set_and_watch(tr, key, value):
             tr.set(key, value)
             raise Return(tr.watch(key))
             yield None

@@ -1137,7 +1125,7 @@ class Database(_FDBBase):
         @staticmethod
         @transactional
         @coroutine
-        def __database_clear_and_watch(tr, key):
+        def __creator_clear_and_watch(tr, key):
             del tr[key]
             raise Return(tr.watch(key))
             yield None

@@ -1145,7 +1133,7 @@ class Database(_FDBBase):
         @staticmethod
         @transactional
         @coroutine
-        def __database_delitem(tr, key_or_slice):
+        def __creator_delitem(tr, key_or_slice):
             del tr[key_or_slice]
             raise Return()
             yield None

@@ -1153,11 +1141,101 @@ class Database(_FDBBase):
         @staticmethod
         @transactional
         @coroutine
-        def __database_atomic_operation(tr, opcode, key, param):
+        def __creator_atomic_operation(tr, opcode, key, param):
             tr._atomic_operation(opcode, key, param)
             raise Return()
             yield None
-    return Database
+    return TransactionCreator
+
+
+def process_tenant_name(name):
+    if isinstance(name, tuple):
+        return pack(name)
+    elif isinstance(name, bytes):
+        return name
+    else:
+        raise TypeError('Tenant name must be of type ' + bytes.__name__ + ' or of type ' + tuple.__name__)
+
+
+class Database(_TransactionCreator):
|
||||
def __init__(self, dpointer):
|
||||
self.dpointer = dpointer
|
||||
self.options = _DatabaseOptions(self)
|
||||
|
||||
def __del__(self):
|
||||
# print('Destroying database 0x%x' % self.dpointer)
|
||||
self.capi.fdb_database_destroy(self.dpointer)
|
||||
|
||||
def _set_option(self, option, param, length):
|
||||
self.capi.fdb_database_set_option(self.dpointer, option, param, length)
|
||||
|
||||
def open_tenant(self, name):
|
||||
tname = process_tenant_name(name)
|
||||
pointer = ctypes.c_void_p()
|
||||
self.capi.fdb_database_open_tenant(self.dpointer, tname, len(tname), ctypes.byref(pointer))
|
||||
return Tenant(pointer.value)
|
||||
|
||||
def create_transaction(self):
|
||||
pointer = ctypes.c_void_p()
|
||||
self.capi.fdb_database_create_transaction(self.dpointer, ctypes.byref(pointer))
|
||||
return Transaction(pointer.value, self)
|
||||
|
||||
def allocate_tenant(self, name):
|
||||
Database.__database_allocate_tenant(self, process_tenant_name(name), [])
|
||||
|
||||
def delete_tenant(self, name):
|
||||
Database.__database_delete_tenant(self, process_tenant_name(name), [])
|
||||
|
||||
# Attempt to allocate a tenant in the cluster. If the tenant already exists,
|
||||
# this function will return a tenant_already_exists error. If the tenant is created
|
||||
# concurrently, then this function may return success even if another caller creates
|
||||
# it.
|
||||
#
|
||||
# The existence_check_marker is expected to be an empty list. This function will
|
||||
# modify the list after completing the existence check to avoid checking for existence
|
||||
# on retries. This allows the operation to be idempotent.
|
||||
@staticmethod
|
||||
@transactional
|
||||
def __database_allocate_tenant(tr, name, existence_check_marker):
|
||||
tr.options.set_special_key_space_enable_writes()
|
||||
key = b'\xff\xff/management/tenant_map/%s' % name
|
||||
if not existence_check_marker:
|
||||
existing_tenant = tr[key].wait()
|
||||
existence_check_marker.append(None)
|
||||
if existing_tenant is not None:
|
||||
raise fdb.FDBError(2132) # tenant_already_exists
|
||||
tr[key] = b''
|
||||
|
||||
# Attempt to remove a tenant in the cluster. If the tenant doesn't exist, this
|
||||
# function will return a tenant_not_found error. If the tenant is deleted
|
||||
# concurrently, then this function may return success even if another caller deletes
|
||||
# it.
|
||||
#
|
||||
# The existence_check_marker is expected to be an empty list. This function will
|
||||
# modify the list after completing the existence check to avoid checking for existence
|
||||
# on retries. This allows the operation to be idempotent.
|
||||
@staticmethod
|
||||
@transactional
|
||||
def __database_delete_tenant(tr, name, existence_check_marker):
|
||||
tr.options.set_special_key_space_enable_writes()
|
||||
key = b'\xff\xff/management/tenant_map/%s' % name
|
||||
if not existence_check_marker:
|
||||
existing_tenant = tr[key].wait()
|
||||
existence_check_marker.append(None)
|
||||
if existing_tenant is None:
|
||||
raise fdb.FDBError(2131) # tenant_not_found
|
||||
del tr[key]
|
||||
|
||||
|
||||
class Tenant(_TransactionCreator):
|
||||
def __init__(self, tpointer):
|
||||
self.tpointer = tpointer
|
||||
|
||||
def __del__(self):
|
||||
self.capi.fdb_tenant_destroy(self.tpointer)
|
||||
|
||||
def create_transaction(self):
|
||||
pointer = ctypes.c_void_p()
|
||||
self.capi.fdb_tenant_create_transaction(self.tpointer, ctypes.byref(pointer))
|
||||
return Transaction(pointer.value, self)
|
||||
|
||||
|
||||
fill_operations()
|
||||
|
@ -1458,6 +1536,10 @@ def init_c_api():
|
|||
_capi.fdb_database_destroy.argtypes = [ctypes.c_void_p]
|
||||
_capi.fdb_database_destroy.restype = None
|
||||
|
||||
_capi.fdb_database_open_tenant.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.POINTER(ctypes.c_void_p)]
|
||||
_capi.fdb_database_open_tenant.restype = ctypes.c_int
|
||||
_capi.fdb_database_open_tenant.errcheck = check_error_code
|
||||
|
||||
_capi.fdb_database_create_transaction.argtypes = [ctypes.c_void_p, ctypes.POINTER(ctypes.c_void_p)]
|
||||
_capi.fdb_database_create_transaction.restype = ctypes.c_int
|
||||
_capi.fdb_database_create_transaction.errcheck = check_error_code
|
||||
|
@ -1466,6 +1548,13 @@ def init_c_api():
|
|||
_capi.fdb_database_set_option.restype = ctypes.c_int
|
||||
_capi.fdb_database_set_option.errcheck = check_error_code
|
||||
|
||||
_capi.fdb_tenant_destroy.argtypes = [ctypes.c_void_p]
|
||||
_capi.fdb_tenant_destroy.restype = None
|
||||
|
||||
_capi.fdb_tenant_create_transaction.argtypes = [ctypes.c_void_p, ctypes.POINTER(ctypes.c_void_p)]
|
||||
_capi.fdb_tenant_create_transaction.restype = ctypes.c_int
|
||||
_capi.fdb_tenant_create_transaction.errcheck = check_error_code
|
||||
|
||||
_capi.fdb_transaction_destroy.argtypes = [ctypes.c_void_p]
|
||||
_capi.fdb_transaction_destroy.restype = None
|
||||
|
||||
|
@ -1686,10 +1775,10 @@ def init(event_model=None):
|
|||
raise asyncio.Return(self)
|
||||
return it()
|
||||
FDBRange.iterate = iterate
|
||||
AT = Database.declare_asynchronous_transactions()
|
||||
AT = _TransactionCreator.declare_asynchronous_transactions()
|
||||
for name in dir(AT):
|
||||
if name.startswith("_Database__database_"):
|
||||
setattr(Database, name, getattr(AT, name))
|
||||
if name.startswith("__TransactionCreator__creator_"):
|
||||
setattr(_TransactionCreator, name, getattr(AT, name))
|
||||
|
||||
def to_list(self):
|
||||
if self._mode == StreamingMode.iterator:
|
||||
|
|
|
@ -542,6 +542,103 @@ def triggerddteaminfolog(logger):
|
|||
output = run_fdbcli_command('triggerddteaminfolog')
|
||||
assert output == 'Triggered team info logging in data distribution.'
|
||||
|
||||
@enable_logging()
|
||||
def tenants(logger):
|
||||
output = run_fdbcli_command('listtenants')
|
||||
assert output == 'The cluster has no tenants'
|
||||
|
||||
output = run_fdbcli_command('createtenant tenant')
|
||||
assert output == 'The tenant `tenant\' has been created'
|
||||
|
||||
output = run_fdbcli_command('createtenant tenant2')
|
||||
assert output == 'The tenant `tenant2\' has been created'
|
||||
|
||||
output = run_fdbcli_command('listtenants')
|
||||
assert output == '1. tenant\n 2. tenant2'
|
||||
|
||||
output = run_fdbcli_command('listtenants a z 1')
|
||||
assert output == '1. tenant'
|
||||
|
||||
output = run_fdbcli_command('listtenants a tenant2')
|
||||
assert output == '1. tenant'
|
||||
|
||||
output = run_fdbcli_command('listtenants tenant2 z')
|
||||
assert output == '1. tenant2'
|
||||
|
||||
output = run_fdbcli_command('gettenant tenant')
|
||||
lines = output.split('\n')
|
||||
assert len(lines) == 2
|
||||
assert lines[0].strip().startswith('id: ')
|
||||
assert lines[1].strip().startswith('prefix: ')
|
||||
|
||||
output = run_fdbcli_command('usetenant')
|
||||
assert output == 'Using the default tenant'
|
||||
|
||||
output = run_fdbcli_command_and_get_error('usetenant tenant3')
|
||||
assert output == 'ERROR: Tenant `tenant3\' does not exist'
|
||||
|
||||
# Test writing keys to different tenants and make sure they all work correctly
|
||||
run_fdbcli_command('writemode on; set tenant_test default_tenant')
|
||||
output = run_fdbcli_command('get tenant_test')
|
||||
assert output == '`tenant_test\' is `default_tenant\''
|
||||
|
||||
process = subprocess.Popen(command_template[:-1], stdin=subprocess.PIPE, stdout=subprocess.PIPE, env=fdbcli_env)
|
||||
cmd_sequence = ['writemode on', 'usetenant tenant', 'get tenant_test', 'set tenant_test tenant']
|
||||
output, _ = process.communicate(input='\n'.join(cmd_sequence).encode())
|
||||
|
||||
lines = output.decode().strip().split('\n')[-3:]
|
||||
assert lines[0] == 'Using tenant `tenant\''
|
||||
assert lines[1] == '`tenant_test\': not found'
|
||||
assert lines[2].startswith('Committed')
|
||||
|
||||
process = subprocess.Popen(command_template[:-1], stdin=subprocess.PIPE, stdout=subprocess.PIPE, env=fdbcli_env)
|
||||
cmd_sequence = ['writemode on', 'usetenant tenant2', 'get tenant_test', 'set tenant_test tenant2', 'get tenant_test']
|
||||
output, _ = process.communicate(input='\n'.join(cmd_sequence).encode())
|
||||
|
||||
lines = output.decode().strip().split('\n')[-4:]
|
||||
assert lines[0] == 'Using tenant `tenant2\''
|
||||
assert lines[1] == '`tenant_test\': not found'
|
||||
assert lines[2].startswith('Committed')
|
||||
assert lines[3] == '`tenant_test\' is `tenant2\''
|
||||
|
||||
process = subprocess.Popen(command_template[:-1], stdin=subprocess.PIPE, stdout=subprocess.PIPE, env=fdbcli_env)
|
||||
cmd_sequence = ['usetenant tenant', 'get tenant_test', 'defaulttenant', 'get tenant_test']
|
||||
output, _ = process.communicate(input='\n'.join(cmd_sequence).encode())
|
||||
|
||||
lines = output.decode().strip().split('\n')[-4:]
|
||||
assert lines[0] == 'Using tenant `tenant\''
|
||||
assert lines[1] == '`tenant_test\' is `tenant\''
|
||||
assert lines[2] == 'Using the default tenant'
|
||||
assert lines[3] == '`tenant_test\' is `default_tenant\''
|
||||
|
||||
process = subprocess.Popen(command_template[:-1], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=fdbcli_env)
|
||||
cmd_sequence = ['writemode on', 'usetenant tenant', 'clear tenant_test', 'deletetenant tenant', 'get tenant_test', 'defaulttenant', 'usetenant tenant']
|
||||
output, error_output = process.communicate(input='\n'.join(cmd_sequence).encode())
|
||||
|
||||
lines = output.decode().strip().split('\n')[-7:]
|
||||
error_lines = error_output.decode().strip().split('\n')[-2:]
|
||||
assert lines[0] == 'Using tenant `tenant\''
|
||||
assert lines[1].startswith('Committed')
|
||||
assert lines[2] == 'The tenant `tenant\' has been deleted'
|
||||
assert lines[3] == 'WARNING: the active tenant was deleted. Use the `usetenant\' or `defaulttenant\''
|
||||
assert lines[4] == 'command to choose a new tenant.'
|
||||
assert error_lines[0] == 'ERROR: Tenant does not exist (2131)'
|
||||
assert lines[6] == 'Using the default tenant'
|
||||
assert error_lines[1] == 'ERROR: Tenant `tenant\' does not exist'
|
||||
|
||||
process = subprocess.Popen(command_template[:-1], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=fdbcli_env)
|
||||
cmd_sequence = ['writemode on', 'deletetenant tenant2', 'usetenant tenant2', 'clear tenant_test', 'defaulttenant', 'deletetenant tenant2']
|
||||
output, error_output = process.communicate(input='\n'.join(cmd_sequence).encode())
|
||||
|
||||
lines = output.decode().strip().split('\n')[-4:]
|
||||
error_lines = error_output.decode().strip().split('\n')[-1:]
|
||||
assert error_lines[0] == 'ERROR: Cannot delete a non-empty tenant (2133)'
|
||||
assert lines[0] == 'Using tenant `tenant2\''
|
||||
assert lines[1].startswith('Committed')
|
||||
assert lines[2] == 'Using the default tenant'
|
||||
assert lines[3] == 'The tenant `tenant2\' has been deleted'
|
||||
|
||||
run_fdbcli_command('writemode on; clear tenant_test')
|
||||
|
||||
if __name__ == '__main__':
|
||||
parser = ArgumentParser(formatter_class=RawDescriptionHelpFormatter,
|
||||
|
@ -586,6 +683,7 @@ if __name__ == '__main__':
|
|||
transaction()
|
||||
throttle()
|
||||
triggerddteaminfolog()
|
||||
tenants()
|
||||
else:
|
||||
assert args.process_number > 1, "Process number should be positive"
|
||||
coordinators()
|
||||
|
|
|
@ -0,0 +1,123 @@
|
|||
#!/usr/bin/python
|
||||
#
|
||||
# tenant_tests.py
|
||||
#
|
||||
# This source file is part of the FoundationDB open source project
|
||||
#
|
||||
# Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
import fdb
|
||||
import sys
|
||||
import json
|
||||
from fdb.tuple import pack
|
||||
|
||||
if __name__ == '__main__':
|
||||
fdb.api_version(710)
|
||||
|
||||
def test_tenant_tuple_name(db):
|
||||
tuplename=(b'test', b'level', b'hierarchy', 3, 1.24, 'str')
|
||||
db.allocate_tenant(tuplename)
|
||||
|
||||
tenant=db.open_tenant(tuplename)
|
||||
tenant[b'foo'] = b'bar'
|
||||
|
||||
assert tenant[b'foo'] == b'bar'
|
||||
|
||||
del tenant[b'foo']
|
||||
db.delete_tenant(tuplename)
|
||||
|
||||
def cleanup_tenant(db, tenant_name):
|
||||
try:
|
||||
tenant = db.open_tenant(tenant_name)
|
||||
del tenant[:]
|
||||
db.delete_tenant(tenant_name)
|
||||
except fdb.FDBError as e:
|
||||
if e.code == 2131: # tenant not found
|
||||
pass
|
||||
else:
|
||||
raise
|
||||
|
||||
def test_tenant_operations(db):
|
||||
cleanup_tenant(db, b'tenant1')
|
||||
cleanup_tenant(db, b'tenant2')
|
||||
|
||||
db.allocate_tenant(b'tenant1')
|
||||
db.allocate_tenant(b'tenant2')
|
||||
|
||||
tenant1 = db.open_tenant(b'tenant1')
|
||||
tenant2 = db.open_tenant(b'tenant2')
|
||||
|
||||
db[b'tenant_test_key'] = b'no_tenant'
|
||||
tenant1[b'tenant_test_key'] = b'tenant1'
|
||||
tenant2[b'tenant_test_key'] = b'tenant2'
|
||||
|
||||
tenant1_entry = db[b'\xff\xff/management/tenant_map/tenant1']
|
||||
tenant1_json = json.loads(tenant1_entry)
|
||||
prefix1 = tenant1_json['prefix'].encode('utf8')
|
||||
|
||||
tenant2_entry = db[b'\xff\xff/management/tenant_map/tenant2']
|
||||
tenant2_json = json.loads(tenant2_entry)
|
||||
prefix2 = tenant2_json['prefix'].encode('utf8')
|
||||
|
||||
assert tenant1[b'tenant_test_key'] == b'tenant1'
|
||||
assert db[prefix1 + b'tenant_test_key'] == b'tenant1'
|
||||
assert tenant2[b'tenant_test_key'] == b'tenant2'
|
||||
assert db[prefix2 + b'tenant_test_key'] == b'tenant2'
|
||||
assert db[b'tenant_test_key'] == b'no_tenant'
|
||||
|
||||
tr1 = tenant1.create_transaction()
|
||||
try:
|
||||
del tr1[:]
|
||||
tr1.commit().wait()
|
||||
except fdb.FDBError as e:
|
||||
tr1.on_error(e).wait()
|
||||
|
||||
assert tenant1[b'tenant_test_key'] == None
|
||||
assert db[prefix1 + b'tenant_test_key'] == None
|
||||
assert tenant2[b'tenant_test_key'] == b'tenant2'
|
||||
assert db[prefix2 + b'tenant_test_key'] == b'tenant2'
|
||||
assert db[b'tenant_test_key'] == b'no_tenant'
|
||||
|
||||
db.delete_tenant(b'tenant1')
|
||||
try:
|
||||
tenant1[b'tenant_test_key']
|
||||
assert False
|
||||
except fdb.FDBError as e:
|
||||
assert e.code == 2131 # tenant not found
|
||||
|
||||
del tenant2[:]
|
||||
db.delete_tenant(b'tenant2')
|
||||
|
||||
assert db[prefix1 + b'tenant_test_key'] == None
|
||||
assert db[prefix2 + b'tenant_test_key'] == None
|
||||
assert db[b'tenant_test_key'] == b'no_tenant'
|
||||
|
||||
del db[b'tenant_test_key']
|
||||
|
||||
assert db[b'tenant_test_key'] == None
|
||||
|
||||
def test_tenants(db):
|
||||
test_tenant_tuple_name(db)
|
||||
test_tenant_operations(db)
|
||||
|
||||
# Expect a cluster file as input. This test will write to the FDB cluster, so
|
||||
# be aware of potential side effects.
|
||||
if __name__ == '__main__':
|
||||
clusterFile = sys.argv[1]
|
||||
db = fdb.open(clusterFile)
|
||||
db.options.set_transaction_timeout(2000) # 2 seconds
|
||||
db.options.set_transaction_retry_limit(3)
|
||||
|
||||
test_tenants(db)
|
|
@ -49,6 +49,7 @@ from cancellation_timeout_tests import test_db_retry_limits
|
|||
from cancellation_timeout_tests import test_combinations
|
||||
|
||||
from size_limit_tests import test_size_limit_option, test_get_approximate_size
|
||||
from tenant_tests import test_tenants
|
||||
|
||||
random.seed(0)
|
||||
|
||||
|
@ -112,12 +113,13 @@ class Stack:
|
|||
|
||||
|
||||
class Instruction:
|
||||
def __init__(self, tr, stack, op, index, isDatabase=False, isSnapshot=False):
|
||||
def __init__(self, tr, stack, op, index, isDatabase=False, isTenant=False, isSnapshot=False):
|
||||
self.tr = tr
|
||||
self.stack = stack
|
||||
self.op = op
|
||||
self.index = index
|
||||
self.isDatabase = isDatabase
|
||||
self.isTenant = isTenant
|
||||
self.isSnapshot = isSnapshot
|
||||
|
||||
def pop(self, count=None, with_idx=False):
|
||||
|
@ -277,6 +279,7 @@ class Tester:
|
|||
|
||||
def __init__(self, db, prefix):
|
||||
self.db = db
|
||||
self.tenant = None
|
||||
|
||||
self.instructions = self.db[fdb.tuple.range((prefix,))]
|
||||
|
||||
|
@ -317,7 +320,8 @@ class Tester:
|
|||
|
||||
def new_transaction(self):
|
||||
with Tester.tr_map_lock:
|
||||
Tester.tr_map[self.tr_name] = self.db.create_transaction()
|
||||
tr_source = self.tenant if self.tenant is not None else self.db
|
||||
Tester.tr_map[self.tr_name] = tr_source.create_transaction()
|
||||
|
||||
def switch_transaction(self, name):
|
||||
self.tr_name = name
|
||||
|
@ -335,18 +339,22 @@ class Tester:
|
|||
# print("%d. Instruction is %s" % (idx, op))
|
||||
|
||||
isDatabase = op.endswith(six.u('_DATABASE'))
|
||||
isTenant = op.endswith(six.u('_TENANT'))
|
||||
isSnapshot = op.endswith(six.u('_SNAPSHOT'))
|
||||
|
||||
if isDatabase:
|
||||
op = op[:-9]
|
||||
obj = self.db
|
||||
elif isTenant:
|
||||
op = op[:-7]
|
||||
obj = self.tenant if self.tenant else self.db
|
||||
elif isSnapshot:
|
||||
op = op[:-9]
|
||||
obj = self.current_transaction().snapshot
|
||||
else:
|
||||
obj = self.current_transaction()
|
||||
|
||||
inst = Instruction(obj, self.stack, op, idx, isDatabase, isSnapshot)
|
||||
inst = Instruction(obj, self.stack, op, idx, isDatabase, isTenant, isSnapshot)
|
||||
|
||||
try:
|
||||
if inst.op == six.u("PUSH"):
|
||||
|
@ -583,6 +591,19 @@ class Tester:
|
|||
prefix = inst.pop()
|
||||
Tester.wait_empty(self.db, prefix)
|
||||
inst.push(b"WAITED_FOR_EMPTY")
|
||||
elif inst.op == six.u("TENANT_CREATE"):
|
||||
name = inst.pop()
|
||||
self.db.allocate_tenant(name)
|
||||
inst.push(b"RESULT_NOT_PRESENT")
|
||||
elif inst.op == six.u("TENANT_DELETE"):
|
||||
name = inst.pop()
|
||||
self.db.delete_tenant(name)
|
||||
inst.push(b"RESULT_NOT_PRESENT")
|
||||
elif inst.op == six.u("TENANT_SET_ACTIVE"):
|
||||
name = inst.pop()
|
||||
self.tenant = self.db.open_tenant(name)
|
||||
elif inst.op == six.u("TENANT_CLEAR_ACTIVE"):
|
||||
self.tenant = None
|
||||
elif inst.op == six.u("UNIT_TESTS"):
|
||||
try:
|
||||
test_db_options(db)
|
||||
|
@ -600,6 +621,8 @@ class Tester:
|
|||
test_size_limit_option(db)
|
||||
test_get_approximate_size(db)
|
||||
|
||||
test_tenants(db)
|
||||
|
||||
except fdb.FDBError as e:
|
||||
print("Unit tests failed: %s" % e.description)
|
||||
traceback.print_exc()
|
||||
|
|
|
@ -5,7 +5,6 @@ env_set(USE_DTRACE ON BOOL "Enable dtrace probes on supported platforms")
|
|||
env_set(USE_VALGRIND OFF BOOL "Compile for valgrind usage")
|
||||
env_set(USE_VALGRIND_FOR_CTEST ${USE_VALGRIND} BOOL "Use valgrind for ctest")
|
||||
env_set(ALLOC_INSTRUMENTATION OFF BOOL "Instrument alloc")
|
||||
env_set(USE_JEMALLOC ON BOOL "Link with jemalloc")
|
||||
env_set(USE_ASAN OFF BOOL "Compile with address sanitizer")
|
||||
env_set(USE_GCOV OFF BOOL "Compile with gcov instrumentation")
|
||||
env_set(USE_MSAN OFF BOOL "Compile with memory sanitizer. To avoid false positives you need to dynamically link to a msan-instrumented libc++ and libc++abi, which you must compile separately. See https://github.com/google/sanitizers/wiki/MemorySanitizerLibcxxHowTo#instrumented-libc.")
|
||||
|
@ -32,6 +31,14 @@ if(USE_ASAN OR USE_VALGRIND OR USE_MSAN OR USE_TSAN OR USE_UBSAN)
|
|||
set(USE_SANITIZER ON)
|
||||
endif()
|
||||
|
||||
set(jemalloc_default ON)
|
||||
# We don't want to use jemalloc on Windows
|
||||
# Nor on FreeBSD, where jemalloc is the default system allocator
|
||||
if(USE_SANITIZER OR WIN32 OR (CMAKE_SYSTEM_NAME STREQUAL "FreeBSD") OR APPLE)
|
||||
set(jemalloc_default OFF)
|
||||
endif()
|
||||
env_set(USE_JEMALLOC ${jemalloc_default} BOOL "Link with jemalloc")
|
||||
|
||||
if(USE_LIBCXX AND STATIC_LINK_LIBCXX AND NOT USE_LD STREQUAL "LLD")
|
||||
message(FATAL_ERROR "Unsupported configuration: STATIC_LINK_LIBCXX with libc++ only works if USE_LD=LLD")
|
||||
endif()
|
||||
|
|
|
@ -212,6 +212,17 @@ endif()
|
|||
|
||||
set(COROUTINE_IMPL ${DEFAULT_COROUTINE_IMPL} CACHE STRING "Which coroutine implementation to use. Options are boost and libcoro")
|
||||
|
||||
################################################################################
|
||||
# AWS SDK
|
||||
################################################################################
|
||||
|
||||
set(BUILD_AWS_BACKUP OFF CACHE BOOL "Build AWS S3 SDK backup client")
|
||||
if (BUILD_AWS_BACKUP)
|
||||
set(WITH_AWS_BACKUP ON)
|
||||
else()
|
||||
set(WITH_AWS_BACKUP OFF)
|
||||
endif()
|
||||
|
||||
################################################################################
|
||||
|
||||
file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/packages)
|
||||
|
@ -232,6 +243,7 @@ function(print_components)
|
|||
message(STATUS "Build Python sdist (make package): ${WITH_PYTHON_BINDING}")
|
||||
message(STATUS "Configure CTest (depends on Python): ${WITH_PYTHON}")
|
||||
message(STATUS "Build with RocksDB: ${WITH_ROCKSDB_EXPERIMENTAL}")
|
||||
message(STATUS "Build with AWS SDK: ${WITH_AWS_BACKUP}")
|
||||
message(STATUS "=========================================")
|
||||
endfunction()
|
||||
|
||||
|
|
|
@ -1,12 +1,5 @@
|
|||
add_library(jemalloc INTERFACE)
|
||||
|
||||
# We don't want to use jemalloc on Windows
|
||||
# Nor on FreeBSD, where jemalloc is the default system allocator
|
||||
if(USE_SANITIZER OR WIN32 OR (CMAKE_SYSTEM_NAME STREQUAL "FreeBSD") OR APPLE)
|
||||
set(USE_JEMALLOC OFF)
|
||||
return()
|
||||
endif()
|
||||
|
||||
if(NOT USE_JEMALLOC)
|
||||
return()
|
||||
endif()
|
||||
|
|
|
@ -0,0 +1,98 @@
|
|||
project(awssdk-download NONE)
|
||||
|
||||
# Compile the sdk with clang and libc++, since otherwise we get libc++ vs libstdc++ link errors when compiling fdb with clang
|
||||
set(AWSSDK_COMPILER_FLAGS "")
|
||||
set(AWSSDK_LINK_FLAGS "")
|
||||
if(APPLE OR CLANG OR USE_LIBCXX)
|
||||
set(AWSSDK_COMPILER_FLAGS -stdlib=libc++ -nostdlib++)
|
||||
set(AWSSDK_LINK_FLAGS -stdlib=libc++ -lc++abi)
|
||||
endif()
|
||||
|
||||
include(ExternalProject)
|
||||
ExternalProject_Add(awssdk_project
|
||||
GIT_REPOSITORY https://github.com/aws/aws-sdk-cpp.git
|
||||
GIT_TAG 2af3ce543c322cb259471b3b090829464f825972 # v1.9.200
|
||||
SOURCE_DIR "${CMAKE_CURRENT_BINARY_DIR}/awssdk-src"
|
||||
BINARY_DIR "${CMAKE_CURRENT_BINARY_DIR}/awssdk-build"
|
||||
GIT_CONFIG advice.detachedHead=false
|
||||
CMAKE_ARGS -DBUILD_SHARED_LIBS=OFF # SDK builds shared libs by default, we want static libs
|
||||
-DENABLE_TESTING=OFF
|
||||
-DBUILD_ONLY=core # git repo contains SDK for every AWS product, we only want the core auth libraries
|
||||
-DSIMPLE_INSTALL=ON
|
||||
-DCMAKE_INSTALL_PREFIX=install # need to specify an install prefix so it doesn't install in /usr/lib - FIXME: use absolute path
|
||||
-DBYO_CRYPTO=ON # we have our own crypto libraries that conflict if we let aws sdk build and link its own
|
||||
|
||||
|
||||
-DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
|
||||
-DCMAKE_EXE_LINKER_FLAGS=${AWSSDK_COMPILER_FLAGS}
|
||||
-DCMAKE_CXX_FLAGS=${AWSSDK_LINK_FLAGS}
|
||||
TEST_COMMAND ""
|
||||
BUILD_ALWAYS TRUE
|
||||
# the sdk build produces a ton of artifacts, with their own dependency tree, so there is a very specific dependency order they must be linked in
|
||||
BUILD_BYPRODUCTS "${CMAKE_CURRENT_BINARY_DIR}/awssdk-build/install/lib64/libaws-cpp-sdk-core.a"
|
||||
"${CMAKE_CURRENT_BINARY_DIR}/awssdk-build/install/lib64/libaws-crt-cpp.a"
|
||||
"${CMAKE_CURRENT_BINARY_DIR}/awssdk-build/install/lib64/libaws-c-s3.a"
|
||||
"${CMAKE_CURRENT_BINARY_DIR}/awssdk-build/install/lib64/libaws-c-auth.a"
|
||||
"${CMAKE_CURRENT_BINARY_DIR}/awssdk-build/install/lib64/libaws-c-event-stream.a"
|
||||
"${CMAKE_CURRENT_BINARY_DIR}/awssdk-build/install/lib64/libaws-c-http.a"
|
||||
"${CMAKE_CURRENT_BINARY_DIR}/awssdk-build/install/lib64/libaws-c-mqtt.a"
|
||||
"${CMAKE_CURRENT_BINARY_DIR}/awssdk-build/install/lib64/libaws-c-io.a"
|
||||
"${CMAKE_CURRENT_BINARY_DIR}/awssdk-build/install/lib64/libaws-checksums.a"
|
||||
"${CMAKE_CURRENT_BINARY_DIR}/awssdk-build/install/lib64/libaws-c-compression.a"
|
||||
"${CMAKE_CURRENT_BINARY_DIR}/awssdk-build/install/lib64/libaws-c-cal.a"
|
||||
"${CMAKE_CURRENT_BINARY_DIR}/awssdk-build/install/lib64/libaws-c-common.a"
|
||||
)
|
||||
|
||||
add_library(awssdk_core STATIC IMPORTED)
|
||||
add_dependencies(awssdk_core awssdk_project)
|
||||
set_target_properties(awssdk_core PROPERTIES IMPORTED_LOCATION "${CMAKE_CURRENT_BINARY_DIR}/awssdk-build/install/lib64/libaws-cpp-sdk-core.a")
|
||||
|
||||
add_library(awssdk_crt STATIC IMPORTED)
|
||||
add_dependencies(awssdk_crt awssdk_project)
|
||||
set_target_properties(awssdk_crt PROPERTIES IMPORTED_LOCATION "${CMAKE_CURRENT_BINARY_DIR}/awssdk-build/install/lib64/libaws-crt-cpp.a")
|
||||
|
||||
# TODO: can we remove c_s3? It seems to be a dependency of libaws-crt
|
||||
add_library(awssdk_c_s3 STATIC IMPORTED)
|
||||
add_dependencies(awssdk_c_s3 awssdk_project)
|
||||
set_target_properties(awssdk_c_s3 PROPERTIES IMPORTED_LOCATION "${CMAKE_CURRENT_BINARY_DIR}/awssdk-build/install/lib64/libaws-c-s3.a")
|
||||
|
||||
add_library(awssdk_c_auth STATIC IMPORTED)
|
||||
add_dependencies(awssdk_c_auth awssdk_project)
|
||||
set_target_properties(awssdk_c_auth PROPERTIES IMPORTED_LOCATION "${CMAKE_CURRENT_BINARY_DIR}/awssdk-build/install/lib64/libaws-c-auth.a")
|
||||
|
||||
add_library(awssdk_c_eventstream STATIC IMPORTED)
|
||||
add_dependencies(awssdk_c_eventstream awssdk_project)
|
||||
set_target_properties(awssdk_c_eventstream PROPERTIES IMPORTED_LOCATION "${CMAKE_CURRENT_BINARY_DIR}/awssdk-build/install/lib64/libaws-c-event-stream.a")
|
||||
|
||||
add_library(awssdk_c_http STATIC IMPORTED)
|
||||
add_dependencies(awssdk_c_http awssdk_project)
|
||||
set_target_properties(awssdk_c_http PROPERTIES IMPORTED_LOCATION "${CMAKE_CURRENT_BINARY_DIR}/awssdk-build/install/lib64/libaws-c-http.a")
|
||||
|
||||
add_library(awssdk_c_mqtt STATIC IMPORTED)
|
||||
add_dependencies(awssdk_c_mqtt awssdk_project)
|
||||
set_target_properties(awssdk_c_mqtt PROPERTIES IMPORTED_LOCATION "${CMAKE_CURRENT_BINARY_DIR}/awssdk-build/install/lib64/libaws-c-mqtt.a")
|
||||
|
||||
add_library(awssdk_c_io STATIC IMPORTED)
|
||||
add_dependencies(awssdk_c_io awssdk_project)
|
||||
set_target_properties(awssdk_c_io PROPERTIES IMPORTED_LOCATION "${CMAKE_CURRENT_BINARY_DIR}/awssdk-build/install/lib64/libaws-c-io.a")
|
||||
|
||||
add_library(awssdk_checksums STATIC IMPORTED)
|
||||
add_dependencies(awssdk_checksums awssdk_project)
|
||||
set_target_properties(awssdk_checksums PROPERTIES IMPORTED_LOCATION "${CMAKE_CURRENT_BINARY_DIR}/awssdk-build/install/lib64/libaws-checksums.a")
|
||||
|
||||
add_library(awssdk_c_compression STATIC IMPORTED)
|
||||
add_dependencies(awssdk_c_compression awssdk_project)
|
||||
set_target_properties(awssdk_c_compression PROPERTIES IMPORTED_LOCATION "${CMAKE_CURRENT_BINARY_DIR}/awssdk-build/install/lib64/libaws-c-compression.a")
|
||||
|
||||
add_library(awssdk_c_cal STATIC IMPORTED)
|
||||
add_dependencies(awssdk_c_cal awssdk_project)
|
||||
set_target_properties(awssdk_c_cal PROPERTIES IMPORTED_LOCATION "${CMAKE_CURRENT_BINARY_DIR}/awssdk-build/install/lib64/libaws-c-cal.a")
|
||||
|
||||
add_library(awssdk_c_common STATIC IMPORTED)
|
||||
add_dependencies(awssdk_c_common awssdk_project)
|
||||
set_target_properties(awssdk_c_common PROPERTIES IMPORTED_LOCATION "${CMAKE_CURRENT_BINARY_DIR}/awssdk-build/install/lib64/libaws-c-common.a")
|
||||
|
||||
# link them all together in one interface target
|
||||
add_library(awssdk_target INTERFACE)
|
||||
target_include_directories(awssdk_target SYSTEM INTERFACE ${CMAKE_CURRENT_BINARY_DIR}/awssdk-build/install/include)
|
||||
target_link_libraries(awssdk_target INTERFACE awssdk_core awssdk_crt awssdk_c_s3 awssdk_c_auth awssdk_c_eventstream awssdk_c_http awssdk_c_mqtt awssdk_c_io awssdk_checksums awssdk_c_compression awssdk_c_cal awssdk_c_common curl)
|
|
@ -346,7 +346,7 @@ function createDatabase
|
|||
|
||||
# Configure the database.
|
||||
else
|
||||
"${BINDIR}/fdbcli" -C "${FDBCONF}" --exec 'configure new single memory; status' --timeout "${CONFIGUREWAIT}" --log --log-dir "${LOGDIR}" &>> "${LOGDIR}/fdbclient.log"
|
||||
"${BINDIR}/fdbcli" -C "${FDBCONF}" --exec 'configure new single memory tenant_mode=optional_experimental; status' --timeout "${CONFIGUREWAIT}" --log --log-dir "${LOGDIR}" &>> "${LOGDIR}/fdbclient.log"
|
||||
|
||||
if ! displayMessage "Checking if config succeeded"
|
||||
then
|
||||
|
|
|
@ -359,7 +359,7 @@ namespace SummarizeTest
|
|||
}
|
||||
|
||||
int result = 0;
|
||||
bool unseedCheck = random.NextDouble() < unseedRatio;
|
||||
bool unseedCheck = !noSim && random.NextDouble() < unseedRatio;
|
||||
for (int i = 0; i < maxTries; ++i)
|
||||
{
|
||||
bool logOnRetryableError = i == maxTries - 1;
|
||||
|
@ -829,17 +829,12 @@ namespace SummarizeTest
|
|||
if (ev.DDetails.ContainsKey("FaultInjectionEnabled"))
|
||||
xout.Add(new XAttribute("FaultInjectionEnabled", ev.Details.FaultInjectionEnabled));
|
||||
}
|
||||
if (ev.Type == "Simulation")
|
||||
if (ev.Type == "Simulation" || ev.Type == "NonSimulationTest")
|
||||
{
|
||||
xout.Add(
|
||||
new XAttribute("TestFile", ev.Details.TestFile));
|
||||
testFile = ev.Details.TestFile.Substring(ev.Details.TestFile.IndexOf("tests"));
|
||||
}
|
||||
if (ev.Type == "ActualRun")
|
||||
{
|
||||
xout.Add(
|
||||
new XAttribute("TestFile", ev.Details.RunID));
|
||||
}
|
||||
if (ev.Type == "ElapsedTime" && !testEndFound)
|
||||
{
|
||||
testEndFound = true;
|
||||
|
|
|
@ -0,0 +1,100 @@
|
|||
#!/usr/bin/env python3
|
||||
#
|
||||
# alloc_instrumentation_traces.py
|
||||
#
|
||||
# This source file is part of the FoundationDB open source project
|
||||
#
|
||||
# Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
|
||||
"""
|
||||
Example trace:
|
||||
{ "Severity": "10", "Time": "194.878474", "DateTime": "2022-02-01T16:28:27Z", "Type": "MemSample", "Machine": "2.1.1.0:2", "ID": "0000000000000000", "Count": "943", "TotalSize": "540000000", "SampleCount": "54", "Hash": "980074757", "Bt": "addr2line -e fdbserver.debug -p -C -f -i 0x1919b72 0x3751d43 0x37518cc 0x19930f8 0x199dac3 0x1999e7c 0x21a1061 0x31e8fc5 0x31e784a 0x10ab3a8 0x36bf4c6 0x36bf304 0x36beea4 0x36bf352 0x36bfa1c 0x10ab3a8 0x37b22fe 0x37a16ee 0x368c754 0x19202d5 0x7fb3fe2d6555 0x1077029", "ThreadID": "10074331651862410074", "LogGroup": "default" }
|
||||
"""
|
||||
|
||||
|
||||
# This program analyzes MemSample trace events produced by setting ALLOC_INSTRUMENTATION in FastAlloc.h
|
||||
# It outputs the top memory users by total size as well as number of allocations.
|
||||
|
||||
# Example usage: cat trace.* | ./alloc_instrumentation_traces.py
|
||||
|
||||
import sys
|
||||
import json
|
||||
|
||||
byCnt = []
|
||||
bySize = []
|
||||
totalSize = 0
|
||||
|
||||
lastTimestamp = ""
|
||||
|
||||
for line in sys.stdin:
|
||||
ev = json.loads(line.rstrip())
|
||||
type = ev["Type"]
|
||||
|
||||
if (type != 'MemSample'):
|
||||
continue
|
||||
bt = ev["Bt"]
|
||||
|
||||
if (bt == "na"):
|
||||
continue
|
||||
|
||||
timestamp = ev["Time"]
|
||||
cnt = int(ev["Count"])
|
||||
scnt = int(ev["SampleCount"])
|
||||
size = int(ev["TotalSize"])
|
||||
h = ev["Hash"]
|
||||
|
||||
if (timestamp != lastTimestamp):
|
||||
byCnt = []
|
||||
bySize = []
|
||||
totalSize = 0
|
||||
lastTimestamp = timestamp
|
||||
|
||||
|
||||
# print(str(cnt) + " " + str(scnt) + " " + str(size) + " " + h)
|
||||
|
||||
byCnt.append( (cnt, scnt, size, h, bt) )
|
||||
bySize.append( (size, cnt, size, h, bt) )
|
||||
totalSize += size
|
||||
|
||||
byCnt.sort(reverse=True)
|
||||
bySize.sort(reverse=True)
|
||||
|
||||
btByHash = {}
|
||||
|
||||
byte_suffix = ["Bytes", "KB", "MB", "GB", "TB"]
|
||||
def byte_str(bytes):
|
||||
suffix_idx = 0
|
||||
while (bytes >= 1024 * 10):
|
||||
suffix_idx += 1
|
||||
bytes //= 1024
|
||||
return str(bytes) + ' ' + byte_suffix[suffix_idx]
|
||||
|
||||
print("By Size")
|
||||
print("-------\r\n")
|
||||
for x in bySize[:10]:
|
||||
# print(str(x[0]) + ": " + x[3])
|
||||
print(str(x[1]) + " / " + byte_str(x[0]) + " (" + byte_str(x[0] // x[1]) + " per alloc):\r\n" + x[4] + "\r\n")
|
||||
btByHash[x[3]] = x[4]
|
||||
|
||||
print()
|
||||
print("By Count")
|
||||
print("--------\r\n")
|
||||
for x in byCnt[:5]:
|
||||
# print(str(x[0]) + ": " + x[3])
|
||||
print(str(x[0]) + " / " + byte_str(x[2]) + " (" + byte_str(x[2] // x[0]) + " per alloc):\r\n" + x[4] + "\r\n")
|
||||
btByHash[x[3]] = x[4]
|
||||
|
|
@ -0,0 +1,237 @@
|
|||
# FDB Data-at-Rest Encryption
|
||||
|
||||
## Threat Model
|
||||
|
||||
The proposed solution protects against the following attacks:
|
||||
|
||||
* An attacker, if able to get access to any FDB cluster host or attached disk, would not be able to read the persisted data. Further, for cloud deployments, returning a cloud instance back to the cloud provider will prevent the cloud provider from reading the contents of data stored on the disk.
|
||||
|
||||
* Data stored on a lost or stolen FDB host persistent disk storage device can’t be recovered.
|
||||
|
||||
The proposed solution does not protect against the following attacks:
|
||||
|
||||
* Encryption is enabled for data at-rest only; an attacker able to capture a memory dump of FDB processes could still read in-memory data contents.
|
||||
* If access to an FDB cluster host is compromised, an attacker could read and write data managed by the FDB cluster.
|
||||
|
||||
## Goals
|
||||
|
||||
FoundationDB is a multi-model, easily scalable, and fault-tolerant database that delivers strong performance even on commodity hardware; it plays a critical role in enabling enterprises to deploy, manage, and run mission-critical applications.
|
||||
|
||||
Data encryption support is a table-stakes feature for modern enterprise service offerings in the cloud. Customers expect, and at times require, that their data and metadata be fully encrypted using the latest security standards. The goals of this document include:
|
||||
|
||||
* Discuss detailed design to support data at-rest encryption support for data stored in FDB clusters. Encrypting data in-transit and/or in-memory caches at various layers in the query execution pipeline (inside and external to FDB) is out of the scope of this feature.
|
||||
|
||||
* Isolation guarantees: the encryption domain matches the `tenant` partition semantics supported by FDB clusters. Tenants are discrete namespaces in FDB that serve as transaction domains. A tenant is an `identifier` that maps to a `prefix` within the data-FDB cluster, and all operations within a tenant are implicitly bound to a `tenant-prefix`. Refer to the `Multi-Tenant FoundationDB API` documentation for more details. However, it is possible to use a single encryption key for the whole cluster in case `tenant partitioning` isn’t available.
|
||||
|
||||
* Ease of integration with external Key Management Services, enabling persistence, caching, and lookup of encryption keys.
|
||||
|
||||
## Config Knobs
|
||||
|
||||
* `ServerKnob::ENABLE_ENCRYPTION` enables or disables the encryption feature.
|
||||
* `ServerKnob::ENCRYPTION_MODE` controls the encryption mode supported. The current scheme supports `AES-256-CTR` encryption mode.
|
||||
|
||||
## Encryption Mode
|
||||
|
||||
The proposal is to use strong AES-256 CTR encryption mode. Salient properties are:
|
||||
|
||||
* An HMAC-SHA256 key-hashing technique is used to derive encryption keys from a base encryption key and a locally generated random number (see the sketch after this list). The formula used is as follows:
|
||||
|
||||
```
|
||||
DEK = HMAC SHA256(BEK || UID)
|
||||
|
||||
Where
|
||||
DEK = Derived Encryption Key
|
||||
BEK = Base Encryption key
|
||||
UID = Host local random generated number
|
||||
```
|
||||
|
||||
UID is an 8 byte host-local random number. Another option would have been a simple host-local incrementing counter, however, the scheme runs the risk of repeated encryption-key generation on cluster/process restarts.
|
||||
|
||||
* An encryption key derived using the above formula is cached in-memory for a short time interval (10 minutes, for instance). The derived encryption key is immutable, but the TTL approach allows the key to be refreshed by reaching out to the external Key Management solution, thereby supporting a "restricted encryption-key lifetime" feature if the Key Management solution implements one.
|
||||
|
||||
* Initialization Vector (IV) selection would be random.
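The following is a minimal sketch of the key-derivation step described above, using Python's standard `hmac` and `hashlib` modules. It is illustrative only: treating the BEK as the HMAC key and the UID as the message is one plausible reading of `HMAC SHA256(BEK || UID)`, not the actual FDB implementation.

```python
import hashlib
import hmac
import os

def derive_encryption_key(base_encryption_key: bytes):
    """Illustrative DEK derivation: DEK = HMAC-SHA256(BEK, UID)."""
    uid = os.urandom(8)  # 8-byte host-local random number
    dek = hmac.new(base_encryption_key, uid, hashlib.sha256).digest()  # 32 bytes -> AES-256 key
    return dek, uid  # the UID must be kept so the same DEK can be re-derived on reads
```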
|
||||
|
||||
## Architecture
|
||||
|
||||
The encryption responsibilities are split across multiple modules to ensure that data and metadata stored in the cluster are never persisted in plaintext on any durable storage (temporary or long-term).
|
||||
|
||||
## Encryption Request Workflow
|
||||
|
||||
### **Write Request**
|
||||
|
||||
* An FDB client initiates a write transaction providing {key, value} in plaintext format.
|
||||
* An FDB cluster host as part of processing a write transaction would do the following:
|
||||
1. Obtain required encryption key based on the transaction request tenant information.
|
||||
2. Encrypt mutations before persisting them on Transaction Logs (TLogs). As a background process, the mutations are moved to a long-term durable storage by the Storage Server processes.
|
||||
|
||||
Refer to the sections below for more details.
|
||||
|
||||
### **Read Request**
|
||||
|
||||
* An FDB client initiates a read transaction request.
|
||||
* An FDB cluster host as part of processing request would do the following:
|
||||
1. StorageServer would read desired data blocks from the persistent storage.
|
||||
2. Regenerate the encryption key required to decrypt the data.
|
||||
3. Decrypt data and pass results as plaintext to the caller.
|
||||
|
||||
|
||||
The diagram below depicts the end-to-end encryption workflow, detailing the modules involved and their interactions. The following sections discuss the detailed design of each component.
|
||||
|
||||
```
|
||||
_______________________________________________________
|
||||
| FDB CLUSER HOST |
|
||||
| |
|
||||
_____________________ | ________________________ _________________ |
|
||||
| | (proprietary) | | | | |
|
||||
| |<---------- |--| KMS CONNECTOR | | COMMIT PROXIES | |
|
||||
| ENCRYPTION KEY | | | | | | |
|
||||
| MANAGEMENT SOLUTION | | |(non FDB - proprietary) | | | |
|
||||
| | | |________________________| |_________________| |
|
||||
| | | ^ | |
|
||||
|_____________________| | | (REST API) | (Encrypt |
|
||||
| | V Mutation) |
|
||||
| _________________________________________ | __________________
|
||||
| | | | | |
|
||||
| | ENCRYPT KEYPROXY SERVER |<------|-----------| |
|
||||
| |_________________________________________| | | |
|
||||
| | | | BACKUP FILES |
|
||||
| | (Encrypt Node) | | |
|
||||
| V | | |
|
||||
| _________________________________________ | | (Encrypt file) |
|
||||
| | |<------|-----------| |
|
||||
| | REDWOOD STORAGE SERVER | | |__________________|
|
||||
| |_________________________________________| |
|
||||
|_______________________________________________________|
|
||||
```
|
||||
|
||||
## FDB Encryption
|
||||
|
||||
An FDB client inserts data, i.e. plaintext {key, value} pairs, into an FDB cluster for persistence.
|
||||
|
||||
### KMS-Connector
|
||||
|
||||
A non-FDB process running on FDB cluster hosts that enables an FDB cluster to interact with external Key Management services. Salient features include:
|
||||
|
||||
* An external (non-FDB) standalone process implementing a REST server.
|
||||
|
||||
* Abstracts organization specific KeyManagementService integration details. The proposed design ensures ease of integration given limited infrastructure needed to implement a local/remote REST server.
|
||||
|
||||
* Ensure organization specific code is implemented outside the FDB codebase.
|
||||
|
||||
* The KMS-Connector process is launched and maintained by the FDBMonitor. The process needs to handle the following REST endpoint:
|
||||
1. GET - http://localhost/getEncryptionKey
|
||||
|
||||
Define a single interface returning an "encryption key string in plaintext" and accepting a
|
||||
JSON input which can be customized as needed:
|
||||
|
||||
```json
|
||||
json_input_payload
|
||||
{
|
||||
"Version" : int // version
|
||||
"KeyId" : keyId // string
|
||||
}
|
||||
```
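A hypothetical client for this endpoint might look as follows. This is a sketch only: the endpoint URL and field names come from the definition above, while the use of the `requests` library and the exact transport details are assumptions.

```python
import requests

def fetch_base_encryption_key(key_id):
    # Illustrative KMS-Connector lookup; returns the plaintext encryption key string.
    payload = {"Version": 1, "KeyId": key_id}
    response = requests.get("http://localhost/getEncryptionKey", json=payload, timeout=5)
    response.raise_for_status()
    return response.text
```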
|
||||
|
||||
A few benefits of the above proposed scheme are:
|
||||
* JSON input format is extensible (adding new fields is backward compatible).
|
||||
|
||||
* Popular cloud KMS "getPublicKey" APIs accept "keyId" as a string, hence the API should be easy to integrate.
|
||||
|
||||
1. AWS: https://docs.aws.amazon.com/cli/latest/reference/kms/get-public-key.html
|
||||
2. GCP: https://cloud.google.com/kms/docs/retrieve-public-key
|
||||
|
||||
`Future improvements`: FDBMonitor at present will launch one KMS-Connector process per FDB cluster host. Though multiple KMS-Connector processes are launched, only the one collocated with the Encrypt KeyServer would consume cluster resources. In the future, possible enhancements include:
|
||||
|
||||
* Enable FDBMonitor to launch “N” (configurable) processes per cluster.
|
||||
* Enable the FDB cluster to manage external processes as well.
|
||||
|
||||
### Encrypt KeyServer
|
||||
|
||||
Salient features include:
|
||||
|
||||
* New FDB role/process to allow fetching of encryption keys from external KeyManagementService interfaces. The process connects to the KMS-Connector REST interface to fetch desired encryption keys.
|
||||
|
||||
* On an encryption-key fetch from the KMS-Connector, it applies the HMAC derivation function to generate a new encryption key and caches it in-memory. The in-memory cache is used to serve encryption-key fetch requests from other FDB processes.
|
||||
|
||||
|
||||
Given that encryption keys will be needed as part of cluster recovery, this process/role needs to be recruited at the start of the cluster-recovery process (just after the "master/sequencer" process/role recruitment). All other FDB processes will interact with this process to obtain the encryption keys needed to encrypt and/or decrypt the data payload.
|
||||
|
||||
`Note`: An alternative would be to incorporate the functionality into the ClusterController process itself; however, a clear separation of responsibilities makes the design more flexible and extensible in the future if needed.
|
||||
|
||||
### Commit Proxies (CPs)
|
||||
|
||||
When an FDB client initiates a write transaction to insert/update data stored in an FDB cluster, the transaction is received by a CP, which resolves the transaction by checking whether it is allowed. If allowed, it commits the transaction to TLogs. The proposal is to extend CP responsibilities by encrypting mutations using the desired encryption key before the mutations get persisted to TLogs (durable storage). The encryption key derivation is achieved using the following formula:
|
||||
|
||||
```
|
||||
DEK = HMAC SHA256(BEK || UID)
|
||||
|
||||
Where:
|
||||
|
||||
DEK = Derived Encryption Key
|
||||
BEK = Base Encryption Key
|
||||
UID = Host local random generated number
|
||||
```
|
||||
|
||||
The Transaction State Store (commonly referred to as the TxnStateStore) is a key-value datastore used by FDB to store metadata about the database itself for bootstrap purposes. The data stored in this store plays a critical role in guiding the transaction system to persist writes (assigning storage tags to mutations at CPs) and in managing FDB internal data movement. The TxnStateStore data gets encrypted with the desired encryption key before getting persisted on the disk queues.
|
||||
|
||||
As part of encryption, every Mutation is appended with a plaintext `BlobCipherEncryptHeader` to assist in decrypting the data for reads.
|
||||
|
||||
CPs would cache (in-memory) recently used encryption-keys to optimize network traffic due to encryption related operations. Further, the caching would improve overall performance, avoiding frequent RPC calls to EncryptKeyServer which may eventually become a scalability bottleneck. Each encryption-key in the cache has a short Time-To-Live (10 mins) and on expiry the process will interact with the EncryptKeyServer to fetch the required encryption-keys. The same caching policy is followed by the Redwood Storage Server and the Backup File processes too.
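As an illustration of the caching policy described above, a minimal TTL cache might look like the sketch below. The names are assumed for illustration; the real CP and StorageServer caches are implemented in C++ inside FDB.

```python
import time

class EncryptKeyCache:
    """Illustrative in-memory encryption-key cache with a short TTL."""

    def __init__(self, fetch_fn, ttl_seconds=600):  # 10-minute TTL, as described above
        self._fetch = fetch_fn        # e.g. an RPC to the Encrypt KeyServer
        self._ttl = ttl_seconds
        self._cache = {}              # key_id -> (encryption_key, expiry_time)

    def get(self, key_id):
        entry = self._cache.get(key_id)
        if entry is not None and entry[1] > time.monotonic():
            return entry[0]           # cache hit, key not yet expired
        key = self._fetch(key_id)     # on miss or expiry, refresh from the Encrypt KeyServer
        self._cache[key_id] = (key, time.monotonic() + self._ttl)
        return key
```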
|
||||
|
||||
### **Caveats**
|
||||
|
||||
Encryption is done inline in the transaction path, which will increase total commit latency. A few possible ways to minimize this impact are:
|
||||
|
||||
* Overlap encryption operations with the CP::resolution phase, which would minimize the latency penalty per transaction at the cost of spending more CPU cycles. If needed, for production deployments, we may need to increase the number of CPs per FDB cluster.
|
||||
* Implement an external process to offload encryption. If done, encryption would appear no different than the CP::resolution phase, where the process would invoke RPC calls to encrypt the buffer and wait for operation completion.
|
||||
|
||||
### Storage Servers
|
||||
|
||||
The encryption design currently supports only Redwood Storage Server integration; support for other storage engines is yet to be planned.
|
||||
|
||||
### Redwood Storage Nodes
|
||||
|
||||
Redwood at heart is a B+ tree and stores data in two types of nodes:
|
||||
|
||||
* `Non-leaf` nodes: Nodes only store keys and not values (prefix compression is applied).
|
||||
* `Leaf` Nodes: Will store `{key, value}` tuples for a given key-range.
|
||||
|
||||
Both of the above-mentioned node types will be converted into one or more fixed-size pages (likely 4K or 8K) before being persisted on durable storage. Encryption will be performed at the node level instead of the page level, i.e. all pages constituting a given Redwood node will be encrypted using the same encryption key, generated using the following formula:
|
||||
|
||||
```
|
||||
DEK = HMAC SHA256(BEK || UID)
|
||||
|
||||
Where:
|
||||
|
||||
DEK = Derived Encryption Key
|
||||
BEK = Base Encryption Key
|
||||
UID = Host local random generated number
|
||||
```
|
||||
|
||||
### Backup Files
|
||||
|
||||
Backup files are designed to hold committed mutations pulled from StorageServers and are persisted as "files" on cloud-backed blob storage such as Amazon S3. Each persisted file stores mutations for a given key-range and will be encrypted with an encryption key generated using the formula below:
|
||||
|
||||
```
|
||||
DEK = HMAC SHA256(BEK || FID)
|
||||
|
||||
Where:
|
||||
|
||||
DEK = Derived Encryption Key
|
||||
BEK = Base Encryption Key
|
||||
FID = File Identifier (unique)
|
||||
```
|
||||
|
||||
## Decryption on Reads
|
||||
|
||||
To assist reads, FDB processes (StorageServers, backup workers) will be modified to read and parse the encryption header. Data decryption will be done as follows:
|
||||
|
||||
* The FDB process will interact with Encrypt KeyServer to fetch the desired base encryption key corresponding to the key-id persisted in the encryption header.
|
||||
* Reconstruct the encryption key and decrypt the data block, as sketched below.
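For illustration, an AES-256-CTR round-trip using the third-party Python `cryptography` package is sketched below. The actual FDB BlobCipher implementation is in C++ and differs in detail (for example, it carries the IV and key identifiers in the `BlobCipherEncryptHeader`).

```python
import os
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes

def encrypt_block(dek: bytes, plaintext: bytes):
    iv = os.urandom(16)  # random IV, per the encryption mode described earlier
    encryptor = Cipher(algorithms.AES(dek), modes.CTR(iv)).encryptor()
    return iv, encryptor.update(plaintext) + encryptor.finalize()

def decrypt_block(dek: bytes, iv: bytes, ciphertext: bytes):
    decryptor = Cipher(algorithms.AES(dek), modes.CTR(iv)).decryptor()
    return decryptor.update(ciphertext) + decryptor.finalize()
```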
|
||||
|
||||
## Future Work
|
||||
|
||||
* Extend the TLog API to allow clients to read "plaintext mutations" directly from a TLogServer. In the current implementation there are two consumers of TLogs:
|
||||
|
||||
1. Storage Server: At present the plan is for StorageServer to decrypt the mutations.
|
||||
2. BackupWorker (Apple implementation), which is currently not used in the code.
|
|
@ -3,3 +3,4 @@ setuptools>=20.10.0,<=57.4.0
|
|||
sphinx==1.5.6
|
||||
sphinx-bootstrap-theme==0.4.8
|
||||
docutils==0.16
|
||||
Jinja2==3.0.3
|
||||
|
|
|
@ -7,7 +7,7 @@
|
|||
.. |database-type| replace:: ``Database``
|
||||
.. |database-class| replace:: :class:`Database`
|
||||
.. |database-auto| replace:: the :func:`@fdb.transactional <transactional>` decorator
|
||||
.. |tenant-type| replace:: FIXME
|
||||
.. |tenant-type| replace:: :class:`Tenant`
|
||||
.. |transaction-class| replace:: :class:`Transaction`
|
||||
.. |get-key-func| replace:: :func:`Transaction.get_key`
|
||||
.. |get-range-func| replace:: :func:`Transaction.get_range`
|
||||
|
@ -316,9 +316,29 @@ A |database-blurb1| |database-blurb2|
|
|||
|
||||
Returns a new :class:`Transaction` object. Consider using the :func:`@fdb.transactional <transactional>` decorator to create transactions instead, since it will automatically provide you with appropriate retry behavior.
|
||||
|
||||
.. method:: Database.open_tenant(tenant_name)
|
||||
|
||||
Opens an existing tenant to be used for running transactions and returns it as a :class:`Tenant` object.
|
||||
|
||||
The tenant name can be either a byte string or a tuple. If a tuple is provided, the tuple will be packed using the tuple layer to generate the byte string tenant name.
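For example, a minimal usage sketch (the tenant name ``example_tenant`` is illustrative)::

    db.allocate_tenant(b'example_tenant')
    tenant = db.open_tenant(b'example_tenant')
    tenant[b'greeting'] = b'hello'
    print(tenant[b'greeting'])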
|
||||
|
||||
.. |sync-read| replace:: This read is fully synchronous.
|
||||
.. |sync-write| replace:: This change will be committed immediately, and is fully synchronous.
|
||||
|
||||
.. method:: Database.allocate_tenant(tenant_name)
|
||||
|
||||
Creates a new tenant in the cluster. |sync-write|
|
||||
|
||||
The tenant name can be either a byte string or a tuple and cannot start with the ``\xff`` byte. If a tuple is provided, the tuple will be packed using the tuple layer to generate the byte string tenant name.
|
||||
|
||||
.. method:: Database.delete_tenant(tenant_name):
|
||||
|
||||
Deletes a tenant from the cluster. |sync-write|
|
||||
|
||||
The tenant name can be either a byte string or a tuple. If a tuple is provided, the tuple will be packed using the tuple layer to generate the byte string tenant name.
|
||||
|
||||
It is an error to delete a tenant that still has data. To delete a non-empty tenant, first clear all of the keys in the tenant.
|
||||
|
||||
.. method:: Database.get(key)
|
||||
|
||||
Returns the value associated with the specified key in the database (or ``None`` if the key does not exist). |sync-read|
|
||||
|
@ -460,6 +480,17 @@ Database options
|
|||
.. method:: Database.options.set_snapshot_ryw_disable()
|
||||
|
||||
|option-db-snapshot-ryw-disable-blurb|
|
||||
|
||||
Tenant objects
|
||||
==============
|
||||
|
||||
.. class:: Tenant
|
||||
|
||||
|tenant-blurb1|
|
||||
|
||||
.. method:: Tenant.create_transaction()
|
||||
|
||||
Returns a new :class:`Transaction` object. Consider using the :func:`@fdb.transactional <transactional>` decorator to create transactions instead, since it will automatically provide you with appropriate retry behavior.
|
||||
|
||||
.. _api-python-transactional-decorator:
|
||||
|
||||
|
@ -479,9 +510,9 @@ Transactional decoration
|
|||
|
||||
The ``@fdb.transactional`` decorator makes ``simple_function`` a transactional function. All functions using this decorator must have an argument **named** ``tr``. This specially named argument is passed a transaction that the function can use to do reads and writes.
|
||||
|
||||
A caller of a transactionally decorated function can pass a :class:`Database` instead of a transaction for the ``tr`` parameter. Then a transaction will be created automatically, and automatically committed before returning to the caller. The decorator will retry calling the decorated function until the transaction successfully commits.
|
||||
A caller of a transactionally decorated function can pass a :class:`Database` or :class:`Tenant` instead of a transaction for the ``tr`` parameter. Then a transaction will be created automatically, and automatically committed before returning to the caller. The decorator will retry calling the decorated function until the transaction successfully commits.
|
||||
|
||||
If ``db`` is a :class:`Database`, a call like ::
|
||||
If ``db`` is a :class:`Database` or :class:`Tenant`, a call like ::
|
||||
|
||||
simple_function(db, 'a', 'b')
|
||||
|
||||
|
@ -744,7 +775,7 @@ Committing
|
|||
|
||||
.. decorator:: transactional()
|
||||
|
||||
The ``transactional`` decorator makes it easy to write transactional functions which accept either a :class:`Database` or a :class:`Transaction` as a parameter and automatically commit. See :func:`@fdb.transactional <transactional>` for explanation and examples.
|
||||
The ``transactional`` decorator makes it easy to write transactional functions which accept a :class:`Database`, :class:`Tenant`, or :class:`Transaction` as a parameter and automatically commit. See :func:`@fdb.transactional <transactional>` for explanation and examples.
|
||||
|
||||
.. method :: Transaction.commit()

@@ -754,7 +785,7 @@ Committing

|commit-outstanding-reads-blurb|

.. note :: Consider using the :func:`@fdb.transactional <transactional>` decorator, which not only calls :meth:`Database.create_transaction` and :meth:`Transaction.commit()` for you but also implements the required error handling and retry logic for transactions.
.. note :: Consider using the :func:`@fdb.transactional <transactional>` decorator, which not only calls :meth:`Database.create_transaction` or :meth:`Tenant.create_transaction` and :meth:`Transaction.commit()` for you but also implements the required error handling and retry logic for transactions.

.. warning :: |used-during-commit-blurb|

@@ -155,6 +155,12 @@ Here is a complete list of valid parameters:

**Example**: The URL parameter *header=x-amz-storage-class:REDUCED_REDUNDANCY* would send the HTTP header required to use the reduced redundancy storage option in the S3 API.

Signing Protocol
================

AWS signature version 4 is the default signing protocol. The boolean knob ``--knob_http_request_aws_v4_header`` selects between v4-style and v2-style signatures.
If the knob is set to ``true``, v4 signatures are used; if it is set to ``false``, v2 signatures are used.

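As a rough illustration (an assumption, not taken from this diff: the exact agent invocation and backup URL are placeholders), the knob could be passed on the command line like::

    fdbbackup start -d <backup_url> --knob_http_request_aws_v4_header false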
.. _blob-credential-files:

Blob Credential Files

@@ -153,6 +153,27 @@ If ``description=<DESC>`` is specified, the description field in the cluster fil

For more information on setting the cluster description, see :ref:`configuration-setting-cluster-description`.

createtenant
------------

The ``createtenant`` command is used to create new tenants in the cluster. Its syntax is ``createtenant <TENANT_NAME>``.

The tenant name can be any byte string that does not begin with the ``\xff`` byte. If the tenant already exists, ``fdbcli`` will report an error.

defaulttenant
-------------

The ``defaulttenant`` command configures ``fdbcli`` to run its commands without a tenant. This is the default behavior.

The active tenant cannot be changed while a transaction (using ``begin``) is open.

deletetenant
------------

The ``deletetenant`` command is used to delete tenants from the cluster. Its syntax is ``deletetenant <TENANT_NAME>``.

In order to delete a tenant, it must be empty. To delete a tenant with data, first clear that data using the ``clear`` command. If the tenant does not exist, ``fdbcli`` will report an error.

exclude
-------

@@ -210,6 +231,13 @@ The ``getrangekeys`` command fetches keys in a range. Its syntax is ``getrangeke

Note that :ref:`characters can be escaped <cli-escaping>` when specifying keys (or values) in ``fdbcli``.

gettenant
---------

The ``gettenant`` command fetches metadata for a given tenant and displays it. Its syntax is ``gettenant <TENANT_NAME>``.

Included in the output of this command are the ``id`` and ``prefix`` assigned to the tenant. If the tenant does not exist, ``fdbcli`` will report an error.

getversion
----------

@@ -300,6 +328,13 @@ Attempts to kill all specified processes. Each address should include the IP and

Attempts to kill all known processes in the cluster.

listtenants
-----------

The ``listtenants`` command prints the names of tenants in the cluster. Its syntax is ``listtenants [BEGIN] [END] [LIMIT]``.

By default, the ``listtenants`` command will print up to 100 entries from the entire range of tenants. A narrower sub-range can be printed using the optional ``[BEGIN]`` and ``[END]`` parameters, and the limit can be changed by specifying an integer ``[LIMIT]`` parameter.

lock
----

@@ -512,6 +547,17 @@ unlock

The ``unlock`` command unlocks the database with the specified lock UID. Because this is a potentially dangerous operation, users must copy a passphrase before the unlock command is executed.

usetenant
---------

The ``usetenant`` command configures ``fdbcli`` to run transactions within the specified tenant. Its syntax is ``usetenant <TENANT_NAME>``.

When configured, transactions will read and write keys from the key-space associated with the specified tenant. By default, ``fdbcli`` runs without a tenant. Management operations that modify keys (e.g. ``exclude``) will not operate within the tenant.

If the tenant chosen does not exist, ``fdbcli`` will report an error.

The active tenant cannot be changed while a transaction (using ``begin``) is open.

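A hedged end-to-end illustration of these commands in an interactive ``fdbcli`` session (output omitted, since exact formatting is not shown in this diff; the tenant name is arbitrary)::

    fdb> createtenant example-tenant
    fdb> usetenant example-tenant
    fdb> writemode on
    fdb> set foo bar
    fdb> get foo
    fdb> clear foo
    fdb> defaulttenant
    fdb> deletetenant example-tenant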
writemode
---------

@@ -38,6 +38,8 @@ The latest changes are detailed in :ref:`release-notes`. The documentation has t

* :doc:`administration` contains documentation on administering FoundationDB.

* :doc:`monitored-metrics` contains documentation on monitoring and alerting for FoundationDB.

* :doc:`redwood` contains documentation on the Redwood Storage Engine.

* :doc:`visibility` contains documentation related to Visibility into FoundationDB.

@@ -55,6 +57,7 @@ The latest changes are detailed in :ref:`release-notes`. The documentation has t
api-reference
tutorials
administration
monitored-metrics
redwood
visibility
earlier-release-notes

@@ -4,7 +4,7 @@
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
* Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.

@@ -238,7 +238,7 @@ ACTOR Future<Void> echoClient() {
    return Void();
}

struct SimpleKeyValueStoreInteface {
struct SimpleKeyValueStoreInterface {
    constexpr static FileIdentifier file_identifier = 8226647;
    RequestStream<struct GetKVInterface> connect;
    RequestStream<struct GetRequest> get;

@@ -253,7 +253,7 @@ struct SimpleKeyValueStoreInteface {

struct GetKVInterface {
    constexpr static FileIdentifier file_identifier = 8062308;
    ReplyPromise<SimpleKeyValueStoreInteface> reply;
    ReplyPromise<SimpleKeyValueStoreInterface> reply;

    template <class Ar>
    void serialize(Ar& ar) {

@@ -297,7 +297,7 @@ struct ClearRequest {
};

ACTOR Future<Void> kvStoreServer() {
    state SimpleKeyValueStoreInteface inf;
    state SimpleKeyValueStoreInterface inf;
    state std::map<std::string, std::string> store;
    inf.connect.makeWellKnownEndpoint(WLTOKEN_SIMPLE_KV_SERVER, TaskPriority::DefaultEndpoint);
    loop {

@@ -333,17 +333,17 @@ ACTOR Future<Void> kvStoreServer() {
    }
}

ACTOR Future<SimpleKeyValueStoreInteface> connect() {
ACTOR Future<SimpleKeyValueStoreInterface> connect() {
    std::cout << format("%llu: Connect...\n", uint64_t(g_network->now()));
    SimpleKeyValueStoreInteface c;
    SimpleKeyValueStoreInterface c;
    c.connect = RequestStream<GetKVInterface>(Endpoint::wellKnown({ serverAddress }, WLTOKEN_SIMPLE_KV_SERVER));
    SimpleKeyValueStoreInteface result = wait(c.connect.getReply(GetKVInterface()));
    SimpleKeyValueStoreInterface result = wait(c.connect.getReply(GetKVInterface()));
    std::cout << format("%llu: done..\n", uint64_t(g_network->now()));
    return result;
}

ACTOR Future<Void> kvSimpleClient() {
    state SimpleKeyValueStoreInteface server = wait(connect());
    state SimpleKeyValueStoreInterface server = wait(connect());
    std::cout << format("Set %s -> %s\n", "foo", "bar");
    SetRequest setRequest;
    setRequest.key = "foo";

@@ -356,7 +356,7 @@ ACTOR Future<Void> kvSimpleClient() {
    return Void();
}

ACTOR Future<Void> kvClient(SimpleKeyValueStoreInteface server, std::shared_ptr<uint64_t> ops) {
ACTOR Future<Void> kvClient(SimpleKeyValueStoreInterface server, std::shared_ptr<uint64_t> ops) {
    state Future<Void> timeout = delay(20);
    state int rangeSize = 2 << 12;
    loop {

@@ -397,7 +397,7 @@ ACTOR Future<Void> throughputMeasurement(std::shared_ptr<uint64_t> operations) {
}

ACTOR Future<Void> multipleClients() {
    SimpleKeyValueStoreInteface server = wait(connect());
    SimpleKeyValueStoreInterface server = wait(connect());
    auto ops = std::make_shared<uint64_t>(0);
    std::vector<Future<Void>> clients(100);
    for (auto& f : clients) {

@@ -3,7 +3,7 @@
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2019 Apple Inc. and the FoundationDB project authors
* Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.

@@ -46,6 +46,7 @@ enum {
    OPT_HEX_KEY_PREFIX,
    OPT_BEGIN_VERSION_FILTER,
    OPT_END_VERSION_FILTER,
    OPT_KNOB,
    OPT_HELP
};

@@ -72,6 +73,7 @@ CSimpleOpt::SOption gConverterOptions[] = { { OPT_CONTAINER, "-r", SO_REQ_SEP },
    { OPT_HEX_KEY_PREFIX, "--hex-prefix", SO_REQ_SEP },
    { OPT_BEGIN_VERSION_FILTER, "--begin-version-filter", SO_REQ_SEP },
    { OPT_END_VERSION_FILTER, "--end-version-filter", SO_REQ_SEP },
    { OPT_KNOB, "--knob-", SO_REQ_SEP },
    { OPT_HELP, "-?", SO_NONE },
    { OPT_HELP, "-h", SO_NONE },
    { OPT_HELP, "--help", SO_NONE },

@@ -3,7 +3,7 @@
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2019 Apple Inc. and the FoundationDB project authors
* Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.

@@ -26,17 +26,21 @@
#include <vector>

#include "fdbbackup/BackupTLSConfig.h"
#include "fdbclient/BuildFlags.h"
#include "fdbbackup/FileConverter.h"
#include "fdbclient/BackupAgent.actor.h"
#include "fdbclient/BackupContainer.h"
#include "fdbbackup/FileConverter.h"
#include "fdbclient/CommitTransaction.h"
#include "fdbclient/FDBTypes.h"
#include "fdbclient/IKnobCollection.h"
#include "fdbclient/Knobs.h"
#include "fdbclient/MutationList.h"
#include "flow/ArgParseUtil.h"
#include "flow/IRandom.h"
#include "flow/Trace.h"
#include "flow/flow.h"
#include "flow/serialize.h"
#include "fdbclient/BuildFlags.h"

#include "flow/actorcompiler.h" // has to be last include

#define SevDecodeInfo SevVerbose

@@ -73,11 +77,13 @@ void printDecodeUsage() {
    " --list-only Print file list and exit.\n"
    " -k KEY_PREFIX Use the prefix for filtering mutations\n"
    " --hex-prefix HEX_PREFIX\n"
    " The prefix specified in HEX format, e.g., \\x05\\x01.\n"
    " The prefix specified in HEX format, e.g., \"\\\\x05\\\\x01\".\n"
    " --begin-version-filter BEGIN_VERSION\n"
    " The version range's begin version (inclusive) for filtering.\n"
    " --end-version-filter END_VERSION\n"
    " The version range's end version (exclusive) for filtering.\n"
    " --knob-KNOBNAME KNOBVALUE\n"
    " Changes a knob value. KNOBNAME should be lowercase."
    "\n";
    return;
}

@@ -97,6 +103,8 @@ struct DecodeParams {
    Version beginVersionFilter = 0;
    Version endVersionFilter = std::numeric_limits<Version>::max();

    std::vector<std::pair<std::string, std::string>> knobs;

    // Returns if [begin, end) overlap with the filter range
    bool overlap(Version begin, Version end) const {
        // Filter [100, 200), [50,75) [200, 300)

@@ -130,8 +138,39 @@ struct DecodeParams {
        if (!prefix.empty()) {
            s.append(", KeyPrefix: ").append(printable(KeyRef(prefix)));
        }
        for (const auto& [knob, value] : knobs) {
            s.append(", KNOB-").append(knob).append(" = ").append(value);
        }
        return s;
    }

    void updateKnobs() {
        auto& g_knobs = IKnobCollection::getMutableGlobalKnobCollection();
        for (const auto& [knobName, knobValueString] : knobs) {
            try {
                auto knobValue = g_knobs.parseKnobValue(knobName, knobValueString);
                g_knobs.setKnob(knobName, knobValue);
            } catch (Error& e) {
                if (e.code() == error_code_invalid_option_value) {
                    std::cerr << "WARNING: Invalid value '" << knobValueString << "' for knob option '" << knobName
                              << "'\n";
                    TraceEvent(SevWarnAlways, "InvalidKnobValue")
                        .detail("Knob", printable(knobName))
                        .detail("Value", printable(knobValueString));
                } else {
                    std::cerr << "ERROR: Failed to set knob option '" << knobName << "': " << e.what() << "\n";
                    TraceEvent(SevError, "FailedToSetKnob")
                        .errorUnsuppressed(e)
                        .detail("Knob", printable(knobName))
                        .detail("Value", printable(knobValueString));
                    throw;
                }
            }
        }

        // Reinitialize knobs in order to update knobs that are dependent on explicitly set knobs
        g_knobs.initialize(Randomize::True, IsSimulated::False);
    }
};

// Decode an ASCII string, e.g., "\x15\x1b\x19\x04\xaf\x0c\x28\x0a",

@@ -256,6 +295,16 @@ int parseDecodeCommandLine(DecodeParams* param, CSimpleOpt* args) {
            param->tlsConfig.blobCredentials.push_back(args->OptionArg());
            break;

        case OPT_KNOB: {
            Optional<std::string> knobName = extractPrefixedArgument("--knob", args->OptionSyntax());
            if (!knobName.present()) {
                std::cerr << "ERROR: unable to parse knob option '" << args->OptionSyntax() << "'\n";
                return FDB_EXIT_ERROR;
            }
            param->knobs.emplace_back(knobName.get(), args->OptionArg());
            break;
        }

#ifndef TLS_DISABLED
        case TLSConfig::OPT_TLS_PLUGIN:
            args->OptionArg();

@@ -457,7 +506,8 @@ ACTOR Future<Void> process_file(Reference<IBackupContainer> container, LogFile f
                print = m.param1.startsWith(StringRef(params.prefix));
            } else if (m.type == MutationRef::ClearRange) {
                KeyRange range(KeyRangeRef(m.param1, m.param2));
                print = range.contains(StringRef(params.prefix));
                KeyRange range2 = prefixRange(StringRef(params.prefix));
                print = range.intersects(range2);
            } else {
                ASSERT(false);
            }

@@ -551,6 +601,9 @@ int main(int argc, char** argv) {
    StringRef url(param.container_url);
    setupNetwork(0, UseMetrics::True);

    // Must be called after setupNetwork() to be effective
    param.updateKnobs();

    TraceEvent::setNetworkThread();
    openTraceFile(NetworkAddress(), 10 << 20, 500 << 20, param.log_dir, "decode", param.trace_log_group);
    param.tlsConfig.setupBlobCredentials();

@@ -3,7 +3,7 @@
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
* Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.

@@ -62,12 +62,27 @@ ACTOR Future<Void> setBlobRange(Database db, Key startKey, Key endKey, Value val

namespace fdb_cli {

ACTOR Future<bool> blobRangeCommandActor(Database localDb, std::vector<StringRef> tokens) {
ACTOR Future<bool> blobRangeCommandActor(Database localDb,
                                         Optional<TenantMapEntry> tenantEntry,
                                         std::vector<StringRef> tokens) {
    // enables blob writing for the given range
    if (tokens.size() != 4) {
        printUsage(tokens[0]);
        return false;
    } else if (tokens[3] > LiteralStringRef("\xff")) {
    }

    Key begin;
    Key end;

    if (tenantEntry.present()) {
        begin = tokens[2].withPrefix(tenantEntry.get().prefix);
        end = tokens[3].withPrefix(tenantEntry.get().prefix);
    } else {
        begin = tokens[2];
        end = tokens[3];
    }

    if (end > LiteralStringRef("\xff")) {
        // TODO is this something we want?
        printf("Cannot blobbify system keyspace! Problematic End Key: %s\n", tokens[3].printable().c_str());
        return false;

@@ -78,12 +93,12 @@ ACTOR Future<bool> blobRangeCommandActor(Database localDb, std::vector<StringRef
        printf("Starting blobbify range for [%s - %s)\n",
               tokens[2].printable().c_str(),
               tokens[3].printable().c_str());
        wait(setBlobRange(localDb, tokens[2], tokens[3], LiteralStringRef("1")));
        wait(setBlobRange(localDb, begin, end, LiteralStringRef("1")));
    } else if (tokencmp(tokens[1], "stop")) {
        printf("Stopping blobbify range for [%s - %s)\n",
               tokens[2].printable().c_str(),
               tokens[3].printable().c_str());
        wait(setBlobRange(localDb, tokens[2], tokens[3], StringRef()));
        wait(setBlobRange(localDb, begin, end, StringRef()));
    } else {
        printUsage(tokens[0]);
        printf("Usage: blobrange <start|stop> <startkey> <endkey>");

@@ -24,6 +24,7 @@ set(FDBCLI_SRCS
  SnapshotCommand.actor.cpp
  StatusCommand.actor.cpp
  SuspendCommand.actor.cpp
  TenantCommands.actor.cpp
  ThrottleCommand.actor.cpp
  TriggerDDTeamInfoLogCommand.actor.cpp
  TssqCommand.actor.cpp

@@ -3,7 +3,7 @@
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
* Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.

@@ -75,7 +75,10 @@ ACTOR Future<Void> requestVersionUpdate(Database localDb, Reference<ChangeFeedDa
    }
}

ACTOR Future<bool> changeFeedCommandActor(Database localDb, std::vector<StringRef> tokens, Future<Void> warn) {
ACTOR Future<bool> changeFeedCommandActor(Database localDb,
                                          Optional<TenantMapEntry> tenantEntry,
                                          std::vector<StringRef> tokens,
                                          Future<Void> warn) {
    if (tokens.size() == 1) {
        printUsage(tokens[0]);
        return false;

@@ -92,8 +95,15 @@ ACTOR Future<bool> changeFeedCommandActor(Database localDb, std::vector<StringRe
            printUsage(tokens[0]);
            return false;
        }
        wait(updateChangeFeed(
            localDb, tokens[2], ChangeFeedStatus::CHANGE_FEED_CREATE, KeyRangeRef(tokens[3], tokens[4])));

        KeyRange range;
        if (tenantEntry.present()) {
            range = KeyRangeRef(tokens[3], tokens[4]).withPrefix(tenantEntry.get().prefix);
        } else {
            range = KeyRangeRef(tokens[3], tokens[4]);
        }

        wait(updateChangeFeed(localDb, tokens[2], ChangeFeedStatus::CHANGE_FEED_CREATE, range));
    } else if (tokencmp(tokens[1], "stop")) {
        if (tokens.size() != 3) {
            printUsage(tokens[0]);

@@ -3,7 +3,7 @@
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
* Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.

@@ -264,7 +264,8 @@ CommandFactory configureFactory(
    "<single|double|triple|three_data_hall|three_datacenter|ssd|memory|memory-radixtree-beta|proxies=<PROXIES>|"
    "commit_proxies=<COMMIT_PROXIES>|grv_proxies=<GRV_PROXIES>|logs=<LOGS>|resolvers=<RESOLVERS>>*|"
    "count=<TSS_COUNT>|perpetual_storage_wiggle=<WIGGLE_SPEED>|perpetual_storage_wiggle_locality="
    "<<LOCALITY_KEY>:<LOCALITY_VALUE>|0>|storage_migration_type={disabled|gradual|aggressive}",
    "<<LOCALITY_KEY>:<LOCALITY_VALUE>|0>|storage_migration_type={disabled|gradual|aggressive}"
    "|tenant_mode={disabled|optional_experimental|required_experimental}|blob_granules_enabled={0|1}",
    "change the database configuration",
    "The `new' option, if present, initializes a new database with the given configuration rather than changing "
    "the configuration of an existing one. When used, both a redundancy mode and a storage engine must be "

@@ -295,6 +296,10 @@ CommandFactory configureFactory(
    "perpetual_storage_wiggle_locality=<<LOCALITY_KEY>:<LOCALITY_VALUE>|0>: Set the process filter for wiggling. "
    "The processes that match the given locality key and locality value are only wiggled. The value 0 will disable "
    "the locality filter and matches all the processes for wiggling.\n\n"
    "tenant_mode=<disabled|optional_experimental|required_experimental>: Sets the tenant mode for the cluster. If "
    "optional, then transactions can be run with or without specifying tenants. If required, all data must be "
    "accessed using tenants.\n\n"

    "See the FoundationDB Administration Guide for more information."));

} // namespace fdb_cli

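As a usage illustration (not part of this diff), the new ``tenant_mode`` option described in the help text above can be set from ``fdbcli`` with the ``configure`` command::

    fdb> configure tenant_mode=optional_experimental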