Merge pull request #10074 from sfc-gh-ajbeamon/apply-black-format

Apply black format to most Python files
A.J. Beamon 2023-05-02 08:20:47 -07:00 committed by GitHub
commit 0035d9c519
69 changed files with 4145 additions and 2126 deletions
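
The hunks below are mechanical reformatting produced by black; no behavior changes. As a rough sketch (not an excerpt from the commit) of the two most common rewrites black 22.8.0 applies here: single-quoted strings become double-quoted, and any literal or call that would exceed 88 columns is exploded one element per line with a trailing comma.

```python
# Hypothetical input, modeled on the LOGGING dict changed in this commit.
LOGGING = {'version': 1, 'disable_existing_loggers': False, 'formatters': {'simple': {'format': '%(message)s'}}}

# The same literal as black rewrites it: double quotes, 88-column wrapping,
# and an explicit trailing comma on the exploded outer collection.
LOGGING = {
    "version": 1,
    "disable_existing_loggers": False,
    "formatters": {"simple": {"format": "%(message)s"}},
}
```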

View File

@@ -3,6 +3,12 @@ repos:
rev: 2018e667a6a36ee3fbfa8041cd36512f92f60d49 # frozen: 22.8.0
hooks:
- id: black
exclude: |
(?x)^(
bindings/python/fdb/six.py|
contrib/Implib.so/implib-gen.py|
documentation/sphinx/extensions/rubydomain.py
)$
- repo: https://github.com/pycqa/flake8
rev: f8e1b317742036ff11ff86356fd2b68147e169f7 # frozen: 5.0.4
hooks:
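
A minimal sketch of checking a snippet against the same formatter the black hook above pins, assuming `pip install black==22.8.0`; `black.format_str` and `black.Mode` are black's public Python API.

```python
import black

# Feed a small snippet through the same formatter the pre-commit hook runs on whole files.
source = "x = {'a': 1,  'b': 2}\n"
formatted = black.format_str(source, mode=black.Mode())  # Mode() defaults to 88-column lines
print(formatted, end="")  # -> x = {"a": 1, "b": 2}
```

For the repository itself, the equivalent whole-tree check would be `pre-commit run black --all-files`, which honors the exclude list above.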

View File

@@ -17,4 +17,3 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#

View File

@@ -22,7 +22,9 @@ import math
import sys
import os
sys.path[:0] = [os.path.join(os.path.dirname(__file__), '..', '..', 'bindings', 'python')]
sys.path[:0] = [
os.path.join(os.path.dirname(__file__), "..", "..", "bindings", "python")
]
import util
from fdb import LATEST_API_VERSION
@@ -30,27 +32,20 @@ from fdb import LATEST_API_VERSION
FDB_API_VERSION = LATEST_API_VERSION
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'simple': {
'format': '%(message)s'
"version": 1,
"disable_existing_loggers": False,
"formatters": {"simple": {"format": "%(message)s"}},
"handlers": {
"console": {
"level": "NOTSET",
"class": "logging.StreamHandler",
"stream": sys.stdout,
"formatter": "simple",
}
},
'handlers': {
'console': {
'level': 'NOTSET',
'class': 'logging.StreamHandler',
'stream': sys.stdout,
'formatter': 'simple'
}
"loggers": {
"foundationdb.bindingtester": {"level": "INFO", "handlers": ["console"]}
},
'loggers': {
'foundationdb.bindingtester': {
'level': 'INFO',
'handlers': ['console']
}
}
}
@@ -61,7 +56,7 @@ class Result:
self.values = values
def key(self, specification):
return self.key_tuple[specification.key_start_index:]
return self.key_tuple[specification.key_start_index :]
@staticmethod
def elements_equal(el1, el2):
@@ -116,4 +111,4 @@ class Result:
else:
value_str = repr(self.values)
return '%s = %s' % (repr(self.subspace_tuple + self.key_tuple), value_str)
return "%s = %s" % (repr(self.subspace_tuple + self.key_tuple), value_str)

View File

@@ -34,7 +34,7 @@ import logging.config
from collections import OrderedDict
from functools import reduce
sys.path[:0] = [os.path.join(os.path.dirname(__file__), '..')]
sys.path[:0] = [os.path.join(os.path.dirname(__file__), "..")]
from bindingtester import FDB_API_VERSION
from bindingtester import Result
@@ -49,12 +49,32 @@ import fdb.tuple
API_VERSIONS = [
13, 14, 16, 21, 22, 23,
100, 200, 300,
400, 410, 420, 430, 440, 450, 460,
500, 510, 520,
600, 610, 620, 630,
700, 710, 720,
13,
14,
16,
21,
22,
23,
100,
200,
300,
400,
410,
420,
430,
440,
450,
460,
500,
510,
520,
600,
610,
620,
630,
700,
710,
720,
]
assert (
@@ -74,7 +94,7 @@ class ResultSet(object):
base_name = name
while name in self.tester_results:
num += 1
name = '%s (%d)' % (base_name, num)
name = "%s (%d)" % (base_name, num)
self.tester_results[name] = results
@@ -86,7 +106,10 @@ class ResultSet(object):
if len(self.tester_results) == 1:
return (0, False)
util.get_logger().info('Comparing results from \'%s\'...' % repr(util.subspace_to_tuple(self.specification.subspace)))
util.get_logger().info(
"Comparing results from '%s'..."
% repr(util.subspace_to_tuple(self.specification.subspace))
)
num_errors = 0
has_filtered_error = False
@@ -98,60 +121,105 @@ class ResultSet(object):
while True:
# Gets the next result for each tester
results = {i: r[indices[i]] for i, r in enumerate(self.tester_results.values()) if len(r) > indices[i]}
results = {
i: r[indices[i]]
for i, r in enumerate(self.tester_results.values())
if len(r) > indices[i]
}
if len(results) == 0:
break
# Attempt to 'align' the results. If two results have matching sequence numbers, then they should be compared.
# Only those testers which have a result matching the minimum current sequence number will be included. All
# others are considered to have not produced a result and will be evaluated in a future iteration.
sequence_nums = [r.sequence_num(self.specification) for r in results.values()]
sequence_nums = [
r.sequence_num(self.specification) for r in results.values()
]
if any([s is not None for s in sequence_nums]):
results = {i: r for i, r in results.items() if r.sequence_num(self.specification) == min(sequence_nums)}
results = {
i: r
for i, r in results.items()
if r.sequence_num(self.specification) == min(sequence_nums)
}
# If these results aren't using sequence numbers, then we match two results based on whether they share the same key
else:
min_key = reduce(ResultSet._min_tuple, [r.key(self.specification) for r in results.values()])
results = {i: r for i, r in results.items() if Result.tuples_match(r.key(self.specification), min_key)}
min_key = reduce(
ResultSet._min_tuple,
[r.key(self.specification) for r in results.values()],
)
results = {
i: r
for i, r in results.items()
if Result.tuples_match(r.key(self.specification), min_key)
}
# Increment the indices for those testers which produced a result in this iteration
for i in results.keys():
indices[i] += 1
# Fill in 'None' values for testers that didn't produce a result and generate an output string describing the results
all_results = {i: results[i] if i in results else None for i in range(len(self.tester_results))}
all_results = {
i: results[i] if i in results else None
for i in range(len(self.tester_results))
}
result_keys = list(self.tester_results.keys())
result_str = '\n'.join([' %-*s - %s' % (name_length, result_keys[i], r) for i, r in all_results.items()])
result_str = "\n".join(
[
" %-*s - %s" % (name_length, result_keys[i], r)
for i, r in all_results.items()
]
)
result_list = list(results.values())
# If any of our results matches the global error filter, we ignore the result
if any(r.matches_global_error_filter(self.specification) for r in result_list):
if any(
r.matches_global_error_filter(self.specification) for r in result_list
):
has_filtered_error = True
# The result is considered correct if every tester produced a value and all the values meet the matching criteria
if len(results) < len(all_results) or not all(result_list[0].matches(r, self.specification) for r in result_list):
util.get_logger().error('\nIncorrect result: \n%s' % result_str)
if len(results) < len(all_results) or not all(
result_list[0].matches(r, self.specification) for r in result_list
):
util.get_logger().error("\nIncorrect result: \n%s" % result_str)
num_errors += 1
else:
util.get_logger().debug('\nCorrect result: \n%s' % result_str)
util.get_logger().debug("\nCorrect result: \n%s" % result_str)
if num_errors > 0:
util.get_logger().error('')
util.get_logger().error("")
else:
util.get_logger().debug('')
util.get_logger().debug("")
return (num_errors, has_filtered_error)
def choose_api_version(selected_api_version, tester_min_version, tester_max_version, test_min_version, test_max_version):
def choose_api_version(
selected_api_version,
tester_min_version,
tester_max_version,
test_min_version,
test_max_version,
):
if selected_api_version is not None:
if selected_api_version < tester_min_version or selected_api_version > tester_max_version:
raise Exception('Not all testers support the API version %d (min=%d, max=%d)' %
(selected_api_version, tester_min_version, tester_max_version))
elif selected_api_version < test_min_version or selected_api_version > test_max_version:
raise Exception('API version %d is not supported by the specified test (min=%d, max=%d)' %
(selected_api_version, test_min_version, test_max_version))
if (
selected_api_version < tester_min_version
or selected_api_version > tester_max_version
):
raise Exception(
"Not all testers support the API version %d (min=%d, max=%d)"
% (selected_api_version, tester_min_version, tester_max_version)
)
elif (
selected_api_version < test_min_version
or selected_api_version > test_max_version
):
raise Exception(
"API version %d is not supported by the specified test (min=%d, max=%d)"
% (selected_api_version, test_min_version, test_max_version)
)
api_version = selected_api_version
else:
@@ -160,15 +228,24 @@ def choose_api_version(selected_api_version, tester_min_version, tester_max_vers
if min_version > max_version:
raise Exception(
'Not all testers support the API versions required by the specified test'
'(tester: min=%d, max=%d; test: min=%d, max=%d)' % (tester_min_version, tester_max_version, test_min_version, test_max_version))
"Not all testers support the API versions required by the specified test"
"(tester: min=%d, max=%d; test: min=%d, max=%d)"
% (
tester_min_version,
tester_max_version,
test_min_version,
test_max_version,
)
)
if random.random() < 0.7:
api_version = max_version
elif random.random() < 0.7:
api_version = min_version
elif random.random() < 0.9:
api_version = random.choice([v for v in API_VERSIONS if v >= min_version and v <= max_version])
api_version = random.choice(
[v for v in API_VERSIONS if v >= min_version and v <= max_version]
)
else:
api_version = random.randint(min_version, max_version)
@@ -180,47 +257,78 @@ class TestRunner(object):
self.args = copy.copy(args)
self.db = fdb.open(self.args.cluster_file)
self.test_seed = random.randint(0, 0xffffffff)
self.test_seed = random.randint(0, 0xFFFFFFFF)
self.testers = [Tester.get_test(self.args.test1)]
if self.args.test2 is not None:
self.testers.append(Tester.get_test(self.args.test2))
self.test = Test.create_test(self.args.test_name, fdb.Subspace((self.args.output_subspace,)))
self.test = Test.create_test(
self.args.test_name, fdb.Subspace((self.args.output_subspace,))
)
if self.test is None:
raise Exception('the test \'%s\' could not be found' % self.args.test_name)
raise Exception("the test '%s' could not be found" % self.args.test_name)
min_api_version = max([tester.min_api_version for tester in self.testers])
max_api_version = min([tester.max_api_version for tester in self.testers])
self.args.api_version = choose_api_version(self.args.api_version, min_api_version, max_api_version,
self.test.min_api_version, self.test.max_api_version)
self.args.api_version = choose_api_version(
self.args.api_version,
min_api_version,
max_api_version,
self.test.min_api_version,
self.test.max_api_version,
)
util.get_logger().info('\nCreating test at API version %d' % self.args.api_version)
util.get_logger().info(
"\nCreating test at API version %d" % self.args.api_version
)
max_int_bits = min([tester.max_int_bits for tester in self.testers])
if self.args.max_int_bits is None:
self.args.max_int_bits = max_int_bits
elif self.args.max_int_bits > max_int_bits:
raise Exception('The specified testers support at most %d-bit ints, but --max-int-bits was set to %d' %
(max_int_bits, self.args.max_int_bits))
raise Exception(
"The specified testers support at most %d-bit ints, but --max-int-bits was set to %d"
% (max_int_bits, self.args.max_int_bits)
)
self.args.no_threads = self.args.no_threads or any([not tester.threads_enabled for tester in self.testers])
self.args.no_threads = self.args.no_threads or any(
[not tester.threads_enabled for tester in self.testers]
)
if self.args.no_threads and self.args.concurrency > 1:
raise Exception('Not all testers support concurrency')
raise Exception("Not all testers support concurrency")
# Test types should be intersection of all tester supported types
self.args.types = list(reduce(lambda t1, t2: filter(t1.__contains__, t2), map(lambda tester: tester.types, self.testers)))
self.args.types = list(
reduce(
lambda t1, t2: filter(t1.__contains__, t2),
map(lambda tester: tester.types, self.testers),
)
)
self.args.no_directory_snapshot_ops = self.args.no_directory_snapshot_ops or any([not tester.directory_snapshot_ops_enabled for tester in self.testers])
self.args.no_tenants = self.args.no_tenants or any([not tester.tenants_enabled for tester in self.testers]) or self.args.api_version < 710
self.args.no_directory_snapshot_ops = (
self.args.no_directory_snapshot_ops
or any(
[not tester.directory_snapshot_ops_enabled for tester in self.testers]
)
)
self.args.no_tenants = (
self.args.no_tenants
or any([not tester.tenants_enabled for tester in self.testers])
or self.args.api_version < 710
)
def print_test(self):
test_instructions = self._generate_test()
for top_level_subspace, top_level_thread in test_instructions.items():
for subspace, thread in top_level_thread.get_threads(top_level_subspace).items():
util.get_logger().error('\nThread at prefix %r:' % util.subspace_to_tuple(subspace))
for subspace, thread in top_level_thread.get_threads(
top_level_subspace
).items():
util.get_logger().error(
"\nThread at prefix %r:" % util.subspace_to_tuple(subspace)
)
if self.args.print_all:
instructions = thread
offset = 0
@@ -229,18 +337,23 @@ class TestRunner(object):
offset = thread.core_test_begin
for i, instruction in enumerate(instructions):
if self.args.print_all or (instruction.operation != 'SWAP' and instruction.operation != 'PUSH'):
util.get_logger().error(' %d. %r' % (i + offset, instruction))
if self.args.print_all or (
instruction.operation != "SWAP"
and instruction.operation != "PUSH"
):
util.get_logger().error(" %d. %r" % (i + offset, instruction))
util.get_logger().error('')
util.get_logger().error("")
def run_test(self):
test_instructions = self._generate_test()
expected_results = self.test.get_expected_results()
tester_results = {s.subspace: ResultSet(s) for s in self.test.get_result_specifications()}
tester_results = {
s.subspace: ResultSet(s) for s in self.test.get_result_specifications()
}
for subspace, results in expected_results.items():
tester_results[subspace].add('expected', results)
tester_results[subspace].add("expected", results)
tester_errors = {}
@@ -249,17 +362,24 @@ class TestRunner(object):
self.test.pre_run(self.db, self.args)
return_code = self._run_tester(tester)
if return_code != 0:
util.get_logger().error('Test of type %s failed to complete successfully with random seed %d and %d operations\n' %
(self.args.test_name, self.args.seed, self.args.num_ops))
util.get_logger().error(
"Test of type %s failed to complete successfully with random seed %d and %d operations\n"
% (self.args.test_name, self.args.seed, self.args.num_ops)
)
return 2
tester_errors[tester] = self.test.validate(self.db, self.args)
for spec in self.test.get_result_specifications():
tester_results[spec.subspace].add(tester.name, self._get_results(spec.subspace))
tester_results[spec.subspace].add(
tester.name, self._get_results(spec.subspace)
)
return_code = self._validate_results(tester_errors, tester_results)
util.get_logger().info('Completed %s test with random seed %d and %d operations\n' % (self.args.test_name, self.args.seed, self.args.num_ops))
util.get_logger().info(
"Completed %s test with random seed %d and %d operations\n"
% (self.args.test_name, self.args.seed, self.args.num_ops)
)
return return_code
@@ -268,38 +388,55 @@ class TestRunner(object):
self._insert_instructions(test_instructions)
def _generate_test(self):
util.get_logger().info('Generating %s test at seed %d with %d op(s) and %d concurrent tester(s)...' %
(self.args.test_name, self.args.seed, self.args.num_ops, self.args.concurrency))
util.get_logger().info(
"Generating %s test at seed %d with %d op(s) and %d concurrent tester(s)..."
% (
self.args.test_name,
self.args.seed,
self.args.num_ops,
self.args.concurrency,
)
)
random.seed(self.test_seed)
if self.args.concurrency == 1:
self.test.setup(self.args)
test_instructions = {fdb.Subspace((bytes(self.args.instruction_prefix, 'utf-8'),)): self.test.generate(self.args, 0)}
test_instructions = {
fdb.Subspace(
(bytes(self.args.instruction_prefix, "utf-8"),)
): self.test.generate(self.args, 0)
}
else:
test_instructions = {}
main_thread = InstructionSet()
for i in range(self.args.concurrency):
# thread_spec = fdb.Subspace(('thread_spec', i))
thread_spec = b'thread_spec%d' % i
thread_spec = b"thread_spec%d" % i
main_thread.push_args(thread_spec)
main_thread.append('START_THREAD')
main_thread.append("START_THREAD")
self.test.setup(self.args)
test_instructions[fdb.Subspace((thread_spec,))] = self.test.generate(self.args, i)
test_instructions[fdb.Subspace((thread_spec,))] = self.test.generate(
self.args, i
)
test_instructions[fdb.Subspace((bytes(self.args.instruction_prefix, 'utf-8'),))] = main_thread
test_instructions[
fdb.Subspace((bytes(self.args.instruction_prefix, "utf-8"),))
] = main_thread
return test_instructions
def _insert_instructions(self, test_instructions):
util.get_logger().info('\nInserting test into database...')
util.get_logger().info("\nInserting test into database...")
del self.db[:]
while True:
tr = self.db.create_transaction()
try:
tr.options.set_special_key_space_enable_writes()
del tr[b'\xff\xff/management/tenant/map/' : b'\xff\xff/management/tenant/map0']
del tr[
b"\xff\xff/management/tenant/map/":b"\xff\xff/management/tenant/map0"
]
tr.commit().wait()
break
except fdb.FDBError as e:
@@ -309,11 +446,14 @@ class TestRunner(object):
thread.insert_operations(self.db, subspace)
def _run_tester(self, test):
params = test.cmd.split(' ') + [self.args.instruction_prefix, str(self.args.api_version)]
params = test.cmd.split(" ") + [
self.args.instruction_prefix,
str(self.args.api_version),
]
if self.args.cluster_file is not None:
params += [self.args.cluster_file]
util.get_logger().info('\nRunning tester \'%s\'...' % ' '.join(params))
util.get_logger().info("\nRunning tester '%s'..." % " ".join(params))
sys.stdout.flush()
proc = subprocess.Popen(params)
timed_out = Event()
@@ -327,7 +467,7 @@ class TestRunner(object):
timer.start()
ret_code = proc.wait()
except Exception as e:
raise Exception('Unable to run tester (%s)' % e)
raise Exception("Unable to run tester (%s)" % e)
finally:
timer.cancel()
@@ -336,16 +476,20 @@ class TestRunner(object):
if ret_code < 0:
signal_name = util.signal_number_to_name(-ret_code)
reason = 'exit code: %s' % (signal_name,)
reason = "exit code: %s" % (signal_name,)
if timed_out.is_set():
reason = 'timed out after %d seconds' % (self.args.timeout,)
util.get_logger().error('\n\'%s\' did not complete succesfully (%s)' % (params[0], reason))
reason = "timed out after %d seconds" % (self.args.timeout,)
util.get_logger().error(
"\n'%s' did not complete succesfully (%s)" % (params[0], reason)
)
util.get_logger().info('')
util.get_logger().info("")
return ret_code
def _get_results(self, subspace, instruction_index=None):
util.get_logger().info('Reading results from \'%s\'...' % repr(util.subspace_to_tuple(subspace)))
util.get_logger().info(
"Reading results from '%s'..." % repr(util.subspace_to_tuple(subspace))
)
results = []
next_key = subspace.range().start
@@ -360,7 +504,7 @@ class TestRunner(object):
return results
def _validate_results(self, tester_errors, tester_results):
util.get_logger().info('')
util.get_logger().info("")
num_incorrect = 0
has_filtered_error = False
@@ -373,16 +517,28 @@ class TestRunner(object):
for tester, errors in tester_errors.items():
if len(errors) > 0:
util.get_logger().error('The %s tester reported errors:\n' % tester.name)
util.get_logger().error(
"The %s tester reported errors:\n" % tester.name
)
for i, error in enumerate(errors):
util.get_logger().error(' %d. %s' % (i + 1, error))
util.get_logger().error(" %d. %s" % (i + 1, error))
log_message = '\nTest with seed %d and concurrency %d had %d incorrect result(s) and %d error(s) at API version %d' %\
(self.args.seed, self.args.concurrency, num_incorrect, num_errors, self.args.api_version)
log_message = (
"\nTest with seed %d and concurrency %d had %d incorrect result(s) and %d error(s) at API version %d"
% (
self.args.seed,
self.args.concurrency,
num_incorrect,
num_errors,
self.args.api_version,
)
)
if num_errors == 0 and (num_incorrect == 0 or has_filtered_error):
util.get_logger().info(log_message)
if has_filtered_error:
util.get_logger().info("Test had permissible non-deterministic errors; disregarding results...")
util.get_logger().info(
"Test had permissible non-deterministic errors; disregarding results..."
)
return 0
else:
util.get_logger().error(log_message)
@@ -390,7 +546,7 @@ class TestRunner(object):
def bisect(test_runner, args):
util.get_logger().info('')
util.get_logger().info("")
lower_bound = 0
upper_bound = args.num_ops
@@ -401,89 +557,188 @@ def bisect(test_runner, args):
if lower_bound == upper_bound:
if result != 0:
util.get_logger().error('Found minimal failing test with %d operations' % lower_bound)
util.get_logger().error(
"Found minimal failing test with %d operations" % lower_bound
)
if args.print_test:
test_runner.print_test()
return 0
elif upper_bound < args.num_ops:
util.get_logger().error('Error finding minimal failing test for seed %d. The failure may not be deterministic' % args.seed)
util.get_logger().error(
"Error finding minimal failing test for seed %d. The failure may not be deterministic"
% args.seed
)
return 1
else:
util.get_logger().error('No failing test found for seed %d with %d ops. Try specifying a larger --num-ops parameter.'
% (args.seed, args.num_ops))
util.get_logger().error(
"No failing test found for seed %d with %d ops. Try specifying a larger --num-ops parameter."
% (args.seed, args.num_ops)
)
return 0
elif result == 0:
util.get_logger().info('Test with %d operations succeeded\n' % test_runner.args.num_ops)
util.get_logger().info(
"Test with %d operations succeeded\n" % test_runner.args.num_ops
)
lower_bound = test_runner.args.num_ops + 1
else:
util.get_logger().info('Test with %d operations failed with error code %d\n' % (test_runner.args.num_ops, result))
util.get_logger().info(
"Test with %d operations failed with error code %d\n"
% (test_runner.args.num_ops, result)
)
upper_bound = test_runner.args.num_ops
def parse_args(argv):
parser = argparse.ArgumentParser(description='FoundationDB Binding API Tester')
parser.add_argument('--test-name', default='scripted',
help='The name of the test to run. Must be the name of a test specified in the tests folder. (default=\'scripted\')')
parser = argparse.ArgumentParser(description="FoundationDB Binding API Tester")
parser.add_argument(
"--test-name",
default="scripted",
help="The name of the test to run. Must be the name of a test specified in the tests folder. (default='scripted')",
)
parser.add_argument(metavar='tester1', dest='test1', help='Name of the first tester to invoke')
parser.add_argument('--compare', metavar='tester2', nargs='?', type=str, default=None, const='python', dest='test2',
help='When specified, a second tester will be run and compared against the first. This flag takes an optional argument '
'for the second tester to invoke (default = \'python\').')
parser.add_argument('--print-test', action='store_true',
help='Instead of running a test, prints the set of instructions generated for that test. Unless --all is specified, all '
'setup, finalization, PUSH, and SWAP instructions will be excluded.')
parser.add_argument('--all', dest='print_all', action='store_true', help='Causes --print-test to print all instructions.')
parser.add_argument('--bisect', action='store_true',
help='Run the specified test varying the number of operations until a minimal failing test is found. Does not work for '
'concurrent tests.')
parser.add_argument('--insert-only', action='store_true', help='Insert the test instructions into the database, but do not run it.')
parser.add_argument('--concurrency', type=int, default=1, help='Number of concurrent test threads to run. (default = 1).')
parser.add_argument('--num-ops', type=int, default=100, help='The number of operations to generate per thread (default = 100)')
parser.add_argument('--seed', type=int, help='The random seed to use for generating the test')
parser.add_argument('--max-int-bits', type=int, default=None,
help='Maximum number of bits to use for int types in testers. By default, the largest value supported by the testers being '
'run will be chosen.')
parser.add_argument('--api-version', default=None, type=int,
help='The API version that the testers should use. Not supported in scripted mode. (default = random version supported by '
'all testers)')
parser.add_argument('--cluster-file', type=str, default=None, help='The cluster file for the cluster being connected to. (default None)')
parser.add_argument('--timeout', type=int, default=600, help='The timeout in seconds for running each individual tester. (default 600)')
parser.add_argument('--enable-client-trace-logging', nargs='?', type=str, default=None, const='.',
help='Enables trace file output. This flag takes an optional argument specifying the output directory (default = \'.\').')
parser.add_argument('--instruction-prefix', type=str, default='test_spec',
help='The prefix under which the main thread of test instructions are inserted (default=\'test_spec\').')
parser.add_argument('--output-subspace', type=str, default='tester_output',
help='The string used to create the output subspace for the testers. The subspace will be of the form (<output_subspace>,). '
'(default=\'tester_output\')')
parser.add_argument(
metavar="tester1", dest="test1", help="Name of the first tester to invoke"
)
parser.add_argument(
"--compare",
metavar="tester2",
nargs="?",
type=str,
default=None,
const="python",
dest="test2",
help="When specified, a second tester will be run and compared against the first. This flag takes an optional argument "
"for the second tester to invoke (default = 'python').",
)
parser.add_argument(
"--print-test",
action="store_true",
help="Instead of running a test, prints the set of instructions generated for that test. Unless --all is specified, all "
"setup, finalization, PUSH, and SWAP instructions will be excluded.",
)
parser.add_argument(
"--all",
dest="print_all",
action="store_true",
help="Causes --print-test to print all instructions.",
)
parser.add_argument(
"--bisect",
action="store_true",
help="Run the specified test varying the number of operations until a minimal failing test is found. Does not work for "
"concurrent tests.",
)
parser.add_argument(
"--insert-only",
action="store_true",
help="Insert the test instructions into the database, but do not run it.",
)
parser.add_argument(
"--concurrency",
type=int,
default=1,
help="Number of concurrent test threads to run. (default = 1).",
)
parser.add_argument(
"--num-ops",
type=int,
default=100,
help="The number of operations to generate per thread (default = 100)",
)
parser.add_argument(
"--seed", type=int, help="The random seed to use for generating the test"
)
parser.add_argument(
"--max-int-bits",
type=int,
default=None,
help="Maximum number of bits to use for int types in testers. By default, the largest value supported by the testers being "
"run will be chosen.",
)
parser.add_argument(
"--api-version",
default=None,
type=int,
help="The API version that the testers should use. Not supported in scripted mode. (default = random version supported by "
"all testers)",
)
parser.add_argument(
"--cluster-file",
type=str,
default=None,
help="The cluster file for the cluster being connected to. (default None)",
)
parser.add_argument(
"--timeout",
type=int,
default=600,
help="The timeout in seconds for running each individual tester. (default 600)",
)
parser.add_argument(
"--enable-client-trace-logging",
nargs="?",
type=str,
default=None,
const=".",
help="Enables trace file output. This flag takes an optional argument specifying the output directory (default = '.').",
)
parser.add_argument(
"--instruction-prefix",
type=str,
default="test_spec",
help="The prefix under which the main thread of test instructions are inserted (default='test_spec').",
)
parser.add_argument(
"--output-subspace",
type=str,
default="tester_output",
help="The string used to create the output subspace for the testers. The subspace will be of the form (<output_subspace>,). "
"(default='tester_output')",
)
parser.add_argument('--logging-level', type=str, default='INFO',
choices=['ERROR', 'WARNING', 'INFO', 'DEBUG'], help='Specifies the level of detail in the tester output (default=\'INFO\').')
parser.add_argument(
"--logging-level",
type=str,
default="INFO",
choices=["ERROR", "WARNING", "INFO", "DEBUG"],
help="Specifies the level of detail in the tester output (default='INFO').",
)
# SOMEDAY: this applies only to the scripted test. Should we invoke test files specifically (as in circus),
# or invoke them here and allow tests to add arguments?
parser.add_argument('--no-threads', action='store_true', help='Disables the START_THREAD instruction in the scripted test.')
parser.add_argument(
"--no-threads",
action="store_true",
help="Disables the START_THREAD instruction in the scripted test.",
)
parser.add_argument('--no-directory-snapshot-ops', action='store_true', help='Disables snapshot operations for directory instructions.')
parser.add_argument(
"--no-directory-snapshot-ops",
action="store_true",
help="Disables snapshot operations for directory instructions.",
)
parser.add_argument('--no-tenants', action='store_true', help='Disables tenant operations.')
parser.add_argument(
"--no-tenants", action="store_true", help="Disables tenant operations."
)
return parser.parse_args(argv)
def validate_args(args):
if args.insert_only and args.bisect:
raise Exception('--bisect cannot be used with --insert-only')
raise Exception("--bisect cannot be used with --insert-only")
if args.print_all and not args.print_test:
raise Exception('cannot specify --all without --print-test')
raise Exception("cannot specify --all without --print-test")
if args.bisect and not args.seed:
raise Exception('--seed must be specified if using --bisect')
raise Exception("--seed must be specified if using --bisect")
if args.concurrency < 1:
raise Exception('--concurrency must be a positive integer')
raise Exception("--concurrency must be a positive integer")
if args.concurrency > 1 and args.test2:
raise Exception('--compare cannot be used with concurrent tests')
raise Exception("--compare cannot be used with concurrent tests")
def main(argv):
@@ -497,7 +752,7 @@ def main(argv):
validate_args(args)
if args.seed is None:
args.seed = random.randint(0, 0xffffffff)
args.seed = random.randint(0, 0xFFFFFFFF)
random.seed(args.seed)
@@ -518,15 +773,15 @@ def main(argv):
return test_runner.run_test()
except Exception as e:
util.get_logger().error('\nERROR: %s' % e)
util.get_logger().error("\nERROR: %s" % e)
util.get_logger().debug(traceback.format_exc())
exit(3)
except BaseException:
util.get_logger().error('\nERROR: %s' % sys.exc_info()[0])
util.get_logger().error("\nERROR: %s" % sys.exc_info()[0])
util.get_logger().info(traceback.format_exc())
exit(3)
if __name__ == '__main__':
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
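
The seed handling in this file also shows black's numeric-literal normalization: hex digits are uppercased while the 0x prefix stays lowercase. A two-line sketch of the rewrite, not an excerpt:

```python
import random

test_seed = random.randint(0, 0xffffffff)  # as previously written
test_seed = random.randint(0, 0xFFFFFFFF)  # as black normalizes it
```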

View File

@@ -23,12 +23,33 @@ import os
from bindingtester import FDB_API_VERSION
MAX_API_VERSION = FDB_API_VERSION
COMMON_TYPES = ['null', 'bytes', 'string', 'int', 'uuid', 'bool', 'float', 'double', 'tuple']
ALL_TYPES = COMMON_TYPES + ['versionstamp']
COMMON_TYPES = [
"null",
"bytes",
"string",
"int",
"uuid",
"bool",
"float",
"double",
"tuple",
]
ALL_TYPES = COMMON_TYPES + ["versionstamp"]
class Tester:
def __init__(self, name, cmd, max_int_bits=64, min_api_version=0, max_api_version=MAX_API_VERSION, threads_enabled=True, types=COMMON_TYPES, directory_snapshot_ops_enabled=True, tenants_enabled=False):
def __init__(
self,
name,
cmd,
max_int_bits=64,
min_api_version=0,
max_api_version=MAX_API_VERSION,
threads_enabled=True,
types=COMMON_TYPES,
directory_snapshot_ops_enabled=True,
tenants_enabled=False,
):
self.name = name
self.cmd = cmd
self.max_int_bits = max_int_bits
@@ -40,31 +61,82 @@ class Tester:
self.tenants_enabled = tenants_enabled
def supports_api_version(self, api_version):
return api_version >= self.min_api_version and api_version <= self.max_api_version
return (
api_version >= self.min_api_version and api_version <= self.max_api_version
)
@classmethod
def get_test(cls, test_name_or_args):
if test_name_or_args in testers:
return testers[test_name_or_args]
else:
return Tester(test_name_or_args.split(' ')[0], test_name_or_args)
return Tester(test_name_or_args.split(" ")[0], test_name_or_args)
def _absolute_path(path):
return os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', path)
return os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", path)
_java_cmd = 'java -ea -cp %s:%s com.apple.foundationdb.test.' % (
_absolute_path('java/foundationdb-client.jar'),
_absolute_path('java/foundationdb-tests.jar'))
_java_cmd = "java -ea -cp %s:%s com.apple.foundationdb.test." % (
_absolute_path("java/foundationdb-client.jar"),
_absolute_path("java/foundationdb-tests.jar"),
)
# We could set min_api_version lower on some of these if the testers were updated to support them
testers = {
'python': Tester('python', 'python ' + _absolute_path('python/tests/tester.py'), 2040, 23, MAX_API_VERSION, types=ALL_TYPES, tenants_enabled=True),
'python3': Tester('python3', 'python3 ' + _absolute_path('python/tests/tester.py'), 2040, 23, MAX_API_VERSION, types=ALL_TYPES, tenants_enabled=True),
'ruby': Tester('ruby', _absolute_path('ruby/tests/tester.rb'), 2040, 23, MAX_API_VERSION),
'java': Tester('java', _java_cmd + 'StackTester', 2040, 510, MAX_API_VERSION, types=ALL_TYPES, tenants_enabled=True),
'java_async': Tester('java', _java_cmd + 'AsyncStackTester', 2040, 510, MAX_API_VERSION, types=ALL_TYPES, tenants_enabled=True),
'go': Tester('go', _absolute_path('go/build/bin/_stacktester'), 2040, 200, MAX_API_VERSION, types=ALL_TYPES),
'flow': Tester('flow', _absolute_path('flow/bin/fdb_flow_tester'), 63, 500, MAX_API_VERSION, directory_snapshot_ops_enabled=False),
"python": Tester(
"python",
"python " + _absolute_path("python/tests/tester.py"),
2040,
23,
MAX_API_VERSION,
types=ALL_TYPES,
tenants_enabled=True,
),
"python3": Tester(
"python3",
"python3 " + _absolute_path("python/tests/tester.py"),
2040,
23,
MAX_API_VERSION,
types=ALL_TYPES,
tenants_enabled=True,
),
"ruby": Tester(
"ruby", _absolute_path("ruby/tests/tester.rb"), 2040, 23, MAX_API_VERSION
),
"java": Tester(
"java",
_java_cmd + "StackTester",
2040,
510,
MAX_API_VERSION,
types=ALL_TYPES,
tenants_enabled=True,
),
"java_async": Tester(
"java",
_java_cmd + "AsyncStackTester",
2040,
510,
MAX_API_VERSION,
types=ALL_TYPES,
tenants_enabled=True,
),
"go": Tester(
"go",
_absolute_path("go/build/bin/_stacktester"),
2040,
200,
MAX_API_VERSION,
types=ALL_TYPES,
),
"flow": Tester(
"flow",
_absolute_path("flow/bin/fdb_flow_tester"),
63,
500,
MAX_API_VERSION,
directory_snapshot_ops_enabled=False,
),
}

View File

@@ -31,14 +31,18 @@ fdb.api_version(FDB_API_VERSION)
class ResultSpecification(object):
def __init__(self, subspace, key_start_index=0, ordering_index=None, global_error_filter=None):
def __init__(
self, subspace, key_start_index=0, ordering_index=None, global_error_filter=None
):
self.subspace = subspace
self.key_start_index = key_start_index
self.ordering_index = ordering_index
if global_error_filter is not None:
error_str = b'|'.join([b'%d' % e for e in global_error_filter])
self.error_regex = re.compile(rb'\x01+ERROR\x00\xff*\x01' + error_str + rb'\x00')
error_str = b"|".join([b"%d" % e for e in global_error_filter])
self.error_regex = re.compile(
rb"\x01+ERROR\x00\xff*\x01" + error_str + rb"\x00"
)
else:
self.error_regex = None
@@ -82,22 +86,24 @@ class Test(object):
return []
def versionstamp_key(self, raw_bytes, version_pos):
if hasattr(self, 'api_version') and self.api_version < 520:
return raw_bytes + struct.pack('<H', version_pos)
if hasattr(self, "api_version") and self.api_version < 520:
return raw_bytes + struct.pack("<H", version_pos)
else:
return raw_bytes + struct.pack('<L', version_pos)
return raw_bytes + struct.pack("<L", version_pos)
def versionstamp_value(self, raw_bytes, version_pos=0):
if hasattr(self, 'api_version') and self.api_version < 520:
if hasattr(self, "api_version") and self.api_version < 520:
if version_pos != 0:
raise ValueError('unable to set non-zero version position before 520 in values')
raise ValueError(
"unable to set non-zero version position before 520 in values"
)
return raw_bytes
else:
return raw_bytes + struct.pack('<L', version_pos)
return raw_bytes + struct.pack("<L", version_pos)
@classmethod
def create_test(cls, name, subspace):
target = 'bindingtester.tests.%s' % name
target = "bindingtester.tests.%s" % name
test_class = [s for s in cls.__subclasses__() if s.__module__ == target]
if len(test_class) == 0:
return None
@@ -123,15 +129,15 @@ class Instruction(object):
class PushInstruction(Instruction):
def __init__(self, argument):
self.operation = 'PUSH'
self.operation = "PUSH"
self.argument = argument
self.value = fdb.tuple.pack(('PUSH', argument))
self.value = fdb.tuple.pack(("PUSH", argument))
def __str__(self):
return '%s %s' % (self.operation, self.argument)
return "%s %s" % (self.operation, self.argument)
def __repr__(self):
return '%r %r' % (self.operation, self.argument)
return "%r %r" % (self.operation, self.argument)
class TestInstructions(object):
@@ -173,11 +179,11 @@ class InstructionSet(TestInstructions, list):
self.core_test_end = len(self)
def core_instructions(self):
return self[self.core_test_begin: self.core_test_end]
return self[self.core_test_begin : self.core_test_end]
@fdb.transactional
def _insert_operations_transactional(self, tr, subspace, start, count):
for i, instruction in enumerate(self[start: start + count]):
for i, instruction in enumerate(self[start : start + count]):
tr[subspace.pack((start + i,))] = instruction.to_value()
def insert_operations(self, db, subspace):
@@ -207,7 +213,9 @@ class ThreadedInstructionSet(TestInstructions):
def create_thread(self, subspace=None, thread_instructions=None):
if subspace in self.threads:
raise 'An instruction set with the subspace %r has already been created' % util.subspace_to_tuple(subspace)
raise "An instruction set with the subspace %r has already been created" % util.subspace_to_tuple(
subspace
)
if thread_instructions == None:
thread_instructions = InstructionSet()
@@ -216,4 +224,4 @@ class ThreadedInstructionSet(TestInstructions):
return thread_instructions
util.import_subclasses(__file__, 'bindingtester.tests')
util.import_subclasses(__file__, "bindingtester.tests")

View File

@@ -34,56 +34,67 @@ fdb.api_version(FDB_API_VERSION)
class DirectoryTest(Test):
def __init__(self, subspace):
super(DirectoryTest, self).__init__(subspace)
self.stack_subspace = subspace['stack']
self.directory_log = subspace['directory_log']['directory']
self.subspace_log = subspace['directory_log']['subspace']
self.prefix_log = subspace['prefix_log']
self.stack_subspace = subspace["stack"]
self.directory_log = subspace["directory_log"]["directory"]
self.subspace_log = subspace["directory_log"]["subspace"]
self.prefix_log = subspace["prefix_log"]
self.prepopulated_dirs = []
self.next_path = 1
def ensure_default_directory_subspace(self, instructions, path):
directory_util.create_default_directory_subspace(instructions, path, self.random)
directory_util.create_default_directory_subspace(
instructions, path, self.random
)
child = self.root.add_child(path, DirectoryStateTreeNode(True, True, has_known_prefix=True))
child = self.root.add_child(
path, DirectoryStateTreeNode(True, True, has_known_prefix=True)
)
self.dir_list.append(child)
self.dir_index = directory_util.DEFAULT_DIRECTORY_INDEX
def generate_layer(self, allow_partition=True):
if random.random() < 0.7:
return b''
return b""
else:
choice = random.randint(0, 3)
if choice == 0 and allow_partition:
return b'partition'
return b"partition"
elif choice == 1:
return b'test_layer'
return b"test_layer"
else:
return self.random.random_string(random.randint(0, 5))
def setup(self, args):
self.dir_index = 0
self.random = test_util.RandomGenerator(args.max_int_bits, args.api_version, args.types)
self.random = test_util.RandomGenerator(
args.max_int_bits, args.api_version, args.types
)
def generate(self, args, thread_number):
instructions = InstructionSet()
op_choices = ['NEW_TRANSACTION', 'COMMIT']
op_choices = ["NEW_TRANSACTION", "COMMIT"]
general = ['DIRECTORY_CREATE_SUBSPACE', 'DIRECTORY_CREATE_LAYER']
general = ["DIRECTORY_CREATE_SUBSPACE", "DIRECTORY_CREATE_LAYER"]
op_choices += general
directory_mutations = ['DIRECTORY_CREATE_OR_OPEN', 'DIRECTORY_CREATE', 'DIRECTORY_MOVE', 'DIRECTORY_MOVE_TO',
'DIRECTORY_REMOVE', 'DIRECTORY_REMOVE_IF_EXISTS']
directory_reads = ['DIRECTORY_EXISTS', 'DIRECTORY_OPEN', 'DIRECTORY_LIST']
directory_mutations = [
"DIRECTORY_CREATE_OR_OPEN",
"DIRECTORY_CREATE",
"DIRECTORY_MOVE",
"DIRECTORY_MOVE_TO",
"DIRECTORY_REMOVE",
"DIRECTORY_REMOVE_IF_EXISTS",
]
directory_reads = ["DIRECTORY_EXISTS", "DIRECTORY_OPEN", "DIRECTORY_LIST"]
directory_db_mutations = [x + '_DATABASE' for x in directory_mutations]
directory_db_reads = [x + '_DATABASE' for x in directory_reads]
directory_snapshot_reads = [x + '_SNAPSHOT' for x in directory_reads]
directory_db_mutations = [x + "_DATABASE" for x in directory_mutations]
directory_db_reads = [x + "_DATABASE" for x in directory_reads]
directory_snapshot_reads = [x + "_SNAPSHOT" for x in directory_reads]
directory = []
directory += directory_mutations
@@ -94,27 +105,47 @@ class DirectoryTest(Test):
if not args.no_directory_snapshot_ops:
directory += directory_snapshot_reads
subspace = ['DIRECTORY_PACK_KEY', 'DIRECTORY_UNPACK_KEY', 'DIRECTORY_RANGE', 'DIRECTORY_CONTAINS', 'DIRECTORY_OPEN_SUBSPACE']
subspace = [
"DIRECTORY_PACK_KEY",
"DIRECTORY_UNPACK_KEY",
"DIRECTORY_RANGE",
"DIRECTORY_CONTAINS",
"DIRECTORY_OPEN_SUBSPACE",
]
instructions.append('NEW_TRANSACTION')
instructions.append("NEW_TRANSACTION")
default_path = 'default%d' % self.next_path
default_path = "default%d" % self.next_path
self.next_path += 1
self.dir_list = directory_util.setup_directories(instructions, default_path, self.random)
self.dir_list = directory_util.setup_directories(
instructions, default_path, self.random
)
self.root = self.dir_list[0]
instructions.push_args(0)
instructions.append('DIRECTORY_CHANGE')
instructions.append("DIRECTORY_CHANGE")
# Generate some directories that we are going to create in advance. This tests that other bindings
# are compatible with the Python implementation
self.prepopulated_dirs = [(generate_path(min_length=1), self.generate_layer()) for i in range(5)]
self.prepopulated_dirs = [
(generate_path(min_length=1), self.generate_layer()) for i in range(5)
]
for path, layer in self.prepopulated_dirs:
instructions.push_args(layer)
instructions.push_args(*test_util.with_length(path))
instructions.append('DIRECTORY_OPEN')
self.dir_list.append(self.root.add_child(path, DirectoryStateTreeNode(True, True, has_known_prefix=False, is_partition=(layer == b'partition'))))
instructions.append("DIRECTORY_OPEN")
self.dir_list.append(
self.root.add_child(
path,
DirectoryStateTreeNode(
True,
True,
has_known_prefix=False,
is_partition=(layer == b"partition"),
),
)
)
# print('%d. Selected %s, dir=%s, dir_id=%s, has_known_prefix=%s, dir_list_len=%d' \
# % (len(instructions), 'DIRECTORY_OPEN', repr(self.dir_index), self.dir_list[-1].dir_id, False, len(self.dir_list)-1))
@@ -124,11 +155,14 @@ class DirectoryTest(Test):
if random.random() < 0.5:
while True:
self.dir_index = random.randrange(0, len(self.dir_list))
if not self.dir_list[self.dir_index].state.is_partition or not self.dir_list[self.dir_index].state.deleted:
if (
not self.dir_list[self.dir_index].state.is_partition
or not self.dir_list[self.dir_index].state.deleted
):
break
instructions.push_args(self.dir_index)
instructions.append('DIRECTORY_CHANGE')
instructions.append("DIRECTORY_CHANGE")
dir_entry = self.dir_list[self.dir_index]
@@ -143,53 +177,74 @@ class DirectoryTest(Test):
# print('%d. Selected %s, dir=%d, dir_id=%d, has_known_prefix=%d, dir_list_len=%d' \
# % (len(instructions), op, self.dir_index, dir_entry.dir_id, dir_entry.state.has_known_prefix, len(self.dir_list)))
if op.endswith('_DATABASE') or op.endswith('_SNAPSHOT'):
if op.endswith("_DATABASE") or op.endswith("_SNAPSHOT"):
root_op = op[0:-9]
else:
root_op = op
if root_op == 'NEW_TRANSACTION':
if root_op == "NEW_TRANSACTION":
instructions.append(op)
elif root_op == 'COMMIT':
elif root_op == "COMMIT":
test_util.blocking_commit(instructions)
elif root_op == 'DIRECTORY_CREATE_SUBSPACE':
elif root_op == "DIRECTORY_CREATE_SUBSPACE":
path = generate_path()
instructions.push_args(generate_prefix(require_unique=False, is_partition=True))
instructions.push_args(
generate_prefix(require_unique=False, is_partition=True)
)
instructions.push_args(*test_util.with_length(path))
instructions.append(op)
self.dir_list.append(DirectoryStateTreeNode(False, True, has_known_prefix=True))
self.dir_list.append(
DirectoryStateTreeNode(False, True, has_known_prefix=True)
)
elif root_op == 'DIRECTORY_CREATE_LAYER':
elif root_op == "DIRECTORY_CREATE_LAYER":
indices = []
prefixes = [generate_prefix(require_unique=args.concurrency == 1, is_partition=True) for i in range(2)]
prefixes = [
generate_prefix(
require_unique=args.concurrency == 1, is_partition=True
)
for i in range(2)
]
for i in range(2):
instructions.push_args(prefixes[i])
instructions.push_args(*test_util.with_length(generate_path()))
instructions.append('DIRECTORY_CREATE_SUBSPACE')
instructions.append("DIRECTORY_CREATE_SUBSPACE")
indices.append(len(self.dir_list))
self.dir_list.append(DirectoryStateTreeNode(False, True, has_known_prefix=True))
self.dir_list.append(
DirectoryStateTreeNode(False, True, has_known_prefix=True)
)
instructions.push_args(random.choice([0, 1]))
instructions.push_args(*indices)
instructions.append(op)
self.dir_list.append(DirectoryStateTreeNode.get_layer(prefixes[0]))
elif root_op == 'DIRECTORY_CREATE_OR_OPEN':
elif root_op == "DIRECTORY_CREATE_OR_OPEN":
# Because allocated prefixes are non-deterministic, we cannot have overlapping
# transactions that allocate/remove these prefixes in a comparison test
if op.endswith('_DATABASE') and args.concurrency == 1:
if op.endswith("_DATABASE") and args.concurrency == 1:
test_util.blocking_commit(instructions)
path = generate_path()
# Partitions that use the high-contention allocator can result in non-determinism if they fail to commit,
# so we disallow them in comparison tests
op_args = test_util.with_length(path) + (self.generate_layer(allow_partition=args.concurrency > 1),)
directory_util.push_instruction_and_record_prefix(instructions, op, op_args, path, len(self.dir_list), self.random, self.prefix_log)
op_args = test_util.with_length(path) + (
self.generate_layer(allow_partition=args.concurrency > 1),
)
directory_util.push_instruction_and_record_prefix(
instructions,
op,
op_args,
path,
len(self.dir_list),
self.random,
self.prefix_log,
)
if not op.endswith('_DATABASE') and args.concurrency == 1:
if not op.endswith("_DATABASE") and args.concurrency == 1:
test_util.blocking_commit(instructions)
child_entry = dir_entry.get_descendent(path)
@@ -199,32 +254,49 @@ class DirectoryTest(Test):
child_entry.state.has_known_prefix = False
self.dir_list.append(dir_entry.add_child(path, child_entry))
elif root_op == 'DIRECTORY_CREATE':
elif root_op == "DIRECTORY_CREATE":
layer = self.generate_layer()
is_partition = layer == b'partition'
is_partition = layer == b"partition"
prefix = generate_prefix(require_unique=is_partition and args.concurrency == 1, is_partition=is_partition, min_length=0)
prefix = generate_prefix(
require_unique=is_partition and args.concurrency == 1,
is_partition=is_partition,
min_length=0,
)
# Because allocated prefixes are non-deterministic, we cannot have overlapping
# transactions that allocate/remove these prefixes in a comparison test
if op.endswith('_DATABASE') and args.concurrency == 1: # and allow_empty_prefix:
if (
op.endswith("_DATABASE") and args.concurrency == 1
): # and allow_empty_prefix:
test_util.blocking_commit(instructions)
path = generate_path()
op_args = test_util.with_length(path) + (layer, prefix)
if prefix is None:
directory_util.push_instruction_and_record_prefix(
instructions, op, op_args, path, len(self.dir_list), self.random, self.prefix_log)
instructions,
op,
op_args,
path,
len(self.dir_list),
self.random,
self.prefix_log,
)
else:
instructions.push_args(*op_args)
instructions.append(op)
if not op.endswith('_DATABASE') and args.concurrency == 1: # and allow_empty_prefix:
if (
not op.endswith("_DATABASE") and args.concurrency == 1
): # and allow_empty_prefix:
test_util.blocking_commit(instructions)
child_entry = dir_entry.get_descendent(path)
if child_entry is None:
child_entry = DirectoryStateTreeNode(True, True, has_known_prefix=bool(prefix))
child_entry = DirectoryStateTreeNode(
True, True, has_known_prefix=bool(prefix)
)
elif not bool(prefix):
child_entry.state.has_known_prefix = False
@@ -233,7 +305,7 @@ class DirectoryTest(Test):
self.dir_list.append(dir_entry.add_child(path, child_entry))
elif root_op == 'DIRECTORY_OPEN':
elif root_op == "DIRECTORY_OPEN":
path = generate_path()
instructions.push_args(self.generate_layer())
instructions.push_args(*test_util.with_length(path))
@@ -241,44 +313,58 @@ class DirectoryTest(Test):
child_entry = dir_entry.get_descendent(path)
if child_entry is None:
self.dir_list.append(DirectoryStateTreeNode(False, False, has_known_prefix=False))
self.dir_list.append(
DirectoryStateTreeNode(False, False, has_known_prefix=False)
)
else:
self.dir_list.append(dir_entry.add_child(path, child_entry))
elif root_op == 'DIRECTORY_MOVE':
elif root_op == "DIRECTORY_MOVE":
old_path = generate_path()
new_path = generate_path()
instructions.push_args(*(test_util.with_length(old_path) + test_util.with_length(new_path)))
instructions.push_args(
*(test_util.with_length(old_path) + test_util.with_length(new_path))
)
instructions.append(op)
child_entry = dir_entry.get_descendent(old_path)
if child_entry is None:
self.dir_list.append(DirectoryStateTreeNode(False, False, has_known_prefix=False))
self.dir_list.append(
DirectoryStateTreeNode(False, False, has_known_prefix=False)
)
else:
self.dir_list.append(dir_entry.add_child(new_path, child_entry))
# Make sure that the default directory subspace still exists after moving the specified directory
if dir_entry.state.is_directory and not dir_entry.state.is_subspace and old_path == ('',):
if (
dir_entry.state.is_directory
and not dir_entry.state.is_subspace
and old_path == ("",)
):
self.ensure_default_directory_subspace(instructions, default_path)
elif root_op == 'DIRECTORY_MOVE_TO':
elif root_op == "DIRECTORY_MOVE_TO":
new_path = generate_path()
instructions.push_args(*test_util.with_length(new_path))
instructions.append(op)
child_entry = dir_entry.get_descendent(())
if child_entry is None:
self.dir_list.append(DirectoryStateTreeNode(False, False, has_known_prefix=False))
self.dir_list.append(
DirectoryStateTreeNode(False, False, has_known_prefix=False)
)
else:
self.dir_list.append(dir_entry.add_child(new_path, child_entry))
# Make sure that the default directory subspace still exists after moving the current directory
self.ensure_default_directory_subspace(instructions, default_path)
elif root_op == 'DIRECTORY_REMOVE' or root_op == 'DIRECTORY_REMOVE_IF_EXISTS':
elif (
root_op == "DIRECTORY_REMOVE" or root_op == "DIRECTORY_REMOVE_IF_EXISTS"
):
# Because allocated prefixes are non-deterministic, we cannot have overlapping
# transactions that allocate/remove these prefixes in a comparison test
if op.endswith('_DATABASE') and args.concurrency == 1:
if op.endswith("_DATABASE") and args.concurrency == 1:
test_util.blocking_commit(instructions)
path = ()
@@ -293,10 +379,14 @@ class DirectoryTest(Test):
dir_entry.delete(path)
# Make sure that the default directory subspace still exists after removing the specified directory
if path == () or (dir_entry.state.is_directory and not dir_entry.state.is_subspace and path == ('',)):
if path == () or (
dir_entry.state.is_directory
and not dir_entry.state.is_subspace
and path == ("",)
):
self.ensure_default_directory_subspace(instructions, default_path)
elif root_op == 'DIRECTORY_LIST' or root_op == 'DIRECTORY_EXISTS':
elif root_op == "DIRECTORY_LIST" or root_op == "DIRECTORY_EXISTS":
path = ()
count = random.randint(0, 1)
if count == 1:
@@ -305,57 +395,65 @@ class DirectoryTest(Test):
instructions.push_args(count)
instructions.append(op)
elif root_op == 'DIRECTORY_PACK_KEY':
elif root_op == "DIRECTORY_PACK_KEY":
t = self.random.random_tuple(5)
instructions.push_args(*test_util.with_length(t))
instructions.append(op)
instructions.append('DIRECTORY_STRIP_PREFIX')
instructions.append("DIRECTORY_STRIP_PREFIX")
elif root_op == 'DIRECTORY_UNPACK_KEY' or root_op == 'DIRECTORY_CONTAINS':
if not dir_entry.state.has_known_prefix or random.random() < 0.2 or root_op == 'DIRECTORY_UNPACK_KEY':
elif root_op == "DIRECTORY_UNPACK_KEY" or root_op == "DIRECTORY_CONTAINS":
if (
not dir_entry.state.has_known_prefix
or random.random() < 0.2
or root_op == "DIRECTORY_UNPACK_KEY"
):
t = self.random.random_tuple(5)
instructions.push_args(*test_util.with_length(t))
instructions.append('DIRECTORY_PACK_KEY')
instructions.append("DIRECTORY_PACK_KEY")
instructions.append(op)
else:
instructions.push_args(fdb.tuple.pack(self.random.random_tuple(5)))
instructions.append(op)
elif root_op == 'DIRECTORY_RANGE' or root_op == 'DIRECTORY_OPEN_SUBSPACE':
elif root_op == "DIRECTORY_RANGE" or root_op == "DIRECTORY_OPEN_SUBSPACE":
t = self.random.random_tuple(5)
instructions.push_args(*test_util.with_length(t))
instructions.append(op)
if root_op == 'DIRECTORY_OPEN_SUBSPACE':
self.dir_list.append(DirectoryStateTreeNode(False, True, dir_entry.state.has_known_prefix))
if root_op == "DIRECTORY_OPEN_SUBSPACE":
self.dir_list.append(
DirectoryStateTreeNode(
False, True, dir_entry.state.has_known_prefix
)
)
else:
test_util.to_front(instructions, 1)
instructions.append('DIRECTORY_STRIP_PREFIX')
instructions.append("DIRECTORY_STRIP_PREFIX")
test_util.to_front(instructions, 1)
instructions.append('DIRECTORY_STRIP_PREFIX')
instructions.append("DIRECTORY_STRIP_PREFIX")
instructions.begin_finalization()
test_util.blocking_commit(instructions)
instructions.append('NEW_TRANSACTION')
instructions.append("NEW_TRANSACTION")
for i, dir_entry in enumerate(self.dir_list):
instructions.push_args(i)
instructions.append('DIRECTORY_CHANGE')
instructions.append("DIRECTORY_CHANGE")
if dir_entry.state.is_directory:
instructions.push_args(self.directory_log.key())
instructions.append('DIRECTORY_LOG_DIRECTORY')
instructions.append("DIRECTORY_LOG_DIRECTORY")
if dir_entry.state.has_known_prefix and dir_entry.state.is_subspace:
# print('%d. Logging subspace: %d' % (i, dir_entry.dir_id))
instructions.push_args(self.subspace_log.key())
instructions.append('DIRECTORY_LOG_SUBSPACE')
instructions.append("DIRECTORY_LOG_SUBSPACE")
if (i + 1) % 100 == 0:
test_util.blocking_commit(instructions)
test_util.blocking_commit(instructions)
instructions.push_args(self.stack_subspace.key())
instructions.append('LOG_STACK')
instructions.append("LOG_STACK")
test_util.blocking_commit(instructions)
return instructions
@@ -363,10 +461,12 @@ class DirectoryTest(Test):
def pre_run(self, db, args):
for (path, layer) in self.prepopulated_dirs:
try:
util.get_logger().debug('Prepopulating directory: %r (layer=%r)' % (path, layer))
util.get_logger().debug(
"Prepopulating directory: %r (layer=%r)" % (path, layer)
)
fdb.directory.create_or_open(db, path, layer)
except Exception as e:
util.get_logger().debug('Could not create directory %r: %r' % (path, e))
util.get_logger().debug("Could not create directory %r: %r" % (path, e))
pass
def validate(self, db, args):
@@ -380,9 +480,14 @@ class DirectoryTest(Test):
def get_result_specifications(self):
return [
ResultSpecification(self.stack_subspace, key_start_index=1, ordering_index=1, global_error_filter=[1007, 1009, 1021]),
ResultSpecification(
self.stack_subspace,
key_start_index=1,
ordering_index=1,
global_error_filter=[1007, 1009, 1021],
),
ResultSpecification(self.directory_log, ordering_index=0),
ResultSpecification(self.subspace_log, ordering_index=0)
ResultSpecification(self.subspace_log, ordering_index=0),
]
@@ -394,31 +499,40 @@ def generate_path(min_length=0):
path = ()
for i in range(length):
if random.random() < 0.05:
path = path + ('',)
path = path + ("",)
else:
path = path + (random.choice(['1', '2', '3']),)
path = path + (random.choice(["1", "2", "3"]),)
return path
def generate_prefix(require_unique=False, is_partition=False, min_length=1):
fixed_prefix = b'abcdefg'
fixed_prefix = b"abcdefg"
if not require_unique and min_length == 0 and random.random() < 0.8:
return None
elif require_unique or is_partition or min_length > len(fixed_prefix) or random.random() < 0.5:
elif (
require_unique
or is_partition
or min_length > len(fixed_prefix)
or random.random() < 0.5
):
if require_unique:
min_length = max(min_length, 16)
length = random.randint(min_length, min_length + 5)
if length == 0:
return b''
return b""
if not is_partition:
first = random.randint(ord('\x1d'), 255) % 255
return bytes([first] + [random.randrange(0, 256) for i in range(0, length - 1)])
first = random.randint(ord("\x1d"), 255) % 255
return bytes(
[first] + [random.randrange(0, 256) for i in range(0, length - 1)]
)
else:
return bytes([random.randrange(ord('\x02'), ord('\x14')) for i in range(0, length)])
return bytes(
[random.randrange(ord("\x02"), ord("\x14")) for i in range(0, length)]
)
else:
prefix = fixed_prefix
generated = prefix[0:random.randrange(min_length, len(prefix))]
generated = prefix[0 : random.randrange(min_length, len(prefix))]
return generated

View File

@ -34,19 +34,25 @@ fdb.api_version(FDB_API_VERSION)
class DirectoryHcaTest(Test):
def __init__(self, subspace):
super(DirectoryHcaTest, self).__init__(subspace)
self.coordination = subspace['coordination']
self.prefix_log = subspace['prefix_log']
self.coordination = subspace["coordination"]
self.prefix_log = subspace["prefix_log"]
self.next_path = 1
def setup(self, args):
self.random = test_util.RandomGenerator(args.max_int_bits, args.api_version, args.types)
self.transactions = [b'tr%d' % i for i in range(3)] # SOMEDAY: parameterize this number?
self.random = test_util.RandomGenerator(
args.max_int_bits, args.api_version, args.types
)
self.transactions = [
b"tr%d" % i for i in range(3)
] # SOMEDAY: parameterize this number?
self.barrier_num = 0
self.max_directories_per_transaction = 30
if args.api_version < 300:
if args.concurrency > 8:
raise Exception('Directory HCA test does not support concurrency larger than 8 with API version less than 300')
raise Exception(
"Directory HCA test does not support concurrency larger than 8 with API version less than 300"
)
self.max_directories_per_transaction = 8.0 / args.concurrency
@ -54,35 +60,39 @@ class DirectoryHcaTest(Test):
for tr in self.transactions:
if random.random() < 0.8 or args.api_version < 300:
instructions.push_args(tr)
instructions.append('USE_TRANSACTION')
instructions.append("USE_TRANSACTION")
test_util.blocking_commit(instructions)
def barrier(self, instructions, thread_number, thread_ending=False):
if not thread_ending:
instructions.push_args(self.coordination[(self.barrier_num + 1)][thread_number].key(), b'')
instructions.append('SET_DATABASE')
instructions.append('WAIT_FUTURE')
instructions.push_args(
self.coordination[(self.barrier_num + 1)][thread_number].key(), b""
)
instructions.append("SET_DATABASE")
instructions.append("WAIT_FUTURE")
instructions.push_args(self.coordination[self.barrier_num][thread_number].key())
instructions.append('CLEAR_DATABASE')
instructions.append('WAIT_FUTURE')
instructions.append("CLEAR_DATABASE")
instructions.append("WAIT_FUTURE")
instructions.push_args(self.coordination[self.barrier_num].key())
instructions.append('WAIT_EMPTY')
instructions.append("WAIT_EMPTY")
self.barrier_num += 1
def generate(self, args, thread_number):
instructions = InstructionSet()
instructions.append('NEW_TRANSACTION')
instructions.append("NEW_TRANSACTION")
default_path = 'default%d' % self.next_path
default_path = "default%d" % self.next_path
self.next_path += 1
dir_list = directory_util.setup_directories(instructions, default_path, self.random)
dir_list = directory_util.setup_directories(
instructions, default_path, self.random
)
num_dirs = len(dir_list)
instructions.push_args(directory_util.DEFAULT_DIRECTORY_INDEX)
instructions.append('DIRECTORY_CHANGE')
instructions.append("DIRECTORY_CHANGE")
instructions.setup_complete()
@ -93,24 +103,44 @@ class DirectoryHcaTest(Test):
self.barrier(instructions, thread_number)
instructions.push_args(random.choice(self.transactions))
instructions.append('USE_TRANSACTION')
instructions.append("USE_TRANSACTION")
if thread_number == 0 and args.concurrency > 1:
num_directories = 1
else:
num_directories = int(max(1, pow(random.random(), 4) * min(self.max_directories_per_transaction, args.num_ops - current_op)))
num_directories = int(
max(
1,
pow(random.random(), 4)
* min(
self.max_directories_per_transaction,
args.num_ops - current_op,
),
)
)
for i in range(num_directories):
path = (self.random.random_unicode_str(16),)
op_args = test_util.with_length(path) + (b'', None)
directory_util.push_instruction_and_record_prefix(instructions, 'DIRECTORY_CREATE',
op_args, path, num_dirs, self.random, self.prefix_log)
op_args = test_util.with_length(path) + (b"", None)
directory_util.push_instruction_and_record_prefix(
instructions,
"DIRECTORY_CREATE",
op_args,
path,
num_dirs,
self.random,
self.prefix_log,
)
num_dirs += 1
current_op += num_directories
if args.concurrency > 1:
self.barrier(instructions, thread_number, thread_ending=(current_op >= args.num_ops))
self.barrier(
instructions,
thread_number,
thread_ending=(current_op >= args.num_ops),
)
if thread_number == 0:
self.commit_transactions(instructions, args)
@ -121,7 +151,7 @@ class DirectoryHcaTest(Test):
def pre_run(self, tr, args):
if args.concurrency > 1:
for i in range(args.concurrency):
tr[self.coordination[0][i]] = b''
tr[self.coordination[0][i]] = b""
def validate(self, db, args):
errors = []

View File

@ -2,7 +2,16 @@ import sys
class TreeNodeState:
def __init__(self, node, dir_id, is_directory, is_subspace, has_known_prefix, root, is_partition):
def __init__(
self,
node,
dir_id,
is_directory,
is_subspace,
has_known_prefix,
root,
is_partition,
):
self.dir_id = dir_id
self.is_directory = is_directory
self.is_subspace = is_subspace
@ -42,17 +51,33 @@ class DirectoryStateTreeNode:
@classmethod
def get_layer(cls, node_subspace_prefix):
if node_subspace_prefix not in DirectoryStateTreeNode.layers:
DirectoryStateTreeNode.layers[node_subspace_prefix] = DirectoryStateTreeNode(True, False, has_known_prefix=False)
DirectoryStateTreeNode.layers[
node_subspace_prefix
] = DirectoryStateTreeNode(True, False, has_known_prefix=False)
return DirectoryStateTreeNode.layers[node_subspace_prefix]
def __init__(self, is_directory, is_subspace, has_known_prefix=True, root=None, is_partition=False):
self.state = TreeNodeState(self, DirectoryStateTreeNode.dir_id + 1, is_directory, is_subspace, has_known_prefix,
root or self, is_partition)
def __init__(
self,
is_directory,
is_subspace,
has_known_prefix=True,
root=None,
is_partition=False,
):
self.state = TreeNodeState(
self,
DirectoryStateTreeNode.dir_id + 1,
is_directory,
is_subspace,
has_known_prefix,
root or self,
is_partition,
)
DirectoryStateTreeNode.dir_id += 1
def __repr__(self):
return '{DirEntry %d: %d}' % (self.state.dir_id, self.state.has_known_prefix)
return "{DirEntry %d: %d}" % (self.state.dir_id, self.state.has_known_prefix)
def _get_descendent(self, subpath, default):
if not subpath:
@ -81,7 +106,9 @@ class DirectoryStateTreeNode:
child.state.root = self.state.root
if DirectoryStateTreeNode.default_directory:
# print('Adding child %r to default directory at %r' % (child, subpath))
child = DirectoryStateTreeNode.default_directory._add_child_impl(subpath, child)
child = DirectoryStateTreeNode.default_directory._add_child_impl(
subpath, child
)
# print('Added %r' % child)
# print('Adding child %r to directory at %r' % (child, subpath))
@ -118,7 +145,9 @@ class DirectoryStateTreeNode:
self.state.dir_id = min(other.state.dir_id, self.state.dir_id)
self.state.is_directory = self.state.is_directory and other.state.is_directory
self.state.is_subspace = self.state.is_subspace and other.state.is_subspace
self.state.has_known_prefix = self.state.has_known_prefix and other.state.has_known_prefix
self.state.has_known_prefix = (
self.state.has_known_prefix and other.state.has_known_prefix
)
self.state.deleted = self.state.deleted or other.state.deleted
self.state.is_partition = self.state.is_partition or other.state.is_partition
@ -157,74 +186,164 @@ def validate_dir(dir, root):
def run_test():
all_entries = []
root = DirectoryStateTreeNode.get_layer('\xfe')
root = DirectoryStateTreeNode.get_layer("\xfe")
all_entries.append(root)
default_dir = root.add_child(('default',), DirectoryStateTreeNode(True, True, has_known_prefix=True))
default_dir = root.add_child(
("default",), DirectoryStateTreeNode(True, True, has_known_prefix=True)
)
DirectoryStateTreeNode.set_default_directory(default_dir)
all_entries.append(default_dir)
all_entries.append(default_dir.add_child(('1',), DirectoryStateTreeNode(True, True, has_known_prefix=True)))
all_entries.append(default_dir.add_child(('1', '1'), DirectoryStateTreeNode(True, False, has_known_prefix=True)))
all_entries.append(default_dir.add_child(('2',), DirectoryStateTreeNode(True, True, has_known_prefix=True)))
all_entries.append(default_dir.add_child(('3',), DirectoryStateTreeNode(True, True, has_known_prefix=False)))
all_entries.append(default_dir.add_child(('5',), DirectoryStateTreeNode(True, True, has_known_prefix=True)))
all_entries.append(default_dir.add_child(('3', '1'), DirectoryStateTreeNode(True, True, has_known_prefix=False)))
all_entries.append(default_dir.add_child(('1', '3'), DirectoryStateTreeNode(True, True, has_known_prefix=False)))
all_entries.append(
default_dir.add_child(
("1",), DirectoryStateTreeNode(True, True, has_known_prefix=True)
)
)
all_entries.append(
default_dir.add_child(
("1", "1"), DirectoryStateTreeNode(True, False, has_known_prefix=True)
)
)
all_entries.append(
default_dir.add_child(
("2",), DirectoryStateTreeNode(True, True, has_known_prefix=True)
)
)
all_entries.append(
default_dir.add_child(
("3",), DirectoryStateTreeNode(True, True, has_known_prefix=False)
)
)
all_entries.append(
default_dir.add_child(
("5",), DirectoryStateTreeNode(True, True, has_known_prefix=True)
)
)
all_entries.append(
default_dir.add_child(
("3", "1"), DirectoryStateTreeNode(True, True, has_known_prefix=False)
)
)
all_entries.append(
default_dir.add_child(
("1", "3"), DirectoryStateTreeNode(True, True, has_known_prefix=False)
)
)
entry = all_entries[-1]
child_entries = []
child_entries.append(entry.add_child(('1',), DirectoryStateTreeNode(True, False, has_known_prefix=True)))
child_entries.append(entry.add_child(('2',), DirectoryStateTreeNode(True, True, has_known_prefix=True)))
child_entries.append(entry.add_child(('3',), DirectoryStateTreeNode(True, True, has_known_prefix=True)))
child_entries.append(entry.add_child(('4',), DirectoryStateTreeNode(True, False, has_known_prefix=False)))
child_entries.append(entry.add_child(('5',), DirectoryStateTreeNode(True, True, has_known_prefix=True)))
child_entries.append(
entry.add_child(
("1",), DirectoryStateTreeNode(True, False, has_known_prefix=True)
)
)
child_entries.append(
entry.add_child(
("2",), DirectoryStateTreeNode(True, True, has_known_prefix=True)
)
)
child_entries.append(
entry.add_child(
("3",), DirectoryStateTreeNode(True, True, has_known_prefix=True)
)
)
child_entries.append(
entry.add_child(
("4",), DirectoryStateTreeNode(True, False, has_known_prefix=False)
)
)
child_entries.append(
entry.add_child(
("5",), DirectoryStateTreeNode(True, True, has_known_prefix=True)
)
)
all_entries.append(root.add_child(('1', '2'), DirectoryStateTreeNode(True, True, has_known_prefix=False)))
all_entries.append(root.add_child(('2',), DirectoryStateTreeNode(True, True, has_known_prefix=True)))
all_entries.append(root.add_child(('3',), DirectoryStateTreeNode(True, True, has_known_prefix=True)))
all_entries.append(root.add_child(('1', '3',), DirectoryStateTreeNode(True, True, has_known_prefix=True)))
all_entries.append(
root.add_child(
("1", "2"), DirectoryStateTreeNode(True, True, has_known_prefix=False)
)
)
all_entries.append(
root.add_child(
("2",), DirectoryStateTreeNode(True, True, has_known_prefix=True)
)
)
all_entries.append(
root.add_child(
("3",), DirectoryStateTreeNode(True, True, has_known_prefix=True)
)
)
all_entries.append(
root.add_child(
(
"1",
"3",
),
DirectoryStateTreeNode(True, True, has_known_prefix=True),
)
)
# This directory was merged with the default, but both have readable prefixes
entry = root.get_descendent(('2',))
entry = root.get_descendent(("2",))
assert entry.state.has_known_prefix
entry = all_entries[-1]
all_entries.append(entry.add_child(('1',), DirectoryStateTreeNode(True, True, has_known_prefix=True)))
all_entries.append(entry.add_child(('2',), DirectoryStateTreeNode(True, True, has_known_prefix=False)))
all_entries.append(entry.add_child(('3',), DirectoryStateTreeNode(True, False, has_known_prefix=True)))
all_entries.append(
entry.add_child(
("1",), DirectoryStateTreeNode(True, True, has_known_prefix=True)
)
)
all_entries.append(
entry.add_child(
("2",), DirectoryStateTreeNode(True, True, has_known_prefix=False)
)
)
all_entries.append(
entry.add_child(
("3",), DirectoryStateTreeNode(True, False, has_known_prefix=True)
)
)
entry_to_move = all_entries[-1]
all_entries.append(entry.add_child(('5',), DirectoryStateTreeNode(True, False, has_known_prefix=True)))
child_entries.append(entry.add_child(('6',), DirectoryStateTreeNode(True, True, has_known_prefix=True)))
all_entries.append(
entry.add_child(
("5",), DirectoryStateTreeNode(True, False, has_known_prefix=True)
)
)
child_entries.append(
entry.add_child(
("6",), DirectoryStateTreeNode(True, True, has_known_prefix=True)
)
)
all_entries.extend(child_entries)
# This directory has an unknown prefix
entry = root.get_descendent(('1', '2'))
entry = root.get_descendent(("1", "2"))
assert not entry.state.has_known_prefix
# This directory was default created and should have an unknown prefix
# It will merge with the default directory's child, which is not a subspace
entry = root.get_descendent(('1',))
entry = root.get_descendent(("1",))
assert not entry.state.has_known_prefix
assert not entry.state.is_subspace
# Multiple merges will have made this prefix unreadable
entry = root.get_descendent(('2',))
entry = root.get_descendent(("2",))
assert not entry.state.has_known_prefix
# Merge with default directory's child that has an unknown prefix
entry = root.get_descendent(('3',))
entry = root.get_descendent(("3",))
assert not entry.state.has_known_prefix
# Merge with default directory's child that has an unknown prefix and merged children
entry = root.get_descendent(('1', '3'))
assert set(entry.state.children.keys()) == {'1', '2', '3', '4', '5', '6'}
entry = root.get_descendent(("1", "3"))
assert set(entry.state.children.keys()) == {"1", "2", "3", "4", "5", "6"}
# This child entry should be the combination of ['default', '3'], ['default', '1', '3'], and ['1', '3']
entry = entry.get_descendent(('3',))
entry = entry.get_descendent(("3",))
assert not entry.state.has_known_prefix
assert not entry.state.is_subspace
@ -247,17 +366,17 @@ def run_test():
assert child_entries[5].state.has_known_prefix
assert child_entries[5].state.is_subspace
entry = root.add_child(('3',), entry_to_move)
entry = root.add_child(("3",), entry_to_move)
all_entries.append(entry)
# Test moving an entry
assert not entry.state.has_known_prefix
assert not entry.state.is_subspace
assert list(entry.state.children.keys()) == ['1']
assert list(entry.state.children.keys()) == ["1"]
for e in all_entries:
validate_dir(e, root)
if __name__ == '__main__':
if __name__ == "__main__":
sys.exit(run_test())

View File

@ -31,33 +31,37 @@ from bindingtester.tests.directory_state_tree import DirectoryStateTreeNode
fdb.api_version(FDB_API_VERSION)
DEFAULT_DIRECTORY_INDEX = 4
DEFAULT_DIRECTORY_PREFIX = b'default'
DIRECTORY_ERROR_STRING = b'DIRECTORY_ERROR'
DEFAULT_DIRECTORY_PREFIX = b"default"
DIRECTORY_ERROR_STRING = b"DIRECTORY_ERROR"
def setup_directories(instructions, default_path, random):
# Clients start with the default directory layer in the directory list
DirectoryStateTreeNode.reset()
dir_list = [DirectoryStateTreeNode.get_layer(b'\xfe')]
dir_list = [DirectoryStateTreeNode.get_layer(b"\xfe")]
instructions.push_args(0, b'\xfe')
instructions.append('DIRECTORY_CREATE_SUBSPACE')
instructions.push_args(0, b"\xfe")
instructions.append("DIRECTORY_CREATE_SUBSPACE")
dir_list.append(DirectoryStateTreeNode(False, True))
instructions.push_args(0, b'')
instructions.append('DIRECTORY_CREATE_SUBSPACE')
instructions.push_args(0, b"")
instructions.append("DIRECTORY_CREATE_SUBSPACE")
dir_list.append(DirectoryStateTreeNode(False, True))
instructions.push_args(1, 2, 1)
instructions.append('DIRECTORY_CREATE_LAYER')
dir_list.append(DirectoryStateTreeNode.get_layer(b'\xfe'))
instructions.append("DIRECTORY_CREATE_LAYER")
dir_list.append(DirectoryStateTreeNode.get_layer(b"\xfe"))
create_default_directory_subspace(instructions, default_path, random)
dir_list.append(dir_list[0].add_child((default_path,), DirectoryStateTreeNode(True, True, has_known_prefix=True)))
dir_list.append(
dir_list[0].add_child(
(default_path,), DirectoryStateTreeNode(True, True, has_known_prefix=True)
)
)
DirectoryStateTreeNode.set_default_directory(dir_list[-1])
instructions.push_args(DEFAULT_DIRECTORY_INDEX)
instructions.append('DIRECTORY_SET_ERROR_INDEX')
instructions.append("DIRECTORY_SET_ERROR_INDEX")
return dir_list
@ -65,41 +69,47 @@ def setup_directories(instructions, default_path, random):
def create_default_directory_subspace(instructions, path, random):
test_util.blocking_commit(instructions)
instructions.push_args(3)
instructions.append('DIRECTORY_CHANGE')
instructions.append("DIRECTORY_CHANGE")
prefix = random.random_string(16)
instructions.push_args(1, path, b'', b'%s-%s' % (DEFAULT_DIRECTORY_PREFIX, prefix))
instructions.append('DIRECTORY_CREATE_DATABASE')
instructions.push_args(1, path, b"", b"%s-%s" % (DEFAULT_DIRECTORY_PREFIX, prefix))
instructions.append("DIRECTORY_CREATE_DATABASE")
instructions.push_args(DEFAULT_DIRECTORY_INDEX)
instructions.append('DIRECTORY_CHANGE')
instructions.append("DIRECTORY_CHANGE")
def push_instruction_and_record_prefix(instructions, op, op_args, path, dir_index, random, subspace):
if not op.endswith('_DATABASE'):
def push_instruction_and_record_prefix(
instructions, op, op_args, path, dir_index, random, subspace
):
if not op.endswith("_DATABASE"):
instructions.push_args(1, *test_util.with_length(path))
instructions.append('DIRECTORY_EXISTS')
instructions.append("DIRECTORY_EXISTS")
# This op must leave the stack in the state it is in at this point, with the exception
# that it may leave an error on the stack
instructions.push_args(*op_args)
instructions.append(op)
if not op.endswith('_DATABASE'):
if not op.endswith("_DATABASE"):
instructions.push_args(dir_index)
instructions.append('DIRECTORY_CHANGE')
instructions.append("DIRECTORY_CHANGE")
instructions.push_args(1, b'', random.random_string(16), b'')
instructions.append('DIRECTORY_PACK_KEY')
test_util.to_front(instructions, 3) # move the existence result up to the front of the stack
instructions.push_args(1, b"", random.random_string(16), b"")
instructions.append("DIRECTORY_PACK_KEY")
test_util.to_front(
instructions, 3
) # move the existence result up to the front of the stack
t = util.subspace_to_tuple(subspace)
instructions.push_args(len(t) + 3, *t)
instructions.append('TUPLE_PACK') # subspace[<exists>][<packed_key>][random.random_string(16)] = b''
instructions.append('SET')
instructions.append(
"TUPLE_PACK"
) # subspace[<exists>][<packed_key>][random.random_string(16)] = b''
instructions.append("SET")
instructions.push_args(DEFAULT_DIRECTORY_INDEX)
instructions.append('DIRECTORY_CHANGE')
instructions.append("DIRECTORY_CHANGE")
def check_for_duplicate_prefixes(db, subspace):
@ -116,27 +126,43 @@ def check_for_duplicate_prefixes(db, subspace):
start_key = fdb.KeySelector.first_greater_than(prefixes[-1].key)
prefixes = [subspace[0].unpack(kv.key)[0] for kv in prefixes]
prefixes = [p for p in prefixes if not (p.startswith(DEFAULT_DIRECTORY_PREFIX) or p == DIRECTORY_ERROR_STRING)]
prefixes = [
p
for p in prefixes
if not (
p.startswith(DEFAULT_DIRECTORY_PREFIX) or p == DIRECTORY_ERROR_STRING
)
]
count += len(prefixes)
prefixes = [last_prefix] + prefixes
duplicates.update([p for i, p in enumerate(prefixes[1:]) if p == prefixes[i]])
last_prefix = prefixes[-1]
util.get_logger().info('Checked %d directory prefixes for duplicates' % count)
return ['The prefix %r was allocated multiple times' % d[:-2] for d in set(duplicates)]
util.get_logger().info("Checked %d directory prefixes for duplicates" % count)
return [
"The prefix %r was allocated multiple times" % d[:-2] for d in set(duplicates)
]
def validate_hca_state(db):
hca = fdb.Subspace((b'\xfe', b'hca'), b'\xfe')
hca = fdb.Subspace((b"\xfe", b"hca"), b"\xfe")
counters = hca[0]
recent = hca[1]
last_counter = db.get_range(counters.range().start, counters.range().stop, limit=1, reverse=True)
[(start, reported_count)] = [(counters.unpack(kv.key)[0], struct.unpack('<q', kv.value)[0]) for kv in last_counter] or [(0, 0)]
last_counter = db.get_range(
counters.range().start, counters.range().stop, limit=1, reverse=True
)
[(start, reported_count)] = [
(counters.unpack(kv.key)[0], struct.unpack("<q", kv.value)[0])
for kv in last_counter
] or [(0, 0)]
actual_count = len(db[recent[start]: recent.range().stop])
actual_count = len(db[recent[start] : recent.range().stop])
if actual_count > reported_count:
return ['The HCA reports %d prefixes allocated in current window, but it actually allocated %d' % (reported_count, actual_count)]
return [
"The HCA reports %d prefixes allocated in current window, but it actually allocated %d"
% (reported_count, actual_count)
]
return []

View File

@ -25,7 +25,12 @@ import fdb
from bindingtester import FDB_API_VERSION
from bindingtester import Result
from bindingtester.tests import Test, Instruction, ThreadedInstructionSet, ResultSpecification
from bindingtester.tests import (
Test,
Instruction,
ThreadedInstructionSet,
ResultSpecification,
)
from bindingtester.tests import test_util
fdb.api_version(FDB_API_VERSION)
@ -34,16 +39,17 @@ fdb.api_version(FDB_API_VERSION)
class ScriptedTest(Test):
def __init__(self, subspace):
super(ScriptedTest, self).__init__(subspace, FDB_API_VERSION, FDB_API_VERSION)
self.workspace = self.subspace['workspace']
self.results_subspace = self.subspace['results']
self.workspace = self.subspace["workspace"]
self.results_subspace = self.subspace["results"]
# self.thread_subspace = self.subspace['threads'] # TODO: update START_THREAD so that we can create threads in subspaces
def setup(self, args):
if args.concurrency > 1:
raise Exception('Scripted tests cannot be run with a concurrency greater than 1')
raise Exception(
"Scripted tests cannot be run with a concurrency greater than 1"
)
# SOMEDAY: this is only a limitation because we don't know how many operations the bisection should start with
# it should be fixable.
@ -51,7 +57,7 @@ class ScriptedTest(Test):
# We also need to enable the commented out support for num_ops in this file and make it so the default value runs
# the entire test
if args.bisect:
raise Exception('Scripted tests cannot be bisected')
raise Exception("Scripted tests cannot be bisected")
self.api_version = args.api_version
@ -61,295 +67,367 @@ class ScriptedTest(Test):
test_instructions = ThreadedInstructionSet()
main_thread = test_instructions.create_thread()
foo = [self.workspace.pack((b'foo%d' % i,)) for i in range(0, 6)]
foo = [self.workspace.pack((b"foo%d" % i,)) for i in range(0, 6)]
main_thread.append('NEW_TRANSACTION')
main_thread.append("NEW_TRANSACTION")
main_thread.push_args(1020)
main_thread.append('ON_ERROR')
self.add_result(main_thread, args, b'RESULT_NOT_PRESENT')
main_thread.append('GET_READ_VERSION')
main_thread.push_args(foo[1], b'bar')
main_thread.append('SET')
main_thread.append("ON_ERROR")
self.add_result(main_thread, args, b"RESULT_NOT_PRESENT")
main_thread.append("GET_READ_VERSION")
main_thread.push_args(foo[1], b"bar")
main_thread.append("SET")
main_thread.push_args(foo[1])
main_thread.append('GET')
self.add_result(main_thread, args, b'bar')
main_thread.append("GET")
self.add_result(main_thread, args, b"bar")
test_util.blocking_commit(main_thread)
self.add_result(main_thread, args, b'RESULT_NOT_PRESENT')
self.add_result(main_thread, args, b"RESULT_NOT_PRESENT")
main_thread.push_args(2000)
main_thread.append('ON_ERROR')
main_thread.append("ON_ERROR")
self.add_result(main_thread, args, test_util.error_string(2000))
main_thread.append('NEW_TRANSACTION')
main_thread.append("NEW_TRANSACTION")
main_thread.push_args(0)
main_thread.append('ON_ERROR')
main_thread.append("ON_ERROR")
self.add_result(main_thread, args, test_util.error_string(2000))
main_thread.append('NEW_TRANSACTION')
main_thread.append("NEW_TRANSACTION")
main_thread.push_args(foo[1])
main_thread.append('DUP')
main_thread.append('DUP')
main_thread.append('GET')
self.add_result(main_thread, args, b'bar')
main_thread.append('CLEAR')
main_thread.append('GET_SNAPSHOT')
self.add_result(main_thread, args, b'RESULT_NOT_PRESENT')
main_thread.append("DUP")
main_thread.append("DUP")
main_thread.append("GET")
self.add_result(main_thread, args, b"bar")
main_thread.append("CLEAR")
main_thread.append("GET_SNAPSHOT")
self.add_result(main_thread, args, b"RESULT_NOT_PRESENT")
main_thread.push_args(foo[1])
main_thread.append('GET_DATABASE')
self.add_result(main_thread, args, b'bar')
main_thread.append("GET_DATABASE")
self.add_result(main_thread, args, b"bar")
test_util.blocking_commit(main_thread)
self.add_result(main_thread, args, b'RESULT_NOT_PRESENT')
self.add_result(main_thread, args, b"RESULT_NOT_PRESENT")
main_thread.append('SET_READ_VERSION')
main_thread.append("SET_READ_VERSION")
main_thread.push_args(foo[1])
main_thread.append('DUP')
main_thread.append('GET')
self.add_result(main_thread, args, b'RESULT_NOT_PRESENT')
main_thread.append('CLEAR')
main_thread.append("DUP")
main_thread.append("GET")
self.add_result(main_thread, args, b"RESULT_NOT_PRESENT")
main_thread.append("CLEAR")
test_util.blocking_commit(main_thread)
self.add_result(main_thread, args, test_util.error_string(1020))
main_thread.push_args(foo[1])
main_thread.append('GET_SNAPSHOT')
self.add_result(main_thread, args, b'RESULT_NOT_PRESENT')
main_thread.append("GET_SNAPSHOT")
self.add_result(main_thread, args, b"RESULT_NOT_PRESENT")
main_thread.push_args(foo[1])
main_thread.append('CLEAR')
main_thread.append('COMMIT')
main_thread.append('WAIT_FUTURE')
self.add_result(main_thread, args, b'RESULT_NOT_PRESENT')
main_thread.append('GET_COMMITTED_VERSION')
main_thread.append('RESET')
main_thread.append('EMPTY_STACK')
main_thread.append("CLEAR")
main_thread.append("COMMIT")
main_thread.append("WAIT_FUTURE")
self.add_result(main_thread, args, b"RESULT_NOT_PRESENT")
main_thread.append("GET_COMMITTED_VERSION")
main_thread.append("RESET")
main_thread.append("EMPTY_STACK")
main_thread.append('NEW_TRANSACTION')
main_thread.push_args(1, b'bar', foo[1], foo[2], b'bar2', foo[3], b'bar3', foo[4], b'bar4', foo[5], b'bar5')
main_thread.append('SWAP')
main_thread.append('SET')
main_thread.append('SET')
main_thread.append('SET')
main_thread.append('SET')
main_thread.append('SET_DATABASE')
main_thread.append("NEW_TRANSACTION")
main_thread.push_args(
1,
b"bar",
foo[1],
foo[2],
b"bar2",
foo[3],
b"bar3",
foo[4],
b"bar4",
foo[5],
b"bar5",
)
main_thread.append("SWAP")
main_thread.append("SET")
main_thread.append("SET")
main_thread.append("SET")
main_thread.append("SET")
main_thread.append("SET_DATABASE")
test_util.blocking_commit(main_thread)
self.add_result(main_thread, args, b'RESULT_NOT_PRESENT')
self.add_result(main_thread, args, b"RESULT_NOT_PRESENT")
main_thread.append('SET_READ_VERSION')
main_thread.append("SET_READ_VERSION")
main_thread.push_args(foo[2])
main_thread.append('GET')
self.add_result(main_thread, args, b'RESULT_NOT_PRESENT')
main_thread.append("GET")
self.add_result(main_thread, args, b"RESULT_NOT_PRESENT")
main_thread.append('NEW_TRANSACTION')
main_thread.push_args(b'', 0, -1, b'')
main_thread.append('GET_KEY')
self.add_result(main_thread, args, b'')
main_thread.append("NEW_TRANSACTION")
main_thread.push_args(b"", 0, -1, b"")
main_thread.append("GET_KEY")
self.add_result(main_thread, args, b"")
main_thread.append('NEW_TRANSACTION')
main_thread.append('GET_READ_VERSION_SNAPSHOT')
main_thread.push_args(b'random', foo[1], foo[3], 0, 1, 1)
main_thread.append('POP')
main_thread.append('GET_RANGE')
self.add_result(main_thread, args, fdb.tuple.pack((foo[2], b'bar2', foo[1], b'bar')))
main_thread.append("NEW_TRANSACTION")
main_thread.append("GET_READ_VERSION_SNAPSHOT")
main_thread.push_args(b"random", foo[1], foo[3], 0, 1, 1)
main_thread.append("POP")
main_thread.append("GET_RANGE")
self.add_result(
main_thread, args, fdb.tuple.pack((foo[2], b"bar2", foo[1], b"bar"))
)
main_thread.push_args(foo[1], foo[3], 1, 1, 0)
main_thread.append('GET_RANGE_SNAPSHOT')
self.add_result(main_thread, args, fdb.tuple.pack((foo[2], b'bar2')))
main_thread.append("GET_RANGE_SNAPSHOT")
self.add_result(main_thread, args, fdb.tuple.pack((foo[2], b"bar2")))
main_thread.push_args(foo[1], foo[3], 0, 0, 4)
main_thread.append('GET_RANGE_DATABASE')
self.add_result(main_thread, args, fdb.tuple.pack((foo[1], b'bar', foo[2], b'bar2')))
main_thread.append("GET_RANGE_DATABASE")
self.add_result(
main_thread, args, fdb.tuple.pack((foo[1], b"bar", foo[2], b"bar2"))
)
test_util.blocking_commit(main_thread)
self.add_result(main_thread, args, b'RESULT_NOT_PRESENT')
self.add_result(main_thread, args, b"RESULT_NOT_PRESENT")
main_thread.push_args(foo[3], foo[5])
main_thread.append('CLEAR_RANGE')
main_thread.push_args(foo[1], 0, 3, b'')
main_thread.append('GET_KEY')
main_thread.append("CLEAR_RANGE")
main_thread.push_args(foo[1], 0, 3, b"")
main_thread.append("GET_KEY")
self.add_result(main_thread, args, foo[5])
main_thread.push_args(foo[1], 1, 2, b'')
main_thread.append('GET_KEY_SNAPSHOT')
main_thread.push_args(foo[1], 1, 2, b"")
main_thread.append("GET_KEY_SNAPSHOT")
self.add_result(main_thread, args, foo[5])
main_thread.push_args(foo[5], 0, -2, b'')
main_thread.append('GET_KEY_DATABASE')
main_thread.push_args(foo[5], 0, -2, b"")
main_thread.append("GET_KEY_DATABASE")
self.add_result(main_thread, args, foo[2])
main_thread.push_args(self.workspace.key(), 2, 0, 2)
main_thread.append('GET_RANGE_STARTS_WITH')
self.add_result(main_thread, args, fdb.tuple.pack((foo[1], b'bar', foo[2], b'bar2')))
main_thread.append("GET_RANGE_STARTS_WITH")
self.add_result(
main_thread, args, fdb.tuple.pack((foo[1], b"bar", foo[2], b"bar2"))
)
main_thread.push_args(self.workspace.key(), 4, 0, 3)
main_thread.append('GET_RANGE_STARTS_WITH_SNAPSHOT')
self.add_result(main_thread, args, fdb.tuple.pack((foo[1], b'bar', foo[2], b'bar2', foo[5], b'bar5')))
main_thread.append("GET_RANGE_STARTS_WITH_SNAPSHOT")
self.add_result(
main_thread,
args,
fdb.tuple.pack((foo[1], b"bar", foo[2], b"bar2", foo[5], b"bar5")),
)
main_thread.push_args(self.workspace.key(), 3, 1, -1)
main_thread.append('GET_RANGE_STARTS_WITH_DATABASE')
self.add_result(main_thread, args, fdb.tuple.pack((foo[5], b'bar5', foo[4], b'bar4', foo[3], b'bar3')))
main_thread.push_args(foo[1], 0, 1, foo[1], 0, 3, 0, 0, -1, b'')
main_thread.append('GET_RANGE_SELECTOR')
self.add_result(main_thread, args, fdb.tuple.pack((foo[1], b'bar', foo[2], b'bar2')))
main_thread.push_args(foo[1], 1, 0, foo[1], 1, 3, 0, 0, -1, b'')
main_thread.append('GET_RANGE_SELECTOR_SNAPSHOT')
self.add_result(main_thread, args, fdb.tuple.pack((foo[1], b'bar', foo[2], b'bar2', foo[5], b'bar5')))
main_thread.push_args(foo[1], 0, 1, foo[1], 1, 3, 0, 0, -1, b'')
main_thread.append('GET_RANGE_SELECTOR_DATABASE')
self.add_result(main_thread, args, fdb.tuple.pack((foo[1], b'bar', foo[2], b'bar2', foo[3], b'bar3')))
main_thread.append("GET_RANGE_STARTS_WITH_DATABASE")
self.add_result(
main_thread,
args,
fdb.tuple.pack((foo[5], b"bar5", foo[4], b"bar4", foo[3], b"bar3")),
)
main_thread.push_args(foo[1], 0, 1, foo[1], 0, 3, 0, 0, -1, b"")
main_thread.append("GET_RANGE_SELECTOR")
self.add_result(
main_thread, args, fdb.tuple.pack((foo[1], b"bar", foo[2], b"bar2"))
)
main_thread.push_args(foo[1], 1, 0, foo[1], 1, 3, 0, 0, -1, b"")
main_thread.append("GET_RANGE_SELECTOR_SNAPSHOT")
self.add_result(
main_thread,
args,
fdb.tuple.pack((foo[1], b"bar", foo[2], b"bar2", foo[5], b"bar5")),
)
main_thread.push_args(foo[1], 0, 1, foo[1], 1, 3, 0, 0, -1, b"")
main_thread.append("GET_RANGE_SELECTOR_DATABASE")
self.add_result(
main_thread,
args,
fdb.tuple.pack((foo[1], b"bar", foo[2], b"bar2", foo[3], b"bar3")),
)
test_util.blocking_commit(main_thread)
self.add_result(main_thread, args, b'RESULT_NOT_PRESENT')
self.add_result(main_thread, args, b"RESULT_NOT_PRESENT")
main_thread.push_args(self.workspace.key())
main_thread.append('CLEAR_RANGE_STARTS_WITH')
main_thread.append("CLEAR_RANGE_STARTS_WITH")
main_thread.push_args(self.workspace.key(), 0, 0, -1)
main_thread.append('GET_RANGE_STARTS_WITH')
self.add_result(main_thread, args, b'')
main_thread.append("GET_RANGE_STARTS_WITH")
self.add_result(main_thread, args, b"")
test_util.blocking_commit(main_thread)
self.add_result(main_thread, args, b'RESULT_NOT_PRESENT')
self.add_result(main_thread, args, b"RESULT_NOT_PRESENT")
main_thread.append('SET_READ_VERSION')
main_thread.append("SET_READ_VERSION")
main_thread.push_args(foo[1])
main_thread.append('GET')
self.add_result(main_thread, args, b'bar')
main_thread.append("GET")
self.add_result(main_thread, args, b"bar")
test_util.blocking_commit(main_thread)
self.add_result(main_thread, args, b'RESULT_NOT_PRESENT')
self.add_result(main_thread, args, b"RESULT_NOT_PRESENT")
main_thread.push_args(foo[1], b'bar', foo[2], b'bar2', foo[3], b'bar3', foo[4], b'bar4', foo[5], b'bar5')
main_thread.append('SET')
main_thread.append('SET')
main_thread.append('SET')
main_thread.append('SET')
main_thread.append('SET')
main_thread.push_args(
foo[1],
b"bar",
foo[2],
b"bar2",
foo[3],
b"bar3",
foo[4],
b"bar4",
foo[5],
b"bar5",
)
main_thread.append("SET")
main_thread.append("SET")
main_thread.append("SET")
main_thread.append("SET")
main_thread.append("SET")
test_util.blocking_commit(main_thread)
self.add_result(main_thread, args, b'RESULT_NOT_PRESENT')
self.add_result(main_thread, args, b"RESULT_NOT_PRESENT")
main_thread.push_args(foo[2])
main_thread.append('CLEAR_DATABASE')
main_thread.append('WAIT_FUTURE')
main_thread.append("CLEAR_DATABASE")
main_thread.append("WAIT_FUTURE")
main_thread.push_args(self.workspace.key(), 0, 0, -1)
main_thread.append('GET_RANGE_STARTS_WITH_DATABASE')
self.add_result(main_thread, args, fdb.tuple.pack((foo[1], b'bar', foo[3], b'bar3', foo[4], b'bar4', foo[5], b'bar5')))
main_thread.append("GET_RANGE_STARTS_WITH_DATABASE")
self.add_result(
main_thread,
args,
fdb.tuple.pack(
(foo[1], b"bar", foo[3], b"bar3", foo[4], b"bar4", foo[5], b"bar5")
),
)
main_thread.push_args(foo[3], foo[5])
main_thread.append('CLEAR_RANGE_DATABASE')
main_thread.append('WAIT_FUTURE')
main_thread.append("CLEAR_RANGE_DATABASE")
main_thread.append("WAIT_FUTURE")
main_thread.push_args(self.workspace.key(), 0, 0, -1)
main_thread.append('GET_RANGE_STARTS_WITH_DATABASE')
self.add_result(main_thread, args, fdb.tuple.pack((foo[1], b'bar', foo[5], b'bar5')))
main_thread.append("GET_RANGE_STARTS_WITH_DATABASE")
self.add_result(
main_thread, args, fdb.tuple.pack((foo[1], b"bar", foo[5], b"bar5"))
)
main_thread.push_args(self.workspace.key())
main_thread.append('CLEAR_RANGE_STARTS_WITH_DATABASE')
main_thread.append('WAIT_FUTURE')
main_thread.append("CLEAR_RANGE_STARTS_WITH_DATABASE")
main_thread.append("WAIT_FUTURE")
main_thread.push_args(self.workspace.key(), 0, 0, -1)
main_thread.append('GET_RANGE_STARTS_WITH_DATABASE')
self.add_result(main_thread, args, b'')
main_thread.append("GET_RANGE_STARTS_WITH_DATABASE")
self.add_result(main_thread, args, b"")
test_util.blocking_commit(main_thread)
self.add_result(main_thread, args, b'RESULT_NOT_PRESENT')
self.add_result(main_thread, args, b"RESULT_NOT_PRESENT")
main_thread.append('NEW_TRANSACTION')
main_thread.append("NEW_TRANSACTION")
main_thread.push_args(foo[1], foo[5], 0, 0, 0)
main_thread.append('GET_RANGE')
main_thread.append("GET_RANGE")
self.add_result(main_thread, args, test_util.error_string(2210))
main_thread.push_args(foo[1], foo[5], 0, 0, 0)
main_thread.append('GET_RANGE_DATABASE')
main_thread.append("GET_RANGE_DATABASE")
self.add_result(main_thread, args, test_util.error_string(2210))
self.append_range_test(main_thread, args, 100, 256)
self.append_range_test(main_thread, args, 1000, 8)
main_thread.append('EMPTY_STACK')
tup = (0, b'foo', -1093, 'unicode\u9348test', 0xffffffff + 100, b'bar\x00\xff')
main_thread.append("EMPTY_STACK")
tup = (0, b"foo", -1093, "unicode\u9348test", 0xFFFFFFFF + 100, b"bar\x00\xff")
main_thread.push_args(*test_util.with_length(tup))
main_thread.append('TUPLE_PACK')
main_thread.append('DUP')
main_thread.append("TUPLE_PACK")
main_thread.append("DUP")
self.add_result(main_thread, args, fdb.tuple.pack(tup))
main_thread.append('TUPLE_UNPACK')
main_thread.append("TUPLE_UNPACK")
for item in reversed(tup):
self.add_result(main_thread, args, fdb.tuple.pack((item,)))
main_thread.push_args(0xffffffff, -100)
main_thread.append('SUB')
main_thread.push_args(0xFFFFFFFF, -100)
main_thread.append("SUB")
main_thread.push_args(1)
main_thread.append('TUPLE_PACK')
self.add_result(main_thread, args, fdb.tuple.pack((0xffffffff + 100,)))
main_thread.append("TUPLE_PACK")
self.add_result(main_thread, args, fdb.tuple.pack((0xFFFFFFFF + 100,)))
main_thread.append('EMPTY_STACK')
main_thread.append("EMPTY_STACK")
main_thread.push_args(*test_util.with_length(tup))
main_thread.append('TUPLE_RANGE')
main_thread.append("TUPLE_RANGE")
rng = fdb.tuple.range(tup)
self.add_result(main_thread, args, rng.stop)
self.add_result(main_thread, args, rng.start)
stampKey = b'stampedXXXXXXXXXXsuffix'
stampKeyIndex = stampKey.find(b'XXXXXXXXXX')
main_thread.push_args('SET_VERSIONSTAMPED_KEY', self.versionstamp_key(stampKey, stampKeyIndex), b'stampedBar')
main_thread.append('ATOMIC_OP')
main_thread.push_args('SET_VERSIONSTAMPED_VALUE', b'stampedValue', self.versionstamp_value(b'XXXXXXXXXX'))
main_thread.append('ATOMIC_OP')
stampKey = b"stampedXXXXXXXXXXsuffix"
stampKeyIndex = stampKey.find(b"XXXXXXXXXX")
main_thread.push_args(
"SET_VERSIONSTAMPED_KEY",
self.versionstamp_key(stampKey, stampKeyIndex),
b"stampedBar",
)
main_thread.append("ATOMIC_OP")
main_thread.push_args(
"SET_VERSIONSTAMPED_VALUE",
b"stampedValue",
self.versionstamp_value(b"XXXXXXXXXX"),
)
main_thread.append("ATOMIC_OP")
if self.api_version >= 520:
stampValue = b'stampedXXXXXXXXXXsuffix'
stampValueIndex = stampValue.find(b'XXXXXXXXXX')
main_thread.push_args('SET_VERSIONSTAMPED_VALUE', b'stampedValue2', self.versionstamp_value(stampValue, stampValueIndex))
main_thread.append('ATOMIC_OP')
stampValue = b"stampedXXXXXXXXXXsuffix"
stampValueIndex = stampValue.find(b"XXXXXXXXXX")
main_thread.push_args(
"SET_VERSIONSTAMPED_VALUE",
b"stampedValue2",
self.versionstamp_value(stampValue, stampValueIndex),
)
main_thread.append("ATOMIC_OP")
main_thread.push_args(b'suffix')
main_thread.append('GET_VERSIONSTAMP')
main_thread.push_args(b"suffix")
main_thread.append("GET_VERSIONSTAMP")
test_util.blocking_commit(main_thread)
self.add_result(main_thread, args, b'RESULT_NOT_PRESENT')
main_thread.push_args(b'stamped')
main_thread.append('CONCAT')
main_thread.append('CONCAT')
main_thread.append('GET')
self.add_result(main_thread, args, b'stampedBar')
self.add_result(main_thread, args, b"RESULT_NOT_PRESENT")
main_thread.push_args(b"stamped")
main_thread.append("CONCAT")
main_thread.append("CONCAT")
main_thread.append("GET")
self.add_result(main_thread, args, b"stampedBar")
main_thread.push_args(b'stampedValue', b'suffix')
main_thread.append('GET')
main_thread.push_args(b'stamped')
main_thread.append('CONCAT')
main_thread.append('CONCAT')
main_thread.append('GET')
self.add_result(main_thread, args, b'stampedBar')
main_thread.push_args(b"stampedValue", b"suffix")
main_thread.append("GET")
main_thread.push_args(b"stamped")
main_thread.append("CONCAT")
main_thread.append("CONCAT")
main_thread.append("GET")
self.add_result(main_thread, args, b"stampedBar")
if self.api_version >= 520:
main_thread.push_args(b'stampedValue2')
main_thread.append('GET')
main_thread.append('GET')
self.add_result(main_thread, args, b'stampedBar')
main_thread.push_args(b"stampedValue2")
main_thread.append("GET")
main_thread.append("GET")
self.add_result(main_thread, args, b"stampedBar")
main_thread.append('GET_VERSIONSTAMP')
main_thread.append("GET_VERSIONSTAMP")
test_util.blocking_commit(main_thread)
self.add_result(main_thread, args, b'RESULT_NOT_PRESENT')
self.add_result(main_thread, args, b"RESULT_NOT_PRESENT")
self.add_result(main_thread, args, test_util.error_string(2021))
main_thread.push_args(b'sentinel')
main_thread.append('UNIT_TESTS')
self.add_result(main_thread, args, b'sentinel')
main_thread.push_args(b"sentinel")
main_thread.append("UNIT_TESTS")
self.add_result(main_thread, args, b"sentinel")
if not args.no_threads:
wait_key = b'waitKey'
wait_key = b"waitKey"
# threads = [self.thread_subspace[i] for i in range(0, 2)]
threads = [b'thread_spec%d' % i for i in range(0, 2)]
threads = [b"thread_spec%d" % i for i in range(0, 2)]
for thread_spec in threads:
main_thread.push_args(self.workspace.pack((wait_key, thread_spec)), b'')
main_thread.append('SET_DATABASE')
main_thread.append('WAIT_FUTURE')
main_thread.push_args(self.workspace.pack((wait_key, thread_spec)), b"")
main_thread.append("SET_DATABASE")
main_thread.append("WAIT_FUTURE")
for thread_spec in threads:
main_thread.push_args(thread_spec)
# if len(main_thread) < args.num_ops:
main_thread.append('START_THREAD')
main_thread.append("START_THREAD")
thread = test_instructions.create_thread(fdb.Subspace((thread_spec,)))
thread.append('NEW_TRANSACTION')
thread.push_args(foo[1], foo[1], b'bar%s' % thread_spec, self.workspace.pack(
(wait_key, thread_spec)), self.workspace.pack((wait_key, thread_spec)))
thread.append('GET')
thread.append('POP')
thread.append('SET')
thread.append('CLEAR')
thread.append("NEW_TRANSACTION")
thread.push_args(
foo[1],
foo[1],
b"bar%s" % thread_spec,
self.workspace.pack((wait_key, thread_spec)),
self.workspace.pack((wait_key, thread_spec)),
)
thread.append("GET")
thread.append("POP")
thread.append("SET")
thread.append("CLEAR")
test_util.blocking_commit(thread)
thread.append('POP')
thread.append('CLEAR_DATABASE')
thread.append("POP")
thread.append("CLEAR_DATABASE")
thread.push_args(self.workspace.pack((wait_key,)))
thread.append('WAIT_EMPTY')
thread.append("WAIT_EMPTY")
thread.append('NEW_TRANSACTION')
thread.append("NEW_TRANSACTION")
thread.push_args(foo[1])
thread.append('GET')
self.add_result(thread, args, b'barthread_spec0', b'barthread_spec1')
thread.append("GET")
self.add_result(thread, args, b"barthread_spec0", b"barthread_spec1")
main_thread.append('EMPTY_STACK')
main_thread.append("EMPTY_STACK")
# if len(main_thread) > args.num_ops:
# main_thread[args.num_ops:] = []
@ -357,21 +435,32 @@ class ScriptedTest(Test):
def get_result_specifications(self):
return [
ResultSpecification(self.results_subspace, ordering_index=0, global_error_filter=[1007, 1009, 1021])
ResultSpecification(
self.results_subspace,
ordering_index=0,
global_error_filter=[1007, 1009, 1021],
)
]
def get_expected_results(self):
return {self.results_subspace: self.results}
def append_range_test(self, instructions, args, num_pairs, kv_length):
instructions.append('NEW_TRANSACTION')
instructions.append("NEW_TRANSACTION")
instructions.push_args(self.workspace.key())
instructions.append('CLEAR_RANGE_STARTS_WITH')
instructions.append("CLEAR_RANGE_STARTS_WITH")
kvpairs = []
for i in range(0, num_pairs * 2):
kvpairs.append(self.workspace.pack((b'foo', bytes([random.randint(0, 254) for i in range(0, kv_length)]))))
kvpairs.append(
self.workspace.pack(
(
b"foo",
bytes([random.randint(0, 254) for i in range(0, kv_length)]),
)
)
)
kvpairs = list(set(kvpairs))
if len(kvpairs) % 2 == 1:
@ -380,30 +469,32 @@ class ScriptedTest(Test):
instructions.push_args(*kvpairs)
for i in range(0, len(kvpairs) // 2):
instructions.append('SET')
instructions.append("SET")
if i % 100 == 99:
test_util.blocking_commit(instructions)
self.add_result(instructions, args, b'RESULT_NOT_PRESENT')
self.add_result(instructions, args, b"RESULT_NOT_PRESENT")
foo_range = self.workspace.range((b'foo',))
foo_range = self.workspace.range((b"foo",))
instructions.push_args(foo_range.start, foo_range.stop, 0, 0, -1)
instructions.append('GET_RANGE')
instructions.append("GET_RANGE")
self.add_result(instructions, args, fdb.tuple.pack(tuple(kvpairs)))
instructions.push_args(self.workspace.key(), 0, 0, -1)
instructions.append('GET_RANGE_STARTS_WITH')
instructions.append("GET_RANGE_STARTS_WITH")
self.add_result(instructions, args, fdb.tuple.pack(tuple(kvpairs)))
instructions.push_args(foo_range.start, 0, 1, foo_range.stop, 0, 1, 0, 0, -1, b'')
instructions.append('GET_RANGE_SELECTOR')
instructions.push_args(
foo_range.start, 0, 1, foo_range.stop, 0, 1, 0, 0, -1, b""
)
instructions.append("GET_RANGE_SELECTOR")
self.add_result(instructions, args, fdb.tuple.pack(tuple(kvpairs)))
test_util.blocking_commit(instructions)
self.add_result(instructions, args, b'RESULT_NOT_PRESENT')
self.add_result(instructions, args, b"RESULT_NOT_PRESENT")
def add_result(self, instructions, args, *values):
key = self.results_subspace.pack((len(self.results),))
instructions.push_args(key)
instructions.append('SET_DATABASE')
instructions.append("SET_DATABASE")
# if len(instructions) <= args.num_ops:
self.results.append(Result(self.results_subspace, key, values))
instructions.append('POP')
instructions.append("POP")

View File

@ -35,8 +35,10 @@ fdb.api_version(FDB_API_VERSION)
class TupleTest(Test):
def __init__(self, subspace):
super(TupleTest, self).__init__(subspace)
self.workspace = self.subspace['workspace'] # The keys and values here must match between subsequent runs of the same test
self.stack_subspace = self.subspace['stack']
self.workspace = self.subspace[
"workspace"
] # The keys and values here must match between subsequent runs of the same test
self.stack_subspace = self.subspace["stack"]
def setup(self, args):
self.max_int_bits = args.max_int_bits
@ -45,30 +47,32 @@ class TupleTest(Test):
def generate(self, args, thread_number):
instructions = InstructionSet()
min_value = -2**self.max_int_bits + 1
min_value = -(2**self.max_int_bits) + 1
max_value = 2**self.max_int_bits - 1
instructions.append('NEW_TRANSACTION')
instructions.append("NEW_TRANSACTION")
# Test integer encoding
mutations = 0
for i in range(0, self.max_int_bits + 1):
for sign in [-1, 1]:
sign_str = '' if sign == 1 else '-'
sign_str = "" if sign == 1 else "-"
for offset in range(-10, 11):
val = (2**i) * sign + offset
if val >= min_value and val <= max_value:
if offset == 0:
add_str = ''
add_str = ""
elif offset > 0:
add_str = '+%d' % offset
add_str = "+%d" % offset
else:
add_str = '%d' % offset
add_str = "%d" % offset
instructions.push_args(1, val)
instructions.append('TUPLE_PACK')
instructions.push_args(self.workspace.pack(('%s2^%d%s' % (sign_str, i, add_str),)))
instructions.append('SET')
instructions.append("TUPLE_PACK")
instructions.push_args(
self.workspace.pack(("%s2^%d%s" % (sign_str, i, add_str),))
)
instructions.append("SET")
mutations += 1
if mutations >= 5000:
@ -79,7 +83,7 @@ class TupleTest(Test):
test_util.blocking_commit(instructions)
instructions.push_args(self.stack_subspace.key())
instructions.append('LOG_STACK')
instructions.append("LOG_STACK")
test_util.blocking_commit(instructions)
@ -88,5 +92,10 @@ class TupleTest(Test):
def get_result_specifications(self):
return [
ResultSpecification(self.workspace, global_error_filter=[1007, 1009, 1021]),
ResultSpecification(self.stack_subspace, key_start_index=1, ordering_index=1, global_error_filter=[1007, 1009, 1021]),
ResultSpecification(
self.stack_subspace,
key_start_index=1,
ordering_index=1,
global_error_filter=[1007, 1009, 1021],
),
]

View File

@ -29,27 +29,27 @@ import fdb
def initialize_logger_level(logging_level):
logger = get_logger()
assert logging_level in ['DEBUG', 'INFO', 'WARNING', 'ERROR']
assert logging_level in ["DEBUG", "INFO", "WARNING", "ERROR"]
if logging_level == 'DEBUG':
if logging_level == "DEBUG":
logger.setLevel(logging.DEBUG)
elif logging_level == 'INFO':
elif logging_level == "INFO":
logger.setLevel(logging.INFO)
elif logging_level == 'WARNING':
elif logging_level == "WARNING":
logger.setLevel(logging.WARNING)
elif logging_level == 'ERROR':
elif logging_level == "ERROR":
logger.setLevel(logging.ERROR)
def get_logger():
return logging.getLogger('foundationdb.bindingtester')
return logging.getLogger("foundationdb.bindingtester")
# Attempts to get the name associated with a process termination signal
def signal_number_to_name(signal_num):
name = []
for key in signal.__dict__.keys():
if key.startswith('SIG') and getattr(signal, key) == signal_num:
if key.startswith("SIG") and getattr(signal, key) == signal_num:
name.append(key)
if len(name) == 1:
return name[0]
@ -58,11 +58,11 @@ def signal_number_to_name(signal_num):
def import_subclasses(filename, module_path):
for f in glob.glob(os.path.join(os.path.dirname(filename), '*.py')):
for f in glob.glob(os.path.join(os.path.dirname(filename), "*.py")):
fn = os.path.basename(f)
if fn == '__init__.py':
if fn == "__init__.py":
continue
__import__('%s.%s' % (module_path, os.path.splitext(fn)[0]))
__import__("%s.%s" % (module_path, os.path.splitext(fn)[0]))
# Attempts to unpack a subspace
@ -73,4 +73,6 @@ def subspace_to_tuple(subspace):
return fdb.tuple.unpack(subspace.key())
except Exception as e:
get_logger().debug(e)
raise Exception('The binding tester does not support subspaces with non-tuple raw prefixes')
raise Exception(
"The binding tester does not support subspaces with non-tuple raw prefixes"
)

View File

@ -27,10 +27,9 @@ import sys
functions = {}
func_re = re.compile(
    r"^\s*FDB_API_(?:CHANGED|REMOVED)\s*\(\s*([^,]*),\s*([^)]*)\).*")
func_re = re.compile(r"^\s*FDB_API_(?:CHANGED|REMOVED)\s*\(\s*([^,]*),\s*([^)]*)\).*")
with open(source, 'r') as srcfile:
with open(source, "r") as srcfile:
for line in srcfile:
m = func_re.match(line)
if m:
@ -63,12 +62,12 @@ def write_unix_asm(asmfile, functions, prefix):
asmfile.write(".intel_syntax noprefix\n")
i = 0
if os == 'linux' or os == 'freebsd':
if os == "linux" or os == "freebsd":
asmfile.write("\n.data\n")
for f in functions:
asmfile.write("\t.extern fdb_api_ptr_%s\n" % f)
if os == 'linux' or os == 'freebsd':
if os == "linux" or os == "freebsd":
asmfile.write("\n.text\n")
for f in functions:
if cpu == "ppc64le":
@ -80,7 +79,7 @@ def write_unix_asm(asmfile, functions, prefix):
i = 0
for f in functions:
asmfile.write("\n.globl %s%s\n" % (prefix, f))
if cpu == 'aarch64' and os == 'osx':
if cpu == "aarch64" and os == "osx":
asmfile.write(".p2align\t2\n")
asmfile.write("%s%s:\n" % (prefix, f))
@ -114,14 +113,14 @@ def write_unix_asm(asmfile, functions, prefix):
# .ident "GCC: (GNU) 8.3.1 20190311 (Red Hat 8.3.1-3)"
if cpu == "aarch64":
if os == 'osx':
if os == "osx":
asmfile.write("\tadrp x8, _fdb_api_ptr_%s@GOTPAGE\n" % (f))
asmfile.write("\tldr x8, [x8, _fdb_api_ptr_%s@GOTPAGEOFF]\n" % (f))
elif os == 'linux':
elif os == "linux":
asmfile.write("\tadrp x8, :got:fdb_api_ptr_%s\n" % (f))
asmfile.write("\tldr x8, [x8, #:got_lo12:fdb_api_ptr_%s]\n" % (f))
else:
assert False, '{} not supported for Arm yet'.format(os)
assert False, "{} not supported for Arm yet".format(os)
asmfile.write("\tldr x8, [x8]\n")
asmfile.write("\tbr x8\n")
elif cpu == "ppc64le":
@ -166,28 +165,31 @@ def write_unix_asm(asmfile, functions, prefix):
i = i + 1
else:
asmfile.write(
"\tmov r11, qword ptr [%sfdb_api_ptr_%s@GOTPCREL+rip]\n" % (prefix, f))
"\tmov r11, qword ptr [%sfdb_api_ptr_%s@GOTPCREL+rip]\n" % (prefix, f)
)
asmfile.write("\tmov r11, qword ptr [r11]\n")
asmfile.write("\tjmp r11\n")
with open(asm, 'w') as asmfile:
with open(h, 'w') as hfile:
with open(asm, "w") as asmfile:
with open(h, "w") as hfile:
hfile.write(
"void fdb_api_ptr_unimpl() { fprintf(stderr, \"UNIMPLEMENTED FDB API FUNCTION\\n\"); abort(); }\n\n")
'void fdb_api_ptr_unimpl() { fprintf(stderr, "UNIMPLEMENTED FDB API FUNCTION\\n"); abort(); }\n\n'
)
hfile.write(
"void fdb_api_ptr_removed() { fprintf(stderr, \"REMOVED FDB API FUNCTION\\n\"); abort(); }\n\n")
'void fdb_api_ptr_removed() { fprintf(stderr, "REMOVED FDB API FUNCTION\\n"); abort(); }\n\n'
)
if os == 'linux':
write_unix_asm(asmfile, functions, '')
if os == "linux":
write_unix_asm(asmfile, functions, "")
elif os == "osx":
write_unix_asm(asmfile, functions, '_')
write_unix_asm(asmfile, functions, "_")
elif os == "windows":
write_windows_asm(asmfile, functions)
for f in functions:
if os == "windows":
hfile.write("extern \"C\" ")
hfile.write('extern "C" ')
hfile.write("void* fdb_api_ptr_%s = (void*)&fdb_api_ptr_unimpl;\n" % f)
for v in functions[f]:
hfile.write("#define %s_v%d_PREV %s_v%d\n" % (f, v, f, v - 1))

View File

@ -52,7 +52,10 @@ class TestEnv(LocalCluster):
self.downloader.binary_path(version, "fdbcli"),
1,
)
self.set_env_var("LD_LIBRARY_PATH", "%s:%s" % (self.downloader.lib_dir(version), os.getenv("LD_LIBRARY_PATH")))
self.set_env_var(
"LD_LIBRARY_PATH",
"%s:%s" % (self.downloader.lib_dir(version), os.getenv("LD_LIBRARY_PATH")),
)
client_lib = self.downloader.lib_path(version)
assert client_lib.exists(), "{} does not exist".format(client_lib)
self.client_lib_external = self.tmp_dir.joinpath("libfdb_c_external.so")
@ -69,9 +72,13 @@ class TestEnv(LocalCluster):
def exec_client_command(self, cmd_args, env_vars=None, expected_ret_code=0):
print("Executing test command: {}".format(" ".join([str(c) for c in cmd_args])))
tester_proc = subprocess.Popen(cmd_args, stdout=sys.stdout, stderr=sys.stderr, env=env_vars)
tester_proc = subprocess.Popen(
cmd_args, stdout=sys.stdout, stderr=sys.stderr, env=env_vars
)
tester_retcode = tester_proc.wait()
assert tester_retcode == expected_ret_code, "Tester completed return code {}, but {} was expected".format(
assert (
tester_retcode == expected_ret_code
), "Tester completed return code {}, but {} was expected".format(
tester_retcode, expected_ret_code
)
@ -82,11 +89,17 @@ class FdbCShimTests:
assert self.build_dir.exists(), "{} does not exist".format(args.build_dir)
assert self.build_dir.is_dir(), "{} is not a directory".format(args.build_dir)
self.unit_tests_bin = Path(args.unit_tests_bin).resolve()
assert self.unit_tests_bin.exists(), "{} does not exist".format(self.unit_tests_bin)
assert self.unit_tests_bin.exists(), "{} does not exist".format(
self.unit_tests_bin
)
self.api_tester_bin = Path(args.api_tester_bin).resolve()
assert self.api_tester_bin.exists(), "{} does not exist".format(self.api_tests_bin)
assert self.api_tester_bin.exists(), "{} does not exist".format(
self.api_tester_bin
)
self.shim_lib_tester_bin = Path(args.shim_lib_tester_bin).resolve()
assert self.shim_lib_tester_bin.exists(), "{} does not exist".format(self.shim_lib_tester_bin)
assert self.shim_lib_tester_bin.exists(), "{} does not exist".format(
self.shim_lib_tester_bin
)
self.api_test_dir = Path(args.api_test_dir).resolve()
assert self.api_test_dir.exists(), "{} does not exist".format(self.api_test_dir)
self.downloader = FdbBinaryDownloader(args.build_dir)
@ -124,7 +137,9 @@ class FdbCShimTests:
with TestEnv(self.build_dir, self.downloader, version) as test_env:
cmd_args = self.build_c_api_tester_args(test_env, test_file)
env_vars = os.environ.copy()
env_vars["FDB_LOCAL_CLIENT_LIBRARY_PATH"] = self.downloader.lib_path(version)
env_vars["FDB_LOCAL_CLIENT_LIBRARY_PATH"] = self.downloader.lib_path(
version
)
test_env.exec_client_command(cmd_args, env_vars)
def run_c_unit_tests(self, version):
@ -132,9 +147,16 @@ class FdbCShimTests:
print("C Unit Tests - version: {}".format(version))
print("-" * 80)
with TestEnv(self.build_dir, self.downloader, version) as test_env:
cmd_args = [self.unit_tests_bin, test_env.cluster_file, "fdb", test_env.client_lib_external]
cmd_args = [
self.unit_tests_bin,
test_env.cluster_file,
"fdb",
test_env.client_lib_external,
]
env_vars = os.environ.copy()
env_vars["FDB_LOCAL_CLIENT_LIBRARY_PATH"] = self.downloader.lib_path(version)
env_vars["FDB_LOCAL_CLIENT_LIBRARY_PATH"] = self.downloader.lib_path(
version
)
test_env.exec_client_command(cmd_args, env_vars)
def run_c_shim_lib_tester(
@ -163,7 +185,11 @@ class FdbCShimTests:
test_flags.append("use_external_lib")
else:
test_flags.append("use_local_lib")
print("C Shim Tests - version: {}, API version: {}, {}".format(version, api_version, ", ".join(test_flags)))
print(
"C Shim Tests - version: {}, API version: {}, {}".format(
version, api_version, ", ".join(test_flags)
)
)
print("-" * 80)
cmd_args = [
self.shim_lib_tester_bin,
@ -178,10 +204,17 @@ class FdbCShimTests:
("dummy" if invalid_lib_path else self.downloader.lib_path(version)),
]
if use_external_lib:
cmd_args = cmd_args + ["--disable-local-client", "--external-client-library", test_env.client_lib_external]
cmd_args = cmd_args + [
"--disable-local-client",
"--external-client-library",
test_env.client_lib_external,
]
env_vars = os.environ.copy()
if set_ld_lib_path:
env_vars["LD_LIBRARY_PATH"] = "%s:%s" % (self.downloader.lib_dir(version), os.getenv("LD_LIBRARY_PATH"))
env_vars["LD_LIBRARY_PATH"] = "%s:%s" % (
self.downloader.lib_dir(version),
os.getenv("LD_LIBRARY_PATH"),
)
if set_env_path:
env_vars["FDB_LOCAL_CLIENT_LIBRARY_PATH"] = (
"dummy" if invalid_lib_path else self.downloader.lib_path(version)
@ -206,7 +239,9 @@ class FdbCShimTests:
self.run_c_shim_lib_tester(CURRENT_VERSION, test_env, set_env_path=True)
# Test using the loaded client library as the local client
self.run_c_shim_lib_tester(CURRENT_VERSION, test_env, call_set_path=True, use_external_lib=False)
self.run_c_shim_lib_tester(
CURRENT_VERSION, test_env, call_set_path=True, use_external_lib=False
)
# Test setting an invalid client library path over an API call
self.run_c_shim_lib_tester(
@ -227,15 +262,24 @@ class FdbCShimTests:
)
# Test calling a function that exists in the loaded library, but not for the selected API version
self.run_c_shim_lib_tester(CURRENT_VERSION, test_env, call_set_path=True, api_version=700)
self.run_c_shim_lib_tester(
CURRENT_VERSION, test_env, call_set_path=True, api_version=700
)
if self.test_prev_versions:
# Test the API workload with the release version
self.run_c_api_test(PREV_RELEASE_VERSION, DEFAULT_TEST_FILE)
with TestEnv(self.build_dir, self.downloader, PREV_RELEASE_VERSION) as test_env:
with TestEnv(
self.build_dir, self.downloader, PREV_RELEASE_VERSION
) as test_env:
# Test using the loaded client library as the local client
self.run_c_shim_lib_tester(PREV_RELEASE_VERSION, test_env, call_set_path=True, use_external_lib=False)
self.run_c_shim_lib_tester(
PREV_RELEASE_VERSION,
test_env,
call_set_path=True,
use_external_lib=False,
)
# Test the client library of the release version in combination with the dev API version
self.run_c_shim_lib_tester(
@ -248,7 +292,11 @@ class FdbCShimTests:
# Test calling a function that does not exist in the loaded library
self.run_c_shim_lib_tester(
"7.0.0", test_env, call_set_path=True, api_version=700, expected_ret_code=IMPLIBSO_ERROR_CODE
"7.0.0",
test_env,
call_set_path=True,
api_version=700,
expected_ret_code=IMPLIBSO_ERROR_CODE,
)
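Note: the hunks in this file repeatedly call test_env.exec_client_command(cmd_args, env_vars); that helper is not part of this diff, so the following is only a hedged sketch of what such a wrapper typically does (subprocess invocation with the prepared environment), not the project's actual implementation:
import subprocess


def exec_client_command(cmd_args, env_vars=None, expected_ret_code=0):
    # hypothetical stand-in for TestEnv.exec_client_command
    ret = subprocess.run([str(a) for a in cmd_args], env=env_vars).returncode
    assert ret == expected_ret_code, "command {} returned {}, expected {}".format(
        cmd_args, ret, expected_ret_code
    )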
@ -270,16 +318,28 @@ if __name__ == "__main__":
required=True,
)
parser.add_argument(
"--unit-tests-bin", type=str, help="Path to the fdb_c_shim_unit_tests executable.", required=True
"--unit-tests-bin",
type=str,
help="Path to the fdb_c_shim_unit_tests executable.",
required=True,
)
parser.add_argument(
"--api-tester-bin", type=str, help="Path to the fdb_c_shim_api_tester executable.", required=True
"--api-tester-bin",
type=str,
help="Path to the fdb_c_shim_api_tester executable.",
required=True,
)
parser.add_argument(
"--shim-lib-tester-bin", type=str, help="Path to the fdb_c_shim_lib_tester executable.", required=True
"--shim-lib-tester-bin",
type=str,
help="Path to the fdb_c_shim_lib_tester executable.",
required=True,
)
parser.add_argument(
"--api-test-dir", type=str, help="Path to a directory with api test definitions.", required=True
"--api-test-dir",
type=str,
help="Path to a directory with api test definitions.",
required=True,
)
parser.add_argument(
"--disable-prev-version-tests",

View File

@ -1348,12 +1348,7 @@ class Tenant(_TransactionCreator):
def list_blobbified_ranges(self, begin, end, limit):
return FutureKeyValueArray(
self.capi.fdb_tenant_list_blobbified_ranges(
self.tpointer,
begin,
len(begin),
end,
len(end),
limit
self.tpointer, begin, len(begin), end, len(end), limit
)
)

View File

@ -364,7 +364,6 @@ if sys.version_info < (2, 7):
s = s.lstrip("-0b") # remove leading zeros and minus sign
return len(s)
else:
def _bit_length(x):

View File

@ -40,6 +40,7 @@ def retry_with_timeout(seconds):
tr = db.create_transaction()
return wrapper
return decorator
@ -53,7 +54,7 @@ def test_cancellation(db):
tr.cancel()
try:
tr.commit().wait() # should throw
raise TestError('Basic cancellation unit test failed.')
raise TestError("Basic cancellation unit test failed.")
except fdb.FDBError as e:
if e.code != 1025:
raise
@ -69,7 +70,7 @@ def test_cancellation(db):
tr.commit().wait() # should not throw
except fdb.FDBError as e:
if e.code == 1025:
raise TestError('Cancellation survived reset.')
raise TestError("Cancellation survived reset.")
else:
raise
@ -81,13 +82,13 @@ def test_cancellation(db):
tr.cancel()
try:
tr.on_error(fdb.FDBError(1007)).wait() # should throw
raise TestError('on_error() did not notice cancellation.')
raise TestError("on_error() did not notice cancellation.")
except fdb.FDBError as e:
if e.code != 1025:
raise
try:
tr.commit().wait() # should throw
raise TestError('Cancellation did not survive on_error().')
raise TestError("Cancellation did not survive on_error().")
except fdb.FDBError as e:
if e.code != 1025:
raise
@ -97,7 +98,7 @@ def test_cancellation(db):
# (4) Cancellation works with weird operations
@retry_with_timeout(default_timeout)
def txn4(tr):
tr[b'foo']
tr[b"foo"]
tr.cancel()
try:
tr.get_read_version().wait() # should throw
@ -118,21 +119,21 @@ def test_retry_limits(db):
@retry_with_timeout(default_timeout)
def txn1(tr):
tr.options.set_retry_limit(1)
tr[b'foo'] = b'bar'
tr[b"foo"] = b"bar"
tr.on_error(err).wait() # should not throw
tr[b'foo'] = b'bar'
tr[b"foo"] = b"bar"
tr.options.set_retry_limit(1)
try:
tr.on_error(err).wait() # should throw
raise TestError('(1) Retry limit was ignored.')
raise TestError("(1) Retry limit was ignored.")
except fdb.FDBError as e:
if e.code != err.code:
raise
tr[b'foo'] = b'bar'
tr[b"foo"] = b"bar"
tr.options.set_retry_limit(1)
try:
tr.on_error(err).wait() # should throw
raise TestError('(1) Transaction not cancelled after error.')
raise TestError("(1) Transaction not cancelled after error.")
except fdb.FDBError as e:
if e.code != 1025:
raise
@ -143,18 +144,18 @@ def test_retry_limits(db):
@retry_with_timeout(default_timeout)
def txn2(tr):
tr.options.set_retry_limit(1)
tr[b'foo'] = b'bar'
tr[b"foo"] = b"bar"
tr.on_error(err).wait() # should not throw
tr.options.set_retry_limit(1)
tr[b'foo'] = b'bar'
tr[b"foo"] = b"bar"
try:
tr.on_error(err).wait() # should throw
raise TestError('(2) Retry limit was ignored.')
raise TestError("(2) Retry limit was ignored.")
except fdb.FDBError as e:
if e.code != err.code:
raise
tr.reset()
tr[b'foo'] = b'bar'
tr[b"foo"] = b"bar"
tr.on_error(err).wait() # should not throw
txn2(db)
@ -163,16 +164,16 @@ def test_retry_limits(db):
@retry_with_timeout(default_timeout)
def txn3(tr):
tr.options.set_retry_limit(0)
tr[b'foo'] = b'bar'
tr[b"foo"] = b"bar"
try:
tr.on_error(err).wait() # should throw
raise TestError('(3) Retry limit was ignored.')
raise TestError("(3) Retry limit was ignored.")
except fdb.FDBError as e:
if e.code != err.code:
raise
tr.reset()
tr.options.set_retry_limit(1)
tr[b'foo'] = b'bar'
tr[b"foo"] = b"bar"
tr.on_error(err).wait() # should not throw
txn3(db)
@ -180,41 +181,41 @@ def test_retry_limits(db):
# (4) Retries accumulate when limits are turned off, and are respected retroactively
@retry_with_timeout(default_timeout)
def txn4(tr):
tr[b'foo'] = b'bar'
tr[b"foo"] = b"bar"
tr.on_error(err).wait() # should not throw
tr[b'foo'] = b'bar'
tr[b"foo"] = b"bar"
tr.on_error(err).wait() # should not throw
tr.options.set_retry_limit(1)
try:
tr.on_error(err).wait() # should throw
raise TestError('(4) Retry limit was ignored.')
raise TestError("(4) Retry limit was ignored.")
except fdb.FDBError as e:
if e.code != err.code:
raise
tr.reset()
tr[b'foo'] = b'bar'
tr[b"foo"] = b"bar"
tr.options.set_retry_limit(1)
tr.on_error(err).wait() # should not throw
tr[b'foo'] = b'bar'
tr[b"foo"] = b"bar"
tr.options.set_retry_limit(2)
tr.on_error(err).wait() # should not throw
tr[b'foo'] = b'bar'
tr[b"foo"] = b"bar"
tr.options.set_retry_limit(3)
tr.on_error(err).wait() # should not throw
tr[b'foo'] = b'bar'
tr[b"foo"] = b"bar"
tr.options.set_retry_limit(4)
tr.on_error(err).wait() # should not throw
tr.options.set_retry_limit(4)
try:
tr.on_error(err).wait() # should throw
raise TestError('(4) Retry limit was ignored.')
raise TestError("(4) Retry limit was ignored.")
except fdb.FDBError as e:
if e.code != err.code:
raise
tr.options.set_retry_limit(4)
try:
tr.on_error(err).wait() # should throw
raise TestError('(4) Transaction not cancelled after error.')
raise TestError("(4) Transaction not cancelled after error.")
except fdb.FDBError as e:
if e.code != 1025:
raise
@ -225,20 +226,20 @@ def test_retry_limits(db):
@retry_with_timeout(default_timeout)
def txn5(tr):
tr.options.set_retry_limit(2)
tr[b'foo'] = b'bar'
tr[b"foo"] = b"bar"
tr.on_error(err).wait() # should not throw
tr[b'foo'] = b'bar'
tr[b"foo"] = b"bar"
tr.on_error(err).wait() # should not throw
try:
tr.on_error(err).wait() # should throw
raise TestError('(5) Retry limit was ignored.')
raise TestError("(5) Retry limit was ignored.")
except fdb.FDBError as e:
if e.code != err.code:
raise
try:
tr[b'foo'] = b'bar'
tr[b"foo"] = b"bar"
tr.on_error(err).wait() # should throw
raise TestError('(5) Transaction not cancelled after error.')
raise TestError("(5) Transaction not cancelled after error.")
except fdb.FDBError as e:
if e.code != 1025:
raise
@ -253,19 +254,19 @@ def test_db_retry_limits(db):
# (1) Basic retry limit
@retry_with_timeout(default_timeout)
def txn1(tr):
tr[b'foo'] = b'bar'
tr[b"foo"] = b"bar"
tr.on_error(err).wait() # should not throw
tr[b'foo'] = b'bar'
tr[b"foo"] = b"bar"
try:
tr.on_error(err).wait() # should throw
raise TestError('(1) Retry limit from database was ignored.')
raise TestError("(1) Retry limit from database was ignored.")
except fdb.FDBError as e:
if e.code != err.code:
raise
tr[b'foo'] = b'bar'
tr[b"foo"] = b"bar"
try:
tr.on_error(err).wait() # should throw
raise TestError('(1) Transaction not cancelled after error.')
raise TestError("(1) Transaction not cancelled after error.")
except fdb.FDBError as e:
if e.code != 1025:
raise
@ -276,21 +277,21 @@ def test_db_retry_limits(db):
@retry_with_timeout(default_timeout)
def txn2(tr):
tr.options.set_retry_limit(2)
tr[b'foo'] = b'bar'
tr[b"foo"] = b"bar"
tr.on_error(err).wait() # should not throw
tr[b'foo'] = b'bar'
tr[b"foo"] = b"bar"
tr.on_error(err).wait() # should not throw
tr[b'foo'] = b'bar'
tr[b"foo"] = b"bar"
try:
tr.on_error(err).wait() # should throw
raise TestError('(2) Retry limit from transaction was ignored.')
raise TestError("(2) Retry limit from transaction was ignored.")
except fdb.FDBError as e:
if e.code != err.code:
raise
tr[b'foo'] = b'bar'
tr[b"foo"] = b"bar"
try:
tr.on_error(err).wait() # should throw
raise TestError('(2) Transaction not cancelled after error.')
raise TestError("(2) Transaction not cancelled after error.")
except fdb.FDBError as e:
if e.code != 1025:
raise
@ -303,19 +304,19 @@ def test_db_retry_limits(db):
@retry_with_timeout(default_timeout)
def txn3(tr):
tr.options.set_retry_limit(1)
tr[b'foo'] = b'bar'
tr[b"foo"] = b"bar"
tr.on_error(err).wait() # should not throw
tr[b'foo'] = b'bar'
tr[b"foo"] = b"bar"
try:
tr.on_error(err).wait() # should throw
raise TestError('(3) Retry limit from transaction was ignored.')
raise TestError("(3) Retry limit from transaction was ignored.")
except fdb.FDBError as e:
if e.code != err.code:
raise
tr[b'foo'] = b'bar'
tr[b"foo"] = b"bar"
try:
tr.on_error(err).wait() # should throw
raise TestError('(3) Transaction not cancelled after error.')
raise TestError("(3) Transaction not cancelled after error.")
except fdb.FDBError as e:
if e.code != 1025:
raise
@ -326,30 +327,30 @@ def test_db_retry_limits(db):
@retry_with_timeout(default_timeout)
def txn4(tr):
tr.options.set_retry_limit(1)
tr[b'foo'] = b'bar'
tr[b"foo"] = b"bar"
tr.on_error(err).wait() # should not throw
tr[b'foo'] = b'bar'
tr[b"foo"] = b"bar"
try:
tr.on_error(err).wait() # should throw
raise TestError('(4) Retry limit from transaction was ignored.')
raise TestError("(4) Retry limit from transaction was ignored.")
except fdb.FDBError as e:
if e.code != err.code:
raise
tr.reset()
tr[b'foo'] = b'bar'
tr[b"foo"] = b"bar"
tr.on_error(err).wait() # should not throw
tr[b'foo'] = b'bar'
tr[b"foo"] = b"bar"
tr.on_error(err).wait() # should not throw
tr[b'foo'] = b'bar'
tr[b"foo"] = b"bar"
try:
tr.on_error(err).wait() # should throw
raise TestError('(4) Retry limit from database was ignored.')
raise TestError("(4) Retry limit from database was ignored.")
except fdb.FDBError as e:
if e.code != err.code:
raise
try:
tr.on_error(err).wait() # should throw
raise TestError('(4) Transaction not cancelled after error.')
raise TestError("(4) Transaction not cancelled after error.")
except fdb.FDBError as e:
if e.code != 1025:
raise
@ -436,7 +437,7 @@ def test_timeouts(db):
# (6) Timeout will fire "retroactively"
@retry_with_timeout(default_timeout)
def txn6(tr):
tr[b'foo'] = b'bar'
tr[b"foo"] = b"bar"
time.sleep(1)
tr.options.set_timeout(10)
try:
@ -451,11 +452,11 @@ def test_timeouts(db):
# (7) Transaction reset also resets time from which timeout is measured
@retry_with_timeout(default_timeout)
def txn7(tr):
tr[b'foo'] = b'bar'
tr[b"foo"] = b"bar"
time.sleep(1)
start = time.time()
tr.reset()
tr[b'foo'] = b'bar'
tr[b"foo"] = b"bar"
tr.options.set_timeout(500)
try:
tr.commit().wait() # should not throw, but could if commit were slow:
@ -470,10 +471,10 @@ def test_timeouts(db):
# (8) on_error() does not reset time from which timeout is measured
@retry_with_timeout(default_timeout)
def txn8(tr):
tr[b'foo'] = b'bar'
tr[b"foo"] = b"bar"
time.sleep(1)
tr.on_error(fdb.FDBError(1007)).wait() # should not throw
tr[b'foo'] = b'bar'
tr[b"foo"] = b"bar"
tr.options.set_timeout(100)
try:
tr.commit().wait() # should throw
@ -487,9 +488,9 @@ def test_timeouts(db):
# (9) Timeouts can be unset
@retry_with_timeout(default_timeout)
def txn9(tr):
tr[b'foo'] = b'bar'
tr[b"foo"] = b"bar"
tr.options.set_timeout(100)
tr[b'foo'] = b'bar'
tr[b"foo"] = b"bar"
tr.options.set_timeout(0)
time.sleep(1)
tr.commit().wait() # should not throw
@ -499,7 +500,7 @@ def test_timeouts(db):
# (10) Unsetting a timeout after it has fired doesn't help
@retry_with_timeout(default_timeout)
def txn10(tr):
tr[b'foo'] = b'bar'
tr[b"foo"] = b"bar"
tr.options.set_timeout(100)
time.sleep(1)
tr.options.set_timeout(0)
@ -517,15 +518,17 @@ def test_timeouts(db):
def txn11(tr):
for i in range(2):
tr.options.set_timeout(1500)
tr.set_read_version(0x7ffffffffffffff0)
_ = tr[b'foo']
tr.set_read_version(0x7FFFFFFFFFFFFFF0)
_ = tr[b"foo"]
try:
tr.commit().wait()
tr.reset()
except fdb.FDBError as e:
if i == 0:
if e.code != 1009: # future_version
raise fdb.FDBError(1007) # Something weird happened; raise a retryable error so we run this transaction again
raise fdb.FDBError(
1007
) # Something weird happened; raise a retryable error so we run this transaction again
else:
tr.on_error(e).wait()
elif i == 1 and e.code != 1031:
@ -554,10 +557,10 @@ def test_db_timeouts(db):
# (2) Timeout after on_error
@retry_with_timeout(default_timeout)
def txn2(tr):
tr[b'foo'] = b'bar'
tr[b"foo"] = b"bar"
tr.on_error(err).wait() # should not throw
time.sleep(1)
_ = tr[b'foo']
_ = tr[b"foo"]
try:
tr.commit().wait() # should throw
raise TestError("(2) Timeout didn't fire.")
@ -572,9 +575,9 @@ def test_db_timeouts(db):
def txn3(tr):
tr.options.set_timeout(1000)
time.sleep(0.75)
tr[b'foo'] = b'bar'
tr[b"foo"] = b"bar"
tr.on_error(err).wait() # should not throw
_ = tr[b'foo']
_ = tr[b"foo"]
time.sleep(0.75)
try:
tr.commit().wait() # should throw
@ -589,7 +592,7 @@ def test_db_timeouts(db):
@retry_with_timeout(default_timeout)
def txn4(tr):
tr.options.set_timeout(100)
tr[b'foo'] = b'bar'
tr[b"foo"] = b"bar"
time.sleep(0.2)
try:
tr.commit().wait() # should throw
@ -604,7 +607,7 @@ def test_db_timeouts(db):
@retry_with_timeout(default_timeout)
def txn5(tr):
tr.options.set_timeout(100)
tr[b'foo'] = b'bar'
tr[b"foo"] = b"bar"
time.sleep(0.2)
try:
tr.commit().wait() # should throw
@ -613,10 +616,10 @@ def test_db_timeouts(db):
if e.code != 1031:
raise
tr.reset()
tr[b'foo'] = b'bar'
tr[b"foo"] = b"bar"
time.sleep(0.2)
tr.on_error(err).wait() # should not throw
tr[b'foo'] = b'bar'
tr[b"foo"] = b"bar"
time.sleep(0.8)
try:
tr.commit().wait() # should throw
@ -638,13 +641,13 @@ def test_combinations(db):
tr.cancel()
try:
tr.on_error(fdb.FDBError(1007)).wait() # should throw
raise TestError('on_error() did not notice cancellation.')
raise TestError("on_error() did not notice cancellation.")
except fdb.FDBError as e:
if e.code != 1025:
raise
try:
tr.commit().wait() # should throw
raise TestError('Cancellation did not survive on_error().')
raise TestError("Cancellation did not survive on_error().")
except fdb.FDBError as e:
if e.code != 1025:
raise
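Note: every test in this file is wrapped in @retry_with_timeout(default_timeout), but only fragments of that decorator fall inside the hunks above. The following is a hedged reconstruction of its likely shape (fresh transaction per attempt, give up once the overall deadline passes), not the file's actual definition:
import functools
import time

import fdb


def retry_with_timeout(seconds):
    def decorator(f):
        @functools.wraps(f)
        def wrapper(db):
            deadline = time.time() + seconds
            while True:
                tr = db.create_transaction()
                try:
                    return f(tr)
                except fdb.FDBError as e:
                    if time.time() >= deadline:
                        raise
                    # backs off and re-raises non-retryable errors
                    tr.on_error(e).wait()

        return wrapper

    return decorator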

View File

@ -27,14 +27,14 @@ import fdb.directory_impl
from fdb import six
ops_that_create_dirs = [
six.u('DIRECTORY_CREATE_SUBSPACE'),
six.u('DIRECTORY_CREATE_LAYER'),
six.u('DIRECTORY_CREATE_OR_OPEN'),
six.u('DIRECTORY_CREATE'),
six.u('DIRECTORY_OPEN'),
six.u('DIRECTORY_MOVE'),
six.u('DIRECTORY_MOVE_TO'),
six.u('DIRECTORY_OPEN_SUBSPACE'),
six.u("DIRECTORY_CREATE_SUBSPACE"),
six.u("DIRECTORY_CREATE_LAYER"),
six.u("DIRECTORY_CREATE_OR_OPEN"),
six.u("DIRECTORY_CREATE"),
six.u("DIRECTORY_OPEN"),
six.u("DIRECTORY_MOVE"),
six.u("DIRECTORY_MOVE_TO"),
six.u("DIRECTORY_OPEN_SUBSPACE"),
]
log_all = False
@ -50,7 +50,7 @@ def log_op(msg, force=False):
print(msg)
class DirectoryExtension():
class DirectoryExtension:
def __init__(self):
self.dir_list = [fdb.directory]
self.dir_index = 0
@ -70,7 +70,10 @@ class DirectoryExtension():
def append_dir(self, inst, dir):
if log_dirs or log_all:
print('pushed %s at %d (op=%s)' % (dir.__class__.__name__, len(self.dir_list), inst.op))
print(
"pushed %s at %d (op=%s)"
% (dir.__class__.__name__, len(self.dir_list), inst.op)
)
self.dir_list.append(dir)
@ -80,127 +83,159 @@ class DirectoryExtension():
print("%d. %s" % (inst.index, inst.op))
directory = self.dir_list[self.dir_index]
if inst.op == six.u('DIRECTORY_CREATE_SUBSPACE'):
if inst.op == six.u("DIRECTORY_CREATE_SUBSPACE"):
path = self.pop_tuples(inst.stack)
raw_prefix = inst.pop()
log_op('created subspace at %r: %r' % (path, raw_prefix))
log_op("created subspace at %r: %r" % (path, raw_prefix))
self.append_dir(inst, fdb.Subspace(path, raw_prefix))
elif inst.op == six.u('DIRECTORY_CREATE_LAYER'):
elif inst.op == six.u("DIRECTORY_CREATE_LAYER"):
index1, index2, allow_manual_prefixes = inst.pop(3)
if self.dir_list[index1] is None or self.dir_list[index2] is None:
log_op('create directory layer: None')
log_op("create directory layer: None")
self.append_dir(inst, None)
else:
log_op('create directory layer: node_subspace (%d) = %r, content_subspace (%d) = %r, allow_manual_prefixes = %d' %
(index1, self.dir_list[index1].rawPrefix, index2, self.dir_list[index2].rawPrefix, allow_manual_prefixes))
self.append_dir(inst, fdb.DirectoryLayer(self.dir_list[index1], self.dir_list[index2], allow_manual_prefixes == 1))
elif inst.op == six.u('DIRECTORY_CHANGE'):
log_op(
"create directory layer: node_subspace (%d) = %r, content_subspace (%d) = %r, allow_manual_prefixes = %d"
% (
index1,
self.dir_list[index1].rawPrefix,
index2,
self.dir_list[index2].rawPrefix,
allow_manual_prefixes,
)
)
self.append_dir(
inst,
fdb.DirectoryLayer(
self.dir_list[index1],
self.dir_list[index2],
allow_manual_prefixes == 1,
),
)
elif inst.op == six.u("DIRECTORY_CHANGE"):
self.dir_index = inst.pop()
if not self.dir_list[self.dir_index]:
self.dir_index = self.error_index
if log_dirs or log_all:
new_dir = self.dir_list[self.dir_index]
clazz = new_dir.__class__.__name__
new_path = repr(new_dir._path) if hasattr(new_dir, '_path') else "<na>"
print('changed directory to %d (%s @%r)' % (self.dir_index, clazz, new_path))
elif inst.op == six.u('DIRECTORY_SET_ERROR_INDEX'):
new_path = (
repr(new_dir._path) if hasattr(new_dir, "_path") else "<na>"
)
print(
"changed directory to %d (%s @%r)"
% (self.dir_index, clazz, new_path)
)
elif inst.op == six.u("DIRECTORY_SET_ERROR_INDEX"):
self.error_index = inst.pop()
elif inst.op == six.u('DIRECTORY_CREATE_OR_OPEN'):
elif inst.op == six.u("DIRECTORY_CREATE_OR_OPEN"):
path = self.pop_tuples(inst.stack)
layer = inst.pop()
log_op('create_or_open %r: layer=%r' % (directory.get_path() + path, layer))
d = directory.create_or_open(inst.tr, path, layer or b'')
log_op(
"create_or_open %r: layer=%r" % (directory.get_path() + path, layer)
)
d = directory.create_or_open(inst.tr, path, layer or b"")
self.append_dir(inst, d)
elif inst.op == six.u('DIRECTORY_CREATE'):
elif inst.op == six.u("DIRECTORY_CREATE"):
path = self.pop_tuples(inst.stack)
layer, prefix = inst.pop(2)
log_op('create %r: layer=%r, prefix=%r' % (directory.get_path() + path, layer, prefix))
self.append_dir(inst, directory.create(inst.tr, path, layer or b'', prefix))
elif inst.op == six.u('DIRECTORY_OPEN'):
log_op(
"create %r: layer=%r, prefix=%r"
% (directory.get_path() + path, layer, prefix)
)
self.append_dir(
inst, directory.create(inst.tr, path, layer or b"", prefix)
)
elif inst.op == six.u("DIRECTORY_OPEN"):
path = self.pop_tuples(inst.stack)
layer = inst.pop()
log_op('open %r: layer=%r' % (directory.get_path() + path, layer))
self.append_dir(inst, directory.open(inst.tr, path, layer or b''))
elif inst.op == six.u('DIRECTORY_MOVE'):
log_op("open %r: layer=%r" % (directory.get_path() + path, layer))
self.append_dir(inst, directory.open(inst.tr, path, layer or b""))
elif inst.op == six.u("DIRECTORY_MOVE"):
old_path, new_path = self.pop_tuples(inst.stack, 2)
log_op('move %r to %r' % (directory.get_path() + old_path, directory.get_path() + new_path))
log_op(
"move %r to %r"
% (directory.get_path() + old_path, directory.get_path() + new_path)
)
self.append_dir(inst, directory.move(inst.tr, old_path, new_path))
elif inst.op == six.u('DIRECTORY_MOVE_TO'):
elif inst.op == six.u("DIRECTORY_MOVE_TO"):
new_absolute_path = self.pop_tuples(inst.stack)
log_op('move %r to %r' % (directory.get_path(), new_absolute_path))
log_op("move %r to %r" % (directory.get_path(), new_absolute_path))
self.append_dir(inst, directory.move_to(inst.tr, new_absolute_path))
elif inst.op == six.u('DIRECTORY_REMOVE'):
elif inst.op == six.u("DIRECTORY_REMOVE"):
count = inst.pop()
if count == 0:
log_op('remove %r' % (directory.get_path(),))
log_op("remove %r" % (directory.get_path(),))
directory.remove(inst.tr)
else:
path = self.pop_tuples(inst.stack)
log_op('remove %r' % (directory.get_path() + path,))
log_op("remove %r" % (directory.get_path() + path,))
directory.remove(inst.tr, path)
elif inst.op == six.u('DIRECTORY_REMOVE_IF_EXISTS'):
elif inst.op == six.u("DIRECTORY_REMOVE_IF_EXISTS"):
count = inst.pop()
if count == 0:
log_op('remove_if_exists %r' % (directory.get_path(),))
log_op("remove_if_exists %r" % (directory.get_path(),))
directory.remove_if_exists(inst.tr)
else:
path = self.pop_tuples(inst.stack)
log_op('remove_if_exists %r' % (directory.get_path() + path,))
log_op("remove_if_exists %r" % (directory.get_path() + path,))
directory.remove_if_exists(inst.tr, path)
elif inst.op == six.u('DIRECTORY_LIST'):
elif inst.op == six.u("DIRECTORY_LIST"):
count = inst.pop()
if count == 0:
result = directory.list(inst.tr)
log_op('list %r' % (directory.get_path(),))
log_op("list %r" % (directory.get_path(),))
else:
path = self.pop_tuples(inst.stack)
result = directory.list(inst.tr, path)
log_op('list %r' % (directory.get_path() + path,))
log_op("list %r" % (directory.get_path() + path,))
inst.push(fdb.tuple.pack(tuple(result)))
elif inst.op == six.u('DIRECTORY_EXISTS'):
elif inst.op == six.u("DIRECTORY_EXISTS"):
count = inst.pop()
if count == 0:
result = directory.exists(inst.tr)
log_op('exists %r: %d' % (directory.get_path(), result))
log_op("exists %r: %d" % (directory.get_path(), result))
else:
path = self.pop_tuples(inst.stack)
result = directory.exists(inst.tr, path)
log_op('exists %r: %d' % (directory.get_path() + path, result))
log_op("exists %r: %d" % (directory.get_path() + path, result))
if result:
inst.push(1)
else:
inst.push(0)
elif inst.op == six.u('DIRECTORY_PACK_KEY'):
elif inst.op == six.u("DIRECTORY_PACK_KEY"):
key_tuple = self.pop_tuples(inst.stack)
inst.push(directory.pack(key_tuple))
elif inst.op == six.u('DIRECTORY_UNPACK_KEY'):
elif inst.op == six.u("DIRECTORY_UNPACK_KEY"):
key = inst.pop()
log_op('unpack %r in subspace with prefix %r' % (key, directory.rawPrefix))
log_op(
"unpack %r in subspace with prefix %r" % (key, directory.rawPrefix)
)
tup = directory.unpack(key)
for t in tup:
inst.push(t)
elif inst.op == six.u('DIRECTORY_RANGE'):
elif inst.op == six.u("DIRECTORY_RANGE"):
tup = self.pop_tuples(inst.stack)
rng = directory.range(tup)
inst.push(rng.start)
inst.push(rng.stop)
elif inst.op == six.u('DIRECTORY_CONTAINS'):
elif inst.op == six.u("DIRECTORY_CONTAINS"):
key = inst.pop()
result = directory.contains(key)
if result:
inst.push(1)
else:
inst.push(0)
elif inst.op == six.u('DIRECTORY_OPEN_SUBSPACE'):
elif inst.op == six.u("DIRECTORY_OPEN_SUBSPACE"):
path = self.pop_tuples(inst.stack)
log_op('open_subspace %r (at %r)' % (path, directory.key()))
log_op("open_subspace %r (at %r)" % (path, directory.key()))
self.append_dir(inst, directory.subspace(path))
elif inst.op == six.u('DIRECTORY_LOG_SUBSPACE'):
elif inst.op == six.u("DIRECTORY_LOG_SUBSPACE"):
prefix = inst.pop()
inst.tr[prefix + fdb.tuple.pack((self.dir_index,))] = directory.key()
elif inst.op == six.u('DIRECTORY_LOG_DIRECTORY'):
elif inst.op == six.u("DIRECTORY_LOG_DIRECTORY"):
prefix = inst.pop()
exists = directory.exists(inst.tr)
if exists:
@ -208,18 +243,25 @@ class DirectoryExtension():
else:
children = ()
logSubspace = fdb.Subspace((self.dir_index,), prefix)
inst.tr[logSubspace[six.u('path')]] = fdb.tuple.pack(directory.get_path())
inst.tr[logSubspace[six.u('layer')]] = fdb.tuple.pack((directory.get_layer(),))
inst.tr[logSubspace[six.u('exists')]] = fdb.tuple.pack((int(exists),))
inst.tr[logSubspace[six.u('children')]] = fdb.tuple.pack(children)
elif inst.op == six.u('DIRECTORY_STRIP_PREFIX'):
inst.tr[logSubspace[six.u("path")]] = fdb.tuple.pack(
directory.get_path()
)
inst.tr[logSubspace[six.u("layer")]] = fdb.tuple.pack(
(directory.get_layer(),)
)
inst.tr[logSubspace[six.u("exists")]] = fdb.tuple.pack((int(exists),))
inst.tr[logSubspace[six.u("children")]] = fdb.tuple.pack(children)
elif inst.op == six.u("DIRECTORY_STRIP_PREFIX"):
s = inst.pop()
if not s.startswith(directory.key()):
raise Exception('String %r does not start with raw prefix %r' % (s, directory.key()))
raise Exception(
"String %r does not start with raw prefix %r"
% (s, directory.key())
)
inst.push(s[len(directory.key()):])
inst.push(s[len(directory.key()) :])
else:
raise Exception('Unknown op: %s' % inst.op)
raise Exception("Unknown op: %s" % inst.op)
except Exception as e:
if log_all or log_errors:
print(e)
@ -228,4 +270,4 @@ class DirectoryExtension():
if inst.op in ops_that_create_dirs:
self.append_dir(inst, None)
inst.push(b'DIRECTORY_ERROR')
inst.push(b"DIRECTORY_ERROR")
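Note: for readers unfamiliar with the directory-layer calls this extension dispatches to, here is a small, hedged usage sketch (hypothetical path and key names, assuming a reachable cluster):
import fdb

fdb.api_version(fdb.LATEST_API_VERSION)


@fdb.transactional
def directory_smoke_test(tr):
    d = fdb.directory.create_or_open(tr, ("bindingtester", "example"))
    tr[d.pack(("counter",))] = fdb.tuple.pack((1,))  # cf. DIRECTORY_PACK_KEY
    rng = d.range()  # cf. DIRECTORY_RANGE
    return [d.unpack(k) for k, _ in tr[rng.start : rng.stop]]


# db = fdb.open()
# print(directory_smoke_test(db))  # -> [('counter',)]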

View File

@ -21,7 +21,7 @@
import fdb
import sys
if __name__ == '__main__':
if __name__ == "__main__":
fdb.api_version(fdb.LATEST_API_VERSION)
@ -37,37 +37,37 @@ def setValueWithLimit(tr, key, value, limit):
def test_size_limit_option(db):
value = b'a' * 1024
value = b"a" * 1024
setValue(db, b't1', value)
assert(value == db[b't1'])
setValue(db, b"t1", value)
assert value == db[b"t1"]
try:
db.options.set_transaction_size_limit(1000)
setValue(db, b't2', value)
assert(False) # not reached
setValue(db, b"t2", value)
assert False # not reached
except fdb.FDBError as e:
assert(e.code == 2101) # Transaction exceeds byte limit (2101)
assert e.code == 2101 # Transaction exceeds byte limit (2101)
# Per transaction option overrides database option
db.options.set_transaction_size_limit(1000000)
try:
setValueWithLimit(db, b't3', value, 1000)
assert(False) # not reached
setValueWithLimit(db, b"t3", value, 1000)
assert False # not reached
except fdb.FDBError as e:
assert(e.code == 2101) # Transaction exceeds byte limit (2101)
assert e.code == 2101 # Transaction exceeds byte limit (2101)
# DB default survives on_error reset
db.options.set_transaction_size_limit(1000)
tr = db.create_transaction()
try:
tr[b't4'] = b'bar'
tr[b"t4"] = b"bar"
tr.on_error(fdb.FDBError(1007)).wait()
setValue(tr, b't4', value)
setValue(tr, b"t4", value)
tr.commit().wait()
assert(False) # not reached
assert False # not reached
except fdb.FDBError as e:
assert(e.code == 2101) # Transaction exceeds byte limit (2101)
assert e.code == 2101 # Transaction exceeds byte limit (2101)
# Reset the size limit for future tests
db.options.set_transaction_size_limit(10000000)
@ -75,29 +75,29 @@ def test_size_limit_option(db):
@fdb.transactional
def test_get_approximate_size(tr):
tr[b'key1'] = b'value1'
tr[b"key1"] = b"value1"
s1 = tr.get_approximate_size().wait()
tr[b'key2'] = b'value2'
tr[b"key2"] = b"value2"
s2 = tr.get_approximate_size().wait()
assert(s1 < s2)
assert s1 < s2
tr.clear(b'key3')
tr.clear(b"key3")
s3 = tr.get_approximate_size().wait()
assert(s2 < s3)
assert s2 < s3
tr.add_read_conflict_key(b'key3+')
tr.add_read_conflict_key(b"key3+")
s4 = tr.get_approximate_size().wait()
assert(s3 < s4)
assert s3 < s4
tr.add_write_conflict_key(b'key4')
tr.add_write_conflict_key(b"key4")
s5 = tr.get_approximate_size().wait()
assert(s4 < s5)
assert s4 < s5
# Expect a cluster file as input. This test will write to the FDB cluster, so
# be aware of potential side effects.
if __name__ == '__main__':
if __name__ == "__main__":
clusterFile = sys.argv[1]
db = fdb.open(clusterFile)
db.options.set_transaction_timeout(2000) # 2 seconds
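Note: setValue and setValueWithLimit are defined above the hunks shown for this file; a hedged reconstruction, relying on the file's existing imports and assuming the per-transaction size_limit option is what the "Per transaction option" comment refers to:
@fdb.transactional
def setValue(tr, key, value):
    tr[key] = value


@fdb.transactional
def setValueWithLimit(tr, key, value, limit):
    tr.options.set_size_limit(limit)  # overrides the database-wide limit for this transaction
    tr[key] = value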

View File

@ -24,7 +24,7 @@ import json
import base64
from fdb.tuple import pack
if __name__ == '__main__':
if __name__ == "__main__":
fdb.api_version(fdb.LATEST_API_VERSION)
@ -41,61 +41,61 @@ def cleanup_tenant(db, tenant_name):
def test_tenant_tuple_name(db):
tuplename = (b'test', b'level', b'hierarchy', 3, 1.24, 'str')
tuplename = (b"test", b"level", b"hierarchy", 3, 1.24, "str")
cleanup_tenant(db, tuplename)
fdb.tenant_management.create_tenant(db, tuplename)
tenant = db.open_tenant(tuplename)
tenant[b'foo'] = b'bar'
tenant[b"foo"] = b"bar"
assert tenant[b'foo'] == b'bar'
assert tenant[b"foo"] == b"bar"
del tenant[b'foo']
del tenant[b"foo"]
fdb.tenant_management.delete_tenant(db, tuplename)
def test_tenant_operations(db):
cleanup_tenant(db, b'tenant1')
cleanup_tenant(db, b'tenant2')
cleanup_tenant(db, b"tenant1")
cleanup_tenant(db, b"tenant2")
fdb.tenant_management.create_tenant(db, b'tenant1')
fdb.tenant_management.create_tenant(db, b'tenant2')
fdb.tenant_management.create_tenant(db, b"tenant1")
fdb.tenant_management.create_tenant(db, b"tenant2")
tenant_list = fdb.tenant_management.list_tenants(db, b'a', b'z', 10).to_list()
assert tenant_list[0].key == b'tenant1'
assert tenant_list[1].key == b'tenant2'
tenant_list = fdb.tenant_management.list_tenants(db, b"a", b"z", 10).to_list()
assert tenant_list[0].key == b"tenant1"
assert tenant_list[1].key == b"tenant2"
t1_entry = tenant_list[0].value
t1_json = json.loads(t1_entry)
p1 = base64.b64decode(t1_json['prefix']['base64'])
p1 = base64.b64decode(t1_json["prefix"]["base64"])
t2_entry = tenant_list[1].value
t2_json = json.loads(t2_entry)
p2 = base64.b64decode(t2_json['prefix']['base64'])
p2 = base64.b64decode(t2_json["prefix"]["base64"])
tenant1 = db.open_tenant(b'tenant1')
tenant2 = db.open_tenant(b'tenant2')
tenant1 = db.open_tenant(b"tenant1")
tenant2 = db.open_tenant(b"tenant2")
db[b'tenant_test_key'] = b'no_tenant'
tenant1[b'tenant_test_key'] = b'tenant1'
tenant2[b'tenant_test_key'] = b'tenant2'
db[b"tenant_test_key"] = b"no_tenant"
tenant1[b"tenant_test_key"] = b"tenant1"
tenant2[b"tenant_test_key"] = b"tenant2"
tenant1_entry = db[b'\xff\xff/management/tenant/map/tenant1']
tenant1_entry = db[b"\xff\xff/management/tenant/map/tenant1"]
tenant1_json = json.loads(tenant1_entry)
prefix1 = base64.b64decode(tenant1_json['prefix']['base64'])
prefix1 = base64.b64decode(tenant1_json["prefix"]["base64"])
assert prefix1 == p1
tenant2_entry = db[b'\xff\xff/management/tenant/map/tenant2']
tenant2_entry = db[b"\xff\xff/management/tenant/map/tenant2"]
tenant2_json = json.loads(tenant2_entry)
prefix2 = base64.b64decode(tenant2_json['prefix']['base64'])
prefix2 = base64.b64decode(tenant2_json["prefix"]["base64"])
assert prefix2 == p2
assert tenant1[b'tenant_test_key'] == b'tenant1'
assert db[prefix1 + b'tenant_test_key'] == b'tenant1'
assert tenant2[b'tenant_test_key'] == b'tenant2'
assert db[prefix2 + b'tenant_test_key'] == b'tenant2'
assert db[b'tenant_test_key'] == b'no_tenant'
assert tenant1[b"tenant_test_key"] == b"tenant1"
assert db[prefix1 + b"tenant_test_key"] == b"tenant1"
assert tenant2[b"tenant_test_key"] == b"tenant2"
assert db[prefix2 + b"tenant_test_key"] == b"tenant2"
assert db[b"tenant_test_key"] == b"no_tenant"
tr1 = tenant1.create_transaction()
try:
@ -104,87 +104,95 @@ def test_tenant_operations(db):
except fdb.FDBError as e:
tr1.on_error(e).wait()
assert tenant1[b'tenant_test_key'] == None
assert db[prefix1 + b'tenant_test_key'] == None
assert tenant2[b'tenant_test_key'] == b'tenant2'
assert db[prefix2 + b'tenant_test_key'] == b'tenant2'
assert db[b'tenant_test_key'] == b'no_tenant'
assert tenant1[b"tenant_test_key"] == None
assert db[prefix1 + b"tenant_test_key"] == None
assert tenant2[b"tenant_test_key"] == b"tenant2"
assert db[prefix2 + b"tenant_test_key"] == b"tenant2"
assert db[b"tenant_test_key"] == b"no_tenant"
fdb.tenant_management.delete_tenant(db, b'tenant1')
fdb.tenant_management.delete_tenant(db, b"tenant1")
try:
tenant1[b'tenant_test_key']
tenant1[b"tenant_test_key"]
assert False
except fdb.FDBError as e:
assert e.code == 2131 # tenant not found
del tenant2[:]
fdb.tenant_management.delete_tenant(db, b'tenant2')
fdb.tenant_management.delete_tenant(db, b"tenant2")
assert db[prefix1 + b'tenant_test_key'] == None
assert db[prefix2 + b'tenant_test_key'] == None
assert db[b'tenant_test_key'] == b'no_tenant'
assert db[prefix1 + b"tenant_test_key"] == None
assert db[prefix2 + b"tenant_test_key"] == None
assert db[b"tenant_test_key"] == b"no_tenant"
del db[b'tenant_test_key']
del db[b"tenant_test_key"]
assert db[b'tenant_test_key'] == None
assert db[b"tenant_test_key"] == None
def test_tenant_operation_retries(db):
cleanup_tenant(db, b'tenant1')
cleanup_tenant(db, b'tenant2')
cleanup_tenant(db, b"tenant1")
cleanup_tenant(db, b"tenant2")
# Test that the tenant creation only performs the existence check once
fdb.tenant_management._create_tenant_impl(db, b'tenant1', [], force_existence_check_maybe_committed=True)
fdb.tenant_management._create_tenant_impl(
db, b"tenant1", [], force_existence_check_maybe_committed=True
)
# An attempt to create the tenant again should fail
try:
fdb.tenant_management.create_tenant(db, b'tenant1')
fdb.tenant_management.create_tenant(db, b"tenant1")
assert False
except fdb.FDBError as e:
assert e.code == 2132 # tenant already exists
# Using a transaction skips the existence check
tr = db.create_transaction()
fdb.tenant_management.create_tenant(tr, b'tenant1')
fdb.tenant_management.create_tenant(tr, b"tenant1")
# Test that a concurrent tenant creation doesn't interfere with the existence check logic
tr = db.create_transaction()
existence_check_marker = []
fdb.tenant_management._create_tenant_impl(tr, b'tenant2', existence_check_marker)
fdb.tenant_management._create_tenant_impl(tr, b"tenant2", existence_check_marker)
fdb.tenant_management.create_tenant(db, b'tenant2')
fdb.tenant_management.create_tenant(db, b"tenant2")
tr = db.create_transaction()
try:
fdb.tenant_management._create_tenant_impl(tr, b'tenant2', existence_check_marker)
fdb.tenant_management._create_tenant_impl(
tr, b"tenant2", existence_check_marker
)
tr.commit().wait()
except fdb.FDBError as e:
tr.on_error(e).wait()
# Test that tenant deletion only performs the existence check once
fdb.tenant_management._delete_tenant_impl(db, b'tenant1', [], force_existence_check_maybe_committed=True)
fdb.tenant_management._delete_tenant_impl(
db, b"tenant1", [], force_existence_check_maybe_committed=True
)
# An attempt to delete the tenant again should fail
try:
fdb.tenant_management.delete_tenant(db, b'tenant1')
fdb.tenant_management.delete_tenant(db, b"tenant1")
assert False
except fdb.FDBError as e:
assert e.code == 2131 # tenant not found
# Using a transaction skips the existence check
tr = db.create_transaction()
fdb.tenant_management.delete_tenant(tr, b'tenant1')
fdb.tenant_management.delete_tenant(tr, b"tenant1")
# Test that a concurrent tenant deletion doesn't interfere with the existence check logic
tr = db.create_transaction()
existence_check_marker = []
fdb.tenant_management._delete_tenant_impl(tr, b'tenant2', existence_check_marker)
fdb.tenant_management._delete_tenant_impl(tr, b"tenant2", existence_check_marker)
fdb.tenant_management.delete_tenant(db, b'tenant2')
fdb.tenant_management.delete_tenant(db, b"tenant2")
tr = db.create_transaction()
try:
fdb.tenant_management._delete_tenant_impl(tr, b'tenant2', existence_check_marker)
fdb.tenant_management._delete_tenant_impl(
tr, b"tenant2", existence_check_marker
)
tr.commit().wait()
except fdb.FDBError as e:
tr.on_error(e).wait()
@ -198,7 +206,7 @@ def test_tenants(db):
# Expect a cluster file as input. This test will write to the FDB cluster, so
# be aware of potential side effects.
if __name__ == '__main__':
if __name__ == "__main__":
clusterFile = sys.argv[1]
db = fdb.open(clusterFile)
db.options.set_transaction_timeout(2000) # 2 seconds
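Note: cleanup_tenant is called at the top of these tests but its body falls outside the hunks shown; a plausible, hedged sketch consistent with the error code used above (2131, tenant not found):
def cleanup_tenant(db, tenant_name):
    try:
        tenant = db.open_tenant(tenant_name)
        del tenant[:]  # clear any data stored in the tenant
        fdb.tenant_management.delete_tenant(db, tenant_name)
    except fdb.FDBError as e:
        if e.code != 2131:  # tenant not found
            raise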

View File

@ -38,8 +38,8 @@ from fdb.six import u
def randomUnicode():
while True:
c = random.randint(0, 0xffff)
if unicodedata.category(unichr(c))[0] in 'LMNPSZ':
c = random.randint(0, 0xFFFF)
if unicodedata.category(unichr(c))[0] in "LMNPSZ":
return unichr(c)
@ -47,31 +47,63 @@ def randomElement():
r = random.randint(0, 9)
if r == 0:
if random.random() < 0.5:
chars = [b'\x00', b'\x01', b'a', b'7', b'\xfe', b'\ff']
return b''.join([random.choice(chars) for c in _range(random.randint(0, 5))])
chars = [b"\x00", b"\x01", b"a", b"7", b"\xfe", b"\ff"]
return b"".join(
[random.choice(chars) for c in _range(random.randint(0, 5))]
)
else:
return b''.join([six.int2byte(random.randint(0, 255)) for _ in _range(random.randint(0, 10))])
return b"".join(
[
six.int2byte(random.randint(0, 255))
for _ in _range(random.randint(0, 10))
]
)
elif r == 1:
if random.random() < 0.5:
chars = [u('\x00'), u('\x01'), u('a'), u('7'), u('\xfe'), u('\ff'), u('\u0000'), u('\u0001'), u('\uffff'), u('\uff00'), u('\U0001f4a9')]
return u('').join([random.choice(chars) for c in _range(random.randint(0, 10))])
chars = [
u("\x00"),
u("\x01"),
u("a"),
u("7"),
u("\xfe"),
u("\ff"),
u("\u0000"),
u("\u0001"),
u("\uffff"),
u("\uff00"),
u("\U0001f4a9"),
]
return u("").join(
[random.choice(chars) for c in _range(random.randint(0, 10))]
)
else:
return u('').join([randomUnicode() for _ in _range(random.randint(0, 10))])
return u("").join([randomUnicode() for _ in _range(random.randint(0, 10))])
elif r == 2:
return random.choice([-1, 1]) * min(2**random.randint(0, 2040) + random.randint(-10, 10), 2**2040 - 1)
return random.choice([-1, 1]) * min(
2 ** random.randint(0, 2040) + random.randint(-10, 10), 2**2040 - 1
)
elif r == 3:
return random.choice([-1, 1]) * 2**random.randint(0, 64) + random.randint(-10, 10)
return random.choice([-1, 1]) * 2 ** random.randint(0, 64) + random.randint(
-10, 10
)
elif r == 4:
return None
elif r == 5:
ret = random.choice([float('-nan'), float('-inf'), -0.0, 0.0, float('inf'), float('nan')])
ret = random.choice(
[float("-nan"), float("-inf"), -0.0, 0.0, float("inf"), float("nan")]
)
if random.random() < 0.5:
return SingleFloat(ret)
else:
return ret
elif r == 6:
is_double = random.random() < 0.5
byte_str = b''.join([six.int2byte(random.randint(0, 255)) for _ in _range(8 if is_double else 4)])
byte_str = b"".join(
[
six.int2byte(random.randint(0, 255))
for _ in _range(8 if is_double else 4)
]
)
if is_double:
return struct.unpack(">d", byte_str)[0]
else:
@ -89,7 +121,7 @@ def randomTuple():
def isprefix(a, b):
return compare(a, b[:len(a)]) == 0
return compare(a, b[: len(a)]) == 0
def find_bad_sort(a, b):
@ -133,7 +165,10 @@ def tupleTest(N=10000):
problem = find_bad_sort(a, b)
if problem:
print("Bad sort:\n %s\n %s" % (problem[0], problem[1]))
print("Bytes:\n %s\n %s" % (repr(pack(problem[0])), repr(pack(problem[1]))))
print(
"Bytes:\n %s\n %s"
% (repr(pack(problem[0])), repr(pack(problem[1])))
)
# print("Tuple order:\n %s\n %s" % (tupleorder(problem[0]), tupleorder(problem[1])))
return False
else:
@ -148,38 +183,54 @@ def tupleTest(N=10000):
t3 = randomTuple()
if not compare(unpack(pack(t)), t) == 0:
print("unpack . pack /= identity:\n Orig: %s\n Bytes: %s\n New: %s" % (t, repr(pack(t)), unpack(pack(t))))
print(
"unpack . pack /= identity:\n Orig: %s\n Bytes: %s\n New: %s"
% (t, repr(pack(t)), unpack(pack(t)))
)
return False
r = range(t)
if r.start <= pack(t) < r.stop:
print("element within own range:\n Tuple: %s\n Bytes: %s\n Start: %s\n Stop: %s" %
(t, repr(pack(t)), repr(r.start), repr(r.stop)))
print(
"element within own range:\n Tuple: %s\n Bytes: %s\n Start: %s\n Stop: %s"
% (t, repr(pack(t)), repr(r.start), repr(r.stop))
)
if not r.start <= pack(t2) < r.stop:
print("prefixed element not in range:\n Tuple: %s\n Bytes: %s\n Prefixed: %s\n Bytes: %s" %
(t, repr(pack(t)), t2, repr(pack(t2))))
print(
"prefixed element not in range:\n Tuple: %s\n Bytes: %s\n Prefixed: %s\n Bytes: %s"
% (t, repr(pack(t)), t2, repr(pack(t2)))
)
return False
if not isprefix(t, t3):
if r.start <= pack(t3) <= r.stop:
print("non-prefixed element in range:\n Tuple: %s\n Bytes: %s\n Other: %s\n Bytes: %s"
% (t, repr(pack(t)), t3, repr(pack(t3))))
print(
"non-prefixed element in range:\n Tuple: %s\n Bytes: %s\n Other: %s\n Bytes: %s"
% (t, repr(pack(t)), t3, repr(pack(t3)))
)
return False
if (compare(t, t3) < 0) != (pack(t) < pack(t3)):
print("Bad comparison:\n Tuple: %s\n Bytes: %s\n Other: %s\n Bytes: %s" % (t, repr(pack(t)), t3, repr(pack(t3))))
print(
"Bad comparison:\n Tuple: %s\n Bytes: %s\n Other: %s\n Bytes: %s"
% (t, repr(pack(t)), t3, repr(pack(t3)))
)
return False
if not pack(t) < pack(t2):
print("Prefix not before prefixed:\n Tuple: %s\n Bytes: %s\n Other: %s\n Bytes: %s" % (t, repr(pack(t)), t2, repr(pack(t2))))
print(
"Prefix not before prefixed:\n Tuple: %s\n Bytes: %s\n Other: %s\n Bytes: %s"
% (t, repr(pack(t)), t2, repr(pack(t2)))
)
return False
print("Tuple check %d OK" % N)
return True
# test:
# a = ('\x00a', -2, 'b\x01', 12345, '')
# assert(a==fdbtuple.unpack(fdbtuple.pack(a)))
if __name__ == '__main__':
if __name__ == "__main__":
assert tupleTest(10000)
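Note: as a compact, hedged illustration of the invariants tupleTest exercises (round-tripping, order preservation, prefix ranges), adapted from the example tuple in the comment above:
from fdb.tuple import pack, unpack, compare, range as tuple_range

a = (b"\x00a", -2, u"b\x01", 12345, u"")
b = (b"\x00a", -2, u"b\x02")
assert unpack(pack(a)) == a  # unpack . pack == identity
assert (compare(a, b) < 0) == (pack(a) < pack(b))  # byte order matches tuple order
r = tuple_range(a[:2])
assert r.start <= pack(a) < r.stop  # a lies inside the range of its own prefix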

View File

@ -6,9 +6,11 @@ from test_harness.config import config
from test_harness.run import TestRunner
from test_harness.summarize import SummaryTree
if __name__ == '__main__':
if __name__ == "__main__":
try:
parser = argparse.ArgumentParser('TestHarness', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser = argparse.ArgumentParser(
"TestHarness", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
config.build_arguments(parser)
args = parser.parse_args()
config.extract_args(args)
@ -17,9 +19,9 @@ if __name__ == '__main__':
exit(1)
except Exception as e:
_, _, exc_traceback = sys.exc_info()
error = SummaryTree('TestHarnessError')
error.attributes['Severity'] = '40'
error.attributes['ErrorMessage'] = str(e)
error.attributes['Trace'] = repr(traceback.format_tb(exc_traceback))
error = SummaryTree("TestHarnessError")
error.attributes["Severity"] = "40"
error.attributes["ErrorMessage"] = str(e)
error.attributes["Trace"] = repr(traceback.format_tb(exc_traceback))
error.dump(sys.stdout)
exit(1)

View File

@ -21,14 +21,14 @@ class BuggifyOption:
self.value = BuggifyOptionValue.RANDOM
if val is not None:
v = val.lower()
if v in ['on', '1', 'true']:
if v in ["on", "1", "true"]:
self.value = BuggifyOptionValue.ON
elif v in ['off', '0', 'false']:
elif v in ["off", "0", "false"]:
self.value = BuggifyOptionValue.OFF
elif v in ['random', 'rnd', 'r']:
elif v in ["random", "rnd", "r"]:
pass
else:
assert False, 'Invalid value {} -- use true, false, or random'.format(v)
assert False, "Invalid value {} -- use true, false, or random".format(v)
class ConfigValue:
@ -36,12 +36,12 @@ class ConfigValue:
self.name = name
self.value = None
self.kwargs = kwargs
if 'default' in self.kwargs:
self.value = self.kwargs['default']
if "default" in self.kwargs:
self.value = self.kwargs["default"]
def get_arg_name(self) -> str:
if 'long_name' in self.kwargs:
return self.kwargs['long_name']
if "long_name" in self.kwargs:
return self.kwargs["long_name"]
else:
return self.name
@ -49,26 +49,28 @@ class ConfigValue:
kwargs = copy.copy(self.kwargs)
long_name = self.name
short_name = None
if 'long_name' in kwargs:
long_name = kwargs['long_name']
del kwargs['long_name']
if 'short_name' in kwargs:
short_name = kwargs['short_name']
del kwargs['short_name']
if 'action' in kwargs and kwargs['action'] in ['store_true', 'store_false']:
del kwargs['type']
long_name = long_name.replace('_', '-')
if "long_name" in kwargs:
long_name = kwargs["long_name"]
del kwargs["long_name"]
if "short_name" in kwargs:
short_name = kwargs["short_name"]
del kwargs["short_name"]
if "action" in kwargs and kwargs["action"] in ["store_true", "store_false"]:
del kwargs["type"]
long_name = long_name.replace("_", "-")
if short_name is None:
# line below is useful for debugging
# print('add_argument(\'--{}\', [{{{}}}])'.format(long_name, ', '.join(['\'{}\': \'{}\''.format(k, v)
# for k, v in kwargs.items()])))
parser.add_argument('--{}'.format(long_name), **kwargs)
parser.add_argument("--{}".format(long_name), **kwargs)
else:
# line below is useful for debugging
# print('add_argument(\'-{}\', \'--{}\', [{{{}}}])'.format(short_name, long_name,
# ', '.join(['\'{}\': \'{}\''.format(k, v)
# for k, v in kwargs.items()])))
parser.add_argument('-{}'.format(short_name), '--{}'.format(long_name), **kwargs)
parser.add_argument(
"-{}".format(short_name), "--{}".format(long_name), **kwargs
)
def get_value(self, args: argparse.Namespace) -> tuple[str, Any]:
return self.name, args.__getattribute__(self.get_arg_name())
@ -102,122 +104,189 @@ class Config:
* Changing the default value for all executables might not always be desirable. If it should only be changed for
one executable, Config.change_default should be used.
"""
def __init__(self):
self.random = random.Random()
self.cluster_file: str | None = None
self.cluster_file_args = {'short_name': 'C', 'type': str, 'help': 'Path to fdb cluster file', 'required': False,
'env_name': 'JOSHUA_CLUSTER_FILE'}
self.cluster_file_args = {
"short_name": "C",
"type": str,
"help": "Path to fdb cluster file",
"required": False,
"env_name": "JOSHUA_CLUSTER_FILE",
}
self.joshua_dir: str | None = None
self.joshua_dir_args = {'type': str, 'help': 'Where to write FDB data to', 'required': False,
'env_name': 'JOSHUA_APP_DIR'}
self.joshua_dir_args = {
"type": str,
"help": "Where to write FDB data to",
"required": False,
"env_name": "JOSHUA_APP_DIR",
}
self.stats: str | None = None
self.stats_args = {'type': str, 'help': 'A base64 encoded list of statistics (used to reproduce runs)',
'required': False}
self.stats_args = {
"type": str,
"help": "A base64 encoded list of statistics (used to reproduce runs)",
"required": False,
}
self.random_seed: int | None = None
self.random_seed_args = {'type': int,
'help': 'Force given seed given to fdbserver -- mostly useful for debugging',
'required': False}
self.random_seed_args = {
"type": int,
"help": "Force given seed given to fdbserver -- mostly useful for debugging",
"required": False,
}
self.kill_seconds: int = 30 * 60
self.kill_seconds_args = {'help': 'Timeout for individual test'}
self.kill_seconds_args = {"help": "Timeout for individual test"}
self.buggify_on_ratio: float = 0.8
self.buggify_on_ratio_args = {'help': 'Probability that buggify is turned on'}
self.buggify_on_ratio_args = {"help": "Probability that buggify is turned on"}
self.write_run_times = False
self.write_run_times_args = {'help': 'Write back probabilities after each test run',
'action': 'store_true'}
self.write_run_times_args = {
"help": "Write back probabilities after each test run",
"action": "store_true",
}
self.unseed_check_ratio: float = 0.05
self.unseed_check_ratio_args = {'help': 'Probability for doing determinism check'}
self.test_dirs: List[str] = ['slow', 'fast', 'restarting', 'rare', 'noSim']
self.test_dirs_args: dict = {'nargs': '*', 'help': 'test_directories to look for files in'}
self.trace_format: str = 'json'
self.trace_format_args = {'choices': ['json', 'xml'], 'help': 'What format fdb should produce'}
self.unseed_check_ratio_args = {
"help": "Probability for doing determinism check"
}
self.test_dirs: List[str] = ["slow", "fast", "restarting", "rare", "noSim"]
self.test_dirs_args: dict = {
"nargs": "*",
"help": "test_directories to look for files in",
}
self.trace_format: str = "json"
self.trace_format_args = {
"choices": ["json", "xml"],
"help": "What format fdb should produce",
}
self.crash_on_error: bool = True
self.crash_on_error_args = {'long_name': 'no_crash', 'action': 'store_false',
'help': 'Don\'t crash on first error'}
self.crash_on_error_args = {
"long_name": "no_crash",
"action": "store_false",
"help": "Don't crash on first error",
}
self.max_warnings: int = 10
self.max_warnings_args = {'short_name': 'W'}
self.max_warnings_args = {"short_name": "W"}
self.max_errors: int = 10
self.max_errors_args = {'short_name': 'E'}
self.old_binaries_path: Path = Path('/app/deploy/global_data/oldBinaries/')
self.old_binaries_path_args = {'help': 'Path to the directory containing the old fdb binaries'}
self.tls_plugin_path: Path = Path('/app/deploy/runtime/.tls_5_1/FDBLibTLS.so')
self.tls_plugin_path_args = {'help': 'Path to the tls plugin used for binaries < 5.2.0'}
self.max_errors_args = {"short_name": "E"}
self.old_binaries_path: Path = Path("/app/deploy/global_data/oldBinaries/")
self.old_binaries_path_args = {
"help": "Path to the directory containing the old fdb binaries"
}
self.tls_plugin_path: Path = Path("/app/deploy/runtime/.tls_5_1/FDBLibTLS.so")
self.tls_plugin_path_args = {
"help": "Path to the tls plugin used for binaries < 5.2.0"
}
self.disable_kaio: bool = False
self.use_valgrind: bool = False
self.use_valgrind_args = {'action': 'store_true'}
self.buggify = BuggifyOption('random')
self.buggify_args = {'short_name': 'b', 'choices': ['on', 'off', 'random']}
self.use_valgrind_args = {"action": "store_true"}
self.buggify = BuggifyOption("random")
self.buggify_args = {"short_name": "b", "choices": ["on", "off", "random"]}
self.pretty_print: bool = False
self.pretty_print_args = {'short_name': 'P', 'action': 'store_true'}
self.pretty_print_args = {"short_name": "P", "action": "store_true"}
self.clean_up: bool = True
self.clean_up_args = {'long_name': 'no_clean_up', 'action': 'store_false'}
self.run_dir: Path = Path('tmp')
self.joshua_seed: int = random.randint(0, 2 ** 32 - 1)
self.joshua_seed_args = {'short_name': 's', 'help': 'A random seed', 'env_name': 'JOSHUA_SEED'}
self.clean_up_args = {"long_name": "no_clean_up", "action": "store_false"}
self.run_dir: Path = Path("tmp")
self.joshua_seed: int = random.randint(0, 2**32 - 1)
self.joshua_seed_args = {
"short_name": "s",
"help": "A random seed",
"env_name": "JOSHUA_SEED",
}
self.print_coverage = False
self.print_coverage_args = {'action': 'store_true'}
self.binary = Path('bin') / ('fdbserver.exe' if os.name == 'nt' else 'fdbserver')
self.binary_args = {'help': 'Path to executable'}
self.print_coverage_args = {"action": "store_true"}
self.binary = Path("bin") / (
"fdbserver.exe" if os.name == "nt" else "fdbserver"
)
self.binary_args = {"help": "Path to executable"}
self.hit_per_runs_ratio: int = 20000
self.hit_per_runs_ratio_args = {'help': 'Maximum test runs before each code probe hit at least once'}
self.output_format: str = 'xml'
self.output_format_args = {'short_name': 'O', 'choices': ['json', 'xml'],
'help': 'What format TestHarness should produce'}
self.include_test_files: str = r'.*'
self.include_test_files_args = {'help': 'Only consider test files whose path match against the given regex'}
self.exclude_test_files: str = r'.^'
self.exclude_test_files_args = {'help': 'Don\'t consider test files whose path match against the given regex'}
self.include_test_classes: str = r'.*'
self.include_test_classes_args = {'help': 'Only consider tests whose names match against the given regex'}
self.exclude_test_names: str = r'.^'
self.exclude_test_names_args = {'help': 'Don\'t consider tests whose names match against the given regex'}
self.hit_per_runs_ratio_args = {
"help": "Maximum test runs before each code probe hit at least once"
}
self.output_format: str = "xml"
self.output_format_args = {
"short_name": "O",
"choices": ["json", "xml"],
"help": "What format TestHarness should produce",
}
self.include_test_files: str = r".*"
self.include_test_files_args = {
"help": "Only consider test files whose path match against the given regex"
}
self.exclude_test_files: str = r".^"
self.exclude_test_files_args = {
"help": "Don't consider test files whose path match against the given regex"
}
self.include_test_classes: str = r".*"
self.include_test_classes_args = {
"help": "Only consider tests whose names match against the given regex"
}
self.exclude_test_names: str = r".^"
self.exclude_test_names_args = {
"help": "Don't consider tests whose names match against the given regex"
}
self.details: bool = False
self.details_args = {'help': 'Print detailed results', 'short_name': 'c', 'action': 'store_true'}
self.details_args = {
"help": "Print detailed results",
"short_name": "c",
"action": "store_true",
}
self.success: bool = False
self.success_args = {'help': 'Print successful results', 'action': 'store_true'}
self.cov_include_files: str = r'.*'
self.cov_include_files_args = {'help': 'Only consider coverage traces that originated in files matching regex'}
self.cov_exclude_files: str = r'.^'
self.cov_exclude_files_args = {'help': 'Ignore coverage traces that originated in files matching regex'}
self.success_args = {"help": "Print successful results", "action": "store_true"}
self.cov_include_files: str = r".*"
self.cov_include_files_args = {
"help": "Only consider coverage traces that originated in files matching regex"
}
self.cov_exclude_files: str = r".^"
self.cov_exclude_files_args = {
"help": "Ignore coverage traces that originated in files matching regex"
}
self.max_stderr_bytes: int = 10000
self.write_stats: bool = True
self.read_stats: bool = True
self.reproduce_prefix: str | None = None
self.reproduce_prefix_args = {'type': str, 'required': False,
'help': 'When printing the results, prepend this string to the command'}
self.reproduce_prefix_args = {
"type": str,
"required": False,
"help": "When printing the results, prepend this string to the command",
}
self.long_running: bool = False
self.long_running_args = {'action': 'store_true'}
self.long_running_args = {"action": "store_true"}
self._env_names: Dict[str, str] = {}
self._config_map = self._build_map()
self._read_env()
self.random.seed(self.joshua_seed, version=2)
def change_default(self, attr: str, default_val):
assert attr in self._config_map, 'Unknown config attribute {}'.format(attr)
assert attr in self._config_map, "Unknown config attribute {}".format(attr)
self.__setattr__(attr, default_val)
self._config_map[attr].kwargs['default'] = default_val
self._config_map[attr].kwargs["default"] = default_val
def _get_env_name(self, var_name: str) -> str:
return self._env_names.get(var_name, 'TH_{}'.format(var_name.upper()))
return self._env_names.get(var_name, "TH_{}".format(var_name.upper()))
def dump(self):
for attr in dir(self):
obj = getattr(self, attr)
if attr == 'random' or attr.startswith('_') or callable(obj) or attr.endswith('_args'):
if (
attr == "random"
or attr.startswith("_")
or callable(obj)
or attr.endswith("_args")
):
continue
print('config.{}: {} = {}'.format(attr, type(obj), obj))
print("config.{}: {} = {}".format(attr, type(obj), obj))
def _build_map(self) -> OrderedDict[str, ConfigValue]:
config_map: OrderedDict[str, ConfigValue] = collections.OrderedDict()
for attr in dir(self):
obj = getattr(self, attr)
if attr == 'random' or attr.startswith('_') or callable(obj):
if attr == "random" or attr.startswith("_") or callable(obj):
continue
if attr.endswith('_args'):
name = attr[0:-len('_args')]
if attr.endswith("_args"):
name = attr[0 : -len("_args")]
assert name in config_map
assert isinstance(obj, dict)
for k, v in obj.items():
if k == 'env_name':
if k == "env_name":
self._env_names[name] = v
else:
config_map[name].kwargs[k] = v
@ -225,24 +294,29 @@ class Config:
# attribute_args has to be declared after the attribute
assert attr not in config_map
val_type = type(obj)
kwargs = {'type': val_type, 'default': obj}
kwargs = {"type": val_type, "default": obj}
config_map[attr] = ConfigValue(attr, **kwargs)
return config_map
def _read_env(self):
for attr in dir(self):
obj = getattr(self, attr)
if attr == 'random' or attr.startswith('_') or attr.endswith('_args') or callable(obj):
if (
attr == "random"
or attr.startswith("_")
or attr.endswith("_args")
or callable(obj)
):
continue
env_name = self._get_env_name(attr)
attr_type = self._config_map[attr].kwargs['type']
attr_type = self._config_map[attr].kwargs["type"]
assert type(None) != attr_type
e = os.getenv(env_name)
if e is not None:
# Use the env var to supply the default value, so that if the
# environment variable is set and the corresponding command line
# flag is not, the environment variable has an effect.
self._config_map[attr].kwargs['default'] = attr_type(e)
self._config_map[attr].kwargs["default"] = attr_type(e)
def build_arguments(self, parser: argparse.ArgumentParser):
for val in self._config_map.values():
@ -258,10 +332,12 @@ class Config:
config = Config()
if __name__ == '__main__':
if __name__ == "__main__":
# test the config setup
parser = argparse.ArgumentParser('TestHarness Config Tester',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser = argparse.ArgumentParser(
"TestHarness Config Tester",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
config.build_arguments(parser)
args = parser.parse_args()
config.extract_args(args)
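The Config hunks above feed TH_* environment variables into the argparse defaults (see the comment in _read_env), so a set variable takes effect whenever its flag is omitted. A minimal standalone sketch of that pattern, not part of this commit and using TH_JOSHUA_SEED purely as an illustrative name:

# Sketch only: an environment variable seeds the argparse default,
# and an explicit command line flag still overrides it.
import argparse
import os

parser = argparse.ArgumentParser("env-default-sketch")
parser.add_argument(
    "--joshua-seed", type=int, default=int(os.getenv("TH_JOSHUA_SEED", "0"))
)

print(parser.parse_args([]).joshua_seed)                      # env value (or 0)
print(parser.parse_args(["--joshua-seed", "7"]).joshua_seed)  # flag wins: 7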

View File

@ -20,7 +20,7 @@ fdb.api_version(630)
def str_to_tuple(s: str | None):
if s is None:
return s
return tuple(s.split(','))
return tuple(s.split(","))
fdb_db = None
@ -45,27 +45,39 @@ def chunkify(iterable, sz: int):
@fdb.transactional
def write_coverage_chunk(tr, path: Tuple[str, ...], metadata: Tuple[str, ...],
coverage: List[Tuple[Coverage, bool]], initialized: bool) -> bool:
def write_coverage_chunk(
tr,
path: Tuple[str, ...],
metadata: Tuple[str, ...],
coverage: List[Tuple[Coverage, bool]],
initialized: bool,
) -> bool:
cov_dir = fdb.directory.create_or_open(tr, path)
if not initialized:
metadata_dir = fdb.directory.create_or_open(tr, metadata)
v = tr[metadata_dir['initialized']]
v = tr[metadata_dir["initialized"]]
initialized = v.present()
for cov, covered in coverage:
if not initialized or covered:
tr.add(cov_dir.pack((cov.file, cov.line, cov.comment, cov.rare)), struct.pack('<I', 1 if covered else 0))
tr.add(
cov_dir.pack((cov.file, cov.line, cov.comment, cov.rare)),
struct.pack("<I", 1 if covered else 0),
)
return initialized
@fdb.transactional
def set_initialized(tr, metadata: Tuple[str, ...]):
metadata_dir = fdb.directory.create_or_open(tr, metadata)
tr[metadata_dir['initialized']] = fdb.tuple.pack((True,))
tr[metadata_dir["initialized"]] = fdb.tuple.pack((True,))
def write_coverage(cluster_file: str | None, cov_path: Tuple[str, ...], metadata: Tuple[str, ...],
coverage: OrderedDict[Coverage, bool]):
def write_coverage(
cluster_file: str | None,
cov_path: Tuple[str, ...],
metadata: Tuple[str, ...],
coverage: OrderedDict[Coverage, bool],
):
db = open_db(cluster_file)
assert config.joshua_dir is not None
initialized: bool = False
@ -81,12 +93,14 @@ def _read_coverage(tr, cov_path: Tuple[str, ...]) -> OrderedDict[Coverage, int]:
cov_dir = fdb.directory.create_or_open(tr, cov_path)
for k, v in tr[cov_dir.range()]:
file, line, comment, rare = cov_dir.unpack(k)
count = struct.unpack('<I', v)[0]
count = struct.unpack("<I", v)[0]
res[Coverage(file, line, comment, rare)] = count
return res
def read_coverage(cluster_file: str | None, cov_path: Tuple[str, ...]) -> OrderedDict[Coverage, int]:
def read_coverage(
cluster_file: str | None, cov_path: Tuple[str, ...]
) -> OrderedDict[Coverage, int]:
db = open_db(cluster_file)
return _read_coverage(db, cov_path)
@ -105,7 +119,7 @@ class Statistics:
@fdb.transactional
def open_stats_dir(self, tr, app_dir: Tuple[str]):
stats_dir = app_dir + ('runtime_stats',)
stats_dir = app_dir + ("runtime_stats",)
return fdb.directory.create_or_open(tr, stats_dir)
@fdb.transactional
@ -113,14 +127,14 @@ class Statistics:
result = collections.OrderedDict()
for k, v in tr[self.stats_dir.range()]:
test_name = self.stats_dir.unpack(k)[0]
runtime, run_count = struct.unpack('<II', v)
runtime, run_count = struct.unpack("<II", v)
result[test_name] = TestStatistics(runtime, run_count)
return result
@fdb.transactional
def _write_runtime(self, tr, test_name: str, time: int) -> None:
key = self.stats_dir.pack((test_name,))
tr.add(key, struct.pack('<II', time, 1))
tr.add(key, struct.pack("<II", time, 1))
def write_runtime(self, test_name: str, time: int) -> None:
assert self.db is not None
@ -128,8 +142,11 @@ class Statistics:
class FDBStatFetcher(StatFetcher):
def __init__(self, tests: OrderedDict[str, TestDescription],
joshua_dir: Tuple[str] = str_to_tuple(config.joshua_dir)):
def __init__(
self,
tests: OrderedDict[str, TestDescription],
joshua_dir: Tuple[str] = str_to_tuple(config.joshua_dir),
):
super().__init__(tests)
self.statistics = Statistics(config.cluster_file, joshua_dir)
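write_coverage_chunk and _write_runtime above both use FoundationDB's atomic add on little-endian packed integers. A self-contained sketch of that counter idiom, assuming the standard fdb Python binding; the directory path and counter name below are illustrative only:

import struct

import fdb

fdb.api_version(630)


@fdb.transactional
def bump(tr, counters, name):
    # Atomic little-endian add, the same idiom as write_coverage_chunk/_write_runtime.
    tr.add(counters.pack((name,)), struct.pack("<I", 1))


@fdb.transactional
def read_counters(tr, counters):
    # Mirror the range-read pattern used by Statistics._read above.
    result = {}
    for k, v in tr[counters.range()]:
        (name,) = counters.unpack(k)
        result[name] = struct.unpack("<I", v)[0]
    return result


db = fdb.open()  # default cluster file; illustrative only
counters = fdb.directory.create_or_open(db, ("sketch", "counters"))
bump(db, counters, "example")
print(read_counters(db, counters))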

View File

@ -23,7 +23,7 @@ class ToSummaryTree(xml.sax.handler.ContentHandler):
self.stack: List[SummaryTree] = []
def result(self) -> SummaryTree:
assert len(self.stack) == 0 and self.root is not None, 'Parse Error'
assert len(self.stack) == 0 and self.root is not None, "Parse Error"
return self.root
def startElement(self, name, attrs):
@ -46,65 +46,72 @@ def _print_summary(summary: SummaryTree, commands: Set[str]):
is_valgrind_run = False
if config.reproduce_prefix is not None:
cmd.append(config.reproduce_prefix)
cmd.append('bin/fdbserver')
if 'TestFile' in summary.attributes:
file_name = summary.attributes['TestFile']
role = 'test' if test_harness.run.is_no_sim(Path(file_name)) else 'simulation'
cmd += ['-r', role, '-f', file_name]
if re.search(r'restarting\/.*-2\.', file_name):
cmd.append("bin/fdbserver")
if "TestFile" in summary.attributes:
file_name = summary.attributes["TestFile"]
role = "test" if test_harness.run.is_no_sim(Path(file_name)) else "simulation"
cmd += ["-r", role, "-f", file_name]
if re.search(r"restarting\/.*-2\.", file_name):
cmd += ["--restarting"]
else:
cmd += ['-r', 'simulation', '-f', '<ERROR>']
if 'RandomSeed' in summary.attributes:
cmd += ['-s', summary.attributes['RandomSeed']]
cmd += ["-r", "simulation", "-f", "<ERROR>"]
if "RandomSeed" in summary.attributes:
cmd += ["-s", summary.attributes["RandomSeed"]]
else:
cmd += ['-s', '<Error>']
if 'BuggifyEnabled' in summary.attributes:
arg = 'on'
if summary.attributes['BuggifyEnabled'].lower() in ['0', 'off', 'false']:
arg = 'off'
cmd += ['-b', arg]
cmd += ["-s", "<Error>"]
if "BuggifyEnabled" in summary.attributes:
arg = "on"
if summary.attributes["BuggifyEnabled"].lower() in ["0", "off", "false"]:
arg = "off"
cmd += ["-b", arg]
else:
cmd += ['b', '<ERROR>']
cmd += ['--crash', '--trace_format', config.trace_format]
cmd += ["b", "<ERROR>"]
cmd += ["--crash", "--trace_format", config.trace_format]
# we want the command as the first attribute
attributes = {'Command': ' '.join(cmd)}
attributes = {"Command": " ".join(cmd)}
for k, v in summary.attributes.items():
if k == 'Errors':
attributes['ErrorCount'] = v
if k == "Errors":
attributes["ErrorCount"] = v
else:
attributes[k] = v
summary.attributes = attributes
error_count = 0
warning_count = 0
small_summary = SummaryTree('Test')
small_summary = SummaryTree("Test")
small_summary.attributes = attributes
errors = SummaryTree('Errors')
warnings = SummaryTree('Warnings')
errors = SummaryTree("Errors")
warnings = SummaryTree("Warnings")
buggifies: OrderedDict[str, List[int]] = collections.OrderedDict()
for child in summary.children:
if 'Severity' in child.attributes and child.attributes['Severity'] == '40' and error_count < config.max_errors:
if (
"Severity" in child.attributes
and child.attributes["Severity"] == "40"
and error_count < config.max_errors
):
error_count += 1
if errors.name == 'ValgrindError':
if errors.name == "ValgrindError":
is_valgrind_run = True
errors.append(child)
if 'Severity' in child.attributes and child.attributes[
'Severity'] == '30' and warning_count < config.max_warnings:
if (
"Severity" in child.attributes
and child.attributes["Severity"] == "30"
and warning_count < config.max_warnings
):
warning_count += 1
warnings.append(child)
if child.name == 'BuggifySection':
file = child.attributes['File']
line = int(child.attributes['Line'])
if child.name == "BuggifySection":
file = child.attributes["File"]
line = int(child.attributes["Line"])
buggifies.setdefault(file, []).append(line)
buggifies_elem = SummaryTree('Buggifies')
buggifies_elem = SummaryTree("Buggifies")
for file, lines in buggifies.items():
lines.sort()
if config.output_format == 'json':
buggifies_elem.attributes[file] = ' '.join(str(line) for line in lines)
if config.output_format == "json":
buggifies_elem.attributes[file] = " ".join(str(line) for line in lines)
else:
child = SummaryTree('Buggify')
child.attributes['File'] = file
child.attributes['Lines'] = ' '.join(str(line) for line in lines)
child = SummaryTree("Buggify")
child.attributes["File"] = file
child.attributes["Lines"] = " ".join(str(line) for line in lines)
small_summary.append(child)
small_summary.children.append(buggifies_elem)
if len(errors.children) > 0:
@ -113,38 +120,51 @@ def _print_summary(summary: SummaryTree, commands: Set[str]):
small_summary.children.append(warnings)
if is_valgrind_run:
idx = 0 if config.reproduce_prefix is None else 1
cmd.insert(idx, 'valgrind')
key = ' '.join(cmd)
cmd.insert(idx, "valgrind")
key = " ".join(cmd)
count = 1
while key in commands:
key = '{} # {}'.format(' '.join(cmd), count)
key = "{} # {}".format(" ".join(cmd), count)
count += 1
if config.details:
key = str(len(commands))
str_io = io.StringIO()
summary.dump(str_io, prefix=(' ' if config.pretty_print else ''))
if config.output_format == 'json':
sys.stdout.write('{}"Test{}": {}'.format(' ' if config.pretty_print else '',
key, str_io.getvalue()))
summary.dump(str_io, prefix=(" " if config.pretty_print else ""))
if config.output_format == "json":
sys.stdout.write(
'{}"Test{}": {}'.format(
" " if config.pretty_print else "", key, str_io.getvalue()
)
)
else:
sys.stdout.write(str_io.getvalue())
if config.pretty_print:
sys.stdout.write('\n' if config.output_format == 'xml' else ',\n')
sys.stdout.write("\n" if config.output_format == "xml" else ",\n")
return key
output = io.StringIO()
small_summary.dump(output, prefix=(' ' if config.pretty_print else ''))
if config.output_format == 'json':
sys.stdout.write('{}"{}": {}'.format(' ' if config.pretty_print else '', key, output.getvalue().strip()))
small_summary.dump(output, prefix=(" " if config.pretty_print else ""))
if config.output_format == "json":
sys.stdout.write(
'{}"{}": {}'.format(
" " if config.pretty_print else "", key, output.getvalue().strip()
)
)
else:
sys.stdout.write('{}{}'.format(' ' if config.pretty_print else '', output.getvalue().strip()))
sys.stdout.write('\n' if config.output_format == 'xml' else ',\n')
sys.stdout.write(
"{}{}".format(
" " if config.pretty_print else "", output.getvalue().strip()
)
)
sys.stdout.write("\n" if config.output_format == "xml" else ",\n")
def print_errors(ensemble_id: str):
joshua_model.open(config.cluster_file)
properties = joshua_model.get_ensemble_properties(ensemble_id)
compressed = properties["compressed"] if "compressed" in properties else False
for rec in joshua_model.tail_results(ensemble_id, errors_only=(not config.success), compressed=compressed):
for rec in joshua_model.tail_results(
ensemble_id, errors_only=(not config.success), compressed=compressed
):
if len(rec) == 5:
version_stamp, result_code, host, seed, output = rec
elif len(rec) == 4:

View File

@ -25,10 +25,12 @@ class GlobalStatistics:
class EnsembleResults:
def __init__(self, cluster_file: str | None, ensemble_id: str):
self.global_statistics = GlobalStatistics()
self.fdb_path = ('joshua', 'ensembles', 'results', 'application', ensemble_id)
self.coverage_path = self.fdb_path + ('coverage',)
self.fdb_path = ("joshua", "ensembles", "results", "application", ensemble_id)
self.coverage_path = self.fdb_path + ("coverage",)
self.statistics = test_harness.fdb.Statistics(cluster_file, self.fdb_path)
coverage_dict: OrderedDict[Coverage, int] = test_harness.fdb.read_coverage(cluster_file, self.coverage_path)
coverage_dict: OrderedDict[Coverage, int] = test_harness.fdb.read_coverage(
cluster_file, self.coverage_path
)
self.coverage: List[Tuple[Coverage, int]] = []
self.min_coverage_hit: int | None = None
self.ratio = self.global_statistics.total_test_runs / config.hit_per_runs_ratio
@ -59,14 +61,16 @@ class EnsembleResults:
def dump(self, prefix: str):
errors = 0
out = SummaryTree('EnsembleResults')
out.attributes['TotalRuntime'] = str(self.global_statistics.total_cpu_time)
out.attributes['TotalTestRuns'] = str(self.global_statistics.total_test_runs)
out.attributes['TotalProbesHit'] = str(self.global_statistics.total_probes_hit)
out.attributes['MinProbeHit'] = str(self.min_coverage_hit)
out.attributes['TotalProbes'] = str(len(self.coverage))
out.attributes['MissedProbes'] = str(self.global_statistics.total_missed_probes)
out.attributes['MissedNonRareProbes'] = str(self.global_statistics.total_missed_nonrare_probes)
out = SummaryTree("EnsembleResults")
out.attributes["TotalRuntime"] = str(self.global_statistics.total_cpu_time)
out.attributes["TotalTestRuns"] = str(self.global_statistics.total_test_runs)
out.attributes["TotalProbesHit"] = str(self.global_statistics.total_probes_hit)
out.attributes["MinProbeHit"] = str(self.min_coverage_hit)
out.attributes["TotalProbes"] = str(len(self.coverage))
out.attributes["MissedProbes"] = str(self.global_statistics.total_missed_probes)
out.attributes["MissedNonRareProbes"] = str(
self.global_statistics.total_missed_nonrare_probes
)
for cov, count in self.coverage:
severity = 10
@ -75,77 +79,92 @@ class EnsembleResults:
if severity == 40:
errors += 1
if (severity == 40 and errors <= config.max_errors) or config.details:
child = SummaryTree('CodeProbe')
child.attributes['Severity'] = str(severity)
child.attributes['File'] = cov.file
child.attributes['Line'] = str(cov.line)
child.attributes['Comment'] = '' if cov.comment is None else cov.comment
child.attributes['HitCount'] = str(count)
child.attributes['Rare'] = str(cov.rare)
child = SummaryTree("CodeProbe")
child.attributes["Severity"] = str(severity)
child.attributes["File"] = cov.file
child.attributes["Line"] = str(cov.line)
child.attributes["Comment"] = "" if cov.comment is None else cov.comment
child.attributes["HitCount"] = str(count)
child.attributes["Rare"] = str(cov.rare)
out.append(child)
if config.details:
for k, runtime, run_count in self.stats:
child = SummaryTree('Test')
child.attributes['Name'] = k
child.attributes['Runtime'] = str(runtime)
child.attributes['RunCount'] = str(run_count)
child = SummaryTree("Test")
child.attributes["Name"] = k
child.attributes["Runtime"] = str(runtime)
child.attributes["RunCount"] = str(run_count)
out.append(child)
if errors > 0:
out.attributes['Errors'] = str(errors)
out.attributes["Errors"] = str(errors)
str_io = io.StringIO()
out.dump(str_io, prefix=prefix, new_line=config.pretty_print)
if config.output_format == 'xml':
if config.output_format == "xml":
sys.stdout.write(str_io.getvalue())
else:
sys.stdout.write('{}"EnsembleResults":{}{}'.format(' ' if config.pretty_print else '',
'\n' if config.pretty_print else ' ',
str_io.getvalue()))
sys.stdout.write(
'{}"EnsembleResults":{}{}'.format(
" " if config.pretty_print else "",
"\n" if config.pretty_print else " ",
str_io.getvalue(),
)
)
def write_header(ensemble_id: str):
if config.output_format == 'json':
if config.output_format == "json":
if config.pretty_print:
print('{')
print(' "{}": {},\n'.format('ID', json.dumps(ensemble_id.strip())))
print("{")
print(' "{}": {},\n'.format("ID", json.dumps(ensemble_id.strip())))
else:
sys.stdout.write('{{{}: {},'.format('ID', json.dumps(ensemble_id.strip())))
elif config.output_format == 'xml':
sys.stdout.write('<Ensemble ID={}>'.format(quoteattr(ensemble_id.strip())))
sys.stdout.write("{{{}: {},".format("ID", json.dumps(ensemble_id.strip())))
elif config.output_format == "xml":
sys.stdout.write("<Ensemble ID={}>".format(quoteattr(ensemble_id.strip())))
if config.pretty_print:
sys.stdout.write('\n')
sys.stdout.write("\n")
else:
assert False, 'unknown output format {}'.format(config.output_format)
assert False, "unknown output format {}".format(config.output_format)
def write_footer():
if config.output_format == 'xml':
sys.stdout.write('</Ensemble>\n')
elif config.output_format == 'json':
sys.stdout.write('}\n')
if config.output_format == "xml":
sys.stdout.write("</Ensemble>\n")
elif config.output_format == "json":
sys.stdout.write("}\n")
else:
assert False, 'unknown output format {}'.format(config.output_format)
assert False, "unknown output format {}".format(config.output_format)
if __name__ == '__main__':
parser = argparse.ArgumentParser('TestHarness Results', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
config.change_default('pretty_print', True)
config.change_default('max_warnings', 0)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
"TestHarness Results", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
config.change_default("pretty_print", True)
config.change_default("max_warnings", 0)
config.build_arguments(parser)
parser.add_argument('ensemble_id', type=str, help='The ensemble to fetch the result for')
parser.add_argument(
"ensemble_id", type=str, help="The ensemble to fetch the result for"
)
args = parser.parse_args()
config.extract_args(args)
config.output_format = args.output_format
write_header(args.ensemble_id)
try:
import test_harness.joshua
test_harness.joshua.print_errors(args.ensemble_id)
except ModuleNotFoundError:
child = SummaryTree('JoshuaNotFound')
child.attributes['Severity'] = '30'
child.attributes['Message'] = 'Could not import Joshua -- set PYTHONPATH to joshua checkout dir'
child.dump(sys.stdout, prefix=(' ' if config.pretty_print else ''), new_line=config.pretty_print)
child = SummaryTree("JoshuaNotFound")
child.attributes["Severity"] = "30"
child.attributes[
"Message"
] = "Could not import Joshua -- set PYTHONPATH to joshua checkout dir"
child.dump(
sys.stdout,
prefix=(" " if config.pretty_print else ""),
new_line=config.pretty_print,
)
results = EnsembleResults(config.cluster_file, args.ensemble_id)
results.dump(' ' if config.pretty_print else '')
results.dump(" " if config.pretty_print else "")
write_footer()
exit(0 if results.coverage_ok else 1)

View File

@ -405,7 +405,7 @@ class TestRun:
str(self.uid),
"--log-directory",
str(self.temp_path),
"--check-rocksdb"
"--check-rocksdb",
]
subprocess.run(command, check=True)

View File

@ -13,7 +13,18 @@ import xml.sax.handler
import xml.sax.saxutils
from pathlib import Path
from typing import List, Dict, TextIO, Callable, Optional, OrderedDict, Any, Tuple, Iterator, Iterable
from typing import (
List,
Dict,
TextIO,
Callable,
Optional,
OrderedDict,
Any,
Tuple,
Iterator,
Iterable,
)
from test_harness.config import config
from test_harness.valgrind import parse_valgrind_output
@ -39,7 +50,7 @@ class SummaryTree:
return children
res: Dict[str, Any] = {}
if add_name:
res['Type'] = self.name
res["Type"] = self.name
for k, v in self.attributes.items():
res[k] = v
children = []
@ -55,15 +66,15 @@ class SummaryTree:
else:
children.append(child.to_dict())
if len(children) > 0:
res['children'] = children
res["children"] = children
return res
def to_json(self, out: TextIO, prefix: str = ''):
res = json.dumps(self.to_dict(), indent=(' ' if config.pretty_print else None))
def to_json(self, out: TextIO, prefix: str = ""):
res = json.dumps(self.to_dict(), indent=(" " if config.pretty_print else None))
for line in res.splitlines(False):
out.write('{}{}\n'.format(prefix, line))
out.write("{}{}\n".format(prefix, line))
def to_xml(self, out: TextIO, prefix: str = ''):
def to_xml(self, out: TextIO, prefix: str = ""):
# minidom doesn't support omitting the xml declaration which is a problem for joshua
# However, our xml is very simple and therefore serializing manually is easy enough
attrs = []
@ -73,8 +84,8 @@ class SummaryTree:
except OSError:
pass
for k, v in self.attributes.items():
attrs.append('{}={}'.format(k, xml.sax.saxutils.quoteattr(v)))
elem = '{}<{}{}'.format(prefix, self.name, ('' if len(attrs) == 0 else ' '))
attrs.append("{}={}".format(k, xml.sax.saxutils.quoteattr(v)))
elem = "{}<{}{}".format(prefix, self.name, ("" if len(attrs) == 0 else " "))
out.write(elem)
if config.pretty_print:
curr_line_len = len(elem)
@ -82,34 +93,40 @@ class SummaryTree:
attr_len = len(attrs[i])
if i == 0 or attr_len + curr_line_len + 1 <= print_width:
if i != 0:
out.write(' ')
out.write(" ")
out.write(attrs[i])
curr_line_len += attr_len
else:
out.write('\n')
out.write(' ' * len(elem))
out.write("\n")
out.write(" " * len(elem))
out.write(attrs[i])
curr_line_len = len(elem) + attr_len
else:
out.write(' '.join(attrs))
out.write(" ".join(attrs))
if len(self.children) == 0:
out.write('/>')
out.write("/>")
else:
out.write('>')
out.write(">")
for child in self.children:
if config.pretty_print:
out.write('\n')
child.to_xml(out, prefix=(' {}'.format(prefix) if config.pretty_print else prefix))
out.write("\n")
child.to_xml(
out, prefix=(" {}".format(prefix) if config.pretty_print else prefix)
)
if len(self.children) > 0:
out.write('{}{}</{}>'.format(('\n' if config.pretty_print else ''), prefix, self.name))
out.write(
"{}{}</{}>".format(
("\n" if config.pretty_print else ""), prefix, self.name
)
)
def dump(self, out: TextIO, prefix: str = '', new_line: bool = True):
if config.output_format == 'json':
def dump(self, out: TextIO, prefix: str = "", new_line: bool = True):
if config.output_format == "json":
self.to_json(out, prefix=prefix)
else:
self.to_xml(out, prefix=prefix)
if new_line:
out.write('\n')
out.write("\n")
ParserCallback = Callable[[Dict[str, str]], Optional[str]]
@ -118,9 +135,13 @@ ParserCallback = Callable[[Dict[str, str]], Optional[str]]
class ParseHandler:
def __init__(self, out: SummaryTree):
self.out = out
self.events: OrderedDict[Optional[Tuple[str, Optional[str]]], List[ParserCallback]] = collections.OrderedDict()
self.events: OrderedDict[
Optional[Tuple[str, Optional[str]]], List[ParserCallback]
] = collections.OrderedDict()
def add_handler(self, attr: Tuple[str, Optional[str]], callback: ParserCallback) -> None:
def add_handler(
self, attr: Tuple[str, Optional[str]], callback: ParserCallback
) -> None:
self.events.setdefault(attr, []).append(callback)
def _call(self, callback: ParserCallback, attrs: Dict[str, str]) -> str | None:
@ -128,10 +149,10 @@ class ParseHandler:
return callback(attrs)
except Exception as e:
_, _, exc_traceback = sys.exc_info()
child = SummaryTree('NonFatalParseError')
child.attributes['Severity'] = '30'
child.attributes['ErrorMessage'] = str(e)
child.attributes['Trace'] = repr(traceback.format_tb(exc_traceback))
child = SummaryTree("NonFatalParseError")
child.attributes["Severity"] = "30"
child.attributes["ErrorMessage"] = str(e)
child.attributes["Trace"] = repr(traceback.format_tb(exc_traceback))
self.out.append(child)
return None
@ -193,7 +214,9 @@ class JsonParser(Parser):
class Coverage:
def __init__(self, file: str, line: str | int, comment: str | None = None, rare: bool = False):
def __init__(
self, file: str, line: str | int, comment: str | None = None, rare: bool = False
):
self.file = file
self.line = int(line)
self.comment = comment
@ -251,10 +274,10 @@ class TraceFiles:
self.path: Path = path
self.timestamps: List[int] = []
self.runs: OrderedDict[int, List[Path]] = collections.OrderedDict()
trace_expr = re.compile(r'trace.*\.(json|xml)')
trace_expr = re.compile(r"trace.*\.(json|xml)")
for file in self.path.iterdir():
if file.is_file() and trace_expr.match(file.name) is not None:
ts = int(file.name.split('.')[6])
ts = int(file.name.split(".")[6])
if ts in self.runs:
self.runs[ts].append(file)
else:
@ -289,10 +312,21 @@ class TraceFiles:
class Summary:
def __init__(self, binary: Path, runtime: float = 0, max_rss: int | None = None,
was_killed: bool = False, uid: uuid.UUID | None = None, expected_unseed: int | None = None,
exit_code: int = 0, valgrind_out_file: Path | None = None, stats: str | None = None,
error_out: str = None, will_restart: bool = False, long_running: bool = False):
def __init__(
self,
binary: Path,
runtime: float = 0,
max_rss: int | None = None,
was_killed: bool = False,
uid: uuid.UUID | None = None,
expected_unseed: int | None = None,
exit_code: int = 0,
valgrind_out_file: Path | None = None,
stats: str | None = None,
error_out: str = None,
will_restart: bool = False,
long_running: bool = False,
):
self.binary = binary
self.runtime: float = runtime
self.max_rss: int | None = max_rss
@ -300,7 +334,7 @@ class Summary:
self.long_running = long_running
self.expected_unseed: int | None = expected_unseed
self.exit_code: int = exit_code
self.out: SummaryTree = SummaryTree('Test')
self.out: SummaryTree = SummaryTree("Test")
self.test_begin_found: bool = False
self.test_end_found: bool = False
self.unseed: int | None = None
@ -313,21 +347,21 @@ class Summary:
self.test_count: int = 0
self.tests_passed: int = 0
self.error_out = error_out
self.stderr_severity: str = '40'
self.stderr_severity: str = "40"
self.will_restart: bool = will_restart
self.test_dir: Path | None = None
self.is_negative_test = False
self.negative_test_success = False
self.max_trace_time = -1
self.max_trace_time_type = 'None'
self.max_trace_time_type = "None"
if uid is not None:
self.out.attributes['TestUID'] = str(uid)
self.out.attributes["TestUID"] = str(uid)
if stats is not None:
self.out.attributes['Statistics'] = stats
self.out.attributes['JoshuaSeed'] = str(config.joshua_seed)
self.out.attributes['WillRestart'] = '1' if self.will_restart else '0'
self.out.attributes['NegativeTest'] = '1' if self.is_negative_test else '0'
self.out.attributes["Statistics"] = stats
self.out.attributes["JoshuaSeed"] = str(config.joshua_seed)
self.out.attributes["WillRestart"] = "1" if self.will_restart else "0"
self.out.attributes["NegativeTest"] = "1" if self.is_negative_test else "0"
self.handler = ParseHandler(self.out)
self.register_handlers()
@ -343,35 +377,39 @@ class Summary:
trace_files = TraceFiles(trace_dir)
if len(trace_files) == 0:
self.error = True
child = SummaryTree('NoTracesFound')
child.attributes['Severity'] = '40'
child.attributes['Path'] = str(trace_dir.absolute())
child.attributes['Command'] = command
child = SummaryTree("NoTracesFound")
child.attributes["Severity"] = "40"
child.attributes["Path"] = str(trace_dir.absolute())
child.attributes["Command"] = command
self.out.append(child)
return
self.summarize_files(trace_files[0])
if config.joshua_dir is not None:
import test_harness.fdb
test_harness.fdb.write_coverage(config.cluster_file,
test_harness.fdb.str_to_tuple(config.joshua_dir) + ('coverage',),
test_harness.fdb.str_to_tuple(config.joshua_dir) + ('coverage-metadata',),
self.coverage)
test_harness.fdb.write_coverage(
config.cluster_file,
test_harness.fdb.str_to_tuple(config.joshua_dir) + ("coverage",),
test_harness.fdb.str_to_tuple(config.joshua_dir)
+ ("coverage-metadata",),
self.coverage,
)
def list_simfdb(self) -> SummaryTree:
res = SummaryTree('SimFDB')
res.attributes['TestDir'] = str(self.test_dir)
res = SummaryTree("SimFDB")
res.attributes["TestDir"] = str(self.test_dir)
if self.test_dir is None:
return res
simfdb = self.test_dir / Path('simfdb')
simfdb = self.test_dir / Path("simfdb")
if not simfdb.exists():
res.attributes['NoSimDir'] = "simfdb doesn't exist"
res.attributes["NoSimDir"] = "simfdb doesn't exist"
return res
elif not simfdb.is_dir():
res.attributes['NoSimDir'] = 'simfdb is not a directory'
res.attributes["NoSimDir"] = "simfdb is not a directory"
return res
for file in simfdb.iterdir():
child = SummaryTree('Directory' if file.is_dir() else 'File')
child.attributes['Name'] = file.name
child = SummaryTree("Directory" if file.is_dir() else "File")
child.attributes["Name"] = file.name
res.append(child)
return res
@ -382,67 +420,69 @@ class Summary:
def done(self):
if config.print_coverage:
for k, v in self.coverage.items():
child = SummaryTree('CodeCoverage')
child.attributes['File'] = k.file
child.attributes['Line'] = str(k.line)
child.attributes['Rare'] = k.rare
child = SummaryTree("CodeCoverage")
child.attributes["File"] = k.file
child.attributes["Line"] = str(k.line)
child.attributes["Rare"] = k.rare
if not v:
child.attributes['Covered'] = '0'
child.attributes["Covered"] = "0"
if k.comment is not None and len(k.comment):
child.attributes['Comment'] = k.comment
child.attributes["Comment"] = k.comment
self.out.append(child)
if self.warnings > config.max_warnings:
child = SummaryTree('WarningLimitExceeded')
child.attributes['Severity'] = '30'
child.attributes['WarningCount'] = str(self.warnings)
child = SummaryTree("WarningLimitExceeded")
child.attributes["Severity"] = "30"
child.attributes["WarningCount"] = str(self.warnings)
self.out.append(child)
if self.errors > config.max_errors:
child = SummaryTree('ErrorLimitExceeded')
child.attributes['Severity'] = '40'
child.attributes['ErrorCount'] = str(self.errors)
child = SummaryTree("ErrorLimitExceeded")
child.attributes["Severity"] = "40"
child.attributes["ErrorCount"] = str(self.errors)
self.out.append(child)
self.error = True
if self.was_killed:
child = SummaryTree('ExternalTimeout')
child.attributes['Severity'] = '40'
child = SummaryTree("ExternalTimeout")
child.attributes["Severity"] = "40"
if self.long_running:
# debugging info for long-running tests
child.attributes['LongRunning'] = '1'
child.attributes['Runtime'] = str(self.runtime)
child.attributes["LongRunning"] = "1"
child.attributes["Runtime"] = str(self.runtime)
self.out.append(child)
self.error = True
if self.max_rss is not None:
self.out.attributes['PeakMemory'] = str(self.max_rss)
self.out.attributes["PeakMemory"] = str(self.max_rss)
if self.valgrind_out_file is not None:
try:
valgrind_errors = parse_valgrind_output(self.valgrind_out_file)
for valgrind_error in valgrind_errors:
if valgrind_error.kind.startswith('Leak'):
if valgrind_error.kind.startswith("Leak"):
continue
self.error = True
child = SummaryTree('ValgrindError')
child.attributes['Severity'] = '40'
child.attributes['What'] = valgrind_error.what.what
child.attributes['Backtrace'] = valgrind_error.what.backtrace
child = SummaryTree("ValgrindError")
child.attributes["Severity"] = "40"
child.attributes["What"] = valgrind_error.what.what
child.attributes["Backtrace"] = valgrind_error.what.backtrace
aux_count = 0
for aux in valgrind_error.aux:
child.attributes['WhatAux{}'.format(aux_count)] = aux.what
child.attributes['BacktraceAux{}'.format(aux_count)] = aux.backtrace
child.attributes["WhatAux{}".format(aux_count)] = aux.what
child.attributes[
"BacktraceAux{}".format(aux_count)
] = aux.backtrace
aux_count += 1
self.out.append(child)
except Exception as e:
self.error = True
child = SummaryTree('ValgrindParseError')
child.attributes['Severity'] = '40'
child.attributes['ErrorMessage'] = str(e)
child = SummaryTree("ValgrindParseError")
child.attributes["Severity"] = "40"
child.attributes["ErrorMessage"] = str(e)
_, _, exc_traceback = sys.exc_info()
child.attributes['Trace'] = repr(traceback.format_tb(exc_traceback))
child.attributes["Trace"] = repr(traceback.format_tb(exc_traceback))
self.out.append(child)
if not self.test_end_found:
child = SummaryTree('TestUnexpectedlyNotFinished')
child.attributes['Severity'] = '40'
child.attributes['LastTraceTime'] = str(self.max_trace_time)
child.attributes['LastTraceType'] = self.max_trace_time_type
child = SummaryTree("TestUnexpectedlyNotFinished")
child.attributes["Severity"] = "40"
child.attributes["LastTraceTime"] = str(self.max_trace_time)
child.attributes["LastTraceType"] = self.max_trace_time_type
self.out.append(child)
self.error = True
if self.error_out is not None and len(self.error_out) > 0:
@ -450,234 +490,254 @@ class Summary:
stderr_bytes = 0
for line in lines:
if line.endswith(
"WARNING: ASan doesn't fully support makecontext/swapcontext functions and may produce false positives in some cases!"):
"WARNING: ASan doesn't fully support makecontext/swapcontext functions and may produce false positives in some cases!"
):
# When running ASAN we expect to see this message. Boost coroutine should be using the correct asan annotations so that it shouldn't produce any false positives.
continue
if line.endswith("Warning: unimplemented fcntl command: 1036"):
# Valgrind produces this warning when F_SET_RW_HINT is used
continue
if self.stderr_severity == '40':
if self.stderr_severity == "40":
self.error = True
remaining_bytes = config.max_stderr_bytes - stderr_bytes
if remaining_bytes > 0:
out_err = line[0:remaining_bytes] + ('...' if len(line) > remaining_bytes else '')
child = SummaryTree('StdErrOutput')
child.attributes['Severity'] = self.stderr_severity
child.attributes['Output'] = out_err
out_err = line[0:remaining_bytes] + (
"..." if len(line) > remaining_bytes else ""
)
child = SummaryTree("StdErrOutput")
child.attributes["Severity"] = self.stderr_severity
child.attributes["Output"] = out_err
self.out.append(child)
stderr_bytes += len(line)
if stderr_bytes > config.max_stderr_bytes:
child = SummaryTree('StdErrOutputTruncated')
child.attributes['Severity'] = self.stderr_severity
child.attributes['BytesRemaining'] = str(stderr_bytes - config.max_stderr_bytes)
child = SummaryTree("StdErrOutputTruncated")
child.attributes["Severity"] = self.stderr_severity
child.attributes["BytesRemaining"] = str(
stderr_bytes - config.max_stderr_bytes
)
self.out.append(child)
self.out.attributes['Ok'] = '1' if self.ok() else '0'
self.out.attributes['Runtime'] = str(self.runtime)
self.out.attributes["Ok"] = "1" if self.ok() else "0"
self.out.attributes["Runtime"] = str(self.runtime)
if not self.ok():
reason = 'Unknown'
reason = "Unknown"
if self.error:
reason = 'ProducedErrors'
reason = "ProducedErrors"
elif not self.test_end_found:
reason = 'TestDidNotFinish'
reason = "TestDidNotFinish"
elif self.tests_passed == 0:
reason = 'NoTestsPassed'
reason = "NoTestsPassed"
elif self.test_count != self.tests_passed:
reason = 'Expected {} tests to pass, but only {} did'.format(self.test_count, self.tests_passed)
self.out.attributes['FailReason'] = reason
reason = "Expected {} tests to pass, but only {} did".format(
self.test_count, self.tests_passed
)
self.out.attributes["FailReason"] = reason
def parse_file(self, file: Path):
parser: Parser
if file.suffix == '.json':
if file.suffix == ".json":
parser = JsonParser()
elif file.suffix == '.xml':
elif file.suffix == ".xml":
parser = XmlParser()
else:
child = SummaryTree('TestHarnessBug')
child.attributes['File'] = __file__
child = SummaryTree("TestHarnessBug")
child.attributes["File"] = __file__
frame = inspect.currentframe()
if frame is not None:
child.attributes['Line'] = str(inspect.getframeinfo(frame).lineno)
child.attributes['Details'] = 'Unexpected suffix {} for file {}'.format(file.suffix, file.name)
child.attributes["Line"] = str(inspect.getframeinfo(frame).lineno)
child.attributes["Details"] = "Unexpected suffix {} for file {}".format(
file.suffix, file.name
)
self.error = True
self.out.append(child)
return
with file.open('r') as f:
with file.open("r") as f:
try:
parser.parse(f, self.handler)
except Exception as e:
child = SummaryTree('SummarizationError')
child.attributes['Severity'] = '40'
child.attributes['ErrorMessage'] = str(e)
child = SummaryTree("SummarizationError")
child.attributes["Severity"] = "40"
child.attributes["ErrorMessage"] = str(e)
self.out.append(child)
def register_handlers(self):
def remap_event_severity(attrs):
if 'Type' not in attrs or 'Severity' not in attrs:
if "Type" not in attrs or "Severity" not in attrs:
return None
k = (attrs['Type'], int(attrs['Severity']))
k = (attrs["Type"], int(attrs["Severity"]))
if k in self.severity_map:
return str(self.severity_map[k])
self.handler.add_handler(('Severity', None), remap_event_severity)
self.handler.add_handler(("Severity", None), remap_event_severity)
def get_max_trace_time(attrs):
if 'Type' not in attrs:
if "Type" not in attrs:
return None
time = float(attrs['Time'])
time = float(attrs["Time"])
if time >= self.max_trace_time:
self.max_trace_time = time
self.max_trace_time_type = attrs['Type']
self.max_trace_time_type = attrs["Type"]
return None
self.handler.add_handler(('Time', None), get_max_trace_time)
self.handler.add_handler(("Time", None), get_max_trace_time)
def program_start(attrs: Dict[str, str]):
if self.test_begin_found:
return
self.test_begin_found = True
self.out.attributes['RandomSeed'] = attrs['RandomSeed']
self.out.attributes['SourceVersion'] = attrs['SourceVersion']
self.out.attributes['Time'] = attrs['ActualTime']
self.out.attributes['BuggifyEnabled'] = attrs['BuggifyEnabled']
self.out.attributes['DeterminismCheck'] = '0' if self.expected_unseed is None else '1'
if self.binary.name != 'fdbserver':
self.out.attributes['OldBinary'] = self.binary.name
if 'FaultInjectionEnabled' in attrs:
self.out.attributes['FaultInjectionEnabled'] = attrs['FaultInjectionEnabled']
self.out.attributes["RandomSeed"] = attrs["RandomSeed"]
self.out.attributes["SourceVersion"] = attrs["SourceVersion"]
self.out.attributes["Time"] = attrs["ActualTime"]
self.out.attributes["BuggifyEnabled"] = attrs["BuggifyEnabled"]
self.out.attributes["DeterminismCheck"] = (
"0" if self.expected_unseed is None else "1"
)
if self.binary.name != "fdbserver":
self.out.attributes["OldBinary"] = self.binary.name
if "FaultInjectionEnabled" in attrs:
self.out.attributes["FaultInjectionEnabled"] = attrs[
"FaultInjectionEnabled"
]
self.handler.add_handler(('Type', 'ProgramStart'), program_start)
self.handler.add_handler(("Type", "ProgramStart"), program_start)
def negative_test_success(attrs: Dict[str, str]):
self.negative_test_success = True
child = SummaryTree(attrs['Type'])
child = SummaryTree(attrs["Type"])
for k, v in attrs.items():
if k != 'Type':
if k != "Type":
child.attributes[k] = v
self.out.append(child)
pass
self.handler.add_handler(('Type', 'NegativeTestSuccess'), negative_test_success)
self.handler.add_handler(("Type", "NegativeTestSuccess"), negative_test_success)
def config_string(attrs: Dict[str, str]):
self.out.attributes['ConfigString'] = attrs['ConfigString']
self.out.attributes["ConfigString"] = attrs["ConfigString"]
self.handler.add_handler(('Type', 'SimulatorConfig'), config_string)
self.handler.add_handler(("Type", "SimulatorConfig"), config_string)
def set_test_file(attrs: Dict[str, str]):
test_file = Path(attrs['TestFile'])
cwd = Path('.').absolute()
test_file = Path(attrs["TestFile"])
cwd = Path(".").absolute()
try:
test_file = test_file.relative_to(cwd)
except ValueError:
pass
self.out.attributes['TestFile'] = str(test_file)
self.out.attributes["TestFile"] = str(test_file)
self.handler.add_handler(('Type', 'Simulation'), set_test_file)
self.handler.add_handler(('Type', 'NonSimulationTest'), set_test_file)
self.handler.add_handler(("Type", "Simulation"), set_test_file)
self.handler.add_handler(("Type", "NonSimulationTest"), set_test_file)
def set_elapsed_time(attrs: Dict[str, str]):
if self.test_end_found:
return
self.test_end_found = True
self.unseed = int(attrs['RandomUnseed'])
self.unseed = int(attrs["RandomUnseed"])
if self.expected_unseed is not None and self.unseed != self.expected_unseed:
severity = 40 if ('UnseedMismatch', 40) not in self.severity_map \
else self.severity_map[('UnseedMismatch', 40)]
severity = (
40
if ("UnseedMismatch", 40) not in self.severity_map
else self.severity_map[("UnseedMismatch", 40)]
)
if severity >= 30:
child = SummaryTree('UnseedMismatch')
child.attributes['Unseed'] = str(self.unseed)
child.attributes['ExpectedUnseed'] = str(self.expected_unseed)
child.attributes['Severity'] = str(severity)
child = SummaryTree("UnseedMismatch")
child.attributes["Unseed"] = str(self.unseed)
child.attributes["ExpectedUnseed"] = str(self.expected_unseed)
child.attributes["Severity"] = str(severity)
if severity >= 40:
self.error = True
self.out.append(child)
self.out.attributes['SimElapsedTime'] = attrs['SimTime']
self.out.attributes['RealElapsedTime'] = attrs['RealTime']
self.out.attributes["SimElapsedTime"] = attrs["SimTime"]
self.out.attributes["RealElapsedTime"] = attrs["RealTime"]
if self.unseed is not None:
self.out.attributes['RandomUnseed'] = str(self.unseed)
self.out.attributes["RandomUnseed"] = str(self.unseed)
self.handler.add_handler(('Type', 'ElapsedTime'), set_elapsed_time)
self.handler.add_handler(("Type", "ElapsedTime"), set_elapsed_time)
def parse_warning(attrs: Dict[str, str]):
self.warnings += 1
if self.warnings > config.max_warnings:
return
child = SummaryTree(attrs['Type'])
child = SummaryTree(attrs["Type"])
for k, v in attrs.items():
if k != 'Type':
if k != "Type":
child.attributes[k] = v
self.out.append(child)
self.handler.add_handler(('Severity', '30'), parse_warning)
self.handler.add_handler(("Severity", "30"), parse_warning)
def parse_error(attrs: Dict[str, str]):
if 'ErrorIsInjectedFault' in attrs and attrs['ErrorIsInjectedFault'].lower() in ['1', 'true']:
if "ErrorIsInjectedFault" in attrs and attrs[
"ErrorIsInjectedFault"
].lower() in ["1", "true"]:
# ignore injected errors. In newer fdb versions these will have a lower severity
return
self.errors += 1
self.error = True
if self.errors > config.max_errors:
return
child = SummaryTree(attrs['Type'])
child = SummaryTree(attrs["Type"])
for k, v in attrs.items():
child.attributes[k] = v
self.out.append(child)
self.handler.add_handler(('Severity', '40'), parse_error)
self.handler.add_handler(("Severity", "40"), parse_error)
def coverage(attrs: Dict[str, str]):
covered = True
if 'Covered' in attrs:
covered = int(attrs['Covered']) != 0
comment = ''
if 'Comment' in attrs:
comment = attrs['Comment']
if "Covered" in attrs:
covered = int(attrs["Covered"]) != 0
comment = ""
if "Comment" in attrs:
comment = attrs["Comment"]
rare = False
if 'Rare' in attrs:
rare = bool(int(attrs['Rare']))
c = Coverage(attrs['File'], attrs['Line'], comment, rare)
if "Rare" in attrs:
rare = bool(int(attrs["Rare"]))
c = Coverage(attrs["File"], attrs["Line"], comment, rare)
if covered or c not in self.coverage:
self.coverage[c] = covered
self.handler.add_handler(('Type', 'CodeCoverage'), coverage)
self.handler.add_handler(("Type", "CodeCoverage"), coverage)
def expected_test_pass(attrs: Dict[str, str]):
self.test_count = int(attrs['Count'])
self.test_count = int(attrs["Count"])
self.handler.add_handler(('Type', 'TestsExpectedToPass'), expected_test_pass)
self.handler.add_handler(("Type", "TestsExpectedToPass"), expected_test_pass)
def test_passed(attrs: Dict[str, str]):
if attrs['Passed'] == '1':
if attrs["Passed"] == "1":
self.tests_passed += 1
self.handler.add_handler(('Type', 'TestResults'), test_passed)
self.handler.add_handler(("Type", "TestResults"), test_passed)
def remap_event_severity(attrs: Dict[str, str]):
self.severity_map[(attrs['TargetEvent'], int(attrs['OriginalSeverity']))] = int(attrs['NewSeverity'])
self.severity_map[
(attrs["TargetEvent"], int(attrs["OriginalSeverity"]))
] = int(attrs["NewSeverity"])
self.handler.add_handler(('Type', 'RemapEventSeverity'), remap_event_severity)
self.handler.add_handler(("Type", "RemapEventSeverity"), remap_event_severity)
def buggify_section(attrs: Dict[str, str]):
if attrs['Type'] == 'FaultInjected' or attrs.get('Activated', '0') == '1':
child = SummaryTree(attrs['Type'])
child.attributes['File'] = attrs['File']
child.attributes['Line'] = attrs['Line']
if attrs["Type"] == "FaultInjected" or attrs.get("Activated", "0") == "1":
child = SummaryTree(attrs["Type"])
child.attributes["File"] = attrs["File"]
child.attributes["Line"] = attrs["Line"]
self.out.append(child)
self.handler.add_handler(('Type', 'BuggifySection'), buggify_section)
self.handler.add_handler(('Type', 'FaultInjected'), buggify_section)
self.handler.add_handler(("Type", "BuggifySection"), buggify_section)
self.handler.add_handler(("Type", "FaultInjected"), buggify_section)
def running_unit_test(attrs: Dict[str, str]):
child = SummaryTree('RunningUnitTest')
child.attributes['Name'] = attrs['Name']
child.attributes['File'] = attrs['File']
child.attributes['Line'] = attrs['Line']
child = SummaryTree("RunningUnitTest")
child.attributes["Name"] = attrs["Name"]
child.attributes["File"] = attrs["File"]
child.attributes["Line"] = attrs["Line"]
self.handler.add_handler(('Type', 'RunningUnitTest'), running_unit_test)
self.handler.add_handler(("Type", "RunningUnitTest"), running_unit_test)
def stderr_severity(attrs: Dict[str, str]):
if 'NewSeverity' in attrs:
self.stderr_severity = attrs['NewSeverity']
if "NewSeverity" in attrs:
self.stderr_severity = attrs["NewSeverity"]
self.handler.add_handler(('Type', 'StderrSeverity'), stderr_severity)
self.handler.add_handler(("Type", "StderrSeverity"), stderr_severity)
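The handlers registered above all go through ParseHandler, whose add_handler keys callbacks on (attribute, value) pairs with None acting as a wildcard. A loose, simplified restatement of that dispatch idea (not the harness implementation, whose handle() is outside this hunk):

import collections
from typing import Callable, Dict, List, Optional, Tuple

ParserCallback = Callable[[Dict[str, str]], Optional[str]]


class MiniDispatcher:
    # Callbacks are keyed on (attribute, value); a None value matches any value.
    def __init__(self):
        self.events: "collections.OrderedDict[Tuple[str, Optional[str]], List[ParserCallback]]" = (
            collections.OrderedDict()
        )

    def add_handler(
        self, attr: Tuple[str, Optional[str]], callback: ParserCallback
    ) -> None:
        self.events.setdefault(attr, []).append(callback)

    def handle(self, attrs: Dict[str, str]) -> None:
        for k, v in attrs.items():
            for callback in self.events.get((k, None), []):  # wildcard on value
                callback(attrs)
            for callback in self.events.get((k, v), []):     # exact match
                callback(attrs)


d = MiniDispatcher()
d.add_handler(("Type", "ProgramStart"), lambda a: print("seed", a["RandomSeed"]))
d.add_handler(("Severity", None), lambda a: print("severity", a["Severity"]))
d.handle({"Type": "ProgramStart", "RandomSeed": "12345", "Severity": "10"})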

View File

@ -4,13 +4,17 @@ from test_harness.valgrind import parse_valgrind_output
from pathlib import Path
if __name__ == '__main__':
if __name__ == "__main__":
errors = parse_valgrind_output(Path(sys.argv[1]))
for valgrind_error in errors:
print('ValgrindError: what={}, kind={}'.format(valgrind_error.what.what, valgrind_error.kind))
print('Backtrace: {}'.format(valgrind_error.what.backtrace))
print(
"ValgrindError: what={}, kind={}".format(
valgrind_error.what.what, valgrind_error.kind
)
)
print("Backtrace: {}".format(valgrind_error.what.backtrace))
counter = 0
for aux in valgrind_error.aux:
print('Aux {}:'.format(counter))
print(' What: {}'.format(aux.what))
print(' Backtrace: {}'.format(aux.backtrace))
print("Aux {}:".format(counter))
print(" What: {}".format(aux.what))
print(" Backtrace: {}".format(aux.backtrace))

View File

@ -18,7 +18,9 @@ def files_matching(path: Path, pattern: Pattern, recurse: bool = True) -> List[P
return res
def dirs_with_files_matching(path: Path, pattern: Pattern, recurse: bool = True) -> List[Path]:
def dirs_with_files_matching(
path: Path, pattern: Pattern, recurse: bool = True
) -> List[Path]:
res: List[Path] = []
sub_directories: List[Path] = []
has_file = False
@ -36,25 +38,33 @@ def dirs_with_files_matching(path: Path, pattern: Pattern, recurse: bool = True)
return res
if __name__ == '__main__':
parser = argparse.ArgumentParser('TestHarness Timeout', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
"TestHarness Timeout", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
config.build_arguments(parser)
args = parser.parse_args()
config.extract_args(args)
valgrind_files: List[Path] = []
if config.use_valgrind:
valgrind_files = files_matching(Path.cwd(), re.compile(r'valgrind.*\.xml'))
valgrind_files = files_matching(Path.cwd(), re.compile(r"valgrind.*\.xml"))
for directory in dirs_with_files_matching(Path.cwd(), re.compile(r'trace.*\.(json|xml)'), recurse=True):
for directory in dirs_with_files_matching(
Path.cwd(), re.compile(r"trace.*\.(json|xml)"), recurse=True
):
trace_files = TraceFiles(directory)
for files in trace_files.items():
if config.use_valgrind:
for valgrind_file in valgrind_files:
summary = Summary(Path('bin/fdbserver'), was_killed=True)
summary = Summary(Path("bin/fdbserver"), was_killed=True)
summary.valgrind_out_file = valgrind_file
summary.summarize_files(files)
summary.out.dump(sys.stdout)
else:
summary = Summary(Path('bin/fdbserver'), was_killed=True, long_running=config.long_running)
summary = Summary(
Path("bin/fdbserver"),
was_killed=True,
long_running=config.long_running,
)
summary.summarize_files(files)
summary.out.dump(sys.stdout)

View File

@ -7,14 +7,14 @@ from typing import List
class ValgrindWhat:
def __init__(self):
self.what: str = ''
self.backtrace: str = ''
self.what: str = ""
self.backtrace: str = ""
class ValgrindError:
def __init__(self):
self.what: ValgrindWhat = ValgrindWhat()
self.kind: str = ''
self.kind: str = ""
self.aux: List[ValgrindWhat] = []
@ -73,62 +73,72 @@ class ValgrindHandler(xml.sax.handler.ContentHandler):
def startElement(self, name, attrs):
# pdb.set_trace()
if name == 'error':
if name == "error":
self.stack.append(ValgrindError())
self.state_stack.append(ValgrindParseState.ERROR)
if len(self.stack) == 0:
return
if name == 'kind':
if name == "kind":
self.state_stack.append(ValgrindParseState.KIND)
elif name == 'what':
elif name == "what":
self.state_stack.append(ValgrindParseState.WHAT)
elif name == 'auxwhat':
assert self.state() in [ValgrindParseState.ERROR, ValgrindParseState.ERROR_AUX]
elif name == "auxwhat":
assert self.state() in [
ValgrindParseState.ERROR,
ValgrindParseState.ERROR_AUX,
]
self.state_stack.pop()
self.state_stack.append(ValgrindParseState.ERROR_AUX)
self.state_stack.append(ValgrindParseState.AUX_WHAT)
self.stack[-1].aux.append(ValgrindWhat())
elif name == 'stack':
elif name == "stack":
state = self.state()
assert state in [ValgrindParseState.ERROR, ValgrindParseState.ERROR_AUX]
if state == ValgrindParseState.ERROR:
self.state_stack.append(ValgrindParseState.STACK)
else:
self.state_stack.append(ValgrindParseState.STACK_AUX)
elif name == 'ip':
elif name == "ip":
state = self.state()
assert state in [ValgrindParseState.STACK, ValgrindParseState.STACK_AUX]
if state == ValgrindParseState.STACK:
self.state_stack.append(ValgrindParseState.STACK_IP)
if len(self.stack[-1].what.backtrace) == 0:
self.stack[-1].what.backtrace = 'addr2line -e fdbserver.debug -p -C -f -i '
self.stack[
-1
].what.backtrace = "addr2line -e fdbserver.debug -p -C -f -i "
else:
self.stack[-1].what.backtrace += ' '
self.stack[-1].what.backtrace += " "
else:
self.state_stack.append(ValgrindParseState.STACK_IP_AUX)
if len(self.stack[-1].aux[-1].backtrace) == 0:
self.stack[-1].aux[-1].backtrace = 'addr2line -e fdbserver.debug -p -C -f -i '
self.stack[-1].aux[
-1
].backtrace = "addr2line -e fdbserver.debug -p -C -f -i "
else:
self.stack[-1].aux[-1].backtrace += ' '
self.stack[-1].aux[-1].backtrace += " "
def endElement(self, name):
# pdb.set_trace()
if name == 'error':
if name == "error":
self.result.append(self.stack.pop())
self.state_stack.pop()
elif name == 'kind':
elif name == "kind":
assert self.state() == ValgrindParseState.KIND
self.state_stack.pop()
elif name == 'what':
elif name == "what":
assert self.state() == ValgrindParseState.WHAT
self.state_stack.pop()
elif name == 'auxwhat':
elif name == "auxwhat":
assert self.state() == ValgrindParseState.AUX_WHAT
self.state_stack.pop()
elif name == 'stack':
assert self.state() in [ValgrindParseState.STACK, ValgrindParseState.STACK_AUX]
elif name == "stack":
assert self.state() in [
ValgrindParseState.STACK,
ValgrindParseState.STACK_AUX,
]
self.state_stack.pop()
elif name == 'ip':
elif name == "ip":
self.state_stack.pop()
state = self.state()
assert state in [ValgrindParseState.STACK, ValgrindParseState.STACK_AUX]
@ -136,6 +146,6 @@ class ValgrindHandler(xml.sax.handler.ContentHandler):
def parse_valgrind_output(valgrind_out_file: Path) -> List[ValgrindError]:
handler = ValgrindHandler()
with valgrind_out_file.open('r') as f:
with valgrind_out_file.open("r") as f:
xml.sax.parse(f, handler)
return handler.result
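parse_valgrind_output above drives a SAX handler through a small state machine keyed on element names. A minimal xml.sax sketch of the same handler style, with made-up input and only the "error" element handled:

import io
import xml.sax
import xml.sax.handler


class CountErrors(xml.sax.handler.ContentHandler):
    # Count <error> elements, the same event hook ValgrindHandler builds on.
    def __init__(self):
        super().__init__()
        self.count = 0

    def startElement(self, name, attrs):
        if name == "error":
            self.count += 1


handler = CountErrors()
xml.sax.parse(io.BytesIO(b"<root><error/><error/></root>"), handler)
print(handler.count)  # 2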

View File

@ -37,18 +37,18 @@ class Version:
return hash(self.version_tuple())
def __str__(self):
return format('{}.{}.{}'.format(self.major, self.minor, self.patch))
return format("{}.{}.{}".format(self.major, self.minor, self.patch))
@staticmethod
def of_binary(binary: Path):
parts = binary.name.split('-')
parts = binary.name.split("-")
if len(parts) != 2:
return Version.max_version()
return Version.parse(parts[1])
@staticmethod
def parse(version: str):
version_tuple = version.split('.')
version_tuple = version.split(".")
self = Version()
self.major = int(version_tuple[0])
if len(version_tuple) > 1:

View File

@ -27,11 +27,13 @@ from collections import defaultdict
allocs = {}
class Allocation:
def __init__(self, size, backtrace):
self.size = size
self.backtrace = backtrace
def print_stacks(stack_count, sort_by_count):
counts = defaultdict(int)
sizes = defaultdict(int)
@ -47,40 +49,92 @@ def print_stacks(stack_count, sort_by_count):
ordered_list = ordered_list[-stack_count:]
for size, backtrace in ordered_list:
print(str.format('bytes={0:<10} count={1:<8} {2}', sizes[backtrace], counts[backtrace], backtrace))
print(
str.format(
"bytes={0:<10} count={1:<8} {2}",
sizes[backtrace],
counts[backtrace],
backtrace,
)
)
print("-" * 80)
print('-'*80)
def process_line(line, quiet):
items = line.split('\t')
if items[0] == 'Alloc':
items = line.split("\t")
if items[0] == "Alloc":
allocs[items[1]] = Allocation(size=int(items[2]), backtrace=items[3])
elif items[0] == 'Dealloc':
elif items[0] == "Dealloc":
allocs.pop(items[1], None)
elif not quiet:
print(line)
def non_negative_int(value_str):
value = int(value_str)
if value < 0:
raise argparse.ArgumentTypeError("%s is negative" % value)
return value
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Parses the output from enabling ALLOC_INSTRUMENTATION in FoundationDB and reports information about the top memory users.')
parser.add_argument('input_file', type=str, help='Path to file(s) containing the output from a run of FoundationDB with ALLOC_INSTRUMENTATION enabled. If not specified, stdin will be used.', default='-', nargs='*')
parser.add_argument('-f', '--logging-frequency', type=non_negative_int, help='How frequently the top stacks will be logged, measured in lines of output processed. A value of 0 disables periodic logging. Defaults to 1,000,000.', default=1000000)
parser.add_argument('-p', '--periodic-stack-count', type=non_negative_int, help='How many stack traces to log when periodically logging output. A value of 0 results in all stacks being logged. Defaults to 15.', default=15)
parser.add_argument('-s', '--final-stack-count', type=non_negative_int, help='How many stack traces to log when finished processing output. A value of 0 results in all stacks being logged. Defaults to 0.', default=0)
parser.add_argument('-c', '--sort-by-count', action='store_true', default=False, help='If specified, stacks will be sorted by largest count rather than largest number of bytes.')
parser.add_argument('-q', '--quiet', action='store_true', default=False, help='If specified, lines from the input file that are not parsable by this tool will not be printed.')
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Parses the output from enabling ALLOC_INSTRUMENTATION in FoundationDB and reports information about the top memory users."
)
parser.add_argument(
"input_file",
type=str,
help="Path to file(s) containing the output from a run of FoundationDB with ALLOC_INSTRUMENTATION enabled. If not specified, stdin will be used.",
default="-",
nargs="*",
)
parser.add_argument(
"-f",
"--logging-frequency",
type=non_negative_int,
help="How frequently the top stacks will be logged, measured in lines of output processed. A value of 0 disables periodic logging. Defaults to 1,000,000.",
default=1000000,
)
parser.add_argument(
"-p",
"--periodic-stack-count",
type=non_negative_int,
help="How many stack traces to log when periodically logging output. A value of 0 results in all stacks being logged. Defaults to 15.",
default=15,
)
parser.add_argument(
"-s",
"--final-stack-count",
type=non_negative_int,
help="How many stack traces to log when finished processing output. A value of 0 results in all stacks being logged. Defaults to 0.",
default=0,
)
parser.add_argument(
"-c",
"--sort-by-count",
action="store_true",
default=False,
help="If specified, stacks will be sorted by largest count rather than largest number of bytes.",
)
parser.add_argument(
"-q",
"--quiet",
action="store_true",
default=False,
help="If specified, lines from the input file that are not parsable by this tool will not be printed.",
)
args = parser.parse_args()
# Process each line, periodically reporting the top stacks by size
for line_num, line in enumerate(fileinput.input(args.input_file)):
process_line(line.rstrip(), args.quiet)
if args.logging_frequency and line_num and line_num % args.logging_frequency == 0:
if (
args.logging_frequency
and line_num
and line_num % args.logging_frequency == 0
):
print_stacks(args.periodic_stack_count, args.sort_by_count)
# Print all stacks

View File

@ -41,34 +41,33 @@ totalSize = 0
lastTimestamp = ""
for line in sys.stdin:
ev = json.loads(line.rstrip())
type = ev["Type"]
ev = json.loads(line.rstrip())
type = ev["Type"]
if (type != 'MemSample'):
continue
bt = ev["Bt"]
if type != "MemSample":
continue
bt = ev["Bt"]
if (bt == "na"):
continue
if bt == "na":
continue
timestamp = ev["Time"]
cnt = int(ev["Count"])
scnt = int(ev["SampleCount"])
size = int(ev["TotalSize"])
h = ev["Hash"]
timestamp = ev["Time"]
cnt = int(ev["Count"])
scnt = int(ev["SampleCount"])
size = int(ev["TotalSize"])
h = ev["Hash"]
if (timestamp != lastTimestamp):
byCnt = []
bySize = []
totalSize = 0
lastTimestamp = timestamp
if timestamp != lastTimestamp:
byCnt = []
bySize = []
totalSize = 0
lastTimestamp = timestamp
# print(str(cnt) + " " + str(scnt) + " " + str(size) + " " + h)
# print(str(cnt) + " " + str(scnt) + " " + str(size) + " " + h)
byCnt.append( (cnt, scnt, size, h, bt) )
bySize.append( (size, cnt, size, h, bt) )
totalSize += size
byCnt.append((cnt, scnt, size, h, bt))
bySize.append((size, cnt, size, h, bt))
totalSize += size
byCnt.sort(reverse=True)
bySize.sort(reverse=True)
@ -76,25 +75,45 @@ bySize.sort(reverse=True)
btByHash = {}
byte_suffix = ["Bytes", "KB", "MB", "GB", "TB"]
def byte_str(bytes):
suffix_idx = 0
while (bytes >= 1024 * 10):
suffix_idx += 1
bytes //= 1024
return str(bytes) + ' ' + byte_suffix[suffix_idx]
suffix_idx = 0
while bytes >= 1024 * 10:
suffix_idx += 1
bytes //= 1024
return str(bytes) + " " + byte_suffix[suffix_idx]
print("By Size")
print("-------\r\n")
for x in bySize[:10]:
# print(str(x[0]) + ": " + x[3])
print(str(x[1]) + " / " + byte_str(x[0]) + " (" + byte_str(x[0] // x[1]) + " per alloc):\r\n" + x[4] + "\r\n")
btByHash[x[3]] = x[4]
# print(str(x[0]) + ": " + x[3])
print(
str(x[1])
+ " / "
+ byte_str(x[0])
+ " ("
+ byte_str(x[0] // x[1])
+ " per alloc):\r\n"
+ x[4]
+ "\r\n"
)
btByHash[x[3]] = x[4]
print()
print("By Count")
print("--------\r\n")
for x in byCnt[:5]:
# print(str(x[0]) + ": " + x[3])
print(str(x[0]) + " / " + byte_str(x[2]) + " (" + byte_str(x[2] // x[0]) + " per alloc):\r\n" + x[4] + "\r\n")
btByHash[x[3]] = x[4]
# print(str(x[0]) + ": " + x[3])
print(
str(x[0])
+ " / "
+ byte_str(x[2])
+ " ("
+ byte_str(x[2] // x[0])
+ " per alloc):\r\n"
+ x[4]
+ "\r\n"
)
btByHash[x[3]] = x[4]
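
As a side note on the byte_str helper above: it keeps dividing by 1024 while the value is still at least 10 * 1024, so mid-sized totals stay in the smaller unit. A minimal standalone sketch (toy values, not from a real trace):

byte_suffix = ["Bytes", "KB", "MB", "GB", "TB"]

def byte_str(bytes):
    suffix_idx = 0
    while bytes >= 1024 * 10:
        suffix_idx += 1
        bytes //= 1024
    return str(bytes) + " " + byte_suffix[suffix_idx]

print(byte_str(4096))              # "4096 Bytes"
print(byte_str(5 * 1024 * 1024))   # "5120 KB" (still below the 10240 threshold after one division)
print(byte_str(64 * 1024 * 1024))  # "64 MB"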

View File

@ -27,61 +27,111 @@ import sys
import traceback
LOG_FORMAT = '%(created)f [%(levelname)s] %(message)s'
LOG_FORMAT = "%(created)f [%(levelname)s] %(message)s"
EXCLUDED_FILES = list(map(re.compile, [
# Output directories
r'\.git/.*', r'bin/.*', r'packages/.*', r'\.objs/.*', r'\.deps/.*', r'bindings/go/build/.*', r'documentation/sphinx/\.out/.*',
EXCLUDED_FILES = list(
map(
re.compile,
[
# Output directories
r"\.git/.*",
r"bin/.*",
r"packages/.*",
r"\.objs/.*",
r"\.deps/.*",
r"bindings/go/build/.*",
r"documentation/sphinx/\.out/.*",
# Generated files
r".*\.g\.cpp$",
r".*\.g\.h$",
r"(^|.*/)generated.mk$",
r".*\.g\.S$",
r".*/MutationType\.java",
r".*/generated\.go",
# Binary files
r".*\.class$",
r".*\.o$",
r".*\.a$",
r".*[\.-]debug",
r".*\.so$",
r".*\.dylib$",
r".*\.dll$",
r".*\.tar[^/]*$",
r".*\.jar$",
r".*pyc$",
r"bindings/flow/bin/.*",
r".*\.pdf$",
r".*\.jp[e]*g",
r".*\.png",
r".*\.ico",
r"packaging/msi/art/.*",
# Project configuration files
r".*foundationdb\.VC\.db$",
r".*foundationdb\.VC\.VC\.opendb$",
r".*iml$",
# Source files from someone else
r"(^|.*/)Hash3\..*",
r"(^|.*/)sqlite.*",
r"bindings/go/godoc-resources/.*",
r"bindings/go/src/fdb/tuple/testdata/tuples.golden",
r"fdbcli/linenoise/.*",
r"contrib/rapidjson/.*",
r"fdbrpc/rapidxml/.*",
r"fdbrpc/zlib/.*",
r"fdbrpc/sha1/.*",
r"fdbrpc/xml2json.hpp$",
r"fdbrpc/libcoroutine/.*",
r"fdbrpc/libeio/.*",
r"fdbrpc/lib64/.*",
r"fdbrpc/generated-constants.cpp$",
# Miscellaneous
r"bindings/nodejs/node_modules/.*",
r"bindings/go/godoc/.*",
r".*trace.*xml$",
r".*log$",
r".*\.DS_Store$",
r"simfdb/\.*",
r".*~$",
r".*.swp$",
],
)
)
# Generated files
r'.*\.g\.cpp$', r'.*\.g\.h$', r'(^|.*/)generated.mk$', r'.*\.g\.S$',
r'.*/MutationType\.java', r'.*/generated\.go',
SUSPECT_PHRASES = map(
re.compile,
[
r"#define\s+FDB_API_VERSION\s+(\d+)",
r"\.\s*selectApiVersion\s*\(\s*(\d+)\s*\)",
r"\.\s*APIVersion\s*\(\s*(\d+)\s*\)",
r"\.\s*MustAPIVersion\s*\(\s*(\d+)\s*\)",
r"header_version\s+=\s+(\d+)",
r"\.\s*apiVersion\s*\(\s*(\d+)\s*\)",
r"API_VERSION\s*=\s*(\d+)",
r"fdb_select_api_version\s*\((\d+)\)",
],
)
# Binary files
r'.*\.class$', r'.*\.o$', r'.*\.a$', r'.*[\.-]debug', r'.*\.so$', r'.*\.dylib$', r'.*\.dll$', r'.*\.tar[^/]*$', r'.*\.jar$', r'.*pyc$', r'bindings/flow/bin/.*',
r'.*\.pdf$', r'.*\.jp[e]*g', r'.*\.png', r'.*\.ico',
r'packaging/msi/art/.*',
# Project configuration files
r'.*foundationdb\.VC\.db$', r'.*foundationdb\.VC\.VC\.opendb$', r'.*iml$',
# Source files from someone else
r'(^|.*/)Hash3\..*', r'(^|.*/)sqlite.*',
r'bindings/go/godoc-resources/.*',
r'bindings/go/src/fdb/tuple/testdata/tuples.golden',
r'fdbcli/linenoise/.*',
r'contrib/rapidjson/.*', r'fdbrpc/rapidxml/.*', r'fdbrpc/zlib/.*', r'fdbrpc/sha1/.*',
r'fdbrpc/xml2json.hpp$', r'fdbrpc/libcoroutine/.*', r'fdbrpc/libeio/.*', r'fdbrpc/lib64/.*',
r'fdbrpc/generated-constants.cpp$',
# Miscellaneous
r'bindings/nodejs/node_modules/.*', r'bindings/go/godoc/.*', r'.*trace.*xml$', r'.*log$', r'.*\.DS_Store$', r'simfdb/\.*', r'.*~$', r'.*.swp$'
]))
SUSPECT_PHRASES = map(re.compile, [
r'#define\s+FDB_API_VERSION\s+(\d+)',
r'\.\s*selectApiVersion\s*\(\s*(\d+)\s*\)',
r'\.\s*APIVersion\s*\(\s*(\d+)\s*\)',
r'\.\s*MustAPIVersion\s*\(\s*(\d+)\s*\)',
r'header_version\s+=\s+(\d+)',
r'\.\s*apiVersion\s*\(\s*(\d+)\s*\)',
r'API_VERSION\s*=\s*(\d+)',
r'fdb_select_api_version\s*\((\d+)\)'
])
DIM_CODE = '\033[2m'
BOLD_CODE = '\033[1m'
RED_COLOR = '\033[91m'
GREEN_COLOR = '\033[92m'
END_COLOR = '\033[0m'
DIM_CODE = "\033[2m"
BOLD_CODE = "\033[1m"
RED_COLOR = "\033[91m"
GREEN_COLOR = "\033[92m"
END_COLOR = "\033[0m"
def positive_response(val):
return val.lower() in {'y', 'yes'}
return val.lower() in {"y", "yes"}
# Returns: new line list + a dirty flag
def rewrite_lines(lines, version_re, new_version, suspect_only=True, print_diffs=False, ask_confirm=False, grayscale=False):
def rewrite_lines(
lines,
version_re,
new_version,
suspect_only=True,
print_diffs=False,
ask_confirm=False,
grayscale=False,
):
new_lines = []
dirty = False
new_str = str(new_version)
@ -96,20 +146,48 @@ def rewrite_lines(lines, version_re, new_version, suspect_only=True, print_diffs
# Replace suspect code with new version.
start = m.start(group_index)
end = m.end(group_index)
new_line = new_line[:start + offset] + new_str + new_line[end + offset:]
new_line = (
new_line[: start + offset] + new_str + new_line[end + offset :]
)
offset += len(new_str) - (end - start)
if (print_diffs or ask_confirm) and line != new_line:
print('Rewrite:')
print('\n'.join(map(lambda pair: ' {:4d}: {}'.format(line_no - 1 + pair[0], pair[1]), enumerate(lines[line_no - 2:line_no]))))
print((DIM_CODE if grayscale else RED_COLOR) + '-{:4d}: {}'.format(line_no + 1, line) + END_COLOR)
print((BOLD_CODE if grayscale else GREEN_COLOR) + '+{:4d}: {}'.format(line_no + 1, new_line) + END_COLOR)
print('\n'.join(map(lambda pair: ' {:4d}: {}'.format(line_no + 2 + pair[0], pair[1]), enumerate(lines[line_no + 1:line_no + 3]))))
print("Rewrite:")
print(
"\n".join(
map(
lambda pair: " {:4d}: {}".format(
line_no - 1 + pair[0], pair[1]
),
enumerate(lines[line_no - 2 : line_no]),
)
)
)
print(
(DIM_CODE if grayscale else RED_COLOR)
+ "-{:4d}: {}".format(line_no + 1, line)
+ END_COLOR
)
print(
(BOLD_CODE if grayscale else GREEN_COLOR)
+ "+{:4d}: {}".format(line_no + 1, new_line)
+ END_COLOR
)
print(
"\n".join(
map(
lambda pair: " {:4d}: {}".format(
line_no + 2 + pair[0], pair[1]
),
enumerate(lines[line_no + 1 : line_no + 3]),
)
)
)
if ask_confirm:
text = input('Looks good (y/n)? ')
text = input("Looks good (y/n)? ")
if not positive_response(text):
print('Okay, skipping.')
print("Okay, skipping.")
new_line = line
dirty = dirty or (new_line != line)
@ -118,17 +196,27 @@ def rewrite_lines(lines, version_re, new_version, suspect_only=True, print_diffs
return new_lines, dirty
def address_file(base_path, file_path, version, new_version=None, suspect_only=False, show_diffs=False,
rewrite=False, ask_confirm=True, grayscale=False, paths_only=False):
def address_file(
base_path,
file_path,
version,
new_version=None,
suspect_only=False,
show_diffs=False,
rewrite=False,
ask_confirm=True,
grayscale=False,
paths_only=False,
):
if any(map(lambda x: x.match(file_path), EXCLUDED_FILES)):
logging.debug('skipping file %s as matches excluded list', file_path)
logging.debug("skipping file %s as matches excluded list", file_path)
return True
# Look for all instances of the version number where it is not part of a larger number
version_re = re.compile('(^|[^\\d])(' + str(version) + ')([^\\d]|$)')
version_re = re.compile("(^|[^\\d])(" + str(version) + ")([^\\d]|$)")
try:
contents = open(os.path.join(base_path, file_path), 'r').read()
lines = contents.split('\n')
contents = open(os.path.join(base_path, file_path), "r").read()
lines = contents.split("\n")
new_lines = lines
dirty = False
@ -139,81 +227,178 @@ def address_file(base_path, file_path, version, new_version=None, suspect_only=F
for suspect_phrase in SUSPECT_PHRASES:
for match in suspect_phrase.finditer(line):
curr_version = int(match.groups()[0])
if (new_version is None and curr_version < version) or (new_version is not None and curr_version < new_version):
if (new_version is None and curr_version < version) or (
new_version is not None and curr_version < new_version
):
found = True
logging.info('Old version: %s:%d:%s', file_path, line_no + 1, line)
logging.info(
"Old version: %s:%d:%s", file_path, line_no + 1, line
)
if found and new_version is not None and (show_diffs or rewrite):
new_lines, dirty = rewrite_lines(lines, version_re, new_version, True, print_diffs=True,
ask_confirm=(rewrite and ask_confirm), grayscale=grayscale)
new_lines, dirty = rewrite_lines(
lines,
version_re,
new_version,
True,
print_diffs=True,
ask_confirm=(rewrite and ask_confirm),
grayscale=grayscale,
)
else:
matching_lines = filter(lambda pair: version_re.search(pair[1]), enumerate(lines))
matching_lines = filter(
lambda pair: version_re.search(pair[1]), enumerate(lines)
)
# Look for lines with the version
if matching_lines:
if paths_only:
logging.info('File %s matches', file_path)
logging.info("File %s matches", file_path)
else:
for line_no, line in matching_lines:
logging.info('Match: %s:%d:%s', file_path, line_no + 1, line)
logging.info("Match: %s:%d:%s", file_path, line_no + 1, line)
if new_version is not None and (show_diffs or rewrite):
new_lines, dirty = rewrite_lines(lines, version_re, new_version, False, print_diffs=True,
ask_confirm=(rewrite and ask_confirm), grayscale=grayscale)
new_lines, dirty = rewrite_lines(
lines,
version_re,
new_version,
False,
print_diffs=True,
ask_confirm=(rewrite and ask_confirm),
grayscale=grayscale,
)
else:
logging.debug('File %s does not match', file_path)
logging.debug("File %s does not match", file_path)
if dirty and rewrite:
logging.info('Rewriting %s', os.path.join(base_path, file_path))
with open(os.path.join(base_path, file_path), 'w') as fout:
fout.write('\n'.join(new_lines))
logging.info("Rewriting %s", os.path.join(base_path, file_path))
with open(os.path.join(base_path, file_path), "w") as fout:
fout.write("\n".join(new_lines))
return True
except (OSError, UnicodeDecodeError) as e:
logging.exception('Unable to read file %s due to OSError', os.path.join(base_path, file_path))
logging.exception(
"Unable to read file %s due to OSError", os.path.join(base_path, file_path)
)
return False
def address_path(path, version, new_version=None, suspect_only=False, show_diffs=False, rewrite=False, ask_confirm=True, grayscale=False, paths_only=False):
def address_path(
path,
version,
new_version=None,
suspect_only=False,
show_diffs=False,
rewrite=False,
ask_confirm=True,
grayscale=False,
paths_only=False,
):
try:
if os.path.exists(path):
if os.path.isdir(path):
status = True
for dir_path, dir_names, file_names in os.walk(path):
for file_name in file_names:
file_path = os.path.relpath(os.path.join(dir_path, file_name), path)
status = address_file(path, file_path, version, new_version, suspect_only, show_diffs,
rewrite, ask_confirm, grayscale, paths_only) and status
file_path = os.path.relpath(
os.path.join(dir_path, file_name), path
)
status = (
address_file(
path,
file_path,
version,
new_version,
suspect_only,
show_diffs,
rewrite,
ask_confirm,
grayscale,
paths_only,
)
and status
)
return status
else:
base_name, file_name = os.path.split(path)
return address_file(base_name, file_name, version, new_version, suspect_only, show_diffs, rewrite, ask_confirm, grayscale)
return address_file(
base_name,
file_name,
version,
new_version,
suspect_only,
show_diffs,
rewrite,
ask_confirm,
grayscale,
)
else:
logging.error('Path %s does not exist', path)
logging.error("Path %s does not exist", path)
return False
except OSError as e:
logging.exception('Unable to find all API versions due to OSError')
logging.exception("Unable to find all API versions due to OSError")
return False
def run(arg_list):
parser = argparse.ArgumentParser(description='finds and rewrites the API version in FDB source files')
parser.add_argument('path', help='path to search for FDB source files')
parser.add_argument('version', type=int, help='current/old version to search for')
parser.add_argument('--new-version', type=int, default=None, help='new version to update to')
parser.add_argument('--suspect-only', action='store_true', default=False, help='only look for phrases trying to set the API version')
parser.add_argument('--show-diffs', action='store_true', default=False, help='show suggested diffs for fixing version')
parser.add_argument('--rewrite', action='store_true', default=False, help='rewrite offending files')
parser.add_argument('-y', '--skip-confirm', action='store_true', default=False, help='do not ask for confirmation before rewriting')
parser.add_argument('--grayscale', action='store_true', default=False,
help='print diffs using grayscale output instead of red and green')
parser.add_argument('--paths-only', action='store_true', default=False, help='display only the path instead of the offending lines')
parser = argparse.ArgumentParser(
description="finds and rewrites the API version in FDB source files"
)
parser.add_argument("path", help="path to search for FDB source files")
parser.add_argument("version", type=int, help="current/old version to search for")
parser.add_argument(
"--new-version", type=int, default=None, help="new version to update to"
)
parser.add_argument(
"--suspect-only",
action="store_true",
default=False,
help="only look for phrases trying to set the API version",
)
parser.add_argument(
"--show-diffs",
action="store_true",
default=False,
help="show suggested diffs for fixing version",
)
parser.add_argument(
"--rewrite", action="store_true", default=False, help="rewrite offending files"
)
parser.add_argument(
"-y",
"--skip-confirm",
action="store_true",
default=False,
help="do not ask for confirmation before rewriting",
)
parser.add_argument(
"--grayscale",
action="store_true",
default=False,
help="print diffs using grayscale output instead of red and green",
)
parser.add_argument(
"--paths-only",
action="store_true",
default=False,
help="display only the path instead of the offending lines",
)
args = parser.parse_args(arg_list)
return address_path(args.path, args.version, args.new_version, args.suspect_only, args.show_diffs,
args.rewrite, not args.skip_confirm, args.grayscale, args.paths_only)
return address_path(
args.path,
args.version,
args.new_version,
args.suspect_only,
args.show_diffs,
args.rewrite,
not args.skip_confirm,
args.grayscale,
args.paths_only,
)
if __name__ == '__main__':
if __name__ == "__main__":
logging.basicConfig(format=LOG_FORMAT, level=logging.INFO)
if not run(sys.argv[1:]):
exit(1)
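
A quick illustration of the version_re built in address_file above: it matches the version number only when it is not embedded in a larger number. A minimal sketch (the version value 630 is hypothetical):

import re

version = 630
version_re = re.compile("(^|[^\\d])(" + str(version) + ")([^\\d]|$)")
print(bool(version_re.search("API_VERSION = 630")))            # True
print(bool(version_re.search("fdb_select_api_version(630)")))  # True
print(bool(version_re.search("value = 16300")))                # False: 630 is part of a larger number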

View File

@ -14,108 +14,119 @@ import json
#
# And then open Chrome, navigate to chrome://tracing, and load the tracing.json
def parse_args():
args = argparse.ArgumentParser()
args.add_argument('path')
args.add_argument('output')
args.add_argument("path")
args.add_argument("output")
return args.parse_args()
# When encountering an event with this location, use this as the (b)egin or
# (e)nd of a span with a better given name
locationToPhase = {
"NativeAPI.commit.Before": [],
"CommitProxyServer.batcher": [("b", "Commit")],
"CommitProxyServer.commitBatch.Before": [],
"CommitProxyServer.commitBatch.GettingCommitVersion": [("b", "CommitVersion")],
"CommitProxyServer.commitBatch.GotCommitVersion": [("e", "CommitVersion")],
"Resolver.resolveBatch.Before": [("b", "Resolver.PipelineWait")],
"Resolver.resolveBatch.AfterQueueSizeCheck": [],
"Resolver.resolveBatch.AfterOrderer": [("e", "Resolver.PipelineWait"), ("b", "Resolver.Conflicts")],
"Resolver.resolveBatch.After": [("e", "Resolver.Conflicts")],
"CommitProxyServer.commitBatch.AfterResolution": [("b", "Proxy.Processing")],
"CommitProxyServer.commitBatch.ProcessingMutations": [],
"CommitProxyServer.commitBatch.AfterStoreCommits": [("e", "Proxy.Processing")],
"TLog.tLogCommit.BeforeWaitForVersion": [("b", "TLog.PipelineWait")],
"TLog.tLogCommit.Before": [("e", "TLog.PipelineWait")],
"TLog.tLogCommit.AfterTLogCommit": [("b", "TLog.FSync")],
"TLog.tLogCommit.After": [("e", "TLog.FSync")],
"CommitProxyServer.commitBatch.AfterLogPush": [("e", "Commit")],
"NativeAPI.commit.After": [],
"NativeAPI.commit.Before": [],
"CommitProxyServer.batcher": [("b", "Commit")],
"CommitProxyServer.commitBatch.Before": [],
"CommitProxyServer.commitBatch.GettingCommitVersion": [("b", "CommitVersion")],
"CommitProxyServer.commitBatch.GotCommitVersion": [("e", "CommitVersion")],
"Resolver.resolveBatch.Before": [("b", "Resolver.PipelineWait")],
"Resolver.resolveBatch.AfterQueueSizeCheck": [],
"Resolver.resolveBatch.AfterOrderer": [
("e", "Resolver.PipelineWait"),
("b", "Resolver.Conflicts"),
],
"Resolver.resolveBatch.After": [("e", "Resolver.Conflicts")],
"CommitProxyServer.commitBatch.AfterResolution": [("b", "Proxy.Processing")],
"CommitProxyServer.commitBatch.ProcessingMutations": [],
"CommitProxyServer.commitBatch.AfterStoreCommits": [("e", "Proxy.Processing")],
"TLog.tLogCommit.BeforeWaitForVersion": [("b", "TLog.PipelineWait")],
"TLog.tLogCommit.Before": [("e", "TLog.PipelineWait")],
"TLog.tLogCommit.AfterTLogCommit": [("b", "TLog.FSync")],
"TLog.tLogCommit.After": [("e", "TLog.FSync")],
"CommitProxyServer.commitBatch.AfterLogPush": [("e", "Commit")],
"NativeAPI.commit.After": [],
}
class CommitDebugHandler(xml.sax.ContentHandler, object):
def __init__(self, f):
self._f = f
self._f.write('[ ') # Trace viewer adds the missing ] for us
self._f.write("[ ") # Trace viewer adds the missing ] for us
self._starttime = None
self._data = dict()
def _emit(self, d):
self._f.write(json.dumps(d) + ', ')
self._f.write(json.dumps(d) + ", ")
def startElement(self, name, attrs):
# I've flipped from using Async spans to Duration spans, because
# I kept on running into issues with trace viewer believing there
# is no start or end of an emitted span even when there actually is.
if name == "Event" and attrs.get('Type') == "CommitDebug":
if name == "Event" and attrs.get("Type") == "CommitDebug":
if self._starttime is None:
self._starttime = float(attrs['Time'])
self._starttime = float(attrs["Time"])
attr_id = attrs['ID']
attr_id = attrs["ID"]
# Trace viewer doesn't seem to care about types, so use host as pid and port as tid
(pid, tid) = attrs['Machine'].split(':')
(pid, tid) = attrs["Machine"].split(":")
traces = locationToPhase[attrs["Location"]]
for (phase, name) in traces:
if phase == "b":
self._data[(attrs['Machine'], name)] = float(attrs['Time'])
self._data[(attrs["Machine"], name)] = float(attrs["Time"])
else:
starttime = self._data.get((attrs['Machine'], name))
starttime = self._data.get((attrs["Machine"], name))
if starttime is None:
return
trace = {
# ts and dur are in microseconds
"ts": (starttime - self._starttime) * 1000 * 1000 + 0.001,
"dur": (float(attrs['Time']) - starttime) * 1000 * 1000,
"dur": (float(attrs["Time"]) - starttime) * 1000 * 1000,
"cat": "commit",
"name": name,
"ph": "X",
"pid": pid,
"tid": tid }
"tid": tid,
}
self._emit(trace)
def do_file(args, handler, filename):
openfn = gzip.open if filename.endswith('.gz') else open
openfn = gzip.open if filename.endswith(".gz") else open
try:
with openfn(filename) as f:
xml.sax.parse(f, handler)
except xml.sax._exceptions.SAXParseException as e:
print(e)
def main():
args = parse_args()
handler = CommitDebugHandler(open(args.output, 'w'))
handler = CommitDebugHandler(open(args.output, "w"))
def xmliter(filename):
for line in gzip.open(filename):
if line.startswith("<Event"):
start = line.find("Time=")+6
start = line.find("Time=") + 6
end = line.find('"', start)
tx = float(line[start:end])
yield (tx, line)
if os.path.isdir(args.path):
combined_xml = os.path.join(args.path, 'combined.xml.gz')
combined_xml = os.path.join(args.path, "combined.xml.gz")
if not os.path.exists(combined_xml):
files = [xmliter(filename) for filename in glob.glob(os.path.join(args.path, "*.xml.gz"))]
files = [
xmliter(filename)
for filename in glob.glob(os.path.join(args.path, "*.xml.gz"))
]
merged = heapq.merge(*files)
with gzip.open(combined_xml, 'w') as f:
f.write('<Trace>')
with gzip.open(combined_xml, "w") as f:
f.write("<Trace>")
for line in merged:
f.write(line[1])
f.write('</Trace>')
f.write("</Trace>")
do_file(args, handler, combined_xml)
else:
do_file(args, handler, args.path)
@ -123,5 +134,5 @@ def main():
return 0
if __name__ == '__main__':
if __name__ == "__main__":
sys.exit(main())
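
To make the begin/end pairing above concrete: a CommitDebug event at Location "CommitProxyServer.batcher" opens a "Commit" span for its Machine, and a later "CommitProxyServer.commitBatch.AfterLogPush" event on the same Machine closes it, producing one complete ("ph": "X") trace viewer record. The values below are made up for illustration:

example_event = {
    "ts": 1250.001,     # microseconds since the first event seen
    "dur": 830.0,       # (end Time - begin Time) in microseconds
    "cat": "commit",
    "name": "Commit",
    "ph": "X",          # a "complete" span: begin and end in a single record
    "pid": "10.0.0.1",  # host part of the Machine attribute
    "tid": "4500",      # port part of the Machine attribute
}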

View File

@ -34,6 +34,7 @@ import math as m
# https://github.com/DataDog/sketches-java/
# in the file CubicallyInterpolatedMapping.java
class DDSketch(object):
A = 6.0 / 35.0
B = -3.0 / 5.0
@ -59,14 +60,21 @@ class DDSketch(object):
def reverseLog(self, index):
exponent = m.floor(index)
d0 = self.B * self.B - 3 * self.A * self.C
d1 = 2 * self.B * self.B * self.B - 9 * self.A * self.B * self.C - 27 * self.A * self.A * (index - exponent)
d1 = (
2 * self.B * self.B * self.B
- 9 * self.A * self.B * self.C
- 27 * self.A * self.A * (index - exponent)
)
p = np.cbrt((d1 - np.sqrt(d1 * d1 - 4 * d0 * d0 * d0)) / 2)
significandPlusOne = - (self.B + p + d0 / p) / (3 * self.A) + 1
significandPlusOne = -(self.B + p + d0 / p) / (3 * self.A) + 1
return np.ldexp(significandPlusOne / 2, exponent + 1)
def getIndex(self, sample):
return m.ceil(self.fastlog(sample) * self.multiplier) + self.offset
def getValue(self, idx):
return self.reverseLog((idx - self.offset) / self.multiplier) * 2.0 / (1 + self.gamma)
return (
self.reverseLog((idx - self.offset) / self.multiplier)
* 2.0
/ (1 + self.gamma)
)
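
For context, getIndex and getValue above are designed to be near-inverses: a sample maps to a bucket whose representative value stays within the sketch's configured relative error. A minimal round-trip sketch, assuming the DDSketch constructor takes the error guarantee as its argument (as the companion ddsketch scripts in this commit use it):

import ddsketch_calc as dd

sketch = dd.DDSketch(0.005)           # 0.5% relative error
sample = 12345.0
idx = sketch.getIndex(sample)         # bucket index for the sample
approx = sketch.getValue(idx)         # representative value for that bucket
print(abs(approx - sample) / sample)  # should stay within the configured relative error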

View File

@ -29,9 +29,10 @@ def relative_entropy(p, q):
difference = 0.0
for i in range(len(p)):
if p[i] != 0.0 and q[i] != 0.0:
difference += (p[i] * np.log2(p[i]/q[i]))
difference += p[i] * np.log2(p[i] / q[i])
return difference
# jensen-shannon divergence (or symmetric relative entropy)
def relative_entropy_symmetric(dd1, dd2):
# normalize p, q into distribution
@ -44,13 +45,22 @@ def relative_entropy_symmetric(dd1, dd2):
return 0.5 * relative_entropy(p, m) + 0.5 * relative_entropy(q, m)
# setup cmdline args
parser = argparse.ArgumentParser(description="Compares two DDSketch distributions")
parser.add_argument('--txn1', help='Transaction type for first file', required=True, type=str)
parser.add_argument('--txn2', help='Transaction type for second file', required=True, type=str)
parser.add_argument('--file1', help='Path to first ddsketch json', required=True, type=str)
parser.add_argument('--file2', help="Path to second ddsketch json", required=True, type=str)
parser.add_argument("--op", help='Operation name', type=str)
parser.add_argument(
"--txn1", help="Transaction type for first file", required=True, type=str
)
parser.add_argument(
"--txn2", help="Transaction type for second file", required=True, type=str
)
parser.add_argument(
"--file1", help="Path to first ddsketch json", required=True, type=str
)
parser.add_argument(
"--file2", help="Path to second ddsketch json'", required=True, type=str
)
parser.add_argument("--op", help="Operation name", type=str)
args = parser.parse_args()
f1 = open(args.file1)
@ -58,7 +68,10 @@ f2 = open(args.file2)
data1 = json.load(f1)
data2 = json.load(f2)
if data1[args.txn1][args.op]["errorGuarantee"] != data2[args.txn2][args.op]["errorGuarantee"]:
if (
data1[args.txn1][args.op]["errorGuarantee"]
!= data2[args.txn2][args.op]["errorGuarantee"]
):
print("ERROR: The sketches have different error guarantees and cannot be compared!")
exit()
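
The comparison above is the Jensen-Shannon divergence: relative entropy is taken in both directions against the midpoint distribution m = (p + q) / 2 and averaged, which makes the result symmetric and finite. A self-contained toy example (the distributions are made up):

import numpy as np

def relative_entropy(p, q):
    # same helper as above: sum of p[i] * log2(p[i] / q[i]) over entries where both are non-zero
    difference = 0.0
    for i in range(len(p)):
        if p[i] != 0.0 and q[i] != 0.0:
            difference += p[i] * np.log2(p[i] / q[i])
    return difference

p = [0.1, 0.4, 0.5]
q = [0.2, 0.3, 0.5]
m = [(a + b) / 2.0 for a, b in zip(p, q)]
print(0.5 * relative_entropy(p, m) + 0.5 * relative_entropy(q, m))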

View File

@ -24,9 +24,15 @@ import ddsketch_calc as dd
parser = argparse.ArgumentParser(description="Converts values to DDSketch buckets")
parser.add_argument('-e', '--error_guarantee', help='Error guarantee (default is 0.005)', required=False, type=float)
parser.add_argument('-v', '--value', help="Value", required=False, type=int)
parser.add_argument('-b', '--bucket', help='Bucket index', required=False, type=int)
parser.add_argument(
"-e",
"--error_guarantee",
help="Error guarantee (default is 0.005)",
required=False,
type=float,
)
parser.add_argument("-v", "--value", help="Value", required=False, type=int)
parser.add_argument("-b", "--bucket", help="Bucket index", required=False, type=int)
args = parser.parse_args()
error = 0.005

View File

@ -26,11 +26,13 @@ import ddsketch_calc as dd
# setup cmdline args
parser = argparse.ArgumentParser(description="Graphs DDSketch distribution")
parser.add_argument('-t', '--txn', help='Transaction type (ex: g8ui)', required=True, type=str)
parser.add_argument('--file', help='Path to ddsketch json', required=True, type=str)
parser.add_argument('--title', help='Title for the graph', required=False, type=str)
parser.add_argument('--savefig', help='Will save the plot to a file if set', type=str)
parser.add_argument('--op', help='Which OP to plot (casing matters)', type=str)
parser.add_argument(
"-t", "--txn", help="Transaction type (ex: g8ui)", required=True, type=str
)
parser.add_argument("--file", help="Path to ddsketch json", required=True, type=str)
parser.add_argument("--title", help="Title for the graph", required=False, type=str)
parser.add_argument("--savefig", help="Will save the plot to a file if set", type=str)
parser.add_argument("--op", help="Which OP to plot (casing matters)", type=str)
args = parser.parse_args()
@ -45,13 +47,13 @@ sketch = dd.DDSketch(error)
# trim the tails of the distribution
ls = [i for i, e in enumerate(buckets) if e != 0]
actual_data = buckets[ls[0]:ls[-1]+1]
indices = range(ls[0], ls[-1]+1)
actual_data = buckets[ls[0] : ls[-1] + 1]
indices = range(ls[0], ls[-1] + 1)
actual_indices = [sketch.getValue(i) for i in indices]
# configure the x-axis to make more sense
fig, ax = plt.subplots()
ax.ticklabel_format(useOffset=False, style='plain')
ax.ticklabel_format(useOffset=False, style="plain")
plt.plot(actual_indices, actual_data)
plt.xlabel("Latency (in us)")
plt.ylabel("Frequency count")
@ -62,6 +64,6 @@ if args.title is not None:
plt.title(plt_title)
plt.xlim([actual_indices[0], actual_indices[-1]])
if args.savefig is not None:
plt.savefig(args.savefig, format='png')
plt.savefig(args.savefig, format="png")
else:
plt.show()
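
The tail trimming above simply drops the leading and trailing all-zero buckets before plotting, while keeping zeros that fall inside the populated range. A small sketch with hypothetical bucket counts:

buckets = [0, 0, 3, 7, 0, 5, 0, 0]
ls = [i for i, e in enumerate(buckets) if e != 0]
print(buckets[ls[0] : ls[-1] + 1])     # [3, 7, 0, 5]  (interior zero is kept)
print(list(range(ls[0], ls[-1] + 1)))  # [2, 3, 4, 5]  (the matching bucket indices)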

View File

@ -4,30 +4,38 @@ import os
import json
import re
def actorFile(actor: str, build: str, src: str):
res = actor.replace(build, src, 1)
res = res.replace('actor.g.cpp', 'actor.cpp')
return res.replace('actor.g.h', 'actor.h')
res = res.replace("actor.g.cpp", "actor.cpp")
return res.replace("actor.g.h", "actor.h")
def rreplace(s, old, new, occurrence = 1):
def rreplace(s, old, new, occurrence=1):
li = s.rsplit(old, occurrence)
return new.join(li)
def actorCommand(cmd: str, build:str, src: str):
r1 = re.compile('-c (.+)(actor\.g\.cpp)')
def actorCommand(cmd: str, build: str, src: str):
r1 = re.compile("-c (.+)(actor\.g\.cpp)")
m1 = r1.search(cmd)
if m1 is None:
return cmd
cmd1 = r1.sub('\\1actor.cpp', cmd)
cmd1 = r1.sub("\\1actor.cpp", cmd)
return rreplace(cmd1, build, src)
parser = ArgumentParser(description="Generates a new compile_commands.json for rtags+flow")
parser = ArgumentParser(
description="Generates a new compile_commands.json for rtags+flow"
)
parser.add_argument("-b", help="Build directory", dest="builddir", default=os.getcwd())
parser.add_argument("-s", help="Build directory", dest="srcdir", default=os.getcwd())
parser.add_argument("-o", help="Output file", dest="out", default="processed_compile_commands.json")
parser.add_argument("input", help="compile_commands.json", default="compile_commands.json", nargs="?")
parser.add_argument(
"-o", help="Output file", dest="out", default="processed_compile_commands.json"
)
parser.add_argument(
"input", help="compile_commands.json", default="compile_commands.json", nargs="?"
)
args = parser.parse_args()
print("transform {} with build directory {}".format(args.input, args.builddir))
@ -38,15 +46,17 @@ with open(args.input) as f:
result = []
for cmd in cmds:
additional_flags = ['-Wno-unknown-attributes']
cmd['command'] = cmd['command'].replace(' -DNO_INTELLISENSE ', ' {} '.format(' '.join(additional_flags)))
if cmd['file'].endswith('actor.g.cpp'):
additional_flags = ["-Wno-unknown-attributes"]
cmd["command"] = cmd["command"].replace(
" -DNO_INTELLISENSE ", " {} ".format(" ".join(additional_flags))
)
if cmd["file"].endswith("actor.g.cpp"):
# here we need to rewrite the rule
cmd['command'] = actorCommand(cmd['command'], args.builddir, args.srcdir)
cmd['file'] = actorFile(cmd['file'], args.builddir, args.srcdir)
cmd["command"] = actorCommand(cmd["command"], args.builddir, args.srcdir)
cmd["file"] = actorFile(cmd["file"], args.builddir, args.srcdir)
result.append(cmd)
else:
result.append(cmd)
with open(args.out, 'w') as f:
with open(args.out, "w") as f:
json.dump(result, f, indent=4)
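
As a usage sketch of the helpers defined above (the paths are hypothetical; assumes actorFile, actorCommand, and rreplace are in scope):

build, src = "/home/user/build", "/home/user/foundationdb"

print(actorFile("/home/user/build/fdbserver/worker.actor.g.cpp", build, src))
# /home/user/foundationdb/fdbserver/worker.actor.cpp

cmd = "clang++ -DNO_INTELLISENSE -c /home/user/build/fdbserver/worker.actor.g.cpp"
print(actorCommand(cmd, build, src))
# clang++ -DNO_INTELLISENSE /home/user/foundationdb/fdbserver/worker.actor.cpp
# (as written, the substitution keeps only group 1, so the "-c" flag is dropped from the rewritten command)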

View File

@ -32,48 +32,76 @@ from priority import Priority
from plot import Plotter
parser = argparse.ArgumentParser()
parser.add_argument('-w', '--workload', type=str, help='Name of workload to run')
parser.add_argument('-r', '--ratekeeper', type=str, help='Name of ratekeeper model')
parser.add_argument('-d', '--duration', type=int, default=240, help='Duration of simulated test, in seconds. Defaults to 240.')
parser.add_argument('-L', '--limiter', type=str, default='Original', help='Name of limiter implementation. Defaults to \'Original\'.')
parser.add_argument('-p', '--proxy', type=str, default='ProxyModel', help='Name of proxy implementation. Defaults to \'ProxyModel\'.')
parser.add_argument('--list', action='store_true', default=False, help='List options for all models.')
parser.add_argument('--no-graph', action='store_true', default=False, help='Disable graphical output.')
parser.add_argument("-w", "--workload", type=str, help="Name of workload to run")
parser.add_argument("-r", "--ratekeeper", type=str, help="Name of ratekeeper model")
parser.add_argument(
"-d",
"--duration",
type=int,
default=240,
help="Duration of simulated test, in seconds. Defaults to 240.",
)
parser.add_argument(
"-L",
"--limiter",
type=str,
default="Original",
help="Name of limiter implementation. Defaults to 'Original'.",
)
parser.add_argument(
"-p",
"--proxy",
type=str,
default="ProxyModel",
help="Name of proxy implementation. Defaults to 'ProxyModel'.",
)
parser.add_argument(
"--list", action="store_true", default=False, help="List options for all models."
)
parser.add_argument(
"--no-graph", action="store_true", default=False, help="Disable graphical output."
)
args = parser.parse_args()
def print_choices_list(context=None):
if context == 'workload' or context is None:
print('Workloads:')
if context == "workload" or context is None:
print("Workloads:")
for w in workload_model.predefined_workloads.keys():
print(' %s' % w)
print(" %s" % w)
if context == 'ratekeeper' or context is None:
print('\nRatekeeper models:')
if context == "ratekeeper" or context is None:
print("\nRatekeeper models:")
for r in ratekeeper_model.predefined_ratekeeper.keys():
print(' %s' % r)
print(" %s" % r)
proxy_model_classes = [c for c in [getattr(proxy_model, a) for a in dir(proxy_model)] if inspect.isclass(c)]
proxy_model_classes = [
c
for c in [getattr(proxy_model, a) for a in dir(proxy_model)]
if inspect.isclass(c)
]
if context == 'proxy' or context is None:
print('\nProxy models:')
if context == "proxy" or context is None:
print("\nProxy models:")
for p in proxy_model_classes:
if issubclass(p, proxy_model.ProxyModel):
print(' %s' % p.__name__)
print(" %s" % p.__name__)
if context == 'limiter' or context is None:
print('\nProxy limiters:')
if context == "limiter" or context is None:
print("\nProxy limiters:")
for p in proxy_model_classes:
if issubclass(p, proxy_model.Limiter) and p != proxy_model.Limiter:
name = p.__name__
if name.endswith('Limiter'):
name = name[0:-len('Limiter')]
print(' %s' % name)
if name.endswith("Limiter"):
name = name[0 : -len("Limiter")]
print(" %s" % name)
if args.workload is None or args.ratekeeper is None:
print('ERROR: A workload (-w/--workload) and ratekeeper model (-r/--ratekeeper) must be specified.\n')
print(
"ERROR: A workload (-w/--workload) and ratekeeper model (-r/--ratekeeper) must be specified.\n"
)
print_choices_list()
sys.exit(1)
@ -88,26 +116,26 @@ def validate_class_type(var, name, superclass):
if args.ratekeeper not in ratekeeper_model.predefined_ratekeeper:
print('Invalid ratekeeper model `%s\'' % args.ratekeeper)
print_choices_list('ratekeeper')
print("Invalid ratekeeper model `%s'" % args.ratekeeper)
print_choices_list("ratekeeper")
sys.exit(1)
if args.workload not in workload_model.predefined_workloads:
print('Invalid workload model `%s\'' % args.workload)
print_choices_list('workload')
print("Invalid workload model `%s'" % args.workload)
print_choices_list("workload")
sys.exit(1)
if not validate_class_type(proxy_model, args.proxy, proxy_model.ProxyModel):
print('Invalid proxy model `%s\'' % args.proxy)
print_choices_list('proxy')
print("Invalid proxy model `%s'" % args.proxy)
print_choices_list("proxy")
sys.exit(1)
limiter_name = args.limiter
if not validate_class_type(proxy_model, limiter_name, proxy_model.Limiter):
limiter_name += 'Limiter'
limiter_name += "Limiter"
if not validate_class_type(proxy_model, limiter_name, proxy_model.Limiter):
print('Invalid proxy limiter `%s\'' % args.limiter)
print_choices_list('limiter')
print("Invalid proxy limiter `%s'" % args.limiter)
print_choices_list("limiter")
sys.exit(1)
ratekeeper = ratekeeper_model.predefined_ratekeeper[args.ratekeeper]
@ -119,19 +147,30 @@ proxy = getattr(proxy_model, args.proxy)(args.duration, ratekeeper, workload, li
proxy.run()
for priority in workload.priorities():
latencies = sorted([p for t in proxy.results.latencies[priority].values() for p in t])
latencies = sorted(
[p for t in proxy.results.latencies[priority].values() for p in t]
)
total_started = sum(proxy.results.started[priority].values())
still_queued = sum([r.count for r in proxy.request_queue if r.priority == priority])
if len(latencies) > 0:
print('\n%s: %d requests in %d seconds (rate=%f). %d still queued.' % (priority, total_started, proxy.time, float(total_started) / proxy.time, still_queued))
print(' Median latency: %f' % latencies[len(latencies) // 2])
print(' 90%% latency: %f' % latencies[int(0.9 * len(latencies))])
print(' 99%% latency: %f' % latencies[int(0.99 * len(latencies))])
print(' 99.9%% latency: %f' % latencies[int(0.999 * len(latencies))])
print(' Max latency: %f' % latencies[-1])
print(
"\n%s: %d requests in %d seconds (rate=%f). %d still queued."
% (
priority,
total_started,
proxy.time,
float(total_started) / proxy.time,
still_queued,
)
)
print(" Median latency: %f" % latencies[len(latencies) // 2])
print(" 90%% latency: %f" % latencies[int(0.9 * len(latencies))])
print(" 99%% latency: %f" % latencies[int(0.99 * len(latencies))])
print(" 99.9%% latency: %f" % latencies[int(0.999 * len(latencies))])
print(" Max latency: %f" % latencies[-1])
print('')
print("")
if not args.no_graph:
plotter = Plotter(proxy.results)

View File

@ -48,62 +48,90 @@ class Plotter:
for priority in self.results.started.keys():
Plotter.add_plot(self.results.started[priority], time_resolution, priority)
plt.xlabel('Time (s)')
plt.ylabel('Released/s')
plt.xlabel("Time (s)")
plt.ylabel("Released/s")
plt.legend()
plt.subplot(3, 3, 2)
for priority in self.results.queued.keys():
Plotter.add_plot(self.results.queued[priority], time_resolution, priority)
plt.xlabel('Time (s)')
plt.ylabel('Requests/s')
plt.xlabel("Time (s)")
plt.ylabel("Requests/s")
plt.legend()
plt.subplot(3, 3, 3)
for priority in self.results.unprocessed_queue_sizes.keys():
data = {k: max(v) for (k, v) in self.results.unprocessed_queue_sizes[priority].items()}
data = {
k: max(v)
for (k, v) in self.results.unprocessed_queue_sizes[priority].items()
}
Plotter.add_plot(data, time_resolution, priority)
plt.xlabel('Time (s)')
plt.ylabel('Max queue size')
plt.xlabel("Time (s)")
plt.ylabel("Max queue size")
plt.legend()
num = 4
for priority in self.results.latencies.keys():
plt.subplot(3, 3, num)
median_latencies = {k: v[int(0.5 * len(v))] if len(v) > 0 else 0 for (k, v) in
self.results.latencies[priority].items()}
percentile90_latencies = {k: v[int(0.9 * len(v))] if len(v) > 0 else 0 for (k, v) in
self.results.latencies[priority].items()}
max_latencies = {k: max(v) if len(v) > 0 else 0 for (k, v) in self.results.latencies[priority].items()}
median_latencies = {
k: v[int(0.5 * len(v))] if len(v) > 0 else 0
for (k, v) in self.results.latencies[priority].items()
}
percentile90_latencies = {
k: v[int(0.9 * len(v))] if len(v) > 0 else 0
for (k, v) in self.results.latencies[priority].items()
}
max_latencies = {
k: max(v) if len(v) > 0 else 0
for (k, v) in self.results.latencies[priority].items()
}
Plotter.add_plot(median_latencies, time_resolution, 'median')
Plotter.add_plot(percentile90_latencies, time_resolution, '90th percentile')
Plotter.add_plot(max_latencies, time_resolution, 'max')
Plotter.add_plot(median_latencies, time_resolution, "median")
Plotter.add_plot(percentile90_latencies, time_resolution, "90th percentile")
Plotter.add_plot(max_latencies, time_resolution, "max")
plt.xlabel('Time (s)')
plt.ylabel(str(priority) + ' Latency (s)')
plt.yscale('log')
plt.xlabel("Time (s)")
plt.ylabel(str(priority) + " Latency (s)")
plt.yscale("log")
plt.legend()
num += 1
for priority in self.results.rate.keys():
plt.subplot(3, 3, num)
if len(self.results.rate[priority]) > 0:
Plotter.add_plot(self.results.rate[priority], time_resolution, 'Rate', use_avg=True)
Plotter.add_plot(
self.results.rate[priority], time_resolution, "Rate", use_avg=True
)
if len(self.results.released[priority]) > 0:
Plotter.add_plot(self.results.released[priority], time_resolution, 'Released', use_avg=True)
Plotter.add_plot(
self.results.released[priority],
time_resolution,
"Released",
use_avg=True,
)
if len(self.results.limit[priority]) > 0:
Plotter.add_plot(self.results.limit[priority], time_resolution, 'Limit', use_avg=True)
Plotter.add_plot(
self.results.limit[priority], time_resolution, "Limit", use_avg=True
)
if len(self.results.limit_and_budget[priority]) > 0:
Plotter.add_plot(self.results.limit_and_budget[priority], time_resolution, 'Limit and budget',
use_avg=True)
Plotter.add_plot(
self.results.limit_and_budget[priority],
time_resolution,
"Limit and budget",
use_avg=True,
)
if len(self.results.budget[priority]) > 0:
Plotter.add_plot(self.results.budget[priority], time_resolution, 'Budget', use_avg=True)
Plotter.add_plot(
self.results.budget[priority],
time_resolution,
"Budget",
use_avg=True,
)
plt.xlabel('Time (s)')
plt.ylabel('Value (' + str(priority) + ')')
plt.xlabel("Time (s)")
plt.ylabel("Value (" + str(priority) + ")")
plt.legend()
num += 1

View File

@ -53,7 +53,16 @@ class Limiter:
self.count = count
class UpdateBudgetParams:
def __init__(self, time, num_started, num_started_at_priority, min_priority, last_batch, queue_empty, elapsed):
def __init__(
self,
time,
num_started,
num_started_at_priority,
min_priority,
last_batch,
queue_empty,
elapsed,
):
self.time = time
self.num_started = num_started
self.num_started_at_priority = num_started_at_priority
@ -129,7 +138,9 @@ class TimeLimiter(PositiveBudgetLimiter):
self.locked_until = 0
def can_start(self, params):
return params.time >= self.locked_until and PositiveBudgetLimiter.can_start(self, params)
return params.time >= self.locked_until and PositiveBudgetLimiter.can_start(
self, params
)
def update_budget(self, params):
# print('Start update budget: time=%f, limit=%f, locked_until=%f, num_started=%d, priority=%s, min_priority=%s, last_batch=%d' % (params.time, self.limit, self.locked_until, params.num_started, self.priority, params.min_priority, params.last_batch))
@ -137,9 +148,14 @@ class TimeLimiter(PositiveBudgetLimiter):
if params.min_priority >= self.priority or params.num_started < self.limit:
self.limit -= params.num_started
else:
self.limit = min(self.limit, max(self.limit - params.num_started, -params.last_batch))
self.locked_until = min(params.time + 2.0,
max(params.time, self.locked_until) + (params.num_started - self.limit) / self.rate)
self.limit = min(
self.limit, max(self.limit - params.num_started, -params.last_batch)
)
self.locked_until = min(
params.time + 2.0,
max(params.time, self.locked_until)
+ (params.num_started - self.limit) / self.rate,
)
# print('End update budget: time=%f, limit=%f, locked_until=%f, num_started=%d, priority=%s, min_priority=%s' % (params.time, self.limit, self.locked_until, params.num_started, self.priority, params.min_priority))
@ -161,7 +177,11 @@ class TimePositiveBudgetLimiter(PositiveBudgetLimiter):
# print('Start update budget: time=%f, limit=%f, locked_until=%f, num_started=%d, priority=%s, min_priority=%s, last_batch=%d' % (params.time, self.limit, self.locked_until, params.num_started, self.priority, params.min_priority, params.last_batch))
if params.num_started > self.limit:
self.locked_until = min(params.time + 2.0, max(params.time, self.locked_until) + (params.num_started - self.limit) / self.rate)
self.locked_until = min(
params.time + 2.0,
max(params.time, self.locked_until)
+ (params.num_started - self.limit) / self.rate,
)
self.limit = 0
else:
self.limit -= params.num_started
@ -187,7 +207,9 @@ class SmoothingLimiter(OriginalLimiter):
def update_limit(self, params):
self.limit = 2.0 * (
self.smooth_rate_limit.smooth_total(params.time) - self.smooth_released.smooth_rate(params.time))
self.smooth_rate_limit.smooth_total(params.time)
- self.smooth_released.smooth_rate(params.time)
)
def can_start(self, params):
return params.num_started + params.count <= self.limit
@ -203,15 +225,22 @@ class SmoothingBudgetLimiter(SmoothingLimiter):
self.budget = 0
def update_limit(self, params):
release_rate = (
self.smooth_rate_limit.smooth_total(params.time) - self.smooth_released.smooth_rate(params.time))
release_rate = self.smooth_rate_limit.smooth_total(
params.time
) - self.smooth_released.smooth_rate(params.time)
# self.smooth_filled.set_total(params.time, 1 if release_rate > 0 else 0)
self.limit = 2.0 * release_rate
self.proxy_model.results.rate[self.priority][params.time] = self.smooth_rate_limit.smooth_total(params.time)
self.proxy_model.results.released[self.priority][params.time] = self.smooth_released.smooth_rate(params.time)
self.proxy_model.results.rate[self.priority][
params.time
] = self.smooth_rate_limit.smooth_total(params.time)
self.proxy_model.results.released[self.priority][
params.time
] = self.smooth_released.smooth_rate(params.time)
self.proxy_model.results.limit[self.priority][params.time] = self.limit
self.proxy_model.results.limit_and_budget[self.priority][params.time] = self.limit + self.budget
self.proxy_model.results.limit_and_budget[self.priority][params.time] = (
self.limit + self.budget
)
self.proxy_model.results.budget[self.priority][params.time] = self.budget
# self.budget = max(0, self.budget + params.elapsed * self.smooth_rate_limit.smooth_total(params.time))
@ -222,10 +251,16 @@ class SmoothingBudgetLimiter(SmoothingLimiter):
# print('Update limit: time=%f, priority=%s, limit=%f, rate=%f, released=%f, budget=%f' % (params.time, self.priority, self.limit, self.smooth_rate_limit.smooth_total(params.time), self.smooth_released.smooth_rate(params.time), self.budget))
def can_start(self, params):
return params.num_started + params.count <= self.limit + self.budget # or params.num_started + params.count <= self.budget
return (
params.num_started + params.count <= self.limit + self.budget
) # or params.num_started + params.count <= self.budget
def update_budget(self, params):
self.budget = max(0, self.budget + (self.limit - params.num_started_at_priority) / 2 * params.elapsed)
self.budget = max(
0,
self.budget
+ (self.limit - params.num_started_at_priority) / 2 * params.elapsed,
)
if params.queue_empty:
self.budget = min(10, self.budget)
@ -248,14 +283,19 @@ class ProxyModel:
self.budget = {p: {} for p in priorities}
def init_result(self, priorities, starting_value, duration):
return {p: {s: copy.copy(starting_value) for s in range(0, duration)} for p in priorities}
return {
p: {s: copy.copy(starting_value) for s in range(0, duration)}
for p in priorities
}
def __init__(self, duration, ratekeeper_model, workload_model, Limiter):
self.time = 0
self.log_time = 0
self.duration = duration
self.priority_limiters = {priority: Limiter(priority, ratekeeper_model, self) for priority in
workload_model.priorities()}
self.priority_limiters = {
priority: Limiter(priority, ratekeeper_model, self)
for priority in workload_model.priorities()
}
self.workload_model = workload_model
self.request_scheduled = {p: False for p in self.workload_model.priorities()}
@ -270,8 +310,15 @@ class ProxyModel:
for priority in self.workload_model.priorities():
next_request = self.workload_model.next_request(self.time, priority)
assert next_request is not None
heapq.heappush(self.tasks, Task(next_request.time,
lambda next_request=next_request: self.receive_request(next_request)))
heapq.heappush(
self.tasks,
Task(
next_request.time,
lambda next_request=next_request: self.receive_request(
next_request
),
),
)
self.request_scheduled[priority] = True
while True: # or len(self.request_queue) > 0:
@ -299,7 +346,10 @@ class ProxyModel:
next_request = self.workload_model.next_request(self.time, request.priority)
if next_request is not None and next_request.time < self.duration:
heapq.heappush(self.tasks, Task(next_request.time, lambda: self.receive_request(next_request)))
heapq.heappush(
self.tasks,
Task(next_request.time, lambda: self.receive_request(next_request)),
)
else:
self.request_scheduled[request.priority] = False
@ -317,17 +367,30 @@ class ProxyModel:
request = self.request_queue[0]
if not self.priority_limiters[request.priority].can_start(
Limiter.CanStartParams(self.time, current_started, request.count)):
Limiter.CanStartParams(self.time, current_started, request.count)
):
break
min_priority = request.priority
last_batch = request.count
if self.workload_model.request_completed(request) and not self.request_scheduled[request.priority]:
next_request = self.workload_model.next_request(self.time, request.priority)
if (
self.workload_model.request_completed(request)
and not self.request_scheduled[request.priority]
):
next_request = self.workload_model.next_request(
self.time, request.priority
)
assert next_request is not None
heapq.heappush(self.tasks, Task(next_request.time,
lambda next_request=next_request: self.receive_request(next_request)))
heapq.heappush(
self.tasks,
Task(
next_request.time,
lambda next_request=next_request: self.receive_request(
next_request
),
),
)
self.request_scheduled[request.priority] = True
current_started += request.count
@ -335,7 +398,9 @@ class ProxyModel:
heapq.heappop(self.request_queue)
self.results.started[request.priority][int(self.time)] += request.count
self.results.latencies[request.priority][int(self.time)].append(self.time - request.time)
self.results.latencies[request.priority][int(self.time)].append(
self.time - request.time
)
if len(self.request_queue) == 0:
min_priority = Priority.BATCH
@ -343,15 +408,27 @@ class ProxyModel:
for priority, limiter in self.priority_limiters.items():
started_at_priority = sum([v for p, v in started.items() if p <= priority])
limiter.update_budget(
Limiter.UpdateBudgetParams(self.time, current_started, started_at_priority, min_priority, last_batch,
len(self.request_queue) == 0 or self.request_queue[0].priority > priority,
elapsed))
Limiter.UpdateBudgetParams(
self.time,
current_started,
started_at_priority,
min_priority,
last_batch,
len(self.request_queue) == 0
or self.request_queue[0].priority > priority,
elapsed,
)
)
for priority in self.workload_model.priorities():
self.results.unprocessed_queue_sizes[priority][int(self.time)].append(
self.workload_model.workload_models[priority].outstanding)
self.workload_model.workload_models[priority].outstanding
)
current_time = self.time
delay = 0.001
heapq.heappush(self.tasks, Task(self.time + delay, lambda: self.process_requests(current_time)))
heapq.heappush(
self.tasks,
Task(self.time + delay, lambda: self.process_requests(current_time)),
)

View File

@ -82,7 +82,12 @@ class DistributionRateModel(RateModel):
self.rate = None
def get_rate(self, time):
if self.frequency == 0 or int((time - self.last_change) / self.frequency) > int(self.last_change / self.frequency) or self.rate is None:
if (
self.frequency == 0
or int((time - self.last_change) / self.frequency)
> int(self.last_change / self.frequency)
or self.rate is None
):
self.last_change = time
self.rate = self.distribution()

View File

@ -33,37 +33,44 @@ class RatekeeperModel:
predefined_ratekeeper = {}
predefined_ratekeeper['default200_batch100'] = RatekeeperModel(
predefined_ratekeeper["default200_batch100"] = RatekeeperModel(
{
Priority.SYSTEM: rate_model.UnlimitedRateModel(),
Priority.DEFAULT: rate_model.FixedRateModel(200),
Priority.BATCH: rate_model.FixedRateModel(100)
})
Priority.BATCH: rate_model.FixedRateModel(100),
}
)
predefined_ratekeeper['default_sawtooth'] = RatekeeperModel(
predefined_ratekeeper["default_sawtooth"] = RatekeeperModel(
{
Priority.SYSTEM: rate_model.UnlimitedRateModel(),
Priority.DEFAULT: rate_model.SawtoothRateModel(10, 200, 1),
Priority.BATCH: rate_model.FixedRateModel(0)
})
Priority.BATCH: rate_model.FixedRateModel(0),
}
)
predefined_ratekeeper['default_uniform_random'] = RatekeeperModel(
predefined_ratekeeper["default_uniform_random"] = RatekeeperModel(
{
Priority.SYSTEM: rate_model.UnlimitedRateModel(),
Priority.DEFAULT: rate_model.DistributionRateModel(lambda: numpy.random.uniform(10, 200), 1),
Priority.BATCH: rate_model.FixedRateModel(0)
})
Priority.DEFAULT: rate_model.DistributionRateModel(
lambda: numpy.random.uniform(10, 200), 1
),
Priority.BATCH: rate_model.FixedRateModel(0),
}
)
predefined_ratekeeper['default_trickle'] = RatekeeperModel(
predefined_ratekeeper["default_trickle"] = RatekeeperModel(
{
Priority.SYSTEM: rate_model.UnlimitedRateModel(),
Priority.DEFAULT: rate_model.FixedRateModel(3),
Priority.BATCH: rate_model.FixedRateModel(0)
})
Priority.BATCH: rate_model.FixedRateModel(0),
}
)
predefined_ratekeeper['default1000'] = RatekeeperModel(
predefined_ratekeeper["default1000"] = RatekeeperModel(
{
Priority.SYSTEM: rate_model.UnlimitedRateModel(),
Priority.DEFAULT: rate_model.FixedRateModel(1000),
Priority.BATCH: rate_model.FixedRateModel(500)
})
Priority.BATCH: rate_model.FixedRateModel(500),
}
)

View File

@ -50,4 +50,6 @@ class Smoother:
elapsed = time - self.time
if elapsed > 0:
self.time = time
self.estimate += (self.total - self.estimate) * (1 - math.exp(-elapsed / self.folding_time))
self.estimate += (self.total - self.estimate) * (
1 - math.exp(-elapsed / self.folding_time)
)
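
The update above is a continuous-time exponential moving average: after a gap of elapsed seconds, the estimate moves toward the total by a fraction 1 - exp(-elapsed / folding_time). A toy illustration with hypothetical numbers:

import math

folding_time = 2.0
estimate, total = 0.0, 100.0
for elapsed in (1.0, 1.0, 1.0):
    estimate += (total - estimate) * (1 - math.exp(-elapsed / folding_time))
print(round(estimate, 1))  # about 77.7 after 3 seconds with a 2 second folding time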

View File

@ -38,7 +38,9 @@ class Request:
class PriorityWorkloadModel:
def __init__(self, priority, rate_model, batch_model, generator, max_outstanding=1e9):
def __init__(
self, priority, rate_model, batch_model, generator, max_outstanding=1e9
):
self.priority = priority
self.rate_model = rate_model
self.batch_model = batch_model
@ -127,89 +129,105 @@ class DistributionRequestGenerator(RequestGenerator):
predefined_workloads = {}
predefined_workloads['slow_exponential'] = WorkloadModel(
predefined_workloads["slow_exponential"] = WorkloadModel(
{
Priority.DEFAULT: PriorityWorkloadModel(Priority.DEFAULT,
rate_model.FixedRateModel(100),
DistributionBatchGenerator(Distribution.fixed, 1),
DistributionRequestGenerator(Distribution.exponential),
max_outstanding=100
)
})
Priority.DEFAULT: PriorityWorkloadModel(
Priority.DEFAULT,
rate_model.FixedRateModel(100),
DistributionBatchGenerator(Distribution.fixed, 1),
DistributionRequestGenerator(Distribution.exponential),
max_outstanding=100,
)
}
)
predefined_workloads['fixed_uniform'] = WorkloadModel(
predefined_workloads["fixed_uniform"] = WorkloadModel(
{
Priority.SYSTEM: PriorityWorkloadModel(Priority.SYSTEM,
rate_model.FixedRateModel(0),
DistributionBatchGenerator(Distribution.fixed, 1),
DistributionRequestGenerator(Distribution.uniform),
max_outstanding=10
),
Priority.DEFAULT: PriorityWorkloadModel(Priority.DEFAULT,
rate_model.FixedRateModel(95),
DistributionBatchGenerator(Distribution.fixed, 10),
DistributionRequestGenerator(Distribution.uniform),
max_outstanding=200
),
Priority.BATCH: PriorityWorkloadModel(Priority.BATCH,
rate_model.FixedRateModel(1),
DistributionBatchGenerator(Distribution.uniform, 500),
DistributionRequestGenerator(Distribution.uniform),
max_outstanding=200
)
})
Priority.SYSTEM: PriorityWorkloadModel(
Priority.SYSTEM,
rate_model.FixedRateModel(0),
DistributionBatchGenerator(Distribution.fixed, 1),
DistributionRequestGenerator(Distribution.uniform),
max_outstanding=10,
),
Priority.DEFAULT: PriorityWorkloadModel(
Priority.DEFAULT,
rate_model.FixedRateModel(95),
DistributionBatchGenerator(Distribution.fixed, 10),
DistributionRequestGenerator(Distribution.uniform),
max_outstanding=200,
),
Priority.BATCH: PriorityWorkloadModel(
Priority.BATCH,
rate_model.FixedRateModel(1),
DistributionBatchGenerator(Distribution.uniform, 500),
DistributionRequestGenerator(Distribution.uniform),
max_outstanding=200,
),
}
)
predefined_workloads['batch_starvation'] = WorkloadModel(
predefined_workloads["batch_starvation"] = WorkloadModel(
{
Priority.SYSTEM: PriorityWorkloadModel(Priority.SYSTEM,
rate_model.FixedRateModel(1),
DistributionBatchGenerator(Distribution.fixed, 1),
DistributionRequestGenerator(Distribution.uniform),
max_outstanding=10
),
Priority.DEFAULT: PriorityWorkloadModel(Priority.DEFAULT,
rate_model.IntervalRateModel([(0, 50), (60, 150), (120, 90)]),
DistributionBatchGenerator(Distribution.fixed, 1),
DistributionRequestGenerator(Distribution.uniform),
max_outstanding=200
),
Priority.BATCH: PriorityWorkloadModel(Priority.BATCH,
rate_model.FixedRateModel(100),
DistributionBatchGenerator(Distribution.fixed, 1),
DistributionRequestGenerator(Distribution.uniform),
max_outstanding=200
)
})
Priority.SYSTEM: PriorityWorkloadModel(
Priority.SYSTEM,
rate_model.FixedRateModel(1),
DistributionBatchGenerator(Distribution.fixed, 1),
DistributionRequestGenerator(Distribution.uniform),
max_outstanding=10,
),
Priority.DEFAULT: PriorityWorkloadModel(
Priority.DEFAULT,
rate_model.IntervalRateModel([(0, 50), (60, 150), (120, 90)]),
DistributionBatchGenerator(Distribution.fixed, 1),
DistributionRequestGenerator(Distribution.uniform),
max_outstanding=200,
),
Priority.BATCH: PriorityWorkloadModel(
Priority.BATCH,
rate_model.FixedRateModel(100),
DistributionBatchGenerator(Distribution.fixed, 1),
DistributionRequestGenerator(Distribution.uniform),
max_outstanding=200,
),
}
)
predefined_workloads['default_low_high_low'] = WorkloadModel(
predefined_workloads["default_low_high_low"] = WorkloadModel(
{
Priority.SYSTEM: PriorityWorkloadModel(Priority.SYSTEM,
rate_model.FixedRateModel(0),
DistributionBatchGenerator(Distribution.fixed, 1),
DistributionRequestGenerator(Distribution.uniform),
max_outstanding=10
),
Priority.DEFAULT: PriorityWorkloadModel(Priority.DEFAULT,
rate_model.IntervalRateModel([(0, 100), (60, 300), (120, 100)]),
DistributionBatchGenerator(Distribution.fixed, 1),
DistributionRequestGenerator(Distribution.uniform),
max_outstanding=200
),
Priority.BATCH: PriorityWorkloadModel(Priority.BATCH,
rate_model.FixedRateModel(0),
DistributionBatchGenerator(Distribution.fixed, 1),
DistributionRequestGenerator(Distribution.uniform),
max_outstanding=200
)
})
Priority.SYSTEM: PriorityWorkloadModel(
Priority.SYSTEM,
rate_model.FixedRateModel(0),
DistributionBatchGenerator(Distribution.fixed, 1),
DistributionRequestGenerator(Distribution.uniform),
max_outstanding=10,
),
Priority.DEFAULT: PriorityWorkloadModel(
Priority.DEFAULT,
rate_model.IntervalRateModel([(0, 100), (60, 300), (120, 100)]),
DistributionBatchGenerator(Distribution.fixed, 1),
DistributionRequestGenerator(Distribution.uniform),
max_outstanding=200,
),
Priority.BATCH: PriorityWorkloadModel(
Priority.BATCH,
rate_model.FixedRateModel(0),
DistributionBatchGenerator(Distribution.fixed, 1),
DistributionRequestGenerator(Distribution.uniform),
max_outstanding=200,
),
}
)
for rate in [83, 100, 180, 190, 200]:
predefined_workloads['default%d' % rate] = WorkloadModel(
predefined_workloads["default%d" % rate] = WorkloadModel(
{
Priority.DEFAULT: PriorityWorkloadModel(Priority.DEFAULT,
rate_model.FixedRateModel(rate),
DistributionBatchGenerator(Distribution.fixed, 1),
DistributionRequestGenerator(Distribution.exponential),
max_outstanding=1000
)
})
Priority.DEFAULT: PriorityWorkloadModel(
Priority.DEFAULT,
rate_model.FixedRateModel(rate),
DistributionBatchGenerator(Distribution.fixed, 1),
DistributionRequestGenerator(Distribution.exponential),
max_outstanding=1000,
)
}
)

View File

@ -34,57 +34,70 @@ def get_version_string(library_path):
try:
lib = ctypes.cdll.LoadLibrary(library_path)
except Exception as e:
error('Could not load library %r: %s' % (library_path, str(e)))
error("Could not load library %r: %s" % (library_path, str(e)))
lib.fdb_get_error.restype = ctypes.c_char_p
try:
r = lib.fdb_select_api_version_impl(410, 410)
if r != 0:
error('Error setting API version: %s (%d)' % (lib.fdb_get_error(r), r))
error("Error setting API version: %s (%d)" % (lib.fdb_get_error(r), r))
except Exception as e:
error('Error calling fdb_select_api_version_impl: %s' % str(e))
error("Error calling fdb_select_api_version_impl: %s" % str(e))
try:
lib.fdb_get_client_version.restype = ctypes.c_char_p
version_str = lib.fdb_get_client_version().decode('utf-8')
version_str = lib.fdb_get_client_version().decode("utf-8")
except Exception as e:
error('Error getting version information from client library: %s' % str(e))
error("Error getting version information from client library: %s" % str(e))
version_components = version_str.split(',')
package_version = '.'.join(version_components[0].split('.')[0:2])
version_components = version_str.split(",")
package_version = ".".join(version_components[0].split(".")[0:2])
version_str = 'FoundationDB Client %s (v%s)\n' % (package_version, version_components[0])
version_str += 'source version %s\n' % version_components[1]
version_str += 'protocol %s' % version_components[2]
version_str = "FoundationDB Client %s (v%s)\n" % (
package_version,
version_components[0],
)
version_str += "source version %s\n" % version_components[1]
version_str += "protocol %s" % version_components[2]
return version_str
if __name__ == '__main__':
if platform.system() == 'Linux':
default_lib = 'libfdb_c.so'
platform_name = 'Linux'
dlopen = 'dlopen'
elif platform.system() == 'Windows':
default_lib = 'fdb_c.dll'
platform_name = 'Windows'
dlopen = 'LoadLibrary'
elif platform.system() == 'Darwin':
default_lib = 'libfdb_c.dylib'
platform_name = 'macOS'
dlopen = 'dlopen'
if __name__ == "__main__":
if platform.system() == "Linux":
default_lib = "libfdb_c.so"
platform_name = "Linux"
dlopen = "dlopen"
elif platform.system() == "Windows":
default_lib = "fdb_c.dll"
platform_name = "Windows"
dlopen = "LoadLibrary"
elif platform.system() == "Darwin":
default_lib = "libfdb_c.dylib"
platform_name = "macOS"
dlopen = "dlopen"
else:
error('Unsupported platform: %s' % platform.system())
error("Unsupported platform: %s" % platform.system())
parser = argparse.ArgumentParser(description='Prints version information for an FDB client library (e.g. %s). Must be run on a library built for the current platform (%s).' % (default_lib, platform_name))
parser.add_argument('library_path', type=str, help='Path to the client library. If not specified, the library will be searched for according to the procedures for %s on the current platform (%s).' % (dlopen, platform_name), default=None, nargs='?')
parser = argparse.ArgumentParser(
description="Prints version information for an FDB client library (e.g. %s). Must be run on a library built for the current platform (%s)."
% (default_lib, platform_name)
)
parser.add_argument(
"library_path",
type=str,
help="Path to the client library. If not specified, the library will be searched for according to the procedures for %s on the current platform (%s)."
% (dlopen, platform_name),
default=None,
nargs="?",
)
args = parser.parse_args()
if args.library_path is None:
args.library_path = default_lib
elif not os.path.isfile(args.library_path):
error('Library does not exist: %r' % args.library_path)
error("Library does not exist: %r" % args.library_path)
print(get_version_string(args.library_path))

View File

@ -90,11 +90,11 @@ class CompilationDatabase:
if not any(
(prefix in key)
for prefix in [
"flow/",
"fdbcli/",
"fdbserver/",
"flow/",
"fdbcli/",
"fdbserver/",
"fdbclient/",
"fdbrpc/",
"fdbrpc/",
]
):
continue

View File

@ -50,9 +50,18 @@ PROTOCOL_VERSION_6_3 = 0x0FDB00B063010001
PROTOCOL_VERSION_7_0 = 0x0FDB00B070010001
PROTOCOL_VERSION_7_1 = 0x0FDB00B071010000
PROTOCOL_VERSION_7_2 = 0x0FDB00B072000000
supported_protocol_versions = frozenset([PROTOCOL_VERSION_5_2, PROTOCOL_VERSION_6_0, PROTOCOL_VERSION_6_1,
PROTOCOL_VERSION_6_2, PROTOCOL_VERSION_6_3, PROTOCOL_VERSION_7_0,
PROTOCOL_VERSION_7_1, PROTOCOL_VERSION_7_2])
supported_protocol_versions = frozenset(
[
PROTOCOL_VERSION_5_2,
PROTOCOL_VERSION_6_0,
PROTOCOL_VERSION_6_1,
PROTOCOL_VERSION_6_2,
PROTOCOL_VERSION_6_3,
PROTOCOL_VERSION_7_0,
PROTOCOL_VERSION_7_1,
PROTOCOL_VERSION_7_2,
]
)
fdb.api_version(520)
@ -86,8 +95,11 @@ class ByteBuffer(object):
def get_bytes(self, n):
if self._offset + n > len(self.val):
raise IndexError("Request to read %d bytes with only %d remaining" % (n, self.get_remaining_bytes()))
ret = self.val[self._offset:self._offset + n]
raise IndexError(
"Request to read %d bytes with only %d remaining"
% (n, self.get_remaining_bytes())
)
ret = self.val[self._offset : self._offset + n]
self._offset += n
return ret
@ -115,7 +127,11 @@ class ByteBuffer(object):
return [self.get_key_range() for _ in range(0, length)]
def get_mutation(self):
return Mutation(ord(self.get_bytes(1)), self.get_bytes_with_length(), self.get_bytes_with_length())
return Mutation(
ord(self.get_bytes(1)),
self.get_bytes_with_length(),
self.get_bytes_with_length(),
)
def get_mutation_list(self):
length = self.get_int()
@ -348,7 +364,9 @@ class ClientTransactionInfo:
self.error_get_ranges = []
self.error_get_ranges.append(error_get_range)
elif event == 6:
error_commit = ErrorCommitInfo(bb, protocol_version, full_output=full_output)
error_commit = ErrorCommitInfo(
bb, protocol_version, full_output=full_output
)
if not type_filter or "error_commit" in type_filter:
if not self.error_commits:
self.error_commits = []
@ -357,8 +375,15 @@ class ClientTransactionInfo:
raise Exception("Unknown event type %d" % event)
def has_types(self):
return self.get_version or self.gets or self.get_ranges or self.commit \
or self.error_gets or self.error_get_ranges or self.error_commits
return (
self.get_version
or self.gets
or self.get_ranges
or self.commit
or self.error_gets
or self.error_get_ranges
or self.error_commits
)
def to_json(self):
return json.dumps(self, cls=ObjJsonEncoder, sort_keys=True)
@ -367,31 +392,44 @@ class ClientTransactionInfo:
class TransactionInfoLoader(object):
max_num_chunks_to_store = 1000 # Each chunk would be 100 KB in size
def __init__(self, db, full_output=True, type_filter=None, min_timestamp=None, max_timestamp=None):
def __init__(
self,
db,
full_output=True,
type_filter=None,
min_timestamp=None,
max_timestamp=None,
):
self.db = db
self.full_output = full_output
self.type_filter = type_filter
self.min_timestamp = min_timestamp
self.max_timestamp = max_timestamp
'''
"""
Keys look like this
FF - 2 bytes \xff\x02
SSSSSSSSSS - 10 bytes Version Stamp
RRRRRRRRRRRRRRRR - 16 bytes Transaction id
NNNN - 4 Bytes Chunk number
TTTT - 4 Bytes Total number of chunks
'''
sample_key = "FF/fdbClientInfo/client_latency/SSSSSSSSSS/RRRRRRRRRRRRRRRR/NNNNTTTT/"
"""
sample_key = (
"FF/fdbClientInfo/client_latency/SSSSSSSSSS/RRRRRRRRRRRRRRRR/NNNNTTTT/"
)
self.client_latency_start = b'\xff\x02/fdbClientInfo/client_latency/'
self.client_latency_start_key_selector = fdb.KeySelector.first_greater_than(self.client_latency_start)
self.client_latency_end_key_selector = fdb.KeySelector.first_greater_or_equal(strinc(self.client_latency_start))
self.version_stamp_start_idx = sample_key.index('S')
self.version_stamp_end_idx = sample_key.rindex('S')
self.tr_id_start_idx = sample_key.index('R')
self.tr_id_end_idx = sample_key.rindex('R')
self.chunk_num_start_idx = sample_key.index('N')
self.num_chunks_start_idx = sample_key.index('T')
self.client_latency_start = b"\xff\x02/fdbClientInfo/client_latency/"
self.client_latency_start_key_selector = fdb.KeySelector.first_greater_than(
self.client_latency_start
)
self.client_latency_end_key_selector = fdb.KeySelector.first_greater_or_equal(
strinc(self.client_latency_start)
)
self.version_stamp_start_idx = sample_key.index("S")
self.version_stamp_end_idx = sample_key.rindex("S")
self.tr_id_start_idx = sample_key.index("R")
self.tr_id_end_idx = sample_key.rindex("R")
self.chunk_num_start_idx = sample_key.index("N")
self.num_chunks_start_idx = sample_key.index("T")
self.tr_info_map = {}
self.num_chunks_stored = 0
@ -404,14 +442,22 @@ class TransactionInfoLoader(object):
self.num_transactions_discarded += 1
def parse_key(self, k):
version_stamp_bytes = k[self.version_stamp_start_idx:self.version_stamp_end_idx + 1]
tr_id = k[self.tr_id_start_idx:self.tr_id_end_idx + 1]
num_chunks = struct.unpack(">i", k[self.num_chunks_start_idx:self.num_chunks_start_idx + 4])[0]
chunk_num = struct.unpack(">i", k[self.chunk_num_start_idx:self.chunk_num_start_idx + 4])[0]
version_stamp_bytes = k[
self.version_stamp_start_idx : self.version_stamp_end_idx + 1
]
tr_id = k[self.tr_id_start_idx : self.tr_id_end_idx + 1]
num_chunks = struct.unpack(
">i", k[self.num_chunks_start_idx : self.num_chunks_start_idx + 4]
)[0]
chunk_num = struct.unpack(
">i", k[self.chunk_num_start_idx : self.chunk_num_start_idx + 4]
)[0]
return version_stamp_bytes, tr_id, num_chunks, chunk_num
def get_key_prefix_for_version_stamp(self, version_stamp):
return self.client_latency_start + struct.pack(">Q", version_stamp) + b'\x00\x00'
return (
self.client_latency_start + struct.pack(">Q", version_stamp) + b"\x00\x00"
)
@fdb.transactional
def find_version_for_timestamp(self, tr, timestamp, start):
@ -427,30 +473,40 @@ class TransactionInfoLoader(object):
"""
tr.options.set_read_system_keys()
tr.options.set_read_lock_aware()
timekeeper_prefix = b'\xff\x02/timeKeeper/map/'
timekeeper_prefix = b"\xff\x02/timeKeeper/map/"
timestamp_packed = fdb.tuple.pack((timestamp,))
if start:
start_key = timekeeper_prefix
end_key = fdb.KeySelector.first_greater_than(timekeeper_prefix + timestamp_packed)
end_key = fdb.KeySelector.first_greater_than(
timekeeper_prefix + timestamp_packed
)
reverse = True
else:
start_key = fdb.KeySelector.first_greater_or_equal(timekeeper_prefix + timestamp_packed)
start_key = fdb.KeySelector.first_greater_or_equal(
timekeeper_prefix + timestamp_packed
)
end_key = fdb.KeySelector.first_greater_or_equal(strinc(timekeeper_prefix))
reverse = False
for k, v in tr.snapshot.get_range(start_key, end_key, limit=1, reverse=reverse):
return fdb.tuple.unpack(v)[0]
return 0 if start else 0x8000000000000000 # we didn't find any timekeeper data so find the max range
return (
0 if start else 0x8000000000000000
) # we didn't find any timekeeper data so find the max range
def fetch_transaction_info(self):
if self.min_timestamp:
start_version = self.find_version_for_timestamp(self.db, self.min_timestamp, True)
start_version = self.find_version_for_timestamp(
self.db, self.min_timestamp, True
)
logger.debug("Using start version %s" % start_version)
start_key = self.get_key_prefix_for_version_stamp(start_version)
else:
start_key = self.client_latency_start_key_selector
if self.max_timestamp:
end_version = self.find_version_for_timestamp(self.db, self.max_timestamp, False)
end_version = self.find_version_for_timestamp(
self.db, self.max_timestamp, False
)
logger.debug("Using end version %s" % end_version)
end_key = self.get_key_prefix_for_version_stamp(end_version)
else:
@ -460,7 +516,11 @@ class TransactionInfoLoader(object):
invalid_transaction_infos = 0
def build_client_transaction_info(v):
return ClientTransactionInfo(ByteBuffer(v), full_output=self.full_output, type_filter=self.type_filter)
return ClientTransactionInfo(
ByteBuffer(v),
full_output=self.full_output,
type_filter=self.type_filter,
)
more = True
tr = self.db.create_transaction()
@ -471,8 +531,9 @@ class TransactionInfoLoader(object):
buffer = []
try:
logger.debug("Querying [%s:%s]" % (start_key, end_key))
transaction_info_range = tr.snapshot.get_range(start_key, end_key,
streaming_mode=fdb.impl.StreamingMode.want_all)
transaction_info_range = tr.snapshot.get_range(
start_key, end_key, streaming_mode=fdb.impl.StreamingMode.want_all
)
for k, v in transaction_info_range:
found += 1
# logger.debug(k)
@ -498,26 +559,38 @@ class TransactionInfoLoader(object):
if chunk_num == 1:
# first chunk
assert tr_id not in self.tr_info_map
self.tr_info_map[tr_id] = [TrInfoChunk(num_chunks, chunk_num, k, v)]
self.tr_info_map[tr_id] = [
TrInfoChunk(num_chunks, chunk_num, k, v)
]
self.num_chunks_stored += 1
self._check_and_adjust_chunk_cache_size()
else:
if tr_id not in self.tr_info_map:
logger.error(
"Got a middle chunk without getting beginning part. Discarding transaction id: %s\n" % tr_id)
"Got a middle chunk without getting beginning part. Discarding transaction id: %s\n"
% tr_id
)
continue
c_list = self.tr_info_map[tr_id]
if c_list[-1].num_chunks != num_chunks or c_list[-1].chunk_num != chunk_num - 1:
if (
c_list[-1].num_chunks != num_chunks
or c_list[-1].chunk_num != chunk_num - 1
):
self.tr_info_map.pop(tr_id)
self.num_chunks_stored -= len(c_list)
raise Exception("Chunk numbers do not match for Transaction id: %s" % tr_id)
raise Exception(
"Chunk numbers do not match for Transaction id: %s"
% tr_id
)
c_list.append(TrInfoChunk(num_chunks, chunk_num, k, v))
self.num_chunks_stored += 1
if num_chunks == chunk_num:
self.tr_info_map.pop(tr_id)
self.num_chunks_stored -= len(c_list)
try:
info = build_client_transaction_info(b''.join([chunk.value for chunk in c_list]))
info = build_client_transaction_info(
b"".join([chunk.value for chunk in c_list])
)
if info.has_types():
buffer.append(info)
except UnsupportedProtocolVersionError:
@ -528,7 +601,10 @@ class TransactionInfoLoader(object):
transaction_infos += 1
self._check_and_adjust_chunk_cache_size()
if transaction_infos % 1000 == 0:
print("Processed %d transactions, %d invalid" % (transaction_infos, invalid_transaction_infos))
print(
"Processed %d transactions, %d invalid"
% (transaction_infos, invalid_transaction_infos)
)
if found == 0:
more = False
except fdb.FDBError as e:
@ -540,12 +616,16 @@ class TransactionInfoLoader(object):
for item in buffer:
yield item
print("Processed %d transactions, %d invalid\n" % (transaction_infos, invalid_transaction_infos))
print(
"Processed %d transactions, %d invalid\n"
% (transaction_infos, invalid_transaction_infos)
)
def has_sortedcontainers():
try:
import sortedcontainers
return True
except ImportError:
logger.warn("Can't find sortedcontainers so disabling ReadCounter")
@ -555,6 +635,7 @@ def has_sortedcontainers():
def has_dateparser():
try:
import dateparser
return True
except ImportError:
logger.warn("Can't find dateparser so disabling human date parsing")
@ -564,8 +645,9 @@ def has_dateparser():
class ReadCounter(object):
def __init__(self):
from sortedcontainers import SortedDict
self.reads = SortedDict()
self.reads[b''] = [0, 0]
self.reads[b""] = [0, 0]
self.read_counts = {}
self.hit_count = 0
@ -574,7 +656,9 @@ class ReadCounter(object):
for get in transaction_info.gets:
self._insert_read(get.key, None)
for get_range in transaction_info.get_ranges:
self._insert_read(get_range.key_range.start_key, get_range.key_range.end_key)
self._insert_read(
get_range.key_range.start_key, get_range.key_range.end_key
)
def _insert_read(self, start_key, end_key):
self.read_counts.setdefault((start_key, end_key), 0)
@ -584,7 +668,7 @@ class ReadCounter(object):
if end_key is not None:
self.reads.setdefault(end_key, [0, 0])[1] += 1
else:
self.reads.setdefault(start_key + b'\x00', [0, 0])[1] += 1
self.reads.setdefault(start_key + b"\x00", [0, 0])[1] += 1
def get_total_reads(self):
return sum([v for v in self.read_counts.values()])
@ -596,22 +680,32 @@ class ReadCounter(object):
return True
def get_top_k_reads(self, num, filter_addresses, shard_finder=None):
count_pairs = sorted([(v, k) for (k, v) in self.read_counts.items()], reverse=True, key=lambda item: item[0])
count_pairs = sorted(
[(v, k) for (k, v) in self.read_counts.items()],
reverse=True,
key=lambda item: item[0],
)
if not filter_addresses:
count_pairs = count_pairs[0:num]
if shard_finder:
results = []
for (count, (start, end)) in count_pairs:
results.append((start, end, count, shard_finder.get_addresses_for_key(start)))
results.append(
(start, end, count, shard_finder.get_addresses_for_key(start))
)
shard_finder.wait_for_shard_addresses(results, 0, 3)
if filter_addresses:
filter_addresses = set(filter_addresses)
results = [r for r in results if filter_addresses.issubset(set(r[3]))][0:num]
results = [r for r in results if filter_addresses.issubset(set(r[3]))][
0:num
]
else:
results = [(start, end, count) for (count, (start, end)) in count_pairs[0:num]]
results = [
(start, end, count) for (count, (start, end)) in count_pairs[0:num]
]
return results
@ -630,9 +724,13 @@ class ReadCounter(object):
addresses = shard_finder.get_addresses_for_key(start)
else:
addresses = None
output_range_counts.append((start, end, started_count, total_count, shard_count, addresses))
output_range_counts.append(
(start, end, started_count, total_count, shard_count, addresses)
)
else:
output_range_counts.append((start, end, started_count, total_count, None, None))
output_range_counts.append(
(start, end, started_count, total_count, None, None)
)
this_range_start_key = None
last_end = None
@ -644,7 +742,9 @@ class ReadCounter(object):
open_count -= end_count
if opened_this_range >= range_size:
add_boundary(this_range_start_key, start_key, opened_this_range, count_this_range)
add_boundary(
this_range_start_key, start_key, opened_this_range, count_this_range
)
count_this_range = open_count
opened_this_range = 0
this_range_start_key = None
@ -660,9 +760,11 @@ class ReadCounter(object):
last_end = start_key
if last_end is None:
last_end = b'\xff'
last_end = b"\xff"
if count_this_range > 0:
add_boundary(this_range_start_key, last_end, opened_this_range, count_this_range)
add_boundary(
this_range_start_key, last_end, opened_this_range, count_this_range
)
shard_finder.wait_for_shard_addresses(output_range_counts, 0, 5)
return output_range_counts
@ -677,7 +779,7 @@ class ShardFinder(object):
self.refresh_tr()
self.outstanding = []
self.boundary_keys = list(fdb.locality.get_boundary_keys(db, b'', b'\xff\xff'))
self.boundary_keys = list(fdb.locality.get_boundary_keys(db, b"", b"\xff\xff"))
self.shard_cache = {}
def _get_boundary_keys(self, begin, end):
@ -734,16 +836,24 @@ class ShardFinder(object):
if item[addr_idx] is not None:
while True:
try:
ranges[index] = item[0:addr_idx] + ([a.decode('ascii') for a in item[addr_idx].wait()],) \
+ item[addr_idx + 1:]
ranges[index] = (
item[0:addr_idx]
+ ([a.decode("ascii") for a in item[addr_idx].wait()],)
+ item[addr_idx + 1 :]
)
break
except fdb.FDBError:
ranges[index] = item[0:addr_idx] + (self.get_addresses_for_key(item[key_idx]),) \
+ item[addr_idx + 1:]
ranges[index] = (
item[0:addr_idx]
+ (self.get_addresses_for_key(item[key_idx]),)
+ item[addr_idx + 1 :]
)
class WriteCounter(object):
mutation_types_to_consider = frozenset([MutationType.SET_VALUE, MutationType.ADD_VALUE])
mutation_types_to_consider = frozenset(
[MutationType.SET_VALUE, MutationType.ADD_VALUE]
)
def __init__(self):
self.writes = defaultdict(lambda: 0)
@ -767,7 +877,9 @@ class WriteCounter(object):
addresses = shard_finder.get_addresses_for_key(start)
else:
addresses = None
output_range_counts.append((start, end, count, None, shard_count, addresses))
output_range_counts.append(
(start, end, count, None, shard_count, addresses)
)
else:
output_range_counts.append((start, end, count, None, None, None))
@ -798,13 +910,17 @@ class WriteCounter(object):
if shard_finder:
results = []
for (count, key) in count_pairs:
results.append((key, None, count, shard_finder.get_addresses_for_key(key)))
results.append(
(key, None, count, shard_finder.get_addresses_for_key(key))
)
shard_finder.wait_for_shard_addresses(results, 0, 3)
if filter_addresses:
filter_addresses = set(filter_addresses)
results = [r for r in results if filter_addresses.issubset(set(r[3]))][0:num]
results = [r for r in results if filter_addresses.issubset(set(r[3]))][
0:num
]
else:
results = [(key, None, count) for (count, key) in count_pairs[0:num]]
@ -819,40 +935,101 @@ def connect(cluster_file=None):
def main():
parser = argparse.ArgumentParser(description="TransactionProfilingAnalyzer")
parser.add_argument("-C", "--cluster-file", type=str, help="Cluster file")
parser.add_argument("--full-output", action="store_true", help="Print full output from mutations")
parser.add_argument("--filter-get-version", action="store_true",
help="Include get_version type. If no filter args are given all will be returned.")
parser.add_argument("--filter-get", action="store_true",
help="Include get type. If no filter args are given all will be returned.")
parser.add_argument("--filter-get-range", action="store_true",
help="Include get_range type. If no filter args are given all will be returned.")
parser.add_argument("--filter-reads", action="store_true",
help="Include get and get_range type. If no filter args are given all will be returned.")
parser.add_argument("--filter-commit", action="store_true",
help="Include commit type. If no filter args are given all will be returned.")
parser.add_argument("--filter-error-get", action="store_true",
help="Include error_get type. If no filter args are given all will be returned.")
parser.add_argument("--filter-error-get-range", action="store_true",
help="Include error_get_range type. If no filter args are given all will be returned.")
parser.add_argument("--filter-error-commit", action="store_true",
help="Include error_commit type. If no filter args are given all will be returned.")
parser.add_argument(
"--full-output", action="store_true", help="Print full output from mutations"
)
parser.add_argument(
"--filter-get-version",
action="store_true",
help="Include get_version type. If no filter args are given all will be returned.",
)
parser.add_argument(
"--filter-get",
action="store_true",
help="Include get type. If no filter args are given all will be returned.",
)
parser.add_argument(
"--filter-get-range",
action="store_true",
help="Include get_range type. If no filter args are given all will be returned.",
)
parser.add_argument(
"--filter-reads",
action="store_true",
help="Include get and get_range type. If no filter args are given all will be returned.",
)
parser.add_argument(
"--filter-commit",
action="store_true",
help="Include commit type. If no filter args are given all will be returned.",
)
parser.add_argument(
"--filter-error-get",
action="store_true",
help="Include error_get type. If no filter args are given all will be returned.",
)
parser.add_argument(
"--filter-error-get-range",
action="store_true",
help="Include error_get_range type. If no filter args are given all will be returned.",
)
parser.add_argument(
"--filter-error-commit",
action="store_true",
help="Include error_commit type. If no filter args are given all will be returned.",
)
start_time_group = parser.add_mutually_exclusive_group()
start_time_group.add_argument("--min-timestamp", type=int, help="Don't return events older than this epoch time")
start_time_group.add_argument("-s", "--start-time", type=str,
help="Don't return events older than this parsed time")
start_time_group.add_argument(
"--min-timestamp",
type=int,
help="Don't return events older than this epoch time",
)
start_time_group.add_argument(
"-s",
"--start-time",
type=str,
help="Don't return events older than this parsed time",
)
end_time_group = parser.add_mutually_exclusive_group()
end_time_group.add_argument("--max-timestamp", type=int, help="Don't return events newer than this epoch time")
end_time_group.add_argument("-e", "--end-time", type=str, help="Don't return events older than this parsed time")
parser.add_argument("--num-buckets", type=int,
help="The number of buckets to partition the key-space into for operation counts", default=100)
parser.add_argument("--top-requests", type=int,
help="If specified will output this many top keys for reads or writes", default=0)
parser.add_argument("--exclude-ports", action="store_true",
help="Print addresses without the port number. Only works in versions older than 6.3, and is required in versions older than 6.2.")
parser.add_argument("--single-shard-ranges-only", action="store_true",
help="Only print range boundaries that exist in a single shard")
parser.add_argument("-a", "--filter-address", action="append",
help="Only print range boundaries that include the given address. This option can used multiple times to include more than one address in the filter, in which case all addresses must match.")
end_time_group.add_argument(
"--max-timestamp",
type=int,
help="Don't return events newer than this epoch time",
)
end_time_group.add_argument(
"-e",
"--end-time",
type=str,
help="Don't return events older than this parsed time",
)
parser.add_argument(
"--num-buckets",
type=int,
help="The number of buckets to partition the key-space into for operation counts",
default=100,
)
parser.add_argument(
"--top-requests",
type=int,
help="If specified will output this many top keys for reads or writes",
default=0,
)
parser.add_argument(
"--exclude-ports",
action="store_true",
help="Print addresses without the port number. Only works in versions older than 6.3, and is required in versions older than 6.2.",
)
parser.add_argument(
"--single-shard-ranges-only",
action="store_true",
help="Only print range boundaries that exist in a single shard",
)
parser.add_argument(
"-a",
"--filter-address",
action="append",
help="Only print range boundaries that include the given address. This option can used multiple times to include more than one address in the filter, in which case all addresses must match.",
)
args = parser.parse_args()
@ -872,13 +1049,15 @@ def main():
if args.filter_error_commit:
type_filter.add("error_commit")
if (not type_filter or "commit" in type_filter):
if not type_filter or "commit" in type_filter:
write_counter = WriteCounter() if args.num_buckets else None
else:
write_counter = None
if (not type_filter or "get" in type_filter or "get_range" in type_filter):
read_counter = ReadCounter() if (has_sortedcontainers() and args.num_buckets) else None
if not type_filter or "get" in type_filter or "get_range" in type_filter:
read_counter = (
ReadCounter() if (has_sortedcontainers() and args.num_buckets) else None
)
else:
read_counter = None
@ -890,6 +1069,7 @@ def main():
if not has_dateparser():
raise Exception("Can't find dateparser needed to parse human dates")
import dateparser
min_timestamp = int(dateparser.parse(args.start_time).timestamp())
else:
raise Exception("Must specify start time")
@ -900,21 +1080,31 @@ def main():
if not has_dateparser():
raise Exception("Can't find dateparser needed to parse human dates")
import dateparser
max_timestamp = int(dateparser.parse(args.end_time).timestamp())
else:
raise Exception("Must specify end time")
now = time.time()
if max_timestamp > now:
raise Exception("max_timestamp is %d seconds in the future" % (max_timestamp - now))
raise Exception(
"max_timestamp is %d seconds in the future" % (max_timestamp - now)
)
if min_timestamp > now:
raise Exception("min_timestamp is %d seconds in the future" % (min_timestamp - now))
raise Exception(
"min_timestamp is %d seconds in the future" % (min_timestamp - now)
)
logger.info("Loading transactions from %d to %d" % (min_timestamp, max_timestamp))
db = connect(cluster_file=args.cluster_file)
loader = TransactionInfoLoader(db, full_output=full_output, type_filter=type_filter,
min_timestamp=min_timestamp, max_timestamp=max_timestamp)
loader = TransactionInfoLoader(
db,
full_output=full_output,
type_filter=type_filter,
min_timestamp=min_timestamp,
max_timestamp=max_timestamp,
)
for info in loader.fetch_transaction_info():
if info.has_types():
@ -932,12 +1122,21 @@ def main():
for (idx, (start, end, count, addresses)) in enumerate(top):
running_count += count
if end is not None:
op_str = 'Range %r - %r' % (start, end)
op_str = "Range %r - %r" % (start, end)
else:
op_str = 'Key %r' % start
op_str = "Key %r" % start
print(" %d. %s\n %d sampled %s (%.2f%%, %.2f%% cumulative)" % (
idx + 1, op_str, count, context, 100 * count / total, 100 * running_count / total))
print(
" %d. %s\n %d sampled %s (%.2f%%, %.2f%% cumulative)"
% (
idx + 1,
op_str,
count,
context,
100 * count / total,
100 * running_count / total,
)
)
print(" shard addresses: %s\n" % ", ".join(addresses))
else:
@ -945,8 +1144,15 @@ def main():
def print_range_boundaries(range_boundaries, context):
omit_start = None
for (idx, (start, end, start_count, total_count, shard_count, addresses)) in enumerate(range_boundaries):
omit = args.single_shard_ranges_only and shard_count is not None and shard_count > 1
for (
idx,
(start, end, start_count, total_count, shard_count, addresses),
) in enumerate(range_boundaries):
omit = (
args.single_shard_ranges_only
and shard_count is not None
and shard_count > 1
)
if args.filter_address:
if not addresses:
omit = True
@ -965,15 +1171,30 @@ def main():
omit_start = None
if total_count is None:
count_str = '%d sampled %s' % (start_count, context)
count_str = "%d sampled %s" % (start_count, context)
else:
count_str = '%d sampled %s (%d intersecting)' % (start_count, context, total_count)
count_str = "%d sampled %s (%d intersecting)" % (
start_count,
context,
total_count,
)
if not shard_count:
print(" %d. [%s, %s]\n %s\n" % (idx + 1, start, end, count_str))
else:
addresses_string = "; addresses=%s" % ', '.join(addresses) if addresses else ''
print(" %d. [%s, %s]\n %s spanning %d shard(s)%s\n" % (
idx + 1, start, end, count_str, shard_count, addresses_string))
addresses_string = (
"; addresses=%s" % ", ".join(addresses) if addresses else ""
)
print(
" %d. [%s, %s]\n %s spanning %d shard(s)%s\n"
% (
idx + 1,
start,
end,
count_str,
shard_count,
addresses_string,
)
)
elif omit_start is None:
omit_start = idx
@ -985,14 +1206,19 @@ def main():
shard_finder = ShardFinder(db, args.exclude_ports)
print("NOTE: shard locations are current and may not reflect where an operation was performed in the past\n")
print(
"NOTE: shard locations are current and may not reflect where an operation was performed in the past\n"
)
if write_counter:
if args.top_requests:
top_writes = write_counter.get_top_k_writes(args.top_requests, args.filter_address,
shard_finder=shard_finder)
top_writes = write_counter.get_top_k_writes(
args.top_requests, args.filter_address, shard_finder=shard_finder
)
range_boundaries = write_counter.get_range_boundaries(args.num_buckets, shard_finder=shard_finder)
range_boundaries = write_counter.get_range_boundaries(
args.num_buckets, shard_finder=shard_finder
)
num_writes = write_counter.get_total_writes()
if args.top_requests or range_boundaries:
@ -1018,9 +1244,13 @@ def main():
if read_counter:
if args.top_requests:
top_reads = read_counter.get_top_k_reads(args.top_requests, args.filter_address, shard_finder=shard_finder)
top_reads = read_counter.get_top_k_reads(
args.top_requests, args.filter_address, shard_finder=shard_finder
)
range_boundaries = read_counter.get_range_boundaries(args.num_buckets, shard_finder=shard_finder)
range_boundaries = read_counter.get_range_boundaries(
args.num_buckets, shard_finder=shard_finder
)
num_reads = read_counter.get_total_reads()
if args.top_requests or range_boundaries:

View File

@ -50,9 +50,9 @@ class RangeCounterTest(unittest.TestCase):
rc._insert_range("a", "b")
rc._insert_range("b", "c")
assert rc.ranges == SortedDict({"a": ("b", 1), "b": ("c", 1)}), rc.ranges
assert rc.get_count_for_key('a') == 1
assert rc.get_count_for_key('b') == 1
assert rc.get_count_for_key('c') == 0
assert rc.get_count_for_key("a") == 1
assert rc.get_count_for_key("b") == 1
assert rc.get_count_for_key("c") == 0
def test_two_duplicates(self):
rc = RangeCounter(1)
@ -64,25 +64,33 @@ class RangeCounterTest(unittest.TestCase):
rc = RangeCounter(1)
rc._insert_range("b", "c")
rc._insert_range("a", "d")
assert rc.ranges == SortedDict({"a": ("b", 1), "b": ("c", 2), "c": ("d", 1)}), rc.ranges
assert rc.ranges == SortedDict(
{"a": ("b", 1), "b": ("c", 2), "c": ("d", 1)}
), rc.ranges
def test_wholly_inside(self):
rc = RangeCounter(1)
rc._insert_range("a", "d")
rc._insert_range("b", "c")
assert rc.ranges == SortedDict({"a": ("b", 1), "b": ("c", 2), "c": ("d", 1)}), rc.ranges
assert rc.ranges == SortedDict(
{"a": ("b", 1), "b": ("c", 2), "c": ("d", 1)}
), rc.ranges
def test_intersect_before(self):
rc = RangeCounter(1)
rc._insert_range("b", "d")
rc._insert_range("a", "c")
assert rc.ranges == SortedDict({"a": ("b", 1), "b": ("c", 2), "c": ("d", 1)}), rc.ranges
assert rc.ranges == SortedDict(
{"a": ("b", 1), "b": ("c", 2), "c": ("d", 1)}
), rc.ranges
def test_intersect_after(self):
rc = RangeCounter(1)
rc._insert_range("a", "c")
rc._insert_range("b", "d")
assert rc.ranges == SortedDict({"a": ("b", 1), "b": ("c", 2), "c": ("d", 1)}), rc.ranges
assert rc.ranges == SortedDict(
{"a": ("b", 1), "b": ("c", 2), "c": ("d", 1)}
), rc.ranges
def test_wide(self):
rc = RangeCounter(1)
@ -90,7 +98,17 @@ class RangeCounterTest(unittest.TestCase):
rc._insert_range("e", "g")
rc._insert_range("i", "k")
rc._insert_range("b", "j")
assert rc.ranges == SortedDict({"a": ("b", 1), "b": ("c", 2), "c": ("e", 1), "e": ("g", 2), "g": ("i", 1), "i": ("j", 2), "j": ("k", 1)}), rc.ranges
assert rc.ranges == SortedDict(
{
"a": ("b", 1),
"b": ("c", 2),
"c": ("e", 1),
"e": ("g", 2),
"g": ("i", 1),
"i": ("j", 2),
"j": ("k", 1),
}
), rc.ranges
def test_random(self):
letters = string.ascii_lowercase
@ -102,7 +120,11 @@ class RangeCounterTest(unittest.TestCase):
def test_correct():
for (k, v) in count_dict.items():
rc_count = rc.get_count_for_key(k)
assert rc_count == v, "Counts for %s mismatch. Expected %d got %d" % (k, v, rc_count)
assert rc_count == v, "Counts for %s mismatch. Expected %d got %d" % (
k,
v,
rc_count,
)
for _ in range(0, 100):
i = random.randint(0, len(letters) - 1)

View File

@ -17,44 +17,47 @@ import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('extensions'))
sys.path.insert(0, os.path.abspath("extensions"))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.ifconfig',
'brokenrole',
'relativelink',
'rubydomain',
"sphinx.ext.intersphinx",
"sphinx.ext.todo",
"sphinx.ext.ifconfig",
"brokenrole",
"relativelink",
"rubydomain",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = []
# The suffix of source filenames.
source_suffix = '.rst'
source_suffix = ".rst"
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
master_doc = "index"
# General information about the project.
project = u'FoundationDB'
copyright = u'2013-2022 Apple, Inc and the FoundationDB project authors'
project = "FoundationDB"
copyright = "2013-2022 Apple, Inc and the FoundationDB project authors"
version_path = os.path.join(os.path.dirname(sys.executable), '..', '..', '..', 'versions.target')
version_path = os.path.join(
os.path.dirname(sys.executable), "..", "..", "..", "versions.target"
)
if os.path.exists(version_path):
# Load the version information from 'versions.target'
import xml.etree.ElementTree as ET
tree = ET.parse(version_path)
root = tree.getroot()
@ -63,10 +66,14 @@ if os.path.exists(version_path):
# built documents.
#
# The short X.Y version.
version = root.find(".//{http://schemas.microsoft.com/developer/msbuild/2003}PackageName").text
version = root.find(
".//{http://schemas.microsoft.com/developer/msbuild/2003}PackageName"
).text
# The full version, including alpha/beta/rc tags.
# FoundationDB special note: also see guide-common.rst.inc and update the link to the EC2 template
release = root.find(".//{http://schemas.microsoft.com/developer/msbuild/2003}Version").text
release = root.find(
".//{http://schemas.microsoft.com/developer/msbuild/2003}Version"
).text
else:
# Version and release will be overridden by sphinx command line
version = None
@ -75,34 +82,34 @@ else:
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = 'en'
language = "en"
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# show_authors = False
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# modindex_common_prefix = []
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
@ -111,7 +118,7 @@ todo_include_todos = False
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'bootstrap'
html_theme = "bootstrap"
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
@ -120,16 +127,16 @@ html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'globaltoc_depth': 2,
'globaltoc_includehidden': "true",
'navbar_links': [
"globaltoc_depth": 2,
"globaltoc_includehidden": "true",
"navbar_links": [
("Site Map", "contents"),
],
'source_link_position': "footer",
"source_link_position": "footer",
}
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
@ -137,7 +144,7 @@ html_theme_options = {
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
@ -146,21 +153,21 @@ html_static_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
html_last_updated_fmt = "%b %d, %Y"
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': ['localtoc.html'],
'contents': [],
"**": ["localtoc.html"],
"contents": [],
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
@ -169,10 +176,10 @@ html_domain_indices = False
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
@ -181,10 +188,10 @@ html_show_sphinx = False
html_show_copyright = True
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'FoundationDB'
htmlhelp_basename = "FoundationDB"
# Disable permalinks
html_permalinks = False
@ -193,42 +200,45 @@ html_permalinks = False
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'FoundationDB.tex', u'FoundationDB Documentation',
u'FoundationDB', 'manual'),
(
"index",
"FoundationDB.tex",
"FoundationDB Documentation",
"FoundationDB",
"manual",
),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
@ -236,12 +246,11 @@ latex_documents = [
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'foundationdb', u'FoundationDB Documentation',
[u'FoundationDB'], 1)
("index", "foundationdb", "FoundationDB Documentation", ["FoundationDB"], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
@ -250,19 +259,25 @@ man_pages = [
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'FoundationDB', u'FoundationDB Documentation',
u'FoundationDB', 'FoundationDB', 'One line description of project.',
'Miscellaneous'),
(
"index",
"FoundationDB",
"FoundationDB Documentation",
"FoundationDB",
"FoundationDB",
"One line description of project.",
"Miscellaneous",
),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# texinfo_show_urls = 'footnote'
########### Check for inappropriate use of the default role ##########

View File

@ -18,12 +18,14 @@
# limitations under the License.
#
def setup(app):
app.add_role('broken', broken_role)
app.add_role("broken", broken_role)
def broken_role(name, rawtext, text, lineno, inliner, options=None, content=None):
options = options or {}
content = content or []
msg = inliner.reporter.error('Broken role invoked', line=lineno)
prb = inliner.problematic(rawtext,rawtext,msg)
return [prb],[msg]
msg = inliner.reporter.error("Broken role invoked", line=lineno)
prb = inliner.problematic(rawtext, rawtext, msg)
return [prb], [msg]

View File

@ -29,20 +29,41 @@ from sphinx.addnodes import toctree
# Name <relative/path>_
def setup(app):
import sphinx.environment
from docutils import nodes
old_resolve = sphinx.environment.BuildEnvironment.resolve_toctree
def resolve_toctree(self, docname, builder, toctree, prune=True, maxdepth=0,
titles_only=False, collapse=False, includehidden=False):
result = old_resolve(self, docname, builder, toctree, prune=True, maxdepth=0,
titles_only=False, collapse=False, includehidden=False)
def resolve_toctree(
self,
docname,
builder,
toctree,
prune=True,
maxdepth=0,
titles_only=False,
collapse=False,
includehidden=False,
):
result = old_resolve(
self,
docname,
builder,
toctree,
prune=True,
maxdepth=0,
titles_only=False,
collapse=False,
includehidden=False,
)
if result == None:
return result
for node in result.traverse( nodes.reference ):
if not node['internal'] and node['refuri'].startswith("relative://"):
node['refuri'] = node['refuri'][len("relative://"):]
for node in result.traverse(nodes.reference):
if not node["internal"] and node["refuri"].startswith("relative://"):
node["refuri"] = node["refuri"][len("relative://") :]
return result
sphinx.environment.BuildEnvironment.resolve_toctree = resolve_toctree

View File

@ -53,9 +53,7 @@ def run_fdbcli_command(*args):
string: Console output from fdbcli
"""
commands = command_template + ["{}".format(" ".join(args))]
process = subprocess.run(
commands, stdout=subprocess.PIPE, env=fdbcli_env
)
process = subprocess.run(commands, stdout=subprocess.PIPE, env=fdbcli_env)
return process.stdout.decode("utf-8").strip()
@ -1081,7 +1079,7 @@ def tenant_get(logger):
assert lines[3].strip() == "lock state: unlocked"
# id = lines[0].strip().removeprefix("id: ")
# Workaround until Python 3.9+ for removeprefix
id = lines[0].strip()[len("id: "):]
id = lines[0].strip()[len("id: ") :]
id_output = run_fdbcli_command("tenant getId {}".format(id))
assert id_output == output
@ -1117,7 +1115,7 @@ def tenant_get(logger):
assert lines[4].strip() == "tenant group: tenant_group2"
# id2 = lines[0].strip().removeprefix("id: ")
# Workaround until Python 3.9+ for removeprefix
id2 = lines[0].strip()[len("id: "):]
id2 = lines[0].strip()[len("id: ") :]
id_output = run_fdbcli_command("tenant getId {}".format(id2))
assert id_output == output
@ -1144,6 +1142,7 @@ def tenant_get(logger):
id_output = run_fdbcli_command("tenant getId {} JSON".format(id2))
assert id_output == output
@enable_logging()
def tenant_configure(logger):
setup_tenants(["tenant"])

View File

@ -34,7 +34,7 @@ class Context:
return self.random.randint(100000, 999999)
class InfiniteLoop (Exception):
class InfiniteLoop(Exception):
pass
@ -69,7 +69,7 @@ def indent(cx):
return "\t" * cx.indent
class F (object):
class F(object):
def unreachable(self):
return False
@ -77,7 +77,7 @@ class F (object):
return False
class hashF (F):
class hashF(F):
def __init__(self, cx):
self.cx = cx
self.uniqueID = cx.uniqueID()
@ -116,7 +116,7 @@ class compoundF(F):
return any(c.containsbreak() for c in self.children)
class loopF (F):
class loopF(F):
def __init__(self, cx):
self.cx = cx
ccx = copy.copy(cx)
@ -128,13 +128,18 @@ class loopF (F):
def __str__(self):
if self.forever:
return (indent(self.cx) + "loop {\n" +
str(self.body) +
indent(self.cx) + "}\n")
return (
indent(self.cx) + "loop {\n" + str(self.body) + indent(self.cx) + "}\n"
)
else:
return (indent(self.cx) + "state int i%d; for(i%d = 0; i%d < 5; i%d++) {\n" % ((self.uniqueID,) * 4) +
str(self.body) +
indent(self.cx) + "}\n")
return (
indent(self.cx)
+ "state int i%d; for(i%d = 0; i%d < 5; i%d++) {\n"
% ((self.uniqueID,) * 4)
+ str(self.body)
+ indent(self.cx)
+ "}\n"
)
def eval(self, ecx):
if self.forever:
@ -159,7 +164,7 @@ class loopF (F):
return self.forever and not self.body.containsbreak()
class rangeForF (F):
class rangeForF(F):
def __init__(self, cx):
self.cx = cx
ccx = copy.copy(cx)
@ -170,17 +175,24 @@ class rangeForF (F):
def __str__(self):
return (
indent(self.cx) +
("\n" + indent(self.cx)).join([
"state std::vector<int> V;",
"V.push_back(1);",
"V.push_back(2);",
"V.push_back(3);",
"for( auto i : V ) {\n",
]).replace("V", "list%d" % self.uniqueID) +
indent(self.cx) + "\t(void)i;\n" + # Suppress -Wunused-variable warning in generated code
str(self.body) +
indent(self.cx) + "}\n")
indent(self.cx)
+ ("\n" + indent(self.cx))
.join(
[
"state std::vector<int> V;",
"V.push_back(1);",
"V.push_back(2);",
"V.push_back(3);",
"for( auto i : V ) {\n",
]
)
.replace("V", "list%d" % self.uniqueID)
+ indent(self.cx)
+ "\t(void)i;\n"
+ str(self.body) # Suppress -Wunused-variable warning in generated code
+ indent(self.cx)
+ "}\n"
)
def eval(self, ecx):
for i in range(1, 4):
@ -196,7 +208,7 @@ class rangeForF (F):
return False
class ifF (F):
class ifF(F):
def __init__(self, cx):
self.cx = cx
ccx = copy.copy(cx)
@ -206,16 +218,20 @@ class ifF (F):
if cx.random.random() < 0.5:
ccx = copy.copy(cx)
ccx.indent += 1
self.elsebody = compoundF(ccx, [hashF(ccx)] + [fuzzCode(ccx)(ccx)] + [hashF(ccx)])
self.elsebody = compoundF(
ccx, [hashF(ccx)] + [fuzzCode(ccx)(ccx)] + [hashF(ccx)]
)
else:
self.elsebody = None
def __str__(self):
s = (indent(self.cx) + "if ( (++ifstate&1) == %d ) {\n" % self.toggle +
str(self.ifbody))
s = (
indent(self.cx)
+ "if ( (++ifstate&1) == %d ) {\n" % self.toggle
+ str(self.ifbody)
)
if self.elsebody:
s += (indent(self.cx) + "} else {\n" +
str(self.elsebody))
s += indent(self.cx) + "} else {\n" + str(self.elsebody)
s += indent(self.cx) + "}\n"
return s
@ -230,13 +246,17 @@ class ifF (F):
return OK
def unreachable(self):
return self.elsebody and self.ifbody.unreachable() and self.elsebody.unreachable()
return (
self.elsebody and self.ifbody.unreachable() and self.elsebody.unreachable()
)
def containsbreak(self):
return self.ifbody.containsbreak() or (self.elsebody and self.elsebody.containsbreak())
return self.ifbody.containsbreak() or (
self.elsebody and self.elsebody.containsbreak()
)
class tryF (F):
class tryF(F):
def __init__(self, cx):
self.cx = cx
ccx = copy.copy(cx)
@ -247,12 +267,16 @@ class tryF (F):
self.catch = compoundF(ccx, [hashF(ccx)] + [fuzzCode(ccx)(ccx)] + [hashF(ccx)])
def __str__(self):
return (indent(self.cx) + "try {\n" +
str(self.body) +
indent(self.cx) + "} catch (...) {\n" +
str(self.catch) +
indent(self.cx) + "}\n"
)
return (
indent(self.cx)
+ "try {\n"
+ str(self.body)
+ indent(self.cx)
+ "} catch (...) {\n"
+ str(self.catch)
+ indent(self.cx)
+ "}\n"
)
def eval(self, ecx):
ecx.infinityCheck()
@ -312,14 +336,16 @@ class waitF(F):
def __str__(self):
return (
indent(self.cx) + "int input = waitNext( inputStream );\n" +
indent(self.cx) + "outputStream.send( input + %d );\n" % self.uniqueID
indent(self.cx)
+ "int input = waitNext( inputStream );\n"
+ indent(self.cx)
+ "outputStream.send( input + %d );\n" % self.uniqueID
)
def eval(self, ecx):
ecx.infinityCheck()
input = ecx.inp()
ecx.out((input + self.uniqueID) & 0xffffffff)
ecx.out((input + self.uniqueID) & 0xFFFFFFFF)
return OK
@ -343,7 +369,7 @@ class throwF2(throwF):
return indent(self.cx) + "throw_operation_failed();\n"
def unreachable(self):
return False # The actor compiler doesn't know the function never returns
return False # The actor compiler doesn't know the function never returns
class throwF3(throwF):
@ -351,7 +377,7 @@ class throwF3(throwF):
return indent(self.cx) + "wait( error ); // throw operation_failed()\n"
def unreachable(self):
return False # The actor compiler doesn't know that 'error' always contains an error
return False # The actor compiler doesn't know that 'error' always contains an error
class returnF(F):
@ -373,10 +399,10 @@ class returnF(F):
def fuzzCode(cx):
choices = [loopF, rangeForF, tryF, doubleF, ifF]
if (cx.indent < 2):
if cx.indent < 2:
choices = choices * 2
choices += [waitF, returnF]
if (cx.inLoop):
if cx.inLoop:
choices += [breakF, continueF]
choices = choices * 3 + [throwF, throwF2, throwF3]
return cx.random.choice(choices)
@ -387,12 +413,17 @@ def randomActor(index):
cx = Context()
cx.indent += 1
actor = fuzzCode(cx)(cx)
actor = compoundF(cx, [actor, returnF(cx)]) # Add a return at the end if the end is reachable
actor = compoundF(
cx, [actor, returnF(cx)]
) # Add a return at the end if the end is reachable
name = "actorFuzz%d" % index
text = ("ACTOR Future<int> %s( FutureStream<int> inputStream, PromiseStream<int> outputStream, Future<Void> error ) {\n" % name
+ "\tstate int ifstate = 0;\n"
+ str(actor)
+ "}")
text = (
"ACTOR Future<int> %s( FutureStream<int> inputStream, PromiseStream<int> outputStream, Future<Void> error ) {\n"
% name
+ "\tstate int ifstate = 0;\n"
+ str(actor)
+ "}"
)
ecx = actor.ecx = ExecContext((i + 1) * 1000 for i in range(1000000))
try:
result = actor.eval(ecx)
@ -411,7 +442,8 @@ def randomActor(index):
return actor
header='''
header = """
/*
* ActorFuzz.actor.cpp
*
@ -432,15 +464,18 @@ header='''
* limitations under the License.
*/
'''
"""
testCaseCount = 30
outputFile = open("ActorFuzz.actor.cpp", "wt")
print(header, file=outputFile)
print('// THIS FILE WAS GENERATED BY actorFuzz.py; DO NOT MODIFY IT DIRECTLY\n', file=outputFile)
print(
"// THIS FILE WAS GENERATED BY actorFuzz.py; DO NOT MODIFY IT DIRECTLY\n",
file=outputFile,
)
print('#include "fdbrpc/ActorFuzz.h"\n', file=outputFile)
print('#ifndef WIN32\n', file=outputFile)
print("#ifndef WIN32\n", file=outputFile)
actors = [randomActor(i) for i in range(testCaseCount)]
@ -449,8 +484,11 @@ for actor in actors:
print("std::pair<int,int> actorFuzzTests() {\n\tint testsOK = 0;", file=outputFile)
for actor in actors:
print('\ttestsOK += testFuzzActor( &%s, "%s", {%s} );' % (actor.name, actor.name, ','.join(str(e) for e in actor.ecx.output)),
file=outputFile)
print(
'\ttestsOK += testFuzzActor( &%s, "%s", {%s} );'
% (actor.name, actor.name, ",".join(str(e) for e in actor.ecx.output)),
file=outputFile,
)
print("\treturn std::make_pair(testsOK, %d);\n}" % len(actors), file=outputFile)
print('#endif // WIN32\n', file=outputFile)
print("#endif // WIN32\n", file=outputFile)
outputFile.close()

View File

@ -23,10 +23,12 @@ import fdb
app = Flask(__name__)
fdb.api_version(int(os.getenv('FDB_API_VERSION')))
fdb.api_version(int(os.getenv("FDB_API_VERSION")))
db = fdb.open()
COUNTER_KEY = fdb.tuple.pack(('counter',))
COUNTER_KEY = fdb.tuple.pack(("counter",))
def _increment_counter(tr):
counter_value = tr[COUNTER_KEY]
if counter_value == None:
@ -36,14 +38,16 @@ def _increment_counter(tr):
tr[COUNTER_KEY] = fdb.tuple.pack((counter,))
return counter
@app.route("/counter", methods=['GET'])
@app.route("/counter", methods=["GET"])
def get_counter():
counter_value = db[COUNTER_KEY]
if counter_value == None:
return '0'
return "0"
return str(fdb.tuple.unpack(counter_value)[0])
@app.route("/counter/increment", methods=['POST'])
@app.route("/counter/increment", methods=["POST"])
def increment_counter():
return str(_increment_counter(db))

View File

@ -29,18 +29,20 @@ import errno
import subprocess
import os
def invalidClusterFile(clusterFile):
print('ERROR: \'%s\' is not a valid cluster file' % clusterFile)
print("ERROR: '%s' is not a valid cluster file" % clusterFile)
sys.exit(1)
def getOrValidateAddress(address):
if address is None:
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(('www.foundationdb.org', 80))
s.connect(("www.foundationdb.org", 80))
return s.getsockname()[0]
except Exception as e:
print('ERROR: Could not determine an address')
print("ERROR: Could not determine an address")
exit(1)
else:
try:
@ -52,13 +54,14 @@ def getOrValidateAddress(address):
if e.errno == errno.EADDRINUSE:
return address
else:
print('ERROR: Address %s could not be bound' % address)
print("ERROR: Address %s could not be bound" % address)
exit(1)
def makePublic(clusterFile, newAddress, makeTLS):
newAddress = getOrValidateAddress(newAddress)
f = open(clusterFile, 'r')
f = open(clusterFile, "r")
clusterStr = None
for line in f:
line = line.strip()
@ -71,51 +74,79 @@ def makePublic(clusterFile, newAddress, makeTLS):
if clusterStr is None:
invalidClusterFile(clusterFile)
if not re.match('^[a-zA-Z0-9_]*:[a-zA-Z0-9]*@([0-9\\.]*:[0-9]*(:tls)?,)*[0-9\\.]*:[0-9]*(:tls)?$', clusterStr):
if not re.match(
"^[a-zA-Z0-9_]*:[a-zA-Z0-9]*@([0-9\\.]*:[0-9]*(:tls)?,)*[0-9\\.]*:[0-9]*(:tls)?$",
clusterStr,
):
invalidClusterFile(clusterFile)
if not re.match('^.*@(127\\.0\\.0\\.1:[0-9]*(:tls)?,)*127\\.0\\.0\\.1:[0-9]*(:tls)?$', clusterStr):
print('ERROR: Cannot modify cluster file whose coordinators are not at address 127.0.0.1')
if not re.match(
"^.*@(127\\.0\\.0\\.1:[0-9]*(:tls)?,)*127\\.0\\.0\\.1:[0-9]*(:tls)?$",
clusterStr,
):
print(
"ERROR: Cannot modify cluster file whose coordinators are not at address 127.0.0.1"
)
sys.exit(1)
f.close()
f = open(clusterFile, 'w')
clusterStr = clusterStr.replace('127.0.0.1', newAddress)
f = open(clusterFile, "w")
clusterStr = clusterStr.replace("127.0.0.1", newAddress)
if makeTLS:
clusterStr = re.sub("([0-9]),", "\\1:tls,", clusterStr)
if not clusterStr.endswith(":tls"):
clusterStr += ":tls"
f.write(clusterStr + '\n')
f.write(clusterStr + "\n")
f.close()
return newAddress, clusterStr.count(":tls") != 0
def restartServer():
subprocess.call(['service', 'foundationdb', 'restart'])
subprocess.call(["service", "foundationdb", "restart"])
if __name__ == '__main__':
if platform.system() != 'Linux':
print('ERROR: this script can only be run on Linux')
if __name__ == "__main__":
if platform.system() != "Linux":
print("ERROR: this script can only be run on Linux")
sys.exit(1)
parser = argparse.ArgumentParser(description='Converts a cluster with a local address to one with a public address')
parser.add_argument('-C', dest='clusterFile', type=str,
help='The cluster file to be converted. If unspecified, the cluster file at /etc/foundationdb/fdb.cluster is used.',
default='/etc/foundationdb/fdb.cluster')
parser.add_argument('-a', dest='address', type=str,
help='The new IP address to use. By default, an interface with access to the internet is chosen.')
parser.add_argument('-t', dest='tls', action='store_true', default=False,
help='Convert addresses without TLS enabled to accepting TLS connections.')
parser = argparse.ArgumentParser(
description="Converts a cluster with a local address to one with a public address"
)
parser.add_argument(
"-C",
dest="clusterFile",
type=str,
help="The cluster file to be converted. If unspecified, the cluster file at /etc/foundationdb/fdb.cluster is used.",
default="/etc/foundationdb/fdb.cluster",
)
parser.add_argument(
"-a",
dest="address",
type=str,
help="The new IP address to use. By default, an interface with access to the internet is chosen.",
)
parser.add_argument(
"-t",
dest="tls",
action="store_true",
default=False,
help="Convert addresses without TLS enabled to accepting TLS connections.",
)
args = parser.parse_args()
if os.geteuid() != 0:
print('ERROR: this script must be run as root')
print("ERROR: this script must be run as root")
sys.exit(1)
address, hasTLS = makePublic(args.clusterFile, args.address, args.tls)
restartServer()
print('%s is now using address %s%s' % (args.clusterFile, address, " (TLS enabled)" if hasTLS else ""))
print(
"%s is now using address %s%s"
% (args.clusterFile, address, " (TLS enabled)" if hasTLS else "")
)
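For reference, this is what the rewrite above does to a cluster string; the description, ID, new address, and ports below are made-up values for illustration:
import re

clusterStr = "test:abcd1234@127.0.0.1:4500,127.0.0.1:4501"   # hypothetical input
clusterStr = clusterStr.replace("127.0.0.1", "10.0.0.5")      # stand-in for newAddress
clusterStr = re.sub("([0-9]),", "\\1:tls,", clusterStr)       # the makeTLS branch
if not clusterStr.endswith(":tls"):
    clusterStr += ":tls"
print(clusterStr)  # test:abcd1234@10.0.0.5:4500:tls,10.0.0.5:4501:tls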

View File

@ -390,7 +390,7 @@ def run_simulation_test(basedir, options):
seed = int(options.seed, 0)
if options.test_number:
idx = int(options.test_number)
seed = ((seed + idx) % (2 ** 32 - 2)) + 1
seed = ((seed + idx) % (2**32 - 2)) + 1
pargs.append("{}".format(seed))
if options.testtype == "test":
pargs.append("-C")
@ -410,7 +410,7 @@ def run_simulation_test(basedir, options):
seed = int(options.seed, 0)
if options.test_number:
idx = int(options.test_number)
seed = ((seed + idx) % (2 ** 32 - 2)) + 1
seed = ((seed + idx) % (2**32 - 2)) + 1
wd = os.path.join(test_dir, "test_{}".format(options.name.replace("/", "_")))
os.mkdir(wd)
return_codes = {} # {command: return_code}
@ -434,7 +434,7 @@ def run_simulation_test(basedir, options):
if not first:
tmp.append("-R")
if seed is not None:
seed = (seed + 1) % (2 ** 32 - 2)
seed = (seed + 1) % (2**32 - 2)
first = False
if seed is not None:
tmp.append("-s")
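The only change in these hunks is black dropping the spaces around `**`; the arithmetic is unchanged. A worked example with a made-up base seed and test number shows how the derived seed stays inside [1, 2**32 - 2]:
seed, idx = 4294967290, 10           # hypothetical base seed and test number
derived = ((seed + idx) % (2**32 - 2)) + 1
print(derived)                       # (4294967300 % 4294967294) + 1 == 7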

View File

@ -3,16 +3,25 @@ from typing import List
import json
import time
def private_key_gen(kty: str, kid: str):
assert kty == "EC" or kty == "RSA"
if kty == "EC":
return JsonWebKey.generate_key(kty=kty, crv_or_size="P-256", is_private=True, options={"kid": kid})
return JsonWebKey.generate_key(
kty=kty, crv_or_size="P-256", is_private=True, options={"kid": kid}
)
else:
return JsonWebKey.generate_key(kty=kty, crv_or_size=4096, is_private=True, options={"kid": kid})
return JsonWebKey.generate_key(
kty=kty, crv_or_size=4096, is_private=True, options={"kid": kid}
)
def public_keyset_from_keys(keys: List):
keys = list(map(lambda key: key.as_dict(is_private=False, alg=alg_from_kty(key.kty)), keys))
return json.dumps({ "keys": keys })
keys = list(
map(lambda key: key.as_dict(is_private=False, alg=alg_from_kty(key.kty)), keys)
)
return json.dumps({"keys": keys})
def alg_from_kty(kty: str):
assert kty == "EC" or kty == "RSA"
@ -21,6 +30,7 @@ def alg_from_kty(kty: str):
else:
return "RS256"
def token_gen(private_key, claims, headers={}):
if not headers:
headers = {
@ -31,6 +41,7 @@ def token_gen(private_key, claims, headers={}):
}
return jwt.encode(headers, claims, private_key)
def token_claim_1h(tenant_id: int):
# JWT claim that is valid for 1 hour since time of invocation
now = time.time()
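A usage sketch of the helpers above, assuming the module's unshown imports come from authlib (e.g. `from authlib.jose import JsonWebKey, jwt`); the kid and tenant id are made-up values:
key = private_key_gen(kty="EC", kid="key-1")       # P-256 private key with made-up kid
keyset = public_keyset_from_keys([key])            # JSON keyset string for the verifier
token = token_gen(key, token_claim_1h(12345))      # signed JWT for a made-up tenant id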

View File

@ -49,7 +49,9 @@ class FdbBinaryDownloader:
assert self.build_dir.exists(), "{} does not exist".format(build_dir)
assert self.build_dir.is_dir(), "{} is not a directory".format(build_dir)
self.platform = platform.machine()
assert self.platform in SUPPORTED_PLATFORMS, "Unsupported platform {}".format(self.platform)
assert self.platform in SUPPORTED_PLATFORMS, "Unsupported platform {}".format(
self.platform
)
self.download_dir = self.build_dir.joinpath("tmp", "old_binaries")
self.local_binary_repo = Path(LOCAL_OLD_BINARY_REPO)
if not self.local_binary_repo.exists():
@ -57,13 +59,17 @@ class FdbBinaryDownloader:
# Check if the binaries for the given version are available in the local old binaries repository
def version_in_local_repo(self, version):
return (self.local_binary_repo is not None) and (self.local_binary_repo.joinpath(version).exists())
return (self.local_binary_repo is not None) and (
self.local_binary_repo.joinpath(version).exists()
)
def binary_path(self, version, bin_name):
if is_local_build_version(version):
return self.build_dir.joinpath("bin", bin_name)
elif self.version_in_local_repo(version):
return self.local_binary_repo.joinpath(version, "bin", "{}-{}".format(bin_name, version))
return self.local_binary_repo.joinpath(
version, "bin", "{}-{}".format(bin_name, version)
)
else:
return self.download_dir.joinpath(version, bin_name)
@ -77,7 +83,9 @@ class FdbBinaryDownloader:
return self.lib_dir(version).joinpath("libfdb_c.so")
# Download an old binary of a given version from a remote repository
def download_old_binary(self, version, target_bin_name, remote_bin_name, make_executable):
def download_old_binary(
self, version, target_bin_name, remote_bin_name, make_executable
):
local_file = self.download_dir.joinpath(version, target_bin_name)
if local_file.exists():
return
@ -85,7 +93,9 @@ class FdbBinaryDownloader:
# Download to a temporary file and then replace the target file atomically
# to avoid consistency errors in case of multiple tests are downloading the
# same file in parallel
local_file_tmp = Path("{}.{}".format(str(local_file), random_alphanum_string(8)))
local_file_tmp = Path(
"{}.{}".format(str(local_file), random_alphanum_string(8))
)
self.download_dir.joinpath(version).mkdir(parents=True, exist_ok=True)
remote_file = "{}{}/{}".format(FDB_DOWNLOAD_ROOT, version, remote_bin_name)
remote_sha256 = "{}.sha256".format(remote_file)
@ -93,7 +103,9 @@ class FdbBinaryDownloader:
for attempt_cnt in range(MAX_DOWNLOAD_ATTEMPTS + 1):
if attempt_cnt == MAX_DOWNLOAD_ATTEMPTS:
assert False, "Failed to download {} after {} attempts".format(local_file_tmp, MAX_DOWNLOAD_ATTEMPTS)
assert False, "Failed to download {} after {} attempts".format(
local_file_tmp, MAX_DOWNLOAD_ATTEMPTS
)
try:
print("Downloading '{}' to '{}'...".format(remote_file, local_file_tmp))
request.urlretrieve(remote_file, local_file_tmp)
@ -111,7 +123,11 @@ class FdbBinaryDownloader:
if expected_checksum == actual_checkum:
print("Checksum OK")
break
print("Checksum mismatch. Expected: {} Actual: {}".format(expected_checksum, actual_checkum))
print(
"Checksum mismatch. Expected: {} Actual: {}".format(
expected_checksum, actual_checkum
)
)
os.rename(local_file_tmp, local_file)
os.remove(local_sha256)
@ -127,9 +143,15 @@ class FdbBinaryDownloader:
return
# Avoid race conditions in case of parallel test execution by first copying to a temporary file
# and then renaming it atomically
dest_file_tmp = Path("{}.{}".format(str(dest_lib_file), random_alphanum_string(8)))
src_lib_file = self.local_binary_repo.joinpath(version, "lib", "libfdb_c-{}.so".format(version))
assert src_lib_file.exists(), "Missing file {} in the local old binaries repository".format(src_lib_file)
dest_file_tmp = Path(
"{}.{}".format(str(dest_lib_file), random_alphanum_string(8))
)
src_lib_file = self.local_binary_repo.joinpath(
version, "lib", "libfdb_c-{}.so".format(version)
)
assert (
src_lib_file.exists()
), "Missing file {} in the local old binaries repository".format(src_lib_file)
self.download_dir.joinpath(version).mkdir(parents=True, exist_ok=True)
shutil.copyfile(src_lib_file, dest_file_tmp)
os.rename(dest_file_tmp, dest_lib_file)
@ -144,7 +166,15 @@ class FdbBinaryDownloader:
self.copy_clientlib_from_local_repo(version)
return
self.download_old_binary(version, "fdbserver", "fdbserver.{}".format(self.platform), True)
self.download_old_binary(version, "fdbmonitor", "fdbmonitor.{}".format(self.platform), True)
self.download_old_binary(version, "fdbcli", "fdbcli.{}".format(self.platform), True)
self.download_old_binary(version, "libfdb_c.so", "libfdb_c.{}.so".format(self.platform), False)
self.download_old_binary(
version, "fdbserver", "fdbserver.{}".format(self.platform), True
)
self.download_old_binary(
version, "fdbmonitor", "fdbmonitor.{}".format(self.platform), True
)
self.download_old_binary(
version, "fdbcli", "fdbcli.{}".format(self.platform), True
)
self.download_old_binary(
version, "libfdb_c.so", "libfdb_c.{}.so".format(self.platform), False
)
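The download path above writes to a uniquely named temporary file and renames it into place, so parallel tests never observe a half-written binary. A standalone sketch of that pattern; the helper name and suffix are illustrative, not part of this class:
import os
from pathlib import Path
from urllib import request

def fetch_atomically(url, dest: Path, suffix=".tmp1a2b"):
    tmp = Path("{}{}".format(dest, suffix))   # unique per caller, like random_alphanum_string(8)
    dest.parent.mkdir(parents=True, exist_ok=True)
    request.urlretrieve(url, tmp)             # write the whole file to the side first
    os.rename(tmp, dest)                      # atomic on POSIX within one filesystem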

View File

@ -4,8 +4,9 @@ from argparse import ArgumentParser, RawDescriptionHelpFormatter
def CreateTmpFdbClusterArgParser(description):
parser = ArgumentParser(formatter_class=RawDescriptionHelpFormatter,
description=description)
parser = ArgumentParser(
formatter_class=RawDescriptionHelpFormatter, description=description
)
parser.add_argument(
"--build-dir",
"-b",
@ -26,13 +27,16 @@ def CreateTmpFdbClusterArgParser(description):
"--disable-tenants",
help="Do not enable tenant mode",
action="store_true",
default=False
default=False,
)
parser.add_argument(
"--blob-granules-enabled", help="Enable blob granules", action="store_true"
)
parser.add_argument(
"--tls-enabled", help="Enable TLS (with test-only certificates)", action="store_true")
"--tls-enabled",
help="Enable TLS (with test-only certificates)",
action="store_true",
)
parser.add_argument(
"--server-cert-chain-len",
help="Length of server TLS certificate chain including root CA. Negative value deliberately generates expired leaf certificate for TLS testing. Only takes effect with --tls-enabled.",

View File

@ -4,9 +4,11 @@ import time
alphanum_letters = string.ascii_letters + string.digits
def random_alphanum_string(length):
return "".join(random.choice(alphanum_letters) for _ in range(length))
# attach a post-run trace checker to cluster that runs for events between the time of scope entry and exit
class ScopedTraceChecker:
def __init__(self, cluster, checker_func, filename_substr: str = ""):
@ -20,4 +22,6 @@ class ScopedTraceChecker:
return self
def __exit__(self, exc_type, exc_value, traceback):
self.cluster.add_trace_check_from_to(self.checker_func, self.begin, time.time(), self.filename_substr)
self.cluster.add_trace_check_from_to(
self.checker_func, self.begin, time.time(), self.filename_substr
)
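A usage sketch of the context manager above; `cluster`, `check_no_errors`, and `run_workload` are placeholders for a real LocalCluster instance, a checker callback, and the work being traced:
with ScopedTraceChecker(cluster, check_no_errors, filename_substr="fdbserver"):
    run_workload()   # only trace events emitted inside this block are checked on exit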

View File

@ -70,6 +70,7 @@ class TempCluster(LocalCluster):
if self.remove_at_exit:
shutil.rmtree(self.tmp_dir)
if __name__ == "__main__":
script_desc = """
This script automatically configures a temporary local cluster on the machine

View File

@ -310,7 +310,11 @@ def tenant_id_from_name(db):
tenant = db.open_tenant(to_bytes(tenant_name))
return tenant.get_id().wait() # returns int
except fdb.FDBError as e:
print("retrying tenant id fetch after 0.5 second backoff due to {}".format(e))
print(
"retrying tenant id fetch after 0.5 second backoff due to {}".format(
e
)
)
time.sleep(0.5)
return fn
@ -325,7 +329,9 @@ def token_claim_1h(tenant_id_from_name):
return {
"iss": "fdb-authz-tester",
"sub": "authz-test",
"aud": ["tmp-cluster"] if random.choice([True, False]) else "tmp-cluster", # too expensive to parameterize just for this
"aud": ["tmp-cluster"]
if random.choice([True, False])
else "tmp-cluster", # too expensive to parameterize just for this
"iat": now,
"nbf": now - 1,
"exp": now + 60 * 60,

View File

@ -27,7 +27,7 @@ import argparse
class Result(object):
def __init__(self):
self.id = random.randint(0, 2 ** 63)
self.id = random.randint(0, 2**63)
self.kpis = {}
self.errors = []