commit d8973c0867
Merge pull request #1 from alecgrieser/bindings-format

Reformat the go and python bindings
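The diff below is mechanical style cleanup, not a behavioral change. As a hedged illustration of the conventions being applied (a sketch with made-up names such as rows, indices, and offset, not lines taken from the diff): PEP 8 wants no space before a dict colon, spaces around binary operators and after commas, two spaces before an inline comment, and '# ' rather than '#' to open a comment.

    # Before reformatting (style only; both versions behave identically)
    results = { i : r[indices[i]] for i,r in enumerate(rows) if len(r) > indices[i] }
    total = i+offset # running index
    #print 'debug'

    # After reformatting
    results = {i: r[indices[i]] for i, r in enumerate(rows) if len(r) > indices[i]}
    total = i + offset  # running index
    # print 'debug'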
@@ -21,36 +21,37 @@
 import sys
 import os
 
-sys.path[:0]=[os.path.join(os.path.dirname(__file__), '..', '..', 'bindings', 'python')]
+sys.path[:0] = [os.path.join(os.path.dirname(__file__), '..', '..', 'bindings', 'python')]
 
 import util
 
 FDB_API_VERSION = 510
 
 LOGGING = {
-    'version' : 1,
-    'disable_existing_loggers' : False,
-    'formatters' : {
-        'simple' : {
-            'format' : '%(message)s'
+    'version': 1,
+    'disable_existing_loggers': False,
+    'formatters': {
+        'simple': {
+            'format': '%(message)s'
         }
     },
-    'handlers' : {
-        'console' : {
-            'level' : 'NOTSET',
-            'class' : 'logging.StreamHandler',
-            'stream' : sys.stdout,
-            'formatter' : 'simple'
+    'handlers': {
+        'console': {
+            'level': 'NOTSET',
+            'class': 'logging.StreamHandler',
+            'stream': sys.stdout,
+            'formatter': 'simple'
         }
     },
-    'loggers' : {
-        'foundationdb.bindingtester' : {
-            'level' : 'INFO',
-            'handlers' : ['console']
+    'loggers': {
+        'foundationdb.bindingtester': {
+            'level': 'INFO',
+            'handlers': ['console']
         }
     }
 }
 
+
 class Result:
     def __init__(self, subspace, key, values):
         self.subspace_tuple = util.subspace_to_tuple(subspace)

@@ -63,7 +64,7 @@ class Result:
 
         left_key = self.key_tuple[specification.key_start_index:]
         right_key = self.key_tuple[specification.key_start_index:]
-
+
         if len(left_key) != len(right_key) or left_key != right_key:
             return False
 

@@ -81,7 +82,7 @@ class Result:
     def sequence_num(self, specification):
         if specification.ordering_index is not None:
             return self.key_tuple[specification.ordering_index]
-
+
         return None
 
     def __str__(self):

@@ -91,4 +92,3 @@ class Result:
             value_str = repr(self.values)
 
         return '%s = %s' % (repr(self.subspace_tuple + self.key_tuple), value_str)
-
@@ -34,8 +34,9 @@ from threading import Timer, Event
 import logging.config
 
 from collections import OrderedDict
 from functools import reduce
 
-sys.path[:0]=[os.path.join(os.path.dirname(__file__), '..')]
+sys.path[:0] = [os.path.join(os.path.dirname(__file__), '..')]
+
 
 import bindingtester
 

@@ -52,6 +53,7 @@ import fdb.tuple
 
 fdb.api_version(FDB_API_VERSION)
 
+
 class ResultSet(object):
     def __init__(self, specification):
         self.specification = specification

@@ -80,20 +82,20 @@ class ResultSet(object):
         has_filtered_error = False
 
         while True:
-            results = { i : r[indices[i]] for i, r in enumerate(self.tester_results.values()) if len(r) > indices[i] }
+            results = {i: r[indices[i]] for i, r in enumerate(self.tester_results.values()) if len(r) > indices[i]}
             if len(results) == 0:
                 break
 
-            sequence_nums = [ r.sequence_num(self.specification) for r in results.values() ]
+            sequence_nums = [r.sequence_num(self.specification) for r in results.values()]
             if any([s is not None for s in sequence_nums]):
-                results = { i : r for i, r in results.items() if r.sequence_num(self.specification) == min(sequence_nums) }
+                results = {i: r for i, r in results.items() if r.sequence_num(self.specification) == min(sequence_nums)}
             else:
-                results = { i : r for i, r in results.items() if r.matches(min(results.values()), self.specification) }
+                results = {i: r for i, r in results.items() if r.matches(min(results.values()), self.specification)}
 
             for i in results.keys():
                 indices[i] += 1
 
-            all_results = { i : results[i] if i in results else None for i in range(len(self.tester_results)) }
+            all_results = {i: results[i] if i in results else None for i in range(len(self.tester_results))}
             result_str = '\n'.join([' %-*s - %s' % (name_length, self.tester_results.keys()[i], r) for i, r in all_results.items()])
 
             result_list = results.values()

@@ -113,12 +115,15 @@ class ResultSet(object):
 
         return (num_errors, has_filtered_error)
 
+
 def choose_api_version(selected_api_version, tester_min_version, tester_max_version, test_min_version, test_max_version):
     if selected_api_version is not None:
         if selected_api_version < tester_min_version or selected_api_version > tester_max_version:
-            raise Exception('Not all testers support the API version %d (min=%d, max=%d)' % (selected_api_version, tester_min_version, tester_max_version))
+            raise Exception('Not all testers support the API version %d (min=%d, max=%d)' %
+                            (selected_api_version, tester_min_version, tester_max_version))
         elif selected_api_version < test_min_version or selected_api_version > test_max_version:
-            raise Exception('API version %d is not supported by the specified test (min=%d, max=%d)' % (selected_api_version, test_min_version, test_max_version))
+            raise Exception('API version %d is not supported by the specified test (min=%d, max=%d)' %
+                            (selected_api_version, test_min_version, test_max_version))
 
         api_version = selected_api_version
     else:

@@ -126,19 +131,23 @@ def choose_api_version(selected_api_version, tester_min_version, tester_max_vers
         max_version = min(tester_max_version, test_max_version)
 
         if min_version > max_version:
-            raise Exception('Not all testers support the API versions required by the specified test (tester: min=%d, max=%d; test: min=%d, max=%d)' % (tester_min_version, tester_max_version, test_min_version, test_max_version))
+            raise Exception(
+                'Not all testers support the API versions required by the specified test'
+                '(tester: min=%d, max=%d; test: min=%d, max=%d)' % (tester_min_version, tester_max_version, test_min_version, test_max_version))
 
         if random.random() < 0.7:
             api_version = max_version
         elif random.random() < 0.7:
             api_version = min_version
         elif random.random() < 0.9:
-            api_version = random.choice([v for v in [13, 14, 16, 21, 22, 23, 100, 200, 300, 400, 410, 420, 430, 440, 450, 460, 500, 510] if v >= min_version and v <= max_version])
+            api_version = random.choice([v for v in [13, 14, 16, 21, 22, 23, 100, 200, 300, 400, 410, 420, 430,
+                                                     440, 450, 460, 500, 510] if v >= min_version and v <= max_version])
         else:
            api_version = random.randint(min_version, max_version)
 
     return api_version
 
+
 class TestRunner(object):
     def __init__(self, args):
         self.args = copy.copy(args)

@@ -157,7 +166,8 @@ class TestRunner(object):
 
         min_api_version = max([tester.min_api_version for tester in self.testers])
         max_api_version = min([tester.max_api_version for tester in self.testers])
-        self.args.api_version = choose_api_version(self.args.api_version, min_api_version, max_api_version, self.test.min_api_version, self.test.max_api_version)
+        self.args.api_version = choose_api_version(self.args.api_version, min_api_version, max_api_version,
+                                                   self.test.min_api_version, self.test.max_api_version)
 
         util.get_logger().info('\nCreating test at API version %d' % self.args.api_version)
 

@@ -165,7 +175,8 @@ class TestRunner(object):
         if self.args.max_int_bits is None:
             self.args.max_int_bits = max_int_bits
         elif self.args.max_int_bits > max_int_bits:
-            raise Exception('The specified testers support at most %d-bit ints, but --max-int-bits was set to %d' % (max_int_bits, self.args.max_int_bits))
+            raise Exception('The specified testers support at most %d-bit ints, but --max-int-bits was set to %d' %
+                            (max_int_bits, self.args.max_int_bits))
 
         self.args.no_threads = self.args.no_threads or any([not tester.threads_enabled for tester in self.testers])
         if self.args.no_threads and self.args.concurrency > 1:

@@ -189,15 +200,15 @@ class TestRunner(object):
 
         for i, instruction in enumerate(instructions):
             if self.args.print_all or (instruction.operation != 'SWAP' and instruction.operation != 'PUSH'):
-                util.get_logger().error(' %d. %r' % (i+offset, instruction))
+                util.get_logger().error(' %d. %r' % (i + offset, instruction))
 
-        util.get_logger().error('');
+        util.get_logger().error('')
 
     def run_test(self):
         test_instructions = self._generate_test()
         expected_results = self.test.get_expected_results()
 
-        tester_results = { s.subspace : ResultSet(s) for s in self.test.get_result_specifications() }
+        tester_results = {s.subspace: ResultSet(s) for s in self.test.get_result_specifications()}
        for subspace, results in expected_results.items():
            tester_results[subspace].add('expected', results)
 

@@ -208,7 +219,8 @@ class TestRunner(object):
             self.test.pre_run(self.db, self.args)
             return_code = self._run_tester(tester)
             if return_code != 0:
-                util.get_logger().error('Test of type %s failed to complete successfully with random seed %d and %d operations\n' % (self.args.test_name, self.args.seed, self.args.num_ops))
+                util.get_logger().error('Test of type %s failed to complete successfully with random seed %d and %d operations\n' %
+                                        (self.args.test_name, self.args.seed, self.args.num_ops))
                 return 2
 
             tester_errors[tester] = self.test.validate(self.db, self.args)

@@ -226,18 +238,19 @@ class TestRunner(object):
         self._insert_instructions(test_instructions)
 
     def _generate_test(self):
-        util.get_logger().info('Generating %s test at seed %d with %d op(s) and %d concurrent tester(s)...' % (self.args.test_name, self.args.seed, self.args.num_ops, self.args.concurrency))
+        util.get_logger().info('Generating %s test at seed %d with %d op(s) and %d concurrent tester(s)...' %
+                               (self.args.test_name, self.args.seed, self.args.num_ops, self.args.concurrency))
 
         random.seed(self.test_seed)
 
         if self.args.concurrency == 1:
             self.test.setup(self.args)
-            test_instructions = { fdb.Subspace((self.args.instruction_prefix,)) : self.test.generate(self.args, 0) }
+            test_instructions = {fdb.Subspace((self.args.instruction_prefix,)): self.test.generate(self.args, 0)}
         else:
             test_instructions = {}
             main_thread = InstructionSet()
             for i in range(self.args.concurrency):
-                #thread_spec = fdb.Subspace(('thread_spec', i))
+                # thread_spec = fdb.Subspace(('thread_spec', i))
                 thread_spec = 'thread_spec%d' % i
                 main_thread.push_args(thread_spec)
                 main_thread.append('START_THREAD')

@@ -260,7 +273,7 @@ class TestRunner(object):
             params += [self.args.cluster_file]
 
         util.get_logger().info('\nRunning tester \'%s\'...' % ' '.join(params))
-        sys.stdout.flush();
+        sys.stdout.flush()
         proc = subprocess.Popen(params)
         timed_out = Event()
 

@@ -321,9 +334,10 @@ class TestRunner(object):
             if len(errors) > 0:
                 util.get_logger().error('The %s tester reported errors:\n' % tester.name)
                 for i, error in enumerate(errors):
-                    util.get_logger().error(' %d. %s' % (i+1, error))
+                    util.get_logger().error(' %d. %s' % (i + 1, error))
 
-        log_message = '\nTest with seed %d and concurrency %d had %d incorrect result(s) and %d error(s) at API version %d' % (self.args.seed, self.args.concurrency, num_incorrect, num_errors, self.args.api_version)
+        log_message = '\nTest with seed %d and concurrency %d had %d incorrect result(s) and %d error(s) at API version %d' %\
+                      (self.args.seed, self.args.concurrency, num_incorrect, num_errors, self.args.api_version)
         if num_errors == 0 and (num_incorrect == 0 or has_filtered_error):
             util.get_logger().info(log_message)
             if has_filtered_error:

@@ -333,6 +347,7 @@ class TestRunner(object):
             util.get_logger().error(log_message)
             return 1
 
+
 def bisect(test_runner, args):
     util.get_logger().info('')
 

@@ -354,7 +369,8 @@ def bisect(test_runner, args):
                 util.get_logger().error('Error finding minimal failing test for seed %d. The failure may not be deterministic' % args.seed)
                 return 1
             else:
-                util.get_logger().error('No failing test found for seed %d with %d ops. Try specifying a larger --num-ops parameter.' % (args.seed, args.num_ops))
+                util.get_logger().error('No failing test found for seed %d with %d ops. Try specifying a larger --num-ops parameter.'
+                                        % (args.seed, args.num_ops))
                 return 0
 
         elif result == 0:

@@ -365,30 +381,45 @@ def bisect(test_runner, args):
             util.get_logger().info('Test with %d operations failed with error code %d\n' % (test_runner.args.num_ops, result))
             upper_bound = test_runner.args.num_ops
 
 
 def parse_args(argv):
     parser = argparse.ArgumentParser(description='FoundationDB Binding API Tester')
-    parser.add_argument('--test-name', default='scripted', help='The name of the test to run. Must be the name of a test specified in the tests folder. (default=\'scripted\')')
+    parser.add_argument('--test-name', default='scripted',
+                        help='The name of the test to run. Must be the name of a test specified in the tests folder. (default=\'scripted\')')
 
     parser.add_argument(metavar='tester1', dest='test1', help='Name of the first tester to invoke')
-    parser.add_argument('--compare', metavar='tester2', nargs='?', type=str, default=None, const='python', dest='test2', help='When specified, a second tester will be run and compared against the first. This flag takes an optional argument for the second tester to invoke (default = \'python\').')
-    parser.add_argument('--print-test', action='store_true', help='Instead of running a test, prints the set of instructions generated for that test. Unless --all is specified, all setup, finalization, PUSH, and SWAP instructions will be excluded.')
+    parser.add_argument('--compare', metavar='tester2', nargs='?', type=str, default=None, const='python', dest='test2',
+                        help='When specified, a second tester will be run and compared against the first. This flag takes an optional argument '
+                             'for the second tester to invoke (default = \'python\').')
+    parser.add_argument('--print-test', action='store_true',
+                        help='Instead of running a test, prints the set of instructions generated for that test. Unless --all is specified, all '
+                             'setup, finalization, PUSH, and SWAP instructions will be excluded.')
     parser.add_argument('--all', dest='print_all', action='store_true', help='Causes --print-test to print all instructions.')
-    parser.add_argument('--bisect', action='store_true', help='Run the specified test varying the number of operations until a minimal failing test is found. Does not work for concurrent tests.')
+    parser.add_argument('--bisect', action='store_true',
+                        help='Run the specified test varying the number of operations until a minimal failing test is found. Does not work for '
+                             'concurrent tests.')
     parser.add_argument('--insert-only', action='store_true', help='Insert the test instructions into the database, but do not run it.')
 
     parser.add_argument('--concurrency', type=int, default=1, help='Number of concurrent test threads to run. (default = 1).')
     parser.add_argument('--num-ops', type=int, default=100, help='The number of operations to generate per thread (default = 100)')
     parser.add_argument('--seed', type=int, help='The random seed to use for generating the test')
-    parser.add_argument('--max-int-bits', type=int, default=None, help='Maximum number of bits to use for int types in testers. By default, the largest value supported by the testers being run will be chosen.')
-    parser.add_argument('--api-version', default=None, type=int, help='The API version that the testers should use. Not supported in scripted mode. (default = random version supported by all testers)')
+    parser.add_argument('--max-int-bits', type=int, default=None,
+                        help='Maximum number of bits to use for int types in testers. By default, the largest value supported by the testers being '
+                             'run will be chosen.')
+    parser.add_argument('--api-version', default=None, type=int,
+                        help='The API version that the testers should use. Not supported in scripted mode. (default = random version supported by '
+                             'all testers)')
     parser.add_argument('--cluster-file', type=str, default=None, help='The cluster file for the cluster being connected to. (default None)')
     parser.add_argument('--timeout', type=int, default=600, help='The timeout in seconds for running each individual tester. (default 600)')
-    parser.add_argument('--enable-client-trace-logging', nargs='?', type=str, default=None, const='.', help='Enables trace file output. This flag takes an optional argument specifying the output directory (default = \'.\').')
-    parser.add_argument('--instruction-prefix', type=str, default='test_spec', help='The prefix under which the main thread of test instructions are inserted (default=\'test_spec\').')
-    parser.add_argument('--output-subspace', type=str, default='tester_output', help='The string used to create the output subspace for the testers. The subspace will be of the form (<output_subspace>,). (default=\'tester_output\')')
+    parser.add_argument('--enable-client-trace-logging', nargs='?', type=str, default=None, const='.',
+                        help='Enables trace file output. This flag takes an optional argument specifying the output directory (default = \'.\').')
+    parser.add_argument('--instruction-prefix', type=str, default='test_spec',
+                        help='The prefix under which the main thread of test instructions are inserted (default=\'test_spec\').')
+    parser.add_argument('--output-subspace', type=str, default='tester_output',
+                        help='The string used to create the output subspace for the testers. The subspace will be of the form (<output_subspace>,). '
+                             '(default=\'tester_output\')')
 
-    parser.add_argument('--logging-level', type=str, default='INFO', choices=['ERROR', 'WARNING', 'INFO', 'DEBUG'], help='Specifies the level of detail in the tester output (default=\'INFO\').')
+    parser.add_argument('--logging-level', type=str, default='INFO',
+                        choices=['ERROR', 'WARNING', 'INFO', 'DEBUG'], help='Specifies the level of detail in the tester output (default=\'INFO\').')
 
     # SOMEDAY: this applies only to the scripted test. Should we invoke test files specifically (as in circus),
     # or invoke them here and allow tests to add arguments?

@@ -396,6 +427,7 @@ def parse_args(argv):
 
     return parser.parse_args(argv)
 
+
 def validate_args(args):
     if args.insert_only and args.bisect:
         raise Exception('--bisect cannot be used with --insert-only')

@@ -408,6 +440,7 @@ def validate_args(args):
     if args.concurrency > 1 and args.test2:
         raise Exception('--compare cannot be used with concurrent tests')
 
+
 def main(argv):
     args = parse_args(argv)
     try:

@@ -444,9 +477,11 @@ def main(argv):
         util.get_logger().debug(traceback.format_exc())
         exit(3)
 
-    except:
+    except BaseException:
         util.get_logger().error('\nERROR: %s' % sys.exc_info()[0])
         util.get_logger().info(traceback.format_exc())
         exit(3)
 
-if __name__ == '__main__': sys.exit(main(sys.argv[1:]))
+
+if __name__ == '__main__':
+    sys.exit(main(sys.argv[1:]))
@@ -21,8 +21,9 @@
 import os
 
 MAX_API_VERSION = 510
-COMMON_TYPES = [ 'null', 'bytes', 'string', 'int', 'uuid', 'bool', 'float', 'double', 'tuple' ]
-ALL_TYPES = COMMON_TYPES + [ 'versionstamp' ]
+COMMON_TYPES = ['null', 'bytes', 'string', 'int', 'uuid', 'bool', 'float', 'double', 'tuple']
+ALL_TYPES = COMMON_TYPES + ['versionstamp']
 
+
 class Tester:
     def __init__(self, name, cmd, max_int_bits=64, min_api_version=0, max_api_version=MAX_API_VERSION, threads_enabled=True, types=COMMON_TYPES):

@@ -44,22 +45,24 @@ class Tester:
         else:
             return Tester(test_name_or_args.split(' ')[0], test_name_or_args)
 
+
 def _absolute_path(path):
     return os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', path)
 
+
 _java_cmd = 'java -ea -cp %s:%s com.apple.foundationdb.test.' % (
-        _absolute_path('java/foundationdb-client.jar'),
-        _absolute_path('java/foundationdb-tests.jar'))
+    _absolute_path('java/foundationdb-client.jar'),
+    _absolute_path('java/foundationdb-tests.jar'))
 
 # We could set min_api_version lower on some of these if the testers were updated to support them
 testers = {
-    'python' : Tester('python', 'python ' + _absolute_path('python/tests/tester.py'), 2040, 23, MAX_API_VERSION, types=ALL_TYPES),
-    'python3' : Tester('python3', 'python3 ' + _absolute_path('python/tests/tester.py'), 2040, 23, MAX_API_VERSION, types=ALL_TYPES),
-    'node' : Tester('node', _absolute_path('nodejs/tests/tester.js'), 53, 500, MAX_API_VERSION),
-    'streamline' : Tester('streamline', _absolute_path('nodejs/tests/streamline_tester._js'), 53, 500, MAX_API_VERSION),
-    'ruby' : Tester('ruby', _absolute_path('ruby/tests/tester.rb'), 64, 23, MAX_API_VERSION),
-    'java' : Tester('java', _java_cmd + 'StackTester', 2040, 510, MAX_API_VERSION, types=ALL_TYPES),
-    'java_async' : Tester('java', _java_cmd + 'AsyncStackTester', 2040, 510, MAX_API_VERSION, types=ALL_TYPES),
-    'go' : Tester('go', _absolute_path('go/build/bin/_stacktester'), 63, 200, MAX_API_VERSION),
-    'flow' : Tester('flow', _absolute_path('flow/bin/fdb_flow_tester'), 63, 500, MAX_API_VERSION),
+    'python': Tester('python', 'python ' + _absolute_path('python/tests/tester.py'), 2040, 23, MAX_API_VERSION, types=ALL_TYPES),
+    'python3': Tester('python3', 'python3 ' + _absolute_path('python/tests/tester.py'), 2040, 23, MAX_API_VERSION, types=ALL_TYPES),
+    'node': Tester('node', _absolute_path('nodejs/tests/tester.js'), 53, 500, MAX_API_VERSION),
+    'streamline': Tester('streamline', _absolute_path('nodejs/tests/streamline_tester._js'), 53, 500, MAX_API_VERSION),
+    'ruby': Tester('ruby', _absolute_path('ruby/tests/tester.rb'), 64, 23, MAX_API_VERSION),
+    'java': Tester('java', _java_cmd + 'StackTester', 2040, 510, MAX_API_VERSION, types=ALL_TYPES),
+    'java_async': Tester('java', _java_cmd + 'AsyncStackTester', 2040, 510, MAX_API_VERSION, types=ALL_TYPES),
+    'go': Tester('go', _absolute_path('go/build/bin/_stacktester'), 63, 200, MAX_API_VERSION),
+    'flow': Tester('flow', _absolute_path('flow/bin/fdb_flow_tester'), 63, 500, MAX_API_VERSION),
 }
@@ -28,11 +28,12 @@ from bindingtester import util
 
 fdb.api_version(FDB_API_VERSION)
 
+
 class ResultSpecification(object):
     def __init__(self, subspace, key_start_index=0, ordering_index=None, global_error_filter=None):
         self.subspace = subspace
         self.key_start_index = key_start_index
-        self.ordering_index = ordering_index
+        self.ordering_index = ordering_index
 
         if global_error_filter is not None:
             error_str = '|'.join(['%d' % e for e in global_error_filter])

@@ -45,7 +46,7 @@ class ResultSpecification(object):
             return False
 
         return self.error_regex.search(str) is not None
 
-
+
 class Test(object):
     def __init__(self, subspace, min_api_version=0, max_api_version=int(1e9)):

@@ -54,7 +55,7 @@ class Test(object):
         self.max_api_version = max_api_version
 
     # Returns nothing
-    def setup(self, args):
+    def setup(self, args):
         pass
 
     # Returns an instance of TestInstructions

@@ -75,7 +76,7 @@ class Test(object):
     def get_expected_results(self):
         return {}
 
-    # Returns a list of error strings
+    # Returns a list of error strings
     def validate(self, db, args):
         return []
 

@@ -88,6 +89,7 @@ class Test(object):
 
         return test_class[0](subspace)
 
+
 class Instruction(object):
     def __init__(self, operation):
         self.operation = operation

@@ -103,6 +105,7 @@ class Instruction(object):
     def __repr__(self):
         return repr(self.operation)
 
+
 class PushInstruction(Instruction):
     def __init__(self, argument):
         self.operation = 'PUSH'

@@ -115,6 +118,7 @@ class PushInstruction(Instruction):
     def __repr__(self):
         return '%r %r' % (self.operation, self.argument)
 
+
 class TestInstructions(object):
     def __init__(self):
         pass

@@ -126,13 +130,14 @@ class TestInstructions(object):
     def insert_operations(self, db, subspace):
         pass
 
+
 class InstructionSet(TestInstructions, list):
     def __init__(self):
         TestInstructions.__init__(self)
         list.__init__(self)
 
         self.core_test_begin = 0
-        self.core_test_end = None
+        self.core_test_end = None
 
     def push_args(self, *args):
         self.extend([PushInstruction(arg) for arg in reversed(args)])

@@ -144,7 +149,7 @@ class InstructionSet(TestInstructions, list):
         list.append(self, Instruction(instruction))
 
     def get_threads(self, subspace):
-        return { subspace : self }
+        return {subspace: self}
 
     def setup_complete(self):
         self.core_test_begin = len(self)

@@ -153,16 +158,17 @@ class InstructionSet(TestInstructions, list):
         self.core_test_end = len(self)
 
     def core_instructions(self):
-        return self[self.core_test_begin : self.core_test_end]
+        return self[self.core_test_begin: self.core_test_end]
 
     @fdb.transactional
     def _insert_operations_transactional(self, tr, subspace, start, count):
-        for i, instruction in enumerate(self[start : start+count]):
+        for i, instruction in enumerate(self[start: start + count]):
             tr[subspace.pack((start + i,))] = instruction.to_value()
 
     def insert_operations(self, db, subspace):
         for i in range(0, int(math.ceil(len(self) / 1000.0))):
-            self._insert_operations_transactional(db, subspace, i*1000, 1000)
+            self._insert_operations_transactional(db, subspace, i * 1000, 1000)
 
+
 class ThreadedInstructionSet(TestInstructions):
     def __init__(self):

@@ -194,4 +200,5 @@ class ThreadedInstructionSet(TestInstructions):
         self.threads[subspace] = thread_instructions
         return thread_instructions
 
+
 util.import_subclasses(__file__, 'bindingtester.tests')
@@ -32,11 +32,12 @@ from bindingtester.tests import test_util
 
 fdb.api_version(FDB_API_VERSION)
 
+
 class ApiTest(Test):
     def __init__(self, subspace):
         super(ApiTest, self).__init__(subspace)
-        self.workspace = self.subspace['workspace'] # The keys and values here must match between subsequent runs of the same test
-        self.scratch = self.subspace['scratch'] # The keys and values here can differ between runs
+        self.workspace = self.subspace['workspace']  # The keys and values here must match between subsequent runs of the same test
+        self.scratch = self.subspace['scratch']  # The keys and values here can differ between runs
         self.stack_subspace = self.subspace['stack']
 
         self.versionstamped_values = self.scratch['versionstamped_values']

@@ -78,7 +79,7 @@ class ApiTest(Test):
         self.key_depth = max(0, self.key_depth - num)
 
         self.outstanding_ops = [i for i in self.outstanding_ops if i[0] <= self.stack_size]
-
+
     def ensure_string(self, instructions, num):
         while self.string_depth < num:
             instructions.push_args(self.random.random_string(random.randint(0, 100)))

@@ -97,7 +98,7 @@ class ApiTest(Test):
             tup = self.random.random_tuple(5)
             self.generated_keys.append(tup)
 
-        return self.workspace.pack(tup)
+        return self.workspace.pack(tup)
 
     def ensure_key(self, instructions, num):
         while self.key_depth < num:

@@ -131,7 +132,7 @@ class ApiTest(Test):
     def wait_for_reads(self, instructions):
         while len(self.outstanding_ops) > 0 and self.outstanding_ops[-1][0] <= self.stack_size:
             read = self.outstanding_ops.pop()
-            #print '%d. waiting for read at instruction %r' % (len(instructions), read)
+            # print '%d. waiting for read at instruction %r' % (len(instructions), read)
             test_util.to_front(instructions, self.stack_size - read[0])
             instructions.append('WAIT_FUTURE')
 

@@ -187,7 +188,7 @@ class ApiTest(Test):
             index = len(instructions)
             read_performed = False
 
-            #print 'Adding instruction %s at %d' % (op, index)
+            # print 'Adding instruction %s at %d' % (op, index)
 
             if args.concurrency == 1 and (op in database_mutations):
                 self.wait_for_reads(instructions)

@@ -211,7 +212,7 @@ class ApiTest(Test):
                 instructions.push_args(random.randint(0, 5000))
                 instructions.append(op)
 
-                self.outstanding_ops.append((self.stack_size, len(instructions)-1))
+                self.outstanding_ops.append((self.stack_size, len(instructions) - 1))
                 if args.concurrency == 1:
                     self.wait_for_reads(instructions)
 

@@ -236,7 +237,7 @@ class ApiTest(Test):
                 test_util.to_front(instructions, 3)
                 instructions.append(op)
 
-                #Don't add key here because we may be outside of our prefix
+                # Don't add key here because we may be outside of our prefix
                 self.add_strings(1)
                 self.can_set_version = False
                 read_performed = True

@@ -249,7 +250,7 @@ class ApiTest(Test):
                 test_util.to_front(instructions, 4)
                 instructions.append(op)
 
-                if range_params[0] >= 1 and range_params[0] <= 1000: # avoid adding a string if the limit is large
+                if range_params[0] >= 1 and range_params[0] <= 1000:  # avoid adding a string if the limit is large
                     self.add_strings(1)
                 else:
                     self.add_stack_items(1)

@@ -258,14 +259,14 @@ class ApiTest(Test):
                 read_performed = True
 
             elif op == 'GET_RANGE_STARTS_WITH' or op == 'GET_RANGE_STARTS_WITH_SNAPSHOT' or op == 'GET_RANGE_STARTS_WITH_DATABASE':
-                #TODO: not tested well
+                # TODO: not tested well
                 self.ensure_key(instructions, 1)
                 range_params = self.random.random_range_params()
                 instructions.push_args(*range_params)
                 test_util.to_front(instructions, 3)
                 instructions.append(op)
 
-                if range_params[0] >= 1 and range_params[0] <= 1000: # avoid adding a string if the limit is large
+                if range_params[0] >= 1 and range_params[0] <= 1000:  # avoid adding a string if the limit is large
                     self.add_strings(1)
                 else:
                     self.add_stack_items(1)

@@ -285,7 +286,7 @@ class ApiTest(Test):
                 test_util.to_front(instructions, 9)
                 instructions.append(op)
 
-                if range_params[0] >= 1 and range_params[0] <= 1000: # avoid adding a string if the limit is large
+                if range_params[0] >= 1 and range_params[0] <= 1000:  # avoid adding a string if the limit is large
                     self.add_strings(1)
                 else:
                     self.add_stack_items(1)

@@ -302,8 +303,8 @@ class ApiTest(Test):
                 self.ensure_key_value(instructions)
                 instructions.append(op)
                 if op == 'SET_DATABASE':
-                    self.add_stack_items(1)
-
+                    self.add_stack_items(1)
+
             elif op == 'SET_READ_VERSION':
                 if self.has_version and self.can_set_version:
                     instructions.append(op)

@@ -316,7 +317,7 @@ class ApiTest(Test):
                     self.add_stack_items(1)
 
             elif op == 'CLEAR_RANGE' or op == 'CLEAR_RANGE_DATABASE':
-                #Protect against inverted range
+                # Protect against inverted range
                 key1 = self.workspace.pack(self.random.random_tuple(5))
                 key2 = self.workspace.pack(self.random.random_tuple(5))
 

@@ -334,7 +335,7 @@ class ApiTest(Test):
                 instructions.append(op)
                 if op == 'CLEAR_RANGE_STARTS_WITH_DATABASE':
                     self.add_stack_items(1)
-
+
             elif op == 'ATOMIC_OP' or op == 'ATOMIC_OP_DATABASE':
                 self.ensure_key_value(instructions)
                 if op == 'ATOMIC_OP' or args.concurrency > 1:

@@ -351,10 +352,10 @@ class ApiTest(Test):
                 key1 = self.versionstamped_values.pack((rand_str1,))
 
                 split = random.randint(0, 70)
-                rand_str2 = self.random.random_string(20+split) + fdb.tuple.Versionstamp._UNSET_TR_VERSION + self.random.random_string(70-split)
+                rand_str2 = self.random.random_string(20 + split) + fdb.tuple.Versionstamp._UNSET_TR_VERSION + self.random.random_string(70 - split)
                 key2 = self.versionstamped_keys.pack() + rand_str2
                 index = key2.find(fdb.tuple.Versionstamp._UNSET_TR_VERSION)
-                key2 += chr(index%256)+chr(index/256)
+                key2 += chr(index % 256) + chr(index / 256)
 
                 instructions.push_args(u'SET_VERSIONSTAMPED_VALUE', key1, fdb.tuple.Versionstamp._UNSET_TR_VERSION + rand_str2)
                 instructions.append('ATOMIC_OP')

@@ -436,8 +437,8 @@ class ApiTest(Test):
 
                 version_key = self.versionstamped_keys.pack(tup)
                 first_incomplete = version_key.find(fdb.tuple.Versionstamp._UNSET_TR_VERSION)
-                second_incomplete = -1 if first_incomplete < 0 else version_key.find(fdb.tuple.Versionstamp._UNSET_TR_VERSION,
-                                                                                     first_incomplete + len(fdb.tuple.Versionstamp._UNSET_TR_VERSION) + 1)
+                second_incomplete = -1 if first_incomplete < 0 else \
+                    version_key.find(fdb.tuple.Versionstamp._UNSET_TR_VERSION, first_incomplete + len(fdb.tuple.Versionstamp._UNSET_TR_VERSION) + 1)
 
                 # If there is exactly one incomplete versionstamp, perform the versionstamped key operation.
                 if first_incomplete >= 0 and second_incomplete < 0:

@@ -449,7 +450,8 @@ class ApiTest(Test):
                     instructions.append('ATOMIC_OP')
 
                 version_value_key = self.versionstamped_values.pack((rand_str,))
-                instructions.push_args(u'SET_VERSIONSTAMPED_VALUE', version_value_key, fdb.tuple.Versionstamp._UNSET_TR_VERSION + fdb.tuple.pack(tup))
+                instructions.push_args(u'SET_VERSIONSTAMPED_VALUE', version_value_key,
+                                       fdb.tuple.Versionstamp._UNSET_TR_VERSION + fdb.tuple.pack(tup))
                 instructions.append('ATOMIC_OP')
                 self.can_use_key_selectors = False
 

@@ -469,7 +471,7 @@ class ApiTest(Test):
                 instructions.append(op)
                 self.add_strings(len(tups))
 
-            #Use SUB to test if integers are correctly unpacked
+            # Use SUB to test if integers are correctly unpacked
             elif op == 'SUB':
                 a = self.random.random_int() / 2
                 b = self.random.random_int() / 2

@@ -512,7 +514,7 @@ class ApiTest(Test):
                 assert False
 
             if read_performed and op not in database_reads:
-                self.outstanding_ops.append((self.stack_size, len(instructions)-1))
+                self.outstanding_ops.append((self.stack_size, len(instructions) - 1))
 
             if args.concurrency == 1 and (op in database_reads or op in database_mutations):
                 instructions.append('WAIT_FUTURE')

@@ -536,7 +538,7 @@ class ApiTest(Test):
     def check_versionstamps(self, tr, begin_key, limit):
         next_begin = None
         incorrect_versionstamps = 0
-        for k,v in tr.get_range(begin_key, self.versionstamped_values.range().stop, limit=limit):
+        for k, v in tr.get_range(begin_key, self.versionstamped_values.range().stop, limit=limit):
             next_begin = k + '\x00'
             tup = fdb.tuple.unpack(k)
             key = self.versionstamped_keys.pack() + v[10:].replace(fdb.tuple.Versionstamp._UNSET_TR_VERSION, v[:10], 1)

@@ -545,7 +547,6 @@ class ApiTest(Test):
                 util.get_logger().error(' %s != %s', repr(tr[key]), repr(tup[-1]))
                 incorrect_versionstamps += 1
 
-
         return (next_begin, incorrect_versionstamps)
 
     def validate(self, db, args):

@@ -564,8 +565,7 @@ class ApiTest(Test):
         return errors
 
     def get_result_specifications(self):
-        return [
-            ResultSpecification(self.workspace, global_error_filter=[1007, 1021]),
-            ResultSpecification(self.stack_subspace, key_start_index=1, ordering_index=1, global_error_filter=[1007, 1021])
+        return [
+            ResultSpecification(self.workspace, global_error_filter=[1007, 1021]),
+            ResultSpecification(self.stack_subspace, key_start_index=1, ordering_index=1, global_error_filter=[1007, 1021])
         ]
@@ -32,6 +32,7 @@ from bindingtester.tests.directory_util import DirListEntry
 
 fdb.api_version(FDB_API_VERSION)
 
+
 class DirectoryTest(Test):
 
     def __init__(self, subspace):

@@ -71,12 +72,12 @@ class DirectoryTest(Test):
         instructions = InstructionSet()
 
         op_choices = ['NEW_TRANSACTION', 'COMMIT']
 
         general = ['DIRECTORY_CREATE_SUBSPACE', 'DIRECTORY_CREATE_LAYER']
 
         op_choices += general
 
-        directory_mutations = ['DIRECTORY_CREATE_OR_OPEN', 'DIRECTORY_CREATE', 'DIRECTORY_MOVE', 'DIRECTORY_MOVE_TO',
+        directory_mutations = ['DIRECTORY_CREATE_OR_OPEN', 'DIRECTORY_CREATE', 'DIRECTORY_MOVE', 'DIRECTORY_MOVE_TO',
                                'DIRECTORY_REMOVE', 'DIRECTORY_REMOVE_IF_EXISTS']
         directory_reads = ['DIRECTORY_EXISTS', 'DIRECTORY_OPEN', 'DIRECTORY_LIST']
 

@@ -105,17 +106,18 @@ class DirectoryTest(Test):
 
         # Generate some directories that we are going to create in advance. This tests that other bindings
        # are compatible with the Python implementation
-        self.prepopulated_dirs = [ (generate_path(min_length=1), self.generate_layer()) for i in range(5) ]
+        self.prepopulated_dirs = [(generate_path(min_length=1), self.generate_layer()) for i in range(5)]
 
         for path, layer in self.prepopulated_dirs:
             instructions.push_args(layer)
             instructions.push_args(*test_util.with_length(path))
             instructions.append('DIRECTORY_OPEN')
-            #print '%d. Selected %s, dir=%s, has_known_prefix=%s, dir_list_len=%d' % (len(instructions), 'DIRECTORY_OPEN', repr(self.dir_index), False, len(self.dir_list))
+            # print '%d. Selected %s, dir=%s, has_known_prefix=%s, dir_list_len=%d' \
+            #     % (len(instructions), 'DIRECTORY_OPEN', repr(self.dir_index), False, len(self.dir_list))
             self.dir_list.append(self.dir_list[0].add_child(path, default_path, self.root, DirListEntry(True, True, has_known_prefix=False)))
 
         instructions.setup_complete()
 
         for i in range(args.num_ops):
             if random.random() < 0.5:
                 self.dir_index = random.randrange(0, len(self.dir_list))

@@ -131,7 +133,8 @@ class DirectoryTest(Test):
             op = random.choice(choices)
             dir_entry = self.dir_list[self.dir_index]
 
-            #print '%d. Selected %s, dir=%s, has_known_prefix=%s, dir_list_len=%d' % (len(instructions), op, repr(self.dir_index), repr(dir_entry.has_known_prefix), len(self.dir_list))
+            # print '%d. Selected %s, dir=%s, has_known_prefix=%s, dir_list_len=%d' \
+            #     % (len(instructions), op, repr(self.dir_index), repr(dir_entry.has_known_prefix), len(self.dir_list))
 
             if op.endswith('_DATABASE') or op.endswith('_SNAPSHOT'):
                 root_op = op[0:-9]

@@ -160,7 +163,7 @@ class DirectoryTest(Test):
                     indices.append(len(self.dir_list))
                     self.dir_list.append(DirListEntry(False, True))
 
-                instructions.push_args(random.choice([0,1]))
+                instructions.push_args(random.choice([0, 1]))
                 instructions.push_args(*indices)
                 instructions.append(op)
                 self.dir_list.append(DirListEntry(True, False, False))

@@ -172,7 +175,7 @@ class DirectoryTest(Test):
                     test_util.blocking_commit(instructions)
 
                 path = generate_path()
-                op_args = test_util.with_length(path) + (self.generate_layer(),)
+                op_args = test_util.with_length(path) + (self.generate_layer(),)
                 directory_util.push_instruction_and_record_prefix(instructions, op, op_args, path, len(self.dir_list), self.random, self.prefix_log)
 
                 if not op.endswith('_DATABASE') and args.concurrency == 1:

@@ -189,18 +192,19 @@ class DirectoryTest(Test):
 
                 # Because allocated prefixes are non-deterministic, we cannot have overlapping
                 # transactions that allocate/remove these prefixes in a comparison test
-                if op.endswith('_DATABASE') and args.concurrency == 1: # and allow_empty_prefix:
+                if op.endswith('_DATABASE') and args.concurrency == 1:  # and allow_empty_prefix:
                     test_util.blocking_commit(instructions)
 
                 path = generate_path()
-                op_args = test_util.with_length(path) + (layer, prefix)
+                op_args = test_util.with_length(path) + (layer, prefix)
                 if prefix is None:
-                    directory_util.push_instruction_and_record_prefix(instructions, op, op_args, path, len(self.dir_list), self.random, self.prefix_log)
+                    directory_util.push_instruction_and_record_prefix(
+                        instructions, op, op_args, path, len(self.dir_list), self.random, self.prefix_log)
                 else:
                     instructions.push_args(*op_args)
                     instructions.append(op)
 
-                if not op.endswith('_DATABASE') and args.concurrency == 1: # and allow_empty_prefix:
+                if not op.endswith('_DATABASE') and args.concurrency == 1:  # and allow_empty_prefix:
                     test_util.blocking_commit(instructions)
 
                 self.dir_list.append(dir_entry.add_child(path, default_path, self.root, DirListEntry(True, True, bool(prefix))))

@@ -228,13 +232,14 @@ class DirectoryTest(Test):
                 new_path = generate_path()
                 instructions.push_args(*test_util.with_length(new_path))
                 instructions.append(op)
-                self.dir_list.append(dir_entry.root.add_child(new_path, default_path, self.root, DirListEntry(True, True, dir_entry.has_known_prefix)))
+                self.dir_list.append(dir_entry.root.add_child(new_path, default_path, self.root,
+                                                              DirListEntry(True, True, dir_entry.has_known_prefix)))
 
                 # Make sure that the default directory subspace still exists after moving the current directory
                 self.ensure_default_directory_subspace(instructions, default_path)
 
             # FIXME: There is currently a problem with removing partitions. In these generated tests, it's possible
-            # for a removed partition to resurrect itself and insert keys into the database using its allocated
+            # for a removed partition to resurrect itself and insert keys into the database using its allocated
             # prefix. The result is non-deterministic HCA errors.
             elif root_op == 'DIRECTORY_REMOVE' or root_op == 'DIRECTORY_REMOVE_IF_EXISTS':
                 # Because allocated prefixes are non-deterministic, we cannot have overlapping

@@ -242,7 +247,7 @@ class DirectoryTest(Test):
                 if op.endswith('_DATABASE') and args.concurrency == 1:
                     test_util.blocking_commit(instructions)
 
-                path = ()
+                path = ()
                 count = random.randint(0, 1)
                 if count == 1:
                     path = generate_path()

@@ -256,14 +261,14 @@ class DirectoryTest(Test):
                 self.ensure_default_directory_subspace(instructions, default_path)
 
             elif root_op == 'DIRECTORY_LIST' or root_op == 'DIRECTORY_EXISTS':
-                path = ()
+                path = ()
                 count = random.randint(0, 1)
                 if count == 1:
                     path = generate_path()
                 instructions.push_args(*test_util.with_length(path))
                 instructions.push_args(count)
                 instructions.append(op)
 
             elif root_op == 'DIRECTORY_PACK_KEY':
                 t = self.random.random_tuple(5)
                 instructions.push_args(*test_util.with_length(t))

@@ -305,10 +310,10 @@ class DirectoryTest(Test):
                 instructions.push_args(self.directory_log.key())
                 instructions.append('DIRECTORY_LOG_DIRECTORY')
             if dir_entry.has_known_prefix and dir_entry.is_subspace:
-                #print '%d. Logging subspace: %d' % (i, dir_entry.dir_id)
+                # print '%d. Logging subspace: %d' % (i, dir_entry.dir_id)
                 instructions.push_args(self.subspace_log.key())
                 instructions.append('DIRECTORY_LOG_SUBSPACE')
-            if (i+1) % 100 == 0:
+            if (i + 1) % 100 == 0:
                 test_util.blocking_commit(instructions)
 
         instructions.push_args(self.stack_subspace.key())

@@ -332,18 +337,21 @@ class DirectoryTest(Test):
         # If a partition is created, allocates a prefix, and then is removed, subsequent prefix
         # allocations could collide with prior ones. We can get around this by not allowing
         # a removed directory (or partition) to be used, but that weakens the test in another way.
-        #errors += directory_util.check_for_duplicate_prefixes(db, self.prefix_log)
+        # errors += directory_util.check_for_duplicate_prefixes(db, self.prefix_log)
         return errors
 
     def get_result_specfications(self):
-        return [
-            ResultSpecification(self.stack, key_start_index=1, ordering_index=1),
-            ResultSpecification(self.directory_log, ordering_index=0),
-            ResultSpecification(self.subspace_log, ordering_index=0)
+        return [
+            ResultSpecification(self.stack, key_start_index=1, ordering_index=1),
+            ResultSpecification(self.directory_log, ordering_index=0),
+            ResultSpecification(self.subspace_log, ordering_index=0)
         ]
 
+
 # Utility functions
-def generate_path(min_length = 0):
+
+
+def generate_path(min_length=0):
     length = int(random.random() * random.random() * (4 - min_length)) + min_length
     path = ()
     for i in range(length):

@@ -351,9 +359,10 @@ def generate_path(min_length = 0):
             path = path + (u'',)
         else:
             path = path + (random.choice([u'1', u'2', u'3']),)
 
     return path
 
+
 def generate_prefix(allow_empty=True, is_partition=False):
     if allow_empty and random.random() < 0.8:
         return None

@@ -364,7 +373,7 @@ def generate_prefix(allow_empty=True, is_partition=False):
 
         if not is_partition:
             first = chr(random.randint(ord('\x1d'), 255) % 255)
-            return first + ''.join(chr(random.randrange(0, 256)) for i in range(0, length-1))
+            return first + ''.join(chr(random.randrange(0, 256)) for i in range(0, length - 1))
         else:
             return ''.join(chr(random.randrange(ord('\x02'), ord('\x14'))) for i in range(0, length))
     else:
@@ -30,6 +30,7 @@ from bindingtester.tests import test_util, directory_util
 
 fdb.api_version(FDB_API_VERSION)
 
+
 class DirectoryHcaTest(Test):
     def __init__(self, subspace):
         super(DirectoryHcaTest, self).__init__(subspace)

@@ -39,7 +40,7 @@ class DirectoryHcaTest(Test):
 
     def setup(self, args):
         self.random = test_util.RandomGenerator(args.max_int_bits, args.api_version, args.types)
-        self.transactions = ['tr%d' % i for i in range(3)] # SOMEDAY: parameterize this number?
+        self.transactions = ['tr%d' % i for i in range(3)]  # SOMEDAY: parameterize this number?
         self.barrier_num = 0
 
         self.max_directories_per_transaction = 30

@@ -58,7 +59,7 @@ class DirectoryHcaTest(Test):
 
     def barrier(self, instructions, thread_number, thread_ending=False):
         if not thread_ending:
-            instructions.push_args(self.coordination[(self.barrier_num+1)][thread_number].key(), '')
+            instructions.push_args(self.coordination[(self.barrier_num + 1)][thread_number].key(), '')
             instructions.append('SET_DATABASE')
             instructions.append('WAIT_FUTURE')
 

@@ -101,8 +102,9 @@ class DirectoryHcaTest(Test):
 
             for i in range(num_directories):
                 path = (self.random.random_unicode_str(16),)
-                op_args = test_util.with_length(path) + ('', None)
-                directory_util.push_instruction_and_record_prefix(instructions, 'DIRECTORY_CREATE', op_args, path, num_dirs, self.random, self.prefix_log)
+                op_args = test_util.with_length(path) + ('', None)
+                directory_util.push_instruction_and_record_prefix(instructions, 'DIRECTORY_CREATE',
+                                                                  op_args, path, num_dirs, self.random, self.prefix_log)
                 num_dirs += 1
 
             current_op += num_directories

@@ -127,4 +129,3 @@ class DirectoryHcaTest(Test):
             errors += directory_util.validate_hca_state(db)
 
         return errors
-
@@ -34,8 +34,9 @@ DEFAULT_DIRECTORY_INDEX = 4
 DEFAULT_DIRECTORY_PREFIX = 'default'
 DIRECTORY_ERROR_STRING = 'DIRECTORY_ERROR'
 
+
 class DirListEntry:
-    dir_id = 0 # Used for debugging
+    dir_id = 0  # Used for debugging
 
     def __init__(self, is_directory, is_subspace, has_known_prefix=True, path=(), root=None):
         self.root = root or self

@@ -53,45 +54,46 @@ class DirListEntry:
 
     def add_child(self, subpath, default_path, root, child):
         if default_path in root.children:
-            #print 'Adding child %r to default directory %r at %r' % (child, root.children[DirectoryTest.DEFAULT_DIRECTORY_PATH].path, subpath)
+            # print 'Adding child %r to default directory %r at %r' % (child, root.children[DirectoryTest.DEFAULT_DIRECTORY_PATH].path, subpath)
             c = root.children[default_path]._add_child_impl(subpath, child)
             child.has_known_prefix = c.has_known_prefix and child.has_known_prefix
-            #print 'Added %r' % c
+            # print 'Added %r' % c
 
-        #print 'Adding child %r to directory %r at %r' % (child, self.path, subpath)
+        # print 'Adding child %r to directory %r at %r' % (child, self.path, subpath)
         c = self._add_child_impl(subpath, child)
-        #print 'Added %r' % c
+        # print 'Added %r' % c
         return c
 
     def _add_child_impl(self, subpath, child):
-        #print '%d, %d. Adding child (recursive): %s %s' % (self.dir_id, child.dir_id, repr(self.path), repr(subpath))
+        # print '%d, %d. Adding child (recursive): %s %s' % (self.dir_id, child.dir_id, repr(self.path), repr(subpath))
         if len(subpath) == 0:
             self.has_known_prefix = self.has_known_prefix and child.has_known_prefix
-            #print '%d, %d. Setting child: %d' % (self.dir_id, child.dir_id, self.has_known_prefix)
+            # print '%d, %d. Setting child: %d' % (self.dir_id, child.dir_id, self.has_known_prefix)
             self._merge_children(child)
 
             return self
         else:
             if not subpath[0] in self.children:
-                #print '%d, %d. Path %s was absent (%s)' % (self.dir_id, child.dir_id, repr(self.path + subpath[0:1]), repr(self.children))
-                subdir = DirListEntry(True, True, path = self.path+subpath[0:1], root = self.root)
+                # print '%d, %d. Path %s was absent (%s)' % (self.dir_id, child.dir_id, repr(self.path + subpath[0:1]), repr(self.children))
+                subdir = DirListEntry(True, True, path=self.path + subpath[0:1], root=self.root)
                 subdir.has_known_prefix = len(subpath) == 1
                 self.children[subpath[0]] = subdir
             else:
                 subdir = self.children[subpath[0]]
                 subdir.has_known_prefix = False
-                #print '%d, %d. Path was present' % (self.dir_id, child.dir_id)
+                # print '%d, %d. Path was present' % (self.dir_id, child.dir_id)
 
             return subdir._add_child_impl(subpath[1:], child)
 
     def _merge_children(self, other):
         for c in other.children:
-            if not c in self.children:
+            if c not in self.children:
                 self.children[c] = other.children[c]
             else:
                 self.children[c].has_known_prefix = self.children[c].has_known_prefix and other.children[c].has_known_prefix
                 self.children[c]._merge_children(other.children[c])
 
+
 def setup_directories(instructions, default_path, random):
     dir_list = [DirListEntry(True, False, True)]
     instructions.push_args(0, '\xfe')

@@ -114,6 +116,7 @@ def setup_directories(instructions, default_path, random):
 
     return dir_list
 
+
 def create_default_directory_subspace(instructions, path, random):
     test_util.blocking_commit(instructions)
     instructions.push_args(3)

@@ -125,6 +128,7 @@ def create_default_directory_subspace(instructions, path, random):
     instructions.push_args(DEFAULT_DIRECTORY_INDEX)
     instructions.append('DIRECTORY_CHANGE')
 
+
 def push_instruction_and_record_prefix(instructions, op, op_args, path, dir_index, random, subspace):
     if not op.endswith('_DATABASE'):
         instructions.push_args(1, *test_util.with_length(path))

@@ -141,17 +145,18 @@ def push_instruction_and_record_prefix(instructions, op, op_args, path, dir_inde
 
         instructions.push_args(1, '', random.random_string(16), '')
         instructions.append('DIRECTORY_PACK_KEY')
-        test_util.to_front(instructions, 3) # move the existence result up to the front of the stack
+        test_util.to_front(instructions, 3)  # move the existence result up to the front of the stack
 
         t = util.subspace_to_tuple(subspace)
         instructions.push_args(len(t) + 3, *t)
 
-        instructions.append('TUPLE_PACK') # subspace[<exists>][<packed_key>][random.random_string(16)] = ''
+        instructions.append('TUPLE_PACK')  # subspace[<exists>][<packed_key>][random.random_string(16)] = ''
         instructions.append('SET')
 
     instructions.push_args(DEFAULT_DIRECTORY_INDEX)
     instructions.append('DIRECTORY_CHANGE')
 
+
 def check_for_duplicate_prefixes(db, subspace):
     last_prefix = None
     start_key = subspace[0].range().start

@@ -164,18 +169,19 @@ def check_for_duplicate_prefixes(db, subspace):
             break
 
         start_key = fdb.KeySelector.first_greater_than(prefixes[-1].key)
 
         prefixes = [subspace[0].unpack(kv.key)[0] for kv in prefixes]
         prefixes = [p for p in prefixes if not (p.startswith(DEFAULT_DIRECTORY_PREFIX) or p == DIRECTORY_ERROR_STRING)]
         count += len(prefixes)
 
         prefixes = [last_prefix] + prefixes
-        duplicates.update([p for i,p in enumerate(prefixes[1:]) if p == prefixes[i]])
+        duplicates.update([p for i, p in enumerate(prefixes[1:]) if p == prefixes[i]])
         last_prefix = prefixes[-1]
 
     util.get_logger().info('Checked %d directory prefixes for duplicates' % count)
     return ['The prefix %r was allocated multiple times' % d[:-2] for d in set(duplicates)]
 
+
 def validate_hca_state(db):
     hca = fdb.Subspace(('\xfe', 'hca'), '\xfe')
     counters = hca[0]

@@ -184,7 +190,7 @@ def validate_hca_state(db):
     last_counter = db.get_range(counters.range().start, counters.range().stop, limit=1, reverse=True)
     [(start, reported_count)] = [(counters.unpack(kv.key)[0], struct.unpack('<q', kv.value)[0]) for kv in last_counter] or [(0, 0)]
 
-    actual_count = len(db[recent[start] : recent.range().stop])
+    actual_count = len(db[recent[start]: recent.range().stop])
     if actual_count > reported_count:
         return ['The HCA reports %d prefixes allocated in current window, but it actually allocated %d' % (reported_count, actual_count)]
 
|
@ -31,6 +31,8 @@ from bindingtester.tests import test_util
fdb.api_version(FDB_API_VERSION)

# SOMEDAY: This should probably be broken up into smaller tests


class ScriptedTest(Test):
    TEST_API_VERSION = 510

@ -38,15 +40,15 @@ class ScriptedTest(Test):
        super(ScriptedTest, self).__init__(subspace, ScriptedTest.TEST_API_VERSION, ScriptedTest.TEST_API_VERSION)
        self.workspace = self.subspace['workspace']
        self.results_subspace = self.subspace['results']
        #self.thread_subspace = self.subspace['threads'] # TODO: update START_THREAD so that we can create threads in subspaces
        # self.thread_subspace = self.subspace['threads'] # TODO: update START_THREAD so that we can create threads in subspaces

    def setup(self, args):
        if args.concurrency > 1:
            raise Exception('Scripted tests cannot be run with a concurrency greater than 1')

        # SOMEDAY: this is only a limitation because we don't know how many operations the bisection should start with
        # it should be fixable.
        #
        #
        # We also need to enable the commented out support for num_ops in this file and make it so the default value runs
        # the entire test
        if args.bisect:

@ -58,7 +60,7 @@ class ScriptedTest(Test):
        test_instructions = ThreadedInstructionSet()
        main_thread = test_instructions.create_thread()

        foo = [self.workspace.pack(('foo%d' % i,)) for i in range(0,6)]
        foo = [self.workspace.pack(('foo%d' % i,)) for i in range(0, 6)]

        main_thread.append('NEW_TRANSACTION')
        main_thread.push_args(1020)

@ -270,8 +272,8 @@ class ScriptedTest(Test):

        stampKey = 'stampedXXXXXXXXXXsuffix'
        stampKeyIndex = stampKey.find('XXXXXXXXXX')
        stampKeyStr = chr(stampKeyIndex%256) + chr(stampKeyIndex/256)
        main_thread.push_args(u'SET_VERSIONSTAMPED_KEY', stampKey + stampKeyStr, 'stampedBar')
        stampKeyStr = chr(stampKeyIndex % 256) + chr(stampKeyIndex / 256)
        main_thread.push_args(u'SET_VERSIONSTAMPED_KEY', stampKey + stampKeyStr, 'stampedBar')
        main_thread.append('ATOMIC_OP')
        main_thread.push_args(u'SET_VERSIONSTAMPED_VALUE', 'stampedValue', 'XXXXXXXXXX')
        main_thread.append('ATOMIC_OP')

@ -305,7 +307,7 @@ class ScriptedTest(Test):

        if not args.no_threads:
            wait_key = 'waitKey'
            #threads = [self.thread_subspace[i] for i in range(0, 2)]
            # threads = [self.thread_subspace[i] for i in range(0, 2)]
            threads = ['thread_spec%d' % i for i in range(0, 2)]
            for thread_spec in threads:
                main_thread.push_args(self.workspace.pack((wait_key, thread_spec)), '')

@ -314,11 +316,12 @@ class ScriptedTest(Test):

            for thread_spec in threads:
                main_thread.push_args(thread_spec)
                #if len(main_thread) < args.num_ops:
                # if len(main_thread) < args.num_ops:
                main_thread.append('START_THREAD')
                thread = test_instructions.create_thread(fdb.Subspace((thread_spec,)))
                thread.append('NEW_TRANSACTION')
                thread.push_args(foo[1], foo[1], 'bar%s' % thread_spec, self.workspace.pack((wait_key, thread_spec)), self.workspace.pack((wait_key, thread_spec)))
                thread.push_args(foo[1], foo[1], 'bar%s' % thread_spec, self.workspace.pack(
                    (wait_key, thread_spec)), self.workspace.pack((wait_key, thread_spec)))
                thread.append('GET')
                thread.append('POP')
                thread.append('SET')

@ -333,20 +336,20 @@ class ScriptedTest(Test):
            thread.push_args(foo[1])
            thread.append('GET')
            self.add_result(thread, args, 'barthread_spec0', 'barthread_spec1')

        main_thread.append('EMPTY_STACK')
        #if len(main_thread) > args.num_ops:
        #main_thread[args.num_ops:] = []
        # if len(main_thread) > args.num_ops:
        # main_thread[args.num_ops:] = []

        return test_instructions

    def get_result_specifications(self):
        return [
            ResultSpecification(self.results_subspace, ordering_index=0, global_error_filter=[1007, 1021])
        return [
            ResultSpecification(self.results_subspace, ordering_index=0, global_error_filter=[1007, 1021])
        ]

    def get_expected_results(self):
        return { self.results_subspace : self.results }
        return {self.results_subspace: self.results}

    def append_range_test(self, instructions, args, num_pairs, kv_length):
        instructions.append('NEW_TRANSACTION')

@ -355,7 +358,7 @@ class ScriptedTest(Test):
        instructions.append('CLEAR_RANGE_STARTS_WITH')

        kvpairs = []
        for i in range(0, num_pairs*2):
        for i in range(0, num_pairs * 2):
            kvpairs.append(self.workspace.pack(('foo', ''.join(chr(random.randint(0, 254)) for i in range(0, kv_length)))))

        kvpairs = list(set(kvpairs))

@ -364,7 +367,7 @@ class ScriptedTest(Test):
        kvpairs.sort()

        instructions.push_args(*kvpairs)
        for i in range(0, len(kvpairs)/2):
        for i in range(0, len(kvpairs) / 2):
            instructions.append('SET')
            if i % 100 == 99:
                test_util.blocking_commit(instructions)

@ -388,8 +391,7 @@ class ScriptedTest(Test):
        instructions.push_args(key)
        instructions.append('SET_DATABASE')

        #if len(instructions) <= args.num_ops:
        # if len(instructions) <= args.num_ops:
        self.results.append(Result(self.results_subspace, key, values))

        instructions.append('POP')
@ -31,6 +31,7 @@ from bindingtester import util
from bindingtester import FDB_API_VERSION
from bindingtester.known_testers import COMMON_TYPES


class RandomGenerator(object):
    def __init__(self, max_int_bits=64, api_version=FDB_API_VERSION, types=COMMON_TYPES):
        self.max_int_bits = max_int_bits

@ -41,13 +42,13 @@ class RandomGenerator(object):
        return u''.join(self.random_unicode_char() for i in range(0, length))

    def random_int(self):
        num_bits = random.randint(0, self.max_int_bits) # This way, we test small numbers with higher probability
        num_bits = random.randint(0, self.max_int_bits)  # This way, we test small numbers with higher probability

        max_value = (1 << num_bits) - 1
        min_value = -max_value - 1
        num = random.randint(min_value, max_value)

        #util.get_logger().debug('generating int (%d): %d - %s' % (num_bits, num, repr(fdb.tuple.pack((num,)))))
        # util.get_logger().debug('generating int (%d): %d - %s' % (num_bits, num, repr(fdb.tuple.pack((num,)))))
        return num

    def random_float(self, exp_bits):

@ -57,7 +58,7 @@ class RandomGenerator(object):
        else:
            # Choose a value from all over the range of acceptable floats for this precision.
            sign = -1 if random.random() < 0.5 else 1
            exponent = random.randint(-(1 << (exp_bits-1))-10, (1 << (exp_bits-1) - 1))
            exponent = random.randint(-(1 << (exp_bits - 1)) - 10, (1 << (exp_bits - 1) - 1))
            mantissa = random.random()
            return sign * math.pow(2, exponent) * mantissa

@ -117,12 +118,12 @@ class RandomGenerator(object):
            smaller_size = random.randint(1, len(to_add))
            tuples.append(to_add[:smaller_size])
        else:
            non_empty = filter(lambda (i,x): (isinstance(x, list) or isinstance(x, tuple)) and len(x) > 0, enumerate(to_add))
            non_empty = filter(lambda (_, x): (isinstance(x, list) or isinstance(x, tuple)) and len(x) > 0, enumerate(to_add))
            if len(non_empty) > 0 and random.random() < 0.25:
                # Add a smaller list to test prefixes of nested structures.
                idx, choice = random.choice(non_empty)
                smaller_size = random.randint(0, len(to_add[idx]))
                tuples.append(to_add[:idx] + (choice[:smaller_size],) + to_add[idx+1:])
                tuples.append(to_add[:idx] + (choice[:smaller_size],) + to_add[idx + 1:])

        random.shuffle(tuples)
        return tuples

@ -133,7 +134,7 @@ class RandomGenerator(object):
        elif random.random() < 0.75:
            limit = 0
        else:
            limit = random.randint(1e8, (1<<31)-1)
            limit = random.randint(1e8, (1 << 31) - 1)

        return (limit, random.randint(0, 1), random.randint(-2, 4))

@ -149,13 +150,13 @@ class RandomGenerator(object):
        if length == 0:
            return ''

        return chr(random.randint(0, 254)) + ''.join(chr(random.randint(0, 255)) for i in range(0, length-1))
        return chr(random.randint(0, 254)) + ''.join(chr(random.randint(0, 255)) for i in range(0, length - 1))

    def random_unicode_char(self):
        while True:
            if random.random() < 0.05:
                # Choose one of these special character sequences.
                specials = [u'\U0001f4a9', u'\U0001f63c', u'\U0001f3f3\ufe0f\u200d\U0001f308', u'\U0001f1f5\U0001f1f2', u'\uf8ff',
                specials = [u'\U0001f4a9', u'\U0001f63c', u'\U0001f3f3\ufe0f\u200d\U0001f308', u'\U0001f1f5\U0001f1f2', u'\uf8ff',
                            u'\U0002a2b2', u'\u05e9\u05dc\u05d5\u05dd']
                return random.choice(specials)
            c = random.randint(0, 0xffff)

@ -166,11 +167,13 @@ class RandomGenerator(object):
def error_string(error_code):
    return fdb.tuple.pack(('ERROR', str(error_code)))


def blocking_commit(instructions):
    instructions.append('COMMIT')
    instructions.append('WAIT_FUTURE')
    instructions.append('RESET')


def to_front(instructions, index):
    if index == 0:
        pass

@ -178,19 +181,19 @@ def to_front(instructions, index):
        instructions.push_args(1)
        instructions.append('SWAP')
    elif index == 2:
        instructions.push_args(index-1)
        instructions.push_args(index - 1)
        instructions.append('SWAP')
        instructions.push_args(index)
        instructions.append('SWAP')
    else:
        instructions.push_args(index-1)
        instructions.push_args(index - 1)
        instructions.append('SWAP')
        instructions.push_args(index)
        instructions.append('SWAP')
        instructions.push_args(index-1)
        instructions.push_args(index - 1)
        instructions.append('SWAP')
        to_front(instructions, index-1)
        to_front(instructions, index - 1)


def with_length(tup):
    return (len(tup),) + tup
@ -25,6 +25,7 @@ import glob

import fdb


def initialize_logger_level(logging_level):
    logger = get_logger()

@ -39,9 +40,11 @@ def initialize_logger_level(logging_level):
    elif logging_level == "ERROR":
        logger.setLevel(logging.ERROR)


def get_logger():
    return logging.getLogger('foundationdb.bindingtester')


# Attempts to get the name associated with a process termination signal
def signal_number_to_name(signal_num):
    name = []

@ -53,6 +56,7 @@ def signal_number_to_name(signal_num):
    else:
        return str(signal_num)


def import_subclasses(filename, module_path):
    for f in glob.glob(os.path.join(os.path.dirname(filename), '*.py')):
        fn = os.path.basename(f)

@ -60,6 +64,7 @@ def import_subclasses(filename, module_path):
            continue
        __import__('%s.%s' % (module_path, os.path.splitext(fn)[0]))


# Attempts to unpack a subspace
# This throws an exception if the subspace cannot be unpacked as a tuple
# As a result, the binding tester cannot use subspaces that have non-tuple raw prefixes

@ -69,4 +74,3 @@ def subspace_to_tuple(subspace):
    except Exception as e:
        get_logger().debug(e)
        raise Exception('The binding tester does not support subspaces with non-tuple raw prefixes')
@ -27,19 +27,21 @@ import sys

functions = {}

func_re = re.compile("^\s*FDB_API_(?:CHANGED|REMOVED)\s*\(\s*([^,]*),\s*([^)]*)\).*")
func_re = re.compile(
    "^\s*FDB_API_(?:CHANGED|REMOVED)\s*\(\s*([^,]*),\s*([^)]*)\).*")

with open(source, 'r') as srcfile:
    for l in srcfile:
        m = func_re.match(l)
        if m:
            func, ver = m.groups()
            if not func in functions:
            if func not in functions:
                functions[func] = []
            ver = int(ver)
            if not ver in functions[func]:
            if ver not in functions[func]:
                functions[func].append(ver)


def write_windows_asm(asmfile, functions):
    asmfile.write(".data\n")
    for f in functions:

@ -55,6 +57,7 @@ def write_windows_asm(asmfile, functions):

    asmfile.write("\nEND\n")


def write_unix_asm(asmfile, functions, prefix):
    asmfile.write(".intel_syntax noprefix\n")

@ -70,13 +73,17 @@ def write_unix_asm(asmfile, functions, prefix):
    for f in functions:
        asmfile.write("\n.globl %s%s\n" % (prefix, f))
        asmfile.write("%s%s:\n" % (prefix, f))
        asmfile.write("\tmov r11, qword ptr [%sfdb_api_ptr_%s@GOTPCREL+rip]\n" % (prefix, f))
        asmfile.write(
            "\tmov r11, qword ptr [%sfdb_api_ptr_%s@GOTPCREL+rip]\n" % (prefix, f))
        asmfile.write("\tmov r11, qword ptr [r11]\n")
        asmfile.write("\tjmp r11\n")


with open(asm, 'w') as asmfile, open(h, 'w') as hfile:
    hfile.write("void fdb_api_ptr_unimpl() { fprintf(stderr, \"UNIMPLEMENTED FDB API FUNCTION\\n\"); abort(); }\n\n")
    hfile.write("void fdb_api_ptr_removed() { fprintf(stderr, \"REMOVED FDB API FUNCTION\\n\"); abort(); }\n\n")
    hfile.write(
        "void fdb_api_ptr_unimpl() { fprintf(stderr, \"UNIMPLEMENTED FDB API FUNCTION\\n\"); abort(); }\n\n")
    hfile.write(
        "void fdb_api_ptr_removed() { fprintf(stderr, \"REMOVED FDB API FUNCTION\\n\"); abort(); }\n\n")

    if platform == "linux":
        write_unix_asm(asmfile, functions, '')

@ -90,4 +97,4 @@ with open(asm, 'w') as asmfile, open(h, 'w') as hfile:
            hfile.write("extern \"C\" ")
        hfile.write("void* fdb_api_ptr_%s = (void*)&fdb_api_ptr_unimpl;\n" % f)
        for v in functions[f]:
            hfile.write("#define %s_v%d_PREV %s_v%d\n" % (f, v, f, v-1))
            hfile.write("#define %s_v%d_PREV %s_v%d\n" % (f, v, f, v - 1))
@ -51,6 +51,14 @@ GO_SRC := $(shell find $(CURDIR)/bindings/go/src -name '*.go')

fdb_go: $(GO_PACKAGE_OBJECTS) $(GO_SRC)

fdb_go_fmt: $(GO_SRC)
	@echo "Formatting fdb_go"
	@gofmt -w $(GO_SRC)

fdb_go_fmt_check: $(GO_SRC)
	@echo "Checking fdb_go"
	@bash -c 'fmtoutstr=$$(gofmt -l $(GO_SRC)) ; if [[ -n "$${fmtoutstr}" ]] ; then echo "Detected go formatting violations for the following files:" ; echo "$${fmtoutstr}" ; echo "Try running: make fdb_go_fmt"; exit 1 ; fi'

fdb_go_path: $(GO_SRC)
	@echo "Creating fdb_go_path"
	@mkdir -p $(GO_DEST)

@ -66,27 +74,27 @@ fdb_go_tester_clean:
	@echo "Cleaning fdb_go_tester"
	@rm -rf $(GOPATH)/bin

$(GOPATH)/bin/_stacktester: fdb_go_path $(GO_SRC) $(GO_PACKAGE_OBJECTS) $(GO_DEST)/fdb/generated.go
$(GOPATH)/bin/_stacktester: fdb_go_path fdb_go_fmt_check $(GO_SRC) $(GO_PACKAGE_OBJECTS) $(GO_DEST)/fdb/generated.go
	@echo "Compiling $(basename $(notdir $@))"
	@go install $(GO_IMPORT_PATH)/_stacktester

$(GO_PACKAGE_OUTDIR)/fdb/tuple.a: fdb_go_path $(GO_SRC) $(GO_PACKAGE_OUTDIR)/fdb.a $(GO_DEST)/fdb/generated.go
$(GO_PACKAGE_OUTDIR)/fdb/tuple.a: fdb_go_path fdb_go_fmt_check $(GO_SRC) $(GO_PACKAGE_OUTDIR)/fdb.a $(GO_DEST)/fdb/generated.go
	@echo "Compiling fdb/tuple"
	@go install $(GO_IMPORT_PATH)/fdb/tuple

$(GO_PACKAGE_OUTDIR)/fdb/subspace.a: fdb_go_path $(GO_SRC) $(GO_PACKAGE_OUTDIR)/fdb.a $(GO_PACKAGE_OUTDIR)/fdb/tuple.a $(GO_DEST)/fdb/generated.go
$(GO_PACKAGE_OUTDIR)/fdb/subspace.a: fdb_go_path fdb_go_fmt_check $(GO_SRC) $(GO_PACKAGE_OUTDIR)/fdb.a $(GO_PACKAGE_OUTDIR)/fdb/tuple.a $(GO_DEST)/fdb/generated.go
	@echo "Compiling fdb/subspace"
	@go install $(GO_IMPORT_PATH)/fdb/subspace

$(GO_PACKAGE_OUTDIR)/fdb/directory.a: fdb_go_path $(GO_SRC) $(GO_PACKAGE_OUTDIR)/fdb.a $(GO_PACKAGE_OUTDIR)/fdb/tuple.a $(GO_PACKAGE_OUTDIR)/fdb/subspace.a $(GO_DEST)/fdb/generated.go
$(GO_PACKAGE_OUTDIR)/fdb/directory.a: fdb_go_path fdb_go_fmt_check $(GO_SRC) $(GO_PACKAGE_OUTDIR)/fdb.a $(GO_PACKAGE_OUTDIR)/fdb/tuple.a $(GO_PACKAGE_OUTDIR)/fdb/subspace.a $(GO_DEST)/fdb/generated.go
	@echo "Compiling fdb/directory"
	@go install $(GO_IMPORT_PATH)/fdb/directory

$(GO_PACKAGE_OUTDIR)/fdb.a: fdb_go_path $(GO_SRC) $(GO_DEST)/fdb/generated.go
$(GO_PACKAGE_OUTDIR)/fdb.a: fdb_go_path fdb_go_fmt_check $(GO_SRC) $(GO_DEST)/fdb/generated.go
	@echo "Compiling fdb"
	@go install $(GO_IMPORT_PATH)/fdb

$(GO_DEST)/fdb/generated.go: fdb_go_path lib/libfdb_c.$(DLEXT) bindings/go/src/_util/translate_fdb_options.go fdbclient/vexillographer/fdb.options
$(GO_DEST)/fdb/generated.go: fdb_go_path fdb_go_fmt_check lib/libfdb_c.$(DLEXT) bindings/go/src/_util/translate_fdb_options.go fdbclient/vexillographer/fdb.options
	@echo "Building $@"
	@go run bindings/go/src/_util/translate_fdb_options.go < fdbclient/vexillographer/fdb.options > $@
@ -21,12 +21,12 @@
package main

import (
	"github.com/apple/foundationdb/bindings/go/src/fdb"
	"github.com/apple/foundationdb/bindings/go/src/fdb/tuple"
	"github.com/apple/foundationdb/bindings/go/src/fdb/subspace"
	"github.com/apple/foundationdb/bindings/go/src/fdb/directory"
	"strings"
	"bytes"
	"github.com/apple/foundationdb/bindings/go/src/fdb"
	"github.com/apple/foundationdb/bindings/go/src/fdb/directory"
	"github.com/apple/foundationdb/bindings/go/src/fdb/subspace"
	"github.com/apple/foundationdb/bindings/go/src/fdb/tuple"
	"strings"
)

func (sm *StackMachine) popTuples(count int) []tuple.Tuple {

@ -60,8 +60,8 @@ func tuplePackStrings(s []string) []byte {
}

type DirectoryExtension struct {
	list []interface{}
	index int64
	list       []interface{}
	index      int64
	errorIndex int64
}

@ -93,15 +93,15 @@ func (sm *StackMachine) maybePath() []string {
	return path
}

var createOps = map[string]bool {
var createOps = map[string]bool{
	"CREATE_SUBSPACE": true,
	"CREATE_LAYER": true,
	"CREATE_OR_OPEN": true,
	"CREATE": true,
	"OPEN": true,
	"MOVE": true,
	"MOVE_TO": true,
	"OPEN_SUBSPACE": true,
	"CREATE_LAYER":    true,
	"CREATE_OR_OPEN":  true,
	"CREATE":          true,
	"OPEN":            true,
	"MOVE":            true,
	"MOVE_TO":         true,
	"OPEN_SUBSPACE":   true,
}

func (de *DirectoryExtension) processOp(sm *StackMachine, op string, isDB bool, idx int, t fdb.Transactor, rt fdb.ReadTransactor) {

@ -142,7 +145,9 @@ func (de *DirectoryExtension) processOp(sm *StackMachine, op string, isDB bool,
			layer = l.([]byte)
		}
		d, e := de.cwd().CreateOrOpen(t, tupleToPath(tuples[0]), layer)
		if e != nil { panic(e) }
		if e != nil {
			panic(e)
		}
		de.store(d)
	case op == "CREATE":
		tuples := sm.popTuples(1)

@ -159,7 +161,9 @@ func (de *DirectoryExtension) processOp(sm *StackMachine, op string, isDB bool,
			// p.([]byte) itself may be nil, but CreatePrefix handles that appropriately
			d, e = de.cwd().CreatePrefix(t, tupleToPath(tuples[0]), layer, p.([]byte))
		}
		if e != nil { panic(e) }
		if e != nil {
			panic(e)
		}
		de.store(d)
	case op == "OPEN":
		tuples := sm.popTuples(1)

@ -169,7 +173,9 @@ func (de *DirectoryExtension) processOp(sm *StackMachine, op string, isDB bool,
			layer = l.([]byte)
		}
		d, e := de.cwd().Open(rt, tupleToPath(tuples[0]), layer)
		if e != nil { panic(e) }
		if e != nil {
			panic(e)
		}
		de.store(d)
	case op == "CHANGE":
		i := sm.waitAndPop().item.(int64)

@ -182,12 +188,16 @@ func (de *DirectoryExtension) processOp(sm *StackMachine, op string, isDB bool,
	case op == "MOVE":
		tuples := sm.popTuples(2)
		d, e := de.cwd().Move(t, tupleToPath(tuples[0]), tupleToPath(tuples[1]))
		if e != nil { panic(e) }
		if e != nil {
			panic(e)
		}
		de.store(d)
	case op == "MOVE_TO":
		tuples := sm.popTuples(1)
		d, e := de.cwd().MoveTo(t, tupleToPath(tuples[0]))
		if e != nil { panic(e) }
		if e != nil {
			panic(e)
		}
		de.store(d)
	case strings.HasPrefix(op, "REMOVE"):
		path := sm.maybePath()

@ -197,9 +207,11 @@ func (de *DirectoryExtension) processOp(sm *StackMachine, op string, isDB bool,
		// doesn't end up committing the version key. (Other languages have
		// separate remove() and remove_if_exists() so don't have this tricky
		// issue).
		_, e := t.Transact(func (tr fdb.Transaction) (interface{}, error) {
		_, e := t.Transact(func(tr fdb.Transaction) (interface{}, error) {
			ok, e := de.cwd().Remove(tr, path)
			if e != nil { panic(e) }
			if e != nil {
				panic(e)
			}
			switch op[6:] {
			case "":
				if !ok {

@ -209,16 +221,24 @@ func (de *DirectoryExtension) processOp(sm *StackMachine, op string, isDB bool,
			}
			return nil, nil
		})
		if e != nil { panic(e) }
		if e != nil {
			panic(e)
		}
	case op == "LIST":
		subs, e := de.cwd().List(rt, sm.maybePath())
		if e != nil { panic(e) }
		if e != nil {
			panic(e)
		}
		t := make(tuple.Tuple, len(subs))
		for i, s := range subs { t[i] = s }
		for i, s := range subs {
			t[i] = s
		}
		sm.store(idx, t.Pack())
	case op == "EXISTS":
		b, e := de.cwd().Exists(rt, sm.maybePath())
		if e != nil { panic(e) }
		if e != nil {
			panic(e)
		}
		if b {
			sm.store(idx, int64(1))
		} else {

@ -229,8 +249,10 @@ func (de *DirectoryExtension) processOp(sm *StackMachine, op string, isDB bool,
		sm.store(idx, de.css().Pack(tuples[0]))
	case op == "UNPACK_KEY":
		t, e := de.css().Unpack(fdb.Key(sm.waitAndPop().item.([]byte)))
		if e != nil { panic(e) }
		for _, el := range(t) {
		if e != nil {
			panic(e)
		}
		for _, el := range t {
			sm.store(idx, el)
		}
	case op == "RANGE":

@ -252,7 +274,7 @@ func (de *DirectoryExtension) processOp(sm *StackMachine, op string, isDB bool,
		k := sm.waitAndPop().item.([]byte)
		k = append(k, tuple.Tuple{de.index}.Pack()...)
		v := de.css().Bytes()
		t.Transact(func (tr fdb.Transaction) (interface{}, error) {
		t.Transact(func(tr fdb.Transaction) (interface{}, error) {
			tr.Set(fdb.Key(k), v)
			return nil, nil
		})

@ -266,7 +288,9 @@ func (de *DirectoryExtension) processOp(sm *StackMachine, op string, isDB bool,
		k3 := ss.Pack(tuple.Tuple{"exists"})
		var v3 []byte
		exists, e := de.cwd().Exists(rt, nil)
		if e != nil { panic(e) }
		if e != nil {
			panic(e)
		}
		if exists {
			v3 = tuple.Tuple{1}.Pack()
		} else {

@ -276,10 +300,12 @@ func (de *DirectoryExtension) processOp(sm *StackMachine, op string, isDB bool,
		var subs []string
		if exists {
			subs, e = de.cwd().List(rt, nil)
			if e != nil { panic(e) }
			if e != nil {
				panic(e)
			}
		}
		v4 := tuplePackStrings(subs)
		t.Transact(func (tr fdb.Transaction) (interface{}, error) {
		t.Transact(func(tr fdb.Transaction) (interface{}, error) {
			tr.Set(k1, v1)
			tr.Set(k2, v2)
			tr.Set(k3, v3)
@ -24,23 +24,23 @@ import (
	"bytes"
	"encoding/binary"
	"encoding/hex"
	"fmt"
	"github.com/apple/foundationdb/bindings/go/src/fdb"
	"github.com/apple/foundationdb/bindings/go/src/fdb/tuple"
	"log"
	"fmt"
	"os"
	"reflect"
	"runtime"
	"sort"
	"strconv"
	"strings"
	"sync"
	"runtime"
	"reflect"
	"time"
	"strconv"
)

const verbose bool = false

var trMap = map[string]fdb.Transaction {}
var trMap = map[string]fdb.Transaction{}
var trMapLock = sync.RWMutex{}

// Make tuples sortable by byte-order

@ -69,17 +69,17 @@ func int64ToBool(i int64) bool {

type stackEntry struct {
	item interface{}
	idx int
	idx  int
}

type StackMachine struct {
	prefix []byte
	trName string
	stack []stackEntry
	prefix      []byte
	trName      string
	stack       []stackEntry
	lastVersion int64
	threads sync.WaitGroup
	verbose bool
	de *DirectoryExtension
	threads     sync.WaitGroup
	verbose     bool
	de          *DirectoryExtension
}

func newStackMachine(prefix []byte, verbose bool) *StackMachine {

@ -99,7 +99,7 @@ func (sm *StackMachine) waitAndPop() (ret stackEntry) {
		}
	}()

	ret, sm.stack = sm.stack[len(sm.stack) - 1], sm.stack[:len(sm.stack) - 1]
	ret, sm.stack = sm.stack[len(sm.stack)-1], sm.stack[:len(sm.stack)-1]
	switch el := ret.item.(type) {
	case []byte:
		ret.item = el

@ -150,9 +150,9 @@ func (sm *StackMachine) popPrefixRange() fdb.ExactRange {
}

func (sm *StackMachine) pushRange(idx int, sl []fdb.KeyValue, prefixFilter []byte) {
	var t tuple.Tuple = make(tuple.Tuple, 0, len(sl) * 2)
	var t tuple.Tuple = make(tuple.Tuple, 0, len(sl)*2)

	for _, kv := range(sl) {
	for _, kv := range sl {
		if prefixFilter == nil || bytes.HasPrefix(kv.Key, prefixFilter) {
			t = append(t, kv.Key)
			t = append(t, kv.Value)

@ -240,7 +240,7 @@ func (sm *StackMachine) dumpStack() {
	}
}

func (sm *StackMachine) executeMutation(t fdb.Transactor, f func (fdb.Transaction) (interface{}, error), isDB bool, idx int) {
func (sm *StackMachine) executeMutation(t fdb.Transactor, f func(fdb.Transaction) (interface{}, error), isDB bool, idx int) {
	_, e := t.Transact(f)
	if e != nil {
		panic(e)

@ -250,8 +250,8 @@ func (sm *StackMachine) executeMutation(t fdb.Transactio
	}
}

func (sm *StackMachine) checkWatches(watches [4]fdb.FutureNil, expected bool) (bool) {
	for _, watch := range(watches) {
func (sm *StackMachine) checkWatches(watches [4]fdb.FutureNil, expected bool) bool {
	for _, watch := range watches {
		if watch.IsReady() || expected {
			e := watch.Get()
			if e != nil {

@ -283,7 +283,9 @@ func (sm *StackMachine) testWatches() {
		tr.Set(fdb.Key("w3"), []byte("3"))
		return nil, nil
	})
	if e != nil { panic(e) }
	if e != nil {
		panic(e)
	}

	var watches [4]fdb.FutureNil

@ -297,7 +299,9 @@ func (sm *StackMachine) testWatches() {
		tr.Clear(fdb.Key("w1"))
		return nil, nil
	})
	if e != nil { panic(e) }
	if e != nil {
		panic(e)
	}

	time.Sleep(5 * time.Second)

@ -312,7 +316,9 @@ func (sm *StackMachine) testWatches() {
		tr.BitXor(fdb.Key("w3"), []byte("\xff\xff"))
		return nil, nil
	})
	if e != nil { panic(e) }
	if e != nil {
		panic(e)
	}

	if sm.checkWatches(watches, true) {
		return

@ -322,21 +328,23 @@ func (sm *StackMachine) testWatches() {

func (sm *StackMachine) testLocality() {
	_, e := db.Transact(func(tr fdb.Transaction) (interface{}, error) {
		tr.Options().SetTimeout(60*1000)
		tr.Options().SetTimeout(60 * 1000)
		tr.Options().SetReadSystemKeys()
		boundaryKeys, e := db.LocalityGetBoundaryKeys(fdb.KeyRange{fdb.Key(""), fdb.Key("\xff\xff")}, 0, 0)
		if e != nil { panic(e) }
		if e != nil {
			panic(e)
		}

		for i:=0; i<len(boundaryKeys)-1 ; i++ {
		for i := 0; i < len(boundaryKeys)-1; i++ {
			start := boundaryKeys[i]
			end := tr.GetKey(fdb.LastLessThan(boundaryKeys[i+1])).MustGet()

			startAddresses := tr.LocalityGetAddressesForKey(start).MustGet()
			endAddresses := tr.LocalityGetAddressesForKey(end).MustGet()

			for _, address1 := range(startAddresses) {
			for _, address1 := range startAddresses {
				found := false
				for _, address2 := range(endAddresses) {
				for _, address2 := range endAddresses {
					if address1 == address2 {
						found = true
						break

@ -351,7 +359,9 @@ func (sm *StackMachine) testLocality() {
		return nil, nil
	})

	if e != nil { panic(e) }
	if e != nil {
		panic(e)
	}
}

func (sm *StackMachine) logStack(entries map[int]stackEntry, prefix []byte) {

@ -377,7 +387,9 @@ func (sm *StackMachine) logStack(entries map[int]stackEntry, prefix []byte) {
		return nil, nil
	})

	if e != nil { panic(e) }
	if e != nil {
		panic(e)
	}
	return
}

@ -467,28 +479,28 @@ func (sm *StackMachine) processInst(idx int, inst tuple.Tuple) {
	case op == "PUSH":
		sm.store(idx, inst[1])
	case op == "DUP":
		entry := sm.stack[len(sm.stack) - 1]
		entry := sm.stack[len(sm.stack)-1]
		sm.store(entry.idx, entry.item)
	case op == "EMPTY_STACK":
		sm.stack = []stackEntry{}
		sm.stack = make([]stackEntry, 0)
	case op == "SWAP":
		idx := sm.waitAndPop().item.(int64)
		sm.stack[len(sm.stack) - 1], sm.stack[len(sm.stack) - 1 - int(idx)] = sm.stack[len(sm.stack) - 1 - int(idx)], sm.stack[len(sm.stack) - 1]
		sm.stack[len(sm.stack)-1], sm.stack[len(sm.stack)-1-int(idx)] = sm.stack[len(sm.stack)-1-int(idx)], sm.stack[len(sm.stack)-1]
	case op == "POP":
		sm.stack = sm.stack[:len(sm.stack) - 1]
		sm.stack = sm.stack[:len(sm.stack)-1]
	case op == "SUB":
		sm.store(idx, sm.waitAndPop().item.(int64) - sm.waitAndPop().item.(int64))
		sm.store(idx, sm.waitAndPop().item.(int64)-sm.waitAndPop().item.(int64))
	case op == "CONCAT":
		str1 := sm.waitAndPop().item
		str2 := sm.waitAndPop().item
		switch str1.(type) {
			case string:
				sm.store(idx, str1.(string) + str2.(string))
			case []byte:
				sm.store(idx, append(str1.([]byte), str2.([]byte)...))
			default:
				panic("Invalid CONCAT parameter")
		case string:
			sm.store(idx, str1.(string)+str2.(string))
		case []byte:
			sm.store(idx, append(str1.([]byte), str2.([]byte)...))
		default:
			panic("Invalid CONCAT parameter")
		}
	case op == "NEW_TRANSACTION":
		sm.newTransaction()

@ -497,16 +509,18 @@ func (sm *StackMachine) processInst(idx int, inst tuple.Tuple) {
	case op == "ON_ERROR":
		sm.store(idx, sm.currentTransaction().OnError(fdb.Error{int(sm.waitAndPop().item.(int64))}))
	case op == "GET_READ_VERSION":
		_, e = rt.ReadTransact(func (rtr fdb.ReadTransaction) (interface{}, error) {
		_, e = rt.ReadTransact(func(rtr fdb.ReadTransaction) (interface{}, error) {
			sm.lastVersion = rtr.GetReadVersion().MustGet()
			sm.store(idx, []byte("GOT_READ_VERSION"))
			return nil, nil
		})
		if e != nil { panic(e) }
		if e != nil {
			panic(e)
		}
	case op == "SET":
		key := fdb.Key(sm.waitAndPop().item.([]byte))
		value := sm.waitAndPop().item.([]byte)
		sm.executeMutation(t, func (tr fdb.Transaction) (interface{}, error) {
		sm.executeMutation(t, func(tr fdb.Transaction) (interface{}, error) {
			tr.Set(key, value)
			return nil, nil
		}, isDB, idx)

@ -525,10 +539,12 @@ func (sm *StackMachine) processInst(idx int, inst tuple.Tuple) {
		sm.logStack(entries, prefix)
	case op == "GET":
		key := fdb.Key(sm.waitAndPop().item.([]byte))
		res, e := rt.ReadTransact(func (rtr fdb.ReadTransaction) (interface{}, error) {
		res, e := rt.ReadTransact(func(rtr fdb.ReadTransaction) (interface{}, error) {
			return rtr.Get(key), nil
		})
		if e != nil { panic(e) }
		if e != nil {
			panic(e)
		}

		sm.store(idx, res.(fdb.FutureByteSlice))
	case op == "COMMIT":

@ -537,7 +553,7 @@ func (sm *StackMachine) processInst(idx int, inst tuple.Tuple) {
		sm.currentTransaction().Reset()
	case op == "CLEAR":
		key := fdb.Key(sm.waitAndPop().item.([]byte))
		sm.executeMutation(t, func (tr fdb.Transaction) (interface{}, error) {
		sm.executeMutation(t, func(tr fdb.Transaction) (interface{}, error) {
			tr.Clear(key)
			return nil, nil
		}, isDB, idx)

@ -557,10 +573,12 @@ func (sm *StackMachine) processInst(idx int, inst tuple.Tuple) {
	case op == "GET_KEY":
		sel := sm.popSelector()
		prefix := sm.waitAndPop().item.([]byte)
		res, e := rt.ReadTransact(func (rtr fdb.ReadTransaction) (interface{}, error) {
		res, e := rt.ReadTransact(func(rtr fdb.ReadTransaction) (interface{}, error) {
			return rtr.GetKey(sel).MustGet(), nil
		})
		if e != nil { panic(e) }
		if e != nil {
			panic(e)
		}

		key := res.(fdb.Key)

@ -570,7 +588,9 @@ func (sm *StackMachine) processInst(idx int, inst tuple.Tuple) {
			sm.store(idx, prefix)
		} else {
			s, e := fdb.Strinc(prefix)
			if e != nil { panic(e) }
			if e != nil {
				panic(e)
			}
			sm.store(idx, s)
		}
	case strings.HasPrefix(op, "GET_RANGE"):

@ -591,10 +611,12 @@ func (sm *StackMachine) processInst(idx int, inst tuple.Tuple) {
			prefix = sm.waitAndPop().item.([]byte)
		}

		res, e := rt.ReadTransact(func (rtr fdb.ReadTransaction) (interface{}, error) {
		res, e := rt.ReadTransact(func(rtr fdb.ReadTransaction) (interface{}, error) {
			return rtr.GetRange(r, ro).GetSliceOrPanic(), nil
		})
		if e != nil { panic(e) }
		if e != nil {
			panic(e)
		}

		sm.pushRange(idx, res.([]fdb.KeyValue), prefix)
	case strings.HasPrefix(op, "CLEAR_RANGE"):

@ -607,7 +629,7 @@ func (sm *StackMachine) processInst(idx int, inst tuple.Tuple) {
			er = sm.popKeyRange()
		}

		sm.executeMutation(t, func (tr fdb.Transaction) (interface{}, error) {
		sm.executeMutation(t, func(tr fdb.Transaction) (interface{}, error) {
			tr.ClearRange(er)
			return nil, nil
		}, isDB, idx)

@ -623,7 +645,7 @@ func (sm *StackMachine) processInst(idx int, inst tuple.Tuple) {
		if e != nil {
			panic(e)
		}
		for _, el := range(t) {
		for _, el := range t {
			sm.store(idx, []byte(tuple.Tuple{el}.Pack()))
		}
	case op == "TUPLE_SORT":

@ -681,7 +703,7 @@ func (sm *StackMachine) processInst(idx int, inst tuple.Tuple) {
		if e != nil {
			panic(e)
		}
		db.Transact(func (tr fdb.Transaction) (interface{}, error) {
		db.Transact(func(tr fdb.Transaction) (interface{}, error) {
			v := tr.GetRange(er, fdb.RangeOptions{}).GetSliceOrPanic()
			if len(v) != 0 {
				panic(fdb.Error{1020})

@ -718,7 +740,7 @@ func (sm *StackMachine) processInst(idx int, inst tuple.Tuple) {
		key := fdb.Key(sm.waitAndPop().item.([]byte))
		ival := sm.waitAndPop().item
		value := ival.([]byte)
		sm.executeMutation(t, func (tr fdb.Transaction) (interface{}, error) {
		sm.executeMutation(t, func(tr fdb.Transaction) (interface{}, error) {
			reflect.ValueOf(tr).MethodByName(opname).Call([]reflect.Value{reflect.ValueOf(key), reflect.ValueOf(value)})
			return nil, nil
		}, isDB, idx)

@ -740,7 +762,7 @@ func (sm *StackMachine) processInst(idx int, inst tuple.Tuple) {
		tr.Options().SetReadSystemKeys()
		tr.Options().SetAccessSystemKeys()
		tr.Options().SetDurabilityDevNullIsWebScale()
		tr.Options().SetTimeout(60*1000)
		tr.Options().SetTimeout(60 * 1000)
		tr.Options().SetRetryLimit(50)
		tr.Options().SetMaxRetryDelay(100)
		tr.Options().SetUsedDuringCommitProtectionDisable()

@ -751,7 +773,9 @@ func (sm *StackMachine) processInst(idx int, inst tuple.Tuple) {
			return tr.Get(fdb.Key("\xff")).MustGet(), nil
		})

		if e != nil { panic(e) }
		if e != nil {
			panic(e)
		}

		sm.testWatches()
		sm.testLocality()

@ -772,7 +796,7 @@ func (sm *StackMachine) processInst(idx int, inst tuple.Tuple) {
}

func (sm *StackMachine) Run() {
	r, e := db.Transact(func (tr fdb.Transaction) (interface{}, error) {
	r, e := db.Transact(func(tr fdb.Transaction) (interface{}, error) {
		return tr.GetRange(tuple.Tuple{sm.prefix}, fdb.RangeOptions{}).GetSliceOrPanic(), nil
	})
	if e != nil {

@ -781,7 +805,7 @@ func (sm *StackMachine) Run() {

	instructions := r.([]fdb.KeyValue)

	for i, kv := range(instructions) {
	for i, kv := range instructions {
		inst, _ := tuple.Unpack(fdb.Key(kv.Value))

		if sm.verbose {
@ -24,26 +24,26 @@ package main

import (
	"encoding/xml"
	"io/ioutil"
	"fmt"
	"go/doc"
	"io/ioutil"
	"log"
	"strings"
	"os"
	"strings"
	"unicode"
	"unicode/utf8"
	"go/doc"
)

type Option struct {
	Name string `xml:"name,attr"`
	Code int `xml:"code,attr"`
	ParamType string `xml:"paramType,attr"`
	ParamDesc string `xml:"paramDescription,attr"`
	Name        string `xml:"name,attr"`
	Code        int    `xml:"code,attr"`
	ParamType   string `xml:"paramType,attr"`
	ParamDesc   string `xml:"paramDescription,attr"`
	Description string `xml:"description,attr"`
	Hidden bool `xml:"hidden,attr"`
	Hidden      bool   `xml:"hidden,attr"`
}
type Scope struct {
	Name string `xml:"name,attr"`
	Name   string `xml:"name,attr"`
	Option []Option
}
type Options struct {

@ -114,12 +114,12 @@ func translateName(old string) string {
	return strings.Replace(strings.Title(strings.Replace(old, "_", " ", -1)), " ", "", -1)
}

func lowerFirst (s string) string {
	if s == "" {
		return ""
	}
	r, n := utf8.DecodeRuneInString(s)
	return string(unicode.ToLower(r)) + s[n:]
func lowerFirst(s string) string {
	if s == "" {
		return ""
	}
	r, n := utf8.DecodeRuneInString(s)
	return string(unicode.ToLower(r)) + s[n:]
}

func writeMutation(opt Option) {

@ -139,7 +139,7 @@ func writeEnum(scope Scope, opt Option, delta int) {
		doc.ToText(os.Stdout, opt.Description, " // ", "", 73)
		// fmt.Printf(" // %s\n", opt.Description)
	}
	fmt.Printf(" %s %s = %d\n", scope.Name + translateName(opt.Name), scope.Name, opt.Code + delta)
	fmt.Printf(" %s %s = %d\n", scope.Name+translateName(opt.Name), scope.Name, opt.Code+delta)
}

func main() {

@ -182,11 +182,11 @@ func int64ToBytes(i int64) ([]byte, error) {
}
`)

	for _, scope := range(v.Scope) {
	for _, scope := range v.Scope {
		if strings.HasSuffix(scope.Name, "Option") {
			receiver := scope.Name + "s"

			for _, opt := range(scope.Option) {
			for _, opt := range scope.Option {
				if opt.Description != "Deprecated" && !opt.Hidden { // Eww
					writeOpt(receiver, opt)
				}

@ -195,7 +195,7 @@ func int64ToBytes(i int64) ([]byte, error) {
		}

		if scope.Name == "MutationType" {
			for _, opt := range(scope.Option) {
			for _, opt := range scope.Option {
				if opt.Description != "Deprecated" && !opt.Hidden { // Eww
					writeMutation(opt)
				}

@ -218,7 +218,7 @@ func int64ToBytes(i int64) ([]byte, error) {
type %s int
const (
`, scope.Name)
		for _, opt := range(scope.Option) {
		for _, opt := range scope.Option {
			if !opt.Hidden {
				writeEnum(scope, opt, d)
			}
@ -23,10 +23,10 @@
package directory

import (
	"bytes"
	"encoding/binary"
	"github.com/apple/foundationdb/bindings/go/src/fdb"
	"github.com/apple/foundationdb/bindings/go/src/fdb/subspace"
	"encoding/binary"
	"bytes"
	"math/rand"
	"sync"
)

@ -53,14 +53,18 @@ func windowSize(start int64) int64 {
	// can't be too small. So start small and scale up. We don't want this to
	// ever get *too* big because we have to store about window_size/2 recent
	// items.
	if start < 255 { return 64 }
	if start < 65535 { return 1024 }
	if start < 255 {
		return 64
	}
	if start < 65535 {
		return 1024
	}
	return 8192
}
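
The growth policy above is easy to sanity-check in isolation. Here is a minimal standalone sketch (the main wrapper is hypothetical, not part of this commit) that prints the window chosen around each threshold:

package main

import "fmt"

// windowSize mirrors the function in the diff above: candidate windows
// grow as the allocation counter advances, so only about window/2
// recently allocated items ever need to be tracked.
func windowSize(start int64) int64 {
	if start < 255 {
		return 64
	}
	if start < 65535 {
		return 1024
	}
	return 8192
}

func main() {
	// Probe values just below and at each threshold.
	for _, start := range []int64{0, 254, 255, 65534, 65535} {
		fmt.Printf("start=%d -> window=%d\n", start, windowSize(start))
	}
}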

func (hca highContentionAllocator) allocate(tr fdb.Transaction, s subspace.Subspace) (subspace.Subspace, error) {
	for {
		rr := tr.Snapshot().GetRange(hca.counters, fdb.RangeOptions{Limit:1, Reverse:true})
		rr := tr.Snapshot().GetRange(hca.counters, fdb.RangeOptions{Limit: 1, Reverse: true})
		kvs := rr.GetSliceOrPanic()

		var start int64

@ -106,7 +110,7 @@ func (hca highContentionAllocator) allocate(tr fdb.Transaction, s subspace.Subsp
		}

		window = windowSize(start)
		if count * 2 < window {
		if count*2 < window {
			break
		}

@ -124,7 +128,7 @@ func (hca highContentionAllocator) allocate(tr fdb.Transaction, s subspace.Subsp

		allocatorMutex.Lock()

		latestCounter := tr.Snapshot().GetRange(hca.counters, fdb.RangeOptions{Limit:1, Reverse:true})
		latestCounter := tr.Snapshot().GetRange(hca.counters, fdb.RangeOptions{Limit: 1, Reverse: true})
		candidateValue := tr.Get(key)
		tr.Options().SetNextWriteNoWriteConflictRange()
		tr.Set(key, []byte(""))
@ -40,9 +40,9 @@
package directory

import (
	"errors"
	"github.com/apple/foundationdb/bindings/go/src/fdb"
	"github.com/apple/foundationdb/bindings/go/src/fdb/subspace"
	"errors"
)

const (

@ -23,23 +23,23 @@
package directory

import (
	"bytes"
	"encoding/binary"
	"errors"
	"fmt"
	"github.com/apple/foundationdb/bindings/go/src/fdb"
	"github.com/apple/foundationdb/bindings/go/src/fdb/subspace"
	"github.com/apple/foundationdb/bindings/go/src/fdb/tuple"
	"encoding/binary"
	"bytes"
	"fmt"
	"errors"
)

type directoryLayer struct {
	nodeSS subspace.Subspace
	nodeSS    subspace.Subspace
	contentSS subspace.Subspace

	allowManualPrefixes bool

	allocator highContentionAllocator
	rootNode subspace.Subspace
	rootNode  subspace.Subspace

	path []string
}

@ -130,13 +134,17 @@ func (dl directoryLayer) createOrOpen(rtr fdb.ReadTransaction, tr *fdb.Transacti
		prefix = newss.Bytes()

		pf, e := dl.isPrefixFree(rtr.Snapshot(), prefix)
		if e != nil { return nil, e }
		if e != nil {
			return nil, e
		}
		if !pf {
			return nil, errors.New("the directory layer has manually allocated prefixes that conflict with the automatic prefix allocator")
		}
	} else {
		pf, e := dl.isPrefixFree(rtr, prefix)
		if e != nil { return nil, e }
		if e != nil {
			return nil, e
		}
		if !pf {
			return nil, errors.New("the given prefix is already in use")
		}

@ -171,7 +175,7 @@ func (dl directoryLayer) createOrOpen(rtr fdb.ReadTransaction, tr *fdb.Transacti
}

func (dl directoryLayer) CreateOrOpen(t fdb.Transactor, path []string, layer []byte) (DirectorySubspace, error) {
	r, e := t.Transact(func (tr fdb.Transaction) (interface{}, error) {
	r, e := t.Transact(func(tr fdb.Transaction) (interface{}, error) {
		return dl.createOrOpen(tr, &tr, path, layer, nil, true, true)
	})
	if e != nil {

@ -181,7 +185,7 @@ func (dl directoryLayer) CreateOrOpen(t fdb.Transactor, path []string, layer []b
}

func (dl directoryLayer) Create(t fdb.Transactor, path []string, layer []byte) (DirectorySubspace, error) {
	r, e := t.Transact(func (tr fdb.Transaction) (interface{}, error) {
	r, e := t.Transact(func(tr fdb.Transaction) (interface{}, error) {
		return dl.createOrOpen(tr, &tr, path, layer, nil, true, false)
	})
	if e != nil {

@ -194,7 +198,7 @@ func (dl directoryLayer) CreatePrefix(t fdb.Transactor, path []string, layer []b
	if prefix == nil {
		prefix = []byte{}
	}
	r, e := t.Transact(func (tr fdb.Transaction) (interface{}, error) {
	r, e := t.Transact(func(tr fdb.Transaction) (interface{}, error) {
		return dl.createOrOpen(tr, &tr, path, layer, prefix, true, false)
	})
	if e != nil {

@ -204,7 +208,7 @@ func (dl directoryLayer) CreatePrefix(t fdb.Transactor, path []string, layer []b
}

func (dl directoryLayer) Open(rt fdb.ReadTransactor, path []string, layer []byte) (DirectorySubspace, error) {
	r, e := rt.ReadTransact(func (rtr fdb.ReadTransaction) (interface{}, error) {
	r, e := rt.ReadTransact(func(rtr fdb.ReadTransaction) (interface{}, error) {
		return dl.createOrOpen(rtr, nil, path, layer, nil, false, true)
	})
	if e != nil {

@ -214,7 +218,7 @@ func (dl directoryLayer) Open(rt fdb.ReadTransactor, path []string, layer []byte
}

func (dl directoryLayer) Exists(rt fdb.ReadTransactor, path []string) (bool, error) {
	r, e := rt.ReadTransact(func (rtr fdb.ReadTransaction) (interface{}, error) {
	r, e := rt.ReadTransact(func(rtr fdb.ReadTransaction) (interface{}, error) {
		if e := dl.checkVersion(rtr, nil); e != nil {
			return false, e
		}

@ -241,7 +245,7 @@ func (dl directoryLayer) Exists(rt fdb.ReadTransactor, path []string) (bool, err
}

func (dl directoryLayer) List(rt fdb.ReadTransactor, path []string) ([]string, error) {
	r, e := rt.ReadTransact(func (rtr fdb.ReadTransaction) (interface{}, error) {
	r, e := rt.ReadTransact(func(rtr fdb.ReadTransaction) (interface{}, error) {
		if e := dl.checkVersion(rtr, nil); e != nil {
			return nil, e
		}

@ -272,7 +276,7 @@ func (dl directoryLayer) MoveTo(t fdb.Transactor, newAbsolutePath []string) (Dir
}

func (dl directoryLayer) Move(t fdb.Transactor, oldPath []string, newPath []string) (DirectorySubspace, error) {
	r, e := t.Transact(func (tr fdb.Transaction) (interface{}, error) {
	r, e := t.Transact(func(tr fdb.Transaction) (interface{}, error) {
		if e := dl.checkVersion(tr, &tr); e != nil {
			return nil, e
		}

@ -330,7 +334,7 @@ func (dl directoryLayer) Move(t fdb.Transactor, oldPath []string, newPath []stri
}

func (dl directoryLayer) Remove(t fdb.Transactor, path []string) (bool, error) {
	r, e := t.Transact(func (tr fdb.Transaction) (interface{}, error) {
	r, e := t.Transact(func(tr fdb.Transaction) (interface{}, error) {
		if e := dl.checkVersion(tr, &tr); e != nil {
			return false, e
		}

@ -375,9 +379,13 @@ func (dl directoryLayer) removeRecursive(tr fdb.Transaction, node subspace.Subsp
	}

	p, e := dl.nodeSS.Unpack(node)
	if e != nil { return e }
	if e != nil {
		return e
	}
	kr, e := fdb.PrefixRange(p[0].([]byte))
	if e != nil { return e }
	if e != nil {
		return e
	}

	tr.ClearRange(kr)
	tr.ClearRange(node)

@ -445,7 +453,7 @@ func (dl directoryLayer) nodeContainingKey(rtr fdb.ReadTransaction, key []byte)
	bk, _ := dl.nodeSS.FDBRangeKeys()
	kr := fdb.KeyRange{bk, fdb.Key(append(dl.nodeSS.Pack(tuple.Tuple{key}), 0x00))}

	kvs := rtr.GetRange(kr, fdb.RangeOptions{Reverse:true, Limit:1}).GetSliceOrPanic()
	kvs := rtr.GetRange(kr, fdb.RangeOptions{Reverse: true, Limit: 1}).GetSliceOrPanic()
	if len(kvs) == 1 {
		pp, e := dl.nodeSS.Unpack(kvs[0].Key)
		if e != nil {

@ -540,7 +548,7 @@ func (dl directoryLayer) contentsOfNode(node subspace.Subspace, path []string, l
	}
	prefix := p[0]

	newPath := make([]string, len(dl.path) + len(path))
	newPath := make([]string, len(dl.path)+len(path))
	copy(newPath, dl.path)
	copy(newPath[len(dl.path):], path)

@ -548,7 +556,7 @@ func (dl directoryLayer) contentsOfNode(node subspace.Subspace, path []string, l
	ss := subspace.FromBytes(pb)

	if bytes.Compare(layer, []byte("partition")) == 0 {
		nssb := make([]byte, len(pb) + 1)
		nssb := make([]byte, len(pb)+1)
		copy(nssb, pb)
		nssb[len(pb)] = 0xFE
		ndl := NewDirectoryLayer(subspace.FromBytes(nssb), ss, false).(directoryLayer)

@ -560,7 +568,9 @@ func (dl directoryLayer) contentsOfNode(node subspace.Subspace, path []string, l
}

func (dl directoryLayer) nodeWithPrefix(prefix []byte) subspace.Subspace {
	if prefix == nil { return nil }
	if prefix == nil {
		return nil
	}
	return dl.nodeSS.Sub(prefix)
}

@ -576,9 +586,9 @@ func (dl directoryLayer) find(rtr fdb.ReadTransaction, path []string) *node {
}

func (dl directoryLayer) partitionSubpath(lpath, rpath []string) []string {
	r := make([]string, len(lpath) - len(dl.path) + len(rpath))
	r := make([]string, len(lpath)-len(dl.path)+len(rpath))
	copy(r, lpath[len(dl.path):])
	copy(r[len(lpath) - len(dl.path):], rpath)
	copy(r[len(lpath)-len(dl.path):], rpath)
	return r
}
@ -38,8 +38,8 @@ type DirectorySubspace interface {

type directorySubspace struct {
	subspace.Subspace
	dl directoryLayer
	path []string
	dl    directoryLayer
	path  []string
	layer []byte
}

@ -23,16 +23,16 @@
package directory

import (
	"bytes"
	"github.com/apple/foundationdb/bindings/go/src/fdb"
	"github.com/apple/foundationdb/bindings/go/src/fdb/subspace"
	"bytes"
)

type node struct {
	subspace subspace.Subspace
	path []string
	subspace   subspace.Subspace
	path       []string
	targetPath []string
	_layer fdb.FutureByteSlice
	_layer     fdb.FutureByteSlice
}

func (n *node) exists() bool {

@ -53,7 +53,7 @@ func (e Error) Error() string {
var (
	errNetworkNotSetup = Error{2008}

	errAPIVersionUnset = Error{2200}
	errAPIVersionAlreadySet = Error{2201}
	errAPIVersionUnset        = Error{2200}
	errAPIVersionAlreadySet   = Error{2201}
	errAPIVersionNotSupported = Error{2203}
)

@ -30,11 +30,11 @@ package fdb
import "C"

import (
	"fmt"
	"log"
	"runtime"
	"sync"
	"unsafe"
	"fmt"
	"log"
)

/* Would put this in futures.go but for the documented issue with

@ -53,7 +53,7 @@ type Transactor interface {
	// Transact executes the caller-provided function, providing it with a
	// Transaction (itself a Transactor, allowing composition of transactional
	// functions).
	Transact(func (Transaction) (interface{}, error)) (interface{}, error)
	Transact(func(Transaction) (interface{}, error)) (interface{}, error)

	// All Transactors are also ReadTransactors, allowing them to be used with
	// read-only transactional functions.

@ -68,7 +68,7 @@ type ReadTransactor interface {
	// ReadTransact executes the caller-provided function, providing it with a
	// ReadTransaction (itself a ReadTransactor, allowing composition of
	// read-only transactional functions).
	ReadTransact(func (ReadTransaction) (interface{}, error)) (interface{}, error)
	ReadTransact(func(ReadTransaction) (interface{}, error)) (interface{}, error)
}
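
The composition these comments describe is worth a concrete illustration. A minimal sketch (the helper getOne and the key it reads are hypothetical, not from this commit): because Database, Transaction, and Snapshot all satisfy ReadTransactor, one read-only function serves all three callers unchanged:

// getOne reads one key through any ReadTransactor and returns its value.
func getOne(rt fdb.ReadTransactor, key fdb.Key) ([]byte, error) {
	r, e := rt.ReadTransact(func(rtr fdb.ReadTransaction) (interface{}, error) {
		// Blocks on the future; retries are handled by ReadTransact when
		// rt is a Database.
		return rtr.Get(key).MustGet(), nil
	})
	if e != nil {
		return nil, e
	}
	return r.([]byte), nil
}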

func setOpt(setter func(*C.uint8_t, C.int) C.fdb_error_t, param []byte) error {

@ -23,8 +23,8 @@
package fdb_test

import (
	"github.com/apple/foundationdb/bindings/go/src/fdb"
	"fmt"
	"github.com/apple/foundationdb/bindings/go/src/fdb"
	"testing"
)

@ -52,7 +52,7 @@ func ExampleVersionstamp(t *testing.T) {
	fdb.MustAPIVersion(400)
	db := fdb.MustOpenDefault()

	setVs := func(t fdb.Transactor, key fdb.Key ) (fdb.FutureKey, error) {
	setVs := func(t fdb.Transactor, key fdb.Key) (fdb.FutureKey, error) {
		fmt.Printf("setOne called with: %T\n", t)
		ret, e := t.Transact(func(tr fdb.Transaction) (interface{}, error) {
			tr.SetVersionstampedValue(key, []byte("blahblahbl"))

@ -100,7 +100,7 @@ func ExampleTransactor() {
	setMany := func(t fdb.Transactor, value []byte, keys ...fdb.Key) error {
		fmt.Printf("setMany called with: %T\n", t)
		_, e := t.Transact(func(tr fdb.Transaction) (interface{}, error) {
			for _, key := range(keys) {
			for _, key := range keys {
				setOne(tr, key, value)
			}
			return nil, nil

@ -41,9 +41,9 @@ package fdb
import "C"

import (
	"unsafe"
	"sync"
	"runtime"
	"sync"
	"unsafe"
)

// A Future represents a value (or error) to be available at some later

@ -253,7 +253,7 @@ type futureKeyValueArray struct {
}

func stringRefToSlice(ptr unsafe.Pointer) []byte {
	size := *((*C.int)(unsafe.Pointer(uintptr(ptr)+8)))
	size := *((*C.int)(unsafe.Pointer(uintptr(ptr) + 8)))

	if size == 0 {
		return []byte{}

@ -278,13 +278,13 @@ func (f futureKeyValueArray) Get() ([]KeyValue, bool, error) {
	ret := make([]KeyValue, int(count))

	for i := 0; i < int(count); i++ {
		kvptr := unsafe.Pointer(uintptr(unsafe.Pointer(kvs)) + uintptr(i * 24))
		kvptr := unsafe.Pointer(uintptr(unsafe.Pointer(kvs)) + uintptr(i*24))

		ret[i].Key = stringRefToSlice(kvptr)
		ret[i].Value = stringRefToSlice(unsafe.Pointer(uintptr(kvptr) + 12))
	}

	return ret, (more != 0), nil
	return ret, (more != 0), nil
}

// FutureInt64 represents the asynchronous result of a function that returns a

@ -361,7 +361,7 @@ func (f futureStringSlice) Get() ([]string, error) {
	ret := make([]string, int(count))

	for i := 0; i < int(count); i++ {
		ret[i] = C.GoString((*C.char)(*(**C.char)(unsafe.Pointer(uintptr(unsafe.Pointer(strings))+uintptr(i*8)))))
		ret[i] = C.GoString((*C.char)(*(**C.char)(unsafe.Pointer(uintptr(unsafe.Pointer(strings)) + uintptr(i*8)))))
	}

	return ret, nil

@ -36,9 +36,9 @@ type Selectable interface {
// below. For details of how KeySelectors are specified and resolved, see
// https://foundationdb.org/documentation/developer-guide.html#key-selectors.
type KeySelector struct {
	Key KeyConvertible
	Key     KeyConvertible
	OrEqual bool
	Offset int
	Offset  int
}
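
As a usage sketch (hedged: the keys are hypothetical, and the resolution semantics are taken from the linked developer guide), the common helper constructors correspond to struct literals like these:

// The first key >= "a" (what the fdb.FirstGreaterOrEqual helper builds).
begin := fdb.KeySelector{Key: fdb.Key("a"), OrEqual: false, Offset: 1}

// The last key < "m" (what fdb.LastLessThan, used by the stack tester's
// testLocality above, builds).
end := fdb.KeySelector{Key: fdb.Key("m"), OrEqual: false, Offset: 0}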

func (ks KeySelector) FDBKeySelector() KeySelector {

@ -34,7 +34,7 @@ import (

// KeyValue represents a single key-value pair in the database.
type KeyValue struct {
	Key Key
	Key   Key
	Value []byte
}

@ -121,11 +121,11 @@ func (sr SelectorRange) FDBRangeKeySelectors() (Selectable, Selectable) {
// A RangeResult should not be returned from a transactional function passed to
// the Transact method of a Transactor.
type RangeResult struct {
	t *transaction
	sr SelectorRange
	options RangeOptions
	t        *transaction
	sr       SelectorRange
	options  RangeOptions
	snapshot bool
	f *futureKeyValueArray
	f        *futureKeyValueArray
}

// GetSliceWithError returns a slice of KeyValue objects satisfying the range

@ -173,12 +173,12 @@ func (rr RangeResult) GetSliceOrPanic() []KeyValue {
// range specified in the read that returned this RangeResult.
func (rr RangeResult) Iterator() *RangeIterator {
	return &RangeIterator{
		t: rr.t,
		f: rr.f,
		sr: rr.sr,
		options: rr.options,
		t:         rr.t,
		f:         rr.f,
		sr:        rr.sr,
		options:   rr.options,
		iteration: 1,
		snapshot: rr.snapshot,
		snapshot:  rr.snapshot,
	}
}

@ -193,17 +193,17 @@ func (rr RangeResult) Iterator() *RangeIterator {
// RangeResult and used concurrently. RangeIterator should not be returned from
// a transactional function passed to the Transact method of a Transactor.
type RangeIterator struct {
	t *transaction
	f *futureKeyValueArray
	sr SelectorRange
	options RangeOptions
	t         *transaction
	f         *futureKeyValueArray
	sr        SelectorRange
	options   RangeOptions
	iteration int
	done bool
	more bool
	kvs []KeyValue
	index int
	err error
	snapshot bool
	done      bool
	more      bool
	kvs       []KeyValue
	index     int
	err       error
	snapshot  bool
}
|
||||
|
||||
// Advance attempts to advance the iterator to the next key-value pair. Advance
|
||||
|
|
|
@@ -46,7 +46,7 @@ type Snapshot struct {
//
// See the ReadTransactor interface for an example of using ReadTransact with
// Transaction, Snapshot and Database objects.
func (s Snapshot) ReadTransact(f func (ReadTransaction) (interface{}, error)) (r interface{}, e error) {
func (s Snapshot) ReadTransact(f func(ReadTransaction) (interface{}, error)) (r interface{}, e error) {
	defer panicToError(&e)

	r, e = f(s)
@@ -33,10 +33,10 @@
package subspace

import (
	"github.com/apple/foundationdb/bindings/go/src/fdb"
	"github.com/apple/foundationdb/bindings/go/src/fdb/tuple"
	"bytes"
	"errors"
	"github.com/apple/foundationdb/bindings/go/src/fdb"
	"github.com/apple/foundationdb/bindings/go/src/fdb/tuple"
)

// Subspace represents a well-defined region of keyspace in a FoundationDB

@@ -134,7 +134,7 @@ func (s subspace) FDBRangeKeySelectors() (fdb.Selectable, fdb.Selectable) {
}

func concat(a []byte, b ...byte) []byte {
	r := make([]byte, len(a) + len(b))
	r := make([]byte, len(a)+len(b))
	copy(r, a)
	copy(r[len(a):], b)
	return r
@@ -70,7 +70,7 @@ type Transaction struct {

type transaction struct {
	ptr *C.FDBTransaction
	db Database
	db  Database
}

// TransactionOptions is a handle with which to set options that affect a

@@ -110,7 +110,7 @@ func (t Transaction) GetDatabase() Database {
//
// See the Transactor interface for an example of using Transact with
// Transaction and Database objects.
func (t Transaction) Transact(f func (Transaction) (interface{}, error)) (r interface{}, e error) {
func (t Transaction) Transact(f func(Transaction) (interface{}, error)) (r interface{}, e error) {
	defer panicToError(&e)

	r, e = f(t)

@@ -260,11 +260,11 @@ func (t *transaction) getRange(r Range, options RangeOptions, snapshot bool) Ran
	f := t.doGetRange(r, options, snapshot, 1)
	begin, end := r.FDBRangeKeySelectors()
	return RangeResult{
		t: t,
		sr: SelectorRange{begin, end},
		options: options,
		t:        t,
		sr:       SelectorRange{begin, end},
		options:  options,
		snapshot: snapshot,
		f: &f,
		f:        &f,
	}
}

@@ -402,7 +402,7 @@ func (t Transaction) AddReadConflictRange(er ExactRange) error {
}

func copyAndAppend(orig []byte, b byte) []byte {
	ret := make([]byte, len(orig) + 1)
	ret := make([]byte, len(orig)+1)
	copy(ret, orig)
	ret[len(orig)] = b
	return ret
@@ -35,9 +35,9 @@
package tuple

import (
	"fmt"
	"encoding/binary"
	"bytes"
	"encoding/binary"
	"fmt"
	"github.com/apple/foundationdb/bindings/go/src/fdb"
)

@@ -68,33 +68,33 @@ type UUID [16]byte

// Type codes: These prefix the different elements in a packed Tuple
// to indicate what type they are.
const nilCode = 0x00
const bytesCode = 0x01
const stringCode = 0x02
const nestedCode = 0x05
const nilCode = 0x00
const bytesCode = 0x01
const stringCode = 0x02
const nestedCode = 0x05
const intZeroCode = 0x14
const posIntEnd = 0x1c
const posIntEnd = 0x1c
const negIntStart = 0x0c
const floatCode = 0x20
const doubleCode = 0x21
const falseCode = 0x26
const trueCode = 0x27
const uuidCode = 0x30
const floatCode = 0x20
const doubleCode = 0x21
const falseCode = 0x26
const trueCode = 0x27
const uuidCode = 0x30

var sizeLimits = []uint64{
	1 << (0 * 8) - 1,
	1 << (1 * 8) - 1,
	1 << (2 * 8) - 1,
	1 << (3 * 8) - 1,
	1 << (4 * 8) - 1,
	1 << (5 * 8) - 1,
	1 << (6 * 8) - 1,
	1 << (7 * 8) - 1,
	1 << (8 * 8) - 1,
	1<<(0*8) - 1,
	1<<(1*8) - 1,
	1<<(2*8) - 1,
	1<<(3*8) - 1,
	1<<(4*8) - 1,
	1<<(5*8) - 1,
	1<<(6*8) - 1,
	1<<(7*8) - 1,
	1<<(8*8) - 1,
}

func adjustFloatBytes(b []byte, encode bool) {
	if (encode && b[0] & 0x80 != 0x00) || (!encode && b[0] & 0x80 == 0x00) {
	if (encode && b[0]&0x80 != 0x00) || (!encode && b[0]&0x80 == 0x00) {
		// Negative numbers: flip all of the bytes.
		for i := 0; i < len(b); i++ {
			b[i] = b[i] ^ 0xff

@@ -131,11 +131,11 @@ func encodeInt(buf *bytes.Buffer, i int64) {
	switch {
	case i > 0:
		n = bisectLeft(uint64(i))
		buf.WriteByte(byte(intZeroCode+n))
		buf.WriteByte(byte(intZeroCode + n))
		binary.Write(&ibuf, binary.BigEndian, i)
	case i < 0:
		n = bisectLeft(uint64(-i))
		buf.WriteByte(byte(0x14-n))
		buf.WriteByte(byte(0x14 - n))
		binary.Write(&ibuf, binary.BigEndian, int64(sizeLimits[n])+i)
	}
@@ -170,7 +170,7 @@ func encodeTuple(buf *bytes.Buffer, t Tuple, nested bool) {
		buf.WriteByte(nestedCode)
	}

	for i, e := range(t) {
	for i, e := range t {
		switch e := e.(type) {
		case Tuple:
			encodeTuple(buf, e, true)

@@ -232,7 +232,7 @@ func findTerminator(b []byte) int {
	for {
		idx := bytes.IndexByte(bp, 0x00)
		length += idx
		if idx + 1 == len(bp) || bp[idx+1] != 0xFF {
		if idx+1 == len(bp) || bp[idx+1] != 0xFF {
			break
		}
		length += 2

@@ -276,7 +276,7 @@ func decodeInt(b []byte) (int64, int) {
		ret -= int64(sizeLimits[n])
	}

	return ret, n+1
	return ret, n + 1
}

func decodeFloat(b []byte) (float32, int) {

@@ -317,11 +317,11 @@ func decodeTuple(b []byte, nested bool) (Tuple, int, error) {
			if !nested {
				el = nil
				off = 1
			} else if i + 1 < len(b) && b[i+1] == 0xff {
			} else if i+1 < len(b) && b[i+1] == 0xff {
				el = nil
				off = 2
			} else {
				return t, i+1, nil
				return t, i + 1, nil
			}
		case b[i] == bytesCode:
			el, off = decodeBytes(b[i:])

@@ -330,12 +330,12 @@ func decodeTuple(b []byte, nested bool) (Tuple, int, error) {
		case negIntStart <= b[i] && b[i] <= posIntEnd:
			el, off = decodeInt(b[i:])
		case b[i] == floatCode:
			if i + 5 > len(b) {
			if i+5 > len(b) {
				return nil, i, fmt.Errorf("insufficient bytes to decode float starting at position %d of byte array for tuple", i)
			}
			el, off = decodeFloat(b[i:])
		case b[i] == doubleCode:
			if i + 9 > len(b) {
			if i+9 > len(b) {
				return nil, i, fmt.Errorf("insufficient bytes to decode double starting at position %d of byte array for tuple", i)
			}
			el, off = decodeDouble(b[i:])

@@ -346,7 +346,7 @@ func decodeTuple(b []byte, nested bool) (Tuple, int, error) {
			el = false
			off = 1
		case b[i] == uuidCode:
			if i + 17 > len(b) {
			if i+17 > len(b) {
				return nil, i, fmt.Errorf("insufficient bytes to decode UUID starting at position %d of byte array for tuple", i)
			}
			el, off = decodeUUID(b[i:])

@@ -401,7 +401,7 @@ func (t Tuple) FDBRangeKeySelectors() (fdb.Selectable, fdb.Selectable) {
}

func concat(a []byte, b ...byte) []byte {
	r := make([]byte, len(a) + len(b))
	r := make([]byte, len(a)+len(b))
	copy(r, a)
	copy(r[len(a):], b)
	return r
@@ -23,18 +23,23 @@
"""Documentation for this API can be found at
https://foundationdb.org/documentation/api-python.html"""


def open(*args, **kwargs):
    raise RuntimeError('You must call api_version() before using any fdb methods')


init = open


def transactional(*args, **kwargs):
    raise RuntimeError('You must call api_version() before using fdb.transactional')


def _add_symbols(module, symbols):
    for symbol in symbols:
        globals()[symbol] = getattr(module, symbol)


def api_version(ver):
    header_version = 510

@@ -52,10 +57,12 @@ def api_version(ver):
    import fdb.impl

    err = fdb.impl._capi.fdb_select_api_version_impl(ver, header_version)
    if err == 2203: # api_version_not_supported, but that's not helpful to the user
    if err == 2203:  # api_version_not_supported, but that's not helpful to the user
        max_supported_ver = fdb.impl._capi.fdb_get_max_api_version()
        if header_version > max_supported_ver:
            raise RuntimeError("This version of the FoundationDB Python binding is not supported by the installed FoundationDB C library. The binding requires a library that supports API version %d, but the installed library supports a maximum version of %d." % (header_version, max_supported_ver))
            raise RuntimeError("This version of the FoundationDB Python binding is not supported by the installed "
                               "FoundationDB C library. The binding requires a library that supports API version "
                               "%d, but the installed library supports a maximum version of %d." % (header_version, max_supported_ver))
        else:
            raise RuntimeError("API version %d is not supported by the installed FoundationDB C library." % ver)

@@ -78,7 +85,7 @@ def api_version(ver):
        'transactional',
        'options',
        'StreamingMode',
        )
    )

    _add_symbols(fdb.impl, list)

@@ -97,7 +104,8 @@ def api_version(ver):
            if issubclass(o, fdb.impl.Future):
                if hasattr(o, "wait"):
                    o.get = o.wait
        except TypeError: pass
        except TypeError:
            pass

    # FDBRange used to be called FDBRangeIter and was an iterator,
    # but it's now a container. In v13 we have to make it act like

@@ -117,4 +125,3 @@ def api_version(ver):
        import fdb.subspace_impl
        subspace_symbols = ('Subspace',)
        _add_symbols(fdb.subspace_impl, subspace_symbols)
@@ -28,17 +28,19 @@ from fdb import impl as _impl
from fdb import six
import fdb.tuple
from .subspace_impl import Subspace


class AllocatorTransactionState:
    def __init__(self):
        self.lock = threading.Lock()


class HighContentionAllocator (object):

    def __init__(self, subspace):
        self.counters = subspace[0]
        self.recent = subspace[1]
        self.lock = threading.Lock();
        self.lock = threading.Lock()

    @_impl.transactional
    def allocate(self, tr):

@@ -52,20 +54,21 @@ class HighContentionAllocator (object):
        if not hasattr(tr, "__fdb_directory_layer_hca_state__"):
            with self.lock:
                if not hasattr(tr, "__fdb_directory_layer_hca_state__"):
                    tr.__fdb_directory_layer_hca_state__ = AllocatorTransactionState();
                    tr.__fdb_directory_layer_hca_state__ = AllocatorTransactionState()

        tr_state = tr.__fdb_directory_layer_hca_state__

        while True:
            [start] = [self.counters.unpack(k)[0] for k,_ in tr.snapshot.get_range(self.counters.range().start, self.counters.range().stop, limit=1, reverse=True)] or [0]
            [start] = [self.counters.unpack(k)[0] for k, _ in tr.snapshot.get_range(
                self.counters.range().start, self.counters.range().stop, limit=1, reverse=True)] or [0]

            window_advanced = False
            while True:
                with tr_state.lock:
                    if window_advanced:
                        del tr[self.counters : self.counters[start]]
                        del tr[self.counters: self.counters[start]]
                        tr.options.set_next_write_no_write_conflict_range()
                        del tr[self.recent : self.recent[start]]
                        del tr[self.recent: self.recent[start]]

                    # Increment the allocation count for the current window
                    tr.add(self.counters[start], struct.pack("<q", 1))
@@ -96,10 +99,10 @@ class HighContentionAllocator (object):
                    tr.options.set_next_write_no_write_conflict_range()
                    tr[self.recent[candidate]] = b''

                latest_counter = [self.counters.unpack(k)[0] for k,_ in latest_counter]
                latest_counter = [self.counters.unpack(k)[0] for k, _ in latest_counter]
                if len(latest_counter) > 0 and latest_counter[0] > start:
                    break

                if candidate_value == None:
                    tr.add_write_conflict_key(self.recent[candidate])
                    return fdb.tuple.pack((candidate,))

@@ -108,12 +111,15 @@ class HighContentionAllocator (object):
        # Larger window sizes are better for high contention, smaller sizes for
        # keeping the keys small. But if there are many allocations, the keys
        # can't be too small. So start small and scale up. We don't want this
        # to ever get *too* big because we have to store about window_size/2
        # to ever get *too* big because we have to store about window_size/2
        # recent items.
        if start < 255: return 64
        if start < 65535: return 1024
        if start < 255:
            return 64
        if start < 65535:
            return 1024
        return 8192


class Directory(object):
    def __init__(self, directory_layer, path=(), layer=b''):
        self._directory_layer = directory_layer

@@ -177,7 +183,7 @@ class Directory(object):

    def get_layer(self):
        return self._layer

    def get_path(self):
        return self._path

@@ -186,7 +192,7 @@ class Directory(object):
            path = (path,)
        return path

    def _partition_subpath(self, path, directory_layer = None):
    def _partition_subpath(self, path, directory_layer=None):
        directory_layer = directory_layer or self._directory_layer
        return self._path[len(directory_layer._path):] + path

@@ -195,6 +201,7 @@ class Directory(object):
    def _get_layer_for_path(self, path):
        return self._directory_layer


class DirectoryLayer(Directory):

    def __init__(self, node_subspace=Subspace(rawPrefix=b'\xfe'), content_subspace=Subspace(), allow_manual_prefixes=False):

@@ -214,7 +221,7 @@ class DirectoryLayer(Directory):
    def create_or_open(self, tr, path, layer=None):
        """ Opens the directory with the given path.

        If the directory does not exist, it is created (creating parent
        If the directory does not exist, it is created (creating parent
        directories if necessary).

        If layer is specified, it is checked against the layer of an existing

@@ -241,7 +248,9 @@ class DirectoryLayer(Directory):
        if existing_node.exists():
            if existing_node.is_in_partition():
                subpath = existing_node.get_partition_subpath()
                return existing_node.get_contents(self)._directory_layer._create_or_open_internal(tr, subpath, layer, prefix, allow_create, allow_open)
                return existing_node.get_contents(self)._directory_layer._create_or_open_internal(
                    tr, subpath, layer, prefix, allow_create, allow_open
                )

            if not allow_open:
                raise ValueError("The directory already exists.")

@@ -282,7 +291,7 @@ class DirectoryLayer(Directory):
            layer = b''

        tr[node[b'layer']] = layer

        return self._contents_of_node(node, path, layer)

    @_impl.transactional
@@ -332,7 +341,7 @@ class DirectoryLayer(Directory):

        if old_path == new_path[:len(old_path)]:
            raise ValueError("The destination directory cannot be a subdirectory of the source directory.")

        old_node = self._find(tr, old_path).prefetch_metadata(tr)
        new_node = self._find(tr, new_path).prefetch_metadata(tr)

@@ -340,7 +349,7 @@ class DirectoryLayer(Directory):
            raise ValueError("The source directory does not exist.")

        if old_node.is_in_partition() or new_node.is_in_partition():
            if not old_node.is_in_partition() or not new_node.is_in_partition() or old_node.path != new_node.path:
            if not old_node.is_in_partition() or not new_node.is_in_partition() or old_node.path != new_node.path:
                raise ValueError("Cannot move between partitions.")

            return new_node.get_contents(self).move(tr, old_node.get_partition_subpath(), new_node.get_partition_subpath())

@@ -399,7 +408,7 @@ class DirectoryLayer(Directory):

    @_impl.transactional
    def list(self, tr, path=()):
        """Returns the names of the specified directory's subdirectories as a
        """Returns the names of the specified directory's subdirectories as a
        list of strings.
        """
        self._check_version(tr, write_access=False)

@@ -431,11 +440,11 @@ class DirectoryLayer(Directory):
        return True

    ########################################
    ## Private methods for implementation ##
    # Private methods for implementation #
    ########################################

    SUBDIRS=0
    VERSION=(1,0,0)
    SUBDIRS = 0
    VERSION = (1, 0, 0)

    def _check_version(self, tr, write_access=True):
        version = tr[self._root_node[b'version']]

@@ -473,7 +482,8 @@ class DirectoryLayer(Directory):
        return None

    def _node_with_prefix(self, prefix):
        if prefix == None: return None
        if prefix == None:
            return None
        return self._node_subspace[prefix]

    def _contents_of_node(self, node, path, layer=None):

@@ -487,7 +497,7 @@ class DirectoryLayer(Directory):
    def _find(self, tr, path):
        n = _Node(self._root_node, (), path)
        for i, name in enumerate(path):
            n = _Node(self._node_with_prefix(tr[n.subspace[self.SUBDIRS][name]]), path[:i+1], path)
            n = _Node(self._node_with_prefix(tr[n.subspace[self.SUBDIRS][name]]), path[:i + 1], path)
            if not n.exists() or n.layer(tr) == b'partition':
                return n
        return n

@@ -511,11 +521,13 @@ class DirectoryLayer(Directory):
        # Returns true if the given prefix does not "intersect" any currently
        # allocated prefix (including the root node). This means that it neither
        # contains any other prefix nor is contained by any other prefix.
        return prefix and not self._node_containing_key(tr, prefix) and not len(list(tr.get_range(self._node_subspace.pack((prefix,)), self._node_subspace.pack((_impl.strinc(prefix),)), limit=1)))
        return prefix and not self._node_containing_key(tr, prefix) \
            and not len(list(tr.get_range(self._node_subspace.pack((prefix,)), self._node_subspace.pack((_impl.strinc(prefix),)), limit=1)))

    def _is_prefix_empty(self, tr, prefix):
        return len(list(tr.get_range(prefix, _impl.strinc(prefix), limit=1))) == 0


def _to_unicode_path(path):
    if isinstance(path, bytes):
        path = six.text_type(path)

@@ -535,8 +547,10 @@ def _to_unicode_path(path):

    raise ValueError('Invalid path: must be a unicode string or a tuple of unicode strings')


directory = DirectoryLayer()


class DirectorySubspace(Subspace, Directory):
    # A DirectorySubspace represents the *contents* of a directory, but it also
    # remembers the path with which it was opened and offers convenience methods

@@ -549,6 +563,7 @@ class DirectorySubspace(Subspace, Directory):
    def __repr__(self):
        return 'DirectorySubspace(path=' + repr(self._path) + ', prefix=' + repr(self.rawPrefix) + ')'


class DirectoryPartition(DirectorySubspace):
    def __init__(self, path, prefix, parent_directory_layer):
        directory_layer = DirectoryLayer(Subspace(rawPrefix=prefix + b'\xfe'), Subspace(rawPrefix=prefix))

@@ -590,6 +605,7 @@ class DirectoryPartition(DirectorySubspace):
        else:
            return self._directory_layer


class _Node (object):

    def __init__(self, subspace, path, target_path):

@@ -623,4 +639,3 @@ class _Node (object):

    def get_contents(self, directory_layer, tr=None):
        return directory_layer._contents_of_node(self.subspace, self.path, self.layer(tr))
@@ -40,41 +40,50 @@ _open_file = open

import weakref


class _NetworkOptions(object):
    def __init__(self, parent):
        self._parent = parent


class _ErrorPredicates(object):
    def __init__(self, parent):
        self._parent = parent


class _ClusterOptions(object):
    def __init__(self, cluster):
        self._parent = weakref.proxy(cluster)


class _DatabaseOptions(object):
    def __init__(self, db):
        self._parent = weakref.proxy(db)


class _TransactionOptions(object):
    def __init__(self, tr):
        self._parent = weakref.proxy(tr)


from fdb import fdboptions as _opts
import types
import struct


def option_wrap(code):
    def setfunc(self):
        self._parent._set_option(code, None, 0)
    return setfunc


def option_wrap_string(code):
    def setfunc(self, param=None):
        param, length = optionalParamToBytes(param)
        self._parent._set_option(code, param, length)
    return setfunc


def option_wrap_bytes(code):
    def setfunc(self, param=None):
        if param is None:

@@ -85,25 +94,29 @@ def option_wrap_bytes(code):
            raise TypeError('Value must be of type ' + bytes.__name__)
    return setfunc


def option_wrap_int(code):
    def setfunc(self, param):
        self._parent._set_option(code, struct.pack("<q", param), 8)
    return setfunc


def pred_wrap(code):
    def predfunc(self, error):
        return self._parent._error_predicate(code, error.code)
    return predfunc


def operation_wrap(code):
    def opfunc(self, key, param):
        self._atomic_operation(code, key, param)
    return opfunc


def fill_options(scope, predicates=False):
    _dict = getattr(_opts, scope)

    for k,v in _dict.items():
    for k, v in _dict.items():
        fname = (predicates and 'is_' or 'set_') + k.lower()
        code, desc, paramType, paramDesc = v
        if predicates:
@@ -113,7 +126,7 @@ def fill_options(scope, predicates=False):
            f = option_wrap(code)
        elif paramType == type(''):
            f = option_wrap_string(code)
        elif paramType == type(b''): # This won't happen in Python 2 because type('') == type(b''), but it will happen in Python 3
        elif paramType == type(b''):  # This won't happen in Python 2 because type('') == type(b''), but it will happen in Python 3
            f = option_wrap_bytes(code)
        elif paramType == type(0):
            f = option_wrap_int(code)

@@ -126,6 +139,7 @@ def fill_options(scope, predicates=False):
        klass = globals()['_' + scope + 's']
        setattr(klass, fname, f)


def add_operation(fname, v):
    code, desc, paramType, paramDesc = v
    f = operation_wrap(code)

@@ -134,14 +148,16 @@ def add_operation(fname, v):
    setattr(globals()['Database'], fname, f)
    setattr(globals()['Transaction'], fname, f)


def fill_operations():
    _dict = getattr(_opts, 'MutationType')

    for k,v in _dict.items():
    for k, v in _dict.items():
        fname = k.lower()
        add_operation(fname, v)
        add_operation("bit_" + fname, v)


for scope in ['ClusterOption', 'DatabaseOption', 'TransactionOption', 'NetworkOption']:
    fill_options(scope)

@@ -150,11 +166,14 @@ fill_options('ErrorPredicate', True)
options = _NetworkOptions(sys.modules[__name__])
predicates = _ErrorPredicates(sys.modules[__name__])


def _set_option(option, param, length):
    _capi.fdb_network_set_option(option, param, length)


def _error_predicate(predicate, error_code):
    return bool( _capi.fdb_error_predicate(predicate, error_code) )
    return bool(_capi.fdb_error_predicate(predicate, error_code))


def make_enum(scope):
    _dict = getattr(_opts, scope)

@@ -162,15 +181,17 @@ def make_enum(scope):
    x = type(scope, (), {})

    def makeprop(value, doc):
        return property( fget=lambda o:value, doc=doc )
        return property(fget=lambda o: value, doc=doc)
    for k, v in _dict.items():
        setattr(x, k.lower(), makeprop( v[0], v[1] ))
        setattr(x, k.lower(), makeprop(v[0], v[1]))

    globals()[scope] = x()


make_enum("StreamingMode")
make_enum("ConflictRangeType")


def transactional(*tr_args, **tr_kwargs):
    """Decorate a function as transactional.

@@ -212,7 +233,7 @@ def transactional(*tr_args, **tr_kwargs):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            if isinstance(args[index], TransactionRead):
                raise asyncio.Return(( yield asyncio.From(func(*args, **kwargs)) ))
                raise asyncio.Return((yield asyncio.From(func(*args, **kwargs))))

            largs = list(args)
            tr = largs[index] = args[index].create_transaction()

@@ -235,9 +256,9 @@ def transactional(*tr_args, **tr_kwargs):
            tr = largs[index] = args[index].create_transaction()

            committed = False
            #retries = 0
            #start = datetime.datetime.now()
            #last = start
            # retries = 0
            # start = datetime.datetime.now()
            # last = start

            while not committed:
                try:
@@ -247,15 +268,16 @@ def transactional(*tr_args, **tr_kwargs):
                except FDBError as e:
                    tr.on_error(e.code).wait()

                #now = datetime.datetime.now()
                #td = now - last
                #elapsed = (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / float(10**6)
                #if elapsed >= 1:
                    #td = now - start
                    #print ('fdb WARNING: long transaction (%gs elapsed in transactional function \'%s\' (%d retries, %s))' % (elapsed, func.__name__, retries, committed and 'committed' or 'not yet committed'))
                    #last = now
                # now = datetime.datetime.now()
                # td = now - last
                # elapsed = (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / float(10**6)
                # if elapsed >= 1:
                #     td = now - start
                #     print ('fdb WARNING: long transaction (%gs elapsed in transactional function \'%s\' (%d retries, %s))'
                #            % (elapsed, func.__name__, retries, committed and 'committed' or 'not yet committed'))
                #     last = now

                #retries += 1
                # retries += 1
            return ret
        return wrapper

@@ -269,6 +291,7 @@ def transactional(*tr_args, **tr_kwargs):
    else:
        raise Exception('Invalid use of transactional decorator.')


class FDBError(Exception):
    """This exception is raised when an FDB API call returns an
    error. The error code will be stored in the code attribute, and a

@@ -276,6 +299,7 @@ class FDBError(Exception):
    attribute.

    """

    def __init__(self, code):
        self.code = code
        self._description = None

@@ -335,7 +359,7 @@ class FDBRange(object):
        esel = self._esel
        limit = self._limit

        iteration = 1 # the first read was fired off when the FDBRange was initialized
        iteration = 1  # the first read was fired off when the FDBRange was initialized
        future = self._future

        done = False

@@ -408,8 +432,10 @@ class TransactionRead(_FDBBase):
        return key_or_selector

    def get_range(self, begin, end, limit=0, reverse=False, streaming_mode=StreamingMode.iterator):
        if begin is None: begin = b''
        if end is None: end = b'\xff'
        if begin is None:
            begin = b''
        if end is None:
            end = b'\xff'
        begin = self._to_selector(begin)
        end = self._to_selector(end)
        return FDBRange(self, begin, end, limit, reverse, streaming_mode)

@@ -423,6 +449,7 @@ class TransactionRead(_FDBBase):
            return self.get_range(key.start, key.stop, reverse=(key.step == -1))
        return self.get(key)


class Transaction(TransactionRead):
    """A modifiable snapshot of a Database.

@@ -464,8 +491,10 @@ class Transaction(TransactionRead):
        self.capi.fdb_transaction_clear(self.tpointer, key, len(key))

    def clear_range(self, begin, end):
        if begin is None: begin = b''
        if end is None: end = b'\xff'
        if begin is None:
            begin = b''
        if end is None:
            end = b'\xff'
        if isinstance(begin, KeySelector):
            begin = self.get_key(begin)
        if isinstance(end, KeySelector):
@@ -534,13 +563,14 @@ class Transaction(TransactionRead):

    def __delitem__(self, key):
        if isinstance(key, slice):
            self.clear_range( key.start, key.stop )
            self.clear_range(key.start, key.stop)
        else:
            self.clear(key)


class Future(_FDBBase):
    Event = threading.Event
    _state = None #< Hack for trollius
    _state = None  # < Hack for trollius

    def __init__(self, fpointer):
        # print('Creating future 0x%x' % fpointer)

@@ -562,7 +592,7 @@ class Future(_FDBBase):
        raise NotImplementedError

    def is_ready(self):
        return bool( self.capi.fdb_future_is_ready(self.fpointer) )
        return bool(self.capi.fdb_future_is_ready(self.fpointer))

    def block_until_ready(self):
        self.capi.fdb_future_block_until_ready(self.fpointer)

@@ -595,35 +625,42 @@ class Future(_FDBBase):
        Returns the index in the parameter list of a ready future."""
        if not futures:
            raise ValueError("wait_for_any requires at least one future")
        d={}
        d = {}
        ev = futures[0].Event()
        for i,f in enumerate(futures):
            def cb(ignore,i=i):
        for i, f in enumerate(futures):
            def cb(ignore, i=i):
                if d.setdefault('i', i) == i:
                    ev.set()
            f.on_ready( cb )
            f.on_ready(cb)
        ev.wait()
        return d['i']

    # asyncio future protocol
    def cancelled(self):
        if not self.done(): return False
        if not self.done():
            return False
        e = self.exception()
        return getattr(e, 'code', 0) == 1101
    done = is_ready

    def result(self):
        if not self.done(): raise Exception("Future result not available")
        if not self.done():
            raise Exception("Future result not available")
        return self.wait()

    def exception(self):
        if not self.done(): raise Exception("Future result not available")
        if not self.done():
            raise Exception("Future result not available")
        try:
            self.wait()
            return None
        except BaseException as e:
            return e
    def add_done_callback(self,fn):

    def add_done_callback(self, fn):
        self.on_ready(lambda f: self.call_soon_threadsafe(fn, f))
    def remove_done_callback(self,fn):

    def remove_done_callback(self, fn):
        raise NotImplementedError()
@@ -649,13 +686,15 @@ class FutureKeyValueArray(Future):
        count = ctypes.c_int()
        more = ctypes.c_int()
        self.capi.fdb_future_get_keyvalue_array(self.fpointer, ctypes.byref(kvs), ctypes.byref(count), ctypes.byref(more))
        return ([KeyValue(ctypes.string_at(x.key, x.key_length), ctypes.string_at(x.value, x.value_length)) for x in kvs[0:count.value]], count.value, more.value)
        return ([KeyValue(ctypes.string_at(x.key, x.key_length), ctypes.string_at(x.value, x.value_length))
                 for x in kvs[0:count.value]], count.value, more.value)

        # Logically, we should self._release_memory() after extracting the
        # KVs but before returning, but then we would have to store
        # the KVs on the python side and in most cases we are about to
        # destroy the future anyway


class FutureStringArray(Future):
    def wait(self):
        self.block_until_ready()

@@ -664,6 +703,7 @@ class FutureStringArray(Future):
        self.capi.fdb_future_get_string_array(self.fpointer, ctypes.byref(strings), ctypes.byref(count))
        return list(strings[0:count.value])


class replaceable_property(object):
    def __get__(self, obj, cls=None):
        return self.method(obj)

@@ -671,6 +711,7 @@ class replaceable_property(object):
    def __init__(self, method):
        self.method = method


class LazyFuture(Future):
    def __init__(self, *args, **kwargs):
        super(LazyFuture, self).__init__(*args, **kwargs)

@@ -692,7 +733,7 @@ class LazyFuture(Future):

        except:
            e = sys.exc_info()
            if not (isinstance(e[1], FDBError) and e[1].code == 1102): # future_released
            if not (isinstance(e[1], FDBError) and e[1].code == 1102):  # future_released
                raise

        return self.value

@@ -702,6 +743,7 @@ class LazyFuture(Future):
# http://bugs.python.org/issue12370
_super = super


class FutureString(LazyFuture):
    def __init__(self, *args):
        self._error = None

@@ -724,7 +766,7 @@ class FutureString(LazyFuture):
        return self.value

    def __repr__(self):
        return self.value.__repr__()
        return self.value.__repr__()

    def __add__(self, rhs):
        if isinstance(rhs, FutureString):

@@ -776,13 +818,15 @@ class FutureString(LazyFuture):
    def __int__(self):
        return int(self.value)


def makewrapper(func):
    def tmpfunc(self, *args):
        return func(self.value, *args)
    return tmpfunc


for i in dir(bytes):
    if not i.startswith('_') or i in ('__getitem__','__getslice__','__hash__', '__len__'):
    if not i.startswith('_') or i in ('__getitem__', '__getslice__', '__hash__', '__len__'):
        setattr(FutureString, i, makewrapper(getattr(bytes, i)))

@@ -845,10 +889,9 @@ class Database(FormerFuture):

    def __getitem__(self, key):
        if isinstance(key, slice):
            return self.get_range(key.start, key.stop, reverse=(key.step==-1))
            return self.get_range(key.start, key.stop, reverse=(key.step == -1))
        return Database.__database_getitem(self, key)

    def get_key(self, key_selector):
        return Database.__database_get_key(self, key_selector)

@@ -955,39 +998,40 @@ class Database(FormerFuture):
    def __database_atomic_operation(tr, opcode, key, param):
        tr._atomic_operation(opcode, key, param)

    ### Asynchronous transactions
    # Asynchronous transactions
    @staticmethod
    def declare_asynchronous_transactions():
        Return = asyncio.Return
        From = asyncio.From
        coroutine = asyncio.coroutine

        class Database:
            @staticmethod
            @transactional
            @coroutine
            def __database_getitem(tr, key):
                #raise Return(( yield From( tr[key] ) ))
                raise Return( tr[key] )
                # raise Return(( yield From( tr[key] ) ))
                raise Return(tr[key])
                yield None

            @staticmethod
            @transactional
            @coroutine
            def __database_get_key(tr, key_selector):
                raise Return( tr.get_key(key_selector) )
                raise Return(tr.get_key(key_selector))
                yield None

            @staticmethod
            @transactional
            @coroutine
            def __database_get_range(tr, begin, end, limit, reverse, streaming_mode):
                raise Return(( yield From( tr.get_range(begin, end, limit, reverse, streaming_mode).to_list() ) ))
                raise Return((yield From(tr.get_range(begin, end, limit, reverse, streaming_mode).to_list())))

            @staticmethod
            @transactional
            @coroutine
            def __database_get_range_startswith(tr, prefix, *args, **kwargs):
                raise Return(( yield From( tr.get_range_startswith(prefix, *args, **kwargs).to_list() )))
                raise Return((yield From(tr.get_range_startswith(prefix, *args, **kwargs).to_list())))

            @staticmethod
            @transactional

@@ -1010,7 +1054,7 @@ class Database(FormerFuture):
            @coroutine
            def __database_get_and_watch(tr, key):
                v = tr.get(key)
                raise Return( v, tr.watch(key) )
                raise Return(v, tr.watch(key))
                yield None

            @staticmethod

@@ -1018,7 +1062,7 @@ class Database(FormerFuture):
            @coroutine
            def __database_set_and_watch(tr, key, value):
                tr.set(key, value)
                raise Return( tr.watch(key) )
                raise Return(tr.watch(key))
                yield None

            @staticmethod

@@ -1026,7 +1070,7 @@ class Database(FormerFuture):
            @coroutine
            def __database_clear_and_watch(tr, key):
                del tr[key]
                raise Return( tr.watch(key) )
                raise Return(tr.watch(key))
                yield None

            @staticmethod

@@ -1046,8 +1090,10 @@ class Database(FormerFuture):
                yield None
        return Database


fill_operations()


class Cluster(FormerFuture):
    def __init__(self, cpointer):
        self.cpointer = cpointer
@@ -1084,10 +1130,10 @@ class KeySelector(object):
        self.offset = offset

    def __add__(self, offset):
        return KeySelector(self.key, self.or_equal, self.offset+offset)
        return KeySelector(self.key, self.or_equal, self.offset + offset)

    def __sub__(self, offset):
        return KeySelector(self.key, self.or_equal, self.offset-offset)
        return KeySelector(self.key, self.or_equal, self.offset - offset)

    @classmethod
    def last_less_than(cls, key):

@@ -1149,11 +1195,13 @@ class KeyValue(object):
    def __iter__(self):
        return KVIter(self)


def check_error_code(code, func, arguments):
    if code:
        raise FDBError(code)
    return None


if sys.maxsize <= 2**32:
    raise Exception("FoundationDB API requires a 64-bit python interpreter!")
if platform.system() == 'Windows':

@@ -1174,6 +1222,7 @@ else:
    raise Exception("Platform (%s) %s is not supported by the FoundationDB API!" % (sys.platform, platform.system()))
this_dir = os.path.dirname(__file__)


# Preferred installation: The C API library or a symbolic link to the
# library should be in the same directory as this module.
# Failing that, a file named $(capi_name).pth should be in the same directory,

@@ -1181,18 +1230,20 @@ this_dir = os.path.dirname(__file__)
# Failing that, we try to load the C API library without qualification, and
# the library should be on the platform's dynamic library search path
def read_pth_file():
    pth_file = os.path.join(this_dir, capi_name+'.pth')
    if not os.path.exists(pth_file): return None
    pth_file = os.path.join(this_dir, capi_name + '.pth')
    if not os.path.exists(pth_file):
        return None
    pth = _open_file(pth_file, "rt").read().strip()
    if pth[0] != '/':
        pth = os.path.join(this_dir, pth)
    return pth


for pth in [
    lambda: os.path.join(this_dir, capi_name),
    #lambda: os.path.join(this_dir, '../../lib', capi_name), # For compatibility with existing unix installation process... should be removed
    read_pth_file
]:
        lambda: os.path.join(this_dir, capi_name),
        # lambda: os.path.join(this_dir, '../../lib', capi_name), # For compatibility with existing unix installation process... should be removed
        read_pth_file
]:
    p = pth()
    if p and os.path.exists(p):
        _capi = ctypes.CDLL(os.path.abspath(p))

@@ -1208,9 +1259,10 @@ else:
    try:
        _capi = ctypes.CDLL(lib_path)
    except:
        raise Exception( "Unable to locate the FoundationDB API shared library!" )
        raise Exception("Unable to locate the FoundationDB API shared library!")
else:
    raise Exception( "Unable to locate the FoundationDB API shared library!" )
    raise Exception("Unable to locate the FoundationDB API shared library!")
def keyToBytes(k):
    if hasattr(k, 'as_foundationdb_key'):

@@ -1219,6 +1271,7 @@ def keyToBytes(k):
        raise TypeError('Key must be of type ' + bytes.__name__)
    return k


def valueToBytes(v):
    if hasattr(v, 'as_foundationdb_value'):
        v = v.as_foundationdb_value()

@@ -1226,6 +1279,7 @@ def valueToBytes(v):
        raise TypeError('Value must be of type ' + bytes.__name__)
    return v


def paramToBytes(v):
    if isinstance(v, FutureString):
        v = v.value

@@ -1235,6 +1289,7 @@ def paramToBytes(v):
        raise TypeError('Parameter must be a string')
    return v


def optionalParamToBytes(v):
    if v is None:
        return (None, 0)

@@ -1242,6 +1297,7 @@ def optionalParamToBytes(v):
    v = paramToBytes(v)
    return (v, len(v))


_FDBBase.capi = _capi

_capi.fdb_select_api_version_impl.argtypes = [ctypes.c_int, ctypes.c_int]

@@ -1300,7 +1356,7 @@ _capi.fdb_future_get_version.restype = ctypes.c_int
_capi.fdb_future_get_version.errcheck = check_error_code

_capi.fdb_future_get_key.argtypes = [ctypes.c_void_p, ctypes.POINTER(ctypes.POINTER(ctypes.c_byte)),
    ctypes.POINTER(ctypes.c_int)]
                                     ctypes.POINTER(ctypes.c_int)]
_capi.fdb_future_get_key.restype = ctypes.c_int
_capi.fdb_future_get_key.errcheck = check_error_code

@@ -1313,11 +1369,12 @@ _capi.fdb_future_get_database.restype = ctypes.c_int
_capi.fdb_future_get_database.errcheck = check_error_code

_capi.fdb_future_get_value.argtypes = [ctypes.c_void_p, ctypes.POINTER(ctypes.c_int),
    ctypes.POINTER(ctypes.POINTER(ctypes.c_byte)), ctypes.POINTER(ctypes.c_int)]
                                       ctypes.POINTER(ctypes.POINTER(ctypes.c_byte)), ctypes.POINTER(ctypes.c_int)]
_capi.fdb_future_get_value.restype = ctypes.c_int
_capi.fdb_future_get_value.errcheck = check_error_code

_capi.fdb_future_get_keyvalue_array.argtypes = [ctypes.c_void_p, ctypes.POINTER(ctypes.POINTER(KeyValueStruct)), ctypes.POINTER(ctypes.c_int), ctypes.POINTER(ctypes.c_int)]
_capi.fdb_future_get_keyvalue_array.argtypes = [ctypes.c_void_p, ctypes.POINTER(
    ctypes.POINTER(KeyValueStruct)), ctypes.POINTER(ctypes.c_int), ctypes.POINTER(ctypes.c_int)]
_capi.fdb_future_get_keyvalue_array.restype = int
_capi.fdb_future_get_keyvalue_array.errcheck = check_error_code

@@ -1367,7 +1424,9 @@ _capi.fdb_transaction_get.restype = ctypes.c_void_p
_capi.fdb_transaction_get_key.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int]
_capi.fdb_transaction_get_key.restype = ctypes.c_void_p

_capi.fdb_transaction_get_range.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_void_p, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int]
_capi.fdb_transaction_get_range.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_void_p,
                                            ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int,
                                            ctypes.c_int, ctypes.c_int]
_capi.fdb_transaction_get_range.restype = ctypes.c_void_p

_capi.fdb_transaction_add_conflict_range.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_int, ctypes.c_int]

@@ -1415,6 +1474,7 @@ _capi.fdb_transaction_reset.restype = None
if hasattr(ctypes.pythonapi, 'Py_IncRef'):
    def _pin_callback(cb):
        ctypes.pythonapi.Py_IncRef(ctypes.py_object(cb))

    def _unpin_callback(cb):
        ctypes.pythonapi.Py_DecRef(ctypes.py_object(cb))
else:

@@ -1422,6 +1482,7 @@ else:
    _pin_callback = _active_callbacks.add
    _unpin_callback = _active_callbacks.remove
def init(event_model=None):
    """Initialize the FDB interface.

@@ -1456,7 +1517,8 @@ def init(event_model=None):
            import gevent

            if gevent.__version__[0] != '0':
                def nullf(): pass
                def nullf():
                    pass

                class ThreadEvent(object):
                    def __init__(self):

@@ -1493,6 +1555,7 @@ def init(event_model=None):

                def _gevent_block_until_ready(self):
                    e = self.Event()

                    def is_ready_cb(future):
                        e.set()
                    self.on_ready(is_ready_cb)

@@ -1505,8 +1568,10 @@ def init(event_model=None):
            class DebugEvent(object):
                def __init__(self):
                    self.ev = threading.Event()

                def set(self):
                    self.ev.set()

                def wait(self):
                    while not self.ev.isSet():
                        self.ev.wait(.001)

@@ -1528,10 +1593,12 @@ def init(event_model=None):
                asyncio.futures._FUTURE_CLASSES += (Future,)

            def _do_not_block(self):
                if not self.is_ready(): raise Exception("Future not ready")
                if not self.is_ready():
                    raise Exception("Future not ready")
            Future.block_until_ready = _do_not_block
            Future.call_soon_threadsafe = asyncio.get_event_loop().call_soon_threadsafe
            Future._loop = asyncio.get_event_loop()

            def iterate(self):
                """Usage:
                    fa = tr.get_range(...).iterate()
@@ -1580,43 +1647,50 @@ def init(event_model=None):
        _network_thread = None
        raise


def init_v13(local_address, event_model = None):
def init_v13(local_address, event_model=None):
    return init(event_model)


open_clusters = {}
open_databases = {}

cacheLock = threading.Lock()


def open( cluster_file = None, database_name = b'DB', event_model = None ):
def open(cluster_file=None, database_name=b'DB', event_model=None):
    """Opens the given database (or the default database of the cluster indicated
    by the fdb.cluster file in a platform-specific location, if no cluster_file
    or database_name is provided). Initializes the FDB interface as required."""

    with _network_thread_reentrant_lock:
        if not _network_thread:
            init(event_model = event_model)
            init(event_model=event_model)

    with cacheLock:
        if not cluster_file in open_clusters:
            open_clusters[cluster_file] = create_cluster( cluster_file )
        if cluster_file not in open_clusters:
            open_clusters[cluster_file] = create_cluster(cluster_file)

        if not (cluster_file, database_name) in open_databases:
        if (cluster_file, database_name) not in open_databases:
            open_databases[(cluster_file, database_name)] = open_clusters[cluster_file].open_database(database_name)

        return open_databases[(cluster_file, database_name)]


def open_v13( cluster_id_path, database_name, local_address = None, event_model = None ):
def open_v13(cluster_id_path, database_name, local_address=None, event_model=None):
    return open(cluster_id_path, database_name, event_model)


import atexit


@atexit.register
def _stop_on_exit():
    if _network_thread:
        _capi.fdb_stop_network()
        _network_thread.join()


def strinc(key):
    key = key.rstrip(b'\xff')
    if len(key) == 0:
@@ -25,11 +25,12 @@ https://foundationdb.org/documentation/api-python.html"""

from fdb import impl as _impl


def _get_boundary_keys(db_or_tr, begin, end):
    if isinstance(db_or_tr, _impl.Transaction):
        tr = db_or_tr.db.create_transaction()
        # This does not guarantee transactionality because of the exception handling below,
        # but it does hide latency for the new transaction's start
        # This does not guarantee transactionality because of the exception handling below,
        # but it does hide latency for the new transaction's start
        tr.set_read_version(db_or_tr.get_read_version().wait())
    else:
        tr = db_or_tr.create_transaction()

@@ -39,20 +40,22 @@ def _get_boundary_keys(db_or_tr, begin, end):
            lastbegin = begin
            tr.options.set_read_system_keys()
            tr.options.set_lock_aware()
            kvs = tr.snapshot.get_range(b'\xff'+b'/keyServers/'+begin, b'\xff'+b'/keyServers/'+end)
            kvs = tr.snapshot.get_range(b'\xff' + b'/keyServers/' + begin, b'\xff' + b'/keyServers/' + end)
            if first_time:
                first_time = False
                yield None # trick to get the above get_range to be asynchronously dispatched before get_boundary_keys() returns.
                yield None  # trick to get the above get_range to be asynchronously dispatched before get_boundary_keys() returns.
            for kv in kvs:
                yield kv.key[13:]
                begin = kv.key[13:]+b'\x00'
                begin = kv.key[13:] + b'\x00'
            begin = end
        except _impl.FDBError as e:
            if e.code == 1007 and begin != lastbegin: # if we get a transaction_too_old and *something* has happened, then we are no longer transactional
            # if we get a transaction_too_old and *something* has happened, then we are no longer transactional
            if e.code == 1007 and begin != lastbegin:
                tr = tr.db.create_transaction()
            else:
                tr.on_error(e).wait()


def get_boundary_keys(db_or_tr, begin, end):
    begin = _impl.keyToBytes(begin)
    end = _impl.keyToBytes(end)

@@ -60,7 +63,7 @@ def get_boundary_keys(db_or_tr, begin, end):
    gen = _get_boundary_keys(db_or_tr, begin, end)
    try:
        next(gen)
    except StopIteration: # if _get_boundary_keys() never yields a value, e.g. begin > end
    except StopIteration:  # if _get_boundary_keys() never yields a value, e.g. begin > end
        return (x for x in list())
    return gen
@@ -22,6 +22,7 @@

import fdb.tuple


class Subspace (object):

    def __init__(self, prefixTuple=tuple(), rawPrefix=b''):
@@ -20,44 +20,49 @@

# FoundationDB Python API

import ctypes, uuid, struct, math
import ctypes
import uuid
import struct
import math
from bisect import bisect_left

from fdb import six
import fdb

_size_limits = tuple( (1 << (i*8))-1 for i in range(9) )
_size_limits = tuple((1 << (i * 8)) - 1 for i in range(9))

# Define type codes:
NULL_CODE = 0x00
BYTES_CODE = 0x01
STRING_CODE = 0x02
NESTED_CODE = 0x05
INT_ZERO_CODE = 0x14
POS_INT_END = 0x1d
NEG_INT_START = 0x0b
FLOAT_CODE = 0x20
DOUBLE_CODE = 0x21
FALSE_CODE = 0x26
TRUE_CODE = 0x27
UUID_CODE = 0x30
NULL_CODE = 0x00
BYTES_CODE = 0x01
STRING_CODE = 0x02
NESTED_CODE = 0x05
INT_ZERO_CODE = 0x14
POS_INT_END = 0x1d
NEG_INT_START = 0x0b
FLOAT_CODE = 0x20
DOUBLE_CODE = 0x21
FALSE_CODE = 0x26
TRUE_CODE = 0x27
UUID_CODE = 0x30
VERSIONSTAMP_CODE = 0x33

# Reserved: Codes 0x03, 0x04, 0x23, and 0x24 are reserved for historical reasons.


def _find_terminator( v, pos ):
def _find_terminator(v, pos):
    # Finds the start of the next terminator [\x00]![\xff] or the end of v
    while True:
        pos = v.find(b'\x00', pos)
        if pos < 0:
            return len(v)
        if pos+1 == len(v) or v[pos+1:pos+2] != b'\xff':
        if pos + 1 == len(v) or v[pos + 1:pos + 2] != b'\xff':
            return pos
        pos += 2


# If encoding and sign bit is 1 (negative), flip all of the bytes. Otherwise, just flip sign.
# If decoding and sign bit is 0 (negative), flip all of the bits. Otherwise, just flip sign.
def _float_adjust( v, encode ):
def _float_adjust(v, encode):
    if encode and six.indexbytes(v, 0) & 0x80 != 0x00:
        return b''.join(map(lambda x: six.int2byte(x ^ 0xff), six.iterbytes(v)))
    elif not encode and six.indexbytes(v, 0) & 0x80 != 0x80:

@@ -65,6 +70,7 @@ def _float_adjust( v, encode ):
    else:
        return six.int2byte(six.indexbytes(v, 0) ^ 0x80) + v[1:]


class SingleFloat(object):
    def __init__(self, value):
        if isinstance(value, float):

@@ -116,6 +122,7 @@ class SingleFloat(object):
    def __nonzero__(self):
        return bool(self.value)


class Versionstamp(object):
    LENGTH = 12
    _TR_VERSION_LEN = 10
@ -218,60 +225,61 @@ class Versionstamp(object):
|
|||
def __nonzero__(self):
|
||||
return self.is_complete()
|
||||
|
||||
|
||||
def _decode(v, pos):
|
||||
code = six.indexbytes(v, pos)
|
||||
if code == NULL_CODE:
|
||||
return None, pos+1
|
||||
return None, pos + 1
|
||||
elif code == BYTES_CODE:
|
||||
end = _find_terminator(v, pos+1)
|
||||
return v[pos+1:end].replace(b"\x00\xFF", b"\x00"), end+1
|
||||
end = _find_terminator(v, pos + 1)
|
||||
return v[pos + 1:end].replace(b"\x00\xFF", b"\x00"), end + 1
|
||||
elif code == STRING_CODE:
|
||||
end = _find_terminator(v, pos+1)
|
||||
return v[pos+1:end].replace(b"\x00\xFF", b"\x00").decode("utf-8"), end+1
|
||||
end = _find_terminator(v, pos + 1)
|
||||
return v[pos + 1:end].replace(b"\x00\xFF", b"\x00").decode("utf-8"), end + 1
|
||||
elif code >= INT_ZERO_CODE and code < POS_INT_END:
|
||||
n = code - 20
|
||||
end = pos + 1 + n
|
||||
return struct.unpack(">Q", b'\x00'*(8-n) + v[pos+1:end])[0], end
|
||||
return struct.unpack(">Q", b'\x00' * (8 - n) + v[pos + 1:end])[0], end
|
||||
elif code > NEG_INT_START and code < INT_ZERO_CODE:
|
||||
n = 20 - code
|
||||
end = pos + 1 + n
|
||||
return struct.unpack(">Q", b'\x00'*(8-n) + v[pos+1:end])[0]-_size_limits[n], end
|
||||
elif code == POS_INT_END: # 0x1d; Positive 9-255 byte integer
|
||||
length = six.indexbytes(v, pos+1)
|
||||
return struct.unpack(">Q", b'\x00' * (8 - n) + v[pos + 1:end])[0] - _size_limits[n], end
|
||||
elif code == POS_INT_END: # 0x1d; Positive 9-255 byte integer
|
||||
length = six.indexbytes(v, pos + 1)
|
||||
val = 0
|
||||
for i in _range(length):
|
||||
val = val << 8
|
||||
val += six.indexbytes(v, pos+2+i)
|
||||
return val, pos+2+length
|
||||
elif code == NEG_INT_START: # 0x0b; Negative 9-255 byte integer
|
||||
length = six.indexbytes(v, pos+1)^0xff
|
||||
val += six.indexbytes(v, pos + 2 + i)
|
||||
return val, pos + 2 + length
|
||||
elif code == NEG_INT_START: # 0x0b; Negative 9-255 byte integer
|
||||
length = six.indexbytes(v, pos + 1) ^ 0xff
|
||||
val = 0
|
||||
for i in _range(length):
|
||||
val = val << 8
|
||||
val += six.indexbytes(v, pos+2+i)
|
||||
return val - (1<<(length*8)) + 1, pos+2+length
|
||||
val += six.indexbytes(v, pos + 2 + i)
|
||||
return val - (1 << (length * 8)) + 1, pos + 2 + length
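These two branches mirror the encoder further down: integers outside the eight-byte range are stored as a length byte (one's-complemented for negatives) followed by big-endian magnitude bytes, with negative values offset by 2^(8*length) - 1 so the bytes still sort numerically. A rough round-trip sketch of the negative case (plain Python 3, not the diff's code):

    def encode_big_neg(value):                      # assumes value <= -2**64
        length = ((-value).bit_length() + 7) // 8
        adjusted = value + (1 << (length * 8)) - 1  # non-negative after the offset
        return bytes([0x0b, length ^ 0xff]) + adjusted.to_bytes(length, 'big')

    def decode_big_neg(data):
        length = data[1] ^ 0xff
        val = int.from_bytes(data[2:2 + length], 'big')
        return val - (1 << (length * 8)) + 1

    n = -(2 ** 70)
    assert decode_big_neg(encode_big_neg(n)) == n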
    elif code == FLOAT_CODE:
-        return SingleFloat(struct.unpack(">f", _float_adjust(v[pos+1:pos+5], False))[0]), pos+5
+        return SingleFloat(struct.unpack(">f", _float_adjust(v[pos + 1:pos + 5], False))[0]), pos + 5
    elif code == DOUBLE_CODE:
-        return struct.unpack(">d", _float_adjust(v[pos+1:pos+9], False))[0], pos+9
+        return struct.unpack(">d", _float_adjust(v[pos + 1:pos + 9], False))[0], pos + 9
    elif code == UUID_CODE:
-        return uuid.UUID(bytes=v[pos+1:pos+17]), pos+17
+        return uuid.UUID(bytes=v[pos + 1:pos + 17]), pos + 17
    elif code == FALSE_CODE:
        if hasattr(fdb, "_version") and fdb._version < 500:
            raise ValueError("Invalid API version " + str(fdb._version) + " for boolean types")
-        return False, pos+1
+        return False, pos + 1
    elif code == TRUE_CODE:
        if hasattr(fdb, "_version") and fdb._version < 500:
            raise ValueError("Invalid API version " + str(fdb._version) + " for boolean types")
-        return True, pos+1
+        return True, pos + 1
    elif code == VERSIONSTAMP_CODE:
-        return Versionstamp.from_bytes(v, pos+1), pos + 1 + Versionstamp.LENGTH
+        return Versionstamp.from_bytes(v, pos + 1), pos + 1 + Versionstamp.LENGTH
    elif code == NESTED_CODE:
        ret = []
-        end_pos = pos+1
+        end_pos = pos + 1
        while end_pos < len(v):
            if six.indexbytes(v, end_pos) == 0x00:
-                if end_pos+1 < len(v) and six.indexbytes(v, end_pos+1) == 0xff:
+                if end_pos + 1 < len(v) and six.indexbytes(v, end_pos + 1) == 0xff:
                    ret.append(None)
                    end_pos += 2
                else:

@@ -279,10 +287,11 @@ def _decode(v, pos):
            else:
                val, end_pos = _decode(v, end_pos)
                ret.append(val)
-        return tuple(ret), end_pos+1
+        return tuple(ret), end_pos + 1
    else:
        raise ValueError("Unknown data type in DB: " + repr(v))


def _reduce_children(child_values):
    version_pos = -1
    len_so_far = 0

@@ -296,6 +305,7 @@ def _reduce_children(child_values):
        bytes_list.append(child_bytes)
    return bytes_list, version_pos


def _encode(value, nested=False):
    # returns [code][data] (code != 0xFF)
    # encoded values are self-terminating

@@ -305,7 +315,7 @@ def _encode(value, nested=False):
            return b''.join([six.int2byte(NULL_CODE), six.int2byte(0xff)]), -1
        else:
            return b''.join([six.int2byte(NULL_CODE)]), -1
-    elif isinstance(value, bytes): # also gets non-None fdb.impl.Value
+    elif isinstance(value, bytes):  # also gets non-None fdb.impl.Value
        return six.int2byte(BYTES_CODE) + value.replace(b'\x00', b'\x00\xFF') + b'\x00', -1
    elif isinstance(value, six.text_type):
        return six.int2byte(STRING_CODE) + value.encode('utf-8').replace(b'\x00', b'\x00\xFF') + b'\x00', -1

@@ -314,26 +324,26 @@ def _encode(value, nested=False):
        return b''.join([six.int2byte(INT_ZERO_CODE)]), -1
    elif value > 0:
        if value >= _size_limits[-1]:
-            length = (value.bit_length()+7)//8
+            length = (value.bit_length() + 7) // 8
            data = [six.int2byte(POS_INT_END), six.int2byte(length)]
-            for i in _range(length-1,-1,-1):
-                data.append(six.int2byte( (value>>(8*i))&0xff ))
+            for i in _range(length - 1, -1, -1):
+                data.append(six.int2byte((value >> (8 * i)) & 0xff))
            return b''.join(data), -1

-        n = bisect_left( _size_limits, value )
-        return six.int2byte(INT_ZERO_CODE + n) + struct.pack( ">Q", value )[-n:], -1
+        n = bisect_left(_size_limits, value)
+        return six.int2byte(INT_ZERO_CODE + n) + struct.pack(">Q", value)[-n:], -1
    else:
        if -value >= _size_limits[-1]:
-            length = (value.bit_length()+7)//8
-            value += (1<<(length*8)) - 1
-            data = [six.int2byte(NEG_INT_START), six.int2byte(length^0xff)]
-            for i in _range(length-1,-1,-1):
-                data.append(six.int2byte( (value>>(8*i))&0xff ))
+            length = (value.bit_length() + 7) // 8
+            value += (1 << (length * 8)) - 1
+            data = [six.int2byte(NEG_INT_START), six.int2byte(length ^ 0xff)]
+            for i in _range(length - 1, -1, -1):
+                data.append(six.int2byte((value >> (8 * i)) & 0xff))
            return b''.join(data), -1

-        n = bisect_left( _size_limits, -value )
+        n = bisect_left(_size_limits, -value)
        maxv = _size_limits[n]
-        return six.int2byte(INT_ZERO_CODE - n) + struct.pack( ">Q", maxv+value)[-n:], -1
+        return six.int2byte(INT_ZERO_CODE - n) + struct.pack(">Q", maxv + value)[-n:], -1
    elif isinstance(value, ctypes.c_float) or isinstance(value, SingleFloat):
        return six.int2byte(FLOAT_CODE) + _float_adjust(struct.pack(">f", value.value), True), -1
    elif isinstance(value, ctypes.c_double):

@@ -357,6 +367,7 @@ def _encode(value, nested=False):
    else:
        raise ValueError("Unsupported data type: " + str(type(value)))


# packs the tuple possibly for versionstamp operations and returns the position of the
# incomplete versionstamp
# * if there are no incomplete versionstamp members, this returns the packed tuple and -1
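Usage implied by this contract (a sketch, not part of the diff; assumes the Python bindings at API version 510, which is what this tree targets):

    import fdb
    fdb.api_version(510)
    import fdb.tuple

    fdb.tuple.pack((b'k', 1))                       # fine: no incomplete versionstamp
    vs = fdb.tuple.Versionstamp()                   # incomplete until commit fills it in
    try:
        fdb.tuple.pack((b'k', vs))                  # vanilla pack refuses it
    except ValueError:
        pass
    key = fdb.tuple.pack_with_versionstamp((b'k', vs))  # ok: stamp position is encoded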
@@ -379,6 +390,7 @@ def _pack_maybe_with_versionstamp(t, prefix=None):

    return b''.join(bytes_list), version_pos


# packs the specified tuple into a key
def pack(t, prefix=None):
    res, version_pos = _pack_maybe_with_versionstamp(t, prefix)

@@ -386,6 +398,7 @@ def pack(t, prefix=None):
        raise ValueError("Incomplete versionstamp included in vanilla tuple pack")
    return res


# packs the specified tuple into a key for versionstamp operations
def pack_with_versionstamp(t, prefix=None):
    res, version_pos = _pack_maybe_with_versionstamp(t, prefix)

@@ -393,6 +406,7 @@ def pack_with_versionstamp(t, prefix=None):
        raise ValueError("No incomplete versionstamp included in tuple pack with versionstamp")
    return res


# unpacks the specified key into a tuple
def unpack(key, prefix_len=0):
    pos = prefix_len

@@ -402,6 +416,7 @@ def unpack(key, prefix_len=0):
        res.append(r)
    return tuple(res)


# determines if there is at least one incomplete versionstamp in a tuple
def has_incomplete_versionstamp(t):
    def _elem_has_incomplete(item):

@@ -415,7 +430,10 @@ def has_incomplete_versionstamp(t):
            return False
    return any(map(_elem_has_incomplete, t))
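The helper walks nested tuples and lists (the body of _elem_has_incomplete is cut from this hunk, so the recursion is an assumption from the full source), meaning a stamp at any depth is detected. A quick illustration under the same assumptions as the sketch above:

    vs = fdb.tuple.Versionstamp()
    assert fdb.tuple.has_incomplete_versionstamp(('a', ('b', ('c', vs))))
    assert not fdb.tuple.has_incomplete_versionstamp(('a', ('b', 'c')))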
_range = range


def range(t):
    """Returns a slice of keys that includes all tuples of greater
    length than the specified tuple that start with the

@@ -428,8 +446,9 @@ def range(t):

    p = pack(t)
    return slice(
-        p+b'\x00',
-        p+b'\xff')
+        p + b'\x00',
+        p + b'\xff')
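Since range(t) simply brackets pack(t) with \x00 and \xff, its endpoints can feed a range read directly. A hedged usage sketch (tr stands for a hypothetical open transaction):

    r = fdb.tuple.range(('users',))
    for k, v in tr.get_range(r.start, r.stop):
        print(fdb.tuple.unpack(k))  # every stored tuple that extends ('users',)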
def _code_for(value):
    if value == None:

@@ -455,6 +474,7 @@ def _code_for(value):
    else:
        raise ValueError("Unsupported data type: " + str(type(value)))


def _compare_floats(f1, f2):
    sign1 = int(math.copysign(1, f1))
    sign2 = int(math.copysign(1, f2))

@@ -473,7 +493,8 @@ def _compare_floats(f1, f2):
    # There are enough edge cases that bit comparison is safer.
    bytes1 = struct.pack(">d", f1)
    bytes2 = struct.pack(">d", f2)
-    return sign1*(-1 if bytes1 < bytes2 else 0 if bytes1 == bytes2 else 1)
+    return sign1 * (-1 if bytes1 < bytes2 else 0 if bytes1 == bytes2 else 1)
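The bit comparison matters where Python's own operators disagree with the encoding's total order, chiefly -0.0 versus 0.0 and NaN. A plain Python 3 sketch of the same trick (the differing-sign branch is an assumption, since that part of the function is cut from this hunk):

    import math
    import struct

    def compare_floats(f1, f2):
        sign1 = int(math.copysign(1, f1))
        if sign1 != int(math.copysign(1, f2)):
            return -1 if sign1 < 0 else 1           # assumed handling of differing signs
        b1, b2 = struct.pack(">d", f1), struct.pack(">d", f2)
        return sign1 * (-1 if b1 < b2 else 0 if b1 == b2 else 1)

    assert compare_floats(-0.0, 0.0) == -1          # equal in Python, ordered here
    assert compare_floats(float('nan'), 1.0) != 0   # NaN gets a definite position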
def _compare_values(value1, value2):
    code1 = _code_for(value1)

@@ -505,6 +526,7 @@ def _compare_values(value1, value2):
    # Booleans, UUIDs, integers, and Versionstamps can just use standard comparison.
    return -1 if value1 < value2 else 0 if value1 == value2 else 1


# compare element by element and return -1 if t1 < t2 or 1 if t1 > t2 or 0 if t1 == t2
def compare(t1, t2):
    i = 0

@@ -29,7 +29,7 @@ else
PYVER = $(VERSION)a1
endif

-fdb_python: bindings/python/fdb/fdboptions.py bindings/python/setup.py
+fdb_python: bindings/python/fdb/fdboptions.py bindings/python/setup.py fdb_python_check

bindings/python/fdb/fdboptions.py: bin/vexillographer.exe fdbclient/vexillographer/fdb.options
	@echo "Building $@"

@@ -43,6 +43,10 @@ bindings/python/setup.py: bindings/python/setup.py.in $(ALL_MAKEFILES) versions.
	@echo "Generating $@"
	@m4 -DVERSION=$(PYVER) $< > $@

+fdb_python_check: bindings/python/setup.py bindings/python/fdb/*.py bindings/python/tests/*.py
+	@echo "Checking fdb_python"
+	@bash -c "if which pycodestyle &> /dev/null ; then pycodestyle bindings/python --config=bindings/python/setup.cfg ; else echo \"Skipped Python style check! Missing: pycodestyle\"; fi"
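Note the check degrades to a warning rather than a failure when the linter is missing; running `pycodestyle bindings/python --config=bindings/python/setup.cfg` by hand (assuming pycodestyle is installed and on PATH) reproduces exactly what this target runs.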
fdb_python_sdist: fdb_python
	@mkdir -p packages
	@rm -rf bindings/python/dist

New file bindings/python/setup.cfg (the config fdb_python_check points at):

@@ -0,0 +1,4 @@
+[pycodestyle]
+max-line-length = 150
+exclude = six.py, fdboptions.py
+ignore = E266, E402, E711, E712, E721, E722, W503, W504
@@ -23,6 +23,7 @@ import fdb

TestError = Exception


def retry_with_timeout(seconds):
    def decorator(f):
        def wrapper(db):

@@ -41,15 +42,17 @@ def retry_with_timeout(seconds):
        return wrapper
    return decorator


default_timeout = 60


def test_cancellation(db):
    # (1) Basic cancellation
    @retry_with_timeout(default_timeout)
    def txn1(tr):
        tr.cancel()
        try:
-            tr.commit().wait() # should throw
+            tr.commit().wait()  # should throw
            raise TestError('Basic cancellation unit test failed.')
        except fdb.FDBError as e:
            if e.code != 1025:

@@ -63,7 +66,7 @@ def test_cancellation(db):
        tr.cancel()
        tr.reset()
        try:
-            tr.commit().wait() # should not throw
+            tr.commit().wait()  # should not throw
        except fdb.FDBError as e:
            if e.code == 1025:
                raise TestError('Cancellation survived reset.')

@@ -77,13 +80,13 @@ def test_cancellation(db):
    def txn3(tr):
        tr.cancel()
        try:
-            tr.on_error(fdb.FDBError(1007)).wait() # should throw
+            tr.on_error(fdb.FDBError(1007)).wait()  # should throw
            raise TestError('on_error() did not notice cancellation.')
        except fdb.FDBError as e:
            if e.code != 1025:
                raise
        try:
-            tr.commit().wait() # should throw
+            tr.commit().wait()  # should throw
            raise TestError('Cancellation did not survive on_error().')
        except fdb.FDBError as e:
            if e.code != 1025:

@@ -97,7 +100,7 @@ def test_cancellation(db):
        tr[b'foo']
        tr.cancel()
        try:
-            tr.get_read_version().wait() # should throw
+            tr.get_read_version().wait()  # should throw
            raise TestError("Cancellation didn't throw on weird operation.")
        except fdb.FDBError as e:
            if e.code != 1025:

@@ -107,6 +110,7 @@ def test_cancellation(db):

    return


def test_retry_limits(db):
    err = fdb.FDBError(1007)

@@ -115,11 +119,11 @@ def test_retry_limits(db):
    def txn1(tr):
        tr.options.set_retry_limit(1)
        tr[b'foo'] = b'bar'
-        tr.on_error(err).wait() # should not throw
+        tr.on_error(err).wait()  # should not throw
        tr[b'foo'] = b'bar'
        tr.options.set_retry_limit(1)
        try:
-            tr.on_error(err).wait() # should throw
+            tr.on_error(err).wait()  # should throw
            raise TestError('(1) Retry limit was ignored.')
        except fdb.FDBError as e:
            if e.code != 1007:

@@ -127,7 +131,7 @@ def test_retry_limits(db):
        tr[b'foo'] = b'bar'
        tr.options.set_retry_limit(1)
        try:
-            tr.on_error(err).wait() # should throw
+            tr.on_error(err).wait()  # should throw
            raise TestError('(1) Retry limit was ignored.')
        except fdb.FDBError as e:
            if e.code != 1007:

@@ -141,7 +145,7 @@ def test_retry_limits(db):
        tr.options.set_retry_limit(0)
        tr[b'foo'] = b'bar'
        try:
-            tr.on_error(err).wait() # should throw
+            tr.on_error(err).wait()  # should throw
            raise TestError('(2) Retry limit was ignored.')
        except fdb.FDBError as e:
            if e.code != 1007:

@@ -149,7 +153,7 @@ def test_retry_limits(db):
        tr.options.set_retry_limit(1)
        tr[b'foo'] = b'bar'
        try:
-            tr.on_error(err).wait() # should throw
+            tr.on_error(err).wait()  # should throw
            raise TestError('(2) Retry limit was ignored.')
        except fdb.FDBError as e:
            if e.code != 1007:

@@ -162,18 +166,18 @@ def test_retry_limits(db):
    def txn3(tr):
        tr.options.set_retry_limit(1)
        tr[b'foo'] = b'bar'
-        tr.on_error(err).wait() # should not throw
+        tr.on_error(err).wait()  # should not throw
        tr.options.set_retry_limit(1)
        tr[b'foo'] = b'bar'
        try:
-            tr.on_error(err).wait() # should throw
+            tr.on_error(err).wait()  # should throw
            raise TestError('(3) Retry limit was ignored.')
        except fdb.FDBError as e:
            if e.code != 1007:
                raise
        tr.reset()
        tr[b'foo'] = b'bar'
-        tr.on_error(err).wait() # should not throw
+        tr.on_error(err).wait()  # should not throw

    txn3(db)

@@ -183,7 +187,7 @@ def test_retry_limits(db):
        tr.options.set_retry_limit(0)
        tr[b'foo'] = b'bar'
        try:
-            tr.on_error(err).wait() # should throw
+            tr.on_error(err).wait()  # should throw
            raise TestError('(4) Retry limit was ignored.')
        except fdb.FDBError as e:
            if e.code != 1007:

@@ -191,7 +195,7 @@ def test_retry_limits(db):
        tr.reset()
        tr.options.set_retry_limit(1)
        tr[b'foo'] = b'bar'
-        tr.on_error(err).wait() # should not throw
+        tr.on_error(err).wait()  # should not throw

    txn4(db)

@@ -199,22 +203,22 @@ def test_retry_limits(db):
    @retry_with_timeout(default_timeout)
    def txn5(tr):
        tr[b'foo'] = b'bar'
-        tr.on_error(err).wait() # should not throw
+        tr.on_error(err).wait()  # should not throw
        tr[b'foo'] = b'bar'
-        tr.on_error(err).wait() # should not throw
+        tr.on_error(err).wait()  # should not throw
        tr.options.set_retry_limit(1)
        try:
-            tr.on_error(err).wait() # should throw
+            tr.on_error(err).wait()  # should throw
            raise TestError('(5) Retry limit was ignored.')
        except fdb.FDBError as e:
            if e.code != 1007:
                raise
        tr[b'foo'] = b'bar'
        tr.options.set_retry_limit(-1)
-        tr.on_error(err).wait() # should not throw
+        tr.on_error(err).wait()  # should not throw
        tr.options.set_retry_limit(4)
        try:
-            tr.on_error(err).wait() # should throw
+            tr.on_error(err).wait()  # should throw
            raise TestError('(5) Retry limit was ignored.')
        except fdb.FDBError as e:
            if e.code != 1007:

@@ -227,20 +231,21 @@ def test_retry_limits(db):
    def txn6(tr):
        tr.options.set_retry_limit(1)
        tr[b'foo'] = b'bar'
-        tr.on_error(err).wait() # should not throw
+        tr.on_error(err).wait()  # should not throw
        tr[b'foo'] = b'bar'
        tr.options.set_retry_limit(1)
        try:
-            tr.on_error(err).wait() # should throw
+            tr.on_error(err).wait()  # should throw
            raise TestError('(6) Retry limit was ignored.')
        except fdb.FDBError as e:
            if e.code != 1007:
                raise
        tr[b'foo'] = b'bar'
-        tr.on_error(err).wait() # should not throw
+        tr.on_error(err).wait()  # should not throw

    txn6(db)
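Cases (1) through (6) all pin down the same rule: set_retry_limit bounds how many times on_error() may retry since the transaction was created or last reset, and once exhausted on_error() re-raises the error instead of resetting. Condensed into one hedged sketch (not part of the diff):

    tr = db.create_transaction()
    tr.options.set_retry_limit(1)
    tr.on_error(fdb.FDBError(1007)).wait()        # first retry is allowed
    try:
        tr.on_error(fdb.FDBError(1007)).wait()    # second retry exceeds the limit
    except fdb.FDBError as e:
        assert e.code == 1007                     # the original error comes back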
def test_timeouts(db):
    # (1) Basic timeouts
    @retry_with_timeout(default_timeout)

@@ -248,7 +253,7 @@ def test_timeouts(db):
        tr.options.set_timeout(10)
        time.sleep(1)
        try:
-            tr.commit().wait() # should throw
+            tr.commit().wait()  # should throw
            raise TestError("Timeout didn't fire.")
        except fdb.FDBError as e:
            if e.code != 1031:

@@ -260,9 +265,9 @@ def test_timeouts(db):
    @retry_with_timeout(default_timeout)
    def txn2(tr):
        tr.options.set_timeout(100)
-        tr.on_error(fdb.FDBError(1007)).wait() # should not throw
+        tr.on_error(fdb.FDBError(1007)).wait()  # should not throw
        time.sleep(1)
-        tr.commit().wait() # should not throw
+        tr.commit().wait()  # should not throw

    txn2(db)

@@ -272,13 +277,13 @@ def test_timeouts(db):
        tr.options.set_timeout(100)
        time.sleep(1)
        try:
-            tr.on_error(fdb.FDBError(1007)).wait() # should throw
+            tr.on_error(fdb.FDBError(1007)).wait()  # should throw
            raise TestError("Timeout didn't fire.")
        except fdb.FDBError as e:
            if e.code != 1031:
                raise
        try:
-            tr.commit().wait() # should throw
+            tr.commit().wait()  # should throw
            raise TestError("Timeout didn't fire.")
        except fdb.FDBError as e:
            if e.code != 1031:

@@ -292,7 +297,7 @@ def test_timeouts(db):
        tr.options.set_timeout(100)
        tr.reset()
        time.sleep(1)
-        tr.commit().wait() # should not throw
+        tr.commit().wait()  # should not throw

    txn4(db)

@@ -302,18 +307,18 @@ def test_timeouts(db):
        tr.options.set_timeout(100)
        time.sleep(1)
        tr.reset()
-        tr.commit().wait() # should not throw
+        tr.commit().wait()  # should not throw

    txn5(db)

    # (6) Timeout will fire "retroactively"
    @retry_with_timeout(default_timeout)
    def txn6(tr):
-        tr[b'foo']=b'bar'
+        tr[b'foo'] = b'bar'
        time.sleep(1)
        tr.options.set_timeout(10)
        try:
-            tr.commit().wait() # should throw
+            tr.commit().wait()  # should throw
            raise TestError("Timeout didn't fire.")
        except fdb.FDBError as e:
            if e.code != 1031:

@@ -324,33 +329,32 @@ def test_timeouts(db):
    # (7) Transaction reset also resets time from which timeout is measured
    @retry_with_timeout(default_timeout)
    def txn7(tr):
-        tr[b'foo']=b'bar'
+        tr[b'foo'] = b'bar'
        time.sleep(1)
        start = time.time()
        tr.reset()
-        tr[b'foo']=b'bar'
+        tr[b'foo'] = b'bar'
        tr.options.set_timeout(500)
        try:
-            tr.commit().wait() #should not throw, but could if commit were slow:
+            tr.commit().wait()  # should not throw, but could if commit were slow:
        except fdb.FDBError as e:
            if e.code != 1031:
                raise
            if time.time() - start < 0.49:
                raise

    txn7(db)

    # (8) on_error() does not reset time from which timeout is measured
    @retry_with_timeout(default_timeout)
    def txn8(tr):
-        tr[b'foo']=b'bar'
+        tr[b'foo'] = b'bar'
        time.sleep(1)
-        tr.on_error(fdb.FDBError(1007)).wait() # should not throw
-        tr[b'foo']=b'bar'
+        tr.on_error(fdb.FDBError(1007)).wait()  # should not throw
+        tr[b'foo'] = b'bar'
        tr.options.set_timeout(100)
        try:
-            tr.commit().wait() # should throw
+            tr.commit().wait()  # should throw
            raise TestError("Timeout didn't fire.")
        except fdb.FDBError as e:
            if e.code != 1031:

@@ -361,24 +365,24 @@ def test_timeouts(db):
    # (9) Timeouts can be unset
    @retry_with_timeout(default_timeout)
    def txn9(tr):
-        tr[b'foo']=b'bar'
+        tr[b'foo'] = b'bar'
        tr.options.set_timeout(100)
-        tr[b'foo']=b'bar'
+        tr[b'foo'] = b'bar'
        tr.options.set_timeout(0)
        time.sleep(1)
-        tr.commit().wait() # should not throw
+        tr.commit().wait()  # should not throw

    txn9(db)

    # (10) Unsetting a timeout after it has fired doesn't help
    @retry_with_timeout(default_timeout)
    def txn10(tr):
-        tr[b'foo']=b'bar'
+        tr[b'foo'] = b'bar'
        tr.options.set_timeout(100)
        time.sleep(1)
        tr.options.set_timeout(0)
        try:
-            tr.commit().wait() # should throw
+            tr.commit().wait()  # should throw
            raise TestError("Timeout didn't fire.")
        except fdb.FDBError as e:
            if e.code != 1031:

@@ -398,8 +402,8 @@ def test_timeouts(db):
                tr.reset()
            except fdb.FDBError as e:
                if i == 0:
-                    if e.code != 1009: # future_version
-                        raise fdb.FDBError(1007) # Something weird happened; raise a retryable error so we run this transaction again
+                    if e.code != 1009:  # future_version
+                        raise fdb.FDBError(1007)  # Something weird happened; raise a retryable error so we run this transaction again
                    else:
                        tr.on_error(e).wait()
                elif i == 1 and e.code != 1031:

@@ -407,6 +411,7 @@ def test_timeouts(db):

    txn11(db)


def test_combinations(db):
    # (1) Hitting retry limit still clears timeouts
    @retry_with_timeout(default_timeout)

@@ -414,13 +419,13 @@ def test_combinations(db):
        tr.options.set_retry_limit(0)
        tr.options.set_timeout(100)
        try:
-            tr.on_error(fdb.FDBError(1007)).wait() # should throw
+            tr.on_error(fdb.FDBError(1007)).wait()  # should throw
            raise TestError("Retry limit was ignored.")
        except fdb.FDBError as e:
            if e.code != 1007:
                raise
        time.sleep(1)
-        tr.commit().wait() # should not throw
+        tr.commit().wait()  # should not throw

    txn1(db)

@@ -430,13 +435,13 @@ def test_combinations(db):
        tr.options.set_retry_limit(0)
        tr.cancel()
        try:
-            tr.on_error(fdb.FDBError(1007)).wait() # should throw
+            tr.on_error(fdb.FDBError(1007)).wait()  # should throw
            raise TestError('on_error() did not notice cancellation.')
        except fdb.FDBError as e:
            if e.code != 1025:
                raise
        try:
-            tr.commit().wait() # should throw
+            tr.commit().wait()  # should throw
            raise TestError('Cancellation did not survive on_error().')
        except fdb.FDBError as e:
            if e.code != 1025:
@@ -44,10 +44,12 @@ log_ops = False
log_dirs = False
log_errors = False


def log_op(msg, force=False):
    if log_ops or log_all or force:
        print(msg)


class DirectoryExtension():
    def __init__(self):
        self.dir_list = [fdb.directory]

@@ -60,7 +62,7 @@ class DirectoryExtension():
            actual_num = 1

        tuples = tuple([tuple(stack.pop(stack.pop())) for i in range(actual_num)])

        if num is None:
            return tuples[0]

@@ -71,7 +73,7 @@ class DirectoryExtension():
            print('pushed %s at %d (op=%s)' % (dir.__class__.__name__, len(self.dir_list), inst.op))

        self.dir_list.append(dir)

    def process_instruction(self, inst):
        try:
            if log_all or log_instructions:

@@ -86,14 +88,15 @@ class DirectoryExtension():
            elif inst.op == six.u('DIRECTORY_CREATE_LAYER'):
                index1, index2, allow_manual_prefixes = inst.pop(3)
                if self.dir_list[index1] is None or self.dir_list[index2] is None:
-                    log_op('create directory layer: None');
+                    log_op('create directory layer: None')
                    self.append_dir(inst, None)
                else:
-                    log_op('create directory layer: node_subspace (%d) = %r, content_subspace (%d) = %r, allow_manual_prefixes = %d' % (index1, self.dir_list[index1].rawPrefix, index2, self.dir_list[index2].rawPrefix, allow_manual_prefixes))
+                    log_op('create directory layer: node_subspace (%d) = %r, content_subspace (%d) = %r, allow_manual_prefixes = %d' %
+                           (index1, self.dir_list[index1].rawPrefix, index2, self.dir_list[index2].rawPrefix, allow_manual_prefixes))
                    self.append_dir(inst, fdb.DirectoryLayer(self.dir_list[index1], self.dir_list[index2], allow_manual_prefixes == 1))
            elif inst.op == six.u('DIRECTORY_CHANGE'):
                self.dir_index = inst.pop()
-                if not self.dir_list[self.dir_index]:
+                if not self.dir_list[self.dir_index]:
                    self.dir_index = self.error_index
                    if log_dirs or log_all:
                        new_dir = self.dir_list[self.dir_index]

@@ -123,7 +126,7 @@ class DirectoryExtension():
                log_op('move %s to %s' % (repr(directory.get_path() + old_path), repr(directory.get_path() + new_path)))
                self.append_dir(inst, directory.move(inst.tr, old_path, new_path))
            elif inst.op == six.u('DIRECTORY_MOVE_TO'):
-                new_absolute_path = self.pop_tuples(inst.stack)
+                new_absolute_path = self.pop_tuples(inst.stack)
                log_op('move %s to %s' % (repr(directory.get_path()), repr(new_absolute_path)))
                self.append_dir(inst, directory.move_to(inst.tr, new_absolute_path))
            elif inst.op == six.u('DIRECTORY_REMOVE'):

@@ -220,7 +223,7 @@ class DirectoryExtension():
        except Exception as e:
            if log_all or log_errors:
                print(e)
-                #traceback.print_exc(file=sys.stdout)
+                # traceback.print_exc(file=sys.stdout)

            if inst.op in ops_that_create_dirs:
                self.append_dir(inst, None)
@@ -30,7 +30,7 @@ import random
import time
import traceback

-sys.path[:0]=[os.path.join(os.path.dirname(__file__), '..')]
+sys.path[:0] = [os.path.join(os.path.dirname(__file__), '..')]
import fdb
fdb.api_version(int(sys.argv[2]))

@@ -52,6 +52,7 @@ if len(sys.argv) == 4:
else:
    db = fdb.open()


class Stack:
    def __init__(self):
        self.stack = []

@@ -90,7 +91,7 @@ class Stack:
                    else:
                        raw[i] = (raw[i][0], val)
                except fdb.FDBError as e:
-                    #print('ERROR: %s' % repr(e))
+                    # print('ERROR: %s' % repr(e))
                    raw[i] = (raw[i][0], fdb.tuple.pack((b'ERROR', str(e.code).encode('ascii'))))

        if count is None:

@@ -104,6 +105,7 @@ class Stack:
        else:
            return [item[1] for item in raw]


class Instruction:
    def __init__(self, tr, stack, op, index, isDatabase=False, isSnapshot=False):
        self.tr = tr

@@ -119,6 +121,7 @@ class Instruction:
    def push(self, val):
        self.stack.push(self.index, val)


@fdb.transactional
def test_options(tr):
    tr.options.set_priority_system_immediate()

@@ -130,9 +133,9 @@ def test_options(tr):
    tr.options.set_read_system_keys()
    tr.options.set_access_system_keys()
    tr.options.set_durability_dev_null_is_web_scale()
-    tr.options.set_timeout(60*1000);
-    tr.options.set_retry_limit(50);
-    tr.options.set_max_retry_delay(100);
+    tr.options.set_timeout(60 * 1000)
+    tr.options.set_retry_limit(50)
+    tr.options.set_max_retry_delay(100)
    tr.options.set_used_during_commit_protection_disable()
    tr.options.set_transaction_logging_enable('my_transaction')
    tr.options.set_read_lock_aware()

@@ -140,6 +143,7 @@ def test_options(tr):

    tr.get(b'\xff').wait()


def check_watches(db, watches, expected):
    for i, watch in enumerate(watches):
        if watch.is_ready() or expected:

@@ -154,6 +158,7 @@ def check_watches(db, watches, expected):

    return True


def test_watches(db):
    while True:
        db[b'w0'] = b'0'

@@ -196,10 +201,11 @@ def test_watches(db):
        if check_watches(db, watches, True):
            return


@fdb.transactional
def test_locality(tr):
-    tr.options.set_timeout(60*1000)
-    tr.options.set_read_system_keys() # We do this because the last shard (for now, someday the last N shards) is in the /FF/ keyspace
+    tr.options.set_timeout(60 * 1000)
+    tr.options.set_read_system_keys()  # We do this because the last shard (for now, someday the last N shards) is in the /FF/ keyspace

    # This isn't strictly transactional, though we expect it to be given the size of our database
    boundary_keys = list(fdb.locality.get_boundary_keys(tr, b'', b'\xff\xff')) + [b'\xff\xff']

@@ -211,12 +217,14 @@ def test_locality(tr):
    if [set(s.wait()) for s in start_addresses] != [set(e.wait()) for e in end_addresses]:
        raise Exception("Locality not internally consistent.")


def test_predicates():
    assert fdb.predicates.is_retryable(fdb.FDBError(1020))
    assert not fdb.predicates.is_retryable(fdb.FDBError(10))


class Tester:
-    tr_map = { }
+    tr_map = {}
    tr_map_lock = threading.RLock()

    def __init__(self, db, prefix):

@@ -234,16 +242,16 @@ class Tester:

    def push_range(self, inst, iter, prefix_filter=None):
        kvs = []
-        for k,v in iter:
+        for k, v in iter:
            if prefix_filter is None or k.startswith(prefix_filter):
-                kvs += [k,v]
+                kvs += [k, v]

-        inst.push( fdb.tuple.pack( tuple(kvs) ) )
+        inst.push(fdb.tuple.pack(tuple(kvs)))

    @staticmethod
    @fdb.transactional
    def wait_empty(tr, prefix):
-        res = tr.get_range_startswith(prefix, 1).to_list();
+        res = tr.get_range_startswith(prefix, 1).to_list()
        if len(res) == 1:
            raise fdb.FDBError(1020)

@@ -255,7 +263,6 @@ class Tester:

        tr.set(pk, pv[:40000])

    def current_transaction(self):
        with Tester.tr_map_lock:
            return Tester.tr_map[self.tr_name]

@@ -267,7 +274,7 @@ class Tester:
    def switch_transaction(self, name):
        self.tr_name = name
        with Tester.tr_map_lock:
-            if not self.tr_name in Tester.tr_map:
+            if self.tr_name not in Tester.tr_map:
                self.new_transaction()

    def run(self):

@@ -277,7 +284,7 @@ class Tester:

            # print("Stack is %r" % self.stack)
            # if op != "PUSH" and op != "SWAP":
-            # print("%d. Instruction is %s" % (idx, op))
+            #     print("%d. Instruction is %s" % (idx, op))

            isDatabase = op.endswith(six.u('_DATABASE'))
            isSnapshot = op.endswith(six.u('_SNAPSHOT'))

@@ -387,7 +394,7 @@ class Tester:
                prefix = inst.pop()
                entries = {}
                while len(self.stack) > 0:
-                    stack_index = len(self.stack)-1
+                    stack_index = len(self.stack) - 1
                    entries[stack_index] = inst.pop(with_idx=True)
                    if len(entries) == 100:
                        self.log_stack(self.db, prefix, entries)

@@ -401,10 +408,10 @@ class Tester:
                if obj == self.db:
                    inst.push(b"RESULT_NOT_PRESENT")
            elif inst.op == six.u("SET_READ_VERSION"):
-                inst.tr.set_read_version( self.last_version )
+                inst.tr.set_read_version(self.last_version)
            elif inst.op == six.u("CLEAR"):
                if random.random() < 0.5:
-                    del obj[ inst.pop() ]
+                    del obj[inst.pop()]
                else:
                    obj.clear(inst.pop())

@@ -418,25 +425,25 @@ class Tester:
                elif num == 1:
                    obj.clear_range(begin, end)
                else:
-                    obj.__delitem__( slice( begin, end ) )
+                    obj.__delitem__(slice(begin, end))

                if obj == self.db:
                    inst.push(b"RESULT_NOT_PRESENT")
            elif inst.op == six.u("CLEAR_RANGE_STARTS_WITH"):
-                obj.clear_range_startswith( inst.pop() )
+                obj.clear_range_startswith(inst.pop())
                if obj == self.db:
                    inst.push(b"RESULT_NOT_PRESENT")
            elif inst.op == six.u("READ_CONFLICT_RANGE"):
-                inst.tr.add_read_conflict_range( inst.pop(), inst.pop() )
+                inst.tr.add_read_conflict_range(inst.pop(), inst.pop())
                inst.push(b"SET_CONFLICT_RANGE")
            elif inst.op == six.u("WRITE_CONFLICT_RANGE"):
-                inst.tr.add_write_conflict_range( inst.pop(), inst.pop() )
+                inst.tr.add_write_conflict_range(inst.pop(), inst.pop())
                inst.push(b"SET_CONFLICT_RANGE")
            elif inst.op == six.u("READ_CONFLICT_KEY"):
-                inst.tr.add_read_conflict_key( inst.pop() )
+                inst.tr.add_read_conflict_key(inst.pop())
                inst.push(b"SET_CONFLICT_KEY")
            elif inst.op == six.u("WRITE_CONFLICT_KEY"):
-                inst.tr.add_write_conflict_key( inst.pop() )
+                inst.tr.add_write_conflict_key(inst.pop())
                inst.push(b"SET_CONFLICT_KEY")
            elif inst.op == six.u("DISABLE_WRITE_CONFLICT"):
                inst.tr.options.set_next_write_no_write_conflict_range()

@@ -472,7 +479,7 @@ class Tester:
                else:
                    inst.push(b"ERROR: MULTIPLE")
            elif inst.op == six.u("TUPLE_UNPACK"):
-                for i in fdb.tuple.unpack( inst.pop() ):
+                for i in fdb.tuple.unpack(inst.pop()):
                    inst.push(fdb.tuple.pack((i,)))
            elif inst.op == six.u("TUPLE_SORT"):
                count = inst.pop()

@@ -487,7 +494,7 @@ class Tester:
            elif inst.op == six.u("TUPLE_RANGE"):
                count = inst.pop()
                items = inst.pop(count)
-                r = fdb.tuple.range( tuple(items) )
+                r = fdb.tuple.range(tuple(items))
                inst.push(r.start)
                inst.push(r.stop)
            elif inst.op == six.u("ENCODE_FLOAT"):

@@ -507,7 +514,7 @@ class Tester:
                d_bytes = struct.pack(">d", d)
                inst.push(d_bytes)
            elif inst.op == six.u("START_THREAD"):
-                t = Tester( self.db, inst.pop() )
+                t = Tester(self.db, inst.pop())
                thr = threading.Thread(target=t.run)
                thr.start()
                self.threads.append(thr)

@@ -539,13 +546,14 @@ class Tester:
                raise Exception("Unknown op %s" % inst.op)
            except fdb.FDBError as e:
                # print('ERROR: %s' % repr(e))
-                inst.stack.push( idx, fdb.tuple.pack( (b"ERROR", str(e.code).encode('ascii')) ) )
+                inst.stack.push(idx, fdb.tuple.pack((b"ERROR", str(e.code).encode('ascii'))))

            # print(" to %s" % self.stack)
            # print()

        [thr.join() for thr in self.threads]


if __name__ == '__main__':
    t = Tester(db, sys.argv[1].encode('ascii'))
    t.run()
@@ -20,7 +20,13 @@
#

-import ctypes, sys, random, struct, unicodedata, math, uuid
+import ctypes
+import sys
+import random
+import struct
+import unicodedata
+import math
+import uuid

_range = range

@@ -29,20 +35,22 @@ from fdb import six

from fdb.six import u


def randomUnicode():
    while True:
        c = random.randint(0, 0xffff)
        if unicodedata.category(unichr(c))[0] in 'LMNPSZ':
            return unichr(c)


def randomElement():
-    r = random.randint(0,9)
+    r = random.randint(0, 9)
    if r == 0:
        if random.random() < 0.5:
            chars = [b'\x00', b'\x01', b'a', b'7', b'\xfe', b'\ff']
            return b''.join([random.choice(chars) for c in _range(random.randint(0, 5))])
        else:
-            return b''.join([six.int2byte(random.randint(0,255)) for _ in _range(random.randint(0,10))])
+            return b''.join([six.int2byte(random.randint(0, 255)) for _ in _range(random.randint(0, 10))])
    elif r == 1:
        if random.random() < 0.5:
            chars = [u('\x00'), u('\x01'), u('a'), u('7'), u('\xfe'), u('\ff'), u('\u0000'), u('\u0001'), u('\uffff'), u('\uff00'), u('\U0001f4a9')]

@@ -63,24 +71,27 @@ def randomElement():
        return ret
    elif r == 6:
        is_double = random.random() < 0.5
-        byte_str = b''.join([six.int2byte(random.randint(0,255)) for _ in _range(8 if is_double else 4)])
+        byte_str = b''.join([six.int2byte(random.randint(0, 255)) for _ in _range(8 if is_double else 4)])
        if is_double:
            return struct.unpack(">d", byte_str)[0]
        else:
            return SingleFloat(struct.unpack(">f", byte_str)[0])
    elif r == 7:
-        return random.random() < 0.5
+        return random.random() < 0.5
    elif r == 8:
        return uuid.uuid4()
    elif r == 9:
-        return [randomElement() for _ in _range(random.randint(0,5))]
+        return [randomElement() for _ in _range(random.randint(0, 5))]


def randomTuple():
-    return tuple( randomElement() for x in _range(random.randint(0,4)) )
+    return tuple(randomElement() for x in _range(random.randint(0, 4)))

-def isprefix(a,b):
+
+def isprefix(a, b):
    return compare(a, b[:len(a)]) == 0


def find_bad_sort(a, b):
    for x1 in a:
        for x2 in b:

@@ -88,26 +99,33 @@ def find_bad_sort(a, b):
            return (x1, x2)
    return None


def equalEnough(t1, t2):
-    if len(t1) != len(t2): return False
+    if len(t1) != len(t2):
+        return False

    for i in _range(len(t1)):
        e1 = t1[i]
        e2 = t2[i]

        if isinstance(e1, SingleFloat):
-            if not isinstance(e2, SingleFloat): return False
+            if not isinstance(e2, SingleFloat):
+                return False
            return ctypes.c_float(e1.value).value == ctypes.c_float(e2.value).value
        elif isinstance(e1, list) or isinstance(e2, tuple):
-            if not (isinstance(e2, list) or isinstance(e2, tuple)): return False
-            if not equalEnough(e1, e2): return False
+            if not (isinstance(e2, list) or isinstance(e2, tuple)):
+                return False
+            if not equalEnough(e1, e2):
+                return False
        else:
-            if e1 != e2: return False
+            if e1 != e2:
+                return False

    return True


def tupleTest(N=10000):
-    someTuples = [ randomTuple() for i in _range(N) ]
+    someTuples = [randomTuple() for i in _range(N)]
    a = sorted(someTuples, cmp=compare)
    b = sorted(someTuples, key=pack)

@@ -116,7 +134,7 @@ def tupleTest(N=10000):
        if problem:
            print("Bad sort:\n  %s\n  %s" % (problem[0], problem[1]))
            print("Bytes:\n  %s\n  %s" % (repr(pack(problem[0])), repr(pack(problem[1]))))
-            #print("Tuple order:\n  %s\n  %s" % (tupleorder(problem[0]), tupleorder(problem[1])))
+            # print("Tuple order:\n  %s\n  %s" % (tupleorder(problem[0]), tupleorder(problem[1])))
            return False
        else:
            print("Sorts unequal but every pair correct")

@@ -135,14 +153,17 @@ def tupleTest(N=10000):

        r = range(t)
        if r.start <= pack(t) < r.stop:
-            print("element within own range:\n  Tuple: %s\n  Bytes: %s\n  Start: %s\n  Stop: %s" % (t, repr(pack(t)), repr(r.start), repr(r.stop)))
+            print("element within own range:\n  Tuple: %s\n  Bytes: %s\n  Start: %s\n  Stop: %s" %
+                  (t, repr(pack(t)), repr(r.start), repr(r.stop)))
        if not r.start <= pack(t2) < r.stop:
-            print("prefixed element not in range:\n  Tuple: %s\n  Bytes: %s\n  Prefixed: %s\n  Bytes: %s" % (t, repr(pack(t)), t2, repr(pack(t2))))
+            print("prefixed element not in range:\n  Tuple: %s\n  Bytes: %s\n  Prefixed: %s\n  Bytes: %s" %
+                  (t, repr(pack(t)), t2, repr(pack(t2))))
            return False

        if not isprefix(t, t3):
            if r.start <= pack(t3) <= r.stop:
-                print("non-prefixed element in range:\n  Tuple: %s\n  Bytes: %s\n  Other: %s\n  Bytes: %s" % (t, repr(pack(t)), t3, repr(pack(t3))))
+                print("non-prefixed element in range:\n  Tuple: %s\n  Bytes: %s\n  Other: %s\n  Bytes: %s"
+                      % (t, repr(pack(t)), t3, repr(pack(t3))))
                return False

        if (compare(t, t3) < 0) != (pack(t) < pack(t3)):

@@ -159,5 +180,6 @@ def tupleTest(N=10000):
# a = ('\x00a', -2, 'b\x01', 12345, '')
# assert(a==fdbtuple.unpack(fdbtuple.pack(a)))

-if __name__=='__main__':
+
+if __name__ == '__main__':
    assert tupleTest(10000)
@@ -23,37 +23,37 @@
import sys

if len(sys.argv) != 2:
-    print """Usage:
+    print """Usage:
  %s [input]""" % sys.argv[0]
-    sys.exit()
+    sys.exit()

csproj = sys.argv[1]

from xml.dom.minidom import parse

try:
-    dom = parse(csproj)
+    dom = parse(csproj)
except:
-    print "ERROR: Unable to open CSProj file %s" % csproj
-    sys.exit()
+    print "ERROR: Unable to open CSProj file %s" % csproj
+    sys.exit()

outputType = dom.getElementsByTagName("OutputType")[0].childNodes[0].data
assemblyName = dom.getElementsByTagName("AssemblyName")[0].childNodes[0].data

if outputType == "Exe":
-    print "define(`GENTARGET', `bin/%s.exe')dnl" % assemblyName
-    print "define(`GENOUTPUTTYPE', `exe')dnl"
+    print "define(`GENTARGET', `bin/%s.exe')dnl" % assemblyName
+    print "define(`GENOUTPUTTYPE', `exe')dnl"
elif outputType == "Library":
-    print "define(`GENTARGET', `bin/%s.dll')dnl" % assemblyName
-    print "define(`GENOUTPUTTYPE', `library')dnl"
+    print "define(`GENTARGET', `bin/%s.dll')dnl" % assemblyName
+    print "define(`GENOUTPUTTYPE', `library')dnl"
else:
-    print "ERROR: Unable to determine output type"
-    sys.exit()
+    print "ERROR: Unable to determine output type"
+    sys.exit()

-sources = [ node.getAttribute("Include").replace('\\', '/') for node in
-            dom.getElementsByTagName("Compile") ]
-assemblies = [ node.getAttribute("Include") for node in
-               dom.getElementsByTagName("Reference") ]
+sources = [node.getAttribute("Include").replace('\\', '/') for node in
+           dom.getElementsByTagName("Compile")]
+assemblies = [node.getAttribute("Include") for node in
+              dom.getElementsByTagName("Reference")]

print "define(`GENSOURCES', `%s')dnl" % ' '.join(sources)
print "define(`GENREFERENCES', `%s')dnl" % ','.join(assemblies)
@@ -24,6 +24,7 @@ import tarfile
import argparse
import glob


def createTarball(outFile, rootDir, inFiles):
    tar = tarfile.open(outFile, 'w:gz')
    for fStr in inFiles:

@@ -35,6 +36,7 @@ def createTarball(outFile, rootDir, inFiles):

    tar.close()


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Python tar utility')
    parser.add_argument('-r', dest='rootDir', type=str, help='The root directory for files in the tarball')
@@ -64,10 +64,10 @@ else:
    print "ERROR: Unable to determine configuration type"
    sys.exit()

-sources = [ node.getAttribute("Include").replace('\\', '/') for node in
-            dom.getElementsByTagName("ActorCompiler") +
-            dom.getElementsByTagName("ClCompile") +
-            dom.getElementsByTagName("ClInclude")
-            if not node.getElementsByTagName("ExcludedFromBuild") and node.hasAttribute("Include") ]
+sources = [node.getAttribute("Include").replace('\\', '/') for node in
+           dom.getElementsByTagName("ActorCompiler") +
+           dom.getElementsByTagName("ClCompile") +
+           dom.getElementsByTagName("ClInclude")
+           if not node.getElementsByTagName("ExcludedFromBuild") and node.hasAttribute("Include")]

print "define(`GENSOURCES', `%s')dnl" % ' '.join(sorted(sources))
@@ -21,57 +21,76 @@
import random
import copy


class Context:
    tok = 0
    inLoop = False
    indent = 0

    def __init__(self):
        self.random = random.Random()

    def uniqueID(self):
        return self.random.randint(100000, 999999)


class InfiniteLoop (Exception):
    pass


class ExecContext:
    iterationsLeft = 1000
    ifstate = 0

    def __init__(self, inputSeq):
        self.input = iter(inputSeq)
        self.output = []
-    def inp(self): return next(self.input)
-    def out(self, x): self.output.append(x)
+
+    def inp(self):
+        return next(self.input)
+
+    def out(self, x):
+        self.output.append(x)

    def infinityCheck(self):
        self.iterationsLeft -= 1
        if self.iterationsLeft <= 0:
            raise InfiniteLoop()


OK = 1
BREAK = 2
THROW = 3
RETURN = 4
CONTINUE = 5


def indent(cx):
    return "\t" * cx.indent


class F (object):
    def unreachable(self):
        return False

    def containsbreak(self):
        return False


class hashF (F):
    def __init__(self, cx):
        self.cx = cx
        self.uniqueID = cx.uniqueID()

    def __str__(self):
        return indent(self.cx) + "outputStream.send( %d );\n" % self.uniqueID

    def eval(self, ecx):
        ecx.infinityCheck()
-        ecx.out( self.uniqueID )
+        ecx.out(self.uniqueID)
        return OK


class compoundF(F):
    def __init__(self, cx, children):
        self.cx = cx

@@ -81,17 +100,21 @@ class compoundF(F):
            if c.unreachable():
                self.unreachable = lambda: 1
                break

    def __str__(self):
-        return "".join( str(c) for c in self.children )
+        return "".join(str(c) for c in self.children)

    def eval(self, ecx):
        for c in self.children:
            ecx.infinityCheck()
-            result = c.eval( ecx )
+            result = c.eval(ecx)
            if result != OK:
                break
        return result

    def containsbreak(self):
-        return any( c.containsbreak() for c in self.children )
+        return any(c.containsbreak() for c in self.children)


class loopF (F):
    def __init__(self, cx):

@@ -99,255 +122,297 @@ class loopF (F):
        ccx = copy.copy(cx)
        ccx.indent += 1
        ccx.inLoop = True
-        self.body = compoundF( ccx, [hashF(ccx)] + [fuzzCode(ccx)(ccx)] + [hashF(ccx)] )
+        self.body = compoundF(ccx, [hashF(ccx)] + [fuzzCode(ccx)(ccx)] + [hashF(ccx)])
        self.uniqueID = cx.uniqueID()
        self.forever = cx.random.random() < 0.1

    def __str__(self):
        if self.forever:
            return (indent(self.cx) + "loop {\n" +
-                str(self.body) +
+                    str(self.body) +
                    indent(self.cx) + "}\n")
        else:
-            return (indent(self.cx) + "state int i%d; for(i%d = 0; i%d < 5; i%d++) {\n" % ((self.uniqueID,)*4) +
-                str(self.body) +
+            return (indent(self.cx) + "state int i%d; for(i%d = 0; i%d < 5; i%d++) {\n" % ((self.uniqueID,) * 4) +
+                    str(self.body) +
                    indent(self.cx) + "}\n")

    def eval(self, ecx):
        if self.forever:
            while True:
                ecx.infinityCheck()
-                result = self.body.eval( ecx )
+                result = self.body.eval(ecx)
                if result == BREAK:
                    break
-                elif result not in (OK,CONTINUE):
+                elif result not in (OK, CONTINUE):
                    return result
        else:
            for i in range(5):
                ecx.infinityCheck()
-                result = self.body.eval( ecx )
+                result = self.body.eval(ecx)
                if result == BREAK:
                    break
-                elif result not in (OK,CONTINUE):
+                elif result not in (OK, CONTINUE):
                    return result
        return OK

    def unreachable(self):
        return self.forever and not self.body.containsbreak()


class rangeForF (F):
    def __init__(self, cx):
        self.cx = cx
        ccx = copy.copy(cx)
        ccx.indent += 1
        ccx.inLoop = True
-        self.body = compoundF( ccx, [hashF(ccx)] + [fuzzCode(ccx)(ccx)] + [hashF(ccx)] )
+        self.body = compoundF(ccx, [hashF(ccx)] + [fuzzCode(ccx)(ccx)] + [hashF(ccx)])
        self.uniqueID = cx.uniqueID()

    def __str__(self):
        return (
            indent(self.cx) +
-            ("\n"+indent(self.cx)).join([
+            ("\n" + indent(self.cx)).join([
                "state std::vector<int> V;",
                "V.push_back(1);",
                "V.push_back(2);",
                "V.push_back(3);",
                "for( auto i : V ) {\n",
-                ]).replace("V", "list%d" % self.uniqueID) +
+            ]).replace("V", "list%d" % self.uniqueID) +
            str(self.body) +
            indent(self.cx) + "}\n")

    def eval(self, ecx):
-        for i in range(1,4):
+        for i in range(1, 4):
            ecx.infinityCheck()
-            result = self.body.eval( ecx )
+            result = self.body.eval(ecx)
            if result == BREAK:
                break
-            elif result not in (OK,CONTINUE):
+            elif result not in (OK, CONTINUE):
                return result
        return OK

    def unreachable(self):
        return False
|
||||
|
||||
|
||||
class ifF (F):
|
||||
def __init__(self, cx):
|
||||
self.cx = cx
|
||||
ccx = copy.copy(cx)
|
||||
ccx.indent += 1
|
||||
self.toggle = cx.random.randint(0,1)
|
||||
self.ifbody = compoundF( ccx, [hashF(ccx)] + [fuzzCode(ccx)(ccx)] + [hashF(ccx)] )
|
||||
self.toggle = cx.random.randint(0, 1)
|
||||
self.ifbody = compoundF(ccx, [hashF(ccx)] + [fuzzCode(ccx)(ccx)] + [hashF(ccx)])
|
||||
if cx.random.random() < 0.5:
|
||||
ccx = copy.copy(cx)
|
||||
ccx.indent += 1
|
||||
self.elsebody = compoundF( ccx, [hashF(ccx)] + [fuzzCode(ccx)(ccx)] + [hashF(ccx)] )
|
||||
self.elsebody = compoundF(ccx, [hashF(ccx)] + [fuzzCode(ccx)(ccx)] + [hashF(ccx)])
|
||||
else:
|
||||
self.elsebody = None
|
||||
|
||||
def __str__(self):
|
||||
s = (indent(self.cx) + "if ( (++ifstate&1) == %d ) {\n" % self.toggle +
|
||||
str(self.ifbody))
|
||||
str(self.ifbody))
|
||||
if self.elsebody:
|
||||
s += (indent(self.cx) + "} else {\n" +
|
||||
str(self.elsebody))
|
||||
str(self.elsebody))
|
||||
s += indent(self.cx) + "}\n"
|
||||
return s
|
||||
|
||||
def eval(self, ecx):
|
||||
ecx.infinityCheck()
|
||||
ecx.ifstate = ecx.ifstate + 1
|
||||
if (ecx.ifstate&1) == self.toggle:
|
||||
if (ecx.ifstate & 1) == self.toggle:
|
||||
return self.ifbody.eval(ecx)
|
||||
elif self.elsebody:
|
||||
return self.elsebody.eval(ecx)
|
||||
else:
|
||||
return OK
|
||||
|
||||
def unreachable(self):
|
||||
return self.elsebody and self.ifbody.unreachable() and self.elsebody.unreachable()
|
||||
|
||||
def containsbreak(self):
|
||||
return self.ifbody.containsbreak() or (self.elsebody and self.elsebody.containsbreak())
|
||||
|
||||
|
||||
class tryF (F):
|
||||
def __init__(self, cx):
|
||||
self.cx = cx
|
||||
ccx = copy.copy(cx)
|
||||
ccx.indent += 1
|
||||
self.body = compoundF( ccx, [hashF(ccx)] + [fuzzCode(ccx)(ccx)] + [hashF(ccx)] )
|
||||
self.body = compoundF(ccx, [hashF(ccx)] + [fuzzCode(ccx)(ccx)] + [hashF(ccx)])
|
||||
ccx = copy.copy(cx)
|
||||
ccx.indent += 1
|
||||
self.catch = compoundF( ccx, [hashF(ccx)] + [fuzzCode(ccx)(ccx)] + [hashF(ccx)] )
|
||||
self.catch = compoundF(ccx, [hashF(ccx)] + [fuzzCode(ccx)(ccx)] + [hashF(ccx)])
|
||||
|
||||
def __str__(self):
|
||||
return (indent(self.cx) + "try {\n" +
|
||||
str( self.body ) +
|
||||
str(self.body) +
|
||||
indent(self.cx) + "} catch (...) {\n" +
|
||||
str( self.catch ) +
|
||||
str(self.catch) +
|
||||
indent(self.cx) + "}\n"
|
||||
)
|
||||
|
||||
def eval(self, ecx):
|
||||
ecx.infinityCheck()
|
||||
result = self.body.eval(ecx)
|
||||
if result != THROW: return result
|
||||
if result != THROW:
|
||||
return result
|
||||
return self.catch.eval(ecx)
|
||||
|
||||
def unreachable(self):
|
||||
return self.body.unreachable() and self.catch.unreachable()
|
||||
|
||||
def containsbreak(self):
|
||||
return self.body.containsbreak() or self.catch.containsbreak()
|
||||
|
||||
def doubleF( cx ):
|
||||
return compoundF( cx, [fuzzCode(cx)(cx)] + [hashF(cx)] + [fuzzCode(cx)(cx)] )
|
||||
|
||||
def doubleF(cx):
|
||||
return compoundF(cx, [fuzzCode(cx)(cx)] + [hashF(cx)] + [fuzzCode(cx)(cx)])
|
||||
|
||||
|
||||
class breakF(F):
|
||||
def __init__(self, cx):
|
||||
self.cx = cx
|
||||
|
||||
def __str__(self):
|
||||
return indent(self.cx) + "break;\n"
|
||||
|
||||
def unreachable(self):
|
||||
return True
|
||||
|
||||
def eval(self, ecx):
|
||||
ecx.infinityCheck()
|
||||
return BREAK
|
||||
|
||||
def containsbreak(self):
|
||||
return True
|
||||
|
||||
|
||||
class continueF(F):
|
||||
def __init__(self, cx):
|
||||
self.cx = cx
|
||||
|
||||
def __str__(self):
|
||||
return indent(self.cx) + "continue;\n"
|
||||
|
||||
def unreachable(self):
|
||||
return True
|
||||
|
||||
def eval(self, ecx):
|
||||
ecx.infinityCheck()
|
||||
return CONTINUE
|
||||
|
||||
class waitF( F ):
|
||||
|
||||
class waitF(F):
|
||||
def __init__(self, cx):
|
||||
self.cx = cx
|
||||
self.uniqueID = cx.uniqueID()
|
||||
|
||||
def __str__(self):
|
||||
return (
|
||||
indent(self.cx) + "int input = waitNext( inputStream );\n" +
|
||||
indent(self.cx) + "outputStream.send( input + %d );\n" % self.uniqueID
|
||||
)
|
||||
)
|
||||
|
||||
def eval(self, ecx):
|
||||
ecx.infinityCheck()
|
||||
input = ecx.inp()
|
||||
ecx.out( (input + self.uniqueID)&0xffffffff )
|
||||
ecx.out((input + self.uniqueID) & 0xffffffff)
|
||||
return OK
|
||||
|
||||
class throwF( F ):
|
||||
|
||||
class throwF(F):
|
||||
def __init__(self, cx):
|
        self.cx = cx

    def __str__(self):
        return indent(self.cx) + "throw operation_failed();\n"

    def unreachable(self):
        return True

    def eval(self, ecx):
        ecx.infinityCheck()
        return THROW


class throwF2( throwF ):
class throwF2(throwF):
    def __str__(self):
        return indent(self.cx) + "throw_operation_failed();\n"

    def unreachable(self):
        return False  # The actor compiler doesn't know the function never returns


class throwF3( throwF ):
class throwF3(throwF):
    def __str__(self):
        return indent(self.cx) + "Void _ = wait( error ); // throw operation_failed()\n"

    def unreachable(self):
        return False  # The actor compiler doesn't know that 'error' always contains an error


class returnF( F ):
class returnF(F):
    def __init__(self, cx):
        self.cx = cx
        self.uniqueID = cx.uniqueID()

    def __str__(self):
        return indent(self.cx) + "return %d;\n" % self.uniqueID

    def unreachable(self):
        return True

    def eval(self, ecx):
        ecx.infinityCheck()
        ecx.returnValue = self.uniqueID
        return RETURN


def fuzzCode(cx):
    choices = [loopF, rangeForF, tryF, doubleF, ifF]
    if (cx.indent < 2):
        choices = choices*2
        choices = choices * 2
    choices += [waitF, returnF]
    if (cx.inLoop):
        choices += [breakF, continueF]
    choices = choices*3 + [throwF,throwF2, throwF3]
    choices = choices * 3 + [throwF, throwF2, throwF3]
    return cx.random.choice(choices)


def randomActor(index):
    while 1:
        cx = Context()
        cx.indent += 1
        actor = fuzzCode(cx)(cx)
        actor = compoundF( cx, [actor, returnF(cx)] )  # Add a return at the end if the end is reachable
        actor = compoundF(cx, [actor, returnF(cx)])  # Add a return at the end if the end is reachable
        name = "actorFuzz%d" % index
        text = ( "ACTOR Future<int> %s( FutureStream<int> inputStream, PromiseStream<int> outputStream, Future<Void> error ) {\n" % name
                 + "\tstate int ifstate = 0;\n"
                 + str(actor)
                 + "}" )
        ecx = actor.ecx = ExecContext( (i+1)*1000 for i in range(1000000) )
        text = ("ACTOR Future<int> %s( FutureStream<int> inputStream, PromiseStream<int> outputStream, Future<Void> error ) {\n" % name
                + "\tstate int ifstate = 0;\n"
                + str(actor)
                + "}")
        ecx = actor.ecx = ExecContext((i + 1) * 1000 for i in range(1000000))
        try:
            result = actor.eval(ecx)
        except InfiniteLoop:
            print( "Infinite loop for actor %s" % name )
            print("Infinite loop for actor %s" % name)
            continue
        if result == RETURN:
            ecx.out( ecx.returnValue )
            ecx.out(ecx.returnValue)
        elif result == THROW:
            ecx.out( 1000 )
            ecx.out(1000)
        else:
            print(text)
            raise Exception( "Invalid eval result: " + str(result) )
            raise Exception("Invalid eval result: " + str(result))
        actor.name = name
        actor.text = text

        return actor


testCaseCount = 30
outputFile = open( "ActorFuzz.actor.cpp", "wt" )
outputFile = open("ActorFuzz.actor.cpp", "wt")
print('// THIS FILE WAS GENERATED BY actorFuzz.py; DO NOT MODIFY IT DIRECTLY\n', file=outputFile)
print('#include "ActorFuzz.h"\n', file=outputFile)
print('#ifndef WIN32\n', file=outputFile)
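fuzzCode biases which construct the generator emits next by repeating entries in the candidate list before calling random.choice. A standalone sketch of that weighting trick; the string names are illustrative stand-ins, not the real construct classes:

import random

def build_choices():
    # Duplicating a list entry multiplies its selection weight under
    # random.choice(), which is exactly how fuzzCode biases its constructs.
    choices = ['loop', 'range_for', 'try']   # nesting constructs
    choices = choices * 2                    # weight 2 while nesting is shallow
    choices += ['wait', 'return']            # weight 1
    return choices * 3 + ['throw']           # triple everything, one rare 'throw'

choices = build_choices()
assert choices.count('loop') == 6 and choices.count('throw') == 1
assert random.choice(choices) in choices     # 6/25 'loop' vs 1/25 'throw'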
@@ -60,20 +60,26 @@ db = fdb.open(event_model="gevent")
## This defines a Subspace of keys ##
#####################################


class Subspace (object):
    def __init__(self, prefixTuple, rawPrefix=""):
        self.rawPrefix = rawPrefix + fdb.tuple.pack(prefixTuple)

    def __getitem__(self, name):
        return Subspace( (name,), self.rawPrefix )
        return Subspace((name,), self.rawPrefix)

    def key(self):
        return self.rawPrefix

    def pack(self, tuple):
        return self.rawPrefix + fdb.tuple.pack( tuple )
        return self.rawPrefix + fdb.tuple.pack(tuple)

    def unpack(self, key):
        assert key.startswith(self.rawPrefix)
        return fdb.tuple.unpack(key[len(self.rawPrefix):])

    def range(self, tuple=()):
        p = fdb.tuple.range( tuple )
        p = fdb.tuple.range(tuple)
        return slice(self.rawPrefix + p.start, self.rawPrefix + p.stop)
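The Subspace helper above namespaces keys by prepending a packed tuple prefix. A short usage sketch, assuming the class above is in scope and an fdb binding that accepts this layer's API version:

import fdb
import fdb.tuple

fdb.api_version(22)  # assumed; match the version the layer targets

users = Subspace(('app', 'users'))        # every key starts with pack(('app', 'users'))
key = users.pack(('alice', 7))
assert users.unpack(key) == ('alice', 7)  # round-trips the suffix tuple

r = users.range()  # slice covering the whole subspace; tr[r.start:r.stop] scans it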
@@ -91,6 +97,7 @@ class BulkLoader(Queue):
    Supports the use of multiple concurrent transactions for efficiency, with a
    default of 50 concurrent transactions.
    '''

    def __init__(self, number_producers=1, number_consumers=50, **kwargs):
        # Setting maxsize to the number of consumers will make producers
        # wait to put a task in the queue until some consumer is free

@@ -101,7 +108,8 @@ class BulkLoader(Queue):

    def _producer(self):
        # put will block if maxsize of queue is reached
        for data in self.reader(): self.put(data)
        for data in self.reader():
            self.put(data)

    def _consumer(self):
        try:

@@ -109,7 +117,8 @@ class BulkLoader(Queue):
                data = self.get(block=False)
                self.writer(db, data)
                gevent.sleep(0) # yield
        except Empty: pass
        except Empty:
            pass

    def produce_and_consume(self):
        producers = [gevent.spawn(self._producer) for _ in xrange(self._number_producers)]
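The __init__ comment above explains the backpressure scheme: the queue's maxsize equals the consumer count, so producers block as soon as every consumer is busy. A minimal gevent sketch of the same producer/consumer pattern, independent of the loader classes; the sentinel shutdown is an addition for the sketch, not part of the layer:

import gevent
from gevent.queue import Queue

NUM_CONSUMERS = 3
tasks = Queue(maxsize=NUM_CONSUMERS)   # producers block once every consumer is busy

def producer():
    for item in range(10):
        tasks.put(item)                # blocks while the queue is full
    for _ in range(NUM_CONSUMERS):
        tasks.put(None)                # sentinel: tell each consumer to stop

def consumer():
    while True:
        if tasks.get() is None:
            break
        gevent.sleep(0)                # yield to other greenlets, like _consumer above

workers = [gevent.spawn(consumer) for _ in range(NUM_CONSUMERS)]
gevent.joinall([gevent.spawn(producer)] + workers)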
@@ -158,6 +167,7 @@ class ReadCSV(BulkLoader):
    names and skip it. Otherwise, treat the first line as data to be read.
    Default is False.
    '''

    def __init__(self, number_producers=1, number_consumers=50, **kwargs):
        super(ReadCSV, self).__init__(number_producers, number_consumers, **kwargs)
        self._filename = kwargs.get('filename', '*')

@@ -168,7 +178,8 @@ class ReadCSV(BulkLoader):

    def reader(self):
        for fully_pathed in glob.iglob(os.path.join(self._dir, self._filename)):
            if not os.path.isfile(fully_pathed): continue
            if not os.path.isfile(fully_pathed):
                continue
            with open(fully_pathed, 'rb') as csv_file:
                csv_reader = csv.reader(csv_file, delimiter=self._delimiter)
                first_line = True

@@ -198,6 +209,7 @@ class ReadJSON(BulkLoader):
    convert_numbers=<bool>. If True, returns byte strings rather than numbers or
    unicode in the deserialized object. Default is False.
    '''

    def __init__(self, number_producers=1, number_consumers=50, **kwargs):
        super(ReadJSON, self).__init__(number_producers, number_consumers, **kwargs)
        self._filename = kwargs.get('filename', '*')

@@ -220,7 +232,8 @@ class ReadJSON(BulkLoader):

    def reader(self):
        for fully_pathed in glob.iglob(os.path.join(self._dir, self._filename)):
            if not os.path.isfile(fully_pathed): continue
            if not os.path.isfile(fully_pathed):
                continue
            with open(fully_pathed, 'r') as json_file:
                if self._convert_numbers:
                    json_object = json.load(json_file,

@@ -247,6 +260,7 @@ class ReadBlob(BulkLoader):

    chunk_size=<int>. Number of bytes to read from file. Default is 10240.
    '''

    def __init__(self, number_producers=1, number_consumers=50, **kwargs):
        super(ReadBlob, self).__init__(number_producers, number_consumers, **kwargs)
        self._filename = kwargs.get('filename', '*')

@@ -255,16 +269,19 @@ class ReadBlob(BulkLoader):

    def reader(self):
        files_found = list(glob.iglob(os.path.join(self._dir, self._filename)))
        if len(files_found) != 1: raise Exception("Must specify single file")
        if len(files_found) != 1:
            raise Exception("Must specify single file")
        fully_pathed = files_found[0]
        if not os.path.isfile(fully_pathed): raise Exception("No file found")
        if not os.path.isfile(fully_pathed):
            raise Exception("No file found")
        with open(fully_pathed, 'rb') as blob_file:
            file_size = os.stat(fully_pathed).st_size;
            file_size = os.stat(fully_pathed).st_size
            position = 0
            while (position < file_size):
                try:
                    chunk = blob_file.read(self._chunk_size)
                    if not chunk: break;
                    if not chunk:
                        break
                    offset = position
                    position += self._chunk_size
                    yield offset, chunk

@@ -291,12 +308,14 @@ class WriteKVP(BulkLoader):
    clear=<bool>. If True, clears the specified subspace before writing to it.
    Default is False.
    '''

    def __init__(self, number_producers=1, number_consumers=50, **kwargs):
        super(WriteKVP, self).__init__(number_producers, number_consumers, **kwargs)
        self._empty_value = kwargs.get('empty_value', False)
        self._subspace = kwargs.get('subspace', Subspace(('bulk_kvp',)))
        self._clear = kwargs.get('clear', False)
        if self._clear: clear_subspace(db, self._subspace)
        if self._clear:
            clear_subspace(db, self._subspace)

    @fdb.transactional
    def writer(self, tr, data):

@@ -319,11 +338,13 @@ class WriteDoc(BulkLoader):
    Can be used to load a specified collection or arbitrary subdocument.
    Defaults to root.
    '''

    def __init__(self, number_producers=1, number_consumers=50, **kwargs):
        super(WriteDoc, self).__init__(number_producers, number_consumers, **kwargs)
        self._document = kwargs.get('document', simpledoc.root)
        self._clear = kwargs.get('clear', False)
        if self._clear: _simpledoc_clear(db, self._document)
        if self._clear:
            _simpledoc_clear(db, self._document)

    def writer(self, tr, data):
        _writer_doc(db, self._document, data)

@@ -363,11 +384,13 @@ class WriteBlob(BulkLoader):
    blob=<Doc()>. Specifies the Blob object to which data is written. Default is
    Blob(Subspace('bulk_blob',)).
    '''

    def __init__(self, number_producers=1, number_consumers=50, **kwargs):
        super(WriteBlob, self).__init__(number_producers, number_consumers, **kwargs)
        self._blob = kwargs.get('blob', blob.Blob(Subspace(('bulk_blob',))))
        self._clear = kwargs.get('clear', False)
        if self._clear: self._blob.delete(db)
        if self._clear:
            self._blob.delete(db)

    @fdb.transactional
    def writer(self, tr, data):

@@ -414,4 +437,4 @@ def test_blob_blob():
    tasks = BlobToBlob(1, 5, dir='BlobDir', filename='hamlet.txt', blob=my_blob)
    tasks.produce_and_consume()

'''
'''
@@ -27,6 +27,7 @@ _packedPrefix = 'packed'

fdb.api_version(16)


class _MergedData:
    def __init__(self):
        self.results = []

@@ -35,13 +36,14 @@ class _MergedData:
        self.packedIndex = 0
        pass


class Column:
    def __init__(self, columnName):
        self.columnName = columnName
        self.packFetchCount = 10
        self.targetChunkSize = 5000
        self.maxChunkSize = 10000
        #self.mergeChunkSize = 2500
        # self.mergeChunkSize = 2500

    def _getSubKeyTuple(self, key):
        return fdb.tuple.unpack(key)[2:]

@@ -52,15 +54,15 @@ class Column:
    def _isPackedKey(self, key):
        return str(key).startswith(fdb.tuple.pack((self.columnName, _packedPrefix)))

    #This results in slight inefficiencies when the key being searched for comes before the first packed segment with strictRange=False
    def _getPackedData(self, key, packedRange, requireKey = True, strictRange = True):
    # This results in slight inefficiencies when the key being searched for comes before the first packed segment with strictRange=False
    def _getPackedData(self, key, packedRange, requireKey=True, strictRange=True):
        found = False
        keyRange = None
        packedKeyRange = None
        packedData = None

        for k,v in packedRange:
            #print 'Searching ' + k + ' for ' + key
        for k, v in packedRange:
            # print 'Searching ' + k + ' for ' + key
            if self._isPackedKey(k):
                if found:
                    endRange = self._getSubKeyTuple(k)

@@ -69,7 +71,7 @@ class Column:

                keyRange = self._getSubKeyTuple(k)
                packedKeyRange = keyRange
                #print str(keyRange)
                # print str(keyRange)
                if (not requireKey or key >= keyRange[0]) and key <= keyRange[1]:
                    if strictRange:
                        packedData = _PackedData(v)

@@ -88,43 +90,47 @@ class Column:
        return [packedKeyRange, keyRange, packedData]

    def _getPackedRange(self, tr, key):
        return tr.get_range(fdb.KeySelector.last_less_than(fdb.tuple.pack((self.columnName, _packedPrefix, key + chr(0)))), fdb.tuple.pack((self.columnName, _packedPrefix + chr(0))), 2)
        return tr.get_range(fdb.KeySelector.last_less_than(fdb.tuple.pack((self.columnName, _packedPrefix, key + chr(0)))),
                            fdb.tuple.pack((self.columnName, _packedPrefix + chr(0))), 2)

    def _getUnpackedData(self, tr, key):
        return tr[fdb.tuple.pack((self.columnName, _unpackedPrefix, key))]

    def _getUnpackedRange(self, tr, keyBegin, keyEnd, limit):
        return tr.get_range(fdb.tuple.pack((self.columnName, _unpackedPrefix, keyBegin)), fdb.tuple.pack((self.columnName, _unpackedPrefix, keyEnd)), limit)
        return tr.get_range(fdb.tuple.pack((self.columnName, _unpackedPrefix, keyBegin)),
                            fdb.tuple.pack((self.columnName, _unpackedPrefix, keyEnd)), limit)

    def _mergeResults(self, packed, unpacked, totalUnpacked, packedIndex = 0, minPackedKey = '', maxKey = None):
    def _mergeResults(self, packed, unpacked, totalUnpacked, packedIndex=0, minPackedKey='', maxKey=None):
        data = _MergedData()
        if packed is None:
            #print 'No merge necessary'
            # print 'No merge necessary'
            data.finishedUnpack = True
            data.finishedPack = True
            data.packedIndex = 0

            if maxKey is None:
                data.results = [fdb.KeyValue(self._getSubKey(k), v) for k,v in unpacked]
                data.results = [fdb.KeyValue(self._getSubKey(k), v) for k, v in unpacked]
            else:
                for k,v in unpacked:
                for k, v in unpacked:
                    if k < maxKey:
                        data.results.append(fdb.KeyValue(self._getSubKey(k), v))
                    else:
                        data.finishedUnpack = False
                        break
        else:
            #print 'Merging packed'
            # print 'Merging packed'
            unpackedCount = 0
            for k,v in unpacked:
            for k, v in unpacked:
                subKey = self._getSubKey(k)
                #print 'Unpacked: ' + subKey
                # print 'Unpacked: ' + subKey
                if maxKey is not None and subKey >= maxKey:
                    #print 'subKey >= maxKey %s, %s' % (subKey, maxKey)
                    # print 'subKey >= maxKey %s, %s' % (subKey, maxKey)
                    break

                exactMatch = False
                while packedIndex < len(packed.rows) and packed.rows[packedIndex].key <= subKey and (maxKey is None or packed.rows[packedIndex].key < maxKey):
                while packedIndex < len(packed.rows) \
                        and packed.rows[packedIndex].key <= subKey \
                        and (maxKey is None or packed.rows[packedIndex].key < maxKey):
                    exactMatch = packed.rows[packedIndex].key == subKey
                    if packed.rows[packedIndex].key < subKey and packed.rows[packedIndex].key >= minPackedKey:
                        data.results.append(packed.rows[packedIndex])

@@ -135,13 +141,13 @@ class Column:
                    if exactMatch:
                        data.results.append(fdb.KeyValue(self._getSubKey(k), v))

                    #print 'packedIndex == len(packed.rows)'
                    # print 'packedIndex == len(packed.rows)'
                    break

                data.results.append(fdb.KeyValue(self._getSubKey(k), v))
                unpackedCount += 1

            #print "Packed index: %d, Unpacked: %d, total: %d" % (packedIndex, unpackedCount, totalUnpacked)
            # print "Packed index: %d, Unpacked: %d, total: %d" % (packedIndex, unpackedCount, totalUnpacked)
            if unpackedCount < totalUnpacked:
                while packedIndex < len(packed.rows) and (maxKey is None or packed.rows[packedIndex].key < maxKey):
                    if packed.rows[packedIndex].key >= minPackedKey:

@@ -153,8 +159,8 @@ class Column:
        data.finishedUnpack = unpackedCount == totalUnpacked
        data.packedIndex = packedIndex

        #print str(data.results)
        #print 'Num Results: %d' % len(data.results)
        # print str(data.results)
        # print 'Num Results: %d' % len(data.results)

        return data

@@ -182,16 +188,16 @@ class Column:
    def delete(self, tr):
        tr.clear_range_startswith(self.columnName)

    def getColumnStream(self, db, startRow = ''):
    def getColumnStream(self, db, startRow=''):
        return _ColumnStream(db, self, startRow)

    #This function is not fully transactional. Each compressed block will be created in a transaction
    def pack(self, db, startRow = '', endRow = '\xff'):
    # This function is not fully transactional. Each compressed block will be created in a transaction
    def pack(self, db, startRow='', endRow='\xff'):
        currentRow = startRow
        numFetched = self.packFetchCount

        while numFetched == self.packFetchCount:
            #print 'outer: \'' + repr(currentRow) + '\''
            # print 'outer: \'' + repr(currentRow) + '\''
            try:
                tr = db.create_transaction()
                packedIndex = 0

@@ -200,8 +206,8 @@ class Column:
                newPack = _PackedData()
                oldRows = []

                while 1:
                    #print 'inner: \'' + repr(currentRow) + '\''
                while True:
                    # print 'inner: \'' + repr(currentRow) + '\''
                    unpacked = list(self._getUnpackedRange(tr, lastRow, endRow, self.packFetchCount))

                    unpackedCount = len(unpacked)

@@ -213,7 +219,7 @@ class Column:
                        packedRange = self._getPackedRange(tr, subKey)
                        [packedKeyRange, keyRange, packedData] = self._getPackedData(subKey, packedRange, False, False)
                        if packedKeyRange is not None:
                            #print 'Deleting old rows'
                            # print 'Deleting old rows'
                            oldRows.append(fdb.tuple.pack((self.columnName, _packedPrefix, packedKeyRange[0], packedKeyRange[1])))

                        maxKey = None

@@ -225,29 +231,30 @@ class Column:
                        oldRows.append(fdb.tuple.pack((self.columnName, _unpackedPrefix, row.key)))
                        newPack.addRow(row)
                        lastRow = row.key
                        #print 'Set lastRow = \'' + repr(lastRow) + '\''
                        # print 'Set lastRow = \'' + repr(lastRow) + '\''

                    lastRow = lastRow + chr(0)
                    if (maxKey is not None and merged.finishedPack) or (maxKey is None and newPack.bytes > self.targetChunkSize):
                        break

                #print 'Deleting rows'
                # print 'Deleting rows'
                for row in oldRows:
                    #print 'Deleting row ' + repr(row)
                    # print 'Deleting row ' + repr(row)
                    del tr[row]

                for k,v in newPack.getPackedKeyValues(self, self.targetChunkSize, self.maxChunkSize):
                for k, v in newPack.getPackedKeyValues(self, self.targetChunkSize, self.maxChunkSize):
                    tr[k] = v

                tr.commit().wait()
                currentRow = lastRow
                numFetched = unpackedCount
            except fdb.FDBError as e:
                if e.code == 1007: #past_version
                if e.code == 1007:  # past_version
                    pass
                    #FIXME: Unpack the overlapping packed block and try again
                    # FIXME: Unpack the overlapping packed block and try again
                tr.on_error(e.code).wait()


class _ColumnStream:

    def __init__(self, db, column, startKey):

@@ -272,37 +279,37 @@ class _ColumnStream:
            return value

    def _readNextRow(self, db):
        #print 'Reading next row'
        # print 'Reading next row'
        if self.resultsIndex >= len(self.results):
            #print 'Fetching rows'
            # print 'Fetching rows'
            self._fetchRows(db)

        if self.resultsIndex >= len(self.results):
            #print 'Finished iterating: (%d/%d)' % (self.resultsIndex, len(self.results))
            # print 'Finished iterating: (%d/%d)' % (self.resultsIndex, len(self.results))
            return None

        else:
            self.currentKey = self.results[self.resultsIndex].key
            value = self.results[self.resultsIndex].value
            self.resultsIndex += 1
            #print 'Returning value (%s, %s)' % (self.currentKey, value)
            # print 'Returning value (%s, %s)' % (self.currentKey, value)
            return (self.currentKey, value)

    @fdb.transactional
    def _fetchRows(self, tr):

        if self.firstRead:
            #print 'First fetch'
            # print 'First fetch'
            startKey = self.currentKey
        else:
            #print 'Subsequent fetch %s' % self.currentKey
            # print 'Subsequent fetch %s' % self.currentKey
            startKey = self.currentKey + chr(0)

        #print 'Using start key %s' % startKey
        # print 'Using start key %s' % startKey

        #Read next packed and unpacked entries
        #FIXME: Should we read unpacked after getting the result of the packed data? If we do, then we can more accurately limit the number
        #of results that we get back
        # Read next packed and unpacked entries
        # FIXME: Should we read unpacked after getting the result of the packed data? If we do, then we can more accurately limit the number
        # of results that we get back
        unpacked = self.column._getUnpackedRange(tr, startKey, '\xff', self.fetchCount)

        if self.packedData is None:

@@ -312,21 +319,22 @@ class _ColumnStream:
        merged = self.column._mergeResults(self.packedData, unpacked, self.fetchCount, self.packedIndex, startKey)

        if merged.finishedPack:
            #print 'reset packed'
            # print 'reset packed'
            self.packedData = None
            self.packedIndex = 0
        else:
            #print 'more packed %d' % merged.packedIndex
            # print 'more packed %d' % merged.packedIndex
            self.packedIndex = merged.packedIndex

        self.results = merged.results

        #print 'Getting range %s - %s (%d)' % (startKey, fdb.tuple.pack((self.column.columnName + chr(0))), self.fetchCount)
        # print 'Getting range %s - %s (%d)' % (startKey, fdb.tuple.pack((self.column.columnName + chr(0))), self.fetchCount)
        self.resultsIndex = 0
        self.firstRead = False


class _PackedData:
    def __init__(self, packedValue = None):
    def __init__(self, packedValue=None):
        self.rows = []
        self.bytes = 0

@@ -334,7 +342,7 @@ class _PackedData:
            self._unpack(packedValue)

    def addRow(self, row):
        #print 'adding row %s' % row.key
        # print 'adding row %s' % row.key
        self.rows.append(row)
        self.bytes += len(row.key) + len(row.value) + 12

@@ -376,7 +384,8 @@ class _PackedData:
            header = ''.join(headerItems)
            body = ''.join(row.value for row in self.rows[startRowIndex:rowIndex])

            results.append(fdb.KeyValue(fdb.tuple.pack((column.columnName, _packedPrefix, startKey, endKey)), struct.pack('i', len(header)) + header + body))
            results.append(fdb.KeyValue(fdb.tuple.pack((column.columnName, _packedPrefix, startKey, endKey)),
                                        struct.pack('i', len(header)) + header + body))
            currentByte += packBytes

        return results

@@ -384,14 +393,14 @@ class _PackedData:
    def _unpack(self, str):
        self.bytes = len(str)
        headerLength = struct.unpack('i', str[0:4])[0]
        header = str[4:4+headerLength]
        body = str[4+headerLength:]
        header = str[4:4 + headerLength]
        body = str[4 + headerLength:]

        index = 0
        while index < headerLength:
            #print 'header length: %d, %d' % (len(self.header), index)
            (keyLength, valueLength, valueOffset) = struct.unpack('iii', header[index:index+12])
            key = header[index+12:index+12+keyLength]
            # print 'header length: %d, %d' % (len(self.header), index)
            (keyLength, valueLength, valueOffset) = struct.unpack('iii', header[index:index + 12])
            key = header[index + 12:index + 12 + keyLength]
            index = index + 12 + keyLength
            value = body[valueOffset:valueOffset+valueLength]
            value = body[valueOffset:valueOffset + valueLength]
            self.rows.append(fdb.KeyValue(key, value))
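_unpack above fixes the packed-block layout: a four-byte header length, then one 12-byte (keyLength, valueLength, valueOffset) record plus the key bytes per row, then the concatenated values as the body. A standalone round-trip of that exact format:

import struct

rows = [(b'apple', b'red'), (b'banana', b'yellow')]

# Build a block the way getPackedKeyValues assembles one: header records, then body.
headerItems, bodyItems, offset = [], [], 0
for key, value in rows:
    headerItems.append(struct.pack('iii', len(key), len(value), offset) + key)
    bodyItems.append(value)
    offset += len(value)
header = b''.join(headerItems)
block = struct.pack('i', len(header)) + header + b''.join(bodyItems)

# Read it back exactly as _unpack does.
headerLength = struct.unpack('i', block[0:4])[0]
header, body = block[4:4 + headerLength], block[4 + headerLength:]
index, out = 0, []
while index < headerLength:
    keyLength, valueLength, valueOffset = struct.unpack('iii', header[index:index + 12])
    out.append((header[index + 12:index + 12 + keyLength],
                body[valueOffset:valueOffset + valueLength]))
    index += 12 + keyLength

assert out == rows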
@@ -41,30 +41,39 @@ import fdb.tuple

fdb.api_version(22)


###################################
# This defines a Subspace of keys #
###################################


class Subspace (object):
    def __init__(self, prefixTuple, rawPrefix=""):
        self.rawPrefix = rawPrefix + fdb.tuple.pack(prefixTuple)

    def __getitem__(self, name):
        return Subspace( (name,), self.rawPrefix )
        return Subspace((name,), self.rawPrefix)

    def key(self):
        return self.rawPrefix

    def pack(self, tuple):
        return self.rawPrefix + fdb.tuple.pack( tuple )
        return self.rawPrefix + fdb.tuple.pack(tuple)

    def unpack(self, key):
        assert key.startswith(self.rawPrefix)
        return fdb.tuple.unpack(key[len(self.rawPrefix):])

    def range(self, tuple=()):
        p = fdb.tuple.range( tuple )
        p = fdb.tuple.range(tuple)
        return slice(self.rawPrefix + p.start, self.rawPrefix + p.stop)


#########
# Queue #
#########


class Queue:
    # Public functions
    def __init__(self, subspace, highContention=True):

@@ -119,7 +128,7 @@ class Queue:
        return self._conflictedItem.pack((subKey,))

    def _randID(self):
        return os.urandom(20) # this relies on good random data from the OS to avoid collisions
        return os.urandom(20)  # this relies on good random data from the OS to avoid collisions

    def _encodeValue(self, value):
        return fdb.tuple.pack((value,))

@@ -191,7 +200,7 @@ class Queue:

        i = 0
        pops = list(pops)
        for pop,(k,v) in zip(pops, items):
        for pop, (k, v) in zip(pops, items):
            key = self._conflictedPop.unpack(pop.key)
            storageKey = self._conflictedItemKey(key[1])
            tr[storageKey] = v

@@ -275,10 +284,12 @@ class Queue:
        except fdb.FDBError as e:
            tr.on_error(e.code).wait()


##################
# Internal tests #
##################


def queue_test(db):
    queue = Queue(Subspace(('queue_test',)), False)
    print 'Clear Queue'

@@ -300,10 +311,12 @@ def queue_test(db):
    queue.clear(db)
    print 'Empty? %s' % queue.empty(db)


######################
# Queue sample usage #
######################


# caution: modifies the database!
def queue_single_client_example(db):
    queue = Queue(Subspace(('queue_example',)), False)

@@ -315,18 +328,22 @@ def queue_single_client_example(db):
    for i in range(10):
        print queue.pop(db)


def push_thread(queue, db, id, num):
    for i in range(num):
        queue.push(db, '%d.%d' % (id, i))


def pop_thread(queue, db, id, num):
    for i in range(num):
        queue.pop(db)

    print 'Finished pop thread %d' % id


import threading


def queue_multi_client_example(db):
    descriptions = ["simple queue", "high contention queue"]

@@ -335,19 +352,24 @@ def queue_multi_client_example(db):
        queue = Queue(Subspace(('queue_example',)), highContention > 0)
        queue.clear(db)

        pushThreads = [ threading.Thread(target=push_thread, args=(queue, db, i, 100)) for i in range(10) ]
        popThreads = [ threading.Thread(target=pop_thread, args=(queue, db, i, 100)) for i in range(10) ]
        pushThreads = [threading.Thread(target=push_thread, args=(queue, db, i, 100)) for i in range(10)]
        popThreads = [threading.Thread(target=pop_thread, args=(queue, db, i, 100)) for i in range(10)]

        start = time.time()

        for push in pushThreads: push.start()
        for pop in popThreads: pop.start()
        for push in pushThreads: push.join()
        for pop in popThreads: pop.join()
        for push in pushThreads:
            push.start()
        for pop in popThreads:
            pop.start()
        for push in pushThreads:
            push.join()
        for pop in popThreads:
            pop.join()

        end = time.time()
        print 'Finished %s in %f seconds' % (descriptions[highContention], end - start)


def queue_example(db):
    print "Running single client example:"
    queue_single_client_example(db)

@@ -355,10 +377,10 @@ def queue_example(db):
    print "\nRunning multi-client example:"
    queue_multi_client_example(db)


# caution: modifies the database!
if __name__ == '__main__':
    db = fdb.open()

    queue_example(db)
    #queue_test(db)
    # queue_test(db)
@@ -23,12 +23,14 @@ import fdb.tuple

fdb.api_version(16)


def nextStopToNone(gen):
    try:
        return gen.next()
    except StopIteration:
        return None


class FdbSet (object):
    def __init__(self, path):
        self._path = path

@@ -36,13 +38,13 @@ class FdbSet (object):
    @fdb.transactional
    def length(self, tr):
        setLength = 0
        for k,v in tr[fdb.tuple.range((self._path,))]:
        for k, v in tr[fdb.tuple.range((self._path,))]:
            setLength += 1
        return setLength

    @fdb.transactional
    def iterate(self, tr):
        for k,v in tr[fdb.tuple.range((self._path,))]:
        for k, v in tr[fdb.tuple.range((self._path,))]:
            yield fdb.tuple.unpack(k)[1]

    @fdb.transactional

@@ -50,18 +52,18 @@ class FdbSet (object):
        return tr[fdb.tuple.pack((self._path, x))].present()

    @fdb.transactional
    def issubset(self, tr, t): #s <= t
        for k,v in tr[fdb.tuple.range((self._path,))]:
    def issubset(self, tr, t):  # s <= t
        for k, v in tr[fdb.tuple.range((self._path,))]:
            if not t.contains(tr, fdb.tuple.unpack(k)[1]):
                return False
        return True

    @fdb.transactional
    def issuperset(self, tr, t): #s >= t
    def issuperset(self, tr, t):  # s >= t
        return t.issubset(tr, self)

    @fdb.transactional
    def union(self, tr, t): #s | t
    def union(self, tr, t):  # s | t
        s_gen = self.iterate(tr)
        t_gen = t.iterate(tr)
        s_key = nextStopToNone(s_gen)

@@ -82,7 +84,7 @@ class FdbSet (object):
            t_key = nextStopToNone(t_gen)

    @fdb.transactional
    def intersection(self, tr, t): #s & t
    def intersection(self, tr, t):  # s & t
        s_key = self.first_greater_or_equal(tr, "")
        t_key = t.first_greater_or_equal(tr, "")

@@ -99,7 +101,7 @@ class FdbSet (object):
            t_key = t.first_greater_than(tr, t_key)

    @fdb.transactional
    def difference(self, tr, t): #s - t
    def difference(self, tr, t):  # s - t
        s_gen = self.iterate(tr)
        s_key = nextStopToNone(s_gen)
        t_key = t.first_greater_or_equal(tr, "")

@@ -116,7 +118,7 @@ class FdbSet (object):
        s_key = nextStopToNone(s_gen)

    @fdb.transactional
    def symmetric_difference(self, tr, t): #s ^ t
    def symmetric_difference(self, tr, t):  # s ^ t
        s_gen = self.iterate(tr)
        t_gen = t.iterate(tr)
        s_key = nextStopToNone(s_gen)

@@ -136,12 +138,12 @@ class FdbSet (object):
            t_key = nextStopToNone(t_gen)

    @fdb.transactional
    def update(self, tr, t): #s |= t T
    def update(self, tr, t):  # s |= t T
        for k in t.iterate(tr):
            self.add(tr, k)

    @fdb.transactional
    def intersection_update(self, tr, t): #s &= t
    def intersection_update(self, tr, t):  # s &= t
        lastValue = fdb.tuple.pack((self._path,))
        for k in self.intersection(tr, t):
            if k != lastValue:

@@ -150,12 +152,12 @@ class FdbSet (object):
        del tr[lastValue + '\x00':fdb.tuple.pack((self._path + chr(0),))]

    @fdb.transactional
    def difference_update(self, tr, t): #s -= t
    def difference_update(self, tr, t):  # s -= t
        for k in self.intersection(tr, t):
            del tr[fdb.tuple.pack((self._path,k))]
            del tr[fdb.tuple.pack((self._path, k))]

    @fdb.transactional
    def symmetric_difference_update(self, tr, t): #s ^ t
    def symmetric_difference_update(self, tr, t):  # s ^ t
        s_gen = self.iterate(tr)
        t_gen = t.iterate(tr)
        s_key = nextStopToNone(s_gen)

@@ -177,17 +179,17 @@ class FdbSet (object):

    @fdb.transactional
    def add(self, tr, x):
        tr[fdb.tuple.pack((self._path,x))] = ""
        tr[fdb.tuple.pack((self._path, x))] = ""

    @fdb.transactional
    def remove(self, tr, x):
        if tr[fdb.tuple.pack((self._path,x))] == None:
        if tr[fdb.tuple.pack((self._path, x))] == None:
            raise KeyError
        del tr[fdb.tuple.pack((self._path,x))]
        del tr[fdb.tuple.pack((self._path, x))]

    @fdb.transactional
    def discard(self, tr, x):
        del tr[fdb.tuple.pack((self._path,x))]
        del tr[fdb.tuple.pack((self._path, x))]

    @fdb.transactional
    def pop(self, tr):

@@ -218,6 +220,7 @@ class FdbSet (object):
    def _keyInRange(self, key):
        return key < fdb.tuple.pack((self._path + chr(0),))


def test(db):
    print "starting set test"
    tr = db.create_transaction()
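union and difference above walk two key-ordered iterators in lockstep via nextStopToNone instead of materializing either set. The merge skeleton, shown here on plain sorted lists; the layer applies the same advance rules to its fdb range reads:

def next_or_none(gen):
    # Same role as nextStopToNone above.
    try:
        return next(gen)
    except StopIteration:
        return None

def union(s_sorted, t_sorted):
    s_gen, t_gen = iter(s_sorted), iter(t_sorted)
    s_key, t_key = next_or_none(s_gen), next_or_none(t_gen)
    while s_key is not None or t_key is not None:
        if t_key is None or (s_key is not None and s_key < t_key):
            yield s_key
            s_key = next_or_none(s_gen)
        elif s_key is None or t_key < s_key:
            yield t_key
            t_key = next_or_none(t_gen)
        else:
            yield s_key  # equal: emit once, advance both sides
            s_key, t_key = next_or_none(s_gen), next_or_none(t_gen)

assert list(union([1, 3, 5], [2, 3, 6])) == [1, 2, 3, 5, 6]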
@@ -25,6 +25,7 @@ fdb.init("10.0.1.22:1234")

db = fdb.create_cluster("10.0.3.1:2181/evan_local").open_database("set")


class FdbTreap (object):
    def __init__(self, path):
        self._rootKey = path

@@ -32,20 +33,20 @@ class FdbTreap (object):

    @fdb.transactional
    def updateNode(self, tr, node):
        tr[fdb.tuple_to_key(self._path,node[0])] = fdb.tuple_to_key(node[1])
        tr[fdb.tuple_to_key(self._path, node[0])] = fdb.tuple_to_key(node[1])

    @fdb.transactional
    def updateRoot(self, tr, node):
        tr[self._rootKey] = fdb.tuple_to_key(self._path,node[0])
        tr[self._rootKey] = fdb.tuple_to_key(self._path, node[0])

    @fdb.transactional
    def parent(self, tr, key):
        #find parent
        for k,v in tr.get_range(fdb.last_less_than(fdb.tuple_to_key(self._path,key)),
                fdb.first_greater_than(fdb.tuple_to_key(self._path,key)) + 1, 2):
        # find parent
        for k, v in tr.get_range(fdb.last_less_than(fdb.tuple_to_key(self._path, key)),
                                 fdb.first_greater_than(fdb.tuple_to_key(self._path, key)) + 1, 2):
            parentValue = fdb.key_to_tuple(v)
            if parentValue[0] == key or parentValue[1] == key:
                return tuple(fdb.key_to_tuple(k)[1],parentValue)
                return tuple(fdb.key_to_tuple(k)[1], parentValue)
        return None

    @fdb.transactional

@@ -80,12 +81,12 @@ class FdbTreap (object):
    def setKey(self, tr, key, value, metric):
        isNew = True
        isRoot = True
        child = tuple(key,tuple("","",random.random(),metric,value))
        child = tuple(key, tuple("", "", random.random(), metric, value))
        parent = tuple()

        #find self or parent
        for k,v in tr.get_range(fdb.last_less_than(fdb.tuple_to_key(self._path,key)),
                fdb.first_greater_than(fdb.tuple_to_key(self._path,key)) + 1, 2):
        # find self or parent
        for k, v in tr.get_range(fdb.last_less_than(fdb.tuple_to_key(self._path, key)),
                                 fdb.first_greater_than(fdb.tuple_to_key(self._path, key)) + 1, 2):
            isRoot = False
            node = tuple(fdb.key_to_tuple(k)[1], fdb.key_to_tuple(v))
            if node[0] == key:

@@ -104,16 +105,16 @@ class FdbTreap (object):
                parent[1][0] = key
                break

        #insert root
        # insert root
        if isRoot:
            self.updateRoot(tr, child)

        #update parent
        # update parent
        if isNew:
            self.updateNode(tr, parent)

        #insert self
        # insert self
        self.updateNode(tr, child)

        #balance
        # balance
        self.balance(tr, parent, child)
@@ -31,26 +31,34 @@ import threading

fdb.api_version(22)


###################################
# This defines a Subspace of keys #
###################################


class Subspace (object):
    def __init__(self, prefixTuple, rawPrefix=""):
        self.rawPrefix = rawPrefix + fdb.tuple.pack(prefixTuple)

    def __getitem__(self, name):
        return Subspace( (name,), self.rawPrefix )
        return Subspace((name,), self.rawPrefix)

    def key(self):
        return self.rawPrefix

    def pack(self, tuple):
        return self.rawPrefix + fdb.tuple.pack( tuple )
        return self.rawPrefix + fdb.tuple.pack(tuple)

    def unpack(self, key):
        assert key.startswith(self.rawPrefix)
        return fdb.tuple.unpack(key[len(self.rawPrefix):])

    def range(self, tuple=()):
        p = fdb.tuple.range( tuple )
        p = fdb.tuple.range(tuple)
        return slice(self.rawPrefix + p.start, self.rawPrefix + p.stop)


########################
# _ImplicitTransaction #
########################

@@ -64,6 +72,7 @@ class Subspace (object):
# vector.push(1)
# ...


class _ImplicitTransaction:
    def __init__(self, vector, tr):
        self.vector = vector

@@ -79,28 +88,30 @@ class _ImplicitTransaction:
    def __exit__(self, type, value, traceback):
        self.vector.local.tr = self.initialValue


##########
# Vector #
##########

## Vector stores each of its values using its index as the key.
## The size of a vector is equal to the index of its last key + 1.
# Vector stores each of its values using its index as the key.
# The size of a vector is equal to the index of its last key + 1.
##
## For indexes smaller than the vector's size that have no associated key
## in the database, the value will be the specified defaultValue.
# For indexes smaller than the vector's size that have no associated key
# in the database, the value will be the specified defaultValue.
##
## If the last value in the vector has the default value, its key will
## always be set so that size can be determined.
# If the last value in the vector has the default value, its key will
# always be set so that size can be determined.
##
## By creating Vector with a Subspace, all kv pairs modified by the
## layer will have keys that start within that Subspace.
# By creating Vector with a Subspace, all kv pairs modified by the
# layer will have keys that start within that Subspace.


class Vector:
    """Represents a potentially sparse array in FoundationDB."""

    # Public functions

    def __init__(self, subspace, defaultValue = ''):
    def __init__(self, subspace, defaultValue=''):
        self.subspace = subspace
        self.defaultValue = defaultValue
        self.local = threading.local()
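The comment block above defines the sparse encoding: only explicitly written indexes get keys, size is the last stored index plus one, and the last element is always written even when it equals defaultValue. A conceptual sketch, with a plain dict standing in for the subspace's key/value pairs:

# State left behind by, say, v.set(0, 'a'); v.set(5, 'b') with defaultValue = '':
stored = {
    0: 'a',   # key = subspace.pack((0,)), value = fdb.tuple.pack(('a',))
    5: 'b',   # the last index always has a key, so size() stays computable
}

def size(stored):
    return max(stored) + 1 if stored else 0   # index of last key + 1

def get(stored, index, default=''):
    if index >= size(stored):
        raise IndexError('vector index out of range')
    return stored.get(index, default)         # sparse entries read as the default

assert size(stored) == 6
assert get(stored, 3) == ''   # indexes 1-4 are represented sparsely
assert get(stored, 5) == 'b'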
@@ -183,7 +194,6 @@ class Vector:
        else:
            return self.get(index)

    # Private functions

    @fdb.transactional

@@ -194,7 +204,7 @@ class Vector:
    def _back(self, tr):
        keyRange = self.subspace.range()
        last = tr.get_range(keyRange.start, keyRange.stop, 1, True)
        for k,v in last:
        for k, v in last:
            return fdb.tuple.unpack(v)[0]
        return None

@@ -218,7 +228,7 @@ class Vector:

        # Second to last item is being represented sparsely
        elif len(lastTwo) == 1 or indices[0] > indices[1] + 1:
            tr[self._key_at(indices[0]-1)] = fdb.tuple.pack((self.defaultValue,))
            tr[self._key_at(indices[0] - 1)] = fdb.tuple.pack((self.defaultValue,))

        del tr[lastTwo[0].key]
        return fdb.tuple.unpack(lastTwo[0].value)[0]

@@ -253,8 +263,8 @@ class Vector:
        start = self._key_at(index)
        end = self.subspace.range().stop

        output = tr.get_range(start,end,1)
        for k,v in output:
        output = tr.get_range(start, end, 1)
        for k, v in output:
            # The requested index had an associated key
            if(start == k):
                return fdb.tuple.unpack(v)[0]

@@ -289,7 +299,7 @@ class Vector:
        if step > 0:
            start = self._key_at(startIndex)
        else:
            end = self._key_at(startIndex+1)
            end = self._key_at(startIndex + 1)

        if endIndex is None:
            if step > 0:

@@ -301,7 +311,7 @@ class Vector:
            if step > 0:
                end = self._key_at(endIndex)
            else:
                start = self._key_at(endIndex+1)
                start = self._key_at(endIndex + 1)

        result = tr.get_range(start, end, 0, step < 0)

@@ -310,11 +320,11 @@ class Vector:
            if step > 0:
                currentIndex = 0
            else:
                currentIndex = size-1
                currentIndex = size - 1
        elif currentIndex >= size:
            currentIndex = size-1
            currentIndex = size - 1

        for k,v in result:
        for k, v in result:
            keyIndex = self.subspace.unpack(k)[0]
            while (step > 0 and currentIndex < keyIndex) or (step < 0 and currentIndex > keyIndex):
                currentIndex = currentIndex + step

@@ -353,11 +363,11 @@ class Vector:

        # Check if the new end of the vector was being sparsely represented
        if self._size(tr) < length:
            tr[self._key_at(length-1)] = fdb.tuple.pack((self.defaultValue,))
            tr[self._key_at(length - 1)] = fdb.tuple.pack((self.defaultValue,))

    @fdb.transactional
    def _expand(self, tr, length, currentSize):
        tr[self._key_at(length-1)] = fdb.tuple.pack((self.defaultValue,))
        tr[self._key_at(length - 1)] = fdb.tuple.pack((self.defaultValue,))

    @fdb.transactional
    def _clear(self, tr):

@@ -380,6 +390,7 @@ class Vector:
# internal tests #
##################


# caution: modifies the database!
@fdb.transactional
def vector_test(tr):

@@ -398,11 +409,11 @@ def vector_test(tr):
    _print_vector(vector, tr)

    # Swap
    vector.swap(0,2)
    vector.swap(0, 2)
    _print_vector(vector, tr)

    # Pop
    print 'Popped:', vector.pop();
    print 'Popped:', vector.pop()
    _print_vector(vector, tr)

    # Clear

@@ -502,12 +513,15 @@ def vector_test(tr):
    _print_vector(vector, tr)
    print 'Size:', vector.size()


##############################
# Vector sample usage #
##############################


import sys


# caution: modifies the database!
@fdb.transactional
def vector_example(tr):

@@ -531,6 +545,7 @@ def vector_example(tr):
    vector.swap(1, 10)
    _print_vector(vector, tr)


def _print_vector(vector, tr):
    first = True
    with vector.use_transaction(tr):

@@ -543,8 +558,9 @@ def _print_vector(vector, tr):

    print


# caution: modifies the database!
if __name__ == '__main__':
    db = fdb.open()
    vector_example(db)
    #vector_test(db)
    # vector_test(db)
@@ -23,11 +23,10 @@
import os
import sys

sys.path[:0]=[os.path.join(os.path.dirname(__file__), '..', '..', 'bindings', 'python')]
sys.path[:0] = [os.path.join(os.path.dirname(__file__), '..', '..', 'bindings', 'python')]
import fdb
from pubsub_bigdoc import PubSub

db = fdb.open('10.0.3.1:2181/bbc', 'TwitDB')

ps = PubSub(db)


@@ -23,7 +23,7 @@
import os
import sys

sys.path[:0]=[os.path.join(os.path.dirname(__file__), '..', '..', 'bindings', 'python')]
sys.path[:0] = [os.path.join(os.path.dirname(__file__), '..', '..', 'bindings', 'python')]
import fdb
from pubsub_bigdoc import PubSub


@@ -239,10 +239,10 @@ class PubSub(object):
# Create the specified numbers of feeds and inboxes. Subscribe each inbox to a
# randomly selected subset of feeds.
def setup_topology(feeds, inboxes):
    feed_map = {f: ps.create_feed('Alice '+str(f)) for f in range(feeds)}
    feed_map = {f: ps.create_feed('Alice ' + str(f)) for f in range(feeds)}
    inbox_map = {}
    for i in range(inboxes):
        inbox_map[i] = ps.create_inbox('Bob '+str(i))
        inbox_map[i] = ps.create_inbox('Bob ' + str(i))
        for f in random.sample(xrange(feeds), random.randint(1, feeds)):
            ps.create_subscription(inbox_map[i], feed_map[f])
    return feed_map, inbox_map

@@ -274,7 +274,8 @@ def inbox_driver(inbox):
        get_and_print_inbox_messages(inbox)
        changed = (latest != inbox.latest_message)
        latest = inbox.latest_message
        if not changed and waited > wait_limit: break
        if not changed and waited > wait_limit:
            break
        waited += wait_inc
        time.sleep(wait_inc)

@@ -285,16 +286,21 @@ def run_threads(feed_map, inbox_map, messages):
                    for id in feed_map]
    inbox_threads = [threading.Thread(target=inbox_driver, args=(inbox_map[id],))
                     for id in inbox_map]
    for f in feed_threads: f.start()
    for i in inbox_threads: i.start()
    for f in feed_threads: f.join()
    for i in inbox_threads: i.join()
    for f in feed_threads:
        f.start()
    for i in inbox_threads:
        i.start()
    for f in feed_threads:
        f.join()
    for i in inbox_threads:
        i.join()


def sample_pubsub(feeds, inboxes, messages):
    feed_map, inbox_map = setup_topology(feeds, inboxes)
    run_threads(feed_map, inbox_map, messages)


if __name__ == "__main__":
    import random
    import threading


@@ -46,10 +46,10 @@ ps.clear_all_messages()
# Create the specified numbers of feeds and inboxes. Subscribe each inbox to a
# randomly selected subset of feeds.
def setup_topology(feeds, inboxes):
    feed_map = {f: ps.create_feed('Alice '+str(f)) for f in range(feeds)}
    feed_map = {f: ps.create_feed('Alice ' + str(f)) for f in range(feeds)}
    inbox_map = {}
    for i in range(inboxes):
        inbox_map[i] = ps.create_inbox('Bob '+str(i))
        inbox_map[i] = ps.create_inbox('Bob ' + str(i))
        for f in random.sample(xrange(feeds), random.randint(1, feeds)):
            ps.create_subscription(inbox_map[i], feed_map[f])
    return feed_map, inbox_map

@@ -81,7 +81,8 @@ def inbox_driver(inbox):
        get_and_print_inbox_messages(inbox)
        changed = (latest != inbox.latest_message)
        latest = inbox.latest_message
        if not changed and waited > wait_limit: break
        if not changed and waited > wait_limit:
            break
        waited += wait_inc
        time.sleep(wait_inc)

@@ -92,15 +93,20 @@ def run_threads(feed_map, inbox_map, messages):
                    for id in feed_map]
    inbox_threads = [threading.Thread(target=inbox_driver, args=(inbox_map[id],))
                     for id in inbox_map]
    for f in feed_threads: f.start()
    for i in inbox_threads: i.start()
    for f in feed_threads: f.join()
    for i in inbox_threads: i.join()
    for f in feed_threads:
        f.start()
    for i in inbox_threads:
        i.start()
    for f in feed_threads:
        f.join()
    for i in inbox_threads:
        i.join()


def sample_pubsub(feeds, inboxes, messages):
    feed_map, inbox_map = setup_topology(feeds, inboxes)
    run_threads(feed_map, inbox_map, messages)


if __name__ == "__main__":
    sample_pubsub(3, 3, 3)
@@ -60,49 +60,63 @@ Created on May 14, 2012
import os
import sys

sys.path[:0]=[os.path.join(os.path.dirname(__file__), '..', '..', 'bindings', 'python')]
sys.path[:0] = [os.path.join(os.path.dirname(__file__), '..', '..', 'bindings', 'python')]
import fdb
import struct


def key_for_feed(feed):
    return fdb.tuple_to_key('f', struct.pack('>Q', feed))


def key_for_feed_subscriber_count(feed):
    return fdb.tuple_to_key('f', struct.pack('>Q', feed), 'subCount')


def key_for_feed_message_count(feed):
    return fdb.tuple_to_key('f', struct.pack('>Q', feed), 'messCount')


def key_for_feed_subscriber(feed, inbox):
    return fdb.tuple_to_key('f', struct.pack('>Q', feed), 'subs', struct.pack('>Q', inbox))


def prefix_for_feed_subscribers(feed):
    return fdb.tuple_to_key('f', struct.pack('>Q', feed), 'subs')


def key_for_feed_watcher(feed, inbox):
    return fdb.tuple_to_key('f', struct.pack('>Q', feed), 'watchers', struct.pack('>Q', inbox))


def key_for_feed_message(feed, message):
    return fdb.tuple_to_key('f', struct.pack('>Q', feed), 'message', struct.pack('>Q', message))


def prefix_for_feed_messages(feed):
    return fdb.tuple_to_key('f', struct.pack('>Q', feed), 'message')


def key_for_inbox(inbox):
    return fdb.tuple_to_key('i', struct.pack('>Q', inbox))


def key_for_inbox_subscription_count(inbox):
    return fdb.tuple_to_key('i', struct.pack('>Q', inbox), 'subCount')


def key_for_inbox_subscription(inbox, feed):
    return fdb.tuple_to_key('i', struct.pack('>Q', inbox), 'subs', struct.pack('>Q', feed))


def prefix_for_inbox_subscriptions(inbox):
    return fdb.tuple_to_key('i', struct.pack('>Q', inbox), 'subs')


def key_for_inbox_stale_feed(inbox, feed):
    return fdb.tuple_to_key('i', struct.pack('>Q', inbox), 'stale', struct.pack('>Q', feed))


def key_for_message(message):
    return fdb.tuple_to_key('m', struct.pack('>Q', message))
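Every numeric ID above is encoded with struct.pack('>Q', ...) before being placed in a key. Big-endian fixed-width integers sort byte-wise in the same order as numerically, which is what keeps these prefix and range scans in ID order; a quick standalone check:

import struct

ids = [1, 10, 2, 256, 255]
big = [struct.pack('>Q', i) for i in ids]
assert sorted(big) == [struct.pack('>Q', i) for i in sorted(ids)]

# A little-endian encoding would not preserve numeric order under byte-wise sorting.
little = [struct.pack('<Q', i) for i in ids]
assert sorted(little) != [struct.pack('<Q', i) for i in sorted(ids)]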
@@ -137,7 +151,7 @@ def _create_inbox_internal(tr, inbox, metadata):
def _create_subscription_internal(tr, feed, inbox):
    key = key_for_inbox_subscription(inbox, feed)
    if tr[key] != None:
        return True # This subscription exists
        return True  # This subscription exists

    # print 'Feed, inbox:', feed, ",", inbox

@@ -147,7 +161,7 @@ def _create_subscription_internal(tr, feed, inbox):
    # print f_count, 'and', i_count
    if f_count == None or i_count == None:
        print 'There is not a feed or inbox'
        return False # Either the inbox or the feed do not exist
        return False  # Either the inbox or the feed do not exist

    # Update the subscriptions of the inbox
    tr[key] = ''

@@ -168,7 +182,7 @@ def _create_subscription_internal(tr, feed, inbox):
@fdb.transactional
def _post_message_internal(tr, feed, contents):
    if tr[key_for_feed(feed)] == None:
        return False # this feed does not exist!
        return False  # this feed does not exist!

    # Get globally latest message, set our ID to that less one
    zero_key = key_for_message(0)

@@ -190,11 +204,11 @@ def _post_message_internal(tr, feed, contents):
        struct.pack('>Q', struct.unpack('>Q', f_count + '')[0] + 1)

    # update the watchers on the feed to mark those inboxes as stale
    #prefix = fdb.tuple_to_key(key_for_feed(feed), 'watchers')
    #for k,v in tr.get_range_startswith(prefix):
    #    stale_inbox = fdb.key_to_tuple(k)[3]
    #    tr[key_for_inbox_stale_feed(stale_inbox, feed)] = ''
    #    del tr[k]
    # prefix = fdb.tuple_to_key(key_for_feed(feed), 'watchers')
    # for k,v in tr.get_range_startswith(prefix):
    #     stale_inbox = fdb.key_to_tuple(k)[3]
    #     tr[key_for_inbox_stale_feed(stale_inbox, feed)] = ''
    #     del tr[k]

    return True

@@ -203,18 +217,18 @@ def _post_message_internal(tr, feed, contents):
def _list_messages_internal(tr, inbox):
    messages = []
    if tr[key_for_inbox(inbox)] == None:
        return messages # this inbox does not exist!
        return messages  # this inbox does not exist!

    print 'Messages in %s''s inbox' % tr[key_for_inbox(inbox)]
    prefix = prefix_for_inbox_subscriptions(inbox)
    for k, _ in tr.get_range_startswith(prefix):
        #print "inbox sub:", fdb.key_to_tuple(k)
        # print "inbox sub:", fdb.key_to_tuple(k)
        feed = struct.unpack('>Q', fdb.key_to_tuple(k)[3])[0]
        #print "messages from feed:", feed
        # print "messages from feed:", feed
        print '  from %s:' % tr[key_for_feed(feed)]
        feed_prefix = prefix_for_feed_messages(feed)
        for key, _ in tr.get_range_startswith(feed_prefix):
            #print "feed message:", fdb.key_to_tuple(key)
            # print "feed message:", fdb.key_to_tuple(key)
            message_id = struct.unpack('>Q', fdb.key_to_tuple(key)[3])[0]
            message = tr[key_for_message(message_id)]
            print "    ", message

@@ -256,4 +270,3 @@ class PubSub(object):

    def print_feed_stats(self, feed):
        _print_internal(self.db, feed)
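_post_message_internal reads the globally latest message and "sets our ID to that less one", so newer messages get smaller keys and a plain ascending scan returns newest-first. A toy version of that trick, with a dict standing in for the message keyspace; the 2**62 starting point is an arbitrary assumption for the sketch:

import struct

messages = {}  # key bytes -> contents, ordered like an fdb keyspace when sorted

def post(contents):
    # The newest message takes the smallest remaining ID, as the layer does.
    smallest = min(messages) if messages else struct.pack('>Q', 2 ** 62)
    new_id = struct.unpack('>Q', smallest)[0] - 1
    messages[struct.pack('>Q', new_id)] = contents

post('first')
post('second')
post('third')

# An ascending key scan now yields newest-first.
assert [messages[k] for k in sorted(messages)] == ['third', 'second', 'first']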
@@ -31,30 +31,35 @@ feeds = simpledoc.root.feeds
inboxes = simpledoc.root.inboxes
messages = simpledoc.root.messages


@simpledoc.transactional
def _create_feed_internal(metadata):
    feed = feeds[metadata]
    feed.set_value(metadata)
    return feed


@simpledoc.transactional
def _create_inbox_internal(metadata):
    inbox = inboxes[metadata]
    inbox.set_value(metadata)
    return inbox


@simpledoc.transactional
def _create_feed_and_inbox_internal(metadata):
    _create_feed_internal(metadata)
    _create_inbox_internal(metadata)
    return True


@simpledoc.transactional
def _create_subscription_internal(feed, inbox):
    inbox.subs[feed.get_name()] = ""
    inbox.dirtyfeeds[feed.get_name()] = "1"
    return True


@simpledoc.transactional
def _post_message_internal(feed, message_id, contents):
    message = messages.prepend()

@@ -66,14 +71,16 @@ def _post_message_internal(feed, message_id, contents):
        inboxes[inbox.get_name()].dirtyfeeds[feed.get_name()] = "1"
    feed.watchinginboxes.clear_all()


@simpledoc.transactional
def _list_messages_internal(inbox):
    print "messages in %s's inbox:" % inbox.get_value()
    for feed in inbox.subs.get_children():
        print " from %s:" % feeds[feed.get_name()].get_value()
        print "  from %s:" % feeds[feed.get_name()].get_value()
        for message in feed_messages.find_all(feed.get_name()):
            print "    ", message.get_value()


@simpledoc.transactional
def _get_feed_messages_internal(feed, limit):
    message_list = []

@@ -85,20 +92,21 @@ def _get_feed_messages_internal(feed, limit):
            counter += 1
    return message_list


@simpledoc.transactional
def _copy_dirty_feeds(inbox):
    changed = False
    latest_id = inbox.latest_message.get_value()
    #print "latest message is", latest_id
    # print "latest message is", latest_id
    for feed in inbox.dirtyfeeds.get_children():
        #print "working on dirty feed", feed.get_name()
        # print "working on dirty feed", feed.get_name()
        for message in feed_messages.find_all(feed.get_name()):
            #print "found message", message.get_name()
            # print "found message", message.get_name()
            if latest_id != None and message.get_name() >= latest_id:
                break
            changed = True
            inbox.messages[message.get_name()] = feed.get_name()
            #print "copied message", message.get_name()
            # print "copied message", message.get_name()

        # now that we have copied, mark this inbox as watching the feed
        feeds[feed.get_name()].watchinginboxes[inbox.get_name()] = "1"

@@ -106,6 +114,7 @@ def _copy_dirty_feeds(inbox):
    inbox.dirtyfeeds.clear_all()
    return changed


@simpledoc.transactional
def _get_inbox_subscriptions_internal(inbox, limit):
    subscriptions = []

@@ -115,6 +124,7 @@ def _get_inbox_subscriptions_internal(inbox, limit):

    return subscriptions


@simpledoc.transactional
def _get_inbox_messages_internal(inbox, limit):
    inbox_changed = _copy_dirty_feeds(inbox)

@@ -133,17 +143,20 @@ def _get_inbox_messages_internal(inbox, limit):

    return [messages[mid].get_value() for mid in message_ids]


@simpledoc.transactional
def _clear_all_messages():
    simpledoc.root.clear_all()


@simpledoc.transactional
def _print_internals(feed_or_inbox = None):
def _print_internals(feed_or_inbox=None):
    if feed_or_inbox is None:
        print simpledoc.root.get_json(False)
    else:
        print feed_or_inbox.get_json(False)


class PubSub(object):
    def __init__(self, db):
        self.db = db

@@ -187,4 +200,3 @@ class PubSub(object):

    def print_internals(self):
        _print_internals(self.db)
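_copy_dirty_feeds implements fan-out on read: posting only flags each watching inbox's feed as dirty, the copy into the inbox happens on the inbox's next read, and the inbox then re-registers as a watcher. A condensed in-memory sketch of that cycle; names are illustrative, and the real layer also tracks latest_message so only new entries are copied:

feed_messages = {'news': []}     # feed -> posted messages
dirty = {'bob': set()}           # inbox -> feeds with uncopied messages
inbox_copy = {'bob': []}         # inbox -> locally copied messages
watchers = {'news': {'bob'}}     # feed -> inboxes to flag on the next post

def post(feed, message):
    feed_messages[feed].append(message)
    for inbox in watchers[feed]:
        dirty[inbox].add(feed)   # the write stays cheap: just mark dirty
    watchers[feed].clear()

def read(inbox):
    for feed in dirty[inbox]:    # copy lazily, on read
        inbox_copy[inbox].extend(feed_messages[feed])
        watchers[feed].add(inbox)   # watch again for future posts
    dirty[inbox].clear()
    return list(inbox_copy[inbox])

post('news', 'hello')
assert read('bob') == ['hello']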
@@ -23,7 +23,7 @@
import os
import sys

sys.path[:0]=[os.path.join(os.path.dirname(__file__), '..', '..', 'bindings', 'python')]
sys.path[:0] = [os.path.join(os.path.dirname(__file__), '..', '..', 'bindings', 'python')]
import fdb
import argparse
from pubsub_bigdoc import PubSub

@@ -50,8 +50,8 @@ for i in range(args.userStart, args.userCount):
    print i,
print 'done'

#@fdb.transactional
#def done(tr):
#    tr['/done/%d' % args.userStart] = 'done'
# @fdb.transactional
# def done(tr):
#     tr['/done/%d' % args.userStart] = 'done'
#
#done(db)
# done(db)


@@ -23,7 +23,7 @@
import os
import sys

sys.path[:0]=[os.path.join(os.path.dirname(__file__), '..', '..', 'bindings', 'python')]
sys.path[:0] = [os.path.join(os.path.dirname(__file__), '..', '..', 'bindings', 'python')]
import fdb
import argparse
import random

@@ -54,6 +54,7 @@ name = os.uname()[1]

print 'sending messages',


def message_client():
    gevent.sleep(random.random())
    messages_sent = 0

@@ -65,8 +66,8 @@ def message_client():
    else:
        ps.get_inbox_messages(ps.get_inbox_by_name('%09d' % user), 10)


jobs = [gevent.spawn(message_client) for i in range(0, args.threads)]
gevent.joinall(jobs)

print 'done'


@@ -23,7 +23,7 @@
import os
import sys

sys.path[:0]=[os.path.join(os.path.dirname(__file__), '..', '..', 'bindings', 'python')]
sys.path[:0] = [os.path.join(os.path.dirname(__file__), '..', '..', 'bindings', 'python')]
import fdb
import argparse
import random

@@ -55,8 +55,8 @@ for i in range(0, args.followers):
    print i,
print 'done'

#@fdb.transactional
#def done(tr):
#    tr['/done/%d' % args.userStart] = 'done'
# @fdb.transactional
# def done(tr):
#     tr['/done/%d' % args.userStart] = 'done'
#
#done(db)
# done(db)
@@ -20,39 +20,54 @@

# FoundationDB TaskBucket layer

import random, uuid, time, struct
import fdb, fdb.tuple
import random
import uuid
import time
import struct
import fdb
import fdb.tuple
fdb.api_version(200)

# TODO: Make this fdb.tuple.subspace() or similar?


class Subspace (object):
def __init__(self, prefixTuple, rawPrefix=""):
self.rawPrefix = rawPrefix + fdb.tuple.pack(prefixTuple)

def __getitem__(self, name):
return Subspace( (name,), self.rawPrefix )
return Subspace((name,), self.rawPrefix)

def key(self):
return self.rawPrefix

def pack(self, tuple):
return self.rawPrefix + fdb.tuple.pack( tuple )
return self.rawPrefix + fdb.tuple.pack(tuple)

def unpack(self, key):
assert key.startswith(self.rawPrefix)
return fdb.tuple.unpack(key[len(self.rawPrefix):])

def range(self, tuple=()):
p = fdb.tuple.range( tuple )
p = fdb.tuple.range(tuple)
return slice(self.rawPrefix + p.start, self.rawPrefix + p.stop)


def random_key():
return uuid.uuid4().bytes


def _pack_value(v):
if hasattr(v, 'pack'):
return v.pack()
else:
return v


class TaskTimedOutException(Exception):
pass


class TaskBucket (object):
"""A TaskBucket represents an unordered collection of tasks, stored
in a database and available to any number of clients. A program may

@@ -60,7 +75,7 @@ class TaskBucket (object):
key/value dictionary. See TaskDispatcher for an easy way to define
tasks as functions."""

def __init__(self, subspace, system_access = False):
def __init__(self, subspace, system_access=False):
self.prefix = subspace
self.active = self.prefix["ac"]
self.available = self.prefix["av"]

@@ -73,7 +88,7 @@ class TaskBucket (object):
"""Removes all tasks, whether or not locked, from the bucket."""
if self.system_access:
tr.options.set_access_system_keys()
del tr[ self.prefix.range(()) ]
del tr[self.prefix.range(())]

@fdb.transactional
def add(self, tr, taskDict):

@@ -82,8 +97,8 @@ class TaskBucket (object):
tr.options.set_access_system_keys()
assert taskDict
key = random_key()
for k,v in taskDict.items():
tr[ self.available.pack( (key, k) ) ] = _pack_value( v )
for k, v in taskDict.items():
tr[self.available.pack((key, k))] = _pack_value(v)
taskDict["__task_key"] = key
return key

@@ -93,7 +108,7 @@ class TaskBucket (object):
if self.system_access:
tr.options.set_access_system_keys()
key = random_key()
tr[ self.available.pack( (key, "type") ) ] = ""
tr[self.available.pack((key, "type"))] = ""
return key

@fdb.transactional

@@ -103,25 +118,25 @@ class TaskBucket (object):
If there are no tasks in the bucket, returns None."""
if self.system_access:
tr.options.set_access_system_keys()
k = tr.snapshot.get_key( fdb.KeySelector.last_less_or_equal( self.available.pack( (random_key(),) ) ) )
if not k or k < self.available.pack( ("",) ):
k = tr.snapshot.get_key( fdb.KeySelector.last_less_or_equal( self.available.pack( (chr(255)*16,) ) ) )
if not k or k < self.available.pack( ("",) ):
k = tr.snapshot.get_key(fdb.KeySelector.last_less_or_equal(self.available.pack((random_key(),))))
if not k or k < self.available.pack(("",)):
k = tr.snapshot.get_key(fdb.KeySelector.last_less_or_equal(self.available.pack((chr(255) * 16,))))
if not k or k < self.available.pack(("",)):
if self.check_timeouts(tr):
return self.get_one(tr)
return None
key = self.available.unpack(k)[0]
avail = self.available[key]
timeout = tr.get_read_version().wait() + long( self.timeout * (0.9 + 0.2*random.random()) )
timeout = tr.get_read_version().wait() + long(self.timeout * (0.9 + 0.2 * random.random()))

taskDict = {}
for k,v in tr[ avail.range(()) ]:
for k, v in tr[avail.range(())]:
tk, = avail.unpack(k)
taskDict[tk]=v
taskDict[tk] = v

if tk != "type" or v != "":
tr[ self.timeouts.pack( (timeout, key, tk) ) ] = v
del tr[ avail.range(()) ]
tr[self.timeouts.pack((timeout, key, tk))] = v
del tr[avail.range(())]
tr[self.active.key()] = random_key()

taskDict["__task_key"] = key

@@ -132,17 +147,17 @@ class TaskBucket (object):
def is_empty(self, tr):
if self.system_access:
tr.options.set_read_system_keys()
k = tr.get_key( fdb.KeySelector.last_less_or_equal( self.available.pack( (chr(255)*16,) ) ) )
if k and k >= self.available.pack( ("",) ):
k = tr.get_key(fdb.KeySelector.last_less_or_equal(self.available.pack((chr(255) * 16,))))
if k and k >= self.available.pack(("",)):
return False
return not bool(next(iter(tr[self.timeouts.range()]),False))
return not bool(next(iter(tr[self.timeouts.range()]), False))

@fdb.transactional
def is_busy(self, tr):
if self.system_access:
tr.options.set_read_system_keys()
k = tr.get_key( fdb.KeySelector.last_less_or_equal( self.available.pack( (chr(255)*16,) ) ) )
return k and k >= self.available.pack( ("",) )
k = tr.get_key(fdb.KeySelector.last_less_or_equal(self.available.pack((chr(255) * 16,))))
return k and k >= self.available.pack(("",))

@fdb.transactional
def finish(self, tr, taskDict):

@@ -150,19 +165,19 @@ class TaskBucket (object):
from the bucket. If the task has already timed out, raises TaskTimedOutException."""
if self.system_access:
tr.options.set_access_system_keys()
rng = self.timeouts.range( (taskDict["__task_timeout"], taskDict["__task_key"]) )
if next(iter(tr[rng]),False):
del tr[ rng ]
rng = self.timeouts.range((taskDict["__task_timeout"], taskDict["__task_key"]))
if next(iter(tr[rng]), False):
del tr[rng]
else:
raise TaskTimedOutException()

@fdb.transactional
def is_finished(self, tr, taskDict):
#print "checking if the task was finished at version: {0}".format(tr.get_read_version().wait())
# print "checking if the task was finished at version: {0}".format(tr.get_read_version().wait())
if self.system_access:
tr.options.set_read_system_keys()
rng = self.timeouts.range( (taskDict["__task_timeout"], taskDict["__task_key"]) )
return not bool(next(iter(tr[rng]),False))
rng = self.timeouts.range((taskDict["__task_timeout"], taskDict["__task_key"]))
return not bool(next(iter(tr[rng]), False))

def check_active(self, db):
@fdb.transactional

@@ -199,16 +214,17 @@ class TaskBucket (object):
"""Looks for tasks that have timed out and returns them to be available tasks. Returns True
iff any tasks were affected."""
end = tr.get_read_version().wait()
rng = slice( self.timeouts.range((0,)).start , self.timeouts.range((end,)).stop )
rng = slice(self.timeouts.range((0,)).start, self.timeouts.range((end,)).stop)
anyTimeouts = False
for k,v in tr.get_range( rng.start, rng.stop, streaming_mode = fdb.StreamingMode.want_all):
for k, v in tr.get_range(rng.start, rng.stop, streaming_mode=fdb.StreamingMode.want_all):
timeout, taskKey, param = self.timeouts.unpack(k)
anyTimeouts = True
tr.set( self.available.pack( (taskKey,param) ), v )
tr.set(self.available.pack((taskKey, param)), v)
del tr[rng]

return anyTimeouts


class TaskDispatcher (object):
def __init__(self):
self.taskTypes = {}

@@ -237,23 +253,26 @@ class TaskDispatcher (object):
in, passing the taskDict as keyword arguments. Does not finish or
extend the task or otherwise interact with a TaskBucket."""
if taskDict["type"] != "":
self.taskTypes[ taskDict["type"] ]( **taskDict )
self.taskTypes[taskDict["type"]](**taskDict)

def do_one(self, db, taskBucket):
"""Gets one task (if any) from the task bucket, executes it, and finishes it.
Returns True if a task was executed, False if none was available."""
task = taskBucket.get_one(db)
if not task: return False
if not task:
return False
self.dispatch(task)
return True


class FutureBucket (object):
"""A factory for Futures, and a location in the database to store them."""
def __init__(self, subspace, system_access = False):

def __init__(self, subspace, system_access=False):
self.prefix = subspace
self.dispatcher = TaskDispatcher()
self.dispatcher.taskType( self._add_task )
self.dispatcher.taskType( self._unblock_future )
self.dispatcher.taskType(self._add_task)
self.dispatcher.taskType(self._unblock_future)
self.system_access = system_access

@fdb.transactional

@@ -267,7 +286,7 @@ class FutureBucket (object):

def unpack(self, packed_future):
"""Returns a Future such that Future.pack()==packed_future."""
return Future( self, packed_future )
return Future(self, packed_future)

@fdb.transactional
def join(self, tr, *futures):

@@ -279,18 +298,19 @@ class FutureBucket (object):
return joined

def _add_task(self, tr, taskType, taskBucket, **task):
bucket = TaskBucket( Subspace((), rawPrefix=taskBucket), system_access=self.system_access )
bucket = TaskBucket(Subspace((), rawPrefix=taskBucket), system_access=self.system_access)
task["type"] = taskType
bucket.add( tr, task )
bucket.add(tr, task)

def _unblock_future(self, tr, future, blockid, **task):
if self.system_access:
tr.options.set_access_system_keys()
future = self.unpack(future)
del tr[ future.prefix["bl"][blockid].key() ]
del tr[future.prefix["bl"][blockid].key()]
if future.is_set(tr):
future.perform_all_actions(tr)


class Future (object):
"""Represents a state which will become true ("set") at some point, and a set
of actions to perform as soon as the state is true. The state can change

@@ -298,10 +318,11 @@ class Future (object):

def __init__(self, bucket, key=None):
"""Not for direct use. Call FutureBucket.future() instead."""
if key is None: key = random_key()
if key is None:
key = random_key()
self.key = key
self.bucket = bucket
self.prefix = bucket.prefix[ self.key ]
self.prefix = bucket.prefix[self.key]
self.dispatcher = bucket.dispatcher
self.system_access = bucket.system_access

@@ -312,7 +333,7 @@ class Future (object):
def is_set(self, tr):
if self.system_access:
tr.options.set_read_system_keys()
return not any( tr[self.prefix["bl"].range(())] )
return not any(tr[self.prefix["bl"].range(())])

@fdb.transactional
def on_set_add_task(self, tr, taskBucket, taskDict):

@@ -334,8 +355,8 @@ class Future (object):
self.perform_action(tr, taskDict)
else:
cb_key = random_key()
for k,v in taskDict.items():
tr[ self.prefix["cb"][ cb_key ][k].key() ] = _pack_value( v )
for k, v in taskDict.items():
tr[self.prefix["cb"][cb_key][k].key()] = _pack_value(v)

@fdb.transactional
def set(self, tr):

@@ -343,8 +364,8 @@ class Future (object):
actions that are to take place when it is set take place."""
if self.system_access:
tr.options.set_access_system_keys()
#if self.is_set(tr): return
del tr[ self.prefix.range( ("bl",) ) ] # Remove all blocks
# if self.is_set(tr): return
del tr[self.prefix.range(("bl",))]  # Remove all blocks
self.perform_all_actions(tr)

@fdb.transactional

@@ -355,8 +376,9 @@ class Future (object):
if self.system_access:
tr.options.set_access_system_keys()
assert futures
if self.is_set(tr): return
del tr[ self.prefix["bl"][""].key() ]
if self.is_set(tr):
return
del tr[self.prefix["bl"][""].key()]
self._join(tr, futures)

@fdb.transactional

@@ -372,23 +394,23 @@ class Future (object):
ids = [random_key() for f in futures]
for blockid in ids:
self._add_block(tr, blockid)
for f,blockid in zip(futures,ids):
f.on_set( tr, self.dispatcher.makeTask( self.bucket._unblock_future, future=self.pack(), blockid=blockid ) )
for f, blockid in zip(futures, ids):
f.on_set(tr, self.dispatcher.makeTask(self.bucket._unblock_future, future=self.pack(), blockid=blockid))

def _add_block(self, tr, blockid):
tr[ self.prefix["bl"][blockid].key() ] = ""
tr[self.prefix["bl"][blockid].key()] = ""

def perform_all_actions(self, tr):
cb = self.prefix["cb"]
callbacks = list(tr[cb.range()])
del tr[cb.range()] # Remove all callbacks
del tr[cb.range()]  # Remove all callbacks
# Now actually perform the callbacks
taskDict = {}
taskKey = None
for k,v in callbacks:
for k, v in callbacks:
cb_key, k = cb.unpack(k)
if cb_key != taskKey:
self.perform_action( tr, taskDict )
self.perform_action(tr, taskDict)
taskDict = {}
taskKey = cb_key
taskDict[k] = v

@@ -397,4 +419,4 @@ class Future (object):
def perform_action(self, tr, taskDict):
if taskDict:
taskDict["tr"] = tr
self.dispatcher.dispatch( taskDict )
self.dispatcher.dispatch(taskDict)

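As an aside for readers of the layer above: the small Subspace helper simply prepends a packed prefix tuple to every packed key tuple, so subspaces nest by concatenation. A minimal sketch of the intended use (illustrative values only, assuming the module above is importable as taskbucket):

    from taskbucket import Subspace

    s = Subspace(("tasks",))
    key = s.pack(("task1", "type"))
    # key == fdb.tuple.pack(("tasks",)) + fdb.tuple.pack(("task1", "type"))
    assert s.unpack(key) == ("task1", "type")
    # s.range() yields a slice covering every key that s.pack() can produce
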
@@ -20,19 +20,24 @@
#

import sys,os
sys.path[:0]=[os.path.join(os.path.dirname(__file__), '..', '..', 'bindings', 'python')]
sys.path[:0]=[os.path.join(os.path.dirname(__file__), '..', '..', 'layers')]
import sys
import os
sys.path[:0] = [os.path.join(os.path.dirname(__file__), '..', '..', 'bindings', 'python')]
sys.path[:0] = [os.path.join(os.path.dirname(__file__), '..', '..', 'layers')]

import fdb, taskbucket, time, sys
import fdb
import taskbucket
import time
import sys
fdb.api_version(200)

from taskbucket import Subspace, TaskTimedOutException

taskDispatcher = taskbucket.TaskDispatcher()
testSubspace = Subspace( (), "backup-agent" )
taskBucket = taskbucket.TaskBucket( testSubspace["tasks"] )
futureBucket = taskbucket.FutureBucket( testSubspace["futures"] )
testSubspace = Subspace((), "backup-agent")
taskBucket = taskbucket.TaskBucket(testSubspace["tasks"])
futureBucket = taskbucket.FutureBucket(testSubspace["futures"])


@taskDispatcher.taskType
def say_hello(name, done, **task):

@@ -46,6 +51,7 @@ def say_hello(name, done, **task):
taskBucket.finish(tr, task)
say_hello_tx(db)


@taskDispatcher.taskType
def say_hello_to_everyone(done, **task):
done = futureBucket.unpack(done)

@@ -56,16 +62,18 @@ def say_hello_to_everyone(done, **task):
for name in range(20):
name_done = futureBucket.future(tr)
futures.append(name_done)
taskBucket.add( tr, taskDispatcher.makeTask(say_hello, name=str(name), done=name_done) )
taskBucket.add(tr, taskDispatcher.makeTask(say_hello, name=str(name), done=name_done))
done.join(tr, *futures)
taskBucket.finish(tr, task)
say_hello_to_everyone_tx(db)


@taskDispatcher.taskType
def said_hello(**task):
print "Said hello to everyone."
taskBucket.finish(db, task)


if len(sys.argv) == 2:
clusterFile = sys.argv[1]
db = fdb.open()

@@ -75,8 +83,8 @@ print "adding tasks"
all_done = futureBucket.future(db)

taskBucket.clear(db)
taskBucket.add( db, taskDispatcher.makeTask( say_hello_to_everyone, done=all_done ) )
all_done.on_set_add_task( db, taskBucket, taskDispatcher.makeTask( said_hello ) )
taskBucket.add(db, taskDispatcher.makeTask(say_hello_to_everyone, done=all_done))
all_done.on_set_add_task(db, taskBucket, taskDispatcher.makeTask(said_hello))

while True:
try:

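The excerpt above cuts off inside the worker loop. For context, a consumer typically drives the dispatcher by polling the bucket; a minimal sketch of such a loop (illustrative only, not the file's actual ending, reusing the names defined above):

    while True:
        try:
            if not taskDispatcher.do_one(db, taskBucket):
                time.sleep(1)  # nothing available; back off before polling again
        except TaskTimedOutException:
            pass  # the task's lease expired and another worker may retry it
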
@@ -21,11 +21,11 @@
package main

import (
"fmt"
"github.com/apple/foundationdb/bindings/go/src/fdb"
"github.com/apple/foundationdb/bindings/go/src/fdb/directory"
"github.com/apple/foundationdb/bindings/go/src/fdb/subspace"
"github.com/apple/foundationdb/bindings/go/src/fdb/tuple"
"fmt"
)

const CHUNK_SIZE int = 5

@@ -34,10 +34,12 @@ func write_blob(t fdb.Transactor, blob_subspace subspace.Subspace, blob []byte)

_, err = t.Transact(func(tr fdb.Transaction) (interface{}, error) {

if len(blob) == 0 { return nil, nil }
if len(blob) == 0 {
return nil, nil
}

for i := 0; i < len(blob); i+=CHUNK_SIZE {
if i + CHUNK_SIZE <= len(blob) {
for i := 0; i < len(blob); i += CHUNK_SIZE {
if i+CHUNK_SIZE <= len(blob) {
tr.Set(blob_subspace.Pack(tuple.Tuple{i}), blob[i:i+CHUNK_SIZE])
} else {
tr.Set(blob_subspace.Pack(tuple.Tuple{i}), blob[i:])

@@ -54,7 +56,7 @@ func read_blob(t fdb.ReadTransactor, blob_subspace subspace.Subspace) ([]byte, e

var blob []byte

ri := rtr.GetRange(blob_subspace, fdb.RangeOptions{}).Iterator();
ri := rtr.GetRange(blob_subspace, fdb.RangeOptions{}).Iterator()

for ri.Advance() {

@@ -21,14 +21,14 @@
package main

import (
"fmt"
"io/ioutil"
"math/rand"
"encoding/json"
"fmt"
"github.com/apple/foundationdb/bindings/go/src/fdb"
"github.com/apple/foundationdb/bindings/go/src/fdb/directory"
"github.com/apple/foundationdb/bindings/go/src/fdb/subspace"
"github.com/apple/foundationdb/bindings/go/src/fdb/tuple"
"io/ioutil"
"math/rand"
)

func clear_subspace(trtr fdb.Transactor, sub subspace.Subspace) error {

@@ -50,14 +50,15 @@ func print_subspace(trtr fdb.Transactor, sub subspace.Subspace) {
})
}


func _pack(t interface{}) []byte {
return tuple.Tuple{t}.Pack()
}

func _unpack(t []byte) tuple.Tuple {
i, e := tuple.Unpack(t)
if e != nil {return nil}
if e != nil {
return nil
}
return i
}

@@ -67,7 +68,9 @@ const EmptyList int = -2
func ToTuples(item interface{}) []tuple.Tuple {
switch i := item.(type) {
case []interface{}:
if len(i) == 0 {return []tuple.Tuple{tuple.Tuple{EmptyList}}}
if len(i) == 0 {
return []tuple.Tuple{tuple.Tuple{EmptyList}}
}
tuples := make([]tuple.Tuple, 0)
for i, v := range i {
for _, t := range ToTuples(v) {

@@ -76,7 +79,9 @@ func ToTuples(item interface{}) []tuple.Tuple {
}
return tuples
case map[string]interface{}:
if len(i) == 0 {return []tuple.Tuple{tuple.Tuple{EmptyObject}}}
if len(i) == 0 {
return []tuple.Tuple{tuple.Tuple{EmptyObject}}
}
tuples := make([]tuple.Tuple, 0)
for k, v := range i {
for _, t := range ToTuples(v) {

@@ -92,42 +97,52 @@ func ToTuples(item interface{}) []tuple.Tuple {

func FromTuples(tuples []tuple.Tuple) interface{} {
//fmt.Println(tuples)
if len(tuples) == 0 {return nil}
if len(tuples) == 0 {
return nil
}
first := tuples[0]
if len(first) == 1 {return first[0]}
if first[0] == EmptyObject {return make(map[string]interface{}, 0)}
if first[0] == EmptyList {return make([]interface{}, 0)}
if len(first) == 1 {
return first[0]
}
if first[0] == EmptyObject {
return make(map[string]interface{}, 0)
}
if first[0] == EmptyList {
return make([]interface{}, 0)
}

group := make(map[string][]tuple.Tuple)

for _, t := range tuples {
k := string(_pack(t[0]))
_, ok := group[k]
if !ok {group[k] = make([]tuple.Tuple, 0)}
if !ok {
group[k] = make([]tuple.Tuple, 0)
}
group[k] = append(group[k], t[0:len(t)])
}

switch first[0].(type) {
case int64:
res := make([]interface{}, 0)
for _, g := range group {
subtup := make([]tuple.Tuple, 0)
for _, t := range g {
subtup = append(subtup, t[1:len(t)])
}
res = append(res, FromTuples(subtup))
case int64:
res := make([]interface{}, 0)
for _, g := range group {
subtup := make([]tuple.Tuple, 0)
for _, t := range g {
subtup = append(subtup, t[1:len(t)])
}
return res
default:
res := make(map[string]interface{})
for _, g := range group {
subtup := make([]tuple.Tuple, 0)
for _, t := range g {
subtup = append(subtup, t[1:len(t)])
}
res[g[0][0].(string)] = FromTuples(subtup)
res = append(res, FromTuples(subtup))
}
return res
default:
res := make(map[string]interface{})
for _, g := range group {
subtup := make([]tuple.Tuple, 0)
for _, t := range g {
subtup = append(subtup, t[1:len(t)])
}
return res
res[g[0][0].(string)] = FromTuples(subtup)
}
return res
}
}

@@ -140,23 +155,23 @@ func (doc Doc) InsertDoc(trtr fdb.Transactor, docdata []byte) int {
json.Unmarshal(docdata, &data)
docid := 0
switch d := data.(type) {
case map[string]interface{}:
temp, ok := d["doc_id"]
if !ok {
docid = doc._GetNewID(trtr)
d["doc_id"] = docid
} else {
docid = temp.(int)
case map[string]interface{}:
temp, ok := d["doc_id"]
if !ok {
docid = doc._GetNewID(trtr)
d["doc_id"] = docid
} else {
docid = temp.(int)
}
tuples := ToTuples(d)
trtr.Transact(func(tr fdb.Transaction) (interface{}, error) {
for _, t := range tuples {
tr.Set(doc.DocSS.Pack(append(tuple.Tuple{d["doc_id"]}, t[0:len(t)-1]...)), _pack(t[len(t)-1]))
}
tuples := ToTuples(d)
trtr.Transact(func(tr fdb.Transaction) (interface{}, error) {
for _, t := range tuples {
tr.Set(doc.DocSS.Pack(append(tuple.Tuple{d["doc_id"]}, t[0:len(t)-1]...)), _pack(t[len(t)-1]))
}
return nil, nil
})
return nil, nil
})
}
return docid
return docid
}

func (doc Doc) _GetNewID(trtr fdb.Transactor) int {

@@ -165,7 +180,9 @@ func (doc Doc) _GetNewID(trtr fdb.Transactor) int {
for true {
new_id = rand.Intn(100000007)
rp, err := fdb.PrefixRange(doc.DocSS.Pack(tuple.Tuple{new_id}))
if err != nil {continue}
if err != nil {
continue
}

res, err := tr.GetRange(rp, fdb.RangeOptions{1, -1, false}).GetSliceWithError()
if len(res) == 0 {

@@ -181,14 +198,18 @@ func (doc Doc) GetDoc(trtr fdb.Transactor, doc_id int) interface{} {
tuples := make([]tuple.Tuple, 0)
trtr.ReadTransact(func(tr fdb.ReadTransaction) (interface{}, error) {
kr, err := fdb.PrefixRange(doc.DocSS.Pack(tuple.Tuple{doc_id}))
if err != nil {panic(err)}
if err != nil {
panic(err)
}

items := tr.GetRange(kr, fdb.RangeOptions{}).Iterator()

for items.Advance() {
v := items.MustGet()
tup, err := doc.DocSS.Unpack(v.Key)
if err != nil {panic(err)}
if err != nil {
panic(err)
}
tuples = append(tuples, append(tup[1:len(tup)], _unpack(v.Value)))
}
return nil, nil

@@ -203,7 +224,9 @@ func main() {
db := fdb.MustOpenDefault()

DocDemoDir, err := directory.CreateOrOpen(db, []string{"docdemo"}, nil)
if err != nil {panic(err)}
if err != nil {
panic(err)
}

clear_subspace(db, DocDemoDir)

@@ -21,12 +21,12 @@
package main

import (
"fmt"
"github.com/apple/foundationdb/bindings/go/src/fdb"
"github.com/apple/foundationdb/bindings/go/src/fdb/directory"
"github.com/apple/foundationdb/bindings/go/src/fdb/subspace"
"github.com/apple/foundationdb/bindings/go/src/fdb/tuple"
"log"
"fmt"
)

func clear_subspace(trtr fdb.Transactor, sub subspace.Subspace) error {

@@ -50,8 +50,8 @@ func (graph *Graph) NewGraph(dir subspace.Subspace, name string) {

func (graph *Graph) set_edge(trtr fdb.Transactor, node, neighbor int) (inter interface{}, err error) {
inter, err = trtr.Transact(func(tr fdb.Transaction) (interface{}, error) {
tr.Set(graph.EdgeSpace.Pack( tuple.Tuple{ node, neighbor } ), []byte(""))
tr.Set(graph.InvSpace.Pack( tuple.Tuple{ neighbor, node } ), []byte(""))
tr.Set(graph.EdgeSpace.Pack(tuple.Tuple{node, neighbor}), []byte(""))
tr.Set(graph.InvSpace.Pack(tuple.Tuple{neighbor, node}), []byte(""))
return nil, nil
})
return

@@ -59,8 +59,8 @@ func (graph *Graph) set_edge(trtr fdb.Transactor, node, neighbor int) (inter int

func (graph *Graph) del_edge(trtr fdb.Transactor, node, neighbor int) (inter interface{}, err error) {
inter, err = trtr.Transact(func(tr fdb.Transaction) (interface{}, error) {
tr.Clear(graph.EdgeSpace.Pack( tuple.Tuple{ node, neighbor } ))
tr.Clear(graph.InvSpace.Pack( tuple.Tuple{ neighbor, node } ))
tr.Clear(graph.EdgeSpace.Pack(tuple.Tuple{node, neighbor}))
tr.Clear(graph.InvSpace.Pack(tuple.Tuple{neighbor, node}))
return nil, nil
})
return

@@ -70,10 +70,12 @@ func (graph *Graph) get_out_neighbors(trtr fdb.Transactor, node int) ([]int, err

val, err := trtr.ReadTransact(func(rtr fdb.ReadTransaction) (interface{}, error) {

kr, err := fdb.PrefixRange(graph.EdgeSpace.Pack( tuple.Tuple{ node } ))
if err != nil { return nil, err }
kr, err := fdb.PrefixRange(graph.EdgeSpace.Pack(tuple.Tuple{node}))
if err != nil {
return nil, err
}

ri := rtr.GetRange(kr, fdb.RangeOptions{}).Iterator();
ri := rtr.GetRange(kr, fdb.RangeOptions{}).Iterator()
neighbors := make([]int, 0)

for ri.Advance() {

@@ -81,7 +83,9 @@ func (graph *Graph) get_out_neighbors(trtr fdb.Transactor, node int) ([]int, err
kv := ri.MustGet()

t, err := graph.EdgeSpace.Unpack(kv.Key)
if err != nil { return nil, err }
if err != nil {
return nil, err
}

neighbors = append(neighbors, int(t[1].(int64)))
}

@@ -94,10 +98,12 @@ func (graph *Graph) get_out_neighbors(trtr fdb.Transactor, node int) ([]int, err
func (graph *Graph) get_in_neighbors(trtr fdb.Transactor, node int) ([]int, error) {
val, err := trtr.ReadTransact(func(rtr fdb.ReadTransaction) (interface{}, error) {

kr, err := fdb.PrefixRange(graph.InvSpace.Pack( tuple.Tuple{ node } ))
if err != nil { return nil, err }
kr, err := fdb.PrefixRange(graph.InvSpace.Pack(tuple.Tuple{node}))
if err != nil {
return nil, err
}

ri := rtr.GetRange(kr, fdb.RangeOptions{}).Iterator();
ri := rtr.GetRange(kr, fdb.RangeOptions{}).Iterator()
neighbors := make([]int, 0)

for ri.Advance() {

@@ -105,7 +111,9 @@ func (graph *Graph) get_in_neighbors(trtr fdb.Transactor, node int) ([]int, erro

t, err := graph.InvSpace.Unpack(kv.Key)

if err != nil { return nil, err }
if err != nil {
return nil, err
}

neighbors = append(neighbors, int(t[1].(int64)))
}

@@ -121,7 +129,9 @@ func main() {
db := fdb.MustOpenDefault()

GraphDemoDir, err := directory.CreateOrOpen(db, []string{"Graph"}, nil)
if err != nil {log.Fatal(err)}
if err != nil {
log.Fatal(err)
}

clear_subspace(db, GraphDemoDir)

@@ -134,22 +144,30 @@ func main() {
g.set_edge(db, 1, 2)

i, err := g.get_out_neighbors(db, 0)
if err != nil {log.Fatal(err)}
if err != nil {
log.Fatal(err)
}

fmt.Println(i)

_, err = g.del_edge(db, 0, 2)
if err != nil {log.Fatal(err)}
if err != nil {
log.Fatal(err)
}

i, err = g.get_in_neighbors(db, 2)
if err != nil {log.Fatal(err)}
if err != nil {
log.Fatal(err)
}

fmt.Println(i)

clear_subspace(db, GraphDemoDir)

i, err = g.get_in_neighbors(db, 2)
if err != nil {log.Fatal(err)}
if err != nil {
log.Fatal(err)
}

fmt.Println(i)
}

@@ -21,12 +21,12 @@
package main

import (
"log"
"fmt"
"github.com/apple/foundationdb/bindings/go/src/fdb"
"github.com/apple/foundationdb/bindings/go/src/fdb/directory"
"github.com/apple/foundationdb/bindings/go/src/fdb/subspace"
"github.com/apple/foundationdb/bindings/go/src/fdb/tuple"
"log"
)

func clear_subspace(trtr fdb.Transactor, sub subspace.Subspace) error {

@@ -54,19 +54,23 @@ func _pack(t interface{}) []byte {

func _unpack(t []byte) tuple.Tuple {
i, e := tuple.Unpack(t)
if e != nil {return nil}
if e != nil {
return nil
}
return i
}

type Workspace struct {
Dir directory.Directory
db fdb.Database
db  fdb.Database
}

func (wrkspc Workspace) _Update(trtr fdb.Transactor) {
trtr.Transact(func(tr fdb.Transaction) (interface{}, error) {
_, err := wrkspc.Dir.Remove(tr, []string{"current"})
if err != nil {log.Fatal(err)}
if err != nil {
log.Fatal(err)
}

_, err = wrkspc.Dir.Move(tr, []string{"new"}, []string{"current"})
return nil, err

@@ -80,7 +84,9 @@ func (wrkspc Workspace) GetCurrent() (dir directory.DirectorySubspace, err error

func (wrkspc Workspace) Session(foo func(directory.DirectorySubspace)) (err error) {
newdir, err := wrkspc.Dir.CreateOrOpen(wrkspc.db, []string{"new"}, nil)
if err != nil {return}
if err != nil {
return
}
foo(newdir)
wrkspc._Update(wrkspc.db)
return

@@ -92,7 +98,9 @@ func main() {
db := fdb.MustOpenDefault()

WorkspaceDemoDir, err := directory.CreateOrOpen(db, []string{"Workspace"}, nil)
if err != nil {log.Fatal(err)}
if err != nil {
log.Fatal(err)
}

clear_subspace(db, WorkspaceDemoDir)

@@ -108,7 +116,6 @@ func main() {

print_subspace(db, current)


w.Session(func(dir directory.DirectorySubspace) {
db.Transact(func(tr fdb.Transaction) (interface{}, error) {
tr.Set(dir.Pack(tuple.Tuple{"b"}), _pack("World"))

@@ -116,6 +123,8 @@ func main() {
})
})
current, err = w.GetCurrent()
if err != nil {log.Fatal(err)}
if err != nil {
log.Fatal(err)
}
print_subspace(db, current)
}

@@ -34,13 +34,13 @@ func clear_subspace(db fdb.Transactor, ss subspace.Subspace) {
db.Transact(func(tr fdb.Transaction) (interface{}, error) {
tr.ClearRange(ss)
return nil, nil
})
})
return

}

type MultiMap struct {
MapSS subspace.Subspace
MapSS subspace.Subspace
Pos, Neg []byte
}

@@ -72,13 +72,17 @@ func (multi MultiMap) MultiSubtract(trtr fdb.Transactor, index, value interface{
func (multi MultiMap) MultiGet(tr fdb.ReadTransactor, index int) (ret []interface{}, e error) {
_, e = tr.ReadTransact(func(tr fdb.ReadTransaction) (interface{}, error) {
pr, err := fdb.PrefixRange(multi.MapSS.Pack(tuple.Tuple{index}))
if err != nil {return nil, err}
if err != nil {
return nil, err
}
kvs := tr.GetRange(pr, fdb.RangeOptions{0, -1, false}).GetSliceOrPanic()
ret := make([]interface{}, len(kvs))
i := 0
for _, kv := range kvs {
temp, err := multi.MapSS.Unpack(kv.Key)
if err != nil {return nil, err}
if err != nil {
return nil, err
}
ret[i] = temp[1]
i++
}

@@ -89,8 +93,10 @@ func (multi MultiMap) MultiGet(tr fdb.ReadTransactor, index int) (ret []interfac

func (multi MultiMap) MultiGetCounts(trtr fdb.Transactor, index interface{}) (map[interface{}]int, error) {
i, e := trtr.Transact(func(tr fdb.Transaction) (interface{}, error) {
kr, err := fdb.PrefixRange(multi.MapSS.Pack( tuple.Tuple{}))
if err != nil {return nil, err}
kr, err := fdb.PrefixRange(multi.MapSS.Pack(tuple.Tuple{}))
if err != nil {
return nil, err
}

ks, err := tr.GetRange(kr, fdb.RangeOptions{}).GetSliceWithError()

@@ -99,10 +105,12 @@ func (multi MultiMap) MultiGetCounts(trtr fdb.Transactor, index interface{}) (ma
for _, v := range ks {
bs := v.Value
k, err := multi.MapSS.Unpack(v.Key)
if err != nil {return nil, err}
if err != nil {
return nil, err
}
counts[k[1]] = 0
for i, j := 0, 1; i < len(bs); i++ {
counts[k[1]] += int(bs[i])*j
counts[k[1]] += int(bs[i]) * j
j = j << 4
}
}

@@ -113,7 +121,7 @@ func (multi MultiMap) MultiGetCounts(trtr fdb.Transactor, index interface{}) (ma

func (multi MultiMap) MultiIsElement(trtr fdb.Transactor, index, value interface{}) bool {
item, _ := trtr.Transact(func(tr fdb.Transaction) (interface{}, error) {
r := tr.Get(multi.MapSS.Pack(tuple.Tuple{ index, value } )).MustGet()
r := tr.Get(multi.MapSS.Pack(tuple.Tuple{index, value})).MustGet()
if r == nil {
return false, nil
}

@@ -21,13 +21,13 @@
package main

import (
"log"
"fmt"
"math/rand"
"github.com/apple/foundationdb/bindings/go/src/fdb"
"github.com/apple/foundationdb/bindings/go/src/fdb/directory"
"github.com/apple/foundationdb/bindings/go/src/fdb/subspace"
"github.com/apple/foundationdb/bindings/go/src/fdb/tuple"
"log"
"math/rand"
)

func clear_subspace(trtr fdb.Transactor, sub subspace.Subspace) error {

@@ -44,7 +44,9 @@ func _pack(t interface{}) []byte {

func _unpack(t []byte) tuple.Tuple {
i, e := tuple.Unpack(t)
if e != nil {return nil}
if e != nil {
return nil
}
return i
}

@@ -61,26 +63,38 @@ func (prty Priority) Push(trtr fdb.Transactor, value interface{}, priority int)

func (prty Priority) _NextCount(trtr fdb.Transactor, priority int) int {
res, err := trtr.Transact(func(tr fdb.Transaction) (interface{}, error) {
kr, e := fdb.PrefixRange(prty.PrioritySS.Pack( tuple.Tuple{priority} ))
if e != nil {return nil, e}
kr, e := fdb.PrefixRange(prty.PrioritySS.Pack(tuple.Tuple{priority}))
if e != nil {
return nil, e
}

ks, e := tr.Snapshot().GetRange(kr, fdb.RangeOptions{1, -1, true}).GetSliceWithError()
if e != nil {return nil, e}
if e != nil {
return nil, e
}

if len(ks) == 0 {return 0, nil}
if len(ks) == 0 {
return 0, nil
}
k, e := prty.PrioritySS.Unpack(ks[0].Key)
return k[0].(int)+1, nil
return k[0].(int) + 1, nil
})
if err != nil {return 0}
if err != nil {
return 0
}
return res.(int)
}

func (prty Priority) Pop(trtr fdb.Transactor, max bool) interface{} {
res, _ := trtr.Transact(func(tr fdb.Transaction) (interface{}, error) {
ks, err := tr.GetRange(prty.PrioritySS, fdb.RangeOptions{1, -1, max}).GetSliceWithError()
if err != nil {return nil, err}
if err != nil {
return nil, err
}

if len(ks) == 0 {return nil, nil}
if len(ks) == 0 {
return nil, nil
}
tr.Clear(ks[0].Key)
return _unpack(ks[0].Value)[0], nil
})

@@ -90,22 +104,27 @@ func (prty Priority) Pop(trtr fdb.Transactor, max bool) interface{} {
func (prty Priority) Peek(trtr fdb.Transactor, max bool) interface{} {
res, _ := trtr.Transact(func(tr fdb.Transaction) (interface{}, error) {
ks, err := tr.GetRange(prty.PrioritySS, fdb.RangeOptions{1, -1, max}).GetSliceWithError()
if err != nil {return nil, err}
if len(ks) == 0 {return nil, nil}
if err != nil {
return nil, err
}
if len(ks) == 0 {
return nil, nil
}

return _unpack(ks[0].Value)[0], nil
})
return res
}


func main() {
fdb.MustAPIVersion(510)

db := fdb.MustOpenDefault()

PriorityDemoDir, err := directory.CreateOrOpen(db, []string{"Priority"}, nil)
if err != nil {log.Fatal(err)}
if err != nil {
log.Fatal(err)
}

clear_subspace(db, PriorityDemoDir)

@@ -21,15 +21,15 @@
package main

import (
"fmt"
"github.com/apple/foundationdb/bindings/go/src/fdb"
"github.com/apple/foundationdb/bindings/go/src/fdb/directory"
"github.com/apple/foundationdb/bindings/go/src/fdb/subspace"
"github.com/apple/foundationdb/bindings/go/src/fdb/tuple"
"log"
"fmt"
)

type EmptyQueueError struct {}
type EmptyQueueError struct{}

func (q EmptyQueueError) Error() string {
return "Queue is Empty"

@@ -54,7 +54,9 @@ func (q *Queue) NewQueue(ss subspace.Subspace) {
func (q *Queue) Dequeue(trtr fdb.Transactor) (interface{}, error) {
i, e := trtr.Transact(func(tr fdb.Transaction) (interface{}, error) {
item, err := q.FirstItem(tr)
if err != nil {return nil, err}
if err != nil {
return nil, err
}
tr.Clear(item.(fdb.KeyValue).Key)
return item.(fdb.KeyValue).Value, err
})

@@ -64,12 +66,16 @@ func (q *Queue) Dequeue(trtr fdb.Transactor) (interface{}, error) {
func (q *Queue) Enqueue(trtr fdb.Transactor, item interface{}) (interface{}, error) {
i, e := trtr.Transact(func(tr fdb.Transaction) (interface{}, error) {
index, err := q.LastIndex(tr)
if err != nil {return nil, err}
if err != nil {
return nil, err
}

ki, err := q.QueueSS.Unpack(index.(fdb.Key))
if err != nil {return nil, err}
if err != nil {
return nil, err
}

tr.Set(q.QueueSS.Pack(tuple.Tuple{ki[0].(int64)+1}), []byte(item.(string)))
tr.Set(q.QueueSS.Pack(tuple.Tuple{ki[0].(int64) + 1}), []byte(item.(string)))

return nil, nil
})

@@ -106,7 +112,9 @@ func main() {
db := fdb.MustOpenDefault()

QueueDemoDir, err := directory.CreateOrOpen(db, []string{"Queue"}, nil)
if err != nil {log.Fatal(err)}
if err != nil {
log.Fatal(err)
}

clear_subspace(db, QueueDemoDir)

@@ -119,7 +127,9 @@ func main() {
q.Enqueue(db, "test3")
for i := 0; i < 5; i++ {
item, e := q.Dequeue(db)
if e != nil {log.Fatal(e)}
if e != nil {
log.Fatal(e)
}

fmt.Println(string(item.([]byte)))
}

@@ -21,12 +21,12 @@
package main

import (
"log"
"fmt"
"github.com/apple/foundationdb/bindings/go/src/fdb"
"github.com/apple/foundationdb/bindings/go/src/fdb/directory"
"github.com/apple/foundationdb/bindings/go/src/fdb/subspace"
"github.com/apple/foundationdb/bindings/go/src/fdb/tuple"
"log"
)

func clear_subspace(trtr fdb.Transactor, sub subspace.Subspace) error {

@@ -43,7 +43,9 @@ func _pack(t interface{}) []byte {

func _unpack(t []byte) tuple.Tuple {
i, e := tuple.Unpack(t)
if e != nil {return nil}
if e != nil {
return nil
}
return i
}

@@ -65,7 +67,7 @@ func (tbl Table) TableSetCell(trtr fdb.Transactor, row, column int, value interf
}

func (tbl Table) TableGetCell(trtr fdb.Transactor, row, column int) interface{} {
item, _:= trtr.ReadTransact(func(rtr fdb.ReadTransaction) (interface{}, error) {
item, _ := trtr.ReadTransact(func(rtr fdb.ReadTransaction) (interface{}, error) {
i := rtr.Get(tbl.row.Pack(tuple.Tuple{row, column})).MustGet()
return i, nil
})

@@ -75,7 +77,9 @@ func (tbl Table) TableGetCell(trtr fdb.Transactor, row, column int) interface{}
func (tbl Table) TableSetRow(trtr fdb.Transactor, row int, cols ...interface{}) {
trtr.Transact(func(tr fdb.Transaction) (interface{}, error) {
kr, err := fdb.PrefixRange(tbl.row.Pack(tuple.Tuple{row}))
if err != nil {return nil, err}
if err != nil {
return nil, err
}

tr.ClearRange(kr)

@@ -90,10 +94,14 @@ func (tbl Table) TableSetRow(trtr fdb.Transactor, row int, cols ...interface{})
func (tbl Table) TableGetRow(tr fdb.ReadTransactor, row int) ([]interface{}, error) {
item, err := tr.ReadTransact(func(rtr fdb.ReadTransaction) (interface{}, error) {
kr, e := fdb.PrefixRange(tbl.row.Pack(tuple.Tuple{row}))
if e != nil {return nil, e}
if e != nil {
return nil, e
}

slice, e := rtr.GetRange(kr, fdb.RangeOptions{0, -1, false}).GetSliceWithError()
if e != nil {return nil, e}
if e != nil {
return nil, e
}

ret := make([]interface{}, len(slice))

@@ -103,17 +111,23 @@ func (tbl Table) TableGetRow(tr fdb.ReadTransactor, row int) ([]interface{}, err

return ret, nil
})
if err != nil {return nil, err}
if err != nil {
return nil, err
}
return item.([]interface{}), nil
}

func (tbl Table) TableGetCol(tr fdb.ReadTransactor, col int) ([]interface{}, error) {
item, err := tr.ReadTransact(func(rtr fdb.ReadTransaction) (interface{}, error) {
kr, e := fdb.PrefixRange(tbl.col.Pack(tuple.Tuple{col}))
if e != nil {return nil, e}
if e != nil {
return nil, e
}

slice, e := rtr.GetRange(kr, fdb.RangeOptions{0, -1, false}).GetSliceWithError()
if e != nil {return nil, e}
if e != nil {
return nil, e
}

ret := make([]interface{}, len(slice))

@@ -123,7 +137,9 @@ func (tbl Table) TableGetCol(tr fdb.ReadTransactor, col int) ([]interface{}, err

return ret, nil
})
if err != nil {return nil, err}
if err != nil {
return nil, err
}
return item.([]interface{}), nil
}

@@ -133,7 +149,9 @@ func main() {
db := fdb.MustOpenDefault()

TableDemoDir, err := directory.CreateOrOpen(db, []string{"Graph"}, nil)
if err != nil {log.Fatal(err)}
if err != nil {
log.Fatal(err)
}

clear_subspace(db, TableDemoDir)

@@ -22,23 +22,28 @@ import fdb
fdb.api_version(300)
db = fdb.open()


@fdb.transactional
def clear_subspace(tr, subspace):
tr.clear_range_startswith(subspace.key())


CHUNK_LARGE = 5

blob = fdb.Subspace(('B',))


@fdb.transactional
def write_blob(tr, data):
if not len(data): return
num_chunks = (len(data)+CHUNK_LARGE-1) / CHUNK_LARGE
chunk_size = (len(data)+num_chunks)/num_chunks
chunks = [(n, n+chunk_size) for n in range(0, len(data), chunk_size)]
if not len(data):
return
num_chunks = (len(data) + CHUNK_LARGE - 1) / CHUNK_LARGE
chunk_size = (len(data) + num_chunks) / num_chunks
chunks = [(n, n + chunk_size) for n in range(0, len(data), chunk_size)]
for start, end in chunks:
tr[blob[start]] = data[start:end]


@fdb.transactional
def read_blob(tr):
value = ''

@@ -46,6 +51,7 @@ def read_blob(tr):
value += v
return value


clear_subspace(db, blob)
write_blob(db, 'ABCDEFGHIJKLMNOPQRSTUVWXYZ')
print read_blob(db)

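For the 26-byte test string used above, the chunking arithmetic works out as follows (a worked sketch; // makes the Python 2 integer division explicit):

    data_len = 26  # len('ABCDEFGHIJKLMNOPQRSTUVWXYZ')
    CHUNK_LARGE = 5
    num_chunks = (data_len + CHUNK_LARGE - 1) // CHUNK_LARGE  # (26 + 4) // 5 == 6
    chunk_size = (data_len + num_chunks) // num_chunks        # (26 + 6) // 6 == 5
    chunks = [(n, n + chunk_size) for n in range(0, data_len, chunk_size)]
    # [(0, 5), (5, 10), (10, 15), (15, 20), (20, 25), (25, 30)]
    # The final chunk may run past the end; the slice data[25:30] simply
    # returns the single remaining byte.
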
@@ -31,6 +31,7 @@ doc_space = fdb.Subspace(('D',))
EMPTY_OBJECT = -2
EMPTY_ARRAY = -1


def to_tuples(item):
if item == {}:
return [(EMPTY_OBJECT, None)]

@@ -43,12 +44,17 @@ def to_tuples(item):
else:
return [(item,)]


def from_tuples(tuples):
if not tuples: return {}
if not tuples:
return {}
first = tuples[0]  # Determine kind of object from first tuple
if len(first) == 1: return first[0]  # Primitive value
if first == (EMPTY_OBJECT,None): return {}
if first == (EMPTY_ARRAY, None): return []
if len(first) == 1:
return first[0]  # Primitive value
if first == (EMPTY_OBJECT, None):
return {}
if first == (EMPTY_ARRAY, None):
return []
# For an object or array, we need to group the tuples by their first element
groups = [list(g) for k, g in itertools.groupby(tuples, lambda t:t[0])]
if first[0] == 0:  # array

@@ -56,17 +62,19 @@ def from_tuples(tuples):
else:  # object
return dict((g[0][0], from_tuples([t[1:] for t in g])) for g in groups)


@fdb.transactional
def insert_doc(tr, doc):
if type(doc) == str:
doc = json.loads(doc)
if not 'doc_id' in doc:
if 'doc_id' not in doc:
new_id = _get_new_id(tr)
doc['doc_id'] = new_id
for tup in to_tuples( doc ):
for tup in to_tuples(doc):
tr[doc_space.pack((doc['doc_id'],) + tup[:-1])] = fdb.tuple.pack((tup[-1],))
return doc['doc_id']


@fdb.transactional
def _get_new_id(tr):
found = False

@@ -78,6 +86,7 @@ def _get_new_id(tr):
break
return new_id


@fdb.transactional
def get_doc(tr, doc_id, prefix=()):
v = tr[doc_space.pack((doc_id,) + prefix)]

@@ -85,23 +94,28 @@ def get_doc(tr, doc_id, prefix=()):
return from_tuples([prefix + fdb.tuple.unpack(v)])
else:
return from_tuples([doc_space.unpack(k)[1:] + fdb.tuple.unpack(v)
for k, v in tr[doc_space.range((doc_id,)+prefix)]])
for k, v in tr[doc_space.range((doc_id,) + prefix)]])


@fdb.transactional
def print_subspace(tr, subspace):
for k, v in tr[subspace.range()]:
print subspace.unpack(k), fdb.tuple.unpack(v)[0]


@fdb.transactional
def clear_subspace(tr, subspace):
tr.clear_range_startswith(subspace.key())


clear_subspace(db, doc_space)


def smoke_test():
h1 = {'user': {'jones': {'friend_of': 'smith', 'group': ['sales', 'service']}, 'smith': {'friend_of': 'jones', 'group': ['dev', 'research']}}}
id = insert_doc(db, h1)
print get_doc(db, id, ('user', 'smith', 'group'))


if __name__ == "__main__":
smoke_test()

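To make the tuple encoding concrete: each leaf of a nested structure becomes one tuple whose elements spell the path to that leaf, and from_tuples() groups rows by their first element to rebuild the nesting. An illustrative sketch (row ordering depends on dict iteration, so it is not guaranteed):

    h = {'user': {'group': ['dev', 'research']}}
    # to_tuples(h) yields rows shaped like:
    #   ('user', 'group', 0, 'dev')
    #   ('user', 'group', 1, 'research')
    # and from_tuples() on those rows reconstructs the original value:
    assert from_tuples(to_tuples(h)) == h
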
@@ -26,26 +26,32 @@ graph = fdb.Subspace(('G',))
edge = graph['E']
inverse = graph['I']


@fdb.transactional
def set_edge(tr, node, neighbor):
tr[edge[node][neighbor]] = ''
tr[inverse[neighbor][node]] = ''


@fdb.transactional
def del_edge(tr, node, neighbor):
del tr[edge[node][neighbor]]
del tr[inverse[neighbor][node]]


@fdb.transactional
def get_out_neighbors(tr, node):
return [edge.unpack(k)[1] for k, _ in tr[edge[node].range()]]


@fdb.transactional
def get_in_neighbors(tr, node):
return [inverse.unpack(k)[1] for k, _ in tr[inverse[node].range()]]


@fdb.transactional
def clear_subspace(tr, subspace):
tr.clear_range_startswith(subspace.key())


clear_subspace(db, graph)

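A brief usage sketch of the graph functions above (assuming the db handle opened earlier in the recipe):

    set_edge(db, 0, 1)
    set_edge(db, 0, 2)
    print get_out_neighbors(db, 0)  # expected: [1, 2]
    print get_in_neighbors(db, 2)   # expected: [0]
    del_edge(db, 0, 2)
    print get_out_neighbors(db, 0)  # expected: [1]
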
@@ -21,6 +21,7 @@
import fdb
fdb.api_version(300)


class Workspace(object):

def __init__(self, directory, db):

@@ -42,15 +43,18 @@ class Workspace(object):
def current(self):
return self.dir.create_or_open(self.db, (u'current',))


@fdb.transactional
def clear_subspace(tr, subspace):
tr.clear_range_startswith(subspace.key())


@fdb.transactional
def print_subspace(tr, subspace):
for k, v in tr[subspace.range()]:
print subspace.unpack(k), v


def smoke_test():
db = fdb.open()
working_dir = fdb.directory.create_or_open(db, (u'working',))

@@ -68,6 +72,6 @@ def smoke_test():
print "contents:"
print_subspace(db, workspace.current)


if __name__ == "__main__":
smoke_test()

@@ -24,19 +24,24 @@ import fdb
fdb.api_version(300)
db = fdb.open()


@fdb.transactional
def clear_subspace(tr, subspace):
tr.clear_range_startswith(subspace.key())


multi = fdb.Subspace(('M',))
clear_subspace(db, multi)

# Multimaps with multiset values


@fdb.transactional
def multi_add(tr, index, value):
tr.add(multi[index][value], struct.pack('<q', 1))


@fdb.transactional
def multi_subtract(tr, index, value):
v = tr[multi[index][value]]

@@ -45,27 +50,33 @@ def multi_subtract(tr, index, value):
else:
del tr[multi[index][value]]


@fdb.transactional
def multi_get(tr, index):
return [multi.unpack(k)[1] for k, v in tr[multi[index].range()]]


@fdb.transactional
def multi_get_counts(tr, index):
return {multi.unpack(k)[1]:struct.unpack('<q', v)[0]
return {multi.unpack(k)[1]: struct.unpack('<q', v)[0]
for k, v in tr[multi[index].range()]}


@fdb.transactional
def multi_is_element(tr, index, value):
return tr[multi[index][value]].present()


import time

N = 10000


@fdb.transactional
def time_atomic_add(tr):
for i in xrange(N):
multi_add(db, 'foo','bar')
multi_add(db, 'foo', 'bar')


@fdb.transactional
def time_atomic_subtract(tr):

@@ -75,6 +86,7 @@ def time_atomic_subtract(tr):
end = time.time()
print "{} seconds for atomic subtract".format(end - start)


if __name__ == '__main__':
time_atomic_add(db)
time_atomic_subtract(db)

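The counters above lean on FoundationDB's atomic add, which adds values bytewise as little-endian integers without creating read conflicts. A minimal sketch of the encoding involved:

    import struct

    one = struct.pack('<q', 1)  # 8-byte little-endian increment
    # After N calls to tr.add(key, one), the stored value decodes back to N:
    # struct.unpack('<q', tr[key])[0] == N
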
@@ -24,10 +24,12 @@ fdb.api_version(300)

pq = fdb.Subspace(('P',))


@fdb.transactional
def push(tr, value, priority):
tr[pq[priority][_next_count(tr, priority)][os.urandom(20)]] = value


@fdb.transactional
def _next_count(tr, priority):
r = pq[priority].range()

@@ -35,6 +37,7 @@ def _next_count(tr, priority):
return pq[priority].unpack(key)[0] + 1
return 0


@fdb.transactional
def pop(tr, max=False):
r = pq.range()

@@ -42,16 +45,19 @@ def pop(tr, max=False):
del tr[item.key]
return item.value


@fdb.transactional
def peek(tr, max=False):
r = pq.range()
for item in tr.get_range(r.start, r.stop, limit=1, reverse=max):
return item.value


@fdb.transactional
def clear_subspace(tr, subspace):
tr.clear_range_startswith(subspace.key())


def smoke_test():
print "Peek none:", peek(db)
push(db, 'a', 1)

@@ -88,7 +94,8 @@ def smoke_test():
print pop(db, max=True)
print pop(db, max=True)


if __name__ == "__main__":
db = fdb.open()
clear_subspace(db, pq)
smoke_test()
smoke_test()

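The key layout is what gives this queue its ordering; a short sketch of the behavior (assuming the db handle from the smoke test):

    # Keys are pq[priority][count][random bytes], so entries sort first by
    # priority, then by insertion order; the random suffix keeps concurrent
    # pushes at the same count from colliding.
    push(db, 'low', 1)
    push(db, 'high', 9)
    print pop(db, max=True)  # expected: 'high'
    print pop(db)            # expected: 'low'
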
@@ -24,24 +24,30 @@ import fdb
fdb.api_version(300)
db = fdb.open()


@fdb.transactional
def clear_subspace(tr, subspace):
tr.clear_range_startswith(subspace.key())


queue = fdb.Subspace(('Q',))
clear_subspace(db, queue)


@fdb.transactional
def dequeue(tr):
item = first_item(tr)
if item is None: return None
if item is None:
return None
del tr[item.key]
return item.value


@fdb.transactional
def enqueue(tr, value):
tr[queue[last_index(tr) + 1][os.urandom(20)]] = value


@fdb.transactional
def last_index(tr):
r = queue.range()

@@ -49,12 +55,14 @@ def last_index(tr):
return queue.unpack(key)[0]
return 0


@fdb.transactional
def first_item(tr):
r = queue.range()
for kv in tr.get_range(r.start, r.stop, limit=1):
return kv


def smoke_test():
enqueue(db, 'a')
enqueue(db, 'b')

@@ -83,8 +91,8 @@ def smoke_test():
print dequeue(db)
print dequeue(db)


if __name__ == "__main__":
db = fdb.open()
clear_subspace(db, queue)
smoke_test()

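A short sketch of how the indexing above behaves (assuming an open db handle):

    enqueue(db, 'a')   # last_index() finds nothing, so 'a' lands at queue[1]
    enqueue(db, 'b')   # last_index() now returns 1, so 'b' lands at queue[2]
    print dequeue(db)  # expected: 'a' (first_item scans from the start)
    # The random 20-byte suffix under each index keeps two producers that
    # read the same last_index from writing the identical key.
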
@@ -26,27 +26,33 @@ table = fdb.Subspace(('T',))
row_index = table['R']
col_index = table['C']


def _pack(value):
return fdb.tuple.pack((value,))


def _unpack(value):
return fdb.tuple.unpack(value)[0]


@fdb.transactional
def table_set_cell(tr, row, column, value):
tr[row_index[row][column]] = _pack(value)
tr[col_index[column][row]] = _pack(value)


@fdb.transactional
def table_get_cell(tr, row, column):
return tr[row_index[row][column]]


@fdb.transactional
def table_set_row(tr, row, cols):
del tr[row_index[row].range()]
for c, v in cols.iteritems():
table_set_cell(tr, row, c, v)


@fdb.transactional
def table_get_row(tr, row):
cols = {}

@@ -55,6 +61,7 @@ def table_get_row(tr, row):
cols[c] = _unpack(v)
return cols


@fdb.transactional
def table_get_col(tr, col):
rows = {}

@@ -63,8 +70,9 @@ def table_get_col(tr, col):
rows[r] = _unpack(v)
return rows


@fdb.transactional
def clear_subspace(tr, subspace):
tr.clear_range_startswith(subspace.key())

#clear_subspace(db, table)
# clear_subspace(db, table)

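A brief usage sketch of the row/column index pair above (assuming an open db handle):

    table_set_cell(db, 0, 0, 'top-left')
    table_set_row(db, 1, {0: 'a', 1: 'b'})
    print table_get_row(db, 1)  # expected: {0: 'a', 1: 'b'}
    print table_get_col(db, 0)  # expected: {0: 'top-left', 1: 'a'}
    # Every write goes to both row_index and col_index, which is what makes
    # whole-column reads as cheap as whole-row reads.
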
@ -32,7 +32,7 @@ class Result(object):
        self.errors = []

    def add_kpi(self, name, value, units):
        self.kpis[name] = {'value' : value, 'units' : units}
        self.kpis[name] = {'value': value, 'units': units}

    def add_error(self, error):
        self.errors.append(error)

@ -43,7 +43,8 @@ class Result(object):
        file = os.path.join(dir, file)

        with open(file, 'w') as f:
            json.dump({'kpis' : self.kpis, 'errors' : self.errors}, f)
            json.dump({'kpis': self.kpis, 'errors': self.errors}, f)


class PythonTest(object):
    def __init__(self):

@ -70,9 +71,11 @@ class PythonTest(object):
        if parser is None:
            parser = argparse.ArgumentParser()

        parser.add_argument('--output-directory', default='', type=str, help='The directory to store the output JSON in. If not set, the current directory is used')
        parser.add_argument('--output-directory', default='', type=str,
                            help='The directory to store the output JSON in. If not set, the current directory is used')
        parser.add_argument('--disable-multiversion-api', action='store_true', help='Disables the multi-version client API')
        parser.add_argument('--enable-callbacks-on-external-threads', action='store_true', help='Allows callbacks to be called on threads created by the client library')
        parser.add_argument('--enable-callbacks-on-external-threads', action='store_true',
                            help='Allows callbacks to be called on threads created by the client library')
        parser.add_argument('--use-external-client', action='store_true', help='Connect to the server using an external client')

        self.args = parser.parse_args()

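The harness above establishes the pattern the rest of these files follow: subclass PythonTest, override run_test, and report through self.result, which serializes KPIs and errors to JSON via Result. A minimal sketch of a conforming test; NoopTest and its KPI are hypothetical, not part of this diff:

class NoopTest(PythonTest):
    def run_test(self):
        # Record a throughput-style KPI the way the performance tests do.
        self.result.add_kpi('noop_throughput', 42, 'operations/sec')

if __name__ == '__main__':
    NoopTest().run()
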
@ -34,11 +34,12 @@ import fdb
import fdb.tuple
fdb.api_version(400)

#A class that mimics some of the operations of the FoundationDB key-value store

# A class that mimics some of the operations of the FoundationDB key-value store
class KeyValueStore():

    #Uses a simple dictionary to store key-value pairs
    #Any operations that depend on the order of keys first sort the data
    # Uses a simple dictionary to store key-value pairs
    # Any operations that depend on the order of keys first sort the data
    store = dict()

    def get(self, key):

@ -130,7 +131,7 @@ class PythonCorrectness(PythonTest):
    callback = False
    callbackError = ''

    #Python correctness tests (checks if functions run and yield correct results)
    # Python correctness tests (checks if functions run and yield correct results)
    def run_test(self):
        try:
            db = fdb.open(None, 'DB')

@ -152,13 +153,15 @@ class PythonCorrectness(PythonTest):
        except:
            self.result.add_error(self.getError('Failed to complete all tests'))

    #Generates a random set of keys and values
    def generateData(self, numKeys, minKeyLength, maxKeyLength, minValueLength, maxValueLength, prefix = '', allowDuplicates = True):
    # Generates a random set of keys and values
    def generateData(self, numKeys, minKeyLength, maxKeyLength, minValueLength, maxValueLength, prefix='', allowDuplicates=True):
        data = list()
        keys = set()
        while len(data) < numKeys:
            #key = prefix + ''.join(random.choice(string.ascii_lowercase) for i in range(0, random.randint(minKeyLength - len(prefix), maxKeyLength - len(prefix))))
            key = prefix + ''.join(chr(random.randint(0, 254)) for i in range(0, random.randint(minKeyLength - len(prefix), maxKeyLength - len(prefix))))
            # key = prefix + ''.join(random.choice(string.ascii_lowercase)
            #                        for i in range(0, random.randint(minKeyLength - len(prefix), maxKeyLength - len(prefix))))
            key = prefix + ''.join(chr(random.randint(0, 254))
                                   for i in range(0, random.randint(minKeyLength - len(prefix), maxKeyLength - len(prefix))))
            if not allowDuplicates:
                if key in keys:
                    continue

@ -170,7 +173,7 @@ class PythonCorrectness(PythonTest):

        return data

    #Function to test the callback feature of Future objects
    # Function to test the callback feature of Future objects
    def testCallback(self, future):
        try:
            future.wait()

@ -181,7 +184,7 @@ class PythonCorrectness(PythonTest):

        self.callback = True

    #Tests that all of the functions in the python API can be called without failing
    # Tests that all of the functions in the python API can be called without failing
    def testFunctions(self, db):
        self.callback = False
        self.callbackError = ''

@ -223,8 +226,8 @@ class PythonCorrectness(PythonTest):

        try:
            value = tr['fakekey']
            #The following line would generate a segfault
            #value.capi.fdb_future_block_until_ready(0)
            # The following line would generate a segfault
            # value.capi.fdb_future_block_until_ready(0)
            value.wait()
        except KeyboardInterrupt:
            raise

@ -262,7 +265,6 @@ class PythonCorrectness(PythonTest):
        except:
            self.result.add_error(self.getError('Get and reset failed'))


        try:
            tr.set_read_version(version.wait())
        except KeyboardInterrupt:

@ -314,13 +316,13 @@ class PythonCorrectness(PythonTest):
            tr['testkey2'] = 'testvalue2'
            tr['testkey3'] = 'testvalue3'

            for k,v in tr.get_range('testkey1', 'testkey3'):
            for k, v in tr.get_range('testkey1', 'testkey3'):
                v += ''

            for k,v in tr.get_range('testkey1', 'testkey2', 2):
            for k, v in tr.get_range('testkey1', 'testkey2', 2):
                v += ''

            for k,v in tr['testkey1':'testkey3']:
            for k, v in tr['testkey1':'testkey3']:
                v += ''
        except KeyboardInterrupt:
            raise

@ -331,7 +333,7 @@ class PythonCorrectness(PythonTest):
            tr['otherkey1'] = 'othervalue1'
            tr['otherkey2'] = 'othervalue2'

            for k,v in tr.get_range_startswith('testkey'):
            for k, v in tr.get_range_startswith('testkey'):
                v += ''
        except KeyboardInterrupt:
            raise

@ -365,13 +367,13 @@ class PythonCorrectness(PythonTest):
            self.result.add_error(self.getError('Create key selector failed'))

        try:
            for k,v in tr.get_range(begin, end):
            for k, v in tr.get_range(begin, end):
                v += ''

            for k,v in tr.get_range(begin, end, 2):
            for k, v in tr.get_range(begin, end, 2):
                v += ''

            for k,v in tr[begin:end]:
            for k, v in tr[begin:end]:
                v += ''
        except KeyboardInterrupt:
            raise

@ -399,13 +401,13 @@ class PythonCorrectness(PythonTest):
            begin = fdb.KeySelector.last_less_than('testkey2')
            end = fdb.KeySelector.first_greater_or_equal('testkey2')

            for k,v in tr.get_range(begin, end):
            for k, v in tr.get_range(begin, end):
                v += ''

            begin = fdb.KeySelector.last_less_or_equal('testkey2')
            end = fdb.KeySelector.first_greater_than('testkey2')

            for k,v in tr.get_range(begin, end):
            for k, v in tr.get_range(begin, end):
                v += ''
        except KeyboardInterrupt:
            raise

@ -442,7 +444,7 @@ class PythonCorrectness(PythonTest):
            tr[fdb.tuple.pack(('k1', 'k2', 'k3'))] = 'v1'
            tr[fdb.tuple.pack(('k1', 'k2', 'k3', 'k4'))] = 'v2'

            for k,v in tr[fdb.tuple.range(('k1', 'k2'))]:
            for k, v in tr[fdb.tuple.range(('k1', 'k2'))]:
                v += ''

        except KeyboardInterrupt:

@ -467,30 +469,30 @@ class PythonCorrectness(PythonTest):
        if len(self.callbackError) > 0:
            self.result.add_error(self.callbackError)

    #Compares a FoundationDB database with an in-memory key-value store
    # Compares a FoundationDB database with an in-memory key-value store
    def compareDatabaseToMemory(self, db, store):
        dbResult = self.correctnessGetRangeTransactional(db, '\x00', '\xff')
        storeResult = store.get_range('\x00', '\xff')

        return self.compareResults(dbResult, storeResult)

    #Compares result sets coming from a FoundationDB database and an in-memory key-value store
    # Compares result sets coming from a FoundationDB database and an in-memory key-value store
    def compareResults(self, dbResults, storeResults):
        if len(dbResults) != len(storeResults):
            #print 'mismatched lengths: ' + str(len(dbResults)) + ' - ' + str(len(storeResults))
            # print 'mismatched lengths: ' + str(len(dbResults)) + ' - ' + str(len(storeResults))
            return False

        for i in range(0, len(dbResults)):
            #if i >= len(storeResults):
            # if i >= len(storeResults):
            # print 'mismatched key: ' + dbResults[i].key
            # return False
            if dbResults[i].key != storeResults[i][0] or dbResults[i].value != storeResults[i][1]:
                #print 'mismatched key: ' + dbResults[i].key + ' - ' + storeResults[i][0]
                # print 'mismatched key: ' + dbResults[i].key + ' - ' + storeResults[i][0]
                return False

        return True

    #Performs the same operations on a FoundationDB database and an in-memory key-value store and compares the results
    # Performs the same operations on a FoundationDB database and an in-memory key-value store and compares the results
    def testCorrectness(self, db):
        numKeys = 5000
        ratioShortKeys = 0.5

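The comparison logic above leans on both sides being sorted: correctnessGetRangeTransactional returns key-ordered KeyValue objects and the mimic store sorts its pairs, so an element-wise walk is sufficient. A distilled sketch of the same check, assuming db_results holds objects with .key/.value attributes and store_results holds (key, value) tuples:

def results_match(db_results, store_results):
    # Equal lengths plus a pairwise walk over two key-sorted sequences.
    if len(db_results) != len(store_results):
        return False
    for kv, (key, value) in zip(db_results, store_results):
        if kv.key != key or kv.value != value:
            return False
    return True
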
@ -516,51 +518,51 @@ class PythonCorrectness(PythonTest):
        try:
            store = KeyValueStore()

            #Generate some random data
            # Generate some random data
            data = self.generateData(numKeys * ratioShortKeys, minShortKeyLength, maxShortKeyLength, minValueLength, maxValueLength)
            data.extend(self.generateData(numKeys * (1 - ratioShortKeys), minLongKeyLength, maxLongKeyLength, minValueLength, maxValueLength))

            #Insert the data
            # Insert the data
            self.correctnessSet(db, store, data, maxKeysPerTransaction)
            if not self.compareDatabaseToMemory(db, store):
                self.result.add_error('transaction.set resulted in incorrect database')

            #Compare the results of single key reads
            # Compare the results of single key reads
            if not self.correctnessGet(db, store, data, numReads, maxKeysPerTransaction):
                self.result.add_error('transaction.get returned incorrect result')

            #Compare the results of range reads
            # Compare the results of range reads
            for i in range(0, numRangeReads):
                if not self.correctnessGetRange(db, store, data):
                if not self.correctnessGetRange(db, store, data):
                    self.result.add_error('transaction.get_range returned incorrect results')
                    break

            #Compare the results of prefix reads
            # Compare the results of prefix reads
            for i in range(0, numPrefixReads):
                if not self.correctnessGetPrefix(db, store, data):
                    self.result.add_error('transaction.get_range_startswith returned incorrect results')
                    break

            #Compare the results of get key
            # Compare the results of get key
            if not self.correctnessGetKey(db, store, data, numGetKeys, maxKeysPerTransaction):
                self.result.add_error('transaction.get_key returned incorrect results')

            #Compare the results of clear
            # Compare the results of clear
            clearedKeys = self.correctnessClear(db, store, data, numClears, maxKeysPerTransaction)
            if not self.compareDatabaseToMemory(db, store):
                self.result.add_error('transaction.clear resulted in incorrect database')

            # for key in clearedKeys:
            # print 'clearing key ' + key
            #else:
            # print 'successful compare'
            # print 'clearing key ' + key
            # else:
            # print 'successful compare'

            #Fill the database back up with data
            # Fill the database back up with data
            self.correctnessSet(db, store, data, maxKeysPerTransaction)
            if not self.compareDatabaseToMemory(db, store):
                self.result.add_error('transaction.set resulted in incorrect database')

            #Compare the results of clear_range
            # Compare the results of clear_range
            for i in range(0, numRangeClears):
                self.correctnessClearRange(db, store, data)

@ -568,7 +570,7 @@ class PythonCorrectness(PythonTest):
            if not success:
                self.result.add_error('transaction.clear_range resulted in incorrect database')

            #Fill the database back up with data
            # Fill the database back up with data
            self.correctnessSet(db, store, data, maxKeysPerTransaction)
            if not self.compareDatabaseToMemory(db, store):
                self.result.add_error('transaction.set resulted in incorrect database')

@ -577,7 +579,7 @@ class PythonCorrectness(PythonTest):
                if not success:
                    break

            #Compare the results of clear_range_startswith
            # Compare the results of clear_range_startswith
            self.correctnessClearPrefix(db, store, data, numPrefixClears)
            if not self.compareDatabaseToMemory(db, store):
                self.result.add_error('transaction.clear_range_startswith resulted in incorrect database')

@ -587,23 +589,23 @@ class PythonCorrectness(PythonTest):
        except:
            self.result.add_error(self.getError('Database error in correctness test'))

    #Stores data in the database and a memory key-value store
    # Stores data in the database and a memory key-value store
    def correctnessSet(self, db, store, data, maxKeysPerTransaction):
        for [key, value] in data:
            store.set(key, value)

        keysCommitted = 0
        while keysCommitted < len(data):
            self.correctnessSetTransactional(db, data[keysCommitted : keysCommitted + maxKeysPerTransaction])
            self.correctnessSetTransactional(db, data[keysCommitted: keysCommitted + maxKeysPerTransaction])
            keysCommitted += maxKeysPerTransaction

    #Stores data in the database
    # Stores data in the database
    @fdb.transactional
    def correctnessSetTransactional(self, tr, data):
        for [key, value] in data:
            tr.set(key, value)

    #Compares the results of the get operation from the database and a memory key-value store
    # Compares the results of the get operation from the database and a memory key-value store
    def correctnessGet(self, db, store, data, numReads, maxKeysPerTransaction):
        keys = []
        for i in range(0, numReads):

@ -612,7 +614,7 @@ class PythonCorrectness(PythonTest):

        keysRetrieved = 0
        while keysRetrieved < len(keys):
            subKeys = keys[keysRetrieved : keysRetrieved + maxKeysPerTransaction]
            subKeys = keys[keysRetrieved: keysRetrieved + maxKeysPerTransaction]

            values = self.correctnessGetTransactional(db, subKeys)
            for i in range(0, numReads):

@ -623,7 +625,7 @@ class PythonCorrectness(PythonTest):

        return True

    #Gets the values for the specified list of keys from the database
    # Gets the values for the specified list of keys from the database
    @fdb.transactional
    def correctnessGetTransactional(self, tr, keys):
        futures = []

@ -636,7 +638,7 @@ class PythonCorrectness(PythonTest):

        return values

    #Compares the results of the get_range operation from the database and a memory key-value store
    # Compares the results of the get_range operation from the database and a memory key-value store
    def correctnessGetRange(self, db, store, data):
        index = random.randint(0, len(data) - 1)
        index2 = random.randint(0, len(data) - 1)

@ -649,15 +651,15 @@ class PythonCorrectness(PythonTest):

        return self.compareResults(dbResults, storeResults)

    #Gets the entries in the range [key1,key2) from the database
    # Gets the entries in the range [key1,key2) from the database
    @fdb.transactional
    def correctnessGetRangeTransactional(self, tr, key1, key2, data = None):
    def correctnessGetRangeTransactional(self, tr, key1, key2, data=None):
        if data is not None:
            return list(tr.get_range(key1, key2, len(data)))
        else:
            return list(tr.get_range(key1, key2))

    #Compares the results of the get_range_startswith operation from the database and a memory key-value store
    # Compares the results of the get_range_startswith operation from the database and a memory key-value store
    def correctnessGetPrefix(self, db, store, data):
        prefix = ''.join(chr(random.randint(0, 254)) for i in range(0, random.randint(1, 3)))
        dbResults = self.correctnessGetPrefixTransactional(db, prefix)

@ -665,12 +667,12 @@ class PythonCorrectness(PythonTest):

        return self.compareResults(dbResults, storeResults)

    #Gets the entries with a given prefix from the database
    # Gets the entries with a given prefix from the database
    @fdb.transactional
    def correctnessGetPrefixTransactional(self, tr, prefix):
        return list(tr.get_range_startswith(prefix))

    #Compares the results of the get_key operation from the database and a memory key-value store
    # Compares the results of the get_key operation from the database and a memory key-value store
    def correctnessGetKey(self, db, store, data, numGetKeys, maxKeysPerTransaction):
        selectors = []
        for i in range(0, numGetKeys):

@ -684,7 +686,7 @@ class PythonCorrectness(PythonTest):

        keysRetrieved = 0
        while keysRetrieved < len(selectors):
            subSelectors = selectors[keysRetrieved : keysRetrieved + maxKeysPerTransaction]
            subSelectors = selectors[keysRetrieved: keysRetrieved + maxKeysPerTransaction]
            dbKeys = self.correctnessGetKeyTransactional(db, subSelectors)
            for i in range(0, numGetKeys):
                if dbKeys[i] != store.get_key(subSelectors[i]):

@ -693,7 +695,7 @@ class PythonCorrectness(PythonTest):

        return True

    #Gets the keys specified by the list of key selectors
    # Gets the keys specified by the list of key selectors
    @fdb.transactional
    def correctnessGetKeyTransactional(self, tr, keySelectors):
        futures = []

@ -710,7 +712,7 @@ class PythonCorrectness(PythonTest):

        return keys

    #Clears data from a database and a memory key-value store
    # Clears data from a database and a memory key-value store
    def correctnessClear(self, db, store, data, numClears, maxKeysPerTransaction):
        clearedKeys = []
        for i in range(0, numClears):

@ -720,18 +722,18 @@ class PythonCorrectness(PythonTest):

        keysCleared = 0
        while keysCleared < len(clearedKeys):
            self.correctnessClearTransactional(db, clearedKeys[keysCleared : keysCleared + maxKeysPerTransaction])
            self.correctnessClearTransactional(db, clearedKeys[keysCleared: keysCleared + maxKeysPerTransaction])
            keysCleared += maxKeysPerTransaction

        return clearedKeys

    #Clears a list of keys from the database
    # Clears a list of keys from the database
    @fdb.transactional
    def correctnessClearTransactional(self, tr, clearedKeys):
        for key in clearedKeys:
            tr.clear(key)

    #Clears a range of data from a database and a memory key-value store
    # Clears a range of data from a database and a memory key-value store
    def correctnessClearRange(self, db, store, data):
        index = random.randint(0, len(data) - 1)
        index2 = random.randint(0, len(data) - 1)

@ -742,12 +744,12 @@ class PythonCorrectness(PythonTest):
        self.correctnessClearRangeTransactional(db, key1, key2)
        store.clear_range(key1, key2)

    #Clears a range of memory from a database
    # Clears a range of memory from a database
    @fdb.transactional
    def correctnessClearRangeTransactional(self, tr, key1, key2):
        tr.clear_range(key1, key2)

    #Clears data with random prefixes from a database and a memory key-value store
    # Clears data with random prefixes from a database and a memory key-value store
    def correctnessClearPrefix(self, db, store, data, numPrefixClears):
        prefixes = []
        for i in range(0, numPrefixClears):

@ -757,19 +759,21 @@ class PythonCorrectness(PythonTest):

        self.correctnessClearPrefixTransactional(db, prefixes)

    #Clears keys from a database that have a prefix in the prefixes list
    # Clears keys from a database that have a prefix in the prefixes list
    @fdb.transactional
    def correctnessClearPrefixTransactional(self, tr, prefixes):
        for prefix in prefixes:
            tr.clear_range_startswith(prefix)

    #Adds the stack trace to an error message
    # Adds the stack trace to an error message
    def getError(self, message):
        errorMessage = message + "\n" + traceback.format_exc()
        print('%s', errorMessage)
        return errorMessage


if __name__ == '__main__':
    print("Running PythonCorrectness test on Python version %d.%d.%d%s%d" % (sys.version_info[0], sys.version_info[1], sys.version_info[2], sys.version_info[3][0], sys.version_info[4]))
    print("Running PythonCorrectness test on Python version %d.%d.%d%s%d" %
          (sys.version_info[0], sys.version_info[1], sys.version_info[2], sys.version_info[3][0], sys.version_info[4]))

    PythonCorrectness().run()

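A pattern that recurs through the correctness helpers above (correctnessSet, correctnessGet, correctnessClear) is slicing work into chunks of maxKeysPerTransaction so each transaction stays well under FoundationDB's transaction size and duration limits. A generic sketch of that chunking, with hypothetical names:

def in_batches(items, batch_size):
    # Yield successive slices of at most batch_size items.
    for start in range(0, len(items), batch_size):
        yield items[start:start + batch_size]

# e.g. for batch in in_batches(data, maxKeysPerTransaction):
#          self.correctnessSetTransactional(db, batch)
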
@ -37,19 +37,20 @@ import fdb
import fdb.tuple
fdb.api_version(400)


class PythonPerformance(PythonTest):
    tests = {
        'future_latency' : 'Python API future throughput',
        'set' : 'Python API set throughput',
        'clear' : 'Python API clear throughput',
        'clear_range' : 'Python API clear_range throughput',
        'parallel_get' : 'Python API parallel get throughput',
        'serial_get' : 'Python API serial get throughput',
        'get_range' : 'Python API get_range throughput',
        'get_key' : 'Python API get_key throughput',
        'get_single_key_range' : 'Python API get_single_key_range throughput',
        'alternating_get_set' : 'Python API alternating get and set throughput',
        'write_transaction' : 'Python API single-key transaction throughput',
        'future_latency': 'Python API future throughput',
        'set': 'Python API set throughput',
        'clear': 'Python API clear throughput',
        'clear_range': 'Python API clear_range throughput',
        'parallel_get': 'Python API parallel get throughput',
        'serial_get': 'Python API serial get throughput',
        'get_range': 'Python API get_range throughput',
        'get_key': 'Python API get_key throughput',
        'get_single_key_range': 'Python API get_single_key_range throughput',
        'alternating_get_set': 'Python API alternating get and set throughput',
        'write_transaction': 'Python API single-key transaction throughput',
    }

    def __init__(self, key_count=1000000, key_size=16, value_size=100):

@ -58,7 +59,7 @@ class PythonPerformance(PythonTest):
        self.key_size = key_size
        self.value_str = ''.join(['x' for i in range(value_size)])

    #Python Performance Tests (checks if functions run and yield correct results, gets performance indicators)
    # Python Performance Tests (checks if functions run and yield correct results, gets performance indicators)
    def run_test(self):
        try:
            db = fdb.open(None, 'DB')

@ -76,7 +77,7 @@ class PythonPerformance(PythonTest):
            self.result.add_error(self.get_error('Failed to complete all tests'))

    def random_key(self):
        return self.key(random.randint(0, self.key_count-1))
        return self.key(random.randint(0, self.key_count - 1))

    def key(self, num):
        return '%0*d' % (self.key_size, num)

@ -89,7 +90,7 @@ class PythonPerformance(PythonTest):
        del db[:]
        num_keys = 100000 / (self.key_size + len(self.value_str))

        trs = [db.create_transaction() for i in range(int(math.ceil(float(self.key_count)/num_keys)))]
        trs = [db.create_transaction() for i in range(int(math.ceil(float(self.key_count) / num_keys)))]
        success = [False for i in range(len(trs))]

        while not all(success):

@ -104,16 +105,16 @@ class PythonPerformance(PythonTest):
                    tr.options.set_retry_limit(5)
                    futures[i] = tr.commit()

            for i,f in futures.items():
            for i, f in futures.items():
                try:
                    f.wait()
                    success[i] = True
                except fdb.FDBError as e:
                    trs[i].on_error(e).wait()

        time.sleep(60) # Give the database time to rebalance
        time.sleep(60)  # Give the database time to rebalance

    #Tests the performance of the API functions
    # Tests the performance of the API functions
    def test_performance(self, db):
        self.insert_data(db)

@ -121,12 +122,12 @@ class PythonPerformance(PythonTest):
            self.args.tests_to_run = PythonPerformance.tests.keys()
        else:
            for t in self.args.tests_to_run:
                if not t in PythonPerformance.tests:
                if t not in PythonPerformance.tests:
                    raise Exception("Unknown Python performance test '%s'" % t)

        num_runs = 25

        #Run each test
        # Run each test
        for test in self.args.tests_to_run:
            time.sleep(5)
            print('Running test %s' % test)

@ -135,7 +136,7 @@ class PythonPerformance(PythonTest):
            fxn_name = 'run_%s' % test
            assert hasattr(self, fxn_name), 'Test function %s not implemented' % fxn_name

            #Perform each test several times
            # Perform each test several times
            for x in range(0, num_runs):
                try:
                    results.append(getattr(self, fxn_name)(db))

@ -161,7 +162,7 @@ class PythonPerformance(PythonTest):

        return count / (time.time() - s)

    #Tests the performance of the 'clear' function
    # Tests the performance of the 'clear' function
    def run_clear(self, db, count=100000):
        tr = db.create_transaction()
        s = time.time()

@ -171,18 +172,18 @@ class PythonPerformance(PythonTest):

        return count / (time.time() - s)

    #Tests the performance of the 'clear_range' function
    # Tests the performance of the 'clear_range' function
    def run_clear_range(self, db, count=100000):
        tr = db.create_transaction()
        s = time.time()

        for i in range(count):
            key = self.random_key()
            del tr[key : self.key(int(key)+1)]
            del tr[key: self.key(int(key) + 1)]

        return count / (time.time() - s)

    #Tests the performance of the 'set' function
    # Tests the performance of the 'set' function
    def run_set(self, db, count=100000):
        tr = db.create_transaction()
        s = time.time()

@ -193,7 +194,7 @@ class PythonPerformance(PythonTest):

        return count / (time.time() - s)

    #Tests the parallel performance of the 'get' function
    # Tests the parallel performance of the 'get' function
    @fdb.transactional
    def run_parallel_get(self, tr, count=10000):
        tr.options.set_retry_limit(5)

@ -226,12 +227,12 @@ class PythonPerformance(PythonTest):

        return count / (time.time() - s)

    #Tests the serial performance of the 'get' function
    # Tests the serial performance of the 'get' function
    @fdb.transactional
    def run_serial_get(self, tr, count=2000):
        tr.options.set_retry_limit(5)

        if count > self.key_count/2:
        if count > self.key_count / 2:
            keys = [self.random_key() for i in range(count)]
        else:
            key_set = OrderedDict()

@ -246,18 +247,18 @@ class PythonPerformance(PythonTest):

        return count / (time.time() - s)

    #Tests the performance of the 'get_range' function
    # Tests the performance of the 'get_range' function
    @fdb.transactional
    def run_get_range(self, tr, count=100000):
        tr.options.set_retry_limit(5)
        b = random.randint(0, self.key_count-count)
        b = random.randint(0, self.key_count - count)
        s = time.time()

        list(tr[self.key(b) : self.key(b+count)])
        list(tr[self.key(b): self.key(b + count)])

        return count / (time.time() - s)

    #Tests the performance of the 'get_key' function
    # Tests the performance of the 'get_key' function
    @fdb.transactional
    def run_get_key(self, tr, count=2000):
        tr.options.set_retry_limit(5)

@ -275,7 +276,7 @@ class PythonPerformance(PythonTest):

        for i in range(count):
            index = random.randint(0, self.key_count)
            list(tr.get_range(self.key(index), self.key(index+1), limit=2))
            list(tr.get_range(self.key(index), self.key(index + 1), limit=2))

        return count / (time.time() - s)

@ -292,14 +293,16 @@ class PythonPerformance(PythonTest):

        return count / (time.time() - s)

    #Adds the stack trace to an error message
    # Adds the stack trace to an error message
    def get_error(self, message):
        errorMessage = message + "\n" + traceback.format_exc()
        print('%s' % errorMessage)
        return errorMessage


if __name__ == '__main__':
    print("Running PythonPerformance test on Python version %d.%d.%d%s%d" % (sys.version_info[0], sys.version_info[1], sys.version_info[2], sys.version_info[3][0], sys.version_info[4]))
    print("Running PythonPerformance test on Python version %d.%d.%d%s%d" %
          (sys.version_info[0], sys.version_info[1], sys.version_info[2], sys.version_info[3][0], sys.version_info[4]))

    parser = argparse.ArgumentParser()

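All of the run_* benchmarks above share one shape: start a timer, issue count operations, and report count / elapsed as the KPI, which is why each returns count / (time.time() - s). The pattern, distilled into a standalone sketch:

import time

def measure_ops_per_sec(op, count=10000):
    # Time a loop of `count` calls and return the achieved operations/sec.
    start = time.time()
    for _ in range(count):
        op()
    return count / (time.time() - start)
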
@ -32,14 +32,15 @@ from python_tests import PythonTest
import fdb
fdb.api_version(400)


class RYWBenchmark(PythonTest):
    tests = {
        'get_single' : "RYW: get single cached value throughput",
        'get_many_sequential' : "RYW: get sequential cached values throughput",
        'get_range_basic' : "RYW: get range cached values throughput",
        'single_clear_get_range' : "RYW: get range cached values with clears throughput",
        'clear_range_get_range' : "RYW: get range cached values with clear ranges throughput",
        'interleaved_sets_gets' : "RYW: interleaved sets and gets on a single key throughput",
        'get_single': "RYW: get single cached value throughput",
        'get_many_sequential': "RYW: get sequential cached values throughput",
        'get_range_basic': "RYW: get range cached values throughput",
        'single_clear_get_range': "RYW: get range cached values with clears throughput",
        'clear_range_get_range': "RYW: get range cached values with clear ranges throughput",
        'interleaved_sets_gets': "RYW: interleaved sets and gets on a single key throughput",
    }

    def __init__(self, key_count=10000, key_size=16):

@ -66,7 +67,7 @@ class RYWBenchmark(PythonTest):
    def key(self, num):
        return '%0*d' % (self.key_size, num)

    #Adds the stack trace to an error message
    # Adds the stack trace to an error message
    def get_error(self, message):
        errorMessage = message + "\n" + traceback.format_exc()
        print(errorMessage)

@ -80,7 +81,7 @@ class RYWBenchmark(PythonTest):
            self.args.tests_to_run = RYWBenchmark.tests.keys()
        else:
            for t in self.args.tests_to_run:
                if not t in RYWBenchmark.tests:
                if t not in RYWBenchmark.tests:
                    raise Exception("Unknown RYW benchmark test '%s'" % t)

        num_runs = 25

@ -112,53 +113,55 @@ class RYWBenchmark(PythonTest):
            tr[self.key(i)] = 'foo'

    def run_get_single(self, tr, count=10000):
        start = time.time()
        for i in range(count):
            tr.get(self.key(5001)).wait()
        return count / (time.time() - start)

    def run_get_many_sequential(self, tr, count=10000):
        start = time.time()
        for j in range(count):
            tr.get(self.key(j)).wait()
        return count / (time.time() - start)

    def run_get_range_basic(self, tr, count=100):
        start = time.time()
        for i in range(count):
            list(tr.get_range(self.key(0), self.key(self.key_count)))
        return self.key_count * count / (time.time() - start)

    def run_single_clear_get_range(self, tr, count=100):
        for i in range(0, self.key_count, 2):
            tr.clear(self.key(i))
        start = time.time()
        for i in range(0, count):
            list(tr.get_range(self.key(0), self.key(self.key_count)))
        kpi = self.key_count * count / 2 / (time.time() - start)
        self.insert_data(tr)
        return kpi

    def run_clear_range_get_range(self, tr, count=100):
        for i in range(0, self.key_count, 4):
            tr.clear_range(self.key(i), self.key(i+1))
            tr.clear_range(self.key(i), self.key(i + 1))
        start = time.time()
        for i in range(0, count):
            list(tr.get_range(self.key(0), self.key(self.key_count)))
        kpi = self.key_count * count * 3 / 4 / (time.time() - start)
        self.insert_data(tr)
        return kpi

    def run_interleaved_sets_gets(self, tr, count=10000):
        start = time.time()
        tr['foo'] = str(1)
        for i in range(count):
            old = int(tr.get('foo').wait())
            tr.set('foo', str(old + 1))
        return count / (time.time() - start)


if __name__ == '__main__':
    print("Running RYW Benchmark test on Python version %d.%d.%d%s%d" % (sys.version_info[0], sys.version_info[1], sys.version_info[2], sys.version_info[3][0], sys.version_info[4]))
    print("Running RYW Benchmark test on Python version %d.%d.%d%s%d" %
          (sys.version_info[0], sys.version_info[1], sys.version_info[2], sys.version_info[3][0], sys.version_info[4]))

    parser = argparse.ArgumentParser()

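These RYW benchmarks measure the client's read-your-writes cache rather than the network: after insert_data populates the transaction's write set, repeated tr.get(...) calls are answered locally, which is what makes ten thousand reads of a single key a meaningful microbenchmark. A sketch of the effect being measured, assuming an open database db:

tr = db.create_transaction()
tr['greeting'] = 'hello'
# Served from the transaction's own uncommitted writes; no server round trip.
assert tr['greeting'].wait() == 'hello'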