Merge branch 'release-5.2' of github.com:apple/foundationdb into feature-redwood

This commit is contained in:
Stephen Atherton 2018-04-06 16:29:37 -07:00
commit 2752a28611
827 changed files with 26264 additions and 4628 deletions

View File

@ -144,7 +144,7 @@ OBJDIR := .objs
include $(MK_INCLUDE)
clean: $(CLEAN_TARGETS)
clean: $(CLEAN_TARGETS) docpreview_clean
@echo "Cleaning toplevel"
@rm -rf $(OBJDIR)
@rm -rf $(DEPSDIR)
@ -174,12 +174,21 @@ lib/libstdc++.a: $(shell $(CC) -print-file-name=libstdc++_pic.a)
@ar rcs $@ .libstdc++/*.o
@rm -r .libstdc++
docpreview: javadoc godoc
TARGETS= $(MAKE) -C documentation docpreview
ifeq ($(PLATFORM),osx)
MD5SUM=md5
else
MD5SUM=md5sum
endif
docpreview_clean:
CLEAN_TARGETS= $(MAKE) -C documentation docpreview_clean
packages/foundationdb-docs-$(VERSION).tar.gz: FORCE javadoc godoc
TARGETS= $(MAKE) -C documentation docpackage
@mkdir -p packages
@rm -f packages/foundationdb-docs-$(VERSION).tar.gz
@cp documentation/sphinx/.dist/foundationdb-docs-$(VERSION).tar.gz packages/foundationdb-docs-$(VERSION).tar.gz
docpackage: packages/foundationdb-docs-$(VERSION).tar.gz
FORCE:
.SECONDEXPANSION:

View File

@ -21,36 +21,37 @@
import sys
import os
sys.path[:0]=[os.path.join(os.path.dirname(__file__), '..', '..', 'bindings', 'python')]
sys.path[:0] = [os.path.join(os.path.dirname(__file__), '..', '..', 'bindings', 'python')]
import util
FDB_API_VERSION = 510
FDB_API_VERSION = 520
LOGGING = {
'version' : 1,
'disable_existing_loggers' : False,
'formatters' : {
'simple' : {
'format' : '%(message)s'
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'simple': {
'format': '%(message)s'
}
},
'handlers' : {
'console' : {
'level' : 'NOTSET',
'class' : 'logging.StreamHandler',
'stream' : sys.stdout,
'formatter' : 'simple'
'handlers': {
'console': {
'level': 'NOTSET',
'class': 'logging.StreamHandler',
'stream': sys.stdout,
'formatter': 'simple'
}
},
'loggers' : {
'foundationdb.bindingtester' : {
'level' : 'INFO',
'handlers' : ['console']
'loggers': {
'foundationdb.bindingtester': {
'level': 'INFO',
'handlers': ['console']
}
}
}
class Result:
def __init__(self, subspace, key, values):
self.subspace_tuple = util.subspace_to_tuple(subspace)
@ -91,4 +92,3 @@ class Result:
value_str = repr(self.values)
return '%s = %s' % (repr(self.subspace_tuple + self.key_tuple), value_str)

View File

@ -34,8 +34,9 @@ from threading import Timer, Event
import logging.config
from collections import OrderedDict
from functools import reduce
sys.path[:0]=[os.path.join(os.path.dirname(__file__), '..')]
sys.path[:0] = [os.path.join(os.path.dirname(__file__), '..')]
import bindingtester
@ -52,6 +53,7 @@ import fdb.tuple
fdb.api_version(FDB_API_VERSION)
class ResultSet(object):
def __init__(self, specification):
self.specification = specification
@ -80,20 +82,20 @@ class ResultSet(object):
has_filtered_error = False
while True:
results = { i : r[indices[i]] for i, r in enumerate(self.tester_results.values()) if len(r) > indices[i] }
results = {i: r[indices[i]] for i, r in enumerate(self.tester_results.values()) if len(r) > indices[i]}
if len(results) == 0:
break
sequence_nums = [ r.sequence_num(self.specification) for r in results.values() ]
sequence_nums = [r.sequence_num(self.specification) for r in results.values()]
if any([s is not None for s in sequence_nums]):
results = { i : r for i, r in results.items() if r.sequence_num(self.specification) == min(sequence_nums) }
results = {i: r for i, r in results.items() if r.sequence_num(self.specification) == min(sequence_nums)}
else:
results = { i : r for i, r in results.items() if r.matches(min(results.values()), self.specification) }
results = {i: r for i, r in results.items() if r.matches(min(results.values()), self.specification)}
for i in results.keys():
indices[i] += 1
all_results = { i : results[i] if i in results else None for i in range(len(self.tester_results)) }
all_results = {i: results[i] if i in results else None for i in range(len(self.tester_results))}
result_str = '\n'.join([' %-*s - %s' % (name_length, self.tester_results.keys()[i], r) for i, r in all_results.items()])
result_list = results.values()
@ -113,12 +115,15 @@ class ResultSet(object):
return (num_errors, has_filtered_error)
def choose_api_version(selected_api_version, tester_min_version, tester_max_version, test_min_version, test_max_version):
if selected_api_version is not None:
if selected_api_version < tester_min_version or selected_api_version > tester_max_version:
raise Exception('Not all testers support the API version %d (min=%d, max=%d)' % (selected_api_version, tester_min_version, tester_max_version))
raise Exception('Not all testers support the API version %d (min=%d, max=%d)' %
(selected_api_version, tester_min_version, tester_max_version))
elif selected_api_version < test_min_version or selected_api_version > test_max_version:
raise Exception('API version %d is not supported by the specified test (min=%d, max=%d)' % (selected_api_version, test_min_version, test_max_version))
raise Exception('API version %d is not supported by the specified test (min=%d, max=%d)' %
(selected_api_version, test_min_version, test_max_version))
api_version = selected_api_version
else:
@ -126,19 +131,23 @@ def choose_api_version(selected_api_version, tester_min_version, tester_max_vers
max_version = min(tester_max_version, test_max_version)
if min_version > max_version:
raise Exception('Not all testers support the API versions required by the specified test (tester: min=%d, max=%d; test: min=%d, max=%d)' % (tester_min_version, tester_max_version, test_min_version, test_max_version))
raise Exception(
'Not all testers support the API versions required by the specified test'
'(tester: min=%d, max=%d; test: min=%d, max=%d)' % (tester_min_version, tester_max_version, test_min_version, test_max_version))
if random.random() < 0.7:
api_version = max_version
elif random.random() < 0.7:
api_version = min_version
elif random.random() < 0.9:
api_version = random.choice([v for v in [13, 14, 16, 21, 22, 23, 100, 200, 300, 400, 410, 420, 430, 440, 450, 460, 500, 510] if v >= min_version and v <= max_version])
api_version = random.choice([v for v in [13, 14, 16, 21, 22, 23, 100, 200, 300, 400, 410, 420, 430,
440, 450, 460, 500, 510, 520] if v >= min_version and v <= max_version])
else:
api_version = random.randint(min_version, max_version)
return api_version
class TestRunner(object):
def __init__(self, args):
self.args = copy.copy(args)
@ -157,7 +166,8 @@ class TestRunner(object):
min_api_version = max([tester.min_api_version for tester in self.testers])
max_api_version = min([tester.max_api_version for tester in self.testers])
self.args.api_version = choose_api_version(self.args.api_version, min_api_version, max_api_version, self.test.min_api_version, self.test.max_api_version)
self.args.api_version = choose_api_version(self.args.api_version, min_api_version, max_api_version,
self.test.min_api_version, self.test.max_api_version)
util.get_logger().info('\nCreating test at API version %d' % self.args.api_version)
@ -165,7 +175,8 @@ class TestRunner(object):
if self.args.max_int_bits is None:
self.args.max_int_bits = max_int_bits
elif self.args.max_int_bits > max_int_bits:
raise Exception('The specified testers support at most %d-bit ints, but --max-int-bits was set to %d' % (max_int_bits, self.args.max_int_bits))
raise Exception('The specified testers support at most %d-bit ints, but --max-int-bits was set to %d' %
(max_int_bits, self.args.max_int_bits))
self.args.no_threads = self.args.no_threads or any([not tester.threads_enabled for tester in self.testers])
if self.args.no_threads and self.args.concurrency > 1:
@ -189,15 +200,15 @@ class TestRunner(object):
for i, instruction in enumerate(instructions):
if self.args.print_all or (instruction.operation != 'SWAP' and instruction.operation != 'PUSH'):
util.get_logger().error(' %d. %r' % (i+offset, instruction))
util.get_logger().error(' %d. %r' % (i + offset, instruction))
util.get_logger().error('');
util.get_logger().error('')
def run_test(self):
test_instructions = self._generate_test()
expected_results = self.test.get_expected_results()
tester_results = { s.subspace : ResultSet(s) for s in self.test.get_result_specifications() }
tester_results = {s.subspace: ResultSet(s) for s in self.test.get_result_specifications()}
for subspace, results in expected_results.items():
tester_results[subspace].add('expected', results)
@ -208,7 +219,8 @@ class TestRunner(object):
self.test.pre_run(self.db, self.args)
return_code = self._run_tester(tester)
if return_code != 0:
util.get_logger().error('Test of type %s failed to complete successfully with random seed %d and %d operations\n' % (self.args.test_name, self.args.seed, self.args.num_ops))
util.get_logger().error('Test of type %s failed to complete successfully with random seed %d and %d operations\n' %
(self.args.test_name, self.args.seed, self.args.num_ops))
return 2
tester_errors[tester] = self.test.validate(self.db, self.args)
@ -226,18 +238,19 @@ class TestRunner(object):
self._insert_instructions(test_instructions)
def _generate_test(self):
util.get_logger().info('Generating %s test at seed %d with %d op(s) and %d concurrent tester(s)...' % (self.args.test_name, self.args.seed, self.args.num_ops, self.args.concurrency))
util.get_logger().info('Generating %s test at seed %d with %d op(s) and %d concurrent tester(s)...' %
(self.args.test_name, self.args.seed, self.args.num_ops, self.args.concurrency))
random.seed(self.test_seed)
if self.args.concurrency == 1:
self.test.setup(self.args)
test_instructions = { fdb.Subspace((self.args.instruction_prefix,)) : self.test.generate(self.args, 0) }
test_instructions = {fdb.Subspace((self.args.instruction_prefix,)): self.test.generate(self.args, 0)}
else:
test_instructions = {}
main_thread = InstructionSet()
for i in range(self.args.concurrency):
#thread_spec = fdb.Subspace(('thread_spec', i))
# thread_spec = fdb.Subspace(('thread_spec', i))
thread_spec = 'thread_spec%d' % i
main_thread.push_args(thread_spec)
main_thread.append('START_THREAD')
@ -260,7 +273,7 @@ class TestRunner(object):
params += [self.args.cluster_file]
util.get_logger().info('\nRunning tester \'%s\'...' % ' '.join(params))
sys.stdout.flush();
sys.stdout.flush()
proc = subprocess.Popen(params)
timed_out = Event()
@ -321,9 +334,10 @@ class TestRunner(object):
if len(errors) > 0:
util.get_logger().error('The %s tester reported errors:\n' % tester.name)
for i, error in enumerate(errors):
util.get_logger().error(' %d. %s' % (i+1, error))
util.get_logger().error(' %d. %s' % (i + 1, error))
log_message = '\nTest with seed %d and concurrency %d had %d incorrect result(s) and %d error(s) at API version %d' % (self.args.seed, self.args.concurrency, num_incorrect, num_errors, self.args.api_version)
log_message = '\nTest with seed %d and concurrency %d had %d incorrect result(s) and %d error(s) at API version %d' %\
(self.args.seed, self.args.concurrency, num_incorrect, num_errors, self.args.api_version)
if num_errors == 0 and (num_incorrect == 0 or has_filtered_error):
util.get_logger().info(log_message)
if has_filtered_error:
@ -333,6 +347,7 @@ class TestRunner(object):
util.get_logger().error(log_message)
return 1
def bisect(test_runner, args):
util.get_logger().info('')
@ -354,7 +369,8 @@ def bisect(test_runner, args):
util.get_logger().error('Error finding minimal failing test for seed %d. The failure may not be deterministic' % args.seed)
return 1
else:
util.get_logger().error('No failing test found for seed %d with %d ops. Try specifying a larger --num-ops parameter.' % (args.seed, args.num_ops))
util.get_logger().error('No failing test found for seed %d with %d ops. Try specifying a larger --num-ops parameter.'
% (args.seed, args.num_ops))
return 0
elif result == 0:
@ -365,30 +381,45 @@ def bisect(test_runner, args):
util.get_logger().info('Test with %d operations failed with error code %d\n' % (test_runner.args.num_ops, result))
upper_bound = test_runner.args.num_ops
def parse_args(argv):
parser = argparse.ArgumentParser(description='FoundationDB Binding API Tester')
parser.add_argument('--test-name', default='scripted', help='The name of the test to run. Must be the name of a test specified in the tests folder. (default=\'scripted\')')
parser.add_argument('--test-name', default='scripted',
help='The name of the test to run. Must be the name of a test specified in the tests folder. (default=\'scripted\')')
parser.add_argument(metavar='tester1', dest='test1', help='Name of the first tester to invoke')
parser.add_argument('--compare', metavar='tester2', nargs='?', type=str, default=None, const='python', dest='test2', help='When specified, a second tester will be run and compared against the first. This flag takes an optional argument for the second tester to invoke (default = \'python\').')
parser.add_argument('--print-test', action='store_true', help='Instead of running a test, prints the set of instructions generated for that test. Unless --all is specified, all setup, finalization, PUSH, and SWAP instructions will be excluded.')
parser.add_argument('--compare', metavar='tester2', nargs='?', type=str, default=None, const='python', dest='test2',
help='When specified, a second tester will be run and compared against the first. This flag takes an optional argument '
'for the second tester to invoke (default = \'python\').')
parser.add_argument('--print-test', action='store_true',
help='Instead of running a test, prints the set of instructions generated for that test. Unless --all is specified, all '
'setup, finalization, PUSH, and SWAP instructions will be excluded.')
parser.add_argument('--all', dest='print_all', action='store_true', help='Causes --print-test to print all instructions.')
parser.add_argument('--bisect', action='store_true', help='Run the specified test varying the number of operations until a minimal failing test is found. Does not work for concurrent tests.')
parser.add_argument('--bisect', action='store_true',
help='Run the specified test varying the number of operations until a minimal failing test is found. Does not work for '
'concurrent tests.')
parser.add_argument('--insert-only', action='store_true', help='Insert the test instructions into the database, but do not run it.')
parser.add_argument('--concurrency', type=int, default=1, help='Number of concurrent test threads to run. (default = 1).')
parser.add_argument('--num-ops', type=int, default=100, help='The number of operations to generate per thread (default = 100)')
parser.add_argument('--seed', type=int, help='The random seed to use for generating the test')
parser.add_argument('--max-int-bits', type=int, default=None, help='Maximum number of bits to use for int types in testers. By default, the largest value supported by the testers being run will be chosen.')
parser.add_argument('--api-version', default=None, type=int, help='The API version that the testers should use. Not supported in scripted mode. (default = random version supported by all testers)')
parser.add_argument('--max-int-bits', type=int, default=None,
help='Maximum number of bits to use for int types in testers. By default, the largest value supported by the testers being '
'run will be chosen.')
parser.add_argument('--api-version', default=None, type=int,
help='The API version that the testers should use. Not supported in scripted mode. (default = random version supported by '
'all testers)')
parser.add_argument('--cluster-file', type=str, default=None, help='The cluster file for the cluster being connected to. (default None)')
parser.add_argument('--timeout', type=int, default=600, help='The timeout in seconds for running each individual tester. (default 600)')
parser.add_argument('--enable-client-trace-logging', nargs='?', type=str, default=None, const='.', help='Enables trace file output. This flag takes an optional argument specifying the output directory (default = \'.\').')
parser.add_argument('--instruction-prefix', type=str, default='test_spec', help='The prefix under which the main thread of test instructions are inserted (default=\'test_spec\').')
parser.add_argument('--output-subspace', type=str, default='tester_output', help='The string used to create the output subspace for the testers. The subspace will be of the form (<output_subspace>,). (default=\'tester_output\')')
parser.add_argument('--enable-client-trace-logging', nargs='?', type=str, default=None, const='.',
help='Enables trace file output. This flag takes an optional argument specifying the output directory (default = \'.\').')
parser.add_argument('--instruction-prefix', type=str, default='test_spec',
help='The prefix under which the main thread of test instructions are inserted (default=\'test_spec\').')
parser.add_argument('--output-subspace', type=str, default='tester_output',
help='The string used to create the output subspace for the testers. The subspace will be of the form (<output_subspace>,). '
'(default=\'tester_output\')')
parser.add_argument('--logging-level', type=str, default='INFO', choices=['ERROR', 'WARNING', 'INFO', 'DEBUG'], help='Specifies the level of detail in the tester output (default=\'INFO\').')
parser.add_argument('--logging-level', type=str, default='INFO',
choices=['ERROR', 'WARNING', 'INFO', 'DEBUG'], help='Specifies the level of detail in the tester output (default=\'INFO\').')
# SOMEDAY: this applies only to the scripted test. Should we invoke test files specifically (as in circus),
# or invoke them here and allow tests to add arguments?
@ -396,6 +427,7 @@ def parse_args(argv):
return parser.parse_args(argv)
def validate_args(args):
if args.insert_only and args.bisect:
raise Exception('--bisect cannot be used with --insert-only')
@ -408,6 +440,7 @@ def validate_args(args):
if args.concurrency > 1 and args.test2:
raise Exception('--compare cannot be used with concurrent tests')
def main(argv):
args = parse_args(argv)
try:
@ -444,9 +477,11 @@ def main(argv):
util.get_logger().debug(traceback.format_exc())
exit(3)
except:
except BaseException:
util.get_logger().error('\nERROR: %s' % sys.exc_info()[0])
util.get_logger().info(traceback.format_exc())
exit(3)
if __name__ == '__main__': sys.exit(main(sys.argv[1:]))
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))

View File

@ -20,9 +20,10 @@
import os
MAX_API_VERSION = 510
COMMON_TYPES = [ 'null', 'bytes', 'string', 'int', 'uuid', 'bool', 'float', 'double', 'tuple' ]
ALL_TYPES = COMMON_TYPES + [ 'versionstamp' ]
MAX_API_VERSION = 520
COMMON_TYPES = ['null', 'bytes', 'string', 'int', 'uuid', 'bool', 'float', 'double', 'tuple']
ALL_TYPES = COMMON_TYPES + ['versionstamp']
class Tester:
def __init__(self, name, cmd, max_int_bits=64, min_api_version=0, max_api_version=MAX_API_VERSION, threads_enabled=True, types=COMMON_TYPES):
@ -44,22 +45,24 @@ class Tester:
else:
return Tester(test_name_or_args.split(' ')[0], test_name_or_args)
def _absolute_path(path):
return os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', path)
_java_cmd = 'java -ea -cp %s:%s com.apple.foundationdb.test.' % (
_absolute_path('java/foundationdb-client.jar'),
_absolute_path('java/foundationdb-tests.jar'))
# We could set min_api_version lower on some of these if the testers were updated to support them
testers = {
'python' : Tester('python', 'python ' + _absolute_path('python/tests/tester.py'), 2040, 23, MAX_API_VERSION, types=ALL_TYPES),
'python3' : Tester('python3', 'python3 ' + _absolute_path('python/tests/tester.py'), 2040, 23, MAX_API_VERSION, types=ALL_TYPES),
'node' : Tester('node', _absolute_path('nodejs/tests/tester.js'), 53, 500, MAX_API_VERSION),
'streamline' : Tester('streamline', _absolute_path('nodejs/tests/streamline_tester._js'), 53, 500, MAX_API_VERSION),
'ruby' : Tester('ruby', _absolute_path('ruby/tests/tester.rb'), 64, 23, MAX_API_VERSION),
'java' : Tester('java', _java_cmd + 'StackTester', 2040, 510, MAX_API_VERSION, types=ALL_TYPES),
'java_async' : Tester('java', _java_cmd + 'AsyncStackTester', 2040, 510, MAX_API_VERSION, types=ALL_TYPES),
'go' : Tester('go', _absolute_path('go/build/bin/_stacktester'), 63, 200, MAX_API_VERSION),
'flow' : Tester('flow', _absolute_path('flow/bin/fdb_flow_tester'), 63, 500, MAX_API_VERSION),
'python': Tester('python', 'python ' + _absolute_path('python/tests/tester.py'), 2040, 23, MAX_API_VERSION, types=ALL_TYPES),
'python3': Tester('python3', 'python3 ' + _absolute_path('python/tests/tester.py'), 2040, 23, MAX_API_VERSION, types=ALL_TYPES),
'node': Tester('node', _absolute_path('nodejs/tests/tester.js'), 53, 500, MAX_API_VERSION),
'streamline': Tester('streamline', _absolute_path('nodejs/tests/streamline_tester._js'), 53, 500, MAX_API_VERSION),
'ruby': Tester('ruby', _absolute_path('ruby/tests/tester.rb'), 64, 23, MAX_API_VERSION),
'java': Tester('java', _java_cmd + 'StackTester', 2040, 510, MAX_API_VERSION, types=ALL_TYPES),
'java_async': Tester('java', _java_cmd + 'AsyncStackTester', 2040, 510, MAX_API_VERSION, types=ALL_TYPES),
'go': Tester('go', _absolute_path('go/build/bin/_stacktester'), 63, 200, MAX_API_VERSION),
'flow': Tester('flow', _absolute_path('flow/bin/fdb_flow_tester'), 63, 500, MAX_API_VERSION),
}

View File

@ -28,6 +28,7 @@ from bindingtester import util
fdb.api_version(FDB_API_VERSION)
class ResultSpecification(object):
def __init__(self, subspace, key_start_index=0, ordering_index=None, global_error_filter=None):
self.subspace = subspace
@ -88,6 +89,7 @@ class Test(object):
return test_class[0](subspace)
class Instruction(object):
def __init__(self, operation):
self.operation = operation
@ -103,6 +105,7 @@ class Instruction(object):
def __repr__(self):
return repr(self.operation)
class PushInstruction(Instruction):
def __init__(self, argument):
self.operation = 'PUSH'
@ -115,6 +118,7 @@ class PushInstruction(Instruction):
def __repr__(self):
return '%r %r' % (self.operation, self.argument)
class TestInstructions(object):
def __init__(self):
pass
@ -126,6 +130,7 @@ class TestInstructions(object):
def insert_operations(self, db, subspace):
pass
class InstructionSet(TestInstructions, list):
def __init__(self):
TestInstructions.__init__(self)
@ -144,7 +149,7 @@ class InstructionSet(TestInstructions, list):
list.append(self, Instruction(instruction))
def get_threads(self, subspace):
return { subspace : self }
return {subspace: self}
def setup_complete(self):
self.core_test_begin = len(self)
@ -153,16 +158,17 @@ class InstructionSet(TestInstructions, list):
self.core_test_end = len(self)
def core_instructions(self):
return self[self.core_test_begin : self.core_test_end]
return self[self.core_test_begin: self.core_test_end]
@fdb.transactional
def _insert_operations_transactional(self, tr, subspace, start, count):
for i, instruction in enumerate(self[start : start+count]):
for i, instruction in enumerate(self[start: start + count]):
tr[subspace.pack((start + i,))] = instruction.to_value()
def insert_operations(self, db, subspace):
for i in range(0, int(math.ceil(len(self) / 1000.0))):
self._insert_operations_transactional(db, subspace, i*1000, 1000)
self._insert_operations_transactional(db, subspace, i * 1000, 1000)
class ThreadedInstructionSet(TestInstructions):
def __init__(self):
@ -194,4 +200,5 @@ class ThreadedInstructionSet(TestInstructions):
self.threads[subspace] = thread_instructions
return thread_instructions
util.import_subclasses(__file__, 'bindingtester.tests')

View File

@ -18,7 +18,6 @@
# limitations under the License.
#
import ctypes
import random
import struct
@ -32,6 +31,7 @@ from bindingtester.tests import test_util
fdb.api_version(FDB_API_VERSION)
class ApiTest(Test):
def __init__(self, subspace):
super(ApiTest, self).__init__(subspace)
@ -131,7 +131,7 @@ class ApiTest(Test):
def wait_for_reads(self, instructions):
while len(self.outstanding_ops) > 0 and self.outstanding_ops[-1][0] <= self.stack_size:
read = self.outstanding_ops.pop()
#print '%d. waiting for read at instruction %r' % (len(instructions), read)
# print '%d. waiting for read at instruction %r' % (len(instructions), read)
test_util.to_front(instructions, self.stack_size - read[0])
instructions.append('WAIT_FUTURE')
@ -168,7 +168,7 @@ class ApiTest(Test):
op_choices += resets
idempotent_atomic_ops = [u'BIT_AND', u'BIT_OR', u'MAX', u'MIN', u'BYTE_MIN', u'BYTE_MAX']
atomic_ops = idempotent_atomic_ops + [u'ADD', u'BIT_XOR']
atomic_ops = idempotent_atomic_ops + [u'ADD', u'BIT_XOR', u'APPEND_IF_FITS']
if args.concurrency > 1:
self.max_keys = random.randint(100, 1000)
@ -187,7 +187,7 @@ class ApiTest(Test):
index = len(instructions)
read_performed = False
#print 'Adding instruction %s at %d' % (op, index)
# print 'Adding instruction %s at %d' % (op, index)
if args.concurrency == 1 and (op in database_mutations):
self.wait_for_reads(instructions)
@ -211,7 +211,7 @@ class ApiTest(Test):
instructions.push_args(random.randint(0, 5000))
instructions.append(op)
self.outstanding_ops.append((self.stack_size, len(instructions)-1))
self.outstanding_ops.append((self.stack_size, len(instructions) - 1))
if args.concurrency == 1:
self.wait_for_reads(instructions)
@ -236,7 +236,7 @@ class ApiTest(Test):
test_util.to_front(instructions, 3)
instructions.append(op)
#Don't add key here because we may be outside of our prefix
# Don't add key here because we may be outside of our prefix
self.add_strings(1)
self.can_set_version = False
read_performed = True
@ -258,7 +258,7 @@ class ApiTest(Test):
read_performed = True
elif op == 'GET_RANGE_STARTS_WITH' or op == 'GET_RANGE_STARTS_WITH_SNAPSHOT' or op == 'GET_RANGE_STARTS_WITH_DATABASE':
#TODO: not tested well
# TODO: not tested well
self.ensure_key(instructions, 1)
range_params = self.random.random_range_params()
instructions.push_args(*range_params)
@ -316,7 +316,7 @@ class ApiTest(Test):
self.add_stack_items(1)
elif op == 'CLEAR_RANGE' or op == 'CLEAR_RANGE_DATABASE':
#Protect against inverted range
# Protect against inverted range
key1 = self.workspace.pack(self.random.random_tuple(5))
key2 = self.workspace.pack(self.random.random_tuple(5))
@ -351,10 +351,10 @@ class ApiTest(Test):
key1 = self.versionstamped_values.pack((rand_str1,))
split = random.randint(0, 70)
rand_str2 = self.random.random_string(20+split) + fdb.tuple.Versionstamp._UNSET_TR_VERSION + self.random.random_string(70-split)
rand_str2 = self.random.random_string(20 + split) + fdb.tuple.Versionstamp._UNSET_TR_VERSION + self.random.random_string(70 - split)
key2 = self.versionstamped_keys.pack() + rand_str2
index = key2.find(fdb.tuple.Versionstamp._UNSET_TR_VERSION)
key2 += chr(index%256)+chr(index/256)
key2 += chr(index % 256) + chr(index / 256)
instructions.push_args(u'SET_VERSIONSTAMPED_VALUE', key1, fdb.tuple.Versionstamp._UNSET_TR_VERSION + rand_str2)
instructions.append('ATOMIC_OP')
@ -436,8 +436,8 @@ class ApiTest(Test):
version_key = self.versionstamped_keys.pack(tup)
first_incomplete = version_key.find(fdb.tuple.Versionstamp._UNSET_TR_VERSION)
second_incomplete = -1 if first_incomplete < 0 else version_key.find(fdb.tuple.Versionstamp._UNSET_TR_VERSION,
first_incomplete + len(fdb.tuple.Versionstamp._UNSET_TR_VERSION) + 1)
second_incomplete = -1 if first_incomplete < 0 else \
version_key.find(fdb.tuple.Versionstamp._UNSET_TR_VERSION, first_incomplete + len(fdb.tuple.Versionstamp._UNSET_TR_VERSION) + 1)
# If there is exactly one incomplete versionstamp, perform the versionstamped key operation.
if first_incomplete >= 0 and second_incomplete < 0:
@ -449,7 +449,8 @@ class ApiTest(Test):
instructions.append('ATOMIC_OP')
version_value_key = self.versionstamped_values.pack((rand_str,))
instructions.push_args(u'SET_VERSIONSTAMPED_VALUE', version_value_key, fdb.tuple.Versionstamp._UNSET_TR_VERSION + fdb.tuple.pack(tup))
instructions.push_args(u'SET_VERSIONSTAMPED_VALUE', version_value_key,
fdb.tuple.Versionstamp._UNSET_TR_VERSION + fdb.tuple.pack(tup))
instructions.append('ATOMIC_OP')
self.can_use_key_selectors = False
@ -469,7 +470,7 @@ class ApiTest(Test):
instructions.append(op)
self.add_strings(len(tups))
#Use SUB to test if integers are correctly unpacked
# Use SUB to test if integers are correctly unpacked
elif op == 'SUB':
a = self.random.random_int() / 2
b = self.random.random_int() / 2
@ -512,7 +513,7 @@ class ApiTest(Test):
assert False
if read_performed and op not in database_reads:
self.outstanding_ops.append((self.stack_size, len(instructions)-1))
self.outstanding_ops.append((self.stack_size, len(instructions) - 1))
if args.concurrency == 1 and (op in database_reads or op in database_mutations):
instructions.append('WAIT_FUTURE')
@ -536,7 +537,7 @@ class ApiTest(Test):
def check_versionstamps(self, tr, begin_key, limit):
next_begin = None
incorrect_versionstamps = 0
for k,v in tr.get_range(begin_key, self.versionstamped_values.range().stop, limit=limit):
for k, v in tr.get_range(begin_key, self.versionstamped_values.range().stop, limit=limit):
next_begin = k + '\x00'
tup = fdb.tuple.unpack(k)
key = self.versionstamped_keys.pack() + v[10:].replace(fdb.tuple.Versionstamp._UNSET_TR_VERSION, v[:10], 1)
@ -545,7 +546,6 @@ class ApiTest(Test):
util.get_logger().error(' %s != %s', repr(tr[key]), repr(tup[-1]))
incorrect_versionstamps += 1
return (next_begin, incorrect_versionstamps)
def validate(self, db, args):
@ -568,4 +568,3 @@ class ApiTest(Test):
ResultSpecification(self.workspace, global_error_filter=[1007, 1021]),
ResultSpecification(self.stack_subspace, key_start_index=1, ordering_index=1, global_error_filter=[1007, 1021])
]

View File

@ -32,6 +32,7 @@ from bindingtester.tests.directory_util import DirListEntry
fdb.api_version(FDB_API_VERSION)
class DirectoryTest(Test):
def __init__(self, subspace):
@ -105,13 +106,14 @@ class DirectoryTest(Test):
# Generate some directories that we are going to create in advance. This tests that other bindings
# are compatible with the Python implementation
self.prepopulated_dirs = [ (generate_path(min_length=1), self.generate_layer()) for i in range(5) ]
self.prepopulated_dirs = [(generate_path(min_length=1), self.generate_layer()) for i in range(5)]
for path, layer in self.prepopulated_dirs:
instructions.push_args(layer)
instructions.push_args(*test_util.with_length(path))
instructions.append('DIRECTORY_OPEN')
#print '%d. Selected %s, dir=%s, has_known_prefix=%s, dir_list_len=%d' % (len(instructions), 'DIRECTORY_OPEN', repr(self.dir_index), False, len(self.dir_list))
# print '%d. Selected %s, dir=%s, has_known_prefix=%s, dir_list_len=%d' \
# % (len(instructions), 'DIRECTORY_OPEN', repr(self.dir_index), False, len(self.dir_list))
self.dir_list.append(self.dir_list[0].add_child(path, default_path, self.root, DirListEntry(True, True, has_known_prefix=False)))
instructions.setup_complete()
@ -131,7 +133,8 @@ class DirectoryTest(Test):
op = random.choice(choices)
dir_entry = self.dir_list[self.dir_index]
#print '%d. Selected %s, dir=%s, has_known_prefix=%s, dir_list_len=%d' % (len(instructions), op, repr(self.dir_index), repr(dir_entry.has_known_prefix), len(self.dir_list))
# print '%d. Selected %s, dir=%s, has_known_prefix=%s, dir_list_len=%d' \
# % (len(instructions), op, repr(self.dir_index), repr(dir_entry.has_known_prefix), len(self.dir_list))
if op.endswith('_DATABASE') or op.endswith('_SNAPSHOT'):
root_op = op[0:-9]
@ -160,7 +163,7 @@ class DirectoryTest(Test):
indices.append(len(self.dir_list))
self.dir_list.append(DirListEntry(False, True))
instructions.push_args(random.choice([0,1]))
instructions.push_args(random.choice([0, 1]))
instructions.push_args(*indices)
instructions.append(op)
self.dir_list.append(DirListEntry(True, False, False))
@ -195,7 +198,8 @@ class DirectoryTest(Test):
path = generate_path()
op_args = test_util.with_length(path) + (layer, prefix)
if prefix is None:
directory_util.push_instruction_and_record_prefix(instructions, op, op_args, path, len(self.dir_list), self.random, self.prefix_log)
directory_util.push_instruction_and_record_prefix(
instructions, op, op_args, path, len(self.dir_list), self.random, self.prefix_log)
else:
instructions.push_args(*op_args)
instructions.append(op)
@ -228,7 +232,8 @@ class DirectoryTest(Test):
new_path = generate_path()
instructions.push_args(*test_util.with_length(new_path))
instructions.append(op)
self.dir_list.append(dir_entry.root.add_child(new_path, default_path, self.root, DirListEntry(True, True, dir_entry.has_known_prefix)))
self.dir_list.append(dir_entry.root.add_child(new_path, default_path, self.root,
DirListEntry(True, True, dir_entry.has_known_prefix)))
# Make sure that the default directory subspace still exists after moving the current directory
self.ensure_default_directory_subspace(instructions, default_path)
@ -305,10 +310,10 @@ class DirectoryTest(Test):
instructions.push_args(self.directory_log.key())
instructions.append('DIRECTORY_LOG_DIRECTORY')
if dir_entry.has_known_prefix and dir_entry.is_subspace:
#print '%d. Logging subspace: %d' % (i, dir_entry.dir_id)
# print '%d. Logging subspace: %d' % (i, dir_entry.dir_id)
instructions.push_args(self.subspace_log.key())
instructions.append('DIRECTORY_LOG_SUBSPACE')
if (i+1) % 100 == 0:
if (i + 1) % 100 == 0:
test_util.blocking_commit(instructions)
instructions.push_args(self.stack_subspace.key())
@ -332,18 +337,21 @@ class DirectoryTest(Test):
# If a partition is created, allocates a prefix, and then is removed, subsequent prefix
# allocations could collide with prior ones. We can get around this by not allowing
# a removed directory (or partition) to be used, but that weakens the test in another way.
#errors += directory_util.check_for_duplicate_prefixes(db, self.prefix_log)
# errors += directory_util.check_for_duplicate_prefixes(db, self.prefix_log)
return errors
def get_result_specfications(self):
def get_result_specifications(self):
return [
ResultSpecification(self.stack, key_start_index=1, ordering_index=1),
ResultSpecification(self.stack_subspace, key_start_index=1, ordering_index=1),
ResultSpecification(self.directory_log, ordering_index=0),
ResultSpecification(self.subspace_log, ordering_index=0)
]
# Utility functions
def generate_path(min_length = 0):
def generate_path(min_length=0):
length = int(random.random() * random.random() * (4 - min_length)) + min_length
path = ()
for i in range(length):
@ -354,6 +362,7 @@ def generate_path(min_length = 0):
return path
def generate_prefix(allow_empty=True, is_partition=False):
if allow_empty and random.random() < 0.8:
return None
@ -364,7 +373,7 @@ def generate_prefix(allow_empty=True, is_partition=False):
if not is_partition:
first = chr(random.randint(ord('\x1d'), 255) % 255)
return first + ''.join(chr(random.randrange(0, 256)) for i in range(0, length-1))
return first + ''.join(chr(random.randrange(0, 256)) for i in range(0, length - 1))
else:
return ''.join(chr(random.randrange(ord('\x02'), ord('\x14'))) for i in range(0, length))
else:

View File

@ -30,6 +30,7 @@ from bindingtester.tests import test_util, directory_util
fdb.api_version(FDB_API_VERSION)
class DirectoryHcaTest(Test):
def __init__(self, subspace):
super(DirectoryHcaTest, self).__init__(subspace)
@ -58,7 +59,7 @@ class DirectoryHcaTest(Test):
def barrier(self, instructions, thread_number, thread_ending=False):
if not thread_ending:
instructions.push_args(self.coordination[(self.barrier_num+1)][thread_number].key(), '')
instructions.push_args(self.coordination[(self.barrier_num + 1)][thread_number].key(), '')
instructions.append('SET_DATABASE')
instructions.append('WAIT_FUTURE')
@ -102,7 +103,8 @@ class DirectoryHcaTest(Test):
for i in range(num_directories):
path = (self.random.random_unicode_str(16),)
op_args = test_util.with_length(path) + ('', None)
directory_util.push_instruction_and_record_prefix(instructions, 'DIRECTORY_CREATE', op_args, path, num_dirs, self.random, self.prefix_log)
directory_util.push_instruction_and_record_prefix(instructions, 'DIRECTORY_CREATE',
op_args, path, num_dirs, self.random, self.prefix_log)
num_dirs += 1
current_op += num_directories
@ -127,4 +129,3 @@ class DirectoryHcaTest(Test):
errors += directory_util.validate_hca_state(db)
return errors

View File

@ -34,6 +34,7 @@ DEFAULT_DIRECTORY_INDEX = 4
DEFAULT_DIRECTORY_PREFIX = 'default'
DIRECTORY_ERROR_STRING = 'DIRECTORY_ERROR'
class DirListEntry:
dir_id = 0 # Used for debugging
@ -53,45 +54,46 @@ class DirListEntry:
def add_child(self, subpath, default_path, root, child):
if default_path in root.children:
#print 'Adding child %r to default directory %r at %r' % (child, root.children[DirectoryTest.DEFAULT_DIRECTORY_PATH].path, subpath)
# print 'Adding child %r to default directory %r at %r' % (child, root.children[DirectoryTest.DEFAULT_DIRECTORY_PATH].path, subpath)
c = root.children[default_path]._add_child_impl(subpath, child)
child.has_known_prefix = c.has_known_prefix and child.has_known_prefix
#print 'Added %r' % c
# print 'Added %r' % c
#print 'Adding child %r to directory %r at %r' % (child, self.path, subpath)
# print 'Adding child %r to directory %r at %r' % (child, self.path, subpath)
c = self._add_child_impl(subpath, child)
#print 'Added %r' % c
# print 'Added %r' % c
return c
def _add_child_impl(self, subpath, child):
#print '%d, %d. Adding child (recursive): %s %s' % (self.dir_id, child.dir_id, repr(self.path), repr(subpath))
# print '%d, %d. Adding child (recursive): %s %s' % (self.dir_id, child.dir_id, repr(self.path), repr(subpath))
if len(subpath) == 0:
self.has_known_prefix = self.has_known_prefix and child.has_known_prefix
#print '%d, %d. Setting child: %d' % (self.dir_id, child.dir_id, self.has_known_prefix)
# print '%d, %d. Setting child: %d' % (self.dir_id, child.dir_id, self.has_known_prefix)
self._merge_children(child)
return self
else:
if not subpath[0] in self.children:
#print '%d, %d. Path %s was absent (%s)' % (self.dir_id, child.dir_id, repr(self.path + subpath[0:1]), repr(self.children))
subdir = DirListEntry(True, True, path = self.path+subpath[0:1], root = self.root)
# print '%d, %d. Path %s was absent (%s)' % (self.dir_id, child.dir_id, repr(self.path + subpath[0:1]), repr(self.children))
subdir = DirListEntry(True, True, path=self.path + subpath[0:1], root=self.root)
subdir.has_known_prefix = len(subpath) == 1
self.children[subpath[0]] = subdir
else:
subdir = self.children[subpath[0]]
subdir.has_known_prefix = False
#print '%d, %d. Path was present' % (self.dir_id, child.dir_id)
# print '%d, %d. Path was present' % (self.dir_id, child.dir_id)
return subdir._add_child_impl(subpath[1:], child)
def _merge_children(self, other):
for c in other.children:
if not c in self.children:
if c not in self.children:
self.children[c] = other.children[c]
else:
self.children[c].has_known_prefix = self.children[c].has_known_prefix and other.children[c].has_known_prefix
self.children[c]._merge_children(other.children[c])
def setup_directories(instructions, default_path, random):
dir_list = [DirListEntry(True, False, True)]
instructions.push_args(0, '\xfe')
@ -114,6 +116,7 @@ def setup_directories(instructions, default_path, random):
return dir_list
def create_default_directory_subspace(instructions, path, random):
test_util.blocking_commit(instructions)
instructions.push_args(3)
@ -125,6 +128,7 @@ def create_default_directory_subspace(instructions, path, random):
instructions.push_args(DEFAULT_DIRECTORY_INDEX)
instructions.append('DIRECTORY_CHANGE')
def push_instruction_and_record_prefix(instructions, op, op_args, path, dir_index, random, subspace):
if not op.endswith('_DATABASE'):
instructions.push_args(1, *test_util.with_length(path))
@ -152,6 +156,7 @@ def push_instruction_and_record_prefix(instructions, op, op_args, path, dir_inde
instructions.push_args(DEFAULT_DIRECTORY_INDEX)
instructions.append('DIRECTORY_CHANGE')
def check_for_duplicate_prefixes(db, subspace):
last_prefix = None
start_key = subspace[0].range().start
@ -170,12 +175,13 @@ def check_for_duplicate_prefixes(db, subspace):
count += len(prefixes)
prefixes = [last_prefix] + prefixes
duplicates.update([p for i,p in enumerate(prefixes[1:]) if p == prefixes[i]])
duplicates.update([p for i, p in enumerate(prefixes[1:]) if p == prefixes[i]])
last_prefix = prefixes[-1]
util.get_logger().info('Checked %d directory prefixes for duplicates' % count)
return ['The prefix %r was allocated multiple times' % d[:-2] for d in set(duplicates)]
def validate_hca_state(db):
hca = fdb.Subspace(('\xfe', 'hca'), '\xfe')
counters = hca[0]
@ -184,7 +190,7 @@ def validate_hca_state(db):
last_counter = db.get_range(counters.range().start, counters.range().stop, limit=1, reverse=True)
[(start, reported_count)] = [(counters.unpack(kv.key)[0], struct.unpack('<q', kv.value)[0]) for kv in last_counter] or [(0, 0)]
actual_count = len(db[recent[start] : recent.range().stop])
actual_count = len(db[recent[start]: recent.range().stop])
if actual_count > reported_count:
return ['The HCA reports %d prefixes allocated in current window, but it actually allocated %d' % (reported_count, actual_count)]

View File

@ -31,14 +31,16 @@ from bindingtester.tests import test_util
fdb.api_version(FDB_API_VERSION)
# SOMEDAY: This should probably be broken up into smaller tests
class ScriptedTest(Test):
TEST_API_VERSION = 510
TEST_API_VERSION = 520
def __init__(self, subspace):
super(ScriptedTest, self).__init__(subspace, ScriptedTest.TEST_API_VERSION, ScriptedTest.TEST_API_VERSION)
self.workspace = self.subspace['workspace']
self.results_subspace = self.subspace['results']
#self.thread_subspace = self.subspace['threads'] # TODO: update START_THREAD so that we can create threads in subspaces
# self.thread_subspace = self.subspace['threads'] # TODO: update START_THREAD so that we can create threads in subspaces
def setup(self, args):
if args.concurrency > 1:
@ -58,7 +60,7 @@ class ScriptedTest(Test):
test_instructions = ThreadedInstructionSet()
main_thread = test_instructions.create_thread()
foo = [self.workspace.pack(('foo%d' % i,)) for i in range(0,6)]
foo = [self.workspace.pack(('foo%d' % i,)) for i in range(0, 6)]
main_thread.append('NEW_TRANSACTION')
main_thread.push_args(1020)
@ -270,7 +272,7 @@ class ScriptedTest(Test):
stampKey = 'stampedXXXXXXXXXXsuffix'
stampKeyIndex = stampKey.find('XXXXXXXXXX')
stampKeyStr = chr(stampKeyIndex%256) + chr(stampKeyIndex/256)
stampKeyStr = chr(stampKeyIndex % 256) + chr(stampKeyIndex / 256)
main_thread.push_args(u'SET_VERSIONSTAMPED_KEY', stampKey + stampKeyStr, 'stampedBar')
main_thread.append('ATOMIC_OP')
main_thread.push_args(u'SET_VERSIONSTAMPED_VALUE', 'stampedValue', 'XXXXXXXXXX')
@ -305,7 +307,7 @@ class ScriptedTest(Test):
if not args.no_threads:
wait_key = 'waitKey'
#threads = [self.thread_subspace[i] for i in range(0, 2)]
# threads = [self.thread_subspace[i] for i in range(0, 2)]
threads = ['thread_spec%d' % i for i in range(0, 2)]
for thread_spec in threads:
main_thread.push_args(self.workspace.pack((wait_key, thread_spec)), '')
@ -314,11 +316,12 @@ class ScriptedTest(Test):
for thread_spec in threads:
main_thread.push_args(thread_spec)
#if len(main_thread) < args.num_ops:
# if len(main_thread) < args.num_ops:
main_thread.append('START_THREAD')
thread = test_instructions.create_thread(fdb.Subspace((thread_spec,)))
thread.append('NEW_TRANSACTION')
thread.push_args(foo[1], foo[1], 'bar%s' % thread_spec, self.workspace.pack((wait_key, thread_spec)), self.workspace.pack((wait_key, thread_spec)))
thread.push_args(foo[1], foo[1], 'bar%s' % thread_spec, self.workspace.pack(
(wait_key, thread_spec)), self.workspace.pack((wait_key, thread_spec)))
thread.append('GET')
thread.append('POP')
thread.append('SET')
@ -335,8 +338,8 @@ class ScriptedTest(Test):
self.add_result(thread, args, 'barthread_spec0', 'barthread_spec1')
main_thread.append('EMPTY_STACK')
#if len(main_thread) > args.num_ops:
#main_thread[args.num_ops:] = []
# if len(main_thread) > args.num_ops:
# main_thread[args.num_ops:] = []
return test_instructions
@ -346,7 +349,7 @@ class ScriptedTest(Test):
]
def get_expected_results(self):
return { self.results_subspace : self.results }
return {self.results_subspace: self.results}
def append_range_test(self, instructions, args, num_pairs, kv_length):
instructions.append('NEW_TRANSACTION')
@ -355,7 +358,7 @@ class ScriptedTest(Test):
instructions.append('CLEAR_RANGE_STARTS_WITH')
kvpairs = []
for i in range(0, num_pairs*2):
for i in range(0, num_pairs * 2):
kvpairs.append(self.workspace.pack(('foo', ''.join(chr(random.randint(0, 254)) for i in range(0, kv_length)))))
kvpairs = list(set(kvpairs))
@ -364,7 +367,7 @@ class ScriptedTest(Test):
kvpairs.sort()
instructions.push_args(*kvpairs)
for i in range(0, len(kvpairs)/2):
for i in range(0, len(kvpairs) / 2):
instructions.append('SET')
if i % 100 == 99:
test_util.blocking_commit(instructions)
@ -388,8 +391,7 @@ class ScriptedTest(Test):
instructions.push_args(key)
instructions.append('SET_DATABASE')
#if len(instructions) <= args.num_ops:
# if len(instructions) <= args.num_ops:
self.results.append(Result(self.results_subspace, key, values))
instructions.append('POP')

View File

@ -31,6 +31,7 @@ from bindingtester import util
from bindingtester import FDB_API_VERSION
from bindingtester.known_testers import COMMON_TYPES
class RandomGenerator(object):
def __init__(self, max_int_bits=64, api_version=FDB_API_VERSION, types=COMMON_TYPES):
self.max_int_bits = max_int_bits
@ -47,7 +48,7 @@ class RandomGenerator(object):
min_value = -max_value - 1
num = random.randint(min_value, max_value)
#util.get_logger().debug('generating int (%d): %d - %s' % (num_bits, num, repr(fdb.tuple.pack((num,)))))
# util.get_logger().debug('generating int (%d): %d - %s' % (num_bits, num, repr(fdb.tuple.pack((num,)))))
return num
def random_float(self, exp_bits):
@ -57,7 +58,7 @@ class RandomGenerator(object):
else:
# Choose a value from all over the range of acceptable floats for this precision.
sign = -1 if random.random() < 0.5 else 1
exponent = random.randint(-(1 << (exp_bits-1))-10, (1 << (exp_bits-1) - 1))
exponent = random.randint(-(1 << (exp_bits - 1)) - 10, (1 << (exp_bits - 1) - 1))
mantissa = random.random()
return sign * math.pow(2, exponent) * mantissa
@ -117,12 +118,12 @@ class RandomGenerator(object):
smaller_size = random.randint(1, len(to_add))
tuples.append(to_add[:smaller_size])
else:
non_empty = filter(lambda (i,x): (isinstance(x, list) or isinstance(x, tuple)) and len(x) > 0, enumerate(to_add))
non_empty = filter(lambda (_, x): (isinstance(x, list) or isinstance(x, tuple)) and len(x) > 0, enumerate(to_add))
if len(non_empty) > 0 and random.random() < 0.25:
# Add a smaller list to test prefixes of nested structures.
idx, choice = random.choice(non_empty)
smaller_size = random.randint(0, len(to_add[idx]))
tuples.append(to_add[:idx] + (choice[:smaller_size],) + to_add[idx+1:])
tuples.append(to_add[:idx] + (choice[:smaller_size],) + to_add[idx + 1:])
random.shuffle(tuples)
return tuples
@ -133,7 +134,7 @@ class RandomGenerator(object):
elif random.random() < 0.75:
limit = 0
else:
limit = random.randint(1e8, (1<<31)-1)
limit = random.randint(1e8, (1 << 31) - 1)
return (limit, random.randint(0, 1), random.randint(-2, 4))
@ -149,7 +150,7 @@ class RandomGenerator(object):
if length == 0:
return ''
return chr(random.randint(0, 254)) + ''.join(chr(random.randint(0, 255)) for i in range(0, length-1))
return chr(random.randint(0, 254)) + ''.join(chr(random.randint(0, 255)) for i in range(0, length - 1))
def random_unicode_char(self):
while True:
@ -166,11 +167,13 @@ class RandomGenerator(object):
def error_string(error_code):
return fdb.tuple.pack(('ERROR', str(error_code)))
def blocking_commit(instructions):
instructions.append('COMMIT')
instructions.append('WAIT_FUTURE')
instructions.append('RESET')
def to_front(instructions, index):
if index == 0:
pass
@ -178,19 +181,19 @@ def to_front(instructions, index):
instructions.push_args(1)
instructions.append('SWAP')
elif index == 2:
instructions.push_args(index-1)
instructions.push_args(index - 1)
instructions.append('SWAP')
instructions.push_args(index)
instructions.append('SWAP')
else:
instructions.push_args(index-1)
instructions.push_args(index - 1)
instructions.append('SWAP')
instructions.push_args(index)
instructions.append('SWAP')
instructions.push_args(index-1)
instructions.push_args(index - 1)
instructions.append('SWAP')
to_front(instructions, index-1)
to_front(instructions, index - 1)
def with_length(tup):
return (len(tup),) + tup

View File

@ -25,6 +25,7 @@ import glob
import fdb
def initialize_logger_level(logging_level):
logger = get_logger()
@ -39,9 +40,11 @@ def initialize_logger_level(logging_level):
elif logging_level == "ERROR":
logger.setLevel(logging.ERROR)
def get_logger():
return logging.getLogger('foundationdb.bindingtester')
# Attempts to get the name associated with a process termination signal
def signal_number_to_name(signal_num):
name = []
@ -53,6 +56,7 @@ def signal_number_to_name(signal_num):
else:
return str(signal_num)
def import_subclasses(filename, module_path):
for f in glob.glob(os.path.join(os.path.dirname(filename), '*.py')):
fn = os.path.basename(f)
@ -60,6 +64,7 @@ def import_subclasses(filename, module_path):
continue
__import__('%s.%s' % (module_path, os.path.splitext(fn)[0]))
# Attempts to unpack a subspace
# This throws an exception if the subspace cannot be unpacked as a tuple
# As a result, the binding tester cannot use subspaces that have non-tuple raw prefixes
@ -69,4 +74,3 @@ def subspace_to_tuple(subspace):
except Exception as e:
get_logger().debug(e)
raise Exception('The binding tester does not support subspaces with non-tuple raw prefixes')

View File

@ -18,7 +18,7 @@
* limitations under the License.
*/
#define FDB_API_VERSION 510
#define FDB_API_VERSION 520
#include "fdbclient/MultiVersionTransaction.h"
#include "foundationdb/fdb_c.h"

View File

@ -28,10 +28,10 @@
#endif
#if !defined(FDB_API_VERSION)
#error You must #define FDB_API_VERSION prior to including fdb_c.h (current version is 510)
#error You must #define FDB_API_VERSION prior to including fdb_c.h (current version is 520)
#elif FDB_API_VERSION < 13
#error API version no longer supported (upgrade to 13)
#elif FDB_API_VERSION > 510
#elif FDB_API_VERSION > 520
#error Requested API version requires a newer version of this header
#endif

View File

@ -27,19 +27,21 @@ import sys
functions = {}
func_re = re.compile("^\s*FDB_API_(?:CHANGED|REMOVED)\s*\(\s*([^,]*),\s*([^)]*)\).*")
func_re = re.compile(
"^\s*FDB_API_(?:CHANGED|REMOVED)\s*\(\s*([^,]*),\s*([^)]*)\).*")
with open(source, 'r') as srcfile:
for l in srcfile:
m = func_re.match(l)
if m:
func, ver = m.groups()
if not func in functions:
if func not in functions:
functions[func] = []
ver = int(ver)
if not ver in functions[func]:
if ver not in functions[func]:
functions[func].append(ver)
def write_windows_asm(asmfile, functions):
asmfile.write(".data\n")
for f in functions:
@ -55,6 +57,7 @@ def write_windows_asm(asmfile, functions):
asmfile.write("\nEND\n")
def write_unix_asm(asmfile, functions, prefix):
asmfile.write(".intel_syntax noprefix\n")
@ -70,13 +73,17 @@ def write_unix_asm(asmfile, functions, prefix):
for f in functions:
asmfile.write("\n.globl %s%s\n" % (prefix, f))
asmfile.write("%s%s:\n" % (prefix, f))
asmfile.write("\tmov r11, qword ptr [%sfdb_api_ptr_%s@GOTPCREL+rip]\n" % (prefix, f))
asmfile.write(
"\tmov r11, qword ptr [%sfdb_api_ptr_%s@GOTPCREL+rip]\n" % (prefix, f))
asmfile.write("\tmov r11, qword ptr [r11]\n")
asmfile.write("\tjmp r11\n")
with open(asm, 'w') as asmfile, open(h, 'w') as hfile:
hfile.write("void fdb_api_ptr_unimpl() { fprintf(stderr, \"UNIMPLEMENTED FDB API FUNCTION\\n\"); abort(); }\n\n")
hfile.write("void fdb_api_ptr_removed() { fprintf(stderr, \"REMOVED FDB API FUNCTION\\n\"); abort(); }\n\n")
hfile.write(
"void fdb_api_ptr_unimpl() { fprintf(stderr, \"UNIMPLEMENTED FDB API FUNCTION\\n\"); abort(); }\n\n")
hfile.write(
"void fdb_api_ptr_removed() { fprintf(stderr, \"REMOVED FDB API FUNCTION\\n\"); abort(); }\n\n")
if platform == "linux":
write_unix_asm(asmfile, functions, '')
@ -90,4 +97,4 @@ with open(asm, 'w') as asmfile, open(h, 'w') as hfile:
hfile.write("extern \"C\" ")
hfile.write("void* fdb_api_ptr_%s = (void*)&fdb_api_ptr_unimpl;\n" % f)
for v in functions[f]:
hfile.write("#define %s_v%d_PREV %s_v%d\n" % (f, v, f, v-1))
hfile.write("#define %s_v%d_PREV %s_v%d\n" % (f, v, f, v - 1))

View File

@ -26,6 +26,8 @@ fdb_c_LIBS := lib/libfdbclient.a lib/libfdbrpc.a lib/libflow.a
fdb_c_tests_LIBS := -Llib -lfdb_c
fdb_c_tests_HEADERS := -Ibindings/c
CLEAN_TARGETS += fdb_c_tests_clean
ifeq ($(PLATFORM),linux)
fdb_c_LIBS += lib/libstdc++.a -lm -lpthread -lrt -ldl
fdb_c_LDFLAGS += -Wl,--version-script=bindings/c/fdb_c.map -static-libgcc -Wl,-z,nodelete
@ -98,4 +100,7 @@ packages/fdb-c-tests-$(VERSION)-$(PLATFORM).tar.gz: bin/fdb_c_performance_test b
fdb_c_tests: packages/fdb-c-tests-$(VERSION)-$(PLATFORM).tar.gz
fdb_c_tests_clean:
@rm -f packages/fdb-c-tests-$(VERSION)-$(PLATFORM).tar.gz
packages: fdb_c_tests

View File

@ -602,7 +602,7 @@ void runTests(struct ResultSet *rs) {
int main(int argc, char **argv) {
srand(time(NULL));
struct ResultSet *rs = newResultSet();
checkError(fdb_select_api_version(510), "select API version", rs);
checkError(fdb_select_api_version(520), "select API version", rs);
printf("Running performance test at client version: %s\n", fdb_get_client_version());
valueStr = (uint8_t*)malloc((sizeof(uint8_t))*valueSize);

View File

@ -243,7 +243,7 @@ void runTests(struct ResultSet *rs) {
int main(int argc, char **argv) {
srand(time(NULL));
struct ResultSet *rs = newResultSet();
checkError(fdb_select_api_version(510), "select API version", rs);
checkError(fdb_select_api_version(520), "select API version", rs);
printf("Running RYW Benchmark test at client version: %s\n", fdb_get_client_version());
keys = generateKeys(numKeys, keySize);

View File

@ -27,7 +27,7 @@
#include <pthread.h>
#ifndef FDB_API_VERSION
#define FDB_API_VERSION 510
#define FDB_API_VERSION 520
#endif
#include <foundationdb/fdb_c.h>

View File

@ -140,9 +140,14 @@ namespace FDB {
API::API(int version) : version(version) {}
API* API::selectAPIVersion(int apiVersion) {
if(API::instance && apiVersion != API::instance->version) {
if(API::instance) {
if(apiVersion != API::instance->version) {
throw api_version_already_set();
}
else {
return API::instance;
}
}
if(apiVersion < 500 || apiVersion > FDB_API_VERSION) {
throw api_version_not_supported();
@ -150,12 +155,22 @@ namespace FDB {
throw_on_error( fdb_select_api_version_impl(apiVersion, FDB_API_VERSION) );
if(!API::instance) {
API::instance = new API(apiVersion);
return API::instance;
}
bool API::isAPIVersionSelected() {
return API::instance != NULL;
}
API* API::getInstance() {
if(API::instance == NULL) {
throw api_version_unset();
}
else {
return API::instance;
}
}
void API::setupNetwork() {
throw_on_error( fdb_setup_network() );
@ -183,6 +198,10 @@ namespace FDB {
return Reference<Cluster>( new Cluster(c) );
}
int API::getAPIVersion() const {
return version;
}
Reference<DatabaseContext> Cluster::createDatabase() {
const char *dbName = "DB";
CFuture f( fdb_cluster_create_database( c, (uint8_t*)dbName, (int)strlen(dbName) ) );

View File

@ -23,7 +23,7 @@
#include <flow/flow.h>
#define FDB_API_VERSION 510
#define FDB_API_VERSION 520
#include <bindings/c/foundationdb/fdb_c.h>
#undef DLLEXPORT
@ -64,6 +64,8 @@ namespace FDB {
class API {
public:
static API* selectAPIVersion(int apiVersion);
static API* getInstance();
static bool isAPIVersionSelected();
void setNetworkOption(FDBNetworkOption option, Optional<StringRef> value = Optional<StringRef>());
@ -74,6 +76,7 @@ namespace FDB {
Reference<Cluster> createCluster( std::string const& connFilename );
bool evaluatePredicate(FDBErrorPredicate pred, Error const& e);
int getAPIVersion() const;
private:
static API* instance;

View File

@ -1510,6 +1510,25 @@ struct UnitTestsFunc : InstructionFunc {
ASSERT(data->api->evaluatePredicate(FDBErrorPredicate::FDB_ERROR_PREDICATE_RETRYABLE, Error(1020)));
ASSERT(!data->api->evaluatePredicate(FDBErrorPredicate::FDB_ERROR_PREDICATE_RETRYABLE, Error(10)));
ASSERT(API::isAPIVersionSelected());
state API *fdb = API::getInstance();
ASSERT(fdb->getAPIVersion() <= FDB_API_VERSION);
try {
API::selectAPIVersion(fdb->getAPIVersion() + 1);
ASSERT(false);
}
catch(Error &e) {
ASSERT(e.code() == error_code_api_version_already_set);
}
try {
API::selectAPIVersion(fdb->getAPIVersion() - 1);
ASSERT(false);
}
catch(Error &e) {
ASSERT(e.code() == error_code_api_version_already_set);
}
API::selectAPIVersion(fdb->getAPIVersion());
state Reference<Transaction> tr(new Transaction(data->db));
tr->setOption(FDBTransactionOption::FDB_TR_OPTION_PRIORITY_SYSTEM_IMMEDIATE);
tr->setOption(FDBTransactionOption::FDB_TR_OPTION_PRIORITY_SYSTEM_IMMEDIATE);
@ -1574,7 +1593,7 @@ ACTOR static Future<Void> doInstructions(Reference<FlowTesterData> data) {
try {
if(LOG_INSTRUCTIONS) {
if(op != LiteralStringRef("SWAP") && op != LiteralStringRef("PUSH")) {
printf("%lu. %s\n", idx, tupleToString(opTuple).c_str());
printf("%zu. %s\n", idx, tupleToString(opTuple).c_str());
fflush(stdout);
}
}
@ -1654,6 +1673,7 @@ void populateAtomicOpMap() {
optionInfo["BIT_OR"] = FDBMutationType::FDB_MUTATION_TYPE_BIT_OR;
optionInfo["XOR"] = FDBMutationType::FDB_MUTATION_TYPE_XOR;
optionInfo["BIT_XOR"] = FDBMutationType::FDB_MUTATION_TYPE_BIT_XOR;
optionInfo["APPEND_IF_FITS"] = FDBMutationType::FDB_MUTATION_TYPE_APPEND_IF_FITS;
optionInfo["MAX"] = FDBMutationType::FDB_MUTATION_TYPE_MAX;
optionInfo["MIN"] = FDBMutationType::FDB_MUTATION_TYPE_MIN;
optionInfo["SET_VERSIONSTAMPED_KEY"] = FDBMutationType::FDB_MUTATION_TYPE_SET_VERSIONSTAMPED_KEY;
@ -1681,7 +1701,18 @@ ACTOR void startTest(std::string clusterFilename, StringRef prefix, int apiVersi
// This is "our" network
g_network = newNet2(NetworkAddress(), false);
ASSERT(!API::isAPIVersionSelected());
try {
API::getInstance();
ASSERT(false);
}
catch(Error& e) {
ASSERT(e.code() == error_code_api_version_unset);
}
API *fdb = API::selectAPIVersion(apiVersion);
ASSERT(API::isAPIVersionSelected());
ASSERT(fdb->getAPIVersion() == apiVersion);
//fdb->setNetworkOption(FDBNetworkOption::FDB_NET_OPTION_TRACE_ENABLE);
// We have to start the fdb_flow network and thread separately!

View File

@ -1,14 +1,14 @@
fdb-go
======
[Go language](http://golang.org) bindings for [FoundationDB](http://foundationdb.org/documentation/), a distributed key-value store with ACID transactions.
[Go language](http://golang.org) bindings for [FoundationDB](https://www.foundationdb.org/documentation/), a distributed key-value store with ACID transactions.
This package requires:
- Go 1.1+ with CGO enabled
- FoundationDB C API 2.0.x, 3.0.x, or 4.x.y (part of the [FoundationDB clients package](https://files.foundationdb.org/fdb-c/))
- FoundationDB C API 2.0.x, 3.0.x, or 4.x.y (part of the [FoundationDB clients package](https://www.foundationdb.org/downloads/fdb-c/))
Use of this package requires the selection of a FoundationDB API version at runtime. This package currently supports FoundationDB API versions 200-510.
Use of this package requires the selection of a FoundationDB API version at runtime. This package currently supports FoundationDB API versions 200-520.
To build this package, in the top level of this repository run:
@ -27,5 +27,5 @@ of downloading from the remote repository.
Documentation
-------------
* [API documentation](https://foundationdb.org/documentation/godoc/fdb.html)
* [Tutorial](https://foundationdb.org/documentation/class-scheduling-go.html)
* [API documentation](https://www.foundationdb.org/documentation/godoc/fdb.html)
* [Tutorial](https://www.foundationdb.org/documentation/class-scheduling-go.html)

View File

@ -25,7 +25,7 @@ GOPATH := $(CURDIR)/bindings/go/build
GO_IMPORT_PATH := github.com/apple/foundationdb/bindings/go/src
GO_DEST := $(GOPATH)/src/$(GO_IMPORT_PATH)
.PHONY: fdb_go fdb_go_path fdb_go_tester fdb_go_tester_clean godoc godoc_clean
.PHONY: fdb_go fdb_go_path fdb_go_fmt fdb_go_fmt_check fdb_go_tester fdb_go_tester_clean godoc godoc_clean
# We only override if the environment didn't set it (this is used by
# the fdbwebsite documentation build process)
@ -49,12 +49,23 @@ GO_PACKAGE_OBJECTS := $(addprefix $(GO_PACKAGE_OUTDIR)/,$(GO_PACKAGES:=.a))
GO_SRC := $(shell find $(CURDIR)/bindings/go/src -name '*.go')
fdb_go: $(GO_PACKAGE_OBJECTS) $(GO_SRC)
fdb_go: $(GO_PACKAGE_OBJECTS) $(GO_SRC) fdb_go_fmt_check
fdb_go_path: $(GO_SRC)
fdb_go_fmt: $(GO_SRC)
@echo "Formatting fdb_go"
@gofmt -w $(GO_SRC)
fdb_go_fmt_check: $(GO_SRC)
@echo "Checking fdb_go"
@bash -c 'fmtoutstr=$$(gofmt -l $(GO_SRC)) ; if [[ -n "$${fmtoutstr}" ]] ; then echo "Detected go formatting violations for the following files:" ; echo "$${fmtoutstr}" ; echo "Try running: make fdb_go_fmt"; exit 1 ; fi'
$(GO_DEST)/.stamp: $(GO_SRC)
@echo "Creating fdb_go_path"
@mkdir -p $(GO_DEST)
@cp -r bindings/go/src/* $(GO_DEST)
@touch $(GO_DEST)/.stamp
fdb_go_path: $(GO_DEST)/.stamp
fdb_go_clean:
@echo "Cleaning fdb_go"
@ -66,31 +77,31 @@ fdb_go_tester_clean:
@echo "Cleaning fdb_go_tester"
@rm -rf $(GOPATH)/bin
$(GOPATH)/bin/_stacktester: fdb_go_path $(GO_SRC) $(GO_PACKAGE_OBJECTS) $(GO_DEST)/fdb/generated.go
$(GOPATH)/bin/_stacktester: $(GO_DEST)/.stamp $(GO_SRC) $(GO_PACKAGE_OBJECTS) $(GO_DEST)/fdb/generated.go
@echo "Compiling $(basename $(notdir $@))"
@go install $(GO_IMPORT_PATH)/_stacktester
$(GO_PACKAGE_OUTDIR)/fdb/tuple.a: fdb_go_path $(GO_SRC) $(GO_PACKAGE_OUTDIR)/fdb.a $(GO_DEST)/fdb/generated.go
$(GO_PACKAGE_OUTDIR)/fdb/tuple.a: $(GO_DEST)/.stamp $(GO_SRC) $(GO_PACKAGE_OUTDIR)/fdb.a $(GO_DEST)/fdb/generated.go
@echo "Compiling fdb/tuple"
@go install $(GO_IMPORT_PATH)/fdb/tuple
$(GO_PACKAGE_OUTDIR)/fdb/subspace.a: fdb_go_path $(GO_SRC) $(GO_PACKAGE_OUTDIR)/fdb.a $(GO_PACKAGE_OUTDIR)/fdb/tuple.a $(GO_DEST)/fdb/generated.go
$(GO_PACKAGE_OUTDIR)/fdb/subspace.a: $(GO_DEST)/.stamp $(GO_SRC) $(GO_PACKAGE_OUTDIR)/fdb.a $(GO_PACKAGE_OUTDIR)/fdb/tuple.a $(GO_DEST)/fdb/generated.go
@echo "Compiling fdb/subspace"
@go install $(GO_IMPORT_PATH)/fdb/subspace
$(GO_PACKAGE_OUTDIR)/fdb/directory.a: fdb_go_path $(GO_SRC) $(GO_PACKAGE_OUTDIR)/fdb.a $(GO_PACKAGE_OUTDIR)/fdb/tuple.a $(GO_PACKAGE_OUTDIR)/fdb/subspace.a $(GO_DEST)/fdb/generated.go
$(GO_PACKAGE_OUTDIR)/fdb/directory.a: $(GO_DEST)/.stamp $(GO_SRC) $(GO_PACKAGE_OUTDIR)/fdb.a $(GO_PACKAGE_OUTDIR)/fdb/tuple.a $(GO_PACKAGE_OUTDIR)/fdb/subspace.a $(GO_DEST)/fdb/generated.go
@echo "Compiling fdb/directory"
@go install $(GO_IMPORT_PATH)/fdb/directory
$(GO_PACKAGE_OUTDIR)/fdb.a: fdb_go_path $(GO_SRC) $(GO_DEST)/fdb/generated.go
$(GO_PACKAGE_OUTDIR)/fdb.a: $(GO_DEST)/.stamp lib/libfdb_c.$(DLEXT) $(GO_SRC) $(GO_DEST)/fdb/generated.go
@echo "Compiling fdb"
@go install $(GO_IMPORT_PATH)/fdb
$(GO_DEST)/fdb/generated.go: fdb_go_path lib/libfdb_c.$(DLEXT) bindings/go/src/_util/translate_fdb_options.go fdbclient/vexillographer/fdb.options
$(GO_DEST)/fdb/generated.go: $(GO_DEST)/.stamp bindings/go/src/_util/translate_fdb_options.go fdbclient/vexillographer/fdb.options
@echo "Building $@"
@go run bindings/go/src/_util/translate_fdb_options.go < fdbclient/vexillographer/fdb.options > $@
godoc: fdb_go_path $(GO_SRC)
godoc: fdb_go_path $(GO_SRC) $(GO_DEST)/fdb/generated.go
@echo "Generating Go Documentation"
@rm -rf $(GODOC_DIR)/godoc
@mkdir -p $(GODOC_DIR)/godoc
@ -106,6 +117,12 @@ godoc: fdb_go_path $(GO_SRC)
@(sed -i -e 's_a href="subspace/"_a href="fdb.subspace.html"_' $(GODOC_DIR)/godoc/fdb.html)
@(sed -i -e 's_a href="directory/"_a href="fdb.directory.html"_' $(GODOC_DIR)/godoc/fdb.html)
@(sed -i -e 's_a href="/pkg/builtin_a href="https://godoc.org/pkg/builtin_g;s_a href="/src/github.com/apple/foundationdb_a href="https://github.com/apple/foundationdb/tree/master_g;s_a href="/pkg/github.com/apple/foundationdb/bindings/go/src/fdb/_a href="./fdb.html_g' $(GODOC_DIR)/godoc/fdb.html)
@(sed -i -e 's_a href="/pkg/builtin_a href="https://godoc.org/pkg/builtin_g;s_a href="/src/github.com/apple/foundationdb_a href="https://github.com/apple/foundationdb/tree/master_g;s_a href="/pkg/github.com/apple/foundationdb/bindings/go/src/fdb/_a href="./fdb.html_g' $(GODOC_DIR)/godoc/fdb.directory.html)
@(sed -i -e 's_a href="/pkg/builtin_a href="https://godoc.org/pkg/builtin_g;s_a href="/src/github.com/apple/foundationdb_a href="https://github.com/apple/foundationdb/tree/master_g;s_a href="/pkg/github.com/apple/foundationdb/bindings/go/src/fdb/_a href="./fdb.html_g' $(GODOC_DIR)/godoc/fdb.subspace.html)
@(sed -i -e 's_a href="/pkg/builtin_a href="https://godoc.org/pkg/builtin_g;s_a href="/src/github.com/apple/foundationdb_a href="https://github.com/apple/foundationdb/tree/master_g;s_a href="/pkg/github.com/apple/foundationdb/bindings/go/src/fdb/_a href="./fdb.html_g' $(GODOC_DIR)/godoc/fdb.tuple.html)
godoc_clean:
@echo "Cleaning Go Documentation"
@rm -rf $(GODOC_DIR)/godoc

View File

@ -21,12 +21,12 @@
package main
import (
"github.com/apple/foundationdb/bindings/go/src/fdb"
"github.com/apple/foundationdb/bindings/go/src/fdb/tuple"
"github.com/apple/foundationdb/bindings/go/src/fdb/subspace"
"github.com/apple/foundationdb/bindings/go/src/fdb/directory"
"strings"
"bytes"
"github.com/apple/foundationdb/bindings/go/src/fdb"
"github.com/apple/foundationdb/bindings/go/src/fdb/directory"
"github.com/apple/foundationdb/bindings/go/src/fdb/subspace"
"github.com/apple/foundationdb/bindings/go/src/fdb/tuple"
"strings"
)
func (sm *StackMachine) popTuples(count int) []tuple.Tuple {
@ -93,7 +93,7 @@ func (sm *StackMachine) maybePath() []string {
return path
}
var createOps = map[string]bool {
var createOps = map[string]bool{
"CREATE_SUBSPACE": true,
"CREATE_LAYER": true,
"CREATE_OR_OPEN": true,
@ -142,7 +142,9 @@ func (de *DirectoryExtension) processOp(sm *StackMachine, op string, isDB bool,
layer = l.([]byte)
}
d, e := de.cwd().CreateOrOpen(t, tupleToPath(tuples[0]), layer)
if e != nil { panic(e) }
if e != nil {
panic(e)
}
de.store(d)
case op == "CREATE":
tuples := sm.popTuples(1)
@ -159,7 +161,9 @@ func (de *DirectoryExtension) processOp(sm *StackMachine, op string, isDB bool,
// p.([]byte) itself may be nil, but CreatePrefix handles that appropriately
d, e = de.cwd().CreatePrefix(t, tupleToPath(tuples[0]), layer, p.([]byte))
}
if e != nil { panic(e) }
if e != nil {
panic(e)
}
de.store(d)
case op == "OPEN":
tuples := sm.popTuples(1)
@ -169,7 +173,9 @@ func (de *DirectoryExtension) processOp(sm *StackMachine, op string, isDB bool,
layer = l.([]byte)
}
d, e := de.cwd().Open(rt, tupleToPath(tuples[0]), layer)
if e != nil { panic(e) }
if e != nil {
panic(e)
}
de.store(d)
case op == "CHANGE":
i := sm.waitAndPop().item.(int64)
@ -182,12 +188,16 @@ func (de *DirectoryExtension) processOp(sm *StackMachine, op string, isDB bool,
case op == "MOVE":
tuples := sm.popTuples(2)
d, e := de.cwd().Move(t, tupleToPath(tuples[0]), tupleToPath(tuples[1]))
if e != nil { panic(e) }
if e != nil {
panic(e)
}
de.store(d)
case op == "MOVE_TO":
tuples := sm.popTuples(1)
d, e := de.cwd().MoveTo(t, tupleToPath(tuples[0]))
if e != nil { panic(e) }
if e != nil {
panic(e)
}
de.store(d)
case strings.HasPrefix(op, "REMOVE"):
path := sm.maybePath()
@ -197,9 +207,11 @@ func (de *DirectoryExtension) processOp(sm *StackMachine, op string, isDB bool,
// doesn't end up committing the version key. (Other languages have
// separate remove() and remove_if_exists() so don't have this tricky
// issue).
_, e := t.Transact(func (tr fdb.Transaction) (interface{}, error) {
_, e := t.Transact(func(tr fdb.Transaction) (interface{}, error) {
ok, e := de.cwd().Remove(tr, path)
if e != nil { panic(e) }
if e != nil {
panic(e)
}
switch op[6:] {
case "":
if !ok {
@ -209,16 +221,24 @@ func (de *DirectoryExtension) processOp(sm *StackMachine, op string, isDB bool,
}
return nil, nil
})
if e != nil { panic(e) }
if e != nil {
panic(e)
}
case op == "LIST":
subs, e := de.cwd().List(rt, sm.maybePath())
if e != nil { panic(e) }
if e != nil {
panic(e)
}
t := make(tuple.Tuple, len(subs))
for i, s := range subs { t[i] = s }
for i, s := range subs {
t[i] = s
}
sm.store(idx, t.Pack())
case op == "EXISTS":
b, e := de.cwd().Exists(rt, sm.maybePath())
if e != nil { panic(e) }
if e != nil {
panic(e)
}
if b {
sm.store(idx, int64(1))
} else {
@ -229,8 +249,10 @@ func (de *DirectoryExtension) processOp(sm *StackMachine, op string, isDB bool,
sm.store(idx, de.css().Pack(tuples[0]))
case op == "UNPACK_KEY":
t, e := de.css().Unpack(fdb.Key(sm.waitAndPop().item.([]byte)))
if e != nil { panic(e) }
for _, el := range(t) {
if e != nil {
panic(e)
}
for _, el := range t {
sm.store(idx, el)
}
case op == "RANGE":
@ -252,7 +274,7 @@ func (de *DirectoryExtension) processOp(sm *StackMachine, op string, isDB bool,
k := sm.waitAndPop().item.([]byte)
k = append(k, tuple.Tuple{de.index}.Pack()...)
v := de.css().Bytes()
t.Transact(func (tr fdb.Transaction) (interface{}, error) {
t.Transact(func(tr fdb.Transaction) (interface{}, error) {
tr.Set(fdb.Key(k), v)
return nil, nil
})
@ -266,7 +288,9 @@ func (de *DirectoryExtension) processOp(sm *StackMachine, op string, isDB bool,
k3 := ss.Pack(tuple.Tuple{"exists"})
var v3 []byte
exists, e := de.cwd().Exists(rt, nil)
if e != nil { panic(e) }
if e != nil {
panic(e)
}
if exists {
v3 = tuple.Tuple{1}.Pack()
} else {
@ -276,10 +300,12 @@ func (de *DirectoryExtension) processOp(sm *StackMachine, op string, isDB bool,
var subs []string
if exists {
subs, e = de.cwd().List(rt, nil)
if e != nil { panic(e) }
if e != nil {
panic(e)
}
}
v4 := tuplePackStrings(subs)
t.Transact(func (tr fdb.Transaction) (interface{}, error) {
t.Transact(func(tr fdb.Transaction) (interface{}, error) {
tr.Set(k1, v1)
tr.Set(k2, v2)
tr.Set(k3, v3)

View File

@ -24,23 +24,23 @@ import (
"bytes"
"encoding/binary"
"encoding/hex"
"fmt"
"github.com/apple/foundationdb/bindings/go/src/fdb"
"github.com/apple/foundationdb/bindings/go/src/fdb/tuple"
"log"
"fmt"
"os"
"reflect"
"runtime"
"sort"
"strconv"
"strings"
"sync"
"runtime"
"reflect"
"time"
"strconv"
)
const verbose bool = false
var trMap = map[string]fdb.Transaction {}
var trMap = map[string]fdb.Transaction{}
var trMapLock = sync.RWMutex{}
// Make tuples sortable by byte-order
@ -99,7 +99,7 @@ func (sm *StackMachine) waitAndPop() (ret stackEntry) {
}
}()
ret, sm.stack = sm.stack[len(sm.stack) - 1], sm.stack[:len(sm.stack) - 1]
ret, sm.stack = sm.stack[len(sm.stack)-1], sm.stack[:len(sm.stack)-1]
switch el := ret.item.(type) {
case []byte:
ret.item = el
@ -150,9 +150,9 @@ func (sm *StackMachine) popPrefixRange() fdb.ExactRange {
}
func (sm *StackMachine) pushRange(idx int, sl []fdb.KeyValue, prefixFilter []byte) {
var t tuple.Tuple = make(tuple.Tuple, 0, len(sl) * 2)
var t tuple.Tuple = make(tuple.Tuple, 0, len(sl)*2)
for _, kv := range(sl) {
for _, kv := range sl {
if prefixFilter == nil || bytes.HasPrefix(kv.Key, prefixFilter) {
t = append(t, kv.Key)
t = append(t, kv.Value)
@ -240,7 +240,7 @@ func (sm *StackMachine) dumpStack() {
}
}
func (sm *StackMachine) executeMutation(t fdb.Transactor, f func (fdb.Transaction) (interface{}, error), isDB bool, idx int) {
func (sm *StackMachine) executeMutation(t fdb.Transactor, f func(fdb.Transaction) (interface{}, error), isDB bool, idx int) {
_, e := t.Transact(f)
if e != nil {
panic(e)
@ -250,8 +250,8 @@ func (sm *StackMachine) executeMutation(t fdb.Transactor, f func (fdb.Transactio
}
}
func (sm *StackMachine) checkWatches(watches [4]fdb.FutureNil, expected bool) (bool) {
for _, watch := range(watches) {
func (sm *StackMachine) checkWatches(watches [4]fdb.FutureNil, expected bool) bool {
for _, watch := range watches {
if watch.IsReady() || expected {
e := watch.Get()
if e != nil {
@ -283,7 +283,9 @@ func (sm *StackMachine) testWatches() {
tr.Set(fdb.Key("w3"), []byte("3"))
return nil, nil
})
if e != nil { panic(e) }
if e != nil {
panic(e)
}
var watches [4]fdb.FutureNil
@ -297,7 +299,9 @@ func (sm *StackMachine) testWatches() {
tr.Clear(fdb.Key("w1"))
return nil, nil
})
if e != nil { panic(e) }
if e != nil {
panic(e)
}
time.Sleep(5 * time.Second)
@ -312,7 +316,9 @@ func (sm *StackMachine) testWatches() {
tr.BitXor(fdb.Key("w3"), []byte("\xff\xff"))
return nil, nil
})
if e != nil { panic(e) }
if e != nil {
panic(e)
}
if sm.checkWatches(watches, true) {
return
@ -322,21 +328,23 @@ func (sm *StackMachine) testWatches() {
func (sm *StackMachine) testLocality() {
_, e := db.Transact(func(tr fdb.Transaction) (interface{}, error) {
tr.Options().SetTimeout(60*1000)
tr.Options().SetTimeout(60 * 1000)
tr.Options().SetReadSystemKeys()
boundaryKeys, e := db.LocalityGetBoundaryKeys(fdb.KeyRange{fdb.Key(""), fdb.Key("\xff\xff")}, 0, 0)
if e != nil { panic(e) }
if e != nil {
panic(e)
}
for i:=0; i<len(boundaryKeys)-1 ; i++ {
for i := 0; i < len(boundaryKeys)-1; i++ {
start := boundaryKeys[i]
end := tr.GetKey(fdb.LastLessThan(boundaryKeys[i+1])).MustGet()
startAddresses := tr.LocalityGetAddressesForKey(start).MustGet()
endAddresses := tr.LocalityGetAddressesForKey(end).MustGet()
for _, address1 := range(startAddresses) {
for _, address1 := range startAddresses {
found := false
for _, address2 := range(endAddresses) {
for _, address2 := range endAddresses {
if address1 == address2 {
found = true
break
@ -351,7 +359,9 @@ func (sm *StackMachine) testLocality() {
return nil, nil
})
if e != nil { panic(e) }
if e != nil {
panic(e)
}
}
func (sm *StackMachine) logStack(entries map[int]stackEntry, prefix []byte) {
@ -377,7 +387,9 @@ func (sm *StackMachine) logStack(entries map[int]stackEntry, prefix []byte) {
return nil, nil
})
if e != nil { panic(e) }
if e != nil {
panic(e)
}
return
}
@ -467,24 +479,24 @@ func (sm *StackMachine) processInst(idx int, inst tuple.Tuple) {
case op == "PUSH":
sm.store(idx, inst[1])
case op == "DUP":
entry := sm.stack[len(sm.stack) - 1]
entry := sm.stack[len(sm.stack)-1]
sm.store(entry.idx, entry.item)
case op == "EMPTY_STACK":
sm.stack = []stackEntry{}
sm.stack = make([]stackEntry, 0)
case op == "SWAP":
idx := sm.waitAndPop().item.(int64)
sm.stack[len(sm.stack) - 1], sm.stack[len(sm.stack) - 1 - int(idx)] = sm.stack[len(sm.stack) - 1 - int(idx)], sm.stack[len(sm.stack) - 1]
sm.stack[len(sm.stack)-1], sm.stack[len(sm.stack)-1-int(idx)] = sm.stack[len(sm.stack)-1-int(idx)], sm.stack[len(sm.stack)-1]
case op == "POP":
sm.stack = sm.stack[:len(sm.stack) - 1]
sm.stack = sm.stack[:len(sm.stack)-1]
case op == "SUB":
sm.store(idx, sm.waitAndPop().item.(int64) - sm.waitAndPop().item.(int64))
sm.store(idx, sm.waitAndPop().item.(int64)-sm.waitAndPop().item.(int64))
case op == "CONCAT":
str1 := sm.waitAndPop().item
str2 := sm.waitAndPop().item
switch str1.(type) {
case string:
sm.store(idx, str1.(string) + str2.(string))
sm.store(idx, str1.(string)+str2.(string))
case []byte:
sm.store(idx, append(str1.([]byte), str2.([]byte)...))
default:
@ -497,16 +509,18 @@ func (sm *StackMachine) processInst(idx int, inst tuple.Tuple) {
case op == "ON_ERROR":
sm.store(idx, sm.currentTransaction().OnError(fdb.Error{int(sm.waitAndPop().item.(int64))}))
case op == "GET_READ_VERSION":
_, e = rt.ReadTransact(func (rtr fdb.ReadTransaction) (interface{}, error) {
_, e = rt.ReadTransact(func(rtr fdb.ReadTransaction) (interface{}, error) {
sm.lastVersion = rtr.GetReadVersion().MustGet()
sm.store(idx, []byte("GOT_READ_VERSION"))
return nil, nil
})
if e != nil { panic(e) }
if e != nil {
panic(e)
}
case op == "SET":
key := fdb.Key(sm.waitAndPop().item.([]byte))
value := sm.waitAndPop().item.([]byte)
sm.executeMutation(t, func (tr fdb.Transaction) (interface{}, error) {
sm.executeMutation(t, func(tr fdb.Transaction) (interface{}, error) {
tr.Set(key, value)
return nil, nil
}, isDB, idx)
@ -525,10 +539,12 @@ func (sm *StackMachine) processInst(idx int, inst tuple.Tuple) {
sm.logStack(entries, prefix)
case op == "GET":
key := fdb.Key(sm.waitAndPop().item.([]byte))
res, e := rt.ReadTransact(func (rtr fdb.ReadTransaction) (interface{}, error) {
res, e := rt.ReadTransact(func(rtr fdb.ReadTransaction) (interface{}, error) {
return rtr.Get(key), nil
})
if e != nil { panic(e) }
if e != nil {
panic(e)
}
sm.store(idx, res.(fdb.FutureByteSlice))
case op == "COMMIT":
@ -537,7 +553,7 @@ func (sm *StackMachine) processInst(idx int, inst tuple.Tuple) {
sm.currentTransaction().Reset()
case op == "CLEAR":
key := fdb.Key(sm.waitAndPop().item.([]byte))
sm.executeMutation(t, func (tr fdb.Transaction) (interface{}, error) {
sm.executeMutation(t, func(tr fdb.Transaction) (interface{}, error) {
tr.Clear(key)
return nil, nil
}, isDB, idx)
@ -557,10 +573,12 @@ func (sm *StackMachine) processInst(idx int, inst tuple.Tuple) {
case op == "GET_KEY":
sel := sm.popSelector()
prefix := sm.waitAndPop().item.([]byte)
res, e := rt.ReadTransact(func (rtr fdb.ReadTransaction) (interface{}, error) {
res, e := rt.ReadTransact(func(rtr fdb.ReadTransaction) (interface{}, error) {
return rtr.GetKey(sel).MustGet(), nil
})
if e != nil { panic(e) }
if e != nil {
panic(e)
}
key := res.(fdb.Key)
@ -570,7 +588,9 @@ func (sm *StackMachine) processInst(idx int, inst tuple.Tuple) {
sm.store(idx, prefix)
} else {
s, e := fdb.Strinc(prefix)
if e != nil { panic(e) }
if e != nil {
panic(e)
}
sm.store(idx, s)
}
case strings.HasPrefix(op, "GET_RANGE"):
@ -591,10 +611,12 @@ func (sm *StackMachine) processInst(idx int, inst tuple.Tuple) {
prefix = sm.waitAndPop().item.([]byte)
}
res, e := rt.ReadTransact(func (rtr fdb.ReadTransaction) (interface{}, error) {
res, e := rt.ReadTransact(func(rtr fdb.ReadTransaction) (interface{}, error) {
return rtr.GetRange(r, ro).GetSliceOrPanic(), nil
})
if e != nil { panic(e) }
if e != nil {
panic(e)
}
sm.pushRange(idx, res.([]fdb.KeyValue), prefix)
case strings.HasPrefix(op, "CLEAR_RANGE"):
@ -607,7 +629,7 @@ func (sm *StackMachine) processInst(idx int, inst tuple.Tuple) {
er = sm.popKeyRange()
}
sm.executeMutation(t, func (tr fdb.Transaction) (interface{}, error) {
sm.executeMutation(t, func(tr fdb.Transaction) (interface{}, error) {
tr.ClearRange(er)
return nil, nil
}, isDB, idx)
@ -623,7 +645,7 @@ func (sm *StackMachine) processInst(idx int, inst tuple.Tuple) {
if e != nil {
panic(e)
}
for _, el := range(t) {
for _, el := range t {
sm.store(idx, []byte(tuple.Tuple{el}.Pack()))
}
case op == "TUPLE_SORT":
@ -681,7 +703,7 @@ func (sm *StackMachine) processInst(idx int, inst tuple.Tuple) {
if e != nil {
panic(e)
}
db.Transact(func (tr fdb.Transaction) (interface{}, error) {
db.Transact(func(tr fdb.Transaction) (interface{}, error) {
v := tr.GetRange(er, fdb.RangeOptions{}).GetSliceOrPanic()
if len(v) != 0 {
panic(fdb.Error{1020})
@ -718,7 +740,7 @@ func (sm *StackMachine) processInst(idx int, inst tuple.Tuple) {
key := fdb.Key(sm.waitAndPop().item.([]byte))
ival := sm.waitAndPop().item
value := ival.([]byte)
sm.executeMutation(t, func (tr fdb.Transaction) (interface{}, error) {
sm.executeMutation(t, func(tr fdb.Transaction) (interface{}, error) {
reflect.ValueOf(tr).MethodByName(opname).Call([]reflect.Value{reflect.ValueOf(key), reflect.ValueOf(value)})
return nil, nil
}, isDB, idx)
@ -730,6 +752,33 @@ func (sm *StackMachine) processInst(idx int, inst tuple.Tuple) {
db.Options().SetLocationCacheSize(100001)
db.Options().SetMaxWatches(10001)
if !fdb.IsAPIVersionSelected() {
log.Fatal("API version should be selected")
}
apiVersion := fdb.MustGetAPIVersion()
if apiVersion == 0 {
log.Fatal("API version is 0")
}
e1 := fdb.APIVersion(apiVersion + 1)
if e1 != nil {
fdbE := e1.(fdb.Error)
if fdbE.Code != 2201 {
panic(e1)
}
} else {
log.Fatal("Was not stopped from selecting two API versions")
}
e2 := fdb.APIVersion(apiVersion - 1)
if e2 != nil {
fdbE := e2.(fdb.Error)
if fdbE.Code != 2201 {
panic(e2)
}
} else {
log.Fatal("Was not stopped from selecting two API versions")
}
fdb.MustAPIVersion(apiVersion)
_, e := db.Transact(func(tr fdb.Transaction) (interface{}, error) {
tr.Options().SetPrioritySystemImmediate()
tr.Options().SetPriorityBatch()
@ -740,7 +789,7 @@ func (sm *StackMachine) processInst(idx int, inst tuple.Tuple) {
tr.Options().SetReadSystemKeys()
tr.Options().SetAccessSystemKeys()
tr.Options().SetDurabilityDevNullIsWebScale()
tr.Options().SetTimeout(60*1000)
tr.Options().SetTimeout(60 * 1000)
tr.Options().SetRetryLimit(50)
tr.Options().SetMaxRetryDelay(100)
tr.Options().SetUsedDuringCommitProtectionDisable()
@ -751,7 +800,9 @@ func (sm *StackMachine) processInst(idx int, inst tuple.Tuple) {
return tr.Get(fdb.Key("\xff")).MustGet(), nil
})
if e != nil { panic(e) }
if e != nil {
panic(e)
}
sm.testWatches()
sm.testLocality()
@ -772,7 +823,7 @@ func (sm *StackMachine) processInst(idx int, inst tuple.Tuple) {
}
func (sm *StackMachine) Run() {
r, e := db.Transact(func (tr fdb.Transaction) (interface{}, error) {
r, e := db.Transact(func(tr fdb.Transaction) (interface{}, error) {
return tr.GetRange(tuple.Tuple{sm.prefix}, fdb.RangeOptions{}).GetSliceOrPanic(), nil
})
if e != nil {
@ -781,7 +832,7 @@ func (sm *StackMachine) Run() {
instructions := r.([]fdb.KeyValue)
for i, kv := range(instructions) {
for i, kv := range instructions {
inst, _ := tuple.Unpack(fdb.Key(kv.Value))
if sm.verbose {
@ -811,10 +862,17 @@ func main() {
log.Fatal(e)
}
if fdb.IsAPIVersionSelected() {
log.Fatal("API version already selected")
}
e = fdb.APIVersion(apiVersion)
if e != nil {
log.Fatal(e)
}
if fdb.MustGetAPIVersion() != apiVersion {
log.Fatal("API version not equal to value selected")
}
db, e = fdb.Open(clusterFile, []byte("DB"))
if e != nil {

View File

@ -24,14 +24,14 @@ package main
import (
"encoding/xml"
"io/ioutil"
"fmt"
"go/doc"
"io/ioutil"
"log"
"strings"
"os"
"strings"
"unicode"
"unicode/utf8"
"go/doc"
)
type Option struct {
@ -114,7 +114,7 @@ func translateName(old string) string {
return strings.Replace(strings.Title(strings.Replace(old, "_", " ", -1)), " ", "", -1)
}
func lowerFirst (s string) string {
func lowerFirst(s string) string {
if s == "" {
return ""
}
@ -139,7 +139,7 @@ func writeEnum(scope Scope, opt Option, delta int) {
doc.ToText(os.Stdout, opt.Description, " // ", "", 73)
// fmt.Printf(" // %s\n", opt.Description)
}
fmt.Printf(" %s %s = %d\n", scope.Name + translateName(opt.Name), scope.Name, opt.Code + delta)
fmt.Printf(" %s %s = %d\n", scope.Name+translateName(opt.Name), scope.Name, opt.Code+delta)
}
func main() {
@ -182,11 +182,11 @@ func int64ToBytes(i int64) ([]byte, error) {
}
`)
for _, scope := range(v.Scope) {
for _, scope := range v.Scope {
if strings.HasSuffix(scope.Name, "Option") {
receiver := scope.Name + "s"
for _, opt := range(scope.Option) {
for _, opt := range scope.Option {
if opt.Description != "Deprecated" && !opt.Hidden { // Eww
writeOpt(receiver, opt)
}
@ -195,7 +195,7 @@ func int64ToBytes(i int64) ([]byte, error) {
}
if scope.Name == "MutationType" {
for _, opt := range(scope.Option) {
for _, opt := range scope.Option {
if opt.Description != "Deprecated" && !opt.Hidden { // Eww
writeMutation(opt)
}
@ -218,7 +218,7 @@ func int64ToBytes(i int64) ([]byte, error) {
type %s int
const (
`, scope.Name)
for _, opt := range(scope.Option) {
for _, opt := range scope.Option {
if !opt.Hidden {
writeEnum(scope, opt, d)
}

View File

@ -23,7 +23,7 @@
package fdb
/*
#define FDB_API_VERSION 510
#define FDB_API_VERSION 520
#include <foundationdb/fdb_c.h>
*/
import "C"

View File

@ -23,7 +23,7 @@
package fdb
/*
#define FDB_API_VERSION 510
#define FDB_API_VERSION 520
#include <foundationdb/fdb_c.h>
*/
import "C"

View File

@ -23,10 +23,10 @@
package directory
import (
"bytes"
"encoding/binary"
"github.com/apple/foundationdb/bindings/go/src/fdb"
"github.com/apple/foundationdb/bindings/go/src/fdb/subspace"
"encoding/binary"
"bytes"
"math/rand"
"sync"
)
@ -53,14 +53,18 @@ func windowSize(start int64) int64 {
// can't be too small. So start small and scale up. We don't want this to
// ever get *too* big because we have to store about window_size/2 recent
// items.
if start < 255 { return 64 }
if start < 65535 { return 1024 }
if start < 255 {
return 64
}
if start < 65535 {
return 1024
}
return 8192
}
func (hca highContentionAllocator) allocate(tr fdb.Transaction, s subspace.Subspace) (subspace.Subspace, error) {
for {
rr := tr.Snapshot().GetRange(hca.counters, fdb.RangeOptions{Limit:1, Reverse:true})
rr := tr.Snapshot().GetRange(hca.counters, fdb.RangeOptions{Limit: 1, Reverse: true})
kvs := rr.GetSliceOrPanic()
var start int64
@ -106,7 +110,7 @@ func (hca highContentionAllocator) allocate(tr fdb.Transaction, s subspace.Subsp
}
window = windowSize(start)
if count * 2 < window {
if count*2 < window {
break
}
@ -124,7 +128,7 @@ func (hca highContentionAllocator) allocate(tr fdb.Transaction, s subspace.Subsp
allocatorMutex.Lock()
latestCounter := tr.Snapshot().GetRange(hca.counters, fdb.RangeOptions{Limit:1, Reverse:true})
latestCounter := tr.Snapshot().GetRange(hca.counters, fdb.RangeOptions{Limit: 1, Reverse: true})
candidateValue := tr.Get(key)
tr.Options().SetNextWriteNoWriteConflictRange()
tr.Set(key, []byte(""))

View File

@ -26,7 +26,7 @@
//
// For general guidance on directory usage, see the Directories section of the
// Developer Guide
// (https://foundationdb.org/documentation/developer-guide.html#developer-guide-directories).
// (https://www.foundationdb.org/documentation/developer-guide.html#developer-guide-directories).
//
// Directories are identified by hierarchical paths analogous to the paths in a
// Unix-like file system. A path is represented as a slice of strings. Each
@ -40,9 +40,9 @@
package directory
import (
"errors"
"github.com/apple/foundationdb/bindings/go/src/fdb"
"github.com/apple/foundationdb/bindings/go/src/fdb/subspace"
"errors"
)
const (

View File

@ -23,13 +23,13 @@
package directory
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"github.com/apple/foundationdb/bindings/go/src/fdb"
"github.com/apple/foundationdb/bindings/go/src/fdb/subspace"
"github.com/apple/foundationdb/bindings/go/src/fdb/tuple"
"encoding/binary"
"bytes"
"fmt"
"errors"
)
type directoryLayer struct {
@ -130,13 +130,17 @@ func (dl directoryLayer) createOrOpen(rtr fdb.ReadTransaction, tr *fdb.Transacti
prefix = newss.Bytes()
pf, e := dl.isPrefixFree(rtr.Snapshot(), prefix)
if e != nil { return nil, e }
if e != nil {
return nil, e
}
if !pf {
return nil, errors.New("the directory layer has manually allocated prefixes that conflict with the automatic prefix allocator")
}
} else {
pf, e := dl.isPrefixFree(rtr, prefix)
if e != nil { return nil, e }
if e != nil {
return nil, e
}
if !pf {
return nil, errors.New("the given prefix is already in use")
}
@ -171,7 +175,7 @@ func (dl directoryLayer) createOrOpen(rtr fdb.ReadTransaction, tr *fdb.Transacti
}
func (dl directoryLayer) CreateOrOpen(t fdb.Transactor, path []string, layer []byte) (DirectorySubspace, error) {
r, e := t.Transact(func (tr fdb.Transaction) (interface{}, error) {
r, e := t.Transact(func(tr fdb.Transaction) (interface{}, error) {
return dl.createOrOpen(tr, &tr, path, layer, nil, true, true)
})
if e != nil {
@ -181,7 +185,7 @@ func (dl directoryLayer) CreateOrOpen(t fdb.Transactor, path []string, layer []b
}
func (dl directoryLayer) Create(t fdb.Transactor, path []string, layer []byte) (DirectorySubspace, error) {
r, e := t.Transact(func (tr fdb.Transaction) (interface{}, error) {
r, e := t.Transact(func(tr fdb.Transaction) (interface{}, error) {
return dl.createOrOpen(tr, &tr, path, layer, nil, true, false)
})
if e != nil {
@ -194,7 +198,7 @@ func (dl directoryLayer) CreatePrefix(t fdb.Transactor, path []string, layer []b
if prefix == nil {
prefix = []byte{}
}
r, e := t.Transact(func (tr fdb.Transaction) (interface{}, error) {
r, e := t.Transact(func(tr fdb.Transaction) (interface{}, error) {
return dl.createOrOpen(tr, &tr, path, layer, prefix, true, false)
})
if e != nil {
@ -204,7 +208,7 @@ func (dl directoryLayer) CreatePrefix(t fdb.Transactor, path []string, layer []b
}
func (dl directoryLayer) Open(rt fdb.ReadTransactor, path []string, layer []byte) (DirectorySubspace, error) {
r, e := rt.ReadTransact(func (rtr fdb.ReadTransaction) (interface{}, error) {
r, e := rt.ReadTransact(func(rtr fdb.ReadTransaction) (interface{}, error) {
return dl.createOrOpen(rtr, nil, path, layer, nil, false, true)
})
if e != nil {
@ -214,7 +218,7 @@ func (dl directoryLayer) Open(rt fdb.ReadTransactor, path []string, layer []byte
}
func (dl directoryLayer) Exists(rt fdb.ReadTransactor, path []string) (bool, error) {
r, e := rt.ReadTransact(func (rtr fdb.ReadTransaction) (interface{}, error) {
r, e := rt.ReadTransact(func(rtr fdb.ReadTransaction) (interface{}, error) {
if e := dl.checkVersion(rtr, nil); e != nil {
return false, e
}
@ -241,7 +245,7 @@ func (dl directoryLayer) Exists(rt fdb.ReadTransactor, path []string) (bool, err
}
func (dl directoryLayer) List(rt fdb.ReadTransactor, path []string) ([]string, error) {
r, e := rt.ReadTransact(func (rtr fdb.ReadTransaction) (interface{}, error) {
r, e := rt.ReadTransact(func(rtr fdb.ReadTransaction) (interface{}, error) {
if e := dl.checkVersion(rtr, nil); e != nil {
return nil, e
}
@ -272,7 +276,7 @@ func (dl directoryLayer) MoveTo(t fdb.Transactor, newAbsolutePath []string) (Dir
}
func (dl directoryLayer) Move(t fdb.Transactor, oldPath []string, newPath []string) (DirectorySubspace, error) {
r, e := t.Transact(func (tr fdb.Transaction) (interface{}, error) {
r, e := t.Transact(func(tr fdb.Transaction) (interface{}, error) {
if e := dl.checkVersion(tr, &tr); e != nil {
return nil, e
}
@ -330,7 +334,7 @@ func (dl directoryLayer) Move(t fdb.Transactor, oldPath []string, newPath []stri
}
func (dl directoryLayer) Remove(t fdb.Transactor, path []string) (bool, error) {
r, e := t.Transact(func (tr fdb.Transaction) (interface{}, error) {
r, e := t.Transact(func(tr fdb.Transaction) (interface{}, error) {
if e := dl.checkVersion(tr, &tr); e != nil {
return false, e
}
@ -375,9 +379,13 @@ func (dl directoryLayer) removeRecursive(tr fdb.Transaction, node subspace.Subsp
}
p, e := dl.nodeSS.Unpack(node)
if e != nil { return e }
if e != nil {
return e
}
kr, e := fdb.PrefixRange(p[0].([]byte))
if e != nil { return e }
if e != nil {
return e
}
tr.ClearRange(kr)
tr.ClearRange(node)
@ -445,7 +453,7 @@ func (dl directoryLayer) nodeContainingKey(rtr fdb.ReadTransaction, key []byte)
bk, _ := dl.nodeSS.FDBRangeKeys()
kr := fdb.KeyRange{bk, fdb.Key(append(dl.nodeSS.Pack(tuple.Tuple{key}), 0x00))}
kvs := rtr.GetRange(kr, fdb.RangeOptions{Reverse:true, Limit:1}).GetSliceOrPanic()
kvs := rtr.GetRange(kr, fdb.RangeOptions{Reverse: true, Limit: 1}).GetSliceOrPanic()
if len(kvs) == 1 {
pp, e := dl.nodeSS.Unpack(kvs[0].Key)
if e != nil {
@ -540,7 +548,7 @@ func (dl directoryLayer) contentsOfNode(node subspace.Subspace, path []string, l
}
prefix := p[0]
newPath := make([]string, len(dl.path) + len(path))
newPath := make([]string, len(dl.path)+len(path))
copy(newPath, dl.path)
copy(newPath[len(dl.path):], path)
@ -548,7 +556,7 @@ func (dl directoryLayer) contentsOfNode(node subspace.Subspace, path []string, l
ss := subspace.FromBytes(pb)
if bytes.Compare(layer, []byte("partition")) == 0 {
nssb := make([]byte, len(pb) + 1)
nssb := make([]byte, len(pb)+1)
copy(nssb, pb)
nssb[len(pb)] = 0xFE
ndl := NewDirectoryLayer(subspace.FromBytes(nssb), ss, false).(directoryLayer)
@ -560,7 +568,9 @@ func (dl directoryLayer) contentsOfNode(node subspace.Subspace, path []string, l
}
func (dl directoryLayer) nodeWithPrefix(prefix []byte) subspace.Subspace {
if prefix == nil { return nil }
if prefix == nil {
return nil
}
return dl.nodeSS.Sub(prefix)
}
@ -576,9 +586,9 @@ func (dl directoryLayer) find(rtr fdb.ReadTransaction, path []string) *node {
}
func (dl directoryLayer) partitionSubpath(lpath, rpath []string) []string {
r := make([]string, len(lpath) - len(dl.path) + len(rpath))
r := make([]string, len(lpath)-len(dl.path)+len(rpath))
copy(r, lpath[len(dl.path):])
copy(r[len(lpath) - len(dl.path):], rpath)
copy(r[len(lpath)-len(dl.path):], rpath)
return r
}

View File

@ -23,9 +23,9 @@
package directory
import (
"bytes"
"github.com/apple/foundationdb/bindings/go/src/fdb"
"github.com/apple/foundationdb/bindings/go/src/fdb/subspace"
"bytes"
)
type node struct {

View File

@ -25,12 +25,12 @@ Package fdb provides an interface to FoundationDB databases (version 2.0 or high
To build and run programs using this package, you must have an installed copy of
the FoundationDB client libraries (version 2.0.0 or later), available for Linux,
Windows and OS X at https://files.foundationdb.org/fdb-c/.
Windows and OS X at https://www.foundationdb.org/downloads/fdb-c/.
This documentation specifically applies to the FoundationDB Go binding. For more
extensive guidance to programming with FoundationDB, as well as API
documentation for the other FoundationDB interfaces, please see
https://foundationdb.org/documentation/index.html.
https://www.foundationdb.org/documentation/index.html.
Basic Usage
@ -198,7 +198,7 @@ operations perform different transformations. Like other database operations, an
atomic operation is used within a transaction.
For more information on atomic operations in FoundationDB, please see
https://foundationdb.org/documentation/developer-guide.html#atomic-operations. The
https://www.foundationdb.org/documentation/developer-guide.html#atomic-operations. The
operands to atomic operations in this API must be provided as appropriately
encoded byte slices. To convert a Go type to a byte slice, see the binary
package.

View File

@ -23,7 +23,7 @@
package fdb
/*
#define FDB_API_VERSION 510
#define FDB_API_VERSION 520
#include <foundationdb/fdb_c.h>
*/
import "C"
@ -37,7 +37,7 @@ import (
// as a panic from any FoundationDB API function whose name ends with OrPanic.
//
// You may compare the Code field of an Error against the list of FoundationDB
// error codes at https://foundationdb.org/documentation/api-error-codes.html,
// error codes at https://www.foundationdb.org/documentation/api-error-codes.html,
// but generally an Error should be passed to (Transaction).OnError. When using
// (Database).Transact, non-fatal errors will be retried automatically.
type Error struct {

View File

@ -23,18 +23,18 @@
package fdb
/*
#define FDB_API_VERSION 510
#define FDB_API_VERSION 520
#include <foundationdb/fdb_c.h>
#include <stdlib.h>
*/
import "C"
import (
"fmt"
"log"
"runtime"
"sync"
"unsafe"
"fmt"
"log"
)
/* Would put this in futures.go but for the documented issue with
@ -53,7 +53,7 @@ type Transactor interface {
// Transact executes the caller-provided function, providing it with a
// Transaction (itself a Transactor, allowing composition of transactional
// functions).
Transact(func (Transaction) (interface{}, error)) (interface{}, error)
Transact(func(Transaction) (interface{}, error)) (interface{}, error)
// All Transactors are also ReadTransactors, allowing them to be used with
// read-only transactional functions.
@ -68,7 +68,7 @@ type ReadTransactor interface {
// ReadTransact executes the caller-provided function, providing it with a
// ReadTransaction (itself a ReadTransactor, allowing composition of
// read-only transactional functions).
ReadTransact(func (ReadTransaction) (interface{}, error)) (interface{}, error)
ReadTransact(func(ReadTransaction) (interface{}, error)) (interface{}, error)
}
func setOpt(setter func(*C.uint8_t, C.int) C.fdb_error_t, param []byte) error {
@ -109,7 +109,7 @@ func (opt NetworkOptions) setOpt(code int, param []byte) error {
// library, an error will be returned. APIVersion must be called prior to any
// other functions in the fdb package.
//
// Currently, this package supports API versions 200 through 510.
// Currently, this package supports API versions 200 through 520.
//
// Warning: When using the multi-version client API, setting an API version that
// is not supported by a particular client library will prevent that client from
@ -117,7 +117,7 @@ func (opt NetworkOptions) setOpt(code int, param []byte) error {
// the API version of your application after upgrading your client until the
// cluster has also been upgraded.
func APIVersion(version int) error {
headerVersion := 510
headerVersion := 520
networkMutex.Lock()
defer networkMutex.Unlock()
@ -129,7 +129,7 @@ func APIVersion(version int) error {
return errAPIVersionAlreadySet
}
if version < 200 || version > 510 {
if version < 200 || version > 520 {
return errAPIVersionNotSupported
}
@ -152,6 +152,25 @@ func APIVersion(version int) error {
return nil
}
// Determines if an API version has already been selected, i.e., if
// APIVersion or MustAPIVersion have already been called.
func IsAPIVersionSelected() bool {
return apiVersion != 0
}
// Returns the API version that has been selected through APIVersion
// or MustAPIVersion. If the version has already been selected, then
// the first value returned is the API version and the error is
// nil. If the API version has not yet been set, then the error
// will be non-nil.
func GetAPIVersion() (int, error) {
if IsAPIVersionSelected() {
return apiVersion, nil
} else {
return 0, errAPIVersionUnset
}
}
// MustAPIVersion is like APIVersion but panics if the API version is not
// supported.
func MustAPIVersion(version int) {
@ -161,6 +180,16 @@ func MustAPIVersion(version int) {
}
}
// MustGetAPIVersion is like GetAPIVersion but panics if the API version
// has not yet been set.
func MustGetAPIVersion() int {
apiVersion, err := GetAPIVersion()
if err != nil {
panic(err)
}
return apiVersion
}
var apiVersion int
var networkStarted bool
var networkMutex sync.Mutex

View File

@ -23,8 +23,8 @@
package fdb_test
import (
"github.com/apple/foundationdb/bindings/go/src/fdb"
"fmt"
"github.com/apple/foundationdb/bindings/go/src/fdb"
"testing"
)
@ -52,7 +52,7 @@ func ExampleVersionstamp(t *testing.T) {
fdb.MustAPIVersion(400)
db := fdb.MustOpenDefault()
setVs := func(t fdb.Transactor, key fdb.Key ) (fdb.FutureKey, error) {
setVs := func(t fdb.Transactor, key fdb.Key) (fdb.FutureKey, error) {
fmt.Printf("setOne called with: %T\n", t)
ret, e := t.Transact(func(tr fdb.Transaction) (interface{}, error) {
tr.SetVersionstampedValue(key, []byte("blahblahbl"))
@ -100,7 +100,7 @@ func ExampleTransactor() {
setMany := func(t fdb.Transactor, value []byte, keys ...fdb.Key) error {
fmt.Printf("setMany called with: %T\n", t)
_, e := t.Transact(func(tr fdb.Transaction) (interface{}, error) {
for _, key := range(keys) {
for _, key := range keys {
setOne(tr, key, value)
}
return nil, nil

View File

@ -24,7 +24,7 @@ package fdb
/*
#cgo LDFLAGS: -lfdb_c -lm
#define FDB_API_VERSION 510
#define FDB_API_VERSION 520
#include <foundationdb/fdb_c.h>
#include <string.h>
@ -41,9 +41,9 @@ package fdb
import "C"
import (
"unsafe"
"sync"
"runtime"
"sync"
"unsafe"
)
// A Future represents a value (or error) to be available at some later
@ -253,7 +253,7 @@ type futureKeyValueArray struct {
}
func stringRefToSlice(ptr unsafe.Pointer) []byte {
size := *((*C.int)(unsafe.Pointer(uintptr(ptr)+8)))
size := *((*C.int)(unsafe.Pointer(uintptr(ptr) + 8)))
if size == 0 {
return []byte{}
@ -278,7 +278,7 @@ func (f futureKeyValueArray) Get() ([]KeyValue, bool, error) {
ret := make([]KeyValue, int(count))
for i := 0; i < int(count); i++ {
kvptr := unsafe.Pointer(uintptr(unsafe.Pointer(kvs)) + uintptr(i * 24))
kvptr := unsafe.Pointer(uintptr(unsafe.Pointer(kvs)) + uintptr(i*24))
ret[i].Key = stringRefToSlice(kvptr)
ret[i].Value = stringRefToSlice(unsafe.Pointer(uintptr(kvptr) + 12))
@ -361,7 +361,7 @@ func (f futureStringSlice) Get() ([]string, error) {
ret := make([]string, int(count))
for i := 0; i < int(count); i++ {
ret[i] = C.GoString((*C.char)(*(**C.char)(unsafe.Pointer(uintptr(unsafe.Pointer(strings))+uintptr(i*8)))))
ret[i] = C.GoString((*C.char)(*(**C.char)(unsafe.Pointer(uintptr(unsafe.Pointer(strings)) + uintptr(i*8)))))
}
return ret, nil

View File

@ -34,7 +34,7 @@ type Selectable interface {
//
// The most common key selectors are constructed with the functions documented
// below. For details of how KeySelectors are specified and resolved, see
// https://foundationdb.org/documentation/developer-guide.html#key-selectors.
// https://www.foundationdb.org/documentation/developer-guide.html#key-selectors.
type KeySelector struct {
Key KeyConvertible
OrEqual bool

View File

@ -23,7 +23,7 @@
package fdb
/*
#define FDB_API_VERSION 510
#define FDB_API_VERSION 520
#include <foundationdb/fdb_c.h>
*/
import "C"

View File

@ -28,7 +28,7 @@ package fdb
// transaction conflicts but making it harder to reason about concurrency.
//
// For more information on snapshot reads, see
// https://foundationdb.org/documentation/developer-guide.html#snapshot-reads.
// https://www.foundationdb.org/documentation/developer-guide.html#snapshot-reads.
type Snapshot struct {
*transaction
}
@ -46,7 +46,7 @@ type Snapshot struct {
//
// See the ReadTransactor interface for an example of using ReadTransact with
// Transaction, Snapshot and Database objects.
func (s Snapshot) ReadTransact(f func (ReadTransaction) (interface{}, error)) (r interface{}, e error) {
func (s Snapshot) ReadTransact(f func(ReadTransaction) (interface{}, error)) (r interface{}, e error) {
defer panicToError(&e)
r, e = f(s)

View File

@ -29,14 +29,14 @@
// As a best practice, API clients should use at least one subspace for
// application data. For general guidance on subspace usage, see the Subspaces
// section of the Developer Guide
// (https://foundationdb.org/documentation/developer-guide.html#developer-guide-sub-keyspaces).
// (https://www.foundationdb.org/documentation/developer-guide.html#developer-guide-sub-keyspaces).
package subspace
import (
"github.com/apple/foundationdb/bindings/go/src/fdb"
"github.com/apple/foundationdb/bindings/go/src/fdb/tuple"
"bytes"
"errors"
"github.com/apple/foundationdb/bindings/go/src/fdb"
"github.com/apple/foundationdb/bindings/go/src/fdb/tuple"
)
// Subspace represents a well-defined region of keyspace in a FoundationDB
@ -134,7 +134,7 @@ func (s subspace) FDBRangeKeySelectors() (fdb.Selectable, fdb.Selectable) {
}
func concat(a []byte, b ...byte) []byte {
r := make([]byte, len(a) + len(b))
r := make([]byte, len(a)+len(b))
copy(r, a)
copy(r[len(a):], b)
return r

View File

@ -23,7 +23,7 @@
package fdb
/*
#define FDB_API_VERSION 510
#define FDB_API_VERSION 520
#include <foundationdb/fdb_c.h>
*/
import "C"
@ -110,7 +110,7 @@ func (t Transaction) GetDatabase() Database {
//
// See the Transactor interface for an example of using Transact with
// Transaction and Database objects.
func (t Transaction) Transact(f func (Transaction) (interface{}, error)) (r interface{}, e error) {
func (t Transaction) Transact(f func(Transaction) (interface{}, error)) (r interface{}, e error) {
defer panicToError(&e)
r, e = f(t)
@ -171,7 +171,7 @@ func (t Transaction) SetReadVersion(version int64) {
// but making it harder to reason about concurrency.
//
// For more information on snapshot reads, see
// https://foundationdb.org/documentation/developer-guide.html#using-snapshot-reads.
// https://www.foundationdb.org/documentation/developer-guide.html#using-snapshot-reads.
func (t Transaction) Snapshot() Snapshot {
return Snapshot{t.transaction}
}
@ -196,7 +196,7 @@ func (t Transaction) OnError(e Error) FutureNil {
// As with other client/server databases, in some failure scenarios a client may
// be unable to determine whether a transaction succeeded. For more information,
// see
// https://foundationdb.org/documentation/developer-guide.html#developer-guide-unknown-results.
// https://www.foundationdb.org/documentation/developer-guide.html#developer-guide-unknown-results.
func (t Transaction) Commit() FutureNil {
return &futureNil{newFuture(C.fdb_transaction_commit(t.ptr))}
}
@ -396,13 +396,13 @@ func addConflictRange(t *transaction, er ExactRange, crtype conflictRangeType) e
// conflict.
//
// For more information on conflict ranges, see
// https://foundationdb.org/documentation/developer-guide.html#conflict-ranges.
// https://www.foundationdb.org/documentation/developer-guide.html#conflict-ranges.
func (t Transaction) AddReadConflictRange(er ExactRange) error {
return addConflictRange(t.transaction, er, conflictRangeTypeRead)
}
func copyAndAppend(orig []byte, b byte) []byte {
ret := make([]byte, len(orig) + 1)
ret := make([]byte, len(orig)+1)
copy(ret, orig)
ret[len(orig)] = b
return ret
@ -413,7 +413,7 @@ func copyAndAppend(orig []byte, b byte) []byte {
// this key could cause the transaction to fail with a conflict.
//
// For more information on conflict ranges, see
// https://foundationdb.org/documentation/developer-guide.html#conflict-ranges.
// https://www.foundationdb.org/documentation/developer-guide.html#conflict-ranges.
func (t Transaction) AddReadConflictKey(key KeyConvertible) error {
return addConflictRange(t.transaction, KeyRange{key, Key(copyAndAppend(key.FDBKey(), 0x00))}, conflictRangeTypeRead)
}
@ -424,7 +424,7 @@ func (t Transaction) AddReadConflictKey(key KeyConvertible) error {
// conflict.
//
// For more information on conflict ranges, see
// https://foundationdb.org/documentation/developer-guide.html#conflict-ranges.
// https://www.foundationdb.org/documentation/developer-guide.html#conflict-ranges.
func (t Transaction) AddWriteConflictRange(er ExactRange) error {
return addConflictRange(t.transaction, er, conflictRangeTypeWrite)
}
@ -434,7 +434,7 @@ func (t Transaction) AddWriteConflictRange(er ExactRange) error {
// read this key could fail with a conflict.
//
// For more information on conflict ranges, see
// https://foundationdb.org/documentation/developer-guide.html#conflict-ranges.
// https://www.foundationdb.org/documentation/developer-guide.html#conflict-ranges.
func (t Transaction) AddWriteConflictKey(key KeyConvertible) error {
return addConflictRange(t.transaction, KeyRange{key, Key(copyAndAppend(key.FDBKey(), 0x00))}, conflictRangeTypeWrite)
}

View File

@ -27,7 +27,7 @@
// of higher-level data models.
//
// For general guidance on tuple usage, see the Tuple section of Data Modeling
// (https://foundationdb.org/documentation/data-modeling.html#data-modeling-tuples).
// (https://www.foundationdb.org/documentation/data-modeling.html#data-modeling-tuples).
//
// FoundationDB tuples can currently encode byte and unicode strings, integers
// and NULL values. In Go these are represented as []byte, string, int64 and
@ -35,9 +35,9 @@
package tuple
import (
"fmt"
"encoding/binary"
"bytes"
"encoding/binary"
"fmt"
"github.com/apple/foundationdb/bindings/go/src/fdb"
)
@ -82,19 +82,19 @@ const trueCode = 0x27
const uuidCode = 0x30
var sizeLimits = []uint64{
1 << (0 * 8) - 1,
1 << (1 * 8) - 1,
1 << (2 * 8) - 1,
1 << (3 * 8) - 1,
1 << (4 * 8) - 1,
1 << (5 * 8) - 1,
1 << (6 * 8) - 1,
1 << (7 * 8) - 1,
1 << (8 * 8) - 1,
1<<(0*8) - 1,
1<<(1*8) - 1,
1<<(2*8) - 1,
1<<(3*8) - 1,
1<<(4*8) - 1,
1<<(5*8) - 1,
1<<(6*8) - 1,
1<<(7*8) - 1,
1<<(8*8) - 1,
}
func adjustFloatBytes(b []byte, encode bool) {
if (encode && b[0] & 0x80 != 0x00) || (!encode && b[0] & 0x80 == 0x00) {
if (encode && b[0]&0x80 != 0x00) || (!encode && b[0]&0x80 == 0x00) {
// Negative numbers: flip all of the bytes.
for i := 0; i < len(b); i++ {
b[i] = b[i] ^ 0xff
@ -131,11 +131,11 @@ func encodeInt(buf *bytes.Buffer, i int64) {
switch {
case i > 0:
n = bisectLeft(uint64(i))
buf.WriteByte(byte(intZeroCode+n))
buf.WriteByte(byte(intZeroCode + n))
binary.Write(&ibuf, binary.BigEndian, i)
case i < 0:
n = bisectLeft(uint64(-i))
buf.WriteByte(byte(0x14-n))
buf.WriteByte(byte(0x14 - n))
binary.Write(&ibuf, binary.BigEndian, int64(sizeLimits[n])+i)
}
@ -170,7 +170,7 @@ func encodeTuple(buf *bytes.Buffer, t Tuple, nested bool) {
buf.WriteByte(nestedCode)
}
for i, e := range(t) {
for i, e := range t {
switch e := e.(type) {
case Tuple:
encodeTuple(buf, e, true)
@ -232,7 +232,7 @@ func findTerminator(b []byte) int {
for {
idx := bytes.IndexByte(bp, 0x00)
length += idx
if idx + 1 == len(bp) || bp[idx+1] != 0xFF {
if idx+1 == len(bp) || bp[idx+1] != 0xFF {
break
}
length += 2
@ -276,7 +276,7 @@ func decodeInt(b []byte) (int64, int) {
ret -= int64(sizeLimits[n])
}
return ret, n+1
return ret, n + 1
}
func decodeFloat(b []byte) (float32, int) {
@ -317,11 +317,11 @@ func decodeTuple(b []byte, nested bool) (Tuple, int, error) {
if !nested {
el = nil
off = 1
} else if i + 1 < len(b) && b[i+1] == 0xff {
} else if i+1 < len(b) && b[i+1] == 0xff {
el = nil
off = 2
} else {
return t, i+1, nil
return t, i + 1, nil
}
case b[i] == bytesCode:
el, off = decodeBytes(b[i:])
@ -330,12 +330,12 @@ func decodeTuple(b []byte, nested bool) (Tuple, int, error) {
case negIntStart <= b[i] && b[i] <= posIntEnd:
el, off = decodeInt(b[i:])
case b[i] == floatCode:
if i + 5 > len(b) {
if i+5 > len(b) {
return nil, i, fmt.Errorf("insufficient bytes to decode float starting at position %d of byte array for tuple", i)
}
el, off = decodeFloat(b[i:])
case b[i] == doubleCode:
if i + 9 > len(b) {
if i+9 > len(b) {
return nil, i, fmt.Errorf("insufficient bytes to decode double starting at position %d of byte array for tuple", i)
}
el, off = decodeDouble(b[i:])
@ -346,7 +346,7 @@ func decodeTuple(b []byte, nested bool) (Tuple, int, error) {
el = false
off = 1
case b[i] == uuidCode:
if i + 17 > len(b) {
if i+17 > len(b) {
return nil, i, fmt.Errorf("insufficient bytes to decode UUID starting at position %d of byte array for tuple", i)
}
el, off = decodeUUID(b[i:])
@ -401,7 +401,7 @@ func (t Tuple) FDBRangeKeySelectors() (fdb.Selectable, fdb.Selectable) {
}
func concat(a []byte, b ...byte) []byte {
r := make([]byte, len(a) + len(b))
r := make([]byte, len(a)+len(b))
copy(r, a)
copy(r[len(a):], b)
return r

View File

@ -21,7 +21,7 @@
#include <jni.h>
#include <string.h>
#define FDB_API_VERSION 510
#define FDB_API_VERSION 520
#include <foundationdb/fdb_c.h>

View File

@ -38,13 +38,14 @@ else
endif
ifeq ($(PLATFORM),linux)
fdb_java_CFLAGS += -I/usr/lib/jvm/java-8-openjdk-amd64/include -I/usr/lib/jvm/java-8-openjdk-amd64/include/linux
JAVA_HOME ?= /usr/lib/jvm/java-8-openjdk-amd64
fdb_java_CFLAGS += -I$(JAVA_HOME)/include -I$(JAVA_HOME)/include/linux
fdb_java_LDFLAGS += -static-libgcc
java_ARCH := amd64
else ifeq ($(PLATFORM),osx)
# FIXME: Surely there is a better way to grab the JNI headers on any version of macOS.
fdb_java_CFLAGS += -I/System/Library/Frameworks/JavaVM.framework/Versions/A/Headers -I/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.11.sdk/System/Library/Frameworks/JavaVM.framework/Versions/A/Headers
JAVA_HOME ?= $(shell /usr/libexec/java_home)
fdb_java_CFLAGS += -I$(JAVA_HOME)/include -I$(JAVA_HOME)/include/darwin
java_ARCH := x86_64
endif
@ -110,7 +111,7 @@ javadoc: $(JAVA_SOURCES) bindings/java/src/main/overview.html
-windowtitle "FoundationDB Java Client API" \
-doctitle "FoundationDB Java Client API" \
-link "http://docs.oracle.com/javase/8/docs/api" \
com.apple.foundationdb.org.apple.foundationdb.async com.apple.foundationdb.tuple com.apple.foundationdb.directory com.apple.foundationdb.subspace
com.apple.foundationdb com.apple.foundationdb.async com.apple.foundationdb.tuple com.apple.foundationdb.directory com.apple.foundationdb.subspace
javadoc_clean:
@rm -rf $(JAVADOC_DIR)/javadoc

View File

@ -10,13 +10,13 @@
<packaging>jar</packaging>
<name>foundationdb-java</name>
<description>Java bindings for the FoundationDB database. These bindings require the FoundationDB client, which is under a different license. The client can be obtained from https://files.foundationdb.org/fdb-c/.</description>
<description>Java bindings for the FoundationDB database. These bindings require the FoundationDB client, which is under a different license. The client can be obtained from https://www.foundationdb.org/downloads/fdb-c/.</description>
<inceptionYear>2010</inceptionYear>
<url>http://foundationdb.org</url>
<url>https://www.foundationdb.org</url>
<organization>
<name>FoundationDB</name>
<url>http://foundationdb.org</url>
<url>https://www.foundationdb.org</url>
</organization>
<developers>

View File

@ -83,6 +83,8 @@ public class Cluster extends NativeObjectWrapper {
/**
* Creates a connection to a specific database on an <i>FDB</i> cluster.
*
* @param e the {@link Executor} to use when executing asynchronous callbacks for the database
*
* @return a {@code Future} that will be set to a {@code Database} upon
* successful connection.
*/

View File

@ -80,6 +80,9 @@ public interface Database extends AutoCloseable, TransactionContext {
*
* @param retryable the block of logic to execute in a {@link Transaction} against
* this database
* @param <T> the return type of {@code retryable}
*
* @return the result of the last run of {@code retryable}
*/
@Override
default <T> T read(Function<? super ReadTransaction, T> retryable) {
@ -94,6 +97,8 @@ public interface Database extends AutoCloseable, TransactionContext {
* @param retryable the block of logic to execute in a {@link Transaction} against
* this database
* @param e the {@link Executor} to use for asynchronous callbacks
* @param <T> the return type of {@code retryable}
* @return the result of the last run of {@code retryable}
*
* @see #read(Function)
*/
@ -113,6 +118,10 @@ public interface Database extends AutoCloseable, TransactionContext {
*
* @param retryable the block of logic to execute in a {@link ReadTransaction} against
* this database
* @param <T> the return type of {@code retryable}
*
* @return a {@code CompletableFuture} that will be set to the value returned by the last call
* to {@code retryable}
*/
@Override
default <T> CompletableFuture<T> readAsync(
@ -128,6 +137,10 @@ public interface Database extends AutoCloseable, TransactionContext {
* @param retryable the block of logic to execute in a {@link ReadTransaction} against
* this database
* @param e the {@link Executor} to use for asynchronous callbacks
* @param <T> the return type of {@code retryable}
*
* @return a {@code CompletableFuture} that will be set to the value returned by the last call
* to {@code retryable}
*
* @see #readAsync(Function)
*/
@ -147,11 +160,14 @@ public interface Database extends AutoCloseable, TransactionContext {
* be unable to determine whether a transaction succeeded. In these cases, your
* transaction may be executed twice. For more information about how to reason
* about these situations see
* <a href="/foundationdb/developer-guide.html#transactions-with-unknown-results"
* <a href="/developer-guide.html#transactions-with-unknown-results"
 * target="_blank">the FoundationDB Developer Guide</a>
*
* @param retryable the block of logic to execute in a {@link Transaction} against
* this database
* @param <T> the return type of {@code retryable}
*
* @return the result of the last run of {@code retryable}
*/
@Override
default <T> T run(Function<? super Transaction, T> retryable) {
@ -166,6 +182,9 @@ public interface Database extends AutoCloseable, TransactionContext {
* @param retryable the block of logic to execute in a {@link Transaction} against
* this database
* @param e the {@link Executor} to use for asynchronous callbacks
* @param <T> the return type of {@code retryable}
*
* @return the result of the last run of {@code retryable}
*/
<T> T run(Function<? super Transaction, T> retryable, Executor e);
@ -183,7 +202,7 @@ public interface Database extends AutoCloseable, TransactionContext {
* be unable to determine whether a transaction succeeded. In these cases, your
* transaction may be executed twice. For more information about how to reason
* about these situations see
* <a href="/foundationdb/developer-guide.html#transactions-with-unknown-results"
* <a href="/developer-guide.html#transactions-with-unknown-results"
 * target="_blank">the FoundationDB Developer Guide</a><br>
* <br>
* Any errors encountered executing {@code retryable}, or received from the
@ -191,6 +210,10 @@ public interface Database extends AutoCloseable, TransactionContext {
*
* @param retryable the block of logic to execute in a {@link Transaction} against
* this database
* @param <T> the return type of {@code retryable}
*
* @return a {@code CompletableFuture} that will be set to the value returned by the last call
* to {@code retryable}
*/
@Override
default <T> CompletableFuture<T> runAsync(
@ -206,6 +229,10 @@ public interface Database extends AutoCloseable, TransactionContext {
* @param retryable the block of logic to execute in a {@link Transaction} against
* this database
* @param e the {@link Executor} to use for asynchronous callbacks
* @param <T> the return type of {@code retryable}
*
* @return a {@code CompletableFuture} that will be set to the value returned by the last call
* to {@code retryable}
*
* @see #run(Function)
*/

View File

@ -35,7 +35,7 @@ import java.util.concurrent.atomic.AtomicInteger;
* This call is required before using any other part of the API. The call allows
* an error to be thrown at this point to prevent client code from accessing a later library
* with incorrect assumptions from the current version. The API version documented here is version
* {@code 510}.<br><br>
* {@code 520}.<br><br>
* FoundationDB encapsulates multiple versions of its interface by requiring
* the client to explicitly specify the version of the API it uses. The purpose
* of this design is to allow you to upgrade the server, client libraries, or
@ -81,7 +81,7 @@ public class FDB {
public static final ExecutorService DEFAULT_EXECUTOR;
final int apiVersion;
private final int apiVersion;
private volatile boolean netStarted = false;
private volatile boolean netStopped = false;
volatile boolean warnOnUnclosed = true;
@ -123,6 +123,37 @@ public class FDB {
return options;
}
/**
* Determines if the API version has already been selected. That is, this
* will return {@code true} if the user has already called
* {@link #selectAPIVersion(int) selectAPIVersion()} and that call
* has completed successfully.
*
* @return {@code true} if an API version has been selected and {@code false} otherwise
*/
public static boolean isAPIVersionSelected() {
return singleton != null;
}
/**
* Return the instance of the FDB API singleton. This method will always return
* a non-{@code null} value for the singleton, but if the
* {@link #selectAPIVersion(int) selectAPIVersion()} method has not yet been
* called, it will throw an {@link FDBException} indicating that an API
* version has not yet been set.
*
* @return the FoundationDB API object
* @throws FDBException if {@link #selectAPIVersion(int) selectAPIVersion()} has not been called
*/
public static FDB instance() throws FDBException {
if(singleton != null) {
return singleton;
}
else {
throw new FDBException("API version is not set", 2200);
}
}
/**
* Select the version for the client API. An exception will be thrown if the
* requested version is not supported by this implementation of the API. As
@ -142,7 +173,7 @@ public class FDB {
*/
public static synchronized FDB selectAPIVersion(final int version) throws FDBException {
if(singleton != null) {
if(version != singleton.apiVersion) {
if(version != singleton.getAPIVersion()) {
throw new IllegalArgumentException(
"FoundationDB API already started at different version");
}
@ -150,8 +181,8 @@ public class FDB {
}
if(version < 510)
throw new IllegalArgumentException("API version not supported (minimum 510)");
if(version > 510)
throw new IllegalArgumentException("API version not supported (maximum 510)");
if(version > 520)
throw new IllegalArgumentException("API version not supported (maximum 520)");
Select_API_version(version);
FDB fdb = new FDB(version);
@ -169,18 +200,21 @@ public class FDB {
this.warnOnUnclosed = warnOnUnclosed;
}
// Singleton is initialized to null and only set once by a call to selectAPIVersion
static FDB getInstance() {
if(singleton != null) {
return singleton;
}
throw new IllegalStateException("API version has not been selected");
/**
* Returns the API version that was selected by the {@link #selectAPIVersion(int) selectAPIVersion()}
* call. This can be used to guard different parts of client code against different versions
* of the FoundationDB API to allow for libraries using FoundationDB to be compatible across
* several versions.
*
* @return the FoundationDB API version that has been loaded
*/
public int getAPIVersion() {
return apiVersion;
}
/**
* Connects to the cluster specified by the
* <a href="/foundationdb/api-general.html#default-cluster-file" target="_blank">default fdb.cluster file</a>.
* <a href="/administration.html#default-cluster-file" target="_blank">default fdb.cluster file</a>.
* If the FoundationDB network has not been started, it will be started in the course of this call
* as if {@link FDB#startNetwork()} had been called.
*
@ -199,9 +233,9 @@ public class FDB {
* {@link #startNetwork()} had been called.
*
* @param clusterFilePath the
* <a href="/foundationdb/api-general.html#foundationdb-cluster-file" target="_blank">cluster file</a>
* <a href="/administration.html#foundationdb-cluster-file" target="_blank">cluster file</a>
* defining the FoundationDB cluster. This can be {@code null} if the
* <a href="/foundationdb/api-general.html#default-cluster-file" target="_blank">default fdb.cluster file</a>
* <a href="/administration.html#default-cluster-file" target="_blank">default fdb.cluster file</a>
* is to be used.
*
* @return a {@code CompletableFuture} that will be set to a FoundationDB {@code Cluster}.
@ -220,9 +254,9 @@ public class FDB {
* are produced from using the resulting {@link Cluster}.
*
* @param clusterFilePath the
* <a href="/foundationdb/api-general.html#foundationdb-cluster-file" target="_blank">cluster file</a>
* <a href="/administration.html#foundationdb-cluster-file" target="_blank">cluster file</a>
* defining the FoundationDB cluster. This can be {@code null} if the
* <a href="/foundationdb/api-general.html#default-cluster-file" target="_blank">default fdb.cluster file</a>
* <a href="/administration.html#default-cluster-file" target="_blank">default fdb.cluster file</a>
* is to be used.
* @param e used to run the FDB network thread
*
@ -245,7 +279,7 @@ public class FDB {
/**
* Initializes networking, connects with the
* <a href="/foundationdb/api-general.html#default-cluster-file" target="_blank">default fdb.cluster file</a>,
* <a href="/administration.html#default-cluster-file" target="_blank">default fdb.cluster file</a>,
* and opens the database.
*
* @return a {@code CompletableFuture} that will be set to a FoundationDB {@link Database}
@ -259,9 +293,9 @@ public class FDB {
* and opens the database.
*
* @param clusterFilePath the
* <a href="/foundationdb/api-general.html#foundationdb-cluster-file" target="_blank">cluster file</a>
* <a href="/administration.html#foundationdb-cluster-file" target="_blank">cluster file</a>
* defining the FoundationDB cluster. This can be {@code null} if the
* <a href="/foundationdb/api-general.html#default-cluster-file" target="_blank">default fdb.cluster file</a>
* <a href="/administration.html#default-cluster-file" target="_blank">default fdb.cluster file</a>
* is to be used.
*
* @return a {@code CompletableFuture} that will be set to a FoundationDB {@link Database}
@ -275,9 +309,9 @@ public class FDB {
* and opens the database.
*
* @param clusterFilePath the
* <a href="/foundationdb/api-general.html#foundationdb-cluster-file" target="_blank">cluster file</a>
* <a href="/administration.html#foundationdb-cluster-file" target="_blank">cluster file</a>
* defining the FoundationDB cluster. This can be {@code null} if the
* <a href="/foundationdb/api-general.html#default-cluster-file" target="_blank">default fdb.cluster file</a>
* <a href="/administration.html#default-cluster-file" target="_blank">default fdb.cluster file</a>
* is to be used.
* @param e the {@link Executor} to use to execute asynchronous callbacks
*
@ -329,12 +363,14 @@ public class FDB {
* event loop is a blocking operation that is not
* expected to terminate until the program is complete. This will therefore consume an
* entire thread from {@code e} if {@code e} is a thread pool or will completely block
* operation of a single threaded {@code Executor}.<br>
* the single thread of a single-threaded {@code Executor}.<br>
* <br>
* Manual configuration of the networking engine can be achieved through calls on
* {@link NetworkOptions}. These options should be set before a call
* to this method.
*
* @param e the {@link Executor} to use to execute network operations on
*
* @see NetworkOptions
*
* @throws IllegalStateException if the network has already been stopped

View File

@ -32,7 +32,10 @@ import com.apple.foundationdb.tuple.ByteArrayUtil;
* {@link Transaction#getRange(KeySelector, KeySelector) getRange()}.<br>
* <br>
* For more about how key selectors work in practice, see
* <a href="/foundationdb/developer-guide.html#key-selectors" target="_blank">the KeySelector documentation</a>.
* <a href="/developer-guide.html#key-selectors" target="_blank">the KeySelector documentation</a>.
* Note that the way the key selectors are resolved is somewhat non-intuitive, so
* users who wish to use a key selector other than the default ones described below should
* probably consult that documentation before proceeding.
* <br>
* <br>
* Generally one of the following static methods should be used to construct a {@code KeySelector}:
@ -52,13 +55,25 @@ public class KeySelector {
/**
* Constructs a new {@code KeySelector} from the given parameters. Client code
* will not generally call this constructor.
* will not generally call this constructor. A key selector can be used to
* specify a key that will be resolved at runtime based on a starting key and
* an offset. When this is passed as an argument to a {@link Transaction}'s
* {@link Transaction#getKey(KeySelector) getKey()} or
* {@link Transaction#getRange(KeySelector, KeySelector) getRange()}
* methods, the key selector will be resolved to a key within the
* database. This is done in a manner equivalent to finding the last key that is
* less than (or less than or equal to, if {@code orEqual} is
* {@code true}) the base {@code key} specified here and then
* returning the key that is {@code offset} keys greater than that
* key.
*
* @param key the base key to reference
* @param orEqual <code>true</code> if the key should be considered for equality
* @param offset the number of keys to offset from once the key is found
* @param orEqual {@code true} if the key selector should resolve to
* {@code key} (if {@code key} is present) before accounting for the offset
* @param offset the offset (in number of keys) that the selector will advance after
* resolving to a key based on the {@code key} and {@code orEqual} parameters
*/
public KeySelector(byte[] key, boolean orEqual,int offset) {
public KeySelector(byte[] key, boolean orEqual, int offset) {
this.key = key;
this.orEqual = orEqual;
this.offset = offset;
@ -119,7 +134,7 @@ public class KeySelector {
* poor choice for iterating through a large range. (Instead, use the keys
* returned from a range query operation
* themselves to create a new beginning {@code KeySelector}.) For more information see
* <a href="/foundationdb/developer-guide.html#key-selectors" target="_blank">the KeySelector documentation</a>.
* <a href="/developer-guide.html#key-selectors" target="_blank">the KeySelector documentation</a>.
*
* @param offset the number of keys to offset the {@code KeySelector}. This number can be
* negative.
@ -157,7 +172,11 @@ public class KeySelector {
}
/**
* Returns the key offset for this {@code KeySelector}. For internal use.
* Returns the key offset parameter for this {@code KeySelector}. See
* the {@link #KeySelector(byte[], boolean, int) KeySelector constructor}
* for more details.
*
* @return the key offset for this {@code KeySelector}
*/
public int getOffset() {
return offset;

View File

@ -221,7 +221,7 @@ public class LocalityUtil {
@Override
protected void finalize() throws Throwable {
try {
if(FDB.getInstance().warnOnUnclosed && !closed) {
if(FDB.instance().warnOnUnclosed && !closed) {
System.err.println("CloseableAsyncIterator not closed (getBoundaryKeys)");
}
if(!closed) {

View File

@ -47,7 +47,7 @@ abstract class NativeObjectWrapper implements AutoCloseable {
public void checkUnclosed(String context) {
try {
if(FDB.getInstance().warnOnUnclosed && !closed) {
if(FDB.instance().warnOnUnclosed && !closed) {
System.err.println(context + " not closed");
}
}

View File

@ -43,6 +43,7 @@ public interface ReadTransactionContext {
*
* @param retryable the block of logic to execute against a {@link ReadTransaction}
* in this context
* @param <T> the return type of {@code retryable}
*
* @return a result of the last call to {@code retryable}
*/
@ -56,6 +57,7 @@ public interface ReadTransactionContext {
*
* @param retryable the block of logic to execute against a {@link ReadTransaction}
* in this context
* @param <T> the return type of {@code retryable}
*
* @return a {@code CompletableFuture} that will be set to the value returned by the last call
* to {@code retryable}

View File

@ -34,12 +34,12 @@ import com.apple.foundationdb.tuple.Tuple;
* the underlying database if and when the transaction is committed. Read operations do see the
* effects of previous write operations on the same transaction. Committing a transaction usually
* succeeds in the absence of
* <a href="/foundationdb/developer-guide.html#transaction-conflicts" target="_blank">conflicts</a>.<br>
* <a href="/developer-guide.html#developer-guide-transaction-conflicts" target="_blank">conflicts</a>.<br>
* <br>
* Transactions group operations into a unit with the properties of atomicity, isolation, and
* durability. Transactions also provide the ability to maintain an application's invariants or
* integrity constraints, supporting the property of consistency. Together these properties are
* known as <a href="/foundationdb/developer-guide.html#acid" target="_blank">ACID</a>.<br>
* known as <a href="/developer-guide.html#acid" target="_blank">ACID</a>.<br>
* <br>
* Transactions are also causally consistent: once a transaction has been successfully committed,
* all subsequently created transactions will see the modifications made by it.
@ -49,7 +49,7 @@ import com.apple.foundationdb.tuple.Tuple;
* <br>
* Keys and values in FoundationDB are byte arrays. To encode other data types, see the
* {@link Tuple Tuple API} and
* <a href="/foundationdb/data-modeling.html#tuples" target="_blank">tuple layer documentation</a>.<br>
* <a href="/data-modeling.html#data-modeling-tuples" target="_blank">tuple layer documentation</a>.<br>
* <br>
* When used as a {@link TransactionContext}, the methods {@code run()} and
* {@code runAsync()} on a {@code Transaction} will simply attempt the operations
@ -79,11 +79,13 @@ public interface Transaction extends AutoCloseable, ReadTransaction, Transaction
/**
* Return special-purpose, read-only view of the database. Reads done through this interface are known as "snapshot reads".
* Snapshot reads selectively relax FoundationDB's isolation property, reducing
* <a href="/foundationdb/developer-guide.html#transaction-conflicts" target="_blank">Transaction conflicts</a>
* <a href="/developer-guide.html#transaction-conflicts" target="_blank">Transaction conflicts</a>
* but making reasoning about concurrency harder.<br>
* <br>
* For more information about how to use snapshot reads correctly, see
* <a href="/foundationdb/developer-guide.html#using-snapshot-reads" target="_blank">Using snapshot reads</a>.
* <a href="/developer-guide.html#using-snapshot-reads" target="_blank">Using snapshot reads</a>.
*
* @return a read-only view of this {@code Transaction} with relaxed isolation properties
*/
ReadTransaction snapshot();
@ -143,8 +145,9 @@ public interface Transaction extends AutoCloseable, ReadTransaction, Transaction
*
* @param key the key whose value is to be set
* @param value the value to set in the database
* @throws IllegalArgumentException
* @throws FDBException
*
* @throws IllegalArgumentException if {@code key} or {@code value} is {@code null}
* @throws FDBException if the set operation otherwise fails
*/
void set(byte[] key, byte[] value);
@ -153,8 +156,9 @@ public interface Transaction extends AutoCloseable, ReadTransaction, Transaction
* database until {@link #commit} is called.
*
* @param key the key whose value is to be cleared
* @throws IllegalArgumentException
* @throws FDBException
*
* @throws IllegalArgumentException if {@code key} is {@code null}
* @throws FDBException if clear operation otherwise fails
*/
void clear(byte[] key);
@ -167,8 +171,9 @@ public interface Transaction extends AutoCloseable, ReadTransaction, Transaction
*
* @param beginKey the first clear
* @param endKey the key one past the last key to clear
* @throws IllegalArgumentException
* @throws FDBException
*
* @throws IllegalArgumentException if {@code beginKey} or {@code endKey} is {@code null}
* @throws FDBException if the clear operation otherwise fails
*/
void clear(byte[] beginKey, byte[] endKey);
@ -181,7 +186,7 @@ public interface Transaction extends AutoCloseable, ReadTransaction, Transaction
*
* @param range the range of keys to clear
*
* @throws FDBException
* @throws FDBException if the clear operation fails
*/
void clear(Range range);
@ -191,7 +196,7 @@ public interface Transaction extends AutoCloseable, ReadTransaction, Transaction
*
* @param prefix the starting bytes from the keys to be cleared.
*
* @throws FDBException
* @throws FDBException if the clear-range operation fails
*/
@Deprecated
void clearRangeStartsWith(byte[] prefix);
@ -232,10 +237,7 @@ public interface Transaction extends AutoCloseable, ReadTransaction, Transaction
* {@code Database}'s {@link Database#run(Function) run()} calls for managing
* transactional access to FoundationDB.
*
* @return a {@code CompletableFuture} that, when set without error, guarantees the
* {@code Transaction}'s modifications committed durably to the
* database. If the commit failed, it will throw an {@link FDBException}.
* <br><br>
* <p>
* As with other client/server databases, in some failure scenarios a client may
* be unable to determine whether a transaction succeeded. In these cases, an
* {@link FDBException} will be thrown with error code {@code commit_unknown_result} (1021).
@ -243,11 +245,18 @@ public interface Transaction extends AutoCloseable, ReadTransaction, Transaction
* retry loops that don't specifically detect {@code commit_unknown_result} could end
* up executing a transaction twice. For more information, see the FoundationDB
* Developer Guide documentation.
* </p>
*
* <p>
* If any operation is performed on a transaction after a commit has been
* issued but before it has returned, both the commit and the operation will
* throw an error code {@code used_during_commit}(2017). In this case, all
* throw an error code {@code used_during_commit} (2017). In this case, all
* subsequent operations on this transaction will throw this error.
* </p>
*
* @return a {@code CompletableFuture} that, when set without error, guarantees the
* {@code Transaction}'s modifications committed durably to the
* database. If the commit failed, it will throw an {@link FDBException}.
*/
CompletableFuture<Void> commit();
@ -355,7 +364,10 @@ public interface Transaction extends AutoCloseable, ReadTransaction, Transaction
* Run a function once against this {@code Transaction}. This call blocks while
* user code is executing, returning the result of that code on completion.
*
* @return the return value of {@code retryable}
* @param retryable the block of logic to execute against this {@code Transaction}
* @param <T> the return type of {@code retryable}
*
* @return a result of the single call to {@code retryable}
*/
@Override
<T> T run(Function<? super Transaction, T> retryable);
@ -364,6 +376,9 @@ public interface Transaction extends AutoCloseable, ReadTransaction, Transaction
* Run a function once against this {@code Transaction}. This call returns
* immediately with a {@code CompletableFuture} handle to the result.
*
* @param retryable the block of logic to execute against this {@code Transaction}
* @param <T> the return type of {@code retryable}
*
* @return a {@code CompletableFuture} that will be set to the return value of {@code retryable}
*/
@Override

View File

@ -42,6 +42,7 @@ public interface TransactionContext extends ReadTransactionContext {
*
* @param retryable the block of logic to execute against a {@link Transaction}
* in this context
* @param <T> the return type of {@code retryable}
*
* @return a result of the last call to {@code retryable}
*/
@ -55,6 +56,7 @@ public interface TransactionContext extends ReadTransactionContext {
*
* @param retryable the block of logic to execute against a {@link Transaction}
* in this context
* @param <T> the return type of {@code retryable}
*
* @return a {@code CompletableFuture} that will be set to the value returned by the last call
* to {@code retryable}

View File

@ -63,6 +63,8 @@ public class AsyncUtil {
*
* @param func the {@code Function} to run
* @param value the input to pass to {@code func}
* @param <I> type of input to {@code func}
* @param <O> type of output of {@code func}
*
* @return the output of {@code func}, or a {@code CompletableFuture} carrying any exception
* caught in the process.
@ -159,9 +161,10 @@ public class AsyncUtil {
}
/**
* Iterates over a set of items and returns the result as a list.
* Iterates over a stream of items and returns the result as a list.
*
* @param iterable the source of data over which to iterate
* @param <V> type of the items returned by the iterable
*
* @return a {@code CompletableFuture} which will be set to the amalgamation of results
* from iteration.
@ -174,6 +177,7 @@ public class AsyncUtil {
* Iterates over a set of items and returns the remaining results as a list.
*
* @param iterator the source of data over which to iterate. This function will exhaust the iterator.
* @param <V> type of the items returned by the iterator
*
* @return a {@code CompletableFuture} which will be set to the amalgamation of results
* from iteration.
@ -187,6 +191,7 @@ public class AsyncUtil {
*
* @param iterable the source of data over which to iterate
* @param executor the {@link Executor} to use for asynchronous operations
* @param <V> type of the items returned by the iterable
*
* @return a {@code CompletableFuture} which will be set to the amalgamation of results
* from iteration.
@ -200,6 +205,7 @@ public class AsyncUtil {
*
* @param iterator the source of data over which to iterate. This function will exhaust the iterator.
* @param executor the {@link Executor} to use for asynchronous operations
* @param <V> type of the items returned by the iterator
*
* @return a {@code CompletableFuture} which will be set to the amalgamation of results
* from iteration.
@ -215,6 +221,9 @@ public class AsyncUtil {
*
* @param iterable input
* @param func mapping function applied to each element
* @param <V> type of the items returned by the original iterable
* @param <T> type of the items returned by the final iterable
*
* @return a new iterable with each element mapped to a different value
*/
public static <V, T> AsyncIterable<T> mapIterable(final AsyncIterable<V> iterable,
@ -239,6 +248,9 @@ public class AsyncUtil {
*
* @param iterator input
* @param func mapping function applied to each element
* @param <V> type of the items returned by the original iterator
* @param <T> type of the items returned by the final iterator
*
* @return a new iterator with each element mapped to a different value
*/
public static <V, T> AsyncIterator<T> mapIterator(final AsyncIterator<V> iterator,
@ -277,6 +289,9 @@ public class AsyncUtil {
*
* @param iterator input
* @param func mapping function applied to each element
* @param <V> type of the items returned by the original iterator
* @param <T> type of the items returned by the final iterator
*
* @return a new iterator with each element mapped to a different value
*/
public static <V, T> CloseableAsyncIterator<T> mapIterator(final CloseableAsyncIterator<V> iterator,
@ -423,6 +438,7 @@ public class AsyncUtil {
* All errors from {@code task} will be passed to the resulting {@code CompletableFuture}.
*
* @param task the asynchronous process for which to signal completion
* @param <V> type of element returned by {@code task}
*
* @return a newly created {@code CompletableFuture} that is set when {@code task} completes
*/
@ -437,6 +453,7 @@ public class AsyncUtil {
* it is explicitly cancelled.
*
* @param task the asynchronous process to monitor the readiness of
* @param <V> return type of the asynchronous task
*
* @return a new {@link CompletableFuture} that is set when {@code task} is ready.
*/
@ -444,6 +461,22 @@ public class AsyncUtil {
return task.handle((v, t) -> null);
}
/**
* Composes an asynchronous task with an exception-handler that returns a {@link CompletableFuture}
* of the same type. If {@code task} completes normally, this will return a {@link CompletableFuture}
* with the same value as {@code task}. If {@code task} completes exceptionally,
* this will call {@code fn} with the exception returned by {@code task} and return
* the result of the {@link CompletableFuture} returned by that function.
*
* @param task the asynchronous process to handle exceptions from
* @param fn a function mapping exceptions from {@code task} to a {@link CompletableFuture} of the same
* type as {@code task}
* @param <V> return type of the asynchronous task
*
* @return a {@link CompletableFuture} that contains the value returned by {@code task}
* if {@code task} completes normally and the result of {@code fn} if {@code task}
* completes exceptionally
*/
public static <V> CompletableFuture<V> composeExceptionally(CompletableFuture<V> task, Function<Throwable, CompletableFuture<V>> fn) {
return task.handle((v,e) -> e)
.thenCompose(e -> {
@ -521,6 +554,7 @@ public class AsyncUtil {
* any of the tasks returns an error, the output is set to that error.
*
* @param tasks the tasks whose output is to be added to the output
* @param <V> return type of the asynchronous tasks
*
* @return a {@code CompletableFuture} that will be set to the collective result of the tasks
*/
@ -539,8 +573,9 @@ public class AsyncUtil {
* Replaces the output of an asynchronous task with a predetermined value.
*
* @param task the asynchronous process whose output is to be replaced
*
* @param value the predetermined value to be returned on success of {@code task}
* @param <V> return type of original future
* @param <T> return type of final future
*
* @return a {@code CompletableFuture} that will be set to {@code value} on completion of {@code task}
*/
@ -554,6 +589,7 @@ public class AsyncUtil {
*
* @param input the list of {@link CompletableFuture}s to monitor. This list
* <b>must not</b> be modified during the execution of this call.
* @param <V> return type of the asynchronous tasks
*
* @return a signal that will be set when any of the {@code CompletableFuture}s are done
*/
@ -569,6 +605,7 @@ public class AsyncUtil {
*
* @param input the list of {@link CompletableFuture}s to monitor. This list
* <b>must not</b> be modified during the execution of this call.
* @param <V> return type of the asynchronous tasks
*
* @return a signal that will be set when all of the {@code CompletableFuture}s are done
*/

View File

@ -55,7 +55,7 @@ import com.apple.foundationdb.tuple.Tuple;
* to manage its subspaces.
*
* For general guidance on directory usage, see the discussion in the
* <a href="/foundationdb/developer-guide.html#developer-guide-directories" target="_blank">Developer Guide</a>.
* <a href="/developer-guide.html#developer-guide-directories" target="_blank">Developer Guide</a>.
* </p>
* <p>
* Directories are identified by hierarchical paths analogous to the paths

View File

@ -37,8 +37,8 @@ import com.apple.foundationdb.tuple.Tuple;
* content.
* </p>
*
* For general guidance on partition usage, see
* <a href="/foundationdb/developer-guide.html#directory-partitions" target="_blank">The Developer Guide</a>.
* For general guidance on partition usage, see the
* <a href="/developer-guide.html#directory-partitions" target="_blank">Developer Guide</a>.
*/
class DirectoryPartition extends DirectorySubspace {

View File

@ -31,7 +31,7 @@
* used for the corresponding subspace. In effect, directories provide
* a level of indirection for access to subspaces.<br>
* <br>
* See <a href="/foundationdb/developer-guide.html#developer-guide-directories">general
* See <a href="/developer-guide.html#developer-guide-directories">general
* directory documentation</a> for information about how directories work and
* interact with other parts of the built-in keyspace management features.
*/

View File

@ -29,7 +29,7 @@
* assure that {@link com.apple.foundationdb.Transaction#commit()} has returned successfully
* before itself returning. If you are not able to use these functions for some reason
* please closely read and understand the other
* <a href="/foundationdb/data-modeling.html#tuples">developer
* <a href="/data-modeling.html#data-modeling-tuples">developer
* documentation on FoundationDB transactions</a>.
*/
package com.apple.foundationdb;

View File

@ -38,7 +38,7 @@ import com.apple.foundationdb.tuple.Versionstamp;
*
* <p>
* For general guidance on subspace usage, see the discussion in
* <a href="/foundationdb/developer-guide.html#subspaces-of-keys" target="_blank">Developer Guide</a>.
* <a href="/developer-guide.html#developer-guide-sub-keyspaces" target="_blank">Developer Guide</a>.
* </p>
*
* <p>

View File

@ -26,7 +26,7 @@
* from the result. As a best practice, API clients should use at least one
* subspace for application data.<br>
* <br>
* See <a href="/foundationdb/developer-guide.html#developer-guide-sub-keyspaces">general
* See <a href="/developer-guide.html#developer-guide-sub-keyspaces">general
* subspace documentation</a> for information about how subspaces work and
* interact with other parts of the built-in keyspace management features.
*/

View File

@ -236,8 +236,11 @@ public class ByteArrayUtil {
/**
* Compare byte arrays for equality and ordering purposes. Elements in the array
* are interpreted and compared as unsigned bytes. Neither parameter
* may be {@code null}
* may be {@code null}.
*
* @param l byte array on the left-hand side of the inequality
* @param r byte array on the right-hand side of the inequality
*
* @return return -1, 0, or 1 if {@code l} is less than, equal to, or greater than
* {@code r}.
*/

View File

@ -389,10 +389,11 @@ public class Tuple implements Comparable<Tuple>, Iterable<Object> {
/**
* Construct a new {@code Tuple} with elements decoded from a supplied {@code byte} array.
* The passed byte array must not be {@code null}.
*
* @param bytes encoded {@code Tuple} source. Must not be {@code null}
* @param bytes encoded {@code Tuple} source
*
* @return a newly constructed object.
* @return a new {@code Tuple} constructed by deserializing the provided {@code byte} array
*/
public static Tuple fromBytes(byte[] bytes) {
return fromBytes(bytes, 0, bytes.length);
@ -400,10 +401,13 @@ public class Tuple implements Comparable<Tuple>, Iterable<Object> {
/**
* Construct a new {@code Tuple} with elements decoded from a supplied {@code byte} array.
* The passed byte array must not be {@code null}.
*
* @param bytes encoded {@code Tuple} source. Must not be {@code null}
* @param bytes encoded {@code Tuple} source
* @param offset starting offset of byte array of encoded data
* @param length length of encoded data within the source
*
* @return a newly constructed object.
* @return a new {@code Tuple} constructed by deserializing the specified slice of the provided {@code byte} array
*/
public static Tuple fromBytes(byte[] bytes, int offset, int length) {
Tuple t = new Tuple();
@ -414,7 +418,7 @@ public class Tuple implements Comparable<Tuple>, Iterable<Object> {
/**
* Gets the number of elements in this {@code Tuple}.
*
* @return the count of elements
* @return the number of elements in this {@code Tuple}
*/
public int size() {
return this.elements.size();
@ -437,6 +441,9 @@ public class Tuple implements Comparable<Tuple>, Iterable<Object> {
* @param index the location of the item to return
*
* @return the item at {@code index} as a {@code long}
*
* @throws ClassCastException if the element at {@code index} is not a {@link Number}
* @throws NullPointerException if the element at {@code index} is {@code null}
*/
public long getLong(int index) {
Object o = this.elements.get(index);
@ -453,6 +460,8 @@ public class Tuple implements Comparable<Tuple>, Iterable<Object> {
* @param index the location of the element to return
*
* @return the item at {@code index} as a {@code byte[]}
*
* @throws ClassCastException if the element at {@code index} is not a {@link Number}
*/
public byte[] getBytes(int index) {
Object o = this.elements.get(index);
@ -470,6 +479,8 @@ public class Tuple implements Comparable<Tuple>, Iterable<Object> {
* @param index the location of the element to return
*
* @return the item at {@code index} as a {@code String}
*
* @throws ClassCastException if the element at {@code index} is not a {@link String}
*/
public String getString(int index) {
Object o = this.elements.get(index);
@ -489,6 +500,8 @@ public class Tuple implements Comparable<Tuple>, Iterable<Object> {
* @param index the location of the element to return
*
* @return the item at {@code index} as a {@link BigInteger}
*
* @throws ClassCastException if the element at {@code index} is not a {@link Number}
*/
public BigInteger getBigInteger(int index) {
Object o = this.elements.get(index);
@ -509,6 +522,8 @@ public class Tuple implements Comparable<Tuple>, Iterable<Object> {
* @param index the location of the item to return
*
* @return the item at {@code index} as a {@code float}
*
* @throws ClassCastException if the element at {@code index} is not a {@link Number}
*/
public float getFloat(int index) {
Object o = this.elements.get(index);
@ -525,6 +540,8 @@ public class Tuple implements Comparable<Tuple>, Iterable<Object> {
* @param index the location of the item to return
*
* @return the item at {@code index} as a {@code double}
*
* @throws ClassCastException if the element at {@code index} is not a {@link Number}
*/
public double getDouble(int index) {
Object o = this.elements.get(index);
@ -541,6 +558,9 @@ public class Tuple implements Comparable<Tuple>, Iterable<Object> {
* @param index the location of the item to return
*
* @return the item at {@code index} as a {@code boolean}
*
* @throws ClassCastException if the element at {@code index} is not a {@link Boolean}
* @throws NullPointerException if the element at {@code index} is {@code null}
*/
public boolean getBoolean(int index) {
Object o = this.elements.get(index);
@ -558,6 +578,8 @@ public class Tuple implements Comparable<Tuple>, Iterable<Object> {
* @param index the location of the item to return
*
* @return the item at {@code index} as a {@link UUID}
*
* @throws ClassCastException if the element at {@code index} is not a {@link UUID}
*/
public UUID getUUID(int index) {
Object o = this.elements.get(index);
@ -573,7 +595,9 @@ public class Tuple implements Comparable<Tuple>, Iterable<Object> {
*
* @param index the location of the item to return
*
* @return the item at {@code index} as a {@link UUID}
* @return the item at {@code index} as a {@link Versionstamp}
*
* @throws ClassCastException if the element at {@code index} is not a {@link Versionstamp}
*/
public Versionstamp getVersionstamp(int index) {
Object o = this.elements.get(index);
@ -590,6 +614,9 @@ public class Tuple implements Comparable<Tuple>, Iterable<Object> {
* @param index the location of the item to return
*
* @return the item at {@code index} as a {@link List}
*
* @throws ClassCastException if the element at {@code index} is not a {@link List}
* or a {@code Tuple}
*/
public List<Object> getNestedList(int index) {
Object o = this.elements.get(index);
@ -614,6 +641,9 @@ public class Tuple implements Comparable<Tuple>, Iterable<Object> {
* @param index the location of the item to return
*
* @return the item at {@code index} as a {@link List}
*
* @throws ClassCastException if the element at {@code index} is not a {@code Tuple}
* or a {@link List}
*/
public Tuple getNestedTuple(int index) {
Object o = this.elements.get(index);
@ -642,7 +672,9 @@ public class Tuple implements Comparable<Tuple>, Iterable<Object> {
/**
* Creates a new {@code Tuple} with the first item of this {@code Tuple} removed.
*
* @return a newly created {@code Tuple}
* @return a newly created {@code Tuple} without the first item of this {@code Tuple}
*
* @throws IllegalStateException if this {@code Tuple} is empty
*/
public Tuple popFront() {
if(elements.size() == 0)
@ -659,7 +691,9 @@ public class Tuple implements Comparable<Tuple>, Iterable<Object> {
/**
* Creates a new {@code Tuple} with the last item of this {@code Tuple} removed.
*
* @return a newly created {@code Tuple}
* @return a newly created {@code Tuple} without the last item of this {@code Tuple}
*
* @throws IllegalStateException if this {@code Tuple} is empty
*/
public Tuple popBack() {
if(elements.size() == 0)
@ -684,8 +718,8 @@ public class Tuple implements Comparable<Tuple>, Iterable<Object> {
* Range r = t.range();</pre>
* {@code r} includes all tuples ("a", "b", ...)
*
* @return the keyspace range containing all {@code Tuple}s that have this {@code Tuple}
* as a prefix.
* @return the range of keys containing all {@code Tuple}s that have this {@code Tuple}
* as a prefix
*/
public Range range() {
byte[] p = pack();
@ -728,7 +762,7 @@ public class Tuple implements Comparable<Tuple>, Iterable<Object> {
* Returns a hash code value for this {@code Tuple}.
* {@inheritDoc}
*
* @return a hashcode
* @return a hash code for this {@code Tuple} that can be used by hash tables
*/
@Override
public int hashCode() {
@ -741,7 +775,7 @@ public class Tuple implements Comparable<Tuple>, Iterable<Object> {
* {@link Tuple#compareTo(Tuple) compareTo()} would return {@code 0}.
*
* @return {@code true} if {@code obj} is a {@code Tuple} and their binary representation
* is identical.
* is identical
*/
@Override
public boolean equals(Object o) {
@ -754,9 +788,14 @@ public class Tuple implements Comparable<Tuple>, Iterable<Object> {
}
/**
* Returns a string representing this {@code Tuple}.
* Returns a string representing this {@code Tuple}. This contains human-readable
* representations of all of the elements of this {@code Tuple}. For most elements,
* this means using that object's default string representation. For byte-arrays,
* this means using {@link ByteArrayUtil#printable(byte[]) ByteArrayUtil.printable()}
* to produce a byte-string where most printable ASCII code points have been
* rendered as characters.
*
* @return a string
* @return a human-readable {@link String} representation of this {@code Tuple}
*/
@Override
public String toString() {
@ -797,9 +836,9 @@ public class Tuple implements Comparable<Tuple>, Iterable<Object> {
* can only be {@link String}s, {@code byte[]}s, {@link Number}s, {@link UUID}s,
* {@link Boolean}s, {@link List}s, {@code Tuple}s, or {@code null}s.
*
* @param items the elements from which to create the {@code Tuple}.
* @param items the elements from which to create the {@code Tuple}
*
* @return a newly created {@code Tuple}
* @return a new {@code Tuple} with the given items as its elements
*/
public static Tuple fromItems(Iterable<? extends Object> items) {
Tuple t = new Tuple();
@ -817,7 +856,7 @@ public class Tuple implements Comparable<Tuple>, Iterable<Object> {
*
* @param items the elements from which to create the {@code Tuple}.
*
* @return a newly created {@code Tuple}
* @return a new {@code Tuple} with the given items as its elements
*/
public static Tuple fromList(List<? extends Object> items) {
return new Tuple(items);
@ -827,11 +866,12 @@ public class Tuple implements Comparable<Tuple>, Iterable<Object> {
* Efficiently creates a new {@code Tuple} from a {@link Stream} of objects. The
* elements must follow the type guidelines from {@link Tuple#addObject(Object) add},
* and so can only be {@link String}s, {@code byte[]}s, {@link Number}s, {@link UUID}s,
* {@link Boolean}s, {@link List}s, {@code Tuple}s, or {@code null}s.
* {@link Boolean}s, {@link List}s, {@code Tuple}s, or {@code null}s. Note that this
* class will consume all elements from the {@link Stream}.
*
* @param items the {@link Stream} of items from which to create the {@code Tuple}.
* @param items the {@link Stream} of items from which to create the {@code Tuple}
*
* @return a newly created {@code Tuple}
* @return a new {@code Tuple} with the given items as its elements
*/
public static Tuple fromStream(Stream<? extends Object> items) {
Tuple t = new Tuple();
@ -845,9 +885,9 @@ public class Tuple implements Comparable<Tuple>, Iterable<Object> {
* can only be {@link String}s, {@code byte[]}s, {@link Number}s, {@link UUID}s,
* {@link Boolean}s, {@link List}s, {@code Tuple}s, or {@code null}s.
*
* @param items the elements from which to create the {@code Tuple}.
* @param items the elements from which to create the {@code Tuple}
*
* @return a newly created {@code Tuple}
* @return a new {@code Tuple} with the given items as its elements
*/
public static Tuple from(Object... items) {
return fromList(Arrays.asList(items));

View File

@ -57,7 +57,7 @@ import java.util.Arrays;
*
* <pre>
* <code>
* {@code CompletableFuture<byte[]>} trVersionFuture = db.run((Transaction tr) -> {
* CompletableFuture&lt;byte[]&gt; trVersionFuture = db.run((Transaction tr) -&gt; {
* // The incomplete Versionstamp will be overwritten with tr's version information when committed.
* Tuple t = Tuple.from("prefix", Versionstamp.incomplete());
* tr.mutate(MutationType.SET_VERSIONSTAMPED_KEY, t.packWithVersionstamp(), new byte[0]);
@ -66,7 +66,7 @@ import java.util.Arrays;
*
* byte[] trVersion = trVersionFuture.get();
*
* Versionstamp v = db.run((Transaction tr) -> {
* Versionstamp v = db.run((Transaction tr) -&gt; {
* Subspace subspace = new Subspace(Tuple.from("prefix"));
* byte[] serialized = tr.getRange(subspace.range(), 1).iterator().next().getKey();
* Tuple t = subspace.unpack(serialized);

View File

@ -22,7 +22,7 @@
* Provides a set of utilities for serializing and deserializing typed data
* for use in FoundationDB. When packed together into a {@link com.apple.foundationdb.tuple.Tuple}
* this data is suitable for use as an index or organizational structure within FoundationDB
* keyspace. See <a href="/foundationdb/data-modeling.html#tuples">general Tuple
* keyspace. See <a href="/data-modeling.html#data-modeling-tuples">general Tuple
* documentation</a> for information about how Tuples sort and can be used to efficiently
* model data.
*/

View File

@ -5,7 +5,7 @@ This documents the client API for using FoundationDB from Java.<br>
<h3>Installation</h3>
FoundationDB's Java bindings rely on native libraries that are installed as part of the
FoundationDB client binaries installation (see
<a href="/foundationdb/api-general.html#installing-client-binaries" target="_blank">
<a href="/api-general.html#installing-client-binaries" target="_blank">
Installing FoundationDB client binaries</a>). The FoundationDB Java bindings are available
through Artifactory. To use them in your Maven-enabled project, add a dependency to your
pom.xml like: <br>
@ -19,18 +19,18 @@ pom.xml like: <br>
}
</pre>
Alternatively, simply download the JAR from
<a href="https://files.foundationdb.org/fdb-java/">Artifactory</a>
<a href="https://www.foundationdb.org/downloads/fdb-java/">Artifactory</a>
and add it to your classpath.<br>
<br>
<h3>Getting started</h3>
To start using FoundationDB from Java, create an instance of the
{@link com.apple.foundationdb.FDB FoundationDB API interface} with the version of the
API that you want to use (this release of the FoundationDB Java API supports versions between {@code 500} and {@code 510}).
API that you want to use (this release of the FoundationDB Java API supports versions between {@code 510} and {@code 520}).
With this API object you can then open {@link com.apple.foundationdb.Cluster Cluster}s and
{@link com.apple.foundationdb.Database Database}s and start using
{@link com.apple.foundationdb.Transaction Transaction}s.
Here we give an example. The example relies on a cluster file at the
<a href="/foundationdb/api-general.html#default-cluster-file">default location</a>
<a href="/administration.html#default-cluster-file">default location</a>
for your platform and a running server.<br>
<br>
<pre>
@ -41,7 +41,7 @@ import com.apple.foundationdb.tuple.Tuple;
public class Example {
public static void main(String[] args) {
FDB fdb = FDB.selectAPIVersion(510);
FDB fdb = FDB.selectAPIVersion(520);
try(Database db = fdb.open()) {
// Run an operation on the database
@ -69,7 +69,7 @@ cross-language support for storing and retrieving typed data from the
binary data that FoundationDB supports. And, just as importantly, data packed into
{@code Tuple}s and used as keys sort in predictable and useful ways. See the
{@link com.apple.foundationdb.tuple Tuple class documentation} for information about use in Java
and <a href="/foundationdb/data-modeling.html#tuples">general Tuple documentation</a>
and <a href="/data-modeling.html#data-modeling-tuples">general Tuple documentation</a>
for information about how Tuples sort and can be used to efficiently model data.
<br>
<h3>FoundationDB {@link com.apple.foundationdb.directory Directory API}</h3>

View File

@ -27,7 +27,7 @@ import com.apple.foundationdb.Database;
import com.apple.foundationdb.FDB;
public abstract class AbstractTester {
public static final int API_VERSION = 510;
public static final int API_VERSION = 520;
protected static final int NUM_RUNS = 25;
protected static final Charset ASCII = Charset.forName("ASCII");

View File

@ -451,6 +451,27 @@ public class AsyncStackTester {
else if(op == StackOperation.UNIT_TESTS) {
inst.context.db.options().setLocationCacheSize(100001);
return inst.context.db.runAsync(tr -> {
FDB fdb = FDB.instance();
String alreadyStartedMessage = "FoundationDB API already started at different version";
try {
FDB.selectAPIVersion(fdb.getAPIVersion() + 1);
throw new IllegalStateException("Was not stopped from selecting two API versions");
}
catch(IllegalArgumentException e) {
if(!e.getMessage().equals(alreadyStartedMessage)) {
throw e;
}
}
try {
FDB.selectAPIVersion(fdb.getAPIVersion() - 1);
throw new IllegalStateException("Was not stopped from selecting two API versions");
}
catch(IllegalArgumentException e) {
if(!e.getMessage().equals(alreadyStartedMessage)) {
throw e;
}
}
tr.options().setPrioritySystemImmediate();
tr.options().setPriorityBatch();
tr.options().setCausalReadRisky();
@ -686,7 +707,23 @@ public class AsyncStackTester {
byte[] prefix = args[0].getBytes();
FDB fdb = FDB.selectAPIVersion(Integer.parseInt(args[1]));
if(FDB.isAPIVersionSelected()) {
throw new IllegalStateException("API version already set to " + FDB.instance().getAPIVersion());
}
try {
FDB.instance();
throw new IllegalStateException("Able to get API instance before selecting API version");
}
catch(FDBException e) {
if(e.getCode() != 2200) {
throw e;
}
}
int apiVersion = Integer.parseInt(args[1]);
FDB fdb = FDB.selectAPIVersion(apiVersion);
if(FDB.instance().getAPIVersion() != apiVersion) {
throw new IllegalStateException("API version not correctly set to " + apiVersion);
}
//ExecutorService executor = Executors.newFixedThreadPool(2);
Cluster cl = fdb.createCluster(args.length > 2 ? args[2] : null);

View File

@ -33,7 +33,7 @@ public class BlockingBenchmark {
private static final int PARALLEL = 100;
public static void main(String[] args) throws InterruptedException {
FDB fdb = FDB.selectAPIVersion(510);
FDB fdb = FDB.selectAPIVersion(520);
// The cluster file DOES NOT need to be valid, although it must exist.
// This is because the database is never really contacted in this test.

View File

@ -48,7 +48,7 @@ public class ConcurrentGetSetGet {
}
public static void main(String[] args) {
try(Database database = FDB.selectAPIVersion(510).open()) {
try(Database database = FDB.selectAPIVersion(520).open()) {
new ConcurrentGetSetGet().apply(database);
}
}

View File

@ -34,7 +34,7 @@ import com.apple.foundationdb.directory.DirectorySubspace;
public class DirectoryTest {
public static void main(String[] args) throws Exception {
try {
FDB fdb = FDB.selectAPIVersion(510);
FDB fdb = FDB.selectAPIVersion(520);
try(Database db = fdb.open()) {
runTests(db);
}

View File

@ -26,7 +26,7 @@ import com.apple.foundationdb.tuple.Tuple;
public class Example {
public static void main(String[] args) {
FDB fdb = FDB.selectAPIVersion(510);
FDB fdb = FDB.selectAPIVersion(520);
try(Database db = fdb.open()) {
// Run an operation on the database

View File

@ -32,7 +32,7 @@ public class IterableTest {
public static void main(String[] args) throws InterruptedException {
final int reps = 1000;
try {
FDB fdb = FDB.selectAPIVersion(510);
FDB fdb = FDB.selectAPIVersion(520);
try(Database db = fdb.open()) {
runTests(reps, db);
}

View File

@ -34,7 +34,7 @@ import com.apple.foundationdb.tuple.ByteArrayUtil;
public class LocalityTests {
public static void main(String[] args) {
FDB fdb = FDB.selectAPIVersion(510);
FDB fdb = FDB.selectAPIVersion(520);
try(Database database = fdb.open(args[0])) {
try(Transaction tr = database.createTransaction()) {
String[] keyAddresses = LocalityUtil.getAddressesForKey(tr, "a".getBytes()).join();

View File

@ -43,7 +43,7 @@ public class ParallelRandomScan {
private static final int PARALLELISM_STEP = 5;
public static void main(String[] args) throws InterruptedException {
FDB api = FDB.selectAPIVersion(510);
FDB api = FDB.selectAPIVersion(520);
try(Database database = api.open(args[0])) {
for(int i = PARALLELISM_MIN; i <= PARALLELISM_MAX; i += PARALLELISM_STEP) {
runTest(database, i, ROWS, DURATION_MS);

View File

@ -34,7 +34,7 @@ import com.apple.foundationdb.Transaction;
import com.apple.foundationdb.async.AsyncIterable;
public class RangeTest {
private static final int API_VERSION = 510;
private static final int API_VERSION = 520;
public static void main(String[] args) {
System.out.println("About to use version " + API_VERSION);

View File

@ -34,7 +34,7 @@ public class SerialInsertion {
private static final int NODES = 1000000;
public static void main(String[] args) {
FDB api = FDB.selectAPIVersion(510);
FDB api = FDB.selectAPIVersion(520);
try(Database database = api.open()) {
long start = System.currentTimeMillis();

View File

@ -39,7 +39,7 @@ public class SerialIteration {
private static final int THREAD_COUNT = 1;
public static void main(String[] args) throws InterruptedException {
FDB api = FDB.selectAPIVersion(510);
FDB api = FDB.selectAPIVersion(520);
try(Database database = api.open(args[0])) {
for(int i = 1; i <= THREAD_COUNT; i++) {
runThreadedTest(database, i);

View File

@ -31,7 +31,7 @@ public class SerialTest {
public static void main(String[] args) throws InterruptedException {
final int reps = 1000;
try {
FDB fdb = FDB.selectAPIVersion(510);
FDB fdb = FDB.selectAPIVersion(520);
try(Database db = fdb.open()) {
runTests(reps, db);
}

View File

@ -400,6 +400,28 @@ public class StackTester {
try {
inst.context.db.options().setLocationCacheSize(100001);
inst.context.db.run(tr -> {
FDB fdb = FDB.instance();
String alreadyStartedMessage = "FoundationDB API already started at different version";
try {
FDB.selectAPIVersion(fdb.getAPIVersion() + 1);
throw new IllegalStateException("Was not stopped from selecting two API versions");
}
catch(IllegalArgumentException e) {
if(!e.getMessage().equals(alreadyStartedMessage)) {
throw e;
}
}
try {
FDB.selectAPIVersion(fdb.getAPIVersion() - 1);
throw new IllegalStateException("Was not stopped from selecting two API versions");
}
catch(IllegalArgumentException e) {
if(!e.getMessage().equals(alreadyStartedMessage)) {
throw e;
}
}
tr.options().setPrioritySystemImmediate();
tr.options().setPriorityBatch();
tr.options().setCausalReadRisky();
@ -692,7 +714,23 @@ public class StackTester {
throw new IllegalArgumentException("StackTester needs parameters <prefix> <optional_cluster_file>");
byte[] prefix = args[0].getBytes();
FDB fdb = FDB.selectAPIVersion(Integer.parseInt(args[1]));
if(FDB.isAPIVersionSelected()) {
throw new IllegalStateException("API version already set to " + FDB.instance().getAPIVersion());
}
try {
FDB.instance();
throw new IllegalStateException("Able to get API instance before selecting API version");
}
catch(FDBException e) {
if(e.getCode() != 2200) {
throw e;
}
}
int apiVersion = Integer.parseInt(args[1]);
FDB fdb = FDB.selectAPIVersion(apiVersion);
if(FDB.instance().getAPIVersion() != apiVersion) {
throw new IllegalStateException("API version not correctly set to " + apiVersion);
}
Database db;
if(args.length == 2)
db = fdb.open();

View File

@ -30,7 +30,7 @@ public class TupleTest {
public static void main(String[] args) throws InterruptedException {
final int reps = 1000;
try {
FDB fdb = FDB.selectAPIVersion(510);
FDB fdb = FDB.selectAPIVersion(520);
try(Database db = fdb.open()) {
runTests(reps, db);
}

View File

@ -32,7 +32,7 @@ import com.apple.foundationdb.tuple.Versionstamp;
public class VersionstampSmokeTest {
public static void main(String[] args) {
FDB fdb = FDB.selectAPIVersion(510);
FDB fdb = FDB.selectAPIVersion(520);
try(Database db = fdb.open()) {
db.run(tr -> {
tr.clear(Tuple.from("prefix").range());

View File

@ -34,7 +34,7 @@ import com.apple.foundationdb.Transaction;
public class WatchTest {
public static void main(String[] args) {
FDB fdb = FDB.selectAPIVersion(510);
FDB fdb = FDB.selectAPIVersion(520);
try(Database database = fdb.open(args[0])) {
database.options().setLocationCacheSize(42);
try(Transaction tr = database.createTransaction()) {

View File

@ -1,3 +1,3 @@
Complete documentation of the FoundationDB Node.js API can be found at [https://foundationdb.org/documentation/api-node.html](https://foundationdb.org/documentation/api-node.html).
Complete documentation of the FoundationDB Node.js API can be found at [https://www.foundationdb.org/documentation/api-node.html](https://www.foundationdb.org/documentation/api-node.html).
These bindings require the FoundationDB client. The client can be obtained from [https://files.foundationdb.org/fdb-c/](https://files.foundationdb.org/fdb-c/).
These bindings require the FoundationDB client. The client can be obtained from [https://www.foundationdb.org/downloads/fdb-c/](https://www.foundationdb.org/downloads/fdb-c/).

View File

@ -25,6 +25,9 @@ CLEAN_TARGETS += fdb_node_clean fdb_node_npm_clean
NODE_VERSIONS := 0.8.22 0.10.0
NODE_DIST_URL ?= https://nodejs.org/dist
NODE_REGISTRY_URL ?= https://registry.npmjs.org/
ifeq ($(RELEASE),true)
NPMVER = $(VERSION)
else
@ -46,7 +49,7 @@ bindings/nodejs/fdb_node.stamp: bindings/nodejs/src/FdbOptions.g.cpp bindings/no
for ver in $(NODE_VERSIONS); do \
MMVER=`echo $$ver | sed -e 's,\., ,g' | awk '{print $$1 "." $$2}'` && \
mkdir modules/$$MMVER && \
node-gyp configure --dist-url=https://nodejs.org/dist --target=$$ver && \
node-gyp configure --dist-url=$(NODE_DIST_URL) --target=$$ver && \
node-gyp -v build && \
cp build/Release/fdblib.node modules/$${MMVER} ; \
done
@ -67,7 +70,7 @@ bindings/nodejs/package.json: bindings/nodejs/package.json.in $(ALL_MAKEFILES) v
@m4 -DVERSION=$(NPMVER) $< > $@
@echo "Updating Node dependencies"
@cd bindings/nodejs && \
npm config set registry "https://registry.npmjs.org/" && \
npm config set registry "$(NODE_REGISTRY_URL)" && \
npm update
fdb_node_npm: fdb_node versions.target bindings/nodejs/README.md bindings/nodejs/lib/*.js bindings/nodejs/src/* bindings/nodejs/binding.gyp LICENSE

View File

@ -18,6 +18,18 @@
* limitations under the License.
*/
"use strict";
var util = require('util');
var buffer = require('./bufferConversion');
var future = require('./future');
var transactional = require('./retryDecorator');
var tuple = require('./tuple');
var Subspace = require('./subspace');
var fdbUtil = require('./fdbUtil');
/*************
* Utilities *
*************/

View File

@ -43,8 +43,8 @@ module.exports = {
throw new Error('Cannot select multiple different FDB API versions');
if(version < 500)
throw new RangeError('FDB API versions before 500 are not supported');
if(version > 510)
throw new RangeError('Latest known FDB API version is 510');
if(version > 520)
throw new RangeError('Latest known FDB API version is 520');
if(!selectedApiVersion.value) {
fdb.apiVersion(version);

View File

@ -18,3 +18,87 @@
* limitations under the License.
*/
"use strict";
var buffer = require('./bufferConversion');
var future = require('./future');
var strinc = function(str) {
var buf = buffer(str);
var lastNonFFByte;
for(lastNonFFByte = buf.length-1; lastNonFFByte >= 0; --lastNonFFByte)
if(buf[lastNonFFByte] != 0xFF)
break;
if(lastNonFFByte < 0)
throw new Error('invalid argument \'' + str + '\': prefix must have at least one byte not equal to 0xFF');
var copy = new Buffer(lastNonFFByte + 1);
str.copy(copy, 0, 0, copy.length);
++copy[lastNonFFByte];
return copy;
};
var whileLoop = function(func, cb) {
return future.create(function(futureCb) {
var calledCallback = true;
function outer(err, res) {
if(err || typeof(res) !== 'undefined') {
futureCb(err, res);
}
else if(!calledCallback) {
calledCallback = true;
}
else {
while(calledCallback) {
calledCallback = false;
func(outer);
}
calledCallback = true;
}
}
outer();
}, cb);
};
var keyToBuffer = function(key) {
if(typeof(key.asFoundationDBKey) == 'function')
return buffer(key.asFoundationDBKey());
return buffer(key);
};
var valueToBuffer = function(val) {
if(typeof(val.asFoundationDBValue) == 'function')
return buffer(val.asFoundationDBValue());
return buffer(val);
};
var buffersEqual = function(buf1, buf2) {
if(!buf1 || !buf2)
return buf1 === buf2;
if(buf1.length !== buf2.length)
return false;
for(var i = 0; i < buf1.length; ++i)
if(buf1[i] !== buf2[i])
return false;
return true;
};
module.exports = {
strinc: strinc,
whileLoop: whileLoop,
keyToBuffer: keyToBuffer,
valueToBuffer: valueToBuffer,
buffersEqual: buffersEqual
};

View File

@ -18,3 +18,68 @@
* limitations under the License.
*/
"use strict";
var buffer = require('./bufferConversion');
var fdbUtil = require('./fdbUtil');
var tuple = require('./tuple');
var Subspace = function(prefixArray, rawPrefix) {
if(typeof rawPrefix === 'undefined')
rawPrefix = new Buffer(0);
if(typeof prefixArray === 'undefined')
prefixArray = [];
rawPrefix = fdbUtil.keyToBuffer(rawPrefix);
var packed = tuple.pack(prefixArray);
this.rawPrefix = Buffer.concat([rawPrefix, packed], rawPrefix.length + packed.length);
};
Subspace.prototype.key = function() {
return this.rawPrefix;
};
Subspace.prototype.pack = function(arr) {
var packed = tuple.pack(arr);
return Buffer.concat([this.rawPrefix, packed], this.rawPrefix.length + packed.length) ;
};
Subspace.prototype.unpack = function(key) {
key = fdbUtil.keyToBuffer(key);
if(!this.contains(key))
throw new Error('Cannot unpack key that is not in subspace.');
return tuple.unpack(key.slice(this.rawPrefix.length));
};
Subspace.prototype.range = function(arr) {
if(typeof arr === 'undefined')
arr = [];
var range = tuple.range(arr);
return {
begin: Buffer.concat([this.rawPrefix, range.begin], this.rawPrefix.length + range.begin.length),
end: Buffer.concat([this.rawPrefix, range.end], this.rawPrefix.length + range.end.length)
};
};
Subspace.prototype.contains = function(key) {
key = fdbUtil.keyToBuffer(key);
return key.length >= this.rawPrefix.length && fdbUtil.buffersEqual(key.slice(0, this.rawPrefix.length), this.rawPrefix);
};
Subspace.prototype.get = function(item) {
return this.subspace([item]);
};
Subspace.prototype.subspace = function(arr) {
return new Subspace(arr, this.rawPrefix);
};
Subspace.prototype.asFoundationDBKey = function() {
return this.key();
};
module.exports = Subspace;

View File

@ -4,7 +4,7 @@
"registry": "https://registry.npmjs.org"
},
"version": "VERSION",
"author": "FoundationDB <fdbopensource@apple.com> (http://foundationdb.org)",
"author": "FoundationDB <fdb-dist@apple.com> (https://www.foundationdb.org)",
"description": "Node.js bindings for the FoundationDB database",
"keywords": [ "FoundationDB", "database", "NoSQL", "ACID" ],
"homepage": "http://17.199.145.104",

View File

@ -22,6 +22,6 @@
#ifndef FDB_NODE_VERSION_H
#define FDB_NODE_VERSION_H
#define FDB_API_VERSION 510
#define FDB_API_VERSION 520
#endif

View File

@ -1,3 +1,5 @@
#!/usr/bin/env node
/*
* promise_aplus_test.js
*
@ -18,8 +20,6 @@
* limitations under the License.
*/
#!/usr/bin/env node
"use strict";
var promisesAplusTests = require('promises-aplus-tests');

View File

@ -1,3 +1,5 @@
#!/usr/bin/env node
/*
* tester.js
*
@ -18,8 +20,6 @@
* limitations under the License.
*/
#!/usr/bin/env node
"use strict";
//cmd line: node tester.js <test_prefix> <optional_cluster_file>

View File

@ -18,7 +18,7 @@
* limitations under the License.
*/
var fdb = require('../lib/fdb.js').apiVersion(510);
var fdb = require('../lib/fdb.js').apiVersion(520);
var fdbModule = require('../lib/fdbModule.js');
console.log(fdb.tuple.pack([-Math.pow(2,53)]));

View File

@ -1,3 +1,3 @@
Complete documentation of the FoundationDB Python API can be found at https://foundationdb.org/documentation/api-python.html.
Complete documentation of the FoundationDB Python API can be found at https://www.foundationdb.org/documentation/api-python.html.
These bindings require the FoundationDB client. The client can be obtained from https://files.foundationdb.org/fdb-c/.
These bindings require the FoundationDB client. The client can be obtained from https://www.foundationdb.org/downloads/fdb-c/.

View File

@ -21,22 +21,38 @@
# FoundationDB Python API
"""Documentation for this API can be found at
https://foundationdb.org/documentation/api-python.html"""
https://www.foundationdb.org/documentation/api-python.html"""
def open(*args, **kwargs):
raise RuntimeError('You must call api_version() before using any fdb methods')
init = open
def transactional(*args, **kwargs):
raise RuntimeError('You must call api_version() before using fdb.transactional')
def _add_symbols(module, symbols):
for symbol in symbols:
globals()[symbol] = getattr(module, symbol)
def is_api_version_selected():
return '_version' in globals()
def get_api_version():
if is_api_version_selected():
return globals()['_version']
else:
raise RuntimeError('API version is not set')
def api_version(ver):
header_version = 510
header_version = 520
if '_version' in globals():
if globals()['_version'] != ver:
@ -55,7 +71,9 @@ def api_version(ver):
if err == 2203: # api_version_not_supported, but that's not helpful to the user
max_supported_ver = fdb.impl._capi.fdb_get_max_api_version()
if header_version > max_supported_ver:
raise RuntimeError("This version of the FoundationDB Python binding is not supported by the installed FoundationDB C library. The binding requires a library that supports API version %d, but the installed library supports a maximum version of %d." % (header_version, max_supported_ver))
raise RuntimeError("This version of the FoundationDB Python binding is not supported by the installed "
"FoundationDB C library. The binding requires a library that supports API version "
"%d, but the installed library supports a maximum version of %d." % (header_version, max_supported_ver))
else:
raise RuntimeError("API version %d is not supported by the installed FoundationDB C library." % ver)
@ -97,7 +115,8 @@ def api_version(ver):
if issubclass(o, fdb.impl.Future):
if hasattr(o, "wait"):
o.get = o.wait
except TypeError: pass
except TypeError:
pass
# FDBRange used to be called FDBRangeIter and was an iterator,
# but it's now a container. In v13 we have to make it act like
@ -117,4 +136,3 @@ def api_version(ver):
import fdb.subspace_impl
subspace_symbols = ('Subspace',)
_add_symbols(fdb.subspace_impl, subspace_symbols)

View File

@ -29,16 +29,18 @@ from fdb import six
import fdb.tuple
from .subspace_impl import Subspace
class AllocatorTransactionState:
def __init__(self):
self.lock = threading.Lock()
class HighContentionAllocator (object):
def __init__(self, subspace):
self.counters = subspace[0]
self.recent = subspace[1]
self.lock = threading.Lock();
self.lock = threading.Lock()
@_impl.transactional
def allocate(self, tr):
@ -52,27 +54,28 @@ class HighContentionAllocator (object):
if not hasattr(tr, "__fdb_directory_layer_hca_state__"):
with self.lock:
if not hasattr(tr, "__fdb_directory_layer_hca_state__"):
tr.__fdb_directory_layer_hca_state__ = AllocatorTransactionState();
tr.__fdb_directory_layer_hca_state__ = AllocatorTransactionState()
tr_state = tr.__fdb_directory_layer_hca_state__
while True:
[start] = [self.counters.unpack(k)[0] for k,_ in tr.snapshot.get_range(self.counters.range().start, self.counters.range().stop, limit=1, reverse=True)] or [0]
[start] = [self.counters.unpack(k)[0] for k, _ in tr.snapshot.get_range(
self.counters.range().start, self.counters.range().stop, limit=1, reverse=True)] or [0]
window_advanced = False
while True:
with tr_state.lock:
if window_advanced:
del tr[self.counters : self.counters[start]]
del tr[self.counters: self.counters[start]]
tr.options.set_next_write_no_write_conflict_range()
del tr[self.recent : self.recent[start]]
del tr[self.recent: self.recent[start]]
# Increment the allocation count for the current window
tr.add(self.counters[start], struct.pack("<q", 1))
count = tr.snapshot[self.counters[start]]
if count != None:
count = struct.unpack("<q", str(count))[0]
count = struct.unpack("<q", bytes(count))[0]
else:
count = 0
@ -96,7 +99,7 @@ class HighContentionAllocator (object):
tr.options.set_next_write_no_write_conflict_range()
tr[self.recent[candidate]] = b''
latest_counter = [self.counters.unpack(k)[0] for k,_ in latest_counter]
latest_counter = [self.counters.unpack(k)[0] for k, _ in latest_counter]
if len(latest_counter) > 0 and latest_counter[0] > start:
break
@ -110,10 +113,13 @@ class HighContentionAllocator (object):
# can't be too small. So start small and scale up. We don't want this
# to ever get *too* big because we have to store about window_size/2
# recent items.
if start < 255: return 64
if start < 65535: return 1024
if start < 255:
return 64
if start < 65535:
return 1024
return 8192
class Directory(object):
def __init__(self, directory_layer, path=(), layer=b''):
self._directory_layer = directory_layer
@ -186,7 +192,7 @@ class Directory(object):
path = (path,)
return path
def _partition_subpath(self, path, directory_layer = None):
def _partition_subpath(self, path, directory_layer=None):
directory_layer = directory_layer or self._directory_layer
return self._path[len(directory_layer._path):] + path
@ -195,6 +201,7 @@ class Directory(object):
def _get_layer_for_path(self, path):
return self._directory_layer
class DirectoryLayer(Directory):
def __init__(self, node_subspace=Subspace(rawPrefix=b'\xfe'), content_subspace=Subspace(), allow_manual_prefixes=False):
@ -241,7 +248,9 @@ class DirectoryLayer(Directory):
if existing_node.exists():
if existing_node.is_in_partition():
subpath = existing_node.get_partition_subpath()
return existing_node.get_contents(self)._directory_layer._create_or_open_internal(tr, subpath, layer, prefix, allow_create, allow_open)
return existing_node.get_contents(self)._directory_layer._create_or_open_internal(
tr, subpath, layer, prefix, allow_create, allow_open
)
if not allow_open:
raise ValueError("The directory already exists.")
@ -431,11 +440,11 @@ class DirectoryLayer(Directory):
return True
########################################
## Private methods for implementation ##
# Private methods for implementation #
########################################
SUBDIRS=0
VERSION=(1,0,0)
SUBDIRS = 0
VERSION = (1, 0, 0)
def _check_version(self, tr, write_access=True):
version = tr[self._root_node[b'version']]
@ -473,7 +482,8 @@ class DirectoryLayer(Directory):
return None
def _node_with_prefix(self, prefix):
if prefix == None: return None
if prefix == None:
return None
return self._node_subspace[prefix]
def _contents_of_node(self, node, path, layer=None):
@ -487,7 +497,7 @@ class DirectoryLayer(Directory):
def _find(self, tr, path):
n = _Node(self._root_node, (), path)
for i, name in enumerate(path):
n = _Node(self._node_with_prefix(tr[n.subspace[self.SUBDIRS][name]]), path[:i+1], path)
n = _Node(self._node_with_prefix(tr[n.subspace[self.SUBDIRS][name]]), path[:i + 1], path)
if not n.exists() or n.layer(tr) == b'partition':
return n
return n
@ -511,11 +521,13 @@ class DirectoryLayer(Directory):
# Returns true if the given prefix does not "intersect" any currently
# allocated prefix (including the root node). This means that it neither
# contains any other prefix nor is contained by any other prefix.
return prefix and not self._node_containing_key(tr, prefix) and not len(list(tr.get_range(self._node_subspace.pack((prefix,)), self._node_subspace.pack((_impl.strinc(prefix),)), limit=1)))
return prefix and not self._node_containing_key(tr, prefix) \
and not len(list(tr.get_range(self._node_subspace.pack((prefix,)), self._node_subspace.pack((_impl.strinc(prefix),)), limit=1)))
def _is_prefix_empty(self, tr, prefix):
return len(list(tr.get_range(prefix, _impl.strinc(prefix), limit=1))) == 0
def _to_unicode_path(path):
if isinstance(path, bytes):
path = six.text_type(path)
@ -535,8 +547,10 @@ def _to_unicode_path(path):
raise ValueError('Invalid path: must be a unicode string or a tuple of unicode strings')
directory = DirectoryLayer()
class DirectorySubspace(Subspace, Directory):
# A DirectorySubspace represents the *contents* of a directory, but it also
# remembers the path with which it was opened and offers convenience methods
@ -549,6 +563,7 @@ class DirectorySubspace(Subspace, Directory):
def __repr__(self):
return 'DirectorySubspace(path=' + repr(self._path) + ', prefix=' + repr(self.rawPrefix) + ')'
class DirectoryPartition(DirectorySubspace):
def __init__(self, path, prefix, parent_directory_layer):
directory_layer = DirectoryLayer(Subspace(rawPrefix=prefix + b'\xfe'), Subspace(rawPrefix=prefix))
@ -590,6 +605,7 @@ class DirectoryPartition(DirectorySubspace):
else:
return self._directory_layer
class _Node (object):
def __init__(self, subspace, path, target_path):
@ -623,4 +639,3 @@ class _Node (object):
def get_contents(self, directory_layer, tr=None):
return directory_layer._contents_of_node(self.subspace, self.path, self.layer(tr))

View File

@ -40,41 +40,50 @@ _open_file = open
import weakref
class _NetworkOptions(object):
def __init__(self, parent):
self._parent = parent
class _ErrorPredicates(object):
def __init__(self, parent):
self._parent = parent
class _ClusterOptions(object):
def __init__(self, cluster):
self._parent = weakref.proxy(cluster)
class _DatabaseOptions(object):
def __init__(self, db):
self._parent = weakref.proxy(db)
class _TransactionOptions(object):
def __init__(self, tr):
self._parent = weakref.proxy(tr)
from fdb import fdboptions as _opts
import types
import struct
def option_wrap(code):
def setfunc(self):
self._parent._set_option(code, None, 0)
return setfunc
def option_wrap_string(code):
def setfunc(self, param=None):
param, length = optionalParamToBytes(param)
self._parent._set_option(code, param, length)
return setfunc
def option_wrap_bytes(code):
def setfunc(self, param=None):
if param is None:
@ -85,25 +94,29 @@ def option_wrap_bytes(code):
raise TypeError('Value must be of type ' + bytes.__name__)
return setfunc
def option_wrap_int(code):
def setfunc(self, param):
self._parent._set_option(code, struct.pack("<q", param), 8)
return setfunc
def pred_wrap(code):
def predfunc(self, error):
return self._parent._error_predicate(code, error.code)
return predfunc
def operation_wrap(code):
def opfunc(self, key, param):
self._atomic_operation(code, key, param)
return opfunc
def fill_options(scope, predicates=False):
_dict = getattr(_opts, scope)
for k,v in _dict.items():
for k, v in _dict.items():
fname = (predicates and 'is_' or 'set_') + k.lower()
code, desc, paramType, paramDesc = v
if predicates:
@ -126,6 +139,7 @@ def fill_options(scope, predicates=False):
klass = globals()['_' + scope + 's']
setattr(klass, fname, f)
def add_operation(fname, v):
code, desc, paramType, paramDesc = v
f = operation_wrap(code)
@ -134,14 +148,16 @@ def add_operation(fname, v):
setattr(globals()['Database'], fname, f)
setattr(globals()['Transaction'], fname, f)
def fill_operations():
_dict = getattr(_opts, 'MutationType')
for k,v in _dict.items():
for k, v in _dict.items():
fname = k.lower()
add_operation(fname, v)
add_operation("bit_" + fname, v)
for scope in ['ClusterOption', 'DatabaseOption', 'TransactionOption', 'NetworkOption']:
fill_options(scope)
@ -150,11 +166,14 @@ fill_options('ErrorPredicate', True)
options = _NetworkOptions(sys.modules[__name__])
predicates = _ErrorPredicates(sys.modules[__name__])
def _set_option(option, param, length):
_capi.fdb_network_set_option(option, param, length)
def _error_predicate(predicate, error_code):
return bool( _capi.fdb_error_predicate(predicate, error_code) )
return bool(_capi.fdb_error_predicate(predicate, error_code))
def make_enum(scope):
_dict = getattr(_opts, scope)
@ -162,15 +181,17 @@ def make_enum(scope):
x = type(scope, (), {})
def makeprop(value, doc):
return property( fget=lambda o:value, doc=doc )
return property(fget=lambda o: value, doc=doc)
for k, v in _dict.items():
setattr(x, k.lower(), makeprop( v[0], v[1] ))
setattr(x, k.lower(), makeprop(v[0], v[1]))
globals()[scope] = x()
make_enum("StreamingMode")
make_enum("ConflictRangeType")
def transactional(*tr_args, **tr_kwargs):
"""Decorate a funcation as transactional.
@ -212,7 +233,7 @@ def transactional(*tr_args, **tr_kwargs):
@functools.wraps(func)
def wrapper(*args, **kwargs):
if isinstance(args[index], TransactionRead):
raise asyncio.Return(( yield asyncio.From(func(*args, **kwargs)) ))
raise asyncio.Return((yield asyncio.From(func(*args, **kwargs))))
largs = list(args)
tr = largs[index] = args[index].create_transaction()
@ -235,9 +256,9 @@ def transactional(*tr_args, **tr_kwargs):
tr = largs[index] = args[index].create_transaction()
committed = False
#retries = 0
#start = datetime.datetime.now()
#last = start
# retries = 0
# start = datetime.datetime.now()
# last = start
while not committed:
try:
@ -247,15 +268,16 @@ def transactional(*tr_args, **tr_kwargs):
except FDBError as e:
tr.on_error(e.code).wait()
#now = datetime.datetime.now()
#td = now - last
#elapsed = (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / float(10**6)
#if elapsed >= 1:
#td = now - start
#print ('fdb WARNING: long transaction (%gs elapsed in transactional function \'%s\' (%d retries, %s))' % (elapsed, func.__name__, retries, committed and 'committed' or 'not yet committed'))
#last = now
# now = datetime.datetime.now()
# td = now - last
# elapsed = (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / float(10**6)
# if elapsed >= 1:
# td = now - start
# print ('fdb WARNING: long transaction (%gs elapsed in transactional function \'%s\' (%d retries, %s))'
# % (elapsed, func.__name__, retries, committed and 'committed' or 'not yet committed'))
# last = now
#retries += 1
# retries += 1
return ret
return wrapper
@ -269,6 +291,7 @@ def transactional(*tr_args, **tr_kwargs):
else:
raise Exception('Invalid use of transactional decorator.')
class FDBError(Exception):
"""This exception is raised when an FDB API call returns an
error. The error code will be stored in the code attribute, and a
@ -276,6 +299,7 @@ class FDBError(Exception):
attribute.
"""
def __init__(self, code):
self.code = code
self._description = None
@ -408,8 +432,10 @@ class TransactionRead(_FDBBase):
return key_or_selector
def get_range(self, begin, end, limit=0, reverse=False, streaming_mode=StreamingMode.iterator):
if begin is None: begin = b''
if end is None: end = b'\xff'
if begin is None:
begin = b''
if end is None:
end = b'\xff'
begin = self._to_selector(begin)
end = self._to_selector(end)
return FDBRange(self, begin, end, limit, reverse, streaming_mode)
@ -423,6 +449,7 @@ class TransactionRead(_FDBBase):
return self.get_range(key.start, key.stop, reverse=(key.step == -1))
return self.get(key)
class Transaction(TransactionRead):
"""A modifiable snapshot of a Database.
@ -464,8 +491,10 @@ class Transaction(TransactionRead):
self.capi.fdb_transaction_clear(self.tpointer, key, len(key))
def clear_range(self, begin, end):
if begin is None: begin = b''
if end is None: end = b'\xff'
if begin is None:
begin = b''
if end is None:
end = b'\xff'
if isinstance(begin, KeySelector):
begin = self.get_key(begin)
if isinstance(end, KeySelector):
@ -534,13 +563,14 @@ class Transaction(TransactionRead):
def __delitem__(self, key):
if isinstance(key, slice):
self.clear_range( key.start, key.stop )
self.clear_range(key.start, key.stop)
else:
self.clear(key)
class Future(_FDBBase):
Event = threading.Event
_state = None #< Hack for trollius
_state = None # < Hack for trollius
def __init__(self, fpointer):
# print('Creating future 0x%x' % fpointer)
@ -562,7 +592,7 @@ class Future(_FDBBase):
raise NotImplementedError
def is_ready(self):
return bool( self.capi.fdb_future_is_ready(self.fpointer) )
return bool(self.capi.fdb_future_is_ready(self.fpointer))
def block_until_ready(self):
self.capi.fdb_future_block_until_ready(self.fpointer)
@ -595,35 +625,42 @@ class Future(_FDBBase):
Returns the index in the parameter list of a ready future."""
if not futures:
raise ValueError("wait_for_any requires at least one future")
d={}
d = {}
ev = futures[0].Event()
for i,f in enumerate(futures):
def cb(ignore,i=i):
for i, f in enumerate(futures):
def cb(ignore, i=i):
if d.setdefault('i', i) == i:
ev.set()
f.on_ready( cb )
f.on_ready(cb)
ev.wait()
return d['i']
# asyncio future protocol
def cancelled(self):
if not self.done(): return False
if not self.done():
return False
e = self.exception()
return getattr(e, 'code', 0) == 1101
done = is_ready
def result(self):
if not self.done(): raise Exception("Future result not available")
if not self.done():
raise Exception("Future result not available")
return self.wait()
def exception(self):
if not self.done(): raise Exception("Future result not available")
if not self.done():
raise Exception("Future result not available")
try:
self.wait()
return None
except BaseException as e:
return e
def add_done_callback(self,fn):
def add_done_callback(self, fn):
self.on_ready(lambda f: self.call_soon_threadsafe(fn, f))
def remove_done_callback(self,fn):
def remove_done_callback(self, fn):
raise NotImplementedError()
@ -649,13 +686,15 @@ class FutureKeyValueArray(Future):
count = ctypes.c_int()
more = ctypes.c_int()
self.capi.fdb_future_get_keyvalue_array(self.fpointer, ctypes.byref(kvs), ctypes.byref(count), ctypes.byref(more))
return ([KeyValue(ctypes.string_at(x.key, x.key_length), ctypes.string_at(x.value, x.value_length)) for x in kvs[0:count.value]], count.value, more.value)
return ([KeyValue(ctypes.string_at(x.key, x.key_length), ctypes.string_at(x.value, x.value_length))
for x in kvs[0:count.value]], count.value, more.value)
# Logically, we should self._release_memory() after extracting the
# KVs but before returning, but then we would have to store
# the KVs on the python side and in most cases we are about to
# destroy the future anyway
class FutureStringArray(Future):
def wait(self):
self.block_until_ready()
@ -664,6 +703,7 @@ class FutureStringArray(Future):
self.capi.fdb_future_get_string_array(self.fpointer, ctypes.byref(strings), ctypes.byref(count))
return list(strings[0:count.value])
class replaceable_property(object):
def __get__(self, obj, cls=None):
return self.method(obj)
@ -671,6 +711,7 @@ class replaceable_property(object):
def __init__(self, method):
self.method = method
class LazyFuture(Future):
def __init__(self, *args, **kwargs):
super(LazyFuture, self).__init__(*args, **kwargs)
@ -702,6 +743,7 @@ class LazyFuture(Future):
# http://bugs.python.org/issue12370
_super = super
class FutureString(LazyFuture):
def __init__(self, *args):
self._error = None
@ -776,13 +818,15 @@ class FutureString(LazyFuture):
def __int__(self):
return int(self.value)
def makewrapper(func):
def tmpfunc(self, *args):
return func(self.value, *args)
return tmpfunc
for i in dir(bytes):
if not i.startswith('_') or i in ('__getitem__','__getslice__','__hash__', '__len__'):
if not i.startswith('_') or i in ('__getitem__', '__getslice__', '__hash__', '__len__'):
setattr(FutureString, i, makewrapper(getattr(bytes, i)))
@ -845,10 +889,9 @@ class Database(FormerFuture):
def __getitem__(self, key):
if isinstance(key, slice):
return self.get_range(key.start, key.stop, reverse=(key.step==-1))
return self.get_range(key.start, key.stop, reverse=(key.step == -1))
return Database.__database_getitem(self, key)
def get_key(self, key_selector):
return Database.__database_get_key(self, key_selector)
@ -955,39 +998,40 @@ class Database(FormerFuture):
def __database_atomic_operation(tr, opcode, key, param):
tr._atomic_operation(opcode, key, param)
### Asynchronous transactions
# Asynchronous transactions
@staticmethod
def declare_asynchronous_transactions():
Return = asyncio.Return
From = asyncio.From
coroutine = asyncio.coroutine
class Database:
@staticmethod
@transactional
@coroutine
def __database_getitem(tr, key):
#raise Return(( yield From( tr[key] ) ))
raise Return( tr[key] )
# raise Return(( yield From( tr[key] ) ))
raise Return(tr[key])
yield None
@staticmethod
@transactional
@coroutine
def __database_get_key(tr, key_selector):
raise Return( tr.get_key(key_selector) )
raise Return(tr.get_key(key_selector))
yield None
@staticmethod
@transactional
@coroutine
def __database_get_range(tr, begin, end, limit, reverse, streaming_mode):
raise Return(( yield From( tr.get_range(begin, end, limit, reverse, streaming_mode).to_list() ) ))
raise Return((yield From(tr.get_range(begin, end, limit, reverse, streaming_mode).to_list())))
@staticmethod
@transactional
@coroutine
def __database_get_range_startswith(tr, prefix, *args, **kwargs):
raise Return(( yield From( tr.get_range_startswith(prefix, *args, **kwargs).to_list() )))
raise Return((yield From(tr.get_range_startswith(prefix, *args, **kwargs).to_list())))
@staticmethod
@transactional
@ -1010,7 +1054,7 @@ class Database(FormerFuture):
@coroutine
def __database_get_and_watch(tr, key):
v = tr.get(key)
raise Return( v, tr.watch(key) )
raise Return(v, tr.watch(key))
yield None
@staticmethod
@ -1018,7 +1062,7 @@ class Database(FormerFuture):
@coroutine
def __database_set_and_watch(tr, key, value):
tr.set(key, value)
raise Return( tr.watch(key) )
raise Return(tr.watch(key))
yield None
@staticmethod
@ -1026,7 +1070,7 @@ class Database(FormerFuture):
@coroutine
def __database_clear_and_watch(tr, key):
del tr[key]
raise Return( tr.watch(key) )
raise Return(tr.watch(key))
yield None
@staticmethod
@ -1046,8 +1090,10 @@ class Database(FormerFuture):
yield None
return Database
fill_operations()
class Cluster(FormerFuture):
def __init__(self, cpointer):
self.cpointer = cpointer
@ -1084,10 +1130,10 @@ class KeySelector(object):
self.offset = offset
def __add__(self, offset):
return KeySelector(self.key, self.or_equal, self.offset+offset)
return KeySelector(self.key, self.or_equal, self.offset + offset)
def __sub__(self, offset):
return KeySelector(self.key, self.or_equal, self.offset-offset)
return KeySelector(self.key, self.or_equal, self.offset - offset)
@classmethod
def last_less_than(cls, key):
@ -1149,11 +1195,13 @@ class KeyValue(object):
def __iter__(self):
return KVIter(self)
def check_error_code(code, func, arguments):
if code:
raise FDBError(code)
return None
if sys.maxsize <= 2**32:
raise Exception("FoundationDB API requires a 64-bit python interpreter!")
if platform.system() == 'Windows':
@ -1174,6 +1222,7 @@ else:
raise Exception("Platform (%s) %s is not supported by the FoundationDB API!" % (sys.platform, platform.system()))
this_dir = os.path.dirname(__file__)
# Preferred installation: The C API library or a symbolic link to the
# library should be in the same directory as this module.
# Failing that, a file named $(capi_name).pth should be in the same directory,
@ -1181,18 +1230,20 @@ this_dir = os.path.dirname(__file__)
# Failing that, we try to load the C API library without qualification, and
# the library should be on the platform's dynamic library search path
def read_pth_file():
pth_file = os.path.join(this_dir, capi_name+'.pth')
if not os.path.exists(pth_file): return None
pth_file = os.path.join(this_dir, capi_name + '.pth')
if not os.path.exists(pth_file):
return None
pth = _open_file(pth_file, "rt").read().strip()
if pth[0] != '/':
pth = os.path.join(this_dir, pth)
return pth
for pth in [
lambda: os.path.join(this_dir, capi_name),
#lambda: os.path.join(this_dir, '../../lib', capi_name), # For compatibility with existing unix installation process... should be removed
# lambda: os.path.join(this_dir, '../../lib', capi_name), # For compatibility with existing unix installation process... should be removed
read_pth_file
]:
]:
p = pth()
if p and os.path.exists(p):
_capi = ctypes.CDLL(os.path.abspath(p))
@ -1208,9 +1259,10 @@ else:
try:
_capi = ctypes.CDLL(lib_path)
except:
raise Exception( "Unable to locate the FoundationDB API shared library!" )
raise Exception("Unable to locate the FoundationDB API shared library!")
else:
raise Exception( "Unable to locate the FoundationDB API shared library!" )
raise Exception("Unable to locate the FoundationDB API shared library!")
def keyToBytes(k):
if hasattr(k, 'as_foundationdb_key'):
@ -1219,6 +1271,7 @@ def keyToBytes(k):
raise TypeError('Key must be of type ' + bytes.__name__)
return k
def valueToBytes(v):
if hasattr(v, 'as_foundationdb_value'):
v = v.as_foundationdb_value()
@ -1226,6 +1279,7 @@ def valueToBytes(v):
raise TypeError('Value must be of type ' + bytes.__name__)
return v
def paramToBytes(v):
if isinstance(v, FutureString):
v = v.value
@ -1235,6 +1289,7 @@ def paramToBytes(v):
raise TypeError('Parameter must be a string')
return v
def optionalParamToBytes(v):
if v is None:
return (None, 0)
@ -1242,6 +1297,7 @@ def optionalParamToBytes(v):
v = paramToBytes(v)
return (v, len(v))
_FDBBase.capi = _capi
_capi.fdb_select_api_version_impl.argtypes = [ctypes.c_int, ctypes.c_int]
@ -1317,7 +1373,8 @@ _capi.fdb_future_get_value.argtypes = [ctypes.c_void_p, ctypes.POINTER(ctypes.c_
_capi.fdb_future_get_value.restype = ctypes.c_int
_capi.fdb_future_get_value.errcheck = check_error_code
_capi.fdb_future_get_keyvalue_array.argtypes = [ctypes.c_void_p, ctypes.POINTER(ctypes.POINTER(KeyValueStruct)), ctypes.POINTER(ctypes.c_int), ctypes.POINTER(ctypes.c_int)]
_capi.fdb_future_get_keyvalue_array.argtypes = [ctypes.c_void_p, ctypes.POINTER(
ctypes.POINTER(KeyValueStruct)), ctypes.POINTER(ctypes.c_int), ctypes.POINTER(ctypes.c_int)]
_capi.fdb_future_get_keyvalue_array.restype = int
_capi.fdb_future_get_keyvalue_array.errcheck = check_error_code
@ -1367,7 +1424,9 @@ _capi.fdb_transaction_get.restype = ctypes.c_void_p
_capi.fdb_transaction_get_key.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int]
_capi.fdb_transaction_get_key.restype = ctypes.c_void_p
_capi.fdb_transaction_get_range.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_void_p, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int]
_capi.fdb_transaction_get_range.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_void_p,
ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int, ctypes.c_int,
ctypes.c_int, ctypes.c_int]
_capi.fdb_transaction_get_range.restype = ctypes.c_void_p
_capi.fdb_transaction_add_conflict_range.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_int, ctypes.c_int]
@ -1415,6 +1474,7 @@ _capi.fdb_transaction_reset.restype = None
if hasattr(ctypes.pythonapi, 'Py_IncRef'):
def _pin_callback(cb):
ctypes.pythonapi.Py_IncRef(ctypes.py_object(cb))
def _unpin_callback(cb):
ctypes.pythonapi.Py_DecRef(ctypes.py_object(cb))
else:
@ -1422,6 +1482,7 @@ else:
_pin_callback = _active_callbacks.add
_unpin_callback = _active_callbacks.remove
def init(event_model=None):
"""Initialize the FDB interface.
@ -1456,7 +1517,8 @@ def init(event_model=None):
import gevent
if gevent.__version__[0] != '0':
def nullf(): pass
def nullf():
pass
class ThreadEvent(object):
def __init__(self):
@ -1493,6 +1555,7 @@ def init(event_model=None):
def _gevent_block_until_ready(self):
e = self.Event()
def is_ready_cb(future):
e.set()
self.on_ready(is_ready_cb)
@ -1505,8 +1568,10 @@ def init(event_model=None):
class DebugEvent(object):
def __init__(self):
self.ev = threading.Event()
def set(self):
self.ev.set()
def wait(self):
while not self.ev.isSet():
self.ev.wait(.001)
@ -1528,10 +1593,12 @@ def init(event_model=None):
asyncio.futures._FUTURE_CLASSES += (Future,)
def _do_not_block(self):
if not self.is_ready(): raise Exception("Future not ready")
if not self.is_ready():
raise Exception("Future not ready")
Future.block_until_ready = _do_not_block
Future.call_soon_threadsafe = asyncio.get_event_loop().call_soon_threadsafe
Future._loop = asyncio.get_event_loop()
def iterate(self):
"""Usage:
fa = tr.get_range(...).iterate()
@ -1580,43 +1647,50 @@ def init(event_model=None):
_network_thread = None
raise
def init_v13(local_address, event_model = None):
def init_v13(local_address, event_model=None):
return init(event_model)
open_clusters = {}
open_databases = {}
cacheLock = threading.Lock()
def open( cluster_file = None, database_name = b'DB', event_model = None ):
def open(cluster_file=None, database_name=b'DB', event_model=None):
"""Opens the given database (or the default database of the cluster indicated
by the fdb.cluster file in a platform-specific location, if no cluster_file
or database_name is provided). Initializes the FDB interface as required."""
with _network_thread_reentrant_lock:
if not _network_thread:
init(event_model = event_model)
init(event_model=event_model)
with cacheLock:
if not cluster_file in open_clusters:
open_clusters[cluster_file] = create_cluster( cluster_file )
if cluster_file not in open_clusters:
open_clusters[cluster_file] = create_cluster(cluster_file)
if not (cluster_file, database_name) in open_databases:
if (cluster_file, database_name) not in open_databases:
open_databases[(cluster_file, database_name)] = open_clusters[cluster_file].open_database(database_name)
return open_databases[(cluster_file, database_name)]
def open_v13( cluster_id_path, database_name, local_address = None, event_model = None ):
def open_v13(cluster_id_path, database_name, local_address=None, event_model=None):
return open(cluster_id_path, database_name, event_model)
import atexit
@atexit.register
def _stop_on_exit():
if _network_thread:
_capi.fdb_stop_network()
_network_thread.join()
def strinc(key):
key = key.rstrip(b'\xff')
if len(key) == 0:

Some files were not shown because too many files have changed in this diff Show More