Merge branch 'feature-range-feed' into blob_full
This commit is contained in:
commit
c2d1d1704f
|
@ -63,6 +63,7 @@ packaging/msi/obj
|
|||
simfdb
|
||||
tests/oldBinaries
|
||||
trace.*.xml
|
||||
.venv
|
||||
|
||||
# Editor files
|
||||
*.iml
|
||||
|
|
|
@ -562,3 +562,28 @@ folly_memcpy:
|
|||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
||||
|
||||
Arm Limited (optimized-routines)
|
||||
|
||||
MIT License
|
||||
|
||||
Copyright (c) 1999-2019, Arm Limited.
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
|
|
|
@ -157,6 +157,7 @@ if(CMAKE_SYSTEM_NAME STREQUAL "FreeBSD")
|
|||
endif()
|
||||
|
||||
include(CompileBoost)
|
||||
include(GetMsgpack)
|
||||
add_subdirectory(flow)
|
||||
add_subdirectory(fdbrpc)
|
||||
add_subdirectory(fdbclient)
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
<img alt="FoundationDB logo" src="documentation/FDB_logo.png?raw=true" width="400">
|
||||
|
||||
![Build Status](https://codebuild.us-west-2.amazonaws.com/badges?uuid=eyJlbmNyeXB0ZWREYXRhIjoiSFd4OEl4QllpbUQrQ0xyN25ZM1FIM3MyZ0tBV3Y1cm9wU293V2Rlb3Qyei9XMlIrb2Y0YkFqOTBzc2w5ZjZScFdjME9pcGRXTGNRMWkwY2ZPbGMwUUdNPSIsIml2UGFyYW1ldGVyU3BlYyI6IlBqTkJjeCt5QkNuTlBGZEwiLCJtYXRlcmlhbFNldFNlcmlhbCI6MX0%3D&branch=master)
|
||||
![Build Status](https://codebuild.us-west-2.amazonaws.com/badges?uuid=eyJlbmNyeXB0ZWREYXRhIjoiZ1FhRlNwU0JXeHVpZkt0a0k0QlNJK3BEUkplTGVRYnk3azBoT1FOazBQbGlIeDgrYmRJZVhuSUI4RTd3RWJWcjVMT3ZPTzV0NXlCTWpPTGlPVlMzckJJPSIsIml2UGFyYW1ldGVyU3BlYyI6IlB0TWVCM0VYdU5PQWtMUFYiLCJtYXRlcmlhbFNldFNlcmlhbCI6MX0%3D&branch=master)
|
||||
|
||||
FoundationDB is a distributed database designed to handle large volumes of structured data across clusters of commodity servers. It organizes data as an ordered key-value store and employs ACID transactions for all operations. It is especially well-suited for read/write workloads but also has excellent performance for write-intensive workloads. Users interact with the database using API language binding.
|
||||
|
||||
|
|
|
@ -152,19 +152,18 @@ void* fdb_network_thread(void* args) {
|
|||
}
|
||||
|
||||
int genprefix(char* str, char* prefix, int prefixlen, int prefixpadding, int rows, int len) {
|
||||
const int rowdigit = digits(rows);
|
||||
const int paddinglen = len - (prefixlen + rowdigit) - 1;
|
||||
int offset = 0;
|
||||
if (prefixpadding) {
|
||||
memset(str, 'x', paddinglen);
|
||||
offset += paddinglen;
|
||||
}
|
||||
memcpy(str + offset, prefix, prefixlen);
|
||||
str[len - 1] = '\0';
|
||||
return offset + prefixlen;
|
||||
const int rowdigit = digits(rows);
|
||||
const int paddinglen = len - (prefixlen + rowdigit) - 1;
|
||||
int offset = 0;
|
||||
if (prefixpadding) {
|
||||
memset(str, 'x', paddinglen);
|
||||
offset += paddinglen;
|
||||
}
|
||||
memcpy(str + offset, prefix, prefixlen);
|
||||
str[len - 1] = '\0';
|
||||
return offset + prefixlen;
|
||||
}
|
||||
|
||||
|
||||
/* cleanup database */
|
||||
int cleanup(FDBTransaction* transaction, mako_args_t* args) {
|
||||
struct timespec timer_start, timer_end;
|
||||
|
@ -189,10 +188,19 @@ int cleanup(FDBTransaction* transaction, mako_args_t* args) {
|
|||
free(prefixstr);
|
||||
len += 1;
|
||||
|
||||
retryTxn:
|
||||
clock_gettime(CLOCK_MONOTONIC_COARSE, &timer_start);
|
||||
|
||||
fdb_transaction_clear_range(transaction, (uint8_t*)beginstr, len + 1, (uint8_t*)endstr, len + 1);
|
||||
if (commit_transaction(transaction) != FDB_SUCCESS)
|
||||
switch (commit_transaction(transaction)) {
|
||||
case (FDB_SUCCESS):
|
||||
break;
|
||||
case (FDB_ERROR_RETRY):
|
||||
fdb_transaction_reset(transaction);
|
||||
goto retryTxn;
|
||||
default:
|
||||
goto failExit;
|
||||
}
|
||||
|
||||
fdb_transaction_reset(transaction);
|
||||
clock_gettime(CLOCK_MONOTONIC_COARSE, &timer_end);
|
||||
|
@ -308,11 +316,19 @@ int populate(FDBTransaction* transaction,
|
|||
|
||||
/* commit every 100 inserts (default) */
|
||||
if (i % args->txnspec.ops[OP_INSERT][OP_COUNT] == 0) {
|
||||
retryTxn:
|
||||
if (stats->xacts % args->sampling == 0) {
|
||||
clock_gettime(CLOCK_MONOTONIC, &timer_start_commit);
|
||||
}
|
||||
if (commit_transaction(transaction) != FDB_SUCCESS)
|
||||
|
||||
switch (commit_transaction(transaction)) {
|
||||
case (FDB_SUCCESS):
|
||||
break;
|
||||
case (FDB_ERROR_RETRY):
|
||||
goto retryTxn;
|
||||
default:
|
||||
goto failExit;
|
||||
}
|
||||
|
||||
/* xact latency stats */
|
||||
if (stats->xacts % args->sampling == 0) {
|
||||
|
@ -337,20 +353,41 @@ int populate(FDBTransaction* transaction,
|
|||
xacts++; /* for throttling */
|
||||
}
|
||||
}
|
||||
|
||||
if (stats->xacts % args->sampling == 0) {
|
||||
clock_gettime(CLOCK_MONOTONIC, &timer_start_commit);
|
||||
}
|
||||
if (commit_transaction(transaction) != FDB_SUCCESS)
|
||||
goto failExit;
|
||||
|
||||
/* xact latency stats */
|
||||
if (stats->xacts % args->sampling == 0) {
|
||||
clock_gettime(CLOCK_MONOTONIC, &timer_per_xact_end);
|
||||
update_op_lat_stats(
|
||||
&timer_start_commit, &timer_per_xact_end, OP_COMMIT, stats, block, elem_size, is_memory_allocated);
|
||||
update_op_lat_stats(
|
||||
&timer_per_xact_start, &timer_per_xact_end, OP_TRANSACTION, stats, block, elem_size, is_memory_allocated);
|
||||
time_t start_time_sec, current_time_sec;
|
||||
time(&start_time_sec);
|
||||
int is_committed = false;
|
||||
// will hit FDB_ERROR_RETRY if running mako with multi-version client
|
||||
while (!is_committed) {
|
||||
if (stats->xacts % args->sampling == 0) {
|
||||
clock_gettime(CLOCK_MONOTONIC, &timer_start_commit);
|
||||
}
|
||||
int rc;
|
||||
if ((rc = commit_transaction(transaction) != FDB_SUCCESS)) {
|
||||
if (rc == FDB_ERROR_RETRY) {
|
||||
time(¤t_time_sec);
|
||||
if (difftime(current_time_sec, start_time_sec) > 5) {
|
||||
goto failExit;
|
||||
} else {
|
||||
continue;
|
||||
}
|
||||
} else {
|
||||
goto failExit;
|
||||
}
|
||||
}
|
||||
is_committed = true;
|
||||
/* xact latency stats */
|
||||
if (stats->xacts % args->sampling == 0) {
|
||||
clock_gettime(CLOCK_MONOTONIC, &timer_per_xact_end);
|
||||
update_op_lat_stats(
|
||||
&timer_start_commit, &timer_per_xact_end, OP_COMMIT, stats, block, elem_size, is_memory_allocated);
|
||||
update_op_lat_stats(&timer_per_xact_start,
|
||||
&timer_per_xact_end,
|
||||
OP_TRANSACTION,
|
||||
stats,
|
||||
block,
|
||||
elem_size,
|
||||
is_memory_allocated);
|
||||
}
|
||||
}
|
||||
|
||||
clock_gettime(CLOCK_MONOTONIC, &timer_end);
|
||||
|
@ -555,7 +592,13 @@ retryTxn:
|
|||
if (keyend > args->rows - 1) {
|
||||
keyend = args->rows - 1;
|
||||
}
|
||||
genkey(keystr2, KEYPREFIX, KEYPREFIXLEN, args->prefixpadding, keyend, args->rows, args->key_length + 1);
|
||||
genkey(keystr2,
|
||||
KEYPREFIX,
|
||||
KEYPREFIXLEN,
|
||||
args->prefixpadding,
|
||||
keyend,
|
||||
args->rows,
|
||||
args->key_length + 1);
|
||||
}
|
||||
|
||||
if (stats->xacts % args->sampling == 0) {
|
||||
|
@ -1209,7 +1252,8 @@ int worker_process_main(mako_args_t* args, int worker_id, mako_shmhdr_t* shm, pi
|
|||
|
||||
/* Set client Log group */
|
||||
if (strlen(args->log_group) != 0) {
|
||||
err = fdb_network_set_option(FDB_NET_OPTION_TRACE_LOG_GROUP, (uint8_t*)args->log_group, strlen(args->log_group));
|
||||
err =
|
||||
fdb_network_set_option(FDB_NET_OPTION_TRACE_LOG_GROUP, (uint8_t*)args->log_group, strlen(args->log_group));
|
||||
if (err) {
|
||||
fprintf(stderr, "ERROR: fdb_network_set_option(FDB_NET_OPTION_TRACE_LOG_GROUP): %s\n", fdb_get_error(err));
|
||||
}
|
||||
|
|
|
@ -71,13 +71,13 @@ int digits(int num) {
|
|||
/* prefix is "mako" by default, prefixpadding = 1 means 'x' will be in front rather than trailing the keyname */
|
||||
/* len is the buffer size, key length + null */
|
||||
void genkey(char* str, char* prefix, int prefixlen, int prefixpadding, int num, int rows, int len) {
|
||||
const int rowdigit = digits(rows);
|
||||
const int prefixoffset = prefixpadding ? len - (prefixlen + rowdigit) - 1 : 0;
|
||||
char* prefixstr = (char*)alloca(sizeof(char) * (prefixlen + rowdigit + 1));
|
||||
snprintf(prefixstr, prefixlen + rowdigit + 1, "%s%0.*d", prefix, rowdigit, num);
|
||||
memset(str, 'x', len);
|
||||
memcpy(str + prefixoffset, prefixstr, prefixlen + rowdigit);
|
||||
str[len - 1] = '\0';
|
||||
const int rowdigit = digits(rows);
|
||||
const int prefixoffset = prefixpadding ? len - (prefixlen + rowdigit) - 1 : 0;
|
||||
char* prefixstr = (char*)alloca(sizeof(char) * (prefixlen + rowdigit + 1));
|
||||
snprintf(prefixstr, prefixlen + rowdigit + 1, "%s%0.*d", prefix, rowdigit, num);
|
||||
memset(str, 'x', len);
|
||||
memcpy(str + prefixoffset, prefixstr, prefixlen + rowdigit);
|
||||
str[len - 1] = '\0';
|
||||
}
|
||||
|
||||
/* This is another sorting algorithm used to calculate latency parameters */
|
||||
|
|
|
@ -43,7 +43,8 @@ set(go_options_file ${GO_DEST}/src/fdb/generated.go)
|
|||
|
||||
set(go_env GOPATH=${GOPATH}
|
||||
C_INCLUDE_PATH=${CMAKE_BINARY_DIR}/bindings/c/foundationdb:${CMAKE_SOURCE_DIR}/bindings/c
|
||||
CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/lib)
|
||||
CGO_LDFLAGS=-L${CMAKE_BINARY_DIR}/lib
|
||||
GO111MODULE=off)
|
||||
|
||||
foreach(src_file IN LISTS SRCS)
|
||||
set(dest_file ${GO_DEST}/${src_file})
|
||||
|
|
|
@ -77,19 +77,37 @@ add_dependencies(packages python_package)
|
|||
|
||||
if (NOT WIN32 AND NOT OPEN_FOR_IDE)
|
||||
add_fdbclient_test(
|
||||
NAME fdbcli_tests
|
||||
NAME single_process_fdbcli_tests
|
||||
COMMAND ${CMAKE_SOURCE_DIR}/bindings/python/tests/fdbcli_tests.py
|
||||
${CMAKE_BINARY_DIR}/bin/fdbcli
|
||||
${CMAKE_BINARY_DIR}
|
||||
@CLUSTER_FILE@
|
||||
1
|
||||
)
|
||||
add_fdbclient_test(
|
||||
NAME multi_process_fdbcli_tests
|
||||
PROCESS_NUMBER 5
|
||||
TEST_TIMEOUT 120 # The test can take near to 1 minutes sometime, set timeout to 2 minutes to be safe
|
||||
COMMAND ${CMAKE_SOURCE_DIR}/bindings/python/tests/fdbcli_tests.py
|
||||
${CMAKE_BINARY_DIR}/bin/fdbcli
|
||||
${CMAKE_BINARY_DIR}
|
||||
@CLUSTER_FILE@
|
||||
5
|
||||
)
|
||||
if (TARGET external_client) # external_client copies fdb_c to bindings/c/libfdb_c.so
|
||||
add_fdbclient_test(
|
||||
NAME single_process_external_client_fdbcli_tests
|
||||
COMMAND ${CMAKE_SOURCE_DIR}/bindings/python/tests/fdbcli_tests.py
|
||||
${CMAKE_BINARY_DIR}
|
||||
@CLUSTER_FILE@
|
||||
--external-client-library ${CMAKE_BINARY_DIR}/bindings/c/libfdb_c.so
|
||||
)
|
||||
add_fdbclient_test(
|
||||
NAME multi_process_external_client_fdbcli_tests
|
||||
PROCESS_NUMBER 5
|
||||
TEST_TIMEOUT 120 # The test can take near to 1 minutes sometime, set timeout to 2 minutes to be safe
|
||||
COMMAND ${CMAKE_SOURCE_DIR}/bindings/python/tests/fdbcli_tests.py
|
||||
${CMAKE_BINARY_DIR}
|
||||
@CLUSTER_FILE@
|
||||
5
|
||||
--external-client-library ${CMAKE_BINARY_DIR}/bindings/c/libfdb_c.so
|
||||
)
|
||||
endif()
|
||||
endif()
|
||||
|
|
|
@ -1,14 +1,17 @@
|
|||
#!/usr/bin/env python3
|
||||
|
||||
import sys
|
||||
import os
|
||||
import subprocess
|
||||
import logging
|
||||
import functools
|
||||
import json
|
||||
import time
|
||||
import random
|
||||
from argparse import ArgumentParser, RawDescriptionHelpFormatter
|
||||
|
||||
def enable_logging(level=logging.ERROR):
|
||||
|
||||
def enable_logging(level=logging.DEBUG):
|
||||
"""Enable logging in the function with the specified logging level
|
||||
|
||||
Args:
|
||||
|
@ -16,7 +19,7 @@ def enable_logging(level=logging.ERROR):
|
|||
"""
|
||||
def func_decorator(func):
|
||||
@functools.wraps(func)
|
||||
def wrapper(*args,**kwargs):
|
||||
def wrapper(*args, **kwargs):
|
||||
# initialize logger
|
||||
logger = logging.getLogger(func.__name__)
|
||||
logger.setLevel(level)
|
||||
|
@ -32,6 +35,7 @@ def enable_logging(level=logging.ERROR):
|
|||
return wrapper
|
||||
return func_decorator
|
||||
|
||||
|
||||
def run_fdbcli_command(*args):
|
||||
"""run the fdbcli statement: fdbcli --exec '<arg1> <arg2> ... <argN>'.
|
||||
|
||||
|
@ -39,7 +43,8 @@ def run_fdbcli_command(*args):
|
|||
string: Console output from fdbcli
|
||||
"""
|
||||
commands = command_template + ["{}".format(' '.join(args))]
|
||||
return subprocess.run(commands, stdout=subprocess.PIPE).stdout.decode('utf-8').strip()
|
||||
return subprocess.run(commands, stdout=subprocess.PIPE, env=fdbcli_env).stdout.decode('utf-8').strip()
|
||||
|
||||
|
||||
def run_fdbcli_command_and_get_error(*args):
|
||||
"""run the fdbcli statement: fdbcli --exec '<arg1> <arg2> ... <argN>'.
|
||||
|
@ -48,7 +53,8 @@ def run_fdbcli_command_and_get_error(*args):
|
|||
string: Stderr output from fdbcli
|
||||
"""
|
||||
commands = command_template + ["{}".format(' '.join(args))]
|
||||
return subprocess.run(commands, stdout=subprocess.PIPE, stderr=subprocess.PIPE).stderr.decode('utf-8').strip()
|
||||
return subprocess.run(commands, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=fdbcli_env).stderr.decode('utf-8').strip()
|
||||
|
||||
|
||||
@enable_logging()
|
||||
def advanceversion(logger):
|
||||
|
@ -72,6 +78,7 @@ def advanceversion(logger):
|
|||
logger.debug("Read version: {}".format(version4))
|
||||
assert version4 >= version3
|
||||
|
||||
|
||||
@enable_logging()
|
||||
def maintenance(logger):
|
||||
# expected fdbcli output when running 'maintenance' while there's no ongoing maintenance
|
||||
|
@ -94,45 +101,52 @@ def maintenance(logger):
|
|||
output3 = run_fdbcli_command('maintenance')
|
||||
assert output3 == no_maintenance_output
|
||||
|
||||
|
||||
@enable_logging()
|
||||
def setclass(logger):
|
||||
# get all processes' network addresses
|
||||
output1 = run_fdbcli_command('setclass')
|
||||
class_type_line_1 = output1.split('\n')[-1]
|
||||
logger.debug(class_type_line_1)
|
||||
# check process' network address
|
||||
assert '127.0.0.1' in class_type_line_1
|
||||
network_address = ':'.join(class_type_line_1.split(':')[:2])
|
||||
logger.debug("Network address: {}".format(network_address))
|
||||
# check class type
|
||||
assert 'unset' in class_type_line_1
|
||||
# check class source
|
||||
assert 'command_line' in class_type_line_1
|
||||
logger.debug(output1)
|
||||
# except the first line, each line is one process
|
||||
process_types = output1.split('\n')[1:]
|
||||
assert len(process_types) == args.process_number
|
||||
addresses = []
|
||||
for line in process_types:
|
||||
assert '127.0.0.1' in line
|
||||
# check class type
|
||||
assert 'unset' in line
|
||||
# check class source
|
||||
assert 'command_line' in line
|
||||
# check process' network address
|
||||
network_address = ':'.join(line.split(':')[:2])
|
||||
logger.debug("Network address: {}".format(network_address))
|
||||
addresses.append(network_address)
|
||||
random_address = random.choice(addresses)
|
||||
logger.debug("Randomly selected address: {}".format(random_address))
|
||||
# set class to a random valid type
|
||||
class_types = ['storage', 'storage', 'transaction', 'resolution',
|
||||
'commit_proxy', 'grv_proxy', 'master', 'stateless', 'log',
|
||||
'router', 'cluster_controller', 'fast_restore', 'data_distributor',
|
||||
'coordinator', 'ratekeeper', 'storage_cache', 'backup'
|
||||
]
|
||||
class_types = ['storage', 'transaction', 'resolution',
|
||||
'commit_proxy', 'grv_proxy', 'master', 'stateless', 'log',
|
||||
'router', 'cluster_controller', 'fast_restore', 'data_distributor',
|
||||
'coordinator', 'ratekeeper', 'storage_cache', 'backup'
|
||||
]
|
||||
random_class_type = random.choice(class_types)
|
||||
logger.debug("Change to type: {}".format(random_class_type))
|
||||
run_fdbcli_command('setclass', network_address, random_class_type)
|
||||
run_fdbcli_command('setclass', random_address, random_class_type)
|
||||
# check the set successful
|
||||
output2 = run_fdbcli_command('setclass')
|
||||
class_type_line_2 = output2.split('\n')[-1]
|
||||
logger.debug(class_type_line_2)
|
||||
logger.debug(output2)
|
||||
assert random_address in output2
|
||||
process_types = output2.split('\n')[1:]
|
||||
# check process' network address
|
||||
assert network_address in class_type_line_2
|
||||
# check class type changed to the specified value
|
||||
assert random_class_type in class_type_line_2
|
||||
# check class source
|
||||
assert 'set_class' in class_type_line_2
|
||||
# set back to default
|
||||
run_fdbcli_command('setclass', network_address, 'default')
|
||||
# everything should be back to the same as before
|
||||
output3 = run_fdbcli_command('setclass')
|
||||
class_type_line_3 = output3.split('\n')[-1]
|
||||
logger.debug(class_type_line_3)
|
||||
assert class_type_line_3 == class_type_line_1
|
||||
for line in process_types:
|
||||
if random_address in line:
|
||||
# check class type changed to the specified value
|
||||
assert random_class_type in line
|
||||
# check class source
|
||||
assert 'set_class' in line
|
||||
# set back to unset
|
||||
run_fdbcli_command('setclass', random_address, 'unset')
|
||||
|
||||
|
||||
@enable_logging()
|
||||
def lockAndUnlock(logger):
|
||||
|
@ -148,7 +162,7 @@ def lockAndUnlock(logger):
|
|||
output2 = run_fdbcli_command_and_get_error("lock")
|
||||
assert output2 == 'ERROR: Database is locked (1038)'
|
||||
# unlock the database
|
||||
process = subprocess.Popen(command_template + ['unlock ' + lock_uid], stdin = subprocess.PIPE, stdout = subprocess.PIPE)
|
||||
process = subprocess.Popen(command_template + ['unlock ' + lock_uid], stdin=subprocess.PIPE, stdout=subprocess.PIPE, env=fdbcli_env)
|
||||
line1 = process.stdout.readline()
|
||||
# The randome passphrease we need to confirm to proceed the unlocking
|
||||
line2 = process.stdout.readline()
|
||||
|
@ -159,6 +173,7 @@ def lockAndUnlock(logger):
|
|||
assert output3.decode('utf-8').strip() == 'Database unlocked.'
|
||||
assert not get_value_from_status_json(True, 'cluster', 'database_lock_state', 'locked')
|
||||
|
||||
|
||||
@enable_logging()
|
||||
def kill(logger):
|
||||
output1 = run_fdbcli_command('kill')
|
||||
|
@ -168,11 +183,11 @@ def kill(logger):
|
|||
address = lines[1]
|
||||
logger.debug("Address: {}".format(address))
|
||||
old_generation = get_value_from_status_json(False, 'cluster', 'generation')
|
||||
# This is currently an issue with fdbcli,
|
||||
# where you need to first run 'kill' to initialize processes' list
|
||||
# This is currently an issue with fdbcli,
|
||||
# where you need to first run 'kill' to initialize processes' list
|
||||
# and then specify the certain process to kill
|
||||
process = subprocess.Popen(command_template[:-1], stdin = subprocess.PIPE, stdout = subprocess.PIPE)
|
||||
#
|
||||
process = subprocess.Popen(command_template[:-1], stdin=subprocess.PIPE, stdout=subprocess.PIPE, env=fdbcli_env)
|
||||
#
|
||||
output2, err = process.communicate(input='kill; kill {}\n'.format(address).encode())
|
||||
logger.debug(output2)
|
||||
# wait for a second for the cluster recovery
|
||||
|
@ -181,6 +196,7 @@ def kill(logger):
|
|||
logger.debug("Old: {}, New: {}".format(old_generation, new_generation))
|
||||
assert new_generation > old_generation
|
||||
|
||||
|
||||
@enable_logging()
|
||||
def suspend(logger):
|
||||
output1 = run_fdbcli_command('suspend')
|
||||
|
@ -200,7 +216,7 @@ def suspend(logger):
|
|||
assert len(pinfo) == 1
|
||||
pid = pinfo[0].split(' ')[0]
|
||||
logger.debug("Pid: {}".format(pid))
|
||||
process = subprocess.Popen(command_template[:-1], stdin = subprocess.PIPE, stdout = subprocess.PIPE)
|
||||
process = subprocess.Popen(command_template[:-1], stdin=subprocess.PIPE, stdout=subprocess.PIPE, env=fdbcli_env)
|
||||
# suspend the process for enough long time
|
||||
output2, err = process.communicate(input='suspend; suspend 3600 {}\n'.format(address).encode())
|
||||
# the cluster should be unavailable after the only process being suspended
|
||||
|
@ -213,7 +229,7 @@ def suspend(logger):
|
|||
kill_output = subprocess.check_output(['kill', pid]).decode().strip()
|
||||
logger.debug("Kill result: {}".format(kill_output))
|
||||
# The process should come back after a few time
|
||||
duration = 0 # seconds we already wait
|
||||
duration = 0 # seconds we already wait
|
||||
while not get_value_from_status_json(False, 'client', 'database_status', 'available') and duration < 60:
|
||||
logger.debug("Sleep for 1 second to wait cluster recovery")
|
||||
time.sleep(1)
|
||||
|
@ -221,6 +237,7 @@ def suspend(logger):
|
|||
# at most after 60 seconds, the cluster should be available
|
||||
assert get_value_from_status_json(False, 'client', 'database_status', 'available')
|
||||
|
||||
|
||||
def get_value_from_status_json(retry, *args):
|
||||
while True:
|
||||
result = json.loads(run_fdbcli_command('status', 'json'))
|
||||
|
@ -229,9 +246,10 @@ def get_value_from_status_json(retry, *args):
|
|||
for arg in args:
|
||||
assert arg in result
|
||||
result = result[arg]
|
||||
|
||||
|
||||
return result
|
||||
|
||||
|
||||
@enable_logging()
|
||||
def consistencycheck(logger):
|
||||
consistency_check_on_output = 'ConsistencyCheck is on'
|
||||
|
@ -245,6 +263,7 @@ def consistencycheck(logger):
|
|||
output3 = run_fdbcli_command('consistencycheck')
|
||||
assert output3 == consistency_check_on_output
|
||||
|
||||
|
||||
@enable_logging()
|
||||
def cache_range(logger):
|
||||
# this command is currently experimental
|
||||
|
@ -252,6 +271,7 @@ def cache_range(logger):
|
|||
run_fdbcli_command('cache_range', 'set', 'a', 'b')
|
||||
run_fdbcli_command('cache_range', 'clear', 'a', 'b')
|
||||
|
||||
|
||||
@enable_logging()
|
||||
def datadistribution(logger):
|
||||
output1 = run_fdbcli_command('datadistribution', 'off')
|
||||
|
@ -271,6 +291,7 @@ def datadistribution(logger):
|
|||
assert output6 == 'Data distribution is enabled for rebalance.'
|
||||
time.sleep(1)
|
||||
|
||||
|
||||
@enable_logging()
|
||||
def transaction(logger):
|
||||
"""This test will cover the transaction related fdbcli commands.
|
||||
|
@ -280,7 +301,7 @@ def transaction(logger):
|
|||
"""
|
||||
err1 = run_fdbcli_command_and_get_error('set', 'key', 'value')
|
||||
assert err1 == 'ERROR: writemode must be enabled to set or clear keys in the database.'
|
||||
process = subprocess.Popen(command_template[:-1], stdin = subprocess.PIPE, stdout = subprocess.PIPE)
|
||||
process = subprocess.Popen(command_template[:-1], stdin=subprocess.PIPE, stdout=subprocess.PIPE, env=fdbcli_env)
|
||||
transaction_flow = ['writemode on', 'begin', 'getversion', 'set key value', 'get key', 'commit']
|
||||
output1, _ = process.communicate(input='\n'.join(transaction_flow).encode())
|
||||
# split the output into lines
|
||||
|
@ -299,13 +320,13 @@ def transaction(logger):
|
|||
output2 = run_fdbcli_command('get', 'key')
|
||||
assert output2 == "`key' is `value'"
|
||||
# test rollback and read-your-write behavior
|
||||
process = subprocess.Popen(command_template[:-1], stdin = subprocess.PIPE, stdout = subprocess.PIPE)
|
||||
process = subprocess.Popen(command_template[:-1], stdin=subprocess.PIPE, stdout=subprocess.PIPE, env=fdbcli_env)
|
||||
transaction_flow = [
|
||||
'writemode on', 'begin', 'getrange a z',
|
||||
'writemode on', 'begin', 'getrange a z',
|
||||
'clear key', 'get key',
|
||||
# 'option on READ_YOUR_WRITES_DISABLE', 'get key',
|
||||
'rollback'
|
||||
]
|
||||
]
|
||||
output3, _ = process.communicate(input='\n'.join(transaction_flow).encode())
|
||||
lines = list(filter(len, output3.decode().split('\n')))[-5:]
|
||||
# lines[0] == "Transaction started" and lines[1] == 'Range limited to 25 keys'
|
||||
|
@ -316,13 +337,13 @@ def transaction(logger):
|
|||
output4 = run_fdbcli_command('get', 'key')
|
||||
assert output4 == "`key' is `value'"
|
||||
# test read_your_write_disable option and clear the inserted key
|
||||
process = subprocess.Popen(command_template[:-1], stdin = subprocess.PIPE, stdout = subprocess.PIPE)
|
||||
process = subprocess.Popen(command_template[:-1], stdin=subprocess.PIPE, stdout=subprocess.PIPE, env=fdbcli_env)
|
||||
transaction_flow = [
|
||||
'writemode on', 'begin',
|
||||
'option on READ_YOUR_WRITES_DISABLE',
|
||||
'clear key', 'get key',
|
||||
'commit'
|
||||
]
|
||||
]
|
||||
output6, _ = process.communicate(input='\n'.join(transaction_flow).encode())
|
||||
lines = list(filter(len, output6.decode().split('\n')))[-4:]
|
||||
assert lines[1] == 'Option enabled for current transaction'
|
||||
|
@ -332,16 +353,18 @@ def transaction(logger):
|
|||
output7 = run_fdbcli_command('get', 'key')
|
||||
assert output7 == "`key': not found"
|
||||
|
||||
|
||||
def get_fdb_process_addresses(logger):
|
||||
# get all processes' network addresses
|
||||
output = run_fdbcli_command('kill')
|
||||
logger.debug(output)
|
||||
# except the first line, each line is one process
|
||||
addresses = output.split('\n')[1:]
|
||||
assert len(addresses) == process_number
|
||||
assert len(addresses) == args.process_number
|
||||
return addresses
|
||||
|
||||
@enable_logging()
|
||||
|
||||
@enable_logging(logging.DEBUG)
|
||||
def coordinators(logger):
|
||||
# we should only have one coordinator for now
|
||||
output1 = run_fdbcli_command('coordinators')
|
||||
|
@ -366,8 +389,10 @@ def coordinators(logger):
|
|||
# auto change should go back to 1 coordinator
|
||||
run_fdbcli_command('coordinators', 'auto')
|
||||
assert len(get_value_from_status_json(True, 'client', 'coordinators', 'coordinators')) == 1
|
||||
wait_for_database_available(logger)
|
||||
|
||||
@enable_logging()
|
||||
|
||||
@enable_logging(logging.DEBUG)
|
||||
def exclude(logger):
|
||||
# get all processes' network addresses
|
||||
addresses = get_fdb_process_addresses(logger)
|
||||
|
@ -379,7 +404,7 @@ def exclude(logger):
|
|||
# randomly pick one and exclude the process
|
||||
excluded_address = random.choice(addresses)
|
||||
# If we see "not enough space" error, use FORCE option to proceed
|
||||
# this should be a safe operation as we do not need any storage space for the test
|
||||
# this should be a safe operation as we do not need any storage space for the test
|
||||
force = False
|
||||
# sometimes we need to retry the exclude
|
||||
while True:
|
||||
|
@ -413,8 +438,11 @@ def exclude(logger):
|
|||
# check the include is successful
|
||||
output4 = run_fdbcli_command('exclude')
|
||||
assert no_excluded_process_output in output4
|
||||
wait_for_database_available(logger)
|
||||
|
||||
# read the system key 'k', need to enable the option first
|
||||
|
||||
|
||||
def read_system_key(k):
|
||||
output = run_fdbcli_command('option', 'on', 'READ_SYSTEM_KEYS;', 'get', k)
|
||||
if 'is' not in output:
|
||||
|
@ -423,11 +451,14 @@ def read_system_key(k):
|
|||
_, value = output.split(' is ')
|
||||
return value
|
||||
|
||||
|
||||
@enable_logging()
|
||||
def throttle(logger):
|
||||
# no throttled tags at the beginning
|
||||
no_throttle_tags_output = 'There are no throttled tags'
|
||||
assert run_fdbcli_command('throttle', 'list') == no_throttle_tags_output
|
||||
output = run_fdbcli_command('throttle', 'list')
|
||||
logger.debug(output)
|
||||
assert output == no_throttle_tags_output
|
||||
# test 'throttle enable auto'
|
||||
run_fdbcli_command('throttle', 'enable', 'auto')
|
||||
# verify the change is applied by reading the system key
|
||||
|
@ -440,30 +471,89 @@ def throttle(logger):
|
|||
assert enable_flag == "`0'"
|
||||
# TODO : test manual throttling, not easy to do now
|
||||
|
||||
|
||||
def wait_for_database_available(logger):
|
||||
# sometimes the change takes some time to have effect and the database can be unavailable at that time
|
||||
# this is to wait until the database is available again
|
||||
while not get_value_from_status_json(True, 'client', 'database_status', 'available'):
|
||||
logger.debug("Database unavailable for now, wait for one second")
|
||||
time.sleep(1)
|
||||
|
||||
|
||||
@enable_logging()
|
||||
def profile(logger):
|
||||
# profile list should return the same list as kill
|
||||
addresses = get_fdb_process_addresses(logger)
|
||||
output1 = run_fdbcli_command('profile', 'list')
|
||||
assert output1.split('\n') == addresses
|
||||
# check default output
|
||||
default_profile_client_get_output = 'Client profiling rate is set to default and size limit is set to default.'
|
||||
output2 = run_fdbcli_command('profile', 'client', 'get')
|
||||
assert output2 == default_profile_client_get_output
|
||||
# set rate and size limit
|
||||
run_fdbcli_command('profile', 'client', 'set', '0.5', '1GB')
|
||||
output3 = run_fdbcli_command('profile', 'client', 'get')
|
||||
logger.debug(output3)
|
||||
output3_list = output3.split(' ')
|
||||
assert float(output3_list[6]) == 0.5
|
||||
# size limit should be 1GB
|
||||
assert output3_list[-1] == '1000000000.'
|
||||
# change back to default value and check
|
||||
run_fdbcli_command('profile', 'client', 'set', 'default', 'default')
|
||||
assert run_fdbcli_command('profile', 'client', 'get') == default_profile_client_get_output
|
||||
|
||||
|
||||
@enable_logging()
|
||||
def triggerddteaminfolog(logger):
|
||||
# this command is straightforward and only has one code path
|
||||
output = run_fdbcli_command('triggerddteaminfolog')
|
||||
assert output == 'Triggered team info logging in data distribution.'
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
# fdbcli_tests.py <path_to_fdbcli_binary> <path_to_fdb_cluster_file> <process_number>
|
||||
assert len(sys.argv) == 4, "Please pass arguments: <path_to_fdbcli_binary> <path_to_fdb_cluster_file> <process_number>"
|
||||
parser = ArgumentParser(formatter_class=RawDescriptionHelpFormatter,
|
||||
description="""
|
||||
The test calls fdbcli commands through fdbcli --exec "<command>" interactively using subprocess.
|
||||
The outputs from fdbcli are returned and compared to predefined results.
|
||||
Consequently, changing fdbcli outputs or breaking any commands will casue the test to fail.
|
||||
Commands that are easy to test will run against a single process cluster.
|
||||
For complex commands like exclude, they will run against a cluster with multiple(current set to 5) processes.
|
||||
If external_client_library is given, we will disable the local client and use the external client to run fdbcli.
|
||||
""")
|
||||
parser.add_argument('build_dir', metavar='BUILD_DIRECTORY', help='FDB build directory')
|
||||
parser.add_argument('cluster_file', metavar='CLUSTER_FILE', help='FDB cluster file')
|
||||
parser.add_argument('process_number', nargs='?', metavar='PROCESS_NUMBER', help="Number of fdb processes", type=int, default=1)
|
||||
parser.add_argument('--external-client-library', '-e', metavar='EXTERNAL_CLIENT_LIBRARY_PATH', help="External client library path")
|
||||
args = parser.parse_args()
|
||||
|
||||
# keep current environment variables
|
||||
fdbcli_env = os.environ.copy()
|
||||
# set external client library if provided
|
||||
if args.external_client_library:
|
||||
# disable local client and use the external client library
|
||||
fdbcli_env['FDB_NETWORK_OPTION_DISABLE_LOCAL_CLIENT'] = ''
|
||||
fdbcli_env['FDB_NETWORK_OPTION_EXTERNAL_CLIENT_LIBRARY'] = args.external_client_library
|
||||
|
||||
# shell command template
|
||||
command_template = [sys.argv[1], '-C', sys.argv[2], '--exec']
|
||||
command_template = [args.build_dir + '/bin/fdbcli', '-C', args.cluster_file, '--exec']
|
||||
# tests for fdbcli commands
|
||||
# assertions will fail if fdbcli does not work as expected
|
||||
process_number = int(sys.argv[3])
|
||||
if process_number == 1:
|
||||
if args.process_number == 1:
|
||||
# TODO: disable for now, the change can cause the database unavailable
|
||||
#advanceversion()
|
||||
# advanceversion()
|
||||
cache_range()
|
||||
consistencycheck()
|
||||
datadistribution()
|
||||
kill()
|
||||
lockAndUnlock()
|
||||
maintenance()
|
||||
setclass()
|
||||
profile()
|
||||
suspend()
|
||||
transaction()
|
||||
throttle()
|
||||
triggerddteaminfolog()
|
||||
else:
|
||||
assert process_number > 1, "Process number should be positive"
|
||||
assert args.process_number > 1, "Process number should be positive"
|
||||
coordinators()
|
||||
exclude()
|
||||
|
||||
|
||||
setclass()
|
||||
|
|
|
@ -42,7 +42,7 @@ else()
|
|||
set(WITH_TLS OFF)
|
||||
endif()
|
||||
if(WIN32)
|
||||
message(STATUS "TLS is temporarilty disabled on macOS while libressl -> openssl transition happens")
|
||||
message(STATUS "TLS is temporarilty disabled on Windows while libressl -> openssl transition happens")
|
||||
set(WITH_TLS OFF)
|
||||
endif()
|
||||
endif()
|
||||
|
@ -55,7 +55,7 @@ find_package(Python COMPONENTS Interpreter)
|
|||
if(Python_Interpreter_FOUND)
|
||||
set(WITH_PYTHON ON)
|
||||
else()
|
||||
#message(FATAL_ERROR "Could not found a suitable python interpreter")
|
||||
message(WARNING "Could not found a suitable python interpreter")
|
||||
set(WITH_PYTHON OFF)
|
||||
endif()
|
||||
|
||||
|
@ -66,7 +66,7 @@ else()
|
|||
if(WITH_PYTHON)
|
||||
set(WITH_PYTHON_BINDING ON)
|
||||
else()
|
||||
#message(FATAL_ERROR "Could not found a suitable python interpreter")
|
||||
message(WARNING "Python binding depends on Python, but a python interpreter is not found")
|
||||
set(WITH_PYTHON_BINDING OFF)
|
||||
endif()
|
||||
endif()
|
||||
|
@ -76,6 +76,9 @@ endif()
|
|||
################################################################################
|
||||
|
||||
option(BUILD_C_BINDING "build C binding" ON)
|
||||
if(BUILD_C_BINDING AND NOT WITH_PYTHON)
|
||||
message(WARNING "C binding depends on Python, but a python interpreter is not found")
|
||||
endif()
|
||||
if(BUILD_C_BINDING AND WITH_PYTHON)
|
||||
set(WITH_C_BINDING ON)
|
||||
else()
|
||||
|
@ -87,6 +90,9 @@ endif()
|
|||
################################################################################
|
||||
|
||||
option(BUILD_JAVA_BINDING "build java binding" ON)
|
||||
if(BUILD_JAVA_BINDING AND NOT WITH_C_BINDING)
|
||||
message(WARNING "Java binding depends on C binding, but C binding is not enabled")
|
||||
endif()
|
||||
if(NOT BUILD_JAVA_BINDING OR NOT WITH_C_BINDING)
|
||||
set(WITH_JAVA_BINDING OFF)
|
||||
else()
|
||||
|
@ -120,6 +126,9 @@ endif()
|
|||
################################################################################
|
||||
|
||||
option(BUILD_GO_BINDING "build go binding" ON)
|
||||
if(BUILD_GO_BINDING AND NOT WITH_C_BINDING)
|
||||
message(WARNING "Go binding depends on C binding, but C binding is not enabled")
|
||||
endif()
|
||||
if(NOT BUILD_GO_BINDING OR NOT BUILD_C_BINDING)
|
||||
set(WITH_GO_BINDING OFF)
|
||||
else()
|
||||
|
@ -141,6 +150,9 @@ endif()
|
|||
################################################################################
|
||||
|
||||
option(BUILD_RUBY_BINDING "build ruby binding" ON)
|
||||
if(BUILD_RUBY_BINDING AND NOT WITH_C_BINDING)
|
||||
message(WARNING "Ruby binding depends on C binding, but C binding is not enabled")
|
||||
endif()
|
||||
if(NOT BUILD_RUBY_BINDING OR NOT BUILD_C_BINDING)
|
||||
set(WITH_RUBY_BINDING OFF)
|
||||
else()
|
||||
|
|
|
@ -0,0 +1,20 @@
|
|||
find_package(msgpack 3.3.0 EXACT QUIET CONFIG)
|
||||
|
||||
add_library(msgpack INTERFACE)
|
||||
|
||||
if(msgpack_FOUND)
|
||||
target_link_libraries(msgpack INTERFACE msgpackc-cxx)
|
||||
else()
|
||||
include(ExternalProject)
|
||||
ExternalProject_add(msgpackProject
|
||||
URL "https://github.com/msgpack/msgpack-c/releases/download/cpp-3.3.0/msgpack-3.3.0.tar.gz"
|
||||
URL_HASH SHA256=6e114d12a5ddb8cb11f669f83f32246e484a8addd0ce93f274996f1941c1f07b
|
||||
CONFIGURE_COMMAND ""
|
||||
BUILD_COMMAND ""
|
||||
INSTALL_COMMAND ""
|
||||
)
|
||||
|
||||
ExternalProject_Get_property(msgpackProject SOURCE_DIR)
|
||||
target_include_directories(msgpack SYSTEM INTERFACE "${SOURCE_DIR}/include")
|
||||
add_dependencies(msgpack msgpackProject)
|
||||
endif()
|
|
@ -155,17 +155,35 @@ list(GET FDB_VERSION_LIST 2 FDB_PATCH)
|
|||
# Alternatives config
|
||||
################################################################################
|
||||
|
||||
set(mv_packaging_dir ${PROJECT_SOURCE_DIR}/packaging/multiversion)
|
||||
math(EXPR ALTERNATIVES_PRIORITY "(${PROJECT_VERSION_MAJOR} * 1000) + (${PROJECT_VERSION_MINOR} * 100) + ${PROJECT_VERSION_PATCH}")
|
||||
set(script_dir "${PROJECT_BINARY_DIR}/packaging/multiversion/")
|
||||
file(MAKE_DIRECTORY "${script_dir}/server" "${script_dir}/clients")
|
||||
configure_file("${PROJECT_SOURCE_DIR}/packaging/multiversion/server/postinst" "${script_dir}/server" @ONLY)
|
||||
configure_file("${PROJECT_SOURCE_DIR}/packaging/multiversion/server/prerm" "${script_dir}/server" @ONLY)
|
||||
set(LIB_DIR lib)
|
||||
configure_file("${PROJECT_SOURCE_DIR}/packaging/multiversion/clients/postinst" "${script_dir}/clients" @ONLY)
|
||||
set(LIB_DIR lib64)
|
||||
configure_file("${PROJECT_SOURCE_DIR}/packaging/multiversion/clients/postinst" "${script_dir}/clients/postinst-el7" @ONLY)
|
||||
configure_file("${PROJECT_SOURCE_DIR}/packaging/multiversion/clients/prerm" "${script_dir}/clients" @ONLY)
|
||||
|
||||
# Needs to to be named postinst for debian
|
||||
configure_file("${mv_packaging_dir}/server/postinst-deb" "${script_dir}/server/postinst" @ONLY)
|
||||
|
||||
configure_file("${mv_packaging_dir}/server/postinst-rpm" "${script_dir}/server" @ONLY)
|
||||
configure_file("${mv_packaging_dir}/server/prerm" "${script_dir}/server" @ONLY)
|
||||
set(LIB_DIR lib)
|
||||
configure_file("${mv_packaging_dir}/clients/postinst" "${script_dir}/clients" @ONLY)
|
||||
set(LIB_DIR lib64)
|
||||
configure_file("${mv_packaging_dir}/clients/postinst" "${script_dir}/clients/postinst-el7" @ONLY)
|
||||
configure_file("${mv_packaging_dir}/clients/prerm" "${script_dir}/clients" @ONLY)
|
||||
|
||||
#make sure all directories we need exist
|
||||
file(MAKE_DIRECTORY "${script_dir}/clients/usr/lib/foundationdb")
|
||||
install(DIRECTORY "${script_dir}/clients/usr/lib/foundationdb"
|
||||
DESTINATION usr/lib
|
||||
COMPONENT clients-versioned)
|
||||
file(MAKE_DIRECTORY "${script_dir}/clients/usr/lib/pkgconfig")
|
||||
install(DIRECTORY "${script_dir}/clients/usr/lib/pkgconfig"
|
||||
DESTINATION usr/lib
|
||||
COMPONENT clients-versioned)
|
||||
file(MAKE_DIRECTORY "${script_dir}/clients/usr/lib/cmake")
|
||||
install(DIRECTORY "${script_dir}/clients/usr/lib/cmake"
|
||||
DESTINATION usr/lib
|
||||
COMPONENT clients-versioned)
|
||||
|
||||
################################################################################
|
||||
# Move Docker Setup
|
||||
|
@ -196,6 +214,8 @@ set(CPACK_COMPONENT_SERVER-EL7_DEPENDS clients-el7)
|
|||
set(CPACK_COMPONENT_SERVER-DEB_DEPENDS clients-deb)
|
||||
set(CPACK_COMPONENT_SERVER-TGZ_DEPENDS clients-tgz)
|
||||
set(CPACK_COMPONENT_SERVER-VERSIONED_DEPENDS clients-versioned)
|
||||
set(CPACK_RPM_SERVER-VERSIONED_PACKAGE_REQUIRES
|
||||
"foundationdb-clients-${FDB_MAJOR}.${FDB_MINOR}.${FDB_PATCH} = ${FDB_MAJOR}.${FDB_MINOR}.${FDB_PATCH}")
|
||||
|
||||
set(CPACK_COMPONENT_SERVER-EL7_DISPLAY_NAME "foundationdb-server")
|
||||
set(CPACK_COMPONENT_SERVER-DEB_DISPLAY_NAME "foundationdb-server")
|
||||
|
@ -245,6 +265,7 @@ set(CPACK_RPM_PACKAGE_NAME "foundationdb")
|
|||
set(CPACK_RPM_CLIENTS-EL7_PACKAGE_NAME "foundationdb-clients")
|
||||
set(CPACK_RPM_SERVER-EL7_PACKAGE_NAME "foundationdb-server")
|
||||
set(CPACK_RPM_SERVER-VERSIONED_PACKAGE_NAME "foundationdb-server-${PROJECT_VERSION}")
|
||||
set(CPACK_RPM_CLIENTS-VERSIONED_PACKAGE_NAME "foundationdb-clients-${PROJECT_VERSION}")
|
||||
|
||||
set(CPACK_RPM_CLIENTS-EL7_FILE_NAME "${rpm-clients-filename}.el7.${CMAKE_SYSTEM_PROCESSOR}.rpm")
|
||||
set(CPACK_RPM_CLIENTS-VERSIONED_FILE_NAME "${rpm-clients-filename}.versioned.${CMAKE_SYSTEM_PROCESSOR}.rpm")
|
||||
|
@ -281,7 +302,10 @@ set(CPACK_RPM_EXCLUDE_FROM_AUTO_FILELIST_ADDITION
|
|||
"/lib"
|
||||
"/lib/systemd"
|
||||
"/lib/systemd/system"
|
||||
"/etc/rc.d/init.d")
|
||||
"/etc/rc.d/init.d"
|
||||
"/usr/lib/pkgconfig"
|
||||
"/usr/lib/foundationdb"
|
||||
"/usr/lib/cmake")
|
||||
set(CPACK_RPM_DEBUGINFO_PACKAGE ${GENERATE_DEBUG_PACKAGES})
|
||||
#set(CPACK_RPM_BUILD_SOURCE_FDB_INSTALL_DIRS_PREFIX /usr/src)
|
||||
set(CPACK_RPM_COMPONENT_INSTALL ON)
|
||||
|
@ -305,7 +329,7 @@ set(CPACK_RPM_SERVER-EL7_PACKAGE_REQUIRES
|
|||
"foundationdb-clients = ${FDB_MAJOR}.${FDB_MINOR}.${FDB_PATCH}")
|
||||
|
||||
set(CPACK_RPM_SERVER-VERSIONED_POST_INSTALL_SCRIPT_FILE
|
||||
${CMAKE_BINARY_DIR}/packaging/multiversion/server/postinst)
|
||||
${CMAKE_BINARY_DIR}/packaging/multiversion/server/postinst-rpm)
|
||||
|
||||
set(CPACK_RPM_SERVER-VERSIONED_PRE_UNINSTALL_SCRIPT_FILE
|
||||
${CMAKE_BINARY_DIR}/packaging/multiversion/server/prerm)
|
||||
|
|
|
@ -308,9 +308,16 @@ namespace SummarizeTest
|
|||
string lastFolderName = Path.GetFileName(Path.GetDirectoryName(testFile));
|
||||
if (lastFolderName.Contains("from_") || lastFolderName.Contains("to_")) // Only perform upgrade/downgrade tests from certain versions
|
||||
{
|
||||
oldBinaryVersionLowerBound = lastFolderName.Split('_').Last();
|
||||
oldBinaryVersionLowerBound = lastFolderName.Split('_').ElementAt(1); // Assuming "from_*.*.*" appears first in the folder name
|
||||
}
|
||||
string oldBinaryVersionUpperBound = getFdbserverVersion(fdbserverName);
|
||||
if (lastFolderName.Contains("until_")) // Specify upper bound for old binary; "until_*.*.*" is assumed at the end if present
|
||||
{
|
||||
string givenUpperBound = lastFolderName.Split('_').Last();
|
||||
if (versionLessThan(givenUpperBound, oldBinaryVersionUpperBound)) {
|
||||
oldBinaryVersionUpperBound = givenUpperBound;
|
||||
}
|
||||
}
|
||||
if (versionGreaterThanOrEqual("4.0.0", oldBinaryVersionUpperBound)) {
|
||||
// If the binary under test is from 3.x, then allow upgrade tests from 3.x binaries.
|
||||
oldBinaryVersionLowerBound = "0.0.0";
|
||||
|
@ -320,8 +327,22 @@ namespace SummarizeTest
|
|||
Directory.GetFiles(oldBinaryFolder),
|
||||
x => versionGreaterThanOrEqual(Path.GetFileName(x).Split('-').Last(), oldBinaryVersionLowerBound)
|
||||
&& versionLessThan(Path.GetFileName(x).Split('-').Last(), oldBinaryVersionUpperBound));
|
||||
oldBinaries = oldBinaries.Concat(currentBinary);
|
||||
oldServerName = random.Choice(oldBinaries.ToList<string>());
|
||||
if (!lastFolderName.Contains("until_")) {
|
||||
// only add current binary to the list of old binaries if "until_" is not specified in the folder name
|
||||
// <version> in until_<version> should be less or equal to the current binary version
|
||||
// otherwise, using "until_" makes no sense
|
||||
// thus, by definition, if "until_" appears, we do not want to run with the current binary version
|
||||
oldBinaries = oldBinaries.Concat(currentBinary);
|
||||
}
|
||||
List<string> oldBinariesList = oldBinaries.ToList<string>();
|
||||
if (oldBinariesList.Count == 0) {
|
||||
// In theory, restarting tests are named to have at least one old binary version to run
|
||||
// But if none of the provided old binaries fall in the range, we just skip the test
|
||||
Console.WriteLine("No available old binary version from {0} to {1}", oldBinaryVersionLowerBound, oldBinaryVersionUpperBound);
|
||||
return 0;
|
||||
} else {
|
||||
oldServerName = random.Choice(oldBinariesList);
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
|
|
|
@ -0,0 +1,58 @@
|
|||
# pkg_tester
|
||||
|
||||
This is a test suite that can be used to validate properties of generated package files.
|
||||
|
||||
To use it, first build the package files as described in the main [README](https://github.com/apple/foundationdb#linux)
|
||||
|
||||
Then setup a virtualenv:
|
||||
|
||||
```
|
||||
$ python3 -m venv .venv
|
||||
$ source .venv/bin/activate
|
||||
$ pip install -r requirements.txt
|
||||
```
|
||||
|
||||
Then you can run the tests with pytest:
|
||||
|
||||
```
|
||||
$ BUILDDIR=<BUILDDIR> python -m pytest -s -v
|
||||
```
|
||||
|
||||
These are snapshot tests, so you may need to update the snapshots with
|
||||
|
||||
```
|
||||
$ BUILDDIR=<BUILDDIR> python -m pytest -s -v --snapshot-update
|
||||
```
|
||||
|
||||
Use discretion about whether or not the behavior change is acceptable.
|
||||
|
||||
A helpful tip for debugging: if you run pytest with `--pdb`, then it will pause
|
||||
the tests at the first error which gives you a chance to run some `docker exec`
|
||||
commands to try and see what's wrong.
|
||||
|
||||
There's a small chance that this will leak an image (especially if you interrupt the test with ctrl-c). Consider running
|
||||
|
||||
```
|
||||
$ docker image prune
|
||||
```
|
||||
|
||||
after. If you `kill -9` the test, there might even be leaked containers. You can destroy all existing containers with
|
||||
|
||||
```
|
||||
$ docker rm -f $(docker ps -a -q) # destroy all docker containers!
|
||||
```
|
||||
|
||||
# Requirements
|
||||
|
||||
docker, python
|
||||
|
||||
# Future work?
|
||||
|
||||
- [x] Test rpms
|
||||
- [x] Test debs
|
||||
- [x] Test versioned packages
|
||||
- [ ] Test that upgrades preserve data/config
|
||||
|
||||
# Development
|
||||
|
||||
Please run `black` and `mypy` after making changes
|
|
@ -0,0 +1,244 @@
|
|||
# name: test_backup_restore[centos-versioned]
|
||||
'
|
||||
Submitted and now waiting for the backup on tag `default' to complete.
|
||||
|
||||
'
|
||||
---
|
||||
# name: test_backup_restore[centos-versioned].1
|
||||
'
|
||||
`x' is `y'
|
||||
|
||||
'
|
||||
---
|
||||
# name: test_backup_restore[centos]
|
||||
'
|
||||
Submitted and now waiting for the backup on tag `default' to complete.
|
||||
|
||||
'
|
||||
---
|
||||
# name: test_backup_restore[centos].1
|
||||
'
|
||||
`x' is `y'
|
||||
|
||||
'
|
||||
---
|
||||
# name: test_backup_restore[ubuntu-versioned]
|
||||
'
|
||||
Submitted and now waiting for the backup on tag `default' to complete.
|
||||
|
||||
'
|
||||
---
|
||||
# name: test_backup_restore[ubuntu-versioned].1
|
||||
'
|
||||
`x' is `y'
|
||||
|
||||
'
|
||||
---
|
||||
# name: test_backup_restore[ubuntu]
|
||||
'
|
||||
Submitted and now waiting for the backup on tag `default' to complete.
|
||||
|
||||
'
|
||||
---
|
||||
# name: test_backup_restore[ubuntu].1
|
||||
'
|
||||
`x' is `y'
|
||||
|
||||
'
|
||||
---
|
||||
# name: test_fdbcli_help_text[centos-versioned]
|
||||
'
|
||||
FoundationDB CLI 7.1 (v7.1.0)
|
||||
usage: fdbcli [OPTIONS]
|
||||
|
||||
-C CONNFILE The path of a file containing the connection string for the
|
||||
FoundationDB cluster. The default is first the value of the
|
||||
FDB_CLUSTER_FILE environment variable, then `./fdb.cluster',
|
||||
then `/etc/foundationdb/fdb.cluster'.
|
||||
--log Enables trace file logging for the CLI session.
|
||||
--log-dir PATH Specifes the output directory for trace files. If
|
||||
unspecified, defaults to the current directory. Has
|
||||
no effect unless --log is specified.
|
||||
--trace_format FORMAT
|
||||
Select the format of the log files. xml (the default) and json
|
||||
are supported. Has no effect unless --log is specified.
|
||||
--exec CMDS Immediately executes the semicolon separated CLI commands
|
||||
and then exits.
|
||||
--no-status Disables the initial status check done when starting
|
||||
the CLI.
|
||||
--tls_certificate_file CERTFILE
|
||||
The path of a file containing the TLS certificate and CA
|
||||
chain.
|
||||
--tls_ca_file CERTAUTHFILE
|
||||
The path of a file containing the CA certificates chain.
|
||||
--tls_key_file KEYFILE
|
||||
The path of a file containing the private key corresponding
|
||||
to the TLS certificate.
|
||||
--tls_password PASSCODE
|
||||
The passphrase of encrypted private key
|
||||
--tls_verify_peers CONSTRAINTS
|
||||
The constraints by which to validate TLS peers. The contents
|
||||
and format of CONSTRAINTS are plugin-specific.
|
||||
--knob_KNOBNAME KNOBVALUE
|
||||
Changes a knob option. KNOBNAME should be lowercase.
|
||||
--debug-tls Prints the TLS configuration and certificate chain, then exits.
|
||||
Useful in reporting and diagnosing TLS issues.
|
||||
--build_flags Print build information and exit.
|
||||
-v, --version Print FoundationDB CLI version information and exit.
|
||||
-h, --help Display this help and exit.
|
||||
|
||||
'
|
||||
---
|
||||
# name: test_fdbcli_help_text[centos]
|
||||
'
|
||||
FoundationDB CLI 7.1 (v7.1.0)
|
||||
usage: fdbcli [OPTIONS]
|
||||
|
||||
-C CONNFILE The path of a file containing the connection string for the
|
||||
FoundationDB cluster. The default is first the value of the
|
||||
FDB_CLUSTER_FILE environment variable, then `./fdb.cluster',
|
||||
then `/etc/foundationdb/fdb.cluster'.
|
||||
--log Enables trace file logging for the CLI session.
|
||||
--log-dir PATH Specifes the output directory for trace files. If
|
||||
unspecified, defaults to the current directory. Has
|
||||
no effect unless --log is specified.
|
||||
--trace_format FORMAT
|
||||
Select the format of the log files. xml (the default) and json
|
||||
are supported. Has no effect unless --log is specified.
|
||||
--exec CMDS Immediately executes the semicolon separated CLI commands
|
||||
and then exits.
|
||||
--no-status Disables the initial status check done when starting
|
||||
the CLI.
|
||||
--tls_certificate_file CERTFILE
|
||||
The path of a file containing the TLS certificate and CA
|
||||
chain.
|
||||
--tls_ca_file CERTAUTHFILE
|
||||
The path of a file containing the CA certificates chain.
|
||||
--tls_key_file KEYFILE
|
||||
The path of a file containing the private key corresponding
|
||||
to the TLS certificate.
|
||||
--tls_password PASSCODE
|
||||
The passphrase of encrypted private key
|
||||
--tls_verify_peers CONSTRAINTS
|
||||
The constraints by which to validate TLS peers. The contents
|
||||
and format of CONSTRAINTS are plugin-specific.
|
||||
--knob_KNOBNAME KNOBVALUE
|
||||
Changes a knob option. KNOBNAME should be lowercase.
|
||||
--debug-tls Prints the TLS configuration and certificate chain, then exits.
|
||||
Useful in reporting and diagnosing TLS issues.
|
||||
--build_flags Print build information and exit.
|
||||
-v, --version Print FoundationDB CLI version information and exit.
|
||||
-h, --help Display this help and exit.
|
||||
|
||||
'
|
||||
---
|
||||
# name: test_fdbcli_help_text[ubuntu-versioned]
|
||||
'
|
||||
FoundationDB CLI 7.1 (v7.1.0)
|
||||
usage: fdbcli [OPTIONS]
|
||||
|
||||
-C CONNFILE The path of a file containing the connection string for the
|
||||
FoundationDB cluster. The default is first the value of the
|
||||
FDB_CLUSTER_FILE environment variable, then `./fdb.cluster',
|
||||
then `/etc/foundationdb/fdb.cluster'.
|
||||
--log Enables trace file logging for the CLI session.
|
||||
--log-dir PATH Specifes the output directory for trace files. If
|
||||
unspecified, defaults to the current directory. Has
|
||||
no effect unless --log is specified.
|
||||
--trace_format FORMAT
|
||||
Select the format of the log files. xml (the default) and json
|
||||
are supported. Has no effect unless --log is specified.
|
||||
--exec CMDS Immediately executes the semicolon separated CLI commands
|
||||
and then exits.
|
||||
--no-status Disables the initial status check done when starting
|
||||
the CLI.
|
||||
--tls_certificate_file CERTFILE
|
||||
The path of a file containing the TLS certificate and CA
|
||||
chain.
|
||||
--tls_ca_file CERTAUTHFILE
|
||||
The path of a file containing the CA certificates chain.
|
||||
--tls_key_file KEYFILE
|
||||
The path of a file containing the private key corresponding
|
||||
to the TLS certificate.
|
||||
--tls_password PASSCODE
|
||||
The passphrase of encrypted private key
|
||||
--tls_verify_peers CONSTRAINTS
|
||||
The constraints by which to validate TLS peers. The contents
|
||||
and format of CONSTRAINTS are plugin-specific.
|
||||
--knob_KNOBNAME KNOBVALUE
|
||||
Changes a knob option. KNOBNAME should be lowercase.
|
||||
--debug-tls Prints the TLS configuration and certificate chain, then exits.
|
||||
Useful in reporting and diagnosing TLS issues.
|
||||
--build_flags Print build information and exit.
|
||||
-v, --version Print FoundationDB CLI version information and exit.
|
||||
-h, --help Display this help and exit.
|
||||
|
||||
'
|
||||
---
|
||||
# name: test_fdbcli_help_text[ubuntu]
|
||||
'
|
||||
FoundationDB CLI 7.1 (v7.1.0)
|
||||
usage: fdbcli [OPTIONS]
|
||||
|
||||
-C CONNFILE The path of a file containing the connection string for the
|
||||
FoundationDB cluster. The default is first the value of the
|
||||
FDB_CLUSTER_FILE environment variable, then `./fdb.cluster',
|
||||
then `/etc/foundationdb/fdb.cluster'.
|
||||
--log Enables trace file logging for the CLI session.
|
||||
--log-dir PATH Specifes the output directory for trace files. If
|
||||
unspecified, defaults to the current directory. Has
|
||||
no effect unless --log is specified.
|
||||
--trace_format FORMAT
|
||||
Select the format of the log files. xml (the default) and json
|
||||
are supported. Has no effect unless --log is specified.
|
||||
--exec CMDS Immediately executes the semicolon separated CLI commands
|
||||
and then exits.
|
||||
--no-status Disables the initial status check done when starting
|
||||
the CLI.
|
||||
--tls_certificate_file CERTFILE
|
||||
The path of a file containing the TLS certificate and CA
|
||||
chain.
|
||||
--tls_ca_file CERTAUTHFILE
|
||||
The path of a file containing the CA certificates chain.
|
||||
--tls_key_file KEYFILE
|
||||
The path of a file containing the private key corresponding
|
||||
to the TLS certificate.
|
||||
--tls_password PASSCODE
|
||||
The passphrase of encrypted private key
|
||||
--tls_verify_peers CONSTRAINTS
|
||||
The constraints by which to validate TLS peers. The contents
|
||||
and format of CONSTRAINTS are plugin-specific.
|
||||
--knob_KNOBNAME KNOBVALUE
|
||||
Changes a knob option. KNOBNAME should be lowercase.
|
||||
--debug-tls Prints the TLS configuration and certificate chain, then exits.
|
||||
Useful in reporting and diagnosing TLS issues.
|
||||
--build_flags Print build information and exit.
|
||||
-v, --version Print FoundationDB CLI version information and exit.
|
||||
-h, --help Display this help and exit.
|
||||
|
||||
'
|
||||
---
|
||||
# name: test_write[centos-versioned]
|
||||
'
|
||||
`x' is `y'
|
||||
|
||||
'
|
||||
---
|
||||
# name: test_write[centos]
|
||||
'
|
||||
`x' is `y'
|
||||
|
||||
'
|
||||
---
|
||||
# name: test_write[ubuntu-versioned]
|
||||
'
|
||||
`x' is `y'
|
||||
|
||||
'
|
||||
---
|
||||
# name: test_write[ubuntu]
|
||||
'
|
||||
`x' is `y'
|
||||
|
||||
'
|
||||
---
|
|
@ -0,0 +1,13 @@
|
|||
attrs==21.2.0
|
||||
colored==1.4.2
|
||||
importlib-metadata==4.0.1
|
||||
iniconfig==1.1.1
|
||||
packaging==20.9
|
||||
pluggy==0.13.1
|
||||
py==1.10.0
|
||||
pyparsing==2.4.7
|
||||
pytest==6.2.4
|
||||
syrupy==1.2.3
|
||||
toml==0.10.2
|
||||
typing-extensions==3.10.0.0
|
||||
zipp==3.4.1
|
|
@@ -0,0 +1,251 @@
# test_fdb_pkgs.py
#
# This source file is part of the FoundationDB open source project
#
# Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import glob
import os
import pathlib
import pytest
import shlex
import subprocess
import uuid

from typing import Iterator, List, Optional, Union


def run(args: List[str]) -> str:
    print("$ {}".format(" ".join(map(shlex.quote, args))))
    result = subprocess.check_output(args).decode("utf-8")
    print(result, end="")
    return result


class Image:
    def __init__(self, uid: str):
        self.uid = uid

    def dispose(self):
        run(["docker", "image", "rm", self.uid])


class Container:
    def __init__(self, image: Union[str, Image], initd=False):
        if isinstance(image, Image):
            image_name = image.uid
        else:
            assert isinstance(image, str)
            image_name = image

        # minimal extra args required to run systemd
        # https://developers.redhat.com/blog/2016/09/13/running-systemd-in-a-non-privileged-container#the_quest
        extra_initd_args = []
        if initd:
            extra_initd_args = "--tmpfs /tmp --tmpfs /run -v /sys/fs/cgroup:/sys/fs/cgroup:ro".split()

        self.uid = str(uuid.uuid4())

        run(
            ["docker", "run"]
            + ["-t", "-d", "--name", self.uid]
            + extra_initd_args
            + [image_name]
            + ["/usr/sbin/init" for _ in range(1) if initd]
        ).rstrip()

    def run(self, args: List[str]) -> str:
        return run(["docker", "exec", self.uid] + args)

    def copy_to(self, src_path: str, dst_path: str) -> None:
        run(["docker", "cp", src_path, "{}:{}".format(self.uid, dst_path)])

    def commit(self) -> Image:
        output = run(["docker", "commit", self.uid])
        uid = output.split(":")[1].rstrip()
        return Image(uid)

    def dispose(self):
        run(["docker", "rm", "-f", self.uid])


def ubuntu_image_with_fdb_helper(versioned: bool) -> Iterator[Optional[Image]]:
    """
    Return an image which has just the fdb deb packages installed.
    """
    builddir = os.environ.get("BUILDDIR")
    if builddir is None:
        assert False, "BUILDDIR environment variable not set"
    debs = [
        deb
        for deb in glob.glob(os.path.join(builddir, "packages", "*.deb"))
        if ("versioned" in deb) == versioned
    ]
    if not debs:
        yield None
        return

    container = None
    image = None
    try:
        container = Container("ubuntu")
        for deb in debs:
            container.copy_to(deb, "/opt")
        container.run(["bash", "-c", "dpkg -i /opt/*.deb"])
        container.run(["bash", "-c", "rm /opt/*.deb"])
        image = container.commit()
        yield image
    finally:
        if container is not None:
            container.dispose()
        if image is not None:
            image.dispose()


@pytest.fixture(scope="session")
def ubuntu_image_with_fdb() -> Iterator[Optional[Image]]:
    yield from ubuntu_image_with_fdb_helper(versioned=False)


@pytest.fixture(scope="session")
def ubuntu_image_with_fdb_versioned() -> Iterator[Optional[Image]]:
    yield from ubuntu_image_with_fdb_helper(versioned=True)


def centos_image_with_fdb_helper(versioned: bool) -> Iterator[Optional[Image]]:
    """
    Return an image which has just the fdb rpm packages installed.
    """
    builddir = os.environ.get("BUILDDIR")
    if builddir is None:
        assert False, "BUILDDIR environment variable not set"
    rpms = [
        rpm
        for rpm in glob.glob(os.path.join(builddir, "packages", "*.rpm"))
        if ("versioned" in rpm) == versioned
    ]
    if not rpms:
        yield None
        return

    container = None
    image = None
    try:
        container = Container("centos", initd=True)
        for rpm in rpms:
            container.copy_to(rpm, "/opt")
        container.run(["bash", "-c", "yum install -y /opt/*.rpm"])
        container.run(["bash", "-c", "rm /opt/*.rpm"])
        image = container.commit()
        yield image
    finally:
        if container is not None:
            container.dispose()
        if image is not None:
            image.dispose()


@pytest.fixture(scope="session")
def centos_image_with_fdb() -> Iterator[Optional[Image]]:
    yield from centos_image_with_fdb_helper(versioned=False)


@pytest.fixture(scope="session")
def centos_image_with_fdb_versioned() -> Iterator[Optional[Image]]:
    yield from centos_image_with_fdb_helper(versioned=True)


def pytest_generate_tests(metafunc):
    if "linux_container" in metafunc.fixturenames:
        metafunc.parametrize(
            "linux_container",
            ["ubuntu", "centos", "ubuntu-versioned", "centos-versioned"],
            indirect=True,
        )


@pytest.fixture()
def linux_container(
    request,
    ubuntu_image_with_fdb,
    centos_image_with_fdb,
    ubuntu_image_with_fdb_versioned,
    centos_image_with_fdb_versioned,
) -> Iterator[Container]:
    """
    Tests which accept this fixture will be run once for each supported platform, for each type of package (versioned or unversioned).
    """
    container: Optional[Container] = None
    try:
        if request.param == "ubuntu":
            if ubuntu_image_with_fdb is None:
                pytest.skip("No debian packages available to test")
            container = Container(ubuntu_image_with_fdb)
            container.run(
                ["/etc/init.d/foundationdb", "start"]
            )  # outside docker this shouldn't be necessary
        elif request.param == "centos":
            if centos_image_with_fdb is None:
                pytest.skip("No rpm packages available to test")
            container = Container(centos_image_with_fdb, initd=True)
        elif request.param == "ubuntu-versioned":
            if ubuntu_image_with_fdb_versioned is None:
                pytest.skip("No versioned debian packages available to test")
            container = Container(ubuntu_image_with_fdb_versioned)
            container.run(
                ["/etc/init.d/foundationdb", "start"]
            )  # outside docker this shouldn't be necessary
        elif request.param == "centos-versioned":
            if centos_image_with_fdb_versioned is None:
                pytest.skip("No versioned rpm packages available to test")
            container = Container(centos_image_with_fdb_versioned, initd=True)
        else:
            assert False
        yield container
    finally:
        if container is not None:
            container.dispose()


#################### BEGIN ACTUAL TESTS ####################


def test_db_available(linux_container: Container):
    linux_container.run(["fdbcli", "--exec", "get x"])


def test_write(linux_container: Container, snapshot):
    linux_container.run(["fdbcli", "--exec", "writemode on; set x y"])
    assert snapshot == linux_container.run(["fdbcli", "--exec", "get x"])


def test_fdbcli_help_text(linux_container: Container, snapshot):
    assert snapshot == linux_container.run(["fdbcli", "--help"])


def test_backup_restore(linux_container: Container, snapshot, tmp_path: pathlib.Path):
    linux_container.run(["fdbcli", "--exec", "writemode on; set x y"])
    assert snapshot == linux_container.run(
        ["fdbbackup", "start", "-d", "file:///tmp/fdb_backup", "-w"]
    )
    linux_container.run(["fdbcli", "--exec", "writemode on; clear x"])
    linux_container.run(
        [
            "bash",
            "-c",
            "fdbrestore start -r file://$(echo /tmp/fdb_backup/*) -w --dest_cluster_file /etc/foundationdb/fdb.cluster",
        ]
    )
    assert snapshot == linux_container.run(["fdbcli", "--exec", "get x"])
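A hypothetical driver for the suite above: the fixtures read BUILDDIR to locate the built .deb/.rpm packages, so an invocation might look like this (the build path is illustrative):

import os
import sys
import pytest

os.environ.setdefault("BUILDDIR", "/path/to/fdb/build")  # illustrative path
sys.exit(pytest.main(["-v", "test_fdb_pkgs.py"]))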
@@ -95,9 +95,6 @@ exclude_patterns = []
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'solarizedlight'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

@@ -1,6 +1,5 @@
--index-url https://pypi.python.org/simple
setuptools>=20.10.0
setuptools>=20.10.0,<=57.4.0
sphinx==1.5.6
sphinx-bootstrap-theme==0.4.8
docutils==0.16
pygments-style-solarized
@@ -50,6 +50,9 @@
"kvstore_available_bytes":12341234,
"kvstore_free_bytes":12341234,
"kvstore_total_bytes":12341234,
"kvstore_total_size":12341234,
"kvstore_total_nodes":12341234,
"kvstore_inline_keys":12341234,
"durable_bytes":{
"hz":0.0,
"counter":0,

@@ -208,6 +211,13 @@
"estimated_cost":{
"hz":0.0
}
},
"busiest_write_tag":{
"tag": "",
"fractional_cost": 0.0,
"estimated_cost":{
"hz":0.0
}
}
}
],

@@ -226,6 +236,9 @@
"$enum":[
"file_open_error",
"incorrect_cluster_file_contents",
"trace_log_file_write_error",
"trace_log_could_not_create_file",
"trace_log_writer_thread_unresponsive",
"process_error",
"io_error",
"io_timeout",

@@ -285,15 +298,20 @@
"run_loop_busy":0.2 // fraction of time the run loop was busy
}
},
"old_logs":[
"logs":[
{
"logs":[ // this field will be absent if a value has not been explicitly set
"log_interfaces":[ // this field will be absent if a value has not been explicitly set
{
"id":"7f8d623d0cb9966e",
"healthy":true,
"address":"1.2.3.4:1234"
}
],
"epoch":1,
"current":false,
"begin_version":23,
"end_version":112315141,
"possibly_losing_data":true,
"log_replication_factor":3,
"log_write_anti_quorum":0,
"log_fault_tolerance":2,

@@ -325,7 +343,8 @@
"storage_server_min_free_space_ratio",
"log_server_min_free_space",
"log_server_min_free_space_ratio",
"storage_server_durability_lag"
"storage_server_durability_lag",
"storage_server_list_fetch_failed"
]
},
"description":"The database is not being saturated by the workload."

@@ -345,7 +364,8 @@
"storage_server_min_free_space_ratio",
"log_server_min_free_space",
"log_server_min_free_space_ratio",
"storage_server_durability_lag"
"storage_server_durability_lag",
"storage_server_list_fetch_failed"
]
},
"description":"The database is not being saturated by the workload."

@@ -358,15 +378,11 @@
"auto" : {
"busy_read" : 0,
"busy_write" : 0,
"count" : 0
"count" : 0,
"recommended_only":0
},
"manual" : {
"count" : 0
},
"recommend" : {
"busy_read" : 0,
"busy_write" : 0,
"count" : 0
}
},
"limiting_queue_bytes_storage_server":0,

@@ -394,12 +410,13 @@
"seconds":1.0,
"versions":1000000
},
"active_tss_count":0,
"degraded_processes":0,
"database_available":true,
"database_lock_state":{
"locked":true,
"lock_uid":"00000000000000000000000000000000" // Only present when database is locked
}
},
"generation":2,
"latency_probe":{ // all measurements are based on running sample transactions
"read_seconds":7, // time to perform a single read

@@ -468,6 +485,8 @@
"database_availability_timeout",
"consistencycheck_suspendkey_fetch_timeout",
"consistencycheck_disabled",
"duplicate_mutation_streams",
"duplicate_mutation_fetch_timeout",
"primary_dc_missing",
"fetch_primary_dc_timeout"
]

@@ -476,7 +495,10 @@
{
"name":{ // when not limiting
"$enum":[
"incorrect_cluster_file_contents"
"incorrect_cluster_file_contents",
"trace_log_file_write_error",
"trace_log_could_not_create_file",
"trace_log_writer_thread_unresponsive"
]
},
"description":"Cluster file contents do not match current cluster connection string. Verify cluster file is writable and has not been overwritten externally."

@@ -680,7 +702,10 @@
"ssd-2",
"ssd-redwood-experimental",
"ssd-rocksdb-experimental",
"memory"
"memory",
"memory-1",
"memory-2",
"memory-radixtree-beta"
]},
"tss_count":1,
"tss_storage_engine":{

@@ -690,7 +715,10 @@
"ssd-2",
"ssd-redwood-experimental",
"ssd-rocksdb-experimental",
"memory"
"memory",
"memory-1",
"memory-2",
"memory-radixtree-beta"
]},
"coordinators_count":1,
"excluded_servers":[

@@ -700,11 +728,13 @@
}
],
"auto_commit_proxies":3,
"auto_grv_proxies":1,
"auto_resolvers":1,
"auto_logs":3,
"backup_worker_enabled":1,
"commit_proxies":5, // this field will be absent if a value has not been explicitly set
"grv_proxies":1, // this field will be absent if a value has not been explicitly set
"proxies":6, // this field will be absent if a value has not been explicitly set
"backup_worker_enabled":1,
"perpetual_storage_wiggle": 0
},
"data":{

@@ -805,7 +835,8 @@
"coordinators":[
{
"reachable":true,
"address":"127.0.0.1:4701"
"address":"127.0.0.1:4701",
"protocol":"0fdb00b070010001"
}
],
"quorum_reachable":true
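The schema additions above can be observed directly: reading the special key \xff\xff/status/json returns the status document as JSON. A minimal sketch with the Python bindings (API version and exact field placement are assumptions based on the schema shown here):

import json

import fdb

fdb.api_version(630)  # assumed API version
db = fdb.open()

@fdb.transactional
def read_status(tr):
    # \xff\xff/status/json returns the machine-readable status document.
    return json.loads(tr[b"\xff\xff/status/json"].decode("utf-8"))

status = read_status(db)
# database_lock_state is one of the new fields above; .get() hedges its absence.
print(status["cluster"].get("database_lock_state", {}).get("locked"))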
@@ -63,7 +63,7 @@ Source IP:port 0 string The IP and port of the machine where the s
Trace ID          1  uint64  The 64-bit identifier of the trace. All spans in a trace share the same trace ID.
Span ID           2  uint64  The 64-bit identifier of the span. All spans have a unique identifier.
Start timestamp   3  double  The timestamp when the operation represented by the span began.
End timestamp     4  double  The timestamp when the operation represented by the span ended.
Duration          4  double  The duration in seconds of the operation represented by the span.
Operation name    5  string  The name of the operation the span represents.
Tags              6  map     User defined tags, added manually to specify additional information.
Parent span IDs   7  vector  (Optional) A list of span IDs representing parents of this span.
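One way to read the table: field 4 changed from an end timestamp to a duration. A hedged sketch of the index-to-name mapping (purely illustrative, not a real decoder):

# Index-to-field mapping mirroring the table above; names are illustrative.
SPAN_FIELDS = {
    0: "source_ip_port",
    1: "trace_id",
    2: "span_id",
    3: "start_timestamp",
    4: "duration",          # previously the end timestamp
    5: "operation_name",
    6: "tags",
    7: "parent_span_ids",   # optional
}

def label_span(fields):
    # fields: an indexable sequence laid out per the table above
    return {SPAN_FIELDS[i]: v for i, v in enumerate(fields) if i in SPAN_FIELDS}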
@@ -199,7 +199,7 @@ that process, and wait for necessary data to be moved away.
While the key is set, any commit that tries to set a key in the range will fail with the ``special_keys_api_failure`` error.
#. ``\xff\xff/management/data_distribution/<mode|rebalance_ignored>`` Read/write. Changing these two keys will change the two corresponding system keys ``\xff/dataDistributionMode`` and ``\xff\x02/rebalanceDDIgnored``. The value of ``\xff\xff/management/data_distribution/mode`` is a literal text of ``0`` (disable) or ``1`` (enable). Transactions committed with invalid values will throw ``special_keys_api_failure``. The value of ``\xff\xff/management/data_distribution/rebalance_ignored`` is empty. If present, it means data distribution is disabled for rebalance. Any transaction committed with a non-empty value for this key will throw ``special_keys_api_failure``. For more details, see the help text of the ``fdbcli`` command ``datadistribution``.
#. ``\xff\xff/management/consistency_check_suspended`` Read/write. Setting or reading this key will set or read the underlying system key ``\xff\x02/ConsistencyCheck/Suspend``. The value of this special key is unused and thus, if present, will be empty. In particular, if the key exists, then consistency checking is suspended. For more details, see the help text of the ``fdbcli`` command ``consistencycheck``.
#. ``\xff\xff/management/db_locked`` Read/write. A single key that can be read and modified. Setting the key will lock the database and clearing the key will unlock it. If the database is already locked, then the commit will fail with the ``special_keys_api_failure`` error. For more details, see the help text of the ``fdbcli`` commands ``lock`` and ``unlock``.
#. ``\xff\xff/management/db_locked`` Read/write. A single key that can be read and modified. Setting the key with a 32-byte hex string UID will lock the database and clearing the key will unlock it. Reading the key will return the UID string as the value. If the database is already locked, then the commit will fail with the ``special_keys_api_failure`` error. For more details, see the help text of the ``fdbcli`` commands ``lock`` and ``unlock``.
#. ``\xff\xff/management/auto_coordinators`` Read-only. A single key which, if read, will return a set of processes able to satisfy the current redundancy level and serve as new coordinators. The return value is formatted as a comma-delimited string of network addresses of coordinators, i.e. ``<ip:port>,<ip:port>,...,<ip:port>``.
#. ``\xff\xff/management/excluded_locality/<locality>`` Read/write. Indicates that the cluster should move data away from processes matching ``<locality>``, so that they can be safely removed. See :ref:`removing machines from a cluster <removing-machines-from-a-cluster>` for documentation for the corresponding fdbcli command.
#. ``\xff\xff/management/failed_locality/<locality>`` Read/write. Indicates that the cluster should consider matching processes as permanently failed. This allows the cluster to avoid maintaining extra state and doing extra work in the hope that these processes come back. See :ref:`removing machines from a cluster <removing-machines-from-a-cluster>` for documentation for the corresponding fdbcli command.
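These keys are plain reads and writes once special-key-space writes are enabled. A minimal sketch of the ``db_locked`` flow with the Python bindings (the option names follow the generated convention and the lock-aware step is an assumption for operating on a locked database):

import fdb

fdb.api_version(630)  # assumed API version
db = fdb.open()

@fdb.transactional
def lock_database(tr, uid_hex: bytes):
    tr.options.set_special_key_space_enable_writes()
    # Per the text above: a 32-character hex UID locks; reads return the UID.
    tr[b"\xff\xff/management/db_locked"] = uid_hex

@fdb.transactional
def unlock_database(tr):
    tr.options.set_lock_aware()  # needed once the database is locked
    tr.options.set_special_key_space_enable_writes()
    del tr[b"\xff\xff/management/db_locked"]

lock_database(db, b"deadbeefdeadbeefdeadbeefdeadbeef")
unlock_database(db)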
@@ -81,8 +81,7 @@ bool BackupTLSConfig::setupTLS() {
        try {
            setNetworkOption(FDBNetworkOptions::TLS_VERIFY_PEERS, tlsVerifyPeers);
        } catch (Error& e) {
            std::cerr << "ERROR: cannot set TLS peer verification to " << tlsVerifyPeers << " (" << e.what()
                      << ")\n";
            std::cerr << "ERROR: cannot set TLS peer verification to " << tlsVerifyPeers << " (" << e.what() << ")\n";
            return false;
        }
    }
@@ -67,11 +67,11 @@ CSimpleOpt::SOption gConverterOptions[] = { { OPT_CONTAINER, "-r", SO_REQ_SEP },
    TLS_OPTION_FLAGS
#endif
    { OPT_BUILD_FLAGS, "--build_flags", SO_NONE },
    { OPT_LIST_ONLY, "--list_only", SO_NONE },
    { OPT_KEY_PREFIX, "-k", SO_REQ_SEP },
    { OPT_HEX_KEY_PREFIX, "--hex_prefix", SO_REQ_SEP },
    { OPT_BEGIN_VERSION_FILTER, "--begin_version_filter", SO_REQ_SEP },
    { OPT_END_VERSION_FILTER, "--end_version_filter", SO_REQ_SEP },
    { OPT_LIST_ONLY, "--list_only", SO_NONE },
    { OPT_KEY_PREFIX, "-k", SO_REQ_SEP },
    { OPT_HEX_KEY_PREFIX, "--hex_prefix", SO_REQ_SEP },
    { OPT_BEGIN_VERSION_FILTER, "--begin_version_filter", SO_REQ_SEP },
    { OPT_END_VERSION_FILTER, "--end_version_filter", SO_REQ_SEP },
    { OPT_HELP, "-?", SO_NONE },
    { OPT_HELP, "-h", SO_NONE },
    { OPT_HELP, "--help", SO_NONE },
@@ -46,40 +46,39 @@ extern bool g_crashOnError;
namespace file_converter {

void printDecodeUsage() {
    std::cout
        << "Decoder for FoundationDB backup mutation logs.\n"
           "Usage: fdbdecode [OPTIONS]\n"
           " -r, --container URL\n"
           "                Backup container URL, e.g., file:///some/path/.\n"
           " -i, --input FILE\n"
           "                Log file filter, only matched files are decoded.\n"
           " --log          Enables trace file logging for the CLI session.\n"
           " --logdir PATH  Specifies the output directory for trace files. If\n"
           "                unspecified, defaults to the current directory. Has\n"
           "                no effect unless --log is specified.\n"
           " --loggroup LOG_GROUP\n"
           "                Sets the LogGroup field with the specified value for all\n"
           "                events in the trace output (defaults to `default').\n"
           " --trace_format FORMAT\n"
           "                Select the format of the trace files, xml (the default) or json.\n"
           "                Has no effect unless --log is specified.\n"
           " --crash        Crash on serious error.\n"
           " --blob_credentials FILE\n"
           "                File containing blob credentials in JSON format.\n"
           "                The same credential format/file fdbbackup uses.\n"
    std::cout << "Decoder for FoundationDB backup mutation logs.\n"
                 "Usage: fdbdecode [OPTIONS]\n"
                 " -r, --container URL\n"
                 "                Backup container URL, e.g., file:///some/path/.\n"
                 " -i, --input FILE\n"
                 "                Log file filter, only matched files are decoded.\n"
                 " --log          Enables trace file logging for the CLI session.\n"
                 " --logdir PATH  Specifies the output directory for trace files. If\n"
                 "                unspecified, defaults to the current directory. Has\n"
                 "                no effect unless --log is specified.\n"
                 " --loggroup LOG_GROUP\n"
                 "                Sets the LogGroup field with the specified value for all\n"
                 "                events in the trace output (defaults to `default').\n"
                 " --trace_format FORMAT\n"
                 "                Select the format of the trace files, xml (the default) or json.\n"
                 "                Has no effect unless --log is specified.\n"
                 " --crash        Crash on serious error.\n"
                 " --blob_credentials FILE\n"
                 "                File containing blob credentials in JSON format.\n"
                 "                The same credential format/file fdbbackup uses.\n"
#ifndef TLS_DISABLED
    TLS_HELP
#endif
           " --build_flags  Print build information and exit.\n"
           " --list_only    Print file list and exit.\n"
           " -k KEY_PREFIX  Use the prefix for filtering mutations\n"
           " --hex_prefix HEX_PREFIX\n"
           "                The prefix specified in HEX format, e.g., \\x05\\x01.\n"
           " --begin_version_filter BEGIN_VERSION\n"
           "                The version range's begin version (inclusive) for filtering.\n"
           " --end_version_filter END_VERSION\n"
           "                The version range's end version (exclusive) for filtering.\n"
           "\n";
                 " --build_flags  Print build information and exit.\n"
                 " --list_only    Print file list and exit.\n"
                 " -k KEY_PREFIX  Use the prefix for filtering mutations\n"
                 " --hex_prefix HEX_PREFIX\n"
                 "                The prefix specified in HEX format, e.g., \\x05\\x01.\n"
                 " --begin_version_filter BEGIN_VERSION\n"
                 "                The version range's begin version (inclusive) for filtering.\n"
                 " --end_version_filter END_VERSION\n"
                 "                The version range's end version (exclusive) for filtering.\n"
                 "\n";
    return;
}

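The flags documented above compose naturally; a sketch of a list-only invocation using only options from the usage text (the container path is illustrative):

import subprocess

subprocess.run(
    [
        "fdbdecode",
        "-r", "file:///some/path/",          # backup container URL
        "--list_only",                        # print file list and exit
        "--begin_version_filter", "0",        # inclusive begin version
        "--end_version_filter", "200000000",  # exclusive end version
    ],
    check=True,
)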
@@ -465,9 +464,9 @@ ACTOR Future<Void> process_file(Reference<IBackupContainer> container, LogFile f
        }
        if (print) {
            TraceEvent(format("Mutation_%llu_%d", vms.version, sub).c_str(), uid)
                .detail("Version", vms.version)
                .setMaxFieldLength(10000)
                .detail("M", m.toString());
                .detail("Version", vms.version)
                .setMaxFieldLength(10000)
                .detail("M", m.toString());
            std::cout << vms.version << " " << m.toString() << "\n";
        }
    }
@@ -498,7 +497,8 @@ ACTOR Future<Void> decode_logs(DecodeParams params) {
    state std::vector<LogFile> logs = getRelevantLogFiles(listing.logs, params);
    printLogFiles("Relevant files are: ", logs);

    if (params.list_only) return Void();
    if (params.list_only)
        return Void();

    state int idx = 0;
    while (idx < logs.size()) {
@@ -506,7 +506,7 @@ ACTOR Future<Void> decode_logs(DecodeParams params) {
        wait(process_file(container, logs[idx], uid, params));
        idx++;
    }
    TraceEvent("DecodeDone", uid);
    TraceEvent("DecodeDone", uid).log();
    return Void();
}

@@ -1209,9 +1209,11 @@ static void printFastRestoreUsage(bool devhelp) {
static void printDBAgentUsage(bool devhelp) {
    printf("FoundationDB " FDB_VT_PACKAGE_NAME " (v" FDB_VT_VERSION ")\n");
    printf("Usage: %s [OPTIONS]\n\n", exeDatabaseAgent.toString().c_str());
    printf(" -d CONNFILE    The path of a file containing the connection string for the\n"
    printf(" -d, --destination CONNFILE\n"
           "                The path of a file containing the connection string for the\n"
           "                destination FoundationDB cluster.\n");
    printf(" -s CONNFILE    The path of a file containing the connection string for the\n"
    printf(" -s, --source CONNFILE\n"
           "                The path of a file containing the connection string for the\n"
           "                source FoundationDB cluster.\n");
    printf(" --log          Enables trace file logging for the CLI session.\n"
           " --logdir PATH  Specifies the output directory for trace files. If\n"
@@ -1223,7 +1225,7 @@ static void printDBAgentUsage(bool devhelp) {
    printf(" --trace_format FORMAT\n"
           "                Select the format of the trace files. xml (the default) and json are supported.\n"
           "                Has no effect unless --log is specified.\n");
    printf(" -m SIZE, --memory SIZE\n"
    printf(" -m, --memory SIZE\n"
           "                Memory limit. The default value is 8GiB. When specified\n"
           "                without a unit, MiB is assumed.\n");
#ifndef TLS_DISABLED
@@ -3073,6 +3075,36 @@ Version parseVersion(const char* str) {
extern uint8_t* g_extra_memory;
#endif

// Creates a connection to a cluster. Optionally prints an error if the connection fails.
Optional<Database> connectToCluster(std::string const& clusterFile,
                                    LocalityData const& localities,
                                    bool quiet = false) {
    auto resolvedClusterFile = ClusterConnectionFile::lookupClusterFileName(clusterFile);
    Reference<ClusterConnectionFile> ccf;

    Optional<Database> db;

    try {
        ccf = makeReference<ClusterConnectionFile>(resolvedClusterFile.first);
    } catch (Error& e) {
        if (!quiet)
            fprintf(stderr, "%s\n", ClusterConnectionFile::getErrorString(resolvedClusterFile, e).c_str());
        return db;
    }

    try {
        db = Database::createDatabase(ccf, -1, IsInternal::True, localities);
    } catch (Error& e) {
        if (!quiet) {
            fprintf(stderr, "ERROR: %s\n", e.what());
            fprintf(stderr, "ERROR: Unable to connect to cluster from `%s'\n", ccf->getFilename().c_str());
        }
        return db;
    }

    return db;
};

int main(int argc, char* argv[]) {
    platformInit();

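The same connect-or-return-empty shape is easy to mirror client-side; a hedged sketch with the Python bindings (not part of this change, API version assumed):

from typing import Optional

import fdb

fdb.api_version(630)  # assumed API version

def connect_to_cluster(cluster_file: str, quiet: bool = False) -> Optional[fdb.Database]:
    # Mirrors connectToCluster() above: swallow the error, optionally print it.
    try:
        return fdb.open(cluster_file)
    except fdb.FDBError as e:
        if not quiet:
            print(f"ERROR: unable to connect to cluster from `{cluster_file}': {e}")
        return None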
@@ -3785,9 +3817,7 @@ int main(int argc, char* argv[]) {
    std::set_new_handler(&platform::outOfMemory);
    setMemoryQuota(memLimit);

    Reference<ClusterConnectionFile> ccf;
    Database db;
    Reference<ClusterConnectionFile> sourceCcf;
    Database sourceDb;
    FileBackupAgent ba;
    Key tag;
@@ -3830,43 +3860,29 @@ int main(int argc, char* argv[]) {
    };

    auto initCluster = [&](bool quiet = false) {
        auto resolvedClusterFile = ClusterConnectionFile::lookupClusterFileName(clusterFile);
        try {
            ccf = makeReference<ClusterConnectionFile>(resolvedClusterFile.first);
        } catch (Error& e) {
            if (!quiet)
                fprintf(stderr, "%s\n", ClusterConnectionFile::getErrorString(resolvedClusterFile, e).c_str());
            return false;
        Optional<Database> result = connectToCluster(clusterFile, localities, quiet);
        if (result.present()) {
            db = result.get();
        }

        try {
            db = Database::createDatabase(ccf, -1, IsInternal::True, localities);
        } catch (Error& e) {
            fprintf(stderr, "ERROR: %s\n", e.what());
            fprintf(stderr, "ERROR: Unable to connect to cluster from `%s'\n", ccf->getFilename().c_str());
            return false;
        }

        return true;
        return result.present();
    };

    if (sourceClusterFile.size()) {
        auto resolvedSourceClusterFile = ClusterConnectionFile::lookupClusterFileName(sourceClusterFile);
        try {
            sourceCcf = makeReference<ClusterConnectionFile>(resolvedSourceClusterFile.first);
        } catch (Error& e) {
            fprintf(stderr, "%s\n", ClusterConnectionFile::getErrorString(resolvedSourceClusterFile, e).c_str());
            return FDB_EXIT_ERROR;
    auto initSourceCluster = [&](bool required, bool quiet = false) {
        if (!sourceClusterFile.size() && required) {
            if (!quiet) {
                fprintf(stderr, "ERROR: source cluster file is required\n");
            }
            return false;
        }

        try {
            sourceDb = Database::createDatabase(sourceCcf, -1, IsInternal::True, localities);
        } catch (Error& e) {
            fprintf(stderr, "ERROR: %s\n", e.what());
            fprintf(stderr, "ERROR: Unable to connect to cluster from `%s'\n", sourceCcf->getFilename().c_str());
            return FDB_EXIT_ERROR;
        Optional<Database> result = connectToCluster(sourceClusterFile, localities, quiet);
        if (result.present()) {
            sourceDb = result.get();
        }
    }

        return result.present();
    };

    switch (programExe) {
    case ProgramExe::AGENT:
@@ -4166,13 +4182,15 @@ int main(int argc, char* argv[]) {
        }
        break;
    case ProgramExe::DR_AGENT:
        if (!initCluster())
        if (!initCluster() || !initSourceCluster(true)) {
            return FDB_EXIT_ERROR;
        }
        f = stopAfter(runDBAgent(sourceDb, db));
        break;
    case ProgramExe::DB_BACKUP:
        if (!initCluster())
        if (!initCluster() || !initSourceCluster(dbType != DBType::ABORT || !dstOnly)) {
            return FDB_EXIT_ERROR;
        }
        switch (dbType) {
        case DBType::START:
            f = stopAfter(submitDBBackup(sourceDb, db, backupKeys, tagName));
@@ -2,15 +2,24 @@ set(FDBCLI_SRCS
  fdbcli.actor.cpp
  fdbcli.actor.h
  AdvanceVersionCommand.actor.cpp
  CacheRangeCommand.actor.cpp
  ConsistencyCheckCommand.actor.cpp
  DataDistributionCommand.actor.cpp
  ExpensiveDataCheckCommand.actor.cpp
  FlowLineNoise.actor.cpp
  FlowLineNoise.h
  ForceRecoveryWithDataLossCommand.actor.cpp
  KillCommand.actor.cpp
  MaintenanceCommand.actor.cpp
  ProfileCommand.actor.cpp
  SetClassCommand.actor.cpp
  SnapshotCommand.actor.cpp
  StatusCommand.actor.cpp
  SuspendCommand.actor.cpp
  ThrottleCommand.actor.cpp
  Util.cpp
  TriggerDDTeamInfoLogCommand.actor.cpp
  TssqCommand.actor.cpp
  Util.actor.cpp
  linenoise/linenoise.h)

if(NOT WIN32)
@@ -0,0 +1,61 @@
/*
 * CacheRangeCommand.actor.cpp
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "fdbcli/fdbcli.actor.h"

#include "fdbclient/FDBOptions.g.h"
#include "fdbclient/IClientApi.h"
#include "fdbclient/ManagementAPI.actor.h"
#include "fdbclient/SystemData.h"

#include "flow/Arena.h"
#include "flow/FastRef.h"
#include "flow/ThreadHelper.actor.h"
#include "flow/actorcompiler.h" // This must be the last #include.

namespace fdb_cli {

ACTOR Future<bool> cacheRangeCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens) {
    if (tokens.size() != 4) {
        printUsage(tokens[0]);
        return false;
    } else {
        state KeyRangeRef cacheRange(tokens[2], tokens[3]);
        if (tokencmp(tokens[1], "set")) {
            wait(ManagementAPI::addCachedRange(db, cacheRange));
        } else if (tokencmp(tokens[1], "clear")) {
            wait(ManagementAPI::removeCachedRange(db, cacheRange));
        } else {
            printUsage(tokens[0]);
            return false;
        }
    }
    return true;
}

CommandFactory cacheRangeFactory(
    "cache_range",
    CommandHelp(
        "cache_range <set|clear> <BEGINKEY> <ENDKEY>",
        "Mark a key range to add to or remove from storage caches.",
        "Use the storage caches to assist in balancing hot read shards. Set the appropriate ranges when experiencing "
        "heavy load, and clear them when they are no longer necessary."));

} // namespace fdb_cli
@@ -32,7 +32,9 @@ namespace fdb_cli {

const KeyRef consistencyCheckSpecialKey = LiteralStringRef("\xff\xff/management/consistency_check_suspended");

ACTOR Future<bool> consistencyCheckCommandActor(Reference<ITransaction> tr, std::vector<StringRef> tokens) {
ACTOR Future<bool> consistencyCheckCommandActor(Reference<ITransaction> tr,
                                                std::vector<StringRef> tokens,
                                                bool intrans) {
    // Here we do not proceed in a try-catch loop since the transaction is always supposed to succeed.
    // If not, the outer loop catch block (fdbcli.actor.cpp) will handle the error and print out the error message
    tr->setOption(FDBTransactionOptions::SPECIAL_KEY_SPACE_ENABLE_WRITES);

@@ -41,10 +43,12 @@ ACTOR Future<bool> consistencyCheckCommandActor(Reference<ITransaction> tr, std:
        printf("ConsistencyCheck is %s\n", suspended.present() ? "off" : "on");
    } else if (tokens.size() == 2 && tokencmp(tokens[1], "off")) {
        tr->set(consistencyCheckSpecialKey, Value());
        wait(safeThreadFutureToFuture(tr->commit()));
        if (!intrans)
            wait(safeThreadFutureToFuture(tr->commit()));
    } else if (tokens.size() == 2 && tokencmp(tokens[1], "on")) {
        tr->clear(consistencyCheckSpecialKey);
        wait(safeThreadFutureToFuture(tr->commit()));
        if (!intrans)
            wait(safeThreadFutureToFuture(tr->commit()));
    } else {
        printUsage(tokens[0]);
        return false;
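Outside fdbcli, the same special key can be toggled directly; a minimal sketch with the Python bindings, grounded in the special-keys documentation earlier in this change (API version assumed):

import fdb

fdb.api_version(630)  # assumed API version

@fdb.transactional
def set_consistency_check(tr, enabled: bool):
    tr.options.set_special_key_space_enable_writes()
    key = b"\xff\xff/management/consistency_check_suspended"
    if enabled:
        del tr[key]    # clearing the key resumes the consistency checker
    else:
        tr[key] = b""  # presence of the key suspends it

set_consistency_check(fdb.open(), False)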
@@ -0,0 +1,137 @@
/*
 * DataDistributionCommand.actor.cpp
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "boost/lexical_cast.hpp"

#include "fdbcli/fdbcli.actor.h"

#include "fdbclient/FDBTypes.h"
#include "fdbclient/IClientApi.h"
#include "fdbclient/Knobs.h"

#include "flow/Arena.h"
#include "flow/FastRef.h"
#include "flow/ThreadHelper.actor.h"
#include "flow/actorcompiler.h" // This must be the last #include.

namespace {

ACTOR Future<Void> setDDMode(Reference<IDatabase> db, int mode) {
    state Reference<ITransaction> tr = db->createTransaction();
    loop {
        tr->setOption(FDBTransactionOptions::SPECIAL_KEY_SPACE_ENABLE_WRITES);
        try {
            tr->set(fdb_cli::ddModeSpecialKey, boost::lexical_cast<std::string>(mode));
            if (mode) {
                // setting DDMode to 1 will enable all disabled parts, for instance the SS failure monitors.
                // hold the returned standalone object's memory
                state ThreadFuture<RangeResult> resultFuture =
                    tr->getRange(fdb_cli::maintenanceSpecialKeyRange, CLIENT_KNOBS->TOO_MANY);
                RangeResult res = wait(safeThreadFutureToFuture(resultFuture));
                ASSERT(res.size() <= 1);
                if (res.size() == 1 && res[0].key == fdb_cli::ignoreSSFailureSpecialKey) {
                    // only clear the key if it is currently being used to disable all SS failure data movement
                    tr->clear(fdb_cli::maintenanceSpecialKeyRange);
                }
                tr->clear(fdb_cli::ddIgnoreRebalanceSpecialKey);
            }
            wait(safeThreadFutureToFuture(tr->commit()));
            return Void();
        } catch (Error& e) {
            TraceEvent("SetDDModeRetrying").error(e);
            wait(safeThreadFutureToFuture(tr->onError(e)));
        }
    }
}

ACTOR Future<Void> setDDIgnoreRebalanceSwitch(Reference<IDatabase> db, bool ignoreRebalance) {
    state Reference<ITransaction> tr = db->createTransaction();
    loop {
        tr->setOption(FDBTransactionOptions::SPECIAL_KEY_SPACE_ENABLE_WRITES);
        try {
            if (ignoreRebalance) {
                tr->set(fdb_cli::ddIgnoreRebalanceSpecialKey, ValueRef());
            } else {
                tr->clear(fdb_cli::ddIgnoreRebalanceSpecialKey);
            }
            wait(safeThreadFutureToFuture(tr->commit()));
            return Void();
        } catch (Error& e) {
            wait(safeThreadFutureToFuture(tr->onError(e)));
        }
    }
}

} // namespace

namespace fdb_cli {

const KeyRef ddModeSpecialKey = LiteralStringRef("\xff\xff/management/data_distribution/mode");
const KeyRef ddIgnoreRebalanceSpecialKey = LiteralStringRef("\xff\xff/management/data_distribution/rebalance_ignored");

ACTOR Future<bool> dataDistributionCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens) {
    state bool result = true;
    if (tokens.size() != 2 && tokens.size() != 3) {
        printf("Usage: datadistribution <on|off|disable <ssfailure|rebalance>|enable "
               "<ssfailure|rebalance>>\n");
        result = false;
    } else {
        if (tokencmp(tokens[1], "on")) {
            wait(success(setDDMode(db, 1)));
            printf("Data distribution is turned on.\n");
        } else if (tokencmp(tokens[1], "off")) {
            wait(success(setDDMode(db, 0)));
            printf("Data distribution is turned off.\n");
        } else if (tokencmp(tokens[1], "disable")) {
            if (tokencmp(tokens[2], "ssfailure")) {
                wait(success((setHealthyZone(db, LiteralStringRef("IgnoreSSFailures"), 0))));
                printf("Data distribution is disabled for storage server failures.\n");
            } else if (tokencmp(tokens[2], "rebalance")) {
                wait(setDDIgnoreRebalanceSwitch(db, true));
                printf("Data distribution is disabled for rebalance.\n");
            } else {
                printf("Usage: datadistribution <on|off|disable <ssfailure|rebalance>|enable "
                       "<ssfailure|rebalance>>\n");
                result = false;
            }
        } else if (tokencmp(tokens[1], "enable")) {
            if (tokencmp(tokens[2], "ssfailure")) {
                wait(success((clearHealthyZone(db, false, true))));
                printf("Data distribution is enabled for storage server failures.\n");
            } else if (tokencmp(tokens[2], "rebalance")) {
                wait(setDDIgnoreRebalanceSwitch(db, false));
                printf("Data distribution is enabled for rebalance.\n");
            } else {
                printf("Usage: datadistribution <on|off|disable <ssfailure|rebalance>|enable "
                       "<ssfailure|rebalance>>\n");
                result = false;
            }
        } else {
            printf("Usage: datadistribution <on|off|disable <ssfailure|rebalance>|enable "
                   "<ssfailure|rebalance>>\n");
            result = false;
        }
    }
    return result;
}

// hidden commands, no help text for now
CommandFactory dataDistributionFactory("datadistribution");
} // namespace fdb_cli
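setDDMode() above reduces to a single special-key write once writes are enabled; the equivalent client-side sketch in the Python bindings (value encoding per the special-keys documentation earlier in this change):

import fdb

fdb.api_version(630)  # assumed API version

@fdb.transactional
def set_dd_mode(tr, mode: int):
    tr.options.set_special_key_space_enable_writes()
    # The value is the literal text ``0`` (disable) or ``1`` (enable).
    tr[b"\xff\xff/management/data_distribution/mode"] = str(mode).encode()

set_dd_mode(fdb.open(), 1)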
@@ -0,0 +1,101 @@
/*
 * ExpensiveDataCheckCommand.actor.cpp
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "fdbcli/fdbcli.actor.h"

#include "fdbclient/FDBOptions.g.h"
#include "fdbclient/IClientApi.h"
#include "fdbclient/Knobs.h"

#include "flow/Arena.h"
#include "flow/FastRef.h"
#include "flow/ThreadHelper.actor.h"
#include "flow/actorcompiler.h" // This must be the last #include.

namespace fdb_cli {

// The command is used to send a data check request to the specified process
// The check request is accomplished by rebooting the process

ACTOR Future<bool> expensiveDataCheckCommandActor(
    Reference<IDatabase> db,
    Reference<ITransaction> tr,
    std::vector<StringRef> tokens,
    std::map<Key, std::pair<Value, ClientLeaderRegInterface>>* address_interface) {
    state bool result = true;
    if (tokens.size() == 1) {
        // initialize worker interfaces
        wait(getWorkerInterfaces(tr, address_interface));
    }
    if (tokens.size() == 1 || tokencmp(tokens[1], "list")) {
        if (address_interface->size() == 0) {
            printf("\nNo addresses can be checked.\n");
        } else if (address_interface->size() == 1) {
            printf("\nThe following address can be checked:\n");
        } else {
            printf("\nThe following %zu addresses can be checked:\n", address_interface->size());
        }
        for (auto it : *address_interface) {
            printf("%s\n", printable(it.first).c_str());
        }
        printf("\n");
    } else if (tokencmp(tokens[1], "all")) {
        state std::map<Key, std::pair<Value, ClientLeaderRegInterface>>::const_iterator it;
        for (it = address_interface->cbegin(); it != address_interface->cend(); it++) {
            int64_t checkRequestSent = wait(safeThreadFutureToFuture(db->rebootWorker(it->first, true, 0)));
            if (!checkRequestSent) {
                result = false;
                fprintf(stderr, "ERROR: failed to send request to check process `%s'.\n", it->first.toString().c_str());
            }
        }
        if (address_interface->size() == 0) {
            fprintf(stderr,
                    "ERROR: no processes to check. You must run the `expensive_data_check' "
                    "command before running `expensive_data_check all'.\n");
        } else {
            printf("Attempted to kill and check %zu processes\n", address_interface->size());
        }
    } else {
        state int i;
        for (i = 1; i < tokens.size(); i++) {
            if (!address_interface->count(tokens[i])) {
                fprintf(stderr, "ERROR: process `%s' not recognized.\n", printable(tokens[i]).c_str());
                result = false;
                break;
            }
        }

        if (result) {
            for (i = 1; i < tokens.size(); i++) {
                int64_t checkRequestSent = wait(safeThreadFutureToFuture(db->rebootWorker(tokens[i], true, 0)));
                if (!checkRequestSent) {
                    result = false;
                    fprintf(
                        stderr, "ERROR: failed to send request to check process `%s'.\n", tokens[i].toString().c_str());
                }
            }
            printf("Attempted to kill and check %zu processes\n", tokens.size() - 1);
        }
    }
    return result;
}
// hidden commands, no help text for now
CommandFactory expensiveDataCheckFactory("expensive_data_check");
} // namespace fdb_cli
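Because the worker list must be populated before `all` can be used, a scripted invocation chains the two commands in one CLI session; a sketch (assumes fdbcli on PATH and that the list persists within a single --exec session, per the actor above):

import subprocess

def expensive_data_check(*addresses: str) -> None:
    # The first command populates the worker list; the second targets
    # either the given IP:port pairs or every known process.
    target = " ".join(addresses) if addresses else "all"
    subprocess.run(
        ["fdbcli", "--exec", f"expensive_data_check; expensive_data_check {target}"],
        check=True,
    )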
@@ -0,0 +1,107 @@
/*
 * KillCommand.actor.cpp
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "fdbcli/fdbcli.actor.h"

#include "fdbclient/FDBOptions.g.h"
#include "fdbclient/IClientApi.h"
#include "fdbclient/Knobs.h"

#include "flow/Arena.h"
#include "flow/FastRef.h"
#include "flow/ThreadHelper.actor.h"
#include "flow/actorcompiler.h" // This must be the last #include.

namespace fdb_cli {

ACTOR Future<bool> killCommandActor(Reference<IDatabase> db,
                                    Reference<ITransaction> tr,
                                    std::vector<StringRef> tokens,
                                    std::map<Key, std::pair<Value, ClientLeaderRegInterface>>* address_interface) {
    ASSERT(tokens.size() >= 1);
    state bool result = true;
    if (tokens.size() == 1) {
        // initialize worker interfaces
        wait(getWorkerInterfaces(tr, address_interface));
    }
    if (tokens.size() == 1 || tokencmp(tokens[1], "list")) {
        if (address_interface->size() == 0) {
            printf("\nNo addresses can be killed.\n");
        } else if (address_interface->size() == 1) {
            printf("\nThe following address can be killed:\n");
        } else {
            printf("\nThe following %zu addresses can be killed:\n", address_interface->size());
        }
        for (auto it : *address_interface) {
            printf("%s\n", printable(it.first).c_str());
        }
        printf("\n");
    } else if (tokencmp(tokens[1], "all")) {
        state std::map<Key, std::pair<Value, ClientLeaderRegInterface>>::const_iterator it;
        for (it = address_interface->cbegin(); it != address_interface->cend(); it++) {
            int64_t killRequestSent = wait(safeThreadFutureToFuture(db->rebootWorker(it->first, false, 0)));
            if (!killRequestSent) {
                result = false;
                fprintf(stderr, "ERROR: failed to send request to kill process `%s'.\n", it->first.toString().c_str());
            }
        }
        if (address_interface->size() == 0) {
            result = false;
            fprintf(stderr,
                    "ERROR: no processes to kill. You must run the `kill' command before "
                    "running `kill all'.\n");
        } else {
            printf("Attempted to kill %zu processes\n", address_interface->size());
        }
    } else {
        state int i;
        for (i = 1; i < tokens.size(); i++) {
            if (!address_interface->count(tokens[i])) {
                fprintf(stderr, "ERROR: process `%s' not recognized.\n", printable(tokens[i]).c_str());
                result = false;
                break;
            }
        }

        if (result) {
            for (i = 1; i < tokens.size(); i++) {
                int64_t killRequestSent = wait(safeThreadFutureToFuture(db->rebootWorker(tokens[i], false, 0)));
                if (!killRequestSent) {
                    result = false;
                    fprintf(
                        stderr, "ERROR: failed to send request to kill process `%s'.\n", tokens[i].toString().c_str());
                }
            }
            printf("Attempted to kill %zu processes\n", tokens.size() - 1);
        }
    }
    return result;
}

CommandFactory killFactory(
    "kill",
    CommandHelp(
        "kill all|list|<ADDRESS...>",
        "attempts to kill one or more processes in the cluster",
        "If no addresses are specified, populates the list of processes which can be killed. Processes cannot be "
        "killed before this list has been populated.\n\nIf `all' is specified, attempts to kill all known "
        "processes.\n\nIf `list' is specified, displays all known processes. This is only useful when the database is "
        "unresponsive.\n\nFor each IP:port pair in <ADDRESS ...>, attempt to kill the specified process."));
} // namespace fdb_cli
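The help text spells out the same two-step contract (populate, then target); scripted, it looks like this — a sketch with the same caveats as the check example above:

import subprocess

def kill_processes(*addresses: str) -> None:
    # `kill` with no arguments populates the candidate list first.
    target = " ".join(addresses) if addresses else "all"
    subprocess.run(["fdbcli", "--exec", f"kill; kill {target}"], check=True)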
@@ -64,43 +64,17 @@ ACTOR Future<Void> printHealthyZone(Reference<IDatabase> db) {
    }
}

// clear ongoing maintenance, let clearSSFailureZoneString = true to enable data distribution for storage
ACTOR Future<bool> clearHealthyZone(Reference<IDatabase> db,
                                    bool printWarning = false,
                                    bool clearSSFailureZoneString = false) {
    state Reference<ITransaction> tr = db->createTransaction();
    TraceEvent("ClearHealthyZone").detail("ClearSSFailureZoneString", clearSSFailureZoneString);
    loop {
        tr->setOption(FDBTransactionOptions::SPECIAL_KEY_SPACE_ENABLE_WRITES);
        try {
            // hold the returned standalone object's memory
            state ThreadFuture<RangeResult> resultFuture =
                tr->getRange(fdb_cli::maintenanceSpecialKeyRange, CLIENT_KNOBS->TOO_MANY);
            RangeResult res = wait(safeThreadFutureToFuture(resultFuture));
            ASSERT(res.size() <= 1);
            if (!clearSSFailureZoneString && res.size() == 1 && res[0].key == fdb_cli::ignoreSSFailureSpecialKey) {
                if (printWarning) {
                    fprintf(stderr,
                            "ERROR: Maintenance mode cannot be used while data distribution is disabled for storage "
                            "server failures. Use 'datadistribution on' to reenable data distribution.\n");
                }
                return false;
            }
} // namespace

            tr->clear(fdb_cli::maintenanceSpecialKeyRange);
            wait(safeThreadFutureToFuture(tr->commit()));
            return true;
        } catch (Error& e) {
            wait(safeThreadFutureToFuture(tr->onError(e)));
        }
    }
}
namespace fdb_cli {

const KeyRangeRef maintenanceSpecialKeyRange = KeyRangeRef(LiteralStringRef("\xff\xff/management/maintenance/"),
                                                           LiteralStringRef("\xff\xff/management/maintenance0"));
// The special key, if present, means data distribution is disabled for storage failures;
const KeyRef ignoreSSFailureSpecialKey = LiteralStringRef("\xff\xff/management/maintenance/IgnoreSSFailures");

// add a zone to maintenance and specify the maintenance duration
ACTOR Future<bool> setHealthyZone(Reference<IDatabase> db,
                                  StringRef zoneId,
                                  double seconds,
                                  bool printWarning = false) {
ACTOR Future<bool> setHealthyZone(Reference<IDatabase> db, StringRef zoneId, double seconds, bool printWarning) {
    state Reference<ITransaction> tr = db->createTransaction();
    TraceEvent("SetHealthyZone").detail("Zone", zoneId).detail("DurationSeconds", seconds);
    loop {

@@ -129,14 +103,35 @@ ACTOR Future<bool> setHealthyZone(Reference<IDatabase> db,
    }
}

} // namespace
// clear ongoing maintenance, let clearSSFailureZoneString = true to enable data distribution for storage
ACTOR Future<bool> clearHealthyZone(Reference<IDatabase> db, bool printWarning, bool clearSSFailureZoneString) {
    state Reference<ITransaction> tr = db->createTransaction();
    TraceEvent("ClearHealthyZone").detail("ClearSSFailureZoneString", clearSSFailureZoneString);
    loop {
        tr->setOption(FDBTransactionOptions::SPECIAL_KEY_SPACE_ENABLE_WRITES);
        try {
            // hold the returned standalone object's memory
            state ThreadFuture<RangeResult> resultFuture =
                tr->getRange(fdb_cli::maintenanceSpecialKeyRange, CLIENT_KNOBS->TOO_MANY);
            RangeResult res = wait(safeThreadFutureToFuture(resultFuture));
            ASSERT(res.size() <= 1);
            if (!clearSSFailureZoneString && res.size() == 1 && res[0].key == fdb_cli::ignoreSSFailureSpecialKey) {
                if (printWarning) {
                    fprintf(stderr,
                            "ERROR: Maintenance mode cannot be used while data distribution is disabled for storage "
                            "server failures. Use 'datadistribution on' to reenable data distribution.\n");
                }
                return false;
            }

namespace fdb_cli {

const KeyRangeRef maintenanceSpecialKeyRange = KeyRangeRef(LiteralStringRef("\xff\xff/management/maintenance/"),
                                                           LiteralStringRef("\xff\xff/management/maintenance0"));
// The special key, if present, means data distribution is disabled for storage failures;
const KeyRef ignoreSSFailureSpecialKey = LiteralStringRef("\xff\xff/management/maintenance/IgnoreSSFailures");
            tr->clear(fdb_cli::maintenanceSpecialKeyRange);
            wait(safeThreadFutureToFuture(tr->commit()));
            return true;
        } catch (Error& e) {
            wait(safeThreadFutureToFuture(tr->onError(e)));
        }
    }
}

ACTOR Future<bool> maintenanceCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens) {
    state bool result = true;
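setHealthyZone()/clearHealthyZone() above reduce to writes against the maintenance special key range. A hedged Python sketch — the seconds-as-text value encoding is an assumption, not confirmed by this diff:

import fdb

fdb.api_version(630)  # assumed API version

@fdb.transactional
def set_maintenance_zone(tr, zone_id: bytes, seconds: float):
    tr.options.set_special_key_space_enable_writes()
    # Assumed encoding: the remaining duration as text, keyed by zone id.
    tr[b"\xff\xff/management/maintenance/" + zone_id] = str(seconds).encode()

@fdb.transactional
def clear_maintenance(tr):
    tr.options.set_special_key_space_enable_writes()
    # Range clear over the whole maintenance key range, as in the actor above.
    del tr[b"\xff\xff/management/maintenance/":b"\xff\xff/management/maintenance0"]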
@ -0,0 +1,140 @@
|
|||
/*
|
||||
* ProfileCommand.actor.cpp
|
||||
*
|
||||
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "boost/lexical_cast.hpp"

#include "fdbcli/fdbcli.actor.h"

#include "fdbclient/GlobalConfig.actor.h"
#include "fdbclient/FDBOptions.g.h"
#include "fdbclient/IClientApi.h"
#include "fdbclient/Knobs.h"
#include "fdbclient/Tuple.h"

#include "flow/Arena.h"
#include "flow/FastRef.h"
#include "flow/ThreadHelper.actor.h"
#include "flow/actorcompiler.h" // This must be the last #include.

namespace fdb_cli {

ACTOR Future<bool> profileCommandActor(Reference<ITransaction> tr, std::vector<StringRef> tokens, bool intrans) {
    state bool result = true;
    if (tokens.size() == 1) {
        fprintf(stderr, "ERROR: Usage: profile <client|list|flow|heap>\n");
        result = false;
    } else if (tokencmp(tokens[1], "client")) {
        if (tokens.size() == 2) {
            fprintf(stderr, "ERROR: Usage: profile client <get|set>\n");
            return false;
        }
        wait(GlobalConfig::globalConfig().onInitialized());
        if (tokencmp(tokens[2], "get")) {
            if (tokens.size() != 3) {
                fprintf(stderr, "ERROR: Additional arguments to `get` are not supported.\n");
                return false;
            }
            std::string sampleRateStr = "default";
            std::string sizeLimitStr = "default";
            const double sampleRateDbl = GlobalConfig::globalConfig().get<double>(
                fdbClientInfoTxnSampleRate, std::numeric_limits<double>::infinity());
            if (!std::isinf(sampleRateDbl)) {
                sampleRateStr = std::to_string(sampleRateDbl);
            }
            const int64_t sizeLimit = GlobalConfig::globalConfig().get<int64_t>(fdbClientInfoTxnSizeLimit, -1);
            if (sizeLimit != -1) {
                sizeLimitStr = boost::lexical_cast<std::string>(sizeLimit);
            }
            printf("Client profiling rate is set to %s and size limit is set to %s.\n",
                   sampleRateStr.c_str(),
                   sizeLimitStr.c_str());
        } else if (tokencmp(tokens[2], "set")) {
            if (tokens.size() != 5) {
                fprintf(stderr, "ERROR: Usage: profile client set <RATE|default> <SIZE|default>\n");
                return false;
            }
            double sampleRate;
            if (tokencmp(tokens[3], "default")) {
                sampleRate = std::numeric_limits<double>::infinity();
            } else {
                char* end;
                sampleRate = std::strtod((const char*)tokens[3].begin(), &end);
                if (!std::isspace(*end)) {
                    fprintf(stderr, "ERROR: %s failed to parse.\n", printable(tokens[3]).c_str());
                    return false;
                }
            }
            int64_t sizeLimit;
            if (tokencmp(tokens[4], "default")) {
                sizeLimit = -1;
            } else {
                Optional<uint64_t> parsed = parse_with_suffix(tokens[4].toString());
                if (parsed.present()) {
                    sizeLimit = parsed.get();
                } else {
                    fprintf(stderr, "ERROR: `%s` failed to parse.\n", printable(tokens[4]).c_str());
                    return false;
                }
            }

            Tuple rate = Tuple().appendDouble(sampleRate);
            Tuple size = Tuple().append(sizeLimit);
            tr->setOption(FDBTransactionOptions::SPECIAL_KEY_SPACE_ENABLE_WRITES);
            tr->set(GlobalConfig::prefixedKey(fdbClientInfoTxnSampleRate), rate.pack());
            tr->set(GlobalConfig::prefixedKey(fdbClientInfoTxnSizeLimit), size.pack());
            if (!intrans) {
                wait(safeThreadFutureToFuture(tr->commit()));
            }
        } else {
            fprintf(stderr, "ERROR: Unknown action: %s\n", printable(tokens[2]).c_str());
            result = false;
        }
    } else if (tokencmp(tokens[1], "list")) {
        if (tokens.size() != 2) {
            fprintf(stderr, "ERROR: Usage: profile list\n");
            return false;
        }
        // Hold the reference to the standalone's memory
        state ThreadFuture<RangeResult> kvsFuture =
            tr->getRange(KeyRangeRef(LiteralStringRef("\xff\xff/worker_interfaces/"),
                                     LiteralStringRef("\xff\xff/worker_interfaces0")),
                         CLIENT_KNOBS->TOO_MANY);
        RangeResult kvs = wait(safeThreadFutureToFuture(kvsFuture));
        ASSERT(!kvs.more);
        for (const auto& pair : kvs) {
            auto ip_port =
                (pair.key.endsWith(LiteralStringRef(":tls")) ? pair.key.removeSuffix(LiteralStringRef(":tls"))
                                                             : pair.key)
                    .removePrefix(LiteralStringRef("\xff\xff/worker_interfaces/"));
            printf("%s\n", printable(ip_port).c_str());
        }
    } else {
        fprintf(stderr, "ERROR: Unknown type: %s\n", printable(tokens[1]).c_str());
        result = false;
    }
    return result;
}

CommandFactory profileFactory("profile",
                              CommandHelp("profile <client|list> <action> <ARGS>",
                                          "namespace for all the profiling-related commands.",
                                          "Different types support different actions. Run `profile` to get a list of "
                                          "types, and iteratively explore the help.\n"));
} // namespace fdb_cli
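As a standalone illustration of the full-token check `profile client set` performs above, a minimal sketch of the strtod/end-pointer idiom; the helper name is illustrative, not part of the diff:

#include <cctype>
#include <cstdlib>
#include <string>

// Accepts "0.01" but rejects "0.01x": strtod must consume the entire token,
// leaving `end` at the terminating NUL (or trailing whitespace).
bool parseRate(const std::string& token, double& out) {
    char* end = nullptr;
    out = std::strtod(token.c_str(), &end);
    return end != token.c_str() &&
           (*end == '\0' || std::isspace(static_cast<unsigned char>(*end)));
}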
@@ -71,15 +71,22 @@ ACTOR Future<Void> printProcessClass(Reference<IDatabase> db) {
};

ACTOR Future<bool> setProcessClass(Reference<IDatabase> db, KeyRef network_address, KeyRef class_type) {
    state Reference<ITransaction> tr = db->createTransaction();
    loop {
        tr->setOption(FDBTransactionOptions::SPECIAL_KEY_SPACE_ENABLE_WRITES);
        try {
            tr->set(network_address.withPrefix(fdb_cli::processClassTypeSpecialKeyRange.begin), class_type);
            wait(safeThreadFutureToFuture(tr->commit()));
            return true;
        } catch (Error& e) {
            state Error err(e);
            if (e.code() == error_code_special_keys_api_failure) {
                std::string errorMsgStr = wait(fdb_cli::getSpecialKeysFailureErrorMessage(tr));
                // error message already has \n at the end
                fprintf(stderr, "%s", errorMsgStr.c_str());
                return false;
            }
            wait(safeThreadFutureToFuture(tr->onError(err)));
        }
    }
}
@@ -103,8 +110,8 @@ ACTOR Future<bool> setClassCommandActor(Reference<IDatabase> db, std::vector<Str
    } else if (tokens.size() == 1) {
        wait(printProcessClass(db));
    } else {
        bool successful = wait(setProcessClass(db, tokens[1], tokens[2]));
        return successful;
    }
    return true;
}
@@ -60,5 +60,5 @@ ACTOR Future<bool> snapshotCommandActor(Reference<IDatabase> db, std::vector<Str
}

// hidden commands, no help text for now
CommandFactory snapshotFactory("snapshot");
} // namespace fdb_cli
File diff suppressed because it is too large
@@ -0,0 +1,102 @@
/*
 * SuspendCommand.actor.cpp
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "fdbcli/fdbcli.actor.h"

#include "fdbclient/FDBOptions.g.h"
#include "fdbclient/IClientApi.h"
#include "fdbclient/Knobs.h"

#include "flow/Arena.h"
#include "flow/FastRef.h"
#include "flow/ThreadHelper.actor.h"
#include "flow/actorcompiler.h" // This must be the last #include.

namespace fdb_cli {

ACTOR Future<bool> suspendCommandActor(Reference<IDatabase> db,
                                       Reference<ITransaction> tr,
                                       std::vector<StringRef> tokens,
                                       std::map<Key, std::pair<Value, ClientLeaderRegInterface>>* address_interface) {
    ASSERT(tokens.size() >= 1);
    state bool result = true;
    if (tokens.size() == 1) {
        // initialize worker interfaces
        wait(getWorkerInterfaces(tr, address_interface));
        if (address_interface->size() == 0) {
            printf("\nNo addresses can be suspended.\n");
        } else if (address_interface->size() == 1) {
            printf("\nThe following address can be suspended:\n");
        } else {
            printf("\nThe following %zu addresses can be suspended:\n", address_interface->size());
        }
        for (auto it : *address_interface) {
            printf("%s\n", printable(it.first).c_str());
        }
        printf("\n");
    } else if (tokens.size() == 2) {
        printUsage(tokens[0]);
        result = false;
    } else {
        for (int i = 2; i < tokens.size(); i++) {
            if (!address_interface->count(tokens[i])) {
                fprintf(stderr, "ERROR: process `%s' not recognized.\n", printable(tokens[i]).c_str());
                result = false;
                break;
            }
        }

        if (result) {
            state double seconds;
            int n = 0;
            state int i;
            auto secondsStr = tokens[1].toString();
            if (sscanf(secondsStr.c_str(), "%lf%n", &seconds, &n) != 1 || n != secondsStr.size()) {
                printUsage(tokens[0]);
                result = false;
            } else {
                int64_t timeout_ms = seconds * 1000;
                tr->setOption(FDBTransactionOptions::TIMEOUT, StringRef((uint8_t*)&timeout_ms, sizeof(int64_t)));
                for (i = 2; i < tokens.size(); i++) {
                    int64_t suspendRequestSent =
                        wait(safeThreadFutureToFuture(db->rebootWorker(tokens[i], false, static_cast<int>(seconds))));
                    if (!suspendRequestSent) {
                        result = false;
                        fprintf(stderr,
                                "ERROR: failed to send request to suspend process `%s'.\n",
                                tokens[i].toString().c_str());
                    }
                }
                printf("Attempted to suspend %zu processes\n", tokens.size() - 2);
            }
        }
    }
    return result;
}

CommandFactory suspendFactory(
    "suspend",
    CommandHelp(
        "suspend <SECONDS> <ADDRESS...>",
        "attempts to suspend one or more processes in the cluster",
        "If no parameters are specified, populates the list of processes which can be suspended. Processes cannot be "
        "suspended before this list has been populated.\n\nFor each IP:port pair in <ADDRESS...>, attempt to suspend "
        "the processes for the specified SECONDS after which the process will die."));
} // namespace fdb_cli
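The timeout above is handed to `setOption` as raw bytes; a small sketch of that encoding with hypothetical names (FDB numeric options are passed as the host-order in-memory bytes of an int64, exactly as the command does):

#include <cstdint>
#include <utility>

// Returns a (pointer, length) view over the int64's own bytes; the caller
// must keep `timeoutMs` alive while the view is in use.
std::pair<const uint8_t*, size_t> encodeTimeoutMs(const int64_t& timeoutMs) {
    return { reinterpret_cast<const uint8_t*>(&timeoutMs), sizeof(int64_t) };
}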
@@ -21,345 +21,16 @@
#include "fdbcli/fdbcli.actor.h"

#include "fdbclient/IClientApi.h"
#include "fdbclient/TagThrottle.actor.h"
#include "fdbclient/Knobs.h"
#include "fdbclient/SystemData.h"
#include "fdbclient/CommitTransaction.h"

#include "flow/Arena.h"
#include "flow/FastRef.h"
#include "flow/ThreadHelper.actor.h"
#include "flow/genericactors.actor.h"
#include "flow/actorcompiler.h" // This must be the last #include.

namespace {

// Helper functions copied from TagThrottle.actor.cpp
// The only difference is that transactions are changed to go through MultiVersionTransaction
// instead of the native Transaction (i.e., RYWTransaction)

ACTOR Future<bool> getValidAutoEnabled(Reference<ITransaction> tr) {
    state bool result;
    loop {
        Optional<Value> value = wait(safeThreadFutureToFuture(tr->get(tagThrottleAutoEnabledKey)));
        if (!value.present()) {
            tr->reset();
            wait(delay(CLIENT_KNOBS->DEFAULT_BACKOFF));
            continue;
        } else if (value.get() == LiteralStringRef("1")) {
            result = true;
        } else if (value.get() == LiteralStringRef("0")) {
            result = false;
        } else {
            TraceEvent(SevWarnAlways, "InvalidAutoTagThrottlingValue").detail("Value", value.get());
            tr->reset();
            wait(delay(CLIENT_KNOBS->DEFAULT_BACKOFF));
            continue;
        }
        return result;
    }
}

ACTOR Future<std::vector<TagThrottleInfo>> getThrottledTags(Reference<IDatabase> db,
                                                            int limit,
                                                            bool containsRecommend = false) {
    state Reference<ITransaction> tr = db->createTransaction();
    state bool reportAuto = containsRecommend;
    loop {
        tr->setOption(FDBTransactionOptions::READ_SYSTEM_KEYS);
        try {
            if (!containsRecommend) {
                wait(store(reportAuto, getValidAutoEnabled(tr)));
            }
            state ThreadFuture<RangeResult> f = tr->getRange(
                reportAuto ? tagThrottleKeys : KeyRangeRef(tagThrottleKeysPrefix, tagThrottleAutoKeysPrefix), limit);
            RangeResult throttles = wait(safeThreadFutureToFuture(f));
            std::vector<TagThrottleInfo> results;
            for (auto throttle : throttles) {
                results.push_back(TagThrottleInfo(TagThrottleKey::fromKey(throttle.key),
                                                  TagThrottleValue::fromValue(throttle.value)));
            }
            return results;
        } catch (Error& e) {
            wait(safeThreadFutureToFuture(tr->onError(e)));
        }
    }
}

ACTOR Future<std::vector<TagThrottleInfo>> getRecommendedTags(Reference<IDatabase> db, int limit) {
    state Reference<ITransaction> tr = db->createTransaction();
    loop {
        tr->setOption(FDBTransactionOptions::READ_SYSTEM_KEYS);
        try {
            bool enableAuto = wait(getValidAutoEnabled(tr));
            if (enableAuto) {
                return std::vector<TagThrottleInfo>();
            }
            state ThreadFuture<RangeResult> f =
                tr->getRange(KeyRangeRef(tagThrottleAutoKeysPrefix, tagThrottleKeys.end), limit);
            RangeResult throttles = wait(safeThreadFutureToFuture(f));
            std::vector<TagThrottleInfo> results;
            for (auto throttle : throttles) {
                results.push_back(TagThrottleInfo(TagThrottleKey::fromKey(throttle.key),
                                                  TagThrottleValue::fromValue(throttle.value)));
            }
            return results;
        } catch (Error& e) {
            wait(safeThreadFutureToFuture(tr->onError(e)));
        }
    }
}

ACTOR Future<Void> updateThrottleCount(Reference<ITransaction> tr, int64_t delta) {
    state ThreadFuture<Optional<Value>> countVal = tr->get(tagThrottleCountKey);
    state ThreadFuture<Optional<Value>> limitVal = tr->get(tagThrottleLimitKey);

    wait(success(safeThreadFutureToFuture(countVal)) && success(safeThreadFutureToFuture(limitVal)));

    int64_t count = 0;
    int64_t limit = 0;

    if (countVal.get().present()) {
        BinaryReader reader(countVal.get().get(), Unversioned());
        reader >> count;
    }

    if (limitVal.get().present()) {
        BinaryReader reader(limitVal.get().get(), Unversioned());
        reader >> limit;
    }

    count += delta;

    if (count > limit) {
        throw too_many_tag_throttles();
    }

    BinaryWriter writer(Unversioned());
    writer << count;

    tr->set(tagThrottleCountKey, writer.toValue());
    return Void();
}

void signalThrottleChange(Reference<ITransaction> tr) {
    tr->atomicOp(
        tagThrottleSignalKey, LiteralStringRef("XXXXXXXXXX\x00\x00\x00\x00"), MutationRef::SetVersionstampedValue);
}

ACTOR Future<Void> throttleTags(Reference<IDatabase> db,
                                TagSet tags,
                                double tpsRate,
                                double initialDuration,
                                TagThrottleType throttleType,
                                TransactionPriority priority,
                                Optional<double> expirationTime = Optional<double>(),
                                Optional<TagThrottledReason> reason = Optional<TagThrottledReason>()) {
    state Reference<ITransaction> tr = db->createTransaction();
    state Key key = TagThrottleKey(tags, throttleType, priority).toKey();

    ASSERT(initialDuration > 0);

    if (throttleType == TagThrottleType::MANUAL) {
        reason = TagThrottledReason::MANUAL;
    }
    TagThrottleValue throttle(tpsRate,
                              expirationTime.present() ? expirationTime.get() : 0,
                              initialDuration,
                              reason.present() ? reason.get() : TagThrottledReason::UNSET);
    BinaryWriter wr(IncludeVersion(ProtocolVersion::withTagThrottleValueReason()));
    wr << throttle;
    state Value value = wr.toValue();

    loop {
        tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
        try {
            if (throttleType == TagThrottleType::MANUAL) {
                Optional<Value> oldThrottle = wait(safeThreadFutureToFuture(tr->get(key)));
                if (!oldThrottle.present()) {
                    wait(updateThrottleCount(tr, 1));
                }
            }

            tr->set(key, value);

            if (throttleType == TagThrottleType::MANUAL) {
                signalThrottleChange(tr);
            }

            wait(safeThreadFutureToFuture(tr->commit()));
            return Void();
        } catch (Error& e) {
            wait(safeThreadFutureToFuture(tr->onError(e)));
        }
    }
}

ACTOR Future<bool> unthrottleTags(Reference<IDatabase> db,
                                  TagSet tags,
                                  Optional<TagThrottleType> throttleType,
                                  Optional<TransactionPriority> priority) {
    state Reference<ITransaction> tr = db->createTransaction();

    state std::vector<Key> keys;
    for (auto p : allTransactionPriorities) {
        if (!priority.present() || priority.get() == p) {
            if (!throttleType.present() || throttleType.get() == TagThrottleType::AUTO) {
                keys.push_back(TagThrottleKey(tags, TagThrottleType::AUTO, p).toKey());
            }
            if (!throttleType.present() || throttleType.get() == TagThrottleType::MANUAL) {
                keys.push_back(TagThrottleKey(tags, TagThrottleType::MANUAL, p).toKey());
            }
        }
    }

    state bool removed = false;

    loop {
        tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
        try {
            state std::vector<Future<Optional<Value>>> values;
            values.reserve(keys.size());
            for (auto key : keys) {
                values.push_back(safeThreadFutureToFuture(tr->get(key)));
            }

            wait(waitForAll(values));

            int delta = 0;
            for (int i = 0; i < values.size(); ++i) {
                if (values[i].get().present()) {
                    if (TagThrottleKey::fromKey(keys[i]).throttleType == TagThrottleType::MANUAL) {
                        delta -= 1;
                    }

                    tr->clear(keys[i]);

                    // Report that we are removing this tag if we ever see it present.
                    // This protects us from getting confused if the transaction is maybe committed.
                    // It's ok if someone else actually ends up removing this tag at the same time
                    // and we aren't the ones to actually do it.
                    removed = true;
                }
            }

            if (delta != 0) {
                wait(updateThrottleCount(tr, delta));
            }
            if (removed) {
                signalThrottleChange(tr);
                wait(safeThreadFutureToFuture(tr->commit()));
            }

            return removed;
        } catch (Error& e) {
            wait(safeThreadFutureToFuture(tr->onError(e)));
        }
    }
}

ACTOR Future<Void> enableAuto(Reference<IDatabase> db, bool enabled) {
    state Reference<ITransaction> tr = db->createTransaction();

    loop {
        tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
        try {
            Optional<Value> value = wait(safeThreadFutureToFuture(tr->get(tagThrottleAutoEnabledKey)));
            if (!value.present() || (enabled && value.get() != LiteralStringRef("1")) ||
                (!enabled && value.get() != LiteralStringRef("0"))) {
                tr->set(tagThrottleAutoEnabledKey, LiteralStringRef(enabled ? "1" : "0"));
                signalThrottleChange(tr);

                wait(safeThreadFutureToFuture(tr->commit()));
            }
            return Void();
        } catch (Error& e) {
            wait(safeThreadFutureToFuture(tr->onError(e)));
        }
    }
}

ACTOR Future<bool> unthrottleMatchingThrottles(Reference<IDatabase> db,
                                               KeyRef beginKey,
                                               KeyRef endKey,
                                               Optional<TransactionPriority> priority,
                                               bool onlyExpiredThrottles) {
    state Reference<ITransaction> tr = db->createTransaction();

    state KeySelector begin = firstGreaterOrEqual(beginKey);
    state KeySelector end = firstGreaterOrEqual(endKey);

    state bool removed = false;

    loop {
        tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
        try {
            // holds memory of the RangeResult
            state ThreadFuture<RangeResult> f = tr->getRange(begin, end, 1000);
            state RangeResult tags = wait(safeThreadFutureToFuture(f));
            state uint64_t unthrottledTags = 0;
            uint64_t manualUnthrottledTags = 0;
            for (auto tag : tags) {
                if (onlyExpiredThrottles) {
                    double expirationTime = TagThrottleValue::fromValue(tag.value).expirationTime;
                    if (expirationTime == 0 || expirationTime > now()) {
                        continue;
                    }
                }

                TagThrottleKey key = TagThrottleKey::fromKey(tag.key);
                if (priority.present() && key.priority != priority.get()) {
                    continue;
                }

                if (key.throttleType == TagThrottleType::MANUAL) {
                    ++manualUnthrottledTags;
                }

                removed = true;
                tr->clear(tag.key);
                unthrottledTags++;
            }

            if (manualUnthrottledTags > 0) {
                wait(updateThrottleCount(tr, -manualUnthrottledTags));
            }

            if (unthrottledTags > 0) {
                signalThrottleChange(tr);
            }

            wait(safeThreadFutureToFuture(tr->commit()));

            if (!tags.more) {
                return removed;
            }

            ASSERT(tags.size() > 0);
            begin = KeySelector(firstGreaterThan(tags[tags.size() - 1].key), tags.arena());
        } catch (Error& e) {
            wait(safeThreadFutureToFuture(tr->onError(e)));
        }
    }
}

Future<bool> unthrottleAll(Reference<IDatabase> db,
                           Optional<TagThrottleType> tagThrottleType,
                           Optional<TransactionPriority> priority) {
    KeyRef begin = tagThrottleKeys.begin;
    KeyRef end = tagThrottleKeys.end;

    if (tagThrottleType.present() && tagThrottleType == TagThrottleType::AUTO) {
        begin = tagThrottleAutoKeysPrefix;
    } else if (tagThrottleType.present() && tagThrottleType == TagThrottleType::MANUAL) {
        end = tagThrottleAutoKeysPrefix;
    }

    return unthrottleMatchingThrottles(db, begin, end, priority, false);
}

} // namespace
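updateThrottleCount above stores the throttle count as a flow-serialized int64; a minimal sketch of that round-trip using the same primitives the function uses (helper names here are illustrative):

// Unversioned() means no protocol-version header is written; the value is
// just the raw flow encoding of an int64.
int64_t decodeThrottleCount(Value const& value) {
    int64_t count = 0;
    BinaryReader reader(value, Unversioned());
    reader >> count;
    return count;
}

Value encodeThrottleCount(int64_t count) {
    BinaryWriter writer(Unversioned());
    writer << count;
    return writer.toValue();
}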

namespace fdb_cli {

ACTOR Future<bool> throttleCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens) {
@@ -403,11 +74,11 @@ ACTOR Future<bool> throttleCommandActor(Reference<IDatabase> db, std::vector<Str

        state std::vector<TagThrottleInfo> tags;
        if (reportThrottled && reportRecommended) {
            wait(store(tags, ThrottleApi::getThrottledTags(db, throttleListLimit, true)));
        } else if (reportThrottled) {
            wait(store(tags, ThrottleApi::getThrottledTags(db, throttleListLimit)));
        } else if (reportRecommended) {
            wait(store(tags, ThrottleApi::getRecommendedTags(db, throttleListLimit)));
        }

        bool anyLogged = false;
@@ -509,7 +180,7 @@ ACTOR Future<bool> throttleCommandActor(Reference<IDatabase> db, std::vector<Str
            TagSet tags;
            tags.addTag(tokens[3]);

            wait(ThrottleApi::throttleTags(db, tags, tpsRate, duration, TagThrottleType::MANUAL, priority));
            printf("Tag `%s' has been throttled\n", tokens[3].toString().c_str());
        } else if (tokencmp(tokens[1], "off")) {
            int nextIndex = 2;
@@ -586,7 +257,7 @@ ACTOR Future<bool> throttleCommandActor(Reference<IDatabase> db, std::vector<Str
            priority.present() ? format(" at %s priority", transactionPriorityToString(priority.get(), false)) : "";

        if (tags.size() > 0) {
            bool success = wait(ThrottleApi::unthrottleTags(db, tags, throttleType, priority));
            if (success) {
                printf("Unthrottled tag `%s'%s\n", tokens[3].toString().c_str(), priorityString.c_str());
            } else {
@@ -596,7 +267,7 @@ ACTOR Future<bool> throttleCommandActor(Reference<IDatabase> db, std::vector<Str
                    priorityString.c_str());
            }
        } else {
            bool unthrottled = wait(ThrottleApi::unthrottleAll(db, throttleType, priority));
            if (unthrottled) {
                printf("Unthrottled all %sthrottled tags%s\n", throttleTypeString, priorityString.c_str());
            } else {
@@ -626,7 +297,7 @@ ACTOR Future<bool> throttleCommandActor(Reference<IDatabase> db, std::vector<Str
            return false;
        }
        state bool autoTagThrottlingEnabled = tokencmp(tokens[1], "enable");
        wait(ThrottleApi::enableAuto(db, autoTagThrottlingEnabled));
        printf("Automatic tag throttling has been %s\n", autoTagThrottlingEnabled ? "enabled" : "disabled");
    } else {
        printUsage(tokens[0]);
@@ -0,0 +1,57 @@
/*
 * TriggerDDTeamInfoLogCommand.actor.cpp
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "fdbcli/fdbcli.actor.h"

#include "fdbclient/FDBOptions.g.h"
#include "fdbclient/IClientApi.h"
#include "fdbclient/SystemData.h"

#include "flow/Arena.h"
#include "flow/FastRef.h"
#include "flow/ThreadHelper.actor.h"
#include "flow/actorcompiler.h" // This must be the last #include.

namespace fdb_cli {

ACTOR Future<Void> triggerddteaminfologCommandActor(Reference<IDatabase> db) {
    state Reference<ITransaction> tr = db->createTransaction();
    loop {
        try {
            tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
            tr->setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);
            std::string v = deterministicRandom()->randomUniqueID().toString();
            tr->set(triggerDDTeamInfoPrintKey, v);
            wait(safeThreadFutureToFuture(tr->commit()));
            printf("Triggered team info logging in data distribution.\n");
            return Void();
        } catch (Error& e) {
            wait(safeThreadFutureToFuture(tr->onError(e)));
        }
    }
}

CommandFactory triggerddteaminfologFactory(
    "triggerddteaminfolog",
    CommandHelp("triggerddteaminfolog",
                "trigger the data distributor teams logging",
                "Trigger the data distributor to log detailed information about its teams."));

} // namespace fdb_cli
@@ -0,0 +1,144 @@
/*
 * TssqCommand.actor.cpp
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "fdbcli/fdbcli.actor.h"

#include "fdbclient/FDBOptions.g.h"
#include "fdbclient/IClientApi.h"
#include "fdbclient/KeyBackedTypes.h"
#include "fdbclient/SystemData.h"

#include "flow/Arena.h"
#include "flow/FastRef.h"
#include "flow/ThreadHelper.actor.h"
#include "flow/actorcompiler.h" // This must be the last #include.

namespace {

ACTOR Future<Void> tssQuarantineList(Reference<IDatabase> db) {
    state Reference<ITransaction> tr = db->createTransaction();
    loop {
        try {
            tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
            tr->setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);
            // Hold the reference to the standalone's memory
            state ThreadFuture<RangeResult> resultFuture = tr->getRange(tssQuarantineKeys, CLIENT_KNOBS->TOO_MANY);
            RangeResult result = wait(safeThreadFutureToFuture(resultFuture));
            // shouldn't have many quarantined TSSes
            ASSERT(!result.more);
            printf("Found %d quarantined TSS processes%s\n", result.size(), result.size() == 0 ? "." : ":");
            for (auto& it : result) {
                printf("  %s\n", decodeTssQuarantineKey(it.key).toString().c_str());
            }
            return Void();
        } catch (Error& e) {
            wait(safeThreadFutureToFuture(tr->onError(e)));
        }
    }
}

ACTOR Future<bool> tssQuarantine(Reference<IDatabase> db, bool enable, UID tssId) {
    state Reference<ITransaction> tr = db->createTransaction();
    state KeyBackedMap<UID, UID> tssMapDB = KeyBackedMap<UID, UID>(tssMappingKeys.begin);

    loop {
        try {
            tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
            tr->setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);

            // Do some validation first to make sure the command is valid
            Optional<Value> serverListValue = wait(safeThreadFutureToFuture(tr->get(serverListKeyFor(tssId))));
            if (!serverListValue.present()) {
                printf("No TSS %s found in cluster!\n", tssId.toString().c_str());
                return false;
            }
            state StorageServerInterface ssi = decodeServerListValue(serverListValue.get());
            if (!ssi.isTss()) {
                printf("Cannot quarantine non-TSS storage ID %s!\n", tssId.toString().c_str());
                return false;
            }

            Optional<Value> currentQuarantineValue =
                wait(safeThreadFutureToFuture(tr->get(tssQuarantineKeyFor(tssId))));
            if (enable && currentQuarantineValue.present()) {
                printf("TSS %s already in quarantine, doing nothing.\n", tssId.toString().c_str());
                return false;
            } else if (!enable && !currentQuarantineValue.present()) {
                printf("TSS %s is not in quarantine, cannot remove from quarantine!\n", tssId.toString().c_str());
                return false;
            }

            if (enable) {
                tr->set(tssQuarantineKeyFor(tssId), LiteralStringRef(""));
                // remove server from TSS mapping when quarantine is enabled
                tssMapDB.erase(tr, ssi.tssPairID.get());
            } else {
                tr->clear(tssQuarantineKeyFor(tssId));
            }

            wait(safeThreadFutureToFuture(tr->commit()));
            break;
        } catch (Error& e) {
            wait(safeThreadFutureToFuture(tr->onError(e)));
        }
    }
    printf("Successfully %s TSS %s\n", enable ? "quarantined" : "removed", tssId.toString().c_str());
    return true;
}

} // namespace

namespace fdb_cli {

ACTOR Future<bool> tssqCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens) {
    if (tokens.size() == 2) {
        if (tokens[1] != LiteralStringRef("list")) {
            printUsage(tokens[0]);
            return false;
        } else {
            wait(tssQuarantineList(db));
        }
    } else if (tokens.size() == 3) {
        if ((tokens[1] != LiteralStringRef("start") && tokens[1] != LiteralStringRef("stop")) ||
            (tokens[2].size() != 32) || !std::all_of(tokens[2].begin(), tokens[2].end(), &isxdigit)) {
            printUsage(tokens[0]);
            return false;
        } else {
            bool enable = tokens[1] == LiteralStringRef("start");
            UID tssId = UID::fromString(tokens[2].toString());
            bool success = wait(tssQuarantine(db, enable, tssId));
            return success;
        }
    } else {
        printUsage(tokens[0]);
        return false;
    }
    return true;
}

CommandFactory tssqFactory(
    "tssq",
    CommandHelp("tssq start|stop <StorageUID>",
                "start/stop tss quarantine",
                "Toggles Quarantine mode for a Testing Storage Server. Quarantine will happen automatically if the "
                "TSS is detected to have incorrect data, but can also be initiated manually. You can also remove a "
                "TSS from quarantine once your investigation is finished, which will destroy the TSS process."));

} // namespace fdb_cli
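tssqCommandActor above gates the command on a well-formed storage UID; the same check in isolation as a standalone sketch (the helper name is illustrative):

#include <algorithm>
#include <cctype>
#include <string>

// Mirrors the argument validation above: a storage UID token must be exactly
// 32 hexadecimal characters before UID::fromString is attempted.
bool looksLikeStorageUID(const std::string& token) {
    return token.size() == 32 && std::all_of(token.begin(), token.end(), [](unsigned char c) {
        return std::isxdigit(c) != 0;
    });
}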
@@ -0,0 +1,115 @@
/*
 * Util.actor.cpp
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "fdbcli/fdbcli.actor.h"
#include "fdbclient/ManagementAPI.actor.h"
#include "fdbclient/Schemas.h"
#include "fdbclient/Status.h"

#include "flow/Arena.h"

#include "flow/actorcompiler.h" // This must be the last #include.

namespace fdb_cli {

bool tokencmp(StringRef token, const char* command) {
    if (token.size() != strlen(command))
        return false;

    return !memcmp(token.begin(), command, token.size());
}

void printUsage(StringRef command) {
    const auto& helpMap = CommandFactory::commands();
    auto i = helpMap.find(command.toString());
    if (i != helpMap.end())
        printf("Usage: %s\n", i->second.usage.c_str());
    else
        fprintf(stderr, "ERROR: Unknown command `%s'\n", command.toString().c_str());
}

ACTOR Future<std::string> getSpecialKeysFailureErrorMessage(Reference<ITransaction> tr) {
    Optional<Value> errorMsg = wait(safeThreadFutureToFuture(tr->get(fdb_cli::errorMsgSpecialKey)));
    // Error message should be present
    ASSERT(errorMsg.present());
    // Read the json string
    auto valueObj = readJSONStrictly(errorMsg.get().toString()).get_obj();
    // verify schema
    auto schema = readJSONStrictly(JSONSchemas::managementApiErrorSchema.toString()).get_obj();
    std::string errorStr;
    ASSERT(schemaMatch(schema, valueObj, errorStr, SevError, true));
    // return the error message
    return valueObj["message"].get_str();
}

ACTOR Future<Void> verifyAndAddInterface(std::map<Key, std::pair<Value, ClientLeaderRegInterface>>* address_interface,
                                         Reference<FlowLock> connectLock,
                                         KeyValue kv) {
    wait(connectLock->take());
    state FlowLock::Releaser releaser(*connectLock);
    state ClientWorkerInterface workerInterf;
    try {
        // the interface is backward compatible; if parsing fails, the CLI version needs to be upgraded
        workerInterf = BinaryReader::fromStringRef<ClientWorkerInterface>(kv.value, IncludeVersion());
    } catch (Error& e) {
        fprintf(stderr, "Error: %s; CLI version is too old, please update to use a newer version\n", e.what());
        return Void();
    }
    state ClientLeaderRegInterface leaderInterf(workerInterf.address());
    choose {
        when(Optional<LeaderInfo> rep =
                 wait(brokenPromiseToNever(leaderInterf.getLeader.getReply(GetLeaderRequest())))) {
            StringRef ip_port =
                (kv.key.endsWith(LiteralStringRef(":tls")) ? kv.key.removeSuffix(LiteralStringRef(":tls")) : kv.key)
                    .removePrefix(LiteralStringRef("\xff\xff/worker_interfaces/"));
            (*address_interface)[ip_port] = std::make_pair(kv.value, leaderInterf);

            if (workerInterf.reboot.getEndpoint().addresses.secondaryAddress.present()) {
                Key full_ip_port2 =
                    StringRef(workerInterf.reboot.getEndpoint().addresses.secondaryAddress.get().toString());
                StringRef ip_port2 = full_ip_port2.endsWith(LiteralStringRef(":tls"))
                                         ? full_ip_port2.removeSuffix(LiteralStringRef(":tls"))
                                         : full_ip_port2;
                (*address_interface)[ip_port2] = std::make_pair(kv.value, leaderInterf);
            }
        }
        when(wait(delay(CLIENT_KNOBS->CLI_CONNECT_TIMEOUT))) {}
    }
    return Void();
}

ACTOR Future<Void> getWorkerInterfaces(Reference<ITransaction> tr,
                                       std::map<Key, std::pair<Value, ClientLeaderRegInterface>>* address_interface) {
    // Hold the reference to the standalone's memory
    state ThreadFuture<RangeResult> kvsFuture = tr->getRange(
        KeyRangeRef(LiteralStringRef("\xff\xff/worker_interfaces/"), LiteralStringRef("\xff\xff/worker_interfaces0")),
        CLIENT_KNOBS->TOO_MANY);
    RangeResult kvs = wait(safeThreadFutureToFuture(kvsFuture));
    ASSERT(!kvs.more);
    auto connectLock = makeReference<FlowLock>(CLIENT_KNOBS->CLI_CONNECT_PARALLELISM);
    std::vector<Future<Void>> addInterfs;
    for (auto it : kvs) {
        addInterfs.push_back(verifyAndAddInterface(address_interface, connectLock, it));
    }
    wait(waitForAll(addInterfs));
    return Void();
}

} // namespace fdb_cli
File diff suppressed because it is too large
@@ -28,7 +28,9 @@
#elif !defined(FDBCLI_FDBCLI_ACTOR_H)
#define FDBCLI_FDBCLI_ACTOR_H

#include "fdbclient/CoordinationInterface.h"
#include "fdbclient/IClientApi.h"
#include "fdbclient/StatusClient.h"
#include "flow/Arena.h"

#include "flow/actorcompiler.h" // This must be the last #include.
@@ -61,34 +63,95 @@ struct CommandFactory {
extern const KeyRef advanceVersionSpecialKey;
// consistencycheck
extern const KeyRef consistencyCheckSpecialKey;
// datadistribution
extern const KeyRef ddModeSpecialKey;
extern const KeyRef ddIgnoreRebalanceSpecialKey;
// maintenance
extern const KeyRangeRef maintenanceSpecialKeyRange;
extern const KeyRef ignoreSSFailureSpecialKey;
// setclass
extern const KeyRangeRef processClassSourceSpecialKeyRange;
extern const KeyRangeRef processClassTypeSpecialKeyRange;
// Other special keys
inline const KeyRef errorMsgSpecialKey = LiteralStringRef("\xff\xff/error_message");
// help functions (Copied from fdbcli.actor.cpp)
// decode worker interfaces
ACTOR Future<Void> addInterface(std::map<Key, std::pair<Value, ClientLeaderRegInterface>>* address_interface,
                                Reference<FlowLock> connectLock,
                                KeyValue kv);

// compare StringRef with the given c string
bool tokencmp(StringRef token, const char* command);
// print the usage of the specified command
void printUsage(StringRef command);
// Pre: tr failed with special_keys_api_failure error
// Read the error message special key and return the message
ACTOR Future<std::string> getSpecialKeysFailureErrorMessage(Reference<ITransaction> tr);
// Using the \xff\xff/worker_interfaces/ special key, get all worker interfaces
ACTOR Future<Void> getWorkerInterfaces(Reference<ITransaction> tr,
                                       std::map<Key, std::pair<Value, ClientLeaderRegInterface>>* address_interface);
// Deserialize a \xff\xff/worker_interfaces/<address>:=<ClientInterface> k-v pair and verify it by an RPC call
ACTOR Future<Void> verifyAndAddInterface(std::map<Key, std::pair<Value, ClientLeaderRegInterface>>* address_interface,
                                         Reference<FlowLock> connectLock,
                                         KeyValue kv);
// print cluster status info
void printStatus(StatusObjectReader statusObj,
                 StatusClient::StatusLevel level,
                 bool displayDatabaseAvailable = true,
                 bool hideErrorMessages = false);

// All fdbcli commands (alphabetically)
// advanceversion command
ACTOR Future<bool> advanceVersionCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens);
// cache_range command
ACTOR Future<bool> cacheRangeCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens);
// consistency command
ACTOR Future<bool> consistencyCheckCommandActor(Reference<ITransaction> tr,
                                                std::vector<StringRef> tokens,
                                                bool intrans);
// datadistribution command
ACTOR Future<bool> dataDistributionCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens);
// expensive_data_check command
ACTOR Future<bool> expensiveDataCheckCommandActor(
    Reference<IDatabase> db,
    Reference<ITransaction> tr,
    std::vector<StringRef> tokens,
    std::map<Key, std::pair<Value, ClientLeaderRegInterface>>* address_interface);
// force_recovery_with_data_loss command
ACTOR Future<bool> forceRecoveryWithDataLossCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens);
// kill command
ACTOR Future<bool> killCommandActor(Reference<IDatabase> db,
                                    Reference<ITransaction> tr,
                                    std::vector<StringRef> tokens,
                                    std::map<Key, std::pair<Value, ClientLeaderRegInterface>>* address_interface);
// maintenance command
ACTOR Future<bool> setHealthyZone(Reference<IDatabase> db, StringRef zoneId, double seconds, bool printWarning = false);
ACTOR Future<bool> clearHealthyZone(Reference<IDatabase> db,
                                    bool printWarning = false,
                                    bool clearSSFailureZoneString = false);
ACTOR Future<bool> maintenanceCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens);
// profile command
ACTOR Future<bool> profileCommandActor(Reference<ITransaction> tr, std::vector<StringRef> tokens, bool intrans);
// setclass command
ACTOR Future<bool> setClassCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens);
// snapshot command
ACTOR Future<bool> snapshotCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens);
// status command
ACTOR Future<bool> statusCommandActor(Reference<IDatabase> db,
                                      Database localDb,
                                      std::vector<StringRef> tokens,
                                      bool isExecMode = false);
// suspend command
ACTOR Future<bool> suspendCommandActor(Reference<IDatabase> db,
                                       Reference<ITransaction> tr,
                                       std::vector<StringRef> tokens,
                                       std::map<Key, std::pair<Value, ClientLeaderRegInterface>>* address_interface);
// throttle command
ACTOR Future<bool> throttleCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens);
// triggerteaminfolog command
ACTOR Future<Void> triggerddteaminfologCommandActor(Reference<IDatabase> db);
// tssq command
ACTOR Future<bool> tssqCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens);

} // namespace fdb_cli
@@ -0,0 +1,397 @@
/*
 * ActorLineageProfiler.cpp
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "flow/flow.h"
#include "flow/singleton.h"
#include "fdbrpc/IAsyncFile.h"
#include "fdbclient/ActorLineageProfiler.h"
#include "fdbclient/NameLineage.h"
#include <msgpack.hpp>
#include <memory>
#include <typeindex>
#include <boost/endian/conversion.hpp>
#include <boost/asio.hpp>

using namespace std::literals;

class Packer : public msgpack::packer<msgpack::sbuffer> {
    struct visitor_t {
        using VisitorMap = std::unordered_map<std::type_index, std::function<void(std::any const&, Packer& packer)>>;
        VisitorMap visitorMap;

        template <class T>
        static void any_visitor(std::any const& val, Packer& packer) {
            const T& v = std::any_cast<const T&>(val);
            packer.pack(v);
        }

        template <class... Args>
        struct populate_visitor_map;
        template <class Head, class... Tail>
        struct populate_visitor_map<Head, Tail...> {
            static void populate(VisitorMap& map) {
                map.emplace(std::type_index(typeid(Head)), any_visitor<Head>);
                populate_visitor_map<Tail...>::populate(map);
            }
        };
        template <class Head>
        struct populate_visitor_map<Head> {
            static void populate(VisitorMap&) {}
        };

        visitor_t() {
            populate_visitor_map<int64_t,
                                 uint64_t,
                                 bool,
                                 float,
                                 double,
                                 std::string,
                                 std::string_view,
                                 std::vector<std::any>,
                                 std::vector<std::string>,
                                 std::vector<std::string_view>,
                                 std::map<std::string, std::any>,
                                 std::map<std::string_view, std::any>,
                                 std::vector<std::map<std::string_view, std::any>>>::populate(visitorMap);
        }

        void visit(const std::any& val, Packer& packer) {
            auto iter = visitorMap.find(val.type());
            if (iter == visitorMap.end()) {
                TraceEvent(SevError, "PackerTypeNotFound").detail("Type", val.type().name());
            } else {
                iter->second(val, packer);
            }
        }
    };
    msgpack::sbuffer sbuffer;
    // Initializing visitor_t involves building a type-map. As this is a relatively expensive operation, we don't want
    // to do this each time we create a Packer object. So visitor_t is a stateless class and we only use it as a
    // visitor.
    crossbow::singleton<visitor_t> visitor;

public:
    Packer() : msgpack::packer<msgpack::sbuffer>(sbuffer) {}

    void pack(std::any const& val) { visitor->visit(val, *this); }

    void pack(bool val) {
        if (val) {
            pack_true();
        } else {
            pack_false();
        }
    }

    void pack(uint64_t val) {
        if (val <= std::numeric_limits<uint8_t>::max()) {
            pack_uint8(uint8_t(val));
        } else if (val <= std::numeric_limits<uint16_t>::max()) {
            pack_uint16(uint16_t(val));
        } else if (val <= std::numeric_limits<uint32_t>::max()) {
            pack_uint32(uint32_t(val));
        } else {
            pack_uint64(val);
        }
    }

    void pack(int64_t val) {
        if (val >= 0) {
            this->pack(uint64_t(val));
        } else if (val >= std::numeric_limits<int8_t>::min()) {
            pack_int8(int8_t(val));
        } else if (val >= std::numeric_limits<int16_t>::min()) {
            pack_int16(int16_t(val));
        } else if (val >= std::numeric_limits<int32_t>::min()) {
            pack_int32(int32_t(val));
        } else {
            pack_int64(val);
        }
    }

    void pack(float val) { pack_float(val); }
    void pack(double val) { pack_double(val); }
    void pack(std::string const& str) {
        pack_str(str.size());
        pack_str_body(str.data(), str.size());
    }

    void pack(std::string_view val) {
        pack_str(val.size());
        pack_str_body(val.data(), val.size());
    }

    template <class K, class V>
    void pack(std::map<K, V> const& map) {
        pack_map(map.size());
        for (const auto& p : map) {
            pack(p.first);
            pack(p.second);
        }
    }

    template <class T>
    void pack(std::vector<T> const& val) {
        pack_array(val.size());
        for (const auto& v : val) {
            pack(v);
        }
    }

    std::pair<char*, unsigned> getbuf() {
        unsigned size = sbuffer.size();
        return std::make_pair(sbuffer.release(), size);
    }
};
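The visitor_t comment above describes a once-built type-index dispatch table; a stripped-down, standalone sketch of the same trick (all names here are illustrative, and the real Packer traces an error for unknown types instead of ignoring them):

#include <any>
#include <cstdio>
#include <functional>
#include <string>
#include <typeindex>
#include <unordered_map>

using Handler = std::function<void(const std::any&)>;

// One handler per supported type, keyed by std::type_index.
static const std::unordered_map<std::type_index, Handler> handlers = {
    { std::type_index(typeid(int64_t)),
      [](const std::any& v) { std::printf("int: %lld\n", (long long)std::any_cast<int64_t>(v)); } },
    { std::type_index(typeid(std::string)),
      [](const std::any& v) { std::printf("str: %s\n", std::any_cast<const std::string&>(v).c_str()); } },
};

void visit(const std::any& v) {
    auto it = handlers.find(std::type_index(v.type()));
    if (it != handlers.end()) {
        it->second(v); // dispatch to the matching handler
    }
}

int main() {
    visit(std::any(int64_t(42)));
    visit(std::any(std::string("hello")));
}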

IALPCollectorBase::IALPCollectorBase() {
    SampleCollector::instance().addCollector(this);
}

std::map<std::string_view, std::any> SampleCollectorT::collect(ActorLineage* lineage) {
    ASSERT(lineage != nullptr);
    std::map<std::string_view, std::any> out;
    for (auto& collector : collectors) {
        auto val = collector->collect(lineage);
        if (val.has_value()) {
            out[collector->name()] = val.value();
        }
    }
    return out;
}

std::shared_ptr<Sample> SampleCollectorT::collect() {
    auto sample = std::make_shared<Sample>();
    double time = g_network->now();
    sample->time = time;
    for (auto& p : getSamples) {
        Packer packer;
        std::vector<std::map<std::string_view, std::any>> samples;
        auto sampleVec = p.second();
        for (auto& val : sampleVec) {
            auto m = collect(val.getPtr());
            if (!m.empty()) {
                samples.emplace_back(std::move(m));
            }
        }
        if (!samples.empty()) {
            packer.pack(samples);
            sample->data[p.first] = packer.getbuf();
        }
    }
    return sample;
}

void SampleCollection_t::collect(const Reference<ActorLineage>& lineage) {
    ASSERT(lineage.isValid());
    _currentLineage = lineage;
    auto sample = _collector->collect();
    ASSERT(sample);
    {
        Lock _{ mutex };
        data.emplace_back(sample);
    }
    auto min = std::min(data.back()->time - windowSize, data.back()->time);
    double oldest = data.front()->time;
    // we don't need to check for data.empty() in this loop (or the inner loop) as we know that we will end
    // up with at least one entry which is the most recent sample
    while (oldest < min) {
        Lock _{ mutex };
        // we remove at most 10 elements at a time. This is so we don't block the main thread for too long.
        for (int i = 0; i < 10 && oldest < min; ++i) {
            data.pop_front();
            oldest = data.front()->time;
        }
    }
    // TODO: Should only call ingest when deleting from memory
    config->ingest(sample);
}

std::vector<std::shared_ptr<Sample>> SampleCollection_t::get(double from /*= 0.0*/,
                                                             double to /*= std::numeric_limits<double>::max()*/) const {
    Lock _{ mutex };
    std::vector<std::shared_ptr<Sample>> res;
    for (const auto& sample : data) {
        if (sample->time > to) {
            break;
        } else if (sample->time >= from) {
            res.push_back(sample);
        }
    }
    return res;
}

void sample(LineageReference* lineagePtr) {
    if (!lineagePtr->isValid()) {
        return;
    }
    (*lineagePtr)->modify(&NameLineage::actorName) = lineagePtr->actorName();
    boost::asio::post(ActorLineageProfiler::instance().context(),
                      [lineage = LineageReference::addRef(lineagePtr->getPtr())]() {
                          SampleCollection::instance().collect(lineage);
                      });
}

struct ProfilerImpl {
    boost::asio::io_context context;
    boost::asio::executor_work_guard<decltype(context.get_executor())> workGuard;
    boost::asio::steady_timer timer;
    std::thread mainThread;
    unsigned frequency;

    SampleCollection collection;

    ProfilerImpl() : workGuard(context.get_executor()), timer(context) {
        mainThread = std::thread([this]() { context.run(); });
    }
    ~ProfilerImpl() {
        setFrequency(0);
        workGuard.reset();
        mainThread.join();
    }

    void profileHandler(boost::system::error_code const& ec) {
        if (ec) {
            return;
        }
        startSampling = true;
        timer = boost::asio::steady_timer(context, std::chrono::microseconds(1000000 / frequency));
        timer.async_wait([this](auto const& ec) { profileHandler(ec); });
    }

    void setFrequency(unsigned frequency) {
        boost::asio::post(context, [this, frequency]() {
            this->frequency = frequency;
            timer.cancel();
            if (frequency > 0) {
                profileHandler(boost::system::error_code{});
            }
        });
    }
};

ActorLineageProfilerT::ActorLineageProfilerT() : impl(new ProfilerImpl()) {
    // collection->collector()->addGetter(WaitState::Network,
    //                                    std::bind(&ActorLineageSet::copy, std::ref(g_network->getActorLineageSet())));
    // collection->collector()->addGetter(
    //     WaitState::Disk,
    //     std::bind(&ActorLineageSet::copy, std::ref(IAsyncFileSystem::filesystem()->getActorLineageSet())));
    collection->collector()->addGetter(WaitState::Running, []() {
        return std::vector<Reference<ActorLineage>>({ SampleCollection::instance().getLineage() });
    });
}

ActorLineageProfilerT::~ActorLineageProfilerT() {
    delete impl;
}

void ActorLineageProfilerT::setFrequency(unsigned frequency) {
    impl->setFrequency(frequency);
}

boost::asio::io_context& ActorLineageProfilerT::context() {
    return impl->context;
}

SampleIngestor::~SampleIngestor() {}

void ProfilerConfigT::reset(std::map<std::string, std::string> const& config) {
    bool expectNoMore = false, useFluentD = false, useTCP = false;
    std::string endpoint;
    ConfigError err;
    for (auto& kv : config) {
        if (expectNoMore) {
            err.description = format("Unexpected option %s", kv.first.c_str());
            throw err;
        }
        if (kv.first == "ingestor") {
            std::string val = kv.second;
            std::transform(val.begin(), val.end(), val.begin(), [](auto c) { return std::tolower(c); });
            if (val == "none") {
                setBackend(std::make_shared<NoneIngestor>());
            } else if (val == "fluentd") {
                useFluentD = true;
            } else {
                err.description = format("Unsupported ingestor: %s", val.c_str());
                throw err;
            }
        } else if (kv.first == "ingestor_endpoint") {
            endpoint = kv.second;
        } else if (kv.first == "ingestor_protocol") {
            auto val = kv.second;
            std::transform(val.begin(), val.end(), val.begin(), [](auto c) { return std::tolower(c); });
            if (val == "tcp") {
                useTCP = true;
            } else if (val == "udp") {
                useTCP = false;
            } else {
                err.description = format("Unsupported protocol for fluentd: %s", kv.second.c_str());
                throw err;
            }
        } else {
            err.description = format("Unknown option %s", kv.first.c_str());
            throw err;
        }
    }
    if (useFluentD) {
        if (endpoint.empty()) {
            err.description = "Endpoint is required for fluentd ingestor";
            throw err;
        }
        NetworkAddress address;
        try {
            address = NetworkAddress::parse(endpoint);
        } catch (Error& e) {
            err.description = format("Can't parse address %s", endpoint.c_str());
            throw err;
        }
        setBackend(std::make_shared<FluentDIngestor>(
            useTCP ? FluentDIngestor::Protocol::TCP : FluentDIngestor::Protocol::UDP, address));
    }
}

std::map<std::string, std::string> ProfilerConfigT::getConfig() const {
    std::map<std::string, std::string> res;
    if (ingestor) {
        ingestor->getConfig(res);
    }
    return res;
}

// Callback used to update the sampling profiler's run frequency whenever the
// frequency changes.
void samplingProfilerUpdateFrequency(std::optional<std::any> freq) {
    double frequency = 0;
    if (freq.has_value()) {
        frequency = std::any_cast<double>(freq.value());
    }
    TraceEvent(SevInfo, "SamplingProfilerUpdateFrequency").detail("Frequency", frequency);
    ActorLineageProfiler::instance().setFrequency(frequency);
}

// Callback used to update the sample collector window size.
void samplingProfilerUpdateWindow(std::optional<std::any> window) {
    double duration = 0;
    if (window.has_value()) {
        duration = std::any_cast<double>(window.value());
    }
    TraceEvent(SevInfo, "SamplingProfilerUpdateWindow").detail("Duration", duration);
    SampleCollection::instance().setWindowSize(duration);
}
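The two callbacks above defensively unwrap an optional, type-erased knob value before acting on it; the same pattern in isolation, as a standalone sketch with illustrative names (this version also tolerates a wrong payload type by using the pointer form of any_cast):

#include <any>
#include <optional>

// A knob arrives as std::optional<std::any>; absence (or a mismatched type)
// falls back to a default instead of throwing.
double knobAsDouble(std::optional<std::any> const& v, double fallback) {
    if (v.has_value()) {
        if (auto* d = std::any_cast<double>(&v.value())) {
            return *d;
        }
    }
    return fallback;
}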
@ -0,0 +1,192 @@
|
|||
/*
|
||||
* ActorLineageProfiler.h
|
||||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "fdbclient/AnnotateActor.h"
|
||||
|
||||
#include <optional>
|
||||
#include <string>
|
||||
#include <any>
|
||||
#include <vector>
|
||||
#include <mutex>
|
||||
#include <condition_variable>
|
||||
#include "flow/singleton.h"
|
||||
#include "flow/flow.h"
|
||||
|
||||
void samplingProfilerUpdateFrequency(std::optional<std::any> freq);
|
||||
void samplingProfilerUpdateWindow(std::optional<std::any> window);
|
||||
|
||||
struct IALPCollectorBase {
|
||||
virtual std::optional<std::any> collect(ActorLineage*) = 0;
|
||||
virtual const std::string_view& name() = 0;
|
||||
IALPCollectorBase();
|
||||
};
|
||||
|
||||
template <class T>
|
||||
struct IALPCollector : IALPCollectorBase {
|
||||
const std::string_view& name() override { return T::name; }
|
||||
};
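// Editor's sketch of a concrete collector (hypothetical, for illustration): the CRTP
// parameter T only has to provide a static `name` member, which IALPCollector<T>::name()
// returns by reference.
//
//     struct RunStateCollector : IALPCollector<RunStateCollector> {
//         static constexpr std::string_view name{ "runState" };
//         std::optional<std::any> collect(ActorLineage*) override {
//             return std::any{ std::string{ "running" } };
//         }
//     };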
|
||||
|
||||
struct Sample : std::enable_shared_from_this<Sample> {
|
||||
double time = 0.0;
|
||||
Sample() {}
|
||||
Sample(Sample const&) = delete;
|
||||
Sample& operator=(Sample const&) = delete;
|
||||
std::unordered_map<WaitState, std::pair<char*, unsigned>> data;
|
||||
~Sample() {
|
||||
std::for_each(data.begin(), data.end(), [](std::pair<WaitState, std::pair<char*, unsigned>> entry) {
|
||||
::free(entry.second.first);
|
||||
});
|
||||
}
|
||||
};
|
||||
|
||||
class SampleIngestor : std::enable_shared_from_this<SampleIngestor> {
|
||||
public:
|
||||
virtual ~SampleIngestor();
|
||||
virtual void ingest(std::shared_ptr<Sample> const& sample) = 0;
|
||||
virtual void getConfig(std::map<std::string, std::string>&) const = 0;
|
||||
};
|
||||
|
||||
class NoneIngestor : public SampleIngestor {
|
||||
public:
|
||||
void ingest(std::shared_ptr<Sample> const& sample) override {}
|
||||
void getConfig(std::map<std::string, std::string>& res) const override { res["ingestor"] = "none"; }
|
||||
};
|
||||
|
||||
// The FluentD ingestor uses the pimpl idiom. This keeps compilation lightweight, since the implementation
|
||||
// depends on boost::asio.
|
||||
struct FluentDIngestorImpl;
|
||||
|
||||
class FluentDIngestor : public SampleIngestor {
|
||||
public: // Public Types
|
||||
enum class Protocol { TCP, UDP };
|
||||
|
||||
private: // members
|
||||
FluentDIngestorImpl* impl;
|
||||
|
||||
public: // interface
|
||||
void ingest(std::shared_ptr<Sample> const& sample) override;
|
||||
FluentDIngestor(Protocol protocol, NetworkAddress& endpoint);
|
||||
void getConfig(std::map<std::string, std::string>& res) const override;
|
||||
~FluentDIngestor();
|
||||
};
|
||||
|
||||
struct ConfigError {
|
||||
std::string description;
|
||||
};
|
||||
|
||||
class ProfilerConfigT {
|
||||
private: // private types
|
||||
using Lock = std::unique_lock<std::mutex>;
|
||||
friend class crossbow::create_static<ProfilerConfigT>;
|
||||
|
||||
private: // members
|
||||
std::shared_ptr<SampleIngestor> ingestor = std::make_shared<NoneIngestor>();
|
||||
|
||||
private: // construction
|
||||
ProfilerConfigT() {}
|
||||
ProfilerConfigT(ProfilerConfigT const&) = delete;
|
||||
ProfilerConfigT& operator=(ProfilerConfigT const&) = delete;
|
||||
void setBackend(std::shared_ptr<SampleIngestor> ingestor) { this->ingestor = ingestor; }
|
||||
|
||||
public:
|
||||
void ingest(std::shared_ptr<Sample> sample) { ingestor->ingest(sample); }
|
||||
void reset(std::map<std::string, std::string> const& config);
|
||||
std::map<std::string, std::string> getConfig() const;
|
||||
};
|
||||
|
||||
using ProfilerConfig = crossbow::singleton<ProfilerConfigT>;
|
||||
|
||||
class SampleCollectorT {
|
||||
public: // Types
|
||||
friend struct crossbow::create_static<SampleCollectorT>;
|
||||
using Getter = std::function<std::vector<Reference<ActorLineage>>()>;
|
||||
|
||||
private:
|
||||
std::vector<IALPCollectorBase*> collectors;
|
||||
std::map<WaitState, Getter> getSamples;
|
||||
SampleCollectorT() {}
|
||||
std::map<std::string_view, std::any> collect(ActorLineage* lineage);
|
||||
|
||||
public:
|
||||
void addCollector(IALPCollectorBase* collector) { collectors.push_back(collector); }
|
||||
std::shared_ptr<Sample> collect();
|
||||
void addGetter(WaitState waitState, Getter const& getter) { getSamples[waitState] = getter; }
|
||||
};
|
||||
|
||||
using SampleCollector = crossbow::singleton<SampleCollectorT>;
|
||||
|
||||
class SampleCollection_t {
|
||||
friend struct crossbow::create_static<SampleCollection_t>;
|
||||
using Lock = std::unique_lock<std::mutex>;
|
||||
SampleCollection_t() {}
|
||||
|
||||
SampleCollector _collector;
|
||||
mutable std::mutex mutex;
|
||||
std::atomic<double> windowSize = 0.0;
|
||||
std::deque<std::shared_ptr<Sample>> data;
|
||||
ProfilerConfig config;
|
||||
Reference<ActorLineage> _currentLineage;
|
||||
|
||||
public:
|
||||
/**
|
||||
* Define how many samples the collection should keep. The window size is defined in the time dimension.
|
||||
*
|
||||
* \param duration How long a sample should be kept in the collection.
|
||||
*/
|
||||
void setWindowSize(double duration) { windowSize.store(duration); }
|
||||
/**
|
||||
* By default, returns reference-counted pointers to all samples. A window can be defined in terms of absolute time.
|
||||
*
|
||||
* \param from The minimal age of all returned samples.
|
||||
* \param to The max age of all returned samples.
|
||||
*/
|
||||
std::vector<std::shared_ptr<Sample>> get(double from = 0.0, double to = std::numeric_limits<double>::max()) const;
|
||||
void collect(const Reference<ActorLineage>& lineage);
|
||||
const SampleCollector& collector() const { return _collector; }
|
||||
SampleCollector& collector() { return _collector; }
|
||||
Reference<ActorLineage> getLineage() { return _currentLineage; }
|
||||
};
|
||||
|
||||
using SampleCollection = crossbow::singleton<SampleCollection_t>;
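// Editor's sketch: typical interaction with the collection singleton. Both calls appear
// elsewhere in this change; the window size is in seconds.
//
//     SampleCollection::instance().setWindowSize(30.0); // keep roughly 30s of samples
//     auto samples = SampleCollection::instance().get(); // all currently retained samples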
|
||||
|
||||
struct ProfilerImpl;
|
||||
|
||||
namespace boost {
|
||||
namespace asio {
|
||||
// forward declare io_context because including boost::asio is very expensive
|
||||
class io_context;
|
||||
} // namespace asio
|
||||
} // namespace boost
|
||||
|
||||
class ActorLineageProfilerT {
|
||||
friend struct crossbow::create_static<ActorLineageProfilerT>;
|
||||
ProfilerImpl* impl;
|
||||
SampleCollection collection;
|
||||
ActorLineageProfilerT();
|
||||
|
||||
public:
|
||||
~ActorLineageProfilerT();
|
||||
void setFrequency(unsigned frequency);
|
||||
boost::asio::io_context& context();
|
||||
};
|
||||
|
||||
using ActorLineageProfiler = crossbow::singleton<ActorLineageProfilerT>;
|
|
@ -0,0 +1,23 @@
|
|||
/*
|
||||
* AnnotateActor.cpp
|
||||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "fdbclient/AnnotateActor.h"
|
||||
|
||||
std::map<WaitState, std::function<std::vector<Reference<ActorLineage>>()>> samples;
|
|
@ -0,0 +1,91 @@
|
|||
/*
|
||||
* AnnotateActor.h
|
||||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "flow/flow.h"
|
||||
#include "flow/network.h"
|
||||
|
||||
#include <string_view>
|
||||
|
||||
// Used to manually instrument waiting actors to collect samples for the
|
||||
// sampling profiler.
|
||||
struct AnnotateActor {
|
||||
unsigned index;
|
||||
bool set;
|
||||
|
||||
AnnotateActor() : set(false) {}
|
||||
|
||||
AnnotateActor(LineageReference* lineage) : set(false) {
|
||||
#ifdef ENABLE_SAMPLING
|
||||
if (lineage->getPtr() != 0) {
|
||||
index = g_network->getActorLineageSet().insert(*lineage);
|
||||
set = (index != ActorLineageSet::npos);
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
AnnotateActor(const AnnotateActor& other) = delete;
|
||||
AnnotateActor(AnnotateActor&& other) = delete;
|
||||
AnnotateActor& operator=(const AnnotateActor& other) = delete;
|
||||
|
||||
AnnotateActor& operator=(AnnotateActor&& other) {
|
||||
if (this == &other) {
|
||||
return *this;
|
||||
}
|
||||
|
||||
this->index = other.index;
|
||||
this->set = other.set;
|
||||
|
||||
other.set = false;
|
||||
|
||||
return *this;
|
||||
}
|
||||
|
||||
~AnnotateActor() {
|
||||
#ifdef ENABLE_SAMPLING
|
||||
if (set) {
|
||||
g_network->getActorLineageSet().erase(index);
|
||||
}
|
||||
#endif
|
||||
}
|
||||
};
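// Editor's note: AnnotateActor is an RAII registration. Construction inserts the lineage
// into g_network's ActorLineageSet (under ENABLE_SAMPLING), move-assignment transfers
// ownership of the slot, and the destructor erases it, so a waiting actor is visible to
// the sampler for exactly the lifetime of this object.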
|
||||
|
||||
enum class WaitState { Disk, Network, Running };
|
||||
// usually we shouldn't use `using namespace` in a header file, but literals are safe here because user-defined
|
||||
// literals must be prefixed with `_`
|
||||
using namespace std::literals;
|
||||
|
||||
constexpr std::string_view to_string(WaitState st) {
|
||||
switch (st) {
|
||||
case WaitState::Disk:
|
||||
return "Disk"sv;
|
||||
case WaitState::Network:
|
||||
return "Network"sv;
|
||||
case WaitState::Running:
|
||||
return "Running"sv;
|
||||
default:
|
||||
return ""sv;
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef ENABLE_SAMPLING
|
||||
extern std::map<WaitState, std::function<std::vector<Reference<ActorLineage>>()>> samples;
|
||||
#endif
|
|
@ -32,7 +32,11 @@ public:
|
|||
bool isTerminate() const override { return true; }
|
||||
};
|
||||
|
||||
ACTOR Future<Void> asyncTaskThreadClient(AsyncTaskThread* asyncTaskThread, std::atomic<int> *sum, int count, int clientId, double meanSleep) {
|
||||
ACTOR Future<Void> asyncTaskThreadClient(AsyncTaskThread* asyncTaskThread,
|
||||
std::atomic<int>* sum,
|
||||
int count,
|
||||
int clientId,
|
||||
double meanSleep) {
|
||||
state int i = 0;
|
||||
state double randomSleep = 0.0;
|
||||
for (; i < count; ++i) {
|
||||
|
@ -43,11 +47,11 @@ ACTOR Future<Void> asyncTaskThreadClient(AsyncTaskThread* asyncTaskThread, std::
|
|||
return Void();
|
||||
}));
|
||||
TraceEvent("AsyncTaskThreadIncrementedSum")
|
||||
.detail("Index", i)
|
||||
.detail("Sum", sum->load())
|
||||
.detail("ClientId", clientId)
|
||||
.detail("RandomSleep", randomSleep)
|
||||
.detail("MeanSleep", meanSleep);
|
||||
.detail("Index", i)
|
||||
.detail("Sum", sum->load())
|
||||
.detail("ClientId", clientId)
|
||||
.detail("RandomSleep", randomSleep)
|
||||
.detail("MeanSleep", meanSleep);
|
||||
}
|
||||
return Void();
|
||||
}
|
||||
|
@ -93,7 +97,8 @@ TEST_CASE("/asynctaskthread/add") {
|
|||
std::vector<Future<Void>> clients;
|
||||
clients.reserve(numClients);
|
||||
for (int clientId = 0; clientId < numClients; ++clientId) {
|
||||
clients.push_back(asyncTaskThreadClient(&asyncTaskThread, &sum, incrementsPerClient, clientId, deterministicRandom()->random01() * 0.01));
|
||||
clients.push_back(asyncTaskThreadClient(
|
||||
&asyncTaskThread, &sum, incrementsPerClient, clientId, deterministicRandom()->random01() * 0.01));
|
||||
}
|
||||
wait(waitForAll(clients));
|
||||
ASSERT_EQ(sum.load(), numClients * incrementsPerClient);
|
||||
|
@ -103,12 +108,12 @@ TEST_CASE("/asynctaskthread/add") {
|
|||
TEST_CASE("/asynctaskthread/error") {
|
||||
state AsyncTaskThread asyncTaskThread;
|
||||
try {
|
||||
wait(asyncTaskThread.execAsync([]{
|
||||
wait(asyncTaskThread.execAsync([] {
|
||||
throw operation_failed();
|
||||
return Void();
|
||||
}));
|
||||
ASSERT(false);
|
||||
} catch (Error &e) {
|
||||
} catch (Error& e) {
|
||||
ASSERT_EQ(e.code(), error_code_operation_failed);
|
||||
}
|
||||
return Void();
|
||||
|
|
|
@ -533,6 +533,7 @@ Future<Void> eraseLogData(Reference<ReadYourWritesTransaction> tr,
|
|||
CheckBackupUID = CheckBackupUID::False,
|
||||
Version backupUid = 0);
|
||||
Key getApplyKey(Version version, Key backupUid);
|
||||
Key getLogKey(Version version, Key backupUid);
|
||||
Version getLogKeyVersion(Key key);
|
||||
std::pair<Version, uint32_t> decodeBKMutationLogKey(Key key);
|
||||
Future<Void> logError(Database cx, Key keyErrors, const std::string& message);
|
||||
|
|
|
@ -229,6 +229,16 @@ Key getApplyKey(Version version, Key backupUid) {
|
|||
return k2.withPrefix(applyLogKeys.begin);
|
||||
}
|
||||
|
||||
Key getLogKey(Version version, Key backupUid) {
|
||||
int64_t vblock = (version - 1) / CLIENT_KNOBS->LOG_RANGE_BLOCK_SIZE;
|
||||
uint64_t v = bigEndian64(version);
|
||||
uint32_t data = vblock & 0xffffffff;
|
||||
uint8_t hash = (uint8_t)hashlittle(&data, sizeof(uint32_t), 0);
|
||||
Key k1 = StringRef((uint8_t*)&v, sizeof(uint64_t)).withPrefix(StringRef(&hash, sizeof(uint8_t)));
|
||||
Key k2 = k1.withPrefix(backupUid);
|
||||
return k2.withPrefix(backupLogKeys.begin);
|
||||
}
|
||||
|
||||
Version getLogKeyVersion(Key key) {
|
||||
return bigEndian64(*(int64_t*)(key.begin() + backupLogPrefixBytes + sizeof(UID) + sizeof(uint8_t)));
|
||||
}
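// Editor's note: taken together, getLogKey and getLogKeyVersion above imply this key layout
// (assuming backupLogPrefixBytes equals backupLogKeys.begin.size()):
//
//     backupLogKeys.begin | backupUid (UID) | hash (1 byte) | bigEndian64(version) | ...
//
// which is why getLogKeyVersion reads the version at offset
// backupLogPrefixBytes + sizeof(UID) + sizeof(uint8_t).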
|
||||
|
@ -330,7 +340,7 @@ void decodeBackupLogValue(Arena& arena,
|
|||
}
|
||||
} else {
|
||||
Version ver = key_version->rangeContaining(logValue.param1).value();
|
||||
//TraceEvent("ApplyMutation").detail("LogValue", logValue.toString()).detail("Version", version).detail("Ver", ver).detail("Apply", version > ver && ver != invalidVersion);
|
||||
//TraceEvent("ApplyMutation").detail("LogValue", logValue).detail("Version", version).detail("Ver", ver).detail("Apply", version > ver && ver != invalidVersion);
|
||||
if (version > ver && ver != invalidVersion) {
|
||||
if (removePrefix.size()) {
|
||||
logValue.param1 = logValue.param1.removePrefix(removePrefix);
|
||||
|
|
|
@ -283,7 +283,6 @@ public:
|
|||
}
|
||||
return Void();
|
||||
}
|
||||
|
||||
};
|
||||
|
||||
Future<bool> BackupContainerAzureBlobStore::blobExists(const std::string& fileName) {
|
||||
|
|
|
@ -40,6 +40,12 @@ struct MutationsAndVersionRef {
|
|||
: mutations(to, from.mutations), version(from.version) {}
|
||||
int expectedSize() const { return mutations.expectedSize(); }
|
||||
|
||||
struct OrderByVersion {
|
||||
bool operator()(MutationsAndVersionRef const& a, MutationsAndVersionRef const& b) const {
|
||||
return a.version < b.version;
|
||||
}
|
||||
};
|
||||
|
||||
template <class Ar>
|
||||
void serialize(Ar& ar) {
|
||||
serializer(ar, mutations, version);
|
||||
|
|
|
@ -1,4 +1,7 @@
|
|||
set(FDBCLIENT_SRCS
|
||||
ActorLineageProfiler.h
|
||||
ActorLineageProfiler.cpp
|
||||
AnnotateActor.cpp
|
||||
AsyncFileS3BlobStore.actor.cpp
|
||||
AsyncFileS3BlobStore.actor.h
|
||||
AsyncTaskThread.actor.cpp
|
||||
|
@ -44,6 +47,7 @@ set(FDBCLIENT_SRCS
|
|||
FDBOptions.h
|
||||
FDBTypes.cpp
|
||||
FDBTypes.h
|
||||
FluentDSampleIngestor.cpp
|
||||
FileBackupAgent.actor.cpp
|
||||
GlobalConfig.h
|
||||
GlobalConfig.actor.h
|
||||
|
@ -71,6 +75,10 @@ set(FDBCLIENT_SRCS
|
|||
MultiVersionTransaction.actor.cpp
|
||||
MultiVersionTransaction.h
|
||||
MutationList.h
|
||||
MutationLogReader.actor.cpp
|
||||
MutationLogReader.actor.h
|
||||
NameLineage.h
|
||||
NameLineage.cpp
|
||||
NativeAPI.actor.cpp
|
||||
NativeAPI.actor.h
|
||||
Notified.h
|
||||
|
@ -78,6 +86,7 @@ set(FDBCLIENT_SRCS
|
|||
ParallelStream.actor.h
|
||||
PaxosConfigTransaction.actor.cpp
|
||||
PaxosConfigTransaction.h
|
||||
PImpl.h
|
||||
SimpleConfigTransaction.actor.cpp
|
||||
SpecialKeySpace.actor.cpp
|
||||
SpecialKeySpace.actor.h
|
||||
|
@ -106,10 +115,12 @@ set(FDBCLIENT_SRCS
|
|||
StorageServerInterface.h
|
||||
Subspace.cpp
|
||||
Subspace.h
|
||||
StackLineage.h
|
||||
StackLineage.cpp
|
||||
SystemData.cpp
|
||||
SystemData.h
|
||||
TagThrottle.actor.cpp
|
||||
TagThrottle.h
|
||||
TagThrottle.actor.h
|
||||
TaskBucket.actor.cpp
|
||||
TaskBucket.h
|
||||
TestKnobCollection.cpp
|
||||
|
@ -177,8 +188,20 @@ endif()
|
|||
|
||||
add_flow_target(STATIC_LIBRARY NAME fdbclient SRCS ${FDBCLIENT_SRCS} ADDL_SRCS ${options_srcs})
|
||||
add_dependencies(fdbclient fdboptions fdb_c_options)
|
||||
if(BUILD_AZURE_BACKUP)
|
||||
target_link_libraries(fdbclient PUBLIC fdbrpc PRIVATE curl uuid azure-storage-lite)
|
||||
else()
|
||||
target_link_libraries(fdbclient PUBLIC fdbrpc)
|
||||
target_link_libraries(fdbclient PUBLIC fdbrpc msgpack)
|
||||
|
||||
# Create a separate fdbclient library with sampling enabled. This lets
|
||||
# fdbserver retain sampling functionality in client code while disabling
|
||||
# sampling for pure clients.
|
||||
add_flow_target(STATIC_LIBRARY NAME fdbclient_sampling SRCS ${FDBCLIENT_SRCS} ADDL_SRCS ${options_srcs})
|
||||
add_dependencies(fdbclient_sampling fdboptions fdb_c_options)
|
||||
target_link_libraries(fdbclient_sampling PUBLIC fdbrpc_sampling msgpack)
|
||||
target_compile_definitions(fdbclient_sampling PRIVATE -DENABLE_SAMPLING)
|
||||
if(WIN32)
|
||||
add_dependencies(fdbclient_sampling_actors fdbclient_actors)
|
||||
endif()
|
||||
|
||||
if(BUILD_AZURE_BACKUP)
|
||||
target_link_libraries(fdbclient PRIVATE curl uuid azure-storage-lite)
|
||||
target_link_libraries(fdbclient_sampling PRIVATE curl uuid azure-storage-lite)
|
||||
endif()
|
||||
|
|
|
@ -49,3 +49,7 @@ Optional<KnobValue> ClientKnobCollection::tryParseKnobValue(std::string const& k
|
|||
bool ClientKnobCollection::trySetKnob(std::string const& knobName, KnobValueRef const& knobValue) {
|
||||
return knobValue.visitSetKnob(knobName, flowKnobs) || knobValue.visitSetKnob(knobName, clientKnobs);
|
||||
}
|
||||
|
||||
bool ClientKnobCollection::isAtomic(std::string const& knobName) const {
|
||||
return flowKnobs.isAtomic(knobName) || clientKnobs.isAtomic(knobName);
|
||||
}
|
||||
|
|
|
@ -43,4 +43,5 @@ public:
|
|||
TestKnobs const& getTestKnobs() const override { throw internal_error(); }
|
||||
Optional<KnobValue> tryParseKnobValue(std::string const& knobName, std::string const& knobValue) const override;
|
||||
bool trySetKnob(std::string const& knobName, KnobValueRef const& knobValue) override;
|
||||
bool isAtomic(std::string const& knobName) const override;
|
||||
};
|
||||
|
|
|
@ -23,7 +23,7 @@
|
|||
#include "fdbclient/SystemData.h"
|
||||
#include "flow/UnitTest.h"
|
||||
|
||||
#define init(knob, value) initKnob(knob, value, #knob)
|
||||
#define init(...) KNOB_FN(__VA_ARGS__, INIT_ATOMIC_KNOB, INIT_KNOB)(__VA_ARGS__)
|
||||
|
||||
ClientKnobs::ClientKnobs(Randomize randomize) {
|
||||
initialize(randomize);
|
||||
|
@ -247,6 +247,10 @@ void ClientKnobs::initialize(Randomize randomize) {
|
|||
init( TAG_THROTTLE_RECHECK_INTERVAL, 5.0 ); if( randomize && BUGGIFY ) TAG_THROTTLE_RECHECK_INTERVAL = 0.0;
|
||||
init( TAG_THROTTLE_EXPIRATION_INTERVAL, 60.0 ); if( randomize && BUGGIFY ) TAG_THROTTLE_EXPIRATION_INTERVAL = 1.0;
|
||||
|
||||
// busyness reporting
|
||||
init( BUSYNESS_SPIKE_START_THRESHOLD, 0.100 );
|
||||
init( BUSYNESS_SPIKE_SATURATED_THRESHOLD, 0.500 );
|
||||
|
||||
// clang-format on
|
||||
}
|
||||
|
||||
|
|
|
@ -238,6 +238,10 @@ public:
|
|||
double TAG_THROTTLE_RECHECK_INTERVAL;
|
||||
double TAG_THROTTLE_EXPIRATION_INTERVAL;
|
||||
|
||||
// busyness reporting
|
||||
double BUSYNESS_SPIKE_START_THRESHOLD;
|
||||
double BUSYNESS_SPIKE_SATURATED_THRESHOLD;
|
||||
|
||||
ClientKnobs(Randomize randomize);
|
||||
void initialize(Randomize randomize);
|
||||
};
|
||||
|
|
|
@ -22,6 +22,9 @@
|
|||
#ifndef FDBCLIENT_CLIENTLOGEVENTS_H
|
||||
#define FDBCLIENT_CLIENTLOGEVENTS_H
|
||||
|
||||
#include "fdbclient/FDBTypes.h"
|
||||
#include "fdbclient/CommitProxyInterface.h"
|
||||
|
||||
namespace FdbClientLogEvents {
|
||||
enum class EventType {
|
||||
GET_VERSION_LATENCY = 0,
|
||||
|
@ -252,7 +255,7 @@ struct EventCommit : public Event {
|
|||
.setMaxEventLength(-1)
|
||||
.detail("TransactionID", id)
|
||||
.setMaxFieldLength(maxFieldLength)
|
||||
.detail("Mutation", mutation.toString());
|
||||
.detail("Mutation", mutation);
|
||||
}
|
||||
|
||||
TraceEvent("TransactionTrace_Commit")
|
||||
|
@ -316,7 +319,7 @@ struct EventCommit_V2 : public Event {
|
|||
.setMaxEventLength(-1)
|
||||
.detail("TransactionID", id)
|
||||
.setMaxFieldLength(maxFieldLength)
|
||||
.detail("Mutation", mutation.toString());
|
||||
.detail("Mutation", mutation);
|
||||
}
|
||||
|
||||
TraceEvent("TransactionTrace_Commit")
|
||||
|
@ -430,7 +433,7 @@ struct EventCommitError : public Event {
|
|||
.setMaxEventLength(-1)
|
||||
.detail("TransactionID", id)
|
||||
.setMaxFieldLength(maxFieldLength)
|
||||
.detail("Mutation", mutation.toString());
|
||||
.detail("Mutation", mutation);
|
||||
}
|
||||
|
||||
TraceEvent("TransactionTrace_CommitError").detail("TransactionID", id).detail("ErrCode", errCode);
|
||||
|
|
|
@ -28,7 +28,7 @@
|
|||
#include "fdbclient/FDBTypes.h"
|
||||
#include "fdbclient/StorageServerInterface.h"
|
||||
#include "fdbclient/CommitTransaction.h"
|
||||
#include "fdbclient/TagThrottle.h"
|
||||
#include "fdbclient/TagThrottle.actor.h"
|
||||
#include "fdbclient/GlobalConfig.h"
|
||||
|
||||
#include "fdbrpc/Stats.h"
|
||||
|
|
|
@ -116,6 +116,33 @@ std::string KnobValueRef::toString() const {
|
|||
return std::visit(ToStringFunc{}, value);
|
||||
}
|
||||
|
||||
ConfigDBType configDBTypeFromString(std::string const& str) {
|
||||
if (str == "disabled") {
|
||||
return ConfigDBType::DISABLED;
|
||||
} else if (str == "simple") {
|
||||
return ConfigDBType::SIMPLE;
|
||||
} else if (str == "paxos") {
|
||||
return ConfigDBType::PAXOS;
|
||||
} else {
|
||||
TraceEvent(SevWarnAlways, "InvalidConfigDBString");
|
||||
return ConfigDBType::DISABLED;
|
||||
}
|
||||
}
|
||||
|
||||
std::string configDBTypeToString(ConfigDBType configDBType) {
|
||||
switch (configDBType) {
|
||||
case ConfigDBType::DISABLED:
|
||||
return "disabled";
|
||||
case ConfigDBType::SIMPLE:
|
||||
return "simple";
|
||||
case ConfigDBType::PAXOS:
|
||||
return "paxos";
|
||||
default:
|
||||
ASSERT(false);
|
||||
return "";
|
||||
}
|
||||
}
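// Editor's sketch: the two helpers round-trip for valid inputs, while unknown strings fall
// back to DISABLED (with a SevWarnAlways trace):
//
//     ASSERT(configDBTypeFromString(configDBTypeToString(ConfigDBType::PAXOS)) == ConfigDBType::PAXOS);
//     ASSERT(configDBTypeFromString("bogus") == ConfigDBType::DISABLED);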
|
||||
|
||||
TEST_CASE("/fdbclient/ConfigDB/ConfigKey/EncodeDecode") {
|
||||
Tuple tuple;
|
||||
tuple << "class-A"_sr
|
||||
|
|
|
@ -199,8 +199,5 @@ struct ConfigCommitAnnotationRef {
|
|||
};
|
||||
using ConfigCommitAnnotation = Standalone<ConfigCommitAnnotationRef>;
|
||||
|
||||
enum class UseConfigDB {
|
||||
DISABLED,
|
||||
SIMPLE,
|
||||
PAXOS,
|
||||
};
|
||||
ConfigDBType configDBTypeFromString(std::string const&);
|
||||
std::string configDBTypeToString(ConfigDBType);
|
||||
|
|
|
@ -55,21 +55,18 @@ bool ConfigGeneration::operator!=(ConfigGeneration const& rhs) const {
|
|||
return !(*this == rhs);
|
||||
}
|
||||
|
||||
void ConfigTransactionCommitRequest::set(KeyRef key, ValueRef value) {
|
||||
if (key == configTransactionDescriptionKey) {
|
||||
annotation.description = KeyRef(arena, value);
|
||||
bool ConfigGeneration::operator<(ConfigGeneration const& rhs) const {
|
||||
if (committedVersion != rhs.committedVersion) {
|
||||
return committedVersion < rhs.committedVersion;
|
||||
} else {
|
||||
ConfigKey configKey = ConfigKeyRef::decodeKey(key);
|
||||
auto knobValue = IKnobCollection::parseKnobValue(
|
||||
configKey.knobName.toString(), value.toString(), IKnobCollection::Type::TEST);
|
||||
mutations.emplace_back_deep(arena, configKey, knobValue.contents());
|
||||
return liveVersion < rhs.liveVersion;
|
||||
}
|
||||
}
|
||||
|
||||
void ConfigTransactionCommitRequest::clear(KeyRef key) {
|
||||
if (key == configTransactionDescriptionKey) {
|
||||
annotation.description = ""_sr;
|
||||
bool ConfigGeneration::operator>(ConfigGeneration const& rhs) const {
|
||||
if (committedVersion != rhs.committedVersion) {
|
||||
return committedVersion > rhs.committedVersion;
|
||||
} else {
|
||||
mutations.emplace_back_deep(arena, ConfigKeyRef::decodeKey(key), Optional<KnobValueRef>{});
|
||||
return liveVersion > rhs.liveVersion;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -28,18 +28,20 @@
|
|||
#include "flow/flow.h"
|
||||
|
||||
struct ConfigGeneration {
|
||||
// The live version of each node is monotonically increasing
|
||||
Version liveVersion{ 0 };
|
||||
// The committedVersion of each node is the version of the last commit made durable.
|
||||
// Each committedVersion was previously given to clients as a liveVersion, prior to commit.
|
||||
Version committedVersion{ 0 };
|
||||
// The live version of each node is monotonically increasing
|
||||
Version liveVersion{ 0 };
|
||||
|
||||
bool operator==(ConfigGeneration const&) const;
|
||||
bool operator!=(ConfigGeneration const&) const;
|
||||
bool operator<(ConfigGeneration const&) const;
|
||||
bool operator>(ConfigGeneration const&) const;
|
||||
|
||||
template <class Ar>
|
||||
void serialize(Ar& ar) {
|
||||
serializer(ar, liveVersion, committedVersion);
|
||||
serializer(ar, committedVersion, liveVersion);
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -57,12 +59,16 @@ struct ConfigTransactionGetGenerationReply {
|
|||
|
||||
struct ConfigTransactionGetGenerationRequest {
|
||||
static constexpr FileIdentifier file_identifier = 138941;
|
||||
// A hint to catch up lagging nodes:
|
||||
Optional<Version> lastSeenLiveVersion;
|
||||
ReplyPromise<ConfigTransactionGetGenerationReply> reply;
|
||||
ConfigTransactionGetGenerationRequest() = default;
|
||||
explicit ConfigTransactionGetGenerationRequest(Optional<Version> const& lastSeenLiveVersion)
|
||||
: lastSeenLiveVersion(lastSeenLiveVersion) {}
|
||||
|
||||
template <class Ar>
|
||||
void serialize(Ar& ar) {
|
||||
serializer(ar, reply);
|
||||
serializer(ar, lastSeenLiveVersion, reply);
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -102,10 +108,13 @@ struct ConfigTransactionCommitRequest {
|
|||
ConfigCommitAnnotationRef annotation;
|
||||
ReplyPromise<Void> reply;
|
||||
|
||||
size_t expectedSize() const { return mutations.expectedSize() + annotation.expectedSize(); }
|
||||
ConfigTransactionCommitRequest() = default;
|
||||
explicit ConfigTransactionCommitRequest(ConfigGeneration generation,
|
||||
VectorRef<ConfigMutationRef> mutations,
|
||||
ConfigCommitAnnotationRef annotation)
|
||||
: generation(generation), mutations(arena, mutations), annotation(arena, annotation) {}
|
||||
|
||||
void set(KeyRef key, ValueRef value);
|
||||
void clear(KeyRef key);
|
||||
size_t expectedSize() const { return mutations.expectedSize() + annotation.expectedSize(); }
|
||||
|
||||
template <class Ar>
|
||||
void serialize(Ar& ar) {
|
||||
|
|
|
@ -2355,7 +2355,7 @@ std::string getDRMutationStreamId(StatusObjectReader statusObj, const char* cont
|
|||
}
|
||||
}
|
||||
}
|
||||
TraceEvent(SevWarn, "DBA_TagNotPresentInStatus").detail("Tag", tagName.toString()).detail("Context", context);
|
||||
TraceEvent(SevWarn, "DBA_TagNotPresentInStatus").detail("Tag", tagName).detail("Context", context);
|
||||
throw backup_error();
|
||||
} catch (std::runtime_error& e) {
|
||||
TraceEvent(SevWarn, "DBA_GetDRMutationStreamIdFail").detail("Error", e.what());
|
||||
|
@ -3096,7 +3096,7 @@ public:
|
|||
state Future<Optional<Key>> fBackupKeysPacked =
|
||||
tr->get(backupAgent->config.get(BinaryWriter::toValue(logUid, Unversioned()))
|
||||
.pack(BackupAgentBase::keyConfigBackupRanges));
|
||||
state Future<Optional<Value>> flogVersionKey =
|
||||
state Future<Optional<Value>> flogVersionKey =
|
||||
tr->get(backupAgent->states.get(BinaryWriter::toValue(logUid, Unversioned()))
|
||||
.pack(BackupAgentBase::keyStateLogBeginVersion));
|
||||
|
||||
|
@ -3115,13 +3115,11 @@ public:
|
|||
|
||||
state Optional<Value> stopVersionKey = wait(fStopVersionKey);
|
||||
Optional<Value> logVersionKey = wait(flogVersionKey);
|
||||
state std::string logVersionText
|
||||
= ". Last log version is "
|
||||
+ (
|
||||
logVersionKey.present()
|
||||
? format("%lld", BinaryReader::fromStringRef<Version>(logVersionKey.get(), Unversioned()))
|
||||
: "unset"
|
||||
);
|
||||
state std::string logVersionText =
|
||||
". Last log version is " +
|
||||
(logVersionKey.present()
|
||||
? format("%lld", BinaryReader::fromStringRef<Version>(logVersionKey.get(), Unversioned()))
|
||||
: "unset");
|
||||
Optional<Key> backupKeysPacked = wait(fBackupKeysPacked);
|
||||
|
||||
state Standalone<VectorRef<KeyRangeRef>> backupRanges;
|
||||
|
@ -3140,8 +3138,8 @@ public:
|
|||
"The DR on tag `" + tagNameDisplay + "' is NOT a complete copy of the primary database.\n";
|
||||
break;
|
||||
case EBackupState::STATE_RUNNING_DIFFERENTIAL:
|
||||
statusText +=
|
||||
"The DR on tag `" + tagNameDisplay + "' is a complete copy of the primary database" + logVersionText + ".\n";
|
||||
statusText += "The DR on tag `" + tagNameDisplay +
|
||||
"' is a complete copy of the primary database" + logVersionText + ".\n";
|
||||
break;
|
||||
case EBackupState::STATE_COMPLETED: {
|
||||
Version stopVersion =
|
||||
|
|
|
@ -252,20 +252,20 @@ public:
|
|||
// Management API, create snapshot
|
||||
Future<Void> createSnapshot(StringRef uid, StringRef snapshot_command);
|
||||
|
||||
Future<Standalone<VectorRef<MutationsAndVersionRef>>> getRangeFeedMutations(
|
||||
Future<Standalone<VectorRef<MutationsAndVersionRef>>> getChangeFeedMutations(
|
||||
StringRef rangeID,
|
||||
Version begin = 0,
|
||||
Version end = std::numeric_limits<Version>::max(),
|
||||
KeyRange range = allKeys);
|
||||
|
||||
Future<Void> getRangeFeedStream(const PromiseStream<Standalone<VectorRef<MutationsAndVersionRef>>>& results,
|
||||
StringRef rangeID,
|
||||
Version begin = 0,
|
||||
Version end = std::numeric_limits<Version>::max(),
|
||||
KeyRange range = allKeys);
|
||||
Future<Void> getChangeFeedStream(const PromiseStream<Standalone<VectorRef<MutationsAndVersionRef>>>& results,
|
||||
StringRef rangeID,
|
||||
Version begin = 0,
|
||||
Version end = std::numeric_limits<Version>::max(),
|
||||
KeyRange range = allKeys);
|
||||
|
||||
Future<std::vector<std::pair<Key, KeyRange>>> getOverlappingRangeFeeds(KeyRangeRef ranges, Version minVersion);
|
||||
Future<Void> popRangeFeedMutations(StringRef rangeID, Version version);
|
||||
Future<std::vector<std::pair<Key, KeyRange>>> getOverlappingChangeFeeds(KeyRangeRef ranges, Version minVersion);
|
||||
Future<Void> popChangeFeedMutations(StringRef rangeID, Version version);
|
||||
|
||||
Future<Void> getBlobGranuleRangesStream(const PromiseStream<KeyRange>& results, KeyRange range);
|
||||
// TODO add optional for end version so it can do a GRV in the transaction it already has to do
|
||||
|
@ -460,6 +460,10 @@ public:
|
|||
// Requests to the storage server will no longer be duplicated to its pair TSS.
|
||||
void removeTssMapping(StorageServerInterface const& ssi);
|
||||
|
||||
// used in template functions to create a transaction
|
||||
using TransactionT = ReadYourWritesTransaction;
|
||||
Reference<TransactionT> createTransaction();
|
||||
|
||||
private:
|
||||
std::unordered_map<KeyRef, Reference<WatchMetadata>> watchMap;
|
||||
};
|
||||
|
|
|
@ -41,8 +41,8 @@ typedef UID SpanID;
|
|||
enum {
|
||||
tagLocalitySpecial = -1, // tag with this locality means it is invalidTag (id=0), txsTag (id=1), or cacheTag (id=2)
|
||||
tagLocalityLogRouter = -2,
|
||||
tagLocalityRemoteLog = -3, // tag created by log router for remote tLogs
|
||||
tagLocalityUpgraded = -4,
|
||||
tagLocalityRemoteLog = -3, // tag created by log router for remote (i.e., not in the primary DC) tLogs
|
||||
tagLocalityUpgraded = -4, // tLogs with the old log format
|
||||
tagLocalitySatellite = -5,
|
||||
tagLocalityLogRouterMapped = -6, // The pseudo tag used by log routers to pop the real LogRouter tag (i.e., -2)
|
||||
tagLocalityTxs = -7,
|
||||
|
|
|
@ -3426,7 +3426,8 @@ struct RestoreLogDataTaskFunc : RestoreFileTaskFuncBase {
|
|||
|
||||
state Key mutationLogPrefix = restore.mutationLogPrefix();
|
||||
state Reference<IAsyncFile> inFile = wait(bc->readFile(logFile.fileName));
|
||||
state Standalone<VectorRef<KeyValueRef>> dataOriginal = wait(decodeMutationLogFileBlock(inFile, readOffset, readLen));
|
||||
state Standalone<VectorRef<KeyValueRef>> dataOriginal =
|
||||
wait(decodeMutationLogFileBlock(inFile, readOffset, readLen));
|
||||
|
||||
// Filter the KV pairs extracted from the log file block to remove any records known to not be needed for this
|
||||
// restore based on the restore range set.
|
||||
|
|
|
@ -0,0 +1,259 @@
|
|||
/*
|
||||
* FluentDSampleIngestor.cpp
|
||||
*
|
||||
* This source file is part of the FoundationDB open source project
|
||||
*
|
||||
* Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#include "fdbclient/ActorLineageProfiler.h"
|
||||
#include <boost/asio.hpp>
|
||||
#include <boost/asio/co_spawn.hpp>
|
||||
#include <msgpack.hpp>
|
||||
|
||||
namespace {
|
||||
|
||||
boost::asio::ip::address ipAddress(IPAddress const& n) {
|
||||
if (n.isV6()) {
|
||||
return boost::asio::ip::address_v6(n.toV6());
|
||||
} else {
|
||||
return boost::asio::ip::address_v4(n.toV4());
|
||||
}
|
||||
}
|
||||
|
||||
template <class Protocol>
|
||||
boost::asio::ip::basic_endpoint<Protocol> toEndpoint(NetworkAddress const n) {
|
||||
return boost::asio::ip::basic_endpoint<Protocol>(ipAddress(n.ip), n.port);
|
||||
}
|
||||
|
||||
struct FluentDSocket {
|
||||
virtual ~FluentDSocket() {}
|
||||
virtual void connect(NetworkAddress const& endpoint) = 0;
|
||||
virtual void send(std::shared_ptr<Sample> const& sample) = 0;
|
||||
virtual const boost::system::error_code& failed() const = 0;
|
||||
};
|
||||
|
||||
template <class Protocol, class Callback>
|
||||
class SampleSender : public std::enable_shared_from_this<SampleSender<Protocol, Callback>> {
|
||||
using Socket = typename Protocol::socket;
|
||||
using Iter = typename decltype(Sample::data)::iterator;
|
||||
Socket& socket;
|
||||
Callback callback;
|
||||
Iter iter, end;
|
||||
std::shared_ptr<Sample> sample_; // keeps the sample alive while sends are in flight
|
||||
|
||||
struct Buf {
|
||||
const char* data;
|
||||
const unsigned size;
|
||||
Buf(const char* data, unsigned size) : data(data), size(size) {}
|
||||
Buf(Buf const&) = delete;
|
||||
Buf& operator=(Buf const&) = delete;
|
||||
~Buf() { delete[] data; }
|
||||
};
|
||||
|
||||
void sendCompletionHandler(boost::system::error_code const& ec) {
|
||||
if (ec) {
|
||||
callback(ec);
|
||||
} else {
|
||||
++iter;
|
||||
sendNext();
|
||||
}
|
||||
}
|
||||
|
||||
void send(boost::asio::ip::tcp::socket& socket, std::shared_ptr<Buf> const& buf) {
|
||||
boost::asio::async_write(socket,
|
||||
boost::asio::const_buffer(buf->data, buf->size),
|
||||
[buf, this](auto const& ec, size_t) { this->sendCompletionHandler(ec); });
|
||||
}
|
||||
void send(boost::asio::ip::udp::socket& socket, std::shared_ptr<Buf> const& buf) {
|
||||
socket.async_send(boost::asio::const_buffer(buf->data, buf->size),
|
||||
[buf, this](auto const& ec, size_t) { this->sendCompletionHandler(ec); });
|
||||
}
|
||||
|
||||
void sendNext() {
|
||||
if (iter == end) {
|
||||
callback(boost::system::error_code());
|
||||
return;
|
||||
}
|
||||
// 1. calculate size of buffer
|
||||
unsigned size = 1; // 1 for fixmap identifier byte
|
||||
auto waitState = to_string(iter->first);
|
||||
if (waitState.size() < 32) {
|
||||
size += waitState.size() + 1;
|
||||
} else {
|
||||
size += waitState.size() + 2;
|
||||
}
|
||||
size += iter->second.second;
|
||||
// 2. allocate the buffer
|
||||
std::unique_ptr<char[]> buf(new char[size]);
|
||||
unsigned off = 0;
|
||||
// 3. serialize fixmap
|
||||
buf[off++] = 0x81; // map of size 1
|
||||
// 3.1 serialize key
|
||||
if (waitState.size() < 32) {
|
||||
buf[off++] = 0xa0 + waitState.size(); // fixstr
|
||||
} else {
|
||||
buf[off++] = 0xd9;
|
||||
buf[off++] = char(waitState.size());
|
||||
}
|
||||
memcpy(buf.get() + off, waitState.data(), waitState.size());
|
||||
off += waitState.size();
|
||||
// 3.2 append serialized value
|
||||
memcpy(buf.get() + off, iter->second.first, iter->second.second);
|
||||
// 4. send the result to fluentd
|
||||
send(socket, std::make_shared<Buf>(buf.release(), size));
|
||||
}
|
||||
|
||||
public:
|
||||
SampleSender(Socket& socket, Callback const& callback, std::shared_ptr<Sample> const& sample)
|
||||
: socket(socket), callback(callback), iter(sample->data.begin()), end(sample->data.end()), sample_(sample) {
|
||||
sendNext();
|
||||
}
|
||||
};
|
||||
|
||||
// Helper function to make instantiation of SampleSender easier
|
||||
template <class Protocol, class Callback>
|
||||
std::shared_ptr<SampleSender<Protocol, Callback>> makeSampleSender(typename Protocol::socket& socket,
|
||||
Callback const& callback,
|
||||
std::shared_ptr<Sample> const& sample) {
|
||||
return std::make_shared<SampleSender<Protocol, Callback>>(socket, callback, sample);
|
||||
}
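// Editor's note: sendNext() above frames each entry as a minimal msgpack message. For a key
// such as "Network" (7 bytes) the wire layout is:
//
//     0x81                              // fixmap holding one key/value pair
//     0xa7 'N' 'e' 't' 'w' 'o' 'r' 'k'  // fixstr header (0xa0 + 7) plus the key bytes
//     <value bytes>                     // iter->second: already msgpack-encoded by the collector
//
// Keys of 32 bytes or longer use the str8 header (0xd9, length) instead.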
|
||||
|
||||
template <class Protocol>
|
||||
struct FluentDSocketImpl : FluentDSocket, std::enable_shared_from_this<FluentDSocketImpl<Protocol>> {
|
||||
static constexpr unsigned MAX_QUEUE_SIZE = 100;
|
||||
boost::asio::io_context& context;
|
||||
typename Protocol::socket socket;
|
||||
FluentDSocketImpl(boost::asio::io_context& context) : context(context), socket(context) {}
|
||||
bool ready = false;
|
||||
std::deque<std::shared_ptr<Sample>> queue;
|
||||
boost::system::error_code _failed;
|
||||
|
||||
const boost::system::error_code& failed() const override { return _failed; }
|
||||
|
||||
void sendCompletionHandler(boost::system::error_code const& ec) {
|
||||
if (ec) {
|
||||
// TODO: trace error
|
||||
_failed = ec;
|
||||
return;
|
||||
}
|
||||
if (queue.empty()) {
|
||||
ready = true;
|
||||
} else {
|
||||
auto sample = queue.front();
|
||||
queue.pop_front();
|
||||
sendImpl(sample);
|
||||
}
|
||||
}
|
||||
|
||||
void sendImpl(std::shared_ptr<Sample> const& sample) {
|
||||
makeSampleSender<Protocol>(
|
||||
socket,
|
||||
[self = this->shared_from_this()](boost::system::error_code const& ec) { self->sendCompletionHandler(ec); },
|
||||
sample);
|
||||
}
|
||||
|
||||
void send(std::shared_ptr<Sample> const& sample) override {
|
||||
if (_failed) {
|
||||
return;
|
||||
}
|
||||
if (ready) {
|
||||
ready = false;
|
||||
sendImpl(sample);
|
||||
} else {
|
||||
if (queue.size() < MAX_QUEUE_SIZE) {
|
||||
queue.push_back(sample);
|
||||
} // TODO: else trace a warning
|
||||
}
|
||||
}
|
||||
|
||||
void connect(NetworkAddress const& endpoint) override {
|
||||
auto to = toEndpoint<Protocol>(endpoint);
|
||||
socket.async_connect(to, [self = this->shared_from_this()](boost::system::error_code const& ec) {
|
||||
if (ec) {
|
||||
// TODO: error handling
|
||||
self->_failed = ec;
|
||||
return;
|
||||
}
|
||||
self->ready = true;
|
||||
});
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace
|
||||
|
||||
struct FluentDIngestorImpl {
|
||||
using Protocol = FluentDIngestor::Protocol;
|
||||
Protocol protocol;
|
||||
NetworkAddress endpoint;
|
||||
boost::asio::io_context& io_context;
|
||||
std::shared_ptr<FluentDSocket> socket;
|
||||
boost::asio::steady_timer retryTimer;
|
||||
FluentDIngestorImpl(Protocol protocol, NetworkAddress const& endpoint)
|
||||
: protocol(protocol), endpoint(endpoint), io_context(ActorLineageProfiler::instance().context()),
|
||||
retryTimer(io_context) {
|
||||
connect();
|
||||
}
|
||||
|
||||
~FluentDIngestorImpl() { retryTimer.cancel(); }
|
||||
|
||||
void connect() {
|
||||
switch (protocol) {
|
||||
case Protocol::TCP:
|
||||
socket.reset(new FluentDSocketImpl<boost::asio::ip::tcp>(io_context));
|
||||
break;
|
||||
case Protocol::UDP:
|
||||
socket.reset(new FluentDSocketImpl<boost::asio::ip::udp>(io_context));
|
||||
break;
|
||||
}
|
||||
socket->connect(endpoint);
|
||||
}
|
||||
|
||||
void retry() {
|
||||
retryTimer = boost::asio::steady_timer(io_context, std::chrono::seconds(1));
|
||||
retryTimer.async_wait([this](auto const& ec) {
|
||||
if (ec) {
|
||||
return;
|
||||
}
|
||||
connect();
|
||||
});
|
||||
socket.reset();
|
||||
}
|
||||
};
|
||||
|
||||
FluentDIngestor::~FluentDIngestor() {
|
||||
delete impl;
|
||||
}
|
||||
|
||||
FluentDIngestor::FluentDIngestor(Protocol protocol, NetworkAddress& endpoint)
|
||||
: impl(new FluentDIngestorImpl(protocol, endpoint)) {}
|
||||
|
||||
void FluentDIngestor::ingest(const std::shared_ptr<Sample>& sample) {
|
||||
if (!impl->socket) {
|
||||
// a previous connection attempt failed; wait for the retry timeout before reconnecting
|
||||
return;
|
||||
} else if (impl->socket->failed()) {
|
||||
impl->retry();
|
||||
return;
|
||||
} else {
|
||||
impl->socket->send(sample);
|
||||
}
|
||||
}
|
||||
|
||||
void FluentDIngestor::getConfig(std::map<std::string, std::string>& res) const {
|
||||
res["ingestor"] = "fluentd";
|
||||
res["collector_endpoint"] = impl->endpoint.toString();
|
||||
res["collector_protocol"] = impl->protocol == Protocol::TCP ? "tcp" : "udp";
|
||||
}
|
|
@ -34,6 +34,9 @@ const KeyRef fdbClientInfoTxnSizeLimit = LiteralStringRef("config/fdb_client_inf
|
|||
const KeyRef transactionTagSampleRate = LiteralStringRef("config/transaction_tag_sample_rate");
|
||||
const KeyRef transactionTagSampleCost = LiteralStringRef("config/transaction_tag_sample_cost");
|
||||
|
||||
const KeyRef samplingFrequency = LiteralStringRef("visibility/sampling/frequency");
|
||||
const KeyRef samplingWindow = LiteralStringRef("visibility/sampling/window");
|
||||
|
||||
GlobalConfig::GlobalConfig(Database& cx) : cx(cx), lastUpdate(0) {}
|
||||
|
||||
GlobalConfig& GlobalConfig::globalConfig() {
|
||||
|
|
|
@ -28,6 +28,7 @@
|
|||
|
||||
#include <any>
|
||||
#include <map>
|
||||
#include <optional>
|
||||
#include <type_traits>
|
||||
#include <unordered_map>
|
||||
|
||||
|
@ -49,6 +50,9 @@ extern const KeyRef fdbClientInfoTxnSizeLimit;
|
|||
extern const KeyRef transactionTagSampleRate;
|
||||
extern const KeyRef transactionTagSampleCost;
|
||||
|
||||
extern const KeyRef samplingFrequency;
|
||||
extern const KeyRef samplingWindow;
|
||||
|
||||
// Structure used to hold the values stored by global configuration. The arena
|
||||
// is used as memory to store both the key and the value (the value is only
|
||||
// stored in the arena if it is an object; primitives are just copied).
|
||||
|
@ -78,6 +82,7 @@ public:
|
|||
g_network->setGlobal(INetwork::enGlobalConfig, config);
|
||||
config->_updater = updater(config, dbInfo);
|
||||
// Bind changes in `db` to the `dbInfoChanged` AsyncTrigger.
|
||||
// TODO: Change AsyncTrigger to a Reference
|
||||
forward(db, std::addressof(config->dbInfoChanged));
|
||||
} else {
|
||||
GlobalConfig* config = reinterpret_cast<GlobalConfig*>(g_network->global(INetwork::enGlobalConfig));
|
||||
|
@ -137,9 +142,11 @@ public:
|
|||
Future<Void> onChange();
|
||||
|
||||
// Calls \ref fn when the value associated with \ref key is changed. \ref
|
||||
// fn is passed the updated value for the key, or an empty optional if the
|
||||
// key has been cleared. If the value is an allocated object, its memory
|
||||
// remains in the control of the global configuration.
|
||||
// key should be one of the string literals defined at the top of
|
||||
// GlobalConfig.actor.cpp, to ensure memory validity. \ref fn is passed the
|
||||
// updated value for the key, or an empty optional if the key has been
|
||||
// cleared. If the value is an allocated object, its memory remains in the
|
||||
// control of global configuration.
|
||||
void trigger(KeyRef key, std::function<void(std::optional<std::any>)> fn);
|
||||
|
||||
private:
|
||||
|
@ -171,6 +178,7 @@ private:
|
|||
AsyncTrigger configChanged;
|
||||
std::unordered_map<StringRef, Reference<ConfigValue>> data;
|
||||
Version lastUpdate;
|
||||
// The key should be a global config string literal key (see the top of this class).
|
||||
std::unordered_map<KeyRef, std::function<void(std::optional<std::any>)>> callbacks;
|
||||
};
|
||||
|
||||
|
|
|
@ -88,6 +88,14 @@ public:
|
|||
|
||||
virtual void addref() = 0;
|
||||
virtual void delref() = 0;
|
||||
|
||||
// used in template functions as the returned Future type
|
||||
template <class Type>
|
||||
using FutureT = ThreadFuture<Type>;
|
||||
// Internal use only; returns true by default.
|
||||
// Returns false only when this is a MultiVersionTransaction and the underlying
|
||||
// transaction handle is null.
|
||||
virtual bool isValid() { return true; }
|
||||
};
|
||||
|
||||
// An interface that represents a connection to a cluster made by a client
|
||||
|
@ -115,6 +123,9 @@ public:
|
|||
virtual ThreadFuture<Void> forceRecoveryWithDataLoss(const StringRef& dcid) = 0;
|
||||
// Management API, create snapshot
|
||||
virtual ThreadFuture<Void> createSnapshot(const StringRef& uid, const StringRef& snapshot_command) = 0;
|
||||
|
||||
// used in template functions as the Transaction type that can be created through createTransaction()
|
||||
using TransactionT = ITransaction;
|
||||
};
|
||||
|
||||
// An interface that presents the top-level FDB client API as exposed through the C bindings
|
||||
|
|
|
@ -88,3 +88,14 @@ IKnobCollection const& IKnobCollection::getGlobalKnobCollection() {
|
|||
IKnobCollection& IKnobCollection::getMutableGlobalKnobCollection() {
|
||||
return *globalKnobCollection;
|
||||
}
|
||||
|
||||
ConfigMutationRef IKnobCollection::createSetMutation(Arena arena, KeyRef key, ValueRef value) {
|
||||
ConfigKey configKey = ConfigKeyRef::decodeKey(key);
|
||||
auto knobValue =
|
||||
IKnobCollection::parseKnobValue(configKey.knobName.toString(), value.toString(), IKnobCollection::Type::TEST);
|
||||
return ConfigMutationRef(arena, configKey, knobValue.contents());
|
||||
}
|
||||
|
||||
ConfigMutationRef IKnobCollection::createClearMutation(Arena arena, KeyRef key) {
|
||||
return ConfigMutationRef(arena, ConfigKeyRef::decodeKey(key), {});
|
||||
}
|
||||
|
|
|
@ -54,14 +54,18 @@ public:
|
|||
virtual ClientKnobs const& getClientKnobs() const = 0;
|
||||
virtual ServerKnobs const& getServerKnobs() const = 0;
|
||||
virtual class TestKnobs const& getTestKnobs() const = 0;
|
||||
virtual void clearTestKnobs() {}
|
||||
virtual Optional<KnobValue> tryParseKnobValue(std::string const& knobName, std::string const& knobValue) const = 0;
|
||||
KnobValue parseKnobValue(std::string const& knobName, std::string const& knobValue) const;
|
||||
static KnobValue parseKnobValue(std::string const& knobName, std::string const& knobValue, Type);
|
||||
// Result indicates whether or not knob was successfully set:
|
||||
virtual bool trySetKnob(std::string const& knobName, KnobValueRef const& knobValue) = 0;
|
||||
void setKnob(std::string const& knobName, KnobValueRef const& knobValue);
|
||||
virtual bool isAtomic(std::string const& knobName) const = 0;
|
||||
|
||||
static void setGlobalKnobCollection(Type, Randomize, IsSimulated);
|
||||
static IKnobCollection const& getGlobalKnobCollection();
|
||||
static IKnobCollection& getMutableGlobalKnobCollection();
|
||||
static ConfigMutationRef createSetMutation(Arena, KeyRef, ValueRef);
|
||||
static ConfigMutationRef createClearMutation(Arena, KeyRef);
|
||||
};
|
||||
|
|
|
@ -38,3 +38,16 @@ ISingleThreadTransaction* ISingleThreadTransaction::allocateOnForeignThread(Type
|
|||
ASSERT(false);
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
Reference<ISingleThreadTransaction> ISingleThreadTransaction::create(Type type, Database const& cx) {
|
||||
Reference<ISingleThreadTransaction> result;
|
||||
if (type == Type::RYW) {
|
||||
result = makeReference<ReadYourWritesTransaction>();
|
||||
} else if (type == Type::SIMPLE_CONFIG) {
|
||||
result = makeReference<SimpleConfigTransaction>();
|
||||
} else {
|
||||
result = makeReference<PaxosConfigTransaction>();
|
||||
}
|
||||
result->setDatabase(cx);
|
||||
return result;
|
||||
}
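// Editor's sketch: callers can now construct the appropriate transaction type from a
// Database in one step:
//
//     Reference<ISingleThreadTransaction> tr =
//         ISingleThreadTransaction::create(ISingleThreadTransaction::Type::RYW, cx);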
|
||||
|
|
|
@ -44,7 +44,8 @@ public:
|
|||
PAXOS_CONFIG,
|
||||
};
|
||||
|
||||
static ISingleThreadTransaction* allocateOnForeignThread(Type type);
|
||||
static ISingleThreadTransaction* allocateOnForeignThread(Type);
|
||||
static Reference<ISingleThreadTransaction> create(Type, Database const&);
|
||||
virtual void setDatabase(Database const&) = 0;
|
||||
|
||||
virtual void setVersion(Version v) = 0;
|
||||
|
|
|
@ -23,6 +23,7 @@
|
|||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
#include "fdbclient/IClientApi.h"
|
||||
#include "fdbclient/ReadYourWrites.h"
|
||||
#include "fdbclient/Subspace.h"
|
||||
#include "flow/genericactors.actor.h"
|
||||
|
@ -322,6 +323,10 @@ public:
|
|||
return tr->clear(space.pack(Codec<KeyType>::pack(key)));
|
||||
}
|
||||
|
||||
void erase(Reference<ITransaction> tr, KeyType const& key) {
|
||||
return tr->clear(space.pack(Codec<KeyType>::pack(key)));
|
||||
}
|
||||
|
||||
void erase(Reference<ReadYourWritesTransaction> tr, KeyType const& begin, KeyType const& end) {
|
||||
return tr->clear(KeyRangeRef(space.pack(Codec<KeyType>::pack(begin)), space.pack(Codec<KeyType>::pack(end))));
|
||||
}
|
||||
|
|
|
@ -2459,68 +2459,6 @@ ACTOR Future<Void> waitForPrimaryDC(Database cx, StringRef dcId) {
|
|||
}
|
||||
}
|
||||
|
||||
ACTOR Future<Void> changeCachedRange(Database cx, KeyRangeRef range, bool add) {
|
||||
state ReadYourWritesTransaction tr(cx);
|
||||
state KeyRange sysRange = KeyRangeRef(storageCacheKey(range.begin), storageCacheKey(range.end));
|
||||
state KeyRange sysRangeClear = KeyRangeRef(storageCacheKey(range.begin), keyAfter(storageCacheKey(range.end)));
|
||||
state KeyRange privateRange = KeyRangeRef(cacheKeysKey(0, range.begin), cacheKeysKey(0, range.end));
|
||||
state Value trueValue = storageCacheValue(std::vector<uint16_t>{ 0 });
|
||||
state Value falseValue = storageCacheValue(std::vector<uint16_t>{});
|
||||
loop {
|
||||
tr.setOption(FDBTransactionOptions::LOCK_AWARE);
|
||||
tr.setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
|
||||
try {
|
||||
tr.clear(sysRangeClear);
|
||||
tr.clear(privateRange);
|
||||
tr.addReadConflictRange(privateRange);
|
||||
RangeResult previous =
|
||||
wait(tr.getRange(KeyRangeRef(storageCachePrefix, sysRange.begin), 1, Snapshot::True));
|
||||
bool prevIsCached = false;
|
||||
if (!previous.empty()) {
|
||||
std::vector<uint16_t> prevVal;
|
||||
decodeStorageCacheValue(previous[0].value, prevVal);
|
||||
prevIsCached = !prevVal.empty();
|
||||
}
|
||||
if (prevIsCached && !add) {
|
||||
// we need to uncache from here
|
||||
tr.set(sysRange.begin, falseValue);
|
||||
tr.set(privateRange.begin, serverKeysFalse);
|
||||
} else if (!prevIsCached && add) {
|
||||
// we need to cache, starting from here
|
||||
tr.set(sysRange.begin, trueValue);
|
||||
tr.set(privateRange.begin, serverKeysTrue);
|
||||
}
|
||||
RangeResult after = wait(tr.getRange(KeyRangeRef(sysRange.end, storageCacheKeys.end), 1, Snapshot::False));
|
||||
bool afterIsCached = false;
|
||||
if (!after.empty()) {
|
||||
std::vector<uint16_t> afterVal;
|
||||
decodeStorageCacheValue(after[0].value, afterVal);
|
||||
afterIsCached = afterVal.empty();
|
||||
}
|
||||
if (afterIsCached && !add) {
|
||||
tr.set(sysRange.end, trueValue);
|
||||
tr.set(privateRange.end, serverKeysTrue);
|
||||
} else if (!afterIsCached && add) {
|
||||
tr.set(sysRange.end, falseValue);
|
||||
tr.set(privateRange.end, serverKeysFalse);
|
||||
}
|
||||
wait(tr.commit());
|
||||
return Void();
|
||||
} catch (Error& e) {
|
||||
state Error err = e;
|
||||
wait(tr.onError(err));
|
||||
TraceEvent(SevDebug, "ChangeCachedRangeError").error(err);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Future<Void> addCachedRange(const Database& cx, KeyRangeRef range) {
|
||||
return changeCachedRange(cx, range, true);
|
||||
}
|
||||
Future<Void> removeCachedRange(const Database& cx, KeyRangeRef range) {
|
||||
return changeCachedRange(cx, range, false);
|
||||
}
|
||||
|
||||
json_spirit::Value_type normJSONType(json_spirit::Value_type type) {
|
||||
if (type == json_spirit::int_type)
|
||||
return json_spirit::real_type;
|
||||
|
|
|
@ -248,8 +248,81 @@ bool schemaMatch(json_spirit::mValue const& schema,
|
|||
// storage nodes
|
||||
ACTOR Future<Void> mgmtSnapCreate(Database cx, Standalone<StringRef> snapCmd, UID snapUID);
|
||||
|
||||
Future<Void> addCachedRange(const Database& cx, KeyRangeRef range);
|
||||
Future<Void> removeCachedRange(const Database& cx, KeyRangeRef range);
|
||||
// Management API written in template code to support both IClientAPI and NativeAPI
|
||||
namespace ManagementAPI {
|
||||
|
||||
ACTOR template <class DB>
|
||||
Future<Void> changeCachedRange(Reference<DB> db, KeyRangeRef range, bool add) {
|
||||
state Reference<typename DB::TransactionT> tr = db->createTransaction();
|
||||
state KeyRange sysRange = KeyRangeRef(storageCacheKey(range.begin), storageCacheKey(range.end));
|
||||
state KeyRange sysRangeClear = KeyRangeRef(storageCacheKey(range.begin), keyAfter(storageCacheKey(range.end)));
|
||||
state KeyRange privateRange = KeyRangeRef(cacheKeysKey(0, range.begin), cacheKeysKey(0, range.end));
|
||||
state Value trueValue = storageCacheValue(std::vector<uint16_t>{ 0 });
|
||||
state Value falseValue = storageCacheValue(std::vector<uint16_t>{});
|
||||
loop {
|
||||
tr->setOption(FDBTransactionOptions::LOCK_AWARE);
|
||||
tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
|
||||
try {
|
||||
tr->clear(sysRangeClear);
|
||||
tr->clear(privateRange);
|
||||
tr->addReadConflictRange(privateRange);
|
||||
// hold the returned standalone object's memory
|
||||
state typename DB::TransactionT::template FutureT<RangeResult> previousFuture =
|
||||
tr->getRange(KeyRangeRef(storageCachePrefix, sysRange.begin), 1, Snapshot::False, Reverse::True);
|
||||
RangeResult previous = wait(safeThreadFutureToFuture(previousFuture));
|
||||
bool prevIsCached = false;
|
||||
if (!previous.empty()) {
|
||||
std::vector<uint16_t> prevVal;
|
||||
decodeStorageCacheValue(previous[0].value, prevVal);
|
||||
prevIsCached = !prevVal.empty();
|
||||
}
|
||||
if (prevIsCached && !add) {
|
||||
// we need to uncache from here
|
||||
tr->set(sysRange.begin, falseValue);
|
||||
tr->set(privateRange.begin, serverKeysFalse);
|
||||
} else if (!prevIsCached && add) {
|
||||
// we need to cache, starting from here
|
||||
tr->set(sysRange.begin, trueValue);
|
||||
tr->set(privateRange.begin, serverKeysTrue);
|
||||
}
|
||||
// hold the returned standalone object's memory
|
||||
state typename DB::TransactionT::template FutureT<RangeResult> afterFuture =
|
||||
tr->getRange(KeyRangeRef(sysRange.end, storageCacheKeys.end), 1, Snapshot::False, Reverse::False);
|
||||
RangeResult after = wait(safeThreadFutureToFuture(afterFuture));
|
||||
bool afterIsCached = false;
|
||||
if (!after.empty()) {
|
||||
std::vector<uint16_t> afterVal;
|
||||
decodeStorageCacheValue(after[0].value, afterVal);
|
||||
afterIsCached = afterVal.empty();
|
||||
}
|
||||
if (afterIsCached && !add) {
|
||||
tr->set(sysRange.end, trueValue);
|
||||
tr->set(privateRange.end, serverKeysTrue);
|
||||
} else if (!afterIsCached && add) {
|
||||
tr->set(sysRange.end, falseValue);
|
||||
tr->set(privateRange.end, serverKeysFalse);
|
||||
}
|
||||
wait(safeThreadFutureToFuture(tr->commit()));
|
||||
return Void();
|
||||
} catch (Error& e) {
|
||||
state Error err = e;
|
||||
wait(safeThreadFutureToFuture(tr->onError(e)));
|
||||
TraceEvent(SevDebug, "ChangeCachedRangeError").error(err);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
template <class DB>
|
||||
Future<Void> addCachedRange(Reference<DB> db, KeyRangeRef range) {
|
||||
return changeCachedRange(db, range, true);
|
||||
}
|
||||
|
||||
template <class DB>
|
||||
Future<Void> removeCachedRange(Reference<DB> db, KeyRangeRef range) {
|
||||
return changeCachedRange(db, range, false);
|
||||
}
|
||||
|
||||
} // namespace ManagementAPI
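// Editor's sketch: the template accepts any DB type that provides createTransaction() and
// TransactionT (this change adds both to the native database class), so inside an actor a
// caller holding such a reference can write, e.g.:
//
//     wait(ManagementAPI::addCachedRange(db, KeyRangeRef("a"_sr, "b"_sr)));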
|
||||
|
||||
#include "flow/unactorcompiler.h"
|
||||
#endif
|
||||
|
|
|
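A minimal usage sketch for the template API above. It assumes a Reference<DatabaseContext> handle (this commit adds DatabaseContext::createTransaction(), which satisfies the DB::TransactionT requirement); the actor name and key literals are hypothetical:

ACTOR Future<Void> cacheHotRange(Reference<DatabaseContext> db) {
    state KeyRange hot = KeyRangeRef(LiteralStringRef("hot/"), LiteralStringRef("hot0"));
    // changeCachedRange() flips the boundary markers in \xff/storageCache and \xff/cacheKeys...
    wait(ManagementAPI::addCachedRange(db, hot));
    // ...and removing the range restores them, leaving surrounding cached ranges intact.
    wait(ManagementAPI::removeCachedRange(db, hot));
    return Void();
}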
@ -842,7 +842,7 @@ ACTOR Future<MonitorLeaderInfo> monitorProxiesOneGeneration(
            clientInfo->set(ni);
            successIdx = idx;
        } else {
            TEST(rep.getError().code() == error_code_failed_to_progress); // Coordinator cannot talk to cluster controller
            TEST(rep.getError().code() == error_code_failed_to_progress); // Coordinator can't talk to cluster controller
            idx = (idx + 1) % addrs.size();
            if (idx == successIdx) {
                wait(delay(CLIENT_KNOBS->COORDINATOR_RECONNECTION_DELAY));
@ -877,6 +877,11 @@ void MultiVersionTransaction::reset() {
    updateTransaction();
}

bool MultiVersionTransaction::isValid() {
    auto tr = getTransaction();
    return tr.transaction.isValid();
}

// MultiVersionDatabase
MultiVersionDatabase::MultiVersionDatabase(MultiVersionApi* api,
                                           int threadIdx,
@ -998,12 +1003,17 @@ ThreadFuture<Void> MultiVersionDatabase::createSnapshot(const StringRef& uid, co
}

// Get network thread busyness
// Return the busyness for the main thread. When using external clients, take the larger of the local client
// and the external client's busyness.
double MultiVersionDatabase::getMainThreadBusyness() {
    ASSERT(g_network);

    double localClientBusyness = g_network->networkInfo.metrics.networkBusyness;
    if (dbState->db) {
        return dbState->db->getMainThreadBusyness();
        return std::max(dbState->db->getMainThreadBusyness(), localClientBusyness);
    }

    return 0;
    return localClientBusyness;
}

// Returns the protocol version reported by the coordinator this client is connected to
@ -388,6 +388,9 @@ public:
    void addref() override { ThreadSafeReferenceCounted<MultiVersionTransaction>::addref(); }
    void delref() override { ThreadSafeReferenceCounted<MultiVersionTransaction>::delref(); }

    // return true if the underlying transaction pointer is not empty
    bool isValid() override;

private:
    const Reference<MultiVersionDatabase> db;
    ThreadSpinLock lock;
@ -0,0 +1,196 @@
/*
 * MutationLogReader.actor.cpp
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "fdbclient/MutationLogReader.actor.h"
#include "fdbrpc/simulator.h"
#include "flow/UnitTest.h"
#include "flow/flow.h"
#include "flow/actorcompiler.h" // This must be the last #include.

namespace {

Key versionToKey(Version version, Key prefix) {
    uint64_t versionBigEndian = bigEndian64(version);
    return KeyRef((uint8_t*)&versionBigEndian, sizeof(uint64_t)).withPrefix(prefix);
}

Version keyRefToVersion(KeyRef key, int prefixLen) {
    return (Version)bigEndian64(*((uint64_t*)key.substr(prefixLen).begin()));
}

} // namespace
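The two helpers above make lexicographic key order agree with numeric version order. A standalone sketch of the same idea, using plain std::string in place of FDB's Key types (all names here are illustrative only):

#include <cassert>
#include <cstdint>
#include <string>

// Encode the version as 8 big-endian bytes after the prefix, so that
// comparing keys byte-by-byte compares versions numerically.
std::string makeVersionKey(uint64_t version, const std::string& prefix) {
    std::string key = prefix;
    for (int i = 7; i >= 0; --i)
        key.push_back(char((version >> (8 * i)) & 0xff)); // most significant byte first
    return key;
}

int main() {
    // 255 < 256 must also hold for the encoded keys; little-endian bytes
    // would sort 256 ("\x00\x01...") before 255 ("\xff\x00...").
    assert(makeVersionKey(255, "p") < makeVersionKey(256, "p"));
    assert(makeVersionKey(256, "p") < makeVersionKey(uint64_t(1) << 20, "p"));
    return 0;
}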

namespace mutation_log_reader {

Standalone<RangeResultRef> RangeResultBlock::consume() {
    Version stopVersion = std::min(lastVersion,
                                   (firstVersion + CLIENT_KNOBS->LOG_RANGE_BLOCK_SIZE - 1) /
                                       CLIENT_KNOBS->LOG_RANGE_BLOCK_SIZE * CLIENT_KNOBS->LOG_RANGE_BLOCK_SIZE) +
                          1; // firstVersion rounded up to the nearest 1M versions, then + 1
    int startIndex = indexToRead;
    while (indexToRead < result.size() && keyRefToVersion(result[indexToRead].key, prefixLen) < stopVersion) {
        ++indexToRead;
    }
    if (indexToRead < result.size()) {
        firstVersion = keyRefToVersion(result[indexToRead].key, prefixLen); // the version of result[indexToRead]
    }
    return Standalone<RangeResultRef>(
        RangeResultRef(result.slice(startIndex, indexToRead), result.more, result.readThrough), result.arena());
}
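The rounding in consume() is easiest to see with numbers. A worked instance, assuming a LOG_RANGE_BLOCK_SIZE of 1,000,000 (the exact value is a client knob, not guaranteed):

// firstVersion = 3,200,000, lastVersion = 7,500,000, block size B = 1,000,000
//   rounded up  = (3,200,000 + B - 1) / B * B = 4,000,000   (integer division)
//   stopVersion = min(7,500,000, 4,000,000) + 1 = 4,000,001
// consume() therefore returns only records with version < 4,000,001, so one
// call never straddles a 1M-version block boundary, and blocks from different
// readers can be merged without another block's versions falling in between.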

void PipelinedReader::startReading(Database cx) {
    reader = getNext(cx);
}

Future<Void> PipelinedReader::getNext(Database cx) {
    return getNext_impl(this, cx);
}

ACTOR Future<Void> PipelinedReader::getNext_impl(PipelinedReader* self, Database cx) {
    state Transaction tr(cx);

    state GetRangeLimits limits(GetRangeLimits::ROW_LIMIT_UNLIMITED,
                                (g_network->isSimulated() && !g_simulator.speedUpSimulation)
                                    ? CLIENT_KNOBS->BACKUP_SIMULATED_LIMIT_BYTES
                                    : CLIENT_KNOBS->BACKUP_GET_RANGE_LIMIT_BYTES);

    state Key begin = versionToKey(self->currentBeginVersion, self->prefix);
    state Key end = versionToKey(self->endVersion, self->prefix);

    loop {
        // Get the lock
        wait(self->readerLimit.take());

        // Read begin to end forever until successful
        loop {
            try {
                tr.setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
                tr.setOption(FDBTransactionOptions::LOCK_AWARE);

                RangeResult kvs = wait(tr.getRange(KeyRangeRef(begin, end), limits));

                if (!kvs.empty()) {
                    // Send results to the reads stream
                    self->reads.send(
                        RangeResultBlock{ .result = kvs,
                                          .firstVersion = keyRefToVersion(kvs.front().key, self->prefix.size()),
                                          .lastVersion = keyRefToVersion(kvs.back().key, self->prefix.size()),
                                          .hash = self->hash,
                                          .prefixLen = self->prefix.size(),
                                          .indexToRead = 0 });
                }

                // No more results, send end of stream
                if (!kvs.more) {
                    self->reads.sendError(end_of_stream());
                    return Void();
                }

                begin = kvs.readThrough.present() ? kvs.readThrough.get() : keyAfter(kvs.back().key);

                break;
            } catch (Error& e) {
                if (e.code() == error_code_transaction_too_old) {
                    // We are using this transaction until it's too old and then resetting to a fresh one,
                    // so we don't need to delay.
                    tr.fullReset();
                } else {
                    wait(tr.onError(e));
                }
            }
        }
    }
}

} // namespace mutation_log_reader

ACTOR Future<Void> MutationLogReader::initializePQ(MutationLogReader* self) {
    state int h;
    for (h = 0; h < 256; ++h) {
        try {
            mutation_log_reader::RangeResultBlock front = waitNext(self->pipelinedReaders[h]->reads.getFuture());
            self->priorityQueue.push(front);
        } catch (Error& e) {
            if (e.code() != error_code_end_of_stream) {
                throw e;
            }
            ++self->finished;
        }
    }
    return Void();
}

Future<Standalone<RangeResultRef>> MutationLogReader::getNext() {
    return getNext_impl(this);
}

ACTOR Future<Standalone<RangeResultRef>> MutationLogReader::getNext_impl(MutationLogReader* self) {
    loop {
        if (self->finished == 256) {
            state int i;
            for (i = 0; i < self->pipelinedReaders.size(); ++i) {
                wait(self->pipelinedReaders[i]->done());
            }
            throw end_of_stream();
        }
        mutation_log_reader::RangeResultBlock top = self->priorityQueue.top();
        self->priorityQueue.pop();
        uint8_t hash = top.hash;
        state Standalone<RangeResultRef> ret = top.consume();
        if (top.empty()) {
            self->pipelinedReaders[(int)hash]->release();
            try {
                mutation_log_reader::RangeResultBlock next =
                    waitNext(self->pipelinedReaders[(int)hash]->reads.getFuture());
                self->priorityQueue.push(next);
            } catch (Error& e) {
                if (e.code() == error_code_end_of_stream) {
                    ++self->finished;
                } else {
                    throw e;
                }
            }
        } else {
            self->priorityQueue.push(top);
        }
        if (ret.size() != 0) {
            return ret;
        }
    }
}
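getNext_impl() above is a k-way merge driven by a min-heap: pop the block with the smallest first version, consume it, and refill the heap from the same reader. The same pattern reduced to plain sorted vectors, as a self-contained illustration (not FDB code):

#include <queue>
#include <vector>

// Merge k sorted vectors by always popping the stream with the smallest head,
// exactly as MutationLogReader pops the block with the smallest firstVersion.
std::vector<int> kWayMerge(const std::vector<std::vector<int>>& streams) {
    using Cursor = std::pair<int, std::pair<int, int>>; // (value, (stream, index))
    std::priority_queue<Cursor, std::vector<Cursor>, std::greater<Cursor>> heap;
    for (int s = 0; s < (int)streams.size(); ++s)
        if (!streams[s].empty())
            heap.push({ streams[s][0], { s, 0 } });
    std::vector<int> out;
    while (!heap.empty()) {
        auto [value, pos] = heap.top();
        heap.pop();
        out.push_back(value);
        auto [s, i] = pos;
        if (i + 1 < (int)streams[s].size())
            heap.push({ streams[s][i + 1], { s, i + 1 } }); // refill, like reads.getFuture()
    }
    return out;
}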

namespace {
// UNIT TESTS
TEST_CASE("/fdbclient/mutationlogreader/VersionKeyRefConversion") {
    Key prefix = LiteralStringRef("foos");

    ASSERT(keyRefToVersion(versionToKey(0, prefix), prefix.size()) == 0);
    ASSERT(keyRefToVersion(versionToKey(1, prefix), prefix.size()) == 1);
    ASSERT(keyRefToVersion(versionToKey(-1, prefix), prefix.size()) == -1);
    ASSERT(keyRefToVersion(versionToKey(std::numeric_limits<int64_t>::min(), prefix), prefix.size()) ==
           std::numeric_limits<int64_t>::min());
    ASSERT(keyRefToVersion(versionToKey(std::numeric_limits<int64_t>::max(), prefix), prefix.size()) ==
           std::numeric_limits<int64_t>::max());

    return Void();
}
} // namespace

void forceLinkMutationLogReaderTests() {}
@ -0,0 +1,137 @@
/*
 * MutationLogReader.actor.h
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#if defined(NO_INTELLISENSE) && !defined(FDBCLIENT_MUTATION_LOG_READER_ACTOR_G_H)
#define FDBCLIENT_MUTATION_LOG_READER_ACTOR_G_H
#include "fdbclient/MutationLogReader.actor.g.h"
#elif !defined(FDBCLIENT_MUTATION_LOG_READER_ACTOR_H)
#define FDBCLIENT_MUTATION_LOG_READER_ACTOR_H

#include <deque>
#include "fdbclient/FDBTypes.h"
#include "fdbclient/NativeAPI.actor.h"
#include "flow/flow.h"
#include "flow/ActorCollection.h"
#include "flow/actorcompiler.h" // has to be last include

namespace mutation_log_reader {

// RangeResultBlock is the wrapper of RangeResult. Each PipelinedReader maintains a Deque of RangeResultBlocks from its
// getRange results. MutationLogReader maintains a min heap of RangeResultBlocks and serves partial RangeResults
// from it to the consumer.
struct RangeResultBlock {
    RangeResult result;
    Version firstVersion; // version of first record, inclusive
    Version lastVersion; // version of last record, inclusive
    uint8_t hash; // points back to the PipelinedReader
    int prefixLen; // size of keyspace, uid, and hash prefix
    int indexToRead; // index of first unconsumed record

    // When the consumer reads, this provides a (partial) RangeResult covering firstVersion to min(lastVersion,
    // firstVersion rounded up to the nearest 1M), ensuring that no version from another RangeResultBlock can fall
    // in between.
    Standalone<RangeResultRef> consume();

    bool empty() { return indexToRead == result.size(); }

    bool operator<(const RangeResultBlock& r) const {
        // We want a min heap. The standard C++ priority queue is a max heap.
        return firstVersion > r.firstVersion;
    }
};

// PipelinedReader is the class actually doing the range reads (getRange). A MutationLogReader has 256 PipelinedReaders,
// each in charge of one hash value from 0-255.
class PipelinedReader {
public:
    PipelinedReader(uint8_t h, Version bv, Version ev, unsigned pd, Key p)
      : readerLimit(pd), hash(h), prefix(StringRef(&hash, sizeof(uint8_t)).withPrefix(p)), beginVersion(bv),
        endVersion(ev), currentBeginVersion(bv), pipelineDepth(pd) {}

    void startReading(Database cx);
    Future<Void> getNext(Database cx);
    ACTOR static Future<Void> getNext_impl(PipelinedReader* self, Database cx);

    void release() { readerLimit.release(); }

    PromiseStream<RangeResultBlock> reads;
    FlowLock readerLimit;
    uint8_t hash;
    Key prefix; // "\xff\x02/alog/UID/hash/" for restore, or "\xff\x02/blog/UID/hash/" for backup

    Future<Void> done() { return reader; }

private:
    Version beginVersion, endVersion, currentBeginVersion;
    unsigned pipelineDepth;
    Future<Void> reader;
};

} // namespace mutation_log_reader

// MutationLogReader provides a strictly version ordered stream of KV pairs that represent mutation log chunks written
// by the FDB backup log feature. A MutationLogReader has 256 PipelinedReaders, each in charge of one hash value from
// 0-255. It keeps a min heap of RangeResultBlocks, ordered by their first version. At any time, each PipelinedReader
// has at most one RangeResultBlock in MutationLogReader's min heap. When the consumer reads from MutationLogReader, the
// MutationLogReader calls the heap's top RangeResultBlock's consume() function, to make sure it delivers perfectly
// ordered mutations.
class MutationLogReader : public ReferenceCounted<MutationLogReader> {
public:
    MutationLogReader() : finished(256) {}
    MutationLogReader(Database cx, Version bv, Version ev, Key uid, Key beginKey, unsigned pd)
      : beginVersion(bv), endVersion(ev), prefix(uid.withPrefix(beginKey)), pipelineDepth(pd), finished(0) {
        pipelinedReaders.reserve(256);
        if (pipelineDepth > 0) {
            for (int h = 0; h < 256; ++h) {
                pipelinedReaders.emplace_back(new mutation_log_reader::PipelinedReader(
                    (uint8_t)h, beginVersion, endVersion, pipelineDepth, prefix));
                pipelinedReaders[h]->startReading(cx);
            }
        }
    }

    ACTOR static Future<Reference<MutationLogReader>> Create(Database cx,
                                                             Version bv,
                                                             Version ev,
                                                             Key uid,
                                                             Key beginKey,
                                                             unsigned pd) {
        state Reference<MutationLogReader> self(new MutationLogReader(cx, bv, ev, uid, beginKey, pd));
        wait(self->initializePQ(self.getPtr()));
        return self;
    }

    Future<Standalone<RangeResultRef>> getNext();

private:
    ACTOR static Future<Void> initializePQ(MutationLogReader* self);
    ACTOR static Future<Standalone<RangeResultRef>> getNext_impl(MutationLogReader* self);

    std::vector<std::unique_ptr<mutation_log_reader::PipelinedReader>> pipelinedReaders;
    std::priority_queue<mutation_log_reader::RangeResultBlock> priorityQueue;
    Version beginVersion, endVersion;
    Key prefix; // "\xff\x02/alog/UID/" for restore, or "\xff\x02/blog/UID/" for backup
    unsigned pipelineDepth;
    unsigned finished;
};

#include "flow/unactorcompiler.h"
#endif
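A hypothetical consumption loop for the class above (the actor name and the pipelineDepth value are illustrative; the error handling mirrors how getNext_impl signals completion):

ACTOR Future<Void> drainMutationLog(Database cx, Key uid, Key beginKey, Version bv, Version ev) {
    state Reference<MutationLogReader> reader =
        wait(MutationLogReader::Create(cx, bv, ev, uid, beginKey, /*pipelineDepth=*/4));
    try {
        loop {
            // Batches arrive in strictly increasing version order across all 256 hash shards.
            Standalone<RangeResultRef> batch = wait(reader->getNext());
            // ... decode and apply the mutation log records in `batch` here ...
        }
    } catch (Error& e) {
        if (e.code() != error_code_end_of_stream)
            throw;
    }
    return Void();
}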
@ -0,0 +1,25 @@
/*
 * NameLineage.cpp
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "fdbclient/NameLineage.h"

namespace {
NameLineageCollector nameLineageCollector;
}
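The anonymous-namespace global above is the usual static-initialization self-registration idiom: constructing the collector before main() registers it with the profiler. The generic shape, with a hypothetical Registry standing in for IALPCollector:

#include <string>
#include <vector>

struct Registry {
    static std::vector<std::string>& entries() {
        static std::vector<std::string> v; // function-local static avoids init-order issues
        return v;
    }
};

struct AutoRegister {
    explicit AutoRegister(std::string name) { Registry::entries().push_back(std::move(name)); }
};

namespace {
AutoRegister nameLineageRegistration("NameLineage"); // runs during static initialization
} // namespace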
@ -0,0 +1,42 @@
/*
 * NameLineage.h
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <string_view>

#include "fdbclient/ActorLineageProfiler.h"

struct NameLineage : LineageProperties<NameLineage> {
    static constexpr std::string_view name = "Actor"sv;
    const char* actorName;
};

struct NameLineageCollector : IALPCollector<NameLineage> {
    NameLineageCollector() : IALPCollector() {}
    std::optional<std::any> collect(ActorLineage* lineage) override {
        auto str = lineage->get(&NameLineage::actorName);
        if (str.has_value()) {
            return std::string_view(*str, std::strlen(*str));
        } else {
            return {};
        }
    }
};
@ -32,6 +32,8 @@
#include "fdbrpc/FailureMonitor.h"
#include "fdbrpc/MultiInterface.h"

#include "fdbclient/ActorLineageProfiler.h"
#include "fdbclient/AnnotateActor.h"
#include "fdbclient/Atomic.h"
#include "fdbclient/ClusterInterface.h"
#include "fdbclient/CoordinationInterface.h"
@ -42,6 +44,7 @@
#include "fdbclient/KeyBackedTypes.h"
#include "fdbclient/KeyRangeMap.h"
#include "fdbclient/ManagementAPI.actor.h"
#include "fdbclient/NameLineage.h"
#include "fdbclient/CommitProxyInterface.h"
#include "fdbclient/MonitorLeader.h"
#include "fdbclient/MutationList.h"
@ -50,6 +53,7 @@
#include "fdbclient/SpecialKeySpace.actor.h"
#include "fdbclient/StorageServerInterface.h"
#include "fdbclient/SystemData.h"
#include "fdbclient/TransactionLineage.h"
#include "fdbclient/versions.h"
#include "fdbrpc/LoadBalance.h"
#include "fdbrpc/Net2FileSystem.h"
@ -87,6 +91,9 @@ using std::pair;

namespace {

TransactionLineageCollector transactionLineageCollector;
NameLineageCollector nameLineageCollector;

template <class Interface, class Request>
Future<REPLY_TYPE(Request)> loadBalance(
    DatabaseContext* ctx,
@ -147,16 +154,25 @@ void DatabaseContext::addTssMapping(StorageServerInterface const& ssi, StorageSe
        result->second = tssi;
    }

        // data requests duplicated for load and data comparison
        queueModel.updateTssEndpoint(ssi.getValue.getEndpoint().token.first(),
                                     TSSEndpointData(tssi.id(), tssi.getValue.getEndpoint(), metrics));
        queueModel.updateTssEndpoint(ssi.getKey.getEndpoint().token.first(),
                                     TSSEndpointData(tssi.id(), tssi.getKey.getEndpoint(), metrics));
        queueModel.updateTssEndpoint(ssi.getKeyValues.getEndpoint().token.first(),
                                     TSSEndpointData(tssi.id(), tssi.getKeyValues.getEndpoint(), metrics));
        queueModel.updateTssEndpoint(ssi.watchValue.getEndpoint().token.first(),
                                     TSSEndpointData(tssi.id(), tssi.watchValue.getEndpoint(), metrics));
        queueModel.updateTssEndpoint(ssi.getKeyValuesStream.getEndpoint().token.first(),
                                     TSSEndpointData(tssi.id(), tssi.getKeyValuesStream.getEndpoint(), metrics));

        // non-data requests duplicated for load
        queueModel.updateTssEndpoint(ssi.watchValue.getEndpoint().token.first(),
                                     TSSEndpointData(tssi.id(), tssi.watchValue.getEndpoint(), metrics));
        queueModel.updateTssEndpoint(ssi.splitMetrics.getEndpoint().token.first(),
                                     TSSEndpointData(tssi.id(), tssi.splitMetrics.getEndpoint(), metrics));
        queueModel.updateTssEndpoint(ssi.getReadHotRanges.getEndpoint().token.first(),
                                     TSSEndpointData(tssi.id(), tssi.getReadHotRanges.getEndpoint(), metrics));
        queueModel.updateTssEndpoint(ssi.getRangeSplitPoints.getEndpoint().token.first(),
                                     TSSEndpointData(tssi.id(), tssi.getRangeSplitPoints.getEndpoint(), metrics));
    }
}

@ -168,8 +184,12 @@ void DatabaseContext::removeTssMapping(StorageServerInterface const& ssi) {
        queueModel.removeTssEndpoint(ssi.getValue.getEndpoint().token.first());
        queueModel.removeTssEndpoint(ssi.getKey.getEndpoint().token.first());
        queueModel.removeTssEndpoint(ssi.getKeyValues.getEndpoint().token.first());
        queueModel.removeTssEndpoint(ssi.watchValue.getEndpoint().token.first());
        queueModel.removeTssEndpoint(ssi.getKeyValuesStream.getEndpoint().token.first());

        queueModel.removeTssEndpoint(ssi.watchValue.getEndpoint().token.first());
        queueModel.removeTssEndpoint(ssi.splitMetrics.getEndpoint().token.first());
        queueModel.removeTssEndpoint(ssi.getReadHotRanges.getEndpoint().token.first());
        queueModel.removeTssEndpoint(ssi.getRangeSplitPoints.getEndpoint().token.first());
    }
}

@ -354,6 +374,7 @@ ACTOR Future<Void> databaseLogger(DatabaseContext* cx) {
    state double lastLogged = 0;
    loop {
        wait(delay(CLIENT_KNOBS->SYSTEM_MONITOR_INTERVAL, TaskPriority::FlushTrace));

        TraceEvent ev("TransactionMetrics", cx->dbId);

        ev.detail("Elapsed", (lastLogged == 0) ? 0 : now() - lastLogged)
@ -364,6 +385,7 @@ ACTOR Future<Void> databaseLogger(DatabaseContext* cx) {

        cx->cc.logToTraceEvent(ev);

        ev.detail("LocationCacheEntryCount", cx->locationCache.size());
        ev.detail("MeanLatency", cx->latencies.mean())
            .detail("MedianLatency", cx->latencies.median())
            .detail("Latency90", cx->latencies.percentile(0.90))
@ -1260,6 +1282,14 @@ DatabaseContext::DatabaseContext(Reference<AsyncVar<Reference<ClusterConnectionF
            std::make_unique<DataDistributionImpl>(
                KeyRangeRef(LiteralStringRef("data_distribution/"), LiteralStringRef("data_distribution0"))
                    .withPrefix(SpecialKeySpace::getModuleRange(SpecialKeySpace::MODULE::MANAGEMENT).begin)));
        registerSpecialKeySpaceModule(
            SpecialKeySpace::MODULE::ACTORLINEAGE,
            SpecialKeySpace::IMPLTYPE::READONLY,
            std::make_unique<ActorLineageImpl>(SpecialKeySpace::getModuleRange(SpecialKeySpace::MODULE::ACTORLINEAGE)));
        registerSpecialKeySpaceModule(SpecialKeySpace::MODULE::ACTOR_PROFILER_CONF,
                                      SpecialKeySpace::IMPLTYPE::READWRITE,
                                      std::make_unique<ActorProfilerConf>(SpecialKeySpace::getModuleRange(
                                          SpecialKeySpace::MODULE::ACTOR_PROFILER_CONF)));
    }
    if (apiVersionAtLeast(630)) {
        registerSpecialKeySpaceModule(SpecialKeySpace::MODULE::TRANSACTION,
@ -1761,6 +1791,8 @@ Database Database::createDatabase(Reference<ClusterConnectionFile> connFile,
    auto database = Database(db);
    GlobalConfig::create(
        database, Reference<AsyncVar<ClientDBInfo> const>(clientInfo), std::addressof(clientInfo->get()));
    GlobalConfig::globalConfig().trigger(samplingFrequency, samplingProfilerUpdateFrequency);
    GlobalConfig::globalConfig().trigger(samplingWindow, samplingProfilerUpdateWindow);
    return database;
}

@ -1999,8 +2031,25 @@ ACTOR Future<Void> monitorNetworkBusyness() {
            tracker.windowedTimer = now();
        }

        g_network->networkInfo.metrics.networkBusyness =
            std::min(elapsed, tracker.duration) / elapsed; // average duration spent doing "work"
        double busyFraction = std::min(elapsed, tracker.duration) / elapsed;

        // The burstiness score is an indicator of the maximum busyness spike over the measurement interval.
        // It scales linearly from 0 to 1 as the largest burst goes from the start to the saturation threshold.
        // This allows us to account for saturation that happens in smaller bursts than the measurement interval.
        //
        // Burstiness will not be calculated if the saturation threshold is smaller than the start threshold or
        // if either value is negative.
        double burstiness = 0;
        if (CLIENT_KNOBS->BUSYNESS_SPIKE_START_THRESHOLD >= 0 &&
            CLIENT_KNOBS->BUSYNESS_SPIKE_SATURATED_THRESHOLD >= CLIENT_KNOBS->BUSYNESS_SPIKE_START_THRESHOLD) {
            burstiness = std::min(1.0,
                                  std::max(0.0, tracker.maxDuration - CLIENT_KNOBS->BUSYNESS_SPIKE_START_THRESHOLD) /
                                      std::max(1e-6,
                                               CLIENT_KNOBS->BUSYNESS_SPIKE_SATURATED_THRESHOLD -
                                                   CLIENT_KNOBS->BUSYNESS_SPIKE_START_THRESHOLD));
        }

        g_network->networkInfo.metrics.networkBusyness = std::max(busyFraction, burstiness);

        tracker.duration = 0;
        tracker.maxDuration = 0;
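A worked instance of the burstiness clamp above, with hypothetical knob values (the real defaults live in ClientKnobs):

// BUSYNESS_SPIKE_START_THRESHOLD = 0.100s, BUSYNESS_SPIKE_SATURATED_THRESHOLD = 0.500s
// tracker.maxDuration = 0.300s (longest uninterrupted run of work this interval)
//   burstiness = min(1.0, max(0.0, 0.300 - 0.100) / max(1e-6, 0.500 - 0.100))
//              = min(1.0, 0.200 / 0.400) = 0.5
// So a single 300ms burst reports networkBusyness >= 0.5 even if the averaged
// busyFraction over the whole measurement interval is far lower.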
@ -2744,8 +2793,10 @@ ACTOR Future<Version> watchValue(Future<Version> version,
            cx->invalidateCache(key);
            wait(delay(CLIENT_KNOBS->WRONG_SHARD_SERVER_DELAY, info.taskID));
        } else if (e.code() == error_code_watch_cancelled || e.code() == error_code_process_behind) {
            TEST(e.code() == error_code_watch_cancelled); // Too many watches on storage server, poll for changes
            // clang-format off
            TEST(e.code() == error_code_watch_cancelled); // Too many watches on the storage server, poll for changes instead
            TEST(e.code() == error_code_process_behind); // The storage servers are all behind
            // clang-format on
            wait(delay(CLIENT_KNOBS->WATCH_POLLING_TIME, info.taskID));
        } else if (e.code() == error_code_timed_out) { // The storage server occasionally times out watches in case
                                                       // it was cancelled
@ -3333,6 +3384,7 @@ ACTOR Future<RangeResult> getRange(Database cx,
                throw deterministicRandom()->randomChoice(
                    std::vector<Error>{ transaction_too_old(), future_version() });
            }
            // state AnnotateActor annotation(currentLineage);
            GetKeyValuesReply _rep =
                wait(loadBalance(cx.getPtr(),
                                 beginServer.second,
@ -4093,8 +4145,7 @@ SpanID generateSpanID(int transactionTracingEnabled) {
    }
}

Transaction::Transaction()
  : info(TaskPriority::DefaultEndpoint, generateSpanID(true)), span(info.spanID, "Transaction"_loc) {}
Transaction::Transaction() : info(TaskPriority::DefaultEndpoint, generateSpanID(true)) {}

Transaction::Transaction(Database const& cx)
  : info(cx->taskID, generateSpanID(cx->transactionTracingEnabled)), numErrors(0), options(cx),
@ -4315,23 +4366,23 @@ Future<Standalone<VectorRef<const char*>>> Transaction::getAddressesForKey(const
    return getAddressesForKeyActor(key, ver, cx, info, options);
}

ACTOR Future<Void> registerRangeFeedActor(Transaction* tr, Key rangeID, KeyRange range) {
    state Key rangeIDKey = rangeID.withPrefix(rangeFeedPrefix);
ACTOR Future<Void> registerChangeFeedActor(Transaction* tr, Key rangeID, KeyRange range) {
    state Key rangeIDKey = rangeID.withPrefix(changeFeedPrefix);
    Optional<Value> val = wait(tr->get(rangeIDKey));
    if (!val.present()) {
        tr->set(rangeIDKey, rangeFeedValue(range));
    } else if (decodeRangeFeedValue(val.get()) != range) {
        tr->set(rangeIDKey, changeFeedValue(range, invalidVersion, false));
    } else if (std::get<0>(decodeChangeFeedValue(val.get())) != range) {
        throw unsupported_operation();
    }
    return Void();
}

Future<Void> Transaction::registerRangeFeed(const Key& rangeID, const KeyRange& range) {
    return registerRangeFeedActor(this, rangeID, range);
Future<Void> Transaction::registerChangeFeed(const Key& rangeID, const KeyRange& range) {
    return registerChangeFeedActor(this, rangeID, range);
}

void Transaction::destroyRangeFeed(const Key& rangeID) {
    clear(rangeID.withPrefix(rangeFeedPrefix));
void Transaction::destroyChangeFeed(const Key& rangeID) {
    clear(rangeID.withPrefix(changeFeedPrefix));
}

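A hypothetical caller of the new registration API above (the retry loop and option choice are the caller's responsibility; the feed metadata lives under \xff\x02, so system key access is assumed):

ACTOR Future<Void> createChangeFeed(Database cx, Key feedId, KeyRange range) {
    state Transaction tr(cx);
    loop {
        try {
            tr.setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
            // No-op if the feed is already registered with the same range;
            // throws unsupported_operation() if it exists with a different one.
            wait(tr.registerChangeFeed(feedId, range));
            wait(tr.commit());
            return Void();
        } catch (Error& e) {
            wait(tr.onError(e));
        }
    }
}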
ACTOR Future<Key> getKeyAndConflictRange(Database cx,
@ -6359,7 +6410,7 @@ void enableClientInfoLogging() {
}

ACTOR Future<Void> snapCreate(Database cx, Standalone<StringRef> snapCmd, UID snapUID) {
    TraceEvent("SnapCreateEnter").detail("SnapCmd", snapCmd.toString()).detail("UID", snapUID);
    TraceEvent("SnapCreateEnter").detail("SnapCmd", snapCmd).detail("UID", snapUID);
    try {
        loop {
            choose {
@ -6369,7 +6420,7 @@ ACTOR Future<Void> snapCreate(Database cx, Standalone<StringRef> snapCmd, UID sn
                                           ProxySnapRequest(snapCmd, snapUID, snapUID),
                                           cx->taskID,
                                           AtMostOnce::True))) {
                    TraceEvent("SnapCreateExit").detail("SnapCmd", snapCmd.toString()).detail("UID", snapUID);
                    TraceEvent("SnapCreateExit").detail("SnapCmd", snapCmd).detail("UID", snapUID);
                    return Void();
                }
            }
@ -6531,104 +6582,221 @@ Future<Void> DatabaseContext::createSnapshot(StringRef uid, StringRef snapshot_c
    return createSnapshotActor(this, UID::fromString(uid_str), snapshot_command);
}

ACTOR Future<Standalone<VectorRef<MutationsAndVersionRef>>> getRangeFeedMutationsActor(Reference<DatabaseContext> db,
                                                                                       StringRef rangeID,
                                                                                       Version begin,
                                                                                       Version end,
                                                                                       KeyRange range) {
ACTOR Future<Standalone<VectorRef<MutationsAndVersionRef>>> getChangeFeedMutationsActor(Reference<DatabaseContext> db,
                                                                                        StringRef rangeID,
                                                                                        Version begin,
                                                                                        Version end,
                                                                                        KeyRange range) {
    // FIXME: this function is out of date!
    state Database cx(db);
    state Transaction tr(cx);
    state Key rangeIDKey = rangeID.withPrefix(rangeFeedPrefix);
    state Span span("NAPI:GetRangeFeedMutations"_loc);
    state Key rangeIDKey = rangeID.withPrefix(changeFeedPrefix);
    state Span span("NAPI:GetChangeFeedMutations"_loc);
    Optional<Value> val = wait(tr.get(rangeIDKey));
    if (!val.present()) {
        throw unsupported_operation();
    }
    KeyRange keys = decodeRangeFeedValue(val.get());
    state KeyRange keys = std::get<0>(decodeChangeFeedValue(val.get())) & range;
    state vector<pair<KeyRange, Reference<LocationInfo>>> locations =
        wait(getKeyRangeLocations(cx,
                                  keys,
                                  100,
                                  Reverse::False,
                                  &StorageServerInterface::rangeFeed,
                                  &StorageServerInterface::changeFeed,
                                  TransactionInfo(TaskPriority::DefaultEndpoint, span.context)));

    if (locations.size() > 1) {
        throw unsupported_operation();
    }

    state RangeFeedRequest req;
    state ChangeFeedRequest req;
    req.rangeID = rangeID;
    req.begin = begin;
    req.end = end;
    req.range = keys;

    RangeFeedReply rep = wait(loadBalance(cx.getPtr(),
                                          locations[0].second,
                                          &StorageServerInterface::rangeFeed,
                                          req,
                                          TaskPriority::DefaultPromiseEndpoint,
                                          AtMostOnce::False,
                                          cx->enableLocalityLoadBalance ? &cx->queueModel : nullptr));
    ChangeFeedReply rep = wait(loadBalance(cx.getPtr(),
                                           locations[0].second,
                                           &StorageServerInterface::changeFeed,
                                           req,
                                           TaskPriority::DefaultPromiseEndpoint,
                                           AtMostOnce::False,
                                           cx->enableLocalityLoadBalance ? &cx->queueModel : nullptr));
    return Standalone<VectorRef<MutationsAndVersionRef>>(rep.mutations, rep.arena);
}

Future<Standalone<VectorRef<MutationsAndVersionRef>>> DatabaseContext::getRangeFeedMutations(StringRef rangeID,
                                                                                             Version begin,
                                                                                             Version end,
                                                                                             KeyRange range) {
    return getRangeFeedMutationsActor(Reference<DatabaseContext>::addRef(this), rangeID, begin, end, range);
Future<Standalone<VectorRef<MutationsAndVersionRef>>> DatabaseContext::getChangeFeedMutations(StringRef rangeID,
                                                                                              Version begin,
                                                                                              Version end,
                                                                                              KeyRange range) {
    return getChangeFeedMutationsActor(Reference<DatabaseContext>::addRef(this), rangeID, begin, end, range);
}

ACTOR Future<Void> getRangeFeedStreamActor(Reference<DatabaseContext> db,
                                           PromiseStream<Standalone<VectorRef<MutationsAndVersionRef>>> results,
                                           Key rangeID,
                                           Version begin,
                                           Version end,
                                           KeyRange range) {
    state Database cx(db);
    state Transaction tr(cx);
    state Key rangeIDKey = rangeID.withPrefix(rangeFeedPrefix);
    state Span span("NAPI:GetRangeFeedStream"_loc);
    state KeyRange keys;
ACTOR Future<Void> singleChangeFeedStream(StorageServerInterface interf,
                                          PromiseStream<Standalone<MutationsAndVersionRef>> results,
                                          Key rangeID,
                                          Version begin,
                                          Version end,
                                          KeyRange range) {
    loop {
        try {
            Optional<Value> val = wait(tr.get(rangeIDKey));
            if (!val.present()) {
                results.sendError(unsupported_operation());
                return Void();
            state ChangeFeedStreamRequest req;
            req.rangeID = rangeID;
            req.begin = begin;
            req.end = end;
            req.range = range;

            state ReplyPromiseStream<ChangeFeedStreamReply> replyStream = interf.changeFeedStream.getReplyStream(req);

            loop {
                state ChangeFeedStreamReply rep = waitNext(replyStream.getFuture());
                begin = rep.mutations.back().version + 1;
                state int resultLoc = 0;
                while (resultLoc < rep.mutations.size()) {
                    if (rep.mutations[resultLoc].mutations.size() || rep.mutations[resultLoc].version + 1 == end) {
                        wait(results.onEmpty());
                        results.send(rep.mutations[resultLoc]);
                    }
                    resultLoc++;
                }
                if (begin == end) {
                    results.sendError(end_of_stream());
                    return Void();
                }
            }
            keys = decodeRangeFeedValue(val.get());
            break;
        } catch (Error& e) {
            wait(tr.onError(e));
            if (e.code() == error_code_wrong_shard_server || e.code() == error_code_all_alternatives_failed ||
                e.code() == error_code_connection_failed || e.code() == error_code_unknown_change_feed ||
                e.code() == error_code_actor_cancelled) {
                throw;
            }
            results.sendError(e);
            return Void();
        }
    }
}

struct MutationAndVersionStream {
    Standalone<MutationsAndVersionRef> next;
    PromiseStream<Standalone<MutationsAndVersionRef>> results;

    bool operator<(MutationAndVersionStream const& rhs) const { return next.version > rhs.next.version; }
};

ACTOR Future<Void> mergeChangeFeedStream(std::vector<std::pair<StorageServerInterface, KeyRange>> interfs,
                                         PromiseStream<Standalone<VectorRef<MutationsAndVersionRef>>> results,
                                         Key rangeID,
                                         Version* begin,
                                         Version end) {
    state std::priority_queue<MutationAndVersionStream, std::vector<MutationAndVersionStream>> mutations;
    state std::vector<Future<Void>> fetchers(interfs.size());
    state std::vector<MutationAndVersionStream> streams(interfs.size());
    for (int i = 0; i < interfs.size(); i++) {
        fetchers[i] =
            singleChangeFeedStream(interfs[i].first, streams[i].results, rangeID, *begin, end, interfs[i].second);
    }
    state int interfNum = 0;
    while (interfNum < interfs.size()) {
        try {
            Standalone<MutationsAndVersionRef> res = waitNext(streams[interfNum].results.getFuture());
            streams[interfNum].next = res;
            mutations.push(streams[interfNum]);
        } catch (Error& e) {
            if (e.code() != error_code_end_of_stream) {
                throw e;
            }
        }
        interfNum++;
    }
    state Version checkVersion = invalidVersion;
    state Standalone<VectorRef<MutationsAndVersionRef>> nextOut;
    while (mutations.size()) {
        state MutationAndVersionStream nextStream = mutations.top();
        mutations.pop();
        ASSERT(nextStream.next.version >= checkVersion);
        if (nextStream.next.version != checkVersion) {
            if (nextOut.size()) {
                *begin = checkVersion + 1;
                results.send(nextOut);
                nextOut = Standalone<VectorRef<MutationsAndVersionRef>>();
            }
            checkVersion = nextStream.next.version;
        }
        nextOut.push_back_deep(nextOut.arena(), nextStream.next);
        try {
            Standalone<MutationsAndVersionRef> res = waitNext(nextStream.results.getFuture());
            nextStream.next = res;
            mutations.push(nextStream);
        } catch (Error& e) {
            if (e.code() != error_code_end_of_stream) {
                throw e;
            }
        }
    }
    if (nextOut.size()) {
        results.send(nextOut);
    }
    throw end_of_stream();
}

ACTOR Future<Void> getChangeFeedStreamActor(Reference<DatabaseContext> db,
                                            PromiseStream<Standalone<VectorRef<MutationsAndVersionRef>>> results,
                                            StringRef rangeID,
                                            Version begin,
                                            Version end,
                                            KeyRange range) {
    state Database cx(db);
    state Transaction tr(cx);
    state Key rangeIDKey = rangeID.withPrefix(changeFeedPrefix);
    state Span span("NAPI:GetChangeFeedStream"_loc);
    state KeyRange keys;

    loop {
        loop {
            try {
                Version readVer = wait(tr.getReadVersion());
                if (readVer < begin) {
                    wait(delay(FLOW_KNOBS->PREVENT_FAST_SPIN_DELAY));
                } else {
                    Optional<Value> val = wait(tr.get(rangeIDKey));
                    if (!val.present()) {
                        results.sendError(unsupported_operation());
                        return Void();
                    }
                    keys = std::get<0>(decodeChangeFeedValue(val.get())) & range;
                    break;
                }
            } catch (Error& e) {
                wait(tr.onError(e));
            }
        }

        try {
            state vector<pair<KeyRange, Reference<LocationInfo>>> locations =
                wait(getKeyRangeLocations(cx,
                                          keys,
                                          100,
                                          1000,
                                          Reverse::False,
                                          &StorageServerInterface::rangeFeed,
                                          &StorageServerInterface::changeFeed,
                                          TransactionInfo(TaskPriority::DefaultEndpoint, span.context)));

            if (locations.size() > 1) {
            if (locations.size() >= 1000) {
                ASSERT(false);
                results.sendError(unsupported_operation());
                return Void();
            }

            state int useIdx = -1;

            loop {
                state std::vector<int> chosenLocations(locations.size());
                state int loc = 0;
                while (loc < locations.size()) {
                    // FIXME: create a load balance function for this code so future users of reply streams do not have
                    // to duplicate this code
                    int count = 0;
                    for (int i = 0; i < locations[0].second->size(); i++) {
                    int useIdx = -1;
                    for (int i = 0; i < locations[loc].second->size(); i++) {
                        if (!IFailureMonitor::failureMonitor()
                                 .getState(
                                     locations[0].second->get(i, &StorageServerInterface::rangeFeedStream).getEndpoint())
                                     locations[loc].second->get(i, &StorageServerInterface::changeFeedStream).getEndpoint())
                                 .failed) {
                            if (deterministicRandom()->random01() <= 1.0 / ++count) {
                                useIdx = i;
@ -6637,13 +6805,15 @@ ACTOR Future<Void> getRangeFeedStreamActor(Reference<DatabaseContext> db,
                    }

                    if (useIdx >= 0) {
                        break;
                        chosenLocations[loc] = useIdx;
                        loc++;
                        continue;
                    }

                    vector<Future<Void>> ok(locations[0].second->size());
                    vector<Future<Void>> ok(locations[loc].second->size());
                    for (int i = 0; i < ok.size(); i++) {
                        ok[i] = IFailureMonitor::failureMonitor().onStateEqual(
                            locations[0].second->get(i, &StorageServerInterface::rangeFeedStream).getEndpoint(),
                            locations[loc].second->get(i, &StorageServerInterface::changeFeedStream).getEndpoint(),
                            FailureStatus(false));
                    }

@ -6654,23 +6824,36 @@ ACTOR Future<Void> getRangeFeedStreamActor(Reference<DatabaseContext> db,
                    }

                    wait(allAlternativesFailedDelay(quorum(ok, 1)));
                    loc = 0;
                }

                state RangeFeedStreamRequest req;
                req.rangeID = rangeID;
                req.begin = begin;
                req.end = end;
                if (locations.size() > 1) {
                    std::vector<std::pair<StorageServerInterface, KeyRange>> interfs;
                    for (int i = 0; i < locations.size(); i++) {
                        interfs.push_back(std::make_pair(locations[i].second->getInterface(chosenLocations[i]),
                                                         locations[i].first & range));
                    }
                    wait(mergeChangeFeedStream(interfs, results, rangeID, &begin, end) || cx->connectionFileChanged());
                } else {
                    state ChangeFeedStreamRequest req;
                    req.rangeID = rangeID;
                    req.begin = begin;
                    req.end = end;
                    req.range = range;

                state ReplyPromiseStream<RangeFeedStreamReply> replyStream =
                    locations[0].second->get(useIdx, &StorageServerInterface::rangeFeedStream).getReplyStream(req);
                    state ReplyPromiseStream<ChangeFeedStreamReply> replyStream =
                        locations[0]
                            .second->get(chosenLocations[0], &StorageServerInterface::changeFeedStream)
                            .getReplyStream(req);

                loop {
                    wait(results.onEmpty());
                    choose {
                        when(wait(cx->connectionFileChanged())) { break; }
                        when(RangeFeedStreamReply rep = waitNext(replyStream.getFuture())) {
                            begin = rep.mutations.back().version + 1;
                            results.send(Standalone<VectorRef<MutationsAndVersionRef>>(rep.mutations, rep.arena));
                    loop {
                        wait(results.onEmpty());
                        choose {
                            when(wait(cx->connectionFileChanged())) { break; }
                            when(ChangeFeedStreamReply rep = waitNext(replyStream.getFuture())) {
                                begin = rep.mutations.back().version + 1;
                                results.send(Standalone<VectorRef<MutationsAndVersionRef>>(rep.mutations, rep.arena));
                            }
                        }
                    }
                }
@ -6679,7 +6862,7 @@ ACTOR Future<Void> getRangeFeedStreamActor(Reference<DatabaseContext> db,
                throw;
            }
            if (e.code() == error_code_wrong_shard_server || e.code() == error_code_all_alternatives_failed ||
                e.code() == error_code_connection_failed) {
                e.code() == error_code_connection_failed || e.code() == error_code_unknown_change_feed) {
                cx->invalidateCache(keys);
                wait(delay(CLIENT_KNOBS->WRONG_SHARD_SERVER_DELAY));
            } else {
@ -6690,85 +6873,164 @@ ACTOR Future<Void> getRangeFeedStreamActor(Reference<DatabaseContext> db,
        }
    }
}

Future<Void> DatabaseContext::getRangeFeedStream(
Future<Void> DatabaseContext::getChangeFeedStream(
    const PromiseStream<Standalone<VectorRef<MutationsAndVersionRef>>>& results,
    StringRef rangeID,
    Version begin,
    Version end,
    KeyRange range) {
    return getRangeFeedStreamActor(Reference<DatabaseContext>::addRef(this), results, rangeID, begin, end, range);
    return getChangeFeedStreamActor(Reference<DatabaseContext>::addRef(this), results, rangeID, begin, end, range);
}
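A hypothetical consumer of getChangeFeedStream() above (the actor name, the end version argument, and the end_of_stream handling are illustrative):

ACTOR Future<Void> tailChangeFeed(Reference<DatabaseContext> db,
                                  Key feedId,
                                  KeyRange range,
                                  Version begin,
                                  Version end) {
    state PromiseStream<Standalone<VectorRef<MutationsAndVersionRef>>> results;
    state Future<Void> feed = db->getChangeFeedStream(results, feedId, begin, end, range);
    try {
        loop {
            // Each element of a batch carries the mutations of one commit version,
            // already merged across shards and ordered by version.
            Standalone<VectorRef<MutationsAndVersionRef>> batch = waitNext(results.getFuture());
            // ... apply `batch` ...
        }
    } catch (Error& e) {
        if (e.code() != error_code_end_of_stream)
            throw;
    }
    return Void();
}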

ACTOR Future<std::vector<std::pair<Key, KeyRange>>> getOverlappingRangeFeedsActor(Reference<DatabaseContext> db,
                                                                                  KeyRangeRef range,
                                                                                  Version minVersion) {
    state Database cx(db);
    state Transaction tr(cx);
    state Span span("NAPI:GetOverlappingRangeFeeds"_loc);
    state vector<pair<KeyRange, Reference<LocationInfo>>> locations =
        wait(getKeyRangeLocations(cx,
                                  range,
                                  100,
                                  Reverse::False,
                                  &StorageServerInterface::rangeFeed,
                                  TransactionInfo(TaskPriority::DefaultEndpoint, span.context)));

    if (locations.size() > 1) {
        throw unsupported_operation();
    }

    state OverlappingRangeFeedsRequest req;
ACTOR Future<std::vector<std::pair<Key, KeyRange>>> singleLocationOverlappingChangeFeeds(
    Database cx,
    Reference<LocationInfo> location,
    KeyRangeRef range,
    Version minVersion) {
    state OverlappingChangeFeedsRequest req;
    req.range = range;
    req.minVersion = minVersion;

    OverlappingRangeFeedsReply rep = wait(loadBalance(cx.getPtr(),
                                                      locations[0].second,
                                                      &StorageServerInterface::overlappingRangeFeeds,
                                                      req,
                                                      TaskPriority::DefaultPromiseEndpoint,
                                                      AtMostOnce::False,
                                                      cx->enableLocalityLoadBalance ? &cx->queueModel : nullptr));
    OverlappingChangeFeedsReply rep = wait(loadBalance(cx.getPtr(),
                                                       location,
                                                       &StorageServerInterface::overlappingChangeFeeds,
                                                       req,
                                                       TaskPriority::DefaultPromiseEndpoint,
                                                       AtMostOnce::False,
                                                       cx->enableLocalityLoadBalance ? &cx->queueModel : nullptr));
    return rep.rangeIds;
}

Future<std::vector<std::pair<Key, KeyRange>>> DatabaseContext::getOverlappingRangeFeeds(KeyRangeRef range,
                                                                                        Version minVersion) {
    return getOverlappingRangeFeedsActor(Reference<DatabaseContext>::addRef(this), range, minVersion);
bool compareChangeFeedResult(const std::pair<Key, KeyRange>& i, const std::pair<Key, KeyRange>& j) {
    return i.first < j.first;
}

ACTOR Future<Void> popRangeFeedMutationsActor(Reference<DatabaseContext> db, StringRef rangeID, Version version) {
ACTOR Future<std::vector<std::pair<Key, KeyRange>>> getOverlappingChangeFeedsActor(Reference<DatabaseContext> db,
                                                                                   KeyRangeRef range,
                                                                                   Version minVersion) {
    state Database cx(db);
    state Transaction tr(cx);
    state Key rangeIDKey = rangeID.withPrefix(rangeFeedPrefix);
    state Span span("NAPI:PopRangeFeedMutations"_loc);
    state Span span("NAPI:GetOverlappingChangeFeeds"_loc);

    loop {
        try {
            state vector<pair<KeyRange, Reference<LocationInfo>>> locations =
                wait(getKeyRangeLocations(cx,
                                          range,
                                          1000,
                                          Reverse::False,
                                          &StorageServerInterface::overlappingChangeFeeds,
                                          TransactionInfo(TaskPriority::DefaultEndpoint, span.context)));

            if (locations.size() >= 1000) {
                throw unsupported_operation();
            }

            state std::vector<Future<std::vector<std::pair<Key, KeyRange>>>> allOverlappingRequests;
            for (auto& it : locations) {
                allOverlappingRequests.push_back(
                    singleLocationOverlappingChangeFeeds(cx, it.second, it.first & range, minVersion));
            }
            wait(waitForAll(allOverlappingRequests));

            std::vector<std::pair<Key, KeyRange>> result;
            for (auto& it : allOverlappingRequests) {
                result.insert(result.end(), it.get().begin(), it.get().end());
            }
            std::sort(result.begin(), result.end(), compareChangeFeedResult);
            result.resize(std::unique(result.begin(), result.end()) - result.begin());
            return result;
        } catch (Error& e) {
            if (e.code() == error_code_wrong_shard_server || e.code() == error_code_all_alternatives_failed) {
                cx->invalidateCache(range);
                wait(delay(CLIENT_KNOBS->WRONG_SHARD_SERVER_DELAY));
            } else {
                throw e;
            }
        }
    }
}

Future<std::vector<std::pair<Key, KeyRange>>> DatabaseContext::getOverlappingChangeFeeds(KeyRangeRef range,
                                                                                         Version minVersion) {
    return getOverlappingChangeFeedsActor(Reference<DatabaseContext>::addRef(this), range, minVersion);
}

ACTOR static Future<Void> popChangeFeedBackup(Database cx, StringRef rangeID, Version version) {
    state Transaction tr(cx);
    loop {
        try {
            state Key rangeIDKey = rangeID.withPrefix(changeFeedPrefix);
            Optional<Value> val = wait(tr.get(rangeIDKey));
            if (val.present()) {
                KeyRange range;
                Version popVersion;
                bool stopped;
                std::tie(range, popVersion, stopped) = decodeChangeFeedValue(val.get());
                if (version > popVersion) {
                    tr.set(rangeIDKey, changeFeedValue(range, invalidVersion, stopped));
                }
            } else {
                throw unsupported_operation();
            }
            wait(tr.commit());
            return Void();
        } catch (Error& e) {
            wait(tr.onError(e));
        }
    }
}

ACTOR Future<Void> popChangeFeedMutationsActor(Reference<DatabaseContext> db, StringRef rangeID, Version version) {
    state Database cx(db);
    state Transaction tr(cx);
    state Key rangeIDKey = rangeID.withPrefix(changeFeedPrefix);
    state Span span("NAPI:PopChangeFeedMutations"_loc);
    Optional<Value> val = wait(tr.get(rangeIDKey));
    if (!val.present()) {
        throw unsupported_operation();
    }
    KeyRange keys = decodeRangeFeedValue(val.get());
    state KeyRange keys = std::get<0>(decodeChangeFeedValue(val.get()));
    state vector<pair<KeyRange, Reference<LocationInfo>>> locations =
        wait(getKeyRangeLocations(cx,
                                  keys,
                                  100,
                                  3,
                                  Reverse::False,
                                  &StorageServerInterface::rangeFeed,
                                  &StorageServerInterface::changeFeed,
                                  TransactionInfo(TaskPriority::DefaultEndpoint, span.context)));

    if (locations.size() > 1) {
        throw unsupported_operation();
    if (locations.size() > 2) {
        wait(popChangeFeedBackup(cx, rangeID, version));
        return Void();
    }

    // FIXME: lookup both the src and dest shards as of the pop version to ensure all locations are popped
    state std::vector<Future<Void>> popRequests;
    for (int i = 0; i < locations[0].second->size(); i++) {
        popRequests.push_back(
            locations[0].second->getInterface(i).rangeFeedPop.getReply(RangeFeedPopRequest(rangeID, version)));
    for (int i = 0; i < locations.size(); i++) {
        for (int j = 0; j < locations[i].second->size(); j++) {
            popRequests.push_back(locations[i].second->getInterface(j).changeFeedPop.getReply(
                ChangeFeedPopRequest(rangeID, version, locations[i].first)));
        }
    }

    try {
        choose {
            when(wait(waitForAll(popRequests))) {}
            when(wait(delay(5.0))) { wait(popChangeFeedBackup(cx, rangeID, version)); }
        }
    } catch (Error& e) {
        if (e.code() != error_code_unknown_change_feed && e.code() != error_code_wrong_shard_server &&
            e.code() != error_code_all_alternatives_failed) {
            throw;
        }
        cx->invalidateCache(keys);
        wait(popChangeFeedBackup(cx, rangeID, version));
    }
    wait(waitForAll(popRequests));
    return Void();
}

Future<Void> DatabaseContext::popRangeFeedMutations(StringRef rangeID, Version version) {
    return popRangeFeedMutationsActor(Reference<DatabaseContext>::addRef(this), rangeID, version);
Future<Void> DatabaseContext::popChangeFeedMutations(StringRef rangeID, Version version) {
    return popChangeFeedMutationsActor(Reference<DatabaseContext>::addRef(this), rangeID, version);
}

#define BG_REQUEST_DEBUG false
@ -7031,3 +7293,7 @@ ACTOR Future<Void> setPerpetualStorageWiggle(Database cx, bool enable, LockAware
    }
    return Void();
}

Reference<DatabaseContext::TransactionT> DatabaseContext::createTransaction() {
    return makeReference<ReadYourWritesTransaction>(Database(Reference<DatabaseContext>::addRef(this)));
}
@ -107,6 +107,7 @@ public:
    inline DatabaseContext* getPtr() const { return db.getPtr(); }
    inline DatabaseContext* extractPtr() { return db.extractPtr(); }
    DatabaseContext* operator->() const { return db.getPtr(); }
    Reference<DatabaseContext> getReference() const { return db; }

    const UniqueOrderedOptionList<FDBTransactionOptions>& getTransactionDefaults() const;

@ -181,6 +182,9 @@ struct TransactionInfo {
    // prefix/<key2> : '0' - any keys equal or larger than this key are (definitely) not conflicting keys
    std::shared_ptr<CoalescedKeyRangeMap<Value>> conflictingKeys;

    // Only available so that Transaction can have a default constructor, for use in state variables
    TransactionInfo() : taskID(), spanID(), useProvisionalProxies() {}

    explicit TransactionInfo(TaskPriority taskID, SpanID spanID)
      : taskID(taskID), spanID(spanID), useProvisionalProxies(false) {}
};

@ -327,8 +331,8 @@ public:

    [[nodiscard]] Future<Standalone<VectorRef<const char*>>> getAddressesForKey(const Key& key);

    Future<Void> registerRangeFeed(const Key& rangeID, const KeyRange& range);
    void destroyRangeFeed(const Key& rangeID);
    Future<Void> registerChangeFeed(const Key& rangeID, const KeyRange& range);
    void destroyChangeFeed(const Key& rangeID);

    void enableCheckWrites();
    void addReadConflictRange(KeyRangeRef const& keys);
@ -0,0 +1,42 @@
/*
 * PImpl.h
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <memory>

template <class T>
class PImpl {
    std::unique_ptr<T> impl;
    struct ConstructorTag {};
    template <class... Args>
    PImpl(ConstructorTag, Args&&... args) : impl(std::make_unique<T>(std::forward<Args>(args)...)) {}

public:
    PImpl() = default;
    template <class... Args>
    static PImpl create(Args&&... args) {
        return PImpl(ConstructorTag{}, std::forward<Args>(args)...);
    }
    T& operator*() { return *impl; }
    T const& operator*() const { return *impl; }
    T* operator->() { return impl.get(); }
    T const* operator->() const { return impl.get(); }
};
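Typical use of the wrapper above, with a hypothetical Widget: the implementation type is complete only in the .cpp, which keeps it out of the header's dependency graph. The destructor is declared in the header but defined in the .cpp so the unique_ptr is destroyed where WidgetImpl is complete:

// widget.h -- WidgetImpl is only forward-declared here
class WidgetImpl;
class Widget {
    PImpl<WidgetImpl> impl;
public:
    Widget();
    ~Widget(); // defined in widget.cpp, where WidgetImpl is complete
    int size() const;
};

// widget.cpp -- the only translation unit needing WidgetImpl's definition
class WidgetImpl {
public:
    int size = 42;
};
Widget::Widget() : impl(PImpl<WidgetImpl>::create()) {}
Widget::~Widget() = default;
int Widget::size() const { return impl->size; }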
@ -64,9 +64,7 @@ TEST_CASE("/fdbclient/ParallelStream") {
    state ParallelStream<ParallelStreamTest::TestValue> parallelStream(results, bufferLimit);
    state Future<Void> consumer = ParallelStreamTest::consume(results.getFuture(), numProducers);
    state std::vector<Future<Void>> producers;
    TraceEvent("StartingParallelStreamTest")
        .detail("BufferLimit", bufferLimit)
        .detail("NumProducers", numProducers);
    TraceEvent("StartingParallelStreamTest").detail("BufferLimit", bufferLimit).detail("NumProducers", numProducers);
    state int i = 0;
    for (; i < numProducers; ++i) {
        ParallelStream<ParallelStreamTest::TestValue>::Fragment* fragment = wait(parallelStream.createFragment());
@ -22,35 +22,176 @@
#include "fdbclient/PaxosConfigTransaction.h"
#include "flow/actorcompiler.h" // must be last include

class PaxosConfigTransactionImpl {
    ConfigTransactionCommitRequest toCommit;
    Future<ConfigGeneration> getGenerationFuture;
class CommitQuorum {
    ActorCollection actors{ false };
    std::vector<ConfigTransactionInterface> ctis;
    size_t failed{ 0 };
    size_t successful{ 0 };
    size_t maybeCommitted{ 0 };
    Promise<Void> result;
    Standalone<VectorRef<ConfigMutationRef>> mutations;
    ConfigCommitAnnotation annotation;

    ConfigTransactionCommitRequest getCommitRequest(ConfigGeneration generation) const {
        return ConfigTransactionCommitRequest(generation, mutations, annotation);
    }

    void updateResult() {
        if (successful >= ctis.size() / 2 + 1 && result.canBeSet()) {
            result.send(Void());
        } else if (failed >= ctis.size() / 2 + 1 && result.canBeSet()) {
            result.sendError(not_committed());
        } else {
            // Check if it is possible to ever receive quorum agreement
            auto totalRequestsOutstanding = ctis.size() - (failed + successful + maybeCommitted);
            if ((failed + totalRequestsOutstanding < ctis.size() / 2 + 1) &&
                (successful + totalRequestsOutstanding < ctis.size() / 2 + 1) && result.canBeSet()) {
                result.sendError(commit_unknown_result());
            }
        }
    }
|
||||
ACTOR static Future<Void> addRequestActor(CommitQuorum* self,
|
||||
ConfigGeneration generation,
|
||||
ConfigTransactionInterface cti) {
|
||||
try {
|
||||
wait(retryBrokenPromise(cti.commit, self->getCommitRequest(generation)));
|
||||
++self->successful;
|
||||
} catch (Error& e) {
|
||||
if (e.code() == error_code_not_committed) {
|
||||
++self->failed;
|
||||
} else {
|
||||
++self->maybeCommitted;
|
||||
}
|
||||
}
|
||||
self->updateResult();
|
||||
return Void();
|
||||
}
|
||||
|
||||
public:
|
||||
CommitQuorum() = default;
|
||||
explicit CommitQuorum(std::vector<ConfigTransactionInterface> const& ctis) : ctis(ctis) {}
|
||||
void set(KeyRef key, ValueRef value) {
|
||||
if (key == configTransactionDescriptionKey) {
|
||||
annotation.description = ValueRef(annotation.arena(), value);
|
||||
} else {
|
||||
mutations.push_back_deep(mutations.arena(),
|
||||
IKnobCollection::createSetMutation(mutations.arena(), key, value));
|
||||
}
|
||||
}
|
||||
void clear(KeyRef key) {
|
||||
if (key == configTransactionDescriptionKey) {
|
||||
annotation.description = ""_sr;
|
||||
} else {
|
||||
mutations.push_back_deep(mutations.arena(), IKnobCollection::createClearMutation(mutations.arena(), key));
|
||||
}
|
||||
}
|
||||
void setTimestamp() { annotation.timestamp = now(); }
|
||||
size_t expectedSize() const { return annotation.expectedSize() + mutations.expectedSize(); }
|
||||
Future<Void> commit(ConfigGeneration generation) {
|
||||
        // Send the commit message to all replicas, even those that did not return the
        // generation in use. This way, slow replicas are kept up to date.
        for (const auto& cti : ctis) {
            actors.add(addRequestActor(this, generation, cti));
        }
        return result.getFuture();
    }
    bool committed() const { return result.isSet(); }
};
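Both quorum helpers in this file use ctis.size() / 2 + 1 as the majority threshold. A worked check of updateResult's early-abort arithmetic, assuming five configuration replicas (the reply counts below are invented for illustration):

    // With ctis.size() == 5, a quorum needs 5 / 2 + 1 == 3 matching replies.
    // Case 1: failed = 2, successful = 1, maybeCommitted = 1, so
    //   totalRequestsOutstanding = 5 - (2 + 1 + 1) = 1
    //   failed + outstanding = 3 >= 3: a not_committed quorum is still
    //   reachable, so the result stays unset and we keep waiting.
    // Case 2: failed = 2, successful = 1, maybeCommitted = 2, so
    //   totalRequestsOutstanding = 0, failed + 0 = 2 < 3 and
    //   successful + 0 = 1 < 3: neither quorum can ever form, and
    //   commit_unknown_result() is sent.
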

class GetGenerationQuorum {
    ActorCollection actors{ false };
    std::vector<ConfigTransactionInterface> ctis;
    std::map<ConfigGeneration, std::vector<ConfigTransactionInterface>> seenGenerations;
    Promise<ConfigGeneration> result;
    size_t totalRepliesReceived{ 0 };
    size_t maxAgreement{ 0 };
    Optional<Version> lastSeenLiveVersion;
    Future<ConfigGeneration> getGenerationFuture;

    ACTOR static Future<Void> addRequestActor(GetGenerationQuorum* self, ConfigTransactionInterface cti) {
        ConfigTransactionGetGenerationReply reply = wait(
            retryBrokenPromise(cti.getGeneration, ConfigTransactionGetGenerationRequest{ self->lastSeenLiveVersion }));

        ++self->totalRepliesReceived;
        auto gen = reply.generation;
        self->lastSeenLiveVersion = std::max(gen.liveVersion, self->lastSeenLiveVersion.orDefault(::invalidVersion));
        auto& replicas = self->seenGenerations[gen];
        replicas.push_back(cti);
        self->maxAgreement = std::max(replicas.size(), self->maxAgreement);
        if (replicas.size() >= self->ctis.size() / 2 + 1 && !self->result.isSet()) {
            self->result.send(gen);
        } else if (self->maxAgreement + (self->ctis.size() - self->totalRepliesReceived) <
                   (self->ctis.size() / 2 + 1)) {
            if (!self->result.isError()) {
                self->result.sendError(failed_to_reach_quorum());
            }
        }
        return Void();
    }

    ACTOR static Future<ConfigGeneration> getGenerationActor(GetGenerationQuorum* self) {
        state int retries = 0;
        loop {
            for (const auto& cti : self->ctis) {
                self->actors.add(addRequestActor(self, cti));
            }
            try {
                choose {
                    when(ConfigGeneration generation = wait(self->result.getFuture())) { return generation; }
                    when(wait(self->actors.getResult())) { ASSERT(false); }
                }
            } catch (Error& e) {
                if (e.code() == error_code_failed_to_reach_quorum) {
                    TEST(true); // Failed to reach quorum getting generation
                    wait(delayJittered(0.01 * (1 << retries)));
                    ++retries;
                    self->actors.clear(false);
                } else {
                    throw e;
                }
            }
        }
    }

public:
    GetGenerationQuorum() = default;
    explicit GetGenerationQuorum(std::vector<ConfigTransactionInterface> const& ctis,
                                 Optional<Version> const& lastSeenLiveVersion = {})
      : ctis(ctis), lastSeenLiveVersion(lastSeenLiveVersion) {}
    Future<ConfigGeneration> getGeneration() {
        if (!getGenerationFuture.isValid()) {
            getGenerationFuture = getGenerationActor(this);
        }
        return getGenerationFuture;
    }
    bool isReady() const {
        return getGenerationFuture.isValid() && getGenerationFuture.isReady() && !getGenerationFuture.isError();
    }
    Optional<ConfigGeneration> getCachedGeneration() const {
        return isReady() ? getGenerationFuture.get() : Optional<ConfigGeneration>{};
    }
    std::vector<ConfigTransactionInterface> getReadReplicas() const {
        ASSERT(isReady());
        return seenGenerations.at(getGenerationFuture.get());
    }
    Optional<Version> getLastSeenLiveVersion() const { return lastSeenLiveVersion; }
};

class PaxosConfigTransactionImpl {
    std::vector<ConfigTransactionInterface> ctis;
    GetGenerationQuorum getGenerationQuorum;
    CommitQuorum commitQuorum;
    int numRetries{ 0 };
    bool committed{ false };
    Optional<UID> dID;
    Database cx;

    ACTOR static Future<ConfigGeneration> getGeneration(PaxosConfigTransactionImpl* self) {
        state std::vector<Future<ConfigTransactionGetGenerationReply>> getGenerationFutures;
        getGenerationFutures.reserve(self->ctis.size());
        for (auto const& cti : self->ctis) {
            getGenerationFutures.push_back(cti.getGeneration.getReply(ConfigTransactionGetGenerationRequest{}));
        }
        // FIXME: Must tolerate failures and disagreement
        wait(waitForAll(getGenerationFutures));
        return getGenerationFutures[0].get().generation;
    }

    ACTOR static Future<Optional<Value>> get(PaxosConfigTransactionImpl* self, Key key) {
        if (!self->getGenerationFuture.isValid()) {
            self->getGenerationFuture = getGeneration(self);
        }
        state ConfigKey configKey = ConfigKey::decodeKey(key);
        ConfigGeneration generation = wait(self->getGenerationFuture);
        ConfigGeneration generation = wait(self->getGenerationQuorum.getGeneration());
        // TODO: Load balance
        ConfigTransactionGetReply reply =
            wait(self->ctis[0].get.getReply(ConfigTransactionGetRequest{ generation, configKey }));
        ConfigTransactionGetReply reply = wait(retryBrokenPromise(
            self->getGenerationQuorum.getReadReplicas()[0].get, ConfigTransactionGetRequest{ generation, configKey }));
        if (reply.value.present()) {
            return reply.value.get().toValue();
        } else {

@ -59,13 +200,11 @@ class PaxosConfigTransactionImpl {
    }

    ACTOR static Future<RangeResult> getConfigClasses(PaxosConfigTransactionImpl* self) {
        if (!self->getGenerationFuture.isValid()) {
            self->getGenerationFuture = getGeneration(self);
        }
        ConfigGeneration generation = wait(self->getGenerationFuture);
        ConfigGeneration generation = wait(self->getGenerationQuorum.getGeneration());
        // TODO: Load balance
        ConfigTransactionGetConfigClassesReply reply =
            wait(self->ctis[0].getClasses.getReply(ConfigTransactionGetConfigClassesRequest{ generation }));
            wait(retryBrokenPromise(self->getGenerationQuorum.getReadReplicas()[0].getClasses,
                                    ConfigTransactionGetConfigClassesRequest{ generation }));
        RangeResult result;
        result.reserve(result.arena(), reply.configClasses.size());
        for (const auto& configClass : reply.configClasses) {

@ -75,13 +214,11 @@ class PaxosConfigTransactionImpl {
    }

    ACTOR static Future<RangeResult> getKnobs(PaxosConfigTransactionImpl* self, Optional<Key> configClass) {
        if (!self->getGenerationFuture.isValid()) {
            self->getGenerationFuture = getGeneration(self);
        }
        ConfigGeneration generation = wait(self->getGenerationFuture);
        ConfigGeneration generation = wait(self->getGenerationQuorum.getGeneration());
        // TODO: Load balance
        ConfigTransactionGetKnobsReply reply =
            wait(self->ctis[0].getKnobs.getReply(ConfigTransactionGetKnobsRequest{ generation, configClass }));
            wait(retryBrokenPromise(self->getGenerationQuorum.getReadReplicas()[0].getKnobs,
                                    ConfigTransactionGetKnobsRequest{ generation, configClass }));
        RangeResult result;
        result.reserve(result.arena(), reply.knobNames.size());
        for (const auto& knobName : reply.knobNames) {

@ -91,45 +228,47 @@ class PaxosConfigTransactionImpl {
    }

    ACTOR static Future<Void> commit(PaxosConfigTransactionImpl* self) {
        if (!self->getGenerationFuture.isValid()) {
            self->getGenerationFuture = getGeneration(self);
        }
        wait(store(self->toCommit.generation, self->getGenerationFuture));
        self->toCommit.annotation.timestamp = now();
        std::vector<Future<Void>> commitFutures;
        commitFutures.reserve(self->ctis.size());
        for (const auto& cti : self->ctis) {
            commitFutures.push_back(cti.commit.getReply(self->toCommit));
        }
        // FIXME: Must tolerate failures and disagreement
        wait(quorum(commitFutures, commitFutures.size() / 2 + 1));
        self->committed = true;
        ConfigGeneration generation = wait(self->getGenerationQuorum.getGeneration());
        self->commitQuorum.setTimestamp();
        wait(self->commitQuorum.commit(generation));
        return Void();
    }

    ACTOR static Future<Void> onError(PaxosConfigTransactionImpl* self, Error e) {
        // TODO: Improve this:
        TraceEvent("ConfigIncrementOnError").error(e).detail("NumRetries", self->numRetries);
        if (e.code() == error_code_transaction_too_old || e.code() == error_code_not_committed) {
            wait(delay((1 << self->numRetries++) * 0.01 * deterministicRandom()->random01()));
            self->reset();
            return Void();
        }
        throw e;
    }
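The retry arm above is a randomized exponential backoff: attempt n sleeps for at most 2^n * 10 ms. A worked illustration of the bound, taking deterministicRandom()->random01() at its maximum of 1.0 (the retry counts below are just arithmetic, not from this commit):

    // delay((1 << numRetries++) * 0.01 * random01()), with random01() == 1.0:
    //   retry 0: (1 << 0) * 0.01 = 0.01 s
    //   retry 1: (1 << 1) * 0.01 = 0.02 s
    //   retry 2: (1 << 2) * 0.01 = 0.04 s
    //   retry 5: (1 << 5) * 0.01 = 0.32 s
    // The random01() factor spreads retries out so clients that failed
    // together do not all retry at the same instant.
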

public:
    Future<Version> getReadVersion() {
        if (!getGenerationFuture.isValid()) {
            getGenerationFuture = getGeneration(this);
        }
        return map(getGenerationFuture, [](auto const& gen) { return gen.committedVersion; });
        return map(getGenerationQuorum.getGeneration(), [](auto const& gen) { return gen.committedVersion; });
    }

    Optional<Version> getCachedReadVersion() const {
        if (getGenerationFuture.isValid() && getGenerationFuture.isReady() && !getGenerationFuture.isError()) {
            return getGenerationFuture.get().committedVersion;
        auto gen = getGenerationQuorum.getCachedGeneration();
        if (gen.present()) {
            return gen.get().committedVersion;
        } else {
            return {};
        }
    }

    Version getCommittedVersion() const { return committed ? getGenerationFuture.get().liveVersion : ::invalidVersion; }
    Version getCommittedVersion() const {
        return commitQuorum.committed() ? getGenerationQuorum.getCachedGeneration().get().liveVersion
                                        : ::invalidVersion;
    }

    int64_t getApproximateSize() const { return toCommit.expectedSize(); }
    int64_t getApproximateSize() const { return commitQuorum.expectedSize(); }

    void set(KeyRef key, ValueRef value) { toCommit.set(key, value); }
    void set(KeyRef key, ValueRef value) { commitQuorum.set(key, value); }

    void clear(KeyRef key) { toCommit.clear(key); }
    void clear(KeyRef key) { commitQuorum.clear(key); }

    Future<Optional<Value>> get(Key const& key) { return get(this, key); }

@ -146,21 +285,13 @@ public:
        }
    }

    Future<Void> onError(Error const& e) {
        // TODO: Improve this:
        if (e.code() == error_code_transaction_too_old) {
            reset();
            return delay((1 << numRetries++) * 0.01 * deterministicRandom()->random01());
        }
        throw e;
    }
    Future<Void> onError(Error const& e) { return onError(this, e); }

    void debugTransaction(UID dID) { this->dID = dID; }

    void reset() {
        getGenerationFuture = Future<ConfigGeneration>{};
        toCommit = {};
        committed = false;
        getGenerationQuorum = GetGenerationQuorum{ ctis };
        commitQuorum = CommitQuorum{ ctis };
    }

    void fullReset() {

@ -186,21 +317,24 @@ public:
        for (const auto& coordinator : coordinators) {
            ctis.emplace_back(coordinator);
        }
        getGenerationQuorum = GetGenerationQuorum{ ctis };
        commitQuorum = CommitQuorum{ ctis };
    }

    PaxosConfigTransactionImpl(std::vector<ConfigTransactionInterface> const& ctis) : ctis(ctis) {}
    PaxosConfigTransactionImpl(std::vector<ConfigTransactionInterface> const& ctis)
      : ctis(ctis), getGenerationQuorum(ctis), commitQuorum(ctis) {}
};

Future<Version> PaxosConfigTransaction::getReadVersion() {
    return impl().getReadVersion();
    return impl->getReadVersion();
}

Optional<Version> PaxosConfigTransaction::getCachedReadVersion() const {
    return impl().getCachedReadVersion();
    return impl->getCachedReadVersion();
}

Future<Optional<Value>> PaxosConfigTransaction::get(Key const& key, Snapshot) {
    return impl().get(key);
    return impl->get(key);
}

Future<RangeResult> PaxosConfigTransaction::getRange(KeySelector const& begin,

@ -211,7 +345,7 @@ Future<RangeResult> PaxosConfigTransaction::getRange(KeySelector const& begin,
    if (reverse) {
        throw client_invalid_operation();
    }
    return impl().getRange(KeyRangeRef(begin.getKey(), end.getKey()));
    return impl->getRange(KeyRangeRef(begin.getKey(), end.getKey()));
}

Future<RangeResult> PaxosConfigTransaction::getRange(KeySelector begin,

@ -222,27 +356,27 @@ Future<RangeResult> PaxosConfigTransaction::getRange(KeySelector begin,
    if (reverse) {
        throw client_invalid_operation();
    }
    return impl().getRange(KeyRangeRef(begin.getKey(), end.getKey()));
    return impl->getRange(KeyRangeRef(begin.getKey(), end.getKey()));
}

void PaxosConfigTransaction::set(KeyRef const& key, ValueRef const& value) {
    return impl().set(key, value);
    return impl->set(key, value);
}

void PaxosConfigTransaction::clear(KeyRef const& key) {
    return impl().clear(key);
    return impl->clear(key);
}

Future<Void> PaxosConfigTransaction::commit() {
    return impl().commit();
    return impl->commit();
}

Version PaxosConfigTransaction::getCommittedVersion() const {
    return impl().getCommittedVersion();
    return impl->getCommittedVersion();
}

int64_t PaxosConfigTransaction::getApproximateSize() const {
    return impl().getApproximateSize();
    return impl->getApproximateSize();
}

void PaxosConfigTransaction::setOption(FDBTransactionOptions::Option option, Optional<StringRef> value) {

@ -250,7 +384,7 @@ void PaxosConfigTransaction::setOption(FDBTransactionOptions::Option option, Opt
}

Future<Void> PaxosConfigTransaction::onError(Error const& e) {
    return impl().onError(e);
    return impl->onError(e);
}

void PaxosConfigTransaction::cancel() {

@ -259,28 +393,28 @@ void PaxosConfigTransaction::cancel() {
}

void PaxosConfigTransaction::reset() {
    impl().reset();
    impl->reset();
}

void PaxosConfigTransaction::fullReset() {
    impl().fullReset();
    impl->fullReset();
}

void PaxosConfigTransaction::debugTransaction(UID dID) {
    impl().debugTransaction(dID);
    impl->debugTransaction(dID);
}

void PaxosConfigTransaction::checkDeferredError() const {
    impl().checkDeferredError(deferredError);
    impl->checkDeferredError(deferredError);
}

PaxosConfigTransaction::PaxosConfigTransaction(std::vector<ConfigTransactionInterface> const& ctis)
  : _impl(std::make_unique<PaxosConfigTransactionImpl>(ctis)) {}
  : impl(PImpl<PaxosConfigTransactionImpl>::create(ctis)) {}

PaxosConfigTransaction::PaxosConfigTransaction() = default;

PaxosConfigTransaction::~PaxosConfigTransaction() = default;

void PaxosConfigTransaction::setDatabase(Database const& cx) {
    _impl = std::make_unique<PaxosConfigTransactionImpl>(cx);
    impl = PImpl<PaxosConfigTransactionImpl>::create(cx);
}

@ -23,14 +23,13 @@
#include <memory>

#include "fdbclient/IConfigTransaction.h"
#include "fdbclient/PImpl.h"

/*
 * Fault-tolerant configuration transaction implementation
 */
class PaxosConfigTransaction final : public IConfigTransaction, public FastAllocated<PaxosConfigTransaction> {
    std::unique_ptr<class PaxosConfigTransactionImpl> _impl;
    PaxosConfigTransactionImpl const& impl() const { return *_impl; }
    PaxosConfigTransactionImpl& impl() { return *_impl; }
    PImpl<class PaxosConfigTransactionImpl> impl;

public:
    PaxosConfigTransaction(std::vector<ConfigTransactionInterface> const&);

@ -0,0 +1,81 @@
/*
 * ProcessInterface.h
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "fdbclient/AnnotateActor.h"
#include "fdbclient/FDBTypes.h"
#include "fdbrpc/fdbrpc.h"

constexpr UID WLTOKEN_PROCESS(-1, 21);

struct ProcessInterface {
    constexpr static FileIdentifier file_identifier = 985636;
    RequestStream<struct GetProcessInterfaceRequest> getInterface;
    RequestStream<struct ActorLineageRequest> actorLineage;

    template <class Ar>
    void serialize(Ar& ar) {
        serializer(ar, actorLineage);
    }
};

struct GetProcessInterfaceRequest {
    constexpr static FileIdentifier file_identifier = 7632546;
    ReplyPromise<ProcessInterface> reply;

    template <class Ar>
    void serialize(Ar& ar) {
        serializer(ar, reply);
    }
};

// This type is used to send serialized sample data over the network.
struct SerializedSample {
    constexpr static FileIdentifier file_identifier = 15785634;

    double time;
    std::unordered_map<WaitState, std::string> data;

    template <class Ar>
    void serialize(Ar& ar) {
        serializer(ar, time, data);
    }
};

struct ActorLineageReply {
    constexpr static FileIdentifier file_identifier = 1887656;
    std::vector<SerializedSample> samples;

    template <class Ar>
    void serialize(Ar& ar) {
        serializer(ar, samples);
    }
};

struct ActorLineageRequest {
    constexpr static FileIdentifier file_identifier = 11654765;
    WaitState waitStateStart, waitStateEnd;
    time_t timeStart, timeEnd;
    ReplyPromise<ActorLineageReply> reply;

    template <class Ar>
    void serialize(Ar& ar) {
        serializer(ar, waitStateStart, waitStateEnd, timeStart, timeEnd, reply);
    }
};

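These structs follow the usual fdbrpc request/reply pattern: a client fills in a request, sends it on the matching RequestStream, and waits on the ReplyPromise. A hedged sketch of how a monitoring client holding a ProcessInterface might fetch lineage samples; the wait-state bounds and time window are invented for illustration, and WaitState::Disk / WaitState::Running are assumed enumerators from AnnotateActor.h:

    ACTOR Future<Void> fetchLineageSamples(ProcessInterface process) {
        // Hypothetical query window: wait states from Disk through Running,
        // covering the last 60 seconds.
        ActorLineageRequest request;
        request.waitStateStart = WaitState::Disk;
        request.waitStateEnd = WaitState::Running;
        request.timeStart = now() - 60;
        request.timeEnd = now();
        ActorLineageReply reply = wait(process.actorLineage.getReply(request));
        for (const auto& sample : reply.samples) {
            TraceEvent("LineageSample").detail("Time", sample.time);
        }
        return Void();
    }
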
@ -175,6 +175,10 @@ public:
    void setSpecialKeySpaceErrorMsg(const std::string& msg) { specialKeySpaceErrorMsg = msg; }
    Transaction& getTransaction() { return tr; }

    // used in template functions as returned Future type
    template <typename Type>
    using FutureT = Future<Type>;

private:
    friend class RYWImpl;

@ -50,3 +50,7 @@ Optional<KnobValue> ServerKnobCollection::tryParseKnobValue(std::string const& k
bool ServerKnobCollection::trySetKnob(std::string const& knobName, KnobValueRef const& knobValue) {
    return clientKnobCollection.trySetKnob(knobName, knobValue) || knobValue.visitSetKnob(knobName, serverKnobs);
}

bool ServerKnobCollection::isAtomic(std::string const& knobName) const {
    return clientKnobCollection.isAtomic(knobName) || serverKnobs.isAtomic(knobName);
}

@ -41,4 +41,5 @@ public:
    TestKnobs const& getTestKnobs() const override { throw internal_error(); }
    Optional<KnobValue> tryParseKnobValue(std::string const& knobName, std::string const& knobValue) const override;
    bool trySetKnob(std::string const& knobName, KnobValueRef const& knobValue) override;
    bool isAtomic(std::string const& knobName) const override;
};

@ -20,7 +20,7 @@

#include "fdbclient/ServerKnobs.h"

#define init(knob, value) initKnob(knob, value, #knob)
#define init(...) KNOB_FN(__VA_ARGS__, INIT_ATOMIC_KNOB, INIT_KNOB)(__VA_ARGS__)

ServerKnobs::ServerKnobs(Randomize randomize, ClientKnobs* clientKnobs, IsSimulated isSimulated) {
    initialize(randomize, clientKnobs, isSimulated);

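The new init macro dispatches on argument count so the same spelling can register both plain and atomic knobs. A sketch of the underlying arity-selection idiom; the KNOB_FN, INIT_KNOB, and INIT_ATOMIC_KNOB bodies below are assumptions, since only the init line itself appears in this hunk:

    // KNOB_FN always expands to its fourth argument, so the trailing
    // INIT_ATOMIC_KNOB / INIT_KNOB pair acts as a lookup table keyed on
    // how many arguments the caller supplied.
    #define KNOB_FN(_1, _2, _3, FN, ...) FN
    #define INIT_KNOB(knob, value) initKnob(knob, value, #knob)
    #define INIT_ATOMIC_KNOB(knob, value, atomic) initKnob(knob, value, #knob, atomic)
    #define init(...) KNOB_FN(__VA_ARGS__, INIT_ATOMIC_KNOB, INIT_KNOB)(__VA_ARGS__)

    // init(MAX_QUEUE_COMMIT_BYTES, 15e6)     -> two args  -> INIT_KNOB
    // init(SOME_KNOB, 1.0, Atomic::NO)       -> three args -> INIT_ATOMIC_KNOB
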
@ -64,6 +64,7 @@ void ServerKnobs::initialize(Randomize randomize, ClientKnobs* clientKnobs, IsSi
    init( TLOG_MESSAGE_BLOCK_BYTES, 10e6 );
    init( TLOG_MESSAGE_BLOCK_OVERHEAD_FACTOR, double(TLOG_MESSAGE_BLOCK_BYTES) / (TLOG_MESSAGE_BLOCK_BYTES - MAX_MESSAGE_SIZE) ); //1.0121466709838096006362758832473
    init( PEEK_TRACKER_EXPIRATION_TIME, 600 ); if( randomize && BUGGIFY ) PEEK_TRACKER_EXPIRATION_TIME = deterministicRandom()->coinflip() ? 0.1 : 120;
    init( PEEK_USING_STREAMING, true );
    init( PARALLEL_GET_MORE_REQUESTS, 32 ); if( randomize && BUGGIFY ) PARALLEL_GET_MORE_REQUESTS = 2;
    init( MULTI_CURSOR_PRE_FETCH_LIMIT, 10 );
    init( MAX_QUEUE_COMMIT_BYTES, 15e6 ); if( randomize && BUGGIFY ) MAX_QUEUE_COMMIT_BYTES = 5000;

@ -343,6 +344,9 @@ void ServerKnobs::initialize(Randomize randomize, ClientKnobs* clientKnobs, IsSi
    init( ROCKSDB_PREFIX_LEN, 0 );
    init( ROCKSDB_BLOCK_CACHE_SIZE, 0 );
    init( ROCKSDB_METRICS_DELAY, 60.0 );
    init( ROCKSDB_READ_VALUE_TIMEOUT, 5.0 );
    init( ROCKSDB_READ_VALUE_PREFIX_TIMEOUT, 5.0 );
    init( ROCKSDB_READ_RANGE_TIMEOUT, 5.0 );

    // Leader election
    bool longLeaderElection = randomize && BUGGIFY;

@ -627,6 +631,7 @@ void ServerKnobs::initialize(Randomize randomize, ClientKnobs* clientKnobs, IsSi
    init( FETCH_KEYS_TOO_LONG_TIME_CRITERIA, 300.0 );
    init( MAX_STORAGE_COMMIT_TIME, 120.0 ); //The max fsync stall time on the storage server and tlog before marking a disk as failed
    init( RANGESTREAM_LIMIT_BYTES, 2e6 ); if( randomize && BUGGIFY ) RANGESTREAM_LIMIT_BYTES = 1;
    init( ENABLE_CLEAR_RANGE_EAGER_READS, true );

    //Wait Failure
    init( MAX_OUTSTANDING_WAIT_FAILURE_REQUESTS, 250 ); if( randomize && BUGGIFY ) MAX_OUTSTANDING_WAIT_FAILURE_REQUESTS = 2;

@ -743,7 +748,8 @@ void ServerKnobs::initialize(Randomize randomize, ClientKnobs* clientKnobs, IsSi
    init( REDWOOD_LAZY_CLEAR_MAX_PAGES, 1e6 );
    init( REDWOOD_REMAP_CLEANUP_WINDOW, 50 );
    init( REDWOOD_REMAP_CLEANUP_LAG, 0.1 );
    init( REDWOOD_LOGGING_INTERVAL, 5.0 );
    init( REDWOOD_METRICS_INTERVAL, 5.0 );
    init( REDWOOD_HISTOGRAM_INTERVAL, 30.0 );

    // Server request latency measurement
    init( LATENCY_SAMPLE_SIZE, 100000 );

@ -41,6 +41,7 @@ public:
    // often, so that versions always advance smoothly

    // TLogs
    bool PEEK_USING_STREAMING;
    double TLOG_TIMEOUT; // tlog OR commit proxy failure - master's reaction time
    double TLOG_SLOW_REJOIN_WARN_TIMEOUT_SECS; // Warns if a tlog takes too long to rejoin
    double RECOVERY_TLOG_SMART_QUORUM_DELAY; // smaller might be better for bug amplification

@ -275,6 +276,9 @@ public:
    int ROCKSDB_PREFIX_LEN;
    int64_t ROCKSDB_BLOCK_CACHE_SIZE;
    double ROCKSDB_METRICS_DELAY;
    double ROCKSDB_READ_VALUE_TIMEOUT;
    double ROCKSDB_READ_VALUE_PREFIX_TIMEOUT;
    double ROCKSDB_READ_RANGE_TIMEOUT;

    // Leader election
    int MAX_NOTIFICATIONS;

@ -563,6 +567,7 @@ public:
    double FETCH_KEYS_TOO_LONG_TIME_CRITERIA;
    double MAX_STORAGE_COMMIT_TIME;
    int64_t RANGESTREAM_LIMIT_BYTES;
    bool ENABLE_CLEAR_RANGE_EAGER_READS;

    // Wait Failure
    int MAX_OUTSTANDING_WAIT_FAILURE_REQUESTS;

@ -690,7 +695,8 @@ public:
    int64_t REDWOOD_REMAP_CLEANUP_WINDOW; // Remap remover lag interval in which to coalesce page writes
    double REDWOOD_REMAP_CLEANUP_LAG; // Maximum allowed remap remover lag behind the cleanup window as a multiple of
                                      // the window size
    double REDWOOD_LOGGING_INTERVAL;
    double REDWOOD_METRICS_INTERVAL;
    double REDWOOD_HISTOGRAM_INTERVAL;

    // Server request latency measurement
    int LATENCY_SAMPLE_SIZE;

@ -43,7 +43,7 @@ class SimpleConfigTransactionImpl {
        }
        ConfigTransactionGetGenerationRequest req;
        ConfigTransactionGetGenerationReply reply =
            wait(self->cti.getGeneration.getReply(ConfigTransactionGetGenerationRequest{}));
            wait(retryBrokenPromise(self->cti.getGeneration, ConfigTransactionGetGenerationRequest{}));
        if (self->dID.present()) {
            TraceEvent("SimpleConfigTransactionGotReadVersion", self->dID.get())
                .detail("Version", reply.generation.liveVersion);

@ -63,7 +63,7 @@ class SimpleConfigTransactionImpl {
                .detail("KnobName", configKey.knobName);
        }
        ConfigTransactionGetReply reply =
            wait(self->cti.get.getReply(ConfigTransactionGetRequest{ generation, configKey }));
            wait(retryBrokenPromise(self->cti.get, ConfigTransactionGetRequest{ generation, configKey }));
        if (self->dID.present()) {
            TraceEvent("SimpleConfigTransactionGotValue", self->dID.get())
                .detail("Value", reply.value.get().toString());

@ -81,7 +81,7 @@ class SimpleConfigTransactionImpl {
        }
        ConfigGeneration generation = wait(self->getGenerationFuture);
        ConfigTransactionGetConfigClassesReply reply =
            wait(self->cti.getClasses.getReply(ConfigTransactionGetConfigClassesRequest{ generation }));
            wait(retryBrokenPromise(self->cti.getClasses, ConfigTransactionGetConfigClassesRequest{ generation }));
        RangeResult result;
        for (const auto& configClass : reply.configClasses) {
            result.push_back_deep(result.arena(), KeyValueRef(configClass, ""_sr));

@ -95,7 +95,7 @@ class SimpleConfigTransactionImpl {
        }
        ConfigGeneration generation = wait(self->getGenerationFuture);
        ConfigTransactionGetKnobsReply reply =
            wait(self->cti.getKnobs.getReply(ConfigTransactionGetKnobsRequest{ generation, configClass }));
            wait(retryBrokenPromise(self->cti.getKnobs, ConfigTransactionGetKnobsRequest{ generation, configClass }));
        RangeResult result;
        for (const auto& knobName : reply.knobNames) {
            result.push_back_deep(result.arena(), KeyValueRef(knobName, ""_sr));

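The repeated change in these hunks swaps bare getReply calls for retryBrokenPromise, so a coordinator restart no longer fails the transaction. A hedged sketch of the pattern that helper implements, modeled on the fdbrpc idiom (an illustration, not the library's exact definition):

    // Keep resending the request whenever the remote endpoint dies and breaks
    // its reply promise, instead of surfacing broken_promise to the caller.
    ACTOR template <class Req>
    Future<REPLY_TYPE(Req)> retryBrokenPromiseSketch(RequestStream<Req> to, Req request) {
        loop {
            try {
                REPLY_TYPE(Req) reply = wait(to.getReply(request));
                return reply;
            } catch (Error& e) {
                if (e.code() != error_code_broken_promise)
                    throw;
                // The endpoint restarted; back off briefly and try again.
                wait(delayJittered(0.1));
            }
        }
    }
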
@ -109,11 +109,21 @@ class SimpleConfigTransactionImpl {
        }
        wait(store(self->toCommit.generation, self->getGenerationFuture));
        self->toCommit.annotation.timestamp = now();
        wait(self->cti.commit.getReply(self->toCommit));
        wait(retryBrokenPromise(self->cti.commit, self->toCommit));
        self->committed = true;
        return Void();
    }

    ACTOR static Future<Void> onError(SimpleConfigTransactionImpl* self, Error e) {
        // TODO: Improve this:
        if (e.code() == error_code_transaction_too_old || e.code() == error_code_not_committed) {
            wait(delay((1 << self->numRetries++) * 0.01 * deterministicRandom()->random01()));
            self->reset();
            return Void();
        }
        throw e;
    }

public:
    SimpleConfigTransactionImpl(Database const& cx) : cx(cx) {
        auto coordinators = cx->getConnectionFile()->getConnectionString().coordinators();

@ -123,9 +133,14 @@ public:

    SimpleConfigTransactionImpl(ConfigTransactionInterface const& cti) : cti(cti) {}

    void set(KeyRef key, ValueRef value) { toCommit.set(key, value); }
    void set(KeyRef key, ValueRef value) {
        toCommit.mutations.push_back_deep(toCommit.arena,
                                          IKnobCollection::createSetMutation(toCommit.arena, key, value));
    }

    void clear(KeyRef key) { toCommit.clear(key); }
    void clear(KeyRef key) {
        toCommit.mutations.push_back_deep(toCommit.arena, IKnobCollection::createClearMutation(toCommit.arena, key));
    }

    Future<Optional<Value>> get(KeyRef key) { return get(this, key); }

@ -144,14 +159,7 @@ public:

    Future<Void> commit() { return commit(this); }

    Future<Void> onError(Error const& e) {
        // TODO: Improve this:
        if (e.code() == error_code_transaction_too_old) {
            reset();
            return delay((1 << numRetries++) * 0.01 * deterministicRandom()->random01());
        }
        throw e;
    }
    Future<Void> onError(Error const& e) { return onError(this, e); }

    Future<Version> getReadVersion() {
        if (!getGenerationFuture.isValid())

@ -183,9 +191,7 @@ public:

    size_t getApproximateSize() const { return toCommit.expectedSize(); }

    void debugTransaction(UID dID) {
        this->dID = dID;
    }
    void debugTransaction(UID dID) { this->dID = dID; }

    void checkDeferredError(Error const& deferredError) const {
        if (deferredError.code() != invalid_error_code) {

@ -198,15 +204,15 @@ public:
}; // SimpleConfigTransactionImpl

Future<Version> SimpleConfigTransaction::getReadVersion() {
    return impl().getReadVersion();
    return impl->getReadVersion();
}

Optional<Version> SimpleConfigTransaction::getCachedReadVersion() const {
    return impl().getCachedReadVersion();
    return impl->getCachedReadVersion();
}

Future<Optional<Value>> SimpleConfigTransaction::get(Key const& key, Snapshot snapshot) {
    return impl().get(key);
    return impl->get(key);
}

Future<RangeResult> SimpleConfigTransaction::getRange(KeySelector const& begin,

@ -217,7 +223,7 @@ Future<RangeResult> SimpleConfigTransaction::getRange(KeySelector const& begin,
    if (reverse) {
        throw client_invalid_operation();
    }
    return impl().getRange(KeyRangeRef(begin.getKey(), end.getKey()));
    return impl->getRange(KeyRangeRef(begin.getKey(), end.getKey()));
}

Future<RangeResult> SimpleConfigTransaction::getRange(KeySelector begin,

@ -228,27 +234,27 @@ Future<RangeResult> SimpleConfigTransaction::getRange(KeySelector begin,
    if (reverse) {
        throw client_invalid_operation();
    }
    return impl().getRange(KeyRangeRef(begin.getKey(), end.getKey()));
    return impl->getRange(KeyRangeRef(begin.getKey(), end.getKey()));
}

void SimpleConfigTransaction::set(KeyRef const& key, ValueRef const& value) {
    impl().set(key, value);
    impl->set(key, value);
}

void SimpleConfigTransaction::clear(KeyRef const& key) {
    impl().clear(key);
    impl->clear(key);
}

Future<Void> SimpleConfigTransaction::commit() {
    return impl().commit();
    return impl->commit();
}

Version SimpleConfigTransaction::getCommittedVersion() const {
    return impl().getCommittedVersion();
    return impl->getCommittedVersion();
}

int64_t SimpleConfigTransaction::getApproximateSize() const {
    return impl().getApproximateSize();
    return impl->getApproximateSize();
}

void SimpleConfigTransaction::setOption(FDBTransactionOptions::Option option, Optional<StringRef> value) {

@ -256,7 +262,7 @@ void SimpleConfigTransaction::setOption(FDBTransactionOptions::Option option, Op
}

Future<Void> SimpleConfigTransaction::onError(Error const& e) {
    return impl().onError(e);
    return impl->onError(e);
}

void SimpleConfigTransaction::cancel() {

@ -265,27 +271,27 @@ void SimpleConfigTransaction::cancel() {
}

void SimpleConfigTransaction::reset() {
    return impl().reset();
    return impl->reset();
}

void SimpleConfigTransaction::fullReset() {
    return impl().fullReset();
    return impl->fullReset();
}

void SimpleConfigTransaction::debugTransaction(UID dID) {
    impl().debugTransaction(dID);
    impl->debugTransaction(dID);
}

void SimpleConfigTransaction::checkDeferredError() const {
    impl().checkDeferredError(deferredError);
    impl->checkDeferredError(deferredError);
}

void SimpleConfigTransaction::setDatabase(Database const& cx) {
    _impl = std::make_unique<SimpleConfigTransactionImpl>(cx);
    impl = PImpl<SimpleConfigTransactionImpl>::create(cx);
}

SimpleConfigTransaction::SimpleConfigTransaction(ConfigTransactionInterface const& cti)
  : _impl(std::make_unique<SimpleConfigTransactionImpl>(cti)) {}
  : impl(PImpl<SimpleConfigTransactionImpl>::create(cti)) {}

SimpleConfigTransaction::SimpleConfigTransaction() = default;

@ -27,6 +27,7 @@
#include "fdbclient/CoordinationInterface.h"
#include "fdbclient/FDBTypes.h"
#include "fdbclient/IConfigTransaction.h"
#include "fdbclient/PImpl.h"
#include "flow/Error.h"
#include "flow/flow.h"

@ -36,9 +37,7 @@
 * (the lowest coordinator by IP address), so there is no fault tolerance.
 */
class SimpleConfigTransaction final : public IConfigTransaction, public FastAllocated<SimpleConfigTransaction> {
    std::unique_ptr<class SimpleConfigTransactionImpl> _impl;
    SimpleConfigTransactionImpl const& impl() const { return *_impl; }
    SimpleConfigTransactionImpl& impl() { return *_impl; }
    PImpl<class SimpleConfigTransactionImpl> impl;

public:
    SimpleConfigTransaction(ConfigTransactionInterface const&);
Some files were not shown because too many files have changed in this diff.