Merge branch 'master' of github.com:apple/foundationdb into jfu-grv-cache

This commit is contained in:
Jon Fu 2021-11-19 14:46:55 -05:00
commit 3f24128da4
334 changed files with 37847 additions and 3576 deletions

View File

@@ -89,7 +89,7 @@ message(STATUS "Current git version ${CURRENT_GIT_VERSION}")
 set(FDB_PACKAGE_NAME "${PROJECT_VERSION_MAJOR}.${PROJECT_VERSION_MINOR}")
 set(FDB_VERSION ${PROJECT_VERSION})
 set(FDB_VERSION_PLAIN ${FDB_VERSION})
-configure_file(${CMAKE_SOURCE_DIR}/versions.target.cmake ${CMAKE_SOURCE_DIR}/versions.target)
+configure_file(${CMAKE_SOURCE_DIR}/versions.target.cmake ${CMAKE_CURRENT_BINARY_DIR}/versions.target)
 message(STATUS "FDB version is ${FDB_VERSION}")
 message(STATUS "FDB package name is ${FDB_PACKAGE_NAME}")
@@ -158,6 +158,7 @@ endif()
 include(CompileBoost)
 include(GetMsgpack)
+add_subdirectory(contrib)
 add_subdirectory(flow)
 add_subdirectory(fdbrpc)
 add_subdirectory(fdbclient)
@@ -169,7 +170,6 @@ else()
   add_subdirectory(fdbservice)
 endif()
 add_subdirectory(fdbbackup)
-add_subdirectory(contrib)
 add_subdirectory(tests)
 add_subdirectory(flowbench EXCLUDE_FROM_ALL)
 if(WITH_PYTHON AND WITH_C_BINDING)

View File

@@ -117,8 +117,8 @@ should simulate it using an anonymous transaction. Remember that set and clear
 operations must immediately commit (with appropriate retry behavior!).

 Any error that bubbles out of these operations must be caught. In the event of
-an error, you must push the packed tuple of the string `"ERROR"` and the error
-code (as a string, not an integer).
+an error, you must push the packed tuple of the byte string `"ERROR"` and the
+error code (as a byte string, not an integer).

 Some operations may allow you to push future values onto the stack. When popping
 objects from the stack, the future MUST BE waited on and errors caught before
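As an illustration of that requirement (not part of this commit), a binding tester written in C++ might build the packed error tuple by hand. The stack representation and helper names below are hypothetical; the byte-string encoding (type code 0x01, 0x00 terminator, embedded 0x00 escaped as 0x00 0xFF) follows the tuple layer specification.

#include <string>
#include <vector>

// Hypothetical sketch: append one byte string to a tuple-layer encoding.
static void append_tuple_bytes(std::string& out, const std::string& s) {
    out.push_back('\x01'); // tuple layer type code for byte strings
    for (char c : s) {
        out.push_back(c);
        if (c == '\x00')
            out.push_back('\xff'); // escape embedded NUL bytes
    }
    out.push_back('\x00'); // terminator
}

// Push the packed ("ERROR", "<code>") tuple the spec requires after a failure.
static void push_error(std::vector<std::string>& stack, int fdb_error_code) {
    std::string packed;
    append_tuple_bytes(packed, "ERROR");
    append_tuple_bytes(packed, std::to_string(fdb_error_code)); // error code as a byte string, not an integer
    stack.push_back(packed);
}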

View File

@@ -7,18 +7,26 @@ file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/foundationdb)

 set(asm_file ${CMAKE_CURRENT_BINARY_DIR}/fdb_c.g.S)

-set(platform "linux")
+set(os "linux")
+set(cpu "intel")
 if(APPLE)
-  set(platform "osx")
+  set(os "osx")
 elseif(WIN32)
-  set(platform "windows")
+  set(os "windows")
   set(asm_file ${CMAKE_CURRENT_BINARY_DIR}/fdb_c.g.asm)
-elseif(CMAKE_SYSTEM_NAME MATCHES "Linux" AND CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64")
-  set(platform "linux-aarch64")
+endif()
+
+if(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64" OR CMAKE_SYSTEM_PROCESSOR MATCHES "arm64")
+  set(cpu "aarch64")
+endif()
+
+set(IS_ARM_MAC NO)
+if(APPLE AND CMAKE_SYSTEM_PROCESSOR STREQUAL "arm64")
+  set(IS_ARM_MAC YES)
 endif()

 add_custom_command(OUTPUT ${asm_file} ${CMAKE_CURRENT_BINARY_DIR}/fdb_c_function_pointers.g.h
-  COMMAND $<TARGET_FILE:Python::Interpreter> ${CMAKE_CURRENT_SOURCE_DIR}/generate_asm.py ${platform}
+  COMMAND $<TARGET_FILE:Python::Interpreter> ${CMAKE_CURRENT_SOURCE_DIR}/generate_asm.py ${os} ${cpu}
   ${CMAKE_CURRENT_SOURCE_DIR}/fdb_c.cpp
   ${asm_file}
   ${CMAKE_CURRENT_BINARY_DIR}/fdb_c_function_pointers.g.h
@@ -66,8 +74,10 @@ if(WIN32)
   set_property(SOURCE ${asm_file} PROPERTY LANGUAGE ASM_MASM)
 endif()

-# The tests don't build on windows
-if(NOT WIN32)
+# The tests don't build on windows and ARM macs
+# doctest doesn't seem to compile on ARM macs, we should
+# check later whether this works
+if(NOT WIN32 AND NOT IS_ARM_MAC)
   set(MAKO_SRCS
       test/mako/mako.c
       test/mako/mako.h

View File

@@ -436,21 +436,12 @@ extern "C" DLLEXPORT FDBFuture* fdb_transaction_get_addresses_for_key(FDBTransac
 	return (FDBFuture*)(TXN(tr)->getAddressesForKey(KeyRef(key_name, key_name_length)).extractPtr());
 }

-FDBFuture* fdb_transaction_get_range_impl(FDBTransaction* tr,
-                                          uint8_t const* begin_key_name,
-                                          int begin_key_name_length,
-                                          fdb_bool_t begin_or_equal,
-                                          int begin_offset,
-                                          uint8_t const* end_key_name,
-                                          int end_key_name_length,
-                                          fdb_bool_t end_or_equal,
-                                          int end_offset,
-                                          int limit,
-                                          int target_bytes,
+// Set to the actual limit, target_bytes, and reverse.
+FDBFuture* validate_and_update_parameters(int& limit,
+                                          int& target_bytes,
                                           FDBStreamingMode mode,
                                           int iteration,
-                                          fdb_bool_t snapshot,
-                                          fdb_bool_t reverse) {
+                                          fdb_bool_t& reverse) {
 	/* This method may be called with a runtime API version of 13, in
 	which negative row limits are a reverse range read */
 	if (g_api_version <= 13 && limit < 0) {
@ -500,6 +491,27 @@ FDBFuture* fdb_transaction_get_range_impl(FDBTransaction* tr,
else if (mode_bytes != GetRangeLimits::BYTE_LIMIT_UNLIMITED) else if (mode_bytes != GetRangeLimits::BYTE_LIMIT_UNLIMITED)
target_bytes = std::min(target_bytes, mode_bytes); target_bytes = std::min(target_bytes, mode_bytes);
return nullptr;
}
FDBFuture* fdb_transaction_get_range_impl(FDBTransaction* tr,
uint8_t const* begin_key_name,
int begin_key_name_length,
fdb_bool_t begin_or_equal,
int begin_offset,
uint8_t const* end_key_name,
int end_key_name_length,
fdb_bool_t end_or_equal,
int end_offset,
int limit,
int target_bytes,
FDBStreamingMode mode,
int iteration,
fdb_bool_t snapshot,
fdb_bool_t reverse) {
FDBFuture* r = validate_and_update_parameters(limit, target_bytes, mode, iteration, reverse);
if (r != nullptr)
return r;
return ( return (
FDBFuture*)(TXN(tr) FDBFuture*)(TXN(tr)
->getRange( ->getRange(
@ -511,6 +523,60 @@ FDBFuture* fdb_transaction_get_range_impl(FDBTransaction* tr,
.extractPtr()); .extractPtr());
} }
FDBFuture* fdb_transaction_get_range_and_flat_map_impl(FDBTransaction* tr,
uint8_t const* begin_key_name,
int begin_key_name_length,
fdb_bool_t begin_or_equal,
int begin_offset,
uint8_t const* end_key_name,
int end_key_name_length,
fdb_bool_t end_or_equal,
int end_offset,
uint8_t const* mapper_name,
int mapper_name_length,
int limit,
int target_bytes,
FDBStreamingMode mode,
int iteration,
fdb_bool_t snapshot,
fdb_bool_t reverse) {
FDBFuture* r = validate_and_update_parameters(limit, target_bytes, mode, iteration, reverse);
if (r != nullptr)
return r;
return (
FDBFuture*)(TXN(tr)
->getRangeAndFlatMap(
KeySelectorRef(KeyRef(begin_key_name, begin_key_name_length), begin_or_equal, begin_offset),
KeySelectorRef(KeyRef(end_key_name, end_key_name_length), end_or_equal, end_offset),
StringRef(mapper_name, mapper_name_length),
GetRangeLimits(limit, target_bytes),
snapshot,
reverse)
.extractPtr());
}
// TODO: Support FDB_API_ADDED in generate_asm.py and then this can be replaced with fdb_api_ptr_unimpl.
FDBFuture* fdb_transaction_get_range_and_flat_map_v699(FDBTransaction* tr,
uint8_t const* begin_key_name,
int begin_key_name_length,
fdb_bool_t begin_or_equal,
int begin_offset,
uint8_t const* end_key_name,
int end_key_name_length,
fdb_bool_t end_or_equal,
int end_offset,
uint8_t const* mapper_name,
int mapper_name_length,
int limit,
int target_bytes,
FDBStreamingMode mode,
int iteration,
fdb_bool_t snapshot,
fdb_bool_t reverse) {
fprintf(stderr, "UNIMPLEMENTED FDB API FUNCTION\n");
abort();
}
FDBFuture* fdb_transaction_get_range_selector_v13(FDBTransaction* tr, FDBFuture* fdb_transaction_get_range_selector_v13(FDBTransaction* tr,
uint8_t const* begin_key_name, uint8_t const* begin_key_name,
int begin_key_name_length, int begin_key_name_length,
@ -702,6 +768,7 @@ extern "C" DLLEXPORT fdb_error_t fdb_select_api_version_impl(int runtime_version
// WARNING: use caution when implementing removed functions by calling public API functions. This can lead to // WARNING: use caution when implementing removed functions by calling public API functions. This can lead to
// undesired behavior when using the multi-version API. Instead, it is better to have both the removed and public // undesired behavior when using the multi-version API. Instead, it is better to have both the removed and public
// functions call an internal implementation function. See fdb_create_database_impl for an example. // functions call an internal implementation function. See fdb_create_database_impl for an example.
FDB_API_CHANGED(fdb_transaction_get_range_and_flat_map, 700);
FDB_API_REMOVED(fdb_future_get_version, 620); FDB_API_REMOVED(fdb_future_get_version, 620);
FDB_API_REMOVED(fdb_create_cluster, 610); FDB_API_REMOVED(fdb_create_cluster, 610);
FDB_API_REMOVED(fdb_cluster_create_database, 610); FDB_API_REMOVED(fdb_cluster_create_database, 610);

View File

@ -244,6 +244,24 @@ DLLEXPORT WARN_UNUSED_RESULT FDBFuture* fdb_transaction_get_range(FDBTransaction
fdb_bool_t reverse); fdb_bool_t reverse);
#endif #endif
DLLEXPORT WARN_UNUSED_RESULT FDBFuture* fdb_transaction_get_range_and_flat_map(FDBTransaction* tr,
uint8_t const* begin_key_name,
int begin_key_name_length,
fdb_bool_t begin_or_equal,
int begin_offset,
uint8_t const* end_key_name,
int end_key_name_length,
fdb_bool_t end_or_equal,
int end_offset,
uint8_t const* mapper_name,
int mapper_name_length,
int limit,
int target_bytes,
FDBStreamingMode mode,
int iteration,
fdb_bool_t snapshot,
fdb_bool_t reverse);
DLLEXPORT void fdb_transaction_set(FDBTransaction* tr, DLLEXPORT void fdb_transaction_set(FDBTransaction* tr,
uint8_t const* key_name, uint8_t const* key_name,
int key_name_length, int key_name_length,
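For context only, here is a hedged sketch of how a client might call the declaration above through the C API. It is not part of this commit: the transaction, keys, and mapper bytes are placeholders, FDB_API_VERSION 700 is an assumed minimum for this function, and the selector values (or_equal = 0, offset = 1) correspond to first-greater-or-equal.

#define FDB_API_VERSION 700 // assumed; the range-and-flat-map API is gated on version 700
#include <foundationdb/fdb_c.h>

// Hypothetical helper: issue one request and block for the result.
// `mapper` is the caller-supplied packed mapper tuple; its contents are defined by the caller.
fdb_error_t demo_get_range_and_flat_map(FDBTransaction* tr, const uint8_t* mapper, int mapper_length) {
    FDBFuture* f = fdb_transaction_get_range_and_flat_map(
        tr,
        (const uint8_t*)"indexA/", 7, 0, 1, // begin selector: first key >= "indexA/" (placeholder prefix)
        (const uint8_t*)"indexA0", 7, 0, 1, // end selector: first key >= "indexA0", i.e. just past the prefix
        mapper, mapper_length,
        0 /* limit */, 0 /* target_bytes */,
        FDB_STREAMING_MODE_WANT_ALL, 0 /* iteration */,
        1 /* snapshot: the feature is only allowed for snapshot reads */,
        0 /* reverse */);
    fdb_error_t err = fdb_future_block_until_ready(f);
    if (!err)
        err = fdb_future_get_error(f);
    fdb_future_destroy(f);
    return err;
}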

View File

@@ -23,7 +23,7 @@
 import re
 import sys

-(platform, source, asm, h) = sys.argv[1:]
+(os, cpu, source, asm, h) = sys.argv[1:]

 functions = {}

@@ -59,14 +59,15 @@ def write_windows_asm(asmfile, functions):

 def write_unix_asm(asmfile, functions, prefix):
-    if platform != "linux-aarch64":
+    if cpu != "aarch64":
         asmfile.write(".intel_syntax noprefix\n")

-    if platform.startswith('linux') or platform == "freebsd":
+    if cpu == 'aarch64' or os == 'linux' or os == 'freebsd':
         asmfile.write("\n.data\n")
         for f in functions:
             asmfile.write("\t.extern fdb_api_ptr_%s\n" % f)

+    if os == 'linux' or os == 'freebsd':
         asmfile.write("\n.text\n")
         for f in functions:
             asmfile.write("\t.global %s\n\t.type %s, @function\n" % (f, f))
@ -104,8 +105,14 @@ def write_unix_asm(asmfile, functions, prefix):
# .size g, .-g # .size g, .-g
# .ident "GCC: (GNU) 8.3.1 20190311 (Red Hat 8.3.1-3)" # .ident "GCC: (GNU) 8.3.1 20190311 (Red Hat 8.3.1-3)"
if platform == "linux-aarch64": p = ''
asmfile.write("\tadrp x8, :got:fdb_api_ptr_%s\n" % (f)) if os == 'osx':
p = '_'
if cpu == "aarch64":
asmfile.write("\tldr x16, =%sfdb_api_ptr_%s\n" % (p, f))
if os == 'osx':
asmfile.write("\tldr x16, [x16]\n")
else:
asmfile.write("\tldr x8, [x8, #:got_lo12:fdb_api_ptr_%s]\n" % (f)) asmfile.write("\tldr x8, [x8, #:got_lo12:fdb_api_ptr_%s]\n" % (f))
asmfile.write("\tldr x8, [x8]\n") asmfile.write("\tldr x8, [x8]\n")
asmfile.write("\tbr x8\n") asmfile.write("\tbr x8\n")
@@ -123,15 +130,15 @@ with open(asm, 'w') as asmfile:
     hfile.write(
         "void fdb_api_ptr_removed() { fprintf(stderr, \"REMOVED FDB API FUNCTION\\n\"); abort(); }\n\n")

-    if platform.startswith('linux'):
+    if os == 'linux':
         write_unix_asm(asmfile, functions, '')
-    elif platform == "osx":
+    elif os == "osx":
         write_unix_asm(asmfile, functions, '_')
-    elif platform == "windows":
+    elif os == "windows":
         write_windows_asm(asmfile, functions)

     for f in functions:
-        if platform == "windows":
+        if os == "windows":
             hfile.write("extern \"C\" ")
         hfile.write("void* fdb_api_ptr_%s = (void*)&fdb_api_ptr_unimpl;\n" % f)
         for v in functions[f]:

View File

@ -2,6 +2,7 @@
#include <fcntl.h> #include <fcntl.h>
#include <getopt.h> #include <getopt.h>
#include <math.h> #include <math.h>
#include <stdint.h>
#include <stdio.h> #include <stdio.h>
#include <stdlib.h> #include <stdlib.h>
#include <string.h> #include <string.h>
@ -942,7 +943,7 @@ int run_workload(FDBTransaction* transaction,
if (tracetimer == dotrace) { if (tracetimer == dotrace) {
fdb_error_t err; fdb_error_t err;
tracetimer = 0; tracetimer = 0;
snprintf(traceid, 32, "makotrace%019lld", total_xacts); snprintf(traceid, 32, "makotrace%019ld", total_xacts);
fprintf(debugme, "DEBUG: txn tracing %s\n", traceid); fprintf(debugme, "DEBUG: txn tracing %s\n", traceid);
err = fdb_transaction_set_option(transaction, err = fdb_transaction_set_option(transaction,
FDB_TR_OPTION_DEBUG_TRANSACTION_IDENTIFIER, FDB_TR_OPTION_DEBUG_TRANSACTION_IDENTIFIER,
@ -1065,7 +1066,8 @@ void* worker_thread(void* thread_args) {
int worker_id = ((thread_args_t*)thread_args)->process->worker_id; int worker_id = ((thread_args_t*)thread_args)->process->worker_id;
int thread_id = ((thread_args_t*)thread_args)->thread_id; int thread_id = ((thread_args_t*)thread_args)->thread_id;
mako_args_t* args = ((thread_args_t*)thread_args)->process->args; mako_args_t* args = ((thread_args_t*)thread_args)->process->args;
FDBDatabase* database = ((thread_args_t*)thread_args)->process->database; size_t database_index = ((thread_args_t*)thread_args)->database_index;
FDBDatabase* database = ((thread_args_t*)thread_args)->process->databases[database_index];
fdb_error_t err; fdb_error_t err;
int rc; int rc;
FDBTransaction* transaction; FDBTransaction* transaction;
@ -1099,11 +1101,12 @@ void* worker_thread(void* thread_args) {
} }
fprintf(debugme, fprintf(debugme,
"DEBUG: worker_id:%d (%d) thread_id:%d (%d) (tid:%lld)\n", "DEBUG: worker_id:%d (%d) thread_id:%d (%d) database_index:%lu (tid:%lu)\n",
worker_id, worker_id,
args->num_processes, args->num_processes,
thread_id, thread_id,
args->num_threads, args->num_threads,
database_index,
(uint64_t)pthread_self()); (uint64_t)pthread_self());
if (args->tpsmax) { if (args->tpsmax) {
@ -1231,6 +1234,7 @@ int worker_process_main(mako_args_t* args, int worker_id, mako_shmhdr_t* shm, pi
fprintf(debugme, "DEBUG: worker %d started\n", worker_id); fprintf(debugme, "DEBUG: worker %d started\n", worker_id);
/* Everything starts from here */ /* Everything starts from here */
err = fdb_select_api_version(args->api_version); err = fdb_select_api_version(args->api_version);
if (err) { if (err) {
fprintf(stderr, "ERROR: Failed at %s:%d (%s)\n", __FILE__, __LINE__, fdb_get_error(err)); fprintf(stderr, "ERROR: Failed at %s:%d (%s)\n", __FILE__, __LINE__, fdb_get_error(err));
@ -1291,6 +1295,20 @@ int worker_process_main(mako_args_t* args, int worker_id, mako_shmhdr_t* shm, pi
} }
} }
if (args->client_threads_per_version > 0) {
err = fdb_network_set_option(
FDB_NET_OPTION_CLIENT_THREADS_PER_VERSION, (uint8_t*)&args->client_threads_per_version, sizeof(int64_t));
if (err) {
fprintf(stderr,
"ERROR: fdb_network_set_option (FDB_NET_OPTION_CLIENT_THREADS_PER_VERSION) (%d): %s\n",
args->client_threads_per_version,
fdb_get_error(err));
// exit here so users are not misled into thinking that mako is
// running with the multi-threaded client enabled
return -1;
}
}
/* Network thread must be setup before doing anything */ /* Network thread must be setup before doing anything */
fprintf(debugme, "DEBUG: fdb_setup_network\n"); fprintf(debugme, "DEBUG: fdb_setup_network\n");
err = fdb_setup_network(); err = fdb_setup_network();
@ -1328,11 +1346,16 @@ int worker_process_main(mako_args_t* args, int worker_id, mako_shmhdr_t* shm, pi
fdb_future_destroy(f); fdb_future_destroy(f);
#else /* >= 610 */ #else /* >= 610 */
fdb_create_database(args->cluster_file, &process.database); for (size_t i = 0; i < args->num_databases; i++) {
#endif size_t cluster_index = args->num_fdb_clusters <= 1 ? 0 : i % args->num_fdb_clusters;
fdb_create_database(args->cluster_files[cluster_index], &process.databases[i]);
fprintf(debugme, "DEBUG: creating database at cluster %s\n", args->cluster_files[cluster_index]);
if (args->disable_ryw) { if (args->disable_ryw) {
fdb_database_set_option(process.database, FDB_DB_OPTION_SNAPSHOT_RYW_DISABLE, (uint8_t*)NULL, 0); fdb_database_set_option(process.databases[i], FDB_DB_OPTION_SNAPSHOT_RYW_DISABLE, (uint8_t*)NULL, 0);
} }
}
#endif
fprintf(debugme, "DEBUG: creating %d worker threads\n", args->num_threads); fprintf(debugme, "DEBUG: creating %d worker threads\n", args->num_threads);
worker_threads = (pthread_t*)calloc(sizeof(pthread_t), args->num_threads); worker_threads = (pthread_t*)calloc(sizeof(pthread_t), args->num_threads);
if (!worker_threads) { if (!worker_threads) {
@ -1349,6 +1372,8 @@ int worker_process_main(mako_args_t* args, int worker_id, mako_shmhdr_t* shm, pi
for (i = 0; i < args->num_threads; i++) { for (i = 0; i < args->num_threads; i++) {
thread_args[i].thread_id = i; thread_args[i].thread_id = i;
thread_args[i].database_index = i % args->num_databases;
for (int op = 0; op < MAX_OP; op++) { for (int op = 0; op < MAX_OP; op++) {
if (args->txnspec.ops[op][OP_COUNT] > 0 || op == OP_TRANSACTION || op == OP_COMMIT) { if (args->txnspec.ops[op][OP_COUNT] > 0 || op == OP_TRANSACTION || op == OP_COMMIT) {
thread_args[i].block[op] = (lat_block_t*)malloc(sizeof(lat_block_t)); thread_args[i].block[op] = (lat_block_t*)malloc(sizeof(lat_block_t));
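To make the new round-robin mapping concrete, here is a small standalone sketch (not part of the commit) that mirrors the two expressions above; the cluster, database, and thread counts are made-up example values.

#include <cstdio>

int main() {
    // Example values only: 2 cluster files, 4 database objects, 8 worker threads.
    const int num_fdb_clusters = 2, num_databases = 4, num_threads = 8;

    // Same rules as the diff: database i is created against cluster i % num_fdb_clusters,
    // and thread t does its work against database t % num_databases.
    for (int i = 0; i < num_databases; i++)
        std::printf("database %d -> cluster %d\n", i, num_fdb_clusters <= 1 ? 0 : i % num_fdb_clusters);
    for (int t = 0; t < num_threads; t++)
        std::printf("thread %d -> database %d\n", t, t % num_databases);
    return 0;
}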
@ -1388,7 +1413,10 @@ failExit:
free(thread_args); free(thread_args);
/* clean up database and cluster */ /* clean up database and cluster */
fdb_database_destroy(process.database); for (size_t i = 0; i < args->num_databases; i++) {
fdb_database_destroy(process.databases[i]);
}
#if FDB_API_VERSION < 610 #if FDB_API_VERSION < 610
fdb_cluster_destroy(cluster); fdb_cluster_destroy(cluster);
#endif #endif
@ -1414,6 +1442,8 @@ int init_args(mako_args_t* args) {
if (!args) if (!args)
return -1; return -1;
memset(args, 0, sizeof(mako_args_t)); /* zero-out everything */ memset(args, 0, sizeof(mako_args_t)); /* zero-out everything */
args->num_fdb_clusters = 0;
args->num_databases = 1;
args->api_version = fdb_get_max_api_version(); args->api_version = fdb_get_max_api_version();
args->json = 0; args->json = 0;
args->num_processes = 1; args->num_processes = 1;
@ -1446,7 +1476,9 @@ int init_args(mako_args_t* args) {
for (i = 0; i < MAX_OP; i++) { for (i = 0; i < MAX_OP; i++) {
args->txnspec.ops[i][OP_COUNT] = 0; args->txnspec.ops[i][OP_COUNT] = 0;
} }
args->client_threads_per_version = 0;
args->disable_ryw = 0; args->disable_ryw = 0;
args->json_output_path[0] = '\0';
return 0; return 0;
} }
@ -1579,6 +1611,7 @@ void usage() {
printf("%-24s %s\n", "-v, --verbose", "Specify verbosity"); printf("%-24s %s\n", "-v, --verbose", "Specify verbosity");
printf("%-24s %s\n", "-a, --api_version=API_VERSION", "Specify API_VERSION to use"); printf("%-24s %s\n", "-a, --api_version=API_VERSION", "Specify API_VERSION to use");
printf("%-24s %s\n", "-c, --cluster=FILE", "Specify FDB cluster file"); printf("%-24s %s\n", "-c, --cluster=FILE", "Specify FDB cluster file");
printf("%-24s %s\n", "-d, --num_databases=NUM_DATABASES", "Specify number of databases");
printf("%-24s %s\n", "-p, --procs=PROCS", "Specify number of worker processes"); printf("%-24s %s\n", "-p, --procs=PROCS", "Specify number of worker processes");
printf("%-24s %s\n", "-t, --threads=THREADS", "Specify number of worker threads"); printf("%-24s %s\n", "-t, --threads=THREADS", "Specify number of worker threads");
printf("%-24s %s\n", "-r, --rows=ROWS", "Specify number of records"); printf("%-24s %s\n", "-r, --rows=ROWS", "Specify number of records");
@ -1612,6 +1645,7 @@ void usage() {
printf("%-24s %s\n", " --flatbuffers", "Use flatbuffers"); printf("%-24s %s\n", " --flatbuffers", "Use flatbuffers");
printf("%-24s %s\n", " --streaming", "Streaming mode: all (default), iterator, small, medium, large, serial"); printf("%-24s %s\n", " --streaming", "Streaming mode: all (default), iterator, small, medium, large, serial");
printf("%-24s %s\n", " --disable_ryw", "Disable snapshot read-your-writes"); printf("%-24s %s\n", " --disable_ryw", "Disable snapshot read-your-writes");
printf("%-24s %s\n", " --json_report=PATH", "Output stats to the specified json file (Default: mako.json)");
} }
/* parse benchmark paramters */ /* parse benchmark paramters */
@ -1620,10 +1654,12 @@ int parse_args(int argc, char* argv[], mako_args_t* args) {
int c; int c;
int idx; int idx;
while (1) { while (1) {
const char* short_options = "a:c:p:t:r:s:i:x:v:m:hjz"; const char* short_options = "a:c:d:p:t:r:s:i:x:v:m:hz";
static struct option long_options[] = { /* name, has_arg, flag, val */ static struct option long_options[] = {
/* name, has_arg, flag, val */
{ "api_version", required_argument, NULL, 'a' }, { "api_version", required_argument, NULL, 'a' },
{ "cluster", required_argument, NULL, 'c' }, { "cluster", required_argument, NULL, 'c' },
{ "num_databases", optional_argument, NULL, 'd' },
{ "procs", required_argument, NULL, 'p' }, { "procs", required_argument, NULL, 'p' },
{ "threads", required_argument, NULL, 't' }, { "threads", required_argument, NULL, 't' },
{ "rows", required_argument, NULL, 'r' }, { "rows", required_argument, NULL, 'r' },
@ -1648,7 +1684,6 @@ int parse_args(int argc, char* argv[], mako_args_t* args) {
{ "txntrace", required_argument, NULL, ARG_TXNTRACE }, { "txntrace", required_argument, NULL, ARG_TXNTRACE },
/* no args */ /* no args */
{ "help", no_argument, NULL, 'h' }, { "help", no_argument, NULL, 'h' },
{ "json", no_argument, NULL, 'j' },
{ "zipf", no_argument, NULL, 'z' }, { "zipf", no_argument, NULL, 'z' },
{ "commitget", no_argument, NULL, ARG_COMMITGET }, { "commitget", no_argument, NULL, ARG_COMMITGET },
{ "flatbuffers", no_argument, NULL, ARG_FLATBUFFERS }, { "flatbuffers", no_argument, NULL, ARG_FLATBUFFERS },
@ -1657,13 +1692,16 @@ int parse_args(int argc, char* argv[], mako_args_t* args) {
{ "txntagging", required_argument, NULL, ARG_TXNTAGGING }, { "txntagging", required_argument, NULL, ARG_TXNTAGGING },
{ "txntagging_prefix", required_argument, NULL, ARG_TXNTAGGINGPREFIX }, { "txntagging_prefix", required_argument, NULL, ARG_TXNTAGGINGPREFIX },
{ "version", no_argument, NULL, ARG_VERSION }, { "version", no_argument, NULL, ARG_VERSION },
{ "client_threads_per_version", required_argument, NULL, ARG_CLIENT_THREADS_PER_VERSION },
{ "disable_ryw", no_argument, NULL, ARG_DISABLE_RYW }, { "disable_ryw", no_argument, NULL, ARG_DISABLE_RYW },
{ "json_report", optional_argument, NULL, ARG_JSON_REPORT },
{ NULL, 0, NULL, 0 } { NULL, 0, NULL, 0 }
}; };
idx = 0; idx = 0;
c = getopt_long(argc, argv, short_options, long_options, &idx); c = getopt_long(argc, argv, short_options, long_options, &idx);
if (c < 0) if (c < 0) {
break; break;
}
switch (c) { switch (c) {
case '?': case '?':
case 'h': case 'h':
@ -1672,8 +1710,17 @@ int parse_args(int argc, char* argv[], mako_args_t* args) {
case 'a': case 'a':
args->api_version = atoi(optarg); args->api_version = atoi(optarg);
break; break;
case 'c': case 'c': {
strcpy(args->cluster_file, optarg); const char delim[] = ",";
char* cluster_file = strtok(optarg, delim);
while (cluster_file != NULL) {
strcpy(args->cluster_files[args->num_fdb_clusters++], cluster_file);
cluster_file = strtok(NULL, delim);
}
break;
}
case 'd':
args->num_databases = atoi(optarg);
break; break;
case 'p': case 'p':
args->num_processes = atoi(optarg); args->num_processes = atoi(optarg);
@ -1812,9 +1859,22 @@ int parse_args(int argc, char* argv[], mako_args_t* args) {
} }
memcpy(args->txntagging_prefix, optarg, strlen(optarg)); memcpy(args->txntagging_prefix, optarg, strlen(optarg));
break; break;
case ARG_CLIENT_THREADS_PER_VERSION:
args->client_threads_per_version = atoi(optarg);
break;
case ARG_DISABLE_RYW: case ARG_DISABLE_RYW:
args->disable_ryw = 1; args->disable_ryw = 1;
break; break;
case ARG_JSON_REPORT:
if (optarg == NULL && (argv[optind] == NULL || (argv[optind] != NULL && argv[optind][0] == '-'))) {
// if --json_report is the last option and no file is specified,
// or --json_report is immediately followed by another option
char default_file[] = "mako.json";
strncpy(args->json_output_path, default_file, strlen(default_file));
} else {
strncpy(args->json_output_path, optarg, strlen(optarg) + 1);
}
break;
} }
} }
@ -1841,6 +1901,41 @@ int parse_args(int argc, char* argv[], mako_args_t* args) {
return 0; return 0;
} }
char* get_ops_name(int ops_code) {
switch (ops_code) {
case OP_GETREADVERSION:
return "GRV";
case OP_GET:
return "GET";
case OP_GETRANGE:
return "GETRANGE";
case OP_SGET:
return "SGET";
case OP_SGETRANGE:
return "SGETRANGE";
case OP_UPDATE:
return "UPDATE";
case OP_INSERT:
return "INSERT";
case OP_INSERTRANGE:
return "INSERTRANGE";
case OP_CLEAR:
return "CLEAR";
case OP_SETCLEAR:
return "SETCLEAR";
case OP_CLEARRANGE:
return "CLEARRANGE";
case OP_SETCLEARRANGE:
return "SETCLEARRANGE";
case OP_COMMIT:
return "COMMIT";
case OP_TRANSACTION:
return "TRANSACTION";
default:
return "";
}
}
int validate_args(mako_args_t* args) { int validate_args(mako_args_t* args) {
if (args->mode == MODE_INVALID) { if (args->mode == MODE_INVALID) {
fprintf(stderr, "ERROR: --mode has to be set\n"); fprintf(stderr, "ERROR: --mode has to be set\n");
@ -1858,6 +1953,28 @@ int validate_args(mako_args_t* args) {
fprintf(stderr, "ERROR: --vallen must be a positive integer\n"); fprintf(stderr, "ERROR: --vallen must be a positive integer\n");
return -1; return -1;
} }
if (args->num_fdb_clusters > NUM_CLUSTERS_MAX) {
	fprintf(stderr, "ERROR: mako does not support more than %d clusters\n", NUM_CLUSTERS_MAX);
	return -1;
}
if (args->num_databases > NUM_DATABASES_MAX) {
	fprintf(stderr, "ERROR: mako does not support more than %d databases\n", NUM_DATABASES_MAX);
	return -1;
}
if (args->num_databases < args->num_fdb_clusters) {
	fprintf(stderr,
	        "ERROR: --num_databases (%d) must be >= the number of clusters (%d)\n",
	        args->num_databases,
	        args->num_fdb_clusters);
	return -1;
}
if (args->num_threads < args->num_databases) {
fprintf(stderr,
"ERROR: --threads (%d) must be >= number of databases (%d)\n",
args->num_threads,
args->num_databases);
return -1;
}
if (args->key_length < 4 /* "mako" */ + digits(args->rows)) { if (args->key_length < 4 /* "mako" */ + digits(args->rows)) {
fprintf(stderr, fprintf(stderr,
"ERROR: --keylen must be larger than %d to store \"mako\" prefix " "ERROR: --keylen must be larger than %d to store \"mako\" prefix "
@ -1888,7 +2005,7 @@ int validate_args(mako_args_t* args) {
#define STATS_TITLE_WIDTH 12 #define STATS_TITLE_WIDTH 12
#define STATS_FIELD_WIDTH 12 #define STATS_FIELD_WIDTH 12
void print_stats(mako_args_t* args, mako_stats_t* stats, struct timespec* now, struct timespec* prev) { void print_stats(mako_args_t* args, mako_stats_t* stats, struct timespec* now, struct timespec* prev, FILE* fp) {
int i, j; int i, j;
int op; int op;
int print_err; int print_err;
@ -1901,7 +2018,7 @@ void print_stats(mako_args_t* args, mako_stats_t* stats, struct timespec* now, s
uint64_t totalxacts = 0; uint64_t totalxacts = 0;
static uint64_t conflicts_prev = 0; static uint64_t conflicts_prev = 0;
uint64_t conflicts = 0; uint64_t conflicts = 0;
double durationns = (now->tv_sec - prev->tv_sec) * 1000000000.0 + (now->tv_nsec - prev->tv_nsec); double duration_nsec = (now->tv_sec - prev->tv_sec) * 1000000000.0 + (now->tv_nsec - prev->tv_nsec);
for (i = 0; i < args->num_processes; i++) { for (i = 0; i < args->num_processes; i++) {
for (j = 0; j < args->num_threads; j++) { for (j = 0; j < args->num_threads; j++) {
@ -1913,10 +2030,18 @@ void print_stats(mako_args_t* args, mako_stats_t* stats, struct timespec* now, s
} }
} }
} }
if (fp) {
fwrite("{", 1, 1, fp);
}
printf("%" STR(STATS_TITLE_WIDTH) "s ", "OPS"); printf("%" STR(STATS_TITLE_WIDTH) "s ", "OPS");
for (op = 0; op < MAX_OP; op++) { for (op = 0; op < MAX_OP; op++) {
if (args->txnspec.ops[op][OP_COUNT] > 0) { if (args->txnspec.ops[op][OP_COUNT] > 0) {
printf("%" STR(STATS_FIELD_WIDTH) "lld ", ops_total[op] - ops_total_prev[op]); uint64_t ops_total_diff = ops_total[op] - ops_total_prev[op];
printf("%" STR(STATS_FIELD_WIDTH) "lu ", ops_total_diff);
if (fp) {
fprintf(fp, "\"%s\": %lu,", get_ops_name(op), ops_total_diff);
}
errors_diff[op] = errors_total[op] - errors_total_prev[op]; errors_diff[op] = errors_total[op] - errors_total_prev[op];
print_err = (errors_diff[op] > 0); print_err = (errors_diff[op] > 0);
ops_total_prev[op] = ops_total[op]; ops_total_prev[op] = ops_total[op];
@ -1924,22 +2049,34 @@ void print_stats(mako_args_t* args, mako_stats_t* stats, struct timespec* now, s
} }
} }
/* TPS */ /* TPS */
printf("%" STR(STATS_FIELD_WIDTH) ".2f ", (totalxacts - totalxacts_prev) * 1000000000.0 / durationns); double tps = (totalxacts - totalxacts_prev) * 1000000000.0 / duration_nsec;
printf("%" STR(STATS_FIELD_WIDTH) ".2f ", tps);
if (fp) {
fprintf(fp, "\"tps\": %.2f,", tps);
}
totalxacts_prev = totalxacts; totalxacts_prev = totalxacts;
/* Conflicts */ /* Conflicts */
printf("%" STR(STATS_FIELD_WIDTH) ".2f\n", (conflicts - conflicts_prev) * 1000000000.0 / durationns); double conflicts_diff = (conflicts - conflicts_prev) * 1000000000.0 / duration_nsec;
printf("%" STR(STATS_FIELD_WIDTH) ".2f\n", conflicts_diff);
if (fp) {
fprintf(fp, "\"conflictsPerSec\": %.2f},", conflicts_diff);
}
conflicts_prev = conflicts; conflicts_prev = conflicts;
if (print_err) { if (print_err) {
printf("%" STR(STATS_TITLE_WIDTH) "s ", "Errors"); printf("%" STR(STATS_TITLE_WIDTH) "s ", "Errors");
for (op = 0; op < MAX_OP; op++) { for (op = 0; op < MAX_OP; op++) {
if (args->txnspec.ops[op][OP_COUNT] > 0) { if (args->txnspec.ops[op][OP_COUNT] > 0) {
printf("%" STR(STATS_FIELD_WIDTH) "lld ", errors_diff[op]); printf("%" STR(STATS_FIELD_WIDTH) "lu ", errors_diff[op]);
if (fp) {
fprintf(fp, "\"errors\": %.2f", conflicts_diff);
}
} }
} }
printf("\n"); printf("\n");
} }
return; return;
} }
@ -1953,44 +2090,7 @@ void print_stats_header(mako_args_t* args, bool show_commit, bool is_first_heade
printf(" "); printf(" ");
for (op = 0; op < MAX_OP; op++) { for (op = 0; op < MAX_OP; op++) {
if (args->txnspec.ops[op][OP_COUNT] > 0) { if (args->txnspec.ops[op][OP_COUNT] > 0) {
switch (op) { printf("%" STR(STATS_FIELD_WIDTH) "s ", get_ops_name(op));
case OP_GETREADVERSION:
printf("%" STR(STATS_FIELD_WIDTH) "s ", "GRV");
break;
case OP_GET:
printf("%" STR(STATS_FIELD_WIDTH) "s ", "GET");
break;
case OP_GETRANGE:
printf("%" STR(STATS_FIELD_WIDTH) "s ", "GETRANGE");
break;
case OP_SGET:
printf("%" STR(STATS_FIELD_WIDTH) "s ", "SGET");
break;
case OP_SGETRANGE:
printf("%" STR(STATS_FIELD_WIDTH) "s ", "SGETRANGE");
break;
case OP_UPDATE:
printf("%" STR(STATS_FIELD_WIDTH) "s ", "UPDATE");
break;
case OP_INSERT:
printf("%" STR(STATS_FIELD_WIDTH) "s ", "INSERT");
break;
case OP_INSERTRANGE:
printf("%" STR(STATS_FIELD_WIDTH) "s ", "INSERTRANGE");
break;
case OP_CLEAR:
printf("%" STR(STATS_FIELD_WIDTH) "s ", "CLEAR");
break;
case OP_SETCLEAR:
printf("%" STR(STATS_FIELD_WIDTH) "s ", "SETCLEAR");
break;
case OP_CLEARRANGE:
printf("%" STR(STATS_FIELD_WIDTH) "s ", "CLEARRANGE");
break;
case OP_SETCLEARRANGE:
printf("%" STR(STATS_FIELD_WIDTH) "s ", "SETCLRRANGE");
break;
}
} }
} }
@ -2043,7 +2143,8 @@ void print_report(mako_args_t* args,
mako_stats_t* stats, mako_stats_t* stats,
struct timespec* timer_now, struct timespec* timer_now,
struct timespec* timer_start, struct timespec* timer_start,
pid_t* pid_main) { pid_t* pid_main,
FILE* fp) {
int i, j, k, op, index; int i, j, k, op, index;
uint64_t totalxacts = 0; uint64_t totalxacts = 0;
uint64_t conflicts = 0; uint64_t conflicts = 0;
@ -2055,7 +2156,7 @@ void print_report(mako_args_t* args,
uint64_t lat_samples[MAX_OP] = { 0 }; uint64_t lat_samples[MAX_OP] = { 0 };
uint64_t lat_max[MAX_OP] = { 0 }; uint64_t lat_max[MAX_OP] = { 0 };
uint64_t durationns = uint64_t duration_nsec =
(timer_now->tv_sec - timer_start->tv_sec) * 1000000000 + (timer_now->tv_nsec - timer_start->tv_nsec); (timer_now->tv_sec - timer_start->tv_sec) * 1000000000 + (timer_now->tv_nsec - timer_start->tv_nsec);
for (op = 0; op < MAX_OP; op++) { for (op = 0; op < MAX_OP; op++) {
@ -2089,7 +2190,8 @@ void print_report(mako_args_t* args,
} }
/* overall stats */ /* overall stats */
printf("\n====== Total Duration %6.3f sec ======\n\n", (double)durationns / 1000000000); double total_duration = duration_nsec * 1.0 / 1000000000;
printf("\n====== Total Duration %6.3f sec ======\n\n", total_duration);
printf("Total Processes: %8d\n", args->num_processes); printf("Total Processes: %8d\n", args->num_processes);
printf("Total Threads: %8d\n", args->num_threads); printf("Total Threads: %8d\n", args->num_threads);
if (args->tpsmax == args->tpsmin) if (args->tpsmax == args->tpsmin)
@ -2111,35 +2213,65 @@ void print_report(mako_args_t* args,
break; break;
} }
} }
printf("Total Xacts: %8lld\n", totalxacts); printf("Total Xacts: %8lu\n", totalxacts);
printf("Total Conflicts: %8lld\n", conflicts); printf("Total Conflicts: %8lu\n", conflicts);
printf("Total Errors: %8lld\n", totalerrors); printf("Total Errors: %8lu\n", totalerrors);
printf("Overall TPS: %8lld\n\n", totalxacts * 1000000000 / durationns); printf("Overall TPS: %8lu\n\n", totalxacts * 1000000000 / duration_nsec);
if (fp) {
fprintf(fp, "\"results\": {");
fprintf(fp, "\"totalDuration\": %6.3f,", total_duration);
fprintf(fp, "\"totalProcesses\": %d,", args->num_processes);
fprintf(fp, "\"totalThreads\": %d,", args->num_threads);
fprintf(fp, "\"targetTPS\": %d,", args->tpsmax);
fprintf(fp, "\"totalXacts\": %lu,", totalxacts);
fprintf(fp, "\"totalConflicts\": %lu,", conflicts);
fprintf(fp, "\"totalErrors\": %lu,", totalerrors);
fprintf(fp, "\"overallTPS\": %lu,", totalxacts * 1000000000 / duration_nsec);
}
/* per-op stats */ /* per-op stats */
print_stats_header(args, true, true, false); print_stats_header(args, true, true, false);
/* OPS */ /* OPS */
printf("%-" STR(STATS_TITLE_WIDTH) "s ", "Total OPS"); printf("%-" STR(STATS_TITLE_WIDTH) "s ", "Total OPS");
if (fp) {
fprintf(fp, "\"totalOps\": {");
}
for (op = 0; op < MAX_OP; op++) { for (op = 0; op < MAX_OP; op++) {
if ((args->txnspec.ops[op][OP_COUNT] > 0 && op != OP_TRANSACTION) || op == OP_COMMIT) { if ((args->txnspec.ops[op][OP_COUNT] > 0 && op != OP_TRANSACTION) || op == OP_COMMIT) {
printf("%" STR(STATS_FIELD_WIDTH) "lld ", ops_total[op]); printf("%" STR(STATS_FIELD_WIDTH) "lu ", ops_total[op]);
if (fp) {
fprintf(fp, "\"%s\": %lu,", get_ops_name(op), ops_total[op]);
}
} }
} }
/* TPS */ /* TPS */
printf("%" STR(STATS_FIELD_WIDTH) ".2f ", totalxacts * 1000000000.0 / durationns); double tps = totalxacts * 1000000000.0 / duration_nsec;
printf("%" STR(STATS_FIELD_WIDTH) ".2f ", tps);
/* Conflicts */ /* Conflicts */
printf("%" STR(STATS_FIELD_WIDTH) ".2f\n", conflicts * 1000000000.0 / durationns); double conflicts_rate = conflicts * 1000000000.0 / duration_nsec;
printf("%" STR(STATS_FIELD_WIDTH) ".2f\n", conflicts_rate);
if (fp) {
fprintf(fp, "}, \"tps\": %.2f, \"conflictsPerSec\": %.2f, \"errors\": {", tps, conflicts_rate);
}
/* Errors */ /* Errors */
printf("%-" STR(STATS_TITLE_WIDTH) "s ", "Errors"); printf("%-" STR(STATS_TITLE_WIDTH) "s ", "Errors");
for (op = 0; op < MAX_OP; op++) { for (op = 0; op < MAX_OP; op++) {
if (args->txnspec.ops[op][OP_COUNT] > 0 && op != OP_TRANSACTION) { if (args->txnspec.ops[op][OP_COUNT] > 0 && op != OP_TRANSACTION) {
printf("%" STR(STATS_FIELD_WIDTH) "lld ", errors_total[op]); printf("%" STR(STATS_FIELD_WIDTH) "lu ", errors_total[op]);
if (fp) {
fprintf(fp, "\"%s\": %lu,", get_ops_name(op), errors_total[op]);
} }
} }
}
if (fp) {
fprintf(fp, "}, \"numSamples\": {");
}
printf("\n\n"); printf("\n\n");
printf("%s", "Latency (us)"); printf("%s", "Latency (us)");
@ -2150,33 +2282,48 @@ void print_report(mako_args_t* args,
for (op = 0; op < MAX_OP; op++) { for (op = 0; op < MAX_OP; op++) {
if (args->txnspec.ops[op][OP_COUNT] > 0 || op == OP_TRANSACTION || op == OP_COMMIT) { if (args->txnspec.ops[op][OP_COUNT] > 0 || op == OP_TRANSACTION || op == OP_COMMIT) {
if (lat_total[op]) { if (lat_total[op]) {
printf("%" STR(STATS_FIELD_WIDTH) "lld ", lat_samples[op]); printf("%" STR(STATS_FIELD_WIDTH) "lu ", lat_samples[op]);
} else { } else {
printf("%" STR(STATS_FIELD_WIDTH) "s ", "N/A"); printf("%" STR(STATS_FIELD_WIDTH) "s ", "N/A");
} }
if (fp) {
fprintf(fp, "\"%s\": %lu,", get_ops_name(op), lat_samples[op]);
}
} }
} }
printf("\n"); printf("\n");
/* Min Latency */ /* Min Latency */
if (fp) {
fprintf(fp, "}, \"minLatency\": {");
}
printf("%-" STR(STATS_TITLE_WIDTH) "s ", "Min"); printf("%-" STR(STATS_TITLE_WIDTH) "s ", "Min");
for (op = 0; op < MAX_OP; op++) { for (op = 0; op < MAX_OP; op++) {
if (args->txnspec.ops[op][OP_COUNT] > 0 || op == OP_TRANSACTION || op == OP_COMMIT) { if (args->txnspec.ops[op][OP_COUNT] > 0 || op == OP_TRANSACTION || op == OP_COMMIT) {
if (lat_min[op] == -1) { if (lat_min[op] == -1) {
printf("%" STR(STATS_FIELD_WIDTH) "s ", "N/A"); printf("%" STR(STATS_FIELD_WIDTH) "s ", "N/A");
} else { } else {
printf("%" STR(STATS_FIELD_WIDTH) "lld ", lat_min[op]); printf("%" STR(STATS_FIELD_WIDTH) "lu ", lat_min[op]);
if (fp) {
fprintf(fp, "\"%s\": %lu,", get_ops_name(op), lat_min[op]);
}
} }
} }
} }
printf("\n"); printf("\n");
/* Avg Latency */ /* Avg Latency */
if (fp) {
fprintf(fp, "}, \"avgLatency\": {");
}
printf("%-" STR(STATS_TITLE_WIDTH) "s ", "Avg"); printf("%-" STR(STATS_TITLE_WIDTH) "s ", "Avg");
for (op = 0; op < MAX_OP; op++) { for (op = 0; op < MAX_OP; op++) {
if (args->txnspec.ops[op][OP_COUNT] > 0 || op == OP_TRANSACTION || op == OP_COMMIT) { if (args->txnspec.ops[op][OP_COUNT] > 0 || op == OP_TRANSACTION || op == OP_COMMIT) {
if (lat_total[op]) { if (lat_total[op]) {
printf("%" STR(STATS_FIELD_WIDTH) "lld ", lat_total[op] / lat_samples[op]); printf("%" STR(STATS_FIELD_WIDTH) "lu ", lat_total[op] / lat_samples[op]);
if (fp) {
fprintf(fp, "\"%s\": %lu,", get_ops_name(op), lat_total[op] / lat_samples[op]);
}
} else { } else {
printf("%" STR(STATS_FIELD_WIDTH) "s ", "N/A"); printf("%" STR(STATS_FIELD_WIDTH) "s ", "N/A");
} }
@ -2185,13 +2332,19 @@ void print_report(mako_args_t* args,
printf("\n"); printf("\n");
/* Max Latency */ /* Max Latency */
if (fp) {
fprintf(fp, "}, \"maxLatency\": {");
}
printf("%-" STR(STATS_TITLE_WIDTH) "s ", "Max"); printf("%-" STR(STATS_TITLE_WIDTH) "s ", "Max");
for (op = 0; op < MAX_OP; op++) { for (op = 0; op < MAX_OP; op++) {
if (args->txnspec.ops[op][OP_COUNT] > 0 || op == OP_TRANSACTION || op == OP_COMMIT) { if (args->txnspec.ops[op][OP_COUNT] > 0 || op == OP_TRANSACTION || op == OP_COMMIT) {
if (lat_max[op] == 0) { if (lat_max[op] == 0) {
printf("%" STR(STATS_FIELD_WIDTH) "s ", "N/A"); printf("%" STR(STATS_FIELD_WIDTH) "s ", "N/A");
} else { } else {
printf("%" STR(STATS_FIELD_WIDTH) "lld ", lat_max[op]); printf("%" STR(STATS_FIELD_WIDTH) "lu ", lat_max[op]);
if (fp) {
fprintf(fp, "\"%s\": %lu,", get_ops_name(op), lat_max[op]);
}
} }
} }
} }
@ -2202,6 +2355,9 @@ void print_report(mako_args_t* args,
int point_99_9pct, point_99pct, point_95pct; int point_99_9pct, point_99pct, point_95pct;
/* Median Latency */ /* Median Latency */
if (fp) {
fprintf(fp, "}, \"medianLatency\": {");
}
printf("%-" STR(STATS_TITLE_WIDTH) "s ", "Median"); printf("%-" STR(STATS_TITLE_WIDTH) "s ", "Median");
int num_points[MAX_OP] = { 0 }; int num_points[MAX_OP] = { 0 };
for (op = 0; op < MAX_OP; op++) { for (op = 0; op < MAX_OP; op++) {
@ -2237,7 +2393,10 @@ void print_report(mako_args_t* args,
} else { } else {
median = (dataPoints[op][num_points[op] / 2] + dataPoints[op][num_points[op] / 2 - 1]) >> 1; median = (dataPoints[op][num_points[op] / 2] + dataPoints[op][num_points[op] / 2 - 1]) >> 1;
} }
printf("%" STR(STATS_FIELD_WIDTH) "lld ", median); printf("%" STR(STATS_FIELD_WIDTH) "lu ", median);
if (fp) {
fprintf(fp, "\"%s\": %lu,", get_ops_name(op), median);
}
} else { } else {
printf("%" STR(STATS_FIELD_WIDTH) "s ", "N/A"); printf("%" STR(STATS_FIELD_WIDTH) "s ", "N/A");
} }
@ -2246,6 +2405,9 @@ void print_report(mako_args_t* args,
printf("\n"); printf("\n");
/* 95%ile Latency */ /* 95%ile Latency */
if (fp) {
fprintf(fp, "}, \"p95Latency\": {");
}
printf("%-" STR(STATS_TITLE_WIDTH) "s ", "95.0 pctile"); printf("%-" STR(STATS_TITLE_WIDTH) "s ", "95.0 pctile");
for (op = 0; op < MAX_OP; op++) { for (op = 0; op < MAX_OP; op++) {
if (args->txnspec.ops[op][OP_COUNT] > 0 || op == OP_TRANSACTION || op == OP_COMMIT) { if (args->txnspec.ops[op][OP_COUNT] > 0 || op == OP_TRANSACTION || op == OP_COMMIT) {
@ -2255,7 +2417,10 @@ void print_report(mako_args_t* args,
} }
if (lat_total[op]) { if (lat_total[op]) {
point_95pct = ((float)(num_points[op]) * 0.95) - 1; point_95pct = ((float)(num_points[op]) * 0.95) - 1;
printf("%" STR(STATS_FIELD_WIDTH) "lld ", dataPoints[op][point_95pct]); printf("%" STR(STATS_FIELD_WIDTH) "lu ", dataPoints[op][point_95pct]);
if (fp) {
fprintf(fp, "\"%s\": %lu,", get_ops_name(op), dataPoints[op][point_95pct]);
}
} else { } else {
printf("%" STR(STATS_FIELD_WIDTH) "s ", "N/A"); printf("%" STR(STATS_FIELD_WIDTH) "s ", "N/A");
} }
@ -2264,6 +2429,9 @@ void print_report(mako_args_t* args,
printf("\n"); printf("\n");
/* 99%ile Latency */ /* 99%ile Latency */
if (fp) {
fprintf(fp, "}, \"p99Latency\": {");
}
printf("%-" STR(STATS_TITLE_WIDTH) "s ", "99.0 pctile"); printf("%-" STR(STATS_TITLE_WIDTH) "s ", "99.0 pctile");
for (op = 0; op < MAX_OP; op++) { for (op = 0; op < MAX_OP; op++) {
if (args->txnspec.ops[op][OP_COUNT] > 0 || op == OP_TRANSACTION || op == OP_COMMIT) { if (args->txnspec.ops[op][OP_COUNT] > 0 || op == OP_TRANSACTION || op == OP_COMMIT) {
@ -2273,7 +2441,10 @@ void print_report(mako_args_t* args,
} }
if (lat_total[op]) { if (lat_total[op]) {
point_99pct = ((float)(num_points[op]) * 0.99) - 1; point_99pct = ((float)(num_points[op]) * 0.99) - 1;
printf("%" STR(STATS_FIELD_WIDTH) "lld ", dataPoints[op][point_99pct]); printf("%" STR(STATS_FIELD_WIDTH) "lu ", dataPoints[op][point_99pct]);
if (fp) {
fprintf(fp, "\"%s\": %lu,", get_ops_name(op), dataPoints[op][point_99pct]);
}
} else { } else {
printf("%" STR(STATS_FIELD_WIDTH) "s ", "N/A"); printf("%" STR(STATS_FIELD_WIDTH) "s ", "N/A");
} }
@ -2282,6 +2453,9 @@ void print_report(mako_args_t* args,
printf("\n"); printf("\n");
/* 99.9%ile Latency */ /* 99.9%ile Latency */
if (fp) {
fprintf(fp, "}, \"p99.9Latency\": {");
}
printf("%-" STR(STATS_TITLE_WIDTH) "s ", "99.9 pctile"); printf("%-" STR(STATS_TITLE_WIDTH) "s ", "99.9 pctile");
for (op = 0; op < MAX_OP; op++) { for (op = 0; op < MAX_OP; op++) {
if (args->txnspec.ops[op][OP_COUNT] > 0 || op == OP_TRANSACTION || op == OP_COMMIT) { if (args->txnspec.ops[op][OP_COUNT] > 0 || op == OP_TRANSACTION || op == OP_COMMIT) {
@ -2291,13 +2465,19 @@ void print_report(mako_args_t* args,
} }
if (lat_total[op]) { if (lat_total[op]) {
point_99_9pct = ((float)(num_points[op]) * 0.999) - 1; point_99_9pct = ((float)(num_points[op]) * 0.999) - 1;
printf("%" STR(STATS_FIELD_WIDTH) "lld ", dataPoints[op][point_99_9pct]); printf("%" STR(STATS_FIELD_WIDTH) "lu ", dataPoints[op][point_99_9pct]);
if (fp) {
fprintf(fp, "\"%s\": %lu,", get_ops_name(op), dataPoints[op][point_99_9pct]);
}
} else { } else {
printf("%" STR(STATS_FIELD_WIDTH) "s ", "N/A"); printf("%" STR(STATS_FIELD_WIDTH) "s ", "N/A");
} }
} }
} }
printf("\n"); printf("\n");
if (fp) {
fprintf(fp, "}}");
}
char command_remove[NAME_MAX] = { '\0' }; char command_remove[NAME_MAX] = { '\0' };
sprintf(command_remove, "rm -rf %s%d", TEMP_DATA_STORE, *pid_main); sprintf(command_remove, "rm -rf %s%d", TEMP_DATA_STORE, *pid_main);
@ -2328,6 +2508,44 @@ int stats_process_main(mako_args_t* args,
if (args->verbose >= VERBOSE_DEFAULT) if (args->verbose >= VERBOSE_DEFAULT)
print_stats_header(args, false, true, false); print_stats_header(args, false, true, false);
FILE* fp = NULL;
if (args->json_output_path[0] != '\0') {
fp = fopen(args->json_output_path, "w");
fprintf(fp, "{\"makoArgs\": {");
fprintf(fp, "\"api_version\": %d,", args->api_version);
fprintf(fp, "\"json\": %d,", args->json);
fprintf(fp, "\"num_processes\": %d,", args->num_processes);
fprintf(fp, "\"num_threads\": %d,", args->num_threads);
fprintf(fp, "\"mode\": %d,", args->mode);
fprintf(fp, "\"rows\": %d,", args->rows);
fprintf(fp, "\"seconds\": %d,", args->seconds);
fprintf(fp, "\"iteration\": %d,", args->iteration);
fprintf(fp, "\"tpsmax\": %d,", args->tpsmax);
fprintf(fp, "\"tpsmin\": %d,", args->tpsmin);
fprintf(fp, "\"tpsinterval\": %d,", args->tpsinterval);
fprintf(fp, "\"tpschange\": %d,", args->tpschange);
fprintf(fp, "\"sampling\": %d,", args->sampling);
fprintf(fp, "\"key_length\": %d,", args->key_length);
fprintf(fp, "\"value_length\": %d,", args->value_length);
fprintf(fp, "\"commit_get\": %d,", args->commit_get);
fprintf(fp, "\"verbose\": %d,", args->verbose);
fprintf(fp, "\"cluster_files\": \"%s\",", args->cluster_files[0]);
fprintf(fp, "\"log_group\": \"%s\",", args->log_group);
fprintf(fp, "\"prefixpadding\": %d,", args->prefixpadding);
fprintf(fp, "\"trace\": %d,", args->trace);
fprintf(fp, "\"tracepath\": \"%s\",", args->tracepath);
fprintf(fp, "\"traceformat\": %d,", args->traceformat);
fprintf(fp, "\"knobs\": \"%s\",", args->knobs);
fprintf(fp, "\"flatbuffers\": %d,", args->flatbuffers);
fprintf(fp, "\"txntrace\": %d,", args->txntrace);
fprintf(fp, "\"txntagging\": %d,", args->txntagging);
fprintf(fp, "\"txntagging_prefix\": \"%s\",", args->txntagging_prefix);
fprintf(fp, "\"streaming_mode\": %d,", args->streaming_mode);
fprintf(fp, "\"disable_ryw\": %d,", args->disable_ryw);
fprintf(fp, "\"json_output_path\": \"%s\",", args->json_output_path);
fprintf(fp, "},\"samples\": [");
}
clock_gettime(CLOCK_MONOTONIC_COARSE, &timer_start); clock_gettime(CLOCK_MONOTONIC_COARSE, &timer_start);
timer_prev.tv_sec = timer_start.tv_sec; timer_prev.tv_sec = timer_start.tv_sec;
timer_prev.tv_nsec = timer_start.tv_nsec; timer_prev.tv_nsec = timer_start.tv_nsec;
@ -2369,19 +2587,28 @@ int stats_process_main(mako_args_t* args,
} }
if (args->verbose >= VERBOSE_DEFAULT) if (args->verbose >= VERBOSE_DEFAULT)
print_stats(args, stats, &timer_now, &timer_prev); print_stats(args, stats, &timer_now, &timer_prev, fp);
timer_prev.tv_sec = timer_now.tv_sec; timer_prev.tv_sec = timer_now.tv_sec;
timer_prev.tv_nsec = timer_now.tv_nsec; timer_prev.tv_nsec = timer_now.tv_nsec;
} }
} }
if (fp) {
fprintf(fp, "],");
}
/* print report */ /* print report */
if (args->verbose >= VERBOSE_DEFAULT) { if (args->verbose >= VERBOSE_DEFAULT) {
clock_gettime(CLOCK_MONOTONIC_COARSE, &timer_now); clock_gettime(CLOCK_MONOTONIC_COARSE, &timer_now);
while (*stopcount < args->num_threads * args->num_processes) { while (*stopcount < args->num_threads * args->num_processes) {
usleep(10000); /* 10ms */ usleep(10000); /* 10ms */
} }
print_report(args, stats, &timer_now, &timer_start, pid_main); print_report(args, stats, &timer_now, &timer_start, pid_main, fp);
}
if (fp) {
fprintf(fp, "}");
fclose(fp);
} }
return 0; return 0;
@ -2591,6 +2818,7 @@ failExit:
if (shmfd) { if (shmfd) {
close(shmfd); close(shmfd);
shm_unlink(shmpath); shm_unlink(shmpath);
unlink(shmpath);
} }
return 0; return 0;

View File

@ -81,7 +81,9 @@ enum Arguments {
ARG_TXNTAGGING, ARG_TXNTAGGING,
ARG_TXNTAGGINGPREFIX, ARG_TXNTAGGINGPREFIX,
ARG_STREAMING_MODE, ARG_STREAMING_MODE,
ARG_DISABLE_RYW ARG_DISABLE_RYW,
ARG_CLIENT_THREADS_PER_VERSION,
ARG_JSON_REPORT
}; };
enum TPSChangeTypes { TPS_SIN, TPS_SQUARE, TPS_PULSE }; enum TPSChangeTypes { TPS_SIN, TPS_SQUARE, TPS_PULSE };
@ -103,6 +105,8 @@ typedef struct {
#define LOGGROUP_MAX 256 #define LOGGROUP_MAX 256
#define KNOB_MAX 256 #define KNOB_MAX 256
#define TAGPREFIXLENGTH_MAX 8 #define TAGPREFIXLENGTH_MAX 8
#define NUM_CLUSTERS_MAX 3
#define NUM_DATABASES_MAX 10
/* benchmark parameters */ /* benchmark parameters */
typedef struct { typedef struct {
@ -125,7 +129,9 @@ typedef struct {
int commit_get; int commit_get;
int verbose; int verbose;
mako_txnspec_t txnspec; mako_txnspec_t txnspec;
char cluster_file[PATH_MAX]; char cluster_files[NUM_CLUSTERS_MAX][PATH_MAX];
int num_fdb_clusters;
int num_databases;
char log_group[LOGGROUP_MAX]; char log_group[LOGGROUP_MAX];
int prefixpadding; int prefixpadding;
int trace; int trace;
@ -137,7 +143,9 @@ typedef struct {
int txntagging; int txntagging;
char txntagging_prefix[TAGPREFIXLENGTH_MAX]; char txntagging_prefix[TAGPREFIXLENGTH_MAX];
FDBStreamingMode streaming_mode; FDBStreamingMode streaming_mode;
int client_threads_per_version;
int disable_ryw; int disable_ryw;
char json_output_path[PATH_MAX];
} mako_args_t; } mako_args_t;
/* shared memory */ /* shared memory */
@ -173,14 +181,15 @@ typedef struct {
typedef struct { typedef struct {
int worker_id; int worker_id;
pid_t parent_id; pid_t parent_id;
FDBDatabase* database;
mako_args_t* args; mako_args_t* args;
mako_shmhdr_t* shm; mako_shmhdr_t* shm;
FDBDatabase* databases[NUM_DATABASES_MAX];
} process_info_t; } process_info_t;
/* args for threads */ /* args for threads */
typedef struct { typedef struct {
int thread_id; int thread_id;
int database_index; // index of the database to do work to
int elem_size[MAX_OP]; /* stores the multiple of LAT_BLOCK_SIZE to check the memory allocation of each operation */ int elem_size[MAX_OP]; /* stores the multiple of LAT_BLOCK_SIZE to check the memory allocation of each operation */
bool is_memory_allocated[MAX_OP]; /* flag specified for each operation, whether the memory was allocated to that bool is_memory_allocated[MAX_OP]; /* flag specified for each operation, whether the memory was allocated to that
specific operation */ specific operation */

View File

@@ -39,7 +39,11 @@ Arguments
   | - ``run``: Run the benchmark

 - | ``-c | --cluster <cluster file>``
-  | FDB cluster file (Required)
+  | FDB cluster files (Required, comma-separated)
+
+- | ``-d | --num_databases <num_databases>``
+  | Number of database objects (Default: 1)
+  | If more than one cluster is provided, this value should be >= the number of clusters

 - | ``-a | --api_version <api_version>``
   | FDB API version to use (Default: Latest)

@@ -110,6 +114,13 @@ Arguments
   | - 2 Annoying
   | - 3 Very Annoying (a.k.a. DEBUG)

+- | ``--disable_ryw``
+  | Disable snapshot read-your-writes
+
+- | ``--json_report`` defaults to ``mako.json``
+  | ``--json_report=PATH``
+  | Output stats to the specified json file
+
 Transaction Specification
 =========================

View File

@@ -67,25 +67,25 @@ void runTests(struct ResultSet* rs) {
 	fdb_transaction_set(tr, keys[i], KEY_SIZE, valueStr, VALUE_SIZE);
 	e = getSize(rs, tr, sizes + i);
 	checkError(e, "transaction get size", rs);
-	printf("size %d: %u\n", i, sizes[i]);
+	printf("size %d: %ld\n", i, sizes[i]);
 	i++;

 	fdb_transaction_set(tr, keys[i], KEY_SIZE, valueStr, VALUE_SIZE);
 	e = getSize(rs, tr, sizes + i);
 	checkError(e, "transaction get size", rs);
-	printf("size %d: %u\n", i, sizes[i]);
+	printf("size %d: %ld\n", i, sizes[i]);
 	i++;

 	fdb_transaction_clear(tr, keys[i], KEY_SIZE);
 	e = getSize(rs, tr, sizes + i);
 	checkError(e, "transaction get size", rs);
-	printf("size %d: %u\n", i, sizes[i]);
+	printf("size %d: %ld\n", i, sizes[i]);
 	i++;

 	fdb_transaction_clear_range(tr, keys[i], KEY_SIZE, keys[i + 1], KEY_SIZE);
 	e = getSize(rs, tr, sizes + i);
 	checkError(e, "transaction get size", rs);
-	printf("size %d: %u\n", i, sizes[i]);
+	printf("size %d: %ld\n", i, sizes[i]);
 	i++;

 	for (j = 0; j + 1 < i; j++) {

View File

@ -193,6 +193,41 @@ KeyValueArrayFuture Transaction::get_range(const uint8_t* begin_key_name,
reverse)); reverse));
} }
KeyValueArrayFuture Transaction::get_range_and_flat_map(const uint8_t* begin_key_name,
int begin_key_name_length,
fdb_bool_t begin_or_equal,
int begin_offset,
const uint8_t* end_key_name,
int end_key_name_length,
fdb_bool_t end_or_equal,
int end_offset,
const uint8_t* mapper_name,
int mapper_name_length,
int limit,
int target_bytes,
FDBStreamingMode mode,
int iteration,
fdb_bool_t snapshot,
fdb_bool_t reverse) {
return KeyValueArrayFuture(fdb_transaction_get_range_and_flat_map(tr_,
begin_key_name,
begin_key_name_length,
begin_or_equal,
begin_offset,
end_key_name,
end_key_name_length,
end_or_equal,
end_offset,
mapper_name,
mapper_name_length,
limit,
target_bytes,
mode,
iteration,
snapshot,
reverse));
}
EmptyFuture Transaction::watch(std::string_view key) { EmptyFuture Transaction::watch(std::string_view key) {
return EmptyFuture(fdb_transaction_watch(tr_, (const uint8_t*)key.data(), key.size())); return EmptyFuture(fdb_transaction_watch(tr_, (const uint8_t*)key.data(), key.size()));
} }

View File

@ -219,6 +219,25 @@ public:
fdb_bool_t snapshot, fdb_bool_t snapshot,
fdb_bool_t reverse); fdb_bool_t reverse);
// WARNING: This feature is considered experimental at this time. It is only allowed when using snapshot isolation
// AND disabling read-your-writes. Returns a future which will be set to an FDBKeyValue array.
KeyValueArrayFuture get_range_and_flat_map(const uint8_t* begin_key_name,
int begin_key_name_length,
fdb_bool_t begin_or_equal,
int begin_offset,
const uint8_t* end_key_name,
int end_key_name_length,
fdb_bool_t end_or_equal,
int end_offset,
const uint8_t* mapper_name,
int mapper_name_length,
int limit,
int target_bytes,
FDBStreamingMode mode,
int iteration,
fdb_bool_t snapshot,
fdb_bool_t reverse);
// Wrapper around fdb_transaction_watch. Returns a future representing an // Wrapper around fdb_transaction_watch. Returns a future representing an
// empty value. // empty value.
EmptyFuture watch(std::string_view key); EmptyFuture watch(std::string_view key);

View File

@@ -40,6 +40,7 @@
 #define DOCTEST_CONFIG_IMPLEMENT
 #include "doctest.h"
 #include "fdbclient/rapidjson/document.h"
+#include "fdbclient/Tuple.h"

 #include "flow/config.h"

@@ -76,7 +77,7 @@ fdb_error_t wait_future(fdb::Future& f) {
 // Given a string s, returns the "lowest" string greater than any string that
 // starts with s. Taken from
 // https://github.com/apple/foundationdb/blob/e7d72f458c6a985fdfa677ae021f357d6f49945b/flow/flow.cpp#L223.
-std::string strinc(const std::string& s) {
+std::string strinc_str(const std::string& s) {
 	int index = -1;
 	for (index = s.size() - 1; index >= 0; --index) {
 		if ((uint8_t)s[index] != 255) {
@@ -92,16 +93,16 @@ std::string strinc(const std::string& s) {
 	return r;
 }

-TEST_CASE("strinc") {
-	CHECK(strinc("a").compare("b") == 0);
-	CHECK(strinc("y").compare("z") == 0);
-	CHECK(strinc("!").compare("\"") == 0);
-	CHECK(strinc("*").compare("+") == 0);
-	CHECK(strinc("fdb").compare("fdc") == 0);
-	CHECK(strinc("foundation database 6").compare("foundation database 7") == 0);
+TEST_CASE("strinc_str") {
+	CHECK(strinc_str("a").compare("b") == 0);
+	CHECK(strinc_str("y").compare("z") == 0);
+	CHECK(strinc_str("!").compare("\"") == 0);
+	CHECK(strinc_str("*").compare("+") == 0);
+	CHECK(strinc_str("fdb").compare("fdc") == 0);
+	CHECK(strinc_str("foundation database 6").compare("foundation database 7") == 0);

 	char terminated[] = { 'a', 'b', '\xff' };
-	CHECK(strinc(std::string(terminated, 3)).compare("ac") == 0);
+	CHECK(strinc_str(std::string(terminated, 3)).compare("ac") == 0);
 }

 // Helper function to add `prefix` to all keys in the given map. Returns a new
@@ -117,7 +118,7 @@ std::map<std::string, std::string> create_data(std::map<std::string, std::string
 // Clears all data in the database, then inserts the given key value pairs.
 void insert_data(FDBDatabase* db, const std::map<std::string, std::string>& data) {
 	fdb::Transaction tr(db);
-	auto end_key = strinc(prefix);
+	auto end_key = strinc_str(prefix);
 	while (1) {
 		tr.clear_range(prefix, end_key);
 		for (const auto& [key, val] : data) {
@@ -224,6 +225,59 @@ GetRangeResult get_range(fdb::Transaction& tr,
 	return GetRangeResult{ results, out_more != 0, 0 };
 }
GetRangeResult get_range_and_flat_map(fdb::Transaction& tr,
const uint8_t* begin_key_name,
int begin_key_name_length,
fdb_bool_t begin_or_equal,
int begin_offset,
const uint8_t* end_key_name,
int end_key_name_length,
fdb_bool_t end_or_equal,
int end_offset,
const uint8_t* mapper_name,
int mapper_name_length,
int limit,
int target_bytes,
FDBStreamingMode mode,
int iteration,
fdb_bool_t snapshot,
fdb_bool_t reverse) {
fdb::KeyValueArrayFuture f1 = tr.get_range_and_flat_map(begin_key_name,
begin_key_name_length,
begin_or_equal,
begin_offset,
end_key_name,
end_key_name_length,
end_or_equal,
end_offset,
mapper_name,
mapper_name_length,
limit,
target_bytes,
mode,
iteration,
snapshot,
reverse);
fdb_error_t err = wait_future(f1);
if (err) {
return GetRangeResult{ {}, false, err };
}
const FDBKeyValue* out_kv;
int out_count;
fdb_bool_t out_more;
fdb_check(f1.get(&out_kv, &out_count, &out_more));
std::vector<std::pair<std::string, std::string>> results;
for (int i = 0; i < out_count; ++i) {
std::string key((const char*)out_kv[i].key, out_kv[i].key_length);
std::string value((const char*)out_kv[i].value, out_kv[i].value_length);
results.emplace_back(key, value);
}
return GetRangeResult{ results, out_more != 0, 0 };
}
// Clears all data in the database. // Clears all data in the database.
void clear_data(FDBDatabase* db) { void clear_data(FDBDatabase* db) {
insert_data(db, {}); insert_data(db, {});
@ -819,6 +873,124 @@ TEST_CASE("fdb_transaction_set_read_version future_version") {
CHECK(err == 1009); // future_version CHECK(err == 1009); // future_version
} }
const std::string EMPTY = Tuple().pack().toString();
const KeyRef RECORD = "RECORD"_sr;
const KeyRef INDEX = "INDEX"_sr;
static Key primaryKey(const int i) {
return Key(format("primary-key-of-record-%08d", i));
}
static Key indexKey(const int i) {
return Key(format("index-key-of-record-%08d", i));
}
static Value dataOfRecord(const int i) {
return Value(format("data-of-record-%08d", i));
}
static std::string indexEntryKey(const int i) {
return Tuple().append(StringRef(prefix)).append(INDEX).append(indexKey(i)).append(primaryKey(i)).pack().toString();
}
static std::string recordKey(const int i) {
return Tuple().append(prefix).append(RECORD).append(primaryKey(i)).pack().toString();
}
static std::string recordValue(const int i) {
return Tuple().append(dataOfRecord(i)).pack().toString();
}
TEST_CASE("fdb_transaction_get_range_and_flat_map") {
// Note: the requested `prefix` is added as the first element of the tuple that forms each key, rather
// than as a raw byte prefix of the key, so we don't use key() or create_data() in this test.
std::map<std::string, std::string> data;
for (int i = 0; i < 3; i++) {
data[indexEntryKey(i)] = EMPTY;
data[recordKey(i)] = recordValue(i);
}
insert_data(db, data);
std::string mapper = Tuple().append(prefix).append(RECORD).append("{K[3]}"_sr).pack().toString();
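// How the mapper drives this query (a reading of this test's keys, not a full specification of the
// mapper syntax): each matched index entry packs the tuple (prefix, INDEX, indexKey(i), primaryKey(i)),
// and "{K[3]}" selects element 3 of that key tuple, so the server looks up
// (prefix, RECORD, primaryKey(i)) -- i.e. recordKey(i) -- and returns that record's value.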
fdb::Transaction tr(db);
// get_range_and_flat_map is only supported with read-your-writes disabled, so this option is required.
fdb_check(tr.set_option(FDB_TR_OPTION_READ_YOUR_WRITES_DISABLE, nullptr, 0));
while (1) {
auto result = get_range_and_flat_map(
tr,
// [0, 1]
FDB_KEYSEL_FIRST_GREATER_OR_EQUAL((const uint8_t*)indexEntryKey(0).c_str(), indexEntryKey(0).size()),
FDB_KEYSEL_FIRST_GREATER_THAN((const uint8_t*)indexEntryKey(1).c_str(), indexEntryKey(1).size()),
(const uint8_t*)mapper.c_str(),
mapper.size(),
/* limit */ 0,
/* target_bytes */ 0,
/* FDBStreamingMode */ FDB_STREAMING_MODE_WANT_ALL,
/* iteration */ 0,
/* snapshot */ true,
/* reverse */ 0);
if (result.err) {
fdb::EmptyFuture f1 = tr.on_error(result.err);
fdb_check(wait_future(f1));
continue;
}
// Only the first 2 records are supposed to be returned.
if (result.kvs.size() < 2) {
CHECK(result.more);
// Retry.
continue;
}
CHECK(result.kvs.size() == 2);
CHECK(!result.more);
for (int i = 0; i < 2; i++) {
const auto& [key, value] = result.kvs[i];
std::cout << "result[" << i << "]: key=" << key << ", value=" << value << std::endl;
// OUTPUT:
// result[0]: key=fdbRECORDprimary-key-of-record-00000000, value=data-of-record-00000000
// result[1]: key=fdbRECORDprimary-key-of-record-00000001, value=data-of-record-00000001
CHECK(recordKey(i).compare(key) == 0);
CHECK(recordValue(i).compare(value) == 0);
}
break;
}
}
TEST_CASE("fdb_transaction_get_range_and_flat_map_restricted_to_snapshot") {
std::string mapper = Tuple().append(prefix).append(RECORD).append("{K[3]}"_sr).pack().toString();
fdb::Transaction tr(db);
fdb_check(tr.set_option(FDB_TR_OPTION_READ_YOUR_WRITES_DISABLE, nullptr, 0));
auto result = get_range_and_flat_map(
tr,
FDB_KEYSEL_FIRST_GREATER_OR_EQUAL((const uint8_t*)indexEntryKey(0).c_str(), indexEntryKey(0).size()),
FDB_KEYSEL_FIRST_GREATER_THAN((const uint8_t*)indexEntryKey(1).c_str(), indexEntryKey(1).size()),
(const uint8_t*)mapper.c_str(),
mapper.size(),
/* limit */ 0,
/* target_bytes */ 0,
/* FDBStreamingMode */ FDB_STREAMING_MODE_WANT_ALL,
/* iteration */ 0,
/* snapshot */ false, // Set snapshot to false
/* reverse */ 0);
ASSERT(result.err == error_code_client_invalid_operation);
}
TEST_CASE("fdb_transaction_get_range_and_flat_map_restricted_to_ryw_disable") {
std::string mapper = Tuple().append(prefix).append(RECORD).append("{K[3]}"_sr).pack().toString();
fdb::Transaction tr(db);
// FDB_TR_OPTION_READ_YOUR_WRITES_DISABLE is intentionally not set.
auto result = get_range_and_flat_map(
tr,
FDB_KEYSEL_FIRST_GREATER_OR_EQUAL((const uint8_t*)indexEntryKey(0).c_str(), indexEntryKey(0).size()),
FDB_KEYSEL_FIRST_GREATER_THAN((const uint8_t*)indexEntryKey(1).c_str(), indexEntryKey(1).size()),
(const uint8_t*)mapper.c_str(),
mapper.size(),
/* limit */ 0,
/* target_bytes */ 0,
/* FDBStreamingMode */ FDB_STREAMING_MODE_WANT_ALL,
/* iteration */ 0,
/* snapshot */ true,
/* reverse */ 0);
ASSERT(result.err == error_code_client_invalid_operation);
}
TEST_CASE("fdb_transaction_get_range reverse") { TEST_CASE("fdb_transaction_get_range reverse") {
std::map<std::string, std::string> data = create_data({ { "a", "1" }, { "b", "2" }, { "c", "3" }, { "d", "4" } }); std::map<std::string, std::string> data = create_data({ { "a", "1" }, { "b", "2" }, { "c", "3" }, { "d", "4" } });
insert_data(db, data); insert_data(db, data);
@ -1726,7 +1898,7 @@ TEST_CASE("fdb_transaction_add_conflict_range") {
fdb::Transaction tr2(db); fdb::Transaction tr2(db);
while (1) { while (1) {
fdb_check(tr2.add_conflict_range(key("a"), strinc(key("a")), FDB_CONFLICT_RANGE_TYPE_WRITE)); fdb_check(tr2.add_conflict_range(key("a"), strinc_str(key("a")), FDB_CONFLICT_RANGE_TYPE_WRITE));
fdb::EmptyFuture f1 = tr2.commit(); fdb::EmptyFuture f1 = tr2.commit();
fdb_error_t err = wait_future(f1); fdb_error_t err = wait_future(f1);
@ -1739,8 +1911,8 @@ TEST_CASE("fdb_transaction_add_conflict_range") {
} }
while (1) { while (1) {
fdb_check(tr.add_conflict_range(key("a"), strinc(key("a")), FDB_CONFLICT_RANGE_TYPE_READ)); fdb_check(tr.add_conflict_range(key("a"), strinc_str(key("a")), FDB_CONFLICT_RANGE_TYPE_READ));
fdb_check(tr.add_conflict_range(key("a"), strinc(key("a")), FDB_CONFLICT_RANGE_TYPE_WRITE)); fdb_check(tr.add_conflict_range(key("a"), strinc_str(key("a")), FDB_CONFLICT_RANGE_TYPE_WRITE));
fdb::EmptyFuture f1 = tr.commit(); fdb::EmptyFuture f1 = tr.commit();
fdb_error_t err = wait_future(f1); fdb_error_t err = wait_future(f1);
@ -1828,41 +2000,6 @@ TEST_CASE("special-key-space set transaction ID after write") {
} }
} }
TEST_CASE("special-key-space set token after write") {
fdb::Transaction tr(db);
fdb_check(tr.set_option(FDB_TR_OPTION_SPECIAL_KEY_SPACE_ENABLE_WRITES, nullptr, 0));
while (1) {
tr.set(key("foo"), "bar");
tr.set("\xff\xff/tracing/token", "false");
fdb::ValueFuture f1 = tr.get("\xff\xff/tracing/token",
/* snapshot */ false);
fdb_error_t err = wait_future(f1);
if (err) {
fdb::EmptyFuture f2 = tr.on_error(err);
fdb_check(wait_future(f2));
continue;
}
int out_present;
char* val;
int vallen;
fdb_check(f1.get(&out_present, (const uint8_t**)&val, &vallen));
REQUIRE(out_present);
uint64_t token = std::stoul(std::string(val, vallen));
CHECK(token != 0);
break;
}
}
TEST_CASE("special-key-space valid token") {
auto value = get_value("\xff\xff/tracing/token", /* snapshot */ false, {});
REQUIRE(value.has_value());
uint64_t token = std::stoul(value.value());
CHECK(token > 0);
}
TEST_CASE("special-key-space disable tracing") { TEST_CASE("special-key-space disable tracing") {
fdb::Transaction tr(db); fdb::Transaction tr(db);
fdb_check(tr.set_option(FDB_TR_OPTION_SPECIAL_KEY_SPACE_ENABLE_WRITES, nullptr, 0)); fdb_check(tr.set_option(FDB_TR_OPTION_SPECIAL_KEY_SPACE_ENABLE_WRITES, nullptr, 0));
@ -1890,48 +2027,6 @@ TEST_CASE("special-key-space disable tracing") {
} }
} }
TEST_CASE("FDB_DB_OPTION_DISTRIBUTED_TRANSACTION_TRACE_DISABLE") {
fdb_check(fdb_database_set_option(db, FDB_DB_OPTION_DISTRIBUTED_TRANSACTION_TRACE_DISABLE, nullptr, 0));
auto value = get_value("\xff\xff/tracing/token", /* snapshot */ false, {});
REQUIRE(value.has_value());
uint64_t token = std::stoul(value.value());
CHECK(token == 0);
fdb_check(fdb_database_set_option(db, FDB_DB_OPTION_DISTRIBUTED_TRANSACTION_TRACE_ENABLE, nullptr, 0));
}
TEST_CASE("FDB_DB_OPTION_DISTRIBUTED_TRANSACTION_TRACE_DISABLE enable tracing for transaction") {
fdb_check(fdb_database_set_option(db, FDB_DB_OPTION_DISTRIBUTED_TRANSACTION_TRACE_DISABLE, nullptr, 0));
fdb::Transaction tr(db);
fdb_check(tr.set_option(FDB_TR_OPTION_SPECIAL_KEY_SPACE_ENABLE_WRITES, nullptr, 0));
while (1) {
tr.set("\xff\xff/tracing/token", "true");
fdb::ValueFuture f1 = tr.get("\xff\xff/tracing/token",
/* snapshot */ false);
fdb_error_t err = wait_future(f1);
if (err) {
fdb::EmptyFuture f2 = tr.on_error(err);
fdb_check(wait_future(f2));
continue;
}
int out_present;
char* val;
int vallen;
fdb_check(f1.get(&out_present, (const uint8_t**)&val, &vallen));
REQUIRE(out_present);
uint64_t token = std::stoul(std::string(val, vallen));
CHECK(token > 0);
break;
}
fdb_check(fdb_database_set_option(db, FDB_DB_OPTION_DISTRIBUTED_TRANSACTION_TRACE_ENABLE, nullptr, 0));
}
TEST_CASE("special-key-space tracing get range") { TEST_CASE("special-key-space tracing get range") {
std::string tracingBegin = "\xff\xff/tracing/"; std::string tracingBegin = "\xff\xff/tracing/";
std::string tracingEnd = "\xff\xff/tracing0"; std::string tracingEnd = "\xff\xff/tracing0";
@ -1964,8 +2059,6 @@ TEST_CASE("special-key-space tracing get range") {
CHECK(!out_more); CHECK(!out_more);
CHECK(out_count == 2); CHECK(out_count == 2);
CHECK(std::string((char*)out_kv[0].key, out_kv[0].key_length) == tracingBegin + "token");
CHECK(std::stoul(std::string((char*)out_kv[0].value, out_kv[0].value_length)) > 0);
CHECK(std::string((char*)out_kv[1].key, out_kv[1].key_length) == tracingBegin + "transaction_id"); CHECK(std::string((char*)out_kv[1].key, out_kv[1].key_length) == tracingBegin + "transaction_id");
CHECK(std::stoul(std::string((char*)out_kv[1].value, out_kv[1].value_length)) > 0); CHECK(std::stoul(std::string((char*)out_kv[1].value, out_kv[1].value_length)) > 0);
break; break;
@ -2217,7 +2310,7 @@ TEST_CASE("commit_does_not_reset") {
continue; continue;
} }
fdb_check(tr2.add_conflict_range(key("foo"), strinc(key("foo")), FDB_CONFLICT_RANGE_TYPE_READ)); fdb_check(tr2.add_conflict_range(key("foo"), strinc_str(key("foo")), FDB_CONFLICT_RANGE_TYPE_READ));
tr2.set(key("foo"), "bar"); tr2.set(key("foo"), "bar");
fdb::EmptyFuture tr2CommitFuture = tr2.commit(); fdb::EmptyFuture tr2CommitFuture = tr2.commit();
err = wait_future(tr2CommitFuture); err = wait_future(tr2CommitFuture);

View File

@ -176,9 +176,9 @@ void promiseSend(JNIEnv, jclass, jlong self, jboolean value) {
struct JNIError { struct JNIError {
JNIEnv* env; JNIEnv* env;
jthrowable throwable = nullptr; jthrowable throwable{ nullptr };
const char* file; const char* file{ nullptr };
int line; int line{ 0 };
std::string location() const { std::string location() const {
if (file == nullptr) { if (file == nullptr) {

View File

@ -756,6 +756,76 @@ JNIEXPORT jlong JNICALL Java_com_apple_foundationdb_FDBTransaction_Transaction_1
return (jlong)f; return (jlong)f;
} }
JNIEXPORT jlong JNICALL
Java_com_apple_foundationdb_FDBTransaction_Transaction_1getRangeAndFlatMap(JNIEnv* jenv,
jobject,
jlong tPtr,
jbyteArray keyBeginBytes,
jboolean orEqualBegin,
jint offsetBegin,
jbyteArray keyEndBytes,
jboolean orEqualEnd,
jint offsetEnd,
jbyteArray mapperBytes,
jint rowLimit,
jint targetBytes,
jint streamingMode,
jint iteration,
jboolean snapshot,
jboolean reverse) {
if (!tPtr || !keyBeginBytes || !keyEndBytes || !mapperBytes) {
throwParamNotNull(jenv);
return 0;
}
FDBTransaction* tr = (FDBTransaction*)tPtr;
uint8_t* barrBegin = (uint8_t*)jenv->GetByteArrayElements(keyBeginBytes, JNI_NULL);
if (!barrBegin) {
if (!jenv->ExceptionOccurred())
throwRuntimeEx(jenv, "Error getting handle to native resources");
return 0;
}
uint8_t* barrEnd = (uint8_t*)jenv->GetByteArrayElements(keyEndBytes, JNI_NULL);
if (!barrEnd) {
jenv->ReleaseByteArrayElements(keyBeginBytes, (jbyte*)barrBegin, JNI_ABORT);
if (!jenv->ExceptionOccurred())
throwRuntimeEx(jenv, "Error getting handle to native resources");
return 0;
}
uint8_t* barrMapper = (uint8_t*)jenv->GetByteArrayElements(mapperBytes, JNI_NULL);
if (!barrMapper) {
jenv->ReleaseByteArrayElements(keyBeginBytes, (jbyte*)barrBegin, JNI_ABORT);
jenv->ReleaseByteArrayElements(keyEndBytes, (jbyte*)barrEnd, JNI_ABORT);
if (!jenv->ExceptionOccurred())
throwRuntimeEx(jenv, "Error getting handle to native resources");
return 0;
}
FDBFuture* f = fdb_transaction_get_range_and_flat_map(tr,
barrBegin,
jenv->GetArrayLength(keyBeginBytes),
orEqualBegin,
offsetBegin,
barrEnd,
jenv->GetArrayLength(keyEndBytes),
orEqualEnd,
offsetEnd,
barrMapper,
jenv->GetArrayLength(mapperBytes),
rowLimit,
targetBytes,
(FDBStreamingMode)streamingMode,
iteration,
snapshot,
reverse);
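// Release the borrowed arrays. JNI_ABORT frees the native copies without copying anything back,
// which is correct here because the begin-key, end-key and mapper buffers are only read.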
jenv->ReleaseByteArrayElements(keyBeginBytes, (jbyte*)barrBegin, JNI_ABORT);
jenv->ReleaseByteArrayElements(keyEndBytes, (jbyte*)barrEnd, JNI_ABORT);
jenv->ReleaseByteArrayElements(mapperBytes, (jbyte*)barrMapper, JNI_ABORT);
return (jlong)f;
}
JNIEXPORT void JNICALL Java_com_apple_foundationdb_FutureResults_FutureResults_1getDirect(JNIEnv* jenv, JNIEXPORT void JNICALL Java_com_apple_foundationdb_FutureResults_FutureResults_1getDirect(JNIEnv* jenv,
jobject, jobject,
jlong future, jlong future,

View File

@ -0,0 +1,256 @@
/*
* RangeAndFlatMapQueryIntegrationTest.java
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.apple.foundationdb;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.atomic.AtomicReference;
import com.apple.foundationdb.async.AsyncIterable;
import com.apple.foundationdb.async.AsyncUtil;
import com.apple.foundationdb.tuple.ByteArrayUtil;
import com.apple.foundationdb.tuple.Tuple;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
@ExtendWith(RequiresDatabase.class)
class RangeAndFlatMapQueryIntegrationTest {
private static final FDB fdb = FDB.selectAPIVersion(710);
public String databaseArg = null;
private Database openFDB() { return fdb.open(databaseArg); }
@BeforeEach
@AfterEach
void clearDatabase() throws Exception {
/*
* Empty the database before and after each run, just in case
*/
try (Database db = openFDB()) {
db.run(tr -> {
tr.clear(Range.startsWith(new byte[] { (byte)0x00 }));
return null;
});
}
}
static private final byte[] EMPTY = Tuple.from().pack();
static private final String PREFIX = "prefix";
static private final String RECORD = "RECORD";
static private final String INDEX = "INDEX";
static private String primaryKey(int i) { return String.format("primary-key-of-record-%08d", i); }
static private String indexKey(int i) { return String.format("index-key-of-record-%08d", i); }
static private String dataOfRecord(int i) { return String.format("data-of-record-%08d", i); }
static byte[] MAPPER = Tuple.from(PREFIX, RECORD, "{K[3]}").pack();
static private byte[] indexEntryKey(final int i) {
return Tuple.from(PREFIX, INDEX, indexKey(i), primaryKey(i)).pack();
}
static private byte[] recordKey(final int i) { return Tuple.from(PREFIX, RECORD, primaryKey(i)).pack(); }
static private byte[] recordValue(final int i) { return Tuple.from(dataOfRecord(i)).pack(); }
static private void insertRecordWithIndex(final Transaction tr, final int i) {
tr.set(indexEntryKey(i), EMPTY);
tr.set(recordKey(i), recordValue(i));
}
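// A minimal sketch of what MAPPER does, inferred from the key layout above (an illustration only,
// not a specification of the "{K[...]}" syntax): indexEntryKey(i) packs the tuple
// (PREFIX, INDEX, indexKey(i), primaryKey(i)), and "{K[3]}" picks element 3 of that tuple, so the
// server reads the record stored at (PREFIX, RECORD, primaryKey(i)) and returns its value.
// The hypothetical helper below only makes that substitution explicit; it is not used by the tests.
static private byte[] mappedRecordKey(final int i) {
	String primary = Tuple.fromBytes(indexEntryKey(i)).getString(3); // the element "{K[3]}" selects
	return Tuple.from(PREFIX, RECORD, primary).pack(); // equals recordKey(i)
}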
private static String getArgFromEnv() {
String[] clusterFiles = MultiClientHelper.readClusterFromEnv();
String cluster = clusterFiles[0];
System.out.printf("Using Cluster: %s\n", cluster);
return cluster;
}
public static void main(String[] args) throws Exception {
final RangeAndFlatMapQueryIntegrationTest test = new RangeAndFlatMapQueryIntegrationTest();
test.databaseArg = getArgFromEnv();
test.clearDatabase();
test.comparePerformance();
test.clearDatabase();
}
int numRecords = 10000;
int numQueries = 10000;
int numRecordsPerQuery = 100;
boolean validate = false;
@Test
void comparePerformance() {
FDB fdb = FDB.selectAPIVersion(710);
try (Database db = openFDB()) {
insertRecordsWithIndexes(numRecords, db);
instrument(rangeQueryAndGet, "rangeQueryAndGet", db);
instrument(rangeQueryAndFlatMap, "rangeQueryAndFlatMap", db);
}
}
private void instrument(final RangeQueryWithIndex query, final String name, final Database db) {
System.out.printf("Starting %s (numQueries:%d, numRecordsPerQuery:%d)\n", name, numQueries, numRecordsPerQuery);
long startTime = System.currentTimeMillis();
for (int queryId = 0; queryId < numQueries; queryId++) {
int begin = ThreadLocalRandom.current().nextInt(numRecords - numRecordsPerQuery);
query.run(begin, begin + numRecordsPerQuery, db);
}
long time = System.currentTimeMillis() - startTime;
System.out.printf("Finished %s, it takes %d ms for %d queries (%d qps)\n", name, time, numQueries,
numQueries * 1000L / time);
}
static private final int RECORDS_PER_TXN = 100;
static private void insertRecordsWithIndexes(int n, Database db) {
int i = 0;
while (i < n) {
int begin = i;
int end = Math.min(n, i + RECORDS_PER_TXN);
// insert [begin, end) in one transaction
db.run(tr -> {
for (int t = begin; t < end; t++) {
insertRecordWithIndex(tr, t);
}
return null;
});
i = end;
}
}
public interface RangeQueryWithIndex {
void run(int begin, int end, Database db);
}
RangeQueryWithIndex rangeQueryAndGet = (int begin, int end, Database db) -> db.run(tr -> {
try {
List<KeyValue> kvs = tr.getRange(KeySelector.firstGreaterOrEqual(indexEntryKey(begin)),
KeySelector.firstGreaterOrEqual(indexEntryKey(end)),
ReadTransaction.ROW_LIMIT_UNLIMITED, false, StreamingMode.WANT_ALL)
.asList()
.get();
Assertions.assertEquals(end - begin, kvs.size());
// Get the records of each index entry IN PARALLEL.
List<CompletableFuture<byte[]>> resultFutures = new ArrayList<>();
// In reality, we need to get the record key by parsing the index entry key. But considering this is a
// performance test, we just ignore the returned key and generate the record key with recordKey() directly.
for (int id = begin; id < end; id++) {
resultFutures.add(tr.get(recordKey(id)));
}
AsyncUtil.whenAll(resultFutures).get();
if (validate) {
final Iterator<KeyValue> indexes = kvs.iterator();
final Iterator<CompletableFuture<byte[]>> records = resultFutures.iterator();
for (int id = begin; id < end; id++) {
Assertions.assertTrue(indexes.hasNext());
assertByteArrayEquals(indexEntryKey(id), indexes.next().getKey());
Assertions.assertTrue(records.hasNext());
assertByteArrayEquals(recordValue(id), records.next().get());
}
Assertions.assertFalse(indexes.hasNext());
Assertions.assertFalse(records.hasNext());
}
} catch (Exception e) {
Assertions.fail("Unexpected exception", e);
}
return null;
});
RangeQueryWithIndex rangeQueryAndFlatMap = (int begin, int end, Database db) -> db.run(tr -> {
try {
tr.options().setReadYourWritesDisable();
List<KeyValue> kvs =
tr.snapshot()
.getRangeAndFlatMap(KeySelector.firstGreaterOrEqual(indexEntryKey(begin)),
KeySelector.firstGreaterOrEqual(indexEntryKey(end)), MAPPER,
ReadTransaction.ROW_LIMIT_UNLIMITED, false, StreamingMode.WANT_ALL)
.asList()
.get();
Assertions.assertEquals(end - begin, kvs.size());
if (validate) {
final Iterator<KeyValue> results = kvs.iterator();
for (int id = begin; id < end; id++) {
Assertions.assertTrue(results.hasNext());
assertByteArrayEquals(recordValue(id), results.next().getValue());
}
Assertions.assertFalse(results.hasNext());
}
} catch (Exception e) {
Assertions.fail("Unexpected exception", e);
}
return null;
});
void assertByteArrayEquals(byte[] expected, byte[] actual) {
Assertions.assertEquals(ByteArrayUtil.printable(expected), ByteArrayUtil.printable(actual));
}
@Test
void rangeAndFlatMapQueryOverMultipleRows() throws Exception {
try (Database db = openFDB()) {
insertRecordsWithIndexes(3, db);
List<byte[]> expected_data_of_records = new ArrayList<>();
for (int i = 0; i <= 1; i++) {
expected_data_of_records.add(recordValue(i));
}
db.run(tr -> {
// getRangeAndFlatMap is only supported with read-your-writes disabled, so this option is required.
tr.options().setReadYourWritesDisable();
// getRangeAndFlatMap is only supported with snapshot.
Iterator<KeyValue> kvs =
tr.snapshot()
.getRangeAndFlatMap(KeySelector.firstGreaterOrEqual(indexEntryKey(0)),
KeySelector.firstGreaterThan(indexEntryKey(1)), MAPPER,
ReadTransaction.ROW_LIMIT_UNLIMITED, false, StreamingMode.WANT_ALL)
.iterator();
Iterator<byte[]> expected_data_of_records_iter = expected_data_of_records.iterator();
while (expected_data_of_records_iter.hasNext()) {
Assertions.assertTrue(kvs.hasNext(), "iterator ended too early");
KeyValue kv = kvs.next();
byte[] actual_data_of_record = kv.getValue();
byte[] expected_data_of_record = expected_data_of_records_iter.next();
// System.out.println("result key:" + ByteArrayUtil.printable(kv.getKey()) + " value:" +
// ByteArrayUtil.printable(kv.getValue())); Output:
// result
// key:\x02prefix\x00\x02INDEX\x00\x02index-key-of-record-0\x00\x02primary-key-of-record-0\x00
// value:\x02data-of-record-0\x00
// result
// key:\x02prefix\x00\x02INDEX\x00\x02index-key-of-record-1\x00\x02primary-key-of-record-1\x00
// value:\x02data-of-record-1\x00
// For now, we don't guarantee what the returned keys mean.
Assertions.assertArrayEquals(expected_data_of_record, actual_data_of_record,
"Incorrect data of record!");
}
Assertions.assertFalse(kvs.hasNext(), "Iterator returned too much data");
return null;
});
}
}
}

View File

@ -88,8 +88,11 @@ public class FakeFDBTransaction extends FDBTransaction {
public int getNumRangeCalls() { return numRangeCalls; } public int getNumRangeCalls() { return numRangeCalls; }
@Override @Override
protected FutureResults getRange_internal(KeySelector begin, KeySelector end, int rowLimit, int targetBytes, protected FutureResults getRange_internal(KeySelector begin, KeySelector end,
int streamingMode, int iteration, boolean isSnapshot, boolean reverse) { // TODO: map is not supported in FakeFDBTransaction yet.
byte[] mapper, // Nullable
int rowLimit, int targetBytes, int streamingMode, int iteration,
boolean isSnapshot, boolean reverse) {
numRangeCalls++; numRangeCalls++;
// TODO this is probably not correct for all KeySelector instances--we'll want to match with real behavior // TODO this is probably not correct for all KeySelector instances--we'll want to match with real behavior
NavigableMap<byte[], byte[]> range = NavigableMap<byte[], byte[]> range =

View File

@ -91,6 +91,15 @@ class FDBTransaction extends NativeObjectWrapper implements Transaction, OptionC
return FDBTransaction.this.getRangeSplitPoints(range, chunkSize); return FDBTransaction.this.getRangeSplitPoints(range, chunkSize);
} }
@Override
public AsyncIterable<KeyValue> getRangeAndFlatMap(KeySelector begin, KeySelector end, byte[] mapper, int limit,
boolean reverse, StreamingMode mode) {
if (mapper == null) {
throw new IllegalArgumentException("Mapper must be non-null");
}
return new RangeQuery(FDBTransaction.this, true, begin, end, mapper, limit, reverse, mode, eventKeeper);
}
/////////////////// ///////////////////
// getRange -> KeySelectors // getRange -> KeySelectors
/////////////////// ///////////////////
@ -338,6 +347,12 @@ class FDBTransaction extends NativeObjectWrapper implements Transaction, OptionC
return this.getRangeSplitPoints(range.begin, range.end, chunkSize); return this.getRangeSplitPoints(range.begin, range.end, chunkSize);
} }
@Override
public AsyncIterable<KeyValue> getRangeAndFlatMap(KeySelector begin, KeySelector end, byte[] mapper, int limit,
boolean reverse, StreamingMode mode) {
throw new UnsupportedOperationException("getRangeAndFlatMap is only supported in snapshot");
}
/////////////////// ///////////////////
// getRange -> KeySelectors // getRange -> KeySelectors
/////////////////// ///////////////////
@ -415,10 +430,10 @@ class FDBTransaction extends NativeObjectWrapper implements Transaction, OptionC
} }
// Users of this function must close the returned FutureResults when finished // Users of this function must close the returned FutureResults when finished
protected FutureResults getRange_internal( protected FutureResults getRange_internal(KeySelector begin, KeySelector end,
KeySelector begin, KeySelector end, byte[] mapper, // Nullable
int rowLimit, int targetBytes, int streamingMode, int rowLimit, int targetBytes, int streamingMode, int iteration,
int iteration, boolean isSnapshot, boolean reverse) { boolean isSnapshot, boolean reverse) {
if (eventKeeper != null) { if (eventKeeper != null) {
eventKeeper.increment(Events.JNI_CALL); eventKeeper.increment(Events.JNI_CALL);
} }
@ -429,9 +444,13 @@ class FDBTransaction extends NativeObjectWrapper implements Transaction, OptionC
begin.toString(), end.toString(), rowLimit, targetBytes, streamingMode, begin.toString(), end.toString(), rowLimit, targetBytes, streamingMode,
iteration, Boolean.toString(isSnapshot), Boolean.toString(reverse)));*/ iteration, Boolean.toString(isSnapshot), Boolean.toString(reverse)));*/
return new FutureResults( return new FutureResults(
Transaction_getRange(getPtr(), begin.getKey(), begin.orEqual(), begin.getOffset(), mapper == null
end.getKey(), end.orEqual(), end.getOffset(), rowLimit, targetBytes, ? Transaction_getRange(getPtr(), begin.getKey(), begin.orEqual(), begin.getOffset(), end.getKey(),
streamingMode, iteration, isSnapshot, reverse), end.orEqual(), end.getOffset(), rowLimit, targetBytes, streamingMode,
iteration, isSnapshot, reverse)
: Transaction_getRangeAndFlatMap(getPtr(), begin.getKey(), begin.orEqual(), begin.getOffset(),
end.getKey(), end.orEqual(), end.getOffset(), mapper, rowLimit,
targetBytes, streamingMode, iteration, isSnapshot, reverse),
FDB.instance().isDirectBufferQueriesEnabled(), executor, eventKeeper); FDB.instance().isDirectBufferQueriesEnabled(), executor, eventKeeper);
} finally { } finally {
pointerReadLock.unlock(); pointerReadLock.unlock();
@ -771,6 +790,12 @@ class FDBTransaction extends NativeObjectWrapper implements Transaction, OptionC
byte[] keyEnd, boolean orEqualEnd, int offsetEnd, byte[] keyEnd, boolean orEqualEnd, int offsetEnd,
int rowLimit, int targetBytes, int streamingMode, int iteration, int rowLimit, int targetBytes, int streamingMode, int iteration,
boolean isSnapshot, boolean reverse); boolean isSnapshot, boolean reverse);
private native long Transaction_getRangeAndFlatMap(long cPtr, byte[] keyBegin, boolean orEqualBegin,
int offsetBegin, byte[] keyEnd, boolean orEqualEnd,
int offsetEnd,
byte[] mapper, // Nonnull
int rowLimit, int targetBytes, int streamingMode, int iteration,
boolean isSnapshot, boolean reverse);
private native void Transaction_addConflictRange(long cPtr, private native void Transaction_addConflictRange(long cPtr,
byte[] keyBegin, byte[] keyEnd, int conflictRangeType); byte[] keyBegin, byte[] keyEnd, int conflictRangeType);
private native void Transaction_set(long cPtr, byte[] key, byte[] value); private native void Transaction_set(long cPtr, byte[] key, byte[] value);

View File

@ -49,17 +49,19 @@ class RangeQuery implements AsyncIterable<KeyValue> {
private final FDBTransaction tr; private final FDBTransaction tr;
private final KeySelector begin; private final KeySelector begin;
private final KeySelector end; private final KeySelector end;
private final byte[] mapper; // Nullable
private final boolean snapshot; private final boolean snapshot;
private final int rowLimit; private final int rowLimit;
private final boolean reverse; private final boolean reverse;
private final StreamingMode streamingMode; private final StreamingMode streamingMode;
private final EventKeeper eventKeeper; private final EventKeeper eventKeeper;
RangeQuery(FDBTransaction transaction, boolean isSnapshot, KeySelector begin, KeySelector end, int rowLimit, RangeQuery(FDBTransaction transaction, boolean isSnapshot, KeySelector begin, KeySelector end, byte[] mapper,
boolean reverse, StreamingMode streamingMode, EventKeeper eventKeeper) { int rowLimit, boolean reverse, StreamingMode streamingMode, EventKeeper eventKeeper) {
this.tr = transaction; this.tr = transaction;
this.begin = begin; this.begin = begin;
this.end = end; this.end = end;
this.mapper = mapper;
this.snapshot = isSnapshot; this.snapshot = isSnapshot;
this.rowLimit = rowLimit; this.rowLimit = rowLimit;
this.reverse = reverse; this.reverse = reverse;
@ -67,6 +69,12 @@ class RangeQuery implements AsyncIterable<KeyValue> {
this.eventKeeper = eventKeeper; this.eventKeeper = eventKeeper;
} }
// Delegating constructor for plain range queries (no mapper); getRangeAndFlatMap uses the constructor above.
RangeQuery(FDBTransaction transaction, boolean isSnapshot, KeySelector begin, KeySelector end, int rowLimit,
boolean reverse, StreamingMode streamingMode, EventKeeper eventKeeper) {
this(transaction, isSnapshot, begin, end, null, rowLimit, reverse, streamingMode, eventKeeper);
}
/** /**
* Returns all the results from the range requested as a {@code List}. If there were no * Returns all the results from the range requested as a {@code List}. If there were no
* limits on the original query and there is a large amount of data in the database * limits on the original query and there is a large amount of data in the database
@ -83,16 +91,16 @@ class RangeQuery implements AsyncIterable<KeyValue> {
// if the streaming mode is EXACT, try and grab things as one chunk // if the streaming mode is EXACT, try and grab things as one chunk
if(mode == StreamingMode.EXACT) { if(mode == StreamingMode.EXACT) {
FutureResults range = tr.getRange_internal(
this.begin, this.end, this.rowLimit, 0, StreamingMode.EXACT.code(), FutureResults range = tr.getRange_internal(this.begin, this.end, this.mapper, this.rowLimit, 0,
1, this.snapshot, this.reverse); StreamingMode.EXACT.code(), 1, this.snapshot, this.reverse);
return range.thenApply(result -> result.get().values) return range.thenApply(result -> result.get().values)
.whenComplete((result, e) -> range.close()); .whenComplete((result, e) -> range.close());
} }
// If the streaming mode is not EXACT, simply collect the results of an // If the streaming mode is not EXACT, simply collect the results of an
// iteration into a list // iteration into a list
return AsyncUtil.collect(new RangeQuery(tr, snapshot, begin, end, rowLimit, reverse, mode, eventKeeper), return AsyncUtil.collect(new RangeQuery(tr, snapshot, begin, end, mapper, rowLimit, reverse, mode, eventKeeper),
tr.getExecutor()); tr.getExecutor());
} }
@ -221,8 +229,8 @@ class RangeQuery implements AsyncIterable<KeyValue> {
nextFuture = new CompletableFuture<>(); nextFuture = new CompletableFuture<>();
final long sTime = System.nanoTime(); final long sTime = System.nanoTime();
fetchingChunk = tr.getRange_internal(begin, end, rowsLimited ? rowsRemaining : 0, 0, streamingMode.code(), fetchingChunk = tr.getRange_internal(begin, end, mapper, rowsLimited ? rowsRemaining : 0, 0,
++iteration, snapshot, reverse); streamingMode.code(), ++iteration, snapshot, reverse);
BiConsumer<RangeResultInfo,Throwable> cons = new FetchComplete(fetchingChunk,nextFuture); BiConsumer<RangeResultInfo,Throwable> cons = new FetchComplete(fetchingChunk,nextFuture);
if(eventKeeper!=null){ if(eventKeeper!=null){

View File

@ -424,6 +424,41 @@ public interface ReadTransaction extends ReadTransactionContext {
AsyncIterable<KeyValue> getRange(Range range, AsyncIterable<KeyValue> getRange(Range range,
int limit, boolean reverse, StreamingMode mode); int limit, boolean reverse, StreamingMode mode);
/**
* WARNING: This feature is considered experimental at this time. It is only allowed when using snapshot isolation
* AND disabling read-your-writes.
*
* @see KeySelector
* @see AsyncIterator
*
* @param begin the beginning of the range (inclusive)
* @param end the end of the range (exclusive)
* @param mapper TODO
* @param limit the maximum number of results to return. Limits results to the
* <i>first</i> keys in the range. Pass {@link #ROW_LIMIT_UNLIMITED} if this query
* should not limit the number of results. If {@code reverse} is {@code true} rows
* will be limited starting at the end of the range.
* @param reverse return results starting at the end of the range in reverse order.
* Reading ranges in reverse is supported natively by the database and should
* have minimal extra cost.
* @param mode provide a hint about how the results are to be used. This
* can provide speed improvements or efficiency gains based on the caller's
* knowledge of the upcoming access pattern.
*
* <p>
* When converting the result of this query to a list using {@link AsyncIterable#asList()} with the {@code
* ITERATOR} streaming mode, the query is automatically modified to fetch results in larger batches. This is done
* because it is known in advance that the {@link AsyncIterable#asList()} function will fetch all results in the
* range. If a limit is specified, the {@code EXACT} streaming mode will be used, and otherwise it will use {@code
* WANT_ALL}.
*
* To achieve comparable performance when iterating over an entire range without using {@link
* AsyncIterable#asList()}, the same streaming mode would need to be used.
* </p>
* @return a handle to access the results of the asynchronous call
*/
AsyncIterable<KeyValue> getRangeAndFlatMap(KeySelector begin, KeySelector end, byte[] mapper, int limit,
boolean reverse, StreamingMode mode);
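// Usage sketch (assumptions: "db" is an open Database, the data follows the index layout used in
// RangeAndFlatMapQueryIntegrationTest, and "indexBegin"/"indexEnd" are placeholder byte[] bounds):
//
//   byte[] mapper = Tuple.from("prefix", "RECORD", "{K[3]}").pack();
//   db.run(tr -> {
//       tr.options().setReadYourWritesDisable(); // required: read-your-writes must be disabled
//       return tr.snapshot() // required: only snapshot reads are allowed
//                .getRangeAndFlatMap(KeySelector.firstGreaterOrEqual(indexBegin),
//                                    KeySelector.firstGreaterThan(indexEnd), mapper,
//                                    ReadTransaction.ROW_LIMIT_UNLIMITED, false,
//                                    StreamingMode.WANT_ALL)
//                .asList()
//                .join(); // values are those of the mapped records, not of the index entries
//   });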
/** /**
* Gets an estimate for the number of bytes stored in the given range. * Gets an estimate for the number of bytes stored in the given range.

View File

@ -52,6 +52,7 @@ set(JAVA_INTEGRATION_TESTS
src/integration/com/apple/foundationdb/CycleMultiClientIntegrationTest.java src/integration/com/apple/foundationdb/CycleMultiClientIntegrationTest.java
src/integration/com/apple/foundationdb/SidebandMultiThreadClientTest.java src/integration/com/apple/foundationdb/SidebandMultiThreadClientTest.java
src/integration/com/apple/foundationdb/RepeatableReadMultiThreadClientTest.java src/integration/com/apple/foundationdb/RepeatableReadMultiThreadClientTest.java
src/integration/com/apple/foundationdb/RangeAndFlatMapQueryIntegrationTest.java
) )
# Resources that are used in integration testing, but are not explicitly test files (JUnit rules, # Resources that are used in integration testing, but are not explicitly test files (JUnit rules,

View File

@ -385,7 +385,7 @@ def coordinators(logger):
# verify now we have 5 coordinators and the description is updated # verify now we have 5 coordinators and the description is updated
output2 = run_fdbcli_command('coordinators') output2 = run_fdbcli_command('coordinators')
assert output2.split('\n')[0].split(': ')[-1] == new_cluster_description assert output2.split('\n')[0].split(': ')[-1] == new_cluster_description
assert output2.split('\n')[1] == 'Cluster coordinators ({}): {}'.format(5, ','.join(addresses)) assert output2.split('\n')[1] == 'Cluster coordinators ({}): {}'.format(args.process_number, ','.join(addresses))
# auto change should go back to 1 coordinator # auto change should go back to 1 coordinator
run_fdbcli_command('coordinators', 'auto') run_fdbcli_command('coordinators', 'auto')
assert len(get_value_from_status_json(True, 'client', 'coordinators', 'coordinators')) == 1 assert len(get_value_from_status_json(True, 'client', 'coordinators', 'coordinators')) == 1

View File

@ -49,8 +49,8 @@ function(compile_boost)
include(ExternalProject) include(ExternalProject)
set(BOOST_INSTALL_DIR "${CMAKE_BINARY_DIR}/boost_install") set(BOOST_INSTALL_DIR "${CMAKE_BINARY_DIR}/boost_install")
ExternalProject_add("${COMPILE_BOOST_TARGET}Project" ExternalProject_add("${COMPILE_BOOST_TARGET}Project"
URL "https://boostorg.jfrog.io/artifactory/main/release/1.72.0/source/boost_1_72_0.tar.bz2" URL "https://boostorg.jfrog.io/artifactory/main/release/1.77.0/source/boost_1_77_0.tar.bz2"
URL_HASH SHA256=59c9b274bc451cf91a9ba1dd2c7fdcaf5d60b1b3aa83f2c9fa143417cc660722 URL_HASH SHA256=fc9f85fc030e233142908241af7a846e60630aa7388de9a5fafb1f3a26840854
CONFIGURE_COMMAND ${BOOTSTRAP_COMMAND} ${BOOTSTRAP_ARGS} --with-libraries=${BOOTSTRAP_LIBRARIES} --with-toolset=${BOOST_TOOLSET} CONFIGURE_COMMAND ${BOOTSTRAP_COMMAND} ${BOOTSTRAP_ARGS} --with-libraries=${BOOTSTRAP_LIBRARIES} --with-toolset=${BOOST_TOOLSET}
BUILD_COMMAND ${B2_COMMAND} link=static ${COMPILE_BOOST_BUILD_ARGS} --prefix=${BOOST_INSTALL_DIR} ${USER_CONFIG_FLAG} install BUILD_COMMAND ${B2_COMMAND} link=static ${COMPILE_BOOST_BUILD_ARGS} --prefix=${BOOST_INSTALL_DIR} ${USER_CONFIG_FLAG} install
BUILD_IN_SOURCE ON BUILD_IN_SOURCE ON
@ -113,7 +113,7 @@ if(WIN32)
return() return()
endif() endif()
find_package(Boost 1.72.0 EXACT QUIET COMPONENTS context CONFIG PATHS ${BOOST_HINT_PATHS}) find_package(Boost 1.77.0 EXACT QUIET COMPONENTS context CONFIG PATHS ${BOOST_HINT_PATHS})
set(FORCE_BOOST_BUILD OFF CACHE BOOL "Forces cmake to build boost and ignores any installed boost") set(FORCE_BOOST_BUILD OFF CACHE BOOL "Forces cmake to build boost and ignores any installed boost")
if(Boost_FOUND AND NOT FORCE_BOOST_BUILD) if(Boost_FOUND AND NOT FORCE_BOOST_BUILD)

View File

@ -282,19 +282,12 @@ else()
-Woverloaded-virtual -Woverloaded-virtual
-Wshift-sign-overflow -Wshift-sign-overflow
# Here's the current set of warnings we need to explicitly disable to compile warning-free with clang 11 # Here's the current set of warnings we need to explicitly disable to compile warning-free with clang 11
-Wno-comment
-Wno-delete-non-virtual-dtor -Wno-delete-non-virtual-dtor
-Wno-format
-Wno-mismatched-tags
-Wno-missing-field-initializers
-Wno-sign-compare -Wno-sign-compare
-Wno-tautological-pointer-compare
-Wno-undefined-var-template -Wno-undefined-var-template
-Wno-unknown-pragmas
-Wno-unknown-warning-option -Wno-unknown-warning-option
-Wno-unused-function
-Wno-unused-local-typedef
-Wno-unused-parameter -Wno-unused-parameter
-Wno-constant-logical-operand
) )
if (USE_CCACHE) if (USE_CCACHE)
add_compile_options( add_compile_options(
@ -320,7 +313,7 @@ else()
-fvisibility=hidden -fvisibility=hidden
-Wreturn-type -Wreturn-type
-fPIC) -fPIC)
if (CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "^x86") if (CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "^x86" AND NOT CLANG)
add_compile_options($<$<COMPILE_LANGUAGE:CXX>:-Wclass-memaccess>) add_compile_options($<$<COMPILE_LANGUAGE:CXX>:-Wclass-memaccess>)
endif() endif()
if (GPERFTOOLS_FOUND AND GCC) if (GPERFTOOLS_FOUND AND GCC)

View File

@ -185,12 +185,6 @@ install(DIRECTORY "${script_dir}/clients/usr/lib/cmake"
DESTINATION usr/lib DESTINATION usr/lib
COMPONENT clients-versioned) COMPONENT clients-versioned)
################################################################################
# Move Docker Setup
################################################################################
file(COPY "${PROJECT_SOURCE_DIR}/packaging/docker" DESTINATION "${PROJECT_BINARY_DIR}/packages/")
################################################################################ ################################################################################
# General CPack configuration # General CPack configuration
################################################################################ ################################################################################
@ -228,7 +222,7 @@ set(CPACK_COMPONENT_CLIENTS-TGZ_DISPLAY_NAME "foundationdb-clients")
set(CPACK_COMPONENT_CLIENTS-VERSIONED_DISPLAY_NAME "foundationdb${PROJECT_VERSION}-clients") set(CPACK_COMPONENT_CLIENTS-VERSIONED_DISPLAY_NAME "foundationdb${PROJECT_VERSION}-clients")
# MacOS needs a file exiension for the LICENSE file # MacOS needs a file extension for the LICENSE file
configure_file(${CMAKE_SOURCE_DIR}/LICENSE ${CMAKE_BINARY_DIR}/License.txt COPYONLY) configure_file(${CMAKE_SOURCE_DIR}/LICENSE ${CMAKE_BINARY_DIR}/License.txt COPYONLY)
################################################################################ ################################################################################

View File

@ -35,7 +35,7 @@ else()
BUILD_BYPRODUCTS "${JEMALLOC_DIR}/include/jemalloc/jemalloc.h" BUILD_BYPRODUCTS "${JEMALLOC_DIR}/include/jemalloc/jemalloc.h"
"${JEMALLOC_DIR}/lib/libjemalloc.a" "${JEMALLOC_DIR}/lib/libjemalloc.a"
"${JEMALLOC_DIR}/lib/libjemalloc_pic.a" "${JEMALLOC_DIR}/lib/libjemalloc_pic.a"
CONFIGURE_COMMAND ./configure --prefix=${JEMALLOC_DIR} --enable-static --disable-cxx CONFIGURE_COMMAND ./configure --prefix=${JEMALLOC_DIR} --enable-static --disable-cxx --enable-prof
BUILD_IN_SOURCE ON BUILD_IN_SOURCE ON
BUILD_COMMAND make BUILD_COMMAND make
INSTALL_DIR "${JEMALLOC_DIR}" INSTALL_DIR "${JEMALLOC_DIR}"

View File

@ -1,3 +1,4 @@
add_subdirectory(fmt-8.0.1)
if(NOT WIN32) if(NOT WIN32)
add_subdirectory(monitoring) add_subdirectory(monitoring)
add_subdirectory(TraceLogHelper) add_subdirectory(TraceLogHelper)

View File

@ -0,0 +1 @@
DisableFormat: true

View File

@ -0,0 +1,410 @@
cmake_minimum_required(VERSION 3.1...3.18)
# Fallback for using newer policies on CMake <3.12.
if(${CMAKE_VERSION} VERSION_LESS 3.12)
cmake_policy(VERSION ${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION})
endif()
# Determine if fmt is built as a subproject (using add_subdirectory)
# or if it is the master project.
if (NOT DEFINED FMT_MASTER_PROJECT)
set(FMT_MASTER_PROJECT OFF)
if (CMAKE_CURRENT_SOURCE_DIR STREQUAL CMAKE_SOURCE_DIR)
set(FMT_MASTER_PROJECT ON)
message(STATUS "CMake version: ${CMAKE_VERSION}")
endif ()
endif ()
# Joins arguments and places the results in ${result_var}.
function(join result_var)
set(result "")
foreach (arg ${ARGN})
set(result "${result}${arg}")
endforeach ()
set(${result_var} "${result}" PARENT_SCOPE)
endfunction()
function(enable_module target)
if (MSVC)
set(BMI ${CMAKE_CURRENT_BINARY_DIR}/${target}.ifc)
target_compile_options(${target}
PRIVATE /interface /ifcOutput ${BMI}
INTERFACE /reference fmt=${BMI})
endif ()
set_target_properties(${target} PROPERTIES ADDITIONAL_CLEAN_FILES ${BMI})
set_source_files_properties(${BMI} PROPERTIES GENERATED ON)
endfunction()
include(CMakeParseArguments)
# Sets a cache variable with a docstring joined from multiple arguments:
# set(<variable> <value>... CACHE <type> <docstring>...)
# This allows splitting a long docstring for readability.
function(set_verbose)
# cmake_parse_arguments is broken in CMake 3.4 (cannot parse CACHE) so use
# list instead.
list(GET ARGN 0 var)
list(REMOVE_AT ARGN 0)
list(GET ARGN 0 val)
list(REMOVE_AT ARGN 0)
list(REMOVE_AT ARGN 0)
list(GET ARGN 0 type)
list(REMOVE_AT ARGN 0)
join(doc ${ARGN})
set(${var} ${val} CACHE ${type} ${doc})
endfunction()
# Set the default CMAKE_BUILD_TYPE to Release.
# This should be done before the project command since the latter can set
# CMAKE_BUILD_TYPE itself (it does so for nmake).
if (FMT_MASTER_PROJECT AND NOT CMAKE_BUILD_TYPE)
set_verbose(CMAKE_BUILD_TYPE Release CACHE STRING
"Choose the type of build, options are: None(CMAKE_CXX_FLAGS or "
"CMAKE_C_FLAGS used) Debug Release RelWithDebInfo MinSizeRel.")
endif ()
project(FMT CXX)
include(GNUInstallDirs)
set_verbose(FMT_INC_DIR ${CMAKE_INSTALL_INCLUDEDIR} CACHE STRING
"Installation directory for include files, a relative path that "
"will be joined with ${CMAKE_INSTALL_PREFIX} or an absolute path.")
option(FMT_PEDANTIC "Enable extra warnings and expensive tests." OFF)
option(FMT_WERROR "Halt the compilation with an error on compiler warnings."
OFF)
# Options that control generation of various targets.
option(FMT_DOC "Generate the doc target." ${FMT_MASTER_PROJECT})
option(FMT_INSTALL "Generate the install target." ${FMT_MASTER_PROJECT})
option(FMT_TEST "Generate the test target." ${FMT_MASTER_PROJECT})
option(FMT_FUZZ "Generate the fuzz target." OFF)
option(FMT_CUDA_TEST "Generate the cuda-test target." OFF)
option(FMT_OS "Include core requiring OS (Windows/Posix) " ON)
option(FMT_MODULE "Build a module instead of a traditional library." OFF)
set(FMT_CAN_MODULE OFF)
if (CMAKE_CXX_STANDARD GREATER 17 AND
# msvc 16.10-pre4
MSVC AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 19.29.30035)
set(FMT_CAN_MODULE ON)
endif ()
if (NOT FMT_CAN_MODULE)
set(FMT_MODULE OFF)
message(STATUS "Module support is disabled.")
endif ()
if (FMT_TEST AND FMT_MODULE)
# The tests require {fmt} to be compiled as traditional library
message(STATUS "Testing is incompatible with build mode 'module'.")
endif ()
# Get version from core.h
file(READ include/fmt/core.h core_h)
if (NOT core_h MATCHES "FMT_VERSION ([0-9]+)([0-9][0-9])([0-9][0-9])")
message(FATAL_ERROR "Cannot get FMT_VERSION from core.h.")
endif ()
# Use math to skip leading zeros if any.
math(EXPR CPACK_PACKAGE_VERSION_MAJOR ${CMAKE_MATCH_1})
math(EXPR CPACK_PACKAGE_VERSION_MINOR ${CMAKE_MATCH_2})
math(EXPR CPACK_PACKAGE_VERSION_PATCH ${CMAKE_MATCH_3})
join(FMT_VERSION ${CPACK_PACKAGE_VERSION_MAJOR}.${CPACK_PACKAGE_VERSION_MINOR}.
${CPACK_PACKAGE_VERSION_PATCH})
message(STATUS "Version: ${FMT_VERSION}")
message(STATUS "Build type: ${CMAKE_BUILD_TYPE}")
if (NOT CMAKE_RUNTIME_OUTPUT_DIRECTORY)
set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)
endif ()
set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH}
"${CMAKE_CURRENT_SOURCE_DIR}/support/cmake")
include(cxx14)
include(CheckCXXCompilerFlag)
include(JoinPaths)
list(FIND CMAKE_CXX_COMPILE_FEATURES "cxx_variadic_templates" index)
if (${index} GREATER -1)
# Use cxx_variadic_templates instead of more appropriate cxx_std_11 for
# compatibility with older CMake versions.
set(FMT_REQUIRED_FEATURES cxx_variadic_templates)
endif ()
message(STATUS "Required features: ${FMT_REQUIRED_FEATURES}")
if (FMT_MASTER_PROJECT AND NOT DEFINED CMAKE_CXX_VISIBILITY_PRESET)
set_verbose(CMAKE_CXX_VISIBILITY_PRESET hidden CACHE STRING
"Preset for the export of private symbols")
set_property(CACHE CMAKE_CXX_VISIBILITY_PRESET PROPERTY STRINGS
hidden default)
endif ()
if (FMT_MASTER_PROJECT AND NOT DEFINED CMAKE_VISIBILITY_INLINES_HIDDEN)
set_verbose(CMAKE_VISIBILITY_INLINES_HIDDEN ON CACHE BOOL
"Whether to add a compile flag to hide symbols of inline functions")
endif ()
if (CMAKE_CXX_COMPILER_ID MATCHES "GNU")
set(PEDANTIC_COMPILE_FLAGS -pedantic-errors -Wall -Wextra -pedantic
-Wold-style-cast -Wundef
-Wredundant-decls -Wwrite-strings -Wpointer-arith
-Wcast-qual -Wformat=2 -Wmissing-include-dirs
-Wcast-align
-Wctor-dtor-privacy -Wdisabled-optimization
-Winvalid-pch -Woverloaded-virtual
-Wconversion -Wswitch-enum -Wundef
-Wno-ctor-dtor-privacy -Wno-format-nonliteral)
if (NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4.6)
set(PEDANTIC_COMPILE_FLAGS ${PEDANTIC_COMPILE_FLAGS}
-Wno-dangling-else -Wno-unused-local-typedefs)
endif ()
if (NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 5.0)
set(PEDANTIC_COMPILE_FLAGS ${PEDANTIC_COMPILE_FLAGS} -Wdouble-promotion
-Wtrampolines -Wzero-as-null-pointer-constant -Wuseless-cast
-Wvector-operation-performance -Wsized-deallocation -Wshadow)
endif ()
if (NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 6.0)
set(PEDANTIC_COMPILE_FLAGS ${PEDANTIC_COMPILE_FLAGS} -Wshift-overflow=2
-Wnull-dereference -Wduplicated-cond)
endif ()
set(WERROR_FLAG -Werror)
endif ()
if (CMAKE_CXX_COMPILER_ID MATCHES "Clang")
set(PEDANTIC_COMPILE_FLAGS -Wall -Wextra -pedantic -Wconversion -Wundef
-Wdeprecated -Wweak-vtables -Wshadow
-Wno-gnu-zero-variadic-macro-arguments)
check_cxx_compiler_flag(-Wzero-as-null-pointer-constant HAS_NULLPTR_WARNING)
if (HAS_NULLPTR_WARNING)
set(PEDANTIC_COMPILE_FLAGS ${PEDANTIC_COMPILE_FLAGS}
-Wzero-as-null-pointer-constant)
endif ()
set(WERROR_FLAG -Werror)
endif ()
if (MSVC)
set(PEDANTIC_COMPILE_FLAGS /W3)
set(WERROR_FLAG /WX)
endif ()
if (FMT_MASTER_PROJECT AND CMAKE_GENERATOR MATCHES "Visual Studio")
# If Microsoft SDK is installed create script run-msbuild.bat that
# calls SetEnv.cmd to set up build environment and runs msbuild.
# It is useful when building Visual Studio projects with the SDK
# toolchain rather than Visual Studio.
include(FindSetEnv)
if (WINSDK_SETENV)
set(MSBUILD_SETUP "call \"${WINSDK_SETENV}\"")
endif ()
# Set FrameworkPathOverride to get rid of MSB3644 warnings.
join(netfxpath
"C:\\Program Files\\Reference Assemblies\\Microsoft\\Framework\\"
".NETFramework\\v4.0")
file(WRITE run-msbuild.bat "
${MSBUILD_SETUP}
${CMAKE_MAKE_PROGRAM} -p:FrameworkPathOverride=\"${netfxpath}\" %*")
endif ()
set(strtod_l_headers stdlib.h)
if (APPLE)
set(strtod_l_headers ${strtod_l_headers} xlocale.h)
endif ()
include(CheckSymbolExists)
if (WIN32)
check_symbol_exists(_strtod_l "${strtod_l_headers}" HAVE_STRTOD_L)
else ()
check_symbol_exists(strtod_l "${strtod_l_headers}" HAVE_STRTOD_L)
endif ()
function(add_headers VAR)
set(headers ${${VAR}})
foreach (header ${ARGN})
set(headers ${headers} include/fmt/${header})
endforeach()
set(${VAR} ${headers} PARENT_SCOPE)
endfunction()
# Define the fmt library, its includes and the needed defines.
add_headers(FMT_HEADERS args.h chrono.h color.h compile.h core.h format.h
format-inl.h locale.h os.h ostream.h printf.h ranges.h
xchar.h)
if (FMT_MODULE)
set(FMT_SOURCES src/fmt.cc)
elseif (FMT_OS)
set(FMT_SOURCES src/format.cc src/os.cc)
else()
set(FMT_SOURCES src/format.cc)
endif ()
add_library(fmt ${FMT_SOURCES} ${FMT_HEADERS})
add_library(fmt::fmt ALIAS fmt)
if (HAVE_STRTOD_L)
target_compile_definitions(fmt PUBLIC FMT_LOCALE)
endif ()
if (MINGW)
check_cxx_compiler_flag("Wa,-mbig-obj" FMT_HAS_MBIG_OBJ)
if (${FMT_HAS_MBIG_OBJ})
target_compile_options(fmt PUBLIC "-Wa,-mbig-obj")
endif()
endif ()
if (FMT_WERROR)
target_compile_options(fmt PRIVATE ${WERROR_FLAG})
endif ()
if (FMT_PEDANTIC)
target_compile_options(fmt PRIVATE ${PEDANTIC_COMPILE_FLAGS})
endif ()
if (FMT_MODULE)
enable_module(fmt)
endif ()
target_compile_features(fmt INTERFACE ${FMT_REQUIRED_FEATURES})
target_include_directories(fmt PUBLIC
$<BUILD_INTERFACE:${PROJECT_SOURCE_DIR}/include>
$<INSTALL_INTERFACE:${FMT_INC_DIR}>)
set(FMT_DEBUG_POSTFIX d CACHE STRING "Debug library postfix.")
set_target_properties(fmt PROPERTIES
VERSION ${FMT_VERSION} SOVERSION ${CPACK_PACKAGE_VERSION_MAJOR}
DEBUG_POSTFIX "${FMT_DEBUG_POSTFIX}")
# Set FMT_LIB_NAME for pkg-config fmt.pc. We cannot use the OUTPUT_NAME target
# property because it's not set by default.
set(FMT_LIB_NAME fmt)
if (CMAKE_BUILD_TYPE STREQUAL "Debug")
set(FMT_LIB_NAME ${FMT_LIB_NAME}${FMT_DEBUG_POSTFIX})
endif ()
if (BUILD_SHARED_LIBS)
if (UNIX AND NOT APPLE AND NOT ${CMAKE_SYSTEM_NAME} MATCHES "SunOS" AND
NOT EMSCRIPTEN)
# Fix rpmlint warning:
# unused-direct-shlib-dependency /usr/lib/libformat.so.1.1.0 /lib/libm.so.6.
target_link_libraries(fmt -Wl,--as-needed)
endif ()
target_compile_definitions(fmt PRIVATE FMT_EXPORT INTERFACE FMT_SHARED)
endif ()
if (FMT_SAFE_DURATION_CAST)
target_compile_definitions(fmt PUBLIC FMT_SAFE_DURATION_CAST)
endif()
add_library(fmt-header-only INTERFACE)
add_library(fmt::fmt-header-only ALIAS fmt-header-only)
target_compile_definitions(fmt-header-only INTERFACE FMT_HEADER_ONLY=1)
target_compile_features(fmt-header-only INTERFACE ${FMT_REQUIRED_FEATURES})
target_include_directories(fmt-header-only INTERFACE
$<BUILD_INTERFACE:${PROJECT_SOURCE_DIR}/include>
$<INSTALL_INTERFACE:${FMT_INC_DIR}>)
# Install targets.
if (FMT_INSTALL)
include(CMakePackageConfigHelpers)
set_verbose(FMT_CMAKE_DIR ${CMAKE_INSTALL_LIBDIR}/cmake/fmt CACHE STRING
"Installation directory for cmake files, a relative path that "
"will be joined with ${CMAKE_INSTALL_PREFIX} or an absolute "
"path.")
set(version_config ${PROJECT_BINARY_DIR}/fmt-config-version.cmake)
set(project_config ${PROJECT_BINARY_DIR}/fmt-config.cmake)
set(pkgconfig ${PROJECT_BINARY_DIR}/fmt.pc)
set(targets_export_name fmt-targets)
set_verbose(FMT_LIB_DIR ${CMAKE_INSTALL_LIBDIR} CACHE STRING
"Installation directory for libraries, a relative path that "
"will be joined to ${CMAKE_INSTALL_PREFIX} or an absolute path.")
set_verbose(FMT_PKGCONFIG_DIR ${CMAKE_INSTALL_LIBDIR}/pkgconfig CACHE PATH
"Installation directory for pkgconfig (.pc) files, a relative "
"path that will be joined with ${CMAKE_INSTALL_PREFIX} or an "
"absolute path.")
# Generate the version, config and target files into the build directory.
write_basic_package_version_file(
${version_config}
VERSION ${FMT_VERSION}
COMPATIBILITY AnyNewerVersion)
join_paths(libdir_for_pc_file "\${exec_prefix}" "${FMT_LIB_DIR}")
join_paths(includedir_for_pc_file "\${prefix}" "${FMT_INC_DIR}")
configure_file(
"${PROJECT_SOURCE_DIR}/support/cmake/fmt.pc.in"
"${pkgconfig}"
@ONLY)
configure_package_config_file(
${PROJECT_SOURCE_DIR}/support/cmake/fmt-config.cmake.in
${project_config}
INSTALL_DESTINATION ${FMT_CMAKE_DIR})
set(INSTALL_TARGETS fmt fmt-header-only)
# Install the library and headers.
install(TARGETS ${INSTALL_TARGETS} EXPORT ${targets_export_name}
LIBRARY DESTINATION ${FMT_LIB_DIR}
ARCHIVE DESTINATION ${FMT_LIB_DIR}
RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR})
# Use a namespace because CMake provides better diagnostics for namespaced
# imported targets.
export(TARGETS ${INSTALL_TARGETS} NAMESPACE fmt::
FILE ${PROJECT_BINARY_DIR}/${targets_export_name}.cmake)
# Install version, config and target files.
install(
FILES ${project_config} ${version_config}
DESTINATION ${FMT_CMAKE_DIR})
install(EXPORT ${targets_export_name} DESTINATION ${FMT_CMAKE_DIR}
NAMESPACE fmt::)
install(FILES $<TARGET_PDB_FILE:${INSTALL_TARGETS}>
DESTINATION ${FMT_LIB_DIR} OPTIONAL)
install(FILES ${FMT_HEADERS} DESTINATION "${FMT_INC_DIR}/fmt")
install(FILES "${pkgconfig}" DESTINATION "${FMT_PKGCONFIG_DIR}")
endif ()
if (FMT_DOC)
add_subdirectory(doc)
endif ()
if (FMT_TEST)
enable_testing()
add_subdirectory(test)
endif ()
# Control fuzzing independent of the unit tests.
if (FMT_FUZZ)
add_subdirectory(test/fuzzing)
# The FMT_FUZZ macro is used to prevent resource exhaustion in fuzzing
# mode and make fuzzing practically possible. It is similar to
# FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION but uses a different name to
# avoid interfering with fuzzing of projects that use {fmt}.
# See also https://llvm.org/docs/LibFuzzer.html#fuzzer-friendly-build-mode.
target_compile_definitions(fmt PUBLIC FMT_FUZZ)
endif ()
set(gitignore ${PROJECT_SOURCE_DIR}/.gitignore)
if (FMT_MASTER_PROJECT AND EXISTS ${gitignore})
# Get the list of ignored files from .gitignore.
file (STRINGS ${gitignore} lines)
list(REMOVE_ITEM lines /doc/html)
foreach (line ${lines})
string(REPLACE "." "[.]" line "${line}")
string(REPLACE "*" ".*" line "${line}")
set(ignored_files ${ignored_files} "${line}$" "${line}/")
endforeach ()
set(ignored_files ${ignored_files}
/.git /breathe /format-benchmark sphinx/ .buildinfo .doctrees)
set(CPACK_SOURCE_GENERATOR ZIP)
set(CPACK_SOURCE_IGNORE_FILES ${ignored_files})
set(CPACK_SOURCE_PACKAGE_FILE_NAME fmt-${FMT_VERSION})
set(CPACK_PACKAGE_NAME fmt)
set(CPACK_RESOURCE_FILE_README ${PROJECT_SOURCE_DIR}/README.rst)
include(CPack)
endif ()

View File

@ -0,0 +1,27 @@
Copyright (c) 2012 - present, Victor Zverovich

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

--- Optional exception to the license ---

As an exception, if, as a result of your compiling your source code, portions
of this Software are embedded into a machine-executable object form of such
source code, you may redistribute such embedded portions in such object form
without including the above copyright and permission notices.

View File

@ -0,0 +1,232 @@
// Formatting library for C++ - dynamic format arguments
//
// Copyright (c) 2012 - present, Victor Zverovich
// All rights reserved.
//
// For the license information refer to format.h.
#ifndef FMT_ARGS_H_
#define FMT_ARGS_H_
#include <functional> // std::reference_wrapper
#include <memory> // std::unique_ptr
#include <vector>
#include "core.h"
FMT_BEGIN_NAMESPACE
namespace detail {
template <typename T> struct is_reference_wrapper : std::false_type {};
template <typename T>
struct is_reference_wrapper<std::reference_wrapper<T>> : std::true_type {};
template <typename T> const T& unwrap(const T& v) { return v; }
template <typename T> const T& unwrap(const std::reference_wrapper<T>& v) {
return static_cast<const T&>(v);
}
class dynamic_arg_list {
// Workaround for clang's -Wweak-vtables. Unlike for regular classes, for
// templates it doesn't complain about the inability to deduce a single
// translation unit for placing the vtable, so node is made a fake template.
template <typename = void> struct node {
virtual ~node() = default;
std::unique_ptr<node<>> next;
};
template <typename T> struct typed_node : node<> {
T value;
template <typename Arg>
FMT_CONSTEXPR typed_node(const Arg& arg) : value(arg) {}
template <typename Char>
FMT_CONSTEXPR typed_node(const basic_string_view<Char>& arg)
: value(arg.data(), arg.size()) {}
};
std::unique_ptr<node<>> head_;
public:
template <typename T, typename Arg> const T& push(const Arg& arg) {
auto new_node = std::unique_ptr<typed_node<T>>(new typed_node<T>(arg));
auto& value = new_node->value;
new_node->next = std::move(head_);
head_ = std::move(new_node);
return value;
}
};
} // namespace detail
/**
\rst
A dynamic version of `fmt::format_arg_store`.
It's equipped with storage for potentially temporary objects whose lifetimes
could be shorter than that of the format arguments object.
It can be implicitly converted into `~fmt::basic_format_args` for passing
into type-erased formatting functions such as `~fmt::vformat`.
\endrst
*/
template <typename Context>
class dynamic_format_arg_store
#if FMT_GCC_VERSION && FMT_GCC_VERSION < 409
// Workaround a GCC template argument substitution bug.
: public basic_format_args<Context>
#endif
{
private:
using char_type = typename Context::char_type;
template <typename T> struct need_copy {
static constexpr detail::type mapped_type =
detail::mapped_type_constant<T, Context>::value;
enum {
value = !(detail::is_reference_wrapper<T>::value ||
std::is_same<T, basic_string_view<char_type>>::value ||
std::is_same<T, detail::std_string_view<char_type>>::value ||
(mapped_type != detail::type::cstring_type &&
mapped_type != detail::type::string_type &&
mapped_type != detail::type::custom_type))
};
};
template <typename T>
using stored_type = conditional_t<detail::is_string<T>::value &&
!has_formatter<T, Context>::value &&
!detail::is_reference_wrapper<T>::value,
std::basic_string<char_type>, T>;
// Storage of basic_format_arg must be contiguous.
std::vector<basic_format_arg<Context>> data_;
std::vector<detail::named_arg_info<char_type>> named_info_;
// Storage of arguments not fitting into basic_format_arg must grow
// without relocation because items in data_ refer to it.
detail::dynamic_arg_list dynamic_args_;
friend class basic_format_args<Context>;
unsigned long long get_types() const {
return detail::is_unpacked_bit | data_.size() |
(named_info_.empty()
? 0ULL
: static_cast<unsigned long long>(detail::has_named_args_bit));
}
const basic_format_arg<Context>* data() const {
return named_info_.empty() ? data_.data() : data_.data() + 1;
}
template <typename T> void emplace_arg(const T& arg) {
data_.emplace_back(detail::make_arg<Context>(arg));
}
template <typename T>
void emplace_arg(const detail::named_arg<char_type, T>& arg) {
if (named_info_.empty()) {
constexpr const detail::named_arg_info<char_type>* zero_ptr{nullptr};
data_.insert(data_.begin(), {zero_ptr, 0});
}
data_.emplace_back(detail::make_arg<Context>(detail::unwrap(arg.value)));
auto pop_one = [](std::vector<basic_format_arg<Context>>* data) {
data->pop_back();
};
std::unique_ptr<std::vector<basic_format_arg<Context>>, decltype(pop_one)>
guard{&data_, pop_one};
named_info_.push_back({arg.name, static_cast<int>(data_.size() - 2u)});
data_[0].value_.named_args = {named_info_.data(), named_info_.size()};
guard.release();
}
public:
/**
\rst
Adds an argument into the dynamic store for later passing to a formatting
function.
Note that custom types and string types (but not string views) are copied
into the store dynamically allocating memory if necessary.
**Example**::
fmt::dynamic_format_arg_store<fmt::format_context> store;
store.push_back(42);
store.push_back("abc");
store.push_back(1.5f);
std::string result = fmt::vformat("{} and {} and {}", store);
\endrst
*/
template <typename T> void push_back(const T& arg) {
if (detail::const_check(need_copy<T>::value))
emplace_arg(dynamic_args_.push<stored_type<T>>(arg));
else
emplace_arg(detail::unwrap(arg));
}
/**
\rst
Adds a reference to the argument into the dynamic store for later passing to
a formatting function.
**Example**::
fmt::dynamic_format_arg_store<fmt::format_context> store;
char band[] = "Rolling Stones";
store.push_back(std::cref(band));
band[9] = 'c'; // Changing band affects the output.
std::string result = fmt::vformat("{}", store);
// result == "Rolling Scones"
\endrst
*/
template <typename T> void push_back(std::reference_wrapper<T> arg) {
static_assert(
need_copy<T>::value,
"objects of built-in types and string views are always copied");
emplace_arg(arg.get());
}
/**
Adds a named argument to the dynamic store for later passing to a formatting
function. ``std::reference_wrapper`` is supported to avoid copying the
argument. The name is always copied into the store.
*/
template <typename T>
void push_back(const detail::named_arg<char_type, T>& arg) {
const char_type* arg_name =
dynamic_args_.push<std::basic_string<char_type>>(arg.name).c_str();
if (detail::const_check(need_copy<T>::value)) {
emplace_arg(
fmt::arg(arg_name, dynamic_args_.push<stored_type<T>>(arg.value)));
} else {
emplace_arg(fmt::arg(arg_name, arg.value));
}
}
/** Erase all elements from the store */
void clear() {
data_.clear();
named_info_.clear();
dynamic_args_ = detail::dynamic_arg_list();
}
/**
\rst
Reserves space to store at least *new_cap* arguments including
*new_cap_named* named arguments.
\endrst
*/
void reserve(size_t new_cap, size_t new_cap_named) {
FMT_ASSERT(new_cap >= new_cap_named,
"Set of arguments includes set of named arguments");
data_.reserve(new_cap);
named_info_.reserve(new_cap_named);
}
};
FMT_END_NAMESPACE
#endif // FMT_ARGS_H_
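
A minimal usage sketch of fmt::dynamic_format_arg_store, assembled from the doc-comment
examples above; the includes and main() scaffolding are illustrative assumptions rather
than part of the vendored sources:

#include <fmt/args.h>
#include <fmt/core.h>
#include <string>

int main() {
  fmt::dynamic_format_arg_store<fmt::format_context> store;
  store.push_back(42);                    // built-in types are stored by value
  store.push_back(std::string("abc"));    // string types are copied into the store
  store.push_back(fmt::arg("pi", 3.14));  // named argument; the name is copied too
  // The store converts implicitly to basic_format_args for type-erased APIs.
  std::string s = fmt::vformat("{} {} {pi}", store);
  fmt::print("{}\n", s);
  return 0;
}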

File diff suppressed because it is too large

View File

@ -0,0 +1,627 @@
// Formatting library for C++ - color support
//
// Copyright (c) 2018 - present, Victor Zverovich and fmt contributors
// All rights reserved.
//
// For the license information refer to format.h.
#ifndef FMT_COLOR_H_
#define FMT_COLOR_H_
#include "format.h"
// __declspec(deprecated) is broken in some MSVC versions.
#if FMT_MSC_VER
# define FMT_DEPRECATED_NONMSVC
#else
# define FMT_DEPRECATED_NONMSVC FMT_DEPRECATED
#endif
FMT_BEGIN_NAMESPACE
FMT_MODULE_EXPORT_BEGIN
enum class color : uint32_t {
alice_blue = 0xF0F8FF, // rgb(240,248,255)
antique_white = 0xFAEBD7, // rgb(250,235,215)
aqua = 0x00FFFF, // rgb(0,255,255)
aquamarine = 0x7FFFD4, // rgb(127,255,212)
azure = 0xF0FFFF, // rgb(240,255,255)
beige = 0xF5F5DC, // rgb(245,245,220)
bisque = 0xFFE4C4, // rgb(255,228,196)
black = 0x000000, // rgb(0,0,0)
blanched_almond = 0xFFEBCD, // rgb(255,235,205)
blue = 0x0000FF, // rgb(0,0,255)
blue_violet = 0x8A2BE2, // rgb(138,43,226)
brown = 0xA52A2A, // rgb(165,42,42)
burly_wood = 0xDEB887, // rgb(222,184,135)
cadet_blue = 0x5F9EA0, // rgb(95,158,160)
chartreuse = 0x7FFF00, // rgb(127,255,0)
chocolate = 0xD2691E, // rgb(210,105,30)
coral = 0xFF7F50, // rgb(255,127,80)
cornflower_blue = 0x6495ED, // rgb(100,149,237)
cornsilk = 0xFFF8DC, // rgb(255,248,220)
crimson = 0xDC143C, // rgb(220,20,60)
cyan = 0x00FFFF, // rgb(0,255,255)
dark_blue = 0x00008B, // rgb(0,0,139)
dark_cyan = 0x008B8B, // rgb(0,139,139)
dark_golden_rod = 0xB8860B, // rgb(184,134,11)
dark_gray = 0xA9A9A9, // rgb(169,169,169)
dark_green = 0x006400, // rgb(0,100,0)
dark_khaki = 0xBDB76B, // rgb(189,183,107)
dark_magenta = 0x8B008B, // rgb(139,0,139)
dark_olive_green = 0x556B2F, // rgb(85,107,47)
dark_orange = 0xFF8C00, // rgb(255,140,0)
dark_orchid = 0x9932CC, // rgb(153,50,204)
dark_red = 0x8B0000, // rgb(139,0,0)
dark_salmon = 0xE9967A, // rgb(233,150,122)
dark_sea_green = 0x8FBC8F, // rgb(143,188,143)
dark_slate_blue = 0x483D8B, // rgb(72,61,139)
dark_slate_gray = 0x2F4F4F, // rgb(47,79,79)
dark_turquoise = 0x00CED1, // rgb(0,206,209)
dark_violet = 0x9400D3, // rgb(148,0,211)
deep_pink = 0xFF1493, // rgb(255,20,147)
deep_sky_blue = 0x00BFFF, // rgb(0,191,255)
dim_gray = 0x696969, // rgb(105,105,105)
dodger_blue = 0x1E90FF, // rgb(30,144,255)
fire_brick = 0xB22222, // rgb(178,34,34)
floral_white = 0xFFFAF0, // rgb(255,250,240)
forest_green = 0x228B22, // rgb(34,139,34)
fuchsia = 0xFF00FF, // rgb(255,0,255)
gainsboro = 0xDCDCDC, // rgb(220,220,220)
ghost_white = 0xF8F8FF, // rgb(248,248,255)
gold = 0xFFD700, // rgb(255,215,0)
golden_rod = 0xDAA520, // rgb(218,165,32)
gray = 0x808080, // rgb(128,128,128)
green = 0x008000, // rgb(0,128,0)
green_yellow = 0xADFF2F, // rgb(173,255,47)
honey_dew = 0xF0FFF0, // rgb(240,255,240)
hot_pink = 0xFF69B4, // rgb(255,105,180)
indian_red = 0xCD5C5C, // rgb(205,92,92)
indigo = 0x4B0082, // rgb(75,0,130)
ivory = 0xFFFFF0, // rgb(255,255,240)
khaki = 0xF0E68C, // rgb(240,230,140)
lavender = 0xE6E6FA, // rgb(230,230,250)
lavender_blush = 0xFFF0F5, // rgb(255,240,245)
lawn_green = 0x7CFC00, // rgb(124,252,0)
lemon_chiffon = 0xFFFACD, // rgb(255,250,205)
light_blue = 0xADD8E6, // rgb(173,216,230)
light_coral = 0xF08080, // rgb(240,128,128)
light_cyan = 0xE0FFFF, // rgb(224,255,255)
light_golden_rod_yellow = 0xFAFAD2, // rgb(250,250,210)
light_gray = 0xD3D3D3, // rgb(211,211,211)
light_green = 0x90EE90, // rgb(144,238,144)
light_pink = 0xFFB6C1, // rgb(255,182,193)
light_salmon = 0xFFA07A, // rgb(255,160,122)
light_sea_green = 0x20B2AA, // rgb(32,178,170)
light_sky_blue = 0x87CEFA, // rgb(135,206,250)
light_slate_gray = 0x778899, // rgb(119,136,153)
light_steel_blue = 0xB0C4DE, // rgb(176,196,222)
light_yellow = 0xFFFFE0, // rgb(255,255,224)
lime = 0x00FF00, // rgb(0,255,0)
lime_green = 0x32CD32, // rgb(50,205,50)
linen = 0xFAF0E6, // rgb(250,240,230)
magenta = 0xFF00FF, // rgb(255,0,255)
maroon = 0x800000, // rgb(128,0,0)
medium_aquamarine = 0x66CDAA, // rgb(102,205,170)
medium_blue = 0x0000CD, // rgb(0,0,205)
medium_orchid = 0xBA55D3, // rgb(186,85,211)
medium_purple = 0x9370DB, // rgb(147,112,219)
medium_sea_green = 0x3CB371, // rgb(60,179,113)
medium_slate_blue = 0x7B68EE, // rgb(123,104,238)
medium_spring_green = 0x00FA9A, // rgb(0,250,154)
medium_turquoise = 0x48D1CC, // rgb(72,209,204)
medium_violet_red = 0xC71585, // rgb(199,21,133)
midnight_blue = 0x191970, // rgb(25,25,112)
mint_cream = 0xF5FFFA, // rgb(245,255,250)
misty_rose = 0xFFE4E1, // rgb(255,228,225)
moccasin = 0xFFE4B5, // rgb(255,228,181)
navajo_white = 0xFFDEAD, // rgb(255,222,173)
navy = 0x000080, // rgb(0,0,128)
old_lace = 0xFDF5E6, // rgb(253,245,230)
olive = 0x808000, // rgb(128,128,0)
olive_drab = 0x6B8E23, // rgb(107,142,35)
orange = 0xFFA500, // rgb(255,165,0)
orange_red = 0xFF4500, // rgb(255,69,0)
orchid = 0xDA70D6, // rgb(218,112,214)
pale_golden_rod = 0xEEE8AA, // rgb(238,232,170)
pale_green = 0x98FB98, // rgb(152,251,152)
pale_turquoise = 0xAFEEEE, // rgb(175,238,238)
pale_violet_red = 0xDB7093, // rgb(219,112,147)
papaya_whip = 0xFFEFD5, // rgb(255,239,213)
peach_puff = 0xFFDAB9, // rgb(255,218,185)
peru = 0xCD853F, // rgb(205,133,63)
pink = 0xFFC0CB, // rgb(255,192,203)
plum = 0xDDA0DD, // rgb(221,160,221)
powder_blue = 0xB0E0E6, // rgb(176,224,230)
purple = 0x800080, // rgb(128,0,128)
rebecca_purple = 0x663399, // rgb(102,51,153)
red = 0xFF0000, // rgb(255,0,0)
rosy_brown = 0xBC8F8F, // rgb(188,143,143)
royal_blue = 0x4169E1, // rgb(65,105,225)
saddle_brown = 0x8B4513, // rgb(139,69,19)
salmon = 0xFA8072, // rgb(250,128,114)
sandy_brown = 0xF4A460, // rgb(244,164,96)
sea_green = 0x2E8B57, // rgb(46,139,87)
sea_shell = 0xFFF5EE, // rgb(255,245,238)
sienna = 0xA0522D, // rgb(160,82,45)
silver = 0xC0C0C0, // rgb(192,192,192)
sky_blue = 0x87CEEB, // rgb(135,206,235)
slate_blue = 0x6A5ACD, // rgb(106,90,205)
slate_gray = 0x708090, // rgb(112,128,144)
snow = 0xFFFAFA, // rgb(255,250,250)
spring_green = 0x00FF7F, // rgb(0,255,127)
steel_blue = 0x4682B4, // rgb(70,130,180)
tan = 0xD2B48C, // rgb(210,180,140)
teal = 0x008080, // rgb(0,128,128)
thistle = 0xD8BFD8, // rgb(216,191,216)
tomato = 0xFF6347, // rgb(255,99,71)
turquoise = 0x40E0D0, // rgb(64,224,208)
violet = 0xEE82EE, // rgb(238,130,238)
wheat = 0xF5DEB3, // rgb(245,222,179)
white = 0xFFFFFF, // rgb(255,255,255)
white_smoke = 0xF5F5F5, // rgb(245,245,245)
yellow = 0xFFFF00, // rgb(255,255,0)
yellow_green = 0x9ACD32 // rgb(154,205,50)
}; // enum class color
enum class terminal_color : uint8_t {
black = 30,
red,
green,
yellow,
blue,
magenta,
cyan,
white,
bright_black = 90,
bright_red,
bright_green,
bright_yellow,
bright_blue,
bright_magenta,
bright_cyan,
bright_white
};
enum class emphasis : uint8_t {
bold = 1,
italic = 1 << 1,
underline = 1 << 2,
strikethrough = 1 << 3
};
// rgb is a struct for red, green and blue colors.
// Using the name "rgb" makes some editors show the color in a tooltip.
struct rgb {
FMT_CONSTEXPR rgb() : r(0), g(0), b(0) {}
FMT_CONSTEXPR rgb(uint8_t r_, uint8_t g_, uint8_t b_) : r(r_), g(g_), b(b_) {}
FMT_CONSTEXPR rgb(uint32_t hex)
: r((hex >> 16) & 0xFF), g((hex >> 8) & 0xFF), b(hex & 0xFF) {}
FMT_CONSTEXPR rgb(color hex)
: r((uint32_t(hex) >> 16) & 0xFF),
g((uint32_t(hex) >> 8) & 0xFF),
b(uint32_t(hex) & 0xFF) {}
uint8_t r;
uint8_t g;
uint8_t b;
};
FMT_BEGIN_DETAIL_NAMESPACE
// color_type holds either an RGB color or a terminal color.
struct color_type {
FMT_CONSTEXPR color_type() FMT_NOEXCEPT : is_rgb(), value{} {}
FMT_CONSTEXPR color_type(color rgb_color) FMT_NOEXCEPT : is_rgb(true),
value{} {
value.rgb_color = static_cast<uint32_t>(rgb_color);
}
FMT_CONSTEXPR color_type(rgb rgb_color) FMT_NOEXCEPT : is_rgb(true), value{} {
value.rgb_color = (static_cast<uint32_t>(rgb_color.r) << 16) |
(static_cast<uint32_t>(rgb_color.g) << 8) | rgb_color.b;
}
FMT_CONSTEXPR color_type(terminal_color term_color) FMT_NOEXCEPT : is_rgb(),
value{} {
value.term_color = static_cast<uint8_t>(term_color);
}
bool is_rgb;
union color_union {
uint8_t term_color;
uint32_t rgb_color;
} value;
};
FMT_END_DETAIL_NAMESPACE
/** A text style consisting of foreground and background colors and emphasis. */
class text_style {
public:
FMT_CONSTEXPR text_style(emphasis em = emphasis()) FMT_NOEXCEPT
: set_foreground_color(),
set_background_color(),
ems(em) {}
FMT_CONSTEXPR text_style& operator|=(const text_style& rhs) {
if (!set_foreground_color) {
set_foreground_color = rhs.set_foreground_color;
foreground_color = rhs.foreground_color;
} else if (rhs.set_foreground_color) {
if (!foreground_color.is_rgb || !rhs.foreground_color.is_rgb)
FMT_THROW(format_error("can't OR a terminal color"));
foreground_color.value.rgb_color |= rhs.foreground_color.value.rgb_color;
}
if (!set_background_color) {
set_background_color = rhs.set_background_color;
background_color = rhs.background_color;
} else if (rhs.set_background_color) {
if (!background_color.is_rgb || !rhs.background_color.is_rgb)
FMT_THROW(format_error("can't OR a terminal color"));
background_color.value.rgb_color |= rhs.background_color.value.rgb_color;
}
ems = static_cast<emphasis>(static_cast<uint8_t>(ems) |
static_cast<uint8_t>(rhs.ems));
return *this;
}
friend FMT_CONSTEXPR text_style operator|(text_style lhs,
const text_style& rhs) {
return lhs |= rhs;
}
FMT_DEPRECATED_NONMSVC FMT_CONSTEXPR text_style& operator&=(
const text_style& rhs) {
return and_assign(rhs);
}
FMT_DEPRECATED_NONMSVC friend FMT_CONSTEXPR text_style
operator&(text_style lhs, const text_style& rhs) {
return lhs.and_assign(rhs);
}
FMT_CONSTEXPR bool has_foreground() const FMT_NOEXCEPT {
return set_foreground_color;
}
FMT_CONSTEXPR bool has_background() const FMT_NOEXCEPT {
return set_background_color;
}
FMT_CONSTEXPR bool has_emphasis() const FMT_NOEXCEPT {
return static_cast<uint8_t>(ems) != 0;
}
FMT_CONSTEXPR detail::color_type get_foreground() const FMT_NOEXCEPT {
FMT_ASSERT(has_foreground(), "no foreground specified for this style");
return foreground_color;
}
FMT_CONSTEXPR detail::color_type get_background() const FMT_NOEXCEPT {
FMT_ASSERT(has_background(), "no background specified for this style");
return background_color;
}
FMT_CONSTEXPR emphasis get_emphasis() const FMT_NOEXCEPT {
FMT_ASSERT(has_emphasis(), "no emphasis specified for this style");
return ems;
}
private:
FMT_CONSTEXPR text_style(bool is_foreground,
detail::color_type text_color) FMT_NOEXCEPT
: set_foreground_color(),
set_background_color(),
ems() {
if (is_foreground) {
foreground_color = text_color;
set_foreground_color = true;
} else {
background_color = text_color;
set_background_color = true;
}
}
// DEPRECATED!
FMT_CONSTEXPR text_style& and_assign(const text_style& rhs) {
if (!set_foreground_color) {
set_foreground_color = rhs.set_foreground_color;
foreground_color = rhs.foreground_color;
} else if (rhs.set_foreground_color) {
if (!foreground_color.is_rgb || !rhs.foreground_color.is_rgb)
FMT_THROW(format_error("can't AND a terminal color"));
foreground_color.value.rgb_color &= rhs.foreground_color.value.rgb_color;
}
if (!set_background_color) {
set_background_color = rhs.set_background_color;
background_color = rhs.background_color;
} else if (rhs.set_background_color) {
if (!background_color.is_rgb || !rhs.background_color.is_rgb)
FMT_THROW(format_error("can't AND a terminal color"));
background_color.value.rgb_color &= rhs.background_color.value.rgb_color;
}
ems = static_cast<emphasis>(static_cast<uint8_t>(ems) &
static_cast<uint8_t>(rhs.ems));
return *this;
}
friend FMT_CONSTEXPR_DECL text_style fg(detail::color_type foreground)
FMT_NOEXCEPT;
friend FMT_CONSTEXPR_DECL text_style bg(detail::color_type background)
FMT_NOEXCEPT;
detail::color_type foreground_color;
detail::color_type background_color;
bool set_foreground_color;
bool set_background_color;
emphasis ems;
};
/** Creates a text style from the foreground (text) color. */
FMT_CONSTEXPR inline text_style fg(detail::color_type foreground) FMT_NOEXCEPT {
return text_style(true, foreground);
}
/** Creates a text style from the background color. */
FMT_CONSTEXPR inline text_style bg(detail::color_type background) FMT_NOEXCEPT {
return text_style(false, background);
}
FMT_CONSTEXPR inline text_style operator|(emphasis lhs,
emphasis rhs) FMT_NOEXCEPT {
return text_style(lhs) | rhs;
}
FMT_BEGIN_DETAIL_NAMESPACE
template <typename Char> struct ansi_color_escape {
FMT_CONSTEXPR ansi_color_escape(detail::color_type text_color,
const char* esc) FMT_NOEXCEPT {
// If we have a terminal color, we need to output another escape code
// sequence.
if (!text_color.is_rgb) {
bool is_background = esc == string_view("\x1b[48;2;");
uint32_t value = text_color.value.term_color;
// Background ANSI color codes are the same as the foreground ones but
// with 10 added.
if (is_background) value += 10u;
size_t index = 0;
buffer[index++] = static_cast<Char>('\x1b');
buffer[index++] = static_cast<Char>('[');
if (value >= 100u) {
buffer[index++] = static_cast<Char>('1');
value %= 100u;
}
buffer[index++] = static_cast<Char>('0' + value / 10u);
buffer[index++] = static_cast<Char>('0' + value % 10u);
buffer[index++] = static_cast<Char>('m');
buffer[index++] = static_cast<Char>('\0');
return;
}
for (int i = 0; i < 7; i++) {
buffer[i] = static_cast<Char>(esc[i]);
}
rgb color(text_color.value.rgb_color);
to_esc(color.r, buffer + 7, ';');
to_esc(color.g, buffer + 11, ';');
to_esc(color.b, buffer + 15, 'm');
buffer[19] = static_cast<Char>(0);
}
FMT_CONSTEXPR ansi_color_escape(emphasis em) FMT_NOEXCEPT {
uint8_t em_codes[4] = {};
uint8_t em_bits = static_cast<uint8_t>(em);
if (em_bits & static_cast<uint8_t>(emphasis::bold)) em_codes[0] = 1;
if (em_bits & static_cast<uint8_t>(emphasis::italic)) em_codes[1] = 3;
if (em_bits & static_cast<uint8_t>(emphasis::underline)) em_codes[2] = 4;
if (em_bits & static_cast<uint8_t>(emphasis::strikethrough))
em_codes[3] = 9;
size_t index = 0;
for (int i = 0; i < 4; ++i) {
if (!em_codes[i]) continue;
buffer[index++] = static_cast<Char>('\x1b');
buffer[index++] = static_cast<Char>('[');
buffer[index++] = static_cast<Char>('0' + em_codes[i]);
buffer[index++] = static_cast<Char>('m');
}
buffer[index++] = static_cast<Char>(0);
}
FMT_CONSTEXPR operator const Char*() const FMT_NOEXCEPT { return buffer; }
FMT_CONSTEXPR const Char* begin() const FMT_NOEXCEPT { return buffer; }
FMT_CONSTEXPR_CHAR_TRAITS const Char* end() const FMT_NOEXCEPT {
return buffer + std::char_traits<Char>::length(buffer);
}
private:
Char buffer[7u + 3u * 4u + 1u];
static FMT_CONSTEXPR void to_esc(uint8_t c, Char* out,
char delimiter) FMT_NOEXCEPT {
out[0] = static_cast<Char>('0' + c / 100);
out[1] = static_cast<Char>('0' + c / 10 % 10);
out[2] = static_cast<Char>('0' + c % 10);
out[3] = static_cast<Char>(delimiter);
}
};
template <typename Char>
FMT_CONSTEXPR ansi_color_escape<Char> make_foreground_color(
detail::color_type foreground) FMT_NOEXCEPT {
return ansi_color_escape<Char>(foreground, "\x1b[38;2;");
}
template <typename Char>
FMT_CONSTEXPR ansi_color_escape<Char> make_background_color(
detail::color_type background) FMT_NOEXCEPT {
return ansi_color_escape<Char>(background, "\x1b[48;2;");
}
template <typename Char>
FMT_CONSTEXPR ansi_color_escape<Char> make_emphasis(emphasis em) FMT_NOEXCEPT {
return ansi_color_escape<Char>(em);
}
template <typename Char>
inline void fputs(const Char* chars, FILE* stream) FMT_NOEXCEPT {
std::fputs(chars, stream);
}
template <>
inline void fputs<wchar_t>(const wchar_t* chars, FILE* stream) FMT_NOEXCEPT {
std::fputws(chars, stream);
}
template <typename Char> inline void reset_color(FILE* stream) FMT_NOEXCEPT {
fputs("\x1b[0m", stream);
}
template <> inline void reset_color<wchar_t>(FILE* stream) FMT_NOEXCEPT {
fputs(L"\x1b[0m", stream);
}
template <typename Char>
inline void reset_color(buffer<Char>& buffer) FMT_NOEXCEPT {
auto reset_color = string_view("\x1b[0m");
buffer.append(reset_color.begin(), reset_color.end());
}
template <typename Char>
void vformat_to(buffer<Char>& buf, const text_style& ts,
basic_string_view<Char> format_str,
basic_format_args<buffer_context<type_identity_t<Char>>> args) {
bool has_style = false;
if (ts.has_emphasis()) {
has_style = true;
auto emphasis = detail::make_emphasis<Char>(ts.get_emphasis());
buf.append(emphasis.begin(), emphasis.end());
}
if (ts.has_foreground()) {
has_style = true;
auto foreground = detail::make_foreground_color<Char>(ts.get_foreground());
buf.append(foreground.begin(), foreground.end());
}
if (ts.has_background()) {
has_style = true;
auto background = detail::make_background_color<Char>(ts.get_background());
buf.append(background.begin(), background.end());
}
detail::vformat_to(buf, format_str, args, {});
if (has_style) detail::reset_color<Char>(buf);
}
FMT_END_DETAIL_NAMESPACE
template <typename S, typename Char = char_t<S>>
void vprint(std::FILE* f, const text_style& ts, const S& format,
basic_format_args<buffer_context<type_identity_t<Char>>> args) {
basic_memory_buffer<Char> buf;
detail::vformat_to(buf, ts, to_string_view(format), args);
buf.push_back(Char(0));
detail::fputs(buf.data(), f);
}
/**
\rst
Formats a string and prints it to the specified file stream using ANSI
escape sequences to specify text formatting.
**Example**::
fmt::print(fmt::emphasis::bold | fg(fmt::color::red),
"Elapsed time: {0:.2f} seconds", 1.23);
\endrst
*/
template <typename S, typename... Args,
FMT_ENABLE_IF(detail::is_string<S>::value)>
void print(std::FILE* f, const text_style& ts, const S& format_str,
const Args&... args) {
vprint(f, ts, format_str,
fmt::make_args_checked<Args...>(format_str, args...));
}
/**
\rst
Formats a string and prints it to stdout using ANSI escape sequences to
specify text formatting.
**Example**::
fmt::print(fmt::emphasis::bold | fg(fmt::color::red),
"Elapsed time: {0:.2f} seconds", 1.23);
\endrst
*/
template <typename S, typename... Args,
FMT_ENABLE_IF(detail::is_string<S>::value)>
void print(const text_style& ts, const S& format_str, const Args&... args) {
return print(stdout, ts, format_str, args...);
}
template <typename S, typename Char = char_t<S>>
inline std::basic_string<Char> vformat(
const text_style& ts, const S& format_str,
basic_format_args<buffer_context<type_identity_t<Char>>> args) {
basic_memory_buffer<Char> buf;
detail::vformat_to(buf, ts, to_string_view(format_str), args);
return fmt::to_string(buf);
}
/**
\rst
Formats arguments and returns the result as a string using ANSI
escape sequences to specify text formatting.
**Example**::
#include <fmt/color.h>
std::string message = fmt::format(fmt::emphasis::bold | fg(fmt::color::red),
"The answer is {}", 42);
\endrst
*/
template <typename S, typename... Args, typename Char = char_t<S>>
inline std::basic_string<Char> format(const text_style& ts, const S& format_str,
const Args&... args) {
return fmt::vformat(ts, to_string_view(format_str),
fmt::make_args_checked<Args...>(format_str, args...));
}
/**
Formats a string with the given text_style and writes the output to ``out``.
*/
template <typename OutputIt, typename Char,
FMT_ENABLE_IF(detail::is_output_iterator<OutputIt, Char>::value)>
OutputIt vformat_to(
OutputIt out, const text_style& ts, basic_string_view<Char> format_str,
basic_format_args<buffer_context<type_identity_t<Char>>> args) {
auto&& buf = detail::get_buffer<Char>(out);
detail::vformat_to(buf, ts, format_str, args);
return detail::get_iterator(buf);
}
/**
\rst
Formats arguments with the given text_style, writes the result to the output
iterator ``out`` and returns the iterator past the end of the output range.
**Example**::
std::vector<char> out;
fmt::format_to(std::back_inserter(out),
fmt::emphasis::bold | fg(fmt::color::red), "{}", 42);
\endrst
*/
template <typename OutputIt, typename S, typename... Args,
bool enable = detail::is_output_iterator<OutputIt, char_t<S>>::value&&
detail::is_string<S>::value>
inline auto format_to(OutputIt out, const text_style& ts, const S& format_str,
Args&&... args) ->
typename std::enable_if<enable, OutputIt>::type {
return vformat_to(out, ts, to_string_view(format_str),
fmt::make_args_checked<Args...>(format_str, args...));
}
FMT_MODULE_EXPORT_END
FMT_END_NAMESPACE
#endif // FMT_COLOR_H_
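
A minimal sketch of the text-style API declared above, using the same calls as the
doc-comment examples (the includes and main() scaffolding are illustrative assumptions):

#include <fmt/color.h>
#include <string>

int main() {
  // Combine a foreground color with emphasis via operator|.
  fmt::print(fg(fmt::color::steel_blue) | fmt::emphasis::bold,
             "Elapsed time: {0:.2f} seconds\n", 1.23);
  // The same styling can be captured into a string instead of printed.
  std::string msg =
      fmt::format(fmt::emphasis::underline | fg(fmt::color::red),
                  "The answer is {}", 42);
  fmt::print("{}\n", msg);
  return 0;
}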

View File

@ -0,0 +1,639 @@
// Formatting library for C++ - experimental format string compilation
//
// Copyright (c) 2012 - present, Victor Zverovich and fmt contributors
// All rights reserved.
//
// For the license information refer to format.h.
#ifndef FMT_COMPILE_H_
#define FMT_COMPILE_H_
#include "format.h"
FMT_BEGIN_NAMESPACE
namespace detail {
// An output iterator that counts the number of objects written to it and
// discards them.
class counting_iterator {
private:
size_t count_;
public:
using iterator_category = std::output_iterator_tag;
using difference_type = std::ptrdiff_t;
using pointer = void;
using reference = void;
using _Unchecked_type = counting_iterator; // Mark iterator as checked.
struct value_type {
template <typename T> void operator=(const T&) {}
};
counting_iterator() : count_(0) {}
size_t count() const { return count_; }
counting_iterator& operator++() {
++count_;
return *this;
}
counting_iterator operator++(int) {
auto it = *this;
++*this;
return it;
}
friend counting_iterator operator+(counting_iterator it, difference_type n) {
it.count_ += static_cast<size_t>(n);
return it;
}
value_type operator*() const { return {}; }
};
template <typename Char, typename InputIt>
inline counting_iterator copy_str(InputIt begin, InputIt end,
counting_iterator it) {
return it + (end - begin);
}
template <typename OutputIt> class truncating_iterator_base {
protected:
OutputIt out_;
size_t limit_;
size_t count_ = 0;
truncating_iterator_base() : out_(), limit_(0) {}
truncating_iterator_base(OutputIt out, size_t limit)
: out_(out), limit_(limit) {}
public:
using iterator_category = std::output_iterator_tag;
using value_type = typename std::iterator_traits<OutputIt>::value_type;
using difference_type = std::ptrdiff_t;
using pointer = void;
using reference = void;
using _Unchecked_type =
truncating_iterator_base; // Mark iterator as checked.
OutputIt base() const { return out_; }
size_t count() const { return count_; }
};
// An output iterator that truncates the output and counts the number of objects
// written to it.
template <typename OutputIt,
typename Enable = typename std::is_void<
typename std::iterator_traits<OutputIt>::value_type>::type>
class truncating_iterator;
template <typename OutputIt>
class truncating_iterator<OutputIt, std::false_type>
: public truncating_iterator_base<OutputIt> {
mutable typename truncating_iterator_base<OutputIt>::value_type blackhole_;
public:
using value_type = typename truncating_iterator_base<OutputIt>::value_type;
truncating_iterator() = default;
truncating_iterator(OutputIt out, size_t limit)
: truncating_iterator_base<OutputIt>(out, limit) {}
truncating_iterator& operator++() {
if (this->count_++ < this->limit_) ++this->out_;
return *this;
}
truncating_iterator operator++(int) {
auto it = *this;
++*this;
return it;
}
value_type& operator*() const {
return this->count_ < this->limit_ ? *this->out_ : blackhole_;
}
};
template <typename OutputIt>
class truncating_iterator<OutputIt, std::true_type>
: public truncating_iterator_base<OutputIt> {
public:
truncating_iterator() = default;
truncating_iterator(OutputIt out, size_t limit)
: truncating_iterator_base<OutputIt>(out, limit) {}
template <typename T> truncating_iterator& operator=(T val) {
if (this->count_++ < this->limit_) *this->out_++ = val;
return *this;
}
truncating_iterator& operator++() { return *this; }
truncating_iterator& operator++(int) { return *this; }
truncating_iterator& operator*() { return *this; }
};
// A compile-time string which is compiled into fast formatting code.
class compiled_string {};
template <typename S>
struct is_compiled_string : std::is_base_of<compiled_string, S> {};
/**
\rst
Converts a string literal *s* into a format string that will be parsed at
compile time and converted into efficient formatting code. Requires C++17
``constexpr if`` compiler support.
**Example**::
// Converts 42 into std::string using the most efficient method and no
// runtime format string processing.
std::string s = fmt::format(FMT_COMPILE("{}"), 42);
\endrst
*/
#ifdef __cpp_if_constexpr
# define FMT_COMPILE(s) \
FMT_STRING_IMPL(s, fmt::detail::compiled_string, explicit)
#else
# define FMT_COMPILE(s) FMT_STRING(s)
#endif
#if FMT_USE_NONTYPE_TEMPLATE_PARAMETERS
template <typename Char, size_t N,
fmt::detail_exported::fixed_string<Char, N> Str>
struct udl_compiled_string : compiled_string {
using char_type = Char;
constexpr operator basic_string_view<char_type>() const {
return {Str.data, N - 1};
}
};
#endif
template <typename T, typename... Tail>
const T& first(const T& value, const Tail&...) {
return value;
}
#ifdef __cpp_if_constexpr
template <typename... Args> struct type_list {};
// Returns a reference to the argument at index N from [first, rest...].
template <int N, typename T, typename... Args>
constexpr const auto& get([[maybe_unused]] const T& first,
[[maybe_unused]] const Args&... rest) {
static_assert(N < 1 + sizeof...(Args), "index is out of bounds");
if constexpr (N == 0)
return first;
else
return get<N - 1>(rest...);
}
template <typename Char, typename... Args>
constexpr int get_arg_index_by_name(basic_string_view<Char> name,
type_list<Args...>) {
return get_arg_index_by_name<Args...>(name);
}
template <int N, typename> struct get_type_impl;
template <int N, typename... Args> struct get_type_impl<N, type_list<Args...>> {
using type = remove_cvref_t<decltype(get<N>(std::declval<Args>()...))>;
};
template <int N, typename T>
using get_type = typename get_type_impl<N, T>::type;
template <typename T> struct is_compiled_format : std::false_type {};
template <typename Char> struct text {
basic_string_view<Char> data;
using char_type = Char;
template <typename OutputIt, typename... Args>
constexpr OutputIt format(OutputIt out, const Args&...) const {
return write<Char>(out, data);
}
};
template <typename Char>
struct is_compiled_format<text<Char>> : std::true_type {};
template <typename Char>
constexpr text<Char> make_text(basic_string_view<Char> s, size_t pos,
size_t size) {
return {{&s[pos], size}};
}
template <typename Char> struct code_unit {
Char value;
using char_type = Char;
template <typename OutputIt, typename... Args>
constexpr OutputIt format(OutputIt out, const Args&...) const {
return write<Char>(out, value);
}
};
// This ensures that the argument type is convertible to `const T&`.
template <typename T, int N, typename... Args>
constexpr const T& get_arg_checked(const Args&... args) {
const auto& arg = get<N>(args...);
if constexpr (detail::is_named_arg<remove_cvref_t<decltype(arg)>>()) {
return arg.value;
} else {
return arg;
}
}
template <typename Char>
struct is_compiled_format<code_unit<Char>> : std::true_type {};
// A replacement field that refers to argument N.
template <typename Char, typename T, int N> struct field {
using char_type = Char;
template <typename OutputIt, typename... Args>
constexpr OutputIt format(OutputIt out, const Args&... args) const {
return write<Char>(out, get_arg_checked<T, N>(args...));
}
};
template <typename Char, typename T, int N>
struct is_compiled_format<field<Char, T, N>> : std::true_type {};
// A replacement field that refers to argument with name.
template <typename Char> struct runtime_named_field {
using char_type = Char;
basic_string_view<Char> name;
template <typename OutputIt, typename T>
constexpr static bool try_format_argument(
OutputIt& out,
// [[maybe_unused]] due to unused-but-set-parameter warning in GCC 7,8,9
[[maybe_unused]] basic_string_view<Char> arg_name, const T& arg) {
if constexpr (is_named_arg<typename std::remove_cv<T>::type>::value) {
if (arg_name == arg.name) {
out = write<Char>(out, arg.value);
return true;
}
}
return false;
}
template <typename OutputIt, typename... Args>
constexpr OutputIt format(OutputIt out, const Args&... args) const {
bool found = (try_format_argument(out, name, args) || ...);
if (!found) {
throw format_error("argument with specified name is not found");
}
return out;
}
};
template <typename Char>
struct is_compiled_format<runtime_named_field<Char>> : std::true_type {};
// A replacement field that refers to argument N and has format specifiers.
template <typename Char, typename T, int N> struct spec_field {
using char_type = Char;
formatter<T, Char> fmt;
template <typename OutputIt, typename... Args>
constexpr FMT_INLINE OutputIt format(OutputIt out,
const Args&... args) const {
const auto& vargs =
fmt::make_format_args<basic_format_context<OutputIt, Char>>(args...);
basic_format_context<OutputIt, Char> ctx(out, vargs);
return fmt.format(get_arg_checked<T, N>(args...), ctx);
}
};
template <typename Char, typename T, int N>
struct is_compiled_format<spec_field<Char, T, N>> : std::true_type {};
template <typename L, typename R> struct concat {
L lhs;
R rhs;
using char_type = typename L::char_type;
template <typename OutputIt, typename... Args>
constexpr OutputIt format(OutputIt out, const Args&... args) const {
out = lhs.format(out, args...);
return rhs.format(out, args...);
}
};
template <typename L, typename R>
struct is_compiled_format<concat<L, R>> : std::true_type {};
template <typename L, typename R>
constexpr concat<L, R> make_concat(L lhs, R rhs) {
return {lhs, rhs};
}
struct unknown_format {};
template <typename Char>
constexpr size_t parse_text(basic_string_view<Char> str, size_t pos) {
for (size_t size = str.size(); pos != size; ++pos) {
if (str[pos] == '{' || str[pos] == '}') break;
}
return pos;
}
template <typename Args, size_t POS, int ID, typename S>
constexpr auto compile_format_string(S format_str);
template <typename Args, size_t POS, int ID, typename T, typename S>
constexpr auto parse_tail(T head, S format_str) {
if constexpr (POS !=
basic_string_view<typename S::char_type>(format_str).size()) {
constexpr auto tail = compile_format_string<Args, POS, ID>(format_str);
if constexpr (std::is_same<remove_cvref_t<decltype(tail)>,
unknown_format>())
return tail;
else
return make_concat(head, tail);
} else {
return head;
}
}
template <typename T, typename Char> struct parse_specs_result {
formatter<T, Char> fmt;
size_t end;
int next_arg_id;
};
constexpr int manual_indexing_id = -1;
template <typename T, typename Char>
constexpr parse_specs_result<T, Char> parse_specs(basic_string_view<Char> str,
size_t pos, int next_arg_id) {
str.remove_prefix(pos);
auto ctx = basic_format_parse_context<Char>(str, {}, next_arg_id);
auto f = formatter<T, Char>();
auto end = f.parse(ctx);
return {f, pos + fmt::detail::to_unsigned(end - str.data()) + 1,
next_arg_id == 0 ? manual_indexing_id : ctx.next_arg_id()};
}
template <typename Char> struct arg_id_handler {
arg_ref<Char> arg_id;
constexpr int operator()() {
FMT_ASSERT(false, "handler cannot be used with automatic indexing");
return 0;
}
constexpr int operator()(int id) {
arg_id = arg_ref<Char>(id);
return 0;
}
constexpr int operator()(basic_string_view<Char> id) {
arg_id = arg_ref<Char>(id);
return 0;
}
constexpr void on_error(const char* message) { throw format_error(message); }
};
template <typename Char> struct parse_arg_id_result {
arg_ref<Char> arg_id;
const Char* arg_id_end;
};
template <int ID, typename Char>
constexpr auto parse_arg_id(const Char* begin, const Char* end) {
auto handler = arg_id_handler<Char>{arg_ref<Char>{}};
auto arg_id_end = parse_arg_id(begin, end, handler);
return parse_arg_id_result<Char>{handler.arg_id, arg_id_end};
}
template <typename T, typename Enable = void> struct field_type {
using type = remove_cvref_t<T>;
};
template <typename T>
struct field_type<T, enable_if_t<detail::is_named_arg<T>::value>> {
using type = remove_cvref_t<decltype(T::value)>;
};
template <typename T, typename Args, size_t END_POS, int ARG_INDEX, int NEXT_ID,
typename S>
constexpr auto parse_replacement_field_then_tail(S format_str) {
using char_type = typename S::char_type;
constexpr auto str = basic_string_view<char_type>(format_str);
constexpr char_type c = END_POS != str.size() ? str[END_POS] : char_type();
if constexpr (c == '}') {
return parse_tail<Args, END_POS + 1, NEXT_ID>(
field<char_type, typename field_type<T>::type, ARG_INDEX>(),
format_str);
} else if constexpr (c == ':') {
constexpr auto result = parse_specs<typename field_type<T>::type>(
str, END_POS + 1, NEXT_ID == manual_indexing_id ? 0 : NEXT_ID);
return parse_tail<Args, result.end, result.next_arg_id>(
spec_field<char_type, typename field_type<T>::type, ARG_INDEX>{
result.fmt},
format_str);
}
}
// Compiles a non-empty format string and returns the compiled representation
// or unknown_format() on unrecognized input.
template <typename Args, size_t POS, int ID, typename S>
constexpr auto compile_format_string(S format_str) {
using char_type = typename S::char_type;
constexpr auto str = basic_string_view<char_type>(format_str);
if constexpr (str[POS] == '{') {
if constexpr (POS + 1 == str.size())
throw format_error("unmatched '{' in format string");
if constexpr (str[POS + 1] == '{') {
return parse_tail<Args, POS + 2, ID>(make_text(str, POS, 1), format_str);
} else if constexpr (str[POS + 1] == '}' || str[POS + 1] == ':') {
static_assert(ID != manual_indexing_id,
"cannot switch from manual to automatic argument indexing");
constexpr auto next_id =
ID != manual_indexing_id ? ID + 1 : manual_indexing_id;
return parse_replacement_field_then_tail<get_type<ID, Args>, Args,
POS + 1, ID, next_id>(
format_str);
} else {
constexpr auto arg_id_result =
parse_arg_id<ID>(str.data() + POS + 1, str.data() + str.size());
constexpr auto arg_id_end_pos = arg_id_result.arg_id_end - str.data();
constexpr char_type c =
arg_id_end_pos != str.size() ? str[arg_id_end_pos] : char_type();
static_assert(c == '}' || c == ':', "missing '}' in format string");
if constexpr (arg_id_result.arg_id.kind == arg_id_kind::index) {
static_assert(
ID == manual_indexing_id || ID == 0,
"cannot switch from automatic to manual argument indexing");
constexpr auto arg_index = arg_id_result.arg_id.val.index;
return parse_replacement_field_then_tail<get_type<arg_index, Args>,
Args, arg_id_end_pos,
arg_index, manual_indexing_id>(
format_str);
} else if constexpr (arg_id_result.arg_id.kind == arg_id_kind::name) {
constexpr auto arg_index =
get_arg_index_by_name(arg_id_result.arg_id.val.name, Args{});
if constexpr (arg_index != invalid_arg_index) {
constexpr auto next_id =
ID != manual_indexing_id ? ID + 1 : manual_indexing_id;
return parse_replacement_field_then_tail<
decltype(get_type<arg_index, Args>::value), Args, arg_id_end_pos,
arg_index, next_id>(format_str);
} else {
if constexpr (c == '}') {
return parse_tail<Args, arg_id_end_pos + 1, ID>(
runtime_named_field<char_type>{arg_id_result.arg_id.val.name},
format_str);
} else if constexpr (c == ':') {
return unknown_format(); // no type info for specs parsing
}
}
}
}
} else if constexpr (str[POS] == '}') {
if constexpr (POS + 1 == str.size())
throw format_error("unmatched '}' in format string");
return parse_tail<Args, POS + 2, ID>(make_text(str, POS, 1), format_str);
} else {
constexpr auto end = parse_text(str, POS + 1);
if constexpr (end - POS > 1) {
return parse_tail<Args, end, ID>(make_text(str, POS, end - POS),
format_str);
} else {
return parse_tail<Args, end, ID>(code_unit<char_type>{str[POS]},
format_str);
}
}
}
template <typename... Args, typename S,
FMT_ENABLE_IF(detail::is_compiled_string<S>::value)>
constexpr auto compile(S format_str) {
constexpr auto str = basic_string_view<typename S::char_type>(format_str);
if constexpr (str.size() == 0) {
return detail::make_text(str, 0, 0);
} else {
constexpr auto result =
detail::compile_format_string<detail::type_list<Args...>, 0, 0>(
format_str);
return result;
}
}
#endif // __cpp_if_constexpr
} // namespace detail
FMT_MODULE_EXPORT_BEGIN
#ifdef __cpp_if_constexpr
template <typename CompiledFormat, typename... Args,
typename Char = typename CompiledFormat::char_type,
FMT_ENABLE_IF(detail::is_compiled_format<CompiledFormat>::value)>
FMT_INLINE std::basic_string<Char> format(const CompiledFormat& cf,
const Args&... args) {
auto s = std::basic_string<Char>();
cf.format(std::back_inserter(s), args...);
return s;
}
template <typename OutputIt, typename CompiledFormat, typename... Args,
FMT_ENABLE_IF(detail::is_compiled_format<CompiledFormat>::value)>
constexpr FMT_INLINE OutputIt format_to(OutputIt out, const CompiledFormat& cf,
const Args&... args) {
return cf.format(out, args...);
}
template <typename S, typename... Args,
FMT_ENABLE_IF(detail::is_compiled_string<S>::value)>
FMT_INLINE std::basic_string<typename S::char_type> format(const S&,
Args&&... args) {
if constexpr (std::is_same<typename S::char_type, char>::value) {
constexpr auto str = basic_string_view<typename S::char_type>(S());
if constexpr (str.size() == 2 && str[0] == '{' && str[1] == '}') {
const auto& first = detail::first(args...);
if constexpr (detail::is_named_arg<
remove_cvref_t<decltype(first)>>::value) {
return fmt::to_string(first.value);
} else {
return fmt::to_string(first);
}
}
}
constexpr auto compiled = detail::compile<Args...>(S());
if constexpr (std::is_same<remove_cvref_t<decltype(compiled)>,
detail::unknown_format>()) {
return format(static_cast<basic_string_view<typename S::char_type>>(S()),
std::forward<Args>(args)...);
} else {
return format(compiled, std::forward<Args>(args)...);
}
}
template <typename OutputIt, typename S, typename... Args,
FMT_ENABLE_IF(detail::is_compiled_string<S>::value)>
FMT_CONSTEXPR OutputIt format_to(OutputIt out, const S&, Args&&... args) {
constexpr auto compiled = detail::compile<Args...>(S());
if constexpr (std::is_same<remove_cvref_t<decltype(compiled)>,
detail::unknown_format>()) {
return format_to(out,
static_cast<basic_string_view<typename S::char_type>>(S()),
std::forward<Args>(args)...);
} else {
return format_to(out, compiled, std::forward<Args>(args)...);
}
}
#endif
template <typename OutputIt, typename S, typename... Args,
FMT_ENABLE_IF(detail::is_compiled_string<S>::value)>
format_to_n_result<OutputIt> format_to_n(OutputIt out, size_t n,
const S& format_str, Args&&... args) {
auto it = format_to(detail::truncating_iterator<OutputIt>(out, n), format_str,
std::forward<Args>(args)...);
return {it.base(), it.count()};
}
template <typename S, typename... Args,
FMT_ENABLE_IF(detail::is_compiled_string<S>::value)>
size_t formatted_size(const S& format_str, const Args&... args) {
return format_to(detail::counting_iterator(), format_str, args...).count();
}
template <typename S, typename... Args,
FMT_ENABLE_IF(detail::is_compiled_string<S>::value)>
void print(std::FILE* f, const S& format_str, const Args&... args) {
memory_buffer buffer;
format_to(std::back_inserter(buffer), format_str, args...);
detail::print(f, {buffer.data(), buffer.size()});
}
template <typename S, typename... Args,
FMT_ENABLE_IF(detail::is_compiled_string<S>::value)>
void print(const S& format_str, const Args&... args) {
print(stdout, format_str, args...);
}
#if FMT_USE_NONTYPE_TEMPLATE_PARAMETERS
inline namespace literals {
template <detail_exported::fixed_string Str>
constexpr detail::udl_compiled_string<
remove_cvref_t<decltype(Str.data[0])>,
sizeof(Str.data) / sizeof(decltype(Str.data[0])), Str>
operator""_cf() {
return {};
}
} // namespace literals
#endif
FMT_MODULE_EXPORT_END
FMT_END_NAMESPACE
#endif // FMT_COMPILE_H_
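
A minimal sketch of FMT_COMPILE-based formatting as documented above (requires C++17
``if constexpr``; the includes and main() scaffolding are illustrative assumptions):

#include <fmt/compile.h>
#include <string>

int main() {
  // The format string is parsed at compile time and lowered to direct writes.
  std::string s = fmt::format(FMT_COMPILE("{} + {} = {}"), 2, 2, 4);
  fmt::print(FMT_COMPILE("{}\n"), s);
  // format_to_n with a compiled string truncates output at the given limit.
  char buf[8];
  auto result = fmt::format_to_n(buf, sizeof(buf) - 1, FMT_COMPILE("{}"), 123456);
  *result.out = '\0';  // result.out points one past the last written character
  return 0;
}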

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,2 @@
#include "xchar.h"
#warning fmt/locale.h is deprecated, include fmt/format.h or fmt/xchar.h instead

View File

@ -0,0 +1,515 @@
// Formatting library for C++ - optional OS-specific functionality
//
// Copyright (c) 2012 - present, Victor Zverovich
// All rights reserved.
//
// For the license information refer to format.h.
#ifndef FMT_OS_H_
#define FMT_OS_H_
#include <cerrno>
#include <clocale> // locale_t
#include <cstddef>
#include <cstdio>
#include <cstdlib> // strtod_l
#include <system_error> // std::system_error
#if defined __APPLE__ || defined(__FreeBSD__)
# include <xlocale.h> // for LC_NUMERIC_MASK on OS X
#endif
#include "format.h"
// UWP doesn't provide _pipe.
#if FMT_HAS_INCLUDE("winapifamily.h")
# include <winapifamily.h>
#endif
#if (FMT_HAS_INCLUDE(<fcntl.h>) || defined(__APPLE__) || \
defined(__linux__)) && \
(!defined(WINAPI_FAMILY) || (WINAPI_FAMILY == WINAPI_FAMILY_DESKTOP_APP))
# include <fcntl.h> // for O_RDONLY
# define FMT_USE_FCNTL 1
#else
# define FMT_USE_FCNTL 0
#endif
#ifndef FMT_POSIX
# if defined(_WIN32) && !defined(__MINGW32__)
// Fix warnings about deprecated symbols.
# define FMT_POSIX(call) _##call
# else
# define FMT_POSIX(call) call
# endif
#endif
// Calls to system functions are wrapped in FMT_SYSTEM for testability.
#ifdef FMT_SYSTEM
# define FMT_POSIX_CALL(call) FMT_SYSTEM(call)
#else
# define FMT_SYSTEM(call) ::call
# ifdef _WIN32
// Fix warnings about deprecated symbols.
# define FMT_POSIX_CALL(call) ::_##call
# else
# define FMT_POSIX_CALL(call) ::call
# endif
#endif
// Retries the expression while it evaluates to error_result and errno
// equals to EINTR.
#ifndef _WIN32
# define FMT_RETRY_VAL(result, expression, error_result) \
do { \
(result) = (expression); \
} while ((result) == (error_result) && errno == EINTR)
#else
# define FMT_RETRY_VAL(result, expression, error_result) result = (expression)
#endif
#define FMT_RETRY(result, expression) FMT_RETRY_VAL(result, expression, -1)
FMT_BEGIN_NAMESPACE
FMT_MODULE_EXPORT_BEGIN
/**
\rst
A reference to a null-terminated string. It can be constructed from a C
string or ``std::string``.
You can use one of the following type aliases for common character types:
+---------------+-----------------------------+
| Type | Definition |
+===============+=============================+
| cstring_view | basic_cstring_view<char> |
+---------------+-----------------------------+
| wcstring_view | basic_cstring_view<wchar_t> |
+---------------+-----------------------------+
This class is most useful as a parameter type to allow passing
different types of strings to a function, for example::
template <typename... Args>
std::string format(cstring_view format_str, const Args & ... args);
format("{}", 42);
format(std::string("{}"), 42);
\endrst
*/
template <typename Char> class basic_cstring_view {
private:
const Char* data_;
public:
/** Constructs a string reference object from a C string. */
basic_cstring_view(const Char* s) : data_(s) {}
/**
\rst
Constructs a string reference from an ``std::string`` object.
\endrst
*/
basic_cstring_view(const std::basic_string<Char>& s) : data_(s.c_str()) {}
/** Returns the pointer to a C string. */
const Char* c_str() const { return data_; }
};
using cstring_view = basic_cstring_view<char>;
using wcstring_view = basic_cstring_view<wchar_t>;
template <typename Char> struct formatter<std::error_code, Char> {
template <typename ParseContext>
FMT_CONSTEXPR auto parse(ParseContext& ctx) -> decltype(ctx.begin()) {
return ctx.begin();
}
template <typename FormatContext>
FMT_CONSTEXPR auto format(const std::error_code& ec, FormatContext& ctx) const
-> decltype(ctx.out()) {
auto out = ctx.out();
out = detail::write_bytes(out, ec.category().name(),
basic_format_specs<Char>());
out = detail::write<Char>(out, Char(':'));
out = detail::write<Char>(out, ec.value());
return out;
}
};
#ifdef _WIN32
FMT_API const std::error_category& system_category() FMT_NOEXCEPT;
FMT_BEGIN_DETAIL_NAMESPACE
// A converter from UTF-16 to UTF-8.
// It is only provided for Windows since other systems support UTF-8 natively.
class utf16_to_utf8 {
private:
memory_buffer buffer_;
public:
utf16_to_utf8() {}
FMT_API explicit utf16_to_utf8(basic_string_view<wchar_t> s);
operator string_view() const { return string_view(&buffer_[0], size()); }
size_t size() const { return buffer_.size() - 1; }
const char* c_str() const { return &buffer_[0]; }
std::string str() const { return std::string(&buffer_[0], size()); }
// Performs conversion returning a system error code instead of
// throwing exception on conversion error. This method may still throw
// in case of memory allocation error.
FMT_API int convert(basic_string_view<wchar_t> s);
};
FMT_API void format_windows_error(buffer<char>& out, int error_code,
const char* message) FMT_NOEXCEPT;
FMT_END_DETAIL_NAMESPACE
FMT_API std::system_error vwindows_error(int error_code, string_view format_str,
format_args args);
/**
\rst
Constructs a :class:`std::system_error` object with the description
of the form
.. parsed-literal::
*<message>*: *<system-message>*
where *<message>* is the formatted message and *<system-message>* is the
system message corresponding to the error code.
*error_code* is a Windows error code as given by ``GetLastError``.
If *error_code* is not a valid error code such as -1, the system message
will look like "error -1".
**Example**::
// This throws a system_error with the description
// cannot open file 'madeup': The system cannot find the file specified.
// or similar (system message may vary).
const char *filename = "madeup";
LPOFSTRUCT of = LPOFSTRUCT();
HFILE file = OpenFile(filename, &of, OF_READ);
if (file == HFILE_ERROR) {
throw fmt::windows_error(GetLastError(),
"cannot open file '{}'", filename);
}
\endrst
*/
template <typename... Args>
std::system_error windows_error(int error_code, string_view message,
const Args&... args) {
return vwindows_error(error_code, message, fmt::make_format_args(args...));
}
// Reports a Windows error without throwing an exception.
// Can be used to report errors from destructors.
FMT_API void report_windows_error(int error_code,
const char* message) FMT_NOEXCEPT;
#else
inline const std::error_category& system_category() FMT_NOEXCEPT {
return std::system_category();
}
#endif // _WIN32
// std::system is not available on some platforms such as iOS (#2248).
#ifdef __OSX__
template <typename S, typename... Args, typename Char = char_t<S>>
void say(const S& format_str, Args&&... args) {
std::system(format("say \"{}\"", format(format_str, args...)).c_str());
}
#endif
// A buffered file.
class buffered_file {
private:
FILE* file_;
friend class file;
explicit buffered_file(FILE* f) : file_(f) {}
public:
buffered_file(const buffered_file&) = delete;
void operator=(const buffered_file&) = delete;
// Constructs a buffered_file object which doesn't represent any file.
buffered_file() FMT_NOEXCEPT : file_(nullptr) {}
// Destroys the object closing the file it represents if any.
FMT_API ~buffered_file() FMT_NOEXCEPT;
public:
buffered_file(buffered_file&& other) FMT_NOEXCEPT : file_(other.file_) {
other.file_ = nullptr;
}
buffered_file& operator=(buffered_file&& other) {
close();
file_ = other.file_;
other.file_ = nullptr;
return *this;
}
// Opens a file.
FMT_API buffered_file(cstring_view filename, cstring_view mode);
// Closes the file.
FMT_API void close();
// Returns the pointer to a FILE object representing this file.
FILE* get() const FMT_NOEXCEPT { return file_; }
// We place parentheses around fileno to workaround a bug in some versions
// of MinGW that define fileno as a macro.
FMT_API int(fileno)() const;
void vprint(string_view format_str, format_args args) {
fmt::vprint(file_, format_str, args);
}
template <typename... Args>
inline void print(string_view format_str, const Args&... args) {
vprint(format_str, fmt::make_format_args(args...));
}
};
#if FMT_USE_FCNTL
// A file. Closed file is represented by a file object with descriptor -1.
// Methods that are not declared with FMT_NOEXCEPT may throw
// fmt::system_error in case of failure. Note that some errors such as
// closing the file multiple times will cause a crash on Windows rather
// than an exception. You can get standard behavior by overriding the
// invalid parameter handler with _set_invalid_parameter_handler.
class file {
private:
int fd_; // File descriptor.
// Constructs a file object with a given descriptor.
explicit file(int fd) : fd_(fd) {}
public:
// Possible values for the oflag argument to the constructor.
enum {
RDONLY = FMT_POSIX(O_RDONLY), // Open for reading only.
WRONLY = FMT_POSIX(O_WRONLY), // Open for writing only.
RDWR = FMT_POSIX(O_RDWR), // Open for reading and writing.
CREATE = FMT_POSIX(O_CREAT), // Create if the file doesn't exist.
APPEND = FMT_POSIX(O_APPEND), // Open in append mode.
TRUNC = FMT_POSIX(O_TRUNC) // Truncate the content of the file.
};
// Constructs a file object which doesn't represent any file.
file() FMT_NOEXCEPT : fd_(-1) {}
// Opens a file and constructs a file object representing this file.
FMT_API file(cstring_view path, int oflag);
public:
file(const file&) = delete;
void operator=(const file&) = delete;
file(file&& other) FMT_NOEXCEPT : fd_(other.fd_) { other.fd_ = -1; }
// Move assignment is not noexcept because close may throw.
file& operator=(file&& other) {
close();
fd_ = other.fd_;
other.fd_ = -1;
return *this;
}
// Destroys the object closing the file it represents if any.
FMT_API ~file() FMT_NOEXCEPT;
// Returns the file descriptor.
int descriptor() const FMT_NOEXCEPT { return fd_; }
// Closes the file.
FMT_API void close();
// Returns the file size. The size has signed type for consistency with
// stat::st_size.
FMT_API long long size() const;
// Attempts to read count bytes from the file into the specified buffer.
FMT_API size_t read(void* buffer, size_t count);
// Attempts to write count bytes from the specified buffer to the file.
FMT_API size_t write(const void* buffer, size_t count);
// Duplicates a file descriptor with the dup function and returns
// the duplicate as a file object.
FMT_API static file dup(int fd);
// Makes fd a copy of this file descriptor, closing fd first if necessary.
FMT_API void dup2(int fd);
// Same as above but reports errors via ec instead of throwing.
FMT_API void dup2(int fd, std::error_code& ec) FMT_NOEXCEPT;
// Creates a pipe setting up read_end and write_end file objects for reading
// and writing respectively.
FMT_API static void pipe(file& read_end, file& write_end);
// Creates a buffered_file object associated with this file and detaches
// this file object from the file.
FMT_API buffered_file fdopen(const char* mode);
};
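// Illustrative usage sketch (not part of the library, requires FMT_USE_FCNTL);
// buffer contents and sizes below are arbitrary examples.
//
//   fmt::file read_end, write_end;
//   fmt::file::pipe(read_end, write_end);
//   write_end.write("ping", 4);
//   char buf[4];
//   size_t n = read_end.read(buf, sizeof(buf));  // n <= 4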
// Returns the memory page size.
long getpagesize();
FMT_BEGIN_DETAIL_NAMESPACE
struct buffer_size {
buffer_size() = default;
size_t value = 0;
buffer_size operator=(size_t val) const {
auto bs = buffer_size();
bs.value = val;
return bs;
}
};
struct ostream_params {
int oflag = file::WRONLY | file::CREATE | file::TRUNC;
size_t buffer_size = BUFSIZ > 32768 ? BUFSIZ : 32768;
ostream_params() {}
template <typename... T>
ostream_params(T... params, int new_oflag) : ostream_params(params...) {
oflag = new_oflag;
}
template <typename... T>
ostream_params(T... params, detail::buffer_size bs)
: ostream_params(params...) {
this->buffer_size = bs.value;
}
};
FMT_END_DETAIL_NAMESPACE
constexpr detail::buffer_size buffer_size;
/** A fast output stream which is not thread-safe. */
class FMT_API ostream final : private detail::buffer<char> {
private:
file file_;
void flush() {
if (size() == 0) return;
file_.write(data(), size());
clear();
}
void grow(size_t) override;
ostream(cstring_view path, const detail::ostream_params& params)
: file_(path, params.oflag) {
set(new char[params.buffer_size], params.buffer_size);
}
public:
ostream(ostream&& other)
: detail::buffer<char>(other.data(), other.size(), other.capacity()),
file_(std::move(other.file_)) {
other.clear();
other.set(nullptr, 0);
}
~ostream() {
flush();
delete[] data();
}
template <typename... T>
friend ostream output_file(cstring_view path, T... params);
void close() {
flush();
file_.close();
}
/**
Formats ``args`` according to specifications in ``fmt`` and writes the
output to the file.
*/
template <typename... T> void print(format_string<T...> fmt, T&&... args) {
vformat_to(detail::buffer_appender<char>(*this), fmt,
fmt::make_format_args(args...));
}
};
/**
\rst
Opens a file for writing. Supported parameters passed in *params*:
* ``<integer>``: Flags passed to `open
<https://pubs.opengroup.org/onlinepubs/007904875/functions/open.html>`_
(``file::WRONLY | file::CREATE`` by default)
* ``buffer_size=<integer>``: Output buffer size
**Example**::
auto out = fmt::output_file("guide.txt");
out.print("Don't {}", "Panic");
\endrst
*/
template <typename... T>
inline ostream output_file(cstring_view path, T... params) {
return {path, detail::ostream_params(params...)};
}
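// Illustrative usage sketch (not part of the library); file names and the
// buffer size below are arbitrary examples.
//
//   auto out = fmt::output_file("report.txt", fmt::buffer_size = 65536);
//   out.print("{} rows written\n", 42);
//
//   // Appending instead of truncating by passing an explicit open flag:
//   auto log = fmt::output_file(
//       "app.log", fmt::file::WRONLY | fmt::file::CREATE | fmt::file::APPEND);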
#endif // FMT_USE_FCNTL
#ifdef FMT_LOCALE
// A "C" numeric locale.
class locale {
private:
# ifdef _WIN32
using locale_t = _locale_t;
static void freelocale(locale_t loc) { _free_locale(loc); }
static double strtod_l(const char* nptr, char** endptr, _locale_t loc) {
return _strtod_l(nptr, endptr, loc);
}
# endif
locale_t locale_;
public:
using type = locale_t;
locale(const locale&) = delete;
void operator=(const locale&) = delete;
locale() {
# ifndef _WIN32
locale_ = FMT_SYSTEM(newlocale(LC_NUMERIC_MASK, "C", nullptr));
# else
locale_ = _create_locale(LC_NUMERIC, "C");
# endif
if (!locale_) FMT_THROW(system_error(errno, "cannot create locale"));
}
~locale() { freelocale(locale_); }
type get() const { return locale_; }
// Converts string to floating-point number and advances str past the end
// of the parsed input.
double strtod(const char*& str) const {
char* end = nullptr;
double result = strtod_l(str, &end, locale_);
str = end;
return result;
}
};
using Locale FMT_DEPRECATED_ALIAS = locale;
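// Illustrative usage sketch (not part of the library, requires FMT_LOCALE);
// the input string is an arbitrary example.
//
//   fmt::locale c_locale;
//   const char* p = "3.14 meters";
//   double d = c_locale.strtod(p);  // d == 3.14, p now points at " meters"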
#endif // FMT_LOCALE
FMT_MODULE_EXPORT_END
FMT_END_NAMESPACE
#endif // FMT_OS_H_

View File

@ -0,0 +1,181 @@
// Formatting library for C++ - std::ostream support
//
// Copyright (c) 2012 - present, Victor Zverovich
// All rights reserved.
//
// For the license information refer to format.h.
#ifndef FMT_OSTREAM_H_
#define FMT_OSTREAM_H_
#include <ostream>
#include "format.h"
FMT_BEGIN_NAMESPACE
template <typename Char> class basic_printf_parse_context;
template <typename OutputIt, typename Char> class basic_printf_context;
namespace detail {
template <class Char> class formatbuf : public std::basic_streambuf<Char> {
private:
using int_type = typename std::basic_streambuf<Char>::int_type;
using traits_type = typename std::basic_streambuf<Char>::traits_type;
buffer<Char>& buffer_;
public:
formatbuf(buffer<Char>& buf) : buffer_(buf) {}
protected:
// The put-area is actually always empty. This makes the implementation
// simpler and has the advantage that the streambuf and the buffer are always
// in sync and sputc never writes into uninitialized memory. The obvious
// disadvantage is that each call to sputc always results in a (virtual) call
// to overflow. There is no disadvantage here for sputn since this always
// results in a call to xsputn.
int_type overflow(int_type ch = traits_type::eof()) FMT_OVERRIDE {
if (!traits_type::eq_int_type(ch, traits_type::eof()))
buffer_.push_back(static_cast<Char>(ch));
return ch;
}
std::streamsize xsputn(const Char* s, std::streamsize count) FMT_OVERRIDE {
buffer_.append(s, s + count);
return count;
}
};
struct converter {
template <typename T, FMT_ENABLE_IF(is_integral<T>::value)> converter(T);
};
template <typename Char> struct test_stream : std::basic_ostream<Char> {
private:
void_t<> operator<<(converter);
};
// Hide insertion operators for built-in types.
template <typename Char, typename Traits>
void_t<> operator<<(std::basic_ostream<Char, Traits>&, Char);
template <typename Char, typename Traits>
void_t<> operator<<(std::basic_ostream<Char, Traits>&, char);
template <typename Traits>
void_t<> operator<<(std::basic_ostream<char, Traits>&, char);
template <typename Traits>
void_t<> operator<<(std::basic_ostream<char, Traits>&, signed char);
template <typename Traits>
void_t<> operator<<(std::basic_ostream<char, Traits>&, unsigned char);
// Checks if T has a user-defined operator<< (e.g. not a member of
// std::ostream).
template <typename T, typename Char> class is_streamable {
private:
template <typename U>
static bool_constant<!std::is_same<decltype(std::declval<test_stream<Char>&>()
<< std::declval<U>()),
void_t<>>::value>
test(int);
template <typename> static std::false_type test(...);
using result = decltype(test<T>(0));
public:
is_streamable() = default;
static const bool value = result::value;
};
// Write the content of buf to os.
template <typename Char>
void write_buffer(std::basic_ostream<Char>& os, buffer<Char>& buf) {
const Char* buf_data = buf.data();
using unsigned_streamsize = std::make_unsigned<std::streamsize>::type;
unsigned_streamsize size = buf.size();
unsigned_streamsize max_size = to_unsigned(max_value<std::streamsize>());
do {
unsigned_streamsize n = size <= max_size ? size : max_size;
os.write(buf_data, static_cast<std::streamsize>(n));
buf_data += n;
size -= n;
} while (size != 0);
}
template <typename Char, typename T>
void format_value(buffer<Char>& buf, const T& value,
locale_ref loc = locale_ref()) {
formatbuf<Char> format_buf(buf);
std::basic_ostream<Char> output(&format_buf);
#if !defined(FMT_STATIC_THOUSANDS_SEPARATOR)
if (loc) output.imbue(loc.get<std::locale>());
#endif
output << value;
output.exceptions(std::ios_base::failbit | std::ios_base::badbit);
buf.try_resize(buf.size());
}
// Formats an object of type T that has an overloaded ostream operator<<.
template <typename T, typename Char>
struct fallback_formatter<T, Char, enable_if_t<is_streamable<T, Char>::value>>
: private formatter<basic_string_view<Char>, Char> {
FMT_CONSTEXPR auto parse(basic_format_parse_context<Char>& ctx)
-> decltype(ctx.begin()) {
return formatter<basic_string_view<Char>, Char>::parse(ctx);
}
template <typename ParseCtx,
FMT_ENABLE_IF(std::is_same<
ParseCtx, basic_printf_parse_context<Char>>::value)>
auto parse(ParseCtx& ctx) -> decltype(ctx.begin()) {
return ctx.begin();
}
template <typename OutputIt>
auto format(const T& value, basic_format_context<OutputIt, Char>& ctx)
-> OutputIt {
basic_memory_buffer<Char> buffer;
format_value(buffer, value, ctx.locale());
basic_string_view<Char> str(buffer.data(), buffer.size());
return formatter<basic_string_view<Char>, Char>::format(str, ctx);
}
template <typename OutputIt>
auto format(const T& value, basic_printf_context<OutputIt, Char>& ctx)
-> OutputIt {
basic_memory_buffer<Char> buffer;
format_value(buffer, value, ctx.locale());
return std::copy(buffer.begin(), buffer.end(), ctx.out());
}
};
} // namespace detail
FMT_MODULE_EXPORT
template <typename Char>
void vprint(std::basic_ostream<Char>& os, basic_string_view<Char> format_str,
basic_format_args<buffer_context<type_identity_t<Char>>> args) {
basic_memory_buffer<Char> buffer;
detail::vformat_to(buffer, format_str, args);
detail::write_buffer(os, buffer);
}
/**
\rst
Prints formatted data to the stream *os*.
**Example**::
fmt::print(cerr, "Don't {}!", "panic");
\endrst
*/
FMT_MODULE_EXPORT
template <typename S, typename... Args,
typename Char = enable_if_t<detail::is_string<S>::value, char_t<S>>>
void print(std::basic_ostream<Char>& os, const S& format_str, Args&&... args) {
vprint(os, to_string_view(format_str),
fmt::make_args_checked<Args...>(format_str, args...));
}
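// Illustrative sketch (not part of the library): including this header lets
// any type with a user-provided operator<< be formatted directly; the "point"
// type below is an arbitrary example.
//
//   struct point { int x, y; };
//   std::ostream& operator<<(std::ostream& os, point p) {
//     return os << '(' << p.x << ", " << p.y << ')';
//   }
//
//   std::string s = fmt::format("{}", point{1, 2});  // "(1, 2)"
//   fmt::print(std::cerr, "at {}\n", point{1, 2});   // needs <iostream>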
FMT_END_NAMESPACE
#endif // FMT_OSTREAM_H_

View File

@ -0,0 +1,652 @@
// Formatting library for C++ - legacy printf implementation
//
// Copyright (c) 2012 - 2016, Victor Zverovich
// All rights reserved.
//
// For the license information refer to format.h.
#ifndef FMT_PRINTF_H_
#define FMT_PRINTF_H_
#include <algorithm> // std::max
#include <limits> // std::numeric_limits
#include <ostream>
#include "format.h"
FMT_BEGIN_NAMESPACE
FMT_MODULE_EXPORT_BEGIN
template <typename T> struct printf_formatter { printf_formatter() = delete; };
template <typename Char>
class basic_printf_parse_context : public basic_format_parse_context<Char> {
using basic_format_parse_context<Char>::basic_format_parse_context;
};
template <typename OutputIt, typename Char> class basic_printf_context {
private:
OutputIt out_;
basic_format_args<basic_printf_context> args_;
public:
using char_type = Char;
using format_arg = basic_format_arg<basic_printf_context>;
using parse_context_type = basic_printf_parse_context<Char>;
template <typename T> using formatter_type = printf_formatter<T>;
/**
\rst
Constructs a ``printf_context`` object. References to the arguments are
stored in the context object so make sure they have appropriate lifetimes.
\endrst
*/
basic_printf_context(OutputIt out,
basic_format_args<basic_printf_context> args)
: out_(out), args_(args) {}
OutputIt out() { return out_; }
void advance_to(OutputIt it) { out_ = it; }
detail::locale_ref locale() { return {}; }
format_arg arg(int id) const { return args_.get(id); }
FMT_CONSTEXPR void on_error(const char* message) {
detail::error_handler().on_error(message);
}
};
FMT_BEGIN_DETAIL_NAMESPACE
// Checks if a value fits in int - used to avoid warnings about comparing
// signed and unsigned integers.
template <bool IsSigned> struct int_checker {
template <typename T> static bool fits_in_int(T value) {
unsigned max = max_value<int>();
return value <= max;
}
static bool fits_in_int(bool) { return true; }
};
template <> struct int_checker<true> {
template <typename T> static bool fits_in_int(T value) {
return value >= (std::numeric_limits<int>::min)() &&
value <= max_value<int>();
}
static bool fits_in_int(int) { return true; }
};
class printf_precision_handler {
public:
template <typename T, FMT_ENABLE_IF(std::is_integral<T>::value)>
int operator()(T value) {
if (!int_checker<std::numeric_limits<T>::is_signed>::fits_in_int(value))
FMT_THROW(format_error("number is too big"));
return (std::max)(static_cast<int>(value), 0);
}
template <typename T, FMT_ENABLE_IF(!std::is_integral<T>::value)>
int operator()(T) {
FMT_THROW(format_error("precision is not integer"));
return 0;
}
};
// An argument visitor that returns true iff arg is a zero integer.
class is_zero_int {
public:
template <typename T, FMT_ENABLE_IF(std::is_integral<T>::value)>
bool operator()(T value) {
return value == 0;
}
template <typename T, FMT_ENABLE_IF(!std::is_integral<T>::value)>
bool operator()(T) {
return false;
}
};
template <typename T> struct make_unsigned_or_bool : std::make_unsigned<T> {};
template <> struct make_unsigned_or_bool<bool> { using type = bool; };
template <typename T, typename Context> class arg_converter {
private:
using char_type = typename Context::char_type;
basic_format_arg<Context>& arg_;
char_type type_;
public:
arg_converter(basic_format_arg<Context>& arg, char_type type)
: arg_(arg), type_(type) {}
void operator()(bool value) {
if (type_ != 's') operator()<bool>(value);
}
template <typename U, FMT_ENABLE_IF(std::is_integral<U>::value)>
void operator()(U value) {
bool is_signed = type_ == 'd' || type_ == 'i';
using target_type = conditional_t<std::is_same<T, void>::value, U, T>;
if (const_check(sizeof(target_type) <= sizeof(int))) {
// Extra casts are used to silence warnings.
if (is_signed) {
arg_ = detail::make_arg<Context>(
static_cast<int>(static_cast<target_type>(value)));
} else {
using unsigned_type = typename make_unsigned_or_bool<target_type>::type;
arg_ = detail::make_arg<Context>(
static_cast<unsigned>(static_cast<unsigned_type>(value)));
}
} else {
if (is_signed) {
// glibc's printf doesn't sign extend arguments of smaller types:
//   std::printf("%lld", -42); // prints "4294967254"
// but we don't have to do the same because it's UB.
arg_ = detail::make_arg<Context>(static_cast<long long>(value));
} else {
arg_ = detail::make_arg<Context>(
static_cast<typename make_unsigned_or_bool<U>::type>(value));
}
}
}
template <typename U, FMT_ENABLE_IF(!std::is_integral<U>::value)>
void operator()(U) {} // No conversion needed for non-integral types.
};
// Converts an integer argument to T for printf, if T is an integral type.
// If T is void, the argument is converted to the corresponding signed or
// unsigned type depending on the type specifier: 'd' and 'i' - signed,
// others - unsigned.
template <typename T, typename Context, typename Char>
void convert_arg(basic_format_arg<Context>& arg, Char type) {
visit_format_arg(arg_converter<T, Context>(arg, type), arg);
}
// Converts an integer argument to char for printf.
template <typename Context> class char_converter {
private:
basic_format_arg<Context>& arg_;
public:
explicit char_converter(basic_format_arg<Context>& arg) : arg_(arg) {}
template <typename T, FMT_ENABLE_IF(std::is_integral<T>::value)>
void operator()(T value) {
arg_ = detail::make_arg<Context>(
static_cast<typename Context::char_type>(value));
}
template <typename T, FMT_ENABLE_IF(!std::is_integral<T>::value)>
void operator()(T) {} // No conversion needed for non-integral types.
};
// An argument visitor that returns a pointer to a C string if the argument
// is a string, or null otherwise.
template <typename Char> struct get_cstring {
template <typename T> const Char* operator()(T) { return nullptr; }
const Char* operator()(const Char* s) { return s; }
};
// Checks if an argument is a valid printf width specifier and sets
// left alignment if it is negative.
template <typename Char> class printf_width_handler {
private:
using format_specs = basic_format_specs<Char>;
format_specs& specs_;
public:
explicit printf_width_handler(format_specs& specs) : specs_(specs) {}
template <typename T, FMT_ENABLE_IF(std::is_integral<T>::value)>
unsigned operator()(T value) {
auto width = static_cast<uint32_or_64_or_128_t<T>>(value);
if (detail::is_negative(value)) {
specs_.align = align::left;
width = 0 - width;
}
unsigned int_max = max_value<int>();
if (width > int_max) FMT_THROW(format_error("number is too big"));
return static_cast<unsigned>(width);
}
template <typename T, FMT_ENABLE_IF(!std::is_integral<T>::value)>
unsigned operator()(T) {
FMT_THROW(format_error("width is not integer"));
return 0;
}
};
// The ``printf`` argument formatter.
template <typename OutputIt, typename Char>
class printf_arg_formatter : public arg_formatter<Char> {
private:
using base = arg_formatter<Char>;
using context_type = basic_printf_context<OutputIt, Char>;
using format_specs = basic_format_specs<Char>;
context_type& context_;
OutputIt write_null_pointer(bool is_string = false) {
auto s = this->specs;
s.type = 0;
return write_bytes(this->out, is_string ? "(null)" : "(nil)", s);
}
public:
printf_arg_formatter(OutputIt iter, format_specs& s, context_type& ctx)
: base{iter, s, locale_ref()}, context_(ctx) {}
OutputIt operator()(monostate value) { return base::operator()(value); }
template <typename T, FMT_ENABLE_IF(detail::is_integral<T>::value)>
OutputIt operator()(T value) {
// MSVC2013 fails to compile separate overloads for bool and Char so use
// std::is_same instead.
if (std::is_same<T, Char>::value) {
format_specs fmt_specs = this->specs;
if (fmt_specs.type && fmt_specs.type != 'c')
return (*this)(static_cast<int>(value));
fmt_specs.sign = sign::none;
fmt_specs.alt = false;
fmt_specs.fill[0] = ' '; // Ignore '0' flag for char types.
// align::numeric needs to be overwritten here since the '0' flag is
// ignored for non-numeric types
if (fmt_specs.align == align::none || fmt_specs.align == align::numeric)
fmt_specs.align = align::right;
return write<Char>(this->out, static_cast<Char>(value), fmt_specs);
}
return base::operator()(value);
}
template <typename T, FMT_ENABLE_IF(std::is_floating_point<T>::value)>
OutputIt operator()(T value) {
return base::operator()(value);
}
/** Formats a null-terminated C string. */
OutputIt operator()(const char* value) {
if (value) return base::operator()(value);
return write_null_pointer(this->specs.type != 'p');
}
/** Formats a null-terminated wide C string. */
OutputIt operator()(const wchar_t* value) {
if (value) return base::operator()(value);
return write_null_pointer(this->specs.type != 'p');
}
OutputIt operator()(basic_string_view<Char> value) {
return base::operator()(value);
}
/** Formats a pointer. */
OutputIt operator()(const void* value) {
return value ? base::operator()(value) : write_null_pointer();
}
/** Formats an argument of a custom (user-defined) type. */
OutputIt operator()(typename basic_format_arg<context_type>::handle handle) {
auto parse_ctx =
basic_printf_parse_context<Char>(basic_string_view<Char>());
handle.format(parse_ctx, context_);
return this->out;
}
};
template <typename Char>
void parse_flags(basic_format_specs<Char>& specs, const Char*& it,
const Char* end) {
for (; it != end; ++it) {
switch (*it) {
case '-':
specs.align = align::left;
break;
case '+':
specs.sign = sign::plus;
break;
case '0':
specs.fill[0] = '0';
break;
case ' ':
if (specs.sign != sign::plus) {
specs.sign = sign::space;
}
break;
case '#':
specs.alt = true;
break;
default:
return;
}
}
}
template <typename Char, typename GetArg>
int parse_header(const Char*& it, const Char* end,
basic_format_specs<Char>& specs, GetArg get_arg) {
int arg_index = -1;
Char c = *it;
if (c >= '0' && c <= '9') {
// Parse an argument index (if followed by '$') or a width possibly
// preceded with '0' flag(s).
int value = parse_nonnegative_int(it, end, -1);
if (it != end && *it == '$') { // value is an argument index
++it;
arg_index = value != -1 ? value : max_value<int>();
} else {
if (c == '0') specs.fill[0] = '0';
if (value != 0) {
// Nonzero value means that we parsed width and don't need to
// parse it or flags again, so return now.
if (value == -1) FMT_THROW(format_error("number is too big"));
specs.width = value;
return arg_index;
}
}
}
parse_flags(specs, it, end);
// Parse width.
if (it != end) {
if (*it >= '0' && *it <= '9') {
specs.width = parse_nonnegative_int(it, end, -1);
if (specs.width == -1) FMT_THROW(format_error("number is too big"));
} else if (*it == '*') {
++it;
specs.width = static_cast<int>(visit_format_arg(
detail::printf_width_handler<Char>(specs), get_arg(-1)));
}
}
return arg_index;
}
template <typename Char, typename Context>
void vprintf(buffer<Char>& buf, basic_string_view<Char> format,
basic_format_args<Context> args) {
using OutputIt = buffer_appender<Char>;
auto out = OutputIt(buf);
auto context = basic_printf_context<OutputIt, Char>(out, args);
auto parse_ctx = basic_printf_parse_context<Char>(format);
// Returns the argument with specified index or, if arg_index is -1, the next
// argument.
auto get_arg = [&](int arg_index) {
if (arg_index < 0)
arg_index = parse_ctx.next_arg_id();
else
parse_ctx.check_arg_id(--arg_index);
return detail::get_arg(context, arg_index);
};
const Char* start = parse_ctx.begin();
const Char* end = parse_ctx.end();
auto it = start;
while (it != end) {
if (!detail::find<false, Char>(it, end, '%', it)) {
it = end; // detail::find leaves it == nullptr if it doesn't find '%'
break;
}
Char c = *it++;
if (it != end && *it == c) {
out = detail::write(
out, basic_string_view<Char>(start, detail::to_unsigned(it - start)));
start = ++it;
continue;
}
out = detail::write(out, basic_string_view<Char>(
start, detail::to_unsigned(it - 1 - start)));
basic_format_specs<Char> specs;
specs.align = align::right;
// Parse argument index, flags and width.
int arg_index = parse_header(it, end, specs, get_arg);
if (arg_index == 0) parse_ctx.on_error("argument not found");
// Parse precision.
if (it != end && *it == '.') {
++it;
c = it != end ? *it : 0;
if ('0' <= c && c <= '9') {
specs.precision = parse_nonnegative_int(it, end, 0);
} else if (c == '*') {
++it;
specs.precision = static_cast<int>(
visit_format_arg(detail::printf_precision_handler(), get_arg(-1)));
} else {
specs.precision = 0;
}
}
auto arg = get_arg(arg_index);
// For d, i, o, u, x, and X conversion specifiers, if a precision is
// specified, the '0' flag is ignored
if (specs.precision >= 0 && arg.is_integral())
specs.fill[0] =
' '; // Ignore '0' flag for non-numeric types or if '-' present.
if (specs.precision >= 0 && arg.type() == detail::type::cstring_type) {
auto str = visit_format_arg(detail::get_cstring<Char>(), arg);
auto str_end = str + specs.precision;
auto nul = std::find(str, str_end, Char());
arg = detail::make_arg<basic_printf_context<OutputIt, Char>>(
basic_string_view<Char>(
str, detail::to_unsigned(nul != str_end ? nul - str
: specs.precision)));
}
if (specs.alt && visit_format_arg(detail::is_zero_int(), arg))
specs.alt = false;
if (specs.fill[0] == '0') {
if (arg.is_arithmetic() && specs.align != align::left)
specs.align = align::numeric;
else
specs.fill[0] = ' '; // Ignore '0' flag for non-numeric types or if '-'
// flag is also present.
}
// Parse length and convert the argument to the required type.
c = it != end ? *it++ : 0;
Char t = it != end ? *it : 0;
using detail::convert_arg;
switch (c) {
case 'h':
if (t == 'h') {
++it;
t = it != end ? *it : 0;
convert_arg<signed char>(arg, t);
} else {
convert_arg<short>(arg, t);
}
break;
case 'l':
if (t == 'l') {
++it;
t = it != end ? *it : 0;
convert_arg<long long>(arg, t);
} else {
convert_arg<long>(arg, t);
}
break;
case 'j':
convert_arg<intmax_t>(arg, t);
break;
case 'z':
convert_arg<size_t>(arg, t);
break;
case 't':
convert_arg<std::ptrdiff_t>(arg, t);
break;
case 'L':
// printf produces garbage when 'L' is omitted for long double, so there is
// no need to do the same here.
break;
default:
--it;
convert_arg<void>(arg, c);
}
// Parse type.
if (it == end) FMT_THROW(format_error("invalid format string"));
specs.type = static_cast<char>(*it++);
if (arg.is_integral()) {
// Normalize type.
switch (specs.type) {
case 'i':
case 'u':
specs.type = 'd';
break;
case 'c':
visit_format_arg(
detail::char_converter<basic_printf_context<OutputIt, Char>>(arg),
arg);
break;
}
}
start = it;
// Format argument.
out = visit_format_arg(
detail::printf_arg_formatter<OutputIt, Char>(out, specs, context), arg);
}
detail::write(out, basic_string_view<Char>(start, to_unsigned(it - start)));
}
FMT_END_DETAIL_NAMESPACE
template <typename Char>
using basic_printf_context_t =
basic_printf_context<detail::buffer_appender<Char>, Char>;
using printf_context = basic_printf_context_t<char>;
using wprintf_context = basic_printf_context_t<wchar_t>;
using printf_args = basic_format_args<printf_context>;
using wprintf_args = basic_format_args<wprintf_context>;
/**
\rst
Constructs an `~fmt::format_arg_store` object that contains references to
arguments and can be implicitly converted to `~fmt::printf_args`.
\endrst
*/
template <typename... T>
inline auto make_printf_args(const T&... args)
-> format_arg_store<printf_context, T...> {
return {args...};
}
/**
\rst
Constructs an `~fmt::format_arg_store` object that contains references to
arguments and can be implicitly converted to `~fmt::wprintf_args`.
\endrst
*/
template <typename... T>
inline auto make_wprintf_args(const T&... args)
-> format_arg_store<wprintf_context, T...> {
return {args...};
}
template <typename S, typename Char = char_t<S>>
inline auto vsprintf(
const S& fmt,
basic_format_args<basic_printf_context_t<type_identity_t<Char>>> args)
-> std::basic_string<Char> {
basic_memory_buffer<Char> buffer;
vprintf(buffer, to_string_view(fmt), args);
return to_string(buffer);
}
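// Illustrative sketch (not part of the library): the type-erased printf API.
// The helper function below is an arbitrary example.
//
//   std::string vlog(fmt::string_view fmt_str, fmt::printf_args args) {
//     return fmt::vsprintf(fmt_str, args);
//   }
//   std::string s = vlog("%s: %d", fmt::make_printf_args("answer", 42));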
/**
\rst
Formats arguments and returns the result as a string.
**Example**::
std::string message = fmt::sprintf("The answer is %d", 42);
\endrst
*/
template <typename S, typename... T,
typename Char = enable_if_t<detail::is_string<S>::value, char_t<S>>>
inline auto sprintf(const S& fmt, const T&... args) -> std::basic_string<Char> {
using context = basic_printf_context_t<Char>;
return vsprintf(to_string_view(fmt), fmt::make_format_args<context>(args...));
}
template <typename S, typename Char = char_t<S>>
inline auto vfprintf(
std::FILE* f, const S& fmt,
basic_format_args<basic_printf_context_t<type_identity_t<Char>>> args)
-> int {
basic_memory_buffer<Char> buffer;
vprintf(buffer, to_string_view(fmt), args);
size_t size = buffer.size();
return std::fwrite(buffer.data(), sizeof(Char), size, f) < size
? -1
: static_cast<int>(size);
}
/**
\rst
Prints formatted data to the file *f*.
**Example**::
fmt::fprintf(stderr, "Don't %s!", "panic");
\endrst
*/
template <typename S, typename... T, typename Char = char_t<S>>
inline auto fprintf(std::FILE* f, const S& fmt, const T&... args) -> int {
using context = basic_printf_context_t<Char>;
return vfprintf(f, to_string_view(fmt),
fmt::make_format_args<context>(args...));
}
template <typename S, typename Char = char_t<S>>
inline auto vprintf(
const S& fmt,
basic_format_args<basic_printf_context_t<type_identity_t<Char>>> args)
-> int {
return vfprintf(stdout, to_string_view(fmt), args);
}
/**
\rst
Prints formatted data to ``stdout``.
**Example**::
fmt::printf("Elapsed time: %.2f seconds", 1.23);
\endrst
*/
template <typename S, typename... T, FMT_ENABLE_IF(detail::is_string<S>::value)>
inline auto printf(const S& fmt, const T&... args) -> int {
return vprintf(
to_string_view(fmt),
fmt::make_format_args<basic_printf_context_t<char_t<S>>>(args...));
}
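// Illustrative sketch (not part of the library); the format strings and
// values below are arbitrary examples.
//
//   fmt::printf("%-10s|%5.2f\n", "pi", 3.14159);     // "pi        | 3.14"
//   int n = fmt::fprintf(stderr, "%d errors\n", 3);  // n == bytes written
//   std::string s = fmt::sprintf("%08.3f", 2.5);     // "0002.500"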
template <typename S, typename Char = char_t<S>>
FMT_DEPRECATED auto vfprintf(
std::basic_ostream<Char>& os, const S& fmt,
basic_format_args<basic_printf_context_t<type_identity_t<Char>>> args)
-> int {
basic_memory_buffer<Char> buffer;
vprintf(buffer, to_string_view(fmt), args);
os.write(buffer.data(), static_cast<std::streamsize>(buffer.size()));
return static_cast<int>(buffer.size());
}
template <typename S, typename... T, typename Char = char_t<S>>
FMT_DEPRECATED auto fprintf(std::basic_ostream<Char>& os, const S& fmt,
const T&... args) -> int {
return vfprintf(os, to_string_view(fmt),
fmt::make_format_args<basic_printf_context_t<Char>>(args...));
}
FMT_MODULE_EXPORT_END
FMT_END_NAMESPACE
#endif // FMT_PRINTF_H_

View File

@ -0,0 +1,468 @@
// Formatting library for C++ - experimental range support
//
// Copyright (c) 2012 - present, Victor Zverovich
// All rights reserved.
//
// For the license information refer to format.h.
//
// Copyright (c) 2018 - present, Remotion (Igor Schulz)
// All Rights Reserved
// {fmt} support for ranges, containers and types tuple interface.
#ifndef FMT_RANGES_H_
#define FMT_RANGES_H_
#include <initializer_list>
#include <type_traits>
#include "format.h"
FMT_BEGIN_NAMESPACE
template <typename Char, typename Enable = void> struct formatting_range {
#ifdef FMT_DEPRECATED_BRACED_RANGES
Char prefix = '{';
Char postfix = '}';
#else
Char prefix = '[';
Char postfix = ']';
#endif
template <typename ParseContext>
FMT_CONSTEXPR auto parse(ParseContext& ctx) -> decltype(ctx.begin()) {
return ctx.begin();
}
};
template <typename Char, typename Enable = void> struct formatting_tuple {
Char prefix = '(';
Char postfix = ')';
template <typename ParseContext>
FMT_CONSTEXPR auto parse(ParseContext& ctx) -> decltype(ctx.begin()) {
return ctx.begin();
}
};
namespace detail {
template <typename RangeT, typename OutputIterator>
OutputIterator copy(const RangeT& range, OutputIterator out) {
for (auto it = range.begin(), end = range.end(); it != end; ++it)
*out++ = *it;
return out;
}
template <typename OutputIterator>
OutputIterator copy(const char* str, OutputIterator out) {
while (*str) *out++ = *str++;
return out;
}
template <typename OutputIterator>
OutputIterator copy(char ch, OutputIterator out) {
*out++ = ch;
return out;
}
template <typename OutputIterator>
OutputIterator copy(wchar_t ch, OutputIterator out) {
*out++ = ch;
return out;
}
/// Returns true if T has a std::string interface, like std::string_view.
template <typename T> class is_std_string_like {
template <typename U>
static auto check(U* p)
-> decltype((void)p->find('a'), p->length(), (void)p->data(), int());
template <typename> static void check(...);
public:
static FMT_CONSTEXPR_DECL const bool value =
is_string<T>::value || !std::is_void<decltype(check<T>(nullptr))>::value;
};
template <typename Char>
struct is_std_string_like<fmt::basic_string_view<Char>> : std::true_type {};
template <typename... Ts> struct conditional_helper {};
template <typename T, typename _ = void> struct is_range_ : std::false_type {};
#if !FMT_MSC_VER || FMT_MSC_VER > 1800
# define FMT_DECLTYPE_RETURN(val) \
->decltype(val) { return val; } \
static_assert( \
true, "") // This makes it so that a semicolon is required after the
// macro, which helps clang-format handle the formatting.
// C array overload
template <typename T, std::size_t N>
auto range_begin(const T (&arr)[N]) -> const T* {
return arr;
}
template <typename T, std::size_t N>
auto range_end(const T (&arr)[N]) -> const T* {
return arr + N;
}
template <typename T, typename Enable = void>
struct has_member_fn_begin_end_t : std::false_type {};
template <typename T>
struct has_member_fn_begin_end_t<T, void_t<decltype(std::declval<T>().begin()),
decltype(std::declval<T>().end())>>
: std::true_type {};
// Member function overload
template <typename T>
auto range_begin(T&& rng) FMT_DECLTYPE_RETURN(static_cast<T&&>(rng).begin());
template <typename T>
auto range_end(T&& rng) FMT_DECLTYPE_RETURN(static_cast<T&&>(rng).end());
// ADL overload. Only participates in overload resolution if member functions
// are not found.
template <typename T>
auto range_begin(T&& rng)
-> enable_if_t<!has_member_fn_begin_end_t<T&&>::value,
decltype(begin(static_cast<T&&>(rng)))> {
return begin(static_cast<T&&>(rng));
}
template <typename T>
auto range_end(T&& rng) -> enable_if_t<!has_member_fn_begin_end_t<T&&>::value,
decltype(end(static_cast<T&&>(rng)))> {
return end(static_cast<T&&>(rng));
}
template <typename T, typename Enable = void>
struct has_const_begin_end : std::false_type {};
template <typename T, typename Enable = void>
struct has_mutable_begin_end : std::false_type {};
template <typename T>
struct has_const_begin_end<
T, void_t<decltype(detail::range_begin(
std::declval<const remove_cvref_t<T>&>())),
decltype(detail::range_begin(
std::declval<const remove_cvref_t<T>&>()))>>
: std::true_type {};
template <typename T>
struct has_mutable_begin_end<
T, void_t<decltype(detail::range_begin(std::declval<T>())),
decltype(detail::range_begin(std::declval<T>())),
enable_if_t<std::is_copy_constructible<T>::value>>>
: std::true_type {};
template <typename T>
struct is_range_<T, void>
: std::integral_constant<bool, (has_const_begin_end<T>::value ||
has_mutable_begin_end<T>::value)> {};
template <typename T, typename Enable = void> struct range_to_view;
template <typename T>
struct range_to_view<T, enable_if_t<has_const_begin_end<T>::value>> {
struct view_t {
const T* m_range_ptr;
auto begin() const FMT_DECLTYPE_RETURN(detail::range_begin(*m_range_ptr));
auto end() const FMT_DECLTYPE_RETURN(detail::range_end(*m_range_ptr));
};
static auto view(const T& range) -> view_t { return {&range}; }
};
template <typename T>
struct range_to_view<T, enable_if_t<!has_const_begin_end<T>::value &&
has_mutable_begin_end<T>::value>> {
struct view_t {
T m_range_copy;
auto begin() FMT_DECLTYPE_RETURN(detail::range_begin(m_range_copy));
auto end() FMT_DECLTYPE_RETURN(detail::range_end(m_range_copy));
};
static auto view(const T& range) -> view_t { return {range}; }
};
# undef FMT_DECLTYPE_RETURN
#endif
/// tuple_size and tuple_element check.
template <typename T> class is_tuple_like_ {
template <typename U>
static auto check(U* p) -> decltype(std::tuple_size<U>::value, int());
template <typename> static void check(...);
public:
static FMT_CONSTEXPR_DECL const bool value =
!std::is_void<decltype(check<T>(nullptr))>::value;
};
// Check for integer_sequence
#if defined(__cpp_lib_integer_sequence) || FMT_MSC_VER >= 1900
template <typename T, T... N>
using integer_sequence = std::integer_sequence<T, N...>;
template <size_t... N> using index_sequence = std::index_sequence<N...>;
template <size_t N> using make_index_sequence = std::make_index_sequence<N>;
#else
template <typename T, T... N> struct integer_sequence {
using value_type = T;
static FMT_CONSTEXPR size_t size() { return sizeof...(N); }
};
template <size_t... N> using index_sequence = integer_sequence<size_t, N...>;
template <typename T, size_t N, T... Ns>
struct make_integer_sequence : make_integer_sequence<T, N - 1, N - 1, Ns...> {};
template <typename T, T... Ns>
struct make_integer_sequence<T, 0, Ns...> : integer_sequence<T, Ns...> {};
template <size_t N>
using make_index_sequence = make_integer_sequence<size_t, N>;
#endif
template <class Tuple, class F, size_t... Is>
void for_each(index_sequence<Is...>, Tuple&& tup, F&& f) FMT_NOEXCEPT {
using std::get;
// Use the free function get<I>(tup) so that ADL can find user-provided overloads.
const int _[] = {0, ((void)f(get<Is>(tup)), 0)...};
(void)_;  // suppress the unused variable warning
}
template <class T>
FMT_CONSTEXPR make_index_sequence<std::tuple_size<T>::value> get_indexes(
T const&) {
return {};
}
template <class Tuple, class F> void for_each(Tuple&& tup, F&& f) {
const auto indexes = get_indexes(tup);
for_each(indexes, std::forward<Tuple>(tup), std::forward<F>(f));
}
template <typename Range>
using value_type =
remove_cvref_t<decltype(*detail::range_begin(std::declval<Range>()))>;
template <typename OutputIt> OutputIt write_delimiter(OutputIt out) {
*out++ = ',';
*out++ = ' ';
return out;
}
template <
typename Char, typename OutputIt, typename Arg,
FMT_ENABLE_IF(is_std_string_like<typename std::decay<Arg>::type>::value)>
OutputIt write_range_entry(OutputIt out, const Arg& v) {
*out++ = '"';
out = write<Char>(out, v);
*out++ = '"';
return out;
}
template <typename Char, typename OutputIt, typename Arg,
FMT_ENABLE_IF(std::is_same<Arg, Char>::value)>
OutputIt write_range_entry(OutputIt out, const Arg v) {
*out++ = '\'';
*out++ = v;
*out++ = '\'';
return out;
}
template <
typename Char, typename OutputIt, typename Arg,
FMT_ENABLE_IF(!is_std_string_like<typename std::decay<Arg>::type>::value &&
!std::is_same<Arg, Char>::value)>
OutputIt write_range_entry(OutputIt out, const Arg& v) {
return write<Char>(out, v);
}
} // namespace detail
template <typename T> struct is_tuple_like {
static FMT_CONSTEXPR_DECL const bool value =
detail::is_tuple_like_<T>::value && !detail::is_range_<T>::value;
};
template <typename TupleT, typename Char>
struct formatter<TupleT, Char, enable_if_t<fmt::is_tuple_like<TupleT>::value>> {
private:
// C++11 generic lambda for format()
template <typename FormatContext> struct format_each {
template <typename T> void operator()(const T& v) {
if (i > 0) out = detail::write_delimiter(out);
out = detail::write_range_entry<Char>(out, v);
++i;
}
formatting_tuple<Char>& formatting;
size_t& i;
typename std::add_lvalue_reference<
decltype(std::declval<FormatContext>().out())>::type out;
};
public:
formatting_tuple<Char> formatting;
template <typename ParseContext>
FMT_CONSTEXPR auto parse(ParseContext& ctx) -> decltype(ctx.begin()) {
return formatting.parse(ctx);
}
template <typename FormatContext = format_context>
auto format(const TupleT& values, FormatContext& ctx) -> decltype(ctx.out()) {
auto out = ctx.out();
size_t i = 0;
detail::copy(formatting.prefix, out);
detail::for_each(values, format_each<FormatContext>{formatting, i, out});
detail::copy(formatting.postfix, out);
return ctx.out();
}
};
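// Illustrative sketch (not part of the library): tuple-like types are printed
// with '(' ')' delimiters; characters are quoted with ' and strings with ".
//
//   auto t = std::tuple<int, char, std::string>(1, 'a', "b");
//   std::string s = fmt::format("{}", t);  // yields (1, 'a', "b")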
template <typename T, typename Char> struct is_range {
static FMT_CONSTEXPR_DECL const bool value =
detail::is_range_<T>::value && !detail::is_std_string_like<T>::value &&
!std::is_convertible<T, std::basic_string<Char>>::value &&
!std::is_constructible<detail::std_string_view<Char>, T>::value;
};
template <typename T, typename Char>
struct formatter<
T, Char,
enable_if_t<
fmt::is_range<T, Char>::value
// Work around a bug in MSVC 2017 and earlier.
#if !FMT_MSC_VER || FMT_MSC_VER >= 1927
&& (has_formatter<detail::value_type<T>, format_context>::value ||
detail::has_fallback_formatter<detail::value_type<T>, Char>::value)
#endif
>> {
formatting_range<Char> formatting;
template <typename ParseContext>
FMT_CONSTEXPR auto parse(ParseContext& ctx) -> decltype(ctx.begin()) {
return formatting.parse(ctx);
}
template <typename FormatContext>
typename FormatContext::iterator format(const T& values, FormatContext& ctx) {
auto out = detail::copy(formatting.prefix, ctx.out());
size_t i = 0;
auto view = detail::range_to_view<T>::view(values);
auto it = view.begin();
auto end = view.end();
for (; it != end; ++it) {
if (i > 0) out = detail::write_delimiter(out);
out = detail::write_range_entry<Char>(out, *it);
++i;
}
return detail::copy(formatting.postfix, out);
}
};
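// Illustrative sketch (not part of the library): ranges are printed with
// '[' ']' delimiters ('{' '}' if FMT_DEPRECATED_BRACED_RANGES is defined) and
// string elements are quoted.
//
//   std::vector<int> v = {1, 2, 3};
//   fmt::format("{}", v);  // yields [1, 2, 3]
//   std::vector<std::string> w = {"a", "b"};
//   fmt::format("{}", w);  // yields ["a", "b"]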
template <typename Char, typename... T> struct tuple_join_view : detail::view {
const std::tuple<T...>& tuple;
basic_string_view<Char> sep;
tuple_join_view(const std::tuple<T...>& t, basic_string_view<Char> s)
: tuple(t), sep{s} {}
};
template <typename Char, typename... T>
using tuple_arg_join = tuple_join_view<Char, T...>;
template <typename Char, typename... T>
struct formatter<tuple_join_view<Char, T...>, Char> {
template <typename ParseContext>
FMT_CONSTEXPR auto parse(ParseContext& ctx) -> decltype(ctx.begin()) {
return ctx.begin();
}
template <typename FormatContext>
auto format(const tuple_join_view<Char, T...>& value, FormatContext& ctx) ->
typename FormatContext::iterator {
return format(value, ctx, detail::make_index_sequence<sizeof...(T)>{});
}
private:
template <typename FormatContext, size_t... N>
auto format(const tuple_join_view<Char, T...>& value, FormatContext& ctx,
detail::index_sequence<N...>) ->
typename FormatContext::iterator {
using std::get;
return format_args(value, ctx, get<N>(value.tuple)...);
}
template <typename FormatContext>
auto format_args(const tuple_join_view<Char, T...>&, FormatContext& ctx) ->
typename FormatContext::iterator {
// NOTE: for compilers that support C++17, this empty function instantiation
// can be replaced with a constexpr branch in the variadic overload.
return ctx.out();
}
template <typename FormatContext, typename Arg, typename... Args>
auto format_args(const tuple_join_view<Char, T...>& value, FormatContext& ctx,
const Arg& arg, const Args&... args) ->
typename FormatContext::iterator {
using base = formatter<typename std::decay<Arg>::type, Char>;
auto out = base().format(arg, ctx);
if (sizeof...(Args) > 0) {
out = std::copy(value.sep.begin(), value.sep.end(), out);
ctx.advance_to(out);
return format_args(value, ctx, args...);
}
return out;
}
};
FMT_MODULE_EXPORT_BEGIN
/**
\rst
Returns an object that formats `tuple` with elements separated by `sep`.
**Example**::
std::tuple<int, char> t = {1, 'a'};
fmt::print("{}", fmt::join(t, ", "));
// Output: "1, a"
\endrst
*/
template <typename... T>
FMT_CONSTEXPR auto join(const std::tuple<T...>& tuple, string_view sep)
-> tuple_join_view<char, T...> {
return {tuple, sep};
}
template <typename... T>
FMT_CONSTEXPR auto join(const std::tuple<T...>& tuple,
basic_string_view<wchar_t> sep)
-> tuple_join_view<wchar_t, T...> {
return {tuple, sep};
}
/**
\rst
Returns an object that formats `initializer_list` with elements separated by
`sep`.
**Example**::
fmt::print("{}", fmt::join({1, 2, 3}, ", "));
// Output: "1, 2, 3"
\endrst
*/
template <typename T>
auto join(std::initializer_list<T> list, string_view sep)
-> join_view<const T*, const T*> {
return join(std::begin(list), std::end(list), sep);
}
FMT_MODULE_EXPORT_END
FMT_END_NAMESPACE
#endif // FMT_RANGES_H_

View File

@ -0,0 +1,236 @@
// Formatting library for C++ - optional wchar_t and exotic character support
//
// Copyright (c) 2012 - present, Victor Zverovich
// All rights reserved.
//
// For the license information refer to format.h.
#ifndef FMT_WCHAR_H_
#define FMT_WCHAR_H_
#include <cwchar>
#include <tuple>
#include "format.h"
FMT_BEGIN_NAMESPACE
namespace detail {
template <typename T>
using is_exotic_char = bool_constant<!std::is_same<T, char>::value>;
}
FMT_MODULE_EXPORT_BEGIN
using wstring_view = basic_string_view<wchar_t>;
using wformat_parse_context = basic_format_parse_context<wchar_t>;
using wformat_context = buffer_context<wchar_t>;
using wformat_args = basic_format_args<wformat_context>;
using wmemory_buffer = basic_memory_buffer<wchar_t>;
#if FMT_GCC_VERSION && FMT_GCC_VERSION < 409
// Work around broken conversion on older gcc.
template <typename... Args> using wformat_string = wstring_view;
#else
template <typename... Args>
using wformat_string = basic_format_string<wchar_t, type_identity_t<Args>...>;
#endif
template <> struct is_char<wchar_t> : std::true_type {};
template <> struct is_char<detail::char8_type> : std::true_type {};
template <> struct is_char<char16_t> : std::true_type {};
template <> struct is_char<char32_t> : std::true_type {};
template <typename... Args>
constexpr format_arg_store<wformat_context, Args...> make_wformat_args(
const Args&... args) {
return {args...};
}
inline namespace literals {
constexpr auto operator"" _format(const wchar_t* s, size_t n)
-> detail::udl_formatter<wchar_t> {
return {{s, n}};
}
#if FMT_USE_USER_DEFINED_LITERALS && !FMT_USE_NONTYPE_TEMPLATE_PARAMETERS
constexpr detail::udl_arg<wchar_t> operator"" _a(const wchar_t* s, size_t) {
return {s};
}
#endif
} // namespace literals
template <typename It, typename Sentinel>
auto join(It begin, Sentinel end, wstring_view sep)
-> join_view<It, Sentinel, wchar_t> {
return {begin, end, sep};
}
template <typename Range>
auto join(Range&& range, wstring_view sep)
-> join_view<detail::iterator_t<Range>, detail::sentinel_t<Range>,
wchar_t> {
return join(std::begin(range), std::end(range), sep);
}
template <typename T>
auto join(std::initializer_list<T> list, wstring_view sep)
-> join_view<const T*, const T*, wchar_t> {
return join(std::begin(list), std::end(list), sep);
}
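// Illustrative sketch (not part of the library): the wide join overloads above
// mirror the narrow ones; the separator below is an arbitrary example.
//
//   std::vector<int> v = {1, 2, 3};
//   std::wstring s = fmt::format(L"{}", fmt::join(v, L", "));  // L"1, 2, 3"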
template <typename Char, FMT_ENABLE_IF(!std::is_same<Char, char>::value)>
auto vformat(basic_string_view<Char> format_str,
basic_format_args<buffer_context<type_identity_t<Char>>> args)
-> std::basic_string<Char> {
basic_memory_buffer<Char> buffer;
detail::vformat_to(buffer, format_str, args);
return to_string(buffer);
}
// Pass char_t as a default template parameter instead of using
// std::basic_string<char_t<S>> to reduce the symbol size.
template <typename S, typename... Args, typename Char = char_t<S>,
FMT_ENABLE_IF(!std::is_same<Char, char>::value)>
auto format(const S& format_str, Args&&... args) -> std::basic_string<Char> {
const auto& vargs = fmt::make_args_checked<Args...>(format_str, args...);
return vformat(to_string_view(format_str), vargs);
}
template <typename Locale, typename S, typename Char = char_t<S>,
FMT_ENABLE_IF(detail::is_locale<Locale>::value&&
detail::is_exotic_char<Char>::value)>
inline auto vformat(
const Locale& loc, const S& format_str,
basic_format_args<buffer_context<type_identity_t<Char>>> args)
-> std::basic_string<Char> {
return detail::vformat(loc, to_string_view(format_str), args);
}
template <typename Locale, typename S, typename... Args,
typename Char = char_t<S>,
FMT_ENABLE_IF(detail::is_locale<Locale>::value&&
detail::is_exotic_char<Char>::value)>
inline auto format(const Locale& loc, const S& format_str, Args&&... args)
-> std::basic_string<Char> {
return detail::vformat(loc, to_string_view(format_str),
fmt::make_args_checked<Args...>(format_str, args...));
}
template <typename OutputIt, typename S, typename Char = char_t<S>,
FMT_ENABLE_IF(detail::is_output_iterator<OutputIt, Char>::value&&
detail::is_exotic_char<Char>::value)>
auto vformat_to(OutputIt out, const S& format_str,
basic_format_args<buffer_context<type_identity_t<Char>>> args)
-> OutputIt {
auto&& buf = detail::get_buffer<Char>(out);
detail::vformat_to(buf, to_string_view(format_str), args);
return detail::get_iterator(buf);
}
template <typename OutputIt, typename S, typename... Args,
typename Char = char_t<S>,
FMT_ENABLE_IF(detail::is_output_iterator<OutputIt, Char>::value&&
detail::is_exotic_char<Char>::value)>
inline auto format_to(OutputIt out, const S& fmt, Args&&... args) -> OutputIt {
const auto& vargs = fmt::make_args_checked<Args...>(fmt, args...);
return vformat_to(out, to_string_view(fmt), vargs);
}
template <typename S, typename... Args, typename Char, size_t SIZE,
typename Allocator, FMT_ENABLE_IF(detail::is_string<S>::value)>
FMT_DEPRECATED auto format_to(basic_memory_buffer<Char, SIZE, Allocator>& buf,
const S& format_str, Args&&... args) ->
typename buffer_context<Char>::iterator {
const auto& vargs = fmt::make_args_checked<Args...>(format_str, args...);
detail::vformat_to(buf, to_string_view(format_str), vargs, {});
return detail::buffer_appender<Char>(buf);
}
template <typename Locale, typename S, typename OutputIt, typename... Args,
typename Char = char_t<S>,
FMT_ENABLE_IF(detail::is_output_iterator<OutputIt, Char>::value&&
detail::is_locale<Locale>::value&&
detail::is_exotic_char<Char>::value)>
inline auto vformat_to(
OutputIt out, const Locale& loc, const S& format_str,
basic_format_args<buffer_context<type_identity_t<Char>>> args) -> OutputIt {
auto&& buf = detail::get_buffer<Char>(out);
vformat_to(buf, to_string_view(format_str), args, detail::locale_ref(loc));
return detail::get_iterator(buf);
}
template <
typename OutputIt, typename Locale, typename S, typename... Args,
typename Char = char_t<S>,
bool enable = detail::is_output_iterator<OutputIt, Char>::value&&
detail::is_locale<Locale>::value&& detail::is_exotic_char<Char>::value>
inline auto format_to(OutputIt out, const Locale& loc, const S& format_str,
Args&&... args) ->
typename std::enable_if<enable, OutputIt>::type {
const auto& vargs = fmt::make_args_checked<Args...>(format_str, args...);
return vformat_to(out, loc, to_string_view(format_str), vargs);
}
template <typename OutputIt, typename Char, typename... Args,
FMT_ENABLE_IF(detail::is_output_iterator<OutputIt, Char>::value&&
detail::is_exotic_char<Char>::value)>
inline auto vformat_to_n(
OutputIt out, size_t n, basic_string_view<Char> format_str,
basic_format_args<buffer_context<type_identity_t<Char>>> args)
-> format_to_n_result<OutputIt> {
detail::iterator_buffer<OutputIt, Char, detail::fixed_buffer_traits> buf(out,
n);
detail::vformat_to(buf, format_str, args);
return {buf.out(), buf.count()};
}
template <typename OutputIt, typename S, typename... Args,
typename Char = char_t<S>,
FMT_ENABLE_IF(detail::is_output_iterator<OutputIt, Char>::value&&
detail::is_exotic_char<Char>::value)>
inline auto format_to_n(OutputIt out, size_t n, const S& fmt,
const Args&... args) -> format_to_n_result<OutputIt> {
const auto& vargs = fmt::make_args_checked<Args...>(fmt, args...);
return vformat_to_n(out, n, to_string_view(fmt), vargs);
}
template <typename S, typename... Args, typename Char = char_t<S>,
FMT_ENABLE_IF(detail::is_exotic_char<Char>::value)>
inline auto formatted_size(const S& fmt, Args&&... args) -> size_t {
detail::counting_buffer<Char> buf;
const auto& vargs = fmt::make_args_checked<Args...>(fmt, args...);
detail::vformat_to(buf, to_string_view(fmt), vargs);
return buf.count();
}
inline void vprint(std::FILE* f, wstring_view fmt, wformat_args args) {
wmemory_buffer buffer;
detail::vformat_to(buffer, fmt, args);
buffer.push_back(L'\0');
if (std::fputws(buffer.data(), f) == -1)
FMT_THROW(system_error(errno, FMT_STRING("cannot write to file")));
}
inline void vprint(wstring_view fmt, wformat_args args) {
vprint(stdout, fmt, args);
}
template <typename... T>
void print(std::FILE* f, wformat_string<T...> fmt, T&&... args) {
return vprint(f, wstring_view(fmt), make_wformat_args(args...));
}
template <typename... T> void print(wformat_string<T...> fmt, T&&... args) {
return vprint(wstring_view(fmt), make_wformat_args(args...));
}
/**
Converts *value* to ``std::wstring`` using the default format for type *T*.
*/
template <typename T> inline auto to_wstring(const T& value) -> std::wstring {
return format(FMT_STRING(L"{}"), value);
}
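// Illustrative sketch (not part of the library):
//
//   std::wstring w = fmt::to_wstring(42);        // L"42"
//   fmt::print(L"{} + {} = {}\n", 1, 2, 1 + 2);  // writes wide output to stdout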
FMT_MODULE_EXPORT_END
FMT_END_NAMESPACE
#endif // FMT_WCHAR_H_

View File

@ -0,0 +1,100 @@
module;
#ifndef __cpp_modules
# error Module not supported.
#endif
// put all implementation-provided headers into the global module fragment
// to prevent attachment to this module
#if !defined(_CRT_SECURE_NO_WARNINGS) && defined(_MSC_VER)
# define _CRT_SECURE_NO_WARNINGS
#endif
#if !defined(WIN32_LEAN_AND_MEAN) && defined(_WIN32)
# define WIN32_LEAN_AND_MEAN
#endif
#include <algorithm>
#include <cctype>
#include <cerrno>
#include <chrono>
#include <climits>
#include <clocale>
#include <cmath>
#include <cstdarg>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <ctime>
#include <cwchar>
#include <exception>
#include <functional>
#include <iterator>
#include <limits>
#include <locale>
#include <memory>
#include <ostream>
#include <sstream>
#include <stdexcept>
#include <string>
#include <string_view>
#include <system_error>
#include <type_traits>
#include <utility>
#include <vector>
#if _MSC_VER
# include <intrin.h>
#endif
#if defined __APPLE__ || defined(__FreeBSD__)
# include <xlocale.h>
#endif
#if __has_include(<winapifamily.h>)
# include <winapifamily.h>
#endif
#if (__has_include(<fcntl.h>) || defined(__APPLE__) || \
defined(__linux__)) && \
(!defined(WINAPI_FAMILY) || (WINAPI_FAMILY == WINAPI_FAMILY_DESKTOP_APP))
# include <fcntl.h>
# include <sys/stat.h>
# include <sys/types.h>
# ifndef _WIN32
# include <unistd.h>
# else
# include <io.h>
# endif
#endif
#ifdef _WIN32
# include <windows.h>
#endif
export module fmt;
#define FMT_MODULE_EXPORT export
#define FMT_MODULE_EXPORT_BEGIN export {
#define FMT_MODULE_EXPORT_END }
#define FMT_BEGIN_DETAIL_NAMESPACE \
} \
namespace detail {
#define FMT_END_DETAIL_NAMESPACE \
} \
export {
// all library-provided declarations and definitions
// must be in the module purview to be exported
#include "fmt/args.h"
#include "fmt/chrono.h"
#include "fmt/color.h"
#include "fmt/compile.h"
#include "fmt/format.h"
#include "fmt/os.h"
#include "fmt/printf.h"
#include "fmt/xchar.h"
// gcc doesn't yet implement private module fragments
#if !FMT_GCC_VERSION
module : private;
#endif
#include "format.cc"
#include "os.cc"

View File

@ -0,0 +1,78 @@
// Formatting library for C++
//
// Copyright (c) 2012 - 2016, Victor Zverovich
// All rights reserved.
//
// For the license information refer to format.h.
#include "fmt/format-inl.h"
FMT_BEGIN_NAMESPACE
namespace detail {
template <typename T>
int format_float(char* buf, std::size_t size, const char* format, int precision,
T value) {
#ifdef FMT_FUZZ
if (precision > 100000)
throw std::runtime_error(
"fuzz mode - avoid large allocation inside snprintf");
#endif
// Suppress the warning about nonliteral format string.
int (*snprintf_ptr)(char*, size_t, const char*, ...) = FMT_SNPRINTF;
return precision < 0 ? snprintf_ptr(buf, size, format, value)
: snprintf_ptr(buf, size, format, precision, value);
}
template FMT_API dragonbox::decimal_fp<float> dragonbox::to_decimal(float x)
FMT_NOEXCEPT;
template FMT_API dragonbox::decimal_fp<double> dragonbox::to_decimal(double x)
FMT_NOEXCEPT;
} // namespace detail
// Work around a bug in MSVC 2013 that prevents instantiation of format_float.
int (*instantiate_format_float)(double, int, detail::float_specs,
detail::buffer<char>&) = detail::format_float;
#ifndef FMT_STATIC_THOUSANDS_SEPARATOR
template FMT_API detail::locale_ref::locale_ref(const std::locale& loc);
template FMT_API std::locale detail::locale_ref::get<std::locale>() const;
#endif
// Explicit instantiations for char.
template FMT_API auto detail::thousands_sep_impl(locale_ref)
-> thousands_sep_result<char>;
template FMT_API char detail::decimal_point_impl(locale_ref);
template FMT_API void detail::buffer<char>::append(const char*, const char*);
// DEPRECATED!
// There is no corresponding extern template in format.h because of an
// incompatibility between clang and gcc (#2377).
template FMT_API void detail::vformat_to(
detail::buffer<char>&, string_view,
basic_format_args<FMT_BUFFER_CONTEXT(char)>, detail::locale_ref);
template FMT_API int detail::snprintf_float(double, int, detail::float_specs,
detail::buffer<char>&);
template FMT_API int detail::snprintf_float(long double, int,
detail::float_specs,
detail::buffer<char>&);
template FMT_API int detail::format_float(double, int, detail::float_specs,
detail::buffer<char>&);
template FMT_API int detail::format_float(long double, int, detail::float_specs,
detail::buffer<char>&);
// Explicit instantiations for wchar_t.
template FMT_API auto detail::thousands_sep_impl(locale_ref)
-> thousands_sep_result<wchar_t>;
template FMT_API wchar_t detail::decimal_point_impl(locale_ref);
template FMT_API void detail::buffer<wchar_t>::append(const wchar_t*,
const wchar_t*);
template struct detail::basic_data<void>;
FMT_END_NAMESPACE

contrib/fmt-8.0.1/src/os.cc Normal file
View File

@ -0,0 +1,360 @@
// Formatting library for C++ - optional OS-specific functionality
//
// Copyright (c) 2012 - 2016, Victor Zverovich
// All rights reserved.
//
// For the license information refer to format.h.
// Disable bogus MSVC warnings.
#if !defined(_CRT_SECURE_NO_WARNINGS) && defined(_MSC_VER)
# define _CRT_SECURE_NO_WARNINGS
#endif
#include "fmt/os.h"
#include <climits>
#if FMT_USE_FCNTL
# include <sys/stat.h>
# include <sys/types.h>
# ifndef _WIN32
# include <unistd.h>
# else
# ifndef WIN32_LEAN_AND_MEAN
# define WIN32_LEAN_AND_MEAN
# endif
# include <io.h>
# define O_CREAT _O_CREAT
# define O_TRUNC _O_TRUNC
# ifndef S_IRUSR
# define S_IRUSR _S_IREAD
# endif
# ifndef S_IWUSR
# define S_IWUSR _S_IWRITE
# endif
# ifdef __MINGW32__
# define _SH_DENYNO 0x40
# endif
# endif // _WIN32
#endif // FMT_USE_FCNTL
#ifdef _WIN32
# include <windows.h>
#endif
#ifdef fileno
# undef fileno
#endif
namespace {
#ifdef _WIN32
// Return type of read and write functions.
using rwresult = int;
// On Windows the count argument to read and write is unsigned, so convert
// it from size_t preventing integer overflow.
inline unsigned convert_rwcount(std::size_t count) {
return count <= UINT_MAX ? static_cast<unsigned>(count) : UINT_MAX;
}
#elif FMT_USE_FCNTL
// Return type of read and write functions.
using rwresult = ssize_t;
inline std::size_t convert_rwcount(std::size_t count) { return count; }
#endif
} // namespace
FMT_BEGIN_NAMESPACE
#ifdef _WIN32
detail::utf16_to_utf8::utf16_to_utf8(basic_string_view<wchar_t> s) {
if (int error_code = convert(s)) {
FMT_THROW(windows_error(error_code,
"cannot convert string from UTF-16 to UTF-8"));
}
}
int detail::utf16_to_utf8::convert(basic_string_view<wchar_t> s) {
if (s.size() > INT_MAX) return ERROR_INVALID_PARAMETER;
int s_size = static_cast<int>(s.size());
if (s_size == 0) {
// WideCharToMultiByte does not support zero length, handle separately.
buffer_.resize(1);
buffer_[0] = 0;
return 0;
}
int length = WideCharToMultiByte(CP_UTF8, 0, s.data(), s_size, nullptr, 0,
nullptr, nullptr);
if (length == 0) return GetLastError();
buffer_.resize(length + 1);
length = WideCharToMultiByte(CP_UTF8, 0, s.data(), s_size, &buffer_[0],
length, nullptr, nullptr);
if (length == 0) return GetLastError();
buffer_[length] = 0;
return 0;
}
namespace detail {
class system_message {
system_message(const system_message&) = delete;
void operator=(const system_message&) = delete;
unsigned long result_;
wchar_t* message_;
static bool is_whitespace(wchar_t c) FMT_NOEXCEPT {
return c == L' ' || c == L'\n' || c == L'\r' || c == L'\t' || c == L'\0';
}
public:
explicit system_message(unsigned long error_code)
: result_(0), message_(nullptr) {
result_ = FormatMessageW(
FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM |
FORMAT_MESSAGE_IGNORE_INSERTS,
nullptr, error_code, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
reinterpret_cast<wchar_t*>(&message_), 0, nullptr);
if (result_ != 0) {
while (result_ != 0 && is_whitespace(message_[result_ - 1])) {
--result_;
}
}
}
~system_message() { LocalFree(message_); }
explicit operator bool() const FMT_NOEXCEPT { return result_ != 0; }
operator basic_string_view<wchar_t>() const FMT_NOEXCEPT {
return basic_string_view<wchar_t>(message_, result_);
}
};
class utf8_system_category final : public std::error_category {
public:
const char* name() const FMT_NOEXCEPT override { return "system"; }
std::string message(int error_code) const override {
system_message msg(error_code);
if (msg) {
utf16_to_utf8 utf8_message;
if (utf8_message.convert(msg) == ERROR_SUCCESS) {
return utf8_message.str();
}
}
return "unknown error";
}
};
} // namespace detail
FMT_API const std::error_category& system_category() FMT_NOEXCEPT {
static const detail::utf8_system_category category;
return category;
}
std::system_error vwindows_error(int err_code, string_view format_str,
format_args args) {
auto ec = std::error_code(err_code, system_category());
return std::system_error(ec, vformat(format_str, args));
}
void detail::format_windows_error(detail::buffer<char>& out, int error_code,
const char* message) FMT_NOEXCEPT {
FMT_TRY {
system_message msg(error_code);
if (msg) {
utf16_to_utf8 utf8_message;
if (utf8_message.convert(msg) == ERROR_SUCCESS) {
format_to(buffer_appender<char>(out), "{}: {}", message, utf8_message);
return;
}
}
}
FMT_CATCH(...) {}
format_error_code(out, error_code, message);
}
void report_windows_error(int error_code, const char* message) FMT_NOEXCEPT {
report_error(detail::format_windows_error, error_code, message);
}
#endif // _WIN32
buffered_file::~buffered_file() FMT_NOEXCEPT {
if (file_ && FMT_SYSTEM(fclose(file_)) != 0)
report_system_error(errno, "cannot close file");
}
buffered_file::buffered_file(cstring_view filename, cstring_view mode) {
FMT_RETRY_VAL(file_, FMT_SYSTEM(fopen(filename.c_str(), mode.c_str())),
nullptr);
if (!file_)
FMT_THROW(system_error(errno, "cannot open file {}", filename.c_str()));
}
void buffered_file::close() {
if (!file_) return;
int result = FMT_SYSTEM(fclose(file_));
file_ = nullptr;
if (result != 0) FMT_THROW(system_error(errno, "cannot close file"));
}
// A macro used to prevent expansion of fileno on broken versions of MinGW.
#define FMT_ARGS
int buffered_file::fileno() const {
int fd = FMT_POSIX_CALL(fileno FMT_ARGS(file_));
if (fd == -1) FMT_THROW(system_error(errno, "cannot get file descriptor"));
return fd;
}
#if FMT_USE_FCNTL
file::file(cstring_view path, int oflag) {
int mode = S_IRUSR | S_IWUSR;
# if defined(_WIN32) && !defined(__MINGW32__)
fd_ = -1;
FMT_POSIX_CALL(sopen_s(&fd_, path.c_str(), oflag, _SH_DENYNO, mode));
# else
FMT_RETRY(fd_, FMT_POSIX_CALL(open(path.c_str(), oflag, mode)));
# endif
if (fd_ == -1)
FMT_THROW(system_error(errno, "cannot open file {}", path.c_str()));
}
file::~file() FMT_NOEXCEPT {
// Don't retry close in case of EINTR!
// See http://linux.derkeiler.com/Mailing-Lists/Kernel/2005-09/3000.html
if (fd_ != -1 && FMT_POSIX_CALL(close(fd_)) != 0)
report_system_error(errno, "cannot close file");
}
void file::close() {
if (fd_ == -1) return;
// Don't retry close in case of EINTR!
// See http://linux.derkeiler.com/Mailing-Lists/Kernel/2005-09/3000.html
int result = FMT_POSIX_CALL(close(fd_));
fd_ = -1;
if (result != 0) FMT_THROW(system_error(errno, "cannot close file"));
}
long long file::size() const {
# ifdef _WIN32
// Use GetFileSize instead of GetFileSizeEx for the case when _WIN32_WINNT
// is less than 0x0500 as is the case with some default MinGW builds.
// Both functions support large file sizes.
DWORD size_upper = 0;
HANDLE handle = reinterpret_cast<HANDLE>(_get_osfhandle(fd_));
DWORD size_lower = FMT_SYSTEM(GetFileSize(handle, &size_upper));
if (size_lower == INVALID_FILE_SIZE) {
DWORD error = GetLastError();
if (error != NO_ERROR)
FMT_THROW(windows_error(GetLastError(), "cannot get file size"));
}
unsigned long long long_size = size_upper;
return (long_size << sizeof(DWORD) * CHAR_BIT) | size_lower;
# else
using Stat = struct stat;
Stat file_stat = Stat();
if (FMT_POSIX_CALL(fstat(fd_, &file_stat)) == -1)
FMT_THROW(system_error(errno, "cannot get file attributes"));
static_assert(sizeof(long long) >= sizeof(file_stat.st_size),
"return type of file::size is not large enough");
return file_stat.st_size;
# endif
}
std::size_t file::read(void* buffer, std::size_t count) {
rwresult result = 0;
FMT_RETRY(result, FMT_POSIX_CALL(read(fd_, buffer, convert_rwcount(count))));
if (result < 0) FMT_THROW(system_error(errno, "cannot read from file"));
return detail::to_unsigned(result);
}
std::size_t file::write(const void* buffer, std::size_t count) {
rwresult result = 0;
FMT_RETRY(result, FMT_POSIX_CALL(write(fd_, buffer, convert_rwcount(count))));
if (result < 0) FMT_THROW(system_error(errno, "cannot write to file"));
return detail::to_unsigned(result);
}
file file::dup(int fd) {
// Don't retry as dup doesn't return EINTR.
// http://pubs.opengroup.org/onlinepubs/009695399/functions/dup.html
int new_fd = FMT_POSIX_CALL(dup(fd));
if (new_fd == -1)
FMT_THROW(system_error(errno, "cannot duplicate file descriptor {}", fd));
return file(new_fd);
}
void file::dup2(int fd) {
int result = 0;
FMT_RETRY(result, FMT_POSIX_CALL(dup2(fd_, fd)));
if (result == -1) {
FMT_THROW(system_error(errno, "cannot duplicate file descriptor {} to {}",
fd_, fd));
}
}
void file::dup2(int fd, std::error_code& ec) FMT_NOEXCEPT {
int result = 0;
FMT_RETRY(result, FMT_POSIX_CALL(dup2(fd_, fd)));
if (result == -1) ec = std::error_code(errno, std::generic_category());
}
void file::pipe(file& read_end, file& write_end) {
// Close the descriptors first to make sure that assignments don't throw
// and there are no leaks.
read_end.close();
write_end.close();
int fds[2] = {};
# ifdef _WIN32
// Make the default pipe capacity same as on Linux 2.6.11+.
enum { DEFAULT_CAPACITY = 65536 };
int result = FMT_POSIX_CALL(pipe(fds, DEFAULT_CAPACITY, _O_BINARY));
# else
// Don't retry as the pipe function doesn't return EINTR.
// http://pubs.opengroup.org/onlinepubs/009696799/functions/pipe.html
int result = FMT_POSIX_CALL(pipe(fds));
# endif
if (result != 0) FMT_THROW(system_error(errno, "cannot create pipe"));
// The following assignments don't throw because read_fd and write_fd
// are closed.
read_end = file(fds[0]);
write_end = file(fds[1]);
}
buffered_file file::fdopen(const char* mode) {
// Don't retry as fdopen doesn't return EINTR.
# if defined(__MINGW32__) && defined(_POSIX_)
FILE* f = ::fdopen(fd_, mode);
# else
FILE* f = FMT_POSIX_CALL(fdopen(fd_, mode));
# endif
if (!f)
FMT_THROW(
system_error(errno, "cannot associate stream with file descriptor"));
buffered_file bf(f);
fd_ = -1;
return bf;
}
long getpagesize() {
# ifdef _WIN32
SYSTEM_INFO si;
GetSystemInfo(&si);
return si.dwPageSize;
# else
long size = FMT_POSIX_CALL(sysconf(_SC_PAGESIZE));
if (size < 0) FMT_THROW(system_error(errno, "cannot get memory page size"));
return size;
# endif
}
FMT_API void ostream::grow(size_t) {
if (this->size() == this->capacity()) flush();
}
#endif // FMT_USE_FCNTL
FMT_END_NAMESPACE


@ -0,0 +1,7 @@
# A CMake script to find SetEnv.cmd.
find_program(WINSDK_SETENV NAMES SetEnv.cmd
PATHS "[HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Microsoft SDKs\\Windows;CurrentInstallFolder]/bin")
if (WINSDK_SETENV AND PRINT_PATH)
execute_process(COMMAND ${CMAKE_COMMAND} -E echo "${WINSDK_SETENV}")
endif ()


@ -0,0 +1,26 @@
# This module provides a function for joining paths
# known from most languages
#
# Original license:
# SPDX-License-Identifier: (MIT OR CC0-1.0)
# Explicit permission given to distribute this module under
# the terms of the project as described in /LICENSE.rst.
# Copyright 2020 Jan Tojnar
# https://github.com/jtojnar/cmake-snips
#
# Modelled after Python's os.path.join
# https://docs.python.org/3.7/library/os.path.html#os.path.join
# Windows not supported
function(join_paths joined_path first_path_segment)
set(temp_path "${first_path_segment}")
foreach(current_segment IN LISTS ARGN)
if(NOT ("${current_segment}" STREQUAL ""))
if(IS_ABSOLUTE "${current_segment}")
set(temp_path "${current_segment}")
else()
set(temp_path "${temp_path}/${current_segment}")
endif()
endif()
endforeach()
set(${joined_path} "${temp_path}" PARENT_SCOPE)
endfunction()


@ -0,0 +1,70 @@
# C++14 feature support detection
include(CheckCXXSourceCompiles)
include(CheckCXXCompilerFlag)
if (NOT CMAKE_CXX_STANDARD)
set(CMAKE_CXX_STANDARD 11)
endif()
message(STATUS "CXX_STANDARD: ${CMAKE_CXX_STANDARD}")
if (CMAKE_CXX_STANDARD EQUAL 20)
check_cxx_compiler_flag(-std=c++20 has_std_20_flag)
check_cxx_compiler_flag(-std=c++2a has_std_2a_flag)
if (has_std_20_flag)
set(CXX_STANDARD_FLAG -std=c++20)
elseif (has_std_2a_flag)
set(CXX_STANDARD_FLAG -std=c++2a)
endif ()
elseif (CMAKE_CXX_STANDARD EQUAL 17)
check_cxx_compiler_flag(-std=c++17 has_std_17_flag)
check_cxx_compiler_flag(-std=c++1z has_std_1z_flag)
if (has_std_17_flag)
set(CXX_STANDARD_FLAG -std=c++17)
elseif (has_std_1z_flag)
set(CXX_STANDARD_FLAG -std=c++1z)
endif ()
elseif (CMAKE_CXX_STANDARD EQUAL 14)
check_cxx_compiler_flag(-std=c++14 has_std_14_flag)
check_cxx_compiler_flag(-std=c++1y has_std_1y_flag)
if (has_std_14_flag)
set(CXX_STANDARD_FLAG -std=c++14)
elseif (has_std_1y_flag)
set(CXX_STANDARD_FLAG -std=c++1y)
endif ()
elseif (CMAKE_CXX_STANDARD EQUAL 11)
check_cxx_compiler_flag(-std=c++11 has_std_11_flag)
check_cxx_compiler_flag(-std=c++0x has_std_0x_flag)
if (has_std_11_flag)
set(CXX_STANDARD_FLAG -std=c++11)
elseif (has_std_0x_flag)
set(CXX_STANDARD_FLAG -std=c++0x)
endif ()
endif ()
set(CMAKE_REQUIRED_FLAGS ${CXX_STANDARD_FLAG})
# Check if user-defined literals are available
check_cxx_source_compiles("
void operator\"\" _udl(long double);
int main() {}"
SUPPORTS_USER_DEFINED_LITERALS)
if (NOT SUPPORTS_USER_DEFINED_LITERALS)
set (SUPPORTS_USER_DEFINED_LITERALS OFF)
endif ()
# Check if <variant> is available
set(CMAKE_REQUIRED_FLAGS -std=c++1z)
check_cxx_source_compiles("
#include <variant>
int main() {}"
FMT_HAS_VARIANT)
if (NOT FMT_HAS_VARIANT)
set (FMT_HAS_VARIANT OFF)
endif ()
set(CMAKE_REQUIRED_FLAGS )


@ -0,0 +1,4 @@
@PACKAGE_INIT@
include(${CMAKE_CURRENT_LIST_DIR}/@targets_export_name@.cmake)
check_required_components(fmt)


@ -0,0 +1,11 @@
prefix=@CMAKE_INSTALL_PREFIX@
exec_prefix=@CMAKE_INSTALL_PREFIX@
libdir=@libdir_for_pc_file@
includedir=@includedir_for_pc_file@
Name: fmt
Description: A modern formatting library
Version: @FMT_VERSION@
Libs: -L${libdir} -l@FMT_LIB_NAME@
Cflags: -I${includedir}


@ -66,6 +66,8 @@
and then exits. and then exits.
--no-status Disables the initial status check done when starting --no-status Disables the initial status check done when starting
the CLI. the CLI.
--api-version APIVERSION
Specifies the version of the API for the CLI to use.
--tls_certificate_file CERTFILE --tls_certificate_file CERTFILE
The path of a file containing the TLS certificate and CA The path of a file containing the TLS certificate and CA
chain. chain.
@ -109,6 +111,8 @@
and then exits. and then exits.
--no-status Disables the initial status check done when starting --no-status Disables the initial status check done when starting
the CLI. the CLI.
--api-version APIVERSION
Specifies the version of the API for the CLI to use.
--tls_certificate_file CERTFILE --tls_certificate_file CERTFILE
The path of a file containing the TLS certificate and CA The path of a file containing the TLS certificate and CA
chain. chain.
@ -152,6 +156,8 @@
and then exits. and then exits.
--no-status Disables the initial status check done when starting --no-status Disables the initial status check done when starting
the CLI. the CLI.
--api-version APIVERSION
Specifies the version of the API for the CLI to use.
--tls_certificate_file CERTFILE --tls_certificate_file CERTFILE
The path of a file containing the TLS certificate and CA The path of a file containing the TLS certificate and CA
chain. chain.
@ -195,6 +201,8 @@
and then exits. and then exits.
--no-status Disables the initial status check done when starting --no-status Disables the initial status check done when starting
the CLI. the CLI.
--api-version APIVERSION
Specifies the version of the API for the CLI to use.
--tls_certificate_file CERTFILE --tls_certificate_file CERTFILE
The path of a file containing the TLS certificate and CA The path of a file containing the TLS certificate and CA
chain. chain.


@ -0,0 +1,111 @@
# Transaction State Store (txnStateStore)
This document describes the transaction state store (often referred to as `txnStateStore` in the code) in FDB. The transaction state store keeps important metadata about the database: it is used to bootstrap the database, to guide the transaction system when persisting writes (i.e., to help assign storage tags to mutations at commit proxies), and to manage data (i.e., shard) movement metadata. This metadata has to be consistent across many processes and persistent across recoveries.
Acknowledgment: A lot of contents are taken from [Evan's FDB brownbag talk](https://drive.google.com/file/d/15UvKiNc-jSFfDGygNmLQP_d4b14X3DAS/).
## What information is stored in transaction state store?
The information includes: shard mapping (key range to storage server mapping, i.e.,
`keyServers`), storage server tags (`serverTags`), tagLocalityList, storage server tag
history, database locked flag, metadata version, mustContainSystemMutations, coordinators,
storage server interface (`serverList`), database configurations, TSS mappings and
quarantines, backup apply mutation ranges and log ranges, etc.
The transaction state store's information is kept in the system key space, i.e., under the
`\xff` prefix. Note all data in the system key space are saved on storage servers. The
`txnStateStore` is only a part of the `\xff` key space, and is additionally kept in the
memory of commit proxies as well as disks of the log system (i.e., TLogs). Changes to
the `txnStateStore` are special mutations to the `\xff` key space; they are referred to,
inconsistently, as "metadata mutations" in commit proxies and "state mutations" in
Resolvers.
## Why do we need transaction state store?
When bootstrapping an FDB cluster, the new master (i.e., the sequencer) role recruits a
new transaction system and initializes them. In particular, the transaction state store
is first read by the master from previous generation's log system, and then broadcast to
all commit proxies of the new transaction system. After initializing `txnStateStore`, these
commit proxies know how to assign storage server tags to mutations: `txnStateStore`
contains the shard map from key range to storage servers; commit proxies use the shard
map to find and attach the destination storage tags for each mutation.
## How is transaction state store replicated?
The `txnStateStore` is replicated in all commit proxies' memories. It is very important
that `txnStateStore` data are consistent; otherwise, a shard change issued by one commit
proxy could result in a situation where different proxies think they should send a
mutation to different storage servers, thus causing data corruption.
FDB solves this problem by state machine replication: all commit proxies start with the
same `txnStateStore` data (from master broadcast), and apply the same sequence of mutations.
Because commits happen at all proxies, it is difficult to maintain the same order as well
as minimize the communication among them. Fortunately, every transaction has to send a
conflict resolution request to all Resolvers and they process transactions in strict order
of commit versions. Leveraging this mechanism, each commit proxy sends all metadata
(i.e., system key) mutations to all Resolvers. Resolvers keep these mutations in memory
and forward them to other commit proxies in their resolution responses. Each commit proxy
receives the resolution responses, along with the metadata mutations that happened at other
proxies before its commit version, and applies all of these metadata mutations in commit order.
Finally, each proxy writes only the metadata mutations from its own transaction batch to TLogs;
that is, it does not write other proxies' metadata mutations to TLogs, which avoids duplicate writes.
It's worth calling out that everything in the `txnStateStore` is stored at some storage
servers and a client (e.g., `fdbcli`) can read from these storage servers. During the
commit process, commit proxies parse all mutations in a batch of transactions, and apply
changes (i.e., metadata mutations) to their in-memory copies of `txnStateStore`. Later, the
same changes are applied at storage servers for persistence. Additionally, the process
to store `txnStateStore` at log system is described below.
Notably `applyMetadataMutations()` is the function that commit proxies use to make changes
to `txnStateStore`. The key ranges stored in `txnStateStore` include `[\xff, \xff\x02)` and
`[\xff\x03, \xff\xff)`, but not everything in these ranges. No data in the range
`[\xff\x02, \xff\x03)` belongs to `txnStateStore`; e.g., the `\xff\x02` prefix is used for
backup data, and mutations there are *NOT* metadata mutations.
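As a minimal sketch (using the Python bindings; `\xff/keyServers/` is the prefix of the shard map referenced above), a client can opt into system key access and read a few of these keys directly from the storage servers that hold them:

```python
import fdb

fdb.api_version(630)
db = fdb.open()

@fdb.transactional
def sample_key_servers(tr, limit=5):
    # Reading `\xff`-prefixed keys requires explicitly opting into system key access.
    tr.options.set_access_system_keys()
    # The shard map lives under \xff/keyServers/; each key encodes a shard boundary.
    return [kv.key for kv in tr.get_range_startswith(b"\xff/keyServers/", limit=limit)]

print(sample_key_servers(db))
```

These are the same keys that commit proxies keep in their in-memory copy of `txnStateStore`.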
## How is transaction state store persisted at log system?
When a commit proxy writes metadata mutations to the log system, the proxy assigns a
"txs" tag to the mutation. Depending on FDB versions, the "txs" tag can be one special
tag `txsTag{ tagLocalitySpecial, 1 }` for `TLogVersion::V3` (FDB 6.1) or a randomized
"txs" tag for `TLogVersion::V4` (FDB 6.2 and later) and larger. The idea of randomized
"txs" tag is to spread metadata mutations to all TLogs for faster parallel recovery of
`txnStateStore`.
At TLogs, all mutation data are indexed by tags. "txs" tag data is special, since it is
only peeked by the master during the transaction system recovery.
See [TLog Spilling doc](tlog-spilling.md.html) for more detailed discussion on the
topic of spilling "txs" data. In short, `txsTag` is spilled by value.
"txs" tag data is indexed and stored in both primary TLogs and satellite TLogs.
Note satellite TLogs only index log router tags and "txs" tags.
## How is transaction state store implemented?
`txnStateStore` is kept in memory at commit proxies using `KeyValueStoreMemory`, which
uses `LogSystemDiskQueueAdapter` to be durable with the log system. As a result, reading
from `txnStateStore` never blocks, which means the futures returned by read calls should
always be ready. Writes to `txnStateStore` are first buffered by the `LogSystemDiskQueueAdapter`
in memory. After a commit proxy pushes transaction data to the log system and the data
becomes durable, the proxy clears the buffered data in `LogSystemDiskQueueAdapter`.
* Master reads `txnStateStore` from old log system: https://github.com/apple/foundationdb/blob/6281e647784e74dccb3a6cb88efb9d8b9cccd376/fdbserver/masterserver.actor.cpp#L928-L931
* Master broadcasts `txnStateStore` to commit proxies: https://github.com/apple/foundationdb/blob/6281e647784e74dccb3a6cb88efb9d8b9cccd376/fdbserver/masterserver.actor.cpp#L940-L968
* Commit proxies receive the txnStateStore broadcast and build the `keyInfo` map: https://github.com/apple/foundationdb/blob/6281e647784e74dccb3a6cb88efb9d8b9cccd376/fdbserver/CommitProxyServer.actor.cpp#L1886-L1927
* Look up `keyInfo` map for `GetKeyServerLocationsRequest`: https://github.com/apple/foundationdb/blob/6281e647784e74dccb3a6cb88efb9d8b9cccd376/fdbserver/CommitProxyServer.actor.cpp#L1464
* Look up the `keyInfo` map to assign storage tags to mutations: https://github.com/apple/foundationdb/blob/6281e647784e74dccb3a6cb88efb9d8b9cccd376/fdbserver/CommitProxyServer.actor.cpp#L926 and https://github.com/apple/foundationdb/blob/6281e647784e74dccb3a6cb88efb9d8b9cccd376/fdbserver/CommitProxyServer.actor.cpp#L965-L1010
* Commit proxies recover database lock flag and metadata version: https://github.com/apple/foundationdb/blob/6281e647784e74dccb3a6cb88efb9d8b9cccd376/fdbserver/CommitProxyServer.actor.cpp#L1942-L1944
* Commit proxies add metadata mutations to Resolver request: https://github.com/apple/foundationdb/blob/6281e647784e74dccb3a6cb88efb9d8b9cccd376/fdbserver/CommitProxyServer.actor.cpp#L137-L140
* Resolvers keep these mutations in memory: https://github.com/apple/foundationdb/blob/6281e647784e74dccb3a6cb88efb9d8b9cccd376/fdbserver/Resolver.actor.cpp#L220-L230
* Resolvers copy metadata mutations to resolution reply message: https://github.com/apple/foundationdb/blob/6281e647784e74dccb3a6cb88efb9d8b9cccd376/fdbserver/Resolver.actor.cpp#L244-L249
* Commit proxies apply all metadata mutations (including those from other proxies) in the commit order: https://github.com/apple/foundationdb/blob/6281e647784e74dccb3a6cb88efb9d8b9cccd376/fdbserver/CommitProxyServer.actor.cpp#L740-L770
* Commit proxies only write metadata mutations from their own transaction batch to TLogs: https://github.com/apple/foundationdb/blob/6281e647784e74dccb3a6cb88efb9d8b9cccd376/fdbserver/CommitProxyServer.actor.cpp#L772-L774 adds mutations to `storeCommits`. Later in `postResolution()`, https://github.com/apple/foundationdb/blob/6281e647784e74dccb3a6cb88efb9d8b9cccd376/fdbserver/CommitProxyServer.actor.cpp#L1162-L1176, only the last one in `storeCommits` is sent to TLogs.
* Commit proxies clear the buffered data in `LogSystemDiskQueueAdapter` after TLog push: https://github.com/apple/foundationdb/blob/6281e647784e74dccb3a6cb88efb9d8b9cccd376/fdbserver/CommitProxyServer.actor.cpp#L1283-L1287


@ -53,7 +53,7 @@ copyright = u'2013-2021 Apple, Inc and the FoundationDB project authors'
# Load the version information from 'versions.target' # Load the version information from 'versions.target'
import xml.etree.ElementTree as ET import xml.etree.ElementTree as ET
version_path = os.path.join(os.path.dirname(__file__), '..', '..', 'versions.target') version_path = os.path.join(os.path.dirname(sys.executable), '..', '..', '..', 'versions.target')
tree = ET.parse(version_path) tree = ET.parse(version_path)
root = tree.getroot() root = tree.getroot()


@ -588,3 +588,43 @@
.. |locality-get-addresses-for-key-blurb| replace:: .. |locality-get-addresses-for-key-blurb| replace::
Returns a list of public network addresses as strings, one for each of the storage servers responsible for storing ``key`` and its associated value. Returns a list of public network addresses as strings, one for each of the storage servers responsible for storing ``key`` and its associated value.
.. |option-knob| replace::
Sets internal tuning or debugging knobs. The argument to this function should be a string representing the knob name and the value, e.g. "transaction_size_limit=1000".
.. |option-tls-verify-peers| replace::
Sets the peer certificate field verification criteria.
.. |option-tls-ca-bytes| replace::
Sets the certificate authority bundle.
.. |option-tls-ca-path| replace::
Sets the file from which to load the certificate authority bundle.
.. |option-tls-password| replace::
Sets the passphrase for encrypted private key. Password should be set before setting the key for the password to be used.
.. |option-set-disable-local-client| replace::
Prevents connections through the local client, allowing only connections through externally loaded client libraries.
.. |option-set-client-threads-per-version| replace::
Spawns multiple worker threads for each version of the client that is loaded. Setting this to a number greater than one implies disable_local_client.
.. |option-disable-client-statistics-logging| replace::
Disables logging of client statistics, such as sampled transaction activity.
.. |option-enable-run-loop-profiling| replace::
Enables debugging feature to perform run loop profiling. Requires trace logging to be enabled. WARNING: this feature is not recommended for use in production.
.. |option-set-distributed-client-tracer| replace::
Sets a tracer to run on the client. Should be set to the same value as the tracer set on the server.


@ -125,6 +125,10 @@ After importing the ``fdb`` module and selecting an API version, you probably wa
.. note:: |network-options-warning| .. note:: |network-options-warning|
.. method :: fdb.options.set_knob(knob)
|option-knob|
.. method :: fdb.options.set_trace_enable( output_directory=None ) .. method :: fdb.options.set_trace_enable( output_directory=None )
|option-trace-enable-blurb| |option-trace-enable-blurb|
@ -189,6 +193,48 @@ After importing the ``fdb`` module and selecting an API version, you probably wa
|option-tls-key-bytes| |option-tls-key-bytes|
.. method :: fdb.options.set_tls_verify_peers(verification_pattern)
|option-tls-verify-peers|
.. method :: fdb.options.set_tls_ca_bytes(ca_bundle)
|option-tls-ca-bytes|
.. method :: fdb.options.set_tls_ca_path(path)
|option-tls-ca-path|
.. method :: fdb.options.set_tls_password(password)
|option-tls-password|
.. method :: fdb.options.set_disable_multi_version_client_api()
|option-disable-multi-version-client-api|
.. method :: fdb.options.set_disable_local_client()
|option-set-disable-local-client|
.. method :: fdb.options.set_client_threads_per_version(number)
|option-set-client-threads-per-version|
.. method :: fdb.options.set_disable_client_statistics_logging()
|option-disable-client-statistics-logging|
.. method :: fdb.options.set_enable_run_loop_profiling()
|option-enable-run-loop-profiling|
.. method :: fdb.options.set_distributed_client_tracer(tracer_type)
|option-set-distributed-client-tracer|
Please refer to fdboptions.py (generated) for a comprehensive list of options.
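As a minimal sketch (the option values below are placeholders, not recommendations), several of these network options can be combined before the network is started::

    import fdb

    fdb.api_version(630)

    # Network options are set before fdb.open(), which starts the client network.
    fdb.options.set_knob("transaction_size_limit=1000")
    fdb.options.set_tls_verify_peers(b"Check.Valid=1")
    fdb.options.set_client_threads_per_version(2)  # implies disable_local_client

    db = fdb.open()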
.. _api-python-keys: .. _api-python-keys:
Keys and values Keys and values


@ -164,6 +164,10 @@ If the ``failed`` keyword is specified, the address is marked as failed and adde
For more information on excluding servers, see :ref:`removing-machines-from-a-cluster`. For more information on excluding servers, see :ref:`removing-machines-from-a-cluster`.
Warning about potential data loss with the ``failed`` option: if a server is the last one in some team(s), excluding it with ``failed`` will lose all data in those team(s); hence, ``failed`` should only be set when the server(s) have permanently failed.
In the case that all servers of a team have failed permanently, excluding all of them will clean up the corresponding keyrange and fix the invalid metadata. The keyrange will then be assigned to a new team as an empty shard.
exit exit
---- ----


@ -700,7 +700,7 @@
"ssd", "ssd",
"ssd-1", "ssd-1",
"ssd-2", "ssd-2",
"ssd-redwood-experimental", "ssd-redwood-1-experimental",
"ssd-rocksdb-experimental", "ssd-rocksdb-experimental",
"memory", "memory",
"memory-1", "memory-1",
@ -713,7 +713,7 @@
"ssd", "ssd",
"ssd-1", "ssd-1",
"ssd-2", "ssd-2",
"ssd-redwood-experimental", "ssd-redwood-1-experimental",
"ssd-rocksdb-experimental", "ssd-rocksdb-experimental",
"memory", "memory",
"memory-1", "memory-1",


@ -2,6 +2,13 @@
Release Notes Release Notes
############# #############
6.3.22
======
* Added histograms to client GRV batcher. `(PR #5760) <https://github.com/apple/foundationdb/pull/5760>`_
* Added FastAlloc memory utilization trace. `(PR #5759) <https://github.com/apple/foundationdb/pull/5759>`_
* Added locality cache size to TransactionMetrics. `(PR #5771) <https://github.com/apple/foundationdb/pull/5771>`_
* Added a new feature that allows FDB to fail over to the remote DC when the primary is experiencing a massive grey failure. This feature is turned off by default. `(PR #5774) <https://github.com/apple/foundationdb/pull/5774>`_
6.3.21 6.3.21
====== ======
* Added a ThreadID field to all trace events for the purpose of multi-threaded client debugging. `(PR #5665) <https://github.com/apple/foundationdb/pull/5665>`_ * Added a ThreadID field to all trace events for the purpose of multi-threaded client debugging. `(PR #5665) <https://github.com/apple/foundationdb/pull/5665>`_


@ -30,6 +30,7 @@ Features
* Improved the efficiency with which storage servers replicate data between themselves. `(PR #5017) <https://github.com/apple/foundationdb/pull/5017>`_ * Improved the efficiency with which storage servers replicate data between themselves. `(PR #5017) <https://github.com/apple/foundationdb/pull/5017>`_
* Added support to ``exclude command`` to exclude based on locality match. `(PR #5113) <https://github.com/apple/foundationdb/pull/5113>`_ * Added support to ``exclude command`` to exclude based on locality match. `(PR #5113) <https://github.com/apple/foundationdb/pull/5113>`_
* Add the ``trace_partial_file_suffix`` network option. This option will give unfinished trace files a special suffix to indicate they're not complete yet. When the trace file is complete, it is renamed to remove the suffix. `(PR #5328) <https://github.com/apple/foundationdb/pull/5328>`_ * Add the ``trace_partial_file_suffix`` network option. This option will give unfinished trace files a special suffix to indicate they're not complete yet. When the trace file is complete, it is renamed to remove the suffix. `(PR #5328) <https://github.com/apple/foundationdb/pull/5328>`_
* Added "get range and flat map" feature with new APIs (see Bindings section). Storage servers are able to generate the keys in the queries based on another query. With this, upper layer can push some computations down to FDB, to improve latency and bandwidth when read. `(PR #5609) <https://github.com/apple/foundationdb/pull/5609>`_
Performance Performance
----------- -----------
@ -86,6 +87,8 @@ Bindings
* C: Added a function, ``fdb_database_create_snapshot``, to create a snapshot of the database. `(PR #4241) <https://github.com/apple/foundationdb/pull/4241/files>`_ * C: Added a function, ``fdb_database_create_snapshot``, to create a snapshot of the database. `(PR #4241) <https://github.com/apple/foundationdb/pull/4241/files>`_
* C: Added ``fdb_database_get_main_thread_busyness`` function to report how busy a client's main thread is. `(PR #4504) <https://github.com/apple/foundationdb/pull/4504>`_ * C: Added ``fdb_database_get_main_thread_busyness`` function to report how busy a client's main thread is. `(PR #4504) <https://github.com/apple/foundationdb/pull/4504>`_
* Java: Added ``Database.getMainThreadBusyness`` function to report how busy a client's main thread is. `(PR #4564) <https://github.com/apple/foundationdb/pull/4564>`_ * Java: Added ``Database.getMainThreadBusyness`` function to report how busy a client's main thread is. `(PR #4564) <https://github.com/apple/foundationdb/pull/4564>`_
* C: Added ``fdb_transaction_get_range_and_flat_map`` function to support running queries based on another query in one request. `(PR #5609) <https://github.com/apple/foundationdb/pull/5609>`_
* Java: Added ``Transaction.getRangeAndFlatMap`` function to support running queries based on another query in one request. `(PR #5609) <https://github.com/apple/foundationdb/pull/5609>`_
Other Changes Other Changes
------------- -------------


@ -85,11 +85,10 @@ Control options
In addition to the command line parameter described above, tracing can be set In addition to the command line parameter described above, tracing can be set
at a database and transaction level. at a database and transaction level.
Tracing can be globally disabled by setting the Tracing can be controlled on a global level by setting the
``distributed_transaction_trace_disable`` database option. It can be enabled by ``TRACING_SAMPLE_RATE`` knob. Set the knob to 0.0 to record no traces, to 1.0
setting the ``distributed_transaction_trace_enable`` database option. If to record all traces, or somewhere in the middle. Traces are sampled as a unit.
neither option is specified but a tracer option is set as described above, All individual spans in the trace will be included in the sample.
tracing will be enabled.
Tracing can be enabled or disabled for individual transactions. The special key Tracing can be enabled or disabled for individual transactions. The special key
space exposes an API to set a custom trace ID for a transaction, or to disable space exposes an API to set a custom trace ID for a transaction, or to disable
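As a minimal sketch (Python binding; the knob and tracer values shown are examples and assume the lowercase forms of the options above), sampling and the client tracer can be configured before the network starts::

    import fdb

    fdb.api_version(630)

    # 1.0 records every trace, 0.0 records none; traces are sampled as a unit.
    fdb.options.set_knob("tracing_sample_rate=1.0")
    # Use the same tracer type that is configured on the server.
    fdb.options.set_distributed_client_tracer("network_lossy")

    db = fdb.open()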


@ -13,14 +13,14 @@ Adding tags to transactions
Tags are added to transaction by using transaction options. Each transaction can include up to five tags, and each tag must not exceed 16 characters. There are two options that can be used to add tags: Tags are added to transaction by using transaction options. Each transaction can include up to five tags, and each tag must not exceed 16 characters. There are two options that can be used to add tags:
* ``TAG`` - Adds a tag to the transaction. This tag will not be used for auto-throttling and is not included with read requests. Tags set in this way can only be throttled manually. * ``TAG`` - Adds a tag to the transaction. This tag will not be used for auto-throttling and is not included with read or commit requests. Tags set in this way can only be throttled manually.
* ``AUTO_THROTTLE_TAG`` - Adds a tag to the transaction that can be both automatically and manually throttled. To support busy tag detection, these tags may be sent as part of read requests. * ``AUTO_THROTTLE_TAG`` - Adds a tag to the transaction that can be both automatically and manually throttled. To support busy tag detection, these tags may be sent as part of read or commit requests.
See the documentation for your particular language binding for details about setting this option. See the documentation for your particular language binding for details about setting this option.
.. note:: If setting hierarchical tags, it is recommended that you not use auto-throttle tags at multiple levels of the hierarchy. Otherwise, the cluster will favor throttling those tags set at higher levels, as they will include more transactions. .. note:: If setting hierarchical tags, it is recommended that you not use auto-throttle tags at multiple levels of the hierarchy. Otherwise, the cluster will favor throttling those tags set at higher levels, as they will include more transactions.
.. note:: Tags must be included as part of all get read version requests, and a sample of read requests will include auto-throttled tags. Additionally, each tag imposes additional costs with those requests. It is therefore recommended that you not use excessive numbers or lengths of transaction tags. .. note:: Tags must be included as part of all get read version requests, and a sample of read and commit requests will include auto-throttled tags. Additionally, each tag imposes additional costs with those requests. It is therefore recommended that you not use excessive numbers or lengths of transaction tags.
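As a minimal sketch (Python binding; the tag names are arbitrary examples), the two options are set per transaction::

    import fdb

    fdb.api_version(630)
    db = fdb.open()

    @fdb.transactional
    def tagged_read(tr, key):
        # TAG: throttled manually only; not sent with read or commit requests.
        tr.options.set_tag("batch_loader")
        # AUTO_THROTTLE_TAG: may be sampled with read or commit requests and auto-throttled.
        tr.options.set_auto_throttle_tag("web_frontend")
        return tr[key]

    value = tagged_read(db, b"some_key")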
Tag throttling overview Tag throttling overview
======================= =======================
@ -48,7 +48,7 @@ When a transaction tag is throttled, this information will be communicated to th
Automatic transaction tag throttling Automatic transaction tag throttling
==================================== ====================================
When using the ``AUTO_THROTTLE_TAG`` transaction option, the cluster will monitor read activity for the chosen tags and may choose to reduce a tag's transaction rate limit if a storage server gets busy enough and has a sufficient portion of its read traffic coming from that one tag. When using the ``AUTO_THROTTLE_TAG`` transaction option, the cluster will monitor activity for the chosen tags and may choose to reduce a tag's transaction rate limit if a storage server gets busy enough and has a sufficient portion of its traffic coming from that one tag.
When a tag is auto-throttled, the default priority transaction rate will be decreased to reduce the percentage of traffic attributable to that tag to a reasonable amount of total traffic on the affected storage server(s), and batch priority transactions for that tag will be stopped completely. When a tag is auto-throttled, the default priority transaction rate will be decreased to reduce the percentage of traffic attributable to that tag to a reasonable amount of total traffic on the affected storage server(s), and batch priority transactions for that tag will be stopped completely.


@ -31,7 +31,7 @@ Because TSS recruitment only pairs *new* storage processes, you must add process
Example commands Example commands
---------------- ----------------
Set the desired TSS processes count to 4, using the redwood storage engine: ``configure tss ssd-redwood-experimental count=4``. Set the desired TSS processes count to 4, using the redwood storage engine: ``configure tss ssd-redwood-1-experimental count=4``.
Change the desired TSS process count to 2: ``configure tss count=2``. Change the desired TSS process count to 2: ``configure tss count=2``.


@ -37,6 +37,7 @@
#include "fdbclient/BackupAgent.actor.h" #include "fdbclient/BackupAgent.actor.h"
#include "fdbclient/Status.h" #include "fdbclient/Status.h"
#include "fdbclient/BackupContainer.h" #include "fdbclient/BackupContainer.h"
#include "fdbclient/ClusterConnectionFile.h"
#include "fdbclient/KeyBackedTypes.h" #include "fdbclient/KeyBackedTypes.h"
#include "fdbclient/IKnobCollection.h" #include "fdbclient/IKnobCollection.h"
#include "fdbclient/RunTransaction.actor.h" #include "fdbclient/RunTransaction.actor.h"
@ -3095,7 +3096,7 @@ Optional<Database> connectToCluster(std::string const& clusterFile,
} catch (Error& e) { } catch (Error& e) {
if (!quiet) { if (!quiet) {
fprintf(stderr, "ERROR: %s\n", e.what()); fprintf(stderr, "ERROR: %s\n", e.what());
fprintf(stderr, "ERROR: Unable to connect to cluster from `%s'\n", ccf->getFilename().c_str()); fprintf(stderr, "ERROR: Unable to connect to cluster from `%s'\n", ccf->getLocation().c_str());
} }
return db; return db;
} }
@ -3761,35 +3762,6 @@ int main(int argc, char* argv[]) {
} }
} }
IKnobCollection::setGlobalKnobCollection(IKnobCollection::Type::CLIENT, Randomize::False, IsSimulated::False);
auto& g_knobs = IKnobCollection::getMutableGlobalKnobCollection();
for (const auto& [knobName, knobValueString] : knobs) {
try {
auto knobValue = g_knobs.parseKnobValue(knobName, knobValueString);
g_knobs.setKnob(knobName, knobValue);
} catch (Error& e) {
if (e.code() == error_code_invalid_option_value) {
fprintf(stderr,
"WARNING: Invalid value '%s' for knob option '%s'\n",
knobValueString.c_str(),
knobName.c_str());
TraceEvent(SevWarnAlways, "InvalidKnobValue")
.detail("Knob", printable(knobName))
.detail("Value", printable(knobValueString));
} else {
fprintf(stderr, "ERROR: Failed to set knob option '%s': %s\n", knobName.c_str(), e.what());
TraceEvent(SevError, "FailedToSetKnob")
.detail("Knob", printable(knobName))
.detail("Value", printable(knobValueString))
.error(e);
throw;
}
}
}
// Reinitialize knobs in order to update knobs that are dependent on explicitly set knobs
g_knobs.initialize(Randomize::False, IsSimulated::False);
if (trace) { if (trace) {
if (!traceLogGroup.empty()) if (!traceLogGroup.empty())
setNetworkOption(FDBNetworkOptions::TRACE_LOG_GROUP, StringRef(traceLogGroup)); setNetworkOption(FDBNetworkOptions::TRACE_LOG_GROUP, StringRef(traceLogGroup));
@ -3830,6 +3802,34 @@ int main(int argc, char* argv[]) {
return FDB_EXIT_ERROR; return FDB_EXIT_ERROR;
} }
auto& g_knobs = IKnobCollection::getMutableGlobalKnobCollection();
for (const auto& [knobName, knobValueString] : knobs) {
try {
auto knobValue = g_knobs.parseKnobValue(knobName, knobValueString);
g_knobs.setKnob(knobName, knobValue);
} catch (Error& e) {
if (e.code() == error_code_invalid_option_value) {
fprintf(stderr,
"WARNING: Invalid value '%s' for knob option '%s'\n",
knobValueString.c_str(),
knobName.c_str());
TraceEvent(SevWarnAlways, "InvalidKnobValue")
.detail("Knob", printable(knobName))
.detail("Value", printable(knobValueString));
} else {
fprintf(stderr, "ERROR: Failed to set knob option '%s': %s\n", knobName.c_str(), e.what());
TraceEvent(SevError, "FailedToSetKnob")
.detail("Knob", printable(knobName))
.detail("Value", printable(knobValueString))
.error(e);
throw;
}
}
}
// Reinitialize knobs in order to update knobs that are dependent on explicitly set knobs
g_knobs.initialize(Randomize::False, IsSimulated::False);
TraceEvent("ProgramStart") TraceEvent("ProgramStart")
.setMaxEventLength(12000) .setMaxEventLength(12000)
.detail("SourceVersion", getSourceVersion()) .detail("SourceVersion", getSourceVersion())


@ -0,0 +1,97 @@
/*
* BlobRangeCommand.actor.cpp
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "fdbcli/fdbcli.actor.h"
#include "fdbclient/FDBOptions.g.h"
#include "fdbclient/IClientApi.h"
#include "fdbclient/ManagementAPI.actor.h"
#include "flow/Arena.h"
#include "flow/FastRef.h"
#include "flow/ThreadHelper.actor.h"
#include "flow/actorcompiler.h" // This must be the last #include.
namespace {
// copy to standalones for krm
ACTOR Future<Void> setBlobRange(Database db, Key startKey, Key endKey, Value value) {
state Reference<ReadYourWritesTransaction> tr = makeReference<ReadYourWritesTransaction>(db);
loop {
try {
tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
tr->setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);
// FIXME: check that the set range is currently inactive, and that a revoked range is currently its own
// range in the map and fully set.
tr->set(blobRangeChangeKey, deterministicRandom()->randomUniqueID().toString());
// This is not coalescing because we want to keep each range logically separate.
wait(krmSetRange(tr, blobRangeKeys.begin, KeyRange(KeyRangeRef(startKey, endKey)), value));
wait(tr->commit());
printf("Successfully updated blob range [%s - %s) to %s\n",
startKey.printable().c_str(),
endKey.printable().c_str(),
value.printable().c_str());
return Void();
} catch (Error& e) {
wait(tr->onError(e));
}
}
}
} // namespace
namespace fdb_cli {
ACTOR Future<bool> blobRangeCommandActor(Database localDb, std::vector<StringRef> tokens) {
// enables blob writing for the given range
if (tokens.size() != 4) {
printUsage(tokens[0]);
return false;
} else if (tokens[3] > LiteralStringRef("\xff")) {
// TODO is this something we want?
printf("Cannot blobbify system keyspace! Problematic End Key: %s\n", tokens[3].printable().c_str());
return false;
} else if (tokens[2] >= tokens[3]) {
printf("Invalid blob range [%s - %s)\n", tokens[2].printable().c_str(), tokens[3].printable().c_str());
} else {
if (tokencmp(tokens[1], "start")) {
printf("Starting blobbify range for [%s - %s)\n",
tokens[2].printable().c_str(),
tokens[3].printable().c_str());
wait(setBlobRange(localDb, tokens[2], tokens[3], LiteralStringRef("1")));
} else if (tokencmp(tokens[1], "stop")) {
printf("Stopping blobbify range for [%s - %s)\n",
tokens[2].printable().c_str(),
tokens[3].printable().c_str());
wait(setBlobRange(localDb, tokens[2], tokens[3], StringRef()));
} else {
printUsage(tokens[0]);
printf("Usage: blobrange <start|stop> <startkey> <endkey>");
return false;
}
}
return true;
}
CommandFactory blobRangeFactory("blobrange", CommandHelp("blobrange <start|stop> <startkey> <endkey>", "", ""));
} // namespace fdb_cli


@ -2,6 +2,7 @@ set(FDBCLI_SRCS
fdbcli.actor.cpp fdbcli.actor.cpp
fdbcli.actor.h fdbcli.actor.h
AdvanceVersionCommand.actor.cpp AdvanceVersionCommand.actor.cpp
BlobRangeCommand.actor.cpp
CacheRangeCommand.actor.cpp CacheRangeCommand.actor.cpp
ConfigureCommand.actor.cpp ConfigureCommand.actor.cpp
ConsistencyCheckCommand.actor.cpp ConsistencyCheckCommand.actor.cpp
@ -16,6 +17,7 @@ set(FDBCLI_SRCS
IncludeCommand.actor.cpp IncludeCommand.actor.cpp
KillCommand.actor.cpp KillCommand.actor.cpp
LockCommand.actor.cpp LockCommand.actor.cpp
ChangeFeedCommand.actor.cpp
MaintenanceCommand.actor.cpp MaintenanceCommand.actor.cpp
ProfileCommand.actor.cpp ProfileCommand.actor.cpp
SetClassCommand.actor.cpp SetClassCommand.actor.cpp


@ -0,0 +1,187 @@
/*
* ChangeFeedCommand.actor.cpp
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "contrib/fmt-8.0.1/include/fmt/format.h"
#include "fdbcli/fdbcli.actor.h"
#include "fdbclient/FDBOptions.g.h"
#include "fdbclient/IClientApi.h"
#include "fdbclient/Knobs.h"
#include "fdbclient/Schemas.h"
#include "fdbclient/ManagementAPI.actor.h"
#include "flow/Arena.h"
#include "flow/FastRef.h"
#include "flow/ThreadHelper.actor.h"
#include "flow/actorcompiler.h" // This must be the last #include.
namespace {
ACTOR Future<Void> changeFeedList(Database db) {
state ReadYourWritesTransaction tr(db);
loop {
try {
tr.setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
tr.setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);
RangeResult result = wait(tr.getRange(changeFeedKeys, CLIENT_KNOBS->TOO_MANY));
// the set of change feeds should be small enough to fit in a single read
ASSERT(!result.more);
printf("Found %d range feeds%s\n", result.size(), result.size() == 0 ? "." : ":");
for (auto& it : result) {
auto range = std::get<0>(decodeChangeFeedValue(it.value));
printf(" %s: %s - %s\n",
it.key.removePrefix(changeFeedPrefix).toString().c_str(),
range.begin.toString().c_str(),
range.end.toString().c_str());
}
return Void();
} catch (Error& e) {
wait(tr.onError(e));
}
}
}
} // namespace
namespace fdb_cli {
ACTOR Future<Void> requestVersionUpdate(Database localDb, Reference<ChangeFeedData> feedData) {
loop {
wait(delay(5.0));
Transaction tr(localDb);
state Version ver = wait(tr.getReadVersion());
fmt::print("Requesting version {}\n", ver);
wait(feedData->whenAtLeast(ver));
fmt::print("Feed at version {}\n", ver);
}
}
ACTOR Future<bool> changeFeedCommandActor(Database localDb, std::vector<StringRef> tokens, Future<Void> warn) {
if (tokens.size() == 1) {
printUsage(tokens[0]);
return false;
}
if (tokencmp(tokens[1], "list")) {
if (tokens.size() != 2) {
printUsage(tokens[0]);
return false;
}
wait(changeFeedList(localDb));
return true;
} else if (tokencmp(tokens[1], "register")) {
if (tokens.size() != 5) {
printUsage(tokens[0]);
return false;
}
wait(updateChangeFeed(
localDb, tokens[2], ChangeFeedStatus::CHANGE_FEED_CREATE, KeyRangeRef(tokens[3], tokens[4])));
} else if (tokencmp(tokens[1], "stop")) {
if (tokens.size() != 3) {
printUsage(tokens[0]);
return false;
}
wait(updateChangeFeed(localDb, tokens[2], ChangeFeedStatus::CHANGE_FEED_STOP));
} else if (tokencmp(tokens[1], "destroy")) {
if (tokens.size() != 3) {
printUsage(tokens[0]);
return false;
}
wait(updateChangeFeed(localDb, tokens[2], ChangeFeedStatus::CHANGE_FEED_DESTROY));
} else if (tokencmp(tokens[1], "stream")) {
if (tokens.size() < 3 || tokens.size() > 5) {
printUsage(tokens[0]);
return false;
}
Version begin = 0;
Version end = std::numeric_limits<Version>::max();
if (tokens.size() > 3) {
int n = 0;
if (sscanf(tokens[3].toString().c_str(), "%ld%n", &begin, &n) != 1 || n != tokens[3].size()) {
printUsage(tokens[0]);
return false;
}
}
if (tokens.size() > 4) {
int n = 0;
if (sscanf(tokens[4].toString().c_str(), "%" PRId64 "%n", &end, &n) != 1 || n != tokens[4].size()) {
printUsage(tokens[0]);
return false;
}
}
if (warn.isValid()) {
warn.cancel();
}
state Reference<ChangeFeedData> feedData = makeReference<ChangeFeedData>();
state Future<Void> feed = localDb->getChangeFeedStream(feedData, tokens[2], begin, end);
state Future<Void> versionUpdates = requestVersionUpdate(localDb, feedData);
printf("\n");
try {
state Future<Void> feedInterrupt = LineNoise::onKeyboardInterrupt();
loop {
choose {
when(Standalone<VectorRef<MutationsAndVersionRef>> res =
waitNext(feedData->mutations.getFuture())) {
for (auto& it : res) {
for (auto& it2 : it.mutations) {
fmt::print("{0} {1}\n", it.version, it2.toString());
}
}
}
when(wait(feedInterrupt)) {
feedInterrupt = Future<Void>();
feed.cancel();
feedData = makeReference<ChangeFeedData>();
break;
}
}
}
return true;
} catch (Error& e) {
if (e.code() == error_code_end_of_stream) {
return true;
}
throw;
}
} else if (tokencmp(tokens[1], "pop")) {
if (tokens.size() != 4) {
printUsage(tokens[0]);
return false;
}
Version v;
int n = 0;
if (sscanf(tokens[3].toString().c_str(), "%ld%n", &v, &n) != 1 || n != tokens[3].size()) {
printUsage(tokens[0]);
return false;
} else {
wait(localDb->popChangeFeedMutations(tokens[2], v));
}
} else {
printUsage(tokens[0]);
return false;
}
return true;
}
CommandFactory changeFeedFactory(
"changefeed",
CommandHelp("changefeed <register|destroy|stop|stream|pop|list> <RANGEID> <BEGIN> <END>", "", ""));
} // namespace fdb_cli


@ -393,5 +393,11 @@ CommandFactory excludeFactory(
"command returns \nimmediately without checking if the exclusions have completed successfully.\n" "command returns \nimmediately without checking if the exclusions have completed successfully.\n"
"If 'FORCE' is set, the command does not perform safety checks before excluding.\n" "If 'FORCE' is set, the command does not perform safety checks before excluding.\n"
"If 'failed' is set, the transaction log queue is dropped pre-emptively before waiting\n" "If 'failed' is set, the transaction log queue is dropped pre-emptively before waiting\n"
"for data movement to finish and the server cannot be included again.")); "for data movement to finish and the server cannot be included again."
"\n\nWARNING of potential dataloss\n:"
"If a to-be-excluded server is the last server of some team(s), and 'failed' is set, the data in the team(s) "
"will be lost. 'failed' should be set only if the server(s) have permanently failed."
"In the case all servers of a team have failed permanently and dataloss has been a fact, excluding all the "
"servers will clean up the corresponding keyrange, and fix the invalid metadata. The keyrange will be "
"assigned to a new team as an empty shard."));
} // namespace fdb_cli } // namespace fdb_cli


@ -18,6 +18,8 @@
* limitations under the License. * limitations under the License.
*/ */
#include "contrib/fmt-8.0.1/include/fmt/format.h"
#include "fdbcli/fdbcli.actor.h" #include "fdbcli/fdbcli.actor.h"
#include "fdbclient/FDBOptions.g.h" #include "fdbclient/FDBOptions.g.h"
@ -48,7 +50,7 @@ ACTOR Future<Void> printProcessClass(Reference<IDatabase> db) {
ASSERT(processSourceList.size() == processTypeList.size()); ASSERT(processSourceList.size() == processTypeList.size());
if (!processTypeList.size()) if (!processTypeList.size())
printf("No processes are registered in the database.\n"); printf("No processes are registered in the database.\n");
printf("There are currently %zu processes in the database:\n", processTypeList.size()); fmt::print("There are currently {} processes in the database:\n", processTypeList.size());
for (int index = 0; index < processTypeList.size(); index++) { for (int index = 0; index < processTypeList.size(); index++) {
std::string address = std::string address =
processTypeList[index].key.removePrefix(fdb_cli::processClassTypeSpecialKeyRange.begin).toString(); processTypeList[index].key.removePrefix(fdb_cli::processClassTypeSpecialKeyRange.begin).toString();


@ -19,6 +19,7 @@
*/ */
#include "boost/lexical_cast.hpp" #include "boost/lexical_cast.hpp"
#include "fdbclient/ClusterConnectionFile.h"
#include "fdbclient/NativeAPI.actor.h" #include "fdbclient/NativeAPI.actor.h"
#include "fdbclient/FDBTypes.h" #include "fdbclient/FDBTypes.h"
#include "fdbclient/IClientApi.h" #include "fdbclient/IClientApi.h"
@ -35,6 +36,7 @@
#include "fdbclient/Schemas.h" #include "fdbclient/Schemas.h"
#include "fdbclient/CoordinationInterface.h" #include "fdbclient/CoordinationInterface.h"
#include "fdbclient/FDBOptions.g.h" #include "fdbclient/FDBOptions.g.h"
#include "fdbclient/SystemData.h"
#include "fdbclient/TagThrottle.actor.h" #include "fdbclient/TagThrottle.actor.h"
#include "fdbclient/Tuple.h" #include "fdbclient/Tuple.h"
@ -90,7 +92,8 @@ enum {
OPT_BUILD_FLAGS, OPT_BUILD_FLAGS,
OPT_TRACE_FORMAT, OPT_TRACE_FORMAT,
OPT_KNOB, OPT_KNOB,
OPT_DEBUG_TLS OPT_DEBUG_TLS,
OPT_API_VERSION,
}; };
CSimpleOpt::SOption g_rgOptions[] = { { OPT_CONNFILE, "-C", SO_REQ_SEP }, CSimpleOpt::SOption g_rgOptions[] = { { OPT_CONNFILE, "-C", SO_REQ_SEP },
@ -112,6 +115,7 @@ CSimpleOpt::SOption g_rgOptions[] = { { OPT_CONNFILE, "-C", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace_format", SO_REQ_SEP }, { OPT_TRACE_FORMAT, "--trace_format", SO_REQ_SEP },
{ OPT_KNOB, "--knob_", SO_REQ_SEP }, { OPT_KNOB, "--knob_", SO_REQ_SEP },
{ OPT_DEBUG_TLS, "--debug-tls", SO_NONE }, { OPT_DEBUG_TLS, "--debug-tls", SO_NONE },
{ OPT_API_VERSION, "--api-version", SO_REQ_SEP },
#ifndef TLS_DISABLED #ifndef TLS_DISABLED
TLS_OPTION_FLAGS TLS_OPTION_FLAGS
@ -428,6 +432,8 @@ static void printProgramUsage(const char* name) {
" and then exits.\n" " and then exits.\n"
" --no-status Disables the initial status check done when starting\n" " --no-status Disables the initial status check done when starting\n"
" the CLI.\n" " the CLI.\n"
" --api-version APIVERSION\n"
" Specifies the version of the API for the CLI to use.\n"
#ifndef TLS_DISABLED #ifndef TLS_DISABLED
TLS_HELP TLS_HELP
#endif #endif
@ -1030,8 +1036,8 @@ ACTOR Future<bool> exclude(Database db,
locality.c_str()); locality.c_str());
} }
ClusterConnectionString ccs = wait(ccf->getStoredConnectionString());
bool foundCoordinator = false; bool foundCoordinator = false;
auto ccs = ClusterConnectionFile(ccf->getFilename()).getConnectionString();
for (const auto& c : ccs.coordinators()) { for (const auto& c : ccs.coordinators()) {
if (std::count(exclusionVector.begin(), exclusionVector.end(), AddressExclusion(c.ip, c.port)) || if (std::count(exclusionVector.begin(), exclusionVector.end(), AddressExclusion(c.ip, c.port)) ||
std::count(exclusionVector.begin(), exclusionVector.end(), AddressExclusion(c.ip))) { std::count(exclusionVector.begin(), exclusionVector.end(), AddressExclusion(c.ip))) {
@ -1371,6 +1377,9 @@ struct CLIOptions {
std::vector<std::pair<std::string, std::string>> knobs; std::vector<std::pair<std::string, std::string>> knobs;
// api version, using the latest version by default
int api_version = FDB_API_VERSION;
CLIOptions(int argc, char* argv[]) { CLIOptions(int argc, char* argv[]) {
program_name = argv[0]; program_name = argv[0];
for (int a = 0; a < argc; a++) { for (int a = 0; a < argc; a++) {
@ -1393,7 +1402,9 @@ struct CLIOptions {
exit_code = FDB_EXIT_ERROR; exit_code = FDB_EXIT_ERROR;
return; return;
} }
}
void setupKnobs() {
auto& g_knobs = IKnobCollection::getMutableGlobalKnobCollection(); auto& g_knobs = IKnobCollection::getMutableGlobalKnobCollection();
for (const auto& [knobName, knobValueString] : knobs) { for (const auto& [knobName, knobValueString] : knobs) {
try { try {
@ -1433,6 +1444,22 @@ struct CLIOptions {
case OPT_CONNFILE: case OPT_CONNFILE:
clusterFile = args.OptionArg(); clusterFile = args.OptionArg();
break; break;
case OPT_API_VERSION: {
char* endptr;
api_version = strtoul((char*)args.OptionArg(), &endptr, 10);
if (*endptr != '\0') {
fprintf(stderr, "ERROR: invalid client version %s\n", args.OptionArg());
return 1;
} else if (api_version < 700 || api_version > FDB_API_VERSION) {
// multi-version fdbcli only available after 7.0
fprintf(stderr,
"ERROR: api version %s is not supported. (Min: 700, Max: %d)\n",
args.OptionArg(),
FDB_API_VERSION);
return 1;
}
break;
}
case OPT_TRACE: case OPT_TRACE:
trace = true; trace = true;
break; break;
@ -1532,6 +1559,7 @@ ACTOR Future<int> cli(CLIOptions opt, LineNoise* plinenoise) {
state Database localDb; state Database localDb;
state Reference<IDatabase> db; state Reference<IDatabase> db;
state Reference<ITransaction> tr; state Reference<ITransaction> tr;
state Transaction trx;
state bool writeMode = false; state bool writeMode = false;
@ -1559,14 +1587,14 @@ ACTOR Future<int> cli(CLIOptions opt, LineNoise* plinenoise) {
TraceEvent::setNetworkThread(); TraceEvent::setNetworkThread();
try { try {
localDb = Database::createDatabase(ccf, -1, IsInternal::False); localDb = Database::createDatabase(ccf, opt.api_version, IsInternal::False);
if (!opt.exec.present()) { if (!opt.exec.present()) {
printf("Using cluster file `%s'.\n", ccf->getFilename().c_str()); printf("Using cluster file `%s'.\n", ccf->getLocation().c_str());
} }
db = API->createDatabase(opt.clusterFile.c_str()); db = API->createDatabase(opt.clusterFile.c_str());
} catch (Error& e) { } catch (Error& e) {
fprintf(stderr, "ERROR: %s (%d)\n", e.what(), e.code()); fprintf(stderr, "ERROR: %s (%d)\n", e.what(), e.code());
printf("Unable to connect to cluster from `%s'\n", ccf->getFilename().c_str()); printf("Unable to connect to cluster from `%s'\n", ccf->getLocation().c_str());
return 1; return 1;
} }
@ -1577,7 +1605,7 @@ ACTOR Future<int> cli(CLIOptions opt, LineNoise* plinenoise) {
.detail("Version", FDB_VT_VERSION) .detail("Version", FDB_VT_VERSION)
.detail("PackageName", FDB_VT_PACKAGE_NAME) .detail("PackageName", FDB_VT_PACKAGE_NAME)
.detailf("ActualTime", "%lld", DEBUG_DETERMINISM ? 0 : time(nullptr)) .detailf("ActualTime", "%lld", DEBUG_DETERMINISM ? 0 : time(nullptr))
.detail("ClusterFile", ccf->getFilename().c_str()) .detail("ClusterFile", ccf->toString())
.detail("ConnectionString", ccf->getConnectionString().toString()) .detail("ConnectionString", ccf->getConnectionString().toString())
.setMaxFieldLength(10000) .setMaxFieldLength(10000)
.detail("CommandLine", opt.commandLine) .detail("CommandLine", opt.commandLine)
@ -1642,7 +1670,7 @@ ACTOR Future<int> cli(CLIOptions opt, LineNoise* plinenoise) {
// Don't put dangerous commands in the command history // Don't put dangerous commands in the command history
if (line.find("writemode") == std::string::npos && line.find("expensive_data_check") == std::string::npos && if (line.find("writemode") == std::string::npos && line.find("expensive_data_check") == std::string::npos &&
line.find("unlock") == std::string::npos) line.find("unlock") == std::string::npos && line.find("blobrange") == std::string::npos)
linenoise.historyAdd(line); linenoise.historyAdd(line);
} }
@ -1855,6 +1883,20 @@ ACTOR Future<int> cli(CLIOptions opt, LineNoise* plinenoise) {
continue; continue;
} }
if (tokencmp(tokens[0], "changefeed")) {
bool _result = wait(makeInterruptable(changeFeedCommandActor(localDb, tokens, warn)));
if (!_result)
is_error = true;
continue;
}
if (tokencmp(tokens[0], "blobrange")) {
bool _result = wait(makeInterruptable(blobRangeCommandActor(localDb, tokens)));
if (!_result)
is_error = true;
continue;
}
if (tokencmp(tokens[0], "unlock")) { if (tokencmp(tokens[0], "unlock")) {
if ((tokens.size() != 2) || (tokens[1].size() != 32) || if ((tokens.size() != 2) || (tokens[1].size() != 32) ||
!std::all_of(tokens[1].begin(), tokens[1].end(), &isxdigit)) { !std::all_of(tokens[1].begin(), tokens[1].end(), &isxdigit)) {
@ -2392,8 +2434,6 @@ int main(int argc, char** argv) {
registerCrashHandler(); registerCrashHandler();
IKnobCollection::setGlobalKnobCollection(IKnobCollection::Type::CLIENT, Randomize::False, IsSimulated::False);
#ifdef __unixish__ #ifdef __unixish__
struct sigaction act; struct sigaction act;
@ -2492,9 +2532,12 @@ int main(int argc, char** argv) {
} }
try { try {
// Note: refactoring fdbcli, in progress API->selectApiVersion(opt.api_version);
API->selectApiVersion(FDB_API_VERSION);
API->setupNetwork(); API->setupNetwork();
opt.setupKnobs();
if (opt.exit_code != -1) {
return opt.exit_code;
}
Future<int> cliFuture = runCli(opt); Future<int> cliFuture = runCli(opt);
Future<Void> timeoutFuture = opt.exit_timeout ? timeExit(opt.exit_timeout) : Never(); Future<Void> timeoutFuture = opt.exit_timeout ? timeExit(opt.exit_timeout) : Never();
auto f = stopNetworkAfter(success(cliFuture) || timeoutFuture); auto f = stopNetworkAfter(success(cliFuture) || timeoutFuture);
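A brief usage note on the new --api-version flag (illustrative; not part of the patch): it lets the CLI run against an older API level, subject to the minimum of 700 enforced above, and the parsed value feeds both Database::createDatabase and API->selectApiVersion before the network is started. A hypothetical invocation, with a placeholder cluster file path, might look like:

fdbcli -C /etc/foundationdb/fdb.cluster --api-version 700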

View File

@ -165,6 +165,10 @@ ACTOR Future<bool> killCommandActor(Reference<IDatabase> db,
// lock/unlock command // lock/unlock command
ACTOR Future<bool> lockCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens); ACTOR Future<bool> lockCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens);
ACTOR Future<bool> unlockDatabaseActor(Reference<IDatabase> db, UID uid); ACTOR Future<bool> unlockDatabaseActor(Reference<IDatabase> db, UID uid);
// changefeed command
ACTOR Future<bool> changeFeedCommandActor(Database localDb, std::vector<StringRef> tokens, Future<Void> warn);
// blobrange command
ACTOR Future<bool> blobRangeCommandActor(Database localDb, std::vector<StringRef> tokens);
// maintenance command // maintenance command
ACTOR Future<bool> setHealthyZone(Reference<IDatabase> db, StringRef zoneId, double seconds, bool printWarning = false); ACTOR Future<bool> setHealthyZone(Reference<IDatabase> db, StringRef zoneId, double seconds, bool printWarning = false);
ACTOR Future<bool> clearHealthyZone(Reference<IDatabase> db, ACTOR Future<bool> clearHealthyZone(Reference<IDatabase> db,

View File

@ -242,6 +242,9 @@ void sample(LineageReference* lineagePtr) {
if (!lineagePtr->isValid()) { if (!lineagePtr->isValid()) {
return; return;
} }
if (!lineagePtr->isAllocated()) {
lineagePtr->allocate();
}
(*lineagePtr)->modify(&NameLineage::actorName) = lineagePtr->actorName(); (*lineagePtr)->modify(&NameLineage::actorName) = lineagePtr->actorName();
boost::asio::post(ActorLineageProfiler::instance().context(), boost::asio::post(ActorLineageProfiler::instance().context(),
[lineage = LineageReference::addRef(lineagePtr->getPtr())]() { [lineage = LineageReference::addRef(lineagePtr->getPtr())]() {

View File

@ -96,7 +96,7 @@ struct ConfigError {
class ProfilerConfigT { class ProfilerConfigT {
private: // private types private: // private types
using Lock = std::unique_lock<std::mutex>; using Lock = std::unique_lock<std::mutex>;
friend class crossbow::create_static<ProfilerConfigT>; friend struct crossbow::create_static<ProfilerConfigT>;
private: // members private: // members
std::shared_ptr<SampleIngestor> ingestor = std::make_shared<NoneIngestor>(); std::shared_ptr<SampleIngestor> ingestor = std::make_shared<NoneIngestor>();

View File

@ -303,7 +303,8 @@ public:
static std::string lastOpenError; static std::string lastOpenError;
private: // TODO: change the following back to `private` once blob obj access is refactored
protected:
std::string URL; std::string URL;
Optional<std::string> encryptionKeyFileName; Optional<std::string> encryptionKeyFileName;
}; };

View File

@ -22,6 +22,7 @@
#include "fdbclient/BackupContainerAzureBlobStore.h" #include "fdbclient/BackupContainerAzureBlobStore.h"
#include "fdbclient/BackupContainerFileSystem.h" #include "fdbclient/BackupContainerFileSystem.h"
#include "fdbclient/BackupContainerLocalDirectory.h" #include "fdbclient/BackupContainerLocalDirectory.h"
#include "fdbclient/BackupContainerS3BlobStore.h"
#include "fdbclient/JsonBuilder.h" #include "fdbclient/JsonBuilder.h"
#include "flow/StreamCipher.h" #include "flow/StreamCipher.h"
#include "flow/UnitTest.h" #include "flow/UnitTest.h"
@ -1495,6 +1496,70 @@ Future<Void> BackupContainerFileSystem::createTestEncryptionKeyFile(std::string
#endif #endif
} }
// Get a BackupContainerFileSystem based on a container URL string
// TODO: refactor to not duplicate IBackupContainer::openContainer. It's the exact same
// code but returning a different template type because you can't cast between them
Reference<BackupContainerFileSystem> BackupContainerFileSystem::openContainerFS(
const std::string& url,
Optional<std::string> const& encryptionKeyFileName) {
static std::map<std::string, Reference<BackupContainerFileSystem>> m_cache;
Reference<BackupContainerFileSystem>& r = m_cache[url];
if (r)
return r;
try {
StringRef u(url);
if (u.startsWith("file://"_sr)) {
r = makeReference<BackupContainerLocalDirectory>(url, encryptionKeyFileName);
} else if (u.startsWith("blobstore://"_sr)) {
std::string resource;
// The URL parameters contain blobstore endpoint tunables as well as possible backup-specific options.
S3BlobStoreEndpoint::ParametersT backupParams;
Reference<S3BlobStoreEndpoint> bstore =
S3BlobStoreEndpoint::fromString(url, &resource, &lastOpenError, &backupParams);
if (resource.empty())
throw backup_invalid_url();
for (auto c : resource)
if (!isalnum(c) && c != '_' && c != '-' && c != '.' && c != '/')
throw backup_invalid_url();
r = makeReference<BackupContainerS3BlobStore>(bstore, resource, backupParams, encryptionKeyFileName);
}
#ifdef BUILD_AZURE_BACKUP
else if (u.startsWith("azure://"_sr)) {
u.eat("azure://"_sr);
auto accountName = u.eat("@"_sr).toString();
auto endpoint = u.eat("/"_sr).toString();
auto containerName = u.eat("/"_sr).toString();
r = makeReference<BackupContainerAzureBlobStore>(
endpoint, accountName, containerName, encryptionKeyFileName);
}
#endif
else {
lastOpenError = "invalid URL prefix";
throw backup_invalid_url();
}
r->encryptionKeyFileName = encryptionKeyFileName;
r->URL = url;
return r;
} catch (Error& e) {
if (e.code() == error_code_actor_cancelled)
throw;
TraceEvent m(SevWarn, "BackupContainer");
m.detail("Description", "Invalid container specification. See help.");
m.detail("URL", url);
m.error(e);
if (e.code() == error_code_backup_invalid_url)
m.detail("LastOpenError", lastOpenError);
throw;
}
}
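As a rough usage sketch (not part of this patch; the blobstore URL, credentials, and object name below are placeholders), a caller inside an ACTOR could obtain a filesystem-backed container and open one of its files through the same readFile() path the granule reader uses:

// Hedged sketch, assumed to run inside an ACTOR; URL and file name are made up for illustration.
state Reference<BackupContainerFileSystem> container = BackupContainerFileSystem::openContainerFS(
    "blobstore://key:secret@blob.example.com:4443/bg_data?bucket=granules");
state Reference<IAsyncFile> file = wait(container->readFile("some_snapshot_file"));
int64_t size = wait(file->size());
fmt::print("opened blob file of {} bytes\n", size);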
namespace backup_test { namespace backup_test {
int chooseFileSize(std::vector<int>& sizes) { int chooseFileSize(std::vector<int>& sizes) {

View File

@ -79,6 +79,11 @@ public:
Future<Void> create() override = 0; Future<Void> create() override = 0;
Future<bool> exists() override = 0; Future<bool> exists() override = 0;
// TODO: refactor this to separate out the "deal with blob store" stuff from the backup business logic
static Reference<BackupContainerFileSystem> openContainerFS(
const std::string& url,
const Optional<std::string>& encryptionKeyFileName = {});
// Get a list of fileNames and their sizes in the container under the given path // Get a list of fileNames and their sizes in the container under the given path
// Although not required, an implementation can avoid traversing unwanted subfolders // Although not required, an implementation can avoid traversing unwanted subfolders
// by calling folderPathFilter(absoluteFolderPath) and checking for a false return value. // by calling folderPathFilter(absoluteFolderPath) and checking for a false return value.

View File

@ -0,0 +1,103 @@
/*
* BlobGranuleCommon.h
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FDBCLIENT_BLOBGRANULECOMMON_H
#define FDBCLIENT_BLOBGRANULECOMMON_H
#pragma once
#include <sstream>
#include "fdbclient/CommitTransaction.h"
#include "fdbclient/FDBTypes.h"
// file format of actual blob files
struct GranuleSnapshot : VectorRef<KeyValueRef> {
constexpr static FileIdentifier file_identifier = 1300395;
template <class Ar>
void serialize(Ar& ar) {
serializer(ar, ((VectorRef<KeyValueRef>&)*this));
}
};
struct GranuleDeltas : VectorRef<MutationsAndVersionRef> {
constexpr static FileIdentifier file_identifier = 8563013;
template <class Ar>
void serialize(Ar& ar) {
serializer(ar, ((VectorRef<MutationsAndVersionRef>&)*this));
}
};
struct BlobFilePointerRef {
constexpr static FileIdentifier file_identifier = 5253554;
StringRef filename;
int64_t offset;
int64_t length;
BlobFilePointerRef() {}
BlobFilePointerRef(Arena& to, const std::string& filename, int64_t offset, int64_t length)
: filename(to, filename), offset(offset), length(length) {}
template <class Ar>
void serialize(Ar& ar) {
serializer(ar, filename, offset, length);
}
std::string toString() const {
std::stringstream ss;
ss << filename.toString() << ":" << offset << ":" << length;
return std::move(ss).str();
}
};
// the assumption of this response is that the client will deserialize the files and apply the mutations themselves
// TODO could filter out delta files that don't intersect the key range being requested?
// TODO since client request passes version, we don't need to include the version of each mutation in the response if we
// pruned it there
struct BlobGranuleChunkRef {
constexpr static FileIdentifier file_identifier = 865198;
KeyRangeRef keyRange;
Version includedVersion;
Optional<BlobFilePointerRef> snapshotFile; // not set if it's an incremental read
VectorRef<BlobFilePointerRef> deltaFiles;
GranuleDeltas newDeltas;
template <class Ar>
void serialize(Ar& ar) {
serializer(ar, keyRange, includedVersion, snapshotFile, deltaFiles, newDeltas);
}
};
enum BlobGranuleSplitState { Unknown = 0, Started = 1, Assigned = 2, Done = 3 };
struct BlobGranuleHistoryValue {
constexpr static FileIdentifier file_identifier = 991434;
UID granuleID;
VectorRef<std::pair<KeyRangeRef, Version>> parentGranules;
template <class Ar>
void serialize(Ar& ar) {
serializer(ar, granuleID, parentGranules);
}
};
#endif
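These structs ride on flow's object serializer. As a minimal sketch (not from the patch) of the encoding the reader relies on, a GranuleSnapshot can be flattened to a Value with ObjectWriter; BlobGranuleReader.actor.cpp reverses this with ArenaObjectReader:

// Hedged illustration only; keys and values are placeholders.
Standalone<GranuleSnapshot> snapshot;
snapshot.push_back_deep(snapshot.arena(), KeyValueRef("a"_sr, "1"_sr));
snapshot.push_back_deep(snapshot.arena(), KeyValueRef("b"_sr, "2"_sr));
Value serialized = ObjectWriter::toValue(snapshot.contents(), Unversioned());
// readSnapshotFile() deserializes the same bytes via ArenaObjectReader and
// rdr.deserialize(FileIdentifierFor<GranuleSnapshot>::value, ...).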

View File

@ -0,0 +1,551 @@
/*
* BlobGranuleReader.actor.cpp
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <map>
#include <vector>
#include "contrib/fmt-8.0.1/include/fmt/format.h"
#include "fdbclient/AsyncFileS3BlobStore.actor.h"
#include "fdbclient/Atomic.h"
#include "fdbclient/BlobGranuleCommon.h"
#include "fdbclient/BlobGranuleReader.actor.h"
#include "fdbclient/BlobWorkerCommon.h"
#include "fdbclient/BlobWorkerInterface.h"
#include "fdbclient/SystemData.h" // for allKeys unit test - could remove
#include "flow/UnitTest.h"
#include "flow/actorcompiler.h" // This must be the last #include.
// TODO more efficient data structure besides std::map? PTree is unnecessary since this isn't versioned, but some other
// sorted thing could work. And if it used arenas it'd probably be more efficient with allocations, since everything
// else is in 1 arena and discarded at the end.
// TODO could refactor the file reading code from here and the delta file function into another actor,
// then this part would also be testable? but meh
#define BG_READ_DEBUG false
ACTOR Future<Arena> readSnapshotFile(Reference<BackupContainerFileSystem> bstore,
BlobFilePointerRef f,
KeyRangeRef keyRange,
std::map<KeyRef, ValueRef>* dataMap) {
try {
state Arena arena;
// printf("Starting read of snapshot file %s\n", filename.c_str());
state Reference<IAsyncFile> reader = wait(bstore->readFile(f.filename.toString()));
// printf("Got snapshot file size %lld\n", size);
state uint8_t* data = new (arena) uint8_t[f.length];
// printf("Reading %lld bytes from snapshot file %s\n", size, filename.c_str());
int readSize = wait(reader->read(data, f.length, f.offset));
// printf("Read %lld bytes from snapshot file %s\n", readSize, filename.c_str());
ASSERT(f.length == readSize);
// weird stuff for deserializing vector and arenas
Arena parseArena;
GranuleSnapshot snapshot;
StringRef dataRef(data, f.length);
ArenaObjectReader rdr(arena, dataRef, Unversioned());
rdr.deserialize(FileIdentifierFor<GranuleSnapshot>::value, snapshot, parseArena);
arena.dependsOn(parseArena);
// GranuleSnapshot snapshot = ObjectReader::fromStringRef<GranuleSnapshot>(dataRef, Unversioned();)
// printf("Parsed %d rows from snapshot file %s\n", snapshot.size(), filename.c_str());
// TODO REMOVE sanity check eventually
for (int i = 0; i < snapshot.size() - 1; i++) {
if (snapshot[i].key >= snapshot[i + 1].key) {
printf("BG SORT ORDER VIOLATION IN SNAPSHOT FILE: '%s', '%s'\n",
snapshot[i].key.printable().c_str(),
snapshot[i + 1].key.printable().c_str());
}
ASSERT(snapshot[i].key < snapshot[i + 1].key);
}
int i = 0;
while (i < snapshot.size() && snapshot[i].key < keyRange.begin) {
/*if (snapshot.size() < 10) { // debug
printf(" Pruning %s < %s\n", snapshot[i].key.printable().c_str(), keyRange.begin.printable().c_str());
}*/
i++;
}
while (i < snapshot.size() && snapshot[i].key < keyRange.end) {
dataMap->insert({ snapshot[i].key, snapshot[i].value });
/*if (snapshot.size() < 10) { // debug
printf(" Including %s\n", snapshot[i].key.printable().c_str());
}*/
i++;
}
/*if (snapshot.size() < 10) { // debug
while (i < snapshot.size()) {
printf(" Pruning %s >= %s\n", snapshot[i].key.printable().c_str(), keyRange.end.printable().c_str());
i++;
}
}*/
if (BG_READ_DEBUG) {
fmt::print("Started with {0} rows from snapshot file {1} after pruning to [{2} - {3})\n",
dataMap->size(),
f.toString(),
keyRange.begin.printable(),
keyRange.end.printable());
}
return arena;
} catch (Error& e) {
printf("Reading snapshot file %s got error %s\n", f.toString().c_str(), e.name());
throw e;
}
}
ACTOR Future<Standalone<GranuleDeltas>> readDeltaFile(Reference<BackupContainerFileSystem> bstore,
BlobFilePointerRef f,
KeyRangeRef keyRange,
Version readVersion) {
try {
// printf("Starting read of delta file %s\n", filename.c_str());
state Standalone<GranuleDeltas> result;
state Reference<IAsyncFile> reader = wait(bstore->readFile(f.filename.toString()));
// printf("Got delta file size %lld\n", size);
state uint8_t* data = new (result.arena()) uint8_t[f.length];
// printf("Reading %lld bytes from delta file %s into %p\n", size, filename.c_str(), data);
int readSize = wait(reader->read(data, f.length, f.offset));
// printf("Read %d bytes from delta file %s\n", readSize, filename.c_str());
ASSERT(f.length == readSize);
// Don't do range or version filtering in here since we'd have to copy/rewrite the deltas and it might starve
// the snapshot read task; do it in the main thread
// weirdness with vector refs and arenas here
Arena parseArena;
StringRef dataRef(data, f.length);
ArenaObjectReader rdr(result.arena(), dataRef, Unversioned());
rdr.deserialize(FileIdentifierFor<GranuleDeltas>::value, result.contents(), parseArena);
result.arena().dependsOn(parseArena);
if (BG_READ_DEBUG) {
printf("Parsed %d deltas from delta file %s\n", result.size(), f.toString().c_str());
}
// TODO REMOVE sanity check
for (int i = 0; i < result.size() - 1; i++) {
if (result[i].version > result[i + 1].version) {
fmt::print("BG VERSION ORDER VIOLATION IN DELTA FILE: '{0}', '{1}'\n",
result[i].version,
result[i + 1].version);
}
ASSERT(result[i].version <= result[i + 1].version);
}
return result;
} catch (Error& e) {
printf("Reading delta file %s got error %s\n", f.toString().c_str(), e.name());
throw e;
}
}
// TODO this giant switch is mostly lifted from storage server.
// Could refactor atomics to have a generic "handle this atomic mutation" thing instead of having to duplicate code with
// the switch statement everywhere?
static void applyDelta(std::map<KeyRef, ValueRef>* dataMap, Arena& ar, KeyRangeRef keyRange, MutationRef m) {
if (m.type == MutationRef::ClearRange) {
if (m.param2 <= keyRange.begin || m.param1 >= keyRange.end) {
return;
}
// keyRange is inclusive on start, lower_bound is inclusive with the argument, and erase is inclusive for the
// begin. So if lower bound didn't find the exact key, we need to go up one so it doesn't erase an extra key
// outside the range.
std::map<KeyRef, ValueRef>::iterator itStart = dataMap->lower_bound(m.param1);
if (itStart != dataMap->end() && itStart->first < m.param1) {
itStart++;
}
// keyRange is exclusive on end, lower bound is inclusive with the argument, and erase is exclusive for the end
// key. So if lower bound didn't find the exact key, we need to go up one so it doesn't skip the last key it
// should erase
std::map<KeyRef, ValueRef>::iterator itEnd = dataMap->lower_bound(m.param2);
if (itEnd != dataMap->end() && itEnd->first < m.param2) {
itEnd++;
}
dataMap->erase(itStart, itEnd);
} else {
if (m.param1 < keyRange.begin || m.param1 >= keyRange.end) {
return;
}
// TODO: we don't need atomics here since eager reads handles it
std::map<KeyRef, ValueRef>::iterator it = dataMap->find(m.param1);
if (m.type != MutationRef::SetValue) {
Optional<StringRef> oldVal;
if (it != dataMap->end()) {
oldVal = it->second;
}
switch (m.type) {
case MutationRef::AddValue:
m.param2 = doLittleEndianAdd(oldVal, m.param2, ar);
break;
case MutationRef::And:
m.param2 = doAnd(oldVal, m.param2, ar);
break;
case MutationRef::Or:
m.param2 = doOr(oldVal, m.param2, ar);
break;
case MutationRef::Xor:
m.param2 = doXor(oldVal, m.param2, ar);
break;
case MutationRef::AppendIfFits:
m.param2 = doAppendIfFits(oldVal, m.param2, ar);
break;
case MutationRef::Max:
m.param2 = doMax(oldVal, m.param2, ar);
break;
case MutationRef::Min:
m.param2 = doMin(oldVal, m.param2, ar);
break;
case MutationRef::ByteMin:
m.param2 = doByteMin(oldVal, m.param2, ar);
break;
case MutationRef::ByteMax:
m.param2 = doByteMax(oldVal, m.param2, ar);
break;
case MutationRef::MinV2:
m.param2 = doMinV2(oldVal, m.param2, ar);
break;
case MutationRef::AndV2:
m.param2 = doAndV2(oldVal, m.param2, ar);
break;
case MutationRef::CompareAndClear:
if (oldVal.present() && m.param2 == oldVal.get()) {
m.type = MutationRef::ClearRange;
m.param2 = keyAfter(m.param1, ar);
applyDelta(dataMap, ar, keyRange, m);
};
return;
}
}
if (it == dataMap->end()) {
dataMap->insert({ m.param1, m.param2 });
} else {
it->second = m.param2;
}
}
}
// TODO might want to change this to an actor so it can yield periodically?
static void applyDeltas(std::map<KeyRef, ValueRef>* dataMap,
Arena& arena,
GranuleDeltas deltas,
KeyRangeRef keyRange,
Version readVersion,
Version* lastFileEndVersion) {
if (!deltas.empty()) {
// check that consecutive delta file versions are disjoint
ASSERT(*lastFileEndVersion < deltas.front().version);
}
for (MutationsAndVersionRef& delta : deltas) {
if (delta.version > readVersion) {
*lastFileEndVersion = readVersion;
return;
}
for (auto& m : delta.mutations) {
applyDelta(dataMap, arena, keyRange, m);
}
}
if (!deltas.empty()) {
*lastFileEndVersion = deltas.back().version;
}
}
// TODO: improve the interface of this function so that it doesn't need
// to be passed the entire BlobWorkerStats object
ACTOR Future<RangeResult> readBlobGranule(BlobGranuleChunkRef chunk,
KeyRangeRef keyRange,
Version readVersion,
Reference<BackupContainerFileSystem> bstore,
Optional<BlobWorkerStats*> stats) {
// TODO REMOVE with V2 of protocol
ASSERT(readVersion == chunk.includedVersion);
// Arena to hold all allocations for applying deltas. Most of it, and the arenas produced by reading the files,
// will likely be tossed if there are a significant number of mutations, so we copy at the end instead of doing a
// dependsOn.
// FIXME: there is probably some threshold where only a small percentage of the data is actually changed, making it
// better to just do dependsOn instead of copy, trading a little extra memory footprint to help cpu?
state Arena arena;
try {
state std::map<KeyRef, ValueRef> dataMap;
state Version lastFileEndVersion = invalidVersion;
Future<Arena> readSnapshotFuture;
if (chunk.snapshotFile.present()) {
readSnapshotFuture = readSnapshotFile(bstore, chunk.snapshotFile.get(), keyRange, &dataMap);
if (stats.present()) {
++stats.get()->s3GetReqs;
}
} else {
readSnapshotFuture = Future<Arena>(Arena());
}
state std::vector<Future<Standalone<GranuleDeltas>>> readDeltaFutures;
readDeltaFutures.reserve(chunk.deltaFiles.size());
for (BlobFilePointerRef deltaFile : chunk.deltaFiles) {
readDeltaFutures.push_back(readDeltaFile(bstore, deltaFile, keyRange, readVersion));
if (stats.present()) {
++stats.get()->s3GetReqs;
}
}
Arena snapshotArena = wait(readSnapshotFuture);
arena.dependsOn(snapshotArena);
if (BG_READ_DEBUG) {
fmt::print("Applying {} delta files\n", readDeltaFutures.size());
}
for (Future<Standalone<GranuleDeltas>> deltaFuture : readDeltaFutures) {
Standalone<GranuleDeltas> result = wait(deltaFuture);
arena.dependsOn(result.arena());
applyDeltas(&dataMap, arena, result, keyRange, readVersion, &lastFileEndVersion);
wait(yield());
}
if (BG_READ_DEBUG) {
printf("Applying %d memory deltas\n", chunk.newDeltas.size());
}
applyDeltas(&dataMap, arena, chunk.newDeltas, keyRange, readVersion, &lastFileEndVersion);
wait(yield());
RangeResult ret;
for (auto& it : dataMap) {
ret.push_back_deep(ret.arena(), KeyValueRef(it.first, it.second));
// TODO for large reads, probably want to yield periodically here to avoid a SlowTask
}
return ret;
} catch (Error& e) {
printf("Reading blob granule got error %s\n", e.name());
throw e;
}
}
// TODO probably should add things like limit/bytelimit at some point?
ACTOR Future<Void> readBlobGranules(BlobGranuleFileRequest request,
BlobGranuleFileReply reply,
Reference<BackupContainerFileSystem> bstore,
PromiseStream<RangeResult> results) {
// TODO for a large number of chunks, this should probably have some sort of buffer limit like ReplyPromiseStream.
// Maybe just use ReplyPromiseStream instead of PromiseStream?
try {
state int i;
for (i = 0; i < reply.chunks.size(); i++) {
/*printf("ReadBlobGranules processing chunk %d [%s - %s)\n",
i,
reply.chunks[i].keyRange.begin.printable().c_str(),
reply.chunks[i].keyRange.end.printable().c_str());*/
RangeResult chunkResult =
wait(readBlobGranule(reply.chunks[i], request.keyRange, request.readVersion, bstore));
results.send(std::move(chunkResult));
}
// printf("ReadBlobGranules done, sending EOS\n");
results.sendError(end_of_stream());
} catch (Error& e) {
printf("ReadBlobGranules got error %s\n", e.name());
results.sendError(e);
}
return Void();
}
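A hedged sketch of the consuming side (not in this patch): a caller drains the PromiseStream until the end_of_stream error that readBlobGranules sends on completion.

ACTOR Future<Void> consumeGranuleResults(PromiseStream<RangeResult> results) {
    try {
        loop {
            // waitNext throws end_of_stream() once all chunks have been sent
            RangeResult rows = waitNext(results.getFuture());
            fmt::print("consumed {} rows\n", rows.size());
        }
    } catch (Error& e) {
        if (e.code() != error_code_end_of_stream) {
            throw;
        }
    }
    return Void();
}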
TEST_CASE("/blobgranule/reader/applyDelta") {
printf("Testing blob granule deltas\n");
Arena a;
// do this 2 phase arena creation of string refs instead of LiteralStringRef because there is no char* StringRef
// constructor, and valgrind might complain if the stringref data isn't in the arena
std::string sk_a = "A";
std::string sk_ab = "AB";
std::string sk_b = "B";
std::string sk_c = "C";
std::string sk_z = "Z";
std::string sval1 = "1";
std::string sval2 = "2";
StringRef k_a = StringRef(a, sk_a);
StringRef k_ab = StringRef(a, sk_ab);
StringRef k_b = StringRef(a, sk_b);
StringRef k_c = StringRef(a, sk_c);
StringRef k_z = StringRef(a, sk_z);
StringRef val1 = StringRef(a, sval1);
StringRef val2 = StringRef(a, sval2);
std::map<KeyRef, ValueRef> data;
data.insert({ k_a, val1 });
data.insert({ k_ab, val1 });
data.insert({ k_b, val1 });
std::map<KeyRef, ValueRef> correctData = data;
std::map<KeyRef, ValueRef> originalData = data;
ASSERT(data == correctData);
// test all clear permutations
MutationRef mClearEverything(MutationRef::ClearRange, allKeys.begin, allKeys.end);
data = originalData;
correctData = originalData;
applyDelta(&data, a, allKeys, mClearEverything);
correctData.clear();
ASSERT(data == correctData);
MutationRef mClearEverything2(MutationRef::ClearRange, allKeys.begin, k_c);
data = originalData;
correctData = originalData;
applyDelta(&data, a, allKeys, mClearEverything2);
correctData.clear();
ASSERT(data == correctData);
MutationRef mClearEverything3(MutationRef::ClearRange, k_a, allKeys.end);
data = originalData;
correctData = originalData;
applyDelta(&data, a, allKeys, mClearEverything3);
correctData.clear();
ASSERT(data == correctData);
MutationRef mClearEverything4(MutationRef::ClearRange, k_a, k_c);
data = originalData;
correctData = originalData;
applyDelta(&data, a, allKeys, mClearEverything4);
correctData.clear();
ASSERT(data == correctData);
MutationRef mClearFirst(MutationRef::ClearRange, k_a, k_ab);
data = originalData;
correctData = originalData;
applyDelta(&data, a, allKeys, mClearFirst);
correctData.erase(k_a);
ASSERT(data == correctData);
MutationRef mClearSecond(MutationRef::ClearRange, k_ab, k_b);
data = originalData;
correctData = originalData;
applyDelta(&data, a, allKeys, mClearSecond);
correctData.erase(k_ab);
ASSERT(data == correctData);
MutationRef mClearThird(MutationRef::ClearRange, k_b, k_c);
data = originalData;
correctData = originalData;
applyDelta(&data, a, allKeys, mClearThird);
correctData.erase(k_b);
ASSERT(data == correctData);
MutationRef mClearFirst2(MutationRef::ClearRange, k_a, k_b);
data = originalData;
correctData = originalData;
applyDelta(&data, a, allKeys, mClearFirst2);
correctData.erase(k_a);
correctData.erase(k_ab);
ASSERT(data == correctData);
MutationRef mClearLast2(MutationRef::ClearRange, k_ab, k_c);
data = originalData;
correctData = originalData;
applyDelta(&data, a, allKeys, mClearLast2);
correctData.erase(k_ab);
correctData.erase(k_b);
ASSERT(data == correctData);
// test set data
MutationRef mSetA(MutationRef::SetValue, k_a, val2);
data = originalData;
correctData = originalData;
applyDelta(&data, a, allKeys, mSetA);
correctData[k_a] = val2;
ASSERT(data == correctData);
MutationRef mSetAB(MutationRef::SetValue, k_ab, val2);
data = originalData;
correctData = originalData;
applyDelta(&data, a, allKeys, mSetAB);
correctData[k_ab] = val2;
ASSERT(data == correctData);
MutationRef mSetB(MutationRef::SetValue, k_b, val2);
data = originalData;
correctData = originalData;
applyDelta(&data, a, allKeys, mSetB);
correctData[k_b] = val2;
ASSERT(data == correctData);
MutationRef mSetC(MutationRef::SetValue, k_c, val2);
data = originalData;
correctData = originalData;
applyDelta(&data, a, allKeys, mSetC);
correctData[k_c] = val2;
ASSERT(data == correctData);
// test pruning deltas that are outside of the key range
MutationRef mSetZ(MutationRef::SetValue, k_z, val2);
data = originalData;
applyDelta(&data, a, KeyRangeRef(k_a, k_c), mSetZ);
ASSERT(data == originalData);
applyDelta(&data, a, KeyRangeRef(k_ab, k_c), mSetA);
ASSERT(data == originalData);
applyDelta(&data, a, KeyRangeRef(k_ab, k_c), mClearFirst);
ASSERT(data == originalData);
applyDelta(&data, a, KeyRangeRef(k_a, k_ab), mClearThird);
ASSERT(data == originalData);
// Could test all other atomic ops, but if set, max, and compare+clear work, and the others all just directly call
// the atomics, there is little to test
MutationRef mCAndC1(MutationRef::CompareAndClear, k_a, val1);
data = originalData;
correctData = originalData;
applyDelta(&data, a, allKeys, mCAndC1);
correctData.erase(k_a);
ASSERT(data == correctData);
MutationRef mCAndC2(MutationRef::CompareAndClear, k_a, val2);
data = originalData;
applyDelta(&data, a, allKeys, mCAndC2);
ASSERT(data == originalData);
MutationRef mCAndCZ(MutationRef::CompareAndClear, k_z, val2);
data = originalData;
applyDelta(&data, a, allKeys, mCAndCZ);
ASSERT(data == originalData);
MutationRef mMaxA(MutationRef::ByteMax, k_a, val2);
data = originalData;
correctData = originalData;
applyDelta(&data, a, allKeys, mMaxA);
correctData[k_a] = val2;
ASSERT(data == correctData);
MutationRef mMaxC(MutationRef::ByteMax, k_c, val2);
data = originalData;
correctData = originalData;
applyDelta(&data, a, allKeys, mMaxC);
correctData[k_c] = val2;
ASSERT(data == correctData);
return Void();
}

View File

@ -0,0 +1,51 @@
/*
* BlobGranuleReader.actor.h
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
// When actually compiled (NO_INTELLISENSE), include the generated version of this file. In intellisense use the source
// version.
#if defined(NO_INTELLISENSE) && !defined(BLOB_GRANULE_READER_CLIENT_G_H)
#define BLOB_GRANULE_READER_CLIENT_G_H
#include "fdbclient/BlobGranuleReader.actor.g.h"
#elif !defined(BLOB_GRANULE_READER_CLIENT_H)
#define BLOB_GRANULE_READER_CLIENT_H
#include "fdbclient/BlobWorkerInterface.h"
#include "fdbclient/BackupContainerFileSystem.h"
#include "fdbclient/BlobWorkerCommon.h"
#include "flow/actorcompiler.h" // This must be the last #include.
// Reads the fileset in the reply using the provided blob store, and filters data and mutations by key + version from
// the request
ACTOR Future<RangeResult> readBlobGranule(BlobGranuleChunkRef chunk,
KeyRangeRef keyRange,
Version readVersion,
Reference<BackupContainerFileSystem> bstore,
Optional<BlobWorkerStats*> stats = Optional<BlobWorkerStats*>());
ACTOR Future<Void> readBlobGranules(BlobGranuleFileRequest request,
BlobGranuleFileReply reply,
Reference<BackupContainerFileSystem> bstore,
PromiseStream<RangeResult> results);
#include "flow/unactorcompiler.h"
#endif

View File

@ -0,0 +1,69 @@
/*
* BlobWorkerCommon.h
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FDBCLIENT_BLOBWORKERCOMMON_H
#define FDBCLIENT_BLOBWORKERCOMMON_H
#include "fdbrpc/Stats.h"
struct BlobWorkerStats {
CounterCollection cc;
Counter s3PutReqs, s3GetReqs, s3DeleteReqs;
Counter deltaFilesWritten, snapshotFilesWritten;
Counter deltaBytesWritten, snapshotBytesWritten;
Counter bytesReadFromFDBForInitialSnapshot;
Counter bytesReadFromS3ForCompaction;
Counter rangeAssignmentRequests, readRequests;
Counter wrongShardServer;
Counter changeFeedInputBytes;
Counter readReqTotalFilesReturned;
Counter readReqDeltaBytesReturned;
Counter commitVersionChecks;
Counter granuleUpdateErrors;
int numRangesAssigned;
int mutationBytesBuffered;
int activeReadRequests;
Future<Void> logger;
// Current stats maintained for a given blob worker process
explicit BlobWorkerStats(UID id, double interval)
: cc("BlobWorkerStats", id.toString()),
s3PutReqs("S3PutReqs", cc), s3GetReqs("S3GetReqs", cc), s3DeleteReqs("S3DeleteReqs", cc),
deltaFilesWritten("DeltaFilesWritten", cc), snapshotFilesWritten("SnapshotFilesWritten", cc),
deltaBytesWritten("DeltaBytesWritten", cc), snapshotBytesWritten("SnapshotBytesWritten", cc),
bytesReadFromFDBForInitialSnapshot("BytesReadFromFDBForInitialSnapshot", cc),
bytesReadFromS3ForCompaction("BytesReadFromS3ForCompaction", cc),
rangeAssignmentRequests("RangeAssignmentRequests", cc), readRequests("ReadRequests", cc),
wrongShardServer("WrongShardServer", cc), changeFeedInputBytes("RangeFeedInputBytes", cc),
readReqTotalFilesReturned("ReadReqTotalFilesReturned", cc),
readReqDeltaBytesReturned("ReadReqDeltaBytesReturned", cc), commitVersionChecks("CommitVersionChecks", cc),
granuleUpdateErrors("GranuleUpdateErrors", cc), numRangesAssigned(0), mutationBytesBuffered(0) {
specialCounter(cc, "NumRangesAssigned", [this]() { return this->numRangesAssigned; });
specialCounter(cc, "MutationBytesBuffered", [this]() { return this->mutationBytesBuffered; });
specialCounter(cc, "ActiveReadRequests", [this]() { return this->activeReadRequests; });
logger = traceCounters("BlobWorkerMetrics", id, interval, &cc, "BlobWorkerMetrics");
}
};
#endif
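As an illustrative sketch (not from the patch), a blob worker would construct one stats object per process and bump the counters as requests flow through; the 5.0 second logging interval is an arbitrary placeholder:

// Hypothetical usage; the interval would normally come from a knob.
BlobWorkerStats stats(deterministicRandom()->randomUniqueID(), /*loggingInterval*/ 5.0);
++stats.readRequests;
++stats.s3GetReqs;
stats.mutationBytesBuffered += 1024;
stats.activeReadRequests = 0; // not set by the constructor's initializer list above, so zero it explicitly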

View File

@ -0,0 +1,223 @@
/*
* BlobWorkerInterface.h
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FDBCLIENT_BLOBWORKERINTERFACE_H
#define FDBCLIENT_BLOBWORKERINTERFACE_H
#pragma once
#include "fdbclient/BlobGranuleCommon.h"
#include "fdbclient/FDBTypes.h"
#include "fdbrpc/fdbrpc.h"
#include "fdbrpc/Locality.h"
struct BlobWorkerInterface {
constexpr static FileIdentifier file_identifier = 8358753;
// TODO: mimic what StorageServerInterface does with sequential endpoint IDs
RequestStream<ReplyPromise<Void>> waitFailure;
RequestStream<struct BlobGranuleFileRequest> blobGranuleFileRequest;
RequestStream<struct AssignBlobRangeRequest> assignBlobRangeRequest;
RequestStream<struct RevokeBlobRangeRequest> revokeBlobRangeRequest;
RequestStream<struct GranuleStatusStreamRequest> granuleStatusStreamRequest;
RequestStream<struct HaltBlobWorkerRequest> haltBlobWorker;
struct LocalityData locality;
UID myId;
BlobWorkerInterface() {}
explicit BlobWorkerInterface(const struct LocalityData& l, UID id) : locality(l), myId(id) {}
void initEndpoints() {}
UID id() const { return myId; }
NetworkAddress address() const { return blobGranuleFileRequest.getEndpoint().getPrimaryAddress(); }
NetworkAddress stableAddress() const { return blobGranuleFileRequest.getEndpoint().getStableAddress(); }
bool operator==(const BlobWorkerInterface& r) const { return id() == r.id(); }
bool operator!=(const BlobWorkerInterface& r) const { return !(*this == r); }
std::string toString() const { return id().shortString(); }
template <class Archive>
void serialize(Archive& ar) {
serializer(ar,
waitFailure,
blobGranuleFileRequest,
assignBlobRangeRequest,
revokeBlobRangeRequest,
granuleStatusStreamRequest,
haltBlobWorker,
locality,
myId);
}
};
struct BlobGranuleFileReply {
constexpr static FileIdentifier file_identifier = 6858612;
Arena arena;
VectorRef<BlobGranuleChunkRef> chunks;
template <class Ar>
void serialize(Ar& ar) {
serializer(ar, chunks, arena);
}
};
// TODO could do a reply promise stream of file mutations to bound memory requirements?
// Have to load whole snapshot file into memory though so it doesn't actually matter too much
struct BlobGranuleFileRequest {
constexpr static FileIdentifier file_identifier = 4150141;
Arena arena;
KeyRangeRef keyRange;
Version beginVersion = 0;
Version readVersion;
ReplyPromise<BlobGranuleFileReply> reply;
BlobGranuleFileRequest() {}
template <class Ar>
void serialize(Ar& ar) {
serializer(ar, keyRange, beginVersion, readVersion, reply, arena);
}
};
struct AssignBlobRangeReply {
constexpr static FileIdentifier file_identifier = 6431923;
bool epochOk; // false if the worker has seen a new manager
AssignBlobRangeReply() {}
explicit AssignBlobRangeReply(bool epochOk) : epochOk(epochOk) {}
template <class Ar>
void serialize(Ar& ar) {
serializer(ar, epochOk);
}
};
struct RevokeBlobRangeRequest {
constexpr static FileIdentifier file_identifier = 4844288;
Arena arena;
KeyRangeRef keyRange;
int64_t managerEpoch;
int64_t managerSeqno;
bool dispose;
ReplyPromise<AssignBlobRangeReply> reply;
RevokeBlobRangeRequest() {}
template <class Ar>
void serialize(Ar& ar) {
serializer(ar, keyRange, managerEpoch, managerSeqno, dispose, reply, arena);
}
};
struct AssignBlobRangeRequest {
constexpr static FileIdentifier file_identifier = 905381;
Arena arena;
KeyRangeRef keyRange;
int64_t managerEpoch;
int64_t managerSeqno;
// If continueAssignment is true, this is just to instruct the worker that it *still* owns the range, so it should
// re-snapshot it and continue.
// For an initial assignment, reassignment, split, or merge, continueAssignment==false.
bool continueAssignment;
ReplyPromise<AssignBlobRangeReply> reply;
AssignBlobRangeRequest() {}
template <class Ar>
void serialize(Ar& ar) {
serializer(ar, keyRange, managerEpoch, managerSeqno, continueAssignment, reply, arena);
}
};
// reply per granule
// TODO: could eventually add other types of metrics to report back to the manager here
struct GranuleStatusReply : public ReplyPromiseStreamReply {
constexpr static FileIdentifier file_identifier = 7563104;
KeyRange granuleRange;
bool doSplit;
int64_t epoch;
int64_t seqno;
UID granuleID;
Version startVersion;
Version latestVersion;
GranuleStatusReply() {}
explicit GranuleStatusReply(KeyRange range,
bool doSplit,
int64_t epoch,
int64_t seqno,
UID granuleID,
Version startVersion,
Version latestVersion)
: granuleRange(range), doSplit(doSplit), epoch(epoch), seqno(seqno), granuleID(granuleID),
startVersion(startVersion), latestVersion(latestVersion) {}
int expectedSize() const { return sizeof(GranuleStatusReply) + granuleRange.expectedSize(); }
template <class Ar>
void serialize(Ar& ar) {
serializer(ar,
ReplyPromiseStreamReply::acknowledgeToken,
ReplyPromiseStreamReply::sequence,
granuleRange,
doSplit,
epoch,
seqno,
granuleID,
startVersion,
latestVersion);
}
};
// manager makes one request per worker, it sends all range updates through this stream
struct GranuleStatusStreamRequest {
constexpr static FileIdentifier file_identifier = 2289677;
int64_t managerEpoch;
ReplyPromiseStream<GranuleStatusReply> reply;
GranuleStatusStreamRequest() {}
explicit GranuleStatusStreamRequest(int64_t managerEpoch) : managerEpoch(managerEpoch) {}
template <class Ar>
void serialize(Ar& ar) {
serializer(ar, managerEpoch, reply);
}
};
struct HaltBlobWorkerRequest {
constexpr static FileIdentifier file_identifier = 1985879;
UID requesterID;
ReplyPromise<Void> reply;
int64_t managerEpoch;
HaltBlobWorkerRequest() {}
explicit HaltBlobWorkerRequest(int64_t managerEpoch, UID uid) : requesterID(uid), managerEpoch(managerEpoch) {}
template <class Ar>
void serialize(Ar& ar) {
serializer(ar, managerEpoch, requesterID, reply);
}
};
#endif
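For orientation, a hedged sketch (not part of the patch) of how client code inside an ACTOR might issue a file request; bwInterf and readVersion are assumed to come from worker discovery and a prior getReadVersion():

// Placeholder key range; "a"/"b" are illustrative only.
state BlobGranuleFileRequest req;
req.keyRange = KeyRangeRef("a"_sr, "b"_sr);
req.readVersion = readVersion;
BlobGranuleFileReply rep = wait(bwInterf.blobGranuleFileRequest.getReply(req));
fmt::print("received {} chunk(s) for [{} - {})\n",
           rep.chunks.size(),
           req.keyRange.begin.printable(),
           req.keyRange.end.printable());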

View File

@ -20,12 +20,24 @@ set(FDBCLIENT_SRCS
BackupContainerS3BlobStore.h BackupContainerS3BlobStore.h
ClientBooleanParams.cpp ClientBooleanParams.cpp
ClientBooleanParams.h ClientBooleanParams.h
BlobWorkerInterface.h
BlobGranuleReader.actor.cpp
BlobGranuleReader.actor.h
BlobGranuleCommon.h
BlobWorkerCommon.h
ClientKnobCollection.cpp ClientKnobCollection.cpp
ClientKnobCollection.h ClientKnobCollection.h
ClientKnobs.cpp ClientKnobs.cpp
ClientKnobs.h ClientKnobs.h
ClientLogEvents.h ClientLogEvents.h
ClientVersion.h
ClientWorkerInterface.h ClientWorkerInterface.h
ClusterConnectionFile.actor.cpp
ClusterConnectionFile.h
ClusterConnectionKey.actor.cpp
ClusterConnectionKey.actor.h
ClusterConnectionMemoryRecord.actor.cpp
ClusterConnectionMemoryRecord.h
ClusterInterface.h ClusterInterface.h
CommitProxyInterface.h CommitProxyInterface.h
CommitTransaction.h CommitTransaction.h
@ -62,11 +74,15 @@ set(FDBCLIENT_SRCS
Knobs.h Knobs.h
IKnobCollection.cpp IKnobCollection.cpp
IKnobCollection.h IKnobCollection.h
LocalClientAPI.cpp
LocalClientAPI.h
ManagementAPI.actor.cpp ManagementAPI.actor.cpp
ManagementAPI.actor.h ManagementAPI.actor.h
MonitorLeader.actor.cpp MonitorLeader.actor.cpp
MonitorLeader.h MonitorLeader.h
MultiVersionAssignmentVars.h MultiVersionAssignmentVars.h
ClientLibManagement.actor.cpp
ClientLibManagement.actor.h
MultiVersionTransaction.actor.cpp MultiVersionTransaction.actor.cpp
MultiVersionTransaction.h MultiVersionTransaction.h
MutationList.h MutationList.h

View File

@ -75,6 +75,9 @@ void ClientKnobs::initialize(Randomize randomize) {
init( VALUE_SIZE_LIMIT, 1e5 ); init( VALUE_SIZE_LIMIT, 1e5 );
init( SPLIT_KEY_SIZE_LIMIT, KEY_SIZE_LIMIT/2 ); if( randomize && BUGGIFY ) SPLIT_KEY_SIZE_LIMIT = KEY_SIZE_LIMIT - 31;//serverKeysPrefixFor(UID()).size() - 1; init( SPLIT_KEY_SIZE_LIMIT, KEY_SIZE_LIMIT/2 ); if( randomize && BUGGIFY ) SPLIT_KEY_SIZE_LIMIT = KEY_SIZE_LIMIT - 31;//serverKeysPrefixFor(UID()).size() - 1;
init( METADATA_VERSION_CACHE_SIZE, 1000 ); init( METADATA_VERSION_CACHE_SIZE, 1000 );
init( CHANGE_FEED_LOCATION_LIMIT, 10000 );
init( CHANGE_FEED_CACHE_SIZE, 100000 ); if( randomize && BUGGIFY ) CHANGE_FEED_CACHE_SIZE = 1;
init( CHANGE_FEED_POP_TIMEOUT, 5.0 );
init( MAX_BATCH_SIZE, 1000 ); if( randomize && BUGGIFY ) MAX_BATCH_SIZE = 1; init( MAX_BATCH_SIZE, 1000 ); if( randomize && BUGGIFY ) MAX_BATCH_SIZE = 1;
init( GRV_BATCH_TIMEOUT, 0.005 ); if( randomize && BUGGIFY ) GRV_BATCH_TIMEOUT = 0.1; init( GRV_BATCH_TIMEOUT, 0.005 ); if( randomize && BUGGIFY ) GRV_BATCH_TIMEOUT = 0.1;
@ -97,6 +100,7 @@ void ClientKnobs::initialize(Randomize randomize) {
init( RANGESTREAM_FRAGMENT_SIZE, 1e6 ); init( RANGESTREAM_FRAGMENT_SIZE, 1e6 );
init( RANGESTREAM_BUFFERED_FRAGMENTS_LIMIT, 20 ); init( RANGESTREAM_BUFFERED_FRAGMENTS_LIMIT, 20 );
init( QUARANTINE_TSS_ON_MISMATCH, true ); if( randomize && BUGGIFY ) QUARANTINE_TSS_ON_MISMATCH = false; // if true, a tss mismatch will put the offending tss in quarantine. If false, it will just be killed init( QUARANTINE_TSS_ON_MISMATCH, true ); if( randomize && BUGGIFY ) QUARANTINE_TSS_ON_MISMATCH = false; // if true, a tss mismatch will put the offending tss in quarantine. If false, it will just be killed
init( CHANGE_FEED_EMPTY_BATCH_TIME, 0.005 );
//KeyRangeMap //KeyRangeMap
init( KRM_GET_RANGE_LIMIT, 1e5 ); if( randomize && BUGGIFY ) KRM_GET_RANGE_LIMIT = 10; init( KRM_GET_RANGE_LIMIT, 1e5 ); if( randomize && BUGGIFY ) KRM_GET_RANGE_LIMIT = 10;
@ -257,6 +261,13 @@ void ClientKnobs::initialize(Randomize randomize) {
init( BUSYNESS_SPIKE_START_THRESHOLD, 0.100 ); init( BUSYNESS_SPIKE_START_THRESHOLD, 0.100 );
init( BUSYNESS_SPIKE_SATURATED_THRESHOLD, 0.500 ); init( BUSYNESS_SPIKE_SATURATED_THRESHOLD, 0.500 );
// multi-version client control
init( MVC_CLIENTLIB_CHUNK_SIZE, 8*1024 );
init( MVC_CLIENTLIB_CHUNKS_PER_TRANSACTION, 32 );
// blob granules
init( ENABLE_BLOB_GRANULES, false );
// clang-format on // clang-format on
} }
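The new knobs are read through CLIENT_KNOBS like any other; a trivial, illustrative check might be:

// Illustrative only.
if (CLIENT_KNOBS->ENABLE_BLOB_GRANULES) {
    fmt::print("change feed cache size: {}\n", CLIENT_KNOBS->CHANGE_FEED_CACHE_SIZE);
}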

View File

@ -74,6 +74,9 @@ public:
int64_t VALUE_SIZE_LIMIT; int64_t VALUE_SIZE_LIMIT;
int64_t SPLIT_KEY_SIZE_LIMIT; int64_t SPLIT_KEY_SIZE_LIMIT;
int METADATA_VERSION_CACHE_SIZE; int METADATA_VERSION_CACHE_SIZE;
int64_t CHANGE_FEED_LOCATION_LIMIT;
int64_t CHANGE_FEED_CACHE_SIZE;
double CHANGE_FEED_POP_TIMEOUT;
int MAX_BATCH_SIZE; int MAX_BATCH_SIZE;
double GRV_BATCH_TIMEOUT; double GRV_BATCH_TIMEOUT;
@ -97,6 +100,7 @@ public:
int64_t RANGESTREAM_FRAGMENT_SIZE; int64_t RANGESTREAM_FRAGMENT_SIZE;
int RANGESTREAM_BUFFERED_FRAGMENTS_LIMIT; int RANGESTREAM_BUFFERED_FRAGMENTS_LIMIT;
bool QUARANTINE_TSS_ON_MISMATCH; bool QUARANTINE_TSS_ON_MISMATCH;
double CHANGE_FEED_EMPTY_BATCH_TIME;
// KeyRangeMap // KeyRangeMap
int KRM_GET_RANGE_LIMIT; int KRM_GET_RANGE_LIMIT;
@ -249,6 +253,13 @@ public:
double BUSYNESS_SPIKE_START_THRESHOLD; double BUSYNESS_SPIKE_START_THRESHOLD;
double BUSYNESS_SPIKE_SATURATED_THRESHOLD; double BUSYNESS_SPIKE_SATURATED_THRESHOLD;
// multi-version client control
int MVC_CLIENTLIB_CHUNK_SIZE;
int MVC_CLIENTLIB_CHUNKS_PER_TRANSACTION;
// blob granules
bool ENABLE_BLOB_GRANULES;
ClientKnobs(Randomize randomize); ClientKnobs(Randomize randomize);
void initialize(Randomize randomize); void initialize(Randomize randomize);
}; };

View File

@ -0,0 +1,801 @@
/*
* ClientLibManagement.actor.cpp
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "fdbclient/ClientLibManagement.actor.h"
#include "fdbclient/Schemas.h"
#include "fdbclient/NativeAPI.actor.h"
#include "fdbclient/ManagementAPI.actor.h"
#include "fdbclient/ClientKnobs.h"
#include "fdbclient/SystemData.h"
#include "fdbclient/versions.h"
#include "fdbrpc/IAsyncFile.h"
#include "flow/Platform.h"
#include <algorithm>
#include <string>
#include <stdio.h>
#include "flow/Trace.h"
#include "flow/actorcompiler.h" // This must be the last #include.
namespace ClientLibManagement {
struct ClientLibBinaryInfo {
size_t totalBytes = 0;
size_t chunkCnt = 0;
size_t chunkSize = 0;
Standalone<StringRef> sumBytes;
};
#define ASSERT_INDEX_IN_RANGE(idx, arr) ASSERT(idx >= 0 && idx < sizeof(arr) / sizeof(arr[0]))
const std::string& getStatusName(ClientLibStatus status) {
static const std::string statusNames[] = { "disabled", "uploading", "download", "active" };
int idx = static_cast<int>(status);
ASSERT_INDEX_IN_RANGE(idx, statusNames);
return statusNames[idx];
}
ClientLibStatus getStatusByName(std::string_view statusName) {
static std::map<std::string_view, ClientLibStatus> statusByName;
// initialize the map on demand
if (statusByName.empty()) {
for (int i = 0; i < static_cast<int>(ClientLibStatus::COUNT); i++) {
ClientLibStatus status = static_cast<ClientLibStatus>(i);
statusByName[getStatusName(status)] = status;
}
}
auto statusIter = statusByName.find(statusName);
if (statusIter == statusByName.cend()) {
TraceEvent(SevWarnAlways, "ClientLibraryInvalidMetadata")
.detail("Error", format("Unknown status value %s", std::string(statusName).c_str()));
throw client_lib_invalid_metadata();
}
return statusIter->second;
}
const std::string& getPlatformName(ClientLibPlatform platform) {
static const std::string platformNames[] = { "unknown", "x86_64-linux", "x86_64-windows", "x86_64-macos" };
int idx = static_cast<int>(platform);
ASSERT_INDEX_IN_RANGE(idx, platformNames);
return platformNames[idx];
}
ClientLibPlatform getPlatformByName(std::string_view platformName) {
static std::map<std::string_view, ClientLibPlatform> platformByName;
// initialize the map on demand
if (platformByName.empty()) {
for (int i = 0; i < static_cast<int>(ClientLibPlatform::COUNT); i++) {
ClientLibPlatform platform = static_cast<ClientLibPlatform>(i);
platformByName[getPlatformName(platform)] = platform;
}
}
auto platfIter = platformByName.find(platformName);
if (platfIter == platformByName.cend()) {
TraceEvent(SevWarnAlways, "ClientLibraryInvalidMetadata")
.detail("Error", format("Unknown platform value %s", std::string(platformName).c_str()));
throw client_lib_invalid_metadata();
}
return platfIter->second;
}
const std::string& getChecksumAlgName(ClientLibChecksumAlg checksumAlg) {
static const std::string checksumAlgNames[] = { "md5" };
int idx = static_cast<int>(checksumAlg);
ASSERT_INDEX_IN_RANGE(idx, checksumAlgNames);
return checksumAlgNames[idx];
}
ClientLibChecksumAlg getChecksumAlgByName(std::string_view checksumAlgName) {
static std::map<std::string_view, ClientLibChecksumAlg> checksumAlgByName;
// initialize the map on demand
if (checksumAlgByName.empty()) {
for (int i = 0; i < (int)ClientLibChecksumAlg::COUNT; i++) {
ClientLibChecksumAlg checksumAlg = static_cast<ClientLibChecksumAlg>(i);
checksumAlgByName[getChecksumAlgName(checksumAlg)] = checksumAlg;
}
}
auto iter = checksumAlgByName.find(checksumAlgName);
if (iter == checksumAlgByName.cend()) {
TraceEvent(SevWarnAlways, "ClientLibraryInvalidMetadata")
.detail("Error", format("Unknown checksum algorithm %s", std::string(checksumAlgName).c_str()));
throw client_lib_invalid_metadata();
}
return iter->second;
}
namespace {
bool isValidTargetStatus(ClientLibStatus status) {
return status == ClientLibStatus::DISABLED || status == ClientLibStatus::DOWNLOAD ||
status == ClientLibStatus::ACTIVE;
}
bool isAvailableForDownload(ClientLibStatus status) {
return status == ClientLibStatus::DOWNLOAD || status == ClientLibStatus::ACTIVE;
}
void updateClientLibChangeCounter(Transaction& tr, ClientLibStatus prevStatus, ClientLibStatus newStatus) {
static const int64_t counterIncVal = 1;
if ((prevStatus != newStatus) &&
(newStatus == ClientLibStatus::DOWNLOAD || newStatus == ClientLibStatus::ACTIVE ||
prevStatus == ClientLibStatus::DOWNLOAD || prevStatus == ClientLibStatus::ACTIVE)) {
tr.atomicOp(clientLibChangeCounterKey,
StringRef(reinterpret_cast<const uint8_t*>(&counterIncVal), sizeof(counterIncVal)),
MutationRef::AddValue);
}
}
json_spirit::mObject parseMetadataJson(StringRef metadataString) {
json_spirit::mValue parsedMetadata;
if (!json_spirit::read_string(metadataString.toString(), parsedMetadata) ||
parsedMetadata.type() != json_spirit::obj_type) {
TraceEvent(SevWarnAlways, "ClientLibraryInvalidMetadata")
.detail("Reason", "InvalidJSON")
.detail("Configuration", metadataString);
throw client_lib_invalid_metadata();
}
return parsedMetadata.get_obj();
}
const std::string& getMetadataStrAttr(const json_spirit::mObject& metadataJson, const std::string& attrName) {
auto attrIter = metadataJson.find(attrName);
if (attrIter == metadataJson.cend() || attrIter->second.type() != json_spirit::str_type) {
TraceEvent(SevWarnAlways, "ClientLibraryInvalidMetadata")
.detail("Error", format("Missing attribute %s", attrName.c_str()));
throw client_lib_invalid_metadata();
}
return attrIter->second.get_str();
}
int getMetadataIntAttr(const json_spirit::mObject& metadataJson, const std::string& attrName) {
auto attrIter = metadataJson.find(attrName);
if (attrIter == metadataJson.cend() || attrIter->second.type() != json_spirit::int_type) {
TraceEvent(SevWarnAlways, "ClientLibraryInvalidMetadata")
.detail("Error", format("Missing attribute %s", attrName.c_str()));
throw client_lib_invalid_metadata();
}
return attrIter->second.get_int();
}
bool validVersionPartNum(int num) {
return (num >= 0 && num < 1000);
}
int getNumericVersionEncoding(const std::string& versionStr) {
int major, minor, patch;
int charsScanned;
int numScanned = sscanf(versionStr.c_str(), "%d.%d.%d%n", &major, &minor, &patch, &charsScanned);
if (numScanned != 3 || !validVersionPartNum(major) || !validVersionPartNum(minor) || !validVersionPartNum(patch) ||
charsScanned != versionStr.size()) {
TraceEvent(SevWarnAlways, "ClientLibraryInvalidMetadata")
.detail("Error", format("Invalid version string %s", versionStr.c_str()));
throw client_lib_invalid_metadata();
}
return ((major * 1000) + minor) * 1000 + patch;
}
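To make the encoding concrete: a version string such as "7.1.25" yields (7 * 1000 + 1) * 1000 + 25 = 7001025, which getIdFromMetadataJson below renders as "007001025" via format("%09d", ...).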
Standalone<StringRef> getIdFromMetadataJson(const json_spirit::mObject& metadataJson) {
std::ostringstream libIdBuilder;
libIdBuilder << getMetadataStrAttr(metadataJson, CLIENTLIB_ATTR_PLATFORM) << "/";
libIdBuilder << format("%09d", getNumericVersionEncoding(getMetadataStrAttr(metadataJson, CLIENTLIB_ATTR_VERSION)))
<< "/";
libIdBuilder << getMetadataStrAttr(metadataJson, CLIENTLIB_ATTR_TYPE) << "/";
libIdBuilder << getMetadataStrAttr(metadataJson, CLIENTLIB_ATTR_CHECKSUM);
return Standalone<StringRef>(libIdBuilder.str());
}
Key metadataKeyFromId(StringRef clientLibId) {
return clientLibId.withPrefix(clientLibMetadataPrefix);
}
Key chunkKeyPrefixFromId(StringRef clientLibId) {
return clientLibId.withPrefix(clientLibBinaryPrefix).withSuffix(LiteralStringRef("/"));
}
KeyRef chunkKeyFromNo(StringRef clientLibBinPrefix, size_t chunkNo, Arena& arena) {
return clientLibBinPrefix.withSuffix(format("%06zu", chunkNo), arena);
}
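// Chunk keys therefore have the layout "<clientLibBinaryPrefix><clientLibId>/NNNNNN", with a zero-padded
// six-digit chunk number, e.g. ".../000000", ".../000001", ...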
[[maybe_unused]] ClientLibPlatform getCurrentClientPlatform() {
#ifdef __x86_64__
#if defined(_WIN32)
return ClientLibPlatform::X86_64_WINDOWS;
#elif defined(__linux__)
return ClientLibPlatform::X86_64_LINUX;
#elif defined(__FreeBSD__) || defined(__APPLE__)
return ClientLibPlatform::X86_64_MACOS;
#else
return ClientLibPlatform::UNKNOWN;
#endif
#else // not __x86_64__
return ClientLibPlatform::UNKNOWN;
#endif
}
Standalone<StringRef> byteArrayToHexString(StringRef input) {
static const char* digits = "0123456789abcdef";
Standalone<StringRef> output = makeString(input.size() * 2);
char* pout = reinterpret_cast<char*>(mutateString(output));
for (const uint8_t* pin = input.begin(); pin != input.end(); ++pin) {
*pout++ = digits[(*pin >> 4) & 0xF];
*pout++ = digits[(*pin) & 0xF];
}
return output;
}
} // namespace
Standalone<StringRef> md5SumToHexString(MD5_CTX& sum) {
Standalone<StringRef> sumBytes = makeString(16);
::MD5_Final(mutateString(sumBytes), &sum);
return byteArrayToHexString(sumBytes);
}
ClientLibFilter& ClientLibFilter::filterNewerPackageVersion(const std::string& versionStr) {
matchNewerPackageVersion = true;
this->numericPkgVersion = getNumericVersionEncoding(versionStr);
return *this;
}
Standalone<StringRef> getClientLibIdFromMetadataJson(StringRef metadataString) {
json_spirit::mObject parsedMetadata = parseMetadataJson(metadataString);
return getIdFromMetadataJson(parsedMetadata);
}
namespace {
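// Reads the library file in batches of MVC_CLIENTLIB_CHUNKS_PER_TRANSACTION chunks (each chunk
// MVC_CLIENTLIB_CHUNK_SIZE bytes, rounded up to a 1KB multiple), writes every chunk under
// chunkKeyPrefix + "%06zu", and accumulates an MD5 checksum over the whole file.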
ACTOR Future<Void> uploadClientLibBinary(Database db,
StringRef libFilePath,
KeyRef chunkKeyPrefix,
ClientLibBinaryInfo* binInfo) {
state int chunkSize = getAlignedUpperBound(CLIENT_KNOBS->MVC_CLIENTLIB_CHUNK_SIZE, 1024);
state int transactionSize = std::max(CLIENT_KNOBS->MVC_CLIENTLIB_CHUNKS_PER_TRANSACTION, 1) * chunkSize;
state size_t fileOffset = 0;
state size_t chunkNo = 0;
state MD5_CTX sum;
state Arena arena;
state StringRef buf;
state Transaction tr;
state size_t firstChunkNo;
// Disabling AIO, because it currently supports only page-aligned writes, but the size of a client library
// is not necessarily page-aligned; need to investigate whether this is a limitation of AIO or just of the way
// we are wrapping it
state Reference<IAsyncFile> fClientLib = wait(IAsyncFileSystem::filesystem()->open(
libFilePath.toString(), IAsyncFile::OPEN_READONLY | IAsyncFile::OPEN_UNCACHED | IAsyncFile::OPEN_NO_AIO, 0));
::MD5_Init(&sum);
loop {
arena = Arena();
// Use page-aligned buffers for enabling possible future use with AIO
buf = makeAlignedString(_PAGE_SIZE, transactionSize, arena);
state int bytesRead = wait(fClientLib->read(mutateString(buf), transactionSize, fileOffset));
fileOffset += bytesRead;
if (bytesRead <= 0) {
break;
}
::MD5_Update(&sum, buf.begin(), bytesRead);
tr = Transaction(db);
firstChunkNo = chunkNo;
loop {
try {
tr.setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
tr.setOption(FDBTransactionOptions::LOCK_AWARE);
int bufferOffset = 0;
chunkNo = firstChunkNo;
while (bufferOffset < bytesRead) {
size_t chunkLen = std::min(chunkSize, bytesRead - bufferOffset);
KeyRef chunkKey = chunkKeyFromNo(chunkKeyPrefix, chunkNo, arena);
chunkNo++;
tr.set(chunkKey, ValueRef(mutateString(buf) + bufferOffset, chunkLen));
bufferOffset += chunkLen;
}
wait(tr.commit());
break;
} catch (Error& e) {
wait(tr.onError(e));
}
}
if (bytesRead < transactionSize) {
break;
}
}
binInfo->totalBytes = fileOffset;
binInfo->chunkCnt = chunkNo;
binInfo->chunkSize = chunkSize;
binInfo->sumBytes = md5SumToHexString(sum);
return Void();
}
} // namespace
ACTOR Future<Void> uploadClientLibrary(Database db,
Standalone<StringRef> metadataString,
Standalone<StringRef> libFilePath) {
state json_spirit::mObject metadataJson;
state Standalone<StringRef> clientLibId;
state Key clientLibMetaKey;
state Key clientLibBinPrefix;
state std::string jsStr;
state Transaction tr;
state ClientLibBinaryInfo binInfo;
state ClientLibStatus targetStatus;
metadataJson = parseMetadataJson(metadataString);
json_spirit::mValue schema;
if (!json_spirit::read_string(JSONSchemas::clientLibMetadataSchema.toString(), schema)) {
ASSERT(false);
}
std::string errorStr;
if (!schemaMatch(schema.get_obj(), metadataJson, errorStr, SevWarnAlways)) {
TraceEvent(SevWarnAlways, "ClientLibraryInvalidMetadata")
.detail("Reason", "SchemaMismatch")
.detail("Configuration", metadataString)
.detail("Error", errorStr);
throw client_lib_invalid_metadata();
}
clientLibId = getIdFromMetadataJson(metadataJson);
clientLibMetaKey = metadataKeyFromId(clientLibId);
clientLibBinPrefix = chunkKeyPrefixFromId(clientLibId);
targetStatus = getStatusByName(getMetadataStrAttr(metadataJson, CLIENTLIB_ATTR_STATUS));
if (!isValidTargetStatus(targetStatus)) {
TraceEvent(SevWarnAlways, "ClientLibraryInvalidMetadata")
.detail("Reason", "InvalidTargetStatus")
.detail("Configuration", metadataString);
throw client_lib_invalid_metadata();
}
// check if checksumalg and platform attributes have valid values
getChecksumAlgByName(getMetadataStrAttr(metadataJson, CLIENTLIB_ATTR_CHECKSUM_ALG));
getPlatformByName(getMetadataStrAttr(metadataJson, CLIENTLIB_ATTR_PLATFORM));
// Check if further mandatory attributes are set
getMetadataStrAttr(metadataJson, CLIENTLIB_ATTR_GIT_HASH);
getMetadataStrAttr(metadataJson, CLIENTLIB_ATTR_PROTOCOL);
getMetadataIntAttr(metadataJson, CLIENTLIB_ATTR_API_VERSION);
metadataJson[CLIENTLIB_ATTR_STATUS] = getStatusName(ClientLibStatus::UPLOADING);
jsStr = json_spirit::write_string(json_spirit::mValue(metadataJson));
/*
* Check if the client library with the same identifier already exists.
* If not, write its metadata with "uploading" state to prevent concurrent uploads
*/
tr = Transaction(db);
loop {
try {
tr.setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
tr.setOption(FDBTransactionOptions::LOCK_AWARE);
Optional<Value> existingMeta = wait(tr.get(clientLibMetaKey));
if (existingMeta.present()) {
TraceEvent(SevWarnAlways, "ClientLibraryAlreadyExists")
.detail("Key", clientLibMetaKey)
.detail("ExistingMetadata", existingMeta.get().toString());
throw client_lib_already_exists();
}
TraceEvent("ClientLibraryBeginUpload").detail("Key", clientLibMetaKey);
tr.set(clientLibMetaKey, ValueRef(jsStr));
wait(tr.commit());
break;
} catch (Error& e) {
wait(tr.onError(e));
}
}
/*
* Upload the binary of the client library in chunks
*/
wait(uploadClientLibBinary(db, libFilePath, clientLibBinPrefix, &binInfo));
std::string checkSum = getMetadataStrAttr(metadataJson, CLIENTLIB_ATTR_CHECKSUM);
if (binInfo.sumBytes != StringRef(checkSum)) {
TraceEvent(SevWarnAlways, "ClientLibraryChecksumMismatch")
.detail("Expected", checkSum)
.detail("Actual", binInfo.sumBytes)
.detail("Configuration", metadataString);
// Rollback the upload operation
try {
wait(deleteClientLibrary(db, clientLibId));
} catch (Error& e) {
TraceEvent(SevError, "ClientLibraryUploadRollbackFailed").error(e);
}
throw client_lib_invalid_binary();
}
/*
* Update the metadata entry, with additional information about the binary
* and change its state from "uploading" to the given one
*/
metadataJson[CLIENTLIB_ATTR_SIZE] = static_cast<int64_t>(binInfo.totalBytes);
metadataJson[CLIENTLIB_ATTR_CHUNK_COUNT] = static_cast<int64_t>(binInfo.chunkCnt);
metadataJson[CLIENTLIB_ATTR_CHUNK_SIZE] = static_cast<int64_t>(binInfo.chunkSize);
metadataJson[CLIENTLIB_ATTR_FILENAME] = basename(libFilePath.toString());
metadataJson[CLIENTLIB_ATTR_STATUS] = getStatusName(targetStatus);
jsStr = json_spirit::write_string(json_spirit::mValue(metadataJson));
tr.reset();
loop {
try {
tr.setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
tr.setOption(FDBTransactionOptions::LOCK_AWARE);
tr.set(clientLibMetaKey, ValueRef(jsStr));
updateClientLibChangeCounter(tr, ClientLibStatus::DISABLED, targetStatus);
wait(tr.commit());
break;
} catch (Error& e) {
wait(tr.onError(e));
}
}
TraceEvent("ClientLibraryUploadDone").detail("Key", clientLibMetaKey);
return Void();
}
ACTOR Future<Void> downloadClientLibrary(Database db,
Standalone<StringRef> clientLibId,
Standalone<StringRef> libFilePath) {
state Key clientLibMetaKey = metadataKeyFromId(clientLibId);
state Key chunkKeyPrefix = chunkKeyPrefixFromId(clientLibId);
state int chunksPerTransaction = std::max(CLIENT_KNOBS->MVC_CLIENTLIB_CHUNKS_PER_TRANSACTION, 1);
state int transactionSize;
state json_spirit::mObject metadataJson;
state std::string checkSum;
state size_t chunkCount;
state size_t binarySize;
state size_t expectedChunkSize;
state Transaction tr;
state size_t fileOffset;
state MD5_CTX sum;
state Arena arena;
state StringRef buf;
state size_t bufferOffset;
state size_t fromChunkNo;
state size_t toChunkNo;
state std::vector<Future<Optional<Value>>> chunkFutures;
TraceEvent("ClientLibraryBeginDownload").detail("Key", clientLibMetaKey);
/*
* First read the metadata to get information about the status and
* the chunk count of the client library
*/
loop {
tr = Transaction(db);
try {
tr.setOption(FDBTransactionOptions::READ_SYSTEM_KEYS);
tr.setOption(FDBTransactionOptions::READ_LOCK_AWARE);
Optional<Value> metadataOpt = wait(tr.get(clientLibMetaKey));
if (!metadataOpt.present()) {
TraceEvent(SevWarnAlways, "ClientLibraryNotFound").detail("Key", clientLibMetaKey);
throw client_lib_not_found();
}
metadataJson = parseMetadataJson(metadataOpt.get());
break;
} catch (Error& e) {
wait(tr.onError(e));
}
}
// Prevent downloading libraries that are not yet fully uploaded or are disabled
if (!isAvailableForDownload(getStatusByName(getMetadataStrAttr(metadataJson, CLIENTLIB_ATTR_STATUS)))) {
throw client_lib_not_available();
}
// Disabling AIO, because it currently supports only page-aligned writes, but the size of a client library
// is not necessarily page-aligned; need to investigate whether this is a limitation of AIO or just of the way
// we are wrapping it
int64_t flags = IAsyncFile::OPEN_ATOMIC_WRITE_AND_CREATE | IAsyncFile::OPEN_READWRITE | IAsyncFile::OPEN_CREATE |
IAsyncFile::OPEN_UNCACHED | IAsyncFile::OPEN_NO_AIO;
state Reference<IAsyncFile> fClientLib =
wait(IAsyncFileSystem::filesystem()->open(libFilePath.toString(), flags, 0666));
checkSum = getMetadataStrAttr(metadataJson, CLIENTLIB_ATTR_CHECKSUM);
chunkCount = getMetadataIntAttr(metadataJson, CLIENTLIB_ATTR_CHUNK_COUNT);
binarySize = getMetadataIntAttr(metadataJson, CLIENTLIB_ATTR_SIZE);
expectedChunkSize = getMetadataIntAttr(metadataJson, CLIENTLIB_ATTR_CHUNK_SIZE);
transactionSize = chunksPerTransaction * expectedChunkSize;
fileOffset = 0;
fromChunkNo = 0;
::MD5_Init(&sum);
arena = Arena();
// Use page-aligned buffers for enabling possible future use with AIO
buf = makeAlignedString(_PAGE_SIZE, transactionSize, arena);
loop {
if (fromChunkNo == chunkCount) {
break;
}
tr = Transaction(db);
toChunkNo = std::min(chunkCount, fromChunkNo + chunksPerTransaction);
// read a batch of file chunks concurrently
loop {
try {
tr.setOption(FDBTransactionOptions::READ_SYSTEM_KEYS);
tr.setOption(FDBTransactionOptions::READ_LOCK_AWARE);
chunkFutures.clear();
for (size_t chunkNo = fromChunkNo; chunkNo < toChunkNo; chunkNo++) {
KeyRef chunkKey = chunkKeyFromNo(chunkKeyPrefix, chunkNo, arena);
chunkFutures.push_back(tr.get(chunkKey));
}
wait(waitForAll(chunkFutures));
break;
} catch (Error& e) {
wait(tr.onError(e));
}
}
// check the read chunks and copy them to a buffer
bufferOffset = 0;
size_t chunkNo = fromChunkNo;
for (auto chunkOptFuture : chunkFutures) {
if (!chunkOptFuture.get().present()) {
TraceEvent(SevWarnAlways, "ClientLibraryChunkNotFound")
.detail("Key", chunkKeyFromNo(chunkKeyPrefix, chunkNo, arena));
throw client_lib_invalid_binary();
}
StringRef chunkVal = chunkOptFuture.get().get();
// All chunks except for the last one must be of the expected size to guarantee
// alignment when writing to the file
if ((chunkNo != (chunkCount - 1) && chunkVal.size() != expectedChunkSize) ||
chunkVal.size() > expectedChunkSize) {
TraceEvent(SevWarnAlways, "ClientLibraryInvalidChunkSize")
.detail("Key", chunkKeyFromNo(chunkKeyPrefix, chunkNo, arena))
.detail("MaxSize", expectedChunkSize)
.detail("ActualSize", chunkVal.size());
throw client_lib_invalid_binary();
}
memcpy(mutateString(buf) + bufferOffset, chunkVal.begin(), chunkVal.size());
bufferOffset += chunkVal.size();
chunkNo++;
}
// write the chunks to the file, update checksum
if (bufferOffset > 0) {
wait(fClientLib->write(buf.begin(), bufferOffset, fileOffset));
fileOffset += bufferOffset;
::MD5_Update(&sum, buf.begin(), bufferOffset);
}
// move to the next batch
fromChunkNo = toChunkNo;
}
// check if the downloaded file size is as expected
if (fileOffset != binarySize) {
TraceEvent(SevWarnAlways, "ClientLibraryInvalidSize")
.detail("ExpectedSize", binarySize)
.detail("ActualSize", fileOffset);
throw client_lib_invalid_binary();
}
// check if the checksum of downloaded file is as expected
Standalone<StringRef> sumBytesStr = md5SumToHexString(sum);
if (sumBytesStr != StringRef(checkSum)) {
TraceEvent(SevWarnAlways, "ClientLibraryChecksumMismatch")
.detail("Expected", checkSum)
.detail("Actual", sumBytesStr)
.detail("Key", clientLibMetaKey);
throw client_lib_invalid_binary();
}
wait(fClientLib->sync());
TraceEvent("ClientLibraryDownloadDone").detail("Key", clientLibMetaKey);
return Void();
}
ACTOR Future<Void> deleteClientLibrary(Database db, Standalone<StringRef> clientLibId) {
state Key clientLibMetaKey = metadataKeyFromId(clientLibId.toString());
state Key chunkKeyPrefix = chunkKeyPrefixFromId(clientLibId.toString());
TraceEvent("ClientLibraryBeginDelete").detail("Key", clientLibMetaKey);
loop {
state Transaction tr(db);
try {
tr.setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
tr.setOption(FDBTransactionOptions::LOCK_AWARE);
Optional<Value> metadataOpt = wait(tr.get(clientLibMetaKey));
if (!metadataOpt.present()) {
TraceEvent(SevWarnAlways, "ClientLibraryNotFound").detail("Key", clientLibMetaKey);
throw client_lib_not_found();
}
json_spirit::mObject metadataJson = parseMetadataJson(metadataOpt.get());
ClientLibStatus status = getStatusByName(getMetadataStrAttr(metadataJson, CLIENTLIB_ATTR_STATUS));
tr.clear(prefixRange(chunkKeyPrefix));
tr.clear(clientLibMetaKey);
updateClientLibChangeCounter(tr, status, ClientLibStatus::DISABLED);
wait(tr.commit());
break;
} catch (Error& e) {
wait(tr.onError(e));
}
}
TraceEvent("ClientLibraryDeleteDone").detail("Key", clientLibMetaKey);
return Void();
}
namespace {
void applyClientLibFilter(const ClientLibFilter& filter,
const RangeResultRef& scanResults,
Standalone<VectorRef<StringRef>>& filteredResults) {
for (const auto& [k, v] : scanResults) {
try {
json_spirit::mObject metadataJson = parseMetadataJson(v);
if (filter.matchAvailableOnly &&
!isAvailableForDownload(getStatusByName(getMetadataStrAttr(metadataJson, CLIENTLIB_ATTR_STATUS)))) {
continue;
}
if (filter.matchCompatibleAPI &&
getMetadataIntAttr(metadataJson, CLIENTLIB_ATTR_API_VERSION) < filter.apiVersion) {
continue;
}
if (filter.matchNewerPackageVersion && !filter.matchPlatform &&
getNumericVersionEncoding(getMetadataStrAttr(metadataJson, CLIENTLIB_ATTR_VERSION)) <=
filter.numericPkgVersion) {
continue;
}
filteredResults.push_back_deep(filteredResults.arena(), v);
} catch (Error& e) {
// Entries with invalid metadata on the cluster; this can happen only if the official management interface
// is bypassed
ASSERT(e.code() == error_code_client_lib_invalid_metadata);
TraceEvent(SevError, "ClientLibraryIgnoringInvalidMetadata").detail("Metadata", v);
}
}
}
} // namespace
ACTOR Future<Standalone<VectorRef<StringRef>>> listClientLibraries(Database db, ClientLibFilter filter) {
state Standalone<VectorRef<StringRef>> result;
state Transaction tr(db);
state PromiseStream<Standalone<RangeResultRef>> scanResults;
state Key fromKey;
state Key toKey;
state KeyRangeRef scanRange;
state Future<Void> stream;
loop {
try {
tr.setOption(FDBTransactionOptions::READ_SYSTEM_KEYS);
tr.setOption(FDBTransactionOptions::READ_LOCK_AWARE);
if (filter.matchPlatform) {
Key prefixWithPlatform =
clientLibMetadataPrefix.withSuffix(std::string(getPlatformName(filter.platformVal)));
fromKey = prefixWithPlatform.withSuffix(LiteralStringRef("/"));
if (filter.matchNewerPackageVersion) {
fromKey = fromKey.withSuffix(format("%09d", filter.numericPkgVersion + 1));
}
toKey = prefixWithPlatform.withSuffix(LiteralStringRef("0"));
scanRange = KeyRangeRef(fromKey, toKey);
} else {
scanRange = clientLibMetadataKeys;
}
scanResults = PromiseStream<Standalone<RangeResultRef>>();
stream = tr.getRangeStream(scanResults, scanRange, GetRangeLimits());
loop {
Standalone<RangeResultRef> scanResultRange = waitNext(scanResults.getFuture());
applyClientLibFilter(filter, scanResultRange, result);
}
} catch (Error& e) {
if (e.code() == error_code_end_of_stream) {
break;
}
wait(tr.onError(e));
}
}
return result;
}
ACTOR Future<ClientLibStatus> getClientLibraryStatus(Database db, Standalone<StringRef> clientLibId) {
state Key clientLibMetaKey = metadataKeyFromId(clientLibId);
state Transaction tr(db);
loop {
try {
tr.setOption(FDBTransactionOptions::READ_SYSTEM_KEYS);
tr.setOption(FDBTransactionOptions::READ_LOCK_AWARE);
Optional<Value> metadataOpt = wait(tr.get(clientLibMetaKey));
if (!metadataOpt.present()) {
TraceEvent(SevWarnAlways, "ClientLibraryNotFound").detail("Key", clientLibMetaKey);
throw client_lib_not_found();
}
json_spirit::mObject metadataJson = parseMetadataJson(metadataOpt.get());
return getStatusByName(getMetadataStrAttr(metadataJson, CLIENTLIB_ATTR_STATUS));
} catch (Error& e) {
wait(tr.onError(e));
}
}
}
ACTOR Future<Void> changeClientLibraryStatus(Database db,
Standalone<StringRef> clientLibId,
ClientLibStatus newStatus) {
state Key clientLibMetaKey = metadataKeyFromId(clientLibId);
state json_spirit::mObject metadataJson;
state std::string jsStr;
state Transaction tr;
if (!isValidTargetStatus(newStatus)) {
TraceEvent(SevWarnAlways, "ClientLibraryInvalidMetadata")
.detail("Reason", "InvalidTargetStatus")
.detail("Status", getStatusName(newStatus));
throw client_lib_invalid_metadata();
}
loop {
tr = Transaction(db);
try {
tr.setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
tr.setOption(FDBTransactionOptions::LOCK_AWARE);
Optional<Value> metadataOpt = wait(tr.get(clientLibMetaKey));
if (!metadataOpt.present()) {
TraceEvent(SevWarnAlways, "ClientLibraryNotFound").detail("Key", clientLibMetaKey);
throw client_lib_not_found();
}
metadataJson = parseMetadataJson(metadataOpt.get());
ClientLibStatus prevStatus = getStatusByName(getMetadataStrAttr(metadataJson, CLIENTLIB_ATTR_STATUS));
if (prevStatus == newStatus) {
return Void();
}
metadataJson[CLIENTLIB_ATTR_STATUS] = getStatusName(newStatus);
jsStr = json_spirit::write_string(json_spirit::mValue(metadataJson));
tr.set(clientLibMetaKey, ValueRef(jsStr));
updateClientLibChangeCounter(tr, prevStatus, newStatus);
wait(tr.commit());
break;
} catch (Error& e) {
if (e.code() == error_code_client_lib_not_found) {
throw;
}
wait(tr.onError(e));
}
}
TraceEvent("ClientLibraryStatusChanged").detail("Key", clientLibMetaKey).detail("Status", getStatusName(newStatus));
return Void();
}
} // namespace ClientLibManagement

View File

@ -0,0 +1,146 @@
/*
* ClientLibManagement.actor.h
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#if defined(NO_INTELLISENSE) && !defined(FDBCLIENT_MULTI_VERSION_CLIENT_CONTROL_ACTOR_G_H)
#define FDBCLIENT_MULTI_VERSION_CLIENT_CONTROL_ACTOR_G_H
#include "fdbclient/ClientLibManagement.actor.g.h"
#elif !defined(FDBCLIENT_MULTI_VERSION_CLIENT_CONTROL_ACTOR_H)
#define FDBCLIENT_MULTI_VERSION_CLIENT_CONTROL_ACTOR_H
#include <string>
#include "fdbclient/NativeAPI.actor.h"
#include "fdbclient/md5/md5.h"
#include "flow/actorcompiler.h" // has to be last include
namespace ClientLibManagement {
enum class ClientLibStatus {
DISABLED = 0,
UPLOADING, // 1
DOWNLOAD, // 2
ACTIVE, // 3
COUNT // must be the last one
};
enum class ClientLibPlatform {
UNKNOWN = 0,
X86_64_LINUX,
X86_64_WINDOWS,
X86_64_MACOS,
COUNT // must be the last one
};
// Currently we support only one checksum algorithm,
// but this may change in the future
enum class ClientLibChecksumAlg {
MD5 = 0,
COUNT // must be the last one
};
inline const std::string CLIENTLIB_ATTR_PLATFORM{ "platform" };
inline const std::string CLIENTLIB_ATTR_STATUS{ "status" };
inline const std::string CLIENTLIB_ATTR_CHECKSUM{ "checksum" };
inline const std::string CLIENTLIB_ATTR_VERSION{ "version" };
inline const std::string CLIENTLIB_ATTR_TYPE{ "type" };
inline const std::string CLIENTLIB_ATTR_API_VERSION{ "apiversion" };
inline const std::string CLIENTLIB_ATTR_PROTOCOL{ "protocol" };
inline const std::string CLIENTLIB_ATTR_GIT_HASH{ "githash" };
inline const std::string CLIENTLIB_ATTR_FILENAME{ "filename" };
inline const std::string CLIENTLIB_ATTR_SIZE{ "size" };
inline const std::string CLIENTLIB_ATTR_CHUNK_COUNT{ "chunkcount" };
inline const std::string CLIENTLIB_ATTR_CHUNK_SIZE{ "chunksize" };
inline const std::string CLIENTLIB_ATTR_CHECKSUM_ALG{ "checksumalg" };
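// A minimal sketch of the metadata JSON these attributes describe (all values are hypothetical and only
// illustrate the shape; the authoritative definition is JSONSchemas::clientLibMetadataSchema):
//
//   {
//     "platform": "x86_64-linux", "version": "6.3.10", "type": "release",
//     "checksum": "d41d8cd98f00b204e9800998ecf8427e", "checksumalg": "md5",
//     "apiversion": 710, "protocol": "fdb00b071010000", "githash": "abcdef0123456789",
//     "status": "download"
//   }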
struct ClientLibFilter {
bool matchAvailableOnly = false;
bool matchPlatform = false;
bool matchCompatibleAPI = false;
bool matchNewerPackageVersion = false;
ClientLibPlatform platformVal = ClientLibPlatform::UNKNOWN;
int apiVersion = 0;
int numericPkgVersion = 0;
ClientLibFilter& filterAvailable() {
matchAvailableOnly = true;
return *this;
}
ClientLibFilter& filterPlatform(ClientLibPlatform platformVal) {
matchPlatform = true;
this->platformVal = platformVal;
return *this;
}
ClientLibFilter& filterCompatibleAPI(int apiVersion) {
matchCompatibleAPI = true;
this->apiVersion = apiVersion;
return *this;
}
// expects a version string like "6.3.10"
ClientLibFilter& filterNewerPackageVersion(const std::string& versionStr);
};
const std::string& getStatusName(ClientLibStatus status);
ClientLibStatus getStatusByName(std::string_view statusName);
const std::string& getPlatformName(ClientLibPlatform platform);
ClientLibPlatform getPlatformByName(std::string_view platformName);
const std::string& getChecksumAlgName(ClientLibChecksumAlg checksumAlg);
ClientLibChecksumAlg getChecksumAlgByName(std::string_view checksumAlgName);
// encodes MD5 result to a hexadecimal string to be provided in the checksum attribute
Standalone<StringRef> md5SumToHexString(MD5_CTX& sum);
// Upload a client library binary from a file and associated metadata JSON
// to the system keyspace of the database
ACTOR Future<Void> uploadClientLibrary(Database db,
Standalone<StringRef> metadataString,
Standalone<StringRef> libFilePath);
// Determine clientLibId from the relevant attributes of the metadata JSON
Standalone<StringRef> getClientLibIdFromMetadataJson(StringRef metadataString);
// Download a client library binary from the system keyspace of the database
// and save it at the given file path
ACTOR Future<Void> downloadClientLibrary(Database db,
Standalone<StringRef> clientLibId,
Standalone<StringRef> libFilePath);
// Delete the client library binary from the system keyspace of the database
ACTOR Future<Void> deleteClientLibrary(Database db, Standalone<StringRef> clientLibId);
// List client libraries available on the cluster, with the specified filter
// Returns metadata JSON of each library
ACTOR Future<Standalone<VectorRef<StringRef>>> listClientLibraries(Database db, ClientLibFilter filter);
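// A minimal usage sketch (assuming an ACTOR/flow context and a Database `db`; values are illustrative):
//
//   ClientLibFilter filter = ClientLibFilter()
//                                .filterAvailable()
//                                .filterPlatform(ClientLibPlatform::X86_64_LINUX)
//                                .filterNewerPackageVersion("6.3.10");
//   Standalone<VectorRef<StringRef>> libs = wait(listClientLibraries(db, filter));
//   // each element of libs is the metadata JSON of one matching library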
// Get the current status of an uploaded client library
ACTOR Future<ClientLibStatus> getClientLibraryStatus(Database db, Standalone<StringRef> clientLibId);
// Change client library metadata status
ACTOR Future<Void> changeClientLibraryStatus(Database db, Standalone<StringRef> clientLibId, ClientLibStatus newStatus);
} // namespace ClientLibManagement
#include "flow/unactorcompiler.h"
#endif

fdbclient/ClientVersion.h Normal file
View File

@ -0,0 +1,77 @@
/*
* ClientVersion.h
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FDBCLIENT_CLIENT_VERSION_H
#define FDBCLIENT_CLIENT_VERSION_H
#pragma once
#include "flow/Arena.h"
struct ClientVersionRef {
StringRef clientVersion;
StringRef sourceVersion;
StringRef protocolVersion;
ClientVersionRef() { initUnknown(); }
ClientVersionRef(Arena& arena, ClientVersionRef const& cv)
: clientVersion(arena, cv.clientVersion), sourceVersion(arena, cv.sourceVersion),
protocolVersion(arena, cv.protocolVersion) {}
ClientVersionRef(StringRef clientVersion, StringRef sourceVersion, StringRef protocolVersion)
: clientVersion(clientVersion), sourceVersion(sourceVersion), protocolVersion(protocolVersion) {}
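// Parses a comma-separated "clientVersion,sourceVersion,protocolVersion" string
// (e.g., hypothetically, "7.1.0,20210101-abcdef,fdb00b071010000"); any other shape
// falls back to the "Unknown" placeholders below.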
ClientVersionRef(StringRef versionString) {
std::vector<StringRef> parts = versionString.splitAny(LiteralStringRef(","));
if (parts.size() != 3) {
initUnknown();
return;
}
clientVersion = parts[0];
sourceVersion = parts[1];
protocolVersion = parts[2];
}
void initUnknown() {
clientVersion = LiteralStringRef("Unknown");
sourceVersion = LiteralStringRef("Unknown");
protocolVersion = LiteralStringRef("Unknown");
}
template <class Ar>
void serialize(Ar& ar) {
serializer(ar, clientVersion, sourceVersion, protocolVersion);
}
size_t expectedSize() const { return clientVersion.size() + sourceVersion.size() + protocolVersion.size(); }
bool operator<(const ClientVersionRef& rhs) const {
if (protocolVersion != rhs.protocolVersion) {
return protocolVersion < rhs.protocolVersion;
}
// These comparisons are arbitrary because they aren't ordered
if (clientVersion != rhs.clientVersion) {
return clientVersion < rhs.clientVersion;
}
return sourceVersion < rhs.sourceVersion;
}
};
#endif

View File

@ -31,8 +31,10 @@
// A ClientWorkerInterface is embedded as the first element of a WorkerInterface.
struct ClientWorkerInterface {
constexpr static FileIdentifier file_identifier = 12418152;
RequestStream<struct RebootRequest> reboot;
RequestStream<struct ProfilerRequest> profiler;
RequestStream<struct SetFailureInjection> setFailureInjection;
bool operator==(ClientWorkerInterface const& r) const { return id() == r.id(); }
bool operator!=(ClientWorkerInterface const& r) const { return id() != r.id(); }
@ -43,7 +45,7 @@ struct ClientWorkerInterface {
template <class Ar>
void serialize(Ar& ar) {
serializer(ar, reboot, profiler, setFailureInjection);
}
};
@ -88,4 +90,39 @@ struct ProfilerRequest {
} }
}; };
struct SetFailureInjection {
constexpr static FileIdentifier file_identifier = 15439864;
ReplyPromise<Void> reply;
struct DiskFailureCommand {
// how often should the disk be stalled (0 meaning once, 10 meaning every 10 secs)
double stallInterval;
// Period of time disk stalls will be injected for
double stallPeriod;
// Period of time the disk will be slowed down for
double throttlePeriod;
template <class Ar>
void serialize(Ar& ar) {
serializer(ar, stallInterval, stallPeriod, throttlePeriod);
}
};
struct FlipBitsCommand {
// percent of bits to flip in the given file
double percentBitFlips;
template <class Ar>
void serialize(Ar& ar) {
serializer(ar, percentBitFlips);
}
};
Optional<DiskFailureCommand> diskFailure;
Optional<FlipBitsCommand> flipBits;
template <class Ar>
void serialize(Ar& ar) {
serializer(ar, reply, diskFailure, flipBits);
}
};
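// A minimal sketch of driving this request (hypothetical values, assuming an ACTOR/flow context and a
// ClientWorkerInterface `workerItf` obtained via GetClientWorkersRequest):
//
//   SetFailureInjection req;
//   SetFailureInjection::DiskFailureCommand disk;
//   disk.stallInterval = 10.0;  // stall the disk every 10 seconds
//   disk.stallPeriod = 60.0;    // keep injecting stalls for 60 seconds
//   disk.throttlePeriod = 60.0; // slow the disk down for 60 seconds
//   req.diskFailure = disk;
//   wait(workerItf.setFailureInjection.getReply(req));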
#endif

View File

@ -0,0 +1,172 @@
/*
* ClusterConnectionFile.actor.cpp
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "fdbclient/ClusterConnectionFile.h"
#include "fdbclient/MonitorLeader.h"
#include "flow/actorcompiler.h" // has to be last include
// Loads and parses the file at 'filename', throwing errors if the file cannot be read or the format is invalid.
ClusterConnectionFile::ClusterConnectionFile(std::string const& filename)
: IClusterConnectionRecord(ConnectionStringNeedsPersisted::False) {
if (!fileExists(filename)) {
throw no_cluster_file_found();
}
cs = ClusterConnectionString(readFileBytes(filename, MAX_CLUSTER_FILE_BYTES));
this->filename = filename;
}
// Creates a cluster file with a given connection string and saves it to the specified file.
ClusterConnectionFile::ClusterConnectionFile(std::string const& filename, ClusterConnectionString const& contents)
: IClusterConnectionRecord(ConnectionStringNeedsPersisted::True) {
this->filename = filename;
cs = contents;
}
// Sets the connections string held by this object and persists it.
Future<Void> ClusterConnectionFile::setConnectionString(ClusterConnectionString const& conn) {
ASSERT(filename.size());
cs = conn;
return success(persist());
}
// Get the connection string stored in the file.
Future<ClusterConnectionString> ClusterConnectionFile::getStoredConnectionString() {
try {
return ClusterConnectionFile(filename).cs;
} catch (Error& e) {
return e;
}
}
// Checks whether the connection string in the file matches the connection string stored in memory. The cluster
// string stored in the file is returned via the reference parameter connectionString.
Future<bool> ClusterConnectionFile::upToDate(ClusterConnectionString& fileConnectionString) {
try {
// the cluster file hasn't been created yet so there's nothing to check
if (needsToBePersisted())
return true;
ClusterConnectionFile temp(filename);
fileConnectionString = temp.getConnectionString();
return fileConnectionString.toString() == cs.toString();
} catch (Error& e) {
TraceEvent(SevWarnAlways, "ClusterFileError").error(e).detail("Filename", filename);
return false; // Swallow the error and report that the file is out of date
}
}
// Returns the specified path of the cluster file.
std::string ClusterConnectionFile::getLocation() const {
return filename;
}
// Creates a copy of this object with a modified connection string but that isn't persisted.
Reference<IClusterConnectionRecord> ClusterConnectionFile::makeIntermediateRecord(
ClusterConnectionString const& connectionString) const {
return makeReference<ClusterConnectionFile>(filename, connectionString);
}
// Returns a string representation of this cluster connection record. This will include the type of record and the
// filename of the cluster file.
std::string ClusterConnectionFile::toString() const {
// This is a fairly naive attempt to generate a URI-like string. It will not account for characters like spaces,
// and it may use backslashes in Windows paths, etc.
// SOMEDAY: we should encode this string as a proper URI.
return "file://" + filename;
}
// returns <resolved name, was default file>
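// Resolution order (mirrors the logic below): an explicitly passed filename wins; otherwise the file named by
// the CLUSTER_FILE_ENV_VAR_NAME environment variable; otherwise "./fdb.cluster" if it exists; otherwise the
// platform default cluster file path.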
std::pair<std::string, bool> ClusterConnectionFile::lookupClusterFileName(std::string const& filename) {
if (filename.length())
return std::make_pair(filename, false);
std::string f;
bool isDefaultFile = true;
if (platform::getEnvironmentVar(CLUSTER_FILE_ENV_VAR_NAME, f)) {
// If this is set but points to a file that does not
// exist, we will not fall back to any other methods
isDefaultFile = false;
} else if (fileExists("fdb.cluster"))
f = "fdb.cluster";
else
f = platform::getDefaultClusterFilePath();
return std::make_pair(f, isDefaultFile);
}
// get a human readable error message describing the error returned from the constructor
std::string ClusterConnectionFile::getErrorString(std::pair<std::string, bool> const& resolvedClusterFile,
Error const& e) {
bool isDefault = resolvedClusterFile.second;
if (e.code() == error_code_connection_string_invalid) {
return format("Invalid cluster file `%s': %d %s", resolvedClusterFile.first.c_str(), e.code(), e.what());
} else if (e.code() == error_code_no_cluster_file_found) {
if (isDefault)
return format("Unable to read cluster file `./fdb.cluster' or `%s' and %s unset: %d %s",
platform::getDefaultClusterFilePath().c_str(),
CLUSTER_FILE_ENV_VAR_NAME,
e.code(),
e.what());
else
return format(
"Unable to read cluster file `%s': %d %s", resolvedClusterFile.first.c_str(), e.code(), e.what());
} else {
return format(
"Unexpected error loading cluster file `%s': %d %s", resolvedClusterFile.first.c_str(), e.code(), e.what());
}
}
// Writes the connection string to the cluster file
Future<bool> ClusterConnectionFile::persist() {
setPersisted();
if (filename.size()) {
try {
atomicReplace(filename,
"# DO NOT EDIT!\n# This file is auto-generated, it is not to be edited by hand\n" +
cs.toString().append("\n"));
Future<bool> isUpToDate = IClusterConnectionRecord::upToDate();
// The implementation of upToDate in this class is synchronous
ASSERT(isUpToDate.isReady());
if (!isUpToDate.get()) {
// This should only happen in rare scenarios where multiple processes are updating the same file to
// different values simultaneously. In that case, we don't have any guarantees about which contents will
// ultimately be written
TraceEvent(SevWarnAlways, "ClusterFileChangedAfterReplace")
.detail("Filename", filename)
.detail("ConnectionString", cs.toString());
return false;
}
return true;
} catch (Error& e) {
TraceEvent(SevWarnAlways, "UnableToChangeConnectionFile")
.error(e)
.detail("Filename", filename)
.detail("ConnectionString", cs.toString());
}
}
return false;
}

View File

@ -0,0 +1,76 @@
/*
* ClusterConnectionFile.h
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#ifndef FDBCLIENT_CLUSTERCONNECTIONFILE_H
#define FDBCLIENT_CLUSTERCONNECTIONFILE_H
#include "fdbclient/CoordinationInterface.h"
#include "flow/actorcompiler.h" // has to be last include
// An implementation of IClusterConnectionRecord backed by a file.
class ClusterConnectionFile : public IClusterConnectionRecord, ReferenceCounted<ClusterConnectionFile>, NonCopyable {
public:
// Loads and parses the file at 'filename', throwing errors if the file cannot be read or the format is invalid.
explicit ClusterConnectionFile(std::string const& filename);
// Creates a cluster file with a given connection string and saves it to the specified file.
explicit ClusterConnectionFile(std::string const& filename, ClusterConnectionString const& contents);
// Sets the connections string held by this object and persists it.
Future<Void> setConnectionString(ClusterConnectionString const&) override;
// Get the connection string stored in the file.
Future<ClusterConnectionString> getStoredConnectionString() override;
// Checks whether the connection string in the file matches the connection string stored in memory. The cluster
// string stored in the file is returned via the reference parameter connectionString.
Future<bool> upToDate(ClusterConnectionString& fileConnectionString) override;
// Returns the specified path of the cluster file.
std::string getLocation() const override;
// Creates a copy of this object with a modified connection string but that isn't persisted.
Reference<IClusterConnectionRecord> makeIntermediateRecord(
ClusterConnectionString const& connectionString) const override;
// Returns a string representation of this cluster connection record. This will include the type of record and the
// filename of the cluster file.
std::string toString() const override;
void addref() override { ReferenceCounted<ClusterConnectionFile>::addref(); }
void delref() override { ReferenceCounted<ClusterConnectionFile>::delref(); }
// returns <resolved name, was default file>
static std::pair<std::string, bool> lookupClusterFileName(std::string const& filename);
// get a human readable error message describing the error returned from the constructor
static std::string getErrorString(std::pair<std::string, bool> const& resolvedFile, Error const& e);
protected:
// Writes the connection string to the cluster file
Future<bool> persist() override;
private:
std::string filename;
};
#include "flow/unactorcompiler.h"
#endif

View File

@ -0,0 +1,167 @@
/*
* ClusterConnectionKey.actor.cpp
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "fdbclient/ClusterConnectionKey.actor.h"
#include "flow/actorcompiler.h" // has to be last include
// Creates a cluster connection record with a given connection string and saves it to the specified key.
// needsToBePersisted should be set to true unless this ClusterConnectionKey is being created with the value read
// from the key.
ClusterConnectionKey::ClusterConnectionKey(Database db,
Key connectionStringKey,
ClusterConnectionString const& contents,
ConnectionStringNeedsPersisted needsToBePersisted)
: IClusterConnectionRecord(needsToBePersisted), db(db), connectionStringKey(connectionStringKey) {
if (!needsToBePersisted) {
lastPersistedConnectionString = ValueRef(contents.toString());
}
cs = contents;
}
// Loads and parses the connection string at the specified key, throwing errors if the file cannot be read or the
// format is invalid.
ACTOR Future<Reference<ClusterConnectionKey>> ClusterConnectionKey::loadClusterConnectionKey(Database db,
Key connectionStringKey) {
state Transaction tr(db);
loop {
try {
Optional<Value> v = wait(tr.get(connectionStringKey));
if (!v.present()) {
throw connection_string_invalid();
}
return makeReference<ClusterConnectionKey>(db,
connectionStringKey,
ClusterConnectionString(v.get().toString()),
ConnectionStringNeedsPersisted::False);
} catch (Error& e) {
wait(tr.onError(e));
}
}
}
// Sets the connections string held by this object and persists it.
Future<Void> ClusterConnectionKey::setConnectionString(ClusterConnectionString const& connectionString) {
cs = connectionString;
return success(persist());
}
// Get the connection string stored in the database.
ACTOR Future<ClusterConnectionString> ClusterConnectionKey::getStoredConnectionStringImpl(
Reference<ClusterConnectionKey> self) {
Reference<ClusterConnectionKey> cck =
wait(ClusterConnectionKey::loadClusterConnectionKey(self->db, self->connectionStringKey));
return cck->cs;
}
Future<ClusterConnectionString> ClusterConnectionKey::getStoredConnectionString() {
return getStoredConnectionStringImpl(Reference<ClusterConnectionKey>::addRef(this));
}
ACTOR Future<bool> ClusterConnectionKey::upToDateImpl(Reference<ClusterConnectionKey> self,
ClusterConnectionString* connectionString) {
try {
// the cluster file hasn't been created yet so there's nothing to check
if (self->needsToBePersisted())
return true;
Reference<ClusterConnectionKey> temp =
wait(ClusterConnectionKey::loadClusterConnectionKey(self->db, self->connectionStringKey));
*connectionString = temp->getConnectionString();
return connectionString->toString() == self->cs.toString();
} catch (Error& e) {
TraceEvent(SevWarnAlways, "ClusterKeyError").error(e).detail("Key", self->connectionStringKey);
return false; // Swallow the error and report that the file is out of date
}
}
// Checks whether the connection string in the database matches the connection string stored in memory. The cluster
// string stored in the database is returned via the reference parameter connectionString.
Future<bool> ClusterConnectionKey::upToDate(ClusterConnectionString& connectionString) {
return upToDateImpl(Reference<ClusterConnectionKey>::addRef(this), &connectionString);
}
// Returns the key where the connection string is stored.
std::string ClusterConnectionKey::getLocation() const {
return printable(connectionStringKey);
}
// Creates a copy of this object with a modified connection string but that isn't persisted.
Reference<IClusterConnectionRecord> ClusterConnectionKey::makeIntermediateRecord(
ClusterConnectionString const& connectionString) const {
return makeReference<ClusterConnectionKey>(db, connectionStringKey, connectionString);
}
// Returns a string representation of this cluster connection record. This will include the type of record and the
// key where the record is stored.
std::string ClusterConnectionKey::toString() const {
return "fdbkey://" + printable(connectionStringKey);
}
ACTOR Future<bool> ClusterConnectionKey::persistImpl(Reference<ClusterConnectionKey> self) {
self->setPersisted();
state Value newConnectionString = ValueRef(self->cs.toString());
try {
state Transaction tr(self->db);
loop {
try {
Optional<Value> existingConnectionString = wait(tr.get(self->connectionStringKey));
// Someone has already updated the connection string to what we want
if (existingConnectionString.present() && existingConnectionString.get() == newConnectionString) {
self->lastPersistedConnectionString = newConnectionString;
return true;
}
// Someone has updated the connection string to something we didn't expect, in which case we leave it
// alone. It's possible this could result in the stored string getting stuck if the connection string
// changes twice and only the first change is recorded. If the process that wrote the first change dies
// and no other process attempts to write the intermediate state, then only a newly opened connection
// key would be able to update the state.
else if (existingConnectionString.present() &&
existingConnectionString != self->lastPersistedConnectionString) {
TraceEvent(SevWarnAlways, "UnableToChangeConnectionKeyDueToMismatch")
.detail("ConnectionKey", self->connectionStringKey)
.detail("NewConnectionString", newConnectionString)
.detail("ExpectedStoredConnectionString", self->lastPersistedConnectionString)
.detail("ActualStoredConnectionString", existingConnectionString);
return false;
}
tr.set(self->connectionStringKey, newConnectionString);
wait(tr.commit());
self->lastPersistedConnectionString = newConnectionString;
return true;
} catch (Error& e) {
wait(tr.onError(e));
}
}
} catch (Error& e) {
TraceEvent(SevWarnAlways, "UnableToChangeConnectionKey")
.error(e)
.detail("ConnectionKey", self->connectionStringKey)
.detail("ConnectionString", self->cs.toString());
}
return false;
};
// Writes the connection string to the database
Future<bool> ClusterConnectionKey::persist() {
return persistImpl(Reference<ClusterConnectionKey>::addRef(this));
}

View File

@ -0,0 +1,92 @@
/*
* ClusterConnectionKey.actor.h
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
// When actually compiled (NO_INTELLISENSE), include the generated version of this file. In intellisense use the source
// version.
#if defined(NO_INTELLISENSE) && !defined(FDBCLIENT_CLUSTERCONNECTIONKEY_ACTOR_G_H)
#define FDBCLIENT_CLUSTERCONNECTIONKEY_ACTOR_G_H
#include "fdbclient/ClusterConnectionKey.actor.g.h"
#elif !defined(FDBCLIENT_CLUSTERCONNECTIONKEY_ACTOR_H)
#define FDBCLIENT_CLUSTERCONNECTIONKEY_ACTOR_H
#include "fdbclient/CoordinationInterface.h"
#include "fdbclient/NativeAPI.actor.h"
#include "flow/actorcompiler.h" // has to be last include
// An implementation of IClusterConnectionRecord backed by a key in a FoundationDB database.
class ClusterConnectionKey : public IClusterConnectionRecord, ReferenceCounted<ClusterConnectionKey>, NonCopyable {
public:
// Creates a cluster connection record with a given connection string and saves it to the specified key.
// needsToBePersisted should be set to true unless this ClusterConnectionKey is being created with the value read
// from the key.
ClusterConnectionKey(Database db,
Key connectionStringKey,
ClusterConnectionString const& contents,
ConnectionStringNeedsPersisted needsToBePersisted = ConnectionStringNeedsPersisted::True);
// Loads and parses the connection string at the specified key, throwing errors if the file cannot be read or the
// format is invalid.
ACTOR static Future<Reference<ClusterConnectionKey>> loadClusterConnectionKey(Database db, Key connectionStringKey);
// Sets the connections string held by this object and persists it.
Future<Void> setConnectionString(ClusterConnectionString const&) override;
// Get the connection string stored in the database.
Future<ClusterConnectionString> getStoredConnectionString() override;
// Checks whether the connection string in the database matches the connection string stored in memory. The cluster
// string stored in the database is returned via the reference parameter connectionString.
Future<bool> upToDate(ClusterConnectionString& connectionString) override;
// Returns the key where the connection string is stored.
std::string getLocation() const override;
// Creates a copy of this object with a modified connection string but that isn't persisted.
Reference<IClusterConnectionRecord> makeIntermediateRecord(
ClusterConnectionString const& connectionString) const override;
// Returns a string representation of this cluster connection record. This will include the type of record and the
// key where the record is stored.
std::string toString() const override;
void addref() override { ReferenceCounted<ClusterConnectionKey>::addref(); }
void delref() override { ReferenceCounted<ClusterConnectionKey>::delref(); }
protected:
// Writes the connection string to the database
Future<bool> persist() override;
private:
ACTOR static Future<ClusterConnectionString> getStoredConnectionStringImpl(Reference<ClusterConnectionKey> self);
ACTOR static Future<bool> upToDateImpl(Reference<ClusterConnectionKey> self,
ClusterConnectionString* connectionString);
ACTOR static Future<bool> persistImpl(Reference<ClusterConnectionKey> self);
// The database where the connection key is stored. Note that this does not need to be the same database as the one
// that the connection string would connect to.
Database db;
Key connectionStringKey;
Optional<Value> lastPersistedConnectionString;
};
#include "flow/unactorcompiler.h"
#endif

View File

@ -0,0 +1,63 @@
/*
* ClusterConnectionMemoryRecord.actor.cpp
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "fdbclient/ClusterConnectionMemoryRecord.h"
#include "fdbclient/MonitorLeader.h"
#include "flow/actorcompiler.h" // has to be last include
// Sets the connections string held by this object.
Future<Void> ClusterConnectionMemoryRecord::setConnectionString(ClusterConnectionString const& conn) {
cs = conn;
return Void();
}
// Returns the connection string currently held in this object (there is no persistent storage).
Future<ClusterConnectionString> ClusterConnectionMemoryRecord::getStoredConnectionString() {
return cs;
}
// Because the memory record is not persisted, it is always up to date and this returns true. The connection string
// is returned via the reference parameter connectionString.
Future<bool> ClusterConnectionMemoryRecord::upToDate(ClusterConnectionString& fileConnectionString) {
fileConnectionString = cs;
return true;
}
// Returns the ID of the memory record.
std::string ClusterConnectionMemoryRecord::getLocation() const {
return id.toString();
}
// Returns a copy of this object with a modified connection string.
Reference<IClusterConnectionRecord> ClusterConnectionMemoryRecord::makeIntermediateRecord(
ClusterConnectionString const& connectionString) const {
return makeReference<ClusterConnectionMemoryRecord>(connectionString);
}
// Returns a string representation of this cluster connection record. This will include the type and id of the
// record.
std::string ClusterConnectionMemoryRecord::toString() const {
return "memory://" + id.toString();
}
// This is a no-op for memory records. Returns true to indicate success.
Future<bool> ClusterConnectionMemoryRecord::persist() {
return true;
}

View File

@ -0,0 +1,71 @@
/*
* ClusterConnectionMemoryRecord.h
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#ifndef FDBCLIENT_CLUSTERCONNECTIONMEMORYRECORD_H
#define FDBCLIENT_CLUSTERCONNECTIONMEMORYRECORD_H
#include "fdbclient/CoordinationInterface.h"
// An implementation of IClusterConnectionRecord that is stored in memory only and not persisted.
class ClusterConnectionMemoryRecord : public IClusterConnectionRecord,
ReferenceCounted<ClusterConnectionMemoryRecord>,
NonCopyable {
public:
// Creates a cluster file with a given connection string.
explicit ClusterConnectionMemoryRecord(ClusterConnectionString const& contents)
: IClusterConnectionRecord(ConnectionStringNeedsPersisted::False), id(deterministicRandom()->randomUniqueID()) {
cs = contents;
}
// Sets the connections string held by this object.
Future<Void> setConnectionString(ClusterConnectionString const&) override;
// Returns the connection string currently held in this object (there is no persistent storage).
Future<ClusterConnectionString> getStoredConnectionString() override;
// Because the memory record is not persisted, it is always up to date and this returns true. The connection string
// is returned via the reference parameter connectionString.
Future<bool> upToDate(ClusterConnectionString& fileConnectionString) override;
// Returns a location string for the memory record that includes its ID.
std::string getLocation() const override;
// Returns a copy of this object with a modified connection string.
Reference<IClusterConnectionRecord> makeIntermediateRecord(
ClusterConnectionString const& connectionString) const override;
// Returns a string representation of this cluster connection record. This will include the type and id of the
// record.
std::string toString() const override;
void addref() override { ReferenceCounted<ClusterConnectionMemoryRecord>::addref(); }
void delref() override { ReferenceCounted<ClusterConnectionMemoryRecord>::delref(); }
protected:
// This is a no-op for memory records. Returns true to indicate success.
Future<bool> persist() override;
private:
// A unique ID for the record
UID id;
};
#endif

View File

@ -27,6 +27,7 @@
#include "fdbclient/Status.h" #include "fdbclient/Status.h"
#include "fdbclient/CommitProxyInterface.h" #include "fdbclient/CommitProxyInterface.h"
#include "fdbclient/ClientWorkerInterface.h" #include "fdbclient/ClientWorkerInterface.h"
#include "fdbclient/ClientVersion.h"
struct ClusterInterface { struct ClusterInterface {
constexpr static FileIdentifier file_identifier = 15888863; constexpr static FileIdentifier file_identifier = 15888863;
@ -36,6 +37,9 @@ struct ClusterInterface {
RequestStream<ReplyPromise<Void>> ping;
RequestStream<struct GetClientWorkersRequest> getClientWorkers;
RequestStream<struct ForceRecoveryRequest> forceRecovery;
RequestStream<struct MoveShardRequest> moveShard;
RequestStream<struct RepairSystemDataRequest> repairSystemData;
RequestStream<struct SplitShardRequest> splitShard;
bool operator==(ClusterInterface const& r) const { return id() == r.id(); }
bool operator!=(ClusterInterface const& r) const { return id() != r.id(); }
@ -45,7 +49,9 @@ struct ClusterInterface {
bool hasMessage() const {
return openDatabase.getFuture().isReady() || failureMonitoring.getFuture().isReady() ||
databaseStatus.getFuture().isReady() || ping.getFuture().isReady() ||
getClientWorkers.getFuture().isReady() || forceRecovery.getFuture().isReady() ||
moveShard.getFuture().isReady() || repairSystemData.getFuture().isReady() ||
splitShard.getFuture().isReady();
}
void initEndpoints() {
@ -55,11 +61,23 @@ struct ClusterInterface {
ping.getEndpoint(TaskPriority::ClusterController); ping.getEndpoint(TaskPriority::ClusterController);
getClientWorkers.getEndpoint(TaskPriority::ClusterController); getClientWorkers.getEndpoint(TaskPriority::ClusterController);
forceRecovery.getEndpoint(TaskPriority::ClusterController); forceRecovery.getEndpoint(TaskPriority::ClusterController);
moveShard.getEndpoint(TaskPriority::ClusterController);
repairSystemData.getEndpoint(TaskPriority::ClusterController);
splitShard.getEndpoint(TaskPriority::ClusterController);
}
template <class Ar>
void serialize(Ar& ar) {
-serializer(ar, openDatabase, failureMonitoring, databaseStatus, ping, getClientWorkers, forceRecovery);
+serializer(ar,
+    openDatabase,
+    failureMonitoring,
+    databaseStatus,
+    ping,
+    getClientWorkers,
+    forceRecovery,
+    moveShard,
+    repairSystemData,
+    splitShard);
}
};
@@ -80,56 +98,6 @@ struct ClusterControllerClientInterface {
}
};
struct ClientVersionRef {
StringRef clientVersion;
StringRef sourceVersion;
StringRef protocolVersion;
ClientVersionRef() { initUnknown(); }
ClientVersionRef(Arena& arena, ClientVersionRef const& cv)
: clientVersion(arena, cv.clientVersion), sourceVersion(arena, cv.sourceVersion),
protocolVersion(arena, cv.protocolVersion) {}
ClientVersionRef(StringRef clientVersion, StringRef sourceVersion, StringRef protocolVersion)
: clientVersion(clientVersion), sourceVersion(sourceVersion), protocolVersion(protocolVersion) {}
ClientVersionRef(StringRef versionString) {
std::vector<StringRef> parts = versionString.splitAny(LiteralStringRef(","));
if (parts.size() != 3) {
initUnknown();
return;
}
clientVersion = parts[0];
sourceVersion = parts[1];
protocolVersion = parts[2];
}
void initUnknown() {
clientVersion = LiteralStringRef("Unknown");
sourceVersion = LiteralStringRef("Unknown");
protocolVersion = LiteralStringRef("Unknown");
}
template <class Ar>
void serialize(Ar& ar) {
serializer(ar, clientVersion, sourceVersion, protocolVersion);
}
size_t expectedSize() const { return clientVersion.size() + sourceVersion.size() + protocolVersion.size(); }
bool operator<(const ClientVersionRef& rhs) const {
if (protocolVersion != rhs.protocolVersion) {
return protocolVersion < rhs.protocolVersion;
}
// These comparisons are arbitrary because they aren't ordered
if (clientVersion != rhs.clientVersion) {
return clientVersion < rhs.clientVersion;
}
return sourceVersion < rhs.sourceVersion;
}
};
template <class T>
struct ItemWithExamples {
T item;
@@ -291,4 +259,68 @@ struct ForceRecoveryRequest {
}
};
// Request to move a keyrange (shard) to a new team represented as addresses.
struct MoveShardRequest {
constexpr static FileIdentifier file_identifier = 2799592;
KeyRange shard;
std::vector<NetworkAddress> addresses;
ReplyPromise<Void> reply;
MoveShardRequest() {}
MoveShardRequest(KeyRange shard, std::vector<NetworkAddress> addresses)
: shard{ std::move(shard) }, addresses{ std::move(addresses) } {}
template <class Ar>
void serialize(Ar& ar) {
serializer(ar, shard, addresses, reply);
}
};
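A hedged construction example for the request above; the key range and addresses are invented for illustration, and in practice the request would be sent on the ClusterInterface::moveShard stream declared earlier in this diff.

// Sketch: move the shard ["user/a", "user/b") to a two-member team.
MoveShardRequest makeExampleMoveShardRequest() {
    KeyRange shard = KeyRangeRef(LiteralStringRef("user/a"), LiteralStringRef("user/b"));
    std::vector<NetworkAddress> team = { NetworkAddress::parse("10.0.0.1:4500"),
                                         NetworkAddress::parse("10.0.0.2:4500") };
    return MoveShardRequest(shard, team);
}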
// Request to trigger a master recovery. During the subsequent recovery, the system metadata will be
// reconstructed from the TLogs and written to a new SS team.
// This is used when the metadata on the SSes is lost or corrupted.
struct RepairSystemDataRequest {
constexpr static FileIdentifier file_identifier = 2799593;
ReplyPromise<Void> reply;
RepairSystemDataRequest() {}
template <class Ar>
void serialize(Ar& ar) {
serializer(ar, reply);
}
};
// Returns the actual shards generated by the SplitShardRequest.
struct SplitShardReply {
constexpr static FileIdentifier file_identifier = 1384440;
std::vector<KeyRange> shards;
SplitShardReply() {}
explicit SplitShardReply(std::vector<KeyRange> shards) : shards{ std::move(shards) } {}
template <class Ar>
void serialize(Ar& ar) {
serializer(ar, shards);
}
};
// Split keyrange [shard.begin, shard.end) into num shards.
// Split points are chosen as the arithmetically equal division points of the given range.
struct SplitShardRequest {
constexpr static FileIdentifier file_identifier = 1384443;
KeyRange shard;
int num;
ReplyPromise<SplitShardReply> reply;
SplitShardRequest() : num(0) {}
SplitShardRequest(KeyRange shard, int num) : shard{ std::move(shard) }, num(num) {}
template <class Ar>
void serialize(Ar& ar) {
serializer(ar, shard, num, reply);
}
};
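A small illustrative use of the request/reply pair above; the range and shard count are arbitrary. The reply is expected to carry num contiguous ranges whose union is the original shard.

// Sketch: ask for the range ["a", "z") to be cut into 4 pieces.
SplitShardRequest makeExampleSplitShardRequest() {
    KeyRange shard = KeyRangeRef(LiteralStringRef("a"), LiteralStringRef("z"));
    return SplitShardRequest(shard, 4);
}
// A well-formed SplitShardReply for this request would have reply.shards.size() == 4, with
// shards.front().begin == "a" and shards.back().end == "z".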
#endif

View File

@@ -115,6 +115,9 @@ struct ClientDBInfo {
firstCommitProxy; // not serialized, used for commitOnFirstProxy when the commit proxies vector has been shrunk
Optional<Value> forward;
std::vector<VersionHistory> history;
+// a counter incremented on every change to the set of uploaded client libraries;
+// clients need to be aware of such changes
+uint64_t clientLibChangeCounter = 0;
ClientDBInfo() {}
@@ -126,7 +129,7 @@ struct ClientDBInfo {
if constexpr (!is_fb_function<Archive>) {
ASSERT(ar.protocolVersion().isValid());
}
-serializer(ar, grvProxies, commitProxies, id, forward, history);
+serializer(ar, grvProxies, commitProxies, id, forward, history, clientLibChangeCounter);
}
};
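A sketch of how a client might consume the new counter; the helper below is not part of fdbclient, it only shows the intended compare-and-refresh pattern.

// Returns true when the uploaded-client-library state has changed since we last looked,
// updating the caller's remembered counter so the refresh happens once per change.
bool clientLibsChanged(ClientDBInfo const& latest, uint64_t& lastSeenCounter) {
    if (latest.clientLibChangeCounter == lastSeenCounter) {
        return false;
    }
    lastSeenCounter = latest.clientLibChangeCounter;
    return true; // caller re-reads the client library metadata
}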

View File

@@ -217,4 +217,32 @@ struct CommitTransactionRef {
}
};
struct MutationsAndVersionRef {
VectorRef<MutationRef> mutations;
Version version = invalidVersion;
Version knownCommittedVersion = invalidVersion;
MutationsAndVersionRef() {}
explicit MutationsAndVersionRef(Version version, Version knownCommittedVersion)
: version(version), knownCommittedVersion(knownCommittedVersion) {}
MutationsAndVersionRef(VectorRef<MutationRef> mutations, Version version, Version knownCommittedVersion)
: mutations(mutations), version(version), knownCommittedVersion(knownCommittedVersion) {}
MutationsAndVersionRef(Arena& to, VectorRef<MutationRef> mutations, Version version, Version knownCommittedVersion)
: mutations(to, mutations), version(version), knownCommittedVersion(knownCommittedVersion) {}
MutationsAndVersionRef(Arena& to, const MutationsAndVersionRef& from)
: mutations(to, from.mutations), version(from.version), knownCommittedVersion(from.knownCommittedVersion) {}
int expectedSize() const { return mutations.expectedSize(); }
struct OrderByVersion {
bool operator()(MutationsAndVersionRef const& a, MutationsAndVersionRef const& b) const {
return a.version < b.version;
}
};
template <class Ar>
void serialize(Ar& ar) {
serializer(ar, mutations, version, knownCommittedVersion);
}
};
#endif
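The OrderByVersion comparator above is what lets standard ordered algorithms operate on batches of these structs; a small sketch, assuming the fdbclient types (Version, invalidVersion) are in scope and with a probe entry whose mutation list is left empty.

#include <algorithm>
#include <vector>

// Sort a batch by commit version, then find the first group at or above `target`.
std::vector<MutationsAndVersionRef>::iterator firstAtOrAbove(std::vector<MutationsAndVersionRef>& batch,
                                                             Version target) {
    std::sort(batch.begin(), batch.end(), MutationsAndVersionRef::OrderByVersion());
    MutationsAndVersionRef probe(target, invalidVersion);
    return std::lower_bound(batch.begin(), batch.end(), probe, MutationsAndVersionRef::OrderByVersion());
}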

View File

@@ -188,11 +188,11 @@ struct ConfigTransactionInterface {
public:
static constexpr FileIdentifier file_identifier = 982485;
-struct RequestStream<ConfigTransactionGetGenerationRequest> getGeneration;
-struct RequestStream<ConfigTransactionGetRequest> get;
-struct RequestStream<ConfigTransactionGetConfigClassesRequest> getClasses;
-struct RequestStream<ConfigTransactionGetKnobsRequest> getKnobs;
-struct RequestStream<ConfigTransactionCommitRequest> commit;
+class RequestStream<ConfigTransactionGetGenerationRequest> getGeneration;
+class RequestStream<ConfigTransactionGetRequest> get;
+class RequestStream<ConfigTransactionGetConfigClassesRequest> getClasses;
+class RequestStream<ConfigTransactionGetKnobsRequest> getKnobs;
+class RequestStream<ConfigTransactionCommitRequest> commit;
ConfigTransactionInterface();
void setupWellKnownEndpoints();
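The change above only swaps the class-key used in the elaborated type specifiers: RequestStream itself is declared with the class keyword, and while a mismatched key is legal C++, it typically draws Clang's -Wmismatched-tags or MSVC's C4099 warning. A standalone repro with invented names:

template <class T>
class Stream {}; // declared with the 'class' key

struct Iface {
    struct Stream<int> get;    // legal, but the elaborated key disagrees with the declaration
    class Stream<int> commit;  // key matches; no mismatched-tags warning
};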

View File

@@ -45,11 +45,23 @@ struct ClientLeaderRegInterface {
}
};
// A string containing the information necessary to connect to a cluster.
//
// The format of the connection string is: description:id@[addrs]+
// The description and id together are called the "key"
//
// The following is enforced about the format of the file:
// - The key must contain one (and only one) ':' character
// - The description contains only allowed characters (a-z, A-Z, 0-9, _)
// - The ID contains only allowed characters (a-z, A-Z, 0-9)
// - At least one address is specified
// - There is no address present more than once
class ClusterConnectionString {
public:
ClusterConnectionString() {}
ClusterConnectionString(std::string const& connectionString);
ClusterConnectionString(std::vector<NetworkAddress>, Key);
std::vector<NetworkAddress> const& coordinators() const { return coord; }
Key clusterKey() const { return key; }
Key clusterKeyName() const {
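To make the format comment above concrete, a rough standalone checker; the regex and function name are mine, and unlike the real ClusterConnectionString(std::string const&) constructor it does not reject duplicate addresses or validate the address syntax itself.

#include <regex>
#include <string>

// Loose shape check for description:id@addr[,addr]... per the rules listed above.
bool looksLikeConnectionString(std::string const& s) {
    static const std::regex pattern(R"(^[A-Za-z0-9_]+:[A-Za-z0-9]+@[^,@]+(,[^,@]+)*$)");
    return std::regex_match(s, pattern);
}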
@@ -65,45 +77,72 @@ private:
Key key, keyDesc;
};
-class ClusterConnectionFile : NonCopyable, public ReferenceCounted<ClusterConnectionFile> {
+FDB_DECLARE_BOOLEAN_PARAM(ConnectionStringNeedsPersisted);
+// A record that stores the connection string used to connect to a cluster. This record can be updated when a cluster
+// notifies a connected party that the connection string has changed.
+//
+// The typically used cluster connection record is a cluster file (implemented in ClusterConnectionFile). This interface
+// provides an abstraction over the cluster file so that we can persist the connection string in other locations or have
+// one that is only stored in memory.
+class IClusterConnectionRecord {
public:
-ClusterConnectionFile() {}
-// Loads and parses the file at 'path', throwing errors if the file cannot be read or the format is invalid.
-//
-// The format of the file is: description:id@[addrs]+
-// The description and id together are called the "key"
-//
-// The following is enforced about the format of the file:
-// - The key must contain one (and only one) ':' character
-// - The description contains only allowed characters (a-z, A-Z, 0-9, _)
-// - The ID contains only allowed characters (a-z, A-Z, 0-9)
-// - At least one address is specified
-// - There is no address present more than once
-explicit ClusterConnectionFile(std::string const& path);
-explicit ClusterConnectionFile(ClusterConnectionString const& cs) : cs(cs), setConn(false) {}
-explicit ClusterConnectionFile(std::string const& filename, ClusterConnectionString const& contents);
-// returns <resolved name, was default file>
-static std::pair<std::string, bool> lookupClusterFileName(std::string const& filename);
-// get a human readable error message describing the error returned from the constructor
-static std::string getErrorString(std::pair<std::string, bool> const& resolvedFile, Error const& e);
+IClusterConnectionRecord(ConnectionStringNeedsPersisted connectionStringNeedsPersisted)
+  : connectionStringNeedsPersisted(connectionStringNeedsPersisted) {}
+virtual ~IClusterConnectionRecord() {}
+// Returns the connection string currently held in this object. This may not match the stored record if it hasn't
+// been persisted or if the persistent storage for the record has been modified externally.
ClusterConnectionString const& getConnectionString() const;
-bool writeFile();
-void setConnectionString(ClusterConnectionString const&);
-std::string const& getFilename() const {
-ASSERT(filename.size());
-return filename;
-}
-bool canGetFilename() const { return filename.size() != 0; }
-bool fileContentsUpToDate() const;
-bool fileContentsUpToDate(ClusterConnectionString& fileConnectionString) const;
+// Sets the connection string held by this object and persists it.
+virtual Future<Void> setConnectionString(ClusterConnectionString const&) = 0;
+// If this record is backed by persistent storage, get the connection string from that storage. Otherwise, return
+// the connection string stored in memory.
+virtual Future<ClusterConnectionString> getStoredConnectionString() = 0;
+// Checks whether the connection string in persistent storage matches the connection string stored in memory.
+Future<bool> upToDate();
+// Checks whether the connection string in persistent storage matches the connection string stored in memory. The
+// cluster string stored in persistent storage is returned via the reference parameter connectionString.
+virtual Future<bool> upToDate(ClusterConnectionString& connectionString) = 0;
+// Returns a string representing the location of the cluster record. For example, this could be the filename or key
+// that stores the connection string.
+virtual std::string getLocation() const = 0;
+// Creates a copy of this object with a modified connection string but that isn't persisted.
+virtual Reference<IClusterConnectionRecord> makeIntermediateRecord(
+    ClusterConnectionString const& connectionString) const = 0;
+// Returns a string representation of this cluster connection record. This will include the type and location of the
+// record.
+virtual std::string toString() const = 0;
+// Signals to the connection record that it was successfully used to connect to a cluster.
void notifyConnected();
-private:
+virtual void addref() = 0;
+virtual void delref() = 0;
+protected:
+// Writes the connection string to the backing persistent storage, if applicable.
+virtual Future<bool> persist() = 0;
+// Returns whether the connection record contains a connection string that needs to be persisted upon connection.
+bool needsToBePersisted() const;
+// Clears the needs-persisted flag.
+void setPersisted();
ClusterConnectionString cs;
-std::string filename;
-bool setConn;
+private:
+// A flag that indicates whether this connection record needs to be persisted when it successfully establishes a
+// connection.
+bool connectionStringNeedsPersisted;
};
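The interface above is the extension point this change introduces; a hypothetical subclass skeleton (the name EnvVarConnectionRecord and its storage idea are invented, and the shape mirrors ClusterConnectionMemoryRecord shown earlier) would provide exactly these overrides:

// Skeleton only: method bodies would live in a .cpp / actor file.
class EnvVarConnectionRecord : public IClusterConnectionRecord,
                               public ReferenceCounted<EnvVarConnectionRecord> {
public:
    explicit EnvVarConnectionRecord(ClusterConnectionString const& contents)
      : IClusterConnectionRecord(ConnectionStringNeedsPersisted::False) {
        cs = contents;
    }
    Future<Void> setConnectionString(ClusterConnectionString const&) override;
    Future<ClusterConnectionString> getStoredConnectionString() override;
    Future<bool> upToDate(ClusterConnectionString& connectionString) override;
    std::string getLocation() const override;
    Reference<IClusterConnectionRecord> makeIntermediateRecord(
        ClusterConnectionString const& connectionString) const override;
    std::string toString() const override;
    void addref() override { ReferenceCounted<EnvVarConnectionRecord>::addref(); }
    void delref() override { ReferenceCounted<EnvVarConnectionRecord>::delref(); }

protected:
    Future<bool> persist() override;
};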
struct LeaderInfo {
@@ -199,9 +238,9 @@ class ClientCoordinators {
public:
std::vector<ClientLeaderRegInterface> clientLeaderServers;
Key clusterKey;
-Reference<ClusterConnectionFile> ccf;
-explicit ClientCoordinators(Reference<ClusterConnectionFile> ccf);
+Reference<IClusterConnectionRecord> ccr;
+explicit ClientCoordinators(Reference<IClusterConnectionRecord> ccr);
explicit ClientCoordinators(Key clusterKey, std::vector<NetworkAddress> coordinators);
ClientCoordinators() {}
};

View File

@@ -288,7 +288,7 @@ StatusObject DatabaseConfiguration::toJSON(bool noPolicies) const {
result["storage_engine"] = "ssd-2";
} else if (tLogDataStoreType == KeyValueStoreType::SSD_BTREE_V2 &&
storageServerStoreType == KeyValueStoreType::SSD_REDWOOD_V1) {
-result["storage_engine"] = "ssd-redwood-experimental";
+result["storage_engine"] = "ssd-redwood-1-experimental";
} else if (tLogDataStoreType == KeyValueStoreType::SSD_BTREE_V2 &&
storageServerStoreType == KeyValueStoreType::SSD_ROCKSDB_V1) {
result["storage_engine"] = "ssd-rocksdb-experimental";
@@ -311,7 +311,7 @@ StatusObject DatabaseConfiguration::toJSON(bool noPolicies) const {
} else if (testingStorageServerStoreType == KeyValueStoreType::SSD_BTREE_V2) {
result["tss_storage_engine"] = "ssd-2";
} else if (testingStorageServerStoreType == KeyValueStoreType::SSD_REDWOOD_V1) {
-result["tss_storage_engine"] = "ssd-redwood-experimental";
+result["tss_storage_engine"] = "ssd-redwood-1-experimental";
} else if (testingStorageServerStoreType == KeyValueStoreType::SSD_ROCKSDB_V1) {
result["tss_storage_engine"] = "ssd-rocksdb-experimental";
} else if (testingStorageServerStoreType == KeyValueStoreType::MEMORY_RADIXTREE) {
@@ -578,9 +578,6 @@ bool DatabaseConfiguration::setInternal(KeyRef key, ValueRef value) {
return true; // All of the above options currently require recovery to take effect
}
-static KeyValueRef* lower_bound(VectorRef<KeyValueRef>& config, KeyRef const& key) {
-return std::lower_bound(config.begin(), config.end(), KeyValueRef(key, ValueRef()), KeyValueRef::OrderByKey());
-}
static KeyValueRef const* lower_bound(VectorRef<KeyValueRef> const& config, KeyRef const& key) {
return std::lower_bound(config.begin(), config.end(), KeyValueRef(key, ValueRef()), KeyValueRef::OrderByKey());
}

Some files were not shown because too many files have changed in this diff.