Fix typos
This commit is contained in:
parent 02182d556a
commit a88114c222
@@ -176,7 +176,7 @@ knobs.MAX_VERSION_RATE_MODIFIER

#### Bridging reference types

-Some C++ types have reference counting and referencial semantics, i.e. they're passed around using raw or smart pointers that point to an instance. That instance typically has its own reference count, that keeps track of when the instance should be released. Such types can be bridged over to Swift reference types, and Swift's automatic reference counting (ARC) will automatically retain and release them using their C++ reference counting implementation.
+Some C++ types have reference counting and referential semantics, i.e. they're passed around using raw or smart pointers that point to an instance. That instance typically has its own reference count, that keeps track of when the instance should be released. Such types can be bridged over to Swift reference types, and Swift's automatic reference counting (ARC) will automatically retain and release them using their C++ reference counting implementation.

You can use the `SWIFT_CXX_REF` annotation for that. Right now `SWIFT_CXX_REF` does not work (due to https://github.com/apple/swift/issues/61620), so you have to make a custom annotation for each class you want to bridge with reference semantics to Swift. For example, the `MasterData` class receives the following annotation:
@@ -482,7 +482,7 @@ class TestRunner(object):
if timed_out.is_set():
reason = "timed out after %d seconds" % (self.args.timeout,)
util.get_logger().error(
-"\n'%s' did not complete succesfully (%s)" % (params[0], reason)
+"\n'%s' did not complete successfully (%s)" % (params[0], reason)
)

util.get_logger().info("")
@@ -38,7 +38,7 @@ struct WorkloadSpec {
std::unordered_map<std::string, std::string> options;
};

-// Test speficification loaded from a *.toml file
+// Test specification loaded from a *.toml file
struct TestSpec {
// Title of the test
std::string title;

@@ -97,7 +97,7 @@ struct TestSpec {
std::vector<WorkloadSpec> workloads;
};

-// Read the test specfication from a *.toml file
+// Read the test specification from a *.toml file
TestSpec readTomlTestSpec(std::string fileName);

} // namespace FdbApiTester
@@ -320,7 +320,7 @@ protected:
}

// Pointer to the transaction executor interface
-// Set in contructor, stays immutable
+// Set in constructor, stays immutable
ITransactionExecutor* const executor;

// FDB database

@@ -344,21 +344,21 @@ protected:
TOpStartFct startFct;

// Mutex protecting access to shared mutable state
-// Only the state that is accessible unter IN_PROGRESS state
+// Only the state that is accessible under IN_PROGRESS state
// must be protected by mutex
std::mutex mutex;

// Continuation to be called after completion of the transaction
-// Set in contructor, stays immutable
+// Set in constructor, stays immutable
const TOpContFct contAfterDone;

// Reference to the scheduler
-// Set in contructor, stays immutable
+// Set in constructor, stays immutable
// Cannot be accessed in DONE state, workloads can be completed and the scheduler deleted
IScheduler* const scheduler;

// Retry limit
-// Set in contructor, stays immutable
+// Set in constructor, stays immutable
const int retryLimit;

// Transaction execution state

@@ -386,7 +386,7 @@ protected:
std::vector<fdb::Error> retriedErrors;

// blob granule base path
-// Set in contructor, stays immutable
+// Set in constructor, stays immutable
const std::string bgBasePath;

// Indicates if the database error was injected
@@ -226,7 +226,7 @@ void WorkloadManager::run() {
if (failed()) {
fmt::print(stderr, "{} workloads failed\n", numWorkloadsFailed);
} else {
-fprintf(stderr, "All workloads succesfully completed\n");
+fprintf(stderr, "All workloads successfully completed\n");
}
}
@@ -46,12 +46,12 @@ public:
virtual void checkProgress() = 0;
};

-// Workoad interface
+// Workload interface
class IWorkload {
public:
virtual ~IWorkload() {}

-// Intialize the workload
+// Initialize the workload
virtual void init(WorkloadManager* manager) = 0;

// Start executing the workload

@@ -73,7 +73,7 @@ public:

// Workload configuration
struct WorkloadConfig {
-// Workoad name
+// Workload name
std::string name;

// Client ID assigned to the workload (a number from 0 to numClients-1)

@@ -136,7 +136,7 @@ protected:
// Log an info message
void info(const std::string& msg);

-// Confirm a successfull progress check
+// Confirm a successful progress check
void confirmProgress();

private:

@@ -166,7 +166,7 @@ protected:
// Total number of clients
int numClients;

-// The maximum number of errors before stoppoing the workload
+// The maximum number of errors before stopping the workload
int maxErrors;

// The timeout (in ms) automatically set for all transactions to a random value

@@ -187,12 +187,12 @@ protected:
// Number of started transactions
std::atomic<int> numTxStarted;

-// Workload is in progress (intialized, but not completed)
+// Workload is in progress (initialized, but not completed)
std::atomic<bool> inProgress;
};

// Workload manager
-// Keeps track of active workoads, stops the scheduler after all workloads complete
+// Keeps track of active workloads, stops the scheduler after all workloads complete
class WorkloadManager {
public:
WorkloadManager(ITransactionExecutor* txExecutor, IScheduler* scheduler)

@@ -214,7 +214,7 @@ public:
return numWorkloadsFailed > 0;
}

-// Schedule statistics to be printed in regular timeintervals
+// Schedule statistics to be printed in regular time intervals
void schedulePrintStatistics(int timeIntervalMs);

private:

@@ -222,7 +222,7 @@ private:

// Info about a running workload
struct WorkloadInfo {
-// Reference to the workoad for ownership
+// Reference to the workload for ownership
std::shared_ptr<IWorkload> ref;
// Continuation to be executed after completing the workload
TTaskFct cont;
@@ -97,7 +97,7 @@ void printProgramUsage(const char* execName) {
" The path of a file containing the connection string for the\n"
" FoundationDB cluster. The default is `fdb.cluster'\n"
" --log Enables trace file logging for the CLI session.\n"
-" --log-dir PATH Specifes the output directory for trace files. If\n"
+" --log-dir PATH Specifies the output directory for trace files. If\n"
" unspecified, defaults to the current directory. Has\n"
" no effect unless --log is specified.\n"
" --log-group LOG_GROUP\n"

@@ -123,7 +123,7 @@ void printProgramUsage(const char* execName) {
" --api-version VERSION\n"
" Required FDB API version (default %d).\n"
" --transaction-retry-limit NUMBER\n"
-" Maximum number of retries per tranaction (default: 0 - unlimited)\n"
+" Maximum number of retries per transaction (default: 0 - unlimited)\n"
" --blob-granule-local-file-path PATH\n"
" Path to blob granule files on local filesystem\n"
" -f, --test-file FILE\n"
@@ -149,7 +149,7 @@ def run_tester(args, cluster, test_file):
else:
reason = "exit code: %d" % ret_code
get_logger().error(
-"\n'%s' did not complete succesfully (%s)" % (cmd[0], reason)
+"\n'%s' did not complete successfully (%s)" % (cmd[0], reason)
)
if log_dir is not None and not args.disable_log_dump:
dump_client_logs(log_dir)
@@ -125,7 +125,7 @@ void printProgramUsage(const char* execName) {
" --transaction-timeout MILLISECONDS\n"
" The timeout for the test transactions in milliseconds (default: 0 - no timeout)\n"
" --log Enables trace file logging for the CLI session.\n"
-" --log-dir PATH Specifes the output directory for trace files. If\n"
+" --log-dir PATH Specifies the output directory for trace files. If\n"
" unspecified, defaults to the current directory. Has\n"
" no effect unless --log is specified.\n"
" --tmp-dir DIR\n"
@@ -598,7 +598,7 @@ class ClientConfigPrevVersionTests(unittest.TestCase):
class ClientConfigSeparateCluster(unittest.TestCase):
def test_wait_cluster_to_upgrade(self):
# Test starting a client incompatible to a cluster and connecting
-# successfuly after cluster upgrade
+# successfully after cluster upgrade
self.cluster = TestCluster(PREV_RELEASE_VERSION)
self.cluster.setup()
try:

@@ -639,7 +639,7 @@ class ClientConfigSeparateCluster(unittest.TestCase):
self.cluster.tear_down()

def test_plaintext_cluster_tls_client(self):
-# Test connecting succesfully to a plaintext cluster with a TLS client
+# Test connecting successfully to a plaintext cluster with a TLS client
self.cluster = TestCluster(
CURRENT_VERSION, tls_config=TLSConfig(), disable_server_side_tls=True
)

@@ -677,7 +677,7 @@ class ClientConfigSeparateCluster(unittest.TestCase):
self.cluster.tear_down()

def test_plaintext_cluster_tls_client_plaintext_connection_disabled(self):
-# Test connecting succesfully to a plaintext cluster with a TLS-configured client with plaintext connections disabled
+# Test connecting successfully to a plaintext cluster with a TLS-configured client with plaintext connections disabled
self.cluster = TestCluster(
CURRENT_VERSION, tls_config=TLSConfig(), disable_server_side_tls=True
)

@@ -860,7 +860,7 @@ class ClientTracingTests(unittest.TestCase):
pattern += "\.\d+\.\w+\.\d+\.\d+\.{}$".format(self.test.trace_format)
if re.match(pattern, name):
return trace_file
-self.fail("No maching trace file found")
+self.fail("No matching trace file found")

def find_and_check_event(
self, trace_file, event_type, attr_present, attr_missing, seqno=0
@@ -1207,7 +1207,7 @@ void usage() {
"Duration in milliseconds after which a transaction times out in run mode. Set as transaction option");
}

-/* parse benchmark paramters */
+/* parse benchmark parameters */
int parseArguments(int argc, char* argv[], Arguments& args) {
int rc;
int c;

@@ -2519,7 +2519,7 @@ int main(int argc, char* argv[]) {

// set --seconds in case no ending condition has been set
if (args.seconds == 0 && args.iteration == 0) {
-args.seconds = 30; // default value accodring to documentation
+args.seconds = 30; // default value according to documentation
}

// if no cluster file is passed, fall back to default parameters
@@ -28,7 +28,7 @@
namespace mako {

/* return the last key to be inserted */
-/* devide val equally among threads */
+/* divide val equally among threads */
int computeThreadPortion(int val, int p_idx, int t_idx, int total_p, int total_t) {
int interval = val / total_p / total_t;
int remaining = val - (interval * total_p * total_t);

@@ -97,13 +97,13 @@ force_inline int insertBegin(int rows, int p_idx, int t_idx, int total_p, int to
return (int)(round(interval * ((p_idx * total_t) + t_idx)));
}

-/* similar to insertBegin, insertEnd returns the last row numer */
+/* similar to insertBegin, insertEnd returns the last row number */
force_inline int insertEnd(int rows, int p_idx, int t_idx, int total_p, int total_t) {
double interval = (double)rows / total_p / total_t;
return (int)(round(interval * ((p_idx * total_t) + t_idx + 1) - 1));
}

-/* devide a value equally among threads */
+/* divide a value equally among threads */
int computeThreadPortion(int val, int p_idx, int t_idx, int total_p, int total_t);

/* similar to insertBegin/end, computeThreadTps computes

@@ -112,7 +112,7 @@ int computeThreadPortion(int val, int p_idx, int t_idx, int total_p, int total_t
#define computeThreadTps(val, p_idx, t_idx, total_p, total_t) computeThreadPortion(val, p_idx, t_idx, total_p, total_t)

/* similar to computeThreadTps,
-* computeThreadIters computs the number of iterations.
+* computeThreadIters computes the number of iterations.
*/
#define computeThreadIters(val, p_idx, t_idx, total_p, total_t) \
computeThreadPortion(val, p_idx, t_idx, total_p, total_t)
@@ -39,28 +39,28 @@
.treeview .hover { color: red; cursor: pointer; }

.treeview li { background: url(images/treeview-default-line.gif) 0 0 no-repeat; }
-.treeview li.collapsable, .treeview li.expandable { background-position: 0 -176px; }
+.treeview li.collapsible, .treeview li.expandable { background-position: 0 -176px; }

.treeview .expandable-hitarea { background-position: -80px -3px; }

.treeview li.last { background-position: 0 -1766px }
-.treeview li.lastCollapsable, .treeview li.lastExpandable { background-image: url(images/treeview-default.gif); }
-.treeview li.lastCollapsable { background-position: 0 -111px }
+.treeview li.lastCollapsible, .treeview li.lastExpandable { background-image: url(images/treeview-default.gif); }
+.treeview li.lastCollapsible { background-position: 0 -111px }
.treeview li.lastExpandable { background-position: -32px -67px }

-.treeview div.lastCollapsable-hitarea, .treeview div.lastExpandable-hitarea { background-position: 0; }
+.treeview div.lastCollapsible-hitarea, .treeview div.lastExpandable-hitarea { background-position: 0; }

.treeview-red li { background-image: url(images/treeview-red-line.gif); }
-.treeview-red .hitarea, .treeview-red li.lastCollapsable, .treeview-red li.lastExpandable { background-image: url(images/treeview-red.gif); }
+.treeview-red .hitarea, .treeview-red li.lastCollapsible, .treeview-red li.lastExpandable { background-image: url(images/treeview-red.gif); }

.treeview-black li { background-image: url(images/treeview-black-line.gif); }
-.treeview-black .hitarea, .treeview-black li.lastCollapsable, .treeview-black li.lastExpandable { background-image: url(images/treeview-black.gif); }
+.treeview-black .hitarea, .treeview-black li.lastCollapsible, .treeview-black li.lastExpandable { background-image: url(images/treeview-black.gif); }

.treeview-gray li { background-image: url(images/treeview-gray-line.gif); }
-.treeview-gray .hitarea, .treeview-gray li.lastCollapsable, .treeview-gray li.lastExpandable { background-image: url(images/treeview-gray.gif); }
+.treeview-gray .hitarea, .treeview-gray li.lastCollapsible, .treeview-gray li.lastExpandable { background-image: url(images/treeview-gray.gif); }

.treeview-famfamfam li { background-image: url(images/treeview-famfamfam-line.gif); }
-.treeview-famfamfam .hitarea, .treeview-famfamfam li.lastCollapsable, .treeview-famfamfam li.lastExpandable { background-image: url(images/treeview-famfamfam.gif); }
+.treeview-famfamfam .hitarea, .treeview-famfamfam li.lastCollapsible, .treeview-famfamfam li.lastExpandable { background-image: url(images/treeview-famfamfam.gif); }

.treeview .placeholder {
background: url(images/ajax-loader.gif) 0 0 no-repeat;
@@ -14,10 +14,10 @@
return proxied.apply(this, arguments).bind("add", function(event, branches) {
$(branches).prev()
.removeClass(CLASSES.last)
-.removeClass(CLASSES.lastCollapsable)
+.removeClass(CLASSES.lastCollapsible)
.removeClass(CLASSES.lastExpandable)
.find(">.hitarea")
-.removeClass(CLASSES.lastCollapsableHitarea)
+.removeClass(CLASSES.lastCollapsibleHitarea)
.removeClass(CLASSES.lastExpandableHitarea);
$(branches).find("li").andSelf().prepareBranches(settings).applyClasses(settings, $(this).data("toggler"));
}).bind("remove", function(event, branches) {

@@ -27,10 +27,10 @@
prev.filter(":last-child").addClass(CLASSES.last)
.filter("." + CLASSES.expandable).replaceClass(CLASSES.last, CLASSES.lastExpandable).end()
.find(">.hitarea").replaceClass(CLASSES.expandableHitarea, CLASSES.lastExpandableHitarea).end()
-.filter("." + CLASSES.collapsable).replaceClass(CLASSES.last, CLASSES.lastCollapsable).end()
-.find(">.hitarea").replaceClass(CLASSES.collapsableHitarea, CLASSES.lastCollapsableHitarea);
+.filter("." + CLASSES.collapsible).replaceClass(CLASSES.last, CLASSES.lastCollapsible).end()
+.find(">.hitarea").replaceClass(CLASSES.collapsibleHitarea, CLASSES.lastCollapsibleHitarea);
if (parent.is(":not(:has(>))") && parent[0] != this) {
-parent.parent().removeClass(CLASSES.collapsable).removeClass(CLASSES.expandable)
+parent.parent().removeClass(CLASSES.collapsible).removeClass(CLASSES.expandable)
parent.siblings(".hitarea").andSelf().remove();
}
});
@@ -79,8 +79,8 @@

// handle open ones
this.not(":has(>ul:hidden)")
-.addClass(CLASSES.collapsable)
-.replaceClass(CLASSES.last, CLASSES.lastCollapsable);
+.addClass(CLASSES.collapsible)
+.replaceClass(CLASSES.last, CLASSES.lastCollapsible);

// create hitarea if not present
var hitarea = this.find("div." + CLASSES.hitarea);

@@ -126,7 +126,7 @@
};
}
// click on first element to collapse tree
-$("a:eq(0)", control).click( handler(CLASSES.collapsable) );
+$("a:eq(0)", control).click( handler(CLASSES.collapsible) );
// click on second to expand tree
$("a:eq(1)", control).click( handler(CLASSES.expandable) );
// click on third to toggle tree

@@ -139,12 +139,12 @@
.parent()
// swap classes for hitarea
.find(">.hitarea")
-.swapClass( CLASSES.collapsableHitarea, CLASSES.expandableHitarea )
-.swapClass( CLASSES.lastCollapsableHitarea, CLASSES.lastExpandableHitarea )
+.swapClass( CLASSES.collapsibleHitarea, CLASSES.expandableHitarea )
+.swapClass( CLASSES.lastCollapsibleHitarea, CLASSES.lastExpandableHitarea )
.end()
// swap classes for parent li
-.swapClass( CLASSES.collapsable, CLASSES.expandable )
-.swapClass( CLASSES.lastCollapsable, CLASSES.lastExpandable )
+.swapClass( CLASSES.collapsible, CLASSES.expandable )
+.swapClass( CLASSES.lastCollapsible, CLASSES.lastExpandable )
// find child lists
.find( ">ul" )
// toggle them

@@ -154,11 +154,11 @@
.siblings()
// swap classes for hitarea
.find(">.hitarea")
-.replaceClass( CLASSES.collapsableHitarea, CLASSES.expandableHitarea )
-.replaceClass( CLASSES.lastCollapsableHitarea, CLASSES.lastExpandableHitarea )
+.replaceClass( CLASSES.collapsibleHitarea, CLASSES.expandableHitarea )
+.replaceClass( CLASSES.lastCollapsibleHitarea, CLASSES.lastExpandableHitarea )
.end()
-.replaceClass( CLASSES.collapsable, CLASSES.expandable )
-.replaceClass( CLASSES.lastCollapsable, CLASSES.lastExpandable )
+.replaceClass( CLASSES.collapsible, CLASSES.expandable )
+.replaceClass( CLASSES.lastCollapsible, CLASSES.lastExpandable )
.find( ">ul" )
.heightHide( settings.animated, settings.toggle );
}

@@ -213,11 +213,11 @@
if (settings.prerendered) {
// if prerendered is on, replicate the basic class swapping
items.filter("li")
-.swapClass( CLASSES.collapsable, CLASSES.expandable )
-.swapClass( CLASSES.lastCollapsable, CLASSES.lastExpandable )
+.swapClass( CLASSES.collapsible, CLASSES.expandable )
+.swapClass( CLASSES.lastCollapsible, CLASSES.lastExpandable )
.find(">.hitarea")
-.swapClass( CLASSES.collapsableHitarea, CLASSES.expandableHitarea )
-.swapClass( CLASSES.lastCollapsableHitarea, CLASSES.lastExpandableHitarea );
+.swapClass( CLASSES.collapsibleHitarea, CLASSES.expandableHitarea )
+.swapClass( CLASSES.lastCollapsibleHitarea, CLASSES.lastExpandableHitarea );
}
}
break;

@@ -244,10 +244,10 @@
expandable: "expandable",
expandableHitarea: "expandable-hitarea",
lastExpandableHitarea: "lastExpandable-hitarea",
-collapsable: "collapsable",
-collapsableHitarea: "collapsable-hitarea",
-lastCollapsableHitarea: "lastCollapsable-hitarea",
-lastCollapsable: "lastCollapsable",
+collapsible: "collapsible",
+collapsibleHitarea: "collapsible-hitarea",
+lastCollapsibleHitarea: "lastCollapsible-hitarea",
+lastCollapsible: "lastCollapsible",
lastExpandable: "lastExpandable",
last: "last",
hitarea: "hitarea"
@@ -195,7 +195,7 @@ func tupleToString(t tuple.Tuple) string {
case tuple.Tuple:
buffer.WriteString(tupleToString(el))
default:
-log.Fatalf("Don't know how to stringify tuple elemement %v %T\n", el, el)
+log.Fatalf("Don't know how to stringify tuple element %v %T\n", el, el)
}
}
buffer.WriteByte(')')
@@ -474,7 +474,7 @@ func (o TransactionOptions) SetNextWriteNoWriteConflictRange() error {
return o.setOpt(30, nil)
}

-// Reads performed by a transaction will not see any prior mutations that occured in that transaction, instead seeing the value which was in the database at the transaction's read version. This option may provide a small performance benefit for the client, but also disables a number of client-side optimizations which are beneficial for transactions which tend to read and write the same keys within a single transaction. It is an error to set this option after performing any reads or writes on the transaction.
+// Reads performed by a transaction will not see any prior mutations that occurred in that transaction, instead seeing the value which was in the database at the transaction's read version. This option may provide a small performance benefit for the client, but also disables a number of client-side optimizations which are beneficial for transactions which tend to read and write the same keys within a single transaction. It is an error to set this option after performing any reads or writes on the transaction.
func (o TransactionOptions) SetReadYourWritesDisable() error {
return o.setOpt(51, nil)
}
@@ -45,29 +45,29 @@ func (ks KeySelector) FDBKeySelector() KeySelector {
return ks
}

-// LastLessThan returns the KeySelector specifying the lexigraphically greatest
-// key present in the database which is lexigraphically strictly less than the
+// LastLessThan returns the KeySelector specifying the lexicographically greatest
+// key present in the database which is lexicographically strictly less than the
// given key.
func LastLessThan(key KeyConvertible) KeySelector {
return KeySelector{key, false, 0}
}

-// LastLessOrEqual returns the KeySelector specifying the lexigraphically
-// greatest key present in the database which is lexigraphically less than or
+// LastLessOrEqual returns the KeySelector specifying the lexicographically
+// greatest key present in the database which is lexicographically less than or
// equal to the given key.
func LastLessOrEqual(key KeyConvertible) KeySelector {
return KeySelector{key, true, 0}
}

-// FirstGreaterThan returns the KeySelector specifying the lexigraphically least
-// key present in the database which is lexigraphically strictly greater than
+// FirstGreaterThan returns the KeySelector specifying the lexicographically least
+// key present in the database which is lexicographically strictly greater than
// the given key.
func FirstGreaterThan(key KeyConvertible) KeySelector {
return KeySelector{key, true, 1}
}

-// FirstGreaterOrEqual returns the KeySelector specifying the lexigraphically
-// least key present in the database which is lexigraphically greater than or
+// FirstGreaterOrEqual returns the KeySelector specifying the lexicographically
+// least key present in the database which is lexicographically greater than or
// equal to the given key.
func FirstGreaterOrEqual(key KeyConvertible) KeySelector {
return KeySelector{key, false, 1}
@@ -11,8 +11,9 @@ import (
)

var update = flag.Bool("update", false, "update .golden files")

// Since go 1.20 math/rand uses automatically a random seed: https://tip.golang.org/doc/go1.20.
-// To enfore the old behaviour we intialize the random generator with a hard-coded seed.
+// To enforce the old behaviour we initialize the random generator with a hard-coded seed.
// TODO: Rethink how useful the random generator in those test cases is.
var randomGenerator *rand.Rand
@@ -164,7 +164,7 @@ if(NOT WIN32 AND NOT APPLE AND NOT OPEN_FOR_IDE)
endif()

target_include_directories(fdb_java PRIVATE ${JNI_INCLUDE_DIRS})
-# libfdb_java.so is loaded by fdb-java.jar and doesn't need to depened on jvm shared libraries.
+# libfdb_java.so is loaded by fdb-java.jar and doesn't need to depend on jvm shared libraries.
target_link_libraries(fdb_java PRIVATE fdb_c)
target_link_libraries(fdb_java PRIVATE fdb_java_native)
if(APPLE)
@@ -2,7 +2,7 @@
<!--
The style for code written within the FDB Java bindings.
Note that this style guide grew up somewhat organically from
-the idiosyncracies of the committers involved. It aims to
+the idiosyncrasies of the committers involved. It aims to
be at least a little idiomatically Java while at the same time
trying not to look too incongruous when compared to the style
of our core products (e.g., fdbserver). It also isn't
@@ -34,7 +34,7 @@ test, do the following:

( see `BasicMultiClientIntegrationTest` for a good reference example)

-It is important to note that it requires significant time to start and stop 3 separate clusters; if the underying test takes a long time to run,
+It is important to note that it requires significant time to start and stop 3 separate clusters; if the underlying test takes a long time to run,
ctest will time out and kill the test. When that happens, there is no guarantee that the FDB clusters will be properly stopped! It is thus
in your best interest to ensure that all tests run in a relatively small amount of time, or have a longer timeout attached.
@@ -34,7 +34,7 @@ import org.junit.jupiter.api.Assertions;

/**
* Setup: Generating a cycle 0 -> 1 -> 2 -> 3 -> 0, its length is 4
-* Process: randomly choose an element, reverse 2nd and 4rd element, considering the chosen one as the 1st element.
+* Process: randomly choose an element, reverse 2nd and 4th element, considering the chosen one as the 1st element.
* Check: verify no element is lost or added, and they are still a cycle.
*
* This test is to verify the atomicity of transactions.
@@ -92,7 +92,7 @@ class RangeQueryIntegrationTest {
Assertions.assertTrue(kvs.hasNext(), "Did not return a record!");
KeyValue n = kvs.next();
Assertions.assertArrayEquals(key, n.getKey(), "Did not return a key correctly!");
-Assertions.assertArrayEquals(value, n.getValue(), "Did not return the corect value!");
+Assertions.assertArrayEquals(value, n.getValue(), "Did not return the correct value!");

return null;
});
@@ -28,7 +28,7 @@ import com.apple.foundationdb.tuple.Tuple;
import org.junit.jupiter.api.Assertions;

/**
-* This test verify transcations have repeatable read.
+* This test verify transactions have repeatable read.
* 1 First set initialValue to key.
* 2 Have transactions to read the key and verify the initialValue in a loop, if it does not
* see the initialValue as the value, it set the flag to false.

@@ -38,7 +38,7 @@ import org.junit.jupiter.api.Assertions;
*
* 4 Verify that old transactions have not finished when new transactions have finished,
* then verify old transactions does not have false flag -- it means that old transactions
-* are still seeting the initialValue even after new transactions set them to a new value.
+* are still seeing the initialValue even after new transactions set them to a new value.
*/
public class RepeatableReadMultiThreadClientTest {
public static final MultiClientHelper clientHelper = new MultiClientHelper();
@@ -16,7 +16,7 @@ import org.junit.jupiter.api.Assertions;
* Consumer would consume the key by checking the existence of the key, if it does not find the key,
* then the test would fail.
*
-* This test is to verify the causal consistency of transactions for mutli-threaded client.
+* This test is to verify the causal consistency of transactions for multi-threaded client.
*/
public class SidebandMultiThreadClientTest {
public static final MultiClientHelper clientHelper = new MultiClientHelper();
@@ -273,7 +273,7 @@ class WatchesIntegrationTest {
}

private void ensureConnected(Database db) throws Exception {
-// Run one transaction succesfully to ensure we established connection
+// Run one transaction successfully to ensure we established connection
db.run(tr -> {
tr.getReadVersion().join();
return null;
@@ -395,7 +395,7 @@ class TuplePackingTest {
Assumptions.assumeTrue(FDB.instance().getAPIVersion() > 520, "Skipping test because version is too old");

// this is a tricky case where there are two tuples with identical
-// respresentations but different semantics.
+// representations but different semantics.
byte[] arr = new byte[0x0100fe];
Arrays.fill(arr, (byte)0x7f); // the actual value doesn't matter, as long as it's not zero
Tuple t1 = Tuple.from(arr, Versionstamp.complete(new byte[] { FF, FF, FF, FF, FF, FF, FF, FF, FF, FF }),
@@ -25,7 +25,7 @@ import java.util.concurrent.ArrayBlockingQueue;

/**
* A singleton that manages a pool of {@link DirectByteBuffer}, that will be
-* shared by the {@link DirectBufferIterator} instances. It is responsibilty of
+* shared by the {@link DirectBufferIterator} instances. It is responsibility of
* user to return the borrowed buffers.
*/
class DirectBufferPool {
@@ -572,7 +572,7 @@ public class FDB {

protected static boolean evalErrorPredicate(int predicate, int code) {
if(singleton == null)
-throw new IllegalStateException("FDB API not yet initalized");
+throw new IllegalStateException("FDB API not yet initialized");
return singleton.Error_predicate(predicate, code);
}
@@ -35,7 +35,7 @@ class RangeResult {

RangeResult(byte[] keyValues, int[] lengths, boolean more) {
if(lengths.length % 2 != 0) {
-throw new IllegalArgumentException("There needs to be an even number of lenghts!");
+throw new IllegalArgumentException("There needs to be an even number of lengths!");
}

int count = lengths.length / 2;
@@ -515,7 +515,7 @@ public class AsyncUtil {
* The returned future will then be ready with the result of the
* handler's future (or an error if that future completes exceptionally).
* The handler will execute on the {@link com.apple.foundationdb.FDB#DEFAULT_EXECUTOR default executor}
-* used for asychronous tasks.
+* used for asynchronous tasks.
*
* @param future future to compose the handler onto
* @param handler handler bi-function to compose onto the passed future
@@ -184,7 +184,7 @@ public class DirectorySubspace extends Subspace implements Directory {

/**
* Called by all functions that could operate on this subspace directly (moveTo, remove, removeIfExists, exists).
-* Subclasses can chooose to return a different directory layer to use for the operation if path is in fact empty.
+* Subclasses can choose to return a different directory layer to use for the operation if path is in fact empty.
*/
DirectoryLayer getLayerForPath(List<String> path) {
return directoryLayer;
@@ -232,7 +232,7 @@ public class Subspace {
}

/**
-* Gets a {@link Range} respresenting all keys strictly in the {@code Subspace}.
+* Gets a {@link Range} representing all keys strictly in the {@code Subspace}.
*
* @return the {@link Range} of keyspace corresponding to this {@code Subspace}
*/
@@ -223,7 +223,7 @@ make_enum("ConflictRangeType")


def transactional(*tr_args, **tr_kwargs):
-"""Decorate a funcation as transactional.
+"""Decorate a function as transactional.

The decorator looks for a named argument (default "tr") and takes
one of two actions, depending on the type of the parameter passed
@@ -197,7 +197,7 @@ module FDB

if old_path == new_path[0...old_path.length]
raise ArgumentError,
-'The desination directory cannot be a subdirectory of the source directory.'
+'The destination directory cannot be a subdirectory of the source directory.'
end

old_node = find(tr, old_path).prefetch_metadata(tr)
@@ -7,7 +7,7 @@
# an error if there are any .txt files in the test directory that do not
# correspond to a test or are not ignore by a pattern
# - IGNORE_PATTERNS regular expressions. All files that match any of those
-# experessions don't need to be associated with a test
+# expressions don't need to be associated with a test
function(configure_testing)
set(options ERROR_ON_ADDITIONAL_FILES)
set(oneValueArgs TEST_DIRECTORY)
@@ -582,14 +582,14 @@ if (WITH_SWIFT)

set(SwiftOptions "${SwiftOptions} -Xcc -DWITH_SWIFT")

-# Supress noisy C++ warnings from Swift.
+# Suppress noisy C++ warnings from Swift.
set(SwiftOptions "${SwiftOptions} -Xcc -Wno-deprecated -Xcc -Wno-undefined-var-template")
-# Supress rapidjson noisy GCC pragma diagnostics.
+# Suppress rapidjson noisy GCC pragma diagnostics.
set(SwiftOptions "${SwiftOptions} -Xcc -Wno-unknown-warning-option")

if (FOUNDATIONDB_CROSS_COMPILING)
# Cross-compilation options.
-# For some reason we need to specify -sdk explictly to pass config-time
+# For some reason we need to specify -sdk explicitly to pass config-time
# cmake checks, even though Swift does tend to pass it by itself for the
# actual compilation.
string(TOLOWER ${CMAKE_SYSTEM_PROCESSOR} TripleArch)
@@ -137,7 +137,7 @@ In the following sections, <span style="color:green">green</span> tag indicates
| | | | After | GetValueDebug | [Reader.After](https://github.com/apple/foundationdb/blob/ffb8e27f4325db3dc8465e145bc308f6854500eb/fdbserver/KeyValueStoreSQLite.actor.cpp#L1662-L1664) | |
| | StorageServer | | AfterRead | GetValueDebug | [getValueQ.AfterRead](https://github.com/apple/foundationdb/blob/ffb8e27f4325db3dc8465e145bc308f6854500eb/fdbserver/storageserver.actor.cpp#L1185-L1187) | |
| **Client** | NativeAPI | getValue | After | GetValueDebug | [NativeAPI.getValue.After](https://github.com/apple/foundationdb/blob/ffb8e27f4325db3dc8465e145bc308f6854500eb/fdbclient/NativeAPI.actor.cpp#L2216-L2218) | (When successful) |
-| | | | Error | GetValueDebug | [NativeAPI.getValue.Error](https://github.com/apple/foundationdb/blob/ffb8e27f4325db3dc8465e145bc308f6854500eb/fdbclient/NativeAPI.actor.cpp#L2232-L2234) | (Wehn failure) |
+| | | | Error | GetValueDebug | [NativeAPI.getValue.Error](https://github.com/apple/foundationdb/blob/ffb8e27f4325db3dc8465e145bc308f6854500eb/fdbclient/NativeAPI.actor.cpp#L2232-L2234) | (When failure) |

@@ -158,7 +158,7 @@ In the following sections, <span style="color:green">green</span> tag indicates
| | | | Send | TransactionDebug | [storageserver.getKeyValues.Send](https://github.com/apple/foundationdb/blob/ffb8e27f4325db3dc8465e145bc308f6854500eb/fdbserver/storageserver.actor.cpp#L1866) | (When no keys found) |
| | | | AfterReadRange | TransactionDebug | [storageserver.getKeyValues.AfterReadRange](https://github.com/apple/foundationdb/blob/ffb8e27f4325db3dc8465e145bc308f6854500eb/fdbserver/storageserver.actor.cpp#L1886) | (When found keys in this SS) |
| **Client** | NativeAPI | getRange | After | TransactionDebug | [NativeAPI.getRange.After](https://github.com/apple/foundationdb/blob/ffb8e27f4325db3dc8465e145bc308f6854500eb/fdbclient/NativeAPI.actor.cpp#L3044-L3046) | (When successful) |
-| | | | Error | TransactionDebug | [NativeAPI.getRange.Error](https://github.com/apple/foundationdb/blob/ffb8e27f4325db3dc8465e145bc308f6854500eb/fdbclient/NativeAPI.actor.cpp#L3155-L3156) | (Wehn failure) |
+| | | | Error | TransactionDebug | [NativeAPI.getRange.Error](https://github.com/apple/foundationdb/blob/ffb8e27f4325db3dc8465e145bc308f6854500eb/fdbclient/NativeAPI.actor.cpp#L3155-L3156) | (When failure) |

### GetRange Fallback
@@ -49,7 +49,7 @@ In `basicLoadBalance`, a *best* alternative is picked and used at the beginning.
`loadBalance` provides a more sophisticated implementation of load balancing. In addition of the basic load balancing, it also provides a variety of features:

* Support for Test Storage Server ([TSS](https://github.com/apple/foundationdb/blob/main/documentation/sphinx/source/tss.rst))
-* Datacenter awaring alternative election
+* Datacenter-aware alternative election
* Recording the latency and penalty from interfaces, and [prioritize the interfaces based on previously stored data](#with-queuemodel).
* Able to handle timeouts and SS exceptions with retries.

@@ -132,7 +132,7 @@ graph LR
H4 --Additional request failed--> H3
```

-The first request has a timeout option. If the LB is not able to retrieve the response within the timout, more requests will be sent to secondary and other available interfaces. If the first request failed, it is reset and the next request will be considered as the first request. Certain types of errors can also be returned as response, e.g. `request_may_be_delivered` or `process_behind`, which may not trigger a load-balancer retry.
+The first request has a timeout option. If the LB is not able to retrieve the response within the timeout, more requests will be sent to secondary and other available interfaces. If the first request failed, it is reset and the next request will be considered as the first request. Certain types of errors can also be returned as response, e.g. `request_may_be_delivered` or `process_behind`, which may not trigger a load-balancer retry.

### Wait for available alternative
@@ -88,4 +88,4 @@ The code that decodes a mutation block is in `ACTOR Future<Standalone<VectorRef<
### Endianness
When the restore decodes a serialized integer from the backup file, it needs to convert the serialized value from big endian to little endian.

-The reason is as follows: When the backup procedure transfers the data to remote blob store, the backup data is encoded in big endian. However, FoundationDB currently only run on little endian machines. The endianness affects the interpretation of an integer, so we must perform the endianness convertion.
+The reason is as follows: When the backup procedure transfers the data to remote blob store, the backup data is encoded in big endian. However, FoundationDB currently only run on little endian machines. The endianness affects the interpretation of an integer, so we must perform the endianness conversion.
@@ -96,7 +96,7 @@ In `status json` output, please look at field `.data.team_tracker.state` for tea

A key range is a shard. A shard is the minimum unit of moving data. The storage server’s ownership of a shard -- which SS owns which shard -- is stored in the system keyspace *serverKeys* (`\xff/serverKeys/`) and *keyServers* (`\xff/keyServers/`). To simplify the explanation, we refer to the storage server’s ownership of a shard as a shard’s ownership.

-A shard’s ownership is used in transaction systems (commit proxy and tLogs) to route mutations to tLogs and storage servers. When a commit proxy receives a mutation, it uses the shard’s ownership to decide which *k* tLogs receive the mutation, assuming *k* is the replias factor. When a storage server pulls mutations from tLogs, it uses the shard’s ownership to decide which shards the SS is responsible for and which tLog the SS should pull the data from.
+A shard’s ownership is used in transaction systems (commit proxy and tLogs) to route mutations to tLogs and storage servers. When a commit proxy receives a mutation, it uses the shard’s ownership to decide which *k* tLogs receive the mutation, assuming *k* is the replicas factor. When a storage server pulls mutations from tLogs, it uses the shard’s ownership to decide which shards the SS is responsible for and which tLog the SS should pull the data from.

A shard’s ownership must be consistent across transaction systems and SSes, so that mutations can be correctly routed to SSes. Moving keys from a SS to another requires changing the shard’s ownership under ACID property. The ACID property is achieved by using FDB transactions to change the *serverKeys *(`\xff/serverKeys/`) and *keyServers* (`\xff/keyServers/`). The mutation on the *serverKeys *and* keyServers *will be categorized as private mutations in transaction system. Compared to normal mutation, the private mutations will change the transaction state store (txnStateStore) that maintains the *serverKeys* and *keyServers* for transaction systems (commit proxy and tLog) when it arrives on each transaction component (e.g., tLog). Because mutations are processed in total order with the ACID guarantees, the change to the txnStateStore will be executed in total order on each node and the change on the shard’s ownership will also be consistent.

@@ -115,7 +115,7 @@ Before FDB 7.2, when the data distributor wants to rebalance shard, it only cons
The data distributor will periodically check whether the read rebalance is needed. The conditions of rebalancing are
* the **worst CPU usage of source team >= 0.15** , which means the source team is somewhat busy;
* the ongoing relocation is less than the parallelism budget. `queuedRelocation[ priority ] < countLimit (default 50)`;
-* the source team is not throttled to be a data movement source team. `( now() - The last time the source team was selected ) * time volumn (default 20) > read sample interval (2 min default)`;
+* the source team is not throttled to be a data movement source team. `( now() - The last time the source team was selected ) * time volume (default 20) > read sample interval (2 min default)`;
* the read load difference between source team and destination team is larger than 30% of the source team load;

## Metrics definition
@@ -81,7 +81,7 @@ Below diagram depicts the end-to-end encryption workflow detailing various modu

```
_______________________________________________________
-| FDB CLUSER HOST |
+| FDB CLUSTER HOST |
| |
_____________________ | ________________________ _________________ |
| | (proprietary) | | | | |

@@ -126,7 +126,7 @@ Given encryption keys will be needed as part of cluster-recovery, this process/r

Implements a native FDB KMS framework allowing multiple interfaces to co-existing and enabling FDB <-> KMS communication. Salient features:

-* Abstract `KmsConnector` class, the class enables a specilization implementation to implement `actor` supporting desired communication protocol.
+* Abstract `KmsConnector` class, the class enables a specialization implementation to implement `actor` supporting desired communication protocol.
* `KmsConnectorInterface` defines the supported endpoints allowing EncryptKeyProxy to fetch/refresh encryption keys.
* `--kms-connector-type` configuration parameter supplied via `foundationdb.conf` controls the runtime selection of KmsConnector selection.
@@ -111,10 +111,10 @@ limitingTps(tag) = min{limitingTps(tag, storage) : all storage servers}

If the throttling ratio is empty for all storage servers affected by a tag, then the per-tag, per-storage limiting TPS rate is also empty. In this case the target rate for this tag is simply the desired rate.

-If an individual zone is unhealthy, it may cause the throttling ratio for storage servers in that zone to shoot up. This should not be misinterpretted as a workload issue that requires active throttling. Therefore, the zone with the worst throttling ratios is ignored when computing the limiting transaction rate for a tag (similar to the calculation of the global transaction limit in `Ratekeeper::updateRate`).
+If an individual zone is unhealthy, it may cause the throttling ratio for storage servers in that zone to shoot up. This should not be misinterpreted as a workload issue that requires active throttling. Therefore, the zone with the worst throttling ratios is ignored when computing the limiting transaction rate for a tag (similar to the calculation of the global transaction limit in `Ratekeeper::updateRate`).

### Client Rate Calculation
-The smoothed per-client rate for each tag is tracked within `GlobalTagThrottlerImpl::PerTagStatistics`. Once a target rate has been computed, this is passed to `GlobalTagThrotterImpl::PerTagStatistics::updateAndGetPerClientRate` which adjusts the per-client rate. The per-client rate is meant to limit the busiest clients, so that at equilibrium, the per-client rate will remain constant and the sum of throughput from all clients will match the target rate.
+The smoothed per-client rate for each tag is tracked within `GlobalTagThrottlerImpl::PerTagStatistics`. Once a target rate has been computed, this is passed to `GlobalTagThrottlerImpl::PerTagStatistics::updateAndGetPerClientRate` which adjusts the per-client rate. The per-client rate is meant to limit the busiest clients, so that at equilibrium, the per-client rate will remain constant and the sum of throughput from all clients will match the target rate.

## Simulation Testing
The `ThroughputQuota.toml` test provides a simple end-to-end test using the global tag throttler. Quotas are set using the internal tag quota API in the `ThroughputQuota` workload. This is run with the `Cycle` workload, which randomly tags transactions.
@@ -138,7 +138,7 @@ However, reading the txnStateStore can be slow because it needs to read from dis
**Recruiting roles step.**
There are cases where the recovery can get stuck at recruiting enough roles for the txn system configuration. For example, if a cluster with replica factor equal to three has only three tLogs and one of them dies during the recovery, the cluster will not succeed in recruiting 3 tLogs and the recovery will get stuck. Another example is when a new database is created and the cluster does not have a valid txnStateStore. To get out of this situation, the CC will use an emergency transaction to forcibly change the configuration such that the recruitment can succeed. This configuration change may temporarily violate the contract of the desired configuration, but it is only temporary.

-ServerKnob::CLUSTER_RECOVERY_EVENT_NAME_PREFIX defines the prefix for cluster recovery trace events. Hereafter, refered as 'RecoveryEventPrefix' in this document.
+ServerKnob::CLUSTER_RECOVERY_EVENT_NAME_PREFIX defines the prefix for cluster recovery trace events. Hereafter, referred as 'RecoveryEventPrefix' in this document.

We can use the trace event `<RecoveryEventPrefix>RecoveredConfig`, which dumps the information of the new transaction system’s configuration, to diagnose why the recovery is blocked in this phase.
@@ -219,7 +219,7 @@ The rough outline of concrete changes proposed looks like:

Modifying how transaction logs spill data is a change to the on-disk files of
transaction logs. The work for enabling safe upgrades and rollbacks of
-persistent state changes to transaction logs was split off into a seperate
+persistent state changes to transaction logs was split off into a separate
design document: "Forward Compatibility for Transaction Logs".

That document describes a `log_version` configuration setting that controls the

@@ -257,7 +257,7 @@ same process.

Naively, this would create resource issues. Each instance would think that it
is allowed its own 1.5GB buffer of in-memory mutations. Instead, internally to
-the TLog implmentation, the transaction log is split into two parts. A
+the TLog implementation, the transaction log is split into two parts. A
`SharedTLog` is all the data that should be shared across multiple generations.
A TLog is all the data that is private to one generation. Most notably, the
1.5GB mutation buffer and the on-disk files are owned by the `SharedTLog`. The

@@ -367,7 +367,7 @@ same as `end-start`, intentionally. For this reason, the API is `(start, end)`
and not `(start, length)`.

Spilled data, when using spill-by-value, was resistant to bitrot via data being
-checksummed interally within SQLite's B-tree. Now that reads can be done
+checksummed internally within SQLite's B-tree. Now that reads can be done
directly, the responsibility for verifying data integrity falls upon the
DiskQueue. `CheckHashes::TRUE` will cause the DiskQueue to use the checksum in
each DiskQueue page to verify data integrity. If an externally maintained

@@ -433,11 +433,11 @@ the rest of the information about the database.

The in-memory storage engine writes an equal amount of mutations and snapshot
data to a queue, an when a full snapshot of the data has been written, deletes
-the preceeding snapshot and begins writing a new one. When backing an
+the preceding snapshot and begins writing a new one. When backing an
in-memory storage engine with the transaction logs, the
`LogSystemDiskQueueAdapter` implements writing to a queue as committing
mutations to the transaction logs with a special tag of `txsTag`, and deleting
-the preceeding snapshot as popping the transaction logs for the tag of `txsTag`
+the preceding snapshot as popping the transaction logs for the tag of `txsTag`
until the version where the last full snapshot began.

This means that unlike every other commit that is tagged and stored on the

@@ -538,7 +538,7 @@ The expected implication of this are:
2. Generating a peek response of 150KB could require reading 100MB of data, and allocating buffers to hold that 100MB.

OOMs were observed in early testing. Code has been added to specifically
-limit how much memory can be allocated for serving a signle peek request
+limit how much memory can be allocated for serving a single peek request
and all concurrent peek requests, with knobs to allow tuning this per
deployment configuration.

@@ -628,7 +628,7 @@ minor impacts on recovery times:

## Observability

-With the new changes, we must ensure that sufficent information has been exposed such that:
+With the new changes, we must ensure that sufficient information has been exposed such that:

1. If something goes wrong in production, we can understand what and why from trace logs.
2. We can understand if the TLog is performing suboptimally, and if so, which knob we should change and by how much.
@@ -22,7 +22,7 @@ inconsistently in the code base as "metadata mutations" in commit proxies and

## Why do we need transaction state store?

-When bootstraping an FDB cluster, the cluster controller (CC) role recruits a
+When bootstrapping an FDB cluster, the cluster controller (CC) role recruits a
new transaction system and initializes them. In particular, the transaction state store
is first read by the CC from previous generation's log system, and then broadcast to
all commit proxies of the new transaction system. After initializing `txnStateStore`, these

@@ -45,7 +45,7 @@ conflict resolution request to all Resolvers and they process transactions in st
of commit versions. Leveraging this mechanism, each commit proxy sends all metadata
(i.e., system key) mutations to all Resolvers. Resolvers keep these mutations in memory
and forward to other commit proxies in separate resolution response. Each commit proxy
-receive resolution response, along with metadata mutations happend at other proxies before
+receive resolution response, along with metadata mutations happened at other proxies before
its commit version, and apply all these metadata mutations in the commit order.
Finally, this proxy only writes metadata mutations in its own transaction batch to TLogs,
i.e., do not write other proxies' metadata mutations to TLogs to avoid repeated writes.
@@ -504,7 +504,7 @@ class RubyModuleIndex(Index):
# list of all modules, sorted by module name
modules = sorted(iter(self.domain.data['modules'].items()),
key=lambda x: x[0].lower())
-# sort out collapsable modules
+# sort out collapsible modules
prev_modname = ''
num_toplevels = 0
for modname, (docname, synopsis, platforms, deprecated) in modules:
@ -5,7 +5,7 @@
must be installed. If you upgrade a language binding to a new version, you may need to upgrade the FoundationDB client binaries as well. See :ref:`installing-client-binaries`.

.. |project-dependency| replace::
If you have a project with automatic dependency installation and have expressed a dependency on foundationdb, it may automatically install the lastest version of the language binding when you deploy your project to a new machine. If you have not also upgraded the Foundation client binary, an unplanned upgrade of the language binding may encounter an incompatibility. You should therefore configure any project dependency on foundationdb in coordination with your overall upgrade strategy.
If you have a project with automatic dependency installation and have expressed a dependency on foundationdb, it may automatically install the latest version of the language binding when you deploy your project to a new machine. If you have not also upgraded the Foundation client binary, an unplanned upgrade of the language binding may encounter an incompatibility. You should therefore configure any project dependency on foundationdb in coordination with your overall upgrade strategy.

.. |client-installed-bindings| replace::
The language binding requires FoundationDB client binaries whose version is at least as recent. The binding installed with FoundationDB installation will automatically satisfy this requirement.
@ -381,7 +381,7 @@

.. |option-read-your-writes-disable-blurb| replace::
When this option is invoked, a read performed by a transaction will not see any prior mutations that occured in that transaction, instead seeing the value which was in the database at the transaction's read version. This option may provide a small performance benefit for the client, but also disables a number of client-side optimizations which are beneficial for transactions which tend to read and write the same keys within a single transaction.
When this option is invoked, a read performed by a transaction will not see any prior mutations that occurred in that transaction, instead seeing the value which was in the database at the transaction's read version. This option may provide a small performance benefit for the client, but also disables a number of client-side optimizations which are beneficial for transactions which tend to read and write the same keys within a single transaction.

.. |option-read-your-writes-disable-note| replace::
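As a hedged illustration of this option (using the Python binding; the key and value below are arbitrary examples, and the API version should match the client you have installed), a transaction can disable read-your-writes before it performs any reads or writes:

```python
import fdb

fdb.api_version(630)   # use the API version of the installed client
db = fdb.open()

@fdb.transactional
def blind_update(tr):
    # Must be set before the transaction performs any reads or writes.
    tr.options.set_read_your_writes_disable()
    tr[b'example-key'] = b'new-value'
    # With read-your-writes disabled, this read returns whatever was in the
    # database at the transaction's read version, not the b'new-value' above.
    if tr[b'example-key'] == b'new-value':
        print('the value was already b\'new-value\' before this transaction')

blind_update(db)
```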
@ -460,7 +460,7 @@

.. |future-cancel-blurb| replace::
Cancels |future-type-string| and its associated asynchronous operation. If called before the future is ready, attempts to access its value will |error-raise-type| an :ref:`operation_cancelled <developer-guide-error-codes>` |error-type|. Cancelling a future which is already ready has no effect. Note that even if a future is not ready, its associated asynchronous operation may have succesfully completed and be unable to be cancelled.
Cancels |future-type-string| and its associated asynchronous operation. If called before the future is ready, attempts to access its value will |error-raise-type| an :ref:`operation_cancelled <developer-guide-error-codes>` |error-type|. Cancelling a future which is already ready has no effect. Note that even if a future is not ready, its associated asynchronous operation may have successfully completed and be unable to be cancelled.

.. |fdb-open-blurb1| replace::
Connects to the cluster specified by the :ref:`cluster file <foundationdb-cluster-file>`. This function is often called without any parameters, using only the defaults. If no cluster file is passed, FoundationDB automatically :ref:`determines a cluster file <specifying-a-cluster-file>` with which to connect to a cluster.
@ -509,7 +509,7 @@
Returns the tuple encoded by the given key, with the subspace's prefix tuple and raw prefix removed.

.. |subspace-range-blurb| replace::
Returns a range representing all keys in the subspace that encode tuples strictly starting with the specifed tuple.
Returns a range representing all keys in the subspace that encode tuples strictly starting with the specified tuple.

.. |subspace-contains-blurb| replace::
Returns true if ``key`` starts with |key-meth|, indicating that the subspace logically contains ``key``.
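For example, with the Python binding the range and containment behaviour described above might be exercised like this (a sketch only; the `users` subspace and the keys and values written are made up for illustration):

```python
import fdb

fdb.api_version(630)   # use the API version of the installed client
db = fdb.open()

users = fdb.Subspace(('users',))

@fdb.transactional
def record_event(tr, user_id, seq, event):
    tr[users.pack((user_id, seq))] = event

@fdb.transactional
def list_events(tr, user_id):
    # range((user_id,)) covers exactly the keys encoding tuples that strictly
    # start with (user_id,), per |subspace-range-blurb| above.
    r = users.range((user_id,))
    return [(users.unpack(k), v) for k, v in tr.get_range(r.start, r.stop)]

record_event(db, 42, 1, b'login')
record_event(db, 42, 2, b'logout')
print(list_events(db, 42))
assert users.contains(users.pack((42, 1)))   # |subspace-contains-blurb|
```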
@ -106,7 +106,7 @@ Blob store Backup URLs can have optional parameters at the end which set various

Here is a complete list of valid parameters:

*secure_connection* (or *sc*) - Set 1 for secure connection and 0 for unsecure connection. Defaults to secure connection.
*secure_connection* (or *sc*) - Set 1 for secure connection and 0 for insecure connection. Defaults to secure connection.

*connect_tries* (or *ct*) - Number of times to try to connect for each request.

@ -642,7 +642,7 @@ The subclasses of the ``ApiWorkload`` inherit the following configuration option
- ``numRandomOperations``: the number of random operations to be executed per workload (default: 1000)
- ``runUntilStop``: run the workload indefinitely until the stop command is received (default: false).
This execution mode in upgrade tests and other scripted tests, where the workload needs to
be generated continously until completion of the scripted test.
be generated continuously until completion of the scripted test.
- ``numOperationsForProgressCheck``: the number of operations to be performed to confirm a progress
check (default: 10). This option is used in combination with ``runUntilStop``. Progress checks are
initiated by a test script to check if the client workload is successfully progressing after a

@ -423,7 +423,7 @@ flow

``profile flow run <DURATION> <FILENAME> <PROCESS...>``

Enables flow profiling on the specifed processes for ``DURATION`` seconds. Profiling output will be stored at the specified filename relative to the fdbserver process's trace log directory. To profile all processes, use ``all`` for the ``PROCESS`` parameter.
Enables flow profiling on the specified processes for ``DURATION`` seconds. Profiling output will be stored at the specified filename relative to the fdbserver process's trace log directory. To profile all processes, use ``all`` for the ``PROCESS`` parameter.

heap
^^^^

@ -874,7 +874,7 @@ In order to assign read and commit versions to transactions, a client will never
As mentioned before, the algorithm to assign read versions is a bit more complex. At the start of a transaction, a client will ask a GRV proxy server for a read version. The GRV proxy will reply with the last committed version as of the time it received the request - this is important to guarantee external consistency. This is how this is achieved:

#. The client will send a GRV (get read version) request to a GRV proxy.
#. The GRV proxy will batch GRV requests for a short amount of time (it depends on load and configuartion how big these batches will be).
#. The GRV proxy will batch GRV requests for a short amount of time (it depends on load and configuration how big these batches will be).
#. The proxy will do the following steps in parallel:
* Ask master for their most recent committed version (the largest version of proxies' committed version for which the transactions are successfully written to the transaction log system).
* Send a message to the transaction log system to verify that it is still writable. This is to prevent that we fetch read versions from a GRV proxy that has been declared to be dead.
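The read-version path above can be summarised in a few lines of pseudocode. The Python sketch below is illustrative only (the real GRV proxy is asynchronous C++; the `GrvProxy` class and the `master`/`tlogs` interfaces are invented here): it collects requests for a short, load-dependent window, then answers the whole batch with the last committed version after confirming the log system is still writable.

```python
# Illustrative sketch only -- not FoundationDB code. All names are invented.
import time

class GrvProxy:
    def __init__(self, master, tlogs, batch_window=0.001):
        self.master = master               # can report the most recent committed version
        self.tlogs = tlogs                 # can confirm the log system is still writable
        self.batch_window = batch_window   # the real window depends on load and configuration
        self.pending = []

    def on_grv_request(self, reply):
        self.pending.append(reply)         # requests are batched rather than answered one by one

    def serve_one_batch(self):
        time.sleep(self.batch_window)
        batch, self.pending = self.pending, []
        # Done in parallel in the real system:
        committed = self.master.latest_committed_version()
        if not self.tlogs.still_writable():
            # This proxy may have been declared dead; it must not hand out read versions.
            raise RuntimeError("log system no longer writable for this proxy")
        # Replying with the last committed version is what gives external consistency.
        for reply in batch:
            reply(committed)
```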
@ -886,7 +886,7 @@ Conflict Detection

This section will only explain conceptually how transactions are resolved in FoundationDB. The implementation will use multiple servers running the *Resolver* role and the keyspace will be sharded across them. It will also only allow resolving transactions whose read versions are less than 5 million versions older than their commit version (around 5 seconds).

A resolver will keep a map in memory which stores the written keys of each commit version. A simpified resolver state could look like this:
A resolver will keep a map in memory which stores the written keys of each commit version. A simplified resolver state could look like this:

======= =======
Version Keys

@ -897,7 +897,7 @@ Version Keys
1340 t, u, x
======= =======

Now let's assume we have a transaction with read version *1200* and the assigned commit version will be something larger than 1340 - let's say it is *1450*. In that transaction we read keys ``b, m, s`` and we want to write to ``a``. Note that we didn't read ``a`` - so we will issue a blind write. The resolver will check whether any of the read keys (``b, m, or s``) appers in any line between version *1200* and the most recent version, *1450*. The last write to ``b`` was at version 1000 which was before the read version. This means that transaction read the most recent value. We don't know about any recent writes to the other keys. Therefore the resolver will decide that this transaction does *NOT* conflict and it can be committed. It will then add this new write set to its internal state so that it can resolve future transactions. The new state will look like this:
Now let's assume we have a transaction with read version *1200* and the assigned commit version will be something larger than 1340 - let's say it is *1450*. In that transaction we read keys ``b, m, s`` and we want to write to ``a``. Note that we didn't read ``a`` - so we will issue a blind write. The resolver will check whether any of the read keys (``b, m, or s``) appears in any line between version *1200* and the most recent version, *1450*. The last write to ``b`` was at version 1000 which was before the read version. This means that transaction read the most recent value. We don't know about any recent writes to the other keys. Therefore the resolver will decide that this transaction does *NOT* conflict and it can be committed. It will then add this new write set to its internal state so that it can resolve future transactions. The new state will look like this:

======= =======
Version Keys
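The check in the walkthrough above can be written down directly. The Python sketch below is illustrative only (the real Resolver is a sharded C++ component); the only facts taken from the example are the last write to `b` at version 1000 and the write of `t, u, x` at version 1340, everything else is made up:

```python
# Illustrative sketch only -- not FoundationDB code.
class Resolver:
    def __init__(self):
        self.writes_by_version = {}    # commit version -> set of keys written at that version

    def try_commit(self, read_version, commit_version, read_set, write_set):
        # Conflict if any key in the read set was written after the read version.
        for version, keys in self.writes_by_version.items():
            if read_version < version <= commit_version and keys & read_set:
                return False           # the transaction read stale data
        # No conflict: remember this write set so future transactions can be resolved.
        self.writes_by_version[commit_version] = set(write_set)
        return True

resolver = Resolver()
resolver.writes_by_version = {
    1000: {"b"},              # last write to b, before the read version
    1340: {"t", "u", "x"},    # the row shown in the table above
}
# Read version 1200, commit version 1450, read set {b, m, s}, blind write to a:
assert resolver.try_commit(1200, 1450, {"b", "m", "s"}, {"a"})
# The resolver state now also contains 1450 -> {a}, as in the table that follows.
```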
@ -21,7 +21,7 @@ Actors
FDB_TRACE_PROBE(actor_create, "actorname")
FDB_TRACE_PROBE(actor_destroy, "actorname")

Get's called whenever an actor is created or gets destroyed. It provides one argument which is a
Gets called whenever an actor is created or gets destroyed. It provides one argument which is a
string and it is the name of the actor.

.. code-block:: c

@ -92,9 +92,9 @@ cluster.messages primary_dc_missing Unab
|
|||
cluster.messages fetch_primary_dc_timeout Fetching primary DC timed out.
|
||||
cluster.processes.<process>.messages file_open_error Unable to open ‘<file>’ (<os_error>).
|
||||
cluster.processes.<process>.messages incorrect_cluster_file_contents Cluster file contents do not match current cluster connection string. Verify cluster file is writable and has not been overwritten externally.
|
||||
cluster.processes.<process>.messages io_error <error> occured in <subsystem>
|
||||
cluster.processes.<process>.messages platform_error <error> occured in <subsystem>
|
||||
cluster.processes.<process>.messages process_error <error> occured in <subsystem>
|
||||
cluster.processes.<process>.messages io_error <error> occurred in <subsystem>
|
||||
cluster.processes.<process>.messages platform_error <error> occurred in <subsystem>
|
||||
cluster.processes.<process>.messages process_error <error> occurred in <subsystem>
|
||||
==================================== ==================================== =================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================
|
||||
|
||||
The JSON path ``cluster.recovery_state``, when it exists, is an Object containing at least ``"name"`` and ``"description"``. The possible values for those fields are in the following table:
|
||||
|
|
|
@ -199,7 +199,7 @@ that process, and wait for necessary data to be moved away.
|
|||
#. ``\xff\xff/management/options/excluded/force`` Read/write. Setting this key disables safety checks for writes to ``\xff\xff/management/excluded/<exclusion>``. Setting this key only has an effect in the current transaction and is not persisted on commit.
|
||||
#. ``\xff\xff/management/options/failed/force`` Read/write. Setting this key disables safety checks for writes to ``\xff\xff/management/failed/<exclusion>``. Setting this key only has an effect in the current transaction and is not persisted on commit.
|
||||
#. ``\xff\xff/management/min_required_commit_version`` Read/write. Changing this key will change the corresponding system key ``\xff/minRequiredCommitVersion = [[Version]]``. The value of this special key is the literal text of the underlying ``Version``, which is ``int64_t``. If you set the key with a value failed to be parsed as ``int64_t``, ``special_keys_api_failure`` will be thrown. In addition, the given ``Version`` should be larger than the current read version and smaller than the upper bound(``2**63-1-version_per_second*3600*24*365*1000``). Otherwise, ``special_keys_api_failure`` is thrown. For more details, see help text of ``fdbcli`` command ``advanceversion``.
|
||||
#. ``\xff\xff/management/maintenance/<zone_id> := <seconds>`` Read/write. Set/clear a key in this range will change the corresponding system key ``\xff\x02/healthyZone``. The value is a literal text of a non-negative ``double`` which represents the remaining time for the zone to be in maintenance. Commiting with an invalid value will throw ``special_keys_api_failure``. Only one zone is allowed to be in maintenance at the same time. Setting a new key in the range will override the old one and the transaction will throw ``special_keys_api_failure`` error if more than one zone is given. For more details, see help text of ``fdbcli`` command ``maintenance``.
|
||||
#. ``\xff\xff/management/maintenance/<zone_id> := <seconds>`` Read/write. Set/clear a key in this range will change the corresponding system key ``\xff\x02/healthyZone``. The value is a literal text of a non-negative ``double`` which represents the remaining time for the zone to be in maintenance. Committing with an invalid value will throw ``special_keys_api_failure``. Only one zone is allowed to be in maintenance at the same time. Setting a new key in the range will override the old one and the transaction will throw ``special_keys_api_failure`` error if more than one zone is given. For more details, see help text of ``fdbcli`` command ``maintenance``.
|
||||
In addition, a special key ``\xff\xff/management/maintenance/IgnoreSSFailures`` in the range, if set, will disable datadistribution for storage server failures.
|
||||
It is doing the same thing as the fdbcli command ``datadistribution disable ssfailure``.
|
||||
Maintenance mode will be unable to use until the key is cleared, which is the same as the fdbcli command ``datadistribution enable ssfailure``.
|
||||
|
@ -207,7 +207,7 @@ that process, and wait for necessary data to be moved away.
|
|||
#. ``\xff\xff/management/data_distribution/<mode|rebalance_ignored>`` Read/write. Changing these two keys will change the two corresponding system keys ``\xff/dataDistributionMode`` and ``\xff\x02/rebalanceDDIgnored``. The value of ``\xff\xff/management/data_distribution/mode`` is a literal text of ``0`` (disable) or ``1`` (enable). Transactions committed with invalid values will throw ``special_keys_api_failure`` . The value of ``\xff\xff/management/data_distribution/rebalance_ignored`` is empty. If present, it means data distribution is disabled for rebalance. Any transaction committed with non-empty value for this key will throw ``special_keys_api_failure``. For more details, see help text of ``fdbcli`` command ``datadistribution``.
|
||||
#. ``\xff\xff/management/consistency_check_suspended`` Read/write. Set or read this key will set or read the underlying system key ``\xff\x02/ConsistencyCheck/Suspend``. The value of this special key is unused thus if present, will be empty. In particular, if the key exists, then consistency is suspended. For more details, see help text of ``fdbcli`` command ``consistencycheck``.
|
||||
#. ``\xff\xff/management/db_locked`` Read/write. A single key that can be read and modified. Set the key with a 32 bytes hex string UID will lock the database and clear the key will unlock. Read the key will return the UID string as the value. If the database is already locked, then the commit will fail with the ``special_keys_api_failure`` error. For more details, see help text of ``fdbcli`` command ``lock`` and ``unlock``.
|
||||
#. ``\xff\xff/management/auto_coordinators`` Read-only. A single key, if read, will return a set of processes which is able to satisfy the current redundency level and serve as new coordinators. The return value is formatted as a comma delimited string of network addresses of coordinators, i.e. ``<ip:port>,<ip:port>,...,<ip:port>``.
|
||||
#. ``\xff\xff/management/auto_coordinators`` Read-only. A single key, if read, will return a set of processes which is able to satisfy the current redundancy level and serve as new coordinators. The return value is formatted as a comma delimited string of network addresses of coordinators, i.e. ``<ip:port>,<ip:port>,...,<ip:port>``.
|
||||
#. ``\xff\xff/management/excluded_locality/<locality>`` Read/write. Indicates that the cluster should move data away from processes matching ``<locality>``, so that they can be safely removed. See :ref:`removing machines from a cluster <removing-machines-from-a-cluster>` for documentation for the corresponding fdbcli command.
|
||||
#. ``\xff\xff/management/failed_locality/<locality>`` Read/write. Indicates that the cluster should consider matching processes as permanently failed. This allows the cluster to avoid maintaining extra state and doing extra work in the hope that these processes come back. See :ref:`removing machines from a cluster <removing-machines-from-a-cluster>` for documentation for the corresponding fdbcli command.
|
||||
#. ``\xff\xff/management/options/excluded_locality/force`` Read/write. Setting this key disables safety checks for writes to ``\xff\xff/management/excluded_locality/<locality>``. Setting this key only has an effect in the current transaction and is not persisted on commit.
|
||||
|
@ -230,8 +230,8 @@ For example, you can change a process type or update coordinators by manipulatin
|
|||
|
||||
#. ``\xff\xff/configuration/process/class_type/<address> := <class_type>`` Read/write. Reading keys in the range will retrieve processes' class types. Setting keys in the range will update processes' class types. The process matching ``<address>`` will be assigned to the given class type if the commit is successful. The valid class types are ``storage``, ``transaction``, ``resolution``, etc. A full list of class type can be found via ``fdbcli`` command ``help setclass``. Clearing keys is forbidden in the range. Instead, you can set the type as ``default``, which will clear the assigned class type if existing. For more details, see help text of ``fdbcli`` command ``setclass``.
|
||||
#. ``\xff\xff/configuration/process/class_source/<address> := <class_source>`` Read-only. Reading keys in the range will retrieve processes' class source. The class source is one of ``command_line``, ``configure_auto``, ``set_class`` and ``invalid``, indicating the source that the process's class type comes from.
|
||||
#. ``\xff\xff/configuration/coordinators/processes := <ip:port>,<ip:port>,...,<ip:port>`` Read/write. A single key, if read, will return a comma delimited string of coordinators' network addresses. Thus to provide a new set of cooridinators, set the key with a correct formatted string of new coordinators' network addresses. As there's always the need to have coordinators, clear on the key is forbidden and a transaction will fail with the ``special_keys_api_failure`` error if the clear is committed. For more details, see help text of ``fdbcli`` command ``coordinators``.
|
||||
#. ``\xff\xff/configuration/coordinators/cluster_description := <new_description>`` Read/write. A single key, if read, will return the cluster description. Thus modifying the key will update the cluster decription. The new description needs to match ``[A-Za-z0-9_]+``, otherwise, the ``special_keys_api_failure`` error will be thrown. In addition, clear on the key is meaningless thus forbidden. For more details, see help text of ``fdbcli`` command ``coordinators``.
|
||||
#. ``\xff\xff/configuration/coordinators/processes := <ip:port>,<ip:port>,...,<ip:port>`` Read/write. A single key, if read, will return a comma delimited string of coordinators' network addresses. Thus to provide a new set of coordinators, set the key with a correct formatted string of new coordinators' network addresses. As there's always the need to have coordinators, clear on the key is forbidden and a transaction will fail with the ``special_keys_api_failure`` error if the clear is committed. For more details, see help text of ``fdbcli`` command ``coordinators``.
|
||||
#. ``\xff\xff/configuration/coordinators/cluster_description := <new_description>`` Read/write. A single key, if read, will return the cluster description. Thus modifying the key will update the cluster description. The new description needs to match ``[A-Za-z0-9_]+``, otherwise, the ``special_keys_api_failure`` error will be thrown. In addition, clear on the key is meaningless thus forbidden. For more details, see help text of ``fdbcli`` command ``coordinators``.
|
||||
|
||||
The ``<address>`` here is the network address of the corresponding process. Thus the general form is ``ip:port``.
|
||||
|
||||
|
|
|
@ -55,6 +55,6 @@ When operating in the tenant mode ``required_experimental`` or using a metaclust
|
|||
|
||||
.. note :: Setting the ``READ_SYSTEM_KEYS`` or ``ACCESS_SYSTEM_KEYS`` options implies ``RAW_ACCESS`` for your transaction.
|
||||
|
||||
.. note :: Many :doc:`special keys <special-keys>` operations access parts of the system keys and will implictly enable raw access on the transactions in which they are used.
|
||||
.. note :: Many :doc:`special keys <special-keys>` operations access parts of the system keys and will implicitly enable raw access on the transactions in which they are used.
|
||||
|
||||
.. warning :: Care should be taken when using raw access to run transactions spanning multiple tenants if the tenant feature is being utilized to aid in moving data between clusters. In such scenarios, it may not be guaranteed that all of the data you intend to access is on a single cluster.
|
||||
|
|
|
@ -43,7 +43,7 @@ void printConvertUsage() {
|
|||
<< " Begin version.\n"
|
||||
<< " -e, --end END End version.\n"
|
||||
<< " --log Enables trace file logging for the CLI session.\n"
|
||||
<< " --logdir PATH Specifes the output directory for trace files. If\n"
|
||||
<< " --logdir PATH Specifies the output directory for trace files. If\n"
|
||||
<< " unspecified, defaults to the current directory. Has\n"
|
||||
<< " no effect unless --log is specified.\n"
|
||||
<< " --loggroup LOG_GROUP\n"
|
||||
|
@ -430,7 +430,7 @@ struct LogFileWriter {
|
|||
|
||||
Future<Void> writeKV(Key k, Value v) { return writeKV_impl(this, k, v); }
|
||||
|
||||
// Adds a new mutation to an interal buffer and writes out when encountering
|
||||
// Adds a new mutation to an internal buffer and writes out when encountering
|
||||
// a new commitVersion or exceeding the block size.
|
||||
ACTOR static Future<Void> addMutation(LogFileWriter* self, Version commitVersion, MutationListRef mutations) {
|
||||
state Standalone<StringRef> value = BinaryWriter::toValue(mutations, IncludeVersion());
|
||||
|
|
|
@ -71,7 +71,7 @@ void printDecodeUsage() {
|
|||
" -i, --input FILE\n"
|
||||
" Log file filter, only matched files are decoded.\n"
|
||||
" --log Enables trace file logging for the CLI session.\n"
|
||||
" --logdir PATH Specifes the output directory for trace files. If\n"
|
||||
" --logdir PATH Specifies the output directory for trace files. If\n"
|
||||
" unspecified, defaults to the current directory. Has\n"
|
||||
" no effect unless --log is specified.\n"
|
||||
" --loggroup LOG_GROUP\n"
|
||||
|
@ -887,7 +887,7 @@ ACTOR Future<Void> decode_logs(Reference<DecodeParams> params) {
|
|||
// rangeFiles = getRelevantRangeFiles(filteredRangeFiles, params);
|
||||
std::vector<RangeFile> files = wait(getRangeFiles(container, params));
|
||||
rangeFiles = files;
|
||||
printLogFiles("Releavant range files are: ", rangeFiles);
|
||||
printLogFiles("Relevant range files are: ", rangeFiles);
|
||||
}
|
||||
|
||||
TraceEvent("TotalFiles", uid).detail("LogFiles", logFiles.size()).detail("RangeFiles", rangeFiles.size());
|
||||
|
|
|
@ -986,7 +986,7 @@ static void printAgentUsage(bool devhelp) {
|
|||
" then `%s'.\n",
|
||||
platform::getDefaultClusterFilePath().c_str());
|
||||
printf(" --log Enables trace file logging for the CLI session.\n"
|
||||
" --logdir PATH Specifes the output directory for trace files. If\n"
|
||||
" --logdir PATH Specifies the output directory for trace files. If\n"
|
||||
" unspecified, defaults to the current directory. Has\n"
|
||||
" no effect unless --log is specified.\n");
|
||||
printf(" --loggroup LOG_GROUP\n"
|
||||
|
@ -1112,7 +1112,7 @@ static void printBackupUsage(bool devhelp) {
|
|||
printf(" --partitioned-log-experimental Starts with new type of backup system using partitioned logs.\n");
|
||||
printf(" -n, --dryrun For backup start or restore start, performs a trial run with no actual changes made.\n");
|
||||
printf(" --log Enables trace file logging for the CLI session.\n"
|
||||
" --logdir PATH Specifes the output directory for trace files. If\n"
|
||||
" --logdir PATH Specifies the output directory for trace files. If\n"
|
||||
" unspecified, defaults to the current directory. Has\n"
|
||||
" no effect unless --log is specified.\n");
|
||||
printf(" --loggroup LOG_GROUP\n"
|
||||
|
@ -1270,7 +1270,7 @@ static void printDBAgentUsage(bool devhelp) {
|
|||
" The path of a file containing the connection string for the\n"
|
||||
" source FoundationDB cluster.\n");
|
||||
printf(" --log Enables trace file logging for the CLI session.\n"
|
||||
" --logdir PATH Specifes the output directory for trace files. If\n"
|
||||
" --logdir PATH Specifies the output directory for trace files. If\n"
|
||||
" unspecified, defaults to the current directory. Has\n"
|
||||
" no effect unless --log is specified.\n");
|
||||
printf(" --loggroup LOG_GROUP\n"
|
||||
|
@ -1325,7 +1325,7 @@ static void printDBBackupUsage(bool devhelp) {
|
|||
printf(" --dstonly Abort will not make any changes on the source cluster.\n");
|
||||
printf(TLS_HELP);
|
||||
printf(" --log Enables trace file logging for the CLI session.\n"
|
||||
" --logdir PATH Specifes the output directory for trace files. If\n"
|
||||
" --logdir PATH Specifies the output directory for trace files. If\n"
|
||||
" unspecified, defaults to the current directory. Has\n"
|
||||
" no effect unless --log is specified.\n");
|
||||
printf(" --loggroup LOG_GROUP\n"
|
||||
|
@ -2768,7 +2768,7 @@ std::pair<Version, Version> getMaxMinRestorableVersions(const BackupDescription&
|
|||
}
|
||||
|
||||
// If restoreVersion is invalidVersion or latestVersion, use the maximum or minimum restorable version respectively for
|
||||
// selected key ranges. If restoreTimestamp is specified, any specified restoreVersion will be overriden to the version
|
||||
// selected key ranges. If restoreTimestamp is specified, any specified restoreVersion will be overridden to the version
|
||||
// resolved to that timestamp.
|
||||
ACTOR Future<Void> queryBackup(const char* name,
|
||||
std::string destinationContainer,
|
||||
|
|
|
@ -28,7 +28,7 @@
|
|||
|
||||
namespace file_converter {
|
||||
|
||||
// File format convertion constants
|
||||
// File format conversion constants
|
||||
enum {
|
||||
OPT_CONTAINER,
|
||||
OPT_FILE_TYPE,
|
||||
|
|
|
@ -98,7 +98,7 @@ ACTOR Future<bool> changeCoordinators(Reference<IDatabase> db, std::vector<Strin
|
|||
}
|
||||
// if auto change, read the special key to retrieve the recommended config
|
||||
if (automatic) {
|
||||
// if previous read failed, retry, otherwise, use the same recommened config
|
||||
// if previous read failed, retry, otherwise, use the same recommended config
|
||||
if (!auto_coordinators_str.size()) {
|
||||
// Hold the reference to the standalone's memory
|
||||
state ThreadFuture<Optional<Value>> auto_coordinatorsF =
|
||||
|
@ -206,7 +206,7 @@ CommandFactory coordinatorsFactory(
|
|||
CommandHelp(
|
||||
"coordinators auto|<ADDRESS>+ [description=new_cluster_description]",
|
||||
"change cluster coordinators or description",
|
||||
"If 'auto' is specified, coordinator addresses will be choosen automatically to support the configured "
|
||||
"If 'auto' is specified, coordinator addresses will be chosen automatically to support the configured "
|
||||
"redundancy level. (If the current set of coordinators are healthy and already support the redundancy level, "
|
||||
"nothing will be changed.)\n\nOtherwise, sets the coordinators to the list of IP:port pairs specified by "
|
||||
"<ADDRESS>+. An fdbserver process must be running on each of the specified addresses.\n\ne.g. coordinators "
|
||||
|
|
|
@ -62,7 +62,7 @@ ACTOR Future<Void> getAuditProgressByRange(Database cx, AuditType auditType, UID
|
|||
throw e;
|
||||
}
|
||||
if (retryCount > 30) {
|
||||
printf("Imcomplete check\n");
|
||||
printf("Incomplete check\n");
|
||||
return Void();
|
||||
}
|
||||
wait(delay(0.5));
|
||||
|
|
|
@ -51,7 +51,7 @@ ACTOR Future<bool> profileCommandActor(Database db,
|
|||
wait(db->globalConfig->onInitialized());
|
||||
if (tokencmp(tokens[2], "get")) {
|
||||
if (tokens.size() != 3) {
|
||||
fprintf(stderr, "ERROR: Addtional arguments to `get` are not supported.\n");
|
||||
fprintf(stderr, "ERROR: Additional arguments to `get` are not supported.\n");
|
||||
return false;
|
||||
}
|
||||
std::string sampleRateStr = "default";
|
||||
|
|
|
@ -133,7 +133,7 @@ std::string getProcessAddressByServerID(StatusObjectReader processesMap, std::st
|
|||
}
|
||||
} catch (std::exception&) {
|
||||
// If an entry in the process map is badly formed then something will throw. Since we are
|
||||
// looking for a positive match, just ignore any read execeptions and move on to the next proc
|
||||
// looking for a positive match, just ignore any read exceptions and move on to the next proc
|
||||
}
|
||||
}
|
||||
return "unknown";
|
||||
|
@ -1261,7 +1261,7 @@ void printStatus(StatusObjectReader statusObj,
|
|||
|
||||
// status minimal
|
||||
else if (level == StatusClient::MINIMAL) {
|
||||
// Checking for field exsistence is not necessary here because if a field is missing there is no additional
|
||||
// Checking for field existence is not necessary here because if a field is missing there is no additional
|
||||
// information that we would be able to display if we continued execution. Instead, any missing fields will
|
||||
// throw and the catch will display the proper message.
|
||||
try {
|
||||
|
|
|
@ -162,7 +162,7 @@ CommandFactory versionEpochFactory(
|
|||
"Read or write the version epoch",
|
||||
"If no arguments are specified, reports the offset between the expected version "
|
||||
"and the actual version. Otherwise, enables, disables, or commits the version epoch. "
|
||||
"Setting the version epoch can be irreversible since it can cause a large verison jump. "
|
||||
"Setting the version epoch can be irreversible since it can cause a large version jump. "
|
||||
"Thus, the version epoch must first by enabled with the enable or set command. This "
|
||||
"causes a recovery. Once the version epoch has been set, versions may be given out at "
|
||||
"a faster or slower rate to attempt to match the actual version to the expected version, "
|
||||
|
|
|
@ -479,7 +479,7 @@ static void printProgramUsage(const char* name) {
|
|||
" then `%s'.\n",
|
||||
platform::getDefaultClusterFilePath().c_str());
|
||||
printf(" --log Enables trace file logging for the CLI session.\n"
|
||||
" --log-dir PATH Specifes the output directory for trace files. If\n"
|
||||
" --log-dir PATH Specifies the output directory for trace files. If\n"
|
||||
" unspecified, defaults to the current directory. Has\n"
|
||||
" no effect unless --log is specified.\n"
|
||||
" --log-group LOG_GROUP\n"
|
||||
|
@ -583,7 +583,7 @@ void initHelp() {
|
|||
|
||||
helpMap["setknob"] = CommandHelp("setknob <KEY> <VALUE> [CONFIG_CLASS]",
|
||||
"updates a knob to specified value",
|
||||
"setknob will prompt for a descrption of the changes" ESCAPINGKV);
|
||||
"setknob will prompt for a description of the changes" ESCAPINGKV);
|
||||
|
||||
helpMap["getknob"] = CommandHelp(
|
||||
"getknob <KEY> [CONFIG_CLASS]", "gets the value of the specified knob", "CONFIG_CLASS is optional." ESCAPINGK);
|
||||
|
|
|
@ -405,7 +405,7 @@ ACTOR Future<Void> persistAuditState(Database cx,
|
|||
// Clear persistent progress data of the new audit if complete
|
||||
if (auditPhase == AuditPhase::Complete) {
|
||||
clearAuditProgressMetadata(&tr, auditState.getType(), auditState.id);
|
||||
} // We keep the progess metadata of Failed and Error audits for further investigations
|
||||
} // We keep the progress metadata of Failed and Error audits for further investigations
|
||||
// Check existing state
|
||||
Optional<Value> res_ = wait(tr.get(auditKey(auditState.getType(), auditState.id)));
|
||||
if (!res_.present()) { // has been cancelled
|
||||
|
@ -494,7 +494,7 @@ ACTOR Future<Void> persistAuditStateByRange(Database cx, AuditStorageState audit
|
|||
}
|
||||
// It is possible ddAuditState is complete while some progress is about to persist
|
||||
// Since doAuditOnStorageServer may repeatedly issue multiple requests (see getReplyUnlessFailedFor)
|
||||
// For this case, no need to proceed. Sliently exit
|
||||
// For this case, no need to proceed. Silently exit
|
||||
if (ddAuditState.getPhase() == AuditPhase::Complete) {
|
||||
break;
|
||||
}
|
||||
|
@ -585,7 +585,7 @@ ACTOR Future<Void> persistAuditStateByServer(Database cx, AuditStorageState audi
|
|||
}
|
||||
// It is possible ddAuditState is complete while some progress is about to persist
|
||||
// Since doAuditOnStorageServer may repeatedly issue multiple requests (see getReplyUnlessFailedFor)
|
||||
// For this case, no need to proceed. Sliently exit
|
||||
// For this case, no need to proceed. Silently exit
|
||||
if (ddAuditState.getPhase() == AuditPhase::Complete) {
|
||||
break;
|
||||
}
|
||||
|
@ -1005,7 +1005,7 @@ Optional<std::pair<KeyRange, KeyRange>> rangesSame(std::vector<KeyRange> rangesA
|
|||
// from the perspective of ServerKeys system key space
|
||||
// Input: (1) SS id; (2) transaction tr; (3) within range
|
||||
// Return AuditGetServerKeysRes, including: (1) complete range by a single read range;
|
||||
// (2) verison of the read; (3) ranges of the input SS
|
||||
// (2) version of the read; (3) ranges of the input SS
|
||||
ACTOR Future<AuditGetServerKeysRes> getThisServerKeysFromServerKeys(UID serverID, Transaction* tr, KeyRange range) {
|
||||
state RangeResult readResult;
|
||||
state AuditGetServerKeysRes res;
|
||||
|
@ -1028,7 +1028,7 @@ ACTOR Future<AuditGetServerKeysRes> getThisServerKeysFromServerKeys(UID serverID
|
|||
.detail("Range", range)
|
||||
.detail("Prefix", serverKeysPrefixFor(serverID))
|
||||
.detail("ResultSize", readResult.size())
|
||||
.detail("AduitServerID", serverID);
|
||||
.detail("AuditServerID", serverID);
|
||||
|
||||
std::vector<KeyRange> ownRanges;
|
||||
for (int i = 0; i < readResult.size() - 1; ++i) {
|
||||
|
@ -1036,7 +1036,7 @@ ACTOR Future<AuditGetServerKeysRes> getThisServerKeysFromServerKeys(UID serverID
|
|||
.detail("ValueIsServerKeysFalse", readResult[i].value == serverKeysFalse)
|
||||
.detail("ServerHasKey", serverHasKey(readResult[i].value))
|
||||
.detail("Range", KeyRangeRef(readResult[i].key, readResult[i + 1].key))
|
||||
.detail("AduitServerID", serverID);
|
||||
.detail("AuditServerID", serverID);
|
||||
if (serverHasKey(readResult[i].value)) {
|
||||
KeyRange shardRange;
|
||||
ownRanges.push_back(Standalone(KeyRangeRef(readResult[i].key, readResult[i + 1].key)));
|
||||
|
@ -1044,7 +1044,7 @@ ACTOR Future<AuditGetServerKeysRes> getThisServerKeysFromServerKeys(UID serverID
|
|||
}
|
||||
const KeyRange completeRange = Standalone(KeyRangeRef(range.begin, readResult.back().key));
|
||||
TraceEvent(SevVerbose, "AuditUtilGetThisServerKeysFromServerKeysEnd", serverID)
|
||||
.detail("AduitServerID", serverID)
|
||||
.detail("AuditServerID", serverID)
|
||||
.detail("Range", range)
|
||||
.detail("Prefix", serverKeysPrefixFor(serverID))
|
||||
.detail("ReadAtVersion", readAtVersion)
|
||||
|
@ -1055,7 +1055,7 @@ ACTOR Future<AuditGetServerKeysRes> getThisServerKeysFromServerKeys(UID serverID
|
|||
} catch (Error& e) {
|
||||
TraceEvent(SevDebug, "AuditUtilGetThisServerKeysError", serverID)
|
||||
.errorUnsuppressed(e)
|
||||
.detail("AduitServerID", serverID);
|
||||
.detail("AuditServerID", serverID);
|
||||
throw e;
|
||||
}
|
||||
|
||||
|
@ -1065,7 +1065,7 @@ ACTOR Future<AuditGetServerKeysRes> getThisServerKeysFromServerKeys(UID serverID
|
|||
// Given an input server, get ranges within the input range via the input transaction
|
||||
// from the perspective of KeyServers system key space
|
||||
// Input: (1) Audit Server ID (for logging); (2) transaction tr; (3) within range
|
||||
// Return AuditGetKeyServersRes, including : (1) complete range by a single read range; (2) verison of the read;
|
||||
// Return AuditGetKeyServersRes, including : (1) complete range by a single read range; (2) version of the read;
|
||||
// (3) map between SSes and their ranges --- in KeyServers space, a range corresponds to multiple SSes
|
||||
ACTOR Future<AuditGetKeyServersRes> getShardMapFromKeyServers(UID auditServerId, Transaction* tr, KeyRange range) {
|
||||
state AuditGetKeyServersRes res;
|
||||
|
@ -1101,7 +1101,7 @@ ACTOR Future<AuditGetKeyServersRes> getShardMapFromKeyServers(UID auditServerId,
|
|||
TraceEvent(SevVerbose, "AuditUtilGetThisServerKeysFromKeyServersReadDone", auditServerId)
|
||||
.detail("Range", range)
|
||||
.detail("ResultSize", readResult.size())
|
||||
.detail("AduitServerID", auditServerId);
|
||||
.detail("AuditServerID", auditServerId);
|
||||
|
||||
// produce result
|
||||
std::unordered_map<UID, std::vector<KeyRange>> serverOwnRanges;
|
||||
|
|
|
@ -734,7 +734,7 @@ ACTOR Future<Void> sendCommitTransactionRequest(CommitTransactionRequest req,
|
|||
Key versionKey = BinaryWriter::toValue(newBeginVersion, Unversioned());
|
||||
Key rangeEnd = getApplyKey(newBeginVersion, uid);
|
||||
|
||||
// mutations and encrypted mutations (and their relationship) is described in greater detail in the defenition of
|
||||
// mutations and encrypted mutations (and their relationship) is described in greater detail in the definition of
|
||||
// CommitTransactionRef in CommitTransaction.h
|
||||
req.transaction.mutations.push_back_deep(req.arena, MutationRef(MutationRef::SetValue, applyBegin, versionKey));
|
||||
req.transaction.encryptedMutations.push_back_deep(req.arena, Optional<MutationRef>());
|
||||
|
|
|
@ -242,7 +242,7 @@ public:
|
|||
}
|
||||
|
||||
// For a list of log files specified by their indices (of the same tag),
|
||||
// returns if they are continous in the range [begin, end]. If "tags" is not
|
||||
// returns if they are continuous in the range [begin, end]. If "tags" is not
|
||||
// nullptr, then it will be populated with [begin, end] -> tags, where next
|
||||
// pair's begin <= previous pair's end + 1. On return, the last pair's end
|
||||
// version (inclusive) gives the continuous range from begin.
|
||||
|
@ -324,10 +324,10 @@ public:
|
|||
end = std::min(end, tags.rbegin()->first.second);
|
||||
TraceEvent("ContinuousLogEnd").detail("Partition", 0).detail("EndVersion", end).detail("Begin", begin);
|
||||
|
||||
// for each range in tags, check all partitions from 1 are continouous
|
||||
// for each range in tags, check all partitions from 1 are continuous
|
||||
Version lastEnd = begin;
|
||||
for (const auto& [beginEnd, count] : tags) {
|
||||
Version tagEnd = beginEnd.second; // This range's minimum continous partition version
|
||||
Version tagEnd = beginEnd.second; // This range's minimum continuous partition version
|
||||
for (int i = 1; i < count; i++) {
|
||||
std::map<std::pair<Version, Version>, int> rangeTags;
|
||||
isContinuous(logs, tagIndices[i], beginEnd.first, beginEnd.second, &rangeTags);
|
||||
|
@ -511,7 +511,7 @@ public:
|
|||
state Version scanEnd = std::numeric_limits<Version>::max();
|
||||
|
||||
// Use the known log range if present
|
||||
// Logs are assumed to be contiguious between metaLogBegin and metaLogEnd, so initalize desc accordingly
|
||||
// Logs are assumed to be contiguous between metaLogBegin and metaLogEnd, so initialize desc accordingly
|
||||
if (metaLogBegin.present() && metaLogEnd.present()) {
|
||||
// minLogBegin is the greater of the log begin metadata OR the unreliable end version since we can't count
|
||||
// on log file presence before that version.
|
||||
|
|
|
@ -1341,7 +1341,7 @@ void DecryptBlobCipherAes256Ctr::validateAuthTokenV1(const uint8_t* ciphertext,
|
|||
BlobCipherEncryptHeaderRef headerRefCopy = BlobCipherEncryptHeaderRef(headerRef);
|
||||
|
||||
AesCtrWithAuth<Params> algoHeaderCopy = std::get<AesCtrWithAuth<Params>>(headerRefCopy.algoHeader);
|
||||
// preserve the 'persisted' token for future validation before reseting the field
|
||||
// preserve the 'persisted' token for future validation before resetting the field
|
||||
memcpy(&persisted[0], &algoHeaderCopy.v1.authToken[0], Params::authTokenSize);
|
||||
memset(&algoHeaderCopy.v1.authToken[0], 0, Params::authTokenSize);
|
||||
|
||||
|
@ -1905,7 +1905,7 @@ void Sha256KCV::checkEqual(const Reference<BlobCipherKey>& cipher, const Encrypt
|
|||
CODE_PROBE(true, "Sha256 Key Check Value mismatch");
|
||||
TraceEvent(SevWarnAlways, "Sha256KCVMismatch")
|
||||
.detail("Computed", cipher->getBaseCipherKCV())
|
||||
.detail("Persited", persisted)
|
||||
.detail("Persisted", persisted)
|
||||
.detail("DomainId", cipher->getDomainId())
|
||||
.detail("BaseCipherId", cipher->getBaseCipherId());
|
||||
throw encrypt_key_check_value_mismatch();
|
||||
|
@ -2061,7 +2061,7 @@ void testKeyCacheEssentials(DomainKeyMap& domainKeyMap,
|
|||
}
|
||||
TraceEvent("TestLooksupDone").log();
|
||||
|
||||
// Ensure attemtping to insert existing cipherKey (identical) more than once is treated as a NOP
|
||||
// Ensure attempting to insert existing cipherKey (identical) more than once is treated as a NOP
|
||||
try {
|
||||
Reference<BaseCipher> baseCipher = domainKeyMap[minDomainId][minBaseCipherKeyId];
|
||||
cipherKeyCache->insertCipherKey(baseCipher->domainId,
|
||||
|
@ -2076,7 +2076,7 @@ void testKeyCacheEssentials(DomainKeyMap& domainKeyMap,
|
|||
}
|
||||
TraceEvent("TestReinsertIdempotentKeyDone").log();
|
||||
|
||||
// Ensure attemtping to insert an existing cipherKey (modified) fails with appropriate error
|
||||
// Ensure attempting to insert an existing cipherKey (modified) fails with appropriate error
|
||||
try {
|
||||
Reference<BaseCipher> baseCipher = domainKeyMap[minDomainId][minBaseCipherKeyId];
|
||||
uint8_t rawCipher[baseCipher->len];
|
||||
|
|
|
@ -369,8 +369,8 @@ struct IndexBlockRef {
|
|||
void finalize(Optional<BlobGranuleCipherKeysCtx> cipherKeysCtx, Arena& arena) {
|
||||
if (cipherKeysCtx.present()) {
|
||||
// IndexBlock childBlock pointers offsets are relative to IndexBlock endOffset instead of file start offset.
|
||||
// Compressing indexBlock will need offset recalculation (circular depedency). IndexBlock size is bounded by
|
||||
// number of chunks and sizeof(KeyPrefix), 'not' compressing IndexBlock shouldn't cause significant file
|
||||
// Compressing indexBlock will need offset recalculation (circular dependency). IndexBlock size is bounded
|
||||
// by number of chunks and sizeof(KeyPrefix), 'not' compressing IndexBlock shouldn't cause significant file
|
||||
// size bloat.
|
||||
CODE_PROBE(true, "encrypting index block");
|
||||
ASSERT(cipherKeysCtx.present());
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
#include "fdbclient/CoordinationInterface.h"
|
||||
#include "fdbclient/GetEncryptCipherKeys_impl.actor.h"
|
||||
|
||||
// Instantiate ClientDBInfo related tempates
|
||||
// Instantiate ClientDBInfo related templates
|
||||
template class ReplyPromise<struct ClientDBInfo>;
|
||||
template class ReplyPromise<CachedSerialization<ClientDBInfo>>;
|
||||
template class GetEncryptCipherKeys<ClientDBInfo>;
|
||||
|
|
|
@ -741,7 +741,7 @@ struct CopyLogRangeTaskFunc : TaskFuncBase {
|
|||
.get(BackupAgentBase::keyConfig)
|
||||
.get(task->params[BackupAgentBase::keyConfigLogUid]);
|
||||
state std::vector<RangeResult> nextMutations;
|
||||
state bool isTimeoutOccured = false;
|
||||
state bool isTimeoutOccurred = false;
|
||||
state Optional<KeyRef> lastKey;
|
||||
state Version lastVersion;
|
||||
state int64_t nextMutationSize = 0;
|
||||
|
@ -799,7 +799,7 @@ struct CopyLogRangeTaskFunc : TaskFuncBase {
|
|||
bool first = true;
|
||||
for (auto m : mutations) {
|
||||
for (auto kv : m) {
|
||||
if (isTimeoutOccured) {
|
||||
if (isTimeoutOccurred) {
|
||||
Version newVersion = getLogKeyVersion(kv.key);
|
||||
|
||||
if (newVersion > lastVersion) {
|
||||
|
@ -832,13 +832,13 @@ struct CopyLogRangeTaskFunc : TaskFuncBase {
|
|||
if (nextVersionAfterBreak.present()) {
|
||||
return nextVersionAfterBreak;
|
||||
}
|
||||
if (!isTimeoutOccured && timer_monotonic() >= breakTime && lastKey.present()) {
|
||||
// timeout occured
|
||||
if (!isTimeoutOccurred && timer_monotonic() >= breakTime && lastKey.present()) {
|
||||
// timeout occurred
|
||||
// continue to copy mutations with the
|
||||
// same version before break because
|
||||
// the next run should start from the beginning of a version > lastVersion.
|
||||
lastVersion = getLogKeyVersion(lastKey.get());
|
||||
isTimeoutOccured = true;
|
||||
isTimeoutOccurred = true;
|
||||
}
|
||||
} catch (Error& e) {
|
||||
if (e.code() == error_code_actor_cancelled || e.code() == error_code_backup_error)
|
||||
|
|
|
@ -413,7 +413,7 @@ std::string DatabaseConfiguration::configureStringFromJSON(const StatusObject& j
|
|||
result += kv.first + ":=" + format("%d", kv.second.get_int());
|
||||
} else if (kv.second.type() == json_spirit::str_type) {
|
||||
// For string values, some properties can set with a "<name>=<value>" syntax in "configure"
|
||||
// Such properites are listed here:
|
||||
// Such properties are listed here:
|
||||
static std::set<std::string> directSet = {
|
||||
"storage_migration_type", "tenant_mode", "encryption_at_rest_mode",
|
||||
"storage_engine", "log_engine", "perpetual_storage_wiggle_engine"
|
||||
|
|
|
@ -121,8 +121,8 @@ typedef FileBackupAgent::ERestoreState ERestoreState;
|
|||
|
||||
StringRef FileBackupAgent::restoreStateText(ERestoreState id) {
|
||||
switch (id) {
|
||||
case ERestoreState::UNITIALIZED:
|
||||
return "unitialized"_sr;
|
||||
case ERestoreState::UNINITIALIZED:
|
||||
return "uninitialized"_sr;
|
||||
case ERestoreState::QUEUED:
|
||||
return "queued"_sr;
|
||||
case ERestoreState::STARTING:
|
||||
|
@ -265,7 +265,7 @@ public:
|
|||
|
||||
Future<bool> isRunnable(Reference<ReadYourWritesTransaction> tr) {
|
||||
return map(stateEnum().getD(tr), [](ERestoreState s) -> bool {
|
||||
return s != ERestoreState::ABORTED && s != ERestoreState::COMPLETED && s != ERestoreState::UNITIALIZED;
|
||||
return s != ERestoreState::ABORTED && s != ERestoreState::COMPLETED && s != ERestoreState::UNINITIALIZED;
|
||||
});
|
||||
}
|
||||
|
||||
|
@ -2165,7 +2165,7 @@ struct BackupSnapshotDispatchTask : BackupTaskFuncBase {
|
|||
// so store a completion key for the dispatch finish() to set when dispatching the batch is
|
||||
// done.
|
||||
state TaskCompletionKey dispatchCompletionKey = TaskCompletionKey::joinWith(snapshotBatchFuture);
|
||||
// this is a bad hack - but flow doesn't work well with lambda functions and caputring
|
||||
// this is a bad hack - but flow doesn't work well with lambda functions and capturing
|
||||
// state variables...
|
||||
auto cfg = &config;
|
||||
auto tx = &tr;
|
||||
|
@ -4722,7 +4722,7 @@ ACTOR Future<ERestoreState> abortRestore(Reference<ReadYourWritesTransaction> tr
|
|||
state KeyBackedTag tag = makeRestoreTag(tagName.toString());
|
||||
state Optional<UidAndAbortedFlagT> current = wait(tag.get(tr));
|
||||
if (!current.present())
|
||||
return ERestoreState::UNITIALIZED;
|
||||
return ERestoreState::UNINITIALIZED;
|
||||
|
||||
state RestoreConfig restore(current.get().first);
|
||||
|
||||
|
@ -5634,8 +5634,8 @@ public:
|
|||
if (verbose)
|
||||
printf("waitRestore: Tag: %s State: %s\n",
|
||||
tagName.toString().c_str(),
|
||||
FileBackupAgent::restoreStateText(ERestoreState::UNITIALIZED).toString().c_str());
|
||||
return ERestoreState::UNITIALIZED;
|
||||
FileBackupAgent::restoreStateText(ERestoreState::UNINITIALIZED).toString().c_str());
|
||||
return ERestoreState::UNINITIALIZED;
|
||||
}
|
||||
|
||||
state RestoreConfig restore(current.get().first);
|
||||
|
|
|
@ -124,7 +124,7 @@ public:
|
|||
}
|
||||
};
|
||||
|
||||
// Sample function to make instanciation of SampleSender easier
|
||||
// Sample function to make instantiation of SampleSender easier
|
||||
template <class Protocol, class Callback>
|
||||
std::shared_ptr<SampleSender<Protocol, Callback>> makeSampleSender(typename Protocol::socket& socket,
|
||||
Callback const& callback,
|
||||
|
|
|
@ -1388,7 +1388,7 @@ void MultiVersionTransaction::updateTransaction(bool setPersistentOptions) {
|
|||
}
|
||||
|
||||
// When called from the constructor or from reset(), all persistent options are database options and therefore
|
||||
// alredy set on newTr.transaction if it got created sucessfully. If newTr.transaction could not be created (i.e.,
|
||||
// already set on newTr.transaction if it got created successfully. If newTr.transaction could not be created (i.e.,
|
||||
// because no database with a matching version is present), the local timeout set in setTimeout() applies, so we
|
||||
// need to set it.
|
||||
if (setPersistentOptions || !newTr.transaction) {
|
||||
|
|
|
@ -2264,7 +2264,7 @@ void DatabaseContext::expireThrottles() {
|
|||
// file name, and also used to annotate all trace events.
|
||||
//
|
||||
// If trace_initialize_on_setup is not set, tracing is initialized when opening a database.
|
||||
// In that case we can immediatelly determine the IP. Thus, we can use the IP in the
|
||||
// In that case we can immediately determine the IP. Thus, we can use the IP in the
|
||||
// trace file name and annotate all events with it.
|
||||
//
|
||||
// If trace_initialize_on_setup network option is set, tracing is at first initialized without
|
||||
|
@ -5421,7 +5421,7 @@ ACTOR Future<Void> getRangeStream(Reference<TransactionState> trState,
|
|||
state Key e = wait(fe);
|
||||
|
||||
if (!snapshot) {
|
||||
// FIXME: this conflict range is too large, and should be updated continously as results are returned
|
||||
// FIXME: this conflict range is too large, and should be updated continuously as results are returned
|
||||
conflictRange.send(std::make_pair(std::min(b, Key(begin.getKey(), begin.arena())),
|
||||
std::max(e, Key(end.getKey(), end.arena()))));
|
||||
}
|
||||
|
@ -6057,7 +6057,7 @@ void Transaction::atomicOp(const KeyRef& key,
|
|||
|
||||
void TransactionState::addClearCost() {
|
||||
// NOTE: The throttling cost of each clear is assumed to be one page.
|
||||
// This makes compuation fast, but can be inaccurate and may
|
||||
// This makes computation fast, but can be inaccurate and may
|
||||
// underestimate the cost of large clears.
|
||||
totalCost += CLIENT_KNOBS->TAG_THROTTLING_PAGE_SIZE;
|
||||
}
|
||||
|
@ -6487,7 +6487,7 @@ void Transaction::setupWatches() {
|
|||
|
||||
watches.clear();
|
||||
} catch (Error&) {
|
||||
ASSERT(false); // The above code must NOT throw because commit has already occured.
|
||||
ASSERT(false); // The above code must NOT throw because commit has already occurred.
|
||||
throw internal_error();
|
||||
}
|
||||
}
|
||||
|
|
|
@ -86,10 +86,10 @@ public:
|
|||
};
|
||||
|
||||
// read() Performs a read (get, getKey, getRange, etc), in the context of the given transaction. Snapshot or RYW
|
||||
// reads are distingushed by the type Iter being SnapshotCache::iterator or RYWIterator. Fills in the snapshot cache
|
||||
// as a side effect but does not affect conflict ranges. Some (indicated) overloads of read are required to update
|
||||
// the given *it to point to the key that was read, so that the corresponding overload of addConflictRange() can
|
||||
// make use of it.
|
||||
// reads are distinguished by the type Iter being SnapshotCache::iterator or RYWIterator. Fills in the snapshot
|
||||
// cache as a side effect but does not affect conflict ranges. Some (indicated) overloads of read are required
|
||||
// to update the given *it to point to the key that was read, so that the corresponding overload of
|
||||
// addConflictRange() can make use of it.
|
||||
|
||||
ACTOR template <class Iter>
|
||||
static Future<Optional<Value>> read(ReadYourWritesTransaction* ryw, GetValueReq read, Iter* it) {
|
||||
|
|
|
@ -866,7 +866,7 @@ ACTOR Future<Reference<HTTP::IncomingResponse>> doRequest_impl(Reference<S3BlobS
|
|||
req->data.headers["Host"] = bstore->host;
|
||||
req->data.headers["Accept"] = "application/xml";
|
||||
|
||||
// Avoid to send request with an empty resouce.
|
||||
// Avoid to send request with an empty resource.
|
||||
if (resource.empty()) {
|
||||
resource = "/";
|
||||
}
|
||||
|
|
|
@ -1008,7 +1008,7 @@ ACTOR Future<bool> checkExclusion(Database db,
|
|||
state int64_t totalKvStoreUsedBytes = 0;
|
||||
state int64_t totalKvStoreUsedBytesNotExcluded = 0;
|
||||
state int64_t totalKvStoreAvailableBytes = 0;
|
||||
// Keep track if we exclude any storage process with the provided adddresses
|
||||
// Keep track if we exclude any storage process with the provided addresses
|
||||
state bool excludedAddressesContainsStorageRole = false;
|
||||
|
||||
try {
|
||||
|
@ -1130,7 +1130,7 @@ void includeServers(ReadYourWritesTransaction* ryw) {
|
|||
// CAUSAL_WRITE_RISKY
|
||||
ryw->setOption(FDBTransactionOptions::CAUSAL_WRITE_RISKY);
|
||||
std::string versionKey = deterministicRandom()->randomUniqueID().toString();
|
||||
// for exluded servers
|
||||
// for excluded servers
|
||||
auto ranges =
|
||||
ryw->getSpecialKeySpaceWriteMap().containedRanges(SpecialKeySpace::getManagementApiCommandRange("exclude"));
|
||||
auto iter = ranges.begin();
|
||||
|
@ -1327,7 +1327,7 @@ ACTOR Future<RangeResult> getProcessClassActor(ReadYourWritesTransaction* ryw, K
|
|||
workers.erase(last, workers.end());
|
||||
RangeResult result;
|
||||
for (auto& w : workers) {
|
||||
// exclude :tls in keys even the network addresss is TLS
|
||||
// exclude :tls in keys even the network address is TLS
|
||||
KeyRef k(prefix.withSuffix(formatIpPort(w.address.ip, w.address.port), result.arena()));
|
||||
if (kr.contains(k)) {
|
||||
ValueRef v(result.arena(), w.processClass.toString());
|
||||
|
@ -1450,7 +1450,7 @@ ACTOR Future<RangeResult> getProcessClassSourceActor(ReadYourWritesTransaction*
|
|||
workers.erase(last, workers.end());
|
||||
RangeResult result;
|
||||
for (auto& w : workers) {
|
||||
// exclude :tls in keys even the network addresss is TLS
|
||||
// exclude :tls in keys even the network address is TLS
|
||||
Key k(prefix.withSuffix(formatIpPort(w.address.ip, w.address.port)));
|
||||
if (kr.contains(k)) {
|
||||
Value v(w.processClass.sourceString());
|
||||
|
@ -1771,12 +1771,12 @@ ACTOR Future<RangeResult> coordinatorsGetRangeActor(ReadYourWritesTransaction* r
|
|||
state ClusterConnectionString cs = ryw->getDatabase()->getConnectionRecord()->getConnectionString();
|
||||
state std::vector<NetworkAddress> coordinator_processes = wait(cs.tryResolveHostnames());
|
||||
RangeResult result;
|
||||
Key cluster_decription_key = prefix.withSuffix("cluster_description"_sr);
|
||||
if (kr.contains(cluster_decription_key)) {
|
||||
result.push_back_deep(result.arena(), KeyValueRef(cluster_decription_key, cs.clusterKeyName()));
|
||||
Key cluster_description_key = prefix.withSuffix("cluster_description"_sr);
|
||||
if (kr.contains(cluster_description_key)) {
|
||||
result.push_back_deep(result.arena(), KeyValueRef(cluster_description_key, cs.clusterKeyName()));
|
||||
}
|
||||
// Note : the sort by string is anti intuition, ex. 1.1.1.1:11 < 1.1.1.1:5
|
||||
// include :tls in keys if the network addresss is TLS
|
||||
// include :tls in keys if the network address is TLS
|
||||
std::sort(coordinator_processes.begin(),
|
||||
coordinator_processes.end(),
|
||||
[](const NetworkAddress& lhs, const NetworkAddress& rhs) { return lhs.toString() < rhs.toString(); });
|
||||
|

@@ -1848,8 +1848,8 @@ ACTOR static Future<Optional<std::string>> coordinatorsCommitActor(ReadYourWrite

std::string newName;
// check update for cluster_description
Key cluster_decription_key = "cluster_description"_sr.withPrefix(kr.begin);
auto entry = ryw->getSpecialKeySpaceWriteMap()[cluster_decription_key];
Key cluster_description_key = "cluster_description"_sr.withPrefix(kr.begin);
auto entry = ryw->getSpecialKeySpaceWriteMap()[cluster_description_key];
if (entry.first) {
// check valid description [a-zA-Z0-9_]+
if (entry.second.present() && isAlphaNumeric(entry.second.get().toString())) {

@@ -166,7 +166,7 @@ void JSONDoc::cleanOps(json_spirit::mObject& obj) {
if (version == 0 || version > JSONDoc::expires_reference_version)
kv->second = o.at(op);
else {
// Thing is expired so competely remove its key from the enclosing Object
// Thing is expired so completely remove its key from the enclosing Object
auto tmp = kv;
++kv;
obj.erase(tmp);
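
The `tmp`/`++kv` sequence above is the usual idiom for erasing from an associative container while iterating over it. A minimal sketch with plain STL types (the json_spirit containers used here behave the same way):

```cpp
#include <map>
#include <string>

// Erase "expired" entries without invalidating the loop iterator:
// save the current iterator, advance past it, then erase the saved copy.
void eraseExpired(std::map<std::string, int>& obj) {
    for (auto kv = obj.begin(); kv != obj.end();) {
        if (kv->second < 0) { // stand-in for the "expired" test
            auto tmp = kv;
            ++kv;
            obj.erase(tmp);
        } else {
            ++kv;
        }
    }
}
```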

@@ -572,7 +572,7 @@ TEST_CASE("/StorageServerInterface/TSSCompare/TestComparison") {
ASSERT(TSS_doCompare(GetKeyReply(KeySelectorRef(StringRef(a, s_a), true, 0), false),
GetKeyReply(KeySelectorRef(StringRef(a, s_a), false, 1), false)));

// explictly test checksum function
// explicitly test checksum function
std::string s12 = "ABCDEFGHIJKL";
std::string s13 = "ABCDEFGHIJKLO";
std::string checksumStart13 = "(13)";

@@ -62,7 +62,7 @@ const KeyRangeRef keyServersKeyServersKeys("\xff/keyServers/\xff/keyServers/"_sr
const KeyRef keyServersKeyServersKey = keyServersKeyServersKeys.begin;

// These constants are selected to be easily recognized during debugging.
// Note that the last bit of the follwing constants is 0, indicating that physical shard move is disabled.
// Note that the last bit of the following constants is 0, indicating that physical shard move is disabled.
const UID anonymousShardId = UID(0x666666, 0x88888888);
const uint64_t emptyShardId = 0x2222222;

@@ -4,7 +4,7 @@ Make sure we built FDB with `-DBUILD_AZURE_BACKUP=ON`

# Test

If you run _BackupToBlob_ and _RestoreFromBlob_ workloads with the paramter _backupURL_ starts with `azure://`,
If you run _BackupToBlob_ and _RestoreFromBlob_ workloads with the parameter _backupURL_ starts with `azure://`,
the workload will backup to and restore from the azure blob storage.
For example, _BackupAzureBlobCorrectness.toml_

@@ -18,7 +18,7 @@ The code now supports the following style urls:
## Local test environment

We need to use the _Azurite_ to simulate an Azure blob service locally.
Please follow the [turtorial](https://docs.microsoft.com/en-us/azure/storage/common/storage-use-azurite?tabs=docker-hub) to start your service locally.
Please follow the [tutorial](https://docs.microsoft.com/en-us/azure/storage/common/storage-use-azurite?tabs=docker-hub) to start your service locally.

For example,
```

@@ -28,6 +28,6 @@ docker run -p 10000:10000 -v `pwd`:<path> -w <path> mcr.microsoft.com/azure-stor
### Notice

- To use uses _https_, we need to provide the certificates via `--cert` and `--key`
The detailed [turtorial](https://github.com/Azure/Azurite/blob/main/README.md#https-setup) to setup HTTPS. (We tested with the `mkcert` method)
The detailed [tutorial](https://github.com/Azure/Azurite/blob/main/README.md#https-setup) to setup HTTPS. (We tested with the `mkcert` method)
- To use Azure SDKs, we need to pass `--oauth basic` option
- Please take a look at the [difference](https://github.com/Azure/Azurite/blob/main/README.md#differences-between-azurite-and-azure-storage) between Azurite and Azure Storage

@@ -148,7 +148,7 @@ class SampleCollection_t {

public:
/**
* Define how many samples the collection shoul keep. The window size is defined by time dimension.
* Define how many samples the collection should keep. The window size is defined by time dimension.
*
* \param duration How long a sample should be kept in the collection.
*/

@@ -158,7 +158,7 @@ public:

/** RESTORE **/

enum ERestoreState { UNITIALIZED = 0, QUEUED = 1, STARTING = 2, RUNNING = 3, COMPLETED = 4, ABORTED = 5 };
enum ERestoreState { UNINITIALIZED = 0, QUEUED = 1, STARTING = 2, RUNNING = 3, COMPLETED = 4, ABORTED = 5 };
static StringRef restoreStateText(ERestoreState id);
static Key getPauseKey();

@@ -696,7 +696,7 @@ public:
return map(tag().get(tr), [u, p, task](Optional<std::string> const& tag) -> Void {
if (!tag.present())
throw restore_error();
// Validation contition is that the uidPair key must be exactly {u, false}
// Validation condition is that the uidPair key must be exactly {u, false}
TaskBucket::setValidationCondition(
task, KeyBackedTag(tag.get(), p).key, TupleCodec<UidAndAbortedFlagT>::pack({ u, false }));
return Void();

@@ -898,7 +898,7 @@ public:
// Latest version for which all prior versions have saved by backup workers.
KeyBackedProperty<Version> latestBackupWorkerSavedVersion() { return configSpace.pack(__FUNCTION__sr); }

// Stop differntial logging if already started or don't start after completing KV ranges
// Stop differential logging if already started or don't start after completing KV ranges
KeyBackedProperty<bool> stopWhenDone() { return configSpace.pack(__FUNCTION__sr); }

// Enable snapshot backup file encryption

@@ -181,7 +181,7 @@ private:
Future<Void> clear();
};

// To avoid the need to scan the underyling filesystem in many cases, some important version boundaries are stored
// To avoid the need to scan the underlying filesystem in many cases, some important version boundaries are stored
// in named files. These versions also indicate what version ranges are known to be deleted or partially deleted.
//
// The values below describe version ranges as follows:

@@ -512,7 +512,7 @@ struct BlobCipherEncryptHeaderRef {

// API supports following validation:
// 1. Ensure input BlobCipherDetails (textDetails and/or headerDetails) matches with the input
// 2. Ensure persited KCV matches with the input values
// 2. Ensure persisted KCV matches with the input values
// 3. Ensure input IV buffer matches with the persisted ones.
//
// Currently API is used by BlobGranule encryption where encryption key lookup based on persisted

@@ -702,7 +702,7 @@ private:
};

// This interface allows FDB processes participating in encryption to store and
// index recently used encyption cipher keys. FDB encryption has two dimensions:
// index recently used encryption cipher keys. FDB encryption has two dimensions:
// 1. Mapping on cipher encryption keys per "encryption domains"
// 2. Per encryption domain, the cipher keys are index using {baseCipherKeyId, salt} tuple.
//

@@ -710,7 +710,7 @@ private:
// key. For details refer to:
// https://csrc.nist.gov/publications/detail/sp/800-57-part-1/rev-3/archive/2012-07-10
//
// Below gives a pictoral representation of in-memory datastructure implemented
// Below gives a pictorial representation of in-memory datastructure implemented
// to index encryption keys:
// { encryptionDomain -> { {baseCipherId, salt} -> cipherKey } }
//

@@ -767,10 +767,10 @@ public:
// API enables inserting base encryption cipher details to the BlobCipherKeyIdCache
// Given cipherKeys are immutable, attempting to re-insert same 'identical' cipherKey
// is treated as a NOP (success), however, an attempt to update cipherKey would throw
// 'encrypt_update_cipher' exception. Returns the inserted cipher key if sucess.
// 'encrypt_update_cipher' exception. Returns the inserted cipher key if success.
//
// API NOTE: Recommended usecase is to update encryption cipher-key regeneration while performing
// decryption. The encryptionheader would contain relevant details including: 'encryptDomainId',
// decryption. The encryption header would contain relevant details including: 'encryptDomainId',
// 'baseCipherId' & 'salt'. The caller needs to fetch 'baseCipherKey' detail and re-populate KeyCache.
// Also, the invocation will NOT update the latest cipher-key details.
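
A rough sketch of the "identical re-insert is a NOP, update is an error" behaviour described above, using a plain map keyed by an assumed baseCipherId; the names and types here are illustrative, not the FDB API:

```cpp
#include <cstdint>
#include <map>
#include <stdexcept>
#include <string>

std::map<uint64_t, std::string> keyIdCache;

const std::string& insertBaseCipherKey(uint64_t baseCipherId, const std::string& baseCipherKey) {
    auto [it, inserted] = keyIdCache.try_emplace(baseCipherId, baseCipherKey);
    if (!inserted && it->second != baseCipherKey) {
        // cipher keys are immutable; attempting to change one is an error
        throw std::runtime_error("encrypt_update_cipher");
    }
    return it->second; // re-inserting the identical key is a NOP (success)
}
```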

@@ -788,7 +788,7 @@ public:
// API returns list of all 'cached' cipherKeys
std::vector<Reference<BlobCipherKey>> getAllCipherKeys();

// Return number of cipher keys in the cahce.
// Return number of cipher keys in the cache.
size_t getSize() const { return keyIdCache.size(); }

private:

@@ -803,7 +803,7 @@ using BlobCipherDomainCacheMap = std::unordered_map<EncryptCipherDomainId, Refer

class BlobCipherKeyCache : NonCopyable, public ReferenceCounted<BlobCipherKeyCache> {
public:
// Public visibility constructior ONLY to assist FlowSingleton instance creation.
// Public visibility constructor ONLY to assist FlowSingleton instance creation.
// API Note: Constructor is expected to be instantiated only in simulation mode.

explicit BlobCipherKeyCache(bool ignored) { ASSERT(g_network->isSimulated()); }

@@ -1010,7 +1010,7 @@ private:
Optional<Reference<BlobCipherKey>> headerCipherKeyOpt;
bool authTokensValidationDone;

// API is resposible to validate persisted EncryptionHeader sanity, it does following checks
// API is responsible to validate persisted EncryptionHeader sanity, it does following checks
// 1. Parse and validate EncryptionHeaderFlags (version compliant checks)
// 2. Parse and validate KCVs
// 3. Parse and validate auth-tokens if applicable.

@@ -1076,7 +1076,7 @@ public:

EncryptCipherKeyCheckValue computeKCV(const uint8_t* cipher, const int len);

static void checkEqual(const Reference<BlobCipherKey>& cipher, const EncryptCipherKeyCheckValue persited);
static void checkEqual(const Reference<BlobCipherKey>& cipher, const EncryptCipherKeyCheckValue persisted);

private:
EVP_MD_CTX* ctx;

@@ -52,7 +52,7 @@ struct BlobWorkerStats {
int64_t numRangesAssigned;
int64_t mutationBytesBuffered;
int activeReadRequests;
// TODO: add gauge for granules blocking on old snapshots, once this guage is fixed
// TODO: add gauge for granules blocking on old snapshots, once this gauge is fixed
int granulesPendingSplitCheck;
Version minimumCFVersion;
Version cfVersionLag;

@@ -354,7 +354,7 @@ struct CommitTransactionRef {
VectorRef<KeyRangeRef> read_conflict_ranges;
VectorRef<KeyRangeRef> write_conflict_ranges;
VectorRef<MutationRef> mutations; // metadata mutations
// encryptedMutations should be a 1-1 corespondence with mutations field above. That is either
// encryptedMutations should be a 1-1 correspondence with mutations field above. That is either
// encryptedMutations.size() == 0 or encryptedMutations.size() == mutations.size() and encryptedMutations[i] =
// mutations[i].encrypt(). Currently this field is not serialized so clients should NOT set this field during a
// usual commit path. It is currently only used during backup mutation log restores.
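
A minimal sketch of the size invariant stated in the comment above, written against plain std::vector for illustration (the real fields are VectorRef, and this helper is not part of the source):

```cpp
#include <cassert>
#include <vector>

// encryptedMutations is either unset (empty) or a strict 1-1 mapping of mutations.
template <class Mutation>
void checkEncryptedMutations(const std::vector<Mutation>& mutations,
                             const std::vector<Mutation>& encryptedMutations) {
    assert(encryptedMutations.empty() || encryptedMutations.size() == mutations.size());
}
```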

@@ -94,7 +94,7 @@ struct ConsistencyScanState : public KeyBackedClass {

bool enabled = false;

// The values below are NOT being intialized from knobs because once the scan is enabled
// The values below are NOT being initialized from knobs because once the scan is enabled
// changing the knobs does nothing. The consistency check knobs are for the consistency
// check workload, which is different from the Consistency Scan feature

@@ -26,7 +26,7 @@

Conversions between UTF32, UTF-16, and UTF-8. Header file.

Several funtions are included here, forming a complete set of
Several functions are included here, forming a complete set of
conversions between the three formats. UTF-7 is not included
here, but is handled in a separate source file.

@@ -104,7 +104,7 @@ typedef unsigned char Boolean; /* 0 or 1 */
typedef enum {
conversionOK, /* conversion successful */
sourceExhausted, /* partial character in source, but hit end */
targetExhausted, /* insuff. room in target for conversion */
targetExhausted, /* insufficient room in target for conversion */
sourceIllegal /* source sequence is illegal/malformed */
} ConversionResult;
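
A hedged sketch of how a caller might react to each ConversionResult value defined above; the handling policy here is illustrative and not part of the original header:

```cpp
// Assumes the ConversionResult enum from the header above is in scope.
const char* describeConversionResult(ConversionResult r) {
    switch (r) {
    case conversionOK:
        return "conversion successful";
    case sourceExhausted:
        return "partial character at end of source; supply more input";
    case targetExhausted:
        return "target buffer too small; grow it and retry";
    case sourceIllegal:
        return "malformed source sequence; reject the input";
    }
    return "unknown result";
}
```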

@@ -134,10 +134,10 @@ public:
// the connection string stored in memory.
virtual Future<ClusterConnectionString> getStoredConnectionString() = 0;

// Checks whether the connection string in persisten storage matches the connection string stored in memory.
// Checks whether the connection string in persistent storage matches the connection string stored in memory.
Future<bool> upToDate();

// Checks whether the connection string in persisten storage matches the connection string stored in memory. The
// Checks whether the connection string in persistent storage matches the connection string stored in memory. The
// cluster string stored in persistent storage is returned via the reference parameter connectionString.
virtual Future<bool> upToDate(ClusterConnectionString& connectionString) = 0;

@@ -172,7 +172,7 @@ protected:
ClusterConnectionString cs;

private:
// A flag that indicates whether this connection record needs to be persisted when it succesfully establishes a
// A flag that indicates whether this connection record needs to be persisted when it successfully establishes a
// connection.
bool connectionStringNeedsPersisted;
};

@@ -189,7 +189,7 @@ struct LeaderInfo {
static const uint64_t changeIDMask = ~(uint64_t(0b1111111) << 57);
Value serializedInfo;
// If true, serializedInfo is a connection string instead!
// If true, it also means the receipient need to update their local cluster file
// If true, it also means the recipient need to update their local cluster file
// with the latest list of coordinators
bool forward;

@@ -147,7 +147,7 @@ struct DatabaseConfiguration {
return result;
}

// Counts the number of DCs required including remote and satellites for current database configuraiton.
// Counts the number of DCs required including remote and satellites for current database configuration.
int32_t minDatacentersRequired() const {
int minRequired = 0;
for (auto& r : regions) {

@@ -165,9 +165,9 @@ struct DatabaseConfiguration {
return minRequired;
}

// Retuns the maximum number of discrete failures a cluster can tolerate.
// Returns the maximum number of discrete failures a cluster can tolerate.
// In HA mode, `fullyReplicatedRegions` is set to "1" initially when data is being
// replicated to remote, and will be incremented later. `forAvailablity` is set to true
// replicated to remote, and will be incremented later. `forAvailability` is set to true
// if we want to account the number for machines that can recruit new tLogs/SS after failures.
// Killing an entire datacenter counts as killing one zone in modes that support it.
int32_t maxZoneFailuresTolerated(int fullyReplicatedRegions, bool forAvailability) const {
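
As a hedged, illustrative data point (an assumption, not taken from the source): for a single-region cluster the tolerated zone failures essentially track the replication factor, e.g.:

```cpp
// Illustrative only; the real maxZoneFailuresTolerated() also weighs regions,
// satellite configurations and the forAvailability flag described above.
// Assumes a hypothetical single-region cluster configured with "triple" replication.
int illustrativeMaxZoneFailures() {
    const int storageReplicas = 3; // three copies of every shard
    return storageReplicas - 1;    // two zones can fail without losing data
}
```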

@@ -528,7 +528,7 @@ public:

// Disallow any reads at a read version lower than minAcceptableReadVersion. This way the client does not have to
// trust that the read version (possibly set manually by the application) is actually from the correct cluster.
// Updated everytime we get a GRV response
// Updated every time we get a GRV response
Version minAcceptableReadVersion = std::numeric_limits<Version>::max();
void validateVersion(Version) const;

@@ -168,7 +168,7 @@ struct TagsAndMessage {
TagsAndMessage(StringRef message, VectorRef<Tag> tags) : message(message), tags(tags) {}

// Loads tags and message from a serialized buffer. "rd" is checkpointed at
// its begining position to allow the caller to rewind if needed.
// its beginning position to allow the caller to rewind if needed.
// T can be ArenaReader or BinaryReader.
template <class T>
void loadFromArena(T* rd, uint32_t* messageVersionSub) {

@@ -27,7 +27,7 @@

/* This file defines "management" interfaces that have been templated to support both IClientAPI
and Native version of databases, transactions, etc., and includes functions for performing cluster
managment tasks. It isn't exposed to C clients or anywhere outside our code base and doesn't need
management tasks. It isn't exposed to C clients or anywhere outside our code base and doesn't need
to be versioned. It doesn't do anything you can't do with the standard API and some knowledge of
the contents of the system key space.
*/
Some files were not shown because too many files have changed in this diff.