Merge remote-tracking branch 'origin/master' into fix-clang-warnings

This commit is contained in:
sfc-gh-tclinkenbeard 2021-07-28 12:29:27 -07:00
commit 94a65865d9
114 changed files with 866 additions and 441 deletions

View File

@ -42,7 +42,7 @@ FDBLibTLSPolicy::FDBLibTLSPolicy(Reference<FDBLibTLSPlugin> plugin)
key_data_set(false), verify_peers_set(false) {
if ((tls_cfg = tls_config_new()) == nullptr) {
TraceEvent(SevError, "FDBLibTLSConfigError");
TraceEvent(SevError, "FDBLibTLSConfigError").log();
throw std::runtime_error("FDBLibTLSConfigError");
}
@ -67,14 +67,14 @@ ITLSSession* FDBLibTLSPolicy::create_session(bool is_client,
// servername, since this will be ignored - the servername should be
// matched by the verify criteria instead.
if (verify_peers_set && servername != nullptr) {
TraceEvent(SevError, "FDBLibTLSVerifyPeersWithServerName");
TraceEvent(SevError, "FDBLibTLSVerifyPeersWithServerName").log();
return nullptr;
}
// If verify peers has not been set, then require a server name to
// avoid an accidental lack of name validation.
if (!verify_peers_set && servername == nullptr) {
TraceEvent(SevError, "FDBLibTLSNoServerName");
TraceEvent(SevError, "FDBLibTLSNoServerName").log();
return nullptr;
}
}
@ -123,18 +123,18 @@ struct stack_st_X509* FDBLibTLSPolicy::parse_cert_pem(const uint8_t* cert_pem, s
if (cert_pem_len > INT_MAX)
goto err;
if ((bio = BIO_new_mem_buf((void*)cert_pem, cert_pem_len)) == nullptr) {
TraceEvent(SevError, "FDBLibTLSOutOfMemory");
TraceEvent(SevError, "FDBLibTLSOutOfMemory").log();
goto err;
}
if ((certs = sk_X509_new_null()) == nullptr) {
TraceEvent(SevError, "FDBLibTLSOutOfMemory");
TraceEvent(SevError, "FDBLibTLSOutOfMemory").log();
goto err;
}
ERR_clear_error();
while ((cert = PEM_read_bio_X509(bio, nullptr, password_cb, nullptr)) != nullptr) {
if (!sk_X509_push(certs, cert)) {
TraceEvent(SevError, "FDBLibTLSOutOfMemory");
TraceEvent(SevError, "FDBLibTLSOutOfMemory").log();
goto err;
}
}
@ -150,7 +150,7 @@ struct stack_st_X509* FDBLibTLSPolicy::parse_cert_pem(const uint8_t* cert_pem, s
}
if (sk_X509_num(certs) < 1) {
TraceEvent(SevError, "FDBLibTLSNoCerts");
TraceEvent(SevError, "FDBLibTLSNoCerts").log();
goto err;
}
@ -168,11 +168,11 @@ err:
bool FDBLibTLSPolicy::set_ca_data(const uint8_t* ca_data, int ca_len) {
if (ca_data_set) {
TraceEvent(SevError, "FDBLibTLSCAAlreadySet");
TraceEvent(SevError, "FDBLibTLSCAAlreadySet").log();
return false;
}
if (session_created) {
TraceEvent(SevError, "FDBLibTLSPolicyAlreadyActive");
TraceEvent(SevError, "FDBLibTLSPolicyAlreadyActive").log();
return false;
}
@ -194,11 +194,11 @@ bool FDBLibTLSPolicy::set_ca_data(const uint8_t* ca_data, int ca_len) {
bool FDBLibTLSPolicy::set_cert_data(const uint8_t* cert_data, int cert_len) {
if (cert_data_set) {
TraceEvent(SevError, "FDBLibTLSCertAlreadySet");
TraceEvent(SevError, "FDBLibTLSCertAlreadySet").log();
return false;
}
if (session_created) {
TraceEvent(SevError, "FDBLibTLSPolicyAlreadyActive");
TraceEvent(SevError, "FDBLibTLSPolicyAlreadyActive").log();
return false;
}
@ -218,11 +218,11 @@ bool FDBLibTLSPolicy::set_key_data(const uint8_t* key_data, int key_len, const c
bool rc = false;
if (key_data_set) {
TraceEvent(SevError, "FDBLibTLSKeyAlreadySet");
TraceEvent(SevError, "FDBLibTLSKeyAlreadySet").log();
goto err;
}
if (session_created) {
TraceEvent(SevError, "FDBLibTLSPolicyAlreadyActive");
TraceEvent(SevError, "FDBLibTLSPolicyAlreadyActive").log();
goto err;
}
@ -231,7 +231,7 @@ bool FDBLibTLSPolicy::set_key_data(const uint8_t* key_data, int key_len, const c
long len;
if ((bio = BIO_new_mem_buf((void*)key_data, key_len)) == nullptr) {
TraceEvent(SevError, "FDBLibTLSOutOfMemory");
TraceEvent(SevError, "FDBLibTLSOutOfMemory").log();
goto err;
}
ERR_clear_error();
@ -241,7 +241,7 @@ bool FDBLibTLSPolicy::set_key_data(const uint8_t* key_data, int key_len, const c
if ((ERR_GET_LIB(errnum) == ERR_LIB_PEM && ERR_GET_REASON(errnum) == PEM_R_BAD_DECRYPT) ||
(ERR_GET_LIB(errnum) == ERR_LIB_EVP && ERR_GET_REASON(errnum) == EVP_R_BAD_DECRYPT)) {
TraceEvent(SevError, "FDBLibTLSIncorrectPassword");
TraceEvent(SevError, "FDBLibTLSIncorrectPassword").log();
} else {
ERR_error_string_n(errnum, errbuf, sizeof(errbuf));
TraceEvent(SevError, "FDBLibTLSPrivateKeyError").detail("LibcryptoErrorMessage", errbuf);
@ -250,15 +250,15 @@ bool FDBLibTLSPolicy::set_key_data(const uint8_t* key_data, int key_len, const c
}
BIO_free(bio);
if ((bio = BIO_new(BIO_s_mem())) == nullptr) {
TraceEvent(SevError, "FDBLibTLSOutOfMemory");
TraceEvent(SevError, "FDBLibTLSOutOfMemory").log();
goto err;
}
if (!PEM_write_bio_PrivateKey(bio, key, nullptr, nullptr, 0, nullptr, nullptr)) {
TraceEvent(SevError, "FDBLibTLSOutOfMemory");
TraceEvent(SevError, "FDBLibTLSOutOfMemory").log();
goto err;
}
if ((len = BIO_get_mem_data(bio, &data)) <= 0) {
TraceEvent(SevError, "FDBLibTLSOutOfMemory");
TraceEvent(SevError, "FDBLibTLSOutOfMemory").log();
goto err;
}
if (tls_config_set_key_mem(tls_cfg, (const uint8_t*)data, len) == -1) {
@ -283,16 +283,16 @@ err:
bool FDBLibTLSPolicy::set_verify_peers(int count, const uint8_t* verify_peers[], int verify_peers_len[]) {
if (verify_peers_set) {
TraceEvent(SevError, "FDBLibTLSVerifyPeersAlreadySet");
TraceEvent(SevError, "FDBLibTLSVerifyPeersAlreadySet").log();
return false;
}
if (session_created) {
TraceEvent(SevError, "FDBLibTLSPolicyAlreadyActive");
TraceEvent(SevError, "FDBLibTLSPolicyAlreadyActive").log();
return false;
}
if (count < 1) {
TraceEvent(SevError, "FDBLibTLSNoVerifyPeers");
TraceEvent(SevError, "FDBLibTLSNoVerifyPeers").log();
return false;
}

View File

@ -73,7 +73,7 @@ FDBLibTLSSession::FDBLibTLSSession(Reference<FDBLibTLSPolicy> policy,
if (is_client) {
if ((tls_ctx = tls_client()) == nullptr) {
TraceEvent(SevError, "FDBLibTLSClientError", uid);
TraceEvent(SevError, "FDBLibTLSClientError", uid).log();
throw std::runtime_error("FDBLibTLSClientError");
}
if (tls_configure(tls_ctx, policy->tls_cfg) == -1) {
@ -88,7 +88,7 @@ FDBLibTLSSession::FDBLibTLSSession(Reference<FDBLibTLSPolicy> policy,
}
} else {
if ((tls_sctx = tls_server()) == nullptr) {
TraceEvent(SevError, "FDBLibTLSServerError", uid);
TraceEvent(SevError, "FDBLibTLSServerError", uid).log();
throw std::runtime_error("FDBLibTLSServerError");
}
if (tls_configure(tls_sctx, policy->tls_cfg) == -1) {
@ -250,7 +250,7 @@ std::tuple<bool, std::string> FDBLibTLSSession::check_verify(Reference<FDBLibTLS
// Verify the certificate.
if ((store_ctx = X509_STORE_CTX_new()) == nullptr) {
TraceEvent(SevError, "FDBLibTLSOutOfMemory", uid);
TraceEvent(SevError, "FDBLibTLSOutOfMemory", uid).log();
reason = "Out of memory";
goto err;
}
@ -333,7 +333,7 @@ bool FDBLibTLSSession::verify_peer() {
return true;
if ((cert_pem = tls_peer_cert_chain_pem(tls_ctx, &cert_pem_len)) == nullptr) {
TraceEvent(SevError, "FDBLibTLSNoCertError", uid);
TraceEvent(SevError, "FDBLibTLSNoCertError", uid).log();
goto err;
}
if ((certs = policy->parse_cert_pem(cert_pem, cert_pem_len)) == nullptr)
@ -388,14 +388,14 @@ int FDBLibTLSSession::handshake() {
int FDBLibTLSSession::read(uint8_t* data, int length) {
if (!handshake_completed) {
TraceEvent(SevError, "FDBLibTLSReadHandshakeError");
TraceEvent(SevError, "FDBLibTLSReadHandshakeError").log();
return FAILED;
}
ssize_t n = tls_read(tls_ctx, data, length);
if (n > 0) {
if (n > INT_MAX) {
TraceEvent(SevError, "FDBLibTLSReadOverflow");
TraceEvent(SevError, "FDBLibTLSReadOverflow").log();
return FAILED;
}
return (int)n;
@ -415,14 +415,14 @@ int FDBLibTLSSession::read(uint8_t* data, int length) {
int FDBLibTLSSession::write(const uint8_t* data, int length) {
if (!handshake_completed) {
TraceEvent(SevError, "FDBLibTLSWriteHandshakeError", uid);
TraceEvent(SevError, "FDBLibTLSWriteHandshakeError", uid).log();
return FAILED;
}
ssize_t n = tls_write(tls_ctx, data, length);
if (n > 0) {
if (n > INT_MAX) {
TraceEvent(SevError, "FDBLibTLSWriteOverflow", uid);
TraceEvent(SevError, "FDBLibTLSWriteOverflow", uid).log();
return FAILED;
}
return (int)n;

View File

@ -2177,6 +2177,81 @@ TEST_CASE("monitor_network_busyness") {
CHECK(containsGreaterZero);
}
// Commit a transaction and confirm it has not been reset
TEST_CASE("commit_does_not_reset") {
// Regression test for API version >= 700 behavior: committing a transaction
// (successfully or with an error) must no longer reset it. We verify this by
// comparing each transaction's read version before and after its commit
// attempt -- a reset would cause the next get_read_version() to fetch a new,
// different read version.
fdb::Transaction tr(db);
fdb::Transaction tr2(db);
// Commit two transactions, one that will fail with conflict and the other
// that will succeed. Ensure both transactions are not reset at the end.
while (1) {
// Capture tr's starting read version; on any retryable error, run on_error
// and restart the whole scenario from the top.
fdb::Int64Future tr1GrvFuture = tr.get_read_version();
fdb_error_t err = wait_future(tr1GrvFuture);
if (err) {
fdb::EmptyFuture tr1OnErrorFuture = tr.on_error(err);
fdb_check(wait_future(tr1OnErrorFuture));
continue;
}
int64_t tr1StartVersion;
CHECK(!tr1GrvFuture.get(&tr1StartVersion));
// Capture tr2's starting read version the same way.
fdb::Int64Future tr2GrvFuture = tr2.get_read_version();
err = wait_future(tr2GrvFuture);
if (err) {
fdb::EmptyFuture tr2OnErrorFuture = tr2.on_error(err);
fdb_check(wait_future(tr2OnErrorFuture));
continue;
}
int64_t tr2StartVersion;
CHECK(!tr2GrvFuture.get(&tr2StartVersion));
// tr writes key "foo" and commits first; this commit is expected to succeed.
tr.set(key("foo"), "bar");
fdb::EmptyFuture tr1CommitFuture = tr.commit();
err = wait_future(tr1CommitFuture);
if (err) {
fdb::EmptyFuture tr1OnErrorFuture = tr.on_error(err);
fdb_check(wait_future(tr1OnErrorFuture));
continue;
}
// Force tr2 to conflict with tr's committed write: declare a read conflict
// range over "foo" and write it. tr2's read version predates tr's commit,
// so tr2's commit must fail with not_committed.
fdb_check(tr2.add_conflict_range(key("foo"), strinc(key("foo")), FDB_CONFLICT_RANGE_TYPE_READ));
tr2.set(key("foo"), "bar");
fdb::EmptyFuture tr2CommitFuture = tr2.commit();
err = wait_future(tr2CommitFuture);
CHECK(err == 1020); // not_committed
// Re-read both transactions' read versions after the commit attempts; a
// reset would have discarded the cached read version.
fdb::Int64Future tr1GrvFuture2 = tr.get_read_version();
err = wait_future(tr1GrvFuture2);
if (err) {
fdb::EmptyFuture tr1OnErrorFuture = tr.on_error(err);
fdb_check(wait_future(tr1OnErrorFuture));
continue;
}
int64_t tr1EndVersion;
CHECK(!tr1GrvFuture2.get(&tr1EndVersion));
fdb::Int64Future tr2GrvFuture2 = tr2.get_read_version();
err = wait_future(tr2GrvFuture2);
if (err) {
fdb::EmptyFuture tr2OnErrorFuture = tr2.on_error(err);
fdb_check(wait_future(tr2OnErrorFuture));
continue;
}
int64_t tr2EndVersion;
CHECK(!tr2GrvFuture2.get(&tr2EndVersion));
// If we reset the transaction, then the read version will change
CHECK(tr1StartVersion == tr1EndVersion);
CHECK(tr2StartVersion == tr2EndVersion);
break;
}
}
int main(int argc, char** argv) {
if (argc < 3) {
std::cout << "Unit tests for the FoundationDB C API.\n"

View File

@ -0,0 +1,188 @@
/*
* RepeatableReadMultiThreadClientTest
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.apple.foundationdb;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import com.apple.foundationdb.tuple.Tuple;
import org.junit.jupiter.api.Assertions;
/**
* This test verifies that transactions have repeatable reads.
* 1 First, set initialValue for the key.
* 2 Have transactions read the key and verify initialValue in a loop; if one does not
* see initialValue as the value, it sets its flag to false.
*
* 3 Then have new transactions set the value and read it back to verify the new value is set;
* if one does not read the new value, it sets its flag to false.
*
* 4 Verify that the old transactions have not finished when the new transactions have finished,
* then verify the old transactions do not have a false flag -- meaning the old transactions
* are still seeing initialValue even after the new transactions set the key to a new value.
*/
public class RepeatableReadMultiThreadClientTest {
	public static final MultiClientHelper clientHelper = new MultiClientHelper();

	// How many times each OldValueReader re-reads the key inside one transaction.
	private static final int oldValueReadCount = 30;
	// Number of reader/writer threads started per database.
	private static final int threadPerDB = 5;

	private static final String key = "foo";
	private static final String initialValue = "bar";
	private static final String newValue = "cool";

	// Maps each started old-reader thread to the reader instance it runs, so the
	// main thread can assert on that instance's succeed flag later.
	private static final Map<Thread, OldValueReader> threadToOldValueReaders = new HashMap<>();

	public static void main(String[] args) throws Exception {
		FDB fdb = FDB.selectAPIVersion(710);
		setupThreads(fdb);
		Collection<Database> dbs = clientHelper.openDatabases(fdb); // the clientHelper will close the databases for us
		System.out.println("Starting tests");
		setup(dbs);
		System.out.println("Start processing and validating");
		readOldValue(dbs);
		setNewValueAndRead(dbs);
		System.out.println("Test finished");
	}

	// Configure the multi-threaded client: one client thread per cluster listed
	// in the environment, plus external client dir, tracing, and knobs.
	private static synchronized void setupThreads(FDB fdb) {
		int clientThreadsPerVersion = clientHelper.readClusterFromEnv().length;
		fdb.options().setClientThreadsPerVersion(clientThreadsPerVersion);
		System.out.printf("thread per version is %d\n", clientThreadsPerVersion);
		fdb.options().setExternalClientDirectory("/var/dynamic-conf/lib");
		fdb.options().setTraceEnable("/tmp");
		fdb.options().setKnob("min_trace_severity=5");
	}

	// Seed every database with key -> initialValue.
	private static void setup(Collection<Database> dbs) {
		for (Database db : dbs) {
			db.run(tr -> {
				tr.set(Tuple.from(key).pack(), Tuple.from(initialValue).pack());
				return null;
			});
		}
	}

	private static void readOldValue(Collection<Database> dbs) throws InterruptedException {
		for (Database db : dbs) {
			for (int i = 0; i < threadPerDB; i++) {
				final OldValueReader oldValueReader = OldValueReader.create(db);
				// Bug fix: run the same instance that is recorded in the map.
				// Previously a second, distinct OldValueReader was handed to the
				// Thread, so the instance whose succeed flag is asserted on below
				// never actually ran and the check was vacuous.
				final Thread thread = new Thread(oldValueReader);
				thread.start();
				threadToOldValueReaders.put(thread, oldValueReader);
			}
		}
	}

	private static void setNewValueAndRead(Collection<Database> dbs) throws InterruptedException {
		// Give the OldValueReader threads a head start so they are mid-transaction
		// when the new value is written.
		Thread.sleep(1000);
		final Map<Thread, NewValueReader> threads = new HashMap<>();
		for (Database db : dbs) {
			for (int i = 0; i < threadPerDB; i++) {
				final NewValueReader newValueReader = NewValueReader.create(db);
				// Run the tracked instance (see readOldValue for why this matters).
				final Thread thread = new Thread(newValueReader);
				thread.start();
				threads.put(thread, newValueReader);
			}
		}

		for (Map.Entry<Thread, NewValueReader> entry : threads.entrySet()) {
			entry.getKey().join();
			Assertions.assertTrue(entry.getValue().succeed, "new value reader failed to read the correct value");
		}

		// The old readers must still be running; otherwise they finished before
		// the writers and cannot demonstrate a repeatable read.
		for (Map.Entry<Thread, OldValueReader> entry : threadToOldValueReaders.entrySet()) {
			Assertions.assertTrue(entry.getKey().isAlive(), "Old value reader finished too soon, cannot verify repeatable read, succeed is " + entry.getValue().succeed);
		}

		for (Map.Entry<Thread, OldValueReader> entry : threadToOldValueReaders.entrySet()) {
			entry.getKey().join();
			Assertions.assertTrue(entry.getValue().succeed, "old value reader failed to read the correct value");
		}
	}

	public static class OldValueReader implements Runnable {
		private final Database db;
		// volatile: written by the reader thread and read by the main thread,
		// including while the reader thread is still alive (isAlive check path).
		private volatile boolean succeed;

		private OldValueReader(Database db) {
			this.db = db;
			this.succeed = true;
		}

		public static OldValueReader create(Database db) {
			return new OldValueReader(db);
		}

		// Within a single transaction, repeatedly read the key and verify the
		// value never changes from initialValue (repeatable read).
		@Override
		public void run() {
			db.run(tr -> {
				try {
					for (int i = 0; i < oldValueReadCount; i++) {
						byte[] result = tr.get(Tuple.from(key).pack()).join();
						String value = Tuple.fromBytes(result).getString(0);
						if (!initialValue.equals(value)) {
							succeed = false;
							break;
						}
						Thread.sleep(100);
					}
				}
				catch (Exception e) {
					succeed = false;
				}
				return null;
			});
		}
	}

	public static class NewValueReader implements Runnable {
		private final Database db;
		// volatile for cross-thread visibility; see OldValueReader.succeed.
		private volatile boolean succeed;

		public NewValueReader(Database db) {
			this.db = db;
			this.succeed = true;
		}

		public static NewValueReader create(Database db) {
			return new NewValueReader(db);
		}

		// Overwrite the key with newValue in one transaction, then verify a
		// subsequent transaction observes the new value.
		@Override
		public void run() {
			db.run(tr -> {
				tr.set(Tuple.from(key).pack(), Tuple.from(newValue).pack());
				return null;
			});
			String value = db.run(tr -> {
				byte[] result = tr.get(Tuple.from(key).pack()).join();
				return Tuple.fromBytes(result).getString(0);
			});
			if (!newValue.equals(value)) {
				succeed = false;
			}
		}
	}
}

View File

@ -51,6 +51,7 @@ set(JAVA_INTEGRATION_TESTS
src/integration/com/apple/foundationdb/BasicMultiClientIntegrationTest.java
src/integration/com/apple/foundationdb/CycleMultiClientIntegrationTest.java
src/integration/com/apple/foundationdb/SidebandMultiThreadClientTest.java
src/integration/com/apple/foundationdb/RepeatableReadMultiThreadClientTest.java
)
# Resources that are used in integration testing, but are not explicitly test files (JUnit rules,

View File

@ -39,6 +39,9 @@ function(configure_testing)
endfunction()
function(verify_testing)
if(NOT ENABLE_SIMULATION_TESTS)
return()
endif()
foreach(test_file IN LISTS fdb_test_files)
message(SEND_ERROR "${test_file} found but it is not associated with a test")
endforeach()
@ -119,6 +122,7 @@ function(add_fdb_test)
set(VALGRIND_OPTION "--use-valgrind")
endif()
list(TRANSFORM ADD_FDB_TEST_TEST_FILES PREPEND "${CMAKE_CURRENT_SOURCE_DIR}/")
if (ENABLE_SIMULATION_TESTS)
add_test(NAME ${test_name}
COMMAND $<TARGET_FILE:Python::Interpreter> ${TestRunner}
-n ${test_name}
@ -142,6 +146,7 @@ function(add_fdb_test)
get_filename_component(test_dir ${test_dir_full} NAME)
set_tests_properties(${test_name} PROPERTIES TIMEOUT ${this_test_timeout} LABELS "${test_dir}")
endif()
endif()
# set variables used for generating test packages
set(TEST_NAMES ${TEST_NAMES} ${test_name} PARENT_SCOPE)
set(TEST_FILES_${test_name} ${ADD_FDB_TEST_TEST_FILES} PARENT_SCOPE)

View File

@ -638,6 +638,15 @@ namespace SummarizeTest
{
if(!String.IsNullOrEmpty(errLine.Data))
{
if (errLine.Data.EndsWith("WARNING: ASan doesn't fully support makecontext/swapcontext functions and may produce false positives in some cases!")) {
// When running ASAN we expect to see this message. Boost coroutine should be using the correct asan annotations so that it shouldn't produce any false positives.
return;
}
if (errLine.Data.EndsWith("Warning: unimplemented fcntl command: 1036")) {
// Valgrind produces this warning when F_SET_RW_HINT is used
return;
}
hasError = true;
if(Errors.Count < maxErrors) {
if(errLine.Data.Length > maxErrorLength) {
@ -962,10 +971,6 @@ namespace SummarizeTest
int stderrBytes = 0;
foreach (string err in outputErrors)
{
if (err.EndsWith("WARNING: ASan doesn't fully support makecontext/swapcontext functions and may produce false positives in some cases!")) {
// When running ASAN we expect to see this message. Boost coroutine should be using the correct asan annotations so that it shouldn't produce any false positives.
continue;
}
if (stderrSeverity == (int)Magnesium.Severity.SevError)
{
error = true;

View File

@ -25,6 +25,8 @@ API version 700
General
-------
* Committing a transaction will no longer partially reset it. In particular, getting the read version from a transaction that has committed or failed to commit with an error will return the original read version.
Python bindings
---------------

View File

@ -91,6 +91,7 @@ Other Changes
* The ``foundationdb`` service installed by the RPM packages will now automatically restart ``fdbmonitor`` after 60 seconds when it fails. `(PR #3841) <https://github.com/apple/foundationdb/pull/3841>`_
* Capture output of forked snapshot processes in trace events. `(PR #4254) <https://github.com/apple/foundationdb/pull/4254/files>`_
* Add ErrorKind field to Severity 40 trace events. `(PR #4741) <https://github.com/apple/foundationdb/pull/4741/files>`_
* Committing a transaction will no longer partially reset it as of API version 700. `(PR #5271) <https://github.com/apple/foundationdb/pull/5271/files>`_
Earlier release notes
---------------------

View File

@ -571,7 +571,7 @@ int main(int argc, char** argv) {
}
if (!param.tlsConfig.setupTLS()) {
TraceEvent(SevError, "TLSError");
TraceEvent(SevError, "TLSError").log();
throw tls_error();
}

View File

@ -3960,6 +3960,7 @@ ACTOR Future<int> cli(CLIOptions opt, LineNoise* plinenoise) {
is_error = true;
continue;
}
wait(makeInterruptable(GlobalConfig::globalConfig().onInitialized()));
if (tokencmp(tokens[2], "get")) {
if (tokens.size() != 3) {
fprintf(stderr, "ERROR: Addtional arguments to `get` are not supported.\n");

View File

@ -227,7 +227,7 @@ Future<Reference<IAsyncFile>> BackupContainerLocalDirectory::readFile(const std:
}
if (g_simulator.getCurrentProcess()->uid == UID()) {
TraceEvent(SevError, "BackupContainerReadFileOnUnsetProcessID");
TraceEvent(SevError, "BackupContainerReadFileOnUnsetProcessID").log();
}
std::string uniquePath = fullPath + "." + g_simulator.getCurrentProcess()->uid.toString() + ".lnk";
unlink(uniquePath.c_str());

View File

@ -364,7 +364,7 @@ struct BackupRangeTaskFunc : TaskFuncBase {
TEST(true); // range insert delayed because too versionMap is too large
if (rangeCount > CLIENT_KNOBS->BACKUP_MAP_KEY_UPPER_LIMIT)
TraceEvent(SevWarnAlways, "DBA_KeyRangeMapTooLarge");
TraceEvent(SevWarnAlways, "DBA_KeyRangeMapTooLarge").log();
wait(delay(1));
task->params[BackupRangeTaskFunc::keyBackupRangeBeginKey] = rangeBegin;
@ -1882,7 +1882,7 @@ struct CopyDiffLogsUpgradeTaskFunc : TaskFuncBase {
state Reference<TaskFuture> onDone = futureBucket->unpack(task->params[Task::reservedTaskParamKeyDone]);
if (task->params[BackupAgentBase::destUid].size() == 0) {
TraceEvent("DBA_CopyDiffLogsUpgradeTaskFuncAbortInUpgrade");
TraceEvent("DBA_CopyDiffLogsUpgradeTaskFuncAbortInUpgrade").log();
wait(success(AbortOldBackupTaskFunc::addTask(tr, taskBucket, task, TaskCompletionKey::signal(onDone))));
} else {
Version beginVersion =
@ -2377,11 +2377,11 @@ void checkAtomicSwitchOverConfig(StatusObjectReader srcStatus, StatusObjectReade
try {
// Check if src is unlocked and dest is locked
if (getLockedStatus(srcStatus) != false) {
TraceEvent(SevWarn, "DBA_AtomicSwitchOverSrcLocked");
TraceEvent(SevWarn, "DBA_AtomicSwitchOverSrcLocked").log();
throw backup_error();
}
if (getLockedStatus(destStatus) != true) {
TraceEvent(SevWarn, "DBA_AtomicSwitchOverDestUnlocked");
TraceEvent(SevWarn, "DBA_AtomicSwitchOverDestUnlocked").log();
throw backup_error();
}
// Check if mutation-stream-id matches
@ -2402,7 +2402,7 @@ void checkAtomicSwitchOverConfig(StatusObjectReader srcStatus, StatusObjectReade
destDRAgents.end(),
std::inserter(intersectingAgents, intersectingAgents.begin()));
if (intersectingAgents.empty()) {
TraceEvent(SevWarn, "DBA_SwitchOverPossibleDRAgentsIncorrectSetup");
TraceEvent(SevWarn, "DBA_SwitchOverPossibleDRAgentsIncorrectSetup").log();
throw backup_error();
}
} catch (std::runtime_error& e) {
@ -2757,7 +2757,7 @@ public:
}
}
TraceEvent("DBA_SwitchoverReady");
TraceEvent("DBA_SwitchoverReady").log();
try {
wait(backupAgent->discontinueBackup(dest, tagName));
@ -2768,7 +2768,7 @@ public:
wait(success(backupAgent->waitBackup(dest, tagName, StopWhenDone::True)));
TraceEvent("DBA_SwitchoverStopped");
TraceEvent("DBA_SwitchoverStopped").log();
state ReadYourWritesTransaction tr3(dest);
loop {
@ -2789,7 +2789,7 @@ public:
}
}
TraceEvent("DBA_SwitchoverVersionUpgraded");
TraceEvent("DBA_SwitchoverVersionUpgraded").log();
try {
wait(drAgent.submitBackup(backupAgent->taskBucket->src,
@ -2805,15 +2805,15 @@ public:
throw;
}
TraceEvent("DBA_SwitchoverSubmitted");
TraceEvent("DBA_SwitchoverSubmitted").log();
wait(success(drAgent.waitSubmitted(backupAgent->taskBucket->src, tagName)));
TraceEvent("DBA_SwitchoverStarted");
TraceEvent("DBA_SwitchoverStarted").log();
wait(backupAgent->unlockBackup(dest, tagName));
TraceEvent("DBA_SwitchoverUnlocked");
TraceEvent("DBA_SwitchoverUnlocked").log();
return Void();
}

View File

@ -5478,7 +5478,7 @@ public:
try {
wait(discontinueBackup(backupAgent, ryw_tr, tagName));
wait(ryw_tr->commit());
TraceEvent("AS_DiscontinuedBackup");
TraceEvent("AS_DiscontinuedBackup").log();
break;
} catch (Error& e) {
if (e.code() == error_code_backup_unneeded || e.code() == error_code_backup_duplicate) {
@ -5489,7 +5489,7 @@ public:
}
wait(success(waitBackup(backupAgent, cx, tagName.toString(), StopWhenDone::True)));
TraceEvent("AS_BackupStopped");
TraceEvent("AS_BackupStopped").log();
ryw_tr->reset();
loop {
@ -5502,7 +5502,7 @@ public:
ryw_tr->clear(range);
}
wait(ryw_tr->commit());
TraceEvent("AS_ClearedRange");
TraceEvent("AS_ClearedRange").log();
break;
} catch (Error& e) {
wait(ryw_tr->onError(e));
@ -5512,7 +5512,7 @@ public:
Reference<IBackupContainer> bc = wait(backupConfig.backupContainer().getOrThrow(cx));
if (fastRestore) {
TraceEvent("AtomicParallelRestoreStartRestore");
TraceEvent("AtomicParallelRestoreStartRestore").log();
Version targetVersion = ::invalidVersion;
wait(submitParallelRestore(cx,
tagName,
@ -5533,7 +5533,7 @@ public:
}
return -1;
} else {
TraceEvent("AS_StartRestore");
TraceEvent("AS_StartRestore").log();
Version ver = wait(restore(backupAgent,
cx,
cx,

View File

@ -77,6 +77,7 @@ void GlobalConfig::trigger(KeyRef key, std::function<void(std::optional<std::any
}
void GlobalConfig::insert(KeyRef key, ValueRef value) {
TraceEvent(SevInfo, "GlobalConfig_Insert").detail("Key", key).detail("Value", value);
data.erase(key);
Arena arena(key.expectedSize() + value.expectedSize());
@ -112,6 +113,7 @@ void GlobalConfig::erase(Key key) {
}
void GlobalConfig::erase(KeyRangeRef range) {
TraceEvent(SevInfo, "GlobalConfig_Erase").detail("Range", range);
auto it = data.begin();
while (it != data.end()) {
if (range.contains(it->first)) {
@ -174,6 +176,7 @@ ACTOR Future<Void> GlobalConfig::migrate(GlobalConfig* self) {
// Updates local copy of global configuration by reading the entire key-range
// from storage.
ACTOR Future<Void> GlobalConfig::refresh(GlobalConfig* self) {
TraceEvent trace(SevInfo, "GlobalConfig_Refresh");
self->erase(KeyRangeRef(""_sr, "\xff"_sr));
Transaction tr(self->cx);

View File

@ -108,6 +108,7 @@ public:
// the key.
template <typename T, typename std::enable_if<std::is_arithmetic<T>{}, bool>::type = true>
const T get(KeyRef name, T defaultVal) {
TraceEvent(SevInfo, "GlobalConfig_Get").detail("Key", name);
try {
auto configValue = get(name);
if (configValue.isValid()) {

View File

@ -26,6 +26,28 @@
#include "flow/Platform.h"
#include "flow/actorcompiler.h" // has to be last include
namespace {

// Normalizes a cluster connection string for parsing: removes all whitespace
// (spaces, tabs, CR, LF) and discards comment text running from a '#' through
// the end of that line. Newlines terminate comments but are never copied.
std::string trim(std::string const& connectionString) {
	std::string result;
	bool inComment = false;
	for (char ch : connectionString) {
		if (ch == '\n' || ch == '\r') {
			// A line break ends any active comment and is itself stripped.
			inComment = false;
			continue;
		}
		if (inComment)
			continue;
		if (ch == '#') {
			inComment = true;
			continue;
		}
		if (ch != ' ' && ch != '\t')
			result += ch;
	}
	return result;
}

} // namespace
std::pair<std::string, bool> ClusterConnectionFile::lookupClusterFileName(std::string const& filename) {
if (filename.length())
return std::make_pair(filename, false);
@ -154,24 +176,6 @@ std::string ClusterConnectionString::getErrorString(std::string const& source, E
}
}
std::string trim(std::string const& connectionString) {
// Strip out whitespace
// Strip out characters between a # and a newline
std::string trimmed;
auto end = connectionString.end();
for (auto c = connectionString.begin(); c != end; ++c) {
if (*c == '#') {
++c;
while (c != end && *c != '\n' && *c != '\r')
++c;
if (c == end)
break;
} else if (*c != ' ' && *c != '\n' && *c != '\r' && *c != '\t')
trimmed += *c;
}
return trimmed;
}
ClusterConnectionString::ClusterConnectionString(std::string const& connectionString) {
auto trimmed = trim(connectionString);
@ -838,6 +842,7 @@ ACTOR Future<MonitorLeaderInfo> monitorProxiesOneGeneration(
clientInfo->set(ni);
successIdx = idx;
} else {
TEST(rep.getError().code() == error_code_failed_to_progress); // Coordinator cannot talk to cluster controller
idx = (idx + 1) % addrs.size();
if (idx == successIdx) {
wait(delay(CLIENT_KNOBS->COORDINATOR_RECONNECTION_DELAY));

View File

@ -1521,7 +1521,7 @@ std::vector<std::pair<std::string, bool>> MultiVersionApi::copyExternalLibraryPe
#else
std::vector<std::pair<std::string, bool>> MultiVersionApi::copyExternalLibraryPerThread(std::string path) {
if (threadCount > 1) {
TraceEvent(SevError, "MultipleClientThreadsUnsupportedOnWindows");
TraceEvent(SevError, "MultipleClientThreadsUnsupportedOnWindows").log();
throw unsupported_operation();
}
std::vector<std::pair<std::string, bool>> paths;

View File

@ -488,7 +488,7 @@ ACTOR static Future<Void> transactionInfoCommitActor(Transaction* tr, std::vecto
ACTOR static Future<Void> delExcessClntTxnEntriesActor(Transaction* tr, int64_t clientTxInfoSizeLimit) {
state const Key clientLatencyName = CLIENT_LATENCY_INFO_PREFIX.withPrefix(fdbClientInfoPrefixRange.begin);
state const Key clientLatencyAtomicCtr = CLIENT_LATENCY_INFO_CTR_PREFIX.withPrefix(fdbClientInfoPrefixRange.begin);
TraceEvent(SevInfo, "DelExcessClntTxnEntriesCalled");
TraceEvent(SevInfo, "DelExcessClntTxnEntriesCalled").log();
loop {
try {
tr->reset();
@ -496,7 +496,7 @@ ACTOR static Future<Void> delExcessClntTxnEntriesActor(Transaction* tr, int64_t
tr->setOption(FDBTransactionOptions::LOCK_AWARE);
Optional<Value> ctrValue = wait(tr->get(KeyRef(clientLatencyAtomicCtr), Snapshot::True));
if (!ctrValue.present()) {
TraceEvent(SevInfo, "NumClntTxnEntriesNotFound");
TraceEvent(SevInfo, "NumClntTxnEntriesNotFound").log();
return Void();
}
state int64_t txInfoSize = 0;
@ -1627,7 +1627,7 @@ ACTOR static Future<Void> switchConnectionFileImpl(Reference<ClusterConnectionFi
loop {
tr.setOption(FDBTransactionOptions::READ_LOCK_AWARE);
try {
TraceEvent("SwitchConnectionFileAttemptingGRV");
TraceEvent("SwitchConnectionFileAttemptingGRV").log();
Version v = wait(tr.getReadVersion());
TraceEvent("SwitchConnectionFileGotRV")
.detail("ReadVersion", v)
@ -5199,7 +5199,7 @@ Future<Void> Transaction::commitMutations() {
if (options.debugDump) {
UID u = nondeterministicRandom()->randomUniqueID();
TraceEvent("TransactionDump", u);
TraceEvent("TransactionDump", u).log();
for (auto i = tr.transaction.mutations.begin(); i != tr.transaction.mutations.end(); ++i)
TraceEvent("TransactionMutation", u)
.detail("T", i->type)
@ -5244,7 +5244,10 @@ ACTOR Future<Void> commitAndWatch(Transaction* self) {
self->setupWatches();
}
if (!self->apiVersionAtLeast(700)) {
self->reset();
}
return Void();
} catch (Error& e) {
if (e.code() != error_code_actor_cancelled) {
@ -5253,8 +5256,11 @@ ACTOR Future<Void> commitAndWatch(Transaction* self) {
}
self->versionstampPromise.sendError(transaction_invalid_version());
if (!self->apiVersionAtLeast(700)) {
self->reset();
}
}
throw;
}
@ -6326,7 +6332,7 @@ void Transaction::setToken(uint64_t token) {
void enableClientInfoLogging() {
ASSERT(networkOptions.logClientInfo.present() == false);
networkOptions.logClientInfo = true;
TraceEvent(SevInfo, "ClientInfoLoggingEnabled");
TraceEvent(SevInfo, "ClientInfoLoggingEnabled").log();
}
ACTOR Future<Void> snapCreate(Database cx, Standalone<StringRef> snapCmd, UID snapUID) {
@ -6380,7 +6386,7 @@ ACTOR Future<bool> checkSafeExclusions(Database cx, vector<AddressExclusion> exc
}
throw;
}
TraceEvent("ExclusionSafetyCheckCoordinators");
TraceEvent("ExclusionSafetyCheckCoordinators").log();
state ClientCoordinators coordinatorList(cx->getConnectionFile());
state vector<Future<Optional<LeaderInfo>>> leaderServers;
leaderServers.reserve(coordinatorList.clientLeaderServers.size());
@ -6393,7 +6399,7 @@ ACTOR Future<bool> checkSafeExclusions(Database cx, vector<AddressExclusion> exc
choose {
when(wait(smartQuorum(leaderServers, leaderServers.size() / 2 + 1, 1.0))) {}
when(wait(delay(3.0))) {
TraceEvent("ExclusionSafetyCheckNoCoordinatorQuorum");
TraceEvent("ExclusionSafetyCheckNoCoordinatorQuorum").log();
return false;
}
}

View File

@ -1164,7 +1164,7 @@ public:
if (!ryw->resetPromise.isSet())
ryw->resetPromise.sendError(transaction_timed_out());
wait(delay(deterministicRandom()->random01() * 5));
TraceEvent("ClientBuggifyInFlightCommit");
TraceEvent("ClientBuggifyInFlightCommit").log();
wait(ryw->tr.commit());
}

View File

@ -366,6 +366,7 @@ void ServerKnobs::initialize(Randomize randomize, ClientKnobs* clientKnobs, IsSi
init( START_TRANSACTION_MAX_EMPTY_QUEUE_BUDGET, 10.0 );
init( START_TRANSACTION_MAX_QUEUE_SIZE, 1e6 );
init( KEY_LOCATION_MAX_QUEUE_SIZE, 1e6 );
init( COMMIT_PROXY_LIVENESS_TIMEOUT, 20.0 );
init( COMMIT_TRANSACTION_BATCH_INTERVAL_FROM_IDLE, 0.0005 ); if( randomize && BUGGIFY ) COMMIT_TRANSACTION_BATCH_INTERVAL_FROM_IDLE = 0.005;
init( COMMIT_TRANSACTION_BATCH_INTERVAL_MIN, 0.001 ); if( randomize && BUGGIFY ) COMMIT_TRANSACTION_BATCH_INTERVAL_MIN = 0.1;
@ -644,6 +645,7 @@ void ServerKnobs::initialize(Randomize randomize, ClientKnobs* clientKnobs, IsSi
init( DBINFO_FAILED_DELAY, 1.0 );
init( ENABLE_WORKER_HEALTH_MONITOR, false );
init( WORKER_HEALTH_MONITOR_INTERVAL, 60.0 );
init( PEER_LATENCY_CHECK_MIN_POPULATION, 30 );
init( PEER_LATENCY_DEGRADATION_PERCENTILE, 0.90 );
init( PEER_LATENCY_DEGRADATION_THRESHOLD, 0.05 );
init( PEER_TIMEOUT_PERCENTAGE_DEGRADATION_THRESHOLD, 0.1 );
@ -653,7 +655,9 @@ void ServerKnobs::initialize(Randomize randomize, ClientKnobs* clientKnobs, IsSi
// Coordination
init( COORDINATED_STATE_ONCONFLICT_POLL_INTERVAL, 1.0 ); if( randomize && BUGGIFY ) COORDINATED_STATE_ONCONFLICT_POLL_INTERVAL = 10.0;
init( FORWARD_REQUEST_TOO_OLD, 4*24*60*60 ); if( randomize && BUGGIFY ) FORWARD_REQUEST_TOO_OLD = 60.0;
init( ENABLE_CROSS_CLUSTER_SUPPORT, true ); if( randomize && BUGGIFY ) ENABLE_CROSS_CLUSTER_SUPPORT = false;
init( COORDINATOR_LEADER_CONNECTION_TIMEOUT, 20.0 );
// Buggification
init( BUGGIFIED_EVENTUAL_CONSISTENCY, 1.0 );

View File

@ -297,6 +297,7 @@ public:
double START_TRANSACTION_MAX_EMPTY_QUEUE_BUDGET;
int START_TRANSACTION_MAX_QUEUE_SIZE;
int KEY_LOCATION_MAX_QUEUE_SIZE;
double COMMIT_PROXY_LIVENESS_TIMEOUT;
double COMMIT_TRANSACTION_BATCH_INTERVAL_FROM_IDLE;
double COMMIT_TRANSACTION_BATCH_INTERVAL_MIN;
@ -593,6 +594,8 @@ public:
double COORDINATED_STATE_ONCONFLICT_POLL_INTERVAL;
bool ENABLE_CROSS_CLUSTER_SUPPORT; // Allow a coordinator to serve requests whose connection string does not match
// the local descriptor
double FORWARD_REQUEST_TOO_OLD; // Do not forward requests older than this setting
double COORDINATOR_LEADER_CONNECTION_TIMEOUT;
// Buggification
double BUGGIFIED_EVENTUAL_CONSISTENCY;

View File

@ -129,7 +129,7 @@ void decodeKeyServersValue(RangeResult result,
std::sort(src.begin(), src.end());
std::sort(dest.begin(), dest.end());
if (missingIsError && (src.size() != srcTag.size() || dest.size() != destTag.size())) {
TraceEvent(SevError, "AttemptedToDecodeMissingTag");
TraceEvent(SevError, "AttemptedToDecodeMissingTag").log();
for (const KeyValueRef& kv : result) {
Tag tag = decodeServerTagValue(kv.value);
UID serverID = decodeServerTagKey(kv.key);

View File

@ -234,6 +234,8 @@ struct YieldMockNetwork final : INetwork, ReferenceCounted<YieldMockNetwork> {
Future<class Void> delay(double seconds, TaskPriority taskID) override { return nextTick.getFuture(); }
Future<class Void> orderedDelay(double seconds, TaskPriority taskID) override { return nextTick.getFuture(); }
Future<class Void> yield(TaskPriority taskID) override {
if (check_yield(taskID))
return delay(0, taskID);

View File

@ -922,9 +922,9 @@ ACTOR static void deliver(TransportData* self,
// We want to run the task at the right priority. If the priority is higher than the current priority (which is
// ReadSocket) we can just upgrade. Otherwise we'll context switch so that we don't block other tasks that might run
// with a higher priority. ReplyPromiseStream needs to guarentee that messages are recieved in the order they were
// sent, so even in the case of local delivery those messages need to skip this delay.
if (priority < TaskPriority::ReadSocket || (priority != TaskPriority::NoDeliverDelay && !inReadSocket)) {
wait(delay(0, priority));
// sent, so we are using orderedDelay.
if (priority < TaskPriority::ReadSocket || !inReadSocket) {
wait(orderedDelay(0, priority));
} else {
g_network->setCurrentTask(priority);
}
@ -1019,7 +1019,7 @@ static void scanPackets(TransportData* transport,
BUGGIFY_WITH_PROB(0.0001)) {
g_simulator.lastConnectionFailure = g_network->now();
isBuggifyEnabled = true;
TraceEvent(SevInfo, "BitsFlip");
TraceEvent(SevInfo, "BitsFlip").log();
int flipBits = 32 - (int)floor(log2(deterministicRandom()->randomUInt32()));
uint32_t firstFlipByteLocation = deterministicRandom()->randomUInt32() % packetLen;

View File

@ -361,7 +361,7 @@ struct NetNotifiedQueueWithAcknowledgements final : NotifiedQueue<T>,
FlowTransport::transport().sendUnreliable(
SerializeSource<ErrorOr<AcknowledgementReply>>(
AcknowledgementReply(acknowledgements.bytesAcknowledged)),
acknowledgements.getEndpoint(TaskPriority::NoDeliverDelay),
acknowledgements.getEndpoint(TaskPriority::ReadSocket),
false);
}
}
@ -378,7 +378,7 @@ struct NetNotifiedQueueWithAcknowledgements final : NotifiedQueue<T>,
acknowledgements.bytesAcknowledged += res.expectedSize();
FlowTransport::transport().sendUnreliable(SerializeSource<ErrorOr<AcknowledgementReply>>(
AcknowledgementReply(acknowledgements.bytesAcknowledged)),
acknowledgements.getEndpoint(TaskPriority::NoDeliverDelay),
acknowledgements.getEndpoint(TaskPriority::ReadSocket),
false);
}
return res;
@ -389,13 +389,13 @@ struct NetNotifiedQueueWithAcknowledgements final : NotifiedQueue<T>,
// Notify the server that a client is not using this ReplyPromiseStream anymore
FlowTransport::transport().sendUnreliable(
SerializeSource<ErrorOr<AcknowledgementReply>>(operation_obsolete()),
acknowledgements.getEndpoint(TaskPriority::NoDeliverDelay),
acknowledgements.getEndpoint(TaskPriority::ReadSocket),
false);
}
if (isRemoteEndpoint() && !sentError && !acknowledgements.failures.isReady()) {
// The ReplyPromiseStream was cancelled before sending an error, so the storage server must have died
FlowTransport::transport().sendUnreliable(SerializeSource<ErrorOr<EnsureTable<T>>>(broken_promise()),
getEndpoint(TaskPriority::NoDeliverDelay),
getEndpoint(TaskPriority::ReadSocket),
false);
}
}
@ -406,9 +406,6 @@ struct NetNotifiedQueueWithAcknowledgements final : NotifiedQueue<T>,
template <class T>
class ReplyPromiseStream {
public:
// The endpoints of a ReplyPromiseStream must be initialized at Task::NoDeliverDelay, because a
// delay(0) in FlowTransport deliver can cause out of order delivery.
// stream.send( request )
// Unreliable at most once delivery: Delivers request unless there is a connection failure (zero or one times)
@ -416,7 +413,7 @@ public:
void send(U&& value) const {
if (queue->isRemoteEndpoint()) {
if (!queue->acknowledgements.getRawEndpoint().isValid()) {
value.acknowledgeToken = queue->acknowledgements.getEndpoint(TaskPriority::NoDeliverDelay).token;
value.acknowledgeToken = queue->acknowledgements.getEndpoint(TaskPriority::ReadSocket).token;
}
queue->acknowledgements.bytesSent += value.expectedSize();
FlowTransport::transport().sendUnreliable(
@ -477,7 +474,7 @@ public:
errors->delPromiseRef();
}
const Endpoint& getEndpoint() const { return queue->getEndpoint(TaskPriority::NoDeliverDelay); }
const Endpoint& getEndpoint() const { return queue->getEndpoint(TaskPriority::ReadSocket); }
bool operator==(const ReplyPromiseStream<T>& rhs) const { return queue == rhs.queue; }
bool isEmpty() const { return !queue->isReady(); }

View File

@ -470,12 +470,12 @@ public:
state TaskPriority currentTaskID = g_network->getCurrentTask();
if (++openCount >= 3000) {
TraceEvent(SevError, "TooManyFiles");
TraceEvent(SevError, "TooManyFiles").log();
ASSERT(false);
}
if (openCount == 2000) {
TraceEvent(SevWarnAlways, "DisableConnectionFailures_TooManyFiles");
TraceEvent(SevWarnAlways, "DisableConnectionFailures_TooManyFiles").log();
g_simulator.speedUpSimulation = true;
g_simulator.connectionFailuresDisableDuration = 1e6;
}
@ -859,13 +859,17 @@ public:
ASSERT(taskID >= TaskPriority::Min && taskID <= TaskPriority::Max);
return delay(seconds, taskID, currentProcess);
}
Future<class Void> delay(double seconds, TaskPriority taskID, ProcessInfo* machine) {
Future<class Void> orderedDelay(double seconds, TaskPriority taskID) override {
ASSERT(taskID >= TaskPriority::Min && taskID <= TaskPriority::Max);
return delay(seconds, taskID, currentProcess, true);
}
Future<class Void> delay(double seconds, TaskPriority taskID, ProcessInfo* machine, bool ordered = false) {
ASSERT(seconds >= -0.0001);
seconds = std::max(0.0, seconds);
Future<Void> f;
if (!currentProcess->rebooting && machine == currentProcess && !currentProcess->shutdownSignal.isSet() &&
FLOW_KNOBS->MAX_BUGGIFIED_DELAY > 0 &&
if (!ordered && !currentProcess->rebooting && machine == currentProcess &&
!currentProcess->shutdownSignal.isSet() && FLOW_KNOBS->MAX_BUGGIFIED_DELAY > 0 &&
deterministicRandom()->random01() < 0.25) { // FIXME: why doesnt this work when we are changing machines?
seconds += FLOW_KNOBS->MAX_BUGGIFIED_DELAY * pow(deterministicRandom()->random01(), 1000.0);
}

View File

@ -404,7 +404,7 @@ void applyMetadataMutations(SpanID const& spanContext,
confChange = true;
TEST(true); // Recovering at a higher version.
} else if (m.param1 == writeRecoveryKey) {
TraceEvent("WriteRecoveryKeySet", dbgid);
TraceEvent("WriteRecoveryKeySet", dbgid).log();
if (!initialCommit)
txnStateStore->set(KeyValueRef(m.param1, m.param2));
TEST(true); // Snapshot created, setting writeRecoveryKey in txnStateStore

View File

@ -477,7 +477,7 @@ ACTOR Future<bool> monitorBackupStartedKeyChanges(BackupData* self, bool present
if (present || !watch)
return true;
} else {
TraceEvent("BackupWorkerEmptyStartKey", self->myId);
TraceEvent("BackupWorkerEmptyStartKey", self->myId).log();
self->onBackupChanges(uidVersions);
self->exitEarly = shouldExit;
@ -887,7 +887,7 @@ ACTOR Future<Void> pullAsyncData(BackupData* self) {
state Version tagAt = std::max(self->pulledVersion.get(), std::max(self->startVersion, self->savedVersion));
state Arena prev;
TraceEvent("BackupWorkerPull", self->myId);
TraceEvent("BackupWorkerPull", self->myId).log();
loop {
while (self->paused.get()) {
wait(self->paused.onChange());
@ -1017,7 +1017,7 @@ ACTOR static Future<Void> monitorWorkerPause(BackupData* self) {
Optional<Value> value = wait(tr->get(backupPausedKey));
bool paused = value.present() && value.get() == LiteralStringRef("1");
if (self->paused.get() != paused) {
TraceEvent(paused ? "BackupWorkerPaused" : "BackupWorkerResumed", self->myId);
TraceEvent(paused ? "BackupWorkerPaused" : "BackupWorkerResumed", self->myId).log();
self->paused.set(paused);
}

View File

@ -195,6 +195,8 @@ public:
}
loop {
tr.reset();
// Wait for some changes
while (!self->anyDelta.get())
wait(self->anyDelta.onChange());
@ -1962,7 +1964,7 @@ public:
}
if (bestDC != clusterControllerDcId) {
TraceEvent("BestDCIsNotClusterDC");
TraceEvent("BestDCIsNotClusterDC").log();
vector<Optional<Key>> dcPriority;
dcPriority.push_back(bestDC);
desiredDcIds.set(dcPriority);
@ -3094,7 +3096,7 @@ ACTOR Future<Void> clusterWatchDatabase(ClusterControllerData* cluster, ClusterC
// When this someday is implemented, make sure forced failures still cause the master to be recruited again
loop {
TraceEvent("CCWDB", cluster->id);
TraceEvent("CCWDB", cluster->id).log();
try {
state double recoveryStart = now();
TraceEvent("CCWDB", cluster->id).detail("Recruiting", "Master");
@ -3915,7 +3917,7 @@ ACTOR Future<Void> timeKeeperSetVersion(ClusterControllerData* self) {
ACTOR Future<Void> timeKeeper(ClusterControllerData* self) {
state KeyBackedMap<int64_t, Version> versionMap(timeKeeperPrefixRange.begin);
TraceEvent("TimeKeeperStarted");
TraceEvent("TimeKeeperStarted").log();
wait(timeKeeperSetVersion(self));
@ -3929,7 +3931,7 @@ ACTOR Future<Void> timeKeeper(ClusterControllerData* self) {
// how long it is taking to hear responses from each other component.
UID debugID = deterministicRandom()->randomUniqueID();
TraceEvent("TimeKeeperCommit", debugID);
TraceEvent("TimeKeeperCommit", debugID).log();
tr->debugTransaction(debugID);
}
tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
@ -4080,7 +4082,7 @@ ACTOR Future<Void> monitorProcessClasses(ClusterControllerData* self) {
}
wait(trVer.commit());
TraceEvent("ProcessClassUpgrade");
TraceEvent("ProcessClassUpgrade").log();
break;
} catch (Error& e) {
wait(trVer.onError(e));
@ -4509,7 +4511,7 @@ ACTOR Future<Void> handleForcedRecoveries(ClusterControllerData* self, ClusterCo
}
wait(fCommit);
}
TraceEvent("ForcedRecoveryFinish", self->id);
TraceEvent("ForcedRecoveryFinish", self->id).log();
self->db.forceRecovery = false;
req.reply.send(Void());
}
@ -4518,7 +4520,7 @@ ACTOR Future<Void> handleForcedRecoveries(ClusterControllerData* self, ClusterCo
ACTOR Future<DataDistributorInterface> startDataDistributor(ClusterControllerData* self) {
wait(delay(0.0)); // If master fails at the same time, give it a chance to clear master PID.
TraceEvent("CCStartDataDistributor", self->id);
TraceEvent("CCStartDataDistributor", self->id).log();
loop {
try {
state bool no_distributor = !self->db.serverInfo->get().distributor.present();
@ -4585,7 +4587,7 @@ ACTOR Future<Void> monitorDataDistributor(ClusterControllerData* self) {
ACTOR Future<Void> startRatekeeper(ClusterControllerData* self) {
wait(delay(0.0)); // If master fails at the same time, give it a chance to clear master PID.
TraceEvent("CCStartRatekeeper", self->id);
TraceEvent("CCStartRatekeeper", self->id).log();
loop {
try {
state bool no_ratekeeper = !self->db.serverInfo->get().ratekeeper.present();
@ -4702,7 +4704,7 @@ ACTOR Future<Void> dbInfoUpdater(ClusterControllerData* self) {
req.serializedDbInfo =
BinaryWriter::toValue(self->db.serverInfo->get(), AssumeVersion(g_network->protocolVersion()));
TraceEvent("DBInfoStartBroadcast", self->id);
TraceEvent("DBInfoStartBroadcast", self->id).log();
choose {
when(std::vector<Endpoint> notUpdated =
wait(broadcastDBInfoRequest(req, SERVER_KNOBS->DBINFO_SEND_AMOUNT, Optional<Endpoint>(), false))) {
@ -4722,7 +4724,7 @@ ACTOR Future<Void> workerHealthMonitor(ClusterControllerData* self) {
loop {
try {
while (!self->goodRecruitmentTime.isReady()) {
wait(self->goodRecruitmentTime);
wait(lowPriorityDelay(SERVER_KNOBS->CC_WORKER_HEALTH_CHECKING_INTERVAL));
}
self->degradedServers = self->getServersWithDegradedLink();
@ -4757,7 +4759,7 @@ ACTOR Future<Void> workerHealthMonitor(ClusterControllerData* self) {
}
} else {
self->excludedDegradedServers.clear();
TraceEvent("DegradedServerDetectedAndSuggestRecovery");
TraceEvent("DegradedServerDetectedAndSuggestRecovery").log();
}
}
}

View File

@ -1756,7 +1756,7 @@ ACTOR Future<Void> proxySnapCreate(ProxySnapRequest snapReq, ProxyCommitData* co
ACTOR Future<Void> proxyCheckSafeExclusion(Reference<AsyncVar<ServerDBInfo> const> db,
ExclusionSafetyCheckRequest req) {
TraceEvent("SafetyCheckCommitProxyBegin");
TraceEvent("SafetyCheckCommitProxyBegin").log();
state ExclusionSafetyCheckReply reply(false);
if (!db->get().distributor.present()) {
TraceEvent(SevWarnAlways, "DataDistributorNotPresent").detail("Operation", "ExclusionSafetyCheck");
@ -1778,7 +1778,7 @@ ACTOR Future<Void> proxyCheckSafeExclusion(Reference<AsyncVar<ServerDBInfo> cons
throw e;
}
}
TraceEvent("SafetyCheckCommitProxyFinish");
TraceEvent("SafetyCheckCommitProxyFinish").log();
req.reply.send(reply);
return Void();
}
@ -1796,7 +1796,7 @@ ACTOR Future<Void> reportTxnTagCommitCost(UID myID,
TraceEvent("ProxyRatekeeperChanged", myID).detail("RKID", db->get().ratekeeper.get().id());
nextRequestTimer = Void();
} else {
TraceEvent("ProxyRatekeeperDied", myID);
TraceEvent("ProxyRatekeeperDied", myID).log();
nextRequestTimer = Never();
}
}
@ -1923,16 +1923,20 @@ ACTOR Future<Void> commitProxyServerCore(CommitProxyInterface proxy,
lastCommit = now();
if (trs.size() || lastCommitComplete.isReady()) {
lastCommitComplete =
lastCommitComplete = transformError(
timeoutError(
commitBatch(&commitData,
const_cast<std::vector<CommitTransactionRequest>*>(&batchedRequests.first),
batchBytes);
batchBytes),
SERVER_KNOBS->COMMIT_PROXY_LIVENESS_TIMEOUT),
timed_out(),
failed_to_progress());
addActor.send(lastCommitComplete);
}
}
}
when(ProxySnapRequest snapReq = waitNext(proxy.proxySnapReq.getFuture())) {
TraceEvent(SevDebug, "SnapMasterEnqueue");
TraceEvent(SevDebug, "SnapMasterEnqueue").log();
addActor.send(proxySnapCreate(snapReq, &commitData));
}
when(ExclusionSafetyCheckRequest exclCheckReq = waitNext(proxy.exclusionSafetyCheckReq.getFuture())) {
@ -2068,9 +2072,11 @@ ACTOR Future<Void> commitProxyServer(CommitProxyInterface proxy,
if (e.code() != error_code_worker_removed && e.code() != error_code_tlog_stopped &&
e.code() != error_code_master_tlog_failed && e.code() != error_code_coordinators_changed &&
e.code() != error_code_coordinated_state_conflict && e.code() != error_code_new_coordinators_timed_out) {
e.code() != error_code_coordinated_state_conflict && e.code() != error_code_new_coordinators_timed_out &&
e.code() != error_code_failed_to_progress) {
throw;
}
TEST(e.code() == error_code_failed_to_progress); // Commit proxy failed to progress
}
return Void();
}

View File

@ -316,7 +316,7 @@ struct MovableCoordinatedStateImpl {
Value oldQuorumState = wait(cs.read());
if (oldQuorumState != self->lastCSValue.get()) {
TEST(true); // Quorum change aborted by concurrent write to old coordination state
TraceEvent("QuorumChangeAbortedByConcurrency");
TraceEvent("QuorumChangeAbortedByConcurrency").log();
throw coordinated_state_conflict();
}

View File

@ -37,6 +37,30 @@
// This module implements coordinationServer() and the interfaces in CoordinationInterface.h
namespace {
class LivenessChecker {
double threshold;
AsyncVar<double> lastTime;
ACTOR static Future<Void> checkStuck(LivenessChecker const* self) {
loop {
choose {
when(wait(delayUntil(self->lastTime.get() + self->threshold))) { return Void(); }
when(wait(self->lastTime.onChange())) {}
}
}
}
public:
explicit LivenessChecker(double threshold) : threshold(threshold), lastTime(now()) {}
void confirmLiveness() { lastTime.set(now()); }
Future<Void> checkStuck() const { return checkStuck(this); }
};
} // namespace
struct GenerationRegVal {
UniqueGeneration readGen, writeGen;
Optional<Value> val;
@ -179,7 +203,10 @@ TEST_CASE("/fdbserver/Coordination/localGenerationReg/simple") {
ACTOR Future<Void> openDatabase(ClientData* db,
int* clientCount,
Reference<AsyncVar<bool>> hasConnectedClients,
OpenDatabaseCoordRequest req) {
OpenDatabaseCoordRequest req,
Future<Void> checkStuck) {
state ErrorOr<CachedSerialization<ClientDBInfo>> replyContents;
++(*clientCount);
hasConnectedClients->set(true);
@ -191,19 +218,27 @@ ACTOR Future<Void> openDatabase(ClientData* db,
while (db->clientInfo->get().read().id == req.knownClientInfoID &&
!db->clientInfo->get().read().forward.present()) {
choose {
when(wait(checkStuck)) {
replyContents = failed_to_progress();
break;
}
when(wait(yieldedFuture(db->clientInfo->onChange()))) {}
when(wait(delayJittered(SERVER_KNOBS->CLIENT_REGISTER_INTERVAL))) {
if (req.supportedVersions.size() > 0) {
db->clientStatusInfoMap.erase(req.reply.getEndpoint().getPrimaryAddress());
}
replyContents = db->clientInfo->get();
break;
} // The client might be long gone!
}
}
if (req.supportedVersions.size() > 0) {
db->clientStatusInfoMap.erase(req.reply.getEndpoint().getPrimaryAddress());
if (replyContents.present()) {
req.reply.send(replyContents.get());
} else {
req.reply.sendError(replyContents.getError());
}
req.reply.send(db->clientInfo->get());
if (--(*clientCount) == 0) {
hasConnectedClients->set(false);
}
@ -255,6 +290,7 @@ ACTOR Future<Void> leaderRegister(LeaderElectionRegInterface interf, Key key) {
state AsyncVar<Value> leaderInterface;
state Reference<AsyncVar<Optional<LeaderInfo>>> currentElectedLeader =
makeReference<AsyncVar<Optional<LeaderInfo>>>();
state LivenessChecker canConnectToLeader(SERVER_KNOBS->COORDINATOR_LEADER_CONNECTION_TIMEOUT);
loop choose {
when(OpenDatabaseCoordRequest req = waitNext(interf.openDatabase.getFuture())) {
@ -266,7 +302,8 @@ ACTOR Future<Void> leaderRegister(LeaderElectionRegInterface interf, Key key) {
leaderMon =
monitorLeaderForProxies(req.clusterKey, req.coordinators, &clientData, currentElectedLeader);
}
actors.add(openDatabase(&clientData, &clientCount, hasConnectedClients, req));
actors.add(
openDatabase(&clientData, &clientCount, hasConnectedClients, req, canConnectToLeader.checkStuck()));
}
}
when(ElectionResultRequest req = waitNext(interf.electionResult.getFuture())) {
@ -320,8 +357,11 @@ ACTOR Future<Void> leaderRegister(LeaderElectionRegInterface interf, Key key) {
// TODO: use notify to only send a heartbeat once per interval
availableLeaders.erase(LeaderInfo(req.prevChangeID));
availableLeaders.insert(req.myInfo);
req.reply.send(
LeaderHeartbeatReply{ currentNominee.present() && currentNominee.get().equalInternalId(req.myInfo) });
bool const isCurrentLeader = currentNominee.present() && currentNominee.get().equalInternalId(req.myInfo);
if (isCurrentLeader) {
canConnectToLeader.confirmLiveness();
}
req.reply.send(LeaderHeartbeatReply{ isCurrentLeader });
}
when(ForwardRequest req = waitNext(interf.forward.getFuture())) {
LeaderInfo newInfo;
@ -425,12 +465,18 @@ const KeyRangeRef fwdKeys(LiteralStringRef("\xff"
LiteralStringRef("\xff"
"fwe"));
// The time when forwarding was last set is stored in this range:
const KeyRangeRef fwdTimeKeys(LiteralStringRef("\xff"
"fwdTime"),
LiteralStringRef("\xff"
"fwdTimf"));
struct LeaderRegisterCollection {
// SOMEDAY: Factor this into a generic tool? Extend ActorCollection to support removal actions? What?
ActorCollection actors;
Map<Key, LeaderElectionRegInterface> registerInterfaces;
Map<Key, LeaderInfo> forward;
OnDemandStore* pStore;
Map<Key, double> forwardStartTime;
LeaderRegisterCollection(OnDemandStore* pStore) : actors(false), pStore(pStore) {}
@ -438,32 +484,58 @@ struct LeaderRegisterCollection {
if (!self->pStore->exists())
return Void();
OnDemandStore& store = *self->pStore;
RangeResult forwardingInfo = wait(store->readRange(fwdKeys));
state Future<Standalone<RangeResultRef>> forwardingInfoF = store->readRange(fwdKeys);
state Future<Standalone<RangeResultRef>> forwardingTimeF = store->readRange(fwdTimeKeys);
wait(success(forwardingInfoF) && success(forwardingTimeF));
Standalone<RangeResultRef> forwardingInfo = forwardingInfoF.get();
Standalone<RangeResultRef> forwardingTime = forwardingTimeF.get();
for (int i = 0; i < forwardingInfo.size(); i++) {
LeaderInfo forwardInfo;
forwardInfo.forward = true;
forwardInfo.serializedInfo = forwardingInfo[i].value;
self->forward[forwardingInfo[i].key.removePrefix(fwdKeys.begin)] = forwardInfo;
}
for (int i = 0; i < forwardingTime.size(); i++) {
double time = BinaryReader::fromStringRef<double>(forwardingTime[i].value, Unversioned());
self->forwardStartTime[forwardingTime[i].key.removePrefix(fwdTimeKeys.begin)] = time;
}
return Void();
}
Future<Void> onError() { return actors.getResult(); }
// Check if the this coordinator is no longer the leader, and the new one was stored in the "forward" keyspace.
// If the "forward" keyspace was set some time ago (as configured by knob), log an error to indicate the client is
// using a very old cluster file.
Optional<LeaderInfo> getForward(KeyRef key) {
auto i = forward.find(key);
auto t = forwardStartTime.find(key);
if (i == forward.end())
return Optional<LeaderInfo>();
if (t != forwardStartTime.end()) {
double forwardTime = t->value;
if (now() - forwardTime > SERVER_KNOBS->FORWARD_REQUEST_TOO_OLD) {
TraceEvent(SevWarnAlways, "AccessOldForward")
.detail("ForwardSetSecondsAgo", now() - forwardTime)
.detail("ForwardClusterKey", key);
}
}
return i->value;
}
// When the lead coordinator changes, store the new connection ID in the "fwd" keyspace.
// If a request arrives using an old connection id, resend it to the new coordinator using the stored connection id.
// Store when this change took place in the fwdTime keyspace.
ACTOR static Future<Void> setForward(LeaderRegisterCollection* self, KeyRef key, ClusterConnectionString conn) {
double forwardTime = now();
LeaderInfo forwardInfo;
forwardInfo.forward = true;
forwardInfo.serializedInfo = conn.toString();
self->forward[key] = forwardInfo;
self->forwardStartTime[key] = forwardTime;
OnDemandStore& store = *self->pStore;
store->set(KeyValueRef(key.withPrefix(fwdKeys.begin), conn.toString()));
store->set(KeyValueRef(key.withPrefix(fwdTimeKeys.begin), BinaryWriter::toValue(forwardTime, Unversioned())));
wait(store->commit());
return Void();
}

View File

@ -173,7 +173,7 @@ class WorkPool final : public IThreadPool, public ReferenceCounted<WorkPool<Thre
}
}
TraceEvent("CoroStop");
TraceEvent("CoroStop").log();
delete userData;
stopped.send(Void());
return;
@ -181,14 +181,14 @@ class WorkPool final : public IThreadPool, public ReferenceCounted<WorkPool<Thre
TraceEvent("WorkPoolError").error(e, true);
error.sendError(e);
} catch (...) {
TraceEvent("WorkPoolError");
TraceEvent("WorkPoolError").log();
error.sendError(unknown_error());
}
try {
delete userData;
} catch (...) {
TraceEvent(SevError, "WorkPoolErrorShutdownError");
TraceEvent(SevError, "WorkPoolErrorShutdownError").log();
}
stopped.send(Void());
}

View File

@ -149,7 +149,7 @@ class WorkPool final : public IThreadPool, public ReferenceCounted<WorkPool<Thre
}
}
TraceEvent("CoroStop");
TraceEvent("CoroStop").log();
delete userData;
stopped.send(Void());
return;
@ -157,14 +157,14 @@ class WorkPool final : public IThreadPool, public ReferenceCounted<WorkPool<Thre
TraceEvent("WorkPoolError").error(e, true);
error.sendError(e);
} catch (...) {
TraceEvent("WorkPoolError");
TraceEvent("WorkPoolError").log();
error.sendError(unknown_error());
}
try {
delete userData;
} catch (...) {
TraceEvent(SevError, "WorkPoolErrorShutdownError");
TraceEvent(SevError, "WorkPoolErrorShutdownError").log();
}
stopped.send(Void());
}

View File

@ -190,7 +190,7 @@ public:
: servers(servers), healthy(true), priority(SERVER_KNOBS->PRIORITY_TEAM_HEALTHY), wrongConfiguration(false),
id(deterministicRandom()->randomUniqueID()) {
if (servers.empty()) {
TraceEvent(SevInfo, "ConstructTCTeamFromEmptyServers");
TraceEvent(SevInfo, "ConstructTCTeamFromEmptyServers").log();
}
serverIDs.reserve(servers.size());
for (int i = 0; i < servers.size(); i++) {
@ -445,7 +445,7 @@ ACTOR Future<Reference<InitialDataDistribution>> getInitialDataDistribution(Data
}
if (!result->mode || !ddEnabledState->isDDEnabled()) {
// DD can be disabled persistently (result->mode = 0) or transiently (isDDEnabled() = 0)
TraceEvent(SevDebug, "GetInitialDataDistribution_DisabledDD");
TraceEvent(SevDebug, "GetInitialDataDistribution_DisabledDD").log();
return result;
}
@ -475,7 +475,7 @@ ACTOR Future<Reference<InitialDataDistribution>> getInitialDataDistribution(Data
wait(tr.onError(e));
ASSERT(!succeeded); // We shouldn't be retrying if we have already started modifying result in this loop
TraceEvent("GetInitialTeamsRetry", distributorId);
TraceEvent("GetInitialTeamsRetry", distributorId).log();
}
}
@ -4160,14 +4160,14 @@ ACTOR Future<Void> monitorPerpetualStorageWiggle(DDTeamCollection* teamCollectio
&stopWiggleSignal, finishStorageWiggleSignal.getFuture(), teamCollection));
collection.add(perpetualStorageWiggler(
&stopWiggleSignal, finishStorageWiggleSignal, teamCollection, ddEnabledState));
TraceEvent("PerpetualStorageWiggleOpen", teamCollection->distributorId);
TraceEvent("PerpetualStorageWiggleOpen", teamCollection->distributorId).log();
} else if (speed == 0) {
if (!stopWiggleSignal.get()) {
stopWiggleSignal.set(true);
wait(collection.signalAndReset());
teamCollection->pauseWiggle->set(true);
}
TraceEvent("PerpetualStorageWiggleClose", teamCollection->distributorId);
TraceEvent("PerpetualStorageWiggleClose", teamCollection->distributorId).log();
}
wait(watchFuture);
break;
@ -4262,7 +4262,7 @@ ACTOR Future<Void> waitHealthyZoneChange(DDTeamCollection* self) {
auto p = decodeHealthyZoneValue(val.get());
if (p.first == ignoreSSFailuresZoneString) {
// healthyZone is now overloaded for DD diabling purpose, which does not timeout
TraceEvent("DataDistributionDisabledForStorageServerFailuresStart", self->distributorId);
TraceEvent("DataDistributionDisabledForStorageServerFailuresStart", self->distributorId).log();
healthyZoneTimeout = Never();
} else if (p.second > tr.getReadVersion().get()) {
double timeoutSeconds =
@ -4277,15 +4277,15 @@ ACTOR Future<Void> waitHealthyZoneChange(DDTeamCollection* self) {
}
} else if (self->healthyZone.get().present()) {
// maintenance hits timeout
TraceEvent("MaintenanceZoneEndTimeout", self->distributorId);
TraceEvent("MaintenanceZoneEndTimeout", self->distributorId).log();
self->healthyZone.set(Optional<Key>());
}
} else if (self->healthyZone.get().present()) {
// `healthyZone` has been cleared
if (self->healthyZone.get().get() == ignoreSSFailuresZoneString) {
TraceEvent("DataDistributionDisabledForStorageServerFailuresEnd", self->distributorId);
TraceEvent("DataDistributionDisabledForStorageServerFailuresEnd", self->distributorId).log();
} else {
TraceEvent("MaintenanceZoneEndManualClear", self->distributorId);
TraceEvent("MaintenanceZoneEndManualClear", self->distributorId).log();
}
self->healthyZone.set(Optional<Key>());
}
@ -4432,7 +4432,7 @@ ACTOR Future<Void> storageServerFailureTracker(DDTeamCollection* self,
status->isFailed = false;
} else if (self->clearHealthyZoneFuture.isReady()) {
self->clearHealthyZoneFuture = clearHealthyZone(self->cx);
TraceEvent("MaintenanceZoneCleared", self->distributorId);
TraceEvent("MaintenanceZoneCleared", self->distributorId).log();
self->healthyZone.set(Optional<Key>());
}
}
@ -5491,7 +5491,7 @@ ACTOR Future<Void> serverGetTeamRequests(TeamCollectionInterface tci, DDTeamColl
}
ACTOR Future<Void> remoteRecovered(Reference<AsyncVar<ServerDBInfo> const> db) {
TraceEvent("DDTrackerStarting");
TraceEvent("DDTrackerStarting").log();
while (db->get().recoveryState < RecoveryState::ALL_LOGS_RECRUITED) {
TraceEvent("DDTrackerStarting").detail("RecoveryState", (int)db->get().recoveryState);
wait(db->onChange());
@ -5625,7 +5625,7 @@ ACTOR Future<Void> waitForDataDistributionEnabled(Database cx, const DDEnabledSt
try {
Optional<Value> mode = wait(tr.get(dataDistributionModeKey));
if (!mode.present() && ddEnabledState->isDDEnabled()) {
TraceEvent("WaitForDDEnabledSucceeded");
TraceEvent("WaitForDDEnabledSucceeded").log();
return Void();
}
if (mode.present()) {
@ -5636,7 +5636,7 @@ ACTOR Future<Void> waitForDataDistributionEnabled(Database cx, const DDEnabledSt
.detail("Mode", m)
.detail("IsDDEnabled", ddEnabledState->isDDEnabled());
if (m && ddEnabledState->isDDEnabled()) {
TraceEvent("WaitForDDEnabledSucceeded");
TraceEvent("WaitForDDEnabledSucceeded").log();
return Void();
}
}
@ -5711,7 +5711,7 @@ ACTOR Future<Void> debugCheckCoalescing(Database cx) {
.detail("Value", ranges[j].value);
}
TraceEvent("DoneCheckingCoalescing");
TraceEvent("DoneCheckingCoalescing").log();
return Void();
} catch (Error& e) {
wait(tr.onError(e));
@ -5807,10 +5807,10 @@ ACTOR Future<Void> dataDistribution(Reference<DataDistributorData> self,
state Promise<UID> removeFailedServer;
try {
loop {
TraceEvent("DDInitTakingMoveKeysLock", self->ddId);
TraceEvent("DDInitTakingMoveKeysLock", self->ddId).log();
MoveKeysLock lock_ = wait(takeMoveKeysLock(cx, self->ddId));
lock = lock_;
TraceEvent("DDInitTookMoveKeysLock", self->ddId);
TraceEvent("DDInitTookMoveKeysLock", self->ddId).log();
DatabaseConfiguration configuration_ = wait(getDatabaseConfiguration(cx));
configuration = configuration_;
@ -5854,7 +5854,7 @@ ACTOR Future<Void> dataDistribution(Reference<DataDistributorData> self,
}
}
TraceEvent("DDInitUpdatedReplicaKeys", self->ddId);
TraceEvent("DDInitUpdatedReplicaKeys", self->ddId).log();
Reference<InitialDataDistribution> initData_ = wait(getInitialDataDistribution(
cx,
self->ddId,
@ -5882,7 +5882,7 @@ ACTOR Future<Void> dataDistribution(Reference<DataDistributorData> self,
// mode may be set true by system operator using fdbcli and isDDEnabled() set to true
break;
}
TraceEvent("DataDistributionDisabled", self->ddId);
TraceEvent("DataDistributionDisabled", self->ddId).log();
TraceEvent("MovingData", self->ddId)
.detail("InFlight", 0)
@ -5919,7 +5919,7 @@ ACTOR Future<Void> dataDistribution(Reference<DataDistributorData> self,
.trackLatest("TotalDataInFlightRemote");
wait(waitForDataDistributionEnabled(cx, ddEnabledState));
TraceEvent("DataDistributionEnabled");
TraceEvent("DataDistributionEnabled").log();
}
// When/If this assertion fails, Evan owes Ben a pat on the back for his foresight
@ -6256,7 +6256,7 @@ ACTOR Future<Void> ddSnapCreateCore(DistributorSnapRequest snapReq, Reference<As
}
wait(waitForAll(enablePops));
} catch (Error& error) {
TraceEvent(SevDebug, "IgnoreEnableTLogPopFailure");
TraceEvent(SevDebug, "IgnoreEnableTLogPopFailure").log();
}
}
throw e;
@ -6271,7 +6271,7 @@ ACTOR Future<Void> ddSnapCreate(DistributorSnapRequest snapReq,
if (!ddEnabledState->setDDEnabled(false, snapReq.snapUID)) {
// disable DD before doing snapCreate, if previous snap req has already disabled DD then this operation fails
// here
TraceEvent("SnapDDSetDDEnabledFailedInMemoryCheck");
TraceEvent("SnapDDSetDDEnabledFailedInMemoryCheck").log();
snapReq.reply.sendError(operation_failed());
return Void();
}
@ -6344,18 +6344,18 @@ bool _exclusionSafetyCheck(vector<UID>& excludeServerIDs, DDTeamCollection* team
ACTOR Future<Void> ddExclusionSafetyCheck(DistributorExclusionSafetyCheckRequest req,
Reference<DataDistributorData> self,
Database cx) {
TraceEvent("DDExclusionSafetyCheckBegin", self->ddId);
TraceEvent("DDExclusionSafetyCheckBegin", self->ddId).log();
vector<StorageServerInterface> ssis = wait(getStorageServers(cx));
DistributorExclusionSafetyCheckReply reply(true);
if (!self->teamCollection) {
TraceEvent("DDExclusionSafetyCheckTeamCollectionInvalid", self->ddId);
TraceEvent("DDExclusionSafetyCheckTeamCollectionInvalid", self->ddId).log();
reply.safe = false;
req.reply.send(reply);
return Void();
}
// If there is only 1 team, unsafe to mark failed: team building can get stuck due to lack of servers left
if (self->teamCollection->teams.size() <= 1) {
TraceEvent("DDExclusionSafetyCheckNotEnoughTeams", self->ddId);
TraceEvent("DDExclusionSafetyCheckNotEnoughTeams", self->ddId).log();
reply.safe = false;
req.reply.send(reply);
return Void();
@ -6371,7 +6371,7 @@ ACTOR Future<Void> ddExclusionSafetyCheck(DistributorExclusionSafetyCheckRequest
}
}
reply.safe = _exclusionSafetyCheck(excludeServerIDs, self->teamCollection);
TraceEvent("DDExclusionSafetyCheckFinish", self->ddId);
TraceEvent("DDExclusionSafetyCheckFinish", self->ddId).log();
req.reply.send(reply);
return Void();
}

View File

@ -300,7 +300,7 @@ ACTOR Future<Void> getRate(UID myID,
TraceEvent("ProxyRatekeeperChanged", myID).detail("RKID", db->get().ratekeeper.get().id());
nextRequestTimer = Void(); // trigger GetRate request
} else {
TraceEvent("ProxyRatekeeperDied", myID);
TraceEvent("ProxyRatekeeperDied", myID).log();
nextRequestTimer = Never();
reply = Never();
}

View File

@ -141,7 +141,7 @@ public:
Future<Void> commit(bool sequential) override {
if (getAvailableSize() <= 0) {
TraceEvent(SevError, "KeyValueStoreMemory_OutOfSpace", id);
TraceEvent(SevError, "KeyValueStoreMemory_OutOfSpace", id).log();
return Never();
}
@ -605,7 +605,7 @@ private:
if (zeroFillSize) {
if (exactRecovery) {
TraceEvent(SevError, "KVSMemExpectedExact", self->id);
TraceEvent(SevError, "KVSMemExpectedExact", self->id).log();
ASSERT(false);
}

View File

@ -727,7 +727,7 @@ struct RawCursor {
try {
db.checkError("BtreeCloseCursor", sqlite3BtreeCloseCursor(cursor));
} catch (...) {
TraceEvent(SevError, "RawCursorDestructionError");
TraceEvent(SevError, "RawCursorDestructionError").log();
}
delete[](char*) cursor;
}
@ -1737,9 +1737,9 @@ private:
freeListPages(freeListPages), cursor(nullptr), dbgid(dbgid), readThreads(*pReadThreads),
checkAllChecksumsOnOpen(checkAllChecksumsOnOpen), checkIntegrityOnOpen(checkIntegrityOnOpen) {}
~Writer() override {
TraceEvent("KVWriterDestroying", dbgid);
TraceEvent("KVWriterDestroying", dbgid).log();
delete cursor;
TraceEvent("KVWriterDestroyed", dbgid);
TraceEvent("KVWriterDestroyed", dbgid).log();
}
void init() override {
if (checkAllChecksumsOnOpen) {

View File

@ -156,7 +156,7 @@ ACTOR Future<Void> tryBecomeLeaderInternal(ServerCoordinators coordinators,
}
if (leader.present() && leader.get().second && leader.get().first.equalInternalId(myInfo)) {
TraceEvent("BecomingLeader", myInfo.changeID);
TraceEvent("BecomingLeader", myInfo.changeID).log();
ASSERT(leader.get().first.serializedInfo == proposedSerializedInterface);
outSerializedLeader->set(leader.get().first.serializedInfo);
iAmLeader = true;
@ -184,7 +184,7 @@ ACTOR Future<Void> tryBecomeLeaderInternal(ServerCoordinators coordinators,
when(wait(nominees->onChange())) {}
when(wait(badCandidateTimeout.isValid() ? badCandidateTimeout : Never())) {
TEST(true); // Bad candidate timeout
TraceEvent("LeaderBadCandidateTimeout", myInfo.changeID);
TraceEvent("LeaderBadCandidateTimeout", myInfo.changeID).log();
break;
}
when(wait(candidacies)) { ASSERT(false); }
@ -225,7 +225,7 @@ ACTOR Future<Void> tryBecomeLeaderInternal(ServerCoordinators coordinators,
//TraceEvent("StillLeader", myInfo.changeID);
} // We are still leader
when(wait(quorum(false_heartbeats, false_heartbeats.size() / 2 + 1))) {
TraceEvent("ReplacedAsLeader", myInfo.changeID);
TraceEvent("ReplacedAsLeader", myInfo.changeID).log();
break;
} // We are definitely not leader
when(wait(delay(SERVER_KNOBS->POLLING_FREQUENCY))) {
@ -243,7 +243,7 @@ ACTOR Future<Void> tryBecomeLeaderInternal(ServerCoordinators coordinators,
.detail("Coordinator",
coordinators.leaderElectionServers[i].candidacy.getEndpoint().getPrimaryAddress());
}
TraceEvent("ReleasingLeadership", myInfo.changeID);
TraceEvent("ReleasingLeadership", myInfo.changeID).log();
break;
} // Give up on being leader, because we apparently have poor communications
when(wait(asyncPriorityInfo->onChange())) {}

View File

@ -291,7 +291,7 @@ public:
if (allLocations) {
// special handling for allLocations
TraceEvent("AllLocationsSet");
TraceEvent("AllLocationsSet").log();
for (int i = 0; i < logServers.size(); i++) {
newLocations.push_back(i);
}

View File

@ -374,7 +374,7 @@ ACTOR Future<Void> updateMetricRegistration(Database cx, MetricsConfig* config,
ACTOR Future<Void> runMetrics(Future<Database> fcx, Key prefix) {
// Never log to an empty prefix, it's pretty much always a bad idea.
if (prefix.size() == 0) {
TraceEvent(SevWarnAlways, "TDMetricsRefusingEmptyPrefix");
TraceEvent(SevWarnAlways, "TDMetricsRefusingEmptyPrefix").log();
return Void();
}

View File

@ -100,7 +100,7 @@ ACTOR static Future<Void> checkMoveKeysLock(Transaction* tr,
const DDEnabledState* ddEnabledState,
bool isWrite = true) {
if (!ddEnabledState->isDDEnabled()) {
TraceEvent(SevDebug, "DDDisabledByInMemoryCheck");
TraceEvent(SevDebug, "DDDisabledByInMemoryCheck").log();
throw movekeys_conflict();
}
Optional<Value> readVal = wait(tr->get(moveKeysLockOwnerKey));
@ -1143,7 +1143,7 @@ ACTOR Future<std::pair<Version, Tag>> addStorageServer(Database cx, StorageServe
if (SERVER_KNOBS->TSS_HACK_IDENTITY_MAPPING) {
// THIS SHOULD NEVER BE ENABLED IN ANY NON-TESTING ENVIRONMENT
TraceEvent(SevError, "TSSIdentityMappingEnabled");
TraceEvent(SevError, "TSSIdentityMappingEnabled").log();
tssMapDB.set(tr, server.id(), server.id());
}
}
@ -1268,7 +1268,7 @@ ACTOR Future<Void> removeStorageServer(Database cx,
if (SERVER_KNOBS->TSS_HACK_IDENTITY_MAPPING) {
// THIS SHOULD NEVER BE ENABLED IN ANY NON-TESTING ENVIRONMENT
TraceEvent(SevError, "TSSIdentityMappingEnabled");
TraceEvent(SevError, "TSSIdentityMappingEnabled").log();
tssMapDB.erase(tr, serverID);
} else if (tssPairID.present()) {
// remove the TSS from the mapping
@ -1440,7 +1440,7 @@ void seedShardServers(Arena& arena, CommitTransactionRef& tr, vector<StorageServ
tr.set(arena, serverListKeyFor(s.id()), serverListValue(s));
if (SERVER_KNOBS->TSS_HACK_IDENTITY_MAPPING) {
// THIS SHOULD NEVER BE ENABLED IN ANY NON-TESTING ENVIRONMENT
TraceEvent(SevError, "TSSIdentityMappingEnabled");
TraceEvent(SevError, "TSSIdentityMappingEnabled").log();
// hack key-backed map here since we can't really change CommitTransactionRef to a RYW transaction
Key uidRef = Codec<UID>::pack(s.id()).pack();
tr.set(arena, uidRef.withPrefix(tssMappingKeys.begin), uidRef);

View File

@ -1387,7 +1387,7 @@ ACTOR Future<Void> restorePersistentState(TLogData* self, LocalityData locality)
state KeyRange tagKeys;
// PERSIST: Read basic state from persistentData; replay persistentQueue but don't erase it
TraceEvent("TLogRestorePersistentState", self->dbgid);
TraceEvent("TLogRestorePersistentState", self->dbgid).log();
IKeyValueStore* storage = self->persistentData;
state Future<Optional<Value>> fFormat = storage->readValue(persistFormat.key);
@ -1575,7 +1575,7 @@ ACTOR Future<Void> tLog(IKeyValueStore* persistentData,
state TLogData self(tlogId, workerID, persistentData, persistentQueue, db);
state Future<Void> error = actorCollection(self.sharedActors.getFuture());
TraceEvent("SharedTlog", tlogId);
TraceEvent("SharedTlog", tlogId).log();
try {
wait(restorePersistentState(&self, locality));

View File

@ -876,7 +876,7 @@ ACTOR Future<Void> tLogPop(TLogData* self, TLogPopRequest req, Reference<LogData
// timeout check for ignorePopRequest
if (self->ignorePopRequest && (g_network->now() > self->ignorePopDeadline)) {
TraceEvent("EnableTLogPlayAllIgnoredPops");
TraceEvent("EnableTLogPlayAllIgnoredPops").log();
// use toBePopped and issue all the pops
state std::map<Tag, Version>::iterator it;
state vector<Future<Void>> ignoredPops;
@ -1666,7 +1666,7 @@ ACTOR Future<Void> initPersistentState(TLogData* self, Reference<LogData> logDat
updatePersistentPopped(self, logData, logData->getTagData(tag));
}
TraceEvent("TLogInitCommit", logData->logId);
TraceEvent("TLogInitCommit", logData->logId).log();
wait(ioTimeoutError(self->persistentData->commit(), SERVER_KNOBS->TLOG_MAX_CREATE_DURATION));
return Void();
}
@ -1869,7 +1869,7 @@ ACTOR Future<Void> tLogEnablePopReq(TLogEnablePopRequest enablePopReq, TLogData*
enablePopReq.reply.sendError(operation_failed());
return Void();
}
TraceEvent("EnableTLogPlayAllIgnoredPops2");
TraceEvent("EnableTLogPlayAllIgnoredPops2").log();
// use toBePopped and issue all the pops
std::map<Tag, Version>::iterator it;
vector<Future<Void>> ignoredPops;
@ -1923,7 +1923,7 @@ ACTOR Future<Void> serveTLogInterface(TLogData* self,
}
if (!logData->isPrimary && logData->stopped) {
TraceEvent("TLogAlreadyStopped", self->dbgid);
TraceEvent("TLogAlreadyStopped", self->dbgid).log();
logData->removed = logData->removed && logData->logSystem->get()->endEpoch();
}
} else {
@ -2198,22 +2198,22 @@ ACTOR Future<Void> tLogCore(TLogData* self,
}
ACTOR Future<Void> checkEmptyQueue(TLogData* self) {
TraceEvent("TLogCheckEmptyQueueBegin", self->dbgid);
TraceEvent("TLogCheckEmptyQueueBegin", self->dbgid).log();
try {
TLogQueueEntry r = wait(self->persistentQueue->readNext(self));
throw internal_error();
} catch (Error& e) {
if (e.code() != error_code_end_of_stream)
throw;
TraceEvent("TLogCheckEmptyQueueEnd", self->dbgid);
TraceEvent("TLogCheckEmptyQueueEnd", self->dbgid).log();
return Void();
}
}
ACTOR Future<Void> checkRecovered(TLogData* self) {
TraceEvent("TLogCheckRecoveredBegin", self->dbgid);
TraceEvent("TLogCheckRecoveredBegin", self->dbgid).log();
Optional<Value> v = wait(self->persistentData->readValue(StringRef()));
TraceEvent("TLogCheckRecoveredEnd", self->dbgid);
TraceEvent("TLogCheckRecoveredEnd", self->dbgid).log();
return Void();
}
@ -2227,7 +2227,7 @@ ACTOR Future<Void> restorePersistentState(TLogData* self,
state KeyRange tagKeys;
// PERSIST: Read basic state from persistentData; replay persistentQueue but don't erase it
TraceEvent("TLogRestorePersistentState", self->dbgid);
TraceEvent("TLogRestorePersistentState", self->dbgid).log();
state IKeyValueStore* storage = self->persistentData;
wait(storage->init());
@ -2585,7 +2585,7 @@ ACTOR Future<Void> tLogStart(TLogData* self, InitializeTLogRequest req, Locality
logData->removed = rejoinMasters(self, recruited, req.epoch, Future<Void>(Void()), req.isPrimary);
self->queueOrder.push_back(recruited.id());
TraceEvent("TLogStart", logData->logId);
TraceEvent("TLogStart", logData->logId).log();
state Future<Void> updater;
state bool pulledRecoveryVersions = false;
try {
@ -2730,7 +2730,7 @@ ACTOR Future<Void> tLog(IKeyValueStore* persistentData,
state TLogData self(tlogId, workerID, persistentData, persistentQueue, db, degraded, folder);
state Future<Void> error = actorCollection(self.sharedActors.getFuture());
TraceEvent("SharedTlog", tlogId);
TraceEvent("SharedTlog", tlogId).log();
try {
if (restoreFromDisk) {
wait(restorePersistentState(&self, locality, oldLog, recovered, tlogRequests));

View File

@ -1464,7 +1464,7 @@ ACTOR Future<Void> tLogPop(TLogData* self, TLogPopRequest req, Reference<LogData
// timeout check for ignorePopRequest
if (self->ignorePopRequest && (g_network->now() > self->ignorePopDeadline)) {
TraceEvent("EnableTLogPlayAllIgnoredPops");
TraceEvent("EnableTLogPlayAllIgnoredPops").log();
// use toBePopped and issue all the pops
std::map<Tag, Version>::iterator it;
vector<Future<Void>> ignoredPops;
@ -1871,7 +1871,7 @@ ACTOR Future<Void> watchDegraded(TLogData* self) {
wait(lowPriorityDelay(SERVER_KNOBS->TLOG_DEGRADED_DURATION));
TraceEvent(SevWarnAlways, "TLogDegraded", self->dbgid);
TraceEvent(SevWarnAlways, "TLogDegraded", self->dbgid).log();
TEST(true); // TLog degraded
self->degraded->set(true);
return Void();
@ -2109,7 +2109,7 @@ ACTOR Future<Void> initPersistentState(TLogData* self, Reference<LogData> logDat
updatePersistentPopped(self, logData, logData->getTagData(tag));
}
TraceEvent("TLogInitCommit", logData->logId);
TraceEvent("TLogInitCommit", logData->logId).log();
wait(self->persistentData->commit());
return Void();
}
@ -2312,7 +2312,7 @@ ACTOR Future<Void> tLogEnablePopReq(TLogEnablePopRequest enablePopReq, TLogData*
enablePopReq.reply.sendError(operation_failed());
return Void();
}
TraceEvent("EnableTLogPlayAllIgnoredPops2");
TraceEvent("EnableTLogPlayAllIgnoredPops2").log();
// use toBePopped and issue all the pops
std::map<Tag, Version>::iterator it;
state vector<Future<Void>> ignoredPops;
@ -2657,7 +2657,7 @@ ACTOR Future<Void> tLogCore(TLogData* self,
}
ACTOR Future<Void> checkEmptyQueue(TLogData* self) {
TraceEvent("TLogCheckEmptyQueueBegin", self->dbgid);
TraceEvent("TLogCheckEmptyQueueBegin", self->dbgid).log();
try {
bool recoveryFinished = wait(self->persistentQueue->initializeRecovery(0));
if (recoveryFinished)
@ -2667,15 +2667,15 @@ ACTOR Future<Void> checkEmptyQueue(TLogData* self) {
} catch (Error& e) {
if (e.code() != error_code_end_of_stream)
throw;
TraceEvent("TLogCheckEmptyQueueEnd", self->dbgid);
TraceEvent("TLogCheckEmptyQueueEnd", self->dbgid).log();
return Void();
}
}
ACTOR Future<Void> checkRecovered(TLogData* self) {
TraceEvent("TLogCheckRecoveredBegin", self->dbgid);
TraceEvent("TLogCheckRecoveredBegin", self->dbgid).log();
Optional<Value> v = wait(self->persistentData->readValue(StringRef()));
TraceEvent("TLogCheckRecoveredEnd", self->dbgid);
TraceEvent("TLogCheckRecoveredEnd", self->dbgid).log();
return Void();
}
@ -2690,7 +2690,7 @@ ACTOR Future<Void> restorePersistentState(TLogData* self,
state KeyRange tagKeys;
// PERSIST: Read basic state from persistentData; replay persistentQueue but don't erase it
TraceEvent("TLogRestorePersistentState", self->dbgid);
TraceEvent("TLogRestorePersistentState", self->dbgid).log();
state IKeyValueStore* storage = self->persistentData;
wait(storage->init());
@ -3219,7 +3219,7 @@ ACTOR Future<Void> tLog(IKeyValueStore* persistentData,
state TLogData self(tlogId, workerID, persistentData, persistentQueue, db, degraded, folder);
state Future<Void> error = actorCollection(self.sharedActors.getFuture());
TraceEvent("SharedTlog", tlogId);
TraceEvent("SharedTlog", tlogId).log();
try {
if (restoreFromDisk) {
wait(restorePersistentState(&self, locality, oldLog, recovered, tlogRequests));

View File

@ -576,7 +576,7 @@ ACTOR Future<Void> repairDeadDatacenter(Database cx,
// FIXME: the primary and remote can both be considered dead because excludes are not handled properly by the
// datacenterDead function
if (primaryDead && remoteDead) {
TraceEvent(SevWarnAlways, "CannotDisableFearlessConfiguration");
TraceEvent(SevWarnAlways, "CannotDisableFearlessConfiguration").log();
return Void();
}
if (primaryDead || remoteDead) {
@ -647,7 +647,7 @@ ACTOR Future<Void> waitForQuietDatabase(Database cx,
loop {
try {
TraceEvent("QuietDatabaseWaitingOnDataDistributor");
TraceEvent("QuietDatabaseWaitingOnDataDistributor").log();
WorkerInterface distributorWorker = wait(getDataDistributorWorker(cx, dbInfo));
UID distributorUID = dbInfo->get().distributor.get().id();
TraceEvent("QuietDatabaseGotDataDistributor", distributorUID)

View File

@ -801,14 +801,14 @@ ACTOR Future<Void> monitorThrottlingChanges(RatekeeperData* self) {
autoThrottlingEnabled.get().get() == LiteralStringRef("0")) {
TEST(true); // Auto-throttling disabled
if (self->autoThrottlingEnabled) {
TraceEvent("AutoTagThrottlingDisabled", self->id);
TraceEvent("AutoTagThrottlingDisabled", self->id).log();
}
self->autoThrottlingEnabled = false;
} else if (autoThrottlingEnabled.get().present() &&
autoThrottlingEnabled.get().get() == LiteralStringRef("1")) {
TEST(true); // Auto-throttling enabled
if (!self->autoThrottlingEnabled) {
TraceEvent("AutoTagThrottlingEnabled", self->id);
TraceEvent("AutoTagThrottlingEnabled", self->id).log();
}
self->autoThrottlingEnabled = true;
} else {
@ -870,7 +870,7 @@ ACTOR Future<Void> monitorThrottlingChanges(RatekeeperData* self) {
committed = true;
wait(watchFuture);
TraceEvent("RatekeeperThrottleSignaled", self->id);
TraceEvent("RatekeeperThrottleSignaled", self->id).log();
TEST(true); // Tag throttle changes detected
break;
} catch (Error& e) {

View File

@ -473,7 +473,7 @@ ACTOR static Future<Void> precomputeMutationsResult(Reference<ApplierBatchData>
}
}
TraceEvent("FastRestoreApplierGetAndComputeStagingKeysWaitOn", applierID);
TraceEvent("FastRestoreApplierGetAndComputeStagingKeysWaitOn", applierID).log();
wait(waitForAll(fGetAndComputeKeys));
// Sanity check all stagingKeys have been precomputed

View File

@ -317,7 +317,7 @@ struct ApplierBatchData : public ReferenceCounted<ApplierBatchData> {
return false;
}
}
TraceEvent("FastRestoreApplierAllKeysPrecomputed");
TraceEvent("FastRestoreApplierAllKeysPrecomputed").log();
return true;
}

View File

@ -714,7 +714,7 @@ ACTOR static Future<std::vector<RestoreRequest>> collectRestoreRequests(Database
// restoreRequestTriggerKey should already been set
loop {
try {
TraceEvent("FastRestoreControllerPhaseCollectRestoreRequestsWait");
TraceEvent("FastRestoreControllerPhaseCollectRestoreRequestsWait").log();
tr.setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
tr.setOption(FDBTransactionOptions::LOCK_AWARE);
@ -732,7 +732,7 @@ ACTOR static Future<std::vector<RestoreRequest>> collectRestoreRequests(Database
}
break;
} else {
TraceEvent(SevError, "FastRestoreControllerPhaseCollectRestoreRequestsEmptyRequests");
TraceEvent(SevError, "FastRestoreControllerPhaseCollectRestoreRequestsEmptyRequests").log();
wait(delay(5.0));
}
} catch (Error& e) {
@ -1079,7 +1079,7 @@ ACTOR static Future<Void> notifyLoadersVersionBatchFinished(std::map<UID, Restor
// Terminate those roles if terminate = true
ACTOR static Future<Void> notifyRestoreCompleted(Reference<RestoreControllerData> self, bool terminate = false) {
std::vector<std::pair<UID, RestoreFinishRequest>> requests;
TraceEvent("FastRestoreControllerPhaseNotifyRestoreCompletedStart");
TraceEvent("FastRestoreControllerPhaseNotifyRestoreCompletedStart").log();
for (auto& loader : self->loadersInterf) {
requests.emplace_back(loader.first, RestoreFinishRequest(terminate));
}
@ -1099,7 +1099,7 @@ ACTOR static Future<Void> notifyRestoreCompleted(Reference<RestoreControllerData
wait(endLoaders && endAppliers);
}
TraceEvent("FastRestoreControllerPhaseNotifyRestoreCompletedDone");
TraceEvent("FastRestoreControllerPhaseNotifyRestoreCompletedDone").log();
return Void();
}
@ -1128,7 +1128,7 @@ ACTOR static Future<Void> signalRestoreCompleted(Reference<RestoreControllerData
}
}
TraceEvent("FastRestoreControllerAllRestoreCompleted");
TraceEvent("FastRestoreControllerAllRestoreCompleted").log();
return Void();
}

View File

@ -277,7 +277,7 @@ ACTOR static Future<Void> waitOnRestoreRequests(Database cx, UID nodeID = UID())
state Optional<Value> numRequests;
// wait for the restoreRequestTriggerKey to be set by the client/test workload
TraceEvent("FastRestoreWaitOnRestoreRequest", nodeID);
TraceEvent("FastRestoreWaitOnRestoreRequest", nodeID).log();
loop {
try {
tr.reset();
@ -288,9 +288,9 @@ ACTOR static Future<Void> waitOnRestoreRequests(Database cx, UID nodeID = UID())
if (!numRequests.present()) {
state Future<Void> watchForRestoreRequest = tr.watch(restoreRequestTriggerKey);
wait(tr.commit());
TraceEvent(SevInfo, "FastRestoreWaitOnRestoreRequestTriggerKey", nodeID);
TraceEvent(SevInfo, "FastRestoreWaitOnRestoreRequestTriggerKey", nodeID).log();
wait(watchForRestoreRequest);
TraceEvent(SevInfo, "FastRestoreDetectRestoreRequestTriggerKeyChanged", nodeID);
TraceEvent(SevInfo, "FastRestoreDetectRestoreRequestTriggerKeyChanged", nodeID).log();
} else {
TraceEvent(SevInfo, "FastRestoreRestoreRequestTriggerKey", nodeID)
.detail("TriggerKey", numRequests.get().toString());

View File

@ -408,7 +408,7 @@ ACTOR Future<Void> runDr(Reference<ClusterConnectionFile> connFile) {
wait(delay(1.0));
}
TraceEvent("StoppingDrAgents");
TraceEvent("StoppingDrAgents").log();
for (auto it : agentFutures) {
it.cancel();
@ -2205,7 +2205,7 @@ ACTOR void setupAndRun(std::string dataFolder,
TraceEvent(SevError, "SetupAndRunError").error(e);
}
TraceEvent("SimulatedSystemDestruct");
TraceEvent("SimulatedSystemDestruct").log();
g_simulator.stop();
destructed = true;
wait(Never());

View File

@ -425,7 +425,7 @@ ACTOR Future<Version> waitForVersion(StorageCacheData* data, Version version) {
}
if (deterministicRandom()->random01() < 0.001)
TraceEvent("WaitForVersion1000x");
TraceEvent("WaitForVersion1000x").log();
choose {
when(wait(data->version.whenAtLeast(version))) {
// FIXME: A bunch of these can block with or without the following delay 0.
@ -1363,7 +1363,7 @@ ACTOR Future<Void> fetchKeys(StorageCacheData* data, AddingCacheRange* cacheRang
// doesn't fit on this cache. For now, we can just fail this cache role. In future, we should think
// about evicting some data to make room for the remaining keys
if (this_block.more) {
TraceEvent(SevDebug, "CacheWarmupMoreDataThanLimit", data->thisServerID);
TraceEvent(SevDebug, "CacheWarmupMoreDataThanLimit", data->thisServerID).log();
throw please_reboot();
}
@ -1780,7 +1780,7 @@ private:
rollback(data, rollbackVersion, currentVersion);
}
} else {
TraceEvent(SevWarn, "SCPrivateCacheMutation: Unknown private mutation");
TraceEvent(SevWarn, "SCPrivateCacheMutation: Unknown private mutation").log();
// ASSERT(false); // Unknown private mutation
}
}
@ -2156,6 +2156,7 @@ ACTOR Future<Void> watchInterface(StorageCacheData* self, StorageServerInterface
tr.set(storageKey, storageCacheServerValue(ssi));
wait(tr.commit());
}
tr.reset();
break;
} catch (Error& e) {
wait(tr.onError(e));

View File

@ -2160,7 +2160,7 @@ ACTOR Future<Void> initPersistentState(TLogData* self, Reference<LogData> logDat
updatePersistentPopped(self, logData, logData->getTagData(tag));
}
TraceEvent("TLogInitCommit", logData->logId);
TraceEvent("TLogInitCommit", logData->logId).log();
wait(ioTimeoutError(self->persistentData->commit(), SERVER_KNOBS->TLOG_MAX_CREATE_DURATION));
return Void();
}
@ -2713,7 +2713,7 @@ ACTOR Future<Void> tLogCore(TLogData* self,
}
ACTOR Future<Void> checkEmptyQueue(TLogData* self) {
TraceEvent("TLogCheckEmptyQueueBegin", self->dbgid);
TraceEvent("TLogCheckEmptyQueueBegin", self->dbgid).log();
try {
bool recoveryFinished = wait(self->persistentQueue->initializeRecovery(0));
if (recoveryFinished)
@ -2723,15 +2723,15 @@ ACTOR Future<Void> checkEmptyQueue(TLogData* self) {
} catch (Error& e) {
if (e.code() != error_code_end_of_stream)
throw;
TraceEvent("TLogCheckEmptyQueueEnd", self->dbgid);
TraceEvent("TLogCheckEmptyQueueEnd", self->dbgid).log();
return Void();
}
}
ACTOR Future<Void> checkRecovered(TLogData* self) {
TraceEvent("TLogCheckRecoveredBegin", self->dbgid);
TraceEvent("TLogCheckRecoveredBegin", self->dbgid).log();
Optional<Value> v = wait(self->persistentData->readValue(StringRef()));
TraceEvent("TLogCheckRecoveredEnd", self->dbgid);
TraceEvent("TLogCheckRecoveredEnd", self->dbgid).log();
return Void();
}
@ -2746,7 +2746,7 @@ ACTOR Future<Void> restorePersistentState(TLogData* self,
state KeyRange tagKeys;
// PERSIST: Read basic state from persistentData; replay persistentQueue but don't erase it
TraceEvent("TLogRestorePersistentState", self->dbgid);
TraceEvent("TLogRestorePersistentState", self->dbgid).log();
state IKeyValueStore* storage = self->persistentData;
wait(storage->init());
@ -3294,7 +3294,7 @@ ACTOR Future<Void> tLog(IKeyValueStore* persistentData,
state TLogData self(tlogId, workerID, persistentData, persistentQueue, db, degraded, folder);
state Future<Void> error = actorCollection(self.sharedActors.getFuture());
TraceEvent("SharedTlog", tlogId);
TraceEvent("SharedTlog", tlogId).log();
try {
if (restoreFromDisk) {
wait(restorePersistentState(&self, locality, oldLog, recovered, tlogRequests));

View File

@ -415,7 +415,7 @@ struct TagPartitionedLogSystem final : ILogSystem, ReferenceCounted<TagPartition
}
for (auto& t : newState.tLogs) {
if (!t.isLocal) {
TraceEvent("RemoteLogsWritten", dbgid);
TraceEvent("RemoteLogsWritten", dbgid).log();
remoteLogsWrittenToCoreState = true;
break;
}
@ -1101,7 +1101,7 @@ struct TagPartitionedLogSystem final : ILogSystem, ReferenceCounted<TagPartition
bool canDiscardPopped) final {
Version end = getEnd();
if (!tLogs.size()) {
TraceEvent("TLogPeekTxsNoLogs", dbgid);
TraceEvent("TLogPeekTxsNoLogs", dbgid).log();
return makeReference<ILogSystem::ServerPeekCursor>(
Reference<AsyncVar<OptionalInterface<TLogInterface>>>(), txsTag, begin, end, false, false);
}
@ -1534,7 +1534,7 @@ struct TagPartitionedLogSystem final : ILogSystem, ReferenceCounted<TagPartition
wait(waitForAll(poppedReady) || maxGetPoppedDuration);
if (maxGetPoppedDuration.isReady()) {
TraceEvent(SevWarnAlways, "PoppedTxsNotReady", dbgid);
TraceEvent(SevWarnAlways, "PoppedTxsNotReady", dbgid).log();
}
Version maxPopped = 1;
@ -2480,7 +2480,7 @@ struct TagPartitionedLogSystem final : ILogSystem, ReferenceCounted<TagPartition
LogEpoch recoveryCount,
int8_t remoteLocality,
std::vector<Tag> allTags) {
TraceEvent("RemoteLogRecruitment_WaitingForWorkers");
TraceEvent("RemoteLogRecruitment_WaitingForWorkers").log();
state RecruitRemoteFromConfigurationReply remoteWorkers = wait(fRemoteWorkers);
state Reference<LogSet> logSet(new LogSet());
@ -2655,7 +2655,7 @@ struct TagPartitionedLogSystem final : ILogSystem, ReferenceCounted<TagPartition
self->remoteRecoveryComplete = waitForAll(recoveryComplete);
self->tLogs.push_back(logSet);
TraceEvent("RemoteLogRecruitment_CompletingRecovery");
TraceEvent("RemoteLogRecruitment_CompletingRecovery").log();
return Void();
}
@ -3149,7 +3149,7 @@ struct TagPartitionedLogSystem final : ILogSystem, ReferenceCounted<TagPartition
// Step 1: Verify that if all the failed TLogs come back, they can't form a quorum.
if (can_obtain_quorum(locking_failed)) {
TraceEvent(SevInfo, "MasterRecoveryTLogLockingImpossible", dbgid);
TraceEvent(SevInfo, "MasterRecoveryTLogLockingImpossible", dbgid).log();
return;
}

View File

@ -276,7 +276,7 @@ struct MasterData : NonCopyable, ReferenceCounted<MasterData> {
reportLiveCommittedVersionRequests("ReportLiveCommittedVersionRequests", cc) {
logger = traceCounters("MasterMetrics", dbgid, SERVER_KNOBS->WORKER_LOGGING_INTERVAL, &cc, "MasterMetrics");
if (forceRecovery && !myInterface.locality.dcId().present()) {
TraceEvent(SevError, "ForcedRecoveryRequiresDcID");
TraceEvent(SevError, "ForcedRecoveryRequiresDcID").log();
forceRecovery = false;
}
}
@ -904,7 +904,7 @@ ACTOR Future<Void> readTransactionSystemState(Reference<MasterData> self,
// make KeyValueStoreMemory guarantee immediate reads, we should be able to get rid of
// the discardCommit() below and not need a writable log adapter
TraceEvent("RTSSComplete", self->dbgid);
TraceEvent("RTSSComplete", self->dbgid).log();
return Void();
}
@ -1087,7 +1087,7 @@ ACTOR Future<Void> recoverFrom(Reference<MasterData> self,
when(Standalone<CommitTransactionRef> _req = wait(provisional)) {
state Standalone<CommitTransactionRef> req = _req; // mutable
TEST(true); // Emergency transaction processing during recovery
TraceEvent("EmergencyTransaction", self->dbgid);
TraceEvent("EmergencyTransaction", self->dbgid).log();
for (auto m = req.mutations.begin(); m != req.mutations.end(); ++m)
TraceEvent("EmergencyTransactionMutation", self->dbgid)
.detail("MType", m->type)
@ -1102,7 +1102,7 @@ ACTOR Future<Void> recoverFrom(Reference<MasterData> self,
initialConfChanges->clear();
if (self->originalConfiguration.isValid() &&
self->configuration.usableRegions != self->originalConfiguration.usableRegions) {
TraceEvent(SevWarnAlways, "CannotChangeUsableRegions", self->dbgid);
TraceEvent(SevWarnAlways, "CannotChangeUsableRegions", self->dbgid).log();
self->configuration = self->originalConfiguration;
} else {
initialConfChanges->push_back(req);
@ -1500,7 +1500,7 @@ ACTOR Future<Void> trackTlogRecovery(Reference<MasterData> self,
if (newState.oldTLogData.size() && configuration.repopulateRegionAntiQuorum > 0 &&
self->logSystem->remoteStorageRecovered()) {
TraceEvent(SevWarnAlways, "RecruitmentStalled_RemoteStorageRecovered", self->dbgid);
TraceEvent(SevWarnAlways, "RecruitmentStalled_RemoteStorageRecovered", self->dbgid).log();
self->recruitmentStalled->set(true);
}
self->registrationTrigger.trigger();
@ -1570,7 +1570,7 @@ ACTOR static Future<Optional<Version>> getMinBackupVersion(Reference<MasterData>
minVersion = minVersion.present() ? std::min(version, minVersion.get()) : version;
}
} else {
TraceEvent("EmptyBackupStartKey", self->dbgid);
TraceEvent("EmptyBackupStartKey", self->dbgid).log();
}
return minVersion;
@ -1663,7 +1663,7 @@ ACTOR static Future<Void> recruitBackupWorkers(Reference<MasterData> self, Datab
std::vector<InitializeBackupReply> newRecruits = wait(getAll(initializationReplies));
self->logSystem->setBackupWorkers(newRecruits);
TraceEvent("BackupRecruitmentDone", self->dbgid);
TraceEvent("BackupRecruitmentDone", self->dbgid).log();
self->registrationTrigger.trigger();
return Void();
}
@ -1723,7 +1723,7 @@ ACTOR Future<Void> masterCore(Reference<MasterData> self) {
if (g_network->isSimulated() && self->cstate.myDBState.oldTLogData.size() > CLIENT_KNOBS->MAX_GENERATIONS_SIM) {
g_simulator.connectionFailuresDisableDuration = 1e6;
g_simulator.speedUpSimulation = true;
TraceEvent(SevWarnAlways, "DisableConnectionFailures_TooManyGenerations");
TraceEvent(SevWarnAlways, "DisableConnectionFailures_TooManyGenerations").log();
}
}
@ -1812,7 +1812,7 @@ ACTOR Future<Void> masterCore(Reference<MasterData> self) {
tr.set(recoveryCommitRequest.arena, snapshotEndVersionKey, (bw << self->lastEpochEnd).toValue());
// Pause the backups that got restored in this snapshot to avoid data corruption
// Requires further operational work to abort the backup
TraceEvent("MasterRecoveryPauseBackupAgents");
TraceEvent("MasterRecoveryPauseBackupAgents").log();
Key backupPauseKey = FileBackupAgent::getPauseKey();
tr.set(recoveryCommitRequest.arena, backupPauseKey, StringRef());
// Clear the key so multiple recoveries will not overwrite the first version recorded
@ -1882,7 +1882,7 @@ ACTOR Future<Void> masterCore(Reference<MasterData> self) {
tr.read_snapshot = self->recoveryTransactionVersion; // lastEpochEnd would make more sense, but isn't in the initial
// window of the resolver(s)
TraceEvent("MasterRecoveryCommit", self->dbgid);
TraceEvent("MasterRecoveryCommit", self->dbgid).log();
state Future<ErrorOr<CommitID>> recoveryCommit = self->commitProxies[0].commit.tryGetReply(recoveryCommitRequest);
self->addActor.send(self->logSystem->onError());
self->addActor.send(waitResolverFailure(self->resolvers));
@ -1930,7 +1930,7 @@ ACTOR Future<Void> masterCore(Reference<MasterData> self) {
debug_advanceMinCommittedVersion(UID(), self->recoveryTransactionVersion);
if (debugResult) {
TraceEvent(self->forceRecovery ? SevWarn : SevError, "DBRecoveryDurabilityError");
TraceEvent(self->forceRecovery ? SevWarn : SevError, "DBRecoveryDurabilityError").log();
}
TraceEvent("MasterCommittedTLogs", self->dbgid)

View File

@ -1189,7 +1189,7 @@ Future<Version> waitForVersion(StorageServer* data, Version version, SpanID span
}
if (deterministicRandom()->random01() < 0.001) {
TraceEvent("WaitForVersion1000x");
TraceEvent("WaitForVersion1000x").log();
}
return waitForVersionActor(data, version, spanContext);
}
@ -3542,10 +3542,10 @@ private:
ASSERT(ssId == data->thisServerID);
if (m.type == MutationRef::SetValue) {
TEST(true); // Putting TSS in quarantine
TraceEvent(SevWarn, "TSSQuarantineStart", data->thisServerID);
TraceEvent(SevWarn, "TSSQuarantineStart", data->thisServerID).log();
data->startTssQuarantine();
} else {
TraceEvent(SevWarn, "TSSQuarantineStop", data->thisServerID);
TraceEvent(SevWarn, "TSSQuarantineStop", data->thisServerID).log();
// dipose of this TSS
throw worker_removed();
}
@ -3620,7 +3620,7 @@ ACTOR Future<Void> update(StorageServer* data, bool* pReceivedUpdate) {
!g_simulator.speedUpSimulation && data->tssFaultInjectTime.present() &&
data->tssFaultInjectTime.get() < now()) {
if (deterministicRandom()->random01() < 0.01) {
TraceEvent(SevWarnAlways, "TSSInjectDelayForever", data->thisServerID);
TraceEvent(SevWarnAlways, "TSSInjectDelayForever", data->thisServerID).log();
// small random chance to just completely get stuck here, each tss should eventually hit this in this
// mode
wait(tssDelayForever());
@ -3835,7 +3835,7 @@ ACTOR Future<Void> update(StorageServer* data, bool* pReceivedUpdate) {
} else if (ver != invalidVersion) { // This change belongs to a version < minVersion
DEBUG_MUTATION("SSPeek", ver, msg).detail("ServerID", data->thisServerID);
if (ver == 1) {
TraceEvent("SSPeekMutation", data->thisServerID);
TraceEvent("SSPeekMutation", data->thisServerID).log();
// The following trace event may produce a value with special characters
//TraceEvent("SSPeekMutation", data->thisServerID).detail("Mutation", msg.toString()).detail("Version", cloneCursor2->version().toString());
}
@ -4333,15 +4333,15 @@ ACTOR Future<bool> restoreDurableState(StorageServer* data, IKeyValueStore* stor
data->byteSampleRecovery =
restoreByteSample(data, storage, byteSampleSampleRecovered, startByteSampleRestore.getFuture());
TraceEvent("ReadingDurableState", data->thisServerID);
TraceEvent("ReadingDurableState", data->thisServerID).log();
wait(waitForAll(std::vector{ fFormat, fID, ftssPairID, fTssQuarantine, fVersion, fLogProtocol, fPrimaryLocality }));
wait(waitForAll(std::vector{ fShardAssigned, fShardAvailable }));
wait(byteSampleSampleRecovered.getFuture());
TraceEvent("RestoringDurableState", data->thisServerID);
TraceEvent("RestoringDurableState", data->thisServerID).log();
if (!fFormat.get().present()) {
// The DB was never initialized
TraceEvent("DBNeverInitialized", data->thisServerID);
TraceEvent("DBNeverInitialized", data->thisServerID).log();
storage->dispose();
data->thisServerID = UID();
data->sk = Key();
@ -5262,7 +5262,7 @@ ACTOR Future<Void> replaceInterface(StorageServer* self, StorageServerInterface
}
if (self->history.size() && BUGGIFY) {
TraceEvent("SSHistoryReboot", self->thisServerID);
TraceEvent("SSHistoryReboot", self->thisServerID).log();
throw please_reboot();
}
@ -5337,7 +5337,7 @@ ACTOR Future<Void> storageServer(IKeyValueStore* persistentData,
try {
state double start = now();
TraceEvent("StorageServerRebootStart", self.thisServerID);
TraceEvent("StorageServerRebootStart", self.thisServerID).log();
wait(self.storage.init());
choose {
@ -5346,7 +5346,7 @@ ACTOR Future<Void> storageServer(IKeyValueStore* persistentData,
when(wait(self.storage.commit())) {}
when(wait(memoryStoreRecover(persistentData, connFile, self.thisServerID))) {
TraceEvent("DisposeStorageServer", self.thisServerID);
TraceEvent("DisposeStorageServer", self.thisServerID).log();
throw worker_removed();
}
}

View File

@ -817,7 +817,7 @@ ACTOR Future<DistributedTestResults> runWorkload(Database cx, std::vector<Tester
}
state std::vector<Future<ErrorOr<CheckReply>>> checks;
TraceEvent("CheckingResults");
TraceEvent("CheckingResults").log();
printf("checking test (%s)...\n", printable(spec.title).c_str());
@ -1016,7 +1016,7 @@ ACTOR Future<bool> runTest(Database cx,
if (spec.useDB && spec.clearAfterTest) {
try {
TraceEvent("TesterClearingDatabase");
TraceEvent("TesterClearingDatabase").log();
wait(timeoutError(clearData(cx), 1000.0));
} catch (Error& e) {
TraceEvent(SevError, "ErrorClearingDatabaseAfterTest").error(e);
@ -1559,7 +1559,7 @@ ACTOR Future<Void> runTests(Reference<AsyncVar<Optional<struct ClusterController
}
when(wait(cc->onChange())) {}
when(wait(testerTimeout)) {
TraceEvent(SevError, "TesterRecruitmentTimeout");
TraceEvent(SevError, "TesterRecruitmentTimeout").log();
throw timed_out();
}
}

View File

@ -848,7 +848,7 @@ bool checkHighMemory(int64_t threshold, bool* error) {
uint64_t page_size = sysconf(_SC_PAGESIZE);
int fd = open("/proc/self/statm", O_RDONLY | O_CLOEXEC);
if (fd < 0) {
TraceEvent("OpenStatmFileFailure");
TraceEvent("OpenStatmFileFailure").log();
*error = true;
return false;
}
@ -857,7 +857,7 @@ bool checkHighMemory(int64_t threshold, bool* error) {
char stat_buf[buf_sz];
ssize_t stat_nread = read(fd, stat_buf, buf_sz);
if (stat_nread < 0) {
TraceEvent("ReadStatmFileFailure");
TraceEvent("ReadStatmFileFailure").log();
*error = true;
return false;
}
@ -869,7 +869,7 @@ bool checkHighMemory(int64_t threshold, bool* error) {
return true;
}
#else
TraceEvent("CheckHighMemoryUnsupported");
TraceEvent("CheckHighMemoryUnsupported").log();
*error = true;
#endif
return false;
@ -926,7 +926,7 @@ ACTOR Future<Void> storageServerRollbackRebooter(std::set<std::pair<UID, KeyValu
else if (e.getError().code() != error_code_please_reboot)
throw e.getError();
TraceEvent("StorageServerRequestedReboot", id);
TraceEvent("StorageServerRequestedReboot", id).log();
StorageServerInterface recruited;
recruited.uniqueID = id;
@ -964,7 +964,7 @@ ACTOR Future<Void> storageCacheRollbackRebooter(Future<Void> prevStorageCache,
loop {
ErrorOr<Void> e = wait(errorOr(prevStorageCache));
if (!e.isError()) {
TraceEvent("StorageCacheRequestedReboot1", id);
TraceEvent("StorageCacheRequestedReboot1", id).log();
return Void();
} else if (e.getError().code() != error_code_please_reboot &&
e.getError().code() != error_code_worker_removed) {
@ -972,7 +972,7 @@ ACTOR Future<Void> storageCacheRollbackRebooter(Future<Void> prevStorageCache,
throw e.getError();
}
TraceEvent("StorageCacheRequestedReboot", id);
TraceEvent("StorageCacheRequestedReboot", id).log();
StorageServerInterface recruited;
recruited.uniqueID = deterministicRandom()->randomUniqueID(); // id;
@ -1504,7 +1504,7 @@ ACTOR Future<Void> workerServer(Reference<ClusterConnectionFile> connFile,
}
throw please_reboot();
} else {
TraceEvent("ProcessReboot");
TraceEvent("ProcessReboot").log();
ASSERT(!rebootReq.deleteData);
flushAndExit(0);
}
@ -2017,7 +2017,7 @@ ACTOR Future<Void> printOnFirstConnected(Reference<AsyncVar<Optional<ClusterInte
ci->get().get().openDatabase.getEndpoint(), FailureStatus(false))
: Never())) {
printf("FDBD joined cluster.\n");
TraceEvent("FDBDConnected");
TraceEvent("FDBDConnected").log();
return Void();
}
when(wait(ci->onChange())) {}

View File

@ -480,7 +480,7 @@ public:
TraceEvent("AtomicOpCorrectnessApiWorkload").detail("OpType", "MIN");
// API Version 500
setApiVersion(&cx, 500);
TraceEvent(SevInfo, "Running Atomic Op Min Correctness Test Api Version 500");
TraceEvent(SevInfo, "Running Atomic Op Min Correctness Test Api Version 500").log();
wait(self->testAtomicOpUnsetOnNonExistingKey(cx, self, MutationRef::Min, key));
wait(self->testAtomicOpApi(
cx, self, MutationRef::Min, key, [](uint64_t val1, uint64_t val2) { return val1 < val2 ? val1 : val2; }));
@ -513,7 +513,7 @@ public:
ACTOR Future<Void> testMax(Database cx, AtomicOpsApiCorrectnessWorkload* self) {
state Key key = self->getTestKey("test_key_max_");
TraceEvent(SevInfo, "Running Atomic Op MAX Correctness Current Api Version");
TraceEvent(SevInfo, "Running Atomic Op MAX Correctness Current Api Version").log();
wait(self->testAtomicOpSetOnNonExistingKey(cx, self, MutationRef::Max, key));
wait(self->testAtomicOpApi(
cx, self, MutationRef::Max, key, [](uint64_t val1, uint64_t val2) { return val1 > val2 ? val1 : val2; }));
@ -530,7 +530,7 @@ public:
TraceEvent("AtomicOpCorrectnessApiWorkload").detail("OpType", "AND");
// API Version 500
setApiVersion(&cx, 500);
TraceEvent(SevInfo, "Running Atomic Op AND Correctness Test Api Version 500");
TraceEvent(SevInfo, "Running Atomic Op AND Correctness Test Api Version 500").log();
wait(self->testAtomicOpUnsetOnNonExistingKey(cx, self, MutationRef::And, key));
wait(self->testAtomicOpApi(
cx, self, MutationRef::And, key, [](uint64_t val1, uint64_t val2) { return val1 & val2; }));
@ -563,7 +563,7 @@ public:
ACTOR Future<Void> testOr(Database cx, AtomicOpsApiCorrectnessWorkload* self) {
state Key key = self->getTestKey("test_key_or_");
TraceEvent(SevInfo, "Running Atomic Op OR Correctness Current Api Version");
TraceEvent(SevInfo, "Running Atomic Op OR Correctness Current Api Version").log();
wait(self->testAtomicOpSetOnNonExistingKey(cx, self, MutationRef::Or, key));
wait(self->testAtomicOpApi(
cx, self, MutationRef::Or, key, [](uint64_t val1, uint64_t val2) { return val1 | val2; }));
@ -576,7 +576,7 @@ public:
ACTOR Future<Void> testXor(Database cx, AtomicOpsApiCorrectnessWorkload* self) {
state Key key = self->getTestKey("test_key_xor_");
TraceEvent(SevInfo, "Running Atomic Op XOR Correctness Current Api Version");
TraceEvent(SevInfo, "Running Atomic Op XOR Correctness Current Api Version").log();
wait(self->testAtomicOpSetOnNonExistingKey(cx, self, MutationRef::Xor, key));
wait(self->testAtomicOpApi(
cx, self, MutationRef::Xor, key, [](uint64_t val1, uint64_t val2) { return val1 ^ val2; }));
@ -588,7 +588,7 @@ public:
ACTOR Future<Void> testAdd(Database cx, AtomicOpsApiCorrectnessWorkload* self) {
state Key key = self->getTestKey("test_key_add_");
TraceEvent(SevInfo, "Running Atomic Op ADD Correctness Current Api Version");
TraceEvent(SevInfo, "Running Atomic Op ADD Correctness Current Api Version").log();
wait(self->testAtomicOpSetOnNonExistingKey(cx, self, MutationRef::AddValue, key));
wait(self->testAtomicOpApi(
cx, self, MutationRef::AddValue, key, [](uint64_t val1, uint64_t val2) { return val1 + val2; }));
@ -601,7 +601,7 @@ public:
ACTOR Future<Void> testCompareAndClear(Database cx, AtomicOpsApiCorrectnessWorkload* self) {
state Key key = self->getTestKey("test_key_compare_and_clear_");
TraceEvent(SevInfo, "Running Atomic Op COMPARE_AND_CLEAR Correctness Current Api Version");
TraceEvent(SevInfo, "Running Atomic Op COMPARE_AND_CLEAR Correctness Current Api Version").log();
wait(self->testCompareAndClearAtomicOpApi(cx, self, key, true));
wait(self->testCompareAndClearAtomicOpApi(cx, self, key, false));
return Void();
@ -610,7 +610,7 @@ public:
ACTOR Future<Void> testByteMin(Database cx, AtomicOpsApiCorrectnessWorkload* self) {
state Key key = self->getTestKey("test_key_byte_min_");
TraceEvent(SevInfo, "Running Atomic Op BYTE_MIN Correctness Current Api Version");
TraceEvent(SevInfo, "Running Atomic Op BYTE_MIN Correctness Current Api Version").log();
wait(self->testAtomicOpSetOnNonExistingKey(cx, self, MutationRef::ByteMin, key));
wait(self->testAtomicOpApi(cx, self, MutationRef::ByteMin, key, [](uint64_t val1, uint64_t val2) {
return StringRef((const uint8_t*)&val1, sizeof(val1)) < StringRef((const uint8_t*)&val2, sizeof(val2))
@ -626,7 +626,7 @@ public:
ACTOR Future<Void> testByteMax(Database cx, AtomicOpsApiCorrectnessWorkload* self) {
state Key key = self->getTestKey("test_key_byte_max_");
TraceEvent(SevInfo, "Running Atomic Op BYTE_MAX Correctness Current Api Version");
TraceEvent(SevInfo, "Running Atomic Op BYTE_MAX Correctness Current Api Version").log();
wait(self->testAtomicOpSetOnNonExistingKey(cx, self, MutationRef::ByteMax, key));
wait(self->testAtomicOpApi(cx, self, MutationRef::ByteMax, key, [](uint64_t val1, uint64_t val2) {
return StringRef((const uint8_t*)&val1, sizeof(val1)) > StringRef((const uint8_t*)&val2, sizeof(val2))

View File

@ -104,14 +104,14 @@ struct AtomicRestoreWorkload : TestWorkload {
throw;
}
TraceEvent("AtomicRestore_Wait");
TraceEvent("AtomicRestore_Wait").log();
wait(success(backupAgent.waitBackup(cx, BackupAgentBase::getDefaultTagName(), StopWhenDone::False)));
TraceEvent("AtomicRestore_BackupStart");
TraceEvent("AtomicRestore_BackupStart").log();
wait(delay(self->restoreAfter * deterministicRandom()->random01()));
TraceEvent("AtomicRestore_RestoreStart");
TraceEvent("AtomicRestore_RestoreStart").log();
if (self->fastRestore) { // New fast parallel restore
TraceEvent(SevInfo, "AtomicParallelRestore");
TraceEvent(SevInfo, "AtomicParallelRestore").log();
wait(backupAgent.atomicParallelRestore(
cx, BackupAgentBase::getDefaultTag(), self->backupRanges, self->addPrefix, self->removePrefix));
} else { // Old style restore
@ -141,7 +141,7 @@ struct AtomicRestoreWorkload : TestWorkload {
g_simulator.backupAgents = ISimulator::BackupAgentType::NoBackupAgents;
}
TraceEvent("AtomicRestore_Done");
TraceEvent("AtomicRestore_Done").log();
return Void();
}
};

View File

@ -53,7 +53,7 @@ struct AtomicSwitchoverWorkload : TestWorkload {
ACTOR static Future<Void> _setup(Database cx, AtomicSwitchoverWorkload* self) {
state DatabaseBackupAgent backupAgent(cx);
try {
TraceEvent("AS_Submit1");
TraceEvent("AS_Submit1").log();
wait(backupAgent.submitBackup(self->extraDB,
BackupAgentBase::getDefaultTag(),
self->backupRanges,
@ -61,7 +61,7 @@ struct AtomicSwitchoverWorkload : TestWorkload {
StringRef(),
StringRef(),
LockDB::True));
TraceEvent("AS_Submit2");
TraceEvent("AS_Submit2").log();
} catch (Error& e) {
if (e.code() != error_code_backup_duplicate)
throw;
@ -167,27 +167,27 @@ struct AtomicSwitchoverWorkload : TestWorkload {
state DatabaseBackupAgent backupAgent(cx);
state DatabaseBackupAgent restoreTool(self->extraDB);
TraceEvent("AS_Wait1");
TraceEvent("AS_Wait1").log();
wait(success(backupAgent.waitBackup(self->extraDB, BackupAgentBase::getDefaultTag(), StopWhenDone::False)));
TraceEvent("AS_Ready1");
TraceEvent("AS_Ready1").log();
wait(delay(deterministicRandom()->random01() * self->switch1delay));
TraceEvent("AS_Switch1");
TraceEvent("AS_Switch1").log();
wait(backupAgent.atomicSwitchover(
self->extraDB, BackupAgentBase::getDefaultTag(), self->backupRanges, StringRef(), StringRef()));
TraceEvent("AS_Wait2");
TraceEvent("AS_Wait2").log();
wait(success(restoreTool.waitBackup(cx, BackupAgentBase::getDefaultTag(), StopWhenDone::False)));
TraceEvent("AS_Ready2");
TraceEvent("AS_Ready2").log();
wait(delay(deterministicRandom()->random01() * self->switch2delay));
TraceEvent("AS_Switch2");
TraceEvent("AS_Switch2").log();
wait(restoreTool.atomicSwitchover(
cx, BackupAgentBase::getDefaultTag(), self->backupRanges, StringRef(), StringRef()));
TraceEvent("AS_Wait3");
TraceEvent("AS_Wait3").log();
wait(success(backupAgent.waitBackup(self->extraDB, BackupAgentBase::getDefaultTag(), StopWhenDone::False)));
TraceEvent("AS_Ready3");
TraceEvent("AS_Ready3").log();
wait(delay(deterministicRandom()->random01() * self->stopDelay));
TraceEvent("AS_Abort");
TraceEvent("AS_Abort").log();
wait(backupAgent.abortBackup(self->extraDB, BackupAgentBase::getDefaultTag()));
TraceEvent("AS_Done");
TraceEvent("AS_Done").log();
// SOMEDAY: Remove after backup agents can exist quiescently
if (g_simulator.drAgents == ISimulator::BackupAgentType::BackupToDB) {

View File

@ -384,7 +384,7 @@ struct BackupAndParallelRestoreCorrectnessWorkload : TestWorkload {
Key(),
Key(),
self->locked)));
TraceEvent(SevError, "BARW_RestoreAllowedOverwrittingDatabase", randomID);
TraceEvent(SevError, "BARW_RestoreAllowedOverwrittingDatabase", randomID).log();
ASSERT(false);
} catch (Error& e) {
if (e.code() != error_code_restore_destination_not_empty) {

View File

@ -430,7 +430,7 @@ struct BackupAndRestoreCorrectnessWorkload : TestWorkload {
Key(),
Key(),
self->locked)));
TraceEvent(SevError, "BARW_RestoreAllowedOverwrittingDatabase", randomID);
TraceEvent(SevError, "BARW_RestoreAllowedOverwrittingDatabase", randomID).log();
ASSERT(false);
} catch (Error& e) {
if (e.code() != error_code_restore_destination_not_empty) {

View File

@ -52,7 +52,7 @@ struct BackupToDBAbort : TestWorkload {
ACTOR static Future<Void> _setup(BackupToDBAbort* self, Database cx) {
state DatabaseBackupAgent backupAgent(cx);
try {
TraceEvent("BDBA_Submit1");
TraceEvent("BDBA_Submit1").log();
wait(backupAgent.submitBackup(self->extraDB,
BackupAgentBase::getDefaultTag(),
self->backupRanges,
@ -60,7 +60,7 @@ struct BackupToDBAbort : TestWorkload {
StringRef(),
StringRef(),
LockDB::True));
TraceEvent("BDBA_Submit2");
TraceEvent("BDBA_Submit2").log();
} catch (Error& e) {
if (e.code() != error_code_backup_duplicate)
throw;
@ -79,15 +79,15 @@ struct BackupToDBAbort : TestWorkload {
TraceEvent("BDBA_Start").detail("Delay", self->abortDelay);
wait(delay(self->abortDelay));
TraceEvent("BDBA_Wait");
TraceEvent("BDBA_Wait").log();
wait(success(backupAgent.waitBackup(self->extraDB, BackupAgentBase::getDefaultTag(), StopWhenDone::False)));
TraceEvent("BDBA_Lock");
TraceEvent("BDBA_Lock").log();
wait(lockDatabase(cx, self->lockid));
TraceEvent("BDBA_Abort");
TraceEvent("BDBA_Abort").log();
wait(backupAgent.abortBackup(self->extraDB, BackupAgentBase::getDefaultTag()));
TraceEvent("BDBA_Unlock");
TraceEvent("BDBA_Unlock").log();
wait(backupAgent.unlockBackup(self->extraDB, BackupAgentBase::getDefaultTag()));
TraceEvent("BDBA_End");
TraceEvent("BDBA_End").log();
// SOMEDAY: Remove after backup agents can exist quiescently
if (g_simulator.drAgents == ISimulator::BackupAgentType::BackupToDB) {
@ -98,7 +98,7 @@ struct BackupToDBAbort : TestWorkload {
}
ACTOR static Future<bool> _check(BackupToDBAbort* self, Database cx) {
TraceEvent("BDBA_UnlockPrimary");
TraceEvent("BDBA_UnlockPrimary").log();
// Too much of the tester framework expects the primary database to be unlocked, so we unlock it
// once all of the workloads have finished.
wait(unlockDatabase(cx, self->lockid));

View File

@ -78,7 +78,7 @@ struct BackupToDBUpgradeWorkload : TestWorkload {
auto extraFile = makeReference<ClusterConnectionFile>(*g_simulator.extraDB);
extraDB = Database::createDatabase(extraFile, -1);
TraceEvent("DRU_Start");
TraceEvent("DRU_Start").log();
}
std::string description() const override { return "BackupToDBUpgrade"; }
@ -459,7 +459,7 @@ struct BackupToDBUpgradeWorkload : TestWorkload {
}
}
TraceEvent("DRU_DiffRanges");
TraceEvent("DRU_DiffRanges").log();
wait(diffRanges(prevBackupRanges, self->backupPrefix, cx, self->extraDB));
// abort backup

View File

@ -284,7 +284,7 @@ Future<Void> bulkSetup(Database cx,
wait(delay(1.0));
} else {
wait(delay(1.0));
TraceEvent("DynamicWarmingDone");
TraceEvent("DynamicWarmingDone").log();
break;
}
}

View File

@ -65,9 +65,9 @@ struct ChangeConfigWorkload : TestWorkload {
// It is not safe to allow automatic failover to a region which is not fully replicated,
// so wait for both regions to be fully replicated before enabling failover
wait(success(changeConfig(extraDB, g_simulator.startingDisabledConfiguration, true)));
TraceEvent("WaitForReplicasExtra");
TraceEvent("WaitForReplicasExtra").log();
wait(waitForFullReplication(extraDB));
TraceEvent("WaitForReplicasExtraEnd");
TraceEvent("WaitForReplicasExtraEnd").log();
}
wait(success(changeConfig(extraDB, self->configMode, true)));
}
@ -99,9 +99,9 @@ struct ChangeConfigWorkload : TestWorkload {
// It is not safe to allow automatic failover to a region which is not fully replicated,
// so wait for both regions to be fully replicated before enabling failover
wait(success(changeConfig(cx, g_simulator.startingDisabledConfiguration, true)));
TraceEvent("WaitForReplicas");
TraceEvent("WaitForReplicas").log();
wait(waitForFullReplication(cx));
TraceEvent("WaitForReplicasEnd");
TraceEvent("WaitForReplicasEnd").log();
}
wait(success(changeConfig(cx, self->configMode, true)));
}

View File

@ -45,6 +45,7 @@ struct CommitBugWorkload : TestWorkload {
try {
tr.set(key, val1);
wait(tr.commit());
tr.reset();
break;
} catch (Error& e) {
TraceEvent("CommitBugSetVal1Error").error(e);
@ -57,6 +58,7 @@ struct CommitBugWorkload : TestWorkload {
try {
tr.set(key, val2);
wait(tr.commit());
tr.reset();
break;
} catch (Error& e) {
TraceEvent("CommitBugSetVal2Error").error(e);
@ -85,6 +87,7 @@ struct CommitBugWorkload : TestWorkload {
try {
tr.clear(key);
wait(tr.commit());
tr.reset();
break;
} catch (Error& e) {
TraceEvent("CommitBugClearValError").error(e);

View File

@ -100,7 +100,7 @@ struct ConflictRangeWorkload : TestWorkload {
loop {
state Transaction tr0(cx);
try {
TraceEvent("ConflictRangeReset");
TraceEvent("ConflictRangeReset").log();
insertedSet.clear();
if (self->testReadYourWrites) {

View File

@ -142,7 +142,7 @@ struct ConsistencyCheckWorkload : TestWorkload {
}
Future<Void> start(Database const& cx) override {
TraceEvent("ConsistencyCheck");
TraceEvent("ConsistencyCheck").log();
return _start(cx, this);
}
@ -186,10 +186,10 @@ struct ConsistencyCheckWorkload : TestWorkload {
ACTOR Future<Void> _start(Database cx, ConsistencyCheckWorkload* self) {
loop {
while (self->suspendConsistencyCheck.get()) {
TraceEvent("ConsistencyCheck_Suspended");
TraceEvent("ConsistencyCheck_Suspended").log();
wait(self->suspendConsistencyCheck.onChange());
}
TraceEvent("ConsistencyCheck_StartingOrResuming");
TraceEvent("ConsistencyCheck_StartingOrResuming").log();
choose {
when(wait(self->runCheck(cx, self))) {
if (!self->indefinite)
@ -222,7 +222,7 @@ struct ConsistencyCheckWorkload : TestWorkload {
}
RangeResult res = wait(tr.getRange(configKeys, 1000));
if (res.size() == 1000) {
TraceEvent("ConsistencyCheck_TooManyConfigOptions");
TraceEvent("ConsistencyCheck_TooManyConfigOptions").log();
self->testFailure("Read too many configuration options");
}
for (int i = 0; i < res.size(); i++)
@ -251,7 +251,7 @@ struct ConsistencyCheckWorkload : TestWorkload {
// the allowed maximum number of teams
bool teamCollectionValid = wait(getTeamCollectionValid(cx, self->dbInfo));
if (!teamCollectionValid) {
TraceEvent(SevError, "ConsistencyCheck_TooManyTeams");
TraceEvent(SevError, "ConsistencyCheck_TooManyTeams").log();
self->testFailure("The number of process or machine teams is larger than the allowed maximum "
"number of teams");
}
@ -1817,7 +1817,7 @@ struct ConsistencyCheckWorkload : TestWorkload {
self->testFailure("No storage server on worker");
return false;
} else {
TraceEvent(SevWarn, "ConsistencyCheck_TSSMissing");
TraceEvent(SevWarn, "ConsistencyCheck_TSSMissing").log();
}
}
@ -1992,7 +1992,7 @@ struct ConsistencyCheckWorkload : TestWorkload {
Optional<Value> currentKey = wait(tr.get(coordinatorsKey));
if (!currentKey.present()) {
TraceEvent("ConsistencyCheck_NoCoordinatorKey");
TraceEvent("ConsistencyCheck_NoCoordinatorKey").log();
return false;
}

View File

@ -93,7 +93,7 @@ struct CpuProfilerWorkload : TestWorkload {
if (!replies[i].get().present())
self->success = false;
TraceEvent("DoneSignalingProfiler");
TraceEvent("DoneSignalingProfiler").log();
}
return Void();
@ -104,14 +104,14 @@ struct CpuProfilerWorkload : TestWorkload {
ACTOR Future<Void> _start(Database cx, CpuProfilerWorkload* self) {
wait(delay(self->initialDelay));
if (self->clientId == 0)
TraceEvent("SignalProfilerOn");
TraceEvent("SignalProfilerOn").log();
wait(timeoutError(self->updateProfiler(true, cx, self), 60.0));
// If a duration was given, let the duration elapse and then shut the profiler off
if (self->duration > 0) {
wait(delay(self->duration));
if (self->clientId == 0)
TraceEvent("SignalProfilerOff");
TraceEvent("SignalProfilerOff").log();
wait(timeoutError(self->updateProfiler(false, cx, self), 60.0));
}
@ -124,7 +124,7 @@ struct CpuProfilerWorkload : TestWorkload {
// If no duration was given, then shut the profiler off now
if (self->duration <= 0) {
if (self->clientId == 0)
TraceEvent("SignalProfilerOff");
TraceEvent("SignalProfilerOff").log();
wait(timeoutError(self->updateProfiler(false, cx, self), 60.0));
}

View File

@ -104,7 +104,7 @@ struct CycleWorkload : TestWorkload {
state Transaction tr(cx);
if (deterministicRandom()->random01() >= self->traceParentProbability) {
state Span span("CycleClient"_loc);
TraceEvent("CycleTracingTransaction", span.context);
TraceEvent("CycleTracingTransaction", span.context).log();
tr.setOption(FDBTransactionOptions::SPAN_PARENT,
BinaryWriter::toValue(span.context, Unversioned()));
}
@ -154,7 +154,7 @@ struct CycleWorkload : TestWorkload {
}
void logTestData(const VectorRef<KeyValueRef>& data) {
TraceEvent("TestFailureDetail");
TraceEvent("TestFailureDetail").log();
int index = 0;
for (auto& entry : data) {
TraceEvent("CurrentDataEntry")

View File

@ -50,7 +50,7 @@ struct DDMetricsWorkload : TestWorkload {
try {
TraceEvent("DDMetricsWaiting").detail("StartDelay", self->startDelay);
wait(delay(self->startDelay));
TraceEvent("DDMetricsStarting");
TraceEvent("DDMetricsStarting").log();
state double startTime = now();
loop {
wait(delay(2.5));

View File

@ -64,7 +64,7 @@ struct DifferentClustersSameRVWorkload : TestWorkload {
Future<bool> check(Database const& cx) override {
if (clientId == 0 && !switchComplete) {
TraceEvent(SevError, "DifferentClustersSwitchNotComplete");
TraceEvent(SevError, "DifferentClustersSwitchNotComplete").log();
return false;
}
return true;
@ -133,17 +133,17 @@ struct DifferentClustersSameRVWorkload : TestWorkload {
return Void();
}));
wait(lockDatabase(self->originalDB, lockUid) && lockDatabase(self->extraDB, lockUid));
TraceEvent("DifferentClusters_LockedDatabases");
TraceEvent("DifferentClusters_LockedDatabases").log();
std::pair<Version, Optional<Value>> read1 = wait(doRead(self->originalDB, self));
state Version rv = read1.first;
state Optional<Value> val1 = read1.second;
wait(doWrite(self->extraDB, self->keyToRead, val1));
TraceEvent("DifferentClusters_CopiedDatabase");
TraceEvent("DifferentClusters_CopiedDatabase").log();
wait(advanceVersion(self->extraDB, rv));
TraceEvent("DifferentClusters_AdvancedVersion");
TraceEvent("DifferentClusters_AdvancedVersion").log();
wait(cx->switchConnectionFile(
makeReference<ClusterConnectionFile>(self->extraDB->getConnectionFile()->getConnectionString())));
TraceEvent("DifferentClusters_SwitchedConnectionFile");
TraceEvent("DifferentClusters_SwitchedConnectionFile").log();
state Transaction tr(cx);
tr.setVersion(rv);
tr.setOption(FDBTransactionOptions::READ_LOCK_AWARE);
@ -160,17 +160,17 @@ struct DifferentClustersSameRVWorkload : TestWorkload {
// that a storage server serves a read at |rv| even after the recovery caused by unlocking the database, and we
// want to make that more likely for this test. So read at |rv| then unlock.
wait(unlockDatabase(self->extraDB, lockUid));
TraceEvent("DifferentClusters_UnlockedExtraDB");
TraceEvent("DifferentClusters_UnlockedExtraDB").log();
ASSERT(!watchFuture.isReady() || watchFuture.isError());
wait(doWrite(self->extraDB, self->keyToWatch, Optional<Value>{ LiteralStringRef("") }));
TraceEvent("DifferentClusters_WaitingForWatch");
TraceEvent("DifferentClusters_WaitingForWatch").log();
try {
wait(timeoutError(watchFuture, (self->testDuration - self->switchAfter) / 2));
} catch (Error& e) {
TraceEvent("DifferentClusters_WatchError").error(e);
wait(tr.onError(e));
}
TraceEvent("DifferentClusters_Done");
TraceEvent("DifferentClusters_Done").log();
self->switchComplete = true;
wait(unlockDatabase(self->originalDB, lockUid)); // So quietDatabase can finish
return Void();
@ -191,6 +191,7 @@ struct DifferentClustersSameRVWorkload : TestWorkload {
serializer(w, x);
tr.set(self->keyToRead, w.toValue());
wait(tr.commit());
tr.reset();
} catch (Error& e) {
wait(tr.onError(e));
}

View File

@ -142,19 +142,19 @@ struct ExternalWorkload : TestWorkload, FDBWorkloadContext {
.detail("WorkloadName", wName);
library = loadLibrary(fullPath.c_str());
if (library == nullptr) {
TraceEvent(SevError, "ExternalWorkloadLoadError");
TraceEvent(SevError, "ExternalWorkloadLoadError").log();
success = false;
return;
}
workloadFactory = reinterpret_cast<decltype(workloadFactory)>(loadFunction(library, "workloadFactory"));
if (workloadFactory == nullptr) {
TraceEvent(SevError, "ExternalFactoryNotFound");
TraceEvent(SevError, "ExternalFactoryNotFound").log();
success = false;
return;
}
workloadImpl = (*workloadFactory)(FDBLoggerImpl::instance())->create(wName.toString());
if (!workloadImpl) {
TraceEvent(SevError, "WorkloadNotFound");
TraceEvent(SevError, "WorkloadNotFound").log();
success = false;
}
workloadImpl->init(this);

View File

@ -75,7 +75,7 @@ struct HealthMetricsApiWorkload : TestWorkload {
Future<bool> check(Database const& cx) override {
if (healthMetricsStoppedUpdating) {
TraceEvent(SevError, "HealthMetricsStoppedUpdating");
TraceEvent(SevError, "HealthMetricsStoppedUpdating").log();
return false;
}
bool correctHealthMetricsState = true;

View File

@ -92,11 +92,11 @@ struct IncrementalBackupWorkload : TestWorkload {
}
loop {
// Wait for backup container to be created and avoid race condition
TraceEvent("IBackupWaitContainer");
TraceEvent("IBackupWaitContainer").log();
wait(success(self->backupAgent.waitBackup(
cx, self->tag.toString(), StopWhenDone::False, &backupContainer, &backupUID)));
if (!backupContainer.isValid()) {
TraceEvent("IBackupCheckListContainersAttempt");
TraceEvent("IBackupCheckListContainersAttempt").log();
state std::vector<std::string> containers =
wait(IBackupContainer::listContainers(self->backupDir.toString()));
TraceEvent("IBackupCheckListContainersSuccess")
@ -132,7 +132,7 @@ struct IncrementalBackupWorkload : TestWorkload {
}
if (self->stopBackup) {
try {
TraceEvent("IBackupDiscontinueBackup");
TraceEvent("IBackupDiscontinueBackup").log();
wait(self->backupAgent.discontinueBackup(cx, self->tag));
} catch (Error& e) {
TraceEvent("IBackupDiscontinueBackupException").error(e);
@ -148,7 +148,7 @@ struct IncrementalBackupWorkload : TestWorkload {
if (self->submitOnly) {
Standalone<VectorRef<KeyRangeRef>> backupRanges;
backupRanges.push_back_deep(backupRanges.arena(), normalKeys);
TraceEvent("IBackupSubmitAttempt");
TraceEvent("IBackupSubmitAttempt").log();
try {
wait(self->backupAgent.submitBackup(cx,
self->backupDir,
@ -165,7 +165,7 @@ struct IncrementalBackupWorkload : TestWorkload {
throw;
}
}
TraceEvent("IBackupSubmitSuccess");
TraceEvent("IBackupSubmitSuccess").log();
}
if (self->restoreOnly) {
if (self->clearBackupAgentKeys) {
@ -189,7 +189,7 @@ struct IncrementalBackupWorkload : TestWorkload {
wait(success(self->backupAgent.waitBackup(
cx, self->tag.toString(), StopWhenDone::False, &backupContainer, &backupUID)));
if (self->checkBeginVersion) {
TraceEvent("IBackupReadSystemKeys");
TraceEvent("IBackupReadSystemKeys").log();
state Reference<ReadYourWritesTransaction> tr(new ReadYourWritesTransaction(cx));
loop {
try {
@ -201,7 +201,7 @@ struct IncrementalBackupWorkload : TestWorkload {
.detail("WriteRecoveryValue", writeFlag.present() ? writeFlag.get().toString() : "N/A")
.detail("EndVersionValue", versionValue.present() ? versionValue.get().toString() : "N/A");
if (!versionValue.present()) {
TraceEvent("IBackupCheckSpecialKeysFailure");
TraceEvent("IBackupCheckSpecialKeysFailure").log();
// Snapshot failed to write to special keys, possibly due to snapshot itself failing
throw key_not_found();
}
@ -217,7 +217,7 @@ struct IncrementalBackupWorkload : TestWorkload {
}
}
}
TraceEvent("IBackupStartListContainersAttempt");
TraceEvent("IBackupStartListContainersAttempt").log();
state std::vector<std::string> containers =
wait(IBackupContainer::listContainers(self->backupDir.toString()));
TraceEvent("IBackupStartListContainersSuccess")
@ -239,7 +239,7 @@ struct IncrementalBackupWorkload : TestWorkload {
OnlyApplyMutationLogs::True,
InconsistentSnapshotOnly::False,
beginVersion)));
TraceEvent("IBackupRestoreSuccess");
TraceEvent("IBackupRestoreSuccess").log();
}
return Void();
}

View File

@ -115,7 +115,7 @@ struct KVTest {
~KVTest() { close(); }
void close() {
if (store) {
TraceEvent("KVTestDestroy");
TraceEvent("KVTestDestroy").log();
if (dispose)
store->dispose();
else
@ -373,7 +373,7 @@ ACTOR Future<Void> testKVStore(KVStoreTestWorkload* workload) {
state Error err;
// wait( delay(1) );
TraceEvent("GO");
TraceEvent("GO").log();
UID id = deterministicRandom()->randomUniqueID();
std::string fn = workload->filename.size() ? workload->filename : id.toString();

View File

@ -56,11 +56,11 @@ struct KillRegionWorkload : TestWorkload {
void getMetrics(vector<PerfMetric>& m) override {}
ACTOR static Future<Void> _setup(KillRegionWorkload* self, Database cx) {
TraceEvent("ForceRecovery_DisablePrimaryBegin");
TraceEvent("ForceRecovery_DisablePrimaryBegin").log();
wait(success(changeConfig(cx, g_simulator.disablePrimary, true)));
TraceEvent("ForceRecovery_WaitForRemote");
TraceEvent("ForceRecovery_WaitForRemote").log();
wait(waitForPrimaryDC(cx, LiteralStringRef("1")));
TraceEvent("ForceRecovery_DisablePrimaryComplete");
TraceEvent("ForceRecovery_DisablePrimaryComplete").log();
return Void();
}
@ -74,14 +74,14 @@ struct KillRegionWorkload : TestWorkload {
ACTOR static Future<Void> killRegion(KillRegionWorkload* self, Database cx) {
ASSERT(g_network->isSimulated());
if (deterministicRandom()->random01() < 0.5) {
TraceEvent("ForceRecovery_DisableRemoteBegin");
TraceEvent("ForceRecovery_DisableRemoteBegin").log();
wait(success(changeConfig(cx, g_simulator.disableRemote, true)));
TraceEvent("ForceRecovery_WaitForPrimary");
TraceEvent("ForceRecovery_WaitForPrimary").log();
wait(waitForPrimaryDC(cx, LiteralStringRef("0")));
TraceEvent("ForceRecovery_DisableRemoteComplete");
TraceEvent("ForceRecovery_DisableRemoteComplete").log();
wait(success(changeConfig(cx, g_simulator.originalRegions, true)));
}
TraceEvent("ForceRecovery_Wait");
TraceEvent("ForceRecovery_Wait").log();
wait(delay(deterministicRandom()->random01() * self->testDuration));
g_simulator.killDataCenter(LiteralStringRef("0"),
@ -97,11 +97,11 @@ struct KillRegionWorkload : TestWorkload {
: ISimulator::RebootAndDelete,
true);
TraceEvent("ForceRecovery_Begin");
TraceEvent("ForceRecovery_Begin").log();
wait(forceRecovery(cx->getConnectionFile(), LiteralStringRef("1")));
TraceEvent("ForceRecovery_UsableRegions");
TraceEvent("ForceRecovery_UsableRegions").log();
DatabaseConfiguration conf = wait(getDatabaseConfiguration(cx));
@ -119,7 +119,7 @@ struct KillRegionWorkload : TestWorkload {
wait(success(changeConfig(cx, "usable_regions=1", true)));
}
TraceEvent("ForceRecovery_Complete");
TraceEvent("ForceRecovery_Complete").log();
return Void();
}

View File

@ -54,7 +54,7 @@ struct LogMetricsWorkload : TestWorkload {
state BinaryWriter br(Unversioned());
vector<WorkerDetails> workers = wait(getWorkers(self->dbInfo));
// vector<Future<Void>> replies;
TraceEvent("RateChangeTrigger");
TraceEvent("RateChangeTrigger").log();
SetMetricsLogRateRequest req(rate);
for (int i = 0; i < workers.size(); i++) {
workers[i].interf.setMetricsRate.send(req);

View File

@ -77,7 +77,7 @@ struct LowLatencyWorkload : TestWorkload {
++self->operations;
loop {
try {
TraceEvent("StartLowLatencyTransaction");
TraceEvent("StartLowLatencyTransaction").log();
tr.setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);
tr.setOption(FDBTransactionOptions::LOCK_AWARE);
if (doCommit) {

View File

@ -39,18 +39,18 @@ static std::set<int> const& normalAttritionErrors() {
ACTOR Future<bool> ignoreSSFailuresForDuration(Database cx, double duration) {
// duration doesn't matter since this won't timeout
TraceEvent("IgnoreSSFailureStart");
TraceEvent("IgnoreSSFailureStart").log();
wait(success(setHealthyZone(cx, ignoreSSFailuresZoneString, 0)));
TraceEvent("IgnoreSSFailureWait");
TraceEvent("IgnoreSSFailureWait").log();
wait(delay(duration));
TraceEvent("IgnoreSSFailureClear");
TraceEvent("IgnoreSSFailureClear").log();
state Transaction tr(cx);
loop {
try {
tr.setOption(FDBTransactionOptions::LOCK_AWARE);
tr.clear(healthyZoneKey);
wait(tr.commit());
TraceEvent("IgnoreSSFailureComplete");
TraceEvent("IgnoreSSFailureComplete").log();
return true;
} catch (Error& e) {
wait(tr.onError(e));
@ -311,7 +311,7 @@ struct MachineAttritionWorkload : TestWorkload {
TEST(true); // Killing a machine
wait(delay(delayBeforeKill));
TraceEvent("WorkerKillAfterDelay");
TraceEvent("WorkerKillAfterDelay").log();
if (self->waitForVersion) {
state Transaction tr(cx);

View File

@ -30,7 +30,7 @@
struct RunRestoreWorkerWorkload : TestWorkload {
Future<Void> worker;
RunRestoreWorkerWorkload(WorkloadContext const& wcx) : TestWorkload(wcx) {
TraceEvent("RunRestoreWorkerWorkloadMX");
TraceEvent("RunRestoreWorkerWorkloadMX").log();
}
std::string description() const override { return "RunRestoreWorkerWorkload"; }

View File

@ -259,7 +259,7 @@ struct PingWorkload : TestWorkload {
// peers[i].payloadPing.getEndpoint().getPrimaryAddress(), pingId ) ); peers[i].payloadPing.send( req );
// replies.push_back( self->payloadDelayer( req, peers[i].payloadPing ) );
}
TraceEvent("PayloadPingSent", pingId);
TraceEvent("PayloadPingSent", pingId).log();
wait(waitForAll(replies));
double elapsed = now() - start;
TraceEvent("PayloadPingDone", pingId).detail("Elapsed", elapsed);

View File

@ -184,7 +184,7 @@ struct PopulateTPCC : TestWorkload {
}
}
}
TraceEvent("PopulateItemsDone");
TraceEvent("PopulateItemsDone").log();
return Void();
}

View File

@ -62,13 +62,13 @@ struct MoveKeysWorkload : TestWorkload {
}
state int oldMode = wait(setDDMode(cx, 0));
TraceEvent("RMKStartModeSetting");
TraceEvent("RMKStartModeSetting").log();
wait(timeout(
reportErrors(self->worker(cx, self), "MoveKeysWorkloadWorkerError"), self->testDuration, Void()));
// Always set the DD mode back, even if we die with an error
TraceEvent("RMKDoneMoving");
TraceEvent("RMKDoneMoving").log();
wait(success(setDDMode(cx, oldMode)));
TraceEvent("RMKDoneModeSetting");
TraceEvent("RMKDoneModeSetting").log();
}
return Void();
}
@ -87,7 +87,7 @@ struct MoveKeysWorkload : TestWorkload {
vector<StorageServerInterface> getRandomTeam(vector<StorageServerInterface> storageServers, int teamSize) {
if (storageServers.size() < teamSize) {
TraceEvent(SevWarnAlways, "LessThanThreeStorageServers");
TraceEvent(SevWarnAlways, "LessThanThreeStorageServers").log();
throw operation_failed();
}
@ -105,7 +105,7 @@ struct MoveKeysWorkload : TestWorkload {
}
if (t.size() < teamSize) {
TraceEvent(SevWarnAlways, "LessThanThreeUniqueMachines");
TraceEvent(SevWarnAlways, "LessThanThreeUniqueMachines").log();
throw operation_failed();
}

View File

@ -125,6 +125,7 @@ struct RandomSelectorWorkload : TestWorkload {
//TraceEvent("RYOWInit").detail("Key",myKeyA).detail("Value",myValue);
}
wait(tr.commit());
tr.reset();
break;
} catch (Error& e) {
wait(tr.onError(e));
@ -149,6 +150,7 @@ struct RandomSelectorWorkload : TestWorkload {
try {
tr.set(StringRef(clientID + "d/" + myKeyA), myValue);
wait(tr.commit());
tr.reset();
break;
} catch (Error& e) {
wait(tr.onError(e));
@ -163,6 +165,7 @@ struct RandomSelectorWorkload : TestWorkload {
try {
tr.clear(StringRef(clientID + "d/" + myKeyA));
wait(tr.commit());
tr.reset();
break;
} catch (Error& e) {
wait(tr.onError(e));
@ -184,6 +187,7 @@ struct RandomSelectorWorkload : TestWorkload {
tr.clear(KeyRangeRef(StringRef(clientID + "d/" + myKeyA),
StringRef(clientID + "d/" + myKeyB)));
wait(tr.commit());
tr.reset();
break;
} catch (Error& e) {
wait(tr.onError(e));
@ -231,6 +235,7 @@ struct RandomSelectorWorkload : TestWorkload {
tr.set(StringRef(clientID + "z/" + myRandomIDKey), StringRef());
tr.atomicOp(StringRef(clientID + "d/" + myKeyA), myValue, MutationRef::AddValue);
wait(tr.commit());
tr.reset();
break;
} catch (Error& e) {
error = e;
@ -254,6 +259,7 @@ struct RandomSelectorWorkload : TestWorkload {
tr.set(StringRef(clientID + "z/" + myRandomIDKey), StringRef());
tr.atomicOp(StringRef(clientID + "d/" + myKeyA), myValue, MutationRef::AppendIfFits);
wait(tr.commit());
tr.reset();
break;
} catch (Error& e) {
error = e;
@ -277,6 +283,7 @@ struct RandomSelectorWorkload : TestWorkload {
tr.set(StringRef(clientID + "z/" + myRandomIDKey), StringRef());
tr.atomicOp(StringRef(clientID + "d/" + myKeyA), myValue, MutationRef::And);
wait(tr.commit());
tr.reset();
break;
} catch (Error& e) {
error = e;
@ -300,6 +307,7 @@ struct RandomSelectorWorkload : TestWorkload {
tr.set(StringRef(clientID + "z/" + myRandomIDKey), StringRef());
tr.atomicOp(StringRef(clientID + "d/" + myKeyA), myValue, MutationRef::Or);
wait(tr.commit());
tr.reset();
break;
} catch (Error& e) {
error = e;
@ -323,6 +331,7 @@ struct RandomSelectorWorkload : TestWorkload {
tr.set(StringRef(clientID + "z/" + myRandomIDKey), StringRef());
tr.atomicOp(StringRef(clientID + "d/" + myKeyA), myValue, MutationRef::Xor);
wait(tr.commit());
tr.reset();
break;
} catch (Error& e) {
error = e;
@ -346,6 +355,7 @@ struct RandomSelectorWorkload : TestWorkload {
tr.set(StringRef(clientID + "z/" + myRandomIDKey), StringRef());
tr.atomicOp(StringRef(clientID + "d/" + myKeyA), myValue, MutationRef::Max);
wait(tr.commit());
tr.reset();
break;
} catch (Error& e) {
error = e;
@ -369,6 +379,7 @@ struct RandomSelectorWorkload : TestWorkload {
tr.set(StringRef(clientID + "z/" + myRandomIDKey), StringRef());
tr.atomicOp(StringRef(clientID + "d/" + myKeyA), myValue, MutationRef::Min);
wait(tr.commit());
tr.reset();
break;
} catch (Error& e) {
error = e;
@ -392,6 +403,7 @@ struct RandomSelectorWorkload : TestWorkload {
tr.set(StringRef(clientID + "z/" + myRandomIDKey), StringRef());
tr.atomicOp(StringRef(clientID + "d/" + myKeyA), myValue, MutationRef::ByteMin);
wait(tr.commit());
tr.reset();
break;
} catch (Error& e) {
error = e;
@ -415,6 +427,7 @@ struct RandomSelectorWorkload : TestWorkload {
tr.set(StringRef(clientID + "z/" + myRandomIDKey), StringRef());
tr.atomicOp(StringRef(clientID + "d/" + myKeyA), myValue, MutationRef::ByteMax);
wait(tr.commit());
tr.reset();
break;
} catch (Error& e) {
error = e;

View File

@ -73,7 +73,7 @@ struct RestoreBackupWorkload final : TestWorkload {
.detail("TargetVersion", waitForVersion);
if (desc.contiguousLogEnd.present() && desc.contiguousLogEnd.get() >= waitForVersion) {
try {
TraceEvent("DiscontinuingBackup");
TraceEvent("DiscontinuingBackup").log();
wait(self->backupAgent.discontinueBackup(cx, self->tag));
} catch (Error& e) {
TraceEvent("ErrorDiscontinuingBackup").error(e);

View File

@ -114,7 +114,7 @@ struct SimpleAtomicAddWorkload : TestWorkload {
}
loop {
try {
TraceEvent("SAACheckKey");
TraceEvent("SAACheckKey").log();
Optional<Value> actualValue = wait(tr.get(self->sumKey));
uint64_t actualValueInt = 0;
if (actualValue.present()) {

View File

@ -90,7 +90,7 @@ public: // variables
public: // ctor & dtor
SnapTestWorkload(WorkloadContext const& wcx)
: TestWorkload(wcx), numSnaps(0), maxSnapDelay(0.0), testID(0), snapUID() {
TraceEvent("SnapTestWorkloadConstructor");
TraceEvent("SnapTestWorkloadConstructor").log();
std::string workloadName = "SnapTest";
maxRetryCntToRetrieveMessage = 10;
@ -107,11 +107,11 @@ public: // ctor & dtor
public: // workload functions
std::string description() const override { return "SnapTest"; }
Future<Void> setup(Database const& cx) override {
TraceEvent("SnapTestWorkloadSetup");
TraceEvent("SnapTestWorkloadSetup").log();
return Void();
}
Future<Void> start(Database const& cx) override {
TraceEvent("SnapTestWorkloadStart");
TraceEvent("SnapTestWorkloadStart").log();
if (clientId == 0) {
return _start(cx, this);
}
@ -120,7 +120,7 @@ public: // workload functions
ACTOR Future<bool> _check(Database cx, SnapTestWorkload* self) {
if (self->skipCheck) {
TraceEvent(SevWarnAlways, "SnapCheckIgnored");
TraceEvent(SevWarnAlways, "SnapCheckIgnored").log();
return true;
}
state Transaction tr(cx);
@ -250,7 +250,7 @@ public: // workload functions
bool backupFailed = atoi(ini.GetValue("RESTORE", "BackupFailed"));
if (backupFailed) {
// since backup failed, skip the restore checking
TraceEvent(SevWarnAlways, "BackupFailedSkippingRestoreCheck");
TraceEvent(SevWarnAlways, "BackupFailedSkippingRestoreCheck").log();
return Void();
}
state KeySelector begin = firstGreaterOrEqual(normalKeys.begin);
@ -265,7 +265,7 @@ public: // workload functions
try {
RangeResult kvRange = wait(tr.getRange(begin, end, 1000));
if (!kvRange.more && kvRange.size() == 0) {
TraceEvent("SnapTestNoMoreEntries");
TraceEvent("SnapTestNoMoreEntries").log();
break;
}

View File

@ -721,7 +721,7 @@ struct SpecialKeySpaceCorrectnessWorkload : TestWorkload {
ASSERT(false);
} else {
// If no worker process returned, skip the test
TraceEvent(SevDebug, "EmptyWorkerListInSetClassTest");
TraceEvent(SevDebug, "EmptyWorkerListInSetClassTest").log();
}
} catch (Error& e) {
if (e.code() == error_code_actor_cancelled)
@ -783,6 +783,7 @@ struct SpecialKeySpaceCorrectnessWorkload : TestWorkload {
Value(worker.processClass.toString())); // Set it as the same class type as before, thus only
// class source will be changed
wait(tx->commit());
tx->reset();
Optional<Value> class_source = wait(tx->get(
Key("process/class_source/" + address)
.withPrefix(
@ -796,7 +797,7 @@ struct SpecialKeySpaceCorrectnessWorkload : TestWorkload {
tx->reset();
} else {
// If no worker process returned, skip the test
TraceEvent(SevDebug, "EmptyWorkerListInSetClassTest");
TraceEvent(SevDebug, "EmptyWorkerListInSetClassTest").log();
}
} catch (Error& e) {
wait(tx->onError(e));
@ -832,7 +833,7 @@ struct SpecialKeySpaceCorrectnessWorkload : TestWorkload {
}
}
}
TraceEvent(SevDebug, "DatabaseLocked");
TraceEvent(SevDebug, "DatabaseLocked").log();
// if database locked, fdb read should get database_locked error
try {
tx->reset();
@ -851,7 +852,7 @@ struct SpecialKeySpaceCorrectnessWorkload : TestWorkload {
// unlock the database
tx->clear(SpecialKeySpace::getManagementApiCommandPrefix("lock"));
wait(tx->commit());
TraceEvent(SevDebug, "DatabaseUnlocked");
TraceEvent(SevDebug, "DatabaseUnlocked").log();
tx->reset();
// read should be successful
RangeResult res = wait(tx->getRange(normalKeys, 1));

View File

@ -101,7 +101,7 @@ struct StatusWorkload : TestWorkload {
TraceEvent(SevError, "SchemaCoverageRequirementsException").detail("What", e.what());
throw unknown_error();
} catch (...) {
TraceEvent(SevError, "SchemaCoverageRequirementsException");
TraceEvent(SevError, "SchemaCoverageRequirementsException").log();
throw unknown_error();
}
}
@ -153,6 +153,7 @@ struct StatusWorkload : TestWorkload {
tr.set(latencyBandConfigKey, ValueRef(config));
wait(tr.commit());
tr.reset();
if (deterministicRandom()->random01() < 0.3) {
return Void();

View File

@ -112,7 +112,7 @@ struct ThrottlingWorkload : KVWorkload {
}
wait(tr.commit());
if (deterministicRandom()->randomInt(0, 1000) == 0)
TraceEvent("TransactionCommittedx1000");
TraceEvent("TransactionCommittedx1000").log();
++self->transactionsCommitted;
} catch (Error& e) {
if (e.code() == error_code_actor_cancelled)

View File

@ -39,7 +39,7 @@ struct TimeKeeperCorrectnessWorkload : TestWorkload {
void getMetrics(vector<PerfMetric>& m) override {}
ACTOR static Future<Void> _start(Database cx, TimeKeeperCorrectnessWorkload* self) {
TraceEvent(SevInfo, "TKCorrectness_Start");
TraceEvent(SevInfo, "TKCorrectness_Start").log();
state double start = now();
while (now() - start > self->testDuration) {
@ -60,7 +60,7 @@ struct TimeKeeperCorrectnessWorkload : TestWorkload {
wait(delay(std::min(SERVER_KNOBS->TIME_KEEPER_DELAY / 10, (int64_t)1L)));
}
TraceEvent(SevInfo, "TKCorrectness_Completed");
TraceEvent(SevInfo, "TKCorrectness_Completed").log();
return Void();
}
@ -111,7 +111,7 @@ struct TimeKeeperCorrectnessWorkload : TestWorkload {
}
}
TraceEvent(SevInfo, "TKCorrectness_Passed");
TraceEvent(SevInfo, "TKCorrectness_Passed").log();
return true;
} catch (Error& e) {
wait(tr->onError(e));

View File

@ -111,7 +111,7 @@ struct TriggerRecoveryLoopWorkload : TestWorkload {
else
tr.set(LiteralStringRef("\xff\xff/reboot_worker"), it.second);
}
TraceEvent(SevInfo, "TriggerRecoveryLoop_AttempedKillAll");
TraceEvent(SevInfo, "TriggerRecoveryLoop_AttempedKillAll").log();
return Void();
} catch (Error& e) {
wait(tr.onError(e));

View File

@ -297,7 +297,7 @@ struct VersionStampWorkload : TestWorkload {
wait(tr.onError(e));
}
}
TraceEvent("VST_CheckEnd");
TraceEvent("VST_CheckEnd").log();
return true;
}

View File

@ -518,7 +518,7 @@ ACTOR Future<Void> commitAndUpdateMemory(ReadYourWritesTransaction* tr,
}
if (failed) {
TraceEvent(SevError, "WriteConflictRangeError");
TraceEvent(SevError, "WriteConflictRangeError").log();
for (transactionIter = transactionRanges.begin(); transactionIter != transactionRanges.end();
++transactionIter) {
TraceEvent("WCRTransaction")

View File

@ -135,7 +135,7 @@ struct WriteTagThrottlingWorkload : KVWorkload {
return true;
if (writeThrottle) {
if (!badActorThrottleRetries && !goodActorThrottleRetries) {
TraceEvent(SevWarn, "NoThrottleTriggered");
TraceEvent(SevWarn, "NoThrottleTriggered").log();
}
if (badActorThrottleRetries < goodActorThrottleRetries) {
TraceEvent(SevWarnAlways, "IncorrectThrottle")

Some files were not shown because too many files have changed in this diff Show More