Merge branch 'master' into master

This commit is contained in:
neethuhaneesha 2021-07-28 15:25:50 -07:00 committed by GitHub
commit b5a302058a
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
125 changed files with 1466 additions and 739 deletions

View File

@ -42,7 +42,7 @@ FDBLibTLSPolicy::FDBLibTLSPolicy(Reference<FDBLibTLSPlugin> plugin)
key_data_set(false), verify_peers_set(false) { key_data_set(false), verify_peers_set(false) {
if ((tls_cfg = tls_config_new()) == nullptr) { if ((tls_cfg = tls_config_new()) == nullptr) {
TraceEvent(SevError, "FDBLibTLSConfigError"); TraceEvent(SevError, "FDBLibTLSConfigError").log();
throw std::runtime_error("FDBLibTLSConfigError"); throw std::runtime_error("FDBLibTLSConfigError");
} }
@ -67,14 +67,14 @@ ITLSSession* FDBLibTLSPolicy::create_session(bool is_client,
// servername, since this will be ignored - the servername should be // servername, since this will be ignored - the servername should be
// matched by the verify criteria instead. // matched by the verify criteria instead.
if (verify_peers_set && servername != nullptr) { if (verify_peers_set && servername != nullptr) {
TraceEvent(SevError, "FDBLibTLSVerifyPeersWithServerName"); TraceEvent(SevError, "FDBLibTLSVerifyPeersWithServerName").log();
return nullptr; return nullptr;
} }
// If verify peers has not been set, then require a server name to // If verify peers has not been set, then require a server name to
// avoid an accidental lack of name validation. // avoid an accidental lack of name validation.
if (!verify_peers_set && servername == nullptr) { if (!verify_peers_set && servername == nullptr) {
TraceEvent(SevError, "FDBLibTLSNoServerName"); TraceEvent(SevError, "FDBLibTLSNoServerName").log();
return nullptr; return nullptr;
} }
} }
@ -123,18 +123,18 @@ struct stack_st_X509* FDBLibTLSPolicy::parse_cert_pem(const uint8_t* cert_pem, s
if (cert_pem_len > INT_MAX) if (cert_pem_len > INT_MAX)
goto err; goto err;
if ((bio = BIO_new_mem_buf((void*)cert_pem, cert_pem_len)) == nullptr) { if ((bio = BIO_new_mem_buf((void*)cert_pem, cert_pem_len)) == nullptr) {
TraceEvent(SevError, "FDBLibTLSOutOfMemory"); TraceEvent(SevError, "FDBLibTLSOutOfMemory").log();
goto err; goto err;
} }
if ((certs = sk_X509_new_null()) == nullptr) { if ((certs = sk_X509_new_null()) == nullptr) {
TraceEvent(SevError, "FDBLibTLSOutOfMemory"); TraceEvent(SevError, "FDBLibTLSOutOfMemory").log();
goto err; goto err;
} }
ERR_clear_error(); ERR_clear_error();
while ((cert = PEM_read_bio_X509(bio, nullptr, password_cb, nullptr)) != nullptr) { while ((cert = PEM_read_bio_X509(bio, nullptr, password_cb, nullptr)) != nullptr) {
if (!sk_X509_push(certs, cert)) { if (!sk_X509_push(certs, cert)) {
TraceEvent(SevError, "FDBLibTLSOutOfMemory"); TraceEvent(SevError, "FDBLibTLSOutOfMemory").log();
goto err; goto err;
} }
} }
@ -150,7 +150,7 @@ struct stack_st_X509* FDBLibTLSPolicy::parse_cert_pem(const uint8_t* cert_pem, s
} }
if (sk_X509_num(certs) < 1) { if (sk_X509_num(certs) < 1) {
TraceEvent(SevError, "FDBLibTLSNoCerts"); TraceEvent(SevError, "FDBLibTLSNoCerts").log();
goto err; goto err;
} }
@ -168,11 +168,11 @@ err:
bool FDBLibTLSPolicy::set_ca_data(const uint8_t* ca_data, int ca_len) { bool FDBLibTLSPolicy::set_ca_data(const uint8_t* ca_data, int ca_len) {
if (ca_data_set) { if (ca_data_set) {
TraceEvent(SevError, "FDBLibTLSCAAlreadySet"); TraceEvent(SevError, "FDBLibTLSCAAlreadySet").log();
return false; return false;
} }
if (session_created) { if (session_created) {
TraceEvent(SevError, "FDBLibTLSPolicyAlreadyActive"); TraceEvent(SevError, "FDBLibTLSPolicyAlreadyActive").log();
return false; return false;
} }
@ -194,11 +194,11 @@ bool FDBLibTLSPolicy::set_ca_data(const uint8_t* ca_data, int ca_len) {
bool FDBLibTLSPolicy::set_cert_data(const uint8_t* cert_data, int cert_len) { bool FDBLibTLSPolicy::set_cert_data(const uint8_t* cert_data, int cert_len) {
if (cert_data_set) { if (cert_data_set) {
TraceEvent(SevError, "FDBLibTLSCertAlreadySet"); TraceEvent(SevError, "FDBLibTLSCertAlreadySet").log();
return false; return false;
} }
if (session_created) { if (session_created) {
TraceEvent(SevError, "FDBLibTLSPolicyAlreadyActive"); TraceEvent(SevError, "FDBLibTLSPolicyAlreadyActive").log();
return false; return false;
} }
@ -218,11 +218,11 @@ bool FDBLibTLSPolicy::set_key_data(const uint8_t* key_data, int key_len, const c
bool rc = false; bool rc = false;
if (key_data_set) { if (key_data_set) {
TraceEvent(SevError, "FDBLibTLSKeyAlreadySet"); TraceEvent(SevError, "FDBLibTLSKeyAlreadySet").log();
goto err; goto err;
} }
if (session_created) { if (session_created) {
TraceEvent(SevError, "FDBLibTLSPolicyAlreadyActive"); TraceEvent(SevError, "FDBLibTLSPolicyAlreadyActive").log();
goto err; goto err;
} }
@ -231,7 +231,7 @@ bool FDBLibTLSPolicy::set_key_data(const uint8_t* key_data, int key_len, const c
long len; long len;
if ((bio = BIO_new_mem_buf((void*)key_data, key_len)) == nullptr) { if ((bio = BIO_new_mem_buf((void*)key_data, key_len)) == nullptr) {
TraceEvent(SevError, "FDBLibTLSOutOfMemory"); TraceEvent(SevError, "FDBLibTLSOutOfMemory").log();
goto err; goto err;
} }
ERR_clear_error(); ERR_clear_error();
@ -241,7 +241,7 @@ bool FDBLibTLSPolicy::set_key_data(const uint8_t* key_data, int key_len, const c
if ((ERR_GET_LIB(errnum) == ERR_LIB_PEM && ERR_GET_REASON(errnum) == PEM_R_BAD_DECRYPT) || if ((ERR_GET_LIB(errnum) == ERR_LIB_PEM && ERR_GET_REASON(errnum) == PEM_R_BAD_DECRYPT) ||
(ERR_GET_LIB(errnum) == ERR_LIB_EVP && ERR_GET_REASON(errnum) == EVP_R_BAD_DECRYPT)) { (ERR_GET_LIB(errnum) == ERR_LIB_EVP && ERR_GET_REASON(errnum) == EVP_R_BAD_DECRYPT)) {
TraceEvent(SevError, "FDBLibTLSIncorrectPassword"); TraceEvent(SevError, "FDBLibTLSIncorrectPassword").log();
} else { } else {
ERR_error_string_n(errnum, errbuf, sizeof(errbuf)); ERR_error_string_n(errnum, errbuf, sizeof(errbuf));
TraceEvent(SevError, "FDBLibTLSPrivateKeyError").detail("LibcryptoErrorMessage", errbuf); TraceEvent(SevError, "FDBLibTLSPrivateKeyError").detail("LibcryptoErrorMessage", errbuf);
@ -250,15 +250,15 @@ bool FDBLibTLSPolicy::set_key_data(const uint8_t* key_data, int key_len, const c
} }
BIO_free(bio); BIO_free(bio);
if ((bio = BIO_new(BIO_s_mem())) == nullptr) { if ((bio = BIO_new(BIO_s_mem())) == nullptr) {
TraceEvent(SevError, "FDBLibTLSOutOfMemory"); TraceEvent(SevError, "FDBLibTLSOutOfMemory").log();
goto err; goto err;
} }
if (!PEM_write_bio_PrivateKey(bio, key, nullptr, nullptr, 0, nullptr, nullptr)) { if (!PEM_write_bio_PrivateKey(bio, key, nullptr, nullptr, 0, nullptr, nullptr)) {
TraceEvent(SevError, "FDBLibTLSOutOfMemory"); TraceEvent(SevError, "FDBLibTLSOutOfMemory").log();
goto err; goto err;
} }
if ((len = BIO_get_mem_data(bio, &data)) <= 0) { if ((len = BIO_get_mem_data(bio, &data)) <= 0) {
TraceEvent(SevError, "FDBLibTLSOutOfMemory"); TraceEvent(SevError, "FDBLibTLSOutOfMemory").log();
goto err; goto err;
} }
if (tls_config_set_key_mem(tls_cfg, (const uint8_t*)data, len) == -1) { if (tls_config_set_key_mem(tls_cfg, (const uint8_t*)data, len) == -1) {
@ -283,16 +283,16 @@ err:
bool FDBLibTLSPolicy::set_verify_peers(int count, const uint8_t* verify_peers[], int verify_peers_len[]) { bool FDBLibTLSPolicy::set_verify_peers(int count, const uint8_t* verify_peers[], int verify_peers_len[]) {
if (verify_peers_set) { if (verify_peers_set) {
TraceEvent(SevError, "FDBLibTLSVerifyPeersAlreadySet"); TraceEvent(SevError, "FDBLibTLSVerifyPeersAlreadySet").log();
return false; return false;
} }
if (session_created) { if (session_created) {
TraceEvent(SevError, "FDBLibTLSPolicyAlreadyActive"); TraceEvent(SevError, "FDBLibTLSPolicyAlreadyActive").log();
return false; return false;
} }
if (count < 1) { if (count < 1) {
TraceEvent(SevError, "FDBLibTLSNoVerifyPeers"); TraceEvent(SevError, "FDBLibTLSNoVerifyPeers").log();
return false; return false;
} }

View File

@ -73,7 +73,7 @@ FDBLibTLSSession::FDBLibTLSSession(Reference<FDBLibTLSPolicy> policy,
if (is_client) { if (is_client) {
if ((tls_ctx = tls_client()) == nullptr) { if ((tls_ctx = tls_client()) == nullptr) {
TraceEvent(SevError, "FDBLibTLSClientError", uid); TraceEvent(SevError, "FDBLibTLSClientError", uid).log();
throw std::runtime_error("FDBLibTLSClientError"); throw std::runtime_error("FDBLibTLSClientError");
} }
if (tls_configure(tls_ctx, policy->tls_cfg) == -1) { if (tls_configure(tls_ctx, policy->tls_cfg) == -1) {
@ -88,7 +88,7 @@ FDBLibTLSSession::FDBLibTLSSession(Reference<FDBLibTLSPolicy> policy,
} }
} else { } else {
if ((tls_sctx = tls_server()) == nullptr) { if ((tls_sctx = tls_server()) == nullptr) {
TraceEvent(SevError, "FDBLibTLSServerError", uid); TraceEvent(SevError, "FDBLibTLSServerError", uid).log();
throw std::runtime_error("FDBLibTLSServerError"); throw std::runtime_error("FDBLibTLSServerError");
} }
if (tls_configure(tls_sctx, policy->tls_cfg) == -1) { if (tls_configure(tls_sctx, policy->tls_cfg) == -1) {
@ -250,7 +250,7 @@ std::tuple<bool, std::string> FDBLibTLSSession::check_verify(Reference<FDBLibTLS
// Verify the certificate. // Verify the certificate.
if ((store_ctx = X509_STORE_CTX_new()) == nullptr) { if ((store_ctx = X509_STORE_CTX_new()) == nullptr) {
TraceEvent(SevError, "FDBLibTLSOutOfMemory", uid); TraceEvent(SevError, "FDBLibTLSOutOfMemory", uid).log();
reason = "Out of memory"; reason = "Out of memory";
goto err; goto err;
} }
@ -333,7 +333,7 @@ bool FDBLibTLSSession::verify_peer() {
return true; return true;
if ((cert_pem = tls_peer_cert_chain_pem(tls_ctx, &cert_pem_len)) == nullptr) { if ((cert_pem = tls_peer_cert_chain_pem(tls_ctx, &cert_pem_len)) == nullptr) {
TraceEvent(SevError, "FDBLibTLSNoCertError", uid); TraceEvent(SevError, "FDBLibTLSNoCertError", uid).log();
goto err; goto err;
} }
if ((certs = policy->parse_cert_pem(cert_pem, cert_pem_len)) == nullptr) if ((certs = policy->parse_cert_pem(cert_pem, cert_pem_len)) == nullptr)
@ -388,14 +388,14 @@ int FDBLibTLSSession::handshake() {
int FDBLibTLSSession::read(uint8_t* data, int length) { int FDBLibTLSSession::read(uint8_t* data, int length) {
if (!handshake_completed) { if (!handshake_completed) {
TraceEvent(SevError, "FDBLibTLSReadHandshakeError"); TraceEvent(SevError, "FDBLibTLSReadHandshakeError").log();
return FAILED; return FAILED;
} }
ssize_t n = tls_read(tls_ctx, data, length); ssize_t n = tls_read(tls_ctx, data, length);
if (n > 0) { if (n > 0) {
if (n > INT_MAX) { if (n > INT_MAX) {
TraceEvent(SevError, "FDBLibTLSReadOverflow"); TraceEvent(SevError, "FDBLibTLSReadOverflow").log();
return FAILED; return FAILED;
} }
return (int)n; return (int)n;
@ -415,14 +415,14 @@ int FDBLibTLSSession::read(uint8_t* data, int length) {
int FDBLibTLSSession::write(const uint8_t* data, int length) { int FDBLibTLSSession::write(const uint8_t* data, int length) {
if (!handshake_completed) { if (!handshake_completed) {
TraceEvent(SevError, "FDBLibTLSWriteHandshakeError", uid); TraceEvent(SevError, "FDBLibTLSWriteHandshakeError", uid).log();
return FAILED; return FAILED;
} }
ssize_t n = tls_write(tls_ctx, data, length); ssize_t n = tls_write(tls_ctx, data, length);
if (n > 0) { if (n > 0) {
if (n > INT_MAX) { if (n > INT_MAX) {
TraceEvent(SevError, "FDBLibTLSWriteOverflow", uid); TraceEvent(SevError, "FDBLibTLSWriteOverflow", uid).log();
return FAILED; return FAILED;
} }
return (int)n; return (int)n;

View File

@ -162,7 +162,7 @@ extern "C" DLLEXPORT fdb_bool_t fdb_future_is_ready(FDBFuture* f) {
return TSAVB(f)->isReady(); return TSAVB(f)->isReady();
} }
class CAPICallback : public ThreadCallback { class CAPICallback final : public ThreadCallback {
public: public:
CAPICallback(void (*callbackf)(FDBFuture*, void*), FDBFuture* f, void* userdata) CAPICallback(void (*callbackf)(FDBFuture*, void*), FDBFuture* f, void* userdata)
: callbackf(callbackf), f(f), userdata(userdata) {} : callbackf(callbackf), f(f), userdata(userdata) {}

View File

@ -66,6 +66,7 @@ public:
}; };
struct FDBPromise { struct FDBPromise {
virtual ~FDBPromise() = default;
virtual void send(void*) = 0; virtual void send(void*) = 0;
}; };

View File

@ -2177,6 +2177,81 @@ TEST_CASE("monitor_network_busyness") {
CHECK(containsGreaterZero); CHECK(containsGreaterZero);
} }
// Commit a transaction and confirm it has not been reset
TEST_CASE("commit_does_not_reset") {
fdb::Transaction tr(db);
fdb::Transaction tr2(db);
// Commit two transactions, one that will fail with conflict and the other
// that will succeed. Ensure both transactions are not reset at the end.
while (1) {
fdb::Int64Future tr1GrvFuture = tr.get_read_version();
fdb_error_t err = wait_future(tr1GrvFuture);
if (err) {
fdb::EmptyFuture tr1OnErrorFuture = tr.on_error(err);
fdb_check(wait_future(tr1OnErrorFuture));
continue;
}
int64_t tr1StartVersion;
CHECK(!tr1GrvFuture.get(&tr1StartVersion));
fdb::Int64Future tr2GrvFuture = tr2.get_read_version();
err = wait_future(tr2GrvFuture);
if (err) {
fdb::EmptyFuture tr2OnErrorFuture = tr2.on_error(err);
fdb_check(wait_future(tr2OnErrorFuture));
continue;
}
int64_t tr2StartVersion;
CHECK(!tr2GrvFuture.get(&tr2StartVersion));
tr.set(key("foo"), "bar");
fdb::EmptyFuture tr1CommitFuture = tr.commit();
err = wait_future(tr1CommitFuture);
if (err) {
fdb::EmptyFuture tr1OnErrorFuture = tr.on_error(err);
fdb_check(wait_future(tr1OnErrorFuture));
continue;
}
fdb_check(tr2.add_conflict_range(key("foo"), strinc(key("foo")), FDB_CONFLICT_RANGE_TYPE_READ));
tr2.set(key("foo"), "bar");
fdb::EmptyFuture tr2CommitFuture = tr2.commit();
err = wait_future(tr2CommitFuture);
CHECK(err == 1020); // not_committed
fdb::Int64Future tr1GrvFuture2 = tr.get_read_version();
err = wait_future(tr1GrvFuture2);
if (err) {
fdb::EmptyFuture tr1OnErrorFuture = tr.on_error(err);
fdb_check(wait_future(tr1OnErrorFuture));
continue;
}
int64_t tr1EndVersion;
CHECK(!tr1GrvFuture2.get(&tr1EndVersion));
fdb::Int64Future tr2GrvFuture2 = tr2.get_read_version();
err = wait_future(tr2GrvFuture2);
if (err) {
fdb::EmptyFuture tr2OnErrorFuture = tr2.on_error(err);
fdb_check(wait_future(tr2OnErrorFuture));
continue;
}
int64_t tr2EndVersion;
CHECK(!tr2GrvFuture2.get(&tr2EndVersion));
// If we reset the transaction, then the read version will change
CHECK(tr1StartVersion == tr1EndVersion);
CHECK(tr2StartVersion == tr2EndVersion);
break;
}
}
int main(int argc, char** argv) { int main(int argc, char** argv) {
if (argc < 3) { if (argc < 3) {
std::cout << "Unit tests for the FoundationDB C API.\n" std::cout << "Unit tests for the FoundationDB C API.\n"

View File

@ -513,7 +513,7 @@ struct JVM {
} }
}; };
struct JavaWorkload : FDBWorkload { struct JavaWorkload final : FDBWorkload {
std::shared_ptr<JVM> jvm; std::shared_ptr<JVM> jvm;
FDBLogger& log; FDBLogger& log;
FDBWorkloadContext* context = nullptr; FDBWorkloadContext* context = nullptr;

View File

@ -0,0 +1,188 @@
/*
* RepeatableReadMultiThreadClientTest
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.apple.foundationdb;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import com.apple.foundationdb.tuple.Tuple;
import org.junit.jupiter.api.Assertions;
/**
* This test verify transcations have repeatable read.
* 1 First set initialValue to key.
* 2 Have transactions to read the key and verify the initialValue in a loop, if it does not
* see the initialValue as the value, it set the flag to false.
*
* 3 Then have new transactions set the value and then read to verify the new value is set,
* if it does not read the new value, set the flag to false.
*
* 4 Verify that old transactions have not finished when new transactions have finished,
* then verify old transactions does not have false flag -- it means that old transactions
* are still seeting the initialValue even after new transactions set them to a new value.
*/
public class RepeatableReadMultiThreadClientTest {
public static final MultiClientHelper clientHelper = new MultiClientHelper();
private static final int oldValueReadCount = 30;
private static final int threadPerDB = 5;
private static final String key = "foo";
private static final String initialValue = "bar";
private static final String newValue = "cool";
private static final Map<Thread, OldValueReader> threadToOldValueReaders = new HashMap<>();
public static void main(String[] args) throws Exception {
FDB fdb = FDB.selectAPIVersion(710);
setupThreads(fdb);
Collection<Database> dbs = clientHelper.openDatabases(fdb); // the clientHelper will close the databases for us
System.out.println("Starting tests");
setup(dbs);
System.out.println("Start processing and validating");
readOldValue(dbs);
setNewValueAndRead(dbs);
System.out.println("Test finished");
}
private static synchronized void setupThreads(FDB fdb) {
int clientThreadsPerVersion = clientHelper.readClusterFromEnv().length;
fdb.options().setClientThreadsPerVersion(clientThreadsPerVersion);
System.out.printf("thread per version is %d\n", clientThreadsPerVersion);
fdb.options().setExternalClientDirectory("/var/dynamic-conf/lib");
fdb.options().setTraceEnable("/tmp");
fdb.options().setKnob("min_trace_severity=5");
}
private static void setup(Collection<Database> dbs) {
// 0 -> 1 -> 2 -> 3 -> 0
for (Database db : dbs) {
db.run(tr -> {
tr.set(Tuple.from(key).pack(), Tuple.from(initialValue).pack());
return null;
});
}
}
private static void readOldValue(Collection<Database> dbs) throws InterruptedException {
for (Database db : dbs) {
for (int i = 0; i < threadPerDB; i++) {
final OldValueReader oldValueReader = new OldValueReader(db);
final Thread thread = new Thread(OldValueReader.create(db));
thread.start();
threadToOldValueReaders.put(thread, oldValueReader);
}
}
}
private static void setNewValueAndRead(Collection<Database> dbs) throws InterruptedException {
// threads running NewValueReader need to wait for threads to start first who run OldValueReader
Thread.sleep(1000);
final Map<Thread, NewValueReader> threads = new HashMap<>();
for (Database db : dbs) {
for (int i = 0; i < threadPerDB; i++) {
final NewValueReader newValueReader = new NewValueReader(db);
final Thread thread = new Thread(NewValueReader.create(db));
thread.start();
threads.put(thread, newValueReader);
}
}
for (Map.Entry<Thread, NewValueReader> entry : threads.entrySet()) {
entry.getKey().join();
Assertions.assertTrue(entry.getValue().succeed, "new value reader failed to read the correct value");
}
for (Map.Entry<Thread, OldValueReader> entry : threadToOldValueReaders.entrySet()) {
Assertions.assertTrue(entry.getKey().isAlive(), "Old value reader finished too soon, cannot verify repeatable read, succeed is " + entry.getValue().succeed);
}
for (Map.Entry<Thread, OldValueReader> entry : threadToOldValueReaders.entrySet()) {
entry.getKey().join();
Assertions.assertTrue(entry.getValue().succeed, "old value reader failed to read the correct value");
}
}
public static class OldValueReader implements Runnable {
private final Database db;
private boolean succeed;
private OldValueReader(Database db) {
this.db = db;
this.succeed = true;
}
public static OldValueReader create(Database db) {
return new OldValueReader(db);
}
@Override
public void run() {
db.run(tr -> {
try {
for (int i = 0; i < oldValueReadCount; i++) {
byte[] result = tr.get(Tuple.from(key).pack()).join();
String value = Tuple.fromBytes(result).getString(0);
if (!initialValue.equals(value)) {
succeed = false;
break;
}
Thread.sleep(100);
}
}
catch (Exception e) {
succeed = false;
}
return null;
});
}
}
public static class NewValueReader implements Runnable {
private final Database db;
private boolean succeed;
public NewValueReader(Database db) {
this.db = db;
this.succeed = true;
}
public static NewValueReader create(Database db) {
return new NewValueReader(db);
}
@Override
public void run() {
db.run(tr -> {
tr.set(Tuple.from(key).pack(), Tuple.from(newValue).pack());
return null;
});
String value = db.run(tr -> {
byte[] result = tr.get(Tuple.from(key).pack()).join();
return Tuple.fromBytes(result).getString(0);
});
if (!newValue.equals(value)) {
succeed = false;
}
}
}
}

View File

@ -51,6 +51,7 @@ set(JAVA_INTEGRATION_TESTS
src/integration/com/apple/foundationdb/BasicMultiClientIntegrationTest.java src/integration/com/apple/foundationdb/BasicMultiClientIntegrationTest.java
src/integration/com/apple/foundationdb/CycleMultiClientIntegrationTest.java src/integration/com/apple/foundationdb/CycleMultiClientIntegrationTest.java
src/integration/com/apple/foundationdb/SidebandMultiThreadClientTest.java src/integration/com/apple/foundationdb/SidebandMultiThreadClientTest.java
src/integration/com/apple/foundationdb/RepeatableReadMultiThreadClientTest.java
) )
# Resources that are used in integration testing, but are not explicitly test files (JUnit rules, # Resources that are used in integration testing, but are not explicitly test files (JUnit rules,

View File

@ -401,6 +401,32 @@ def exclude(logger):
output4 = run_fdbcli_command('exclude') output4 = run_fdbcli_command('exclude')
assert no_excluded_process_output in output4 assert no_excluded_process_output in output4
# read the system key 'k', need to enable the option first
def read_system_key(k):
output = run_fdbcli_command('option', 'on', 'READ_SYSTEM_KEYS;', 'get', k)
if 'is' not in output:
# key not present
return None
_, value = output.split(' is ')
return value
@enable_logging()
def throttle(logger):
# no throttled tags at the beginning
no_throttle_tags_output = 'There are no throttled tags'
assert run_fdbcli_command('throttle', 'list') == no_throttle_tags_output
# test 'throttle enable auto'
run_fdbcli_command('throttle', 'enable', 'auto')
# verify the change is applied by reading the system key
# not an elegant way, may change later
enable_flag = read_system_key('\\xff\\x02/throttledTags/autoThrottlingEnabled')
assert enable_flag == "`1'"
run_fdbcli_command('throttle', 'disable', 'auto')
enable_flag = read_system_key('\\xff\\x02/throttledTags/autoThrottlingEnabled')
# verify disabled
assert enable_flag == "`0'"
# TODO : test manual throttling, not easy to do now
if __name__ == '__main__': if __name__ == '__main__':
# fdbcli_tests.py <path_to_fdbcli_binary> <path_to_fdb_cluster_file> <process_number> # fdbcli_tests.py <path_to_fdbcli_binary> <path_to_fdb_cluster_file> <process_number>
assert len(sys.argv) == 4, "Please pass arguments: <path_to_fdbcli_binary> <path_to_fdb_cluster_file> <process_number>" assert len(sys.argv) == 4, "Please pass arguments: <path_to_fdbcli_binary> <path_to_fdb_cluster_file> <process_number>"
@ -420,6 +446,7 @@ if __name__ == '__main__':
setclass() setclass()
suspend() suspend()
transaction() transaction()
throttle()
else: else:
assert process_number > 1, "Process number should be positive" assert process_number > 1, "Process number should be positive"
coordinators() coordinators()

View File

@ -39,6 +39,9 @@ function(configure_testing)
endfunction() endfunction()
function(verify_testing) function(verify_testing)
if(NOT ENABLE_SIMULATION_TESTS)
return()
endif()
foreach(test_file IN LISTS fdb_test_files) foreach(test_file IN LISTS fdb_test_files)
message(SEND_ERROR "${test_file} found but it is not associated with a test") message(SEND_ERROR "${test_file} found but it is not associated with a test")
endforeach() endforeach()
@ -119,6 +122,7 @@ function(add_fdb_test)
set(VALGRIND_OPTION "--use-valgrind") set(VALGRIND_OPTION "--use-valgrind")
endif() endif()
list(TRANSFORM ADD_FDB_TEST_TEST_FILES PREPEND "${CMAKE_CURRENT_SOURCE_DIR}/") list(TRANSFORM ADD_FDB_TEST_TEST_FILES PREPEND "${CMAKE_CURRENT_SOURCE_DIR}/")
if (ENABLE_SIMULATION_TESTS)
add_test(NAME ${test_name} add_test(NAME ${test_name}
COMMAND $<TARGET_FILE:Python::Interpreter> ${TestRunner} COMMAND $<TARGET_FILE:Python::Interpreter> ${TestRunner}
-n ${test_name} -n ${test_name}
@ -142,6 +146,7 @@ function(add_fdb_test)
get_filename_component(test_dir ${test_dir_full} NAME) get_filename_component(test_dir ${test_dir_full} NAME)
set_tests_properties(${test_name} PROPERTIES TIMEOUT ${this_test_timeout} LABELS "${test_dir}") set_tests_properties(${test_name} PROPERTIES TIMEOUT ${this_test_timeout} LABELS "${test_dir}")
endif() endif()
endif()
# set variables used for generating test packages # set variables used for generating test packages
set(TEST_NAMES ${TEST_NAMES} ${test_name} PARENT_SCOPE) set(TEST_NAMES ${TEST_NAMES} ${test_name} PARENT_SCOPE)
set(TEST_FILES_${test_name} ${ADD_FDB_TEST_TEST_FILES} PARENT_SCOPE) set(TEST_FILES_${test_name} ${ADD_FDB_TEST_TEST_FILES} PARENT_SCOPE)

View File

@ -285,7 +285,7 @@ else()
-Wpessimizing-move -Wpessimizing-move
-Woverloaded-virtual -Woverloaded-virtual
-Wshift-sign-overflow -Wshift-sign-overflow
# Here's the current set of warnings we need to explicitly disable to compile warning-free with clang 10 # Here's the current set of warnings we need to explicitly disable to compile warning-free with clang 11
-Wno-comment -Wno-comment
-Wno-dangling-else -Wno-dangling-else
-Wno-delete-non-virtual-dtor -Wno-delete-non-virtual-dtor
@ -297,13 +297,11 @@ else()
-Wno-sign-compare -Wno-sign-compare
-Wno-tautological-pointer-compare -Wno-tautological-pointer-compare
-Wno-undefined-var-template -Wno-undefined-var-template
-Wno-tautological-pointer-compare
-Wno-unknown-pragmas -Wno-unknown-pragmas
-Wno-unknown-warning-option -Wno-unknown-warning-option
-Wno-unused-function -Wno-unused-function
-Wno-unused-local-typedef -Wno-unused-local-typedef
-Wno-unused-parameter -Wno-unused-parameter
-Wno-self-assign
) )
if (USE_CCACHE) if (USE_CCACHE)
add_compile_options( add_compile_options(

View File

@ -638,6 +638,15 @@ namespace SummarizeTest
{ {
if(!String.IsNullOrEmpty(errLine.Data)) if(!String.IsNullOrEmpty(errLine.Data))
{ {
if (errLine.Data.EndsWith("WARNING: ASan doesn't fully support makecontext/swapcontext functions and may produce false positives in some cases!")) {
// When running ASAN we expect to see this message. Boost coroutine should be using the correct asan annotations so that it shouldn't produce any false positives.
return;
}
if (errLine.Data.EndsWith("Warning: unimplemented fcntl command: 1036")) {
// Valgrind produces this warning when F_SET_RW_HINT is used
return;
}
hasError = true; hasError = true;
if(Errors.Count < maxErrors) { if(Errors.Count < maxErrors) {
if(errLine.Data.Length > maxErrorLength) { if(errLine.Data.Length > maxErrorLength) {
@ -962,14 +971,6 @@ namespace SummarizeTest
int stderrBytes = 0; int stderrBytes = 0;
foreach (string err in outputErrors) foreach (string err in outputErrors)
{ {
if (err.EndsWith("WARNING: ASan doesn't fully support makecontext/swapcontext functions and may produce false positives in some cases!")) {
// When running ASAN we expect to see this message. Boost coroutine should be using the correct asan annotations so that it shouldn't produce any false positives.
continue;
}
if (err.EndsWith("Warning: unimplemented fcntl command: 1036")) {
// Valgrind produces this warning when F_SET_RW_HINT is used
continue;
}
if (stderrSeverity == (int)Magnesium.Severity.SevError) if (stderrSeverity == (int)Magnesium.Severity.SevError)
{ {
error = true; error = true;

View File

@ -25,6 +25,8 @@ API version 700
General General
------- -------
* Committing a transaction will no longer partially reset it. In particular, getting the read version from a transaction that has committed or failed to commit with an error will return the original read version.
Python bindings Python bindings
--------------- ---------------

View File

@ -7,6 +7,16 @@ Release Notes
* The multi-version client API would not propagate errors that occurred when creating databases on external clients. This could result in a invalid memory accesses. `(PR #5221) <https://github.com/apple/foundationdb/pull/5221>`_ * The multi-version client API would not propagate errors that occurred when creating databases on external clients. This could result in a invalid memory accesses. `(PR #5221) <https://github.com/apple/foundationdb/pull/5221>`_
* Fixed a race between the multi-version client connecting to a cluster and destroying the database that could cause an assertion failure. `(PR #5221) <https://github.com/apple/foundationdb/pull/5221>`_ * Fixed a race between the multi-version client connecting to a cluster and destroying the database that could cause an assertion failure. `(PR #5221) <https://github.com/apple/foundationdb/pull/5221>`_
6.3.17
======
* Made readValuePrefix consistent regarding error messages. `(PR #5160) <https://github.com/apple/foundationdb/pull/5160>`_
* Added ``TLogPopDetails`` trace event to tLog pop. `(PR #5134) <https://github.com/apple/foundationdb/pull/5134>`_
* Added ``CommitBatchingEmptyMessageRatio`` metric to track the ratio of empty messages to tlogs. `(PR #5087) <https://github.com/apple/foundationdb/pull/5087>`_
* Observability improvements in ProxyStats. `(PR #5046) <https://github.com/apple/foundationdb/pull/5046>`_
* Added ``RecoveryInternal`` and ``ProxyReplies`` trace events to recovery_transaction step in recovery. `(PR #5038) <https://github.com/apple/foundationdb/pull/5038>`_
* Multi-threaded client documentation improvements. `(PR #5033) <https://github.com/apple/foundationdb/pull/5033>`_
* Added ``ClusterControllerWorkerFailed`` trace event when a worker is removed from cluster controller. `(PR #5035) <https://github.com/apple/foundationdb/pull/5035>`_
* Added histograms for storage server write path components. `(PR #5019) <https://github.com/apple/foundationdb/pull/5019>`_
6.3.15 6.3.15
====== ======

View File

@ -93,6 +93,7 @@ Other Changes
* Capture output of forked snapshot processes in trace events. `(PR #4254) <https://github.com/apple/foundationdb/pull/4254/files>`_ * Capture output of forked snapshot processes in trace events. `(PR #4254) <https://github.com/apple/foundationdb/pull/4254/files>`_
* Add ErrorKind field to Severity 40 trace events. `(PR #4741) <https://github.com/apple/foundationdb/pull/4741/files>`_ * Add ErrorKind field to Severity 40 trace events. `(PR #4741) <https://github.com/apple/foundationdb/pull/4741/files>`_
* Added histograms for the storage server write path components. `(PR #5021) <https://github.com/apple/foundationdb/pull/5021/files>`_ * Added histograms for the storage server write path components. `(PR #5021) <https://github.com/apple/foundationdb/pull/5021/files>`_
* Committing a transaction will no longer partially reset it as of API version 700. `(PR #5271) <https://github.com/apple/foundationdb/pull/5271/files>`_
Earlier release notes Earlier release notes
--------------------- ---------------------

View File

@ -571,7 +571,7 @@ int main(int argc, char** argv) {
} }
if (!param.tlsConfig.setupTLS()) { if (!param.tlsConfig.setupTLS()) {
TraceEvent(SevError, "TLSError"); TraceEvent(SevError, "TLSError").log();
throw tls_error(); throw tls_error();
} }

View File

@ -8,6 +8,7 @@ set(FDBCLI_SRCS
ForceRecoveryWithDataLossCommand.actor.cpp ForceRecoveryWithDataLossCommand.actor.cpp
MaintenanceCommand.actor.cpp MaintenanceCommand.actor.cpp
SnapshotCommand.actor.cpp SnapshotCommand.actor.cpp
ThrottleCommand.actor.cpp
Util.cpp Util.cpp
linenoise/linenoise.h) linenoise/linenoise.h)

View File

@ -0,0 +1,645 @@
/*
* ThrottleCommand.actor.cpp
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "fdbcli/fdbcli.actor.h"
#include "fdbclient/IClientApi.h"
#include "fdbclient/TagThrottle.h"
#include "fdbclient/Knobs.h"
#include "fdbclient/SystemData.h"
#include "fdbclient/CommitTransaction.h"
#include "flow/Arena.h"
#include "flow/FastRef.h"
#include "flow/ThreadHelper.actor.h"
#include "flow/genericactors.actor.h"
#include "flow/actorcompiler.h" // This must be the last #include.
namespace {
// Helper functions copied from TagThrottle.actor.cpp
// The only difference is transactions are changed to go through MultiversionTransaction,
// instead of the native Transaction(i.e., RYWTransaction)
// Reads the auto-throttling enabled flag (tagThrottleAutoEnabledKey) through the
// given transaction. Returns true when the stored value is "1" and false when it
// is "0". A missing or malformed value resets the transaction, backs off for
// CLIENT_KNOBS->DEFAULT_BACKOFF seconds, and retries until a valid flag is read.
ACTOR Future<bool> getValidAutoEnabled(Reference<ITransaction> tr) {
	state bool result;
	loop {
		// ThreadFuture from the multiversion API is adapted into a flow Future.
		Optional<Value> value = wait(safeThreadFutureToFuture(tr->get(tagThrottleAutoEnabledKey)));
		if (!value.present()) {
			tr->reset();
			wait(delay(CLIENT_KNOBS->DEFAULT_BACKOFF));
			continue;
		} else if (value.get() == LiteralStringRef("1")) {
			result = true;
		} else if (value.get() == LiteralStringRef("0")) {
			result = false;
		} else {
			// Unexpected contents: log and retry rather than failing the caller.
			TraceEvent(SevWarnAlways, "InvalidAutoTagThrottlingValue").detail("Value", value.get());
			tr->reset();
			wait(delay(CLIENT_KNOBS->DEFAULT_BACKOFF));
			continue;
		}
		return result;
	};
}
// Fetches up to `limit` throttle entries from the system keyspace.
// When containsRecommend is false, the auto-throttle flag is consulted first and
// decides whether auto-created throttles are included; when containsRecommend is
// true, the full throttle key range is scanned unconditionally.
// Helper copied from TagThrottle.actor.cpp but driven through ITransaction so it
// works with the multiversion client (see namespace comment above).
ACTOR Future<std::vector<TagThrottleInfo>> getThrottledTags(Reference<IDatabase> db,
                                                            int limit,
                                                            bool containsRecommend = false) {
	state Reference<ITransaction> tr = db->createTransaction();
	state bool reportAuto = containsRecommend;
	loop {
		tr->setOption(FDBTransactionOptions::READ_SYSTEM_KEYS);
		try {
			if (!containsRecommend) {
				wait(store(reportAuto, getValidAutoEnabled(tr)));
			}
			// Scan all throttle keys when auto throttles are reported, otherwise
			// only the manual sub-range [tagThrottleKeysPrefix, tagThrottleAutoKeysPrefix).
			state ThreadFuture<RangeResult> f = tr->getRange(
			    reportAuto ? tagThrottleKeys : KeyRangeRef(tagThrottleKeysPrefix, tagThrottleAutoKeysPrefix), limit);
			RangeResult throttles = wait(safeThreadFutureToFuture(f));
			std::vector<TagThrottleInfo> results;
			for (auto throttle : throttles) {
				results.push_back(TagThrottleInfo(TagThrottleKey::fromKey(throttle.key),
				                                  TagThrottleValue::fromValue(throttle.value)));
			}
			return results;
		} catch (Error& e) {
			wait(safeThreadFutureToFuture(tr->onError(e)));
		}
	}
}
// Fetches up to `limit` recommended (auto-range) throttle entries.
// If auto throttling is currently enabled there is nothing to "recommend", so an
// empty vector is returned immediately. Otherwise the auto key sub-range
// [tagThrottleAutoKeysPrefix, tagThrottleKeys.end) is scanned.
ACTOR Future<std::vector<TagThrottleInfo>> getRecommendedTags(Reference<IDatabase> db, int limit) {
	state Reference<ITransaction> tr = db->createTransaction();
	loop {
		tr->setOption(FDBTransactionOptions::READ_SYSTEM_KEYS);
		try {
			// NOTE: this local shadows the enableAuto() actor below; it holds the flag value.
			bool enableAuto = wait(getValidAutoEnabled(tr));
			if (enableAuto) {
				return std::vector<TagThrottleInfo>();
			}
			state ThreadFuture<RangeResult> f =
			    tr->getRange(KeyRangeRef(tagThrottleAutoKeysPrefix, tagThrottleKeys.end), limit);
			RangeResult throttles = wait(safeThreadFutureToFuture(f));
			std::vector<TagThrottleInfo> results;
			for (auto throttle : throttles) {
				results.push_back(TagThrottleInfo(TagThrottleKey::fromKey(throttle.key),
				                                  TagThrottleValue::fromValue(throttle.value)));
			}
			return results;
		} catch (Error& e) {
			wait(safeThreadFutureToFuture(tr->onError(e)));
		}
	}
}
// Adjusts the manual throttle count (tagThrottleCountKey) by `delta` inside the
// caller's transaction. The current count and the configured limit
// (tagThrottleLimitKey) are read in parallel; if the adjusted count would exceed
// the limit, too_many_tag_throttles is thrown.
// NOTE: only mutates `tr` — the caller is responsible for committing.
ACTOR Future<Void> updateThrottleCount(Reference<ITransaction> tr, int64_t delta) {
	state ThreadFuture<Optional<Value>> countVal = tr->get(tagThrottleCountKey);
	state ThreadFuture<Optional<Value>> limitVal = tr->get(tagThrottleLimitKey);
	wait(success(safeThreadFutureToFuture(countVal)) && success(safeThreadFutureToFuture(limitVal)));
	// Missing keys default both the count and the limit to 0.
	int64_t count = 0;
	int64_t limit = 0;
	if (countVal.get().present()) {
		BinaryReader reader(countVal.get().get(), Unversioned());
		reader >> count;
	}
	if (limitVal.get().present()) {
		BinaryReader reader(limitVal.get().get(), Unversioned());
		reader >> limit;
	}
	count += delta;
	if (count > limit) {
		throw too_many_tag_throttles();
	}
	BinaryWriter writer(Unversioned());
	writer << count;
	tr->set(tagThrottleCountKey, writer.toValue());
	return Void();
}
// Writes a versionstamped value to tagThrottleSignalKey so that components
// watching that key observe a change whenever throttle state is modified.
// NOTE(review): per the SetVersionstampedValue contract the trailing four bytes
// of the literal encode the offset at which the versionstamp is written — the
// ten 'X' placeholder bytes are overwritten at commit time; confirm against the
// atomic-op documentation if this literal is ever changed.
void signalThrottleChange(Reference<ITransaction> tr) {
	tr->atomicOp(
	    tagThrottleSignalKey, LiteralStringRef("XXXXXXXXXX\x00\x00\x00\x00"), MutationRef::SetVersionstampedValue);
}
// Creates or updates a throttle entry for `tags` at `tpsRate` lasting
// `initialDuration` (which must be positive). `expirationTime`, when present, is
// stored in the value (0 otherwise). MANUAL throttles force the reason to MANUAL
// and, when the key did not previously exist, bump the manual throttle count;
// they also set the change signal so watchers notice. Retries via tr->onError
// until the commit succeeds.
ACTOR Future<Void> throttleTags(Reference<IDatabase> db,
                                TagSet tags,
                                double tpsRate,
                                double initialDuration,
                                TagThrottleType throttleType,
                                TransactionPriority priority,
                                Optional<double> expirationTime = Optional<double>(),
                                Optional<TagThrottledReason> reason = Optional<TagThrottledReason>()) {
	state Reference<ITransaction> tr = db->createTransaction();
	state Key key = TagThrottleKey(tags, throttleType, priority).toKey();
	ASSERT(initialDuration > 0);
	if (throttleType == TagThrottleType::MANUAL) {
		reason = TagThrottledReason::MANUAL;
	}
	TagThrottleValue throttle(tpsRate,
	                          expirationTime.present() ? expirationTime.get() : 0,
	                          initialDuration,
	                          reason.present() ? reason.get() : TagThrottledReason::UNSET);
	// Serialize with the protocol version that introduced the reason field.
	BinaryWriter wr(IncludeVersion(ProtocolVersion::withTagThrottleValueReason()));
	wr << throttle;
	state Value value = wr.toValue();
	loop {
		tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
		try {
			if (throttleType == TagThrottleType::MANUAL) {
				// Only bump the count when this throttle key is new.
				Optional<Value> oldThrottle = wait(safeThreadFutureToFuture(tr->get(key)));
				if (!oldThrottle.present()) {
					wait(updateThrottleCount(tr, 1));
				}
			}
			tr->set(key, value);
			if (throttleType == TagThrottleType::MANUAL) {
				signalThrottleChange(tr);
			}
			wait(safeThreadFutureToFuture(tr->commit()));
			return Void();
		} catch (Error& e) {
			wait(safeThreadFutureToFuture(tr->onError(e)));
		}
	}
}
// Removes throttles on `tags` that match the optional type and priority filters.
// Builds the set of candidate throttle keys (every priority/type combination the
// filters allow), reads them in parallel, clears the ones that exist, decrements
// the manual throttle count for each MANUAL entry removed, and commits only when
// something was actually cleared. Returns whether any throttle was removed.
ACTOR Future<bool> unthrottleTags(Reference<IDatabase> db,
                                  TagSet tags,
                                  Optional<TagThrottleType> throttleType,
                                  Optional<TransactionPriority> priority) {
	state Reference<ITransaction> tr = db->createTransaction();
	state std::vector<Key> keys;
	for (auto p : allTransactionPriorities) {
		if (!priority.present() || priority.get() == p) {
			if (!throttleType.present() || throttleType.get() == TagThrottleType::AUTO) {
				keys.push_back(TagThrottleKey(tags, TagThrottleType::AUTO, p).toKey());
			}
			if (!throttleType.present() || throttleType.get() == TagThrottleType::MANUAL) {
				keys.push_back(TagThrottleKey(tags, TagThrottleType::MANUAL, p).toKey());
			}
		}
	}
	state bool removed = false;
	loop {
		tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
		try {
			// Issue all reads before waiting so they run in parallel.
			state std::vector<Future<Optional<Value>>> values;
			values.reserve(keys.size());
			for (auto key : keys) {
				values.push_back(safeThreadFutureToFuture(tr->get(key)));
			}
			wait(waitForAll(values));
			// Net change to the manual throttle count (one per MANUAL key cleared).
			int delta = 0;
			for (int i = 0; i < values.size(); ++i) {
				if (values[i].get().present()) {
					if (TagThrottleKey::fromKey(keys[i]).throttleType == TagThrottleType::MANUAL) {
						delta -= 1;
					}
					tr->clear(keys[i]);
					// Report that we are removing this tag if we ever see it present.
					// This protects us from getting confused if the transaction is maybe committed.
					// It's ok if someone else actually ends up removing this tag at the same time
					// and we aren't the ones to actually do it.
					removed = true;
				}
			}
			if (delta != 0) {
				wait(updateThrottleCount(tr, delta));
			}
			if (removed) {
				// Only signal and commit when we actually cleared something.
				signalThrottleChange(tr);
				wait(safeThreadFutureToFuture(tr->commit()));
			}
			return removed;
		} catch (Error& e) {
			wait(safeThreadFutureToFuture(tr->onError(e)));
		}
	}
}
// Sets the auto-throttling flag (tagThrottleAutoEnabledKey) to "1" or "0".
// Writes, signals, and commits only when the stored value would actually change
// (missing or different from the requested setting); otherwise the transaction
// is abandoned without a commit. Retries via tr->onError on failure.
ACTOR Future<Void> enableAuto(Reference<IDatabase> db, bool enabled) {
	state Reference<ITransaction> tr = db->createTransaction();
	loop {
		tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
		try {
			Optional<Value> value = wait(safeThreadFutureToFuture(tr->get(tagThrottleAutoEnabledKey)));
			if (!value.present() || (enabled && value.get() != LiteralStringRef("1")) ||
			    (!enabled && value.get() != LiteralStringRef("0"))) {
				tr->set(tagThrottleAutoEnabledKey, LiteralStringRef(enabled ? "1" : "0"));
				signalThrottleChange(tr);
				wait(safeThreadFutureToFuture(tr->commit()));
			}
			return Void();
		} catch (Error& e) {
			wait(safeThreadFutureToFuture(tr->onError(e)));
		}
	}
}
// Clears throttle entries in [beginKey, endKey), optionally restricted to one
// priority and (when onlyExpiredThrottles) to entries whose expiration time has
// passed. The range is processed in batches of 1000 keys, each batch committed
// separately; returns whether any entry was removed across all batches.
//
// FIX: the manual-throttle counter was previously uint64_t and negated before
// being passed as the signed int64_t delta of updateThrottleCount(); unary minus
// on an unsigned value wraps, and the resulting out-of-range unsigned-to-signed
// conversion was implementation-defined before C++20. Use a signed counter.
ACTOR Future<bool> unthrottleMatchingThrottles(Reference<IDatabase> db,
                                               KeyRef beginKey,
                                               KeyRef endKey,
                                               Optional<TransactionPriority> priority,
                                               bool onlyExpiredThrottles) {
	state Reference<ITransaction> tr = db->createTransaction();
	state KeySelector begin = firstGreaterOrEqual(beginKey);
	state KeySelector end = firstGreaterOrEqual(endKey);
	state bool removed = false;
	loop {
		tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
		try {
			// holds memory of the RangeResult
			state ThreadFuture<RangeResult> f = tr->getRange(begin, end, 1000);
			state RangeResult tags = wait(safeThreadFutureToFuture(f));
			state uint64_t unthrottledTags = 0;
			// Signed: negated below when decrementing the manual throttle count.
			int64_t manualUnthrottledTags = 0;
			for (auto tag : tags) {
				if (onlyExpiredThrottles) {
					// An expirationTime of 0 appears to mean "not set"; such
					// entries are never treated as expired (TODO confirm).
					double expirationTime = TagThrottleValue::fromValue(tag.value).expirationTime;
					if (expirationTime == 0 || expirationTime > now()) {
						continue;
					}
				}
				TagThrottleKey key = TagThrottleKey::fromKey(tag.key);
				if (priority.present() && key.priority != priority.get()) {
					continue;
				}
				if (key.throttleType == TagThrottleType::MANUAL) {
					++manualUnthrottledTags;
				}
				removed = true;
				tr->clear(tag.key);
				unthrottledTags++;
			}
			if (manualUnthrottledTags > 0) {
				wait(updateThrottleCount(tr, -manualUnthrottledTags));
			}
			if (unthrottledTags > 0) {
				signalThrottleChange(tr);
			}
			wait(safeThreadFutureToFuture(tr->commit()));
			if (!tags.more) {
				return removed;
			}
			ASSERT(tags.size() > 0);
			// Resume the scan just past the last key of this batch.
			begin = KeySelector(firstGreaterThan(tags[tags.size() - 1].key), tags.arena());
		} catch (Error& e) {
			wait(safeThreadFutureToFuture(tr->onError(e)));
		}
	}
}
// Removes every throttle matching the optional type and priority filters by
// delegating to unthrottleMatchingThrottles over the appropriate key range.
// Returns whether anything was removed.
Future<bool> unthrottleAll(Reference<IDatabase> db,
                           Optional<TagThrottleType> tagThrottleType,
                           Optional<TransactionPriority> priority) {
	// Default to the full throttle keyspace, then narrow it when a specific
	// throttle type was requested: AUTO entries live at/after the auto prefix,
	// MANUAL entries live before it.
	KeyRef rangeBegin = tagThrottleKeys.begin;
	KeyRef rangeEnd = tagThrottleKeys.end;
	if (tagThrottleType.present()) {
		if (tagThrottleType.get() == TagThrottleType::AUTO) {
			rangeBegin = tagThrottleAutoKeysPrefix;
		} else if (tagThrottleType.get() == TagThrottleType::MANUAL) {
			rangeEnd = tagThrottleAutoKeysPrefix;
		}
	}
	return unthrottleMatchingThrottles(db, rangeBegin, rangeEnd, priority, false);
}
} // namespace
namespace fdb_cli {
// Implements the fdbcli `throttle' command.
// Subcommands:
//   list [throttled|recommended|all] [LIMIT]     - print throttled/recommended tags
//   on tag <TAG> [RATE] [DURATION] [PRIORITY]    - manually throttle a tag
//   off [all|auto|manual] [tag <TAG>] [PRIORITY] - remove matching throttles
//   enable auto | disable auto                   - toggle automatic tag throttling
// Returns true on success and false on a usage or parse error (after printing help).
//
// FIXES relative to the previous version:
//  * `throttle off' with an unrecognized token looped forever (nextIndex never
//    advanced and is_error was never set); it is now a usage error.
//  * The `off' result messages printed tokens[3] as the tag, which is wrong when
//    a type/priority qualifier precedes `tag' (e.g. `throttle off auto tag t'
//    printed "tag"); the actual <TAG> token index is now tracked.
//  * Help-text typos: "Enables or disable" -> "Enables or disables",
//    "can be use to" -> "can be used to".
ACTOR Future<bool> throttleCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens) {
	if (tokens.size() == 1) {
		printUsage(tokens[0]);
		return false;
	} else if (tokencmp(tokens[1], "list")) {
		if (tokens.size() > 4) {
			printf("Usage: throttle list [throttled|recommended|all] [LIMIT]\n");
			printf("\n");
			printf("Lists tags that are currently throttled.\n");
			printf("The default LIMIT is 100 tags.\n");
			return false;
		}
		state bool reportThrottled = true;
		state bool reportRecommended = false;
		if (tokens.size() >= 3) {
			if (tokencmp(tokens[2], "recommended")) {
				reportThrottled = false;
				reportRecommended = true;
			} else if (tokencmp(tokens[2], "all")) {
				reportThrottled = true;
				reportRecommended = true;
			} else if (!tokencmp(tokens[2], "throttled")) {
				printf("ERROR: failed to parse `%s'.\n", printable(tokens[2]).c_str());
				return false;
			}
		}
		state int throttleListLimit = 100;
		if (tokens.size() >= 4) {
			char* end;
			throttleListLimit = std::strtol((const char*)tokens[3].begin(), &end, 10);
			if ((tokens.size() > 4 && !std::isspace(*end)) || (tokens.size() == 4 && *end != '\0')) {
				fprintf(stderr, "ERROR: failed to parse limit `%s'.\n", printable(tokens[3]).c_str());
				return false;
			}
		}
		state std::vector<TagThrottleInfo> tags;
		if (reportThrottled && reportRecommended) {
			wait(store(tags, getThrottledTags(db, throttleListLimit, true)));
		} else if (reportThrottled) {
			wait(store(tags, getThrottledTags(db, throttleListLimit)));
		} else if (reportRecommended) {
			wait(store(tags, getRecommendedTags(db, throttleListLimit)));
		}
		bool anyLogged = false;
		for (auto itr = tags.begin(); itr != tags.end(); ++itr) {
			// Only entries that have not yet expired are displayed.
			if (itr->expirationTime > now()) {
				if (!anyLogged) {
					printf("Throttled tags:\n\n");
					printf(" Rate (txn/s) | Expiration (s) | Priority | Type | Reason |Tag\n");
					printf(" --------------+----------------+-----------+--------+------------+------\n");
					anyLogged = true;
				}
				std::string reasonStr = "unset";
				if (itr->reason == TagThrottledReason::MANUAL) {
					reasonStr = "manual";
				} else if (itr->reason == TagThrottledReason::BUSY_WRITE) {
					reasonStr = "busy write";
				} else if (itr->reason == TagThrottledReason::BUSY_READ) {
					reasonStr = "busy read";
				}
				printf(" %12d | %13ds | %9s | %6s | %10s |%s\n",
				       (int)(itr->tpsRate),
				       std::min((int)(itr->expirationTime - now()), (int)(itr->initialDuration)),
				       transactionPriorityToString(itr->priority, false),
				       itr->throttleType == TagThrottleType::AUTO ? "auto" : "manual",
				       reasonStr.c_str(),
				       itr->tag.toString().c_str());
			}
		}
		if (tags.size() == throttleListLimit) {
			printf("\nThe tag limit `%d' was reached. Use the [LIMIT] argument to view additional tags.\n",
			       throttleListLimit);
			printf("Usage: throttle list [LIMIT]\n");
		}
		if (!anyLogged) {
			printf("There are no %s tags\n", reportThrottled ? "throttled" : "recommended");
		}
	} else if (tokencmp(tokens[1], "on")) {
		if (tokens.size() < 4 || !tokencmp(tokens[2], "tag") || tokens.size() > 7) {
			printf("Usage: throttle on tag <TAG> [RATE] [DURATION] [PRIORITY]\n");
			printf("\n");
			printf("Enables throttling for transactions with the specified tag.\n");
			printf("An optional transactions per second rate can be specified (default 0).\n");
			printf("An optional duration can be specified, which must include a time suffix (s, m, h, "
			       "d) (default 1h).\n");
			printf("An optional priority can be specified. Choices are `default', `immediate', and "
			       "`batch' (default `default').\n");
			return false;
		}
		double tpsRate = 0.0;
		uint64_t duration = 3600;
		TransactionPriority priority = TransactionPriority::DEFAULT;
		if (tokens.size() >= 5) {
			char* end;
			tpsRate = std::strtod((const char*)tokens[4].begin(), &end);
			if ((tokens.size() > 5 && !std::isspace(*end)) || (tokens.size() == 5 && *end != '\0')) {
				fprintf(stderr, "ERROR: failed to parse rate `%s'.\n", printable(tokens[4]).c_str());
				return false;
			}
			if (tpsRate < 0) {
				fprintf(stderr, "ERROR: rate cannot be negative `%f'\n", tpsRate);
				return false;
			}
		}
		if (tokens.size() == 6) {
			Optional<uint64_t> parsedDuration = parseDuration(tokens[5].toString());
			if (!parsedDuration.present()) {
				fprintf(stderr, "ERROR: failed to parse duration `%s'.\n", printable(tokens[5]).c_str());
				return false;
			}
			duration = parsedDuration.get();
			if (duration == 0) {
				fprintf(stderr, "ERROR: throttle duration cannot be 0\n");
				return false;
			}
		}
		if (tokens.size() == 7) {
			if (tokens[6] == LiteralStringRef("default")) {
				priority = TransactionPriority::DEFAULT;
			} else if (tokens[6] == LiteralStringRef("immediate")) {
				priority = TransactionPriority::IMMEDIATE;
			} else if (tokens[6] == LiteralStringRef("batch")) {
				priority = TransactionPriority::BATCH;
			} else {
				fprintf(stderr,
				        "ERROR: unrecognized priority `%s'. Must be one of `default',\n `immediate', "
				        "or `batch'.\n",
				        tokens[6].toString().c_str());
				return false;
			}
		}
		TagSet tags;
		tags.addTag(tokens[3]);
		wait(throttleTags(db, tags, tpsRate, duration, TagThrottleType::MANUAL, priority));
		printf("Tag `%s' has been throttled\n", tokens[3].toString().c_str());
	} else if (tokencmp(tokens[1], "off")) {
		// Index within `tokens' of the <TAG> argument; only meaningful when
		// `tags' is non-empty. Declared `state' because it is read after wait().
		state int tagTokenIndex = 0;
		int nextIndex = 2;
		TagSet tags;
		bool throttleTypeSpecified = false;
		bool is_error = false;
		// Default: only manual throttles are targeted unless a qualifier is given.
		Optional<TagThrottleType> throttleType = TagThrottleType::MANUAL;
		Optional<TransactionPriority> priority;
		if (tokens.size() == 2) {
			is_error = true;
		}
		while (nextIndex < tokens.size() && !is_error) {
			if (tokencmp(tokens[nextIndex], "all")) {
				if (throttleTypeSpecified) {
					is_error = true;
					continue;
				}
				throttleTypeSpecified = true;
				throttleType = Optional<TagThrottleType>();
				++nextIndex;
			} else if (tokencmp(tokens[nextIndex], "auto")) {
				if (throttleTypeSpecified) {
					is_error = true;
					continue;
				}
				throttleTypeSpecified = true;
				throttleType = TagThrottleType::AUTO;
				++nextIndex;
			} else if (tokencmp(tokens[nextIndex], "manual")) {
				if (throttleTypeSpecified) {
					is_error = true;
					continue;
				}
				throttleTypeSpecified = true;
				throttleType = TagThrottleType::MANUAL;
				++nextIndex;
			} else if (tokencmp(tokens[nextIndex], "default")) {
				if (priority.present()) {
					is_error = true;
					continue;
				}
				priority = TransactionPriority::DEFAULT;
				++nextIndex;
			} else if (tokencmp(tokens[nextIndex], "immediate")) {
				if (priority.present()) {
					is_error = true;
					continue;
				}
				priority = TransactionPriority::IMMEDIATE;
				++nextIndex;
			} else if (tokencmp(tokens[nextIndex], "batch")) {
				if (priority.present()) {
					is_error = true;
					continue;
				}
				priority = TransactionPriority::BATCH;
				++nextIndex;
			} else if (tokencmp(tokens[nextIndex], "tag")) {
				if (tags.size() > 0 || nextIndex == tokens.size() - 1) {
					is_error = true;
					continue;
				}
				tagTokenIndex = nextIndex + 1;
				tags.addTag(tokens[nextIndex + 1]);
				nextIndex += 2;
			} else {
				// BUGFIX: an unrecognized token previously left nextIndex
				// unchanged, spinning this loop forever; report a usage error.
				is_error = true;
			}
		}
		if (!is_error) {
			state const char* throttleTypeString =
			    !throttleType.present() ? "" : (throttleType.get() == TagThrottleType::AUTO ? "auto-" : "manually ");
			state std::string priorityString =
			    priority.present() ? format(" at %s priority", transactionPriorityToString(priority.get(), false)) : "";
			if (tags.size() > 0) {
				bool success = wait(unthrottleTags(db, tags, throttleType, priority));
				// BUGFIX: print the actual <TAG> token (tokens[tagTokenIndex]);
				// the previous code hard-coded tokens[3].
				if (success) {
					printf("Unthrottled tag `%s'%s\n", tokens[tagTokenIndex].toString().c_str(), priorityString.c_str());
				} else {
					printf("Tag `%s' was not %sthrottled%s\n",
					       tokens[tagTokenIndex].toString().c_str(),
					       throttleTypeString,
					       priorityString.c_str());
				}
			} else {
				bool unthrottled = wait(unthrottleAll(db, throttleType, priority));
				if (unthrottled) {
					printf("Unthrottled all %sthrottled tags%s\n", throttleTypeString, priorityString.c_str());
				} else {
					printf("There were no tags being %sthrottled%s\n", throttleTypeString, priorityString.c_str());
				}
			}
		} else {
			printf("Usage: throttle off [all|auto|manual] [tag <TAG>] [PRIORITY]\n");
			printf("\n");
			printf("Disables throttling for throttles matching the specified filters. At least one "
			       "filter must be used.\n\n");
			printf("An optional qualifier `all', `auto', or `manual' can be used to specify the type "
			       "of throttle\n");
			printf("affected. `all' targets all throttles, `auto' targets those created by the "
			       "cluster, and\n");
			printf("`manual' targets those created manually (default `manual').\n\n");
			printf("The `tag' filter can be used to turn off only a specific tag.\n\n");
			printf("The priority filter can be used to turn off only throttles at specific priorities. "
			       "Choices are\n");
			printf("`default', `immediate', or `batch'. By default, all priorities are targeted.\n");
		}
	} else if (tokencmp(tokens[1], "enable") || tokencmp(tokens[1], "disable")) {
		if (tokens.size() != 3 || !tokencmp(tokens[2], "auto")) {
			printf("Usage: throttle <enable|disable> auto\n");
			printf("\n");
			printf("Enables or disables automatic tag throttling.\n");
			return false;
		}
		state bool autoTagThrottlingEnabled = tokencmp(tokens[1], "enable");
		wait(enableAuto(db, autoTagThrottlingEnabled));
		printf("Automatic tag throttling has been %s\n", autoTagThrottlingEnabled ? "enabled" : "disabled");
	} else {
		printUsage(tokens[0]);
		return false;
	}
	return true;
}
// Static registration of the `throttle' command and its help text with fdbcli
// (presumably CommandFactory wires this into the command/help tables at startup
// — confirm against CommandFactory's definition in fdbcli.actor.h).
CommandFactory throttleFactory(
    "throttle",
    CommandHelp("throttle <on|off|enable auto|disable auto|list> [ARGS]",
                "view and control throttled tags",
                "Use `on' and `off' to manually throttle or unthrottle tags. Use `enable auto' or `disable auto' "
                "to enable or disable automatic tag throttling. Use `list' to print the list of throttled tags.\n"));
} // namespace fdb_cli

View File

@ -648,11 +648,6 @@ void initHelp() {
"namespace for all the profiling-related commands.", "namespace for all the profiling-related commands.",
"Different types support different actions. Run `profile` to get a list of " "Different types support different actions. Run `profile` to get a list of "
"types, and iteratively explore the help.\n"); "types, and iteratively explore the help.\n");
helpMap["throttle"] =
CommandHelp("throttle <on|off|enable auto|disable auto|list> [ARGS]",
"view and control throttled tags",
"Use `on' and `off' to manually throttle or unthrottle tags. Use `enable auto' or `disable auto' "
"to enable or disable automatic tag throttling. Use `list' to print the list of throttled tags.\n");
helpMap["cache_range"] = CommandHelp( helpMap["cache_range"] = CommandHelp(
"cache_range <set|clear> <BEGINKEY> <ENDKEY>", "cache_range <set|clear> <BEGINKEY> <ENDKEY>",
"Mark a key range to add to or remove from storage caches.", "Mark a key range to add to or remove from storage caches.",
@ -3960,6 +3955,7 @@ ACTOR Future<int> cli(CLIOptions opt, LineNoise* plinenoise) {
is_error = true; is_error = true;
continue; continue;
} }
wait(makeInterruptable(GlobalConfig::globalConfig().onInitialized()));
if (tokencmp(tokens[2], "get")) { if (tokencmp(tokens[2], "get")) {
if (tokens.size() != 3) { if (tokens.size() != 3) {
fprintf(stderr, "ERROR: Addtional arguments to `get` are not supported.\n"); fprintf(stderr, "ERROR: Addtional arguments to `get` are not supported.\n");
@ -4494,300 +4490,12 @@ ACTOR Future<int> cli(CLIOptions opt, LineNoise* plinenoise) {
} }
if (tokencmp(tokens[0], "throttle")) { if (tokencmp(tokens[0], "throttle")) {
if (tokens.size() == 1) { bool _result = wait(throttleCommandActor(db2, tokens));
printUsage(tokens[0]); if (!_result)
is_error = true;
continue;
} else if (tokencmp(tokens[1], "list")) {
if (tokens.size() > 4) {
printf("Usage: throttle list [throttled|recommended|all] [LIMIT]\n");
printf("\n");
printf("Lists tags that are currently throttled.\n");
printf("The default LIMIT is 100 tags.\n");
is_error = true; is_error = true;
continue; continue;
} }
state bool reportThrottled = true;
state bool reportRecommended = false;
if (tokens.size() >= 3) {
if (tokencmp(tokens[2], "recommended")) {
reportThrottled = false;
reportRecommended = true;
} else if (tokencmp(tokens[2], "all")) {
reportThrottled = true;
reportRecommended = true;
} else if (!tokencmp(tokens[2], "throttled")) {
printf("ERROR: failed to parse `%s'.\n", printable(tokens[2]).c_str());
is_error = true;
continue;
}
}
state int throttleListLimit = 100;
if (tokens.size() >= 4) {
char* end;
throttleListLimit = std::strtol((const char*)tokens[3].begin(), &end, 10);
if ((tokens.size() > 4 && !std::isspace(*end)) || (tokens.size() == 4 && *end != '\0')) {
fprintf(stderr, "ERROR: failed to parse limit `%s'.\n", printable(tokens[3]).c_str());
is_error = true;
continue;
}
}
state std::vector<TagThrottleInfo> tags;
if (reportThrottled && reportRecommended) {
wait(store(tags, ThrottleApi::getThrottledTags(db, throttleListLimit, true)));
} else if (reportThrottled) {
wait(store(tags, ThrottleApi::getThrottledTags(db, throttleListLimit)));
} else if (reportRecommended) {
wait(store(tags, ThrottleApi::getRecommendedTags(db, throttleListLimit)));
}
bool anyLogged = false;
for (auto itr = tags.begin(); itr != tags.end(); ++itr) {
if (itr->expirationTime > now()) {
if (!anyLogged) {
printf("Throttled tags:\n\n");
printf(" Rate (txn/s) | Expiration (s) | Priority | Type | Reason |Tag\n");
printf(
" --------------+----------------+-----------+--------+------------+------\n");
anyLogged = true;
}
std::string reasonStr = "unset";
if (itr->reason == TagThrottledReason::MANUAL) {
reasonStr = "manual";
} else if (itr->reason == TagThrottledReason::BUSY_WRITE) {
reasonStr = "busy write";
} else if (itr->reason == TagThrottledReason::BUSY_READ) {
reasonStr = "busy read";
}
printf(" %12d | %13ds | %9s | %6s | %10s |%s\n",
(int)(itr->tpsRate),
std::min((int)(itr->expirationTime - now()), (int)(itr->initialDuration)),
transactionPriorityToString(itr->priority, false),
itr->throttleType == TagThrottleType::AUTO ? "auto" : "manual",
reasonStr.c_str(),
itr->tag.toString().c_str());
}
}
if (tags.size() == throttleListLimit) {
printf(
"\nThe tag limit `%d' was reached. Use the [LIMIT] argument to view additional tags.\n",
throttleListLimit);
printf("Usage: throttle list [LIMIT]\n");
}
if (!anyLogged) {
printf("There are no %s tags\n", reportThrottled ? "throttled" : "recommended");
}
} else if (tokencmp(tokens[1], "on")) {
if (tokens.size() < 4 || !tokencmp(tokens[2], "tag") || tokens.size() > 7) {
printf("Usage: throttle on tag <TAG> [RATE] [DURATION] [PRIORITY]\n");
printf("\n");
printf("Enables throttling for transactions with the specified tag.\n");
printf("An optional transactions per second rate can be specified (default 0).\n");
printf("An optional duration can be specified, which must include a time suffix (s, m, h, "
"d) (default 1h).\n");
printf("An optional priority can be specified. Choices are `default', `immediate', and "
"`batch' (default `default').\n");
is_error = true;
continue;
}
double tpsRate = 0.0;
uint64_t duration = 3600;
TransactionPriority priority = TransactionPriority::DEFAULT;
if (tokens.size() >= 5) {
char* end;
tpsRate = std::strtod((const char*)tokens[4].begin(), &end);
if ((tokens.size() > 5 && !std::isspace(*end)) || (tokens.size() == 5 && *end != '\0')) {
fprintf(stderr, "ERROR: failed to parse rate `%s'.\n", printable(tokens[4]).c_str());
is_error = true;
continue;
}
if (tpsRate < 0) {
fprintf(stderr, "ERROR: rate cannot be negative `%f'\n", tpsRate);
is_error = true;
continue;
}
}
if (tokens.size() == 6) {
Optional<uint64_t> parsedDuration = parseDuration(tokens[5].toString());
if (!parsedDuration.present()) {
fprintf(
stderr, "ERROR: failed to parse duration `%s'.\n", printable(tokens[5]).c_str());
is_error = true;
continue;
}
duration = parsedDuration.get();
if (duration == 0) {
fprintf(stderr, "ERROR: throttle duration cannot be 0\n");
is_error = true;
continue;
}
}
if (tokens.size() == 7) {
if (tokens[6] == LiteralStringRef("default")) {
priority = TransactionPriority::DEFAULT;
} else if (tokens[6] == LiteralStringRef("immediate")) {
priority = TransactionPriority::IMMEDIATE;
} else if (tokens[6] == LiteralStringRef("batch")) {
priority = TransactionPriority::BATCH;
} else {
fprintf(stderr,
"ERROR: unrecognized priority `%s'. Must be one of `default',\n `immediate', "
"or `batch'.\n",
tokens[6].toString().c_str());
is_error = true;
continue;
}
}
TagSet tags;
tags.addTag(tokens[3]);
wait(ThrottleApi::throttleTags(db, tags, tpsRate, duration, TagThrottleType::MANUAL, priority));
printf("Tag `%s' has been throttled\n", tokens[3].toString().c_str());
} else if (tokencmp(tokens[1], "off")) {
int nextIndex = 2;
TagSet tags;
bool throttleTypeSpecified = false;
Optional<TagThrottleType> throttleType = TagThrottleType::MANUAL;
Optional<TransactionPriority> priority;
if (tokens.size() == 2) {
is_error = true;
}
while (nextIndex < tokens.size() && !is_error) {
if (tokencmp(tokens[nextIndex], "all")) {
if (throttleTypeSpecified) {
is_error = true;
continue;
}
throttleTypeSpecified = true;
throttleType = Optional<TagThrottleType>();
++nextIndex;
} else if (tokencmp(tokens[nextIndex], "auto")) {
if (throttleTypeSpecified) {
is_error = true;
continue;
}
throttleTypeSpecified = true;
throttleType = TagThrottleType::AUTO;
++nextIndex;
} else if (tokencmp(tokens[nextIndex], "manual")) {
if (throttleTypeSpecified) {
is_error = true;
continue;
}
throttleTypeSpecified = true;
throttleType = TagThrottleType::MANUAL;
++nextIndex;
} else if (tokencmp(tokens[nextIndex], "default")) {
if (priority.present()) {
is_error = true;
continue;
}
priority = TransactionPriority::DEFAULT;
++nextIndex;
} else if (tokencmp(tokens[nextIndex], "immediate")) {
if (priority.present()) {
is_error = true;
continue;
}
priority = TransactionPriority::IMMEDIATE;
++nextIndex;
} else if (tokencmp(tokens[nextIndex], "batch")) {
if (priority.present()) {
is_error = true;
continue;
}
priority = TransactionPriority::BATCH;
++nextIndex;
} else if (tokencmp(tokens[nextIndex], "tag")) {
if (tags.size() > 0 || nextIndex == tokens.size() - 1) {
is_error = true;
continue;
}
tags.addTag(tokens[nextIndex + 1]);
nextIndex += 2;
}
}
if (!is_error) {
state const char* throttleTypeString =
!throttleType.present()
? ""
: (throttleType.get() == TagThrottleType::AUTO ? "auto-" : "manually ");
state std::string priorityString =
priority.present()
? format(" at %s priority", transactionPriorityToString(priority.get(), false))
: "";
if (tags.size() > 0) {
bool success = wait(ThrottleApi::unthrottleTags(db, tags, throttleType, priority));
if (success) {
printf("Unthrottled tag `%s'%s\n",
tokens[3].toString().c_str(),
priorityString.c_str());
} else {
printf("Tag `%s' was not %sthrottled%s\n",
tokens[3].toString().c_str(),
throttleTypeString,
priorityString.c_str());
}
} else {
bool unthrottled = wait(ThrottleApi::unthrottleAll(db, throttleType, priority));
if (unthrottled) {
printf("Unthrottled all %sthrottled tags%s\n",
throttleTypeString,
priorityString.c_str());
} else {
printf("There were no tags being %sthrottled%s\n",
throttleTypeString,
priorityString.c_str());
}
}
} else {
printf("Usage: throttle off [all|auto|manual] [tag <TAG>] [PRIORITY]\n");
printf("\n");
printf("Disables throttling for throttles matching the specified filters. At least one "
"filter must be used.\n\n");
printf("An optional qualifier `all', `auto', or `manual' can be used to specify the type "
"of throttle\n");
printf("affected. `all' targets all throttles, `auto' targets those created by the "
"cluster, and\n");
printf("`manual' targets those created manually (default `manual').\n\n");
printf("The `tag' filter can be use to turn off only a specific tag.\n\n");
printf("The priority filter can be used to turn off only throttles at specific priorities. "
"Choices are\n");
printf("`default', `immediate', or `batch'. By default, all priorities are targeted.\n");
}
} else if (tokencmp(tokens[1], "enable") || tokencmp(tokens[1], "disable")) {
if (tokens.size() != 3 || !tokencmp(tokens[2], "auto")) {
printf("Usage: throttle <enable|disable> auto\n");
printf("\n");
printf("Enables or disable automatic tag throttling.\n");
is_error = true;
continue;
}
state bool autoTagThrottlingEnabled = tokencmp(tokens[1], "enable");
wait(ThrottleApi::enableAuto(db, autoTagThrottlingEnabled));
printf("Automatic tag throttling has been %s\n",
autoTagThrottlingEnabled ? "enabled" : "disabled");
} else {
printUsage(tokens[0]);
is_error = true;
}
continue;
}
if (tokencmp(tokens[0], "cache_range")) { if (tokencmp(tokens[0], "cache_range")) {
if (tokens.size() != 4) { if (tokens.size() != 4) {
printUsage(tokens[0]); printUsage(tokens[0]);

View File

@ -83,6 +83,8 @@ ACTOR Future<bool> forceRecoveryWithDataLossCommandActor(Reference<IDatabase> db
ACTOR Future<bool> maintenanceCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens); ACTOR Future<bool> maintenanceCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens);
// snapshot command // snapshot command
ACTOR Future<bool> snapshotCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens); ACTOR Future<bool> snapshotCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens);
// throttle command
ACTOR Future<bool> throttleCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens);
} // namespace fdb_cli } // namespace fdb_cli

View File

@ -227,7 +227,7 @@ Future<Reference<IAsyncFile>> BackupContainerLocalDirectory::readFile(const std:
} }
if (g_simulator.getCurrentProcess()->uid == UID()) { if (g_simulator.getCurrentProcess()->uid == UID()) {
TraceEvent(SevError, "BackupContainerReadFileOnUnsetProcessID"); TraceEvent(SevError, "BackupContainerReadFileOnUnsetProcessID").log();
} }
std::string uniquePath = fullPath + "." + g_simulator.getCurrentProcess()->uid.toString() + ".lnk"; std::string uniquePath = fullPath + "." + g_simulator.getCurrentProcess()->uid.toString() + ".lnk";
unlink(uniquePath.c_str()); unlink(uniquePath.c_str());

View File

@ -364,7 +364,7 @@ struct BackupRangeTaskFunc : TaskFuncBase {
TEST(true); // range insert delayed because too versionMap is too large TEST(true); // range insert delayed because too versionMap is too large
if (rangeCount > CLIENT_KNOBS->BACKUP_MAP_KEY_UPPER_LIMIT) if (rangeCount > CLIENT_KNOBS->BACKUP_MAP_KEY_UPPER_LIMIT)
TraceEvent(SevWarnAlways, "DBA_KeyRangeMapTooLarge"); TraceEvent(SevWarnAlways, "DBA_KeyRangeMapTooLarge").log();
wait(delay(1)); wait(delay(1));
task->params[BackupRangeTaskFunc::keyBackupRangeBeginKey] = rangeBegin; task->params[BackupRangeTaskFunc::keyBackupRangeBeginKey] = rangeBegin;
@ -1882,7 +1882,7 @@ struct CopyDiffLogsUpgradeTaskFunc : TaskFuncBase {
state Reference<TaskFuture> onDone = futureBucket->unpack(task->params[Task::reservedTaskParamKeyDone]); state Reference<TaskFuture> onDone = futureBucket->unpack(task->params[Task::reservedTaskParamKeyDone]);
if (task->params[BackupAgentBase::destUid].size() == 0) { if (task->params[BackupAgentBase::destUid].size() == 0) {
TraceEvent("DBA_CopyDiffLogsUpgradeTaskFuncAbortInUpgrade"); TraceEvent("DBA_CopyDiffLogsUpgradeTaskFuncAbortInUpgrade").log();
wait(success(AbortOldBackupTaskFunc::addTask(tr, taskBucket, task, TaskCompletionKey::signal(onDone)))); wait(success(AbortOldBackupTaskFunc::addTask(tr, taskBucket, task, TaskCompletionKey::signal(onDone))));
} else { } else {
Version beginVersion = Version beginVersion =
@ -2377,11 +2377,11 @@ void checkAtomicSwitchOverConfig(StatusObjectReader srcStatus, StatusObjectReade
try { try {
// Check if src is unlocked and dest is locked // Check if src is unlocked and dest is locked
if (getLockedStatus(srcStatus) != false) { if (getLockedStatus(srcStatus) != false) {
TraceEvent(SevWarn, "DBA_AtomicSwitchOverSrcLocked"); TraceEvent(SevWarn, "DBA_AtomicSwitchOverSrcLocked").log();
throw backup_error(); throw backup_error();
} }
if (getLockedStatus(destStatus) != true) { if (getLockedStatus(destStatus) != true) {
TraceEvent(SevWarn, "DBA_AtomicSwitchOverDestUnlocked"); TraceEvent(SevWarn, "DBA_AtomicSwitchOverDestUnlocked").log();
throw backup_error(); throw backup_error();
} }
// Check if mutation-stream-id matches // Check if mutation-stream-id matches
@ -2402,7 +2402,7 @@ void checkAtomicSwitchOverConfig(StatusObjectReader srcStatus, StatusObjectReade
destDRAgents.end(), destDRAgents.end(),
std::inserter(intersectingAgents, intersectingAgents.begin())); std::inserter(intersectingAgents, intersectingAgents.begin()));
if (intersectingAgents.empty()) { if (intersectingAgents.empty()) {
TraceEvent(SevWarn, "DBA_SwitchOverPossibleDRAgentsIncorrectSetup"); TraceEvent(SevWarn, "DBA_SwitchOverPossibleDRAgentsIncorrectSetup").log();
throw backup_error(); throw backup_error();
} }
} catch (std::runtime_error& e) { } catch (std::runtime_error& e) {
@ -2757,7 +2757,7 @@ public:
} }
} }
TraceEvent("DBA_SwitchoverReady"); TraceEvent("DBA_SwitchoverReady").log();
try { try {
wait(backupAgent->discontinueBackup(dest, tagName)); wait(backupAgent->discontinueBackup(dest, tagName));
@ -2768,7 +2768,7 @@ public:
wait(success(backupAgent->waitBackup(dest, tagName, StopWhenDone::True))); wait(success(backupAgent->waitBackup(dest, tagName, StopWhenDone::True)));
TraceEvent("DBA_SwitchoverStopped"); TraceEvent("DBA_SwitchoverStopped").log();
state ReadYourWritesTransaction tr3(dest); state ReadYourWritesTransaction tr3(dest);
loop { loop {
@ -2789,7 +2789,7 @@ public:
} }
} }
TraceEvent("DBA_SwitchoverVersionUpgraded"); TraceEvent("DBA_SwitchoverVersionUpgraded").log();
try { try {
wait(drAgent.submitBackup(backupAgent->taskBucket->src, wait(drAgent.submitBackup(backupAgent->taskBucket->src,
@ -2805,15 +2805,15 @@ public:
throw; throw;
} }
TraceEvent("DBA_SwitchoverSubmitted"); TraceEvent("DBA_SwitchoverSubmitted").log();
wait(success(drAgent.waitSubmitted(backupAgent->taskBucket->src, tagName))); wait(success(drAgent.waitSubmitted(backupAgent->taskBucket->src, tagName)));
TraceEvent("DBA_SwitchoverStarted"); TraceEvent("DBA_SwitchoverStarted").log();
wait(backupAgent->unlockBackup(dest, tagName)); wait(backupAgent->unlockBackup(dest, tagName));
TraceEvent("DBA_SwitchoverUnlocked"); TraceEvent("DBA_SwitchoverUnlocked").log();
return Void(); return Void();
} }

View File

@ -5478,7 +5478,7 @@ public:
try { try {
wait(discontinueBackup(backupAgent, ryw_tr, tagName)); wait(discontinueBackup(backupAgent, ryw_tr, tagName));
wait(ryw_tr->commit()); wait(ryw_tr->commit());
TraceEvent("AS_DiscontinuedBackup"); TraceEvent("AS_DiscontinuedBackup").log();
break; break;
} catch (Error& e) { } catch (Error& e) {
if (e.code() == error_code_backup_unneeded || e.code() == error_code_backup_duplicate) { if (e.code() == error_code_backup_unneeded || e.code() == error_code_backup_duplicate) {
@ -5489,7 +5489,7 @@ public:
} }
wait(success(waitBackup(backupAgent, cx, tagName.toString(), StopWhenDone::True))); wait(success(waitBackup(backupAgent, cx, tagName.toString(), StopWhenDone::True)));
TraceEvent("AS_BackupStopped"); TraceEvent("AS_BackupStopped").log();
ryw_tr->reset(); ryw_tr->reset();
loop { loop {
@ -5502,7 +5502,7 @@ public:
ryw_tr->clear(range); ryw_tr->clear(range);
} }
wait(ryw_tr->commit()); wait(ryw_tr->commit());
TraceEvent("AS_ClearedRange"); TraceEvent("AS_ClearedRange").log();
break; break;
} catch (Error& e) { } catch (Error& e) {
wait(ryw_tr->onError(e)); wait(ryw_tr->onError(e));
@ -5512,7 +5512,7 @@ public:
Reference<IBackupContainer> bc = wait(backupConfig.backupContainer().getOrThrow(cx)); Reference<IBackupContainer> bc = wait(backupConfig.backupContainer().getOrThrow(cx));
if (fastRestore) { if (fastRestore) {
TraceEvent("AtomicParallelRestoreStartRestore"); TraceEvent("AtomicParallelRestoreStartRestore").log();
Version targetVersion = ::invalidVersion; Version targetVersion = ::invalidVersion;
wait(submitParallelRestore(cx, wait(submitParallelRestore(cx,
tagName, tagName,
@ -5533,7 +5533,7 @@ public:
} }
return -1; return -1;
} else { } else {
TraceEvent("AS_StartRestore"); TraceEvent("AS_StartRestore").log();
Version ver = wait(restore(backupAgent, Version ver = wait(restore(backupAgent,
cx, cx,
cx, cx,

View File

@ -77,6 +77,7 @@ void GlobalConfig::trigger(KeyRef key, std::function<void(std::optional<std::any
} }
void GlobalConfig::insert(KeyRef key, ValueRef value) { void GlobalConfig::insert(KeyRef key, ValueRef value) {
TraceEvent(SevInfo, "GlobalConfig_Insert").detail("Key", key).detail("Value", value);
data.erase(key); data.erase(key);
Arena arena(key.expectedSize() + value.expectedSize()); Arena arena(key.expectedSize() + value.expectedSize());
@ -112,6 +113,7 @@ void GlobalConfig::erase(Key key) {
} }
void GlobalConfig::erase(KeyRangeRef range) { void GlobalConfig::erase(KeyRangeRef range) {
TraceEvent(SevInfo, "GlobalConfig_Erase").detail("Range", range);
auto it = data.begin(); auto it = data.begin();
while (it != data.end()) { while (it != data.end()) {
if (range.contains(it->first)) { if (range.contains(it->first)) {
@ -174,6 +176,7 @@ ACTOR Future<Void> GlobalConfig::migrate(GlobalConfig* self) {
// Updates local copy of global configuration by reading the entire key-range // Updates local copy of global configuration by reading the entire key-range
// from storage. // from storage.
ACTOR Future<Void> GlobalConfig::refresh(GlobalConfig* self) { ACTOR Future<Void> GlobalConfig::refresh(GlobalConfig* self) {
TraceEvent trace(SevInfo, "GlobalConfig_Refresh");
self->erase(KeyRangeRef(""_sr, "\xff"_sr)); self->erase(KeyRangeRef(""_sr, "\xff"_sr));
Transaction tr(self->cx); Transaction tr(self->cx);

View File

@ -108,6 +108,7 @@ public:
// the key. // the key.
template <typename T, typename std::enable_if<std::is_arithmetic<T>{}, bool>::type = true> template <typename T, typename std::enable_if<std::is_arithmetic<T>{}, bool>::type = true>
const T get(KeyRef name, T defaultVal) { const T get(KeyRef name, T defaultVal) {
TraceEvent(SevInfo, "GlobalConfig_Get").detail("Key", name);
try { try {
auto configValue = get(name); auto configValue = get(name);
if (configValue.isValid()) { if (configValue.isValid()) {

View File

@ -1521,7 +1521,7 @@ std::vector<std::pair<std::string, bool>> MultiVersionApi::copyExternalLibraryPe
#else #else
std::vector<std::pair<std::string, bool>> MultiVersionApi::copyExternalLibraryPerThread(std::string path) { std::vector<std::pair<std::string, bool>> MultiVersionApi::copyExternalLibraryPerThread(std::string path) {
if (threadCount > 1) { if (threadCount > 1) {
TraceEvent(SevError, "MultipleClientThreadsUnsupportedOnWindows"); TraceEvent(SevError, "MultipleClientThreadsUnsupportedOnWindows").log();
throw unsupported_operation(); throw unsupported_operation();
} }
std::vector<std::pair<std::string, bool>> paths; std::vector<std::pair<std::string, bool>> paths;

View File

@ -488,7 +488,7 @@ ACTOR static Future<Void> transactionInfoCommitActor(Transaction* tr, std::vecto
ACTOR static Future<Void> delExcessClntTxnEntriesActor(Transaction* tr, int64_t clientTxInfoSizeLimit) { ACTOR static Future<Void> delExcessClntTxnEntriesActor(Transaction* tr, int64_t clientTxInfoSizeLimit) {
state const Key clientLatencyName = CLIENT_LATENCY_INFO_PREFIX.withPrefix(fdbClientInfoPrefixRange.begin); state const Key clientLatencyName = CLIENT_LATENCY_INFO_PREFIX.withPrefix(fdbClientInfoPrefixRange.begin);
state const Key clientLatencyAtomicCtr = CLIENT_LATENCY_INFO_CTR_PREFIX.withPrefix(fdbClientInfoPrefixRange.begin); state const Key clientLatencyAtomicCtr = CLIENT_LATENCY_INFO_CTR_PREFIX.withPrefix(fdbClientInfoPrefixRange.begin);
TraceEvent(SevInfo, "DelExcessClntTxnEntriesCalled"); TraceEvent(SevInfo, "DelExcessClntTxnEntriesCalled").log();
loop { loop {
try { try {
tr->reset(); tr->reset();
@ -496,7 +496,7 @@ ACTOR static Future<Void> delExcessClntTxnEntriesActor(Transaction* tr, int64_t
tr->setOption(FDBTransactionOptions::LOCK_AWARE); tr->setOption(FDBTransactionOptions::LOCK_AWARE);
Optional<Value> ctrValue = wait(tr->get(KeyRef(clientLatencyAtomicCtr), Snapshot::True)); Optional<Value> ctrValue = wait(tr->get(KeyRef(clientLatencyAtomicCtr), Snapshot::True));
if (!ctrValue.present()) { if (!ctrValue.present()) {
TraceEvent(SevInfo, "NumClntTxnEntriesNotFound"); TraceEvent(SevInfo, "NumClntTxnEntriesNotFound").log();
return Void(); return Void();
} }
state int64_t txInfoSize = 0; state int64_t txInfoSize = 0;
@ -1627,7 +1627,7 @@ ACTOR static Future<Void> switchConnectionFileImpl(Reference<ClusterConnectionFi
loop { loop {
tr.setOption(FDBTransactionOptions::READ_LOCK_AWARE); tr.setOption(FDBTransactionOptions::READ_LOCK_AWARE);
try { try {
TraceEvent("SwitchConnectionFileAttemptingGRV"); TraceEvent("SwitchConnectionFileAttemptingGRV").log();
Version v = wait(tr.getReadVersion()); Version v = wait(tr.getReadVersion());
TraceEvent("SwitchConnectionFileGotRV") TraceEvent("SwitchConnectionFileGotRV")
.detail("ReadVersion", v) .detail("ReadVersion", v)
@ -5199,7 +5199,7 @@ Future<Void> Transaction::commitMutations() {
if (options.debugDump) { if (options.debugDump) {
UID u = nondeterministicRandom()->randomUniqueID(); UID u = nondeterministicRandom()->randomUniqueID();
TraceEvent("TransactionDump", u); TraceEvent("TransactionDump", u).log();
for (auto i = tr.transaction.mutations.begin(); i != tr.transaction.mutations.end(); ++i) for (auto i = tr.transaction.mutations.begin(); i != tr.transaction.mutations.end(); ++i)
TraceEvent("TransactionMutation", u) TraceEvent("TransactionMutation", u)
.detail("T", i->type) .detail("T", i->type)
@ -5244,7 +5244,10 @@ ACTOR Future<Void> commitAndWatch(Transaction* self) {
self->setupWatches(); self->setupWatches();
} }
if (!self->apiVersionAtLeast(700)) {
self->reset(); self->reset();
}
return Void(); return Void();
} catch (Error& e) { } catch (Error& e) {
if (e.code() != error_code_actor_cancelled) { if (e.code() != error_code_actor_cancelled) {
@ -5253,8 +5256,11 @@ ACTOR Future<Void> commitAndWatch(Transaction* self) {
} }
self->versionstampPromise.sendError(transaction_invalid_version()); self->versionstampPromise.sendError(transaction_invalid_version());
if (!self->apiVersionAtLeast(700)) {
self->reset(); self->reset();
} }
}
throw; throw;
} }
@ -6326,7 +6332,7 @@ void Transaction::setToken(uint64_t token) {
void enableClientInfoLogging() { void enableClientInfoLogging() {
ASSERT(networkOptions.logClientInfo.present() == false); ASSERT(networkOptions.logClientInfo.present() == false);
networkOptions.logClientInfo = true; networkOptions.logClientInfo = true;
TraceEvent(SevInfo, "ClientInfoLoggingEnabled"); TraceEvent(SevInfo, "ClientInfoLoggingEnabled").log();
} }
ACTOR Future<Void> snapCreate(Database cx, Standalone<StringRef> snapCmd, UID snapUID) { ACTOR Future<Void> snapCreate(Database cx, Standalone<StringRef> snapCmd, UID snapUID) {
@ -6380,7 +6386,7 @@ ACTOR Future<bool> checkSafeExclusions(Database cx, vector<AddressExclusion> exc
} }
throw; throw;
} }
TraceEvent("ExclusionSafetyCheckCoordinators"); TraceEvent("ExclusionSafetyCheckCoordinators").log();
state ClientCoordinators coordinatorList(cx->getConnectionFile()); state ClientCoordinators coordinatorList(cx->getConnectionFile());
state vector<Future<Optional<LeaderInfo>>> leaderServers; state vector<Future<Optional<LeaderInfo>>> leaderServers;
leaderServers.reserve(coordinatorList.clientLeaderServers.size()); leaderServers.reserve(coordinatorList.clientLeaderServers.size());
@ -6393,7 +6399,7 @@ ACTOR Future<bool> checkSafeExclusions(Database cx, vector<AddressExclusion> exc
choose { choose {
when(wait(smartQuorum(leaderServers, leaderServers.size() / 2 + 1, 1.0))) {} when(wait(smartQuorum(leaderServers, leaderServers.size() / 2 + 1, 1.0))) {}
when(wait(delay(3.0))) { when(wait(delay(3.0))) {
TraceEvent("ExclusionSafetyCheckNoCoordinatorQuorum"); TraceEvent("ExclusionSafetyCheckNoCoordinatorQuorum").log();
return false; return false;
} }
} }

View File

@ -1164,7 +1164,7 @@ public:
if (!ryw->resetPromise.isSet()) if (!ryw->resetPromise.isSet())
ryw->resetPromise.sendError(transaction_timed_out()); ryw->resetPromise.sendError(transaction_timed_out());
wait(delay(deterministicRandom()->random01() * 5)); wait(delay(deterministicRandom()->random01() * 5));
TraceEvent("ClientBuggifyInFlightCommit"); TraceEvent("ClientBuggifyInFlightCommit").log();
wait(ryw->tr.commit()); wait(ryw->tr.commit());
} }

View File

@ -129,7 +129,7 @@ void decodeKeyServersValue(RangeResult result,
std::sort(src.begin(), src.end()); std::sort(src.begin(), src.end());
std::sort(dest.begin(), dest.end()); std::sort(dest.begin(), dest.end());
if (missingIsError && (src.size() != srcTag.size() || dest.size() != destTag.size())) { if (missingIsError && (src.size() != srcTag.size() || dest.size() != destTag.size())) {
TraceEvent(SevError, "AttemptedToDecodeMissingTag"); TraceEvent(SevError, "AttemptedToDecodeMissingTag").log();
for (const KeyValueRef& kv : result) { for (const KeyValueRef& kv : result) {
Tag tag = decodeServerTagValue(kv.value); Tag tag = decodeServerTagValue(kv.value);
UID serverID = decodeServerTagKey(kv.key); UID serverID = decodeServerTagKey(kv.key);

View File

@ -234,6 +234,8 @@ struct YieldMockNetwork final : INetwork, ReferenceCounted<YieldMockNetwork> {
Future<class Void> delay(double seconds, TaskPriority taskID) override { return nextTick.getFuture(); } Future<class Void> delay(double seconds, TaskPriority taskID) override { return nextTick.getFuture(); }
Future<class Void> orderedDelay(double seconds, TaskPriority taskID) override { return nextTick.getFuture(); }
Future<class Void> yield(TaskPriority taskID) override { Future<class Void> yield(TaskPriority taskID) override {
if (check_yield(taskID)) if (check_yield(taskID))
return delay(0, taskID); return delay(0, taskID);

View File

@ -922,9 +922,9 @@ ACTOR static void deliver(TransportData* self,
// We want to run the task at the right priority. If the priority is higher than the current priority (which is // We want to run the task at the right priority. If the priority is higher than the current priority (which is
// ReadSocket) we can just upgrade. Otherwise we'll context switch so that we don't block other tasks that might run // ReadSocket) we can just upgrade. Otherwise we'll context switch so that we don't block other tasks that might run
// with a higher priority. ReplyPromiseStream needs to guarentee that messages are recieved in the order they were // with a higher priority. ReplyPromiseStream needs to guarentee that messages are recieved in the order they were
// sent, so even in the case of local delivery those messages need to skip this delay. // sent, so we are using orderedDelay.
if (priority < TaskPriority::ReadSocket || (priority != TaskPriority::NoDeliverDelay && !inReadSocket)) { if (priority < TaskPriority::ReadSocket || !inReadSocket) {
wait(delay(0, priority)); wait(orderedDelay(0, priority));
} else { } else {
g_network->setCurrentTask(priority); g_network->setCurrentTask(priority);
} }
@ -1019,7 +1019,7 @@ static void scanPackets(TransportData* transport,
BUGGIFY_WITH_PROB(0.0001)) { BUGGIFY_WITH_PROB(0.0001)) {
g_simulator.lastConnectionFailure = g_network->now(); g_simulator.lastConnectionFailure = g_network->now();
isBuggifyEnabled = true; isBuggifyEnabled = true;
TraceEvent(SevInfo, "BitsFlip"); TraceEvent(SevInfo, "BitsFlip").log();
int flipBits = 32 - (int)floor(log2(deterministicRandom()->randomUInt32())); int flipBits = 32 - (int)floor(log2(deterministicRandom()->randomUInt32()));
uint32_t firstFlipByteLocation = deterministicRandom()->randomUInt32() % packetLen; uint32_t firstFlipByteLocation = deterministicRandom()->randomUInt32() % packetLen;

View File

@ -80,7 +80,7 @@ struct CounterCollection {
void logToTraceEvent(TraceEvent& te) const; void logToTraceEvent(TraceEvent& te) const;
}; };
struct Counter : ICounter, NonCopyable { struct Counter final : ICounter, NonCopyable {
public: public:
typedef int64_t Value; typedef int64_t Value;

View File

@ -361,7 +361,7 @@ struct NetNotifiedQueueWithAcknowledgements final : NotifiedQueue<T>,
FlowTransport::transport().sendUnreliable( FlowTransport::transport().sendUnreliable(
SerializeSource<ErrorOr<AcknowledgementReply>>( SerializeSource<ErrorOr<AcknowledgementReply>>(
AcknowledgementReply(acknowledgements.bytesAcknowledged)), AcknowledgementReply(acknowledgements.bytesAcknowledged)),
acknowledgements.getEndpoint(TaskPriority::NoDeliverDelay), acknowledgements.getEndpoint(TaskPriority::ReadSocket),
false); false);
} }
} }
@ -378,7 +378,7 @@ struct NetNotifiedQueueWithAcknowledgements final : NotifiedQueue<T>,
acknowledgements.bytesAcknowledged += res.expectedSize(); acknowledgements.bytesAcknowledged += res.expectedSize();
FlowTransport::transport().sendUnreliable(SerializeSource<ErrorOr<AcknowledgementReply>>( FlowTransport::transport().sendUnreliable(SerializeSource<ErrorOr<AcknowledgementReply>>(
AcknowledgementReply(acknowledgements.bytesAcknowledged)), AcknowledgementReply(acknowledgements.bytesAcknowledged)),
acknowledgements.getEndpoint(TaskPriority::NoDeliverDelay), acknowledgements.getEndpoint(TaskPriority::ReadSocket),
false); false);
} }
return res; return res;
@ -389,13 +389,13 @@ struct NetNotifiedQueueWithAcknowledgements final : NotifiedQueue<T>,
// Notify the server that a client is not using this ReplyPromiseStream anymore // Notify the server that a client is not using this ReplyPromiseStream anymore
FlowTransport::transport().sendUnreliable( FlowTransport::transport().sendUnreliable(
SerializeSource<ErrorOr<AcknowledgementReply>>(operation_obsolete()), SerializeSource<ErrorOr<AcknowledgementReply>>(operation_obsolete()),
acknowledgements.getEndpoint(TaskPriority::NoDeliverDelay), acknowledgements.getEndpoint(TaskPriority::ReadSocket),
false); false);
} }
if (isRemoteEndpoint() && !sentError && !acknowledgements.failures.isReady()) { if (isRemoteEndpoint() && !sentError && !acknowledgements.failures.isReady()) {
// The ReplyPromiseStream was cancelled before sending an error, so the storage server must have died // The ReplyPromiseStream was cancelled before sending an error, so the storage server must have died
FlowTransport::transport().sendUnreliable(SerializeSource<ErrorOr<EnsureTable<T>>>(broken_promise()), FlowTransport::transport().sendUnreliable(SerializeSource<ErrorOr<EnsureTable<T>>>(broken_promise()),
getEndpoint(TaskPriority::NoDeliverDelay), getEndpoint(TaskPriority::ReadSocket),
false); false);
} }
} }
@ -406,9 +406,6 @@ struct NetNotifiedQueueWithAcknowledgements final : NotifiedQueue<T>,
template <class T> template <class T>
class ReplyPromiseStream { class ReplyPromiseStream {
public: public:
// The endpoints of a ReplyPromiseStream must be initialized at Task::NoDeliverDelay, because a
// delay(0) in FlowTransport deliver can cause out of order delivery.
// stream.send( request ) // stream.send( request )
// Unreliable at most once delivery: Delivers request unless there is a connection failure (zero or one times) // Unreliable at most once delivery: Delivers request unless there is a connection failure (zero or one times)
@ -416,7 +413,7 @@ public:
void send(U&& value) const { void send(U&& value) const {
if (queue->isRemoteEndpoint()) { if (queue->isRemoteEndpoint()) {
if (!queue->acknowledgements.getRawEndpoint().isValid()) { if (!queue->acknowledgements.getRawEndpoint().isValid()) {
value.acknowledgeToken = queue->acknowledgements.getEndpoint(TaskPriority::NoDeliverDelay).token; value.acknowledgeToken = queue->acknowledgements.getEndpoint(TaskPriority::ReadSocket).token;
} }
queue->acknowledgements.bytesSent += value.expectedSize(); queue->acknowledgements.bytesSent += value.expectedSize();
FlowTransport::transport().sendUnreliable( FlowTransport::transport().sendUnreliable(
@ -477,7 +474,7 @@ public:
errors->delPromiseRef(); errors->delPromiseRef();
} }
const Endpoint& getEndpoint() const { return queue->getEndpoint(TaskPriority::NoDeliverDelay); } const Endpoint& getEndpoint() const { return queue->getEndpoint(TaskPriority::ReadSocket); }
bool operator==(const ReplyPromiseStream<T>& rhs) const { return queue == rhs.queue; } bool operator==(const ReplyPromiseStream<T>& rhs) const { return queue == rhs.queue; }
bool isEmpty() const { return !queue->isReady(); } bool isEmpty() const { return !queue->isReady(); }

View File

@ -470,12 +470,12 @@ public:
state TaskPriority currentTaskID = g_network->getCurrentTask(); state TaskPriority currentTaskID = g_network->getCurrentTask();
if (++openCount >= 3000) { if (++openCount >= 3000) {
TraceEvent(SevError, "TooManyFiles"); TraceEvent(SevError, "TooManyFiles").log();
ASSERT(false); ASSERT(false);
} }
if (openCount == 2000) { if (openCount == 2000) {
TraceEvent(SevWarnAlways, "DisableConnectionFailures_TooManyFiles"); TraceEvent(SevWarnAlways, "DisableConnectionFailures_TooManyFiles").log();
g_simulator.speedUpSimulation = true; g_simulator.speedUpSimulation = true;
g_simulator.connectionFailuresDisableDuration = 1e6; g_simulator.connectionFailuresDisableDuration = 1e6;
} }
@ -859,13 +859,17 @@ public:
ASSERT(taskID >= TaskPriority::Min && taskID <= TaskPriority::Max); ASSERT(taskID >= TaskPriority::Min && taskID <= TaskPriority::Max);
return delay(seconds, taskID, currentProcess); return delay(seconds, taskID, currentProcess);
} }
Future<class Void> delay(double seconds, TaskPriority taskID, ProcessInfo* machine) { Future<class Void> orderedDelay(double seconds, TaskPriority taskID) override {
ASSERT(taskID >= TaskPriority::Min && taskID <= TaskPriority::Max);
return delay(seconds, taskID, currentProcess, true);
}
Future<class Void> delay(double seconds, TaskPriority taskID, ProcessInfo* machine, bool ordered = false) {
ASSERT(seconds >= -0.0001); ASSERT(seconds >= -0.0001);
seconds = std::max(0.0, seconds); seconds = std::max(0.0, seconds);
Future<Void> f; Future<Void> f;
if (!currentProcess->rebooting && machine == currentProcess && !currentProcess->shutdownSignal.isSet() && if (!ordered && !currentProcess->rebooting && machine == currentProcess &&
FLOW_KNOBS->MAX_BUGGIFIED_DELAY > 0 && !currentProcess->shutdownSignal.isSet() && FLOW_KNOBS->MAX_BUGGIFIED_DELAY > 0 &&
deterministicRandom()->random01() < 0.25) { // FIXME: why doesnt this work when we are changing machines? deterministicRandom()->random01() < 0.25) { // FIXME: why doesnt this work when we are changing machines?
seconds += FLOW_KNOBS->MAX_BUGGIFIED_DELAY * pow(deterministicRandom()->random01(), 1000.0); seconds += FLOW_KNOBS->MAX_BUGGIFIED_DELAY * pow(deterministicRandom()->random01(), 1000.0);
} }

View File

@ -404,7 +404,7 @@ void applyMetadataMutations(SpanID const& spanContext,
confChange = true; confChange = true;
TEST(true); // Recovering at a higher version. TEST(true); // Recovering at a higher version.
} else if (m.param1 == writeRecoveryKey) { } else if (m.param1 == writeRecoveryKey) {
TraceEvent("WriteRecoveryKeySet", dbgid); TraceEvent("WriteRecoveryKeySet", dbgid).log();
if (!initialCommit) if (!initialCommit)
txnStateStore->set(KeyValueRef(m.param1, m.param2)); txnStateStore->set(KeyValueRef(m.param1, m.param2));
TEST(true); // Snapshot created, setting writeRecoveryKey in txnStateStore TEST(true); // Snapshot created, setting writeRecoveryKey in txnStateStore

View File

@ -477,7 +477,7 @@ ACTOR Future<bool> monitorBackupStartedKeyChanges(BackupData* self, bool present
if (present || !watch) if (present || !watch)
return true; return true;
} else { } else {
TraceEvent("BackupWorkerEmptyStartKey", self->myId); TraceEvent("BackupWorkerEmptyStartKey", self->myId).log();
self->onBackupChanges(uidVersions); self->onBackupChanges(uidVersions);
self->exitEarly = shouldExit; self->exitEarly = shouldExit;
@ -887,7 +887,7 @@ ACTOR Future<Void> pullAsyncData(BackupData* self) {
state Version tagAt = std::max(self->pulledVersion.get(), std::max(self->startVersion, self->savedVersion)); state Version tagAt = std::max(self->pulledVersion.get(), std::max(self->startVersion, self->savedVersion));
state Arena prev; state Arena prev;
TraceEvent("BackupWorkerPull", self->myId); TraceEvent("BackupWorkerPull", self->myId).log();
loop { loop {
while (self->paused.get()) { while (self->paused.get()) {
wait(self->paused.onChange()); wait(self->paused.onChange());
@ -1017,7 +1017,7 @@ ACTOR static Future<Void> monitorWorkerPause(BackupData* self) {
Optional<Value> value = wait(tr->get(backupPausedKey)); Optional<Value> value = wait(tr->get(backupPausedKey));
bool paused = value.present() && value.get() == LiteralStringRef("1"); bool paused = value.present() && value.get() == LiteralStringRef("1");
if (self->paused.get() != paused) { if (self->paused.get() != paused) {
TraceEvent(paused ? "BackupWorkerPaused" : "BackupWorkerResumed", self->myId); TraceEvent(paused ? "BackupWorkerPaused" : "BackupWorkerResumed", self->myId).log();
self->paused.set(paused); self->paused.set(paused);
} }

View File

@ -195,6 +195,8 @@ public:
} }
loop { loop {
tr.reset();
// Wait for some changes // Wait for some changes
while (!self->anyDelta.get()) while (!self->anyDelta.get())
wait(self->anyDelta.onChange()); wait(self->anyDelta.onChange());
@ -1962,7 +1964,7 @@ public:
} }
if (bestDC != clusterControllerDcId) { if (bestDC != clusterControllerDcId) {
TraceEvent("BestDCIsNotClusterDC"); TraceEvent("BestDCIsNotClusterDC").log();
vector<Optional<Key>> dcPriority; vector<Optional<Key>> dcPriority;
dcPriority.push_back(bestDC); dcPriority.push_back(bestDC);
desiredDcIds.set(dcPriority); desiredDcIds.set(dcPriority);
@ -3094,7 +3096,7 @@ ACTOR Future<Void> clusterWatchDatabase(ClusterControllerData* cluster, ClusterC
// When this someday is implemented, make sure forced failures still cause the master to be recruited again // When this someday is implemented, make sure forced failures still cause the master to be recruited again
loop { loop {
TraceEvent("CCWDB", cluster->id); TraceEvent("CCWDB", cluster->id).log();
try { try {
state double recoveryStart = now(); state double recoveryStart = now();
TraceEvent("CCWDB", cluster->id).detail("Recruiting", "Master"); TraceEvent("CCWDB", cluster->id).detail("Recruiting", "Master");
@ -3915,7 +3917,7 @@ ACTOR Future<Void> timeKeeperSetVersion(ClusterControllerData* self) {
ACTOR Future<Void> timeKeeper(ClusterControllerData* self) { ACTOR Future<Void> timeKeeper(ClusterControllerData* self) {
state KeyBackedMap<int64_t, Version> versionMap(timeKeeperPrefixRange.begin); state KeyBackedMap<int64_t, Version> versionMap(timeKeeperPrefixRange.begin);
TraceEvent("TimeKeeperStarted"); TraceEvent("TimeKeeperStarted").log();
wait(timeKeeperSetVersion(self)); wait(timeKeeperSetVersion(self));
@ -3929,7 +3931,7 @@ ACTOR Future<Void> timeKeeper(ClusterControllerData* self) {
// how long it is taking to hear responses from each other component. // how long it is taking to hear responses from each other component.
UID debugID = deterministicRandom()->randomUniqueID(); UID debugID = deterministicRandom()->randomUniqueID();
TraceEvent("TimeKeeperCommit", debugID); TraceEvent("TimeKeeperCommit", debugID).log();
tr->debugTransaction(debugID); tr->debugTransaction(debugID);
} }
tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS); tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
@ -4080,7 +4082,7 @@ ACTOR Future<Void> monitorProcessClasses(ClusterControllerData* self) {
} }
wait(trVer.commit()); wait(trVer.commit());
TraceEvent("ProcessClassUpgrade"); TraceEvent("ProcessClassUpgrade").log();
break; break;
} catch (Error& e) { } catch (Error& e) {
wait(trVer.onError(e)); wait(trVer.onError(e));
@ -4509,7 +4511,7 @@ ACTOR Future<Void> handleForcedRecoveries(ClusterControllerData* self, ClusterCo
} }
wait(fCommit); wait(fCommit);
} }
TraceEvent("ForcedRecoveryFinish", self->id); TraceEvent("ForcedRecoveryFinish", self->id).log();
self->db.forceRecovery = false; self->db.forceRecovery = false;
req.reply.send(Void()); req.reply.send(Void());
} }
@ -4518,7 +4520,7 @@ ACTOR Future<Void> handleForcedRecoveries(ClusterControllerData* self, ClusterCo
ACTOR Future<DataDistributorInterface> startDataDistributor(ClusterControllerData* self) { ACTOR Future<DataDistributorInterface> startDataDistributor(ClusterControllerData* self) {
wait(delay(0.0)); // If master fails at the same time, give it a chance to clear master PID. wait(delay(0.0)); // If master fails at the same time, give it a chance to clear master PID.
TraceEvent("CCStartDataDistributor", self->id); TraceEvent("CCStartDataDistributor", self->id).log();
loop { loop {
try { try {
state bool no_distributor = !self->db.serverInfo->get().distributor.present(); state bool no_distributor = !self->db.serverInfo->get().distributor.present();
@ -4585,7 +4587,7 @@ ACTOR Future<Void> monitorDataDistributor(ClusterControllerData* self) {
ACTOR Future<Void> startRatekeeper(ClusterControllerData* self) { ACTOR Future<Void> startRatekeeper(ClusterControllerData* self) {
wait(delay(0.0)); // If master fails at the same time, give it a chance to clear master PID. wait(delay(0.0)); // If master fails at the same time, give it a chance to clear master PID.
TraceEvent("CCStartRatekeeper", self->id); TraceEvent("CCStartRatekeeper", self->id).log();
loop { loop {
try { try {
state bool no_ratekeeper = !self->db.serverInfo->get().ratekeeper.present(); state bool no_ratekeeper = !self->db.serverInfo->get().ratekeeper.present();
@ -4702,7 +4704,7 @@ ACTOR Future<Void> dbInfoUpdater(ClusterControllerData* self) {
req.serializedDbInfo = req.serializedDbInfo =
BinaryWriter::toValue(self->db.serverInfo->get(), AssumeVersion(g_network->protocolVersion())); BinaryWriter::toValue(self->db.serverInfo->get(), AssumeVersion(g_network->protocolVersion()));
TraceEvent("DBInfoStartBroadcast", self->id); TraceEvent("DBInfoStartBroadcast", self->id).log();
choose { choose {
when(std::vector<Endpoint> notUpdated = when(std::vector<Endpoint> notUpdated =
wait(broadcastDBInfoRequest(req, SERVER_KNOBS->DBINFO_SEND_AMOUNT, Optional<Endpoint>(), false))) { wait(broadcastDBInfoRequest(req, SERVER_KNOBS->DBINFO_SEND_AMOUNT, Optional<Endpoint>(), false))) {
@ -4757,7 +4759,7 @@ ACTOR Future<Void> workerHealthMonitor(ClusterControllerData* self) {
} }
} else { } else {
self->excludedDegradedServers.clear(); self->excludedDegradedServers.clear();
TraceEvent("DegradedServerDetectedAndSuggestRecovery"); TraceEvent("DegradedServerDetectedAndSuggestRecovery").log();
} }
} }
} }

View File

@ -1756,7 +1756,7 @@ ACTOR Future<Void> proxySnapCreate(ProxySnapRequest snapReq, ProxyCommitData* co
ACTOR Future<Void> proxyCheckSafeExclusion(Reference<AsyncVar<ServerDBInfo> const> db, ACTOR Future<Void> proxyCheckSafeExclusion(Reference<AsyncVar<ServerDBInfo> const> db,
ExclusionSafetyCheckRequest req) { ExclusionSafetyCheckRequest req) {
TraceEvent("SafetyCheckCommitProxyBegin"); TraceEvent("SafetyCheckCommitProxyBegin").log();
state ExclusionSafetyCheckReply reply(false); state ExclusionSafetyCheckReply reply(false);
if (!db->get().distributor.present()) { if (!db->get().distributor.present()) {
TraceEvent(SevWarnAlways, "DataDistributorNotPresent").detail("Operation", "ExclusionSafetyCheck"); TraceEvent(SevWarnAlways, "DataDistributorNotPresent").detail("Operation", "ExclusionSafetyCheck");
@ -1778,7 +1778,7 @@ ACTOR Future<Void> proxyCheckSafeExclusion(Reference<AsyncVar<ServerDBInfo> cons
throw e; throw e;
} }
} }
TraceEvent("SafetyCheckCommitProxyFinish"); TraceEvent("SafetyCheckCommitProxyFinish").log();
req.reply.send(reply); req.reply.send(reply);
return Void(); return Void();
} }
@ -1796,7 +1796,7 @@ ACTOR Future<Void> reportTxnTagCommitCost(UID myID,
TraceEvent("ProxyRatekeeperChanged", myID).detail("RKID", db->get().ratekeeper.get().id()); TraceEvent("ProxyRatekeeperChanged", myID).detail("RKID", db->get().ratekeeper.get().id());
nextRequestTimer = Void(); nextRequestTimer = Void();
} else { } else {
TraceEvent("ProxyRatekeeperDied", myID); TraceEvent("ProxyRatekeeperDied", myID).log();
nextRequestTimer = Never(); nextRequestTimer = Never();
} }
} }
@ -1936,7 +1936,7 @@ ACTOR Future<Void> commitProxyServerCore(CommitProxyInterface proxy,
} }
} }
when(ProxySnapRequest snapReq = waitNext(proxy.proxySnapReq.getFuture())) { when(ProxySnapRequest snapReq = waitNext(proxy.proxySnapReq.getFuture())) {
TraceEvent(SevDebug, "SnapMasterEnqueue"); TraceEvent(SevDebug, "SnapMasterEnqueue").log();
addActor.send(proxySnapCreate(snapReq, &commitData)); addActor.send(proxySnapCreate(snapReq, &commitData));
} }
when(ExclusionSafetyCheckRequest exclCheckReq = waitNext(proxy.exclusionSafetyCheckReq.getFuture())) { when(ExclusionSafetyCheckRequest exclCheckReq = waitNext(proxy.exclusionSafetyCheckReq.getFuture())) {

View File

@ -316,7 +316,7 @@ struct MovableCoordinatedStateImpl {
Value oldQuorumState = wait(cs.read()); Value oldQuorumState = wait(cs.read());
if (oldQuorumState != self->lastCSValue.get()) { if (oldQuorumState != self->lastCSValue.get()) {
TEST(true); // Quorum change aborted by concurrent write to old coordination state TEST(true); // Quorum change aborted by concurrent write to old coordination state
TraceEvent("QuorumChangeAbortedByConcurrency"); TraceEvent("QuorumChangeAbortedByConcurrency").log();
throw coordinated_state_conflict(); throw coordinated_state_conflict();
} }

View File

@ -143,7 +143,7 @@ class WorkPool final : public IThreadPool, public ReferenceCounted<WorkPool<Thre
} }
}; };
struct Worker : Threadlike { struct Worker final : Threadlike {
Pool* pool; Pool* pool;
IThreadPoolReceiver* userData; IThreadPoolReceiver* userData;
bool stop; bool stop;
@ -173,7 +173,7 @@ class WorkPool final : public IThreadPool, public ReferenceCounted<WorkPool<Thre
} }
} }
TraceEvent("CoroStop"); TraceEvent("CoroStop").log();
delete userData; delete userData;
stopped.send(Void()); stopped.send(Void());
return; return;
@ -181,14 +181,14 @@ class WorkPool final : public IThreadPool, public ReferenceCounted<WorkPool<Thre
TraceEvent("WorkPoolError").error(e, true); TraceEvent("WorkPoolError").error(e, true);
error.sendError(e); error.sendError(e);
} catch (...) { } catch (...) {
TraceEvent("WorkPoolError"); TraceEvent("WorkPoolError").log();
error.sendError(unknown_error()); error.sendError(unknown_error());
} }
try { try {
delete userData; delete userData;
} catch (...) { } catch (...) {
TraceEvent(SevError, "WorkPoolErrorShutdownError"); TraceEvent(SevError, "WorkPoolErrorShutdownError").log();
} }
stopped.send(Void()); stopped.send(Void());
} }

View File

@ -149,7 +149,7 @@ class WorkPool final : public IThreadPool, public ReferenceCounted<WorkPool<Thre
} }
} }
TraceEvent("CoroStop"); TraceEvent("CoroStop").log();
delete userData; delete userData;
stopped.send(Void()); stopped.send(Void());
return; return;
@ -157,14 +157,14 @@ class WorkPool final : public IThreadPool, public ReferenceCounted<WorkPool<Thre
TraceEvent("WorkPoolError").error(e, true); TraceEvent("WorkPoolError").error(e, true);
error.sendError(e); error.sendError(e);
} catch (...) { } catch (...) {
TraceEvent("WorkPoolError"); TraceEvent("WorkPoolError").log();
error.sendError(unknown_error()); error.sendError(unknown_error());
} }
try { try {
delete userData; delete userData;
} catch (...) { } catch (...) {
TraceEvent(SevError, "WorkPoolErrorShutdownError"); TraceEvent(SevError, "WorkPoolErrorShutdownError").log();
} }
stopped.send(Void()); stopped.send(Void());
} }

View File

@ -190,7 +190,7 @@ public:
: servers(servers), healthy(true), priority(SERVER_KNOBS->PRIORITY_TEAM_HEALTHY), wrongConfiguration(false), : servers(servers), healthy(true), priority(SERVER_KNOBS->PRIORITY_TEAM_HEALTHY), wrongConfiguration(false),
id(deterministicRandom()->randomUniqueID()) { id(deterministicRandom()->randomUniqueID()) {
if (servers.empty()) { if (servers.empty()) {
TraceEvent(SevInfo, "ConstructTCTeamFromEmptyServers"); TraceEvent(SevInfo, "ConstructTCTeamFromEmptyServers").log();
} }
serverIDs.reserve(servers.size()); serverIDs.reserve(servers.size());
for (int i = 0; i < servers.size(); i++) { for (int i = 0; i < servers.size(); i++) {
@ -445,7 +445,7 @@ ACTOR Future<Reference<InitialDataDistribution>> getInitialDataDistribution(Data
} }
if (!result->mode || !ddEnabledState->isDDEnabled()) { if (!result->mode || !ddEnabledState->isDDEnabled()) {
// DD can be disabled persistently (result->mode = 0) or transiently (isDDEnabled() = 0) // DD can be disabled persistently (result->mode = 0) or transiently (isDDEnabled() = 0)
TraceEvent(SevDebug, "GetInitialDataDistribution_DisabledDD"); TraceEvent(SevDebug, "GetInitialDataDistribution_DisabledDD").log();
return result; return result;
} }
@ -475,7 +475,7 @@ ACTOR Future<Reference<InitialDataDistribution>> getInitialDataDistribution(Data
wait(tr.onError(e)); wait(tr.onError(e));
ASSERT(!succeeded); // We shouldn't be retrying if we have already started modifying result in this loop ASSERT(!succeeded); // We shouldn't be retrying if we have already started modifying result in this loop
TraceEvent("GetInitialTeamsRetry", distributorId); TraceEvent("GetInitialTeamsRetry", distributorId).log();
} }
} }
@ -4160,14 +4160,14 @@ ACTOR Future<Void> monitorPerpetualStorageWiggle(DDTeamCollection* teamCollectio
&stopWiggleSignal, finishStorageWiggleSignal.getFuture(), teamCollection)); &stopWiggleSignal, finishStorageWiggleSignal.getFuture(), teamCollection));
collection.add(perpetualStorageWiggler( collection.add(perpetualStorageWiggler(
&stopWiggleSignal, finishStorageWiggleSignal, teamCollection, ddEnabledState)); &stopWiggleSignal, finishStorageWiggleSignal, teamCollection, ddEnabledState));
TraceEvent("PerpetualStorageWiggleOpen", teamCollection->distributorId); TraceEvent("PerpetualStorageWiggleOpen", teamCollection->distributorId).log();
} else if (speed == 0) { } else if (speed == 0) {
if (!stopWiggleSignal.get()) { if (!stopWiggleSignal.get()) {
stopWiggleSignal.set(true); stopWiggleSignal.set(true);
wait(collection.signalAndReset()); wait(collection.signalAndReset());
teamCollection->pauseWiggle->set(true); teamCollection->pauseWiggle->set(true);
} }
TraceEvent("PerpetualStorageWiggleClose", teamCollection->distributorId); TraceEvent("PerpetualStorageWiggleClose", teamCollection->distributorId).log();
} }
wait(watchFuture); wait(watchFuture);
break; break;
@ -4262,7 +4262,7 @@ ACTOR Future<Void> waitHealthyZoneChange(DDTeamCollection* self) {
auto p = decodeHealthyZoneValue(val.get()); auto p = decodeHealthyZoneValue(val.get());
if (p.first == ignoreSSFailuresZoneString) { if (p.first == ignoreSSFailuresZoneString) {
// healthyZone is now overloaded for DD diabling purpose, which does not timeout // healthyZone is now overloaded for DD diabling purpose, which does not timeout
TraceEvent("DataDistributionDisabledForStorageServerFailuresStart", self->distributorId); TraceEvent("DataDistributionDisabledForStorageServerFailuresStart", self->distributorId).log();
healthyZoneTimeout = Never(); healthyZoneTimeout = Never();
} else if (p.second > tr.getReadVersion().get()) { } else if (p.second > tr.getReadVersion().get()) {
double timeoutSeconds = double timeoutSeconds =
@ -4277,15 +4277,15 @@ ACTOR Future<Void> waitHealthyZoneChange(DDTeamCollection* self) {
} }
} else if (self->healthyZone.get().present()) { } else if (self->healthyZone.get().present()) {
// maintenance hits timeout // maintenance hits timeout
TraceEvent("MaintenanceZoneEndTimeout", self->distributorId); TraceEvent("MaintenanceZoneEndTimeout", self->distributorId).log();
self->healthyZone.set(Optional<Key>()); self->healthyZone.set(Optional<Key>());
} }
} else if (self->healthyZone.get().present()) { } else if (self->healthyZone.get().present()) {
// `healthyZone` has been cleared // `healthyZone` has been cleared
if (self->healthyZone.get().get() == ignoreSSFailuresZoneString) { if (self->healthyZone.get().get() == ignoreSSFailuresZoneString) {
TraceEvent("DataDistributionDisabledForStorageServerFailuresEnd", self->distributorId); TraceEvent("DataDistributionDisabledForStorageServerFailuresEnd", self->distributorId).log();
} else { } else {
TraceEvent("MaintenanceZoneEndManualClear", self->distributorId); TraceEvent("MaintenanceZoneEndManualClear", self->distributorId).log();
} }
self->healthyZone.set(Optional<Key>()); self->healthyZone.set(Optional<Key>());
} }
@ -4432,7 +4432,7 @@ ACTOR Future<Void> storageServerFailureTracker(DDTeamCollection* self,
status->isFailed = false; status->isFailed = false;
} else if (self->clearHealthyZoneFuture.isReady()) { } else if (self->clearHealthyZoneFuture.isReady()) {
self->clearHealthyZoneFuture = clearHealthyZone(self->cx); self->clearHealthyZoneFuture = clearHealthyZone(self->cx);
TraceEvent("MaintenanceZoneCleared", self->distributorId); TraceEvent("MaintenanceZoneCleared", self->distributorId).log();
self->healthyZone.set(Optional<Key>()); self->healthyZone.set(Optional<Key>());
} }
} }
@ -5491,7 +5491,7 @@ ACTOR Future<Void> serverGetTeamRequests(TeamCollectionInterface tci, DDTeamColl
} }
ACTOR Future<Void> remoteRecovered(Reference<AsyncVar<ServerDBInfo> const> db) { ACTOR Future<Void> remoteRecovered(Reference<AsyncVar<ServerDBInfo> const> db) {
TraceEvent("DDTrackerStarting"); TraceEvent("DDTrackerStarting").log();
while (db->get().recoveryState < RecoveryState::ALL_LOGS_RECRUITED) { while (db->get().recoveryState < RecoveryState::ALL_LOGS_RECRUITED) {
TraceEvent("DDTrackerStarting").detail("RecoveryState", (int)db->get().recoveryState); TraceEvent("DDTrackerStarting").detail("RecoveryState", (int)db->get().recoveryState);
wait(db->onChange()); wait(db->onChange());
@ -5625,7 +5625,7 @@ ACTOR Future<Void> waitForDataDistributionEnabled(Database cx, const DDEnabledSt
try { try {
Optional<Value> mode = wait(tr.get(dataDistributionModeKey)); Optional<Value> mode = wait(tr.get(dataDistributionModeKey));
if (!mode.present() && ddEnabledState->isDDEnabled()) { if (!mode.present() && ddEnabledState->isDDEnabled()) {
TraceEvent("WaitForDDEnabledSucceeded"); TraceEvent("WaitForDDEnabledSucceeded").log();
return Void(); return Void();
} }
if (mode.present()) { if (mode.present()) {
@ -5636,7 +5636,7 @@ ACTOR Future<Void> waitForDataDistributionEnabled(Database cx, const DDEnabledSt
.detail("Mode", m) .detail("Mode", m)
.detail("IsDDEnabled", ddEnabledState->isDDEnabled()); .detail("IsDDEnabled", ddEnabledState->isDDEnabled());
if (m && ddEnabledState->isDDEnabled()) { if (m && ddEnabledState->isDDEnabled()) {
TraceEvent("WaitForDDEnabledSucceeded"); TraceEvent("WaitForDDEnabledSucceeded").log();
return Void(); return Void();
} }
} }
@ -5711,7 +5711,7 @@ ACTOR Future<Void> debugCheckCoalescing(Database cx) {
.detail("Value", ranges[j].value); .detail("Value", ranges[j].value);
} }
TraceEvent("DoneCheckingCoalescing"); TraceEvent("DoneCheckingCoalescing").log();
return Void(); return Void();
} catch (Error& e) { } catch (Error& e) {
wait(tr.onError(e)); wait(tr.onError(e));
@ -5807,10 +5807,10 @@ ACTOR Future<Void> dataDistribution(Reference<DataDistributorData> self,
state Promise<UID> removeFailedServer; state Promise<UID> removeFailedServer;
try { try {
loop { loop {
TraceEvent("DDInitTakingMoveKeysLock", self->ddId); TraceEvent("DDInitTakingMoveKeysLock", self->ddId).log();
MoveKeysLock lock_ = wait(takeMoveKeysLock(cx, self->ddId)); MoveKeysLock lock_ = wait(takeMoveKeysLock(cx, self->ddId));
lock = lock_; lock = lock_;
TraceEvent("DDInitTookMoveKeysLock", self->ddId); TraceEvent("DDInitTookMoveKeysLock", self->ddId).log();
DatabaseConfiguration configuration_ = wait(getDatabaseConfiguration(cx)); DatabaseConfiguration configuration_ = wait(getDatabaseConfiguration(cx));
configuration = configuration_; configuration = configuration_;
@ -5854,7 +5854,7 @@ ACTOR Future<Void> dataDistribution(Reference<DataDistributorData> self,
} }
} }
TraceEvent("DDInitUpdatedReplicaKeys", self->ddId); TraceEvent("DDInitUpdatedReplicaKeys", self->ddId).log();
Reference<InitialDataDistribution> initData_ = wait(getInitialDataDistribution( Reference<InitialDataDistribution> initData_ = wait(getInitialDataDistribution(
cx, cx,
self->ddId, self->ddId,
@ -5882,7 +5882,7 @@ ACTOR Future<Void> dataDistribution(Reference<DataDistributorData> self,
// mode may be set true by system operator using fdbcli and isDDEnabled() set to true // mode may be set true by system operator using fdbcli and isDDEnabled() set to true
break; break;
} }
TraceEvent("DataDistributionDisabled", self->ddId); TraceEvent("DataDistributionDisabled", self->ddId).log();
TraceEvent("MovingData", self->ddId) TraceEvent("MovingData", self->ddId)
.detail("InFlight", 0) .detail("InFlight", 0)
@ -5919,7 +5919,7 @@ ACTOR Future<Void> dataDistribution(Reference<DataDistributorData> self,
.trackLatest("TotalDataInFlightRemote"); .trackLatest("TotalDataInFlightRemote");
wait(waitForDataDistributionEnabled(cx, ddEnabledState)); wait(waitForDataDistributionEnabled(cx, ddEnabledState));
TraceEvent("DataDistributionEnabled"); TraceEvent("DataDistributionEnabled").log();
} }
// When/If this assertion fails, Evan owes Ben a pat on the back for his foresight // When/If this assertion fails, Evan owes Ben a pat on the back for his foresight
@ -6256,7 +6256,7 @@ ACTOR Future<Void> ddSnapCreateCore(DistributorSnapRequest snapReq, Reference<As
} }
wait(waitForAll(enablePops)); wait(waitForAll(enablePops));
} catch (Error& error) { } catch (Error& error) {
TraceEvent(SevDebug, "IgnoreEnableTLogPopFailure"); TraceEvent(SevDebug, "IgnoreEnableTLogPopFailure").log();
} }
} }
throw e; throw e;
@ -6271,7 +6271,7 @@ ACTOR Future<Void> ddSnapCreate(DistributorSnapRequest snapReq,
if (!ddEnabledState->setDDEnabled(false, snapReq.snapUID)) { if (!ddEnabledState->setDDEnabled(false, snapReq.snapUID)) {
// disable DD before doing snapCreate, if previous snap req has already disabled DD then this operation fails // disable DD before doing snapCreate, if previous snap req has already disabled DD then this operation fails
// here // here
TraceEvent("SnapDDSetDDEnabledFailedInMemoryCheck"); TraceEvent("SnapDDSetDDEnabledFailedInMemoryCheck").log();
snapReq.reply.sendError(operation_failed()); snapReq.reply.sendError(operation_failed());
return Void(); return Void();
} }
@ -6344,18 +6344,18 @@ bool _exclusionSafetyCheck(vector<UID>& excludeServerIDs, DDTeamCollection* team
ACTOR Future<Void> ddExclusionSafetyCheck(DistributorExclusionSafetyCheckRequest req, ACTOR Future<Void> ddExclusionSafetyCheck(DistributorExclusionSafetyCheckRequest req,
Reference<DataDistributorData> self, Reference<DataDistributorData> self,
Database cx) { Database cx) {
TraceEvent("DDExclusionSafetyCheckBegin", self->ddId); TraceEvent("DDExclusionSafetyCheckBegin", self->ddId).log();
vector<StorageServerInterface> ssis = wait(getStorageServers(cx)); vector<StorageServerInterface> ssis = wait(getStorageServers(cx));
DistributorExclusionSafetyCheckReply reply(true); DistributorExclusionSafetyCheckReply reply(true);
if (!self->teamCollection) { if (!self->teamCollection) {
TraceEvent("DDExclusionSafetyCheckTeamCollectionInvalid", self->ddId); TraceEvent("DDExclusionSafetyCheckTeamCollectionInvalid", self->ddId).log();
reply.safe = false; reply.safe = false;
req.reply.send(reply); req.reply.send(reply);
return Void(); return Void();
} }
// If there is only 1 team, unsafe to mark failed: team building can get stuck due to lack of servers left // If there is only 1 team, unsafe to mark failed: team building can get stuck due to lack of servers left
if (self->teamCollection->teams.size() <= 1) { if (self->teamCollection->teams.size() <= 1) {
TraceEvent("DDExclusionSafetyCheckNotEnoughTeams", self->ddId); TraceEvent("DDExclusionSafetyCheckNotEnoughTeams", self->ddId).log();
reply.safe = false; reply.safe = false;
req.reply.send(reply); req.reply.send(reply);
return Void(); return Void();
@ -6371,7 +6371,7 @@ ACTOR Future<Void> ddExclusionSafetyCheck(DistributorExclusionSafetyCheckRequest
} }
} }
reply.safe = _exclusionSafetyCheck(excludeServerIDs, self->teamCollection); reply.safe = _exclusionSafetyCheck(excludeServerIDs, self->teamCollection);
TraceEvent("DDExclusionSafetyCheckFinish", self->ddId); TraceEvent("DDExclusionSafetyCheckFinish", self->ddId).log();
req.reply.send(reply); req.reply.send(reply);
return Void(); return Void();
} }

View File

@ -300,7 +300,7 @@ ACTOR Future<Void> getRate(UID myID,
TraceEvent("ProxyRatekeeperChanged", myID).detail("RKID", db->get().ratekeeper.get().id()); TraceEvent("ProxyRatekeeperChanged", myID).detail("RKID", db->get().ratekeeper.get().id());
nextRequestTimer = Void(); // trigger GetRate request nextRequestTimer = Void(); // trigger GetRate request
} else { } else {
TraceEvent("ProxyRatekeeperDied", myID); TraceEvent("ProxyRatekeeperDied", myID).log();
nextRequestTimer = Never(); nextRequestTimer = Never();
reply = Never(); reply = Never();
} }

View File

@ -32,6 +32,7 @@
*/ */
class IConfigDatabaseNode : public ReferenceCounted<IConfigDatabaseNode> { class IConfigDatabaseNode : public ReferenceCounted<IConfigDatabaseNode> {
public: public:
virtual ~IConfigDatabaseNode() = default;
virtual Future<Void> serve(ConfigTransactionInterface const&) = 0; virtual Future<Void> serve(ConfigTransactionInterface const&) = 0;
virtual Future<Void> serve(ConfigFollowerInterface const&) = 0; virtual Future<Void> serve(ConfigFollowerInterface const&) = 0;

View File

@ -141,7 +141,7 @@ public:
Future<Void> commit(bool sequential) override { Future<Void> commit(bool sequential) override {
if (getAvailableSize() <= 0) { if (getAvailableSize() <= 0) {
TraceEvent(SevError, "KeyValueStoreMemory_OutOfSpace", id); TraceEvent(SevError, "KeyValueStoreMemory_OutOfSpace", id).log();
return Never(); return Never();
} }
@ -605,7 +605,7 @@ private:
if (zeroFillSize) { if (zeroFillSize) {
if (exactRecovery) { if (exactRecovery) {
TraceEvent(SevError, "KVSMemExpectedExact", self->id); TraceEvent(SevError, "KVSMemExpectedExact", self->id).log();
ASSERT(false); ASSERT(false);
} }

View File

@ -727,7 +727,7 @@ struct RawCursor {
try { try {
db.checkError("BtreeCloseCursor", sqlite3BtreeCloseCursor(cursor)); db.checkError("BtreeCloseCursor", sqlite3BtreeCloseCursor(cursor));
} catch (...) { } catch (...) {
TraceEvent(SevError, "RawCursorDestructionError"); TraceEvent(SevError, "RawCursorDestructionError").log();
} }
delete[](char*) cursor; delete[](char*) cursor;
} }
@ -1639,7 +1639,7 @@ private:
return cursor; return cursor;
} }
struct ReadValueAction : TypedAction<Reader, ReadValueAction>, FastAllocated<ReadValueAction> { struct ReadValueAction final : TypedAction<Reader, ReadValueAction>, FastAllocated<ReadValueAction> {
Key key; Key key;
Optional<UID> debugID; Optional<UID> debugID;
ThreadReturnPromise<Optional<Value>> result; ThreadReturnPromise<Optional<Value>> result;
@ -1692,7 +1692,7 @@ private:
// if (t >= 1.0) TraceEvent("ReadValuePrefixActionSlow",dbgid).detail("Elapsed", t); // if (t >= 1.0) TraceEvent("ReadValuePrefixActionSlow",dbgid).detail("Elapsed", t);
} }
struct ReadRangeAction : TypedAction<Reader, ReadRangeAction>, FastAllocated<ReadRangeAction> { struct ReadRangeAction final : TypedAction<Reader, ReadRangeAction>, FastAllocated<ReadRangeAction> {
KeyRange keys; KeyRange keys;
int rowLimit, byteLimit; int rowLimit, byteLimit;
ThreadReturnPromise<RangeResult> result; ThreadReturnPromise<RangeResult> result;
@ -1737,9 +1737,9 @@ private:
freeListPages(freeListPages), cursor(nullptr), dbgid(dbgid), readThreads(*pReadThreads), freeListPages(freeListPages), cursor(nullptr), dbgid(dbgid), readThreads(*pReadThreads),
checkAllChecksumsOnOpen(checkAllChecksumsOnOpen), checkIntegrityOnOpen(checkIntegrityOnOpen) {} checkAllChecksumsOnOpen(checkAllChecksumsOnOpen), checkIntegrityOnOpen(checkIntegrityOnOpen) {}
~Writer() override { ~Writer() override {
TraceEvent("KVWriterDestroying", dbgid); TraceEvent("KVWriterDestroying", dbgid).log();
delete cursor; delete cursor;
TraceEvent("KVWriterDestroyed", dbgid); TraceEvent("KVWriterDestroyed", dbgid).log();
} }
void init() override { void init() override {
if (checkAllChecksumsOnOpen) { if (checkAllChecksumsOnOpen) {
@ -1775,7 +1775,7 @@ private:
} }
} }
struct InitAction : TypedAction<Writer, InitAction>, FastAllocated<InitAction> { struct InitAction final : TypedAction<Writer, InitAction>, FastAllocated<InitAction> {
ThreadReturnPromise<Void> result; ThreadReturnPromise<Void> result;
double getTimeEstimate() const override { return 0; } double getTimeEstimate() const override { return 0; }
}; };
@ -1784,7 +1784,7 @@ private:
a.result.send(Void()); a.result.send(Void());
} }
struct SetAction : TypedAction<Writer, SetAction>, FastAllocated<SetAction> { struct SetAction final : TypedAction<Writer, SetAction>, FastAllocated<SetAction> {
KeyValue kv; KeyValue kv;
SetAction(KeyValue kv) : kv(kv) {} SetAction(KeyValue kv) : kv(kv) {}
double getTimeEstimate() const override { return SERVER_KNOBS->SET_TIME_ESTIMATE; } double getTimeEstimate() const override { return SERVER_KNOBS->SET_TIME_ESTIMATE; }
@ -1799,7 +1799,7 @@ private:
TraceEvent("SetActionFinished", dbgid).detail("Elapsed", now() - s); TraceEvent("SetActionFinished", dbgid).detail("Elapsed", now() - s);
} }
struct ClearAction : TypedAction<Writer, ClearAction>, FastAllocated<ClearAction> { struct ClearAction final : TypedAction<Writer, ClearAction>, FastAllocated<ClearAction> {
KeyRange range; KeyRange range;
ClearAction(KeyRange range) : range(range) {} ClearAction(KeyRange range) : range(range) {}
double getTimeEstimate() const override { return SERVER_KNOBS->CLEAR_TIME_ESTIMATE; } double getTimeEstimate() const override { return SERVER_KNOBS->CLEAR_TIME_ESTIMATE; }
@ -1813,7 +1813,7 @@ private:
TraceEvent("ClearActionFinished", dbgid).detail("Elapsed", now() - s); TraceEvent("ClearActionFinished", dbgid).detail("Elapsed", now() - s);
} }
struct CommitAction : TypedAction<Writer, CommitAction>, FastAllocated<CommitAction> { struct CommitAction final : TypedAction<Writer, CommitAction>, FastAllocated<CommitAction> {
double issuedTime; double issuedTime;
ThreadReturnPromise<Void> result; ThreadReturnPromise<Void> result;
CommitAction() : issuedTime(now()) {} CommitAction() : issuedTime(now()) {}
@ -1887,7 +1887,8 @@ private:
// freeListPages, iterationsi, freeTableEmpty); // freeListPages, iterationsi, freeTableEmpty);
} }
struct SpringCleaningAction : TypedAction<Writer, SpringCleaningAction>, FastAllocated<SpringCleaningAction> { struct SpringCleaningAction final : TypedAction<Writer, SpringCleaningAction>,
FastAllocated<SpringCleaningAction> {
ThreadReturnPromise<SpringCleaningWorkPerformed> result; ThreadReturnPromise<SpringCleaningWorkPerformed> result;
double getTimeEstimate() const override { double getTimeEstimate() const override {
return std::max(SERVER_KNOBS->SPRING_CLEANING_LAZY_DELETE_TIME_ESTIMATE, return std::max(SERVER_KNOBS->SPRING_CLEANING_LAZY_DELETE_TIME_ESTIMATE,

View File

@ -156,7 +156,7 @@ ACTOR Future<Void> tryBecomeLeaderInternal(ServerCoordinators coordinators,
} }
if (leader.present() && leader.get().second && leader.get().first.equalInternalId(myInfo)) { if (leader.present() && leader.get().second && leader.get().first.equalInternalId(myInfo)) {
TraceEvent("BecomingLeader", myInfo.changeID); TraceEvent("BecomingLeader", myInfo.changeID).log();
ASSERT(leader.get().first.serializedInfo == proposedSerializedInterface); ASSERT(leader.get().first.serializedInfo == proposedSerializedInterface);
outSerializedLeader->set(leader.get().first.serializedInfo); outSerializedLeader->set(leader.get().first.serializedInfo);
iAmLeader = true; iAmLeader = true;
@ -184,7 +184,7 @@ ACTOR Future<Void> tryBecomeLeaderInternal(ServerCoordinators coordinators,
when(wait(nominees->onChange())) {} when(wait(nominees->onChange())) {}
when(wait(badCandidateTimeout.isValid() ? badCandidateTimeout : Never())) { when(wait(badCandidateTimeout.isValid() ? badCandidateTimeout : Never())) {
TEST(true); // Bad candidate timeout TEST(true); // Bad candidate timeout
TraceEvent("LeaderBadCandidateTimeout", myInfo.changeID); TraceEvent("LeaderBadCandidateTimeout", myInfo.changeID).log();
break; break;
} }
when(wait(candidacies)) { ASSERT(false); } when(wait(candidacies)) { ASSERT(false); }
@ -225,7 +225,7 @@ ACTOR Future<Void> tryBecomeLeaderInternal(ServerCoordinators coordinators,
//TraceEvent("StillLeader", myInfo.changeID); //TraceEvent("StillLeader", myInfo.changeID);
} // We are still leader } // We are still leader
when(wait(quorum(false_heartbeats, false_heartbeats.size() / 2 + 1))) { when(wait(quorum(false_heartbeats, false_heartbeats.size() / 2 + 1))) {
TraceEvent("ReplacedAsLeader", myInfo.changeID); TraceEvent("ReplacedAsLeader", myInfo.changeID).log();
break; break;
} // We are definitely not leader } // We are definitely not leader
when(wait(delay(SERVER_KNOBS->POLLING_FREQUENCY))) { when(wait(delay(SERVER_KNOBS->POLLING_FREQUENCY))) {
@ -243,7 +243,7 @@ ACTOR Future<Void> tryBecomeLeaderInternal(ServerCoordinators coordinators,
.detail("Coordinator", .detail("Coordinator",
coordinators.leaderElectionServers[i].candidacy.getEndpoint().getPrimaryAddress()); coordinators.leaderElectionServers[i].candidacy.getEndpoint().getPrimaryAddress());
} }
TraceEvent("ReleasingLeadership", myInfo.changeID); TraceEvent("ReleasingLeadership", myInfo.changeID).log();
break; break;
} // Give up on being leader, because we apparently have poor communications } // Give up on being leader, because we apparently have poor communications
when(wait(asyncPriorityInfo->onChange())) {} when(wait(asyncPriorityInfo->onChange())) {}

View File

@ -291,7 +291,7 @@ public:
if (allLocations) { if (allLocations) {
// special handling for allLocations // special handling for allLocations
TraceEvent("AllLocationsSet"); TraceEvent("AllLocationsSet").log();
for (int i = 0; i < logServers.size(); i++) { for (int i = 0; i < logServers.size(); i++) {
newLocations.push_back(i); newLocations.push_back(i);
} }

View File

@ -374,7 +374,7 @@ ACTOR Future<Void> updateMetricRegistration(Database cx, MetricsConfig* config,
ACTOR Future<Void> runMetrics(Future<Database> fcx, Key prefix) { ACTOR Future<Void> runMetrics(Future<Database> fcx, Key prefix) {
// Never log to an empty prefix, it's pretty much always a bad idea. // Never log to an empty prefix, it's pretty much always a bad idea.
if (prefix.size() == 0) { if (prefix.size() == 0) {
TraceEvent(SevWarnAlways, "TDMetricsRefusingEmptyPrefix"); TraceEvent(SevWarnAlways, "TDMetricsRefusingEmptyPrefix").log();
return Void(); return Void();
} }

View File

@ -100,7 +100,7 @@ ACTOR static Future<Void> checkMoveKeysLock(Transaction* tr,
const DDEnabledState* ddEnabledState, const DDEnabledState* ddEnabledState,
bool isWrite = true) { bool isWrite = true) {
if (!ddEnabledState->isDDEnabled()) { if (!ddEnabledState->isDDEnabled()) {
TraceEvent(SevDebug, "DDDisabledByInMemoryCheck"); TraceEvent(SevDebug, "DDDisabledByInMemoryCheck").log();
throw movekeys_conflict(); throw movekeys_conflict();
} }
Optional<Value> readVal = wait(tr->get(moveKeysLockOwnerKey)); Optional<Value> readVal = wait(tr->get(moveKeysLockOwnerKey));
@ -1143,7 +1143,7 @@ ACTOR Future<std::pair<Version, Tag>> addStorageServer(Database cx, StorageServe
if (SERVER_KNOBS->TSS_HACK_IDENTITY_MAPPING) { if (SERVER_KNOBS->TSS_HACK_IDENTITY_MAPPING) {
// THIS SHOULD NEVER BE ENABLED IN ANY NON-TESTING ENVIRONMENT // THIS SHOULD NEVER BE ENABLED IN ANY NON-TESTING ENVIRONMENT
TraceEvent(SevError, "TSSIdentityMappingEnabled"); TraceEvent(SevError, "TSSIdentityMappingEnabled").log();
tssMapDB.set(tr, server.id(), server.id()); tssMapDB.set(tr, server.id(), server.id());
} }
} }
@ -1268,7 +1268,7 @@ ACTOR Future<Void> removeStorageServer(Database cx,
if (SERVER_KNOBS->TSS_HACK_IDENTITY_MAPPING) { if (SERVER_KNOBS->TSS_HACK_IDENTITY_MAPPING) {
// THIS SHOULD NEVER BE ENABLED IN ANY NON-TESTING ENVIRONMENT // THIS SHOULD NEVER BE ENABLED IN ANY NON-TESTING ENVIRONMENT
TraceEvent(SevError, "TSSIdentityMappingEnabled"); TraceEvent(SevError, "TSSIdentityMappingEnabled").log();
tssMapDB.erase(tr, serverID); tssMapDB.erase(tr, serverID);
} else if (tssPairID.present()) { } else if (tssPairID.present()) {
// remove the TSS from the mapping // remove the TSS from the mapping
@ -1440,7 +1440,7 @@ void seedShardServers(Arena& arena, CommitTransactionRef& tr, vector<StorageServ
tr.set(arena, serverListKeyFor(s.id()), serverListValue(s)); tr.set(arena, serverListKeyFor(s.id()), serverListValue(s));
if (SERVER_KNOBS->TSS_HACK_IDENTITY_MAPPING) { if (SERVER_KNOBS->TSS_HACK_IDENTITY_MAPPING) {
// THIS SHOULD NEVER BE ENABLED IN ANY NON-TESTING ENVIRONMENT // THIS SHOULD NEVER BE ENABLED IN ANY NON-TESTING ENVIRONMENT
TraceEvent(SevError, "TSSIdentityMappingEnabled"); TraceEvent(SevError, "TSSIdentityMappingEnabled").log();
// hack key-backed map here since we can't really change CommitTransactionRef to a RYW transaction // hack key-backed map here since we can't really change CommitTransactionRef to a RYW transaction
Key uidRef = Codec<UID>::pack(s.id()).pack(); Key uidRef = Codec<UID>::pack(s.id()).pack();
tr.set(arena, uidRef.withPrefix(tssMappingKeys.begin), uidRef); tr.set(arena, uidRef.withPrefix(tssMappingKeys.begin), uidRef);

View File

@ -108,7 +108,7 @@ struct TLogQueueEntryRef {
typedef Standalone<TLogQueueEntryRef> TLogQueueEntry; typedef Standalone<TLogQueueEntryRef> TLogQueueEntry;
struct TLogQueue : public IClosable { struct TLogQueue final : public IClosable {
public: public:
TLogQueue(IDiskQueue* queue, UID dbgid) : queue(queue), dbgid(dbgid) {} TLogQueue(IDiskQueue* queue, UID dbgid) : queue(queue), dbgid(dbgid) {}
@ -1387,7 +1387,7 @@ ACTOR Future<Void> restorePersistentState(TLogData* self, LocalityData locality)
state KeyRange tagKeys; state KeyRange tagKeys;
// PERSIST: Read basic state from persistentData; replay persistentQueue but don't erase it // PERSIST: Read basic state from persistentData; replay persistentQueue but don't erase it
TraceEvent("TLogRestorePersistentState", self->dbgid); TraceEvent("TLogRestorePersistentState", self->dbgid).log();
IKeyValueStore* storage = self->persistentData; IKeyValueStore* storage = self->persistentData;
state Future<Optional<Value>> fFormat = storage->readValue(persistFormat.key); state Future<Optional<Value>> fFormat = storage->readValue(persistFormat.key);
@ -1575,7 +1575,7 @@ ACTOR Future<Void> tLog(IKeyValueStore* persistentData,
state TLogData self(tlogId, workerID, persistentData, persistentQueue, db); state TLogData self(tlogId, workerID, persistentData, persistentQueue, db);
state Future<Void> error = actorCollection(self.sharedActors.getFuture()); state Future<Void> error = actorCollection(self.sharedActors.getFuture());
TraceEvent("SharedTlog", tlogId); TraceEvent("SharedTlog", tlogId).log();
try { try {
wait(restorePersistentState(&self, locality)); wait(restorePersistentState(&self, locality));

View File

@ -99,7 +99,7 @@ typedef Standalone<TLogQueueEntryRef> TLogQueueEntry;
struct LogData; struct LogData;
struct TLogData; struct TLogData;
struct TLogQueue : public IClosable { struct TLogQueue final : public IClosable {
public: public:
TLogQueue(IDiskQueue* queue, UID dbgid) : queue(queue), dbgid(dbgid) {} TLogQueue(IDiskQueue* queue, UID dbgid) : queue(queue), dbgid(dbgid) {}
@ -876,7 +876,7 @@ ACTOR Future<Void> tLogPop(TLogData* self, TLogPopRequest req, Reference<LogData
// timeout check for ignorePopRequest // timeout check for ignorePopRequest
if (self->ignorePopRequest && (g_network->now() > self->ignorePopDeadline)) { if (self->ignorePopRequest && (g_network->now() > self->ignorePopDeadline)) {
TraceEvent("EnableTLogPlayAllIgnoredPops"); TraceEvent("EnableTLogPlayAllIgnoredPops").log();
// use toBePopped and issue all the pops // use toBePopped and issue all the pops
state std::map<Tag, Version>::iterator it; state std::map<Tag, Version>::iterator it;
state vector<Future<Void>> ignoredPops; state vector<Future<Void>> ignoredPops;
@ -1666,7 +1666,7 @@ ACTOR Future<Void> initPersistentState(TLogData* self, Reference<LogData> logDat
updatePersistentPopped(self, logData, logData->getTagData(tag)); updatePersistentPopped(self, logData, logData->getTagData(tag));
} }
TraceEvent("TLogInitCommit", logData->logId); TraceEvent("TLogInitCommit", logData->logId).log();
wait(ioTimeoutError(self->persistentData->commit(), SERVER_KNOBS->TLOG_MAX_CREATE_DURATION)); wait(ioTimeoutError(self->persistentData->commit(), SERVER_KNOBS->TLOG_MAX_CREATE_DURATION));
return Void(); return Void();
} }
@ -1869,7 +1869,7 @@ ACTOR Future<Void> tLogEnablePopReq(TLogEnablePopRequest enablePopReq, TLogData*
enablePopReq.reply.sendError(operation_failed()); enablePopReq.reply.sendError(operation_failed());
return Void(); return Void();
} }
TraceEvent("EnableTLogPlayAllIgnoredPops2"); TraceEvent("EnableTLogPlayAllIgnoredPops2").log();
// use toBePopped and issue all the pops // use toBePopped and issue all the pops
std::map<Tag, Version>::iterator it; std::map<Tag, Version>::iterator it;
vector<Future<Void>> ignoredPops; vector<Future<Void>> ignoredPops;
@ -1923,7 +1923,7 @@ ACTOR Future<Void> serveTLogInterface(TLogData* self,
} }
if (!logData->isPrimary && logData->stopped) { if (!logData->isPrimary && logData->stopped) {
TraceEvent("TLogAlreadyStopped", self->dbgid); TraceEvent("TLogAlreadyStopped", self->dbgid).log();
logData->removed = logData->removed && logData->logSystem->get()->endEpoch(); logData->removed = logData->removed && logData->logSystem->get()->endEpoch();
} }
} else { } else {
@ -2198,22 +2198,22 @@ ACTOR Future<Void> tLogCore(TLogData* self,
} }
ACTOR Future<Void> checkEmptyQueue(TLogData* self) { ACTOR Future<Void> checkEmptyQueue(TLogData* self) {
TraceEvent("TLogCheckEmptyQueueBegin", self->dbgid); TraceEvent("TLogCheckEmptyQueueBegin", self->dbgid).log();
try { try {
TLogQueueEntry r = wait(self->persistentQueue->readNext(self)); TLogQueueEntry r = wait(self->persistentQueue->readNext(self));
throw internal_error(); throw internal_error();
} catch (Error& e) { } catch (Error& e) {
if (e.code() != error_code_end_of_stream) if (e.code() != error_code_end_of_stream)
throw; throw;
TraceEvent("TLogCheckEmptyQueueEnd", self->dbgid); TraceEvent("TLogCheckEmptyQueueEnd", self->dbgid).log();
return Void(); return Void();
} }
} }
ACTOR Future<Void> checkRecovered(TLogData* self) { ACTOR Future<Void> checkRecovered(TLogData* self) {
TraceEvent("TLogCheckRecoveredBegin", self->dbgid); TraceEvent("TLogCheckRecoveredBegin", self->dbgid).log();
Optional<Value> v = wait(self->persistentData->readValue(StringRef())); Optional<Value> v = wait(self->persistentData->readValue(StringRef()));
TraceEvent("TLogCheckRecoveredEnd", self->dbgid); TraceEvent("TLogCheckRecoveredEnd", self->dbgid).log();
return Void(); return Void();
} }
@ -2227,7 +2227,7 @@ ACTOR Future<Void> restorePersistentState(TLogData* self,
state KeyRange tagKeys; state KeyRange tagKeys;
// PERSIST: Read basic state from persistentData; replay persistentQueue but don't erase it // PERSIST: Read basic state from persistentData; replay persistentQueue but don't erase it
TraceEvent("TLogRestorePersistentState", self->dbgid); TraceEvent("TLogRestorePersistentState", self->dbgid).log();
state IKeyValueStore* storage = self->persistentData; state IKeyValueStore* storage = self->persistentData;
wait(storage->init()); wait(storage->init());
@ -2585,7 +2585,7 @@ ACTOR Future<Void> tLogStart(TLogData* self, InitializeTLogRequest req, Locality
logData->removed = rejoinMasters(self, recruited, req.epoch, Future<Void>(Void()), req.isPrimary); logData->removed = rejoinMasters(self, recruited, req.epoch, Future<Void>(Void()), req.isPrimary);
self->queueOrder.push_back(recruited.id()); self->queueOrder.push_back(recruited.id());
TraceEvent("TLogStart", logData->logId); TraceEvent("TLogStart", logData->logId).log();
state Future<Void> updater; state Future<Void> updater;
state bool pulledRecoveryVersions = false; state bool pulledRecoveryVersions = false;
try { try {
@ -2730,7 +2730,7 @@ ACTOR Future<Void> tLog(IKeyValueStore* persistentData,
state TLogData self(tlogId, workerID, persistentData, persistentQueue, db, degraded, folder); state TLogData self(tlogId, workerID, persistentData, persistentQueue, db, degraded, folder);
state Future<Void> error = actorCollection(self.sharedActors.getFuture()); state Future<Void> error = actorCollection(self.sharedActors.getFuture());
TraceEvent("SharedTlog", tlogId); TraceEvent("SharedTlog", tlogId).log();
try { try {
if (restoreFromDisk) { if (restoreFromDisk) {
wait(restorePersistentState(&self, locality, oldLog, recovered, tlogRequests)); wait(restorePersistentState(&self, locality, oldLog, recovered, tlogRequests));

View File

@ -100,7 +100,7 @@ typedef Standalone<TLogQueueEntryRef> TLogQueueEntry;
struct LogData; struct LogData;
struct TLogData; struct TLogData;
struct TLogQueue : public IClosable { struct TLogQueue final : public IClosable {
public: public:
TLogQueue(IDiskQueue* queue, UID dbgid) : queue(queue), dbgid(dbgid) {} TLogQueue(IDiskQueue* queue, UID dbgid) : queue(queue), dbgid(dbgid) {}
@ -1464,7 +1464,7 @@ ACTOR Future<Void> tLogPop(TLogData* self, TLogPopRequest req, Reference<LogData
// timeout check for ignorePopRequest // timeout check for ignorePopRequest
if (self->ignorePopRequest && (g_network->now() > self->ignorePopDeadline)) { if (self->ignorePopRequest && (g_network->now() > self->ignorePopDeadline)) {
TraceEvent("EnableTLogPlayAllIgnoredPops"); TraceEvent("EnableTLogPlayAllIgnoredPops").log();
// use toBePopped and issue all the pops // use toBePopped and issue all the pops
std::map<Tag, Version>::iterator it; std::map<Tag, Version>::iterator it;
vector<Future<Void>> ignoredPops; vector<Future<Void>> ignoredPops;
@ -1871,7 +1871,7 @@ ACTOR Future<Void> watchDegraded(TLogData* self) {
wait(lowPriorityDelay(SERVER_KNOBS->TLOG_DEGRADED_DURATION)); wait(lowPriorityDelay(SERVER_KNOBS->TLOG_DEGRADED_DURATION));
TraceEvent(SevWarnAlways, "TLogDegraded", self->dbgid); TraceEvent(SevWarnAlways, "TLogDegraded", self->dbgid).log();
TEST(true); // TLog degraded TEST(true); // TLog degraded
self->degraded->set(true); self->degraded->set(true);
return Void(); return Void();
@ -2109,7 +2109,7 @@ ACTOR Future<Void> initPersistentState(TLogData* self, Reference<LogData> logDat
updatePersistentPopped(self, logData, logData->getTagData(tag)); updatePersistentPopped(self, logData, logData->getTagData(tag));
} }
TraceEvent("TLogInitCommit", logData->logId); TraceEvent("TLogInitCommit", logData->logId).log();
wait(self->persistentData->commit()); wait(self->persistentData->commit());
return Void(); return Void();
} }
@ -2312,7 +2312,7 @@ ACTOR Future<Void> tLogEnablePopReq(TLogEnablePopRequest enablePopReq, TLogData*
enablePopReq.reply.sendError(operation_failed()); enablePopReq.reply.sendError(operation_failed());
return Void(); return Void();
} }
TraceEvent("EnableTLogPlayAllIgnoredPops2"); TraceEvent("EnableTLogPlayAllIgnoredPops2").log();
// use toBePopped and issue all the pops // use toBePopped and issue all the pops
std::map<Tag, Version>::iterator it; std::map<Tag, Version>::iterator it;
state vector<Future<Void>> ignoredPops; state vector<Future<Void>> ignoredPops;
@ -2657,7 +2657,7 @@ ACTOR Future<Void> tLogCore(TLogData* self,
} }
ACTOR Future<Void> checkEmptyQueue(TLogData* self) { ACTOR Future<Void> checkEmptyQueue(TLogData* self) {
TraceEvent("TLogCheckEmptyQueueBegin", self->dbgid); TraceEvent("TLogCheckEmptyQueueBegin", self->dbgid).log();
try { try {
bool recoveryFinished = wait(self->persistentQueue->initializeRecovery(0)); bool recoveryFinished = wait(self->persistentQueue->initializeRecovery(0));
if (recoveryFinished) if (recoveryFinished)
@ -2667,15 +2667,15 @@ ACTOR Future<Void> checkEmptyQueue(TLogData* self) {
} catch (Error& e) { } catch (Error& e) {
if (e.code() != error_code_end_of_stream) if (e.code() != error_code_end_of_stream)
throw; throw;
TraceEvent("TLogCheckEmptyQueueEnd", self->dbgid); TraceEvent("TLogCheckEmptyQueueEnd", self->dbgid).log();
return Void(); return Void();
} }
} }
ACTOR Future<Void> checkRecovered(TLogData* self) { ACTOR Future<Void> checkRecovered(TLogData* self) {
TraceEvent("TLogCheckRecoveredBegin", self->dbgid); TraceEvent("TLogCheckRecoveredBegin", self->dbgid).log();
Optional<Value> v = wait(self->persistentData->readValue(StringRef())); Optional<Value> v = wait(self->persistentData->readValue(StringRef()));
TraceEvent("TLogCheckRecoveredEnd", self->dbgid); TraceEvent("TLogCheckRecoveredEnd", self->dbgid).log();
return Void(); return Void();
} }
@ -2690,7 +2690,7 @@ ACTOR Future<Void> restorePersistentState(TLogData* self,
state KeyRange tagKeys; state KeyRange tagKeys;
// PERSIST: Read basic state from persistentData; replay persistentQueue but don't erase it // PERSIST: Read basic state from persistentData; replay persistentQueue but don't erase it
TraceEvent("TLogRestorePersistentState", self->dbgid); TraceEvent("TLogRestorePersistentState", self->dbgid).log();
state IKeyValueStore* storage = self->persistentData; state IKeyValueStore* storage = self->persistentData;
wait(storage->init()); wait(storage->init());
@ -3219,7 +3219,7 @@ ACTOR Future<Void> tLog(IKeyValueStore* persistentData,
state TLogData self(tlogId, workerID, persistentData, persistentQueue, db, degraded, folder); state TLogData self(tlogId, workerID, persistentData, persistentQueue, db, degraded, folder);
state Future<Void> error = actorCollection(self.sharedActors.getFuture()); state Future<Void> error = actorCollection(self.sharedActors.getFuture());
TraceEvent("SharedTlog", tlogId); TraceEvent("SharedTlog", tlogId).log();
try { try {
if (restoreFromDisk) { if (restoreFromDisk) {
wait(restorePersistentState(&self, locality, oldLog, recovered, tlogRequests)); wait(restorePersistentState(&self, locality, oldLog, recovered, tlogRequests));

View File

@ -576,7 +576,7 @@ ACTOR Future<Void> repairDeadDatacenter(Database cx,
// FIXME: the primary and remote can both be considered dead because excludes are not handled properly by the // FIXME: the primary and remote can both be considered dead because excludes are not handled properly by the
// datacenterDead function // datacenterDead function
if (primaryDead && remoteDead) { if (primaryDead && remoteDead) {
TraceEvent(SevWarnAlways, "CannotDisableFearlessConfiguration"); TraceEvent(SevWarnAlways, "CannotDisableFearlessConfiguration").log();
return Void(); return Void();
} }
if (primaryDead || remoteDead) { if (primaryDead || remoteDead) {
@ -647,7 +647,7 @@ ACTOR Future<Void> waitForQuietDatabase(Database cx,
loop { loop {
try { try {
TraceEvent("QuietDatabaseWaitingOnDataDistributor"); TraceEvent("QuietDatabaseWaitingOnDataDistributor").log();
WorkerInterface distributorWorker = wait(getDataDistributorWorker(cx, dbInfo)); WorkerInterface distributorWorker = wait(getDataDistributorWorker(cx, dbInfo));
UID distributorUID = dbInfo->get().distributor.get().id(); UID distributorUID = dbInfo->get().distributor.get().id();
TraceEvent("QuietDatabaseGotDataDistributor", distributorUID) TraceEvent("QuietDatabaseGotDataDistributor", distributorUID)

View File

@ -801,14 +801,14 @@ ACTOR Future<Void> monitorThrottlingChanges(RatekeeperData* self) {
autoThrottlingEnabled.get().get() == LiteralStringRef("0")) { autoThrottlingEnabled.get().get() == LiteralStringRef("0")) {
TEST(true); // Auto-throttling disabled TEST(true); // Auto-throttling disabled
if (self->autoThrottlingEnabled) { if (self->autoThrottlingEnabled) {
TraceEvent("AutoTagThrottlingDisabled", self->id); TraceEvent("AutoTagThrottlingDisabled", self->id).log();
} }
self->autoThrottlingEnabled = false; self->autoThrottlingEnabled = false;
} else if (autoThrottlingEnabled.get().present() && } else if (autoThrottlingEnabled.get().present() &&
autoThrottlingEnabled.get().get() == LiteralStringRef("1")) { autoThrottlingEnabled.get().get() == LiteralStringRef("1")) {
TEST(true); // Auto-throttling enabled TEST(true); // Auto-throttling enabled
if (!self->autoThrottlingEnabled) { if (!self->autoThrottlingEnabled) {
TraceEvent("AutoTagThrottlingEnabled", self->id); TraceEvent("AutoTagThrottlingEnabled", self->id).log();
} }
self->autoThrottlingEnabled = true; self->autoThrottlingEnabled = true;
} else { } else {
@ -870,7 +870,7 @@ ACTOR Future<Void> monitorThrottlingChanges(RatekeeperData* self) {
committed = true; committed = true;
wait(watchFuture); wait(watchFuture);
TraceEvent("RatekeeperThrottleSignaled", self->id); TraceEvent("RatekeeperThrottleSignaled", self->id).log();
TEST(true); // Tag throttle changes detected TEST(true); // Tag throttle changes detected
break; break;
} catch (Error& e) { } catch (Error& e) {

View File

@ -473,7 +473,7 @@ ACTOR static Future<Void> precomputeMutationsResult(Reference<ApplierBatchData>
} }
} }
TraceEvent("FastRestoreApplierGetAndComputeStagingKeysWaitOn", applierID); TraceEvent("FastRestoreApplierGetAndComputeStagingKeysWaitOn", applierID).log();
wait(waitForAll(fGetAndComputeKeys)); wait(waitForAll(fGetAndComputeKeys));
// Sanity check all stagingKeys have been precomputed // Sanity check all stagingKeys have been precomputed

View File

@ -317,7 +317,7 @@ struct ApplierBatchData : public ReferenceCounted<ApplierBatchData> {
return false; return false;
} }
} }
TraceEvent("FastRestoreApplierAllKeysPrecomputed"); TraceEvent("FastRestoreApplierAllKeysPrecomputed").log();
return true; return true;
} }

View File

@ -714,7 +714,7 @@ ACTOR static Future<std::vector<RestoreRequest>> collectRestoreRequests(Database
// restoreRequestTriggerKey should already been set // restoreRequestTriggerKey should already been set
loop { loop {
try { try {
TraceEvent("FastRestoreControllerPhaseCollectRestoreRequestsWait"); TraceEvent("FastRestoreControllerPhaseCollectRestoreRequestsWait").log();
tr.setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS); tr.setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
tr.setOption(FDBTransactionOptions::LOCK_AWARE); tr.setOption(FDBTransactionOptions::LOCK_AWARE);
@ -732,7 +732,7 @@ ACTOR static Future<std::vector<RestoreRequest>> collectRestoreRequests(Database
} }
break; break;
} else { } else {
TraceEvent(SevError, "FastRestoreControllerPhaseCollectRestoreRequestsEmptyRequests"); TraceEvent(SevError, "FastRestoreControllerPhaseCollectRestoreRequestsEmptyRequests").log();
wait(delay(5.0)); wait(delay(5.0));
} }
} catch (Error& e) { } catch (Error& e) {
@ -1079,7 +1079,7 @@ ACTOR static Future<Void> notifyLoadersVersionBatchFinished(std::map<UID, Restor
// Terminate those roles if terminate = true // Terminate those roles if terminate = true
ACTOR static Future<Void> notifyRestoreCompleted(Reference<RestoreControllerData> self, bool terminate = false) { ACTOR static Future<Void> notifyRestoreCompleted(Reference<RestoreControllerData> self, bool terminate = false) {
std::vector<std::pair<UID, RestoreFinishRequest>> requests; std::vector<std::pair<UID, RestoreFinishRequest>> requests;
TraceEvent("FastRestoreControllerPhaseNotifyRestoreCompletedStart"); TraceEvent("FastRestoreControllerPhaseNotifyRestoreCompletedStart").log();
for (auto& loader : self->loadersInterf) { for (auto& loader : self->loadersInterf) {
requests.emplace_back(loader.first, RestoreFinishRequest(terminate)); requests.emplace_back(loader.first, RestoreFinishRequest(terminate));
} }
@ -1099,7 +1099,7 @@ ACTOR static Future<Void> notifyRestoreCompleted(Reference<RestoreControllerData
wait(endLoaders && endAppliers); wait(endLoaders && endAppliers);
} }
TraceEvent("FastRestoreControllerPhaseNotifyRestoreCompletedDone"); TraceEvent("FastRestoreControllerPhaseNotifyRestoreCompletedDone").log();
return Void(); return Void();
} }
@ -1128,7 +1128,7 @@ ACTOR static Future<Void> signalRestoreCompleted(Reference<RestoreControllerData
} }
} }
TraceEvent("FastRestoreControllerAllRestoreCompleted"); TraceEvent("FastRestoreControllerAllRestoreCompleted").log();
return Void(); return Void();
} }

View File

@ -277,7 +277,7 @@ ACTOR static Future<Void> waitOnRestoreRequests(Database cx, UID nodeID = UID())
state Optional<Value> numRequests; state Optional<Value> numRequests;
// wait for the restoreRequestTriggerKey to be set by the client/test workload // wait for the restoreRequestTriggerKey to be set by the client/test workload
TraceEvent("FastRestoreWaitOnRestoreRequest", nodeID); TraceEvent("FastRestoreWaitOnRestoreRequest", nodeID).log();
loop { loop {
try { try {
tr.reset(); tr.reset();
@ -288,9 +288,9 @@ ACTOR static Future<Void> waitOnRestoreRequests(Database cx, UID nodeID = UID())
if (!numRequests.present()) { if (!numRequests.present()) {
state Future<Void> watchForRestoreRequest = tr.watch(restoreRequestTriggerKey); state Future<Void> watchForRestoreRequest = tr.watch(restoreRequestTriggerKey);
wait(tr.commit()); wait(tr.commit());
TraceEvent(SevInfo, "FastRestoreWaitOnRestoreRequestTriggerKey", nodeID); TraceEvent(SevInfo, "FastRestoreWaitOnRestoreRequestTriggerKey", nodeID).log();
wait(watchForRestoreRequest); wait(watchForRestoreRequest);
TraceEvent(SevInfo, "FastRestoreDetectRestoreRequestTriggerKeyChanged", nodeID); TraceEvent(SevInfo, "FastRestoreDetectRestoreRequestTriggerKeyChanged", nodeID).log();
} else { } else {
TraceEvent(SevInfo, "FastRestoreRestoreRequestTriggerKey", nodeID) TraceEvent(SevInfo, "FastRestoreRestoreRequestTriggerKey", nodeID)
.detail("TriggerKey", numRequests.get().toString()); .detail("TriggerKey", numRequests.get().toString());

View File

@ -408,7 +408,7 @@ ACTOR Future<Void> runDr(Reference<ClusterConnectionFile> connFile) {
wait(delay(1.0)); wait(delay(1.0));
} }
TraceEvent("StoppingDrAgents"); TraceEvent("StoppingDrAgents").log();
for (auto it : agentFutures) { for (auto it : agentFutures) {
it.cancel(); it.cancel();
@ -2205,7 +2205,7 @@ ACTOR void setupAndRun(std::string dataFolder,
TraceEvent(SevError, "SetupAndRunError").error(e); TraceEvent(SevError, "SetupAndRunError").error(e);
} }
TraceEvent("SimulatedSystemDestruct"); TraceEvent("SimulatedSystemDestruct").log();
g_simulator.stop(); g_simulator.stop();
destructed = true; destructed = true;
wait(Never()); wait(Never());

View File

@ -425,7 +425,7 @@ ACTOR Future<Version> waitForVersion(StorageCacheData* data, Version version) {
} }
if (deterministicRandom()->random01() < 0.001) if (deterministicRandom()->random01() < 0.001)
TraceEvent("WaitForVersion1000x"); TraceEvent("WaitForVersion1000x").log();
choose { choose {
when(wait(data->version.whenAtLeast(version))) { when(wait(data->version.whenAtLeast(version))) {
// FIXME: A bunch of these can block with or without the following delay 0. // FIXME: A bunch of these can block with or without the following delay 0.
@ -1363,7 +1363,7 @@ ACTOR Future<Void> fetchKeys(StorageCacheData* data, AddingCacheRange* cacheRang
// doesn't fit on this cache. For now, we can just fail this cache role. In future, we should think // doesn't fit on this cache. For now, we can just fail this cache role. In future, we should think
// about evicting some data to make room for the remaining keys // about evicting some data to make room for the remaining keys
if (this_block.more) { if (this_block.more) {
TraceEvent(SevDebug, "CacheWarmupMoreDataThanLimit", data->thisServerID); TraceEvent(SevDebug, "CacheWarmupMoreDataThanLimit", data->thisServerID).log();
throw please_reboot(); throw please_reboot();
} }
@ -1780,7 +1780,7 @@ private:
rollback(data, rollbackVersion, currentVersion); rollback(data, rollbackVersion, currentVersion);
} }
} else { } else {
TraceEvent(SevWarn, "SCPrivateCacheMutation: Unknown private mutation"); TraceEvent(SevWarn, "SCPrivateCacheMutation: Unknown private mutation").log();
// ASSERT(false); // Unknown private mutation // ASSERT(false); // Unknown private mutation
} }
} }
@ -2156,6 +2156,7 @@ ACTOR Future<Void> watchInterface(StorageCacheData* self, StorageServerInterface
tr.set(storageKey, storageCacheServerValue(ssi)); tr.set(storageKey, storageCacheServerValue(ssi));
wait(tr.commit()); wait(tr.commit());
} }
tr.reset();
break; break;
} catch (Error& e) { } catch (Error& e) {
wait(tr.onError(e)); wait(tr.onError(e));

View File

@ -2160,7 +2160,7 @@ ACTOR Future<Void> initPersistentState(TLogData* self, Reference<LogData> logDat
updatePersistentPopped(self, logData, logData->getTagData(tag)); updatePersistentPopped(self, logData, logData->getTagData(tag));
} }
TraceEvent("TLogInitCommit", logData->logId); TraceEvent("TLogInitCommit", logData->logId).log();
wait(ioTimeoutError(self->persistentData->commit(), SERVER_KNOBS->TLOG_MAX_CREATE_DURATION)); wait(ioTimeoutError(self->persistentData->commit(), SERVER_KNOBS->TLOG_MAX_CREATE_DURATION));
return Void(); return Void();
} }
@ -2713,7 +2713,7 @@ ACTOR Future<Void> tLogCore(TLogData* self,
} }
ACTOR Future<Void> checkEmptyQueue(TLogData* self) { ACTOR Future<Void> checkEmptyQueue(TLogData* self) {
TraceEvent("TLogCheckEmptyQueueBegin", self->dbgid); TraceEvent("TLogCheckEmptyQueueBegin", self->dbgid).log();
try { try {
bool recoveryFinished = wait(self->persistentQueue->initializeRecovery(0)); bool recoveryFinished = wait(self->persistentQueue->initializeRecovery(0));
if (recoveryFinished) if (recoveryFinished)
@ -2723,15 +2723,15 @@ ACTOR Future<Void> checkEmptyQueue(TLogData* self) {
} catch (Error& e) { } catch (Error& e) {
if (e.code() != error_code_end_of_stream) if (e.code() != error_code_end_of_stream)
throw; throw;
TraceEvent("TLogCheckEmptyQueueEnd", self->dbgid); TraceEvent("TLogCheckEmptyQueueEnd", self->dbgid).log();
return Void(); return Void();
} }
} }
ACTOR Future<Void> checkRecovered(TLogData* self) { ACTOR Future<Void> checkRecovered(TLogData* self) {
TraceEvent("TLogCheckRecoveredBegin", self->dbgid); TraceEvent("TLogCheckRecoveredBegin", self->dbgid).log();
Optional<Value> v = wait(self->persistentData->readValue(StringRef())); Optional<Value> v = wait(self->persistentData->readValue(StringRef()));
TraceEvent("TLogCheckRecoveredEnd", self->dbgid); TraceEvent("TLogCheckRecoveredEnd", self->dbgid).log();
return Void(); return Void();
} }
@ -2746,7 +2746,7 @@ ACTOR Future<Void> restorePersistentState(TLogData* self,
state KeyRange tagKeys; state KeyRange tagKeys;
// PERSIST: Read basic state from persistentData; replay persistentQueue but don't erase it // PERSIST: Read basic state from persistentData; replay persistentQueue but don't erase it
TraceEvent("TLogRestorePersistentState", self->dbgid); TraceEvent("TLogRestorePersistentState", self->dbgid).log();
state IKeyValueStore* storage = self->persistentData; state IKeyValueStore* storage = self->persistentData;
wait(storage->init()); wait(storage->init());
@ -3294,7 +3294,7 @@ ACTOR Future<Void> tLog(IKeyValueStore* persistentData,
state TLogData self(tlogId, workerID, persistentData, persistentQueue, db, degraded, folder); state TLogData self(tlogId, workerID, persistentData, persistentQueue, db, degraded, folder);
state Future<Void> error = actorCollection(self.sharedActors.getFuture()); state Future<Void> error = actorCollection(self.sharedActors.getFuture());
TraceEvent("SharedTlog", tlogId); TraceEvent("SharedTlog", tlogId).log();
try { try {
if (restoreFromDisk) { if (restoreFromDisk) {
wait(restorePersistentState(&self, locality, oldLog, recovered, tlogRequests)); wait(restorePersistentState(&self, locality, oldLog, recovered, tlogRequests));

View File

@ -152,7 +152,7 @@ OldTLogCoreData::OldTLogCoreData(const OldLogData& oldData)
} }
} }
struct TagPartitionedLogSystem : ILogSystem, ReferenceCounted<TagPartitionedLogSystem> { struct TagPartitionedLogSystem final : ILogSystem, ReferenceCounted<TagPartitionedLogSystem> {
const UID dbgid; const UID dbgid;
LogSystemType logSystemType; LogSystemType logSystemType;
std::vector<Reference<LogSet>> tLogs; // LogSets in different locations: primary, satellite, or remote std::vector<Reference<LogSet>> tLogs; // LogSets in different locations: primary, satellite, or remote
@ -415,7 +415,7 @@ struct TagPartitionedLogSystem : ILogSystem, ReferenceCounted<TagPartitionedLogS
} }
for (auto& t : newState.tLogs) { for (auto& t : newState.tLogs) {
if (!t.isLocal) { if (!t.isLocal) {
TraceEvent("RemoteLogsWritten", dbgid); TraceEvent("RemoteLogsWritten", dbgid).log();
remoteLogsWrittenToCoreState = true; remoteLogsWrittenToCoreState = true;
break; break;
} }
@ -1101,7 +1101,7 @@ struct TagPartitionedLogSystem : ILogSystem, ReferenceCounted<TagPartitionedLogS
bool canDiscardPopped) final { bool canDiscardPopped) final {
Version end = getEnd(); Version end = getEnd();
if (!tLogs.size()) { if (!tLogs.size()) {
TraceEvent("TLogPeekTxsNoLogs", dbgid); TraceEvent("TLogPeekTxsNoLogs", dbgid).log();
return makeReference<ILogSystem::ServerPeekCursor>( return makeReference<ILogSystem::ServerPeekCursor>(
Reference<AsyncVar<OptionalInterface<TLogInterface>>>(), txsTag, begin, end, false, false); Reference<AsyncVar<OptionalInterface<TLogInterface>>>(), txsTag, begin, end, false, false);
} }
@ -1534,7 +1534,7 @@ struct TagPartitionedLogSystem : ILogSystem, ReferenceCounted<TagPartitionedLogS
wait(waitForAll(poppedReady) || maxGetPoppedDuration); wait(waitForAll(poppedReady) || maxGetPoppedDuration);
if (maxGetPoppedDuration.isReady()) { if (maxGetPoppedDuration.isReady()) {
TraceEvent(SevWarnAlways, "PoppedTxsNotReady", dbgid); TraceEvent(SevWarnAlways, "PoppedTxsNotReady", dbgid).log();
} }
Version maxPopped = 1; Version maxPopped = 1;
@ -2480,7 +2480,7 @@ struct TagPartitionedLogSystem : ILogSystem, ReferenceCounted<TagPartitionedLogS
LogEpoch recoveryCount, LogEpoch recoveryCount,
int8_t remoteLocality, int8_t remoteLocality,
std::vector<Tag> allTags) { std::vector<Tag> allTags) {
TraceEvent("RemoteLogRecruitment_WaitingForWorkers"); TraceEvent("RemoteLogRecruitment_WaitingForWorkers").log();
state RecruitRemoteFromConfigurationReply remoteWorkers = wait(fRemoteWorkers); state RecruitRemoteFromConfigurationReply remoteWorkers = wait(fRemoteWorkers);
state Reference<LogSet> logSet(new LogSet()); state Reference<LogSet> logSet(new LogSet());
@ -2655,7 +2655,7 @@ struct TagPartitionedLogSystem : ILogSystem, ReferenceCounted<TagPartitionedLogS
self->remoteRecoveryComplete = waitForAll(recoveryComplete); self->remoteRecoveryComplete = waitForAll(recoveryComplete);
self->tLogs.push_back(logSet); self->tLogs.push_back(logSet);
TraceEvent("RemoteLogRecruitment_CompletingRecovery"); TraceEvent("RemoteLogRecruitment_CompletingRecovery").log();
return Void(); return Void();
} }
@ -3149,7 +3149,7 @@ struct TagPartitionedLogSystem : ILogSystem, ReferenceCounted<TagPartitionedLogS
// Step 1: Verify that if all the failed TLogs come back, they can't form a quorum. // Step 1: Verify that if all the failed TLogs come back, they can't form a quorum.
if (can_obtain_quorum(locking_failed)) { if (can_obtain_quorum(locking_failed)) {
TraceEvent(SevInfo, "MasterRecoveryTLogLockingImpossible", dbgid); TraceEvent(SevInfo, "MasterRecoveryTLogLockingImpossible", dbgid).log();
return; return;
} }

View File

@ -1970,7 +1970,7 @@ class DWALPagerSnapshot;
// This process basically describes a "Delayed" Write-Ahead-Log (DWAL) because the remap queue and the newly allocated // This process basically describes a "Delayed" Write-Ahead-Log (DWAL) because the remap queue and the newly allocated
// alternate pages it references basically serve as a write ahead log for pages that will eventially be copied // alternate pages it references basically serve as a write ahead log for pages that will eventially be copied
// back to their original location once the original version is no longer needed. // back to their original location once the original version is no longer needed.
class DWALPager : public IPager2 { class DWALPager final : public IPager2 {
public: public:
typedef FIFOQueue<LogicalPageID> LogicalPageQueueT; typedef FIFOQueue<LogicalPageID> LogicalPageQueueT;
typedef std::map<Version, LogicalPageID> VersionToPageMapT; typedef std::map<Version, LogicalPageID> VersionToPageMapT;

View File

@ -276,7 +276,7 @@ struct MasterData : NonCopyable, ReferenceCounted<MasterData> {
reportLiveCommittedVersionRequests("ReportLiveCommittedVersionRequests", cc) { reportLiveCommittedVersionRequests("ReportLiveCommittedVersionRequests", cc) {
logger = traceCounters("MasterMetrics", dbgid, SERVER_KNOBS->WORKER_LOGGING_INTERVAL, &cc, "MasterMetrics"); logger = traceCounters("MasterMetrics", dbgid, SERVER_KNOBS->WORKER_LOGGING_INTERVAL, &cc, "MasterMetrics");
if (forceRecovery && !myInterface.locality.dcId().present()) { if (forceRecovery && !myInterface.locality.dcId().present()) {
TraceEvent(SevError, "ForcedRecoveryRequiresDcID"); TraceEvent(SevError, "ForcedRecoveryRequiresDcID").log();
forceRecovery = false; forceRecovery = false;
} }
} }
@ -904,7 +904,7 @@ ACTOR Future<Void> readTransactionSystemState(Reference<MasterData> self,
// make KeyValueStoreMemory guarantee immediate reads, we should be able to get rid of // make KeyValueStoreMemory guarantee immediate reads, we should be able to get rid of
// the discardCommit() below and not need a writable log adapter // the discardCommit() below and not need a writable log adapter
TraceEvent("RTSSComplete", self->dbgid); TraceEvent("RTSSComplete", self->dbgid).log();
return Void(); return Void();
} }
@ -1087,7 +1087,7 @@ ACTOR Future<Void> recoverFrom(Reference<MasterData> self,
when(Standalone<CommitTransactionRef> _req = wait(provisional)) { when(Standalone<CommitTransactionRef> _req = wait(provisional)) {
state Standalone<CommitTransactionRef> req = _req; // mutable state Standalone<CommitTransactionRef> req = _req; // mutable
TEST(true); // Emergency transaction processing during recovery TEST(true); // Emergency transaction processing during recovery
TraceEvent("EmergencyTransaction", self->dbgid); TraceEvent("EmergencyTransaction", self->dbgid).log();
for (auto m = req.mutations.begin(); m != req.mutations.end(); ++m) for (auto m = req.mutations.begin(); m != req.mutations.end(); ++m)
TraceEvent("EmergencyTransactionMutation", self->dbgid) TraceEvent("EmergencyTransactionMutation", self->dbgid)
.detail("MType", m->type) .detail("MType", m->type)
@ -1102,7 +1102,7 @@ ACTOR Future<Void> recoverFrom(Reference<MasterData> self,
initialConfChanges->clear(); initialConfChanges->clear();
if (self->originalConfiguration.isValid() && if (self->originalConfiguration.isValid() &&
self->configuration.usableRegions != self->originalConfiguration.usableRegions) { self->configuration.usableRegions != self->originalConfiguration.usableRegions) {
TraceEvent(SevWarnAlways, "CannotChangeUsableRegions", self->dbgid); TraceEvent(SevWarnAlways, "CannotChangeUsableRegions", self->dbgid).log();
self->configuration = self->originalConfiguration; self->configuration = self->originalConfiguration;
} else { } else {
initialConfChanges->push_back(req); initialConfChanges->push_back(req);
@ -1500,7 +1500,7 @@ ACTOR Future<Void> trackTlogRecovery(Reference<MasterData> self,
if (newState.oldTLogData.size() && configuration.repopulateRegionAntiQuorum > 0 && if (newState.oldTLogData.size() && configuration.repopulateRegionAntiQuorum > 0 &&
self->logSystem->remoteStorageRecovered()) { self->logSystem->remoteStorageRecovered()) {
TraceEvent(SevWarnAlways, "RecruitmentStalled_RemoteStorageRecovered", self->dbgid); TraceEvent(SevWarnAlways, "RecruitmentStalled_RemoteStorageRecovered", self->dbgid).log();
self->recruitmentStalled->set(true); self->recruitmentStalled->set(true);
} }
self->registrationTrigger.trigger(); self->registrationTrigger.trigger();
@ -1570,7 +1570,7 @@ ACTOR static Future<Optional<Version>> getMinBackupVersion(Reference<MasterData>
minVersion = minVersion.present() ? std::min(version, minVersion.get()) : version; minVersion = minVersion.present() ? std::min(version, minVersion.get()) : version;
} }
} else { } else {
TraceEvent("EmptyBackupStartKey", self->dbgid); TraceEvent("EmptyBackupStartKey", self->dbgid).log();
} }
return minVersion; return minVersion;
@ -1663,7 +1663,7 @@ ACTOR static Future<Void> recruitBackupWorkers(Reference<MasterData> self, Datab
std::vector<InitializeBackupReply> newRecruits = wait(getAll(initializationReplies)); std::vector<InitializeBackupReply> newRecruits = wait(getAll(initializationReplies));
self->logSystem->setBackupWorkers(newRecruits); self->logSystem->setBackupWorkers(newRecruits);
TraceEvent("BackupRecruitmentDone", self->dbgid); TraceEvent("BackupRecruitmentDone", self->dbgid).log();
self->registrationTrigger.trigger(); self->registrationTrigger.trigger();
return Void(); return Void();
} }
@ -1723,7 +1723,7 @@ ACTOR Future<Void> masterCore(Reference<MasterData> self) {
if (g_network->isSimulated() && self->cstate.myDBState.oldTLogData.size() > CLIENT_KNOBS->MAX_GENERATIONS_SIM) { if (g_network->isSimulated() && self->cstate.myDBState.oldTLogData.size() > CLIENT_KNOBS->MAX_GENERATIONS_SIM) {
g_simulator.connectionFailuresDisableDuration = 1e6; g_simulator.connectionFailuresDisableDuration = 1e6;
g_simulator.speedUpSimulation = true; g_simulator.speedUpSimulation = true;
TraceEvent(SevWarnAlways, "DisableConnectionFailures_TooManyGenerations"); TraceEvent(SevWarnAlways, "DisableConnectionFailures_TooManyGenerations").log();
} }
} }
@ -1812,7 +1812,7 @@ ACTOR Future<Void> masterCore(Reference<MasterData> self) {
tr.set(recoveryCommitRequest.arena, snapshotEndVersionKey, (bw << self->lastEpochEnd).toValue()); tr.set(recoveryCommitRequest.arena, snapshotEndVersionKey, (bw << self->lastEpochEnd).toValue());
// Pause the backups that got restored in this snapshot to avoid data corruption // Pause the backups that got restored in this snapshot to avoid data corruption
// Requires further operational work to abort the backup // Requires further operational work to abort the backup
TraceEvent("MasterRecoveryPauseBackupAgents"); TraceEvent("MasterRecoveryPauseBackupAgents").log();
Key backupPauseKey = FileBackupAgent::getPauseKey(); Key backupPauseKey = FileBackupAgent::getPauseKey();
tr.set(recoveryCommitRequest.arena, backupPauseKey, StringRef()); tr.set(recoveryCommitRequest.arena, backupPauseKey, StringRef());
// Clear the key so multiple recoveries will not overwrite the first version recorded // Clear the key so multiple recoveries will not overwrite the first version recorded
@ -1882,7 +1882,7 @@ ACTOR Future<Void> masterCore(Reference<MasterData> self) {
tr.read_snapshot = self->recoveryTransactionVersion; // lastEpochEnd would make more sense, but isn't in the initial tr.read_snapshot = self->recoveryTransactionVersion; // lastEpochEnd would make more sense, but isn't in the initial
// window of the resolver(s) // window of the resolver(s)
TraceEvent("MasterRecoveryCommit", self->dbgid); TraceEvent("MasterRecoveryCommit", self->dbgid).log();
state Future<ErrorOr<CommitID>> recoveryCommit = self->commitProxies[0].commit.tryGetReply(recoveryCommitRequest); state Future<ErrorOr<CommitID>> recoveryCommit = self->commitProxies[0].commit.tryGetReply(recoveryCommitRequest);
self->addActor.send(self->logSystem->onError()); self->addActor.send(self->logSystem->onError());
self->addActor.send(waitResolverFailure(self->resolvers)); self->addActor.send(waitResolverFailure(self->resolvers));
@ -1930,7 +1930,7 @@ ACTOR Future<Void> masterCore(Reference<MasterData> self) {
debug_advanceMinCommittedVersion(UID(), self->recoveryTransactionVersion); debug_advanceMinCommittedVersion(UID(), self->recoveryTransactionVersion);
if (debugResult) { if (debugResult) {
TraceEvent(self->forceRecovery ? SevWarn : SevError, "DBRecoveryDurabilityError"); TraceEvent(self->forceRecovery ? SevWarn : SevError, "DBRecoveryDurabilityError").log();
} }
TraceEvent("MasterCommittedTLogs", self->dbgid) TraceEvent("MasterCommittedTLogs", self->dbgid)

View File

@ -1189,7 +1189,7 @@ Future<Version> waitForVersion(StorageServer* data, Version version, SpanID span
} }
if (deterministicRandom()->random01() < 0.001) { if (deterministicRandom()->random01() < 0.001) {
TraceEvent("WaitForVersion1000x"); TraceEvent("WaitForVersion1000x").log();
} }
return waitForVersionActor(data, version, spanContext); return waitForVersionActor(data, version, spanContext);
} }
@ -3542,10 +3542,10 @@ private:
ASSERT(ssId == data->thisServerID); ASSERT(ssId == data->thisServerID);
if (m.type == MutationRef::SetValue) { if (m.type == MutationRef::SetValue) {
TEST(true); // Putting TSS in quarantine TEST(true); // Putting TSS in quarantine
TraceEvent(SevWarn, "TSSQuarantineStart", data->thisServerID); TraceEvent(SevWarn, "TSSQuarantineStart", data->thisServerID).log();
data->startTssQuarantine(); data->startTssQuarantine();
} else { } else {
TraceEvent(SevWarn, "TSSQuarantineStop", data->thisServerID); TraceEvent(SevWarn, "TSSQuarantineStop", data->thisServerID).log();
// dipose of this TSS // dipose of this TSS
throw worker_removed(); throw worker_removed();
} }
@ -3620,7 +3620,7 @@ ACTOR Future<Void> update(StorageServer* data, bool* pReceivedUpdate) {
!g_simulator.speedUpSimulation && data->tssFaultInjectTime.present() && !g_simulator.speedUpSimulation && data->tssFaultInjectTime.present() &&
data->tssFaultInjectTime.get() < now()) { data->tssFaultInjectTime.get() < now()) {
if (deterministicRandom()->random01() < 0.01) { if (deterministicRandom()->random01() < 0.01) {
TraceEvent(SevWarnAlways, "TSSInjectDelayForever", data->thisServerID); TraceEvent(SevWarnAlways, "TSSInjectDelayForever", data->thisServerID).log();
// small random chance to just completely get stuck here, each tss should eventually hit this in this // small random chance to just completely get stuck here, each tss should eventually hit this in this
// mode // mode
wait(tssDelayForever()); wait(tssDelayForever());
@ -3835,7 +3835,7 @@ ACTOR Future<Void> update(StorageServer* data, bool* pReceivedUpdate) {
} else if (ver != invalidVersion) { // This change belongs to a version < minVersion } else if (ver != invalidVersion) { // This change belongs to a version < minVersion
DEBUG_MUTATION("SSPeek", ver, msg).detail("ServerID", data->thisServerID); DEBUG_MUTATION("SSPeek", ver, msg).detail("ServerID", data->thisServerID);
if (ver == 1) { if (ver == 1) {
TraceEvent("SSPeekMutation", data->thisServerID); TraceEvent("SSPeekMutation", data->thisServerID).log();
// The following trace event may produce a value with special characters // The following trace event may produce a value with special characters
//TraceEvent("SSPeekMutation", data->thisServerID).detail("Mutation", msg.toString()).detail("Version", cloneCursor2->version().toString()); //TraceEvent("SSPeekMutation", data->thisServerID).detail("Mutation", msg.toString()).detail("Version", cloneCursor2->version().toString());
} }
@ -4333,15 +4333,15 @@ ACTOR Future<bool> restoreDurableState(StorageServer* data, IKeyValueStore* stor
data->byteSampleRecovery = data->byteSampleRecovery =
restoreByteSample(data, storage, byteSampleSampleRecovered, startByteSampleRestore.getFuture()); restoreByteSample(data, storage, byteSampleSampleRecovered, startByteSampleRestore.getFuture());
TraceEvent("ReadingDurableState", data->thisServerID); TraceEvent("ReadingDurableState", data->thisServerID).log();
wait(waitForAll(std::vector{ fFormat, fID, ftssPairID, fTssQuarantine, fVersion, fLogProtocol, fPrimaryLocality })); wait(waitForAll(std::vector{ fFormat, fID, ftssPairID, fTssQuarantine, fVersion, fLogProtocol, fPrimaryLocality }));
wait(waitForAll(std::vector{ fShardAssigned, fShardAvailable })); wait(waitForAll(std::vector{ fShardAssigned, fShardAvailable }));
wait(byteSampleSampleRecovered.getFuture()); wait(byteSampleSampleRecovered.getFuture());
TraceEvent("RestoringDurableState", data->thisServerID); TraceEvent("RestoringDurableState", data->thisServerID).log();
if (!fFormat.get().present()) { if (!fFormat.get().present()) {
// The DB was never initialized // The DB was never initialized
TraceEvent("DBNeverInitialized", data->thisServerID); TraceEvent("DBNeverInitialized", data->thisServerID).log();
storage->dispose(); storage->dispose();
data->thisServerID = UID(); data->thisServerID = UID();
data->sk = Key(); data->sk = Key();
@ -5262,7 +5262,7 @@ ACTOR Future<Void> replaceInterface(StorageServer* self, StorageServerInterface
} }
if (self->history.size() && BUGGIFY) { if (self->history.size() && BUGGIFY) {
TraceEvent("SSHistoryReboot", self->thisServerID); TraceEvent("SSHistoryReboot", self->thisServerID).log();
throw please_reboot(); throw please_reboot();
} }
@ -5337,7 +5337,7 @@ ACTOR Future<Void> storageServer(IKeyValueStore* persistentData,
try { try {
state double start = now(); state double start = now();
TraceEvent("StorageServerRebootStart", self.thisServerID); TraceEvent("StorageServerRebootStart", self.thisServerID).log();
wait(self.storage.init()); wait(self.storage.init());
choose { choose {
@ -5346,7 +5346,7 @@ ACTOR Future<Void> storageServer(IKeyValueStore* persistentData,
when(wait(self.storage.commit())) {} when(wait(self.storage.commit())) {}
when(wait(memoryStoreRecover(persistentData, connFile, self.thisServerID))) { when(wait(memoryStoreRecover(persistentData, connFile, self.thisServerID))) {
TraceEvent("DisposeStorageServer", self.thisServerID); TraceEvent("DisposeStorageServer", self.thisServerID).log();
throw worker_removed(); throw worker_removed();
} }
} }

View File

@ -817,7 +817,7 @@ ACTOR Future<DistributedTestResults> runWorkload(Database cx, std::vector<Tester
} }
state std::vector<Future<ErrorOr<CheckReply>>> checks; state std::vector<Future<ErrorOr<CheckReply>>> checks;
TraceEvent("CheckingResults"); TraceEvent("CheckingResults").log();
printf("checking test (%s)...\n", printable(spec.title).c_str()); printf("checking test (%s)...\n", printable(spec.title).c_str());
@ -1016,7 +1016,7 @@ ACTOR Future<bool> runTest(Database cx,
if (spec.useDB && spec.clearAfterTest) { if (spec.useDB && spec.clearAfterTest) {
try { try {
TraceEvent("TesterClearingDatabase"); TraceEvent("TesterClearingDatabase").log();
wait(timeoutError(clearData(cx), 1000.0)); wait(timeoutError(clearData(cx), 1000.0));
} catch (Error& e) { } catch (Error& e) {
TraceEvent(SevError, "ErrorClearingDatabaseAfterTest").error(e); TraceEvent(SevError, "ErrorClearingDatabaseAfterTest").error(e);
@ -1559,7 +1559,7 @@ ACTOR Future<Void> runTests(Reference<AsyncVar<Optional<struct ClusterController
} }
when(wait(cc->onChange())) {} when(wait(cc->onChange())) {}
when(wait(testerTimeout)) { when(wait(testerTimeout)) {
TraceEvent(SevError, "TesterRecruitmentTimeout"); TraceEvent(SevError, "TesterRecruitmentTimeout").log();
throw timed_out(); throw timed_out();
} }
} }

View File

@ -848,7 +848,7 @@ bool checkHighMemory(int64_t threshold, bool* error) {
uint64_t page_size = sysconf(_SC_PAGESIZE); uint64_t page_size = sysconf(_SC_PAGESIZE);
int fd = open("/proc/self/statm", O_RDONLY | O_CLOEXEC); int fd = open("/proc/self/statm", O_RDONLY | O_CLOEXEC);
if (fd < 0) { if (fd < 0) {
TraceEvent("OpenStatmFileFailure"); TraceEvent("OpenStatmFileFailure").log();
*error = true; *error = true;
return false; return false;
} }
@ -857,7 +857,7 @@ bool checkHighMemory(int64_t threshold, bool* error) {
char stat_buf[buf_sz]; char stat_buf[buf_sz];
ssize_t stat_nread = read(fd, stat_buf, buf_sz); ssize_t stat_nread = read(fd, stat_buf, buf_sz);
if (stat_nread < 0) { if (stat_nread < 0) {
TraceEvent("ReadStatmFileFailure"); TraceEvent("ReadStatmFileFailure").log();
*error = true; *error = true;
return false; return false;
} }
@ -869,7 +869,7 @@ bool checkHighMemory(int64_t threshold, bool* error) {
return true; return true;
} }
#else #else
TraceEvent("CheckHighMemoryUnsupported"); TraceEvent("CheckHighMemoryUnsupported").log();
*error = true; *error = true;
#endif #endif
return false; return false;
@ -926,7 +926,7 @@ ACTOR Future<Void> storageServerRollbackRebooter(std::set<std::pair<UID, KeyValu
else if (e.getError().code() != error_code_please_reboot) else if (e.getError().code() != error_code_please_reboot)
throw e.getError(); throw e.getError();
TraceEvent("StorageServerRequestedReboot", id); TraceEvent("StorageServerRequestedReboot", id).log();
StorageServerInterface recruited; StorageServerInterface recruited;
recruited.uniqueID = id; recruited.uniqueID = id;
@ -964,7 +964,7 @@ ACTOR Future<Void> storageCacheRollbackRebooter(Future<Void> prevStorageCache,
loop { loop {
ErrorOr<Void> e = wait(errorOr(prevStorageCache)); ErrorOr<Void> e = wait(errorOr(prevStorageCache));
if (!e.isError()) { if (!e.isError()) {
TraceEvent("StorageCacheRequestedReboot1", id); TraceEvent("StorageCacheRequestedReboot1", id).log();
return Void(); return Void();
} else if (e.getError().code() != error_code_please_reboot && } else if (e.getError().code() != error_code_please_reboot &&
e.getError().code() != error_code_worker_removed) { e.getError().code() != error_code_worker_removed) {
@ -972,7 +972,7 @@ ACTOR Future<Void> storageCacheRollbackRebooter(Future<Void> prevStorageCache,
throw e.getError(); throw e.getError();
} }
TraceEvent("StorageCacheRequestedReboot", id); TraceEvent("StorageCacheRequestedReboot", id).log();
StorageServerInterface recruited; StorageServerInterface recruited;
recruited.uniqueID = deterministicRandom()->randomUniqueID(); // id; recruited.uniqueID = deterministicRandom()->randomUniqueID(); // id;
@ -1504,7 +1504,7 @@ ACTOR Future<Void> workerServer(Reference<ClusterConnectionFile> connFile,
} }
throw please_reboot(); throw please_reboot();
} else { } else {
TraceEvent("ProcessReboot"); TraceEvent("ProcessReboot").log();
ASSERT(!rebootReq.deleteData); ASSERT(!rebootReq.deleteData);
flushAndExit(0); flushAndExit(0);
} }
@ -2017,7 +2017,7 @@ ACTOR Future<Void> printOnFirstConnected(Reference<AsyncVar<Optional<ClusterInte
ci->get().get().openDatabase.getEndpoint(), FailureStatus(false)) ci->get().get().openDatabase.getEndpoint(), FailureStatus(false))
: Never())) { : Never())) {
printf("FDBD joined cluster.\n"); printf("FDBD joined cluster.\n");
TraceEvent("FDBDConnected"); TraceEvent("FDBDConnected").log();
return Void(); return Void();
} }
when(wait(ci->onChange())) {} when(wait(ci->onChange())) {}

View File

@ -480,7 +480,7 @@ public:
TraceEvent("AtomicOpCorrectnessApiWorkload").detail("OpType", "MIN"); TraceEvent("AtomicOpCorrectnessApiWorkload").detail("OpType", "MIN");
// API Version 500 // API Version 500
setApiVersion(&cx, 500); setApiVersion(&cx, 500);
TraceEvent(SevInfo, "Running Atomic Op Min Correctness Test Api Version 500"); TraceEvent(SevInfo, "Running Atomic Op Min Correctness Test Api Version 500").log();
wait(self->testAtomicOpUnsetOnNonExistingKey(cx, self, MutationRef::Min, key)); wait(self->testAtomicOpUnsetOnNonExistingKey(cx, self, MutationRef::Min, key));
wait(self->testAtomicOpApi( wait(self->testAtomicOpApi(
cx, self, MutationRef::Min, key, [](uint64_t val1, uint64_t val2) { return val1 < val2 ? val1 : val2; })); cx, self, MutationRef::Min, key, [](uint64_t val1, uint64_t val2) { return val1 < val2 ? val1 : val2; }));
@ -513,7 +513,7 @@ public:
ACTOR Future<Void> testMax(Database cx, AtomicOpsApiCorrectnessWorkload* self) { ACTOR Future<Void> testMax(Database cx, AtomicOpsApiCorrectnessWorkload* self) {
state Key key = self->getTestKey("test_key_max_"); state Key key = self->getTestKey("test_key_max_");
TraceEvent(SevInfo, "Running Atomic Op MAX Correctness Current Api Version"); TraceEvent(SevInfo, "Running Atomic Op MAX Correctness Current Api Version").log();
wait(self->testAtomicOpSetOnNonExistingKey(cx, self, MutationRef::Max, key)); wait(self->testAtomicOpSetOnNonExistingKey(cx, self, MutationRef::Max, key));
wait(self->testAtomicOpApi( wait(self->testAtomicOpApi(
cx, self, MutationRef::Max, key, [](uint64_t val1, uint64_t val2) { return val1 > val2 ? val1 : val2; })); cx, self, MutationRef::Max, key, [](uint64_t val1, uint64_t val2) { return val1 > val2 ? val1 : val2; }));
@ -530,7 +530,7 @@ public:
TraceEvent("AtomicOpCorrectnessApiWorkload").detail("OpType", "AND"); TraceEvent("AtomicOpCorrectnessApiWorkload").detail("OpType", "AND");
// API Version 500 // API Version 500
setApiVersion(&cx, 500); setApiVersion(&cx, 500);
TraceEvent(SevInfo, "Running Atomic Op AND Correctness Test Api Version 500"); TraceEvent(SevInfo, "Running Atomic Op AND Correctness Test Api Version 500").log();
wait(self->testAtomicOpUnsetOnNonExistingKey(cx, self, MutationRef::And, key)); wait(self->testAtomicOpUnsetOnNonExistingKey(cx, self, MutationRef::And, key));
wait(self->testAtomicOpApi( wait(self->testAtomicOpApi(
cx, self, MutationRef::And, key, [](uint64_t val1, uint64_t val2) { return val1 & val2; })); cx, self, MutationRef::And, key, [](uint64_t val1, uint64_t val2) { return val1 & val2; }));
@ -563,7 +563,7 @@ public:
ACTOR Future<Void> testOr(Database cx, AtomicOpsApiCorrectnessWorkload* self) { ACTOR Future<Void> testOr(Database cx, AtomicOpsApiCorrectnessWorkload* self) {
state Key key = self->getTestKey("test_key_or_"); state Key key = self->getTestKey("test_key_or_");
TraceEvent(SevInfo, "Running Atomic Op OR Correctness Current Api Version"); TraceEvent(SevInfo, "Running Atomic Op OR Correctness Current Api Version").log();
wait(self->testAtomicOpSetOnNonExistingKey(cx, self, MutationRef::Or, key)); wait(self->testAtomicOpSetOnNonExistingKey(cx, self, MutationRef::Or, key));
wait(self->testAtomicOpApi( wait(self->testAtomicOpApi(
cx, self, MutationRef::Or, key, [](uint64_t val1, uint64_t val2) { return val1 | val2; })); cx, self, MutationRef::Or, key, [](uint64_t val1, uint64_t val2) { return val1 | val2; }));
@ -576,7 +576,7 @@ public:
ACTOR Future<Void> testXor(Database cx, AtomicOpsApiCorrectnessWorkload* self) { ACTOR Future<Void> testXor(Database cx, AtomicOpsApiCorrectnessWorkload* self) {
state Key key = self->getTestKey("test_key_xor_"); state Key key = self->getTestKey("test_key_xor_");
TraceEvent(SevInfo, "Running Atomic Op XOR Correctness Current Api Version"); TraceEvent(SevInfo, "Running Atomic Op XOR Correctness Current Api Version").log();
wait(self->testAtomicOpSetOnNonExistingKey(cx, self, MutationRef::Xor, key)); wait(self->testAtomicOpSetOnNonExistingKey(cx, self, MutationRef::Xor, key));
wait(self->testAtomicOpApi( wait(self->testAtomicOpApi(
cx, self, MutationRef::Xor, key, [](uint64_t val1, uint64_t val2) { return val1 ^ val2; })); cx, self, MutationRef::Xor, key, [](uint64_t val1, uint64_t val2) { return val1 ^ val2; }));
@ -588,7 +588,7 @@ public:
ACTOR Future<Void> testAdd(Database cx, AtomicOpsApiCorrectnessWorkload* self) { ACTOR Future<Void> testAdd(Database cx, AtomicOpsApiCorrectnessWorkload* self) {
state Key key = self->getTestKey("test_key_add_"); state Key key = self->getTestKey("test_key_add_");
TraceEvent(SevInfo, "Running Atomic Op ADD Correctness Current Api Version"); TraceEvent(SevInfo, "Running Atomic Op ADD Correctness Current Api Version").log();
wait(self->testAtomicOpSetOnNonExistingKey(cx, self, MutationRef::AddValue, key)); wait(self->testAtomicOpSetOnNonExistingKey(cx, self, MutationRef::AddValue, key));
wait(self->testAtomicOpApi( wait(self->testAtomicOpApi(
cx, self, MutationRef::AddValue, key, [](uint64_t val1, uint64_t val2) { return val1 + val2; })); cx, self, MutationRef::AddValue, key, [](uint64_t val1, uint64_t val2) { return val1 + val2; }));
@ -601,7 +601,7 @@ public:
ACTOR Future<Void> testCompareAndClear(Database cx, AtomicOpsApiCorrectnessWorkload* self) { ACTOR Future<Void> testCompareAndClear(Database cx, AtomicOpsApiCorrectnessWorkload* self) {
state Key key = self->getTestKey("test_key_compare_and_clear_"); state Key key = self->getTestKey("test_key_compare_and_clear_");
TraceEvent(SevInfo, "Running Atomic Op COMPARE_AND_CLEAR Correctness Current Api Version"); TraceEvent(SevInfo, "Running Atomic Op COMPARE_AND_CLEAR Correctness Current Api Version").log();
wait(self->testCompareAndClearAtomicOpApi(cx, self, key, true)); wait(self->testCompareAndClearAtomicOpApi(cx, self, key, true));
wait(self->testCompareAndClearAtomicOpApi(cx, self, key, false)); wait(self->testCompareAndClearAtomicOpApi(cx, self, key, false));
return Void(); return Void();
@ -610,7 +610,7 @@ public:
ACTOR Future<Void> testByteMin(Database cx, AtomicOpsApiCorrectnessWorkload* self) { ACTOR Future<Void> testByteMin(Database cx, AtomicOpsApiCorrectnessWorkload* self) {
state Key key = self->getTestKey("test_key_byte_min_"); state Key key = self->getTestKey("test_key_byte_min_");
TraceEvent(SevInfo, "Running Atomic Op BYTE_MIN Correctness Current Api Version"); TraceEvent(SevInfo, "Running Atomic Op BYTE_MIN Correctness Current Api Version").log();
wait(self->testAtomicOpSetOnNonExistingKey(cx, self, MutationRef::ByteMin, key)); wait(self->testAtomicOpSetOnNonExistingKey(cx, self, MutationRef::ByteMin, key));
wait(self->testAtomicOpApi(cx, self, MutationRef::ByteMin, key, [](uint64_t val1, uint64_t val2) { wait(self->testAtomicOpApi(cx, self, MutationRef::ByteMin, key, [](uint64_t val1, uint64_t val2) {
return StringRef((const uint8_t*)&val1, sizeof(val1)) < StringRef((const uint8_t*)&val2, sizeof(val2)) return StringRef((const uint8_t*)&val1, sizeof(val1)) < StringRef((const uint8_t*)&val2, sizeof(val2))
@ -626,7 +626,7 @@ public:
ACTOR Future<Void> testByteMax(Database cx, AtomicOpsApiCorrectnessWorkload* self) { ACTOR Future<Void> testByteMax(Database cx, AtomicOpsApiCorrectnessWorkload* self) {
state Key key = self->getTestKey("test_key_byte_max_"); state Key key = self->getTestKey("test_key_byte_max_");
TraceEvent(SevInfo, "Running Atomic Op BYTE_MAX Correctness Current Api Version"); TraceEvent(SevInfo, "Running Atomic Op BYTE_MAX Correctness Current Api Version").log();
wait(self->testAtomicOpSetOnNonExistingKey(cx, self, MutationRef::ByteMax, key)); wait(self->testAtomicOpSetOnNonExistingKey(cx, self, MutationRef::ByteMax, key));
wait(self->testAtomicOpApi(cx, self, MutationRef::ByteMax, key, [](uint64_t val1, uint64_t val2) { wait(self->testAtomicOpApi(cx, self, MutationRef::ByteMax, key, [](uint64_t val1, uint64_t val2) {
return StringRef((const uint8_t*)&val1, sizeof(val1)) > StringRef((const uint8_t*)&val2, sizeof(val2)) return StringRef((const uint8_t*)&val1, sizeof(val1)) > StringRef((const uint8_t*)&val2, sizeof(val2))

View File

@ -104,14 +104,14 @@ struct AtomicRestoreWorkload : TestWorkload {
throw; throw;
} }
TraceEvent("AtomicRestore_Wait"); TraceEvent("AtomicRestore_Wait").log();
wait(success(backupAgent.waitBackup(cx, BackupAgentBase::getDefaultTagName(), StopWhenDone::False))); wait(success(backupAgent.waitBackup(cx, BackupAgentBase::getDefaultTagName(), StopWhenDone::False)));
TraceEvent("AtomicRestore_BackupStart"); TraceEvent("AtomicRestore_BackupStart").log();
wait(delay(self->restoreAfter * deterministicRandom()->random01())); wait(delay(self->restoreAfter * deterministicRandom()->random01()));
TraceEvent("AtomicRestore_RestoreStart"); TraceEvent("AtomicRestore_RestoreStart").log();
if (self->fastRestore) { // New fast parallel restore if (self->fastRestore) { // New fast parallel restore
TraceEvent(SevInfo, "AtomicParallelRestore"); TraceEvent(SevInfo, "AtomicParallelRestore").log();
wait(backupAgent.atomicParallelRestore( wait(backupAgent.atomicParallelRestore(
cx, BackupAgentBase::getDefaultTag(), self->backupRanges, self->addPrefix, self->removePrefix)); cx, BackupAgentBase::getDefaultTag(), self->backupRanges, self->addPrefix, self->removePrefix));
} else { // Old style restore } else { // Old style restore
@ -141,7 +141,7 @@ struct AtomicRestoreWorkload : TestWorkload {
g_simulator.backupAgents = ISimulator::BackupAgentType::NoBackupAgents; g_simulator.backupAgents = ISimulator::BackupAgentType::NoBackupAgents;
} }
TraceEvent("AtomicRestore_Done"); TraceEvent("AtomicRestore_Done").log();
return Void(); return Void();
} }
}; };

View File

@ -53,7 +53,7 @@ struct AtomicSwitchoverWorkload : TestWorkload {
ACTOR static Future<Void> _setup(Database cx, AtomicSwitchoverWorkload* self) { ACTOR static Future<Void> _setup(Database cx, AtomicSwitchoverWorkload* self) {
state DatabaseBackupAgent backupAgent(cx); state DatabaseBackupAgent backupAgent(cx);
try { try {
TraceEvent("AS_Submit1"); TraceEvent("AS_Submit1").log();
wait(backupAgent.submitBackup(self->extraDB, wait(backupAgent.submitBackup(self->extraDB,
BackupAgentBase::getDefaultTag(), BackupAgentBase::getDefaultTag(),
self->backupRanges, self->backupRanges,
@ -61,7 +61,7 @@ struct AtomicSwitchoverWorkload : TestWorkload {
StringRef(), StringRef(),
StringRef(), StringRef(),
LockDB::True)); LockDB::True));
TraceEvent("AS_Submit2"); TraceEvent("AS_Submit2").log();
} catch (Error& e) { } catch (Error& e) {
if (e.code() != error_code_backup_duplicate) if (e.code() != error_code_backup_duplicate)
throw; throw;
@ -167,27 +167,27 @@ struct AtomicSwitchoverWorkload : TestWorkload {
state DatabaseBackupAgent backupAgent(cx); state DatabaseBackupAgent backupAgent(cx);
state DatabaseBackupAgent restoreTool(self->extraDB); state DatabaseBackupAgent restoreTool(self->extraDB);
TraceEvent("AS_Wait1"); TraceEvent("AS_Wait1").log();
wait(success(backupAgent.waitBackup(self->extraDB, BackupAgentBase::getDefaultTag(), StopWhenDone::False))); wait(success(backupAgent.waitBackup(self->extraDB, BackupAgentBase::getDefaultTag(), StopWhenDone::False)));
TraceEvent("AS_Ready1"); TraceEvent("AS_Ready1").log();
wait(delay(deterministicRandom()->random01() * self->switch1delay)); wait(delay(deterministicRandom()->random01() * self->switch1delay));
TraceEvent("AS_Switch1"); TraceEvent("AS_Switch1").log();
wait(backupAgent.atomicSwitchover( wait(backupAgent.atomicSwitchover(
self->extraDB, BackupAgentBase::getDefaultTag(), self->backupRanges, StringRef(), StringRef())); self->extraDB, BackupAgentBase::getDefaultTag(), self->backupRanges, StringRef(), StringRef()));
TraceEvent("AS_Wait2"); TraceEvent("AS_Wait2").log();
wait(success(restoreTool.waitBackup(cx, BackupAgentBase::getDefaultTag(), StopWhenDone::False))); wait(success(restoreTool.waitBackup(cx, BackupAgentBase::getDefaultTag(), StopWhenDone::False)));
TraceEvent("AS_Ready2"); TraceEvent("AS_Ready2").log();
wait(delay(deterministicRandom()->random01() * self->switch2delay)); wait(delay(deterministicRandom()->random01() * self->switch2delay));
TraceEvent("AS_Switch2"); TraceEvent("AS_Switch2").log();
wait(restoreTool.atomicSwitchover( wait(restoreTool.atomicSwitchover(
cx, BackupAgentBase::getDefaultTag(), self->backupRanges, StringRef(), StringRef())); cx, BackupAgentBase::getDefaultTag(), self->backupRanges, StringRef(), StringRef()));
TraceEvent("AS_Wait3"); TraceEvent("AS_Wait3").log();
wait(success(backupAgent.waitBackup(self->extraDB, BackupAgentBase::getDefaultTag(), StopWhenDone::False))); wait(success(backupAgent.waitBackup(self->extraDB, BackupAgentBase::getDefaultTag(), StopWhenDone::False)));
TraceEvent("AS_Ready3"); TraceEvent("AS_Ready3").log();
wait(delay(deterministicRandom()->random01() * self->stopDelay)); wait(delay(deterministicRandom()->random01() * self->stopDelay));
TraceEvent("AS_Abort"); TraceEvent("AS_Abort").log();
wait(backupAgent.abortBackup(self->extraDB, BackupAgentBase::getDefaultTag())); wait(backupAgent.abortBackup(self->extraDB, BackupAgentBase::getDefaultTag()));
TraceEvent("AS_Done"); TraceEvent("AS_Done").log();
// SOMEDAY: Remove after backup agents can exist quiescently // SOMEDAY: Remove after backup agents can exist quiescently
if (g_simulator.drAgents == ISimulator::BackupAgentType::BackupToDB) { if (g_simulator.drAgents == ISimulator::BackupAgentType::BackupToDB) {

View File

@ -384,7 +384,7 @@ struct BackupAndParallelRestoreCorrectnessWorkload : TestWorkload {
Key(), Key(),
Key(), Key(),
self->locked))); self->locked)));
TraceEvent(SevError, "BARW_RestoreAllowedOverwrittingDatabase", randomID); TraceEvent(SevError, "BARW_RestoreAllowedOverwrittingDatabase", randomID).log();
ASSERT(false); ASSERT(false);
} catch (Error& e) { } catch (Error& e) {
if (e.code() != error_code_restore_destination_not_empty) { if (e.code() != error_code_restore_destination_not_empty) {

View File

@ -430,7 +430,7 @@ struct BackupAndRestoreCorrectnessWorkload : TestWorkload {
Key(), Key(),
Key(), Key(),
self->locked))); self->locked)));
TraceEvent(SevError, "BARW_RestoreAllowedOverwrittingDatabase", randomID); TraceEvent(SevError, "BARW_RestoreAllowedOverwrittingDatabase", randomID).log();
ASSERT(false); ASSERT(false);
} catch (Error& e) { } catch (Error& e) {
if (e.code() != error_code_restore_destination_not_empty) { if (e.code() != error_code_restore_destination_not_empty) {

View File

@ -52,7 +52,7 @@ struct BackupToDBAbort : TestWorkload {
ACTOR static Future<Void> _setup(BackupToDBAbort* self, Database cx) { ACTOR static Future<Void> _setup(BackupToDBAbort* self, Database cx) {
state DatabaseBackupAgent backupAgent(cx); state DatabaseBackupAgent backupAgent(cx);
try { try {
TraceEvent("BDBA_Submit1"); TraceEvent("BDBA_Submit1").log();
wait(backupAgent.submitBackup(self->extraDB, wait(backupAgent.submitBackup(self->extraDB,
BackupAgentBase::getDefaultTag(), BackupAgentBase::getDefaultTag(),
self->backupRanges, self->backupRanges,
@ -60,7 +60,7 @@ struct BackupToDBAbort : TestWorkload {
StringRef(), StringRef(),
StringRef(), StringRef(),
LockDB::True)); LockDB::True));
TraceEvent("BDBA_Submit2"); TraceEvent("BDBA_Submit2").log();
} catch (Error& e) { } catch (Error& e) {
if (e.code() != error_code_backup_duplicate) if (e.code() != error_code_backup_duplicate)
throw; throw;
@ -79,15 +79,15 @@ struct BackupToDBAbort : TestWorkload {
TraceEvent("BDBA_Start").detail("Delay", self->abortDelay); TraceEvent("BDBA_Start").detail("Delay", self->abortDelay);
wait(delay(self->abortDelay)); wait(delay(self->abortDelay));
TraceEvent("BDBA_Wait"); TraceEvent("BDBA_Wait").log();
wait(success(backupAgent.waitBackup(self->extraDB, BackupAgentBase::getDefaultTag(), StopWhenDone::False))); wait(success(backupAgent.waitBackup(self->extraDB, BackupAgentBase::getDefaultTag(), StopWhenDone::False)));
TraceEvent("BDBA_Lock"); TraceEvent("BDBA_Lock").log();
wait(lockDatabase(cx, self->lockid)); wait(lockDatabase(cx, self->lockid));
TraceEvent("BDBA_Abort"); TraceEvent("BDBA_Abort").log();
wait(backupAgent.abortBackup(self->extraDB, BackupAgentBase::getDefaultTag())); wait(backupAgent.abortBackup(self->extraDB, BackupAgentBase::getDefaultTag()));
TraceEvent("BDBA_Unlock"); TraceEvent("BDBA_Unlock").log();
wait(backupAgent.unlockBackup(self->extraDB, BackupAgentBase::getDefaultTag())); wait(backupAgent.unlockBackup(self->extraDB, BackupAgentBase::getDefaultTag()));
TraceEvent("BDBA_End"); TraceEvent("BDBA_End").log();
// SOMEDAY: Remove after backup agents can exist quiescently // SOMEDAY: Remove after backup agents can exist quiescently
if (g_simulator.drAgents == ISimulator::BackupAgentType::BackupToDB) { if (g_simulator.drAgents == ISimulator::BackupAgentType::BackupToDB) {
@ -98,7 +98,7 @@ struct BackupToDBAbort : TestWorkload {
} }
ACTOR static Future<bool> _check(BackupToDBAbort* self, Database cx) { ACTOR static Future<bool> _check(BackupToDBAbort* self, Database cx) {
TraceEvent("BDBA_UnlockPrimary"); TraceEvent("BDBA_UnlockPrimary").log();
// Too much of the tester framework expects the primary database to be unlocked, so we unlock it // Too much of the tester framework expects the primary database to be unlocked, so we unlock it
// once all of the workloads have finished. // once all of the workloads have finished.
wait(unlockDatabase(cx, self->lockid)); wait(unlockDatabase(cx, self->lockid));

View File

@ -78,7 +78,7 @@ struct BackupToDBUpgradeWorkload : TestWorkload {
auto extraFile = makeReference<ClusterConnectionFile>(*g_simulator.extraDB); auto extraFile = makeReference<ClusterConnectionFile>(*g_simulator.extraDB);
extraDB = Database::createDatabase(extraFile, -1); extraDB = Database::createDatabase(extraFile, -1);
TraceEvent("DRU_Start"); TraceEvent("DRU_Start").log();
} }
std::string description() const override { return "BackupToDBUpgrade"; } std::string description() const override { return "BackupToDBUpgrade"; }
@ -459,7 +459,7 @@ struct BackupToDBUpgradeWorkload : TestWorkload {
} }
} }
TraceEvent("DRU_DiffRanges"); TraceEvent("DRU_DiffRanges").log();
wait(diffRanges(prevBackupRanges, self->backupPrefix, cx, self->extraDB)); wait(diffRanges(prevBackupRanges, self->backupPrefix, cx, self->extraDB));
// abort backup // abort backup

View File

@ -284,7 +284,7 @@ Future<Void> bulkSetup(Database cx,
wait(delay(1.0)); wait(delay(1.0));
} else { } else {
wait(delay(1.0)); wait(delay(1.0));
TraceEvent("DynamicWarmingDone"); TraceEvent("DynamicWarmingDone").log();
break; break;
} }
} }

View File

@ -65,9 +65,9 @@ struct ChangeConfigWorkload : TestWorkload {
// It is not safe to allow automatic failover to a region which is not fully replicated, // It is not safe to allow automatic failover to a region which is not fully replicated,
// so wait for both regions to be fully replicated before enabling failover // so wait for both regions to be fully replicated before enabling failover
wait(success(changeConfig(extraDB, g_simulator.startingDisabledConfiguration, true))); wait(success(changeConfig(extraDB, g_simulator.startingDisabledConfiguration, true)));
TraceEvent("WaitForReplicasExtra"); TraceEvent("WaitForReplicasExtra").log();
wait(waitForFullReplication(extraDB)); wait(waitForFullReplication(extraDB));
TraceEvent("WaitForReplicasExtraEnd"); TraceEvent("WaitForReplicasExtraEnd").log();
} }
wait(success(changeConfig(extraDB, self->configMode, true))); wait(success(changeConfig(extraDB, self->configMode, true)));
} }
@ -99,9 +99,9 @@ struct ChangeConfigWorkload : TestWorkload {
// It is not safe to allow automatic failover to a region which is not fully replicated, // It is not safe to allow automatic failover to a region which is not fully replicated,
// so wait for both regions to be fully replicated before enabling failover // so wait for both regions to be fully replicated before enabling failover
wait(success(changeConfig(cx, g_simulator.startingDisabledConfiguration, true))); wait(success(changeConfig(cx, g_simulator.startingDisabledConfiguration, true)));
TraceEvent("WaitForReplicas"); TraceEvent("WaitForReplicas").log();
wait(waitForFullReplication(cx)); wait(waitForFullReplication(cx));
TraceEvent("WaitForReplicasEnd"); TraceEvent("WaitForReplicasEnd").log();
} }
wait(success(changeConfig(cx, self->configMode, true))); wait(success(changeConfig(cx, self->configMode, true)));
} }

View File

@ -45,6 +45,7 @@ struct CommitBugWorkload : TestWorkload {
try { try {
tr.set(key, val1); tr.set(key, val1);
wait(tr.commit()); wait(tr.commit());
tr.reset();
break; break;
} catch (Error& e) { } catch (Error& e) {
TraceEvent("CommitBugSetVal1Error").error(e); TraceEvent("CommitBugSetVal1Error").error(e);
@ -57,6 +58,7 @@ struct CommitBugWorkload : TestWorkload {
try { try {
tr.set(key, val2); tr.set(key, val2);
wait(tr.commit()); wait(tr.commit());
tr.reset();
break; break;
} catch (Error& e) { } catch (Error& e) {
TraceEvent("CommitBugSetVal2Error").error(e); TraceEvent("CommitBugSetVal2Error").error(e);
@ -85,6 +87,7 @@ struct CommitBugWorkload : TestWorkload {
try { try {
tr.clear(key); tr.clear(key);
wait(tr.commit()); wait(tr.commit());
tr.reset();
break; break;
} catch (Error& e) { } catch (Error& e) {
TraceEvent("CommitBugClearValError").error(e); TraceEvent("CommitBugClearValError").error(e);

View File

@ -100,7 +100,7 @@ struct ConflictRangeWorkload : TestWorkload {
loop { loop {
state Transaction tr0(cx); state Transaction tr0(cx);
try { try {
TraceEvent("ConflictRangeReset"); TraceEvent("ConflictRangeReset").log();
insertedSet.clear(); insertedSet.clear();
if (self->testReadYourWrites) { if (self->testReadYourWrites) {

View File

@ -142,7 +142,7 @@ struct ConsistencyCheckWorkload : TestWorkload {
} }
Future<Void> start(Database const& cx) override { Future<Void> start(Database const& cx) override {
TraceEvent("ConsistencyCheck"); TraceEvent("ConsistencyCheck").log();
return _start(cx, this); return _start(cx, this);
} }
@ -186,10 +186,10 @@ struct ConsistencyCheckWorkload : TestWorkload {
ACTOR Future<Void> _start(Database cx, ConsistencyCheckWorkload* self) { ACTOR Future<Void> _start(Database cx, ConsistencyCheckWorkload* self) {
loop { loop {
while (self->suspendConsistencyCheck.get()) { while (self->suspendConsistencyCheck.get()) {
TraceEvent("ConsistencyCheck_Suspended"); TraceEvent("ConsistencyCheck_Suspended").log();
wait(self->suspendConsistencyCheck.onChange()); wait(self->suspendConsistencyCheck.onChange());
} }
TraceEvent("ConsistencyCheck_StartingOrResuming"); TraceEvent("ConsistencyCheck_StartingOrResuming").log();
choose { choose {
when(wait(self->runCheck(cx, self))) { when(wait(self->runCheck(cx, self))) {
if (!self->indefinite) if (!self->indefinite)
@ -222,7 +222,7 @@ struct ConsistencyCheckWorkload : TestWorkload {
} }
RangeResult res = wait(tr.getRange(configKeys, 1000)); RangeResult res = wait(tr.getRange(configKeys, 1000));
if (res.size() == 1000) { if (res.size() == 1000) {
TraceEvent("ConsistencyCheck_TooManyConfigOptions"); TraceEvent("ConsistencyCheck_TooManyConfigOptions").log();
self->testFailure("Read too many configuration options"); self->testFailure("Read too many configuration options");
} }
for (int i = 0; i < res.size(); i++) for (int i = 0; i < res.size(); i++)
@ -251,7 +251,7 @@ struct ConsistencyCheckWorkload : TestWorkload {
// the allowed maximum number of teams // the allowed maximum number of teams
bool teamCollectionValid = wait(getTeamCollectionValid(cx, self->dbInfo)); bool teamCollectionValid = wait(getTeamCollectionValid(cx, self->dbInfo));
if (!teamCollectionValid) { if (!teamCollectionValid) {
TraceEvent(SevError, "ConsistencyCheck_TooManyTeams"); TraceEvent(SevError, "ConsistencyCheck_TooManyTeams").log();
self->testFailure("The number of process or machine teams is larger than the allowed maximum " self->testFailure("The number of process or machine teams is larger than the allowed maximum "
"number of teams"); "number of teams");
} }
@ -1817,7 +1817,7 @@ struct ConsistencyCheckWorkload : TestWorkload {
self->testFailure("No storage server on worker"); self->testFailure("No storage server on worker");
return false; return false;
} else { } else {
TraceEvent(SevWarn, "ConsistencyCheck_TSSMissing"); TraceEvent(SevWarn, "ConsistencyCheck_TSSMissing").log();
} }
} }
@ -1992,7 +1992,7 @@ struct ConsistencyCheckWorkload : TestWorkload {
Optional<Value> currentKey = wait(tr.get(coordinatorsKey)); Optional<Value> currentKey = wait(tr.get(coordinatorsKey));
if (!currentKey.present()) { if (!currentKey.present()) {
TraceEvent("ConsistencyCheck_NoCoordinatorKey"); TraceEvent("ConsistencyCheck_NoCoordinatorKey").log();
return false; return false;
} }

View File

@ -93,7 +93,7 @@ struct CpuProfilerWorkload : TestWorkload {
if (!replies[i].get().present()) if (!replies[i].get().present())
self->success = false; self->success = false;
TraceEvent("DoneSignalingProfiler"); TraceEvent("DoneSignalingProfiler").log();
} }
return Void(); return Void();
@ -104,14 +104,14 @@ struct CpuProfilerWorkload : TestWorkload {
ACTOR Future<Void> _start(Database cx, CpuProfilerWorkload* self) { ACTOR Future<Void> _start(Database cx, CpuProfilerWorkload* self) {
wait(delay(self->initialDelay)); wait(delay(self->initialDelay));
if (self->clientId == 0) if (self->clientId == 0)
TraceEvent("SignalProfilerOn"); TraceEvent("SignalProfilerOn").log();
wait(timeoutError(self->updateProfiler(true, cx, self), 60.0)); wait(timeoutError(self->updateProfiler(true, cx, self), 60.0));
// If a duration was given, let the duration elapse and then shut the profiler off // If a duration was given, let the duration elapse and then shut the profiler off
if (self->duration > 0) { if (self->duration > 0) {
wait(delay(self->duration)); wait(delay(self->duration));
if (self->clientId == 0) if (self->clientId == 0)
TraceEvent("SignalProfilerOff"); TraceEvent("SignalProfilerOff").log();
wait(timeoutError(self->updateProfiler(false, cx, self), 60.0)); wait(timeoutError(self->updateProfiler(false, cx, self), 60.0));
} }
@ -124,7 +124,7 @@ struct CpuProfilerWorkload : TestWorkload {
// If no duration was given, then shut the profiler off now // If no duration was given, then shut the profiler off now
if (self->duration <= 0) { if (self->duration <= 0) {
if (self->clientId == 0) if (self->clientId == 0)
TraceEvent("SignalProfilerOff"); TraceEvent("SignalProfilerOff").log();
wait(timeoutError(self->updateProfiler(false, cx, self), 60.0)); wait(timeoutError(self->updateProfiler(false, cx, self), 60.0));
} }

View File

@ -104,7 +104,7 @@ struct CycleWorkload : TestWorkload {
state Transaction tr(cx); state Transaction tr(cx);
if (deterministicRandom()->random01() >= self->traceParentProbability) { if (deterministicRandom()->random01() >= self->traceParentProbability) {
state Span span("CycleClient"_loc); state Span span("CycleClient"_loc);
TraceEvent("CycleTracingTransaction", span.context); TraceEvent("CycleTracingTransaction", span.context).log();
tr.setOption(FDBTransactionOptions::SPAN_PARENT, tr.setOption(FDBTransactionOptions::SPAN_PARENT,
BinaryWriter::toValue(span.context, Unversioned())); BinaryWriter::toValue(span.context, Unversioned()));
} }
@ -154,7 +154,7 @@ struct CycleWorkload : TestWorkload {
} }
void logTestData(const VectorRef<KeyValueRef>& data) { void logTestData(const VectorRef<KeyValueRef>& data) {
TraceEvent("TestFailureDetail"); TraceEvent("TestFailureDetail").log();
int index = 0; int index = 0;
for (auto& entry : data) { for (auto& entry : data) {
TraceEvent("CurrentDataEntry") TraceEvent("CurrentDataEntry")

View File

@ -50,7 +50,7 @@ struct DDMetricsWorkload : TestWorkload {
try { try {
TraceEvent("DDMetricsWaiting").detail("StartDelay", self->startDelay); TraceEvent("DDMetricsWaiting").detail("StartDelay", self->startDelay);
wait(delay(self->startDelay)); wait(delay(self->startDelay));
TraceEvent("DDMetricsStarting"); TraceEvent("DDMetricsStarting").log();
state double startTime = now(); state double startTime = now();
loop { loop {
wait(delay(2.5)); wait(delay(2.5));

View File

@ -64,7 +64,7 @@ struct DifferentClustersSameRVWorkload : TestWorkload {
Future<bool> check(Database const& cx) override { Future<bool> check(Database const& cx) override {
if (clientId == 0 && !switchComplete) { if (clientId == 0 && !switchComplete) {
TraceEvent(SevError, "DifferentClustersSwitchNotComplete"); TraceEvent(SevError, "DifferentClustersSwitchNotComplete").log();
return false; return false;
} }
return true; return true;
@ -133,17 +133,17 @@ struct DifferentClustersSameRVWorkload : TestWorkload {
return Void(); return Void();
})); }));
wait(lockDatabase(self->originalDB, lockUid) && lockDatabase(self->extraDB, lockUid)); wait(lockDatabase(self->originalDB, lockUid) && lockDatabase(self->extraDB, lockUid));
TraceEvent("DifferentClusters_LockedDatabases"); TraceEvent("DifferentClusters_LockedDatabases").log();
std::pair<Version, Optional<Value>> read1 = wait(doRead(self->originalDB, self)); std::pair<Version, Optional<Value>> read1 = wait(doRead(self->originalDB, self));
state Version rv = read1.first; state Version rv = read1.first;
state Optional<Value> val1 = read1.second; state Optional<Value> val1 = read1.second;
wait(doWrite(self->extraDB, self->keyToRead, val1)); wait(doWrite(self->extraDB, self->keyToRead, val1));
TraceEvent("DifferentClusters_CopiedDatabase"); TraceEvent("DifferentClusters_CopiedDatabase").log();
wait(advanceVersion(self->extraDB, rv)); wait(advanceVersion(self->extraDB, rv));
TraceEvent("DifferentClusters_AdvancedVersion"); TraceEvent("DifferentClusters_AdvancedVersion").log();
wait(cx->switchConnectionFile( wait(cx->switchConnectionFile(
makeReference<ClusterConnectionFile>(self->extraDB->getConnectionFile()->getConnectionString()))); makeReference<ClusterConnectionFile>(self->extraDB->getConnectionFile()->getConnectionString())));
TraceEvent("DifferentClusters_SwitchedConnectionFile"); TraceEvent("DifferentClusters_SwitchedConnectionFile").log();
state Transaction tr(cx); state Transaction tr(cx);
tr.setVersion(rv); tr.setVersion(rv);
tr.setOption(FDBTransactionOptions::READ_LOCK_AWARE); tr.setOption(FDBTransactionOptions::READ_LOCK_AWARE);
@ -160,17 +160,17 @@ struct DifferentClustersSameRVWorkload : TestWorkload {
// that a storage server serves a read at |rv| even after the recovery caused by unlocking the database, and we // that a storage server serves a read at |rv| even after the recovery caused by unlocking the database, and we
// want to make that more likely for this test. So read at |rv| then unlock. // want to make that more likely for this test. So read at |rv| then unlock.
wait(unlockDatabase(self->extraDB, lockUid)); wait(unlockDatabase(self->extraDB, lockUid));
TraceEvent("DifferentClusters_UnlockedExtraDB"); TraceEvent("DifferentClusters_UnlockedExtraDB").log();
ASSERT(!watchFuture.isReady() || watchFuture.isError()); ASSERT(!watchFuture.isReady() || watchFuture.isError());
wait(doWrite(self->extraDB, self->keyToWatch, Optional<Value>{ LiteralStringRef("") })); wait(doWrite(self->extraDB, self->keyToWatch, Optional<Value>{ LiteralStringRef("") }));
TraceEvent("DifferentClusters_WaitingForWatch"); TraceEvent("DifferentClusters_WaitingForWatch").log();
try { try {
wait(timeoutError(watchFuture, (self->testDuration - self->switchAfter) / 2)); wait(timeoutError(watchFuture, (self->testDuration - self->switchAfter) / 2));
} catch (Error& e) { } catch (Error& e) {
TraceEvent("DifferentClusters_WatchError").error(e); TraceEvent("DifferentClusters_WatchError").error(e);
wait(tr.onError(e)); wait(tr.onError(e));
} }
TraceEvent("DifferentClusters_Done"); TraceEvent("DifferentClusters_Done").log();
self->switchComplete = true; self->switchComplete = true;
wait(unlockDatabase(self->originalDB, lockUid)); // So quietDatabase can finish wait(unlockDatabase(self->originalDB, lockUid)); // So quietDatabase can finish
return Void(); return Void();
@ -191,6 +191,7 @@ struct DifferentClustersSameRVWorkload : TestWorkload {
serializer(w, x); serializer(w, x);
tr.set(self->keyToRead, w.toValue()); tr.set(self->keyToRead, w.toValue());
wait(tr.commit()); wait(tr.commit());
tr.reset();
} catch (Error& e) { } catch (Error& e) {
wait(tr.onError(e)); wait(tr.onError(e));
} }

View File

@ -142,19 +142,19 @@ struct ExternalWorkload : TestWorkload, FDBWorkloadContext {
.detail("WorkloadName", wName); .detail("WorkloadName", wName);
library = loadLibrary(fullPath.c_str()); library = loadLibrary(fullPath.c_str());
if (library == nullptr) { if (library == nullptr) {
TraceEvent(SevError, "ExternalWorkloadLoadError"); TraceEvent(SevError, "ExternalWorkloadLoadError").log();
success = false; success = false;
return; return;
} }
workloadFactory = reinterpret_cast<decltype(workloadFactory)>(loadFunction(library, "workloadFactory")); workloadFactory = reinterpret_cast<decltype(workloadFactory)>(loadFunction(library, "workloadFactory"));
if (workloadFactory == nullptr) { if (workloadFactory == nullptr) {
TraceEvent(SevError, "ExternalFactoryNotFound"); TraceEvent(SevError, "ExternalFactoryNotFound").log();
success = false; success = false;
return; return;
} }
workloadImpl = (*workloadFactory)(FDBLoggerImpl::instance())->create(wName.toString()); workloadImpl = (*workloadFactory)(FDBLoggerImpl::instance())->create(wName.toString());
if (!workloadImpl) { if (!workloadImpl) {
TraceEvent(SevError, "WorkloadNotFound"); TraceEvent(SevError, "WorkloadNotFound").log();
success = false; success = false;
} }
workloadImpl->init(this); workloadImpl->init(this);

View File

@ -75,7 +75,7 @@ struct HealthMetricsApiWorkload : TestWorkload {
Future<bool> check(Database const& cx) override { Future<bool> check(Database const& cx) override {
if (healthMetricsStoppedUpdating) { if (healthMetricsStoppedUpdating) {
TraceEvent(SevError, "HealthMetricsStoppedUpdating"); TraceEvent(SevError, "HealthMetricsStoppedUpdating").log();
return false; return false;
} }
bool correctHealthMetricsState = true; bool correctHealthMetricsState = true;

View File

@ -92,11 +92,11 @@ struct IncrementalBackupWorkload : TestWorkload {
} }
loop { loop {
// Wait for backup container to be created and avoid race condition // Wait for backup container to be created and avoid race condition
TraceEvent("IBackupWaitContainer"); TraceEvent("IBackupWaitContainer").log();
wait(success(self->backupAgent.waitBackup( wait(success(self->backupAgent.waitBackup(
cx, self->tag.toString(), StopWhenDone::False, &backupContainer, &backupUID))); cx, self->tag.toString(), StopWhenDone::False, &backupContainer, &backupUID)));
if (!backupContainer.isValid()) { if (!backupContainer.isValid()) {
TraceEvent("IBackupCheckListContainersAttempt"); TraceEvent("IBackupCheckListContainersAttempt").log();
state std::vector<std::string> containers = state std::vector<std::string> containers =
wait(IBackupContainer::listContainers(self->backupDir.toString())); wait(IBackupContainer::listContainers(self->backupDir.toString()));
TraceEvent("IBackupCheckListContainersSuccess") TraceEvent("IBackupCheckListContainersSuccess")
@ -132,7 +132,7 @@ struct IncrementalBackupWorkload : TestWorkload {
} }
if (self->stopBackup) { if (self->stopBackup) {
try { try {
TraceEvent("IBackupDiscontinueBackup"); TraceEvent("IBackupDiscontinueBackup").log();
wait(self->backupAgent.discontinueBackup(cx, self->tag)); wait(self->backupAgent.discontinueBackup(cx, self->tag));
} catch (Error& e) { } catch (Error& e) {
TraceEvent("IBackupDiscontinueBackupException").error(e); TraceEvent("IBackupDiscontinueBackupException").error(e);
@ -148,7 +148,7 @@ struct IncrementalBackupWorkload : TestWorkload {
if (self->submitOnly) { if (self->submitOnly) {
Standalone<VectorRef<KeyRangeRef>> backupRanges; Standalone<VectorRef<KeyRangeRef>> backupRanges;
backupRanges.push_back_deep(backupRanges.arena(), normalKeys); backupRanges.push_back_deep(backupRanges.arena(), normalKeys);
TraceEvent("IBackupSubmitAttempt"); TraceEvent("IBackupSubmitAttempt").log();
try { try {
wait(self->backupAgent.submitBackup(cx, wait(self->backupAgent.submitBackup(cx,
self->backupDir, self->backupDir,
@ -165,7 +165,7 @@ struct IncrementalBackupWorkload : TestWorkload {
throw; throw;
} }
} }
TraceEvent("IBackupSubmitSuccess"); TraceEvent("IBackupSubmitSuccess").log();
} }
if (self->restoreOnly) { if (self->restoreOnly) {
if (self->clearBackupAgentKeys) { if (self->clearBackupAgentKeys) {
@ -189,7 +189,7 @@ struct IncrementalBackupWorkload : TestWorkload {
wait(success(self->backupAgent.waitBackup( wait(success(self->backupAgent.waitBackup(
cx, self->tag.toString(), StopWhenDone::False, &backupContainer, &backupUID))); cx, self->tag.toString(), StopWhenDone::False, &backupContainer, &backupUID)));
if (self->checkBeginVersion) { if (self->checkBeginVersion) {
TraceEvent("IBackupReadSystemKeys"); TraceEvent("IBackupReadSystemKeys").log();
state Reference<ReadYourWritesTransaction> tr(new ReadYourWritesTransaction(cx)); state Reference<ReadYourWritesTransaction> tr(new ReadYourWritesTransaction(cx));
loop { loop {
try { try {
@ -201,7 +201,7 @@ struct IncrementalBackupWorkload : TestWorkload {
.detail("WriteRecoveryValue", writeFlag.present() ? writeFlag.get().toString() : "N/A") .detail("WriteRecoveryValue", writeFlag.present() ? writeFlag.get().toString() : "N/A")
.detail("EndVersionValue", versionValue.present() ? versionValue.get().toString() : "N/A"); .detail("EndVersionValue", versionValue.present() ? versionValue.get().toString() : "N/A");
if (!versionValue.present()) { if (!versionValue.present()) {
TraceEvent("IBackupCheckSpecialKeysFailure"); TraceEvent("IBackupCheckSpecialKeysFailure").log();
// Snapshot failed to write to special keys, possibly due to snapshot itself failing // Snapshot failed to write to special keys, possibly due to snapshot itself failing
throw key_not_found(); throw key_not_found();
} }
@ -217,7 +217,7 @@ struct IncrementalBackupWorkload : TestWorkload {
} }
} }
} }
TraceEvent("IBackupStartListContainersAttempt"); TraceEvent("IBackupStartListContainersAttempt").log();
state std::vector<std::string> containers = state std::vector<std::string> containers =
wait(IBackupContainer::listContainers(self->backupDir.toString())); wait(IBackupContainer::listContainers(self->backupDir.toString()));
TraceEvent("IBackupStartListContainersSuccess") TraceEvent("IBackupStartListContainersSuccess")
@ -239,7 +239,7 @@ struct IncrementalBackupWorkload : TestWorkload {
OnlyApplyMutationLogs::True, OnlyApplyMutationLogs::True,
InconsistentSnapshotOnly::False, InconsistentSnapshotOnly::False,
beginVersion))); beginVersion)));
TraceEvent("IBackupRestoreSuccess"); TraceEvent("IBackupRestoreSuccess").log();
} }
return Void(); return Void();
} }

View File

@ -115,7 +115,7 @@ struct KVTest {
~KVTest() { close(); } ~KVTest() { close(); }
void close() { void close() {
if (store) { if (store) {
TraceEvent("KVTestDestroy"); TraceEvent("KVTestDestroy").log();
if (dispose) if (dispose)
store->dispose(); store->dispose();
else else
@ -373,7 +373,7 @@ ACTOR Future<Void> testKVStore(KVStoreTestWorkload* workload) {
state Error err; state Error err;
// wait( delay(1) ); // wait( delay(1) );
TraceEvent("GO"); TraceEvent("GO").log();
UID id = deterministicRandom()->randomUniqueID(); UID id = deterministicRandom()->randomUniqueID();
std::string fn = workload->filename.size() ? workload->filename : id.toString(); std::string fn = workload->filename.size() ? workload->filename : id.toString();

View File

@ -56,11 +56,11 @@ struct KillRegionWorkload : TestWorkload {
void getMetrics(vector<PerfMetric>& m) override {} void getMetrics(vector<PerfMetric>& m) override {}
ACTOR static Future<Void> _setup(KillRegionWorkload* self, Database cx) { ACTOR static Future<Void> _setup(KillRegionWorkload* self, Database cx) {
TraceEvent("ForceRecovery_DisablePrimaryBegin"); TraceEvent("ForceRecovery_DisablePrimaryBegin").log();
wait(success(changeConfig(cx, g_simulator.disablePrimary, true))); wait(success(changeConfig(cx, g_simulator.disablePrimary, true)));
TraceEvent("ForceRecovery_WaitForRemote"); TraceEvent("ForceRecovery_WaitForRemote").log();
wait(waitForPrimaryDC(cx, LiteralStringRef("1"))); wait(waitForPrimaryDC(cx, LiteralStringRef("1")));
TraceEvent("ForceRecovery_DisablePrimaryComplete"); TraceEvent("ForceRecovery_DisablePrimaryComplete").log();
return Void(); return Void();
} }
@ -74,14 +74,14 @@ struct KillRegionWorkload : TestWorkload {
ACTOR static Future<Void> killRegion(KillRegionWorkload* self, Database cx) { ACTOR static Future<Void> killRegion(KillRegionWorkload* self, Database cx) {
ASSERT(g_network->isSimulated()); ASSERT(g_network->isSimulated());
if (deterministicRandom()->random01() < 0.5) { if (deterministicRandom()->random01() < 0.5) {
TraceEvent("ForceRecovery_DisableRemoteBegin"); TraceEvent("ForceRecovery_DisableRemoteBegin").log();
wait(success(changeConfig(cx, g_simulator.disableRemote, true))); wait(success(changeConfig(cx, g_simulator.disableRemote, true)));
TraceEvent("ForceRecovery_WaitForPrimary"); TraceEvent("ForceRecovery_WaitForPrimary").log();
wait(waitForPrimaryDC(cx, LiteralStringRef("0"))); wait(waitForPrimaryDC(cx, LiteralStringRef("0")));
TraceEvent("ForceRecovery_DisableRemoteComplete"); TraceEvent("ForceRecovery_DisableRemoteComplete").log();
wait(success(changeConfig(cx, g_simulator.originalRegions, true))); wait(success(changeConfig(cx, g_simulator.originalRegions, true)));
} }
TraceEvent("ForceRecovery_Wait"); TraceEvent("ForceRecovery_Wait").log();
wait(delay(deterministicRandom()->random01() * self->testDuration)); wait(delay(deterministicRandom()->random01() * self->testDuration));
g_simulator.killDataCenter(LiteralStringRef("0"), g_simulator.killDataCenter(LiteralStringRef("0"),
@ -97,11 +97,11 @@ struct KillRegionWorkload : TestWorkload {
: ISimulator::RebootAndDelete, : ISimulator::RebootAndDelete,
true); true);
TraceEvent("ForceRecovery_Begin"); TraceEvent("ForceRecovery_Begin").log();
wait(forceRecovery(cx->getConnectionFile(), LiteralStringRef("1"))); wait(forceRecovery(cx->getConnectionFile(), LiteralStringRef("1")));
TraceEvent("ForceRecovery_UsableRegions"); TraceEvent("ForceRecovery_UsableRegions").log();
DatabaseConfiguration conf = wait(getDatabaseConfiguration(cx)); DatabaseConfiguration conf = wait(getDatabaseConfiguration(cx));
@ -119,7 +119,7 @@ struct KillRegionWorkload : TestWorkload {
wait(success(changeConfig(cx, "usable_regions=1", true))); wait(success(changeConfig(cx, "usable_regions=1", true)));
} }
TraceEvent("ForceRecovery_Complete"); TraceEvent("ForceRecovery_Complete").log();
return Void(); return Void();
} }

View File

@ -54,7 +54,7 @@ struct LogMetricsWorkload : TestWorkload {
state BinaryWriter br(Unversioned()); state BinaryWriter br(Unversioned());
vector<WorkerDetails> workers = wait(getWorkers(self->dbInfo)); vector<WorkerDetails> workers = wait(getWorkers(self->dbInfo));
// vector<Future<Void>> replies; // vector<Future<Void>> replies;
TraceEvent("RateChangeTrigger"); TraceEvent("RateChangeTrigger").log();
SetMetricsLogRateRequest req(rate); SetMetricsLogRateRequest req(rate);
for (int i = 0; i < workers.size(); i++) { for (int i = 0; i < workers.size(); i++) {
workers[i].interf.setMetricsRate.send(req); workers[i].interf.setMetricsRate.send(req);

View File

@ -77,7 +77,7 @@ struct LowLatencyWorkload : TestWorkload {
++self->operations; ++self->operations;
loop { loop {
try { try {
TraceEvent("StartLowLatencyTransaction"); TraceEvent("StartLowLatencyTransaction").log();
tr.setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE); tr.setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);
tr.setOption(FDBTransactionOptions::LOCK_AWARE); tr.setOption(FDBTransactionOptions::LOCK_AWARE);
if (doCommit) { if (doCommit) {

View File

@ -39,18 +39,18 @@ static std::set<int> const& normalAttritionErrors() {
ACTOR Future<bool> ignoreSSFailuresForDuration(Database cx, double duration) { ACTOR Future<bool> ignoreSSFailuresForDuration(Database cx, double duration) {
// duration doesn't matter since this won't timeout // duration doesn't matter since this won't timeout
TraceEvent("IgnoreSSFailureStart"); TraceEvent("IgnoreSSFailureStart").log();
wait(success(setHealthyZone(cx, ignoreSSFailuresZoneString, 0))); wait(success(setHealthyZone(cx, ignoreSSFailuresZoneString, 0)));
TraceEvent("IgnoreSSFailureWait"); TraceEvent("IgnoreSSFailureWait").log();
wait(delay(duration)); wait(delay(duration));
TraceEvent("IgnoreSSFailureClear"); TraceEvent("IgnoreSSFailureClear").log();
state Transaction tr(cx); state Transaction tr(cx);
loop { loop {
try { try {
tr.setOption(FDBTransactionOptions::LOCK_AWARE); tr.setOption(FDBTransactionOptions::LOCK_AWARE);
tr.clear(healthyZoneKey); tr.clear(healthyZoneKey);
wait(tr.commit()); wait(tr.commit());
TraceEvent("IgnoreSSFailureComplete"); TraceEvent("IgnoreSSFailureComplete").log();
return true; return true;
} catch (Error& e) { } catch (Error& e) {
wait(tr.onError(e)); wait(tr.onError(e));
@ -311,7 +311,7 @@ struct MachineAttritionWorkload : TestWorkload {
TEST(true); // Killing a machine TEST(true); // Killing a machine
wait(delay(delayBeforeKill)); wait(delay(delayBeforeKill));
TraceEvent("WorkerKillAfterDelay"); TraceEvent("WorkerKillAfterDelay").log();
if (self->waitForVersion) { if (self->waitForVersion) {
state Transaction tr(cx); state Transaction tr(cx);

View File

@ -30,7 +30,7 @@
struct RunRestoreWorkerWorkload : TestWorkload { struct RunRestoreWorkerWorkload : TestWorkload {
Future<Void> worker; Future<Void> worker;
RunRestoreWorkerWorkload(WorkloadContext const& wcx) : TestWorkload(wcx) { RunRestoreWorkerWorkload(WorkloadContext const& wcx) : TestWorkload(wcx) {
TraceEvent("RunRestoreWorkerWorkloadMX"); TraceEvent("RunRestoreWorkerWorkloadMX").log();
} }
std::string description() const override { return "RunRestoreWorkerWorkload"; } std::string description() const override { return "RunRestoreWorkerWorkload"; }

View File

@ -259,7 +259,7 @@ struct PingWorkload : TestWorkload {
// peers[i].payloadPing.getEndpoint().getPrimaryAddress(), pingId ) ); peers[i].payloadPing.send( req ); // peers[i].payloadPing.getEndpoint().getPrimaryAddress(), pingId ) ); peers[i].payloadPing.send( req );
// replies.push_back( self->payloadDelayer( req, peers[i].payloadPing ) ); // replies.push_back( self->payloadDelayer( req, peers[i].payloadPing ) );
} }
TraceEvent("PayloadPingSent", pingId); TraceEvent("PayloadPingSent", pingId).log();
wait(waitForAll(replies)); wait(waitForAll(replies));
double elapsed = now() - start; double elapsed = now() - start;
TraceEvent("PayloadPingDone", pingId).detail("Elapsed", elapsed); TraceEvent("PayloadPingDone", pingId).detail("Elapsed", elapsed);

View File

@ -184,7 +184,7 @@ struct PopulateTPCC : TestWorkload {
} }
} }
} }
TraceEvent("PopulateItemsDone"); TraceEvent("PopulateItemsDone").log();
return Void(); return Void();
} }

View File

@ -62,13 +62,13 @@ struct MoveKeysWorkload : TestWorkload {
} }
state int oldMode = wait(setDDMode(cx, 0)); state int oldMode = wait(setDDMode(cx, 0));
TraceEvent("RMKStartModeSetting"); TraceEvent("RMKStartModeSetting").log();
wait(timeout( wait(timeout(
reportErrors(self->worker(cx, self), "MoveKeysWorkloadWorkerError"), self->testDuration, Void())); reportErrors(self->worker(cx, self), "MoveKeysWorkloadWorkerError"), self->testDuration, Void()));
// Always set the DD mode back, even if we die with an error // Always set the DD mode back, even if we die with an error
TraceEvent("RMKDoneMoving"); TraceEvent("RMKDoneMoving").log();
wait(success(setDDMode(cx, oldMode))); wait(success(setDDMode(cx, oldMode)));
TraceEvent("RMKDoneModeSetting"); TraceEvent("RMKDoneModeSetting").log();
} }
return Void(); return Void();
} }
@ -87,7 +87,7 @@ struct MoveKeysWorkload : TestWorkload {
vector<StorageServerInterface> getRandomTeam(vector<StorageServerInterface> storageServers, int teamSize) { vector<StorageServerInterface> getRandomTeam(vector<StorageServerInterface> storageServers, int teamSize) {
if (storageServers.size() < teamSize) { if (storageServers.size() < teamSize) {
TraceEvent(SevWarnAlways, "LessThanThreeStorageServers"); TraceEvent(SevWarnAlways, "LessThanThreeStorageServers").log();
throw operation_failed(); throw operation_failed();
} }
@ -105,7 +105,7 @@ struct MoveKeysWorkload : TestWorkload {
} }
if (t.size() < teamSize) { if (t.size() < teamSize) {
TraceEvent(SevWarnAlways, "LessThanThreeUniqueMachines"); TraceEvent(SevWarnAlways, "LessThanThreeUniqueMachines").log();
throw operation_failed(); throw operation_failed();
} }

View File

@ -125,6 +125,7 @@ struct RandomSelectorWorkload : TestWorkload {
//TraceEvent("RYOWInit").detail("Key",myKeyA).detail("Value",myValue); //TraceEvent("RYOWInit").detail("Key",myKeyA).detail("Value",myValue);
} }
wait(tr.commit()); wait(tr.commit());
tr.reset();
break; break;
} catch (Error& e) { } catch (Error& e) {
wait(tr.onError(e)); wait(tr.onError(e));
@ -149,6 +150,7 @@ struct RandomSelectorWorkload : TestWorkload {
try { try {
tr.set(StringRef(clientID + "d/" + myKeyA), myValue); tr.set(StringRef(clientID + "d/" + myKeyA), myValue);
wait(tr.commit()); wait(tr.commit());
tr.reset();
break; break;
} catch (Error& e) { } catch (Error& e) {
wait(tr.onError(e)); wait(tr.onError(e));
@ -163,6 +165,7 @@ struct RandomSelectorWorkload : TestWorkload {
try { try {
tr.clear(StringRef(clientID + "d/" + myKeyA)); tr.clear(StringRef(clientID + "d/" + myKeyA));
wait(tr.commit()); wait(tr.commit());
tr.reset();
break; break;
} catch (Error& e) { } catch (Error& e) {
wait(tr.onError(e)); wait(tr.onError(e));
@ -184,6 +187,7 @@ struct RandomSelectorWorkload : TestWorkload {
tr.clear(KeyRangeRef(StringRef(clientID + "d/" + myKeyA), tr.clear(KeyRangeRef(StringRef(clientID + "d/" + myKeyA),
StringRef(clientID + "d/" + myKeyB))); StringRef(clientID + "d/" + myKeyB)));
wait(tr.commit()); wait(tr.commit());
tr.reset();
break; break;
} catch (Error& e) { } catch (Error& e) {
wait(tr.onError(e)); wait(tr.onError(e));
@ -231,6 +235,7 @@ struct RandomSelectorWorkload : TestWorkload {
tr.set(StringRef(clientID + "z/" + myRandomIDKey), StringRef()); tr.set(StringRef(clientID + "z/" + myRandomIDKey), StringRef());
tr.atomicOp(StringRef(clientID + "d/" + myKeyA), myValue, MutationRef::AddValue); tr.atomicOp(StringRef(clientID + "d/" + myKeyA), myValue, MutationRef::AddValue);
wait(tr.commit()); wait(tr.commit());
tr.reset();
break; break;
} catch (Error& e) { } catch (Error& e) {
error = e; error = e;
@ -254,6 +259,7 @@ struct RandomSelectorWorkload : TestWorkload {
tr.set(StringRef(clientID + "z/" + myRandomIDKey), StringRef()); tr.set(StringRef(clientID + "z/" + myRandomIDKey), StringRef());
tr.atomicOp(StringRef(clientID + "d/" + myKeyA), myValue, MutationRef::AppendIfFits); tr.atomicOp(StringRef(clientID + "d/" + myKeyA), myValue, MutationRef::AppendIfFits);
wait(tr.commit()); wait(tr.commit());
tr.reset();
break; break;
} catch (Error& e) { } catch (Error& e) {
error = e; error = e;
@ -277,6 +283,7 @@ struct RandomSelectorWorkload : TestWorkload {
tr.set(StringRef(clientID + "z/" + myRandomIDKey), StringRef()); tr.set(StringRef(clientID + "z/" + myRandomIDKey), StringRef());
tr.atomicOp(StringRef(clientID + "d/" + myKeyA), myValue, MutationRef::And); tr.atomicOp(StringRef(clientID + "d/" + myKeyA), myValue, MutationRef::And);
wait(tr.commit()); wait(tr.commit());
tr.reset();
break; break;
} catch (Error& e) { } catch (Error& e) {
error = e; error = e;
@ -300,6 +307,7 @@ struct RandomSelectorWorkload : TestWorkload {
tr.set(StringRef(clientID + "z/" + myRandomIDKey), StringRef()); tr.set(StringRef(clientID + "z/" + myRandomIDKey), StringRef());
tr.atomicOp(StringRef(clientID + "d/" + myKeyA), myValue, MutationRef::Or); tr.atomicOp(StringRef(clientID + "d/" + myKeyA), myValue, MutationRef::Or);
wait(tr.commit()); wait(tr.commit());
tr.reset();
break; break;
} catch (Error& e) { } catch (Error& e) {
error = e; error = e;
@ -323,6 +331,7 @@ struct RandomSelectorWorkload : TestWorkload {
tr.set(StringRef(clientID + "z/" + myRandomIDKey), StringRef()); tr.set(StringRef(clientID + "z/" + myRandomIDKey), StringRef());
tr.atomicOp(StringRef(clientID + "d/" + myKeyA), myValue, MutationRef::Xor); tr.atomicOp(StringRef(clientID + "d/" + myKeyA), myValue, MutationRef::Xor);
wait(tr.commit()); wait(tr.commit());
tr.reset();
break; break;
} catch (Error& e) { } catch (Error& e) {
error = e; error = e;
@ -346,6 +355,7 @@ struct RandomSelectorWorkload : TestWorkload {
tr.set(StringRef(clientID + "z/" + myRandomIDKey), StringRef()); tr.set(StringRef(clientID + "z/" + myRandomIDKey), StringRef());
tr.atomicOp(StringRef(clientID + "d/" + myKeyA), myValue, MutationRef::Max); tr.atomicOp(StringRef(clientID + "d/" + myKeyA), myValue, MutationRef::Max);
wait(tr.commit()); wait(tr.commit());
tr.reset();
break; break;
} catch (Error& e) { } catch (Error& e) {
error = e; error = e;
@ -369,6 +379,7 @@ struct RandomSelectorWorkload : TestWorkload {
tr.set(StringRef(clientID + "z/" + myRandomIDKey), StringRef()); tr.set(StringRef(clientID + "z/" + myRandomIDKey), StringRef());
tr.atomicOp(StringRef(clientID + "d/" + myKeyA), myValue, MutationRef::Min); tr.atomicOp(StringRef(clientID + "d/" + myKeyA), myValue, MutationRef::Min);
wait(tr.commit()); wait(tr.commit());
tr.reset();
break; break;
} catch (Error& e) { } catch (Error& e) {
error = e; error = e;
@ -392,6 +403,7 @@ struct RandomSelectorWorkload : TestWorkload {
tr.set(StringRef(clientID + "z/" + myRandomIDKey), StringRef()); tr.set(StringRef(clientID + "z/" + myRandomIDKey), StringRef());
tr.atomicOp(StringRef(clientID + "d/" + myKeyA), myValue, MutationRef::ByteMin); tr.atomicOp(StringRef(clientID + "d/" + myKeyA), myValue, MutationRef::ByteMin);
wait(tr.commit()); wait(tr.commit());
tr.reset();
break; break;
} catch (Error& e) { } catch (Error& e) {
error = e; error = e;
@ -415,6 +427,7 @@ struct RandomSelectorWorkload : TestWorkload {
tr.set(StringRef(clientID + "z/" + myRandomIDKey), StringRef()); tr.set(StringRef(clientID + "z/" + myRandomIDKey), StringRef());
tr.atomicOp(StringRef(clientID + "d/" + myKeyA), myValue, MutationRef::ByteMax); tr.atomicOp(StringRef(clientID + "d/" + myKeyA), myValue, MutationRef::ByteMax);
wait(tr.commit()); wait(tr.commit());
tr.reset();
break; break;
} catch (Error& e) { } catch (Error& e) {
error = e; error = e;

View File

@ -73,7 +73,7 @@ struct RestoreBackupWorkload final : TestWorkload {
.detail("TargetVersion", waitForVersion); .detail("TargetVersion", waitForVersion);
if (desc.contiguousLogEnd.present() && desc.contiguousLogEnd.get() >= waitForVersion) { if (desc.contiguousLogEnd.present() && desc.contiguousLogEnd.get() >= waitForVersion) {
try { try {
TraceEvent("DiscontinuingBackup"); TraceEvent("DiscontinuingBackup").log();
wait(self->backupAgent.discontinueBackup(cx, self->tag)); wait(self->backupAgent.discontinueBackup(cx, self->tag));
} catch (Error& e) { } catch (Error& e) {
TraceEvent("ErrorDiscontinuingBackup").error(e); TraceEvent("ErrorDiscontinuingBackup").error(e);

View File

@ -114,7 +114,7 @@ struct SimpleAtomicAddWorkload : TestWorkload {
} }
loop { loop {
try { try {
TraceEvent("SAACheckKey"); TraceEvent("SAACheckKey").log();
Optional<Value> actualValue = wait(tr.get(self->sumKey)); Optional<Value> actualValue = wait(tr.get(self->sumKey));
uint64_t actualValueInt = 0; uint64_t actualValueInt = 0;
if (actualValue.present()) { if (actualValue.present()) {

View File

@ -90,7 +90,7 @@ public: // variables
public: // ctor & dtor public: // ctor & dtor
SnapTestWorkload(WorkloadContext const& wcx) SnapTestWorkload(WorkloadContext const& wcx)
: TestWorkload(wcx), numSnaps(0), maxSnapDelay(0.0), testID(0), snapUID() { : TestWorkload(wcx), numSnaps(0), maxSnapDelay(0.0), testID(0), snapUID() {
TraceEvent("SnapTestWorkloadConstructor"); TraceEvent("SnapTestWorkloadConstructor").log();
std::string workloadName = "SnapTest"; std::string workloadName = "SnapTest";
maxRetryCntToRetrieveMessage = 10; maxRetryCntToRetrieveMessage = 10;
@ -107,11 +107,11 @@ public: // ctor & dtor
public: // workload functions public: // workload functions
std::string description() const override { return "SnapTest"; } std::string description() const override { return "SnapTest"; }
Future<Void> setup(Database const& cx) override { Future<Void> setup(Database const& cx) override {
TraceEvent("SnapTestWorkloadSetup"); TraceEvent("SnapTestWorkloadSetup").log();
return Void(); return Void();
} }
Future<Void> start(Database const& cx) override { Future<Void> start(Database const& cx) override {
TraceEvent("SnapTestWorkloadStart"); TraceEvent("SnapTestWorkloadStart").log();
if (clientId == 0) { if (clientId == 0) {
return _start(cx, this); return _start(cx, this);
} }
@ -120,7 +120,7 @@ public: // workload functions
ACTOR Future<bool> _check(Database cx, SnapTestWorkload* self) { ACTOR Future<bool> _check(Database cx, SnapTestWorkload* self) {
if (self->skipCheck) { if (self->skipCheck) {
TraceEvent(SevWarnAlways, "SnapCheckIgnored"); TraceEvent(SevWarnAlways, "SnapCheckIgnored").log();
return true; return true;
} }
state Transaction tr(cx); state Transaction tr(cx);
@ -250,7 +250,7 @@ public: // workload functions
bool backupFailed = atoi(ini.GetValue("RESTORE", "BackupFailed")); bool backupFailed = atoi(ini.GetValue("RESTORE", "BackupFailed"));
if (backupFailed) { if (backupFailed) {
// since backup failed, skip the restore checking // since backup failed, skip the restore checking
TraceEvent(SevWarnAlways, "BackupFailedSkippingRestoreCheck"); TraceEvent(SevWarnAlways, "BackupFailedSkippingRestoreCheck").log();
return Void(); return Void();
} }
state KeySelector begin = firstGreaterOrEqual(normalKeys.begin); state KeySelector begin = firstGreaterOrEqual(normalKeys.begin);
@ -265,7 +265,7 @@ public: // workload functions
try { try {
RangeResult kvRange = wait(tr.getRange(begin, end, 1000)); RangeResult kvRange = wait(tr.getRange(begin, end, 1000));
if (!kvRange.more && kvRange.size() == 0) { if (!kvRange.more && kvRange.size() == 0) {
TraceEvent("SnapTestNoMoreEntries"); TraceEvent("SnapTestNoMoreEntries").log();
break; break;
} }

Some files were not shown because too many files have changed in this diff Show More