Merge pull request #1301 from mpilman/features/cheaper-traces
Defer formatting in traces to make them cheaper
commit 058d028099
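Every hunk below makes the same mechanical change: call sites stop rendering keys and values eagerly with printable() and instead pass the raw typed value to detail(), while new Traceable<T> specializations (added to the type headers near the end of this diff) supply the string conversion inside the trace machinery. Formatting work can then be skipped entirely for events that end up suppressed. Below is a minimal standalone sketch of that idea; MiniTraceEvent and these two Traceable specializations are illustrative stand-ins, not FoundationDB's actual TraceEvent/Traceable API from flow/Trace.h.

#include <iostream>
#include <string>
#include <type_traits>

// Types opt in to tracing by specializing Traceable with a toString().
template <class T>
struct Traceable : std::false_type {};

template <>
struct Traceable<int> : std::true_type {
	static std::string toString(int value) { return std::to_string(value); }
};

template <>
struct Traceable<std::string> : std::true_type {
	static std::string toString(const std::string& value) { return value; }
};

class MiniTraceEvent {
	bool enabled;      // stands in for severity filtering / suppression
	std::string line;

public:
	MiniTraceEvent(const char* type, bool enabled) : enabled(enabled), line(type) {}

	// The caller hands over the raw value; toString() runs only when the
	// event will actually be emitted. The old printable(...) style built
	// the string eagerly at every call site, even for suppressed events.
	template <class T>
	MiniTraceEvent& detail(const char* name, const T& value) {
		static_assert(Traceable<T>::value, "type is not Traceable");
		if (enabled)
			line += std::string(" ") + name + "=" + Traceable<T>::toString(value);
		return *this;
	}

	~MiniTraceEvent() {
		if (enabled)
			std::cout << line << "\n";
	}
};

int main() {
	MiniTraceEvent("Suppressed", false).detail("Key", std::string("k"));  // no formatting work done
	MiniTraceEvent("Emitted", true).detail("Count", 42);                  // prints "Emitted Count=42"
	return 0;
}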
@@ -1387,7 +1387,7 @@ ACTOR Future<Void> cleanupStatus(Reference<ReadYourWritesTransaction> tr, std::s
 readMore = true;
 } catch(Error &e) {
 // If doc can't be parsed or isn't alive, delete it.
-TraceEvent(SevWarn, "RemovedDeadBackupLayerStatus").detail("Key", printable(docs[i].key));
+TraceEvent(SevWarn, "RemovedDeadBackupLayerStatus").detail("Key", docs[i].key);
 tr->clear(docs[i].key);
 // If limit is 1 then read more.
 if(limit == 1)
@@ -2292,7 +2292,7 @@ void fdbcli_comp_cmd(std::string const& text, std::vector<std::string>& lc) {
 
 void LogCommand(std::string line, UID randomID, std::string errMsg) {
 printf("%s\n", errMsg.c_str());
-TraceEvent(SevInfo, "CLICommandLog", randomID).detail("Command", printable(StringRef(line))).detail("Error", printable(StringRef(errMsg)));
+TraceEvent(SevInfo, "CLICommandLog", randomID).detail("Command", line).detail("Error", errMsg);
 }
 
 struct CLIOptions {
@@ -2523,7 +2523,7 @@ ACTOR Future<int> cli(CLIOptions opt, LineNoise* plinenoise) {
 
 try {
 state UID randomID = g_random->randomUniqueID();
-TraceEvent(SevInfo, "CLICommandLog", randomID).detail("Command", printable(StringRef(line)));
+TraceEvent(SevInfo, "CLICommandLog", randomID).detail("Command", line);
 
 bool malformed, partial;
 state std::vector<std::vector<StringRef>> parsed = parseLine(line, malformed, partial);
@@ -3345,7 +3345,7 @@ ACTOR Future<int> cli(CLIOptions opt, LineNoise* plinenoise) {
 }
 catch(Error &e) {
 //options->setOption() prints error message
-TraceEvent(SevWarn, "CLISetOptionError").error(e).detail("Option", printable(tokens[2]));
+TraceEvent(SevWarn, "CLISetOptionError").error(e).detail("Option", tokens[2]);
 is_error = true;
 }
 }
@@ -3357,7 +3357,7 @@ ACTOR Future<int> cli(CLIOptions opt, LineNoise* plinenoise) {
 is_error = true;
 }
 
-TraceEvent(SevInfo, "CLICommandLog", randomID).detail("Command", printable(StringRef(line))).detail("IsError", is_error);
+TraceEvent(SevInfo, "CLICommandLog", randomID).detail("Command", line).detail("IsError", is_error);
 
 } catch (Error& e) {
 if(e.code() != error_code_actor_cancelled)
@@ -149,7 +149,7 @@ Standalone<VectorRef<KeyRangeRef>> getLogRanges(Version beginVersion, Version en
 
 Key baLogRangePrefix = destUidValue.withPrefix(backupLogKeys.begin);
 
-//TraceEvent("GetLogRanges").detail("DestUidValue", destUidValue).detail("Prefix", printable(StringRef(baLogRangePrefix)));
+//TraceEvent("GetLogRanges").detail("DestUidValue", destUidValue).detail("Prefix", baLogRangePrefix);
 
 for (int64_t vblock = beginVersion / blockSize; vblock < (endVersion + blockSize - 1) / blockSize; ++vblock) {
 int64_t tb = vblock * blockSize / CLIENT_KNOBS->LOG_RANGE_BLOCK_SIZE;
@@ -172,7 +172,7 @@ Standalone<VectorRef<KeyRangeRef>> getApplyRanges(Version beginVersion, Version
 
 Key baLogRangePrefix = backupUid.withPrefix(applyLogKeys.begin);
 
-//TraceEvent("GetLogRanges").detail("BackupUid", backupUid).detail("Prefix", printable(StringRef(baLogRangePrefix)));
+//TraceEvent("GetLogRanges").detail("BackupUid", backupUid).detail("Prefix", baLogRangePrefix);
 
 for (int64_t vblock = beginVersion / CLIENT_KNOBS->APPLY_BLOCK_SIZE; vblock < (endVersion + CLIENT_KNOBS->APPLY_BLOCK_SIZE - 1) / CLIENT_KNOBS->APPLY_BLOCK_SIZE; ++vblock) {
 int64_t tb = vblock * CLIENT_KNOBS->APPLY_BLOCK_SIZE / CLIENT_KNOBS->LOG_RANGE_BLOCK_SIZE;
@@ -221,7 +221,7 @@ Standalone<VectorRef<MutationRef>> decodeBackupLogValue(StringRef value) {
 offset += sizeof(uint64_t);
 if (protocolVersion <= 0x0FDB00A200090001){
 TraceEvent(SevError, "DecodeBackupLogValue").detail("IncompatibleProtocolVersion", protocolVersion)
-.detail("ValueSize", value.size()).detail("Value", printable(value));
+.detail("ValueSize", value.size()).detail("Value", value);
 throw incompatible_protocol_version();
 }
 
@@ -267,7 +267,7 @@ Standalone<VectorRef<MutationRef>> decodeBackupLogValue(StringRef value) {
 return result;
 }
 catch (Error& e) {
-TraceEvent(e.code() == error_code_restore_missing_data ? SevWarn : SevError, "BA_DecodeBackupLogValue").error(e).GetLastError().detail("ValueSize", value.size()).detail("Value", printable(value));
+TraceEvent(e.code() == error_code_restore_missing_data ? SevWarn : SevError, "BA_DecodeBackupLogValue").error(e).GetLastError().detail("ValueSize", value.size()).detail("Value", value);
 throw;
 }
 }
@@ -280,7 +280,7 @@ void decodeBackupLogValue(Arena& arena, VectorRef<MutationRef>& result, int& mut
 offset += sizeof(uint64_t);
 if (protocolVersion <= 0x0FDB00A200090001){
 TraceEvent(SevError, "DecodeBackupLogValue").detail("IncompatibleProtocolVersion", protocolVersion)
-.detail("ValueSize", value.size()).detail("Value", printable(value));
+.detail("ValueSize", value.size()).detail("Value", value);
 throw incompatible_protocol_version();
 }
 
@@ -375,7 +375,7 @@ void decodeBackupLogValue(Arena& arena, VectorRef<MutationRef>& result, int& mut
 }
 }
 catch (Error& e) {
-TraceEvent(e.code() == error_code_restore_missing_data ? SevWarn : SevError, "BA_DecodeBackupLogValue").error(e).GetLastError().detail("ValueSize", value.size()).detail("Value", printable(value));
+TraceEvent(e.code() == error_code_restore_missing_data ? SevWarn : SevError, "BA_DecodeBackupLogValue").error(e).GetLastError().detail("ValueSize", value.size()).detail("Value", value);
 throw;
 }
 }
@@ -384,7 +384,7 @@ void logErrorWorker(Reference<ReadYourWritesTransaction> tr, Key keyErrors, std:
 tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
 tr->setOption(FDBTransactionOptions::LOCK_AWARE);
 if(now() - lastErrorTime > CLIENT_KNOBS->BACKUP_ERROR_DELAY) {
-TraceEvent("BA_LogError").detail("Key", printable(keyErrors)).detail("Message", message);
+TraceEvent("BA_LogError").detail("Key", keyErrors).detail("Message", message);
 lastErrorTime = now();
 }
 tr->set(keyErrors, message);
@@ -496,7 +496,7 @@ ACTOR Future<Void> readCommitted(Database cx, PromiseStream<RCGroup> results, Fu
 int index(0);
 for (auto & s : rangevalue){
 uint64_t groupKey = groupBy(s.key).first;
-//TraceEvent("Log_ReadCommitted").detail("GroupKey", groupKey).detail("SkipGroup", skipGroup).detail("NextKey", printable(nextKey.key)).detail("End", printable(end.key)).detail("Valuesize", value.size()).detail("Index",index++).detail("Size",s.value.size());
+//TraceEvent("Log_ReadCommitted").detail("GroupKey", groupKey).detail("SkipGroup", skipGroup).detail("NextKey", nextKey.key).detail("End", end.key).detail("Valuesize", value.size()).detail("Index",index++).detail("Size",s.value.size());
 if (groupKey != skipGroup){
 if (rcGroup.version == -1){
 rcGroup.version = tr.getReadVersion().get();
@@ -1380,7 +1380,7 @@ public:
 m_bucket = kv.second;
 continue;
 }
-TraceEvent(SevWarn, "BackupContainerBlobStoreInvalidParameter").detail("Name", printable(kv.first)).detail("Value", printable(kv.second));
+TraceEvent(SevWarn, "BackupContainerBlobStoreInvalidParameter").detail("Name", kv.first).detail("Value", kv.second);
 IBackupContainer::lastOpenError = format("Unknown URL parameter: '%s'", kv.first.c_str());
 throw backup_invalid_url();
 }
@@ -81,7 +81,7 @@ namespace FdbClientLogEvents {
 Key key;
 
 void logEvent(std::string id) const {
-TraceEvent("TransactionTrace_Get").detail("TransactionID", id).detail("Latency", latency).detail("ValueSizeBytes", valueSize).detail("Key", printable(key));
+TraceEvent("TransactionTrace_Get").detail("TransactionID", id).detail("Latency", latency).detail("ValueSizeBytes", valueSize).detail("Key", key);
 }
 };
 
@@ -102,7 +102,7 @@ namespace FdbClientLogEvents {
 Key endKey;
 
 void logEvent(std::string id) const {
-TraceEvent("TransactionTrace_GetRange").detail("TransactionID", id).detail("Latency", latency).detail("RangeSizeBytes", rangeSize).detail("StartKey", printable(startKey)).detail("EndKey", printable(endKey));
+TraceEvent("TransactionTrace_GetRange").detail("TransactionID", id).detail("Latency", latency).detail("RangeSizeBytes", rangeSize).detail("StartKey", startKey).detail("EndKey", endKey);
 }
 };
 
@@ -124,11 +124,11 @@ namespace FdbClientLogEvents {
 
 void logEvent(std::string id) const {
 for (auto &read_range : req.transaction.read_conflict_ranges) {
-TraceEvent("TransactionTrace_Commit_ReadConflictRange").detail("TransactionID", id).detail("Begin", printable(read_range.begin)).detail("End", printable(read_range.end));
+TraceEvent("TransactionTrace_Commit_ReadConflictRange").detail("TransactionID", id).detail("Begin", read_range.begin).detail("End", read_range.end);
 }
 
 for (auto &write_range : req.transaction.write_conflict_ranges) {
-TraceEvent("TransactionTrace_Commit_WriteConflictRange").detail("TransactionID", id).detail("Begin", printable(write_range.begin)).detail("End", printable(write_range.end));
+TraceEvent("TransactionTrace_Commit_WriteConflictRange").detail("TransactionID", id).detail("Begin", write_range.begin).detail("End", write_range.end);
 }
 
 for (auto &mutation : req.transaction.mutations) {
@@ -154,7 +154,7 @@ namespace FdbClientLogEvents {
 Key key;
 
 void logEvent(std::string id) const {
-TraceEvent("TransactionTrace_GetError").detail("TransactionID", id).detail("ErrCode", errCode).detail("Key", printable(key));
+TraceEvent("TransactionTrace_GetError").detail("TransactionID", id).detail("ErrCode", errCode).detail("Key", key);
 }
 };
 
@@ -174,7 +174,7 @@ namespace FdbClientLogEvents {
 Key endKey;
 
 void logEvent(std::string id) const {
-TraceEvent("TransactionTrace_GetRangeError").detail("TransactionID", id).detail("ErrCode", errCode).detail("StartKey", printable(startKey)).detail("EndKey", printable(endKey));
+TraceEvent("TransactionTrace_GetRangeError").detail("TransactionID", id).detail("ErrCode", errCode).detail("StartKey", startKey).detail("EndKey", endKey);
 }
 };
 
@@ -194,11 +194,11 @@ namespace FdbClientLogEvents {
 
 void logEvent(std::string id) const {
 for (auto &read_range : req.transaction.read_conflict_ranges) {
-TraceEvent("TransactionTrace_CommitError_ReadConflictRange").detail("TransactionID", id).detail("Begin", printable(read_range.begin)).detail("End", printable(read_range.end));
+TraceEvent("TransactionTrace_CommitError_ReadConflictRange").detail("TransactionID", id).detail("Begin", read_range.begin).detail("End", read_range.end);
 }
 
 for (auto &write_range : req.transaction.write_conflict_ranges) {
-TraceEvent("TransactionTrace_CommitError_WriteConflictRange").detail("TransactionID", id).detail("Begin", printable(write_range.begin)).detail("End", printable(write_range.end));
+TraceEvent("TransactionTrace_CommitError_WriteConflictRange").detail("TransactionID", id).detail("Begin", write_range.begin).detail("End", write_range.end);
 }
 
 for (auto &mutation : req.transaction.mutations) {
@@ -115,7 +115,7 @@ namespace dbBackup {
 Future<Void> checkTaskVersion(Tr tr, Reference<Task> task, StringRef name, uint32_t version) {
 uint32_t taskVersion = task->getVersion();
 if (taskVersion > version) {
-TraceEvent(SevError, "BA_BackupRangeTaskFuncExecute").detail("TaskVersion", taskVersion).detail("Name", printable(name)).detail("Version", version);
+TraceEvent(SevError, "BA_BackupRangeTaskFuncExecute").detail("TaskVersion", taskVersion).detail("Name", name).detail("Version", version);
 wait(logError(tr, Subspace(databaseBackupPrefixRange.begin).get(BackupAgentBase::keyErrors).pack(task->params[BackupAgentBase::keyConfigLogUid]),
 format("ERROR: %s task version `%lu' is greater than supported version `%lu'", task->params[Task::reservedTaskParamKeyType].toString().c_str(), (unsigned long)taskVersion, (unsigned long)version)));
 
@@ -305,7 +305,7 @@ namespace dbBackup {
 return Void();
 }
 
-//TraceEvent("DBA_Range").detail("Range", printable(KeyRangeRef(rangeBegin, rangeEnd))).detail("Version", values.second).detail("Size", values.first.size()).detail("LogUID", printable(task->params[BackupAgentBase::keyConfigLogUid])).detail("AddPrefix", printable(addPrefix)).detail("RemovePrefix", printable(removePrefix));
+//TraceEvent("DBA_Range").detail("Range", KeyRangeRef(rangeBegin, rangeEnd)).detail("Version", values.second).detail("Size", values.first.size()).detail("LogUID", task->params[BackupAgentBase::keyConfigLogUid]).detail("AddPrefix", addPrefix).detail("RemovePrefix", removePrefix);
 
 Subspace krv(conf.get(DatabaseBackupAgent::keyRangeVersions));
 state KeyRange versionRange = singleKeyRange(krv.pack(values.second));
@@ -340,7 +340,7 @@ namespace dbBackup {
 break;
 
 if( values.first[valueLoc].key >= backupVersions.get()[versionLoc].key ) {
-//TraceEvent("DBA_Set", debugID).detail("Key", printable(values.first[valueLoc].key)).detail("Value", printable(values.first[valueLoc].value));
+//TraceEvent("DBA_Set", debugID).detail("Key", values.first[valueLoc].key).detail("Value", values.first[valueLoc].value);
 tr->set(values.first[valueLoc].key.removePrefix(removePrefix).withPrefix(addPrefix), values.first[valueLoc].value);
 bytesSet += values.first[valueLoc].expectedSize() - removePrefix.expectedSize() + addPrefix.expectedSize();
 }
@@ -353,7 +353,7 @@ namespace dbBackup {
 
 wait(tr->commit());
 Params.bytesWritten().set(task, Params.bytesWritten().getOrDefault(task) + bytesSet);
-//TraceEvent("DBA_SetComplete", debugID).detail("Ver", values.second).detail("LogVersion", logVersion).detail("ReadVersion", readVer).detail("CommitVer", tr.getCommittedVersion()).detail("Range", printable(versionRange));
+//TraceEvent("DBA_SetComplete", debugID).detail("Ver", values.second).detail("LogVersion", logVersion).detail("ReadVersion", readVer).detail("CommitVer", tr.getCommittedVersion()).detail("Range", versionRange);
 
 if(backupVersions.get().more) {
 tr->reset();
@@ -737,7 +737,7 @@ namespace dbBackup {
 state Version stopVersionData = stopValue.present() ? BinaryReader::fromStringRef<Version>(stopValue.get(), Unversioned()) : -1;
 
 if(endVersion - beginVersion > g_random->randomInt64(0, CLIENT_KNOBS->BACKUP_VERSION_DELAY)) {
-TraceEvent("DBA_CopyLogs").detail("BeginVersion", beginVersion).detail("ApplyVersion", applyVersion).detail("EndVersion", endVersion).detail("StopVersionData", stopVersionData).detail("LogUID", printable(task->params[BackupAgentBase::keyConfigLogUid]));
+TraceEvent("DBA_CopyLogs").detail("BeginVersion", beginVersion).detail("ApplyVersion", applyVersion).detail("EndVersion", endVersion).detail("StopVersionData", stopVersionData).detail("LogUID", task->params[BackupAgentBase::keyConfigLogUid]);
 }
 
 if ((stopVersionData == -1) || (stopVersionData >= applyVersion)) {
@@ -941,7 +941,7 @@ namespace dbBackup {
 Optional<Value> stopWhenDone = wait(fStopWhenDone);
 
 if(endVersion - beginVersion > g_random->randomInt64(0, CLIENT_KNOBS->BACKUP_VERSION_DELAY)) {
-TraceEvent("DBA_CopyDiffLogs").detail("BeginVersion", beginVersion).detail("EndVersion", endVersion).detail("LogUID", printable(task->params[BackupAgentBase::keyConfigLogUid]));
+TraceEvent("DBA_CopyDiffLogs").detail("BeginVersion", beginVersion).detail("EndVersion", endVersion).detail("LogUID", task->params[BackupAgentBase::keyConfigLogUid]);
 }
 
 if (!stopWhenDone.present()) {
@@ -1410,7 +1410,7 @@ namespace dbBackup {
 
 state UID logUid = BinaryReader::fromStringRef<UID>(task->params[DatabaseBackupAgent::keyConfigLogUid], Unversioned());
 
-TraceEvent("DBA_Complete").detail("RestoreVersion", restoreVersion).detail("Differential", stopWhenDone.present()).detail("LogUID", printable(task->params[BackupAgentBase::keyConfigLogUid]));
+TraceEvent("DBA_Complete").detail("RestoreVersion", restoreVersion).detail("Differential", stopWhenDone.present()).detail("LogUID", task->params[BackupAgentBase::keyConfigLogUid]);
 
 // Start the complete task, if differential is not enabled
 if (stopWhenDone.present()) {
@@ -1935,8 +1935,8 @@ public:
 else
 wait(checkDatabaseLock(tr, logUid));
 
-TraceEvent("DBA_Submit").detail("LogUid", logUid).detail("Lock", lockDB).detail("LogUID", printable(logUidValue)).detail("Tag", printable(tagName))
-.detail("Key", printable(backupAgent->states.get(logUidValue).pack(DatabaseBackupAgent::keyFolderId))).detail("MapPrefix", printable(mapPrefix));
+TraceEvent("DBA_Submit").detail("LogUid", logUid).detail("Lock", lockDB).detail("LogUID", logUidValue).detail("Tag", tagName)
+.detail("Key", backupAgent->states.get(logUidValue).pack(DatabaseBackupAgent::keyFolderId)).detail("MapPrefix", mapPrefix);
 
 return Void();
 }
@@ -1944,7 +1944,7 @@ public:
 ACTOR static Future<Void> unlockBackup(DatabaseBackupAgent* backupAgent, Reference<ReadYourWritesTransaction> tr, Key tagName) {
 UID logUid = wait(backupAgent->getLogUid(tr, tagName));
 wait(unlockDatabase(tr, logUid));
-TraceEvent("DBA_Unlock").detail("Tag", printable(tagName));
+TraceEvent("DBA_Unlock").detail("Tag", tagName);
 return Void();
 }
 
@@ -1997,7 +1997,7 @@ public:
 tr2.setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
 tr2.setOption(FDBTransactionOptions::LOCK_AWARE);
 state Optional<Value> backupUid = wait(tr2.get(backupAgent->states.get(BinaryWriter::toValue(destlogUid, Unversioned())).pack(DatabaseBackupAgent::keyFolderId)));
-TraceEvent("DBA_SwitchoverBackupUID").detail("Uid", printable(backupUid)).detail("Key", printable(backupAgent->states.get(BinaryWriter::toValue(destlogUid, Unversioned())).pack(DatabaseBackupAgent::keyFolderId)));
+TraceEvent("DBA_SwitchoverBackupUID").detail("Uid", backupUid).detail("Key", backupAgent->states.get(BinaryWriter::toValue(destlogUid, Unversioned())).pack(DatabaseBackupAgent::keyFolderId));
 if(!backupUid.present())
 throw backup_duplicate();
 Optional<Value> v = wait(tr2.get(BinaryWriter::toValue(destlogUid, Unversioned()).withPrefix(applyMutationsBeginRange.begin)));
@@ -213,6 +213,23 @@ struct KeyRangeRef {
 	};
 };
 
+template<>
+struct Traceable<KeyRangeRef> : std::true_type {
+	static std::string toString(const KeyRangeRef& value) {
+		auto begin = Traceable<StringRef>::toString(value.begin);
+		auto end = Traceable<StringRef>::toString(value.end);
+		std::string result;
+		result.reserve(begin.size() + end.size() + 3);
+		std::copy(begin.begin(), begin.end(), std::back_inserter(result));
+		result.push_back(' ');
+		result.push_back('-');
+		result.push_back(' ');
+		std::copy(end.begin(), end.end(), std::back_inserter(result));
+		return result;
+	}
+};
+
 inline KeyRangeRef operator & (const KeyRangeRef& lhs, const KeyRangeRef& rhs) {
 	KeyRef b = std::max(lhs.begin, rhs.begin), e = std::min(lhs.end, rhs.end);
 	if (e < b)
@@ -263,6 +280,13 @@ struct KeyValueRef {
 	};
 };
 
+template<>
+struct Traceable<KeyValueRef> : std::true_type {
+	static std::string toString(const KeyValueRef& value) {
+		return Traceable<KeyRef>::toString(value.key) + format(":%d", value.value.size());
+	}
+};
+
 typedef Standalone<KeyRef> Key;
 typedef Standalone<ValueRef> Value;
 typedef Standalone<KeyRangeRef> KeyRange;
@@ -479,6 +503,13 @@ struct RangeResultRef : VectorRef<KeyValueRef> {
 	}
 };
 
+template<>
+struct Traceable<RangeResultRef> : std::true_type {
+	static std::string toString(const RangeResultRef& value) {
+		return Traceable<VectorRef<KeyValueRef>>::toString(value);
+	}
+};
+
 struct KeyValueStoreType {
 	// These enumerated values are stored in the database configuration, so can NEVER be changed. Only add new ones just before END.
 	enum StoreType {
@@ -513,6 +544,13 @@ private:
 	uint32_t type;
 };
 
+template<>
+struct Traceable<KeyValueStoreType> : std::true_type {
+	static std::string toString(KeyValueStoreType const& value) {
+		return value.toString();
+	}
+};
+
 struct TLogVersion {
 	enum Version {
 		UNSET = 0,
@@ -548,6 +586,13 @@ struct TLogVersion {
 	}
 };
 
+template<>
+struct Traceable<TLogVersion> : std::true_type {
+	static std::string toString(TLogVersion const& value) {
+		return Traceable<Version>::toString(value.version);
+	}
+};
+
 struct TLogSpillType {
 	// These enumerated values are stored in the database configuration, so can NEVER be changed. Only add new ones just before END.
 	enum SpillType {
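With the Traceable specializations above in place, the remaining hunks are all the same call-site rewrite. A representative before/after (the event name here is hypothetical, for illustration only):

// Before: the range is stringified on every call, even if the event is filtered out.
TraceEvent("ExampleEvent").detail("Range", printable(range));

// After: the raw KeyRangeRef is passed through, and Traceable<KeyRangeRef>::toString
// runs only for events that are actually written to the trace log.
TraceEvent("ExampleEvent").detail("Range", range);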
@@ -741,7 +741,7 @@ namespace fileBackup {
 if (taskVersion > version) {
 state Error err = task_invalid_version();
 
-TraceEvent(SevWarn, "BA_BackupRangeTaskFuncExecute").detail("TaskVersion", taskVersion).detail("Name", printable(name)).detail("Version", version);
+TraceEvent(SevWarn, "BA_BackupRangeTaskFuncExecute").detail("TaskVersion", taskVersion).detail("Name", name).detail("Version", version);
 if (KeyBackedConfig::TaskParams.uid().exists(task)) {
 std::string msg = format("%s task version `%lu' is greater than supported version `%lu'", task->params[Task::reservedTaskParamKeyType].toString().c_str(), (unsigned long)taskVersion, (unsigned long)version);
 wait(BackupConfig(task).logError(cx, err, msg));
@@ -806,7 +806,7 @@ namespace fileBackup {
 TEST(true); // Canceling old backup task
 
 TraceEvent(SevInfo, "FileBackupCancelOldTask")
-.detail("Task", printable(task->params[Task::reservedTaskParamKeyType]))
+.detail("Task", task->params[Task::reservedTaskParamKeyType])
 .detail("TagName", tagName);
 wait(abortFiveZeroBackup(&backupAgent, tr, tagName));
 
@@ -876,7 +876,7 @@ namespace fileBackup {
 TEST(true); // Canceling 5.1 backup task
 
 TraceEvent(SevInfo, "FileBackupCancelFiveOneTask")
-.detail("Task", printable(task->params[Task::reservedTaskParamKeyType]))
+.detail("Task", task->params[Task::reservedTaskParamKeyType])
 .detail("TagName", tagName);
 wait(abortFiveOneBackup(&backupAgent, tr, tagName));
 
@@ -2688,13 +2688,13 @@ namespace fileBackup {
 .detail("ReadOffset", readOffset)
 .detail("ReadLen", readLen)
 .detail("CommitVersion", tr->getCommittedVersion())
-.detail("BeginRange", printable(trRange.begin))
-.detail("EndRange", printable(trRange.end))
+.detail("BeginRange", trRange.begin)
+.detail("EndRange", trRange.end)
 .detail("StartIndex", start)
 .detail("EndIndex", i)
 .detail("DataSize", data.size())
 .detail("Bytes", txBytes)
-.detail("OriginalFileRange", printable(originalFileRange))
+.detail("OriginalFileRange", originalFileRange)
 .detail("TaskInstance", THIS_ADDR);
 
 // Commit succeeded, so advance starting point
@@ -3005,7 +3005,7 @@ namespace fileBackup {
 TraceEvent("FileRestoreDispatch")
 .detail("RestoreUID", restore.getUid())
 .detail("BeginVersion", beginVersion)
-.detail("BeginFile", printable(Params.beginFile().get(task)))
+.detail("BeginFile", Params.beginFile().get(task))
 .detail("BeginBlock", Params.beginBlock().get(task))
 .detail("RestoreVersion", restoreVersion)
 .detail("ApplyLag", applyLag)
@@ -3019,7 +3019,7 @@ namespace fileBackup {
 TraceEvent("FileRestoreDispatch")
 .detail("RestoreUID", restore.getUid())
 .detail("BeginVersion", beginVersion)
-.detail("BeginFile", printable(Params.beginFile().get(task)))
+.detail("BeginFile", Params.beginFile().get(task))
 .detail("BeginBlock", Params.beginBlock().get(task))
 .detail("RestoreVersion", restoreVersion)
 .detail("ApplyLag", applyLag)
@@ -3033,7 +3033,7 @@ namespace fileBackup {
 TraceEvent("FileRestoreDispatch")
 .detail("RestoreUID", restore.getUid())
 .detail("BeginVersion", beginVersion)
-.detail("BeginFile", printable(Params.beginFile().get(task)))
+.detail("BeginFile", Params.beginFile().get(task))
 .detail("BeginBlock", Params.beginBlock().get(task))
 .detail("ApplyLag", applyLag)
 .detail("Decision", "restore_complete")
@@ -3141,7 +3141,7 @@ namespace fileBackup {
 TraceEvent("FileRestoreDispatch")
 .detail("RestoreUID", restore.getUid())
 .detail("BeginVersion", beginVersion)
-.detail("BeginFile", printable(Params.beginFile().get(task)))
+.detail("BeginFile", Params.beginFile().get(task))
 .detail("BeginBlock", Params.beginBlock().get(task))
 .detail("EndVersion", endVersion)
 .detail("ApplyLag", applyLag)
@@ -3187,7 +3187,7 @@ namespace fileBackup {
 TraceEvent("FileRestoreDispatch")
 .detail("RestoreUID", restore.getUid())
 .detail("BeginVersion", beginVersion)
-.detail("BeginFile", printable(Params.beginFile().get(task)))
+.detail("BeginFile", Params.beginFile().get(task))
 .detail("BeginBlock", Params.beginBlock().get(task))
 .detail("EndVersion", endVersion)
 .detail("ApplyLag", applyLag)
@@ -124,9 +124,9 @@ public:
 return map(get(tr, snapshot), [=](Optional<T> val) -> T {
 if (!val.present()) {
 TraceEvent(SevInfo, "KeyBackedProperty_KeyNotFound")
-.detail("Key", printable(keyCopy))
+.detail("Key", keyCopy)
 .detail("Err", err.code())
 .detail("ParentTrace", backtrace.c_str());
 throw err;
 }
 
@@ -212,7 +212,7 @@ ConfigurationResult::Type buildConfiguration( std::vector<StringRef> const& mode
 
 for( auto t = m.begin(); t != m.end(); ++t ) {
 if( outConf.count( t->first ) ) {
-TraceEvent(SevWarnAlways, "ConflictingOption").detail("Option", printable(StringRef(t->first)));
+TraceEvent(SevWarnAlways, "ConflictingOption").detail("Option", t->first);
 return ConfigurationResult::CONFLICTING_OPTIONS;
 }
 outConf[t->first] = t->second;
@@ -763,7 +763,7 @@ void MultiVersionDatabase::DatabaseState::stateChanged() {
 }
 catch(Error &e) {
 optionLock.leave();
-TraceEvent(SevError, "ClusterVersionChangeOptionError").error(e).detail("Option", option.first).detail("OptionValue", printable(option.second)).detail("LibPath", clients[newIndex]->libPath);
+TraceEvent(SevError, "ClusterVersionChangeOptionError").error(e).detail("Option", option.first).detail("OptionValue", option.second).detail("LibPath", clients[newIndex]->libPath);
 connectionAttempts[newIndex]->connected = false;
 clients[newIndex]->failed = true;
 MultiVersionApi::api->updateSupportedVersions();
@@ -1126,13 +1126,13 @@ AddressExclusion AddressExclusion::parse( StringRef const& key ) {
 auto addr = NetworkAddress::parse(key.toString());
 if (addr.isTLS()) {
 TraceEvent(SevWarnAlways, "AddressExclusionParseError")
-.detail("String", printable(key))
+.detail("String", key)
 .detail("Description", "Address inclusion string should not include `:tls' suffix.");
 return AddressExclusion();
 }
 return AddressExclusion(addr.ip, addr.port);
 } catch (Error& ) {
-TraceEvent(SevWarnAlways, "AddressExclusionParseError").detail("String", printable(key));
+TraceEvent(SevWarnAlways, "AddressExclusionParseError").detail("String", key);
 return AddressExclusion();
 }
 }
@@ -1334,7 +1334,7 @@ ACTOR Future<Optional<Value>> getValue( Future<Version> version, Key key, Databa
 g_traceBatch.addAttach("GetValueAttachID", info.debugID.get().first(), getValueID.get().first());
 g_traceBatch.addEvent("GetValueDebug", getValueID.get().first(), "NativeAPI.getValue.Before"); //.detail("TaskID", g_network->getCurrentTask());
 /*TraceEvent("TransactionDebugGetValueInfo", getValueID.get())
-.detail("Key", printable(key))
+.detail("Key", key)
 .detail("ReqVersion", ver)
 .detail("Servers", describe(ssi.second->get()));*/
 }
@@ -1356,7 +1356,7 @@ ACTOR Future<Optional<Value>> getValue( Future<Version> version, Key key, Databa
 if( info.debugID.present() ) {
 g_traceBatch.addEvent("GetValueDebug", getValueID.get().first(), "NativeAPI.getValue.After"); //.detail("TaskID", g_network->getCurrentTask());
 /*TraceEvent("TransactionDebugGetValueDone", getValueID.get())
-.detail("Key", printable(key))
+.detail("Key", key)
 .detail("ReqVersion", ver)
 .detail("ReplySize", reply.value.present() ? reply.value.get().size() : -1);*/
 }
@@ -1367,7 +1367,7 @@ ACTOR Future<Optional<Value>> getValue( Future<Version> version, Key key, Databa
 if( info.debugID.present() ) {
 g_traceBatch.addEvent("GetValueDebug", getValueID.get().first(), "NativeAPI.getValue.Error"); //.detail("TaskID", g_network->getCurrentTask());
 /*TraceEvent("TransactionDebugGetValueDone", getValueID.get())
-.detail("Key", printable(key))
+.detail("Key", key)
 .detail("ReqVersion", ver)
 .detail("ReplySize", reply.value.present() ? reply.value.get().size() : -1);*/
 }
@@ -1404,11 +1404,11 @@ ACTOR Future<Key> getKey( Database cx, KeySelector k, Future<Version> version, T
 
 try {
 if( info.debugID.present() )
-g_traceBatch.addEvent("TransactionDebug", info.debugID.get().first(), "NativeAPI.getKey.Before"); //.detail("StartKey", printable(k.getKey())).detail("Offset",k.offset).detail("OrEqual",k.orEqual);
+g_traceBatch.addEvent("TransactionDebug", info.debugID.get().first(), "NativeAPI.getKey.Before"); //.detail("StartKey", k.getKey()).detail("Offset",k.offset).detail("OrEqual",k.orEqual);
 ++cx->transactionPhysicalReads;
 GetKeyReply reply = wait( loadBalance( ssi.second, &StorageServerInterface::getKey, GetKeyRequest(k, version.get()), TaskDefaultPromiseEndpoint, false, cx->enableLocalityLoadBalance ? &cx->queueModel : NULL ) );
 if( info.debugID.present() )
-g_traceBatch.addEvent("TransactionDebug", info.debugID.get().first(), "NativeAPI.getKey.After"); //.detail("NextKey",printable(reply.sel.key)).detail("Offset", reply.sel.offset).detail("OrEqual", k.orEqual);
+g_traceBatch.addEvent("TransactionDebug", info.debugID.get().first(), "NativeAPI.getKey.After"); //.detail("NextKey",reply.sel.key).detail("Offset", reply.sel.offset).detail("OrEqual", k.orEqual);
 k = reply.sel;
 if (!k.offset && k.orEqual) {
 return k.getKey();
@@ -1421,7 +1421,7 @@ ACTOR Future<Key> getKey( Database cx, KeySelector k, Future<Version> version, T
 } else {
 TraceEvent(SevInfo, "GetKeyError")
 .error(e)
-.detail("AtKey", printable(k.getKey()))
+.detail("AtKey", k.getKey())
 .detail("Offset", k.offset);
 throw e;
 }
@@ -1478,7 +1478,7 @@ ACTOR Future< Void > watchValue( Future<Version> version, Key key, Optional<Valu
 //cannot do this until the storage server is notified on knownCommittedVersion changes from tlog (faster than the current update loop)
 Version v = wait( waitForCommittedVersion( cx, resp ) );
 
-//TraceEvent("WatcherCommitted").detail("CommittedVersion", v).detail("WatchVersion", resp).detail("Key", printable( key )).detail("Value", printable(value));
+//TraceEvent("WatcherCommitted").detail("CommittedVersion", v).detail("WatchVersion", resp).detail("Key", key ).detail("Value", value);
 
 if( v - resp < 50000000 ) // False if there is a master failure between getting the response and getting the committed version, Dependent on SERVER_KNOBS->MAX_VERSIONS_IN_FLIGHT
 return Void();
@@ -1551,13 +1551,13 @@ ACTOR Future<Standalone<RangeResultRef>> getExactRange( Database cx, Version ver
 if( info.debugID.present() ) {
 g_traceBatch.addEvent("TransactionDebug", info.debugID.get().first(), "NativeAPI.getExactRange.Before");
 /*TraceEvent("TransactionDebugGetExactRangeInfo", info.debugID.get())
-.detail("ReqBeginKey", printable(req.begin.getKey()))
-.detail("ReqEndKey", printable(req.end.getKey()))
+.detail("ReqBeginKey", req.begin.getKey())
+.detail("ReqEndKey", req.end.getKey())
 .detail("ReqLimit", req.limit)
 .detail("ReqLimitBytes", req.limitBytes)
 .detail("ReqVersion", req.version)
 .detail("Reverse", reverse)
 .detail("Servers", locations[shard].second->description());*/
 }
 ++cx->transactionPhysicalReads;
 GetKeyValuesReply rep = wait( loadBalance( locations[shard].second, &StorageServerInterface::getKeyValues, req, TaskDefaultPromiseEndpoint, false, cx->enableLocalityLoadBalance ? &cx->queueModel : NULL ) );
@@ -1641,8 +1641,8 @@ ACTOR Future<Standalone<RangeResultRef>> getExactRange( Database cx, Version ver
 } else {
 TraceEvent(SevInfo, "GetExactRangeError")
 .error(e)
-.detail("ShardBegin", printable(locations[shard].first.begin))
-.detail("ShardEnd", printable(locations[shard].first.end));
+.detail("ShardBegin", locations[shard].first.begin)
+.detail("ShardEnd", locations[shard].first.end);
 throw;
 }
 }
@@ -1816,13 +1816,13 @@ ACTOR Future<Standalone<RangeResultRef>> getRange( Database cx, Reference<Transa
 if( info.debugID.present() ) {
 g_traceBatch.addEvent("TransactionDebug", info.debugID.get().first(), "NativeAPI.getRange.Before");
 /*TraceEvent("TransactionDebugGetRangeInfo", info.debugID.get())
-.detail("ReqBeginKey", printable(req.begin.getKey()))
-.detail("ReqEndKey", printable(req.end.getKey()))
+.detail("ReqBeginKey", req.begin.getKey())
+.detail("ReqEndKey", req.end.getKey())
 .detail("OriginalBegin", originalBegin.toString())
 .detail("OriginalEnd", originalEnd.toString())
 .detail("Begin", begin.toString())
 .detail("End", end.toString())
-.detail("Shard", printable(shard))
+.detail("Shard", shard)
 .detail("ReqLimit", req.limit)
 .detail("ReqLimitBytes", req.limitBytes)
 .detail("ReqVersion", req.version)
@@ -1837,8 +1837,8 @@ ACTOR Future<Standalone<RangeResultRef>> getRange( Database cx, Reference<Transa
 if( info.debugID.present() ) {
 g_traceBatch.addEvent("TransactionDebug", info.debugID.get().first(), "NativeAPI.getRange.After");//.detail("SizeOf", rep.data.size());
 /*TraceEvent("TransactionDebugGetRangeDone", info.debugID.get())
-.detail("ReqBeginKey", printable(req.begin.getKey()))
-.detail("ReqEndKey", printable(req.end.getKey()))
+.detail("ReqBeginKey", req.begin.getKey())
+.detail("ReqEndKey", req.end.getKey())
 .detail("RepIsMore", rep.more)
 .detail("VersionReturned", rep.version)
 .detail("RowsReturned", rep.data.size());*/
@@ -2484,19 +2484,21 @@ ACTOR void checkWrites( Database cx, Future<Void> committed, Promise<Void> outCo
 Standalone<RangeResultRef> shouldBeEmpty = wait(
 tr.getRange( it->range(), 1 ) );
 if( shouldBeEmpty.size() ) {
-TraceEvent(SevError, "CheckWritesFailed").detail("Class", "Clear").detail("KeyBegin", printable(it->range().begin).c_str())
-.detail("KeyEnd", printable(it->range().end).c_str());
+TraceEvent(SevError, "CheckWritesFailed").detail("Class", "Clear").detail("KeyBegin", it->range().begin)
+.detail("KeyEnd", it->range().end);
 return;
 }
 } else {
 Optional<Value> val = wait( tr.get( it->range().begin ) );
 if( !val.present() || val.get() != m.setValue ) {
-TraceEvent evt = TraceEvent(SevError, "CheckWritesFailed").detail("Class", "Set").detail("Key", printable(it->range().begin).c_str())
-.detail("Expected", printable(m.setValue).c_str());
+TraceEvent evt = TraceEvent(SevError, "CheckWritesFailed")
+.detail("Class", "Set")
+.detail("Key", it->range().begin)
+.detail("Expected", m.setValue);
 if( !val.present() )
 evt.detail("Actual", "_Value Missing_");
 else
-evt.detail("Actual", printable(val.get()).c_str());
+evt.detail("Actual", val.get());
 return;
 }
 }
@@ -2515,7 +2517,7 @@ ACTOR static Future<Void> commitDummyTransaction( Database cx, KeyRange range, T
 state int retries = 0;
 loop {
 try {
-TraceEvent("CommitDummyTransaction").detail("Key", printable(range.begin)).detail("Retries", retries);
+TraceEvent("CommitDummyTransaction").detail("Key", range.begin).detail("Retries", retries);
 tr.options = options;
 tr.info.taskID = info.taskID;
 tr.setOption( FDBTransactionOptions::ACCESS_SYSTEM_KEYS );
@@ -2526,7 +2528,7 @@ ACTOR static Future<Void> commitDummyTransaction( Database cx, KeyRange range, T
 wait( tr.commit() );
 return Void();
 } catch (Error& e) {
-TraceEvent("CommitDummyTransactionError").error(e,true).detail("Key", printable(range.begin)).detail("Retries", retries);
+TraceEvent("CommitDummyTransactionError").error(e,true).detail("Key", range.begin).detail("Retries", retries);
 wait( tr.onError(e) );
 }
 ++retries;
@@ -2715,7 +2717,7 @@ Future<Void> Transaction::commitMutations() {
 UID u = g_nondeterministic_random->randomUniqueID();
 TraceEvent("TransactionDump", u);
 for(auto i=tr.transaction.mutations.begin(); i!=tr.transaction.mutations.end(); ++i)
-TraceEvent("TransactionMutation", u).detail("T", i->type).detail("P1", printable(i->param1)).detail("P2", printable(i->param2));
+TraceEvent("TransactionMutation", u).detail("T", i->type).detail("P1", i->param1).detail("P2", i->param2);
 }
 
 if(options.lockAware) {
@@ -2840,7 +2842,7 @@ void Transaction::setOption( FDBTransactionOptions::Option option, Optional<Stri
 trLogInfo->identifier = printable(value.get());
 }
 else if (trLogInfo->identifier != printable(value.get())) {
-TraceEvent(SevWarn, "CannotChangeDebugTransactionIdentifier").detail("PreviousIdentifier", trLogInfo->identifier).detail("NewIdentifier", printable(value.get()));
+TraceEvent(SevWarn, "CannotChangeDebugTransactionIdentifier").detail("PreviousIdentifier", trLogInfo->identifier).detail("NewIdentifier", value.get());
 throw client_invalid_operation();
 }
 }
@@ -3154,7 +3156,7 @@ ACTOR Future< StorageMetrics > waitStorageMetrics(
 }
 } else {
 TraceEvent(SevWarn, "WaitStorageMetricsPenalty")
-.detail("Keys", printable(keys))
+.detail("Keys", keys)
 .detail("Limit", CLIENT_KNOBS->STORAGE_METRICS_SHARD_LIMIT)
 .detail("JitteredSecondsOfPenitence", CLIENT_KNOBS->STORAGE_METRICS_TOO_MANY_SHARDS_DELAY);
 wait(delayJittered(CLIENT_KNOBS->STORAGE_METRICS_TOO_MANY_SHARDS_DELAY, TaskDataDistribution));
@@ -574,7 +574,7 @@ TEST_CASE("/fdbclient/WriteMap/random") {
 KeyRangeRef range = RandomTestImpl::getRandomRange(arena);
 writes.addConflictRange(range);
 conflictMap.insert(range, true);
-TraceEvent("RWMT_AddConflictRange").detail("Range", printable(range));
+TraceEvent("RWMT_AddConflictRange").detail("Range", range);
 }
 else if(r == 1) {
 KeyRangeRef range = RandomTestImpl::getRandomRange(arena);
@ -583,7 +583,7 @@ TEST_CASE("/fdbclient/WriteMap/random") {
conflictMap.insert(range, false);
clearMap.insert(range, false);
unreadableMap.insert(range, true);
- TraceEvent("RWMT_AddUnmodifiedAndUnreadableRange").detail("Range", printable(range));
+ TraceEvent("RWMT_AddUnmodifiedAndUnreadableRange").detail("Range", range);
}
else if (r == 2) {
bool addConflict = g_random->random01() < 0.5;
@ -594,7 +594,7 @@ TEST_CASE("/fdbclient/WriteMap/random") {
conflictMap.insert(range, true);
clearMap.insert(range, true);
unreadableMap.insert(range, false);
- TraceEvent("RWMT_Clear").detail("Range", printable(range)).detail("AddConflict", addConflict);
+ TraceEvent("RWMT_Clear").detail("Range", range).detail("AddConflict", addConflict);
}
else if (r == 3) {
bool addConflict = g_random->random01() < 0.5;
@ -606,7 +606,7 @@ TEST_CASE("/fdbclient/WriteMap/random") {
conflictMap.insert(key, true);
clearMap.insert(key, false);
unreadableMap.insert(key, true);
- TraceEvent("RWMT_SetVersionstampedValue").detail("Key", printable(key)).detail("Value", value.size()).detail("AddConflict", addConflict);
+ TraceEvent("RWMT_SetVersionstampedValue").detail("Key", key).detail("Value", value.size()).detail("AddConflict", addConflict);
}
else if (r == 4) {
bool addConflict = g_random->random01() < 0.5;
@ -618,7 +618,7 @@ TEST_CASE("/fdbclient/WriteMap/random") {
conflictMap.insert(key, true);
clearMap.insert(key, false);
unreadableMap.insert(key, true);
- TraceEvent("RWMT_SetVersionstampedKey").detail("Key", printable(key)).detail("Value", value.size()).detail("AddConflict", addConflict);
+ TraceEvent("RWMT_SetVersionstampedKey").detail("Key", key).detail("Value", value.size()).detail("AddConflict", addConflict);
}
else if (r == 5) {
bool addConflict = g_random->random01() < 0.5;
@ -638,7 +638,7 @@ TEST_CASE("/fdbclient/WriteMap/random") {
if (addConflict)
conflictMap.insert(key, true);
clearMap.insert(key, false);
- TraceEvent("RWMT_And").detail("Key", printable(key)).detail("Value", value.size()).detail("AddConflict", addConflict);
+ TraceEvent("RWMT_And").detail("Key", key).detail("Value", value.size()).detail("AddConflict", addConflict);
}
else {
bool addConflict = g_random->random01() < 0.5;
@ -652,7 +652,7 @@ TEST_CASE("/fdbclient/WriteMap/random") {
if (addConflict)
conflictMap.insert(key, true);
clearMap.insert(key, false);
- TraceEvent("RWMT_Set").detail("Key", printable(key)).detail("Value", value.size()).detail("AddConflict", addConflict);
+ TraceEvent("RWMT_Set").detail("Key", key).detail("Value", value.size()).detail("AddConflict", addConflict);
}
}

@ -665,11 +665,11 @@ TEST_CASE("/fdbclient/WriteMap/random") {
if (it.is_operation()) {
ASSERT(setIter != setEnd);
TraceEvent("RWMT_CheckOperation")
- .detail("WmKey", printable(it.beginKey().toStandaloneStringRef()))
+ .detail("WmKey", it.beginKey())
.detail("WmSize", it.op().size())
.detail("WmValue", it.op().top().value.present() ? std::to_string(it.op().top().value.get().size()) : "Not Found")
.detail("WmType", (int)it.op().top().type)
- .detail("SmKey", printable(setIter->first))
+ .detail("SmKey", setIter->first)
.detail("SmSize", setIter->second.size())
.detail("SmValue", setIter->second.top().value.present() ? std::to_string(setIter->second.top().value.get().size()) : "Not Found")
.detail("SmType", (int)setIter->second.top().type);
@ -679,7 +679,7 @@ TEST_CASE("/fdbclient/WriteMap/random") {
}

TraceEvent("RWMT_CheckOperationFinal")
- .detail("WmKey", printable(it.beginKey().toStandaloneStringRef()))
+ .detail("WmKey", it.beginKey())
.detail("SmIter", setIter == setEnd);

ASSERT(it.beginKey() >= allKeys.end && setIter == setEnd);
@ -728,8 +728,8 @@ TEST_CASE("/fdbclient/WriteMap/random") {

while (it.beginKey() < allKeys.end && unreadableIter != unreadableEnd) {
TraceEvent("RWMT_CheckUnreadable")
- .detail("WriteMapRange", printable(KeyRangeRef(it.beginKey().toStandaloneStringRef(), it.endKey().toStandaloneStringRef())))
+ .detail("WriteMapRange", KeyRangeRef(it.beginKey().toStandaloneStringRef(), it.endKey().toStandaloneStringRef()))
- .detail("UnreadableMapRange", printable(unreadableIter.range()))
+ .detail("UnreadableMapRange", unreadableIter.range())
.detail("WriteMapValue", it.is_unreadable())
.detail("UnreadableMapValue", unreadableIter.value());
ASSERT(unreadableIter.value() == it.is_unreadable());
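The WriteMap test hunks above hand KeyRangeRef values and similar reference types straight to detail(). A hedged sketch of how a range type can be made traceable by formatting its endpoints (KeyRangeRefLike and the separator text are stand-ins, not FDB's exact type or output):

    #include <string>
    #include <type_traits>

    template <class T>
    struct Traceable; // primary template as in the earlier sketch

    struct KeyRangeRefLike {
        std::string begin, end; // stand-ins for the real KeyRef endpoints
    };

    template <>
    struct Traceable<KeyRangeRefLike> : std::true_type {
        static std::string toString(const KeyRangeRefLike& range) {
            // Join the endpoints; the separator text is an assumption.
            return range.begin + " - " + range.end;
        }
    };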
@ -38,12 +38,12 @@ public:
if (kv) key = kv->key;
}
TraceEvent("RYWDump")
- .detail("Begin", printable(it.beginKey().toStandaloneStringRef()))
+ .detail("Begin", it.beginKey())
- .detail("End", printable(it.endKey().toStandaloneStringRef()))
+ .detail("End", it.endKey())
.detail("Unknown", it.is_unknown_range())
.detail("Empty", it.is_empty_range())
.detail("KV", it.is_kv())
- .detail("Key", printable(key.get()));
+ .detail("Key", key.get());
if( it.endKey() == allKeys.end )
break;
++it;
@ -512,8 +512,8 @@ public:
.detail("Reached", limits.isReached())
.detail("ItemsPastEnd", itemsPastEnd)
.detail("EndOffset", -end.offset)
- .detail("ItBegin", printable(it.beginKey().toStandaloneStringRef()))
+ .detail("ItBegin", it.beginKey())
- .detail("ItEnd", printable(itEnd.beginKey().toStandaloneStringRef()))
+ .detail("ItEnd", itEnd.beginKey())
.detail("Unknown", it.is_unknown_range())
.detail("Requests", requestCount);*/

@ -606,13 +606,13 @@ public:
ASSERT( !requestLimit.hasRowLimit() || requestLimit.rows > 0 );
ASSERT( requestLimit.hasRowLimit() || requestLimit.hasByteLimit() );

- //TraceEvent("RYWIssuing", randomID).detail("Begin", read_begin.toString()).detail("End", read_end.toString()).detail("Bytes", requestLimit.bytes).detail("Rows", requestLimit.rows).detail("Limits", limits.bytes).detail("Reached", limits.isReached()).detail("RequestCount", requestCount).detail("SingleClears", singleClears).detail("UcEnd", printable(ucEnd.beginKey().toStandaloneStringRef())).detail("MinRows", requestLimit.minRows);
+ //TraceEvent("RYWIssuing", randomID).detail("Begin", read_begin.toString()).detail("End", read_end.toString()).detail("Bytes", requestLimit.bytes).detail("Rows", requestLimit.rows).detail("Limits", limits.bytes).detail("Reached", limits.isReached()).detail("RequestCount", requestCount).detail("SingleClears", singleClears).detail("UcEnd", ucEnd.beginKey()).detail("MinRows", requestLimit.minRows);

additionalRows = 0;
Standalone<RangeResultRef> snapshot_read = wait( ryw->tr.getRange( read_begin, read_end, requestLimit, true, false ) );
KeyRangeRef range = getKnownKeyRange( snapshot_read, read_begin, read_end, ryw->arena );

- //TraceEvent("RYWCacheInsert", randomID).detail("Range", printable(range)).detail("ExpectedSize", snapshot_read.expectedSize()).detail("Rows", snapshot_read.size()).detail("Results", printable(snapshot_read)).detail("More", snapshot_read.more).detail("ReadToBegin", snapshot_read.readToBegin).detail("ReadThroughEnd", snapshot_read.readThroughEnd).detail("ReadThrough", printable(snapshot_read.readThrough));
+ //TraceEvent("RYWCacheInsert", randomID).detail("Range", range).detail("ExpectedSize", snapshot_read.expectedSize()).detail("Rows", snapshot_read.size()).detail("Results", snapshot_read).detail("More", snapshot_read.more).detail("ReadToBegin", snapshot_read.readToBegin).detail("ReadThroughEnd", snapshot_read.readThroughEnd).detail("ReadThrough", snapshot_read.readThrough);

if( ryw->cache.insert( range, snapshot_read ) )
ryw->arena.dependsOn(snapshot_read.arena());
@ -636,7 +636,7 @@ public:

itemsPastEnd += maxCount - count;

- //TraceEvent("RYWaddKV", randomID).detail("Key", printable(it.beginKey().toStandaloneStringRef())).detail("Count", count).detail("MaxCount", maxCount).detail("ItemsPastEnd", itemsPastEnd);
+ //TraceEvent("RYWaddKV", randomID).detail("Key", it.beginKey()).detail("Count", count).detail("MaxCount", maxCount).detail("ItemsPastEnd", itemsPastEnd);
if( count ) result.append( result.arena(), start, count );
++it;
} else
@ -785,8 +785,8 @@ public:
.detail("Reached", limits.isReached())
.detail("ItemsPastBegin", itemsPastBegin)
.detail("EndOffset", end.offset)
- .detail("ItBegin", printable(it.beginKey().toStandaloneStringRef()))
+ .detail("ItBegin", it.beginKey())
- .detail("ItEnd", printable(itEnd.beginKey().toStandaloneStringRef()))
+ .detail("ItEnd", itEnd.beginKey())
.detail("Unknown", it.is_unknown_range())
.detail("Kv", it.is_kv())
.detail("Requests", requestCount);*/
@ -883,13 +883,13 @@ public:
ASSERT( !requestLimit.hasRowLimit() || requestLimit.rows > 0 );
ASSERT( requestLimit.hasRowLimit() || requestLimit.hasByteLimit() );

- //TraceEvent("RYWIssuing", randomID).detail("Begin", read_begin.toString()).detail("End", read_end.toString()).detail("Bytes", requestLimit.bytes).detail("Rows", requestLimit.rows).detail("Limits", limits.bytes).detail("Reached", limits.isReached()).detail("RequestCount", requestCount).detail("SingleClears", singleClears).detail("UcEnd", printable(ucEnd.beginKey().toStandaloneStringRef())).detail("MinRows", requestLimit.minRows);
+ //TraceEvent("RYWIssuing", randomID).detail("Begin", read_begin.toString()).detail("End", read_end.toString()).detail("Bytes", requestLimit.bytes).detail("Rows", requestLimit.rows).detail("Limits", limits.bytes).detail("Reached", limits.isReached()).detail("RequestCount", requestCount).detail("SingleClears", singleClears).detail("UcEnd", ucEnd.beginKey()).detail("MinRows", requestLimit.minRows);

additionalRows = 0;
Standalone<RangeResultRef> snapshot_read = wait( ryw->tr.getRange( read_begin, read_end, requestLimit, true, true ) );
KeyRangeRef range = getKnownKeyRangeBack( snapshot_read, read_begin, read_end, ryw->arena );

- //TraceEvent("RYWCacheInsert", randomID).detail("Range", printable(range)).detail("ExpectedSize", snapshot_read.expectedSize()).detail("Rows", snapshot_read.size()).detail("Results", printable(snapshot_read)).detail("More", snapshot_read.more).detail("ReadToBegin", snapshot_read.readToBegin).detail("ReadThroughEnd", snapshot_read.readThroughEnd).detail("ReadThrough", printable(snapshot_read.readThrough));
+ //TraceEvent("RYWCacheInsert", randomID).detail("Range", range).detail("ExpectedSize", snapshot_read.expectedSize()).detail("Rows", snapshot_read.size()).detail("Results", snapshot_read).detail("More", snapshot_read.more).detail("ReadToBegin", snapshot_read.readToBegin).detail("ReadThroughEnd", snapshot_read.readThroughEnd).detail("ReadThrough", snapshot_read.readThrough);

RangeResultRef reversed;
reversed.resize(ryw->arena, snapshot_read.size());
@ -917,7 +917,7 @@ public:
}

itemsPastBegin += maxCount - count;
- //TraceEvent("RYWaddKV", randomID).detail("Key", printable(it.beginKey().toStandaloneStringRef())).detail("Count", count).detail("MaxCount", maxCount).detail("ItemsPastBegin", itemsPastBegin);
+ //TraceEvent("RYWaddKV", randomID).detail("Key", it.beginKey()).detail("Count", count).detail("MaxCount", maxCount).detail("ItemsPastBegin", itemsPastBegin);
if( count ) {
int size = result.size();
result.resize(result.arena(),size+count);
@ -1982,7 +1982,7 @@ void ReadYourWritesTransaction::debugLogRetries(Optional<Error> error) {
if(error.present())
trace.error(error.get(), true);
if(!transactionDebugInfo->transactionName.empty())
- trace.detail("TransactionName", printable(StringRef(transactionDebugInfo->transactionName)));
+ trace.detail("TransactionName", transactionDebugInfo->transactionName);
trace.detail("Elapsed", elapsed).detail("Retries", retries).detail("Committed", committed);
}
transactionDebugInfo->lastRetryLogTime = now();
@ -101,6 +101,7 @@ struct ExtStringRef {
}

private:
+ friend struct Traceable<ExtStringRef>;
StringRef base;
int extra_zero_bytes;
};
@ -113,6 +114,19 @@ inline bool operator > ( const ExtStringRef& lhs, const ExtStringRef& rhs ) { re
inline bool operator <= ( const ExtStringRef& lhs, const ExtStringRef& rhs ) { return lhs.cmp(rhs)<=0; }
inline bool operator >= ( const ExtStringRef& lhs, const ExtStringRef& rhs ) { return lhs.cmp(rhs)>=0; }

+ template<>
+ struct Traceable<ExtStringRef> : std::true_type {
+ static std::string toString(const ExtStringRef str) {
+ std::string result;
+ result.reserve(str.size());
+ std::copy(str.base.begin(), str.base.end(), std::back_inserter(result));
+ for (int i = 0; i < str.extra_zero_bytes; ++i) {
+ result.push_back('\0');
+ }
+ return Traceable<std::string>::toString(result);
+ }
+ };

class SnapshotCache {
private:
struct Entry {
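The specialization added above materializes the logical value of an ExtStringRef, its base bytes plus extra_zero_bytes trailing NULs, and then delegates to the std::string formatter; the friend declaration in the previous hunk is what lets it read the private members. A self-contained analogue of that materialize-then-delegate step, using std:: types in place of FDB's (PaddedView and traceString are hypothetical names):

    #include <algorithm>
    #include <iterator>
    #include <string>
    #include <string_view>

    // A view whose traced form appends logical trailing NUL bytes that are
    // not physically stored behind the view.
    struct PaddedView {
        std::string_view base;
        int extraZeroBytes;
    };

    std::string traceString(const PaddedView& view) {
        std::string result;
        result.reserve(view.base.size() + view.extraZeroBytes);
        std::copy(view.base.begin(), view.base.end(), std::back_inserter(result));
        for (int i = 0; i < view.extraZeroBytes; ++i)
            result.push_back('\0');
        return result; // the real formatter may additionally escape unprintable bytes
    }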
@ -343,7 +357,7 @@ public:

void dump() {
for( auto it = entries.begin(); it != entries.end(); ++it ) {
- TraceEvent("CacheDump").detail("Begin", printable(it->beginKey)).detail("End", printable(it->endKey.toStandaloneStringRef())).detail("Values", printable(it->values));
+ TraceEvent("CacheDump").detail("Begin", it->beginKey).detail("End", it->endKey.toStandaloneStringRef()).detail("Values", it->values);
}
}

@ -483,7 +483,7 @@ Key logRangesEncodeKey(KeyRef keyBegin, UID logUid) {
// Returns the start key and optionally the logRange Uid
KeyRef logRangesDecodeKey(KeyRef key, UID* logUid) {
if (key.size() < logRangesRange.begin.size() + sizeof(UID)) {
- TraceEvent(SevError, "InvalidDecodeKey").detail("Key", printable(key));
+ TraceEvent(SevError, "InvalidDecodeKey").detail("Key", key);
ASSERT(false);
}

@ -235,15 +235,15 @@ public:

if (task->params.find(Task::reservedTaskParamValidKey) == task->params.end()) {
TraceEvent("TB_TaskVerifyInvalidTask")
- .detail("Task", printable(task->params[Task::reservedTaskParamKeyType]))
+ .detail("Task", task->params[Task::reservedTaskParamKeyType])
.detail("ReservedTaskParamValidKey", "missing");
return false;
}

if (task->params.find(Task::reservedTaskParamValidValue) == task->params.end()) {
TraceEvent("TB_TaskVerifyInvalidTask")
- .detail("Task", printable(task->params[Task::reservedTaskParamKeyType]))
+ .detail("Task", task->params[Task::reservedTaskParamKeyType])
- .detail("ReservedTaskParamValidKey", printable(task->params[Task::reservedTaskParamValidKey]))
+ .detail("ReservedTaskParamValidKey", task->params[Task::reservedTaskParamValidKey])
.detail("ReservedTaskParamValidValue", "missing");
return false;
}
@ -254,19 +254,19 @@ public:

if (!keyValue.present()) {
TraceEvent("TB_TaskVerifyInvalidTask")
- .detail("Task", printable(task->params[Task::reservedTaskParamKeyType]))
+ .detail("Task", task->params[Task::reservedTaskParamKeyType])
- .detail("ReservedTaskParamValidKey", printable(task->params[Task::reservedTaskParamValidKey]))
+ .detail("ReservedTaskParamValidKey", task->params[Task::reservedTaskParamValidKey])
- .detail("ReservedTaskParamValidValue", printable(task->params[Task::reservedTaskParamValidValue]))
+ .detail("ReservedTaskParamValidValue", task->params[Task::reservedTaskParamValidValue])
.detail("KeyValue", "missing");
return false;
}

if (keyValue.get().compare(StringRef(task->params[Task::reservedTaskParamValidValue]))) {
TraceEvent("TB_TaskVerifyAbortedTask")
- .detail("Task", printable(task->params[Task::reservedTaskParamKeyType]))
+ .detail("Task", task->params[Task::reservedTaskParamKeyType])
- .detail("ReservedTaskParamValidKey", printable(task->params[Task::reservedTaskParamValidKey]))
+ .detail("ReservedTaskParamValidKey", task->params[Task::reservedTaskParamValidKey])
- .detail("ReservedTaskParamValidValue", printable(task->params[Task::reservedTaskParamValidValue]))
+ .detail("ReservedTaskParamValidValue", task->params[Task::reservedTaskParamValidValue])
- .detail("KeyValue", printable(keyValue.get()));
+ .detail("KeyValue", keyValue.get());
return false;
}

@ -396,7 +396,7 @@ public:
} catch(Error &e) {
TraceEvent(SevWarn, "TB_ExecuteFailure")
.error(e)
- .detail("TaskUID", task->key.printable())
+ .detail("TaskUID", task->key)
.detail("TaskType", task->params[Task::reservedTaskParamKeyType].printable())
.detail("Priority", task->getPriority());
try {
@ -709,14 +709,14 @@ public:
tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
tr->setOption(FDBTransactionOptions::LOCK_AWARE);
Standalone<RangeResultRef> values = wait(tr->getRange(subspace.range(), CLIENT_KNOBS->TOO_MANY));
- TraceEvent("TaskBucket").detail("DebugPrintRange", "Print DB Range").detail("Key", printable(subspace.key())).detail("Count", values.size()).detail("Msg", printable(msg));
+ TraceEvent("TaskBucket").detail("DebugPrintRange", "Print DB Range").detail("Key", subspace.key()).detail("Count", values.size()).detail("Msg", msg);
- /*
- printf("debugPrintRange key: (%d) %s\n", values.size(), printable(subspace.key()).c_str());
+ /*printf("debugPrintRange key: (%d) %s\n", values.size(), printable(subspace.key()).c_str());
for (auto & s : values) {
- printf("  key: %-40s value: %s\n", printable(s.key).c_str(), printable(s.value).c_str());
+ printf("  key: %-40s value: %s\n", printable(s.key).c_str(), s.value.c_str());
- TraceEvent("TaskBucket").detail("DebugPrintRange", printable(msg))
+ TraceEvent("TaskBucket").detail("DebugPrintRange", msg)
- .detail("Key", printable(s.key))
+ .detail("Key", s.key)
- .detail("Value", printable(s.value));
+ .detail("Value", s.value);
}*/

return Void();
@ -846,8 +846,8 @@ ACTOR static Future<Key> actorAddTask(TaskBucket* tb, Reference<ReadYourWritesTr

if (!validationValue.present()) {
TraceEvent(SevError, "TB_AddTaskInvalidKey")
- .detail("Task", printable(task->params[Task::reservedTaskParamKeyType]))
+ .detail("Task", task->params[Task::reservedTaskParamKeyType])
- .detail("ValidationKey", printable(validationKey));
+ .detail("ValidationKey", validationKey);
throw invalid_option_value();
}

@ -1114,8 +1114,8 @@ public:

if (!validationValue.present()) {
TraceEvent(SevError, "TB_OnSetAddTaskInvalidKey")
- .detail("Task", printable(task->params[Task::reservedTaskParamKeyType]))
+ .detail("Task", task->params[Task::reservedTaskParamKeyType])
- .detail("ValidationKey", printable(validationKey));
+ .detail("ValidationKey", validationKey);
throw invalid_option_value();
}

@ -543,8 +543,8 @@ private:
iterator it( this );
it.skip(allKeys.begin);
while( it.beginKey() < allKeys.end ) {
- TraceEvent("WriteMapDump").detail("Begin", printable(it.beginKey().toStandaloneStringRef()))
+ TraceEvent("WriteMapDump").detail("Begin", it.beginKey().toStandaloneStringRef())
- .detail("End", printable(it.endKey().toStandaloneStringRef()))
+ .detail("End", it.endKey())
.detail("Cleared", it.is_cleared_range())
.detail("Conflicted", it.is_conflict_range())
.detail("Operation", it.is_operation())
@ -255,7 +255,7 @@ void serializeReplicationPolicy(Ar& ar, Reference<IReplicationPolicy>& policy) {
}
else {
TraceEvent(SevError, "SerializingInvalidPolicyType")
- .detailext("PolicyName", name);
+ .detail("PolicyName", name);
}
}
else {
@ -986,9 +986,9 @@ public:
if( machine.processes[i]->locality.machineId() != locality.machineId() ) { // SOMEDAY: compute ip from locality to avoid this check
TraceEvent("Sim2Mismatch")
.detail("IP", format("%s", ip.toString().c_str()))
- .detailext("MachineId", locality.machineId())
+ .detail("MachineId", locality.machineId())
.detail("NewName", name)
- .detailext("ExistingMachineId", machine.processes[i]->locality.machineId())
+ .detail("ExistingMachineId", machine.processes[i]->locality.machineId())
.detail("ExistingName", machine.processes[i]->name);
ASSERT( false );
}
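From here on the commit also retires detailext(), which existed to log optional locality fields such as machineId(); with deferred formatting, plain detail() can take them directly. A hedged sketch of the kind of specialization that makes this work, with std::optional standing in for FDB's Optional and "[not set]" as an assumed placeholder, not the exact output:

    #include <optional>
    #include <string>
    #include <type_traits>

    template <class T>
    struct Traceable; // primary template as in the earlier sketch

    template <class T>
    struct Traceable<std::optional<T>> : std::true_type {
        static std::string toString(const std::optional<T>& value) {
            // Format the wrapped value if present; the placeholder is illustrative.
            return value.has_value() ? Traceable<T>::toString(*value)
                                     : std::string("[not set]");
        }
    };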
@ -1025,7 +1025,7 @@ public:
m->setGlobal(enNetworkConnections, (flowGlobalType) m->network);
m->setGlobal(enASIOTimedOut, (flowGlobalType) false);

- TraceEvent("NewMachine").detail("Name", name).detail("Address", m->address).detailext("MachineId", m->locality.machineId()).detail("Excluded", m->excluded).detail("Cleared", m->cleared);
+ TraceEvent("NewMachine").detail("Name", name).detail("Address", m->address).detail("MachineId", m->locality.machineId()).detail("Excluded", m->excluded).detail("Cleared", m->cleared);

// FIXME: Sometimes, connections to/from this process will explicitly close

|
}

virtual void destroyProcess( ISimulator::ProcessInfo *p ) {
- TraceEvent("ProcessDestroyed").detail("Name", p->name).detail("Address", p->address).detailext("MachineId", p->locality.machineId());
+ TraceEvent("ProcessDestroyed").detail("Name", p->name).detail("Address", p->address).detail("MachineId", p->locality.machineId());
currentlyRebootingProcesses.insert(std::pair<NetworkAddress, ProcessInfo*>(p->address, p));
std::vector<ProcessInfo*>& processes = machines[ p->locality.machineId().get() ].processes;
if( p != processes.back() ) {
|
TEST( kt == InjectFaults ); // Simulated machine was killed with faults

if (kt == KillInstantly) {
- TraceEvent(SevWarn, "FailMachine").detail("Name", machine->name).detail("Address", machine->address).detailext("ZoneId", machine->locality.zoneId()).detail("Process", machine->toString()).detail("Rebooting", machine->rebooting).detail("Protected", protectedAddresses.count(machine->address)).backtrace();
+ TraceEvent(SevWarn, "FailMachine").detail("Name", machine->name).detail("Address", machine->address).detail("ZoneId", machine->locality.zoneId()).detail("Process", machine->toString()).detail("Rebooting", machine->rebooting).detail("Protected", protectedAddresses.count(machine->address)).backtrace();
// This will remove all the "tracked" messages that came from the machine being killed
latestEventCache.clear();
machine->failed = true;
} else if (kt == InjectFaults) {
- TraceEvent(SevWarn, "FaultMachine").detail("Name", machine->name).detail("Address", machine->address).detailext("ZoneId", machine->locality.zoneId()).detail("Process", machine->toString()).detail("Rebooting", machine->rebooting).detail("Protected", protectedAddresses.count(machine->address)).backtrace();
+ TraceEvent(SevWarn, "FaultMachine").detail("Name", machine->name).detail("Address", machine->address).detail("ZoneId", machine->locality.zoneId()).detail("Process", machine->toString()).detail("Rebooting", machine->rebooting).detail("Protected", protectedAddresses.count(machine->address)).backtrace();
should_inject_fault = simulator_should_inject_fault;
machine->fault_injection_r = g_random->randomUniqueID().first();
machine->fault_injection_p1 = 0.1;
|
TEST(kt == InjectFaults); // Trying to kill by injecting faults

if(speedUpSimulation && !forceKill) {
- TraceEvent(SevWarn, "AbortedKill").detailext("MachineId", machineId).detail("Reason", "Unforced kill within speedy simulation.").backtrace();
+ TraceEvent(SevWarn, "AbortedKill").detail("MachineId", machineId).detail("Reason", "Unforced kill within speedy simulation.").backtrace();
if (ktFinal) *ktFinal = None;
return false;
}
|

// Do nothing, if no processes to kill
if (processesOnMachine == 0) {
- TraceEvent(SevWarn, "AbortedKill").detailext("MachineId", machineId).detail("Reason", "The target had no processes running.").detail("Processes", processesOnMachine).detail("ProcessesPerMachine", processesPerMachine).backtrace();
+ TraceEvent(SevWarn, "AbortedKill").detail("MachineId", machineId).detail("Reason", "The target had no processes running.").detail("Processes", processesOnMachine).detail("ProcessesPerMachine", processesPerMachine).backtrace();
if (ktFinal) *ktFinal = None;
return false;
}
|
}
}
if (!canKillProcesses(processesLeft, processesDead, kt, &kt)) {
- TraceEvent("ChangedKillMachine").detailext("MachineId", machineId).detail("KillType", kt).detail("OrigKillType", ktOrig).detail("ProcessesLeft", processesLeft.size()).detail("ProcessesDead", processesDead.size()).detail("TotalProcesses", machines.size()).detail("ProcessesPerMachine", processesPerMachine).detail("Protected", protectedWorker).detail("Unavailable", unavailable).detail("Excluded", excluded).detail("Cleared", cleared).detail("ProtectedTotal", protectedAddresses.size()).detail("TLogPolicy", tLogPolicy->info()).detail("StoragePolicy", storagePolicy->info());
+ TraceEvent("ChangedKillMachine").detail("MachineId", machineId).detail("KillType", kt).detail("OrigKillType", ktOrig).detail("ProcessesLeft", processesLeft.size()).detail("ProcessesDead", processesDead.size()).detail("TotalProcesses", machines.size()).detail("ProcessesPerMachine", processesPerMachine).detail("Protected", protectedWorker).detail("Unavailable", unavailable).detail("Excluded", excluded).detail("Cleared", cleared).detail("ProtectedTotal", protectedAddresses.size()).detail("TLogPolicy", tLogPolicy->info()).detail("StoragePolicy", storagePolicy->info());
}
else if ((kt == KillInstantly) || (kt == InjectFaults)) {
- TraceEvent("DeadMachine").detailext("MachineId", machineId).detail("KillType", kt).detail("ProcessesLeft", processesLeft.size()).detail("ProcessesDead", processesDead.size()).detail("TotalProcesses", machines.size()).detail("ProcessesPerMachine", processesPerMachine).detail("TLogPolicy", tLogPolicy->info()).detail("StoragePolicy", storagePolicy->info());
+ TraceEvent("DeadMachine").detail("MachineId", machineId).detail("KillType", kt).detail("ProcessesLeft", processesLeft.size()).detail("ProcessesDead", processesDead.size()).detail("TotalProcesses", machines.size()).detail("ProcessesPerMachine", processesPerMachine).detail("TLogPolicy", tLogPolicy->info()).detail("StoragePolicy", storagePolicy->info());
for (auto process : processesLeft) {
- TraceEvent("DeadMachineSurvivors").detailext("MachineId", machineId).detail("KillType", kt).detail("ProcessesLeft", processesLeft.size()).detail("ProcessesDead", processesDead.size()).detail("SurvivingProcess", process->toString());
+ TraceEvent("DeadMachineSurvivors").detail("MachineId", machineId).detail("KillType", kt).detail("ProcessesLeft", processesLeft.size()).detail("ProcessesDead", processesDead.size()).detail("SurvivingProcess", process->toString());
}
for (auto process : processesDead) {
- TraceEvent("DeadMachineVictims").detailext("MachineId", machineId).detail("KillType", kt).detail("ProcessesLeft", processesLeft.size()).detail("ProcessesDead", processesDead.size()).detail("VictimProcess", process->toString());
+ TraceEvent("DeadMachineVictims").detail("MachineId", machineId).detail("KillType", kt).detail("ProcessesLeft", processesLeft.size()).detail("ProcessesDead", processesDead.size()).detail("VictimProcess", process->toString());
}
}
else {
- TraceEvent("ClearMachine").detailext("MachineId", machineId).detail("KillType", kt).detail("ProcessesLeft", processesLeft.size()).detail("ProcessesDead", processesDead.size()).detail("TotalProcesses", machines.size()).detail("ProcessesPerMachine", processesPerMachine).detail("TLogPolicy", tLogPolicy->info()).detail("StoragePolicy", storagePolicy->info());
+ TraceEvent("ClearMachine").detail("MachineId", machineId).detail("KillType", kt).detail("ProcessesLeft", processesLeft.size()).detail("ProcessesDead", processesDead.size()).detail("TotalProcesses", machines.size()).detail("ProcessesPerMachine", processesPerMachine).detail("TLogPolicy", tLogPolicy->info()).detail("StoragePolicy", storagePolicy->info());
for (auto process : processesLeft) {
- TraceEvent("ClearMachineSurvivors").detailext("MachineId", machineId).detail("KillType", kt).detail("ProcessesLeft", processesLeft.size()).detail("ProcessesDead", processesDead.size()).detail("SurvivingProcess", process->toString());
+ TraceEvent("ClearMachineSurvivors").detail("MachineId", machineId).detail("KillType", kt).detail("ProcessesLeft", processesLeft.size()).detail("ProcessesDead", processesDead.size()).detail("SurvivingProcess", process->toString());
}
for (auto process : processesDead) {
- TraceEvent("ClearMachineVictims").detailext("MachineId", machineId).detail("KillType", kt).detail("ProcessesLeft", processesLeft.size()).detail("ProcessesDead", processesDead.size()).detail("VictimProcess", process->toString());
+ TraceEvent("ClearMachineVictims").detail("MachineId", machineId).detail("KillType", kt).detail("ProcessesLeft", processesLeft.size()).detail("ProcessesDead", processesDead.size()).detail("VictimProcess", process->toString());
}
}
}
|
// Check if any processes on machine are rebooting
if( processesOnMachine != processesPerMachine && kt >= RebootAndDelete ) {
TEST(true); //Attempted reboot, but the target did not have all of its processes running
- TraceEvent(SevWarn, "AbortedKill").detail("KillType", kt).detailext("MachineId", machineId).detail("Reason", "Machine processes does not match number of processes per machine").detail("Processes", processesOnMachine).detail("ProcessesPerMachine", processesPerMachine).backtrace();
+ TraceEvent(SevWarn, "AbortedKill").detail("KillType", kt).detail("MachineId", machineId).detail("Reason", "Machine processes does not match number of processes per machine").detail("Processes", processesOnMachine).detail("ProcessesPerMachine", processesPerMachine).backtrace();
if (ktFinal) *ktFinal = None;
return false;
}
|
// Check if any processes on machine are rebooting
if ( processesOnMachine != processesPerMachine ) {
TEST(true); //Attempted reboot, but the target did not have all of its processes running
- TraceEvent(SevWarn, "AbortedKill").detail("KillType", kt).detailext("MachineId", machineId).detail("Reason", "Machine processes does not match number of processes per machine").detail("Processes", processesOnMachine).detail("ProcessesPerMachine", processesPerMachine).backtrace();
+ TraceEvent(SevWarn, "AbortedKill").detail("KillType", kt).detail("MachineId", machineId).detail("Reason", "Machine processes does not match number of processes per machine").detail("Processes", processesOnMachine).detail("ProcessesPerMachine", processesPerMachine).backtrace();
if (ktFinal) *ktFinal = None;
return false;
}

- TraceEvent("KillMachine").detailext("MachineId", machineId).detail("Kt", kt).detail("KtOrig", ktOrig).detail("KillableMachines", processesOnMachine).detail("ProcessPerMachine", processesPerMachine).detail("KillChanged", kt!=ktOrig);
+ TraceEvent("KillMachine").detail("MachineId", machineId).detail("Kt", kt).detail("KtOrig", ktOrig).detail("KillableMachines", processesOnMachine).detail("ProcessPerMachine", processesPerMachine).detail("KillChanged", kt!=ktOrig);
if ( kt < RebootAndDelete ) {
if(kt == InjectFaults && machines[machineId].machineProcess != nullptr)
killProcess_internal( machines[machineId].machineProcess, kt );
@ -1438,7 +1438,7 @@ public:
if (processDcId.present() && (processDcId == dcId)) {
if ((kt != Reboot) && (protectedAddresses.count(procRecord->address))) {
kt = Reboot;
- TraceEvent(SevWarn, "DcKillChanged").detailext("DataCenter", dcId).detail("KillType", kt).detail("OrigKillType", ktOrig)
+ TraceEvent(SevWarn, "DcKillChanged").detail("DataCenter", dcId).detail("KillType", kt).detail("OrigKillType", ktOrig)
.detail("Reason", "Datacenter has protected process").detail("ProcessAddress", procRecord->address).detail("Failed", procRecord->failed).detail("Rebooting", procRecord->rebooting).detail("Excluded", procRecord->excluded).detail("Cleared", procRecord->cleared).detail("Process", procRecord->toString());
}
datacenterMachines[processMachineId.get()] ++;
@ -1463,15 +1463,15 @@ public:
}

if (!canKillProcesses(processesLeft, processesDead, kt, &kt)) {
- TraceEvent(SevWarn, "DcKillChanged").detailext("DataCenter", dcId).detail("KillType", kt).detail("OrigKillType", ktOrig);
+ TraceEvent(SevWarn, "DcKillChanged").detail("DataCenter", dcId).detail("KillType", kt).detail("OrigKillType", ktOrig);
}
else {
- TraceEvent("DeadDataCenter").detailext("DataCenter", dcId).detail("KillType", kt).detail("DcZones", datacenterMachines.size()).detail("DcProcesses", dcProcesses).detail("ProcessesDead", processesDead.size()).detail("ProcessesLeft", processesLeft.size()).detail("TLogPolicy", tLogPolicy->info()).detail("StoragePolicy", storagePolicy->info());
+ TraceEvent("DeadDataCenter").detail("DataCenter", dcId).detail("KillType", kt).detail("DcZones", datacenterMachines.size()).detail("DcProcesses", dcProcesses).detail("ProcessesDead", processesDead.size()).detail("ProcessesLeft", processesLeft.size()).detail("TLogPolicy", tLogPolicy->info()).detail("StoragePolicy", storagePolicy->info());
for (auto process : processesLeft) {
- TraceEvent("DeadDcSurvivors").detailext("MachineId", process->locality.machineId()).detail("KillType", kt).detail("ProcessesLeft", processesLeft.size()).detail("ProcessesDead", processesDead.size()).detail("SurvivingProcess", process->toString());
+ TraceEvent("DeadDcSurvivors").detail("MachineId", process->locality.machineId()).detail("KillType", kt).detail("ProcessesLeft", processesLeft.size()).detail("ProcessesDead", processesDead.size()).detail("SurvivingProcess", process->toString());
}
for (auto process : processesDead) {
- TraceEvent("DeadDcVictims").detailext("MachineId", process->locality.machineId()).detail("KillType", kt).detail("ProcessesLeft", processesLeft.size()).detail("ProcessesDead", processesDead.size()).detail("VictimProcess", process->toString());
+ TraceEvent("DeadDcVictims").detail("MachineId", process->locality.machineId()).detail("KillType", kt).detail("ProcessesLeft", processesLeft.size()).detail("ProcessesDead", processesDead.size()).detail("VictimProcess", process->toString());
}
}
}
@ -1482,7 +1482,7 @@ public:
killMachine(datacenterMachine.first, kt, true, &ktResult);
if (ktResult != kt) {
TraceEvent(SevWarn, "KillDCFail")
- .detailext("Zone", datacenterMachine.first)
+ .detail("Zone", datacenterMachine.first)
.detail("KillType", kt)
.detail("KillTypeResult", ktResult)
.detail("KillTypeOrig", ktOrig);
@ -1495,7 +1495,7 @@ public:
TraceEvent("KillDataCenter")
.detail("DcZones", datacenterMachines.size())
.detail("DcProcesses", dcProcesses)
- .detailext("DCID", dcId)
+ .detail("DCID", dcId)
.detail("KillType", kt)
.detail("KillTypeOrig", ktOrig)
.detail("KillTypeMin", ktMin)
@ -1694,7 +1694,7 @@ static double networkLatency() {
}

ACTOR void doReboot( ISimulator::ProcessInfo *p, ISimulator::KillType kt ) {
- TraceEvent("RebootingProcessAttempt").detailext("ZoneId", p->locality.zoneId()).detail("KillType", kt).detail("Process", p->toString()).detail("StartingClass", p->startingClass.toString()).detail("Failed", p->failed).detail("Excluded", p->excluded).detail("Cleared", p->cleared).detail("Rebooting", p->rebooting).detail("TaskDefaultDelay", TaskDefaultDelay);
+ TraceEvent("RebootingProcessAttempt").detail("ZoneId", p->locality.zoneId()).detail("KillType", kt).detail("Process", p->toString()).detail("StartingClass", p->startingClass.toString()).detail("Failed", p->failed).detail("Excluded", p->excluded).detail("Cleared", p->cleared).detail("Rebooting", p->rebooting).detail("TaskDefaultDelay", TaskDefaultDelay);

wait( g_sim2.delay( 0, TaskDefaultDelay, p ) ); // Switch to the machine in question

@ -1708,7 +1708,7 @@ ACTOR void doReboot( ISimulator::ProcessInfo *p, ISimulator::KillType kt ) {

if( p->rebooting )
return;
- TraceEvent("RebootingProcess").detail("KillType", kt).detail("Address", p->address).detailext("ZoneId", p->locality.zoneId()).detailext("DataHall", p->locality.dataHallId()).detail("Locality", p->locality.toString()).detail("Failed", p->failed).detail("Excluded", p->excluded).detail("Cleared", p->cleared).backtrace();
+ TraceEvent("RebootingProcess").detail("KillType", kt).detail("Address", p->address).detail("ZoneId", p->locality.zoneId()).detail("DataHall", p->locality.dataHallId()).detail("Locality", p->locality.toString()).detail("Failed", p->failed).detail("Excluded", p->excluded).detail("Cleared", p->cleared).backtrace();
p->rebooting = true;
if ((kt == ISimulator::RebootAndDelete) || (kt == ISimulator::RebootProcessAndDelete)) {
p->cleared = true;
@ -106,7 +106,7 @@ static void applyMetadataMutations(UID const& dbgid, Arena &arena, VectorRef<Mut
MutationRef privatized = m;
privatized.param1 = m.param1.withPrefix(systemKeys.begin, arena);
TraceEvent(SevDebug, "SendingPrivateMutation", dbgid).detail("Original", m.toString()).detail("Privatized", privatized.toString()).detail("Server", serverKeysDecodeServer(m.param1))
- .detail("TagKey", printable(serverTagKeyFor( serverKeysDecodeServer(m.param1) ))).detail("Tag", decodeServerTagValue( txnStateStore->readValue( serverTagKeyFor( serverKeysDecodeServer(m.param1) ) ).get().get() ).toString());
+ .detail("TagKey", serverTagKeyFor( serverKeysDecodeServer(m.param1) )).detail("Tag", decodeServerTagValue( txnStateStore->readValue( serverTagKeyFor( serverKeysDecodeServer(m.param1) ) ).get().get() ).toString());

toCommit->addTag( decodeServerTagValue( txnStateStore->readValue( serverTagKeyFor( serverKeysDecodeServer(m.param1) ) ).get().get() ) );
toCommit->addTypedMessage(privatized);
@ -151,7 +151,10 @@ static void applyMetadataMutations(UID const& dbgid, Arena &arena, VectorRef<Mut
if(Optional<StringRef>(m.param2) != txnStateStore->readValue(m.param1).get().castTo<StringRef>()) { // FIXME: Make this check more specific, here or by reading configuration whenever there is a change
if(!m.param1.startsWith( excludedServersPrefix ) && m.param1 != excludedServersVersionKey) {
auto t = txnStateStore->readValue(m.param1).get();
- TraceEvent("MutationRequiresRestart", dbgid).detail("M", m.toString()).detail("PrevValue", t.present() ? printable(t.get()) : "(none)").detail("ToCommit", toCommit!=NULL);
+ TraceEvent("MutationRequiresRestart", dbgid)
+ .detail("M", m.toString())
+ .detail("PrevValue", t.present() ? t.get() : LiteralStringRef("(none)"))
+ .detail("ToCommit", toCommit!=NULL);
if(confChange) *confChange = true;
}
}
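One side effect visible in the hunk above: printable() converted both arms of the ternary to std::string, but a templated detail() sees a single value type, so the fallback becomes a StringRef literal to match t.get(). A small analogue with std:: types (the function name and placeholder are illustrative):

    #include <optional>
    #include <string_view>

    // Both arms must now share one traceable type; previously each arm was
    // independently converted to std::string before reaching detail().
    std::string_view prevValueForTrace(const std::optional<std::string_view>& t) {
        return t.has_value() ? *t : std::string_view("(none)");
    }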
@ -230,8 +233,8 @@ static void applyMetadataMutations(UID const& dbgid, Arena &arena, VectorRef<Mut
}

// Log the modification
- TraceEvent("LogRangeAdd").detail("LogRanges", vecBackupKeys->size()).detail("MutationKey", printable(m.param1))
+ TraceEvent("LogRangeAdd").detail("LogRanges", vecBackupKeys->size()).detail("MutationKey", m.param1)
- .detail("LogRangeBegin", printable(logRangeBegin)).detail("LogRangeEnd", printable(logRangeEnd));
+ .detail("LogRangeBegin", logRangeBegin).detail("LogRangeEnd", logRangeEnd);
}
}
else if (m.param1.startsWith(globalKeysPrefix)) {
@ -383,8 +386,8 @@ static void applyMetadataMutations(UID const& dbgid, Arena &arena, VectorRef<Mut
KeyRangeRef commonLogRange(range & logRangesRange);

TraceEvent("LogRangeClear")
- .detail("RangeBegin", printable(range.begin)).detail("RangeEnd", printable(range.end))
+ .detail("RangeBegin", range.begin).detail("RangeEnd", range.end)
- .detail("IntersectBegin", printable(commonLogRange.begin)).detail("IntersectEnd", printable(commonLogRange.end));
+ .detail("IntersectBegin", commonLogRange.begin).detail("IntersectEnd", commonLogRange.end);

// Remove the key range from the vector, if defined
if (vecBackupKeys) {
@ -406,9 +409,9 @@ static void applyMetadataMutations(UID const& dbgid, Arena &arena, VectorRef<Mut
// Decode the log destination and key value
logKeyEnd = logRangesDecodeValue(logRangeAffected.value, &logDestination);

- TraceEvent("LogRangeErase").detail("AffectedKey", printable(logRangeAffected.key)).detail("AffectedValue", printable(logRangeAffected.value))
+ TraceEvent("LogRangeErase").detail("AffectedKey", logRangeAffected.key).detail("AffectedValue", logRangeAffected.value)
- .detail("LogKeyBegin", printable(logKeyBegin)).detail("LogKeyEnd", printable(logKeyEnd))
+ .detail("LogKeyBegin", logKeyBegin).detail("LogKeyEnd", logKeyEnd)
- .detail("LogDestination", printable(logDestination));
+ .detail("LogDestination", logDestination);

// Identify the locations to place the backup key
auto logRanges = vecBackupKeys->modify(KeyRangeRef(logKeyBegin, logKeyEnd));
@ -66,13 +66,6 @@ set(FDBSERVER_SRCS
SimulatedCluster.actor.cpp
SimulatedCluster.h
SkipList.cpp
- sqlite/btree.h
- sqlite/hash.h
- sqlite/sqlite3.h
- sqlite/sqlite3ext.h
- sqlite/sqliteInt.h
- sqlite/sqliteLimit.h
- sqlite/sqlite3.amalgamation.c
Status.actor.cpp
Status.h
StorageMetrics.actor.h
@ -177,6 +170,18 @@ set(FDBSERVER_SRCS
workloads/WriteBandwidth.actor.cpp
workloads/WriteDuringRead.actor.cpp)

+ set(SQLITE_SRCS
+ sqlite/btree.h
+ sqlite/hash.h
+ sqlite/sqlite3.h
+ sqlite/sqlite3ext.h
+ sqlite/sqliteInt.h
+ sqlite/sqliteLimit.h
+ sqlite/sqlite3.amalgamation.c)

+ add_library(sqlite ${SQLITE_SRCS})
+ target_compile_definitions(sqlite PRIVATE $<$<CONFIG:Debug>:NDEBUG>)

set(java_workload_docstring "Build the Java workloads (makes fdbserver link against JNI)")
set(WITH_JAVA_WORKLOAD OFF CACHE BOOL "${java_workload_docstring}")
if(WITH_JAVA_WORKLOAD)
|
||||||
target_include_directories(fdbserver PRIVATE
|
target_include_directories(fdbserver PRIVATE
|
||||||
${CMAKE_CURRENT_BINARY_DIR}/workloads
|
${CMAKE_CURRENT_BINARY_DIR}/workloads
|
||||||
${CMAKE_CURRENT_SOURCE_DIR}/workloads)
|
${CMAKE_CURRENT_SOURCE_DIR}/workloads)
|
||||||
target_link_libraries(fdbserver PRIVATE fdbclient)
|
target_link_libraries(fdbserver PRIVATE fdbclient sqlite)
|
||||||
if(WITH_JAVA_WORKLOAD)
|
if(WITH_JAVA_WORKLOAD)
|
||||||
if(NOT JNI_FOUND)
|
if(NOT JNI_FOUND)
|
||||||
message(SEND_ERROR "Trying to build Java workload but couldn't find JNI")
|
message(SEND_ERROR "Trying to build Java workload but couldn't find JNI")
|
||||||
|
|
|
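The CMake hunks above split the sqlite amalgamation out of FDBSERVER_SRCS into its own sqlite target so it can be compiled with its own definitions; the generator expression $<$<CONFIG:Debug>:NDEBUG> defines NDEBUG for sqlite alone in Debug builds, presumably to compile out sqlite's internal assert()s there while the rest of fdbserver keeps its checks. A minimal, self-contained C++ illustration of what that definition controls:

// Compile with -DNDEBUG (what the sqlite target gets in Debug builds) and
// the assert vanishes; compile without it and the check runs.
#include <cassert>
#include <cstdio>

int main() {
	int pageSize = 4096;
	assert(pageSize % 512 == 0); // compiled out entirely when NDEBUG is defined
#ifdef NDEBUG
	std::puts("NDEBUG set: asserts are no-ops");
#else
	std::puts("NDEBUG not set: asserts are checked");
#endif
}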
@@ -1843,9 +1843,9 @@ void registerWorker( RegisterWorkerRequest req, ClusterControllerData *self ) {
 newPriorityInfo.processClassFitness = newProcessClass.machineClassFitness(ProcessClass::ClusterController);

 if(info == self->id_worker.end()) {
-	TraceEvent("ClusterControllerActualWorkers", self->id).detail("WorkerId",w.id()).detailext("ProcessId", w.locality.processId()).detailext("ZoneId", w.locality.zoneId()).detailext("DataHall", w.locality.dataHallId()).detail("PClass", req.processClass.toString()).detail("Workers", self->id_worker.size());
+	TraceEvent("ClusterControllerActualWorkers", self->id).detail("WorkerId",w.id()).detail("ProcessId", w.locality.processId()).detail("ZoneId", w.locality.zoneId()).detail("DataHall", w.locality.dataHallId()).detail("PClass", req.processClass.toString()).detail("Workers", self->id_worker.size());
 } else {
-	TraceEvent("ClusterControllerWorkerAlreadyRegistered", self->id).suppressFor(1.0).detail("WorkerId",w.id()).detailext("ProcessId", w.locality.processId()).detailext("ZoneId", w.locality.zoneId()).detailext("DataHall", w.locality.dataHallId()).detail("PClass", req.processClass.toString()).detail("Workers", self->id_worker.size());
+	TraceEvent("ClusterControllerWorkerAlreadyRegistered", self->id).suppressFor(1.0).detail("WorkerId",w.id()).detail("ProcessId", w.locality.processId()).detail("ZoneId", w.locality.zoneId()).detail("DataHall", w.locality.dataHallId()).detail("PClass", req.processClass.toString()).detail("Workers", self->id_worker.size());
 }
 if ( w.address() == g_network->getLocalAddress() ) {
 	if(self->changingDcIds.get().first) {
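The hunk above retires the separate detailext() overload that was used for optional locality fields such as processId() and zoneId(); with trait-based tracing a single detail() can accept them. A sketch of how an Optional specialization could forward to the inner type's formatter (std::optional stands in for flow's Optional<>, and the exact FoundationDB definition may differ):

#include <optional>
#include <string>
#include <type_traits>

template <class T>
struct Traceable : std::false_type {};

template <>
struct Traceable<std::string> : std::true_type {
	static std::string toString(const std::string& value) { return value; }
};

// An Optional<T> is traceable whenever T is; absent values render as a marker.
template <class T>
struct Traceable<std::optional<T>> : Traceable<T> {
	static std::string toString(const std::optional<T>& value) {
		return value.has_value() ? Traceable<T>::toString(*value) : "[not set]";
	}
};

int main() {
	std::optional<std::string> zoneId; // e.g. what w.locality.zoneId() returns
	std::string rendered = Traceable<std::optional<std::string>>::toString(zoneId);
	// rendered == "[not set]"; with a value present it would be the value itself
	return rendered == "[not set]" ? 0 : 1;
}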
@@ -2449,7 +2449,7 @@ ACTOR Future<Void> doEmptyCommit(Database cx) {
 ACTOR Future<Void> handleForcedRecoveries( ClusterControllerData *self, ClusterControllerFullInterface interf ) {
 	loop {
 		state ForceRecoveryRequest req = waitNext( interf.clientInterface.forceRecovery.getFuture() );
-		TraceEvent("ForcedRecoveryStart", self->id).detail("ClusterControllerDcId", printable(self->clusterControllerDcId)).detail("DcId", req.dcId.printable());
+		TraceEvent("ForcedRecoveryStart", self->id).detail("ClusterControllerDcId", self->clusterControllerDcId).detail("DcId", req.dcId.printable());
 		state Future<Void> fCommit = doEmptyCommit(self->cx);
 		wait(fCommit || delay(SERVER_KNOBS->FORCE_RECOVERY_CHECK_DELAY));
 		if(!fCommit.isReady() || fCommit.isError()) {

@@ -128,7 +128,7 @@ ACTOR Future<Void> localGenerationReg( GenerationRegInterface interf, OnDemandSt
 // SOMEDAY: concurrent access to different keys?
 loop choose {
 	when ( GenerationRegReadRequest _req = waitNext( interf.read.getFuture() ) ) {
-		TraceEvent("GenerationRegReadRequest").detail("From", _req.reply.getEndpoint().getPrimaryAddress()).detail("K", printable(_req.key));
+		TraceEvent("GenerationRegReadRequest").detail("From", _req.reply.getEndpoint().getPrimaryAddress()).detail("K", _req.key);
 		state GenerationRegReadRequest req = _req;
 		Optional<Value> rawV = wait( store->readValue( req.key ) );
 		v = rawV.present() ? BinaryReader::fromStringRef<GenerationRegVal>( rawV.get(), IncludeVersion() ) : GenerationRegVal();

@@ -149,11 +149,11 @@ ACTOR Future<Void> localGenerationReg( GenerationRegInterface interf, OnDemandSt
 v.val = wrq.kv.value;
 store->set( KeyValueRef( wrq.kv.key, BinaryWriter::toValue(v, IncludeVersion()) ) );
 wait(store->commit());
-TraceEvent("GenerationRegWrote").detail("From", wrq.reply.getEndpoint().getPrimaryAddress()).detail("Key", printable(wrq.kv.key))
+TraceEvent("GenerationRegWrote").detail("From", wrq.reply.getEndpoint().getPrimaryAddress()).detail("Key", wrq.kv.key)
 	.detail("ReqGen", wrq.gen.generation).detail("Returning", v.writeGen.generation);
 wrq.reply.send( v.writeGen );
 } else {
-TraceEvent("GenerationRegWriteFail").detail("From", wrq.reply.getEndpoint().getPrimaryAddress()).detail("Key", printable(wrq.kv.key))
+TraceEvent("GenerationRegWriteFail").detail("From", wrq.reply.getEndpoint().getPrimaryAddress()).detail("Key", wrq.kv.key)
 	.detail("ReqGen", wrq.gen.generation).detail("ReadGen", v.readGen.generation).detail("WriteGen", v.writeGen.generation);
 wrq.reply.send( std::max( v.readGen, v.writeGen ) );
 }

@@ -267,7 +267,7 @@ ACTOR Future<Void> leaderRegister(LeaderElectionRegInterface interf, Key key) {
 !currentNominee.present())
 {
 	// Our state is back to the initial state, so we can safely stop this actor
-	TraceEvent("EndingLeaderNomination").detail("Key", printable(key));
+	TraceEvent("EndingLeaderNomination").detail("Key", key);
 	return Void();
 } else {
 	Optional<LeaderInfo> nextNominee;

@@ -293,7 +293,7 @@ ACTOR Future<Void> leaderRegister(LeaderElectionRegInterface interf, Key key) {

 if ( !nextNominee.present() || !foundCurrentNominee || currentNominee.get().leaderChangeRequired(nextNominee.get()) ) {
 	TraceEvent("NominatingLeader").detail("Nominee", nextNominee.present() ? nextNominee.get().changeID : UID())
-		.detail("Changed", nextNominee != currentNominee).detail("Key", printable(key));
+		.detail("Changed", nextNominee != currentNominee).detail("Key", key);
 	for(unsigned int i=0; i<notify.size(); i++)
 		notify[i].send( nextNominee );
 	notify.clear();
@@ -2561,8 +2561,8 @@ ACTOR Future<Void> teamTracker(DDTeamCollection* self, Reference<TCTeamInfo> tea
 if(g_random->random01() < 0.01) {
 	TraceEvent("SendRelocateToDDQx100", self->distributorId)
 		.detail("Team", team->getDesc())
-		.detail("KeyBegin", printable(rs.keys.begin))
-		.detail("KeyEnd", printable(rs.keys.end))
+		.detail("KeyBegin", rs.keys.begin)
+		.detail("KeyEnd", rs.keys.end)
 		.detail("Priority", rs.priority)
 		.detail("TeamFailedMachines", team->size() - serversLeft)
 		.detail("TeamOKMachines", serversLeft);

@@ -3259,14 +3259,14 @@ ACTOR Future<Void> updateReplicasKey(DDTeamCollection* self, Optional<Key> dcId)

 wait(self->initialFailureReactionDelay && waitForAll(serverUpdates));
 wait(waitUntilHealthy(self));
-TraceEvent("DDUpdatingReplicas", self->distributorId).detail("DcId", printable(dcId)).detail("Replicas", self->configuration.storageTeamSize);
+TraceEvent("DDUpdatingReplicas", self->distributorId).detail("DcId", dcId).detail("Replicas", self->configuration.storageTeamSize);
 state Transaction tr(self->cx);
 loop {
 	try {
 		Optional<Value> val = wait( tr.get(datacenterReplicasKeyFor(dcId)) );
 		state int oldReplicas = val.present() ? decodeDatacenterReplicasValue(val.get()) : 0;
 		if(oldReplicas == self->configuration.storageTeamSize) {
-			TraceEvent("DDUpdatedAlready", self->distributorId).detail("DcId", printable(dcId)).detail("Replicas", self->configuration.storageTeamSize);
+			TraceEvent("DDUpdatedAlready", self->distributorId).detail("DcId", dcId).detail("Replicas", self->configuration.storageTeamSize);
 			return Void();
 		}
 		if(oldReplicas < self->configuration.storageTeamSize) {

@@ -3274,7 +3274,7 @@ ACTOR Future<Void> updateReplicasKey(DDTeamCollection* self, Optional<Key> dcId)
 }
 tr.set(datacenterReplicasKeyFor(dcId), datacenterReplicasValue(self->configuration.storageTeamSize));
 wait( tr.commit() );
-TraceEvent("DDUpdatedReplicas", self->distributorId).detail("DcId", printable(dcId)).detail("Replicas", self->configuration.storageTeamSize).detail("OldReplicas", oldReplicas);
+TraceEvent("DDUpdatedReplicas", self->distributorId).detail("DcId", dcId).detail("Replicas", self->configuration.storageTeamSize).detail("OldReplicas", oldReplicas);
 return Void();
 } catch( Error &e ) {
 	wait( tr.onError(e) );

@@ -3459,7 +3459,7 @@ ACTOR Future<Void> debugCheckCoalescing(Database cx) {

 for(int j = 0; j < ranges.size() - 2; j++)
 	if(ranges[j].value == ranges[j + 1].value)
-		TraceEvent(SevError, "UncoalescedValues", id).detail("Key1", printable(ranges[j].key)).detail("Key2", printable(ranges[j + 1].key)).detail("Value", printable(ranges[j].value));
+		TraceEvent(SevError, "UncoalescedValues", id).detail("Key1", ranges[j].key).detail("Key2", ranges[j + 1].key).detail("Value", ranges[j].value);
 }

 TraceEvent("DoneCheckingCoalescing");

@@ -3594,8 +3594,8 @@ ACTOR Future<Void> dataDistribution(Reference<DataDistributorData> self)
 initData = initData_;
 if(initData->shards.size() > 1) {
 	TraceEvent("DDInitGotInitialDD", self->ddId)
-		.detail("B", printable(initData->shards.end()[-2].key))
-		.detail("E", printable(initData->shards.end()[-1].key))
+		.detail("B", initData->shards.end()[-2].key)
+		.detail("E", initData->shards.end()[-1].key)
 		.detail("Src", describe(initData->shards.end()[-2].primarySrc))
 		.detail("Dest", describe(initData->shards.end()[-2].primaryDest))
 		.trackLatest("InitialDD");

@@ -3643,7 +3643,7 @@ ACTOR Future<Void> dataDistribution(Reference<DataDistributorData> self)
 teams.push_back(ShardsAffectedByTeamFailure::Team(initData->shards[shard].remoteSrc, false));
 }
 if(g_network->isSimulated()) {
-	TraceEvent("DDInitShard").detail("Keys", printable(keys)).detail("PrimarySrc", describe(initData->shards[shard].primarySrc)).detail("RemoteSrc", describe(initData->shards[shard].remoteSrc))
+	TraceEvent("DDInitShard").detail("Keys", keys).detail("PrimarySrc", describe(initData->shards[shard].primarySrc)).detail("RemoteSrc", describe(initData->shards[shard].remoteSrc))
 		.detail("PrimaryDest", describe(initData->shards[shard].primaryDest)).detail("RemoteDest", describe(initData->shards[shard].remoteDest));
 }
@@ -426,10 +426,10 @@ struct DDQueueData {
 auto range = queueMap.rangeContaining( rdit->keys.begin );
 if( range.value() != *rdit || range.range() != rdit->keys )
 	TraceEvent(SevError, "DDQueueValidateError4").detail("Problem", "relocates in the queue are in the queueMap exactly")
-		.detail("RangeBegin", printable(range.range().begin))
-		.detail("RangeEnd", printable(range.range().end))
-		.detail("RelocateBegin2", printable(range.value().keys.begin))
-		.detail("RelocateEnd2", printable(range.value().keys.end))
+		.detail("RangeBegin", range.range().begin)
+		.detail("RangeEnd", range.range().end)
+		.detail("RelocateBegin2", range.value().keys.begin)
+		.detail("RelocateEnd2", range.value().keys.end)
 		.detail("RelocateStart", range.value().startTime)
 		.detail("MapStart", rdit->startTime)
 		.detail("RelocateWork", range.value().workFactor)

@@ -570,7 +570,7 @@ struct DDQueueData {

 //This function cannot handle relocation requests which split a shard into three pieces
 void queueRelocation( RelocateData rd, std::set<UID> &serversToLaunchFrom ) {
-	//TraceEvent("QueueRelocationBegin").detail("Begin", printable(rd.keys.begin)).detail("End", printable(rd.keys.end));
+	//TraceEvent("QueueRelocationBegin").detail("Begin", rd.keys.begin).detail("End", rd.keys.end);

 	// remove all items from both queues that are fully contained in the new relocation (i.e. will be overwritten)
 	auto ranges = queueMap.intersectingRanges( rd.keys );

@@ -637,7 +637,7 @@ struct DDQueueData {

 rrs.interval = TraceInterval("QueuedRelocation");
 /*TraceEvent(rrs.interval.begin(), distributorId);
-	.detail("KeyBegin", printable(rrs.keys.begin)).detail("KeyEnd", printable(rrs.keys.end))
+	.detail("KeyBegin", rrs.keys.begin).detail("KeyEnd", rrs.keys.end)
 	.detail("Priority", rrs.priority).detail("WantsNewServers", rrs.wantsNewServers);*/
 queuedRelocations++;
 startRelocation(rrs.priority);

@@ -657,7 +657,7 @@ struct DDQueueData {
 if( !foundActiveRelocation ) {
 	newData.interval = TraceInterval("QueuedRelocation");
 	/*TraceEvent(newData.interval.begin(), distributorId);
-		.detail("KeyBegin", printable(newData.keys.begin)).detail("KeyEnd", printable(newData.keys.end))
+		.detail("KeyBegin", newData.keys.begin).detail("KeyEnd", newData.keys.end)
 		.detail("Priority", newData.priority).detail("WantsNewServers", newData.wantsNewServers);*/
 	queuedRelocations++;
 	startRelocation(newData.priority);

@@ -677,8 +677,8 @@ struct DDQueueData {
 }

 /*TraceEvent("ReceivedRelocateShard", distributorId)
-	.detail("KeyBegin", printable(rd.keys.begin))
-	.detail("KeyEnd", printable(rd.keys.end))
+	.detail("KeyBegin", rd.keys.begin)
+	.detail("KeyEnd", rd.keys.end)
 	.detail("Priority", rd.priority)
 	.detail("AffectedRanges", affectedQueuedItems.size()); */
 }

@@ -701,8 +701,8 @@ struct DDQueueData {
 busyString += describe(rd.src[i]) + " - (" + busymap[ rd.src[i] ].toString() + "); ";

 TraceEvent(title, distributorId)
-	.detail("KeyBegin", printable(rd.keys.begin))
-	.detail("KeyEnd", printable(rd.keys.end))
+	.detail("KeyBegin", rd.keys.begin)
+	.detail("KeyEnd", rd.keys.end)
 	.detail("Priority", rd.priority)
 	.detail("WorkFactor", rd.workFactor)
 	.detail("SourceServerCount", rd.src.size())

@@ -759,9 +759,9 @@ struct DDQueueData {
 it->value().priority >= rd.priority &&
 rd.priority < PRIORITY_TEAM_REDUNDANT ) {
 	/*TraceEvent("OverlappingInFlight", distributorId)
-		.detail("KeyBegin", printable(it->value().keys.begin))
-		.detail("KeyEnd", printable(it->value().keys.end))
-		.detail("Priority", it->value().priority); */
+		.detail("KeyBegin", it->value().keys.begin)
+		.detail("KeyEnd", it->value().keys.end)
+		.detail("Priority", it->value().priority);*/
 	overlappingInFlight = true;
 	break;
 }

@@ -867,7 +867,7 @@ ACTOR Future<Void> dataDistributionRelocator( DDQueueData *self, RelocateData rd
 }

 TraceEvent(relocateShardInterval.begin(), distributorId)
-	.detail("KeyBegin", printable(rd.keys.begin)).detail("KeyEnd", printable(rd.keys.end))
+	.detail("KeyBegin", rd.keys.begin).detail("KeyEnd", rd.keys.end)
 	.detail("Priority", rd.priority).detail("RelocationID", relocateShardInterval.pairID).detail("SuppressedEventCount", self->suppressIntervals);

 if(relocateShardInterval.severity != SevDebug) {
@@ -144,7 +144,7 @@ ACTOR Future<Void> trackShardBytes(

 /*TraceEvent("TrackShardBytesStarting")
 	.detail("TrackerID", trackerID)
-	.detail("Keys", printable(keys))
+	.detail("Keys", keys)
 	.detail("TrackedBytesInitiallyPresent", shardSize->get().present())
 	.detail("StartingSize", shardSize->get().present() ? shardSize->get().get().metrics.bytes : 0)
 	.detail("StartingMerges", shardSize->get().present() ? shardSize->get().get().merges : 0);*/

@@ -190,7 +190,7 @@ ACTOR Future<Void> trackShardBytes(
 StorageMetrics metrics = wait( tr.waitStorageMetrics( keys, bounds.min, bounds.max, bounds.permittedError, CLIENT_KNOBS->STORAGE_METRICS_SHARD_LIMIT ) );

 /*TraceEvent("ShardSizeUpdate")
-	.detail("Keys", printable(keys))
+	.detail("Keys", keys)
 	.detail("UpdatedSize", metrics.metrics.bytes)
 	.detail("Bandwidth", metrics.metrics.bytesPerKSecond)
 	.detail("BandwithStatus", getBandwidthStatus(metrics))

@@ -199,7 +199,7 @@ ACTOR Future<Void> trackShardBytes(
 	.detail("BandwidthLower", bounds.min.bytesPerKSecond)
 	.detail("BandwidthUpper", bounds.max.bytesPerKSecond)
 	.detail("ShardSizePresent", shardSize->get().present())
-	.detail("OldShardSize", shardSize->get().present() ? shardSize->get().get().metrics.bytes : 0 )
+	.detail("OldShardSize", shardSize->get().present() ? shardSize->get().get().metrics.bytes : 0)
 	.detail("TrackerID", trackerID);*/

 if( shardSize->get().present() && addToSizeEstimate )

@@ -329,8 +329,8 @@ ACTOR Future<Void> shardSplitter(

 if( g_random->random01() < 0.01 ) {
 	TraceEvent("RelocateShardStartSplitx100", self->distributorId)
-		.detail("Begin", printable(keys.begin))
-		.detail("End", printable(keys.end))
+		.detail("Begin", keys.begin)
+		.detail("End", keys.end)
 		.detail("MaxBytes", shardBounds.max.bytes)
 		.detail("MetricsBytes", metrics.bytes)
 		.detail("Bandwidth", bandwidthStatus == BandwidthStatusHigh ? "High" : bandwidthStatus == BandwidthStatusNormal ? "Normal" : "Low")

@@ -450,8 +450,8 @@ Future<Void> shardMerger(
 KeyRange mergeRange = merged;

 TraceEvent("RelocateShardMergeMetrics", self->distributorId)
-	.detail("OldKeys", printable(keys))
-	.detail("NewKeys", printable(mergeRange))
+	.detail("OldKeys", keys)
+	.detail("NewKeys", mergeRange)
 	.detail("EndingSize", endingStats.bytes)
 	.detail("BatchedMerges", shardsMerged);

@@ -532,8 +532,8 @@ ACTOR Future<Void> shardTracker(
 wait( delay( 0, TaskDataDistribution ) );

 /*TraceEvent("ShardTracker", self->distributorId)
-	.detail("Begin", printable(keys.begin))
-	.detail("End", printable(keys.end))
+	.detail("Begin", keys.begin)
+	.detail("End", keys.end)
 	.detail("TrackerID", trackerID)
 	.detail("MaxBytes", self->maxShardSize->get().get())
 	.detail("ShardSize", shardSize->get().get().bytes)

@@ -572,7 +572,7 @@ void restartShardTrackers( DataDistributionTracker* self, KeyRangeRef keys, Opti
 if( startingSize.present() ) {
 	ASSERT( ranges.size() == 1 );
 	/*TraceEvent("ShardTrackerSizePreset", self->distributorId)
-		.detail("Keys", printable(keys))
+		.detail("Keys", keys)
 		.detail("Size", startingSize.get().metrics.bytes)
 		.detail("Merges", startingSize.get().merges);*/
 	TEST( true ); // shardTracker started with trackedBytes already set

@@ -736,9 +736,9 @@ void ShardsAffectedByTeamFailure::defineShard( KeyRangeRef keys ) {
 uniquify(prevTeams);

 /*TraceEvent("ShardsAffectedByTeamFailureDefine")
-	.detail("KeyBegin", printable(keys.begin))
-	.detail("KeyEnd", printable(keys.end))
-	.detail("TeamCount", teams.size()); */
+	.detail("KeyBegin", keys.begin)
+	.detail("KeyEnd", keys.end)
+	.detail("TeamCount", teams.size());*/

 auto affectedRanges = shard_teams.getAffectedRangesAfterInsertion(keys);
 shard_teams.insert( keys, std::make_pair(teams, prevTeams) );

@@ -754,8 +754,8 @@ void ShardsAffectedByTeamFailure::defineShard( KeyRangeRef keys ) {

 void ShardsAffectedByTeamFailure::moveShard( KeyRangeRef keys, std::vector<Team> destinationTeams ) {
 	/*TraceEvent("ShardsAffectedByTeamFailureMove")
-		.detail("KeyBegin", printable(keys.begin))
-		.detail("KeyEnd", printable(keys.end))
+		.detail("KeyBegin", keys.begin)
+		.detail("KeyEnd", keys.end)
 		.detail("NewTeamSize", destinationTeam.size())
 		.detail("NewTeam", describe(destinationTeam));*/

@@ -828,8 +828,8 @@ void ShardsAffectedByTeamFailure::check() {
 for(auto x = team_shards.lower_bound( std::make_pair( *t, KeyRangeRef() ) ); x != team_shards.end() && x->first == *t; ++x)
 	shards += printable(x->second.begin) + "-" + printable(x->second.end) + ",";
 TraceEvent(SevError,"SATFInvariantError2")
-	.detail("KB", printable(i->begin()))
-	.detail("KE", printable(i->end()))
+	.detail("KB", i->begin())
+	.detail("KE", i->end())
 	.detail("Team", teamDesc)
 	.detail("Shards", shards);
 ASSERT(false);
@@ -37,7 +37,7 @@ public:
 location() : hi(0), lo(0) {}
 location(int64_t lo) : hi(0), lo(lo) {}
 location(int64_t hi, int64_t lo) : hi(hi), lo(lo) {}
-operator std::string() { return format("%lld.%lld", hi, lo); } // FIXME: Return a 'HumanReadableDescription' instead of std::string, make TraceEvent::detail accept that (for safety)
+operator std::string() const { return format("%lld.%lld", hi, lo); } // FIXME: Return a 'HumanReadableDescription' instead of std::string, make TraceEvent::detail accept that (for safety)

 template<class Ar>
 void serialize_unversioned(Ar& ar) {

@@ -81,6 +81,13 @@ public:
 virtual StorageBytes getStorageBytes() = 0;
 };

+template<>
+struct Traceable<IDiskQueue::location> : std::true_type {
+	static std::string toString(const IDiskQueue::location& value) {
+		return value;
+	}
+};
+
 // FIXME: One should be able to use SFINAE to choose between serialize and serialize_unversioned.
 template <class Ar> void load( Ar& ar, IDiskQueue::location& loc ) { loc.serialize_unversioned(ar); }
 template <class Ar> void save( Ar& ar, const IDiskQueue::location& loc ) { const_cast<IDiskQueue::location&>(loc).serialize_unversioned(ar); }
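The specialization added above is the opt-in side of the mechanism: a type becomes traceable by specializing Traceable<T> as std::true_type with a static toString(). The two hunks in this file work together: toString() receives the location by const reference, so the conversion operator had to become const; `return value;` then formats through operator std::string() const. A compilable sketch of the same shape, with format() and the queue types reduced to stand-ins:

#include <cstdint>
#include <cstdio>
#include <string>
#include <type_traits>

template <class T>
struct Traceable : std::false_type {};

// Stand-in for IDiskQueue::location (the real one also carries serialization).
struct location {
	int64_t hi, lo;
	// Must be const: Traceable<>::toString only has a const reference to work with.
	operator std::string() const {
		char buf[48];
		std::snprintf(buf, sizeof(buf), "%lld.%lld", (long long)hi, (long long)lo);
		return buf;
	}
};

template <>
struct Traceable<location> : std::true_type {
	static std::string toString(const location& value) {
		return value; // implicit conversion via operator std::string() const
	}
};

int main() {
	location loc{1, 42};
	static_assert(Traceable<location>::value, "location has opted in to tracing");
	std::puts(Traceable<location>::toString(loc).c_str()); // prints 1.42
}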
@@ -485,8 +485,8 @@ private:
 if (h.op == OpSnapshotItem) { // snapshot data item
 	/*if (p1 < uncommittedNextKey) {
 		TraceEvent(SevError, "RecSnapshotBack", self->id)
-			.detail("NextKey", printable(uncommittedNextKey))
-			.detail("P1", printable(p1))
+			.detail("NextKey", uncommittedNextKey)
+			.detail("P1", p1)
 			.detail("Nextlocation", self->log->getNextReadLocation());
 	}
 	ASSERT( p1 >= uncommittedNextKey );*/

@@ -497,7 +497,7 @@ private:
 ++dbgSnapshotItemCount;
 } else if (h.op == OpSnapshotEnd || h.op == OpSnapshotAbort) { // snapshot complete
 	TraceEvent("RecSnapshotEnd", self->id)
-		.detail("NextKey", printable(uncommittedNextKey))
+		.detail("NextKey", uncommittedNextKey)
 		.detail("Nextlocation", self->log->getNextReadLocation())
 		.detail("IsSnapshotEnd", h.op == OpSnapshotEnd);

@@ -526,7 +526,7 @@ private:
 } else if (h.op == OpRollback) { // rollback previous transaction
 	recoveryQueue.rollback();
 	TraceEvent("KVSMemRecSnapshotRollback", self->id)
-		.detail("NextKey", printable(uncommittedNextKey));
+		.detail("NextKey", uncommittedNextKey);
 	uncommittedNextKey = self->recoveredSnapshotKey;
 	uncommittedPrevSnapshotEnd = self->previousSnapshotEnd;
 	uncommittedSnapshotEnd = self->currentSnapshotEnd;

@@ -620,7 +620,7 @@ private:
 state int snapItems = 0;
 state uint64_t snapshotBytes = 0;

-TraceEvent("KVSMemStartingSnapshot", self->id).detail("StartKey", printable(nextKey));
+TraceEvent("KVSMemStartingSnapshot", self->id).detail("StartKey", nextKey);

 loop {
 	wait( self->notifiedCommittedWriteBytes.whenAtLeast( snapshotTotalWrittenBytes + 1 ) );

@@ -646,7 +646,7 @@ private:
 if (next == self->data.end()) {
 	auto thisSnapshotEnd = self->log_op( OpSnapshotEnd, StringRef(), StringRef() );
 	//TraceEvent("SnapshotEnd", self->id)
-	//	.detail("LastKey", printable(lastKey.present() ? lastKey.get() : LiteralStringRef("<none>")))
+	//	.detail("LastKey", lastKey.present() ? lastKey.get() : LiteralStringRef("<none>"))
 	//	.detail("CurrentSnapshotEndLoc", self->currentSnapshotEnd)
 	//	.detail("PreviousSnapshotEndLoc", self->previousSnapshotEnd)
 	//	.detail("ThisSnapshotEnd", thisSnapshotEnd)

@@ -1211,7 +1211,7 @@ int SQLiteDB::checkAllPageChecksums() {
 Statement *jm = new Statement(*this, "PRAGMA journal_mode");
 ASSERT( jm->nextRow() );
 if (jm->column(0) != LiteralStringRef("wal")){
-	TraceEvent(SevError, "JournalModeError").detail("Filename", filename).detail("Mode", printable(jm->column(0)));
+	TraceEvent(SevError, "JournalModeError").detail("Filename", filename).detail("Mode", jm->column(0));
 	ASSERT( false );
 }
 delete jm;

@@ -1337,7 +1337,7 @@ void SQLiteDB::open(bool writable) {
 Statement jm(*this, "PRAGMA journal_mode");
 ASSERT( jm.nextRow() );
 if (jm.column(0) != LiteralStringRef("wal")){
-	TraceEvent(SevError, "JournalModeError").detail("Filename", filename).detail("Mode", printable(jm.column(0)));
+	TraceEvent(SevError, "JournalModeError").detail("Filename", filename).detail("Mode", jm.column(0));
 	ASSERT( false );
 }
@@ -83,7 +83,7 @@ Optional<LatencyBandConfig> LatencyBandConfig::parse(ValueRef configurationStrin

 json_spirit::mValue parsedConfig;
 if(!json_spirit::read_string(configurationString.toString(), parsedConfig)) {
-	TraceEvent(SevWarnAlways, "InvalidLatencyBandConfiguration").detail("Reason", "InvalidJSON").detail("Configuration", printable(configurationString));
+	TraceEvent(SevWarnAlways, "InvalidLatencyBandConfiguration").detail("Reason", "InvalidJSON").detail("Configuration", configurationString);
 	return config;
 }

@@ -96,7 +96,7 @@ Optional<LatencyBandConfig> LatencyBandConfig::parse(ValueRef configurationStrin

 std::string errorStr;
 if(!schemaMatch(schema.get_obj(), configJson, errorStr)) {
-	TraceEvent(SevWarnAlways, "InvalidLatencyBandConfiguration").detail("Reason", "SchemaMismatch").detail("Configuration", printable(configurationString)).detail("Error", errorStr);
+	TraceEvent(SevWarnAlways, "InvalidLatencyBandConfiguration").detail("Reason", "SchemaMismatch").detail("Configuration", configurationString).detail("Error", errorStr);
 	return config;
 }

@@ -285,7 +285,7 @@ void peekMessagesFromMemory( LogRouterData* self, TLogPeekRequest const& req, Bi
 ASSERT( !messages.getLength() );

 auto& deque = get_version_messages(self, req.tag);
-//TraceEvent("TLogPeekMem", self->dbgid).detail("Tag", printable(req.tag1)).detail("PDS", self->persistentDataSequence).detail("PDDS", self->persistentDataDurableSequence).detail("Oldest", map1.empty() ? 0 : map1.begin()->key ).detail("OldestMsgCount", map1.empty() ? 0 : map1.begin()->value.size());
+//TraceEvent("TLogPeekMem", self->dbgid).detail("Tag", req.tag1).detail("PDS", self->persistentDataSequence).detail("PDDS", self->persistentDataDurableSequence).detail("Oldest", map1.empty() ? 0 : map1.begin()->key ).detail("OldestMsgCount", map1.empty() ? 0 : map1.begin()->value.size());

 auto it = std::lower_bound(deque.begin(), deque.end(), std::make_pair(req.begin, LengthPrefixedStringRef()), CompareFirst<std::pair<Version, LengthPrefixedStringRef>>());
@@ -842,8 +842,8 @@ ACTOR Future<Void> commitBatch(

 // if (debugMutation("BackupProxyCommit", commitVersion, backupMutation)) {
 // 	TraceEvent("BackupProxyCommitTo", self->dbgid).detail("To", describe(tags)).detail("BackupMutation", backupMutation.toString())
-// 		.detail("BackupMutationSize", val.size()).detail("Version", commitVersion).detail("DestPath", printable(logRangeMutation.first))
-// 		.detail("PartIndex", part).detail("PartIndexEndian", bigEndian32(part)).detail("PartData", printable(backupMutation.param1));
+// 		.detail("BackupMutationSize", val.size()).detail("Version", commitVersion).detail("DestPath", logRangeMutation.first)
+// 		.detail("PartIndex", part).detail("PartIndexEndian", bigEndian32(part)).detail("PartData", backupMutation.param1);
 // }
 }
 }
@@ -283,8 +283,8 @@ ACTOR Future<Void> startMoveKeys( Database occ, KeyRange keys, vector<UID> serve
 currentKeys = KeyRangeRef(currentKeys.begin, endKey);

 /*TraceEvent("StartMoveKeysBatch", relocationIntervalId)
-	.detail("KeyBegin", printable(currentKeys.begin).c_str())
-	.detail("KeyEnd", printable(currentKeys.end).c_str());*/
+	.detail("KeyBegin", currentKeys.begin.c_str())
+	.detail("KeyEnd", currentKeys.end.c_str());*/

 //printf("Moving '%s'-'%s' (%d) to %d servers\n", keys.begin.toString().c_str(), keys.end.toString().c_str(), old.size(), servers.size());
 //for(int i=0; i<old.size(); i++)

@@ -301,8 +301,8 @@ ACTOR Future<Void> startMoveKeys( Database occ, KeyRange keys, vector<UID> serve
 decodeKeyServersValue( old[i].value, src, dest );

 /*TraceEvent("StartMoveKeysOldRange", relocationIntervalId)
-	.detail("KeyBegin", printable(rangeIntersectKeys.begin).c_str())
-	.detail("KeyEnd", printable(rangeIntersectKeys.end).c_str())
+	.detail("KeyBegin", rangeIntersectKeys.begin.c_str())
+	.detail("KeyEnd", rangeIntersectKeys.end.c_str())
 	.detail("OldSrc", describe(src))
 	.detail("OldDest", describe(dest))
 	.detail("ReadVersion", tr.getReadVersion().get());*/

@@ -366,8 +366,8 @@ ACTOR Future<Void> startMoveKeys( Database occ, KeyRange keys, vector<UID> serve
 if(retries%10 == 0) {
 	TraceEvent(retries == 50 ? SevWarnAlways : SevWarn, "StartMoveKeysRetrying", relocationIntervalId)
 		.error(err)
-		.detail("Keys", printable(keys))
-		.detail("BeginKey", printable(begin))
+		.detail("Keys", keys)
+		.detail("BeginKey", begin)
 		.detail("NumTries", retries);
 }
 }

@@ -468,7 +468,7 @@ ACTOR Future<Void> finishMoveKeys( Database occ, KeyRange keys, vector<UID> dest
 ASSERT (!destinationTeam.empty());

 try {
-	TraceEvent(SevDebug, interval.begin(), relocationIntervalId).detail("KeyBegin", printable(keys.begin)).detail("KeyEnd", printable(keys.end));
+	TraceEvent(SevDebug, interval.begin(), relocationIntervalId).detail("KeyBegin", keys.begin).detail("KeyEnd", keys.end);

 	//This process can be split up into multiple transactions if there are too many existing overlapping shards
 	//In that case, each iteration of this loop will have begin set to the end of the last processed shard

@@ -538,13 +538,13 @@ ACTOR Future<Void> finishMoveKeys( Database occ, KeyRange keys, vector<UID> dest
 alreadyMoved = destSet.empty() && srcSet == intendedTeam;
 if(destSet != intendedTeam && !alreadyMoved) {
 	TraceEvent(SevWarn, "MoveKeysDestTeamNotIntended", relocationIntervalId)
-		.detail("KeyBegin", printable(keys.begin))
-		.detail("KeyEnd", printable(keys.end))
-		.detail("IterationBegin", printable(begin))
-		.detail("IterationEnd", printable(endKey))
+		.detail("KeyBegin", keys.begin)
+		.detail("KeyEnd", keys.end)
+		.detail("IterationBegin", begin)
+		.detail("IterationEnd", endKey)
 		.detail("DestSet", describe(destSet))
 		.detail("IntendedTeam", describe(intendedTeam))
-		.detail("KeyServers", printable(keyServers));
+		.detail("KeyServers", keyServers);
 	//ASSERT( false );

 	ASSERT(!dest.empty()); //The range has already been moved, but to a different dest (or maybe dest was cleared)

@@ -589,18 +589,18 @@ ACTOR Future<Void> finishMoveKeys( Database occ, KeyRange keys, vector<UID> dest
 if (!dest.size()) {
 	TEST(true); // A previous finishMoveKeys for this range committed just as it was cancelled to start this one?
 	TraceEvent("FinishMoveKeysNothingToDo", relocationIntervalId)
-		.detail("KeyBegin", printable(keys.begin))
-		.detail("KeyEnd", printable(keys.end))
-		.detail("IterationBegin", printable(begin))
-		.detail("IterationEnd", printable(endKey));
+		.detail("KeyBegin", keys.begin)
+		.detail("KeyEnd", keys.end)
+		.detail("IterationBegin", begin)
+		.detail("IterationEnd", endKey);
 	begin = keyServers.end()[-1].key;
 	break;
 }

 waitInterval = TraceInterval("RelocateShard_FinishMoveKeysWaitDurable");
 TraceEvent(SevDebug, waitInterval.begin(), relocationIntervalId)
-	.detail("KeyBegin", printable(keys.begin))
-	.detail("KeyEnd", printable(keys.end));
+	.detail("KeyBegin", keys.begin)
+	.detail("KeyEnd", keys.end);

 // Wait for a durable quorum of servers in destServers to have keys available (readWrite)
 // They must also have at least the transaction read version so they can't "forget" the shard between

@@ -668,10 +668,10 @@ ACTOR Future<Void> finishMoveKeys( Database occ, KeyRange keys, vector<UID> dest
 if(retries%10 == 0) {
 	TraceEvent(retries == 20 ? SevWarnAlways : SevWarn, "RelocateShard_FinishMoveKeysRetrying", relocationIntervalId)
 		.error(err)
-		.detail("KeyBegin", printable(keys.begin))
-		.detail("KeyEnd", printable(keys.end))
-		.detail("IterationBegin", printable(begin))
-		.detail("IterationEnd", printable(endKey));
+		.detail("KeyBegin", keys.begin)
+		.detail("KeyEnd", keys.end)
+		.detail("IterationBegin", begin)
+		.detail("IterationEnd", endKey);
 }
 }
 }

@@ -785,7 +785,7 @@ ACTOR Future<bool> canRemoveStorageServer( Transaction* tr, UID serverID ) {
 ASSERT(keys.size() >= 2);

 if(keys[0].value == keys[1].value && keys[1].key != allKeys.end) {
-	TraceEvent("ServerKeysCoalescingError", serverID).detail("Key1", printable(keys[0].key)).detail("Key2", printable(keys[1].key)).detail("Value", printable(keys[0].value));
+	TraceEvent("ServerKeysCoalescingError", serverID).detail("Key1", keys[0].key).detail("Key2", keys[1].key).detail("Value", keys[0].value);
 	ASSERT(false);
 }
@@ -897,7 +897,7 @@ namespace oldTLog_4_6 {
 return Void();
 }

-//TraceEvent("TLogPeekMessages0", self->dbgid).detail("ReqBeginEpoch", req.begin.epoch).detail("ReqBeginSeq", req.begin.sequence).detail("Epoch", self->epoch()).detail("PersistentDataSeq", self->persistentDataSequence).detail("Tag1", printable(req.tag1)).detail("Tag2", printable(req.tag2));
+//TraceEvent("TLogPeekMessages0", self->dbgid).detail("ReqBeginEpoch", req.begin.epoch).detail("ReqBeginSeq", req.begin.sequence).detail("Epoch", self->epoch()).detail("PersistentDataSeq", self->persistentDataSequence).detail("Tag1", req.tag1).detail("Tag2", req.tag2);
 // Wait until we have something to return that the caller doesn't already have
 if( logData->version.get() < req.begin ) {
 	wait( logData->version.whenAtLeast( req.begin ) );

@@ -907,7 +907,7 @@ namespace oldTLog_4_6 {
 state Version endVersion = logData->version.get() + 1;

 //grab messages from disk
-//TraceEvent("TLogPeekMessages", self->dbgid).detail("ReqBeginEpoch", req.begin.epoch).detail("ReqBeginSeq", req.begin.sequence).detail("Epoch", self->epoch()).detail("PersistentDataSeq", self->persistentDataSequence).detail("Tag1", printable(req.tag1)).detail("Tag2", printable(req.tag2));
+//TraceEvent("TLogPeekMessages", self->dbgid).detail("ReqBeginEpoch", req.begin.epoch).detail("ReqBeginSeq", req.begin.sequence).detail("Epoch", self->epoch()).detail("PersistentDataSeq", self->persistentDataSequence).detail("Tag1", req.tag1).detail("Tag2", req.tag2);
 if( req.begin <= logData->persistentDataDurableVersion ) {
 	// Just in case the durable version changes while we are waiting for the read, we grab this data from memory. We may or may not actually send it depending on
 	// whether we get enough data from disk.

@@ -921,7 +921,7 @@ namespace oldTLog_4_6 {
 persistTagMessagesKey(logData->logId, oldTag, req.begin),
 persistTagMessagesKey(logData->logId, oldTag, logData->persistentDataDurableVersion + 1)), SERVER_KNOBS->DESIRED_TOTAL_BYTES, SERVER_KNOBS->DESIRED_TOTAL_BYTES));

-//TraceEvent("TLogPeekResults", self->dbgid).detail("ForAddress", req.reply.getEndpoint().getPrimaryAddress()).detail("Tag1Results", s1).detail("Tag2Results", s2).detail("Tag1ResultsLim", kv1.size()).detail("Tag2ResultsLim", kv2.size()).detail("Tag1ResultsLast", kv1.size() ? printable(kv1[0].key) : "").detail("Tag2ResultsLast", kv2.size() ? printable(kv2[0].key) : "").detail("Limited", limited).detail("NextEpoch", next_pos.epoch).detail("NextSeq", next_pos.sequence).detail("NowEpoch", self->epoch()).detail("NowSeq", self->sequence.getNextSequence());
+//TraceEvent("TLogPeekResults", self->dbgid).detail("ForAddress", req.reply.getEndpoint().getPrimaryAddress()).detail("Tag1Results", s1).detail("Tag2Results", s2).detail("Tag1ResultsLim", kv1.size()).detail("Tag2ResultsLim", kv2.size()).detail("Tag1ResultsLast", kv1.size() ? kv1[0].key : "").detail("Tag2ResultsLast", kv2.size() ? kv2[0].key : "").detail("Limited", limited).detail("NextEpoch", next_pos.epoch).detail("NextSeq", next_pos.sequence).detail("NowEpoch", self->epoch()).detail("NowSeq", self->sequence.getNextSequence());

 for (auto &kv : kvs) {
 	auto ver = decodeTagMessagesKey(kv.key);

@@ -1244,7 +1244,7 @@ namespace oldTLog_4_6 {
 wait( waitForAll( (vector<Future<Standalone<VectorRef<KeyValueRef>>>>(), fVers, fRecoverCounts) ) );

 if (fFormat.get().present() && !persistFormatReadableRange.contains( fFormat.get().get() )) {
-	TraceEvent(SevError, "UnsupportedDBFormat", self->dbgid).detail("Format", printable(fFormat.get().get())).detail("Expected", persistFormat.value.toString());
+	TraceEvent(SevError, "UnsupportedDBFormat", self->dbgid).detail("Format", fFormat.get().get()).detail("Expected", persistFormat.value.toString());
 	throw worker_recovery_failed();
 }

@@ -1255,7 +1255,7 @@ namespace oldTLog_4_6 {
 throw worker_removed();
 } else {
 	// This should never happen
-	TraceEvent(SevError, "NoDBFormatKey", self->dbgid).detail("FirstKey", printable(v[0].key));
+	TraceEvent(SevError, "NoDBFormatKey", self->dbgid).detail("FirstKey", v[0].key);
 	ASSERT( false );
 	throw worker_recovery_failed();
 }
@ -912,7 +912,7 @@ void peekMessagesFromMemory( Reference<LogData> self, TLogPeekRequest const& req
|
||||||
ASSERT( !messages.getLength() );
|
ASSERT( !messages.getLength() );
|
||||||
|
|
||||||
auto& deque = getVersionMessages(self, req.tag);
|
auto& deque = getVersionMessages(self, req.tag);
|
||||||
//TraceEvent("TLogPeekMem", self->dbgid).detail("Tag", printable(req.tag1)).detail("PDS", self->persistentDataSequence).detail("PDDS", self->persistentDataDurableSequence).detail("Oldest", map1.empty() ? 0 : map1.begin()->key ).detail("OldestMsgCount", map1.empty() ? 0 : map1.begin()->value.size());
|
//TraceEvent("TLogPeekMem", self->dbgid).detail("Tag", req.tag1).detail("PDS", self->persistentDataSequence).detail("PDDS", self->persistentDataDurableSequence).detail("Oldest", map1.empty() ? 0 : map1.begin()->key ).detail("OldestMsgCount", map1.empty() ? 0 : map1.begin()->value.size());
|
||||||
|
|
||||||
Version begin = std::max( req.begin, self->persistentDataDurableVersion+1 );
|
Version begin = std::max( req.begin, self->persistentDataDurableVersion+1 );
|
||||||
auto it = std::lower_bound(deque.begin(), deque.end(), std::make_pair(begin, LengthPrefixedStringRef()), CompareFirst<std::pair<Version, LengthPrefixedStringRef>>());
|
auto it = std::lower_bound(deque.begin(), deque.end(), std::make_pair(begin, LengthPrefixedStringRef()), CompareFirst<std::pair<Version, LengthPrefixedStringRef>>());
|
||||||
|
@ -979,7 +979,7 @@ ACTOR Future<Void> tLogPeekMessages( TLogData* self, TLogPeekRequest req, Refere
return Void();
|
return Void();
}
|
}
//TraceEvent("TLogPeekMessages0", self->dbgid).detail("ReqBeginEpoch", req.begin.epoch).detail("ReqBeginSeq", req.begin.sequence).detail("Epoch", self->epoch()).detail("PersistentDataSeq", self->persistentDataSequence).detail("Tag1", printable(req.tag1)).detail("Tag2", printable(req.tag2));
|
//TraceEvent("TLogPeekMessages0", self->dbgid).detail("ReqBeginEpoch", req.begin.epoch).detail("ReqBeginSeq", req.begin.sequence).detail("Epoch", self->epoch()).detail("PersistentDataSeq", self->persistentDataSequence).detail("Tag1", req.tag1).detail("Tag2", req.tag2);
// Wait until we have something to return that the caller doesn't already have
|
// Wait until we have something to return that the caller doesn't already have
if( logData->version.get() < req.begin ) {
|
if( logData->version.get() < req.begin ) {
wait( logData->version.whenAtLeast( req.begin ) );
|
wait( logData->version.whenAtLeast( req.begin ) );
@ -1027,7 +1027,7 @@ ACTOR Future<Void> tLogPeekMessages( TLogData* self, TLogPeekRequest req, Refere
state Version endVersion = logData->version.get() + 1;
|
state Version endVersion = logData->version.get() + 1;
//grab messages from disk
|
//grab messages from disk
//TraceEvent("TLogPeekMessages", self->dbgid).detail("ReqBeginEpoch", req.begin.epoch).detail("ReqBeginSeq", req.begin.sequence).detail("Epoch", self->epoch()).detail("PersistentDataSeq", self->persistentDataSequence).detail("Tag1", printable(req.tag1)).detail("Tag2", printable(req.tag2));
|
//TraceEvent("TLogPeekMessages", self->dbgid).detail("ReqBeginEpoch", req.begin.epoch).detail("ReqBeginSeq", req.begin.sequence).detail("Epoch", self->epoch()).detail("PersistentDataSeq", self->persistentDataSequence).detail("Tag1", req.tag1).detail("Tag2", req.tag2);
if( req.begin <= logData->persistentDataDurableVersion ) {
|
if( req.begin <= logData->persistentDataDurableVersion ) {
// Just in case the durable version changes while we are waiting for the read, we grab this data from memory. We may or may not actually send it depending on
|
// Just in case the durable version changes while we are waiting for the read, we grab this data from memory. We may or may not actually send it depending on
// whether we get enough data from disk.
|
// whether we get enough data from disk.
@ -1041,7 +1041,7 @@ ACTOR Future<Void> tLogPeekMessages( TLogData* self, TLogPeekRequest req, Refere
persistTagMessagesKey(logData->logId, req.tag, req.begin),
|
persistTagMessagesKey(logData->logId, req.tag, req.begin),
persistTagMessagesKey(logData->logId, req.tag, logData->persistentDataDurableVersion + 1)), SERVER_KNOBS->DESIRED_TOTAL_BYTES, SERVER_KNOBS->DESIRED_TOTAL_BYTES));
|
persistTagMessagesKey(logData->logId, req.tag, logData->persistentDataDurableVersion + 1)), SERVER_KNOBS->DESIRED_TOTAL_BYTES, SERVER_KNOBS->DESIRED_TOTAL_BYTES));
//TraceEvent("TLogPeekResults", self->dbgid).detail("ForAddress", req.reply.getEndpoint().address).detail("Tag1Results", s1).detail("Tag2Results", s2).detail("Tag1ResultsLim", kv1.size()).detail("Tag2ResultsLim", kv2.size()).detail("Tag1ResultsLast", kv1.size() ? printable(kv1[0].key) : "").detail("Tag2ResultsLast", kv2.size() ? printable(kv2[0].key) : "").detail("Limited", limited).detail("NextEpoch", next_pos.epoch).detail("NextSeq", next_pos.sequence).detail("NowEpoch", self->epoch()).detail("NowSeq", self->sequence.getNextSequence());
|
//TraceEvent("TLogPeekResults", self->dbgid).detail("ForAddress", req.reply.getEndpoint().address).detail("Tag1Results", s1).detail("Tag2Results", s2).detail("Tag1ResultsLim", kv1.size()).detail("Tag2ResultsLim", kv2.size()).detail("Tag1ResultsLast", kv1.size() ? kv1[0].key : "").detail("Tag2ResultsLast", kv2.size() ? kv2[0].key : "").detail("Limited", limited).detail("NextEpoch", next_pos.epoch).detail("NextSeq", next_pos.sequence).detail("NowEpoch", self->epoch()).detail("NowSeq", self->sequence.getNextSequence());
for (auto &kv : kvs) {
|
for (auto &kv : kvs) {
auto ver = decodeTagMessagesKey(kv.key);
|
auto ver = decodeTagMessagesKey(kv.key);
@ -1714,7 +1714,7 @@ ACTOR Future<Void> restorePersistentState( TLogData* self, LocalityData locality
flushAndExit(0);
|
flushAndExit(0);
}
|
}
TraceEvent(SevError, "UnsupportedDBFormat", self->dbgid).detail("Format", printable(fFormat.get().get())).detail("Expected", persistFormat.value.toString());
|
TraceEvent(SevError, "UnsupportedDBFormat", self->dbgid).detail("Format", fFormat.get().get()).detail("Expected", persistFormat.value.toString());
throw worker_recovery_failed();
|
throw worker_recovery_failed();
}
|
}
@ -1725,7 +1725,7 @@ ACTOR Future<Void> restorePersistentState( TLogData* self, LocalityData locality
throw worker_removed();
|
throw worker_removed();
} else {
|
} else {
// This should never happen
|
// This should never happen
TraceEvent(SevError, "NoDBFormatKey", self->dbgid).detail("FirstKey", printable(v[0].key));
|
TraceEvent(SevError, "NoDBFormatKey", self->dbgid).detail("FirstKey", v[0].key);
ASSERT( false );
|
ASSERT( false );
throw worker_recovery_failed();
|
throw worker_recovery_failed();
}
|
}
@ -207,7 +207,7 @@ ACTOR Future<ISimulator::KillType> simulatedFDBDRebooter(Reference<ClusterConnec
cycles ++;
|
cycles ++;
TraceEvent("SimulatedFDBDPreWait").detail("Cycles", cycles).detail("RandomId", randomId)
|
TraceEvent("SimulatedFDBDPreWait").detail("Cycles", cycles).detail("RandomId", randomId)
.detail("Address", NetworkAddress(ip, port, true, false))
|
.detail("Address", NetworkAddress(ip, port, true, false))
.detailext("ZoneId", localities.zoneId())
|
.detail("ZoneId", localities.zoneId())
.detail("WaitTime", waitTime).detail("Port", port);
|
.detail("WaitTime", waitTime).detail("Port", port);
wait( delay( waitTime ) );
|
wait( delay( waitTime ) );
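This file also retires detailext(), which existed solely to log Optional<Standalone<StringRef>> locality fields such as zoneId(). Once detail() is a template, a single streaming rule for optional values covers those call sites; a sketch under that assumption, with std::optional standing in for flow's Optional:

#include <iostream>
#include <optional>
#include <string>

// One formatting rule for optional values replaces a dedicated detailext().
template <class T>
std::ostream& operator<<(std::ostream& os, const std::optional<T>& v) {
	return v ? (os << *v) : (os << "[not set]");
}

struct Localities {
	std::optional<std::string> zoneId;
	std::optional<std::string> dataHallId;
};

int main() {
	Localities loc{std::string("zone-1"), std::nullopt};
	// Previously a separate .detailext("ZoneId", ...) overload; now the one
	// templated .detail() path can print both fields the same way.
	std::cout << "ZoneId=" << loc.zoneId
	          << " DataHall=" << loc.dataHallId << "\n";
}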
@ -218,8 +218,8 @@ ACTOR Future<ISimulator::KillType> simulatedFDBDRebooter(Reference<ClusterConnec
try {
|
try {
TraceEvent("SimulatedRebooterStarting").detail("Cycles", cycles).detail("RandomId", randomId)
|
TraceEvent("SimulatedRebooterStarting").detail("Cycles", cycles).detail("RandomId", randomId)
.detailext("ZoneId", localities.zoneId())
|
.detail("ZoneId", localities.zoneId())
.detailext("DataHall", localities.dataHallId())
|
.detail("DataHall", localities.dataHallId())
.detail("Address", process->address.toString())
|
.detail("Address", process->address.toString())
.detail("Excluded", process->excluded)
|
.detail("Excluded", process->excluded)
.detail("UsingSSL", sslEnabled);
|
.detail("UsingSSL", sslEnabled);
@ -266,19 +266,19 @@ ACTOR Future<ISimulator::KillType> simulatedFDBDRebooter(Reference<ClusterConnec
if(e.code() != error_code_actor_cancelled)
|
if(e.code() != error_code_actor_cancelled)
printf("SimulatedFDBDTerminated: %s\n", e.what());
|
printf("SimulatedFDBDTerminated: %s\n", e.what());
ASSERT( destructed || g_simulator.getCurrentProcess() == process ); // simulatedFDBD catch called on different process
|
ASSERT( destructed || g_simulator.getCurrentProcess() == process ); // simulatedFDBD catch called on different process
TraceEvent(e.code() == error_code_actor_cancelled || e.code() == error_code_file_not_found || destructed ? SevInfo : SevError, "SimulatedFDBDTerminated").error(e, true).detailext("ZoneId", localities.zoneId());
|
TraceEvent(e.code() == error_code_actor_cancelled || e.code() == error_code_file_not_found || destructed ? SevInfo : SevError, "SimulatedFDBDTerminated").error(e, true).detail("ZoneId", localities.zoneId());
}
|
}
TraceEvent("SimulatedFDBDDone").detail("Cycles", cycles).detail("RandomId", randomId)
|
TraceEvent("SimulatedFDBDDone").detail("Cycles", cycles).detail("RandomId", randomId)
.detail("Address", process->address)
|
.detail("Address", process->address)
.detail("Excluded", process->excluded)
|
.detail("Excluded", process->excluded)
.detailext("ZoneId", localities.zoneId())
|
.detail("ZoneId", localities.zoneId())
.detail("KillType", onShutdown.isReady() ? onShutdown.get() : ISimulator::None);
|
.detail("KillType", onShutdown.isReady() ? onShutdown.get() : ISimulator::None);
if (!onShutdown.isReady())
|
if (!onShutdown.isReady())
onShutdown = ISimulator::InjectFaults;
|
onShutdown = ISimulator::InjectFaults;
} catch (Error& e) {
|
} catch (Error& e) {
TraceEvent(destructed ? SevInfo : SevError, "SimulatedFDBDRebooterError").error(e, true).detailext("ZoneId", localities.zoneId()).detail("RandomId", randomId);
|
TraceEvent(destructed ? SevInfo : SevError, "SimulatedFDBDRebooterError").error(e, true).detail("ZoneId", localities.zoneId()).detail("RandomId", randomId);
onShutdown = e;
|
onShutdown = e;
}
|
}
@ -292,7 +292,7 @@ ACTOR Future<ISimulator::KillType> simulatedFDBDRebooter(Reference<ClusterConnec
.detail("Address", process->address)
|
.detail("Address", process->address)
.detail("Excluded", process->excluded)
|
.detail("Excluded", process->excluded)
.detail("Rebooting", process->rebooting)
|
.detail("Rebooting", process->rebooting)
.detailext("ZoneId", localities.zoneId());
|
.detail("ZoneId", localities.zoneId());
wait( g_simulator.onProcess( simProcess ) );
|
wait( g_simulator.onProcess( simProcess ) );
wait(delay(0.00001 + FLOW_KNOBS->MAX_BUGGIFIED_DELAY)); // One last chance for the process to clean up?
|
wait(delay(0.00001 + FLOW_KNOBS->MAX_BUGGIFIED_DELAY)); // One last chance for the process to clean up?
@ -303,14 +303,14 @@ ACTOR Future<ISimulator::KillType> simulatedFDBDRebooter(Reference<ClusterConnec
TraceEvent("SimulatedFDBDShutdown").detail("Cycles", cycles).detail("RandomId", randomId)
|
TraceEvent("SimulatedFDBDShutdown").detail("Cycles", cycles).detail("RandomId", randomId)
.detail("Address", process->address)
|
.detail("Address", process->address)
.detail("Excluded", process->excluded)
|
.detail("Excluded", process->excluded)
.detailext("ZoneId", localities.zoneId())
|
.detail("ZoneId", localities.zoneId())
.detail("KillType", shutdownResult);
|
.detail("KillType", shutdownResult);
if( shutdownResult < ISimulator::RebootProcessAndDelete ) {
|
if( shutdownResult < ISimulator::RebootProcessAndDelete ) {
TraceEvent("SimulatedFDBDLowerReboot").detail("Cycles", cycles).detail("RandomId", randomId)
|
TraceEvent("SimulatedFDBDLowerReboot").detail("Cycles", cycles).detail("RandomId", randomId)
.detail("Address", process->address)
|
.detail("Address", process->address)
.detail("Excluded", process->excluded)
|
.detail("Excluded", process->excluded)
.detailext("ZoneId", localities.zoneId())
|
.detail("ZoneId", localities.zoneId())
.detail("KillType", shutdownResult);
|
.detail("KillType", shutdownResult);
return onShutdown.get();
|
return onShutdown.get();
}
|
}
@ -318,7 +318,7 @@ ACTOR Future<ISimulator::KillType> simulatedFDBDRebooter(Reference<ClusterConnec
if( onShutdown.get() == ISimulator::RebootProcessAndDelete ) {
|
if( onShutdown.get() == ISimulator::RebootProcessAndDelete ) {
TraceEvent("SimulatedFDBDRebootAndDelete").detail("Cycles", cycles).detail("RandomId", randomId)
|
TraceEvent("SimulatedFDBDRebootAndDelete").detail("Cycles", cycles).detail("RandomId", randomId)
.detail("Address", process->address)
|
.detail("Address", process->address)
.detailext("ZoneId", localities.zoneId())
|
.detail("ZoneId", localities.zoneId())
.detail("KillType", shutdownResult);
|
.detail("KillType", shutdownResult);
*coordFolder = joinPath(baseFolder, g_random->randomUniqueID().toString());
|
*coordFolder = joinPath(baseFolder, g_random->randomUniqueID().toString());
*dataFolder = joinPath(baseFolder, g_random->randomUniqueID().toString());
|
*dataFolder = joinPath(baseFolder, g_random->randomUniqueID().toString());
@ -335,7 +335,7 @@ ACTOR Future<ISimulator::KillType> simulatedFDBDRebooter(Reference<ClusterConnec
else {
|
else {
TraceEvent("SimulatedFDBDJustRepeat").detail("Cycles", cycles).detail("RandomId", randomId)
|
TraceEvent("SimulatedFDBDJustRepeat").detail("Cycles", cycles).detail("RandomId", randomId)
.detail("Address", process->address)
|
.detail("Address", process->address)
.detailext("ZoneId", localities.zoneId())
|
.detail("ZoneId", localities.zoneId())
.detail("KillType", shutdownResult);
|
.detail("KillType", shutdownResult);
}
|
}
}
|
}
@ -400,7 +400,7 @@ ACTOR Future<Void> simulatedMachine(ClusterConnectionString connStr, std::vector
Reference<ClusterConnectionFile> clusterFile(useSeedFile ? new ClusterConnectionFile(path, connStr.toString()) : new ClusterConnectionFile(path));
|
Reference<ClusterConnectionFile> clusterFile(useSeedFile ? new ClusterConnectionFile(path, connStr.toString()) : new ClusterConnectionFile(path));
const int listenPort = i*listenPerProcess + 1;
|
const int listenPort = i*listenPerProcess + 1;
processes.push_back(simulatedFDBDRebooter(clusterFile, ips[i], sslEnabled, tlsOptions, listenPort, listenPerProcess, localities, processClass, &myFolders[i], &coordFolders[i], baseFolder, connStr, useSeedFile, runBackupAgents));
|
processes.push_back(simulatedFDBDRebooter(clusterFile, ips[i], sslEnabled, tlsOptions, listenPort, listenPerProcess, localities, processClass, &myFolders[i], &coordFolders[i], baseFolder, connStr, useSeedFile, runBackupAgents));
TraceEvent("SimulatedMachineProcess", randomId).detail("Address", NetworkAddress(ips[i], listenPort, true, false)).detailext("ZoneId", localities.zoneId()).detailext("DataHall", localities.dataHallId()).detail("Folder", myFolders[i]);
|
TraceEvent("SimulatedMachineProcess", randomId).detail("Address", NetworkAddress(ips[i], listenPort, true, false)).detail("ZoneId", localities.zoneId()).detail("DataHall", localities.dataHallId()).detail("Folder", myFolders[i]);
}
|
}
TEST( bootCount >= 1 ); // Simulated machine rebooted
|
TEST( bootCount >= 1 ); // Simulated machine rebooted
@ -418,8 +418,8 @@ ACTOR Future<Void> simulatedMachine(ClusterConnectionString connStr, std::vector
.detail("ProcessClass", processClass.toString())
|
.detail("ProcessClass", processClass.toString())
.detail("Restarting", restarting)
|
.detail("Restarting", restarting)
.detail("UseSeedFile", useSeedFile)
|
.detail("UseSeedFile", useSeedFile)
.detailext("ZoneId", localities.zoneId())
|
.detail("ZoneId", localities.zoneId())
.detailext("DataHall", localities.dataHallId())
|
.detail("DataHall", localities.dataHallId())
.detail("Locality", localities.toString());
|
.detail("Locality", localities.toString());
wait( waitForAll( processes ) );
|
wait( waitForAll( processes ) );
@ -428,8 +428,8 @@ ACTOR Future<Void> simulatedMachine(ClusterConnectionString connStr, std::vector
.detail("Folder0", myFolders[0])
|
.detail("Folder0", myFolders[0])
.detail("CFolder0", coordFolders[0])
|
.detail("CFolder0", coordFolders[0])
.detail("MachineIPs", toIPVectorString(ips))
|
.detail("MachineIPs", toIPVectorString(ips))
.detailext("ZoneId", localities.zoneId())
|
.detail("ZoneId", localities.zoneId())
.detailext("DataHall", localities.dataHallId());
|
.detail("DataHall", localities.dataHallId());
{
|
{
//Kill all open files, which may cause them to write invalid data.
|
//Kill all open files, which may cause them to write invalid data.
@ -468,8 +468,8 @@ ACTOR Future<Void> simulatedMachine(ClusterConnectionString connStr, std::vector
.detail("CFolder0", coordFolders[0])
|
.detail("CFolder0", coordFolders[0])
.detail("MachineIPs", toIPVectorString(ips))
|
.detail("MachineIPs", toIPVectorString(ips))
.detail("Closing", closingStr)
|
.detail("Closing", closingStr)
.detailext("ZoneId", localities.zoneId())
|
.detail("ZoneId", localities.zoneId())
.detailext("DataHall", localities.dataHallId());
|
.detail("DataHall", localities.dataHallId());
ISimulator::MachineInfo* machine = g_simulator.getMachineById(localities.machineId());
|
ISimulator::MachineInfo* machine = g_simulator.getMachineById(localities.machineId());
machine->closingFiles = filenames;
|
machine->closingFiles = filenames;
@ -499,8 +499,8 @@ ACTOR Future<Void> simulatedMachine(ClusterConnectionString connStr, std::vector
if( shutdownDelayCount++ >= 50 ) { // Worker doesn't shut down instantly on reboot
|
if( shutdownDelayCount++ >= 50 ) { // Worker doesn't shut down instantly on reboot
TraceEvent(SevError, "SimulatedFDBDFilesCheck", randomId)
|
TraceEvent(SevError, "SimulatedFDBDFilesCheck", randomId)
.detail("PAddrs", toIPVectorString(ips))
|
.detail("PAddrs", toIPVectorString(ips))
.detailext("ZoneId", localities.zoneId())
|
.detail("ZoneId", localities.zoneId())
.detailext("DataHall", localities.dataHallId());
|
.detail("DataHall", localities.dataHallId());
ASSERT( false );
|
ASSERT( false );
}
|
}
@ -510,8 +510,8 @@ ACTOR Future<Void> simulatedMachine(ClusterConnectionString connStr, std::vector
TraceEvent("SimulatedFDBDFilesClosed", randomId)
|
TraceEvent("SimulatedFDBDFilesClosed", randomId)
.detail("Address", toIPVectorString(ips))
|
.detail("Address", toIPVectorString(ips))
.detailext("ZoneId", localities.zoneId())
|
.detail("ZoneId", localities.zoneId())
.detailext("DataHall", localities.dataHallId());
|
.detail("DataHall", localities.dataHallId());
g_simulator.destroyMachine(localities.machineId());
|
g_simulator.destroyMachine(localities.machineId());
@ -535,8 +535,8 @@ ACTOR Future<Void> simulatedMachine(ClusterConnectionString connStr, std::vector
.detail("Swap", swap)
|
.detail("Swap", swap)
.detail("KillType", killType)
|
.detail("KillType", killType)
.detail("RebootTime", rebootTime)
|
.detail("RebootTime", rebootTime)
.detailext("ZoneId", localities.zoneId())
|
.detail("ZoneId", localities.zoneId())
.detailext("DataHall", localities.dataHallId())
|
.detail("DataHall", localities.dataHallId())
.detail("MachineIPs", toIPVectorString(ips));
|
.detail("MachineIPs", toIPVectorString(ips));
wait( delay( rebootTime ) );
|
wait( delay( rebootTime ) );
@ -1141,7 +1141,7 @@ void setupSimulatedSystem(vector<Future<Void>>* systemActors, std::string baseFo
ASSERT(g_simulator.storagePolicy && g_simulator.tLogPolicy);
|
ASSERT(g_simulator.storagePolicy && g_simulator.tLogPolicy);
ASSERT(!g_simulator.hasSatelliteReplication || g_simulator.satelliteTLogPolicy);
|
ASSERT(!g_simulator.hasSatelliteReplication || g_simulator.satelliteTLogPolicy);
TraceEvent("SimulatorConfig").detail("ConfigString", printable(StringRef(startingConfigString)));
|
TraceEvent("SimulatorConfig").detail("ConfigString", StringRef(startingConfigString));
const int dataCenters = simconfig.datacenters;
|
const int dataCenters = simconfig.datacenters;
const int machineCount = simconfig.machine_count;
|
const int machineCount = simconfig.machine_count;
@ -1235,7 +1235,7 @@ void setupSimulatedSystem(vector<Future<Void>>* systemActors, std::string baseFo
*pConnString = conn;
|
*pConnString = conn;
TraceEvent("SimulatedConnectionString").detail("String", conn.toString()).detail("ConfigString", printable(StringRef(startingConfigString)));
|
TraceEvent("SimulatedConnectionString").detail("String", conn.toString()).detail("ConfigString", startingConfigString);
int assignedMachines = 0, nonVersatileMachines = 0;
|
int assignedMachines = 0, nonVersatileMachines = 0;
for( int dc = 0; dc < dataCenters; dc++ ) {
|
for( int dc = 0; dc < dataCenters; dc++ ) {
@ -1751,7 +1751,7 @@ ACTOR Future<JsonBuilderObject> layerStatusFetcher(Database cx, JsonBuilderArray
json.absorb(doc.get_obj());
|
json.absorb(doc.get_obj());
wait(yield());
|
wait(yield());
} catch(Error &e) {
|
} catch(Error &e) {
TraceEvent(SevWarn, "LayerStatusBadJSON").detail("Key", printable(docs[j].key));
|
TraceEvent(SevWarn, "LayerStatusBadJSON").detail("Key", docs[j].key);
}
|
}
}
|
}
}
|
}
@ -73,7 +73,7 @@ struct StorageMetricSample {
}
|
}
// If we didn't return above, we didn't find anything.
|
// If we didn't return above, we didn't find anything.
TraceEvent(SevWarn, "CannotSplitLastSampleKey").detail("Range", printable(range)).detail("Offset", offset);
|
TraceEvent(SevWarn, "CannotSplitLastSampleKey").detail("Range", range).detail("Offset", offset);
return front ? range.end : range.begin;
|
return front ? range.end : range.begin;
}
|
}
};
|
};
@ -307,7 +307,7 @@ struct StorageServerMetrics {
StorageMetrics estimated = req.estimated;
|
StorageMetrics estimated = req.estimated;
StorageMetrics remaining = getMetrics( req.keys ) + used;
|
StorageMetrics remaining = getMetrics( req.keys ) + used;
//TraceEvent("SplitMetrics").detail("Begin", printable(req.keys.begin)).detail("End", printable(req.keys.end)).detail("Remaining", remaining.bytes).detail("Used", used.bytes);
|
//TraceEvent("SplitMetrics").detail("Begin", req.keys.begin).detail("End", req.keys.end).detail("Remaining", remaining.bytes).detail("Used", used.bytes);
while( true ) {
|
while( true ) {
if( remaining.bytes < 2*SERVER_KNOBS->MIN_SHARD_BYTES )
|
if( remaining.bytes < 2*SERVER_KNOBS->MIN_SHARD_BYTES )
@ -1157,7 +1157,7 @@ void peekMessagesFromMemory( Reference<LogData> self, TLogPeekRequest const& req
ASSERT( !messages.getLength() );
|
ASSERT( !messages.getLength() );
auto& deque = getVersionMessages(self, req.tag);
|
auto& deque = getVersionMessages(self, req.tag);
//TraceEvent("TLogPeekMem", self->dbgid).detail("Tag", printable(req.tag1)).detail("PDS", self->persistentDataSequence).detail("PDDS", self->persistentDataDurableSequence).detail("Oldest", map1.empty() ? 0 : map1.begin()->key ).detail("OldestMsgCount", map1.empty() ? 0 : map1.begin()->value.size());
|
//TraceEvent("TLogPeekMem", self->dbgid).detail("Tag", req.tag1).detail("PDS", self->persistentDataSequence).detail("PDDS", self->persistentDataDurableSequence).detail("Oldest", map1.empty() ? 0 : map1.begin()->key ).detail("OldestMsgCount", map1.empty() ? 0 : map1.begin()->value.size());
Version begin = std::max( req.begin, self->persistentDataDurableVersion+1 );
|
Version begin = std::max( req.begin, self->persistentDataDurableVersion+1 );
auto it = std::lower_bound(deque.begin(), deque.end(), std::make_pair(begin, LengthPrefixedStringRef()), CompareFirst<std::pair<Version, LengthPrefixedStringRef>>());
|
auto it = std::lower_bound(deque.begin(), deque.end(), std::make_pair(begin, LengthPrefixedStringRef()), CompareFirst<std::pair<Version, LengthPrefixedStringRef>>());
@ -1260,7 +1260,7 @@ ACTOR Future<Void> tLogPeekMessages( TLogData* self, TLogPeekRequest req, Refere
return Void();
|
return Void();
}
|
}
//TraceEvent("TLogPeekMessages0", self->dbgid).detail("ReqBeginEpoch", req.begin.epoch).detail("ReqBeginSeq", req.begin.sequence).detail("Epoch", self->epoch()).detail("PersistentDataSeq", self->persistentDataSequence).detail("Tag1", printable(req.tag1)).detail("Tag2", printable(req.tag2));
|
//TraceEvent("TLogPeekMessages0", self->dbgid).detail("ReqBeginEpoch", req.begin.epoch).detail("ReqBeginSeq", req.begin.sequence).detail("Epoch", self->epoch()).detail("PersistentDataSeq", self->persistentDataSequence).detail("Tag1", req.tag1).detail("Tag2", req.tag2);
// Wait until we have something to return that the caller doesn't already have
|
// Wait until we have something to return that the caller doesn't already have
if( logData->version.get() < req.begin ) {
|
if( logData->version.get() < req.begin ) {
wait( logData->version.whenAtLeast( req.begin ) );
|
wait( logData->version.whenAtLeast( req.begin ) );
@ -1308,7 +1308,7 @@ ACTOR Future<Void> tLogPeekMessages( TLogData* self, TLogPeekRequest req, Refere
state Version endVersion = logData->version.get() + 1;
|
state Version endVersion = logData->version.get() + 1;
//grab messages from disk
|
//grab messages from disk
//TraceEvent("TLogPeekMessages", self->dbgid).detail("ReqBeginEpoch", req.begin.epoch).detail("ReqBeginSeq", req.begin.sequence).detail("Epoch", self->epoch()).detail("PersistentDataSeq", self->persistentDataSequence).detail("Tag1", printable(req.tag1)).detail("Tag2", printable(req.tag2));
|
//TraceEvent("TLogPeekMessages", self->dbgid).detail("ReqBeginEpoch", req.begin.epoch).detail("ReqBeginSeq", req.begin.sequence).detail("Epoch", self->epoch()).detail("PersistentDataSeq", self->persistentDataSequence).detail("Tag1", req.tag1).detail("Tag2", req.tag2);
if( req.begin <= logData->persistentDataDurableVersion ) {
|
if( req.begin <= logData->persistentDataDurableVersion ) {
// Just in case the durable version changes while we are waiting for the read, we grab this data from memory. We may or may not actually send it depending on
|
// Just in case the durable version changes while we are waiting for the read, we grab this data from memory. We may or may not actually send it depending on
// whether we get enough data from disk.
|
// whether we get enough data from disk.
@ -1345,7 +1345,7 @@ ACTOR Future<Void> tLogPeekMessages( TLogData* self, TLogPeekRequest req, Refere
persistTagMessageRefsKey(logData->logId, req.tag, req.begin),
|
persistTagMessageRefsKey(logData->logId, req.tag, req.begin),
persistTagMessageRefsKey(logData->logId, req.tag, logData->persistentDataDurableVersion + 1))));
|
persistTagMessageRefsKey(logData->logId, req.tag, logData->persistentDataDurableVersion + 1))));
//TraceEvent("TLogPeekResults", self->dbgid).detail("ForAddress", req.reply.getEndpoint().getPrimaryAddress()).detail("Tag1Results", s1).detail("Tag2Results", s2).detail("Tag1ResultsLim", kv1.size()).detail("Tag2ResultsLim", kv2.size()).detail("Tag1ResultsLast", kv1.size() ? printable(kv1[0].key) : "").detail("Tag2ResultsLast", kv2.size() ? printable(kv2[0].key) : "").detail("Limited", limited).detail("NextEpoch", next_pos.epoch).detail("NextSeq", next_pos.sequence).detail("NowEpoch", self->epoch()).detail("NowSeq", self->sequence.getNextSequence());
|
//TraceEvent("TLogPeekResults", self->dbgid).detail("ForAddress", req.reply.getEndpoint().getPrimaryAddress()).detail("Tag1Results", s1).detail("Tag2Results", s2).detail("Tag1ResultsLim", kv1.size()).detail("Tag2ResultsLim", kv2.size()).detail("Tag1ResultsLast", kv1.size() ? kv1[0].key : "").detail("Tag2ResultsLast", kv2.size() ? kv2[0].key : "").detail("Limited", limited).detail("NextEpoch", next_pos.epoch).detail("NextSeq", next_pos.sequence).detail("NowEpoch", self->epoch()).detail("NowSeq", self->sequence.getNextSequence());
state std::vector<std::pair<IDiskQueue::location, IDiskQueue::location>> commitLocations;
|
state std::vector<std::pair<IDiskQueue::location, IDiskQueue::location>> commitLocations;
state bool earlyEnd = false;
|
state bool earlyEnd = false;
@ -2087,7 +2087,7 @@ ACTOR Future<Void> restorePersistentState( TLogData* self, LocalityData locality
flushAndExit(0);
|
flushAndExit(0);
}
|
}
TraceEvent(SevError, "UnsupportedDBFormat", self->dbgid).detail("Format", printable(fFormat.get().get())).detail("Expected", persistFormat.value.toString());
|
TraceEvent(SevError, "UnsupportedDBFormat", self->dbgid).detail("Format", fFormat.get().get()).detail("Expected", persistFormat.value.toString());
throw worker_recovery_failed();
|
throw worker_recovery_failed();
}
|
}
@ -2098,7 +2098,7 @@ ACTOR Future<Void> restorePersistentState( TLogData* self, LocalityData locality
throw worker_removed();
|
throw worker_removed();
} else {
|
} else {
// This should never happen
|
// This should never happen
TraceEvent(SevError, "NoDBFormatKey", self->dbgid).detail("FirstKey", printable(v[0].key));
|
TraceEvent(SevError, "NoDBFormatKey", self->dbgid).detail("FirstKey", v[0].key);
ASSERT( false );
|
ASSERT( false );
throw worker_recovery_failed();
|
throw worker_recovery_failed();
}
|
}
@ -203,9 +203,9 @@ StringRef debugKey2 = LiteralStringRef( "\xff\xff\xff\xff" );
bool debugMutation( const char* context, Version version, MutationRef const& mutation ) {
|
bool debugMutation( const char* context, Version version, MutationRef const& mutation ) {
if ((mutation.type == mutation.SetValue || mutation.type == mutation.AddValue || mutation.type==mutation.DebugKey) && (mutation.param1 == debugKey || mutation.param1 == debugKey2))
|
if ((mutation.type == mutation.SetValue || mutation.type == mutation.AddValue || mutation.type==mutation.DebugKey) && (mutation.param1 == debugKey || mutation.param1 == debugKey2))
;//TraceEvent("MutationTracking").detail("At", context).detail("Version", version).detail("MutationType", "SetValue").detail("Key", printable(mutation.param1)).detail("Value", printable(mutation.param2));
|
;//TraceEvent("MutationTracking").detail("At", context).detail("Version", version).detail("MutationType", "SetValue").detail("Key", mutation.param1).detail("Value", mutation.param2);
else if ((mutation.type == mutation.ClearRange || mutation.type == mutation.DebugKeyRange) && ((mutation.param1<=debugKey && mutation.param2>debugKey) || (mutation.param1<=debugKey2 && mutation.param2>debugKey2)))
|
else if ((mutation.type == mutation.ClearRange || mutation.type == mutation.DebugKeyRange) && ((mutation.param1<=debugKey && mutation.param2>debugKey) || (mutation.param1<=debugKey2 && mutation.param2>debugKey2)))
;//TraceEvent("MutationTracking").detail("At", context).detail("Version", version).detail("MutationType", "ClearRange").detail("KeyBegin", printable(mutation.param1)).detail("KeyEnd", printable(mutation.param2));
|
;//TraceEvent("MutationTracking").detail("At", context).detail("Version", version).detail("MutationType", "ClearRange").detail("KeyBegin", mutation.param1).detail("KeyEnd", mutation.param2);
else
|
else
return false;
|
return false;
const char* type =
|
const char* type =
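The watched-key test in debugMutation() above uses the usual half-open convention: a clear of [param1, param2) touches debugKey exactly when param1 <= debugKey && param2 > debugKey. The same predicate in isolation:

#include <iostream>
#include <string>

// A clear of [begin, end) touches `key` iff begin <= key < end,
// mirroring (mutation.param1 <= debugKey && mutation.param2 > debugKey).
bool clearTouches(const std::string& begin, const std::string& end,
                  const std::string& key) {
	return begin <= key && key < end;
}

int main() {
	std::cout << clearTouches("a", "m", "f")    // 1: inside the range
	          << clearTouches("a", "m", "m")    // 0: end key is exclusive
	          << clearTouches("a", "m", "a")    // 1: begin key is inclusive
	          << "\n";
}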
@ -223,7 +223,7 @@ bool debugMutation( const char* context, Version version, MutationRef const& mut
bool debugKeyRange( const char* context, Version version, KeyRangeRef const& keys ) {
|
bool debugKeyRange( const char* context, Version version, KeyRangeRef const& keys ) {
if (keys.contains(debugKey) || keys.contains(debugKey2)) {
|
if (keys.contains(debugKey) || keys.contains(debugKey2)) {
debugMutation(context, version, MutationRef(MutationRef::DebugKeyRange, keys.begin, keys.end) );
|
debugMutation(context, version, MutationRef(MutationRef::DebugKeyRange, keys.begin, keys.end) );
//TraceEvent("MutationTracking").detail("At", context).detail("Version", version).detail("KeyBegin", printable(keys.begin)).detail("KeyEnd", printable(keys.end));
|
//TraceEvent("MutationTracking").detail("At", context).detail("Version", version).detail("KeyBegin", keys.begin).detail("KeyEnd", keys.end);
return true;
|
return true;
} else
|
} else
return false;
|
return false;
@ -308,7 +308,7 @@ ACTOR Future<Void> newTLogServers( Reference<MasterData> self, RecruitFromConfig
tr.set(tr.arena(), tagLocalityListKeyFor(recr.dcId), tagLocalityListValue(loc));
|
tr.set(tr.arena(), tagLocalityListKeyFor(recr.dcId), tagLocalityListValue(loc));
initialConfChanges->push_back(tr);
|
initialConfChanges->push_back(tr);
self->dcId_locality[recr.dcId] = loc;
|
self->dcId_locality[recr.dcId] = loc;
TraceEvent(SevWarn, "UnknownPrimaryDCID", self->dbgid).detail("PrimaryId", printable(recr.dcId)).detail("Loc", loc);
|
TraceEvent(SevWarn, "UnknownPrimaryDCID", self->dbgid).detail("PrimaryId", recr.dcId).detail("Loc", loc);
}
|
}
if( !self->dcId_locality.count(remoteDcId) ) {
|
if( !self->dcId_locality.count(remoteDcId) ) {
@ -317,7 +317,7 @@ ACTOR Future<Void> newTLogServers( Reference<MasterData> self, RecruitFromConfig
tr.set(tr.arena(), tagLocalityListKeyFor(remoteDcId), tagLocalityListValue(loc));
|
tr.set(tr.arena(), tagLocalityListKeyFor(remoteDcId), tagLocalityListValue(loc));
initialConfChanges->push_back(tr);
|
initialConfChanges->push_back(tr);
self->dcId_locality[remoteDcId] = loc;
|
self->dcId_locality[remoteDcId] = loc;
TraceEvent(SevWarn, "UnknownRemoteDCID", self->dbgid).detail("RemoteId", printable(remoteDcId)).detail("Loc", loc);
|
TraceEvent(SevWarn, "UnknownRemoteDCID", self->dbgid).detail("RemoteId", remoteDcId).detail("Loc", loc);
}
|
}
std::vector<UID> exclusionWorkerIds;
|
std::vector<UID> exclusionWorkerIds;
@ -522,7 +522,7 @@ ACTOR Future<Standalone<CommitTransactionRef>> provisionalMaster( Reference<Mast
!std::any_of(t->read_conflict_ranges.begin(), t->read_conflict_ranges.end(), [](KeyRangeRef const& r){return r.contains(lastEpochEndKey);}))
|
!std::any_of(t->read_conflict_ranges.begin(), t->read_conflict_ranges.end(), [](KeyRangeRef const& r){return r.contains(lastEpochEndKey);}))
{
|
{
for(auto m = t->mutations.begin(); m != t->mutations.end(); ++m) {
|
for(auto m = t->mutations.begin(); m != t->mutations.end(); ++m) {
TraceEvent("PM_CTM", parent->dbgid).detail("MType", m->type).detail("Param1", printable(m->param1)).detail("Param2", printable(m->param2));
|
TraceEvent("PM_CTM", parent->dbgid).detail("MType", m->type).detail("Param1", m->param1).detail("Param2", m->param2);
if (isMetadataMutation(*m)) {
|
if (isMetadataMutation(*m)) {
// We keep the mutations and write conflict ranges from this transaction, but not its read conflict ranges
|
// We keep the mutations and write conflict ranges from this transaction, but not its read conflict ranges
Standalone<CommitTransactionRef> out;
|
Standalone<CommitTransactionRef> out;
@ -613,7 +613,7 @@ ACTOR Future<Void> updateLocalityForDcId(Optional<Key> dcId, Reference<ILogSyste
ver = oldLogSystem->getKnownCommittedVersion();
|
ver = oldLogSystem->getKnownCommittedVersion();
}
|
}
locality->set( PeekSpecialInfo(loc.first,loc.second,ver) );
|
locality->set( PeekSpecialInfo(loc.first,loc.second,ver) );
TraceEvent("UpdatedLocalityForDcId").detail("DcId", printable(dcId)).detail("Locality0", loc.first).detail("Locality1", loc.second).detail("Version", ver);
|
TraceEvent("UpdatedLocalityForDcId").detail("DcId", dcId).detail("Locality0", loc.first).detail("Locality1", loc.second).detail("Version", ver);
wait( oldLogSystem->onLogSystemConfigChange() || oldLogSystem->onKnownCommittedVersionChange() );
|
wait( oldLogSystem->onLogSystemConfigChange() || oldLogSystem->onKnownCommittedVersionChange() );
}
|
}
}
|
}
@ -702,7 +702,7 @@ ACTOR Future<Void> readTransactionSystemState( Reference<MasterData> self, Refer
//auto kvs = self->txnStateStore->readRange( systemKeys );
|
//auto kvs = self->txnStateStore->readRange( systemKeys );
//for( auto & kv : kvs.get() )
|
//for( auto & kv : kvs.get() )
// TraceEvent("MasterRecoveredTXS", self->dbgid).detail("K", printable(kv.key)).detail("V", printable(kv.value));
|
// TraceEvent("MasterRecoveredTXS", self->dbgid).detail("K", kv.key).detail("V", kv.value);
self->txnStateLogAdapter->setNextVersion( oldLogSystem->getEnd() ); //< FIXME: (1) the log adapter should do this automatically after recovery; (2) if we make KeyValueStoreMemory guarantee immediate reads, we should be able to get rid of the discardCommit() below and not need a writable log adapter
|
self->txnStateLogAdapter->setNextVersion( oldLogSystem->getEnd() ); //< FIXME: (1) the log adapter should do this automatically after recovery; (2) if we make KeyValueStoreMemory guarantee immediate reads, we should be able to get rid of the discardCommit() below and not need a writable log adapter
@ -849,7 +849,7 @@ ACTOR Future<Void> recoverFrom( Reference<MasterData> self, Reference<ILogSystem
TEST(true); // Emergency transaction processing during recovery
|
TEST(true); // Emergency transaction processing during recovery
TraceEvent("EmergencyTransaction", self->dbgid);
|
TraceEvent("EmergencyTransaction", self->dbgid);
for (auto m = req.mutations.begin(); m != req.mutations.end(); ++m)
|
for (auto m = req.mutations.begin(); m != req.mutations.end(); ++m)
TraceEvent("EmergencyTransactionMutation", self->dbgid).detail("MType", m->type).detail("P1", printable(m->param1)).detail("P2", printable(m->param2));
|
TraceEvent("EmergencyTransactionMutation", self->dbgid).detail("MType", m->type).detail("P1", m->param1).detail("P2", m->param2);
DatabaseConfiguration oldConf = self->configuration;
|
DatabaseConfiguration oldConf = self->configuration;
self->configuration = self->originalConfiguration;
|
self->configuration = self->originalConfiguration;
@ -1050,7 +1050,7 @@ ACTOR Future<Void> resolutionBalancing(Reference<MasterData> self) {
ResolutionSplitReply split = wait( brokenPromiseToNever(self->resolvers[metrics.lastItem()->second].split.getReply(req, TaskResolutionMetrics)) );
|
ResolutionSplitReply split = wait( brokenPromiseToNever(self->resolvers[metrics.lastItem()->second].split.getReply(req, TaskResolutionMetrics)) );
KeyRangeRef moveRange = range.second ? KeyRangeRef( range.first.begin, split.key ) : KeyRangeRef( split.key, range.first.end );
|
KeyRangeRef moveRange = range.second ? KeyRangeRef( range.first.begin, split.key ) : KeyRangeRef( split.key, range.first.end );
movedRanges.push_back_deep(movedRanges.arena(), ResolverMoveRef(moveRange, dest));
|
movedRanges.push_back_deep(movedRanges.arena(), ResolverMoveRef(moveRange, dest));
TraceEvent("MovingResolutionRange").detail("Src", src).detail("Dest", dest).detail("Amount", amount).detail("StartRange", printable(range.first)).detail("MoveRange", printable(moveRange)).detail("Used", split.used).detail("KeyResolverRanges", key_resolver.size());
|
TraceEvent("MovingResolutionRange").detail("Src", src).detail("Dest", dest).detail("Amount", amount).detail("StartRange", range.first).detail("MoveRange", moveRange).detail("Used", split.used).detail("KeyResolverRanges", key_resolver.size());
amount -= split.used;
|
amount -= split.used;
if(moveRange != range.first || amount <= 0 )
|
if(moveRange != range.first || amount <= 0 )
break;
|
break;
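The moveRange line above carves a piece off one end of the busiest resolver's range at the split key the resolver returns. A simplified stand-in for that KeyRangeRef arithmetic (a std::pair of strings plays the role of a half-open key range):

#include <iostream>
#include <string>
#include <utility>

using KeyRange = std::pair<std::string, std::string>;   // [begin, end)

// Take the front half when fromFront is true, otherwise the back half,
// mirroring: range.second ? KeyRangeRef(begin, split) : KeyRangeRef(split, end).
KeyRange moveRange(const KeyRange& r, const std::string& splitKey, bool fromFront) {
	return fromFront ? KeyRange{r.first, splitKey}     // [begin, split)
	                 : KeyRange{splitKey, r.second};   // [split, end)
}

int main() {
	KeyRange r{"a", "m"};
	auto front = moveRange(r, "f", true);
	auto back  = moveRange(r, "f", false);
	std::cout << front.first << ".." << front.second << " "
	          << back.first << ".." << back.second << "\n";  // a..f f..m
}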
@ -1058,7 +1058,7 @@ ACTOR Future<Void> resolutionBalancing(Reference<MasterData> self) {
for(auto& it : movedRanges)
|
for(auto& it : movedRanges)
key_resolver.insert(it.range, it.dest);
|
key_resolver.insert(it.range, it.dest);
//for(auto& it : key_resolver.ranges())
|
//for(auto& it : key_resolver.ranges())
// TraceEvent("KeyResolver").detail("Range", printable(it.range())).detail("Value", it.value());
|
// TraceEvent("KeyResolver").detail("Range", it.range()).detail("Value", it.value());
self->resolverChangesVersion = self->version + 1;
|
self->resolverChangesVersion = self->version + 1;
for (auto& p : self->proxies)
|
for (auto& p : self->proxies)
@ -1284,7 +1284,7 @@ ACTOR Future<Void> masterCore( Reference<MasterData> self ) {
.detail("StatusCode", RecoveryStatus::recovery_transaction)
|
.detail("StatusCode", RecoveryStatus::recovery_transaction)
.detail("Status", RecoveryStatus::names[RecoveryStatus::recovery_transaction])
|
.detail("Status", RecoveryStatus::names[RecoveryStatus::recovery_transaction])
.detail("PrimaryLocality", self->primaryLocality)
|
.detail("PrimaryLocality", self->primaryLocality)
.detail("DcId", printable(self->myInterface.locality.dcId()))
|
.detail("DcId", self->myInterface.locality.dcId())
.trackLatest("MasterRecoveryState");
|
.trackLatest("MasterRecoveryState");
// Recovery transaction
|
// Recovery transaction
@ -535,7 +535,7 @@ public:
void addShard( ShardInfo* newShard ) {
|
void addShard( ShardInfo* newShard ) {
ASSERT( !newShard->keys.empty() );
|
ASSERT( !newShard->keys.empty() );
newShard->changeCounter = ++shardChangeCounter;
|
newShard->changeCounter = ++shardChangeCounter;
//TraceEvent("AddShard", this->thisServerID).detail("KeyBegin", printable(newShard->keys.begin)).detail("KeyEnd", printable(newShard->keys.end)).detail("State", newShard->isReadable() ? "Readable" : newShard->notAssigned() ? "NotAssigned" : "Adding").detail("Version", this->version.get());
|
//TraceEvent("AddShard", this->thisServerID).detail("KeyBegin", newShard->keys.begin).detail("KeyEnd", newShard->keys.end).detail("State", newShard->isReadable() ? "Readable" : newShard->notAssigned() ? "NotAssigned" : "Adding").detail("Version", this->version.get());
/*auto affected = shards.getAffectedRangesAfterInsertion( newShard->keys, Reference<ShardInfo>() );
|
/*auto affected = shards.getAffectedRangesAfterInsertion( newShard->keys, Reference<ShardInfo>() );
for(auto i = affected.begin(); i != affected.end(); ++i)
|
for(auto i = affected.begin(); i != affected.end(); ++i)
shards.insert( *i, Reference<ShardInfo>() );*/
|
shards.insert( *i, Reference<ShardInfo>() );*/
@ -619,7 +619,7 @@ bool validateRange( StorageServer::VersionedData::ViewAtVersion const& view, Key
// * Nonoverlapping: No clear overlaps a set or another clear, or adjoins another clear.
|
// * Nonoverlapping: No clear overlaps a set or another clear, or adjoins another clear.
// * Old mutations are erased: All items in versionedData.atLatest() have insertVersion() > durableVersion()
|
// * Old mutations are erased: All items in versionedData.atLatest() have insertVersion() > durableVersion()
TraceEvent("ValidateRange", id).detail("KeyBegin", printable(range.begin)).detail("KeyEnd", printable(range.end)).detail("Version", version);
|
TraceEvent("ValidateRange", id).detail("KeyBegin", range.begin).detail("KeyEnd", range.end).detail("Version", version);
KeyRef k;
|
KeyRef k;
bool ok = true;
|
bool ok = true;
bool kIsClear = false;
|
bool kIsClear = false;
@ -628,7 +628,7 @@ bool validateRange( StorageServer::VersionedData::ViewAtVersion const& view, Key
for(; i != view.end() && i.key() < range.end; ++i) {
|
for(; i != view.end() && i.key() < range.end; ++i) {
ASSERT( i.insertVersion() > minInsertVersion );
|
ASSERT( i.insertVersion() > minInsertVersion );
if (kIsClear && i->isClearTo() ? i.key() <= k : i.key() < k) {
|
if (kIsClear && i->isClearTo() ? i.key() <= k : i.key() < k) {
TraceEvent(SevError,"InvalidRange",id).detail("Key1", printable(k)).detail("Key2", printable(i.key())).detail("Version", version);
|
TraceEvent(SevError,"InvalidRange",id).detail("Key1", k).detail("Key2", i.key()).detail("Version", version);
ok = false;
|
ok = false;
}
|
}
//ASSERT( i.key() >= k );
|
//ASSERT( i.key() >= k );
@ -668,8 +668,8 @@ void validate(StorageServer* data, bool force = false) {
ShardInfo* shard = s->value().getPtr();
|
ShardInfo* shard = s->value().getPtr();
if (!shard->isInVersionedData()) {
|
if (!shard->isInVersionedData()) {
if (latest.lower_bound(s->begin()) != latest.lower_bound(s->end())) {
|
if (latest.lower_bound(s->begin()) != latest.lower_bound(s->end())) {
TraceEvent(SevError, "VF", data->thisServerID).detail("LastValidTime", data->debug_lastValidateTime).detail("KeyBegin", printable(s->begin())).detail("KeyEnd", printable(s->end()))
|
TraceEvent(SevError, "VF", data->thisServerID).detail("LastValidTime", data->debug_lastValidateTime).detail("KeyBegin", s->begin()).detail("KeyEnd", s->end())
.detail("FirstKey", printable(latest.lower_bound(s->begin()).key())).detail("FirstInsertV", latest.lower_bound(s->begin()).insertVersion());
|
.detail("FirstKey", latest.lower_bound(s->begin()).key()).detail("FirstInsertV", latest.lower_bound(s->begin()).insertVersion());
}
|
}
ASSERT( latest.lower_bound(s->begin()) == latest.lower_bound(s->end()) );
|
ASSERT( latest.lower_bound(s->begin()) == latest.lower_bound(s->end()) );
}
|
}
@ -784,7 +784,7 @@ ACTOR Future<Void> getValueQ( StorageServer* data, GetValueRequest req ) {
state uint64_t changeCounter = data->shardChangeCounter;
|
state uint64_t changeCounter = data->shardChangeCounter;
if (!data->shards[req.key]->isReadable()) {
|
if (!data->shards[req.key]->isReadable()) {
//TraceEvent("WrongShardServer", data->thisServerID).detail("Key", printable(req.key)).detail("Version", version).detail("In", "getValueQ");
|
//TraceEvent("WrongShardServer", data->thisServerID).detail("Key", req.key).detail("Version", version).detail("In", "getValueQ");
throw wrong_shard_server();
|
throw wrong_shard_server();
}
|
}
@ -861,7 +861,7 @@ ACTOR Future<Void> watchValue_impl( StorageServer* data, WatchValueRequest req )
GetValueRequest getReq( req.key, latest, req.debugID );
|
GetValueRequest getReq( req.key, latest, req.debugID );
state Future<Void> getValue = getValueQ( data, getReq ); //we are relying on the delay zero at the top of getValueQ, if removed we need one here
|
state Future<Void> getValue = getValueQ( data, getReq ); //we are relying on the delay zero at the top of getValueQ, if removed we need one here
GetValueReply reply = wait( getReq.reply.getFuture() );
|
GetValueReply reply = wait( getReq.reply.getFuture() );
//TraceEvent("WatcherCheckValue").detail("Key", printable( req.key ) ).detail("Value", printable( req.value ) ).detail("CurrentValue", printable( v ) ).detail("Ver", latest);
|
//TraceEvent("WatcherCheckValue").detail("Key", req.key ).detail("Value", req.value ).detail("CurrentValue", v ).detail("Ver", latest);
debugMutation("ShardWatchValue", latest, MutationRef(MutationRef::DebugKey, req.key, reply.value.present() ? StringRef( reply.value.get() ) : LiteralStringRef("<null>") ) );
|
debugMutation("ShardWatchValue", latest, MutationRef(MutationRef::DebugKey, req.key, reply.value.present() ? StringRef( reply.value.get() ) : LiteralStringRef("<null>") ) );
@ -1091,9 +1091,9 @@ ACTOR Future<GetKeyValuesReply> readRange( StorageServer* data, Version version,
if (more) { // if there might be more data, begin reading right after what we already found to find out
|
if (more) { // if there might be more data, begin reading right after what we already found to find out
//if (track) printf("more\n");
|
//if (track) printf("more\n");
if (!(limit<=0 || *pLimitBytes<=0 || result.data.end()[-1].key == atStorageVersion.end()[-1].key))
|
if (!(limit<=0 || *pLimitBytes<=0 || result.data.end()[-1].key == atStorageVersion.end()[-1].key))
TraceEvent(SevError, "ReadRangeIssue", data->thisServerID).detail("ReadBegin", printable(readBegin)).detail("ReadEnd", printable(readEnd))
|
TraceEvent(SevError, "ReadRangeIssue", data->thisServerID).detail("ReadBegin", readBegin).detail("ReadEnd", readEnd)
.detail("VStart", vStart ? printable(vStart.key()) : "nil").detail("VEnd", vEnd ? printable(vEnd.key()) : "nil")
|
.detail("VStart", vStart ? vStart.key() : LiteralStringRef("nil")).detail("VEnd", vEnd ? vEnd.key() : LiteralStringRef("nil"))
.detail("AtStorageVersionBack", printable(atStorageVersion.end()[-1].key)).detail("ResultBack", printable(result.data.end()[-1].key))
|
.detail("AtStorageVersionBack", atStorageVersion.end()[-1].key).detail("ResultBack", result.data.end()[-1].key)
.detail("Limit", limit).detail("LimitBytes", *pLimitBytes).detail("ResultSize", result.data.size()).detail("PrevSize", prevSize);
|
.detail("Limit", limit).detail("LimitBytes", *pLimitBytes).detail("ResultSize", result.data.size()).detail("PrevSize", prevSize);
readBegin = readBeginTemp = keyAfter( result.data.end()[-1].key );
|
readBegin = readBeginTemp = keyAfter( result.data.end()[-1].key );
ASSERT( limit<=0 || *pLimitBytes<=0 || result.data.end()[-1].key == atStorageVersion.end()[-1].key );
|
ASSERT( limit<=0 || *pLimitBytes<=0 || result.data.end()[-1].key == atStorageVersion.end()[-1].key );
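The resume step above relies on keyAfter(): when a range read stops early, the next pass begins at the smallest key strictly greater than the last one returned, which for byte-string keys means appending a zero byte. A tiny demonstration of that ordering fact:

#include <iostream>
#include <string>

// Smallest key strictly greater than `key` in bytewise order: key + '\0'.
std::string keyAfter(const std::string& key) {
	return key + '\x00';
}

int main() {
	std::string readBegin = keyAfter("apple");
	std::cout << (readBegin > std::string("apple")) << " "   // 1: strictly greater
	          << (readBegin < std::string("appleA")) << "\n"; // 1: nothing fits between
}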
@ -1111,8 +1111,8 @@ ACTOR Future<GetKeyValuesReply> readRange( StorageServer* data, Version version,
/*if (*pLimitBytes <= 0)
|
/*if (*pLimitBytes <= 0)
TraceEvent(SevWarn, "ReadRangeLimitExceeded")
|
TraceEvent(SevWarn, "ReadRangeLimitExceeded")
.detail("Version", version)
|
.detail("Version", version)
.detail("Begin", printable(range.begin) )
|
.detail("Begin", range.begin )
.detail("End", printable(range.end) )
|
.detail("End", range.end )
.detail("LimitReamin", limit)
|
.detail("LimitReamin", limit)
.detail("LimitBytesRemain", *pLimitBytes); */
|
.detail("LimitBytesRemain", *pLimitBytes); */
@ -1135,9 +1135,9 @@ ACTOR Future<GetKeyValuesReply> readRange( StorageServer* data, Version version,
if ( !(totalsize>originalLimitBytes ? prefix_equal : result.data==correct.data) || correct.more != result.more ) {
|
if ( !(totalsize>originalLimitBytes ? prefix_equal : result.data==correct.data) || correct.more != result.more ) {
TraceEvent(SevError, "IncorrectResult", rrid).detail("Server", data->thisServerID).detail("CorrectRows", correct.data.size())
|
TraceEvent(SevError, "IncorrectResult", rrid).detail("Server", data->thisServerID).detail("CorrectRows", correct.data.size())
.detail("FirstDifference", first_difference).detail("OriginalLimit", originalLimit)
|
.detail("FirstDifference", first_difference).detail("OriginalLimit", originalLimit)
.detail("ResultRows", result.data.size()).detail("Result0", printable(result.data[0].key)).detail("Correct0", printable(correct.data[0].key))
|
.detail("ResultRows", result.data.size()).detail("Result0", result.data[0].key).detail("Correct0", correct.data[0].key)
.detail("ResultN", result.data.size() ? printable(result.data[std::min(correct.data.size(),result.data.size())-1].key) : "nil")
|
.detail("ResultN", result.data.size() ? result.data[std::min(correct.data.size(),result.data.size())-1].key : "nil")
.detail("CorrectN", correct.data.size() ? printable(correct.data[std::min(correct.data.size(),result.data.size())-1].key) : "nil");
|
.detail("CorrectN", correct.data.size() ? correct.data[std::min(correct.data.size(),result.data.size())-1].key : "nil");
}*/
|
}*/
} else {
|
} else {
// Reverse read - abandon hope alle ye who enter here
|
// Reverse read - abandon hope alle ye who enter here
@ -1297,11 +1297,11 @@ ACTOR Future<Void> getKeyValues( StorageServer* data, GetKeyValuesRequest req )
if( req.debugID.present() )
|
if( req.debugID.present() )
g_traceBatch.addEvent("TransactionDebug", req.debugID.get().first(), "storageserver.getKeyValues.AfterVersion");
|
g_traceBatch.addEvent("TransactionDebug", req.debugID.get().first(), "storageserver.getKeyValues.AfterVersion");
//.detail("ShardBegin", printable(shard.begin)).detail("ShardEnd", printable(shard.end));
|
//.detail("ShardBegin", shard.begin).detail("ShardEnd", shard.end);
//} catch (Error& e) { TraceEvent("WrongShardServer", data->thisServerID).detail("Begin", req.begin.toString()).detail("End", req.end.toString()).detail("Version", version).detail("Shard", "None").detail("In", "getKeyValues>getShardKeyRange"); throw e; }
|
//} catch (Error& e) { TraceEvent("WrongShardServer", data->thisServerID).detail("Begin", req.begin.toString()).detail("End", req.end.toString()).detail("Version", version).detail("Shard", "None").detail("In", "getKeyValues>getShardKeyRange"); throw e; }
if ( !selectorInRange(req.end, shard) && !(req.end.isFirstGreaterOrEqual() && req.end.getKey() == shard.end) ) {
|
if ( !selectorInRange(req.end, shard) && !(req.end.isFirstGreaterOrEqual() && req.end.getKey() == shard.end) ) {
// TraceEvent("WrongShardServer1", data->thisServerID).detail("Begin", req.begin.toString()).detail("End", req.end.toString()).detail("Version", version).detail("ShardBegin", printable(shard.begin)).detail("ShardEnd", printable(shard.end)).detail("In", "getKeyValues>checkShardExtents");
|
// TraceEvent("WrongShardServer1", data->thisServerID).detail("Begin", req.begin.toString()).detail("End", req.end.toString()).detail("Version", version).detail("ShardBegin", shard.begin).detail("ShardEnd", shard.end).detail("In", "getKeyValues>checkShardExtents");
throw wrong_shard_server();
|
throw wrong_shard_server();
}
|
}
@ -1313,7 +1313,7 @@ ACTOR Future<Void> getKeyValues( StorageServer* data, GetKeyValuesRequest req )
state Key end = wait(fEnd);
|
state Key end = wait(fEnd);
if( req.debugID.present() )
|
if( req.debugID.present() )
g_traceBatch.addEvent("TransactionDebug", req.debugID.get().first(), "storageserver.getKeyValues.AfterKeys");
|
g_traceBatch.addEvent("TransactionDebug", req.debugID.get().first(), "storageserver.getKeyValues.AfterKeys");
//.detail("Off1",offset1).detail("Off2",offset2).detail("ReqBegin",printable(req.begin.getKey())).detail("ReqEnd",printable(req.end.getKey()));
|
//.detail("Off1",offset1).detail("Off2",offset2).detail("ReqBegin",req.begin.getKey()).detail("ReqEnd",req.end.getKey());
// Offsets of zero indicate begin/end keys in this shard, which obviously means we can answer the query
|
// Offsets of zero indicate begin/end keys in this shard, which obviously means we can answer the query
// An end offset of 1 is also OK because the end key is exclusive, so if the first key of the next shard is the end the last actual key returned must be from this shard.
|
// An end offset of 1 is also OK because the end key is exclusive, so if the first key of the next shard is the end the last actual key returned must be from this shard.
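The two comments above describe the guard that follows: a resolved selector offset of 0 means the key landed inside this shard, and an end offset of 1 is still answerable because range reads exclude their end key. A toy version of that acceptance test (the real check also handles begin offsets and reverse reads, so treat this as an illustration only):

#include <iostream>

// Hypothetical simplification of the shard-offset guard described above.
bool canAnswerLocally(int beginOffset, int endOffset) {
	return beginOffset == 0 && (endOffset == 0 || endOffset == 1);
}

int main() {
	std::cout << canAnswerLocally(0, 1)   // 1: end key is exclusive
	          << canAnswerLocally(0, 2)   // 0: selector walked off the shard
	          << "\n";
}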
@ -1323,14 +1323,14 @@ ACTOR Future<Void> getKeyValues( StorageServer* data, GetKeyValuesRequest req )
// We could detect when offset1 takes us off the beginning of the database or offset2 takes us off the end, and return a clipped range rather
|
// We could detect when offset1 takes us off the beginning of the database or offset2 takes us off the end, and return a clipped range rather
// than an error (since that is what the NativeAPI.getRange will do anyway via its "slow path"), but we would have to add some flags to the response
|
// than an error (since that is what the NativeAPI.getRange will do anyway via its "slow path"), but we would have to add some flags to the response
// to encode whether we went off the beginning and the end, since it needs that information.
|
// to encode whether we went off the beginning and the end, since it needs that information.
//TraceEvent("WrongShardServer2", data->thisServerID).detail("Begin", req.begin.toString()).detail("End", req.end.toString()).detail("Version", version).detail("ShardBegin", printable(shard.begin)).detail("ShardEnd", printable(shard.end)).detail("In", "getKeyValues>checkOffsets").detail("BeginKey", printable(begin)).detail("EndKey", printable(end)).detail("BeginOffset", offset1).detail("EndOffset", offset2);
|
//TraceEvent("WrongShardServer2", data->thisServerID).detail("Begin", req.begin.toString()).detail("End", req.end.toString()).detail("Version", version).detail("ShardBegin", shard.begin).detail("ShardEnd", shard.end).detail("In", "getKeyValues>checkOffsets").detail("BeginKey", begin).detail("EndKey", end).detail("BeginOffset", offset1).detail("EndOffset", offset2);
throw wrong_shard_server();
|
throw wrong_shard_server();
}
|
}
if (begin >= end) {
|
if (begin >= end) {
if( req.debugID.present() )
|
if( req.debugID.present() )
g_traceBatch.addEvent("TransactionDebug", req.debugID.get().first(), "storageserver.getKeyValues.Send");
|
g_traceBatch.addEvent("TransactionDebug", req.debugID.get().first(), "storageserver.getKeyValues.Send");
//.detail("Begin",printable(begin)).detail("End",printable(end));
|
//.detail("Begin",begin).detail("End",end);
GetKeyValuesReply none;
|
GetKeyValuesReply none;
none.version = version;
|
none.version = version;
@@ -1347,7 +1347,7 @@ ACTOR Future<Void> getKeyValues( StorageServer* data, GetKeyValuesRequest req )
 
 		if( req.debugID.present() )
 			g_traceBatch.addEvent("TransactionDebug", req.debugID.get().first(), "storageserver.getKeyValues.AfterReadRange");
-		//.detail("Begin",printable(begin)).detail("End",printable(end)).detail("SizeOf",r.data.size());
+		//.detail("Begin",begin).detail("End",end).detail("SizeOf",r.data.size());
 		data->checkChangeCounter( changeCounter, KeyRangeRef( std::min<KeyRef>(begin, std::min<KeyRef>(req.begin.getKey(), req.end.getKey())), std::max<KeyRef>(end, std::max<KeyRef>(req.begin.getKey(), req.end.getKey())) ) );
 		if (EXPENSIVE_VALIDATION) {
 			for (int i = 0; i < r.data.size(); i++)
@@ -1873,8 +1873,8 @@ ACTOR Future<Void> fetchKeys( StorageServer *data, AddingShard* shard ) {
 		debugKeyRange("fetchKeysBegin", data->version.get(), shard->keys);
 
 		TraceEvent(SevDebug, interval.begin(), data->thisServerID)
-			.detail("KeyBegin", printable(shard->keys.begin))
-			.detail("KeyEnd",printable(shard->keys.end));
+			.detail("KeyBegin", shard->keys.begin)
+			.detail("KeyEnd",shard->keys.end);
 
 		validate(data);
 
@@ -1935,8 +1935,8 @@ ACTOR Future<Void> fetchKeys( StorageServer *data, AddingShard* shard ) {
 
 			TraceEvent(SevDebug, "FetchKeysBlock", data->thisServerID).detail("FKID", interval.pairID)
 				.detail("BlockRows", this_block.size()).detail("BlockBytes", expectedSize)
-				.detail("KeyBegin", printable(keys.begin)).detail("KeyEnd", printable(keys.end))
-				.detail("Last", this_block.size() ? printable(this_block.end()[-1].key) : std::string())
+				.detail("KeyBegin", keys.begin).detail("KeyEnd", keys.end)
+				.detail("Last", this_block.size() ? this_block.end()[-1].key : std::string())
 				.detail("Version", fetchVersion).detail("More", this_block.more);
 			debugKeyRange("fetchRange", fetchVersion, keys);
 			for(auto k = this_block.begin(); k != this_block.end(); ++k) debugMutation("fetch", fetchVersion, MutationRef(MutationRef::SetValue, k->key, k->value));
@@ -2124,8 +2124,8 @@ ACTOR Future<Void> fetchKeys( StorageServer *data, AddingShard* shard ) {
 		TraceEvent(SevError, "FetchKeysError", data->thisServerID)
 			.error(e)
 			.detail("Elapsed", now()-startt)
-			.detail("KeyBegin", printable(keys.begin))
-			.detail("KeyEnd",printable(keys.end));
+			.detail("KeyBegin", keys.begin)
+			.detail("KeyEnd",keys.end);
 		if (e.code() != error_code_actor_cancelled)
 			data->otherError.sendError(e); // Kill the storage server.  Are there any recoverable errors?
 		throw; // goes nowhere
@@ -2185,8 +2185,8 @@ void changeServerKeys( StorageServer* data, const KeyRangeRef& keys, bool nowAss
 	ASSERT( !keys.empty() );
 
 	//TraceEvent("ChangeServerKeys", data->thisServerID)
-	//	.detail("KeyBegin", printable(keys.begin))
-	//	.detail("KeyEnd", printable(keys.end))
+	//	.detail("KeyBegin", keys.begin)
+	//	.detail("KeyEnd", keys.end)
 	//	.detail("NowAssigned", nowAssigned)
 	//	.detail("Version", version)
 	//	.detail("Context", changeServerKeysContextName[(int)context]);
@@ -2200,15 +2200,15 @@ void changeServerKeys( StorageServer* data, const KeyRangeRef& keys, bool nowAss
 		if( nowAssigned != it->value()->assigned() ) {
 			isDifferent = true;
 			/*TraceEvent("CSKRangeDifferent", data->thisServerID)
-				.detail("KeyBegin", printable(it->range().begin))
-				.detail("KeyEnd", printable(it->range().end));*/
+				.detail("KeyBegin", it->range().begin)
+				.detail("KeyEnd", it->range().end);*/
 			break;
 		}
 	}
 	if( !isDifferent ) {
 		//TraceEvent("CSKShortCircuit", data->thisServerID)
-		//	.detail("KeyBegin", printable(keys.begin))
-		//	.detail("KeyEnd", printable(keys.end));
+		//	.detail("KeyBegin", keys.begin)
+		//	.detail("KeyEnd", keys.end);
 		return;
 	}
 
@@ -2245,8 +2245,8 @@ void changeServerKeys( StorageServer* data, const KeyRangeRef& keys, bool nowAss
 		KeyRangeRef range = keys & r->range();
 		bool dataAvailable = r->value()==latestVersion || r->value() >= version;
 		/*TraceEvent("CSKRange", data->thisServerID)
-			.detail("KeyBegin", printable(range.begin))
-			.detail("KeyEnd", printable(range.end))
+			.detail("KeyBegin", range.begin)
+			.detail("KeyEnd", range.end)
 			.detail("Available", dataAvailable)
 			.detail("NowAssigned", nowAssigned)
 			.detail("NewestAvailable", r->value())
@@ -2832,7 +2832,7 @@ void setAvailableStatus( StorageServer* self, KeyRangeRef keys, bool available )
 	auto& mLV = self->addVersionToMutationLog( self->data().getLatestVersion() );
 
 	KeyRange availableKeys = KeyRangeRef( persistShardAvailableKeys.begin.toString() + keys.begin.toString(), persistShardAvailableKeys.begin.toString() + keys.end.toString() );
-	//TraceEvent("SetAvailableStatus", self->thisServerID).detail("Version", mLV.version).detail("RangeBegin", printable(availableKeys.begin)).detail("RangeEnd", printable(availableKeys.end));
+	//TraceEvent("SetAvailableStatus", self->thisServerID).detail("Version", mLV.version).detail("RangeBegin", availableKeys.begin).detail("RangeEnd", availableKeys.end);
 
 	self->addMutationToMutationLog( mLV, MutationRef( MutationRef::ClearRange, availableKeys.begin, availableKeys.end ) );
 	self->addMutationToMutationLog( mLV, MutationRef( MutationRef::SetValue, availableKeys.begin, available ? LiteralStringRef("1") : LiteralStringRef("0") ) );
@@ -2849,7 +2849,7 @@ void setAssignedStatus( StorageServer* self, KeyRangeRef keys, bool nowAssigned
 	KeyRange assignedKeys = KeyRangeRef(
 		persistShardAssignedKeys.begin.toString() + keys.begin.toString(),
 		persistShardAssignedKeys.begin.toString() + keys.end.toString() );
-	//TraceEvent("SetAssignedStatus", self->thisServerID).detail("Version", mLV.version).detail("RangeBegin", printable(assignedKeys.begin)).detail("RangeEnd", printable(assignedKeys.end));
+	//TraceEvent("SetAssignedStatus", self->thisServerID).detail("Version", mLV.version).detail("RangeBegin", assignedKeys.begin).detail("RangeEnd", assignedKeys.end);
 	self->addMutationToMutationLog( mLV, MutationRef( MutationRef::ClearRange, assignedKeys.begin, assignedKeys.end ) );
 	self->addMutationToMutationLog( mLV, MutationRef( MutationRef::SetValue, assignedKeys.begin,
 		nowAssigned ? LiteralStringRef("1") : LiteralStringRef("0") ) );
@@ -2954,7 +2954,7 @@ ACTOR Future<Void> applyByteSampleResult( StorageServer* data, IKeyValueStore* s
 			wait(delay(SERVER_KNOBS->BYTE_SAMPLE_LOAD_DELAY));
 		}
 	}
-	TraceEvent("RecoveredByteSampleRange", data->thisServerID).detail("Begin", printable(begin)).detail("End", printable(end)).detail("Fetches", totalFetches).detail("Keys", totalKeys).detail("ReadBytes", totalBytes);
+	TraceEvent("RecoveredByteSampleRange", data->thisServerID).detail("Begin", begin).detail("End", end).detail("Fetches", totalFetches).detail("Keys", totalKeys).detail("ReadBytes", totalBytes);
 	return Void();
 }
 
@@ -3051,7 +3051,7 @@ ACTOR Future<bool> restoreDurableState( StorageServer* data, IKeyValueStore* sto
 			ASSERT( !keys.empty() );
 			bool nowAvailable = available[availableLoc].value!=LiteralStringRef("0");
 			/*if(nowAvailable)
-				TraceEvent("AvailableShard", data->thisServerID).detail("RangeBegin", printable(keys.begin)).detail("RangeEnd", printable(keys.end));*/
+				TraceEvent("AvailableShard", data->thisServerID).detail("RangeBegin", keys.begin).detail("RangeEnd", keys.end);*/
 			data->newestAvailableVersion.insert( keys, nowAvailable ? latestVersion : invalidVersion );
 			wait(yield());
 		}
@@ -3065,7 +3065,7 @@ ACTOR Future<bool> restoreDurableState( StorageServer* data, IKeyValueStore* sto
 			ASSERT( !keys.empty() );
 			bool nowAssigned = assigned[assignedLoc].value!=LiteralStringRef("0");
 			/*if(nowAssigned)
-				TraceEvent("AssignedShard", data->thisServerID).detail("RangeBegin", printable(keys.begin)).detail("RangeEnd", printable(keys.end));*/
+				TraceEvent("AssignedShard", data->thisServerID).detail("RangeBegin", keys.begin).detail("RangeEnd", keys.end);*/
 			changeServerKeys(data, keys, nowAssigned, version, CSK_RESTORE);
 
 			if (!nowAssigned) ASSERT( data->newestAvailableVersion.allEqual(keys, invalidVersion) );
@@ -151,7 +151,7 @@ int getOption( VectorRef<KeyValueRef> options, Key key, int defaultValue) {
 			options[i].value = LiteralStringRef("");
 			return r;
 		} else {
-			TraceEvent(SevError, "InvalidTestOption").detail("OptionName", printable(key));
+			TraceEvent(SevError, "InvalidTestOption").detail("OptionName", key);
 			throw test_specification_invalid();
 		}
 	}
@@ -167,7 +167,7 @@ uint64_t getOption( VectorRef<KeyValueRef> options, Key key, uint64_t defaultVal
 			options[i].value = LiteralStringRef("");
 			return r;
 		} else {
-			TraceEvent(SevError, "InvalidTestOption").detail("OptionName", printable(key));
+			TraceEvent(SevError, "InvalidTestOption").detail("OptionName", key);
 			throw test_specification_invalid();
 		}
 	}
@@ -183,7 +183,7 @@ int64_t getOption( VectorRef<KeyValueRef> options, Key key, int64_t defaultValue
 			options[i].value = LiteralStringRef("");
 			return r;
 		} else {
-			TraceEvent(SevError, "InvalidTestOption").detail("OptionName", printable(key));
+			TraceEvent(SevError, "InvalidTestOption").detail("OptionName", key);
 			throw test_specification_invalid();
 		}
 	}
@@ -304,7 +304,7 @@ TestWorkload *getWorkloadIface( WorkloadRequest work, VectorRef<KeyValueRef> opt
 	auto unconsumedOptions = checkAllOptionsConsumed( workload ? workload->options : VectorRef<KeyValueRef>() );
 	if( !workload || unconsumedOptions.size() ) {
 		TraceEvent evt(SevError,"TestCreationError");
-		evt.detail("TestName", printable(testName));
+		evt.detail("TestName", testName);
 		if( !workload ) {
 			evt.detail("Reason", "Null workload");
 			fprintf(stderr, "ERROR: Workload could not be created, perhaps testName (%s) is not a valid workload\n", printable(testName).c_str());
@@ -509,7 +509,7 @@ ACTOR Future<Void> testerServerWorkload( WorkloadRequest work, Reference<Cluster
 	}
 
 	// add test for "done" ?
-	TraceEvent("WorkloadReceived", workIface.id()).detail("Title", printable(work.title) );
+	TraceEvent("WorkloadReceived", workIface.id()).detail("Title", work.title );
 	TestWorkload *workload = getWorkloadIface( work, dbInfo );
 	if(!workload) {
 		TraceEvent("TestCreationError").detail("Reason", "Workload could not be created");
@@ -635,7 +635,7 @@ void throwIfError(const std::vector<Future<ErrorOr<T>>> &futures, std::string er
 
 ACTOR Future<DistributedTestResults> runWorkload( Database cx, std::vector< TesterInterface > testers,
 		TestSpec spec ) {
-	TraceEvent("TestRunning").detail( "WorkloadTitle", printable(spec.title) )
+	TraceEvent("TestRunning").detail( "WorkloadTitle", spec.title )
 		.detail("TesterCount", testers.size()).detail("Phases", spec.phases)
 		.detail("TestTimeout", spec.timeout);
 
@@ -667,16 +667,16 @@ ACTOR Future<DistributedTestResults> runWorkload( Database cx, std::vector< Test
 	if( spec.phases & TestWorkload::SETUP ) {
 		state std::vector< Future<ErrorOr<Void>> > setups;
 		printf("setting up test (%s)...\n", printable(spec.title).c_str());
-		TraceEvent("TestSetupStart").detail("WorkloadTitle", printable(spec.title));
+		TraceEvent("TestSetupStart").detail("WorkloadTitle", spec.title);
 		for(int i= 0; i < workloads.size(); i++)
 			setups.push_back( workloads[i].setup.template getReplyUnlessFailedFor<Void>( waitForFailureTime, 0) );
 		wait( waitForAll( setups ) );
 		throwIfError(setups, "SetupFailedForWorkload" + printable(spec.title));
-		TraceEvent("TestSetupComplete").detail("WorkloadTitle", printable(spec.title));
+		TraceEvent("TestSetupComplete").detail("WorkloadTitle", spec.title);
 	}
 
 	if( spec.phases & TestWorkload::EXECUTION ) {
-		TraceEvent("TestStarting").detail("WorkloadTitle", printable(spec.title));
+		TraceEvent("TestStarting").detail("WorkloadTitle", spec.title);
 		printf("running test (%s)...\n", printable(spec.title).c_str());
 		state std::vector< Future<ErrorOr<Void>> > starts;
 		for(int i= 0; i < workloads.size(); i++)
@@ -684,7 +684,7 @@ ACTOR Future<DistributedTestResults> runWorkload( Database cx, std::vector< Test
 		wait( waitForAll( starts ) );
 		throwIfError(starts, "StartFailedForWorkload" + printable(spec.title));
 		printf("%s complete\n", printable(spec.title).c_str());
-		TraceEvent("TestComplete").detail("WorkloadTitle", printable(spec.title));
+		TraceEvent("TestComplete").detail("WorkloadTitle", spec.title);
 	}
 
 	if( spec.phases & TestWorkload::CHECK ) {
@@ -711,7 +711,7 @@ ACTOR Future<DistributedTestResults> runWorkload( Database cx, std::vector< Test
 	if( spec.phases & TestWorkload::METRICS ) {
 		state std::vector< Future<ErrorOr<vector<PerfMetric>>> > metricTasks;
 		printf("fetching metrics (%s)...\n", printable(spec.title).c_str());
-		TraceEvent("TestFetchingMetrics").detail("WorkloadTitle", printable(spec.title));
+		TraceEvent("TestFetchingMetrics").detail("WorkloadTitle", spec.title);
 		for(int i= 0; i < workloads.size(); i++)
 			metricTasks.push_back( workloads[i].metrics.template getReplyUnlessFailedFor<vector<PerfMetric>>(waitForFailureTime, 0) );
 		wait( waitForAll( metricTasks ) );
@@ -834,7 +834,7 @@ ACTOR Future<bool> runTest( Database cx, std::vector< TesterInterface > testers,
 	}
 
 	TraceEvent(ok ? SevInfo : SevWarnAlways, "TestResults")
-		.detail("Workload", printable(spec.title))
+		.detail("Workload", spec.title)
 		.detail("Passed", (int)ok);
 		//.detail("Metrics", metricSummary);
 
@@ -888,7 +888,7 @@ vector<TestSpec> readTests( ifstream& ifs ) {
 			}
 
 			spec.title = StringRef( value );
-			TraceEvent("TestParserTest").detail("ParsedTest", printable( spec.title ));
+			TraceEvent("TestParserTest").detail("ParsedTest", spec.title );
 		} else if( attrib == "timeout" ) {
 			sscanf( value.c_str(), "%d", &(spec.timeout) );
 			ASSERT( spec.timeout > 0 );
@@ -307,18 +307,18 @@ std::vector< DiskStore > getDiskStores( std::string folder, std::string suffix,
 			// Use the option string that's in the file rather than tLogOptions.toPrefix(),
 			// because they might be different if a new option was introduced in this version.
 			StringRef optionsString = filename.removePrefix(fileVersionedLogDataPrefix).eat("-");
-			TraceEvent("DiskStoreVersioned").detail("Filename", printable(filename));
+			TraceEvent("DiskStoreVersioned").detail("Filename", filename);
 			ErrorOr<TLogOptions> tLogOptions = TLogOptions::FromStringRef(optionsString);
 			if (tLogOptions.isError()) {
-				TraceEvent(SevWarn, "DiskStoreMalformedFilename").detail("Filename", printable(filename));
+				TraceEvent(SevWarn, "DiskStoreMalformedFilename").detail("Filename", filename);
 				continue;
 			}
-			TraceEvent("DiskStoreVersionedSuccess").detail("Filename", printable(filename));
+			TraceEvent("DiskStoreVersionedSuccess").detail("Filename", filename);
 			store.tLogOptions = tLogOptions.get();
 			prefix = filename.substr(0, fileVersionedLogDataPrefix.size() + optionsString.size() + 1);
 		}
 		else if( filename.startsWith( fileLogDataPrefix ) ) {
-			TraceEvent("DiskStoreUnversioned").detail("Filename", printable(filename));
+			TraceEvent("DiskStoreUnversioned").detail("Filename", filename);
 			store.storedComponent = DiskStore::TLogData;
 			store.tLogOptions.version = TLogVersion::V2;
 			store.tLogOptions.spillType = TLogSpillType::VALUE;
@@ -1261,7 +1261,7 @@ ACTOR Future<Void> fdbd(
 	try {
 
 		ServerCoordinators coordinators( connFile );
-		TraceEvent("StartingFDBD").detailext("ZoneID", localities.zoneId()).detailext("MachineId", localities.machineId()).detail("DiskPath", dataFolder).detail("CoordPath", coordFolder);
+		TraceEvent("StartingFDBD").detail("ZoneID", localities.zoneId()).detail("MachineId", localities.machineId()).detail("DiskPath", dataFolder).detail("CoordPath", coordFolder);
 
 		// SOMEDAY: start the services on the machine in a staggered fashion in simulation?
 		state vector<Future<Void>> v;
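Note: `detailext` existed only because `detail` had no overloads for `StringRef` and `Optional<Standalone<StringRef>>`. With the templated `detail` plus the `Traceable<StringRef>` and `Traceable<Optional<T>>` specializations added later in this diff, the distinction disappears, so call sites switch uniformly. Illustrative call (assuming `localities.zoneId()` returns an `Optional<Standalone<StringRef>>`, as elsewhere in this codebase):

    // Before: TraceEvent("StartingFDBD").detailext("ZoneID", localities.zoneId())...
    // After: the one template handles it, rendering "[not set]" for an empty Optional.
    TraceEvent("StartingFDBD").detail("ZoneID", localities.zoneId());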
@@ -47,7 +47,7 @@ void printTrace(JNIEnv* env, jobject self, jint severity, jstring message, jobje
 	auto f = onMainThread([severity, &detailsMap, msg]() -> Future<Void> {
 		TraceEvent evt(Severity(severity), msg);
 		for (const auto& p : detailsMap) {
-			evt.detail(p.first, p.second);
+			evt.detail(p.first.c_str(), p.second);
 		}
 		return Void();
 	});
@@ -140,7 +140,7 @@ struct MachineAttritionWorkload : TestWorkload {
 				else
 					kt = ISimulator::RebootAndDelete;
 			}
-			TraceEvent("Assassination").detailext("TargetDatacenter", target).detail("Reboot", self->reboot).detail("KillType", kt);
+			TraceEvent("Assassination").detail("TargetDatacenter", target).detail("Reboot", self->reboot).detail("KillType", kt);
 
 			g_simulator.killDataCenter( target, kt );
 		} else {
@@ -176,7 +176,7 @@ struct MachineAttritionWorkload : TestWorkload {
 			}
 
 			TraceEvent("Assassination").detail("TargetMachine", targetMachine.toString())
-				.detailext("ZoneId", targetMachine.zoneId())
+				.detail("ZoneId", targetMachine.zoneId())
 				.detail("Reboot", self->reboot).detail("KilledMachines", killedMachines)
 				.detail("MachinesToKill", self->machinesToKill).detail("MachinesToLeave", self->machinesToLeave)
 				.detail("Machines", self->machines.size()).detail("Replace", self->replacement);
@@ -379,7 +379,7 @@ struct RemoveServersSafelyWorkload : TestWorkload {
 		TraceEvent("RemoveAndKill", functionId).detail("Step", removeViaClear ? "ClearMachines" : "KillMachines").detail("Addresses", describe(killAddrs)).detail("Processes", killProcArray.size()).detail("Zones", zoneIds.size()).detail("ClusterAvailable", g_simulator.isAvailable());
 		for (auto& zoneId : zoneIds) {
 			killedMachine = g_simulator.killZone( zoneId, removeViaClear ? ISimulator::RebootAndDelete : ISimulator::KillInstantly );
-			TraceEvent(killedMachine ? SevInfo : SevWarn, "RemoveAndKill").detail("Step", removeViaClear ? "Clear Machine" : "Kill Machine").detailext("ZoneId", zoneId).detail(removeViaClear ? "Cleared" : "Killed", killedMachine).detail("ClusterAvailable", g_simulator.isAvailable());
+			TraceEvent(killedMachine ? SevInfo : SevWarn, "RemoveAndKill").detail("Step", removeViaClear ? "Clear Machine" : "Kill Machine").detail("ZoneId", zoneId).detail(removeViaClear ? "Cleared" : "Killed", killedMachine).detail("ClusterAvailable", g_simulator.isAvailable());
 		}
 	}
 
@@ -33,7 +33,7 @@
 
 struct SayHelloTaskFunc : TaskFuncBase {
 	static StringRef name;
-	static const uint32_t version = 1;
+	static constexpr uint32_t version = 1;
 
 	StringRef getName() const { return name; };
 	Future<Void> execute(Database cx, Reference<TaskBucket> tb, Reference<FutureBucket> fb, Reference<Task> task) { return Void(); };
@@ -43,7 +43,11 @@ struct SayHelloTaskFunc : TaskFuncBase {
 		// check task version
 		uint32_t taskVersion = task->getVersion();
 		if (taskVersion > SayHelloTaskFunc::version) {
-			TraceEvent("TaskBucketCorrectnessSayHello").detail("CheckTaskVersion", "taskVersion is larger than the funcVersion").detail("TaskVersion", taskVersion).detail("FuncVersion", SayHelloTaskFunc::version);
+			uint32_t v = SayHelloTaskFunc::version;
+			TraceEvent("TaskBucketCorrectnessSayHello")
+				.detail("CheckTaskVersion", "taskVersion is larger than the funcVersion")
+				.detail("TaskVersion", taskVersion)
+				.detail("FuncVersion", v);
 		}
 
 		state Reference<TaskFuture> done = futureBucket->unpack(task->params[Task::reservedTaskParamKeyDone]);
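Note on the `uint32_t v = SayHelloTaskFunc::version;` copy above: the new templated `detail(std::string, const T&)` binds its argument by const reference, which odr-uses it. Under pre-C++17 rules, a `static constexpr` data member that is odr-used needs an out-of-line definition, so passing `SayHelloTaskFunc::version` directly could fail to link; copying into a local first is only a constant-expression read. A minimal sketch of the pitfall (hypothetical names):

    struct Func {
        static constexpr uint32_t version = 1; // in-class declaration only, no definition
    };

    template <class T>
    void logDetail(const T& value); // by-reference parameter odr-uses its argument

    void example() {
        // logDetail(Func::version); // may fail to link: needs the address of Func::version
        uint32_t v = Func::version;  // plain value read, always fine
        logDetail(v);
    }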
@@ -20,6 +20,7 @@
 
 #include "flow/ActorCollection.h"
 #include "flow/IndexedSet.h"
+#include "flow/UnitTest.h"
 #include "flow/actorcompiler.h"  // This must be the last #include.
 
 ACTOR Future<Void> actorCollection( FutureStream<Future<Void>> addActor, int* pCount, double *lastChangeTime, double *idleTime, double *allTime, bool returnWhenEmptied )
@@ -58,3 +59,91 @@ ACTOR Future<Void> actorCollection( FutureStream<Future<Void>> addActor, int* pC
 			when (Error e = waitNext(errors.getFuture())) { throw e; }
 		}
 	}
 
+template<class T, class U>
+struct Traceable<std::pair<T, U>> {
+	static constexpr bool value = Traceable<T>::value && Traceable<U>::value;
+	static std::string toString(const std::pair<T, U>& p) {
+		auto tStr = Traceable<T>::toString(p.first);
+		auto uStr = Traceable<U>::toString(p.second);
+		std::string result(tStr.size() + uStr.size() + 3, 'x');
+		std::copy(tStr.begin(), tStr.end(), result.begin());
+		auto iter = result.begin() + tStr.size();
+		*(iter++) = ' ';
+		*(iter++) = '-';
+		*(iter++) = ' ';
+		std::copy(uStr.begin(), uStr.end(), iter);
+		return result;
+	}
+};
+
+TEST_CASE("/flow/TraceEvent") {
+	state unsigned i;
+	state double startTime;
+	state std::vector<std::string> strings;
+	state std::vector<int> keyIdx;
+	state std::vector<int> pairRnd;
+	state std::vector<int> num;
+	state std::vector<double> doub;
+	state std::vector<int> strIdx;
+	strings.reserve(10000);
+	keyIdx.reserve(1e6);
+	pairRnd.reserve(1e6);
+	num.reserve(1e6);
+	doub.reserve(1e6);
+	strIdx.reserve(1e6);
+	for (i = 0; i < 100; ++i) {
+		for (int j = 0; j < 100; ++j) {
+			strings.emplace_back(g_random->randomAlphaNumeric(g_random->randomInt(1, 30)));
+		}
+		wait(delay(0));
+	}
+	for (i = 0; i < 1e6; ++i) {
+		keyIdx.emplace_back(g_random->randomInt(0, strings.size()));
+		pairRnd.emplace_back(g_random->randomInt(-1000, 1000));
+		num.emplace_back(g_random->randomInt(0, 1000));
+		doub.emplace_back(g_random->random01());
+		strIdx.emplace_back(g_random->randomInt(0, strings.size()));
+	}
+	TraceEvent("pairsfilled")
+		.detail("MemoryUsage", getMemoryUsage());
+	printf("Sleeping for 20 seconds - attach perf now to PID %d\n", getpid());
+	wait(delay(20));
+	printf("Done sleeping\n");
+	startTime = g_network->now();
+	for (i = 0; i < 100000; ++i) {
+		for (unsigned j = 0; j < 100; ++j) {
+			int idx = (i+1)*j % keyIdx.size();
+			StringRef key(strings[keyIdx[idx]]);
+			auto p = std::make_pair(key, pairRnd[idx]);
+			TraceEvent("TestTraceLineNoDebug")
+				.detail("Num", num[idx])
+				.detail("Double", doub[idx])
+				.detail("str", strings[strIdx[idx]])
+				.detail("pair", p);
+		}
+		wait(delay(0));
+	}
+	TraceEvent("TraceDuration")
+		.detail("Time", g_network->now() - startTime);
+	startTime = g_network->now();
+	for (i = 0; i < 1000000; ++i) {
+		for (unsigned j = 0; j < 100; ++j) {
+			int idx = (i+1)*j % keyIdx.size();
+			StringRef key(strings[keyIdx[idx]]);
+			auto p = std::make_pair(key, pairRnd[idx]);
+			TraceEvent(SevDebug, "TestTraceLineDebug")
+				.detail("Num", num[idx])
+				.detail("Double", doub[idx])
+				.detail("str", strings[strIdx[idx]])
+				.detail("pair", p);
+		}
+		wait(delay(0));
+	}
+	TraceEvent("TraceDuration")
+		.detail("Time", g_network->now() - startTime);
+	printf("benchmark done\n");
+	wait(delay(10));
+	return Void();
+}
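Note: the unit test above is a micro-benchmark rather than a correctness test. The first loop emits default-severity events (formatted and written); the second emits `SevDebug` events, which the new constructors disable outright whenever `MIN_TRACE_SEVERITY > SevDebug`, so it measures the suppressed fast path. The `Traceable<std::pair<T, U>>` specialization hand-assembles "first - second" into a preallocated buffer; a simpler equivalent (my formulation, trading an extra temporary for readability) would be:

    template <class T, class U>
    std::string pairToString(const std::pair<T, U>& p) {
        // Same "first - second" rendering as the specialization above.
        return Traceable<T>::toString(p.first) + " - " + Traceable<U>::toString(p.second);
    }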
flow/Arena.h (70 changed lines)

@@ -33,6 +33,7 @@
 #include <limits>
 #include <set>
 #include <type_traits>
+#include <sstream>
 
 // TrackIt is a zero-size class for tracking constructions, destructions, and assignments of instances
 // of a class.  Just inherit TrackIt<T> from T to enable tracking of construction and destruction of
@@ -421,6 +422,13 @@ private:
 	bool valid;
 };
 
+template<class T>
+struct Traceable<Optional<T>> : std::conditional<Traceable<T>::value, std::true_type, std::false_type>::type {
+	static std::string toString(const Optional<T>& value) {
+		return value.present() ? Traceable<T>::toString(value.get()) : "[not set]";
+	}
+};
+
 //#define STANDALONE_ALWAYS_COPY
 
 template <class T>
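Note: inheriting from `std::conditional<...>::type` makes `Traceable<Optional<T>>::value` track `Traceable<T>::value` exactly, so the SFINAE-constrained `detail` accepts an `Optional<T>` precisely when it would accept a `T`. Illustrative checks (the asserts are mine; `Traceable<int>` comes from the `FORMAT_TRACEABLE` instantiations later in this diff, and `Opaque` is a hypothetical type with no specialization):

    static_assert(Traceable<Optional<int>>::value, "Optional of a traceable type is traceable");
    struct Opaque {};
    static_assert(!Traceable<Optional<Opaque>>::value, "no Traceable<Opaque> specialization");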
@@ -559,16 +567,9 @@ public:
 	}
 
 	std::string toString() const { return std::string( (const char*)data, length ); }
-	std::string printable() const {
-		std::string s;
-		for (int i = 0; i<length; i++) {
-			uint8_t b = (*this)[i];
-			if (b >= 32 && b < 127 && b != '\\') s += (char)b;
-			else if (b == '\\') s += "\\\\";
-			else s += format("\\x%02x", b);
-		}
-		return s;
-	}
+
+	static bool isPrintable(char c) { return c > 32 && c < 127; }
+	inline std::string printable() const;
 
 	std::string toHexString(int limit = -1) const {
 		if(limit < 0)
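Note: this move is not perfectly behavior-preserving. The old inline `printable()` treated any byte in [32, 127) other than backslash as printable; the new `isPrintable` requires `c > 32`, so the space character (0x20) is now escaped:

    // Old predicate: b >= 32 && b < 127 && b != '\\'  -> ' ' emitted verbatim
    // New predicate: c > 32 && c < 127                -> ' ' rendered as "\x20"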
@@ -652,6 +653,35 @@ private:
 };
 #pragma pack( pop )
 
+template<>
+struct TraceableString<StringRef> {
+	static const char* begin(StringRef value) {
+		return reinterpret_cast<const char*>(value.begin());
+	}
+
+	static bool atEnd(const StringRef& value, const char* iter) {
+		return iter == reinterpret_cast<const char*>(value.end());
+	}
+
+	static std::string toString(const StringRef& value) {
+		return value.toString();
+	}
+};
+
+template<>
+struct Traceable<StringRef> : TraceableStringImpl<StringRef> {};
+
+inline std::string StringRef::printable() const {
+	return Traceable<StringRef>::toString(*this);
+}
+
+template<class T>
+struct Traceable<Standalone<T>> : std::conditional<Traceable<T>::value, std::true_type, std::false_type>::type {
+	static std::string toString(const Standalone<T>& value) {
+		return Traceable<T>::toString(value);
+	}
+};
+
 #define LiteralStringRef( str ) StringRef( (const uint8_t*)(str), sizeof((str))-1 )
 
 // makeString is used to allocate a Standalone<StringRef> of a known length for later
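Note: with `Traceable<StringRef>` in place, `StringRef::printable()` is just a thin wrapper over the trait, so escaping lives in a single implementation. Any user-defined type can opt into `.detail()` the same way. A hypothetical example (the `Point` type and its specialization are mine, not part of this diff):

    struct Point { int x, y; };

    template <>
    struct Traceable<Point> : std::true_type {
        static std::string toString(const Point& p) {
            return format("%d,%d", p.x, p.y); // flow's format(), forward-declared in Trace.h
        }
    };

    // Usage: the value is formatted only if the event is enabled.
    // TraceEvent("Moved").detail("Pos", Point{1, 2});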
@@ -859,6 +889,26 @@ private:
 		m_capacity = requiredCapacity;
 	}
 };
 
+template<class T>
+struct Traceable<VectorRef<T>> {
+	constexpr static bool value = Traceable<T>::value;
+
+	static std::string toString(const VectorRef<T>& value) {
+		std::stringstream ss;
+		bool first = true;
+		for (const auto& v : value) {
+			if (first) {
+				first = false;
+			} else {
+				ss << ' ';
+			}
+			ss << Traceable<T>::toString(v);
+		}
+		return ss.str();
+	}
+};
+
 template <class Archive, class T>
 inline void load( Archive& ar, VectorRef<T>& value ) {
 	// FIXME: range checking for length, here and in other serialize code
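Note: `Traceable<VectorRef<T>>` renders elements space-separated, so a `VectorRef<int>` holding 1, 2 and 3 traces as `1 2 3`. For string-like element types the separator stays unambiguous, because each element is escaped by its own `Traceable<T>` and (per the new `isPrintable` predicate) embedded spaces come out as `\x20`:

    // Illustrative only, assuming Traceable<int> and Traceable<StringRef>:
    // {1, 2, 3}        -> "1 2 3"
    // {"a b", "c"}     -> "a\x20b c"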
@@ -89,8 +89,8 @@ SystemStatistics customSystemMonitor(std::string eventName, StatisticsState *sta
 			.detail("CachePageReadsMerged", netData.countFileCachePageReadsMerged - statState->networkState.countFileCachePageReadsMerged)
 			.detail("CacheWrites", netData.countFileCacheWrites - statState->networkState.countFileCacheWrites)
 			.detail("CacheReads", netData.countFileCacheReads - statState->networkState.countFileCacheReads)
-			.detailext("ZoneID", machineState.zoneId)
-			.detailext("MachineID", machineState.machineId)
+			.detail("ZoneID", machineState.zoneId)
+			.detail("MachineID", machineState.machineId)
 			.detail("AIOSubmitCount", netData.countAIOSubmit - statState->networkState.countAIOSubmit)
 			.detail("AIOCollectCount", netData.countAIOCollect - statState->networkState.countAIOCollect)
 			.detail("AIOSubmitLag", (g_network->networkMetrics.secSquaredSubmit - statState->networkMetricsState.secSquaredSubmit) / currentStats.elapsed)
@@ -153,8 +153,8 @@ SystemStatistics customSystemMonitor(std::string eventName, StatisticsState *sta
 				.detail("TotalMemory", currentStats.machineTotalRAM)
 				.detail("CommittedMemory", currentStats.machineCommittedRAM)
 				.detail("AvailableMemory", currentStats.machineAvailableRAM)
-				.detailext("ZoneID", machineState.zoneId)
-				.detailext("MachineID", machineState.machineId)
+				.detail("ZoneID", machineState.zoneId)
+				.detail("MachineID", machineState.machineId)
 				.trackLatest("MachineMetrics");
 		}
 	}
@@ -1381,6 +1381,21 @@ struct MetricHandle {
 	Reference<T> ref;
 };
 
+template<class T>
+struct Traceable<MetricHandle<T>> : Traceable<typename T::ValueType> {
+	static std::string toString(const MetricHandle<T>& value) {
+		return Traceable<typename T::ValueType>::toString(value.getValue());
+	}
+};
+
+template<class T>
+struct SpecialTraceMetricType<MetricHandle<T>> : SpecialTraceMetricType<typename T::ValueType> {
+	using parent = SpecialTraceMetricType<typename T::ValueType>;
+	static auto getValue(const MetricHandle<T>& value) -> decltype(parent::getValue(value.getValue())) {
+		return parent::getValue(value.getValue());
+	}
+};
+
 typedef MetricHandle<Int64Metric> Int64MetricHandle;
 typedef MetricHandle<VersionMetric> VersionMetricHandle;
 typedef MetricHandle<BoolMetric> BoolMetricHandle;
flow/Trace.cpp (116 changed lines)
@@ -654,11 +654,21 @@ void removeTraceRole(std::string role) {
 }
 
 TraceEvent::TraceEvent( const char* type, UID id ) : id(id), type(type), severity(SevInfo), initialized(false), enabled(true) {}
-TraceEvent::TraceEvent( Severity severity, const char* type, UID id ) : id(id), type(type), severity(severity), initialized(false), enabled(true) {}
-TraceEvent::TraceEvent( TraceInterval& interval, UID id ) : id(id), type(interval.type), severity(interval.severity), initialized(false), enabled(true) {
+TraceEvent::TraceEvent( Severity severity, const char* type, UID id )
+	: id(id), type(type), severity(severity), initialized(false),
+	  enabled(g_network == nullptr || FLOW_KNOBS->MIN_TRACE_SEVERITY <= severity) {}
+TraceEvent::TraceEvent( TraceInterval& interval, UID id )
+	: id(id), type(interval.type)
+	, severity(interval.severity)
+	, initialized(false)
+	, enabled(g_network == nullptr || FLOW_KNOBS->MIN_TRACE_SEVERITY <= interval.severity) {
 	init(interval);
 }
-TraceEvent::TraceEvent( Severity severity, TraceInterval& interval, UID id ) : id(id), type(interval.type), severity(severity), initialized(false), enabled(true) {
+TraceEvent::TraceEvent( Severity severity, TraceInterval& interval, UID id )
+	: id(id), type(interval.type),
+	  severity(severity),
+	  initialized(false),
+	  enabled(g_network == nullptr || FLOW_KNOBS->MIN_TRACE_SEVERITY <= severity) {
 	init(interval);
 }
 
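Note: this is the core of the change. `enabled` is now computed once at construction (`g_network == nullptr` keeps events alive before the network and knobs exist), so every subsequent `.detail()` can bail out before doing any formatting. As a sketch of the consequence (assuming the configured `MIN_TRACE_SEVERITY` is above `SevDebug`):

    TraceEvent(SevDebug, "HotPathProbe").detail("Key", k); // enabled == false:
    // no time lookup, no string formatting, no escaping, nothing written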
@@ -713,7 +723,7 @@ bool TraceEvent::init() {
 		severity = SevWarnAlways;
 	}
 
-	detail("Severity", severity);
+	detail("Severity", int(severity));
 	detailf("Time", "%.6f", time);
 	detail("Type", type);
 	if(g_network && g_network->isSimulated()) {
@@ -736,25 +746,23 @@ bool TraceEvent::init() {
 	return enabled;
 }
 
-TraceEvent& TraceEvent::error(class Error const& error, bool includeCancelled) {
-	if(enabled) {
-		if (error.code() != error_code_actor_cancelled || includeCancelled) {
-			err = error;
-			if (initialized) {
-				if (error.isInjectedFault()) {
-					detail("ErrorIsInjectedFault", true);
-					if(severity == SevError) severity = SevWarnAlways;
-				}
-				detail("Error", error.name());
-				detail("ErrorDescription", error.what());
-				detail("ErrorCode", error.code());
-			}
-		} else {
-			if (initialized) {
-				TraceEvent(g_network && g_network->isSimulated() ? SevError : SevWarnAlways, std::string(TRACE_EVENT_INVALID_SUPPRESSION).append(type).c_str()).suppressFor(5);
-			} else {
-				enabled = false;
-			}
+TraceEvent& TraceEvent::errorImpl(class Error const& error, bool includeCancelled) {
+	if (error.code() != error_code_actor_cancelled || includeCancelled) {
+		err = error;
+		if (initialized) {
+			if (error.isInjectedFault()) {
+				detail("ErrorIsInjectedFault", true);
+				if(severity == SevError) severity = SevWarnAlways;
+			}
+			detail("Error", error.name());
+			detail("ErrorDescription", error.what());
+			detail("ErrorCode", error.code());
+		}
+	} else {
+		if (initialized) {
+			TraceEvent(g_network && g_network->isSimulated() ? SevError : SevWarnAlways, std::string(TRACE_EVENT_INVALID_SUPPRESSION).append(type).c_str()).suppressFor(5);
+		} else {
+			enabled = false;
 		}
 	}
 	return *this;
@@ -781,66 +789,18 @@ TraceEvent& TraceEvent::detailImpl( std::string&& key, std::string&& value, bool
 	return *this;
 }
 
-TraceEvent& TraceEvent::detail( std::string key, std::string value ) {
-	return detailImpl(std::move(key), std::move(value));
-}
-TraceEvent& TraceEvent::detail( std::string key, double value ) {
-	init();
-	if(enabled)
-		tmpEventMetric->setField(key.c_str(), value);
-	return detailfNoMetric( std::move(key), "%g", value );
-}
-TraceEvent& TraceEvent::detail( std::string key, int value ) {
-	init();
-	if(enabled)
-		tmpEventMetric->setField(key.c_str(), (int64_t)value);
-	return detailfNoMetric( std::move(key), "%d", value );
-}
-TraceEvent& TraceEvent::detail( std::string key, unsigned value ) {
-	init();
-	if(enabled)
-		tmpEventMetric->setField(key.c_str(), (int64_t)value);
-	return detailfNoMetric( std::move(key), "%u", value );
-}
-TraceEvent& TraceEvent::detail( std::string key, long int value ) {
-	init();
-	if(enabled)
-		tmpEventMetric->setField(key.c_str(), (int64_t)value);
-	return detailfNoMetric( std::move(key), "%ld", value );
-}
-TraceEvent& TraceEvent::detail( std::string key, long unsigned int value ) {
-	init();
-	if(enabled)
-		tmpEventMetric->setField(key.c_str(), (int64_t)value);
-	return detailfNoMetric( std::move(key), "%lu", value );
-}
-TraceEvent& TraceEvent::detail( std::string key, long long int value ) {
-	init();
-	if(enabled)
-		tmpEventMetric->setField(key.c_str(), (int64_t)value);
-	return detailfNoMetric( std::move(key), "%lld", value );
-}
-TraceEvent& TraceEvent::detail( std::string key, long long unsigned int value ) {
-	init();
-	if(enabled)
-		tmpEventMetric->setField(key.c_str(), (int64_t)value);
-	return detailfNoMetric( std::move(key), "%llu", value );
-}
-TraceEvent& TraceEvent::detail( std::string key, const NetworkAddress& value ) {
-	return detailImpl( std::move(key), value.toString() );
-}
-TraceEvent& TraceEvent::detail( std::string key, const IPAddress& value ) {
-	return detailImpl( std::move(key), value.toString() );
-}
-TraceEvent& TraceEvent::detail( std::string key, const UID& value ) {
-	return detailf( std::move(key), "%016llx", value.first() ); // SOMEDAY: Log entire value?  We also do this explicitly in some "lists" in various individual TraceEvent calls
-}
-TraceEvent& TraceEvent::detailext( std::string key, StringRef const& value ) {
-	return detailImpl(std::move(key), value.printable());
-}
-TraceEvent& TraceEvent::detailext( std::string key, const Optional<Standalone<StringRef>>& value ) {
-	return detailImpl(std::move(key), (value.present()) ? value.get().printable() : "[not set]");
-}
+void TraceEvent::setField(const char* key, int64_t value) {
+	tmpEventMetric->setField(key, value);
+}
+
+void TraceEvent::setField(const char* key, double value) {
+	tmpEventMetric->setField(key, value);
+}
+
+void TraceEvent::setField(const char* key, const std::string& value) {
+	tmpEventMetric->setField(key, Standalone<StringRef>(value));
+}
 
 TraceEvent& TraceEvent::detailf( std::string key, const char* valueFormat, ... ) {
 	if (enabled) {
 		va_list args;
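Note: the dozen hand-written `detail` overloads (and both `detailext`s) are gone; the type dispatch moves into the `Traceable<T>`-constrained template declared in flow/Trace.h, and these three `setField` overloads are what it calls to keep feeding `tmpEventMetric`. A hedged sketch of the wiring (the tag-dispatched `setMetric` helper is my illustration of how the compile-time split can be done, not the verbatim header code):

    template <class T>
    typename std::enable_if<Traceable<T>::value, TraceEvent&>::type
    detail(std::string&& key, const T& value) {
        if (enabled) {
            setMetric(key.c_str(), value,
                      std::integral_constant<bool, SpecialTraceMetricType<T>::value>{});
            detailImpl(std::move(key), Traceable<T>::toString(value)); // text rendering
        }
        return *this;
    }
    template <class T>
    void setMetric(const char* key, const T& v, std::true_type) {
        setField(key, SpecialTraceMetricType<T>::getValue(v)); // numeric metric
    }
    template <class T>
    void setMetric(const char* key, const T& v, std::false_type) {
        setField(key, Traceable<T>::toString(v)); // string metric
    }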
flow/Trace.h (301 changed lines)
@@ -26,6 +26,7 @@
 #include <stdint.h>
 #include <string>
 #include <map>
+#include <type_traits>
 #include "flow/IRandom.h"
 #include "flow/Error.h"
 
@ -133,9 +134,232 @@ private:
|
||||||
};
|
};
|
||||||
|
|
||||||
struct DynamicEventMetric;
|
struct DynamicEventMetric;
|
||||||
class StringRef;
|
|
||||||
template <class T> class Standalone;
|
template<class IntType>
|
||||||
template <class T> class Optional;
|
char base16Char(IntType c) {
|
||||||
|
switch (c) {
|
||||||
|
case 0:
|
||||||
|
return '0';
|
||||||
|
case 1:
|
||||||
|
return '1';
|
||||||
|
case 2:
|
||||||
|
return '2';
|
||||||
|
case 3:
|
||||||
|
return '3';
|
||||||
|
case 4:
|
||||||
|
return '4';
|
||||||
|
case 5:
|
||||||
|
return '5';
|
||||||
|
case 6:
|
||||||
|
return '6';
|
||||||
|
case 7:
|
||||||
|
return '7';
|
||||||
|
case 8:
|
||||||
|
return '8';
|
||||||
|
case 9:
|
||||||
|
return '9';
|
||||||
|
case 10:
|
||||||
|
return 'a';
|
||||||
|
case 11:
|
||||||
|
return 'b';
|
||||||
|
case 12:
|
||||||
|
return 'c';
|
||||||
|
case 13:
|
||||||
|
return 'd';
|
||||||
|
case 14:
|
||||||
|
return 'e';
|
||||||
|
case 15:
|
||||||
|
return 'f';
|
||||||
|
default:
|
||||||
|
UNSTOPPABLE_ASSERT(false);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// forward declare format from flow.h as we
|
||||||
|
// can't include flow.h here
|
||||||
|
std::string format(const char* form, ...);
|
||||||
|
|
||||||
|
template<class T>
|
||||||
|
struct Traceable : std::false_type {};
|
||||||
|
|
||||||
|
#define FORMAT_TRACEABLE(type, fmt) \
|
||||||
|
template<> \
|
||||||
|
struct Traceable<type> : std::true_type { \
|
||||||
|
static std::string toString(type value) { \
|
||||||
|
return format(fmt, value); \
|
||||||
|
} \
|
||||||
|
}
|
||||||
|
|
||||||
|
FORMAT_TRACEABLE(bool, "%d");
|
||||||
|
FORMAT_TRACEABLE(signed char, "%d");
|
||||||
|
FORMAT_TRACEABLE(unsigned char, "%d");
|
||||||
|
FORMAT_TRACEABLE(short, "%d");
|
||||||
|
FORMAT_TRACEABLE(unsigned short, "%d");
|
||||||
|
FORMAT_TRACEABLE(int, "%d");
|
||||||
|
FORMAT_TRACEABLE(unsigned, "%u");
|
||||||
|
FORMAT_TRACEABLE(long int, "%ld");
|
||||||
|
FORMAT_TRACEABLE(unsigned long int, "%lu");
|
||||||
|
FORMAT_TRACEABLE(long long int, "%lld");
|
||||||
|
FORMAT_TRACEABLE(unsigned long long int, "%llu");
|
||||||
|
FORMAT_TRACEABLE(double, "%g");
|
||||||
|
FORMAT_TRACEABLE(volatile long, "%ld");
|
||||||
|
FORMAT_TRACEABLE(volatile unsigned long, "%lu");
|
||||||
|
FORMAT_TRACEABLE(volatile long long, "%lld");
|
||||||
|
FORMAT_TRACEABLE(volatile unsigned long long, "%llu");
|
||||||
|
FORMAT_TRACEABLE(volatile double, "%g");
|
||||||
|
|
||||||
|
|
||||||
|
template<>
|
||||||
|
struct Traceable<UID> : std::true_type {
|
||||||
|
static std::string toString(const UID& value) {
|
||||||
|
return format("%016llx", value.first());
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
template<class Str>
|
||||||
|
struct TraceableString {
|
||||||
|
static auto begin(const Str& value) -> decltype(value.begin()) {
|
||||||
|
return value.begin();
|
||||||
|
}
|
||||||
|
|
||||||
|
static bool atEnd(const Str& value, decltype(value.begin()) iter) {
|
||||||
|
return iter == value.end();
|
||||||
|
}
|
||||||
|
|
||||||
|
static std::string toString(const Str& value) {
|
||||||
|
return value.toString();
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
template<>
|
||||||
|
struct TraceableString<std::string> {
|
||||||
|
static auto begin(const std::string& value) -> decltype(value.begin()) {
|
||||||
|
return value.begin();
|
||||||
|
}
|
||||||
|
|
||||||
|
static bool atEnd(const std::string& value, decltype(value.begin()) iter) {
|
||||||
|
return iter == value.end();
|
||||||
|
}
|
||||||
|
|
||||||
|
template<class S>
|
||||||
|
static std::string toString(S&& value) {
|
||||||
|
return std::forward<S>(value);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
template<>
|
||||||
|
struct TraceableString<const char*> {
|
||||||
|
static const char* begin(const char* value) {
|
||||||
|
return value;
|
||||||
|
}
|
||||||
|
|
||||||
|
static bool atEnd(const char* value, const char* iter) {
|
||||||
|
return *iter == '\0';
|
||||||
|
}
|
||||||
|
|
||||||
|
static std::string toString(const char* value) {
|
||||||
|
return std::string(value);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
template<size_t S>
|
||||||
|
struct TraceableString<char[S]> {
|
||||||
|
static const char* begin(const char* value) {
|
||||||
|
return value;
|
||||||
|
}
|
||||||
|
|
||||||
|
static bool atEnd(const char* value, const char* iter) {
|
||||||
|
return iter - value == S;
|
||||||
|
}
|
||||||
|
|
||||||
|
static std::string toString(const char* value) {
|
||||||
|
return std::string(value, S);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
template<>
|
||||||
|
struct TraceableString<char*> {
|
||||||
|
static const char* begin(char* value) {
|
||||||
|
return value;
|
||||||
|
}
|
||||||
|
|
||||||
|
static bool atEnd(char* value, const char* iter) {
|
||||||
|
return *iter == '\0';
|
||||||
|
}
|
||||||
|
|
||||||
|
static std::string toString(char* value) {
|
||||||
|
return std::string(value);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
template<class T>
struct TraceableStringImpl : std::true_type {
	static constexpr bool isPrintable(char c) { return c > 32 && c < 127; }

	template<class Str>
	static std::string toString(Str&& value) {
		// If all characters are printable ASCII, we simply return the string
		int nonPrintables = 0;
		int numBackslashes = 0;
		int size = 0;
		for (auto iter = TraceableString<T>::begin(value); !TraceableString<T>::atEnd(value, iter); ++iter) {
			++size;
			if (!isPrintable(char(*iter))) {
				++nonPrintables;
			} else if (*iter == '\\') {
				++numBackslashes;
			}
		}
		if (nonPrintables == 0 && numBackslashes == 0) {
			return TraceableString<T>::toString(std::forward<Str>(value));
		}
		std::string result;
		result.reserve(size - nonPrintables + (nonPrintables * 4) + numBackslashes);
		for (auto iter = TraceableString<T>::begin(value); !TraceableString<T>::atEnd(value, iter); ++iter) {
			if (*iter == '\\') {
				// Check backslash before isPrintable(): '\\' is itself printable,
				// so testing printability first would make this branch unreachable.
				result.push_back('\\');
				result.push_back('\\');
			} else if (isPrintable(*iter)) {
				result.push_back(*iter);
			} else {
				result.push_back('\\');
				result.push_back('x');
				result.push_back(base16Char((*iter / 16) % 16));
				result.push_back(base16Char(*iter % 16));
			}
		}
		return result;
	}
};

template<>
struct Traceable<const char*> : TraceableStringImpl<const char*> {};
template<>
struct Traceable<char*> : TraceableStringImpl<char*> {};
template<size_t S>
struct Traceable<char[S]> : TraceableStringImpl<char[S]> {};
template<>
struct Traceable<std::string> : TraceableStringImpl<std::string> {};

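The effect of the escaping logic, hand-derived from isPrintable() (which accepts only ASCII 33-126) and assuming base16Char maps 10 to 'a':

// Traceable<std::string>::toString(...) examples (left side in C literal notation):
// "abc"  -> abc       (all printable: returned unchanged)
// "a\\b" -> a\\b      (a literal backslash is doubled)
// "a\nb" -> a\x0ab    (non-printable bytes become \xNN)
// "a b"  -> a\x20b    (space fails isPrintable, so it is escaped as well)
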
template<class T>
struct SpecialTraceMetricType
	: std::conditional<std::is_integral<T>::value || std::is_enum<T>::value,
	                   std::true_type, std::false_type>::type {
	static int64_t getValue(T v) {
		return v;
	}
};

#define TRACE_METRIC_TYPE(from, to) \
	template<> \
	struct SpecialTraceMetricType<from> : std::true_type { \
		static to getValue(from v) { \
			return v; \
		} \
	}

TRACE_METRIC_TYPE(double, double);

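SpecialTraceMetricType decides whether a detail value can also be recorded as a typed event metric (int64_t for integral and enum types, double via the macro above) rather than only as a string. A hypothetical extension -- not part of this patch -- would follow the same pattern:

// Hypothetical: forward float metrics as double
TRACE_METRIC_TYPE(float, double);
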
struct TraceEvent {
	TraceEvent( const char* type, UID id = UID() ); // Assumes SevInfo severity

@@ -147,28 +371,67 @@ struct TraceEvent {
	static bool isNetworkThread();

	//Must be called directly after constructing the trace event
	TraceEvent& error(const class Error& e, bool includeCancelled=false);
	TraceEvent& error(const class Error& e, bool includeCancelled=false) {
		if (enabled) {
			return errorImpl(e, includeCancelled);
		}
		return *this;
	}

	TraceEvent& detail( std::string key, std::string value );
	TraceEvent& detail( std::string key, double value );
	TraceEvent& detail( std::string key, long int value );
	TraceEvent& detail( std::string key, long unsigned int value );
	TraceEvent& detail( std::string key, long long int value );
	TraceEvent& detail( std::string key, long long unsigned int value );
	TraceEvent& detail( std::string key, int value );
	TraceEvent& detail( std::string key, unsigned value );
	TraceEvent& detail( std::string key, const struct NetworkAddress& value );
	TraceEvent& detail( std::string key, const IPAddress& value );

	template<class T>
	typename std::enable_if<Traceable<T>::value, TraceEvent&>::type
	detail( std::string&& key, const T& value ) {
		if (enabled && init()) {
			auto s = Traceable<T>::toString(value);
			addMetric(key.c_str(), value, s);
			return detailImpl(std::move(key), std::move(s), false);
		}
		return *this;
	}

	template<class T>
	typename std::enable_if<Traceable<T>::value, TraceEvent&>::type
	detail( const char* key, const T& value ) {
		if (enabled && init()) {
			auto s = Traceable<T>::toString(value);
			addMetric(key, value, s);
			return detailImpl(std::string(key), std::move(s), false);
		}
		return *this;
	}
	template<class T>
	typename std::enable_if<std::is_enum<T>::value, TraceEvent&>::type
	detail(const char* key, T value) {
		if (enabled && init()) {
			setField(key, int64_t(value));
			return detailImpl(std::string(key), format("%d", value), false);
		}
		return *this;
	}
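With these templates, the long list of per-type detail() overloads above collapses into a single entry point: any T with a Traceable<T> specialization is accepted, the string is built only when the event is enabled, and numeric values are additionally forwarded as typed metrics. A sketch of a call site under these assumptions (id a UID, name a std::string):

// Sketch: all of these now resolve to the templated detail()
TraceEvent("Example", id)
	.detail("Count", 42)    // integral: int64_t metric plus "%d" text
	.detail("Ratio", 0.5)   // double: double metric plus "%g" text
	.detail("Name", name);  // string: escaped via TraceableStringImpl
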
	TraceEvent& detailf( std::string key, const char* valueFormat, ... );
	TraceEvent& detailext( std::string key, const StringRef& value );
	TraceEvent& detailext( std::string key, const Optional<Standalone<StringRef>>& value );
private:
	template<class T>
	typename std::enable_if<SpecialTraceMetricType<T>::value, void>::type
	addMetric(const char* key, const T& value, const std::string&) {
		setField(key, SpecialTraceMetricType<T>::getValue(value));
	}

	template<class T>
	typename std::enable_if<!SpecialTraceMetricType<T>::value, void>::type
	addMetric(const char* key, const T&, const std::string& value) {
		setField(key, value);
	}

	void setField(const char* key, int64_t value);
	void setField(const char* key, double value);
	void setField(const char* key, const std::string& value);

	TraceEvent& errorImpl(const class Error& e, bool includeCancelled=false);
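	// The two addMetric overloads select via enable_if on SpecialTraceMetricType:
	// numeric values reach setField() in their native representation, everything
	// else falls back to the already-formatted string. Roughly:
	//   addMetric("Count", 42, "42")       -> setField("Count", int64_t(42));   (first overload)
	//   addMetric("Name", str, escapedStr) -> setField("Name", escapedStr);     (second overload)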
	// Private version of detailf that does NOT write to the eventMetric. This is to be used by other detail methods
	// which can write field metrics of a more appropriate type than string but use detailf() to add to the TraceEvent.
	TraceEvent& detailfNoMetric( std::string&& key, const char* valueFormat, ... );
	TraceEvent& detailImpl( std::string&& key, std::string&& value, bool writeEventMetricField=true );
public:
	TraceEvent& detail( std::string key, const UID& value );
	TraceEvent& backtrace(const std::string& prefix = "");
	TraceEvent& trackLatest( const char* trackingKey );
	TraceEvent& sample( double sampleRate, bool logSampleRate=true );

@@ -249,8 +512,8 @@ public:
	void setLatestError( const TraceEventFields& contents );
	TraceEventFields getLatestError();
private:
	std::map<NetworkAddress, std::map<std::string, TraceEventFields>> latest;
	std::map<struct NetworkAddress, std::map<std::string, TraceEventFields>> latest;
	std::map<NetworkAddress, TraceEventFields> latestErrors;
	std::map<struct NetworkAddress, TraceEventFields> latestErrors;
};

extern LatestEventCache latestEventCache;

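Making a project type traceable now means specializing Traceable once instead of adding another detail() overload. A hypothetical example (ShardId is invented for illustration):

// Hypothetical user-defined type
struct ShardId { uint64_t value; };

template<>
struct Traceable<ShardId> : std::true_type {
	static std::string toString(const ShardId& id) {
		return format("%016llx", id.value);
	}
};

// Usage: TraceEvent("ShardMoved").detail("Shard", shard);
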
@@ -80,6 +80,8 @@ enum {
class Void;

template<class T> class Optional;

struct IPAddress {
	// Represents both IPv4 and IPv6 address. For IPv4 addresses,
	// only the first 32bits are relevant and rest are initialized to

@@ -144,6 +146,13 @@ private:
	} addr;
};

template<>
struct Traceable<IPAddress> : std::true_type {
	static std::string toString(const IPAddress& value) {
		return value.toString();
	}
};

struct NetworkAddress {
	// A NetworkAddress identifies a particular running server (i.e. a TCP endpoint).
	IPAddress ip;

@@ -192,6 +201,13 @@ struct NetworkAddress {
	}
};

template<>
struct Traceable<NetworkAddress> : std::true_type {
	static std::string toString(const NetworkAddress& value) {
		return value.toString();
	}
};

namespace std
{
template <>
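With these specializations in place, addresses can be passed to detail() directly, and the toString() conversion runs only if the event is actually enabled. A sketch, assuming NetworkAddress::parse as the usual way to construct an address:

// Sketch: no manual formatting needed at the call site
NetworkAddress peer = NetworkAddress::parse("127.0.0.1:4500");
TraceEvent("ConnectionOpened").detail("Peer", peer);
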