Some more trace event normalization
This commit is contained in:
parent 943dfa278b
commit 99c9958db7
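For context, every change below applies one convention: TraceEvent names and .detail() keys become UpperCamelCase with no internal underscores (severity constants such as SevError and ordinary code identifiers are untouched). A minimal sketch of the pattern, using a hypothetical event name and call site rather than code from this commit:

    // Hypothetical illustration of the naming rule being enforced;
    // "ExampleRetryFailed" and "NumRetries" are invented names, not
    // events touched by this commit.
    void logRetry(int numRetries) {
        // Before: TraceEvent("example_Retry_failed").detail("Num_retries", numRetries);
        TraceEvent("ExampleRetryFailed").detail("NumRetries", numRetries);
    }
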
@@ -62,7 +62,7 @@ Version getVersionFromString(std::string const& value) {
 	Version version(-1);
 	int n = 0;
 	if (sscanf(value.c_str(), "%lld%n", (long long*)&version, &n) != 1 || n != value.size()) {
-		TraceEvent(SevWarnAlways, "getVersionFromString").detail("InvalidVersion", value);
+		TraceEvent(SevWarnAlways, "GetVersionFromString").detail("InvalidVersion", value);
 		throw restore_invalid_version();
 	}
 	return version;
@@ -430,7 +430,7 @@ ACTOR Future<Void> readCommitted(Database cx, PromiseStream<RCGroup> results, Fu
 				rcGroup.groupKey = groupKey;
 			}
 			else if (rcGroup.groupKey != groupKey) {
-				//TraceEvent("Log_ReadCommitted").detail("SendGroup0", rcGroup.groupKey).detail("ItemSize", rcGroup.items.size()).detail("Data_length",rcGroup.items[0].value.size());
+				//TraceEvent("Log_ReadCommitted").detail("SendGroup0", rcGroup.groupKey).detail("ItemSize", rcGroup.items.size()).detail("DataLength",rcGroup.items[0].value.size());
 				//state uint32_t len(0);
 				//for (size_t j = 0; j < rcGroup.items.size(); ++j) {
 				//	len += rcGroup.items[j].value.size();
@@ -124,15 +124,15 @@ namespace FdbClientLogEvents {
 
 	void logEvent(std::string id) const {
 		for (auto &read_range : req.transaction.read_conflict_ranges) {
-			TraceEvent("TransactionTrace_Commit_ReadConflictRange").detail("TransactionID", id).detail("Begin", printable(read_range.begin)).detail("End", printable(read_range.end));
+			TraceEvent("TransactionTrace_CommitReadConflictRange").detail("TransactionID", id).detail("Begin", printable(read_range.begin)).detail("End", printable(read_range.end));
 		}
 
 		for (auto &write_range : req.transaction.write_conflict_ranges) {
-			TraceEvent("TransactionTrace_Commit_WriteConflictRange").detail("TransactionID", id).detail("Begin", printable(write_range.begin)).detail("End", printable(write_range.end));
+			TraceEvent("TransactionTrace_CommitWriteConflictRange").detail("TransactionID", id).detail("Begin", printable(write_range.begin)).detail("End", printable(write_range.end));
 		}
 
 		for (auto &mutation : req.transaction.mutations) {
-			TraceEvent("TransactionTrace_Commit_Mutation").detail("TransactionID", id).detail("Mutation", mutation.toString());
+			TraceEvent("TransactionTrace_CommitMutation").detail("TransactionID", id).detail("Mutation", mutation.toString());
 		}
 
 		TraceEvent("TransactionTrace_Commit").detail("TransactionID", id).detail("Latency", latency).detail("NumMutations", numMutations).detail("CommitSizeBytes", commitBytes);
@@ -194,15 +194,15 @@ namespace FdbClientLogEvents {
 
 	void logEvent(std::string id) const {
 		for (auto &read_range : req.transaction.read_conflict_ranges) {
-			TraceEvent("TransactionTrace_CommitError_ReadConflictRange").detail("TransactionID", id).detail("Begin", printable(read_range.begin)).detail("End", printable(read_range.end));
+			TraceEvent("TransactionTrace_CommitErrorReadConflictRange").detail("TransactionID", id).detail("Begin", printable(read_range.begin)).detail("End", printable(read_range.end));
 		}
 
 		for (auto &write_range : req.transaction.write_conflict_ranges) {
-			TraceEvent("TransactionTrace_CommitError_WriteConflictRange").detail("TransactionID", id).detail("Begin", printable(write_range.begin)).detail("End", printable(write_range.end));
+			TraceEvent("TransactionTrace_CommitErrorWriteConflictRange").detail("TransactionID", id).detail("Begin", printable(write_range.begin)).detail("End", printable(write_range.end));
 		}
 
 		for (auto &mutation : req.transaction.mutations) {
-			TraceEvent("TransactionTrace_CommitError_Mutation").detail("TransactionID", id).detail("Mutation", mutation.toString());
+			TraceEvent("TransactionTrace_CommitErrorMutation").detail("TransactionID", id).detail("Mutation", mutation.toString());
 		}
 
 		TraceEvent("TransactionTrace_CommitError").detail("TransactionID", id).detail("ErrCode", errCode);
@@ -109,7 +109,7 @@ namespace dbBackup {
 	Future<Void> checkTaskVersion(Tr tr, Reference<Task> task, StringRef name, uint32_t version) {
 		uint32_t taskVersion = task->getVersion();
 		if (taskVersion > version) {
-			TraceEvent(SevError, "BA_BackupRangeTaskFunc_execute").detail("TaskVersion", taskVersion).detail("Name", printable(name)).detail("Version", version);
+			TraceEvent(SevError, "BA_BackupRangeTaskFuncExecute").detail("TaskVersion", taskVersion).detail("Name", printable(name)).detail("Version", version);
 			Void _ = wait(logError(tr, Subspace(databaseBackupPrefixRange.begin).get(BackupAgentBase::keyErrors).pack(task->params[BackupAgentBase::keyConfigLogUid]),
 				format("ERROR: %s task version `%lu' is greater than supported version `%lu'", task->params[Task::reservedTaskParamKeyType].toString().c_str(), (unsigned long)taskVersion, (unsigned long)version)));
@@ -1335,7 +1335,7 @@ namespace dbBackup {
 			state Reference<TaskFuture> onDone = futureBucket->unpack(task->params[Task::reservedTaskParamKeyDone]);
 
 			if (task->params[BackupAgentBase::destUid].size() == 0) {
-				TraceEvent("DBA_CopyDiffLogsUpgradeTaskFunc_abort_in_upgrade");
+				TraceEvent("DBA_CopyDiffLogsUpgradeTaskFuncAbortInUpgrade");
 				Key _ = wait(AbortOldBackupTaskFunc::addTask(tr, taskBucket, task, TaskCompletionKey::signal(onDone)));
 			} else {
 				Version beginVersion = BinaryReader::fromStringRef<Version>(task->params[DatabaseBackupAgent::keyBeginVersion], Unversioned());
@@ -705,7 +705,7 @@ namespace fileBackup {
 		if (taskVersion > version) {
 			state Error err = task_invalid_version();
 
-			TraceEvent(SevWarn, "BA_BackupRangeTaskFunc_Execute").detail("TaskVersion", taskVersion).detail("Name", printable(name)).detail("Version", version);
+			TraceEvent(SevWarn, "BA_BackupRangeTaskFuncExecute").detail("TaskVersion", taskVersion).detail("Name", printable(name)).detail("Version", version);
 			if (KeyBackedConfig::TaskParams.uid().exists(task)) {
 				std::string msg = format("%s task version `%lu' is greater than supported version `%lu'", task->params[Task::reservedTaskParamKeyType].toString().c_str(), (unsigned long)taskVersion, (unsigned long)version);
 				Void _ = wait(BackupConfig(task).logError(cx, err, msg));
@@ -2690,7 +2690,7 @@ ACTOR Future<GetReadVersionReply> getConsistentReadVersion( DatabaseContext *cx,
 		}
 	} catch (Error& e) {
 		if( e.code() != error_code_broken_promise && e.code() != error_code_actor_cancelled )
-			TraceEvent(SevError, "getConsistentReadVersionError").error(e);
+			TraceEvent(SevError, "GetConsistentReadVersionError").error(e);
 		throw;
 	}
 }
@@ -101,7 +101,7 @@ int eraseDirectoryRecursive(std::string const& dir) {
 	   the directory we're deleting doesn't exist in the first
 	   place */
 	if (error && errno != ENOENT) {
-		TraceEvent(SevError, "nftw").detail("Directory", dir).GetLastError();
+		TraceEvent(SevError, "EraseDirectoryRecursiveError").detail("Directory", dir).GetLastError();
 		throw platform_error();
 	}
 #else
@@ -1392,7 +1392,7 @@ void SQLiteDB::createFromScratch() {
 		ASSERT( freetable == table+1 );
 		endTransaction();
 	} else {
-		TraceEvent("pgnoRoot").detail("Value", pgnoRoot);
+		TraceEvent("PgnoRoot").detail("Value", pgnoRoot);
 		checkError("CheckTables", SQLITE_CORRUPT);
 	}
 }
@@ -156,7 +156,7 @@ public:
 		for (auto entry : resultEntries) {
 			locations.push_back(locationOffset + *logServerMap->getObject(entry));
 		}
-		//TraceEvent("getPushLocations").detail("Policy", tLogPolicy->info())
+		//TraceEvent("GetPushLocations").detail("Policy", tLogPolicy->info())
 		//	.detail("Results", locations.size()).detail("Selection", logServerSet->size())
 		//	.detail("Included", alsoServers.size()).detail("Duration", timer() - t);
 	}
@@ -843,7 +843,7 @@ namespace oldTLog {
 			if(it->first != currentVersion) {
 				if (messages.getLength() >= SERVER_KNOBS->DESIRED_TOTAL_BYTES) {
 					endVersion = it->first;
-					//TraceEvent("tLogPeekMessagesReached2", self->dbgid);
+					//TraceEvent("TLogPeekMessagesReached2", self->dbgid);
 					break;
 				}
@@ -897,7 +897,7 @@ namespace oldTLog {
 			return Void();
 		}
 
-		//TraceEvent("tLogPeekMessages0", self->dbgid).detail("ReqBeginEpoch", req.begin.epoch).detail("ReqBeginSeq", req.begin.sequence).detail("Epoch", self->epoch()).detail("PersistentDataSeq", self->persistentDataSequence).detail("Tag1", printable(req.tag1)).detail("Tag2", printable(req.tag2));
+		//TraceEvent("TLogPeekMessages0", self->dbgid).detail("ReqBeginEpoch", req.begin.epoch).detail("ReqBeginSeq", req.begin.sequence).detail("Epoch", self->epoch()).detail("PersistentDataSeq", self->persistentDataSequence).detail("Tag1", printable(req.tag1)).detail("Tag2", printable(req.tag2));
 		// Wait until we have something to return that the caller doesn't already have
 		if( logData->version.get() < req.begin ) {
 			Void _ = wait( logData->version.whenAtLeast( req.begin ) );
@@ -907,7 +907,7 @@ namespace oldTLog {
 		state Version endVersion = logData->version.get() + 1;
 
 		//grab messages from disk
-		//TraceEvent("tLogPeekMessages", self->dbgid).detail("ReqBeginEpoch", req.begin.epoch).detail("ReqBeginSeq", req.begin.sequence).detail("Epoch", self->epoch()).detail("PersistentDataSeq", self->persistentDataSequence).detail("Tag1", printable(req.tag1)).detail("Tag2", printable(req.tag2));
+		//TraceEvent("TLogPeekMessages", self->dbgid).detail("ReqBeginEpoch", req.begin.epoch).detail("ReqBeginSeq", req.begin.sequence).detail("Epoch", self->epoch()).detail("PersistentDataSeq", self->persistentDataSequence).detail("Tag1", printable(req.tag1)).detail("Tag2", printable(req.tag2));
 		if( req.begin <= logData->persistentDataDurableVersion ) {
 			// Just in case the durable version changes while we are waiting for the read, we grab this data from memory. We may or may not actually send it depending on
 			// whether we get enough data from disk.
@@ -1180,7 +1180,7 @@ ACTOR void setupAndRun(std::string dataFolder, const char *testFile, bool reboot
 		writeFile(joinPath(clusterFileDir, "fdb.cluster"), connFile.get().toString());
 		Void _ = wait(timeoutError(runTests(Reference<ClusterConnectionFile>(new ClusterConnectionFile(joinPath(clusterFileDir, "fdb.cluster"))), TEST_TYPE_FROM_FILE, TEST_ON_TESTERS, testerCount, testFile, startingConfiguration), buggifyActivated ? 36000.0 : 5400.0));
 	} catch (Error& e) {
-		TraceEvent(SevError, "setupAndRunError").error(e);
+		TraceEvent(SevError, "SetupAndRunError").error(e);
 	}
 
 	TraceEvent("SimulatedSystemDestruct");
@@ -527,7 +527,7 @@ struct RolesInfo {
 			obj["query_queue_max"] = parseInt(extractAttribute(metrics, "QueryQueueMax"));
 			obj["finished_queries"] = parseCounter(extractAttribute(metrics, "FinishedQueries"));
 
-			Version version = parseInt64(extractAttribute(metrics, "version"));
+			Version version = parseInt64(extractAttribute(metrics, "Version"));
 			obj["data_version"] = version;
 
 			if(maxTLogVersion > 0) {
@@ -700,7 +700,7 @@ static int asyncSleep(sqlite3_vfs *pVfs, int microseconds){
 		waitFor( g_network->delay( microseconds*1e-6, TaskDefaultDelay ) || simCancel );
 		return microseconds;
 	} catch( Error &e ) {
-		TraceEvent(SevError, "asyncSleepError").error(e,true);
+		TraceEvent(SevError, "AsyncSleepError").error(e,true);
 		return 0;
 	}
 }
@@ -50,7 +50,7 @@ ACTOR Future<Void> waitFailureClient(RequestStream<ReplyPromise<Void>> waitFailu
 		} catch (Error &e){
 			if (e.code() == error_code_actor_cancelled)
 				throw;
-			TraceEvent(SevError, "waitFailureClientError").error(e);
+			TraceEvent(SevError, "WaitFailureClientError").error(e);
 			ASSERT(false); // unknown error from waitFailureServer
 		}
 	}
@@ -74,7 +74,7 @@ ACTOR Future<Void> waitFailureTracker(RequestStream<ReplyPromise<Void>> waitFail
 		} catch (Error &e){
 			if (e.code() == error_code_actor_cancelled)
 				throw;
-			TraceEvent(SevError, "waitFailureClientError").error(e);
+			TraceEvent(SevError, "WaitFailureClientError").error(e);
 			ASSERT(false); // unknown error from waitFailureServer
 		}
 	}
@@ -538,7 +538,7 @@ ACTOR Future<Void> dumpDatabase( Database cx, std::string outputFilename, KeyRan
 			}
 		}
 	} catch (Error& e) {
-		TraceEvent(SevError,"dumpDatabaseError").error(e).detail("Filename", outputFilename);
+		TraceEvent(SevError,"DumpDatabaseError").error(e).detail("Filename", outputFilename);
 		throw;
 	}
 }
@@ -1017,7 +1017,7 @@ ACTOR Future<GetKeyValuesReply> readRange( StorageServer* data, Version version,
 		// all but the last item are less than *pLimitBytes
 		ASSERT( result.data.size() == 0 || *pLimitBytes + result.data.end()[-1].expectedSize() + sizeof(KeyValueRef) > 0 );
 		/*if (*pLimitBytes <= 0)
-			TraceEvent(SevWarn, "readRangeLimitExceeded")
+			TraceEvent(SevWarn, "ReadRangeLimitExceeded")
 				.detail("Version", version)
 				.detail("Begin", printable(range.begin) )
 				.detail("End", printable(range.end) )
@@ -2510,7 +2510,7 @@ ACTOR Future<Void> update( StorageServer* data, bool* pReceivedUpdate )
 
 		//TraceEvent("StorageServerUpdated", data->thisServerID).detail("Ver", ver).detail("DataVersion", data->version.get())
 		//	.detail("LastTLogVersion", data->lastTLogVersion).detail("NewOldest", updater.newOldestVersion).detail("DesiredOldest",data->desiredOldestVersion.get())
-		//	.detail("MAX_READ_TRANSACTION_LIFE_VERSIONS", SERVER_KNOBS->MAX_READ_TRANSACTION_LIFE_VERSIONS);
+		//	.detail("MaxReadTransactionLifeVersions", SERVER_KNOBS->MAX_READ_TRANSACTION_LIFE_VERSIONS);
 
 		// Trigger updateStorage if necessary
 		ASSERT(updater.newOldestVersion <= data->version.get());
@@ -815,7 +815,7 @@ ACTOR Future<Void> workerServer( Reference<ClusterConnectionFile> connFile, Refe
 				recruited.initEndpoints();
 
 				std::map<std::string, std::string> details;
-				startRole( recruited.id(), interf.id(), "logRouter", details );
+				startRole( recruited.id(), interf.id(), "LogRouter", details );
 
 				DUMPTOKEN( recruited.peekMessages );
 				DUMPTOKEN( recruited.popMessages );
@@ -40,7 +40,7 @@ struct DDMetricsWorkload : TestWorkload {
 	ACTOR Future<int> getHighPriorityRelocationsInFlight( Database cx, DDMetricsWorkload *self ) {
 		WorkerInterface masterWorker = wait(getMasterWorker(cx, self->dbInfo));
 
-		TraceEvent("getHighPriorityReliocationsInFlight").detail("Database", printable(cx->dbName)).detail("Stage", "ContactingMaster");
+		TraceEvent("GetHighPriorityReliocationsInFlight").detail("Database", printable(cx->dbName)).detail("Stage", "ContactingMaster");
 		Standalone<StringRef> md = wait( timeoutError(masterWorker.eventLogRequest.getReply(
 			EventLogRequest( StringRef( cx->dbName.toString() + "/MovingData" ) ) ), 1.0 ) );
 		int relocations;
@@ -95,7 +95,7 @@ struct PerformanceWorkload : TestWorkload {
 		TraceEvent start("PerformaceSetupStarting");
 		for(int i = 0; i < options.size(); i++) {
 			for(int j = 0; j < options[i].size(); j++) {
-				start.detail(format("option-%d-%d", i, j).c_str(),
+				start.detail(format("Option-%d-%d", i, j).c_str(),
 					printable( options[i][j].key ) + "=" + printable( options[i][j].value ) );
 			}
 		}
@@ -157,7 +157,7 @@ struct PerformanceWorkload : TestWorkload {
 		start.detail("RateTarget", tps);
 		for(int i = 0; i < options.size(); i++) {
 			for(int j = 0; j < options[i].size(); j++) {
-				start.detail(format("option-%d-%d", i, j).c_str(),
+				start.detail(format("Option-%d-%d", i, j).c_str(),
 					printable( options[i][j].key ) + "=" + printable( options[i][j].value ) );
 			}
 		}
@@ -109,10 +109,10 @@ struct StatusWorkload : TestWorkload {
 			}
 		}
 	} catch (std::exception& e) {
-		TraceEvent(SevError,"schemaCoverageRequirementsException").detail("What", e.what());
+		TraceEvent(SevError,"SchemaCoverageRequirementsException").detail("What", e.what());
 		throw unknown_error();
 	} catch (...) {
-		TraceEvent(SevError,"schemaCoverageRequirementsException");
+		TraceEvent(SevError,"SchemaCoverageRequirementsException");
 		throw unknown_error();
 	}
 }
@@ -271,19 +271,19 @@ struct TaskBucketCorrectnessWorkload : TestWorkload {
 
 		Standalone<RangeResultRef> values = wait(tr->getRange(KeyRangeRef(LiteralStringRef("Hello_\x00"), LiteralStringRef("Hello_\xff")), CLIENT_KNOBS->TOO_MANY));
 		if (values.size() != data.size()){
-			TraceEvent(SevError, "checkSayHello").detail("CountNotMatch_Is", values.size()).detail("ShouldBe", data.size());
+			TraceEvent(SevError, "CheckSayHello").detail("CountNotMatchIs", values.size()).detail("ShouldBe", data.size());
 			for (auto & s : values) {
-				TraceEvent("checkSayHello").detail("Item", printable(s)).detail("Value", printable(s.value));
+				TraceEvent("CheckSayHello").detail("Item", printable(s)).detail("Value", printable(s.value));
 			}
 			return false;
 		}
 
 		for (auto & s : values) {
-			// TraceEvent("checkSayHello").detail("Item", printable(s)).detail("Value", printable(s.value));
+			// TraceEvent("CheckSayHello").detail("Item", printable(s)).detail("Value", printable(s.value));
 			data.erase(s.value.toString());
 		}
 		if (data.size() != 0){
-			TraceEvent(SevError, "checkSayHello").detail("DataNotMatch", data.size());
+			TraceEvent(SevError, "CheckSayHello").detail("DataNotMatch", data.size());
 			return false;
 		}
 
@@ -125,7 +125,7 @@ struct WatchAndWaitWorkload : TestWorkload {
 			}
 		}
 	} catch( Error &e ) {
-		TraceEvent(SevError, "watchAndWaitError").error(e);
+		TraceEvent(SevError, "WatchAndWaitError").error(e);
 		throw e;
 	}
 }
@@ -75,7 +75,7 @@ private:
 	int open() {
 		fd = eventfd(0, EFD_NONBLOCK);
 		if (fd<0) {
-			TraceEvent(SevError, "eventfdError").GetLastError();
+			TraceEvent(SevError, "EventfdError").GetLastError();
 			throw platform_error();
 		}
 		return fd;
@@ -35,7 +35,7 @@ private:
 	uint64_t gen64() {
 		uint64_t curr = next;
 		next = (uint64_t(random()) << 32) ^ random();
-		if (TRACE_SAMPLE()) TraceEvent(SevSample, "random");
+		if (TRACE_SAMPLE()) TraceEvent(SevSample, "Random");
 		return curr;
 	}
 
@@ -549,7 +549,7 @@ void Net2::run() {
 
 #ifdef WIN32
 	if (timeBeginPeriod(1) != TIMERR_NOERROR)
-		TraceEvent(SevError, "timeBeginPeriodError");
+		TraceEvent(SevError, "TimeBeginPeriodError");
 #endif
 
 	timeOffsetLogger = logTimeOffset();
@@ -1741,11 +1741,10 @@ bool createDirectory( std::string const& directory ) {
 		if (errno == EEXIST)
 			continue;
 
-		TraceEvent(SevError, "mkdir").detail("Directory", directory).GetLastError();
+		TraceEvent(SevError, "CreateDirectory").detail("Directory", directory).GetLastError();
 		if (errno == EACCES)
 			throw file_not_writable();
 		else {
-			TraceEvent(SevError, "CreateDirectory").detail("Directory", directory).GetLastError();
 			throw platform_error();
 		}
 	}
@@ -2074,7 +2073,7 @@ void writeFileBytes(std::string const& filename, const uint8_t* data, size_t cou
 	FILE* f = fopen(filename.c_str(), "wb");
 	if (!f)
 	{
-		TraceEvent(SevError, "writeFileBytes").detail("Filename", filename).GetLastError();
+		TraceEvent(SevError, "WriteFileBytes").detail("Filename", filename).GetLastError();
 		throw file_not_writable();
 	}
 
@@ -2082,7 +2081,7 @@ void writeFileBytes(std::string const& filename, const uint8_t* data, size_t cou
 	size_t length = fwrite(data, sizeof(uint8_t), count, f);
 	if (length != count)
 	{
-		TraceEvent(SevError, "writeFileBytes").detail("Filename", filename).detail("WrittenLength", length).GetLastError();
+		TraceEvent(SevError, "WriteFileBytes").detail("Filename", filename).detail("WrittenLength", length).GetLastError();
 		throw file_not_writable();
 	}
 }
@@ -127,10 +127,10 @@ SystemStatistics customSystemMonitor(std::string eventName, StatisticsState *sta
 
 		for (int i = 0; i<NetworkMetrics::SLOW_EVENT_BINS; i++)
 			if (int c = g_network->networkMetrics.countSlowEvents[i] - statState->networkMetricsState.countSlowEvents[i])
-				n.detail(format("N2_SlowTask%dM", 1 << i).c_str(), c);
+				n.detail(format("SlowTask%dM", 1 << i).c_str(), c);
 		for (int i = 0; i<NetworkMetrics::PRIORITY_BINS; i++)
 			if (double x = g_network->networkMetrics.secSquaredPriorityBlocked[i] - statState->networkMetricsState.secSquaredPriorityBlocked[i])
-				n.detail(format("N2_S2Pri%d", g_network->networkMetrics.priorityBins[i]).c_str(), x);
+				n.detail(format("S2Pri%d", g_network->networkMetrics.priorityBins[i]).c_str(), x);
 	}
 
 	if(machineMetrics) {
@@ -557,7 +557,7 @@ ACTOR template <class R, class F> Future<Void> doOnMainThread( Future<Void> sign
 		result->send(r);
 	} catch (Error& e) {
 		if(!result->canBeSet()) {
-			TraceEvent(SevError, "onMainThreadSetTwice").error(e,true);
+			TraceEvent(SevError, "OnMainThreadSetTwice").error(e,true);
 		}
 		result->sendError(e);
 	}