From c008e16c81344cbe66f97c04324bb406102caafd Mon Sep 17 00:00:00 2001 From: mpilman Date: Thu, 14 Mar 2019 18:40:28 -0700 Subject: [PATCH 01/24] Defer formatting in traces to make them cheaper This is the first part of making `TraceEvent` cheaper. The main idea is to defer calls to any code that formats strings. These are the main changes: - TraceEvent::detail now takes a c-string instead of std::string for literals. This prevents unnecessary allocations if the trace is not going to be printed in the first place (for example for SevDebug). Before that, `detail` expected a `std::string` as key, which meant that any string literal would be copied on each call. - Templates Traceable and SpecialTraceMetricType. These templates can be specialized for any type that needs to be printed. The actual formatting will be deferred to after the `enabled` check. This provides two benefits: (1) if a TraceEvent is disabled, we don't pay for the formatting and (2) TraceEvent can trace types that it doesn't know about. - TraceEvent::enabled will be set in the constructor if the Severity is passed. This will make sure that `TraceEvent::init` is not called. - `TraceEvent::detail` will be inlined. So for disabled TraceEvent calls, a call to detail will only introduce an if-branch, which is much cheaper than a function call. 
--- fdbclient/FDBTypes.h | 14 ++ fdbrpc/ReplicationPolicy.h | 2 +- fdbrpc/sim2.actor.cpp | 54 +++---- fdbserver/ClusterController.actor.cpp | 4 +- fdbserver/KeyValueStoreSQLite.actor.cpp | 4 + fdbserver/SimulatedCluster.actor.cpp | 48 +++--- fdbserver/worker.actor.cpp | 2 +- .../workloads/MachineAttrition.actor.cpp | 4 +- .../workloads/RemoveServersSafely.actor.cpp | 2 +- .../workloads/TaskBucketCorrectness.actor.cpp | 8 +- flow/Arena.h | 14 ++ flow/SystemMonitor.cpp | 8 +- flow/TDMetric.actor.h | 15 ++ flow/Trace.cpp | 110 ++++--------- flow/Trace.h | 150 +++++++++++++++--- flow/network.h | 16 ++ 16 files changed, 295 insertions(+), 160 deletions(-) diff --git a/fdbclient/FDBTypes.h b/fdbclient/FDBTypes.h index 766d39831c..49e832fcdd 100644 --- a/fdbclient/FDBTypes.h +++ b/fdbclient/FDBTypes.h @@ -513,6 +513,13 @@ private: uint32_t type; }; +template<> +struct Traceable : std::true_type { + static std::string toString(KeyValueStoreType const& value) { + return value.toString(); + } +}; + struct TLogVersion { enum Version { UNSET = 0, @@ -548,6 +555,13 @@ struct TLogVersion { } }; +template<> +struct Traceable : std::true_type { + static std::string toString(TLogVersion const& value) { + return Traceable::toString(value.version); + } +}; + struct TLogSpillType { // These enumerated values are stored in the database configuration, so can NEVER be changed. Only add new ones just before END. 
enum SpillType { diff --git a/fdbrpc/ReplicationPolicy.h b/fdbrpc/ReplicationPolicy.h index 259163436d..10b17d7041 100644 --- a/fdbrpc/ReplicationPolicy.h +++ b/fdbrpc/ReplicationPolicy.h @@ -255,7 +255,7 @@ void serializeReplicationPolicy(Ar& ar, Reference& policy) { } else { TraceEvent(SevError, "SerializingInvalidPolicyType") - .detailext("PolicyName", name); + .detail("PolicyName", name); } } else { diff --git a/fdbrpc/sim2.actor.cpp b/fdbrpc/sim2.actor.cpp index 7aa8166b27..609f1f625d 100644 --- a/fdbrpc/sim2.actor.cpp +++ b/fdbrpc/sim2.actor.cpp @@ -986,9 +986,9 @@ public: if( machine.processes[i]->locality.machineId() != locality.machineId() ) { // SOMEDAY: compute ip from locality to avoid this check TraceEvent("Sim2Mismatch") .detail("IP", format("%s", ip.toString().c_str())) - .detailext("MachineId", locality.machineId()) + .detail("MachineId", locality.machineId()) .detail("NewName", name) - .detailext("ExistingMachineId", machine.processes[i]->locality.machineId()) + .detail("ExistingMachineId", machine.processes[i]->locality.machineId()) .detail("ExistingName", machine.processes[i]->name); ASSERT( false ); } @@ -1025,7 +1025,7 @@ public: m->setGlobal(enNetworkConnections, (flowGlobalType) m->network); m->setGlobal(enASIOTimedOut, (flowGlobalType) false); - TraceEvent("NewMachine").detail("Name", name).detail("Address", m->address).detailext("MachineId", m->locality.machineId()).detail("Excluded", m->excluded).detail("Cleared", m->cleared); + TraceEvent("NewMachine").detail("Name", name).detail("Address", m->address).detail("MachineId", m->locality.machineId()).detail("Excluded", m->excluded).detail("Cleared", m->cleared); // FIXME: Sometimes, connections to/from this process will explicitly close @@ -1210,7 +1210,7 @@ public: } virtual void destroyProcess( ISimulator::ProcessInfo *p ) { - TraceEvent("ProcessDestroyed").detail("Name", p->name).detail("Address", p->address).detailext("MachineId", p->locality.machineId()); + 
TraceEvent("ProcessDestroyed").detail("Name", p->name).detail("Address", p->address).detail("MachineId", p->locality.machineId()); currentlyRebootingProcesses.insert(std::pair(p->address, p)); std::vector& processes = machines[ p->locality.machineId().get() ].processes; if( p != processes.back() ) { @@ -1226,12 +1226,12 @@ public: TEST( kt == InjectFaults ); // Simulated machine was killed with faults if (kt == KillInstantly) { - TraceEvent(SevWarn, "FailMachine").detail("Name", machine->name).detail("Address", machine->address).detailext("ZoneId", machine->locality.zoneId()).detail("Process", machine->toString()).detail("Rebooting", machine->rebooting).detail("Protected", protectedAddresses.count(machine->address)).backtrace(); + TraceEvent(SevWarn, "FailMachine").detail("Name", machine->name).detail("Address", machine->address).detail("ZoneId", machine->locality.zoneId()).detail("Process", machine->toString()).detail("Rebooting", machine->rebooting).detail("Protected", protectedAddresses.count(machine->address)).backtrace(); // This will remove all the "tracked" messages that came from the machine being killed latestEventCache.clear(); machine->failed = true; } else if (kt == InjectFaults) { - TraceEvent(SevWarn, "FaultMachine").detail("Name", machine->name).detail("Address", machine->address).detailext("ZoneId", machine->locality.zoneId()).detail("Process", machine->toString()).detail("Rebooting", machine->rebooting).detail("Protected", protectedAddresses.count(machine->address)).backtrace(); + TraceEvent(SevWarn, "FaultMachine").detail("Name", machine->name).detail("Address", machine->address).detail("ZoneId", machine->locality.zoneId()).detail("Process", machine->toString()).detail("Rebooting", machine->rebooting).detail("Protected", protectedAddresses.count(machine->address)).backtrace(); should_inject_fault = simulator_should_inject_fault; machine->fault_injection_r = g_random->randomUniqueID().first(); machine->fault_injection_p1 = 0.1; @@ -1302,7 +1302,7 
@@ public: TEST(kt == InjectFaults); // Trying to kill by injecting faults if(speedUpSimulation && !forceKill) { - TraceEvent(SevWarn, "AbortedKill").detailext("MachineId", machineId).detail("Reason", "Unforced kill within speedy simulation.").backtrace(); + TraceEvent(SevWarn, "AbortedKill").detail("MachineId", machineId).detail("Reason", "Unforced kill within speedy simulation.").backtrace(); if (ktFinal) *ktFinal = None; return false; } @@ -1320,7 +1320,7 @@ public: // Do nothing, if no processes to kill if (processesOnMachine == 0) { - TraceEvent(SevWarn, "AbortedKill").detailext("MachineId", machineId).detail("Reason", "The target had no processes running.").detail("Processes", processesOnMachine).detail("ProcessesPerMachine", processesPerMachine).backtrace(); + TraceEvent(SevWarn, "AbortedKill").detail("MachineId", machineId).detail("Reason", "The target had no processes running.").detail("Processes", processesOnMachine).detail("ProcessesPerMachine", processesPerMachine).backtrace(); if (ktFinal) *ktFinal = None; return false; } @@ -1357,24 +1357,24 @@ public: } } if (!canKillProcesses(processesLeft, processesDead, kt, &kt)) { - TraceEvent("ChangedKillMachine").detailext("MachineId", machineId).detail("KillType", kt).detail("OrigKillType", ktOrig).detail("ProcessesLeft", processesLeft.size()).detail("ProcessesDead", processesDead.size()).detail("TotalProcesses", machines.size()).detail("ProcessesPerMachine", processesPerMachine).detail("Protected", protectedWorker).detail("Unavailable", unavailable).detail("Excluded", excluded).detail("Cleared", cleared).detail("ProtectedTotal", protectedAddresses.size()).detail("TLogPolicy", tLogPolicy->info()).detail("StoragePolicy", storagePolicy->info()); + TraceEvent("ChangedKillMachine").detail("MachineId", machineId).detail("KillType", kt).detail("OrigKillType", ktOrig).detail("ProcessesLeft", processesLeft.size()).detail("ProcessesDead", processesDead.size()).detail("TotalProcesses", 
machines.size()).detail("ProcessesPerMachine", processesPerMachine).detail("Protected", protectedWorker).detail("Unavailable", unavailable).detail("Excluded", excluded).detail("Cleared", cleared).detail("ProtectedTotal", protectedAddresses.size()).detail("TLogPolicy", tLogPolicy->info()).detail("StoragePolicy", storagePolicy->info()); } else if ((kt == KillInstantly) || (kt == InjectFaults)) { - TraceEvent("DeadMachine").detailext("MachineId", machineId).detail("KillType", kt).detail("ProcessesLeft", processesLeft.size()).detail("ProcessesDead", processesDead.size()).detail("TotalProcesses", machines.size()).detail("ProcessesPerMachine", processesPerMachine).detail("TLogPolicy", tLogPolicy->info()).detail("StoragePolicy", storagePolicy->info()); + TraceEvent("DeadMachine").detail("MachineId", machineId).detail("KillType", kt).detail("ProcessesLeft", processesLeft.size()).detail("ProcessesDead", processesDead.size()).detail("TotalProcesses", machines.size()).detail("ProcessesPerMachine", processesPerMachine).detail("TLogPolicy", tLogPolicy->info()).detail("StoragePolicy", storagePolicy->info()); for (auto process : processesLeft) { - TraceEvent("DeadMachineSurvivors").detailext("MachineId", machineId).detail("KillType", kt).detail("ProcessesLeft", processesLeft.size()).detail("ProcessesDead", processesDead.size()).detail("SurvivingProcess", process->toString()); + TraceEvent("DeadMachineSurvivors").detail("MachineId", machineId).detail("KillType", kt).detail("ProcessesLeft", processesLeft.size()).detail("ProcessesDead", processesDead.size()).detail("SurvivingProcess", process->toString()); } for (auto process : processesDead) { - TraceEvent("DeadMachineVictims").detailext("MachineId", machineId).detail("KillType", kt).detail("ProcessesLeft", processesLeft.size()).detail("ProcessesDead", processesDead.size()).detail("VictimProcess", process->toString()); + TraceEvent("DeadMachineVictims").detail("MachineId", machineId).detail("KillType", kt).detail("ProcessesLeft", 
processesLeft.size()).detail("ProcessesDead", processesDead.size()).detail("VictimProcess", process->toString()); } } else { - TraceEvent("ClearMachine").detailext("MachineId", machineId).detail("KillType", kt).detail("ProcessesLeft", processesLeft.size()).detail("ProcessesDead", processesDead.size()).detail("TotalProcesses", machines.size()).detail("ProcessesPerMachine", processesPerMachine).detail("TLogPolicy", tLogPolicy->info()).detail("StoragePolicy", storagePolicy->info()); + TraceEvent("ClearMachine").detail("MachineId", machineId).detail("KillType", kt).detail("ProcessesLeft", processesLeft.size()).detail("ProcessesDead", processesDead.size()).detail("TotalProcesses", machines.size()).detail("ProcessesPerMachine", processesPerMachine).detail("TLogPolicy", tLogPolicy->info()).detail("StoragePolicy", storagePolicy->info()); for (auto process : processesLeft) { - TraceEvent("ClearMachineSurvivors").detailext("MachineId", machineId).detail("KillType", kt).detail("ProcessesLeft", processesLeft.size()).detail("ProcessesDead", processesDead.size()).detail("SurvivingProcess", process->toString()); + TraceEvent("ClearMachineSurvivors").detail("MachineId", machineId).detail("KillType", kt).detail("ProcessesLeft", processesLeft.size()).detail("ProcessesDead", processesDead.size()).detail("SurvivingProcess", process->toString()); } for (auto process : processesDead) { - TraceEvent("ClearMachineVictims").detailext("MachineId", machineId).detail("KillType", kt).detail("ProcessesLeft", processesLeft.size()).detail("ProcessesDead", processesDead.size()).detail("VictimProcess", process->toString()); + TraceEvent("ClearMachineVictims").detail("MachineId", machineId).detail("KillType", kt).detail("ProcessesLeft", processesLeft.size()).detail("ProcessesDead", processesDead.size()).detail("VictimProcess", process->toString()); } } } @@ -1384,7 +1384,7 @@ public: // Check if any processes on machine are rebooting if( processesOnMachine != processesPerMachine && kt >= 
RebootAndDelete ) { TEST(true); //Attempted reboot, but the target did not have all of its processes running - TraceEvent(SevWarn, "AbortedKill").detail("KillType", kt).detailext("MachineId", machineId).detail("Reason", "Machine processes does not match number of processes per machine").detail("Processes", processesOnMachine).detail("ProcessesPerMachine", processesPerMachine).backtrace(); + TraceEvent(SevWarn, "AbortedKill").detail("KillType", kt).detail("MachineId", machineId).detail("Reason", "Machine processes does not match number of processes per machine").detail("Processes", processesOnMachine).detail("ProcessesPerMachine", processesPerMachine).backtrace(); if (ktFinal) *ktFinal = None; return false; } @@ -1392,12 +1392,12 @@ public: // Check if any processes on machine are rebooting if ( processesOnMachine != processesPerMachine ) { TEST(true); //Attempted reboot, but the target did not have all of its processes running - TraceEvent(SevWarn, "AbortedKill").detail("KillType", kt).detailext("MachineId", machineId).detail("Reason", "Machine processes does not match number of processes per machine").detail("Processes", processesOnMachine).detail("ProcessesPerMachine", processesPerMachine).backtrace(); + TraceEvent(SevWarn, "AbortedKill").detail("KillType", kt).detail("MachineId", machineId).detail("Reason", "Machine processes does not match number of processes per machine").detail("Processes", processesOnMachine).detail("ProcessesPerMachine", processesPerMachine).backtrace(); if (ktFinal) *ktFinal = None; return false; } - TraceEvent("KillMachine").detailext("MachineId", machineId).detail("Kt", kt).detail("KtOrig", ktOrig).detail("KillableMachines", processesOnMachine).detail("ProcessPerMachine", processesPerMachine).detail("KillChanged", kt!=ktOrig); + TraceEvent("KillMachine").detail("MachineId", machineId).detail("Kt", kt).detail("KtOrig", ktOrig).detail("KillableMachines", processesOnMachine).detail("ProcessPerMachine", 
processesPerMachine).detail("KillChanged", kt!=ktOrig); if ( kt < RebootAndDelete ) { if(kt == InjectFaults && machines[machineId].machineProcess != nullptr) killProcess_internal( machines[machineId].machineProcess, kt ); @@ -1438,7 +1438,7 @@ public: if (processDcId.present() && (processDcId == dcId)) { if ((kt != Reboot) && (protectedAddresses.count(procRecord->address))) { kt = Reboot; - TraceEvent(SevWarn, "DcKillChanged").detailext("DataCenter", dcId).detail("KillType", kt).detail("OrigKillType", ktOrig) + TraceEvent(SevWarn, "DcKillChanged").detail("DataCenter", dcId).detail("KillType", kt).detail("OrigKillType", ktOrig) .detail("Reason", "Datacenter has protected process").detail("ProcessAddress", procRecord->address).detail("Failed", procRecord->failed).detail("Rebooting", procRecord->rebooting).detail("Excluded", procRecord->excluded).detail("Cleared", procRecord->cleared).detail("Process", procRecord->toString()); } datacenterMachines[processMachineId.get()] ++; @@ -1463,15 +1463,15 @@ public: } if (!canKillProcesses(processesLeft, processesDead, kt, &kt)) { - TraceEvent(SevWarn, "DcKillChanged").detailext("DataCenter", dcId).detail("KillType", kt).detail("OrigKillType", ktOrig); + TraceEvent(SevWarn, "DcKillChanged").detail("DataCenter", dcId).detail("KillType", kt).detail("OrigKillType", ktOrig); } else { - TraceEvent("DeadDataCenter").detailext("DataCenter", dcId).detail("KillType", kt).detail("DcZones", datacenterMachines.size()).detail("DcProcesses", dcProcesses).detail("ProcessesDead", processesDead.size()).detail("ProcessesLeft", processesLeft.size()).detail("TLogPolicy", tLogPolicy->info()).detail("StoragePolicy", storagePolicy->info()); + TraceEvent("DeadDataCenter").detail("DataCenter", dcId).detail("KillType", kt).detail("DcZones", datacenterMachines.size()).detail("DcProcesses", dcProcesses).detail("ProcessesDead", processesDead.size()).detail("ProcessesLeft", processesLeft.size()).detail("TLogPolicy", 
tLogPolicy->info()).detail("StoragePolicy", storagePolicy->info()); for (auto process : processesLeft) { - TraceEvent("DeadDcSurvivors").detailext("MachineId", process->locality.machineId()).detail("KillType", kt).detail("ProcessesLeft", processesLeft.size()).detail("ProcessesDead", processesDead.size()).detail("SurvivingProcess", process->toString()); + TraceEvent("DeadDcSurvivors").detail("MachineId", process->locality.machineId()).detail("KillType", kt).detail("ProcessesLeft", processesLeft.size()).detail("ProcessesDead", processesDead.size()).detail("SurvivingProcess", process->toString()); } for (auto process : processesDead) { - TraceEvent("DeadDcVictims").detailext("MachineId", process->locality.machineId()).detail("KillType", kt).detail("ProcessesLeft", processesLeft.size()).detail("ProcessesDead", processesDead.size()).detail("VictimProcess", process->toString()); + TraceEvent("DeadDcVictims").detail("MachineId", process->locality.machineId()).detail("KillType", kt).detail("ProcessesLeft", processesLeft.size()).detail("ProcessesDead", processesDead.size()).detail("VictimProcess", process->toString()); } } } @@ -1482,7 +1482,7 @@ public: killMachine(datacenterMachine.first, kt, true, &ktResult); if (ktResult != kt) { TraceEvent(SevWarn, "KillDCFail") - .detailext("Zone", datacenterMachine.first) + .detail("Zone", datacenterMachine.first) .detail("KillType", kt) .detail("KillTypeResult", ktResult) .detail("KillTypeOrig", ktOrig); @@ -1495,7 +1495,7 @@ public: TraceEvent("KillDataCenter") .detail("DcZones", datacenterMachines.size()) .detail("DcProcesses", dcProcesses) - .detailext("DCID", dcId) + .detail("DCID", dcId) .detail("KillType", kt) .detail("KillTypeOrig", ktOrig) .detail("KillTypeMin", ktMin) @@ -1694,7 +1694,7 @@ static double networkLatency() { } ACTOR void doReboot( ISimulator::ProcessInfo *p, ISimulator::KillType kt ) { - TraceEvent("RebootingProcessAttempt").detailext("ZoneId", p->locality.zoneId()).detail("KillType", kt).detail("Process", 
p->toString()).detail("StartingClass", p->startingClass.toString()).detail("Failed", p->failed).detail("Excluded", p->excluded).detail("Cleared", p->cleared).detail("Rebooting", p->rebooting).detail("TaskDefaultDelay", TaskDefaultDelay); + TraceEvent("RebootingProcessAttempt").detail("ZoneId", p->locality.zoneId()).detail("KillType", kt).detail("Process", p->toString()).detail("StartingClass", p->startingClass.toString()).detail("Failed", p->failed).detail("Excluded", p->excluded).detail("Cleared", p->cleared).detail("Rebooting", p->rebooting).detail("TaskDefaultDelay", TaskDefaultDelay); wait( g_sim2.delay( 0, TaskDefaultDelay, p ) ); // Switch to the machine in question @@ -1708,7 +1708,7 @@ ACTOR void doReboot( ISimulator::ProcessInfo *p, ISimulator::KillType kt ) { if( p->rebooting ) return; - TraceEvent("RebootingProcess").detail("KillType", kt).detail("Address", p->address).detailext("ZoneId", p->locality.zoneId()).detailext("DataHall", p->locality.dataHallId()).detail("Locality", p->locality.toString()).detail("Failed", p->failed).detail("Excluded", p->excluded).detail("Cleared", p->cleared).backtrace(); + TraceEvent("RebootingProcess").detail("KillType", kt).detail("Address", p->address).detail("ZoneId", p->locality.zoneId()).detail("DataHall", p->locality.dataHallId()).detail("Locality", p->locality.toString()).detail("Failed", p->failed).detail("Excluded", p->excluded).detail("Cleared", p->cleared).backtrace(); p->rebooting = true; if ((kt == ISimulator::RebootAndDelete) || (kt == ISimulator::RebootProcessAndDelete)) { p->cleared = true; diff --git a/fdbserver/ClusterController.actor.cpp b/fdbserver/ClusterController.actor.cpp index 63f6ff9ba8..d1c1858aa9 100644 --- a/fdbserver/ClusterController.actor.cpp +++ b/fdbserver/ClusterController.actor.cpp @@ -1843,9 +1843,9 @@ void registerWorker( RegisterWorkerRequest req, ClusterControllerData *self ) { newPriorityInfo.processClassFitness = newProcessClass.machineClassFitness(ProcessClass::ClusterController); 
if(info == self->id_worker.end()) { - TraceEvent("ClusterControllerActualWorkers", self->id).detail("WorkerId",w.id()).detailext("ProcessId", w.locality.processId()).detailext("ZoneId", w.locality.zoneId()).detailext("DataHall", w.locality.dataHallId()).detail("PClass", req.processClass.toString()).detail("Workers", self->id_worker.size()); + TraceEvent("ClusterControllerActualWorkers", self->id).detail("WorkerId",w.id()).detail("ProcessId", w.locality.processId()).detail("ZoneId", w.locality.zoneId()).detail("DataHall", w.locality.dataHallId()).detail("PClass", req.processClass.toString()).detail("Workers", self->id_worker.size()); } else { - TraceEvent("ClusterControllerWorkerAlreadyRegistered", self->id).suppressFor(1.0).detail("WorkerId",w.id()).detailext("ProcessId", w.locality.processId()).detailext("ZoneId", w.locality.zoneId()).detailext("DataHall", w.locality.dataHallId()).detail("PClass", req.processClass.toString()).detail("Workers", self->id_worker.size()); + TraceEvent("ClusterControllerWorkerAlreadyRegistered", self->id).suppressFor(1.0).detail("WorkerId",w.id()).detail("ProcessId", w.locality.processId()).detail("ZoneId", w.locality.zoneId()).detail("DataHall", w.locality.dataHallId()).detail("PClass", req.processClass.toString()).detail("Workers", self->id_worker.size()); } if ( w.address() == g_network->getLocalAddress() ) { if(self->changingDcIds.get().first) { diff --git a/fdbserver/KeyValueStoreSQLite.actor.cpp b/fdbserver/KeyValueStoreSQLite.actor.cpp index c557d61740..1f5f884cbf 100644 --- a/fdbserver/KeyValueStoreSQLite.actor.cpp +++ b/fdbserver/KeyValueStoreSQLite.actor.cpp @@ -39,6 +39,10 @@ u32 sqlite3VdbeSerialGet(const unsigned char*, u32, Mem*); #define sqlite3_mutex_leave(x) #endif +FORMAT_TRACEABLE(volatile long long, "%lld"); +FORMAT_TRACEABLE(volatile unsigned long long, "%llu"); +FORMAT_TRACEABLE(volatile double, "%g"); + void hexdump(FILE *fout, StringRef val); /*#undef state diff --git a/fdbserver/SimulatedCluster.actor.cpp 
b/fdbserver/SimulatedCluster.actor.cpp index 96b62f2fa8..5edb7c856f 100644 --- a/fdbserver/SimulatedCluster.actor.cpp +++ b/fdbserver/SimulatedCluster.actor.cpp @@ -207,7 +207,7 @@ ACTOR Future simulatedFDBDRebooter(Reference simulatedFDBDRebooter(Referenceaddress.toString()) .detail("Excluded", process->excluded) .detail("UsingSSL", sslEnabled); @@ -266,19 +266,19 @@ ACTOR Future simulatedFDBDRebooter(Referenceaddress) .detail("Excluded", process->excluded) - .detailext("ZoneId", localities.zoneId()) + .detail("ZoneId", localities.zoneId()) .detail("KillType", onShutdown.isReady() ? onShutdown.get() : ISimulator::None); if (!onShutdown.isReady()) onShutdown = ISimulator::InjectFaults; } catch (Error& e) { - TraceEvent(destructed ? SevInfo : SevError, "SimulatedFDBDRebooterError").error(e, true).detailext("ZoneId", localities.zoneId()).detail("RandomId", randomId); + TraceEvent(destructed ? SevInfo : SevError, "SimulatedFDBDRebooterError").error(e, true).detail("ZoneId", localities.zoneId()).detail("RandomId", randomId); onShutdown = e; } @@ -292,7 +292,7 @@ ACTOR Future simulatedFDBDRebooter(Referenceaddress) .detail("Excluded", process->excluded) .detail("Rebooting", process->rebooting) - .detailext("ZoneId", localities.zoneId()); + .detail("ZoneId", localities.zoneId()); wait( g_simulator.onProcess( simProcess ) ); wait(delay(0.00001 + FLOW_KNOBS->MAX_BUGGIFIED_DELAY)); // One last chance for the process to clean up? 
@@ -303,14 +303,14 @@ ACTOR Future simulatedFDBDRebooter(Referenceaddress) .detail("Excluded", process->excluded) - .detailext("ZoneId", localities.zoneId()) + .detail("ZoneId", localities.zoneId()) .detail("KillType", shutdownResult); if( shutdownResult < ISimulator::RebootProcessAndDelete ) { TraceEvent("SimulatedFDBDLowerReboot").detail("Cycles", cycles).detail("RandomId", randomId) .detail("Address", process->address) .detail("Excluded", process->excluded) - .detailext("ZoneId", localities.zoneId()) + .detail("ZoneId", localities.zoneId()) .detail("KillType", shutdownResult); return onShutdown.get(); } @@ -318,7 +318,7 @@ ACTOR Future simulatedFDBDRebooter(Referenceaddress) - .detailext("ZoneId", localities.zoneId()) + .detail("ZoneId", localities.zoneId()) .detail("KillType", shutdownResult); *coordFolder = joinPath(baseFolder, g_random->randomUniqueID().toString()); *dataFolder = joinPath(baseFolder, g_random->randomUniqueID().toString()); @@ -335,7 +335,7 @@ ACTOR Future simulatedFDBDRebooter(Referenceaddress) - .detailext("ZoneId", localities.zoneId()) + .detail("ZoneId", localities.zoneId()) .detail("KillType", shutdownResult); } } @@ -400,7 +400,7 @@ ACTOR Future simulatedMachine(ClusterConnectionString connStr, std::vector Reference clusterFile(useSeedFile ? 
new ClusterConnectionFile(path, connStr.toString()) : new ClusterConnectionFile(path)); const int listenPort = i*listenPerProcess + 1; processes.push_back(simulatedFDBDRebooter(clusterFile, ips[i], sslEnabled, tlsOptions, listenPort, listenPerProcess, localities, processClass, &myFolders[i], &coordFolders[i], baseFolder, connStr, useSeedFile, runBackupAgents)); - TraceEvent("SimulatedMachineProcess", randomId).detail("Address", NetworkAddress(ips[i], listenPort, true, false)).detailext("ZoneId", localities.zoneId()).detailext("DataHall", localities.dataHallId()).detail("Folder", myFolders[i]); + TraceEvent("SimulatedMachineProcess", randomId).detail("Address", NetworkAddress(ips[i], listenPort, true, false)).detail("ZoneId", localities.zoneId()).detail("DataHall", localities.dataHallId()).detail("Folder", myFolders[i]); } TEST( bootCount >= 1 ); // Simulated machine rebooted @@ -418,8 +418,8 @@ ACTOR Future simulatedMachine(ClusterConnectionString connStr, std::vector .detail("ProcessClass", processClass.toString()) .detail("Restarting", restarting) .detail("UseSeedFile", useSeedFile) - .detailext("ZoneId", localities.zoneId()) - .detailext("DataHall", localities.dataHallId()) + .detail("ZoneId", localities.zoneId()) + .detail("DataHall", localities.dataHallId()) .detail("Locality", localities.toString()); wait( waitForAll( processes ) ); @@ -428,8 +428,8 @@ ACTOR Future simulatedMachine(ClusterConnectionString connStr, std::vector .detail("Folder0", myFolders[0]) .detail("CFolder0", coordFolders[0]) .detail("MachineIPs", toIPVectorString(ips)) - .detailext("ZoneId", localities.zoneId()) - .detailext("DataHall", localities.dataHallId()); + .detail("ZoneId", localities.zoneId()) + .detail("DataHall", localities.dataHallId()); { //Kill all open files, which may cause them to write invalid data. 
@@ -468,8 +468,8 @@ ACTOR Future simulatedMachine(ClusterConnectionString connStr, std::vector .detail("CFolder0", coordFolders[0]) .detail("MachineIPs", toIPVectorString(ips)) .detail("Closing", closingStr) - .detailext("ZoneId", localities.zoneId()) - .detailext("DataHall", localities.dataHallId()); + .detail("ZoneId", localities.zoneId()) + .detail("DataHall", localities.dataHallId()); ISimulator::MachineInfo* machine = g_simulator.getMachineById(localities.machineId()); machine->closingFiles = filenames; @@ -499,8 +499,8 @@ ACTOR Future simulatedMachine(ClusterConnectionString connStr, std::vector if( shutdownDelayCount++ >= 50 ) { // Worker doesn't shut down instantly on reboot TraceEvent(SevError, "SimulatedFDBDFilesCheck", randomId) .detail("PAddrs", toIPVectorString(ips)) - .detailext("ZoneId", localities.zoneId()) - .detailext("DataHall", localities.dataHallId()); + .detail("ZoneId", localities.zoneId()) + .detail("DataHall", localities.dataHallId()); ASSERT( false ); } @@ -510,8 +510,8 @@ ACTOR Future simulatedMachine(ClusterConnectionString connStr, std::vector TraceEvent("SimulatedFDBDFilesClosed", randomId) .detail("Address", toIPVectorString(ips)) - .detailext("ZoneId", localities.zoneId()) - .detailext("DataHall", localities.dataHallId()); + .detail("ZoneId", localities.zoneId()) + .detail("DataHall", localities.dataHallId()); g_simulator.destroyMachine(localities.machineId()); @@ -535,8 +535,8 @@ ACTOR Future simulatedMachine(ClusterConnectionString connStr, std::vector .detail("Swap", swap) .detail("KillType", killType) .detail("RebootTime", rebootTime) - .detailext("ZoneId", localities.zoneId()) - .detailext("DataHall", localities.dataHallId()) + .detail("ZoneId", localities.zoneId()) + .detail("DataHall", localities.dataHallId()) .detail("MachineIPs", toIPVectorString(ips)); wait( delay( rebootTime ) ); diff --git a/fdbserver/worker.actor.cpp b/fdbserver/worker.actor.cpp index 93082c6b25..ff0b2d51ca 100644 --- a/fdbserver/worker.actor.cpp +++ 
b/fdbserver/worker.actor.cpp @@ -1261,7 +1261,7 @@ ACTOR Future fdbd( try { ServerCoordinators coordinators( connFile ); - TraceEvent("StartingFDBD").detailext("ZoneID", localities.zoneId()).detailext("MachineId", localities.machineId()).detail("DiskPath", dataFolder).detail("CoordPath", coordFolder); + TraceEvent("StartingFDBD").detail("ZoneID", localities.zoneId()).detail("MachineId", localities.machineId()).detail("DiskPath", dataFolder).detail("CoordPath", coordFolder); // SOMEDAY: start the services on the machine in a staggered fashion in simulation? state vector> v; diff --git a/fdbserver/workloads/MachineAttrition.actor.cpp b/fdbserver/workloads/MachineAttrition.actor.cpp index e69101f102..84d364d8e0 100644 --- a/fdbserver/workloads/MachineAttrition.actor.cpp +++ b/fdbserver/workloads/MachineAttrition.actor.cpp @@ -140,7 +140,7 @@ struct MachineAttritionWorkload : TestWorkload { else kt = ISimulator::RebootAndDelete; } - TraceEvent("Assassination").detailext("TargetDatacenter", target).detail("Reboot", self->reboot).detail("KillType", kt); + TraceEvent("Assassination").detail("TargetDatacenter", target).detail("Reboot", self->reboot).detail("KillType", kt); g_simulator.killDataCenter( target, kt ); } else { @@ -176,7 +176,7 @@ struct MachineAttritionWorkload : TestWorkload { } TraceEvent("Assassination").detail("TargetMachine", targetMachine.toString()) - .detailext("ZoneId", targetMachine.zoneId()) + .detail("ZoneId", targetMachine.zoneId()) .detail("Reboot", self->reboot).detail("KilledMachines", killedMachines) .detail("MachinesToKill", self->machinesToKill).detail("MachinesToLeave", self->machinesToLeave) .detail("Machines", self->machines.size()).detail("Replace", self->replacement); diff --git a/fdbserver/workloads/RemoveServersSafely.actor.cpp b/fdbserver/workloads/RemoveServersSafely.actor.cpp index d852db53b6..869085d406 100644 --- a/fdbserver/workloads/RemoveServersSafely.actor.cpp +++ b/fdbserver/workloads/RemoveServersSafely.actor.cpp @@ -379,7 
+379,7 @@ struct RemoveServersSafelyWorkload : TestWorkload { TraceEvent("RemoveAndKill", functionId).detail("Step", removeViaClear ? "ClearMachines" : "KillMachines").detail("Addresses", describe(killAddrs)).detail("Processes", killProcArray.size()).detail("Zones", zoneIds.size()).detail("ClusterAvailable", g_simulator.isAvailable()); for (auto& zoneId : zoneIds) { killedMachine = g_simulator.killZone( zoneId, removeViaClear ? ISimulator::RebootAndDelete : ISimulator::KillInstantly ); - TraceEvent(killedMachine ? SevInfo : SevWarn, "RemoveAndKill").detail("Step", removeViaClear ? "Clear Machine" : "Kill Machine").detailext("ZoneId", zoneId).detail(removeViaClear ? "Cleared" : "Killed", killedMachine).detail("ClusterAvailable", g_simulator.isAvailable()); + TraceEvent(killedMachine ? SevInfo : SevWarn, "RemoveAndKill").detail("Step", removeViaClear ? "Clear Machine" : "Kill Machine").detail("ZoneId", zoneId).detail(removeViaClear ? "Cleared" : "Killed", killedMachine).detail("ClusterAvailable", g_simulator.isAvailable()); } } diff --git a/fdbserver/workloads/TaskBucketCorrectness.actor.cpp b/fdbserver/workloads/TaskBucketCorrectness.actor.cpp index f01b199532..655d431987 100644 --- a/fdbserver/workloads/TaskBucketCorrectness.actor.cpp +++ b/fdbserver/workloads/TaskBucketCorrectness.actor.cpp @@ -33,7 +33,7 @@ struct SayHelloTaskFunc : TaskFuncBase { static StringRef name; - static const uint32_t version = 1; + static constexpr uint32_t version = 1; StringRef getName() const { return name; }; Future execute(Database cx, Reference tb, Reference fb, Reference task) { return Void(); }; @@ -43,7 +43,11 @@ struct SayHelloTaskFunc : TaskFuncBase { // check task version uint32_t taskVersion = task->getVersion(); if (taskVersion > SayHelloTaskFunc::version) { - TraceEvent("TaskBucketCorrectnessSayHello").detail("CheckTaskVersion", "taskVersion is larger than the funcVersion").detail("TaskVersion", taskVersion).detail("FuncVersion", SayHelloTaskFunc::version); + uint32_t v = 
SayHelloTaskFunc::version; + TraceEvent("TaskBucketCorrectnessSayHello") + .detail("CheckTaskVersion", "taskVersion is larger than the funcVersion") + .detail("TaskVersion", taskVersion) + .detail("FuncVersion", v); } state Reference done = futureBucket->unpack(task->params[Task::reservedTaskParamKeyDone]); diff --git a/flow/Arena.h b/flow/Arena.h index 33b333a462..f9677636a1 100644 --- a/flow/Arena.h +++ b/flow/Arena.h @@ -652,6 +652,20 @@ private: }; #pragma pack( pop ) +template<> +struct Traceable : std::true_type { + static std::string toString(const StringRef& value) { + return value.toString(); + } +}; + +template +struct Traceable> : std::conditional::value, std::true_type, std::false_type>::type { + static std::string toString(const Standalone& value) { + return Traceable::toString(value); + } +}; + #define LiteralStringRef( str ) StringRef( (const uint8_t*)(str), sizeof((str))-1 ) // makeString is used to allocate a Standalone of a known length for later diff --git a/flow/SystemMonitor.cpp b/flow/SystemMonitor.cpp index ea4f3975fc..74f8e4ff36 100644 --- a/flow/SystemMonitor.cpp +++ b/flow/SystemMonitor.cpp @@ -89,8 +89,8 @@ SystemStatistics customSystemMonitor(std::string eventName, StatisticsState *sta .detail("CachePageReadsMerged", netData.countFileCachePageReadsMerged - statState->networkState.countFileCachePageReadsMerged) .detail("CacheWrites", netData.countFileCacheWrites - statState->networkState.countFileCacheWrites) .detail("CacheReads", netData.countFileCacheReads - statState->networkState.countFileCacheReads) - .detailext("ZoneID", machineState.zoneId) - .detailext("MachineID", machineState.machineId) + .detail("ZoneID", machineState.zoneId) + .detail("MachineID", machineState.machineId) .detail("AIOSubmitCount", netData.countAIOSubmit - statState->networkState.countAIOSubmit) .detail("AIOCollectCount", netData.countAIOCollect - statState->networkState.countAIOCollect) .detail("AIOSubmitLag", (g_network->networkMetrics.secSquaredSubmit - 
statState->networkMetricsState.secSquaredSubmit) / currentStats.elapsed) @@ -153,8 +153,8 @@ SystemStatistics customSystemMonitor(std::string eventName, StatisticsState *sta .detail("TotalMemory", currentStats.machineTotalRAM) .detail("CommittedMemory", currentStats.machineCommittedRAM) .detail("AvailableMemory", currentStats.machineAvailableRAM) - .detailext("ZoneID", machineState.zoneId) - .detailext("MachineID", machineState.machineId) + .detail("ZoneID", machineState.zoneId) + .detail("MachineID", machineState.machineId) .trackLatest("MachineMetrics"); } } diff --git a/flow/TDMetric.actor.h b/flow/TDMetric.actor.h index 2299927bb5..4551088dc0 100755 --- a/flow/TDMetric.actor.h +++ b/flow/TDMetric.actor.h @@ -1381,6 +1381,21 @@ struct MetricHandle { Reference ref; }; +template +struct Traceable> : Traceable { + static std::string toString(const MetricHandle& value) { + return Traceable::toString(value.getValue()); + } +}; + +template +struct SpecialTraceMetricType> : SpecialTraceMetricType { + using parent = SpecialTraceMetricType; + static auto getValue(const MetricHandle& value) -> decltype(parent::getValue(value.getValue())) { + return parent::getValue(value.getValue()); + } +}; + typedef MetricHandle Int64MetricHandle; typedef MetricHandle VersionMetricHandle; typedef MetricHandle BoolMetricHandle; diff --git a/flow/Trace.cpp b/flow/Trace.cpp index 222fea68af..ffbdf2a434 100644 --- a/flow/Trace.cpp +++ b/flow/Trace.cpp @@ -654,11 +654,17 @@ void removeTraceRole(std::string role) { } TraceEvent::TraceEvent( const char* type, UID id ) : id(id), type(type), severity(SevInfo), initialized(false), enabled(true) {} -TraceEvent::TraceEvent( Severity severity, const char* type, UID id ) : id(id), type(type), severity(severity), initialized(false), enabled(true) {} +TraceEvent::TraceEvent( Severity severity, const char* type, UID id ) + : id(id), type(type), severity(severity), initialized(false), + enabled(g_network == nullptr || FLOW_KNOBS->MIN_TRACE_SEVERITY >= 
severity) {} TraceEvent::TraceEvent( TraceInterval& interval, UID id ) : id(id), type(interval.type), severity(interval.severity), initialized(false), enabled(true) { init(interval); } -TraceEvent::TraceEvent( Severity severity, TraceInterval& interval, UID id ) : id(id), type(interval.type), severity(severity), initialized(false), enabled(true) { +TraceEvent::TraceEvent( Severity severity, TraceInterval& interval, UID id ) + : id(id), type(interval.type), + severity(severity), + initialized(false), + enabled(g_network == nullptr || FLOW_KNOBS->MIN_TRACE_SEVERITY >= severity) { init(interval); } @@ -713,7 +719,7 @@ bool TraceEvent::init() { severity = SevWarnAlways; } - detail("Severity", severity); + detail("Severity", int(severity)); detailf("Time", "%.6f", time); detail("Type", type); if(g_network && g_network->isSimulated()) { @@ -736,25 +742,23 @@ bool TraceEvent::init() { return enabled; } -TraceEvent& TraceEvent::error(class Error const& error, bool includeCancelled) { - if(enabled) { - if (error.code() != error_code_actor_cancelled || includeCancelled) { - err = error; - if (initialized) { - if (error.isInjectedFault()) { - detail("ErrorIsInjectedFault", true); - if(severity == SevError) severity = SevWarnAlways; - } - detail("Error", error.name()); - detail("ErrorDescription", error.what()); - detail("ErrorCode", error.code()); +TraceEvent& TraceEvent::errorImpl(class Error const& error, bool includeCancelled) { + if (error.code() != error_code_actor_cancelled || includeCancelled) { + err = error; + if (initialized) { + if (error.isInjectedFault()) { + detail("ErrorIsInjectedFault", true); + if(severity == SevError) severity = SevWarnAlways; } + detail("Error", error.name()); + detail("ErrorDescription", error.what()); + detail("ErrorCode", error.code()); + } + } else { + if (initialized) { + TraceEvent(g_network && g_network->isSimulated() ? 
SevError : SevWarnAlways, std::string(TRACE_EVENT_INVALID_SUPPRESSION).append(type).c_str()).suppressFor(5); } else { - if (initialized) { - TraceEvent(g_network && g_network->isSimulated() ? SevError : SevWarnAlways, std::string(TRACE_EVENT_INVALID_SUPPRESSION).append(type).c_str()).suppressFor(5); - } else { - enabled = false; - } + enabled = false; } } return *this; @@ -781,66 +785,18 @@ TraceEvent& TraceEvent::detailImpl( std::string&& key, std::string&& value, bool return *this; } -TraceEvent& TraceEvent::detail( std::string key, std::string value ) { - return detailImpl(std::move(key), std::move(value)); +void TraceEvent::setField(const char* key, int64_t value) { + tmpEventMetric->setField(key, value); } -TraceEvent& TraceEvent::detail( std::string key, double value ) { - init(); - if(enabled) - tmpEventMetric->setField(key.c_str(), value); - return detailfNoMetric( std::move(key), "%g", value ); + +void TraceEvent::setField(const char* key, double value) { + tmpEventMetric->setField(key, value); } -TraceEvent& TraceEvent::detail( std::string key, int value ) { - init(); - if(enabled) - tmpEventMetric->setField(key.c_str(), (int64_t)value); - return detailfNoMetric( std::move(key), "%d", value ); -} -TraceEvent& TraceEvent::detail( std::string key, unsigned value ) { - init(); - if(enabled) - tmpEventMetric->setField(key.c_str(), (int64_t)value); - return detailfNoMetric( std::move(key), "%u", value ); -} -TraceEvent& TraceEvent::detail( std::string key, long int value ) { - init(); - if(enabled) - tmpEventMetric->setField(key.c_str(), (int64_t)value); - return detailfNoMetric( std::move(key), "%ld", value ); -} -TraceEvent& TraceEvent::detail( std::string key, long unsigned int value ) { - init(); - if(enabled) - tmpEventMetric->setField(key.c_str(), (int64_t)value); - return detailfNoMetric( std::move(key), "%lu", value ); -} -TraceEvent& TraceEvent::detail( std::string key, long long int value ) { - init(); - if(enabled) - 
tmpEventMetric->setField(key.c_str(), (int64_t)value); - return detailfNoMetric( std::move(key), "%lld", value ); -} -TraceEvent& TraceEvent::detail( std::string key, long long unsigned int value ) { - init(); - if(enabled) - tmpEventMetric->setField(key.c_str(), (int64_t)value); - return detailfNoMetric( std::move(key), "%llu", value ); -} -TraceEvent& TraceEvent::detail( std::string key, const NetworkAddress& value ) { - return detailImpl( std::move(key), value.toString() ); -} -TraceEvent& TraceEvent::detail( std::string key, const IPAddress& value ) { - return detailImpl( std::move(key), value.toString() ); -} -TraceEvent& TraceEvent::detail( std::string key, const UID& value ) { - return detailf( std::move(key), "%016llx", value.first() ); // SOMEDAY: Log entire value? We also do this explicitly in some "lists" in various individual TraceEvent calls -} -TraceEvent& TraceEvent::detailext( std::string key, StringRef const& value ) { - return detailImpl(std::move(key), value.printable()); -} -TraceEvent& TraceEvent::detailext( std::string key, const Optional>& value ) { - return detailImpl(std::move(key), (value.present()) ? value.get().printable() : "[not set]"); + +void TraceEvent::setField(const char* key, const std::string& value) { + tmpEventMetric->setField(key, Standalone(value)); } + TraceEvent& TraceEvent::detailf( std::string key, const char* valueFormat, ... 
) { if (enabled) { va_list args; diff --git a/flow/Trace.h b/flow/Trace.h index e0bdee46d6..a3a381a274 100644 --- a/flow/Trace.h +++ b/flow/Trace.h @@ -26,6 +26,7 @@ #include #include #include +#include #include "flow/IRandom.h" #include "flow/Error.h" @@ -133,9 +134,62 @@ private: }; struct DynamicEventMetric; -class StringRef; -template class Standalone; -template class Optional; + +// forward declare format from flow.h as we +// can't include flow.h here +std::string format(const char* form, ...); + +template +struct Traceable : std::false_type {}; + +#define FORMAT_TRACEABLE(type, fmt) \ + template<> \ + struct Traceable : std::true_type { \ + static std::string toString(type value) { \ + return format(fmt, value); \ + } \ + } + +FORMAT_TRACEABLE(bool, "%d"); +FORMAT_TRACEABLE(signed char, "%d"); +FORMAT_TRACEABLE(unsigned char, "%d"); +FORMAT_TRACEABLE(short, "%d"); +FORMAT_TRACEABLE(unsigned short, "%d"); +FORMAT_TRACEABLE(int, "%d"); +FORMAT_TRACEABLE(unsigned, "%u"); +FORMAT_TRACEABLE(long int, "%ld"); +FORMAT_TRACEABLE(unsigned long int, "%lu"); +FORMAT_TRACEABLE(long long int, "%lld"); +FORMAT_TRACEABLE(unsigned long long int, "%llu"); +FORMAT_TRACEABLE(double, "%g"); + +template<> +struct Traceable : std::true_type { + static std::string toString(const UID& value) { + return format("%016llx", value.first()); + } +}; + + + +template +struct SpecialTraceMetricType + : std::conditional::value || std::is_enum::value, + std::true_type, std::false_type>::type { + static int64_t getValue(T v) { + return v; + } +}; + +#define TRACE_METRIC_TYPE(from, to) \ + template<> \ + struct SpecialTraceMetricType : std::true_type { \ + static to getValue(from v) { \ + return v; \ + } \ + } + +TRACE_METRIC_TYPE(double, double); struct TraceEvent { TraceEvent( const char* type, UID id = UID() ); // Assumes SevInfo severity @@ -147,28 +201,86 @@ struct TraceEvent { static bool isNetworkThread(); //Must be called directly after constructing the trace event - TraceEvent& 
error(const class Error& e, bool includeCancelled=false); + TraceEvent& error(const class Error& e, bool includeCancelled=false) { + if (enabled) { + return errorImpl(e, includeCancelled); + } + return *this; + } - TraceEvent& detail( std::string key, std::string value ); - TraceEvent& detail( std::string key, double value ); - TraceEvent& detail( std::string key, long int value ); - TraceEvent& detail( std::string key, long unsigned int value ); - TraceEvent& detail( std::string key, long long int value ); - TraceEvent& detail( std::string key, long long unsigned int value ); - TraceEvent& detail( std::string key, int value ); - TraceEvent& detail( std::string key, unsigned value ); - TraceEvent& detail( std::string key, const struct NetworkAddress& value ); - TraceEvent& detail( std::string key, const IPAddress& value ); + template + typename std::enable_if::value, TraceEvent&>::type + detail( std::string&& key, const T& value ) { + if (enabled) { + init(); + auto s = Traceable::toString(value); + addMetric(key.c_str(), value, s); + return detailImpl(std::move(key), std::move(s)); + } + return *this; + } + + template + typename std::enable_if::value, TraceEvent&>::type + detail( const char* key, const T& value ) { + if (enabled) { + init(); + auto s = Traceable::toString(value); + addMetric(key, value, s); + return detailImpl(std::string(key), std::move(s), false); + } + return *this; + } + template + typename std::enable_if::value, TraceEvent&>::type + detail(const char* key, T value) { + if (enabled) { + init(); + setField(key, int64_t(value)); + return detailImpl(std::string(key), format("%d", value), false); + } + return *this; + } + TraceEvent& detail(std::string key, std::string value) { + if (enabled) { + init(); + addMetric(key.c_str(), value, value); + return detailImpl(std::string(key), std::move(value), false); + } + return *this; + } + TraceEvent& detail(const char* key, std::string value) { + if (enabled) { + init(); + addMetric(key, value, value); + 
return detailImpl(std::string(key), std::move(value), false); + } + return *this; + } TraceEvent& detailf( std::string key, const char* valueFormat, ... ); - TraceEvent& detailext( std::string key, const StringRef& value ); - TraceEvent& detailext( std::string key, const Optional>& value ); private: + template + typename std::enable_if::value, void>::type + addMetric(const char* key, const T& value, const std::string&) { + setField(key, SpecialTraceMetricType::getValue(value)); + } + + template + typename std::enable_if::value, void>::type + addMetric(const char* key, const T&, const std::string& value) { + setField(key, value); + } + + void setField(const char* key, int64_t value); + void setField(const char* key, double value); + void setField(const char* key, const std::string& value); + + TraceEvent& errorImpl(const class Error& e, bool includeCancelled=false); // Private version of detailf that does NOT write to the eventMetric. This is to be used by other detail methods // which can write field metrics of a more appropriate type than string but use detailf() to add to the TraceEvent. TraceEvent& detailfNoMetric( std::string&& key, const char* valueFormat, ... 
); TraceEvent& detailImpl( std::string&& key, std::string&& value, bool writeEventMetricField=true ); public: - TraceEvent& detail( std::string key, const UID& value ); TraceEvent& backtrace(const std::string& prefix = ""); TraceEvent& trackLatest( const char* trackingKey ); TraceEvent& sample( double sampleRate, bool logSampleRate=true ); @@ -249,8 +361,8 @@ public: void setLatestError( const TraceEventFields& contents ); TraceEventFields getLatestError(); private: - std::map> latest; - std::map latestErrors; + std::map> latest; + std::map latestErrors; }; extern LatestEventCache latestEventCache; diff --git a/flow/network.h b/flow/network.h index 9e7b3a385f..14641abff9 100644 --- a/flow/network.h +++ b/flow/network.h @@ -80,6 +80,8 @@ enum { class Void; +template class Optional; + struct IPAddress { // Represents both IPv4 and IPv6 address. For IPv4 addresses, // only the first 32bits are relevant and rest are initialized to @@ -144,6 +146,13 @@ private: } addr; }; +template<> +struct Traceable : std::true_type { + static std::string toString(const IPAddress& value) { + return value.toString(); + } +}; + struct NetworkAddress { // A NetworkAddress identifies a particular running server (i.e. a TCP endpoint). 
IPAddress ip; @@ -192,6 +201,13 @@ struct NetworkAddress { } }; +template<> +struct Traceable : std::true_type { + static std::string toString(const NetworkAddress& value) { + return value.toString(); + } +}; + namespace std { template <> From ae84f5424a17fe618f5359d259ed34735ba77d22 Mon Sep 17 00:00:00 2001 From: mpilman Date: Thu, 14 Mar 2019 22:15:27 -0700 Subject: [PATCH 02/24] Fix potential segfault when init changes enabled --- flow/Trace.h | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/flow/Trace.h b/flow/Trace.h index a3a381a274..1131679255 100644 --- a/flow/Trace.h +++ b/flow/Trace.h @@ -211,8 +211,7 @@ struct TraceEvent { template typename std::enable_if::value, TraceEvent&>::type detail( std::string&& key, const T& value ) { - if (enabled) { - init(); + if (enabled && init()) { auto s = Traceable::toString(value); addMetric(key.c_str(), value, s); return detailImpl(std::move(key), std::move(s)); @@ -223,8 +222,7 @@ struct TraceEvent { template typename std::enable_if::value, TraceEvent&>::type detail( const char* key, const T& value ) { - if (enabled) { - init(); + if (enabled && init()) { auto s = Traceable::toString(value); addMetric(key, value, s); return detailImpl(std::string(key), std::move(s), false); @@ -234,15 +232,14 @@ struct TraceEvent { template typename std::enable_if::value, TraceEvent&>::type detail(const char* key, T value) { - if (enabled) { - init(); + if (enabled && init()) { setField(key, int64_t(value)); return detailImpl(std::string(key), format("%d", value), false); } return *this; } TraceEvent& detail(std::string key, std::string value) { - if (enabled) { + if (enabled && init()) { init(); addMetric(key.c_str(), value, value); return detailImpl(std::string(key), std::move(value), false); @@ -250,8 +247,7 @@ struct TraceEvent { return *this; } TraceEvent& detail(const char* key, std::string value) { - if (enabled) { - init(); + if (enabled && init()) { addMetric(key, value, value); return 
detailImpl(std::string(key), std::move(value), false); } From 02e3b634fbc33c51f1a21107d0ceecc6cd560f80 Mon Sep 17 00:00:00 2001 From: mpilman Date: Thu, 14 Mar 2019 22:15:54 -0700 Subject: [PATCH 03/24] Compile sqlite with NDEBUG so we can debug --- fdbserver/CMakeLists.txt | 7 ------- 1 file changed, 7 deletions(-) diff --git a/fdbserver/CMakeLists.txt b/fdbserver/CMakeLists.txt index 2085f6d6a9..d3db62c675 100644 --- a/fdbserver/CMakeLists.txt +++ b/fdbserver/CMakeLists.txt @@ -66,13 +66,6 @@ set(FDBSERVER_SRCS SimulatedCluster.actor.cpp SimulatedCluster.h SkipList.cpp - sqlite/btree.h - sqlite/hash.h - sqlite/sqlite3.h - sqlite/sqlite3ext.h - sqlite/sqliteInt.h - sqlite/sqliteLimit.h - sqlite/sqlite3.amalgamation.c Status.actor.cpp Status.h StorageMetrics.actor.h From 8af0f15a43d6ba6c05f903422469e40acf7f707a Mon Sep 17 00:00:00 2001 From: mpilman Date: Thu, 14 Mar 2019 22:16:19 -0700 Subject: [PATCH 04/24] primitive performance test for TraceEvent --- flow/ActorCollection.actor.cpp | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/flow/ActorCollection.actor.cpp b/flow/ActorCollection.actor.cpp index 223fd3930a..ece3cbb4c2 100644 --- a/flow/ActorCollection.actor.cpp +++ b/flow/ActorCollection.actor.cpp @@ -20,6 +20,7 @@ #include "flow/ActorCollection.h" #include "flow/IndexedSet.h" +#include "flow/UnitTest.h" #include "flow/actorcompiler.h" // This must be the last #include. 
ACTOR Future actorCollection( FutureStream> addActor, int* pCount, double *lastChangeTime, double *idleTime, double *allTime, bool returnWhenEmptied ) @@ -58,3 +59,33 @@ ACTOR Future actorCollection( FutureStream> addActor, int* pC when (Error e = waitNext(errors.getFuture())) { throw e; } } } + +TEST_CASE("/flow/TraceEvent") { + state unsigned i; + state double startTime = g_network->now(); + state std::string str = "hello"; + for (i = 0; i < 10000; ++i) { + for (unsigned j = 0; j < 100; ++j) { + TraceEvent("TestTraceLineNoDebug") + .detail("Num", g_random->randomInt(0, 1000)) + .detail("Double", g_random->random01()) + .detail("hello", str); + } + wait(delay(0)); + } + TraceEvent("TraceDuration") + .detail("Time", g_network->now() - startTime); + startTime = g_network->now(); + for (i = 0; i < 10000; ++i) { + for (unsigned j = 0; j < 100; ++j) { + TraceEvent(SevDebug, "TestTraceLineNoDebug") + .detail("Num", g_random->randomInt(0, 1000)) + .detail("Double", g_random->random01()) + .detail("hello", str); + } + wait(delay(0)); + } + TraceEvent("TraceDuration") + .detail("Time", g_network->now() - startTime); + return Void(); +} From 9938a49c1efdfb866d72c8f56ed8dd12e5a60fe0 Mon Sep 17 00:00:00 2001 From: mpilman Date: Thu, 14 Mar 2019 22:28:42 -0700 Subject: [PATCH 05/24] make primitive test longer --- flow/ActorCollection.actor.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/flow/ActorCollection.actor.cpp b/flow/ActorCollection.actor.cpp index ece3cbb4c2..92d167899f 100644 --- a/flow/ActorCollection.actor.cpp +++ b/flow/ActorCollection.actor.cpp @@ -64,7 +64,7 @@ TEST_CASE("/flow/TraceEvent") { state unsigned i; state double startTime = g_network->now(); state std::string str = "hello"; - for (i = 0; i < 10000; ++i) { + for (i = 0; i < 100000; ++i) { for (unsigned j = 0; j < 100; ++j) { TraceEvent("TestTraceLineNoDebug") .detail("Num", g_random->randomInt(0, 1000)) @@ -76,7 +76,7 @@ TEST_CASE("/flow/TraceEvent") { 
TraceEvent("TraceDuration") .detail("Time", g_network->now() - startTime); startTime = g_network->now(); - for (i = 0; i < 10000; ++i) { + for (i = 0; i < 1000000; ++i) { for (unsigned j = 0; j < 100; ++j) { TraceEvent(SevDebug, "TestTraceLineNoDebug") .detail("Num", g_random->randomInt(0, 1000)) From bb82f8560af9e2511a54a8ca3ea9a3fdcd7fcf01 Mon Sep 17 00:00:00 2001 From: mpilman Date: Fri, 15 Mar 2019 09:11:05 -0700 Subject: [PATCH 06/24] process all volatile ints correctly in traces --- fdbserver/KeyValueStoreSQLite.actor.cpp | 4 ---- flow/Trace.h | 6 ++++++ 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/fdbserver/KeyValueStoreSQLite.actor.cpp b/fdbserver/KeyValueStoreSQLite.actor.cpp index 1f5f884cbf..c557d61740 100644 --- a/fdbserver/KeyValueStoreSQLite.actor.cpp +++ b/fdbserver/KeyValueStoreSQLite.actor.cpp @@ -39,10 +39,6 @@ u32 sqlite3VdbeSerialGet(const unsigned char*, u32, Mem*); #define sqlite3_mutex_leave(x) #endif -FORMAT_TRACEABLE(volatile long long, "%lld"); -FORMAT_TRACEABLE(volatile unsigned long long, "%llu"); -FORMAT_TRACEABLE(volatile double, "%g"); - void hexdump(FILE *fout, StringRef val); /*#undef state diff --git a/flow/Trace.h b/flow/Trace.h index 1131679255..19ff9d2c6b 100644 --- a/flow/Trace.h +++ b/flow/Trace.h @@ -162,6 +162,12 @@ FORMAT_TRACEABLE(unsigned long int, "%lu"); FORMAT_TRACEABLE(long long int, "%lld"); FORMAT_TRACEABLE(unsigned long long int, "%llu"); FORMAT_TRACEABLE(double, "%g"); +FORMAT_TRACEABLE(volatile long, "%ld"); +FORMAT_TRACEABLE(volatile unsigned long, "%lu"); +FORMAT_TRACEABLE(volatile long long, "%lld"); +FORMAT_TRACEABLE(volatile unsigned long long, "%llu"); +FORMAT_TRACEABLE(volatile double, "%g"); + template<> struct Traceable : std::true_type { From 32a3aea8bbc70d204c81339b90ab8d6baa5e4236 Mon Sep 17 00:00:00 2001 From: mpilman Date: Fri, 15 Mar 2019 09:16:16 -0700 Subject: [PATCH 07/24] Make strings random in tracing micro-benchmark --- flow/ActorCollection.actor.cpp | 16 ++++++++++++---- 1 
file changed, 12 insertions(+), 4 deletions(-) diff --git a/flow/ActorCollection.actor.cpp b/flow/ActorCollection.actor.cpp index 92d167899f..220e456c21 100644 --- a/flow/ActorCollection.actor.cpp +++ b/flow/ActorCollection.actor.cpp @@ -62,14 +62,22 @@ ACTOR Future actorCollection( FutureStream> addActor, int* pC TEST_CASE("/flow/TraceEvent") { state unsigned i; - state double startTime = g_network->now(); - state std::string str = "hello"; + state double startTime; + state std::vector strings; + strings.reserve(1000); + for (i = 0; i < 100; ++i) { + for (int j = 0; j < 100; ++j) { + strings.emplace_back(g_random->randomAlphaNumeric(g_random->randomInt(1, 30))); + } + } + wait(delay(0)); + startTime = g_network->now(); for (i = 0; i < 100000; ++i) { for (unsigned j = 0; j < 100; ++j) { TraceEvent("TestTraceLineNoDebug") .detail("Num", g_random->randomInt(0, 1000)) .detail("Double", g_random->random01()) - .detail("hello", str); + .detail("hello", strings[g_random->randomInt(0, strings.size())]); } wait(delay(0)); } @@ -81,7 +89,7 @@ TEST_CASE("/flow/TraceEvent") { TraceEvent(SevDebug, "TestTraceLineNoDebug") .detail("Num", g_random->randomInt(0, 1000)) .detail("Double", g_random->random01()) - .detail("hello", str); + .detail("hello", strings[g_random->randomInt(0, strings.size())]); } wait(delay(0)); } From 8120a33bda328f08417d355dd94190e1fe9803c4 Mon Sep 17 00:00:00 2001 From: mpilman Date: Fri, 15 Mar 2019 10:24:03 -0700 Subject: [PATCH 08/24] Fix `>`-direction when initializing TraceEvent --- flow/Trace.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/flow/Trace.cpp b/flow/Trace.cpp index ffbdf2a434..5869122fc6 100644 --- a/flow/Trace.cpp +++ b/flow/Trace.cpp @@ -656,7 +656,7 @@ void removeTraceRole(std::string role) { TraceEvent::TraceEvent( const char* type, UID id ) : id(id), type(type), severity(SevInfo), initialized(false), enabled(true) {} TraceEvent::TraceEvent( Severity severity, const char* type, UID id ) : id(id), type(type), 
severity(severity), initialized(false), - enabled(g_network == nullptr || FLOW_KNOBS->MIN_TRACE_SEVERITY >= severity) {} + enabled(g_network == nullptr || FLOW_KNOBS->MIN_TRACE_SEVERITY <= severity) {} TraceEvent::TraceEvent( TraceInterval& interval, UID id ) : id(id), type(interval.type), severity(interval.severity), initialized(false), enabled(true) { init(interval); } @@ -664,7 +664,7 @@ TraceEvent::TraceEvent( Severity severity, TraceInterval& interval, UID id ) : id(id), type(interval.type), severity(severity), initialized(false), - enabled(g_network == nullptr || FLOW_KNOBS->MIN_TRACE_SEVERITY >= severity) { + enabled(g_network == nullptr || FLOW_KNOBS->MIN_TRACE_SEVERITY <= severity) { init(interval); } From c9ee7f54e7462198afe5228e0443d6752e38e357 Mon Sep 17 00:00:00 2001 From: mpilman Date: Fri, 15 Mar 2019 10:24:26 -0700 Subject: [PATCH 09/24] Add compound type to micro-benchmark --- flow/ActorCollection.actor.cpp | 44 ++++++++++++++++++++++++++++++---- 1 file changed, 39 insertions(+), 5 deletions(-) diff --git a/flow/ActorCollection.actor.cpp b/flow/ActorCollection.actor.cpp index 220e456c21..4fde2e8776 100644 --- a/flow/ActorCollection.actor.cpp +++ b/flow/ActorCollection.actor.cpp @@ -60,24 +60,55 @@ ACTOR Future actorCollection( FutureStream> addActor, int* pC } } +template<> +struct Traceable : std::true_type { + template + static std::string toString(Str&& s) { + return std::forward(s); + } +}; + +template +struct Traceable> { + static constexpr bool value = Traceable::value && Traceable::value; + static std::string toString(const std::pair& p) { + auto tStr = Traceable::toString(p.first); + auto uStr = Traceable::toString(p.second); + std::string result(tStr.size() + uStr.size() + 3, 'x'); + std::copy(tStr.begin(), tStr.end(), result.begin()); + auto iter = result.begin() + tStr.size(); + *(iter++) = ' '; + *(iter++) = '-'; + *(iter++) = ' '; + std::copy(uStr.begin(), uStr.end(), iter); + return result; + } +}; + + TEST_CASE("/flow/TraceEvent") { 
state unsigned i; state double startTime; state std::vector strings; - strings.reserve(1000); + strings.reserve(10000); for (i = 0; i < 100; ++i) { for (int j = 0; j < 100; ++j) { strings.emplace_back(g_random->randomAlphaNumeric(g_random->randomInt(1, 30))); } + wait(delay(0)); } - wait(delay(0)); + TraceEvent("pairsfilled") + .detail("MemoryUsage", getMemoryUsage()); startTime = g_network->now(); for (i = 0; i < 100000; ++i) { for (unsigned j = 0; j < 100; ++j) { + StringRef key(strings[g_random->randomInt(0, strings.size())]); + auto p = std::make_pair(key, g_random->randomInt(-1000, 1000)); TraceEvent("TestTraceLineNoDebug") .detail("Num", g_random->randomInt(0, 1000)) .detail("Double", g_random->random01()) - .detail("hello", strings[g_random->randomInt(0, strings.size())]); + .detail("str", strings[g_random->randomInt(0, strings.size())]) + .detail("pair", p); } wait(delay(0)); } @@ -86,10 +117,13 @@ TEST_CASE("/flow/TraceEvent") { startTime = g_network->now(); for (i = 0; i < 1000000; ++i) { for (unsigned j = 0; j < 100; ++j) { - TraceEvent(SevDebug, "TestTraceLineNoDebug") + StringRef key(strings[g_random->randomInt(0, strings.size())]); + auto p = std::make_pair(key, g_random->randomInt(-1000, 1000)); + TraceEvent(SevDebug, "TestTraceLineDebug") .detail("Num", g_random->randomInt(0, 1000)) .detail("Double", g_random->random01()) - .detail("hello", strings[g_random->randomInt(0, strings.size())]); + .detail("str", strings[g_random->randomInt(0, strings.size())]) + .detail("pair", p); } wait(delay(0)); } From ea67b742c7b78fdd1632a9a551e245d93a536e3e Mon Sep 17 00:00:00 2001 From: mpilman Date: Fri, 15 Mar 2019 15:32:39 -0700 Subject: [PATCH 10/24] Implemented Traceable for printable types --- fdbclient/FDBTypes.h | 24 ++++++ fdbserver/IDiskQueue.h | 9 ++- flow/ActorCollection.actor.cpp | 52 +++++++----- flow/Arena.h | 56 +++++++++++-- flow/Trace.h | 142 +++++++++++++++++++++++++++++---- 5 files changed, 241 insertions(+), 42 deletions(-) diff --git 
a/fdbclient/FDBTypes.h b/fdbclient/FDBTypes.h index 49e832fcdd..04a3efff5f 100644 --- a/fdbclient/FDBTypes.h +++ b/fdbclient/FDBTypes.h @@ -213,6 +213,23 @@ struct KeyRangeRef { }; }; +template<> +struct Traceable : std::true_type { + static std::string toString(const KeyRangeRef& value) { + auto begin = Traceable::toString(value.begin); + auto end = Traceable::toString(value.end); + std::string result; + result.reserve(begin.size() + end.size() + 3); + std::copy(begin.begin(), begin.end(), std::back_inserter(result)); + result.push_back(' '); + result.push_back('-'); + result.push_back(' '); + std::copy(end.begin(), end.end(), std::back_inserter(result)); + return result; + } +}; + + inline KeyRangeRef operator & (const KeyRangeRef& lhs, const KeyRangeRef& rhs) { KeyRef b = std::max(lhs.begin, rhs.begin), e = std::min(lhs.end, rhs.end); if (e < b) @@ -263,6 +280,13 @@ struct KeyValueRef { }; }; +template<> +struct Traceable : std::true_type { + static std::string toString(const KeyValueRef& value) { + return Traceable::toString(value.key) + format(":%d", value.value.size()); + } +}; + typedef Standalone Key; typedef Standalone Value; typedef Standalone KeyRange; diff --git a/fdbserver/IDiskQueue.h b/fdbserver/IDiskQueue.h index 4d677dd0ab..8f08d57fe2 100644 --- a/fdbserver/IDiskQueue.h +++ b/fdbserver/IDiskQueue.h @@ -37,7 +37,7 @@ public: location() : hi(0), lo(0) {} location(int64_t lo) : hi(0), lo(lo) {} location(int64_t hi, int64_t lo) : hi(hi), lo(lo) {} - operator std::string() { return format("%lld.%lld", hi, lo); } // FIXME: Return a 'HumanReadableDescription' instead of std::string, make TraceEvent::detail accept that (for safety) + operator std::string() const { return format("%lld.%lld", hi, lo); } // FIXME: Return a 'HumanReadableDescription' instead of std::string, make TraceEvent::detail accept that (for safety) template void serialize_unversioned(Ar& ar) { @@ -81,6 +81,13 @@ public: virtual StorageBytes getStorageBytes() = 0; }; +template<> +struct 
Traceable : std::true_type { + static std::string toString(const IDiskQueue::location& value) { + return value; + } +}; + // FIXME: One should be able to use SFINAE to choose between serialize and serialize_unversioned. template void load( Ar& ar, IDiskQueue::location& loc ) { loc.serialize_unversioned(ar); } template void save( Ar& ar, const IDiskQueue::location& loc ) { const_cast(loc).serialize_unversioned(ar); } diff --git a/flow/ActorCollection.actor.cpp b/flow/ActorCollection.actor.cpp index 4fde2e8776..3c5b1823fb 100644 --- a/flow/ActorCollection.actor.cpp +++ b/flow/ActorCollection.actor.cpp @@ -60,14 +60,6 @@ ACTOR Future actorCollection( FutureStream> addActor, int* pC } } -template<> -struct Traceable : std::true_type { - template - static std::string toString(Str&& s) { - return std::forward(s); - } -}; - template struct Traceable> { static constexpr bool value = Traceable::value && Traceable::value; @@ -90,24 +82,45 @@ TEST_CASE("/flow/TraceEvent") { state unsigned i; state double startTime; state std::vector strings; + state std::vector keyIdx; + state std::vector pairRnd; + state std::vector num; + state std::vector doub; + state std::vector strIdx; strings.reserve(10000); + keyIdx.reserve(1e6); + pairRnd.reserve(1e6); + num.reserve(1e6); + doub.reserve(1e6); + strIdx.reserve(1e6); for (i = 0; i < 100; ++i) { for (int j = 0; j < 100; ++j) { strings.emplace_back(g_random->randomAlphaNumeric(g_random->randomInt(1, 30))); } wait(delay(0)); } + for (i = 0; i < 1e6; ++i) { + keyIdx.emplace_back(g_random->randomInt(0, strings.size())); + pairRnd.emplace_back(g_random->randomInt(-1000, 1000)); + num.emplace_back(g_random->randomInt(0, 1000)); + doub.emplace_back(g_random->random01()); + strIdx.emplace_back(g_random->randomInt(0, strings.size())); + } TraceEvent("pairsfilled") .detail("MemoryUsage", getMemoryUsage()); + printf("Sleeping for 20 seconds - attach perf now to PID %d\n", getpid()); + wait(delay(20)); + printf("Done sleeping\n"); startTime = 
g_network->now(); for (i = 0; i < 100000; ++i) { for (unsigned j = 0; j < 100; ++j) { - StringRef key(strings[g_random->randomInt(0, strings.size())]); - auto p = std::make_pair(key, g_random->randomInt(-1000, 1000)); + int idx = (i+1)*j % keyIdx.size(); + StringRef key(strings[keyIdx[idx]]); + auto p = std::make_pair(key, pairRnd[idx]); TraceEvent("TestTraceLineNoDebug") - .detail("Num", g_random->randomInt(0, 1000)) - .detail("Double", g_random->random01()) - .detail("str", strings[g_random->randomInt(0, strings.size())]) + .detail("Num", num[idx]) + .detail("Double", doub[idx]) + .detail("str", strings[strIdx[idx]]) .detail("pair", p); } wait(delay(0)); @@ -117,17 +130,20 @@ TEST_CASE("/flow/TraceEvent") { startTime = g_network->now(); for (i = 0; i < 1000000; ++i) { for (unsigned j = 0; j < 100; ++j) { - StringRef key(strings[g_random->randomInt(0, strings.size())]); - auto p = std::make_pair(key, g_random->randomInt(-1000, 1000)); + int idx = (i+1)*j % keyIdx.size(); + StringRef key(strings[keyIdx[idx]]); + auto p = std::make_pair(key, pairRnd[idx]); TraceEvent(SevDebug, "TestTraceLineDebug") - .detail("Num", g_random->randomInt(0, 1000)) - .detail("Double", g_random->random01()) - .detail("str", strings[g_random->randomInt(0, strings.size())]) + .detail("Num", num[idx]) + .detail("Double", doub[idx]) + .detail("str", strings[strIdx[idx]]) .detail("pair", p); } wait(delay(0)); } TraceEvent("TraceDuration") .detail("Time", g_network->now() - startTime); + printf("benchmark done\n", getpid()); + wait(delay(10)); return Void(); } diff --git a/flow/Arena.h b/flow/Arena.h index f9677636a1..2586532d3f 100644 --- a/flow/Arena.h +++ b/flow/Arena.h @@ -33,6 +33,7 @@ #include #include #include +#include // TrackIt is a zero-size class for tracking constructions, destructions, and assignments of instances // of a class. 
Just inherit TrackIt from T to enable tracking of construction and destruction of @@ -559,15 +560,34 @@ public: } std::string toString() const { return std::string( (const char*)data, length ); } + + static bool isPrintable(char c) { return c > 32 && c < 127; } std::string printable() const { - std::string s; - for (int i = 0; i= 32 && b < 127 && b != '\\') s += (char)b; - else if (b == '\\') s += "\\\\"; - else s += format("\\x%02x", b); + std::string result; + int nonPrintables = 0; + int numBackslashes = 0; + for (auto c : *this) { + if (!isPrintable(c)) { + ++nonPrintables; + } else if (c == '\\') { + ++numBackslashes; + } } - return s; + result.reserve(size() - nonPrintables + (nonPrintables * 4) + numBackslashes); + for (auto c : *this) { + if (isPrintable(c)) { + result.push_back(c); + } else if (c == '\\') { + result.push_back('\\'); + result.push_back('\\'); + } else { + result.push_back('\\'); + result.push_back('x'); + result.push_back(base16Char((c / 16) % 16)); + result.push_back(base16Char(c % 16)); + } + } + return result; } std::string toHexString(int limit = -1) const { @@ -655,7 +675,7 @@ private: template<> struct Traceable : std::true_type { static std::string toString(const StringRef& value) { - return value.toString(); + return value.printable(); } }; @@ -873,6 +893,26 @@ private: m_capacity = requiredCapacity; } }; + +template +struct Traceable> { + constexpr static bool value = Traceable::value; + + static std::string toString(const VectorRef& value) { + std::stringstream ss; + bool first = true; + for (const auto& v : value) { + if (first) { + first = false; + } else { + ss << ' '; + } + ss << Traceable::toString(v); + } + return ss.str(); + } +}; + template inline void load( Archive& ar, VectorRef& value ) { // FIXME: range checking for length, here and in other serialize code diff --git a/flow/Trace.h b/flow/Trace.h index 19ff9d2c6b..9cf11b667b 100644 --- a/flow/Trace.h +++ b/flow/Trace.h @@ -135,6 +135,46 @@ private: struct 
DynamicEventMetric; +template +char base16Char(IntType c) { + switch (c) { + case 0: + return '0'; + case 1: + return '1'; + case 2: + return '2'; + case 3: + return '3'; + case 4: + return '4'; + case 5: + return '5'; + case 6: + return '6'; + case 7: + return '7'; + case 8: + return '8'; + case 9: + return '9'; + case 10: + return 'a'; + case 11: + return 'b'; + case 12: + return 'c'; + case 13: + return 'd'; + case 14: + return 'e'; + case 15: + return 'f'; + default: + UNSTOPPABLE_ASSERT(false); + } +} + // forward declare format from flow.h as we // can't include flow.h here std::string format(const char* form, ...); @@ -176,7 +216,94 @@ struct Traceable : std::true_type { } }; +template<> +struct Traceable : std::true_type { + static bool isPrintable(char c) { return c > 32 && c < 127; } + static std::string toString(const char* value) { + // if all characters are printable ascii, we simply return the string + int nonPrintables = 0; + int numBackslashes = 0; + auto val = value; + int size = 0; + while (auto c = *(val++)) { + ++size; + if (!Traceable::isPrintable(c)) { + ++nonPrintables; + } else if (c == '\\') { + ++numBackslashes; + } + } + if (nonPrintables == 0 && numBackslashes == 0) { + return std::string(value); + } + std::string result; + result.reserve(size - nonPrintables + (nonPrintables * 4) + numBackslashes); + while (auto c = *(val++)) { + if (Traceable::isPrintable(c)) { + result.push_back(c); + } else if (c == '\\') { + result.push_back('\\'); + result.push_back('\\'); + } else { + result.push_back('\\'); + result.push_back('x'); + result.push_back(base16Char((c / 16) % 16)); + result.push_back(base16Char(c % 16)); + } + } + return result; + } +}; +template +struct Traceable : std::true_type { + static std::string toString(const char* value) { + return Traceable::toString(value); + } +}; + +template<> +struct Traceable : std::true_type { + static std::string toString(const char* value) { + return Traceable::toString(value); + } +}; + 
+template<> +struct Traceable : std::true_type { + template + static std::string toString(Str&& value) { + // if all characters are printable ascii, we simply return the string + int nonPrintables = 0; + int numBackslashes = 0; + for (auto c : value) { + if (!Traceable::isPrintable(c)) { + ++nonPrintables; + } else if (c == '\\') { + ++numBackslashes; + } + } + if (nonPrintables == 0 && numBackslashes == 0) { + return std::forward(value); + } + std::string result; + result.reserve(value.size() - nonPrintables + (nonPrintables * 4) + numBackslashes); + for (auto c : value) { + if (Traceable::isPrintable(c)) { + result.push_back(c); + } else if (c == '\\') { + result.push_back('\\'); + result.push_back('\\'); + } else { + result.push_back('\\'); + result.push_back('x'); + result.push_back(base16Char((c / 16) % 16)); + result.push_back(base16Char(c % 16)); + } + } + return result; + } +}; template struct SpecialTraceMetricType @@ -244,21 +371,6 @@ struct TraceEvent { } return *this; } - TraceEvent& detail(std::string key, std::string value) { - if (enabled && init()) { - init(); - addMetric(key.c_str(), value, value); - return detailImpl(std::string(key), std::move(value), false); - } - return *this; - } - TraceEvent& detail(const char* key, std::string value) { - if (enabled && init()) { - addMetric(key, value, value); - return detailImpl(std::string(key), std::move(value), false); - } - return *this; - } TraceEvent& detailf( std::string key, const char* valueFormat, ... 
); private: template From 1c16f87a4e4f0c00f07baac4869e5c4c56e9bba2 Mon Sep 17 00:00:00 2001 From: mpilman Date: Mon, 18 Mar 2019 15:03:43 -0700 Subject: [PATCH 11/24] Remove trace-calls to printable (in non-workloads) --- fdbbackup/backup.actor.cpp | 2 +- fdbcli/fdbcli.actor.cpp | 8 +-- fdbclient/BackupAgentBase.actor.cpp | 16 ++--- fdbclient/BackupContainer.actor.cpp | 2 +- fdbclient/ClientLogEvents.h | 16 ++--- fdbclient/DatabaseBackupAgent.actor.cpp | 22 +++--- fdbclient/FileBackupAgent.actor.cpp | 22 +++--- fdbclient/KeyBackedTypes.h | 6 +- fdbclient/ManagementAPI.actor.cpp | 2 +- fdbclient/MultiVersionTransaction.actor.cpp | 2 +- fdbclient/NativeAPI.actor.cpp | 56 +++++++-------- fdbclient/RYWIterator.cpp | 24 +++---- fdbclient/ReadYourWrites.actor.cpp | 28 ++++---- fdbclient/SnapshotCache.h | 2 +- fdbclient/SystemData.cpp | 2 +- fdbclient/TaskBucket.actor.cpp | 40 +++++------ fdbclient/WriteMap.h | 4 +- fdbserver/ApplyMetadataMutation.h | 18 ++--- fdbserver/ClusterController.actor.cpp | 2 +- fdbserver/Coordination.actor.cpp | 10 +-- fdbserver/DataDistribution.actor.cpp | 18 ++--- fdbserver/DataDistributionQueue.actor.cpp | 28 ++++---- fdbserver/DataDistributionTracker.actor.cpp | 30 ++++---- fdbserver/KeyValueStoreMemory.actor.cpp | 12 ++-- fdbserver/KeyValueStoreSQLite.actor.cpp | 4 +- fdbserver/LatencyBandConfig.cpp | 4 +- fdbserver/LogRouter.actor.cpp | 2 +- fdbserver/MasterProxyServer.actor.cpp | 4 +- fdbserver/MoveKeys.actor.cpp | 46 ++++++------ fdbserver/OldTLogServer_4_6.actor.cpp | 10 +-- fdbserver/OldTLogServer_6_0.actor.cpp | 12 ++-- fdbserver/SimulatedCluster.actor.cpp | 4 +- fdbserver/Status.actor.cpp | 2 +- fdbserver/StorageMetrics.actor.h | 4 +- fdbserver/TLogServer.actor.cpp | 12 ++-- fdbserver/fdbserver.actor.cpp | 6 +- fdbserver/masterserver.actor.cpp | 18 ++--- fdbserver/storageserver.actor.cpp | 80 ++++++++++----------- fdbserver/tester.actor.cpp | 26 +++---- fdbserver/worker.actor.cpp | 8 +-- 40 files changed, 307 insertions(+), 307 
deletions(-) diff --git a/fdbbackup/backup.actor.cpp b/fdbbackup/backup.actor.cpp index 7a58a405bc..1386caed46 100644 --- a/fdbbackup/backup.actor.cpp +++ b/fdbbackup/backup.actor.cpp @@ -1387,7 +1387,7 @@ ACTOR Future cleanupStatus(Reference tr, std::s readMore = true; } catch(Error &e) { // If doc can't be parsed or isn't alive, delete it. - TraceEvent(SevWarn, "RemovedDeadBackupLayerStatus").detail("Key", printable(docs[i].key)); + TraceEvent(SevWarn, "RemovedDeadBackupLayerStatus").detail("Key", docs[i].key); tr->clear(docs[i].key); // If limit is 1 then read more. if(limit == 1) diff --git a/fdbcli/fdbcli.actor.cpp b/fdbcli/fdbcli.actor.cpp index 17001965d4..e195087499 100644 --- a/fdbcli/fdbcli.actor.cpp +++ b/fdbcli/fdbcli.actor.cpp @@ -2292,7 +2292,7 @@ void fdbcli_comp_cmd(std::string const& text, std::vector& lc) { void LogCommand(std::string line, UID randomID, std::string errMsg) { printf("%s\n", errMsg.c_str()); - TraceEvent(SevInfo, "CLICommandLog", randomID).detail("Command", printable(StringRef(line))).detail("Error", printable(StringRef(errMsg))); + TraceEvent(SevInfo, "CLICommandLog", randomID).detail("Command", StringRef(line)).detail("Error", StringRef(errMsg)); } struct CLIOptions { @@ -2523,7 +2523,7 @@ ACTOR Future cli(CLIOptions opt, LineNoise* plinenoise) { try { state UID randomID = g_random->randomUniqueID(); - TraceEvent(SevInfo, "CLICommandLog", randomID).detail("Command", printable(StringRef(line))); + TraceEvent(SevInfo, "CLICommandLog", randomID).detail("Command", StringRef(line)); bool malformed, partial; state std::vector> parsed = parseLine(line, malformed, partial); @@ -3345,7 +3345,7 @@ ACTOR Future cli(CLIOptions opt, LineNoise* plinenoise) { } catch(Error &e) { //options->setOption() prints error message - TraceEvent(SevWarn, "CLISetOptionError").error(e).detail("Option", printable(tokens[2])); + TraceEvent(SevWarn, "CLISetOptionError").error(e).detail("Option", tokens[2]); is_error = true; } } @@ -3357,7 +3357,7 @@ ACTOR 
Future cli(CLIOptions opt, LineNoise* plinenoise) { is_error = true; } - TraceEvent(SevInfo, "CLICommandLog", randomID).detail("Command", printable(StringRef(line))).detail("IsError", is_error); + TraceEvent(SevInfo, "CLICommandLog", randomID).detail("Command", StringRef(line)).detail("IsError", is_error); } catch (Error& e) { if(e.code() != error_code_actor_cancelled) diff --git a/fdbclient/BackupAgentBase.actor.cpp b/fdbclient/BackupAgentBase.actor.cpp index 1ddc58901c..21eda31c39 100644 --- a/fdbclient/BackupAgentBase.actor.cpp +++ b/fdbclient/BackupAgentBase.actor.cpp @@ -149,7 +149,7 @@ Standalone> getLogRanges(Version beginVersion, Version en Key baLogRangePrefix = destUidValue.withPrefix(backupLogKeys.begin); - //TraceEvent("GetLogRanges").detail("DestUidValue", destUidValue).detail("Prefix", printable(StringRef(baLogRangePrefix))); + //TraceEvent("GetLogRanges").detail("DestUidValue", destUidValue).detail("Prefix", StringRef(baLogRangePrefix)); for (int64_t vblock = beginVersion / blockSize; vblock < (endVersion + blockSize - 1) / blockSize; ++vblock) { int64_t tb = vblock * blockSize / CLIENT_KNOBS->LOG_RANGE_BLOCK_SIZE; @@ -172,7 +172,7 @@ Standalone> getApplyRanges(Version beginVersion, Version Key baLogRangePrefix = backupUid.withPrefix(applyLogKeys.begin); - //TraceEvent("GetLogRanges").detail("BackupUid", backupUid).detail("Prefix", printable(StringRef(baLogRangePrefix))); + //TraceEvent("GetLogRanges").detail("BackupUid", backupUid).detail("Prefix", StringRef(baLogRangePrefix)); for (int64_t vblock = beginVersion / CLIENT_KNOBS->APPLY_BLOCK_SIZE; vblock < (endVersion + CLIENT_KNOBS->APPLY_BLOCK_SIZE - 1) / CLIENT_KNOBS->APPLY_BLOCK_SIZE; ++vblock) { int64_t tb = vblock * CLIENT_KNOBS->APPLY_BLOCK_SIZE / CLIENT_KNOBS->LOG_RANGE_BLOCK_SIZE; @@ -221,7 +221,7 @@ Standalone> decodeBackupLogValue(StringRef value) { offset += sizeof(uint64_t); if (protocolVersion <= 0x0FDB00A200090001){ TraceEvent(SevError, 
"DecodeBackupLogValue").detail("IncompatibleProtocolVersion", protocolVersion) - .detail("ValueSize", value.size()).detail("Value", printable(value)); + .detail("ValueSize", value.size()).detail("Value", value); throw incompatible_protocol_version(); } @@ -267,7 +267,7 @@ Standalone> decodeBackupLogValue(StringRef value) { return result; } catch (Error& e) { - TraceEvent(e.code() == error_code_restore_missing_data ? SevWarn : SevError, "BA_DecodeBackupLogValue").error(e).GetLastError().detail("ValueSize", value.size()).detail("Value", printable(value)); + TraceEvent(e.code() == error_code_restore_missing_data ? SevWarn : SevError, "BA_DecodeBackupLogValue").error(e).GetLastError().detail("ValueSize", value.size()).detail("Value", value); throw; } } @@ -280,7 +280,7 @@ void decodeBackupLogValue(Arena& arena, VectorRef& result, int& mut offset += sizeof(uint64_t); if (protocolVersion <= 0x0FDB00A200090001){ TraceEvent(SevError, "DecodeBackupLogValue").detail("IncompatibleProtocolVersion", protocolVersion) - .detail("ValueSize", value.size()).detail("Value", printable(value)); + .detail("ValueSize", value.size()).detail("Value", value); throw incompatible_protocol_version(); } @@ -375,7 +375,7 @@ void decodeBackupLogValue(Arena& arena, VectorRef& result, int& mut } } catch (Error& e) { - TraceEvent(e.code() == error_code_restore_missing_data ? SevWarn : SevError, "BA_DecodeBackupLogValue").error(e).GetLastError().detail("ValueSize", value.size()).detail("Value", printable(value)); + TraceEvent(e.code() == error_code_restore_missing_data ? 
SevWarn : SevError, "BA_DecodeBackupLogValue").error(e).GetLastError().detail("ValueSize", value.size()).detail("Value", value); throw; } } @@ -384,7 +384,7 @@ void logErrorWorker(Reference tr, Key keyErrors, std: tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS); tr->setOption(FDBTransactionOptions::LOCK_AWARE); if(now() - lastErrorTime > CLIENT_KNOBS->BACKUP_ERROR_DELAY) { - TraceEvent("BA_LogError").detail("Key", printable(keyErrors)).detail("Message", message); + TraceEvent("BA_LogError").detail("Key", keyErrors).detail("Message", message); lastErrorTime = now(); } tr->set(keyErrors, message); @@ -496,7 +496,7 @@ ACTOR Future readCommitted(Database cx, PromiseStream results, Fu int index(0); for (auto & s : rangevalue){ uint64_t groupKey = groupBy(s.key).first; - //TraceEvent("Log_ReadCommitted").detail("GroupKey", groupKey).detail("SkipGroup", skipGroup).detail("NextKey", printable(nextKey.key)).detail("End", printable(end.key)).detail("Valuesize", value.size()).detail("Index",index++).detail("Size",s.value.size()); + //TraceEvent("Log_ReadCommitted").detail("GroupKey", groupKey).detail("SkipGroup", skipGroup).detail("NextKey", nextKey.key).detail("End", end.key).detail("Valuesize", value.size()).detail("Index",index++).detail("Size",s.value.size()); if (groupKey != skipGroup){ if (rcGroup.version == -1){ rcGroup.version = tr.getReadVersion().get(); diff --git a/fdbclient/BackupContainer.actor.cpp b/fdbclient/BackupContainer.actor.cpp index 8b3f46945f..fa4c5220e7 100644 --- a/fdbclient/BackupContainer.actor.cpp +++ b/fdbclient/BackupContainer.actor.cpp @@ -1380,7 +1380,7 @@ public: m_bucket = kv.second; continue; } - TraceEvent(SevWarn, "BackupContainerBlobStoreInvalidParameter").detail("Name", printable(kv.first)).detail("Value", printable(kv.second)); + TraceEvent(SevWarn, "BackupContainerBlobStoreInvalidParameter").detail("Name", kv.first).detail("Value", kv.second); IBackupContainer::lastOpenError = format("Unknown URL parameter: '%s'", 
kv.first.c_str()); throw backup_invalid_url(); } diff --git a/fdbclient/ClientLogEvents.h b/fdbclient/ClientLogEvents.h index 09d1685e52..14191bf209 100644 --- a/fdbclient/ClientLogEvents.h +++ b/fdbclient/ClientLogEvents.h @@ -81,7 +81,7 @@ namespace FdbClientLogEvents { Key key; void logEvent(std::string id) const { - TraceEvent("TransactionTrace_Get").detail("TransactionID", id).detail("Latency", latency).detail("ValueSizeBytes", valueSize).detail("Key", printable(key)); + TraceEvent("TransactionTrace_Get").detail("TransactionID", id).detail("Latency", latency).detail("ValueSizeBytes", valueSize).detail("Key", key); } }; @@ -102,7 +102,7 @@ namespace FdbClientLogEvents { Key endKey; void logEvent(std::string id) const { - TraceEvent("TransactionTrace_GetRange").detail("TransactionID", id).detail("Latency", latency).detail("RangeSizeBytes", rangeSize).detail("StartKey", printable(startKey)).detail("EndKey", printable(endKey)); + TraceEvent("TransactionTrace_GetRange").detail("TransactionID", id).detail("Latency", latency).detail("RangeSizeBytes", rangeSize).detail("StartKey", startKey).detail("EndKey", endKey); } }; @@ -124,11 +124,11 @@ namespace FdbClientLogEvents { void logEvent(std::string id) const { for (auto &read_range : req.transaction.read_conflict_ranges) { - TraceEvent("TransactionTrace_Commit_ReadConflictRange").detail("TransactionID", id).detail("Begin", printable(read_range.begin)).detail("End", printable(read_range.end)); + TraceEvent("TransactionTrace_Commit_ReadConflictRange").detail("TransactionID", id).detail("Begin", read_range.begin).detail("End", read_range.end); } for (auto &write_range : req.transaction.write_conflict_ranges) { - TraceEvent("TransactionTrace_Commit_WriteConflictRange").detail("TransactionID", id).detail("Begin", printable(write_range.begin)).detail("End", printable(write_range.end)); + TraceEvent("TransactionTrace_Commit_WriteConflictRange").detail("TransactionID", id).detail("Begin", write_range.begin).detail("End", 
write_range.end); } for (auto &mutation : req.transaction.mutations) { @@ -154,7 +154,7 @@ namespace FdbClientLogEvents { Key key; void logEvent(std::string id) const { - TraceEvent("TransactionTrace_GetError").detail("TransactionID", id).detail("ErrCode", errCode).detail("Key", printable(key)); + TraceEvent("TransactionTrace_GetError").detail("TransactionID", id).detail("ErrCode", errCode).detail("Key", key); } }; @@ -174,7 +174,7 @@ namespace FdbClientLogEvents { Key endKey; void logEvent(std::string id) const { - TraceEvent("TransactionTrace_GetRangeError").detail("TransactionID", id).detail("ErrCode", errCode).detail("StartKey", printable(startKey)).detail("EndKey", printable(endKey)); + TraceEvent("TransactionTrace_GetRangeError").detail("TransactionID", id).detail("ErrCode", errCode).detail("StartKey", startKey).detail("EndKey", endKey); } }; @@ -194,11 +194,11 @@ namespace FdbClientLogEvents { void logEvent(std::string id) const { for (auto &read_range : req.transaction.read_conflict_ranges) { - TraceEvent("TransactionTrace_CommitError_ReadConflictRange").detail("TransactionID", id).detail("Begin", printable(read_range.begin)).detail("End", printable(read_range.end)); + TraceEvent("TransactionTrace_CommitError_ReadConflictRange").detail("TransactionID", id).detail("Begin", read_range.begin).detail("End", read_range.end); } for (auto &write_range : req.transaction.write_conflict_ranges) { - TraceEvent("TransactionTrace_CommitError_WriteConflictRange").detail("TransactionID", id).detail("Begin", printable(write_range.begin)).detail("End", printable(write_range.end)); + TraceEvent("TransactionTrace_CommitError_WriteConflictRange").detail("TransactionID", id).detail("Begin", write_range.begin).detail("End", write_range.end); } for (auto &mutation : req.transaction.mutations) { diff --git a/fdbclient/DatabaseBackupAgent.actor.cpp b/fdbclient/DatabaseBackupAgent.actor.cpp index 4137b1b6a2..60bc5d5c36 100644 --- a/fdbclient/DatabaseBackupAgent.actor.cpp +++ 
b/fdbclient/DatabaseBackupAgent.actor.cpp @@ -115,7 +115,7 @@ namespace dbBackup { Future checkTaskVersion(Tr tr, Reference task, StringRef name, uint32_t version) { uint32_t taskVersion = task->getVersion(); if (taskVersion > version) { - TraceEvent(SevError, "BA_BackupRangeTaskFuncExecute").detail("TaskVersion", taskVersion).detail("Name", printable(name)).detail("Version", version); + TraceEvent(SevError, "BA_BackupRangeTaskFuncExecute").detail("TaskVersion", taskVersion).detail("Name", name).detail("Version", version); wait(logError(tr, Subspace(databaseBackupPrefixRange.begin).get(BackupAgentBase::keyErrors).pack(task->params[BackupAgentBase::keyConfigLogUid]), format("ERROR: %s task version `%lu' is greater than supported version `%lu'", task->params[Task::reservedTaskParamKeyType].toString().c_str(), (unsigned long)taskVersion, (unsigned long)version))); @@ -305,7 +305,7 @@ namespace dbBackup { return Void(); } - //TraceEvent("DBA_Range").detail("Range", printable(KeyRangeRef(rangeBegin, rangeEnd))).detail("Version", values.second).detail("Size", values.first.size()).detail("LogUID", printable(task->params[BackupAgentBase::keyConfigLogUid])).detail("AddPrefix", printable(addPrefix)).detail("RemovePrefix", printable(removePrefix)); + //TraceEvent("DBA_Range").detail("Range", KeyRangeRef(rangeBegin, rangeEnd)).detail("Version", values.second).detail("Size", values.first.size()).detail("LogUID", task->params[BackupAgentBase::keyConfigLogUid]).detail("AddPrefix", addPrefix).detail("RemovePrefix", removePrefix); Subspace krv(conf.get(DatabaseBackupAgent::keyRangeVersions)); state KeyRange versionRange = singleKeyRange(krv.pack(values.second)); @@ -340,7 +340,7 @@ namespace dbBackup { break; if( values.first[valueLoc].key >= backupVersions.get()[versionLoc].key ) { - //TraceEvent("DBA_Set", debugID).detail("Key", printable(values.first[valueLoc].key)).detail("Value", printable(values.first[valueLoc].value)); + //TraceEvent("DBA_Set", debugID).detail("Key", 
values.first[valueLoc].key).detail("Value", values.first[valueLoc].value); tr->set(values.first[valueLoc].key.removePrefix(removePrefix).withPrefix(addPrefix), values.first[valueLoc].value); bytesSet += values.first[valueLoc].expectedSize() - removePrefix.expectedSize() + addPrefix.expectedSize(); } @@ -353,7 +353,7 @@ namespace dbBackup { wait(tr->commit()); Params.bytesWritten().set(task, Params.bytesWritten().getOrDefault(task) + bytesSet); - //TraceEvent("DBA_SetComplete", debugID).detail("Ver", values.second).detail("LogVersion", logVersion).detail("ReadVersion", readVer).detail("CommitVer", tr.getCommittedVersion()).detail("Range", printable(versionRange)); + //TraceEvent("DBA_SetComplete", debugID).detail("Ver", values.second).detail("LogVersion", logVersion).detail("ReadVersion", readVer).detail("CommitVer", tr.getCommittedVersion()).detail("Range", versionRange); if(backupVersions.get().more) { tr->reset(); @@ -737,7 +737,7 @@ namespace dbBackup { state Version stopVersionData = stopValue.present() ? 
BinaryReader::fromStringRef(stopValue.get(), Unversioned()) : -1; if(endVersion - beginVersion > g_random->randomInt64(0, CLIENT_KNOBS->BACKUP_VERSION_DELAY)) { - TraceEvent("DBA_CopyLogs").detail("BeginVersion", beginVersion).detail("ApplyVersion", applyVersion).detail("EndVersion", endVersion).detail("StopVersionData", stopVersionData).detail("LogUID", printable(task->params[BackupAgentBase::keyConfigLogUid])); + TraceEvent("DBA_CopyLogs").detail("BeginVersion", beginVersion).detail("ApplyVersion", applyVersion).detail("EndVersion", endVersion).detail("StopVersionData", stopVersionData).detail("LogUID", task->params[BackupAgentBase::keyConfigLogUid]); } if ((stopVersionData == -1) || (stopVersionData >= applyVersion)) { @@ -941,7 +941,7 @@ namespace dbBackup { Optional stopWhenDone = wait(fStopWhenDone); if(endVersion - beginVersion > g_random->randomInt64(0, CLIENT_KNOBS->BACKUP_VERSION_DELAY)) { - TraceEvent("DBA_CopyDiffLogs").detail("BeginVersion", beginVersion).detail("EndVersion", endVersion).detail("LogUID", printable(task->params[BackupAgentBase::keyConfigLogUid])); + TraceEvent("DBA_CopyDiffLogs").detail("BeginVersion", beginVersion).detail("EndVersion", endVersion).detail("LogUID", task->params[BackupAgentBase::keyConfigLogUid]); } if (!stopWhenDone.present()) { @@ -1410,7 +1410,7 @@ namespace dbBackup { state UID logUid = BinaryReader::fromStringRef(task->params[DatabaseBackupAgent::keyConfigLogUid], Unversioned()); - TraceEvent("DBA_Complete").detail("RestoreVersion", restoreVersion).detail("Differential", stopWhenDone.present()).detail("LogUID", printable(task->params[BackupAgentBase::keyConfigLogUid])); + TraceEvent("DBA_Complete").detail("RestoreVersion", restoreVersion).detail("Differential", stopWhenDone.present()).detail("LogUID", task->params[BackupAgentBase::keyConfigLogUid]); // Start the complete task, if differential is not enabled if (stopWhenDone.present()) { @@ -1935,8 +1935,8 @@ public: else wait(checkDatabaseLock(tr, logUid)); - 
TraceEvent("DBA_Submit").detail("LogUid", logUid).detail("Lock", lockDB).detail("LogUID", printable(logUidValue)).detail("Tag", printable(tagName)) - .detail("Key", printable(backupAgent->states.get(logUidValue).pack(DatabaseBackupAgent::keyFolderId))).detail("MapPrefix", printable(mapPrefix)); + TraceEvent("DBA_Submit").detail("LogUid", logUid).detail("Lock", lockDB).detail("LogUID", logUidValue).detail("Tag", tagName) + .detail("Key", backupAgent->states.get(logUidValue).pack(DatabaseBackupAgent::keyFolderId)).detail("MapPrefix", mapPrefix); return Void(); } @@ -1944,7 +1944,7 @@ public: ACTOR static Future unlockBackup(DatabaseBackupAgent* backupAgent, Reference tr, Key tagName) { UID logUid = wait(backupAgent->getLogUid(tr, tagName)); wait(unlockDatabase(tr, logUid)); - TraceEvent("DBA_Unlock").detail("Tag", printable(tagName)); + TraceEvent("DBA_Unlock").detail("Tag", tagName); return Void(); } @@ -1997,7 +1997,7 @@ public: tr2.setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS); tr2.setOption(FDBTransactionOptions::LOCK_AWARE); state Optional backupUid = wait(tr2.get(backupAgent->states.get(BinaryWriter::toValue(destlogUid, Unversioned())).pack(DatabaseBackupAgent::keyFolderId))); - TraceEvent("DBA_SwitchoverBackupUID").detail("Uid", printable(backupUid)).detail("Key", printable(backupAgent->states.get(BinaryWriter::toValue(destlogUid, Unversioned())).pack(DatabaseBackupAgent::keyFolderId))); + TraceEvent("DBA_SwitchoverBackupUID").detail("Uid", backupUid).detail("Key", backupAgent->states.get(BinaryWriter::toValue(destlogUid, Unversioned())).pack(DatabaseBackupAgent::keyFolderId)); if(!backupUid.present()) throw backup_duplicate(); Optional v = wait(tr2.get(BinaryWriter::toValue(destlogUid, Unversioned()).withPrefix(applyMutationsBeginRange.begin))); diff --git a/fdbclient/FileBackupAgent.actor.cpp b/fdbclient/FileBackupAgent.actor.cpp index 57e61d661e..c142a16da3 100644 --- a/fdbclient/FileBackupAgent.actor.cpp +++ b/fdbclient/FileBackupAgent.actor.cpp @@ 
-741,7 +741,7 @@ namespace fileBackup { if (taskVersion > version) { state Error err = task_invalid_version(); - TraceEvent(SevWarn, "BA_BackupRangeTaskFuncExecute").detail("TaskVersion", taskVersion).detail("Name", printable(name)).detail("Version", version); + TraceEvent(SevWarn, "BA_BackupRangeTaskFuncExecute").detail("TaskVersion", taskVersion).detail("Name", name).detail("Version", version); if (KeyBackedConfig::TaskParams.uid().exists(task)) { std::string msg = format("%s task version `%lu' is greater than supported version `%lu'", task->params[Task::reservedTaskParamKeyType].toString().c_str(), (unsigned long)taskVersion, (unsigned long)version); wait(BackupConfig(task).logError(cx, err, msg)); @@ -806,7 +806,7 @@ namespace fileBackup { TEST(true); // Canceling old backup task TraceEvent(SevInfo, "FileBackupCancelOldTask") - .detail("Task", printable(task->params[Task::reservedTaskParamKeyType])) + .detail("Task", task->params[Task::reservedTaskParamKeyType]) .detail("TagName", tagName); wait(abortFiveZeroBackup(&backupAgent, tr, tagName)); @@ -876,7 +876,7 @@ namespace fileBackup { TEST(true); // Canceling 5.1 backup task TraceEvent(SevInfo, "FileBackupCancelFiveOneTask") - .detail("Task", printable(task->params[Task::reservedTaskParamKeyType])) + .detail("Task", task->params[Task::reservedTaskParamKeyType]) .detail("TagName", tagName); wait(abortFiveOneBackup(&backupAgent, tr, tagName)); @@ -2688,13 +2688,13 @@ namespace fileBackup { .detail("ReadOffset", readOffset) .detail("ReadLen", readLen) .detail("CommitVersion", tr->getCommittedVersion()) - .detail("BeginRange", printable(trRange.begin)) - .detail("EndRange", printable(trRange.end)) + .detail("BeginRange", trRange.begin) + .detail("EndRange", trRange.end) .detail("StartIndex", start) .detail("EndIndex", i) .detail("DataSize", data.size()) .detail("Bytes", txBytes) - .detail("OriginalFileRange", printable(originalFileRange)) + .detail("OriginalFileRange", originalFileRange) .detail("TaskInstance", 
THIS_ADDR); // Commit succeeded, so advance starting point @@ -3005,7 +3005,7 @@ namespace fileBackup { TraceEvent("FileRestoreDispatch") .detail("RestoreUID", restore.getUid()) .detail("BeginVersion", beginVersion) - .detail("BeginFile", printable(Params.beginFile().get(task))) + .detail("BeginFile", Params.beginFile().get(task)) .detail("BeginBlock", Params.beginBlock().get(task)) .detail("RestoreVersion", restoreVersion) .detail("ApplyLag", applyLag) @@ -3019,7 +3019,7 @@ namespace fileBackup { TraceEvent("FileRestoreDispatch") .detail("RestoreUID", restore.getUid()) .detail("BeginVersion", beginVersion) - .detail("BeginFile", printable(Params.beginFile().get(task))) + .detail("BeginFile", Params.beginFile().get(task)) .detail("BeginBlock", Params.beginBlock().get(task)) .detail("RestoreVersion", restoreVersion) .detail("ApplyLag", applyLag) @@ -3033,7 +3033,7 @@ namespace fileBackup { TraceEvent("FileRestoreDispatch") .detail("RestoreUID", restore.getUid()) .detail("BeginVersion", beginVersion) - .detail("BeginFile", printable(Params.beginFile().get(task))) + .detail("BeginFile", Params.beginFile().get(task)) .detail("BeginBlock", Params.beginBlock().get(task)) .detail("ApplyLag", applyLag) .detail("Decision", "restore_complete") @@ -3141,7 +3141,7 @@ namespace fileBackup { TraceEvent("FileRestoreDispatch") .detail("RestoreUID", restore.getUid()) .detail("BeginVersion", beginVersion) - .detail("BeginFile", printable(Params.beginFile().get(task))) + .detail("BeginFile", Params.beginFile().get(task)) .detail("BeginBlock", Params.beginBlock().get(task)) .detail("EndVersion", endVersion) .detail("ApplyLag", applyLag) @@ -3187,7 +3187,7 @@ namespace fileBackup { TraceEvent("FileRestoreDispatch") .detail("RestoreUID", restore.getUid()) .detail("BeginVersion", beginVersion) - .detail("BeginFile", printable(Params.beginFile().get(task))) + .detail("BeginFile", Params.beginFile().get(task)) .detail("BeginBlock", Params.beginBlock().get(task)) .detail("EndVersion", 
endVersion) .detail("ApplyLag", applyLag) diff --git a/fdbclient/KeyBackedTypes.h b/fdbclient/KeyBackedTypes.h index 23e5d29250..9391843a48 100644 --- a/fdbclient/KeyBackedTypes.h +++ b/fdbclient/KeyBackedTypes.h @@ -124,9 +124,9 @@ public: return map(get(tr, snapshot), [=](Optional val) -> T { if (!val.present()) { TraceEvent(SevInfo, "KeyBackedProperty_KeyNotFound") - .detail("Key", printable(keyCopy)) - .detail("Err", err.code()) - .detail("ParentTrace", backtrace.c_str()); + .detail("Key", keyCopy) + .detail("Err", err.code()) + .detail("ParentTrace", backtrace.c_str()); throw err; } diff --git a/fdbclient/ManagementAPI.actor.cpp b/fdbclient/ManagementAPI.actor.cpp index ae61e0d467..2821546229 100644 --- a/fdbclient/ManagementAPI.actor.cpp +++ b/fdbclient/ManagementAPI.actor.cpp @@ -212,7 +212,7 @@ ConfigurationResult::Type buildConfiguration( std::vector const& mode for( auto t = m.begin(); t != m.end(); ++t ) { if( outConf.count( t->first ) ) { - TraceEvent(SevWarnAlways, "ConflictingOption").detail("Option", printable(StringRef(t->first))); + TraceEvent(SevWarnAlways, "ConflictingOption").detail("Option", t->first); return ConfigurationResult::CONFLICTING_OPTIONS; } outConf[t->first] = t->second; diff --git a/fdbclient/MultiVersionTransaction.actor.cpp b/fdbclient/MultiVersionTransaction.actor.cpp index a5031b8180..81e1ca1d28 100644 --- a/fdbclient/MultiVersionTransaction.actor.cpp +++ b/fdbclient/MultiVersionTransaction.actor.cpp @@ -763,7 +763,7 @@ void MultiVersionDatabase::DatabaseState::stateChanged() { } catch(Error &e) { optionLock.leave(); - TraceEvent(SevError, "ClusterVersionChangeOptionError").error(e).detail("Option", option.first).detail("OptionValue", printable(option.second)).detail("LibPath", clients[newIndex]->libPath); + TraceEvent(SevError, "ClusterVersionChangeOptionError").error(e).detail("Option", option.first).detail("OptionValue", option.second).detail("LibPath", clients[newIndex]->libPath); connectionAttempts[newIndex]->connected = 
false; clients[newIndex]->failed = true; MultiVersionApi::api->updateSupportedVersions(); diff --git a/fdbclient/NativeAPI.actor.cpp b/fdbclient/NativeAPI.actor.cpp index 62cd4940c0..ec09e35e4d 100644 --- a/fdbclient/NativeAPI.actor.cpp +++ b/fdbclient/NativeAPI.actor.cpp @@ -1126,13 +1126,13 @@ AddressExclusion AddressExclusion::parse( StringRef const& key ) { auto addr = NetworkAddress::parse(key.toString()); if (addr.isTLS()) { TraceEvent(SevWarnAlways, "AddressExclusionParseError") - .detail("String", printable(key)) + .detail("String", key) .detail("Description", "Address inclusion string should not include `:tls' suffix."); return AddressExclusion(); } return AddressExclusion(addr.ip, addr.port); } catch (Error& ) { - TraceEvent(SevWarnAlways, "AddressExclusionParseError").detail("String", printable(key)); + TraceEvent(SevWarnAlways, "AddressExclusionParseError").detail("String", key); return AddressExclusion(); } } @@ -1334,7 +1334,7 @@ ACTOR Future> getValue( Future version, Key key, Databa g_traceBatch.addAttach("GetValueAttachID", info.debugID.get().first(), getValueID.get().first()); g_traceBatch.addEvent("GetValueDebug", getValueID.get().first(), "NativeAPI.getValue.Before"); //.detail("TaskID", g_network->getCurrentTask()); /*TraceEvent("TransactionDebugGetValueInfo", getValueID.get()) - .detail("Key", printable(key)) + .detail("Key", key) .detail("ReqVersion", ver) .detail("Servers", describe(ssi.second->get()));*/ } @@ -1356,7 +1356,7 @@ ACTOR Future> getValue( Future version, Key key, Databa if( info.debugID.present() ) { g_traceBatch.addEvent("GetValueDebug", getValueID.get().first(), "NativeAPI.getValue.After"); //.detail("TaskID", g_network->getCurrentTask()); /*TraceEvent("TransactionDebugGetValueDone", getValueID.get()) - .detail("Key", printable(key)) + .detail("Key", key) .detail("ReqVersion", ver) .detail("ReplySize", reply.value.present() ? 
reply.value.get().size() : -1);*/ } @@ -1367,7 +1367,7 @@ ACTOR Future> getValue( Future version, Key key, Databa if( info.debugID.present() ) { g_traceBatch.addEvent("GetValueDebug", getValueID.get().first(), "NativeAPI.getValue.Error"); //.detail("TaskID", g_network->getCurrentTask()); /*TraceEvent("TransactionDebugGetValueDone", getValueID.get()) - .detail("Key", printable(key)) + .detail("Key", key) .detail("ReqVersion", ver) .detail("ReplySize", reply.value.present() ? reply.value.get().size() : -1);*/ } @@ -1404,11 +1404,11 @@ ACTOR Future getKey( Database cx, KeySelector k, Future version, T try { if( info.debugID.present() ) - g_traceBatch.addEvent("TransactionDebug", info.debugID.get().first(), "NativeAPI.getKey.Before"); //.detail("StartKey", printable(k.getKey())).detail("Offset",k.offset).detail("OrEqual",k.orEqual); + g_traceBatch.addEvent("TransactionDebug", info.debugID.get().first(), "NativeAPI.getKey.Before"); //.detail("StartKey", k.getKey()).detail("Offset",k.offset).detail("OrEqual",k.orEqual); ++cx->transactionPhysicalReads; GetKeyReply reply = wait( loadBalance( ssi.second, &StorageServerInterface::getKey, GetKeyRequest(k, version.get()), TaskDefaultPromiseEndpoint, false, cx->enableLocalityLoadBalance ? 
&cx->queueModel : NULL ) ); if( info.debugID.present() ) - g_traceBatch.addEvent("TransactionDebug", info.debugID.get().first(), "NativeAPI.getKey.After"); //.detail("NextKey",printable(reply.sel.key)).detail("Offset", reply.sel.offset).detail("OrEqual", k.orEqual); + g_traceBatch.addEvent("TransactionDebug", info.debugID.get().first(), "NativeAPI.getKey.After"); //.detail("NextKey",reply.sel.key).detail("Offset", reply.sel.offset).detail("OrEqual", k.orEqual); k = reply.sel; if (!k.offset && k.orEqual) { return k.getKey(); @@ -1421,7 +1421,7 @@ ACTOR Future getKey( Database cx, KeySelector k, Future version, T } else { TraceEvent(SevInfo, "GetKeyError") .error(e) - .detail("AtKey", printable(k.getKey())) + .detail("AtKey", k.getKey()) .detail("Offset", k.offset); throw e; } @@ -1478,7 +1478,7 @@ ACTOR Future< Void > watchValue( Future version, Key key, OptionalMAX_VERSIONS_IN_FLIGHT return Void(); @@ -1551,8 +1551,8 @@ ACTOR Future> getExactRange( Database cx, Version ver if( info.debugID.present() ) { g_traceBatch.addEvent("TransactionDebug", info.debugID.get().first(), "NativeAPI.getExactRange.Before"); /*TraceEvent("TransactionDebugGetExactRangeInfo", info.debugID.get()) - .detail("ReqBeginKey", printable(req.begin.getKey())) - .detail("ReqEndKey", printable(req.end.getKey())) + .detail("ReqBeginKey", req.begin.getKey()) + .detail("ReqEndKey", req.end.getKey()) .detail("ReqLimit", req.limit) .detail("ReqLimitBytes", req.limitBytes) .detail("ReqVersion", req.version) @@ -1641,8 +1641,8 @@ ACTOR Future> getExactRange( Database cx, Version ver } else { TraceEvent(SevInfo, "GetExactRangeError") .error(e) - .detail("ShardBegin", printable(locations[shard].first.begin)) - .detail("ShardEnd", printable(locations[shard].first.end)); + .detail("ShardBegin", locations[shard].first.begin) + .detail("ShardEnd", locations[shard].first.end); throw; } } @@ -1816,13 +1816,13 @@ ACTOR Future> getRange( Database cx, Reference> getRange( Database cx, Reference committed, Promise 
outCo Standalone shouldBeEmpty = wait( tr.getRange( it->range(), 1 ) ); if( shouldBeEmpty.size() ) { - TraceEvent(SevError, "CheckWritesFailed").detail("Class", "Clear").detail("KeyBegin", printable(it->range().begin).c_str()) - .detail("KeyEnd", printable(it->range().end).c_str()); + TraceEvent(SevError, "CheckWritesFailed").detail("Class", "Clear").detail("KeyBegin", it->range().begin) + .detail("KeyEnd", it->range().end); return; } } else { Optional val = wait( tr.get( it->range().begin ) ); if( !val.present() || val.get() != m.setValue ) { - TraceEvent evt = TraceEvent(SevError, "CheckWritesFailed").detail("Class", "Set").detail("Key", printable(it->range().begin).c_str()) - .detail("Expected", printable(m.setValue).c_str()); + TraceEvent evt = TraceEvent(SevError, "CheckWritesFailed").detail("Class", "Set").detail("Key", it->range().begin) + .detail("Expected", m.setValue); if( !val.present() ) evt.detail("Actual", "_Value Missing_"); else - evt.detail("Actual", printable(val.get()).c_str()); + evt.detail("Actual", val.get()); return; } } @@ -2515,7 +2515,7 @@ ACTOR static Future commitDummyTransaction( Database cx, KeyRange range, T state int retries = 0; loop { try { - TraceEvent("CommitDummyTransaction").detail("Key", printable(range.begin)).detail("Retries", retries); + TraceEvent("CommitDummyTransaction").detail("Key", range.begin).detail("Retries", retries); tr.options = options; tr.info.taskID = info.taskID; tr.setOption( FDBTransactionOptions::ACCESS_SYSTEM_KEYS ); @@ -2526,7 +2526,7 @@ ACTOR static Future commitDummyTransaction( Database cx, KeyRange range, T wait( tr.commit() ); return Void(); } catch (Error& e) { - TraceEvent("CommitDummyTransactionError").error(e,true).detail("Key", printable(range.begin)).detail("Retries", retries); + TraceEvent("CommitDummyTransactionError").error(e,true).detail("Key", range.begin).detail("Retries", retries); wait( tr.onError(e) ); } ++retries; @@ -2715,7 +2715,7 @@ Future 
Transaction::commitMutations() { UID u = g_nondeterministic_random->randomUniqueID(); TraceEvent("TransactionDump", u); for(auto i=tr.transaction.mutations.begin(); i!=tr.transaction.mutations.end(); ++i) - TraceEvent("TransactionMutation", u).detail("T", i->type).detail("P1", printable(i->param1)).detail("P2", printable(i->param2)); + TraceEvent("TransactionMutation", u).detail("T", i->type).detail("P1", i->param1).detail("P2", i->param2); } if(options.lockAware) { @@ -2840,7 +2840,7 @@ void Transaction::setOption( FDBTransactionOptions::Option option, Optionalidentifier = printable(value.get()); } else if (trLogInfo->identifier != printable(value.get())) { - TraceEvent(SevWarn, "CannotChangeDebugTransactionIdentifier").detail("PreviousIdentifier", trLogInfo->identifier).detail("NewIdentifier", printable(value.get())); + TraceEvent(SevWarn, "CannotChangeDebugTransactionIdentifier").detail("PreviousIdentifier", trLogInfo->identifier).detail("NewIdentifier", value.get()); throw client_invalid_operation(); } } @@ -3154,7 +3154,7 @@ ACTOR Future< StorageMetrics > waitStorageMetrics( } } else { TraceEvent(SevWarn, "WaitStorageMetricsPenalty") - .detail("Keys", printable(keys)) + .detail("Keys", keys) .detail("Limit", CLIENT_KNOBS->STORAGE_METRICS_SHARD_LIMIT) .detail("JitteredSecondsOfPenitence", CLIENT_KNOBS->STORAGE_METRICS_TOO_MANY_SHARDS_DELAY); wait(delayJittered(CLIENT_KNOBS->STORAGE_METRICS_TOO_MANY_SHARDS_DELAY, TaskDataDistribution)); diff --git a/fdbclient/RYWIterator.cpp b/fdbclient/RYWIterator.cpp index 47d5805979..773696f3ec 100644 --- a/fdbclient/RYWIterator.cpp +++ b/fdbclient/RYWIterator.cpp @@ -574,7 +574,7 @@ TEST_CASE("/fdbclient/WriteMap/random") { KeyRangeRef range = RandomTestImpl::getRandomRange(arena); writes.addConflictRange(range); conflictMap.insert(range, true); - TraceEvent("RWMT_AddConflictRange").detail("Range", printable(range)); + TraceEvent("RWMT_AddConflictRange").detail("Range", range); } else if(r == 1) { KeyRangeRef range = 
RandomTestImpl::getRandomRange(arena); @@ -583,7 +583,7 @@ TEST_CASE("/fdbclient/WriteMap/random") { conflictMap.insert(range, false); clearMap.insert(range, false); unreadableMap.insert(range, true); - TraceEvent("RWMT_AddUnmodifiedAndUnreadableRange").detail("Range", printable(range)); + TraceEvent("RWMT_AddUnmodifiedAndUnreadableRange").detail("Range", range); } else if (r == 2) { bool addConflict = g_random->random01() < 0.5; @@ -594,7 +594,7 @@ TEST_CASE("/fdbclient/WriteMap/random") { conflictMap.insert(range, true); clearMap.insert(range, true); unreadableMap.insert(range, false); - TraceEvent("RWMT_Clear").detail("Range", printable(range)).detail("AddConflict", addConflict); + TraceEvent("RWMT_Clear").detail("Range", range).detail("AddConflict", addConflict); } else if (r == 3) { bool addConflict = g_random->random01() < 0.5; @@ -606,7 +606,7 @@ TEST_CASE("/fdbclient/WriteMap/random") { conflictMap.insert(key, true); clearMap.insert(key, false); unreadableMap.insert(key, true); - TraceEvent("RWMT_SetVersionstampedValue").detail("Key", printable(key)).detail("Value", value.size()).detail("AddConflict", addConflict); + TraceEvent("RWMT_SetVersionstampedValue").detail("Key", key).detail("Value", value.size()).detail("AddConflict", addConflict); } else if (r == 4) { bool addConflict = g_random->random01() < 0.5; @@ -618,7 +618,7 @@ TEST_CASE("/fdbclient/WriteMap/random") { conflictMap.insert(key, true); clearMap.insert(key, false); unreadableMap.insert(key, true); - TraceEvent("RWMT_SetVersionstampedKey").detail("Key", printable(key)).detail("Value", value.size()).detail("AddConflict", addConflict); + TraceEvent("RWMT_SetVersionstampedKey").detail("Key", key).detail("Value", value.size()).detail("AddConflict", addConflict); } else if (r == 5) { bool addConflict = g_random->random01() < 0.5; @@ -638,7 +638,7 @@ TEST_CASE("/fdbclient/WriteMap/random") { if (addConflict) conflictMap.insert(key, true); clearMap.insert(key, false); - 
TraceEvent("RWMT_And").detail("Key", printable(key)).detail("Value", value.size()).detail("AddConflict", addConflict); + TraceEvent("RWMT_And").detail("Key", key).detail("Value", value.size()).detail("AddConflict", addConflict); } else { bool addConflict = g_random->random01() < 0.5; @@ -652,7 +652,7 @@ TEST_CASE("/fdbclient/WriteMap/random") { if (addConflict) conflictMap.insert(key, true); clearMap.insert(key, false); - TraceEvent("RWMT_Set").detail("Key", printable(key)).detail("Value", value.size()).detail("AddConflict", addConflict); + TraceEvent("RWMT_Set").detail("Key", key).detail("Value", value.size()).detail("AddConflict", addConflict); } } @@ -665,11 +665,11 @@ TEST_CASE("/fdbclient/WriteMap/random") { if (it.is_operation()) { ASSERT(setIter != setEnd); TraceEvent("RWMT_CheckOperation") - .detail("WmKey", printable(it.beginKey().toStandaloneStringRef())) + .detail("WmKey", it.beginKey().toStandaloneStringRef()) .detail("WmSize", it.op().size()) .detail("WmValue", it.op().top().value.present() ? std::to_string(it.op().top().value.get().size()) : "Not Found") .detail("WmType", (int)it.op().top().type) - .detail("SmKey", printable(setIter->first)) + .detail("SmKey", setIter->first) .detail("SmSize", setIter->second.size()) .detail("SmValue", setIter->second.top().value.present() ? 
std::to_string(setIter->second.top().value.get().size()) : "Not Found") .detail("SmType", (int)setIter->second.top().type); @@ -679,7 +679,7 @@ TEST_CASE("/fdbclient/WriteMap/random") { } TraceEvent("RWMT_CheckOperationFinal") - .detail("WmKey", printable(it.beginKey().toStandaloneStringRef())) + .detail("WmKey", it.beginKey().toStandaloneStringRef()) .detail("SmIter", setIter == setEnd); ASSERT(it.beginKey() >= allKeys.end && setIter == setEnd); @@ -728,8 +728,8 @@ TEST_CASE("/fdbclient/WriteMap/random") { while (it.beginKey() < allKeys.end && unreadableIter != unreadableEnd) { TraceEvent("RWMT_CheckUnreadable") - .detail("WriteMapRange", printable(KeyRangeRef(it.beginKey().toStandaloneStringRef(), it.endKey().toStandaloneStringRef()))) - .detail("UnreadableMapRange", printable(unreadableIter.range())) + .detail("WriteMapRange", KeyRangeRef(it.beginKey().toStandaloneStringRef(), it.endKey().toStandaloneStringRef())) + .detail("UnreadableMapRange", unreadableIter.range()) .detail("WriteMapValue", it.is_unreadable()) .detail("UnreadableMapValue", unreadableIter.value()); ASSERT(unreadableIter.value() == it.is_unreadable()); diff --git a/fdbclient/ReadYourWrites.actor.cpp b/fdbclient/ReadYourWrites.actor.cpp index 2b5c60f4fb..32ac64114e 100644 --- a/fdbclient/ReadYourWrites.actor.cpp +++ b/fdbclient/ReadYourWrites.actor.cpp @@ -38,12 +38,12 @@ public: if (kv) key = kv->key; } TraceEvent("RYWDump") - .detail("Begin", printable(it.beginKey().toStandaloneStringRef())) - .detail("End", printable(it.endKey().toStandaloneStringRef())) + .detail("Begin", it.beginKey().toStandaloneStringRef()) + .detail("End", it.endKey().toStandaloneStringRef()) .detail("Unknown", it.is_unknown_range()) .detail("Empty", it.is_empty_range()) .detail("KV", it.is_kv()) - .detail("Key", printable(key.get())); + .detail("Key", key.get()); if( it.endKey() == allKeys.end ) break; ++it; @@ -512,8 +512,8 @@ public: .detail("Reached", limits.isReached()) .detail("ItemsPastEnd", itemsPastEnd) 
.detail("EndOffset", -end.offset) - .detail("ItBegin", printable(it.beginKey().toStandaloneStringRef())) - .detail("ItEnd", printable(itEnd.beginKey().toStandaloneStringRef())) + .detail("ItBegin", it.beginKey().toStandaloneStringRef()) + .detail("ItEnd", itEnd.beginKey().toStandaloneStringRef()) .detail("Unknown", it.is_unknown_range()) .detail("Requests", requestCount);*/ @@ -606,13 +606,13 @@ public: ASSERT( !requestLimit.hasRowLimit() || requestLimit.rows > 0 ); ASSERT( requestLimit.hasRowLimit() || requestLimit.hasByteLimit() ); - //TraceEvent("RYWIssuing", randomID).detail("Begin", read_begin.toString()).detail("End", read_end.toString()).detail("Bytes", requestLimit.bytes).detail("Rows", requestLimit.rows).detail("Limits", limits.bytes).detail("Reached", limits.isReached()).detail("RequestCount", requestCount).detail("SingleClears", singleClears).detail("UcEnd", printable(ucEnd.beginKey().toStandaloneStringRef())).detail("MinRows", requestLimit.minRows); + //TraceEvent("RYWIssuing", randomID).detail("Begin", read_begin.toString()).detail("End", read_end.toString()).detail("Bytes", requestLimit.bytes).detail("Rows", requestLimit.rows).detail("Limits", limits.bytes).detail("Reached", limits.isReached()).detail("RequestCount", requestCount).detail("SingleClears", singleClears).detail("UcEnd", ucEnd.beginKey().toStandaloneStringRef()).detail("MinRows", requestLimit.minRows); additionalRows = 0; Standalone snapshot_read = wait( ryw->tr.getRange( read_begin, read_end, requestLimit, true, false ) ); KeyRangeRef range = getKnownKeyRange( snapshot_read, read_begin, read_end, ryw->arena ); - //TraceEvent("RYWCacheInsert", randomID).detail("Range", printable(range)).detail("ExpectedSize", snapshot_read.expectedSize()).detail("Rows", snapshot_read.size()).detail("Results", printable(snapshot_read)).detail("More", snapshot_read.more).detail("ReadToBegin", snapshot_read.readToBegin).detail("ReadThroughEnd", snapshot_read.readThroughEnd).detail("ReadThrough", 
printable(snapshot_read.readThrough)); + //TraceEvent("RYWCacheInsert", randomID).detail("Range", range).detail("ExpectedSize", snapshot_read.expectedSize()).detail("Rows", snapshot_read.size()).detail("Results", snapshot_read).detail("More", snapshot_read.more).detail("ReadToBegin", snapshot_read.readToBegin).detail("ReadThroughEnd", snapshot_read.readThroughEnd).detail("ReadThrough", snapshot_read.readThrough); if( ryw->cache.insert( range, snapshot_read ) ) ryw->arena.dependsOn(snapshot_read.arena()); @@ -636,7 +636,7 @@ public: itemsPastEnd += maxCount - count; - //TraceEvent("RYWaddKV", randomID).detail("Key", printable(it.beginKey().toStandaloneStringRef())).detail("Count", count).detail("MaxCount", maxCount).detail("ItemsPastEnd", itemsPastEnd); + //TraceEvent("RYWaddKV", randomID).detail("Key", it.beginKey().toStandaloneStringRef()).detail("Count", count).detail("MaxCount", maxCount).detail("ItemsPastEnd", itemsPastEnd); if( count ) result.append( result.arena(), start, count ); ++it; } else @@ -785,8 +785,8 @@ public: .detail("Reached", limits.isReached()) .detail("ItemsPastBegin", itemsPastBegin) .detail("EndOffset", end.offset) - .detail("ItBegin", printable(it.beginKey().toStandaloneStringRef())) - .detail("ItEnd", printable(itEnd.beginKey().toStandaloneStringRef())) + .detail("ItBegin", it.beginKey().toStandaloneStringRef()) + .detail("ItEnd", itEnd.beginKey().toStandaloneStringRef()) .detail("Unknown", it.is_unknown_range()) .detail("Kv", it.is_kv()) .detail("Requests", requestCount);*/ @@ -883,13 +883,13 @@ public: ASSERT( !requestLimit.hasRowLimit() || requestLimit.rows > 0 ); ASSERT( requestLimit.hasRowLimit() || requestLimit.hasByteLimit() ); - //TraceEvent("RYWIssuing", randomID).detail("Begin", read_begin.toString()).detail("End", read_end.toString()).detail("Bytes", requestLimit.bytes).detail("Rows", requestLimit.rows).detail("Limits", limits.bytes).detail("Reached", limits.isReached()).detail("RequestCount", 
requestCount).detail("SingleClears", singleClears).detail("UcEnd", printable(ucEnd.beginKey().toStandaloneStringRef())).detail("MinRows", requestLimit.minRows); + //TraceEvent("RYWIssuing", randomID).detail("Begin", read_begin.toString()).detail("End", read_end.toString()).detail("Bytes", requestLimit.bytes).detail("Rows", requestLimit.rows).detail("Limits", limits.bytes).detail("Reached", limits.isReached()).detail("RequestCount", requestCount).detail("SingleClears", singleClears).detail("UcEnd", ucEnd.beginKey().toStandaloneStringRef()).detail("MinRows", requestLimit.minRows); additionalRows = 0; Standalone snapshot_read = wait( ryw->tr.getRange( read_begin, read_end, requestLimit, true, true ) ); KeyRangeRef range = getKnownKeyRangeBack( snapshot_read, read_begin, read_end, ryw->arena ); - //TraceEvent("RYWCacheInsert", randomID).detail("Range", printable(range)).detail("ExpectedSize", snapshot_read.expectedSize()).detail("Rows", snapshot_read.size()).detail("Results", printable(snapshot_read)).detail("More", snapshot_read.more).detail("ReadToBegin", snapshot_read.readToBegin).detail("ReadThroughEnd", snapshot_read.readThroughEnd).detail("ReadThrough", printable(snapshot_read.readThrough)); + //TraceEvent("RYWCacheInsert", randomID).detail("Range", range).detail("ExpectedSize", snapshot_read.expectedSize()).detail("Rows", snapshot_read.size()).detail("Results", snapshot_read).detail("More", snapshot_read.more).detail("ReadToBegin", snapshot_read.readToBegin).detail("ReadThroughEnd", snapshot_read.readThroughEnd).detail("ReadThrough", snapshot_read.readThrough); RangeResultRef reversed; reversed.resize(ryw->arena, snapshot_read.size()); @@ -917,7 +917,7 @@ public: } itemsPastBegin += maxCount - count; - //TraceEvent("RYWaddKV", randomID).detail("Key", printable(it.beginKey().toStandaloneStringRef())).detail("Count", count).detail("MaxCount", maxCount).detail("ItemsPastBegin", itemsPastBegin); + //TraceEvent("RYWaddKV", randomID).detail("Key", 
it.beginKey().toStandaloneStringRef()).detail("Count", count).detail("MaxCount", maxCount).detail("ItemsPastBegin", itemsPastBegin); if( count ) { int size = result.size(); result.resize(result.arena(),size+count); @@ -1982,7 +1982,7 @@ void ReadYourWritesTransaction::debugLogRetries(Optional error) { if(error.present()) trace.error(error.get(), true); if(!transactionDebugInfo->transactionName.empty()) - trace.detail("TransactionName", printable(StringRef(transactionDebugInfo->transactionName))); + trace.detail("TransactionName", StringRef(transactionDebugInfo->transactionName)); trace.detail("Elapsed", elapsed).detail("Retries", retries).detail("Committed", committed); } transactionDebugInfo->lastRetryLogTime = now(); diff --git a/fdbclient/SnapshotCache.h b/fdbclient/SnapshotCache.h index d572b6034c..0b3eb6a515 100644 --- a/fdbclient/SnapshotCache.h +++ b/fdbclient/SnapshotCache.h @@ -343,7 +343,7 @@ public: void dump() { for( auto it = entries.begin(); it != entries.end(); ++it ) { - TraceEvent("CacheDump").detail("Begin", printable(it->beginKey)).detail("End", printable(it->endKey.toStandaloneStringRef())).detail("Values", printable(it->values)); + TraceEvent("CacheDump").detail("Begin", it->beginKey).detail("End", it->endKey.toStandaloneStringRef()).detail("Values", it->values); } } diff --git a/fdbclient/SystemData.cpp b/fdbclient/SystemData.cpp index a8cf78f009..7e7ba91e31 100644 --- a/fdbclient/SystemData.cpp +++ b/fdbclient/SystemData.cpp @@ -483,7 +483,7 @@ Key logRangesEncodeKey(KeyRef keyBegin, UID logUid) { // Returns the start key and optionally the logRange Uid KeyRef logRangesDecodeKey(KeyRef key, UID* logUid) { if (key.size() < logRangesRange.begin.size() + sizeof(UID)) { - TraceEvent(SevError, "InvalidDecodeKey").detail("Key", printable(key)); + TraceEvent(SevError, "InvalidDecodeKey").detail("Key", key); ASSERT(false); } diff --git a/fdbclient/TaskBucket.actor.cpp b/fdbclient/TaskBucket.actor.cpp index c5b9fb3713..05ef3cdf58 100644 --- 
a/fdbclient/TaskBucket.actor.cpp +++ b/fdbclient/TaskBucket.actor.cpp @@ -235,15 +235,15 @@ public: if (task->params.find(Task::reservedTaskParamValidKey) == task->params.end()) { TraceEvent("TB_TaskVerifyInvalidTask") - .detail("Task", printable(task->params[Task::reservedTaskParamKeyType])) + .detail("Task", task->params[Task::reservedTaskParamKeyType]) .detail("ReservedTaskParamValidKey", "missing"); return false; } if (task->params.find(Task::reservedTaskParamValidValue) == task->params.end()) { TraceEvent("TB_TaskVerifyInvalidTask") - .detail("Task", printable(task->params[Task::reservedTaskParamKeyType])) - .detail("ReservedTaskParamValidKey", printable(task->params[Task::reservedTaskParamValidKey])) + .detail("Task", task->params[Task::reservedTaskParamKeyType]) + .detail("ReservedTaskParamValidKey", task->params[Task::reservedTaskParamValidKey]) .detail("ReservedTaskParamValidValue", "missing"); return false; } @@ -254,19 +254,19 @@ public: if (!keyValue.present()) { TraceEvent("TB_TaskVerifyInvalidTask") - .detail("Task", printable(task->params[Task::reservedTaskParamKeyType])) - .detail("ReservedTaskParamValidKey", printable(task->params[Task::reservedTaskParamValidKey])) - .detail("ReservedTaskParamValidValue", printable(task->params[Task::reservedTaskParamValidValue])) + .detail("Task", task->params[Task::reservedTaskParamKeyType]) + .detail("ReservedTaskParamValidKey", task->params[Task::reservedTaskParamValidKey]) + .detail("ReservedTaskParamValidValue", task->params[Task::reservedTaskParamValidValue]) .detail("KeyValue", "missing"); return false; } if (keyValue.get().compare(StringRef(task->params[Task::reservedTaskParamValidValue]))) { TraceEvent("TB_TaskVerifyAbortedTask") - .detail("Task", printable(task->params[Task::reservedTaskParamKeyType])) - .detail("ReservedTaskParamValidKey", printable(task->params[Task::reservedTaskParamValidKey])) - .detail("ReservedTaskParamValidValue", printable(task->params[Task::reservedTaskParamValidValue])) - 
.detail("KeyValue", printable(keyValue.get())); + .detail("Task", task->params[Task::reservedTaskParamKeyType]) + .detail("ReservedTaskParamValidKey", task->params[Task::reservedTaskParamValidKey]) + .detail("ReservedTaskParamValidValue", task->params[Task::reservedTaskParamValidValue]) + .detail("KeyValue", keyValue.get()); return false; } @@ -396,7 +396,7 @@ } catch(Error &e) { TraceEvent(SevWarn, "TB_ExecuteFailure") .error(e) - .detail("TaskUID", task->key.printable()) + .detail("TaskUID", task->key) .detail("TaskType", task->params[Task::reservedTaskParamKeyType].printable()) .detail("Priority", task->getPriority()); try { @@ -709,14 +709,14 @@ public: tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS); tr->setOption(FDBTransactionOptions::LOCK_AWARE); Standalone values = wait(tr->getRange(subspace.range(), CLIENT_KNOBS->TOO_MANY)); - TraceEvent("TaskBucket").detail("DebugPrintRange", "Print DB Range").detail("Key", printable(subspace.key())).detail("Count", values.size()).detail("Msg", printable(msg)); + TraceEvent("TaskBucket").detail("DebugPrintRange", "Print DB Range").detail("Key", subspace.key()).detail("Count", values.size()).detail("Msg", msg); /* printf("debugPrintRange key: (%d) %s\n", values.size(), printable(subspace.key()).c_str()); for (auto & s : values) { - printf(" key: %-40s value: %s\n", printable(s.key).c_str(), printable(s.value).c_str()); - TraceEvent("TaskBucket").detail("DebugPrintRange", printable(msg)) - .detail("Key", printable(s.key)) - .detail("Value", printable(s.value)); + printf(" key: %-40s value: %s\n", printable(s.key).c_str(), printable(s.value).c_str()); + TraceEvent("TaskBucket").detail("DebugPrintRange", msg) + .detail("Key", s.key) + .detail("Value", s.value); }*/ return Void(); @@ -846,8 +846,8 @@ ACTOR static Future actorAddTask(TaskBucket* tb, Referenceparams[Task::reservedTaskParamKeyType])) - .detail("ValidationKey", printable(validationKey)); + .detail("Task", task->params[Task::reservedTaskParamKeyType]) + 
.detail("ValidationKey", validationKey); throw invalid_option_value(); } @@ -1114,8 +1114,8 @@ public: if (!validationValue.present()) { TraceEvent(SevError, "TB_OnSetAddTaskInvalidKey") - .detail("Task", printable(task->params[Task::reservedTaskParamKeyType])) - .detail("ValidationKey", printable(validationKey)); + .detail("Task", task->params[Task::reservedTaskParamKeyType]) + .detail("ValidationKey", validationKey); throw invalid_option_value(); } diff --git a/fdbclient/WriteMap.h b/fdbclient/WriteMap.h index 5fd341b5b5..8ba6b145d4 100644 --- a/fdbclient/WriteMap.h +++ b/fdbclient/WriteMap.h @@ -543,8 +543,8 @@ private: iterator it( this ); it.skip(allKeys.begin); while( it.beginKey() < allKeys.end ) { - TraceEvent("WriteMapDump").detail("Begin", printable(it.beginKey().toStandaloneStringRef())) - .detail("End", printable(it.endKey().toStandaloneStringRef())) + TraceEvent("WriteMapDump").detail("Begin", it.beginKey().toStandaloneStringRef()) + .detail("End", it.endKey().toStandaloneStringRef()) .detail("Cleared", it.is_cleared_range()) .detail("Conflicted", it.is_conflict_range()) .detail("Operation", it.is_operation()) diff --git a/fdbserver/ApplyMetadataMutation.h b/fdbserver/ApplyMetadataMutation.h index 0788956323..7ba4b4c495 100644 --- a/fdbserver/ApplyMetadataMutation.h +++ b/fdbserver/ApplyMetadataMutation.h @@ -106,7 +106,7 @@ static void applyMetadataMutations(UID const& dbgid, Arena &arena, VectorRefreadValue( serverTagKeyFor( serverKeysDecodeServer(m.param1) ) ).get().get() ).toString()); + .detail("TagKey", serverTagKeyFor( serverKeysDecodeServer(m.param1) )).detail("Tag", decodeServerTagValue( txnStateStore->readValue( serverTagKeyFor( serverKeysDecodeServer(m.param1) ) ).get().get() ).toString()); toCommit->addTag( decodeServerTagValue( txnStateStore->readValue( serverTagKeyFor( serverKeysDecodeServer(m.param1) ) ).get().get() ) ); toCommit->addTypedMessage(privatized); @@ -151,7 +151,7 @@ static void applyMetadataMutations(UID const& dbgid, Arena 
&arena, VectorRef(m.param2) != txnStateStore->readValue(m.param1).get().castTo()) { // FIXME: Make this check more specific, here or by reading configuration whenever there is a change if(!m.param1.startsWith( excludedServersPrefix ) && m.param1 != excludedServersVersionKey) { auto t = txnStateStore->readValue(m.param1).get(); - TraceEvent("MutationRequiresRestart", dbgid).detail("M", m.toString()).detail("PrevValue", t.present() ? printable(t.get()) : "(none)").detail("ToCommit", toCommit!=NULL); + TraceEvent("MutationRequiresRestart", dbgid).detail("M", m.toString()).detail("PrevValue", t.present() ? t.get() : LiteralStringRef("(none)")).detail("ToCommit", toCommit!=NULL); if(confChange) *confChange = true; } } @@ -230,8 +230,8 @@ static void applyMetadataMutations(UID const& dbgid, Arena &arena, VectorRefsize()).detail("MutationKey", printable(m.param1)) - .detail("LogRangeBegin", printable(logRangeBegin)).detail("LogRangeEnd", printable(logRangeEnd)); + TraceEvent("LogRangeAdd").detail("LogRanges", vecBackupKeys->size()).detail("MutationKey", m.param1) + .detail("LogRangeBegin", logRangeBegin).detail("LogRangeEnd", logRangeEnd); } } else if (m.param1.startsWith(globalKeysPrefix)) { @@ -383,8 +383,8 @@ static void applyMetadataMutations(UID const& dbgid, Arena &arena, VectorRefmodify(KeyRangeRef(logKeyBegin, logKeyEnd)); diff --git a/fdbserver/ClusterController.actor.cpp b/fdbserver/ClusterController.actor.cpp index d1c1858aa9..910ca64acd 100644 --- a/fdbserver/ClusterController.actor.cpp +++ b/fdbserver/ClusterController.actor.cpp @@ -2449,7 +2449,7 @@ ACTOR Future doEmptyCommit(Database cx) { ACTOR Future handleForcedRecoveries( ClusterControllerData *self, ClusterControllerFullInterface interf ) { loop { state ForceRecoveryRequest req = waitNext( interf.clientInterface.forceRecovery.getFuture() ); - TraceEvent("ForcedRecoveryStart", self->id).detail("ClusterControllerDcId", printable(self->clusterControllerDcId)).detail("DcId", req.dcId.printable()); + 
TraceEvent("ForcedRecoveryStart", self->id).detail("ClusterControllerDcId", self->clusterControllerDcId).detail("DcId", req.dcId.printable()); state Future fCommit = doEmptyCommit(self->cx); wait(fCommit || delay(SERVER_KNOBS->FORCE_RECOVERY_CHECK_DELAY)); if(!fCommit.isReady() || fCommit.isError()) { diff --git a/fdbserver/Coordination.actor.cpp b/fdbserver/Coordination.actor.cpp index a7642f099b..9f19c591be 100644 --- a/fdbserver/Coordination.actor.cpp +++ b/fdbserver/Coordination.actor.cpp @@ -128,7 +128,7 @@ ACTOR Future localGenerationReg( GenerationRegInterface interf, OnDemandSt // SOMEDAY: concurrent access to different keys? loop choose { when ( GenerationRegReadRequest _req = waitNext( interf.read.getFuture() ) ) { - TraceEvent("GenerationRegReadRequest").detail("From", _req.reply.getEndpoint().getPrimaryAddress()).detail("K", printable(_req.key)); + TraceEvent("GenerationRegReadRequest").detail("From", _req.reply.getEndpoint().getPrimaryAddress()).detail("K", _req.key); state GenerationRegReadRequest req = _req; Optional rawV = wait( store->readValue( req.key ) ); v = rawV.present() ? 
BinaryReader::fromStringRef( rawV.get(), IncludeVersion() ) : GenerationRegVal(); @@ -149,11 +149,11 @@ ACTOR Future localGenerationReg( GenerationRegInterface interf, OnDemandSt v.val = wrq.kv.value; store->set( KeyValueRef( wrq.kv.key, BinaryWriter::toValue(v, IncludeVersion()) ) ); wait(store->commit()); - TraceEvent("GenerationRegWrote").detail("From", wrq.reply.getEndpoint().getPrimaryAddress()).detail("Key", printable(wrq.kv.key)) + TraceEvent("GenerationRegWrote").detail("From", wrq.reply.getEndpoint().getPrimaryAddress()).detail("Key", wrq.kv.key) .detail("ReqGen", wrq.gen.generation).detail("Returning", v.writeGen.generation); wrq.reply.send( v.writeGen ); } else { - TraceEvent("GenerationRegWriteFail").detail("From", wrq.reply.getEndpoint().getPrimaryAddress()).detail("Key", printable(wrq.kv.key)) + TraceEvent("GenerationRegWriteFail").detail("From", wrq.reply.getEndpoint().getPrimaryAddress()).detail("Key", wrq.kv.key) .detail("ReqGen", wrq.gen.generation).detail("ReadGen", v.readGen.generation).detail("WriteGen", v.writeGen.generation); wrq.reply.send( std::max( v.readGen, v.writeGen ) ); } @@ -267,7 +267,7 @@ ACTOR Future leaderRegister(LeaderElectionRegInterface interf, Key key) { !currentNominee.present()) { // Our state is back to the initial state, so we can safely stop this actor - TraceEvent("EndingLeaderNomination").detail("Key", printable(key)); + TraceEvent("EndingLeaderNomination").detail("Key", key); return Void(); } else { Optional nextNominee; @@ -293,7 +293,7 @@ ACTOR Future leaderRegister(LeaderElectionRegInterface interf, Key key) { if ( !nextNominee.present() || !foundCurrentNominee || currentNominee.get().leaderChangeRequired(nextNominee.get()) ) { TraceEvent("NominatingLeader").detail("Nominee", nextNominee.present() ? 
nextNominee.get().changeID : UID()) - .detail("Changed", nextNominee != currentNominee).detail("Key", printable(key)); + .detail("Changed", nextNominee != currentNominee).detail("Key", key); for(unsigned int i=0; i teamTracker(DDTeamCollection* self, Reference tea if(g_random->random01() < 0.01) { TraceEvent("SendRelocateToDDQx100", self->distributorId) .detail("Team", team->getDesc()) - .detail("KeyBegin", printable(rs.keys.begin)) - .detail("KeyEnd", printable(rs.keys.end)) + .detail("KeyBegin", rs.keys.begin) + .detail("KeyEnd", rs.keys.end) .detail("Priority", rs.priority) .detail("TeamFailedMachines", team->size() - serversLeft) .detail("TeamOKMachines", serversLeft); @@ -3259,14 +3259,14 @@ ACTOR Future updateReplicasKey(DDTeamCollection* self, Optional dcId) wait(self->initialFailureReactionDelay && waitForAll(serverUpdates)); wait(waitUntilHealthy(self)); - TraceEvent("DDUpdatingReplicas", self->distributorId).detail("DcId", printable(dcId)).detail("Replicas", self->configuration.storageTeamSize); + TraceEvent("DDUpdatingReplicas", self->distributorId).detail("DcId", dcId).detail("Replicas", self->configuration.storageTeamSize); state Transaction tr(self->cx); loop { try { Optional val = wait( tr.get(datacenterReplicasKeyFor(dcId)) ); state int oldReplicas = val.present() ? 
decodeDatacenterReplicasValue(val.get()) : 0; if(oldReplicas == self->configuration.storageTeamSize) { - TraceEvent("DDUpdatedAlready", self->distributorId).detail("DcId", printable(dcId)).detail("Replicas", self->configuration.storageTeamSize); + TraceEvent("DDUpdatedAlready", self->distributorId).detail("DcId", dcId).detail("Replicas", self->configuration.storageTeamSize); return Void(); } if(oldReplicas < self->configuration.storageTeamSize) { @@ -3274,7 +3274,7 @@ ACTOR Future updateReplicasKey(DDTeamCollection* self, Optional dcId) } tr.set(datacenterReplicasKeyFor(dcId), datacenterReplicasValue(self->configuration.storageTeamSize)); wait( tr.commit() ); - TraceEvent("DDUpdatedReplicas", self->distributorId).detail("DcId", printable(dcId)).detail("Replicas", self->configuration.storageTeamSize).detail("OldReplicas", oldReplicas); + TraceEvent("DDUpdatedReplicas", self->distributorId).detail("DcId", dcId).detail("Replicas", self->configuration.storageTeamSize).detail("OldReplicas", oldReplicas); return Void(); } catch( Error &e ) { wait( tr.onError(e) ); @@ -3459,7 +3459,7 @@ ACTOR Future debugCheckCoalescing(Database cx) { for(int j = 0; j < ranges.size() - 2; j++) if(ranges[j].value == ranges[j + 1].value) - TraceEvent(SevError, "UncoalescedValues", id).detail("Key1", printable(ranges[j].key)).detail("Key2", printable(ranges[j + 1].key)).detail("Value", printable(ranges[j].value)); + TraceEvent(SevError, "UncoalescedValues", id).detail("Key1", ranges[j].key).detail("Key2", ranges[j + 1].key).detail("Value", ranges[j].value); } TraceEvent("DoneCheckingCoalescing"); @@ -3594,8 +3594,8 @@ ACTOR Future dataDistribution(Reference self) initData = initData_; if(initData->shards.size() > 1) { TraceEvent("DDInitGotInitialDD", self->ddId) - .detail("B", printable(initData->shards.end()[-2].key)) - .detail("E", printable(initData->shards.end()[-1].key)) + .detail("B", initData->shards.end()[-2].key) + .detail("E", initData->shards.end()[-1].key) .detail("Src", 
describe(initData->shards.end()[-2].primarySrc)) .detail("Dest", describe(initData->shards.end()[-2].primaryDest)) .trackLatest("InitialDD"); @@ -3643,7 +3643,7 @@ ACTOR Future dataDistribution(Reference self) teams.push_back(ShardsAffectedByTeamFailure::Team(initData->shards[shard].remoteSrc, false)); } if(g_network->isSimulated()) { - TraceEvent("DDInitShard").detail("Keys", printable(keys)).detail("PrimarySrc", describe(initData->shards[shard].primarySrc)).detail("RemoteSrc", describe(initData->shards[shard].remoteSrc)) + TraceEvent("DDInitShard").detail("Keys", keys).detail("PrimarySrc", describe(initData->shards[shard].primarySrc)).detail("RemoteSrc", describe(initData->shards[shard].remoteSrc)) .detail("PrimaryDest", describe(initData->shards[shard].primaryDest)).detail("RemoteDest", describe(initData->shards[shard].remoteDest)); } diff --git a/fdbserver/DataDistributionQueue.actor.cpp b/fdbserver/DataDistributionQueue.actor.cpp index f282ef12bc..775c51cdf2 100644 --- a/fdbserver/DataDistributionQueue.actor.cpp +++ b/fdbserver/DataDistributionQueue.actor.cpp @@ -426,10 +426,10 @@ struct DDQueueData { auto range = queueMap.rangeContaining( rdit->keys.begin ); if( range.value() != *rdit || range.range() != rdit->keys ) TraceEvent(SevError, "DDQueueValidateError4").detail("Problem", "relocates in the queue are in the queueMap exactly") - .detail("RangeBegin", printable(range.range().begin)) - .detail("RangeEnd", printable(range.range().end)) - .detail("RelocateBegin2", printable(range.value().keys.begin)) - .detail("RelocateEnd2", printable(range.value().keys.end)) + .detail("RangeBegin", range.range().begin) + .detail("RangeEnd", range.range().end) + .detail("RelocateBegin2", range.value().keys.begin) + .detail("RelocateEnd2", range.value().keys.end) .detail("RelocateStart", range.value().startTime) .detail("MapStart", rdit->startTime) .detail("RelocateWork", range.value().workFactor) @@ -570,7 +570,7 @@ struct DDQueueData { //This function cannot handle 
relocation requests which split a shard into three pieces void queueRelocation( RelocateData rd, std::set &serversToLaunchFrom ) { - //TraceEvent("QueueRelocationBegin").detail("Begin", printable(rd.keys.begin)).detail("End", printable(rd.keys.end)); + //TraceEvent("QueueRelocationBegin").detail("Begin", rd.keys.begin).detail("End", rd.keys.end); // remove all items from both queues that are fully contained in the new relocation (i.e. will be overwritten) auto ranges = queueMap.intersectingRanges( rd.keys ); @@ -637,7 +637,7 @@ struct DDQueueData { rrs.interval = TraceInterval("QueuedRelocation"); /*TraceEvent(rrs.interval.begin(), distributorId); - .detail("KeyBegin", printable(rrs.keys.begin)).detail("KeyEnd", printable(rrs.keys.end)) + .detail("KeyBegin", rrs.keys.begin).detail("KeyEnd", rrs.keys.end) .detail("Priority", rrs.priority).detail("WantsNewServers", rrs.wantsNewServers);*/ queuedRelocations++; startRelocation(rrs.priority); @@ -657,7 +657,7 @@ struct DDQueueData { if( !foundActiveRelocation ) { newData.interval = TraceInterval("QueuedRelocation"); /*TraceEvent(newData.interval.begin(), distributorId); - .detail("KeyBegin", printable(newData.keys.begin)).detail("KeyEnd", printable(newData.keys.end)) + .detail("KeyBegin", newData.keys.begin).detail("KeyEnd", newData.keys.end) .detail("Priority", newData.priority).detail("WantsNewServers", newData.wantsNewServers);*/ queuedRelocations++; startRelocation(newData.priority); @@ -677,8 +677,8 @@ struct DDQueueData { } /*TraceEvent("ReceivedRelocateShard", distributorId) - .detail("KeyBegin", printable(rd.keys.begin)) - .detail("KeyEnd", printable(rd.keys.end)) + .detail("KeyBegin", rd.keys.begin) + .detail("KeyEnd", rd.keys.end) .detail("Priority", rd.priority) .detail("AffectedRanges", affectedQueuedItems.size()); */ } @@ -701,8 +701,8 @@ struct DDQueueData { busyString += describe(rd.src[i]) + " - (" + busymap[ rd.src[i] ].toString() + "); "; TraceEvent(title, distributorId) - .detail("KeyBegin", 
printable(rd.keys.begin)) - .detail("KeyEnd", printable(rd.keys.end)) + .detail("KeyBegin", rd.keys.begin) + .detail("KeyEnd", rd.keys.end) .detail("Priority", rd.priority) .detail("WorkFactor", rd.workFactor) .detail("SourceServerCount", rd.src.size()) @@ -759,8 +759,8 @@ struct DDQueueData { it->value().priority >= rd.priority && rd.priority < PRIORITY_TEAM_REDUNDANT ) { /*TraceEvent("OverlappingInFlight", distributorId) - .detail("KeyBegin", printable(it->value().keys.begin)) - .detail("KeyEnd", printable(it->value().keys.end)) + .detail("KeyBegin", it->value().keys.begin) + .detail("KeyEnd", it->value().keys.end) .detail("Priority", it->value().priority); */ overlappingInFlight = true; break; @@ -867,7 +867,7 @@ ACTOR Future dataDistributionRelocator( DDQueueData *self, RelocateData rd } TraceEvent(relocateShardInterval.begin(), distributorId) - .detail("KeyBegin", printable(rd.keys.begin)).detail("KeyEnd", printable(rd.keys.end)) + .detail("KeyBegin", rd.keys.begin).detail("KeyEnd", rd.keys.end) .detail("Priority", rd.priority).detail("RelocationID", relocateShardInterval.pairID).detail("SuppressedEventCount", self->suppressIntervals); if(relocateShardInterval.severity != SevDebug) { diff --git a/fdbserver/DataDistributionTracker.actor.cpp b/fdbserver/DataDistributionTracker.actor.cpp index cb4d5dde5b..a6e027c171 100644 --- a/fdbserver/DataDistributionTracker.actor.cpp +++ b/fdbserver/DataDistributionTracker.actor.cpp @@ -144,7 +144,7 @@ ACTOR Future trackShardBytes( /*TraceEvent("TrackShardBytesStarting") .detail("TrackerID", trackerID) - .detail("Keys", printable(keys)) + .detail("Keys", keys) .detail("TrackedBytesInitiallyPresent", shardSize->get().present()) .detail("StartingSize", shardSize->get().present() ? shardSize->get().get().metrics.bytes : 0) .detail("StartingMerges", shardSize->get().present() ? 
shardSize->get().get().merges : 0);*/ @@ -190,7 +190,7 @@ ACTOR Future trackShardBytes( StorageMetrics metrics = wait( tr.waitStorageMetrics( keys, bounds.min, bounds.max, bounds.permittedError, CLIENT_KNOBS->STORAGE_METRICS_SHARD_LIMIT ) ); /*TraceEvent("ShardSizeUpdate") - .detail("Keys", printable(keys)) + .detail("Keys", keys) .detail("UpdatedSize", metrics.metrics.bytes) .detail("Bandwidth", metrics.metrics.bytesPerKSecond) .detail("BandwithStatus", getBandwidthStatus(metrics)) @@ -329,8 +329,8 @@ ACTOR Future shardSplitter( if( g_random->random01() < 0.01 ) { TraceEvent("RelocateShardStartSplitx100", self->distributorId) - .detail("Begin", printable(keys.begin)) - .detail("End", printable(keys.end)) + .detail("Begin", keys.begin) + .detail("End", keys.end) .detail("MaxBytes", shardBounds.max.bytes) .detail("MetricsBytes", metrics.bytes) .detail("Bandwidth", bandwidthStatus == BandwidthStatusHigh ? "High" : bandwidthStatus == BandwidthStatusNormal ? "Normal" : "Low") @@ -450,8 +450,8 @@ Future shardMerger( KeyRange mergeRange = merged; TraceEvent("RelocateShardMergeMetrics", self->distributorId) - .detail("OldKeys", printable(keys)) - .detail("NewKeys", printable(mergeRange)) + .detail("OldKeys", keys) + .detail("NewKeys", mergeRange) .detail("EndingSize", endingStats.bytes) .detail("BatchedMerges", shardsMerged); @@ -532,8 +532,8 @@ ACTOR Future shardTracker( wait( delay( 0, TaskDataDistribution ) ); /*TraceEvent("ShardTracker", self->distributorId) - .detail("Begin", printable(keys.begin)) - .detail("End", printable(keys.end)) + .detail("Begin", keys.begin) + .detail("End", keys.end) .detail("TrackerID", trackerID) .detail("MaxBytes", self->maxShardSize->get().get()) .detail("ShardSize", shardSize->get().get().bytes) @@ -572,7 +572,7 @@ void restartShardTrackers( DataDistributionTracker* self, KeyRangeRef keys, Opti if( startingSize.present() ) { ASSERT( ranges.size() == 1 ); /*TraceEvent("ShardTrackerSizePreset", self->distributorId) - .detail("Keys", 
printable(keys)) + .detail("Keys", keys) .detail("Size", startingSize.get().metrics.bytes) .detail("Merges", startingSize.get().merges);*/ TEST( true ); // shardTracker started with trackedBytes already set @@ -736,8 +736,8 @@ void ShardsAffectedByTeamFailure::defineShard( KeyRangeRef keys ) { uniquify(prevTeams); /*TraceEvent("ShardsAffectedByTeamFailureDefine") - .detail("KeyBegin", printable(keys.begin)) - .detail("KeyEnd", printable(keys.end)) + .detail("KeyBegin", keys.begin) + .detail("KeyEnd", keys.end) .detail("TeamCount", teams.size()); */ auto affectedRanges = shard_teams.getAffectedRangesAfterInsertion(keys); @@ -754,8 +754,8 @@ void ShardsAffectedByTeamFailure::defineShard( KeyRangeRef keys ) { void ShardsAffectedByTeamFailure::moveShard( KeyRangeRef keys, std::vector destinationTeams ) { /*TraceEvent("ShardsAffectedByTeamFailureMove") - .detail("KeyBegin", printable(keys.begin)) - .detail("KeyEnd", printable(keys.end)) + .detail("KeyBegin", keys.begin) + .detail("KeyEnd", keys.end) .detail("NewTeamSize", destinationTeam.size()) .detail("NewTeam", describe(destinationTeam));*/ @@ -828,8 +828,8 @@ void ShardsAffectedByTeamFailure::check() { for(auto x = team_shards.lower_bound( std::make_pair( *t, KeyRangeRef() ) ); x != team_shards.end() && x->first == *t; ++x) shards += printable(x->second.begin) + "-" + printable(x->second.end) + ","; TraceEvent(SevError,"SATFInvariantError2") - .detail("KB", printable(i->begin())) - .detail("KE", printable(i->end())) + .detail("KB", i->begin()) + .detail("KE", i->end()) .detail("Team", teamDesc) .detail("Shards", shards); ASSERT(false); diff --git a/fdbserver/KeyValueStoreMemory.actor.cpp b/fdbserver/KeyValueStoreMemory.actor.cpp index b7a9942a0b..f4e29dae79 100644 --- a/fdbserver/KeyValueStoreMemory.actor.cpp +++ b/fdbserver/KeyValueStoreMemory.actor.cpp @@ -485,8 +485,8 @@ private: if (h.op == OpSnapshotItem) { // snapshot data item /*if (p1 < uncommittedNextKey) { TraceEvent(SevError, "RecSnapshotBack", self->id) 
- .detail("NextKey", printable(uncommittedNextKey)) - .detail("P1", printable(p1)) + .detail("NextKey", uncommittedNextKey) + .detail("P1", p1) .detail("Nextlocation", self->log->getNextReadLocation()); } ASSERT( p1 >= uncommittedNextKey );*/ @@ -497,7 +497,7 @@ private: ++dbgSnapshotItemCount; } else if (h.op == OpSnapshotEnd || h.op == OpSnapshotAbort) { // snapshot complete TraceEvent("RecSnapshotEnd", self->id) - .detail("NextKey", printable(uncommittedNextKey)) + .detail("NextKey", uncommittedNextKey) .detail("Nextlocation", self->log->getNextReadLocation()) .detail("IsSnapshotEnd", h.op == OpSnapshotEnd); @@ -526,7 +526,7 @@ private: } else if (h.op == OpRollback) { // rollback previous transaction recoveryQueue.rollback(); TraceEvent("KVSMemRecSnapshotRollback", self->id) - .detail("NextKey", printable(uncommittedNextKey)); + .detail("NextKey", uncommittedNextKey); uncommittedNextKey = self->recoveredSnapshotKey; uncommittedPrevSnapshotEnd = self->previousSnapshotEnd; uncommittedSnapshotEnd = self->currentSnapshotEnd; @@ -620,7 +620,7 @@ private: state int snapItems = 0; state uint64_t snapshotBytes = 0; - TraceEvent("KVSMemStartingSnapshot", self->id).detail("StartKey", printable(nextKey)); + TraceEvent("KVSMemStartingSnapshot", self->id).detail("StartKey", nextKey); loop { wait( self->notifiedCommittedWriteBytes.whenAtLeast( snapshotTotalWrittenBytes + 1 ) ); @@ -646,7 +646,7 @@ private: if (next == self->data.end()) { auto thisSnapshotEnd = self->log_op( OpSnapshotEnd, StringRef(), StringRef() ); //TraceEvent("SnapshotEnd", self->id) - // .detail("LastKey", printable(lastKey.present() ? lastKey.get() : LiteralStringRef(""))) + // .detail("LastKey", lastKey.present() ? 
lastKey.get() : LiteralStringRef("")) // .detail("CurrentSnapshotEndLoc", self->currentSnapshotEnd) // .detail("PreviousSnapshotEndLoc", self->previousSnapshotEnd) // .detail("ThisSnapshotEnd", thisSnapshotEnd) diff --git a/fdbserver/KeyValueStoreSQLite.actor.cpp b/fdbserver/KeyValueStoreSQLite.actor.cpp index c557d61740..104821c3e8 100644 --- a/fdbserver/KeyValueStoreSQLite.actor.cpp +++ b/fdbserver/KeyValueStoreSQLite.actor.cpp @@ -1211,7 +1211,7 @@ int SQLiteDB::checkAllPageChecksums() { Statement *jm = new Statement(*this, "PRAGMA journal_mode"); ASSERT( jm->nextRow() ); if (jm->column(0) != LiteralStringRef("wal")){ - TraceEvent(SevError, "JournalModeError").detail("Filename", filename).detail("Mode", printable(jm->column(0))); + TraceEvent(SevError, "JournalModeError").detail("Filename", filename).detail("Mode", jm->column(0)); ASSERT( false ); } delete jm; @@ -1337,7 +1337,7 @@ void SQLiteDB::open(bool writable) { Statement jm(*this, "PRAGMA journal_mode"); ASSERT( jm.nextRow() ); if (jm.column(0) != LiteralStringRef("wal")){ - TraceEvent(SevError, "JournalModeError").detail("Filename", filename).detail("Mode", printable(jm.column(0))); + TraceEvent(SevError, "JournalModeError").detail("Filename", filename).detail("Mode", jm.column(0)); ASSERT( false ); } diff --git a/fdbserver/LatencyBandConfig.cpp b/fdbserver/LatencyBandConfig.cpp index 728953aa37..927eee2518 100644 --- a/fdbserver/LatencyBandConfig.cpp +++ b/fdbserver/LatencyBandConfig.cpp @@ -83,7 +83,7 @@ Optional LatencyBandConfig::parse(ValueRef configurationStrin json_spirit::mValue parsedConfig; if(!json_spirit::read_string(configurationString.toString(), parsedConfig)) { - TraceEvent(SevWarnAlways, "InvalidLatencyBandConfiguration").detail("Reason", "InvalidJSON").detail("Configuration", printable(configurationString)); + TraceEvent(SevWarnAlways, "InvalidLatencyBandConfiguration").detail("Reason", "InvalidJSON").detail("Configuration", configurationString); return config; } @@ -96,7 +96,7 @@ 
Optional LatencyBandConfig::parse(ValueRef configurationStrin std::string errorStr; if(!schemaMatch(schema.get_obj(), configJson, errorStr)) { - TraceEvent(SevWarnAlways, "InvalidLatencyBandConfiguration").detail("Reason", "SchemaMismatch").detail("Configuration", printable(configurationString)).detail("Error", errorStr); + TraceEvent(SevWarnAlways, "InvalidLatencyBandConfiguration").detail("Reason", "SchemaMismatch").detail("Configuration", configurationString).detail("Error", errorStr); return config; } diff --git a/fdbserver/LogRouter.actor.cpp b/fdbserver/LogRouter.actor.cpp index 0341d31810..1430f9297d 100644 --- a/fdbserver/LogRouter.actor.cpp +++ b/fdbserver/LogRouter.actor.cpp @@ -285,7 +285,7 @@ void peekMessagesFromMemory( LogRouterData* self, TLogPeekRequest const& req, Bi ASSERT( !messages.getLength() ); auto& deque = get_version_messages(self, req.tag); - //TraceEvent("TLogPeekMem", self->dbgid).detail("Tag", printable(req.tag1)).detail("PDS", self->persistentDataSequence).detail("PDDS", self->persistentDataDurableSequence).detail("Oldest", map1.empty() ? 0 : map1.begin()->key ).detail("OldestMsgCount", map1.empty() ? 0 : map1.begin()->value.size()); + //TraceEvent("TLogPeekMem", self->dbgid).detail("Tag", req.tag1).detail("PDS", self->persistentDataSequence).detail("PDDS", self->persistentDataDurableSequence).detail("Oldest", map1.empty() ? 0 : map1.begin()->key ).detail("OldestMsgCount", map1.empty() ? 
0 : map1.begin()->value.size()); auto it = std::lower_bound(deque.begin(), deque.end(), std::make_pair(req.begin, LengthPrefixedStringRef()), CompareFirst>()); diff --git a/fdbserver/MasterProxyServer.actor.cpp b/fdbserver/MasterProxyServer.actor.cpp index 06164c675f..8708f26691 100644 --- a/fdbserver/MasterProxyServer.actor.cpp +++ b/fdbserver/MasterProxyServer.actor.cpp @@ -842,8 +842,8 @@ ACTOR Future commitBatch( // if (debugMutation("BackupProxyCommit", commitVersion, backupMutation)) { // TraceEvent("BackupProxyCommitTo", self->dbgid).detail("To", describe(tags)).detail("BackupMutation", backupMutation.toString()) -// .detail("BackupMutationSize", val.size()).detail("Version", commitVersion).detail("DestPath", printable(logRangeMutation.first)) -// .detail("PartIndex", part).detail("PartIndexEndian", bigEndian32(part)).detail("PartData", printable(backupMutation.param1)); +// .detail("BackupMutationSize", val.size()).detail("Version", commitVersion).detail("DestPath", logRangeMutation.first) +// .detail("PartIndex", part).detail("PartIndexEndian", bigEndian32(part)).detail("PartData", backupMutation.param1); // } } } diff --git a/fdbserver/MoveKeys.actor.cpp b/fdbserver/MoveKeys.actor.cpp index 81313e1f7d..d8389b3f4a 100644 --- a/fdbserver/MoveKeys.actor.cpp +++ b/fdbserver/MoveKeys.actor.cpp @@ -283,8 +283,8 @@ ACTOR Future startMoveKeys( Database occ, KeyRange keys, vector serve currentKeys = KeyRangeRef(currentKeys.begin, endKey); /*TraceEvent("StartMoveKeysBatch", relocationIntervalId) - .detail("KeyBegin", printable(currentKeys.begin).c_str()) - .detail("KeyEnd", printable(currentKeys.end).c_str());*/ + .detail("KeyBegin", currentKeys.begin.c_str()) + .detail("KeyEnd", currentKeys.end.c_str());*/ //printf("Moving '%s'-'%s' (%d) to %d servers\n", keys.begin.toString().c_str(), keys.end.toString().c_str(), old.size(), servers.size()); //for(int i=0; i startMoveKeys( Database occ, KeyRange keys, vector serve decodeKeyServersValue( old[i].value, src, dest ); 
/*TraceEvent("StartMoveKeysOldRange", relocationIntervalId) - .detail("KeyBegin", printable(rangeIntersectKeys.begin).c_str()) - .detail("KeyEnd", printable(rangeIntersectKeys.end).c_str()) + .detail("KeyBegin", rangeIntersectKeys.begin.c_str()) + .detail("KeyEnd", rangeIntersectKeys.end.c_str()) .detail("OldSrc", describe(src)) .detail("OldDest", describe(dest)) .detail("ReadVersion", tr.getReadVersion().get());*/ @@ -366,8 +366,8 @@ ACTOR Future startMoveKeys( Database occ, KeyRange keys, vector serve if(retries%10 == 0) { TraceEvent(retries == 50 ? SevWarnAlways : SevWarn, "StartMoveKeysRetrying", relocationIntervalId) .error(err) - .detail("Keys", printable(keys)) - .detail("BeginKey", printable(begin)) + .detail("Keys", keys) + .detail("BeginKey", begin) .detail("NumTries", retries); } } @@ -468,7 +468,7 @@ ACTOR Future finishMoveKeys( Database occ, KeyRange keys, vector dest ASSERT (!destinationTeam.empty()); try { - TraceEvent(SevDebug, interval.begin(), relocationIntervalId).detail("KeyBegin", printable(keys.begin)).detail("KeyEnd", printable(keys.end)); + TraceEvent(SevDebug, interval.begin(), relocationIntervalId).detail("KeyBegin", keys.begin).detail("KeyEnd", keys.end); //This process can be split up into multiple transactions if there are too many existing overlapping shards //In that case, each iteration of this loop will have begin set to the end of the last processed shard @@ -538,13 +538,13 @@ ACTOR Future finishMoveKeys( Database occ, KeyRange keys, vector dest alreadyMoved = destSet.empty() && srcSet == intendedTeam; if(destSet != intendedTeam && !alreadyMoved) { TraceEvent(SevWarn, "MoveKeysDestTeamNotIntended", relocationIntervalId) - .detail("KeyBegin", printable(keys.begin)) - .detail("KeyEnd", printable(keys.end)) - .detail("IterationBegin", printable(begin)) - .detail("IterationEnd", printable(endKey)) + .detail("KeyBegin", keys.begin) + .detail("KeyEnd", keys.end) + .detail("IterationBegin", begin) + .detail("IterationEnd", endKey) 
.detail("DestSet", describe(destSet)) .detail("IntendedTeam", describe(intendedTeam)) - .detail("KeyServers", printable(keyServers)); + .detail("KeyServers", keyServers); //ASSERT( false ); ASSERT(!dest.empty()); //The range has already been moved, but to a different dest (or maybe dest was cleared) @@ -589,18 +589,18 @@ ACTOR Future finishMoveKeys( Database occ, KeyRange keys, vector dest if (!dest.size()) { TEST(true); // A previous finishMoveKeys for this range committed just as it was cancelled to start this one? TraceEvent("FinishMoveKeysNothingToDo", relocationIntervalId) - .detail("KeyBegin", printable(keys.begin)) - .detail("KeyEnd", printable(keys.end)) - .detail("IterationBegin", printable(begin)) - .detail("IterationEnd", printable(endKey)); + .detail("KeyBegin", keys.begin) + .detail("KeyEnd", keys.end) + .detail("IterationBegin", begin) + .detail("IterationEnd", endKey); begin = keyServers.end()[-1].key; break; } waitInterval = TraceInterval("RelocateShard_FinishMoveKeysWaitDurable"); TraceEvent(SevDebug, waitInterval.begin(), relocationIntervalId) - .detail("KeyBegin", printable(keys.begin)) - .detail("KeyEnd", printable(keys.end)); + .detail("KeyBegin", keys.begin) + .detail("KeyEnd", keys.end); // Wait for a durable quorum of servers in destServers to have keys available (readWrite) // They must also have at least the transaction read version so they can't "forget" the shard between @@ -668,10 +668,10 @@ ACTOR Future finishMoveKeys( Database occ, KeyRange keys, vector dest if(retries%10 == 0) { TraceEvent(retries == 20 ? 
SevWarnAlways : SevWarn, "RelocateShard_FinishMoveKeysRetrying", relocationIntervalId) .error(err) - .detail("KeyBegin", printable(keys.begin)) - .detail("KeyEnd", printable(keys.end)) - .detail("IterationBegin", printable(begin)) - .detail("IterationEnd", printable(endKey)); + .detail("KeyBegin", keys.begin) + .detail("KeyEnd", keys.end) + .detail("IterationBegin", begin) + .detail("IterationEnd", endKey); } } } @@ -785,7 +785,7 @@ ACTOR Future canRemoveStorageServer( Transaction* tr, UID serverID ) { ASSERT(keys.size() >= 2); if(keys[0].value == keys[1].value && keys[1].key != allKeys.end) { - TraceEvent("ServerKeysCoalescingError", serverID).detail("Key1", printable(keys[0].key)).detail("Key2", printable(keys[1].key)).detail("Value", printable(keys[0].value)); + TraceEvent("ServerKeysCoalescingError", serverID).detail("Key1", keys[0].key).detail("Key2", keys[1].key).detail("Value", keys[0].value); ASSERT(false); } diff --git a/fdbserver/OldTLogServer_4_6.actor.cpp b/fdbserver/OldTLogServer_4_6.actor.cpp index 21a9c326e9..8a918601b3 100644 --- a/fdbserver/OldTLogServer_4_6.actor.cpp +++ b/fdbserver/OldTLogServer_4_6.actor.cpp @@ -897,7 +897,7 @@ namespace oldTLog_4_6 { return Void(); } - //TraceEvent("TLogPeekMessages0", self->dbgid).detail("ReqBeginEpoch", req.begin.epoch).detail("ReqBeginSeq", req.begin.sequence).detail("Epoch", self->epoch()).detail("PersistentDataSeq", self->persistentDataSequence).detail("Tag1", printable(req.tag1)).detail("Tag2", printable(req.tag2)); + //TraceEvent("TLogPeekMessages0", self->dbgid).detail("ReqBeginEpoch", req.begin.epoch).detail("ReqBeginSeq", req.begin.sequence).detail("Epoch", self->epoch()).detail("PersistentDataSeq", self->persistentDataSequence).detail("Tag1", req.tag1).detail("Tag2", req.tag2); // Wait until we have something to return that the caller doesn't already have if( logData->version.get() < req.begin ) { wait( logData->version.whenAtLeast( req.begin ) ); @@ -907,7 +907,7 @@ namespace oldTLog_4_6 { state 
Version endVersion = logData->version.get() + 1; //grab messages from disk - //TraceEvent("TLogPeekMessages", self->dbgid).detail("ReqBeginEpoch", req.begin.epoch).detail("ReqBeginSeq", req.begin.sequence).detail("Epoch", self->epoch()).detail("PersistentDataSeq", self->persistentDataSequence).detail("Tag1", printable(req.tag1)).detail("Tag2", printable(req.tag2)); + //TraceEvent("TLogPeekMessages", self->dbgid).detail("ReqBeginEpoch", req.begin.epoch).detail("ReqBeginSeq", req.begin.sequence).detail("Epoch", self->epoch()).detail("PersistentDataSeq", self->persistentDataSequence).detail("Tag1", req.tag1).detail("Tag2", req.tag2); if( req.begin <= logData->persistentDataDurableVersion ) { // Just in case the durable version changes while we are waiting for the read, we grab this data from memory. We may or may not actually send it depending on // whether we get enough data from disk. @@ -921,7 +921,7 @@ namespace oldTLog_4_6 { persistTagMessagesKey(logData->logId, oldTag, req.begin), persistTagMessagesKey(logData->logId, oldTag, logData->persistentDataDurableVersion + 1)), SERVER_KNOBS->DESIRED_TOTAL_BYTES, SERVER_KNOBS->DESIRED_TOTAL_BYTES)); - //TraceEvent("TLogPeekResults", self->dbgid).detail("ForAddress", req.reply.getEndpoint().getPrimaryAddress()).detail("Tag1Results", s1).detail("Tag2Results", s2).detail("Tag1ResultsLim", kv1.size()).detail("Tag2ResultsLim", kv2.size()).detail("Tag1ResultsLast", kv1.size() ? printable(kv1[0].key) : "").detail("Tag2ResultsLast", kv2.size() ? printable(kv2[0].key) : "").detail("Limited", limited).detail("NextEpoch", next_pos.epoch).detail("NextSeq", next_pos.sequence).detail("NowEpoch", self->epoch()).detail("NowSeq", self->sequence.getNextSequence()); + //TraceEvent("TLogPeekResults", self->dbgid).detail("ForAddress", req.reply.getEndpoint().getPrimaryAddress()).detail("Tag1Results", s1).detail("Tag2Results", s2).detail("Tag1ResultsLim", kv1.size()).detail("Tag2ResultsLim", kv2.size()).detail("Tag1ResultsLast", kv1.size() ? 
kv1[0].key : "").detail("Tag2ResultsLast", kv2.size() ? kv2[0].key : "").detail("Limited", limited).detail("NextEpoch", next_pos.epoch).detail("NextSeq", next_pos.sequence).detail("NowEpoch", self->epoch()).detail("NowSeq", self->sequence.getNextSequence()); for (auto &kv : kvs) { auto ver = decodeTagMessagesKey(kv.key); @@ -1244,7 +1244,7 @@ namespace oldTLog_4_6 { wait( waitForAll( (vector>>>(), fVers, fRecoverCounts) ) ); if (fFormat.get().present() && !persistFormatReadableRange.contains( fFormat.get().get() )) { - TraceEvent(SevError, "UnsupportedDBFormat", self->dbgid).detail("Format", printable(fFormat.get().get())).detail("Expected", persistFormat.value.toString()); + TraceEvent(SevError, "UnsupportedDBFormat", self->dbgid).detail("Format", fFormat.get().get()).detail("Expected", persistFormat.value.toString()); throw worker_recovery_failed(); } @@ -1255,7 +1255,7 @@ namespace oldTLog_4_6 { throw worker_removed(); } else { // This should never happen - TraceEvent(SevError, "NoDBFormatKey", self->dbgid).detail("FirstKey", printable(v[0].key)); + TraceEvent(SevError, "NoDBFormatKey", self->dbgid).detail("FirstKey", v[0].key); ASSERT( false ); throw worker_recovery_failed(); } diff --git a/fdbserver/OldTLogServer_6_0.actor.cpp b/fdbserver/OldTLogServer_6_0.actor.cpp index 6ac0f68c4a..5a0cc3ed37 100644 --- a/fdbserver/OldTLogServer_6_0.actor.cpp +++ b/fdbserver/OldTLogServer_6_0.actor.cpp @@ -912,7 +912,7 @@ void peekMessagesFromMemory( Reference self, TLogPeekRequest const& req ASSERT( !messages.getLength() ); auto& deque = getVersionMessages(self, req.tag); - //TraceEvent("TLogPeekMem", self->dbgid).detail("Tag", printable(req.tag1)).detail("PDS", self->persistentDataSequence).detail("PDDS", self->persistentDataDurableSequence).detail("Oldest", map1.empty() ? 0 : map1.begin()->key ).detail("OldestMsgCount", map1.empty() ? 
0 : map1.begin()->value.size()); + //TraceEvent("TLogPeekMem", self->dbgid).detail("Tag", req.tag1).detail("PDS", self->persistentDataSequence).detail("PDDS", self->persistentDataDurableSequence).detail("Oldest", map1.empty() ? 0 : map1.begin()->key ).detail("OldestMsgCount", map1.empty() ? 0 : map1.begin()->value.size()); Version begin = std::max( req.begin, self->persistentDataDurableVersion+1 ); auto it = std::lower_bound(deque.begin(), deque.end(), std::make_pair(begin, LengthPrefixedStringRef()), CompareFirst>()); @@ -979,7 +979,7 @@ ACTOR Future tLogPeekMessages( TLogData* self, TLogPeekRequest req, Refere return Void(); } - //TraceEvent("TLogPeekMessages0", self->dbgid).detail("ReqBeginEpoch", req.begin.epoch).detail("ReqBeginSeq", req.begin.sequence).detail("Epoch", self->epoch()).detail("PersistentDataSeq", self->persistentDataSequence).detail("Tag1", printable(req.tag1)).detail("Tag2", printable(req.tag2)); + //TraceEvent("TLogPeekMessages0", self->dbgid).detail("ReqBeginEpoch", req.begin.epoch).detail("ReqBeginSeq", req.begin.sequence).detail("Epoch", self->epoch()).detail("PersistentDataSeq", self->persistentDataSequence).detail("Tag1", req.tag1).detail("Tag2", req.tag2); // Wait until we have something to return that the caller doesn't already have if( logData->version.get() < req.begin ) { wait( logData->version.whenAtLeast( req.begin ) ); @@ -1027,7 +1027,7 @@ ACTOR Future tLogPeekMessages( TLogData* self, TLogPeekRequest req, Refere state Version endVersion = logData->version.get() + 1; //grab messages from disk - //TraceEvent("TLogPeekMessages", self->dbgid).detail("ReqBeginEpoch", req.begin.epoch).detail("ReqBeginSeq", req.begin.sequence).detail("Epoch", self->epoch()).detail("PersistentDataSeq", self->persistentDataSequence).detail("Tag1", printable(req.tag1)).detail("Tag2", printable(req.tag2)); + //TraceEvent("TLogPeekMessages", self->dbgid).detail("ReqBeginEpoch", req.begin.epoch).detail("ReqBeginSeq", req.begin.sequence).detail("Epoch", 
self->epoch()).detail("PersistentDataSeq", self->persistentDataSequence).detail("Tag1", req.tag1).detail("Tag2", req.tag2); if( req.begin <= logData->persistentDataDurableVersion ) { // Just in case the durable version changes while we are waiting for the read, we grab this data from memory. We may or may not actually send it depending on // whether we get enough data from disk. @@ -1041,7 +1041,7 @@ ACTOR Future tLogPeekMessages( TLogData* self, TLogPeekRequest req, Refere persistTagMessagesKey(logData->logId, req.tag, req.begin), persistTagMessagesKey(logData->logId, req.tag, logData->persistentDataDurableVersion + 1)), SERVER_KNOBS->DESIRED_TOTAL_BYTES, SERVER_KNOBS->DESIRED_TOTAL_BYTES)); - //TraceEvent("TLogPeekResults", self->dbgid).detail("ForAddress", req.reply.getEndpoint().address).detail("Tag1Results", s1).detail("Tag2Results", s2).detail("Tag1ResultsLim", kv1.size()).detail("Tag2ResultsLim", kv2.size()).detail("Tag1ResultsLast", kv1.size() ? printable(kv1[0].key) : "").detail("Tag2ResultsLast", kv2.size() ? printable(kv2[0].key) : "").detail("Limited", limited).detail("NextEpoch", next_pos.epoch).detail("NextSeq", next_pos.sequence).detail("NowEpoch", self->epoch()).detail("NowSeq", self->sequence.getNextSequence()); + //TraceEvent("TLogPeekResults", self->dbgid).detail("ForAddress", req.reply.getEndpoint().address).detail("Tag1Results", s1).detail("Tag2Results", s2).detail("Tag1ResultsLim", kv1.size()).detail("Tag2ResultsLim", kv2.size()).detail("Tag1ResultsLast", kv1.size() ? kv1[0].key : "").detail("Tag2ResultsLast", kv2.size() ? 
kv2[0].key : "").detail("Limited", limited).detail("NextEpoch", next_pos.epoch).detail("NextSeq", next_pos.sequence).detail("NowEpoch", self->epoch()).detail("NowSeq", self->sequence.getNextSequence()); for (auto &kv : kvs) { auto ver = decodeTagMessagesKey(kv.key); @@ -1714,7 +1714,7 @@ ACTOR Future restorePersistentState( TLogData* self, LocalityData locality flushAndExit(0); } - TraceEvent(SevError, "UnsupportedDBFormat", self->dbgid).detail("Format", printable(fFormat.get().get())).detail("Expected", persistFormat.value.toString()); + TraceEvent(SevError, "UnsupportedDBFormat", self->dbgid).detail("Format", fFormat.get().get()).detail("Expected", persistFormat.value.toString()); throw worker_recovery_failed(); } @@ -1725,7 +1725,7 @@ ACTOR Future restorePersistentState( TLogData* self, LocalityData locality throw worker_removed(); } else { // This should never happen - TraceEvent(SevError, "NoDBFormatKey", self->dbgid).detail("FirstKey", printable(v[0].key)); + TraceEvent(SevError, "NoDBFormatKey", self->dbgid).detail("FirstKey", v[0].key); ASSERT( false ); throw worker_recovery_failed(); } diff --git a/fdbserver/SimulatedCluster.actor.cpp b/fdbserver/SimulatedCluster.actor.cpp index 5edb7c856f..d30afa7b09 100644 --- a/fdbserver/SimulatedCluster.actor.cpp +++ b/fdbserver/SimulatedCluster.actor.cpp @@ -1141,7 +1141,7 @@ void setupSimulatedSystem(vector>* systemActors, std::string baseFo ASSERT(g_simulator.storagePolicy && g_simulator.tLogPolicy); ASSERT(!g_simulator.hasSatelliteReplication || g_simulator.satelliteTLogPolicy); - TraceEvent("SimulatorConfig").detail("ConfigString", printable(StringRef(startingConfigString))); + TraceEvent("SimulatorConfig").detail("ConfigString", StringRef(startingConfigString)); const int dataCenters = simconfig.datacenters; const int machineCount = simconfig.machine_count; @@ -1235,7 +1235,7 @@ void setupSimulatedSystem(vector>* systemActors, std::string baseFo *pConnString = conn; - 
TraceEvent("SimulatedConnectionString").detail("String", conn.toString()).detail("ConfigString", printable(StringRef(startingConfigString))); + TraceEvent("SimulatedConnectionString").detail("String", conn.toString()).detail("ConfigString", StringRef(startingConfigString)); int assignedMachines = 0, nonVersatileMachines = 0; for( int dc = 0; dc < dataCenters; dc++ ) { diff --git a/fdbserver/Status.actor.cpp b/fdbserver/Status.actor.cpp index 15047ea24d..ab24a824ee 100644 --- a/fdbserver/Status.actor.cpp +++ b/fdbserver/Status.actor.cpp @@ -1751,7 +1751,7 @@ ACTOR Future layerStatusFetcher(Database cx, JsonBuilderArray json.absorb(doc.get_obj()); wait(yield()); } catch(Error &e) { - TraceEvent(SevWarn, "LayerStatusBadJSON").detail("Key", printable(docs[j].key)); + TraceEvent(SevWarn, "LayerStatusBadJSON").detail("Key", docs[j].key); } } } diff --git a/fdbserver/StorageMetrics.actor.h b/fdbserver/StorageMetrics.actor.h index eaf475ee44..742d33fdc1 100644 --- a/fdbserver/StorageMetrics.actor.h +++ b/fdbserver/StorageMetrics.actor.h @@ -73,7 +73,7 @@ struct StorageMetricSample { } // If we didn't return above, we didn't find anything. - TraceEvent(SevWarn, "CannotSplitLastSampleKey").detail("Range", printable(range)).detail("Offset", offset); + TraceEvent(SevWarn, "CannotSplitLastSampleKey").detail("Range", range).detail("Offset", offset); return front ? 
range.end : range.begin; } }; @@ -307,7 +307,7 @@ struct StorageServerMetrics { StorageMetrics estimated = req.estimated; StorageMetrics remaining = getMetrics( req.keys ) + used; - //TraceEvent("SplitMetrics").detail("Begin", printable(req.keys.begin)).detail("End", printable(req.keys.end)).detail("Remaining", remaining.bytes).detail("Used", used.bytes); + //TraceEvent("SplitMetrics").detail("Begin", req.keys.begin).detail("End", req.keys.end).detail("Remaining", remaining.bytes).detail("Used", used.bytes); while( true ) { if( remaining.bytes < 2*SERVER_KNOBS->MIN_SHARD_BYTES ) diff --git a/fdbserver/TLogServer.actor.cpp b/fdbserver/TLogServer.actor.cpp index 81d542a11f..16fe6ae4b8 100644 --- a/fdbserver/TLogServer.actor.cpp +++ b/fdbserver/TLogServer.actor.cpp @@ -1157,7 +1157,7 @@ void peekMessagesFromMemory( Reference self, TLogPeekRequest const& req ASSERT( !messages.getLength() ); auto& deque = getVersionMessages(self, req.tag); - //TraceEvent("TLogPeekMem", self->dbgid).detail("Tag", printable(req.tag1)).detail("PDS", self->persistentDataSequence).detail("PDDS", self->persistentDataDurableSequence).detail("Oldest", map1.empty() ? 0 : map1.begin()->key ).detail("OldestMsgCount", map1.empty() ? 0 : map1.begin()->value.size()); + //TraceEvent("TLogPeekMem", self->dbgid).detail("Tag", req.tag1).detail("PDS", self->persistentDataSequence).detail("PDDS", self->persistentDataDurableSequence).detail("Oldest", map1.empty() ? 0 : map1.begin()->key ).detail("OldestMsgCount", map1.empty() ? 
0 : map1.begin()->value.size()); Version begin = std::max( req.begin, self->persistentDataDurableVersion+1 ); auto it = std::lower_bound(deque.begin(), deque.end(), std::make_pair(begin, LengthPrefixedStringRef()), CompareFirst>()); @@ -1260,7 +1260,7 @@ ACTOR Future tLogPeekMessages( TLogData* self, TLogPeekRequest req, Refere return Void(); } - //TraceEvent("TLogPeekMessages0", self->dbgid).detail("ReqBeginEpoch", req.begin.epoch).detail("ReqBeginSeq", req.begin.sequence).detail("Epoch", self->epoch()).detail("PersistentDataSeq", self->persistentDataSequence).detail("Tag1", printable(req.tag1)).detail("Tag2", printable(req.tag2)); + //TraceEvent("TLogPeekMessages0", self->dbgid).detail("ReqBeginEpoch", req.begin.epoch).detail("ReqBeginSeq", req.begin.sequence).detail("Epoch", self->epoch()).detail("PersistentDataSeq", self->persistentDataSequence).detail("Tag1", req.tag1).detail("Tag2", req.tag2); // Wait until we have something to return that the caller doesn't already have if( logData->version.get() < req.begin ) { wait( logData->version.whenAtLeast( req.begin ) ); @@ -1308,7 +1308,7 @@ ACTOR Future tLogPeekMessages( TLogData* self, TLogPeekRequest req, Refere state Version endVersion = logData->version.get() + 1; //grab messages from disk - //TraceEvent("TLogPeekMessages", self->dbgid).detail("ReqBeginEpoch", req.begin.epoch).detail("ReqBeginSeq", req.begin.sequence).detail("Epoch", self->epoch()).detail("PersistentDataSeq", self->persistentDataSequence).detail("Tag1", printable(req.tag1)).detail("Tag2", printable(req.tag2)); + //TraceEvent("TLogPeekMessages", self->dbgid).detail("ReqBeginEpoch", req.begin.epoch).detail("ReqBeginSeq", req.begin.sequence).detail("Epoch", self->epoch()).detail("PersistentDataSeq", self->persistentDataSequence).detail("Tag1", req.tag1).detail("Tag2", req.tag2); if( req.begin <= logData->persistentDataDurableVersion ) { // Just in case the durable version changes while we are waiting for the read, we grab this data from memory. 
We may or may not actually send it depending on // whether we get enough data from disk. @@ -1345,7 +1345,7 @@ ACTOR Future tLogPeekMessages( TLogData* self, TLogPeekRequest req, Refere persistTagMessageRefsKey(logData->logId, req.tag, req.begin), persistTagMessageRefsKey(logData->logId, req.tag, logData->persistentDataDurableVersion + 1)))); - //TraceEvent("TLogPeekResults", self->dbgid).detail("ForAddress", req.reply.getEndpoint().getPrimaryAddress()).detail("Tag1Results", s1).detail("Tag2Results", s2).detail("Tag1ResultsLim", kv1.size()).detail("Tag2ResultsLim", kv2.size()).detail("Tag1ResultsLast", kv1.size() ? printable(kv1[0].key) : "").detail("Tag2ResultsLast", kv2.size() ? printable(kv2[0].key) : "").detail("Limited", limited).detail("NextEpoch", next_pos.epoch).detail("NextSeq", next_pos.sequence).detail("NowEpoch", self->epoch()).detail("NowSeq", self->sequence.getNextSequence()); + //TraceEvent("TLogPeekResults", self->dbgid).detail("ForAddress", req.reply.getEndpoint().getPrimaryAddress()).detail("Tag1Results", s1).detail("Tag2Results", s2).detail("Tag1ResultsLim", kv1.size()).detail("Tag2ResultsLim", kv2.size()).detail("Tag1ResultsLast", kv1.size() ? kv1[0].key : "").detail("Tag2ResultsLast", kv2.size() ? 
kv2[0].key : "").detail("Limited", limited).detail("NextEpoch", next_pos.epoch).detail("NextSeq", next_pos.sequence).detail("NowEpoch", self->epoch()).detail("NowSeq", self->sequence.getNextSequence()); state std::vector> commitLocations; state bool earlyEnd = false; @@ -2087,7 +2087,7 @@ ACTOR Future restorePersistentState( TLogData* self, LocalityData locality flushAndExit(0); } - TraceEvent(SevError, "UnsupportedDBFormat", self->dbgid).detail("Format", printable(fFormat.get().get())).detail("Expected", persistFormat.value.toString()); + TraceEvent(SevError, "UnsupportedDBFormat", self->dbgid).detail("Format", fFormat.get().get()).detail("Expected", persistFormat.value.toString()); throw worker_recovery_failed(); } @@ -2098,7 +2098,7 @@ ACTOR Future restorePersistentState( TLogData* self, LocalityData locality throw worker_removed(); } else { // This should never happen - TraceEvent(SevError, "NoDBFormatKey", self->dbgid).detail("FirstKey", printable(v[0].key)); + TraceEvent(SevError, "NoDBFormatKey", self->dbgid).detail("FirstKey", v[0].key); ASSERT( false ); throw worker_recovery_failed(); } diff --git a/fdbserver/fdbserver.actor.cpp b/fdbserver/fdbserver.actor.cpp index 421f793cab..247f4441ad 100644 --- a/fdbserver/fdbserver.actor.cpp +++ b/fdbserver/fdbserver.actor.cpp @@ -203,9 +203,9 @@ StringRef debugKey2 = LiteralStringRef( "\xff\xff\xff\xff" ); bool debugMutation( const char* context, Version version, MutationRef const& mutation ) { if ((mutation.type == mutation.SetValue || mutation.type == mutation.AddValue || mutation.type==mutation.DebugKey) && (mutation.param1 == debugKey || mutation.param1 == debugKey2)) - ;//TraceEvent("MutationTracking").detail("At", context).detail("Version", version).detail("MutationType", "SetValue").detail("Key", printable(mutation.param1)).detail("Value", printable(mutation.param2)); + ;//TraceEvent("MutationTracking").detail("At", context).detail("Version", version).detail("MutationType", "SetValue").detail("Key", 
mutation.param1).detail("Value", mutation.param2); else if ((mutation.type == mutation.ClearRange || mutation.type == mutation.DebugKeyRange) && ((mutation.param1<=debugKey && mutation.param2>debugKey) || (mutation.param1<=debugKey2 && mutation.param2>debugKey2))) - ;//TraceEvent("MutationTracking").detail("At", context).detail("Version", version).detail("MutationType", "ClearRange").detail("KeyBegin", printable(mutation.param1)).detail("KeyEnd", printable(mutation.param2)); + ;//TraceEvent("MutationTracking").detail("At", context).detail("Version", version).detail("MutationType", "ClearRange").detail("KeyBegin", mutation.param1).detail("KeyEnd", mutation.param2); else return false; const char* type = @@ -223,7 +223,7 @@ bool debugMutation( const char* context, Version version, MutationRef const& mut bool debugKeyRange( const char* context, Version version, KeyRangeRef const& keys ) { if (keys.contains(debugKey) || keys.contains(debugKey2)) { debugMutation(context, version, MutationRef(MutationRef::DebugKeyRange, keys.begin, keys.end) ); - //TraceEvent("MutationTracking").detail("At", context).detail("Version", version).detail("KeyBegin", printable(keys.begin)).detail("KeyEnd", printable(keys.end)); + //TraceEvent("MutationTracking").detail("At", context).detail("Version", version).detail("KeyBegin", keys.begin).detail("KeyEnd", keys.end); return true; } else return false; diff --git a/fdbserver/masterserver.actor.cpp b/fdbserver/masterserver.actor.cpp index b628d2b313..191ddced1e 100644 --- a/fdbserver/masterserver.actor.cpp +++ b/fdbserver/masterserver.actor.cpp @@ -308,7 +308,7 @@ ACTOR Future newTLogServers( Reference self, RecruitFromConfig tr.set(tr.arena(), tagLocalityListKeyFor(recr.dcId), tagLocalityListValue(loc)); initialConfChanges->push_back(tr); self->dcId_locality[recr.dcId] = loc; - TraceEvent(SevWarn, "UnknownPrimaryDCID", self->dbgid).detail("PrimaryId", printable(recr.dcId)).detail("Loc", loc); + TraceEvent(SevWarn, "UnknownPrimaryDCID", 
self->dbgid).detail("PrimaryId", recr.dcId).detail("Loc", loc); } if( !self->dcId_locality.count(remoteDcId) ) { @@ -317,7 +317,7 @@ ACTOR Future newTLogServers( Reference self, RecruitFromConfig tr.set(tr.arena(), tagLocalityListKeyFor(remoteDcId), tagLocalityListValue(loc)); initialConfChanges->push_back(tr); self->dcId_locality[remoteDcId] = loc; - TraceEvent(SevWarn, "UnknownRemoteDCID", self->dbgid).detail("RemoteId", printable(remoteDcId)).detail("Loc", loc); + TraceEvent(SevWarn, "UnknownRemoteDCID", self->dbgid).detail("RemoteId", remoteDcId).detail("Loc", loc); } std::vector exclusionWorkerIds; @@ -522,7 +522,7 @@ ACTOR Future> provisionalMaster( Referenceread_conflict_ranges.begin(), t->read_conflict_ranges.end(), [](KeyRangeRef const& r){return r.contains(lastEpochEndKey);})) { for(auto m = t->mutations.begin(); m != t->mutations.end(); ++m) { - TraceEvent("PM_CTM", parent->dbgid).detail("MType", m->type).detail("Param1", printable(m->param1)).detail("Param2", printable(m->param2)); + TraceEvent("PM_CTM", parent->dbgid).detail("MType", m->type).detail("Param1", m->param1).detail("Param2", m->param2); if (isMetadataMutation(*m)) { // We keep the mutations and write conflict ranges from this transaction, but not its read conflict ranges Standalone out; @@ -613,7 +613,7 @@ ACTOR Future updateLocalityForDcId(Optional dcId, ReferencegetKnownCommittedVersion(); } locality->set( PeekSpecialInfo(loc.first,loc.second,ver) ); - TraceEvent("UpdatedLocalityForDcId").detail("DcId", printable(dcId)).detail("Locality0", loc.first).detail("Locality1", loc.second).detail("Version", ver); + TraceEvent("UpdatedLocalityForDcId").detail("DcId", dcId).detail("Locality0", loc.first).detail("Locality1", loc.second).detail("Version", ver); wait( oldLogSystem->onLogSystemConfigChange() || oldLogSystem->onKnownCommittedVersionChange() ); } } @@ -702,7 +702,7 @@ ACTOR Future readTransactionSystemState( Reference self, Refer //auto kvs = self->txnStateStore->readRange( systemKeys ); 
//for( auto & kv : kvs.get() ) - // TraceEvent("MasterRecoveredTXS", self->dbgid).detail("K", printable(kv.key)).detail("V", printable(kv.value)); + // TraceEvent("MasterRecoveredTXS", self->dbgid).detail("K", kv.key).detail("V", kv.value); self->txnStateLogAdapter->setNextVersion( oldLogSystem->getEnd() ); //< FIXME: (1) the log adapter should do this automatically after recovery; (2) if we make KeyValueStoreMemory guarantee immediate reads, we should be able to get rid of the discardCommit() below and not need a writable log adapter @@ -849,7 +849,7 @@ ACTOR Future recoverFrom( Reference self, Referencedbgid); for (auto m = req.mutations.begin(); m != req.mutations.end(); ++m) - TraceEvent("EmergencyTransactionMutation", self->dbgid).detail("MType", m->type).detail("P1", printable(m->param1)).detail("P2", printable(m->param2)); + TraceEvent("EmergencyTransactionMutation", self->dbgid).detail("MType", m->type).detail("P1", m->param1).detail("P2", m->param2); DatabaseConfiguration oldConf = self->configuration; self->configuration = self->originalConfiguration; @@ -1050,7 +1050,7 @@ ACTOR Future resolutionBalancing(Reference self) { ResolutionSplitReply split = wait( brokenPromiseToNever(self->resolvers[metrics.lastItem()->second].split.getReply(req, TaskResolutionMetrics)) ); KeyRangeRef moveRange = range.second ? 
KeyRangeRef( range.first.begin, split.key ) : KeyRangeRef( split.key, range.first.end ); movedRanges.push_back_deep(movedRanges.arena(), ResolverMoveRef(moveRange, dest)); - TraceEvent("MovingResolutionRange").detail("Src", src).detail("Dest", dest).detail("Amount", amount).detail("StartRange", printable(range.first)).detail("MoveRange", printable(moveRange)).detail("Used", split.used).detail("KeyResolverRanges", key_resolver.size()); + TraceEvent("MovingResolutionRange").detail("Src", src).detail("Dest", dest).detail("Amount", amount).detail("StartRange", range.first).detail("MoveRange", moveRange).detail("Used", split.used).detail("KeyResolverRanges", key_resolver.size()); amount -= split.used; if(moveRange != range.first || amount <= 0 ) break; @@ -1058,7 +1058,7 @@ ACTOR Future resolutionBalancing(Reference self) { for(auto& it : movedRanges) key_resolver.insert(it.range, it.dest); //for(auto& it : key_resolver.ranges()) - // TraceEvent("KeyResolver").detail("Range", printable(it.range())).detail("Value", it.value()); + // TraceEvent("KeyResolver").detail("Range", it.range()).detail("Value", it.value()); self->resolverChangesVersion = self->version + 1; for (auto& p : self->proxies) @@ -1284,7 +1284,7 @@ ACTOR Future masterCore( Reference self ) { .detail("StatusCode", RecoveryStatus::recovery_transaction) .detail("Status", RecoveryStatus::names[RecoveryStatus::recovery_transaction]) .detail("PrimaryLocality", self->primaryLocality) - .detail("DcId", printable(self->myInterface.locality.dcId())) + .detail("DcId", self->myInterface.locality.dcId()) .trackLatest("MasterRecoveryState"); // Recovery transaction diff --git a/fdbserver/storageserver.actor.cpp b/fdbserver/storageserver.actor.cpp index cf340e1dad..9ae6f239c2 100644 --- a/fdbserver/storageserver.actor.cpp +++ b/fdbserver/storageserver.actor.cpp @@ -535,7 +535,7 @@ public: void addShard( ShardInfo* newShard ) { ASSERT( !newShard->keys.empty() ); newShard->changeCounter = ++shardChangeCounter; - 
//TraceEvent("AddShard", this->thisServerID).detail("KeyBegin", printable(newShard->keys.begin)).detail("KeyEnd", printable(newShard->keys.end)).detail("State", newShard->isReadable() ? "Readable" : newShard->notAssigned() ? "NotAssigned" : "Adding").detail("Version", this->version.get()); + //TraceEvent("AddShard", this->thisServerID).detail("KeyBegin", newShard->keys.begin).detail("KeyEnd", newShard->keys.end).detail("State", newShard->isReadable() ? "Readable" : newShard->notAssigned() ? "NotAssigned" : "Adding").detail("Version", this->version.get()); /*auto affected = shards.getAffectedRangesAfterInsertion( newShard->keys, Reference() ); for(auto i = affected.begin(); i != affected.end(); ++i) shards.insert( *i, Reference() );*/ @@ -619,7 +619,7 @@ bool validateRange( StorageServer::VersionedData::ViewAtVersion const& view, Key // * Nonoverlapping: No clear overlaps a set or another clear, or adjoins another clear. // * Old mutations are erased: All items in versionedData.atLatest() have insertVersion() > durableVersion() - TraceEvent("ValidateRange", id).detail("KeyBegin", printable(range.begin)).detail("KeyEnd", printable(range.end)).detail("Version", version); + TraceEvent("ValidateRange", id).detail("KeyBegin", range.begin).detail("KeyEnd", range.end).detail("Version", version); KeyRef k; bool ok = true; bool kIsClear = false; @@ -628,7 +628,7 @@ bool validateRange( StorageServer::VersionedData::ViewAtVersion const& view, Key for(; i != view.end() && i.key() < range.end; ++i) { ASSERT( i.insertVersion() > minInsertVersion ); if (kIsClear && i->isClearTo() ? 
i.key() <= k : i.key() < k) { - TraceEvent(SevError,"InvalidRange",id).detail("Key1", printable(k)).detail("Key2", printable(i.key())).detail("Version", version); + TraceEvent(SevError,"InvalidRange",id).detail("Key1", k).detail("Key2", i.key()).detail("Version", version); ok = false; } //ASSERT( i.key() >= k ); @@ -668,8 +668,8 @@ void validate(StorageServer* data, bool force = false) { ShardInfo* shard = s->value().getPtr(); if (!shard->isInVersionedData()) { if (latest.lower_bound(s->begin()) != latest.lower_bound(s->end())) { - TraceEvent(SevError, "VF", data->thisServerID).detail("LastValidTime", data->debug_lastValidateTime).detail("KeyBegin", printable(s->begin())).detail("KeyEnd", printable(s->end())) - .detail("FirstKey", printable(latest.lower_bound(s->begin()).key())).detail("FirstInsertV", latest.lower_bound(s->begin()).insertVersion()); + TraceEvent(SevError, "VF", data->thisServerID).detail("LastValidTime", data->debug_lastValidateTime).detail("KeyBegin", s->begin()).detail("KeyEnd", s->end()) + .detail("FirstKey", latest.lower_bound(s->begin()).key()).detail("FirstInsertV", latest.lower_bound(s->begin()).insertVersion()); } ASSERT( latest.lower_bound(s->begin()) == latest.lower_bound(s->end()) ); } @@ -784,7 +784,7 @@ ACTOR Future getValueQ( StorageServer* data, GetValueRequest req ) { state uint64_t changeCounter = data->shardChangeCounter; if (!data->shards[req.key]->isReadable()) { - //TraceEvent("WrongShardServer", data->thisServerID).detail("Key", printable(req.key)).detail("Version", version).detail("In", "getValueQ"); + //TraceEvent("WrongShardServer", data->thisServerID).detail("Key", req.key).detail("Version", version).detail("In", "getValueQ"); throw wrong_shard_server(); } @@ -861,7 +861,7 @@ ACTOR Future watchValue_impl( StorageServer* data, WatchValueRequest req ) GetValueRequest getReq( req.key, latest, req.debugID ); state Future getValue = getValueQ( data, getReq ); //we are relying on the delay zero at the top of getValueQ, if 
removed we need one here GetValueReply reply = wait( getReq.reply.getFuture() ); - //TraceEvent("WatcherCheckValue").detail("Key", printable( req.key ) ).detail("Value", printable( req.value ) ).detail("CurrentValue", printable( v ) ).detail("Ver", latest); + //TraceEvent("WatcherCheckValue").detail("Key", req.key ).detail("Value", req.value ).detail("CurrentValue", v ).detail("Ver", latest); debugMutation("ShardWatchValue", latest, MutationRef(MutationRef::DebugKey, req.key, reply.value.present() ? StringRef( reply.value.get() ) : LiteralStringRef("") ) ); @@ -1091,9 +1091,9 @@ ACTOR Future readRange( StorageServer* data, Version version, if (more) { // if there might be more data, begin reading right after what we already found to find out //if (track) printf("more\n"); if (!(limit<=0 || *pLimitBytes<=0 || result.data.end()[-1].key == atStorageVersion.end()[-1].key)) - TraceEvent(SevError, "ReadRangeIssue", data->thisServerID).detail("ReadBegin", printable(readBegin)).detail("ReadEnd", printable(readEnd)) - .detail("VStart", vStart ? printable(vStart.key()) : "nil").detail("VEnd", vEnd ? printable(vEnd.key()) : "nil") - .detail("AtStorageVersionBack", printable(atStorageVersion.end()[-1].key)).detail("ResultBack", printable(result.data.end()[-1].key)) + TraceEvent(SevError, "ReadRangeIssue", data->thisServerID).detail("ReadBegin", readBegin).detail("ReadEnd", readEnd) + .detail("VStart", vStart ? vStart.key() : LiteralStringRef("nil")).detail("VEnd", vEnd ? 
vEnd.key() : LiteralStringRef("nil")) + .detail("AtStorageVersionBack", atStorageVersion.end()[-1].key).detail("ResultBack", result.data.end()[-1].key) .detail("Limit", limit).detail("LimitBytes", *pLimitBytes).detail("ResultSize", result.data.size()).detail("PrevSize", prevSize); readBegin = readBeginTemp = keyAfter( result.data.end()[-1].key ); ASSERT( limit<=0 || *pLimitBytes<=0 || result.data.end()[-1].key == atStorageVersion.end()[-1].key ); @@ -1111,8 +1111,8 @@ ACTOR Future readRange( StorageServer* data, Version version, /*if (*pLimitBytes <= 0) TraceEvent(SevWarn, "ReadRangeLimitExceeded") .detail("Version", version) - .detail("Begin", printable(range.begin) ) - .detail("End", printable(range.end) ) + .detail("Begin", range.begin ) + .detail("End", range.end ) .detail("LimitReamin", limit) .detail("LimitBytesRemain", *pLimitBytes); */ @@ -1135,9 +1135,9 @@ ACTOR Future readRange( StorageServer* data, Version version, if ( !(totalsize>originalLimitBytes ? prefix_equal : result.data==correct.data) || correct.more != result.more ) { TraceEvent(SevError, "IncorrectResult", rrid).detail("Server", data->thisServerID).detail("CorrectRows", correct.data.size()) .detail("FirstDifference", first_difference).detail("OriginalLimit", originalLimit) - .detail("ResultRows", result.data.size()).detail("Result0", printable(result.data[0].key)).detail("Correct0", printable(correct.data[0].key)) - .detail("ResultN", result.data.size() ? printable(result.data[std::min(correct.data.size(),result.data.size())-1].key) : "nil") - .detail("CorrectN", correct.data.size() ? printable(correct.data[std::min(correct.data.size(),result.data.size())-1].key) : "nil"); + .detail("ResultRows", result.data.size()).detail("Result0", result.data[0].key).detail("Correct0", correct.data[0].key) + .detail("ResultN", result.data.size() ? result.data[std::min(correct.data.size(),result.data.size())-1].key : "nil") + .detail("CorrectN", correct.data.size() ? 
correct.data[std::min(correct.data.size(),result.data.size())-1].key : "nil"); }*/ } else { // Reverse read - abandon hope alle ye who enter here @@ -1297,11 +1297,11 @@ ACTOR Future getKeyValues( StorageServer* data, GetKeyValuesRequest req ) if( req.debugID.present() ) g_traceBatch.addEvent("TransactionDebug", req.debugID.get().first(), "storageserver.getKeyValues.AfterVersion"); - //.detail("ShardBegin", printable(shard.begin)).detail("ShardEnd", printable(shard.end)); + //.detail("ShardBegin", shard.begin).detail("ShardEnd", shard.end); //} catch (Error& e) { TraceEvent("WrongShardServer", data->thisServerID).detail("Begin", req.begin.toString()).detail("End", req.end.toString()).detail("Version", version).detail("Shard", "None").detail("In", "getKeyValues>getShardKeyRange"); throw e; } if ( !selectorInRange(req.end, shard) && !(req.end.isFirstGreaterOrEqual() && req.end.getKey() == shard.end) ) { -// TraceEvent("WrongShardServer1", data->thisServerID).detail("Begin", req.begin.toString()).detail("End", req.end.toString()).detail("Version", version).detail("ShardBegin", printable(shard.begin)).detail("ShardEnd", printable(shard.end)).detail("In", "getKeyValues>checkShardExtents"); +// TraceEvent("WrongShardServer1", data->thisServerID).detail("Begin", req.begin.toString()).detail("End", req.end.toString()).detail("Version", version).detail("ShardBegin", shard.begin).detail("ShardEnd", shard.end).detail("In", "getKeyValues>checkShardExtents"); throw wrong_shard_server(); } @@ -1313,7 +1313,7 @@ ACTOR Future getKeyValues( StorageServer* data, GetKeyValuesRequest req ) state Key end = wait(fEnd); if( req.debugID.present() ) g_traceBatch.addEvent("TransactionDebug", req.debugID.get().first(), "storageserver.getKeyValues.AfterKeys"); - //.detail("Off1",offset1).detail("Off2",offset2).detail("ReqBegin",printable(req.begin.getKey())).detail("ReqEnd",printable(req.end.getKey())); + 
//.detail("Off1",offset1).detail("Off2",offset2).detail("ReqBegin",req.begin.getKey()).detail("ReqEnd",req.end.getKey()); // Offsets of zero indicate begin/end keys in this shard, which obviously means we can answer the query // An end offset of 1 is also OK because the end key is exclusive, so if the first key of the next shard is the end the last actual key returned must be from this shard. @@ -1323,14 +1323,14 @@ ACTOR Future getKeyValues( StorageServer* data, GetKeyValuesRequest req ) // We could detect when offset1 takes us off the beginning of the database or offset2 takes us off the end, and return a clipped range rather // than an error (since that is what the NativeAPI.getRange will do anyway via its "slow path"), but we would have to add some flags to the response // to encode whether we went off the beginning and the end, since it needs that information. - //TraceEvent("WrongShardServer2", data->thisServerID).detail("Begin", req.begin.toString()).detail("End", req.end.toString()).detail("Version", version).detail("ShardBegin", printable(shard.begin)).detail("ShardEnd", printable(shard.end)).detail("In", "getKeyValues>checkOffsets").detail("BeginKey", printable(begin)).detail("EndKey", printable(end)).detail("BeginOffset", offset1).detail("EndOffset", offset2); + //TraceEvent("WrongShardServer2", data->thisServerID).detail("Begin", req.begin.toString()).detail("End", req.end.toString()).detail("Version", version).detail("ShardBegin", shard.begin).detail("ShardEnd", shard.end).detail("In", "getKeyValues>checkOffsets").detail("BeginKey", begin).detail("EndKey", end).detail("BeginOffset", offset1).detail("EndOffset", offset2); throw wrong_shard_server(); } if (begin >= end) { if( req.debugID.present() ) g_traceBatch.addEvent("TransactionDebug", req.debugID.get().first(), "storageserver.getKeyValues.Send"); - //.detail("Begin",printable(begin)).detail("End",printable(end)); + //.detail("Begin",begin).detail("End",end); GetKeyValuesReply none; none.version = 
version; @@ -1347,7 +1347,7 @@ ACTOR Future getKeyValues( StorageServer* data, GetKeyValuesRequest req ) if( req.debugID.present() ) g_traceBatch.addEvent("TransactionDebug", req.debugID.get().first(), "storageserver.getKeyValues.AfterReadRange"); - //.detail("Begin",printable(begin)).detail("End",printable(end)).detail("SizeOf",r.data.size()); + //.detail("Begin",begin).detail("End",end).detail("SizeOf",r.data.size()); data->checkChangeCounter( changeCounter, KeyRangeRef( std::min(begin, std::min(req.begin.getKey(), req.end.getKey())), std::max(end, std::max(req.begin.getKey(), req.end.getKey())) ) ); if (EXPENSIVE_VALIDATION) { for (int i = 0; i < r.data.size(); i++) @@ -1873,8 +1873,8 @@ ACTOR Future fetchKeys( StorageServer *data, AddingShard* shard ) { debugKeyRange("fetchKeysBegin", data->version.get(), shard->keys); TraceEvent(SevDebug, interval.begin(), data->thisServerID) - .detail("KeyBegin", printable(shard->keys.begin)) - .detail("KeyEnd",printable(shard->keys.end)); + .detail("KeyBegin", shard->keys.begin) + .detail("KeyEnd",shard->keys.end); validate(data); @@ -1935,8 +1935,8 @@ ACTOR Future fetchKeys( StorageServer *data, AddingShard* shard ) { TraceEvent(SevDebug, "FetchKeysBlock", data->thisServerID).detail("FKID", interval.pairID) .detail("BlockRows", this_block.size()).detail("BlockBytes", expectedSize) - .detail("KeyBegin", printable(keys.begin)).detail("KeyEnd", printable(keys.end)) - .detail("Last", this_block.size() ? printable(this_block.end()[-1].key) : std::string()) + .detail("KeyBegin", keys.begin).detail("KeyEnd", keys.end) + .detail("Last", this_block.size() ? 
this_block.end()[-1].key : std::string()) .detail("Version", fetchVersion).detail("More", this_block.more); debugKeyRange("fetchRange", fetchVersion, keys); for(auto k = this_block.begin(); k != this_block.end(); ++k) debugMutation("fetch", fetchVersion, MutationRef(MutationRef::SetValue, k->key, k->value)); @@ -2124,8 +2124,8 @@ ACTOR Future fetchKeys( StorageServer *data, AddingShard* shard ) { TraceEvent(SevError, "FetchKeysError", data->thisServerID) .error(e) .detail("Elapsed", now()-startt) - .detail("KeyBegin", printable(keys.begin)) - .detail("KeyEnd",printable(keys.end)); + .detail("KeyBegin", keys.begin) + .detail("KeyEnd",keys.end); if (e.code() != error_code_actor_cancelled) data->otherError.sendError(e); // Kill the storage server. Are there any recoverable errors? throw; // goes nowhere @@ -2185,8 +2185,8 @@ void changeServerKeys( StorageServer* data, const KeyRangeRef& keys, bool nowAss ASSERT( !keys.empty() ); //TraceEvent("ChangeServerKeys", data->thisServerID) - // .detail("KeyBegin", printable(keys.begin)) - // .detail("KeyEnd", printable(keys.end)) + // .detail("KeyBegin", keys.begin) + // .detail("KeyEnd", keys.end) // .detail("NowAssigned", nowAssigned) // .detail("Version", version) // .detail("Context", changeServerKeysContextName[(int)context]); @@ -2200,15 +2200,15 @@ void changeServerKeys( StorageServer* data, const KeyRangeRef& keys, bool nowAss if( nowAssigned != it->value()->assigned() ) { isDifferent = true; /*TraceEvent("CSKRangeDifferent", data->thisServerID) - .detail("KeyBegin", printable(it->range().begin)) - .detail("KeyEnd", printable(it->range().end));*/ + .detail("KeyBegin", it->range().begin) + .detail("KeyEnd", it->range().end);*/ break; } } if( !isDifferent ) { //TraceEvent("CSKShortCircuit", data->thisServerID) - // .detail("KeyBegin", printable(keys.begin)) - // .detail("KeyEnd", printable(keys.end)); + // .detail("KeyBegin", keys.begin) + // .detail("KeyEnd", keys.end); return; } @@ -2245,8 +2245,8 @@ void 
changeServerKeys( StorageServer* data, const KeyRangeRef& keys, bool nowAss KeyRangeRef range = keys & r->range(); bool dataAvailable = r->value()==latestVersion || r->value() >= version; /*TraceEvent("CSKRange", data->thisServerID) - .detail("KeyBegin", printable(range.begin)) - .detail("KeyEnd", printable(range.end)) + .detail("KeyBegin", range.begin) + .detail("KeyEnd", range.end) .detail("Available", dataAvailable) .detail("NowAssigned", nowAssigned) .detail("NewestAvailable", r->value()) @@ -2832,7 +2832,7 @@ void setAvailableStatus( StorageServer* self, KeyRangeRef keys, bool available ) auto& mLV = self->addVersionToMutationLog( self->data().getLatestVersion() ); KeyRange availableKeys = KeyRangeRef( persistShardAvailableKeys.begin.toString() + keys.begin.toString(), persistShardAvailableKeys.begin.toString() + keys.end.toString() ); - //TraceEvent("SetAvailableStatus", self->thisServerID).detail("Version", mLV.version).detail("RangeBegin", printable(availableKeys.begin)).detail("RangeEnd", printable(availableKeys.end)); + //TraceEvent("SetAvailableStatus", self->thisServerID).detail("Version", mLV.version).detail("RangeBegin", availableKeys.begin).detail("RangeEnd", availableKeys.end); self->addMutationToMutationLog( mLV, MutationRef( MutationRef::ClearRange, availableKeys.begin, availableKeys.end ) ); self->addMutationToMutationLog( mLV, MutationRef( MutationRef::SetValue, availableKeys.begin, available ? 
LiteralStringRef("1") : LiteralStringRef("0") ) ); @@ -2849,7 +2849,7 @@ void setAssignedStatus( StorageServer* self, KeyRangeRef keys, bool nowAssigned KeyRange assignedKeys = KeyRangeRef( persistShardAssignedKeys.begin.toString() + keys.begin.toString(), persistShardAssignedKeys.begin.toString() + keys.end.toString() ); - //TraceEvent("SetAssignedStatus", self->thisServerID).detail("Version", mLV.version).detail("RangeBegin", printable(assignedKeys.begin)).detail("RangeEnd", printable(assignedKeys.end)); + //TraceEvent("SetAssignedStatus", self->thisServerID).detail("Version", mLV.version).detail("RangeBegin", assignedKeys.begin).detail("RangeEnd", assignedKeys.end); self->addMutationToMutationLog( mLV, MutationRef( MutationRef::ClearRange, assignedKeys.begin, assignedKeys.end ) ); self->addMutationToMutationLog( mLV, MutationRef( MutationRef::SetValue, assignedKeys.begin, nowAssigned ? LiteralStringRef("1") : LiteralStringRef("0") ) ); @@ -2954,7 +2954,7 @@ ACTOR Future applyByteSampleResult( StorageServer* data, IKeyValueStore* s wait(delay(SERVER_KNOBS->BYTE_SAMPLE_LOAD_DELAY)); } } - TraceEvent("RecoveredByteSampleRange", data->thisServerID).detail("Begin", printable(begin)).detail("End", printable(end)).detail("Fetches", totalFetches).detail("Keys", totalKeys).detail("ReadBytes", totalBytes); + TraceEvent("RecoveredByteSampleRange", data->thisServerID).detail("Begin", begin).detail("End", end).detail("Fetches", totalFetches).detail("Keys", totalKeys).detail("ReadBytes", totalBytes); return Void(); } @@ -3051,7 +3051,7 @@ ACTOR Future restoreDurableState( StorageServer* data, IKeyValueStore* sto ASSERT( !keys.empty() ); bool nowAvailable = available[availableLoc].value!=LiteralStringRef("0"); /*if(nowAvailable) - TraceEvent("AvailableShard", data->thisServerID).detail("RangeBegin", printable(keys.begin)).detail("RangeEnd", printable(keys.end));*/ + TraceEvent("AvailableShard", data->thisServerID).detail("RangeBegin", keys.begin).detail("RangeEnd", 
keys.end);*/ data->newestAvailableVersion.insert( keys, nowAvailable ? latestVersion : invalidVersion ); wait(yield()); } @@ -3065,7 +3065,7 @@ ACTOR Future restoreDurableState( StorageServer* data, IKeyValueStore* sto ASSERT( !keys.empty() ); bool nowAssigned = assigned[assignedLoc].value!=LiteralStringRef("0"); /*if(nowAssigned) - TraceEvent("AssignedShard", data->thisServerID).detail("RangeBegin", printable(keys.begin)).detail("RangeEnd", printable(keys.end));*/ + TraceEvent("AssignedShard", data->thisServerID).detail("RangeBegin", keys.begin).detail("RangeEnd", keys.end);*/ changeServerKeys(data, keys, nowAssigned, version, CSK_RESTORE); if (!nowAssigned) ASSERT( data->newestAvailableVersion.allEqual(keys, invalidVersion) ); diff --git a/fdbserver/tester.actor.cpp b/fdbserver/tester.actor.cpp index 5e7fc2e226..d521c4dc36 100644 --- a/fdbserver/tester.actor.cpp +++ b/fdbserver/tester.actor.cpp @@ -151,7 +151,7 @@ int getOption( VectorRef options, Key key, int defaultValue) { options[i].value = LiteralStringRef(""); return r; } else { - TraceEvent(SevError, "InvalidTestOption").detail("OptionName", printable(key)); + TraceEvent(SevError, "InvalidTestOption").detail("OptionName", key); throw test_specification_invalid(); } } @@ -167,7 +167,7 @@ uint64_t getOption( VectorRef options, Key key, uint64_t defaultVal options[i].value = LiteralStringRef(""); return r; } else { - TraceEvent(SevError, "InvalidTestOption").detail("OptionName", printable(key)); + TraceEvent(SevError, "InvalidTestOption").detail("OptionName", key); throw test_specification_invalid(); } } @@ -183,7 +183,7 @@ int64_t getOption( VectorRef options, Key key, int64_t defaultValue options[i].value = LiteralStringRef(""); return r; } else { - TraceEvent(SevError, "InvalidTestOption").detail("OptionName", printable(key)); + TraceEvent(SevError, "InvalidTestOption").detail("OptionName", key); throw test_specification_invalid(); } } @@ -304,7 +304,7 @@ TestWorkload *getWorkloadIface( WorkloadRequest 
work, VectorRef opt auto unconsumedOptions = checkAllOptionsConsumed( workload ? workload->options : VectorRef() ); if( !workload || unconsumedOptions.size() ) { TraceEvent evt(SevError,"TestCreationError"); - evt.detail("TestName", printable(testName)); + evt.detail("TestName", testName); if( !workload ) { evt.detail("Reason", "Null workload"); fprintf(stderr, "ERROR: Workload could not be created, perhaps testName (%s) is not a valid workload\n", printable(testName).c_str()); @@ -509,7 +509,7 @@ ACTOR Future testerServerWorkload( WorkloadRequest work, Reference>> &futures, std::string er ACTOR Future runWorkload( Database cx, std::vector< TesterInterface > testers, TestSpec spec ) { - TraceEvent("TestRunning").detail( "WorkloadTitle", printable(spec.title) ) + TraceEvent("TestRunning").detail( "WorkloadTitle", spec.title ) .detail("TesterCount", testers.size()).detail("Phases", spec.phases) .detail("TestTimeout", spec.timeout); @@ -667,16 +667,16 @@ ACTOR Future runWorkload( Database cx, std::vector< Test if( spec.phases & TestWorkload::SETUP ) { state std::vector< Future> > setups; printf("setting up test (%s)...\n", printable(spec.title).c_str()); - TraceEvent("TestSetupStart").detail("WorkloadTitle", printable(spec.title)); + TraceEvent("TestSetupStart").detail("WorkloadTitle", spec.title); for(int i= 0; i < workloads.size(); i++) setups.push_back( workloads[i].setup.template getReplyUnlessFailedFor( waitForFailureTime, 0) ); wait( waitForAll( setups ) ); throwIfError(setups, "SetupFailedForWorkload" + printable(spec.title)); - TraceEvent("TestSetupComplete").detail("WorkloadTitle", printable(spec.title)); + TraceEvent("TestSetupComplete").detail("WorkloadTitle", spec.title); } if( spec.phases & TestWorkload::EXECUTION ) { - TraceEvent("TestStarting").detail("WorkloadTitle", printable(spec.title)); + TraceEvent("TestStarting").detail("WorkloadTitle", spec.title); printf("running test (%s)...\n", printable(spec.title).c_str()); state std::vector< Future> > 
starts; for(int i= 0; i < workloads.size(); i++) @@ -684,7 +684,7 @@ ACTOR Future runWorkload( Database cx, std::vector< Test wait( waitForAll( starts ) ); throwIfError(starts, "StartFailedForWorkload" + printable(spec.title)); printf("%s complete\n", printable(spec.title).c_str()); - TraceEvent("TestComplete").detail("WorkloadTitle", printable(spec.title)); + TraceEvent("TestComplete").detail("WorkloadTitle", spec.title); } if( spec.phases & TestWorkload::CHECK ) { @@ -711,7 +711,7 @@ ACTOR Future runWorkload( Database cx, std::vector< Test if( spec.phases & TestWorkload::METRICS ) { state std::vector< Future>> > metricTasks; printf("fetching metrics (%s)...\n", printable(spec.title).c_str()); - TraceEvent("TestFetchingMetrics").detail("WorkloadTitle", printable(spec.title)); + TraceEvent("TestFetchingMetrics").detail("WorkloadTitle", spec.title); for(int i= 0; i < workloads.size(); i++) metricTasks.push_back( workloads[i].metrics.template getReplyUnlessFailedFor>(waitForFailureTime, 0) ); wait( waitForAll( metricTasks ) ); @@ -834,7 +834,7 @@ ACTOR Future runTest( Database cx, std::vector< TesterInterface > testers, } TraceEvent(ok ? 
SevInfo : SevWarnAlways, "TestResults") - .detail("Workload", printable(spec.title)) + .detail("Workload", spec.title) .detail("Passed", (int)ok); //.detail("Metrics", metricSummary); @@ -888,7 +888,7 @@ vector readTests( ifstream& ifs ) { } spec.title = StringRef( value ); - TraceEvent("TestParserTest").detail("ParsedTest", printable( spec.title )); + TraceEvent("TestParserTest").detail("ParsedTest", spec.title ); } else if( attrib == "timeout" ) { sscanf( value.c_str(), "%d", &(spec.timeout) ); ASSERT( spec.timeout > 0 ); diff --git a/fdbserver/worker.actor.cpp b/fdbserver/worker.actor.cpp index ff0b2d51ca..70dbda4215 100644 --- a/fdbserver/worker.actor.cpp +++ b/fdbserver/worker.actor.cpp @@ -307,18 +307,18 @@ std::vector< DiskStore > getDiskStores( std::string folder, std::string suffix, // Use the option string that's in the file rather than tLogOptions.toPrefix(), // because they might be different if a new option was introduced in this version. StringRef optionsString = filename.removePrefix(fileVersionedLogDataPrefix).eat("-"); - TraceEvent("DiskStoreVersioned").detail("Filename", printable(filename)); + TraceEvent("DiskStoreVersioned").detail("Filename", filename); ErrorOr tLogOptions = TLogOptions::FromStringRef(optionsString); if (tLogOptions.isError()) { - TraceEvent(SevWarn, "DiskStoreMalformedFilename").detail("Filename", printable(filename)); + TraceEvent(SevWarn, "DiskStoreMalformedFilename").detail("Filename", filename); continue; } - TraceEvent("DiskStoreVersionedSuccess").detail("Filename", printable(filename)); + TraceEvent("DiskStoreVersionedSuccess").detail("Filename", filename); store.tLogOptions = tLogOptions.get(); prefix = filename.substr(0, fileVersionedLogDataPrefix.size() + optionsString.size() + 1); } else if( filename.startsWith( fileLogDataPrefix ) ) { - TraceEvent("DiskStoreUnversioned").detail("Filename", printable(filename)); + TraceEvent("DiskStoreUnversioned").detail("Filename", filename); store.storedComponent = 
DiskStore::TLogData; store.tLogOptions.version = TLogVersion::V2; store.tLogOptions.spillType = TLogSpillType::VALUE; From 39ecbedd74e30af145619ed702ee5c090a76c994 Mon Sep 17 00:00:00 2001 From: mpilman Date: Mon, 18 Mar 2019 20:27:34 -0700 Subject: [PATCH 12/24] Fixed compilation errors on OS X & gcc8 --- fdbclient/FDBTypes.h | 7 +++++++ fdbclient/NativeAPI.actor.cpp | 12 +++++++----- fdbclient/TaskBucket.actor.cpp | 2 +- fdbserver/ApplyMetadataMutation.h | 5 ++++- 4 files changed, 19 insertions(+), 7 deletions(-) diff --git a/fdbclient/FDBTypes.h b/fdbclient/FDBTypes.h index 04a3efff5f..a8b29df913 100644 --- a/fdbclient/FDBTypes.h +++ b/fdbclient/FDBTypes.h @@ -503,6 +503,13 @@ struct RangeResultRef : VectorRef { } }; +template<> +struct Traceable : std::true_type { + static std::string toString(const RangeResultRef& value) { + return Traceable>::toString(value); + } +}; + struct KeyValueStoreType { // These enumerated values are stored in the database configuration, so can NEVER be changed. Only add new ones just before END. 
enum StoreType { diff --git a/fdbclient/NativeAPI.actor.cpp b/fdbclient/NativeAPI.actor.cpp index ec09e35e4d..6a859ceff3 100644 --- a/fdbclient/NativeAPI.actor.cpp +++ b/fdbclient/NativeAPI.actor.cpp @@ -2484,19 +2484,21 @@ ACTOR void checkWrites( Database cx, Future committed, Promise outCo Standalone shouldBeEmpty = wait( tr.getRange( it->range(), 1 ) ); if( shouldBeEmpty.size() ) { - TraceEvent(SevError, "CheckWritesFailed").detail("Class", "Clear").detail("KeyBegin", it->range().begin.c_str()) - .detail("KeyEnd", it->range().end.c_str()); + TraceEvent(SevError, "CheckWritesFailed").detail("Class", "Clear").detail("KeyBegin", it->range().begin) + .detail("KeyEnd", it->range().end); return; } } else { Optional val = wait( tr.get( it->range().begin ) ); if( !val.present() || val.get() != m.setValue ) { - TraceEvent evt = TraceEvent(SevError, "CheckWritesFailed").detail("Class", "Set").detail("Key", it->range().begin.c_str()) - .detail("Expected", m.setValue.c_str()); + TraceEvent evt = TraceEvent(SevError, "CheckWritesFailed") + .detail("Class", "Set") + .detail("Key", it->range().begin) + .detail("Expected", m.setValue); if( !val.present() ) evt.detail("Actual", "_Value Missing_"); else - evt.detail("Actual", val.get().c_str()); + evt.detail("Actual", val.get()); return; } } diff --git a/fdbclient/TaskBucket.actor.cpp b/fdbclient/TaskBucket.actor.cpp index 05ef3cdf58..ef07fa52ed 100644 --- a/fdbclient/TaskBucket.actor.cpp +++ b/fdbclient/TaskBucket.actor.cpp @@ -396,7 +396,7 @@ public: } catch(Error &e) { TraceEvent(SevWarn, "TB_ExecuteFailure") .error(e) - .detail("TaskUID", task->key.) 
+ .detail("TaskUID", task->key) .detail("TaskType", task->params[Task::reservedTaskParamKeyType].printable()) .detail("Priority", task->getPriority()); try { diff --git a/fdbserver/ApplyMetadataMutation.h b/fdbserver/ApplyMetadataMutation.h index 7ba4b4c495..752435f06c 100644 --- a/fdbserver/ApplyMetadataMutation.h +++ b/fdbserver/ApplyMetadataMutation.h @@ -151,7 +151,10 @@ static void applyMetadataMutations(UID const& dbgid, Arena &arena, VectorRef(m.param2) != txnStateStore->readValue(m.param1).get().castTo()) { // FIXME: Make this check more specific, here or by reading configuration whenever there is a change if(!m.param1.startsWith( excludedServersPrefix ) && m.param1 != excludedServersVersionKey) { auto t = txnStateStore->readValue(m.param1).get(); - TraceEvent("MutationRequiresRestart", dbgid).detail("M", m.toString()).detail("PrevValue", t.present() ? t.get() : "(none)").detail("ToCommit", toCommit!=NULL); + TraceEvent("MutationRequiresRestart", dbgid) + .detail("M", m.toString()) + .detail("PrevValue", t.present() ? 
t.get() : LiteralStringRef("(none)")) + .detail("ToCommit", toCommit!=NULL); if(confChange) *confChange = true; } } From c6bc23791d4bd1464548d295bc28990d9518ccb9 Mon Sep 17 00:00:00 2001 From: mpilman Date: Tue, 19 Mar 2019 08:51:28 -0700 Subject: [PATCH 13/24] Fixed format error in microbench --- flow/ActorCollection.actor.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flow/ActorCollection.actor.cpp b/flow/ActorCollection.actor.cpp index 3c5b1823fb..2932998175 100644 --- a/flow/ActorCollection.actor.cpp +++ b/flow/ActorCollection.actor.cpp @@ -143,7 +143,7 @@ TEST_CASE("/flow/TraceEvent") { } TraceEvent("TraceDuration") .detail("Time", g_network->now() - startTime); - printf("benchmark done\n", getpid()); + printf("benchmark done\n"); wait(delay(10)); return Void(); } From 1c6c73af6d4ad969dc74c3bda6fd2319ec400bf8 Mon Sep 17 00:00:00 2001 From: mpilman Date: Tue, 19 Mar 2019 14:13:27 -0700 Subject: [PATCH 14/24] Fixed infinite recursion --- flow/Trace.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flow/Trace.h b/flow/Trace.h index 9cf11b667b..92bf4eee03 100644 --- a/flow/Trace.h +++ b/flow/Trace.h @@ -347,7 +347,7 @@ struct TraceEvent { if (enabled && init()) { auto s = Traceable::toString(value); addMetric(key.c_str(), value, s); - return detailImpl(std::move(key), std::move(s)); + return detailImpl(std::move(key), std::move(s), false); } return *this; } From b0877e7dd0e6823feaadcbc5baa5e68e63a097fe Mon Sep 17 00:00:00 2001 From: mpilman Date: Tue, 19 Mar 2019 17:50:48 -0700 Subject: [PATCH 15/24] Don't quote spaces in std::strings --- flow/Trace.h | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/flow/Trace.h b/flow/Trace.h index 92bf4eee03..47324116f3 100644 --- a/flow/Trace.h +++ b/flow/Trace.h @@ -271,13 +271,14 @@ struct Traceable : std::true_type { template<> struct Traceable : std::true_type { + static bool isPrintable(char c) { return c >= 32 && c < 127; } template static std::string 
toString(Str&& value) { // if all characters are printable ascii, we simply return the string int nonPrintables = 0; int numBackslashes = 0; for (auto c : value) { - if (!Traceable::isPrintable(c)) { + if (!isPrintable(c)) { ++nonPrintables; } else if (c == '\\') { ++numBackslashes; @@ -289,7 +290,7 @@ struct Traceable : std::true_type { std::string result; result.reserve(value.size() - nonPrintables + (nonPrintables * 4) + numBackslashes); for (auto c : value) { - if (Traceable::isPrintable(c)) { + if (isPrintable(c)) { result.push_back(c); } else if (c == '\\') { result.push_back('\\'); From a05350bfa77281ba6737b14b0672d975d395334b Mon Sep 17 00:00:00 2001 From: "A.J. Beamon" Date: Wed, 3 Apr 2019 17:01:35 -0800 Subject: [PATCH 16/24] Update fdbcli/fdbcli.actor.cpp Co-Authored-By: mpilman --- fdbcli/fdbcli.actor.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fdbcli/fdbcli.actor.cpp b/fdbcli/fdbcli.actor.cpp index e195087499..e9e107331d 100644 --- a/fdbcli/fdbcli.actor.cpp +++ b/fdbcli/fdbcli.actor.cpp @@ -2292,7 +2292,7 @@ void fdbcli_comp_cmd(std::string const& text, std::vector& lc) { void LogCommand(std::string line, UID randomID, std::string errMsg) { printf("%s\n", errMsg.c_str()); - TraceEvent(SevInfo, "CLICommandLog", randomID).detail("Command", StringRef(line)).detail("Error", StringRef(errMsg)); + TraceEvent(SevInfo, "CLICommandLog", randomID).detail("Command", line).detail("Error", errMsg); } struct CLIOptions { From 9595b43a2d29afad266873b33abd0929a553a52b Mon Sep 17 00:00:00 2001 From: "A.J. 
Beamon" Date: Wed, 3 Apr 2019 17:01:47 -0800 Subject: [PATCH 17/24] Update fdbcli/fdbcli.actor.cpp Co-Authored-By: mpilman --- fdbcli/fdbcli.actor.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fdbcli/fdbcli.actor.cpp b/fdbcli/fdbcli.actor.cpp index e9e107331d..de8de73647 100644 --- a/fdbcli/fdbcli.actor.cpp +++ b/fdbcli/fdbcli.actor.cpp @@ -2523,7 +2523,7 @@ ACTOR Future cli(CLIOptions opt, LineNoise* plinenoise) { try { state UID randomID = g_random->randomUniqueID(); - TraceEvent(SevInfo, "CLICommandLog", randomID).detail("Command", StringRef(line)); + TraceEvent(SevInfo, "CLICommandLog", randomID).detail("Command", line); bool malformed, partial; state std::vector> parsed = parseLine(line, malformed, partial); From dfad79d57716bc40dcb6b22a11c9255b8f98f2f4 Mon Sep 17 00:00:00 2001 From: "A.J. Beamon" Date: Wed, 3 Apr 2019 17:01:55 -0800 Subject: [PATCH 18/24] Update fdbcli/fdbcli.actor.cpp Co-Authored-By: mpilman --- fdbcli/fdbcli.actor.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fdbcli/fdbcli.actor.cpp b/fdbcli/fdbcli.actor.cpp index de8de73647..40f1fc649f 100644 --- a/fdbcli/fdbcli.actor.cpp +++ b/fdbcli/fdbcli.actor.cpp @@ -3357,7 +3357,7 @@ ACTOR Future cli(CLIOptions opt, LineNoise* plinenoise) { is_error = true; } - TraceEvent(SevInfo, "CLICommandLog", randomID).detail("Command", StringRef(line)).detail("IsError", is_error); + TraceEvent(SevInfo, "CLICommandLog", randomID).detail("Command", line).detail("IsError", is_error); } catch (Error& e) { if(e.code() != error_code_actor_cancelled) From 389ec1c2ac9f2513d2154974838a4ab5c9ad0ee5 Mon Sep 17 00:00:00 2001 From: "A.J. 
Beamon" Date: Wed, 3 Apr 2019 17:02:03 -0800 Subject: [PATCH 19/24] Update fdbclient/BackupAgentBase.actor.cpp Co-Authored-By: mpilman --- fdbclient/BackupAgentBase.actor.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fdbclient/BackupAgentBase.actor.cpp b/fdbclient/BackupAgentBase.actor.cpp index 21eda31c39..f9bc159b6a 100644 --- a/fdbclient/BackupAgentBase.actor.cpp +++ b/fdbclient/BackupAgentBase.actor.cpp @@ -149,7 +149,7 @@ Standalone> getLogRanges(Version beginVersion, Version en Key baLogRangePrefix = destUidValue.withPrefix(backupLogKeys.begin); - //TraceEvent("GetLogRanges").detail("DestUidValue", destUidValue).detail("Prefix", StringRef(baLogRangePrefix)); + //TraceEvent("GetLogRanges").detail("DestUidValue", destUidValue).detail("Prefix", baLogRangePrefix); for (int64_t vblock = beginVersion / blockSize; vblock < (endVersion + blockSize - 1) / blockSize; ++vblock) { int64_t tb = vblock * blockSize / CLIENT_KNOBS->LOG_RANGE_BLOCK_SIZE; From 45eb6b0973bbeaf69723dd4b205306bb1059b0c2 Mon Sep 17 00:00:00 2001 From: "A.J. 
Beamon" Date: Wed, 3 Apr 2019 17:02:12 -0800 Subject: [PATCH 20/24] Update fdbclient/BackupAgentBase.actor.cpp Co-Authored-By: mpilman --- fdbclient/BackupAgentBase.actor.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fdbclient/BackupAgentBase.actor.cpp b/fdbclient/BackupAgentBase.actor.cpp index f9bc159b6a..bd390c9a4d 100644 --- a/fdbclient/BackupAgentBase.actor.cpp +++ b/fdbclient/BackupAgentBase.actor.cpp @@ -172,7 +172,7 @@ Standalone> getApplyRanges(Version beginVersion, Version Key baLogRangePrefix = backupUid.withPrefix(applyLogKeys.begin); - //TraceEvent("GetLogRanges").detail("BackupUid", backupUid).detail("Prefix", StringRef(baLogRangePrefix)); + //TraceEvent("GetLogRanges").detail("BackupUid", backupUid).detail("Prefix", baLogRangePrefix); for (int64_t vblock = beginVersion / CLIENT_KNOBS->APPLY_BLOCK_SIZE; vblock < (endVersion + CLIENT_KNOBS->APPLY_BLOCK_SIZE - 1) / CLIENT_KNOBS->APPLY_BLOCK_SIZE; ++vblock) { int64_t tb = vblock * CLIENT_KNOBS->APPLY_BLOCK_SIZE / CLIENT_KNOBS->LOG_RANGE_BLOCK_SIZE; From 614a599a0418f1cacca590ed27891976a0197881 Mon Sep 17 00:00:00 2001 From: "A.J. 
Beamon" Date: Wed, 3 Apr 2019 17:03:12 -0800 Subject: [PATCH 21/24] Update fdbserver/SimulatedCluster.actor.cpp Co-Authored-By: mpilman --- fdbserver/SimulatedCluster.actor.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fdbserver/SimulatedCluster.actor.cpp b/fdbserver/SimulatedCluster.actor.cpp index d30afa7b09..e4ab161e38 100644 --- a/fdbserver/SimulatedCluster.actor.cpp +++ b/fdbserver/SimulatedCluster.actor.cpp @@ -1235,7 +1235,7 @@ void setupSimulatedSystem(vector>* systemActors, std::string baseFo *pConnString = conn; - TraceEvent("SimulatedConnectionString").detail("String", conn.toString()).detail("ConfigString", StringRef(startingConfigString)); + TraceEvent("SimulatedConnectionString").detail("String", conn.toString()).detail("ConfigString", startingConfigString); int assignedMachines = 0, nonVersatileMachines = 0; for( int dc = 0; dc < dataCenters; dc++ ) { From 4287b1d2a1c0e37db10e1f881ec8a6b43fd23f77 Mon Sep 17 00:00:00 2001 From: mpilman Date: Thu, 4 Apr 2019 09:59:08 -0700 Subject: [PATCH 22/24] resolved minor merge issues --- fdbserver/CMakeLists.txt | 14 +++++++++++++- fdbserver/workloads/JavaWorkload.actor.cpp | 2 +- flow/Arena.h | 7 +++++++ 3 files changed, 21 insertions(+), 2 deletions(-) diff --git a/fdbserver/CMakeLists.txt b/fdbserver/CMakeLists.txt index d3db62c675..ec9afec09c 100644 --- a/fdbserver/CMakeLists.txt +++ b/fdbserver/CMakeLists.txt @@ -170,6 +170,18 @@ set(FDBSERVER_SRCS workloads/WriteBandwidth.actor.cpp workloads/WriteDuringRead.actor.cpp) +set(SQLITE_SRCS + sqlite/btree.h + sqlite/hash.h + sqlite/sqlite3.h + sqlite/sqlite3ext.h + sqlite/sqliteInt.h + sqlite/sqliteLimit.h + sqlite/sqlite3.amalgamation.c) + +add_library(sqlite ${SQLITE_SRCS}) +target_compile_definitions(sqlite PRIVATE $<$:NDEBUG>) + set(java_workload_docstring "Build the Java workloads (makes fdbserver link against JNI)") if(FDB_RELEASE) set(WITH_JAVA_WORKLOAD OFF CACHE BOOL "${java_workload_docstring}") @@ -188,7 +200,7 @@ 
add_flow_target(EXECUTABLE NAME fdbserver SRCS ${FDBSERVER_SRCS}) target_include_directories(fdbserver PRIVATE ${CMAKE_CURRENT_BINARY_DIR}/workloads ${CMAKE_CURRENT_SOURCE_DIR}/workloads) -target_link_libraries(fdbserver PRIVATE fdbclient) +target_link_libraries(fdbserver PRIVATE fdbclient sqlite) if(WITH_JAVA_WORKLOAD) if(NOT JNI_FOUND) message(SEND_ERROR "Trying to build Java workload but couldn't find JNI") diff --git a/fdbserver/workloads/JavaWorkload.actor.cpp b/fdbserver/workloads/JavaWorkload.actor.cpp index d4b470b5bd..35b413109a 100644 --- a/fdbserver/workloads/JavaWorkload.actor.cpp +++ b/fdbserver/workloads/JavaWorkload.actor.cpp @@ -47,7 +47,7 @@ void printTrace(JNIEnv* env, jobject self, jint severity, jstring message, jobje auto f = onMainThread([severity, &detailsMap, msg]() -> Future { TraceEvent evt(Severity(severity), msg); for (const auto& p : detailsMap) { - evt.detail(p.first, p.second); + evt.detail(p.first.c_str(), p.second); } return Void(); }); diff --git a/flow/Arena.h b/flow/Arena.h index 2586532d3f..564b3596b9 100644 --- a/flow/Arena.h +++ b/flow/Arena.h @@ -422,6 +422,13 @@ private: bool valid; }; +template +struct Traceable> : std::conditional::value, std::true_type, std::false_type>::type { + static std::string toString(const Optional& value) { + return value.present() ? 
Traceable::toString(value.get()) : "[not set]"; + } +}; + //#define STANDALONE_ALWAYS_COPY template From d01cbf3455861f69e2e1f907ffe54f2cd3c929f7 Mon Sep 17 00:00:00 2001 From: mpilman Date: Fri, 5 Apr 2019 13:11:50 -0700 Subject: [PATCH 23/24] Addressed code review comments --- fdbclient/NativeAPI.actor.cpp | 28 ++-- fdbclient/RYWIterator.cpp | 4 +- fdbclient/ReadYourWrites.actor.cpp | 22 +-- fdbclient/SnapshotCache.h | 14 ++ fdbclient/TaskBucket.actor.cpp | 12 +- fdbclient/WriteMap.h | 2 +- fdbserver/DataDistributionQueue.actor.cpp | 10 +- fdbserver/DataDistributionTracker.actor.cpp | 20 +-- fdbserver/KeyValueStoreMemory.actor.cpp | 4 +- fdbserver/MoveKeys.actor.cpp | 4 +- fdbserver/storageserver.actor.cpp | 4 +- flow/Arena.h | 47 +++--- flow/Trace.cpp | 6 +- flow/Trace.h | 151 +++++++++++++------- 14 files changed, 190 insertions(+), 138 deletions(-) diff --git a/fdbclient/NativeAPI.actor.cpp b/fdbclient/NativeAPI.actor.cpp index 6a859ceff3..8a00900fb8 100644 --- a/fdbclient/NativeAPI.actor.cpp +++ b/fdbclient/NativeAPI.actor.cpp @@ -1334,7 +1334,7 @@ ACTOR Future> getValue( Future version, Key key, Databa g_traceBatch.addAttach("GetValueAttachID", info.debugID.get().first(), getValueID.get().first()); g_traceBatch.addEvent("GetValueDebug", getValueID.get().first(), "NativeAPI.getValue.Before"); //.detail("TaskID", g_network->getCurrentTask()); /*TraceEvent("TransactionDebugGetValueInfo", getValueID.get()) - .detail("Key", key) + .detail("Key", key) .detail("ReqVersion", ver) .detail("Servers", describe(ssi.second->get()));*/ } @@ -1356,7 +1356,7 @@ ACTOR Future> getValue( Future version, Key key, Databa if( info.debugID.present() ) { g_traceBatch.addEvent("GetValueDebug", getValueID.get().first(), "NativeAPI.getValue.After"); //.detail("TaskID", g_network->getCurrentTask()); /*TraceEvent("TransactionDebugGetValueDone", getValueID.get()) - .detail("Key", key) + .detail("Key", key) .detail("ReqVersion", ver) .detail("ReplySize", reply.value.present() ? 
reply.value.get().size() : -1);*/ } @@ -1367,7 +1367,7 @@ ACTOR Future> getValue( Future version, Key key, Databa if( info.debugID.present() ) { g_traceBatch.addEvent("GetValueDebug", getValueID.get().first(), "NativeAPI.getValue.Error"); //.detail("TaskID", g_network->getCurrentTask()); /*TraceEvent("TransactionDebugGetValueDone", getValueID.get()) - .detail("Key", key) + .detail("Key", key) .detail("ReqVersion", ver) .detail("ReplySize", reply.value.present() ? reply.value.get().size() : -1);*/ } @@ -1551,13 +1551,13 @@ ACTOR Future> getExactRange( Database cx, Version ver if( info.debugID.present() ) { g_traceBatch.addEvent("TransactionDebug", info.debugID.get().first(), "NativeAPI.getExactRange.Before"); /*TraceEvent("TransactionDebugGetExactRangeInfo", info.debugID.get()) - .detail("ReqBeginKey", req.begin.getKey()) - .detail("ReqEndKey", req.end.getKey()) - .detail("ReqLimit", req.limit) - .detail("ReqLimitBytes", req.limitBytes) - .detail("ReqVersion", req.version) - .detail("Reverse", reverse) - .detail("Servers", locations[shard].second->description());*/ + .detail("ReqBeginKey", req.begin.getKey()) + .detail("ReqEndKey", req.end.getKey()) + .detail("ReqLimit", req.limit) + .detail("ReqLimitBytes", req.limitBytes) + .detail("ReqVersion", req.version) + .detail("Reverse", reverse) + .detail("Servers", locations[shard].second->description());*/ } ++cx->transactionPhysicalReads; GetKeyValuesReply rep = wait( loadBalance( locations[shard].second, &StorageServerInterface::getKeyValues, req, TaskDefaultPromiseEndpoint, false, cx->enableLocalityLoadBalance ? 
&cx->queueModel : NULL ) ); @@ -1816,8 +1816,8 @@ ACTOR Future> getRange( Database cx, Reference> getRange( Database cx, Reference= allKeys.end && setIter == setEnd); diff --git a/fdbclient/ReadYourWrites.actor.cpp b/fdbclient/ReadYourWrites.actor.cpp index 32ac64114e..18be60bc73 100644 --- a/fdbclient/ReadYourWrites.actor.cpp +++ b/fdbclient/ReadYourWrites.actor.cpp @@ -38,8 +38,8 @@ public: if (kv) key = kv->key; } TraceEvent("RYWDump") - .detail("Begin", it.beginKey().toStandaloneStringRef()) - .detail("End", it.endKey().toStandaloneStringRef()) + .detail("Begin", it.beginKey()) + .detail("End", it.endKey()) .detail("Unknown", it.is_unknown_range()) .detail("Empty", it.is_empty_range()) .detail("KV", it.is_kv()) @@ -512,8 +512,8 @@ public: .detail("Reached", limits.isReached()) .detail("ItemsPastEnd", itemsPastEnd) .detail("EndOffset", -end.offset) - .detail("ItBegin", it.beginKey().toStandaloneStringRef()) - .detail("ItEnd", itEnd.beginKey().toStandaloneStringRef()) + .detail("ItBegin", it.beginKey()) + .detail("ItEnd", itEnd.beginKey()) .detail("Unknown", it.is_unknown_range()) .detail("Requests", requestCount);*/ @@ -606,7 +606,7 @@ public: ASSERT( !requestLimit.hasRowLimit() || requestLimit.rows > 0 ); ASSERT( requestLimit.hasRowLimit() || requestLimit.hasByteLimit() ); - //TraceEvent("RYWIssuing", randomID).detail("Begin", read_begin.toString()).detail("End", read_end.toString()).detail("Bytes", requestLimit.bytes).detail("Rows", requestLimit.rows).detail("Limits", limits.bytes).detail("Reached", limits.isReached()).detail("RequestCount", requestCount).detail("SingleClears", singleClears).detail("UcEnd", ucEnd.beginKey().toStandaloneStringRef()).detail("MinRows", requestLimit.minRows); + //TraceEvent("RYWIssuing", randomID).detail("Begin", read_begin.toString()).detail("End", read_end.toString()).detail("Bytes", requestLimit.bytes).detail("Rows", requestLimit.rows).detail("Limits", limits.bytes).detail("Reached", limits.isReached()).detail("RequestCount", 
requestCount).detail("SingleClears", singleClears).detail("UcEnd", ucEnd.beginKey()).detail("MinRows", requestLimit.minRows); additionalRows = 0; Standalone snapshot_read = wait( ryw->tr.getRange( read_begin, read_end, requestLimit, true, false ) ); @@ -636,7 +636,7 @@ public: itemsPastEnd += maxCount - count; - //TraceEvent("RYWaddKV", randomID).detail("Key", it.beginKey().toStandaloneStringRef()).detail("Count", count).detail("MaxCount", maxCount).detail("ItemsPastEnd", itemsPastEnd); + //TraceEvent("RYWaddKV", randomID).detail("Key", it.beginKey()).detail("Count", count).detail("MaxCount", maxCount).detail("ItemsPastEnd", itemsPastEnd); if( count ) result.append( result.arena(), start, count ); ++it; } else @@ -785,8 +785,8 @@ public: .detail("Reached", limits.isReached()) .detail("ItemsPastBegin", itemsPastBegin) .detail("EndOffset", end.offset) - .detail("ItBegin", it.beginKey().toStandaloneStringRef()) - .detail("ItEnd", itEnd.beginKey().toStandaloneStringRef()) + .detail("ItBegin", it.beginKey()) + .detail("ItEnd", itEnd.beginKey()) .detail("Unknown", it.is_unknown_range()) .detail("Kv", it.is_kv()) .detail("Requests", requestCount);*/ @@ -883,7 +883,7 @@ public: ASSERT( !requestLimit.hasRowLimit() || requestLimit.rows > 0 ); ASSERT( requestLimit.hasRowLimit() || requestLimit.hasByteLimit() ); - //TraceEvent("RYWIssuing", randomID).detail("Begin", read_begin.toString()).detail("End", read_end.toString()).detail("Bytes", requestLimit.bytes).detail("Rows", requestLimit.rows).detail("Limits", limits.bytes).detail("Reached", limits.isReached()).detail("RequestCount", requestCount).detail("SingleClears", singleClears).detail("UcEnd", ucEnd.beginKey().toStandaloneStringRef()).detail("MinRows", requestLimit.minRows); + //TraceEvent("RYWIssuing", randomID).detail("Begin", read_begin.toString()).detail("End", read_end.toString()).detail("Bytes", requestLimit.bytes).detail("Rows", requestLimit.rows).detail("Limits", limits.bytes).detail("Reached", 
limits.isReached()).detail("RequestCount", requestCount).detail("SingleClears", singleClears).detail("UcEnd", ucEnd.beginKey()).detail("MinRows", requestLimit.minRows); additionalRows = 0; Standalone snapshot_read = wait( ryw->tr.getRange( read_begin, read_end, requestLimit, true, true ) ); @@ -917,7 +917,7 @@ public: } itemsPastBegin += maxCount - count; - //TraceEvent("RYWaddKV", randomID).detail("Key", it.beginKey().toStandaloneStringRef()).detail("Count", count).detail("MaxCount", maxCount).detail("ItemsPastBegin", itemsPastBegin); + //TraceEvent("RYWaddKV", randomID).detail("Key", it.beginKey()).detail("Count", count).detail("MaxCount", maxCount).detail("ItemsPastBegin", itemsPastBegin); if( count ) { int size = result.size(); result.resize(result.arena(),size+count); @@ -1982,7 +1982,7 @@ void ReadYourWritesTransaction::debugLogRetries(Optional error) { if(error.present()) trace.error(error.get(), true); if(!transactionDebugInfo->transactionName.empty()) - trace.detail("TransactionName", StringRef(transactionDebugInfo->transactionName)); + trace.detail("TransactionName", transactionDebugInfo->transactionName); trace.detail("Elapsed", elapsed).detail("Retries", retries).detail("Committed", committed); } transactionDebugInfo->lastRetryLogTime = now(); diff --git a/fdbclient/SnapshotCache.h b/fdbclient/SnapshotCache.h index 0b3eb6a515..9fca5cafdf 100644 --- a/fdbclient/SnapshotCache.h +++ b/fdbclient/SnapshotCache.h @@ -101,6 +101,7 @@ struct ExtStringRef { } private: + friend struct Traceable; StringRef base; int extra_zero_bytes; }; @@ -113,6 +114,19 @@ inline bool operator > ( const ExtStringRef& lhs, const ExtStringRef& rhs ) { re inline bool operator <= ( const ExtStringRef& lhs, const ExtStringRef& rhs ) { return lhs.cmp(rhs)<=0; } inline bool operator >= ( const ExtStringRef& lhs, const ExtStringRef& rhs ) { return lhs.cmp(rhs)>=0; } +template<> +struct Traceable : std::true_type { + static std::string toString(const ExtStringRef str) { + std::string 
result; + result.reserve(str.size()); + std::copy(str.base.begin(), str.base.end(), std::back_inserter(result)); + for (int i = 0; i < str.extra_zero_bytes; ++i) { + result.push_back('\0'); + } + return Traceable::toString(result); + } +}; + class SnapshotCache { private: struct Entry { diff --git a/fdbclient/TaskBucket.actor.cpp b/fdbclient/TaskBucket.actor.cpp index ef07fa52ed..76d4e536f0 100644 --- a/fdbclient/TaskBucket.actor.cpp +++ b/fdbclient/TaskBucket.actor.cpp @@ -710,13 +710,13 @@ public: tr->setOption(FDBTransactionOptions::LOCK_AWARE); Standalone values = wait(tr->getRange(subspace.range(), CLIENT_KNOBS->TOO_MANY)); TraceEvent("TaskBucket").detail("DebugPrintRange", "Print DB Range").detail("Key", subspace.key()).detail("Count", values.size()).detail("Msg", msg); - /* - printf("debugPrintRange key: (%d) %s\n", values.size(), printable(subspace.key()).c_str()); + + /*printf("debugPrintRange key: (%d) %s\n", values.size(), printable(subspace.key()).c_str()); for (auto & s : values) { - printf(" key: %-40s value: %s\n", printable(s.key).c_str(), s.value.c_str()); - TraceEvent("TaskBucket").detail("DebugPrintRange", msg) - .detail("Key", s.key) - .detail("Value", s.value); + printf(" key: %-40s value: %s\n", printable(s.key).c_str(), s.value.c_str()); + TraceEvent("TaskBucket").detail("DebugPrintRange", msg) + .detail("Key", s.key) + .detail("Value", s.value); }*/ return Void(); diff --git a/fdbclient/WriteMap.h b/fdbclient/WriteMap.h index 8ba6b145d4..dc67f27e2e 100644 --- a/fdbclient/WriteMap.h +++ b/fdbclient/WriteMap.h @@ -544,7 +544,7 @@ private: it.skip(allKeys.begin); while( it.beginKey() < allKeys.end ) { TraceEvent("WriteMapDump").detail("Begin", it.beginKey().toStandaloneStringRef()) - .detail("End", it.endKey().toStandaloneStringRef()) + .detail("End", it.endKey()) .detail("Cleared", it.is_cleared_range()) .detail("Conflicted", it.is_conflict_range()) .detail("Operation", it.is_operation()) diff --git a/fdbserver/DataDistributionQueue.actor.cpp 
b/fdbserver/DataDistributionQueue.actor.cpp index 775c51cdf2..da2a5d0dbd 100644 --- a/fdbserver/DataDistributionQueue.actor.cpp +++ b/fdbserver/DataDistributionQueue.actor.cpp @@ -701,8 +701,8 @@ struct DDQueueData { busyString += describe(rd.src[i]) + " - (" + busymap[ rd.src[i] ].toString() + "); "; TraceEvent(title, distributorId) - .detail("KeyBegin", rd.keys.begin) - .detail("KeyEnd", rd.keys.end) + .detail("KeyBegin", rd.keys.begin) + .detail("KeyEnd", rd.keys.end) .detail("Priority", rd.priority) .detail("WorkFactor", rd.workFactor) .detail("SourceServerCount", rd.src.size()) @@ -759,9 +759,9 @@ struct DDQueueData { it->value().priority >= rd.priority && rd.priority < PRIORITY_TEAM_REDUNDANT ) { /*TraceEvent("OverlappingInFlight", distributorId) - .detail("KeyBegin", it->value().keys.begin) - .detail("KeyEnd", it->value().keys.end) - .detail("Priority", it->value().priority); */ + .detail("KeyBegin", it->value().keys.begin) + .detail("KeyEnd", it->value().keys.end) + .detail("Priority", it->value().priority);*/ overlappingInFlight = true; break; } diff --git a/fdbserver/DataDistributionTracker.actor.cpp b/fdbserver/DataDistributionTracker.actor.cpp index a6e027c171..e6e85006f9 100644 --- a/fdbserver/DataDistributionTracker.actor.cpp +++ b/fdbserver/DataDistributionTracker.actor.cpp @@ -190,7 +190,7 @@ ACTOR Future trackShardBytes( StorageMetrics metrics = wait( tr.waitStorageMetrics( keys, bounds.min, bounds.max, bounds.permittedError, CLIENT_KNOBS->STORAGE_METRICS_SHARD_LIMIT ) ); /*TraceEvent("ShardSizeUpdate") - .detail("Keys", keys) + .detail("Keys", keys) .detail("UpdatedSize", metrics.metrics.bytes) .detail("Bandwidth", metrics.metrics.bytesPerKSecond) .detail("BandwithStatus", getBandwidthStatus(metrics)) @@ -199,7 +199,7 @@ ACTOR Future trackShardBytes( .detail("BandwidthLower", bounds.min.bytesPerKSecond) .detail("BandwidthUpper", bounds.max.bytesPerKSecond) .detail("ShardSizePresent", shardSize->get().present()) - .detail("OldShardSize", 
shardSize->get().present() ? shardSize->get().get().metrics.bytes : 0 ) + .detail("OldShardSize", shardSize->get().present() ? shardSize->get().get().metrics.bytes : 0) .detail("TrackerID", trackerID);*/ if( shardSize->get().present() && addToSizeEstimate ) @@ -532,8 +532,8 @@ ACTOR Future shardTracker( wait( delay( 0, TaskDataDistribution ) ); /*TraceEvent("ShardTracker", self->distributorId) - .detail("Begin", keys.begin) - .detail("End", keys.end) + .detail("Begin", keys.begin) + .detail("End", keys.end) .detail("TrackerID", trackerID) .detail("MaxBytes", self->maxShardSize->get().get()) .detail("ShardSize", shardSize->get().get().bytes) @@ -572,7 +572,7 @@ void restartShardTrackers( DataDistributionTracker* self, KeyRangeRef keys, Opti if( startingSize.present() ) { ASSERT( ranges.size() == 1 ); /*TraceEvent("ShardTrackerSizePreset", self->distributorId) - .detail("Keys", keys) + .detail("Keys", keys) .detail("Size", startingSize.get().metrics.bytes) .detail("Merges", startingSize.get().merges);*/ TEST( true ); // shardTracker started with trackedBytes already set @@ -736,9 +736,9 @@ void ShardsAffectedByTeamFailure::defineShard( KeyRangeRef keys ) { uniquify(prevTeams); /*TraceEvent("ShardsAffectedByTeamFailureDefine") - .detail("KeyBegin", keys.begin) - .detail("KeyEnd", keys.end) - .detail("TeamCount", teams.size()); */ + .detail("KeyBegin", keys.begin) + .detail("KeyEnd", keys.end) + .detail("TeamCount", teams.size());*/ auto affectedRanges = shard_teams.getAffectedRangesAfterInsertion(keys); shard_teams.insert( keys, std::make_pair(teams, prevTeams) ); @@ -754,8 +754,8 @@ void ShardsAffectedByTeamFailure::defineShard( KeyRangeRef keys ) { void ShardsAffectedByTeamFailure::moveShard( KeyRangeRef keys, std::vector destinationTeams ) { /*TraceEvent("ShardsAffectedByTeamFailureMove") - .detail("KeyBegin", keys.begin) - .detail("KeyEnd", keys.end) + .detail("KeyBegin", keys.begin) + .detail("KeyEnd", keys.end) .detail("NewTeamSize", destinationTeam.size()) 
.detail("NewTeam", describe(destinationTeam));*/ diff --git a/fdbserver/KeyValueStoreMemory.actor.cpp b/fdbserver/KeyValueStoreMemory.actor.cpp index f4e29dae79..ea383f1a66 100644 --- a/fdbserver/KeyValueStoreMemory.actor.cpp +++ b/fdbserver/KeyValueStoreMemory.actor.cpp @@ -485,8 +485,8 @@ private: if (h.op == OpSnapshotItem) { // snapshot data item /*if (p1 < uncommittedNextKey) { TraceEvent(SevError, "RecSnapshotBack", self->id) - .detail("NextKey", uncommittedNextKey) - .detail("P1", p1) + .detail("NextKey", uncommittedNextKey) + .detail("P1", p1) .detail("Nextlocation", self->log->getNextReadLocation()); } ASSERT( p1 >= uncommittedNextKey );*/ diff --git a/fdbserver/MoveKeys.actor.cpp b/fdbserver/MoveKeys.actor.cpp index d8389b3f4a..58e4ff6751 100644 --- a/fdbserver/MoveKeys.actor.cpp +++ b/fdbserver/MoveKeys.actor.cpp @@ -301,8 +301,8 @@ ACTOR Future startMoveKeys( Database occ, KeyRange keys, vector serve decodeKeyServersValue( old[i].value, src, dest ); /*TraceEvent("StartMoveKeysOldRange", relocationIntervalId) - .detail("KeyBegin", rangeIntersectKeys.begin.c_str()) - .detail("KeyEnd", rangeIntersectKeys.end.c_str()) + .detail("KeyBegin", rangeIntersectKeys.begin.c_str()) + .detail("KeyEnd", rangeIntersectKeys.end.c_str()) .detail("OldSrc", describe(src)) .detail("OldDest", describe(dest)) .detail("ReadVersion", tr.getReadVersion().get());*/ diff --git a/fdbserver/storageserver.actor.cpp b/fdbserver/storageserver.actor.cpp index 9ae6f239c2..036f56e741 100644 --- a/fdbserver/storageserver.actor.cpp +++ b/fdbserver/storageserver.actor.cpp @@ -2245,8 +2245,8 @@ void changeServerKeys( StorageServer* data, const KeyRangeRef& keys, bool nowAss KeyRangeRef range = keys & r->range(); bool dataAvailable = r->value()==latestVersion || r->value() >= version; /*TraceEvent("CSKRange", data->thisServerID) - .detail("KeyBegin", range.begin) - .detail("KeyEnd", range.end) + .detail("KeyBegin", range.begin) + .detail("KeyEnd", range.end) .detail("Available", dataAvailable) 
.detail("NowAssigned", nowAssigned) .detail("NewestAvailable", r->value()) diff --git a/flow/Arena.h b/flow/Arena.h index 564b3596b9..dd6397d8dd 100644 --- a/flow/Arena.h +++ b/flow/Arena.h @@ -569,33 +569,7 @@ public: std::string toString() const { return std::string( (const char*)data, length ); } static bool isPrintable(char c) { return c > 32 && c < 127; } - std::string printable() const { - std::string result; - int nonPrintables = 0; - int numBackslashes = 0; - for (auto c : *this) { - if (!isPrintable(c)) { - ++nonPrintables; - } else if (c == '\\') { - ++numBackslashes; - } - } - result.reserve(size() - nonPrintables + (nonPrintables * 4) + numBackslashes); - for (auto c : *this) { - if (isPrintable(c)) { - result.push_back(c); - } else if (c == '\\') { - result.push_back('\\'); - result.push_back('\\'); - } else { - result.push_back('\\'); - result.push_back('x'); - result.push_back(base16Char((c / 16) % 16)); - result.push_back(base16Char(c % 16)); - } - } - return result; - } + inline std::string printable() const; std::string toHexString(int limit = -1) const { if(limit < 0) @@ -680,12 +654,27 @@ private: #pragma pack( pop ) template<> -struct Traceable : std::true_type { +struct TraceableString { + static const char* begin(StringRef value) { + return reinterpret_cast(value.begin()); + } + + static bool atEnd(const StringRef& value, const char* iter) { + return iter == reinterpret_cast(value.end()); + } + static std::string toString(const StringRef& value) { - return value.printable(); + return value.toString(); } }; +template<> +struct Traceable : TraceableStringImpl {}; + +inline std::string StringRef::printable() const { + return Traceable::toString(*this); +} + template struct Traceable> : std::conditional::value, std::true_type, std::false_type>::type { static std::string toString(const Standalone& value) { diff --git a/flow/Trace.cpp b/flow/Trace.cpp index 5869122fc6..cca638f7ad 100644 --- a/flow/Trace.cpp +++ b/flow/Trace.cpp @@ -657,7 +657,11 @@ 
TraceEvent::TraceEvent( const char* type, UID id ) : id(id), type(type), severit TraceEvent::TraceEvent( Severity severity, const char* type, UID id ) : id(id), type(type), severity(severity), initialized(false), enabled(g_network == nullptr || FLOW_KNOBS->MIN_TRACE_SEVERITY <= severity) {} -TraceEvent::TraceEvent( TraceInterval& interval, UID id ) : id(id), type(interval.type), severity(interval.severity), initialized(false), enabled(true) { +TraceEvent::TraceEvent( TraceInterval& interval, UID id ) + : id(id), type(interval.type) + , severity(interval.severity) + , initialized(false) + , enabled(g_network == nullptr || FLOW_KNOBS->MIN_TRACE_SEVERITY <= interval.severity) { init(interval); } TraceEvent::TraceEvent( Severity severity, TraceInterval& interval, UID id ) diff --git a/flow/Trace.h b/flow/Trace.h index 47324116f3..43f4de565c 100644 --- a/flow/Trace.h +++ b/flow/Trace.h @@ -216,96 +216,141 @@ struct Traceable : std::true_type { } }; +template +struct IsTraceableString : std::false_type {}; + template<> -struct Traceable : std::true_type { - static bool isPrintable(char c) { return c > 32 && c < 127; } +struct IsTraceableString : std::false_type {}; + +template<> +struct IsTraceableString : std::false_type {}; + +template +struct TraceableString { + static auto begin(const Str& value) -> decltype(value.begin()) { + return value.begin(); + } + + static bool atEnd(const Str& value, decltype(value.begin()) iter) { + return iter == value.end(); + } + + static std::string toString(const Str& value) { + return value.toString(); + } +}; + +template<> +struct TraceableString { + static auto begin(const std::string& value) -> decltype(value.begin()) { + return value.begin(); + } + + static bool atEnd(const std::string& value, decltype(value.begin()) iter) { + return iter == value.end(); + } + + template + static std::string toString(S&& value) { + return std::forward(value); + } +}; + +template<> +struct TraceableString { + static const char* begin(const char* 
value) { + return value; + } + + static bool atEnd(const char* value, const char* iter) { + return *iter == '\0'; + } + static std::string toString(const char* value) { - // if all characters are printable ascii, we simply return the string - int nonPrintables = 0; - int numBackslashes = 0; - auto val = value; - int size = 0; - while (auto c = *(val++)) { - ++size; - if (!Traceable::isPrintable(c)) { - ++nonPrintables; - } else if (c == '\\') { - ++numBackslashes; - } - } - if (nonPrintables == 0 && numBackslashes == 0) { - return std::string(value); - } - std::string result; - result.reserve(size - nonPrintables + (nonPrintables * 4) + numBackslashes); - while (auto c = *(val++)) { - if (Traceable::isPrintable(c)) { - result.push_back(c); - } else if (c == '\\') { - result.push_back('\\'); - result.push_back('\\'); - } else { - result.push_back('\\'); - result.push_back('x'); - result.push_back(base16Char((c / 16) % 16)); - result.push_back(base16Char(c % 16)); - } - } - return result; + return std::string(value); } }; template -struct Traceable : std::true_type { +struct TraceableString { + static const char* begin(const char* value) { + return value; + } + + static bool atEnd(const char* value, const char* iter) { + return iter - value == S; + } + static std::string toString(const char* value) { - return Traceable::toString(value); + return std::string(value, S); } }; template<> -struct Traceable : std::true_type { - static std::string toString(const char* value) { - return Traceable::toString(value); +struct TraceableString { + static const char* begin(char* value) { + return value; + } + + static bool atEnd(char* value, const char* iter) { + return *iter == '\0'; + } + + static std::string toString(char* value) { + return std::string(value); } }; -template<> -struct Traceable : std::true_type { - static bool isPrintable(char c) { return c >= 32 && c < 127; } +template +struct TraceableStringImpl : std::true_type { + static constexpr bool isPrintable(char c) { 
return c > 32 && c < 127; } + template static std::string toString(Str&& value) { // if all characters are printable ascii, we simply return the string int nonPrintables = 0; int numBackslashes = 0; - for (auto c : value) { - if (!isPrintable(c)) { + auto val = value; + int size = 0; + for (auto iter = TraceableString::begin(value); !TraceableString::atEnd(value, iter); ++iter) { + ++size; + if (!isPrintable(char(*iter))) { ++nonPrintables; - } else if (c == '\\') { + } else if (*iter == '\\') { ++numBackslashes; } } if (nonPrintables == 0 && numBackslashes == 0) { - return std::forward(value); + return TraceableString::toString(std::forward(value)); } std::string result; - result.reserve(value.size() - nonPrintables + (nonPrintables * 4) + numBackslashes); - for (auto c : value) { - if (isPrintable(c)) { - result.push_back(c); - } else if (c == '\\') { + result.reserve(size - nonPrintables + (nonPrintables * 4) + numBackslashes); + for (auto iter = TraceableString::begin(value); !TraceableString::atEnd(value, iter); ++iter) { + if (isPrintable(*iter)) { + result.push_back(*iter); + } else if (*iter == '\\') { result.push_back('\\'); result.push_back('\\'); } else { result.push_back('\\'); result.push_back('x'); - result.push_back(base16Char((c / 16) % 16)); - result.push_back(base16Char(c % 16)); + result.push_back(base16Char((*iter / 16) % 16)); + result.push_back(base16Char(*iter % 16)); } } return result; } }; +template<> +struct Traceable : TraceableStringImpl {}; +template<> +struct Traceable : TraceableStringImpl {}; +template +struct Traceable : TraceableStringImpl {}; +template<> +struct Traceable : TraceableStringImpl {}; + template struct SpecialTraceMetricType : std::conditional::value || std::is_enum::value, From e4746888ebabae850740de8305e8fbd1eeec5b8c Mon Sep 17 00:00:00 2001 From: mpilman Date: Tue, 9 Apr 2019 09:08:45 -0700 Subject: [PATCH 24/24] Removed dead code --- flow/Trace.h | 9 --------- 1 file changed, 9 deletions(-) diff --git 
a/flow/Trace.h b/flow/Trace.h index 43f4de565c..44107fe50f 100644 --- a/flow/Trace.h +++ b/flow/Trace.h @@ -216,15 +216,6 @@ struct Traceable : std::true_type { } }; -template -struct IsTraceableString : std::false_type {}; - -template<> -struct IsTraceableString : std::false_type {}; - -template<> -struct IsTraceableString : std::false_type {}; - template struct TraceableString { static auto begin(const Str& value) -> decltype(value.begin()) {