From 07349869d9b0e2613fa761eb8a76dbc68990ab08 Mon Sep 17 00:00:00 2001 From: sfc-gh-tclinkenbeard Date: Wed, 17 Nov 2021 14:45:48 -0800 Subject: [PATCH] Use fmt to address -Wformat warnings --- fdbcli/ChangeFeedCommand.actor.cpp | 7 +- fdbserver/BlobWorker.actor.cpp | 324 +++++++++--------- fdbserver/VersionedBTree.actor.cpp | 145 ++++---- fdbserver/networktest.actor.cpp | 9 +- .../workloads/BlobGranuleVerifier.actor.cpp | 68 ++-- 5 files changed, 284 insertions(+), 269 deletions(-) diff --git a/fdbcli/ChangeFeedCommand.actor.cpp b/fdbcli/ChangeFeedCommand.actor.cpp index b44517a47e..951d004e1a 100644 --- a/fdbcli/ChangeFeedCommand.actor.cpp +++ b/fdbcli/ChangeFeedCommand.actor.cpp @@ -20,6 +20,7 @@ #include "fdbcli/fdbcli.actor.h" +#include "contrib/fmt-8.0.1/include/fmt/format.h" #include "fdbclient/FDBOptions.g.h" #include "fdbclient/IClientApi.h" #include "fdbclient/Knobs.h" @@ -67,9 +68,9 @@ ACTOR Future requestVersionUpdate(Database localDb, ReferencewhenAtLeast(ver)); - printf("Feed at version %d\n", ver); + fmt::print("Feed at version {}\n", ver); } } @@ -120,7 +121,7 @@ ACTOR Future changeFeedCommandActor(Database localDb, std::vector 4) { int n = 0; - if (sscanf(tokens[4].toString().c_str(), "%ld%n", &end, &n) != 1 || n != tokens[4].size()) { + if (sscanf(tokens[4].toString().c_str(), "%" PRId64 "%n", &end, &n) != 1 || n != tokens[4].size()) { printUsage(tokens[0]); return false; } diff --git a/fdbserver/BlobWorker.actor.cpp b/fdbserver/BlobWorker.actor.cpp index ca7b6aa34e..4047aea045 100644 --- a/fdbserver/BlobWorker.actor.cpp +++ b/fdbserver/BlobWorker.actor.cpp @@ -22,6 +22,7 @@ #include #include +#include "contrib/fmt-8.0.1/include/fmt/format.h" #include "fdbclient/FDBTypes.h" #include "fdbclient/SystemData.h" #include "fdbclient/BackupContainerFileSystem.h" @@ -193,16 +194,16 @@ struct BlobWorkerData : NonCopyable, ReferenceCounted { bool managerEpochOk(int64_t epoch) { if (epoch < currentManagerEpoch) { if (BW_DEBUG) { - printf("BW %s got request from old epoch %ld, notifying manager it is out of date\n", - id.toString().c_str(), - epoch); + fmt::print("BW {0} got request from old epoch {1}, notifying manager it is out of date\n", + id.toString(), + epoch); } return false; } else { if (epoch > currentManagerEpoch) { currentManagerEpoch = epoch; if (BW_DEBUG) { - printf("BW %s found new manager epoch %ld\n", id.toString().c_str(), currentManagerEpoch); + fmt::print("BW {0} found new manager epoch {1}\n", id.toString(), currentManagerEpoch); } } @@ -216,11 +217,11 @@ static void acquireGranuleLock(int64_t epoch, int64_t seqno, int64_t prevOwnerEp // returns true if our lock (E, S) >= (Eprev, Sprev) if (epoch < prevOwnerEpoch || (epoch == prevOwnerEpoch && seqno < prevOwnerSeqno)) { if (BW_DEBUG) { - printf("Lock acquire check failed. Proposed (%ld, %ld) < previous (%ld, %ld)\n", - epoch, - seqno, - prevOwnerEpoch, - prevOwnerSeqno); + fmt::print("Lock acquire check failed. Proposed ({0}, {1}) < previous ({2}, {3})\n", + epoch, + seqno, + prevOwnerEpoch, + prevOwnerSeqno); } throw granule_assignment_conflict(); } @@ -239,11 +240,11 @@ static void checkGranuleLock(int64_t epoch, int64_t seqno, int64_t ownerEpoch, i // returns true if we still own the lock, false if someone else does if (epoch != ownerEpoch || seqno != ownerSeqno) { if (BW_DEBUG) { - printf("Lock assignment check failed. Expected (%ld, %ld), got (%ld, %ld)\n", - epoch, - seqno, - ownerEpoch, - ownerSeqno); + fmt::print("Lock assignment check failed. 
Expected ({0}, {1}), got ({2}, {3})\n", + epoch, + seqno, + ownerEpoch, + ownerSeqno); } throw granule_assignment_conflict(); } @@ -303,10 +304,10 @@ ACTOR Future readGranuleFiles(Transaction* tr, Key* startKey, Key endKey, } } if (BW_DEBUG) { - printf("Loaded %lu snapshot and %lu delta files for %s\n", - files->snapshotFiles.size(), - files->deltaFiles.size(), - granuleID.toString().c_str()); + fmt::print("Loaded {0} snapshot and {1} delta files for {2}\n", + files->snapshotFiles.size(), + files->deltaFiles.size(), + granuleID.toString()); } return Void(); } @@ -546,14 +547,15 @@ ACTOR Future writeDeltaFile(Reference bwData, wait(tr->commit()); if (BW_DEBUG) { - printf("Granule %s [%s - %s) updated fdb with delta file %s of size %d at version %ld, cv=%ld\n", - granuleID.toString().c_str(), - keyRange.begin.printable().c_str(), - keyRange.end.printable().c_str(), - fname.c_str(), - serialized.size(), - currentDeltaVersion, - tr->getCommittedVersion()); + fmt::print( + "Granule {0} [{1} - {2}) updated fdb with delta file {3} of size {4} at version {5}, cv={6}\n", + granuleID.toString(), + keyRange.begin.printable(), + keyRange.end.printable(), + fname, + serialized.size(), + currentDeltaVersion, + tr->getCommittedVersion()); } if (BUGGIFY_WITH_PROB(0.01)) { @@ -812,10 +814,10 @@ ACTOR Future compactFromBlob(Reference bwData, chunk.includedVersion = version; if (BW_DEBUG) { - printf("Re-snapshotting [%s - %s) @ %ld from blob\n", - metadata->keyRange.begin.printable().c_str(), - metadata->keyRange.end.printable().c_str(), - version); + fmt::print("Re-snapshotting [{0} - {1}) @ {2} from blob\n", + metadata->keyRange.begin.printable(), + metadata->keyRange.end.printable(), + version); /*printf(" SnapshotFile:\n %s\n", chunk.snapshotFile.get().toString().c_str()); printf(" DeltaFiles:\n"); @@ -911,7 +913,7 @@ ACTOR Future handleCompletedDeltaFile(Reference bwData, if (completedDeltaFile.version > cfStartVersion) { if (BW_DEBUG) { - printf("Popping change feed %s at %ld\n", cfKey.printable().c_str(), completedDeltaFile.version); + fmt::print("Popping change feed {0} at {1}\n", cfKey.printable(), completedDeltaFile.version); } // FIXME: for a write-hot shard, we could potentially batch these and only pop the largest one after several // have completed @@ -968,10 +970,10 @@ static Version doGranuleRollback(Reference metadata, metadata->bytesInNewDeltaFiles -= df.bytes; toPop++; if (BW_DEBUG) { - printf("[%s - %s) rollback cancelling delta file @ %ld\n", - metadata->keyRange.begin.printable().c_str(), - metadata->keyRange.end.printable().c_str(), - df.version); + fmt::print("[{0} - {1}) rollback cancelling delta file @ {2}\n", + metadata->keyRange.begin.printable(), + metadata->keyRange.end.printable(), + df.version); } } else { ASSERT(df.version > cfRollbackVersion); @@ -1013,12 +1015,12 @@ static Version doGranuleRollback(Reference metadata, } mIdx++; if (BW_DEBUG) { - printf("[%s - %s) rollback discarding %d in-memory mutations, %d mutations and %ld bytes left\n", - metadata->keyRange.begin.printable().c_str(), - metadata->keyRange.end.printable().c_str(), - metadata->currentDeltas.size() - mIdx, - mIdx, - metadata->bufferedDeltaBytes); + fmt::print("[{0} - {1}) rollback discarding {2} in-memory mutations, {3} mutations and {4} bytes left\n", + metadata->keyRange.begin.printable(), + metadata->keyRange.end.printable(), + metadata->currentDeltas.size() - mIdx, + mIdx, + metadata->bufferedDeltaBytes); } metadata->currentDeltas.resize(metadata->deltaArena, mIdx); @@ -1030,10 +1032,10 @@ static 
Version doGranuleRollback(Reference metadata, } if (BW_DEBUG) { - printf("[%s - %s) finishing rollback to %ld\n", - metadata->keyRange.begin.printable().c_str(), - metadata->keyRange.end.printable().c_str(), - cfRollbackVersion); + fmt::print("[{0} - {1}) finishing rollback to {2}\n", + metadata->keyRange.begin.printable(), + metadata->keyRange.end.printable(), + cfRollbackVersion); } metadata->rollbackCount.set(metadata->rollbackCount.get() + 1); @@ -1089,16 +1091,17 @@ ACTOR Future blobGranuleUpdateFiles(Reference bwData, } if (BW_DEBUG) { - printf("Granule File Updater Starting for [%s - %s):\n", - metadata->keyRange.begin.printable().c_str(), - metadata->keyRange.end.printable().c_str()); - printf(" CFID: %s\n", startState.granuleID.toString().c_str()); - printf(" CF Start Version: %ld\n", startState.changeFeedStartVersion); - printf(" Previous Durable Version: %ld\n", startState.previousDurableVersion); - printf(" doSnapshot=%s\n", startState.doSnapshot ? "T" : "F"); - printf(" Prev CFID: %s\n", - startState.parentGranule.present() ? startState.parentGranule.get().second.toString().c_str() : ""); - printf(" blobFilesToSnapshot=%s\n", startState.blobFilesToSnapshot.present() ? "T" : "F"); + fmt::print("Granule File Updater Starting for [{0} - {1}):\n", + metadata->keyRange.begin.printable(), + metadata->keyRange.end.printable()); + fmt::print(" CFID: {}\n", startState.granuleID.toString()); + fmt::print(" CF Start Version: {}\n", startState.changeFeedStartVersion); + fmt::print(" Previous Durable Version: {}\n", startState.previousDurableVersion); + fmt::print(" doSnapshot={}\n", startState.doSnapshot ? "T" : "F"); + fmt::print(" Prev CFID: {}\n", + startState.parentGranule.present() ? startState.parentGranule.get().second.toString().c_str() + : ""); + fmt::print(" blobFilesToSnapshot={}\n", startState.blobFilesToSnapshot.present() ? "T" : "F"); } state Version startVersion; @@ -1261,13 +1264,13 @@ ACTOR Future blobGranuleUpdateFiles(Reference bwData, if (metadata->bufferedDeltaBytes >= SERVER_KNOBS->BG_DELTA_FILE_TARGET_BYTES && deltas.version > lastVersion) { if (BW_DEBUG) { - printf("Granule [%s - %s) flushing delta file after %lu bytes @ %ld %ld%s\n", - metadata->keyRange.begin.printable().c_str(), - metadata->keyRange.end.printable().c_str(), - metadata->bufferedDeltaBytes, - lastVersion, - deltas.version, - oldChangeFeedDataComplete.present() ? ". Finalizing " : ""); + fmt::print("Granule [{0} - {1}) flushing delta file after {2} bytes @ {3} {4}{5}\n", + metadata->keyRange.begin.printable(), + metadata->keyRange.end.printable(), + metadata->bufferedDeltaBytes, + lastVersion, + deltas.version, + oldChangeFeedDataComplete.present() ? ". Finalizing " : ""); } TraceEvent("BlobGranuleDeltaFile", bwData->id) .detail("Granule", metadata->keyRange) @@ -1323,13 +1326,14 @@ ACTOR Future blobGranuleUpdateFiles(Reference bwData, if (snapshotEligible && metadata->bytesInNewDeltaFiles >= SERVER_KNOBS->BG_DELTA_BYTES_BEFORE_COMPACT && !readOldChangeFeed) { if (BW_DEBUG && (inFlightBlobSnapshot.isValid() || !inFlightDeltaFiles.empty())) { - printf("Granule [%s - %s) ready to re-snapshot, waiting for outstanding %d snapshot and %lu " - "deltas to " - "finish\n", - metadata->keyRange.begin.printable().c_str(), - metadata->keyRange.end.printable().c_str(), - inFlightBlobSnapshot.isValid() ? 
1 : 0, - inFlightDeltaFiles.size()); + fmt::print( + "Granule [{0} - {1}) ready to re-snapshot, waiting for outstanding {2} snapshot and {3} " + "deltas to " + "finish\n", + metadata->keyRange.begin.printable(), + metadata->keyRange.end.printable(), + inFlightBlobSnapshot.isValid() ? 1 : 0, + inFlightDeltaFiles.size()); } // wait for all in flight snapshot/delta files if (inFlightBlobSnapshot.isValid()) { @@ -1352,10 +1356,10 @@ ACTOR Future blobGranuleUpdateFiles(Reference bwData, inFlightDeltaFiles.clear(); if (BW_DEBUG) { - printf("Granule [%s - %s) checking with BM for re-snapshot after %lu bytes\n", - metadata->keyRange.begin.printable().c_str(), - metadata->keyRange.end.printable().c_str(), - metadata->bytesInNewDeltaFiles); + fmt::print("Granule [{0} - {1}) checking with BM for re-snapshot after {2} bytes\n", + metadata->keyRange.begin.printable(), + metadata->keyRange.end.printable(), + metadata->bytesInNewDeltaFiles); } TraceEvent("BlobGranuleSnapshotCheck", bwData->id) @@ -1394,18 +1398,19 @@ ACTOR Future blobGranuleUpdateFiles(Reference bwData, } if (BW_DEBUG) { - printf("Granule [%s - %s)\n, hasn't heard back from BM in BW %s, re-sending status\n", - metadata->keyRange.begin.printable().c_str(), - metadata->keyRange.end.printable().c_str(), - bwData->id.toString().c_str()); + fmt::print( + "Granule [{0} - {1})\n, hasn't heard back from BM in BW {2}, re-sending status\n", + metadata->keyRange.begin.printable(), + metadata->keyRange.end.printable(), + bwData->id.toString()); } } if (BW_DEBUG) { - printf("Granule [%s - %s) re-snapshotting after %lu bytes\n", - metadata->keyRange.begin.printable().c_str(), - metadata->keyRange.end.printable().c_str(), - metadata->bytesInNewDeltaFiles); + fmt::print("Granule [{0} - {1}) re-snapshotting after {2} bytes\n", + metadata->keyRange.begin.printable(), + metadata->keyRange.end.printable(), + metadata->bytesInNewDeltaFiles); } TraceEvent("BlobGranuleSnapshotFile", bwData->id) .detail("Granule", metadata->keyRange) @@ -1469,7 +1474,7 @@ ACTOR Future blobGranuleUpdateFiles(Reference bwData, if (!rollbacksInProgress.empty()) { ASSERT(rollbacksInProgress.front().first == rollbackVersion); ASSERT(rollbacksInProgress.front().second == deltas.version); - printf("Passed rollback %ld -> %ld\n", deltas.version, rollbackVersion); + fmt::print("Passed rollback {0} -> {1}\n", deltas.version, rollbackVersion); rollbacksCompleted.push_back(rollbacksInProgress.front()); rollbacksInProgress.pop_front(); } else { @@ -1481,17 +1486,17 @@ ACTOR Future blobGranuleUpdateFiles(Reference bwData, metadata->currentDeltas.back().version <= rollbackVersion)) { if (BW_DEBUG) { - printf("BW skipping rollback %ld -> %ld completely\n", - deltas.version, - rollbackVersion); + fmt::print("BW skipping rollback {0} -> {1} completely\n", + deltas.version, + rollbackVersion); } } else { if (BW_DEBUG) { - printf("BW [%s - %s) ROLLBACK @ %ld -> %ld\n", - metadata->keyRange.begin.printable().c_str(), - metadata->keyRange.end.printable().c_str(), - deltas.version, - rollbackVersion); + fmt::print("BW [{0} - {1}) ROLLBACK @ {2} -> {3}\n", + metadata->keyRange.begin.printable(), + metadata->keyRange.end.printable(), + deltas.version, + rollbackVersion); TraceEvent(SevWarn, "GranuleRollback", bwData->id) .detail("Granule", metadata->keyRange) .detail("Version", deltas.version) @@ -1528,7 +1533,7 @@ ACTOR Future blobGranuleUpdateFiles(Reference bwData, } else if (!rollbacksInProgress.empty() && rollbacksInProgress.front().first < deltas.version && rollbacksInProgress.front().second > 
deltas.version) { if (BW_DEBUG) { - printf("Skipping mutations @ %ld b/c prior rollback\n", deltas.version); + fmt::print("Skipping mutations @ {} b/c prior rollback\n", deltas.version); } } else { for (auto& delta : deltas.mutations) { @@ -1677,11 +1682,11 @@ ACTOR Future blobGranuleLoadHistory(Reference bwData, } if (BW_DEBUG) { - printf("Loaded %lu history entries for granule [%s - %s) (%d skipped)\n", - historyEntryStack.size(), - metadata->keyRange.begin.printable().c_str(), - metadata->keyRange.end.printable().c_str(), - skipped); + fmt::print("Loaded {0} history entries for granule [{1} - {2}) ({3} skipped)\n", + historyEntryStack.size(), + metadata->keyRange.begin.printable(), + metadata->keyRange.end.printable(), + skipped); } } @@ -1856,15 +1861,15 @@ ACTOR Future handleBlobGranuleFileRequest(Reference bwData } if (BW_REQUEST_DEBUG) { - printf("[%s - %s) @ %ld time traveled back to %s [%s - %s) @ [%ld - %ld)\n", - req.keyRange.begin.printable().c_str(), - req.keyRange.end.printable().c_str(), - req.readVersion, - cur->granuleID.toString().c_str(), - cur->range.begin.printable().c_str(), - cur->range.end.printable().c_str(), - cur->startVersion, - cur->endVersion); + fmt::print("[{0} - {1}) @ {2} time traveled back to {3} [{4} - {5}) @ [{6} - {7})\n", + req.keyRange.begin.printable(), + req.keyRange.end.printable(), + req.readVersion, + cur->granuleID.toString(), + cur->range.begin.printable(), + cur->range.end.printable(), + cur->startVersion, + cur->endVersion); } // lazily load files for old granule if not present @@ -1895,10 +1900,10 @@ ACTOR Future handleBlobGranuleFileRequest(Reference bwData if (rollbackCount == metadata->rollbackCount.get()) { break; } else if (BW_REQUEST_DEBUG) { - printf("[%s - %s) @ %ld hit rollback, restarting waitForVersion\n", - req.keyRange.begin.printable().c_str(), - req.keyRange.end.printable().c_str(), - req.readVersion); + fmt::print("[{0} - {1}) @ {2} hit rollback, restarting waitForVersion\n", + req.keyRange.begin.printable(), + req.keyRange.end.printable(), + req.readVersion); } } chunkFiles = metadata->files; @@ -2223,13 +2228,13 @@ ACTOR Future changeBlobRange(Reference bwData, bool disposeOnCleanup, bool selfReassign) { if (BW_DEBUG) { - printf("%s range for [%s - %s): %s @ (%ld, %ld)\n", - selfReassign ? "Re-assigning" : "Changing", - keyRange.begin.printable().c_str(), - keyRange.end.printable().c_str(), - active ? "T" : "F", - epoch, - seqno); + fmt::print("{0} range for [{1} - {2}): {3} @ ({4}, {5})\n", + selfReassign ? "Re-assigning" : "Changing", + keyRange.begin.printable(), + keyRange.end.printable(), + active ? 
"T" : "F", + epoch, + seqno); } // For each range that intersects this update: @@ -2274,11 +2279,11 @@ ACTOR Future changeBlobRange(Reference bwData, if (r.value().activeMetadata.isValid() && thisAssignmentNewer) { // cancel actors for old range and clear reference if (BW_DEBUG) { - printf(" [%s - %s): @ (%ld, %ld) (cancelling)\n", - r.begin().printable().c_str(), - r.end().printable().c_str(), - r.value().lastEpoch, - r.value().lastSeqno); + fmt::print(" [{0} - {1}): @ ({2}, {3}) (cancelling)\n", + r.begin().printable(), + r.end().printable(), + r.value().lastEpoch, + r.value().lastSeqno); } r.value().activeMetadata.clear(); } else if (!thisAssignmentNewer) { @@ -2299,22 +2304,22 @@ ACTOR Future changeBlobRange(Reference bwData, bwData->granuleMetadata.insert(keyRange, newMetadata); if (BW_DEBUG) { - printf("Inserting new range [%s - %s): %s @ (%ld, %ld)\n", - keyRange.begin.printable().c_str(), - keyRange.end.printable().c_str(), - newMetadata.activeMetadata.isValid() ? "T" : "F", - newMetadata.lastEpoch, - newMetadata.lastSeqno); + fmt::print("Inserting new range [{0} - {1}): {2} @ ({3}, {4})\n", + keyRange.begin.printable(), + keyRange.end.printable(), + newMetadata.activeMetadata.isValid() ? "T" : "F", + newMetadata.lastEpoch, + newMetadata.lastSeqno); } for (auto& it : newerRanges) { if (BW_DEBUG) { - printf("Re-inserting newer range [%s - %s): %s @ (%ld, %ld)\n", - it.first.begin.printable().c_str(), - it.first.end.printable().c_str(), - it.second.activeMetadata.isValid() ? "T" : "F", - it.second.lastEpoch, - it.second.lastSeqno); + fmt::print("Re-inserting newer range [{0} - {1}): {2} @ ({3}, {4})\n", + it.first.begin.printable(), + it.first.end.printable(), + it.second.activeMetadata.isValid() ? "T" : "F", + it.second.lastEpoch, + it.second.lastSeqno); } bwData->granuleMetadata.insert(it.first, it.second); } @@ -2333,18 +2338,19 @@ static bool resumeBlobRange(Reference bwData, KeyRange keyRange, !existingRange.value().activeMetadata.isValid()) { if (BW_DEBUG) { - printf("BW %s got out of date resume range for [%s - %s) @ (%ld, %ld). Currently [%s - %s) @ (%ld, " - "%ld): %s\n", - bwData->id.toString().c_str(), - existingRange.begin().printable().c_str(), - existingRange.end().printable().c_str(), - existingRange.value().lastEpoch, - existingRange.value().lastSeqno, - keyRange.begin.printable().c_str(), - keyRange.end.printable().c_str(), - epoch, - seqno, - existingRange.value().activeMetadata.isValid() ? "T" : "F"); + fmt::print( + "BW {0} got out of date resume range for [{1} - {2}) @ ({3}, {4}). Currently [{5} - {6}) @ ({7}, " + "{8}): {9}\n", + bwData->id.toString(), + existingRange.begin().printable(), + existingRange.end().printable(), + existingRange.value().lastEpoch, + existingRange.value().lastSeqno, + keyRange.begin.printable(), + keyRange.end.printable(), + epoch, + seqno, + existingRange.value().activeMetadata.isValid() ? "T" : "F"); } return false; @@ -2556,13 +2562,13 @@ ACTOR Future blobWorker(BlobWorkerInterface bwInterf, --self->stats.numRangesAssigned; state AssignBlobRangeRequest assignReq = _req; if (BW_DEBUG) { - printf("Worker %s assigned range [%s - %s) @ (%ld, %ld):\n continue=%s\n", - self->id.toString().c_str(), - assignReq.keyRange.begin.printable().c_str(), - assignReq.keyRange.end.printable().c_str(), - assignReq.managerEpoch, - assignReq.managerSeqno, - assignReq.continueAssignment ? 
"T" : "F"); + fmt::print("Worker {0} assigned range [{1} - {2}) @ ({3}, {4}):\n continue={5}\n", + self->id.toString(), + assignReq.keyRange.begin.printable(), + assignReq.keyRange.end.printable(), + assignReq.managerEpoch, + assignReq.managerSeqno, + assignReq.continueAssignment ? "T" : "F"); } if (self->managerEpochOk(assignReq.managerEpoch)) { @@ -2575,13 +2581,13 @@ ACTOR Future blobWorker(BlobWorkerInterface bwInterf, state RevokeBlobRangeRequest revokeReq = _req; --self->stats.numRangesAssigned; if (BW_DEBUG) { - printf("Worker %s revoked range [%s - %s) @ (%ld, %ld):\n dispose=%s\n", - self->id.toString().c_str(), - revokeReq.keyRange.begin.printable().c_str(), - revokeReq.keyRange.end.printable().c_str(), - revokeReq.managerEpoch, - revokeReq.managerSeqno, - revokeReq.dispose ? "T" : "F"); + fmt::print("Worker {0} revoked range [{1} - {2}) @ ({3}, {4}):\n dispose={5}\n", + self->id.toString(), + revokeReq.keyRange.begin.printable(), + revokeReq.keyRange.end.printable(), + revokeReq.managerEpoch, + revokeReq.managerSeqno, + revokeReq.dispose ? "T" : "F"); } if (self->managerEpochOk(revokeReq.managerEpoch)) { diff --git a/fdbserver/VersionedBTree.actor.cpp b/fdbserver/VersionedBTree.actor.cpp index 867d3e2603..64a57f7fbf 100644 --- a/fdbserver/VersionedBTree.actor.cpp +++ b/fdbserver/VersionedBTree.actor.cpp @@ -18,6 +18,7 @@ * limitations under the License. */ +#include "contrib/fmt-8.0.1/include/fmt/format.h" #include "fdbclient/FDBTypes.h" #include "fdbserver/Knobs.h" #include "flow/IRandom.h" @@ -8040,7 +8041,7 @@ TEST_CASE("/redwood/correctness/unit/RedwoodRecordRef") { ASSERT(RedwoodRecordRef::Delta::LengthFormatSizes[2] == 6); ASSERT(RedwoodRecordRef::Delta::LengthFormatSizes[3] == 8); - printf("sizeof(RedwoodRecordRef) = %lu\n", sizeof(RedwoodRecordRef)); + fmt::print("sizeof(RedwoodRecordRef) = {}\n", sizeof(RedwoodRecordRef)); // Test pageID stuff. { @@ -8973,14 +8974,15 @@ TEST_CASE("Lredwood/correctness/unit/deltaTree/IntIntPair") { pos = newPos; } double elapsed = timer() - start; - printf("Seek/skip test, count=%d jumpMax=%d, items=%lu, oldSeek=%d useHint=%d: Elapsed %f seconds %.2f M/s\n", - count, - jumpMax, - items.size(), - old, - useHint, - elapsed, - double(count) / elapsed / 1e6); + fmt::print("Seek/skip test, count={0} jumpMax={1}, items={2}, oldSeek={3} useHint={4}: Elapsed {5} seconds " + "{6:.2f} M/s\n", + count, + jumpMax, + items.size(), + old, + useHint, + elapsed, + double(count) / elapsed / 1e6); }; auto skipSeekPerformance2 = [&](int jumpMax, bool old, bool useHint, int count) { @@ -9016,15 +9018,16 @@ TEST_CASE("Lredwood/correctness/unit/deltaTree/IntIntPair") { pos = newPos; } double elapsed = timer() - start; - printf("DeltaTree2 Seek/skip test, count=%d jumpMax=%d, items=%lu, oldSeek=%d useHint=%d: Elapsed %f seconds " - "%.2f M/s\n", - count, - jumpMax, - items.size(), - old, - useHint, - elapsed, - double(count) / elapsed / 1e6); + fmt::print("DeltaTree2 Seek/skip test, count={0} jumpMax={1}, items={2}, oldSeek={3} useHint={4}: Elapsed {5} " + "seconds " + "{6:.2f} M/s\n", + count, + jumpMax, + items.size(), + old, + useHint, + elapsed, + double(count) / elapsed / 1e6); }; // Compare seeking to nearby elements with and without hints, using the old and new SeekLessThanOrEqual methods. 
@@ -9094,7 +9097,7 @@ TEST_CASE(":/redwood/performance/mutationBuffer") { strings.push_back(randomString(arena, 5)); } - printf("Inserting %d elements and then finding each string...\n", count); + fmt::print("Inserting {} elements and then finding each string...\n", count); double start = timer(); VersionedBTree::MutationBuffer m; for (int i = 0; i < count; ++i) { @@ -9366,14 +9369,14 @@ TEST_CASE("Lredwood/correctness/btree") { commit = map(btree->commit(version), [=, &ops = totalPageOps, v = version](Void) { // Update pager ops before clearing metrics ops += g_redwoodMetrics.pageOps(); - printf("Committed %s PageOps %" PRId64 "/%" PRId64 " (%.2f%%) VerificationMapEntries %lu/%d (%.2f%%)\n", - toString(v).c_str(), - ops, - targetPageOps, - ops * 100.0 / targetPageOps, - written.size(), - maxVerificationMapEntries, - written.size() * 100.0 / maxVerificationMapEntries); + fmt::print("Committed {0} PageOps {1}/{2} ({3:.2f}) VerificationMapEntries {4}/{5} ({6:.2f})\n", + toString(v).c_str(), + ops, + targetPageOps, + ops * 100.0 / targetPageOps, + written.size(), + maxVerificationMapEntries, + written.size() * 100.0 / maxVerificationMapEntries); printf("Committed:\n%s\n", g_redwoodMetrics.toString(true).c_str()); // Notify the background verifier that version is committed and therefore readable @@ -9517,13 +9520,14 @@ ACTOR Future randomScans(VersionedBTree* btree, } } double elapsed = timer() - readStart; - printf("Completed %d scans: width=%d totalbytesRead=%d prefetchBytes=%d scansRate=%d scans/s %.2f MB/s\n", - count, - width, - totalScanBytes, - prefetchBytes, - int(count / elapsed), - double(totalScanBytes) / 1e6 / elapsed); + fmt::print( + "Completed {0} scans: width={1} totalbytesRead={2} prefetchBytes={3} scansRate={4} scans/s {5:.2f} MB/s\n", + count, + width, + totalScanBytes, + prefetchBytes, + int(count / elapsed), + double(totalScanBytes) / 1e6 / elapsed); return Void(); } @@ -9620,10 +9624,10 @@ TEST_CASE(":/redwood/performance/extentQueue") { for (v = 1; v <= numEntries; ++v) { // Sometimes do a commit if (currentCommitSize >= targetCommitSize) { - printf("currentCommitSize: %d, cumulativeCommitSize: %ld, pageCacheCount: %ld\n", - currentCommitSize, - cumulativeCommitSize, - pager->getPageCacheCount()); + fmt::print("currentCommitSize: {0}, cumulativeCommitSize: {1}, pageCacheCount: {2}\n", + currentCommitSize, + cumulativeCommitSize, + pager->getPageCacheCount()); wait(m_extentQueue.flush()); wait(pager->commit(pager->getLastCommittedVersion() + 1)); cumulativeCommitSize += currentCommitSize; @@ -9642,8 +9646,8 @@ TEST_CASE(":/redwood/performance/extentQueue") { } } cumulativeCommitSize += currentCommitSize; - printf( - "Final cumulativeCommitSize: %ld, pageCacheCount: %ld\n", cumulativeCommitSize, pager->getPageCacheCount()); + fmt::print( + "Final cumulativeCommitSize: {0}, pageCacheCount: {1}\n", cumulativeCommitSize, pager->getPageCacheCount()); wait(m_extentQueue.flush()); extentQueueState = m_extentQueue.getState(); printf("Commit ExtentQueue getState(): %s\n", extentQueueState.toString().c_str()); @@ -10097,16 +10101,16 @@ ACTOR Future prefixClusteredInsert(IKeyValueStore* kvs, state int64_t kvBytesTarget = (int64_t)recordCountTarget * recordSize; state int recordsPerPrefix = recordCountTarget / source.numPrefixes(); - printf("\nstoreType: %d\n", static_cast(kvs->getType())); - printf("commitTarget: %d\n", commitTarget); - printf("prefixSource: %s\n", source.toString().c_str()); - printf("usePrefixesInOrder: %d\n", usePrefixesInOrder); - printf("suffixSize: %d\n", 
suffixSize); - printf("valueSize: %d\n", valueSize); - printf("recordSize: %d\n", recordSize); - printf("recordsPerPrefix: %d\n", recordsPerPrefix); - printf("recordCountTarget: %d\n", recordCountTarget); - printf("kvBytesTarget: %" PRId64 "\n", kvBytesTarget); + fmt::print("\nstoreType: {}\n", static_cast(kvs->getType())); + fmt::print("commitTarget: {}\n", commitTarget); + fmt::print("prefixSource: {}\n", source.toString()); + fmt::print("usePrefixesInOrder: {}\n", usePrefixesInOrder); + fmt::print("suffixSize: {}\n", suffixSize); + fmt::print("valueSize: {}\n", valueSize); + fmt::print("recordSize: {}\n", recordSize); + fmt::print("recordsPerPrefix: {}\n", recordsPerPrefix); + fmt::print("recordCountTarget: {}\n", recordCountTarget); + fmt::print("kvBytesTarget: {}\n", kvBytesTarget); state int64_t kvBytes = 0; state int64_t kvBytesTotal = 0; @@ -10186,12 +10190,12 @@ ACTOR Future sequentialInsert(IKeyValueStore* kvs, int prefixLen, int valu state int recordSize = source.prefixLen + sizeof(uint64_t) + valueSize; state int64_t kvBytesTarget = (int64_t)recordCountTarget * recordSize; - printf("\nstoreType: %d\n", static_cast(kvs->getType())); - printf("commitTarget: %d\n", commitTarget); - printf("valueSize: %d\n", valueSize); - printf("recordSize: %d\n", recordSize); - printf("recordCountTarget: %d\n", recordCountTarget); - printf("kvBytesTarget: %" PRId64 "\n", kvBytesTarget); + fmt::print("\nstoreType: {}\n", static_cast(kvs->getType())); + fmt::print("commitTarget: {}\n", commitTarget); + fmt::print("valueSize: {}\n", valueSize); + fmt::print("recordSize: {}\n", recordSize); + fmt::print("recordCountTarget: {}\n", recordCountTarget); + fmt::print("kvBytesTarget: {}\n", kvBytesTarget); state int64_t kvBytes = 0; state int64_t kvBytesTotal = 0; @@ -10320,12 +10324,12 @@ ACTOR Future randomRangeScans(IKeyValueStore* kvs, int recordCountTarget, bool singlePrefix, int rowLimit) { - printf("\nstoreType: %d\n", static_cast(kvs->getType())); - printf("prefixSource: %s\n", source.toString().c_str()); - printf("suffixSize: %d\n", suffixSize); - printf("recordCountTarget: %d\n", recordCountTarget); - printf("singlePrefix: %d\n", singlePrefix); - printf("rowLimit: %d\n", rowLimit); + fmt::print("\nstoreType: {}\n", static_cast(kvs->getType())); + fmt::print("prefixSource: {}\n", source.toString()); + fmt::print("suffixSize: {}\n", suffixSize); + fmt::print("recordCountTarget: {}\n", recordCountTarget); + fmt::print("singlePrefix: {}\n", singlePrefix); + fmt::print("rowLimit: {}\n", rowLimit); state int64_t recordSize = source.prefixLen + suffixSize + valueSize; state int64_t bytesRead = 0; @@ -10336,14 +10340,15 @@ ACTOR Future randomRangeScans(IKeyValueStore* kvs, state double start = timer(); state std::function stats = [&]() { double elapsed = timer() - start; - printf("Cumulative stats: %.2f seconds %d queries %.2f MB %ld records %.2f qps %.2f MB/s %.2f rec/s\r\n", - elapsed, - queries, - bytesRead / 1e6, - recordsRead, - queries / elapsed, - bytesRead / elapsed / 1e6, - recordsRead / elapsed); + fmt::print("Cumulative stats: {0:.2f} seconds {1} queries {2:.2f} MB {3} records {4:.2f} qps {5:.2f} MB/s " + "{6:.2f} rec/s\r\n", + elapsed, + queries, + bytesRead / 1e6, + recordsRead, + queries / elapsed, + bytesRead / elapsed / 1e6, + recordsRead / elapsed); fflush(stdout); }; diff --git a/fdbserver/networktest.actor.cpp b/fdbserver/networktest.actor.cpp index 654cf617f4..a495706db4 100644 --- a/fdbserver/networktest.actor.cpp +++ b/fdbserver/networktest.actor.cpp @@ -18,6 +18,7 @@ * limitations 
under the License. */ +#include "contrib/fmt-8.0.1/include/fmt/format.h" #include "fdbserver/NetworkTest.h" #include "flow/Knobs.h" #include "flow/actorcompiler.h" // This must be the last #include. @@ -584,10 +585,10 @@ struct P2PNetworkTest { self->startTime = now(); - printf("%lu listeners, %lu remotes, %d outgoing connections\n", - self->listeners.size(), - self->remotes.size(), - self->connectionsOut); + fmt::print("{0} listeners, {1} remotes, {2} outgoing connections\n", + self->listeners.size(), + self->remotes.size(), + self->connectionsOut); for (auto n : self->remotes) { printf("Remote: %s\n", n.toString().c_str()); diff --git a/fdbserver/workloads/BlobGranuleVerifier.actor.cpp b/fdbserver/workloads/BlobGranuleVerifier.actor.cpp index cb3812074f..cf9c2aac2c 100644 --- a/fdbserver/workloads/BlobGranuleVerifier.actor.cpp +++ b/fdbserver/workloads/BlobGranuleVerifier.actor.cpp @@ -22,6 +22,7 @@ #include #include +#include "contrib/fmt-8.0.1/include/fmt/format.h" #include "fdbclient/BlobGranuleReader.actor.h" #include "fdbclient/NativeAPI.actor.h" #include "fdbclient/ReadYourWrites.h" @@ -237,13 +238,13 @@ struct BlobGranuleVerifierWorkload : TestWorkload { .detail("BlobSize", blob.first.size()); if (BGV_DEBUG) { - printf("\nMismatch for [%s - %s) @ %ld (%s). F(%d) B(%d):\n", - range.begin.printable().c_str(), - range.end.printable().c_str(), - v, - initialRequest ? "RealTime" : "TimeTravel", - fdb.size(), - blob.first.size()); + fmt::print("\nMismatch for [{0} - {1}) @ {2} ({3}). F({4}) B({5}):\n", + range.begin.printable(), + range.end.printable(), + v, + initialRequest ? "RealTime" : "TimeTravel", + fdb.size(), + blob.first.size()); Optional lastCorrect; for (int i = 0; i < std::max(fdb.size(), blob.first.size()); i++) { @@ -291,11 +292,11 @@ struct BlobGranuleVerifierWorkload : TestWorkload { } printf(" Deltas: (%d)", chunk.newDeltas.size()); if (chunk.newDeltas.size() > 0) { - printf(" with version [%ld - %ld]", - chunk.newDeltas[0].version, - chunk.newDeltas[chunk.newDeltas.size() - 1].version); + fmt::print(" with version [{0} - {1}]", + chunk.newDeltas[0].version, + chunk.newDeltas[chunk.newDeltas.size() - 1].version); } - printf(" IncludedVersion: %ld\n", chunk.includedVersion); + fmt::print(" IncludedVersion: {}\n", chunk.includedVersion); } printf("\n"); } @@ -416,10 +417,10 @@ struct BlobGranuleVerifierWorkload : TestWorkload { state KeyRange r = range; state PromiseStream> chunkStream; if (BGV_DEBUG) { - printf("Final availability check [%s - %s) @ %ld\n", - r.begin.printable().c_str(), - r.end.printable().c_str(), - readVersion); + fmt::print("Final availability check [{0} - {1}) @ {2}\n", + r.begin.printable(), + r.end.printable(), + readVersion); } state KeyRange last; state Future requester = cx->readBlobGranulesStream(chunkStream, r, 0, readVersion); @@ -435,30 +436,31 @@ struct BlobGranuleVerifierWorkload : TestWorkload { break; } if (BGV_DEBUG) { - printf("BG Verifier failed final availability check for [%s - %s) @ %ld with error %s. Last " - "Success=[%s - %s)\n", - r.begin.printable().c_str(), - r.end.printable().c_str(), - readVersion, - e.name(), - last.begin.printable().c_str(), - last.end.printable().c_str()); + fmt::print( + "BG Verifier failed final availability check for [{0} - {1}) @ {2} with error {3}. 
Last " + "Success=[{4} - {5})\n", + r.begin.printable(), + r.end.printable(), + readVersion, + e.name(), + last.begin.printable(), + last.end.printable()); } availabilityPassed = false; break; } } } - printf("Blob Granule Verifier finished with:\n"); - printf(" %d successful final granule checks\n", checks); - printf(" %d failed final granule checks\n", availabilityPassed ? 0 : 1); - printf(" %ld mismatches\n", self->mismatches); - printf(" %ld time travel too old\n", self->timeTravelTooOld); - printf(" %ld errors\n", self->errors); - printf(" %ld initial reads\n", self->initialReads); - printf(" %ld time travel reads\n", self->timeTravelReads); - printf(" %ld rows\n", self->rowsRead); - printf(" %ld bytes\n", self->bytesRead); + fmt::print("Blob Granule Verifier finished with:\n"); + fmt::print(" {} successful final granule checks\n", checks); + fmt::print(" {} failed final granule checks\n", availabilityPassed ? 0 : 1); + fmt::print(" {} mismatches\n", self->mismatches); + fmt::print(" {} time travel too old\n", self->timeTravelTooOld); + fmt::print(" {} errors\n", self->errors); + fmt::print(" {} initial reads\n", self->initialReads); + fmt::print(" {} time travel reads\n", self->timeTravelReads); + fmt::print(" {} rows\n", self->rowsRead); + fmt::print(" {} bytes\n", self->bytesRead); // FIXME: add above as details TraceEvent("BlobGranuleVerifierChecked"); return availabilityPassed && self->mismatches == 0 && checks > 0 && self->timeTravelTooOld == 0;
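Reviewer note (illustrative, not part of the patch): the change applied throughout is mechanical -- printf/sscanf calls whose %ld/%lu specifiers do not portably match int64_t/size_t (int64_t is `long long` on some platforms and `long` on others, which is what triggers -Wformat) are replaced with type-safe fmt::print calls, or with the PRId64 macro where a printf-family call has to stay (as in the sscanf change in ChangeFeedCommand.actor.cpp). A minimal sketch of the pattern, with hypothetical names, assuming fmt is reachable as <fmt/format.h> (the patch itself uses the vendored contrib/fmt-8.0.1 header):

    // before: warns under -Wformat wherever int64_t is not exactly 'long'
    //   printf("version %ld, %lu bytes\n", version, bytes);
    #include <fmt/format.h>
    #include <cinttypes>
    #include <cstdio>

    void report(int64_t version, size_t bytes) {
        // fmt deduces the argument types, so a specifier/argument mismatch cannot occur
        fmt::print("version {0}, {1} bytes\n", version, bytes);
        // printf-compatible alternative where a printf/scanf call is kept
        printf("version %" PRId64 "\n", version);
    }

Positional indices ({0}, {1}) are used in the multi-argument conversions above for readability; plain {} placeholders behave identically when arguments are consumed in order, which is why the single-argument conversions in the patch use bare {}.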