diff --git a/bindings/c/foundationdb/fdb_c.h b/bindings/c/foundationdb/fdb_c.h index cbad17e9fb..3f99094e29 100644 --- a/bindings/c/foundationdb/fdb_c.h +++ b/bindings/c/foundationdb/fdb_c.h @@ -169,7 +169,6 @@ typedef struct mappedkeyvalue { * take the shortcut. */ FDBGetRangeReqAndResult getRange; unsigned char buffer[32]; - fdb_bool_t boundaryAndExist; } FDBMappedKeyValue; #pragma pack(push, 4) diff --git a/bindings/c/test/mako/stats.hpp b/bindings/c/test/mako/stats.hpp index 074cd966fc..29989390c6 100644 --- a/bindings/c/test/mako/stats.hpp +++ b/bindings/c/test/mako/stats.hpp @@ -89,9 +89,9 @@ public: }; class alignas(64) ThreadStatistics { - uint64_t conflicts; - uint64_t total_errors; - uint64_t total_timeouts; + uint64_t conflicts{ 0 }; + uint64_t total_errors{ 0 }; + uint64_t total_timeouts{ 0 }; std::array ops; std::array errors; std::array timeouts; @@ -101,7 +101,11 @@ class alignas(64) ThreadStatistics { public: ThreadStatistics() noexcept { - memset(this, 0, sizeof(ThreadStatistics)); + std::fill(ops.begin(), ops.end(), 0); + std::fill(errors.begin(), errors.end(), 0); + std::fill(timeouts.begin(), timeouts.end(), 0); + std::fill(latency_samples.begin(), latency_samples.end(), 0); + std::fill(latency_us_total.begin(), latency_us_total.end(), 0); sketches.resize(MAX_OP); } diff --git a/bindings/c/test/unit/unit_tests.cpp b/bindings/c/test/unit/unit_tests.cpp index f5fea8b2a4..6c44c0aa09 100644 --- a/bindings/c/test/unit/unit_tests.cpp +++ b/bindings/c/test/unit/unit_tests.cpp @@ -182,17 +182,14 @@ struct GetMappedRangeResult { const std::string& value, const std::string& begin, const std::string& end, - const std::vector>& range_results, - fdb_bool_t boundaryAndExist) - : key(key), value(value), begin(begin), end(end), range_results(range_results), - boundaryAndExist(boundaryAndExist) {} + const std::vector>& range_results) + : key(key), value(value), begin(begin), end(end), range_results(range_results) {} std::string key; std::string value; 
std::string begin; std::string end; std::vector> range_results; - fdb_bool_t boundaryAndExist; }; std::vector mkvs; // True if values remain in the key range requested. @@ -317,7 +314,6 @@ GetMappedRangeResult get_mapped_range(fdb::Transaction& tr, auto value = extractString(mkv.value); auto begin = extractString(mkv.getRange.begin.key); auto end = extractString(mkv.getRange.end.key); - bool boundaryAndExist = mkv.boundaryAndExist; // std::cout << "key:" << key << " value:" << value << " begin:" << begin << " end:" << end << std::endl; std::vector> range_results; @@ -328,7 +324,7 @@ GetMappedRangeResult get_mapped_range(fdb::Transaction& tr, range_results.emplace_back(k, v); // std::cout << "[" << i << "]" << k << " -> " << v << std::endl; } - result.mkvs.emplace_back(key, value, begin, end, range_results, boundaryAndExist); + result.mkvs.emplace_back(key, value, begin, end, range_results); } return result; } @@ -1096,9 +1092,7 @@ TEST_CASE("fdb_transaction_get_mapped_range") { CHECK(!result.more); int id = beginId; - bool boundary; for (int i = 0; i < expectSize; i++, id++) { - boundary = i == 0 || i == expectSize - 1; const auto& mkv = result.mkvs[i]; if (matchIndex == MATCH_INDEX_ALL || i == 0 || i == expectSize - 1) { CHECK(indexEntryKey(id).compare(mkv.key) == 0); @@ -1109,8 +1103,6 @@ TEST_CASE("fdb_transaction_get_mapped_range") { } else { CHECK(EMPTY.compare(mkv.key) == 0); } - bool empty = mkv.range_results.empty(); - CHECK(mkv.boundaryAndExist == (boundary && !empty)); CHECK(EMPTY.compare(mkv.value) == 0); CHECK(mkv.range_results.size() == SPLIT_SIZE); for (int split = 0; split < SPLIT_SIZE; split++) { @@ -1154,9 +1146,7 @@ TEST_CASE("fdb_transaction_get_mapped_range_missing_all_secondary") { CHECK(!result.more); int id = beginId; - bool boundary; for (int i = 0; i < expectSize; i++, id++) { - boundary = i == 0 || i == expectSize - 1; const auto& mkv = result.mkvs[i]; if (matchIndex == MATCH_INDEX_ALL || i == 0 || i == expectSize - 1) { 
CHECK(indexEntryKey(id).compare(mkv.key) == 0); @@ -1167,8 +1157,6 @@ TEST_CASE("fdb_transaction_get_mapped_range_missing_all_secondary") { } else { CHECK(EMPTY.compare(mkv.key) == 0); } - bool empty = mkv.range_results.empty(); - CHECK(mkv.boundaryAndExist == (boundary && !empty)); CHECK(EMPTY.compare(mkv.value) == 0); } break; diff --git a/bindings/java/fdbJNI.cpp b/bindings/java/fdbJNI.cpp index 996f4944c0..5208647873 100644 --- a/bindings/java/fdbJNI.cpp +++ b/bindings/java/fdbJNI.cpp @@ -612,14 +612,14 @@ JNIEXPORT jobject JNICALL Java_com_apple_foundationdb_FutureMappedResults_Future FDBMappedKeyValue kvm = kvms[i]; int kvm_count = kvm.getRange.m_size; - // now it has 5 field, key, value, getRange.begin, getRange.end, boundaryAndExist + // now it has 4 fields, key, value, getRange.begin, getRange.end // this needs to change if FDBMappedKeyValue definition is changed. - const int totalFieldFDBMappedKeyValue = 5; + const int totalFieldFDBMappedKeyValue = 4; const int totalLengths = totalFieldFDBMappedKeyValue + kvm_count * 2; int totalBytes = kvm.key.key_length + kvm.value.key_length + kvm.getRange.begin.key.key_length + - kvm.getRange.end.key.key_length + sizeof(kvm.boundaryAndExist); + kvm.getRange.end.key.key_length; for (int i = 0; i < kvm_count; i++) { auto kv = kvm.getRange.data[i]; totalBytes += kv.key_length + kv.value_length; @@ -663,7 +663,6 @@ JNIEXPORT jobject JNICALL Java_com_apple_foundationdb_FutureMappedResults_Future cpBytesAndLength(pByte, pLength, kvm.value); cpBytesAndLength(pByte, pLength, kvm.getRange.begin.key); cpBytesAndLength(pByte, pLength, kvm.getRange.end.key); - cpBytesAndLengthInner(pByte, pLength, (uint8_t*)&(kvm.boundaryAndExist), sizeof(kvm.boundaryAndExist)); for (int kvm_i = 0; kvm_i < kvm_count; kvm_i++) { auto kv = kvm.getRange.data[kvm_i]; cpBytesAndLengthInner(pByte, pLength, kv.key, kv.key_length); diff --git a/bindings/java/src/integration/com/apple/foundationdb/MappedRangeQueryIntegrationTest.java
b/bindings/java/src/integration/com/apple/foundationdb/MappedRangeQueryIntegrationTest.java index 3aedef4d1e..82bb6ffc43 100644 --- a/bindings/java/src/integration/com/apple/foundationdb/MappedRangeQueryIntegrationTest.java +++ b/bindings/java/src/integration/com/apple/foundationdb/MappedRangeQueryIntegrationTest.java @@ -209,11 +209,6 @@ class MappedRangeQueryIntegrationTest { assertByteArrayEquals(indexEntryKey(id), mappedKeyValue.getKey()); assertByteArrayEquals(EMPTY, mappedKeyValue.getValue()); assertByteArrayEquals(indexEntryKey(id), mappedKeyValue.getKey()); - if (id == begin || id == end - 1) { - Assertions.assertTrue(mappedKeyValue.getBoundaryAndExist()); - } else { - Assertions.assertFalse(mappedKeyValue.getBoundaryAndExist()); - } byte[] prefix = recordKeyPrefix(id); assertByteArrayEquals(prefix, mappedKeyValue.getRangeBegin()); prefix[prefix.length - 1] = (byte)0x01; diff --git a/bindings/java/src/main/com/apple/foundationdb/MappedKeyValue.java b/bindings/java/src/main/com/apple/foundationdb/MappedKeyValue.java index 31e375be7c..0826abfab9 100644 --- a/bindings/java/src/main/com/apple/foundationdb/MappedKeyValue.java +++ b/bindings/java/src/main/com/apple/foundationdb/MappedKeyValue.java @@ -33,27 +33,22 @@ public class MappedKeyValue extends KeyValue { private final byte[] rangeBegin; private final byte[] rangeEnd; private final List rangeResult; - private final int boundaryAndExist; - // now it has 5 field, key, value, getRange.begin, getRange.end, boundaryAndExist + // now it has 4 fields, key, value, getRange.begin, getRange.end // this needs to change if FDBMappedKeyValue definition is changed. 
- private static final int TOTAL_SERIALIZED_FIELD_FDBMappedKeyValue = 5; + private static final int TOTAL_SERIALIZED_FIELD_FDBMappedKeyValue = 4; - public MappedKeyValue(byte[] key, byte[] value, byte[] rangeBegin, byte[] rangeEnd, List rangeResult, - int boundaryAndExist) { + public MappedKeyValue(byte[] key, byte[] value, byte[] rangeBegin, byte[] rangeEnd, List rangeResult) { super(key, value); this.rangeBegin = rangeBegin; this.rangeEnd = rangeEnd; this.rangeResult = rangeResult; - this.boundaryAndExist = boundaryAndExist; } public byte[] getRangeBegin() { return rangeBegin; } public byte[] getRangeEnd() { return rangeEnd; } - public boolean getBoundaryAndExist() { return boundaryAndExist == 0 ? false : true; } - public List getRangeResult() { return rangeResult; } public static MappedKeyValue fromBytes(byte[] bytes, int[] lengths) { @@ -69,8 +64,6 @@ public class MappedKeyValue extends KeyValue { byte[] value = takeBytes(offset, bytes, lengths); byte[] rangeBegin = takeBytes(offset, bytes, lengths); byte[] rangeEnd = takeBytes(offset, bytes, lengths); - byte[] boundaryAndExistBytes = takeBytes(offset, bytes, lengths); - int boundaryAndExist = ByteBuffer.wrap(boundaryAndExistBytes).order(ByteOrder.LITTLE_ENDIAN).getInt(); if ((lengths.length - TOTAL_SERIALIZED_FIELD_FDBMappedKeyValue) % 2 != 0) { throw new IllegalArgumentException("There needs to be an even number of lengths!"); @@ -82,7 +75,7 @@ public class MappedKeyValue extends KeyValue { byte[] v = takeBytes(offset, bytes, lengths); rangeResult.add(new KeyValue(k, v)); } - return new MappedKeyValue(key, value, rangeBegin, rangeEnd, rangeResult, boundaryAndExist); + return new MappedKeyValue(key, value, rangeBegin, rangeEnd, rangeResult); } static class Offset { @@ -109,17 +102,14 @@ public class MappedKeyValue extends KeyValue { return false; MappedKeyValue rhs = (MappedKeyValue) obj; - return Arrays.equals(rangeBegin, rhs.rangeBegin) - && Arrays.equals(rangeEnd, rhs.rangeEnd) - && 
Objects.equals(rangeResult, rhs.rangeResult) - && boundaryAndExist == rhs.boundaryAndExist; + return Arrays.equals(rangeBegin, rhs.rangeBegin) && Arrays.equals(rangeEnd, rhs.rangeEnd) && + Objects.equals(rangeResult, rhs.rangeResult); } @Override public int hashCode() { int hashForResult = rangeResult == null ? 0 : rangeResult.hashCode(); - return 17 + - (29 * hashForResult + boundaryAndExist + 37 * Arrays.hashCode(rangeBegin) + Arrays.hashCode(rangeEnd)); + return 17 + (29 * hashForResult + 37 * Arrays.hashCode(rangeBegin) + Arrays.hashCode(rangeEnd)); } @Override @@ -128,7 +118,6 @@ public class MappedKeyValue extends KeyValue { sb.append("rangeBegin=").append(ByteArrayUtil.printable(rangeBegin)); sb.append(", rangeEnd=").append(ByteArrayUtil.printable(rangeEnd)); sb.append(", rangeResult=").append(rangeResult); - sb.append(", boundaryAndExist=").append(boundaryAndExist); sb.append('}'); return super.toString() + "->" + sb.toString(); } diff --git a/bindings/java/src/main/com/apple/foundationdb/MappedRangeResultDirectBufferIterator.java b/bindings/java/src/main/com/apple/foundationdb/MappedRangeResultDirectBufferIterator.java index 6257596d85..169cef42e0 100644 --- a/bindings/java/src/main/com/apple/foundationdb/MappedRangeResultDirectBufferIterator.java +++ b/bindings/java/src/main/com/apple/foundationdb/MappedRangeResultDirectBufferIterator.java @@ -51,8 +51,6 @@ class MappedRangeResultDirectBufferIterator extends DirectBufferIterator impleme final byte[] value = getString(); final byte[] rangeBegin = getString(); final byte[] rangeEnd = getString(); - final byte[] boundaryAndExistBytes = getString(); - final int boundaryAndExist = ByteBuffer.wrap(boundaryAndExistBytes).getInt(); final int rangeResultSize = byteBuffer.getInt(); List rangeResult = new ArrayList(); for (int i = 0; i < rangeResultSize; i++) { @@ -61,7 +59,7 @@ class MappedRangeResultDirectBufferIterator extends DirectBufferIterator impleme rangeResult.add(new KeyValue(k, v)); } current += 1; - 
return new MappedKeyValue(key, value, rangeBegin, rangeEnd, rangeResult, boundaryAndExist); + return new MappedKeyValue(key, value, rangeBegin, rangeEnd, rangeResult); } private byte[] getString() { diff --git a/contrib/sqlite/hash.h b/contrib/sqlite/hash.h index df98d432c5..937c4abc97 100644 --- a/contrib/sqlite/hash.h +++ b/contrib/sqlite/hash.h @@ -47,7 +47,7 @@ struct Hash { struct _ht { /* the hash table */ int count; /* Number of entries with this hash */ HashElem* chain; /* Pointer to first entry with this hash */ - } * ht; + }* ht; }; /* Each element in the hash table is an instance of the following diff --git a/contrib/sqlite/sqlite3.h b/contrib/sqlite/sqlite3.h index 7c66883070..93f9b0f42b 100644 --- a/contrib/sqlite/sqlite3.h +++ b/contrib/sqlite/sqlite3.h @@ -4623,17 +4623,17 @@ struct sqlite3_index_info { unsigned char op; /* Constraint operator */ unsigned char usable; /* True if this constraint is usable */ int iTermOffset; /* Used internally - xBestIndex should ignore */ - } * aConstraint; /* Table of WHERE clause constraints */ + }* aConstraint; /* Table of WHERE clause constraints */ int nOrderBy; /* Number of terms in the ORDER BY clause */ struct sqlite3_index_orderby { int iColumn; /* Column number */ unsigned char desc; /* True for DESC. False for ASC. 
*/ - } * aOrderBy; /* The ORDER BY clause */ + }* aOrderBy; /* The ORDER BY clause */ /* Outputs */ struct sqlite3_index_constraint_usage { int argvIndex; /* if >0, constraint is part of argv to xFilter */ unsigned char omit; /* Do not code a test for this constraint */ - } * aConstraintUsage; + }* aConstraintUsage; int idxNum; /* Number used to identify the index */ char* idxStr; /* String, possibly obtained from sqlite3_malloc */ int needToFreeIdxStr; /* Free idxStr using sqlite3_free() if true */ diff --git a/documentation/tutorial/tutorial.actor.cpp b/documentation/tutorial/tutorial.actor.cpp index 245e6d09e3..e8af4a8ad8 100644 --- a/documentation/tutorial/tutorial.actor.cpp +++ b/documentation/tutorial/tutorial.actor.cpp @@ -63,7 +63,9 @@ ACTOR Future simpleTimer() { ACTOR Future someFuture(Future ready) { // loop choose {} works as well here - the braces are optional loop choose { - when(wait(delay(0.5))) { std::cout << "Still waiting...\n"; } + when(wait(delay(0.5))) { + std::cout << "Still waiting...\n"; + } when(int r = wait(ready)) { std::cout << format("Ready %d\n", r); wait(delay(double(r))); @@ -84,8 +86,12 @@ ACTOR Future promiseDemo() { ACTOR Future eventLoop(AsyncTrigger* trigger) { loop choose { - when(wait(delay(0.5))) { std::cout << "Still waiting...\n"; } - when(wait(trigger->onTrigger())) { std::cout << "Triggered!\n"; } + when(wait(delay(0.5))) { + std::cout << "Still waiting...\n"; + } + when(wait(trigger->onTrigger())) { + std::cout << "Triggered!\n"; + } } } @@ -185,7 +191,9 @@ ACTOR Future echoServer() { when(GetInterfaceRequest req = waitNext(echoServer.getInterface.getFuture())) { req.reply.send(echoServer); } - when(EchoRequest req = waitNext(echoServer.echo.getFuture())) { req.reply.send(req.message); } + when(EchoRequest req = waitNext(echoServer.echo.getFuture())) { + req.reply.send(req.message); + } when(ReverseRequest req = waitNext(echoServer.reverse.getFuture())) { req.reply.send(std::string(req.message.rbegin(), 
req.message.rend())); } diff --git a/fdbbackup/backup.actor.cpp b/fdbbackup/backup.actor.cpp index a0547a32ca..238fb51ac0 100644 --- a/fdbbackup/backup.actor.cpp +++ b/fdbbackup/backup.actor.cpp @@ -2595,7 +2595,9 @@ ACTOR Future expireBackupData(const char* name, lastProgress = p; } } - when(wait(expire)) { break; } + when(wait(expire)) { + break; + } } } @@ -2638,7 +2640,9 @@ ACTOR Future deleteBackupContainer(const char* name, loop { choose { - when(wait(done)) { break; } + when(wait(done)) { + break; + } when(wait(delay(5))) { if (numDeleted != lastUpdate) { printf("\r%d...", numDeleted); @@ -3044,7 +3048,7 @@ static std::vector> parseLine(std::string& line, bool& er static void addKeyRange(std::string optionValue, Standalone>& keyRanges) { bool err = false, partial = false; - int tokenArray = 0; + [[maybe_unused]] int tokenArray = 0; auto parsed = parseLine(optionValue, err, partial); diff --git a/fdbcli/fdbcli.actor.cpp b/fdbcli/fdbcli.actor.cpp index bb0b32c4f0..413d1115f5 100644 --- a/fdbcli/fdbcli.actor.cpp +++ b/fdbcli/fdbcli.actor.cpp @@ -685,7 +685,9 @@ ACTOR template Future makeInterruptable(Future f) { Future interrupt = LineNoise::onKeyboardInterrupt(); choose { - when(T t = wait(f)) { return t; } + when(T t = wait(f)) { + return t; + } when(wait(interrupt)) { f.cancel(); throw operation_cancelled(); diff --git a/fdbclient/ManagementAPI.actor.cpp b/fdbclient/ManagementAPI.actor.cpp index ca18a35df4..6d7bf5f3a0 100644 --- a/fdbclient/ManagementAPI.actor.cpp +++ b/fdbclient/ManagementAPI.actor.cpp @@ -1182,7 +1182,9 @@ ACTOR Future> changeQuorumChecker(Transaction* tr, choose { when(wait(waitForAll(leaderServers))) {} - when(wait(delay(5.0))) { return CoordinatorsResult::COORDINATOR_UNREACHABLE; } + when(wait(delay(5.0))) { + return CoordinatorsResult::COORDINATOR_UNREACHABLE; + } } TraceEvent("ChangeQuorumCheckerSetCoordinatorsKey") .detail("CurrentCoordinators", old.toString()) @@ -1284,7 +1286,9 @@ ACTOR Future changeQuorum(Database cx, Reference 
asyncDeserializeClusterInterface(Reference> s state Future deserializer = asyncDeserialize(serializedInfo, knownLeader); loop { choose { - when(wait(deserializer)) { UNSTOPPABLE_ASSERT(false); } + when(wait(deserializer)) { + UNSTOPPABLE_ASSERT(false); + } when(wait(knownLeader->onChange())) { if (knownLeader->get().present()) { outKnownLeader->set(knownLeader->get().get().clientInterface); diff --git a/fdbclient/NativeAPI.actor.cpp b/fdbclient/NativeAPI.actor.cpp index 7d2bcac989..a9e12b9b9f 100644 --- a/fdbclient/NativeAPI.actor.cpp +++ b/fdbclient/NativeAPI.actor.cpp @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include @@ -1016,7 +1017,9 @@ ACTOR static Future monitorClientDBInfoChange(DatabaseContext* cx, proxiesChangeTrigger->trigger(); } } - when(wait(actors.getResult())) { UNSTOPPABLE_ASSERT(false); } + when(wait(actors.getResult())) { + UNSTOPPABLE_ASSERT(false); + } } } } @@ -1498,9 +1501,10 @@ DatabaseContext::DatabaseContext(Reference defaultTenant) - : lockAware(lockAware), switchable(switchable), connectionRecord(connectionRecord), proxyProvisional(false), - clientLocality(clientLocality), enableLocalityLoadBalance(enableLocalityLoadBalance), defaultTenant(defaultTenant), - internal(internal), cc("TransactionMetrics"), transactionReadVersions("ReadVersions", cc), + : dbId(deterministicRandom()->randomUniqueID()), lockAware(lockAware), switchable(switchable), + connectionRecord(connectionRecord), proxyProvisional(false), clientLocality(clientLocality), + enableLocalityLoadBalance(enableLocalityLoadBalance), defaultTenant(defaultTenant), internal(internal), + cc("TransactionMetrics", dbId.toString()), transactionReadVersions("ReadVersions", cc), transactionReadVersionsThrottled("ReadVersionsThrottled", cc), transactionReadVersionsCompleted("ReadVersionsCompleted", cc), transactionReadVersionBatches("ReadVersionBatches", cc), @@ -1531,11 +1535,11 @@ DatabaseContext::DatabaseContext(Reference(specialKeys.begin, specialKeys.end, 
/* test */ false)), connectToDatabaseEventCacheHolder(format("ConnectToDatabase/%s", dbId.toString().c_str())) { - dbId = deterministicRandom()->randomUniqueID(); - TraceEvent("DatabaseContextCreated", dbId).backtrace(); connected = (clientInfo->get().commitProxies.size() && clientInfo->get().grvProxies.size()) @@ -3360,7 +3362,6 @@ ACTOR Future> getValue(Reference trState, span.addAttribute("tenant"_sr, trState->tenant().get()); } - span.addAttribute("key"_sr, key); trState->cx->validateVersion(ver); loop { @@ -3402,7 +3403,9 @@ ACTOR Future> getValue(Reference trState, std::vector{ transaction_too_old(), future_version() }); } choose { - when(wait(trState->cx->connectionFileChanged())) { throw transaction_too_old(); } + when(wait(trState->cx->connectionFileChanged())) { + throw transaction_too_old(); + } when(GetValueReply _reply = wait(loadBalance( trState->cx.getPtr(), locationInfo.locations, @@ -3549,7 +3552,9 @@ ACTOR Future getKey(Reference trState, state GetKeyReply reply; try { choose { - when(wait(trState->cx->connectionFileChanged())) { throw transaction_too_old(); } + when(wait(trState->cx->connectionFileChanged())) { + throw transaction_too_old(); + } when(GetKeyReply _reply = wait(loadBalance( trState->cx.getPtr(), locationInfo.locations, @@ -3713,7 +3718,9 @@ ACTOR Future watchValue(Database cx, Reference p TaskPriority::DefaultPromiseEndpoint))) { resp = r; } - when(wait(cx->connectionRecord ? cx->connectionRecord->onChange() : Never())) { wait(Never()); } + when(wait(cx->connectionRecord ? 
cx->connectionRecord->onChange() : Never())) { + wait(Never()); + } } if (watchValueID.present()) { g_traceBatch.addEvent("WatchValueDebug", watchValueID.get().first(), "NativeAPI.watchValue.After"); @@ -4076,7 +4083,9 @@ Future getExactRange(Reference trState, state GetKeyValuesFamilyReply rep; try { choose { - when(wait(trState->cx->connectionFileChanged())) { throw transaction_too_old(); } + when(wait(trState->cx->connectionFileChanged())) { + throw transaction_too_old(); + } when(GetKeyValuesFamilyReply _rep = wait(loadBalance( trState->cx.getPtr(), locations[shard].locations, @@ -4284,7 +4293,6 @@ int64_t inline getRangeResultFamilyBytes(MappedRangeResultRef result) { int64_t bytes = 0; for (const MappedKeyValueRef& mappedKeyValue : result) { bytes += mappedKeyValue.key.size() + mappedKeyValue.value.size(); - bytes += sizeof(mappedKeyValue.boundaryAndExist); auto& reqAndResult = mappedKeyValue.reqAndResult; if (std::holds_alternative(reqAndResult)) { auto getValue = std::get(reqAndResult); @@ -4976,7 +4984,9 @@ ACTOR Future getRangeStreamFragment(Reference trState, return Void(); } - when(GetKeyValuesStreamReply _rep = waitNext(replyStream.getFuture())) { rep = _rep; } + when(GetKeyValuesStreamReply _rep = waitNext(replyStream.getFuture())) { + rep = _rep; + } } ++trState->cx->transactionPhysicalReadsCompleted; } catch (Error& e) { @@ -5473,7 +5483,9 @@ ACTOR Future watch(Reference watch, loop { choose { // NativeAPI watchValue future finishes or errors - when(wait(watch->watchFuture)) { break; } + when(wait(watch->watchFuture)) { + break; + } when(wait(cx->connectionFileChanged())) { CODE_PROBE(true, "Recreated a watch after switch"); @@ -7065,7 +7077,9 @@ ACTOR Future getConsistentReadVersion(SpanContext parentSpa state Future onProxiesChanged = cx->onProxiesChanged(); choose { - when(wait(onProxiesChanged)) { onProxiesChanged = cx->onProxiesChanged(); } + when(wait(onProxiesChanged)) { + onProxiesChanged = cx->onProxiesChanged(); + } 
when(GetReadVersionReply v = wait(basicLoadBalance(cx->getGrvProxies(UseProvisionalProxies( flags & GetReadVersionRequest::FLAG_USE_PROVISIONAL_PROXIES)), @@ -7491,7 +7505,9 @@ ACTOR Future getClusterProtocolImpl( needToConnect = false; } choose { - when(wait(coordinator->onChange())) { needToConnect = true; } + when(wait(coordinator->onChange())) { + needToConnect = true; + } when(ProtocolVersion pv = wait(protocolVersion)) { if (!expectedVersion.present() || expectedVersion.get() != pv) { @@ -8878,7 +8894,9 @@ Reference Transaction::createTrLogInfoProbabilistically(cons void Transaction::setTransactionID(UID id) { ASSERT(getSize() == 0); - trState->spanContext = SpanContext(id, trState->spanContext.spanID); + trState->spanContext = SpanContext(id, trState->spanContext.spanID, trState->spanContext.m_Flags); + tr.spanContext = trState->spanContext; + span.context = trState->spanContext; } void Transaction::setToken(uint64_t token) { @@ -9067,8 +9085,12 @@ ACTOR static Future> getCheckpointMetaDataForRan } choose { - when(wait(cx->connectionFileChanged())) { cx->invalidateCache(KeyRef(), range); } - when(wait(waitForAll(futures))) { break; } + when(wait(cx->connectionFileChanged())) { + cx->invalidateCache(KeyRef(), range); + } + when(wait(waitForAll(futures))) { + break; + } when(wait(delay(timeout))) { TraceEvent(SevWarn, "GetCheckpointTimeout").detail("Range", range).detail("Version", version); } @@ -9523,6 +9545,9 @@ ACTOR Future changeFeedTSSValidator(ChangeFeedStreamRequest req, Version next = waitNext(data->get().ssStreamSummary.getFuture()); ssSummary.push_back(next); } catch (Error& e) { + if (e.code() == error_code_actor_cancelled) { + throw; + } if (e.code() != error_code_end_of_stream) { data->get().complete(); if (e.code() != error_code_operation_cancelled) { @@ -9672,15 +9697,11 @@ Version ChangeFeedData::getVersion() { // native api has consumed and processed, them, and then the fdb client has consumed all of the mutations. 
ACTOR Future changeFeedWaitLatest(Reference self, Version version) { // wait on SS to have sent up through version - int desired = 0; - int waiting = 0; std::vector> allAtLeast; for (auto& it : self->storageData) { if (it->version.get() < version) { - waiting++; if (version > it->desired.get()) { it->desired.set(version); - desired++; } allAtLeast.push_back(it->version.whenAtLeast(version)); } @@ -9739,8 +9760,12 @@ ACTOR Future changeFeedWhenAtLatest(Reference self, Versio // only allowed to use empty versions if you're caught up Future waitEmptyVersion = (self->notAtLatest.get() == 0) ? changeFeedWaitLatest(self, version) : Never(); choose { - when(wait(waitEmptyVersion)) { break; } - when(wait(lastReturned)) { break; } + when(wait(waitEmptyVersion)) { + break; + } + when(wait(lastReturned)) { + break; + } when(wait(self->refresh.getFuture())) {} when(wait(self->notAtLatest.onChange())) {} } diff --git a/fdbclient/PaxosConfigTransaction.actor.cpp b/fdbclient/PaxosConfigTransaction.actor.cpp index a1604cb5fb..a2168b1349 100644 --- a/fdbclient/PaxosConfigTransaction.actor.cpp +++ b/fdbclient/PaxosConfigTransaction.actor.cpp @@ -209,8 +209,12 @@ class GetGenerationQuorum { } try { choose { - when(ConfigGeneration generation = wait(self->result.getFuture())) { return generation; } - when(wait(self->actors.getResult())) { ASSERT(false); } + when(ConfigGeneration generation = wait(self->result.getFuture())) { + return generation; + } + when(wait(self->actors.getResult())) { + ASSERT(false); + } } } catch (Error& e) { if (e.code() == error_code_failed_to_reach_quorum) { diff --git a/fdbclient/ReadYourWrites.actor.cpp b/fdbclient/ReadYourWrites.actor.cpp index 3b8b9db3e6..de5ae89e0c 100644 --- a/fdbclient/ReadYourWrites.actor.cpp +++ b/fdbclient/ReadYourWrites.actor.cpp @@ -356,16 +356,24 @@ public: Req req, Snapshot snapshot) { choose { - when(typename Req::Result result = wait(readThrough(ryw, req, snapshot))) { return result; } - 
when(wait(ryw->resetPromise.getFuture())) { throw internal_error(); } + when(typename Req::Result result = wait(readThrough(ryw, req, snapshot))) { + return result; + } + when(wait(ryw->resetPromise.getFuture())) { + throw internal_error(); + } } } ACTOR template static Future readWithConflictRangeSnapshot(ReadYourWritesTransaction* ryw, Req req) { state SnapshotCache::iterator it(&ryw->cache, &ryw->writes); choose { - when(typename Req::Result result = wait(read(ryw, req, &it))) { return result; } - when(wait(ryw->resetPromise.getFuture())) { throw internal_error(); } + when(typename Req::Result result = wait(read(ryw, req, &it))) { + return result; + } + when(wait(ryw->resetPromise.getFuture())) { + throw internal_error(); + } } } ACTOR template @@ -381,7 +389,9 @@ public: addConflictRange(ryw, req, it.extractWriteMapIterator(), result); return result; } - when(wait(ryw->resetPromise.getFuture())) { throw internal_error(); } + when(wait(ryw->resetPromise.getFuture())) { + throw internal_error(); + } } } template @@ -1201,7 +1211,9 @@ public: addConflictRangeAndMustUnmodified(ryw, req, writes, result); return result; } - when(wait(ryw->resetPromise.getFuture())) { throw internal_error(); } + when(wait(ryw->resetPromise.getFuture())) { + throw internal_error(); + } } } @@ -1452,9 +1464,13 @@ public: ACTOR static Future getReadVersion(ReadYourWritesTransaction* ryw) { choose { - when(Version v = wait(ryw->tr.getReadVersion())) { return v; } + when(Version v = wait(ryw->tr.getReadVersion())) { + return v; + } - when(wait(ryw->resetPromise.getFuture())) { throw internal_error(); } + when(wait(ryw->resetPromise.getFuture())) { + throw internal_error(); + } } } }; diff --git a/fdbclient/S3BlobStore.actor.cpp b/fdbclient/S3BlobStore.actor.cpp index e6a5fc6196..8f2c396ced 100644 --- a/fdbclient/S3BlobStore.actor.cpp +++ b/fdbclient/S3BlobStore.actor.cpp @@ -489,7 +489,9 @@ ACTOR Future deleteRecursively_impl(Reference b, loop { choose { // Throw if done throws, otherwise 
don't stop until end_of_stream - when(wait(done)) { done = Never(); } + when(wait(done)) { + done = Never(); + } when(S3BlobStoreEndpoint::ListResult list = waitNext(resultStream.getFuture())) { for (auto& object : list.objects) { @@ -1205,7 +1207,9 @@ ACTOR Future listObjects_impl(Reference SpecialKeySpace::checkRYWValid(SpecialKeySpace* sks, wait(SpecialKeySpace::getRangeAggregationActor(sks, ryw, begin, end, limits, reverse))) { return result; } - when(wait(ryw->resetFuture())) { throw internal_error(); } + when(wait(ryw->resetFuture())) { + throw internal_error(); + } } } diff --git a/fdbclient/TaskBucket.actor.cpp b/fdbclient/TaskBucket.actor.cpp index 347395892b..ad8aefa08d 100644 --- a/fdbclient/TaskBucket.actor.cpp +++ b/fdbclient/TaskBucket.actor.cpp @@ -870,13 +870,13 @@ TaskBucket::TaskBucket(const Subspace& subspace, AccessSystemKeys sysAccess, PriorityBatch priorityBatch, LockAware lockAware) - : cc("TaskBucket"), dispatchSlotChecksStarted("DispatchSlotChecksStarted", cc), dispatchErrors("DispatchErrors", cc), + : dbgid(deterministicRandom()->randomUniqueID()), cc("TaskBucket", dbgid.toString()), + dispatchSlotChecksStarted("DispatchSlotChecksStarted", cc), dispatchErrors("DispatchErrors", cc), dispatchDoTasks("DispatchDoTasks", cc), dispatchEmptyTasks("DispatchEmptyTasks", cc), - dispatchSlotChecksComplete("DispatchSlotChecksComplete", cc), dbgid(deterministicRandom()->randomUniqueID()), - prefix(subspace), active(prefix.get("ac"_sr)), pauseKey(prefix.pack("pause"_sr)), available(prefix.get("av"_sr)), - available_prioritized(prefix.get("avp"_sr)), timeouts(prefix.get("to"_sr)), - timeout(CLIENT_KNOBS->TASKBUCKET_TIMEOUT_VERSIONS), system_access(sysAccess), priority_batch(priorityBatch), - lockAware(lockAware) {} + dispatchSlotChecksComplete("DispatchSlotChecksComplete", cc), prefix(subspace), active(prefix.get("ac"_sr)), + pauseKey(prefix.pack("pause"_sr)), available(prefix.get("av"_sr)), available_prioritized(prefix.get("avp"_sr)), + 
timeouts(prefix.get("to"_sr)), timeout(CLIENT_KNOBS->TASKBUCKET_TIMEOUT_VERSIONS), system_access(sysAccess), + priority_batch(priorityBatch), lockAware(lockAware) {} TaskBucket::~TaskBucket() {} diff --git a/fdbclient/Tracing.actor.cpp b/fdbclient/Tracing.actor.cpp index a6a1130305..178a244f0f 100644 --- a/fdbclient/Tracing.actor.cpp +++ b/fdbclient/Tracing.actor.cpp @@ -18,7 +18,7 @@ * limitations under the License. */ -#include "fdbrpc/Msgpack.h" +#include "flow/Msgpack.h" #include "fdbclient/Tracing.h" #include "flow/IRandom.h" #include "flow/UnitTest.h" @@ -447,8 +447,6 @@ TEST_CASE("/flow/Tracing/AddAttributes") { SpanContext(deterministicRandom()->randomUniqueID(), deterministicRandom()->randomUInt64(), TraceFlags::sampled)); - IKnobCollection::getMutableGlobalKnobCollection().setKnob("tracing_span_attributes_enabled", - KnobValueRef::create(bool{ true })); auto arena = span1.arena; span1.addAttribute(StringRef(arena, "foo"_sr), StringRef(arena, "bar"_sr)); span1.addAttribute(StringRef(arena, "operation"_sr), StringRef(arena, "grv"_sr)); @@ -567,8 +565,6 @@ std::string readMPString(uint8_t* index) { // Windows doesn't like lack of header and declaration of constructor for FastUDPTracer #ifndef WIN32 TEST_CASE("/flow/Tracing/FastUDPMessagePackEncoding") { - IKnobCollection::getMutableGlobalKnobCollection().setKnob("tracing_span_attributes_enabled", - KnobValueRef::create(bool{ true })); Span span1("encoded_span"_loc); auto request = MsgpackBuffer{ .buffer = std::make_unique(kTraceBufferSize), .data_size = 0, diff --git a/fdbclient/VersionVector.cpp b/fdbclient/VersionVector.cpp index 81f0637e9f..ffd11a1656 100644 --- a/fdbclient/VersionVector.cpp +++ b/fdbclient/VersionVector.cpp @@ -112,7 +112,7 @@ void populateVersionVector(VersionVector& vv, int tagsPerLocality = tagCount / localityCount; // Populate localities. 
- for (int i = 0; localities.size() < (size_t)localityCount; i++) { + while (localities.size() < (size_t)localityCount) { int8_t locality = deterministicRandom()->randomInt(tagLocalityInvalid + 1, INT8_MAX); if (std::find(localities.begin(), localities.end(), locality) == localities.end()) { localities.push_back(locality); diff --git a/fdbclient/include/fdbclient/DatabaseContext.h b/fdbclient/include/fdbclient/DatabaseContext.h index 16a10d79d3..b5499c904a 100644 --- a/fdbclient/include/fdbclient/DatabaseContext.h +++ b/fdbclient/include/fdbclient/DatabaseContext.h @@ -439,6 +439,8 @@ public: void expireThrottles(); + UID dbId; + // Key DB-specific information Reference>> connectionRecord; AsyncTrigger proxiesChangeTrigger; @@ -521,7 +523,6 @@ public: // servers by their tags). std::unordered_map ssidTagMapping; - UID dbId; IsInternal internal; // Only contexts created through the C client and fdbcli are non-internal PrioritizedTransactionTagMap throttledTags; diff --git a/fdbclient/include/fdbclient/FDBTypes.h b/fdbclient/include/fdbclient/FDBTypes.h index 80277bb0f7..b941b38bc5 100644 --- a/fdbclient/include/fdbclient/FDBTypes.h +++ b/fdbclient/include/fdbclient/FDBTypes.h @@ -814,18 +814,9 @@ struct MappedKeyValueRef : KeyValueRef { MappedReqAndResultRef reqAndResult; - // boundary KVs are always returned so that caller can use it as a continuation, - // for non-boundary KV, it is always false. - // for boundary KV, it is true only when the secondary query succeeds(return non-empty). - // Note: only MATCH_INDEX_MATCHED_ONLY and MATCH_INDEX_UNMATCHED_ONLY modes can make use of it, - // to decide whether the boudnary is a match/unmatch. - // In the case of MATCH_INDEX_ALL and MATCH_INDEX_NONE, caller should not care if boundary has a match or not. 
- bool boundaryAndExist; - MappedKeyValueRef() = default; MappedKeyValueRef(Arena& a, const MappedKeyValueRef& copyFrom) : KeyValueRef(a, copyFrom) { const auto& reqAndResultCopyFrom = copyFrom.reqAndResult; - boundaryAndExist = copyFrom.boundaryAndExist; if (std::holds_alternative(reqAndResultCopyFrom)) { auto getValue = std::get(reqAndResultCopyFrom); reqAndResult = GetValueReqAndResultRef(a, getValue); @@ -839,7 +830,7 @@ struct MappedKeyValueRef : KeyValueRef { bool operator==(const MappedKeyValueRef& rhs) const { return static_cast(*this) == static_cast(rhs) && - reqAndResult == rhs.reqAndResult && boundaryAndExist == rhs.boundaryAndExist; + reqAndResult == rhs.reqAndResult; } bool operator!=(const MappedKeyValueRef& rhs) const { return !(rhs == *this); } @@ -849,7 +840,7 @@ struct MappedKeyValueRef : KeyValueRef { template void serialize(Ar& ar) { - serializer(ar, ((KeyValueRef&)*this), reqAndResult, boundaryAndExist); + serializer(ar, ((KeyValueRef&)*this), reqAndResult); } }; diff --git a/fdbclient/include/fdbclient/MultiVersionTransaction.h b/fdbclient/include/fdbclient/MultiVersionTransaction.h index e307b71d98..1a0308e7bf 100644 --- a/fdbclient/include/fdbclient/MultiVersionTransaction.h +++ b/fdbclient/include/fdbclient/MultiVersionTransaction.h @@ -80,7 +80,6 @@ struct FdbCApi : public ThreadSafeReferenceCounted { * and take the shortcut. 
*/ FDBGetRangeReqAndResult getRange; unsigned char buffer[32]; - bool boundaryAndExist; } FDBMappedKeyValue; #pragma pack(push, 4) diff --git a/fdbclient/include/fdbclient/MutationLogReader.actor.h b/fdbclient/include/fdbclient/MutationLogReader.actor.h index 671d461e84..831135a901 100644 --- a/fdbclient/include/fdbclient/MutationLogReader.actor.h +++ b/fdbclient/include/fdbclient/MutationLogReader.actor.h @@ -80,8 +80,9 @@ public: Future done() { return reader; } private: - Version beginVersion, endVersion, currentBeginVersion; - unsigned pipelineDepth; + [[maybe_unused]] Version beginVersion; + Version endVersion, currentBeginVersion; + [[maybe_unused]] unsigned pipelineDepth; Future reader; }; diff --git a/fdbclient/include/fdbclient/TaskBucket.h b/fdbclient/include/fdbclient/TaskBucket.h index b0b7a2bc51..ba122ac7a7 100644 --- a/fdbclient/include/fdbclient/TaskBucket.h +++ b/fdbclient/include/fdbclient/TaskBucket.h @@ -274,6 +274,7 @@ public: Database src; Map>>> key_version; + UID dbgid; CounterCollection cc; Counter dispatchSlotChecksStarted; @@ -281,7 +282,6 @@ public: Counter dispatchDoTasks; Counter dispatchEmptyTasks; Counter dispatchSlotChecksComplete; - UID dbgid; double getTimeoutSeconds() const { return (double)timeout / CLIENT_KNOBS->CORE_VERSIONSPERSECOND; } diff --git a/fdbclient/include/fdbclient/Tracing.h b/fdbclient/include/fdbclient/Tracing.h index 01ffcaa5dd..e04707685b 100644 --- a/fdbclient/include/fdbclient/Tracing.h +++ b/fdbclient/include/fdbclient/Tracing.h @@ -230,9 +230,7 @@ public: } Span& addAttribute(const StringRef& key, const StringRef& value) { - if (FLOW_KNOBS->TRACING_SPAN_ATTRIBUTES_ENABLED) { - attributes.push_back_deep(arena, KeyValueRef(key, value)); - } + attributes.push_back_deep(arena, KeyValueRef(key, value)); return *this; } @@ -273,4 +271,4 @@ struct ITracer { virtual void trace(Span const& span) = 0; }; -void openTracer(TracerType type); \ No newline at end of file +void openTracer(TracerType type); diff --git 
a/fdbclient/include/fdbclient/VersionedMap.h b/fdbclient/include/fdbclient/VersionedMap.h index b77a5c270a..bd8f3eb482 100644 --- a/fdbclient/include/fdbclient/VersionedMap.h +++ b/fdbclient/include/fdbclient/VersionedMap.h @@ -685,7 +685,7 @@ public: } Future forgetVersionsBeforeAsync(Version newOldestVersion, TaskPriority taskID = TaskPriority::DefaultYield) { - ASSERT(newOldestVersion <= latestVersion); + ASSERT_LE(newOldestVersion, latestVersion); auto r = upper_bound(roots.begin(), roots.end(), newOldestVersion, rootsComparator()); auto upper = r; --r; diff --git a/fdbclient/include/fdbclient/json_spirit/json_spirit_reader_template.h b/fdbclient/include/fdbclient/json_spirit/json_spirit_reader_template.h index 207e2e8e74..9005a09764 100644 --- a/fdbclient/include/fdbclient/json_spirit/json_spirit_reader_template.h +++ b/fdbclient/include/fdbclient/json_spirit/json_spirit_reader_template.h @@ -13,7 +13,7 @@ #include "json_spirit_value.h" #include "json_spirit_error_position.h" -//#define BOOST_SPIRIT_THREADSAFE // uncomment for multithreaded use, requires linking to boost.thread +// #define BOOST_SPIRIT_THREADSAFE // uncomment for multithreaded use, requires linking to boost.thread #include #include diff --git a/fdbrpc/DDSketchTest.actor.cpp b/fdbrpc/DDSketchTest.actor.cpp new file mode 100644 index 0000000000..f2ecb48bd2 --- /dev/null +++ b/fdbrpc/DDSketchTest.actor.cpp @@ -0,0 +1,62 @@ +#include "fdbrpc/DDSketch.h" +#include "flow/Error.h" +#include "flow/IRandom.h" +#include "flow/UnitTest.h" +#include +#include +#include "flow/actorcompiler.h" // has to be last include +void forceLinkDDSketchTests() {} + +TEST_CASE("/fdbrpc/ddsketch/accuracy") { + + int TRY = 100, SIZE = 1e6; + const int totalPercentiles = 7; + double targetPercentiles[totalPercentiles] = { .0001, .01, .1, .50, .90, .99, .9999 }; + double stat[totalPercentiles] = { 0 }; + for (int t = 0; t < TRY; t++) { + DDSketch dd; + std::vector nums; + for (int i = 0; i < SIZE; i++) { + static double a 
= 1, b = 1; // a skewed distribution + auto y = deterministicRandom()->random01(); + auto num = b / pow(1 - y, 1 / a); + nums.push_back(num); + dd.addSample(num); + } + std::sort(nums.begin(), nums.end()); + for (int percentID = 0; percentID < totalPercentiles; percentID++) { + double percentile = targetPercentiles[percentID]; + double ground = nums[percentile * (SIZE - 1)], ddvalue = dd.percentile(percentile); + double relativeError = fabs(ground - ddvalue) / ground; + stat[percentID] += relativeError; + } + } + + for (int percentID = 0; percentID < totalPercentiles; percentID++) { + printf("%.4lf per, relative error %.4lf\n", targetPercentiles[percentID], stat[percentID] / TRY); + } + + return Void(); +} + +TEST_CASE("/fdbrpc/ddsketch/correctness") { + DDSketch dd; + + for (int i = 0; i < 4000; i++) { + // This generates a uniform real disitribution between the range of + // [0.0004, 0.01] + double sample = (static_cast(deterministicRandom()->randomSkewedUInt32(40, 1000)) / 100000); + dd.addSample(sample); + } + double p50 = dd.percentile(0.5); + ASSERT(p50 > 0 && p50 != std::numeric_limits::infinity()); + double p90 = dd.percentile(0.9); + ASSERT(p90 > 0 && p90 != std::numeric_limits::infinity()); + double p95 = dd.percentile(0.95); + ASSERT(p95 > 0 && p95 != std::numeric_limits::infinity()); + double p99 = dd.percentile(0.99); + ASSERT(p99 > 0 && p99 != std::numeric_limits::infinity()); + double p999 = dd.percentile(0.999); + ASSERT(p999 > 0 && p999 != std::numeric_limits::infinity()); + return Void{}; +} diff --git a/fdbrpc/FailureMonitor.actor.cpp b/fdbrpc/FailureMonitor.actor.cpp index fea3b4a9db..e2b76a8214 100644 --- a/fdbrpc/FailureMonitor.actor.cpp +++ b/fdbrpc/FailureMonitor.actor.cpp @@ -53,7 +53,9 @@ ACTOR Future waitForContinuousFailure(IFailureMonitor* monitor, choose { when(wait(monitor->onStateEqual(endpoint, FailureStatus(false)))) { } // SOMEDAY: Use onStateChanged() for efficiency - when(wait(delay(waitDelay))) { return Void(); } + 
when(wait(delay(waitDelay))) { + return Void(); + } } } } diff --git a/fdbrpc/FlowTransport.actor.cpp b/fdbrpc/FlowTransport.actor.cpp index 9d1cec9aac..40027cd237 100644 --- a/fdbrpc/FlowTransport.actor.cpp +++ b/fdbrpc/FlowTransport.actor.cpp @@ -572,7 +572,9 @@ ACTOR Future connectionMonitor(Reference peer) { } break; } - when(wait(peer->resetPing.onTrigger())) { break; } + when(wait(peer->resetPing.onTrigger())) { + break; + } } } } @@ -668,7 +670,9 @@ ACTOR Future connectionKeeper(Reference self, choose { when(wait(self->dataToSend.onTrigger())) {} - when(wait(retryConnectF)) { break; } + when(wait(retryConnectF)) { + break; + } } } @@ -717,7 +721,9 @@ ACTOR Future connectionKeeper(Reference self, self->prependConnectPacket(); reader = connectionReader(self->transport, conn, self, Promise>()); } - when(wait(delay(FLOW_KNOBS->CONNECTION_MONITOR_TIMEOUT))) { throw connection_failed(); } + when(wait(delay(FLOW_KNOBS->CONNECTION_MONITOR_TIMEOUT))) { + throw connection_failed(); + } } } catch (Error& e) { ++self->connectFailedCount; @@ -1465,7 +1471,9 @@ ACTOR static Future connectionIncoming(TransportData* self, Reference p = wait(onConnected.getFuture())) { p->onIncomingConnection(p, conn, reader); } + when(Reference p = wait(onConnected.getFuture())) { + p->onIncomingConnection(p, conn, reader); + } when(wait(delayJittered(FLOW_KNOBS->CONNECTION_MONITOR_TIMEOUT))) { CODE_PROBE(true, "Incoming connection timed out"); throw timed_out(); diff --git a/fdbrpc/LoadBalance.actor.cpp b/fdbrpc/LoadBalance.actor.cpp index 1d02e35cbe..b668deb8dd 100644 --- a/fdbrpc/LoadBalance.actor.cpp +++ b/fdbrpc/LoadBalance.actor.cpp @@ -50,7 +50,9 @@ ACTOR Future allAlternativesFailedDelay(Future okFuture) { choose { when(wait(okFuture)) {} - when(wait(::delayJittered(delay))) { throw all_alternatives_failed(); } + when(wait(::delayJittered(delay))) { + throw all_alternatives_failed(); + } } return Void(); } diff --git a/fdbrpc/ReplicationPolicy.cpp b/fdbrpc/ReplicationPolicy.cpp 
index a31cd89190..712afc08ce 100644 --- a/fdbrpc/ReplicationPolicy.cpp +++ b/fdbrpc/ReplicationPolicy.cpp @@ -77,13 +77,11 @@ bool PolicyOne::selectReplicas(Reference& fromServers, std::vector const& alsoServers, std::vector& results) { int totalUsed = 0; - int itemsUsed = 0; if (alsoServers.size()) { totalUsed++; } else if (fromServers->size()) { auto randomEntry = fromServers->random(); results.push_back(randomEntry); - itemsUsed++; totalUsed++; } return (totalUsed > 0); diff --git a/fdbrpc/Stats.actor.cpp b/fdbrpc/Stats.actor.cpp index 20ed269c84..ae6489c547 100644 --- a/fdbrpc/Stats.actor.cpp +++ b/fdbrpc/Stats.actor.cpp @@ -19,6 +19,14 @@ */ #include "fdbrpc/Stats.h" +#include "flow/IRandom.h" +#include "flow/Knobs.h" +#include "flow/OTELMetrics.h" +#include "flow/TDMetric.actor.h" +#include "flow/Trace.h" +#include "flow/flow.h" +#include "flow/network.h" +#include #include "flow/actorcompiler.h" // has to be last include Counter::Counter(std::string const& name, CounterCollection& collection) @@ -81,8 +89,36 @@ void Counter::clear() { metric = 0; } -void CounterCollection::logToTraceEvent(TraceEvent& te) const { +void CounterCollection::logToTraceEvent(TraceEvent& te) { + NetworkAddress addr = g_network->getLocalAddress(); for (ICounter* c : counters) { + MetricCollection* metrics = MetricCollection::getMetricCollection(); + if (metrics != nullptr) { + std::string ip_str = addr.ip.toString(); + std::string port_str = std::to_string(addr.port); + uint64_t val = c->getValue(); + switch (c->model) { + case MetricsDataModel::OTLP: { + if (metrics->sumMap.find(c->id) != metrics->sumMap.end()) { + metrics->sumMap[c->id].points.emplace_back(static_cast(val)); + } else { + metrics->sumMap[c->id] = OTEL::OTELSum(name + "." 
+ c->getName(), val); + } + metrics->sumMap[c->id].points.back().addAttribute("ip", ip_str); + metrics->sumMap[c->id].points.back().addAttribute("port", port_str); + metrics->sumMap[c->id].points.back().startTime = logTime; + } + case MetricsDataModel::STATSD: { + std::vector> statsd_attributes{ { "ip", ip_str }, + { "port", port_str } }; + metrics->statsd_message.push_back(createStatsdMessage( + c->getName(), StatsDMetric::COUNTER, std::to_string(val) /*, statsd_attributes*/)); + } + case MetricsDataModel::NONE: + default: { + } + } + } te.detail(c->getName().c_str(), c); c->resetInterval(); } @@ -180,3 +216,84 @@ void LatencyBands::clearBands() { LatencyBands::~LatencyBands() { clearBands(); } + +LatencySample::LatencySample(std::string name, UID id, double loggingInterval, double accuracy) + : name(name), IMetric(knobToMetricModel(FLOW_KNOBS->METRICS_DATA_MODEL)), id(id), sampleEmit(now()), sketch(accuracy), + latencySampleEventHolder(makeReference(id.toString() + "/" + name)) { + logger = recurring([this]() { logSample(); }, loggingInterval); + p50id = deterministicRandom()->randomUniqueID(); + p90id = deterministicRandom()->randomUniqueID(); + p95id = deterministicRandom()->randomUniqueID(); + p99id = deterministicRandom()->randomUniqueID(); + p999id = deterministicRandom()->randomUniqueID(); +} + +void LatencySample::addMeasurement(double measurement) { + sketch.addSample(measurement); +} + +void LatencySample::logSample() { + double p25 = sketch.percentile(0.25); + double p50 = sketch.mean(); + double p90 = sketch.percentile(0.9); + double p95 = sketch.percentile(0.95); + double p99 = sketch.percentile(0.99); + double p99_9 = sketch.percentile(0.999); + TraceEvent(name.c_str(), id) + .detail("Count", sketch.getPopulationSize()) + .detail("Elapsed", now() - sampleEmit) + .detail("Min", sketch.min()) + .detail("Max", sketch.max()) + .detail("Mean", sketch.mean()) + .detail("Median", p50) + .detail("P25", p25) + .detail("P90", p90) + .detail("P95", p95) + 
.detail("P99", p99) + .detail("P99.9", p99_9) + .trackLatest(latencySampleEventHolder->trackingKey); + MetricCollection* metrics = MetricCollection::getMetricCollection(); + if (metrics != nullptr) { + NetworkAddress addr = g_network->getLocalAddress(); + std::string ip_str = addr.ip.toString(); + std::string port_str = std::to_string(addr.port); + switch (model) { + case MetricsDataModel::OTLP: { + if (metrics->histMap.find(IMetric::id) != metrics->histMap.end()) { + metrics->histMap[IMetric::id].points.emplace_back( + sketch.getErrorGuarantee(), sketch.getSamples(), sketch.min(), sketch.max(), sketch.getSum()); + } else { + metrics->histMap[IMetric::id] = OTEL::OTELHistogram( + name, sketch.getErrorGuarantee(), sketch.getSamples(), sketch.min(), sketch.max(), sketch.getSum()); + } + metrics->histMap[IMetric::id].points.back().addAttribute("ip", ip_str); + metrics->histMap[IMetric::id].points.back().addAttribute("port", port_str); + metrics->histMap[IMetric::id].points.back().startTime = sampleEmit; + createOtelGauge(p50id, name + "p50", p50); + createOtelGauge(p90id, name + "p90", p90); + createOtelGauge(p95id, name + "p95", p95); + createOtelGauge(p99id, name + "p99", p99); + createOtelGauge(p999id, name + "p99_9", p99_9); + } + case MetricsDataModel::STATSD: { + std::vector> statsd_attributes{ { "ip", ip_str }, + { "port", port_str } }; + auto median_gauge = + createStatsdMessage(name + "p50", StatsDMetric::GAUGE, std::to_string(p50) /*, statsd_attributes*/); + auto p90_gauge = + createStatsdMessage(name + "p90", StatsDMetric::GAUGE, std::to_string(p90) /*, statsd_attributes*/); + auto p95_gauge = + createStatsdMessage(name + "p95", StatsDMetric::GAUGE, std::to_string(p95) /*, statsd_attributes*/); + auto p99_gauge = + createStatsdMessage(name + "p99", StatsDMetric::GAUGE, std::to_string(p99) /*, statsd_attributes*/); + auto p999_gauge = + createStatsdMessage(name + "p99.9", StatsDMetric::GAUGE, std::to_string(p99_9) /*, statsd_attributes*/); + } + case 
MetricsDataModel::NONE: + default: { + } + } + } + sketch.clear(); + sampleEmit = now(); +} diff --git a/fdbrpc/dsltest.actor.cpp b/fdbrpc/dsltest.actor.cpp index b1e557e028..8857f89e4f 100644 --- a/fdbrpc/dsltest.actor.cpp +++ b/fdbrpc/dsltest.actor.cpp @@ -257,7 +257,9 @@ ACTOR template ACTOR template [[flow_allow_discard]] Future switchTest(FutureStream as, Future oneb) { loop choose { - when(A a = waitNext(as)) { std::cout << "A " << a << std::endl; } + when(A a = waitNext(as)) { + std::cout << "A " << a << std::endl; + } when(B b = wait(oneb)) { std::cout << "B " << b << std::endl; break; @@ -283,7 +285,7 @@ public: #if !defined(__INTEL_COMPILER) void operator delete(void* buf) { std::cout << "Freeing buffer" << std::endl; - delete[](int*) buf; + delete[] (int*)buf; } #endif @@ -614,8 +616,12 @@ void returnCancelRaceTest() { ACTOR [[flow_allow_discard]] Future chooseTest(Future a, Future b) { choose { - when(int A = wait(a)) { return A; } - when(int B = wait(b)) { return B; } + when(int A = wait(a)) { + return A; + } + when(int B = wait(b)) { + return B; + } } } @@ -960,8 +966,12 @@ ACTOR [[flow_allow_discard]] Future introAdd(Future a, Future b) ACTOR [[flow_allow_discard]] Future introFirst(Future a, Future b) { choose { - when(int x = wait(a)) { return x; } - when(int x = wait(b)) { return x; } + when(int x = wait(a)) { + return x; + } + when(int x = wait(b)) { + return x; + } } } diff --git a/fdbrpc/include/fdbrpc/AsyncFileNonDurable.actor.h b/fdbrpc/include/fdbrpc/AsyncFileNonDurable.actor.h index 01fc71adfd..ab8e895cce 100644 --- a/fdbrpc/include/fdbrpc/AsyncFileNonDurable.actor.h +++ b/fdbrpc/include/fdbrpc/AsyncFileNonDurable.actor.h @@ -52,7 +52,9 @@ Future sendErrorOnShutdown(Future in, bool assertOnCancel = false) { when(wait(success(g_simulator->getCurrentProcess()->shutdownSignal.getFuture()))) { throw io_error().asInjectedFault(); } - when(T rep = wait(in)) { return rep; } + when(T rep = wait(in)) { + return rep; + } } } catch (Error& e) { 
ASSERT(e.code() != error_code_actor_cancelled || !assertOnCancel); @@ -82,7 +84,9 @@ public: when(wait(success(g_simulator->getCurrentProcess()->shutdownSignal.getFuture()))) { throw io_error().asInjectedFault(); } - when(Reference f = wait(wrappedFile)) { return makeReference(f); } + when(Reference f = wait(wrappedFile)) { + return makeReference(f); + } } } @@ -507,7 +511,9 @@ private: state bool saveDurable = true; choose { when(wait(delay(delayDuration))) {} - when(bool durable = wait(startSyncFuture)) { saveDurable = durable; } + when(bool durable = wait(startSyncFuture)) { + saveDurable = durable; + } } debugFileCheck("AsyncFileNonDurableWriteAfterWait", self->filename, dataCopy.begin(), offset, length); @@ -684,7 +690,9 @@ private: state bool saveDurable = true; choose { when(wait(delay(delayDuration))) {} - when(bool durable = wait(startSyncFuture)) { saveDurable = durable; } + when(bool durable = wait(startSyncFuture)) { + saveDurable = durable; + } } if (g_network->check_yield(TaskPriority::DefaultYield)) { diff --git a/fdbrpc/include/fdbrpc/ContinuousSample.h b/fdbrpc/include/fdbrpc/ContinuousSample.h index 11a9d35d46..f6cf80c430 100644 --- a/fdbrpc/include/fdbrpc/ContinuousSample.h +++ b/fdbrpc/include/fdbrpc/ContinuousSample.h @@ -32,11 +32,24 @@ template class ContinuousSample { public: explicit ContinuousSample(int sampleSize) - : sampleSize(sampleSize), populationSize(0), sorted(true), _min(T()), _max(T()) {} + : sampleSize(sampleSize), populationSize(0), sorted(true), _min(T()), _max(T()), _sum(T()) {} + + void swap(ContinuousSample& other) { + std::swap(samples, other.samples); + std::swap(_min, other._min); + std::swap(_max, other._max); + std::swap(_sum, other._sum); + std::swap(populationSize, other.populationSize); + std::swap(sorted, other.sorted); + std::swap(sampleSize, other.sampleSize); + } ContinuousSample& addSample(T sample) { if (!populationSize) - _min = _max = sample; + _sum = _min = _max = sample; + else { + _sum += sample; + } 
populationSize++; sorted = false; @@ -51,6 +64,10 @@ public: return *this; } + std::vector getSamples() const { return samples; } + + double sum() const { return _sum; } + double mean() const { if (!samples.size()) return 0; @@ -78,7 +95,7 @@ public: samples.clear(); populationSize = 0; sorted = true; - _min = _max = 0; // Doesn't work for all T + _min = _max = _sum = 0; // Doesn't work for all T } uint64_t getPopulationSize() const { return populationSize; } @@ -88,7 +105,7 @@ private: uint64_t populationSize; bool sorted; std::vector samples; - T _min, _max; + T _min, _max, _sum; void sort() { if (!sorted && samples.size() > 1) diff --git a/fdbrpc/include/fdbrpc/DDSketch.h b/fdbrpc/include/fdbrpc/DDSketch.h index d17508622e..663669be08 100644 --- a/fdbrpc/include/fdbrpc/DDSketch.h +++ b/fdbrpc/include/fdbrpc/DDSketch.h @@ -170,6 +170,7 @@ public: T min() const { return minValue; } T max() const { return maxValue; } + T getSum() const { return sum; } void clear() { std::fill(buckets.begin(), buckets.end(), 0); @@ -185,6 +186,8 @@ public: size_t getBucketSize() const { return buckets.size(); } + std::vector getSamples() const { return buckets; } + DDSketchBase& mergeWith(const DDSketchBase& anotherSketch) { // Must have the same guarantee ASSERT(fabs(errorGuarantee - anotherSketch.errorGuarantee) < EPS && @@ -205,7 +208,7 @@ protected: double errorGuarantee; // As defined in the paper uint64_t populationSize, zeroPopulationSize; // we need to separately count 0s - std::vector buckets; + std::vector buckets; T minValue, maxValue, sum; void setBucketSize(size_t capacity) { buckets.resize(capacity, 0); } }; @@ -214,7 +217,7 @@ protected: template class DDSketch : public DDSketchBase, T> { public: - explicit DDSketch(double errorGuarantee = 0.01) + explicit DDSketch(double errorGuarantee = 0.005) : DDSketchBase, T>(errorGuarantee), gamma((1.0 + errorGuarantee) / (1.0 - errorGuarantee)), multiplier(fastLogger::correctingFactor * log(2) / log(gamma)) { 
ASSERT(errorGuarantee > 0); @@ -228,7 +231,10 @@ public: return ceil(fastLogger::fastlog(sample) * multiplier) + offset; } - T getValue(size_t index) { return fastLogger::reverseLog((index - offset) / multiplier) * 2.0 / (1 + gamma); } + T getValue(size_t index) { + return fastLogger::reverseLog((static_cast(index) - static_cast(offset)) / multiplier) * 2.0 / + (1 + gamma); + } private: double gamma, multiplier; @@ -248,7 +254,9 @@ public: size_t getIndex(T sample) { return ceil(log(sample) / logGamma) + offset; } - T getValue(size_t index) { return (T)(2.0 * pow(gamma, (index - offset)) / (1 + gamma)); } + T getValue(size_t index) { + return (T)(2.0 * pow(gamma, (static_cast(index) - static_cast(offset))) / (1 + gamma)); + } private: double gamma, logGamma; @@ -292,35 +300,3 @@ private: }; #endif - -TEST_CASE("/fdbrpc/ddsketch/accuracy") { - - int TRY = 100, SIZE = 1e6; - const int totalPercentiles = 7; - double targetPercentiles[totalPercentiles] = { .0001, .01, .1, .50, .90, .99, .9999 }; - double stat[totalPercentiles] = { 0 }; - for (int t = 0; t < TRY; t++) { - DDSketch dd; - std::vector nums; - for (int i = 0; i < SIZE; i++) { - static double a = 1, b = 1; // a skewed distribution - auto y = deterministicRandom()->random01(); - auto num = b / pow(1 - y, 1 / a); - nums.push_back(num); - dd.addSample(num); - } - std::sort(nums.begin(), nums.end()); - for (int percentID = 0; percentID < totalPercentiles; percentID++) { - double percentile = targetPercentiles[percentID]; - double ground = nums[percentile * (SIZE - 1)], ddvalue = dd.percentile(percentile); - double relativeError = fabs(ground - ddvalue) / ground; - stat[percentID] += relativeError; - } - } - - for (int percentID = 0; percentID < totalPercentiles; percentID++) { - printf("%.4lf per, relative error %.4lf\n", targetPercentiles[percentID], stat[percentID] / TRY); - } - - return Void(); -} diff --git a/fdbrpc/include/fdbrpc/Stats.h b/fdbrpc/include/fdbrpc/Stats.h index 5cbb2cccd0..cf3e5b84c9 100644 
--- a/fdbrpc/include/fdbrpc/Stats.h +++ b/fdbrpc/include/fdbrpc/Stats.h @@ -20,6 +20,12 @@ #ifndef FDBRPC_STATS_H #define FDBRPC_STATS_H +#include "flow/Error.h" +#include "flow/IRandom.h" +#include "flow/Knobs.h" +#include "flow/OTELMetrics.h" +#include "flow/serialize.h" +#include #include #pragma once @@ -40,8 +46,9 @@ MyCounters() : foo("foo", cc), bar("bar", cc), baz("baz", cc) {} #include "flow/TDMetric.actor.h" #include "fdbrpc/DDSketch.h" -struct ICounter { +struct ICounter : public IMetric { // All counters have a name and value + ICounter() : IMetric(knobToMetricModel(FLOW_KNOBS->METRICS_DATA_MODEL)) {} virtual std::string const& getName() const = 0; virtual int64_t getValue() const = 0; @@ -74,8 +81,11 @@ class CounterCollection { std::string id; std::vector counters, countersToRemove; + double logTime; + public: - CounterCollection(std::string const& name, std::string const& id = std::string()) : name(name), id(id) {} + CounterCollection(std::string const& name, std::string const& id = std::string()) + : name(name), id(id), logTime(0) {} ~CounterCollection() { for (auto c : countersToRemove) c->remove(); @@ -90,7 +100,7 @@ public: std::string const& getId() const { return id; } - void logToTraceEvent(TraceEvent& te) const; + void logToTraceEvent(TraceEvent& te); Future traceCounters( std::string const& traceEventName, @@ -100,7 +110,7 @@ public: std::function const& decorator = [](auto& te) {}); }; -struct Counter final : ICounter, NonCopyable { +struct Counter final : public ICounter, NonCopyable { public: typedef int64_t Value; @@ -214,48 +224,33 @@ public: ~LatencyBands(); }; -class LatencySample { +class LatencySample : public IMetric { public: - LatencySample(std::string name, UID id, double loggingInterval, double accuracy) - : name(name), id(id), sampleStart(now()), sketch(accuracy), - latencySampleEventHolder(makeReference(id.toString() + "/" + name)) { - assert(accuracy > 0); - if (accuracy <= 0) { - fmt::print(stderr, "ERROR: LatencySample {} 
has invalid accuracy ({})", name, accuracy); - } - logger = recurring([this]() { logSample(); }, loggingInterval); - } - - void addMeasurement(double measurement) { sketch.addSample(measurement); } + LatencySample(std::string name, UID id, double loggingInterval, double accuracy); + void addMeasurement(double measurement); private: std::string name; UID id; - double sampleStart; + // These UIDs below are needed to emit the tail latencies as gauges + // + // If an OTEL aggregator is able to directly accept and process histograms + // the tail latency gauges won't necessarily be needed anymore since they can be + // calculated directly from the emitted buckets. To support users who have an aggregator + // who cannot accept histograms, the tails latencies are still directly emitted. + UID p50id; + UID p90id; + UID p95id; + UID p99id; + UID p999id; + double sampleEmit; DDSketch sketch; Future logger; Reference latencySampleEventHolder; - void logSample() { - TraceEvent(name.c_str(), id) - .detail("Count", sketch.getPopulationSize()) - .detail("Elapsed", now() - sampleStart) - .detail("Min", sketch.min()) - .detail("Max", sketch.max()) - .detail("Mean", sketch.mean()) - .detail("Median", sketch.median()) - .detail("P25", sketch.percentile(0.25)) - .detail("P90", sketch.percentile(0.9)) - .detail("P95", sketch.percentile(0.95)) - .detail("P99", sketch.percentile(0.99)) - .detail("P99.9", sketch.percentile(0.999)) - .trackLatest(latencySampleEventHolder->trackingKey); - - sketch.clear(); - sampleStart = now(); - } + void logSample(); }; #endif diff --git a/fdbrpc/include/fdbrpc/genericactors.actor.h b/fdbrpc/include/fdbrpc/genericactors.actor.h index 216a1c9d07..aa5c04df9c 100644 --- a/fdbrpc/include/fdbrpc/genericactors.actor.h +++ b/fdbrpc/include/fdbrpc/genericactors.actor.h @@ -194,7 +194,9 @@ ACTOR template Future timeoutWarning(Future what, double time, PromiseStream output) { state Future end = delay(time); loop choose { - when(T t = wait(what)) { return t; } + 
when(T t = wait(what)) { + return t; + } when(wait(end)) { output.send(Void()); end = delay(time); @@ -332,7 +334,9 @@ void endStreamOnDisconnect(Future signal, stream.setRequestStreamEndpoint(endpoint); try { choose { - when(wait(signal)) { stream.sendError(connection_failed()); } + when(wait(signal)) { + stream.sendError(connection_failed()); + } when(wait(peer.isValid() ? peer->disconnect.getFuture() : Never())) { stream.sendError(connection_failed()); } @@ -361,7 +365,9 @@ Future> waitValueOrSignal(Future value, loop { try { choose { - when(X x = wait(value)) { return x; } + when(X x = wait(value)) { + return x; + } when(wait(signal)) { return ErrorOr(IFailureMonitor::failureMonitor().knownUnauthorized(endpoint) ? unauthorized_attempt() diff --git a/fdbrpc/include/fdbrpc/simulator.h b/fdbrpc/include/fdbrpc/simulator.h index e4abefa073..be8256b7e7 100644 --- a/fdbrpc/include/fdbrpc/simulator.h +++ b/fdbrpc/include/fdbrpc/simulator.h @@ -99,6 +99,7 @@ public: LocalityData locality; ProcessClass startingClass; TDMetricCollection tdmetrics; + MetricCollection metrics; ChaosMetrics chaosMetrics; HistogramRegistry histograms; std::map> listenerMap; diff --git a/fdbrpc/libcoroutine/Common.h b/fdbrpc/libcoroutine/Common.h index 97fb4a71f4..70d7f825b1 100644 --- a/fdbrpc/libcoroutine/Common.h +++ b/fdbrpc/libcoroutine/Common.h @@ -39,7 +39,7 @@ typedef long long int64_t; #if defined(WIN32) || defined(__WINS__) || defined(__MINGW32__) || defined(_MSC_VER) #define inline __inline -//#define snprintf _snprintf +// #define snprintf _snprintf #define usleep(x) Sleep(((x) + 999) / 1000) #define HAS_FIBERS 1 @@ -145,7 +145,7 @@ as errors in my dev settings */ extern "C" { #endif -//#define IO_CHECK_ALLOC ENABLED(NOT_IN_CLEAN) +// #define IO_CHECK_ALLOC ENABLED(NOT_IN_CLEAN) #ifdef IO_CHECK_ALLOC BASEKIT_API size_t io_memsize(void* ptr); diff --git a/fdbrpc/libcoroutine/Coro.h b/fdbrpc/libcoroutine/Coro.h index f8d7aa72c8..a2b36fcf60 100644 --- a/fdbrpc/libcoroutine/Coro.h +++ 
b/fdbrpc/libcoroutine/Coro.h @@ -13,8 +13,8 @@ #define CORO_STACK_SIZE 8192 #define CORO_STACK_SIZE_MIN 1024 #else -//#define CORO_DEFAULT_STACK_SIZE (65536/2) -//#define CORO_DEFAULT_STACK_SIZE (65536*4) +// #define CORO_DEFAULT_STACK_SIZE (65536/2) +// #define CORO_DEFAULT_STACK_SIZE (65536*4) // 128k needed on PPC due to parser #define CORO_DEFAULT_STACK_SIZE (128 * 1024) @@ -45,7 +45,7 @@ #if defined(WIN32) && defined(HAS_FIBERS) #define USE_FIBERS #elif defined(HAS_UCONTEXT) -//#elif defined(HAS_UCONTEXT) && !defined(__x86_64__) +// #elif defined(HAS_UCONTEXT) && !defined(__x86_64__) #if !defined(USE_UCONTEXT) #define USE_UCONTEXT #endif diff --git a/fdbrpc/libcoroutine/taskimpl.h b/fdbrpc/libcoroutine/taskimpl.h index 76c6c3d44d..dfe20f93ef 100644 --- a/fdbrpc/libcoroutine/taskimpl.h +++ b/fdbrpc/libcoroutine/taskimpl.h @@ -14,7 +14,7 @@ #endif #endif -//#define USE_UCONTEXT 1 +// #define USE_UCONTEXT 1 #if defined(__OpenBSD__) #undef USE_UCONTEXT @@ -45,7 +45,7 @@ #endif #include #include -//#include "task.h" +// #include "task.h" #define nil ((void*)0) #define nelem(x) (sizeof(x) / sizeof((x)[0])) diff --git a/fdbrpc/sim2.actor.cpp b/fdbrpc/sim2.actor.cpp index 38963d4143..09354b74b9 100644 --- a/fdbrpc/sim2.actor.cpp +++ b/fdbrpc/sim2.actor.cpp @@ -1357,6 +1357,7 @@ public: } m->setGlobal(enNetworkConnections, (flowGlobalType)m->network); m->setGlobal(enASIOTimedOut, (flowGlobalType) false); + m->setGlobal(INetwork::enMetrics, (flowGlobalType)&m->metrics); TraceEvent("NewMachine") .detail("Name", name) diff --git a/fdbserver/BackupWorker.actor.cpp b/fdbserver/BackupWorker.actor.cpp index f0aa4db6fa..18e32c824b 100644 --- a/fdbserver/BackupWorker.actor.cpp +++ b/fdbserver/BackupWorker.actor.cpp @@ -962,7 +962,9 @@ ACTOR Future pullAsyncData(BackupData* self) { } loop choose { - when(wait(r ? r->getMore(TaskPriority::TLogCommit) : Never())) { break; } + when(wait(r ? 
r->getMore(TaskPriority::TLogCommit) : Never())) { + break; + } when(wait(logSystemChange)) { if (self->logSystem.get()) { r = self->logSystem.get()->peekLogRouter(self->myId, tagAt, self->tag); @@ -1034,7 +1036,9 @@ ACTOR Future monitorBackupKeyOrPullData(BackupData* self, bool keyPresent) state Future committedVersion = self->getMinKnownCommittedVersion(); loop choose { - when(wait(success(present))) { break; } + when(wait(success(present))) { + break; + } when(wait(success(committedVersion) || delay(SERVER_KNOBS->BACKUP_NOOP_POP_DELAY, self->cx->taskID))) { if (committedVersion.isReady()) { self->popVersion = diff --git a/fdbserver/BlobManager.actor.cpp b/fdbserver/BlobManager.actor.cpp index 35da018f4f..63e358ee99 100644 --- a/fdbserver/BlobManager.actor.cpp +++ b/fdbserver/BlobManager.actor.cpp @@ -4035,7 +4035,9 @@ ACTOR Future blobWorkerRecruiter( // wait until existing blob workers have been acknowledged so we don't break recruitment invariants loop choose { - when(wait(self->startRecruiting.onTrigger())) { break; } + when(wait(self->startRecruiting.onTrigger())) { + break; + } } loop { @@ -4072,7 +4074,9 @@ ACTOR Future blobWorkerRecruiter( } // when the CC changes, so does the request stream so we need to restart recruiting here - when(wait(recruitBlobWorker->onChange())) { fCandidateWorker = Future(); } + when(wait(recruitBlobWorker->onChange())) { + fCandidateWorker = Future(); + } // signal used to restart the loop and try to recruit the next blob worker when(wait(self->restartRecruiting.onTrigger())) {} diff --git a/fdbserver/BlobWorker.actor.cpp b/fdbserver/BlobWorker.actor.cpp index b5f3142aca..28e2c1161c 100644 --- a/fdbserver/BlobWorker.actor.cpp +++ b/fdbserver/BlobWorker.actor.cpp @@ -1452,9 +1452,13 @@ ACTOR Future checkSplitAndReSnapshot(Reference bw // wait for manager stream to become ready, and send a message loop { choose { - when(wait(bwData->currentManagerStatusStream.get().onReady())) { break; } + 
when(wait(bwData->currentManagerStatusStream.get().onReady())) { + break; + } when(wait(bwData->currentManagerStatusStream.onChange())) {} - when(wait(metadata->resumeSnapshot.getFuture())) { break; } + when(wait(metadata->resumeSnapshot.getFuture())) { + break; + } } } if (metadata->resumeSnapshot.isSet()) { @@ -1493,7 +1497,9 @@ ACTOR Future checkSplitAndReSnapshot(Reference bw // manager change/no response choose { when(wait(bwData->currentManagerStatusStream.onChange())) {} - when(wait(metadata->resumeSnapshot.getFuture())) { break; } + when(wait(metadata->resumeSnapshot.getFuture())) { + break; + } when(wait(delay(1.0))) {} } @@ -1580,7 +1586,9 @@ ACTOR Future reevaluateInitialSplit(Reference bwData, // wait for manager stream to become ready, and send a message loop { choose { - when(wait(bwData->currentManagerStatusStream.get().onReady())) { break; } + when(wait(bwData->currentManagerStatusStream.get().onReady())) { + break; + } when(wait(bwData->currentManagerStatusStream.onChange())) {} } } @@ -1664,8 +1672,12 @@ ACTOR Future granuleCheckMergeCandidate(Reference bwData, // wait for manager stream to become ready, and send a message loop { choose { - when(wait(delay(std::max(0.0, sendTimeGiveUp - now())))) { break; } - when(wait(bwData->currentManagerStatusStream.get().onReady())) { break; } + when(wait(delay(std::max(0.0, sendTimeGiveUp - now())))) { + break; + } + when(wait(bwData->currentManagerStatusStream.get().onReady())) { + break; + } when(wait(bwData->currentManagerStatusStream.onChange())) {} } } @@ -1948,7 +1960,9 @@ ACTOR Future waitOnCFVersion(Reference metadata, Version ? 
metadata->activeCFData.get()->whenAtLeast(waitVersion) : Never(); choose { - when(wait(atLeast)) { break; } + when(wait(atLeast)) { + break; + } when(wait(metadata->activeCFData.onChange())) {} } } catch (Error& e) { @@ -3610,7 +3624,9 @@ ACTOR Future doBlobGranuleFileRequest(Reference bwData, Bl if (!bwData->isFullRestoreMode) { choose { when(wait(metadata->readable.getFuture())) {} - when(wait(metadata->cancelled.getFuture())) { throw wrong_shard_server(); } + when(wait(metadata->cancelled.getFuture())) { + throw wrong_shard_server(); + } } } @@ -3627,7 +3643,9 @@ ACTOR Future doBlobGranuleFileRequest(Reference bwData, Bl if (metadata->historyLoaded.canBeSet()) { choose { when(wait(metadata->historyLoaded.getFuture())) {} - when(wait(metadata->cancelled.getFuture())) { throw wrong_shard_server(); } + when(wait(metadata->cancelled.getFuture())) { + throw wrong_shard_server(); + } } } @@ -3639,7 +3657,9 @@ ACTOR Future doBlobGranuleFileRequest(Reference bwData, Bl when(GranuleFiles f = wait(finalChunks[chunkIdx].second)) { rangeGranulePair.push_back(std::pair(finalChunks[chunkIdx].first, f)); } - when(wait(metadata->cancelled.getFuture())) { throw wrong_shard_server(); } + when(wait(metadata->cancelled.getFuture())) { + throw wrong_shard_server(); + } } if (rangeGranulePair.back().second.snapshotFiles.empty()) { @@ -3680,9 +3700,13 @@ ACTOR Future doBlobGranuleFileRequest(Reference bwData, Bl // version on rollback try { choose { - when(wait(waitForVersionFuture)) { break; } + when(wait(waitForVersionFuture)) { + break; + } when(wait(metadata->activeCFData.onChange())) {} - when(wait(metadata->cancelled.getFuture())) { throw wrong_shard_server(); } + when(wait(metadata->cancelled.getFuture())) { + throw wrong_shard_server(); + } } } catch (Error& e) { // We can get change feed cancelled from whenAtLeast. 
This means the change feed may diff --git a/fdbserver/ClusterController.actor.cpp b/fdbserver/ClusterController.actor.cpp index d3e5b72af5..90d206234b 100644 --- a/fdbserver/ClusterController.actor.cpp +++ b/fdbserver/ClusterController.actor.cpp @@ -371,7 +371,9 @@ ACTOR Future clusterWatchDatabase(ClusterControllerData* cluster, req.reply.send(Void()); TraceEvent(SevDebug, "BackupWorkerDoneRequest", cluster->id).log(); } - when(wait(collection)) { throw internal_error(); } + when(wait(collection)) { + throw internal_error(); + } } // failed master (better master exists) could happen while change-coordinators request processing is // in-progress @@ -432,7 +434,9 @@ ACTOR Future clusterGetServerInfo(ClusterControllerData::DBInfo* db, while (db->serverInfo->get().id == knownServerInfoID) { choose { when(wait(yieldedFuture(db->serverInfo->onChange()))) {} - when(wait(delayJittered(300))) { break; } // The server might be long gone! + when(wait(delayJittered(300))) { + break; + } // The server might be long gone! 
} } reply.send(db->serverInfo->get()); @@ -2809,7 +2813,9 @@ ACTOR Future monitorBlobManager(ClusterControllerData* self) { self->db.clearInterf(ProcessClass::BlobManagerClass); break; } - when(wait(self->recruitBlobManager.onChange())) { break; } + when(wait(self->recruitBlobManager.onChange())) { + break; + } when(wait(self->db.blobGranulesEnabled.onChange())) { // if there is a blob manager present but blob granules are now disabled, stop the BM if (!self->db.blobGranulesEnabled.get()) { @@ -2840,7 +2846,9 @@ ACTOR Future dbInfoUpdater(ClusterControllerData* self) { state Future updateDBInfo = self->updateDBInfo.onTrigger(); loop { choose { - when(wait(updateDBInfo)) { wait(delay(SERVER_KNOBS->DBINFO_BATCH_DELAY) || dbInfoChange); } + when(wait(updateDBInfo)) { + wait(delay(SERVER_KNOBS->DBINFO_BATCH_DELAY) || dbInfoChange); + } when(wait(dbInfoChange)) {} } @@ -3221,7 +3229,9 @@ ACTOR Future clusterControllerCore(ClusterControllerFullInterface interf, CODE_PROBE(true, "Leader replaced"); return Void(); } - when(ReplyPromise ping = waitNext(interf.clientInterface.ping.getFuture())) { ping.send(Void()); } + when(ReplyPromise ping = waitNext(interf.clientInterface.ping.getFuture())) { + ping.send(Void()); + } } } @@ -3260,7 +3270,9 @@ ACTOR Future clusterController(ServerCoordinators coordinators, ASSERT(false); throw internal_error(); } - when(wait(shouldReplace)) { break; } + when(wait(shouldReplace)) { + break; + } } } if (!shouldReplace.isReady()) { diff --git a/fdbserver/ClusterRecovery.actor.cpp b/fdbserver/ClusterRecovery.actor.cpp index 346a59ce87..4b470afe3e 100644 --- a/fdbserver/ClusterRecovery.actor.cpp +++ b/fdbserver/ClusterRecovery.actor.cpp @@ -67,7 +67,9 @@ ACTOR Future recoveryTerminateOnConflict(UID dbgid, } return Void(); } - when(wait(switchedState)) { return Void(); } + when(wait(switchedState)) { + return Void(); + } } } @@ -921,8 +923,12 @@ ACTOR Future> 
provisionalMaster(ReferenceprovisionalCommitProxies[0].getKeyServersLocations.getFuture())) { req.reply.send(Never()); } - when(wait(waitCommitProxyFailure)) { throw worker_removed(); } - when(wait(waitGrvProxyFailure)) { throw worker_removed(); } + when(wait(waitCommitProxyFailure)) { + throw worker_removed(); + } + when(wait(waitGrvProxyFailure)) { + throw worker_removed(); + } } } @@ -1127,10 +1133,14 @@ ACTOR Future readTransactionSystemState(Reference sel if (self->recoveryTransactionVersion < minRequiredCommitVersion) self->recoveryTransactionVersion = minRequiredCommitVersion; - } - if (BUGGIFY) { - self->recoveryTransactionVersion += deterministicRandom()->randomInt64(0, 10000000); + // Test randomly increasing the recovery version by a large number. + // When the version epoch is enabled, versions stay in sync with time. + // An offline cluster could see a large version jump when it comes back + // online, so test this behavior in simulation. + if (BUGGIFY) { + self->recoveryTransactionVersion += deterministicRandom()->randomInt64(0, 10000000); + } } TraceEvent(getRecoveryEventName(ClusterRecoveryEventType::CLUSTER_RECOVERY_RECOVERING_EVENT_NAME).c_str(), @@ -1577,8 +1587,12 @@ ACTOR Future clusterRecoveryCore(Reference self) { break; } when(wait(oldLogSystems->onChange())) {} - when(wait(reg)) { throw internal_error(); } - when(wait(recoverAndEndEpoch)) { throw internal_error(); } + when(wait(reg)) { + throw internal_error(); + } + when(wait(recoverAndEndEpoch)) { + throw internal_error(); + } } } diff --git a/fdbserver/ConfigDatabaseUnitTests.actor.cpp b/fdbserver/ConfigDatabaseUnitTests.actor.cpp index e41083e3a7..faa02f66f3 100644 --- a/fdbserver/ConfigDatabaseUnitTests.actor.cpp +++ b/fdbserver/ConfigDatabaseUnitTests.actor.cpp @@ -673,7 +673,9 @@ Future testIgnore(UnitTestParameters params) { wait(set(env, "class-B"_sr, "test_long"_sr, int64_t{ 1 })); choose { when(wait(delay(5))) {} - when(wait(check(env, &TestKnobs::TEST_LONG, Optional{ 1 }))) { 
ASSERT(false); } + when(wait(check(env, &TestKnobs::TEST_LONG, Optional{ 1 }))) { + ASSERT(false); + } } return Void(); } diff --git a/fdbserver/ConfigNode.actor.cpp b/fdbserver/ConfigNode.actor.cpp index 3c34bc0a93..489f1b4a24 100644 --- a/fdbserver/ConfigNode.actor.cpp +++ b/fdbserver/ConfigNode.actor.cpp @@ -513,7 +513,9 @@ class ConfigNodeImpl { when(ConfigTransactionGetKnobsRequest req = waitNext(cti->getKnobs.getFuture())) { wait(getKnobs(self, req)); } - when(wait(self->kvStore->getError())) { ASSERT(false); } + when(wait(self->kvStore->getError())) { + ASSERT(false); + } } } } @@ -797,7 +799,9 @@ class ConfigNodeImpl { } req.reply.send(Void()); } - when(wait(self->kvStore->getError())) { ASSERT(false); } + when(wait(self->kvStore->getError())) { + ASSERT(false); + } } } } diff --git a/fdbserver/CoordinatedState.actor.cpp b/fdbserver/CoordinatedState.actor.cpp index 8168bdb40f..979e01ea4e 100644 --- a/fdbserver/CoordinatedState.actor.cpp +++ b/fdbserver/CoordinatedState.actor.cpp @@ -302,7 +302,9 @@ struct MovableCoordinatedStateImpl { ASSERT(self->lastValue.present() && self->lastCSValue.present()); TraceEvent("StartMove").detail("ConnectionString", nc.toString()); choose { - when(wait(creationTimeout)) { throw new_coordinators_timed_out(); } + when(wait(creationTimeout)) { + throw new_coordinators_timed_out(); + } when(Value ncInitialValue = wait(nccs.read())) { ASSERT(!ncInitialValue.size()); // The new coordinators must be uninitialized! 
} @@ -310,7 +312,9 @@ struct MovableCoordinatedStateImpl { TraceEvent("FinishedRead").detail("ConnectionString", nc.toString()); choose { - when(wait(creationTimeout)) { throw new_coordinators_timed_out(); } + when(wait(creationTimeout)) { + throw new_coordinators_timed_out(); + } when(wait(nccs.setExclusive( BinaryWriter::toValue(MovableValue(self->lastValue.get(), MovableValue::MovingFrom, diff --git a/fdbserver/Coordination.actor.cpp b/fdbserver/Coordination.actor.cpp index ce510225d0..fc8d0dc500 100644 --- a/fdbserver/Coordination.actor.cpp +++ b/fdbserver/Coordination.actor.cpp @@ -47,7 +47,9 @@ class LivenessChecker { ACTOR static Future checkStuck(LivenessChecker const* self) { loop { choose { - when(wait(delayUntil(self->lastTime.get() + self->threshold))) { return Void(); } + when(wait(delayUntil(self->lastTime.get() + self->threshold))) { + return Void(); + } when(wait(self->lastTime.onChange())) {} } } @@ -280,7 +282,9 @@ ACTOR Future remoteMonitorLeader(int* clientCount, when(wait(yieldedFuture(currentElectedLeaderOnChange))) { currentElectedLeaderOnChange = currentElectedLeader->onChange(); } - when(wait(delayJittered(SERVER_KNOBS->CLIENT_REGISTER_INTERVAL))) { break; } + when(wait(delayJittered(SERVER_KNOBS->CLIENT_REGISTER_INTERVAL))) { + break; + } } } diff --git a/fdbserver/DDRelocationQueue.actor.cpp b/fdbserver/DDRelocationQueue.actor.cpp index 5484299ceb..f832d55479 100644 --- a/fdbserver/DDRelocationQueue.actor.cpp +++ b/fdbserver/DDRelocationQueue.actor.cpp @@ -1150,7 +1150,7 @@ struct DDQueue : public IDDRelocationQueue { // canceled inflight relocateData. Launch the relocation for the rd. 
void launchQueuedWork(std::set> combined, const DDEnabledState* ddEnabledState) { - int startedHere = 0; + [[maybe_unused]] int startedHere = 0; double startTime = now(); // kick off relocators from items in the queue as need be std::set>::iterator it = combined.begin(); @@ -2530,7 +2530,9 @@ ACTOR Future dataDistributionQueue(Reference db, debug_setCheckRelocationDuration(false); } } - when(KeyRange done = waitNext(rangesComplete.getFuture())) { keysToLaunchFrom = done; } + when(KeyRange done = waitNext(rangesComplete.getFuture())) { + keysToLaunchFrom = done; + } when(wait(recordMetrics)) { Promise req; getAverageShardBytes.send(req); @@ -2633,7 +2635,9 @@ TEST_CASE("/DataDistribution/DDQueue/ServerCounterTrace") { std::cout << "Start trace counter unit test for " << duration << "s ...\n"; loop choose { when(wait(counterFuture)) {} - when(wait(finishFuture)) { break; } + when(wait(finishFuture)) { + break; + } when(wait(delayJittered(2.0))) { std::vector team(3); for (int i = 0; i < team.size(); ++i) { diff --git a/fdbserver/DDShardTracker.actor.cpp b/fdbserver/DDShardTracker.actor.cpp index 69bb6c853a..c5f7fd2648 100644 --- a/fdbserver/DDShardTracker.actor.cpp +++ b/fdbserver/DDShardTracker.actor.cpp @@ -1449,7 +1449,9 @@ ACTOR Future fetchShardMetricsList_impl(DataDistributionTracker* self, Get ACTOR Future fetchShardMetricsList(DataDistributionTracker* self, GetMetricsListRequest req) { choose { when(wait(fetchShardMetricsList_impl(self, req))) {} - when(wait(delay(SERVER_KNOBS->DD_SHARD_METRICS_TIMEOUT))) { req.reply.sendError(timed_out()); } + when(wait(delay(SERVER_KNOBS->DD_SHARD_METRICS_TIMEOUT))) { + req.reply.sendError(timed_out()); + } } return Void(); } @@ -1492,7 +1494,9 @@ ACTOR Future dataDistributionTracker(Reference in } loop choose { - when(Promise req = waitNext(getAverageShardBytes)) { req.send(self.getAverageShardBytes()); } + when(Promise req = waitNext(getAverageShardBytes)) { + req.send(self.getAverageShardBytes()); + } 
when(wait(loggingTrigger)) { TraceEvent("DDTrackerStats", self.distributorId) .detail("Shards", self.shards->size()) diff --git a/fdbserver/DDTeamCollection.actor.cpp b/fdbserver/DDTeamCollection.actor.cpp index b7b4504836..9cfdcff4bc 100644 --- a/fdbserver/DDTeamCollection.actor.cpp +++ b/fdbserver/DDTeamCollection.actor.cpp @@ -135,7 +135,9 @@ public: loop { choose { - when(wait(self->buildTeams())) { return Void(); } + when(wait(self->buildTeams())) { + return Void(); + } when(wait(self->restartTeamBuilder.onTrigger())) {} } } @@ -525,7 +527,9 @@ public: while (self->pauseWiggle && !self->pauseWiggle->get() && self->waitUntilRecruited.get()) { choose { when(wait(self->waitUntilRecruited.onChange() || self->pauseWiggle->onChange())) {} - when(wait(delay(SERVER_KNOBS->PERPETUAL_WIGGLE_DELAY, g_network->getCurrentTask()))) { break; } + when(wait(delay(SERVER_KNOBS->PERPETUAL_WIGGLE_DELAY, g_network->getCurrentTask()))) { + break; + } } } @@ -1361,7 +1365,9 @@ public: .detail("ConfigStoreType", self->configuration.storageServerStoreType) .detail("WrongStoreTypeRemoved", server->wrongStoreTypeToRemove.get()); } - when(wait(server->wakeUpTracker.getFuture())) { server->wakeUpTracker = Promise(); } + when(wait(server->wakeUpTracker.getFuture())) { + server->wakeUpTracker = Promise(); + } when(wait(storageMetadataTracker)) {} when(wait(server->ssVersionTooFarBehind.onChange())) {} when(wait(self->disableFailingLaggingServers.onChange())) {} @@ -2103,7 +2109,9 @@ public: .detail("ExtraHealthyTeamCount", extraTeamCount) .detail("HealthyTeamCount", self->healthyTeamCount); } - when(wait(pauseChanged)) { continue; } + when(wait(pauseChanged)) { + continue; + } } } } @@ -2619,7 +2627,9 @@ public: } } } - when(wait(recruitStorage->onChange())) { fCandidateWorker = Future(); } + when(wait(recruitStorage->onChange())) { + fCandidateWorker = Future(); + } when(wait(self->zeroHealthyTeams->onChange())) { if (!pendingTSSCheck && self->zeroHealthyTeams->get() && 
(self->isTssRecruiting || self->tss_info_by_pair.size() > 0)) { @@ -4066,7 +4076,6 @@ void DDTeamCollection::traceAllInfo(bool shouldPrint) const { void DDTeamCollection::rebuildMachineLocalityMap() { machineLocalityMap.clear(); - int numHealthyMachine = 0; for (auto& [_, machine] : machine_info) { if (machine->serversOnMachine.empty()) { TraceEvent(SevWarn, "RebuildMachineLocalityMapError") @@ -4087,7 +4096,6 @@ void DDTeamCollection::rebuildMachineLocalityMap() { } const LocalityEntry& localityEntry = machineLocalityMap.add(locality, &representativeServer->getId()); machine->localityEntry = localityEntry; - ++numHealthyMachine; } } @@ -5880,43 +5888,43 @@ TEST_CASE("/DataDistribution/GetTeam/DeprioritizeWigglePausedTeam") { } TEST_CASE("/DataDistribution/StorageWiggler/NextIdWithMinAge") { - state StorageWiggler wiggler(nullptr); + state Reference wiggler = makeReference(nullptr); state double startTime = now(); - wiggler.addServer(UID(1, 0), - StorageMetadataType(startTime - SERVER_KNOBS->DD_STORAGE_WIGGLE_MIN_SS_AGE_SEC + 5.0, - KeyValueStoreType::SSD_BTREE_V2)); - wiggler.addServer(UID(2, 0), - StorageMetadataType( - startTime + SERVER_KNOBS->DD_STORAGE_WIGGLE_MIN_SS_AGE_SEC, KeyValueStoreType::MEMORY, true)); - wiggler.addServer(UID(3, 0), StorageMetadataType(startTime - 5.0, KeyValueStoreType::SSD_ROCKSDB_V1, true)); - wiggler.addServer(UID(4, 0), - StorageMetadataType(startTime - SERVER_KNOBS->DD_STORAGE_WIGGLE_MIN_SS_AGE_SEC - 1.0, - KeyValueStoreType::SSD_BTREE_V2)); + wiggler->addServer(UID(1, 0), + StorageMetadataType(startTime - SERVER_KNOBS->DD_STORAGE_WIGGLE_MIN_SS_AGE_SEC + 5.0, + KeyValueStoreType::SSD_BTREE_V2)); + wiggler->addServer(UID(2, 0), + StorageMetadataType(startTime + SERVER_KNOBS->DD_STORAGE_WIGGLE_MIN_SS_AGE_SEC, + KeyValueStoreType::MEMORY, + true)); + wiggler->addServer(UID(3, 0), StorageMetadataType(startTime - 5.0, KeyValueStoreType::SSD_ROCKSDB_V1, true)); + wiggler->addServer(UID(4, 0), + StorageMetadataType(startTime - 
SERVER_KNOBS->DD_STORAGE_WIGGLE_MIN_SS_AGE_SEC - 1.0, + KeyValueStoreType::SSD_BTREE_V2)); std::vector> correctResult{ UID(3, 0), UID(2, 0), UID(4, 0), Optional() }; for (int i = 0; i < 4; ++i) { - auto id = wiggler.getNextServerId(); + auto id = wiggler->getNextServerId(); ASSERT(id == correctResult[i]); } { std::cout << "Finish Initial Check. Start test getNextWigglingServerID() loop...\n"; // test the getNextWigglingServerID() loop - UID id = wait(DDTeamCollectionImpl::getNextWigglingServerID(Reference::addRef(&wiggler))); + UID id = wait(DDTeamCollectionImpl::getNextWigglingServerID(wiggler)); ASSERT(id == UID(1, 0)); } std::cout << "Test after addServer() ...\n"; - state Future nextFuture = - DDTeamCollectionImpl::getNextWigglingServerID(Reference::addRef(&wiggler)); + state Future nextFuture = DDTeamCollectionImpl::getNextWigglingServerID(wiggler); ASSERT(!nextFuture.isReady()); startTime = now(); StorageMetadataType metadata(startTime + SERVER_KNOBS->DD_STORAGE_WIGGLE_MIN_SS_AGE_SEC + 100.0, KeyValueStoreType::SSD_BTREE_V2); - wiggler.addServer(UID(5, 0), metadata); + wiggler->addServer(UID(5, 0), metadata); ASSERT(!nextFuture.isReady()); std::cout << "Test after updateServer() ...\n"; - StorageWiggler* ptr = &wiggler; + StorageWiggler* ptr = wiggler.getPtr(); wait(trigger( [ptr]() { ptr->updateMetadata(UID(5, 0), @@ -5933,22 +5941,22 @@ TEST_CASE("/DataDistribution/StorageWiggler/NextIdWithMinAge") { TEST_CASE("/DataDistribution/StorageWiggler/NextIdWithTSS") { state std::unique_ptr collection = DDTeamCollectionUnitTest::testMachineTeamCollection(1, Reference(new PolicyOne()), 5); - state StorageWiggler wiggler(collection.get()); + state Reference wiggler = makeReference(collection.get()); std::cout << "Test when need TSS ... 
\n"; collection->configuration.usableRegions = 1; collection->configuration.desiredTSSCount = 1; state double startTime = now(); - wiggler.addServer(UID(1, 0), - StorageMetadataType(startTime + SERVER_KNOBS->DD_STORAGE_WIGGLE_MIN_SS_AGE_SEC + 150.0, - KeyValueStoreType::SSD_BTREE_V2)); - wiggler.addServer(UID(2, 0), - StorageMetadataType(startTime + SERVER_KNOBS->DD_STORAGE_WIGGLE_MIN_SS_AGE_SEC + 150.0, - KeyValueStoreType::SSD_BTREE_V2)); - ASSERT(!wiggler.getNextServerId(true).present()); - ASSERT(wiggler.getNextServerId(collection->reachTSSPairTarget()) == UID(1, 0)); - UID id = wait(DDTeamCollectionImpl::getNextWigglingServerID( - Reference::addRef(&wiggler), Optional(), Optional(), collection.get())); + wiggler->addServer(UID(1, 0), + StorageMetadataType(startTime + SERVER_KNOBS->DD_STORAGE_WIGGLE_MIN_SS_AGE_SEC + 150.0, + KeyValueStoreType::SSD_BTREE_V2)); + wiggler->addServer(UID(2, 0), + StorageMetadataType(startTime + SERVER_KNOBS->DD_STORAGE_WIGGLE_MIN_SS_AGE_SEC + 150.0, + KeyValueStoreType::SSD_BTREE_V2)); + ASSERT(!wiggler->getNextServerId(true).present()); + ASSERT(wiggler->getNextServerId(collection->reachTSSPairTarget()) == UID(1, 0)); + UID id = wait( + DDTeamCollectionImpl::getNextWigglingServerID(wiggler, Optional(), Optional(), collection.get())); ASSERT(now() - startTime < SERVER_KNOBS->DD_STORAGE_WIGGLE_MIN_SS_AGE_SEC + 150.0); ASSERT(id == UID(2, 0)); return Void(); diff --git a/fdbserver/GrvProxyServer.actor.cpp b/fdbserver/GrvProxyServer.actor.cpp index cac2bc24d1..2967341c54 100644 --- a/fdbserver/GrvProxyServer.actor.cpp +++ b/fdbserver/GrvProxyServer.actor.cpp @@ -351,7 +351,9 @@ ACTOR Future globalConfigRequestServer(GrvProxyData* grvProxyData, GrvProx Void()) && delay(SERVER_KNOBS->GLOBAL_CONFIG_REFRESH_INTERVAL); } - when(wait(actors.getResult())) { ASSERT(false); } + when(wait(actors.getResult())) { + ASSERT(false); + } } } } diff --git a/fdbserver/KeyValueStoreSQLite.actor.cpp b/fdbserver/KeyValueStoreSQLite.actor.cpp index 
664ff262a5..88905eb096 100644 --- a/fdbserver/KeyValueStoreSQLite.actor.cpp +++ b/fdbserver/KeyValueStoreSQLite.actor.cpp @@ -721,7 +721,7 @@ struct IntKeyCursor { db.checkError("BtreeCloseCursor", sqlite3BtreeCloseCursor(cursor)); } catch (...) { } - delete[](char*) cursor; + delete[] (char*)cursor; } } }; @@ -759,7 +759,7 @@ struct RawCursor { } catch (...) { TraceEvent(SevError, "RawCursorDestructionError").log(); } - delete[](char*) cursor; + delete[] (char*)cursor; } } void moveFirst() { @@ -1912,14 +1912,11 @@ private: readThreads[i].clear(); } void checkFreePages() { - int iterations = 0; - int64_t freeListSize = freeListPages; while (!freeTableEmpty && freeListSize < SERVER_KNOBS->CHECK_FREE_PAGE_AMOUNT) { int deletedPages = cursor->lazyDelete(SERVER_KNOBS->CHECK_FREE_PAGE_AMOUNT); freeTableEmpty = (deletedPages != SERVER_KNOBS->CHECK_FREE_PAGE_AMOUNT); springCleaningStats.lazyDeletePages += deletedPages; - ++iterations; freeListSize = conn.freePages(); } diff --git a/fdbserver/KeyValueStoreShardedRocksDB.actor.cpp b/fdbserver/KeyValueStoreShardedRocksDB.actor.cpp index 5c36fb6d6f..94493b8ab5 100644 --- a/fdbserver/KeyValueStoreShardedRocksDB.actor.cpp +++ b/fdbserver/KeyValueStoreShardedRocksDB.actor.cpp @@ -639,7 +639,6 @@ int readRangeInDb(PhysicalShard* shard, const KeyRangeRef range, int rowLimit, i return 0; } - int accumulatedRows = 0; int accumulatedBytes = 0; rocksdb::Status s; @@ -651,7 +650,6 @@ int readRangeInDb(PhysicalShard* shard, const KeyRangeRef range, int rowLimit, i cursor->Seek(toSlice(range.begin)); while (cursor->Valid() && toStringRef(cursor->key()) < range.end) { KeyValueRef kv(toStringRef(cursor->key()), toStringRef(cursor->value())); - ++accumulatedRows; accumulatedBytes += sizeof(KeyValueRef) + kv.expectedSize(); result->push_back_deep(result->arena(), kv); // Calling `cursor->Next()` is potentially expensive, so short-circut here just in case. 
@@ -671,7 +669,6 @@ int readRangeInDb(PhysicalShard* shard, const KeyRangeRef range, int rowLimit, i } while (cursor->Valid() && toStringRef(cursor->key()) >= range.begin) { KeyValueRef kv(toStringRef(cursor->key()), toStringRef(cursor->value())); - ++accumulatedRows; accumulatedBytes += sizeof(KeyValueRef) + kv.expectedSize(); result->push_back_deep(result->arena(), kv); // Calling `cursor->Prev()` is potentially expensive, so short-circut here just in case. diff --git a/fdbserver/LeaderElection.actor.cpp b/fdbserver/LeaderElection.actor.cpp index 40c093093e..68a947f2de 100644 --- a/fdbserver/LeaderElection.actor.cpp +++ b/fdbserver/LeaderElection.actor.cpp @@ -210,8 +210,12 @@ ACTOR Future tryBecomeLeaderInternal(ServerCoordinators coordinators, TraceEvent("LeaderBadCandidateTimeout", myInfo.changeID).log(); break; } - when(wait(candidacies)) { ASSERT(false); } - when(wait(asyncPriorityInfo->onChange())) { break; } + when(wait(candidacies)) { + ASSERT(false); + } + when(wait(asyncPriorityInfo->onChange())) { + break; + } } } diff --git a/fdbserver/LocalConfiguration.actor.cpp b/fdbserver/LocalConfiguration.actor.cpp index c2cdaf7479..ffa6c090f5 100644 --- a/fdbserver/LocalConfiguration.actor.cpp +++ b/fdbserver/LocalConfiguration.actor.cpp @@ -327,8 +327,12 @@ class LocalConfigurationImpl { ACTOR static Future consume(LocalConfigurationImpl* self, ConfigBroadcastInterface broadcaster) { loop { choose { - when(wait(consumeInternal(self, broadcaster))) { ASSERT(false); } - when(wait(self->kvStore->getError())) { ASSERT(false); } + when(wait(consumeInternal(self, broadcaster))) { + ASSERT(false); + } + when(wait(self->kvStore->getError())) { + ASSERT(false); + } } } } diff --git a/fdbserver/LogRouter.actor.cpp b/fdbserver/LogRouter.actor.cpp index 5f2b616df6..6fc2486cd4 100644 --- a/fdbserver/LogRouter.actor.cpp +++ b/fdbserver/LogRouter.actor.cpp @@ -60,11 +60,8 @@ struct LogRouterData { TaskPriority taskID) { while (!self->version_messages.empty() && 
self->version_messages.front().first < before) { Version version = self->version_messages.front().first; - int64_t messagesErased = 0; while (!self->version_messages.empty() && self->version_messages.front().first == version) { - ++messagesErased; - self->version_messages.pop_front(); } @@ -787,7 +784,9 @@ ACTOR Future logRouter(TLogInterface interf, .detail("Locality", req.locality); state Future core = logRouterCore(interf, req, db); loop choose { - when(wait(core)) { return Void(); } + when(wait(core)) { + return Void(); + } when(wait(checkRemoved(db, req.recoveryCount, interf))) {} } } catch (Error& e) { diff --git a/fdbserver/LogSystemDiskQueueAdapter.actor.cpp b/fdbserver/LogSystemDiskQueueAdapter.actor.cpp index 12e64a5f1e..6801767335 100644 --- a/fdbserver/LogSystemDiskQueueAdapter.actor.cpp +++ b/fdbserver/LogSystemDiskQueueAdapter.actor.cpp @@ -43,7 +43,9 @@ public: if (!self->cursor->hasMessage()) { loop { choose { - when(wait(self->cursor->getMore())) { break; } + when(wait(self->cursor->getMore())) { + break; + } when(wait(self->localityChanged)) { self->cursor = self->logSystem->peekTxs( UID(), diff --git a/fdbserver/LogSystemPeekCursor.actor.cpp b/fdbserver/LogSystemPeekCursor.actor.cpp index e05dc6c967..e99e259981 100644 --- a/fdbserver/LogSystemPeekCursor.actor.cpp +++ b/fdbserver/LogSystemPeekCursor.actor.cpp @@ -452,7 +452,9 @@ ACTOR Future serverPeekGetMore(ILogSystem::ServerPeekCursor* self, TaskPri .detail("Popped", res.popped.present() ? 
res.popped.get() : 0); return Void(); } - when(wait(self->interf->onChange())) { self->onlySpilled = false; } + when(wait(self->interf->onChange())) { + self->onlySpilled = false; + } } } } catch (Error& e) { diff --git a/fdbserver/MetricClient.actor.cpp b/fdbserver/MetricClient.actor.cpp new file mode 100644 index 0000000000..9617a081d7 --- /dev/null +++ b/fdbserver/MetricClient.actor.cpp @@ -0,0 +1,147 @@ +/* + * MetricClient.actor.cpp + * + * This source file is part of the FoundationDB open source project + * + * Copyright 2013-2022 Apple Inc. and the FoundationDB project authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#include "fdbserver/MetricClient.h" +#include "fdbrpc/Stats.h" +#include "flow/FastRef.h" +#include "flow/IRandom.h" +#include "flow/Knobs.h" +#include "flow/OTELMetrics.h" +#include "flow/TDMetric.actor.h" +#include "flow/Msgpack.h" +#include "flow/Trace.h" +#include "flow/flow.h" +#include +#ifndef WIN32 +#include +#endif +#include "flow/actorcompiler.h" +#include "flow/network.h" + +UDPMetricClient::UDPMetricClient() + : socket_fd(-1), model(knobToMetricModel(FLOW_KNOBS->METRICS_DATA_MODEL)), + buf{ MsgpackBuffer{ .buffer = std::make_unique(1024), .data_size = 0, .buffer_size = 1024 } }, + address((model == STATSD) ? FLOW_KNOBS->STATSD_UDP_EMISSION_ADDR : FLOW_KNOBS->OTEL_UDP_EMISSION_ADDR), + port((model == STATSD) ? 
FLOW_KNOBS->STATSD_UDP_EMISSION_PORT : FLOW_KNOBS->OTEL_UDP_EMISSION_PORT) { + NetworkAddress destAddress = NetworkAddress::parse(address + ":" + std::to_string(port)); + socket = INetworkConnections::net()->createUDPSocket(destAddress); + model = knobToMetricModel(FLOW_KNOBS->METRICS_DATA_MODEL); +} + +// Since MSG_DONTWAIT isn't defined for Windows, we need to add a +// ifndef guard here to avoid any compilation issues +void UDPMetricClient::send_packet(int fd, const void* data, size_t len) { +#ifndef WIN32 + ::send(fd, data, len, MSG_DONTWAIT); +#endif +} + +void UDPMetricClient::send(MetricCollection* metrics) { + if (!socket.isReady()) { + return; + } + socket_fd = socket.get()->native_handle(); + if (socket_fd == -1) + return; + if (model == OTLP) { + std::vector> sums; + std::vector gauges; + + // Define custom serialize functions + auto f_sums = [](const std::vector& vec, MsgpackBuffer& buf) { + typedef void (*func_ptr)(const OTEL::OTELSum&, MsgpackBuffer&); + func_ptr f = OTEL::serialize; + serialize_vector(vec, buf, f); + }; + + auto f_hists = [](const std::vector& vec, MsgpackBuffer& buf) { + typedef void (*func_ptr)(const OTEL::OTELHistogram&, MsgpackBuffer&); + func_ptr f = OTEL::serialize; + serialize_vector(vec, buf, f); + }; + + auto f_gauge = [](const std::vector& vec, MsgpackBuffer& buf) { + typedef void (*func_ptr)(const OTEL::OTELGauge&, MsgpackBuffer&); + func_ptr f = OTEL::serialize; + serialize_vector(vec, buf, f); + }; + + std::vector currentSums; + size_t current_msgpack = 0; + for (const auto& [_, s] : metrics->sumMap) { + if (current_msgpack < MAX_OTELSUM_PACKET_SIZE) { + currentSums.push_back(std::move(s)); + current_msgpack += s.getMsgpackBytes(); + } else { + sums.push_back(std::move(currentSums)); + currentSums.clear(); + current_msgpack = 0; + } + } + if (!sums.empty()) { + for (const auto& currSums : sums) { + serialize_ext(currSums, buf, OTEL::OTELMetricType::Sum, f_sums); + send_packet(socket_fd, buf.buffer.get(), buf.data_size); 
+ int error = errno; + TraceEvent("MetricsSumUdpErrno", UID()).detail("Errno", error); + buf.reset(); + } + metrics->sumMap.clear(); + } + + // Each histogram is sent in a separate packet because of its large size. + // Expected DDSketch size is ~4200 entries * 9 bytes = 37800 + for (const auto& [_, h] : metrics->histMap) { + const std::vector singleHist{ h }; + serialize_ext(singleHist, buf, OTEL::OTELMetricType::Hist, f_hists); + send_packet(socket_fd, buf.buffer.get(), buf.data_size); + int error = errno; + TraceEvent("MetricsHistUdpErrno", UID()).detail("Errno", error); + buf.reset(); + } + + metrics->histMap.clear(); + + for (const auto& [_, g] : metrics->gaugeMap) { + gauges.push_back(g); + } + if (!gauges.empty()) { + serialize_ext(gauges, buf, OTEL::OTELMetricType::Gauge, f_gauge); + send_packet(socket_fd, buf.buffer.get(), buf.data_size); + int error = errno; + TraceEvent("MetricsGaugeUdpErrno", UID()).detail("Errno", error); + metrics->gaugeMap.clear(); + buf.reset(); + } + } else if (model == MetricsDataModel::STATSD) { + std::string messages; + for (const auto& msg : metrics->statsd_message) { + // Account for max udp packet size (+1 since we add '\n') + if (messages.size() + msg.size() + 1 < IUDPSocket::MAX_PACKET_SIZE) { + messages += msg + '\n'; + } else { + // Packet is full: flush the accumulated statsd messages, then start + // a new packet with the current message so it isn't dropped. + send_packet(socket_fd, messages.data(), messages.size()); + messages.clear(); + messages += msg + '\n'; + } + } + if (!messages.empty()) { + send_packet(socket_fd, messages.data(), messages.size()); + } + metrics->statsd_message.clear(); + } +} \ No newline at end of file diff --git a/fdbserver/MetricLogger.actor.cpp b/fdbserver/MetricLogger.actor.cpp index ef6a2601b1..95481b6180 100644 --- a/fdbserver/MetricLogger.actor.cpp +++ b/fdbserver/MetricLogger.actor.cpp @@ -19,13 +19,27 @@ */ #include +#include +#include +#include "msgpack.hpp" +#include +#include +#include "fdbrpc/Stats.h" +#include "flow/Msgpack.h" #include "flow/ApiVersion.h" +#include "flow/IRandom.h" +#include "flow/Knobs.h" +#include
"flow/OTELMetrics.h" +#include "flow/SystemMonitor.h" #include "flow/UnitTest.h" #include "flow/TDMetric.actor.h" #include "fdbclient/DatabaseContext.h" #include "fdbclient/ReadYourWrites.h" #include "fdbclient/KeyBackedTypes.h" #include "fdbserver/MetricLogger.actor.h" +#include "fdbserver/MetricClient.h" +#include "flow/flow.h" +#include "flow/network.h" #include "flow/actorcompiler.h" // This must be the last #include. struct MetricsRule { @@ -189,7 +203,7 @@ public: }; ACTOR Future dumpMetrics(Database cx, MetricsConfig* config, TDMetricCollection* collection) { - state MetricUpdateBatch batch; + state MetricBatch batch; state Standalone mk; ASSERT(collection != nullptr); mk.prefix = StringRef(mk.arena(), config->space.key()); @@ -225,8 +239,8 @@ ACTOR Future dumpMetrics(Database cx, MetricsConfig* config, TDMetricColle state std::map> results; // Call all of the callbacks, map each index to its resulting future - for (int i = 0, iend = batch.callbacks.size(); i < iend; ++i) - results[i] = batch.callbacks[i](&mdb, &batch); + for (int i = 0, iend = batch.scope.callbacks.size(); i < iend; ++i) + results[i] = batch.scope.callbacks[i](&mdb, &batch.scope); loop { state std::map>::iterator cb = results.begin(); @@ -249,7 +263,7 @@ ACTOR Future dumpMetrics(Database cx, MetricsConfig* config, TDMetricColle // Otherwise, wait to retry wait(cbtr.onError(lastError)); for (auto& cb : results) - cb.second = batch.callbacks[cb.first](&mdb, &batch); + cb.second = batch.scope.callbacks[cb.first](&mdb, &batch.scope); } // If there are more rolltimes then next dump is now, otherwise if no metrics are enabled then it is @@ -267,19 +281,19 @@ ACTOR Future dumpMetrics(Database cx, MetricsConfig* config, TDMetricColle loop { tr.setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS); try { - for (auto& i : batch.inserts) { + for (auto& i : batch.scope.inserts) { // fprintf(stderr, "%s: dump insert: %s\n", collection->address.toString().c_str(), // 
printable(allInsertions[i].key).c_str()); tr.set(i.key, i.value()); } - for (auto& a : batch.appends) { + for (auto& a : batch.scope.appends) { // fprintf(stderr, "%s: dump append: %s\n", collection->address.toString().c_str(), // printable(allAppends[i].key).c_str()); tr.atomicOp(a.key, a.value(), MutationRef::AppendIfFits); } - for (auto& u : batch.updates) { + for (auto& u : batch.scope.updates) { // fprintf(stderr, "%s: dump update: %s\n", collection->address.toString().c_str(), // printable(allUpdates[i].first).c_str()); tr.set(u.first, u.second); @@ -403,6 +417,71 @@ ACTOR Future runMetrics(Future fcx, Key prefix) { return Void(); } +ACTOR Future startMetricsSimulationServer(MetricsDataModel model) { + if (model == MetricsDataModel::NONE) { + return Void{}; + } + state uint32_t port = 0; + // Select the UDP port for the configured emission protocol. Each case must + // break: unchecked fallthrough would overwrite the port with a later value. + switch (model) { + case MetricsDataModel::STATSD: + port = FLOW_KNOBS->STATSD_UDP_EMISSION_PORT; + break; + case MetricsDataModel::OTLP: + port = FLOW_KNOBS->OTEL_UDP_EMISSION_PORT; + break; + case MetricsDataModel::NONE: + port = 0; + break; + } + TraceEvent(SevInfo, "MetricsUDPServerStarted").detail("Address", "127.0.0.1").detail("Port", port); + state NetworkAddress localAddress = NetworkAddress::parse("127.0.0.1:" + std::to_string(port)); + state Reference serverSocket = wait(INetworkConnections::net()->createUDPSocket(localAddress)); + serverSocket->bind(localAddress); + state Standalone packetString = makeString(IUDPSocket::MAX_PACKET_SIZE); + state uint8_t* packet = mutateString(packetString); + + loop { + int size = wait(serverSocket->receive(packet, packet + IUDPSocket::MAX_PACKET_SIZE)); + auto message = packetString.substr(0, size); + + // Let's just focus on statsd for now. For statsd, the message is expected to be separated by newlines. We need + to break each statsd metric and verify them individually.
+ if (model == MetricsDataModel::STATSD) { + std::string statsd_message = message.toString(); + auto metrics = splitString(statsd_message, "\n"); + for (const auto& metric : metrics) { + ASSERT(verifyStatsdMessage(metric)); + } + } else if (model == MetricsDataModel::OTLP) { + msgpack::object_handle result; + msgpack::unpack(result, reinterpret_cast(packet), size); + } + } +} + +ACTOR Future runMetrics() { + state MetricCollection* metrics = nullptr; + MetricsDataModel model = knobToMetricModel(FLOW_KNOBS->METRICS_DATA_MODEL); + if (model == MetricsDataModel::NONE) { + return Void{}; + } + state UDPMetricClient metricClient; + state Future metricsActor; + if (g_network->isSimulated()) { + metricsActor = startMetricsSimulationServer(model); + } + loop { + metrics = MetricCollection::getMetricCollection(); + if (metrics != nullptr) { + + metricClient.send(metrics); + } + wait(delay(FLOW_KNOBS->METRICS_EMISSION_INTERVAL)); + } +} + TEST_CASE("/fdbserver/metrics/TraceEvents") { auto getenv2 = [](const char* s) -> const char* { s = getenv(s); diff --git a/fdbserver/OldTLogServer_4_6.actor.cpp b/fdbserver/OldTLogServer_4_6.actor.cpp index 45ccd94a85..6921e4d4e0 100644 --- a/fdbserver/OldTLogServer_4_6.actor.cpp +++ b/fdbserver/OldTLogServer_4_6.actor.cpp @@ -768,7 +768,9 @@ ACTOR Future updateStorage(TLogData* self) { ACTOR Future updateStorageLoop(TLogData* self) { wait(delay(0, TaskPriority::UpdateStorage)); - loop { wait(updateStorage(self)); } + loop { + wait(updateStorage(self)); + } } void commitMessages(Reference self, @@ -1592,13 +1594,17 @@ ACTOR Future restorePersistentState(TLogData* self, LocalityData locality) choose { when(wait(updateStorage(self))) {} - when(wait(allRemoved)) { throw worker_removed(); } + when(wait(allRemoved)) { + throw worker_removed(); + } } } } } } - when(wait(allRemoved)) { throw worker_removed(); } + when(wait(allRemoved)) { + throw worker_removed(); + } } } } catch (Error& e) { diff --git a/fdbserver/OldTLogServer_6_0.actor.cpp 
b/fdbserver/OldTLogServer_6_0.actor.cpp index dc737c0737..f1b3381f9e 100644 --- a/fdbserver/OldTLogServer_6_0.actor.cpp +++ b/fdbserver/OldTLogServer_6_0.actor.cpp @@ -1014,7 +1014,9 @@ ACTOR Future updateStorage(TLogData* self) { ACTOR Future updateStorageLoop(TLogData* self) { wait(delay(0, TaskPriority::UpdateStorage)); - loop { wait(updateStorage(self)); } + loop { + wait(updateStorage(self)); + } } void commitMessages(TLogData* self, @@ -2071,7 +2073,9 @@ ACTOR Future pullAsyncData(TLogData* self, while (!endVersion.present() || logData->version.get() < endVersion.get()) { loop { choose { - when(wait(r ? r->getMore(TaskPriority::TLogCommit) : Never())) { break; } + when(wait(r ? r->getMore(TaskPriority::TLogCommit) : Never())) { + break; + } when(wait(dbInfoChange)) { if (logData->logSystem->get()) { r = logData->logSystem->get()->peek(logData->logId, tagAt, endVersion, tags, true); @@ -2503,13 +2507,17 @@ ACTOR Future restorePersistentState(TLogData* self, choose { when(wait(updateStorage(self))) {} - when(wait(allRemoved)) { throw worker_removed(); } + when(wait(allRemoved)) { + throw worker_removed(); + } } } } } } - when(wait(allRemoved)) { throw worker_removed(); } + when(wait(allRemoved)) { + throw worker_removed(); + } } } } catch (Error& e) { @@ -2831,7 +2839,9 @@ ACTOR Future tLog(IKeyValueStore* persistentData, forwardPromise(req.reply, self.tlogCache.get(req.recruitmentID)); } } - when(wait(error)) { throw internal_error(); } + when(wait(error)) { + throw internal_error(); + } when(wait(activeSharedChange)) { if (activeSharedTLog->get() == tlogId) { self.targetVolatileBytes = SERVER_KNOBS->TLOG_SPILL_THRESHOLD; diff --git a/fdbserver/OldTLogServer_6_2.actor.cpp b/fdbserver/OldTLogServer_6_2.actor.cpp index f6b95290e4..58c1f95555 100644 --- a/fdbserver/OldTLogServer_6_2.actor.cpp +++ b/fdbserver/OldTLogServer_6_2.actor.cpp @@ -1232,7 +1232,9 @@ ACTOR Future updateStorage(TLogData* self) { ACTOR Future updateStorageLoop(TLogData* self) { wait(delay(0, 
TaskPriority::UpdateStorage)); - loop { wait(updateStorage(self)); } + loop { + wait(updateStorage(self)); + } } void commitMessages(TLogData* self, @@ -2530,7 +2532,9 @@ ACTOR Future pullAsyncData(TLogData* self, while (!endVersion.present() || logData->version.get() < endVersion.get()) { loop { choose { - when(wait(r ? r->getMore(TaskPriority::TLogCommit) : Never())) { break; } + when(wait(r ? r->getMore(TaskPriority::TLogCommit) : Never())) { + break; + } when(wait(dbInfoChange)) { if (logData->logSystem->get()) { r = logData->logSystem->get()->peek(logData->logId, tagAt, endVersion, tags, parallelGetMore); @@ -2980,7 +2984,9 @@ ACTOR Future restorePersistentState(TLogData* self, choose { when(wait(updateStorage(self))) {} - when(wait(allRemoved)) { throw worker_removed(); } + when(wait(allRemoved)) { + throw worker_removed(); + } } } } else { @@ -2991,7 +2997,9 @@ ACTOR Future restorePersistentState(TLogData* self, } } } - when(wait(allRemoved)) { throw worker_removed(); } + when(wait(allRemoved)) { + throw worker_removed(); + } } } } catch (Error& e) { @@ -3319,7 +3327,9 @@ ACTOR Future tLog(IKeyValueStore* persistentData, forwardPromise(req.reply, self.tlogCache.get(req.recruitmentID)); } } - when(wait(error)) { throw internal_error(); } + when(wait(error)) { + throw internal_error(); + } when(wait(activeSharedTLog->onChange())) { if (activeSharedTLog->get() == tlogId) { self.targetVolatileBytes = SERVER_KNOBS->TLOG_SPILL_THRESHOLD; diff --git a/fdbserver/QuietDatabase.actor.cpp b/fdbserver/QuietDatabase.actor.cpp index 07cdae9dd4..a85cc863a3 100644 --- a/fdbserver/QuietDatabase.actor.cpp +++ b/fdbserver/QuietDatabase.actor.cpp @@ -359,7 +359,7 @@ int64_t extractMaxQueueSize(const std::vector>& message TraceEvent("QuietDatabaseGotMaxStorageServerQueueSize") .detail("Stage", "MaxComputed") .detail("Max", maxQueueSize) - .detail("MaxQueueServer", format("%016" PRIx64, maxQueueServer.first())); + .detail("MaxQueueServer", maxQueueServer); return maxQueueSize; } 
@@ -380,14 +380,14 @@ ACTOR Future getStorageMetricsTimeout(UID storage, WorkerInter when(wait(timeout)) { TraceEvent("QuietDatabaseFailure") .detail("Reason", "Could not fetch StorageMetrics") - .detail("Storage", format("%016" PRIx64, storage.first())); + .detail("Storage", storage); throw timed_out(); } } if (retries > 30) { TraceEvent("QuietDatabaseFailure") .detail("Reason", "Could not fetch StorageMetrics x30") - .detail("Storage", format("%016" PRIx64, storage.first())) + .detail("Storage", storage) .detail("Version", version); throw timed_out(); } diff --git a/fdbserver/RemoteIKeyValueStore.actor.cpp b/fdbserver/RemoteIKeyValueStore.actor.cpp index 63471825e8..cdb18c4af4 100644 --- a/fdbserver/RemoteIKeyValueStore.actor.cpp +++ b/fdbserver/RemoteIKeyValueStore.actor.cpp @@ -56,7 +56,9 @@ struct AfterReturn { ACTOR void sendCommitReply(IKVSCommitRequest commitReq, IKeyValueStore* kvStore, Future onClosed) { try { choose { - when(wait(onClosed)) { commitReq.reply.sendError(remote_kvs_cancelled()); } + when(wait(onClosed)) { + commitReq.reply.sendError(remote_kvs_cancelled()); + } when(wait(kvStore->commit(commitReq.sequential))) { StorageBytes storageBytes = kvStore->getStorageBytes(); commitReq.reply.send(IKVSCommitReply(storageBytes)); @@ -102,8 +104,12 @@ ACTOR Future runIKVS(OpenKVStoreRequest openReq, IKVSInterface ikvsInterfa when(IKVSGetValueRequest getReq = waitNext(ikvsInterface.getValue.getFuture())) { actors.add(cancellableForwardPromise(getReq.reply, kvStore->readValue(getReq.key, getReq.options))); } - when(IKVSSetRequest req = waitNext(ikvsInterface.set.getFuture())) { kvStore->set(req.keyValue); } - when(IKVSClearRequest req = waitNext(ikvsInterface.clear.getFuture())) { kvStore->clear(req.range); } + when(IKVSSetRequest req = waitNext(ikvsInterface.set.getFuture())) { + kvStore->set(req.keyValue); + } + when(IKVSClearRequest req = waitNext(ikvsInterface.clear.getFuture())) { + kvStore->clear(req.range); + } when(IKVSCommitRequest commitReq = 
waitNext(ikvsInterface.commit.getFuture())) { sendCommitReply(commitReq, kvStore, onClosed.getFuture()); } diff --git a/fdbserver/Resolver.actor.cpp b/fdbserver/Resolver.actor.cpp index 54087f5c3b..603afc28b0 100644 --- a/fdbserver/Resolver.actor.cpp +++ b/fdbserver/Resolver.actor.cpp @@ -251,7 +251,9 @@ ACTOR Future resolveBatch(Reference self, } choose { - when(wait(self->version.whenAtLeast(req.prevVersion))) { break; } + when(wait(self->version.whenAtLeast(req.prevVersion))) { + break; + } when(wait(self->checkNeededVersion.onTrigger())) {} } } @@ -750,7 +752,9 @@ ACTOR Future resolver(ResolverInterface resolver, try { state Future core = resolverCore(resolver, initReq, db); loop choose { - when(wait(core)) { return Void(); } + when(wait(core)) { + return Void(); + } when(wait(checkRemoved(db, initReq.recoveryCount, resolver))) {} } } catch (Error& e) { diff --git a/fdbserver/RestoreApplier.actor.cpp b/fdbserver/RestoreApplier.actor.cpp index afa1487cd1..1f6f66870b 100644 --- a/fdbserver/RestoreApplier.actor.cpp +++ b/fdbserver/RestoreApplier.actor.cpp @@ -284,12 +284,10 @@ ACTOR static Future getAndComputeStagingKeys( .detail("GetKeys", incompleteStagingKeys.size()) .detail("DelayTime", delayTime); ASSERT(!g_network->isSimulated()); - int i = 0; for (auto& key : incompleteStagingKeys) { MutationRef m(MutationRef::SetValue, key.first, "0"_sr); key.second->second.add(m, LogMessageVersion(1)); key.second->second.precomputeResult("GetAndComputeStagingKeys", applierID, batchIndex); - i++; } return Void(); } diff --git a/fdbserver/RestoreLoader.actor.cpp b/fdbserver/RestoreLoader.actor.cpp index c02d91eca1..2b857dff79 100644 --- a/fdbserver/RestoreLoader.actor.cpp +++ b/fdbserver/RestoreLoader.actor.cpp @@ -303,7 +303,9 @@ ACTOR Future restoreLoaderCore(RestoreLoaderInterface loaderInterf, TraceEvent("FastRestoreLoaderCoreExitRole", self->id()); break; } - when(wait(error)) { TraceEvent("FastRestoreLoaderActorCollectionError", self->id()); } + when(wait(error)) { + 
TraceEvent("FastRestoreLoaderActorCollectionError", self->id()); + } } } catch (Error& e) { bool isError = e.code() != error_code_operation_cancelled; // == error_code_broken_promise diff --git a/fdbserver/RocksDBLogForwarder.actor.cpp b/fdbserver/RocksDBLogForwarder.actor.cpp index 53304f6362..8bcb51356b 100644 --- a/fdbserver/RocksDBLogForwarder.actor.cpp +++ b/fdbserver/RocksDBLogForwarder.actor.cpp @@ -72,7 +72,9 @@ void logTraceEvent(const RocksDBLogRecord& record) { ACTOR Future rocksDBPeriodicallyLogger(RocksDBLogger* pRecords) { loop choose { - when(wait(delay(0.1))) { pRecords->consume(); } + when(wait(delay(0.1))) { + pRecords->consume(); + } } } diff --git a/fdbserver/SimKmsConnector.actor.cpp b/fdbserver/SimKmsConnector.actor.cpp index 9e7c40bf27..6eefdd2fca 100644 --- a/fdbserver/SimKmsConnector.actor.cpp +++ b/fdbserver/SimKmsConnector.actor.cpp @@ -327,8 +327,12 @@ TEST_CASE("fdbserver/SimKmsConnector") { state SimKmsConnector connector("SimKmsConnector"); loop choose { - when(wait(connector.connectorCore(inf))) { throw internal_error(); } - when(wait(testRunWorkload(inf, maxEncryptKeys))) { break; } + when(wait(connector.connectorCore(inf))) { + throw internal_error(); + } + when(wait(testRunWorkload(inf, maxEncryptKeys))) { + break; + } } return Void(); } \ No newline at end of file diff --git a/fdbserver/SimulatedCluster.actor.cpp b/fdbserver/SimulatedCluster.actor.cpp index 0ceb78b9d8..59a0afdf13 100644 --- a/fdbserver/SimulatedCluster.actor.cpp +++ b/fdbserver/SimulatedCluster.actor.cpp @@ -2350,7 +2350,7 @@ void setupSimulatedSystem(std::vector>* systemActors, .detail("ConfigString", startingConfigString); bool requiresExtraDBMachines = !g_simulator->extraDatabases.empty() && !useLocalDatabase; - int assignedMachines = 0, nonVersatileMachines = 0; + int assignedMachines = 0; bool gradualMigrationPossible = true; std::vector processClassesSubSet = { ProcessClass::UnsetClass, ProcessClass::StatelessClass }; @@ -2404,10 +2404,7 @@ void 
setupSimulatedSystem(std::vector>* systemActors, else processClass = ProcessClass((ProcessClass::ClassType)deterministicRandom()->randomInt(0, 3), ProcessClass::CommandLineSource); // Unset, Storage, or Transaction - if (processClass == - ProcessClass::StatelessClass) { // *can't* be assigned to other roles, even in an emergency - nonVersatileMachines++; - } + if (processClass == ProcessClass::UnsetClass || processClass == ProcessClass::StorageClass) { possible_ss++; } @@ -2419,11 +2416,9 @@ void setupSimulatedSystem(std::vector>* systemActors, if (machine >= machines) { if (storageCacheMachines > 0 && dc == 0) { processClass = ProcessClass(ProcessClass::StorageCacheClass, ProcessClass::CommandLineSource); - nonVersatileMachines++; storageCacheMachines--; } else if (blobWorkerMachines > 0) { // add blob workers to every DC processClass = ProcessClass(ProcessClass::BlobWorkerClass, ProcessClass::CommandLineSource); - nonVersatileMachines++; blobWorkerMachines--; } } diff --git a/fdbserver/SkipList.cpp b/fdbserver/SkipList.cpp index a7d255f68b..9f32329931 100644 --- a/fdbserver/SkipList.cpp +++ b/fdbserver/SkipList.cpp @@ -295,7 +295,7 @@ private: FastAllocator<128>::release(this); INSTRUMENT_RELEASE("SkipListNode128"); } else { - delete[](char*) this; + delete[] (char*)this; INSTRUMENT_RELEASE("SkipListNodeLarge"); } } diff --git a/fdbserver/StorageCache.actor.cpp b/fdbserver/StorageCache.actor.cpp index 15927a217a..1b574dab88 100644 --- a/fdbserver/StorageCache.actor.cpp +++ b/fdbserver/StorageCache.actor.cpp @@ -456,7 +456,9 @@ ACTOR Future waitForVersionNoTooOld(StorageCacheData* data, Version ver if (version <= data->version.get()) return version; choose { - when(wait(data->version.whenAtLeast(version))) { return version; } + when(wait(data->version.whenAtLeast(version))) { + return version; + } when(wait(delay(SERVER_KNOBS->FUTURE_VERSION_DELAY))) { if (deterministicRandom()->random01() < 0.001) TraceEvent(SevWarn, "CacheServerFutureVersion1000x", 
data->thisServerID) @@ -1848,7 +1850,9 @@ ACTOR Future pullAsyncData(StorageCacheData* data) { loop { loop choose { - when(wait(cursor ? cursor->getMore(TaskPriority::TLogCommit) : Never())) { break; } + when(wait(cursor ? cursor->getMore(TaskPriority::TLogCommit) : Never())) { + break; + } when(wait(dbInfoChange)) { dbInfoChange = data->db->onChange(); if (data->db->get().recoveryState >= RecoveryState::ACCEPTING_COMMITS) { @@ -2250,13 +2254,21 @@ ACTOR Future storageCacheServer(StorageServerInterface ssi, // actors.add(self->readGuard(req , getValueQ)); actors.add(getValueQ(&self, req)); } - when(WatchValueRequest req = waitNext(ssi.watchValue.getFuture())) { ASSERT(false); } - when(GetKeyRequest req = waitNext(ssi.getKey.getFuture())) { actors.add(getKey(&self, req)); } + when(WatchValueRequest req = waitNext(ssi.watchValue.getFuture())) { + ASSERT(false); + } + when(GetKeyRequest req = waitNext(ssi.getKey.getFuture())) { + actors.add(getKey(&self, req)); + } when(GetKeyValuesRequest req = waitNext(ssi.getKeyValues.getFuture())) { actors.add(getKeyValues(&self, req)); } - when(GetShardStateRequest req = waitNext(ssi.getShardState.getFuture())) { ASSERT(false); } - when(StorageQueuingMetricsRequest req = waitNext(ssi.getQueuingMetrics.getFuture())) { ASSERT(false); } + when(GetShardStateRequest req = waitNext(ssi.getShardState.getFuture())) { + ASSERT(false); + } + when(StorageQueuingMetricsRequest req = waitNext(ssi.getQueuingMetrics.getFuture())) { + ASSERT(false); + } // when( ReplyPromise reply = waitNext(ssi.getVersion.getFuture()) ) { // ASSERT(false); //} @@ -2264,21 +2276,39 @@ ACTOR Future storageCacheServer(StorageServerInterface ssi, ASSERT(false); } - when(GetMappedKeyValuesRequest req = waitNext(ssi.getMappedKeyValues.getFuture())) { ASSERT(false); } - when(WaitMetricsRequest req = waitNext(ssi.waitMetrics.getFuture())) { ASSERT(false); } - when(SplitMetricsRequest req = waitNext(ssi.splitMetrics.getFuture())) { ASSERT(false); } - 
when(GetStorageMetricsRequest req = waitNext(ssi.getStorageMetrics.getFuture())) { ASSERT(false); } - when(ReadHotSubRangeRequest req = waitNext(ssi.getReadHotRanges.getFuture())) { ASSERT(false); } - when(SplitRangeRequest req = waitNext(ssi.getRangeSplitPoints.getFuture())) { ASSERT(false); } - when(GetKeyValuesStreamRequest req = waitNext(ssi.getKeyValuesStream.getFuture())) { ASSERT(false); } - when(ChangeFeedStreamRequest req = waitNext(ssi.changeFeedStream.getFuture())) { ASSERT(false); } + when(GetMappedKeyValuesRequest req = waitNext(ssi.getMappedKeyValues.getFuture())) { + ASSERT(false); + } + when(WaitMetricsRequest req = waitNext(ssi.waitMetrics.getFuture())) { + ASSERT(false); + } + when(SplitMetricsRequest req = waitNext(ssi.splitMetrics.getFuture())) { + ASSERT(false); + } + when(GetStorageMetricsRequest req = waitNext(ssi.getStorageMetrics.getFuture())) { + ASSERT(false); + } + when(ReadHotSubRangeRequest req = waitNext(ssi.getReadHotRanges.getFuture())) { + ASSERT(false); + } + when(SplitRangeRequest req = waitNext(ssi.getRangeSplitPoints.getFuture())) { + ASSERT(false); + } + when(GetKeyValuesStreamRequest req = waitNext(ssi.getKeyValuesStream.getFuture())) { + ASSERT(false); + } + when(ChangeFeedStreamRequest req = waitNext(ssi.changeFeedStream.getFuture())) { + ASSERT(false); + } when(OverlappingChangeFeedsRequest req = waitNext(ssi.overlappingChangeFeeds.getFuture())) { // Simulate endpoint not found so that the requester will try another endpoint // This is a workaround to the fact that storage servers do not have an easy way to enforce this // request goes only to other storage servers, and in simulation we manage to trigger this behavior req.reply.sendError(broken_promise()); } - when(ChangeFeedPopRequest req = waitNext(ssi.changeFeedPop.getFuture())) { ASSERT(false); } + when(ChangeFeedPopRequest req = waitNext(ssi.changeFeedPop.getFuture())) { + ASSERT(false); + } when(ChangeFeedVersionUpdateRequest req = 
waitNext(ssi.changeFeedVersionUpdate.getFuture())) { ASSERT(false); } diff --git a/fdbserver/TCInfo.actor.cpp b/fdbserver/TCInfo.actor.cpp index 98f5e81f21..0e6397a5db 100644 --- a/fdbserver/TCInfo.actor.cpp +++ b/fdbserver/TCInfo.actor.cpp @@ -51,7 +51,9 @@ public: interfaceChanged = server->onInterfaceChanged; resetRequest = Void(); } - when(wait(serverRemoved)) { return Void(); } + when(wait(serverRemoved)) { + return Void(); + } when(wait(resetRequest)) { // To prevent a tight spin loop if (IFailureMonitor::failureMonitor().getState(ssi.getStorageMetrics.getEndpoint()).isFailed()) { resetRequest = IFailureMonitor::failureMonitor().onStateEqual( diff --git a/fdbserver/TLogServer.actor.cpp b/fdbserver/TLogServer.actor.cpp index 3f301c3c7c..07a7ff548e 100644 --- a/fdbserver/TLogServer.actor.cpp +++ b/fdbserver/TLogServer.actor.cpp @@ -1445,7 +1445,9 @@ ACTOR Future updateStorage(TLogData* self) { ACTOR Future updateStorageLoop(TLogData* self) { wait(delay(0, TaskPriority::UpdateStorage)); - loop { wait(updateStorage(self)); } + loop { + wait(updateStorage(self)); + } } void commitMessages(TLogData* self, @@ -1606,7 +1608,9 @@ ACTOR Future waitForMessagesForTag(Reference self, Tag reqTag, Ve // we want the caller to finish first, otherwise the data structure it is building might not be complete wait(delay(0.0)); } - when(wait(delay(timeout))) { self->blockingPeekTimeouts += 1; } + when(wait(delay(timeout))) { + self->blockingPeekTimeouts += 1; + } } return Void(); } @@ -2795,7 +2799,9 @@ ACTOR Future pullAsyncData(TLogData* self, while (!endVersion.present() || logData->version.get() < endVersion.get()) { loop { choose { - when(wait(r ? r->getMore(TaskPriority::TLogCommit) : Never())) { break; } + when(wait(r ? 
r->getMore(TaskPriority::TLogCommit) : Never())) { + break; + } when(wait(dbInfoChange)) { if (logData->logSystem->get()) { r = logData->logSystem->get()->peek(logData->logId, tagAt, endVersion, tags, true); @@ -3276,7 +3282,9 @@ ACTOR Future restorePersistentState(TLogData* self, choose { when(wait(updateStorage(self))) {} - when(wait(allRemoved)) { throw worker_removed(); } + when(wait(allRemoved)) { + throw worker_removed(); + } } } } else { @@ -3287,7 +3295,9 @@ ACTOR Future restorePersistentState(TLogData* self, } } } - when(wait(allRemoved)) { throw worker_removed(); } + when(wait(allRemoved)) { + throw worker_removed(); + } } } } catch (Error& e) { @@ -3636,7 +3646,9 @@ ACTOR Future tLog(IKeyValueStore* persistentData, forwardPromise(req.reply, self.tlogCache.get(req.recruitmentID)); } } - when(wait(error)) { throw internal_error(); } + when(wait(error)) { + throw internal_error(); + } when(wait(activeSharedChange)) { if (activeSharedTLog->get() == tlogId) { TraceEvent("SharedTLogNowActive", self.dbgid).detail("NowActive", activeSharedTLog->get()); diff --git a/fdbserver/VFSAsync.cpp b/fdbserver/VFSAsync.cpp index 36aa65588e..d5af31c297 100644 --- a/fdbserver/VFSAsync.cpp +++ b/fdbserver/VFSAsync.cpp @@ -168,7 +168,7 @@ static int asyncReadZeroCopy(sqlite3_file* pFile, void** data, int iAmt, sqlite_ } static int asyncReleaseZeroCopy(sqlite3_file* pFile, void* data, int iAmt, sqlite_int64 iOfst) { // printf("-asyncReleaseRef %p +%lld %d <= %p\n", pFile, iOfst, iAmt, data); - delete[](char*) data; + delete[] (char*)data; return SQLITE_OK; } #endif @@ -299,7 +299,7 @@ struct SharedMemoryInfo { // for a file } void cleanup() { for (int i = 0; i < regions.size(); i++) - delete[](uint8_t*) regions[i]; + delete[] (uint8_t*)regions[i]; table.erase(filename); } diff --git a/fdbserver/VersionedBTree.actor.cpp b/fdbserver/VersionedBTree.actor.cpp index c8fe984a86..7b8fdb3b0d 100644 --- a/fdbserver/VersionedBTree.actor.cpp +++ b/fdbserver/VersionedBTree.actor.cpp @@ 
-2264,7 +2264,9 @@ public: self->remappedPages[r.originalPageID][r.version] = r.newPageID; } } - when(wait(remapRecoverActor)) { remapRecoverActor = Never(); } + when(wait(remapRecoverActor)) { + remapRecoverActor = Never(); + } } } catch (Error& e) { if (e.code() != error_code_end_of_stream) { @@ -6722,7 +6724,7 @@ private: debug_print(addPrefix(context, update->toString())); if (REDWOOD_DEBUG) { - int c = 0; + [[maybe_unused]] int c = 0; auto i = mBegin; while (1) { debug_printf("%s Mutation %4d '%s': %s\n", @@ -10671,7 +10673,9 @@ TEST_CASE(":/redwood/performance/extentQueue") { if (entriesRead == m_extentQueue.numEntries) break; } - when(wait(queueRecoverActor)) { queueRecoverActor = Never(); } + when(wait(queueRecoverActor)) { + queueRecoverActor = Never(); + } } } catch (Error& e) { if (e.code() != error_code_end_of_stream) { diff --git a/fdbserver/include/fdbserver/ClusterRecovery.actor.h b/fdbserver/include/fdbserver/ClusterRecovery.actor.h index f8c5502abe..b58cfb1ffe 100644 --- a/fdbserver/include/fdbserver/ClusterRecovery.actor.h +++ b/fdbserver/include/fdbserver/ClusterRecovery.actor.h @@ -272,8 +272,8 @@ struct ClusterRecoveryData : NonCopyable, ReferenceCounted masterInterface(masterInterface), masterLifetime(masterLifetimeToken), clusterController(clusterController), cstate(coordinators, addActor, dbgid), dbInfo(dbInfo), registrationCount(0), addActor(addActor), recruitmentStalled(makeReference>(false)), forceRecovery(forceRecovery), neverCreated(false), - safeLocality(tagLocalityInvalid), primaryLocality(tagLocalityInvalid), cc("Master", dbgid.toString()), - changeCoordinatorsRequests("ChangeCoordinatorsRequests", cc), + safeLocality(tagLocalityInvalid), primaryLocality(tagLocalityInvalid), + cc("ClusterRecoveryData", dbgid.toString()), changeCoordinatorsRequests("ChangeCoordinatorsRequests", cc), getCommitVersionRequests("GetCommitVersionRequests", cc), backupWorkerDoneRequests("BackupWorkerDoneRequests", cc), 
getLiveCommittedVersionRequests("GetLiveCommittedVersionRequests", cc), diff --git a/fdbserver/include/fdbserver/MetricClient.h b/fdbserver/include/fdbserver/MetricClient.h new file mode 100644 index 0000000000..cf628e56c0 --- /dev/null +++ b/fdbserver/include/fdbserver/MetricClient.h @@ -0,0 +1,51 @@ +/* + * MetricClient.h + * + * This source file is part of the FoundationDB open source project + * + * Copyright 2013-2022 Apple Inc. and the FoundationDB project authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#include "flow/TDMetric.actor.h" +#include "flow/Msgpack.h" +#include "flow/network.h" +#ifndef METRIC_CLIENT_H +#define METRIC_CLIENT_H +class IMetricClient { +protected: + MetricsDataModel model; + +public: + virtual void send(MetricCollection*) = 0; + virtual ~IMetricClient() {} +}; + +class UDPMetricClient : public IMetricClient { +private: + // Since we can't quickly determine the exact packet size for OTELSum in msgpack + // we play on the side of caution and make our maximum 3/4 of the official one + static constexpr uint32_t MAX_OTELSUM_PACKET_SIZE = 0.75 * IUDPSocket::MAX_PACKET_SIZE; + // 'model' is inherited (protected) from IMetricClient; re-declaring it here would shadow the base member. + Future> socket; + int socket_fd; + MsgpackBuffer buf; + std::string address; + int port; + void send_packet(int fd, const void* data, size_t len); + +public: + UDPMetricClient(); + void send(MetricCollection*) override; +}; +#endif \ No newline at end of file diff --git a/fdbserver/include/fdbserver/MetricLogger.actor.h b/fdbserver/include/fdbserver/MetricLogger.actor.h index 3dcd943db9..b5fac0e922 100644 --- a/fdbserver/include/fdbserver/MetricLogger.actor.h +++ b/fdbserver/include/fdbserver/MetricLogger.actor.h @@ -30,6 +30,7 @@ #include "flow/actorcompiler.h" // This must be the last #include ACTOR Future runMetrics(Future fcx, Key metricsPrefix); +ACTOR Future runMetrics(); #include "flow/unactorcompiler.h" #endif diff --git a/fdbserver/include/fdbserver/RestoreUtil.h b/fdbserver/include/fdbserver/RestoreUtil.h index 6993a07e83..ed3db0aae4 100644 --- a/fdbserver/include/fdbserver/RestoreUtil.h +++ b/fdbserver/include/fdbserver/RestoreUtil.h @@ -38,10 +38,10 @@ #include #define SevFRMutationInfo SevVerbose -//#define SevFRMutationInfo SevInfo +// #define SevFRMutationInfo SevInfo #define SevFRDebugInfo SevVerbose -//#define SevFRDebugInfo SevInfo +// #define SevFRDebugInfo SevInfo struct VersionedMutation { MutationRef mutation; diff --git a/fdbserver/include/fdbserver/WorkerInterface.actor.h
b/fdbserver/include/fdbserver/WorkerInterface.actor.h index f3af110042..2b187d9c4d 100644 --- a/fdbserver/include/fdbserver/WorkerInterface.actor.h +++ b/fdbserver/include/fdbserver/WorkerInterface.actor.h @@ -1292,7 +1292,9 @@ Future ioTimeoutError(Future what, double time, const char* context = null } Future end = lowPriorityDelay(time); choose { - when(T t = wait(what)) { return t; } + when(T t = wait(what)) { + return t; + } when(wait(end)) { Error err = io_timeout(); if (g_network->isSimulated() && !g_simulator->getCurrentProcess()->isReliable()) { @@ -1326,7 +1328,9 @@ Future ioDegradedOrTimeoutError(Future what, if (degradedTime < errTime) { Future degradedEnd = lowPriorityDelay(degradedTime); choose { - when(T t = wait(what)) { return t; } + when(T t = wait(what)) { + return t; + } when(wait(degradedEnd)) { CODE_PROBE(true, "TLog degraded", probe::func::deduplicate); TraceEvent(SevWarnAlways, "IoDegraded").log(); @@ -1337,7 +1341,9 @@ Future ioDegradedOrTimeoutError(Future what, Future end = lowPriorityDelay(errTime - degradedTime); choose { - when(T t = wait(what)) { return t; } + when(T t = wait(what)) { + return t; + } when(wait(end)) { Error err = io_timeout(); if (g_network->isSimulated() && !g_simulator->getCurrentProcess()->isReliable()) { diff --git a/fdbserver/include/fdbserver/art.h b/fdbserver/include/fdbserver/art.h index b318ac2547..27dbbbcd50 100644 --- a/fdbserver/include/fdbserver/art.h +++ b/fdbserver/include/fdbserver/art.h @@ -52,7 +52,7 @@ struct art_tree { #define ART_PREV -1 #define ART_NEITHER 0 - //#define ART_IS_LEAF(x) ( (*((ART_NODE_TYPE*)x) == ART_LEAF)) + // #define ART_IS_LEAF(x) ( (*((ART_NODE_TYPE*)x) == ART_LEAF)) template static inline bool ART_IS_LEAF(T const& x) { return *((ART_NODE_TYPE*)x) == ART_LEAF; diff --git a/fdbserver/storageserver.actor.cpp b/fdbserver/storageserver.actor.cpp index 84e802bf16..264914f1b8 100644 --- a/fdbserver/storageserver.actor.cpp +++ b/fdbserver/storageserver.actor.cpp @@ -1993,7 +1993,9 @@ 
ACTOR Future waitForVersionNoTooOld(StorageServer* data, Version versio if (version <= data->version.get()) return version; choose { - when(wait(data->version.whenAtLeast(version))) { return version; } + when(wait(data->version.whenAtLeast(version))) { + return version; + } when(wait(delay(SERVER_KNOBS->FUTURE_VERSION_DELAY))) { if (deterministicRandom()->random01() < 0.001) TraceEvent(SevWarn, "ShardServerFutureVersion1000x", data->thisServerID) @@ -2034,7 +2036,6 @@ std::vector StorageServer::getStorageServerShards(KeyRangeRe ACTOR Future getValueQ(StorageServer* data, GetValueRequest req) { state int64_t resultSize = 0; Span span("SS:getValue"_loc, req.spanContext); - span.addAttribute("key"_sr, req.key); // Temporarily disabled -- this path is hit a lot // getCurrentLineage()->modify(&TransactionLineage::txID) = req.spanContext.first(); @@ -4875,13 +4876,9 @@ ACTOR Future mapKeyValues(StorageServer* data, // keep index for boundary index entries, so that caller can use it as a continuation. 
result.data[0].key = input.data[0].key; result.data[0].value = input.data[0].value; - result.data[0].boundaryAndExist = getMappedKeyValueSize(kvms[0]) > 0; result.data.back().key = input.data[resultSize - 1].key; result.data.back().value = input.data[resultSize - 1].value; - // index needs to be -1 - int index = (resultSize - 1) % SERVER_KNOBS->MAX_PARALLEL_QUICK_GET_VALUE; - result.data.back().boundaryAndExist = getMappedKeyValueSize(kvms[index]) > 0; } result.more = input.more || resultSize < sz; if (pOriginalReq->options.present() && pOriginalReq->options.get().debugID.present()) @@ -6367,7 +6364,9 @@ ACTOR Future fetchChangeFeedApplier(StorageServer* data, when(wait(changeFeedInfo->fetchLock.take())) { feedFetchReleaser = FlowLock::Releaser(changeFeedInfo->fetchLock); } - when(wait(changeFeedInfo->durableFetchVersion.whenAtLeast(endVersion))) { return invalidVersion; } + when(wait(changeFeedInfo->durableFetchVersion.whenAtLeast(endVersion))) { + return invalidVersion; + } } state Version startVersion = beginVersion; @@ -8391,6 +8390,8 @@ private: // Because of data moves, we can get mutations operating on a change feed we don't yet know about, because // the metadata fetch hasn't started yet bool createdFeed = false; + bool popMutationLog = false; + bool addMutationToLog = false; if (feed == data->uidChangeFeed.end() && status != ChangeFeedStatus::CHANGE_FEED_DESTROY) { createdFeed = true; @@ -8440,6 +8441,7 @@ private: CODE_PROBE(true, "private mutation for feed scheduled for deletion! 
Un-mark it as removing"); feed->second->removing = false; + addMutationToLog = true; // reset fetch versions because everything previously fetched was cleaned up feed->second->fetchVersion = invalidVersion; feed->second->durableFetchVersion = NotifiedVersion(); @@ -8448,8 +8450,6 @@ private: feed->second->updateMetadataVersion(currentVersion); } - bool popMutationLog = false; - bool addMutationToLog = false; if (popVersion != invalidVersion && status != ChangeFeedStatus::CHANGE_FEED_DESTROY) { // pop the change feed at pop version, no matter what state it is in if (popVersion - 1 > feed->second->emptyVersion) { @@ -9020,7 +9020,6 @@ ACTOR Future update(StorageServer* data, bool* pReceivedUpdate) { } Span span("SS:update"_loc, spanContext); - span.addAttribute("key"_sr, msg.param1); // Drop non-private mutations if TSS fault injection is enabled in simulation, or if this is a TSS in // quarantine. @@ -10355,7 +10354,9 @@ ACTOR Future waitMetrics(StorageServerMetrics* self, WaitMetricsRequest re }*/ } - when(wait(timeout)) { timedout = true; } + when(wait(timeout)) { + timedout = true; + } } } catch (Error& e) { if (e.code() == error_code_actor_cancelled) @@ -11475,4 +11476,4 @@ void versionedMapTest() { printf("PTree node is %d bytes, allocated as %d bytes\n", NSIZE, ASIZE); printf("%d distinct after %d insertions\n", count, 1000 * 1000); printf("Memory used: %f MB\n", (after - before) / 1e6); -} \ No newline at end of file +} diff --git a/fdbserver/tester.actor.cpp b/fdbserver/tester.actor.cpp index f20135aa9e..ad4a56cdb2 100644 --- a/fdbserver/tester.actor.cpp +++ b/fdbserver/tester.actor.cpp @@ -2103,7 +2103,9 @@ ACTOR Future runTests(Reference connRecord, } choose { - when(wait(tests)) { return Void(); } + when(wait(tests)) { + return Void(); + } when(wait(quorum(actors, 1))) { ASSERT(false); throw internal_error(); diff --git a/fdbserver/worker.actor.cpp b/fdbserver/worker.actor.cpp index ed92b0b0cd..b79d15de6a 100644 --- a/fdbserver/worker.actor.cpp +++ 
b/fdbserver/worker.actor.cpp @@ -675,18 +675,42 @@ ACTOR Future registrationClient( TraceEvent(SevWarn, "WorkerRegisterTimeout").detail("WaitTime", now() - startTime); } } - when(wait(ccInterface->onChange())) { break; } - when(wait(ddInterf->onChange())) { break; } - when(wait(rkInterf->onChange())) { break; } - when(wait(csInterf->onChange())) { break; } - when(wait(bmInterf->onChange())) { break; } - when(wait(blobMigratorInterf->onChange())) { break; } - when(wait(ekpInterf->onChange())) { break; } - when(wait(degraded->onChange())) { break; } - when(wait(FlowTransport::transport().onIncompatibleChanged())) { break; } - when(wait(issues->onChange())) { break; } - when(wait(recovered)) { break; } - when(wait(clusterId->onChange())) { break; } + when(wait(ccInterface->onChange())) { + break; + } + when(wait(ddInterf->onChange())) { + break; + } + when(wait(rkInterf->onChange())) { + break; + } + when(wait(csInterf->onChange())) { + break; + } + when(wait(bmInterf->onChange())) { + break; + } + when(wait(blobMigratorInterf->onChange())) { + break; + } + when(wait(ekpInterf->onChange())) { + break; + } + when(wait(degraded->onChange())) { + break; + } + when(wait(FlowTransport::transport().onIncompatibleChanged())) { + break; + } + when(wait(issues->onChange())) { + break; + } + when(wait(recovered)) { + break; + } + when(wait(clusterId->onChange())) { + break; + } } } } @@ -1786,6 +1810,8 @@ ACTOR Future workerServer(Reference connRecord, metricsLogger = runMetrics(database, KeyRef(metricsPrefix)); database->globalConfig->trigger(samplingFrequency, samplingProfilerUpdateFrequency); } + } else { + metricsLogger = runMetrics(); } errorForwarders.add(resetAfter(degraded, diff --git a/fdbserver/workloads/BlobGranuleCorrectnessWorkload.actor.cpp b/fdbserver/workloads/BlobGranuleCorrectnessWorkload.actor.cpp index 9bd5be82e7..92557bc983 100644 --- a/fdbserver/workloads/BlobGranuleCorrectnessWorkload.actor.cpp +++ 
b/fdbserver/workloads/BlobGranuleCorrectnessWorkload.actor.cpp @@ -670,8 +670,8 @@ struct BlobGranuleCorrectnessWorkload : TestWorkload { } else { int targetQueryBytes = (deterministicRandom()->randomInt(1, 20) * targetBytesReadPerQuery) / 10; int estimatedQueryBytes = 0; - for (int i = 0; estimatedQueryBytes < targetQueryBytes && endKeyIt != threadData->keyData.end(); - i++, endKeyIt++) { + for (; estimatedQueryBytes < targetQueryBytes && endKeyIt != threadData->keyData.end(); + endKeyIt++) { // iterate forward until end or target keys have passed estimatedQueryBytes += (1 + endKeyIt->second.writes.size() - endKeyIt->second.nextClearIdx) * threadData->targetValLength; diff --git a/fdbserver/workloads/IndexScan.actor.cpp b/fdbserver/workloads/IndexScan.actor.cpp index d5f8a57db4..8f44b48d08 100644 --- a/fdbserver/workloads/IndexScan.actor.cpp +++ b/fdbserver/workloads/IndexScan.actor.cpp @@ -91,7 +91,9 @@ struct IndexScanWorkload : KVWorkload { ACTOR static Future serialScans(Database cx, IndexScanWorkload* self) { state double start = now(); try { - loop { wait(scanDatabase(cx, self)); } + loop { + wait(scanDatabase(cx, self)); + } } catch (...) 
{ self->totalTimeFetching = now() - start; throw; diff --git a/fdbserver/workloads/KVStoreTest.actor.cpp b/fdbserver/workloads/KVStoreTest.actor.cpp index 99bebbe983..44bc5255b3 100644 --- a/fdbserver/workloads/KVStoreTest.actor.cpp +++ b/fdbserver/workloads/KVStoreTest.actor.cpp @@ -404,7 +404,9 @@ ACTOR Future testKVStore(KVStoreTestWorkload* workload) { try { choose { when(wait(main)) {} - when(wait(test.store->getError())) { ASSERT(false); } + when(wait(test.store->getError())) { + ASSERT(false); + } } } catch (Error& e) { err = e; diff --git a/fdbserver/workloads/KillRegion.actor.cpp b/fdbserver/workloads/KillRegion.actor.cpp index 4a292d8470..b3dcc7ec00 100644 --- a/fdbserver/workloads/KillRegion.actor.cpp +++ b/fdbserver/workloads/KillRegion.actor.cpp @@ -120,7 +120,9 @@ struct KillRegionWorkload : TestWorkload { wait(success(ManagementAPI::changeConfig( cx.getReference(), g_simulator->disablePrimary + " repopulate_anti_quorum=1", true))); choose { - when(wait(waitForStorageRecovered(self))) { break; } + when(wait(waitForStorageRecovered(self))) { + break; + } when(wait(delay(300.0))) {} } } diff --git a/fdbserver/workloads/MiniCycle.actor.cpp b/fdbserver/workloads/MiniCycle.actor.cpp index a6cfe4a050..6b52520907 100644 --- a/fdbserver/workloads/MiniCycle.actor.cpp +++ b/fdbserver/workloads/MiniCycle.actor.cpp @@ -90,7 +90,9 @@ struct MiniCycleWorkload : TestWorkload { if (!ok) return false; } - when(wait(end)) { break; } + when(wait(end)) { + break; + } } } diff --git a/fdbserver/workloads/MockDDTrackerShardEvaluator.actor.cpp b/fdbserver/workloads/MockDDTrackerShardEvaluator.actor.cpp index 9d007e7419..b83da7f584 100644 --- a/fdbserver/workloads/MockDDTrackerShardEvaluator.actor.cpp +++ b/fdbserver/workloads/MockDDTrackerShardEvaluator.actor.cpp @@ -133,7 +133,9 @@ struct MockDDTrackerShardEvaluatorWorkload : public MockDDTestWorkload { ACTOR static Future relocateShardReporter(MockDDTrackerShardEvaluatorWorkload* self, FutureStream input) { loop choose { 
- when(RelocateShard rs = waitNext(input)) { ++self->rsReasonCounts[rs.reason]; } + when(RelocateShard rs = waitNext(input)) { + ++self->rsReasonCounts[rs.reason]; + } } } diff --git a/fdbserver/workloads/PrivateEndpoints.actor.cpp b/fdbserver/workloads/PrivateEndpoints.actor.cpp index 3de6deb0f8..efa7c7eaa9 100644 --- a/fdbserver/workloads/PrivateEndpoints.actor.cpp +++ b/fdbserver/workloads/PrivateEndpoints.actor.cpp @@ -126,7 +126,9 @@ struct PrivateEndpoints : TestWorkload { TraceEvent("PrivateEndpointTestDone").log(); return Void(); } - when(wait(testFuture)) { ++self->numSuccesses; } + when(wait(testFuture)) { + ++self->numSuccesses; + } } wait(delay(0.2)); } diff --git a/fdbserver/workloads/UDPWorkload.actor.cpp b/fdbserver/workloads/UDPWorkload.actor.cpp index 3692e5e697..80456a2d7e 100644 --- a/fdbserver/workloads/UDPWorkload.actor.cpp +++ b/fdbserver/workloads/UDPWorkload.actor.cpp @@ -183,7 +183,9 @@ struct UDPWorkload : TestWorkload { finished = delay(1.0); done = Never(); } - when(wait(finished)) { return Void(); } + when(wait(finished)) { + return Void(); + } } } } @@ -197,7 +199,9 @@ struct UDPWorkload : TestWorkload { loop { choose { when(wait(delay(0.1))) {} - when(wait(actors.getResult())) { UNSTOPPABLE_ASSERT(false); } + when(wait(actors.getResult())) { + UNSTOPPABLE_ASSERT(false); + } } if (!socket.get().isValid() || deterministicRandom()->random01() < 0.05) { peer = deterministicRandom()->randomChoice(*remotes); diff --git a/fdbserver/workloads/UnitTests.actor.cpp b/fdbserver/workloads/UnitTests.actor.cpp index 150bb115a1..54eed3a7ce 100644 --- a/fdbserver/workloads/UnitTests.actor.cpp +++ b/fdbserver/workloads/UnitTests.actor.cpp @@ -46,6 +46,7 @@ void forceLinkAtomicTests(); void forceLinkIdempotencyIdTests(); void forceLinkBlobConnectionProviderTests(); void forceLinkActorCollectionTests(); +void forceLinkDDSketchTests(); struct UnitTestWorkload : TestWorkload { static constexpr auto NAME = "UnitTests"; @@ -108,6 +109,7 @@ struct 
UnitTestWorkload : TestWorkload { forceLinkIdempotencyIdTests(); forceLinkBlobConnectionProviderTests(); forceLinkActorCollectionTests(); + forceLinkDDSketchTests(); } Future setup(Database const& cx) override { diff --git a/fdbserver/workloads/WatchAndWait.actor.cpp b/fdbserver/workloads/WatchAndWait.actor.cpp index bea4f6be23..9d27238991 100644 --- a/fdbserver/workloads/WatchAndWait.actor.cpp +++ b/fdbserver/workloads/WatchAndWait.actor.cpp @@ -88,7 +88,6 @@ struct WatchAndWaitWorkload : TestWorkload { ACTOR Future _start(Database cx, WatchAndWaitWorkload* self) { state std::vector> watches; - int watchCounter = 0; uint64_t endNode = (self->nodeCount * (self->clientId + 1)) / self->clientCount; uint64_t startNode = (self->nodeCount * self->clientId) / self->clientCount; uint64_t NodesPerWatch = self->nodeCount / self->watchCount; @@ -100,7 +99,6 @@ struct WatchAndWaitWorkload : TestWorkload { .detail("Npw", NodesPerWatch); for (uint64_t i = startNode; i < endNode; i += NodesPerWatch) { watches.push_back(self->watchAndWait(cx, self, i)); - watchCounter++; } wait(delay(self->testDuration)); // || waitForAll( watches ) TraceEvent("WatchAndWaitEnd").detail("Duration", self->testDuration); diff --git a/flow/ActorCollection.actor.cpp b/flow/ActorCollection.actor.cpp index 26e8452f24..38135cb19f 100644 --- a/flow/ActorCollection.actor.cpp +++ b/flow/ActorCollection.actor.cpp @@ -102,7 +102,9 @@ ACTOR Future actorCollection(FutureStream> addActor, // If we didn't return then the entire list wasn't destroyed so erase/destroy i runners.erase_and_dispose(i, [](Runner* r) { delete r; }); } - when(Error e = waitNext(errors.getFuture())) { throw e; } + when(Error e = waitNext(errors.getFuture())) { + throw e; + } } } @@ -130,7 +132,9 @@ void forceLinkActorCollectionTests() {} TEST_CASE("/flow/actorCollection/chooseWhen") { state Promise promise; choose { - when(wait(delay(0))) { promise.send(Void()); } + when(wait(delay(0))) { + promise.send(Void()); + } 
when(wait(promise.getFuture())) { // Should be cancelled, since another when clause in this choose block has executed ASSERT(false); diff --git a/flow/FastAlloc.cpp b/flow/FastAlloc.cpp index 7ceb539d6b..18f9579479 100644 --- a/flow/FastAlloc.cpp +++ b/flow/FastAlloc.cpp @@ -32,11 +32,11 @@ #include #include -//#ifdef WIN32 -//#include -//#undef min -//#undef max -//#endif +// #ifdef WIN32 +// #include +// #undef min +// #undef max +// #endif #ifdef __linux__ #include diff --git a/flow/FileTraceLogWriter.cpp b/flow/FileTraceLogWriter.cpp index 998f915f77..b9617cb00e 100644 --- a/flow/FileTraceLogWriter.cpp +++ b/flow/FileTraceLogWriter.cpp @@ -31,9 +31,9 @@ #define TRACEFILE_FLAGS O_WRONLY | O_CREAT | O_EXCL | O_CLOEXEC #define TRACEFILE_MODE 0664 #elif defined(_WIN32) -//#include -//#undef max -//#undef min +// #include +// #undef max +// #undef min #include #include #include diff --git a/flow/Hash3.c b/flow/Hash3.c index 103a205ea6..1b694d81bc 100644 --- a/flow/Hash3.c +++ b/flow/Hash3.c @@ -33,7 +33,7 @@ mixing with 12*3 instructions on 3 integers than you can with 3 instructions on 1 byte), but shoehorning those bytes into integers efficiently is messy. 
------------------------------------------------------------------------------- */ -//#define SELF_TEST ENABLED(NOT_IN_CLEAN) +// #define SELF_TEST ENABLED(NOT_IN_CLEAN) #include /* defines printf for tests */ #include /* defines time_t for timings in the test */ diff --git a/flow/Knobs.cpp b/flow/Knobs.cpp index 80f6e233a8..eedeb689ba 100644 --- a/flow/Knobs.cpp +++ b/flow/Knobs.cpp @@ -84,11 +84,18 @@ void FlowKnobs::initialize(Randomize randomize, IsSimulated isSimulated) { init( WRITE_TRACING_ENABLED, true ); if( randomize && BUGGIFY ) WRITE_TRACING_ENABLED = false; - init( TRACING_SPAN_ATTRIBUTES_ENABLED, false ); // Additional K/V and tenant data added to Span Attributes init( TRACING_SAMPLE_RATE, 0.0 ); if (randomize && BUGGIFY) TRACING_SAMPLE_RATE = 0.01; // Fraction of distributed traces (not spans) to sample (0 means ignore all traces) init( TRACING_UDP_LISTENER_ADDR, "127.0.0.1" ); // Only applicable if TracerType is set to a network option init( TRACING_UDP_LISTENER_PORT, 8889 ); // Only applicable if TracerType is set to a network option + // Native metrics + init( METRICS_DATA_MODEL, "none"); if (randomize && BUGGIFY) METRICS_DATA_MODEL="otel"; + init( METRICS_EMISSION_INTERVAL, 30.0 ); // The time (in seconds) between metric flushes + init( STATSD_UDP_EMISSION_ADDR, "127.0.0.1"); + init( STATSD_UDP_EMISSION_PORT, 8125 ); + init( OTEL_UDP_EMISSION_ADDR, "127.0.0.1"); + init( OTEL_UDP_EMISSION_PORT, 8903 ); + //connectionMonitor init( CONNECTION_MONITOR_LOOP_TIME, isSimulated ? 0.75 : 1.0 ); if( randomize && BUGGIFY ) CONNECTION_MONITOR_LOOP_TIME = 6.0; init( CONNECTION_MONITOR_TIMEOUT, isSimulated ? 
1.50 : 2.0 ); if( randomize && BUGGIFY ) CONNECTION_MONITOR_TIMEOUT = 6.0; diff --git a/flow/MkCertCli.cpp b/flow/MkCertCli.cpp index dd9589ed1b..6ce39225db 100644 --- a/flow/MkCertCli.cpp +++ b/flow/MkCertCli.cpp @@ -75,7 +75,7 @@ CSimpleOpt::SOption gOptions[] = { { OPT_HELP, "--help", SO_NONE }, SO_END_OF_OPTIONS }; template -void printOptionUsage(std::string_view option, const char*(&&optionDescLines)[Len]) { +void printOptionUsage(std::string_view option, const char* (&&optionDescLines)[Len]) { constexpr std::string_view optionIndent{ " " }; constexpr std::string_view descIndent{ " " }; fmt::print(stdout, "{}{}\n", optionIndent, option); diff --git a/flow/Net2.actor.cpp b/flow/Net2.actor.cpp index 575250421f..72bc756fa1 100644 --- a/flow/Net2.actor.cpp +++ b/flow/Net2.actor.cpp @@ -232,6 +232,7 @@ public: double taskBegin; TaskPriority currentTaskID; TDMetricCollection tdmetrics; + MetricCollection metrics; ChaosMetrics chaosMetrics; // we read now() from a different thread. On Intel, reading a double is atomic anyways, but on other platforms it's // not. 
For portability this should be atomic @@ -933,8 +934,12 @@ public: doAcceptHandshake(self, connected); try { choose { - when(wait(connected.getFuture())) { return Void(); } - when(wait(delay(FLOW_KNOBS->CONNECTION_MONITOR_TIMEOUT))) { throw connection_failed(); } + when(wait(connected.getFuture())) { + return Void(); + } + when(wait(delay(FLOW_KNOBS->CONNECTION_MONITOR_TIMEOUT))) { + throw connection_failed(); + } } } catch (Error& e) { if (e.code() != error_code_actor_cancelled) { @@ -993,8 +998,12 @@ public: doConnectHandshake(self, connected); try { choose { - when(wait(connected.getFuture())) { return Void(); } - when(wait(delay(FLOW_KNOBS->CONNECTION_MONITOR_TIMEOUT))) { throw connection_failed(); } + when(wait(connected.getFuture())) { + return Void(); + } + when(wait(delay(FLOW_KNOBS->CONNECTION_MONITOR_TIMEOUT))) { + throw connection_failed(); + } } } catch (Error& e) { // Either the connection failed, or was cancelled by the caller @@ -1219,6 +1228,7 @@ Net2::Net2(const TLSConfig& tlsConfig, bool useThreadPool, bool useMetrics) if (FLOW_KNOBS->ENABLE_CHAOS_FEATURES) { setGlobal(INetwork::enChaosMetrics, (flowGlobalType)&chaosMetrics); } + setGlobal(INetwork::enMetrics, (flowGlobalType)&metrics); setGlobal(INetwork::enNetworkConnections, (flowGlobalType)network); setGlobal(INetwork::enASIOService, (flowGlobalType)&reactor.ios); setGlobal(INetwork::enBlobCredentialFiles, &blobCredentialFiles); diff --git a/flow/TDMetric.cpp b/flow/TDMetric.cpp index 5bbaca470b..2e6b80e35b 100644 --- a/flow/TDMetric.cpp +++ b/flow/TDMetric.cpp @@ -18,8 +18,13 @@ * limitations under the License. 
*/ +#include "flow/Error.h" +#include "flow/OTELMetrics.h" #include "flow/TDMetric.actor.h" #include "flow/flow.h" +#include +#include +#include const StringRef BaseEventMetric::metricType = "Event"_sr; template <> @@ -193,12 +198,12 @@ uint64_t DynamicEventMetric::log(uint64_t explicitTime) { return t; } -void DynamicEventMetric::flushData(MetricKeyRef const& mk, uint64_t rollTime, MetricUpdateBatch& batch) { +void DynamicEventMetric::flushData(MetricKeyRef const& mk, uint64_t rollTime, MetricBatch& batch) { time.flushField(mk, rollTime, batch); for (auto& [name, field] : fields) field->flushField(mk, rollTime, batch); if (!latestRecorded) { - batch.updates.emplace_back(mk.packLatestKey(), StringRef()); + batch.scope.updates.emplace_back(mk.packLatestKey(), StringRef()); latestRecorded = true; } } @@ -229,3 +234,172 @@ std::string MetricData::toString() const { rollTime, writer.getLength()); } + +std::string createStatsdMessage(const std::string& name, StatsDMetric type, const std::string& val) { + return createStatsdMessage(name, type, val, {}); +} + +std::string createStatsdMessage(const std::string& name, + StatsDMetric type, + const std::string& val, + const std::vector>& tags) { + ASSERT(!name.empty()); + std::string msg = name + ":" + val; + switch (type) { + case StatsDMetric::GAUGE: + msg += "|g"; + break; + + case StatsDMetric::COUNTER: + msg += "|c"; + break; + } + + if (!tags.empty()) { + msg += "|"; + for (size_t i = 0; i < tags.size(); i++) { + msg = msg + "#" + tags[i].first + ":" + tags[i].second; + // If we know there is another tag coming, we should add a comma in the message + if (i != tags.size() - 1) { + msg += ","; + } + } + } + + return msg; +} + +MetricsDataModel knobToMetricModel(const std::string& knob) { + if (knob == "statsd") { + return MetricsDataModel::STATSD; + } else if (knob == "otel") { + return MetricsDataModel::OTLP; + } else if (knob == "none") { + return MetricsDataModel::NONE; + } + ASSERT(false); + return 
MetricsDataModel::NONE; +} + +std::vector splitString(const std::string& str, const std::string& delimit) { + std::vector splitted; + size_t pos = 0; + std::string s = str; + + while ((pos = s.find(delimit)) != std::string::npos) { + splitted.push_back(s.substr(0, pos)); + s.erase(0, pos + delimit.length()); + } + splitted.push_back(s); + return splitted; +} + +/* + Returns true if num is exactly a string representation of a number + Ex: "123", "123.65" both return true + "124.532.13", "t4fr", "102g" all return false +*/ +bool isNumber(const std::string& num) { + if (num.empty()) { + return false; + } + + size_t start = 0; + // We could have a negative number, if the first character isn't a digit + // but it's a "-", then we start from position 1. Otherwise it's not a valid number + if (!std::isdigit(num[0])) { + if (num[0] == '-') { + start = 1; + } else { + return false; + } + } + + // Iterate through the string and make sure every char is a digit and there is only one occurence of "." + int dot_count = 0; + for (size_t i = start; i < num.size(); i++) { + if (!std::isdigit(num[i])) { + if (num[i] == '.') { + if (dot_count > 0) { + return false; + } + ++dot_count; + } else { + return false; + } + } + } + return true; +} + +/* + Returns true if msg is a valid statsd string. 
Valid statsd strings are of the form + :||#:, + + Where name consists of only upper or lowercase letters (no symbols), + value is numeric (postive or negative, integer or decimal), + type is one of "g", "c", + +*/ +bool verifyStatsdMessage(const std::string& msg) { + auto tokens = splitString(msg, "|"); + std::vector statsdTypes{ "c", "g" }; + + // We can't have more than three "|" in our string based on above format + if (tokens.size() > 3) { + return false; + } + + // First check if : is valid, this should be in tokens[0] + auto nameVal = splitString(tokens[0], ":"); + if (nameVal.size() != 2) { + return false; + } + // nameVal[1] should be a numeric value + if (!isNumber(nameVal[1])) { + return false; + } + + // The 2nd token should always represent a valid statsd type + if (std::find(statsdTypes.begin(), statsdTypes.end(), tokens[1]) == statsdTypes.end()) { + return false; + } + + // It is optional to have tags but the tags section must be non-empty and begin + // with a "#" + if (tokens.size() > 2) { + if (tokens[2].empty()) { + return false; + } + if (tokens[2][0] != '#') { + return false; + } + } + return true; +} + +void createOtelGauge(UID id, const std::string& name, double value) { + MetricCollection* metrics = MetricCollection::getMetricCollection(); + if (metrics != nullptr) { + NetworkAddress addr = g_network->getLocalAddress(); + std::string ip_str = addr.ip.toString(); + std::string port_str = std::to_string(addr.port); + if (metrics->gaugeMap.find(id) != metrics->gaugeMap.end()) { + metrics->gaugeMap[id].points.emplace_back(value); + } else { + metrics->gaugeMap[id] = OTEL::OTELGauge(name, value); + } + metrics->gaugeMap[id].points.back().addAttribute("ip", ip_str); + metrics->gaugeMap[id].points.back().addAttribute("port", port_str); + } +} + +void createOtelGauge(UID id, const std::string& name, double value, const std::vector& attrs) { + MetricCollection* metrics = MetricCollection::getMetricCollection(); + createOtelGauge(id, name, value); + if 
(metrics != nullptr) { + for (const auto& attr : attrs) { + metrics->gaugeMap[id].points.back().addAttribute(attr.key, attr.value); + } + } +} diff --git a/flow/genericactors.actor.cpp b/flow/genericactors.actor.cpp index 91fc86dbdc..fc88c9b3d3 100644 --- a/flow/genericactors.actor.cpp +++ b/flow/genericactors.actor.cpp @@ -58,7 +58,9 @@ ACTOR Future timeoutWarningCollector(FutureStream input, double logD state uint64_t counter = 0; state Future end = delay(logDelay); loop choose { - when(waitNext(input)) { counter++; } + when(waitNext(input)) { + counter++; + } when(wait(end)) { if (counter) TraceEvent(SevWarn, context, id).detail("LateProcessCount", counter).detail("LoggingDelay", logDelay); @@ -97,8 +99,12 @@ ACTOR Future quorumEqualsTrue(std::vector> futures, int requi } choose { - when(wait(quorum(true_futures, required))) { return true; } - when(wait(quorum(false_futures, futures.size() - required + 1))) { return false; } + when(wait(quorum(true_futures, required))) { + return true; + } + when(wait(quorum(false_futures, futures.size() - required + 1))) { + return false; + } } } @@ -121,7 +127,9 @@ ACTOR Future shortCircuitAny(std::vector> f) { } return false; } - when(wait(waitForAny(sc))) { return true; } + when(wait(waitForAny(sc))) { + return true; + } } } diff --git a/flow/include/flow/Arena.h b/flow/include/flow/Arena.h index 57b0864aa5..51e93ff4ba 100644 --- a/flow/include/flow/Arena.h +++ b/flow/include/flow/Arena.h @@ -359,7 +359,7 @@ struct union_like_traits> : std::true_type { } }; -//#define STANDALONE_ALWAYS_COPY +// #define STANDALONE_ALWAYS_COPY template class Standalone : private Arena, public T { @@ -443,7 +443,7 @@ class StringRef { public: constexpr static FileIdentifier file_identifier = 13300811; StringRef() : data(0), length(0) {} - StringRef(Arena& p, const StringRef& toCopy) : data(new (p) uint8_t[toCopy.size()]), length(toCopy.size()) { + StringRef(Arena& p, const StringRef& toCopy) : data(new(p) uint8_t[toCopy.size()]), 
length(toCopy.size()) { if (length > 0) { memcpy((void*)data, toCopy.data, length); } @@ -454,7 +454,7 @@ public: if (length) memcpy((void*)data, &toCopy[0], length); } - StringRef(Arena& p, const uint8_t* toCopy, int length) : data(new (p) uint8_t[length]), length(length) { + StringRef(Arena& p, const uint8_t* toCopy, int length) : data(new(p) uint8_t[length]), length(length) { if (length > 0) { memcpy((void*)data, toCopy, length); } @@ -1007,7 +1007,7 @@ public: // Arena constructor for non-Ref types, identified by !flow_ref template VectorRef(Arena& p, const VectorRef& toCopy, typename std::enable_if::value, int>::type = 0) - : VPS(toCopy), data((T*)new (p) uint8_t[sizeof(T) * toCopy.size()]), m_size(toCopy.size()), + : VPS(toCopy), data((T*)new(p) uint8_t[sizeof(T) * toCopy.size()]), m_size(toCopy.size()), m_capacity(toCopy.size()) { if (m_size > 0) { std::copy(toCopy.data, toCopy.data + m_size, data); @@ -1017,7 +1017,7 @@ public: // Arena constructor for Ref types, which must have an Arena constructor template VectorRef(Arena& p, const VectorRef& toCopy, typename std::enable_if::value, int>::type = 0) - : VPS(), data((T*)new (p) uint8_t[sizeof(T) * toCopy.size()]), m_size(toCopy.size()), m_capacity(toCopy.size()) { + : VPS(), data((T*)new(p) uint8_t[sizeof(T) * toCopy.size()]), m_size(toCopy.size()), m_capacity(toCopy.size()) { for (int i = 0; i < m_size; i++) { auto ptr = new (&data[i]) T(p, toCopy[i]); VPS::add(*ptr); diff --git a/flow/include/flow/Error.h b/flow/include/flow/Error.h index 0e18b90bcc..cf2ba6ccf2 100644 --- a/flow/include/flow/Error.h +++ b/flow/include/flow/Error.h @@ -108,7 +108,7 @@ extern Error internal_error_impl(const char* a_nm, #define internal_error_msg(msg) internal_error_impl(msg, __FILE__, __LINE__) extern bool isAssertDisabled(int line); -//#define ASSERT( condition ) ((void)0) +// #define ASSERT( condition ) ((void)0) #define ASSERT(condition) \ do { \ if (!((condition) || isAssertDisabled(__LINE__))) { \ diff --git 
a/flow/include/flow/FastAlloc.h b/flow/include/flow/FastAlloc.h index 779bd94cd0..3992e590e8 100644 --- a/flow/include/flow/FastAlloc.h +++ b/flow/include/flow/FastAlloc.h @@ -28,11 +28,11 @@ // ALLOC_INSTRUMENTATION_STDOUT enables non-sampled logging of all allocations and deallocations to stdout to be // processed by tools/alloc_instrumentation.py -//#define ALLOC_INSTRUMENTATION_STDOUT ENABLED(NOT_IN_CLEAN) +// #define ALLOC_INSTRUMENTATION_STDOUT ENABLED(NOT_IN_CLEAN) -//#define ALLOC_INSTRUMENTATION ENABLED(NOT_IN_CLEAN) -// The form "(1==1)" in this context is used to satisfy both clang and vc++ with a single syntax. Clang rejects "1" and -// vc++ rejects "true". +// #define ALLOC_INSTRUMENTATION ENABLED(NOT_IN_CLEAN) +// The form "(1==1)" in this context is used to satisfy both clang and vc++ with a single syntax. Clang rejects "1" +// and vc++ rejects "true". #define FASTALLOC_THREAD_SAFE (FLOW_THREAD_SAFE || (1 == 1)) #if VALGRIND @@ -265,7 +265,7 @@ inline void freeFast(int size, void* ptr) { return FastAllocator<128>::release(ptr); if (size <= 256) return FastAllocator<256>::release(ptr); - delete[](uint8_t*) ptr; + delete[] (uint8_t*)ptr; } // Allocate a block of memory aligned to 4096 bytes. 
Size must be a multiple of diff --git a/flow/include/flow/Knobs.h b/flow/include/flow/Knobs.h index 3854d7797c..1ff40625ba 100644 --- a/flow/include/flow/Knobs.h +++ b/flow/include/flow/Knobs.h @@ -141,11 +141,18 @@ public: double CHAOS_LOGGING_INTERVAL; bool WRITE_TRACING_ENABLED; - bool TRACING_SPAN_ATTRIBUTES_ENABLED; double TRACING_SAMPLE_RATE; std::string TRACING_UDP_LISTENER_ADDR; int TRACING_UDP_LISTENER_PORT; + // Metrics + std::string METRICS_DATA_MODEL; + double METRICS_EMISSION_INTERVAL; + std::string STATSD_UDP_EMISSION_ADDR; + std::string OTEL_UDP_EMISSION_ADDR; + int STATSD_UDP_EMISSION_PORT; + int OTEL_UDP_EMISSION_PORT; + // run loop profiling double RUN_LOOP_PROFILING_INTERVAL; double SLOWTASK_PROFILING_LOG_INTERVAL; diff --git a/fdbrpc/include/fdbrpc/Msgpack.h b/flow/include/flow/Msgpack.h similarity index 82% rename from fdbrpc/include/fdbrpc/Msgpack.h rename to flow/include/flow/Msgpack.h index 0f5c516c31..dea6d67670 100644 --- a/fdbrpc/include/fdbrpc/Msgpack.h +++ b/flow/include/flow/Msgpack.h @@ -154,4 +154,26 @@ inline void serialize_map(const Map& map, MsgpackBuffer& buf) { serialize_string(value.begin(), value.size(), buf); } } -#endif \ No newline at end of file + +// Serializes object T according to ext msgpack specification +template +inline void serialize_ext(const T& t, MsgpackBuffer& buf, uint8_t type, F f) { + buf.write_byte(0xc9); + // We don't know for sure the amount of bytes we'll be writing. 
+ // So for now we set the payload size as zero and then we take the difference in data size + // from now and after we invoke f to determine how many bytes were written + size_t byte_idx = buf.data_size; + for (int i = 0; i < 4; i++) { + buf.write_byte(0); + } + buf.write_byte(type); + size_t prev_size = buf.data_size; + f(t, buf); + size_t updated_size = static_cast(buf.data_size - prev_size); + ASSERT_WE_THINK(updated_size <= std::numeric_limits::max()); + buf.edit_byte(reinterpret_cast(&updated_size)[3], byte_idx); + buf.edit_byte(reinterpret_cast(&updated_size)[2], byte_idx + 1); + buf.edit_byte(reinterpret_cast(&updated_size)[1], byte_idx + 2); + buf.edit_byte(reinterpret_cast(&updated_size)[0], byte_idx + 3); +} +#endif diff --git a/flow/include/flow/OTELMetrics.h b/flow/include/flow/OTELMetrics.h new file mode 100644 index 0000000000..84de699226 --- /dev/null +++ b/flow/include/flow/OTELMetrics.h @@ -0,0 +1,217 @@ +/* + * OTELMetrics.h + * + * This source file is part of the FoundationDB open source project + * + * Copyright 2013-2022 Apple Inc. and the FoundationDB project authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#ifndef FLOW_OTELMETRIC_H +#define FLOW_OTELMETRIC_H +#include "flow/flow.h" +#include "flow/Msgpack.h" +#include +#include + +/* + The following classes are based off of the OTEL protobuf definitions for metrics: + NumberDataPoint + HistogramDataPoint + OTELSum + OTELGauge + OTELHistogram + + Since Counters in FDB always use int64_t as the underlying type (see ICounter impl) + we choose to not cover the version of OTELSum which uses double + + Furthermore, we also diverge from the protobuf definition of HistogramDataPoint by using DDSketch. + This means that that there is an additional field for storing the errorGuarantee (a double). Also, to save some + space the buckets are uint32_t instead of uint64_t. The reason for this is due to the fact that it is highly unlikely + that a single bucket would hit it's threshold with the default error guarantee of 1%. + + The receiver will sign extend the buckets to uint64_t upon receiving a HistogramDataPoint. + + See https://github.com/open-telemetry/opentelemetry-proto/blob/main/opentelemetry/proto/metrics/v1/metrics.proto + for more details on the protobuf definitions +*/ + +namespace OTEL { +class Attribute { +public: + std::string key; + std::string value; + + Attribute(const std::string& k, const std::string& v) : key{ k }, value{ v } {} + Attribute(std::string&& k, std::string&& v) : key{ std::move(k) }, value{ std::move(v) } {} +}; + +enum AggregationTemporality { + AGGREGATION_TEMPORALITY_UNSPECIFIED = 0, + AGGREGATION_TEMPORALITY_DELTA, + AGGREGATION_TEMPORALITY_CUMULATIVE +}; + +enum DataPointFlags { FLAG_NONE = 0, FLAG_NO_RECORDED_VALUE }; + +class NumberDataPoint { +public: + double startTime; // 9 bytes in msgpack + double recordTime; // 9 bytes in msgpack + std::vector attributes; // Variable size: assume to be 23 bytes + std::variant val; // 9 bytes in msgpack + DataPointFlags flags; // 1 byte in msgpack + // If we take the sum of above, we get 51 bytes + static const uint32_t MsgpackBytes = 51; + 
NumberDataPoint(int64_t v) : recordTime{ now() }, val{ v }, flags{ DataPointFlags::FLAG_NONE } {} + + NumberDataPoint(double v) : recordTime{ now() }, val{ v }, flags{ DataPointFlags::FLAG_NONE } {} + + NumberDataPoint& addAttribute(const std::string& key, const std::string& value) { + attributes.emplace_back(Attribute(key, value)); + return *this; + } +}; + +enum OTELMetricType { Gauge = 0, Sum, Hist }; + +class OTELSum { +public: + std::string name; + std::vector points; + AggregationTemporality aggregation; + bool isMonotonic; + OTELSum() : aggregation{ AGGREGATION_TEMPORALITY_CUMULATIVE }, isMonotonic{ true } {} + OTELSum(const std::string& n) : name{ n }, aggregation{ AGGREGATION_TEMPORALITY_CUMULATIVE }, isMonotonic{ true } {} + OTELSum(const std::string& n, int64_t v) + : name{ n }, aggregation{ AGGREGATION_TEMPORALITY_CUMULATIVE }, isMonotonic{ true } { + points.emplace_back(v); + } + // Returns the approximate number of msgpack bytes needed to serialize this object + // Since NumberDataPoint can have variable sized attributes, we play on the same side + // and assume that they are always a constant value + uint32_t getMsgpackBytes() const { + uint32_t name_bytes = name.size() + 4; + uint32_t datapoint_bytes = points.size() * NumberDataPoint::MsgpackBytes; + // Both the isMonotonic and aggregation occupy 1 byte each, so we add 2 to the result + return name_bytes + datapoint_bytes + 2; + } +}; + +class OTELGauge { +public: + std::string name; + std::vector points; + OTELGauge() {} + OTELGauge(const std::string& n) : name{ n } {} + OTELGauge(const std::string& n, double v) : name{ n } { points.emplace_back(v); } +}; + +class HistogramDataPoint { +public: + double errorGuarantee; + std::vector attributes; + double startTime; + const std::vector buckets; + double recordTime; + uint64_t count; + double sum; + double min; + double max; + DataPointFlags flags; + HistogramDataPoint(double error, const std::vector& s, double _min, double _max, double _sum) + : 
errorGuarantee(error), recordTime{ now() }, buckets{ s }, count{ buckets.size() }, min{ _min }, max{ _max }, + sum{ _sum }, flags{ DataPointFlags::FLAG_NONE } {} + HistogramDataPoint& addAttribute(const std::string& key, const std::string& value) { + attributes.emplace_back(Attribute(key, value)); + return *this; + } +}; + +class OTELHistogram { +public: + std::string name; + std::vector points; + AggregationTemporality aggregation; + OTELHistogram() {} + OTELHistogram(const std::string& n, + double error, + const std::vector& s, + double min, + double max, + double sum) + : name{ n }, aggregation{ AGGREGATION_TEMPORALITY_DELTA } { + points.emplace_back(error, s, min, max, sum); + } +}; + +inline void serialize(const Attribute& attr, MsgpackBuffer& buf) { + serialize_string(attr.key, buf); + serialize_string(attr.value, buf); +} + +inline void serialize(const NumberDataPoint& point, MsgpackBuffer& buf) { + serialize_value(point.startTime, buf, 0xcb); + serialize_value(point.recordTime, buf, 0xcb); + typedef void (*func_ptr)(const Attribute&, MsgpackBuffer&); + func_ptr f = serialize; + serialize_vector(point.attributes, buf, f); + if (std::holds_alternative(point.val)) { + serialize_value(std::get(point.val), buf, 0xd3); + } else { + serialize_value(std::get(point.val), buf, 0xcb); + } + serialize_value(point.flags, buf, 0xcc); +} + +inline void serialize(const OTELSum& sum, MsgpackBuffer& buf) { + serialize_string(sum.name, buf); + typedef void (*func_ptr)(const NumberDataPoint&, MsgpackBuffer&); + func_ptr f = OTEL::serialize; + serialize_vector(sum.points, buf, f); + serialize_value(sum.aggregation, buf, 0xcc); + serialize_bool(sum.isMonotonic, buf); +} + +inline void serialize(const OTELGauge& g, MsgpackBuffer& buf) { + serialize_string(g.name, buf); + typedef void (*func_ptr)(const NumberDataPoint&, MsgpackBuffer&); + func_ptr f = OTEL::serialize; + serialize_vector(g.points, buf, f); +} + +inline void serialize(const HistogramDataPoint& point, MsgpackBuffer& 
buf) { + typedef void (*func_ptr)(const Attribute&, MsgpackBuffer&); + func_ptr f = serialize; + serialize_value(point.errorGuarantee, buf, 0xcb); + serialize_vector(point.attributes, buf, f); + serialize_value(point.startTime, buf, 0xcb); + serialize_value(point.recordTime, buf, 0xcb); + serialize_value(point.count, buf, 0xcf); + serialize_value(point.sum, buf, 0xcb); + serialize_value(point.min, buf, 0xcb); + serialize_value(point.max, buf, 0xcb); + auto f_Bucket = [](const uint32_t& d, MsgpackBuffer& buf) { serialize_value(d, buf, 0xce); }; + serialize_vector(point.buckets, buf, f_Bucket); + serialize_value(point.flags, buf, 0xcc); +} + +inline void serialize(const OTELHistogram& h, MsgpackBuffer& buf) { + serialize_string(h.name, buf); + typedef void (*func_ptr)(const HistogramDataPoint&, MsgpackBuffer&); + func_ptr f = OTEL::serialize; + serialize_vector(h.points, buf, f); + serialize_value(h.aggregation, buf, 0xcc); +} +} // namespace OTEL +#endif diff --git a/flow/include/flow/TDMetric.actor.h b/flow/include/flow/TDMetric.actor.h index 5adceede15..0261331624 100644 --- a/flow/include/flow/TDMetric.actor.h +++ b/flow/include/flow/TDMetric.actor.h @@ -22,22 +22,30 @@ // When actually compiled (NO_INTELLISENSE), include the generated version of this file. In intellisense use the source // version. +#include "flow/IRandom.h" +#include "flow/Trace.h" +#include #if defined(NO_INTELLISENSE) && !defined(FLOW_TDMETRIC_ACTOR_G_H) #define FLOW_TDMETRIC_ACTOR_G_H #include "flow/TDMetric.actor.g.h" #elif !defined(FLOW_TDMETRIC_ACTOR_H) #define FLOW_TDMETRIC_ACTOR_H - +#include +#include #include "flow/flow.h" #include "flow/network.h" #include "flow/Knobs.h" #include "flow/genericactors.actor.h" #include "flow/CompressedInt.h" +#include "flow/OTELMetrics.h" #include #include #include #include "flow/actorcompiler.h" // This must be the last #include. 
+enum MetricsDataModel { STATSD = 0, OTLP, NONE }; +MetricsDataModel knobToMetricModel(const std::string& knob); + struct MetricNameRef { MetricNameRef() {} MetricNameRef(const StringRef& type, const StringRef& name, const StringRef& id) : type(type), name(name), id(id) {} @@ -149,11 +157,11 @@ struct MetricKeyRef { void writeMetricName(BinaryWriter& wr) const; }; -struct MetricUpdateBatch { +struct FDBScope { std::vector inserts; std::vector appends; std::vector, Standalone>> updates; - std::vector(IMetricDB*, MetricUpdateBatch*)>> callbacks; + std::vector(IMetricDB*, FDBScope*)>> callbacks; void clear() { inserts.clear(); @@ -163,6 +171,22 @@ struct MetricUpdateBatch { } }; +struct MetricBatch { + FDBScope scope; + + MetricBatch() {} + + MetricBatch(FDBScope* in) { + assert(in != nullptr); + scope.inserts = std::move(in->inserts); + scope.appends = std::move(in->appends); + scope.updates = std::move(in->updates); + scope.callbacks = std::move(in->callbacks); + } + + void clear() { scope.clear(); } +}; + template inline StringRef metricTypeName() { // If this function does not compile then T is not a supported metric type @@ -180,6 +204,7 @@ MAKE_TYPENAME(Standalone, "String"_sr) #undef MAKE_TYPENAME struct BaseMetric; +class IMetric; // The collection of metrics that exist for a single process, at a single address. 
class TDMetricCollection { @@ -224,6 +249,23 @@ public: bool canLog(int level) const; }; +class MetricCollection { +public: + std::unordered_map map; + std::unordered_map sumMap; + std::unordered_map histMap; + std::unordered_map gaugeMap; + std::vector statsd_message; + + MetricCollection() {} + + static MetricCollection* getMetricCollection() { + if (g_network == nullptr || knobToMetricModel(FLOW_KNOBS->METRICS_DATA_MODEL) == MetricsDataModel::NONE) + return nullptr; + return static_cast((void*)g_network->global(INetwork::enMetrics)); + } +}; + struct MetricData { uint64_t start; uint64_t rollTime; @@ -559,14 +601,14 @@ public: } // Flushes data blocks in metrics to batch, optionally patching headers if a header is given - void flushUpdates(MetricKeyRef const& mk, uint64_t rollTime, MetricUpdateBatch& batch) { + void flushUpdates(MetricKeyRef const& mk, uint64_t rollTime, MetricBatch& batch) { while (metrics.size()) { auto& data = metrics.front(); if (data.start != 0 && data.rollTime <= rollTime) { // If this data is to be appended, write it to the batch now. 
if (data.appendStart) { - batch.appends.push_back(KeyWithWriter(mk.packDataKey(data.appendStart), data.writer)); + batch.scope.appends.push_back(KeyWithWriter(mk.packDataKey(data.appendStart), data.writer)); } else { // Otherwise, insert but first, patch the header if this block is old enough if (data.rollTime <= lastTimeRequiringHeaderPatch) { @@ -574,7 +616,7 @@ public: FieldLevel::updateSerializedHeader(data.writer.toValue(), previousHeader.get()); } - batch.inserts.push_back(KeyWithWriter(mk.packDataKey(data.start), data.writer)); + batch.scope.inserts.push_back(KeyWithWriter(mk.packDataKey(data.start), data.writer)); } if (metrics.size() == 1) { @@ -593,7 +635,7 @@ public: IMetricDB* db, Standalone mk, uint64_t rollTime, - MetricUpdateBatch* batch) { + FDBScope* scope) { Optional> block = wait(db->getLastBlock(mk.packDataKey(-1))); @@ -622,14 +664,15 @@ public: // Now flush the level data up to the rollTime argument and patch anything older than // lastTimeRequiringHeaderPatch - self->flushUpdates(mk, rollTime, *batch); + MetricBatch batch{ scope }; + self->flushUpdates(mk, rollTime, batch); return Void(); } // Flush this level's data to the output batch. // This function must NOT be called again until any callbacks added to batch have been completed. - void flush(const MetricKeyRef& mk, uint64_t rollTime, MetricUpdateBatch& batch) { + void flush(const MetricKeyRef& mk, uint64_t rollTime, MetricBatch& batch) { // Don't do anything if there is no data in the queue to flush. 
if (metrics.empty() || metrics.front().start == 0) return; @@ -641,8 +684,8 @@ public: Standalone mkCopy = mk; // Previous header is not present so queue a callback which will update it - batch.callbacks.push_back([=](IMetricDB* db, MetricUpdateBatch* batch) mutable -> Future { - return updatePreviousHeader(this, db, mkCopy, rollTime, batch); + batch.scope.callbacks.push_back([=](IMetricDB* db, FDBScope* s) mutable -> Future { + return updatePreviousHeader(this, db, mkCopy, rollTime, s); }); } }; @@ -692,7 +735,7 @@ struct EventField : public Descriptor { } } - void flushField(MetricKeyRef const& mk, uint64_t rollTime, MetricUpdateBatch& batch) { + void flushField(MetricKeyRef const& mk, uint64_t rollTime, MetricBatch& batch) { MetricKeyRef fk = mk.withField(*this); for (int j = 0; j < levels.size(); ++j) { fk.level = j; @@ -728,7 +771,7 @@ struct BaseMetric { virtual void rollMetric(uint64_t t) = 0; - virtual void flushData(const MetricKeyRef& mk, uint64_t rollTime, MetricUpdateBatch& batch) = 0; + virtual void flushData(const MetricKeyRef& mk, uint64_t rollTime, MetricBatch& batch) = 0; virtual void registerFields(const MetricKeyRef& mk, std::vector>& fieldKeys){}; // Set the metric's config. An assert will fail if the metric is enabled before the metrics collection is @@ -875,17 +918,17 @@ struct EventMetric final : E, ReferenceCounted>, MetricUtil::field_indexes(), mk, rollTime, batch); if (!latestRecorded) { - batch.updates.emplace_back(mk.packLatestKey(), StringRef()); + batch.scope.updates.emplace_back(mk.packLatestKey(), StringRef()); latestRecorded = true; } } template - void flushFields(index_sequence, MetricKeyRef const& mk, uint64_t rollTime, MetricUpdateBatch& batch) { + void flushFields(index_sequence, MetricKeyRef const& mk, uint64_t rollTime, MetricBatch& batch) { #ifdef NO_INTELLISENSE auto _ = { (std::get(values).flushField(mk, rollTime, batch), Void())... 
}; (void)_; @@ -945,7 +988,7 @@ struct DynamicFieldBase { virtual void nextKey(uint64_t t, int level) = 0; virtual void nextKeyAllLevels(uint64_t t) = 0; virtual void rollMetric(uint64_t t) = 0; - virtual void flushField(MetricKeyRef const& mk, uint64_t rollTime, MetricUpdateBatch& batch) = 0; + virtual void flushField(MetricKeyRef const& mk, uint64_t rollTime, MetricBatch& batch) = 0; virtual void registerField(MetricKeyRef const& mk, std::vector>& fieldKeys) = 0; // Set the current value of this field from the value of another @@ -991,7 +1034,7 @@ struct DynamicField final : public DynamicFieldBase, EventField>& fieldKeys) override { @@ -1128,7 +1171,7 @@ public: uint64_t log(uint64_t explicitTime = 0); // Virtual function implementations - void flushData(MetricKeyRef const& mk, uint64_t rollTime, MetricUpdateBatch& batch) override; + void flushData(MetricKeyRef const& mk, uint64_t rollTime, MetricBatch& batch) override; void rollMetric(uint64_t t) override; void registerFields(MetricKeyRef const& mk, std::vector>& fieldKeys) override; }; @@ -1245,9 +1288,9 @@ public: T getValue() const { return tv.value; } - void flushData(const MetricKeyRef& mk, uint64_t rollTime, MetricUpdateBatch& batch) override { + void flushData(const MetricKeyRef& mk, uint64_t rollTime, MetricBatch& batch) override { if (!recorded) { - batch.updates.emplace_back(mk.packLatestKey(), getLatestAsValue()); + batch.scope.updates.emplace_back(mk.packLatestKey(), getLatestAsValue()); recorded = true; } @@ -1407,6 +1450,45 @@ typedef MetricHandle DoubleMetricHandle; template using EventMetricHandle = MetricHandle>; +enum StatsDMetric { GAUGE = 0, COUNTER }; + +class IMetric { +public: + const UID id; + const MetricsDataModel model; + IMetric(MetricsDataModel m) : id{ deterministicRandom()->randomUniqueID() }, model{ m } { + MetricCollection* metrics = MetricCollection::getMetricCollection(); + if (metrics != nullptr) { + if (metrics->map.count(id) > 0) { + TraceEvent(SevError, 
"MetricCollection_NameCollision").detail("NameConflict", id.toString().c_str()); + ASSERT(metrics->map.count(id) > 0); + } + metrics->map[id] = this; + } + } + ~IMetric() { + MetricCollection* metrics = MetricCollection::getMetricCollection(); + if (metrics != nullptr) { + metrics->map.erase(id); + } + } +}; + +std::string createStatsdMessage(const std::string& name, + StatsDMetric type, + const std::string& val, + const std::vector>& tags); + +std::string createStatsdMessage(const std::string& name, StatsDMetric type, const std::string& val); + +std::vector splitString(const std::string& str, const std::string& delimit); + +bool verifyStatsdMessage(const std::string& msg); + +void createOtelGauge(UID id, const std::string& name, double value); + +void createOtelGauge(UID id, const std::string& name, double value, const std::vector&); + #include "flow/unactorcompiler.h" #endif diff --git a/flow/include/flow/TaskQueue.h b/flow/include/flow/TaskQueue.h index 89ca6e81d4..41db79b263 100644 --- a/flow/include/flow/TaskQueue.h +++ b/flow/include/flow/TaskQueue.h @@ -74,7 +74,7 @@ public: // Moves all timers that are scheduled to be executed at or before now to the ready queue. void processReadyTimers(double now) { - int numTimers = 0; + [[maybe_unused]] int numTimers = 0; while (!timers.empty() && timers.top().at <= now + INetwork::TIME_EPS) { ++numTimers; ++countTimers; @@ -86,7 +86,7 @@ public: // Moves all tasks scheduled from a different thread to the ready queue. 
void processThreadReady() { - int numReady = 0; + [[maybe_unused]] int numReady = 0; while (true) { Optional> t = threadReady.pop(); if (!t.present()) diff --git a/flow/include/flow/flat_buffers.h b/flow/include/flow/flat_buffers.h index e64982ecbf..1362069ae9 100644 --- a/flow/include/flow/flat_buffers.h +++ b/flow/include/flow/flat_buffers.h @@ -389,7 +389,7 @@ template constexpr bool is_vector_like = vector_like_traits::value; template -constexpr bool is_vector_of_union_like = is_vector_like&& is_union_like::value_type>; +constexpr bool is_vector_of_union_like = is_vector_like && is_union_like::value_type>; template constexpr bool is_struct_like = struct_like_traits::value; @@ -486,8 +486,8 @@ template constexpr int fb_size = is_struct_like ? struct_size(typename struct_like_traits::types{}) : fb_scalar_size; template -constexpr int fb_align = is_struct_like ? align_helper(typename struct_like_traits::types{}) - : AlignToPowerOfTwo(fb_scalar_size); +constexpr int fb_align = + is_struct_like ? align_helper(typename struct_like_traits::types{}) : AlignToPowerOfTwo(fb_scalar_size); template struct _SizeOf { diff --git a/flow/include/flow/flow.h b/flow/include/flow/flow.h index 2ff64b236e..a21bcfee2b 100644 --- a/flow/include/flow/flow.h +++ b/flow/include/flow/flow.h @@ -1137,7 +1137,7 @@ auto const& getReplyPromiseStream(Request const& r) { // Neither of these implementations of REPLY_TYPE() works on both MSVC and g++, so... 
#ifdef __GNUG__ #define REPLY_TYPE(RequestType) decltype(getReplyPromise(std::declval()).getFuture().getValue()) -//#define REPLY_TYPE(RequestType) decltype( getReplyFuture( std::declval() ).getValue() ) +// #define REPLY_TYPE(RequestType) decltype( getReplyFuture( std::declval() ).getValue() ) #else template struct ReplyType { diff --git a/flow/include/flow/genericactors.actor.h b/flow/include/flow/genericactors.actor.h index bc609e8426..c1081fbbd9 100644 --- a/flow/include/flow/genericactors.actor.h +++ b/flow/include/flow/genericactors.actor.h @@ -204,8 +204,12 @@ ACTOR template Future timeout(Future what, double time, T timedoutValue, TaskPriority taskID = TaskPriority::DefaultDelay) { Future end = delay(time, taskID); choose { - when(T t = wait(what)) { return t; } - when(wait(end)) { return timedoutValue; } + when(T t = wait(what)) { + return t; + } + when(wait(end)) { + return timedoutValue; + } } } @@ -213,8 +217,12 @@ ACTOR template Future> timeout(Future what, double time) { Future end = delay(time); choose { - when(T t = wait(what)) { return t; } - when(wait(end)) { return Optional(); } + when(T t = wait(what)) { + return t; + } + when(wait(end)) { + return Optional(); + } } } @@ -222,8 +230,12 @@ ACTOR template Future timeoutError(Future what, double time, TaskPriority taskID = TaskPriority::DefaultDelay) { Future end = delay(time, taskID); choose { - when(T t = wait(what)) { return t; } - when(wait(end)) { throw timed_out(); } + when(T t = wait(what)) { + return t; + } + when(wait(end)) { + throw timed_out(); + } } } @@ -357,7 +369,9 @@ Future mapAsync(FutureStream input, F actorFunc, PromiseStream outpu loop { try { choose { - when(T nextInput = waitNext(input)) { futures.push_back(actorFunc(nextInput)); } + when(T nextInput = waitNext(input)) { + futures.push_back(actorFunc(nextInput)); + } when(U nextOutput = wait(futures.size() == 0 ? 
Never() : futures.front())) { output.send(nextOutput); futures.pop_front(); @@ -474,7 +488,9 @@ Future asyncFilter(FutureStream input, F actorPred, PromiseStream ou loop { try { choose { - when(T nextInput = waitNext(input)) { futures.emplace_back(nextInput, actorPred(nextInput)); } + when(T nextInput = waitNext(input)) { + futures.emplace_back(nextInput, actorPred(nextInput)); + } when(bool pass = wait(futures.size() == 0 ? Never() : futures.front().second)) { if (pass) output.send(futures.front().first); @@ -768,7 +784,9 @@ private: loop { choose { when(wait(self->input.onChange())) {} - when(wait(delay(bounceTime))) { break; } + when(wait(delay(bounceTime))) { + break; + } } } self->output.setUnconditional(Void()); @@ -1013,8 +1031,12 @@ Future smartQuorum(std::vector> results, return Void(); wait(quorum(results, required)); choose { - when(wait(quorum(results, (int)results.size()))) { return Void(); } - when(wait(delay(extraSeconds, taskID))) { return Void(); } + when(wait(quorum(results, (int)results.size()))) { + return Void(); + } + when(wait(delay(extraSeconds, taskID))) { + return Void(); + } } } @@ -1202,8 +1224,12 @@ Future orYield(Future f); ACTOR template Future chooseActor(Future lhs, Future rhs) { choose { - when(T t = wait(lhs)) { return t; } - when(T t = wait(rhs)) { return t; } + when(T t = wait(lhs)) { + return t; + } + when(T t = wait(rhs)) { + return t; + } } } @@ -1247,7 +1273,9 @@ inline Future operator||(Future const& lhs, Future const& rhs) ACTOR template Future recurring(Func what, double interval, TaskPriority taskID = TaskPriority::DefaultDelay) { loop choose { - when(wait(delay(interval, taskID))) { what(); } + when(wait(delay(interval, taskID))) { + what(); + } } } @@ -1340,7 +1368,9 @@ void tagAndForwardError(PromiseStream* pOutput, Error value, Future sig ACTOR template Future waitOrError(Future f, Future errorSignal) { choose { - when(T val = wait(f)) { return val; } + when(T val = wait(f)) { + return val; + } 
when(wait(errorSignal)) { ASSERT(false); throw internal_error(); diff --git a/flow/include/flow/network.h b/flow/include/flow/network.h index a5bc6ad336..bed123923a 100644 --- a/flow/include/flow/network.h +++ b/flow/include/flow/network.h @@ -532,6 +532,7 @@ public: enBitFlipper = 17, enHistogram = 18, enTokenCache = 19, + enMetrics = 20, COUNT // Add new fields before this enumerator };