Compare commits

...

26 Commits

Author SHA1 Message Date
Yao Xiao afbcf5ef5f
Enable backward read in consistency checker. (#11721)
* Do backward reads in consistency checker.

* Add knob for read options in consistency checker.
2024-10-26 09:57:36 -07:00
Jingyu Zhou aebd907314
Merge pull request #11733 from jzhou77/fix
Fix valgrind errors of uninitialised values
2024-10-26 09:44:44 -07:00
Syed Paymaan Raza 7fd213739b
Urgent consistency checker fixes (#11734)
* Drop duplicate or conflicted requests from urgent consistency checker clients

* Fix edge case in urgent consistency check causing infinite loop

* fixup! Fix edge case in urgent consistency check causing infinite loop
2024-10-25 22:37:42 -07:00
Jingyu Zhou 3fc5c1dd03 Fix valgrind errors of uninitialised values 2024-10-25 15:41:56 -07:00
Zhe Wang ee12b1616b
disable range lock injection in range conflict workload (#11731) 2024-10-25 14:45:46 -07:00
Dan Lambright 2095baff55
check correct version in getMappedKeyValuesQ (#11729)
Co-authored-by: Dan Lambright <hlambright@apple.com>
2024-10-24 20:40:24 -04:00
Syed Paymaan Raza 36b113993c
[gray_failure] Update CC_ONLY_CONSIDER_INTRA_DC_LATENCY knob documentation (#11728) 2024-10-24 07:48:37 -07:00
Zhe Wang 43446204ed
Database Per-Range Lock (#11693)
* range lock framework

* improve the framework

* persist to txnStateStore

* fix bugs

* code clean

* code clean

* bug fix

* address comments

* add complex test workload and fix bugs found by the workload

* add workload correctness check and fix bugs

* code clean up

* add random range lock injection

* fix bugs in RandomRangeLock.actor.cpp

* enable random range lock injection in general workloads

* add rangelockcycle test

* disable random range lock in backup workloads

* nits

* add range lock ownership concept

* enable lock ownership to rangeLock

* api deal with tenant

* fix CI

* add test for multiple rangeLock owners

* nits

* address comments and renaming

* address comments
2024-10-23 16:25:56 -07:00
Syed Paymaan Raza 5f480947ad
[fdbserver] Gray failure and simulator improvements related to remote processes (#11717)
* [fdbserver][simulator] Add remoteDesiredTLogCount option

* [fdbserver][simulator] Allow explicitly specifying number of stateless classes in each DC

* [fdbserver][gray_failure] RemoteTLog lagging SS simulation test

* [fdbserver][gray_failure] Consider remote processes + CC inter/intra latency awareness

* [fdbserver][cc] Make processInSameDC O(1)
2024-10-23 13:15:29 -07:00
Dan Lambright a87e940e05
fix bug TxnStateStoreCycleTest for version vector (#11723)
* fix bug TxnStateStoreCycleTest for version vector

* Respond to review comment

---------

Co-authored-by: Dan Lambright <hlambright@apple.com>
2024-10-23 15:53:15 -04:00
Dan Lambright 9790a53f7c
Propagate rv to tLogs on version vector recovery (#11677)
* refactor management of cluster recovery version (crv)

- send the crv to tlogs as they rejoin the cluster during recovery, not
just when an rv is computed.
- if a tlog has not yet received the crv, wait for it

* Propagate cluster recovery version to tlog for version vector recovery
Enable RECORD_RECOVER_AT_IN_CSTATE to store recovery version in cstate

* respond to review comments

* - don't send rv to old tlog generations
- send crv after locking, if rv computed

* Remove CRV RPCs, have cursor pad beyond end tlog to RV

* Send end version in peek request to facilitate vv recovery

* Fix serialization in peek request interface.

---------

Co-authored-by: Dan Lambright <hlambright@apple.com>
2024-10-22 17:54:26 -04:00
Doğan Çeçen 83d919e9a9 Fix actorcompiler target in CMake add_flow_target
This also fixes #11595 - if Unix Makefiles is chosen for CMake builds,
the build was failing with:

```
make[2]: *** No rule to make target 'actorcompiler.exe', needed by 'flow/ActorCollection.actor.g.cpp'.  Stop.
```

I suspect it could have been a problem for Ninja as well, since the issue
was due to a race condition, but it probably hasn't happened so far due to
other unknown factors.

See this example in CMake add_custom_command documentation:

https://cmake.org/cmake/help/latest/command/add_custom_command.html#example-generating-files-for-multiple-targets

The correct target to depend on is `actorcompiler`, so that CMake generates
the right dependency order; `${actor_exe}` is just a string that points to
the location of the actor compiler. See here:

4260bbb3c2/cmake/CompileActorCompiler.cmake (L26-L27)
2024-10-22 22:54:22 +08:00
Vishesh Yadav 5215eb61bd
Merge pull request #11718 from vishesh/dummythreadpool-init-fix 2024-10-21 23:46:53 +05:30
walter 4260bbb3c2
Add missing headers (#11720) 2024-10-16 22:18:21 -07:00
Vishesh Yadav 809c3a86a7 Call IThreadReceiver::init() in DummyThreadPool 2024-10-16 10:26:53 +05:30
He Liu f35c329f4b Added comments 2024-10-15 19:22:58 -07:00
He Liu f7fe09c577 Ignore data move conflict on TSS in simulation. 2024-10-15 19:22:58 -07:00
Yao Xiao 7290369aac
Use a single iterator pool for all physical shards. (#11699)
* Rewrite iterator pool.

* simulation fix
2024-10-15 17:28:54 -07:00
Syed Paymaan Raza c146ee0869
[fdbserver] Use STL contains method and std::find for containment checks (#11702) 2024-10-15 11:40:02 -07:00
Vishesh Yadav b4bad4c1d6
Merge pull request #11704 from vishesh/rdar-137053948
Log all the incoming connections
2024-10-14 10:10:38 +05:30
Vishesh Yadav 95319e4d76 Format 2024-10-10 13:32:45 -07:00
Vishesh Yadav 92baea6609 Refactor 2024-10-10 03:48:28 +00:00
Vishesh Yadav b7fbe20f29
Update FlowTransport.actor.cpp 2024-10-09 14:37:48 -07:00
Vishesh Yadav d5bb821949
Update FlowTransport.actor.cpp 2024-10-09 13:22:35 -07:00
Vishesh Yadav 383b35aafc Address review comments 2024-10-09 18:14:01 +00:00
Vishesh Yadav 42f5e84306 Log all incoming connections 2024-10-09 11:09:50 -07:00
118 changed files with 2813 additions and 721 deletions

View File

@ -90,7 +90,7 @@ extern "C" DLLEXPORT fdb_bool_t fdb_error_predicate(int predicate_test, fdb_erro
code == error_code_commit_proxy_memory_limit_exceeded ||
code == error_code_transaction_throttled_hot_shard || code == error_code_batch_transaction_throttled ||
code == error_code_process_behind || code == error_code_tag_throttled ||
code == error_code_proxy_tag_throttled;
code == error_code_proxy_tag_throttled || code == error_code_transaction_rejected_range_locked;
}
return false;
}

View File

@ -19,7 +19,7 @@ else()
set(ACTOR_COMPILER_REFERENCES
"-r:System,System.Core,System.Xml.Linq,System.Data.DataSetExtensions,Microsoft.CSharp,System.Data,System.Xml")
add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/actorcompiler.exe
add_custom_command(OUTPUT actorcompiler.exe
COMMAND ${MCS_EXECUTABLE} ARGS ${ACTOR_COMPILER_REFERENCES} ${ACTORCOMPILER_SRCS} "-target:exe" "-out:actorcompiler.exe"
DEPENDS ${ACTORCOMPILER_SRCS}
COMMENT "Compile actor compiler" VERBATIM)

View File

@ -237,12 +237,12 @@ function(add_flow_target)
if(WIN32)
add_custom_command(OUTPUT "${out_file}"
COMMAND $<TARGET_FILE:actorcompiler> "${in_file}" "${out_file}" ${actor_compiler_flags}
DEPENDS "${in_file}" ${actor_exe}
DEPENDS "${in_file}" actorcompiler
COMMENT "Compile actor: ${src}")
else()
add_custom_command(OUTPUT "${out_file}"
COMMAND ${MONO_EXECUTABLE} ${actor_exe} "${in_file}" "${out_file}" ${actor_compiler_flags} > /dev/null
DEPENDS "${in_file}" ${actor_exe}
DEPENDS "${in_file}" actorcompiler
COMMENT "Compile actor: ${src}")
endif()
endif()

View File

@ -1015,6 +1015,7 @@ ACTOR Future<Void> applyMutations(Database cx,
}
}
} catch (Error& e) {
ASSERT_WE_THINK(e.code() != error_code_transaction_rejected_range_locked);
Severity sev =
(e.code() == error_code_restore_missing_data || e.code() == error_code_transaction_throttled_hot_shard)
? SevWarnAlways

View File

@ -301,6 +301,7 @@ void ClientKnobs::initialize(Randomize randomize) {
init( TAG_THROTTLING_PAGE_SIZE, 4096 ); if( randomize && BUGGIFY ) TAG_THROTTLING_PAGE_SIZE = 4096;
init( GLOBAL_TAG_THROTTLING_RW_FUNGIBILITY_RATIO, 4.0 );
init( PROXY_MAX_TAG_THROTTLE_DURATION, 5.0 ); if( randomize && BUGGIFY ) PROXY_MAX_TAG_THROTTLE_DURATION = 0.5;
init( TRANSACTION_LOCK_REJECTION_RETRIABLE, true );
// busyness reporting
init( BUSYNESS_SPIKE_START_THRESHOLD, 0.100 );

View File

@ -23,6 +23,7 @@
#include <vector>
#include "fdbclient/GenericManagementAPI.actor.h"
#include "fdbclient/RangeLock.h"
#include "fmt/format.h"
#include "fdbclient/Knobs.h"
#include "flow/Arena.h"
@ -2912,6 +2913,7 @@ ACTOR Future<BulkLoadState> getBulkLoadTask(Transaction* tr,
UID taskId,
std::vector<BulkLoadPhase> phases) {
state BulkLoadState bulkLoadState;
tr->setOption(FDBTransactionOptions::READ_SYSTEM_KEYS);
RangeResult result = wait(krmGetRanges(tr, bulkLoadPrefix, range));
if (result.size() > 2) {
throw bulkload_task_outdated();
@ -2959,6 +2961,260 @@ ACTOR Future<Void> acknowledgeBulkLoadTask(Database cx, KeyRange range, UID task
return Void();
}
// Persist a new owner if the input uniqueId does not exist; update the description if the uniqueId already exists
ACTOR Future<Void> registerRangeLockOwner(Database cx, std::string uniqueId, std::string description) {
if (uniqueId.empty() || description.empty()) {
throw range_lock_failed();
}
state Transaction tr(cx);
loop {
try {
tr.setOption(FDBTransactionOptions::LOCK_AWARE);
tr.setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
Optional<Value> res = wait(tr.get(rangeLockOwnerKeyFor(uniqueId)));
RangeLockOwner owner;
if (res.present()) {
owner = decodeRangeLockOwner(res.get());
ASSERT(owner.isValid());
if (owner.getDescription() == description) {
return Void();
}
owner.setDescription(description);
} else {
owner = RangeLockOwner(uniqueId, description);
}
tr.set(rangeLockOwnerKeyFor(uniqueId), rangeLockOwnerValue(owner));
wait(tr.commit());
return Void();
} catch (Error& e) {
wait(tr.onError(e));
}
}
}
ACTOR Future<Void> removeRangeLockOwner(Database cx, std::string uniqueId) {
if (uniqueId.empty()) {
throw range_lock_failed();
}
state Transaction tr(cx);
loop {
try {
tr.setOption(FDBTransactionOptions::LOCK_AWARE);
tr.setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
Optional<Value> res = wait(tr.get(rangeLockOwnerKeyFor(uniqueId)));
if (!res.present()) {
return Void();
}
RangeLockOwner owner = decodeRangeLockOwner(res.get());
ASSERT(owner.isValid());
tr.clear(rangeLockOwnerKeyFor(uniqueId));
wait(tr.commit());
return Void();
} catch (Error& e) {
wait(tr.onError(e));
}
}
}
ACTOR Future<Optional<RangeLockOwner>> getRangeLockOwner(Database cx, std::string uniqueId) {
state Transaction tr(cx);
loop {
try {
tr.setOption(FDBTransactionOptions::READ_LOCK_AWARE);
tr.setOption(FDBTransactionOptions::READ_SYSTEM_KEYS);
Optional<Value> res = wait(tr.get(rangeLockOwnerKeyFor(uniqueId)));
if (!res.present()) {
return Optional<RangeLockOwner>();
}
RangeLockOwner owner = decodeRangeLockOwner(res.get());
ASSERT(owner.isValid());
return owner;
} catch (Error& e) {
wait(tr.onError(e));
}
}
}
ACTOR Future<std::vector<RangeLockOwner>> getAllRangeLockOwners(Database cx) {
state std::vector<RangeLockOwner> res;
state Key beginKey = rangeLockOwnerKeys.begin;
state Key endKey = rangeLockOwnerKeys.end;
state Transaction tr(cx);
loop {
state KeyRange rangeToRead = Standalone(KeyRangeRef(beginKey, endKey));
try {
tr.setOption(FDBTransactionOptions::READ_LOCK_AWARE);
tr.setOption(FDBTransactionOptions::READ_SYSTEM_KEYS);
RangeResult result = wait(tr.getRange(rangeToRead, CLIENT_KNOBS->TOO_MANY));
for (const auto& kv : result) {
RangeLockOwner owner = decodeRangeLockOwner(kv.value);
ASSERT(owner.isValid());
RangeLockOwnerName uidFromKey = decodeRangeLockOwnerKey(kv.key);
ASSERT(owner.getUniqueId() == uidFromKey);
res.push_back(owner);
}
if (result[result.size() - 1].key == endKey) {
return res;
} else {
beginKey = result[result.size() - 1].key;
tr.reset();
}
} catch (Error& e) {
wait(tr.onError(e));
}
}
}
// Not transactional
ACTOR Future<std::vector<KeyRange>> getReadLockOnRange(Database cx, KeyRange range) {
if (range.end > normalKeys.end) {
throw range_lock_failed();
}
state std::vector<KeyRange> lockedRanges;
state Key beginKey = range.begin;
state Key endKey = range.end;
state Transaction tr(cx);
loop {
state KeyRange rangeToRead = Standalone(KeyRangeRef(beginKey, endKey));
try {
tr.setOption(FDBTransactionOptions::READ_LOCK_AWARE);
tr.setOption(FDBTransactionOptions::READ_SYSTEM_KEYS);
RangeResult result = wait(krmGetRanges(&tr, rangeLockPrefix, rangeToRead));
for (int i = 0; i < result.size() - 1; i++) {
if (result[i].value.empty()) {
continue;
}
RangeLockStateSet rangeLockStateSet = decodeRangeLockStateSet(result[i].value);
ASSERT(rangeLockStateSet.isValid());
if (rangeLockStateSet.isLockedFor(RangeLockType::ReadLockOnRange)) {
lockedRanges.push_back(Standalone(KeyRangeRef(result[i].key, result[i + 1].key)));
}
}
if (result[result.size() - 1].key == range.end) {
break;
} else {
beginKey = result[result.size() - 1].key;
tr.reset();
}
} catch (Error& e) {
wait(tr.onError(e));
}
}
return lockedRanges;
}
// Not transactional
ACTOR Future<Void> takeReadLockOnRange(Database cx, KeyRange range, std::string ownerUniqueID) {
if (range.end > normalKeys.end) {
throw range_lock_failed();
}
state Key beginKey = range.begin;
state Key endKey = range.end;
state Transaction tr(cx);
loop {
state KeyRange rangeToLock = Standalone(KeyRangeRef(beginKey, endKey));
try {
tr.setOption(FDBTransactionOptions::LOCK_AWARE);
tr.setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
tr.addWriteConflictRange(normalKeys);
// Step 1: Check owner
state Optional<Value> ownerValue = wait(tr.get(rangeLockOwnerKeyFor(ownerUniqueID)));
if (!ownerValue.present()) {
throw range_lock_failed();
}
state RangeLockOwner owner = decodeRangeLockOwner(ownerValue.get());
ASSERT(owner.isValid());
// Step 2: Get all locks on the range and add the new lock
state RangeResult result = wait(krmGetRanges(&tr, rangeLockPrefix, rangeToLock));
state int i = 0;
for (; i < result.size() - 1; i++) {
KeyRange lockRange = Standalone(KeyRangeRef(result[i].key, result[i + 1].key));
RangeLockStateSet rangeLockStateSet;
if (!result[i].value.empty()) {
rangeLockStateSet = decodeRangeLockStateSet(result[i].value);
}
rangeLockStateSet.upsert(RangeLockState(RangeLockType::ReadLockOnRange, owner.getUniqueId()));
ASSERT(rangeLockStateSet.isValid());
wait(krmSetRangeCoalescing(
&tr, rangeLockPrefix, lockRange, normalKeys, rangeLockStateSetValue(rangeLockStateSet)));
wait(tr.commit());
tr.reset();
beginKey = result[i + 1].key;
break; // TODO(Zhe): remove
}
// Step 3: Exit if all ranges have been locked
if (beginKey == range.end) {
break;
}
wait(delay(0.1));
} catch (Error& e) {
wait(tr.onError(e));
}
}
return Void();
}
// Not transactional
ACTOR Future<Void> releaseReadLockOnRange(Database cx, KeyRange range, std::string ownerUniqueID) {
if (range.end > normalKeys.end) {
throw range_lock_failed();
}
state Key beginKey = range.begin;
state Key endKey = range.end;
state Transaction tr(cx);
loop {
state KeyRange rangeToLock = Standalone(KeyRangeRef(beginKey, endKey));
try {
tr.setOption(FDBTransactionOptions::LOCK_AWARE);
tr.setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
// Step 1: Check owner
state Optional<Value> ownerValue = wait(tr.get(rangeLockOwnerKeyFor(ownerUniqueID)));
if (!ownerValue.present()) {
throw range_lock_failed();
}
state RangeLockOwner owner = decodeRangeLockOwner(ownerValue.get());
ASSERT(owner.isValid());
// Step 2: Get all locks on the range and remove the lock
state RangeResult result = wait(krmGetRanges(&tr, rangeLockPrefix, rangeToLock));
state int i = 0;
for (; i < result.size() - 1; i++) {
KeyRange lockRange = Standalone(KeyRangeRef(result[i].key, result[i + 1].key));
if (result[i].value.empty()) {
beginKey = result[i + 1].key;
continue;
}
RangeLockStateSet rangeLockStateSet = decodeRangeLockStateSet(result[i].value);
rangeLockStateSet.remove(RangeLockState(RangeLockType::ReadLockOnRange, owner.getUniqueId()));
ASSERT(rangeLockStateSet.isValid());
wait(krmSetRangeCoalescing(
&tr, rangeLockPrefix, lockRange, normalKeys, rangeLockStateSetValue(rangeLockStateSet)));
wait(tr.commit());
tr.reset();
beginKey = result[i + 1].key;
break; // TODO(Zhe): remove
}
// Step 3: Exit if all ranges have been unlocked
if (beginKey == range.end) {
break;
}
wait(delay(0.1));
} catch (Error& e) {
wait(tr.onError(e));
}
}
return Void();
}
ACTOR Future<Void> waitForPrimaryDC(Database cx, StringRef dcId) {
state ReadYourWritesTransaction tr(cx);

View File

@ -1559,6 +1559,7 @@ DatabaseContext::DatabaseContext(Reference<AsyncVar<Reference<IClusterConnection
transactionsFutureVersions("FutureVersions", cc), transactionsNotCommitted("NotCommitted", cc),
transactionsMaybeCommitted("MaybeCommitted", cc), transactionsResourceConstrained("ResourceConstrained", cc),
transactionsProcessBehind("ProcessBehind", cc), transactionsThrottled("Throttled", cc),
transactionsLockRejected("LockRejected", cc),
transactionsExpensiveClearCostEstCount("ExpensiveClearCostEstCount", cc),
transactionGrvFullBatches("NumGrvFullBatches", cc), transactionGrvTimedOutBatches("NumGrvTimedOutBatches", cc),
transactionCommitVersionNotFoundForSS("CommitVersionNotFoundForSS", cc), anyBGReads(false),
@ -1872,6 +1873,7 @@ DatabaseContext::DatabaseContext(const Error& err)
transactionsFutureVersions("FutureVersions", cc), transactionsNotCommitted("NotCommitted", cc),
transactionsMaybeCommitted("MaybeCommitted", cc), transactionsResourceConstrained("ResourceConstrained", cc),
transactionsProcessBehind("ProcessBehind", cc), transactionsThrottled("Throttled", cc),
transactionsLockRejected("LockRejected", cc),
transactionsExpensiveClearCostEstCount("ExpensiveClearCostEstCount", cc),
transactionGrvFullBatches("NumGrvFullBatches", cc), transactionGrvTimedOutBatches("NumGrvTimedOutBatches", cc),
transactionCommitVersionNotFoundForSS("CommitVersionNotFoundForSS", cc), anyBGReads(false),
@ -6176,7 +6178,8 @@ double Transaction::getBackoff(int errCode) {
// Set backoff for next time
if (errCode == error_code_commit_proxy_memory_limit_exceeded ||
errCode == error_code_grv_proxy_memory_limit_exceeded ||
errCode == error_code_transaction_throttled_hot_shard) {
errCode == error_code_transaction_throttled_hot_shard ||
errCode == error_code_transaction_rejected_range_locked) {
backoff = std::min(backoff * CLIENT_KNOBS->BACKOFF_GROWTH_RATE, CLIENT_KNOBS->RESOURCE_CONSTRAINED_MAX_BACKOFF);
} else {
@ -6852,7 +6855,8 @@ ACTOR static Future<Void> tryCommit(Reference<TransactionState> trState, CommitT
e.code() != error_code_process_behind && e.code() != error_code_future_version &&
e.code() != error_code_tenant_not_found && e.code() != error_code_illegal_tenant_access &&
e.code() != error_code_proxy_tag_throttled && e.code() != error_code_storage_quota_exceeded &&
e.code() != error_code_tenant_locked && e.code() != error_code_transaction_throttled_hot_shard) {
e.code() != error_code_tenant_locked && e.code() != error_code_transaction_throttled_hot_shard &&
e.code() != error_code_transaction_rejected_range_locked) {
TraceEvent(SevError, "TryCommitError").error(e);
}
if (trState->trLogInfo)
@ -6970,7 +6974,8 @@ Future<Void> Transaction::commitMutations() {
}
return commitResult;
} catch (Error& e) {
if (e.code() == error_code_transaction_throttled_hot_shard) {
if (e.code() == error_code_transaction_throttled_hot_shard ||
e.code() == error_code_transaction_rejected_range_locked) {
TraceEvent("TransactionThrottledHotShard").error(e);
return onError(e);
}
@ -7831,7 +7836,9 @@ Future<Void> Transaction::onError(Error const& e) {
e.code() == error_code_grv_proxy_memory_limit_exceeded || e.code() == error_code_process_behind ||
e.code() == error_code_batch_transaction_throttled || e.code() == error_code_tag_throttled ||
e.code() == error_code_blob_granule_request_failed || e.code() == error_code_proxy_tag_throttled ||
e.code() == error_code_transaction_throttled_hot_shard) {
e.code() == error_code_transaction_throttled_hot_shard ||
(e.code() == error_code_transaction_rejected_range_locked &&
CLIENT_KNOBS->TRANSACTION_LOCK_REJECTION_RETRIABLE)) {
if (e.code() == error_code_not_committed)
++trState->cx->transactionsNotCommitted;
else if (e.code() == error_code_commit_unknown_result)
@ -7847,11 +7854,16 @@ Future<Void> Transaction::onError(Error const& e) {
} else if (e.code() == error_code_proxy_tag_throttled) {
++trState->cx->transactionsThrottled;
trState->proxyTagThrottledDuration += CLIENT_KNOBS->PROXY_MAX_TAG_THROTTLE_DURATION;
} else if (e.code() == error_code_transaction_rejected_range_locked) {
++trState->cx->transactionsLockRejected;
}
double backoff = getBackoff(e.code());
reset();
return delay(backoff, trState->taskID);
} else if (e.code() == error_code_transaction_rejected_range_locked) {
ASSERT(!CLIENT_KNOBS->TRANSACTION_LOCK_REJECTION_RETRIABLE);
++trState->cx->transactionsLockRejected; // throw error
}
if (e.code() == error_code_transaction_too_old || e.code() == error_code_future_version) {
if (e.code() == error_code_transaction_too_old)
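The onError() change above makes transaction_rejected_range_locked retriable only while the TRANSACTION_LOCK_REJECTION_RETRIABLE client knob is true; otherwise the error is counted and surfaced to the caller. A minimal caller-side sketch under that assumption (writeUnlessRangeLocked is a hypothetical helper, not part of this change):

```
// Hedged sketch: how a client might handle error_code_transaction_rejected_range_locked.
// When CLIENT_KNOBS->TRANSACTION_LOCK_REJECTION_RETRIABLE is true, onError() backs off and
// retries the transaction; when false, the error is propagated to the caller.
ACTOR Future<Void> writeUnlessRangeLocked(Database cx, Key key, Value value) {
    state Transaction tr(cx);
    loop {
        try {
            tr.set(key, value);
            wait(tr.commit());
            return Void();
        } catch (Error& e) {
            if (e.code() == error_code_transaction_rejected_range_locked &&
                !CLIENT_KNOBS->TRANSACTION_LOCK_REJECTION_RETRIABLE) {
                throw e; // the commit hit a read-locked range; let the caller decide what to do
            }
            wait(tr.onError(e)); // all other errors follow the usual backoff/retry path
        }
    }
}
```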

View File

@ -499,7 +499,7 @@ void ServerKnobs::initialize(Randomize randomize, ClientKnobs* clientKnobs, IsSi
init( ROCKSDB_READ_RANGE_ITERATOR_REFRESH_TIME, 30.0 ); if( randomize && BUGGIFY ) ROCKSDB_READ_RANGE_ITERATOR_REFRESH_TIME = 0.1;
init( ROCKSDB_PROBABILITY_REUSE_ITERATOR_SIM, 0.01 );
init( ROCKSDB_READ_RANGE_REUSE_ITERATORS, true ); if( randomize && BUGGIFY ) ROCKSDB_READ_RANGE_REUSE_ITERATORS = deterministicRandom()->coinflip();
init( SHARDED_ROCKSDB_REUSE_ITERATORS, false );
init( SHARDED_ROCKSDB_REUSE_ITERATORS, false ); if (isSimulated) SHARDED_ROCKSDB_REUSE_ITERATORS = deterministicRandom()->coinflip();
init( ROCKSDB_READ_RANGE_REUSE_BOUNDED_ITERATORS, false ); if( randomize && BUGGIFY ) ROCKSDB_READ_RANGE_REUSE_BOUNDED_ITERATORS = deterministicRandom()->coinflip();
init( ROCKSDB_READ_RANGE_BOUNDED_ITERATORS_MAX_LIMIT, 200 );
// Set to 0 to disable rocksdb write rate limiting. Rate limiter unit: bytes per second.
@ -664,6 +664,7 @@ void ServerKnobs::initialize(Randomize randomize, ClientKnobs* clientKnobs, IsSi
init( COMMIT_BATCHES_MEM_FRACTION_OF_TOTAL, 0.5 );
init( COMMIT_BATCHES_MEM_TO_TOTAL_MEM_SCALE_FACTOR, 5.0 );
init( COMMIT_TRIGGER_DELAY, 0.01 ); if (randomize && BUGGIFY) COMMIT_TRIGGER_DELAY = deterministicRandom()->random01() * 4;
init( ENABLE_READ_LOCK_ON_RANGE, false ); if (isSimulated) ENABLE_READ_LOCK_ON_RANGE = deterministicRandom()->coinflip();
// these settings disable batch bytes scaling. Try COMMIT_TRANSACTION_BATCH_BYTES_MAX=1e6, COMMIT_TRANSACTION_BATCH_BYTES_SCALE_BASE=50000, COMMIT_TRANSACTION_BATCH_BYTES_SCALE_POWER=0.5?
init( COMMIT_TRANSACTION_BATCH_BYTES_MIN, 100000 );
@ -789,7 +790,11 @@ void ServerKnobs::initialize(Randomize randomize, ClientKnobs* clientKnobs, IsSi
init( CC_ENABLE_ENTIRE_SATELLITE_MONITORING, false );
init( CC_SATELLITE_DEGRADATION_MIN_COMPLAINER, 3 );
init( CC_SATELLITE_DEGRADATION_MIN_BAD_SERVER, 3 );
init( CC_ENABLE_REMOTE_LOG_ROUTER_MONITORING, true );
init( CC_ENABLE_REMOTE_LOG_ROUTER_DEGRADATION_MONITORING, false);
init( CC_ENABLE_REMOTE_LOG_ROUTER_MONITORING, true);
init( CC_ENABLE_REMOTE_TLOG_DEGRADATION_MONITORING, false); if (isSimulated && deterministicRandom()->coinflip()) CC_ENABLE_REMOTE_TLOG_DEGRADATION_MONITORING = true;
init( CC_ENABLE_REMOTE_TLOG_DISCONNECT_MONITORING, false); if (isSimulated && deterministicRandom()->coinflip()) CC_ENABLE_REMOTE_TLOG_DISCONNECT_MONITORING = true;
init( CC_ONLY_CONSIDER_INTRA_DC_LATENCY, false); if (isSimulated && deterministicRandom()->coinflip()) CC_ONLY_CONSIDER_INTRA_DC_LATENCY = true;
init( CC_THROTTLE_SINGLETON_RERECRUIT_INTERVAL, 0.5 );
init( INCOMPATIBLE_PEERS_LOGGING_INTERVAL, 600 ); if( randomize && BUGGIFY ) INCOMPATIBLE_PEERS_LOGGING_INTERVAL = 60.0;
@ -860,6 +865,7 @@ void ServerKnobs::initialize(Randomize randomize, ClientKnobs* clientKnobs, IsSi
init( STORAGE_DURABILITY_LAG_SOFT_MAX, 250e6 ); if( smallStorageTarget ) STORAGE_DURABILITY_LAG_SOFT_MAX = 10e6;
init( STORAGE_INCLUDE_FEED_STORAGE_QUEUE, true ); if ( randomize && BUGGIFY ) STORAGE_INCLUDE_FEED_STORAGE_QUEUE = false;
init( STORAGE_SHARD_CONSISTENCY_CHECK_INTERVAL, 0.0); if ( isSimulated ) STORAGE_SHARD_CONSISTENCY_CHECK_INTERVAL = 5.0;
init( CONSISTENCY_CHECK_BACKWARD_READ, false ); if (isSimulated) CONSISTENCY_CHECK_BACKWARD_READ = deterministicRandom()->coinflip();
init (STORAGE_FETCH_KEYS_DELAY, 0.0 ); if ( randomize && BUGGIFY ) { STORAGE_FETCH_KEYS_DELAY = deterministicRandom()->random01() * 5.0; }
init (STORAGE_FETCH_KEYS_USE_COMMIT_BUDGET, false ); if (isSimulated) STORAGE_FETCH_KEYS_USE_COMMIT_BUDGET = deterministicRandom()->coinflip();
init (STORAGE_FETCH_KEYS_RATE_LIMIT, 0 ); if (isSimulated && BUGGIFY) STORAGE_FETCH_KEYS_RATE_LIMIT = 100 * 1024 * deterministicRandom()->randomInt(1, 10); // In MB/s

View File

@ -1210,6 +1210,49 @@ BulkLoadState decodeBulkLoadState(const ValueRef& value) {
return bulkLoadState;
}
// Range Lock
const KeyRangeRef rangeLockKeys = KeyRangeRef("\xff/rangeLock/"_sr, "\xff/rangeLock0"_sr);
const KeyRef rangeLockPrefix = rangeLockKeys.begin;
const Value rangeLockStateSetValue(const RangeLockStateSet& rangeLockStateSet) {
return ObjectWriter::toValue(rangeLockStateSet, IncludeVersion());
}
RangeLockStateSet decodeRangeLockStateSet(const ValueRef& value) {
RangeLockStateSet rangeLockStateSet;
ObjectReader reader(value.begin(), IncludeVersion());
reader.deserialize(rangeLockStateSet);
return rangeLockStateSet;
}
const KeyRangeRef rangeLockOwnerKeys = KeyRangeRef("\xff/rangeLockOwner/"_sr, "\xff/rangeLockOwner0"_sr);
const KeyRef rangeLockOwnerPrefix = rangeLockOwnerKeys.begin;
const Key rangeLockOwnerKeyFor(const RangeLockOwnerName& ownerUniqueID) {
BinaryWriter wr(Unversioned());
wr.serializeBytes(rangeLockOwnerPrefix);
wr.serializeBytes(StringRef(ownerUniqueID));
return wr.toValue();
}
const RangeLockOwnerName decodeRangeLockOwnerKey(const KeyRef& key) {
std::string ownerUniqueID;
BinaryReader rd(key.removePrefix(rangeLockOwnerPrefix), Unversioned());
rd >> ownerUniqueID;
return ownerUniqueID;
}
const Value rangeLockOwnerValue(const RangeLockOwner& rangeLockOwner) {
return ObjectWriter::toValue(rangeLockOwner, IncludeVersion());
}
RangeLockOwner decodeRangeLockOwner(const ValueRef& value) {
RangeLockOwner rangeLockOwner;
ObjectReader reader(value.begin(), IncludeVersion());
reader.deserialize(rangeLockOwner);
return rangeLockOwner;
}
// Keys to view and control tag throttling
const KeyRangeRef tagThrottleKeys = KeyRangeRef("\xff\x02/throttledTags/tag/"_sr, "\xff\x02/throttledTags/tag0"_sr);
const KeyRef tagThrottleKeysPrefix = tagThrottleKeys.begin;

View File

@ -292,6 +292,10 @@ public:
// Maximum duration that a transaction can be tag throttled by proxy before being rejected
double PROXY_MAX_TAG_THROTTLE_DURATION;
// Enable automatic retry of transactions in the presence of the transaction_rejected_range_locked error
// Set to false only for the rangeLocking simulation test
bool TRANSACTION_LOCK_REJECTION_RETRIABLE;
// busyness reporting
double BUSYNESS_SPIKE_START_THRESHOLD;
double BUSYNESS_SPIKE_SATURATED_THRESHOLD;

View File

@ -628,6 +628,7 @@ public:
Counter transactionsResourceConstrained;
Counter transactionsProcessBehind;
Counter transactionsThrottled;
Counter transactionsLockRejected;
Counter transactionsExpensiveClearCostEstCount;
Counter transactionGrvFullBatches;
Counter transactionGrvTimedOutBatches;

View File

@ -58,6 +58,7 @@ ACTOR static Future<Void> replaceRange_impl(class IKeyValueStore* self,
class IKeyValueStore : public IClosable {
public:
virtual KeyValueStoreType getType() const = 0;
virtual bool getReplaceContent() const { return false; };
// Returns true if the KV store supports shards, i.e., implements addRange(), removeRange(), and
// persistRangeMapping().
virtual bool shardAware() const { return false; }

View File

@ -36,6 +36,7 @@ standard API and some knowledge of the contents of the system key space.
#include <map>
#include "fdbclient/GenericManagementAPI.actor.h"
#include "fdbclient/NativeAPI.actor.h"
#include "fdbclient/RangeLock.h"
#include "fdbclient/ReadYourWrites.h"
#include "fdbclient/DatabaseConfiguration.h"
#include "fdbclient/MonitorLeader.h"
@ -189,6 +190,27 @@ ACTOR Future<BulkLoadState> getBulkLoadTask(Transaction* tr,
UID taskId,
std::vector<BulkLoadPhase> phases);
// Persist a rangeLock owner to the database metadata
// A range can only be locked by a registered owner
ACTOR Future<Void> registerRangeLockOwner(Database cx, std::string uniqueId, std::string description);
// Remove an owner from the database metadata
ACTOR Future<Void> removeRangeLockOwner(Database cx, std::string uniqueId);
// Get all registered rangeLock owners
ACTOR Future<std::vector<RangeLockOwner>> getAllRangeLockOwners(Database cx);
ACTOR Future<Optional<RangeLockOwner>> getRangeLockOwner(Database cx, std::string uniqueId);
// Lock a user range (the input range must be within normalKeys)
ACTOR Future<Void> takeReadLockOnRange(Database cx, KeyRange range, std::string ownerUniqueID);
// Unlock a user range (the input range must be within normalKeys)
ACTOR Future<Void> releaseReadLockOnRange(Database cx, KeyRange range, std::string ownerUniqueID);
// Get locked ranges within the input range (the input range must be within normalKeys)
ACTOR Future<std::vector<KeyRange>> getReadLockOnRange(Database cx, KeyRange range);
ACTOR Future<Void> printHealthyZone(Database cx);
ACTOR Future<bool> clearHealthyZone(Database cx, bool printWarning = false, bool clearSSFailureZoneString = false);
ACTOR Future<bool> setHealthyZone(Database cx, StringRef zoneId, double seconds, bool printWarning = false);
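The declarations above make up the client-facing range-lock API: register an owner, take or release a read lock on a user range, and inspect locked sub-ranges. A hedged end-to-end sketch of how they might be combined (lockUserRange is a hypothetical driver, the owner name and key range are illustrative, and error handling is elided):

```
// Usage sketch built only from the declarations above.
ACTOR Future<Void> lockUserRange(Database cx) {
    state KeyRange range = Standalone(KeyRangeRef("a"_sr, "b"_sr)); // must be within normalKeys
    // An owner must be registered before it can take a lock.
    wait(registerRangeLockOwner(cx, "backup-agent", "locks ranges while copying them"));
    // Reject commits to [a, b) until the lock is released.
    wait(takeReadLockOnRange(cx, range, "backup-agent"));
    // Inspect which sub-ranges are currently read-locked.
    std::vector<KeyRange> locked = wait(getReadLockOnRange(cx, range));
    TraceEvent("RangeLockSketch").detail("LockedRanges", locked.size());
    // Release the lock and, when no longer needed, the owner registration.
    wait(releaseReadLockOnRange(cx, range, "backup-agent"));
    wait(removeRangeLockOwner(cx, "backup-agent"));
    return Void();
}
```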

View File

@ -0,0 +1,210 @@
/*
* RangeLock.h
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2024 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FDBCLIENT_RANGELOCK_H
#define FDBCLIENT_RANGELOCK_H
#include "flow/Error.h"
#include "flow/IRandom.h"
#include <string>
#pragma once
#include "fdbclient/FDBTypes.h"
#include "fdbrpc/fdbrpc.h"
using RangeLockOwnerName = std::string;
enum class RangeLockType : uint8_t {
Invalid = 0,
ReadLockOnRange = 1, // reject all commits to the locked range
};
// The app/user that owns the lock.
// A lock can be only removed by the owner
struct RangeLockOwner {
constexpr static FileIdentifier file_identifier = 1384408;
public:
RangeLockOwner() = default;
RangeLockOwner(const std::string& uniqueId, const std::string& description)
: uniqueId(uniqueId), description(description), logId(deterministicRandom()->randomUniqueID()),
creationTime(now()) {
if (!isValid()) {
throw range_lock_failed();
}
}
bool isValid() const { return !uniqueId.empty() && !description.empty(); }
std::string toString() const {
return "RangeLockOwner: [UniqueId]: " + uniqueId + ", [Description]: " + description +
", [LogId]: " + logId.toString() + ", [CreationTime]: " + std::to_string(creationTime);
}
bool operator==(RangeLockOwner const& r) const { return uniqueId == r.uniqueId; }
RangeLockOwnerName getUniqueId() const { return uniqueId; }
void setDescription(const std::string& inputDescription) {
description = inputDescription;
return;
}
std::string getDescription() const { return description; }
template <class Ar>
void serialize(Ar& ar) {
serializer(ar, uniqueId, description, logId, creationTime);
}
private:
RangeLockOwnerName uniqueId; // The owner's unique ID; the owner is free to reuse it as many times as needed.
std::string description; // More details about the owner
UID logId; // For logging purposes
double creationTime; // Indicates when the data structure was created
};
// Metadata of a lock on a range
struct RangeLockState {
constexpr static FileIdentifier file_identifier = 1384409;
public:
RangeLockState() = default;
RangeLockState(RangeLockType type, const RangeLockOwnerName& ownerUniqueId)
: lockType(type), ownerUniqueId(ownerUniqueId) {
ASSERT(isValid());
}
bool isValid() const { return lockType != RangeLockType::Invalid && !ownerUniqueId.empty(); }
static std::string rangeLockTypeString(const RangeLockType& type) {
if (type == RangeLockType::Invalid) {
return "invalid";
} else if (type == RangeLockType::ReadLockOnRange) {
return "ReadLockOnRange";
} else {
UNREACHABLE();
}
}
std::string toString() const {
return "RangeLockState: [lockType]: " + rangeLockTypeString(lockType) + " [Owner]: " + ownerUniqueId;
}
bool isLockedFor(RangeLockType inputLockType) const { return lockType == inputLockType; }
bool operator==(RangeLockState const& r) const {
return lockType == r.lockType && ownerUniqueId == r.ownerUniqueId;
}
std::string getLockUniqueString() const { return ownerUniqueId + rangeLockTypeString(lockType); }
template <class Ar>
void serialize(Ar& ar) {
serializer(ar, ownerUniqueId, lockType);
}
private:
RangeLockOwnerName ownerUniqueId; // The app/user that owns the lock.
RangeLockType lockType;
};
// Persisted state on a range. A range can have multiple locks, distinguished by owner and lockType.
// For each combination of owner and lockType there is a unique lock for that combination;
// RangeLockStateSet tracks all of those unique locks
struct RangeLockStateSet {
constexpr static FileIdentifier file_identifier = 1384410;
public:
RangeLockStateSet() = default;
bool empty() const { return locks.empty(); }
std::vector<RangeLockState> getAllLockStats() const {
std::vector<RangeLockState> res;
for (const auto& [name, lock] : locks) {
res.push_back(lock);
}
return res;
}
bool isValid() const {
for (const auto& [owner, lock] : locks) {
if (!lock.isValid()) {
return false; // Any invalid makes this set invalid
}
}
return true;
}
std::string toString() const { return "RangeLockStateSet: " + describe(getAllLockStats()); }
const std::map<RangeLockOwnerName, RangeLockState>& getLocks() const { return locks; }
bool operator==(RangeLockStateSet const& r) const {
auto rLocks = r.getLocks();
if (locks.size() != rLocks.size()) {
return false;
}
std::map<RangeLockOwnerName, RangeLockState>::const_iterator iterator = locks.begin();
std::map<RangeLockOwnerName, RangeLockState>::const_iterator rIterator = rLocks.begin();
while (iterator != locks.end() && rIterator != rLocks.end()) {
if (iterator->first != rIterator->first || iterator->second != rIterator->second) {
return false;
}
++iterator;
++rIterator;
}
return true;
}
void upsert(const RangeLockState& inputLock) {
ASSERT(inputLock.isValid());
locks[inputLock.getLockUniqueString()] = inputLock;
return;
}
void remove(const RangeLockState& inputLock) {
ASSERT(inputLock.isValid());
locks.erase(inputLock.getLockUniqueString());
return;
}
bool isLockedFor(RangeLockType lockType) const {
for (const auto& [owner, lock] : locks) {
ASSERT(lock.isValid());
if (lock.isLockedFor(lockType)) {
return true;
}
}
return false;
}
template <class Ar>
void serialize(Ar& ar) {
serializer(ar, locks);
}
private:
std::map<RangeLockOwnerName, RangeLockState> locks;
};
#endif
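RangeLockStateSet keys each lock by owner plus lockType, so re-taking the same lock by the same owner is idempotent. A small hedged sketch of the expected semantics, derived only from the header above (plain assertions, not a test that ships with this change):

```
// Sketch of RangeLockStateSet behaviour; "backup-agent" is an illustrative owner name.
void rangeLockStateSetSketch() {
    RangeLockStateSet set;
    RangeLockState lock(RangeLockType::ReadLockOnRange, "backup-agent");
    set.upsert(lock);
    set.upsert(lock); // keyed by owner + lockType, so the second upsert is a no-op
    ASSERT(set.isLockedFor(RangeLockType::ReadLockOnRange));
    ASSERT(set.getLocks().size() == 1);
    set.remove(lock);
    ASSERT(set.empty() && !set.isLockedFor(RangeLockType::ReadLockOnRange));
}
```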

View File

@ -629,6 +629,7 @@ public:
double COMMIT_BATCHES_MEM_FRACTION_OF_TOTAL;
double COMMIT_BATCHES_MEM_TO_TOTAL_MEM_SCALE_FACTOR;
double COMMIT_TRIGGER_DELAY;
bool ENABLE_READ_LOCK_ON_RANGE;
double RESOLVER_COALESCE_TIME;
int BUGGIFIED_ROW_LIMIT;
@ -760,8 +761,25 @@ public:
// be determined as degraded worker.
int CC_SATELLITE_DEGRADATION_MIN_BAD_SERVER; // The minimum amount of degraded server in satellite DC to be
// determined as degraded satellite.
bool CC_ENABLE_REMOTE_LOG_ROUTER_MONITORING; // When enabled, gray failure tries to detect whether the remote log
// router is degraded and may trigger recovery to recover from it.
bool CC_ENABLE_REMOTE_LOG_ROUTER_DEGRADATION_MONITORING; // When enabled, gray failure tries to detect whether
// remote log routers are experiencing degradation
// (latency) with their peers. Gray failure may trigger
// recovery based on this.
bool CC_ENABLE_REMOTE_LOG_ROUTER_MONITORING; // When enabled, gray failure tries to detect whether
// remote log routers are disconnected from their peers. Gray failure
// may trigger recovery based on this.
bool CC_ENABLE_REMOTE_TLOG_DEGRADATION_MONITORING; // When enabled, gray failure tries to detect whether remote
// tlogs are experiencing degradation (latency) with their peers.
// Gray failure may trigger recovery based on this.
bool CC_ENABLE_REMOTE_TLOG_DISCONNECT_MONITORING; // When enabled, gray failure tries to detect whether remote
// tlogs are disconnected from their peers. Gray failure may
// trigger recovery based on this.
bool CC_ONLY_CONSIDER_INTRA_DC_LATENCY; // When enabled, gray failure only considers intra-DC signal for latency
// degradations. For remote process knobs
// (CC_ENABLE_REMOTE_TLOG_DEGRADATION_MONITORING and
// CC_ENABLE_REMOTE_LOG_ROUTER_DEGRADATION_MONITORING), this knob must be
// turned on, because inter-DC latency signal is not reliable and it's
// challenging to pick a good latency threshold.
double CC_THROTTLE_SINGLETON_RERECRUIT_INTERVAL; // The interval to prevent re-recruiting the same singleton if a
// recruiting fight between two cluster controllers occurs.
@ -1068,6 +1086,7 @@ public:
std::string STORAGESERVER_READTYPE_PRIORITY_MAP;
int SPLIT_METRICS_MAX_ROWS;
double STORAGE_SHARD_CONSISTENCY_CHECK_INTERVAL;
bool CONSISTENCY_CHECK_BACKWARD_READ;
int PHYSICAL_SHARD_MOVE_LOG_SEVERITY;
int FETCH_SHARD_BUFFER_BYTE_LIMIT;
int FETCH_SHARD_UPDATES_BYTE_LIMIT;

View File

@ -26,8 +26,9 @@
#include "fdbclient/AccumulativeChecksum.h"
#include "fdbclient/BulkLoading.h"
#include "fdbclient/FDBTypes.h"
#include "fdbclient/BlobWorkerInterface.h" // TODO move the functions that depend on this out of here and into BlobWorkerInterface.h to remove this dependency
#include "fdbclient/FDBTypes.h"
#include "fdbclient/RangeLock.h"
#include "fdbclient/StorageServerInterface.h"
#include "fdbclient/Tenant.h"
@ -527,6 +528,18 @@ extern const KeyRef bulkLoadPrefix;
const Value bulkLoadStateValue(const BulkLoadState& bulkLoadState);
BulkLoadState decodeBulkLoadState(const ValueRef& value);
extern const KeyRangeRef rangeLockKeys;
extern const KeyRef rangeLockPrefix;
const Value rangeLockStateSetValue(const RangeLockStateSet& rangeLockStateSet);
RangeLockStateSet decodeRangeLockStateSet(const ValueRef& value);
extern const KeyRangeRef rangeLockOwnerKeys;
extern const KeyRef rangeLockOwnerPrefix;
const Key rangeLockOwnerKeyFor(const RangeLockOwnerName& ownerUniqueID);
const RangeLockOwnerName decodeRangeLockOwnerKey(const KeyRef& key);
const Value rangeLockOwnerValue(const RangeLockOwner& rangeLockOwner);
RangeLockOwner decodeRangeLockOwner(const ValueRef& value);
// Keys to view and control tag throttling
extern const KeyRangeRef tagThrottleKeys;
extern const KeyRef tagThrottleKeysPrefix;

View File

@ -30,6 +30,7 @@
#include <vector>
#include <string>
#include <sstream>
#include <iterator>
#include <random>
#include <unordered_map>
#include <unordered_set>

View File

@ -2,6 +2,7 @@
#include <cassert>
#include <string>
#include <functional>
namespace fdbmonitor {
namespace tests {
@ -161,4 +162,4 @@ int main(int argc, char** argv) {
testPathOps();
testEnvVarUtils();
}
}

View File

@ -20,9 +20,14 @@
#include "fdbrpc/FlowTransport.h"
#include "flow/Arena.h"
#include "flow/IThreadPool.h"
#include "flow/Knobs.h"
#include "flow/NetworkAddress.h"
#include "flow/network.h"
#include <cstdint>
#include <fstream>
#include <string>
#include <unordered_map>
#if VALGRIND
#include <memcheck.h>
@ -352,8 +357,107 @@ public:
Future<Void> publicKeyFileWatch;
std::unordered_map<Standalone<StringRef>, PublicKey> publicKeys;
struct ConnectionHistoryEntry {
int64_t time;
NetworkAddress addr;
bool failed;
};
std::deque<ConnectionHistoryEntry> connectionHistory;
Future<Void> connectionHistoryLoggerF;
Reference<IThreadPool> connectionLogWriterThread;
};
struct ConnectionLogWriter : IThreadPoolReceiver {
const std::string baseDir;
std::string fileName;
std::fstream file;
ConnectionLogWriter(const std::string baseDir) : baseDir(baseDir) {}
virtual ~ConnectionLogWriter() {
if (file.is_open())
file.close();
}
struct AppendAction : TypedAction<ConnectionLogWriter, AppendAction> {
std::string localAddr;
std::deque<TransportData::ConnectionHistoryEntry> entries;
AppendAction(std::string localAddr, std::deque<TransportData::ConnectionHistoryEntry>&& entries)
: localAddr(localAddr), entries(std::move(entries)) {}
double getTimeEstimate() const { return 2; }
};
std::string newFileName() const { return baseDir + "fdb-connection-log-" + time_str() + ".csv"; }
void init() { fileName = newFileName(); }
std::string time_str() const { return std::to_string(now()); }
void openOrRoll() {
if (!file.is_open()) {
TraceEvent("OpenConnectionLog").detail("FileName", fileName);
file = std::fstream(fileName, std::ios::in | std::ios::out | std::ios::app);
}
if (!file.is_open()) {
TraceEvent(SevError, "ErrorOpenConnectionLog").detail("FileName", fileName);
throw io_error();
}
if (file.tellg() > 100 * 1024 * 1024 /* 100 MB */) {
file.close();
fileName = newFileName();
TraceEvent("RollConnectionLog").detail("FileName", fileName);
openOrRoll();
}
}
void action(AppendAction& a) {
openOrRoll();
std::string output;
for (const auto& entry : a.entries) {
output += std::to_string(entry.time) + ",";
output += a.localAddr + ",";
output += entry.failed ? "failed," : "success,";
output += entry.addr.toString() + "\n";
}
file << output;
file.flush();
}
};
ACTOR Future<Void> connectionHistoryLogger(TransportData* self) {
if (!FLOW_KNOBS->LOG_CONNECTION_ATTEMPTS_ENABLED) {
return Void();
}
state Future<Void> next = Void();
// A single thread ensures asynchronous, serialized writes to the log file.
if (g_network->isSimulated()) {
self->connectionLogWriterThread = Reference<IThreadPool>(new DummyThreadPool());
} else {
self->connectionLogWriterThread = createGenericThreadPool();
}
self->connectionLogWriterThread->addThread(new ConnectionLogWriter(FLOW_KNOBS->CONNECTION_LOG_DIRECTORY));
loop {
wait(next);
next = delay(FLOW_KNOBS->LOG_CONNECTION_INTERVAL_SECS);
if (self->connectionHistory.size() == 0) {
continue;
}
std::string localAddr = FlowTransport::getGlobalLocalAddress().toString();
auto action = new ConnectionLogWriter::AppendAction(localAddr, std::move(self->connectionHistory));
ASSERT(action != nullptr);
self->connectionLogWriterThread->post(action);
ASSERT(self->connectionHistory.size() == 0);
}
}
ACTOR Future<Void> pingLatencyLogger(TransportData* self) {
state NetworkAddress lastAddress = NetworkAddress();
loop {
@ -422,6 +526,8 @@ TransportData::TransportData(uint64_t transportId, int maxWellKnownEndpoints, IP
allowList(allowList == nullptr ? IPAllowList() : *allowList) {
degraded = makeReference<AsyncVar<bool>>(false);
pingLogger = pingLatencyLogger(this);
connectionHistoryLoggerF = connectionHistoryLogger(this);
}
#define CONNECT_PACKET_V0 0x0FDB00A444020001LL
@ -1490,10 +1596,17 @@ ACTOR static Future<Void> connectionReader(TransportData* transport,
}
ACTOR static Future<Void> connectionIncoming(TransportData* self, Reference<IConnection> conn) {
state TransportData::ConnectionHistoryEntry entry;
entry.time = now();
entry.addr = conn->getPeerAddress();
try {
wait(conn->acceptHandshake());
state Promise<Reference<Peer>> onConnected;
state Future<Void> reader = connectionReader(self, conn, Reference<Peer>(), onConnected);
if (FLOW_KNOBS->LOG_CONNECTION_ATTEMPTS_ENABLED) {
entry.failed = false;
self->connectionHistory.push_back(entry);
}
choose {
when(wait(reader)) {
ASSERT(false);
@ -1507,17 +1620,21 @@ ACTOR static Future<Void> connectionIncoming(TransportData* self, Reference<ICon
throw timed_out();
}
}
return Void();
} catch (Error& e) {
if (e.code() != error_code_actor_cancelled) {
TraceEvent("IncomingConnectionError", conn->getDebugID())
.errorUnsuppressed(e)
.suppressFor(1.0)
.detail("FromAddress", conn->getPeerAddress());
if (FLOW_KNOBS->LOG_CONNECTION_ATTEMPTS_ENABLED) {
entry.failed = true;
self->connectionHistory.push_back(entry);
}
}
conn->close();
return Void();
}
return Void();
}
ACTOR static Future<Void> listen(TransportData* self, NetworkAddress listenAddr) {

View File

@ -90,12 +90,12 @@ public:
tenantNameIndex(&proxyCommitData_.tenantNameIndex), lockedTenants(&proxyCommitData_.lockedTenants),
initialCommit(initialCommit_), provisionalCommitProxy(provisionalCommitProxy_),
accumulativeChecksumIndex(getCommitProxyAccumulativeChecksumIndex(proxyCommitData_.commitProxyIndex)),
acsBuilder(proxyCommitData_.acsBuilder), epoch(proxyCommitData_.epoch) {
acsBuilder(proxyCommitData_.acsBuilder), epoch(proxyCommitData_.epoch), rangeLock(proxyCommitData_.rangeLock) {
if (encryptMode.isEncryptionEnabled()) {
ASSERT(cipherKeys != nullptr);
ASSERT(cipherKeys->count(SYSTEM_KEYSPACE_ENCRYPT_DOMAIN_ID) > 0);
ASSERT(cipherKeys->contains(SYSTEM_KEYSPACE_ENCRYPT_DOMAIN_ID));
if (FLOW_KNOBS->ENCRYPT_HEADER_AUTH_TOKEN_ENABLED) {
ASSERT(cipherKeys->count(ENCRYPT_HEADER_DOMAIN_ID));
ASSERT(cipherKeys->contains(ENCRYPT_HEADER_DOMAIN_ID));
}
}
// If commit proxy, epoch must be set
@ -115,9 +115,9 @@ public:
accumulativeChecksumIndex(resolverAccumulativeChecksumIndex), epoch(Optional<LogEpoch>()) {
if (encryptMode.isEncryptionEnabled()) {
ASSERT(cipherKeys != nullptr);
ASSERT(cipherKeys->count(SYSTEM_KEYSPACE_ENCRYPT_DOMAIN_ID) > 0);
ASSERT(cipherKeys->contains(SYSTEM_KEYSPACE_ENCRYPT_DOMAIN_ID));
if (FLOW_KNOBS->ENCRYPT_HEADER_AUTH_TOKEN_ENABLED) {
ASSERT(cipherKeys->count(ENCRYPT_HEADER_DOMAIN_ID));
ASSERT(cipherKeys->contains(ENCRYPT_HEADER_DOMAIN_ID));
}
}
}
@ -195,6 +195,8 @@ private:
// commit
std::vector<std::pair<UID, UID>> tssMappingToAdd;
std::shared_ptr<RangeLock> rangeLock = nullptr;
private:
bool dummyConfChange = false;
@ -211,6 +213,32 @@ private:
}
}
void checkSetRangeLockPrefix(const MutationRef& m) {
if (!m.param1.startsWith(rangeLockPrefix)) {
return;
} else if (rangeLock == nullptr) {
TraceEvent(SevWarnAlways, "MutationHasRangeLockPrefixButFeatureIsOff")
.detail("Mutation", m.toString())
.detail("FeatureFlag", SERVER_KNOBS->ENABLE_READ_LOCK_ON_RANGE)
.detail("Encription", encryptMode.isEncryptionEnabled());
return;
}
ASSERT(!initialCommit);
// RangeLock is updated by krmSetRange, which updates a range with two successive mutations
if (rangeLock->pendingRequest()) {
// The second mutation
Key endKey = m.param1.removePrefix(rangeLockPrefix);
rangeLock->consumePendingRequest(endKey);
} else {
// The first mutation
RangeLockStateSet lockSetState = m.param2.empty() ? RangeLockStateSet() : decodeRangeLockStateSet(m.param2);
Key startKey = m.param1.removePrefix(rangeLockPrefix);
rangeLock->setPendingRequest(startKey, lockSetState);
}
txnStateStore->set(KeyValueRef(m.param1, m.param2));
return;
}
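// Illustrative note (an assumption, not part of the diff): a krmSetRangeCoalescing on
// [a, b) with value V reaches checkSetRangeLockPrefix above as two successive SetValue
// mutations:
//   rangeLockPrefix + "a" -> V           => rangeLock->setPendingRequest("a", V)
//   rangeLockPrefix + "b" -> <old value> => rangeLock->consumePendingRequest("b")
// so the in-memory lock map is only updated once the second mutation is seen.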
void checkSetKeyServersPrefix(MutationRef m) {
if (!m.param1.startsWith(keyServersPrefix)) {
return;
@ -916,6 +944,18 @@ private:
}
}
void checkClearRangeLockPrefix(KeyRangeRef range) {
if (rangeLock == nullptr) {
return;
} else if (!rangeLockKeys.intersects(range)) {
return;
}
ASSERT(!initialCommit);
ASSERT_WE_THINK(rangeLockKeys.contains(range));
txnStateStore->clear(range & rangeLockKeys);
return;
}
void checkClearKeyServerKeys(KeyRangeRef range) {
if (!keyServersKeys.intersects(range)) {
return;
@ -1184,7 +1224,7 @@ private:
bool foundKey = false;
for (auto& it : vecBackupKeys->intersectingRanges(normalKeys)) {
if (it.value().count(logDestination) > 0) {
if (it.value().contains(logDestination)) {
foundKey = true;
break;
}
@ -1192,7 +1232,7 @@ private:
auto& systemBackupRanges = getSystemBackupRanges();
for (auto r = systemBackupRanges.begin(); !foundKey && r != systemBackupRanges.end(); ++r) {
for (auto& it : vecBackupKeys->intersectingRanges(*r)) {
if (it.value().count(logDestination) > 0) {
if (it.value().contains(logDestination)) {
foundKey = true;
break;
}
@ -1516,6 +1556,7 @@ public:
}
if (m.type == MutationRef::SetValue && isSystemKey(m.param1)) {
checkSetRangeLockPrefix(m);
checkSetKeyServersPrefix(m);
checkSetServerKeysPrefix(m);
checkSetCheckpointKeys(m);
@ -1541,6 +1582,7 @@ public:
} else if (m.type == MutationRef::ClearRange && isSystemKey(m.param2)) {
KeyRangeRef range(m.param1, m.param2);
checkClearRangeLockPrefix(range);
checkClearKeyServerKeys(range);
checkClearConfigKeys(m, range);
checkClearServerListKeys(range);

View File

@ -92,7 +92,7 @@ std::map<std::tuple<LogEpoch, Version, int>, std::map<Tag, Version>> BackupProgr
auto prev = std::prev(current);
// Previous epoch is gone, consolidate the progress.
for (auto [tag, version] : prev->second) {
if (toCheck.count(tag) > 0) {
if (toCheck.contains(tag)) {
progressIt->second[tag] = std::max(version, progressIt->second[tag]);
toCheck.erase(tag);
}

View File

@ -630,7 +630,7 @@ ACTOR Future<Void> killBlobWorkers(Database cx) {
if (first) {
knownWorkers.insert(interf.id());
}
if (knownWorkers.count(interf.id())) {
if (knownWorkers.contains(interf.id())) {
haltIds.push_back(interf.id());
haltRequests.push_back(interf.haltBlobWorker.tryGetReply(HaltBlobWorkerRequest(1e6, UID())));
}

View File

@ -1000,7 +1000,7 @@ ACTOR Future<Void> doRangeAssignment(Reference<BlobManagerData> bmData,
// actor map, cancelling this actor before it got here
bmData->workerAssignments.insert(assignment.keyRange, workerID.get());
if (bmData->workerStats.count(workerID.get())) {
if (bmData->workerStats.contains(workerID.get())) {
bmData->workerStats[workerID.get()].numGranulesAssigned += 1;
}
@ -1040,7 +1040,7 @@ ACTOR Future<Void> doRangeAssignment(Reference<BlobManagerData> bmData,
req.type = assignment.assign.get().type;
// if that worker isn't alive anymore, add the range back into the stream
if (bmData->workersById.count(workerID.get()) == 0) {
if (!bmData->workersById.contains(workerID.get())) {
throw no_more_servers();
}
state Future<Void> assignFuture = bmData->workersById[workerID.get()].assignBlobRangeRequest.getReply(req);
@ -1073,7 +1073,7 @@ ACTOR Future<Void> doRangeAssignment(Reference<BlobManagerData> bmData,
req.dispose = assignment.revoke.get().dispose;
// if that worker isn't alive anymore, this is a noop
if (bmData->workersById.count(workerID.get())) {
if (bmData->workersById.contains(workerID.get())) {
wait(bmData->workersById[workerID.get()].revokeBlobRangeRequest.getReply(req));
} else {
return Void();
@ -1271,7 +1271,7 @@ static bool handleRangeIsAssign(Reference<BlobManagerData> bmData, RangeAssignme
bmData->assignsInProgress.insert(assignment.keyRange,
doRangeAssignment(bmData, assignment, workerId, bmData->epoch, seqNo));
}
if (bmData->workerStats.count(workerId)) {
if (bmData->workerStats.contains(workerId)) {
bmData->workerStats[workerId].numGranulesAssigned += 1;
}
}
@ -1291,7 +1291,7 @@ static bool handleRangeIsAssign(Reference<BlobManagerData> bmData, RangeAssignme
static bool handleRangeIsRevoke(Reference<BlobManagerData> bmData, RangeAssignment assignment, int64_t seqNo) {
if (assignment.worker.present()) {
// revoke this specific range from this specific worker. Either part of recovery or failing a worker
if (bmData->workerStats.count(assignment.worker.get())) {
if (bmData->workerStats.contains(assignment.worker.get())) {
bmData->workerStats[assignment.worker.get()].numGranulesAssigned -= 1;
}
// if this revoke matches the worker assignment state, mark the range as unassigned
@ -1333,7 +1333,7 @@ static bool handleRangeIsRevoke(Reference<BlobManagerData> bmData, RangeAssignme
// It is fine for multiple disjoint sub-ranges to have the same sequence number since they were part
// of the same logical change
if (bmData->workerStats.count(it.value())) {
if (bmData->workerStats.contains(it.value())) {
bmData->workerStats[it.value()].numGranulesAssigned -= 1;
}
@ -1407,7 +1407,7 @@ ACTOR Future<Void> writeInitialGranuleMapping(Reference<BlobManagerData> bmData,
KeyRangeRef(splitPoints.keys[i], splitPoints.keys[endIdx]),
blobGranuleMappingValueFor(UID())));
for (j = 0; i + j < endIdx; j++) {
if (splitPoints.boundaries.count(splitPoints.keys[i + j])) {
if (splitPoints.boundaries.contains(splitPoints.keys[i + j])) {
tr->set(blobGranuleMergeBoundaryKeyFor(splitPoints.keys[i + j]),
blobGranuleMergeBoundaryValueFor(splitPoints.boundaries[splitPoints.keys[i + j]]));
}
@ -1419,7 +1419,7 @@ ACTOR Future<Void> writeInitialGranuleMapping(Reference<BlobManagerData> bmData,
// Update BlobGranuleMergeBoundary in-memory state.
for (int k = i; k < i + j; k++) {
KeyRef beginKey = splitPoints.keys[k];
if (splitPoints.boundaries.count(beginKey)) {
if (splitPoints.boundaries.contains(beginKey)) {
bmData->mergeBoundaries[beginKey] = splitPoints.boundaries[beginKey];
}
}
@ -1809,7 +1809,7 @@ ACTOR Future<Void> reevaluateInitialSplit(Reference<BlobManagerData> bmData,
if (BM_DEBUG) {
fmt::print("Aligned split ({0}):\n", finalSplit.keys.size());
for (auto& it : finalSplit.keys) {
fmt::print(" {0}{1}\n", it.printable(), finalSplit.boundaries.count(it) ? " *" : "");
fmt::print(" {0}{1}\n", it.printable(), finalSplit.boundaries.contains(it) ? " *" : "");
}
}
@ -1934,7 +1934,7 @@ ACTOR Future<Void> reevaluateInitialSplit(Reference<BlobManagerData> bmData,
blobGranuleMappingKeys.begin,
KeyRangeRef(finalSplit.keys[i], finalSplit.keys[i + 1]),
blobGranuleMappingValueFor(UID())));
if (finalSplit.boundaries.count(finalSplit.keys[i])) {
if (finalSplit.boundaries.contains(finalSplit.keys[i])) {
tr->set(blobGranuleMergeBoundaryKeyFor(finalSplit.keys[i]),
blobGranuleMergeBoundaryValueFor(finalSplit.boundaries[finalSplit.keys[i]]));
}
@ -2063,7 +2063,7 @@ ACTOR Future<Void> maybeSplitRange(Reference<BlobManagerData> bmData,
fmt::print(" {0}:{1}{2}\n",
(i < newGranuleIDs.size() ? newGranuleIDs[i] : UID()).toString().substr(0, 6).c_str(),
splitPoints.keys[i].printable(),
splitPoints.boundaries.count(splitPoints.keys[i]) ? " *" : "");
splitPoints.boundaries.contains(splitPoints.keys[i]) ? " *" : "");
}
}
@ -2195,7 +2195,7 @@ ACTOR Future<Void> maybeSplitRange(Reference<BlobManagerData> bmData,
MutationRef::SetVersionstampedValue);
// Update BlobGranuleMergeBoundary.
if (splitPoints.boundaries.count(splitRange.begin)) {
if (splitPoints.boundaries.contains(splitRange.begin)) {
tr->set(blobGranuleMergeBoundaryKeyFor(splitRange.begin),
blobGranuleMergeBoundaryValueFor(splitPoints.boundaries[splitRange.begin]));
}
@ -2605,7 +2605,7 @@ ACTOR Future<Void> finishMergeGranules(Reference<BlobManagerData> bmData,
// Assert that none of the subsequent granules are hard boundaries.
if (g_network->isSimulated()) {
for (int i = 1; i < parentGranuleRanges.size() - 1; i++) {
ASSERT(!bmData->mergeHardBoundaries.count(parentGranuleRanges[i]));
ASSERT(!bmData->mergeHardBoundaries.contains(parentGranuleRanges[i]));
}
}
@ -2843,7 +2843,7 @@ ACTOR Future<Void> granuleMergeChecker(Reference<BlobManagerData> bmData) {
// 2. Hit the maximum in a merge evaluation window.
// 3. Hit a hard merge boundary meaning we should not merge across them.
if (!it->cvalue().mergeEligible() || currentCandidates.size() == maxRangeSize ||
bmData->mergeHardBoundaries.count(it->range().begin)) {
bmData->mergeHardBoundaries.contains(it->range().begin)) {
if (currentCandidates.size() >= 2) {
mergeChecks.push_back(attemptMerges(bmData, currentCandidates));
}
@ -2859,8 +2859,8 @@ ACTOR Future<Void> granuleMergeChecker(Reference<BlobManagerData> bmData) {
// Conditions:
// 1. Start a new soft merge range.
// 2. End a soft merge range.
if ((!mergeBoundaries.count(curRange.begin) && mergeBoundaries.count(curRange.end)) ||
(mergeBoundaries.count(lastRange.begin) && !mergeBoundaries.count(lastRange.end))) {
if ((!mergeBoundaries.contains(curRange.begin) && mergeBoundaries.contains(curRange.end)) ||
(mergeBoundaries.contains(lastRange.begin) && !mergeBoundaries.contains(lastRange.end))) {
if (currentCandidates.size() >= 2) {
mergeChecks.push_back(attemptMerges(bmData, currentCandidates));
}
@ -2972,10 +2972,10 @@ ACTOR Future<Void> killBlobWorker(Reference<BlobManagerData> bmData, BlobWorkerI
}
Optional<UID> successor = bwId;
while (bmData->workerAffinities.count(successor.get())) {
while (bmData->workerAffinities.contains(successor.get())) {
successor = bmData->workerAffinities[successor.get()];
}
if (successor.get() == bwId || !bmData->workersById.count(successor.get())) {
if (successor.get() == bwId || !bmData->workersById.contains(successor.get())) {
successor = Optional<UID>();
}
@ -3394,16 +3394,16 @@ ACTOR Future<Void> checkBlobWorkerList(Reference<BlobManagerData> bmData, Promis
// add all blob workers to this new blob manager's records and start monitoring it
bool foundAnyNew = false;
for (auto& worker : blobWorkers) {
if (!bmData->deadWorkers.count(worker.id())) {
if (!bmData->deadWorkers.contains(worker.id())) {
bool isFailedOrExcluded = bmData->exclusionTracker.isFailedOrExcluded(worker.stableAddress());
if (!bmData->workerAddresses.count(worker.stableAddress()) &&
if (!bmData->workerAddresses.contains(worker.stableAddress()) &&
worker.locality.dcId() == bmData->dcId && !isFailedOrExcluded) {
bmData->workerAddresses.insert(worker.stableAddress());
bmData->workersById[worker.id()] = worker;
bmData->workerStats[worker.id()] = BlobWorkerInfo();
bmData->addActor.send(monitorBlobWorker(bmData, worker));
foundAnyNew = true;
} else if (!bmData->workersById.count(worker.id())) {
} else if (!bmData->workersById.contains(worker.id())) {
TraceEvent("KillingExtraneousBlobWorker", bmData->id)
.detail("WorkerId", worker.id())
.detail("Addr", worker.stableAddress())
@ -3880,7 +3880,7 @@ ACTOR Future<Void> recoverBlobManager(Reference<BlobManagerData> bmData) {
assignment.seqnoAssigned,
outOfDateAssignments);
}
if (bmData->workerStats.count(workerId)) {
if (bmData->workerStats.contains(workerId)) {
bmData->workerStats[workerId].numGranulesAssigned = reply.get().assignments.size();
}
} else {
@ -4043,11 +4043,11 @@ ACTOR Future<Void> recoverBlobManager(Reference<BlobManagerData> bmData) {
// if worker id is already set to a known worker that replied with it in the mapping, range is already assigned
// there. If not, need to explicitly assign it to someone
if (workerId == UID() || epoch == 0 || !endingWorkers.count(workerId)) {
if (workerId == UID() || epoch == 0 || !endingWorkers.contains(workerId)) {
if (workerId == UID()) {
workerId = workerAffinity;
}
while (bmData->workerAffinities.count(workerId)) {
while (bmData->workerAffinities.contains(workerId)) {
workerId = bmData->workerAffinities[workerId];
CODE_PROBE(true, "Blob worker has affinity after reboot");
}
@ -4058,7 +4058,7 @@ ACTOR Future<Void> recoverBlobManager(Reference<BlobManagerData> bmData) {
RangeAssignment raAssign;
raAssign.isAssign = true;
if (bmData->workersById.count(workerId)) {
if (bmData->workersById.contains(workerId)) {
raAssign.worker = workerId;
}
raAssign.keyRange = range.range();
@ -4122,7 +4122,7 @@ ACTOR Future<Void> chaosRangeMover(Reference<BlobManagerData> bmData) {
while (tries > 0) {
tries--;
auto randomRange = bmData->workerAssignments.randomRange();
if (randomRange.value() != UID() && !alreadyMoved.count(randomRange.range().toString())) {
if (randomRange.value() != UID() && !alreadyMoved.contains(randomRange.range().toString())) {
if (BM_DEBUG) {
fmt::print("Range mover moving range [{0} - {1}): {2}\n",
randomRange.begin().printable().c_str(),
@ -4182,7 +4182,7 @@ ACTOR Future<Void> initializeBlobWorker(Reference<BlobManagerData> self,
// Ask the candidateWorker to initialize a BW only if the worker does not have a pending request
if (numExistingBWOnAddr(self, workerAddr) == 0 &&
self->recruitingLocalities.count(candidateWorker.worker.stableAddress()) == 0) {
!self->recruitingLocalities.contains(candidateWorker.worker.stableAddress())) {
state UID interfaceId = deterministicRandom()->randomUniqueID();
state InitializeBlobWorkerRequest initReq;
@ -4230,13 +4230,13 @@ ACTOR Future<Void> initializeBlobWorker(Reference<BlobManagerData> self,
if (newBlobWorker.present()) {
BlobWorkerInterface bwi = newBlobWorker.get().interf;
if (!self->deadWorkers.count(bwi.id())) {
if (!self->workerAddresses.count(bwi.stableAddress()) && bwi.locality.dcId() == self->dcId) {
if (!self->deadWorkers.contains(bwi.id())) {
if (!self->workerAddresses.contains(bwi.stableAddress()) && bwi.locality.dcId() == self->dcId) {
self->workerAddresses.insert(bwi.stableAddress());
self->workersById[bwi.id()] = bwi;
self->workerStats[bwi.id()] = BlobWorkerInfo();
self->addActor.send(monitorBlobWorker(self, bwi));
} else if (!self->workersById.count(bwi.id())) {
} else if (!self->workersById.contains(bwi.id())) {
self->addActor.send(killBlobWorker(self, bwi, false));
}
}
@ -5970,7 +5970,7 @@ ACTOR Future<Void> blobManager(BlobManagerInterface bmInterf,
if (g_network->isSimulated()) {
UID clusterId = wait(fetchClusterId(self->db));
auto clusterEpoc = std::make_pair(clusterId, epoch);
bool managerEpochAlreadySeen = managerEpochsSeen.count(clusterEpoc);
bool managerEpochAlreadySeen = managerEpochsSeen.contains(clusterEpoc);
if (managerEpochAlreadySeen) {
TraceEvent(SevError, "DuplicateBlobManagersAtEpoch")
.detail("ClusterId", clusterId)

View File

@ -34,6 +34,7 @@
#include "fdbclient/DatabaseContext.h"
#include "fdbrpc/FailureMonitor.h"
#include "fdbclient/EncryptKeyProxyInterface.h"
#include "fdbrpc/Locality.h"
#include "fdbserver/BlobGranuleServerCommon.actor.h"
#include "fdbserver/BlobMigratorInterface.h"
#include "fdbserver/Knobs.h"
@ -94,12 +95,19 @@ ACTOR Future<Optional<Value>> getPreviousCoordinators(ClusterControllerData* sel
}
}
bool ClusterControllerData::processesInSameDC(const NetworkAddress& addr1, const NetworkAddress& addr2) const {
return this->addr_locality.contains(addr1) && this->addr_locality.contains(addr2) &&
this->addr_locality.at(addr1).dcId().present() && this->addr_locality.at(addr2).dcId().present() &&
this->addr_locality.at(addr1).dcId().get() == this->addr_locality.at(addr2).dcId().get();
}
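A minimal standalone sketch of the same-DC check above, assuming plain std::string addresses and a simplified Locality struct in place of FDB's NetworkAddress/LocalityData (hypothetical names, illustration only). The real code additionally maintains the addr_locality map only when CC_ONLY_CONSIDER_INTRA_DC_LATENCY is on, as the registration and cleanup hunks further down show.
#include <optional>
#include <string>
#include <unordered_map>
struct Locality {
    std::optional<std::string> dcId; // datacenter id, if the process advertised one
};
// Mirrors processesInSameDC: both addresses must be known, both must carry a dcId,
// and the two dcIds must match.
bool inSameDC(const std::unordered_map<std::string, Locality>& addrLocality,
              const std::string& a,
              const std::string& b) {
    auto ia = addrLocality.find(a);
    auto ib = addrLocality.find(b);
    return ia != addrLocality.end() && ib != addrLocality.end() &&
           ia->second.dcId.has_value() && ib->second.dcId.has_value() &&
           *ia->second.dcId == *ib->second.dcId;
}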
bool ClusterControllerData::transactionSystemContainsDegradedServers() {
const ServerDBInfo& dbi = db.serverInfo->get();
const Reference<ClusterRecoveryData> recoveryData = db.recoveryData;
auto transactionWorkerInList = [&dbi, &recoveryData](const std::unordered_set<NetworkAddress>& serverList,
bool skipSatellite,
bool skipRemote) -> bool {
bool skipRemoteTLog,
bool skipRemoteLogRouter) -> bool {
for (const auto& server : serverList) {
if (dbi.master.addresses().contains(server)) {
return true;
@ -115,15 +123,19 @@ bool ClusterControllerData::transactionSystemContainsDegradedServers() {
continue;
}
if (skipRemote && !logSet.isLocal) {
continue;
}
if (!logSet.isLocal) {
// Only check log routers in the remote region.
for (const auto& logRouter : logSet.logRouters) {
if (logRouter.present() && logRouter.interf().addresses().contains(server)) {
return true;
if (!skipRemoteTLog) {
for (const auto& tlog : logSet.tLogs) {
if (tlog.present() && tlog.interf().addresses().contains(server)) {
return true;
}
}
}
if (!skipRemoteLogRouter) {
for (const auto& logRouter : logSet.logRouters) {
if (logRouter.present() && logRouter.interf().addresses().contains(server)) {
return true;
}
}
}
} else {
@ -176,13 +188,23 @@ bool ClusterControllerData::transactionSystemContainsDegradedServers() {
return false;
};
// Check if transaction system contains degraded/disconnected servers. For satellite and remote regions, we only
// Check if transaction system contains degraded/disconnected servers. For satellite, we only
// check for disconnection since the latency between primary and satellite is across WAN and may not be very
// stable.
return transactionWorkerInList(degradationInfo.degradedServers, /*skipSatellite=*/true, /*skipRemote=*/true) ||
// TODO: Consider adding satellite latency degradation check and rely on
// SERVER_KNOBS->CC_ONLY_CONSIDER_INTRA_DC_LATENCY for accurate health signal
return transactionWorkerInList(degradationInfo.degradedServers,
/*skipSatellite=*/true,
/*skipRemoteTLog=*/
!(SERVER_KNOBS->CC_ONLY_CONSIDER_INTRA_DC_LATENCY &&
SERVER_KNOBS->CC_ENABLE_REMOTE_TLOG_DEGRADATION_MONITORING),
/*skipRemoteLogRouter*/
!(SERVER_KNOBS->CC_ONLY_CONSIDER_INTRA_DC_LATENCY &&
SERVER_KNOBS->CC_ENABLE_REMOTE_LOG_ROUTER_DEGRADATION_MONITORING)) ||
transactionWorkerInList(degradationInfo.disconnectedServers,
/*skipSatellite=*/false,
/*skipRemote=*/!SERVER_KNOBS->CC_ENABLE_REMOTE_LOG_ROUTER_MONITORING);
/*skipRemoteTLog=*/!SERVER_KNOBS->CC_ENABLE_REMOTE_TLOG_DISCONNECT_MONITORING,
/*skipRemoteLogRouter*/ !SERVER_KNOBS->CC_ENABLE_REMOTE_LOG_ROUTER_MONITORING);
}
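Read as plain booleans, the knob gating in the call above reduces to the following sketch (the values are made up; the bools stand in for the SERVER_KNOBS members named in the comments):
// Remote TLog / log router degradation is only considered when intra-DC-only latency
// monitoring is enabled together with the corresponding degradation-monitoring knob.
bool intraDcOnly = true;           // CC_ONLY_CONSIDER_INTRA_DC_LATENCY
bool remoteTLogDegMon = true;      // CC_ENABLE_REMOTE_TLOG_DEGRADATION_MONITORING
bool remoteRouterDegMon = false;   // CC_ENABLE_REMOTE_LOG_ROUTER_DEGRADATION_MONITORING
bool skipRemoteTLog = !(intraDcOnly && remoteTLogDegMon);        // false: remote TLogs are checked
bool skipRemoteLogRouter = !(intraDcOnly && remoteRouterDegMon); // true: remote log routers are skipped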
bool ClusterControllerData::remoteTransactionSystemContainsDegradedServers() {
@ -558,7 +580,7 @@ bool isHealthySingleton(ClusterControllerData* self,
const Optional<UID> recruitingID) {
// A singleton is stable if it exists in cluster, has not been killed off of proc and is not being recruited
bool isStableSingleton = singleton.isPresent() &&
self->id_worker.count(singleton.getInterface().locality.processId()) &&
self->id_worker.contains(singleton.getInterface().locality.processId()) &&
(!recruitingID.present() || (recruitingID.get() == singleton.getInterface().id()));
if (!isStableSingleton) {
@ -914,6 +936,14 @@ ACTOR Future<Void> workerAvailabilityWatch(WorkerInterface worker,
.detail("Address", worker.address());
cluster->removedDBInfoEndpoints.insert(worker.updateServerDBInfo.getEndpoint());
cluster->id_worker.erase(worker.locality.processId());
// Currently, only CC_ONLY_CONSIDER_INTRA_DC_LATENCY feature relies on addr_locality mapping. In the
// future, if needed, we can populate the mapping unconditionally.
if (SERVER_KNOBS->CC_ONLY_CONSIDER_INTRA_DC_LATENCY) {
cluster->addr_locality.erase(worker.address());
if (worker.secondaryAddress().present()) {
cluster->addr_locality.erase(worker.secondaryAddress().get());
}
}
cluster->updateWorkerList.set(worker.locality.processId(), Optional<ProcessData>());
return Void();
}
@ -1149,7 +1179,7 @@ void haltRegisteringOrCurrentSingleton(ClusterControllerData* self,
// if not currently recruiting, then halt previous one in favour of requesting one
TraceEvent(("CCRegister" + roleName).c_str(), self->id).detail(roleAbbr + "ID", registeringID);
if (currSingleton.isPresent() && currSingleton.getInterface().id() != registeringID &&
self->id_worker.count(currSingleton.getInterface().locality.processId())) {
self->id_worker.contains(currSingleton.getInterface().locality.processId())) {
TraceEvent(("CCHaltPrevious" + roleName).c_str(), self->id)
.detail(roleAbbr + "ID", currSingleton.getInterface().id())
.detail("DcID", printable(self->clusterControllerDcId))
@ -1275,6 +1305,23 @@ ACTOR Future<Void> registerWorker(RegisterWorkerRequest req,
req.degraded,
req.recoveredDiskFiles,
req.issues);
// Currently, only CC_ONLY_CONSIDER_INTRA_DC_LATENCY feature relies on addr_locality mapping. In the future, if
// needed, we can populate the mapping unconditionally.
if (SERVER_KNOBS->CC_ONLY_CONSIDER_INTRA_DC_LATENCY) {
const bool addrDcChanged = self->addr_locality.contains(w.address()) &&
self->addr_locality[w.address()].dcId() != w.locality.dcId();
if (addrDcChanged) {
TraceEvent(SevWarn, "AddrDcChanged")
.detail("Addr", w.address())
.detail("ExistingLocality", self->addr_locality[w.address()].toString())
.detail("NewLocality", w.locality.toString());
}
ASSERT_WE_THINK(!addrDcChanged);
self->addr_locality[w.address()] = w.locality;
if (w.secondaryAddress().present()) {
self->addr_locality[w.secondaryAddress().get()] = w.locality;
}
}
if (!self->masterProcessId.present() &&
w.locality.processId() == self->db.serverInfo->get().master.locality.processId()) {
self->masterProcessId = w.locality.processId();
@ -1713,7 +1760,7 @@ ACTOR Future<Void> monitorStorageMetadata(ClusterControllerData* self) {
idMetadata[id] = decodeServerMetadataValue(sm.value);
}
for (auto& s : servers) {
if (idMetadata.count(s.id())) {
if (idMetadata.contains(s.id())) {
s.metadata = idMetadata[s.id()];
} else {
TraceEvent(SevWarn, "StorageServerMetadataMissing", self->id).detail("ServerID", s.id());
@ -2236,7 +2283,7 @@ ACTOR Future<Void> startDataDistributor(ClusterControllerData* self, double wait
.detail("Addr", worker.interf.address())
.detail("DDID", ddInterf.get().id());
if (distributor.present() && distributor.get().id() != ddInterf.get().id() &&
self->id_worker.count(distributor.get().locality.processId())) {
self->id_worker.contains(distributor.get().locality.processId())) {
TraceEvent("CCHaltDataDistributorAfterRecruit", self->id)
.detail("DDID", distributor.get().id())
@ -2336,7 +2383,7 @@ ACTOR Future<Void> startRatekeeper(ClusterControllerData* self, double waitTime)
.detail("Addr", worker.interf.address())
.detail("RKID", interf.get().id());
if (ratekeeper.present() && ratekeeper.get().id() != interf.get().id() &&
self->id_worker.count(ratekeeper.get().locality.processId())) {
self->id_worker.contains(ratekeeper.get().locality.processId())) {
TraceEvent("CCHaltRatekeeperAfterRecruit", self->id)
.detail("RKID", ratekeeper.get().id())
.detail("DcID", printable(self->clusterControllerDcId));
@ -2426,7 +2473,7 @@ ACTOR Future<Void> startConsistencyScan(ClusterControllerData* self) {
.detail("Addr", worker.interf.address())
.detail("CKID", interf.get().id());
if (consistencyScan.present() && consistencyScan.get().id() != interf.get().id() &&
self->id_worker.count(consistencyScan.get().locality.processId())) {
self->id_worker.contains(consistencyScan.get().locality.processId())) {
TraceEvent("CCHaltConsistencyScanAfterRecruit", self->id)
.detail("CKID", consistencyScan.get().id())
.detail("DcID", printable(self->clusterControllerDcId));
@ -2528,7 +2575,7 @@ ACTOR Future<Void> startEncryptKeyProxy(ClusterControllerData* self, EncryptionA
.detail("Id", interf.get().id())
.detail("ProcessId", interf.get().locality.processId());
if (encryptKeyProxy.present() && encryptKeyProxy.get().id() != interf.get().id() &&
self->id_worker.count(encryptKeyProxy.get().locality.processId())) {
self->id_worker.contains(encryptKeyProxy.get().locality.processId())) {
TraceEvent("CCEKP_HaltAfterRecruit", self->id)
.detail("Id", encryptKeyProxy.get().id())
.detail("DcId", printable(self->clusterControllerDcId));
@ -2700,7 +2747,7 @@ ACTOR Future<Void> startBlobMigrator(ClusterControllerData* self, double waitTim
.detail("Addr", worker.interf.address())
.detail("MGID", interf.get().id());
if (blobMigrator.present() && blobMigrator.get().id() != interf.get().id() &&
self->id_worker.count(blobMigrator.get().locality.processId())) {
self->id_worker.contains(blobMigrator.get().locality.processId())) {
TraceEvent("CCHaltBlobMigratorAfterRecruit", self->id)
.detail("MGID", blobMigrator.get().id())
.detail("DcID", printable(self->clusterControllerDcId));
@ -2805,7 +2852,7 @@ ACTOR Future<Void> startBlobManager(ClusterControllerData* self, double waitTime
.detail("Addr", worker.interf.address())
.detail("BMID", interf.get().id());
if (blobManager.present() && blobManager.get().id() != interf.get().id() &&
self->id_worker.count(blobManager.get().locality.processId())) {
self->id_worker.contains(blobManager.get().locality.processId())) {
TraceEvent("CCHaltBlobManagerAfterRecruit", self->id)
.detail("BMID", blobManager.get().id())
.detail("DcID", printable(self->clusterControllerDcId));
@ -3363,6 +3410,15 @@ ACTOR Future<Void> clusterController(Reference<IClusterConnectionRecord> connRec
namespace {
void addProcessesToSameDC(ClusterControllerData& self, const std::vector<NetworkAddress>&& processes) {
LocalityData locality;
locality.set(LocalityData::keyDcId, Standalone<StringRef>(std::string{ "1" }));
for (const auto& process : processes) {
const bool added = self.addr_locality.insert({ process, locality }).second;
ASSERT(added);
}
}
// Tests `ClusterControllerData::updateWorkerHealth()` can update `ClusterControllerData::workerHealth`
// based on `UpdateWorkerHealth` request correctly.
TEST_CASE("/fdbserver/clustercontroller/updateWorkerHealth") {
@ -3536,6 +3592,10 @@ TEST_CASE("/fdbserver/clustercontroller/getDegradationInfo") {
NetworkAddress badPeer3(IPAddress(0x04040404), 1);
NetworkAddress badPeer4(IPAddress(0x05050505), 1);
if (SERVER_KNOBS->CC_ONLY_CONSIDER_INTRA_DC_LATENCY) {
addProcessesToSameDC(data, { worker, badPeer1, badPeer2, badPeer3, badPeer4 });
}
// Test that a reported degraded link should stay for sometime before being considered as a degraded
// link by cluster controller.
{
@ -3797,22 +3857,32 @@ TEST_CASE("/fdbserver/clustercontroller/shouldTriggerRecoveryDueToDegradedServer
data.degradationInfo.disconnectedServers.clear();
// No recovery when remote tlog is degraded.
data.degradationInfo.degradedServers.insert(remoteTlog);
ASSERT(!data.shouldTriggerRecoveryDueToDegradedServers());
data.degradationInfo.degradedServers.clear();
data.degradationInfo.disconnectedServers.insert(remoteTlog);
ASSERT(!data.shouldTriggerRecoveryDueToDegradedServers());
data.degradationInfo.disconnectedServers.clear();
if (!(SERVER_KNOBS->CC_ONLY_CONSIDER_INTRA_DC_LATENCY &&
SERVER_KNOBS->CC_ENABLE_REMOTE_TLOG_DEGRADATION_MONITORING)) {
data.degradationInfo.degradedServers.insert(remoteTlog);
ASSERT(!data.shouldTriggerRecoveryDueToDegradedServers());
data.degradationInfo.degradedServers.clear();
}
if (!SERVER_KNOBS->CC_ENABLE_REMOTE_TLOG_DISCONNECT_MONITORING) {
data.degradationInfo.disconnectedServers.insert(remoteTlog);
ASSERT(!data.shouldTriggerRecoveryDueToDegradedServers());
data.degradationInfo.disconnectedServers.clear();
}
// No recovery when remote log router is degraded.
data.degradationInfo.degradedServers.insert(logRouter);
ASSERT(!data.shouldTriggerRecoveryDueToDegradedServers());
data.degradationInfo.degradedServers.clear();
if (!(SERVER_KNOBS->CC_ONLY_CONSIDER_INTRA_DC_LATENCY &&
SERVER_KNOBS->CC_ENABLE_REMOTE_LOG_ROUTER_DEGRADATION_MONITORING)) {
data.degradationInfo.degradedServers.insert(logRouter);
ASSERT(!data.shouldTriggerRecoveryDueToDegradedServers());
data.degradationInfo.degradedServers.clear();
}
// Trigger recovery when remote log router is disconnected.
data.degradationInfo.disconnectedServers.insert(logRouter);
ASSERT(data.shouldTriggerRecoveryDueToDegradedServers());
data.degradationInfo.disconnectedServers.clear();
if (SERVER_KNOBS->CC_ENABLE_REMOTE_LOG_ROUTER_MONITORING) {
data.degradationInfo.disconnectedServers.insert(logRouter);
ASSERT(data.shouldTriggerRecoveryDueToDegradedServers());
data.degradationInfo.disconnectedServers.clear();
}
// No recovery when backup worker is degraded.
data.degradationInfo.degradedServers.insert(backup);

View File

@ -270,7 +270,7 @@ ACTOR Future<Void> newTLogServers(Reference<ClusterRecoveryData> self,
std::vector<Standalone<CommitTransactionRef>>* initialConfChanges) {
if (self->configuration.usableRegions > 1) {
state Optional<Key> remoteDcId = self->remoteDcIds.size() ? self->remoteDcIds[0] : Optional<Key>();
if (!self->dcId_locality.count(recr.dcId)) {
if (!self->dcId_locality.contains(recr.dcId)) {
int8_t loc = self->getNextLocality();
Standalone<CommitTransactionRef> tr;
tr.set(tr.arena(), tagLocalityListKeyFor(recr.dcId), tagLocalityListValue(loc));
@ -279,7 +279,7 @@ ACTOR Future<Void> newTLogServers(Reference<ClusterRecoveryData> self,
TraceEvent(SevWarn, "UnknownPrimaryDCID", self->dbgid).detail("PrimaryId", recr.dcId).detail("Loc", loc);
}
if (!self->dcId_locality.count(remoteDcId)) {
if (!self->dcId_locality.contains(remoteDcId)) {
int8_t loc = self->getNextLocality();
Standalone<CommitTransactionRef> tr;
tr.set(tr.arena(), tagLocalityListKeyFor(remoteDcId), tagLocalityListValue(loc));
@ -357,7 +357,7 @@ ACTOR Future<Void> newSeedServers(Reference<ClusterRecoveryData> self,
.detail("CandidateWorker", recruits.storageServers[idx].locality.toString());
InitializeStorageRequest isr;
isr.seedTag = dcId_tags.count(recruits.storageServers[idx].locality.dcId())
isr.seedTag = dcId_tags.contains(recruits.storageServers[idx].locality.dcId())
? dcId_tags[recruits.storageServers[idx].locality.dcId()]
: Tag(nextLocality, 0);
isr.storeType = self->configuration.storageServerStoreType;
@ -376,7 +376,7 @@ ACTOR Future<Void> newSeedServers(Reference<ClusterRecoveryData> self,
CODE_PROBE(true, "initial storage recruitment loop failed to get new server");
wait(delay(SERVER_KNOBS->STORAGE_RECRUITMENT_DELAY));
} else {
if (!dcId_tags.count(recruits.storageServers[idx].locality.dcId())) {
if (!dcId_tags.contains(recruits.storageServers[idx].locality.dcId())) {
dcId_tags[recruits.storageServers[idx].locality.dcId()] = Tag(nextLocality, 0);
nextLocality++;
}
@ -758,7 +758,7 @@ ACTOR Future<Void> updateLogsValue(Reference<ClusterRecoveryData> self, Database
bool found = false;
for (auto& logSet : self->logSystem->getLogSystemConfig().tLogs) {
for (auto& log : logSet.tLogs) {
if (logIds.count(log.id())) {
if (logIds.contains(log.id())) {
found = true;
break;
}
@ -1832,7 +1832,7 @@ ACTOR Future<Void> cleanupRecoveryActorCollection(Reference<ClusterRecoveryData>
}
bool isNormalClusterRecoveryError(const Error& error) {
return normalClusterRecoveryErrors().count(error.code());
return normalClusterRecoveryErrors().contains(error.code());
}
std::string& getRecoveryEventName(ClusterRecoveryEventType type) {

View File

@ -427,7 +427,7 @@ ACTOR Future<Void> commitBatcher(ProxyCommitData* commitData,
if (SERVER_KNOBS->STORAGE_QUOTA_ENABLED && !req.bypassStorageQuota() &&
req.tenantInfo.hasTenant() &&
commitData->tenantsOverStorageQuota.count(req.tenantInfo.tenantId) > 0) {
commitData->tenantsOverStorageQuota.contains(req.tenantInfo.tenantId)) {
req.reply.sendError(storage_quota_exceeded());
continue;
}
@ -815,6 +815,11 @@ inline bool shouldBackup(MutationRef const& m) {
std::set<Tag> CommitBatchContext::getWrittenTagsPreResolution() {
std::set<Tag> transactionTags;
std::vector<Tag> cacheVector = { cacheTag };
if (pProxyCommitData->txnStateStore->getReplaceContent()) {
// Return an empty set if txnStateStore will snapshot;
// empty sets are sent to all logs.
return transactionTags;
}
for (int transactionNum = 0; transactionNum < trs.size(); transactionNum++) {
int mutationNum = 0;
VectorRef<MutationRef>* pMutations = &trs[transactionNum].transaction.mutations;
@ -1056,7 +1061,7 @@ EncryptCipherDomainId getEncryptDetailsFromMutationRef(ProxyCommitData* commitDa
// Parse mutation key to determine mutation encryption domain
StringRef prefix = m.param1.substr(0, TenantAPI::PREFIX_SIZE);
int64_t tenantId = TenantAPI::prefixToId(prefix, EnforceValidTenantId::False);
if (commitData->tenantMap.count(tenantId)) {
if (commitData->tenantMap.contains(tenantId)) {
domainId = tenantId;
} else {
// Leverage 'default encryption domain'
@ -1194,7 +1199,7 @@ void assertResolutionStateMutationsSizeConsistent(const std::vector<ResolveTrans
bool validTenantAccess(MutationRef m, std::map<int64_t, TenantName> const& tenantMap, Optional<int64_t>& tenantId) {
if (isSingleKeyMutation((MutationRef::Type)m.type)) {
tenantId = TenantAPI::extractTenantIdFromMutation(m);
bool isLegalTenant = tenantMap.count(tenantId.get()) > 0;
bool isLegalTenant = tenantMap.contains(tenantId.get());
CODE_PROBE(!isLegalTenant, "Commit proxy access invalid tenant");
return isLegalTenant;
}
@ -1572,7 +1577,7 @@ Error validateAndProcessTenantAccess(CommitTransactionRequest& tr,
if (!isValid) {
return tenant_not_found();
}
if (!tr.isLockAware() && pProxyCommitData->lockedTenants.count(tr.tenantInfo.tenantId) > 0) {
if (!tr.isLockAware() && pProxyCommitData->lockedTenants.contains(tr.tenantInfo.tenantId)) {
CODE_PROBE(true, "Attempt access to locked tenant without lock awareness");
return tenant_locked();
}
@ -1626,7 +1631,7 @@ void applyMetadataEffect(CommitBatchContext* self) {
// check if all tenant ids are valid if committed == true
committed = committed &&
std::all_of(tenantIds.get().begin(), tenantIds.get().end(), [self](const int64_t& tid) {
return self->pProxyCommitData->tenantMap.count(tid);
return self->pProxyCommitData->tenantMap.contains(tid);
});
if (self->debugID.present()) {
@ -1805,7 +1810,7 @@ ACTOR Future<Void> applyMetadataToCommittedTransactions(CommitBatchContext* self
if (pProxyCommitData->encryptMode == EncryptionAtRestMode::DOMAIN_AWARE && !rawAccessTenantIds.empty()) {
std::unordered_set<EncryptCipherDomainId> extraDomainIds;
for (auto tenantId : rawAccessTenantIds) {
if (self->cipherKeys.count(tenantId) == 0) {
if (!self->cipherKeys.contains(tenantId)) {
extraDomainIds.insert(tenantId);
}
}
@ -1892,7 +1897,7 @@ Future<WriteMutationRefVar> writeMutation(CommitBatchContext* self,
CODE_PROBE(true, "Raw access mutation encryption", probe::decoration::rare);
}
ASSERT_NE(domainId, INVALID_ENCRYPT_DOMAIN_ID);
ASSERT(self->cipherKeys.count(domainId) > 0);
ASSERT(self->cipherKeys.contains(domainId));
encryptedMutation =
mutation->encrypt(self->cipherKeys, domainId, *arena, BlobCipherMetrics::TLOG, encryptTime);
}
@ -2004,6 +2009,46 @@ void addAccumulativeChecksumMutations(CommitBatchContext* self) {
}
}
// RangeLock takes effect only when the feature flag is on, the database is unlocked, and the mutation is not encrypted.
void rejectMutationsForReadLockOnRange(CommitBatchContext* self) {
ASSERT(SERVER_KNOBS->ENABLE_READ_LOCK_ON_RANGE && !self->locked &&
!self->pProxyCommitData->encryptMode.isEncryptionEnabled() &&
self->pProxyCommitData->getTenantMode() == TenantMode::DISABLED);
ProxyCommitData* const pProxyCommitData = self->pProxyCommitData;
ASSERT(pProxyCommitData->rangeLock != nullptr);
std::vector<CommitTransactionRequest>& trs = self->trs;
for (int i = self->transactionNum; i < trs.size(); i++) {
if (self->committed[i] != ConflictBatch::TransactionCommitted) {
continue;
}
VectorRef<MutationRef>* pMutations = &trs[i].transaction.mutations;
bool transactionRejected = false;
for (int j = 0; j < pMutations->size(); j++) {
MutationRef m = (*pMutations)[j];
ASSERT_WE_THINK(!m.isEncrypted());
if (m.isEncrypted()) {
continue;
}
KeyRange rangeToCheck;
if (isSingleKeyMutation((MutationRef::Type)m.type)) {
rangeToCheck = singleKeyRange(m.param1);
} else if (m.type == MutationRef::ClearRange) {
rangeToCheck = KeyRangeRef(m.param1, m.param2);
}
bool shouldReject = pProxyCommitData->rangeLock->isLocked(rangeToCheck);
if (shouldReject) {
self->committed[i] = ConflictBatch::TransactionLockReject;
trs[i].reply.sendError(transaction_rejected_range_locked());
transactionRejected = true;
}
if (transactionRejected) {
break;
}
}
}
return;
}
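A simplified, self-contained sketch of the rejection rule above: derive a key range from each mutation and reject the transaction if that range overlaps any locked range. std::string keys and a vector of half-open ranges stand in for FDB's KeyRangeRef and the commit proxy's rangeLock structure (hypothetical names, illustration only):
#include <string>
#include <utility>
#include <vector>
using Range = std::pair<std::string, std::string>; // half-open [begin, end), lexicographic keys
// True if [a.first, a.second) and [b.first, b.second) overlap.
bool intersects(const Range& a, const Range& b) {
    return a.first < b.second && b.first < a.second;
}
// Mirrors the loop above: a single-key mutation is treated as the point range
// [key, key + '\x00'), a ClearRange as [param1, param2); any overlap with a
// locked range marks the transaction for rejection.
bool shouldReject(const std::vector<Range>& lockedRanges, const Range& mutationRange) {
    for (const auto& locked : lockedRanges) {
        if (intersects(locked, mutationRange)) {
            return true;
        }
    }
    return false;
}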
/// This second pass through committed transactions assigns the actual mutations to the appropriate storage servers'
/// tags
ACTOR Future<Void> assignMutationsToStorageServers(CommitBatchContext* self) {
@ -2296,6 +2341,16 @@ ACTOR Future<Void> postResolution(CommitBatchContext* self) {
"CommitDebug", debugID.get().first(), "CommitProxyServer.commitBatch.ApplyMetadataToCommittedTxn");
}
// After applying the metadata change, this commit proxy has the latest view of locked ranges.
// If a transaction has any mutation accessing a locked range, reject the transaction with
// error_code_transaction_rejected_range_locked.
// This feature is disabled when the database is locked.
if (SERVER_KNOBS->ENABLE_READ_LOCK_ON_RANGE && !self->locked &&
!pProxyCommitData->encryptMode.isEncryptionEnabled() &&
self->pProxyCommitData->getTenantMode() == TenantMode::DISABLED) {
rejectMutationsForReadLockOnRange(self);
}
// Second pass
wait(assignMutationsToStorageServers(self));
@ -2643,7 +2698,8 @@ ACTOR Future<Void> reply(CommitBatchContext* self) {
tr.reply.send(CommitID(self->commitVersion, t, self->metadataVersionAfter));
} else if (self->committed[t] == ConflictBatch::TransactionTooOld) {
tr.reply.sendError(transaction_too_old());
} else if (self->committed[t] == ConflictBatch::TransactionTenantFailure) {
} else if (self->committed[t] == ConflictBatch::TransactionTenantFailure ||
self->committed[t] == ConflictBatch::TransactionLockReject) {
// We already sent the error
ASSERT(tr.reply.isSet());
} else {
@ -2737,7 +2793,6 @@ ACTOR Future<Void> reply(CommitBatchContext* self) {
target_latency * SERVER_KNOBS->COMMIT_TRANSACTION_BATCH_INTERVAL_SMOOTHER_ALPHA +
pProxyCommitData->commitBatchInterval *
(1 - SERVER_KNOBS->COMMIT_TRANSACTION_BATCH_INTERVAL_SMOOTHER_ALPHA)));
pProxyCommitData->stats.commitBatchingWindowSize.addMeasurement(pProxyCommitData->commitBatchInterval);
pProxyCommitData->commitBatchesMemBytesCount -= self->currentBatchMemBytesCount;
ASSERT_ABORT(pProxyCommitData->commitBatchesMemBytesCount >= 0);
@ -2827,7 +2882,7 @@ void maybeAddTssMapping(GetKeyServerLocationsReply& reply,
ProxyCommitData* commitData,
std::unordered_set<UID>& included,
UID ssId) {
if (!included.count(ssId)) {
if (!included.contains(ssId)) {
auto mappingItr = commitData->tssMapping.find(ssId);
if (mappingItr != commitData->tssMapping.end()) {
reply.resultsTssMapping.push_back(*mappingItr);
@ -3112,8 +3167,8 @@ ACTOR static Future<Void> doBlobGranuleLocationRequest(GetBlobGranuleLocationsRe
throw blob_granule_transaction_too_old();
}
if (!req.justGranules && !commitData->blobWorkerInterfCache.count(workerId) &&
!bwiLookedUp.count(workerId)) {
if (!req.justGranules && !commitData->blobWorkerInterfCache.contains(workerId) &&
!bwiLookedUp.contains(workerId)) {
bwiLookedUp.insert(workerId);
bwiLookupFutures.push_back(tr.get(blobWorkerListKeyFor(workerId)));
}
@ -3705,27 +3760,30 @@ ACTOR Future<Void> processCompleteTransactionStateRequest(TransactionStateResolv
}
};
for (auto& kv : data) {
if (!kv.key.startsWith(keyServersPrefix)) {
if (kv.key.startsWith(keyServersPrefix)) {
KeyRef k = kv.key.removePrefix(keyServersPrefix);
if (k == allKeys.end) {
continue;
}
decodeKeyServersValue(tag_uid, kv.value, src, dest);
info.tags.clear();
info.src_info.clear();
updateTagInfo(src, info.tags, info.src_info);
info.dest_info.clear();
updateTagInfo(dest, info.tags, info.dest_info);
uniquify(info.tags);
keyInfoData.emplace_back(MapPair<Key, ServerCacheInfo>(k, info), 1);
} else if (kv.key.startsWith(rangeLockPrefix)) {
Key keyInsert = kv.key.removePrefix(rangeLockPrefix);
pContext->pCommitData->rangeLock->initKeyPoint(keyInsert, kv.value);
} else {
mutations.emplace_back(mutations.arena(), MutationRef::SetValue, kv.key, kv.value);
continue;
}
KeyRef k = kv.key.removePrefix(keyServersPrefix);
if (k == allKeys.end) {
continue;
}
decodeKeyServersValue(tag_uid, kv.value, src, dest);
info.tags.clear();
info.src_info.clear();
updateTagInfo(src, info.tags, info.src_info);
info.dest_info.clear();
updateTagInfo(dest, info.tags, info.dest_info);
uniquify(info.tags);
keyInfoData.emplace_back(MapPair<Key, ServerCacheInfo>(k, info), 1);
}
// insert keyTag data separately from metadata mutations so that we can do one bulk insert which
@ -3766,7 +3824,7 @@ ACTOR Future<Void> processTransactionStateRequestPart(TransactionStateResolveCon
ASSERT(pContext->pCommitData != nullptr);
ASSERT(pContext->pActors != nullptr);
if (pContext->receivedSequences.count(request.sequence)) {
if (pContext->receivedSequences.contains(request.sequence)) {
if (pContext->receivedSequences.size() == pContext->maxSequence) {
wait(pContext->txnRecovery);
}

View File

@ -464,7 +464,7 @@ class ConfigBroadcasterImpl {
state BroadcastClientDetails client(
watcher, std::move(configClassSet), lastSeenVersion, std::move(broadcastInterface));
if (impl->clients.count(broadcastInterface.id())) {
if (impl->clients.contains(broadcastInterface.id())) {
// Client already registered
return Void();
}

View File

@ -694,19 +694,19 @@ void DDQueue::validate() {
for (auto it = inFlightRanges.begin(); it != inFlightRanges.end(); ++it) {
for (int i = 0; i < it->value().src.size(); i++) {
// each server in the inFlight map is in the busymap
if (!busymap.count(it->value().src[i]))
if (!busymap.contains(it->value().src[i]))
TraceEvent(SevError, "DDQueueValidateError8")
.detail("Problem", "each server in the inFlight map is in the busymap");
// relocate data that is inFlight is not also in the queue
if (queue[it->value().src[i]].count(it->value()))
if (queue[it->value().src[i]].contains(it->value()))
TraceEvent(SevError, "DDQueueValidateError9")
.detail("Problem", "relocate data that is inFlight is not also in the queue");
}
for (int i = 0; i < it->value().completeDests.size(); i++) {
// each server in the inFlight map is in the dest busymap
if (!destBusymap.count(it->value().completeDests[i]))
if (!destBusymap.contains(it->value().completeDests[i]))
TraceEvent(SevError, "DDQueueValidateError10")
.detail("Problem", "each server in the inFlight map is in the destBusymap");
}
@ -853,7 +853,7 @@ void DDQueue::queueRelocation(RelocateShard rs, std::set<UID>& serversToLaunchFr
// ASSERT(queueMapItr->value() == queueMap.rangeContaining(affectedQueuedItems[r].begin)->value());
RelocateData& rrs = queueMapItr->value();
if (rrs.src.size() == 0 && (rrs.keys == rd.keys || fetchingSourcesQueue.count(rrs) > 0)) {
if (rrs.src.size() == 0 && (rrs.keys == rd.keys || fetchingSourcesQueue.contains(rrs))) {
if (rrs.keys != rd.keys) {
delayDelete.insert(rrs);
}
@ -927,7 +927,7 @@ void DDQueue::queueRelocation(RelocateShard rs, std::set<UID>& serversToLaunchFr
}
void DDQueue::completeSourceFetch(const RelocateData& results) {
ASSERT(fetchingSourcesQueue.count(results));
ASSERT(fetchingSourcesQueue.contains(results));
// logRelocation( results, "GotSourceServers" );
@ -960,7 +960,7 @@ void DDQueue::launchQueuedWork(KeyRange keys, const DDEnabledState* ddEnabledSta
std::set<RelocateData, std::greater<RelocateData>> combined;
auto f = queueMap.intersectingRanges(keys);
for (auto it = f.begin(); it != f.end(); ++it) {
if (it->value().src.size() && queue[it->value().src[0]].count(it->value()))
if (it->value().src.size() && queue[it->value().src[0]].contains(it->value()))
combined.insert(it->value());
}
launchQueuedWork(combined, ddEnabledState);
@ -1064,7 +1064,7 @@ void DDQueue::launchQueuedWork(std::set<RelocateData, std::greater<RelocateData>
bool overlappingInFlight = false;
auto intersectingInFlight = inFlight.intersectingRanges(rd.keys);
for (auto it = intersectingInFlight.begin(); it != intersectingInFlight.end(); ++it) {
if (fetchKeysComplete.count(it->value()) && inFlightActors.liveActorAt(it->range().begin) &&
if (fetchKeysComplete.contains(it->value()) && inFlightActors.liveActorAt(it->range().begin) &&
!rd.keys.contains(it->range()) && it->value().priority >= rd.priority &&
rd.healthPriority < SERVER_KNOBS->PRIORITY_TEAM_UNHEALTHY) {
@ -1235,7 +1235,7 @@ int DDQueue::getHighestPriorityRelocation() const {
// return true if the servers are throttled as source for read rebalance
bool DDQueue::timeThrottle(const std::vector<UID>& ids) const {
return std::any_of(ids.begin(), ids.end(), [this](const UID& id) {
if (this->lastAsSource.count(id)) {
if (this->lastAsSource.contains(id)) {
return (now() - this->lastAsSource.at(id)) * SERVER_KNOBS->READ_REBALANCE_SRC_PARALLELISM <
SERVER_KNOBS->STORAGE_METRICS_AVERAGE_INTERVAL;
}
@ -1394,7 +1394,7 @@ static int nonOverlappedServerCount(const std::vector<UID>& srcIds, const std::v
std::unordered_set<UID> srcSet{ srcIds.begin(), srcIds.end() };
int count = 0;
for (int i = 0; i < destIds.size(); i++) {
if (srcSet.count(destIds[i]) == 0) {
if (!srcSet.contains(destIds[i])) {
count++;
}
}
@ -2231,7 +2231,7 @@ ACTOR Future<Void> dataDistributionRelocator(DDQueue* self,
inline double getWorstCpu(const HealthMetrics& metrics, const std::vector<UID>& ids) {
double cpu = 0;
for (auto& id : ids) {
if (metrics.storageStats.count(id)) {
if (metrics.storageStats.contains(id)) {
cpu = std::max(cpu, metrics.storageStats.at(id).cpuUsage);
} else {
// assume the server is too busy to report its stats

View File

@ -343,7 +343,7 @@ ACTOR Future<Void> trackShardMetrics(DataDistributionTracker::SafeAccessor self,
if (e.code() != error_code_actor_cancelled && e.code() != error_code_dd_tracker_cancelled) {
DisabledTraceEvent(SevDebug, "TrackShardError", self()->distributorId).detail("Keys", keys);
// The above loop uses Database cx, but those errors should only be thrown in code using a transaction.
ASSERT(transactionRetryableErrors.count(e.code()) == 0);
ASSERT(!transactionRetryableErrors.contains(e.code()));
self()->output.sendError(e); // Propagate failure to dataDistributionTracker
}
throw e;
@ -368,7 +368,7 @@ ACTOR Future<Void> readHotDetector(DataDistributionTracker* self) {
} catch (Error& e) {
if (e.code() != error_code_actor_cancelled) {
// Those errors should only be thrown in code using a transaction.
ASSERT(transactionRetryableErrors.count(e.code()) == 0);
ASSERT(!transactionRetryableErrors.contains(e.code()));
self->output.sendError(e); // Propagate failure to dataDistributionTracker
}
throw e;
@ -1837,7 +1837,7 @@ void PhysicalShardCollection::PhysicalShard::removeRange(const KeyRange& outRang
PhysicalShardAvailable PhysicalShardCollection::checkPhysicalShardAvailable(uint64_t physicalShardID,
StorageMetrics const& moveInMetrics) {
ASSERT(physicalShardID != UID().first() && physicalShardID != anonymousShardId.first());
ASSERT(physicalShardInstances.count(physicalShardID) > 0);
ASSERT(physicalShardInstances.contains(physicalShardID));
if (physicalShardInstances[physicalShardID].metrics.bytes + moveInMetrics.bytes >
SERVER_KNOBS->MAX_PHYSICAL_SHARD_BYTES) {
return PhysicalShardAvailable::False;
@ -1859,7 +1859,7 @@ void PhysicalShardCollection::updateTeamPhysicalShardIDsMap(uint64_t inputPhysic
ASSERT(inputTeams.size() <= 2);
ASSERT(inputPhysicalShardID != anonymousShardId.first() && inputPhysicalShardID != UID().first());
for (auto inputTeam : inputTeams) {
if (teamPhysicalShardIDs.count(inputTeam) == 0) {
if (!teamPhysicalShardIDs.contains(inputTeam)) {
std::set<uint64_t> physicalShardIDSet;
physicalShardIDSet.insert(inputPhysicalShardID);
teamPhysicalShardIDs.insert(std::make_pair(inputTeam, physicalShardIDSet));
@ -1876,7 +1876,7 @@ void PhysicalShardCollection::insertPhysicalShardToCollection(uint64_t physicalS
uint64_t debugID,
PhysicalShardCreationTime whenCreated) {
ASSERT(physicalShardID != anonymousShardId.first() && physicalShardID != UID().first());
ASSERT(physicalShardInstances.count(physicalShardID) == 0);
ASSERT(!physicalShardInstances.contains(physicalShardID));
physicalShardInstances.insert(
std::make_pair(physicalShardID, PhysicalShard(txnProcessor, physicalShardID, metrics, teams, whenCreated)));
return;
@ -1953,7 +1953,7 @@ Optional<uint64_t> PhysicalShardCollection::trySelectAvailablePhysicalShardFor(
uint64_t debugID) {
ASSERT(team.servers.size() > 0);
// Case: The team is not tracked in the mapping (teamPhysicalShardIDs)
if (teamPhysicalShardIDs.count(team) == 0) {
if (!teamPhysicalShardIDs.contains(team)) {
return Optional<uint64_t>();
}
ASSERT(teamPhysicalShardIDs[team].size() >= 1);
@ -1964,7 +1964,7 @@ Optional<uint64_t> PhysicalShardCollection::trySelectAvailablePhysicalShardFor(
if (physicalShardID == anonymousShardId.first() || physicalShardID == UID().first()) {
ASSERT(false);
}
ASSERT(physicalShardInstances.count(physicalShardID));
ASSERT(physicalShardInstances.contains(physicalShardID));
/*TraceEvent("TryGetPhysicalShardIDCandidates")
.detail("PhysicalShardID", physicalShardID)
.detail("Bytes", physicalShardInstances[physicalShardID].metrics.bytes)
@ -2005,14 +2005,14 @@ uint64_t PhysicalShardCollection::generateNewPhysicalShardID(uint64_t debugID) {
}
void PhysicalShardCollection::reduceMetricsForMoveOut(uint64_t physicalShardID, StorageMetrics const& moveOutMetrics) {
ASSERT(physicalShardInstances.count(physicalShardID) != 0);
ASSERT(physicalShardInstances.contains(physicalShardID));
ASSERT(physicalShardID != UID().first() && physicalShardID != anonymousShardId.first());
physicalShardInstances[physicalShardID].metrics = physicalShardInstances[physicalShardID].metrics - moveOutMetrics;
return;
}
void PhysicalShardCollection::increaseMetricsForMoveIn(uint64_t physicalShardID, StorageMetrics const& moveInMetrics) {
ASSERT(physicalShardInstances.count(physicalShardID) != 0);
ASSERT(physicalShardInstances.contains(physicalShardID));
ASSERT(physicalShardID != UID().first() && physicalShardID != anonymousShardId.first());
physicalShardInstances[physicalShardID].metrics = physicalShardInstances[physicalShardID].metrics + moveInMetrics;
return;
@ -2109,7 +2109,7 @@ std::pair<Optional<ShardsAffectedByTeamFailure::Team>, bool> PhysicalShardCollec
ASSERT(SERVER_KNOBS->SHARD_ENCODE_LOCATION_METADATA);
ASSERT(SERVER_KNOBS->ENABLE_DD_PHYSICAL_SHARD);
ASSERT(inputPhysicalShardID != anonymousShardId.first() && inputPhysicalShardID != UID().first());
if (physicalShardInstances.count(inputPhysicalShardID) == 0) {
if (!physicalShardInstances.contains(inputPhysicalShardID)) {
return { Optional<ShardsAffectedByTeamFailure::Team>(), true };
}
if (!checkPhysicalShardAvailable(inputPhysicalShardID, moveInMetrics)) {
@ -2141,7 +2141,7 @@ void PhysicalShardCollection::initPhysicalShardCollection(KeyRange keys,
ASSERT(physicalShardID != UID().first());
if (physicalShardID != anonymousShardId.first()) {
updateTeamPhysicalShardIDsMap(physicalShardID, selectedTeams, debugID);
if (physicalShardInstances.count(physicalShardID) == 0) {
if (!physicalShardInstances.contains(physicalShardID)) {
insertPhysicalShardToCollection(
physicalShardID, StorageMetrics(), selectedTeams, debugID, PhysicalShardCreationTime::DDInit);
} else {
@ -2181,7 +2181,7 @@ void PhysicalShardCollection::updatePhysicalShardCollection(
// Update physicalShardInstances
// Add the metrics to in-physicalShard
// e.detail("PhysicalShardIDIn", physicalShardID);
if (physicalShardInstances.count(physicalShardID) == 0) {
if (!physicalShardInstances.contains(physicalShardID)) {
// e.detail("Op", "Insert");
insertPhysicalShardToCollection(
physicalShardID, metrics, selectedTeams, debugID, PhysicalShardCreationTime::DDRelocator);
@ -2266,8 +2266,8 @@ void PhysicalShardCollection::cleanUpPhysicalShardCollection() {
}
for (auto it = physicalShardInstances.begin(); it != physicalShardInstances.end();) {
uint64_t physicalShardID = it->first;
ASSERT(physicalShardInstances.count(physicalShardID) > 0);
if (physicalShardsInUse.count(physicalShardID) == 0) {
ASSERT(physicalShardInstances.contains(physicalShardID));
if (!physicalShardsInUse.contains(physicalShardID)) {
/*TraceEvent("PhysicalShardisEmpty")
.detail("PhysicalShard", physicalShardID)
.detail("RemainBytes", physicalShardInstances[physicalShardID].metrics.bytes);*/
@ -2282,7 +2282,7 @@ void PhysicalShardCollection::cleanUpPhysicalShardCollection() {
for (auto [team, _] : teamPhysicalShardIDs) {
for (auto it = teamPhysicalShardIDs[team].begin(); it != teamPhysicalShardIDs[team].end();) {
uint64_t physicalShardID = *it;
if (physicalShardInstances.count(physicalShardID) == 0) {
if (!physicalShardInstances.contains(physicalShardID)) {
// physicalShardID has been removed from physicalShardInstances (see step 1)
// So, remove the physicalShard from teamPhysicalShardID[team]
it = teamPhysicalShardIDs[team].erase(it);
@ -2322,7 +2322,7 @@ void PhysicalShardCollection::logPhysicalShardCollection() {
uint64_t maxPhysicalShardID = 0;
uint64_t minPhysicalShardID = 0;
for (auto physicalShardID : physicalShardIDs) {
ASSERT(physicalShardInstances.count(physicalShardID) > 0);
ASSERT(physicalShardInstances.contains(physicalShardID));
uint64_t id = physicalShardInstances[physicalShardID].id;
int64_t bytes = physicalShardInstances[physicalShardID].metrics.bytes;
if (bytes > maxPhysicalShardBytes) {
@ -2352,14 +2352,14 @@ void PhysicalShardCollection::logPhysicalShardCollection() {
for (auto ssid : team.servers) {
for (auto it = teamPhysicalShardIDs[team].begin(); it != teamPhysicalShardIDs[team].end();) {
uint64_t physicalShardID = *it;
if (storageServerPhysicalShardStatus.count(ssid) != 0) {
if (storageServerPhysicalShardStatus[ssid].count(physicalShardID) == 0) {
ASSERT(physicalShardInstances.count(physicalShardID) > 0);
if (storageServerPhysicalShardStatus.contains(ssid)) {
if (!storageServerPhysicalShardStatus[ssid].contains(physicalShardID)) {
ASSERT(physicalShardInstances.contains(physicalShardID));
storageServerPhysicalShardStatus[ssid].insert(
std::make_pair(physicalShardID, physicalShardInstances[physicalShardID].metrics.bytes));
}
} else {
ASSERT(physicalShardInstances.count(physicalShardID) > 0);
ASSERT(physicalShardInstances.contains(physicalShardID));
std::map<uint64_t, int64_t> tmp;
tmp.insert(std::make_pair(physicalShardID, physicalShardInstances[physicalShardID].metrics.bytes));
storageServerPhysicalShardStatus.insert(std::make_pair(ssid, tmp));

View File

@ -91,7 +91,7 @@ class DDTeamCollectionImpl {
const ProcessData& workerData = workers[i];
AddressExclusion addr(workerData.address.ip, workerData.address.port);
existingAddrs.insert(addr);
if (self->invalidLocalityAddr.count(addr) &&
if (self->invalidLocalityAddr.contains(addr) &&
self->isValidLocality(self->configuration.storagePolicy, workerData.locality)) {
// The locality info on the addr has been corrected
self->invalidLocalityAddr.erase(addr);
@ -104,7 +104,7 @@ class DDTeamCollectionImpl {
// In case system operator permanently excludes workers on the address with invalid locality
for (auto addr = self->invalidLocalityAddr.begin(); addr != self->invalidLocalityAddr.end();) {
if (!existingAddrs.count(*addr)) {
if (!existingAddrs.contains(*addr)) {
// The address no longer has a worker
addr = self->invalidLocalityAddr.erase(addr);
hasCorrectedLocality = true;
@ -452,7 +452,7 @@ public:
bool foundSrc = false;
for (const auto& id : req.src) {
if (self->server_info.count(id)) {
if (self->server_info.contains(id)) {
foundSrc = true;
break;
}
@ -1224,7 +1224,7 @@ public:
}
ASSERT_EQ(tc->primary, t.primary);
// tc->traceAllInfo();
if (tc->server_info.count(t.servers[0])) {
if (tc->server_info.contains(t.servers[0])) {
auto& info = tc->server_info[t.servers[0]];
bool found = false;
@ -2173,14 +2173,14 @@ public:
// Do not retrigger and double-overwrite failed or wiggling servers
auto old = self->excludedServers.getKeys();
for (const auto& o : old) {
if (!exclusionTracker.excluded.count(o) && !exclusionTracker.failed.count(o) &&
if (!exclusionTracker.excluded.contains(o) && !exclusionTracker.failed.contains(o) &&
!(self->excludedServers.count(o) &&
self->excludedServers.get(o) == DDTeamCollection::Status::WIGGLING)) {
self->excludedServers.set(o, DDTeamCollection::Status::NONE);
}
}
for (const auto& n : exclusionTracker.excluded) {
if (!exclusionTracker.failed.count(n)) {
if (!exclusionTracker.failed.contains(n)) {
self->excludedServers.set(n, DDTeamCollection::Status::EXCLUDED);
}
}
@ -2783,7 +2783,7 @@ public:
if (newServer.present()) {
UID id = newServer.get().interf.id();
if (!self->server_and_tss_info.count(id)) {
if (!self->server_and_tss_info.contains(id)) {
if (!recruitTss || tssState->tssRecruitSuccess()) {
self->addServer(newServer.get().interf,
candidateWorker.processClass,
@ -3043,7 +3043,7 @@ public:
UID tssId = itr->second->getId();
StorageServerInterface tssi = itr->second->getLastKnownInterface();
if (self->shouldHandleServer(tssi) && self->server_and_tss_info.count(tssId)) {
if (self->shouldHandleServer(tssi) && self->server_and_tss_info.contains(tssId)) {
Promise<Void> killPromise = itr->second->killTss;
if (killPromise.canBeSet()) {
CODE_PROBE(tssToRecruit < 0, "Killing TSS due to too many TSS");
@ -3171,7 +3171,7 @@ public:
ProcessClass const& processClass = servers[i].second;
if (!self->shouldHandleServer(ssi)) {
continue;
} else if (self->server_and_tss_info.count(serverId)) {
} else if (self->server_and_tss_info.contains(serverId)) {
auto& serverInfo = self->server_and_tss_info[serverId];
if (ssi.getValue.getEndpoint() !=
serverInfo->getLastKnownInterface().getValue.getEndpoint() ||
@ -3185,7 +3185,7 @@ public:
serverInfo->interfaceChanged.getFuture());
currentInterfaceChanged.send(std::make_pair(ssi, processClass));
}
} else if (!self->recruitingIds.count(ssi.id())) {
} else if (!self->recruitingIds.contains(ssi.id())) {
self->addServer(ssi,
processClass,
self->serverTrackerErrorOut,
@ -3263,7 +3263,7 @@ public:
// if perpetual_storage_wiggle_locality has value and not 0(disabled).
if (!localityKeyValues.empty()) {
if (self->server_info.count(res.begin()->first)) {
if (self->server_info.contains(res.begin()->first)) {
auto server = self->server_info.at(res.begin()->first);
for (const auto& [localityKey, localityValue] : localityKeyValues) {
// Update the wigglingId only if it matches the locality.
@ -3975,14 +3975,14 @@ Optional<Reference<IDataDistributionTeam>> DDTeamCollection::findTeamFromServers
const std::set<UID> completeSources(servers.begin(), servers.end());
for (const auto& server : servers) {
if (!server_info.count(server)) {
if (!server_info.contains(server)) {
continue;
}
auto const& teamList = server_info[server]->getTeams();
for (const auto& team : teamList) {
bool found = true;
for (const UID& s : team->getServerIDs()) {
if (!completeSources.count(s)) {
if (!completeSources.contains(s)) {
found = false;
break;
}
@ -5688,7 +5688,7 @@ void DDTeamCollection::addServer(StorageServerInterface newServer,
if (newServer.isTss()) {
tss_info_by_pair[newServer.tssPairID.get()] = r;
if (server_info.count(newServer.tssPairID.get())) {
if (server_info.contains(newServer.tssPairID.get())) {
r->onTSSPairRemoved = server_info[newServer.tssPairID.get()]->onRemoved;
}
} else {
@ -5701,7 +5701,7 @@ void DDTeamCollection::addServer(StorageServerInterface newServer,
if (!newServer.isTss()) {
// link and wake up tss' tracker so it knows when this server gets removed
if (tss_info_by_pair.count(newServer.id())) {
if (tss_info_by_pair.contains(newServer.id())) {
tss_info_by_pair[newServer.id()]->onTSSPairRemoved = r->onRemoved;
if (tss_info_by_pair[newServer.id()]->wakeUpTracker.canBeSet()) {
auto p = tss_info_by_pair[newServer.id()]->wakeUpTracker;
@ -5987,7 +5987,7 @@ void DDTeamCollection::removeServer(UID removedServer) {
Future<Void> DDTeamCollection::excludeStorageServersForWiggle(const UID& id) {
Future<Void> moveFuture = Void();
if (this->server_info.count(id) != 0) {
if (this->server_info.contains(id)) {
auto& info = server_info.at(id);
AddressExclusion addr(info->getLastKnownInterface().address().ip, info->getLastKnownInterface().address().port);

View File

@ -207,7 +207,7 @@ Future<Void> StorageWiggler::onCheck() const {
void StorageWiggler::addServer(const UID& serverId, const StorageMetadataType& metadata) {
// std::cout << "size: " << pq_handles.size() << " add " << serverId.toString() << " DC: "
// << teamCollection->isPrimary() << std::endl;
ASSERT(!pq_handles.count(serverId));
ASSERT(!pq_handles.contains(serverId));
pq_handles[serverId] = wiggle_pq.emplace(metadata, serverId);
}
@ -1730,7 +1730,7 @@ ACTOR Future<std::map<NetworkAddress, std::pair<WorkerInterface, std::string>>>
.detail("SS", server.id());
++storageFailures;
} else {
if (result.count(server.address())) {
if (result.contains(server.address())) {
ASSERT(itr->second.id() == result[server.address()].first.id());
if (result[server.address()].second.find("storage") == std::string::npos)
result[server.address()].second.append(",storage");
@ -1755,7 +1755,7 @@ ACTOR Future<std::map<NetworkAddress, std::pair<WorkerInterface, std::string>>>
TraceEvent(SevWarn, "MissingTlogWorkerInterface").detail("TlogAddress", tlog.address());
throw snap_tlog_failed();
}
if (result.count(tlog.address())) {
if (result.contains(tlog.address())) {
ASSERT(workersMap[tlog.address()].id() == result[tlog.address()].first.id());
result[tlog.address()].second.append(",tlog");
} else {
@ -1779,7 +1779,7 @@ ACTOR Future<std::map<NetworkAddress, std::pair<WorkerInterface, std::string>>>
Optional<NetworkAddress> secondary = worker.interf.tLog.getEndpoint().addresses.secondaryAddress;
if (coordinatorsAddrSet.find(primary) != coordinatorsAddrSet.end() ||
(secondary.present() && (coordinatorsAddrSet.find(secondary.get()) != coordinatorsAddrSet.end()))) {
if (result.count(primary)) {
if (result.contains(primary)) {
ASSERT(workersMap[primary].id() == result[primary].first.id());
result[primary].second.append(",coord");
} else {
@ -1791,7 +1791,7 @@ ACTOR Future<std::map<NetworkAddress, std::pair<WorkerInterface, std::string>>>
for (const auto& worker : workers) {
const auto& processAddress = worker.interf.address();
// skip processes that are already included
if (result.count(processAddress))
if (result.contains(processAddress))
continue;
const auto& processClassType = worker.processClass.classType();
// coordinators are always configured to be recruited
@ -3784,7 +3784,7 @@ ACTOR Future<Void> dataDistributor_impl(DataDistributorInterface di,
}
when(DistributorSnapRequest snapReq = waitNext(di.distributorSnapReq.getFuture())) {
auto& snapUID = snapReq.snapUID;
if (ddSnapReqResultMap.count(snapUID)) {
if (ddSnapReqResultMap.contains(snapUID)) {
CODE_PROBE(true,
"Data distributor received a duplicate finished snapshot request",
probe::decoration::rare);
@ -3793,7 +3793,7 @@ ACTOR Future<Void> dataDistributor_impl(DataDistributorInterface di,
TraceEvent("RetryFinishedDistributorSnapRequest")
.detail("SnapUID", snapUID)
.detail("Result", result.isError() ? result.getError().code() : 0);
} else if (ddSnapReqMap.count(snapReq.snapUID)) {
} else if (ddSnapReqMap.contains(snapReq.snapUID)) {
CODE_PROBE(true, "Data distributor received a duplicate ongoing snapshot request");
TraceEvent("RetryOngoingDistributorSnapRequest").detail("SnapUID", snapUID);
ASSERT(snapReq.snapPayload == ddSnapReqMap[snapUID].snapPayload);
@ -3836,7 +3836,7 @@ ACTOR Future<Void> dataDistributor_impl(DataDistributorInterface di,
}
}
} catch (Error& err) {
if (normalDataDistributorErrors().count(err.code()) == 0) {
if (!(normalDataDistributorErrors().contains(err.code()))) {
TraceEvent("DataDistributorError", di.id()).errorUnsuppressed(err);
throw err;
}

View File

@ -1154,7 +1154,8 @@ ACTOR Future<Void> checkRemoved(Reference<AsyncVar<ServerDBInfo> const> db,
GrvProxyInterface myInterface) {
loop {
if (db->get().recoveryCount >= recoveryCount &&
!std::count(db->get().client.grvProxies.begin(), db->get().client.grvProxies.end(), myInterface)) {
std::find(db->get().client.grvProxies.begin(), db->get().client.grvProxies.end(), myInterface) ==
db->get().client.grvProxies.end()) {
throw worker_removed();
}
wait(db->onChange());

View File

@ -52,6 +52,7 @@ public:
bool exactRecovery,
bool enableEncryption);
bool getReplaceContent() const override { return replaceContent; }
// IClosable
Future<Void> getError() const override { return log->getError(); }
Future<Void> onClosed() const override { return log->onClosed(); }
@ -494,7 +495,7 @@ private:
uint32_t opType = (uint32_t)op;
// Make sure the first bit of the optype is empty
ASSERT(opType >> ENCRYPTION_ENABLED_BIT == 0);
if (!enableEncryption || metaOps.count(op) > 0) {
if (!enableEncryption || metaOps.contains(op)) {
OpHeader h = { opType, v1.size(), v2.size() };
log->push(StringRef((const uint8_t*)&h, sizeof(h)));
log->push(v1);
@ -545,7 +546,7 @@ private:
ASSERT(!isOpEncrypted(&h));
// Metadata op types to be excluded from encryption.
static std::unordered_set<OpType> metaOps = { OpSnapshotEnd, OpSnapshotAbort, OpCommit, OpRollback };
if (metaOps.count((OpType)h.op) == 0) {
if (!metaOps.contains((OpType)h.op)) {
// It is not supported to open an encrypted store as unencrypted, or vice-versa.
ASSERT_EQ(encryptedOp, self->enableEncryption);
}

View File

@ -574,7 +574,6 @@ rocksdb::ColumnFamilyOptions getCFOptions() {
}
rocksdb::BlockBasedTableOptions bbOpts;
// TODO: Add a knob for the block cache size. (Default is 8 MB)
if (SERVER_KNOBS->SHARDED_ROCKSDB_PREFIX_LEN > 0) {
// Prefix blooms are used during Seek.
options.prefix_extractor.reset(rocksdb::NewFixedPrefixTransform(SERVER_KNOBS->SHARDED_ROCKSDB_PREFIX_LEN));
@ -715,121 +714,104 @@ rocksdb::ReadOptions getReadOptions() {
}
struct ReadIterator {
uint64_t index; // incrementing counter to uniquely identify read iterator.
bool inUse;
std::shared_ptr<rocksdb::Iterator> iter;
std::unique_ptr<rocksdb::Iterator> iter;
double creationTime;
KeyRange keyRange;
std::shared_ptr<rocksdb::Slice> beginSlice, endSlice;
std::unique_ptr<rocksdb::Slice> beginSlice, endSlice;
ReadIterator(rocksdb::ColumnFamilyHandle* cf, uint64_t index, rocksdb::DB* db)
: index(index), inUse(true), creationTime(now()), iter(db->NewIterator(getReadOptions(), cf)) {}
ReadIterator(rocksdb::ColumnFamilyHandle* cf, uint64_t index, rocksdb::DB* db, const KeyRange& range)
: index(index), inUse(true), creationTime(now()), keyRange(range) {
ReadIterator(rocksdb::ColumnFamilyHandle* cf, rocksdb::DB* db)
: creationTime(now()), iter(db->NewIterator(getReadOptions(), cf)) {}
ReadIterator(rocksdb::ColumnFamilyHandle* cf, rocksdb::DB* db, const KeyRange& range)
: creationTime(now()), keyRange(range) {
auto options = getReadOptions();
beginSlice = std::shared_ptr<rocksdb::Slice>(new rocksdb::Slice(toSlice(keyRange.begin)));
beginSlice = std::unique_ptr<rocksdb::Slice>(new rocksdb::Slice(toSlice(keyRange.begin)));
options.iterate_lower_bound = beginSlice.get();
endSlice = std::shared_ptr<rocksdb::Slice>(new rocksdb::Slice(toSlice(keyRange.end)));
endSlice = std::unique_ptr<rocksdb::Slice>(new rocksdb::Slice(toSlice(keyRange.end)));
options.iterate_upper_bound = endSlice.get();
iter = std::shared_ptr<rocksdb::Iterator>(db->NewIterator(options, cf));
iter = std::unique_ptr<rocksdb::Iterator>(db->NewIterator(options, cf));
}
};
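The IteratorPool below replaces the old shared ReadIteratorPool with one cached iterator per shard, keyed by shard id, returned to the pool after use and evicted once it grows stale. A minimal standalone sketch of that keyed-pool pattern (hypothetical CachedItem type and caller-supplied timestamps; illustration only, not the RocksDB-backed implementation):
#include <memory>
#include <mutex>
#include <string>
#include <unordered_map>
struct CachedItem {
    double creationTime = 0; // when the pooled resource (e.g. an iterator) was created
};
class KeyedPool {
public:
    // Take the cached item for this id out of the pool, or nullptr if none is cached.
    std::shared_ptr<CachedItem> get(const std::string& id) {
        std::lock_guard<std::mutex> lock(mu);
        auto it = pool.find(id);
        if (it == pool.end())
            return nullptr;
        auto ret = it->second;
        pool.erase(it);
        return ret;
    }
    // Return an item after use; the most recently returned item for an id wins.
    void put(const std::string& id, std::shared_ptr<CachedItem> item) {
        std::lock_guard<std::mutex> lock(mu);
        pool[id] = std::move(item);
    }
    // Drop items older than maxAge seconds, given the current time.
    void refresh(double now, double maxAge) {
        std::lock_guard<std::mutex> lock(mu);
        for (auto it = pool.begin(); it != pool.end();) {
            if (now - it->second->creationTime > maxAge)
                it = pool.erase(it);
            else
                ++it;
        }
    }
private:
    std::mutex mu;
    std::unordered_map<std::string, std::shared_ptr<CachedItem>> pool;
};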
/*
ReadIteratorPool: Collection of iterators. Reuses iterators on non-concurrent multiple read operations,
instead of creating and deleting for every read.
Read: IteratorPool provides an unused iterator if exists or creates and gives a new iterator.
Returns back the iterator after the read is done.
Write: Iterators in the pool are deleted, forcing new iterator creation on next reads. The iterators
which are currently used by the reads can continue using the iterator as it is a shared_ptr. Once
the read is processed, shared_ptr goes out of scope and gets deleted. Eventually the iterator object
gets deleted as the ref count becomes 0.
*/
class ReadIteratorPool {
// Stores iterators for all shards for future reuse. One iterator is stored per shard.
class IteratorPool {
public:
ReadIteratorPool(rocksdb::DB* db, rocksdb::ColumnFamilyHandle* cf, const std::string& path)
: db(db), cf(cf), index(0), iteratorsReuseCount(0) {
ASSERT(db);
ASSERT(cf);
TraceEvent(SevVerbose, "ShardedRocksReadIteratorPool")
.detail("Path", path)
.detail("KnobRocksDBReadRangeReuseIterators", SERVER_KNOBS->SHARDED_ROCKSDB_REUSE_ITERATORS)
.detail("KnobRocksDBPrefixLen", SERVER_KNOBS->SHARDED_ROCKSDB_PREFIX_LEN);
}
IteratorPool() {}
// Called on every db commit.
void update() {
if (SERVER_KNOBS->SHARDED_ROCKSDB_REUSE_ITERATORS) {
std::lock_guard<std::mutex> lock(mutex);
iteratorsMap.clear();
}
}
// Called on every read operation.
ReadIterator getIterator(const KeyRange& range) {
// Shared iterators are not bounded.
if (SERVER_KNOBS->SHARDED_ROCKSDB_REUSE_ITERATORS) {
std::lock_guard<std::mutex> lock(mutex);
for (it = iteratorsMap.begin(); it != iteratorsMap.end(); it++) {
if (!it->second.inUse) {
it->second.inUse = true;
iteratorsReuseCount++;
return it->second;
}
}
index++;
ReadIterator iter(cf, index, db);
iteratorsMap.insert({ index, iter });
return iter;
std::shared_ptr<ReadIterator> getIterator(const std::string& id) {
std::unique_lock<std::mutex> lock(mu);
auto it = pool.find(id);
if (it == pool.end()) {
++numNewIterators;
return nullptr;
} else {
index++;
ReadIterator iter(cf, index, db, range);
return iter;
auto ret = it->second;
pool.erase(it);
++numReusedIters;
return ret;
}
}
// Called on every read operation, after the keys are collected.
void returnIterator(ReadIterator& iter) {
if (SERVER_KNOBS->SHARDED_ROCKSDB_REUSE_ITERATORS) {
std::lock_guard<std::mutex> lock(mutex);
it = iteratorsMap.find(iter.index);
// iterator found: put the iterator back to the pool(inUse=false).
// iterator not found: update would have removed the iterator from pool, so nothing to do.
if (it != iteratorsMap.end()) {
ASSERT(it->second.inUse);
it->second.inUse = false;
}
void returnIterator(const std::string& id, std::shared_ptr<ReadIterator> iterator) {
ASSERT(iterator != nullptr);
std::unique_lock<std::mutex> lock(mu);
auto it = pool.find(id);
if (it != pool.end()) {
// An iterator already exists in the pool; replace it anyway.
++numReplacedIters;
}
pool[id] = iterator;
}
// Called every ROCKSDB_READ_RANGE_ITERATOR_REFRESH_TIME seconds in a loop.
void refreshIterators() {
std::lock_guard<std::mutex> lock(mutex);
it = iteratorsMap.begin();
while (it != iteratorsMap.end()) {
if (now() - it->second.creationTime > SERVER_KNOBS->ROCKSDB_READ_RANGE_ITERATOR_REFRESH_TIME) {
it = iteratorsMap.erase(it);
void refresh() {
std::unique_lock<std::mutex> lock(mu);
auto poolSize = pool.size();
auto it = pool.begin();
auto currTime = now();
int refreshedIterCount = 0;
while (it != pool.end()) {
if (currTime - it->second->creationTime > SERVER_KNOBS->ROCKSDB_READ_RANGE_ITERATOR_REFRESH_TIME) {
it = pool.erase(it);
++refreshedIterCount;
} else {
it++;
++it;
}
}
TraceEvent("RefreshIterators")
.detail("NumReplacedIterators", numReplacedIters)
.detail("NumReusedIterators", numReusedIters)
.detail("NumNewIterators", numNewIterators)
.detail("PoolSize", poolSize)
.detail("RefreshedIterators", refreshedIterCount);
numReplacedIters = 0;
numReusedIters = 0;
numNewIterators = 0;
}
void clear() {
std::unique_lock<std::mutex> lock(mu);
pool.clear();
}
void update(const std::string& id) {
std::unique_lock<std::mutex> lock(mu);
auto it = pool.find(id);
if (it != pool.end()) {
it->second->iter->Refresh();
}
}
uint64_t numReadIteratorsCreated() { return index; }
uint64_t numTimesReadIteratorsReused() { return iteratorsReuseCount; }
void erase(const std::string& id) {
std::unique_lock<std::mutex> lock(mu);
pool.erase(id);
}
private:
std::unordered_map<int, ReadIterator> iteratorsMap;
std::unordered_map<int, ReadIterator>::iterator it;
rocksdb::DB* db;
rocksdb::ColumnFamilyHandle* cf;
std::mutex mutex;
// incrementing counter for every new iterator creation, to uniquely identify the iterator in returnIterator().
uint64_t index;
uint64_t iteratorsReuseCount;
std::mutex mu;
std::unordered_map<std::string, std::shared_ptr<ReadIterator>> pool;
uint64_t numReplacedIters = 0;
uint64_t numReusedIters = 0;
uint64_t numNewIterators = 0;
};
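Taken together, getIterator/returnIterator give readers a check-out/check-in protocol keyed by shard id, with refresh() expiring anything older than the refresh-time knob. A sketch of the intended calling pattern (it mirrors readRangeInDb further down; shard stands for a PhysicalShard with valid cf/db members):

std::shared_ptr<ReadIterator> readIter = iteratorPool->getIterator(shard->id);
if (readIter == nullptr) {
	// Pool miss: build a fresh, unbounded iterator for this shard.
	readIter = std::make_shared<ReadIterator>(shard->cf, shard->db);
}
// ... seek and scan via readIter->iter ...
iteratorPool->returnIterator(shard->id, readIter); // check it back in for the next read

Only reusable (unbounded) iterators go through the pool; range-bounded iterators are created per read and dropped, as readRangeInDb does when reuse is disabled.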
ACTOR Future<Void> flowLockLogger(const FlowLock* readLock, const FlowLock* fetchLock) {
@ -863,7 +845,6 @@ struct PhysicalShard {
PhysicalShard(rocksdb::DB* db, std::string id, rocksdb::ColumnFamilyHandle* handle)
: db(db), id(id), cf(handle), isInitialized(true) {
ASSERT(cf);
readIterPool = std::make_shared<ReadIteratorPool>(db, cf, id);
}
rocksdb::Status init() {
@ -876,7 +857,6 @@ struct PhysicalShard {
return status;
}
logShardEvent(id, ShardOp::OPEN);
readIterPool = std::make_shared<ReadIteratorPool>(db, cf, id);
this->isInitialized.store(true);
return status;
}
@ -941,10 +921,7 @@ struct PhysicalShard {
.detail("Checkpoint", checkpoint.toString());
if (status.ok()) {
if (!this->isInitialized) {
readIterPool = std::make_shared<ReadIteratorPool>(db, cf, id);
this->isInitialized.store(true);
} else if (SERVER_KNOBS->SHARDED_ROCKSDB_REUSE_ITERATORS) {
this->readIterPool->update();
}
}
@ -953,11 +930,6 @@ struct PhysicalShard {
bool initialized() { return this->isInitialized.load(); }
void refreshReadIteratorPool() {
ASSERT(this->readIterPool != nullptr);
this->readIterPool->refreshIterators();
}
std::vector<KeyRange> getAllRanges() const {
std::vector<KeyRange> res;
for (const auto& [key, shard] : dataShards) {
@ -986,7 +958,6 @@ struct PhysicalShard {
~PhysicalShard() {
logShardEvent(id, ShardOp::CLOSE);
isInitialized.store(false);
readIterPool.reset();
// Deleting default column family is not allowed.
if (deletePending && id != DEFAULT_CF_NAME) {
@ -1011,7 +982,6 @@ struct PhysicalShard {
rocksdb::ColumnFamilyOptions cfOptions;
rocksdb::ColumnFamilyHandle* cf = nullptr;
std::unordered_map<std::string, std::unique_ptr<DataShard>> dataShards;
std::shared_ptr<ReadIteratorPool> readIterPool;
bool deletePending = false;
std::atomic<bool> isInitialized;
uint64_t numRangeDeletions = 0;
@ -1019,19 +989,40 @@ struct PhysicalShard {
double lastCompactionTime = 0.0;
};
int readRangeInDb(PhysicalShard* shard, const KeyRangeRef range, int rowLimit, int byteLimit, RangeResult* result) {
int readRangeInDb(PhysicalShard* shard,
const KeyRangeRef range,
int rowLimit,
int byteLimit,
RangeResult* result,
std::shared_ptr<IteratorPool> iteratorPool) {
if (rowLimit == 0 || byteLimit == 0) {
return 0;
}
int accumulatedBytes = 0;
rocksdb::Status s;
std::shared_ptr<ReadIterator> readIter = nullptr;
bool reuseIterator = SERVER_KNOBS->SHARDED_ROCKSDB_REUSE_ITERATORS && iteratorPool != nullptr;
if (g_network->isSimulated() &&
deterministicRandom()->random01() > SERVER_KNOBS->ROCKSDB_PROBABILITY_REUSE_ITERATOR_SIM) {
// Reduce probability of reusing iterators in simulation.
reuseIterator = false;
}
if (reuseIterator) {
readIter = iteratorPool->getIterator(shard->id);
if (readIter == nullptr) {
readIter = std::make_shared<ReadIterator>(shard->cf, shard->db);
}
} else {
readIter = std::make_shared<ReadIterator>(shard->cf, shard->db, range);
}
// When using a prefix extractor, ensure that keys are returned in order even if they cross
// a prefix boundary.
if (rowLimit >= 0) {
ReadIterator readIter = shard->readIterPool->getIterator(range);
auto cursor = readIter.iter;
auto* cursor = readIter->iter.get();
cursor->Seek(toSlice(range.begin));
while (cursor->Valid() && toStringRef(cursor->key()) < range.end) {
KeyValueRef kv(toStringRef(cursor->key()), toStringRef(cursor->value()));
@ -1044,10 +1035,8 @@ int readRangeInDb(PhysicalShard* shard, const KeyRangeRef range, int rowLimit, i
cursor->Next();
}
s = cursor->status();
shard->readIterPool->returnIterator(readIter);
} else {
ReadIterator readIter = shard->readIterPool->getIterator(range);
auto cursor = readIter.iter;
auto* cursor = readIter->iter.get();
cursor->SeekForPrev(toSlice(range.end));
if (cursor->Valid() && toStringRef(cursor->key()) == range.end) {
cursor->Prev();
@ -1063,7 +1052,6 @@ int readRangeInDb(PhysicalShard* shard, const KeyRangeRef range, int rowLimit, i
cursor->Prev();
}
s = cursor->status();
shard->readIterPool->returnIterator(readIter);
}
if (!s.ok()) {
@ -1072,6 +1060,9 @@ int readRangeInDb(PhysicalShard* shard, const KeyRangeRef range, int rowLimit, i
// should never be returned to user.
return -1;
}
if (reuseIterator) {
iteratorPool->returnIterator(shard->id, readIter);
}
return accumulatedBytes;
}
@ -1094,9 +1085,10 @@ public:
const rocksdb::DBOptions& options,
std::shared_ptr<RocksDBErrorListener> errorListener,
std::shared_ptr<RocksDBEventListener> eventListener,
Counters* cc)
Counters* cc,
std::shared_ptr<IteratorPool> iteratorPool)
: path(path), logId(logId), dbOptions(options), cfOptions(getCFOptions()), dataShardMap(nullptr, specialKeys.end),
counters(cc) {
counters(cc), iteratorPool(iteratorPool) {
if (!g_network->isSimulated()) {
// Generating trace events in non-FDB thread will cause errors. The event listener is tested with local FDB
// cluster.
@ -1250,7 +1242,8 @@ public:
keyRange,
std::max(2, SERVER_KNOBS->ROCKSDB_READ_RANGE_ROW_LIMIT),
SERVER_KNOBS->SHARD_METADATA_SCAN_BYTES_LIMIT,
&metadata);
&metadata,
iteratorPool);
if (bytes <= 0) {
break;
}
@ -1344,7 +1337,6 @@ public:
if (!status.ok()) {
return status;
}
metadataShard->readIterPool->update();
TraceEvent(SevInfo, "ShardedRocksInitializeMetaDataShard", this->logId)
.detail("MetadataShardCF", metadataShard->cf->GetID());
}
@ -1355,6 +1347,7 @@ public:
SERVER_KNOBS->ROCKSDB_WRITEBATCH_PROTECTION_BYTES_PER_KEY, // protection_bytes_per_key
0 /* default_cf_ts_sz default:0 */);
dirtyShards = std::make_unique<std::set<PhysicalShard*>>();
iteratorPool->update(getMetaDataShard()->id);
TraceEvent(SevInfo, "ShardedRocksDBInitEnd", this->logId)
.detail("DataPath", path)
@ -1493,7 +1486,7 @@ public:
if (SERVER_KNOBS->ROCKSDB_EMPTY_RANGE_CHECK && existingShard->initialized()) {
// Enable consistency validation.
RangeResult rangeResult;
auto bytesRead = readRangeInDb(existingShard, range, 1, UINT16_MAX, &rangeResult);
auto bytesRead = readRangeInDb(existingShard, range, 1, UINT16_MAX, &rangeResult, iteratorPool);
if (bytesRead > 0) {
TraceEvent(SevError, "ShardedRocksDBRangeNotEmpty")
.detail("ShardId", existingShard->toString())
@ -1913,6 +1906,7 @@ private:
KeyRangeMap<DataShard*> dataShardMap;
std::deque<std::string> pendingDeletionShards;
Counters* counters;
std::shared_ptr<IteratorPool> iteratorPool;
};
class RocksDBMetrics {
@ -1942,8 +1936,6 @@ public:
Reference<Histogram> getCommitQueueWaitHistogram();
Reference<Histogram> getWriteHistogram();
Reference<Histogram> getDeleteCompactRangeHistogram();
// Stat for Memory Usage
void logMemUsage(rocksdb::DB* db);
std::vector<std::pair<std::string, int64_t>> getManifestBytes(std::string manifestDirectory);
private:
@ -2085,6 +2077,8 @@ RocksDBMetrics::RocksDBMetrics(UID debugID, std::shared_ptr<rocksdb::Statistics>
{ "RowCacheHit", rocksdb::ROW_CACHE_HIT, 0 },
{ "RowCacheMiss", rocksdb::ROW_CACHE_MISS, 0 },
{ "CountIterSkippedKeys", rocksdb::NUMBER_ITER_SKIP, 0 },
{ "NoIteratorCreated", rocksdb::NO_ITERATOR_CREATED, 0 },
{ "NoIteratorDeleted", rocksdb::NO_ITERATOR_DELETED, 0 },
};
@ -2263,20 +2257,6 @@ void RocksDBMetrics::logStats(rocksdb::DB* db, std::string manifestDirectory) {
}
}
void RocksDBMetrics::logMemUsage(rocksdb::DB* db) {
TraceEvent e(SevInfo, "ShardedRocksDBMemMetrics", debugID);
uint64_t stat;
ASSERT(db != nullptr);
ASSERT(db->GetAggregatedIntProperty(rocksdb::DB::Properties::kBlockCacheUsage, &stat));
e.detail("BlockCacheUsage", stat);
ASSERT(db->GetAggregatedIntProperty(rocksdb::DB::Properties::kEstimateTableReadersMem, &stat));
e.detail("EstimateSstReaderBytes", stat);
ASSERT(db->GetAggregatedIntProperty(rocksdb::DB::Properties::kCurSizeAllMemTables, &stat));
e.detail("AllMemtablesBytes", stat);
ASSERT(db->GetAggregatedIntProperty(rocksdb::DB::Properties::kBlockCachePinnedUsage, &stat));
e.detail("BlockCachePinnedUsage", stat);
}
void RocksDBMetrics::resetPerfContext() {
rocksdb::SetPerfLevel(rocksdb::PerfLevel::kEnableCount);
rocksdb::get_perf_context()->Reset();
@ -2460,7 +2440,6 @@ ACTOR Future<Void> rocksDBAggregatedMetricsLogger(std::shared_ptr<ShardedRocksDB
break;
}
rocksDBMetrics->logStats(db, manifestDirectory);
rocksDBMetrics->logMemUsage(db);
if (SERVER_KNOBS->ROCKSDB_PERFCONTEXT_SAMPLE_RATE != 0) {
rocksDBMetrics->logPerfContext(true);
}
@ -2476,33 +2455,30 @@ ACTOR Future<Void> rocksDBAggregatedMetricsLogger(std::shared_ptr<ShardedRocksDB
struct ShardedRocksDBKeyValueStore : IKeyValueStore {
using CF = rocksdb::ColumnFamilyHandle*;
ACTOR static Future<Void> refreshReadIteratorPools(
std::shared_ptr<ShardedRocksDBState> rState,
Future<Void> readyToStart,
std::unordered_map<std::string, std::shared_ptr<PhysicalShard>>* physicalShards) {
ACTOR static Future<Void> refreshIteratorPool(std::shared_ptr<ShardedRocksDBState> rState,
std::shared_ptr<IteratorPool> iteratorPool,
Future<Void> readyToStart) {
if (!SERVER_KNOBS->SHARDED_ROCKSDB_REUSE_ITERATORS) {
return Void();
}
state Reference<Histogram> histogram = Histogram::getHistogram(
ROCKSDBSTORAGE_HISTOGRAM_GROUP, "TimeSpentRefreshIterators"_sr, Histogram::Unit::milliseconds);
if (SERVER_KNOBS->SHARDED_ROCKSDB_REUSE_ITERATORS) {
try {
wait(readyToStart);
loop {
wait(delay(SERVER_KNOBS->ROCKSDB_READ_RANGE_ITERATOR_REFRESH_TIME));
if (rState->closing) {
break;
}
double startTime = timer_monotonic();
for (auto& [_, shard] : *physicalShards) {
if (shard->initialized()) {
shard->refreshReadIteratorPool();
}
}
histogram->sample(timer_monotonic() - startTime);
}
} catch (Error& e) {
if (e.code() != error_code_actor_cancelled) {
TraceEvent(SevError, "RefreshReadIteratorPoolError").errorUnsuppressed(e);
try {
wait(readyToStart);
loop {
wait(delay(SERVER_KNOBS->ROCKSDB_READ_RANGE_ITERATOR_REFRESH_TIME));
if (rState->closing) {
break;
}
double startTime = timer_monotonic();
iteratorPool->refresh();
histogram->sample(timer_monotonic() - startTime);
}
} catch (Error& e) {
if (e.code() != error_code_actor_cancelled) {
TraceEvent(SevError, "RefreshReadIteratorPoolError").errorUnsuppressed(e);
}
}
@ -2648,14 +2624,16 @@ struct ShardedRocksDBKeyValueStore : IKeyValueStore {
int threadIndex;
std::unordered_map<uint32_t, rocksdb::ColumnFamilyHandle*>* columnFamilyMap;
std::shared_ptr<RocksDBMetrics> rocksDBMetrics;
std::shared_ptr<IteratorPool> iteratorPool;
double sampleStartTime;
explicit Writer(UID logId,
int threadIndex,
std::unordered_map<uint32_t, rocksdb::ColumnFamilyHandle*>* columnFamilyMap,
std::shared_ptr<RocksDBMetrics> rocksDBMetrics)
std::shared_ptr<RocksDBMetrics> rocksDBMetrics,
std::shared_ptr<IteratorPool> iteratorPool)
: logId(logId), threadIndex(threadIndex), columnFamilyMap(columnFamilyMap), rocksDBMetrics(rocksDBMetrics),
sampleStartTime(now()) {}
iteratorPool(iteratorPool), sampleStartTime(now()) {}
~Writer() override {}
@ -2736,6 +2714,7 @@ struct ShardedRocksDBKeyValueStore : IKeyValueStore {
columnFamilyMap->erase(shard->cf->GetID());
a.metadataShard->db->Delete(
rocksdb::WriteOptions(), a.metadataShard->cf, compactionTimestampPrefix.toString() + shard->id);
iteratorPool->erase(shard->id);
}
TraceEvent("RemoveShardTime").detail("Duration", now() - start).detail("Size", a.shards.size());
a.shards.clear();
@ -2849,7 +2828,7 @@ struct ShardedRocksDBKeyValueStore : IKeyValueStore {
if (SERVER_KNOBS->SHARDED_ROCKSDB_REUSE_ITERATORS) {
for (auto shard : *(a.dirtyShards)) {
shard->readIterPool->update();
iteratorPool->update(shard->id);
}
}
@ -3309,10 +3288,15 @@ struct ShardedRocksDBKeyValueStore : IKeyValueStore {
double readRangeTimeout;
int threadIndex;
std::shared_ptr<RocksDBMetrics> rocksDBMetrics;
std::shared_ptr<IteratorPool> iteratorPool;
double sampleStartTime;
explicit Reader(UID logId, int threadIndex, std::shared_ptr<RocksDBMetrics> rocksDBMetrics)
: logId(logId), threadIndex(threadIndex), rocksDBMetrics(rocksDBMetrics), sampleStartTime(now()) {
explicit Reader(UID logId,
int threadIndex,
std::shared_ptr<RocksDBMetrics> rocksDBMetrics,
std::shared_ptr<IteratorPool> iteratorPool)
: logId(logId), threadIndex(threadIndex), rocksDBMetrics(rocksDBMetrics), iteratorPool(iteratorPool),
sampleStartTime(now()) {
readValueTimeout = SERVER_KNOBS->ROCKSDB_READ_VALUE_TIMEOUT;
readValuePrefixTimeout = SERVER_KNOBS->ROCKSDB_READ_VALUE_PREFIX_TIMEOUT;
readRangeTimeout = SERVER_KNOBS->ROCKSDB_READ_RANGE_TIMEOUT;
@ -3574,7 +3558,7 @@ struct ShardedRocksDBKeyValueStore : IKeyValueStore {
.detail("Reason", shard == nullptr ? "Not Exist" : "Not Initialized");
continue;
}
auto bytesRead = readRangeInDb(shard, range, rowLimit, byteLimit, &result);
auto bytesRead = readRangeInDb(shard, range, rowLimit, byteLimit, &result, iteratorPool);
if (bytesRead < 0) {
// Error reading an instance.
a.result.sendError(internal_error());
@ -3629,7 +3613,8 @@ struct ShardedRocksDBKeyValueStore : IKeyValueStore {
errorListener(std::make_shared<RocksDBErrorListener>()),
eventListener(std::make_shared<RocksDBEventListener>(id)),
errorFuture(forwardError(errorListener->getFuture())), dbOptions(getOptions()),
shardManager(path, id, dbOptions, errorListener, eventListener, &counters),
iteratorPool(std::make_shared<IteratorPool>()),
shardManager(path, id, dbOptions, errorListener, eventListener, &counters, iteratorPool),
rocksDBMetrics(std::make_shared<RocksDBMetrics>(id, dbOptions.statistics)) {
// In simulation, run the reader/writer threads as Coro threads (i.e., in the network thread). The storage
// engine is still multi-threaded as background compaction threads are still present. Reads/writes to disk
@ -3652,12 +3637,13 @@ struct ShardedRocksDBKeyValueStore : IKeyValueStore {
compactionThread = createGenericThreadPool(0, SERVER_KNOBS->ROCKSDB_COMPACTION_THREAD_PRIORITY);
readThreads = createGenericThreadPool(/*stackSize=*/0, SERVER_KNOBS->ROCKSDB_READER_THREAD_PRIORITY);
}
writeThread->addThread(new Writer(id, 0, shardManager.getColumnFamilyMap(), rocksDBMetrics), "fdb-rocksdb-wr");
writeThread->addThread(new Writer(id, 0, shardManager.getColumnFamilyMap(), rocksDBMetrics, iteratorPool),
"fdb-rocksdb-wr");
compactionThread->addThread(new CompactionWorker(id), "fdb-rocksdb-cw");
TraceEvent("ShardedRocksDBReadThreads", id)
.detail("KnobRocksDBReadParallelism", SERVER_KNOBS->ROCKSDB_READ_PARALLELISM);
for (unsigned i = 0; i < SERVER_KNOBS->ROCKSDB_READ_PARALLELISM; ++i) {
readThreads->addThread(new Reader(id, i, rocksDBMetrics), "fdb-rocksdb-re");
readThreads->addThread(new Reader(id, i, rocksDBMetrics, iteratorPool), "fdb-rocksdb-re");
}
}
@ -3679,6 +3665,7 @@ struct ShardedRocksDBKeyValueStore : IKeyValueStore {
}
TraceEvent("CloseKeyValueStore").detail("DeleteKVS", deleteOnClose);
self->iteratorPool->clear();
auto a = new Writer::CloseAction(&self->shardManager, deleteOnClose);
auto f = a->done.getFuture();
self->writeThread->post(a);
@ -3729,7 +3716,7 @@ struct ShardedRocksDBKeyValueStore : IKeyValueStore {
ShardManager::shardMetricsLogger(this->rState, openFuture, &shardManager) &&
rocksDBAggregatedMetricsLogger(this->rState, openFuture, rocksDBMetrics, &shardManager, this->path);
this->compactionJob = compactShards(this->rState, openFuture, &shardManager, compactionThread);
this->refreshHolder = refreshReadIteratorPools(this->rState, openFuture, shardManager.getAllShards());
this->refreshHolder = refreshIteratorPool(this->rState, iteratorPool, openFuture);
this->refreshRocksDBBackgroundWorkHolder =
refreshRocksDBBackgroundEventCounter(this->id, this->eventListener);
this->cleanUpJob = emptyShardCleaner(this->rState, openFuture, &shardManager, writeThread);
@ -4084,6 +4071,7 @@ struct ShardedRocksDBKeyValueStore : IKeyValueStore {
rocksdb::DBOptions dbOptions;
std::shared_ptr<RocksDBErrorListener> errorListener;
std::shared_ptr<RocksDBEventListener> eventListener;
std::shared_ptr<IteratorPool> iteratorPool;
ShardManager shardManager;
std::shared_ptr<RocksDBMetrics> rocksDBMetrics;
std::string path;

View File

@ -28,7 +28,7 @@
#include "fdbserver/Knobs.h"
void KnobKeyValuePairs::set(const std::string& name, const ParsedKnobValue value) {
ASSERT(knobs.count(name) == 0);
ASSERT(!knobs.contains(name));
knobs[name] = value;
}

View File

@ -44,7 +44,7 @@ LatencyBandsMap::ExpirableBands::ExpirableBands(LatencyBands&& bands)
: latencyBands(std::move(bands)), lastUpdated(now()) {}
Optional<LatencyBands*> LatencyBandsMap::getLatencyBands(TransactionTag tag) {
if (map.size() == maxSize && !map.count(tag)) {
if (map.size() == maxSize && !map.contains(tag)) {
CODE_PROBE(true, "LatencyBandsMap reached maxSize");
return {};
}
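Many hunks in this change set replace count()-based membership tests with C++20 contains(), which states the intent directly and drops the implicit `> 0` / `== 0` comparison. A trivial self-contained example of the equivalence:

#include <cassert>
#include <unordered_set>

int main() {
	std::unordered_set<int> s{ 1, 2, 3 };
	assert((s.count(2) > 0) == s.contains(2)); // same answer, clearer intent with contains()
	return 0;
}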

View File

@ -203,7 +203,8 @@ ACTOR Future<Void> tryBecomeLeaderInternal(ServerCoordinators coordinators,
// If more than 2*SERVER_KNOBS->POLLING_FREQUENCY elapses while we are nominated by some coordinator but
// there is no leader, we might be breaking the leader election process for someone with better
// communications but lower ID, so change IDs.
if ((!leader.present() || !leader.get().second) && std::count(nominees.begin(), nominees.end(), myInfo)) {
if ((!leader.present() || !leader.get().second) &&
std::find(nominees.begin(), nominees.end(), myInfo) != nominees.end()) {
if (!badCandidateTimeout.isValid())
badCandidateTimeout = delay(SERVER_KNOBS->POLLING_FREQUENCY * 2, TaskPriority::CoordinationReply);
} else

View File

@ -92,6 +92,7 @@ struct LogRouterData {
double maxGetMoreTime = 0; // The max wait time LR spent in a pull-data-request to satellite tLog.
int64_t generation = -1;
Reference<Histogram> peekLatencyDist;
Optional<Version> recoverAt = Optional<Version>();
struct PeekTrackerData {
std::map<int, Promise<std::pair<Version, bool>>> sequence_version;
@ -138,6 +139,8 @@ struct LogRouterData {
logSet.locality = req.locality;
logSet.updateLocalitySet(req.tLogLocalities);
recoverAt = req.recoverAt;
for (int i = 0; i < req.tLogLocalities.size(); i++) {
Tag tag(tagLocalityRemoteLog, i);
auto tagData = getTagData(tag);
@ -387,7 +390,8 @@ Future<Reference<ILogSystem::IPeekCursor>> LogRouterData::getPeekCursorData(Refe
.When(logSystemChanged,
[&](const Void&) {
if (logSystem->get()) {
result = logSystem->get()->peekLogRouter(dbgid, beginVersion, routerTag, useSatellite);
result =
logSystem->get()->peekLogRouter(dbgid, beginVersion, routerTag, useSatellite, recoverAt);
primaryPeekLocation = result->getPrimaryPeekLocation();
TraceEvent("LogRouterPeekLocation", dbgid)
.detail("LogID", result->getPrimaryPeekLocation())
@ -403,7 +407,7 @@ Future<Reference<ILogSystem::IPeekCursor>> LogRouterData::getPeekCursorData(Refe
CODE_PROBE(true, "Detect log router slow peeks");
TraceEvent(SevWarnAlways, "LogRouterSlowPeek", dbgid).detail("NextTrySatellite", !useSatellite);
useSatellite = !useSatellite;
result = logSystem->get()->peekLogRouter(dbgid, beginVersion, routerTag, useSatellite);
result = logSystem->get()->peekLogRouter(dbgid, beginVersion, routerTag, useSatellite, recoverAt);
primaryPeekLocation = result->getPrimaryPeekLocation();
TraceEvent("LogRouterPeekLocation", dbgid)
.detail("LogID", result->getPrimaryPeekLocation())

View File

@ -157,7 +157,7 @@ void LogSet::checkSatelliteTagLocations() {
std::set<Optional<Key>> zones;
std::set<Optional<Key>> dcs;
for (auto& loc : tLogLocalities) {
if (zones.count(loc.zoneId())) {
if (zones.contains(loc.zoneId())) {
foundDuplicate = true;
break;
}
@ -341,7 +341,7 @@ float LogPushData::getEmptyMessageRatio() const {
bool LogPushData::writeTransactionInfo(int location, uint32_t subseq) {
if (!FLOW_KNOBS->WRITE_TRACING_ENABLED || logSystem->getTLogVersion() < TLogVersion::V6 ||
writtenLocations.count(location) != 0) {
writtenLocations.contains(location)) {
return false;
}

View File

@ -287,13 +287,13 @@ bool LogSystemConfig::isNextGenerationOf(LogSystemConfig const& r) const {
bool LogSystemConfig::hasTLog(UID tid) const {
for (const auto& log : tLogs) {
if (std::count(log.tLogs.begin(), log.tLogs.end(), tid) > 0) {
if (std::find(log.tLogs.begin(), log.tLogs.end(), tid) != log.tLogs.end()) {
return true;
}
}
for (const auto& old : oldTLogs) {
for (const auto& log : old.tLogs) {
if (std::count(log.tLogs.begin(), log.tLogs.end(), tid) > 0) {
if (std::find(log.tLogs.begin(), log.tLogs.end(), tid) != log.tLogs.end()) {
return true;
}
}
@ -303,13 +303,13 @@ bool LogSystemConfig::hasTLog(UID tid) const {
bool LogSystemConfig::hasLogRouter(UID rid) const {
for (const auto& log : tLogs) {
if (std::count(log.logRouters.begin(), log.logRouters.end(), rid) > 0) {
if (std::find(log.logRouters.begin(), log.logRouters.end(), rid) != log.logRouters.end()) {
return true;
}
}
for (const auto& old : oldTLogs) {
for (const auto& log : old.tLogs) {
if (std::count(log.logRouters.begin(), log.logRouters.end(), rid) > 0) {
if (std::find(log.logRouters.begin(), log.logRouters.end(), rid) != log.logRouters.end()) {
return true;
}
}
@ -319,7 +319,7 @@ bool LogSystemConfig::hasLogRouter(UID rid) const {
bool LogSystemConfig::hasBackupWorker(UID bid) const {
for (const auto& log : tLogs) {
if (std::count(log.backupWorkers.begin(), log.backupWorkers.end(), bid) > 0) {
if (std::find(log.backupWorkers.begin(), log.backupWorkers.end(), bid) != log.backupWorkers.end()) {
return true;
}
}
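The hunks above swap std::count for std::find because only membership matters: std::find returns at the first match, while std::count always walks the whole range. A minimal illustration of the pattern used throughout these files:

#include <algorithm>
#include <vector>

bool hasId(const std::vector<int>& ids, int id) {
	// Stops at the first match instead of counting every occurrence.
	return std::find(ids.begin(), ids.end(), id) != ids.end();
}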

View File

@ -288,7 +288,8 @@ ACTOR Future<Void> serverPeekParallelGetMore(ILogSystem::ServerPeekCursor* self,
self->tag,
self->returnIfBlocked,
self->onlySpilled,
std::make_pair(self->randomID, self->sequence++)),
std::make_pair(self->randomID, self->sequence++),
self->end.version),
taskID)));
}
if (self->sequence == std::numeric_limits<decltype(self->sequence)>::max()) {
@ -444,7 +445,9 @@ ACTOR Future<Void> serverPeekGetMore(ILogSystem::ServerPeekCursor* self, TaskPri
TLogPeekRequest(self->messageVersion.version,
self->tag,
self->returnIfBlocked,
self->onlySpilled),
self->onlySpilled,
Optional<std::pair<UID, int>>(),
self->end.version),
taskID))
: Never())) {
updateCursorWithReply(self, res);

View File

@ -227,7 +227,7 @@ bool MockStorageServer::allShardStatusIn(const KeyRangeRef& range, const std::se
for (auto it = ranges.begin(); it != ranges.end(); ++it) {
// fmt::print("allShardStatusIn: {}: {} \n", id.toString(), it->range().toString());
if (!status.count(it->cvalue().status))
if (!status.contains(it->cvalue().status))
return false;
}
return true;
@ -679,7 +679,7 @@ void MockGlobalState::addStoragePerProcess(uint64_t defaultDiskSpace) {
}
bool MockGlobalState::serverIsSourceForShard(const UID& serverId, KeyRangeRef shard, bool inFlightShard) {
if (!allServers.count(serverId))
if (!allServers.contains(serverId))
return false;
// check serverKeys
@ -703,9 +703,9 @@ bool MockGlobalState::serverIsDestForShard(const UID& serverId, KeyRangeRef shar
TraceEvent(SevDebug, "ServerIsDestForShard")
.detail("ServerId", serverId)
.detail("Keys", shard)
.detail("Contains", allServers.count(serverId));
.detail("Contains", allServers.contains(serverId));
if (!allServers.count(serverId))
if (!allServers.contains(serverId))
return false;
// check serverKeys
@ -723,7 +723,7 @@ bool MockGlobalState::serverIsDestForShard(const UID& serverId, KeyRangeRef shar
}
bool MockGlobalState::allShardsRemovedFromServer(const UID& serverId) {
return allServers.count(serverId) && shardMapping->getNumberOfShards(serverId) == 0;
return allServers.contains(serverId) && shardMapping->getNumberOfShards(serverId) == 0;
}
Future<std::pair<Optional<StorageMetrics>, int>> MockGlobalState::waitStorageMetrics(

View File

@ -892,14 +892,14 @@ ACTOR Future<std::vector<std::vector<UID>>> additionalSources(RangeResult shards
decodeKeyServersValue(UIDtoTagMap, shards[i].value, src, dest);
for (int s = 0; s < src.size(); s++) {
if (!fetching.count(src[s])) {
if (!fetching.contains(src[s])) {
fetching.insert(src[s]);
serverListEntries.push_back(tr->get(serverListKeyFor(src[s])));
}
}
for (int s = 0; s < dest.size(); s++) {
if (!fetching.count(dest[s])) {
if (!fetching.contains(dest[s])) {
fetching.insert(dest[s]);
serverListEntries.push_back(tr->get(serverListKeyFor(dest[s])));
}
@ -1350,7 +1350,7 @@ ACTOR static Future<Void> finishMoveKeys(Database occ,
completeSrc = src;
} else {
for (int i = 0; i < completeSrc.size(); i++) {
if (!srcSet.count(completeSrc[i])) {
if (!srcSet.contains(completeSrc[i])) {
swapAndPop(&completeSrc, i--);
}
}
@ -1405,7 +1405,7 @@ ACTOR static Future<Void> finishMoveKeys(Database occ,
srcSet.insert(src2[s]);
for (int i = 0; i < completeSrc.size(); i++) {
if (!srcSet.count(completeSrc[i])) {
if (!srcSet.contains(completeSrc[i])) {
swapAndPop(&completeSrc, i--);
}
}
@ -1452,7 +1452,7 @@ ACTOR static Future<Void> finishMoveKeys(Database occ,
state std::vector<UID> newDestinations;
std::set<UID> completeSrcSet(completeSrc.begin(), completeSrc.end());
for (auto& it : dest) {
if (!hasRemote || !completeSrcSet.count(it)) {
if (!hasRemote || !completeSrcSet.contains(it)) {
newDestinations.push_back(it);
}
}
@ -1491,7 +1491,7 @@ ACTOR static Future<Void> finishMoveKeys(Database occ,
auto tssPair = tssMapping.find(storageServerInterfaces[s].id());
if (tssPair != tssMapping.end() && waitForTSSCounter > 0 &&
!tssToIgnore.count(tssPair->second.id())) {
!tssToIgnore.contains(tssPair->second.id())) {
tssReadyInterfs.push_back(tssPair->second);
tssReady.push_back(waitForShardReady(
tssPair->second, keys, tr.getReadVersion().get(), GetShardStateRequest::READABLE));
@ -2171,7 +2171,7 @@ ACTOR static Future<Void> finishMoveShards(Database occ,
completeSrc = src;
} else {
for (int i = 0; i < completeSrc.size(); i++) {
if (!srcSet.count(completeSrc[i])) {
if (!srcSet.contains(completeSrc[i])) {
swapAndPop(&completeSrc, i--);
}
}
@ -2187,7 +2187,7 @@ ACTOR static Future<Void> finishMoveShards(Database occ,
state std::vector<UID> newDestinations;
std::set<UID> completeSrcSet(completeSrc.begin(), completeSrc.end());
for (const UID& id : destServers) {
if (!hasRemote || !completeSrcSet.count(id)) {
if (!hasRemote || !completeSrcSet.contains(id)) {
newDestinations.push_back(id);
}
}
@ -2692,7 +2692,7 @@ ACTOR Future<Void> removeStorageServer(Database cx,
allLocalities.insert(dcId_locality[decodeTLogDatacentersKey(it.key)]);
}
if (locality >= 0 && !allLocalities.count(locality)) {
if (locality >= 0 && !allLocalities.contains(locality)) {
for (auto& it : fTagLocalities.get()) {
if (locality == decodeTagLocalityListValue(it.value)) {
tr->clear(it.key);
@ -3316,7 +3316,7 @@ void seedShardServers(Arena& arena, CommitTransactionRef& tr, std::vector<Storag
std::map<UID, Tag> server_tag;
int8_t nextLocality = 0;
for (auto& s : servers) {
if (!dcId_locality.count(s.locality.dcId())) {
if (!dcId_locality.contains(s.locality.dcId())) {
tr.set(arena, tagLocalityListKeyFor(s.locality.dcId()), tagLocalityListValue(nextLocality));
dcId_locality[s.locality.dcId()] = Tag(nextLocality, 0);
nextLocality++;
@ -3398,7 +3398,7 @@ Future<Void> unassignServerKeys(UID traceId, TrType tr, KeyRangeRef keys, std::s
continue;
}
if (ignoreServers.count(id)) {
if (ignoreServers.contains(id)) {
dprint("Ignore un-assignment from {} .\n", id.toString());
continue;
}

View File

@ -128,7 +128,7 @@ public:
const UID serverId = ssi.id();
newServers[serverId] = ssi;
if (oldServers.count(serverId)) {
if (oldServers.contains(serverId)) {
if (ssi.getValue.getEndpoint() != oldServers[serverId].getValue.getEndpoint() ||
ssi.isAcceptingRequests() != oldServers[serverId].isAcceptingRequests()) {
serverChanges.send(std::make_pair(serverId, Optional<StorageServerInterface>(ssi)));
@ -617,7 +617,7 @@ public:
self.maxVersion = std::max(self.maxVersion, req.version);
if (recoveryVersion == std::numeric_limits<Version>::max() &&
self.version_recovery.count(recoveryVersion)) {
self.version_recovery.contains(recoveryVersion)) {
recoveryVersion = self.maxVersion;
self.version_recovery[recoveryVersion] =
self.version_recovery[std::numeric_limits<Version>::max()];
@ -681,7 +681,7 @@ public:
if (recoveryVersion == 0) {
recoveryVersion = std::numeric_limits<Version>::max();
}
if (self.version_recovery.count(recoveryVersion)) {
if (self.version_recovery.contains(recoveryVersion)) {
auto& it = self.version_recovery[recoveryVersion];
double existingEnd = it.second.present() ? it.second.get() : now();
double existingDuration = existingEnd - it.first;
@ -999,7 +999,7 @@ void Ratekeeper::updateRate(RatekeeperLimits* limits) {
ignoredMachines.insert(ss->second->locality.zoneId());
continue;
}
if (ignoredMachines.count(ss->second->locality.zoneId()) > 0) {
if (ignoredMachines.contains(ss->second->locality.zoneId())) {
continue;
}
@ -1021,7 +1021,7 @@ void Ratekeeper::updateRate(RatekeeperLimits* limits) {
ignoredDurabilityLagMachines.insert(ss->second->locality.zoneId());
continue;
}
if (ignoredDurabilityLagMachines.count(ss->second->locality.zoneId()) > 0) {
if (ignoredDurabilityLagMachines.contains(ss->second->locality.zoneId())) {
continue;
}
@ -1215,7 +1215,7 @@ void Ratekeeper::updateRate(RatekeeperLimits* limits) {
minSSVer = std::min(minSSVer, ss.lastReply.version);
// Machines that ratekeeper isn't controlling can fall arbitrarily far behind
if (ignoredMachines.count(it.value.locality.zoneId()) == 0) {
if (!ignoredMachines.contains(it.value.locality.zoneId())) {
minLimitingSSVer = std::min(minLimitingSSVer, ss.lastReply.version);
}
}

View File

@ -34,7 +34,7 @@ void ResolutionBalancer::setResolvers(const std::vector<ResolverInterface>& v) {
}
void ResolutionBalancer::setChangesInReply(UID requestingProxy, GetCommitVersionReply& rep) {
if (resolverNeedingChanges.count(requestingProxy)) {
if (resolverNeedingChanges.contains(requestingProxy)) {
rep.resolverChanges = resolverChanges.get();
rep.resolverChangesVersion = resolverChangesVersion;
resolverNeedingChanges.erase(requestingProxy);
@ -86,12 +86,12 @@ static std::pair<KeyRangeRef, bool> findRange(CoalescedKeyRangeMap<int>& key_res
++it;
// If possible create a new boundary which doesn't exist yet
for (; it != ranges.end(); ++it) {
if (it->value() == src && !borders.count(prev->value()) &&
if (it->value() == src && !borders.contains(prev->value()) &&
std::find(movedRanges.begin(), movedRanges.end(), ResolverMoveRef(it->range(), dest)) ==
movedRanges.end()) {
return std::make_pair(it->range(), true);
}
if (prev->value() == src && !borders.count(it->value()) &&
if (prev->value() == src && !borders.contains(it->value()) &&
std::find(movedRanges.begin(), movedRanges.end(), ResolverMoveRef(prev->range(), dest)) ==
movedRanges.end()) {
return std::make_pair(prev->range(), false);

View File

@ -666,7 +666,7 @@ ACTOR Future<Void> processTransactionStateRequestPart(Reference<Resolver> self,
ASSERT(pContext->pResolverData.getPtr() != nullptr);
ASSERT(pContext->pActors != nullptr);
if (pContext->receivedSequences.count(request.sequence)) {
if (pContext->receivedSequences.contains(request.sequence)) {
// This part has already been received. Still, re-broadcast it to other CommitProxies & Resolvers
pContext->pActors->send(broadcastTxnRequest(request, SERVER_KNOBS->TXN_STATE_SEND_AMOUNT, true));
wait(yield());
@ -795,7 +795,7 @@ ACTOR Future<Void> checkRemoved(Reference<AsyncVar<ServerDBInfo> const> db,
ResolverInterface myInterface) {
loop {
if (db->get().recoveryCount >= recoveryCount &&
!std::count(db->get().resolvers.begin(), db->get().resolvers.end(), myInterface))
std::find(db->get().resolvers.begin(), db->get().resolvers.end(), myInterface) == db->get().resolvers.end())
throw worker_removed();
wait(db->onChange());
}

View File

@ -76,12 +76,12 @@ void ServerThroughputTracker::cleanupUnseenTags(TransactionTagMap<ThroughputCoun
while (it != tagToThroughputCounters.end()) {
auto& [tag, throughputCounters] = *it;
bool seen = false;
if (seenReadTags.count(tag)) {
if (seenReadTags.contains(tag)) {
seen = true;
} else {
throughputCounters.updateThroughput(0, OpType::READ);
}
if (seenWriteTags.count(tag)) {
if (seenWriteTags.contains(tag)) {
seen = true;
} else {
throughputCounters.updateThroughput(0, OpType::WRITE);
@ -102,7 +102,7 @@ void ServerThroughputTracker::cleanupUnseenStorageServers(std::unordered_set<UID
auto it1 = throughput.begin();
while (it1 != throughput.end()) {
auto& [ssId, tagToThroughputCounters] = *it1;
if (seen.count(ssId)) {
if (seen.contains(ssId)) {
++it1;
} else {
auto it2 = tagToThroughputCounters.begin();

View File

@ -202,14 +202,15 @@ void ShardsAffectedByTeamFailure::check() const {
if (EXPENSIVE_VALIDATION || checkMode == CheckMode::ForceCheck) {
for (auto t = team_shards.begin(); t != team_shards.end(); ++t) {
auto i = shard_teams.rangeContaining(t->second.begin);
if (i->range() != t->second || !std::count(i->value().first.begin(), i->value().first.end(), t->first)) {
if (i->range() != t->second ||
std::find(i->value().first.begin(), i->value().first.end(), t->first) == i->value().first.end()) {
ASSERT(false);
}
}
auto rs = shard_teams.ranges();
for (auto i = rs.begin(); i != rs.end(); ++i) {
for (auto t = i->value().first.begin(); t != i->value().first.end(); ++t) {
if (!team_shards.count(std::make_pair(*t, i->range()))) {
if (!team_shards.contains(std::make_pair(*t, i->range()))) {
std::string teamDesc, shards;
for (int k = 0; k < t->servers.size(); k++)
teamDesc += format("%llx ", t->servers[k].first());

View File

@ -509,6 +509,11 @@ public:
int simulationNormalRunTestsTimeoutSeconds = 5400;
int simulationBuggifyRunTestsTimeoutSeconds = 36000;
// Number of tlogs in the remote region
Optional<int> remoteDesiredTLogCount;
// Number of process classes explicitly set to Stateless in each DC
Optional<int> statelessProcessClassesPerDC;
ConfigDBType getConfigDBType() const { return configDBType; }
bool tomlKeyPresent(const toml::value& data, std::string key) {
@ -555,6 +560,7 @@ public:
.add("generateFearless", &generateFearless)
.add("datacenters", &datacenters)
.add("desiredTLogCount", &desiredTLogCount)
.add("remoteDesiredTLogCount", &remoteDesiredTLogCount)
.add("commitProxyCount", &commitProxyCount)
.add("grvProxyCount", &grvProxyCount)
.add("resolverCount", &resolverCount)
@ -581,7 +587,8 @@ public:
.add("defaultTenant", &defaultTenant)
.add("longRunningTest", &longRunningTest)
.add("simulationNormalRunTestsTimeoutSeconds", &simulationNormalRunTestsTimeoutSeconds)
.add("simulationBuggifyRunTestsTimeoutSeconds", &simulationBuggifyRunTestsTimeoutSeconds);
.add("simulationBuggifyRunTestsTimeoutSeconds", &simulationBuggifyRunTestsTimeoutSeconds)
.add("statelessProcessClassesPerDC", &statelessProcessClassesPerDC);
try {
auto file = toml::parse(testFile);
if (file.contains("configuration") && toml::find(file, "configuration").is_table()) {
@ -1650,6 +1657,9 @@ void SimulationConfig::setSpecificConfig(const TestConfig& testConfig) {
if (testConfig.desiredTLogCount.present()) {
db.desiredTLogCount = testConfig.desiredTLogCount.get();
}
if (testConfig.remoteDesiredTLogCount.present()) {
db.remoteDesiredTLogCount = testConfig.remoteDesiredTLogCount.get();
}
if (testConfig.commitProxyCount.present()) {
db.commitProxyCount = testConfig.commitProxyCount.get();
}
@ -2129,8 +2139,12 @@ void SimulationConfig::setRegions(const TestConfig& testConfig) {
if (deterministicRandom()->random01() < 0.25)
db.desiredLogRouterCount = deterministicRandom()->randomInt(1, 7);
if (deterministicRandom()->random01() < 0.25)
if (testConfig.remoteDesiredTLogCount.present()) {
db.remoteDesiredTLogCount = testConfig.remoteDesiredTLogCount.get();
} else if (deterministicRandom()->random01() < 0.25) {
db.remoteDesiredTLogCount = deterministicRandom()->randomInt(1, 7);
}
bool useNormalDCsAsSatellites =
datacenters > 4 && testConfig.minimumRegions < 2 && deterministicRandom()->random01() < 0.3;
@ -2632,13 +2646,6 @@ void setupSimulatedSystem(std::vector<Future<Void>>* systemActors,
(dc < machineCount % dataCenters); // add remainder of machines to first datacenter
int possible_ss = 0;
int dcCoordinators = coordinatorCount / dataCenters + (dc < coordinatorCount % dataCenters);
printf("Datacenter %d: %d/%d machines, %d/%d coordinators\n",
dc,
machines,
machineCount,
dcCoordinators,
coordinatorCount);
ASSERT_LE(dcCoordinators, machines);
// FIXME: we hardcode some machines to specifically test storage cache and blob workers
// TODO: caching disabled for this merge
@ -2657,9 +2664,26 @@ void setupSimulatedSystem(std::vector<Future<Void>>* systemActors,
int totalMachines =
machines + storageCacheMachines + blobWorkerMachines + simHTTPMachines + extraStorageMachineCount;
printf("Datacenter %d: %d/%d machines, %d/%d coordinators, %d other machines\n",
dc,
machines,
machineCount,
dcCoordinators,
coordinatorCount,
totalMachines - machines);
ASSERT_LE(dcCoordinators, machines);
int useSeedForMachine = deterministicRandom()->randomInt(0, totalMachines);
Standalone<StringRef> zoneId;
Standalone<StringRef> newZoneId;
Optional<int> desiredStatelessClasses;
int actualStatelessClasses = 0;
if (testConfig.statelessProcessClassesPerDC.present()) {
desiredStatelessClasses = testConfig.statelessProcessClassesPerDC.get();
}
for (int machine = 0; machine < totalMachines; machine++) {
Standalone<StringRef> machineId(deterministicRandom()->randomUniqueID().toString());
if (machine == 0 || machineCount - dataCenters <= 4 || assignedMachines != 4 ||
@ -2687,6 +2711,11 @@ void setupSimulatedSystem(std::vector<Future<Void>>* systemActors,
}
}
if (desiredStatelessClasses.present() && actualStatelessClasses < desiredStatelessClasses.get()) {
processClass = ProcessClass(ProcessClass::StatelessClass, ProcessClass::CommandLineSource);
actualStatelessClasses++;
}
// FIXME: hack to add machines specifically to test storage cache and blob workers and http server
// `machines` here counts the normal (non-temporary) machines that totalMachines comprises
int processCount = processesPerMachine;
@ -2781,6 +2810,12 @@ void setupSimulatedSystem(std::vector<Future<Void>>* systemActors,
assignedMachines++;
}
if (desiredStatelessClasses.present()) {
// If this assertion fails, it means there were not enough machines in the DC (primary or remote)
// to match the desired number of stateless process classes
ASSERT(actualStatelessClasses == desiredStatelessClasses.get());
}
if (possible_ss - simconfig.db.desiredTSSCount / simconfig.db.usableRegions <= simconfig.db.storageTeamSize) {
gradualMigrationPossible = false;
}

View File

@ -332,14 +332,14 @@ JsonBuilderObject machineStatusFetcher(WorkerEvents mMetrics,
std::string machineId = event.getValue("MachineID");
// If this machine ID does not already exist in the machineMap, add it
if (machineJsonMap.count(machineId) == 0) {
if (!machineJsonMap.contains(machineId)) {
statusObj["machine_id"] = machineId;
if (dcIds.count(it->first)) {
if (dcIds.contains(it->first)) {
statusObj["datacenter_id"] = dcIds[it->first];
}
if (locality.count(it->first)) {
if (locality.contains(it->first)) {
statusObj["locality"] = locality[it->first].toJSON<JsonBuilderObject>();
}
@ -393,7 +393,7 @@ JsonBuilderObject machineStatusFetcher(WorkerEvents mMetrics,
tempList.address = it->first;
// Check if the locality data is present and if so, make use of it.
auto localityData = LocalityData();
if (locality.count(it->first)) {
if (locality.contains(it->first)) {
localityData = locality[it->first];
}
@ -819,7 +819,7 @@ ACTOR static Future<JsonBuilderObject> processStatusFetcher(
machineMemoryUsage.insert(std::make_pair(workerItr->interf.locality.machineId(), MachineMemoryInfo()))
.first;
try {
ASSERT(pMetrics.count(workerItr->interf.address()));
ASSERT(pMetrics.contains(workerItr->interf.address()));
const TraceEventFields& processMetrics = pMetrics[workerItr->interf.address()];
const TraceEventFields& programStart = programStarts[workerItr->interf.address()];
@ -947,7 +947,7 @@ ACTOR static Future<JsonBuilderObject> processStatusFetcher(
wait(yield());
state JsonBuilderObject statusObj;
try {
ASSERT(pMetrics.count(workerItr->interf.address()));
ASSERT(pMetrics.contains(workerItr->interf.address()));
NetworkAddress address = workerItr->interf.address();
const TraceEventFields& processMetrics = pMetrics[workerItr->interf.address()];
@ -1037,7 +1037,7 @@ ACTOR static Future<JsonBuilderObject> processStatusFetcher(
}
int64_t memoryLimit = 0;
if (programStarts.count(address)) {
if (programStarts.contains(address)) {
auto const& programStartEvent = programStarts.at(address);
if (programStartEvent.size() > 0) {
@ -1057,7 +1057,7 @@ ACTOR static Future<JsonBuilderObject> processStatusFetcher(
}
// if this process address is in the machine metrics
if (mMetrics.count(address) && mMetrics[address].size()) {
if (mMetrics.contains(address) && mMetrics[address].size()) {
double availableMemory;
availableMemory = mMetrics[address].getDouble("AvailableMemory");
@ -1074,7 +1074,7 @@ ACTOR static Future<JsonBuilderObject> processStatusFetcher(
JsonBuilderArray messages;
if (errors.count(address) && errors[address].size()) {
if (errors.contains(address) && errors[address].size()) {
// returns status object with type and time of error
messages.push_back(getError(errors.at(address)));
}
@ -1088,7 +1088,7 @@ ACTOR static Future<JsonBuilderObject> processStatusFetcher(
}
// If this process had a trace file open error, identified by strAddress, then add it to messages array
if (tracefileOpenErrorMap.count(strAddress)) {
if (tracefileOpenErrorMap.contains(strAddress)) {
messages.push_back(tracefileOpenErrorMap[strAddress]);
}
@ -1573,9 +1573,9 @@ ACTOR static Future<Void> logRangeWarningFetcher(Database cx,
KeyRange range = BinaryReader::fromStringRef<KeyRange>(it.key.removePrefix(destUidLookupPrefix),
IncludeVersion());
UID logUid = BinaryReader::fromStringRef<UID>(it.value, Unversioned());
if (loggingRanges.count(LogRangeAndUID(range, logUid))) {
if (loggingRanges.contains(LogRangeAndUID(range, logUid))) {
std::pair<Key, Key> rangePair = std::make_pair(range.begin, range.end);
if (existingRanges.count(rangePair)) {
if (existingRanges.contains(rangePair)) {
std::string rangeDescription = (range == getDefaultBackupSharedRange())
? "the default backup set"
: format("`%s` - `%s`",
@ -2547,7 +2547,7 @@ static JsonBuilderObject tlogFetcher(int* logFaultTolerance,
int failedLogs = 0;
for (auto& log : tLogSet.tLogs) {
JsonBuilderObject logObj;
bool failed = !log.present() || !address_workers.count(log.interf().address());
bool failed = !log.present() || !address_workers.contains(log.interf().address());
logObj["id"] = log.id().shortString();
logObj["healthy"] = !failed;
if (log.present()) {
@ -3590,7 +3590,7 @@ ACTOR Future<StatusReply> clusterGetStatus(
if (it.isTss()) {
activeTSSCount++;
}
if (wiggleServers.count(it.id())) {
if (wiggleServers.contains(it.id())) {
wiggleServerAddress.push_back(it.address().toString());
}
}

View File

@ -252,7 +252,7 @@ Future<Void> TCServerInfo::updateStoreType() {
void TCServerInfo::removeTeamsContainingServer(UID removedServer) {
for (int t = 0; t < teams.size(); t++) {
auto const& serverIds = teams[t]->getServerIDs();
if (std::count(serverIds.begin(), serverIds.end(), removedServer)) {
if (std::find(serverIds.begin(), serverIds.end(), removedServer) != serverIds.end()) {
teams[t--] = teams.back();
teams.pop_back();
}

View File

@ -334,7 +334,6 @@ struct TLogData : NonCopyable {
int64_t overheadBytesInput;
int64_t overheadBytesDurable;
int activePeekStreams = 0;
Optional<Version> clusterRecoveryVersion;
WorkerCache<TLogInterface> tlogCache;
FlowLock peekMemoryLimiter;
@ -555,7 +554,7 @@ struct LogData : NonCopyable, public ReferenceCounted<LogData> {
bool poppedRecently,
bool unpoppedRecovered) {
if (tag.locality != tagLocalityLogRouter && tag.locality != tagLocalityTxs && tag != txsTag && allTags.size() &&
!allTags.count(tag) && popped <= recoveredAt) {
!allTags.contains(tag) && popped <= recoveredAt) {
popped = recoveredAt + 1;
}
auto newTagData = makeReference<TagData>(tag, popped, 0, nothingPersistent, poppedRecently, unpoppedRecovered);
@ -1345,7 +1344,7 @@ ACTOR Future<Void> tLogPop(TLogData* self, TLogPopRequest req, Reference<LogData
// This actor is just a loop that calls updatePersistentData and popDiskQueue whenever
// (a) there's data to be spilled or (b) we should update metadata after some commits have been fully popped.
ACTOR Future<Void> updateStorage(TLogData* self) {
while (self->spillOrder.size() && !self->id_data.count(self->spillOrder.front())) {
while (self->spillOrder.size() && !self->id_data.contains(self->spillOrder.front())) {
self->spillOrder.pop_front();
}
@ -1750,7 +1749,8 @@ Future<Void> tLogPeekMessages(PromiseType replyPromise,
Tag reqTag,
bool reqReturnIfBlocked = false,
bool reqOnlySpilled = false,
Optional<std::pair<UID, int>> reqSequence = Optional<std::pair<UID, int>>()) {
Optional<std::pair<UID, int>> reqSequence = Optional<std::pair<UID, int>>(),
Optional<Version> reqEnd = Optional<Version>()) {
state BinaryWriter messages(Unversioned());
state BinaryWriter messages2(Unversioned());
state int sequence = -1;
@ -1814,26 +1814,33 @@ Future<Void> tLogPeekMessages(PromiseType replyPromise,
state double blockStart = now();
// if tLog locked for recovery, return an empty message at the cluster recovery version
// if requested version is greater than any received.
state Optional<Version> clusterRecoveryVersion = Optional<Version>();
ASSERT(!clusterRecoveryVersion.present() || reqBegin <= clusterRecoveryVersion.get());
if (logData->stopped() && logData->version.get() < reqBegin && self->clusterRecoveryVersion.present()) {
clusterRecoveryVersion = self->clusterRecoveryVersion.get();
TraceEvent("TLogPeekMessagesClusterRecoveryVersion").detail("Version", clusterRecoveryVersion.get());
}
if (!clusterRecoveryVersion.present() && reqReturnIfBlocked && logData->version.get() < reqBegin) {
replyPromise.sendError(end_of_stream());
if (reqSequence.present()) {
auto& trackerData = logData->peekTracker[peekId];
auto& sequenceData = trackerData.sequence_version[sequence + 1];
trackerData.lastUpdate = now();
if (!sequenceData.isSet()) {
sequenceData.send(std::make_pair(reqBegin, reqOnlySpilled));
// We need to return data that the caller doesn't already have.
// If the requested version is beyond what the tLog currently has, we'll wait for new data.
// However, there's a catch:
// - If the tLog is locked (e.g., during recovery), waiting for new data could cause a deadlock.
// This happens because a locked tLog won't receive any new commits.
// - This scenario can occur with version vector, where a tLog can be peeked at a version
// higher than its current logData->version during recovery.
// To prevent deadlocks:
// - If a valid 'end' version was provided in the request (the Recovery Version), return with that version.
// - Otherwise, wait for new data as long as the tLog isn't locked.
state Optional<Version> replyWithRecoveryVersion = Optional<Version>();
if (logData->version.get() < reqBegin) {
if (SERVER_KNOBS->ENABLE_VERSION_VECTOR_TLOG_UNICAST && logData->stopped() && reqEnd.present() &&
reqEnd.get() != std::numeric_limits<Version>::max()) {
replyWithRecoveryVersion = reqEnd;
} else if (reqReturnIfBlocked) {
replyPromise.sendError(end_of_stream());
if (reqSequence.present()) {
auto& trackerData = logData->peekTracker[peekId];
auto& sequenceData = trackerData.sequence_version[sequence + 1];
trackerData.lastUpdate = now();
if (!sequenceData.isSet()) {
sequenceData.send(std::make_pair(reqBegin, reqOnlySpilled));
}
}
return Void();
}
return Void();
}
DebugLogTraceEvent("TLogPeekMessages0", self->dbgid)
@ -1841,9 +1848,10 @@ Future<Void> tLogPeekMessages(PromiseType replyPromise,
.detail("Tag", reqTag.toString())
.detail("ReqBegin", reqBegin)
.detail("Version", logData->version.get())
.detail("RecoveredAt", logData->recoveredAt);
.detail("RecoveredAt", logData->recoveredAt)
.detail("ClusterRecovery", replyWithRecoveryVersion.present() ? replyWithRecoveryVersion.get() : -1);
// Wait until we have something to return that the caller doesn't already have
if (!clusterRecoveryVersion.present() && logData->version.get() < reqBegin) {
if (!replyWithRecoveryVersion.present() && logData->version.get() < reqBegin) {
wait(logData->version.whenAtLeast(reqBegin));
wait(delay(SERVER_KNOBS->TLOG_PEEK_DELAY, g_network->getCurrentTask()));
}
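The comment block above boils down to a three-way choice whenever the requested version is ahead of the tLog. A sketch of that decision factored into a standalone helper; all names are illustrative rather than part of the code base, and the unicast/stopped/end checks follow the condition in the hunk:

#include <cstdint>

enum class PeekAction { ServeNow, ReplyAtRecoveryVersion, ReplyEndOfStream, WaitForData };

PeekAction classifyPeek(int64_t tlogVersion, int64_t reqBegin, bool unicastEnabled, bool tlogStopped,
                        bool endPresent, bool endIsFinite, bool returnIfBlocked) {
	if (tlogVersion >= reqBegin)
		return PeekAction::ServeNow; // data up to reqBegin already exists
	if (unicastEnabled && tlogStopped && endPresent && endIsFinite)
		return PeekAction::ReplyAtRecoveryVersion; // a stopped tLog cannot advance; avoid the deadlock
	if (returnIfBlocked)
		return PeekAction::ReplyEndOfStream; // caller asked not to block
	return PeekAction::WaitForData; // an unlocked tLog will eventually reach reqBegin
}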
@ -2120,7 +2128,10 @@ Future<Void> tLogPeekMessages(PromiseType replyPromise,
auto messagesValue = messages.toValue();
reply.arena.dependsOn(messagesValue.arena());
reply.messages = messagesValue;
reply.end = clusterRecoveryVersion.present() ? clusterRecoveryVersion.get() : endVersion;
reply.end = endVersion;
if (replyWithRecoveryVersion.present()) {
reply.end = replyWithRecoveryVersion.get();
}
reply.onlySpilled = onlySpilled;
DebugLogTraceEvent("TLogPeekMessages4", self->dbgid)
@ -2532,7 +2543,8 @@ ACTOR Future<Void> rejoinClusterController(TLogData* self,
loop {
auto const& inf = self->dbInfo->get();
bool isDisplaced =
!std::count(inf.priorCommittedLogServers.begin(), inf.priorCommittedLogServers.end(), tli.id());
std::find(inf.priorCommittedLogServers.begin(), inf.priorCommittedLogServers.end(), tli.id()) ==
inf.priorCommittedLogServers.end();
if (isPrimary) {
isDisplaced =
isDisplaced && inf.recoveryCount >= recoveryCount && inf.recoveryState != RecoveryState::UNINITIALIZED;
@ -2791,7 +2803,7 @@ ACTOR Future<Void> serveTLogInterface(TLogData* self,
bool found = false;
if (self->dbInfo->get().recoveryState >= RecoveryState::ACCEPTING_COMMITS) {
for (auto& logs : self->dbInfo->get().logSystemConfig.tLogs) {
if (std::count(logs.tLogs.begin(), logs.tLogs.end(), logData->logId)) {
if (std::find(logs.tLogs.begin(), logs.tLogs.end(), logData->logId) != logs.tLogs.end()) {
found = true;
break;
}
@ -2820,8 +2832,15 @@ ACTOR Future<Void> serveTLogInterface(TLogData* self,
logData->addActor.send(tLogPeekStream(self, req, logData));
}
when(TLogPeekRequest req = waitNext(tli.peekMessages.getFuture())) {
logData->addActor.send(tLogPeekMessages(
req.reply, self, logData, req.begin, req.tag, req.returnIfBlocked, req.onlySpilled, req.sequence));
logData->addActor.send(tLogPeekMessages(req.reply,
self,
logData,
req.begin,
req.tag,
req.returnIfBlocked,
req.onlySpilled,
req.sequence,
req.end));
}
when(TLogPopRequest req = waitNext(tli.popMessages.getFuture())) {
logData->addActor.send(tLogPop(self, req, logData));
@ -2872,11 +2891,6 @@ ACTOR Future<Void> serveTLogInterface(TLogData* self,
when(TLogEnablePopRequest enablePopReq = waitNext(tli.enablePopRequest.getFuture())) {
logData->addActor.send(tLogEnablePopReq(enablePopReq, self, logData));
}
when(setClusterRecoveryVersionRequest req = waitNext(tli.setClusterRecoveryVersion.getFuture())) {
ASSERT(logData->stopped());
self->clusterRecoveryVersion = req.recoveryVersion;
req.reply.send(Void());
}
}
}
@ -2895,7 +2909,7 @@ void removeLog(TLogData* self, Reference<LogData> logData) {
// actors threw an error immediately
self->id_data.erase(logData->logId);
while (self->popOrder.size() && !self->id_data.count(self->popOrder.front())) {
while (self->popOrder.size() && !self->id_data.contains(self->popOrder.front())) {
self->popOrder.pop_front();
}

View File

@ -219,14 +219,14 @@ Tag TagPartitionedLogSystem::getPseudoPopTag(Tag tag, ProcessClass::ClassType ty
switch (type) {
case ProcessClass::LogRouterClass:
if (tag.locality == tagLocalityLogRouter) {
ASSERT(pseudoLocalities.count(tagLocalityLogRouterMapped) > 0);
ASSERT(pseudoLocalities.contains(tagLocalityLogRouterMapped));
tag.locality = tagLocalityLogRouterMapped;
}
break;
case ProcessClass::BackupClass:
if (tag.locality == tagLocalityLogRouter) {
ASSERT(pseudoLocalities.count(tagLocalityBackup) > 0);
ASSERT(pseudoLocalities.contains(tagLocalityBackup));
tag.locality = tagLocalityBackup;
}
break;
@ -238,7 +238,7 @@ Tag TagPartitionedLogSystem::getPseudoPopTag(Tag tag, ProcessClass::ClassType ty
}
bool TagPartitionedLogSystem::hasPseudoLocality(int8_t locality) const {
return pseudoLocalities.count(locality) > 0;
return pseudoLocalities.contains(locality);
}
Version TagPartitionedLogSystem::popPseudoLocalityTag(Tag tag, Version upTo) {
@ -1267,8 +1267,15 @@ Reference<ILogSystem::IPeekCursor> TagPartitionedLogSystem::peekSingle(UID dbgid
Reference<ILogSystem::IPeekCursor> TagPartitionedLogSystem::peekLogRouter(UID dbgid,
Version begin,
Tag tag,
bool useSatellite) {
bool useSatellite,
Optional<Version> end) {
bool found = false;
if (!end.present()) {
end = std::numeric_limits<Version>::max();
} else {
end = end.get() + 1; // The last version is exclusive to the cursor's desired range
}
for (const auto& log : tLogs) {
found = log->hasLogRouter(dbgid) || log->hasBackupWorker(dbgid);
if (found) {
@ -1303,7 +1310,7 @@ Reference<ILogSystem::IPeekCursor> TagPartitionedLogSystem::peekLogRouter(UID db
// FIXME: do this merge on one of the logs in the other data center to avoid sending multiple copies
// across the WAN
return makeReference<ILogSystem::SetPeekCursor>(
localSets, bestSet, localSets[bestSet]->bestLocationFor(tag), tag, begin, getPeekEnd(), true);
localSets, bestSet, localSets[bestSet]->bestLocationFor(tag), tag, begin, end.get(), true);
} else {
int bestPrimarySet = -1;
int bestSatelliteSet = -1;
@ -1329,7 +1336,7 @@ Reference<ILogSystem::IPeekCursor> TagPartitionedLogSystem::peekLogRouter(UID db
.detail("Begin", begin)
.detail("LogId", log->logServers[log->bestLocationFor(tag)]->get().id());
return makeReference<ILogSystem::ServerPeekCursor>(
log->logServers[log->bestLocationFor(tag)], tag, begin, getPeekEnd(), false, true);
log->logServers[log->bestLocationFor(tag)], tag, begin, end.get(), false, true);
}
}
bool firstOld = true;
@ -1384,7 +1391,7 @@ Reference<ILogSystem::IPeekCursor> TagPartitionedLogSystem::peekLogRouter(UID db
firstOld = false;
}
return makeReference<ILogSystem::ServerPeekCursor>(
Reference<AsyncVar<OptionalInterface<TLogInterface>>>(), tag, begin, getPeekEnd(), false, false);
Reference<AsyncVar<OptionalInterface<TLogInterface>>>(), tag, begin, end.get(), false, false);
}
Version TagPartitionedLogSystem::getKnownCommittedVersion() {
@ -1856,7 +1863,7 @@ void TagPartitionedLogSystem::setBackupWorkers(const std::vector<InitializeBacku
LogEpoch logsetEpoch = this->epoch;
oldestBackupEpoch = this->epoch;
for (const auto& reply : replies) {
if (removedBackupWorkers.count(reply.interf.id()) > 0) {
if (removedBackupWorkers.contains(reply.interf.id())) {
removedBackupWorkers.erase(reply.interf.id());
continue;
}
@ -2316,7 +2323,6 @@ ACTOR Future<Void> TagPartitionedLogSystem::epochEnd(Reference<AsyncVar<Referenc
// trackRejoins listens for rejoin requests from the tLogs that we are recovering from, to learn their
// TLogInterfaces
state std::vector<LogLockInfo> lockResults;
state Reference<IdToInterf> lockResultsInterf = makeReference<IdToInterf>();
state std::vector<std::pair<Reference<AsyncVar<OptionalInterface<TLogInterface>>>, Reference<IReplicationPolicy>>>
allLogServers;
state std::vector<Reference<LogSet>> logServers;
@ -2358,8 +2364,7 @@ ACTOR Future<Void> TagPartitionedLogSystem::epochEnd(Reference<AsyncVar<Referenc
lockResults[i].isCurrent = true;
lockResults[i].logSet = logServers[i];
for (int t = 0; t < logServers[i]->logServers.size(); t++) {
lockResults[i].replies.push_back(
TagPartitionedLogSystem::lockTLog(dbgid, logServers[i]->logServers[t], lockResultsInterf));
lockResults[i].replies.push_back(TagPartitionedLogSystem::lockTLog(dbgid, logServers[i]->logServers[t]));
}
}
@ -2372,7 +2377,7 @@ ACTOR Future<Void> TagPartitionedLogSystem::epochEnd(Reference<AsyncVar<Referenc
foundSpecial = true;
break;
}
if (!lockedLocalities.count(log->locality)) {
if (!lockedLocalities.contains(log->locality)) {
TraceEvent("EpochEndLockExtra").detail("Locality", log->locality);
CODE_PROBE(true, "locking old generations for version information");
lockedLocalities.insert(log->locality);
@ -2380,8 +2385,7 @@ ACTOR Future<Void> TagPartitionedLogSystem::epochEnd(Reference<AsyncVar<Referenc
lockResult.epochEnd = old.epochEnd;
lockResult.logSet = log;
for (int t = 0; t < log->logServers.size(); t++) {
lockResult.replies.push_back(
TagPartitionedLogSystem::lockTLog(dbgid, log->logServers[t], lockResultsInterf));
lockResult.replies.push_back(TagPartitionedLogSystem::lockTLog(dbgid, log->logServers[t]));
}
lockResults.push_back(lockResult);
}
@ -2397,8 +2401,7 @@ ACTOR Future<Void> TagPartitionedLogSystem::epochEnd(Reference<AsyncVar<Referenc
lockResult.epochEnd = old.epochEnd;
lockResult.logSet = old.tLogs[0];
for (int t = 0; t < old.tLogs[0]->logServers.size(); t++) {
lockResult.replies.push_back(
TagPartitionedLogSystem::lockTLog(dbgid, old.tLogs[0]->logServers[t], lockResultsInterf));
lockResult.replies.push_back(TagPartitionedLogSystem::lockTLog(dbgid, old.tLogs[0]->logServers[t]));
}
allLockResults.push_back(lockResult);
}
@ -2491,33 +2494,6 @@ ACTOR Future<Void> TagPartitionedLogSystem::epochEnd(Reference<AsyncVar<Referenc
logSystem->remoteLogsWrittenToCoreState = true;
logSystem->stopped = true;
logSystem->pseudoLocalities = prevState.pseudoLocalities;
if (SERVER_KNOBS->ENABLE_VERSION_VECTOR_TLOG_UNICAST) {
// When a new log system is created, inform the surviving tLogs of the RV.
// SOMEDAY: Assert surviving tLogs use the RV from the latest log system.
// @todo issues:
// - we are not adding entries of a LogSet to "logGroupResults" above if
// "getDurableVersion()" doesn't return a recovery version for that LogSet.
// Is it fine to ignore the log servers (if any) in that LogSet and if so,
// how would they learn about the recovery version?
// - we don't get here if a restart happens and the resulting recovery
// version doesn't exceed the previously computed recovery version. Don't
// we need to send the (previously computed) recovery version to those
// log servers that caused the restart?
for (auto logGroupResult : logGroupResults) {
state std::vector<TLogLockResult> tLogResults = std::get<1>(logGroupResult);
for (auto& tLogResult : tLogResults) {
wait(transformErrors(
throwErrorOr(lockResultsInterf->lockInterf[tLogResult.id]
.setClusterRecoveryVersion.getReplyUnlessFailedFor(
setClusterRecoveryVersionRequest(logSystem->recoverAt.get()),
SERVER_KNOBS->TLOG_TIMEOUT,
SERVER_KNOBS->MASTER_FAILURE_SLOPE_DURING_RECOVERY)),
cluster_recovery_failed()));
}
}
}
outLogSystem->set(logSystem);
}
@ -2580,6 +2556,7 @@ ACTOR Future<Void> TagPartitionedLogSystem::recruitOldLogRouters(TagPartitionedL
req.tLogLocalities = tLogLocalities;
req.tLogPolicy = tLogPolicy;
req.locality = locality;
req.recoverAt = self->recoverAt.get();
auto reply = transformErrors(
throwErrorOr(workers[nextRouter].logRouter.getReplyUnlessFailedFor(
req, SERVER_KNOBS->TLOG_TIMEOUT, SERVER_KNOBS->MASTER_FAILURE_SLOPE_DURING_RECOVERY)),
@ -2629,6 +2606,7 @@ ACTOR Future<Void> TagPartitionedLogSystem::recruitOldLogRouters(TagPartitionedL
req.tLogLocalities = tLogLocalities;
req.tLogPolicy = tLogPolicy;
req.locality = locality;
req.recoverAt = old.recoverAt;
auto reply = transformErrors(
throwErrorOr(workers[nextRouter].logRouter.getReplyUnlessFailedFor(
req, SERVER_KNOBS->TLOG_TIMEOUT, SERVER_KNOBS->MASTER_FAILURE_SLOPE_DURING_RECOVERY)),
@ -3369,8 +3347,7 @@ ACTOR Future<Void> TagPartitionedLogSystem::trackRejoins(
ACTOR Future<TLogLockResult> TagPartitionedLogSystem::lockTLog(
UID myID,
Reference<AsyncVar<OptionalInterface<TLogInterface>>> tlog,
Optional<Reference<IdToInterf>> lockInterf) {
Reference<AsyncVar<OptionalInterface<TLogInterface>>> tlog) {
TraceEvent("TLogLockStarted", myID).detail("TLog", tlog->get().id()).detail("InfPresent", tlog->get().present());
loop {
choose {
@ -3378,9 +3355,6 @@ ACTOR Future<TLogLockResult> TagPartitionedLogSystem::lockTLog(
tlog->get().present() ? brokenPromiseToNever(tlog->get().interf().lock.getReply<TLogLockResult>())
: Never())) {
TraceEvent("TLogLocked", myID).detail("TLog", tlog->get().id()).detail("End", data.end);
if (lockInterf.present()) {
lockInterf.get()->lockInterf[data.id] = tlog->get().interf();
}
return data;
}
when(wait(tlog->onChange())) {}

View File

@ -685,7 +685,7 @@ static int asyncFullPathname(sqlite3_vfs* pVfs, /* VFS */
** and false otherwise.
*/
bool vfsAsyncIsOpen(std::string filename) {
return SharedMemoryInfo::table.count(abspath(filename)) > 0;
return SharedMemoryInfo::table.contains(abspath(filename));
}
/*

View File

@ -3344,7 +3344,7 @@ public:
if (copyNewToOriginal) {
if (g_network->isSimulated()) {
ASSERT(self->remapDestinationsSimOnly.count(p.originalPageID) == 0);
ASSERT(!self->remapDestinationsSimOnly.contains(p.originalPageID));
self->remapDestinationsSimOnly.insert(p.originalPageID);
}
debug_printf("DWALPager(%s) remapCleanup copy %s\n", self->filename.c_str(), p.toString().c_str());
@ -9175,7 +9175,7 @@ TEST_CASE("Lredwood/correctness/unit/deltaTree/RedwoodRecordRef") {
if (deterministicRandom()->coinflip()) {
rec.value = StringRef(arena, v);
}
if (uniqueItems.count(rec) == 0) {
if (!uniqueItems.contains(rec)) {
uniqueItems.insert(rec);
}
}
@ -9352,7 +9352,7 @@ TEST_CASE("Lredwood/correctness/unit/deltaTree/RedwoodRecordRef2") {
if (deterministicRandom()->coinflip()) {
rec.value = StringRef(arena, v);
}
if (uniqueItems.count(rec) == 0) {
if (!uniqueItems.contains(rec)) {
uniqueItems.insert(rec);
}
}
@ -9533,7 +9533,7 @@ TEST_CASE("Lredwood/correctness/unit/deltaTree/IntIntPair") {
nextP.v++;
auto prevP = p;
prevP.v--;
if (uniqueItems.count(p) == 0 && uniqueItems.count(nextP) == 0 && uniqueItems.count(prevP) == 0) {
if (!uniqueItems.contains(p) && !uniqueItems.contains(nextP) && !uniqueItems.contains(prevP)) {
uniqueItems.insert(p);
}
}
@ -9676,8 +9676,8 @@ TEST_CASE("Lredwood/correctness/unit/deltaTree/IntIntPair") {
// Insert record if it, its predecessor, and its successor are not present.
// Test data is intentionally sparse to test finding each record with a directional
// seek from each adjacent possible but not present record.
if (uniqueItems.count(p) == 0 && uniqueItems.count(IntIntPair(p.k, p.v - 1)) == 0 &&
uniqueItems.count(IntIntPair(p.k, p.v + 1)) == 0) {
if (!uniqueItems.contains(p) && !uniqueItems.contains(IntIntPair(p.k, p.v - 1)) &&
!uniqueItems.contains(IntIntPair(p.k, p.v + 1))) {
if (!cur2.insert(p)) {
shouldBeFull = true;
break;

View File

@ -2229,7 +2229,7 @@ int main(int argc, char* argv[]) {
const std::set<std::string> allowedDirectories = { ".", "..", "backups", "unittests", "fdbblob" };
for (const auto& dir : directories) {
if (dir.size() != 32 && allowedDirectories.count(dir) == 0 && dir.find("snap") == std::string::npos) {
if (dir.size() != 32 && !allowedDirectories.contains(dir) && dir.find("snap") == std::string::npos) {
TraceEvent(SevError, "IncompatibleDirectoryFound")
.detail("DataFolder", dataFolder)

View File

@ -37,6 +37,8 @@
#include "fdbserver/BlobMigratorInterface.h"
#include "fdbserver/Knobs.h"
#include "fdbserver/WorkerInterface.actor.h"
#include "fdbrpc/Locality.h"
#include "flow/NetworkAddress.h"
#include "flow/SystemMonitor.h"
#include "metacluster/MetaclusterMetrics.h"
@ -351,9 +353,9 @@ public:
.detail("Worker", it.second.details.interf.address())
.detail("WorkerAvailable", workerAvailable(it.second, false))
.detail("RecoverDiskFiles", it.second.details.recoveredDiskFiles)
.detail("NotExcludedMachine", !excludedMachines.count(it.second.details.interf.locality.zoneId()))
.detail("NotExcludedMachine", !excludedMachines.contains(it.second.details.interf.locality.zoneId()))
.detail("IncludeDC",
(includeDCs.size() == 0 || includeDCs.count(it.second.details.interf.locality.dcId())))
(includeDCs.size() == 0 || includeDCs.contains(it.second.details.interf.locality.dcId())))
.detail("NotExcludedAddress", !addressExcluded(excludedAddresses, it.second.details.interf.address()))
.detail("NotExcludedAddress2",
(!it.second.details.interf.secondaryAddress().present() ||
@ -363,8 +365,8 @@ public:
ProcessClass::UnsetFit)
.detail("MachineFitness", it.second.details.processClass.machineClassFitness(ProcessClass::Storage));
if (workerAvailable(it.second, false) && it.second.details.recoveredDiskFiles &&
!excludedMachines.count(it.second.details.interf.locality.zoneId()) &&
(includeDCs.size() == 0 || includeDCs.count(it.second.details.interf.locality.dcId())) &&
!excludedMachines.contains(it.second.details.interf.locality.zoneId()) &&
(includeDCs.size() == 0 || includeDCs.contains(it.second.details.interf.locality.dcId())) &&
!addressExcluded(excludedAddresses, it.second.details.interf.address()) &&
(!it.second.details.interf.secondaryAddress().present() ||
!addressExcluded(excludedAddresses, it.second.details.interf.secondaryAddress().get())) &&
@ -379,8 +381,8 @@ public:
for (auto& it : id_worker) {
ProcessClass::Fitness fit = it.second.details.processClass.machineClassFitness(ProcessClass::Storage);
if (workerAvailable(it.second, false) && it.second.details.recoveredDiskFiles &&
!excludedMachines.count(it.second.details.interf.locality.zoneId()) &&
(includeDCs.size() == 0 || includeDCs.count(it.second.details.interf.locality.dcId())) &&
!excludedMachines.contains(it.second.details.interf.locality.zoneId()) &&
(includeDCs.size() == 0 || includeDCs.contains(it.second.details.interf.locality.dcId())) &&
!addressExcluded(excludedAddresses, it.second.details.interf.address()) && fit < bestFit) {
bestFit = fit;
bestInfo = it.second.details;
@ -502,7 +504,7 @@ public:
auto thisField = worker.interf.locality.get(field);
auto thisZone = worker.interf.locality.zoneId();
if (field_count.count(thisField)) {
if (field_count.contains(thisField)) {
zone_workers[thisZone].push_back(worker);
zone_count[thisZone].second = thisField;
}
@ -528,7 +530,7 @@ public:
auto& zoneWorkers = zone_workers[lowestZone.second];
while (zoneWorkers.size() && !added) {
if (!resultSet.count(zoneWorkers.back())) {
if (!resultSet.contains(zoneWorkers.back())) {
resultSet.insert(zoneWorkers.back());
if (resultSet.size() == desired) {
return;
@ -583,7 +585,7 @@ public:
bool added = false;
while (zoneWorkers.size() && !added) {
if (!resultSet.count(zoneWorkers.back())) {
if (!resultSet.contains(zoneWorkers.back())) {
resultSet.insert(zoneWorkers.back());
if (resultSet.size() == desired) {
return;
@ -690,7 +692,7 @@ public:
SevDebug, id, "complex", "Worker's fitness is NeverAssign", worker_details, fitness, dcIds);
continue;
}
if (!dcIds.empty() && dcIds.count(worker_details.interf.locality.dcId()) == 0) {
if (!dcIds.empty() && !dcIds.contains(worker_details.interf.locality.dcId())) {
logWorkerUnavailable(
SevDebug, id, "complex", "Worker is not in the target DC", worker_details, fitness, dcIds);
continue;
@ -801,7 +803,7 @@ public:
}
if (workerIter->second.size() + resultSet.size() <= desired) {
for (auto& worker : workerIter->second) {
if (chosenFields.count(worker.interf.locality.get(field))) {
if (chosenFields.contains(worker.interf.locality.get(field))) {
resultSet.insert(worker);
}
}
@ -940,7 +942,7 @@ public:
SevDebug, id, "simple", "Worker's fitness is NeverAssign", worker_details, fitness, dcIds);
continue;
}
if (!dcIds.empty() && dcIds.count(worker_details.interf.locality.dcId()) == 0) {
if (!dcIds.empty() && !dcIds.contains(worker_details.interf.locality.dcId())) {
logWorkerUnavailable(
SevDebug, id, "simple", "Worker is not in the target DC", worker_details, fitness, dcIds);
continue;
@ -973,7 +975,7 @@ public:
auto used = std::get<1>(workerIter->first);
deterministicRandom()->randomShuffle(workerIter->second);
for (auto& worker : workerIter->second) {
if (!zones.count(worker.interf.locality.zoneId())) {
if (!zones.contains(worker.interf.locality.zoneId())) {
zones.insert(worker.interf.locality.zoneId());
resultSet.insert(worker);
if (resultSet.size() == required) {
@ -1092,7 +1094,7 @@ public:
SevDebug, id, "deprecated", "Worker's fitness is NeverAssign", worker_details, fitness, dcIds);
continue;
}
if (!dcIds.empty() && dcIds.count(worker_details.interf.locality.dcId()) == 0) {
if (!dcIds.empty() && !dcIds.contains(worker_details.interf.locality.dcId())) {
logWorkerUnavailable(
SevDebug, id, "deprecated", "Worker is not in the target DC", worker_details, fitness, dcIds);
continue;
@ -1312,7 +1314,7 @@ public:
std::map<Optional<Standalone<StringRef>>, int> field_count;
std::set<Optional<Standalone<StringRef>>> zones;
for (auto& worker : testWorkers) {
if (!zones.count(worker.interf.locality.zoneId())) {
if (!zones.contains(worker.interf.locality.zoneId())) {
field_count[worker.interf.locality.get(pa1->attributeKey())]++;
zones.insert(worker.interf.locality.zoneId());
}
@ -2478,7 +2480,7 @@ public:
.detail("ProcessID", it.interf().filteredLocality.processId());
return true;
}
if (!logRouterAddresses.count(tlogWorker->second.details.interf.address())) {
if (!logRouterAddresses.contains(tlogWorker->second.details.interf.address())) {
logRouterAddresses.insert(tlogWorker->second.details.interf.address());
log_routers.push_back(tlogWorker->second.details);
}
@ -2498,7 +2500,7 @@ public:
.detail("ProcessID", worker.interf().locality.processId());
return true;
}
if (backup_addresses.count(workerIt->second.details.interf.address()) == 0) {
if (!backup_addresses.contains(workerIt->second.details.interf.address())) {
backup_addresses.insert(workerIt->second.details.interf.address());
backup_workers.push_back(workerIt->second.details);
}
@ -2664,7 +2666,7 @@ public:
int32_t oldSatelliteRegionFit = std::numeric_limits<int32_t>::max();
for (auto& it : satellite_tlogs) {
if (satellite_priority.count(it.interf.locality.dcId())) {
if (satellite_priority.contains(it.interf.locality.dcId())) {
oldSatelliteRegionFit = std::min(oldSatelliteRegionFit, satellite_priority[it.interf.locality.dcId()]);
} else {
oldSatelliteRegionFit = -1;
@ -2673,7 +2675,7 @@ public:
int32_t newSatelliteRegionFit = std::numeric_limits<int32_t>::max();
for (auto& it : newSatelliteTLogs) {
if (satellite_priority.count(it.interf.locality.dcId())) {
if (satellite_priority.contains(it.interf.locality.dcId())) {
newSatelliteRegionFit = std::min(newSatelliteRegionFit, satellite_priority[it.interf.locality.dcId()]);
} else {
newSatelliteRegionFit = -1;
@ -3107,6 +3109,10 @@ public:
bool degradedSatellite = false; // Indicates that the entire satellite DC is degraded.
};
// Returns true if and only if addr1 and addr2 are located in the same DC
bool processesInSameDC(const NetworkAddress& addr1, const NetworkAddress& addr2) const;
// Returns a list of servers who are experiencing degraded links. These are candidates to perform exclusion. Note
// that only one endpoint of a bad link will be included in this list.
DegradationInfo getDegradationInfo() {
@ -3122,6 +3128,9 @@ public:
// This degraded link is not long enough to be considered as degraded.
continue;
}
if (SERVER_KNOBS->CC_ONLY_CONSIDER_INTRA_DC_LATENCY && !processesInSameDC(server, degradedPeer)) {
continue;
}
degradedLinkDst2Src[degradedPeer].insert(server);
}
for (const auto& [disconnectedPeer, times] : health.disconnectedPeers) {
@ -3262,7 +3271,6 @@ public:
return transactionSystemContainsDegradedServers();
}
// Returns true when the cluster controller should trigger a failover due to degraded servers used in the
// transaction system in the primary data center, and no degradation in the remote data center.
bool shouldTriggerFailoverDueToDegradedServers() {
@ -3319,6 +3327,7 @@ public:
std::map<Optional<Standalone<StringRef>>, WorkerInfo> id_worker;
std::map<Optional<Standalone<StringRef>>, ProcessClass>
id_class; // contains the mapping from process id to process class from the database
std::unordered_map<NetworkAddress, LocalityData> addr_locality; // mapping of process address to its locality
RangeResult lastProcessClasses;
bool gotProcessClasses;
bool gotFullyRecoveredConfig;

View File

@ -43,7 +43,7 @@ public:
}
}
bool contains(KeyRef configClass) const { return classes.count(configClass); }
bool contains(KeyRef configClass) const { return classes.contains(configClass); }
std::set<Key> const& getClasses() const { return classes; }
template <class Ar>

View File

@ -44,6 +44,7 @@ struct ConflictBatch {
TransactionTooOld,
TransactionTenantFailure,
TransactionCommitted,
TransactionLockReject,
};
void addTransaction(const CommitTransactionRef& transaction, Version newOldestVersion);

View File

@ -770,7 +770,7 @@ struct StorageWiggler : ReferenceCounted<StorageWiggler> {
void removeServer(const UID& serverId);
// update metadata and adjust priority_queue
void updateMetadata(const UID& serverId, const StorageMetadataType& metadata);
bool contains(const UID& serverId) const { return pq_handles.count(serverId) > 0; }
bool contains(const UID& serverId) const { return pq_handles.contains(serverId); }
bool empty() const { return wiggle_pq.empty(); }
// It's guarantee that When a.metadata >= b.metadata, if !necessary(a) then !necessary(b)

View File

@ -49,7 +49,7 @@ struct ExclusionTracker {
bool isFailedOrExcluded(NetworkAddress addr) {
AddressExclusion addrExclusion(addr.ip, addr.port);
return excluded.count(addrExclusion) || failed.count(addrExclusion);
return excluded.contains(addrExclusion) || failed.contains(addrExclusion);
}
ACTOR static Future<Void> tracker(ExclusionTracker* self) {

View File

@ -107,7 +107,8 @@ enum EncodingType : uint8_t {
static constexpr std::array EncryptedEncodingTypes = { AESEncryption, AESEncryptionWithAuth, XOREncryption_TestOnly };
inline bool isEncodingTypeEncrypted(EncodingType encoding) {
return std::count(EncryptedEncodingTypes.begin(), EncryptedEncodingTypes.end(), encoding) > 0;
return std::find(EncryptedEncodingTypes.begin(), EncryptedEncodingTypes.end(), encoding) !=
EncryptedEncodingTypes.end();
}
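Editor's note: std::count keeps scanning after a match, while std::find stops at the first hit, so the rewrite above is the usual way to spell a pure existence check. A standalone illustration with placeholder enum values (not the real EncodingType list):

#include <algorithm>
#include <array>
#include <cassert>

enum Encoding { Plain, AES, XORTest };

constexpr std::array kEncrypted = { AES, XORTest };

bool isEncrypted(Encoding e) {
    // Stops at the first match instead of counting every occurrence.
    return std::find(kEncrypted.begin(), kEncrypted.end(), e) != kEncrypted.end();
}

int main() {
    assert(isEncrypted(AES));
    assert(!isEncrypted(Plain));
    return 0;
}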
inline bool isEncodingTypeAESEncrypted(EncodingType encoding) {

View File

@ -555,7 +555,11 @@ struct ILogSystem {
// Same contract as peek(), but blocks until the preferred log server(s) for the given tag are available (and is
// correspondingly less expensive)
virtual Reference<IPeekCursor> peekLogRouter(UID dbgid, Version begin, Tag tag, bool useSatellite) = 0;
virtual Reference<IPeekCursor> peekLogRouter(UID dbgid,
Version begin,
Tag tag,
bool useSatellite,
Optional<Version> end = Optional<Version>()) = 0;
// Same contract as peek(), but can only peek from the logs elected in the same generation.
// If the preferred log server is down, a different log from the same generation will merge results locally before
// sending them to the log router.

View File

@ -27,6 +27,7 @@
#include "fdbclient/FDBTypes.h"
#include "fdbclient/GetEncryptCipherKeys.h"
#include "fdbclient/RangeLock.h"
#include "fdbclient/Tenant.h"
#include "fdbrpc/Stats.h"
#include "fdbserver/AccumulativeChecksumUtil.h"
@ -194,6 +195,72 @@ struct ExpectedIdempotencyIdCountForKey {
: commitVersion(commitVersion), idempotencyIdCount(idempotencyIdCount), batchIndexHighByte(batchIndexHighByte) {}
};
struct RangeLock {
public:
RangeLock() { coreMap.insert(allKeys, RangeLockStateSet()); }
bool pendingRequest() const { return currentRangeLockStartKey.present(); }
void initKeyPoint(const Key& key, const Value& value) {
// TraceEvent(SevDebug, "RangeLockRangeOps").detail("Ops", "Init").detail("Key", key);
if (!value.empty()) {
coreMap.rawInsert(key, decodeRangeLockStateSet(value));
} else {
coreMap.rawInsert(key, RangeLockStateSet());
}
return;
}
void setPendingRequest(const Key& startKey, const RangeLockStateSet& lockSetState) {
ASSERT(SERVER_KNOBS->ENABLE_READ_LOCK_ON_RANGE);
ASSERT(!pendingRequest());
currentRangeLockStartKey = std::make_pair(startKey, lockSetState);
return;
}
void consumePendingRequest(const Key& endKey) {
ASSERT(SERVER_KNOBS->ENABLE_READ_LOCK_ON_RANGE);
ASSERT(pendingRequest());
ASSERT(endKey <= normalKeys.end);
ASSERT(currentRangeLockStartKey.get().first < endKey);
KeyRange lockRange = Standalone(KeyRangeRef(currentRangeLockStartKey.get().first, endKey));
RangeLockStateSet lockSetState = currentRangeLockStartKey.get().second;
/* TraceEvent(SevDebug, "RangeLockRangeOps")
.detail("Ops", "Update")
.detail("Range", lockRange)
.detail("Status", lockSetState.toString()); */
coreMap.insert(lockRange, lockSetState);
coreMap.coalesce(allKeys);
currentRangeLockStartKey.reset();
return;
}
bool isLocked(const KeyRange& range) const {
ASSERT(SERVER_KNOBS->ENABLE_READ_LOCK_ON_RANGE);
if (range.end >= normalKeys.end) {
return false;
}
for (auto lockRange : coreMap.intersectingRanges(range)) {
if (lockRange.value().isValid() && lockRange.value().isLockedFor(RangeLockType::ReadLockOnRange)) {
/*TraceEvent(SevDebug, "RangeLockRangeOps")
.detail("Ops", "Check")
.detail("Range", range)
.detail("Status", "Reject");*/
return true;
}
}
/*TraceEvent(SevDebug, "RangeLockRangeOps")
.detail("Ops", "Check")
.detail("Range", range)
.detail("Status", "Accept");*/
return false;
}
private:
Optional<std::pair<Key, RangeLockStateSet>> currentRangeLockStartKey;
KeyRangeMap<RangeLockStateSet> coreMap;
};
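Editor's note: conceptually, the proxy-side RangeLock answers one question per commit: does any read-locked range intersect the mutation's range? The standalone sketch below models that check with a plain std::map standing in for KeyRangeMap and RangeLockStateSet; it illustrates the idea only and is not the FDB implementation.

#include <cassert>
#include <map>
#include <string>

// Map from range start key to "is this range read-locked"; each entry covers
// [key, next key). A sentinel "" entry covers the start of keyspace.
using LockMap = std::map<std::string, bool>;

bool isLocked(const LockMap& locks, const std::string& begin, const std::string& end) {
    // Step back to the first entry whose range could intersect [begin, end).
    auto it = locks.upper_bound(begin);
    if (it != locks.begin())
        --it;
    for (; it != locks.end() && it->first < end; ++it) {
        if (it->second)
            return true; // an intersecting range is locked: reject the commit
    }
    return false;
}

int main() {
    LockMap locks = { { "", false }, { "b", true }, { "c", false } }; // lock [b, c)
    assert(isLocked(locks, "a", "bb"));  // overlaps the locked range
    assert(!isLocked(locks, "c", "d"));  // entirely outside it
    return 0;
}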
struct ProxyCommitData {
UID dbgid;
int64_t commitBatchesMemBytesCount;
@ -275,6 +342,8 @@ struct ProxyCommitData {
std::shared_ptr<AccumulativeChecksumBuilder> acsBuilder = nullptr;
LogEpoch epoch;
std::shared_ptr<RangeLock> rangeLock = nullptr;
// The tag related to a storage server rarely change, so we keep a vector of tags for each key range to be slightly
// more CPU efficient. When a tag related to a storage server does change, we empty out all of these vectors to
// signify they must be repopulated. We do not repopulate them immediately to avoid a slow task.
@ -362,6 +431,10 @@ struct ProxyCommitData {
: nullptr),
epoch(epoch) {
commitComputePerOperation.resize(SERVER_KNOBS->PROXY_COMPUTE_BUCKETS, 0.0);
rangeLock = SERVER_KNOBS->ENABLE_READ_LOCK_ON_RANGE && !encryptMode.isEncryptionEnabled() &&
getTenantMode() == TenantMode::DISABLED
? std::make_shared<RangeLock>()
: nullptr;
}
};

View File

@ -57,7 +57,7 @@ struct RatekeeperSingleton : Singleton<RatekeeperInterface> {
}
}
void halt(ClusterControllerData& cc, Optional<Standalone<StringRef>> pid) const {
if (interface.present() && cc.id_worker.count(pid)) {
if (interface.present() && cc.id_worker.contains(pid)) {
cc.id_worker[pid].haltRatekeeper =
brokenPromiseToNever(interface.get().haltRatekeeper.getReply(HaltRatekeeperRequest(cc.id)));
}
@ -82,7 +82,7 @@ struct DataDistributorSingleton : Singleton<DataDistributorInterface> {
}
}
void halt(ClusterControllerData& cc, Optional<Standalone<StringRef>> pid) const {
if (interface.present() && cc.id_worker.count(pid)) {
if (interface.present() && cc.id_worker.contains(pid)) {
cc.id_worker[pid].haltDistributor =
brokenPromiseToNever(interface.get().haltDataDistributor.getReply(HaltDataDistributorRequest(cc.id)));
}
@ -132,7 +132,7 @@ struct BlobManagerSingleton : Singleton<BlobManagerInterface> {
}
}
void halt(ClusterControllerData& cc, Optional<Standalone<StringRef>> pid) const {
if (interface.present() && cc.id_worker.count(pid)) {
if (interface.present() && cc.id_worker.contains(pid)) {
cc.id_worker[pid].haltBlobManager =
brokenPromiseToNever(interface.get().haltBlobManager.getReply(HaltBlobManagerRequest(cc.id)));
}
@ -190,7 +190,7 @@ struct EncryptKeyProxySingleton : Singleton<EncryptKeyProxyInterface> {
}
}
void halt(ClusterControllerData& cc, Optional<Standalone<StringRef>> pid) const {
if (interface.present() && cc.id_worker.count(pid)) {
if (interface.present() && cc.id_worker.contains(pid)) {
cc.id_worker[pid].haltEncryptKeyProxy =
brokenPromiseToNever(interface.get().haltEncryptKeyProxy.getReply(HaltEncryptKeyProxyRequest(cc.id)));
}

View File

@ -26,7 +26,7 @@
#include "flow/flow.h"
#include "fdbclient/StorageCheckpoint.h"
enum class MoveInPhase {
enum class MoveInPhase : std::int8_t {
Pending = 0,
Fetching = 1,
Ingesting = 2,
@ -45,13 +45,13 @@ struct MoveInShardMetaData {
UID id;
UID dataMoveId;
std::vector<KeyRange> ranges; // The key ranges to be fetched.
Version createVersion;
Version highWatermark; // The highest version that has been applied to the MoveInShard.
int8_t phase; // MoveInPhase.
Version createVersion = invalidVersion;
Version highWatermark = invalidVersion; // The highest version that has been applied to the MoveInShard.
int8_t phase = static_cast<int8_t>(MoveInPhase::Error); // MoveInPhase.
std::vector<CheckpointMetaData> checkpoints; // All related checkpoints, they should cover `ranges`.
Optional<std::string> error;
double startTime;
bool conductBulkLoad;
double startTime = 0.0;
bool conductBulkLoad = false;
MoveInShardMetaData() = default;
MoveInShardMetaData(const UID& id,

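Editor's note: the hunk above gives every member of MoveInShardMetaData an in-class default, so a default-constructed instance never carries indeterminate values. A standalone illustration of that pattern with stand-in names (invalidVersion and the Error phase value are assumptions, not copied from the header):

#include <cassert>
#include <cstdint>

constexpr int64_t invalidVersion = -1; // assumption: mirrors FDB's sentinel version

struct ShardMeta {
    int64_t createVersion = invalidVersion;
    int64_t highWatermark = invalidVersion;
    int8_t phase = 5;          // stand-in for a MoveInPhase::Error-style default
    double startTime = 0.0;
    bool conductBulkLoad = false;
};

int main() {
    ShardMeta m; // every member has a well-defined value, even without an explicit constructor
    assert(m.createVersion == invalidVersion && !m.conductBulkLoad);
    return 0;
}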
View File

@ -171,7 +171,7 @@ public:
bool matches(std::vector<Standalone<StringRef>> const& sortedMachineIDs);
std::string getMachineIDsStr() const;
bool containsMachine(Standalone<StringRef> machineID) const {
return std::count(machineIDs.begin(), machineIDs.end(), machineID);
return std::find(machineIDs.begin(), machineIDs.end(), machineID) != machineIDs.end();
}
// Returns true iff team is found

View File

@ -53,7 +53,6 @@ struct TLogInterface {
RequestStream<struct TLogEnablePopRequest> enablePopRequest;
RequestStream<struct TLogSnapRequest> snapRequest;
RequestStream<struct TrackTLogRecoveryRequest> trackRecovery;
RequestStream<struct setClusterRecoveryVersionRequest> setClusterRecoveryVersion;
TLogInterface() {}
explicit TLogInterface(const LocalityData& locality)
@ -88,7 +87,6 @@ struct TLogInterface {
streams.push_back(snapRequest.getReceiver());
streams.push_back(peekStreamMessages.getReceiver(TaskPriority::TLogPeek));
streams.push_back(trackRecovery.getReceiver());
streams.push_back(setClusterRecoveryVersion.getReceiver());
FlowTransport::transport().addEndpoints(streams);
}
@ -119,8 +117,6 @@ struct TLogInterface {
RequestStream<struct TLogPeekStreamRequest>(peekMessages.getEndpoint().getAdjustedEndpoint(11));
trackRecovery =
RequestStream<struct TrackTLogRecoveryRequest>(peekMessages.getEndpoint().getAdjustedEndpoint(12));
setClusterRecoveryVersion = RequestStream<struct setClusterRecoveryVersionRequest>(
peekMessages.getEndpoint().getAdjustedEndpoint(13));
}
}
};
@ -225,18 +221,21 @@ struct TLogPeekRequest {
bool onlySpilled;
Optional<std::pair<UID, int>> sequence;
ReplyPromise<TLogPeekReply> reply;
Optional<Version> end; // when set, it is exclusive to the desired range
TLogPeekRequest(Version begin,
Tag tag,
bool returnIfBlocked,
bool onlySpilled,
Optional<std::pair<UID, int>> sequence = Optional<std::pair<UID, int>>())
: begin(begin), tag(tag), returnIfBlocked(returnIfBlocked), onlySpilled(onlySpilled), sequence(sequence) {}
Optional<std::pair<UID, int>> sequence = Optional<std::pair<UID, int>>(),
Optional<Version> end = Optional<Version>())
: begin(begin), tag(tag), returnIfBlocked(returnIfBlocked), onlySpilled(onlySpilled), sequence(sequence),
end(end) {}
TLogPeekRequest() {}
template <class Ar>
void serialize(Ar& ar) {
serializer(ar, begin, tag, returnIfBlocked, onlySpilled, sequence, reply);
serializer(ar, begin, tag, returnIfBlocked, onlySpilled, sequence, reply, end);
}
};
@ -470,19 +469,4 @@ struct TrackTLogRecoveryRequest {
}
};
struct setClusterRecoveryVersionRequest {
constexpr static FileIdentifier file_identifier = 6876464;
Version recoveryVersion;
ReplyPromise<Void> reply;
setClusterRecoveryVersionRequest() = default;
setClusterRecoveryVersionRequest(Version recoveryVersion) : recoveryVersion(recoveryVersion) {}
template <class Ar>
void serialize(Ar& ar) {
serializer(ar, recoveryVersion, reply);
}
};
#endif

View File

@ -66,6 +66,7 @@ struct OldLogData {
};
struct IdToInterf : ReferenceCounted<IdToInterf> {
Optional<Version> recoverAt = Optional<Version>();
std::map<UID, TLogInterface> lockInterf;
};
@ -245,7 +246,12 @@ struct TagPartitionedLogSystem final : ILogSystem, ReferenceCounted<TagPartition
// Specifically, the epoch is determined by looking up "dbgid" in tlog sets of generations.
// The returned cursor can peek data at the "tag" from the given "begin" version to that epoch's end version or
// the recovery version for the latest old epoch. For the current epoch, the cursor has no end version.
Reference<IPeekCursor> peekLogRouter(UID dbgid, Version begin, Tag tag, bool useSatellite) final;
// For the old epoch, the cursor is provided an end version.
Reference<IPeekCursor> peekLogRouter(UID dbgid,
Version begin,
Tag tag,
bool useSatellite,
Optional<Version> end) final;
Version getKnownCommittedVersion() final;
@ -392,10 +398,7 @@ struct TagPartitionedLogSystem final : ILogSystem, ReferenceCounted<TagPartition
std::vector<Reference<AsyncVar<OptionalInterface<TLogInterface>>>> tlogs,
Reference<AsyncVar<Version>> recoveredVersion);
ACTOR static Future<TLogLockResult> lockTLog(
UID myID,
Reference<AsyncVar<OptionalInterface<TLogInterface>>> tlog,
Optional<Reference<IdToInterf>> lockInterf = Optional<Reference<IdToInterf>>());
ACTOR static Future<TLogLockResult> lockTLog(UID myID, Reference<AsyncVar<OptionalInterface<TLogInterface>>> tlog);
template <class T>
static std::vector<T> getReadyNonError(std::vector<Future<T>> const& futures);
};

View File

@ -668,10 +668,11 @@ struct InitializeLogRouterRequest {
Reference<IReplicationPolicy> tLogPolicy;
int8_t locality;
ReplyPromise<struct TLogInterface> reply;
Optional<Version> recoverAt = Optional<Version>();
template <class Ar>
void serialize(Ar& ar) {
serializer(ar, recoveryCount, routerTag, startVersion, tLogLocalities, tLogPolicy, locality, reply);
serializer(ar, recoveryCount, routerTag, startVersion, tLogLocalities, tLogPolicy, locality, reply, recoverAt);
}
};

View File

@ -314,9 +314,6 @@ public:
art_iterator insert_if_absent(KeyRef& key, void* value, int* replaced);
void erase(const art_iterator& it);
uint64_t count() { return size; }
}; // art_tree
struct art_iterator {

View File

@ -611,7 +611,7 @@ ACTOR Future<Void> masterServerCxx(MasterInterface mi,
"Master: terminated due to backup worker failure",
probe::decoration::rare);
if (normalMasterErrors().count(err.code())) {
if (normalMasterErrors().contains(err.code())) {
TraceEvent("MasterTerminated", mi.id()).error(err);
return Void();
}

View File

@ -53,7 +53,6 @@ struct LatencyStats {
}
void reset() { *this = LatencyStats(); }
double count() { return n; }
double mean() { return x / n; }
double stddev() { return sqrt(x2 / n - (x / n) * (x / n)); }
};

View File

@ -6384,8 +6384,7 @@ ACTOR Future<Void> getMappedKeyValuesQ(StorageServer* data, GetMappedKeyValuesRe
state Version version = wait(waitForVersion(data, commitVersion, req.version, span.context));
data->counters.readVersionWaitSample.addMeasurement(g_network->timer() - queueWaitEnd);
data->checkTenantEntry(
req.version, req.tenantInfo, req.options.present() ? req.options.get().lockAware : false);
data->checkTenantEntry(version, req.tenantInfo, req.options.present() ? req.options.get().lockAware : false);
if (req.tenantInfo.hasTenant()) {
req.begin.setKeyUnlimited(req.begin.getKey().withPrefix(req.tenantInfo.prefix.get(), req.arena));
req.end.setKeyUnlimited(req.end.getKey().withPrefix(req.tenantInfo.prefix.get(), req.arena));
@ -8072,7 +8071,7 @@ ACTOR Future<Version> fetchChangeFeed(StorageServer* data,
if (g_network->isSimulated() && !g_simulator->restarted) {
// verify that the feed was actually destroyed and it's not an error in this inference logic.
// Restarting tests produce false positives because the validation state isn't kept across tests
ASSERT(g_simulator->validationData.allDestroyedChangeFeedIDs.count(changeFeedInfo->id.toString()));
ASSERT(g_simulator->validationData.allDestroyedChangeFeedIDs.contains(changeFeedInfo->id.toString()));
}
Key beginClearKey = changeFeedInfo->id.withPrefix(persistChangeFeedKeys.begin);
@ -8089,7 +8088,7 @@ ACTOR Future<Version> fetchChangeFeed(StorageServer* data,
changeFeedInfo->destroy(cleanupVersion);
if (data->uidChangeFeed.count(changeFeedInfo->id)) {
if (data->uidChangeFeed.contains(changeFeedInfo->id)) {
// only register range for cleanup if it has not been already cleaned up
data->changeFeedCleanupDurable[changeFeedInfo->id] = cleanupVersion;
}
@ -8308,7 +8307,7 @@ ACTOR Future<std::vector<Key>> fetchChangeFeedMetadata(StorageServer* data,
if (g_network->isSimulated() && !g_simulator->restarted) {
// verify that the feed was actually destroyed and it's not an error in this inference logic. Restarting
// tests produce false positives because the validation state isn't kept across tests
ASSERT(g_simulator->validationData.allDestroyedChangeFeedIDs.count(feed.first.toString()));
ASSERT(g_simulator->validationData.allDestroyedChangeFeedIDs.contains(feed.first.toString()));
}
Key beginClearKey = feed.first.withPrefix(persistChangeFeedKeys.begin);
@ -10583,7 +10582,13 @@ void changeServerKeysWithPhysicalShards(StorageServer* data,
.detailf("CurrentShard", "%016llx", shard->desiredShardId)
.detail("IsTSS", data->isTss())
.detail("Version", cVer);
throw data_move_conflict();
if (data->isTss() && g_network->isSimulated()) {
// TSS data move conflicts are expected in simulation and can be safely ignored
// by restarting the server.
throw please_reboot();
} else {
throw data_move_conflict();
}
} else {
TraceEvent(SevInfo, "CSKMoveInToSameShard", data->thisServerID)
.detail("DataMoveID", dataMoveId)
@ -12545,7 +12550,7 @@ ACTOR Future<Void> updateStorage(StorageServer* data) {
auto info = data->uidChangeFeed.find(feedFetchVersions[curFeed].first);
// Don't update if the feed is pending cleanup. Either it will get cleaned up and destroyed, or it will
// get fetched again, where the fetch version will get reset.
if (info != data->uidChangeFeed.end() && !data->changeFeedCleanupDurable.count(info->second->id)) {
if (info != data->uidChangeFeed.end() && !data->changeFeedCleanupDurable.contains(info->second->id)) {
if (feedFetchVersions[curFeed].second > info->second->durableFetchVersion.get()) {
info->second->durableFetchVersion.set(feedFetchVersions[curFeed].second);
}

View File

@ -26,6 +26,7 @@
#include <iterator>
#include <map>
#include <streambuf>
#include <numeric>
#include <fmt/ranges.h>
#include <toml.hpp>
@ -418,19 +419,19 @@ void CompoundWorkload::addFailureInjection(WorkloadRequest& work) {
for (auto const& w : workloads) {
w->disableFailureInjectionWorkloads(disabledWorkloads);
}
if (disabledWorkloads.count("all") > 0) {
if (disabledWorkloads.contains("all")) {
return;
}
auto& factories = IFailureInjectorFactory::factories();
DeterministicRandom random(sharedRandomNumber);
for (auto& factory : factories) {
auto workload = factory->create(*this);
if (disabledWorkloads.count(workload->description()) > 0) {
if (disabledWorkloads.contains(workload->description())) {
continue;
}
if (std::count(work.disabledFailureInjectionWorkloads.begin(),
work.disabledFailureInjectionWorkloads.end(),
workload->description()) > 0) {
if (std::find(work.disabledFailureInjectionWorkloads.begin(),
work.disabledFailureInjectionWorkloads.end(),
workload->description()) != work.disabledFailureInjectionWorkloads.end()) {
continue;
}
while (shouldInjectFailure(random, work, workload)) {
@ -967,6 +968,7 @@ ACTOR Future<Void> testerServerCore(TesterInterface interf,
.detail("ConsistencyCheckerId", work.sharedRandomNumber)
.detail("ClientId", work.clientId)
.detail("ClientCount", work.clientCount);
work.reply.sendError(consistency_check_urgent_duplicate_request());
} else if (consistencyCheckerUrgentTester.second.isValid() &&
!consistencyCheckerUrgentTester.second.isReady()) {
TraceEvent(SevWarnAlways, "ConsistencyCheckUrgent_TesterWorkloadConflict", interf.id())
@ -974,13 +976,15 @@ ACTOR Future<Void> testerServerCore(TesterInterface interf,
.detail("ArrivingConsistencyCheckerId", work.sharedRandomNumber)
.detail("ClientId", work.clientId)
.detail("ClientCount", work.clientCount);
work.reply.sendError(consistency_check_urgent_conflicting_request());
} else {
consistencyCheckerUrgentTester = std::make_pair(
work.sharedRandomNumber, testerServerConsistencyCheckerUrgentWorkload(work, ccr, dbInfo));
TraceEvent(SevInfo, "ConsistencyCheckUrgent_TesterWorkloadInitialized", interf.id())
.detail("ConsistencyCheckerId", consistencyCheckerUrgentTester.first)
.detail("ClientId", work.clientId)
.detail("ClientCount", work.clientCount);
}
consistencyCheckerUrgentTester = std::make_pair(
work.sharedRandomNumber, testerServerConsistencyCheckerUrgentWorkload(work, ccr, dbInfo));
TraceEvent(SevInfo, "ConsistencyCheckUrgent_TesterWorkloadInitialized", interf.id())
.detail("ConsistencyCheckerId", consistencyCheckerUrgentTester.first)
.detail("ClientId", work.clientId)
.detail("ClientCount", work.clientCount);
} else {
addWorkload.send(testerServerWorkload(work, ccr, dbInfo, locality));
}
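Editor's note: the reworked branch above now reports duplicate and conflicting urgent-check requests back to the caller as errors, and only starts the workload when neither applies. A simplified standalone model of that guard (not the actor code itself, and the exact conditions in the real code are not fully shown here):

#include <cassert>
#include <optional>

enum class Reply { Started, Duplicate, Conflict };

struct UrgentCheckGuard {
    std::optional<long> activeId; // id of the in-flight urgent check, if any
    bool activeDone = true;

    Reply submit(long id) {
        if (activeId && *activeId == id && !activeDone)
            return Reply::Duplicate; // same check re-submitted while still running
        if (activeId && !activeDone)
            return Reply::Conflict;  // a different check is still running
        activeId = id;
        activeDone = false;
        return Reply::Started;
    }
};

int main() {
    UrgentCheckGuard g;
    assert(g.submit(7) == Reply::Started);
    assert(g.submit(7) == Reply::Duplicate);
    assert(g.submit(8) == Reply::Conflict);
    g.activeDone = true; // previous work finished
    assert(g.submit(8) == Reply::Started);
    return 0;
}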
@ -1646,7 +1650,7 @@ Optional<Key> getKeyFromString(const std::string& str) {
}
const char first = str.at(i + 2);
const char second = str.at(i + 3);
if (parseCharMap.count(first) == 0 || parseCharMap.count(second) == 0) {
if (!parseCharMap.contains(first) || !parseCharMap.contains(second)) {
TraceEvent(g_network->isSimulated() ? SevError : SevWarnAlways,
"ConsistencyCheckUrgent_GetKeyFromStringError")
.setMaxEventLength(-1)
@ -1762,7 +1766,13 @@ std::unordered_map<int, std::vector<KeyRange>> makeTaskAssignment(Database cx,
std::vector<KeyRange> shardsToCheck,
int testersCount,
int round) {
ASSERT(testersCount >= 1);
std::unordered_map<int, std::vector<KeyRange>> assignment;
std::vector<size_t> shuffledIndices(testersCount);
std::iota(shuffledIndices.begin(), shuffledIndices.end(), 0); // creates [0, 1, ..., testersCount - 1]
deterministicRandom()->randomShuffle(shuffledIndices);
int batchSize = CLIENT_KNOBS->CONSISTENCY_CHECK_URGENT_BATCH_SHARD_COUNT;
int startingPoint = 0;
if (shardsToCheck.size() > batchSize * testersCount) {
@ -1777,7 +1787,17 @@ std::unordered_map<int, std::vector<KeyRange>> makeTaskAssignment(Database cx,
if (testerIdx > testersCount - 1) {
break; // Have filled up all testers
}
assignment[testerIdx].push_back(shardsToCheck[i]);
// When assigning a shard batch to a tester idx, there are certain edge cases which can result in the urgent
// consistency checker being stuck in an infinite loop. Examples:
// 1. if there is 1 remaining shard, and tester 0 consistently fails, we will still always pick tester 0
// 2. if there are 10 remaining shards, and batch size is 10, and tester 0 consistently fails, we will
// still always pick tester 0
// 3. if there are 20 remaining shards, and batch size is 10, and testers {0, 1} consistently fail, we will
// keep picking testers {0, 1}
// To avoid repeatedly picking the same testers even though they could be failing, shuffledIndices provides an
// indirection to a random tester idx. That way, each invocation of makeTaskAssignment won't
// result in the same task assignment for the class of edge cases mentioned above.
assignment[shuffledIndices[testerIdx]].push_back(shardsToCheck[i]);
}
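Editor's note: the comment above explains the shuffled-index indirection; the standalone sketch below shows the same iota-then-shuffle pattern in isolation, with simplified round-robin batching and placeholder types rather than the real task-assignment logic.

#include <algorithm>
#include <cstddef>
#include <numeric>
#include <random>
#include <vector>

std::vector<std::vector<int>> assign(const std::vector<int>& shards, size_t testers, std::mt19937& rng) {
    std::vector<size_t> shuffled(testers);
    std::iota(shuffled.begin(), shuffled.end(), 0); // [0, 1, ..., testers - 1]
    std::shuffle(shuffled.begin(), shuffled.end(), rng);

    std::vector<std::vector<int>> out(testers);
    for (size_t i = 0; i < shards.size(); ++i) {
        size_t logicalIdx = i % testers;     // logical slots fill in order...
        out[shuffled[logicalIdx]].push_back( // ...but map to random physical tester ids
            shards[i]);
    }
    return out;
}

int main() {
    std::mt19937 rng(42);
    auto a = assign({ 1, 2, 3, 4 }, 3, rng);
    return a.size() == 3 ? 0 : 1;
}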
std::unordered_map<int, std::vector<KeyRange>>::iterator assignIt;
for (assignIt = assignment.begin(); assignIt != assignment.end(); assignIt++) {
@ -3150,12 +3170,12 @@ ACTOR Future<Void> testExpectedErrorImpl(Future<Void> test,
}
// Make sure that no duplicate details were provided
ASSERT(details.count("TestDescription") == 0);
ASSERT(details.count("ExpectedError") == 0);
ASSERT(details.count("ExpectedErrorCode") == 0);
ASSERT(details.count("ActualError") == 0);
ASSERT(details.count("ActualErrorCode") == 0);
ASSERT(details.count("Reason") == 0);
ASSERT(!details.contains("TestDescription"));
ASSERT(!details.contains("ExpectedError"));
ASSERT(!details.contains("ExpectedErrorCode"));
ASSERT(!details.contains("ActualError"));
ASSERT(!details.contains("ActualErrorCode"));
ASSERT(!details.contains("Reason"));
for (auto& p : details) {
evt.detail(p.first.c_str(), p.second);

View File

@ -1405,7 +1405,7 @@ std::set<std::thread::id> profiledThreads;
// Returns whether or not a given thread should be profiled
int filter_in_thread(void* arg) {
return profiledThreads.count(std::this_thread::get_id()) > 0 ? 1 : 0;
return profiledThreads.contains(std::this_thread::get_id()) ? 1 : 0;
}
#endif
@ -3329,7 +3329,7 @@ ACTOR Future<Void> workerServer(Reference<IClusterConnectionRecord> connRecord,
}
when(state WorkerSnapRequest snapReq = waitNext(interf.workerSnapReq.getFuture())) {
std::string snapReqKey = snapReq.snapUID.toString() + snapReq.role.toString();
if (snapReqResultMap.count(snapReqKey)) {
if (snapReqResultMap.contains(snapReqKey)) {
CODE_PROBE(true, "Worker received a duplicate finished snapshot request", probe::decoration::rare);
auto result = snapReqResultMap[snapReqKey];
result.isError() ? snapReq.reply.sendError(result.getError()) : snapReq.reply.send(result.get());
@ -3337,7 +3337,7 @@ ACTOR Future<Void> workerServer(Reference<IClusterConnectionRecord> connRecord,
.detail("SnapUID", snapReq.snapUID.toString())
.detail("Role", snapReq.role)
.detail("Result", result.isError() ? result.getError().code() : success().code());
} else if (snapReqMap.count(snapReqKey)) {
} else if (snapReqMap.contains(snapReqKey)) {
CODE_PROBE(true, "Worker received a duplicate ongoing snapshot request", probe::decoration::rare);
TraceEvent("RetryOngoingWorkerSnapRequest")
.detail("SnapUID", snapReq.snapUID.toString())

View File

@ -50,6 +50,11 @@ struct BackupAndParallelRestoreCorrectnessWorkload : TestWorkload {
std::map<Standalone<KeyRef>, Standalone<ValueRef>> dbKVs;
// This workload is not compatible with the RandomRangeLock workload because they would race on the locked range
void disableFailureInjectionWorkloads(std::set<std::string>& out) const override {
out.insert({ "RandomRangeLock" });
}
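Editor's note: this opt-out works together with the addFailureInjection change earlier in this comparison, which skips any injector whose description() appears in the disabled set. A standalone sketch of that mechanism with made-up class names:

#include <cassert>
#include <set>
#include <string>

struct Workload {
    virtual ~Workload() = default;
    virtual std::string description() const = 0;
    virtual void disableFailureInjectionWorkloads(std::set<std::string>& out) const {}
};

struct BackupLike : Workload {
    std::string description() const override { return "BackupLike"; }
    void disableFailureInjectionWorkloads(std::set<std::string>& out) const override {
        out.insert("RandomRangeLock"); // would race with the workload's locked ranges
    }
};

bool shouldInject(const Workload& running, const std::string& injector) {
    std::set<std::string> disabled;
    running.disableFailureInjectionWorkloads(disabled);
    return !disabled.contains(injector);
}

int main() {
    BackupLike b;
    assert(!shouldInject(b, "RandomRangeLock"));
    assert(shouldInject(b, "SomeOtherInjector"));
    return 0;
}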
BackupAndParallelRestoreCorrectnessWorkload(WorkloadContext const& wcx) : TestWorkload(wcx) {
locked.set(sharedRandomNumber % 2);
backupAfter = getOption(options, "backupAfter"_sr, 10.0);

View File

@ -52,6 +52,11 @@ struct BackupAndRestoreCorrectnessWorkload : TestWorkload {
bool defaultBackup;
Optional<std::string> encryptionKeyFileName;
// This workload is not compatible with the RandomRangeLock workload because they would race on the locked range
void disableFailureInjectionWorkloads(std::set<std::string>& out) const override {
out.insert({ "RandomRangeLock" });
}
BackupAndRestoreCorrectnessWorkload(WorkloadContext const& wcx) : TestWorkload(wcx) {
locked.set(sharedRandomNumber % 2);
backupAfter = getOption(options, "backupAfter"_sr, 10.0);

View File

@ -49,6 +49,11 @@ struct BackupToDBCorrectnessWorkload : TestWorkload {
bool defaultBackup;
UID destUid;
// This workload is not compatible with the RandomRangeLock workload because they would race on the locked range
void disableFailureInjectionWorkloads(std::set<std::string>& out) const override {
out.insert({ "RandomRangeLock" });
}
BackupToDBCorrectnessWorkload(WorkloadContext const& wcx) : TestWorkload(wcx) {
locked.set(sharedRandomNumber % 2);
backupAfter = getOption(options, "backupAfter"_sr, 10.0);

View File

@ -38,6 +38,11 @@ struct BackupToDBUpgradeWorkload : TestWorkload {
Standalone<VectorRef<KeyRangeRef>> backupRanges;
Database extraDB;
// This workload is not compatible with the RandomRangeLock workload because they would race on the locked range
void disableFailureInjectionWorkloads(std::set<std::string>& out) const override {
out.insert({ "RandomRangeLock" });
}
BackupToDBUpgradeWorkload(WorkloadContext const& wcx) : TestWorkload(wcx) {
backupAfter = getOption(options, "backupAfter"_sr, deterministicRandom()->random01() * 10.0);
backupPrefix = getOption(options, "backupPrefix"_sr, StringRef());

View File

@ -831,7 +831,7 @@ struct BlobGranuleRangesWorkload : TestWorkload {
int op = OP_COUNT;
loop {
op = deterministicRandom()->randomInt(0, OP_COUNT);
if (!excludedTypes.count((UnitTestTypes)op)) {
if (!excludedTypes.contains((UnitTestTypes)op)) {
break;
}
loopTries--;

View File

@ -148,7 +148,7 @@ struct BulkSetupWorkload : TestWorkload {
for (i = 0; i < workload->tenants.size(); i++) {
state Reference<Tenant> tenant = workload->tenants[i];
std::vector<KeyValueRef> keysForCurTenant = wait(getKVPairsForTenant(workload, tenant, cx));
if (tenantIdsToDrop.count(tenant->id())) {
if (tenantIdsToDrop.contains(tenant->id())) {
// Don't check the tenants that the EKP would throw errors for
continue;
}

View File

@ -0,0 +1,281 @@
#include <cstdint>
#include "fdbclient/DatabaseContext.h"
#include "fdbclient/FDBTypes.h"
#include "fdbclient/Status.h"
#include "fdbclient/StatusClient.h"
#include "fdbrpc/PerfMetric.h"
#include "fdbrpc/SimulatorProcessInfo.h"
#include "fdbrpc/simulator.h"
#include "fdbserver/ServerDBInfo.actor.h"
#include "fdbserver/workloads/workloads.actor.h"
#include "flow/Error.h"
#include "flow/IPAddress.h"
#include "flow/IRandom.h"
#include "flow/Optional.h"
#include "flow/Trace.h"
#include "flow/flow.h"
#include "flow/genericactors.actor.h"
#include "flow/actorcompiler.h" // This must be the last #include.
struct ClogRemoteTLog : TestWorkload {
static constexpr auto NAME = "ClogRemoteTLog";
bool enabled{ false };
double testDuration{ 0.0 };
double lagMeasurementFrequency{ 0 };
double clogInitDelay{ 0 };
double clogDuration{ 0 };
double lagThreshold{ 0 };
bool doStatePathCheck{ true };
enum TestState { TEST_INIT, SS_LAG_NORMAL, SS_LAG_HIGH };
// Currently, the only valid state path is: TEST_INIT -> SS_LAG_NORMAL -> SS_LAG_HIGH -> SS_LAG_NORMAL
const std::vector<std::vector<TestState>> expectedStatePaths{
{ TEST_INIT, SS_LAG_NORMAL, SS_LAG_HIGH, SS_LAG_NORMAL }
};
std::vector<TestState>
actualStatePath; // to be populated when the test runs, and finally checked at the end in check()
ClogRemoteTLog(const WorkloadContext& wctx) : TestWorkload(wctx) {
enabled =
(clientId == 0); // only run this workload on a single client: the first one (by client id)
testDuration = getOption(options, "testDuration"_sr, 120);
lagMeasurementFrequency = getOption(options, "lagMeasurementFrequency"_sr, 5);
clogInitDelay = getOption(options, "clogInitDelay"_sr, 10);
clogDuration = getOption(options, "clogDuration"_sr, 110);
lagThreshold = getOption(options, "lagThreshold"_sr, 20);
}
Future<Void> setup(const Database& db) override { return Void(); }
Future<Void> start(const Database& db) override {
if (!g_network->isSimulated() || !enabled) {
return Void();
}
return timeout(reportErrors(workload(this, db), "ClogRemoteTLogError"), testDuration, Void());
}
Future<bool> check(const Database& db) override {
if (!g_network->isSimulated() || !enabled) {
return true;
}
// First, emit trace event for potential debugging if test fails
auto stateToStr = [](const TestState testState) -> std::string {
switch (testState) {
case (TEST_INIT): {
return "TEST_INIT";
}
case (SS_LAG_NORMAL): {
return "SS_LAG_NORMAL";
}
case (SS_LAG_HIGH): {
return "SS_LAG_HIGH";
}
default: {
ASSERT(false);
return "";
}
};
};
auto print = [&stateToStr](const std::vector<TestState>& path) {
std::string ret;
for (size_t i = 0; i < path.size(); ++i) {
const auto pathState = path[i];
ret += stateToStr(pathState) + (i < path.size() - 1 ? std::string{ " -> " } : std::string{ "" });
}
return ret;
};
TraceEvent("ClogRemoteTLogCheck")
.detail("ActualStatePath", print(actualStatePath))
.detail("DoStatePathCheck", doStatePathCheck ? "True" : "False");
// Then, do the actual check
if (!doStatePathCheck) {
return true;
}
auto match = [](const std::vector<TestState>& path1, const std::vector<TestState>& path2) -> bool {
if (path1.size() != path2.size()) {
return false;
}
for (size_t i = 0; i < path1.size(); ++i) {
if (path1[i] != path2[i]) {
return false;
}
}
return true;
};
for (const auto& expectedPath : expectedStatePaths) {
if (match(actualStatePath, expectedPath)) {
return true;
}
}
return false;
}
void getMetrics(std::vector<PerfMetric>& m) override {}
ACTOR static Future<Optional<double>> measureMaxSSLag(ClogRemoteTLog* self, Database db) {
StatusObject status = wait(StatusClient::statusFetcher(db));
StatusObjectReader reader(status);
StatusObjectReader cluster;
StatusObjectReader processMap;
if (!reader.get("cluster", cluster)) {
TraceEvent("NoCluster");
return Optional<double>();
}
if (!cluster.get("processes", processMap)) {
TraceEvent("NoProcesses");
return Optional<double>();
}
double maxSSLag{ -1 };
for (auto p : processMap.obj()) {
StatusObjectReader process(p.second);
if (process.has("roles")) {
StatusArray roles = p.second.get_obj()["roles"].get_array();
for (StatusObjectReader role : roles) {
ASSERT(role.has("role"));
if (role.has("data_lag")) {
ASSERT(role["role"].get_str() == "storage");
auto dataLag = role["data_lag"].get_obj();
ASSERT(dataLag.contains("seconds"));
ASSERT(dataLag.contains("versions"));
TraceEvent("SSDataLag")
.detail("Process", p.first)
.detail("Role", role["role"].get_str())
.detail("SecondLag", dataLag["seconds"].get_value<double>())
.detail("VersionLag", dataLag["versions"].get_int64());
maxSSLag = std::max(maxSSLag, dataLag["seconds"].get_value<double>());
}
}
}
}
TraceEvent("MaxSSDataLag")
.detail("SecondLag", maxSSLag == -1 ? "none" : std::to_string(maxSSLag))
.detail("SecondThreshold", self->lagThreshold);
if (maxSSLag == -1) {
return Optional<double>();
} else {
return maxSSLag;
}
}
ACTOR static Future<std::vector<IPAddress>> getRemoteSSIPs(Database db) {
state std::vector<IPAddress> ret;
state Transaction tr(db);
loop {
try {
tr.setOption(FDBTransactionOptions::READ_SYSTEM_KEYS);
tr.setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);
tr.setOption(FDBTransactionOptions::LOCK_AWARE);
std::vector<std::pair<StorageServerInterface, ProcessClass>> results =
wait(NativeAPI::getServerListAndProcessClasses(&tr));
for (auto& [ssi, p] : results) {
if (ssi.locality.dcId().present() && ssi.locality.dcId().get() == g_simulator->remoteDcId) {
ret.push_back(ssi.address().ip);
}
}
return ret;
} catch (Error& e) {
if (e.code() != error_code_actor_cancelled) {
TraceEvent("GetRemoteSSIPsError").error(e);
}
wait(tr.onError(e));
}
}
}
static std::vector<IPAddress> getRemoteTLogIPs(ClogRemoteTLog* self) {
std::vector<IPAddress> remoteTLogIPs;
for (const auto& tLogSet : self->dbInfo->get().logSystemConfig.tLogs) {
if (tLogSet.isLocal) {
continue;
}
for (const auto& tLog : tLogSet.tLogs) {
remoteTLogIPs.push_back(tLog.interf().address().ip);
}
}
return remoteTLogIPs;
}
ACTOR static Future<Void> clogRemoteTLog(ClogRemoteTLog* self, Database db) {
wait(delay(self->clogInitDelay));
// Ensure db is ready
while (self->dbInfo->get().recoveryState < RecoveryState::FULLY_RECOVERED) {
wait(self->dbInfo->onChange());
}
// Then, get all remote TLog IPs
state std::vector<IPAddress> remoteTLogIPs = getRemoteTLogIPs(self);
ASSERT(!remoteTLogIPs.empty());
// Then, get all remote SS IPs
std::vector<IPAddress> remoteSSIPs = wait(getRemoteSSIPs(db));
ASSERT(!remoteSSIPs.empty());
// Then, attempt to find a remote tlog that is not on the same machine as a remote SS
Optional<IPAddress> remoteTLogIP_temp;
for (const auto& ip : remoteTLogIPs) {
if (std::find(remoteSSIPs.begin(), remoteSSIPs.end(), ip) == remoteSSIPs.end()) {
remoteTLogIP_temp = ip;
}
}
// If we can find such a machine that is just running a remote tlog, then we will do extra checking at the end
// (in the check() method). If we can't find such a machine, we pick a random machine and still run the test to
// ensure no crashes or correctness issues are observed.
state IPAddress remoteTLogIP;
if (remoteTLogIP_temp.present()) {
remoteTLogIP = remoteTLogIP_temp.get();
} else {
remoteTLogIP = remoteTLogIPs[deterministicRandom()->randomInt(0, remoteTLogIPs.size())];
self->doStatePathCheck = false;
}
// Then, find all processes that the remote tlog will have a degraded connection with
IPAddress cc = self->dbInfo->get().clusterInterface.address().ip;
state std::vector<IPAddress> processes;
for (const auto& process : g_simulator->getAllProcesses()) {
const auto& ip = process->address.ip;
if (process->startingClass != ProcessClass::TesterClass && ip != cc) {
processes.push_back(ip);
}
}
ASSERT(!processes.empty());
// Finally, start the clogging between the remote tlog and the processes calculated above
for (const auto& ip : processes) {
if (remoteTLogIP == ip) {
continue;
}
TraceEvent("ClogRemoteTLog").detail("SrcIP", remoteTLogIP).detail("DstIP", ip);
g_simulator->clogPair(remoteTLogIP, ip, self->testDuration);
g_simulator->clogPair(ip, remoteTLogIP, self->testDuration);
}
wait(Never());
return Void();
}
ACTOR Future<Void> workload(ClogRemoteTLog* self, Database db) {
state Future<Void> clog = self->clogRemoteTLog(self, db);
state TestState testState = TestState::TEST_INIT;
self->actualStatePath.push_back(testState);
loop {
wait(delay(self->lagMeasurementFrequency));
Optional<double> ssLag = wait(measureMaxSSLag(self, db));
if (!ssLag.present()) {
continue;
}
TestState localState = ssLag.get() < self->lagThreshold ? TestState::SS_LAG_NORMAL : TestState::SS_LAG_HIGH;
// Anytime a state transition happens, append to the state path
if (localState != testState) {
self->actualStatePath.push_back(localState);
testState = localState;
}
}
}
};
WorkloadFactory<ClogRemoteTLog> ClogRemoteTLogWorkloadFactory;

View File

@ -432,9 +432,9 @@ struct ConfigureDatabaseWorkload : TestWorkload {
int storeType = 0;
while (true) {
storeType = deterministicRandom()->randomInt(0, 6);
if (std::count(self->storageEngineExcludeTypes.begin(),
self->storageEngineExcludeTypes.end(),
storeType) == 0) {
if (std::find(self->storageEngineExcludeTypes.begin(),
self->storageEngineExcludeTypes.end(),
storeType) == self->storageEngineExcludeTypes.end()) {
break;
}
}

View File

@ -38,6 +38,11 @@ struct ConflictRangeWorkload : TestWorkload {
std::vector<Future<Void>> clients;
PerfIntCounter withConflicts, withoutConflicts, retries;
// This workload is not compatible with the RandomRangeLock workload because RangeLock transactions trigger conflicts
void disableFailureInjectionWorkloads(std::set<std::string>& out) const override {
out.insert({ "RandomRangeLock" });
}
ConflictRangeWorkload(WorkloadContext const& wcx)
: TestWorkload(wcx), withConflicts("WithConflicts"), withoutConflicts("withoutConflicts"), retries("Retries") {
minOperationsPerTransaction = getOption(options, "minOperationsPerTransaction"_sr, 2);
@ -197,7 +202,7 @@ struct ConflictRangeWorkload : TestWorkload {
if (randomSets) {
for (int j = 0; j < 5; j++) {
int proposedKey = deterministicRandom()->randomInt(0, self->maxKeySpace);
if (!insertedSet.count(proposedKey)) {
if (!insertedSet.contains(proposedKey)) {
TraceEvent("ConflictRangeSet").detail("Key", proposedKey);
insertedSet.insert(proposedKey);
tr2.set(StringRef(format("%010d", proposedKey)),
@ -208,7 +213,7 @@ struct ConflictRangeWorkload : TestWorkload {
} else {
for (int j = 0; j < 5; j++) {
int proposedKey = deterministicRandom()->randomInt(0, self->maxKeySpace);
if (insertedSet.count(proposedKey)) {
if (insertedSet.contains(proposedKey)) {
TraceEvent("ConflictRangeClear").detail("Key", proposedKey);
insertedSet.erase(proposedKey);
tr2.clear(StringRef(format("%010d", proposedKey)));
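The count-to-contains changes that recur through the rest of this diff rely on the C++20 contains() member of the standard associative containers; for a std::set or std::map it is equivalent to count(x) != 0 but states the membership intent directly. A minimal illustration (names are illustrative only):

#include <set>

bool alreadyInserted(const std::set<int>& insertedSet, int proposedKey) {
    // Equivalent to insertedSet.count(proposedKey) != 0, but reads as a membership test (C++20).
    return insertedSet.contains(proposedKey);
}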

View File

@ -1035,7 +1035,7 @@ struct ConsistencyCheckWorkload : TestWorkload {
}
for (auto& ssi : servers) {
ASSERT(id_ssi.count(ssi.id()));
ASSERT(id_ssi.contains(ssi.id()));
}
return true;
}
@ -1180,7 +1180,7 @@ struct ConsistencyCheckWorkload : TestWorkload {
itr->interf.secondaryAddress().present() ? itr->interf.secondaryAddress().get().toString()
: "Unset");
for (const auto& id : stores.get()) {
if (statefulProcesses[itr->interf.address()].count(id)) {
if (statefulProcesses[itr->interf.address()].contains(id)) {
continue;
}
// For extra data store
@ -1200,7 +1200,7 @@ struct ConsistencyCheckWorkload : TestWorkload {
.detail("ProcessPrimaryAddress", p->address)
.detail("ProcessAddresses", p->addresses.toString())
.detail("DataStoreID", id)
.detail("Protected", g_simulator->protectedAddresses.count(itr->interf.address()))
.detail("Protected", g_simulator->protectedAddresses.contains(itr->interf.address()))
.detail("Reliable", p->isReliable())
.detail("ReliableInfo", p->getReliableInfo())
.detail("KillOrRebootProcess", p->address);
@ -1323,7 +1323,7 @@ struct ConsistencyCheckWorkload : TestWorkload {
all[i]->startingClass != ProcessClass::TesterClass &&
all[i]->startingClass != ProcessClass::SimHTTPServerClass &&
all[i]->protocolVersion == g_network->protocolVersion()) {
if (!workerAddresses.count(all[i]->address)) {
if (!workerAddresses.contains(all[i]->address)) {
TraceEvent("ConsistencyCheck_WorkerMissingFromList").detail("Addr", all[i]->address);
return false;
}
@ -1378,7 +1378,7 @@ struct ConsistencyCheckWorkload : TestWorkload {
for (const auto& addr : oldCoordinators) {
auto findResult = addr_locality.find(addr);
if (findResult != addr_locality.end()) {
if (checkDuplicates.count(findResult->second.zoneId())) {
if (checkDuplicates.contains(findResult->second.zoneId())) {
TraceEvent("ConsistencyCheck_BadCoordinator")
.detail("Addr", addr)
.detail("NotFound", findResult == addr_locality.end());
@ -1410,7 +1410,7 @@ struct ConsistencyCheckWorkload : TestWorkload {
for (const auto& worker : allWorkers) {
allWorkerProcessMap[worker.interf.address()] = worker;
Optional<Key> dc = worker.interf.locality.dcId();
if (!dcToAllClassTypes.count(dc))
if (!dcToAllClassTypes.contains(dc))
dcToAllClassTypes.insert({});
dcToAllClassTypes[dc].push_back(worker.processClass.classType());
}
@ -1420,17 +1420,17 @@ struct ConsistencyCheckWorkload : TestWorkload {
for (const auto& worker : nonExcludedWorkers) {
nonExcludedWorkerProcessMap[worker.interf.address()] = worker;
Optional<Key> dc = worker.interf.locality.dcId();
if (!dcToNonExcludedClassTypes.count(dc))
if (!dcToNonExcludedClassTypes.contains(dc))
dcToNonExcludedClassTypes.insert({});
dcToNonExcludedClassTypes[dc].push_back(worker.processClass.classType());
}
if (!allWorkerProcessMap.count(db.clusterInterface.clientInterface.address())) {
if (!allWorkerProcessMap.contains(db.clusterInterface.clientInterface.address())) {
TraceEvent("ConsistencyCheck_CCNotInWorkerList")
.detail("CCAddress", db.clusterInterface.clientInterface.address().toString());
return false;
}
if (!allWorkerProcessMap.count(db.master.address())) {
if (!allWorkerProcessMap.contains(db.master.address())) {
TraceEvent("ConsistencyCheck_MasterNotInWorkerList")
.detail("MasterAddress", db.master.address().toString());
return false;
@ -1478,13 +1478,13 @@ struct ConsistencyCheckWorkload : TestWorkload {
// Check CC
ProcessClass::Fitness bestClusterControllerFitness =
getBestAvailableFitness(dcToNonExcludedClassTypes[ccDcId], ProcessClass::ClusterController);
if (!nonExcludedWorkerProcessMap.count(db.clusterInterface.clientInterface.address()) ||
if (!nonExcludedWorkerProcessMap.contains(db.clusterInterface.clientInterface.address()) ||
nonExcludedWorkerProcessMap[db.clusterInterface.clientInterface.address()].processClass.machineClassFitness(
ProcessClass::ClusterController) != bestClusterControllerFitness) {
TraceEvent("ConsistencyCheck_ClusterControllerNotBest")
.detail("BestClusterControllerFitness", bestClusterControllerFitness)
.detail("ExistingClusterControllerFit",
nonExcludedWorkerProcessMap.count(db.clusterInterface.clientInterface.address())
nonExcludedWorkerProcessMap.contains(db.clusterInterface.clientInterface.address())
? nonExcludedWorkerProcessMap[db.clusterInterface.clientInterface.address()]
.processClass.machineClassFitness(ProcessClass::ClusterController)
: -1);
@ -1501,14 +1501,14 @@ struct ConsistencyCheckWorkload : TestWorkload {
}
}
if ((!nonExcludedWorkerProcessMap.count(db.master.address()) &&
if ((!nonExcludedWorkerProcessMap.contains(db.master.address()) &&
bestMasterFitness != ProcessClass::ExcludeFit) ||
nonExcludedWorkerProcessMap[db.master.address()].processClass.machineClassFitness(ProcessClass::Master) !=
bestMasterFitness) {
TraceEvent("ConsistencyCheck_MasterNotBest")
.detail("BestMasterFitness", bestMasterFitness)
.detail("ExistingMasterFit",
nonExcludedWorkerProcessMap.count(db.master.address())
nonExcludedWorkerProcessMap.contains(db.master.address())
? nonExcludedWorkerProcessMap[db.master.address()].processClass.machineClassFitness(
ProcessClass::Master)
: -1);
@ -1519,13 +1519,13 @@ struct ConsistencyCheckWorkload : TestWorkload {
ProcessClass::Fitness bestCommitProxyFitness =
getBestAvailableFitness(dcToNonExcludedClassTypes[masterDcId], ProcessClass::CommitProxy);
for (const auto& commitProxy : db.client.commitProxies) {
if (!nonExcludedWorkerProcessMap.count(commitProxy.address()) ||
if (!nonExcludedWorkerProcessMap.contains(commitProxy.address()) ||
nonExcludedWorkerProcessMap[commitProxy.address()].processClass.machineClassFitness(
ProcessClass::CommitProxy) != bestCommitProxyFitness) {
TraceEvent("ConsistencyCheck_CommitProxyNotBest")
.detail("BestCommitProxyFitness", bestCommitProxyFitness)
.detail("ExistingCommitProxyFitness",
nonExcludedWorkerProcessMap.count(commitProxy.address())
nonExcludedWorkerProcessMap.contains(commitProxy.address())
? nonExcludedWorkerProcessMap[commitProxy.address()].processClass.machineClassFitness(
ProcessClass::CommitProxy)
: -1);
@ -1537,13 +1537,13 @@ struct ConsistencyCheckWorkload : TestWorkload {
ProcessClass::Fitness bestGrvProxyFitness =
getBestAvailableFitness(dcToNonExcludedClassTypes[masterDcId], ProcessClass::GrvProxy);
for (const auto& grvProxy : db.client.grvProxies) {
if (!nonExcludedWorkerProcessMap.count(grvProxy.address()) ||
if (!nonExcludedWorkerProcessMap.contains(grvProxy.address()) ||
nonExcludedWorkerProcessMap[grvProxy.address()].processClass.machineClassFitness(
ProcessClass::GrvProxy) != bestGrvProxyFitness) {
TraceEvent("ConsistencyCheck_GrvProxyNotBest")
.detail("BestGrvProxyFitness", bestGrvProxyFitness)
.detail("ExistingGrvProxyFitness",
nonExcludedWorkerProcessMap.count(grvProxy.address())
nonExcludedWorkerProcessMap.contains(grvProxy.address())
? nonExcludedWorkerProcessMap[grvProxy.address()].processClass.machineClassFitness(
ProcessClass::GrvProxy)
: -1);
@ -1555,13 +1555,13 @@ struct ConsistencyCheckWorkload : TestWorkload {
ProcessClass::Fitness bestResolverFitness =
getBestAvailableFitness(dcToNonExcludedClassTypes[masterDcId], ProcessClass::Resolver);
for (const auto& resolver : db.resolvers) {
if (!nonExcludedWorkerProcessMap.count(resolver.address()) ||
if (!nonExcludedWorkerProcessMap.contains(resolver.address()) ||
nonExcludedWorkerProcessMap[resolver.address()].processClass.machineClassFitness(
ProcessClass::Resolver) != bestResolverFitness) {
TraceEvent("ConsistencyCheck_ResolverNotBest")
.detail("BestResolverFitness", bestResolverFitness)
.detail("ExistingResolverFitness",
nonExcludedWorkerProcessMap.count(resolver.address())
nonExcludedWorkerProcessMap.contains(resolver.address())
? nonExcludedWorkerProcessMap[resolver.address()].processClass.machineClassFitness(
ProcessClass::Resolver)
: -1);
@ -1576,7 +1576,7 @@ struct ConsistencyCheckWorkload : TestWorkload {
for (auto& tlogSet : db.logSystemConfig.tLogs) {
if (!tlogSet.isLocal && tlogSet.logRouters.size()) {
for (auto& logRouter : tlogSet.logRouters) {
if (!nonExcludedWorkerProcessMap.count(logRouter.interf().address())) {
if (!nonExcludedWorkerProcessMap.contains(logRouter.interf().address())) {
TraceEvent("ConsistencyCheck_LogRouterNotInNonExcludedWorkers")
.detail("Id", logRouter.id());
return false;
@ -1596,14 +1596,14 @@ struct ConsistencyCheckWorkload : TestWorkload {
ProcessClass::Fitness fitnessLowerBound =
allWorkerProcessMap[db.master.address()].processClass.machineClassFitness(ProcessClass::DataDistributor);
if (db.distributor.present() &&
(!nonExcludedWorkerProcessMap.count(db.distributor.get().address()) ||
(!nonExcludedWorkerProcessMap.contains(db.distributor.get().address()) ||
nonExcludedWorkerProcessMap[db.distributor.get().address()].processClass.machineClassFitness(
ProcessClass::DataDistributor) > fitnessLowerBound)) {
TraceEvent("ConsistencyCheck_DistributorNotBest")
.detail("DataDistributorFitnessLowerBound", fitnessLowerBound)
.detail(
"ExistingDistributorFitness",
nonExcludedWorkerProcessMap.count(db.distributor.get().address())
nonExcludedWorkerProcessMap.contains(db.distributor.get().address())
? nonExcludedWorkerProcessMap[db.distributor.get().address()].processClass.machineClassFitness(
ProcessClass::DataDistributor)
: -1);
@ -1612,14 +1612,14 @@ struct ConsistencyCheckWorkload : TestWorkload {
// Check Ratekeeper
if (db.ratekeeper.present() &&
(!nonExcludedWorkerProcessMap.count(db.ratekeeper.get().address()) ||
(!nonExcludedWorkerProcessMap.contains(db.ratekeeper.get().address()) ||
nonExcludedWorkerProcessMap[db.ratekeeper.get().address()].processClass.machineClassFitness(
ProcessClass::Ratekeeper) > fitnessLowerBound)) {
TraceEvent("ConsistencyCheck_RatekeeperNotBest")
.detail("BestRatekeeperFitness", fitnessLowerBound)
.detail(
"ExistingRatekeeperFitness",
nonExcludedWorkerProcessMap.count(db.ratekeeper.get().address())
nonExcludedWorkerProcessMap.contains(db.ratekeeper.get().address())
? nonExcludedWorkerProcessMap[db.ratekeeper.get().address()].processClass.machineClassFitness(
ProcessClass::Ratekeeper)
: -1);
@ -1628,14 +1628,14 @@ struct ConsistencyCheckWorkload : TestWorkload {
// Check BlobManager
if (config.blobGranulesEnabled && db.blobManager.present() &&
(!nonExcludedWorkerProcessMap.count(db.blobManager.get().address()) ||
(!nonExcludedWorkerProcessMap.contains(db.blobManager.get().address()) ||
nonExcludedWorkerProcessMap[db.blobManager.get().address()].processClass.machineClassFitness(
ProcessClass::BlobManager) > fitnessLowerBound)) {
TraceEvent("ConsistencyCheck_BlobManagerNotBest")
.detail("BestBlobManagerFitness", fitnessLowerBound)
.detail(
"ExistingBlobManagerFitness",
nonExcludedWorkerProcessMap.count(db.blobManager.get().address())
nonExcludedWorkerProcessMap.contains(db.blobManager.get().address())
? nonExcludedWorkerProcessMap[db.blobManager.get().address()].processClass.machineClassFitness(
ProcessClass::BlobManager)
: -1);
@ -1644,14 +1644,14 @@ struct ConsistencyCheckWorkload : TestWorkload {
// Check BlobMigrator
if (config.blobGranulesEnabled && db.blobMigrator.present() &&
(!nonExcludedWorkerProcessMap.count(db.blobMigrator.get().address()) ||
(!nonExcludedWorkerProcessMap.contains(db.blobMigrator.get().address()) ||
nonExcludedWorkerProcessMap[db.blobMigrator.get().address()].processClass.machineClassFitness(
ProcessClass::BlobMigrator) > fitnessLowerBound)) {
TraceEvent("ConsistencyCheck_BlobMigratorNotBest")
.detail("BestBlobMigratorFitness", fitnessLowerBound)
.detail(
"ExistingBlobMigratorFitness",
nonExcludedWorkerProcessMap.count(db.blobMigrator.get().address())
nonExcludedWorkerProcessMap.contains(db.blobMigrator.get().address())
? nonExcludedWorkerProcessMap[db.blobMigrator.get().address()].processClass.machineClassFitness(
ProcessClass::BlobMigrator)
: -1);
@ -1660,13 +1660,13 @@ struct ConsistencyCheckWorkload : TestWorkload {
// Check EncryptKeyProxy
if (config.encryptionAtRestMode.isEncryptionEnabled() && db.client.encryptKeyProxy.present() &&
(!nonExcludedWorkerProcessMap.count(db.client.encryptKeyProxy.get().address()) ||
(!nonExcludedWorkerProcessMap.contains(db.client.encryptKeyProxy.get().address()) ||
nonExcludedWorkerProcessMap[db.client.encryptKeyProxy.get().address()].processClass.machineClassFitness(
ProcessClass::EncryptKeyProxy) > fitnessLowerBound)) {
TraceEvent("ConsistencyCheck_EncryptKeyProxyNotBest")
.detail("BestEncryptKeyProxyFitness", fitnessLowerBound)
.detail("ExistingEncryptKeyProxyFitness",
nonExcludedWorkerProcessMap.count(db.client.encryptKeyProxy.get().address())
nonExcludedWorkerProcessMap.contains(db.client.encryptKeyProxy.get().address())
? nonExcludedWorkerProcessMap[db.client.encryptKeyProxy.get().address()]
.processClass.machineClassFitness(ProcessClass::EncryptKeyProxy)
: -1);
@ -1675,13 +1675,13 @@ struct ConsistencyCheckWorkload : TestWorkload {
// Check ConsistencyScan
if (db.consistencyScan.present() &&
(!nonExcludedWorkerProcessMap.count(db.consistencyScan.get().address()) ||
(!nonExcludedWorkerProcessMap.contains(db.consistencyScan.get().address()) ||
nonExcludedWorkerProcessMap[db.consistencyScan.get().address()].processClass.machineClassFitness(
ProcessClass::ConsistencyScan) > fitnessLowerBound)) {
TraceEvent("ConsistencyCheck_ConsistencyScanNotBest")
.detail("BestConsistencyScanFitness", fitnessLowerBound)
.detail("ExistingConsistencyScanFitness",
nonExcludedWorkerProcessMap.count(db.consistencyScan.get().address())
nonExcludedWorkerProcessMap.contains(db.consistencyScan.get().address())
? nonExcludedWorkerProcessMap[db.consistencyScan.get().address()]
.processClass.machineClassFitness(ProcessClass::ConsistencyScan)
: -1);

View File

@ -305,6 +305,9 @@ struct ConsistencyCheckUrgentWorkload : TestWorkload {
req.begin = begin;
req.end = firstGreaterOrEqual(range.end);
req.limit = 1e4;
if (g_network->isSimulated() && SERVER_KNOBS->CONSISTENCY_CHECK_BACKWARD_READ) {
req.limit = -1e4;
}
req.limitBytes = CLIENT_KNOBS->REPLY_BYTE_LIMIT;
req.version = version;
req.tags = TagSet();
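For context on the new knob: the request normally pages forward with a positive row limit of 1e4; when CONSISTENCY_CHECK_BACKWARD_READ is enabled in simulation the limit is negated, which, under the sign convention these range requests use, asks the storage server to return rows starting from the end of the range. A hedged sketch of the caller-side choice (the helper name is illustrative, not part of the patch):

// Illustrative only: pick the row-limit sign for a range read.
// Assumption: a negative limit requests rows from the end of the range backwards.
int chooseRowLimit(bool backwardRead, int rows = 10000) {
    return backwardRead ? -rows : rows;
}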

View File

@ -195,7 +195,7 @@ struct DataLossRecoveryWorkload : TestWorkload {
state std::vector<StorageServerInterface> interfs = wait(getStorageServers(cx));
if (!interfs.empty()) {
state StorageServerInterface interf = interfs[deterministicRandom()->randomInt(0, interfs.size())];
if (g_simulator->protectedAddresses.count(interf.address()) == 0) {
if (!g_simulator->protectedAddresses.contains(interf.address())) {
// We need to avoid selecting a storage server that is already dead at this point, otherwise
// the test will hang. This is achieved by sending a GetStorageMetrics RPC. This is a necessary
// check for this test because DD has been disabled and the proper mechanism that removes bad

View File

@ -123,7 +123,7 @@ struct DiskDurabilityTest : TestWorkload {
state std::vector<int64_t> targetPages;
for (int i = deterministicRandom()->randomInt(1, 100); i > 0 && targetPages.size() < size / 4096; i--) {
auto p = deterministicRandom()->randomInt(0, size / 4096);
if (!std::count(targetPages.begin(), targetPages.end(), p))
if (std::find(targetPages.begin(), targetPages.end(), p) == targetPages.end())
targetPages.push_back(p);
}
for (int i = deterministicRandom()->randomInt(1, 4); i > 0; i--) {

View File

@ -194,11 +194,10 @@ struct DiskFailureInjectionWorkload : FailureInjectionWorkload {
TraceEvent("ResendChaos")
.detail("ChosenWorkersSize", self->chosenWorkers.size())
.detail("FoundWorkers", workersMap.size())
.detail(
"ResendToNumber",
std::count_if(self->chosenWorkers.begin(),
self->chosenWorkers.end(),
[&map = std::as_const(workersMap)](auto const& addr) { return map.count(addr) > 0; }));
.detail("ResendToNumber",
std::count_if(self->chosenWorkers.begin(),
self->chosenWorkers.end(),
[&map = std::as_const(workersMap)](auto const& addr) { return map.contains(addr); }));
for (auto& workerAddress : self->chosenWorkers) {
auto itr = workersMap.find(workerAddress);
if (itr != workersMap.end()) {

View File

@ -102,7 +102,7 @@ struct ExcludeIncludeStorageServersWorkload : TestWorkload {
std::vector<std::pair<StorageServerInterface, ProcessClass>> results =
wait(NativeAPI::getServerListAndProcessClasses(&tr));
for (auto& [ssi, p] : results) {
if (g_simulator->protectedAddresses.count(ssi.address()) == 0) {
if (!g_simulator->protectedAddresses.contains(ssi.address())) {
servers.insert(AddressExclusion(ssi.address().ip, ssi.address().port));
}
}

View File

@ -225,7 +225,9 @@ struct FuzzApiCorrectnessWorkload : TestWorkload {
return TenantGroupNameRef(format("tenantgroup_%d", groupNum));
}
}
bool canUseTenant(Optional<TenantName> tenant) { return !tenant.present() || createdTenants.count(tenant.get()); }
bool canUseTenant(Optional<TenantName> tenant) {
return !tenant.present() || createdTenants.contains(tenant.get());
}
Future<Void> setup(Database const& cx) override {
if (clientId == 0) {

View File

@ -112,15 +112,15 @@ ACTOR Future<Void> httpKVRequestCallback(Reference<SimHTTPKVStore> kvStore,
// content-length and RequestID from http are already filled in
// ASSERT_EQ(req->data.headers.size(), 5);
ASSERT_EQ(req->data.headers.size(), 5);
ASSERT(req->data.headers.count("Key"));
ASSERT(req->data.headers.count("ClientID"));
ASSERT(req->data.headers.count("UID"));
ASSERT(req->data.headers.count("SeqNo"));
ASSERT(req->data.headers.contains("Key"));
ASSERT(req->data.headers.contains("ClientID"));
ASSERT(req->data.headers.contains("UID"));
ASSERT(req->data.headers.contains("SeqNo"));
int clientId = atoi(req->data.headers["ClientID"].c_str());
int seqNo = atoi(req->data.headers["SeqNo"].c_str());
ASSERT(req->data.headers.count("Content-Length"));
ASSERT(req->data.headers.contains("Content-Length"));
ASSERT_EQ(req->data.headers["Content-Length"], std::to_string(req->data.content.size()));
ASSERT_EQ(req->data.contentLen, req->data.content.size());
@ -291,11 +291,11 @@ struct HTTPKeyValueStoreWorkload : TestWorkload {
}
ASSERT_EQ(response->code, 200);
ASSERT(response->data.headers.count("ClientID"));
ASSERT(response->data.headers.contains("ClientID"));
ASSERT_EQ(response->data.headers["ClientID"], std::to_string(self->clientId));
ASSERT(response->data.headers.count("Key"));
ASSERT(response->data.headers.contains("Key"));
ASSERT_EQ(response->data.headers["Key"], key);
ASSERT(response->data.headers.count("UID"));
ASSERT(response->data.headers.contains("UID"));
ASSERT_EQ(response->data.headers["UID"], requestID.toString());
return response;

View File

@ -219,7 +219,7 @@ struct MachineAttritionWorkload : FailureInjectionWorkload {
for (const auto& worker : workers) {
// kill all matching workers
if (idAccess(worker).present() &&
std::count(targets.begin(), targets.end(), idAccess(worker).get().toString())) {
std::find(targets.begin(), targets.end(), idAccess(worker).get().toString()) != targets.end()) {
TraceEvent("SendingRebootRequest").detail("TargetWorker", worker.interf.locality.toString());
worker.interf.clientInterface.reboot.send(rbReq);
}

View File

@ -117,8 +117,8 @@ struct MetaclusterManagementWorkload : TestWorkload {
for (int i = 0; i < 20; ++i) {
int64_t newPrefix = deterministicRandom()->randomInt(TenantAPI::TENANT_ID_PREFIX_MIN_VALUE,
TenantAPI::TENANT_ID_PREFIX_MAX_VALUE + 1);
if (allowTenantIdPrefixReuse || !usedPrefixes.count(newPrefix)) {
CODE_PROBE(usedPrefixes.count(newPrefix), "Reusing tenant ID prefix", probe::decoration::rare);
if (allowTenantIdPrefixReuse || !usedPrefixes.contains(newPrefix)) {
CODE_PROBE(usedPrefixes.contains(newPrefix), "Reusing tenant ID prefix", probe::decoration::rare);
return newPrefix;
}
}
@ -606,7 +606,7 @@ struct MetaclusterManagementWorkload : TestWorkload {
state bool foundTenantCollision = false;
for (auto t : dataDb->tenants) {
if (self->createdTenants.count(t.first)) {
if (self->createdTenants.contains(t.first)) {
foundTenantCollision = true;
tenantsToRemove.insert(t.first);
}
@ -614,7 +614,7 @@ struct MetaclusterManagementWorkload : TestWorkload {
state bool foundGroupCollision = false;
for (auto t : dataDb->tenantGroups) {
if (self->tenantGroups.count(t.first)) {
if (self->tenantGroups.contains(t.first)) {
foundGroupCollision = true;
tenantsToRemove.insert(t.second->tenants.begin(), t.second->tenants.end());
}
@ -1011,7 +1011,7 @@ struct MetaclusterManagementWorkload : TestWorkload {
auto itr = self->createdTenants.find(tenant);
state bool exists = itr != self->createdTenants.end();
state bool tenantGroupExists = tenantGroup.present() && self->tenantGroups.count(tenantGroup.get());
state bool tenantGroupExists = tenantGroup.present() && self->tenantGroups.contains(tenantGroup.get());
state bool hasCapacity = tenantGroupExists || self->ungroupedTenants.size() + self->tenantGroups.size() <
self->totalTenantGroupCapacity;
@ -1740,7 +1740,7 @@ struct MetaclusterManagementWorkload : TestWorkload {
ASSERT_EQ(tenants.size(), clusterData->tenants.size());
for (auto [tenantName, tenantEntry] : tenants) {
ASSERT(clusterData->tenants.count(tenantName));
ASSERT(clusterData->tenants.contains(tenantName));
auto tenantData = clusterData->tenants.find(tenantName);
ASSERT(tenantData != clusterData->tenants.end());
ASSERT(tenantData->second->cluster == clusterName);

View File

@ -139,7 +139,7 @@ struct MetaclusterRestoreWorkload : TestWorkload {
do {
tenantGroup = TenantGroupNameRef(
format("tenantgroup%08d", deterministicRandom()->randomInt(0, maxTenantGroups)));
} while (tenantGroups.count(tenantGroup.get()) > 0);
} while (tenantGroups.contains(tenantGroup.get()));
}
}
}
@ -368,7 +368,7 @@ struct MetaclusterRestoreWorkload : TestWorkload {
for (auto const& t : tenantCollisions) {
// If the data cluster tenant is expected, then remove the management tenant
// Note that the management tenant may also have been expected
if (self->createdTenants.count(t.second.first)) {
if (self->createdTenants.contains(t.second.first)) {
CODE_PROBE(true, "Remove management tenant in restore collision");
removeTrackedTenant(t.second.second);
deleteFutures.push_back(metacluster::deleteTenant(self->managementDb, t.second.second));
@ -527,7 +527,7 @@ struct MetaclusterRestoreWorkload : TestWorkload {
}
}
for (auto const& g : dataClusterGroups.results) {
if (managementGroups.count(g.first)) {
if (managementGroups.contains(g.first)) {
groupCollisions.insert(g.first);
}
}
@ -765,12 +765,12 @@ struct MetaclusterRestoreWorkload : TestWorkload {
state TenantName tenantName;
for (int i = 0; i < 10; ++i) {
tenantName = self->chooseTenantName();
if (self->tenantNameIndex.count(tenantName) == 0) {
if (!self->tenantNameIndex.contains(tenantName)) {
break;
}
}
if (self->tenantNameIndex.count(tenantName)) {
if (self->tenantNameIndex.contains(tenantName)) {
return Void();
}
@ -815,12 +815,12 @@ struct MetaclusterRestoreWorkload : TestWorkload {
state TenantName tenantName;
for (int i = 0; i < 10; ++i) {
tenantName = self->chooseTenantName();
if (self->tenantNameIndex.count(tenantName) != 0) {
if (self->tenantNameIndex.contains(tenantName)) {
break;
}
}
if (self->tenantNameIndex.count(tenantName) == 0) {
if (!self->tenantNameIndex.contains(tenantName)) {
return Void();
}
@ -856,12 +856,12 @@ struct MetaclusterRestoreWorkload : TestWorkload {
state TenantName tenantName;
for (int i = 0; i < 10; ++i) {
tenantName = self->chooseTenantName();
if (self->tenantNameIndex.count(tenantName) != 0) {
if (self->tenantNameIndex.contains(tenantName)) {
break;
}
}
if (self->tenantNameIndex.count(tenantName) == 0) {
if (!self->tenantNameIndex.contains(tenantName)) {
return Void();
}
@ -920,18 +920,18 @@ struct MetaclusterRestoreWorkload : TestWorkload {
state TenantName newTenantName;
for (int i = 0; i < 10; ++i) {
oldTenantName = self->chooseTenantName();
if (self->tenantNameIndex.count(oldTenantName) != 0) {
if (self->tenantNameIndex.contains(oldTenantName)) {
break;
}
}
for (int i = 0; i < 10; ++i) {
newTenantName = self->chooseTenantName();
if (self->tenantNameIndex.count(newTenantName) == 0) {
if (!self->tenantNameIndex.contains(newTenantName)) {
break;
}
}
if (self->tenantNameIndex.count(oldTenantName) == 0 || self->tenantNameIndex.count(newTenantName) != 0) {
if (!self->tenantNameIndex.contains(oldTenantName) || self->tenantNameIndex.contains(newTenantName)) {
return Void();
}
@ -1094,7 +1094,7 @@ struct MetaclusterRestoreWorkload : TestWorkload {
if (!clusterData.restored) {
ASSERT_EQ(tenants.results.size(), clusterData.tenants.size());
for (auto [tenantId, tenantEntry] : tenants.results) {
ASSERT(clusterData.tenants.count(tenantId));
ASSERT(clusterData.tenants.contains(tenantId));
auto tenantData = self->createdTenants[tenantId];
ASSERT(tenantData.cluster == clusterName);
ASSERT(tenantData.tenantGroup == tenantEntry.tenantGroup);
@ -1128,9 +1128,9 @@ struct MetaclusterRestoreWorkload : TestWorkload {
// Check for deleted tenants that reappeared
int unexpectedTenants = 0;
for (auto const& [tenantId, tenantEntry] : tenantMap) {
if (!clusterData.tenants.count(tenantId)) {
if (!clusterData.tenants.contains(tenantId)) {
ASSERT(self->recoverManagementCluster);
ASSERT(self->deletedTenants.count(tenantId));
ASSERT(self->deletedTenants.contains(tenantId));
++unexpectedTenants;
}
}
@ -1204,8 +1204,8 @@ struct MetaclusterRestoreWorkload : TestWorkload {
// If we recovered both the management and some data clusters, we might undelete a tenant
// Check that any unexpected tenants were deleted and that we had a potentially lossy recovery
for (auto const& [tenantId, tenantEntry] : tenantMap) {
if (!self->createdTenants.count(tenantId)) {
ASSERT(self->deletedTenants.count(tenantId));
if (!self->createdTenants.contains(tenantId)) {
ASSERT(self->deletedTenants.contains(tenantId));
ASSERT(self->recoverManagementCluster);
ASSERT(self->recoverDataClusters);
}

View File

@ -558,7 +558,7 @@ struct PhysicalShardMoveWorkLoad : TestWorkload {
ASSERT(interfs.size() > teamSize - includes.size());
while (includes.size() < teamSize) {
const auto& interf = interfs[deterministicRandom()->randomInt(0, interfs.size())];
if (excludes.count(interf.uniqueID) == 0 && includes.count(interf.uniqueID) == 0) {
if (!excludes.contains(interf.uniqueID) && !includes.contains(interf.uniqueID)) {
includes.insert(interf.uniqueID);
}
}

View File

@ -115,7 +115,7 @@ struct MoveKeysWorkload : FailureInjectionWorkload {
while (t.size() < teamSize && storageServers.size()) {
auto s = storageServers.back();
storageServers.pop_back();
if (!machines.count(s.locality.zoneId())) {
if (!machines.contains(s.locality.zoneId())) {
machines.insert(s.locality.zoneId());
t.insert(s);
}

View File

@ -0,0 +1,154 @@
/*
* RandomRangeLock.actor.cpp
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2024 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "fdbclient/FDBTypes.h"
#include "fdbclient/ManagementAPI.actor.h"
#include "fdbclient/RangeLock.h"
#include "fdbclient/SystemData.h"
#include "fdbserver/workloads/workloads.actor.h"
#include "flow/ActorCollection.h"
#include "flow/Arena.h"
#include "flow/Error.h"
#include "flow/IRandom.h"
#include "flow/Trace.h"
#include "flow/actorcompiler.h" // This must be the last #include.
#include <string>
struct RandomRangeLockWorkload : FailureInjectionWorkload {
static constexpr auto NAME = "RandomRangeLock";
bool enabled;
double maxLockDuration = 60.0;
double maxStartDelay = 300.0;
int lockActorCount = 10;
RandomRangeLockWorkload(WorkloadContext const& wcx, NoOptions) : FailureInjectionWorkload(wcx) {
enabled = (clientId == 0) && g_network->isSimulated();
}
RandomRangeLockWorkload(WorkloadContext const& wcx) : FailureInjectionWorkload(wcx) {
enabled = (clientId == 0) && g_network->isSimulated();
maxLockDuration = getOption(options, "maxLockDuration"_sr, maxLockDuration);
maxStartDelay = getOption(options, "maxStartDelay"_sr, maxStartDelay);
}
Future<Void> setup(Database const& cx) override { return Void(); }
Future<Void> start(Database const& cx) override { return _start(cx, this); }
Future<bool> check(Database const& cx) override { return true; }
void getMetrics(std::vector<PerfMetric>& m) override {}
bool shouldInject(DeterministicRandom& random,
const WorkloadRequest& work,
const unsigned alreadyAdded) const override {
// Inject this workload with 10% probability, given that the workload uses a database
return alreadyAdded == 0 && work.useDatabase && random.random01() < 0.1;
}
Standalone<StringRef> getRandomStringRef() const {
int stringLength = deterministicRandom()->randomInt(1, 10);
Standalone<StringRef> stringBuffer = makeString(stringLength);
deterministicRandom()->randomBytes(mutateString(stringBuffer), stringLength);
return stringBuffer;
}
KeyRange getRandomRange(RandomRangeLockWorkload* self) const {
Standalone<StringRef> keyA = self->getRandomStringRef();
Standalone<StringRef> keyB = self->getRandomStringRef();
if (keyA < keyB) {
return Standalone(KeyRangeRef(keyA, keyB));
} else if (keyA > keyB) {
return Standalone(KeyRangeRef(keyB, keyA));
} else {
return singleKeyRange(keyA);
}
}
ACTOR Future<Void> lockActor(Database cx, RandomRangeLockWorkload* self) {
state double testDuration = deterministicRandom()->random01() * self->maxLockDuration;
state double testStartDelay = deterministicRandom()->random01() * self->maxStartDelay;
state std::string rangeLockOwnerName =
"Owner" + std::to_string(deterministicRandom()->randomInt(0, self->lockActorCount));
// Here we intentionally introduce duplicate owner names across different lockActors
std::string lockOwnerDescription = rangeLockOwnerName + ":" + self->getRandomStringRef().toString();
wait(registerRangeLockOwner(cx, rangeLockOwnerName, lockOwnerDescription));
wait(delay(testStartDelay));
state KeyRange range = self->getRandomRange(self);
TraceEvent(SevWarnAlways, "InjectRangeLockSubmit")
.detail("RangeLockOwnerName", rangeLockOwnerName)
.detail("Range", range)
.detail("LockStartDelayTime", testStartDelay)
.detail("LockTime", testDuration);
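// Locking is expected to fail only with range_lock_failed, which happens when the randomly
// generated range extends beyond normalKeys; any other error is rethrown.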
try {
Optional<RangeLockOwner> owner = wait(getRangeLockOwner(cx, rangeLockOwnerName));
ASSERT(owner.present());
ASSERT(owner.get().getUniqueId() == rangeLockOwnerName);
wait(takeReadLockOnRange(cx, range, rangeLockOwnerName));
TraceEvent(SevWarnAlways, "InjectRangeLocked")
.detail("RangeLockOwnerName", rangeLockOwnerName)
.detail("Range", range)
.detail("LockTime", testDuration);
ASSERT(range.end <= normalKeys.end);
} catch (Error& e) {
if (e.code() != error_code_range_lock_failed) {
throw e;
} else {
ASSERT(range.end > normalKeys.end);
}
}
wait(delay(testDuration));
try {
wait(releaseReadLockOnRange(cx, range, rangeLockOwnerName));
TraceEvent(SevWarnAlways, "InjectRangeUnlocked")
.detail("RangeLockOwnerName", rangeLockOwnerName)
.detail("Range", range);
ASSERT(range.end <= normalKeys.end);
} catch (Error& e) {
if (e.code() != error_code_range_lock_failed) {
throw e;
} else {
ASSERT(range.end > normalKeys.end);
}
}
return Void();
}
ACTOR Future<Void> _start(Database cx, RandomRangeLockWorkload* self) {
if (self->enabled) {
// Run lockActorCount actors concurrently.
// Each actor (1) locks a range for a while and then (2) unlocks it.
// Each actor randomly generates a uniqueId as the lock owner, so different actors
// may end up with the same lock owner.
// The range to lock is randomly generated, so different actors may lock
// overlapping ranges.
// The rangeLock mechanism should handle those conflicts appropriately.
// When all actors complete, all locks are expected to be removed,
// so this injected workload should not block other workloads.
std::vector<Future<Void>> actors;
for (int i = 0; i < self->lockActorCount; i++) {
actors.push_back(self->lockActor(cx, self));
}
wait(waitForAll(actors));
}
return Void();
}
};
FailureInjectorFactory<RandomRangeLockWorkload> RangeLockFailureInjectionFactory;
WorkloadFactory<RandomRangeLockWorkload> RandomRangeLockWorkloadFactory;

View File

@ -0,0 +1,525 @@
/*
* RangeLock.actor.cpp
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2024 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "fdbclient/AuditUtils.actor.h"
#include "fdbclient/RangeLock.h"
#include "fdbclient/FDBTypes.h"
#include "fdbclient/ManagementAPI.actor.h"
#include "fdbclient/SystemData.h"
#include "fdbserver/TesterInterface.actor.h"
#include "fdbserver/workloads/workloads.actor.h"
#include "flow/IRandom.h"
#include "flow/Trace.h"
#include "flow/actorcompiler.h" // This must be the last #include.
struct RangeLocking : TestWorkload {
static constexpr auto NAME = "RangeLocking";
const bool enabled;
bool pass;
bool shouldExit = false;
bool verboseLogging = false; // enable to log range lock and commit history
std::string rangeLockOwnerName = "RangeLockingTest";
// This workload is not compatible with the RandomRangeLock workload because the two would race on locked ranges
void disableFailureInjectionWorkloads(std::set<std::string>& out) const override {
out.insert({ "RandomRangeLock" });
}
struct KVOperation {
std::variant<KeyRange, KeyValue> params;
KVOperation(KeyRange range) : params(range) {}
KVOperation(KeyValue keyValue) : params(keyValue) {}
std::string toString() const {
std::string res = "KVOperation: ";
if (std::holds_alternative<KeyRange>(params)) {
res = res + "[ClearRange]: " + std::get<KeyRange>(params).toString();
} else {
res = res + "[SetKeyValue]: key: " + std::get<KeyValue>(params).key.toString() +
", value: " + std::get<KeyValue>(params).value.toString();
}
return res;
}
};
struct LockRangeOperation {
KeyRange range;
bool lock;
LockRangeOperation(KeyRange range, bool lock) : range(range), lock(lock) {}
std::string toString() const {
std::string res = "LockRangeOperation: ";
if (lock) {
res = res + "[LockRange]: " + range.toString();
} else {
res = res + "[UnlockRange]: " + range.toString();
}
return res;
}
};
KeyRangeMap<bool> lockedRangeMap;
std::vector<LockRangeOperation> lockRangeOperations;
std::vector<KVOperation> kvOperations;
std::map<Key, Value> kvs;
RangeLocking(WorkloadContext const& wcx) : TestWorkload(wcx), enabled(true), pass(true) {
lockedRangeMap.insert(allKeys, false);
}
Future<Void> setup(Database const& cx) override {
return registerRangeLockOwner(cx, rangeLockOwnerName, rangeLockOwnerName);
}
Future<Void> start(Database const& cx) override { return _start(this, cx); }
Future<bool> check(Database const& cx) override { return true; }
void getMetrics(std::vector<PerfMetric>& m) override {}
ACTOR Future<Void> setKey(Database cx, Key key, Value value) {
state Transaction tr(cx);
loop {
try {
tr.set(key, value);
wait(tr.commit());
return Void();
} catch (Error& e) {
wait(tr.onError(e));
}
}
}
ACTOR Future<Void> clearKey(Database cx, Key key) {
state Transaction tr(cx);
loop {
try {
tr.clear(key);
wait(tr.commit());
return Void();
} catch (Error& e) {
wait(tr.onError(e));
}
}
}
ACTOR Future<Void> clearRange(Database cx, KeyRange range) {
state Transaction tr(cx);
loop {
try {
tr.clear(range);
wait(tr.commit());
return Void();
} catch (Error& e) {
wait(tr.onError(e));
}
}
}
ACTOR Future<Optional<Value>> getKey(Database cx, Key key) {
state Transaction tr(cx);
loop {
try {
Optional<Value> value = wait(tr.get(key));
return value;
} catch (Error& e) {
wait(tr.onError(e));
}
}
}
ACTOR Future<Void> simpleTest(RangeLocking* self, Database cx) {
state Key keyUpdate = "11"_sr;
state KeyRange keyToClear = KeyRangeRef("1"_sr, "3"_sr);
state KeyRange rangeLock = KeyRangeRef("1"_sr, "2"_sr);
state Optional<Value> value;
state std::vector<KeyRange> lockedRanges;
wait(self->setKey(cx, keyUpdate, "1"_sr));
wait(store(value, self->getKey(cx, keyUpdate)));
ASSERT(value.present() && value.get() == "1"_sr);
wait(self->clearKey(cx, keyUpdate));
wait(store(value, self->getKey(cx, keyUpdate)));
ASSERT(!value.present());
wait(takeReadLockOnRange(cx, rangeLock, self->rangeLockOwnerName));
TraceEvent("RangeLockWorkLoadLockRange").detail("Range", rangeLock);
wait(store(lockedRanges, getReadLockOnRange(cx, normalKeys)));
TraceEvent("RangeLockWorkLoadGetLockedRange")
.detail("Range", rangeLock)
.detail("LockState", describe(lockedRanges));
try {
wait(self->setKey(cx, keyUpdate, "2"_sr));
ASSERT(false);
} catch (Error& e) {
ASSERT(e.code() == error_code_transaction_rejected_range_locked);
}
try {
wait(self->clearRange(cx, keyToClear));
ASSERT(false);
} catch (Error& e) {
ASSERT(e.code() == error_code_transaction_rejected_range_locked);
}
wait(store(value, self->getKey(cx, keyUpdate)));
ASSERT(!value.present());
wait(releaseReadLockOnRange(cx, rangeLock, self->rangeLockOwnerName));
TraceEvent("RangeLockWorkLoadUnlockRange").detail("Range", rangeLock);
lockedRanges.clear();
wait(store(lockedRanges, getReadLockOnRange(cx, normalKeys)));
TraceEvent("RangeLockWorkLoadGetLockedRange")
.detail("Range", rangeLock)
.detail("LockState", describe(lockedRanges));
wait(self->setKey(cx, keyUpdate, "3"_sr));
wait(store(value, self->getKey(cx, keyUpdate)));
ASSERT(value.present() && value.get() == "3"_sr);
return Void();
}
KeyValue getRandomKeyValue() const {
Key key = StringRef(std::to_string(deterministicRandom()->randomInt(0, 10)));
Value value = key;
return Standalone(KeyValueRef(key, value));
}
KeyRange getRandomRange() const {
int startPoint = deterministicRandom()->randomInt(0, 9);
Key beginKey = StringRef(std::to_string(startPoint));
Key endKey = StringRef(std::to_string(deterministicRandom()->randomInt(startPoint + 1, 10)));
return Standalone(KeyRangeRef(beginKey, endKey));
}
ACTOR Future<Void> updateDBWithRandomOperations(RangeLocking* self, Database cx) {
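// Apply a random mix of point sets and range clears, recording every attempt so that
// updateInMemoryKVSStatus can later decide which operations the lock map should have rejected.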
self->kvOperations.clear();
state int iterationCount = deterministicRandom()->randomInt(1, 10);
state int i = 0;
for (; i < iterationCount; i++) {
state bool acceptedByDB = true;
if (deterministicRandom()->coinflip()) {
state KeyValue kv = self->getRandomKeyValue();
try {
wait(self->setKey(cx, kv.key, kv.value));
} catch (Error& e) {
if (e.code() != error_code_transaction_rejected_range_locked) {
throw e;
}
acceptedByDB = false;
}
self->kvOperations.push_back(KVOperation(kv));
if (self->verboseLogging) {
TraceEvent("RangeLockWorkLoadHistory")
.detail("Ops", "SetKey")
.detail("Key", kv.key)
.detail("Value", kv.value)
.detail("Accepted", acceptedByDB);
}
} else {
state KeyRange range = self->getRandomRange();
try {
wait(self->clearRange(cx, range));
} catch (Error& e) {
if (e.code() != error_code_transaction_rejected_range_locked) {
throw e;
}
acceptedByDB = false;
}
self->kvOperations.push_back(KVOperation(range));
if (self->verboseLogging) {
TraceEvent("RangeLockWorkLoadHistory")
.detail("Ops", "ClearRange")
.detail("Range", range)
.detail("Accepted", acceptedByDB);
}
}
}
return Void();
}
ACTOR Future<Void> updateLockMapWithRandomOperation(RangeLocking* self, Database cx) {
self->lockRangeOperations.clear();
state int i = 0;
state int iterationCount = deterministicRandom()->randomInt(1, 10);
for (; i < iterationCount; i++) {
state KeyRange range = self->getRandomRange();
state bool lock = deterministicRandom()->coinflip();
if (lock) {
wait(takeReadLockOnRange(cx, range, self->rangeLockOwnerName));
if (self->verboseLogging) {
TraceEvent("RangeLockWorkLoadHistory").detail("Ops", "Lock").detail("Range", range);
}
} else {
wait(releaseReadLockOnRange(cx, range, self->rangeLockOwnerName));
if (self->verboseLogging) {
TraceEvent("RangeLockWorkLoadHistory").detail("Ops", "Unlock").detail("Range", range);
}
}
self->lockRangeOperations.push_back(LockRangeOperation(range, lock));
}
return Void();
}
bool operationRejectByLocking(RangeLocking* self, const KVOperation& kvOperation) {
KeyRange rangeToCheck;
if (std::holds_alternative<KeyRange>(kvOperation.params)) {
rangeToCheck = std::get<KeyRange>(kvOperation.params);
} else {
rangeToCheck = singleKeyRange(std::get<KeyValue>(kvOperation.params).key);
}
for (auto lockRange : self->lockedRangeMap.intersectingRanges(rangeToCheck)) {
if (lockRange.value() == true) {
return true;
}
}
return false;
}
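// Replay the recorded KV operations against the in-memory model, skipping any operation that
// intersects a locked range, i.e. the operations the database is expected to have rejected.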
void updateInMemoryKVSStatus(RangeLocking* self) {
for (const auto& operation : self->kvOperations) {
if (self->operationRejectByLocking(self, operation)) {
continue;
}
if (std::holds_alternative<KeyValue>(operation.params)) {
Key key = std::get<KeyValue>(operation.params).key;
Value value = std::get<KeyValue>(operation.params).value;
self->kvs[key] = value;
} else {
KeyRange clearRange = std::get<KeyRange>(operation.params);
std::vector<Key> keysToClear;
for (const auto& [key, value] : self->kvs) {
if (clearRange.contains(key)) {
keysToClear.push_back(key);
}
}
for (const auto& key : keysToClear) {
self->kvs.erase(key);
}
}
}
return;
}
void updateInMemoryLockStatus(RangeLocking* self) {
for (const auto& operation : self->lockRangeOperations) {
self->lockedRangeMap.insert(operation.range, operation.lock);
}
return;
}
ACTOR Future<std::map<Key, Value>> getKVSFromDB(RangeLocking* self, Database cx) {
state std::map<Key, Value> kvsFromDB;
state Key beginKey = normalKeys.begin;
state Key endKey = normalKeys.end;
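// Read all of normalKeys in chunks, advancing beginKey past the last key returned;
// each chunk is read with a fresh lock-aware transaction.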
loop {
state Transaction tr(cx);
KeyRange rangeToRead = KeyRangeRef(beginKey, endKey);
try {
tr.setOption(FDBTransactionOptions::LOCK_AWARE);
RangeResult res = wait(tr.getRange(rangeToRead, GetRangeLimits()));
for (int i = 0; i < res.size(); i++) {
kvsFromDB[res[i].key] = res[i].value;
}
if (res.size() > 0) {
beginKey = keyAfter(res.end()[-1].key);
} else {
break;
}
} catch (Error& e) {
wait(tr.onError(e));
}
}
return kvsFromDB;
}
ACTOR Future<std::vector<KeyRange>> getLockedRangesFromDB(Database cx) {
state std::vector<KeyRange> res;
wait(store(res, getReadLockOnRange(cx, normalKeys)));
return coalesceRangeList(res);
}
std::vector<KeyRange> getLockedRangesFromMemory(RangeLocking* self) {
std::vector<KeyRange> res;
for (auto range : self->lockedRangeMap.ranges()) {
if (range.value() == true) {
res.push_back(range.range());
}
}
return coalesceRangeList(res);
}
ACTOR Future<Void> checkKVCorrectness(RangeLocking* self, Database cx) {
state std::map<Key, Value> currentKvsInDB;
wait(store(currentKvsInDB, self->getKVSFromDB(self, cx)));
for (const auto& [key, value] : currentKvsInDB) {
if (self->kvs.find(key) == self->kvs.end()) {
TraceEvent(SevError, "RangeLockWorkLoadHistory")
.detail("Ops", "CheckDBUniqueKey")
.detail("Key", key)
.detail("Value", value);
self->shouldExit = true;
return Void();
} else if (self->kvs[key] != value) {
TraceEvent(SevError, "RangeLockWorkLoadHistory")
.detail("Ops", "CheckMismatchValue")
.detail("Key", key)
.detail("MemValue", self->kvs[key])
.detail("DBValue", value);
self->shouldExit = true;
return Void();
}
}
for (const auto& [key, value] : self->kvs) {
if (currentKvsInDB.find(key) == currentKvsInDB.end()) {
TraceEvent(SevError, "RangeLockWorkLoadHistory")
.detail("Ops", "CheckMemoryUniqueKey")
.detail("Key", key)
.detail("Value", value);
self->shouldExit = true;
return Void();
}
}
if (self->verboseLogging) {
TraceEvent e("RangeLockWorkLoadHistory");
e.setMaxEventLength(-1);
e.setMaxFieldLength(-1);
e.detail("Ops", "CheckAllKVCorrect");
int i = 0;
for (const auto& [key, value] : currentKvsInDB) {
e.detail("Key" + std::to_string(i), key);
e.detail("Value" + std::to_string(i), value);
i++;
}
}
return Void();
}
ACTOR Future<Void> checkLockCorrectness(RangeLocking* self, Database cx) {
state std::vector<KeyRange> currentLockRangesInDB;
wait(store(currentLockRangesInDB, self->getLockedRangesFromDB(cx)));
std::vector<KeyRange> currentLockRangesInMemory = self->getLockedRangesFromMemory(self);
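// Both lists have been coalesced, so any index-wise mismatch indicates a locked-range
// discrepancy between the database and the in-memory model.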
for (int i = 0; i < currentLockRangesInDB.size(); i++) {
if (i >= currentLockRangesInMemory.size()) {
TraceEvent(SevError, "RangeLockWorkLoadHistory")
.detail("Ops", "CheckDBUniqueLockedRange")
.detail("Range", currentLockRangesInDB[i]);
self->shouldExit = true;
return Void();
}
if (currentLockRangesInDB[i] != currentLockRangesInMemory[i]) {
TraceEvent(SevError, "RangeLockWorkLoadHistory")
.detail("Ops", "CheckMismatchLockedRange")
.detail("RangeMemory", currentLockRangesInMemory[i])
.detail("RangeDB", currentLockRangesInDB[i]);
self->shouldExit = true;
return Void();
}
}
for (int i = currentLockRangesInDB.size(); i < currentLockRangesInMemory.size(); i++) {
TraceEvent(SevError, "RangeLockWorkLoadHistory")
.detail("Ops", "CheckMemoryUniqueLockedRange")
.detail("Key", currentLockRangesInMemory[i]);
self->shouldExit = true;
return Void();
}
if (self->verboseLogging) {
TraceEvent e("RangeLockWorkLoadHistory");
e.setMaxEventLength(-1);
e.setMaxFieldLength(-1);
e.detail("Ops", "CheckAllLockCorrect");
int i = 0;
for (const auto& range : currentLockRangesInDB) {
e.detail("Range" + std::to_string(i), range);
i++;
}
}
return Void();
}
ACTOR Future<Void> complexTest(RangeLocking* self, Database cx) {
state int iterationCount = 100;
state int iteration = 0;
state std::string rangeLockOwnerName = "RangeLockingSimpleTest";
wait(registerRangeLockOwner(cx, rangeLockOwnerName, rangeLockOwnerName));
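// Each iteration randomly mutates the lock map and/or the key space, then cross-checks the
// database state against the in-memory model; any mismatch logs a SevError and sets shouldExit.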
loop {
if (iteration > iterationCount || self->shouldExit) {
break;
}
if (deterministicRandom()->coinflip()) {
wait(self->updateLockMapWithRandomOperation(self, cx));
self->updateInMemoryLockStatus(self);
}
TraceEvent("RangeLockWorkloadProgress")
.detail("Iteration", iteration)
.detail("IterationCount", iterationCount)
.detail("Phase", "UpdateLock");
wait(self->checkLockCorrectness(self, cx));
TraceEvent("RangeLockWorkloadProgress")
.detail("Iteration", iteration)
.detail("IterationCount", iterationCount)
.detail("Phase", "CheckLockCorrectness");
if (deterministicRandom()->coinflip()) {
try {
wait(self->updateDBWithRandomOperations(self, cx));
self->updateInMemoryKVSStatus(self);
} catch (Error& e) {
ASSERT(e.code() == error_code_transaction_rejected_range_locked);
self->kvOperations.clear();
}
}
TraceEvent("RangeLockWorkloadProgress")
.detail("Iteration", iteration)
.detail("IterationCount", iterationCount)
.detail("Phase", "UpdateDB");
wait(self->checkKVCorrectness(self, cx));
TraceEvent("RangeLockWorkloadProgress")
.detail("Iteration", iteration)
.detail("IterationCount", iterationCount)
.detail("Phase", "CheckDBCorrectness");
iteration++;
}
wait(releaseReadLockOnRange(cx, normalKeys, self->rangeLockOwnerName));
TraceEvent("RangeLockWorkloadProgress").detail("Phase", "End");
return Void();
}
ACTOR Future<Void> _start(RangeLocking* self, Database cx) {
if (self->clientId != 0) {
return Void();
}
// wait(self->simpleTest(self, cx));
wait(self->complexTest(self, cx));
return Void();
}
};
WorkloadFactory<RangeLocking> RangeLockingFactory;

Some files were not shown because too many files have changed in this diff.