Disable ShardedRocksDB in simulation when shard_encode_location_metadata is disabled (#7726)
* Disabled tests for ShardedRocks. Cleaned up ShardedRocks TraceEvent. Added assertion in ShardManager::validate().
* Added test trace.
* Make sure TraceEvent contains `ShardedRocks`.
* Exclude ShardedRocksDB when SHARD_ENCODE_LOCATION_METADATA is disabled.

Co-authored-by: He Liu <heliu@apple.com>
Commit: 35a4cb91d5 (parent: ade9d20780)
@@ -1081,6 +1081,9 @@ ShardsAffectedByTeamFailure::getTeamsFor(KeyRangeRef keys) {
 }
 
 void ShardsAffectedByTeamFailure::erase(Team team, KeyRange const& range) {
+    DisabledTraceEvent(SevDebug, "ShardsAffectedByTeamFailureErase")
+        .detail("Range", range)
+        .detail("Team", team.toString());
     if (team_shards.erase(std::pair<Team, KeyRange>(team, range)) > 0) {
         for (auto uid = team.servers.begin(); uid != team.servers.end(); ++uid) {
             // Safeguard against going negative after eraseServer() sets value to 0
@@ -1092,6 +1095,9 @@ void ShardsAffectedByTeamFailure::erase(Team team, KeyRange const& range) {
 }
 
 void ShardsAffectedByTeamFailure::insert(Team team, KeyRange const& range) {
+    DisabledTraceEvent(SevDebug, "ShardsAffectedByTeamFailureInsert")
+        .detail("Range", range)
+        .detail("Team", team.toString());
     if (team_shards.insert(std::pair<Team, KeyRange>(team, range)).second) {
         for (auto uid = team.servers.begin(); uid != team.servers.end(); ++uid)
             storageServerShards[*uid]++;
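Both traces above go through DisabledTraceEvent, which keeps the call site and its .detail() chain compiling while, as the name suggests, emitting nothing, so the events can be re-enabled later by changing a single identifier. A minimal sketch of that pattern, an illustration only and not FoundationDB's actual Trace.h implementation:

#include <string>

// Sketch: a trace builder that accepts the same fluent .detail() calls as a real
// TraceEvent but discards everything, so disabled traces still type-check.
struct DisabledTraceEventSketch {
    DisabledTraceEventSketch(int /*severity*/, const char* /*type*/) {}
    template <class T>
    DisabledTraceEventSketch& detail(const char* /*key*/, const T& /*value*/) {
        return *this; // drop the detail, keep the chain going
    }
};

int main() {
    // The call site reads exactly like an enabled trace, but produces no output.
    DisabledTraceEventSketch(5, "ShardsAffectedByTeamFailureErase")
        .detail("Range", std::string("[begin, end)"))
        .detail("Team", std::string("team-1"));
    return 0;
}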
@@ -303,6 +303,9 @@ rocksdb::Options getOptions() {
     // TODO: enable rocksdb metrics.
     options.db_log_dir = SERVER_KNOBS->LOG_DIRECTORY;
+    if (g_network->isSimulated()) {
+        options.OptimizeForSmallDb();
+    }
     return options;
 }
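The simulation-only branch above leans on RocksDB's own small-database tuning. A standalone sketch of the same idea using stock RocksDB APIs; the helper name and its parameters are made up for illustration:

#include <rocksdb/options.h>
#include <string>

// Sketch: build options that keep RocksDB's memory footprint small when many
// simulated storage servers share a single process.
rocksdb::Options makeSimulationOptions(bool simulated, const std::string& logDir) {
    rocksdb::Options options;
    options.create_if_missing = true;
    options.db_log_dir = logDir; // keep RocksDB's LOG files out of the data directory
    if (simulated) {
        // Shrinks write buffer, cache, and compaction defaults to small-DB settings.
        options.OptimizeForSmallDb();
    }
    return options;
}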
@@ -1712,15 +1712,19 @@ ACTOR static Future<Void> finishMoveShards(Database occ,
                                Void(),
                                TaskPriority::MoveKeys));
 
-            int count = 0;
+            std::vector<UID> readyServers;
             for (int s = 0; s < serverReady.size(); ++s) {
-                count += serverReady[s].isReady() && !serverReady[s].isError();
+                if (serverReady[s].isReady() && !serverReady[s].isError()) {
+                    readyServers.push_back(storageServerInterfaces[s].uniqueID);
+                }
             }
 
             TraceEvent(SevVerbose, "FinishMoveShardsWaitedServers", relocationIntervalId)
-                .detail("ReadyServers", count);
+                .detail("DataMoveID", dataMoveId)
+                .detail("ReadyServers", describe(readyServers));
 
-            if (count == newDestinations.size()) {
+            if (readyServers.size() == newDestinations.size()) {
                 std::vector<Future<Void>> actors;
                 actors.push_back(krmSetRangeCoalescing(
                     &tr, keyServersPrefix, range, allKeys, keyServersValue(destServers, {}, dataMoveId, UID())));
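The reworked trace reports which destination servers are ready rather than just how many, joining their IDs into a single detail value with describe(). A rough sketch of what such a helper does; the function below is hypothetical, not FDB's actual describe():

#include <string>
#include <vector>

// Sketch: join a list of server IDs into one comma-separated string suitable for
// a single .detail() value in a trace event.
std::string describeIds(const std::vector<std::string>& ids) {
    std::string joined;
    for (size_t i = 0; i < ids.size(); ++i) {
        if (i > 0) {
            joined += ",";
        }
        joined += ids[i];
    }
    return joined;
}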
@@ -1450,8 +1450,6 @@ void SimulationConfig::setStorageEngine(const TestConfig& testConfig) {
         TraceEvent(SevWarnAlways, "RocksDBNonDeterminism")
             .detail("Explanation", "The Sharded RocksDB storage engine is threaded and non-deterministic");
         noUnseed = true;
-        auto& g_knobs = IKnobCollection::getMutableGlobalKnobCollection();
-        g_knobs.setKnob("shard_encode_location_metadata", KnobValueRef::create(bool{ true }));
         break;
     }
     default:
@@ -2396,6 +2394,10 @@ ACTOR void setupAndRun(std::string dataFolder,
     state bool allowDisablingTenants = testConfig.allowDisablingTenants;
     state bool allowCreatingTenants = true;
 
+    if (!CLIENT_KNOBS->SHARD_ENCODE_LOCATION_METADATA) {
+        testConfig.storageEngineExcludeTypes.push_back(5);
+    }
+
     // The RocksDB storage engine does not support the restarting tests because you cannot consistently get a clean
     // snapshot of the storage engine without a snapshotting file system.
     // https://github.com/apple/foundationdb/issues/5155
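This is the core of the change: when SHARD_ENCODE_LOCATION_METADATA is off, storage engine type 5 (ShardedRocksDB in this commit's numbering) goes on the simulator's exclude list, so simulation never picks it. A self-contained sketch of how such an exclude list can gate engine selection; the helper below is hypothetical, not the simulator's real selection code:

#include <algorithm>
#include <cstdlib>
#include <vector>

// Sketch: pick a storage engine type at random, skipping anything on the exclude
// list. Type 5 stands in for ShardedRocksDB.
int pickStorageEngine(const std::vector<int>& candidates, const std::vector<int>& excluded) {
    std::vector<int> allowed;
    for (int type : candidates) {
        if (std::find(excluded.begin(), excluded.end(), type) == excluded.end()) {
            allowed.push_back(type);
        }
    }
    if (allowed.empty()) {
        return candidates.front(); // degenerate case: everything excluded
    }
    return allowed[std::rand() % allowed.size()];
}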
@@ -7084,6 +7084,10 @@ void changeServerKeysWithPhysicalShards(StorageServer* data,
     for (int i = 0; i < ranges.size(); i++) {
         const Reference<ShardInfo> currentShard = ranges[i].value;
         const KeyRangeRef currentRange = static_cast<KeyRangeRef>(ranges[i]);
+        if (currentShard.isValid()) {
+            TraceEvent(SevVerbose, "OverlappingPhysicalShard", data->thisServerID)
+                .detail("PhysicalShard", currentShard->toStorageServerShard().toString());
+        }
         if (!currentShard.isValid()) {
             ASSERT(currentRange == keys); // there shouldn't be any nulls except for the range being inserted
         } else if (currentShard->notAssigned()) {
@@ -7105,7 +7109,7 @@ void changeServerKeysWithPhysicalShards(StorageServer* data,
                 .detail("NowAssigned", nowAssigned)
                 .detail("Version", cVer)
                 .detail("ResultingShard", newShard.toString());
-        } else if (ranges[i].value->adding) {
+        } else if (currentShard->adding) {
             ASSERT(!nowAssigned);
             StorageServerShard newShard = currentShard->toStorageServerShard();
             newShard.range = currentRange;
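The new per-range trace and the existing ASSERT guard the same invariant: while a new range is being installed, any overlapping slot without a valid shard must be exactly the range being inserted. A minimal sketch of that check with stand-in types, not the storage server's real ShardInfo or range map:

#include <cassert>
#include <string>
#include <vector>

struct RangeSketch {
    std::string begin, end;
    bool operator==(const RangeSketch& other) const { return begin == other.begin && end == other.end; }
};

// One overlapping slot in the shard map: the range it covers and whether a valid
// shard currently occupies it.
struct OverlapSketch {
    RangeSketch range;
    bool hasValidShard;
};

void validateOverlaps(const std::vector<OverlapSketch>& overlaps, const RangeSketch& inserted) {
    for (const OverlapSketch& slot : overlaps) {
        if (!slot.hasValidShard) {
            // Mirrors the diff's ASSERT(currentRange == keys): the only hole in the
            // map may be the range being inserted right now.
            assert(slot.range == inserted);
        }
    }
}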