Merge commit '59b04d46cff720df5d267daa8dc8a60c25466f74' into kaio-latency-pctile

This commit is contained in:
Steve Atherton 2022-09-15 20:27:02 -07:00
commit 0e0a10ab45
85 changed files with 1072 additions and 615 deletions

View File

@ -290,7 +290,8 @@ else()
add_link_options(-stdlib=libc++ -Wl,-build-id=sha1)
endif()
endif()
if (NOT APPLE)
if (NOT APPLE AND NOT USE_LIBCXX)
message(STATUS "Linking libatomic")
add_link_options(-latomic)
endif()
if (OPEN_FOR_IDE)

View File

@ -414,7 +414,7 @@ ACTOR Future<Void> readCommitted(Database cx,
loop {
try {
state GetRangeLimits limits(GetRangeLimits::ROW_LIMIT_UNLIMITED,
(g_network->isSimulated() && !g_simulator.speedUpSimulation)
(g_network->isSimulated() && !g_simulator->speedUpSimulation)
? CLIENT_KNOBS->BACKUP_SIMULATED_LIMIT_BYTES
: CLIENT_KNOBS->BACKUP_GET_RANGE_LIMIT_BYTES);
@ -493,7 +493,7 @@ ACTOR Future<Void> readCommitted(Database cx,
loop {
try {
state GetRangeLimits limits(GetRangeLimits::ROW_LIMIT_UNLIMITED,
(g_network->isSimulated() && !g_simulator.speedUpSimulation)
(g_network->isSimulated() && !g_simulator->speedUpSimulation)
? CLIENT_KNOBS->BACKUP_SIMULATED_LIMIT_BYTES
: CLIENT_KNOBS->BACKUP_GET_RANGE_LIMIT_BYTES);

View File

@ -227,10 +227,10 @@ Future<Reference<IAsyncFile>> BackupContainerLocalDirectory::readFile(const std:
throw file_not_found();
}
if (g_simulator.getCurrentProcess()->uid == UID()) {
if (g_simulator->getCurrentProcess()->uid == UID()) {
TraceEvent(SevError, "BackupContainerReadFileOnUnsetProcessID").log();
}
std::string uniquePath = fullPath + "." + g_simulator.getCurrentProcess()->uid.toString() + ".lnk";
std::string uniquePath = fullPath + "." + g_simulator->getCurrentProcess()->uid.toString() + ".lnk";
unlink(uniquePath.c_str());
ASSERT(symlink(basename(path).c_str(), uniquePath.c_str()) == 0);
fullPath = uniquePath;

View File

@ -816,8 +816,6 @@ struct AbortFiveZeroBackupTask : TaskFuncBase {
state FileBackupAgent backupAgent;
state std::string tagName = task->params[BackupAgentBase::keyConfigBackupTag].toString();
CODE_PROBE(true, "Canceling old backup task");
TraceEvent(SevInfo, "FileBackupCancelOldTask")
.detail("Task", task->params[Task::reservedTaskParamKeyType])
.detail("TagName", tagName);
@ -902,8 +900,6 @@ struct AbortFiveOneBackupTask : TaskFuncBase {
state BackupConfig config(task);
state std::string tagName = wait(config.tag().getOrThrow(tr));
CODE_PROBE(true, "Canceling 5.1 backup task");
TraceEvent(SevInfo, "FileBackupCancelFiveOneTask")
.detail("Task", task->params[Task::reservedTaskParamKeyType])
.detail("TagName", tagName);

View File

@ -956,7 +956,7 @@ ACTOR Future<Optional<CoordinatorsResult>> changeQuorumChecker(Transaction* tr,
std::sort(old.coords.begin(), old.coords.end());
if (conn->hostnames == old.hostnames && conn->coords == old.coords && old.clusterKeyName() == newName) {
connectionStrings.clear();
if (g_network->isSimulated() && g_simulator.configDBType == ConfigDBType::DISABLED) {
if (g_network->isSimulated() && g_simulator->configDBType == ConfigDBType::DISABLED) {
disableConfigDB = true;
}
if (!disableConfigDB) {
@ -973,7 +973,7 @@ ACTOR Future<Optional<CoordinatorsResult>> changeQuorumChecker(Transaction* tr,
int i = 0;
int protectedCount = 0;
while ((protectedCount < ((desiredCoordinators.size() / 2) + 1)) && (i < desiredCoordinators.size())) {
auto process = g_simulator.getProcessByAddress(desiredCoordinators[i]);
auto process = g_simulator->getProcessByAddress(desiredCoordinators[i]);
auto addresses = process->addresses;
if (!process->isReliable()) {
@ -981,9 +981,9 @@ ACTOR Future<Optional<CoordinatorsResult>> changeQuorumChecker(Transaction* tr,
continue;
}
g_simulator.protectedAddresses.insert(process->addresses.address);
g_simulator->protectedAddresses.insert(process->addresses.address);
if (addresses.secondaryAddress.present()) {
g_simulator.protectedAddresses.insert(process->addresses.secondaryAddress.get());
g_simulator->protectedAddresses.insert(process->addresses.secondaryAddress.get());
}
TraceEvent("ProtectCoordinator").detail("Address", desiredCoordinators[i]).backtrace();
protectedCount++;
@ -1077,12 +1077,12 @@ ACTOR Future<CoordinatorsResult> changeQuorum(Database cx, Reference<IQuorumChan
if (g_network->isSimulated()) {
for (int i = 0; i < (desiredCoordinators.size() / 2) + 1; i++) {
auto process = g_simulator.getProcessByAddress(desiredCoordinators[i]);
auto process = g_simulator->getProcessByAddress(desiredCoordinators[i]);
ASSERT(process->isReliable() || process->rebooting);
g_simulator.protectedAddresses.insert(process->addresses.address);
g_simulator->protectedAddresses.insert(process->addresses.address);
if (process->addresses.secondaryAddress.present()) {
g_simulator.protectedAddresses.insert(process->addresses.secondaryAddress.get());
g_simulator->protectedAddresses.insert(process->addresses.secondaryAddress.get());
}
TraceEvent("ProtectCoordinator").detail("Address", desiredCoordinators[i]).backtrace();
}
@ -1341,7 +1341,7 @@ struct AutoQuorumChange final : IQuorumChange {
continue;
}
// Exclude faulty node due to machine assassination
if (g_network->isSimulated() && !g_simulator.getProcessByAddress(worker->address)->isReliable()) {
if (g_network->isSimulated() && !g_simulator->getProcessByAddress(worker->address)->isReliable()) {
TraceEvent("AutoSelectCoordinators").detail("SkipUnreliableWorker", worker->address.toString());
continue;
}
@ -2266,7 +2266,7 @@ ACTOR Future<Void> updateChangeFeed(Transaction* tr, Key rangeID, ChangeFeedStat
} else if (status == ChangeFeedStatus::CHANGE_FEED_DESTROY) {
if (val.present()) {
if (g_network->isSimulated()) {
g_simulator.validationData.allDestroyedChangeFeedIDs.insert(rangeID.toString());
g_simulator->validationData.allDestroyedChangeFeedIDs.insert(rangeID.toString());
}
tr->set(rangeIDKey,
changeFeedValue(std::get<0>(decodeChangeFeedValue(val.get())),
@ -2304,7 +2304,7 @@ ACTOR Future<Void> updateChangeFeed(Reference<ReadYourWritesTransaction> tr,
} else if (status == ChangeFeedStatus::CHANGE_FEED_DESTROY) {
if (val.present()) {
if (g_network->isSimulated()) {
g_simulator.validationData.allDestroyedChangeFeedIDs.insert(rangeID.toString());
g_simulator->validationData.allDestroyedChangeFeedIDs.insert(rangeID.toString());
}
tr->set(rangeIDKey,
changeFeedValue(std::get<0>(decodeChangeFeedValue(val.get())),
@ -2625,16 +2625,16 @@ TEST_CASE("/ManagementAPI/AutoQuorumChange/checkLocality") {
data.address.ip = IPAddress(i);
if (g_network->isSimulated()) {
g_simulator.newProcess("TestCoordinator",
data.address.ip,
data.address.port,
false,
1,
data.locality,
ProcessClass(ProcessClass::CoordinatorClass, ProcessClass::CommandLineSource),
"",
"",
currentProtocolVersion());
g_simulator->newProcess("TestCoordinator",
data.address.ip,
data.address.port,
false,
1,
data.locality,
ProcessClass(ProcessClass::CoordinatorClass, ProcessClass::CommandLineSource),
"",
"",
currentProtocolVersion());
}
workers.push_back(data);

View File

@ -67,7 +67,7 @@ ACTOR Future<Void> PipelinedReader::getNext_impl(PipelinedReader* self, Database
state Transaction tr(cx);
state GetRangeLimits limits(GetRangeLimits::ROW_LIMIT_UNLIMITED,
(g_network->isSimulated() && !g_simulator.speedUpSimulation)
(g_network->isSimulated() && !g_simulator->speedUpSimulation)
? CLIENT_KNOBS->BACKUP_SIMULATED_LIMIT_BYTES
: CLIENT_KNOBS->BACKUP_GET_RANGE_LIMIT_BYTES);

View File

@ -4686,7 +4686,7 @@ static Future<Void> tssStreamComparison(Request request,
if ((!ssEndOfStream || !tssEndOfStream) && !TSS_doCompare(ssReply.get(), tssReply.get())) {
CODE_PROBE(true, "TSS mismatch in stream comparison");
TraceEvent mismatchEvent(
(g_network->isSimulated() && g_simulator.tssMode == ISimulator::TSSMode::EnabledDropMutations)
(g_network->isSimulated() && g_simulator->tssMode == ISimulator::TSSMode::EnabledDropMutations)
? SevWarnAlways
: SevError,
TSS_mismatchTraceName(request));
@ -4708,7 +4708,7 @@ static Future<Void> tssStreamComparison(Request request,
// record a summarized trace event instead
TraceEvent summaryEvent((g_network->isSimulated() &&
g_simulator.tssMode == ISimulator::TSSMode::EnabledDropMutations)
g_simulator->tssMode == ISimulator::TSSMode::EnabledDropMutations)
? SevWarnAlways
: SevError,
TSS_mismatchTraceName(request));
@ -8423,7 +8423,7 @@ Reference<TransactionLogInfo> Transaction::createTrLogInfoProbabilistically(cons
cx->globalConfig->get<double>(fdbClientInfoTxnSampleRate, CLIENT_KNOBS->CSI_SAMPLING_PROBABILITY);
if (((networkOptions.logClientInfo.present() && networkOptions.logClientInfo.get()) || BUGGIFY) &&
deterministicRandom()->random01() < clientSamplingProbability &&
(!g_network->isSimulated() || !g_simulator.speedUpSimulation)) {
(!g_network->isSimulated() || !g_simulator->speedUpSimulation)) {
return makeReference<TransactionLogInfo>(TransactionLogInfo::DATABASE);
}
}
@ -9586,7 +9586,7 @@ ACTOR Future<Void> getChangeFeedStreamActor(Reference<DatabaseContext> db,
if (useIdx >= 0) {
chosenLocations[loc] = useIdx;
loc++;
if (g_network->isSimulated() && !g_simulator.speedUpSimulation && BUGGIFY_WITH_PROB(0.01)) {
if (g_network->isSimulated() && !g_simulator->speedUpSimulation && BUGGIFY_WITH_PROB(0.01)) {
// simulate as if we had to wait for all alternatives delayed, before the next one
wait(delay(deterministicRandom()->random01()));
}

View File

@ -962,6 +962,8 @@ void ServerKnobs::initialize(Randomize randomize, ClientKnobs* clientKnobs, IsSi
init( BLOB_MANAGER_STATUS_EXP_BACKOFF_MAX, 5.0 );
init( BLOB_MANAGER_STATUS_EXP_BACKOFF_EXPONENT, 1.5 );
init( BLOB_MANAGER_CONCURRENT_MERGE_CHECKS, 64 ); if( randomize && BUGGIFY ) BLOB_MANAGER_CONCURRENT_MERGE_CHECKS = 1 << deterministicRandom()->randomInt(0, 7);
init( BLOB_MANIFEST_BACKUP, false );
init( BLOB_FULL_RESTORE_MODE, false );
init( BGCC_TIMEOUT, isSimulated ? 10.0 : 120.0 );
init( BGCC_MIN_INTERVAL, isSimulated ? 1.0 : 10.0 );

View File

@ -276,4 +276,15 @@ struct BlobGranuleHistoryValue {
}
};
// A manifest to assist full fdb restore from blob granule files
struct BlobManifest {
constexpr static FileIdentifier file_identifier = 298872;
VectorRef<KeyValueRef> rows;
template <class Ar>
void serialize(Ar& ar) {
serializer(ar, rows);
}
};
#endif

View File

@ -943,6 +943,8 @@ public:
int BLOB_MANAGER_CONCURRENT_MERGE_CHECKS;
double BGCC_TIMEOUT;
double BGCC_MIN_INTERVAL;
bool BLOB_MANIFEST_BACKUP;
bool BLOB_FULL_RESTORE_MODE;
// Blob metadata
int64_t BLOB_METADATA_CACHE_TTL;

View File

@ -71,7 +71,9 @@ struct Tuple {
size_t size() const { return offsets.size(); }
void reserve(size_t cap) { offsets.reserve(cap); }
void clear() {
data.clear();
// Make a new Standalone to use different memory so that
// previously returned objects from pack() are valid.
data = Standalone<VectorRef<uint8_t>>();
offsets.clear();
}
// Return a Tuple encoded raw string.

View File

@ -24,7 +24,7 @@
std::map<std::string, Future<Void>> AsyncFileNonDurable::filesBeingDeleted;
ACTOR Future<Void> sendOnProcess(ISimulator::ProcessInfo* process, Promise<Void> promise, TaskPriority taskID) {
wait(g_simulator.onProcess(process, taskID));
wait(g_simulator->onProcess(process, taskID));
promise.send(Void());
return Void();
}
@ -33,7 +33,7 @@ ACTOR Future<Void> sendErrorOnProcess(ISimulator::ProcessInfo* process,
Promise<Void> promise,
Error e,
TaskPriority taskID) {
wait(g_simulator.onProcess(process, taskID));
wait(g_simulator->onProcess(process, taskID));
promise.sendError(e);
return Void();
}

View File

@ -1136,9 +1136,10 @@ static void scanPackets(TransportData* transport,
if (checksumEnabled) {
bool isBuggifyEnabled = false;
if (g_network->isSimulated() && !isStableConnection &&
g_network->now() - g_simulator.lastConnectionFailure > g_simulator.connectionFailuresDisableDuration &&
g_network->now() - g_simulator->lastConnectionFailure >
g_simulator->connectionFailuresDisableDuration &&
BUGGIFY_WITH_PROB(0.0001)) {
g_simulator.lastConnectionFailure = g_network->now();
g_simulator->lastConnectionFailure = g_network->now();
isBuggifyEnabled = true;
TraceEvent(SevInfo, "BitsFlip").log();
int flipBits = 32 - (int)floor(log2(deterministicRandom()->randomUInt32()));
@ -1588,7 +1589,7 @@ FlowTransport::FlowTransport(uint64_t transportId, int maxWellKnownEndpoints, IP
: self(new TransportData(transportId, maxWellKnownEndpoints, allowList)) {
self->multiVersionCleanup = multiVersionCleanupWorker(self);
if (g_network->isSimulated()) {
for (auto const& p : g_simulator.authKeys) {
for (auto const& p : g_simulator->authKeys) {
self->publicKeys.emplace(p.first, p.second.toPublic());
}
}

View File

@ -1200,8 +1200,8 @@ void dsltest() {
actorTest1(true);
actorTest2(true);
actorTest3(true);
// if (g_network == &g_simulator)
// g_simulator.run( actorTest4(true) );
// if (g_network == g_simulator)
// g_simulator->run( actorTest4(true) );
actorTest5();
actorTest6();
actorTest7();

View File

@ -27,8 +27,8 @@
ACTOR Future<Void> disableConnectionFailuresAfter(double time, std::string context) {
if (g_network->isSimulated()) {
wait(delayUntil(time));
g_simulator.connectionFailuresDisableDuration = 1e6;
g_simulator.speedUpSimulation = true;
g_simulator->connectionFailuresDisableDuration = 1e6;
g_simulator->speedUpSimulation = true;
TraceEvent(SevWarnAlways, ("DisableConnectionFailures_" + context).c_str());
}
return Void();

View File

@ -48,7 +48,7 @@ ACTOR Future<Void> sendErrorOnProcess(ISimulator::ProcessInfo* process,
ACTOR template <class T>
Future<T> sendErrorOnShutdown(Future<T> in) {
choose {
when(wait(success(g_simulator.getCurrentProcess()->shutdownSignal.getFuture()))) {
when(wait(success(g_simulator->getCurrentProcess()->shutdownSignal.getFuture()))) {
throw io_error().asInjectedFault();
}
when(T rep = wait(in)) { return rep; }
@ -64,14 +64,14 @@ public:
explicit AsyncFileDetachable(Reference<IAsyncFile> file) : file(file) { shutdown = doShutdown(this); }
ACTOR Future<Void> doShutdown(AsyncFileDetachable* self) {
wait(success(g_simulator.getCurrentProcess()->shutdownSignal.getFuture()));
wait(success(g_simulator->getCurrentProcess()->shutdownSignal.getFuture()));
self->file = Reference<IAsyncFile>();
return Void();
}
ACTOR static Future<Reference<IAsyncFile>> open(Future<Reference<IAsyncFile>> wrappedFile) {
choose {
when(wait(success(g_simulator.getCurrentProcess()->shutdownSignal.getFuture()))) {
when(wait(success(g_simulator->getCurrentProcess()->shutdownSignal.getFuture()))) {
throw io_error().asInjectedFault();
}
when(Reference<IAsyncFile> f = wait(wrappedFile)) { return makeReference<AsyncFileDetachable>(f); }
@ -82,31 +82,31 @@ public:
void delref() override { ReferenceCounted<AsyncFileDetachable>::delref(); }
Future<int> read(void* data, int length, int64_t offset) override {
if (!file.getPtr() || g_simulator.getCurrentProcess()->shutdownSignal.getFuture().isReady())
if (!file.getPtr() || g_simulator->getCurrentProcess()->shutdownSignal.getFuture().isReady())
return io_error().asInjectedFault();
return sendErrorOnShutdown(file->read(data, length, offset));
}
Future<Void> write(void const* data, int length, int64_t offset) override {
if (!file.getPtr() || g_simulator.getCurrentProcess()->shutdownSignal.getFuture().isReady())
if (!file.getPtr() || g_simulator->getCurrentProcess()->shutdownSignal.getFuture().isReady())
return io_error().asInjectedFault();
return sendErrorOnShutdown(file->write(data, length, offset));
}
Future<Void> truncate(int64_t size) override {
if (!file.getPtr() || g_simulator.getCurrentProcess()->shutdownSignal.getFuture().isReady())
if (!file.getPtr() || g_simulator->getCurrentProcess()->shutdownSignal.getFuture().isReady())
return io_error().asInjectedFault();
return sendErrorOnShutdown(file->truncate(size));
}
Future<Void> sync() override {
if (!file.getPtr() || g_simulator.getCurrentProcess()->shutdownSignal.getFuture().isReady())
if (!file.getPtr() || g_simulator->getCurrentProcess()->shutdownSignal.getFuture().isReady())
return io_error().asInjectedFault();
return sendErrorOnShutdown(file->sync());
}
Future<int64_t> size() const override {
if (!file.getPtr() || g_simulator.getCurrentProcess()->shutdownSignal.getFuture().isReady())
if (!file.getPtr() || g_simulator->getCurrentProcess()->shutdownSignal.getFuture().isReady())
return io_error().asInjectedFault();
return sendErrorOnShutdown(file->size());
}
@ -214,12 +214,12 @@ public:
Future<Reference<IAsyncFile>> wrappedFile,
Reference<DiskParameters> diskParameters,
bool aio) {
state ISimulator::ProcessInfo* currentProcess = g_simulator.getCurrentProcess();
state ISimulator::ProcessInfo* currentProcess = g_simulator->getCurrentProcess();
state TaskPriority currentTaskID = g_network->getCurrentTask();
state Future<Void> shutdown = success(currentProcess->shutdownSignal.getFuture());
//TraceEvent("AsyncFileNonDurableOpenBegin").detail("Filename", filename).detail("Addr", g_simulator.getCurrentProcess()->address);
wait(g_simulator.onMachine(currentProcess));
//TraceEvent("AsyncFileNonDurableOpenBegin").detail("Filename", filename).detail("Addr", g_simulator->getCurrentProcess()->address);
wait(g_simulator->onMachine(currentProcess));
try {
wait(success(wrappedFile) || shutdown);
@ -237,7 +237,7 @@ public:
//TraceEvent("AsyncFileNonDurableOpenWaitOnDelete2").detail("Filename", filename);
if (shutdown.isReady())
throw io_error().asInjectedFault();
wait(g_simulator.onProcess(currentProcess, currentTaskID));
wait(g_simulator->onProcess(currentProcess, currentTaskID));
}
state Reference<AsyncFileNonDurable> nonDurableFile(
@ -252,7 +252,7 @@ public:
//TraceEvent("AsyncFileNonDurableOpenComplete").detail("Filename", filename);
wait(g_simulator.onProcess(currentProcess, currentTaskID));
wait(g_simulator->onProcess(currentProcess, currentTaskID));
return nonDurableFile;
} catch (Error& e) {
@ -260,8 +260,8 @@ public:
std::string currentFilename =
(wrappedFile.isReady() && !wrappedFile.isError()) ? wrappedFile.get()->getFilename() : actualFilename;
currentProcess->machine->openFiles.erase(currentFilename);
//TraceEvent("AsyncFileNonDurableOpenError").errorUnsuppressed(e).detail("Filename", filename).detail("Address", currentProcess->address).detail("Addr", g_simulator.getCurrentProcess()->address);
wait(g_simulator.onProcess(currentProcess, currentTaskID));
//TraceEvent("AsyncFileNonDurableOpenError").errorUnsuppressed(e).detail("Filename", filename).detail("Address", currentProcess->address).detail("Addr", g_simulator->getCurrentProcess()->address);
wait(g_simulator->onProcess(currentProcess, currentTaskID));
throw err;
}
}
@ -290,7 +290,7 @@ public:
// Removes a file from the openFiles map
static void removeOpenFile(std::string filename, AsyncFileNonDurable* file) {
auto& openFiles = g_simulator.getCurrentProcess()->machine->openFiles;
auto& openFiles = g_simulator->getCurrentProcess()->machine->openFiles;
auto iter = openFiles.find(filename);
@ -425,24 +425,24 @@ private:
debugFileCheck("AsyncFileNonDurableRead", self->filename, data, offset, length);
// if(g_simulator.getCurrentProcess()->rebooting)
// if(g_simulator->getCurrentProcess()->rebooting)
//TraceEvent("AsyncFileNonDurable_ReadEnd", self->id).detail("Filename", self->filename);
return readFuture.get();
}
ACTOR Future<int> read(AsyncFileNonDurable* self, void* data, int length, int64_t offset) {
state ISimulator::ProcessInfo* currentProcess = g_simulator.getCurrentProcess();
state ISimulator::ProcessInfo* currentProcess = g_simulator->getCurrentProcess();
state TaskPriority currentTaskID = g_network->getCurrentTask();
wait(g_simulator.onMachine(currentProcess));
wait(g_simulator->onMachine(currentProcess));
try {
state int rep = wait(self->onRead(self, data, length, offset));
wait(g_simulator.onProcess(currentProcess, currentTaskID));
wait(g_simulator->onProcess(currentProcess, currentTaskID));
return rep;
} catch (Error& e) {
state Error err = e;
wait(g_simulator.onProcess(currentProcess, currentTaskID));
wait(g_simulator->onProcess(currentProcess, currentTaskID));
throw err;
}
}
@ -457,12 +457,12 @@ private:
int length,
int64_t offset) {
state Standalone<StringRef> dataCopy(StringRef((uint8_t*)data, length));
state ISimulator::ProcessInfo* currentProcess = g_simulator.getCurrentProcess();
state ISimulator::ProcessInfo* currentProcess = g_simulator->getCurrentProcess();
state TaskPriority currentTaskID = g_network->getCurrentTask();
wait(g_simulator.onMachine(currentProcess));
wait(g_simulator->onMachine(currentProcess));
state double delayDuration =
g_simulator.speedUpSimulation ? 0.0001 : (deterministicRandom()->random01() * self->maxWriteDelay);
g_simulator->speedUpSimulation ? 0.0001 : (deterministicRandom()->random01() * self->maxWriteDelay);
state Future<bool> startSyncFuture = self->startSyncPromise.getFuture();
@ -475,7 +475,7 @@ private:
self->getModificationsAndInsert(offset, length, true, writeEnded);
self->minSizeAfterPendingModifications = std::max(self->minSizeAfterPendingModifications, offset + length);
if (BUGGIFY_WITH_PROB(0.001) && !g_simulator.speedUpSimulation)
if (BUGGIFY_WITH_PROB(0.001) && !g_simulator->speedUpSimulation)
priorModifications.push_back(
delay(deterministicRandom()->random01() * FLOW_KNOBS->MAX_PRIOR_MODIFICATION_DELAY) ||
self->killed.getFuture());
@ -629,12 +629,12 @@ private:
Promise<Void> truncateStarted,
Future<Future<Void>> ownFuture,
int64_t size) {
state ISimulator::ProcessInfo* currentProcess = g_simulator.getCurrentProcess();
state ISimulator::ProcessInfo* currentProcess = g_simulator->getCurrentProcess();
state TaskPriority currentTaskID = g_network->getCurrentTask();
wait(g_simulator.onMachine(currentProcess));
wait(g_simulator->onMachine(currentProcess));
state double delayDuration =
g_simulator.speedUpSimulation ? 0.0001 : (deterministicRandom()->random01() * self->maxWriteDelay);
g_simulator->speedUpSimulation ? 0.0001 : (deterministicRandom()->random01() * self->maxWriteDelay);
state Future<bool> startSyncFuture = self->startSyncPromise.getFuture();
try {
@ -773,18 +773,18 @@ private:
}
ACTOR Future<Void> sync(AsyncFileNonDurable* self, bool durable) {
state ISimulator::ProcessInfo* currentProcess = g_simulator.getCurrentProcess();
state ISimulator::ProcessInfo* currentProcess = g_simulator->getCurrentProcess();
state TaskPriority currentTaskID = g_network->getCurrentTask();
wait(g_simulator.onMachine(currentProcess));
wait(g_simulator->onMachine(currentProcess));
try {
wait(self->onSync(self, durable));
wait(g_simulator.onProcess(currentProcess, currentTaskID));
wait(g_simulator->onProcess(currentProcess, currentTaskID));
return Void();
} catch (Error& e) {
state Error err = e;
wait(g_simulator.onProcess(currentProcess, currentTaskID));
wait(g_simulator->onProcess(currentProcess, currentTaskID));
throw err;
}
}
@ -806,32 +806,33 @@ private:
}
ACTOR static Future<int64_t> size(AsyncFileNonDurable const* self) {
state ISimulator::ProcessInfo* currentProcess = g_simulator.getCurrentProcess();
state ISimulator::ProcessInfo* currentProcess = g_simulator->getCurrentProcess();
state TaskPriority currentTaskID = g_network->getCurrentTask();
wait(g_simulator.onMachine(currentProcess));
wait(g_simulator->onMachine(currentProcess));
try {
state int64_t rep = wait(onSize(self));
wait(g_simulator.onProcess(currentProcess, currentTaskID));
wait(g_simulator->onProcess(currentProcess, currentTaskID));
return rep;
} catch (Error& e) {
state Error err = e;
wait(g_simulator.onProcess(currentProcess, currentTaskID));
wait(g_simulator->onProcess(currentProcess, currentTaskID));
throw err;
}
}
// Finishes all outstanding actors on an AsyncFileNonDurable and then deletes it
ACTOR Future<Void> closeFile(AsyncFileNonDurable* self) {
state ISimulator::ProcessInfo* currentProcess = g_simulator.getCurrentProcess();
state ISimulator::ProcessInfo* currentProcess = g_simulator->getCurrentProcess();
state TaskPriority currentTaskID = g_network->getCurrentTask();
state std::string filename = self->filename;
g_simulator.getMachineByNetworkAddress(self->openedAddress)->deletingOrClosingFiles.insert(self->getFilename());
g_simulator->getMachineByNetworkAddress(self->openedAddress)
->deletingOrClosingFiles.insert(self->getFilename());
wait(g_simulator.onMachine(currentProcess));
wait(g_simulator->onMachine(currentProcess));
try {
// Make sure all writes have gone through.
Promise<bool> startSyncPromise = self->startSyncPromise;
@ -854,8 +855,8 @@ private:
wait(self->killComplete.getFuture());
// Remove this file from the filesBeingDeleted map so that new files can be created with this filename
g_simulator.getMachineByNetworkAddress(self->openedAddress)->closingFiles.erase(self->getFilename());
g_simulator.getMachineByNetworkAddress(self->openedAddress)
g_simulator->getMachineByNetworkAddress(self->openedAddress)->closingFiles.erase(self->getFilename());
g_simulator->getMachineByNetworkAddress(self->openedAddress)
->deletingOrClosingFiles.erase(self->getFilename());
AsyncFileNonDurable::filesBeingDeleted.erase(self->filename);
//TraceEvent("AsyncFileNonDurable_FinishDelete", self->id).detail("Filename", self->filename);

View File

@ -142,7 +142,7 @@ Future<Void> tssComparison(Req req,
if (!TSS_doCompare(src.get(), tss.get().get())) {
CODE_PROBE(true, "TSS Mismatch");
state TraceEvent mismatchEvent(
(g_network->isSimulated() && g_simulator.tssMode == ISimulator::TSSMode::EnabledDropMutations)
(g_network->isSimulated() && g_simulator->tssMode == ISimulator::TSSMode::EnabledDropMutations)
? SevWarnAlways
: SevError,
TSS_mismatchTraceName(req));
@ -206,7 +206,7 @@ Future<Void> tssComparison(Req req,
// record a summarized trace event instead
TraceEvent summaryEvent((g_network->isSimulated() &&
g_simulator.tssMode == ISimulator::TSSMode::EnabledDropMutations)
g_simulator->tssMode == ISimulator::TSSMode::EnabledDropMutations)
? SevWarnAlways
: SevError,
TSS_mismatchTraceName(req));

View File

@ -534,9 +534,7 @@ private:
bool allSwapsDisabled;
};
// Quickly make existing code work that expects g_simulator to be of class type (not a pointer)
extern ISimulator* g_pSimulator;
#define g_simulator (*g_pSimulator)
extern ISimulator* g_simulator;
void startNewSimulator(bool printSimTime);

View File

@ -55,7 +55,7 @@
#include "flow/FaultInjection.h"
#include "flow/actorcompiler.h" // This must be the last #include.
ISimulator* g_pSimulator = nullptr;
ISimulator* g_simulator = nullptr;
thread_local ISimulator::ProcessInfo* ISimulator::currentProcess = nullptr;
ISimulator::ISimulator()
@ -69,10 +69,10 @@ bool simulator_should_inject_fault(const char* context, const char* file, int li
if (!g_network->isSimulated() || !faultInjectionActivated)
return false;
auto p = g_simulator.getCurrentProcess();
auto p = g_simulator->getCurrentProcess();
if (p->fault_injection_p2 && deterministicRandom()->random01() < p->fault_injection_p2 &&
!g_simulator.speedUpSimulation) {
!g_simulator->speedUpSimulation) {
uint32_t h1 = line + (p->fault_injection_r >> 32);
if (h1 < p->fault_injection_p1 * std::numeric_limits<uint32_t>::max()) {
@ -154,13 +154,13 @@ struct SimClogging {
double tnow = now();
double t = tnow + (stableConnection ? 0.1 : 1.0) * halfLatency();
if (!g_simulator.speedUpSimulation && !stableConnection)
if (!g_simulator->speedUpSimulation && !stableConnection)
t += clogPairLatency[pair];
if (!g_simulator.speedUpSimulation && !stableConnection && clogPairUntil.count(pair))
if (!g_simulator->speedUpSimulation && !stableConnection && clogPairUntil.count(pair))
t = std::max(t, clogPairUntil[pair]);
if (!g_simulator.speedUpSimulation && !stableConnection && clogRecvUntil.count(to.ip))
if (!g_simulator->speedUpSimulation && !stableConnection && clogRecvUntil.count(to.ip))
t = std::max(t, clogRecvUntil[to.ip]);
return t - tnow;
@ -192,7 +192,7 @@ private:
double halfLatency() const {
double a = deterministicRandom()->random01();
const double pFast = 0.999;
if (a <= pFast || g_simulator.speedUpSimulation) {
if (a <= pFast || g_simulator->speedUpSimulation) {
a = a / pFast;
return 0.5 * (FLOW_KNOBS->MIN_NETWORK_LATENCY * (1 - a) +
FLOW_KNOBS->FAST_NETWORK_LATENCY / pFast * a); // 0.5ms average
@ -363,7 +363,7 @@ private:
ACTOR static Future<Void> sender(Sim2Conn* self) {
loop {
wait(self->writtenBytes.onChange()); // takes place on peer!
ASSERT(g_simulator.getCurrentProcess() == self->peerProcess);
ASSERT(g_simulator->getCurrentProcess() == self->peerProcess);
wait(delay(.002 * deterministicRandom()->random01()));
self->sentBytes.set(self->writtenBytes.get()); // or possibly just some sometimes...
}
@ -371,41 +371,41 @@ private:
ACTOR static Future<Void> receiver(Sim2Conn* self) {
loop {
if (self->sentBytes.get() != self->receivedBytes.get())
wait(g_simulator.onProcess(self->peerProcess));
wait(g_simulator->onProcess(self->peerProcess));
while (self->sentBytes.get() == self->receivedBytes.get())
wait(self->sentBytes.onChange());
ASSERT(g_simulator.getCurrentProcess() == self->peerProcess);
ASSERT(g_simulator->getCurrentProcess() == self->peerProcess);
state int64_t pos =
deterministicRandom()->random01() < .5
? self->sentBytes.get()
: deterministicRandom()->randomInt64(self->receivedBytes.get(), self->sentBytes.get() + 1);
wait(delay(g_clogging.getSendDelay(
self->process->address, self->peerProcess->address, self->isStableConnection())));
wait(g_simulator.onProcess(self->process));
ASSERT(g_simulator.getCurrentProcess() == self->process);
wait(g_simulator->onProcess(self->process));
ASSERT(g_simulator->getCurrentProcess() == self->process);
wait(delay(g_clogging.getRecvDelay(
self->process->address, self->peerProcess->address, self->isStableConnection())));
ASSERT(g_simulator.getCurrentProcess() == self->process);
ASSERT(g_simulator->getCurrentProcess() == self->process);
if (self->stopReceive.isReady()) {
wait(Future<Void>(Never()));
}
self->receivedBytes.set(pos);
wait(Future<Void>(Void())); // Prior notification can delete self and cancel this actor
ASSERT(g_simulator.getCurrentProcess() == self->process);
ASSERT(g_simulator->getCurrentProcess() == self->process);
}
}
ACTOR static Future<Void> whenReadable(Sim2Conn* self) {
try {
loop {
if (self->readBytes.get() != self->receivedBytes.get()) {
ASSERT(g_simulator.getCurrentProcess() == self->process);
ASSERT(g_simulator->getCurrentProcess() == self->process);
return Void();
}
wait(self->receivedBytes.onChange());
self->rollRandomClose();
}
} catch (Error& e) {
ASSERT(g_simulator.getCurrentProcess() == self->process);
ASSERT(g_simulator->getCurrentProcess() == self->process);
throw;
}
}
@ -415,20 +415,20 @@ private:
if (!self->peer)
return Void();
if (self->peer->availableSendBufferForPeer() > 0) {
ASSERT(g_simulator.getCurrentProcess() == self->process);
ASSERT(g_simulator->getCurrentProcess() == self->process);
return Void();
}
try {
wait(self->peer->receivedBytes.onChange());
ASSERT(g_simulator.getCurrentProcess() == self->peerProcess);
ASSERT(g_simulator->getCurrentProcess() == self->peerProcess);
} catch (Error& e) {
if (e.code() != error_code_broken_promise)
throw;
}
wait(g_simulator.onProcess(self->process));
wait(g_simulator->onProcess(self->process));
}
} catch (Error& e) {
ASSERT(g_simulator.getCurrentProcess() == self->process);
ASSERT(g_simulator->getCurrentProcess() == self->process);
throw;
}
}
@ -436,9 +436,9 @@ private:
void rollRandomClose() {
// make sure connections between parenta and their childs are not closed
if (!stableConnection &&
now() - g_simulator.lastConnectionFailure > g_simulator.connectionFailuresDisableDuration &&
now() - g_simulator->lastConnectionFailure > g_simulator->connectionFailuresDisableDuration &&
deterministicRandom()->random01() < .00001) {
g_simulator.lastConnectionFailure = now();
g_simulator->lastConnectionFailure = now();
double a = deterministicRandom()->random01(), b = deterministicRandom()->random01();
CODE_PROBE(true, "Simulated connection failure", probe::context::sim2, probe::assert::simOnly);
TraceEvent("ConnectionFailure", dbgid)
@ -460,7 +460,7 @@ private:
}
ACTOR static Future<Void> trackLeakedConnection(Sim2Conn* self) {
wait(g_simulator.onProcess(self->process));
wait(g_simulator->onProcess(self->process));
if (self->process->address.isPublic()) {
wait(delay(FLOW_KNOBS->CONNECTION_MONITOR_IDLE_TIMEOUT * FLOW_KNOBS->CONNECTION_MONITOR_IDLE_TIMEOUT * 1.5 +
FLOW_KNOBS->CONNECTION_MONITOR_LOOP_TIME * 2.1 + FLOW_KNOBS->CONNECTION_MONITOR_TIMEOUT));
@ -517,7 +517,7 @@ public:
int mode,
Reference<DiskParameters> diskParameters = makeReference<DiskParameters>(25000, 150000000),
bool delayOnWrite = true) {
state ISimulator::ProcessInfo* currentProcess = g_simulator.getCurrentProcess();
state ISimulator::ProcessInfo* currentProcess = g_simulator->getCurrentProcess();
state TaskPriority currentTaskID = g_network->getCurrentTask();
if (++openCount >= 6000) {
@ -527,15 +527,15 @@ public:
if (openCount == 4000) {
TraceEvent(SevWarnAlways, "DisableConnectionFailures_TooManyFiles").log();
g_simulator.speedUpSimulation = true;
g_simulator.connectionFailuresDisableDuration = 1e6;
g_simulator->speedUpSimulation = true;
g_simulator->connectionFailuresDisableDuration = 1e6;
}
// Filesystems on average these days seem to start to have limits of around 255 characters for a
// filename. We add ".part" below, so we need to stay under 250.
ASSERT(basename(filename).size() < 250);
wait(g_simulator.onMachine(currentProcess));
wait(g_simulator->onMachine(currentProcess));
try {
wait(delay(FLOW_KNOBS->MIN_OPEN_TIME +
deterministicRandom()->random01() * (FLOW_KNOBS->MAX_OPEN_TIME - FLOW_KNOBS->MIN_OPEN_TIME)));
@ -561,11 +561,11 @@ public:
platform::makeTemporary(open_filename.c_str());
SimpleFile* simpleFile = new SimpleFile(h, diskParameters, delayOnWrite, filename, open_filename, flags);
state Reference<IAsyncFile> file = Reference<IAsyncFile>(simpleFile);
wait(g_simulator.onProcess(currentProcess, currentTaskID));
wait(g_simulator->onProcess(currentProcess, currentTaskID));
return file;
} catch (Error& e) {
state Error err = e;
wait(g_simulator.onProcess(currentProcess, currentTaskID));
wait(g_simulator->onProcess(currentProcess, currentTaskID));
throw err;
}
}
@ -779,7 +779,7 @@ private:
if (self->flags & OPEN_ATOMIC_WRITE_AND_CREATE) {
self->flags &= ~OPEN_ATOMIC_WRITE_AND_CREATE;
auto& machineCache = g_simulator.getCurrentProcess()->machine->openFiles;
auto& machineCache = g_simulator->getCurrentProcess()->machine->openFiles;
std::string sourceFilename = self->filename + ".part";
if (machineCache.count(sourceFilename)) {
@ -863,7 +863,7 @@ private:
PromiseStream<Reference<IConnection>> nextConnection;
ACTOR static void incoming(Reference<Sim2Listener> self, double seconds, Reference<IConnection> conn) {
wait(g_simulator.onProcess(self->process));
wait(g_simulator->onProcess(self->process));
wait(delay(seconds));
if (((Sim2Conn*)conn.getPtr())->isPeerGone() && deterministicRandom()->random01() < 0.5)
return;
@ -881,7 +881,7 @@ private:
NetworkAddress address;
};
#define g_sim2 ((Sim2&)g_simulator)
#define g_sim2 ((Sim2&)(*g_simulator))
class Sim2 final : public ISimulator, public INetworkConnections {
public:
@ -1101,7 +1101,7 @@ public:
SimThreadArgs(THREAD_FUNC_RETURN (*func)(void*), void* arg) : func(func), arg(arg) {
ASSERT(g_network->isSimulated());
currentProcess = g_simulator.getCurrentProcess();
currentProcess = g_simulator->getCurrentProcess();
}
};
@ -1175,18 +1175,18 @@ public:
// This is a _rudimentary_ simulation of the untrustworthiness of non-durable deletes and the possibility of
// rebooting during a durable one. It isn't perfect: for example, on real filesystems testing
// for the existence of a non-durably deleted file BEFORE a reboot will show that it apparently doesn't exist.
if (g_simulator.getCurrentProcess()->machine->openFiles.count(filename)) {
g_simulator.getCurrentProcess()->machine->openFiles.erase(filename);
g_simulator.getCurrentProcess()->machine->deletingOrClosingFiles.insert(filename);
if (g_simulator->getCurrentProcess()->machine->openFiles.count(filename)) {
g_simulator->getCurrentProcess()->machine->openFiles.erase(filename);
g_simulator->getCurrentProcess()->machine->deletingOrClosingFiles.insert(filename);
}
if (mustBeDurable || deterministicRandom()->random01() < 0.5) {
state ISimulator::ProcessInfo* currentProcess = g_simulator.getCurrentProcess();
state ISimulator::ProcessInfo* currentProcess = g_simulator->getCurrentProcess();
state TaskPriority currentTaskID = g_network->getCurrentTask();
TraceEvent(SevDebug, "Sim2DeleteFileImpl")
.detail("CurrentProcess", currentProcess->toString())
.detail("Filename", filename)
.detail("Durable", mustBeDurable);
wait(g_simulator.onMachine(currentProcess));
wait(g_simulator->onMachine(currentProcess));
try {
wait(::delay(0.05 * deterministicRandom()->random01()));
if (!currentProcess->rebooting) {
@ -1195,11 +1195,11 @@ public:
wait(::delay(0.05 * deterministicRandom()->random01()));
CODE_PROBE(true, "Simulated durable delete", probe::context::sim2, probe::assert::simOnly);
}
wait(g_simulator.onProcess(currentProcess, currentTaskID));
wait(g_simulator->onProcess(currentProcess, currentTaskID));
return Void();
} catch (Error& e) {
state Error err = e;
wait(g_simulator.onProcess(currentProcess, currentTaskID));
wait(g_simulator->onProcess(currentProcess, currentTaskID));
throw err;
}
} else {
@ -1293,8 +1293,8 @@ public:
m->machine = &machine;
machine.processes.push_back(m);
currentlyRebootingProcesses.erase(addresses.address);
m->excluded = g_simulator.isExcluded(NetworkAddress(ip, port, true, false));
m->cleared = g_simulator.isCleared(addresses.address);
m->excluded = g_simulator->isExcluded(NetworkAddress(ip, port, true, false));
m->cleared = g_simulator->isCleared(addresses.address);
m->protocolVersion = protocol;
m->setGlobal(enTDMetrics, (flowGlobalType)&m->tdmetrics);
@ -2378,8 +2378,8 @@ class UDPSimSocket : public IUDPSocket, ReferenceCounted<UDPSimSocket> {
public:
UDPSimSocket(NetworkAddress const& localAddress, Optional<NetworkAddress> const& peerAddress)
: id(deterministicRandom()->randomUniqueID()), process(g_simulator.getCurrentProcess()), peerAddress(peerAddress),
actors(false), _localAddress(localAddress) {
: id(deterministicRandom()->randomUniqueID()), process(g_simulator->getCurrentProcess()),
peerAddress(peerAddress), actors(false), _localAddress(localAddress) {
g_sim2.addressMap.emplace(_localAddress, process);
ASSERT(process->boundUDPSockets.find(localAddress) == process->boundUDPSockets.end());
process->boundUDPSockets.emplace(localAddress, this);
@ -2482,7 +2482,7 @@ public:
Future<Reference<IUDPSocket>> Sim2::createUDPSocket(NetworkAddress toAddr) {
NetworkAddress localAddress;
auto process = g_simulator.getCurrentProcess();
auto process = g_simulator->getCurrentProcess();
if (process->address.ip.isV6()) {
IPAddress::IPAddressStore store = process->address.ip.toV6();
uint16_t* ipParts = (uint16_t*)store.data();
@ -2500,7 +2500,7 @@ Future<Reference<IUDPSocket>> Sim2::createUDPSocket(NetworkAddress toAddr) {
Future<Reference<IUDPSocket>> Sim2::createUDPSocket(bool isV6) {
NetworkAddress localAddress;
auto process = g_simulator.getCurrentProcess();
auto process = g_simulator->getCurrentProcess();
if (process->address.ip.isV6() == isV6) {
localAddress = process->address;
} else {
@ -2522,8 +2522,8 @@ Future<Reference<IUDPSocket>> Sim2::createUDPSocket(bool isV6) {
void startNewSimulator(bool printSimTime) {
ASSERT(!g_network);
g_network = g_pSimulator = new Sim2(printSimTime);
g_simulator.connectionFailuresDisableDuration = deterministicRandom()->random01() < 0.5 ? 0 : 1e6;
g_network = g_simulator = new Sim2(printSimTime);
g_simulator->connectionFailuresDisableDuration = deterministicRandom()->random01() < 0.5 ? 0 : 1e6;
}
ACTOR void doReboot(ISimulator::ProcessInfo* p, ISimulator::KillType kt) {
@ -2585,7 +2585,7 @@ ACTOR void doReboot(ISimulator::ProcessInfo* p, ISimulator::KillType kt) {
p->rebooting = true;
if ((kt == ISimulator::RebootAndDelete) || (kt == ISimulator::RebootProcessAndDelete)) {
p->cleared = true;
g_simulator.clearAddress(p->address);
g_simulator->clearAddress(p->address);
}
p->shutdownSignal.send(kt);
} catch (Error& e) {
@ -2597,10 +2597,10 @@ ACTOR void doReboot(ISimulator::ProcessInfo* p, ISimulator::KillType kt) {
// Simulates delays for performing operations on disk
Future<Void> waitUntilDiskReady(Reference<DiskParameters> diskParameters, int64_t size, bool sync) {
if (g_simulator.getCurrentProcess()->failedDisk) {
if (g_simulator->getCurrentProcess()->failedDisk) {
return Never();
}
if (g_simulator.connectionFailuresDisableDuration > 1e4)
if (g_simulator->connectionFailuresDisableDuration > 1e4)
return delay(0.0001);
if (diskParameters->nextOperation < now())
@ -2655,7 +2655,7 @@ Future<Reference<class IAsyncFile>> Sim2FileSystem::open(const std::string& file
ASSERT(flags & IAsyncFile::OPEN_CREATE);
if (flags & IAsyncFile::OPEN_UNCACHED) {
auto& machineCache = g_simulator.getCurrentProcess()->machine->openFiles;
auto& machineCache = g_simulator->getCurrentProcess()->machine->openFiles;
std::string actualFilename = filename;
if (flags & IAsyncFile::OPEN_ATOMIC_WRITE_AND_CREATE) {
actualFilename = filename + ".part";

View File

@ -51,13 +51,13 @@ void debug_advanceVersion(UID id, int64_t version, const char* suffix) {
}
void debug_advanceMinCommittedVersion(UID id, int64_t version) {
if (!g_network->isSimulated() || !g_simulator.extraDatabases.empty())
if (!g_network->isSimulated() || !g_simulator->extraDatabases.empty())
return;
debug_advanceVersion(id, version, "min");
}
void debug_advanceMaxCommittedVersion(UID id, int64_t version) {
if (!g_network->isSimulated() || !g_simulator.extraDatabases.empty())
if (!g_network->isSimulated() || !g_simulator->extraDatabases.empty())
return;
debug_advanceVersion(id, version, "max");
}
@ -67,7 +67,7 @@ bool debug_checkPartRestoredVersion(UID id,
std::string context,
std::string minormax,
Severity sev = SevError) {
if (!g_network->isSimulated() || !g_simulator.extraDatabases.empty())
if (!g_network->isSimulated() || !g_simulator->extraDatabases.empty())
return false;
if (disabledMachines.count(id))
return false;
@ -88,33 +88,33 @@ bool debug_checkPartRestoredVersion(UID id,
}
bool debug_checkRestoredVersion(UID id, int64_t version, std::string context, Severity sev) {
if (!g_network->isSimulated() || !g_simulator.extraDatabases.empty())
if (!g_network->isSimulated() || !g_simulator->extraDatabases.empty())
return false;
return debug_checkPartRestoredVersion(id, version, context, "min", sev) ||
debug_checkPartRestoredVersion(id, version, context, "max", sev);
}
void debug_removeVersions(UID id) {
if (!g_network->isSimulated() || !g_simulator.extraDatabases.empty())
if (!g_network->isSimulated() || !g_simulator->extraDatabases.empty())
return;
validationData.erase(id.toString() + "min");
validationData.erase(id.toString() + "max");
}
bool debug_versionsExist(UID id) {
if (!g_network->isSimulated() || !g_simulator.extraDatabases.empty())
if (!g_network->isSimulated() || !g_simulator->extraDatabases.empty())
return false;
return validationData.count(id.toString() + "min") != 0 || validationData.count(id.toString() + "max") != 0;
}
bool debug_checkMinRestoredVersion(UID id, int64_t version, std::string context, Severity sev) {
if (!g_network->isSimulated() || !g_simulator.extraDatabases.empty())
if (!g_network->isSimulated() || !g_simulator->extraDatabases.empty())
return false;
return debug_checkPartRestoredVersion(id, version, context, "min", sev);
}
bool debug_checkMaxRestoredVersion(UID id, int64_t version, std::string context, Severity sev) {
if (!g_network->isSimulated() || !g_simulator.extraDatabases.empty())
if (!g_network->isSimulated() || !g_simulator->extraDatabases.empty())
return false;
return debug_checkPartRestoredVersion(id, version, context, "max", sev);
}
@ -129,13 +129,13 @@ void debug_setCheckRelocationDuration(bool check) {
checkRelocationDuration = check;
}
void debug_advanceVersionTimestamp(int64_t version, double t) {
if (!g_network->isSimulated() || !g_simulator.extraDatabases.empty())
if (!g_network->isSimulated() || !g_simulator->extraDatabases.empty())
return;
timedVersionsValidationData[version] = t;
}
bool debug_checkVersionTime(int64_t version, double t, std::string context, Severity sev) {
if (!g_network->isSimulated() || !g_simulator.extraDatabases.empty())
if (!g_network->isSimulated() || !g_simulator->extraDatabases.empty())
return false;
if (!timedVersionsValidationData.count(version)) {
TraceEvent(SevWarn, (context + "UnknownTime").c_str())

View File

@ -155,7 +155,7 @@ void GranuleFiles::getFiles(Version beginVersion,
int64_t& deltaBytesCounter,
bool summarize) const {
BlobFileIndex dummyIndex; // for searching
ASSERT(!snapshotFiles.empty());
// if beginVersion == 0 or we can collapse, find the latest snapshot <= readVersion
auto snapshotF = snapshotFiles.end();
if (beginVersion == 0 || canCollapse) {

View File

@ -25,6 +25,7 @@
#include <vector>
#include <unordered_map>
#include "fdbclient/ServerKnobs.h"
#include "fdbrpc/simulator.h"
#include "fmt/format.h"
#include "fdbclient/BackupContainerFileSystem.h"
@ -1944,6 +1945,7 @@ ACTOR Future<Void> maybeSplitRange(Reference<BlobManagerData> bmData,
for (auto it = splitPoints.boundaries.begin(); it != splitPoints.boundaries.end(); it++) {
bmData->mergeBoundaries[it->first] = it->second;
}
break;
} catch (Error& e) {
if (e.code() == error_code_operation_cancelled) {
@ -2611,7 +2613,7 @@ ACTOR Future<Void> granuleMergeChecker(Reference<BlobManagerData> bmData) {
double sleepTime = SERVER_KNOBS->BG_MERGE_CANDIDATE_DELAY_SECONDS;
// Check more frequently if speedUpSimulation is set. This may
if (g_network->isSimulated() && g_simulator.speedUpSimulation) {
if (g_network->isSimulated() && g_simulator->speedUpSimulation) {
sleepTime = std::min(5.0, sleepTime);
}
// start delay at the start of the loop, to account for time spend in calculation
@ -3455,6 +3457,10 @@ ACTOR Future<Void> recoverBlobManager(Reference<BlobManagerData> bmData) {
// Once we acknowledge the existing blob workers, we can go ahead and recruit new ones
bmData->startRecruiting.trigger();
bmData->initBStore();
if (isFullRestoreMode())
wait(loadManifest(bmData->db, bmData->bstore));
state Reference<ReadYourWritesTransaction> tr = makeReference<ReadYourWritesTransaction>(bmData->db);
// set up force purge keys if not done already
@ -3766,7 +3772,7 @@ ACTOR Future<Void> chaosRangeMover(Reference<BlobManagerData> bmData) {
loop {
wait(delay(30.0));
if (g_simulator.speedUpSimulation) {
if (g_simulator->speedUpSimulation) {
if (BM_DEBUG) {
printf("Range mover stopping\n");
}
@ -5042,6 +5048,28 @@ ACTOR Future<int64_t> bgccCheckGranule(Reference<BlobManagerData> bmData, KeyRan
return bytesRead;
}
// Check if there is any pending split. It's a precheck for manifest backup:
// the manifest must only be dumped when no granule split is in flight, otherwise
// the captured system-key snapshot could be internally inconsistent.
ACTOR Future<bool> hasPendingSplit(Reference<BlobManagerData> self) {
	state Transaction tr(self->db);
	loop {
		// System-keyspace read; immediate priority and lock-aware so the check
		// works even while the database is locked (e.g. during restore).
		tr.setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
		tr.setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);
		tr.setOption(FDBTransactionOptions::LOCK_AWARE);
		try {
			RangeResult result = wait(tr.getRange(blobGranuleSplitKeys, GetRangeLimits::BYTE_LIMIT_UNLIMITED));
			// Any split-state row not yet in Done state means a split is still pending.
			for (auto& row : result) {
				std::pair<BlobGranuleSplitState, Version> gss = decodeBlobGranuleSplitValue(row.value);
				if (gss.first != BlobGranuleSplitState::Done) {
					return true;
				}
			}
			return false;
		} catch (Error& e) {
			// Standard FDB retry loop on retryable transaction errors.
			wait(tr.onError(e));
		}
	}
}
// FIXME: could eventually make this more thorough by storing some state in the DB or something
// FIXME: simpler solution could be to shuffle ranges
ACTOR Future<Void> bgConsistencyCheck(Reference<BlobManagerData> bmData) {
@ -5053,15 +5081,25 @@ ACTOR Future<Void> bgConsistencyCheck(Reference<BlobManagerData> bmData) {
if (BM_DEBUG) {
fmt::print("BGCC starting\n");
}
if (isFullRestoreMode())
wait(printRestoreSummary(bmData->db, bmData->bstore));
loop {
if (g_network->isSimulated() && g_simulator.speedUpSimulation) {
if (g_network->isSimulated() && g_simulator->speedUpSimulation) {
if (BM_DEBUG) {
printf("BGCC stopping\n");
}
return Void();
}
// Only dump blob manifest when there is no pending split to ensure data consistency
if (SERVER_KNOBS->BLOB_MANIFEST_BACKUP && !isFullRestoreMode()) {
bool pendingSplit = wait(hasPendingSplit(bmData));
if (!pendingSplit) {
wait(dumpManifest(bmData->db, bmData->bstore));
}
}
if (bmData->workersById.size() >= 1) {
int tries = 10;
state KeyRange range;

View File

@ -0,0 +1,374 @@
/*
* BlobManifest.actor.cpp
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "fdbclient/BackupContainer.h"
#include "fdbserver/Knobs.h"
#include "flow/FastRef.h"
#include "flow/flow.h"
#include "fdbclient/NativeAPI.actor.h"
#include "fdbclient/BlobConnectionProvider.h"
#include "fdbclient/FDBTypes.h"
#include "fdbclient/KeyRangeMap.h"
#include "fdbclient/SystemData.h"
#include "fdbclient/BackupContainerFileSystem.h"
#include "fdbclient/BlobGranuleReader.actor.h"
#include "fdbserver/BlobGranuleServerCommon.actor.h"
#include "flow/actorcompiler.h" // has to be last include
#include "fmt/core.h"
//
// This module offers routines to dump or load blob manifest file, which is used for full restore from granules
//
// Default manifest file name on external blob storage. Declared const: it is a
// fixed, file-local constant that is only ever read (passed to read/write helpers),
// so it must not be mutable at namespace scope.
static const std::string MANIFEST_FILENAME = "manifest";
// Compile-time switch for debug output of the manifest dump/load path.
#define ENABLE_DEBUG_PRINT true
// Debug print helper: forwards to fmt::print when ENABLE_DEBUG_PRINT is set,
// otherwise compiles to a no-op branch. Uses fmt::format_string for
// compile-time checking of the format string against the argument types.
template <typename... T>
inline void dprint(fmt::format_string<T...> fmt, T&&... args) {
	if (ENABLE_DEBUG_PRINT)
		fmt::print(fmt, std::forward<T>(args)...);
}
// This class dumps a blob manifest to external blob storage. The manifest is a
// snapshot of the blob-granule-related system keyspace and is the starting point
// for a full restore from granules.
class BlobManifestDumper : public ReferenceCounted<BlobManifestDumper> {
public:
	BlobManifestDumper(Database& db, Reference<BlobConnectionProvider> blobConn) : db_(db), blobConn_(blobConn) {}
	virtual ~BlobManifestDumper() {}

	// Execute the dumper: read the relevant system keys, encode them as a single
	// manifest value, and write it to the external blob store. Errors are logged
	// and swallowed (best-effort dump).
	ACTOR static Future<Void> execute(Reference<BlobManifestDumper> self) {
		try {
			state Standalone<BlobManifest> manifest;
			Standalone<VectorRef<KeyValueRef>> rows = wait(getSystemKeys(self));
			manifest.rows = rows;
			Value data = encode(manifest);
			wait(writeToFile(self, data));
		} catch (Error& e) {
			dprint("WARNING: unexpected blob manifest dumper error {}\n", e.what()); // skip error handling for now
		}
		return Void();
	}

private:
	// Return system keys that are to be backed up, gathered in a single
	// retryable transaction so the rows form one consistent snapshot.
	ACTOR static Future<Standalone<VectorRef<KeyValueRef>>> getSystemKeys(Reference<BlobManifestDumper> self) {
		state Standalone<VectorRef<KeyValueRef>> rows;
		state Transaction tr(self->db_);
		loop {
			tr.setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
			tr.setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);
			tr.setOption(FDBTransactionOptions::LOCK_AWARE);
			try {
				state std::vector<KeyRangeRef> ranges = {
					blobGranuleMappingKeys, // Map granule to workers. Track the active granules
					blobGranuleFileKeys, // Map a granule version to granule files. Track files for a granule
					blobGranuleHistoryKeys, // Map granule to its parents and parent boundaries, for time-travel read
					blobRangeKeys // Key ranges managed by blob
				};
				for (auto range : ranges) {
					// todo use getRangeStream for better performance
					RangeResult result = wait(tr.getRange(range, GetRangeLimits::BYTE_LIMIT_UNLIMITED));
					for (auto& row : result) {
						// deep-copy into the result arena so rows outlive the transaction
						rows.push_back_deep(rows.arena(), KeyValueRef(row.key, row.value));
					}
				}
				return rows;
			} catch (Error& e) {
				wait(tr.onError(e));
			}
		}
	}

	// Write data to the blob manifest file on external storage.
	ACTOR static Future<Void> writeToFile(Reference<BlobManifestDumper> self, Value data) {
		state Reference<BackupContainerFileSystem> writer;
		state std::string fileName;
		std::tie(writer, fileName) = self->blobConn_->createForWrite(MANIFEST_FILENAME);
		state Reference<IBackupFile> file = wait(writer->writeFile(fileName));
		wait(file->append(data.begin(), data.size()));
		wait(file->finish());
		dprint("Write blob manifest file with {} bytes\n", data.size());
		return Void();
	}

	// Encode manifest as binary data, versioned with the blob-granule-file protocol
	// version so the loader can decode it with IncludeVersion().
	static Value encode(BlobManifest& manifest) {
		BinaryWriter wr(IncludeVersion(ProtocolVersion::withBlobGranuleFile()));
		wr << manifest;
		return wr.toValue();
	}

	Database db_;
	Reference<BlobConnectionProvider> blobConn_;
};
// Defines the granule info that is of interest to a full restore.
struct BlobGranuleVersion {
	// Two constructors required by VectorRef
	BlobGranuleVersion() {}
	BlobGranuleVersion(Arena& a, const BlobGranuleVersion& copyFrom)
	  : granuleID(copyFrom.granuleID), keyRange(a, copyFrom.keyRange), version(copyFrom.version),
	    sizeInBytes(copyFrom.sizeInBytes) {}

	UID granuleID; // unique id of the granule
	KeyRangeRef keyRange; // key range covered by the granule; memory owned by the enclosing arena
	Version version; // version of the newest granule file found for this granule
	int64_t sizeInBytes; // size of the last snapshot file plus the delta files after it
};

// Defines a vector for BlobGranuleVersion
typedef Standalone<VectorRef<BlobGranuleVersion>> BlobGranuleVersionVector;
// Defines filename, version, and size for each granule file that is of interest
// to a full restore; decoded from the blobGranuleFileKeys rows.
struct GranuleFileVersion {
	Version version; // version the file was written at
	uint8_t fileType; // BG_FILE_TYPE_SNAPSHOT or a delta file type
	std::string filename;
	int64_t sizeInBytes;
};
// This class loads a blob manifest into the system key space; it is part of
// bare-metal (full) restore from granules. It can also print a summary of the
// restorable granules for verification.
class BlobManifestLoader : public ReferenceCounted<BlobManifestLoader> {
public:
	BlobManifestLoader(Database& db, Reference<BlobConnectionProvider> blobConn) : db_(db), blobConn_(blobConn) {}
	virtual ~BlobManifestLoader() {}

	// Execute the loader: read the manifest file, decode it, write its rows back
	// into the system keyspace, then enumerate granules as a sanity pass.
	// Errors are logged and swallowed (best-effort load).
	ACTOR static Future<Void> execute(Reference<BlobManifestLoader> self) {
		try {
			Value data = wait(readFromFile(self));
			Standalone<BlobManifest> manifest = decode(data);
			wait(writeSystemKeys(self, manifest.rows));
			BlobGranuleVersionVector _ = wait(listGranules(self));
		} catch (Error& e) {
			dprint("WARNING: unexpected manifest loader error {}\n", e.what()); // skip error handling so far
		}
		return Void();
	}

	// Print out a summary for blob granules: lists each granule and verifies its
	// files are readable.
	ACTOR static Future<Void> print(Reference<BlobManifestLoader> self) {
		state BlobGranuleVersionVector granules = wait(listGranules(self));
		for (auto granule : granules) {
			wait(checkGranuleFiles(self, granule));
		}
		return Void();
	}

private:
	// Read all data from the manifest file into a single Value.
	ACTOR static Future<Value> readFromFile(Reference<BlobManifestLoader> self) {
		state Reference<BackupContainerFileSystem> readBstore = self->blobConn_->getForRead(MANIFEST_FILENAME);
		state Reference<IAsyncFile> reader = wait(readBstore->readFile(MANIFEST_FILENAME));
		state int64_t fileSize = wait(reader->size());
		state Arena arena;
		// arena-allocated buffer; the returned Value shares this arena's lifetime
		state uint8_t* data = new (arena) uint8_t[fileSize];
		int readSize = wait(reader->read(data, fileSize, 0));
		dprint("Blob manifest restoring {} bytes\n", readSize);
		StringRef ref = StringRef(data, readSize);
		return Value(ref, arena);
	}

	// Decode blob manifest from binary data (counterpart of BlobManifestDumper::encode).
	static Standalone<BlobManifest> decode(Value data) {
		Standalone<BlobManifest> manifest;
		BinaryReader binaryReader(data, IncludeVersion());
		binaryReader >> manifest;
		return manifest;
	}

	// Write the manifest's system keys back to the database in one retryable,
	// lock-aware transaction.
	ACTOR static Future<Void> writeSystemKeys(Reference<BlobManifestLoader> self, VectorRef<KeyValueRef> rows) {
		state Transaction tr(self->db_);
		loop {
			tr.setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
			tr.setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);
			tr.setOption(FDBTransactionOptions::LOCK_AWARE);
			try {
				for (auto& row : rows) {
					tr.set(row.key, row.value);
				}
				wait(tr.commit());
				dprint("Blob manifest loaded {} rows\n", rows.size());
				return Void();
			} catch (Error& e) {
				wait(tr.onError(e));
			}
		}
	}

	// Iterate active granules (adjacent pairs of blobGranuleMappingKeys rows) and
	// return their version/sizes. Granules with no resolvable files are skipped
	// with a debug message rather than failing the whole listing.
	ACTOR static Future<BlobGranuleVersionVector> listGranules(Reference<BlobManifestLoader> self) {
		state Transaction tr(self->db_);
		loop {
			state BlobGranuleVersionVector results;
			tr.setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
			tr.setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);
			tr.setOption(FDBTransactionOptions::LOCK_AWARE);
			try {
				std::vector<KeyRangeRef> granules;
				state int i = 0;
				auto limit = GetRangeLimits::BYTE_LIMIT_UNLIMITED;
				state RangeResult blobRanges = wait(tr.getRange(blobGranuleMappingKeys, limit));
				// each consecutive pair of mapping keys bounds one granule's range
				for (i = 0; i < blobRanges.size() - 1; i++) {
					Key startKey = blobRanges[i].key.removePrefix(blobGranuleMappingKeys.begin);
					Key endKey = blobRanges[i + 1].key.removePrefix(blobGranuleMappingKeys.begin);
					state KeyRange granuleRange = KeyRangeRef(startKey, endKey);
					try {
						Standalone<BlobGranuleVersion> granule = wait(getGranule(&tr, granuleRange));
						results.push_back_deep(results.arena(), granule);
					} catch (Error& e) {
						dprint("missing data for key range {} \n", granuleRange.toString());
					}
				}
				return results;
			} catch (Error& e) {
				wait(tr.onError(e));
			}
		}
	}

	// Find the newest granule for a key range. The newest granule has the max version and relevant files.
	// Walks history newest-first; falls back to older history entries when the
	// newest one has no files. Throws restore_missing_data if none qualifies.
	ACTOR static Future<Standalone<BlobGranuleVersion>> getGranule(Transaction* tr, KeyRangeRef range) {
		state Standalone<BlobGranuleVersion> granuleVersion;
		KeyRange historyKeyRange = blobGranuleHistoryKeyRangeFor(range);
		// reverse lookup so that the first row is the newest version
		state RangeResult results =
		    wait(tr->getRange(historyKeyRange, GetRangeLimits::BYTE_LIMIT_UNLIMITED, Snapshot::False, Reverse::True));

		for (KeyValueRef row : results) {
			state KeyRange keyRange;
			state Version version;
			std::tie(keyRange, version) = decodeBlobGranuleHistoryKey(row.key);
			Standalone<BlobGranuleHistoryValue> historyValue = decodeBlobGranuleHistoryValue(row.value);
			state UID granuleID = historyValue.granuleID;

			std::vector<GranuleFileVersion> files = wait(listGranuleFiles(tr, granuleID));
			if (files.empty()) {
				dprint("Granule {} doesn't have files for version {}\n", granuleID.toString(), version);
				continue; // check previous version
			}

			granuleVersion.keyRange = KeyRangeRef(granuleVersion.arena(), keyRange);
			granuleVersion.granuleID = granuleID;
			granuleVersion.version = files.back().version;
			granuleVersion.sizeInBytes = granuleSizeInBytes(files);

			dprint("Granule {}: \n", granuleVersion.granuleID.toString());
			dprint("  {} {} {}\n", keyRange.toString(), granuleVersion.version, granuleVersion.sizeInBytes);
			for (auto& file : files) {
				dprint("  File {}: {} bytes\n", file.filename, file.sizeInBytes);
			}
			return granuleVersion;
		}
		throw restore_missing_data(); // todo a better error code
	}

	// Return sum of last snapshot file size and delta files afterwards
	// (iterates newest-to-oldest, stopping at the first snapshot).
	static int64_t granuleSizeInBytes(std::vector<GranuleFileVersion> files) {
		int64_t totalSize = 0;
		for (auto it = files.rbegin(); it < files.rend(); ++it) {
			totalSize += it->sizeInBytes;
			if (it->fileType == BG_FILE_TYPE_SNAPSHOT)
				break;
		}
		return totalSize;
	}

	// List all files for a given granule, decoded from its blobGranuleFileKeys rows.
	ACTOR static Future<std::vector<GranuleFileVersion>> listGranuleFiles(Transaction* tr, UID granuleID) {
		state KeyRange fileKeyRange = blobGranuleFileKeyRangeFor(granuleID);
		RangeResult results = wait(tr->getRange(fileKeyRange, GetRangeLimits::BYTE_LIMIT_UNLIMITED));
		std::vector<GranuleFileVersion> files;
		for (auto& row : results) {
			UID gid;
			Version version;
			uint8_t fileType;
			Standalone<StringRef> filename;
			int64_t offset;
			int64_t length;
			int64_t fullFileLength;
			Optional<BlobGranuleCipherKeysMeta> cipherKeysMeta;

			std::tie(gid, version, fileType) = decodeBlobGranuleFileKey(row.key);
			std::tie(filename, offset, length, fullFileLength, cipherKeysMeta) = decodeBlobGranuleFileValue(row.value);
			GranuleFileVersion vs = { version, fileType, filename.toString(), length };
			files.push_back(vs);
		}
		return files;
	}

	// Read data from the granule's files via readBlobGranules and print a summary,
	// verifying the granule is actually restorable.
	ACTOR static Future<Void> checkGranuleFiles(Reference<BlobManifestLoader> self, BlobGranuleVersion granule) {
		state KeyRangeRef range = granule.keyRange;
		state Version readVersion = granule.version;
		state Transaction tr(self->db_);
		loop {
			tr.setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
			tr.setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);
			tr.setOption(FDBTransactionOptions::LOCK_AWARE);
			try {
				state Standalone<VectorRef<BlobGranuleChunkRef>> chunks =
				    wait(tr.readBlobGranules(range, 0, readVersion));
				state int count = 0;
				for (const BlobGranuleChunkRef& chunk : chunks) {
					RangeResult rows = wait(readBlobGranule(chunk, range, 0, readVersion, self->blobConn_));
					count += rows.size();
				}
				dprint("Restorable blob granule {} @ {}\n", granule.granuleID.toString(), readVersion);
				dprint("  Range: {}\n", range.toString());
				dprint("  Keys : {}\n", count);
				dprint("  Size : {} bytes\n", granule.sizeInBytes);
				return Void();
			} catch (Error& e) {
				wait(tr.onError(e));
			}
		}
	}

	Database db_;
	Reference<BlobConnectionProvider> blobConn_;
};
// API to dump a manifest copy to external storage.
// Thin wrapper that constructs a BlobManifestDumper and runs it to completion.
ACTOR Future<Void> dumpManifest(Database db, Reference<BlobConnectionProvider> blobConn) {
	Reference<BlobManifestDumper> dumper = makeReference<BlobManifestDumper>(db, blobConn);
	wait(BlobManifestDumper::execute(dumper));
	return Void();
}
// API to load a manifest from external blob storage into the system keyspace.
// Thin wrapper that constructs a BlobManifestLoader and runs it to completion.
ACTOR Future<Void> loadManifest(Database db, Reference<BlobConnectionProvider> blobConn) {
	Reference<BlobManifestLoader> loader = makeReference<BlobManifestLoader>(db, blobConn);
	wait(BlobManifestLoader::execute(loader));
	return Void();
}
// API to print a summary for restorable granules (debug/verification aid).
// Thin wrapper over BlobManifestLoader::print.
ACTOR Future<Void> printRestoreSummary(Database db, Reference<BlobConnectionProvider> blobConn) {
	Reference<BlobManifestLoader> loader = makeReference<BlobManifestLoader>(db, blobConn);
	wait(BlobManifestLoader::print(loader));
	return Void();
}

View File

@ -253,7 +253,7 @@ struct BlobWorkerData : NonCopyable, ReferenceCounted<BlobWorkerData> {
return false;
}
if (g_network->isSimulated()) {
if (g_simulator.speedUpSimulation) {
if (g_simulator->speedUpSimulation) {
return false;
}
return buggifyFull;
@ -1973,6 +1973,10 @@ ACTOR Future<Void> blobGranuleUpdateFiles(Reference<BlobWorkerData> bwData,
metadata->historyVersion = startState.history.present() ? startState.history.get().version : startVersion;
}
// No need to start Change Feed in full restore mode
if (isFullRestoreMode())
return Void();
checkMergeCandidate = granuleCheckMergeCandidate(bwData,
metadata,
startState.granuleID,
@ -3397,10 +3401,12 @@ ACTOR Future<Void> doBlobGranuleFileRequest(Reference<BlobWorkerData> bwData, Bl
}
state Reference<GranuleMetadata> metadata = m;
state Version granuleBeginVersion = req.beginVersion;
choose {
when(wait(metadata->readable.getFuture())) {}
when(wait(metadata->cancelled.getFuture())) { throw wrong_shard_server(); }
// skip waiting for CF ready for recovery mode
if (!isFullRestoreMode()) {
choose {
when(wait(metadata->readable.getFuture())) {}
when(wait(metadata->cancelled.getFuture())) { throw wrong_shard_server(); }
}
}
// in case both readable and cancelled are ready, check cancelled
@ -3453,6 +3459,10 @@ ACTOR Future<Void> doBlobGranuleFileRequest(Reference<BlobWorkerData> bwData, Bl
CODE_PROBE(true, "Granule Active Read");
// this is an active granule query
loop {
// skip check since CF doesn't start for bare metal recovery mode
if (isFullRestoreMode()) {
break;
}
if (!metadata->activeCFData.get().isValid() || !metadata->cancelled.canBeSet()) {
throw wrong_shard_server();
}
@ -3493,12 +3503,14 @@ ACTOR Future<Void> doBlobGranuleFileRequest(Reference<BlobWorkerData> bwData, Bl
// if feed was popped by another worker and BW only got empty versions, it wouldn't itself see that it
// got popped, but we can still reject the in theory this should never happen with other protections but
// it's a useful and inexpensive sanity check
Version emptyVersion = metadata->activeCFData.get()->popVersion - 1;
if (req.readVersion > metadata->durableDeltaVersion.get() &&
emptyVersion > metadata->bufferedDeltaVersion) {
CODE_PROBE(true, "feed popped for read but granule updater didn't notice yet");
// FIXME: could try to cancel the actor here somehow, but it should find out eventually
throw wrong_shard_server();
if (!isFullRestoreMode()) {
Version emptyVersion = metadata->activeCFData.get()->popVersion - 1;
if (req.readVersion > metadata->durableDeltaVersion.get() &&
emptyVersion > metadata->bufferedDeltaVersion) {
CODE_PROBE(true, "feed popped for read but granule updater didn't notice yet");
// FIXME: could try to cancel the actor here somehow, but it should find out eventually
throw wrong_shard_server();
}
}
rangeGranulePair.push_back(std::pair(metadata->keyRange, metadata->files));
}
@ -3795,7 +3807,6 @@ ACTOR Future<GranuleStartState> openGranule(Reference<BlobWorkerData> bwData, As
std::tuple<int64_t, int64_t, UID> prevOwner = decodeBlobGranuleLockValue(prevLockValue.get());
info.granuleID = std::get<2>(prevOwner);
state bool doLockCheck = true;
// if it's the first snapshot of a new granule, history won't be present
if (info.history.present()) {
@ -3859,9 +3870,28 @@ ACTOR Future<GranuleStartState> openGranule(Reference<BlobWorkerData> bwData, As
// if this granule is not derived from a split or merge, use new granule id
info.granuleID = newGranuleID;
}
createChangeFeed = true;
info.doSnapshot = true;
info.previousDurableVersion = invalidVersion;
// for recovery mode - don't create change feed, don't create snapshot
if (isFullRestoreMode()) {
createChangeFeed = false;
info.doSnapshot = false;
GranuleFiles granuleFiles = wait(loadPreviousFiles(&tr, info.granuleID));
info.existingFiles = granuleFiles;
if (info.existingFiles.get().snapshotFiles.empty()) {
ASSERT(info.existingFiles.get().deltaFiles.empty());
info.previousDurableVersion = invalidVersion;
} else if (info.existingFiles.get().deltaFiles.empty()) {
info.previousDurableVersion = info.existingFiles.get().snapshotFiles.back().version;
} else {
info.previousDurableVersion = info.existingFiles.get().deltaFiles.back().version;
}
info.changeFeedStartVersion = info.previousDurableVersion;
} else {
createChangeFeed = true;
info.doSnapshot = true;
info.previousDurableVersion = invalidVersion;
}
}
if (createChangeFeed) {
@ -3876,7 +3906,7 @@ ACTOR Future<GranuleStartState> openGranule(Reference<BlobWorkerData> bwData, As
// If anything in previousGranules, need to do the handoff logic and set
// ret.previousChangeFeedId, and the previous durable version will come from the previous
// granules
if (info.history.present() && info.history.get().value.parentVersions.size() > 0) {
if (info.history.present() && info.history.get().value.parentVersions.size() > 0 && !isFullRestoreMode()) {
CODE_PROBE(true, "Granule open found parent");
if (info.history.get().value.parentVersions.size() == 1) { // split
state KeyRangeRef parentRange(info.history.get().value.parentBoundaries[0],
@ -4706,7 +4736,7 @@ ACTOR Future<Void> simForceFileWriteContention(Reference<BlobWorkerData> bwData)
}
// check for speed up sim
when(wait(delay(5.0))) {
if (g_simulator.speedUpSimulation) {
if (g_simulator->speedUpSimulation) {
if (BW_DEBUG) {
fmt::print("BW {0} releasing {1} file writes b/c speed up simulation\n",
bwData->id.toString().substr(0, 5),
@ -4723,7 +4753,7 @@ ACTOR Future<Void> simForceFullMemory(Reference<BlobWorkerData> bwData) {
// instead of randomly rejecting each request or not, simulate periods in which BW is full
loop {
wait(delayJittered(deterministicRandom()->randomInt(5, 20)));
if (g_simulator.speedUpSimulation) {
if (g_simulator->speedUpSimulation) {
bwData->buggifyFull = false;
if (BW_DEBUG) {
fmt::print("BW {0}: ForceFullMemory exiting\n", bwData->id.toString().substr(0, 6));

View File

@ -1481,8 +1481,8 @@ ACTOR Future<Void> clusterRecoveryCore(Reference<ClusterRecoveryData> self) {
(self->cstate.myDBState.oldTLogData.size() - CLIENT_KNOBS->RECOVERY_DELAY_START_GENERATION)));
}
if (g_network->isSimulated() && self->cstate.myDBState.oldTLogData.size() > CLIENT_KNOBS->MAX_GENERATIONS_SIM) {
g_simulator.connectionFailuresDisableDuration = 1e6;
g_simulator.speedUpSimulation = true;
g_simulator->connectionFailuresDisableDuration = 1e6;
g_simulator->speedUpSimulation = true;
TraceEvent(SevWarnAlways, "DisableConnectionFailures_TooManyGenerations").log();
}
}

View File

@ -1981,7 +1981,7 @@ ACTOR Future<bool> rebalanceReadLoad(DDQueue* self,
Reference<IDataDistributionTeam> destTeam,
bool primary,
TraceEvent* traceEvent) {
if (g_network->isSimulated() && g_simulator.speedUpSimulation) {
if (g_network->isSimulated() && g_simulator->speedUpSimulation) {
traceEvent->detail("CancelingDueToSimulationSpeedup", true);
return false;
}
@ -2067,7 +2067,7 @@ ACTOR static Future<bool> rebalanceTeams(DDQueue* self,
Reference<IDataDistributionTeam const> destTeam,
bool primary,
TraceEvent* traceEvent) {
if (g_network->isSimulated() && g_simulator.speedUpSimulation) {
if (g_network->isSimulated() && g_simulator->speedUpSimulation) {
traceEvent->detail("CancelingDueToSimulationSpeedup", true);
return false;
}
@ -2668,4 +2668,4 @@ TEST_CASE("/DataDistribution/DDQueue/ServerCounterTrace") {
}
std::cout << "Finished.";
return Void();
}
}

View File

@ -108,7 +108,7 @@ ACTOR void destoryChildProcess(Future<Void> parentSSClosed, ISimulator::ProcessI
wait(parentSSClosed);
TraceEvent(SevDebug, message.c_str()).log();
// This one is root cause for most failures, make sure it's okay to destory
g_pSimulator->destroyProcess(childInfo);
g_simulator->destroyProcess(childInfo);
// Explicitly reset the connection with the child process in case re-spawn very quickly
FlowTransport::transport().resetConnection(childInfo->address);
}
@ -118,7 +118,7 @@ ACTOR Future<int> spawnSimulated(std::vector<std::string> paramList,
bool isSync,
double maxSimDelayTime,
IClosable* parent) {
state ISimulator::ProcessInfo* self = g_pSimulator->getCurrentProcess();
state ISimulator::ProcessInfo* self = g_simulator->getCurrentProcess();
state ISimulator::ProcessInfo* child;
state std::string role;
@ -160,7 +160,7 @@ ACTOR Future<int> spawnSimulated(std::vector<std::string> paramList,
}
}
state int result = 0;
child = g_pSimulator->newProcess(
child = g_simulator->newProcess(
"remote flow process",
self->address.ip,
0,
@ -171,7 +171,7 @@ ACTOR Future<int> spawnSimulated(std::vector<std::string> paramList,
self->dataFolder.c_str(),
self->coordinationFolder.c_str(), // do we need to customize this coordination folder path?
self->protocolVersion);
wait(g_pSimulator->onProcess(child));
wait(g_simulator->onProcess(child));
state Future<ISimulator::KillType> onShutdown = child->onShutdown();
state Future<ISimulator::KillType> parentShutdown = self->onShutdown();
state Future<Void> flowProcessF;
@ -199,7 +199,7 @@ ACTOR Future<int> spawnSimulated(std::vector<std::string> paramList,
choose {
when(wait(flowProcessF)) {
TraceEvent(SevDebug, "ChildProcessKilled").log();
wait(g_pSimulator->onProcess(self));
wait(g_simulator->onProcess(self));
TraceEvent(SevDebug, "BackOnParentProcess").detail("Result", std::to_string(result));
destoryChildProcess(parentSSClosed, child, "StorageServerReceivedClosedMessage");
}

View File

@ -563,7 +563,7 @@ ACTOR Future<Void> queueGetReadVersionRequests(
bool canBeQueued = true;
if (stats->txnRequestIn.getValue() - stats->txnRequestOut.getValue() >
SERVER_KNOBS->START_TRANSACTION_MAX_QUEUE_SIZE ||
(g_network->isSimulated() && !g_simulator.speedUpSimulation &&
(g_network->isSimulated() && !g_simulator->speedUpSimulation &&
deterministicRandom()->random01() < 0.01)) {
// When the limit is hit, try to drop requests from the lower priority queues.
if (req.priority == TransactionPriority::BATCH) {

View File

@ -1810,7 +1810,7 @@ private:
cursor->set(a.kv);
++setsThisCommit;
++writesComplete;
if (g_network->isSimulated() && g_simulator.getCurrentProcess()->rebooting)
if (g_network->isSimulated() && g_simulator->getCurrentProcess()->rebooting)
TraceEvent("SetActionFinished", dbgid).detail("Elapsed", now() - s);
}
@ -1824,7 +1824,7 @@ private:
cursor->fastClear(a.range, freeTableEmpty);
cursor->clear(a.range); // TODO: at most one
++writesComplete;
if (g_network->isSimulated() && g_simulator.getCurrentProcess()->rebooting)
if (g_network->isSimulated() && g_simulator->getCurrentProcess()->rebooting)
TraceEvent("ClearActionFinished", dbgid).detail("Elapsed", now() - s);
}
@ -1864,7 +1864,7 @@ private:
diskBytesUsed = waitForAndGet(conn.dbFile->size()) + waitForAndGet(conn.walFile->size());
if (g_network->isSimulated() && g_simulator.getCurrentProcess()->rebooting)
if (g_network->isSimulated() && g_simulator->getCurrentProcess()->rebooting)
TraceEvent("CommitActionFinished", dbgid).detail("Elapsed", now() - t1);
}
@ -1987,7 +1987,7 @@ private:
a.result.send(workPerformed);
++writesComplete;
if (g_network->isSimulated() && g_simulator.getCurrentProcess()->rebooting)
if (g_network->isSimulated() && g_simulator->getCurrentProcess()->rebooting)
TraceEvent("SpringCleaningActionFinished", dbgid).detail("Elapsed", now() - s);
}
};

View File

@ -1491,7 +1491,7 @@ ACTOR Future<Void> doQueueCommit(TLogData* self,
wait(ioDegradedOrTimeoutError(
c, SERVER_KNOBS->MAX_STORAGE_COMMIT_TIME, self->degraded, SERVER_KNOBS->TLOG_DEGRADED_DURATION));
if (g_network->isSimulated() && !g_simulator.speedUpSimulation && BUGGIFY_WITH_PROB(0.0001)) {
if (g_network->isSimulated() && !g_simulator->speedUpSimulation && BUGGIFY_WITH_PROB(0.0001)) {
wait(delay(6.0));
}
wait(self->queueCommitEnd.whenAtLeast(commitNumber - 1));

View File

@ -1905,7 +1905,7 @@ ACTOR Future<Void> tLogPeekStream(TLogData* self, TLogPeekStreamRequest req, Ref
}
ACTOR Future<Void> watchDegraded(TLogData* self) {
if (g_network->isSimulated() && g_simulator.speedUpSimulation) {
if (g_network->isSimulated() && g_simulator->speedUpSimulation) {
return Void();
}
@ -1932,7 +1932,7 @@ ACTOR Future<Void> doQueueCommit(TLogData* self,
state Future<Void> degraded = watchDegraded(self);
wait(c);
if (g_network->isSimulated() && !g_simulator.speedUpSimulation && BUGGIFY_WITH_PROB(0.0001)) {
if (g_network->isSimulated() && !g_simulator->speedUpSimulation && BUGGIFY_WITH_PROB(0.0001)) {
wait(delay(6.0));
}
degraded.cancel();

View File

@ -657,9 +657,9 @@ ACTOR Future<int64_t> getVersionOffset(Database cx,
ACTOR Future<Void> repairDeadDatacenter(Database cx,
Reference<AsyncVar<ServerDBInfo> const> dbInfo,
std::string context) {
if (g_network->isSimulated() && g_simulator.usableRegions > 1) {
bool primaryDead = g_simulator.datacenterDead(g_simulator.primaryDcId);
bool remoteDead = g_simulator.datacenterDead(g_simulator.remoteDcId);
if (g_network->isSimulated() && g_simulator->usableRegions > 1) {
bool primaryDead = g_simulator->datacenterDead(g_simulator->primaryDcId);
bool remoteDead = g_simulator->datacenterDead(g_simulator->remoteDcId);
// FIXME: the primary and remote can both be considered dead because excludes are not handled properly by the
// datacenterDead function
@ -673,10 +673,10 @@ ACTOR Future<Void> repairDeadDatacenter(Database cx,
.detail("Stage", "Repopulate")
.detail("RemoteDead", remoteDead)
.detail("PrimaryDead", primaryDead);
g_simulator.usableRegions = 1;
g_simulator->usableRegions = 1;
wait(success(ManagementAPI::changeConfig(
cx.getReference(),
(primaryDead ? g_simulator.disablePrimary : g_simulator.disableRemote) + " repopulate_anti_quorum=1",
(primaryDead ? g_simulator->disablePrimary : g_simulator->disableRemote) + " repopulate_anti_quorum=1",
true)));
while (dbInfo->get().recoveryState < RecoveryState::STORAGE_RECOVERED) {
wait(dbInfo->onChange());

View File

@ -1208,7 +1208,7 @@ void Ratekeeper::updateRate(RatekeeperLimits* limits) {
limits->tpsLimit = std::max(limits->tpsLimit, 0.0);
if (g_network->isSimulated() && g_simulator.speedUpSimulation) {
if (g_network->isSimulated() && g_simulator->speedUpSimulation) {
limits->tpsLimit = std::max(limits->tpsLimit, 100.0);
}

View File

@ -366,13 +366,13 @@ ACTOR Future<Void> _restoreWorker(Database cx, LocalityData locality) {
// Protect restore worker from being killed in simulation;
// Future: Remove the protection once restore can tolerate failure
if (g_network->isSimulated()) {
auto addresses = g_simulator.getProcessByAddress(myWorkerInterf.address())->addresses;
auto addresses = g_simulator->getProcessByAddress(myWorkerInterf.address())->addresses;
g_simulator.protectedAddresses.insert(addresses.address);
g_simulator->protectedAddresses.insert(addresses.address);
if (addresses.secondaryAddress.present()) {
g_simulator.protectedAddresses.insert(addresses.secondaryAddress.get());
g_simulator->protectedAddresses.insert(addresses.secondaryAddress.get());
}
ISimulator::ProcessInfo* p = g_simulator.getProcessByAddress(myWorkerInterf.address());
ISimulator::ProcessInfo* p = g_simulator->getProcessByAddress(myWorkerInterf.address());
TraceEvent("ProtectRestoreWorker")
.detail("Address", addresses.toString())
.detail("IsReliable", p->isReliable())

View File

@ -468,18 +468,18 @@ T simulate(const T& in) {
ACTOR Future<Void> runBackup(Reference<IClusterConnectionRecord> connRecord) {
state std::vector<Future<Void>> agentFutures;
while (g_simulator.backupAgents == ISimulator::BackupAgentType::WaitForType) {
while (g_simulator->backupAgents == ISimulator::BackupAgentType::WaitForType) {
wait(delay(1.0));
}
if (g_simulator.backupAgents == ISimulator::BackupAgentType::BackupToFile) {
if (g_simulator->backupAgents == ISimulator::BackupAgentType::BackupToFile) {
Database cx = Database::createDatabase(connRecord, ApiVersion::LATEST_VERSION);
state FileBackupAgent fileAgent;
agentFutures.push_back(fileAgent.run(
cx, 1.0 / CLIENT_KNOBS->BACKUP_AGGREGATE_POLL_RATE, CLIENT_KNOBS->SIM_BACKUP_TASKS_PER_AGENT));
while (g_simulator.backupAgents == ISimulator::BackupAgentType::BackupToFile) {
while (g_simulator->backupAgents == ISimulator::BackupAgentType::BackupToFile) {
wait(delay(1.0));
}
@ -495,16 +495,16 @@ ACTOR Future<Void> runBackup(Reference<IClusterConnectionRecord> connRecord) {
ACTOR Future<Void> runDr(Reference<IClusterConnectionRecord> connRecord) {
state std::vector<Future<Void>> agentFutures;
while (g_simulator.drAgents == ISimulator::BackupAgentType::WaitForType) {
while (g_simulator->drAgents == ISimulator::BackupAgentType::WaitForType) {
wait(delay(1.0));
}
if (g_simulator.drAgents == ISimulator::BackupAgentType::BackupToDB) {
ASSERT(g_simulator.extraDatabases.size() == 1);
if (g_simulator->drAgents == ISimulator::BackupAgentType::BackupToDB) {
ASSERT(g_simulator->extraDatabases.size() == 1);
Database cx = Database::createDatabase(connRecord, ApiVersion::LATEST_VERSION);
auto extraFile =
makeReference<ClusterConnectionMemoryRecord>(ClusterConnectionString(g_simulator.extraDatabases[0]));
makeReference<ClusterConnectionMemoryRecord>(ClusterConnectionString(g_simulator->extraDatabases[0]));
state Database drDatabase = Database::createDatabase(extraFile, ApiVersion::LATEST_VERSION);
TraceEvent("StartingDrAgents")
@ -519,7 +519,7 @@ ACTOR Future<Void> runDr(Reference<IClusterConnectionRecord> connRecord) {
agentFutures.push_back(extraAgent.run(cx, drPollDelay, CLIENT_KNOBS->SIM_BACKUP_TASKS_PER_AGENT));
agentFutures.push_back(dbAgent.run(drDatabase, drPollDelay, CLIENT_KNOBS->SIM_BACKUP_TASKS_PER_AGENT));
while (g_simulator.drAgents == ISimulator::BackupAgentType::BackupToDB) {
while (g_simulator->drAgents == ISimulator::BackupAgentType::BackupToDB) {
wait(delay(1.0));
}
@ -555,7 +555,7 @@ ACTOR Future<ISimulator::KillType> simulatedFDBDRebooter(Reference<IClusterConne
std::string whitelistBinPaths,
ProtocolVersion protocolVersion,
ConfigDBType configDBType) {
state ISimulator::ProcessInfo* simProcess = g_simulator.getCurrentProcess();
state ISimulator::ProcessInfo* simProcess = g_simulator->getCurrentProcess();
state UID randomId = nondeterministicRandom()->randomUniqueID();
state int cycles = 0;
state IPAllowList allowList;
@ -578,17 +578,17 @@ ACTOR Future<ISimulator::KillType> simulatedFDBDRebooter(Reference<IClusterConne
wait(delay(waitTime));
state ISimulator::ProcessInfo* process = g_simulator.newProcess("Server",
ip,
port,
sslEnabled,
listenPerProcess,
localities,
processClass,
dataFolder->c_str(),
coordFolder->c_str(),
protocolVersion);
wait(g_simulator.onProcess(
state ISimulator::ProcessInfo* process = g_simulator->newProcess("Server",
ip,
port,
sslEnabled,
listenPerProcess,
localities,
processClass,
dataFolder->c_str(),
coordFolder->c_str(),
protocolVersion);
wait(g_simulator->onProcess(
process,
TaskPriority::DefaultYield)); // Now switch execution to the process on which we will run
state Future<ISimulator::KillType> onShutdown = process->onShutdown();
@ -623,7 +623,7 @@ ACTOR Future<ISimulator::KillType> simulatedFDBDRebooter(Reference<IClusterConne
1,
WLTOKEN_RESERVED_COUNT,
&allowList);
for (const auto& p : g_simulator.authKeys) {
for (const auto& p : g_simulator->authKeys) {
FlowTransport::transport().addPublicKey(p.first, p.second.toPublic());
}
Sim2FileSystem::newFileSystem();
@ -674,7 +674,7 @@ ACTOR Future<ISimulator::KillType> simulatedFDBDRebooter(Reference<IClusterConne
if (e.code() != error_code_actor_cancelled)
printf("SimulatedFDBDTerminated: %s\n", e.what());
ASSERT(destructed ||
g_simulator.getCurrentProcess() == process); // simulatedFDBD catch called on different process
g_simulator->getCurrentProcess() == process); // simulatedFDBD catch called on different process
TraceEvent(e.code() == error_code_actor_cancelled || e.code() == error_code_file_not_found ||
e.code() == error_code_incompatible_software_version || destructed
? SevInfo
@ -702,7 +702,7 @@ ACTOR Future<ISimulator::KillType> simulatedFDBDRebooter(Reference<IClusterConne
onShutdown = e;
}
ASSERT(destructed || g_simulator.getCurrentProcess() == process);
ASSERT(destructed || g_simulator->getCurrentProcess() == process);
if (!process->shutdownSignal.isSet() && !destructed) {
process->rebooting = true;
@ -715,11 +715,11 @@ ACTOR Future<ISimulator::KillType> simulatedFDBDRebooter(Reference<IClusterConne
.detail("Excluded", process->excluded)
.detail("Rebooting", process->rebooting)
.detail("ZoneId", localities.zoneId());
wait(g_simulator.onProcess(simProcess));
wait(g_simulator->onProcess(simProcess));
wait(delay(0.00001 + FLOW_KNOBS->MAX_BUGGIFIED_DELAY)); // One last chance for the process to clean up?
g_simulator.destroyProcess(
g_simulator->destroyProcess(
process); // Leak memory here; the process may be used in other parts of the simulation
auto shutdownResult = onShutdown.get();
@ -842,7 +842,7 @@ ACTOR Future<Void> simulatedMachine(ClusterConnectionString connStr,
const int listenPort = i * listenPerProcess + 1;
AgentMode agentMode =
runBackupAgents == AgentOnly ? (i == ips.size() - 1 ? AgentOnly : AgentNone) : runBackupAgents;
if (g_simulator.hasDiffProtocolProcess && !g_simulator.setDiffProtocol && agentMode == AgentNone) {
if (g_simulator->hasDiffProtocolProcess && !g_simulator->setDiffProtocol && agentMode == AgentNone) {
processes.push_back(simulatedFDBDRebooter(clusterFile,
ips[i],
sslEnabled,
@ -859,7 +859,7 @@ ACTOR Future<Void> simulatedMachine(ClusterConnectionString connStr,
whitelistBinPaths,
protocolVersion,
configDBType));
g_simulator.setDiffProtocol = true;
g_simulator->setDiffProtocol = true;
} else {
processes.push_back(simulatedFDBDRebooter(clusterFile,
ips[i],
@ -915,7 +915,7 @@ ACTOR Future<Void> simulatedMachine(ClusterConnectionString connStr,
{
// Kill all open files, which may cause them to write invalid data.
auto& machineCache = g_simulator.getMachineById(localities.machineId())->openFiles;
auto& machineCache = g_simulator->getMachineById(localities.machineId())->openFiles;
// Copy the file pointers to a vector because the map may be modified while we are killing files
std::vector<AsyncFileNonDurable*> files;
@ -933,14 +933,14 @@ ACTOR Future<Void> simulatedMachine(ClusterConnectionString connStr,
state std::set<std::string> filenames;
state std::string closingStr;
auto& machineCache = g_simulator.getMachineById(localities.machineId())->openFiles;
auto& machineCache = g_simulator->getMachineById(localities.machineId())->openFiles;
for (auto it : machineCache) {
filenames.insert(it.first);
closingStr += it.first + ", ";
ASSERT(it.second.get().canGet());
}
for (auto it : g_simulator.getMachineById(localities.machineId())->deletingOrClosingFiles) {
for (auto it : g_simulator->getMachineById(localities.machineId())->deletingOrClosingFiles) {
filenames.insert(it);
closingStr += it + ", ";
}
@ -953,9 +953,9 @@ ACTOR Future<Void> simulatedMachine(ClusterConnectionString connStr,
.detail("ZoneId", localities.zoneId())
.detail("DataHall", localities.dataHallId());
ISimulator::MachineInfo* machine = g_simulator.getMachineById(localities.machineId());
ISimulator::MachineInfo* machine = g_simulator->getMachineById(localities.machineId());
machine->closingFiles = filenames;
g_simulator.getMachineById(localities.machineId())->openFiles.clear();
g_simulator->getMachineById(localities.machineId())->openFiles.clear();
// During a reboot:
// The process is expected to close all files and be inactive in zero time, but not necessarily
@ -965,7 +965,7 @@ ACTOR Future<Void> simulatedMachine(ClusterConnectionString connStr,
state int shutdownDelayCount = 0;
state double backoff = 0;
loop {
auto& machineCache = g_simulator.getMachineById(localities.machineId())->closingFiles;
auto& machineCache = g_simulator->getMachineById(localities.machineId())->closingFiles;
if (!machineCache.empty()) {
std::string openFiles;
@ -997,7 +997,7 @@ ACTOR Future<Void> simulatedMachine(ClusterConnectionString connStr,
.detail("ZoneId", localities.zoneId())
.detail("DataHall", localities.dataHallId());
g_simulator.destroyMachine(localities.machineId());
g_simulator->destroyMachine(localities.machineId());
// SOMEDAY: when processes can be rebooted, this check will be needed
// ASSERT( this machine is rebooting );
@ -1010,7 +1010,7 @@ ACTOR Future<Void> simulatedMachine(ClusterConnectionString connStr,
CODE_PROBE(true, "Simulated machine has been rebooted");
state bool swap = killType == ISimulator::Reboot && BUGGIFY_WITH_PROB(0.75) &&
g_simulator.canSwapToMachine(localities.zoneId());
g_simulator->canSwapToMachine(localities.zoneId());
if (swap)
availableFolders[localities.dcId()].push_back(myFolders);
@ -1066,7 +1066,7 @@ ACTOR Future<Void> simulatedMachine(ClusterConnectionString connStr,
// this machine is rebooting = false;
}
} catch (Error& e) {
g_simulator.getMachineById(localities.machineId())->openFiles.clear();
g_simulator->getMachineById(localities.machineId())->openFiles.clear();
throw;
}
}
@ -1124,12 +1124,12 @@ ACTOR Future<Void> restartSimulatedSystem(std::vector<Future<Void>>* systemActor
int testerCount = atoi(ini.GetValue("META", "testerCount"));
auto tssModeStr = ini.GetValue("META", "tssMode");
if (tssModeStr != nullptr) {
g_simulator.tssMode = (ISimulator::TSSMode)atoi(tssModeStr);
g_simulator->tssMode = (ISimulator::TSSMode)atoi(tssModeStr);
}
ClusterConnectionString conn(ini.GetValue("META", "connectionString"));
if (testConfig.extraDatabaseMode == ISimulator::ExtraDatabaseMode::Local) {
g_simulator.extraDatabases.clear();
g_simulator.extraDatabases.push_back(conn.toString());
g_simulator->extraDatabases.clear();
g_simulator->extraDatabases.push_back(conn.toString());
}
if (!testConfig.disableHostname) {
auto mockDNSStr = ini.GetValue("META", "mockDNS");
@ -1248,8 +1248,8 @@ ACTOR Future<Void> restartSimulatedSystem(std::vector<Future<Void>>* systemActor
processClass == ProcessClass::TesterClass ? "SimulatedTesterMachine" : "SimulatedMachine"));
}
g_simulator.desiredCoordinators = desiredCoordinators;
g_simulator.processesPerMachine = processesPerMachine;
g_simulator->desiredCoordinators = desiredCoordinators;
g_simulator->processesPerMachine = processesPerMachine;
uniquify(dcIds);
if (!BUGGIFY && dcIds.size() == 2 && dcIds[0] != "" && dcIds[1] != "") {
@ -1279,11 +1279,11 @@ ACTOR Future<Void> restartSimulatedSystem(std::vector<Future<Void>>* systemActor
json_spirit::write_string(json_spirit::mValue(regionArr), json_spirit::Output_options::none);
}
g_simulator.restarted = true;
g_simulator->restarted = true;
TraceEvent("RestartSimulatorSettings")
.detail("DesiredCoordinators", g_simulator.desiredCoordinators)
.detail("ProcessesPerMachine", g_simulator.processesPerMachine)
.detail("DesiredCoordinators", g_simulator->desiredCoordinators)
.detail("ProcessesPerMachine", g_simulator->processesPerMachine)
.detail("ListenersPerProcess", listenersPerProcess);
} catch (Error& e) {
TraceEvent(SevError, "RestartSimulationError").error(e);
@ -1736,18 +1736,18 @@ void SimulationConfig::setRegions(const TestConfig& testConfig) {
}
if (needsRemote) {
g_simulator.originalRegions =
g_simulator->originalRegions =
"regions=" + json_spirit::write_string(json_spirit::mValue(regionArr), json_spirit::Output_options::none);
StatusArray disablePrimary = regionArr;
disablePrimary[0].get_obj()["datacenters"].get_array()[0].get_obj()["priority"] = -1;
g_simulator.disablePrimary = "regions=" + json_spirit::write_string(json_spirit::mValue(disablePrimary),
json_spirit::Output_options::none);
g_simulator->disablePrimary = "regions=" + json_spirit::write_string(json_spirit::mValue(disablePrimary),
json_spirit::Output_options::none);
StatusArray disableRemote = regionArr;
disableRemote[1].get_obj()["datacenters"].get_array()[0].get_obj()["priority"] = -1;
g_simulator.disableRemote = "regions=" + json_spirit::write_string(json_spirit::mValue(disableRemote),
json_spirit::Output_options::none);
g_simulator->disableRemote = "regions=" + json_spirit::write_string(json_spirit::mValue(disableRemote),
json_spirit::Output_options::none);
} else {
// In order to generate a starting configuration with the remote disabled, do not apply the region
// configuration to the DatabaseConfiguration until after creating the starting conf string.
@ -1841,21 +1841,21 @@ void SimulationConfig::setTss(const TestConfig& testConfig) {
double tssRandom = deterministicRandom()->random01();
if (tssRandom > 0.5 || !faultInjectionActivated) {
// normal tss mode
g_simulator.tssMode = ISimulator::TSSMode::EnabledNormal;
g_simulator->tssMode = ISimulator::TSSMode::EnabledNormal;
} else if (tssRandom < 0.25 && !testConfig.isFirstTestInRestart) {
// fault injection - don't enable in first test in restart because second test won't know it intentionally
// lost data
g_simulator.tssMode = ISimulator::TSSMode::EnabledDropMutations;
g_simulator->tssMode = ISimulator::TSSMode::EnabledDropMutations;
} else {
// delay injection
g_simulator.tssMode = ISimulator::TSSMode::EnabledAddDelay;
g_simulator->tssMode = ISimulator::TSSMode::EnabledAddDelay;
}
printf("enabling tss for simulation in mode %d: %s\n", g_simulator.tssMode, confStr.c_str());
printf("enabling tss for simulation in mode %d: %s\n", g_simulator->tssMode, confStr.c_str());
}
}
void setConfigDB(TestConfig const& testConfig) {
g_simulator.configDBType = testConfig.getConfigDBType();
g_simulator->configDBType = testConfig.getConfigDBType();
}
// Generates and sets an appropriate configuration for the database according to
@ -1970,57 +1970,57 @@ void setupSimulatedSystem(std::vector<Future<Void>>* systemActors,
startingConfigString += format(" tss_storage_engine:=%d", simconfig.db.testingStorageServerStoreType);
}
if (g_simulator.originalRegions != "") {
simconfig.set_config(g_simulator.originalRegions);
g_simulator.startingDisabledConfiguration = startingConfigString + " " + g_simulator.disableRemote;
startingConfigString += " " + g_simulator.originalRegions;
if (g_simulator->originalRegions != "") {
simconfig.set_config(g_simulator->originalRegions);
g_simulator->startingDisabledConfiguration = startingConfigString + " " + g_simulator->disableRemote;
startingConfigString += " " + g_simulator->originalRegions;
}
g_simulator.storagePolicy = simconfig.db.storagePolicy;
g_simulator.tLogPolicy = simconfig.db.tLogPolicy;
g_simulator.tLogWriteAntiQuorum = simconfig.db.tLogWriteAntiQuorum;
g_simulator.remoteTLogPolicy = simconfig.db.getRemoteTLogPolicy();
g_simulator.usableRegions = simconfig.db.usableRegions;
g_simulator->storagePolicy = simconfig.db.storagePolicy;
g_simulator->tLogPolicy = simconfig.db.tLogPolicy;
g_simulator->tLogWriteAntiQuorum = simconfig.db.tLogWriteAntiQuorum;
g_simulator->remoteTLogPolicy = simconfig.db.getRemoteTLogPolicy();
g_simulator->usableRegions = simconfig.db.usableRegions;
if (simconfig.db.regions.size() > 0) {
g_simulator.primaryDcId = simconfig.db.regions[0].dcId;
g_simulator.hasSatelliteReplication = simconfig.db.regions[0].satelliteTLogReplicationFactor > 0;
g_simulator->primaryDcId = simconfig.db.regions[0].dcId;
g_simulator->hasSatelliteReplication = simconfig.db.regions[0].satelliteTLogReplicationFactor > 0;
if (simconfig.db.regions[0].satelliteTLogUsableDcsFallback > 0) {
g_simulator.satelliteTLogPolicyFallback = simconfig.db.regions[0].satelliteTLogPolicyFallback;
g_simulator.satelliteTLogWriteAntiQuorumFallback =
g_simulator->satelliteTLogPolicyFallback = simconfig.db.regions[0].satelliteTLogPolicyFallback;
g_simulator->satelliteTLogWriteAntiQuorumFallback =
simconfig.db.regions[0].satelliteTLogWriteAntiQuorumFallback;
} else {
g_simulator.satelliteTLogPolicyFallback = simconfig.db.regions[0].satelliteTLogPolicy;
g_simulator.satelliteTLogWriteAntiQuorumFallback = simconfig.db.regions[0].satelliteTLogWriteAntiQuorum;
g_simulator->satelliteTLogPolicyFallback = simconfig.db.regions[0].satelliteTLogPolicy;
g_simulator->satelliteTLogWriteAntiQuorumFallback = simconfig.db.regions[0].satelliteTLogWriteAntiQuorum;
}
g_simulator.satelliteTLogPolicy = simconfig.db.regions[0].satelliteTLogPolicy;
g_simulator.satelliteTLogWriteAntiQuorum = simconfig.db.regions[0].satelliteTLogWriteAntiQuorum;
g_simulator->satelliteTLogPolicy = simconfig.db.regions[0].satelliteTLogPolicy;
g_simulator->satelliteTLogWriteAntiQuorum = simconfig.db.regions[0].satelliteTLogWriteAntiQuorum;
for (auto s : simconfig.db.regions[0].satellites) {
g_simulator.primarySatelliteDcIds.push_back(s.dcId);
g_simulator->primarySatelliteDcIds.push_back(s.dcId);
}
} else {
g_simulator.hasSatelliteReplication = false;
g_simulator.satelliteTLogWriteAntiQuorum = 0;
g_simulator->hasSatelliteReplication = false;
g_simulator->satelliteTLogWriteAntiQuorum = 0;
}
if (simconfig.db.regions.size() == 2) {
g_simulator.remoteDcId = simconfig.db.regions[1].dcId;
g_simulator->remoteDcId = simconfig.db.regions[1].dcId;
ASSERT((!simconfig.db.regions[0].satelliteTLogPolicy && !simconfig.db.regions[1].satelliteTLogPolicy) ||
simconfig.db.regions[0].satelliteTLogPolicy->info() ==
simconfig.db.regions[1].satelliteTLogPolicy->info());
for (auto s : simconfig.db.regions[1].satellites) {
g_simulator.remoteSatelliteDcIds.push_back(s.dcId);
g_simulator->remoteSatelliteDcIds.push_back(s.dcId);
}
}
if (g_simulator.usableRegions < 2 || !g_simulator.hasSatelliteReplication) {
g_simulator.allowLogSetKills = false;
if (g_simulator->usableRegions < 2 || !g_simulator->hasSatelliteReplication) {
g_simulator->allowLogSetKills = false;
}
ASSERT(g_simulator.storagePolicy && g_simulator.tLogPolicy);
ASSERT(!g_simulator.hasSatelliteReplication || g_simulator.satelliteTLogPolicy);
ASSERT(g_simulator->storagePolicy && g_simulator->tLogPolicy);
ASSERT(!g_simulator->hasSatelliteReplication || g_simulator->satelliteTLogPolicy);
TraceEvent("SimulatorConfig").setMaxFieldLength(10000).detail("ConfigString", StringRef(startingConfigString));
const int dataCenters = simconfig.datacenters;
@ -2035,7 +2035,7 @@ void setupSimulatedSystem(std::vector<Future<Void>>* systemActors,
bool sslEnabled = deterministicRandom()->random01() < 0.10;
bool sslOnly = sslEnabled && deterministicRandom()->coinflip();
bool isTLS = sslEnabled && sslOnly;
g_simulator.listenersPerProcess = sslEnabled && !sslOnly ? 2 : 1;
g_simulator->listenersPerProcess = sslEnabled && !sslOnly ? 2 : 1;
CODE_PROBE(sslEnabled, "SSL enabled");
CODE_PROBE(!sslEnabled, "SSL disabled");
@ -2177,10 +2177,10 @@ void setupSimulatedSystem(std::vector<Future<Void>>* systemActors,
TraceEvent("ProtectCoordinator")
.detail("Address", coordinatorAddresses[i])
.detail("Coordinators", describe(coordinatorAddresses));
g_simulator.protectedAddresses.insert(NetworkAddress(
g_simulator->protectedAddresses.insert(NetworkAddress(
coordinatorAddresses[i].ip, coordinatorAddresses[i].port, true, coordinatorAddresses[i].isTLS()));
if (coordinatorAddresses[i].port == 2) {
g_simulator.protectedAddresses.insert(NetworkAddress(coordinatorAddresses[i].ip, 1, true, true));
g_simulator->protectedAddresses.insert(NetworkAddress(coordinatorAddresses[i].ip, 1, true, true));
}
}
deterministicRandom()->randomShuffle(coordinatorAddresses);
@ -2192,12 +2192,12 @@ void setupSimulatedSystem(std::vector<Future<Void>>* systemActors,
}
if (useLocalDatabase) {
g_simulator.extraDatabases.push_back(
g_simulator->extraDatabases.push_back(
useHostname ? ClusterConnectionString(coordinatorHostnames, "TestCluster:0"_sr).toString()
: ClusterConnectionString(coordinatorAddresses, "TestCluster:0"_sr).toString());
} else if (testConfig.extraDatabaseMode != ISimulator::ExtraDatabaseMode::Disabled) {
for (int i = 0; i < extraDatabaseCount; ++i) {
g_simulator.extraDatabases.push_back(
g_simulator->extraDatabases.push_back(
useHostname
? ClusterConnectionString(extraCoordinatorHostnames[i], StringRef(format("ExtraCluster%04d:0", i)))
.toString()
@ -2212,7 +2212,7 @@ void setupSimulatedSystem(std::vector<Future<Void>>* systemActors,
.detail("String", conn.toString())
.detail("ConfigString", startingConfigString);
bool requiresExtraDBMachines = !g_simulator.extraDatabases.empty() && !useLocalDatabase;
bool requiresExtraDBMachines = !g_simulator->extraDatabases.empty() && !useLocalDatabase;
int assignedMachines = 0, nonVersatileMachines = 0;
bool gradualMigrationPossible = true;
std::vector<ProcessClass::ClassType> processClassesSubSet = { ProcessClass::UnsetClass,
@ -2321,7 +2321,7 @@ void setupSimulatedSystem(std::vector<Future<Void>>* systemActors,
if (requiresExtraDBMachines) {
int cluster = 4;
for (auto extraDatabase : g_simulator.extraDatabases) {
for (auto extraDatabase : g_simulator->extraDatabases) {
std::vector<IPAddress> extraIps;
extraIps.reserve(processesPerMachine);
for (int i = 0; i < processesPerMachine; i++) {
@ -2359,14 +2359,14 @@ void setupSimulatedSystem(std::vector<Future<Void>>* systemActors,
}
}
g_simulator.desiredCoordinators = coordinatorCount;
g_simulator.physicalDatacenters = dataCenters;
g_simulator.processesPerMachine = processesPerMachine;
g_simulator->desiredCoordinators = coordinatorCount;
g_simulator->physicalDatacenters = dataCenters;
g_simulator->processesPerMachine = processesPerMachine;
TraceEvent("SetupSimulatorSettings")
.detail("DesiredCoordinators", g_simulator.desiredCoordinators)
.detail("PhysicalDatacenters", g_simulator.physicalDatacenters)
.detail("ProcessesPerMachine", g_simulator.processesPerMachine);
.detail("DesiredCoordinators", g_simulator->desiredCoordinators)
.detail("PhysicalDatacenters", g_simulator->physicalDatacenters)
.detail("ProcessesPerMachine", g_simulator->processesPerMachine);
// SOMEDAY: add locality for testers to simulate network topology
// FIXME: Start workers with tester class instead, at least sometimes run tests with the testers-only flag
@ -2395,16 +2395,16 @@ void setupSimulatedSystem(std::vector<Future<Void>>* systemActors,
"SimulatedTesterMachine"));
}
if (g_simulator.setDiffProtocol) {
if (g_simulator->setDiffProtocol) {
--(*pTesterCount);
}
*pStartingConfiguration = startingConfigString;
// save some state that we only need when restarting the simulator.
g_simulator.connectionString = conn.toString();
g_simulator.testerCount = testerCount;
g_simulator.allowStorageMigrationTypeChange = gradualMigrationPossible;
g_simulator->connectionString = conn.toString();
g_simulator->testerCount = testerCount;
g_simulator->allowStorageMigrationTypeChange = gradualMigrationPossible;
TraceEvent("SimulatedClusterStarted")
.detail("DataCenters", dataCenters)
@ -2442,14 +2442,14 @@ ACTOR void setupAndRun(std::string dataFolder,
state TestConfig testConfig;
state IPAllowList allowList;
testConfig.readFromConfig(testFile);
g_simulator.hasDiffProtocolProcess = testConfig.startIncompatibleProcess;
g_simulator.setDiffProtocol = false;
g_simulator->hasDiffProtocolProcess = testConfig.startIncompatibleProcess;
g_simulator->setDiffProtocol = false;
if (testConfig.injectTargetedSSRestart && deterministicRandom()->random01() < 0.25) {
g_simulator.injectTargetedSSRestartTime = 60.0 + 340.0 * deterministicRandom()->random01();
g_simulator->injectTargetedSSRestartTime = 60.0 + 340.0 * deterministicRandom()->random01();
}
if (testConfig.injectSSDelay && deterministicRandom()->random01() < 0.25) {
g_simulator.injectSSDelayTime = 60.0 + 240.0 * deterministicRandom()->random01();
g_simulator->injectSSDelayTime = 60.0 + 240.0 * deterministicRandom()->random01();
}
// Build simulator allow list
@ -2509,21 +2509,21 @@ ACTOR void setupAndRun(std::string dataFolder,
// TODO (IPv6) Use IPv6?
auto testSystem =
g_simulator.newProcess("TestSystem",
IPAddress(0x01010101),
1,
false,
1,
LocalityData(Optional<Standalone<StringRef>>(),
Standalone<StringRef>(deterministicRandom()->randomUniqueID().toString()),
Standalone<StringRef>(deterministicRandom()->randomUniqueID().toString()),
Optional<Standalone<StringRef>>()),
ProcessClass(ProcessClass::TesterClass, ProcessClass::CommandLineSource),
"",
"",
currentProtocolVersion());
g_simulator->newProcess("TestSystem",
IPAddress(0x01010101),
1,
false,
1,
LocalityData(Optional<Standalone<StringRef>>(),
Standalone<StringRef>(deterministicRandom()->randomUniqueID().toString()),
Standalone<StringRef>(deterministicRandom()->randomUniqueID().toString()),
Optional<Standalone<StringRef>>()),
ProcessClass(ProcessClass::TesterClass, ProcessClass::CommandLineSource),
"",
"",
currentProtocolVersion());
testSystem->excludeFromRestarts = true;
wait(g_simulator.onProcess(testSystem, TaskPriority::DefaultYield));
wait(g_simulator->onProcess(testSystem, TaskPriority::DefaultYield));
Sim2FileSystem::newFileSystem();
FlowTransport::createInstance(true, 1, WLTOKEN_RESERVED_COUNT, &allowList);
CODE_PROBE(true, "Simulation start");
@ -2606,7 +2606,7 @@ ACTOR void setupAndRun(std::string dataFolder,
TraceEvent("TracingMissingCodeProbes").log();
probe::traceMissedProbes(probe::ExecutionContext::Simulation);
TraceEvent("SimulatedSystemDestruct").log();
g_simulator.stop();
g_simulator->stop();
destructed = true;
wait(Never());
ASSERT(false);

View File

@ -1802,7 +1802,7 @@ ACTOR Future<Void> compactCache(StorageCacheData* data) {
loop {
// TODO understand this, should we add delay here?
// if (g_network->isSimulated()) {
// double endTime = g_simulator.checkDisabled(format("%s/compactCache",
// double endTime = g_simulator->checkDisabled(format("%s/compactCache",
// data->thisServerID.toString().c_str())); if(endTime > now()) { wait(delay(endTime - now(),
// TaskPriority::CompactCache));
// }

View File

@ -2161,7 +2161,7 @@ ACTOR Future<Void> doQueueCommit(TLogData* self,
wait(ioDegradedOrTimeoutError(
c, SERVER_KNOBS->MAX_STORAGE_COMMIT_TIME, self->degraded, SERVER_KNOBS->TLOG_DEGRADED_DURATION));
if (g_network->isSimulated() && !g_simulator.speedUpSimulation && BUGGIFY_WITH_PROB(0.0001)) {
if (g_network->isSimulated() && !g_simulator->speedUpSimulation && BUGGIFY_WITH_PROB(0.0001)) {
wait(delay(6.0));
}
wait(self->queueCommitEnd.whenAtLeast(commitNumber - 1));

View File

@ -3077,7 +3077,7 @@ ACTOR Future<Reference<ILogSystem>> TagPartitionedLogSystem::newEpoch(
// Don't force failure of recovery if it took us a long time to recover. This avoids multiple long running
// recoveries causing tests to timeout
if (BUGGIFY && now() - startTime < 300 && g_network->isSimulated() && g_simulator.speedUpSimulation)
if (BUGGIFY && now() - startTime < 300 && g_network->isSimulated() && g_simulator->speedUpSimulation)
throw cluster_recovery_failed();
for (int i = 0; i < logSystem->tLogs[0]->logServers.size(); i++)

View File

@ -731,7 +731,7 @@ static int asyncSleep(sqlite3_vfs* pVfs, int microseconds) {
try {
Future<Void> simCancel = Never();
if (g_network->isSimulated())
simCancel = success(g_simulator.getCurrentProcess()->shutdownSignal.getFuture());
simCancel = success(g_simulator->getCurrentProcess()->shutdownSignal.getFuture());
if (simCancel.isReady()) {
waitFor(delay(FLOW_KNOBS->MAX_BUGGIFIED_DELAY));
return 0;

View File

@ -2984,7 +2984,7 @@ public:
page->rawData());
} catch (Error& e) {
Error err = e;
if (g_network->isSimulated() && g_simulator.checkInjectedCorruption()) {
if (g_network->isSimulated() && g_simulator->checkInjectedCorruption()) {
err = err.asInjectedFault();
}
@ -4781,7 +4781,7 @@ struct DecodeBoundaryVerifier {
static DecodeBoundaryVerifier* getVerifier(std::string name) {
static std::map<std::string, DecodeBoundaryVerifier> verifiers;
// Only use verifier in a non-restarted simulation so that all page writes are captured
if (g_network->isSimulated() && !g_simulator.restarted) {
if (g_network->isSimulated() && !g_simulator->restarted) {
return &verifiers[name];
}
return nullptr;

View File

@ -103,7 +103,7 @@ protected:
blocked = Promise<Void>();
double before = now();
CoroThreadPool::waitFor(blocked.getFuture());
if (g_network->isSimulated() && g_simulator.getCurrentProcess()->rebooting)
if (g_network->isSimulated() && g_simulator->getCurrentProcess()->rebooting)
TraceEvent("CoroUnblocked").detail("After", now() - before);
}

View File

@ -73,7 +73,7 @@ protected:
blocked = Promise<Void>();
double before = now();
CoroThreadPool::waitFor(blocked.getFuture());
if (g_network->isSimulated() && g_simulator.getCurrentProcess()->rebooting)
if (g_network->isSimulated() && g_simulator->getCurrentProcess()->rebooting)
TraceEvent("CoroUnblocked").detail("After", now() - before);
}
@ -265,7 +265,7 @@ ACTOR void coroSwitcher(Future<Void> what, TaskPriority taskID, Coro* coro) {
try {
// state double t = now();
wait(what);
// if (g_network->isSimulated() && g_simulator.getCurrentProcess()->rebooting && now()!=t)
// if (g_network->isSimulated() && g_simulator->getCurrentProcess()->rebooting && now()!=t)
// TraceEvent("NonzeroWaitDuringReboot").detail("TaskID", taskID).detail("Elapsed", now()-t).backtrace("Flow");
} catch (Error&) {
}
@ -280,7 +280,7 @@ void CoroThreadPool::waitFor(Future<Void> what) {
// double t = now();
coroSwitcher(what, g_network->getCurrentTask(), current_coro);
Coro_switchTo_(swapCoro(main_coro), main_coro);
// if (g_network->isSimulated() && g_simulator.getCurrentProcess()->rebooting && now()!=t)
// if (g_network->isSimulated() && g_simulator->getCurrentProcess()->rebooting && now()!=t)
// TraceEvent("NonzeroWaitDuringReboot").detail("TaskID", currentTaskID).detail("Elapsed",
// now()-t).backtrace("Coro");
ASSERT(what.isReady());

View File

@ -353,17 +353,17 @@ UID getSharedMemoryMachineId() {
#endif
}
ACTOR void failAfter(Future<Void> trigger, ISimulator::ProcessInfo* m = g_simulator.getCurrentProcess()) {
ACTOR void failAfter(Future<Void> trigger, ISimulator::ProcessInfo* m = g_simulator->getCurrentProcess()) {
wait(trigger);
if (enableFailures) {
printf("Killing machine: %s at %f\n", m->address.toString().c_str(), now());
g_simulator.killProcess(m, ISimulator::KillInstantly);
g_simulator->killProcess(m, ISimulator::KillInstantly);
}
}
void failAfter(Future<Void> trigger, Endpoint e) {
if (g_network == &g_simulator)
failAfter(trigger, g_simulator.getProcess(e));
if (g_network == g_simulator)
failAfter(trigger, g_simulator->getProcess(e));
}
ACTOR Future<Void> histogramReport() {
@ -2265,7 +2265,7 @@ int main(int argc, char* argv[]) {
KnobValue::create(ini.GetBoolValue("META", "enableBlobGranuleEncryption", false)));
}
setupAndRun(dataFolder, opts.testFile, opts.restarting, (isRestoring >= 1), opts.whitelistBinPaths);
g_simulator.run();
g_simulator->run();
} else if (role == ServerRole::FDBD) {
// Update the global blob credential files list so that both fast
// restore workers and backup workers can access blob storage.
@ -2475,7 +2475,7 @@ int main(int argc, char* argv[]) {
}
}
// g_simulator.run();
// g_simulator->run();
#ifdef ALLOC_INSTRUMENTATION
{

View File

@ -33,6 +33,7 @@
#include "fdbclient/Tenant.h"
#include "fdbserver/ServerDBInfo.h"
#include "fdbserver/Knobs.h"
#include "flow/flow.h"
#include "flow/actorcompiler.h" // has to be last include
@ -145,6 +146,13 @@ private:
Future<Void> collection;
};
ACTOR Future<Void> dumpManifest(Database db, Reference<BlobConnectionProvider> blobConn);
ACTOR Future<Void> loadManifest(Database db, Reference<BlobConnectionProvider> blobConn);
ACTOR Future<Void> printRestoreSummary(Database db, Reference<BlobConnectionProvider> blobConn);
inline bool isFullRestoreMode() {
return SERVER_KNOBS->BLOB_FULL_RESTORE_MODE;
};
#include "flow/unactorcompiler.h"
#endif

View File

@ -68,7 +68,7 @@ struct VFSAsyncFile {
// Error code is only checked for non-zero because the SQLite API error code after an injected error
// may not match the error code returned by VFSAsyncFile when the inject error occurred.
bool e = g_network->global(INetwork::enSQLiteInjectedError) != (flowGlobalType)0;
bool f = g_simulator.checkInjectedCorruption();
bool f = g_simulator->checkInjectedCorruption();
TraceEvent("VFSCheckInjectedError")
.detail("InjectedIOError", e)
.detail("InjectedCorruption", f)

View File

@ -1212,7 +1212,7 @@ ACTOR template <class T>
Future<T> ioTimeoutError(Future<T> what, double time) {
// Before simulation is sped up, IO operations can take a very long time so limit timeouts
// to not end until at least time after simulation is sped up.
if (g_network->isSimulated() && !g_simulator.speedUpSimulation) {
if (g_network->isSimulated() && !g_simulator->speedUpSimulation) {
time += std::max(0.0, FLOW_KNOBS->SIM_SPEEDUP_AFTER_SECONDS - now());
}
Future<Void> end = lowPriorityDelay(time);
@ -1220,7 +1220,7 @@ Future<T> ioTimeoutError(Future<T> what, double time) {
when(T t = wait(what)) { return t; }
when(wait(end)) {
Error err = io_timeout();
if (g_network->isSimulated() && !g_simulator.getCurrentProcess()->isReliable()) {
if (g_network->isSimulated() && !g_simulator->getCurrentProcess()->isReliable()) {
err = err.asInjectedFault();
}
TraceEvent(SevError, "IoTimeoutError").error(err);
@ -1236,7 +1236,7 @@ Future<T> ioDegradedOrTimeoutError(Future<T> what,
double degradedTime) {
// Before simulation is sped up, IO operations can take a very long time so limit timeouts
// to not end until at least time after simulation is sped up.
if (g_network->isSimulated() && !g_simulator.speedUpSimulation) {
if (g_network->isSimulated() && !g_simulator->speedUpSimulation) {
double timeShift = std::max(0.0, FLOW_KNOBS->SIM_SPEEDUP_AFTER_SECONDS - now());
errTime += timeShift;
degradedTime += timeShift;
@ -1259,7 +1259,7 @@ Future<T> ioDegradedOrTimeoutError(Future<T> what,
when(T t = wait(what)) { return t; }
when(wait(end)) {
Error err = io_timeout();
if (g_network->isSimulated() && !g_simulator.getCurrentProcess()->isReliable()) {
if (g_network->isSimulated() && !g_simulator->getCurrentProcess()->isReliable()) {
err = err.asInjectedFault();
}
TraceEvent(SevError, "IoTimeoutError").error(err);

View File

@ -286,11 +286,11 @@ struct ApiWorkload : TestWorkload {
minValueLength = getOption(options, LiteralStringRef("minValueLength"), 1);
maxValueLength = getOption(options, LiteralStringRef("maxValueLength"), 10000);
useExtraDB = g_network->isSimulated() && !g_simulator.extraDatabases.empty();
useExtraDB = g_network->isSimulated() && !g_simulator->extraDatabases.empty();
if (useExtraDB) {
ASSERT(g_simulator.extraDatabases.size() == 1);
ASSERT(g_simulator->extraDatabases.size() == 1);
auto extraFile =
makeReference<ClusterConnectionMemoryRecord>(ClusterConnectionString(g_simulator.extraDatabases[0]));
makeReference<ClusterConnectionMemoryRecord>(ClusterConnectionString(g_simulator->extraDatabases[0]));
extraDB = Database::createDatabase(extraFile, ApiVersion::LATEST_VERSION);
}
}

View File

@ -900,11 +900,11 @@ public:
// Set up tss fault injection here, only if we are in simulated mode and with fault injection.
// With fault injection enabled, the tss will start acting normal for a bit, then after the specified delay
// start behaving incorrectly.
if (g_network->isSimulated() && !g_simulator.speedUpSimulation &&
g_simulator.tssMode >= ISimulator::TSSMode::EnabledAddDelay) {
if (g_network->isSimulated() && !g_simulator->speedUpSimulation &&
g_simulator->tssMode >= ISimulator::TSSMode::EnabledAddDelay) {
tssFaultInjectTime = now() + deterministicRandom()->randomInt(60, 300);
TraceEvent(SevWarnAlways, "TSSInjectFaultEnabled", thisServerID)
.detail("Mode", g_simulator.tssMode)
.detail("Mode", g_simulator->tssMode)
.detail("At", tssFaultInjectTime.get());
}
}
@ -1501,21 +1501,21 @@ public:
void maybeInjectTargetedRestart(Version v) {
// inject an SS restart at most once per test
if (g_network->isSimulated() && !g_simulator.speedUpSimulation &&
now() > g_simulator.injectTargetedSSRestartTime &&
if (g_network->isSimulated() && !g_simulator->speedUpSimulation &&
now() > g_simulator->injectTargetedSSRestartTime &&
rebootAfterDurableVersion == std::numeric_limits<Version>::max()) {
CODE_PROBE(true, "Injecting SS targeted restart");
TraceEvent("SimSSInjectTargetedRestart", thisServerID).detail("Version", v);
rebootAfterDurableVersion = v;
g_simulator.injectTargetedSSRestartTime = std::numeric_limits<double>::max();
g_simulator->injectTargetedSSRestartTime = std::numeric_limits<double>::max();
}
}
bool maybeInjectDelay() {
if (g_network->isSimulated() && !g_simulator.speedUpSimulation && now() > g_simulator.injectSSDelayTime) {
if (g_network->isSimulated() && !g_simulator->speedUpSimulation && now() > g_simulator->injectSSDelayTime) {
CODE_PROBE(true, "Injecting SS targeted delay");
TraceEvent("SimSSInjectDelay", thisServerID).log();
g_simulator.injectSSDelayTime = std::numeric_limits<double>::max();
g_simulator->injectSSDelayTime = std::numeric_limits<double>::max();
return true;
}
return false;
@ -3655,7 +3655,7 @@ ACTOR Future<Key> findKey(StorageServer* data,
if (sel.offset <= 1 && sel.offset >= 0)
maxBytes = std::numeric_limits<int>::max();
else
maxBytes = (g_network->isSimulated() && g_simulator.tssMode == ISimulator::TSSMode::Disabled && BUGGIFY)
maxBytes = (g_network->isSimulated() && g_simulator->tssMode == ISimulator::TSSMode::Disabled && BUGGIFY)
? SERVER_KNOBS->BUGGIFY_LIMIT_BYTES
: SERVER_KNOBS->STORAGE_LIMIT_BYTES;
@ -4129,14 +4129,12 @@ void preprocessMappedKey(Tuple& mappedKeyFormatTuple, std::vector<Optional<Tuple
}
}
Key constructMappedKey(KeyValueRef* keyValue,
std::vector<Optional<Tuple>>& vec,
Tuple& mappedKeyTuple,
Tuple& mappedKeyFormatTuple) {
Key constructMappedKey(KeyValueRef* keyValue, std::vector<Optional<Tuple>>& vec, Tuple& mappedKeyFormatTuple) {
// Lazily parse key and/or value to tuple because they may not need to be a tuple if not used.
Optional<Tuple> keyTuple;
Optional<Tuple> valueTuple;
mappedKeyTuple.clear();
Tuple mappedKeyTuple;
mappedKeyTuple.reserve(vec.size());
for (int i = 0; i < vec.size(); i++) {
@ -4183,12 +4181,11 @@ TEST_CASE("/fdbserver/storageserver/constructMappedKey") {
Tuple mappedKeyFormatTuple =
Tuple::makeTuple("normal"_sr, "{{escaped}}"_sr, "{K[2]}"_sr, "{V[0]}"_sr, "{...}"_sr);
Tuple mappedKeyTuple;
std::vector<Optional<Tuple>> vt;
bool isRangeQuery = false;
preprocessMappedKey(mappedKeyFormatTuple, vt, isRangeQuery);
Key mappedKey = constructMappedKey(&kvr, vt, mappedKeyTuple, mappedKeyFormatTuple);
Key mappedKey = constructMappedKey(&kvr, vt, mappedKeyFormatTuple);
Key expectedMappedKey =
Tuple::makeTuple("normal"_sr, "{escaped}"_sr, "key-2"_sr, "value-0"_sr).getDataAsStandalone();
@ -4200,11 +4197,10 @@ TEST_CASE("/fdbserver/storageserver/constructMappedKey") {
{
Tuple mappedKeyFormatTuple = Tuple::makeTuple("{{{{}}"_sr, "}}"_sr);
Tuple mappedKeyTuple;
std::vector<Optional<Tuple>> vt;
bool isRangeQuery = false;
preprocessMappedKey(mappedKeyFormatTuple, vt, isRangeQuery);
Key mappedKey = constructMappedKey(&kvr, vt, mappedKeyTuple, mappedKeyFormatTuple);
Key mappedKey = constructMappedKey(&kvr, vt, mappedKeyFormatTuple);
Key expectedMappedKey = Tuple::makeTuple("{{}"_sr, "}"_sr).getDataAsStandalone();
// std::cout << printable(mappedKey) << " == " << printable(expectedMappedKey) << std::endl;
@ -4214,11 +4210,10 @@ TEST_CASE("/fdbserver/storageserver/constructMappedKey") {
{
Tuple mappedKeyFormatTuple = Tuple::makeTuple("{{{{}}"_sr, "}}"_sr);
Tuple mappedKeyTuple;
std::vector<Optional<Tuple>> vt;
bool isRangeQuery = false;
preprocessMappedKey(mappedKeyFormatTuple, vt, isRangeQuery);
Key mappedKey = constructMappedKey(&kvr, vt, mappedKeyTuple, mappedKeyFormatTuple);
Key mappedKey = constructMappedKey(&kvr, vt, mappedKeyFormatTuple);
Key expectedMappedKey = Tuple::makeTuple("{{}"_sr, "}"_sr).getDataAsStandalone();
// std::cout << printable(mappedKey) << " == " << printable(expectedMappedKey) << std::endl;
@ -4229,12 +4224,11 @@ TEST_CASE("/fdbserver/storageserver/constructMappedKey") {
Tuple mappedKeyFormatTuple = Tuple::makeTuple("{K[100]}"_sr);
state bool throwException = false;
try {
Tuple mappedKeyTuple;
std::vector<Optional<Tuple>> vt;
bool isRangeQuery = false;
preprocessMappedKey(mappedKeyFormatTuple, vt, isRangeQuery);
Key mappedKey = constructMappedKey(&kvr, vt, mappedKeyTuple, mappedKeyFormatTuple);
Key mappedKey = constructMappedKey(&kvr, vt, mappedKeyFormatTuple);
} catch (Error& e) {
ASSERT(e.code() == error_code_mapper_bad_index);
throwException = true;
@ -4245,12 +4239,11 @@ TEST_CASE("/fdbserver/storageserver/constructMappedKey") {
Tuple mappedKeyFormatTuple = Tuple::makeTuple("{...}"_sr, "last-element"_sr);
state bool throwException2 = false;
try {
Tuple mappedKeyTuple;
std::vector<Optional<Tuple>> vt;
bool isRangeQuery = false;
preprocessMappedKey(mappedKeyFormatTuple, vt, isRangeQuery);
Key mappedKey = constructMappedKey(&kvr, vt, mappedKeyTuple, mappedKeyFormatTuple);
Key mappedKey = constructMappedKey(&kvr, vt, mappedKeyFormatTuple);
} catch (Error& e) {
ASSERT(e.code() == error_code_mapper_bad_range_decriptor);
throwException2 = true;
@ -4261,12 +4254,11 @@ TEST_CASE("/fdbserver/storageserver/constructMappedKey") {
Tuple mappedKeyFormatTuple = Tuple::makeTuple("{K[not-a-number]}"_sr);
state bool throwException3 = false;
try {
Tuple mappedKeyTuple;
std::vector<Optional<Tuple>> vt;
bool isRangeQuery = false;
preprocessMappedKey(mappedKeyFormatTuple, vt, isRangeQuery);
Key mappedKey = constructMappedKey(&kvr, vt, mappedKeyTuple, mappedKeyFormatTuple);
Key mappedKey = constructMappedKey(&kvr, vt, mappedKeyFormatTuple);
} catch (Error& e) {
ASSERT(e.code() == error_code_mapper_bad_index);
throwException3 = true;
@ -4324,7 +4316,6 @@ ACTOR Future<GetMappedKeyValuesReply> mapKeyValues(StorageServer* data,
g_traceBatch.addEvent(
"TransactionDebug", pOriginalReq->options.get().debugID.get().first(), "storageserver.mapKeyValues.Start");
state Tuple mappedKeyFormatTuple;
state Tuple mappedKeyTuple;
try {
mappedKeyFormatTuple = Tuple::unpack(mapper);
@ -4361,7 +4352,7 @@ ACTOR Future<GetMappedKeyValuesReply> mapKeyValues(StorageServer* data,
kvm->value = ""_sr;
}
Key mappedKey = constructMappedKey(it, vt, mappedKeyTuple, mappedKeyFormatTuple);
Key mappedKey = constructMappedKey(it, vt, mappedKeyFormatTuple);
// Make sure the mappedKey is always available, so that it's good even we want to get key asynchronously.
result.arena.dependsOn(mappedKey.arena());
@ -4848,7 +4839,7 @@ ACTOR Future<Void> getKeyValuesStreamQ(StorageServer* data, GetKeyValuesStreamRe
// Even if TSS mode is Disabled, this may be the second test in a restarting test where the first run
// had it enabled.
state int byteLimit = (BUGGIFY && g_simulator.tssMode == ISimulator::TSSMode::Disabled &&
state int byteLimit = (BUGGIFY && g_simulator->tssMode == ISimulator::TSSMode::Disabled &&
!data->isTss() && !data->isSSWithTSSPair())
? 1
: CLIENT_KNOBS->REPLY_BYTE_LIMIT;
@ -6109,7 +6100,7 @@ ACTOR Future<Version> fetchChangeFeed(StorageServer* data,
.detail("Version", cleanupVersion);
if (g_network->isSimulated()) {
ASSERT(g_simulator.validationData.allDestroyedChangeFeedIDs.count(changeFeedInfo->id.toString()));
ASSERT(g_simulator->validationData.allDestroyedChangeFeedIDs.count(changeFeedInfo->id.toString()));
}
Key beginClearKey = changeFeedInfo->id.withPrefix(persistChangeFeedKeys.begin);
@ -6339,7 +6330,7 @@ ACTOR Future<std::vector<Key>> fetchChangeFeedMetadata(StorageServer* data,
if (g_network->isSimulated()) {
// verify that the feed was actually destroyed and it's not an error in this inference logic
ASSERT(g_simulator.validationData.allDestroyedChangeFeedIDs.count(feed.first.toString()));
ASSERT(g_simulator->validationData.allDestroyedChangeFeedIDs.count(feed.first.toString()));
}
Key beginClearKey = feed.first.withPrefix(persistChangeFeedKeys.begin);
@ -8134,7 +8125,7 @@ void StorageServer::clearTenants(TenantNameRef startTenant, TenantNameRef endTen
ACTOR Future<Void> tssDelayForever() {
loop {
wait(delay(5.0));
if (g_simulator.speedUpSimulation) {
if (g_simulator->speedUpSimulation) {
return Void();
}
}
@ -8153,7 +8144,7 @@ ACTOR Future<Void> update(StorageServer* data, bool* pReceivedUpdate) {
// a very small value.
state int64_t hardLimit = SERVER_KNOBS->STORAGE_HARD_LIMIT_BYTES;
state int64_t hardLimitOverage = SERVER_KNOBS->STORAGE_HARD_LIMIT_BYTES_OVERAGE;
if (g_network->isSimulated() && g_simulator.speedUpSimulation) {
if (g_network->isSimulated() && g_simulator->speedUpSimulation) {
hardLimit = SERVER_KNOBS->STORAGE_HARD_LIMIT_BYTES_SPEED_UP_SIM;
hardLimitOverage = SERVER_KNOBS->STORAGE_HARD_LIMIT_BYTES_OVERAGE_SPEED_UP_SIM;
}
@ -8182,8 +8173,8 @@ ACTOR Future<Void> update(StorageServer* data, bool* pReceivedUpdate) {
data->lastDurableVersionEBrake = data->durableVersion.get();
}
if (g_network->isSimulated() && data->isTss() && g_simulator.tssMode == ISimulator::TSSMode::EnabledAddDelay &&
!g_simulator.speedUpSimulation && data->tssFaultInjectTime.present() &&
if (g_network->isSimulated() && data->isTss() && g_simulator->tssMode == ISimulator::TSSMode::EnabledAddDelay &&
!g_simulator->speedUpSimulation && data->tssFaultInjectTime.present() &&
data->tssFaultInjectTime.get() < now()) {
if (deterministicRandom()->random01() < 0.01) {
TraceEvent(SevWarnAlways, "TSSInjectDelayForever", data->thisServerID).log();
@ -8448,8 +8439,8 @@ ACTOR Future<Void> update(StorageServer* data, bool* pReceivedUpdate) {
// Drop non-private mutations if TSS fault injection is enabled in simulation, or if this is a TSS in
// quarantine.
if (g_network->isSimulated() && data->isTss() && !g_simulator.speedUpSimulation &&
g_simulator.tssMode == ISimulator::TSSMode::EnabledDropMutations &&
if (g_network->isSimulated() && data->isTss() && !g_simulator->speedUpSimulation &&
g_simulator->tssMode == ISimulator::TSSMode::EnabledDropMutations &&
data->tssFaultInjectTime.present() && data->tssFaultInjectTime.get() < now() &&
(msg.type == MutationRef::SetValue || msg.type == MutationRef::ClearRange) &&
(msg.param1.size() < 2 || msg.param1[0] != 0xff || msg.param1[1] != 0xff) &&
@ -8567,7 +8558,7 @@ ACTOR Future<Void> update(StorageServer* data, bool* pReceivedUpdate) {
data->otherError.getFuture().get();
Version maxVersionsInMemory =
(g_network->isSimulated() && g_simulator.speedUpSimulation)
(g_network->isSimulated() && g_simulator->speedUpSimulation)
? std::max(5 * SERVER_KNOBS->VERSIONS_PER_SECOND, SERVER_KNOBS->MAX_READ_TRANSACTION_LIFE_VERSIONS)
: SERVER_KNOBS->MAX_READ_TRANSACTION_LIFE_VERSIONS;
for (int i = 0; i < data->recoveryVersionSkips.size(); i++) {
@ -8687,7 +8678,7 @@ ACTOR Future<Void> updateStorage(StorageServer* data) {
ASSERT(data->durableVersion.get() == data->storageVersion());
if (g_network->isSimulated()) {
double endTime =
g_simulator.checkDisabled(format("%s/updateStorage", data->thisServerID.toString().c_str()));
g_simulator->checkDisabled(format("%s/updateStorage", data->thisServerID.toString().c_str()));
if (endTime > now()) {
wait(delay(endTime - now(), TaskPriority::UpdateStorage));
}

View File

@ -451,7 +451,7 @@ void printSimulatedTopology() {
if (!g_network->isSimulated()) {
return;
}
auto processes = g_simulator.getAllProcesses();
auto processes = g_simulator->getAllProcesses();
std::sort(processes.begin(), processes.end(), [](ISimulator::ProcessInfo* lhs, ISimulator::ProcessInfo* rhs) {
auto l = lhs->locality;
auto r = rhs->locality;
@ -1001,9 +1001,9 @@ ACTOR Future<Void> checkConsistency(Database cx,
state double connectionFailures;
if (g_network->isSimulated()) {
// NOTE: the value will be reset after consistency check
connectionFailures = g_simulator.connectionFailuresDisableDuration;
g_simulator.connectionFailuresDisableDuration = 1e6;
g_simulator.speedUpSimulation = true;
connectionFailures = g_simulator->connectionFailuresDisableDuration;
g_simulator->connectionFailuresDisableDuration = 1e6;
g_simulator->speedUpSimulation = true;
}
Standalone<VectorRef<KeyValueRef>> options;
@ -1040,7 +1040,7 @@ ACTOR Future<Void> checkConsistency(Database cx,
DistributedTestResults testResults = wait(runWorkload(cx, testers, spec, Optional<TenantName>()));
if (testResults.ok() || lastRun) {
if (g_network->isSimulated()) {
g_simulator.connectionFailuresDisableDuration = connectionFailures;
g_simulator->connectionFailuresDisableDuration = connectionFailures;
}
return Void();
}
@ -1289,7 +1289,7 @@ std::map<std::string, std::function<void(const std::string& value, TestSpec* spe
ASSERT(connectionFailuresDisableDuration >= 0);
spec->simConnectionFailuresDisableDuration = connectionFailuresDisableDuration;
if (g_network->isSimulated())
g_simulator.connectionFailuresDisableDuration = spec->simConnectionFailuresDisableDuration;
g_simulator->connectionFailuresDisableDuration = spec->simConnectionFailuresDisableDuration;
TraceEvent("TestParserTest")
.detail("ParsedSimConnectionFailuresDisableDuration", spec->simConnectionFailuresDisableDuration);
} },
@ -1604,8 +1604,8 @@ ACTOR Future<Void> runTests(Reference<AsyncVar<Optional<struct ClusterController
}
if (g_network->isSimulated()) {
g_simulator.backupAgents = simBackupAgents;
g_simulator.drAgents = simDrAgents;
g_simulator->backupAgents = simBackupAgents;
g_simulator->drAgents = simDrAgents;
}
// turn off the database ping functionality if the suite of tests are not going to be using the database

View File

@ -191,7 +191,7 @@ Error checkIOTimeout(Error const& e) {
// In simulation, have to check global timed out flag for both this process and the machine process on which IO is
// done
if (g_network->isSimulated() && !timeoutOccurred)
timeoutOccurred = g_pSimulator->getCurrentProcess()->machine->machineProcess->global(INetwork::enASIOTimedOut);
timeoutOccurred = g_simulator->getCurrentProcess()->machine->machineProcess->global(INetwork::enASIOTimedOut);
if (timeoutOccurred) {
CODE_PROBE(true, "Timeout occurred");
@ -1416,7 +1416,7 @@ void startRole(const Role& role,
StringMetricHandle(LiteralStringRef("Roles")) = roleString(g_roles, false);
StringMetricHandle(LiteralStringRef("RolesWithIDs")) = roleString(g_roles, true);
if (g_network->isSimulated())
g_simulator.addRole(g_network->getLocalAddress(), role.roleName);
g_simulator->addRole(g_network->getLocalAddress(), role.roleName);
}
void endRole(const Role& role, UID id, std::string reason, bool ok, Error e) {
@ -1446,7 +1446,7 @@ void endRole(const Role& role, UID id, std::string reason, bool ok, Error e) {
StringMetricHandle(LiteralStringRef("Roles")) = roleString(g_roles, false);
StringMetricHandle(LiteralStringRef("RolesWithIDs")) = roleString(g_roles, true);
if (g_network->isSimulated())
g_simulator.removeRole(g_network->getLocalAddress(), role.roleName);
g_simulator->removeRole(g_network->getLocalAddress(), role.roleName);
if (role.includeInTraceRoles) {
removeTraceRole(role.abbreviation);

View File

@ -138,8 +138,8 @@ struct AtomicRestoreWorkload : TestWorkload {
}
// SOMEDAY: Remove after backup agents can exist quiescently
if (g_simulator.backupAgents == ISimulator::BackupAgentType::BackupToFile) {
g_simulator.backupAgents = ISimulator::BackupAgentType::NoBackupAgents;
if (g_simulator->backupAgents == ISimulator::BackupAgentType::BackupToFile) {
g_simulator->backupAgents = ISimulator::BackupAgentType::NoBackupAgents;
}
TraceEvent("AtomicRestore_Done").log();

View File

@ -39,9 +39,9 @@ struct AtomicSwitchoverWorkload : TestWorkload {
backupRanges.push_back_deep(backupRanges.arena(), normalKeys);
ASSERT(g_simulator.extraDatabases.size() == 1);
ASSERT(g_simulator->extraDatabases.size() == 1);
auto extraFile =
makeReference<ClusterConnectionMemoryRecord>(ClusterConnectionString(g_simulator.extraDatabases[0]));
makeReference<ClusterConnectionMemoryRecord>(ClusterConnectionString(g_simulator->extraDatabases[0]));
extraDB = Database::createDatabase(extraFile, ApiVersion::LATEST_VERSION);
}
@ -193,8 +193,8 @@ struct AtomicSwitchoverWorkload : TestWorkload {
TraceEvent("AS_Done").log();
// SOMEDAY: Remove after backup agents can exist quiescently
if (g_simulator.drAgents == ISimulator::BackupAgentType::BackupToDB) {
g_simulator.drAgents = ISimulator::BackupAgentType::NoBackupAgents;
if (g_simulator->drAgents == ISimulator::BackupAgentType::BackupToDB) {
g_simulator->drAgents = ISimulator::BackupAgentType::NoBackupAgents;
}
return Void();

View File

@ -777,9 +777,9 @@ struct BackupAndParallelRestoreCorrectnessWorkload : TestWorkload {
}
// SOMEDAY: Remove after backup agents can exist quiescently
if ((g_simulator.backupAgents == ISimulator::BackupAgentType::BackupToFile) &&
if ((g_simulator->backupAgents == ISimulator::BackupAgentType::BackupToFile) &&
(!BackupAndParallelRestoreCorrectnessWorkload::backupAgentRequests)) {
g_simulator.backupAgents = ISimulator::BackupAgentType::NoBackupAgents;
g_simulator->backupAgents = ISimulator::BackupAgentType::NoBackupAgents;
}
} catch (Error& e) {
TraceEvent(SevError, "BackupAndParallelRestoreCorrectness").error(e).GetLastError();

View File

@ -871,9 +871,9 @@ struct BackupAndRestoreCorrectnessWorkload : TestWorkload {
}
// SOMEDAY: Remove after backup agents can exist quiescently
if ((g_simulator.backupAgents == ISimulator::BackupAgentType::BackupToFile) &&
if ((g_simulator->backupAgents == ISimulator::BackupAgentType::BackupToFile) &&
(!BackupAndRestoreCorrectnessWorkload::backupAgentRequests)) {
g_simulator.backupAgents = ISimulator::BackupAgentType::NoBackupAgents;
g_simulator->backupAgents = ISimulator::BackupAgentType::NoBackupAgents;
}
} catch (Error& e) {
TraceEvent(SevError, "BackupAndRestoreCorrectness").error(e).GetLastError();

View File

@ -37,9 +37,9 @@ struct BackupToDBAbort : TestWorkload {
backupRanges.push_back_deep(backupRanges.arena(), normalKeys);
ASSERT(g_simulator.extraDatabases.size() == 1);
ASSERT(g_simulator->extraDatabases.size() == 1);
auto extraFile =
makeReference<ClusterConnectionMemoryRecord>(ClusterConnectionString(g_simulator.extraDatabases[0]));
makeReference<ClusterConnectionMemoryRecord>(ClusterConnectionString(g_simulator->extraDatabases[0]));
extraDB = Database::createDatabase(extraFile, ApiVersion::LATEST_VERSION);
lockid = UID(0xbeeffeed, 0xdecaf00d);
@ -94,8 +94,8 @@ struct BackupToDBAbort : TestWorkload {
TraceEvent("BDBA_End").log();
// SOMEDAY: Remove after backup agents can exist quiescently
if (g_simulator.drAgents == ISimulator::BackupAgentType::BackupToDB) {
g_simulator.drAgents = ISimulator::BackupAgentType::NoBackupAgents;
if (g_simulator->drAgents == ISimulator::BackupAgentType::BackupToDB) {
g_simulator->drAgents = ISimulator::BackupAgentType::NoBackupAgents;
}
return Void();

View File

@ -129,9 +129,9 @@ struct BackupToDBCorrectnessWorkload : TestWorkload {
}
}
ASSERT(g_simulator.extraDatabases.size() == 1);
ASSERT(g_simulator->extraDatabases.size() == 1);
auto extraFile =
makeReference<ClusterConnectionMemoryRecord>(ClusterConnectionString(g_simulator.extraDatabases[0]));
makeReference<ClusterConnectionMemoryRecord>(ClusterConnectionString(g_simulator->extraDatabases[0]));
extraDB = Database::createDatabase(extraFile, ApiVersion::LATEST_VERSION);
TraceEvent("BARW_Start").detail("Locked", locked);
@ -737,9 +737,9 @@ struct BackupToDBCorrectnessWorkload : TestWorkload {
}
// SOMEDAY: Remove after backup agents can exist quiescently
if ((g_simulator.drAgents == ISimulator::BackupAgentType::BackupToDB) &&
if ((g_simulator->drAgents == ISimulator::BackupAgentType::BackupToDB) &&
(!BackupToDBCorrectnessWorkload::drAgentRequests)) {
g_simulator.drAgents = ISimulator::BackupAgentType::NoBackupAgents;
g_simulator->drAgents = ISimulator::BackupAgentType::NoBackupAgents;
}
} catch (Error& e) {
TraceEvent(SevError, "BackupAndRestoreCorrectness").error(e);

View File

@ -77,9 +77,9 @@ struct BackupToDBUpgradeWorkload : TestWorkload {
}
}
ASSERT(g_simulator.extraDatabases.size() == 1);
ASSERT(g_simulator->extraDatabases.size() == 1);
auto extraFile =
makeReference<ClusterConnectionMemoryRecord>(ClusterConnectionString(g_simulator.extraDatabases[0]));
makeReference<ClusterConnectionMemoryRecord>(ClusterConnectionString(g_simulator->extraDatabases[0]));
extraDB = Database::createDatabase(extraFile, ApiVersion::LATEST_VERSION);
TraceEvent("DRU_Start").log();
@ -520,8 +520,8 @@ struct BackupToDBUpgradeWorkload : TestWorkload {
TraceEvent("DRU_Complete").detail("BackupTag", printable(self->backupTag));
if (g_simulator.drAgents == ISimulator::BackupAgentType::BackupToDB) {
g_simulator.drAgents = ISimulator::BackupAgentType::NoBackupAgents;
if (g_simulator->drAgents == ISimulator::BackupAgentType::BackupToDB) {
g_simulator->drAgents = ISimulator::BackupAgentType::NoBackupAgents;
}
} catch (Error& e) {
TraceEvent(SevError, "BackupAndRestoreCorrectnessError").error(e);

View File

@ -62,11 +62,11 @@ struct ChangeConfigWorkload : TestWorkload {
ACTOR Future<Void> configureExtraDatabase(ChangeConfigWorkload* self, Database db) {
wait(delay(5 * deterministicRandom()->random01()));
if (self->configMode.size()) {
if (g_simulator.startingDisabledConfiguration != "") {
if (g_simulator->startingDisabledConfiguration != "") {
// It is not safe to allow automatic failover to a region which is not fully replicated,
// so wait for both regions to be fully replicated before enabling failover
wait(success(
ManagementAPI::changeConfig(db.getReference(), g_simulator.startingDisabledConfiguration, true)));
ManagementAPI::changeConfig(db.getReference(), g_simulator->startingDisabledConfiguration, true)));
TraceEvent("WaitForReplicasExtra").log();
wait(waitForFullReplication(db));
TraceEvent("WaitForReplicasExtraEnd").log();
@ -89,7 +89,7 @@ struct ChangeConfigWorkload : TestWorkload {
Future<Void> configureExtraDatabases(ChangeConfigWorkload* self) {
std::vector<Future<Void>> futures;
if (g_network->isSimulated()) {
for (auto extraDatabase : g_simulator.extraDatabases) {
for (auto extraDatabase : g_simulator->extraDatabases) {
auto extraFile = makeReference<ClusterConnectionMemoryRecord>(ClusterConnectionString(extraDatabase));
Database db = Database::createDatabase(extraFile, ApiVersion::LATEST_VERSION);
futures.push_back(configureExtraDatabase(self, db));
@ -111,18 +111,19 @@ struct ChangeConfigWorkload : TestWorkload {
}
if (self->configMode.size()) {
if (g_network->isSimulated() && g_simulator.startingDisabledConfiguration != "") {
if (g_network->isSimulated() && g_simulator->startingDisabledConfiguration != "") {
// It is not safe to allow automatic failover to a region which is not fully replicated,
// so wait for both regions to be fully replicated before enabling failover
wait(success(
ManagementAPI::changeConfig(cx.getReference(), g_simulator.startingDisabledConfiguration, true)));
ManagementAPI::changeConfig(cx.getReference(), g_simulator->startingDisabledConfiguration, true)));
TraceEvent("WaitForReplicas").log();
wait(waitForFullReplication(cx));
TraceEvent("WaitForReplicasEnd").log();
}
wait(success(ManagementAPI::changeConfig(cx.getReference(), self->configMode, true)));
}
if (g_network->isSimulated() && g_simulator.configDBType != ConfigDBType::SIMPLE || !g_network->isSimulated()) {
if ((g_network->isSimulated() && g_simulator->configDBType != ConfigDBType::SIMPLE) ||
!g_network->isSimulated()) {
if (self->networkAddresses.size()) {
state int i;
for (i = 0; i < self->coordinatorChanges; ++i) {

View File

@ -37,11 +37,11 @@ class WorkloadProcessState {
~WorkloadProcessState() {
TraceEvent("ShutdownClientForWorkload", id).log();
g_simulator.destroyProcess(childProcess);
g_simulator->destroyProcess(childProcess);
}
ACTOR static Future<Void> initializationDone(WorkloadProcessState* self, ISimulator::ProcessInfo* parent) {
wait(g_simulator.onProcess(parent, TaskPriority::DefaultYield));
wait(g_simulator->onProcess(parent, TaskPriority::DefaultYield));
self->init.send(Void());
wait(Never());
ASSERT(false); // does not happen
@ -49,7 +49,7 @@ class WorkloadProcessState {
}
ACTOR static Future<Void> processStart(WorkloadProcessState* self) {
state ISimulator::ProcessInfo* parent = g_simulator.getCurrentProcess();
state ISimulator::ProcessInfo* parent = g_simulator->getCurrentProcess();
state std::vector<Future<Void>> futures;
if (parent->address.isV6()) {
self->childAddress =
@ -65,22 +65,22 @@ class WorkloadProcessState {
TraceEvent("StartingClientWorkloadProcess", self->id)
.detail("Name", self->processName)
.detail("Address", self->childAddress);
self->childProcess = g_simulator.newProcess(self->processName.c_str(),
self->childAddress,
1,
parent->address.isTLS(),
1,
locality,
ProcessClass(ProcessClass::TesterClass, ProcessClass::AutoSource),
dataFolder.c_str(),
parent->coordinationFolder.c_str(),
parent->protocolVersion);
self->childProcess = g_simulator->newProcess(self->processName.c_str(),
self->childAddress,
1,
parent->address.isTLS(),
1,
locality,
ProcessClass(ProcessClass::TesterClass, ProcessClass::AutoSource),
dataFolder.c_str(),
parent->coordinationFolder.c_str(),
parent->protocolVersion);
self->childProcess->excludeFromRestarts = true;
wait(g_simulator.onProcess(self->childProcess, TaskPriority::DefaultYield));
wait(g_simulator->onProcess(self->childProcess, TaskPriority::DefaultYield));
try {
FlowTransport::createInstance(true, 1, WLTOKEN_RESERVED_COUNT);
Sim2FileSystem::newFileSystem();
auto addr = g_simulator.getCurrentProcess()->address;
auto addr = g_simulator->getCurrentProcess()->address;
futures.push_back(FlowTransport::transport().bind(addr, addr));
futures.push_back(success((self->childProcess->onShutdown())));
TraceEvent("ClientWorkloadProcessInitialized", self->id).log();
@ -143,18 +143,18 @@ struct WorkloadProcess {
ACTOR static Future<Void> openDatabase(WorkloadProcess* self,
ClientWorkload::CreateWorkload childCreator,
WorkloadContext wcx) {
state ISimulator::ProcessInfo* parent = g_simulator.getCurrentProcess();
state ISimulator::ProcessInfo* parent = g_simulator->getCurrentProcess();
state Optional<Error> err;
wcx.dbInfo = Reference<AsyncVar<struct ServerDBInfo> const>();
wait(self->processState->initialized());
wait(g_simulator.onProcess(self->childProcess(), TaskPriority::DefaultYield));
wait(g_simulator->onProcess(self->childProcess(), TaskPriority::DefaultYield));
try {
self->createDatabase(childCreator, wcx);
} catch (Error& e) {
ASSERT(e.code() != error_code_actor_cancelled);
err = e;
}
wait(g_simulator.onProcess(parent, TaskPriority::DefaultYield));
wait(g_simulator->onProcess(parent, TaskPriority::DefaultYield));
if (err.present()) {
throw err.get();
}
@ -177,11 +177,11 @@ struct WorkloadProcess {
}
ACTOR static void destroy(WorkloadProcess* self) {
state ISimulator::ProcessInfo* parent = g_simulator.getCurrentProcess();
wait(g_simulator.onProcess(self->childProcess(), TaskPriority::DefaultYield));
state ISimulator::ProcessInfo* parent = g_simulator->getCurrentProcess();
wait(g_simulator->onProcess(self->childProcess(), TaskPriority::DefaultYield));
TraceEvent("DeleteWorkloadProcess").backtrace();
delete self;
wait(g_simulator.onProcess(parent, TaskPriority::DefaultYield));
wait(g_simulator->onProcess(parent, TaskPriority::DefaultYield));
}
std::string description() { return desc; }
@ -190,7 +190,7 @@ struct WorkloadProcess {
// count of `f` is 1, this will cause the future to be destroyed in the process `process`
ACTOR template <class T>
static void cancelChild(ISimulator::ProcessInfo* process, Future<T> f) {
wait(g_simulator.onProcess(process, TaskPriority::DefaultYield));
wait(g_simulator->onProcess(process, TaskPriority::DefaultYield));
}
ACTOR template <class Ret, class Fun>
@ -198,9 +198,9 @@ struct WorkloadProcess {
state Optional<Error> err;
state Ret res;
state Future<Ret> fut;
state ISimulator::ProcessInfo* parent = g_simulator.getCurrentProcess();
state ISimulator::ProcessInfo* parent = g_simulator->getCurrentProcess();
wait(self->databaseOpened);
wait(g_simulator.onProcess(self->childProcess(), TaskPriority::DefaultYield));
wait(g_simulator->onProcess(self->childProcess(), TaskPriority::DefaultYield));
self->cx->defaultTenant = defaultTenant;
try {
fut = f(self->cx);
@ -218,7 +218,7 @@ struct WorkloadProcess {
err = e;
}
fut = Future<Ret>();
wait(g_simulator.onProcess(parent, TaskPriority::DefaultYield));
wait(g_simulator->onProcess(parent, TaskPriority::DefaultYield));
if (err.present()) {
throw err.get();
}

View File

@ -60,10 +60,10 @@ public:
void getMetrics(std::vector<PerfMetric>& m) override {}
void clogRandomPair() {
auto m1 = deterministicRandom()->randomChoice(g_simulator.getAllProcesses());
auto m2 = deterministicRandom()->randomChoice(g_simulator.getAllProcesses());
auto m1 = deterministicRandom()->randomChoice(g_simulator->getAllProcesses());
auto m2 = deterministicRandom()->randomChoice(g_simulator->getAllProcesses());
if (m1->address.ip != m2->address.ip) {
g_simulator.clogPair(m1->address.ip, m2->address.ip, clogDuration.orDefault(10000));
g_simulator->clogPair(m1->address.ip, m2->address.ip, clogDuration.orDefault(10000));
}
}
};

View File

@ -117,9 +117,9 @@ class ConfigIncrementWorkload : public TestWorkload {
Reference<ISingleThreadTransaction> getTransaction(Database cx) const {
ASSERT(g_network->isSimulated()); // TODO: Enforce elsewhere
ASSERT(g_simulator.configDBType != ConfigDBType::DISABLED);
auto type = (g_simulator.configDBType == ConfigDBType::SIMPLE) ? ISingleThreadTransaction::Type::SIMPLE_CONFIG
: ISingleThreadTransaction::Type::PAXOS_CONFIG;
ASSERT(g_simulator->configDBType != ConfigDBType::DISABLED);
auto type = (g_simulator->configDBType == ConfigDBType::SIMPLE) ? ISingleThreadTransaction::Type::SIMPLE_CONFIG
: ISingleThreadTransaction::Type::PAXOS_CONFIG;
return ISingleThreadTransaction::create(type, cx);
}

View File

@ -52,9 +52,9 @@ static const char* backupTypes[] = { "backup_worker_enabled:=0", "backup_worker_
std::string generateRegions() {
std::string result;
if (g_simulator.physicalDatacenters == 1 ||
(g_simulator.physicalDatacenters == 2 && deterministicRandom()->random01() < 0.25) ||
g_simulator.physicalDatacenters == 3) {
if (g_simulator->physicalDatacenters == 1 ||
(g_simulator->physicalDatacenters == 2 && deterministicRandom()->random01() < 0.25) ||
g_simulator->physicalDatacenters == 3) {
return " usable_regions=1 regions=\"\"";
}
@ -87,7 +87,7 @@ std::string generateRegions() {
StatusArray remoteDcArr;
remoteDcArr.push_back(remoteDcObj);
if (g_simulator.physicalDatacenters > 3 && deterministicRandom()->random01() < 0.5) {
if (g_simulator->physicalDatacenters > 3 && deterministicRandom()->random01() < 0.5) {
StatusObject primarySatelliteObj;
primarySatelliteObj["id"] = "2";
primarySatelliteObj["priority"] = 1;
@ -104,7 +104,7 @@ std::string generateRegions() {
remoteSatelliteObj["satellite_logs"] = deterministicRandom()->randomInt(1, 7);
remoteDcArr.push_back(remoteSatelliteObj);
if (g_simulator.physicalDatacenters > 5 && deterministicRandom()->random01() < 0.5) {
if (g_simulator->physicalDatacenters > 5 && deterministicRandom()->random01() < 0.5) {
StatusObject primarySatelliteObjB;
primarySatelliteObjB["id"] = "4";
primarySatelliteObjB["priority"] = 1;
@ -239,11 +239,11 @@ struct ConfigureDatabaseWorkload : TestWorkload {
allowDescriptorChange =
getOption(options, LiteralStringRef("allowDescriptorChange"), SERVER_KNOBS->ENABLE_CROSS_CLUSTER_SUPPORT);
allowTestStorageMigration =
getOption(options, "allowTestStorageMigration"_sr, false) && g_simulator.allowStorageMigrationTypeChange;
getOption(options, "allowTestStorageMigration"_sr, false) && g_simulator->allowStorageMigrationTypeChange;
storageMigrationCompatibleConf = getOption(options, "storageMigrationCompatibleConf"_sr, false);
waitStoreTypeCheck = getOption(options, "waitStoreTypeCheck"_sr, false);
downgradeTest1 = getOption(options, "downgradeTest1"_sr, false);
g_simulator.usableRegions = 1;
g_simulator->usableRegions = 1;
}
std::string description() const override { return "DestroyDatabaseWorkload"; }
@ -347,7 +347,7 @@ struct ConfigureDatabaseWorkload : TestWorkload {
ACTOR Future<Void> singleDB(ConfigureDatabaseWorkload* self, Database cx) {
state Transaction tr;
loop {
if (g_simulator.speedUpSimulation) {
if (g_simulator->speedUpSimulation) {
return Void();
}
state int randomChoice;
@ -373,14 +373,14 @@ struct ConfigureDatabaseWorkload : TestWorkload {
} else if (randomChoice == 3) {
//TraceEvent("ConfigureTestConfigureBegin").detail("NewConfig", newConfig);
int maxRedundancies = sizeof(redundancies) / sizeof(redundancies[0]);
if (g_simulator.physicalDatacenters == 2 || g_simulator.physicalDatacenters > 3) {
if (g_simulator->physicalDatacenters == 2 || g_simulator->physicalDatacenters > 3) {
maxRedundancies--; // There are not enough machines for triple replication in fearless
// configurations
}
int redundancy = deterministicRandom()->randomInt(0, maxRedundancies);
std::string config = redundancies[redundancy];
if (config == "triple" && g_simulator.physicalDatacenters == 3) {
if (config == "triple" && g_simulator->physicalDatacenters == 3) {
config = "three_data_hall ";
}

View File

@ -1482,7 +1482,7 @@ struct ConsistencyCheckWorkload : TestWorkload {
: "False");
if ((g_network->isSimulated() &&
g_simulator.tssMode != ISimulator::TSSMode::EnabledDropMutations) ||
g_simulator->tssMode != ISimulator::TSSMode::EnabledDropMutations) ||
(!storageServerInterfaces[j].isTss() &&
!storageServerInterfaces[firstValidServer].isTss())) {
self->testFailure("Data inconsistent", true);
@ -1624,7 +1624,7 @@ struct ConsistencyCheckWorkload : TestWorkload {
break;
} else if (estimatedBytes[j] < 0 &&
((g_network->isSimulated() &&
g_simulator.tssMode <= ISimulator::TSSMode::EnabledNormal) ||
g_simulator->tssMode <= ISimulator::TSSMode::EnabledNormal) ||
!storageServerInterfaces[j].isTss())) {
// Ignore a non-responding TSS outside of simulation, or if tss fault injection is enabled
hasValidEstimate = false;
@ -1928,7 +1928,7 @@ struct ConsistencyCheckWorkload : TestWorkload {
// FIXME: this is hiding the fact that we can recruit a new storage server on a location the has
// files left behind by a previous failure
// this means that the process is wasting disk space until the process is rebooting
ISimulator::ProcessInfo* p = g_simulator.getProcessByAddress(itr->interf.address());
ISimulator::ProcessInfo* p = g_simulator->getProcessByAddress(itr->interf.address());
// Note: itr->interf.address() may not equal to p->address() because role's endpoint's primary
// addr can be swapped by choosePrimaryAddress() based on its peer's tls config.
TraceEvent("ConsistencyCheck_RebootProcess")
@ -1937,14 +1937,14 @@ struct ConsistencyCheckWorkload : TestWorkload {
.detail("ProcessPrimaryAddress", p->address)
.detail("ProcessAddresses", p->addresses.toString())
.detail("DataStoreID", id)
.detail("Protected", g_simulator.protectedAddresses.count(itr->interf.address()))
.detail("Protected", g_simulator->protectedAddresses.count(itr->interf.address()))
.detail("Reliable", p->isReliable())
.detail("ReliableInfo", p->getReliableInfo())
.detail("KillOrRebootProcess", p->address);
if (p->isReliable()) {
g_simulator.rebootProcess(p, ISimulator::RebootProcess);
g_simulator->rebootProcess(p, ISimulator::RebootProcess);
} else {
g_simulator.killProcess(p, ISimulator::KillInstantly);
g_simulator->killProcess(p, ISimulator::KillInstantly);
}
}
@ -2034,7 +2034,7 @@ struct ConsistencyCheckWorkload : TestWorkload {
}
ACTOR Future<bool> checkWorkerList(Database cx, ConsistencyCheckWorkload* self) {
if (!g_simulator.extraDatabases.empty()) {
if (!g_simulator->extraDatabases.empty()) {
return true;
}
@ -2043,7 +2043,7 @@ struct ConsistencyCheckWorkload : TestWorkload {
for (const auto& it : workers) {
NetworkAddress addr = it.interf.tLog.getEndpoint().addresses.getTLSAddress();
ISimulator::ProcessInfo* info = g_simulator.getProcessByAddress(addr);
ISimulator::ProcessInfo* info = g_simulator->getProcessByAddress(addr);
if (!info || info->failed) {
TraceEvent("ConsistencyCheck_FailedWorkerInList").detail("Addr", it.interf.address());
return false;
@ -2051,7 +2051,7 @@ struct ConsistencyCheckWorkload : TestWorkload {
workerAddresses.insert(NetworkAddress(addr.ip, addr.port, true, addr.isTLS()));
}
std::vector<ISimulator::ProcessInfo*> all = g_simulator.getAllProcesses();
std::vector<ISimulator::ProcessInfo*> all = g_simulator->getAllProcesses();
for (int i = 0; i < all.size(); i++) {
if (all[i]->isReliable() && all[i]->name == std::string("Server") &&
all[i]->startingClass != ProcessClass::TesterClass &&
@ -2181,10 +2181,10 @@ struct ConsistencyCheckWorkload : TestWorkload {
}
// Check if master and cluster controller are in the desired DC for fearless cluster when running under
// simulation
// FIXME: g_simulator.datacenterDead could return false positives. Relaxing checks until it is fixed.
if (g_network->isSimulated() && config.usableRegions > 1 && g_simulator.primaryDcId.present() &&
!g_simulator.datacenterDead(g_simulator.primaryDcId) &&
!g_simulator.datacenterDead(g_simulator.remoteDcId)) {
// FIXME: g_simulator->datacenterDead could return false positives. Relaxing checks until it is fixed.
if (g_network->isSimulated() && config.usableRegions > 1 && g_simulator->primaryDcId.present() &&
!g_simulator->datacenterDead(g_simulator->primaryDcId) &&
!g_simulator->datacenterDead(g_simulator->remoteDcId)) {
expectedPrimaryDcId = config.regions[0].dcId;
expectedRemoteDcId = config.regions[1].dcId;
// If the priorities are equal, either could be the primary
@ -2303,9 +2303,9 @@ struct ConsistencyCheckWorkload : TestWorkload {
}
// Check LogRouter
if (g_network->isSimulated() && config.usableRegions > 1 && g_simulator.primaryDcId.present() &&
!g_simulator.datacenterDead(g_simulator.primaryDcId) &&
!g_simulator.datacenterDead(g_simulator.remoteDcId)) {
if (g_network->isSimulated() && config.usableRegions > 1 && g_simulator->primaryDcId.present() &&
!g_simulator->datacenterDead(g_simulator->primaryDcId) &&
!g_simulator->datacenterDead(g_simulator->remoteDcId)) {
for (auto& tlogSet : db.logSystemConfig.tLogs) {
if (!tlogSet.isLocal && tlogSet.logRouters.size()) {
for (auto& logRouter : tlogSet.logRouters) {

View File

@ -69,7 +69,7 @@ struct CycleWorkload : TestWorkload, CycleMembers<MultiTenancy> {
if constexpr (MultiTenancy) {
ASSERT(g_network->isSimulated());
this->useToken = getOption(options, "useToken"_sr, true);
auto k = g_simulator.authKeys.begin();
auto k = g_simulator->authKeys.begin();
this->tenant = getOption(options, "tenant"_sr, "CycleTenant"_sr);
// make it comfortably longer than the timeout of the workload
auto currentTime = uint64_t(lround(g_network->timer()));
@ -327,7 +327,7 @@ struct CycleWorkload : TestWorkload, CycleMembers<MultiTenancy> {
if (g_network->isSimulated() && retryCount > 50) {
CODE_PROBE(true, "Cycle check enable speedUpSimulation because too many transaction_too_old()");
// try to make the read window back to normal size (5 * version_per_sec)
g_simulator.speedUpSimulation = true;
g_simulator->speedUpSimulation = true;
}
wait(tr.onError(e));
}

View File

@ -188,7 +188,7 @@ struct DataLossRecoveryWorkload : TestWorkload {
std::vector<StorageServerInterface> interfs = wait(getStorageServers(cx));
if (!interfs.empty()) {
const auto& interf = interfs[deterministicRandom()->randomInt(0, interfs.size())];
if (g_simulator.protectedAddresses.count(interf.address()) == 0) {
if (g_simulator->protectedAddresses.count(interf.address()) == 0) {
dest.push_back(interf.uniqueID);
addr = interf.address();
}
@ -256,9 +256,9 @@ struct DataLossRecoveryWorkload : TestWorkload {
}
void killProcess(DataLossRecoveryWorkload* self, const NetworkAddress& addr) {
ISimulator::ProcessInfo* process = g_simulator.getProcessByAddress(addr);
ISimulator::ProcessInfo* process = g_simulator->getProcessByAddress(addr);
ASSERT(process->addresses.contains(addr));
g_simulator.killProcess(process, ISimulator::KillInstantly);
g_simulator->killProcess(process, ISimulator::KillInstantly);
TraceEvent("TestTeamKilled").detail("Address", addr);
}
@ -267,4 +267,4 @@ struct DataLossRecoveryWorkload : TestWorkload {
void getMetrics(std::vector<PerfMetric>& m) override {}
};
WorkloadFactory<DataLossRecoveryWorkload> DataLossRecoveryWorkloadFactory("DataLossRecovery");
WorkloadFactory<DataLossRecoveryWorkload> DataLossRecoveryWorkloadFactory("DataLossRecovery");

View File

@ -38,9 +38,9 @@ struct DifferentClustersSameRVWorkload : TestWorkload {
bool switchComplete = false;
DifferentClustersSameRVWorkload(WorkloadContext const& wcx) : TestWorkload(wcx) {
ASSERT(g_simulator.extraDatabases.size() == 1);
ASSERT(g_simulator->extraDatabases.size() == 1);
auto extraFile =
makeReference<ClusterConnectionMemoryRecord>(ClusterConnectionString(g_simulator.extraDatabases[0]));
makeReference<ClusterConnectionMemoryRecord>(ClusterConnectionString(g_simulator->extraDatabases[0]));
extraDB = Database::createDatabase(extraFile, ApiVersion::LATEST_VERSION);
testDuration = getOption(options, LiteralStringRef("testDuration"), 100.0);
switchAfter = getOption(options, LiteralStringRef("switchAfter"), 50.0);

View File

@ -65,7 +65,7 @@ struct DiskFailureInjectionWorkload : TestWorkload {
}
std::string description() const override {
if (&g_simulator == g_network)
if (g_simulator == g_network)
return "DiskFailureInjection";
else
return "NoSimDiskFailureInjection";
@ -177,8 +177,8 @@ struct DiskFailureInjectionWorkload : TestWorkload {
if (self->throttleDisk && (throttledWorkers++ < self->workersToThrottle))
self->injectDiskDelays(machine, self->stallInterval, self->stallPeriod, self->throttlePeriod);
if (self->corruptFile && (corruptedWorkers++ < self->workersToCorrupt)) {
if (&g_simulator == g_network)
g_simulator.corruptWorkerMap[machine.address()] = true;
if (g_simulator == g_network)
g_simulator->corruptWorkerMap[machine.address()] = true;
self->injectBitFlips(machine, self->percentBitFlips);
}
}
@ -200,8 +200,8 @@ struct DiskFailureInjectionWorkload : TestWorkload {
if (self->throttleDisk && (throttledWorkers++ < self->workersToThrottle))
self->injectDiskDelays(itr->second, self->stallInterval, self->stallPeriod, self->throttlePeriod);
if (self->corruptFile && (corruptedWorkers++ < self->workersToCorrupt)) {
if (&g_simulator == g_network)
g_simulator.corruptWorkerMap[workerAddress] = true;
if (g_simulator == g_network)
g_simulator->corruptWorkerMap[workerAddress] = true;
self->injectBitFlips(itr->second, self->percentBitFlips);
}
}

View File

@ -43,7 +43,7 @@ struct DummyWorkload : TestWorkload {
ACTOR static Future<Void> _start(DummyWorkload* self, Database cx) {
if (self->displayDelay > 0.0)
wait(delay(self->displayDelay));
g_simulator.displayWorkers();
g_simulator->displayWorkers();
return Void();
}

View File

@ -242,14 +242,14 @@ struct ExternalWorkload : TestWorkload, FDBWorkloadContext {
}
uint64_t getProcessID() const override {
if (g_network->isSimulated()) {
return reinterpret_cast<uint64_t>(g_simulator.getCurrentProcess());
return reinterpret_cast<uint64_t>(g_simulator->getCurrentProcess());
} else {
return 0ul;
}
}
void setProcessID(uint64_t processID) override {
if (g_network->isSimulated()) {
g_simulator.currentProcess = reinterpret_cast<ISimulator::ProcessInfo*>(processID);
g_simulator->currentProcess = reinterpret_cast<ISimulator::ProcessInfo*>(processID);
}
}
double now() const override { return g_network->now(); }

View File

@ -36,7 +36,7 @@ struct KillRegionWorkload : TestWorkload {
enabled =
!clientId && g_network->isSimulated(); // only do this on the "first" client, and only when in simulation
testDuration = getOption(options, LiteralStringRef("testDuration"), 10.0);
g_simulator.usableRegions = 1;
g_simulator->usableRegions = 1;
}
std::string description() const override { return "KillRegionWorkload"; }
@ -57,7 +57,7 @@ struct KillRegionWorkload : TestWorkload {
ACTOR static Future<Void> _setup(KillRegionWorkload* self, Database cx) {
TraceEvent("ForceRecovery_DisablePrimaryBegin").log();
wait(success(ManagementAPI::changeConfig(cx.getReference(), g_simulator.disablePrimary, true)));
wait(success(ManagementAPI::changeConfig(cx.getReference(), g_simulator->disablePrimary, true)));
TraceEvent("ForceRecovery_WaitForRemote").log();
wait(waitForPrimaryDC(cx, LiteralStringRef("1")));
TraceEvent("ForceRecovery_DisablePrimaryComplete").log();
@ -75,29 +75,29 @@ struct KillRegionWorkload : TestWorkload {
ASSERT(g_network->isSimulated());
if (deterministicRandom()->random01() < 0.5) {
TraceEvent("ForceRecovery_DisableRemoteBegin").log();
wait(success(ManagementAPI::changeConfig(cx.getReference(), g_simulator.disableRemote, true)));
wait(success(ManagementAPI::changeConfig(cx.getReference(), g_simulator->disableRemote, true)));
TraceEvent("ForceRecovery_WaitForPrimary").log();
wait(waitForPrimaryDC(cx, LiteralStringRef("0")));
TraceEvent("ForceRecovery_DisableRemoteComplete").log();
wait(success(ManagementAPI::changeConfig(cx.getReference(), g_simulator.originalRegions, true)));
wait(success(ManagementAPI::changeConfig(cx.getReference(), g_simulator->originalRegions, true)));
}
TraceEvent("ForceRecovery_Wait").log();
wait(delay(deterministicRandom()->random01() * self->testDuration));
// FIXME: killDataCenter breaks simulation if forceKill=false, since some processes can survive and
// partially complete a recovery
g_simulator.killDataCenter(LiteralStringRef("0"),
deterministicRandom()->random01() < 0.5 ? ISimulator::KillInstantly
: ISimulator::RebootAndDelete,
true);
g_simulator.killDataCenter(LiteralStringRef("2"),
deterministicRandom()->random01() < 0.5 ? ISimulator::KillInstantly
: ISimulator::RebootAndDelete,
true);
g_simulator.killDataCenter(LiteralStringRef("4"),
deterministicRandom()->random01() < 0.5 ? ISimulator::KillInstantly
: ISimulator::RebootAndDelete,
true);
g_simulator->killDataCenter(LiteralStringRef("0"),
deterministicRandom()->random01() < 0.5 ? ISimulator::KillInstantly
: ISimulator::RebootAndDelete,
true);
g_simulator->killDataCenter(LiteralStringRef("2"),
deterministicRandom()->random01() < 0.5 ? ISimulator::KillInstantly
: ISimulator::RebootAndDelete,
true);
g_simulator->killDataCenter(LiteralStringRef("4"),
deterministicRandom()->random01() < 0.5 ? ISimulator::KillInstantly
: ISimulator::RebootAndDelete,
true);
TraceEvent("ForceRecovery_Begin").log();
@ -116,7 +116,7 @@ struct KillRegionWorkload : TestWorkload {
loop {
// only needed if force recovery was unnecessary and we killed the secondary
wait(success(ManagementAPI::changeConfig(
cx.getReference(), g_simulator.disablePrimary + " repopulate_anti_quorum=1", true)));
cx.getReference(), g_simulator->disablePrimary + " repopulate_anti_quorum=1", true)));
choose {
when(wait(waitForStorageRecovered(self))) { break; }
when(wait(delay(300.0))) {}

View File

@ -128,7 +128,7 @@ struct LocalRatekeeperWorkload : TestWorkload {
ACTOR static Future<Void> _start(LocalRatekeeperWorkload* self, Database cx) {
wait(delay(self->startAfter));
state StorageServerInterface ssi = wait(getRandomStorage(cx));
g_simulator.disableFor(format("%s/updateStorage", ssi.id().toString().c_str()), now() + self->blockWritesFor);
g_simulator->disableFor(format("%s/updateStorage", ssi.id().toString().c_str()), now() + self->blockWritesFor);
state Future<Void> done = delay(self->blockWritesFor);
// not much will happen until the storage goes over the soft limit
wait(delay(double(SERVER_KNOBS->STORAGE_DURABILITY_LAG_SOFT_MAX / 1e6)));

View File

@ -108,7 +108,7 @@ struct MachineAttritionWorkload : TestWorkload {
static std::vector<ISimulator::ProcessInfo*> getServers() {
std::vector<ISimulator::ProcessInfo*> machines;
std::vector<ISimulator::ProcessInfo*> all = g_simulator.getAllProcesses();
std::vector<ISimulator::ProcessInfo*> all = g_simulator->getAllProcesses();
for (int i = 0; i < all.size(); i++)
if (!all[i]->failed && all[i]->name == std::string("Server") &&
all[i]->startingClass != ProcessClass::TesterClass)
@ -301,7 +301,7 @@ struct MachineAttritionWorkload : TestWorkload {
.detail("Reboot", self->reboot)
.detail("KillType", kt);
g_simulator.killDataCenter(target, kt);
g_simulator->killDataCenter(target, kt);
} else if (self->killDatahall) {
delayBeforeKill = deterministicRandom()->random01() * meanDelay;
wait(delay(delayBeforeKill));
@ -313,7 +313,7 @@ struct MachineAttritionWorkload : TestWorkload {
auto kt = ISimulator::KillInstantly;
TraceEvent("Assassination").detail("TargetDataHall", target).detail("KillType", kt);
g_simulator.killDataHall(target, kt);
g_simulator->killDataHall(target, kt);
} else {
state int killedMachines = 0;
while (killedMachines < self->machinesToKill && self->machines.size() > self->machinesToLeave) {
@ -366,9 +366,9 @@ struct MachineAttritionWorkload : TestWorkload {
if (self->reboot) {
if (deterministicRandom()->random01() > 0.5) {
g_simulator.rebootProcess(targetMachine.zoneId(), deterministicRandom()->random01() > 0.5);
g_simulator->rebootProcess(targetMachine.zoneId(), deterministicRandom()->random01() > 0.5);
} else {
g_simulator.killZone(targetMachine.zoneId(), ISimulator::Reboot);
g_simulator->killZone(targetMachine.zoneId(), ISimulator::Reboot);
}
} else {
auto randomDouble = deterministicRandom()->random01();
@ -377,7 +377,7 @@ struct MachineAttritionWorkload : TestWorkload {
.detail("RandomValue", randomDouble);
if (randomDouble < 0.33) {
TraceEvent("RebootAndDelete").detail("TargetMachine", targetMachine.toString());
g_simulator.killZone(targetMachine.zoneId(), ISimulator::RebootAndDelete);
g_simulator->killZone(targetMachine.zoneId(), ISimulator::RebootAndDelete);
} else {
auto kt = ISimulator::KillInstantly;
if (self->allowFaultInjection) {
@ -393,7 +393,7 @@ struct MachineAttritionWorkload : TestWorkload {
}
*/
}
g_simulator.killZone(targetMachine.zoneId(), kt);
g_simulator->killZone(targetMachine.zoneId(), kt);
}
}

View File

@ -99,8 +99,8 @@ struct MetaclusterManagementWorkload : TestWorkload {
MultiVersionApi::api->selectApiVersion(cx->apiVersion.version());
self->managementDb = MultiVersionDatabase::debugCreateFromExistingDatabase(threadSafeHandle);
ASSERT(g_simulator.extraDatabases.size() > 0);
for (auto connectionString : g_simulator.extraDatabases) {
ASSERT(g_simulator->extraDatabases.size() > 0);
for (auto connectionString : g_simulator->extraDatabases) {
ClusterConnectionString ccs(connectionString);
auto extraFile = makeReference<ClusterConnectionMemoryRecord>(ccs);
self->dataDbIndex.push_back(ClusterName(format("cluster_%08d", self->dataDbs.size())));

View File

@ -29,7 +29,7 @@ struct ProtocolVersionWorkload : TestWorkload {
Future<Void> start(Database const& cx) override { return _start(this, cx); }
ACTOR Future<Void> _start(ProtocolVersionWorkload* self, Database cx) {
state std::vector<ISimulator::ProcessInfo*> allProcesses = g_pSimulator->getAllProcesses();
state std::vector<ISimulator::ProcessInfo*> allProcesses = g_simulator->getAllProcesses();
state std::vector<ISimulator::ProcessInfo*>::iterator diffVersionProcess =
find_if(allProcesses.begin(), allProcesses.end(), [](const ISimulator::ProcessInfo* p) {
return p->protocolVersion != currentProtocolVersion();

View File

@ -39,14 +39,14 @@ struct RandomCloggingWorkload : TestWorkload {
}
std::string description() const override {
if (&g_simulator == g_network)
if (g_simulator == g_network)
return "RandomClogging";
else
return "NoRC";
}
Future<Void> setup(Database const& cx) override { return Void(); }
Future<Void> start(Database const& cx) override {
if (&g_simulator == g_network && enabled)
if (g_simulator == g_network && enabled)
return timeout(
reportErrors(swizzleClog ? swizzleClogClient(this) : clogClient(this), "RandomCloggingError"),
testDuration,
@ -59,14 +59,14 @@ struct RandomCloggingWorkload : TestWorkload {
ACTOR void doClog(ISimulator::ProcessInfo* machine, double t, double delay = 0.0) {
wait(::delay(delay));
g_simulator.clogInterface(machine->address.ip, t);
g_simulator->clogInterface(machine->address.ip, t);
}
void clogRandomPair(double t) {
auto m1 = deterministicRandom()->randomChoice(g_simulator.getAllProcesses());
auto m2 = deterministicRandom()->randomChoice(g_simulator.getAllProcesses());
auto m1 = deterministicRandom()->randomChoice(g_simulator->getAllProcesses());
auto m2 = deterministicRandom()->randomChoice(g_simulator->getAllProcesses());
if (m1->address.ip != m2->address.ip)
g_simulator.clogPair(m1->address.ip, m2->address.ip, t);
g_simulator->clogPair(m1->address.ip, m2->address.ip, t);
}
ACTOR Future<Void> clogClient(RandomCloggingWorkload* self) {
@ -74,7 +74,7 @@ struct RandomCloggingWorkload : TestWorkload {
state double workloadEnd = now() + self->testDuration;
loop {
wait(poisson(&lastTime, self->scale / self->clogginess));
auto machine = deterministicRandom()->randomChoice(g_simulator.getAllProcesses());
auto machine = deterministicRandom()->randomChoice(g_simulator->getAllProcesses());
double t = self->scale * 10.0 * exp(-10.0 * deterministicRandom()->random01());
t = std::max(0.0, std::min(t, workloadEnd - now()));
self->doClog(machine, t);
@ -97,9 +97,9 @@ struct RandomCloggingWorkload : TestWorkload {
// then unclog in a different order over the course of t seconds
std::vector<ISimulator::ProcessInfo*> swizzled;
std::vector<double> starts, ends;
for (int m = 0; m < g_simulator.getAllProcesses().size(); m++)
for (int m = 0; m < g_simulator->getAllProcesses().size(); m++)
if (deterministicRandom()->random01() < 0.5) {
swizzled.push_back(g_simulator.getAllProcesses()[m]);
swizzled.push_back(g_simulator->getAllProcesses()[m]);
starts.push_back(deterministicRandom()->random01() * t / 2);
ends.push_back(deterministicRandom()->random01() * t / 2 + t / 2);
}

View File

@ -181,7 +181,7 @@ struct MoveKeysWorkload : TestWorkload {
ACTOR Future<Void> forceMasterFailure(Database cx, MoveKeysWorkload* self) {
ASSERT(g_network->isSimulated());
loop {
if (g_simulator.killZone(self->dbInfo->get().master.locality.zoneId(), ISimulator::Reboot, true))
if (g_simulator->killZone(self->dbInfo->get().master.locality.zoneId(), ISimulator::Reboot, true))
return Void();
wait(delay(1.0));
}

View File

@ -54,7 +54,7 @@ struct RemoveServersSafelyWorkload : TestWorkload {
kill2Timeout = getOption(options, LiteralStringRef("kill2Timeout"), 6000.0);
killProcesses = deterministicRandom()->random01() < 0.5;
if (g_network->isSimulated()) {
g_simulator.allowLogSetKills = false;
g_simulator->allowLogSetKills = false;
}
}
@ -80,7 +80,7 @@ struct RemoveServersSafelyWorkload : TestWorkload {
.detail("Zoneid", it->locality.zoneId().get().toString())
.detail("MachineId", it->locality.machineId().get().toString());
if (g_simulator.protectedAddresses.count(it->address) == 0)
if (g_simulator->protectedAddresses.count(it->address) == 0)
processAddrs.push_back(pAddr);
machineProcesses[machineIp].insert(pAddr);
@ -127,13 +127,13 @@ struct RemoveServersSafelyWorkload : TestWorkload {
for (AddressExclusion ex : toKill1) {
AddressExclusion machineIp(ex.ip);
ASSERT(machine_ids.count(machineIp));
g_simulator.disableSwapToMachine(machine_ids[machineIp]);
g_simulator->disableSwapToMachine(machine_ids[machineIp]);
}
for (AddressExclusion ex : toKill2) {
AddressExclusion machineIp(ex.ip);
ASSERT(machine_ids.count(machineIp));
g_simulator.disableSwapToMachine(machine_ids[machineIp]);
g_simulator->disableSwapToMachine(machine_ids[machineIp]);
}
return Void();
@ -178,7 +178,7 @@ struct RemoveServersSafelyWorkload : TestWorkload {
}
}
// Get the list of processes matching network address
for (auto processInfo : g_simulator.getAllProcesses()) {
for (auto processInfo : g_simulator->getAllProcesses()) {
auto processNet = AddressExclusion(processInfo->address.ip, processInfo->address.port);
if (processAddrs.find(processNet) != processAddrs.end()) {
processes.push_back(processInfo);
@ -189,7 +189,7 @@ struct RemoveServersSafelyWorkload : TestWorkload {
.detail("Failed", processInfo->failed)
.detail("Excluded", processInfo->excluded)
.detail("Rebooting", processInfo->rebooting)
.detail("Protected", g_simulator.protectedAddresses.count(processInfo->address));
.detail("Protected", g_simulator->protectedAddresses.count(processInfo->address));
} else {
TraceEvent("RemoveAndKill", functionId)
.detail("Step", "ProcessNotToKill")
@ -198,7 +198,7 @@ struct RemoveServersSafelyWorkload : TestWorkload {
.detail("Failed", processInfo->failed)
.detail("Excluded", processInfo->excluded)
.detail("Rebooting", processInfo->rebooting)
.detail("Protected", g_simulator.protectedAddresses.count(processInfo->address));
.detail("Protected", g_simulator->protectedAddresses.count(processInfo->address));
}
}
TraceEvent("RemoveAndKill", functionId)
@ -223,9 +223,9 @@ struct RemoveServersSafelyWorkload : TestWorkload {
.detail("AddrTotal", procAddrs.size())
.detail("ProcTotal", procArray.size())
.detail("Addresses", describe(procAddrs))
.detail("ClusterAvailable", g_simulator.isAvailable());
.detail("ClusterAvailable", g_simulator->isAvailable());
for (auto& procAddr : procAddrs) {
g_simulator.excludeAddress(NetworkAddress(procAddr.ip, procAddr.port, true, false));
g_simulator->excludeAddress(NetworkAddress(procAddr.ip, procAddr.port, true, false));
}
for (auto& procRecord : procArray) {
procRecord->excluded = true;
@ -235,7 +235,7 @@ struct RemoveServersSafelyWorkload : TestWorkload {
.detail("Process", describe(*procRecord))
.detail("Failed", procRecord->failed)
.detail("Rebooting", procRecord->rebooting)
.detail("ClusterAvailable", g_simulator.isAvailable());
.detail("ClusterAvailable", g_simulator->isAvailable());
}
return procArray;
}
@ -250,9 +250,9 @@ struct RemoveServersSafelyWorkload : TestWorkload {
.detail("AddrTotal", procAddrs.size())
.detail("ProcTotal", procArray.size())
.detail("Addresses", describe(procAddrs))
.detail("ClusterAvailable", g_simulator.isAvailable());
.detail("ClusterAvailable", g_simulator->isAvailable());
for (auto& procAddr : procAddrs) {
g_simulator.includeAddress(NetworkAddress(procAddr.ip, procAddr.port, true, false));
g_simulator->includeAddress(NetworkAddress(procAddr.ip, procAddr.port, true, false));
}
for (auto& procRecord : procArray) {
// Only change the exclusion member, if not failed since it will require a reboot to revive it
@ -264,7 +264,7 @@ struct RemoveServersSafelyWorkload : TestWorkload {
.detail("Process", describe(*procRecord))
.detail("Failed", procRecord->failed)
.detail("Rebooting", procRecord->rebooting)
.detail("ClusterAvailable", g_simulator.isAvailable());
.detail("ClusterAvailable", g_simulator->isAvailable());
}
return procArray;
}
@ -307,7 +307,7 @@ struct RemoveServersSafelyWorkload : TestWorkload {
// Check if we can kill the added process
bCanKillProcess =
g_simulator.canKillProcesses(processesLeft, processesDead, ISimulator::KillInstantly, nullptr);
g_simulator->canKillProcesses(processesLeft, processesDead, ISimulator::KillInstantly, nullptr);
// Remove the added processes
processesLeft.resize(processesLeft.size() - killProcArray.size());
@ -350,7 +350,7 @@ struct RemoveServersSafelyWorkload : TestWorkload {
.detail("Step", "exclude list first")
.detail("ToKill", describe(toKill1))
.detail("KillTotal", toKill1.size())
.detail("ClusterAvailable", g_simulator.isAvailable());
.detail("ClusterAvailable", g_simulator->isAvailable());
// toKill1 may kill too many servers to make cluster unavailable.
// Get the processes in toKill1 that are safe to kill
@ -361,7 +361,7 @@ struct RemoveServersSafelyWorkload : TestWorkload {
.detail("Step", "exclude list first")
.detail("ToKillModified", describe(toKill1))
.detail("KillTotalModified", toKill1.size())
.detail("ClusterAvailable", g_simulator.isAvailable());
.detail("ClusterAvailable", g_simulator->isAvailable());
self->excludeAddresses(toKill1);
@ -374,7 +374,7 @@ struct RemoveServersSafelyWorkload : TestWorkload {
.detail("KillTotal", toKill1.size())
.detail("Processes", killProcArray.size())
.detail("ToKill1", describe(toKill1))
.detail("ClusterAvailable", g_simulator.isAvailable());
.detail("ClusterAvailable", g_simulator->isAvailable());
// Include the servers, if unable to exclude
// Reinclude when buggify is on to increase the surface area of the next set of excludes
@ -385,7 +385,7 @@ struct RemoveServersSafelyWorkload : TestWorkload {
.detail("Step", "include all first")
.detail("KillTotal", toKill1.size())
.detail("ToKill", describe(toKill1))
.detail("ClusterAvailable", g_simulator.isAvailable());
.detail("ClusterAvailable", g_simulator->isAvailable());
wait(includeServers(cx, std::vector<AddressExclusion>(1)));
wait(includeLocalities(cx, std::vector<std::string>(), failed, true));
wait(includeLocalities(cx, std::vector<std::string>(), !failed, true));
@ -403,7 +403,7 @@ struct RemoveServersSafelyWorkload : TestWorkload {
.detail("Step", "exclude list second")
.detail("KillTotal", toKill2.size())
.detail("ToKill", describe(toKill2))
.detail("ClusterAvailable", g_simulator.isAvailable());
.detail("ClusterAvailable", g_simulator->isAvailable());
self->excludeAddresses(toKill2);
// The second set of machines is selected so that we can always make progress without it, even after the
@ -413,7 +413,7 @@ struct RemoveServersSafelyWorkload : TestWorkload {
.detail("ToKill2", describe(toKill2))
.detail("KillTotal", toKill2.size())
.detail("Processes", killProcArray.size())
.detail("ClusterAvailable", g_simulator.isAvailable());
.detail("ClusterAvailable", g_simulator->isAvailable());
wait(reportErrors(timeoutError(removeAndKill(self, cx, toKill2, bClearedFirst ? &toKill1 : nullptr, true),
self->kill2Timeout),
"RemoveServersSafelyError",
@ -423,14 +423,14 @@ struct RemoveServersSafelyWorkload : TestWorkload {
.detail("Step", "excluded second list")
.detail("KillTotal", toKill2.size())
.detail("ToKill", describe(toKill2))
.detail("ClusterAvailable", g_simulator.isAvailable());
.detail("ClusterAvailable", g_simulator->isAvailable());
// Get the updated list of processes which may have changed due to reboots, deletes, etc
TraceEvent("RemoveAndKill")
.detail("Step", "include all second")
.detail("KillTotal", toKill2.size())
.detail("ToKill", describe(toKill2))
.detail("ClusterAvailable", g_simulator.isAvailable());
.detail("ClusterAvailable", g_simulator->isAvailable());
wait(includeServers(cx, std::vector<AddressExclusion>(1)));
wait(includeLocalities(cx, std::vector<std::string>(), failed, true));
wait(includeLocalities(cx, std::vector<std::string>(), !failed, true));
@ -454,31 +454,31 @@ struct RemoveServersSafelyWorkload : TestWorkload {
.detail("Step", removeViaClear ? "ClearProcesses" : "IgnoreProcesses")
.detail("Addresses", describe(killAddrs))
.detail("Processes", killProcArray.size())
.detail("ClusterAvailable", g_simulator.isAvailable())
.detail("ClusterAvailable", g_simulator->isAvailable())
.detail("RemoveViaClear", removeViaClear);
for (auto& killProcess : killProcArray) {
if (g_simulator.protectedAddresses.count(killProcess->address))
if (g_simulator->protectedAddresses.count(killProcess->address))
TraceEvent("RemoveAndKill", functionId)
.detail("Step", "NoKill Process")
.detail("Process", describe(*killProcess))
.detail("Failed", killProcess->failed)
.detail("Rebooting", killProcess->rebooting)
.detail("ClusterAvailable", g_simulator.isAvailable())
.detail("Protected", g_simulator.protectedAddresses.count(killProcess->address));
.detail("ClusterAvailable", g_simulator->isAvailable())
.detail("Protected", g_simulator->protectedAddresses.count(killProcess->address));
else if (removeViaClear) {
g_simulator.rebootProcess(killProcess, ISimulator::RebootProcessAndDelete);
g_simulator->rebootProcess(killProcess, ISimulator::RebootProcessAndDelete);
TraceEvent("RemoveAndKill", functionId)
.detail("Step", "Clear Process")
.detail("Process", describe(*killProcess))
.detail("Failed", killProcess->failed)
.detail("Rebooting", killProcess->rebooting)
.detail("ClusterAvailable", g_simulator.isAvailable())
.detail("Protected", g_simulator.protectedAddresses.count(killProcess->address));
.detail("ClusterAvailable", g_simulator->isAvailable())
.detail("Protected", g_simulator->protectedAddresses.count(killProcess->address));
}
/*
else {
g_simulator.killProcess( killProcess, ISimulator::KillInstantly );
TraceEvent("RemoveAndKill", functionId).detail("Step", "Kill Process").detail("Process", describe(*killProcess)).detail("Failed", killProcess->failed).detail("Rebooting", killProcess->rebooting).detail("ClusterAvailable", g_simulator.isAvailable()).detail("Protected", g_simulator.protectedAddresses.count(killProcess->address));
g_simulator->killProcess( killProcess, ISimulator::KillInstantly );
TraceEvent("RemoveAndKill", functionId).detail("Step", "Kill Process").detail("Process", describe(*killProcess)).detail("Failed", killProcess->failed).detail("Rebooting", killProcess->rebooting).detail("ClusterAvailable", g_simulator->isAvailable()).detail("Protected", g_simulator->protectedAddresses.count(killProcess->address));
}
*/
}
@ -493,15 +493,15 @@ struct RemoveServersSafelyWorkload : TestWorkload {
.detail("Addresses", describe(killAddrs))
.detail("Processes", killProcArray.size())
.detail("Zones", zoneIds.size())
.detail("ClusterAvailable", g_simulator.isAvailable());
.detail("ClusterAvailable", g_simulator->isAvailable());
for (auto& zoneId : zoneIds) {
killedMachine = g_simulator.killZone(
killedMachine = g_simulator->killZone(
zoneId, removeViaClear ? ISimulator::RebootAndDelete : ISimulator::KillInstantly);
TraceEvent(killedMachine ? SevInfo : SevWarn, "RemoveAndKill")
.detail("Step", removeViaClear ? "Clear Machine" : "Kill Machine")
.detail("ZoneId", zoneId)
.detail(removeViaClear ? "Cleared" : "Killed", killedMachine)
.detail("ClusterAvailable", g_simulator.isAvailable());
.detail("ClusterAvailable", g_simulator->isAvailable());
}
}
@ -520,7 +520,7 @@ struct RemoveServersSafelyWorkload : TestWorkload {
// First clear the exclusion list and exclude the given list
TraceEvent("RemoveAndKill", functionId)
.detail("Step", "Including all")
.detail("ClusterAvailable", g_simulator.isAvailable())
.detail("ClusterAvailable", g_simulator->isAvailable())
.detail("MarkExcludeAsFailed", markExcludeAsFailed);
state bool failed = true;
wait(includeServers(cx, std::vector<AddressExclusion>(1)));
@ -528,7 +528,7 @@ struct RemoveServersSafelyWorkload : TestWorkload {
wait(includeLocalities(cx, std::vector<std::string>(), !failed, true));
TraceEvent("RemoveAndKill", functionId)
.detail("Step", "Included all")
.detail("ClusterAvailable", g_simulator.isAvailable())
.detail("ClusterAvailable", g_simulator->isAvailable())
.detail("MarkExcludeAsFailed", markExcludeAsFailed);
// Reinclude the addresses that were excluded, if present
if (pIncAddrs) {
@ -629,7 +629,7 @@ struct RemoveServersSafelyWorkload : TestWorkload {
.detail("ToKill", describe(toKill))
.detail("Addresses", describe(toKillArray))
.detail("FailedAddresses", describe(toKillMarkFailedArray))
.detail("ClusterAvailable", g_simulator.isAvailable())
.detail("ClusterAvailable", g_simulator->isAvailable())
.detail("MarkExcludeAsFailed", markExcludeAsFailed);
state bool excludeLocalitiesInsteadOfServers = deterministicRandom()->coinflip();
@ -682,14 +682,14 @@ struct RemoveServersSafelyWorkload : TestWorkload {
TraceEvent("RemoveAndKill", functionId)
.detail("Step", "Wait For Server Exclusion")
.detail("Addresses", describe(toKill))
.detail("ClusterAvailable", g_simulator.isAvailable());
.detail("ClusterAvailable", g_simulator->isAvailable());
wait(success(checkForExcludingServers(cx, toKillArray, true /* wait for exclusion */)));
}
TraceEvent("RemoveAndKill", functionId)
.detail("Step", "coordinators auto")
.detail("DesiredCoordinators", g_simulator.desiredCoordinators)
.detail("ClusterAvailable", g_simulator.isAvailable());
.detail("DesiredCoordinators", g_simulator->desiredCoordinators)
.detail("ClusterAvailable", g_simulator->isAvailable());
// Setup the coordinators BEFORE the exclusion
// Otherwise, we may end up with NotEnoughMachinesForCoordinators
@ -697,7 +697,7 @@ struct RemoveServersSafelyWorkload : TestWorkload {
state int nQuorum;
while (true) {
cycle++;
nQuorum = ((g_simulator.desiredCoordinators + 1) / 2) * 2 - 1;
nQuorum = ((g_simulator->desiredCoordinators + 1) / 2) * 2 - 1;
CoordinatorsResult result = wait(changeQuorum(cx, autoQuorumChange(nQuorum)));
TraceEvent(result == CoordinatorsResult::SUCCESS || result == CoordinatorsResult::SAME_NETWORK_ADDRESSES
? SevInfo
@ -707,7 +707,7 @@ struct RemoveServersSafelyWorkload : TestWorkload {
.detail("Result", (int)result)
.detail("Attempt", cycle)
.detail("Quorum", nQuorum)
.detail("DesiredCoordinators", g_simulator.desiredCoordinators);
.detail("DesiredCoordinators", g_simulator->desiredCoordinators);
if (result == CoordinatorsResult::SUCCESS || result == CoordinatorsResult::SAME_NETWORK_ADDRESSES)
break;
}
@ -716,19 +716,19 @@ struct RemoveServersSafelyWorkload : TestWorkload {
} else {
TraceEvent("RemoveAndKill", functionId)
.detail("Step", "nothing to clear")
.detail("ClusterAvailable", g_simulator.isAvailable());
.detail("ClusterAvailable", g_simulator->isAvailable());
}
TraceEvent("RemoveAndKill", functionId)
.detail("Step", "done")
.detail("ClusterAvailable", g_simulator.isAvailable());
.detail("ClusterAvailable", g_simulator->isAvailable());
return Void();
}
static std::vector<ISimulator::ProcessInfo*> getServers() {
std::vector<ISimulator::ProcessInfo*> machines;
std::vector<ISimulator::ProcessInfo*> all = g_simulator.getAllProcesses();
std::vector<ISimulator::ProcessInfo*> all = g_simulator->getAllProcesses();
for (int i = 0; i < all.size(); i++) {
if (all[i]->name == std::string("Server") && all[i]->isAvailableClass()) {
machines.push_back(all[i]);
@ -793,7 +793,7 @@ struct RemoveServersSafelyWorkload : TestWorkload {
addressToIndexMap[workers[i].address] = i;
}
std::vector<ISimulator::ProcessInfo*> processes = g_simulator.getAllProcesses();
std::vector<ISimulator::ProcessInfo*> processes = g_simulator->getAllProcesses();
for (auto process : processes) {
if (addressToIndexMap.find(process->address) != addressToIndexMap.end()) {
if (workers[addressToIndexMap[process->address]].locality.processId().present()) {

View File

@ -47,7 +47,7 @@ struct RollbackWorkload : TestWorkload {
std::string description() const override { return "RollbackWorkload"; }
Future<Void> setup(Database const& cx) override { return Void(); }
Future<Void> start(Database const& cx) override {
if (&g_simulator == g_network && enabled)
if (g_simulator == g_network && enabled)
return timeout(reportErrors(rollbackFailureWorker(cx, this, meanDelay), "RollbackFailureWorkerError"),
testDuration,
Void());
@ -83,8 +83,8 @@ struct RollbackWorkload : TestWorkload {
for (int t = 0; t < tlogs.size(); t++) {
if (t != utIndex) {
g_simulator.clogPair(proxy.address().ip, tlogs[t].address().ip, self->clogDuration);
// g_simulator.clogInterface( g_simulator.getProcess( system.tlogs[t].commit.getEndpoint() ),
g_simulator->clogPair(proxy.address().ip, tlogs[t].address().ip, self->clogDuration);
// g_simulator->clogInterface( g_simulator->getProcess( system.tlogs[t].commit.getEndpoint() ),
// self->clogDuration, ClogAll );
}
}
@ -95,11 +95,11 @@ struct RollbackWorkload : TestWorkload {
// Kill the proxy and clog the unclogged tlog
if (self->enableFailures) {
g_simulator.killProcess(g_simulator.getProcessByAddress(proxy.address()), ISimulator::KillInstantly);
g_simulator.clogInterface(uncloggedTLog.ip, self->clogDuration, ClogAll);
g_simulator->killProcess(g_simulator->getProcessByAddress(proxy.address()), ISimulator::KillInstantly);
g_simulator->clogInterface(uncloggedTLog.ip, self->clogDuration, ClogAll);
} else {
g_simulator.clogInterface(proxy.address().ip, self->clogDuration, ClogAll);
g_simulator.clogInterface(uncloggedTLog.ip, self->clogDuration, ClogAll);
g_simulator->clogInterface(proxy.address().ip, self->clogDuration, ClogAll);
g_simulator->clogInterface(uncloggedTLog.ip, self->clogDuration, ClogAll);
}
return Void();
}

View File

@ -46,7 +46,7 @@ struct SaveAndKillWorkload : TestWorkload {
std::string description() const override { return "SaveAndKillWorkload"; }
Future<Void> setup(Database const& cx) override {
g_simulator.disableSwapsToAll();
g_simulator->disableSwapsToAll();
return Void();
}
Future<Void> start(Database const& cx) override { return _start(this); }
@ -60,12 +60,12 @@ struct SaveAndKillWorkload : TestWorkload {
ini.LoadFile(self->restartInfo.c_str());
ini.SetValue("RESTORE", "isRestoring", format("%d", self->isRestoring).c_str());
ini.SetValue("META", "processesPerMachine", format("%d", g_simulator.processesPerMachine).c_str());
ini.SetValue("META", "listenersPerProcess", format("%d", g_simulator.listenersPerProcess).c_str());
ini.SetValue("META", "desiredCoordinators", format("%d", g_simulator.desiredCoordinators).c_str());
ini.SetValue("META", "connectionString", g_simulator.connectionString.c_str());
ini.SetValue("META", "testerCount", format("%d", g_simulator.testerCount).c_str());
ini.SetValue("META", "tssMode", format("%d", g_simulator.tssMode).c_str());
ini.SetValue("META", "processesPerMachine", format("%d", g_simulator->processesPerMachine).c_str());
ini.SetValue("META", "listenersPerProcess", format("%d", g_simulator->listenersPerProcess).c_str());
ini.SetValue("META", "desiredCoordinators", format("%d", g_simulator->desiredCoordinators).c_str());
ini.SetValue("META", "connectionString", g_simulator->connectionString.c_str());
ini.SetValue("META", "testerCount", format("%d", g_simulator->testerCount).c_str());
ini.SetValue("META", "tssMode", format("%d", g_simulator->tssMode).c_str());
ini.SetValue("META", "mockDNS", INetworkConnections::net()->convertMockDNSToString().c_str());
ini.SetBoolValue("META", "enableEncryption", SERVER_KNOBS->ENABLE_ENCRYPTION);
@ -73,8 +73,9 @@ struct SaveAndKillWorkload : TestWorkload {
ini.SetBoolValue("META", "enableStorageServerEncryption", SERVER_KNOBS->ENABLE_STORAGE_SERVER_ENCRYPTION);
ini.SetBoolValue("META", "enableBlobGranuleEncryption", SERVER_KNOBS->ENABLE_BLOB_GRANULE_ENCRYPTION);
std::vector<ISimulator::ProcessInfo*> processes = g_simulator.getAllProcesses();
std::map<NetworkAddress, ISimulator::ProcessInfo*> rebootingProcesses = g_simulator.currentlyRebootingProcesses;
std::vector<ISimulator::ProcessInfo*> processes = g_simulator->getAllProcesses();
std::map<NetworkAddress, ISimulator::ProcessInfo*> rebootingProcesses =
g_simulator->currentlyRebootingProcesses;
std::map<std::string, ISimulator::ProcessInfo*> allProcessesMap;
for (const auto& [_, process] : rebootingProcesses) {
if (allProcessesMap.find(process->dataFolder) == allProcessesMap.end() && !process->isSpawnedKVProcess()) {
@ -139,14 +140,14 @@ struct SaveAndKillWorkload : TestWorkload {
ini.SaveFile(self->restartInfo.c_str());
for (auto process = allProcessesMap.begin(); process != allProcessesMap.end(); process++) {
g_simulator.killProcess(process->second, ISimulator::Reboot);
g_simulator->killProcess(process->second, ISimulator::Reboot);
}
for (i = 0; i < 100; i++) {
wait(delay(0.0));
}
g_simulator.stop();
g_simulator->stop();
return Void();
}

View File

@ -61,8 +61,8 @@ struct TargetedKillWorkload : TestWorkload {
NetworkAddress address,
Database cx,
TargetedKillWorkload* self) {
if (&g_simulator == g_network) {
g_simulator.killInterface(address, ISimulator::KillInstantly);
if (g_simulator == g_network) {
g_simulator->killInterface(address, ISimulator::KillInstantly);
return Void();
}

View File

@ -92,7 +92,7 @@ struct TenantManagementConcurrencyWorkload : TestWorkload {
return _setup(cx, this);
}
ACTOR static Future<Void> _setup(Database cx, TenantManagementConcurrencyWorkload* self) {
state ClusterConnectionString connectionString(g_simulator.extraDatabases[0]);
state ClusterConnectionString connectionString(g_simulator->extraDatabases[0]);
Reference<IDatabase> threadSafeHandle =
wait(unsafeThreadFutureToFuture(ThreadSafeDatabase::createFromExistingDatabase(cx)));
@ -141,7 +141,7 @@ struct TenantManagementConcurrencyWorkload : TestWorkload {
}
if (self->useMetacluster) {
ASSERT(g_simulator.extraDatabases.size() == 1);
ASSERT(g_simulator->extraDatabases.size() == 1);
auto extraFile = makeReference<ClusterConnectionMemoryRecord>(connectionString);
self->dataDb = Database::createDatabase(extraFile, ApiVersion::LATEST_VERSION);
} else {
@ -342,4 +342,4 @@ struct TenantManagementConcurrencyWorkload : TestWorkload {
};
WorkloadFactory<TenantManagementConcurrencyWorkload> TenantManagementConcurrencyWorkloadFactory(
"TenantManagementConcurrency");
"TenantManagementConcurrency");

View File

@ -127,7 +127,7 @@ struct TenantManagementWorkload : TestWorkload {
localTenantGroupNamePrefix = format("%stenantgroup_%d_", tenantNamePrefix.toString().c_str(), clientId);
bool defaultUseMetacluster = false;
if (clientId == 0 && g_network->isSimulated() && !g_simulator.extraDatabases.empty()) {
if (clientId == 0 && g_network->isSimulated() && !g_simulator->extraDatabases.empty()) {
defaultUseMetacluster = deterministicRandom()->coinflip();
}
@ -181,7 +181,7 @@ struct TenantManagementWorkload : TestWorkload {
DataClusterEntry entry;
entry.capacity.numTenantGroups = 1e9;
wait(MetaclusterAPI::registerCluster(
self->mvDb, self->dataClusterName, g_simulator.extraDatabases[0], entry));
self->mvDb, self->dataClusterName, g_simulator->extraDatabases[0], entry));
}
state Transaction tr(cx);
@ -218,8 +218,8 @@ struct TenantManagementWorkload : TestWorkload {
}
if (self->useMetacluster) {
ASSERT(g_simulator.extraDatabases.size() == 1);
auto extraFile = makeReference<ClusterConnectionMemoryRecord>(g_simulator.extraDatabases[0]);
ASSERT(g_simulator->extraDatabases.size() == 1);
auto extraFile = makeReference<ClusterConnectionMemoryRecord>(g_simulator->extraDatabases[0]);
self->dataDb = Database::createDatabase(extraFile, ApiVersion::LATEST_VERSION);
} else {
self->dataDb = cx;

View File

@ -156,9 +156,9 @@ struct VersionStampWorkload : TestWorkload {
ACTOR Future<bool> _check(Database cx, VersionStampWorkload* self) {
if (self->validateExtraDB) {
ASSERT(g_simulator.extraDatabases.size() == 1);
ASSERT(g_simulator->extraDatabases.size() == 1);
auto extraFile =
makeReference<ClusterConnectionMemoryRecord>(ClusterConnectionString(g_simulator.extraDatabases[0]));
makeReference<ClusterConnectionMemoryRecord>(ClusterConnectionString(g_simulator->extraDatabases[0]));
cx = Database::createDatabase(extraFile, ApiVersion::LATEST_VERSION);
}
state ReadYourWritesTransaction tr(cx);
@ -315,10 +315,10 @@ struct VersionStampWorkload : TestWorkload {
state double lastTime = now();
state Database extraDB;
if (!g_simulator.extraDatabases.empty()) {
ASSERT(g_simulator.extraDatabases.size() == 1);
if (!g_simulator->extraDatabases.empty()) {
ASSERT(g_simulator->extraDatabases.size() == 1);
auto extraFile =
makeReference<ClusterConnectionMemoryRecord>(ClusterConnectionString(g_simulator.extraDatabases[0]));
makeReference<ClusterConnectionMemoryRecord>(ClusterConnectionString(g_simulator->extraDatabases[0]));
extraDB = Database::createDatabase(extraFile, ApiVersion::LATEST_VERSION);
}
@ -385,7 +385,7 @@ struct VersionStampWorkload : TestWorkload {
} catch (Error& e) {
err = e;
if (err.code() == error_code_database_locked && !g_simulator.extraDatabases.empty()) {
if (err.code() == error_code_database_locked && !g_simulator->extraDatabases.empty()) {
//TraceEvent("VST_CommitDatabaseLocked");
cx_is_primary = !cx_is_primary;
tr = ReadYourWritesTransaction(cx_is_primary ? cx : extraDB);

View File

@ -89,11 +89,11 @@ struct WriteDuringReadWorkload : TestWorkload {
CODE_PROBE(adjacentKeys && (nodes + minNode) > CLIENT_KNOBS->KEY_SIZE_LIMIT,
"WriteDuringReadWorkload testing large keys");
useExtraDB = !g_simulator.extraDatabases.empty();
useExtraDB = !g_simulator->extraDatabases.empty();
if (useExtraDB) {
ASSERT(g_simulator.extraDatabases.size() == 1);
ASSERT(g_simulator->extraDatabases.size() == 1);
auto extraFile =
makeReference<ClusterConnectionMemoryRecord>(ClusterConnectionString(g_simulator.extraDatabases[0]));
makeReference<ClusterConnectionMemoryRecord>(ClusterConnectionString(g_simulator->extraDatabases[0]));
extraDB = Database::createDatabase(extraFile, ApiVersion::LATEST_VERSION);
useSystemKeys = false;
}
@ -683,7 +683,7 @@ struct WriteDuringReadWorkload : TestWorkload {
loop {
wait(delay(now() - startTime > self->slowModeStart ||
(g_network->isSimulated() && g_simulator.speedUpSimulation)
(g_network->isSimulated() && g_simulator->speedUpSimulation)
? 1.0
: 0.1));
try {

View File

@ -600,7 +600,7 @@ public:
NetworkAddress getAddressIndex() {
// ahm
// if( g_network->isSimulated() )
// return g_simulator.getCurrentProcess()->address;
// return g_simulator->getCurrentProcess()->address;
// else
return g_network->getLocalAddress();
}