Add partitioned logs option to AtomicRestore workload

This commit is contained in:
Jingyu Zhou 2020-03-26 11:08:57 -07:00
parent aca458cd96
commit 6be913a430
2 changed files with 8 additions and 5 deletions

View File

@ -29,6 +29,7 @@ struct AtomicRestoreWorkload : TestWorkload {
double startAfter, restoreAfter;
bool fastRestore; // true: use fast restore, false: use old style restore
Standalone<VectorRef<KeyRangeRef>> backupRanges;
bool usePartitionedLogs;
AtomicRestoreWorkload(WorkloadContext const& wcx)
  : TestWorkload(wcx) {
@ -37,6 +38,8 @@ struct AtomicRestoreWorkload : TestWorkload {
restoreAfter = getOption(options, LiteralStringRef("restoreAfter"), 20.0);
fastRestore = getOption(options, LiteralStringRef("fastRestore"), false);
backupRanges.push_back_deep(backupRanges.arena(), normalKeys);
usePartitionedLogs = getOption(options, LiteralStringRef("usePartitionedLogs"),
deterministicRandom()->random01() < 0.5 ? true : false);
}

virtual std::string description() {
@ -68,9 +71,10 @@ struct AtomicRestoreWorkload : TestWorkload {
 state std::string backupContainer = "file://simfdb/backups/";
 try {
-    wait(backupAgent.submitBackup(cx, StringRef(backupContainer), deterministicRandom()->randomInt(0, 100), BackupAgentBase::getDefaultTagName(), self->backupRanges, false));
-}
-catch (Error& e) {
+    wait(backupAgent.submitBackup(cx, StringRef(backupContainer), deterministicRandom()->randomInt(0, 100),
+                                  BackupAgentBase::getDefaultTagName(), self->backupRanges, false,
+                                  self->usePartitionedLogs));
+} catch (Error& e) {
     if (e.code() != error_code_backup_unneeded && e.code() != error_code_backup_duplicate)
         throw;
 }

View File

@ -21,7 +21,6 @@
 #include "fdbrpc/simulator.h"
 #include "fdbclient/BackupAgent.actor.h"
 #include "fdbclient/BackupContainer.h"
-#include "fdbserver/Knobs.h"
 #include "fdbserver/workloads/workloads.actor.h"
 #include "fdbserver/workloads/BulkSetup.actor.h"
 #include "fdbclient/RestoreWorkerInterface.actor.h"
@ -185,7 +184,7 @@ struct BackupAndParallelRestoreCorrectnessWorkload : TestWorkload {
 try {
     wait(backupAgent->submitBackup(cx, StringRef(backupContainer), deterministicRandom()->randomInt(0, 100),
                                    tag.toString(), backupRanges, stopDifferentialDelay ? false : true,
-                                   /*partitionedLog=*/self->usePartitionedLogs));
+                                   self->usePartitionedLogs));
 } catch (Error& e) {
     TraceEvent("BARW_DoBackupSubmitBackupException", randomID).error(e).detail("Tag", printable(tag));
     if (e.code() != error_code_backup_unneeded && e.code() != error_code_backup_duplicate) throw;