Apply clang format

Meng Xu 2020-03-25 11:20:17 -07:00
parent 120272f025
commit 1ba11dc74b
4 changed files with 22 additions and 14 deletions
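
Every hunk below re-wraps long declarations and call sites without changing any tokens; no behavior changes. The new line breaks are consistent with a clang-format style that caps lines at roughly 120 columns and aligns wrapped arguments under the opening parenthesis. A minimal sketch of such a .clang-format configuration (illustrative assumption only; the repository's actual settings may differ):

    # Illustrative only; the project's real .clang-format may use different values.
    BasedOnStyle: LLVM
    ColumnLimit: 120
    AlignAfterOpenBracket: Align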


@@ -279,7 +279,8 @@ public:
 Future<Void> parallelRestoreFinish(Database cx, UID randomUID);
 Future<Void> submitParallelRestore(Database cx, Key backupTag, Standalone<VectorRef<KeyRangeRef>> backupRanges,
                                    KeyRef bcUrl, Version targetVersion, bool lockDB, UID randomUID);
-Future<Void> atomicParallelRestore(Database cx, Key tagName, Standalone<VectorRef<KeyRangeRef>> ranges, Key addPrefix, Key removePrefix);
+Future<Void> atomicParallelRestore(Database cx, Key tagName, Standalone<VectorRef<KeyRangeRef>> ranges,
+                                   Key addPrefix, Key removePrefix);
 // restore() will
 //  - make sure that url is readable and appears to be a complete backup


@@ -3637,8 +3637,9 @@ public:
     auto range = backupRanges[restoreIndex];
     Standalone<StringRef> restoreTag(backupTag.toString() + "_" + std::to_string(restoreIndex));
     // Register the request request in DB, which will be picked up by restore worker leader
-    struct RestoreRequest restoreRequest(restoreIndex, restoreTag, bcUrl, true, targetVersion, true, range,
-                                         Key(), Key(), lockDB, deterministicRandom()->randomUniqueID());
+    struct RestoreRequest restoreRequest(restoreIndex, restoreTag, bcUrl, true, targetVersion, true,
+                                         range, Key(), Key(), lockDB,
+                                         deterministicRandom()->randomUniqueID());
     tr->set(restoreRequestKeyFor(restoreRequest.index), restoreRequestValue(restoreRequest));
 }
 tr->set(restoreRequestTriggerKey,
@@ -4532,7 +4533,9 @@ public:
 // Similar to atomicRestore, only used in simulation test.
 // locks the database before discontinuing the backup and that same lock is then used while doing the restore.
 // the tagname of the backup must be the same as the restore.
-ACTOR static Future<Void> atomicParallelRestore(FileBackupAgent* backupAgent, Database cx, Key tagName, Standalone<VectorRef<KeyRangeRef>> ranges, Key addPrefix, Key removePrefix) {
+ACTOR static Future<Void> atomicParallelRestore(FileBackupAgent* backupAgent, Database cx, Key tagName,
+                                                Standalone<VectorRef<KeyRangeRef>> ranges, Key addPrefix,
+                                                Key removePrefix) {
     Version ver = wait(atomicRestore(backupAgent, cx, tagName, ranges, addPrefix, removePrefix, true));
     return Void();
 }
@@ -4550,10 +4553,12 @@ Future<Void> FileBackupAgent::parallelRestoreFinish(Database cx, UID randomUID)
 Future<Void> FileBackupAgent::submitParallelRestore(Database cx, Key backupTag,
                                                     Standalone<VectorRef<KeyRangeRef>> backupRanges, KeyRef bcUrl,
                                                     Version targetVersion, bool lockDB, UID randomUID) {
-    return FileBackupAgentImpl::submitParallelRestore(cx, backupTag, backupRanges, bcUrl, targetVersion, lockDB, randomUID);
+    return FileBackupAgentImpl::submitParallelRestore(cx, backupTag, backupRanges, bcUrl, targetVersion, lockDB,
+                                                      randomUID);
 }
 
-Future<Void> FileBackupAgent::atomicParallelRestore(Database cx, Key tagName, Standalone<VectorRef<KeyRangeRef>> ranges, Key addPrefix, Key removePrefix) {
+Future<Void> FileBackupAgent::atomicParallelRestore(Database cx, Key tagName, Standalone<VectorRef<KeyRangeRef>> ranges,
+                                                    Key addPrefix, Key removePrefix) {
     return FileBackupAgentImpl::atomicParallelRestore(this, cx, tagName, ranges, addPrefix, removePrefix);
 }


@@ -83,7 +83,8 @@ struct AtomicRestoreWorkload : TestWorkload {
 if (self->fastRestore) { // New fast parallel restore
     TraceEvent(SevWarnAlways, "AtomicParallelRestore");
-    wait(backupAgent.atomicParallelRestore(cx, BackupAgentBase::getDefaultTag(), self->backupRanges, StringRef(), StringRef()));
+    wait(backupAgent.atomicParallelRestore(cx, BackupAgentBase::getDefaultTag(), self->backupRanges,
+                                           StringRef(), StringRef()));
 } else { // Old style restore
     loop {
         std::vector<Future<Version>> restores;


@@ -450,7 +450,8 @@ struct BackupAndParallelRestoreCorrectnessWorkload : TestWorkload {
 // Submit parallel restore requests
 TraceEvent("FastRestore").detail("PrepareRestores", self->backupRanges.size());
-wait(backupAgent.submitParallelRestore(cx, self->backupTag, self->backupRanges,
-                                       KeyRef(lastBackupContainer->getURL()), targetVersion, self->locked, randomID));
+wait(backupAgent.submitParallelRestore(cx, self->backupTag, self->backupRanges,
+                                       KeyRef(lastBackupContainer->getURL()), targetVersion,
+                                       self->locked, randomID));
 TraceEvent("FastRestore").detail("TriggerRestore", "Setting up restoreRequestTriggerKey");
 // Sometimes kill and restart the restore