// 2017-05-26 04:48:44 +08:00
/*
* FileBackupAgent . actor . cpp
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013 - 2018 Apple Inc . and the FoundationDB project authors
*
* Licensed under the Apache License , Version 2.0 ( the " License " ) ;
* you may not use this file except in compliance with the License .
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing , software
* distributed under the License is distributed on an " AS IS " BASIS ,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied .
* See the License for the specific language governing permissions and
* limitations under the License .
*/
# include "BackupAgent.h"
# include "BackupContainer.h"
# include "DatabaseContext.h"
# include "ManagementAPI.h"
# include "Status.h"
# include "KeyBackedTypes.h"
# include <ctime>
# include <climits>
# include "fdbrpc/IAsyncFile.h"
# include "flow/genericactors.actor.h"
# include "flow/Hash3.h"
# include <numeric>
# include <boost/algorithm/string/split.hpp>
# include <boost/algorithm/string/classification.hpp>
# include <algorithm>
2017-10-28 05:06:15 +08:00
const Key FileBackupAgent : : keyLastRestorable = LiteralStringRef ( " last_restorable " ) ;
2017-05-26 04:48:44 +08:00
// For convenience
typedef FileBackupAgent::ERestoreState ERestoreState;

// Returns a human readable name for a restore state enum value.
// NOTE: the misspelled "unitialized" is kept as-is because it is the externally visible state text.
StringRef FileBackupAgent::restoreStateText(ERestoreState id) {
	switch(id) {
		case ERestoreState::UNITIALIZED: return LiteralStringRef("unitialized");
		case ERestoreState::QUEUED:      return LiteralStringRef("queued");
		case ERestoreState::STARTING:    return LiteralStringRef("starting");
		case ERestoreState::RUNNING:     return LiteralStringRef("running");
		case ERestoreState::COMPLETED:   return LiteralStringRef("completed");
		case ERestoreState::ABORTED:     return LiteralStringRef("aborted");
		default:                         return LiteralStringRef("Unknown");
	}
}
// Tuple codec for ERestoreState so the state enum can be stored in a KeyBackedProperty.
template<> Tuple Codec<ERestoreState>::pack(ERestoreState const &val) { return Tuple().append(val); }
template<> ERestoreState Codec<ERestoreState>::unpack(Tuple const &val) { return (ERestoreState)val.getInt(0); }
2017-09-07 00:46:27 +08:00
ACTOR Future < std : : vector < KeyBackedTag > > TagUidMap : : getAll_impl ( TagUidMap * tagsMap , Reference < ReadYourWritesTransaction > tr ) {
2017-11-15 05:51:23 +08:00
state Key prefix = tagsMap - > prefix ; // Copying it here as tagsMap lifetime is not tied to this actor
2017-09-07 00:46:27 +08:00
TagMap : : PairsType tagPairs = wait ( tagsMap - > getRange ( tr , std : : string ( ) , { } , 1e6 ) ) ;
std : : vector < KeyBackedTag > results ;
for ( auto & p : tagPairs )
2017-11-15 05:51:23 +08:00
results . push_back ( KeyBackedTag ( p . first , prefix ) ) ;
2017-09-07 00:46:27 +08:00
return results ;
}
2017-05-26 04:48:44 +08:00
2017-09-06 02:38:40 +08:00
KeyBackedTag : : KeyBackedTag ( std : : string tagName , StringRef tagMapPrefix )
2017-08-29 02:28:19 +08:00
: KeyBackedProperty < UidAndAbortedFlagT > ( TagUidMap ( tagMapPrefix ) . getProperty ( tagName ) ) , tagName ( tagName ) , tagMapPrefix ( tagMapPrefix ) { }
2017-08-29 02:59:04 +08:00
class RestoreConfig : public KeyBackedConfig {
public :
RestoreConfig ( UID uid = UID ( ) ) : KeyBackedConfig ( fileRestorePrefixRange . begin , uid ) { }
RestoreConfig ( Reference < Task > task ) : KeyBackedConfig ( fileRestorePrefixRange . begin , task ) { }
2017-05-26 04:48:44 +08:00
KeyBackedProperty < ERestoreState > stateEnum ( ) {
return configSpace . pack ( LiteralStringRef ( __FUNCTION__ ) ) ;
}
Future < StringRef > stateText ( Reference < ReadYourWritesTransaction > tr ) {
return map ( stateEnum ( ) . getD ( tr ) , [ ] ( ERestoreState s ) - > StringRef { return FileBackupAgent : : restoreStateText ( s ) ; } ) ;
}
KeyBackedProperty < Key > addPrefix ( ) {
return configSpace . pack ( LiteralStringRef ( __FUNCTION__ ) ) ;
}
KeyBackedProperty < Key > removePrefix ( ) {
return configSpace . pack ( LiteralStringRef ( __FUNCTION__ ) ) ;
}
KeyBackedProperty < KeyRange > restoreRange ( ) {
return configSpace . pack ( LiteralStringRef ( __FUNCTION__ ) ) ;
}
KeyBackedProperty < Key > batchFuture ( ) {
return configSpace . pack ( LiteralStringRef ( __FUNCTION__ ) ) ;
}
KeyBackedProperty < Version > restoreVersion ( ) {
return configSpace . pack ( LiteralStringRef ( __FUNCTION__ ) ) ;
}
2017-11-15 15:33:17 +08:00
KeyBackedProperty < Reference < IBackupContainer > > sourceContainer ( ) {
2017-05-26 04:48:44 +08:00
return configSpace . pack ( LiteralStringRef ( __FUNCTION__ ) ) ;
}
2017-11-15 15:33:17 +08:00
// Get the source container as a bare URL, without creating a container instance
KeyBackedProperty < Value > sourceContainerURL ( ) {
return configSpace . pack ( LiteralStringRef ( " sourceContainer " ) ) ;
}
2017-05-26 04:48:44 +08:00
// Total bytes written by all log and range restore tasks.
KeyBackedBinaryValue < int64_t > bytesWritten ( ) {
return configSpace . pack ( LiteralStringRef ( __FUNCTION__ ) ) ;
}
// File blocks that have had tasks created for them by the Dispatch task
KeyBackedBinaryValue < int64_t > filesBlocksDispatched ( ) {
return configSpace . pack ( LiteralStringRef ( __FUNCTION__ ) ) ;
}
// File blocks whose tasks have finished
KeyBackedBinaryValue < int64_t > fileBlocksFinished ( ) {
return configSpace . pack ( LiteralStringRef ( __FUNCTION__ ) ) ;
}
// Total number of files in the fileMap
KeyBackedBinaryValue < int64_t > fileCount ( ) {
return configSpace . pack ( LiteralStringRef ( __FUNCTION__ ) ) ;
}
// Total number of file blocks in the fileMap
KeyBackedBinaryValue < int64_t > fileBlockCount ( ) {
return configSpace . pack ( LiteralStringRef ( __FUNCTION__ ) ) ;
}
2017-11-15 15:33:17 +08:00
// Describes a file to load blocks from during restore. Ordered by version and then fileName to enable
// incrementally advancing through the map, saving the version and path of the next starting point.
struct RestoreFile {
Version version ;
std : : string fileName ;
bool isRange ; // false for log file
int64_t blockSize ;
int64_t fileSize ;
Version endVersion ; // not meaningful for range files
Tuple pack ( ) const {
return Tuple ( )
. append ( version )
. append ( StringRef ( fileName ) )
. append ( isRange )
. append ( fileSize )
. append ( blockSize )
. append ( endVersion ) ;
}
static RestoreFile unpack ( Tuple const & t ) {
RestoreFile r ;
int i = 0 ;
r . version = t . getInt ( i + + ) ;
r . fileName = t . getString ( i + + ) . toString ( ) ;
r . isRange = t . getInt ( i + + ) ! = 0 ;
r . fileSize = t . getInt ( i + + ) ;
r . blockSize = t . getInt ( i + + ) ;
r . endVersion = t . getInt ( i + + ) ;
return r ;
}
} ;
typedef KeyBackedSet < RestoreFile > FileSetT ;
FileSetT fileSet ( ) {
2017-05-26 04:48:44 +08:00
return configSpace . pack ( LiteralStringRef ( __FUNCTION__ ) ) ;
}
Future < bool > isRunnable ( Reference < ReadYourWritesTransaction > tr ) {
return map ( stateEnum ( ) . getD ( tr ) , [ ] ( ERestoreState s ) - > bool { return s ! = ERestoreState : : ABORTED
& & s ! = ERestoreState : : COMPLETED
& & s ! = ERestoreState : : UNITIALIZED ;
} ) ;
}
2017-11-16 05:33:09 +08:00
Future < Void > logError ( Database cx , Error e , std : : string const & details , void * taskInstance = nullptr ) {
2017-05-26 04:48:44 +08:00
if ( ! uid . isValid ( ) ) {
TraceEvent ( SevError , " FileRestoreErrorNoUID " ) . error ( e ) . detail ( " Description " , details ) ;
return Void ( ) ;
}
2017-11-19 20:34:28 +08:00
TraceEvent t ( SevWarn , " FileRestoreError " ) ;
t . error ( e ) . detail ( " RestoreUID " , uid ) . detail ( " Description " , details ) . detail ( " TaskInstance " , ( uint64_t ) taskInstance ) ;
// These should not happen
if ( e . code ( ) = = error_code_key_not_found )
t . backtrace ( ) ;
2017-10-13 02:04:11 +08:00
std : : string msg = format ( " ERROR: %s (%s) " , details . c_str ( ) , e . what ( ) ) ;
2017-09-09 07:09:18 +08:00
return lastError ( ) . set ( cx , { msg , ( int64_t ) now ( ) } ) ;
2017-05-26 04:48:44 +08:00
}
Key mutationLogPrefix ( ) {
return uidPrefixKey ( applyLogKeys . begin , uid ) ;
}
Key applyMutationsMapPrefix ( ) {
return uidPrefixKey ( applyMutationsKeyVersionMapRange . begin , uid ) ;
}
ACTOR static Future < int64_t > getApplyVersionLag_impl ( Reference < ReadYourWritesTransaction > tr , UID uid ) {
// Both of these are snapshot reads
state Future < Optional < Value > > beginVal = tr - > get ( uidPrefixKey ( applyMutationsBeginRange . begin , uid ) , true ) ;
state Future < Optional < Value > > endVal = tr - > get ( uidPrefixKey ( applyMutationsEndRange . begin , uid ) , true ) ;
Void _ = wait ( success ( beginVal ) & & success ( endVal ) ) ;
if ( ! beginVal . get ( ) . present ( ) | | ! endVal . get ( ) . present ( ) )
return 0 ;
Version beginVersion = BinaryReader : : fromStringRef < Version > ( beginVal . get ( ) . get ( ) , Unversioned ( ) ) ;
Version endVersion = BinaryReader : : fromStringRef < Version > ( endVal . get ( ) . get ( ) , Unversioned ( ) ) ;
return endVersion - beginVersion ;
}
Future < int64_t > getApplyVersionLag ( Reference < ReadYourWritesTransaction > tr ) {
return getApplyVersionLag_impl ( tr , uid ) ;
}
void initApplyMutations ( Reference < ReadYourWritesTransaction > tr , Key addPrefix , Key removePrefix ) {
// Set these because they have to match the applyMutations values.
this - > addPrefix ( ) . set ( tr , addPrefix ) ;
this - > removePrefix ( ) . set ( tr , removePrefix ) ;
clearApplyMutationsKeys ( tr ) ;
// Initialize add/remove prefix, range version map count and set the map's start key to InvalidVersion
tr - > set ( uidPrefixKey ( applyMutationsAddPrefixRange . begin , uid ) , addPrefix ) ;
tr - > set ( uidPrefixKey ( applyMutationsRemovePrefixRange . begin , uid ) , removePrefix ) ;
int64_t startCount = 0 ;
tr - > set ( uidPrefixKey ( applyMutationsKeyVersionCountRange . begin , uid ) , StringRef ( ( uint8_t * ) & startCount , 8 ) ) ;
Key mapStart = uidPrefixKey ( applyMutationsKeyVersionMapRange . begin , uid ) ;
tr - > set ( mapStart , BinaryWriter : : toValue < Version > ( invalidVersion , Unversioned ( ) ) ) ;
}
void clearApplyMutationsKeys ( Reference < ReadYourWritesTransaction > tr ) {
2017-12-21 07:41:47 +08:00
tr - > setOption ( FDBTransactionOptions : : COMMIT_ON_FIRST_PROXY ) ;
2017-05-26 04:48:44 +08:00
// Clear add/remove prefix keys
tr - > clear ( uidPrefixKey ( applyMutationsAddPrefixRange . begin , uid ) ) ;
tr - > clear ( uidPrefixKey ( applyMutationsRemovePrefixRange . begin , uid ) ) ;
// Clear range version map and count key
tr - > clear ( uidPrefixKey ( applyMutationsKeyVersionCountRange . begin , uid ) ) ;
Key mapStart = uidPrefixKey ( applyMutationsKeyVersionMapRange . begin , uid ) ;
tr - > clear ( KeyRangeRef ( mapStart , strinc ( mapStart ) ) ) ;
// Clear any loaded mutations that have not yet been applied
Key mutationPrefix = mutationLogPrefix ( ) ;
tr - > clear ( KeyRangeRef ( mutationPrefix , strinc ( mutationPrefix ) ) ) ;
// Clear end and begin versions (intentionally in this order)
tr - > clear ( uidPrefixKey ( applyMutationsEndRange . begin , uid ) ) ;
tr - > clear ( uidPrefixKey ( applyMutationsBeginRange . begin , uid ) ) ;
}
void setApplyBeginVersion ( Reference < ReadYourWritesTransaction > tr , Version ver ) {
tr - > set ( uidPrefixKey ( applyMutationsBeginRange . begin , uid ) , BinaryWriter : : toValue ( ver , Unversioned ( ) ) ) ;
}
void setApplyEndVersion ( Reference < ReadYourWritesTransaction > tr , Version ver ) {
tr - > set ( uidPrefixKey ( applyMutationsEndRange . begin , uid ) , BinaryWriter : : toValue ( ver , Unversioned ( ) ) ) ;
}
Future < Version > getApplyEndVersion ( Reference < ReadYourWritesTransaction > tr ) {
return map ( tr - > get ( uidPrefixKey ( applyMutationsEndRange . begin , uid ) ) , [ = ] ( Optional < Value > const & value ) - > Version {
return value . present ( ) ? BinaryReader : : fromStringRef < Version > ( value . get ( ) , Unversioned ( ) ) : 0 ;
} ) ;
}
static Future < std : : string > getProgress_impl ( RestoreConfig const & restore , Reference < ReadYourWritesTransaction > const & tr ) ;
Future < std : : string > getProgress ( Reference < ReadYourWritesTransaction > tr ) {
return getProgress_impl ( * this , tr ) ;
}
static Future < std : : string > getFullStatus_impl ( RestoreConfig const & restore , Reference < ReadYourWritesTransaction > const & tr ) ;
Future < std : : string > getFullStatus ( Reference < ReadYourWritesTransaction > tr ) {
return getFullStatus_impl ( * this , tr ) ;
}
} ;
2017-11-15 15:33:17 +08:00
typedef RestoreConfig : : RestoreFile RestoreFile ;
2017-05-26 04:48:44 +08:00
// Builds a one-line human readable progress summary for a restore and emits a
// FileRestoreProgress trace event with the same information.
ACTOR Future<std::string> RestoreConfig::getProgress_impl(RestoreConfig restore, Reference<ReadYourWritesTransaction> tr) {
	tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
	tr->setOption(FDBTransactionOptions::LOCK_AWARE);

	// Start all reads in parallel, then wait on them together below.
	state Future<int64_t> fileCount = restore.fileCount().getD(tr);
	state Future<int64_t> fileBlockCount = restore.fileBlockCount().getD(tr);
	state Future<int64_t> fileBlocksDispatched = restore.filesBlocksDispatched().getD(tr);
	state Future<int64_t> fileBlocksFinished = restore.fileBlocksFinished().getD(tr);
	state Future<int64_t> bytesWritten = restore.bytesWritten().getD(tr);
	state Future<StringRef> status = restore.stateText(tr);
	state Future<Version> lag = restore.getApplyVersionLag(tr);
	state Future<std::string> tag = restore.tag().getD(tr);
	state Future<std::pair<std::string, int64_t>> lastError = restore.lastError().getD(tr);

	// restore might no longer be valid after the first wait so make sure it is not needed anymore.
	state UID uid = restore.getUid();
	Void _ = wait(success(fileCount) && success(fileBlockCount) && success(fileBlocksDispatched) && success(fileBlocksFinished) && success(bytesWritten) && success(status) && success(lag) && success(tag) && success(lastError));

	std::string errstr = "None";
	if(lastError.get().second != 0)
		errstr = format("'%s' %llds ago.\n", lastError.get().first.c_str(), (int64_t)now() - lastError.get().second);

	TraceEvent("FileRestoreProgress")
		.detail("RestoreUID", uid)
		.detail("Tag", tag.get())
		.detail("State", status.get().toString())
		.detail("FileCount", fileCount.get())
		.detail("FileBlocksFinished", fileBlocksFinished.get())
		.detail("FileBlocksTotal", fileBlockCount.get())
		.detail("FileBlocksInProgress", fileBlocksDispatched.get() - fileBlocksFinished.get())
		.detail("BytesWritten", bytesWritten.get())
		.detail("ApplyLag", lag.get())
		.detail("TaskInstance", (uint64_t)this);

	return format("Tag: %s UID: %s State: %s Blocks: %lld/%lld BlocksInProgress: %lld Files: %lld BytesWritten: %lld ApplyVersionLag: %lld LastError: %s",
		tag.get().c_str(),
		uid.toString().c_str(),
		status.get().toString().c_str(),
		fileBlocksFinished.get(),
		fileBlockCount.get(),
		fileBlocksDispatched.get() - fileBlocksFinished.get(),
		fileCount.get(),
		bytesWritten.get(),
		lag.get(),
		errstr.c_str()
	);
}
// Builds a full human readable status line: progress summary plus source URL,
// restored range, prefixes, and target version.
ACTOR Future<std::string> RestoreConfig::getFullStatus_impl(RestoreConfig restore, Reference<ReadYourWritesTransaction> tr) {
	tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
	tr->setOption(FDBTransactionOptions::LOCK_AWARE);

	state Future<KeyRange> range = restore.restoreRange().getD(tr);
	state Future<Key> addPrefix = restore.addPrefix().getD(tr);
	state Future<Key> removePrefix = restore.removePrefix().getD(tr);
	state Future<Key> url = restore.sourceContainerURL().getD(tr);
	state Future<Version> restoreVersion = restore.restoreVersion().getD(tr);
	state Future<std::string> progress = restore.getProgress(tr);

	// restore might no longer be valid after the first wait so make sure it is not needed anymore.
	state UID uid = restore.getUid();
	Void _ = wait(success(range) && success(addPrefix) && success(removePrefix) && success(url) && success(restoreVersion) && success(progress));

	return format("%s URL: %s Begin: '%s' End: '%s' AddPrefix: '%s' RemovePrefix: '%s' Version: %lld",
		progress.get().c_str(),
		url.get().toString().c_str(),
		printable(range.get().begin).c_str(),
		printable(range.get().end).c_str(),
		printable(addPrefix.get()).c_str(),
		printable(removePrefix.get()).c_str(),
		restoreVersion.get()
	);
}
FileBackupAgent::FileBackupAgent()
	: subspace(Subspace(fileBackupPrefixRange.begin))
	// The other subspaces have logUID -> value
	, config(subspace.get(BackupAgentBase::keyConfig))
	, lastRestorable(subspace.get(FileBackupAgent::keyLastRestorable))
	, taskBucket(new TaskBucket(subspace.get(BackupAgentBase::keyTasks), true, false, true))
	, futureBucket(new FutureBucket(subspace.get(BackupAgentBase::keyFutures), true, true))
{
}
namespace fileBackup {
// Padding bytes for backup files. The largest padded area that could ever have to be written is
// the size of two 32 bit ints and the largest key size and largest value size. Since CLIENT_KNOBS
// may not be initialized yet a conservative constant is being used.
std::string paddingFFs(128 * 1024, 0xFF);
// File Format handlers.
// Both Range and Log formats are designed to be readable starting at any 1MB boundary
// so they can be read in parallel.
//
// Writer instances must be kept alive while any member actors are in progress.
//
// RangeFileWriter must be used as follows:
// 1 - writeKey(key) the queried key range begin
// 2 - writeKV(k, v) each kv pair to restore
// 3 - writeKey(key) the queried key range end
//
// RangeFileWriter will insert the required padding, header, and extra
// end/begin keys around the 1MB boundaries as needed.
//
// Example:
// The range a-z is queried and returns c-j which covers 3 blocks.
// The client code writes keys in this sequence:
// a c d e f g h i j z
//
// H = header P = padding a...z = keys v = value | = block boundary
//
// Encoded file: H a cv dv ev P | H e ev fv gv hv P | H h hv iv jv z
// Decoded in blocks yields:
// Block 1: range [a, e) with kv pairs cv, dv
// Block 2: range [e, h) with kv pairs ev, fv, gv
// Block 3: range [h, z) with kv pairs hv, iv, jv
//
// NOTE: All blocks except for the final block will have one last
// value which will not be used. This isn't actually a waste since
// if the next KV pair wouldn't fit within the block after the value
// then the space after the final key to the next 1MB boundary would
// just be padding anyway.
struct RangeFileWriter {
2017-11-15 15:33:17 +08:00
RangeFileWriter ( Reference < IBackupFile > file = Reference < IBackupFile > ( ) , int blockSize = 0 ) : file ( file ) , blockSize ( blockSize ) , blockEnd ( 0 ) , fileVersion ( 1001 ) { }
2017-05-26 04:48:44 +08:00
// Handles the first block and internal blocks. Ends current block if needed.
ACTOR static Future < Void > newBlock ( RangeFileWriter * self , int bytesNeeded ) {
// Write padding to finish current block if needed
2017-11-15 15:33:17 +08:00
int bytesLeft = self - > blockEnd - self - > file - > size ( ) ;
2017-05-26 04:48:44 +08:00
if ( bytesLeft > 0 ) {
2017-11-16 05:33:09 +08:00
Void _ = wait ( self - > file - > append ( ( uint8_t * ) paddingFFs . data ( ) , bytesLeft ) ) ;
2017-05-26 04:48:44 +08:00
}
// Set new blockEnd
self - > blockEnd + = self - > blockSize ;
// write Header
2017-11-16 05:33:09 +08:00
Void _ = wait ( self - > file - > append ( ( uint8_t * ) & self - > fileVersion , sizeof ( self - > fileVersion ) ) ) ;
2017-05-26 04:48:44 +08:00
// If this is NOT the first block then write duplicate stuff needed from last block
if ( self - > blockEnd > self - > blockSize ) {
2017-11-16 05:33:09 +08:00
Void _ = wait ( self - > file - > appendString ( self - > lastKey ) ) ;
Void _ = wait ( self - > file - > appendString ( self - > lastKey ) ) ;
Void _ = wait ( self - > file - > appendString ( self - > lastValue ) ) ;
2017-05-26 04:48:44 +08:00
}
// There must now be room in the current block for bytesNeeded or the block size is too small
2017-11-15 15:33:17 +08:00
if ( self - > file - > size ( ) + bytesNeeded > self - > blockEnd )
2017-05-26 04:48:44 +08:00
throw backup_bad_block_size ( ) ;
return Void ( ) ;
}
// Ends the current block if necessary based on bytesNeeded.
Future < Void > newBlockIfNeeded ( int bytesNeeded ) {
2017-11-15 15:33:17 +08:00
if ( file - > size ( ) + bytesNeeded > blockEnd )
2017-05-26 04:48:44 +08:00
return newBlock ( this , bytesNeeded ) ;
return Void ( ) ;
}
// Start a new block if needed, then write the key and value
ACTOR static Future < Void > writeKV_impl ( RangeFileWriter * self , Key k , Value v ) {
int toWrite = sizeof ( int32_t ) + k . size ( ) + sizeof ( int32_t ) + v . size ( ) ;
Void _ = wait ( self - > newBlockIfNeeded ( toWrite ) ) ;
2017-11-16 05:33:09 +08:00
Void _ = wait ( self - > file - > appendString ( k ) ) ;
Void _ = wait ( self - > file - > appendString ( v ) ) ;
2017-05-26 04:48:44 +08:00
self - > lastKey = k ;
self - > lastValue = v ;
return Void ( ) ;
}
Future < Void > writeKV ( Key k , Value v ) { return writeKV_impl ( this , k , v ) ; }
// Write begin key or end key.
ACTOR static Future < Void > writeKey_impl ( RangeFileWriter * self , Key k ) {
2017-11-16 05:33:09 +08:00
int toWrite = sizeof ( uint32_t ) + k . size ( ) ;
2017-05-26 04:48:44 +08:00
Void _ = wait ( self - > newBlockIfNeeded ( toWrite ) ) ;
2017-11-16 05:33:09 +08:00
Void _ = wait ( self - > file - > appendString ( k ) ) ;
2017-05-26 04:48:44 +08:00
return Void ( ) ;
}
Future < Void > writeKey ( Key k ) { return writeKey_impl ( this , k ) ; }
2017-11-15 15:33:17 +08:00
Reference < IBackupFile > file ;
2017-05-26 04:48:44 +08:00
int blockSize ;
private :
int64_t blockEnd ;
uint32_t fileVersion ;
Key lastKey ;
Key lastValue ;
} ;
// Helper class for reading restore data from a buffer and throwing the right errors.
struct StringRefReader {
StringRefReader ( StringRef s = StringRef ( ) , Error e = Error ( ) ) : rptr ( s . begin ( ) ) , end ( s . end ( ) ) , failure_error ( e ) { }
// Return remainder of data as a StringRef
StringRef remainder ( ) {
return StringRef ( rptr , end - rptr ) ;
}
// Return a pointer to len bytes at the current read position and advance read pos
const uint8_t * consume ( unsigned int len ) {
if ( rptr = = end & & len ! = 0 )
throw end_of_stream ( ) ;
const uint8_t * p = rptr ;
rptr + = len ;
if ( rptr > end )
throw failure_error ;
return p ;
}
// Return a T from the current read position and advance read pos
template < typename T > const T consume ( ) {
return * ( const T * ) consume ( sizeof ( T ) ) ;
}
// Functions for consuming big endian (network byte order) integers.
// Consumes a big endian number, swaps it to little endian, and returns it.
const int32_t consumeNetworkInt32 ( ) { return ( int32_t ) bigEndian32 ( ( uint32_t ) consume < int32_t > ( ) ) ; }
const uint32_t consumeNetworkUInt32 ( ) { return bigEndian32 ( consume < uint32_t > ( ) ) ; }
bool eof ( ) { return rptr = = end ; }
const uint8_t * rptr , * end ;
Error failure_error ;
} ;
// Reads a single block of a version-1001 range file and decodes it into KV pairs.
// The first and last results are the block's begin/end boundary keys (with empty values);
// the pairs between them are the restored data.  Throws restore_* errors on corruption.
ACTOR Future<Standalone<VectorRef<KeyValueRef>>> decodeRangeFileBlock(Reference<IAsyncFile> file, int64_t offset, int len) {
	state Standalone<StringRef> buf = makeString(len);
	int rLen = wait(file->read(mutateString(buf), len, offset));
	if(rLen != len)
		throw restore_bad_read();

	Standalone<VectorRef<KeyValueRef>> results({}, buf.arena());
	state StringRefReader reader(buf, restore_corrupted_data());

	try {
		// Read header, currently only decoding version 1001
		if(reader.consume<int32_t>() != 1001)
			throw restore_unsupported_file_version();

		// Read begin key, if this fails then block was invalid.
		uint32_t kLen = reader.consumeNetworkUInt32();
		const uint8_t *k = reader.consume(kLen);
		results.push_back(results.arena(), KeyValueRef(KeyRef(k, kLen), ValueRef()));

		// Read kv pairs and end key
		while(1) {
			// Read a key.
			kLen = reader.consumeNetworkUInt32();
			k = reader.consume(kLen);

			// If eof reached or first value len byte is 0xFF then a valid block end was reached.
			if(reader.eof() || *reader.rptr == 0xFF) {
				results.push_back(results.arena(), KeyValueRef(KeyRef(k, kLen), ValueRef()));
				break;
			}

			// Read a value, which must exist or the block is invalid
			uint32_t vLen = reader.consumeNetworkUInt32();
			const uint8_t *v = reader.consume(vLen);
			results.push_back(results.arena(), KeyValueRef(KeyRef(k, kLen), ValueRef(v, vLen)));

			// If eof reached or first byte of next key len is 0xFF then a valid block end was reached.
			if(reader.eof() || *reader.rptr == 0xFF)
				break;
		}

		// Make sure any remaining bytes in the block are 0xFF
		for(auto b : reader.remainder())
			if(b != 0xFF)
				throw restore_corrupted_data_padding();

		return results;
	} catch(Error &e) {
		TraceEvent(SevWarn, "FileRestoreCorruptRangeFileBlock")
			.detail("Filename", file->getFilename())
			.detail("BlockOffset", offset)
			.detail("BlockLen", len)
			.detail("ErrorRelativeOffset", reader.rptr - buf.begin())
			.detail("ErrorAbsoluteOffset", reader.rptr - buf.begin() + offset)
			.error(e);
		throw;
	}
}
// Very simple format compared to KeyRange files.
// Header, [Key, Value]... Key len
struct LogFileWriter {
static const std : : string & FFs ;
2017-11-15 15:33:17 +08:00
LogFileWriter ( Reference < IBackupFile > file = Reference < IBackupFile > ( ) , int blockSize = 0 ) : file ( file ) , blockSize ( blockSize ) , blockEnd ( 0 ) , fileVersion ( 2001 ) { }
2017-05-26 04:48:44 +08:00
// Start a new block if needed, then write the key and value
ACTOR static Future < Void > writeKV_impl ( LogFileWriter * self , Key k , Value v ) {
// If key and value do not fit in this block, end it and start a new one
int toWrite = sizeof ( int32_t ) + k . size ( ) + sizeof ( int32_t ) + v . size ( ) ;
2017-11-15 15:33:17 +08:00
if ( self - > file - > size ( ) + toWrite > self - > blockEnd ) {
2017-05-26 04:48:44 +08:00
// Write padding if needed
2017-11-15 15:33:17 +08:00
int bytesLeft = self - > blockEnd - self - > file - > size ( ) ;
2017-05-26 04:48:44 +08:00
if ( bytesLeft > 0 ) {
2017-11-16 05:33:09 +08:00
Void _ = wait ( self - > file - > append ( ( uint8_t * ) paddingFFs . data ( ) , bytesLeft ) ) ;
2017-05-26 04:48:44 +08:00
}
// Set new blockEnd
self - > blockEnd + = self - > blockSize ;
// write Header
2017-11-16 05:33:09 +08:00
Void _ = wait ( self - > file - > append ( ( uint8_t * ) & self - > fileVersion , sizeof ( self - > fileVersion ) ) ) ;
2017-05-26 04:48:44 +08:00
}
2017-11-16 05:33:09 +08:00
Void _ = wait ( self - > file - > appendString ( k ) ) ;
Void _ = wait ( self - > file - > appendString ( v ) ) ;
2017-05-26 04:48:44 +08:00
// At this point we should be in whatever the current block is or the block size is too small
2017-11-15 15:33:17 +08:00
if ( self - > file - > size ( ) > self - > blockEnd )
2017-05-26 04:48:44 +08:00
throw backup_bad_block_size ( ) ;
return Void ( ) ;
}
Future < Void > writeKV ( Key k , Value v ) { return writeKV_impl ( this , k , v ) ; }
2017-11-15 15:33:17 +08:00
Reference < IBackupFile > file ;
2017-05-26 04:48:44 +08:00
int blockSize ;
private :
int64_t blockEnd ;
uint32_t fileVersion ;
} ;
// Reads a single block of a version-2001 log file and decodes it into KV pairs.
// Throws restore_* errors on corruption.
ACTOR Future<Standalone<VectorRef<KeyValueRef>>> decodeLogFileBlock(Reference<IAsyncFile> file, int64_t offset, int len) {
	state Standalone<StringRef> buf = makeString(len);
	int rLen = wait(file->read(mutateString(buf), len, offset));
	if(rLen != len)
		throw restore_bad_read();

	Standalone<VectorRef<KeyValueRef>> results({}, buf.arena());
	state StringRefReader reader(buf, restore_corrupted_data());

	try {
		// Read header, currently only decoding version 2001
		if(reader.consume<int32_t>() != 2001)
			throw restore_unsupported_file_version();

		// Read k/v pairs.  Block ends either at end of last value exactly or with 0xFF as first key len byte.
		while(1) {
			// If eof reached or first key len bytes is 0xFF then end of block was reached.
			if(reader.eof() || *reader.rptr == 0xFF)
				break;

			// Read key and value.  If anything throws then there is a problem.
			uint32_t kLen = reader.consumeNetworkUInt32();
			const uint8_t *k = reader.consume(kLen);
			uint32_t vLen = reader.consumeNetworkUInt32();
			const uint8_t *v = reader.consume(vLen);
			results.push_back(results.arena(), KeyValueRef(KeyRef(k, kLen), ValueRef(v, vLen)));
		}

		// Make sure any remaining bytes in the block are 0xFF
		for(auto b : reader.remainder())
			if(b != 0xFF)
				throw restore_corrupted_data_padding();

		return results;
	} catch(Error &e) {
		TraceEvent(SevWarn, "FileRestoreCorruptLogFileBlock")
			.detail("Filename", file->getFilename())
			.detail("BlockOffset", offset)
			.detail("BlockLen", len)
			.detail("ErrorRelativeOffset", reader.rptr - buf.begin())
			.detail("ErrorAbsoluteOffset", reader.rptr - buf.begin() + offset)
			.error(e);
		throw;
	}
}
2017-09-09 07:09:18 +08:00
ACTOR Future < Void > checkTaskVersion ( Database cx , Reference < Task > task , StringRef name , uint32_t version ) {
2017-05-26 04:48:44 +08:00
uint32_t taskVersion = task - > getVersion ( ) ;
if ( taskVersion > version ) {
2017-09-09 07:09:18 +08:00
state Error err = task_invalid_version ( ) ;
2017-11-19 20:34:28 +08:00
TraceEvent ( SevWarn , " BA_BackupRangeTaskFunc_execute " ) . detail ( " taskVersion " , taskVersion ) . detail ( " Name " , printable ( name ) ) . detail ( " Version " , version ) ;
2017-09-09 07:09:18 +08:00
if ( KeyBackedConfig : : TaskParams . uid ( ) . exists ( task ) ) {
2017-11-16 05:33:09 +08:00
std : : string msg = format ( " %s task version `%lu' is greater than supported version `%lu' " , task - > params [ Task : : reservedTaskParamKeyType ] . toString ( ) . c_str ( ) , ( unsigned long ) taskVersion , ( unsigned long ) version ) ;
2017-09-09 07:09:18 +08:00
Void _ = wait ( BackupConfig ( task ) . logError ( cx , err , msg ) ) ;
}
2017-05-26 04:48:44 +08:00
2017-09-09 07:09:18 +08:00
throw err ;
2017-05-26 04:48:44 +08:00
}
return Void ( ) ;
}
2017-09-12 02:14:30 +08:00
std : : function < void ( Reference < Task > ) > NOP_SETUP_TASK_FN = [ ] ( Reference < Task > task ) { /* NOP */ } ;
// Creates a new backup task bound to the given backup config and adds it to the task bucket.
// If waitFor is set, the task is scheduled to run when that future is set and the literal
// "OnSetAddTask" is returned instead of the task's key.
ACTOR static Future<Key> addBackupTask(StringRef name,
									   uint32_t version,
									   Reference<ReadYourWritesTransaction> tr,
									   Reference<TaskBucket> taskBucket,
									   TaskCompletionKey completionKey,
									   BackupConfig config,
									   Reference<TaskFuture> waitFor = Reference<TaskFuture>(),
									   std::function<void(Reference<Task>)> setupTaskFn = NOP_SETUP_TASK_FN,
									   int priority = 0) {
	tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
	tr->setOption(FDBTransactionOptions::LOCK_AWARE);

	Key doneKey = wait(completionKey.get(tr, taskBucket));
	state Reference<Task> task(new Task(name, version, doneKey, priority));

	// Bind backup config to new task
	Void _ = wait(config.toTask(tr, task));

	// Set task specific params
	setupTaskFn(task);

	if(!waitFor) {
		return taskBucket->addTask(tr, task);
	}

	Void _ = wait(waitFor->onSetAddTask(tr, taskBucket, task));
	return LiteralStringRef("OnSetAddTask");
}
2017-11-16 05:33:09 +08:00
// Backup and Restore taskFunc definitions will inherit from one of the following classes, which
// serve to catch and log to the appropriate config any error that execute/finish didn't catch and log.
struct RestoreTaskFuncBase : TaskFuncBase {
virtual Future < Void > handleError ( Database cx , Reference < Task > task , Error const & error ) {
2017-11-19 20:39:18 +08:00
return RestoreConfig ( task ) . logError ( cx , error , format ( " Task '%s' UID '%s' %s failed " , task - > params [ Task : : reservedTaskParamKeyType ] . printable ( ) . c_str ( ) , task - > key . printable ( ) . c_str ( ) , toString ( task ) . c_str ( ) ) ) ;
}
virtual std : : string toString ( Reference < Task > task )
{
return " " ;
2017-11-16 05:33:09 +08:00
}
} ;
struct BackupTaskFuncBase : TaskFuncBase {
virtual Future < Void > handleError ( Database cx , Reference < Task > task , Error const & error ) {
2017-11-19 20:39:18 +08:00
return BackupConfig ( task ) . logError ( cx , error , format ( " Task '%s' UID '%s' %s failed " , task - > params [ Task : : reservedTaskParamKeyType ] . printable ( ) . c_str ( ) , task - > key . printable ( ) . c_str ( ) , toString ( task ) . c_str ( ) ) ) ;
}
virtual std : : string toString ( Reference < Task > task )
{
return " " ;
2017-11-16 05:33:09 +08:00
}
} ;
2017-12-14 17:44:38 +08:00
// Return up to `limit` shard boundary keys strictly inside (beginKey, endKey),
// read from the keyServers keyspace with the prefix stripped off each result.
ACTOR static Future<Standalone<VectorRef<KeyRef>>> getBlockOfShards(Reference<ReadYourWritesTransaction> tr, Key beginKey, Key endKey, int limit) {
	tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
	tr->setOption(FDBTransactionOptions::LOCK_AWARE);

	state Standalone<VectorRef<KeyRef>> boundaries;

	// keyAfter() on the begin bound excludes beginKey itself from the results.
	Standalone<RangeResultRef> shardEntries = wait(tr->getRange(KeyRangeRef(keyAfter(beginKey.withPrefix(keyServersPrefix)), endKey.withPrefix(keyServersPrefix)), limit));

	for (auto const& entry : shardEntries) {
		boundaries.push_back_deep(boundaries.arena(), entry.key.removePrefix(keyServersPrefix));
	}

	return boundaries;
}
2017-11-16 05:33:09 +08:00
// Backup task that copies a single key range into range files in the backup
// container, or — when the range spans shard boundaries — splits itself into
// one sub-task per shard slice instead.
struct BackupRangeTaskFunc : BackupTaskFuncBase {
2017-05-26 04:48:44 +08:00
static StringRef name ;
static const uint32_t version ;
2017-09-02 04:50:38 +08:00
// Task parameters: the range to back up, and whether _execute decided to defer
// to finish() to add sub-range tasks rather than writing data directly.
static struct {
static TaskParam < Key > beginKey ( ) {
return LiteralStringRef ( __FUNCTION__ ) ;
}
static TaskParam < Key > endKey ( ) {
return LiteralStringRef ( __FUNCTION__ ) ;
}
static TaskParam < bool > addBackupRangeTasks ( ) {
return LiteralStringRef ( __FUNCTION__ ) ;
}
} Params ;
2017-05-26 04:48:44 +08:00
2017-11-19 20:39:18 +08:00
// Human-readable task description used by BackupTaskFuncBase::handleError logging.
std : : string toString ( Reference < Task > task ) {
return format ( " beginKey '%s' endKey '%s' addTasks %d " ,
Params . beginKey ( ) . get ( task ) . printable ( ) . c_str ( ) ,
Params . endKey ( ) . get ( task ) . printable ( ) . c_str ( ) ,
Params . addBackupRangeTasks ( ) . get ( task )
) ;
}
2017-05-26 04:48:44 +08:00
StringRef getName ( ) const { return name ; } ;
Future < Void > execute ( Database cx , Reference < TaskBucket > tb , Reference < FutureBucket > fb , Reference < Task > task ) { return _execute ( cx , tb , fb , task ) ; } ;
Future < Void > finish ( Reference < ReadYourWritesTransaction > tr , Reference < TaskBucket > tb , Reference < FutureBucket > fb , Reference < Task > task ) { return _finish ( tr , tb , fb , task ) ; } ;
2017-11-15 15:33:17 +08:00
// Finish (which flushes/syncs) the file, and then in a single transaction, make some range backup progress durable.
// This means:
// - increment the backup config's range bytes written
// - update the range file map
// - update the task begin key
// - save/extend the task with the new params
// Returns whether or not the caller should continue executing the task.
ACTOR static Future < bool > finishRangeFile ( Reference < IBackupFile > file , Database cx , Reference < Task > task , Reference < TaskBucket > taskBucket , KeyRange range , Version version ) {
Void _ = wait ( file - > finish ( ) ) ;
2017-05-26 04:48:44 +08:00
// Ignore empty ranges.
if ( range . empty ( ) )
2017-12-01 09:18:57 +08:00
return false ;
2017-05-26 04:48:44 +08:00
state Reference < ReadYourWritesTransaction > tr ( new ReadYourWritesTransaction ( cx ) ) ;
2017-11-15 15:33:17 +08:00
state BackupConfig backup ( task ) ;
2017-12-01 09:18:57 +08:00
state bool usedFile = false ;
2017-11-15 15:33:17 +08:00
2017-12-04 12:52:09 +08:00
// Avoid unnecessary conflicts by preventing taskbucket's automatic timeout extension
// because the following transaction loop extends and updates the task.
2017-12-05 02:20:50 +08:00
Void _ = wait ( task - > extendMutex . take ( ) ) ;
2017-12-04 12:52:09 +08:00
state FlowLock : : Releaser releaser ( task - > extendMutex , 1 ) ;
2017-05-26 04:48:44 +08:00
loop {
try {
tr - > setOption ( FDBTransactionOptions : : ACCESS_SYSTEM_KEYS ) ;
tr - > setOption ( FDBTransactionOptions : : LOCK_AWARE ) ;
2017-11-15 15:33:17 +08:00
2017-12-01 09:18:57 +08:00
// Update the start key of the task so if this transaction completes but the task then fails
// when it is restarted it will continue where this execution left off.
2017-11-15 15:33:17 +08:00
Params . beginKey ( ) . set ( task , range . end ) ;
// Save and extend the task with the new begin parameter
2017-12-01 09:18:57 +08:00
state Version newTimeout = wait ( taskBucket - > extendTimeout ( tr , task , true ) ) ;
2017-05-26 04:48:44 +08:00
2017-11-15 15:33:17 +08:00
// Update the range bytes written in the backup config
backup . rangeBytesWritten ( ) . atomicOp ( tr , file - > size ( ) , MutationRef : : AddValue ) ;
2017-05-26 04:48:44 +08:00
2017-11-15 15:33:17 +08:00
// See if there is already a file for this key which has an earlier begin, update the map if not.
2017-12-18 06:29:57 +08:00
Optional < BackupConfig : : RangeSlice > s = wait ( backup . snapshotRangeFileMap ( ) . get ( tr , range . end ) ) ;
2017-12-01 09:18:57 +08:00
if ( ! s . present ( ) | | s . get ( ) . begin > = range . begin ) {
2017-12-18 06:29:57 +08:00
backup . snapshotRangeFileMap ( ) . set ( tr , range . end , { range . begin , version , file - > getFileName ( ) , file - > size ( ) } ) ;
2017-12-01 09:18:57 +08:00
usedFile = true ;
}
2017-05-26 04:48:44 +08:00
Void _ = wait ( tr - > commit ( ) ) ;
2017-12-01 09:18:57 +08:00
// Commit succeeded: update the in-memory task's timeout to match what was just
// made durable by extendTimeout above.
task - > timeoutVersion = newTimeout ;
2017-05-26 04:48:44 +08:00
break ;
} catch ( Error & e ) {
Void _ = wait ( tr - > onError ( e ) ) ;
}
}
2017-12-01 09:18:57 +08:00
return usedFile ;
2017-05-26 04:48:44 +08:00
}
2017-12-14 17:44:38 +08:00
// Schedule a new BackupRangeTaskFunc task covering [begin, end), optionally not
// runnable until scheduledVersion.
ACTOR static Future < Key > addTask ( Reference < ReadYourWritesTransaction > tr , Reference < TaskBucket > taskBucket , Reference < Task > parentTask , Key begin , Key end , TaskCompletionKey completionKey , Reference < TaskFuture > waitFor = Reference < TaskFuture > ( ) , int priority = 0 , Version scheduledVersion = invalidVersion ) {
2017-09-12 02:14:30 +08:00
Key key = wait ( addBackupTask ( BackupRangeTaskFunc : : name ,
BackupRangeTaskFunc : : version ,
tr , taskBucket , completionKey ,
BackupConfig ( parentTask ) ,
waitFor ,
[ = ] ( Reference < Task > task ) {
Params . beginKey ( ) . set ( task , begin ) ;
Params . endKey ( ) . set ( task , end ) ;
Params . addBackupRangeTasks ( ) . set ( task , false ) ;
2017-12-14 17:44:38 +08:00
if ( scheduledVersion ! = invalidVersion )
2017-12-21 05:48:31 +08:00
ReservedTaskParams : : scheduledVersion ( ) . set ( task , scheduledVersion ) ;
2017-09-12 02:14:30 +08:00
} ,
priority ) ) ;
return key ;
2017-05-26 04:48:44 +08:00
}
// Execute phase: stream the key range from the database and write it to range files,
// or set addBackupRangeTasks so finish() splits the work when the range spans shards.
ACTOR static Future < Void > _execute ( Database cx , Reference < TaskBucket > taskBucket , Reference < FutureBucket > futureBucket , Reference < Task > task ) {
state Reference < FlowLock > lock ( new FlowLock ( CLIENT_KNOBS - > BACKUP_LOCK_BYTES ) ) ;
Void _ = wait ( checkTaskVersion ( cx , task , BackupRangeTaskFunc : : name , BackupRangeTaskFunc : : version ) ) ;
2017-11-28 07:57:19 +08:00
state Key beginKey = Params . beginKey ( ) . get ( task ) ;
state Key endKey = Params . endKey ( ) . get ( task ) ;
// When a key range task saves the last chunk of progress and then the executor dies, when the task continues
// its beginKey and endKey will be equal but there is no work to be done.
if ( beginKey = = endKey )
return Void ( ) ;
2017-05-26 04:48:44 +08:00
// Find out if there is a shard boundary in (beginKey, endKey)
2017-11-28 07:57:19 +08:00
Standalone < VectorRef < KeyRef > > keys = wait ( runRYWTransaction ( cx , [ = ] ( Reference < ReadYourWritesTransaction > tr ) { return getBlockOfShards ( tr , beginKey , endKey , 1 ) ; } ) ) ;
2017-05-26 04:48:44 +08:00
if ( keys . size ( ) > 0 ) {
2017-09-02 04:50:38 +08:00
// Range crosses a shard boundary: defer to finish(), which adds sub-range tasks.
Params . addBackupRangeTasks ( ) . set ( task , true ) ;
2017-05-26 04:48:44 +08:00
return Void ( ) ;
}
// Read everything from beginKey to endKey, write it to an output file, run the output file processor, and
// then set on_done. If we are still writing after X seconds, end the output file and insert a new backup_range
// task for the remainder.
2017-11-15 15:33:17 +08:00
state Reference < IBackupFile > outFile ;
state Version outVersion = invalidVersion ;
2017-05-26 04:48:44 +08:00
state Key lastKey ;
// retrieve kvData
state PromiseStream < RangeResultWithVersion > results ;
2017-11-28 07:57:19 +08:00
state Future < Void > rc = readCommitted ( cx , results , lock , KeyRangeRef ( beginKey , endKey ) , true , true , true ) ;
2017-05-26 04:48:44 +08:00
state RangeFileWriter rangeFile ;
state BackupConfig backup ( task ) ;
2017-11-25 16:46:16 +08:00
// Don't need to check keepRunning(task) here because we will do that while finishing each output file, but if bc
// is false then clearly the backup is no longer in progress
state Reference < IBackupContainer > bc = wait ( backup . backupContainer ( ) . getD ( cx ) ) ;
if ( ! bc ) {
return Void ( ) ;
}
2017-11-15 15:33:17 +08:00
state bool done = false ;
2017-11-26 11:54:51 +08:00
state int64_t nrKeys = 0 ;
2017-05-26 04:48:44 +08:00
loop {
2017-11-25 16:46:16 +08:00
state RangeResultWithVersion values ;
try {
RangeResultWithVersion _values = waitNext ( results . getFuture ( ) ) ;
values = _values ;
lock - > release ( values . first . expectedSize ( ) ) ;
} catch ( Error & e ) {
if ( e . code ( ) = = error_code_end_of_stream )
done = true ;
else
throw ;
}
2017-05-26 04:48:44 +08:00
2017-11-25 16:46:16 +08:00
// If we've seen a new read version OR hit the end of the stream, then if we were writing a file finish it.
if ( values . second ! = outVersion | | done ) {
if ( outFile ) {
2017-11-19 20:44:33 +08:00
TEST ( outVersion ! = invalidVersion ) ; // Backup range task wrote multiple versions
state Key nextKey = done ? endKey : keyAfter ( lastKey ) ;
2017-11-25 16:46:16 +08:00
Void _ = wait ( rangeFile . writeKey ( nextKey ) ) ;
2017-11-15 15:33:17 +08:00
2017-12-01 09:18:57 +08:00
// Durably record progress up to nextKey; usedFile reports whether this file
// was actually added to the snapshot range file map.
bool usedFile = wait ( finishRangeFile ( outFile , cx , task , taskBucket , KeyRangeRef ( beginKey , nextKey ) , outVersion ) ) ;
2017-11-26 11:54:51 +08:00
TraceEvent ( " FileBackupWroteRangeFile " )
2017-12-20 07:27:04 +08:00
. detail ( " BackupUID " , backup . getUid ( ) )
. detail ( " BackupURL " , bc - > getURL ( ) )
2017-11-26 11:54:51 +08:00
. detail ( " Size " , outFile - > size ( ) )
. detail ( " Keys " , nrKeys )
2017-12-20 07:27:04 +08:00
. detail ( " ReadVersion " , outVersion )
2017-11-26 13:13:56 +08:00
. detail ( " BeginKey " , beginKey . printable ( ) )
. detail ( " EndKey " , nextKey . printable ( ) )
2017-12-01 09:18:57 +08:00
. detail ( " AddedFileToMap " , usedFile ) ;
2017-11-26 11:54:51 +08:00
nrKeys = 0 ;
2017-11-19 20:44:33 +08:00
beginKey = nextKey ;
2017-11-25 16:46:16 +08:00
}
2017-05-26 04:48:44 +08:00
2017-11-25 16:46:16 +08:00
if ( done )
return Void ( ) ;
2017-05-26 04:48:44 +08:00
2017-11-25 16:46:16 +08:00
// Start writing a new file
outVersion = values . second ;
// block size must be at least large enough for 3 max size keys and 2 max size values + overhead so 250k conservatively.
state int blockSize = BUGGIFY ? g_random - > randomInt ( 250e3 , 4e6 ) : CLIENT_KNOBS - > BACKUP_RANGEFILE_BLOCK_SIZE ;
Reference < IBackupFile > f = wait ( bc - > writeRangeFile ( outVersion , blockSize ) ) ;
outFile = f ;
2017-05-26 04:48:44 +08:00
2017-11-25 16:46:16 +08:00
// Initialize range file writer and write begin key
rangeFile = RangeFileWriter ( outFile , blockSize ) ;
Void _ = wait ( rangeFile . writeKey ( beginKey ) ) ;
2017-05-26 04:48:44 +08:00
}
2017-11-28 07:57:19 +08:00
// write kvData to file, update lastKey and key count
if ( values . first . size ( ) ! = 0 ) {
state size_t i = 0 ;
for ( ; i < values . first . size ( ) ; + + i ) {
Void _ = wait ( rangeFile . writeKV ( values . first [ i ] . key , values . first [ i ] . value ) ) ;
}
lastKey = values . first . back ( ) . key ;
nrKeys + = values . first . size ( ) ;
2017-05-26 04:48:44 +08:00
}
}
}
// Split [beginKey, endKey) along shard boundaries and add one sub-range task per
// slice, all joined to the same onDone completion future.
ACTOR static Future < Void > startBackupRangeInternal ( Reference < ReadYourWritesTransaction > tr , Reference < TaskBucket > taskBucket , Reference < FutureBucket > futureBucket , Reference < Task > task , Reference < TaskFuture > onDone ) {
tr - > setOption ( FDBTransactionOptions : : ACCESS_SYSTEM_KEYS ) ;
tr - > setOption ( FDBTransactionOptions : : LOCK_AWARE ) ;
2017-09-02 04:50:38 +08:00
state Key nextKey = Params . beginKey ( ) . get ( task ) ;
2017-11-28 07:57:19 +08:00
state Key endKey = Params . endKey ( ) . get ( task ) ;
state Standalone < VectorRef < KeyRef > > keys = wait ( getBlockOfShards ( tr , nextKey , endKey , CLIENT_KNOBS - > BACKUP_SHARD_TASK_LIMIT ) ) ;
2017-05-26 04:48:44 +08:00
std : : vector < Future < Key > > addTaskVector ;
for ( int idx = 0 ; idx < keys . size ( ) ; + + idx ) {
if ( nextKey ! = keys [ idx ] ) {
addTaskVector . push_back ( addTask ( tr , taskBucket , task , nextKey , keys [ idx ] , TaskCompletionKey : : joinWith ( onDone ) ) ) ;
2017-12-20 07:27:04 +08:00
TraceEvent ( " FileBackupRangeSplit " )
. detail ( " BackupUID " , BackupConfig ( task ) . getUid ( ) )
. detail ( " BeginKey " , Params . beginKey ( ) . get ( task ) . printable ( ) )
. detail ( " EndKey " , Params . endKey ( ) . get ( task ) . printable ( ) )
. detail ( " SliceBeginKey " , nextKey . printable ( ) )
. detail ( " SliceEndKey " , keys [ idx ] . printable ( ) ) ;
2017-05-26 04:48:44 +08:00
}
nextKey = keys [ idx ] ;
}
Void _ = wait ( waitForAll ( addTaskVector ) ) ;
2017-11-28 07:57:19 +08:00
if ( nextKey ! = endKey ) {
2017-05-26 04:48:44 +08:00
// Add task to cover nextKey to the end, using the priority of the current task
2017-11-28 07:57:19 +08:00
Key _ = wait ( addTask ( tr , taskBucket , task , nextKey , endKey , TaskCompletionKey : : joinWith ( onDone ) , Reference < TaskFuture > ( ) , task - > getPriority ( ) ) ) ;
2017-05-26 04:48:44 +08:00
}
return Void ( ) ;
}
// Finish phase: either spawn the sub-range tasks decided on in _execute, or signal
// this task's completion future, then mark the task finished in the task bucket.
ACTOR static Future < Void > _finish ( Reference < ReadYourWritesTransaction > tr , Reference < TaskBucket > taskBucket , Reference < FutureBucket > futureBucket , Reference < Task > task ) {
state Reference < TaskFuture > taskFuture = futureBucket - > unpack ( task - > params [ Task : : reservedTaskParamKeyDone ] ) ;
2017-09-02 04:50:38 +08:00
if ( Params . addBackupRangeTasks ( ) . get ( task ) ) {
2017-05-26 04:48:44 +08:00
Void _ = wait ( startBackupRangeInternal ( tr , taskBucket , futureBucket , task , taskFuture ) ) ;
}
else {
Void _ = wait ( taskFuture - > set ( tr , taskBucket ) ) ;
}
Void _ = wait ( taskBucket - > finish ( tr , task ) ) ;
return Void ( ) ;
}
} ;
StringRef BackupRangeTaskFunc : : name = LiteralStringRef ( " file_backup_range " ) ;
const uint32_t BackupRangeTaskFunc : : version = 1 ;
REGISTER_TASKFUNC ( BackupRangeTaskFunc ) ;
2017-12-18 06:29:57 +08:00
struct BackupSnapshotDispatchTask : BackupTaskFuncBase {
2017-12-14 17:44:38 +08:00
static StringRef name ;
static const uint32_t version ;
static struct {
// Set by Execute, used by Finish
2017-12-21 06:39:23 +08:00
static TaskParam < bool > snapshotFinished ( ) {
2017-12-14 17:44:38 +08:00
return LiteralStringRef ( __FUNCTION__ ) ;
}
// Set by Execute, used by Finish
2017-12-21 06:39:23 +08:00
static TaskParam < Version > nextDispatchVersion ( ) {
2017-12-14 17:44:38 +08:00
return LiteralStringRef ( __FUNCTION__ ) ;
}
} Params ;
StringRef getName ( ) const { return name ; } ;
Future < Void > execute ( Database cx , Reference < TaskBucket > tb , Reference < FutureBucket > fb , Reference < Task > task ) { return _execute ( cx , tb , fb , task ) ; } ;
Future < Void > finish ( Reference < ReadYourWritesTransaction > tr , Reference < TaskBucket > tb , Reference < FutureBucket > fb , Reference < Task > task ) { return _finish ( tr , tb , fb , task ) ; } ;
ACTOR static Future < Key > addTask ( Reference < ReadYourWritesTransaction > tr , Reference < TaskBucket > taskBucket , Reference < Task > parentTask , TaskCompletionKey completionKey , Reference < TaskFuture > waitFor = Reference < TaskFuture > ( ) , int priority = 1 , Version scheduledVersion = invalidVersion ) {
Key key = wait ( addBackupTask ( name ,
version ,
tr , taskBucket , completionKey ,
BackupConfig ( parentTask ) ,
waitFor ,
[ = ] ( Reference < Task > task ) {
if ( scheduledVersion ! = invalidVersion )
2017-12-21 05:48:31 +08:00
ReservedTaskParams : : scheduledVersion ( ) . set ( task , scheduledVersion ) ;
2017-12-14 17:44:38 +08:00
} ,
priority ) ) ;
return key ;
}
2017-12-15 17:39:50 +08:00
enum DispatchState { SKIP = 0 , DONE = 1 , NOT_DONE_MIN = 2 } ;
2017-12-14 17:44:38 +08:00
ACTOR static Future < Void > _execute ( Database cx , Reference < TaskBucket > taskBucket , Reference < FutureBucket > futureBucket , Reference < Task > task ) {
state Reference < FlowLock > lock ( new FlowLock ( CLIENT_KNOBS - > BACKUP_LOCK_BYTES ) ) ;
Void _ = wait ( checkTaskVersion ( cx , task , name , version ) ) ;
state Reference < ReadYourWritesTransaction > tr ( new ReadYourWritesTransaction ( cx ) ) ;
2017-12-15 17:39:50 +08:00
// The shard map will use 3 values classes. Exactly SKIP, exactly DONE, then any number >= NOT_DONE_MIN which will mean not done.
// This is to enable an efficient coalesce() call to squash adjacent ranges which are not yet finished to enable efficiently
// finding random database shards which are not done.
state int notDoneSequence = NOT_DONE_MIN ;
state KeyRangeMap < int > shardMap ( notDoneSequence + + , normalKeys . end ) ;
2017-12-14 17:44:38 +08:00
state Key beginKey = normalKeys . begin ;
// Read all shard boundaries and add them to the map
loop {
try {
tr - > setOption ( FDBTransactionOptions : : ACCESS_SYSTEM_KEYS ) ;
tr - > setOption ( FDBTransactionOptions : : LOCK_AWARE ) ;
state Future < Standalone < VectorRef < KeyRef > > > shardBoundaries = getBlockOfShards ( tr , beginKey , normalKeys . end , CLIENT_KNOBS - > TOO_MANY ) ;
Void _ = wait ( success ( shardBoundaries ) & & taskBucket - > keepRunning ( tr , task ) ) ;
if ( shardBoundaries . get ( ) . size ( ) = = 0 )
break ;
for ( auto & boundary : shardBoundaries . get ( ) ) {
2017-12-15 17:39:50 +08:00
shardMap . rawInsert ( boundary , notDoneSequence + + ) ;
2017-12-14 17:44:38 +08:00
}
beginKey = keyAfter ( shardBoundaries . get ( ) . back ( ) ) ;
tr - > reset ( ) ;
} catch ( Error & e ) {
Void _ = wait ( tr - > onError ( e ) ) ;
}
}
// Read required stuff from backup config
state BackupConfig config ( task ) ;
state Version recentReadVersion ;
state Version snapshotBeginVersion ;
2017-12-18 06:29:57 +08:00
state Version snapshotTargetEndVersion ;
2017-12-14 17:44:38 +08:00
state int64_t snapshotIntervalSeconds ;
state std : : vector < KeyRange > backupRanges ;
state Optional < Key > snapshotBatchFutureKey ;
state Reference < TaskFuture > snapshotBatchFuture ;
2017-12-15 17:39:50 +08:00
state Optional < int64_t > snapshotBatchSize ;
2017-12-14 17:44:38 +08:00
tr - > reset ( ) ;
loop {
try {
tr - > setOption ( FDBTransactionOptions : : ACCESS_SYSTEM_KEYS ) ;
tr - > setOption ( FDBTransactionOptions : : LOCK_AWARE ) ;
Void _ = wait ( store ( config . snapshotBeginVersion ( ) . getOrThrow ( tr ) , snapshotBeginVersion )
2017-12-18 06:29:57 +08:00
& & store ( config . snapshotTargetEndVersion ( ) . getOrThrow ( tr ) , snapshotTargetEndVersion )
2017-12-14 17:44:38 +08:00
& & store ( config . backupRanges ( ) . getOrThrow ( tr ) , backupRanges )
2017-12-15 17:39:50 +08:00
& & store ( config . snapshotIntervalSeconds ( ) . getOrThrow ( tr ) , snapshotIntervalSeconds )
2017-12-14 17:44:38 +08:00
// The next two parameters are optional
& & store ( config . snapshotBatchFuture ( ) . get ( tr ) , snapshotBatchFutureKey )
2017-12-15 17:39:50 +08:00
& & store ( config . snapshotBatchSize ( ) . get ( tr ) , snapshotBatchSize )
2017-12-14 17:44:38 +08:00
& & store ( tr - > getReadVersion ( ) , recentReadVersion )
& & taskBucket - > keepRunning ( tr , task ) ) ;
// If the snapshot batch future key does not exist, create it, set it, and commit
// Also initialize the target snapshot end version if it is not yet set.
if ( ! snapshotBatchFutureKey . present ( ) ) {
snapshotBatchFuture = futureBucket - > future ( tr ) ;
config . snapshotBatchFuture ( ) . set ( tr , snapshotBatchFuture - > pack ( ) ) ;
2017-12-15 17:39:50 +08:00
snapshotBatchSize = 0 ;
config . snapshotBatchSize ( ) . set ( tr , snapshotBatchSize . get ( ) ) ;
2017-12-14 17:44:38 +08:00
// The dispatch of this batch can take multiple separate executions if the executor fails
// so store a completion key for the dispatch finish() to set when dispatching the batch is done.
state TaskCompletionKey dispatchCompletionKey = TaskCompletionKey : : joinWith ( snapshotBatchFuture ) ;
Void _ = wait ( map ( dispatchCompletionKey . get ( tr , taskBucket ) , [ = ] ( Key const & k ) {
config . snapshotBatchDispatchDoneKey ( ) . set ( tr , k ) ;
return Void ( ) ;
} ) ) ;
Void _ = wait ( tr - > commit ( ) ) ;
}
else {
2017-12-15 17:39:50 +08:00
ASSERT ( snapshotBatchSize . present ( ) ) ;
2017-12-14 17:44:38 +08:00
// Batch future key exists in the config so create future from it
snapshotBatchFuture = Reference < TaskFuture > ( new TaskFuture ( futureBucket , snapshotBatchFutureKey . get ( ) ) ) ;
}
break ;
} catch ( Error & e ) {
Void _ = wait ( tr - > onError ( e ) ) ;
}
}
// Read all dispatched ranges
state std : : vector < std : : pair < Key , bool > > dispatchBoundaries ;
tr - > reset ( ) ;
beginKey = normalKeys . begin ;
loop {
try {
tr - > setOption ( FDBTransactionOptions : : ACCESS_SYSTEM_KEYS ) ;
tr - > setOption ( FDBTransactionOptions : : LOCK_AWARE ) ;
2017-12-18 06:29:57 +08:00
state Future < std : : vector < std : : pair < Key , bool > > > bounds = config . snapshotRangeDispatchMap ( ) . getRange ( tr , beginKey , keyAfter ( normalKeys . end ) , CLIENT_KNOBS - > TOO_MANY ) ;
2017-12-14 17:44:38 +08:00
Void _ = wait ( success ( bounds ) & & taskBucket - > keepRunning ( tr , task ) & & store ( tr - > getReadVersion ( ) , recentReadVersion ) ) ;
if ( bounds . get ( ) . empty ( ) )
break ;
dispatchBoundaries . reserve ( dispatchBoundaries . size ( ) + bounds . get ( ) . size ( ) ) ;
dispatchBoundaries . insert ( dispatchBoundaries . end ( ) , bounds . get ( ) . begin ( ) , bounds . get ( ) . end ( ) ) ;
beginKey = keyAfter ( bounds . get ( ) . back ( ) . first ) ;
tr - > reset ( ) ;
} catch ( Error & e ) {
Void _ = wait ( tr - > onError ( e ) ) ;
}
}
// Set anything inside a dispatched range to DONE.
// Also ensure that the boundary value are true, false, [true, false]...
if ( dispatchBoundaries . size ( ) > 0 ) {
bool lastValue = false ;
Key lastKey ;
for ( auto & boundary : dispatchBoundaries ) {
// Values must alternate
ASSERT ( boundary . second = = ! lastValue ) ;
// If this was the end of a dispatched range
if ( ! boundary . second ) {
// Ensure that the dispatched boundaries exist AND set all ranges in the dispatched boundary to DONE.
for ( auto & range : shardMap . modify ( KeyRangeRef ( lastKey , boundary . first ) ) ) {
range . value ( ) = DONE ;
}
}
lastValue = boundary . second ;
lastKey = boundary . first ;
}
ASSERT ( lastValue = = false ) ;
}
// Set anything outside the backup ranges to SKIP. We can use insert() here instead of modify()
// because it's OK to delete shard boundaries in the skipped ranges.
if ( backupRanges . size ( ) > 0 ) {
shardMap . insert ( KeyRangeRef ( normalKeys . begin , backupRanges . front ( ) . begin ) , SKIP ) ;
for ( int i = 0 ; i < backupRanges . size ( ) - 1 ; + + i ) {
shardMap . insert ( KeyRangeRef ( backupRanges [ i ] . end , backupRanges [ i + 1 ] . begin ) , SKIP ) ;
}
shardMap . insert ( KeyRangeRef ( backupRanges . back ( ) . end , normalKeys . end ) , SKIP ) ;
}
state int countShardsDone = 0 ;
state int countShardsNotDone = 0 ;
// Scan through the shard map, counting the DONE and NOT_DONE shards.
for ( auto & range : shardMap . ranges ( ) ) {
if ( range . value ( ) = = DONE ) {
+ + countShardsDone ;
}
2017-12-15 17:39:50 +08:00
else if ( range . value ( ) > = NOT_DONE_MIN )
2017-12-14 17:44:38 +08:00
+ + countShardsNotDone ;
}
2017-12-15 17:39:50 +08:00
shardMap . coalesce ( normalKeys ) ;
2017-12-14 17:44:38 +08:00
// In this context "all" refers to all of the shards relevant for this particular backup
state int countAllShards = countShardsDone + countShardsNotDone ;
if ( countShardsNotDone = = 0 ) {
2017-12-18 06:29:57 +08:00
TraceEvent ( " FileBackupSnapshotDispatchTaskFinished " )
2017-12-14 17:44:38 +08:00
. detail ( " BackupUID " , config . getUid ( ) )
. detail ( " AllShards " , countAllShards )
. detail ( " ShardsDone " , countShardsDone )
. detail ( " ShardsNotDone " , countShardsNotDone )
. detail ( " SnapshotBeginVersion " , snapshotBeginVersion )
2017-12-18 06:29:57 +08:00
. detail ( " SnapshotTargetEndVersion " , snapshotTargetEndVersion )
2017-12-14 17:44:38 +08:00
. detail ( " CurrentVersion " , recentReadVersion )
. detail ( " SnapshotIntervalSeconds " , snapshotIntervalSeconds ) ;
Params . snapshotFinished ( ) . set ( task , true ) ;
return Void ( ) ;
}
// Calculate number of shards that should be done before the next interval end
state Version nextDispatchVersion = recentReadVersion + CLIENT_KNOBS - > CORE_VERSIONSPERSECOND * ( g_network - > isSimulated ( ) ? ( snapshotIntervalSeconds / 5.0 ) : CLIENT_KNOBS - > BACKUP_SNAPSHOT_DISPATCH_INTERVAL_SEC ) ;
Params . nextDispatchVersion ( ) . set ( task , nextDispatchVersion ) ;
// timeElapsed is between 0 and 1 and represents what portion of the shards we should have completed by now
2017-12-18 16:13:25 +08:00
double timeElapsed ;
if ( snapshotTargetEndVersion > snapshotBeginVersion )
timeElapsed = std : : max ( 1.0 , ( double ) ( nextDispatchVersion - snapshotBeginVersion ) / ( snapshotTargetEndVersion - snapshotBeginVersion ) ) ;
else
timeElapsed = 1.0 ;
2017-12-14 17:44:38 +08:00
state int countExpectedShardsDone = std : : min < int > ( countAllShards , countAllShards * timeElapsed ) ;
state int countShardsToDispatch = std : : max < int > ( 0 , countExpectedShardsDone - countShardsDone ) ;
2017-12-18 06:29:57 +08:00
TraceEvent ( " FileBackupSnapshotDispatchTask1 " )
2017-12-14 17:44:38 +08:00
. detail ( " BackupUID " , config . getUid ( ) )
. detail ( " AllShards " , countAllShards )
. detail ( " ShardsDone " , countShardsDone )
. detail ( " ShardsNotDone " , countShardsNotDone )
. detail ( " ExpectedShardsDone " , countExpectedShardsDone )
. detail ( " ShardsToDispatch " , countShardsToDispatch )
. detail ( " SnapshotBeginVersion " , snapshotBeginVersion )
2017-12-18 06:29:57 +08:00
. detail ( " SnapshotTargetEndVersion " , snapshotTargetEndVersion )
2017-12-14 17:44:38 +08:00
. detail ( " NextDispatchVersion " , nextDispatchVersion )
. detail ( " CurrentVersion " , recentReadVersion )
. detail ( " TimeElapsed " , timeElapsed )
. detail ( " SnapshotIntervalSeconds " , snapshotIntervalSeconds ) ;
2017-12-19 07:56:57 +08:00
// Dispatch random shards to catch up to the expected progress
2017-12-14 17:44:38 +08:00
while ( countShardsToDispatch > 0 ) {
// First select ranges to add
state std : : vector < KeyRange > rangesToAdd ;
2017-12-19 07:56:57 +08:00
// Limit number of tasks added per transaction
int taskBatchSize = BUGGIFY ? g_random - > randomInt ( 1 , countShardsToDispatch + 1 ) : CLIENT_KNOBS - > RESTORE_DISPATCH_ADDTASK_SIZE ;
2017-12-14 17:44:38 +08:00
int added = 0 ;
2017-12-19 07:56:57 +08:00
2017-12-14 17:44:38 +08:00
while ( countShardsToDispatch > 0 & & added < taskBatchSize & & shardMap . size ( ) > 0 ) {
// Get a random range.
auto it = shardMap . randomRange ( ) ;
// Find a NOT_DONE range and add it to rangesToAdd
while ( 1 ) {
2017-12-15 17:39:50 +08:00
if ( it - > value ( ) > = NOT_DONE_MIN ) {
2017-12-14 17:44:38 +08:00
rangesToAdd . push_back ( it - > range ( ) ) ;
it - > value ( ) = DONE ;
shardMap . coalesce ( Key ( it - > begin ( ) ) ) ;
+ + added ;
+ + countShardsDone ;
- - countShardsToDispatch ;
- - countShardsNotDone ;
break ;
}
if ( it - > end ( ) = = shardMap . mapEnd )
break ;
+ + it ;
}
}
2017-12-15 17:39:50 +08:00
state int64_t oldBatchSize = snapshotBatchSize . get ( ) ;
2017-12-18 13:01:31 +08:00
state int64_t newBatchSize = oldBatchSize + rangesToAdd . size ( ) ;
2017-12-19 07:56:57 +08:00
// Now add the selected ranges in a single transaction.
2017-12-14 17:44:38 +08:00
tr - > reset ( ) ;
loop {
try {
tr - > setOption ( FDBTransactionOptions : : ACCESS_SYSTEM_KEYS ) ;
tr - > setOption ( FDBTransactionOptions : : LOCK_AWARE ) ;
// For each range, make sure it isn't set in the dispatched range map.
state std : : vector < Future < Optional < bool > > > beginReads ;
state std : : vector < Future < Optional < bool > > > endReads ;
for ( auto & range : rangesToAdd ) {
2017-12-18 06:29:57 +08:00
beginReads . push_back ( config . snapshotRangeDispatchMap ( ) . get ( tr , range . begin ) ) ;
endReads . push_back ( config . snapshotRangeDispatchMap ( ) . get ( tr , range . end ) ) ;
2017-12-14 17:44:38 +08:00
}
2017-12-15 17:39:50 +08:00
Void _ = wait ( store ( config . snapshotBatchSize ( ) . getOrThrow ( tr ) , snapshotBatchSize . get ( ) )
& & waitForAll ( beginReads ) & & waitForAll ( endReads ) & & taskBucket - > keepRunning ( tr , task ) ) ;
2017-12-18 13:01:31 +08:00
// Snapshot batch size should be either oldBatchSize or newBatchSize. If new, this transaction is already done.
if ( snapshotBatchSize . get ( ) = = newBatchSize ) {
break ;
2017-12-15 17:39:50 +08:00
}
else {
2017-12-18 13:01:31 +08:00
ASSERT ( snapshotBatchSize . get ( ) = = oldBatchSize ) ;
config . snapshotBatchSize ( ) . set ( tr , newBatchSize ) ;
snapshotBatchSize = newBatchSize ;
}
2017-12-14 17:44:38 +08:00
state std : : vector < Future < Void > > addTaskFutures ;
for ( int i = 0 ; i < beginReads . size ( ) ; + + i ) {
KeyRange & range = rangesToAdd [ i ] ;
// This loop might have made changes to begin or end boundaries in a prior
// iteration. If so, the updated values exist in the RYW cache so re-read both entries.
2017-12-18 06:29:57 +08:00
Optional < bool > beginValue = config . snapshotRangeDispatchMap ( ) . get ( tr , range . begin ) . get ( ) ;
Optional < bool > endValue = config . snapshotRangeDispatchMap ( ) . get ( tr , range . end ) . get ( ) ;
2017-12-14 17:44:38 +08:00
ASSERT ( ! beginValue . present ( ) | | ! endValue . present ( ) | | beginValue ! = endValue ) ;
// If begin is present, it must be a range end so value must be false
// If end is present, it must be a range begin so value must be true
if ( ( ! beginValue . present ( ) | | ! beginValue . get ( ) )
& & ( ! endValue . present ( ) | | endValue . get ( ) ) )
{
if ( beginValue . present ( ) ) {
2017-12-18 06:29:57 +08:00
config . snapshotRangeDispatchMap ( ) . erase ( tr , range . begin ) ;
2017-12-14 17:44:38 +08:00
}
else {
2017-12-18 06:29:57 +08:00
config . snapshotRangeDispatchMap ( ) . set ( tr , range . begin , true ) ;
2017-12-14 17:44:38 +08:00
}
if ( endValue . present ( ) ) {
2017-12-18 06:29:57 +08:00
config . snapshotRangeDispatchMap ( ) . erase ( tr , range . end ) ;
2017-12-14 17:44:38 +08:00
}
else {
2017-12-18 06:29:57 +08:00
config . snapshotRangeDispatchMap ( ) . set ( tr , range . end , false ) ;
2017-12-14 17:44:38 +08:00
}
// Choose a random version between now and the next dispatch version at which to start this range task
Version randomVersion = recentReadVersion + g_random - > random01 ( ) * ( nextDispatchVersion - recentReadVersion ) ;
addTaskFutures . push_back ( success ( BackupRangeTaskFunc : : addTask ( tr , taskBucket , task , range . begin , range . end , TaskCompletionKey : : joinWith ( snapshotBatchFuture ) , Reference < TaskFuture > ( ) , 0 , randomVersion ) ) ) ;
2017-12-20 07:27:04 +08:00
TraceEvent ( " FileBackupSnapshotRangeDispatched " )
. detail ( " BackupUID " , config . getUid ( ) )
. detail ( " CurrentVersion " , recentReadVersion )
. detail ( " ScheduledVersion " , randomVersion )
. detail ( " BeginKey " , range . begin . printable ( ) )
. detail ( " EndKey " , range . end . printable ( ) ) ;
2017-12-14 17:44:38 +08:00
}
2017-12-18 15:22:18 +08:00
else {
// This shouldn't happen because if the transaction was already done or if another execution
// of this task is making progress it should have been detected above.
ASSERT ( false ) ;
}
2017-12-14 17:44:38 +08:00
}
Void _ = wait ( waitForAll ( addTaskFutures ) ) ;
Void _ = wait ( tr - > commit ( ) ) ;
break ;
} catch ( Error & e ) {
Void _ = wait ( tr - > onError ( e ) ) ;
}
}
}
2017-12-15 17:39:50 +08:00
if ( countShardsNotDone = = 0 ) {
2017-12-18 06:29:57 +08:00
TraceEvent ( " FileBackupSnapshotDispatchTaskFinished " )
2017-12-15 17:39:50 +08:00
. detail ( " BackupUID " , config . getUid ( ) )
. detail ( " AllShards " , countAllShards )
. detail ( " ShardsDone " , countShardsDone )
. detail ( " ShardsNotDone " , countShardsNotDone )
. detail ( " SnapshotBeginVersion " , snapshotBeginVersion )
2017-12-18 06:29:57 +08:00
. detail ( " SnapshotTargetEndVersion " , snapshotTargetEndVersion )
2017-12-15 17:39:50 +08:00
. detail ( " CurrentVersion " , recentReadVersion )
. detail ( " SnapshotIntervalSeconds " , snapshotIntervalSeconds ) ;
Params . snapshotFinished ( ) . set ( task , true ) ;
}
2017-12-14 17:44:38 +08:00
return Void ( ) ;
}
2017-12-18 06:29:57 +08:00
	// This function is just a wrapper for BackupSnapshotManifest::addTask() which is defined below.
	// The BackupSnapshotDispatchTask and BackupSnapshotManifest tasks reference each other so in order to keep their execute and finish phases
	// defined together inside their class definitions this wrapper is declared here but defined after BackupSnapshotManifest is defined.
	static Future<Key> addSnapshotManifestTask(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> taskBucket, Reference<Task> parentTask, TaskCompletionKey completionKey, Reference<TaskFuture> waitFor = Reference<TaskFuture>());
2017-12-14 17:44:38 +08:00
	// Finish phase of the snapshot dispatch task: clear this batch's bookkeeping keys,
	// schedule the follow-on task (either the snapshot manifest writer if the snapshot is
	// complete, or another dispatch task), and then signal that this batch is done.
	ACTOR static Future<Void> _finish(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> taskBucket, Reference<FutureBucket> futureBucket, Reference<Task> task) {
		state BackupConfig config(task);

		// Get the batch future and dispatch done keys, then clear them.
		// getOrThrow() is used because both keys must exist at this point.
		state Key snapshotBatchFutureKey;
		state Key snapshotBatchDispatchDoneKey;
		Void _ = wait( store(config.snapshotBatchFuture().getOrThrow(tr), snapshotBatchFutureKey)
					&& store(config.snapshotBatchDispatchDoneKey().getOrThrow(tr), snapshotBatchDispatchDoneKey));

		state Reference<TaskFuture> snapshotBatchFuture = futureBucket->unpack(snapshotBatchFutureKey);
		state Reference<TaskFuture> snapshotBatchDispatchDoneFuture = futureBucket->unpack(snapshotBatchDispatchDoneKey);

		// Clear the batch state so the next dispatch task starts a fresh batch.
		config.snapshotBatchFuture().clear(tr);
		config.snapshotBatchDispatchDoneKey().clear(tr);
		config.snapshotBatchSize().clear(tr);

		state Reference<TaskFuture> snapshotFinishedFuture = task->getDoneFuture(futureBucket);

		// If the snapshot is finished, the next task is to write a snapshot manifest, otherwise it's another snapshot dispatch task.
		// In either case, the task should wait for snapshotBatchFuture.
		// The snapshot done key, passed to the current task, is also passed on.
		if(Params.snapshotFinished().getOrDefault(task, false)) {
			Void _ = wait(success(addSnapshotManifestTask(tr, taskBucket, task, TaskCompletionKey::signal(snapshotFinishedFuture), snapshotBatchFuture)));
		}
		else {
			Void _ = wait(success(addTask(tr, taskBucket, task, TaskCompletionKey::signal(snapshotFinishedFuture), snapshotBatchFuture, 1, Params.nextDispatchVersion().get(task))));
		}

		// This snapshot batch is finished, so set the batch done future.
		Void _ = wait(snapshotBatchDispatchDoneFuture->set(tr, taskBucket));

		Void _ = wait(taskBucket->finish(tr, task));

		return Void();
	}
} ;
2017-12-18 06:29:57 +08:00
// Static identity and registration of the snapshot dispatch task with the task bucket system.
StringRef BackupSnapshotDispatchTask::name = LiteralStringRef("file_backup_snapshot_dispatch");
const uint32_t BackupSnapshotDispatchTask::version = 1;
REGISTER_TASKFUNC(BackupSnapshotDispatchTask);
2017-05-26 04:48:44 +08:00
2017-11-16 05:33:09 +08:00
// Task which backs up the mutation log for versions [beginVersion, endVersion).
// The execute phase reads the committed mutation log key ranges from the database and writes
// them to a log file in the backup container.  If the version range spans too many log key
// ranges, execution is skipped and the finish phase instead splits the work into child tasks.
struct BackupLogRangeTaskFunc : BackupTaskFuncBase {
	static StringRef name;
	static const uint32_t version;

	static struct {
		// When true, the finish phase spawns child log range tasks instead of treating the range as written.
		static TaskParam<bool> addBackupLogRangeTasks() {
			return LiteralStringRef(__FUNCTION__);
		}
		// Size in bytes of the log file written by the execute phase (absent if no file was written).
		static TaskParam<int64_t> fileSize() {
			return LiteralStringRef(__FUNCTION__);
		}
		// First version (inclusive) of the log data covered by this task.
		static TaskParam<Version> beginVersion() {
			return LiteralStringRef(__FUNCTION__);
		}
		// Last version (exclusive) of the log data covered by this task.
		static TaskParam<Version> endVersion() {
			return LiteralStringRef(__FUNCTION__);
		}
	} Params;

	StringRef getName() const { return name; };

	Future<Void> execute(Database cx, Reference<TaskBucket> tb, Reference<FutureBucket> fb, Reference<Task> task) { return _execute(cx, tb, fb, task); };
	Future<Void> finish(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> tb, Reference<FutureBucket> fb, Reference<Task> task) { return _finish(tr, tb, fb, task); };

	// Execute phase: wait until the database has advanced past endVersion, then stream the
	// mutation log ranges for [beginVersion, endVersion) into a single log file in the backup
	// container.  Record the file size in Params for the finish phase to count.
	ACTOR static Future<Void> _execute(Database cx, Reference<TaskBucket> taskBucket, Reference<FutureBucket> futureBucket, Reference<Task> task) {
		// Flow lock limits the amount of read result data buffered in memory at one time.
		state Reference<FlowLock> lock(new FlowLock(CLIENT_KNOBS->BACKUP_LOCK_BYTES));

		Void _ = wait(checkTaskVersion(cx, task, BackupLogRangeTaskFunc::name, BackupLogRangeTaskFunc::version));

		state Version beginVersion = Params.beginVersion().get(task);
		state Version endVersion = Params.endVersion().get(task);

		state BackupConfig config(task);
		state Reference<IBackupContainer> bc;

		state Reference<ReadYourWritesTransaction> tr(new ReadYourWritesTransaction(cx));
		loop {
			tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
			tr->setOption(FDBTransactionOptions::LOCK_AWARE);

			// Wait for the read version to pass endVersion
			try {
				Void _ = wait(taskBucket->keepRunning(tr, task));

				if(!bc) {
					// Backup container must be present if we're still here
					Reference<IBackupContainer> _bc = wait(config.backupContainer().getOrThrow(tr));
					bc = _bc;
				}

				Version currentVersion = tr->getReadVersion().get();
				if(endVersion < currentVersion)
					break;

				// Sleep for approximately the time it will take for the database to reach endVersion.
				Void _ = wait(delay(std::max(CLIENT_KNOBS->BACKUP_RANGE_MINWAIT, (double) (endVersion - currentVersion) / CLIENT_KNOBS->CORE_VERSIONSPERSECOND)));
				tr->reset();
			}
			catch (Error &e) {
				Void _ = wait(tr->onError(e));
			}
		}

		state Standalone<VectorRef<KeyRangeRef>> ranges = getLogRanges(beginVersion, endVersion, config.getUidAsKey());

		// Too many ranges to do in one task; tell the finish phase to split into subtasks instead.
		if (ranges.size() > CLIENT_KNOBS->BACKUP_MAX_LOG_RANGES) {
			Params.addBackupLogRangeTasks().set(task, true);
			return Void();
		}

		// Block size must be at least large enough for 1 max size key, 1 max size value, and overhead, so conservatively 125k.
		state int blockSize = BUGGIFY ? g_random->randomInt(125e3, 4e6) : CLIENT_KNOBS->BACKUP_LOGFILE_BLOCK_SIZE;
		state Reference<IBackupFile> outFile = wait(bc->writeLogFile(beginVersion, endVersion, blockSize));
		state LogFileWriter logFile(outFile, blockSize);

		state size_t idx;
		state PromiseStream<RangeResultWithVersion> results;
		state std::vector<Future<Void>> rc;

		// Start a committed read of each log key range; results are funneled into one stream.
		for (auto &range : ranges) {
			rc.push_back(readCommitted(cx, results, lock, range, false, true, true));
		}

		// When all reads are done (or one fails), terminate the results stream with either
		// the error or end_of_stream so the consumer loop below exits.
		state Future<Void> sendEOS = map(errorOr(waitForAll(rc)), [=](ErrorOr<Void> const &result) {
			if(result.isError())
				results.sendError(result.getError());
			else
				results.sendError(end_of_stream());
			return Void();
		});

		try {
			loop {
				state RangeResultWithVersion r = waitNext(results.getFuture());
				lock->release(r.first.expectedSize());

				state int i = 0;
				for (; i < r.first.size(); ++i) {
					// Remove the backupLogPrefix + UID bytes from the key
					Void _ = wait(logFile.writeKV(r.first[i].key.substr(backupLogPrefixBytes + 16), r.first[i].value));
				}
			}
		} catch (Error &e) {
			if(e.code() == error_code_actor_cancelled)
				throw;

			if (e.code() != error_code_end_of_stream) {
				state Error err = e;
				Void _ = wait(config.logError(cx, err, format("Failed to write to file `%s'", outFile->getFileName().c_str())));
				throw err;
			}
		}

		// Make sure this task is still alive, if it's not then the data read above could be incomplete.
		Void _ = wait(taskBucket->keepRunning(cx, task));

		Void _ = wait(outFile->finish());

		TraceEvent("FileBackupWroteLogFile")
			.detail("BackupUID", config.getUid())
			.detail("BackupURL", bc->getURL())
			.detail("Size", outFile->size())
			.detail("BeginVersion", beginVersion)
			.detail("EndVersion", endVersion);

		Params.fileSize().set(task, outFile->size());

		return Void();
	}

	// Create a new BackupLogRangeTaskFunc task for [beginVersion, endVersion), returning its key.
	ACTOR static Future<Key> addTask(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> taskBucket, Reference<Task> parentTask, Version beginVersion, Version endVersion, TaskCompletionKey completionKey, Reference<TaskFuture> waitFor = Reference<TaskFuture>()) {
		Key key = wait(addBackupTask(BackupLogRangeTaskFunc::name,
									 BackupLogRangeTaskFunc::version,
									 tr, taskBucket, completionKey,
									 BackupConfig(parentTask),
									 waitFor,
									 [=](Reference<Task> task) {
										 Params.beginVersion().set(task, beginVersion);
										 Params.endVersion().set(task, endVersion);
										 Params.addBackupLogRangeTasks().set(task, false);
									 }));
		return key;
	}

	// Split [beginVersion, endVersion) into child log range tasks of at most
	// BACKUP_MAX_LOG_RANGES version blocks each, all joined with taskFuture.
	// If the BACKUP_SHARD_TASK_LIMIT is reached the remainder becomes one final task.
	ACTOR static Future<Void> startBackupLogRangeInternal(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> taskBucket, Reference<FutureBucket> futureBucket, Reference<Task> task, Reference<TaskFuture> taskFuture, Version beginVersion, Version endVersion) {
		tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
		tr->setOption(FDBTransactionOptions::LOCK_AWARE);

		std::vector<Future<Key>> addTaskVector;
		int tasks = 0;
		for (int64_t vblock = beginVersion / CLIENT_KNOBS->LOG_RANGE_BLOCK_SIZE; vblock < (endVersion + CLIENT_KNOBS->LOG_RANGE_BLOCK_SIZE - 1) / CLIENT_KNOBS->LOG_RANGE_BLOCK_SIZE; vblock += CLIENT_KNOBS->BACKUP_MAX_LOG_RANGES) {
			Version bv = std::max(beginVersion, vblock * CLIENT_KNOBS->LOG_RANGE_BLOCK_SIZE);

			// Too many tasks for this transaction; cover the rest of the range with one task and stop.
			if (tasks >= CLIENT_KNOBS->BACKUP_SHARD_TASK_LIMIT) {
				addTaskVector.push_back(addTask(tr, taskBucket, task, bv, endVersion, TaskCompletionKey::joinWith(taskFuture)));
				break;
			}

			Version ev = std::min(endVersion, (vblock + CLIENT_KNOBS->BACKUP_MAX_LOG_RANGES) * CLIENT_KNOBS->LOG_RANGE_BLOCK_SIZE);
			addTaskVector.push_back(addTask(tr, taskBucket, task, bv, ev, TaskCompletionKey::joinWith(taskFuture)));
			tasks++;
		}

		Void _ = wait(waitForAll(addTaskVector));
		return Void();
	}

	// Finish phase: count the written file's bytes, either spawn child tasks (if execute
	// deferred the work) or signal completion, and clear the mutation log data that was
	// successfully written to the backup container.
	ACTOR static Future<Void> _finish(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> taskBucket, Reference<FutureBucket> futureBucket, Reference<Task> task) {
		state Version beginVersion = Params.beginVersion().get(task);
		state Version endVersion = Params.endVersion().get(task);
		state Reference<TaskFuture> taskFuture = futureBucket->unpack(task->params[Task::reservedTaskParamKeyDone]);

		if(Params.fileSize().exists(task)) {
			BackupConfig(task).logBytesWritten().atomicOp(tr, Params.fileSize().get(task), MutationRef::AddValue);
		}

		if (Params.addBackupLogRangeTasks().get(task)) {
			// Execute phase did not write anything; split into subtasks and clear nothing here.
			Void _ = wait(startBackupLogRangeInternal(tr, taskBucket, futureBucket, task, taskFuture, beginVersion, endVersion));
			endVersion = beginVersion;
		} else {
			Void _ = wait(taskFuture->set(tr, taskBucket));
		}

		// Clear the backed-up portion of the mutation log from the database.
		if(endVersion > beginVersion) {
			Standalone<VectorRef<KeyRangeRef>> ranges = getLogRanges(beginVersion, endVersion, task->params[FileBackupAgent::keyConfigLogUid]);
			for (auto & rng : ranges)
				tr->clear(rng);
		}

		Void _ = wait(taskBucket->finish(tr, task));
		return Void();
	}
};
// Static identity and registration of the log range backup task with the task bucket system.
StringRef BackupLogRangeTaskFunc::name = LiteralStringRef("file_backup_log_range");
const uint32_t BackupLogRangeTaskFunc::version = 1;
REGISTER_TASKFUNC(BackupLogRangeTaskFunc);
2017-12-18 06:29:57 +08:00
// Task which drives the continuous mutation log backup: each finish phase schedules one
// BackupLogRangeTaskFunc batch for [beginVersion, endVersion) and another dispatch task
// to run after that batch completes.  The chain terminates (signaling its done future)
// once the backup is restorable and stopWhenDone is set.
struct BackupLogsDispatchTask : BackupTaskFuncBase {
	static StringRef name;
	static const uint32_t version;

	static struct {
		// First version (inclusive) of the next log batch to be backed up.
		static TaskParam<Version> beginVersion() {
			return LiteralStringRef(__FUNCTION__);
		}
	} Params;

	ACTOR static Future<Void> _finish(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> taskBucket, Reference<FutureBucket> futureBucket, Reference<Task> task) {
		Void _ = wait(checkTaskVersion(tr->getDatabase(), task, BackupLogsDispatchTask::name, BackupLogsDispatchTask::version));

		tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
		tr->setOption(FDBTransactionOptions::LOCK_AWARE);

		state Reference<TaskFuture> onDone = task->getDoneFuture(futureBucket);
		state Version beginVersion = Params.beginVersion().get(task);
		state BackupConfig config(task);

		// All logs before beginVersion have been backed up by prior batches.
		config.latestLogEndVersion().set(tr, beginVersion);

		state bool stopWhenDone;
		state Optional<Version> restorableVersion;
		state EBackupState backupState;

		Void _ = wait(store(config.stopWhenDone().getOrThrow(tr), stopWhenDone)
					&& store(config.getLatestRestorableVersion(tr), restorableVersion)
					&& store(config.stateEnum().getOrThrow(tr), backupState));

		// If the backup is restorable but the state is not differential then set state to differential
		if(restorableVersion.present() && backupState != BackupAgentBase::STATE_DIFFERENTIAL)
			config.stateEnum().set(tr, BackupAgentBase::STATE_DIFFERENTIAL);

		// If stopWhenDone is set and there is a restorable version, set the done future and do not create further tasks.
		if(stopWhenDone && restorableVersion.present()) {
			Void _ = wait(onDone->set(tr, taskBucket) && taskBucket->finish(tr, task));
			return Void();
		}

		// End the batch at least one version past the current read version, but use a larger
		// range when possible so each log range task covers up to BACKUP_MAX_LOG_RANGES blocks.
		state Version endVersion = std::max<Version>(tr->getReadVersion().get() + 1, beginVersion + (CLIENT_KNOBS->BACKUP_MAX_LOG_RANGES - 1) * CLIENT_KNOBS->LOG_RANGE_BLOCK_SIZE);

		// Randomly sampled trace event so large dispatch batches are occasionally logged.
		if(endVersion - beginVersion > g_random->randomInt64(0, CLIENT_KNOBS->BACKUP_VERSION_DELAY)) {
			TraceEvent("FileBackupLogDispatch").detail("BeginVersion", beginVersion).detail("EndVersion", endVersion).detail("RestorableVersion", restorableVersion.orDefault(-1));
		}

		state Reference<TaskFuture> logDispatchBatchFuture = futureBucket->future(tr);

		// Add the log range task for this batch and the next logs dispatch task which will run after this batch is done
		Key _ = wait(BackupLogRangeTaskFunc::addTask(tr, taskBucket, task, beginVersion, endVersion, TaskCompletionKey::joinWith(logDispatchBatchFuture)));
		Key _ = wait(BackupLogsDispatchTask::addTask(tr, taskBucket, task, endVersion, TaskCompletionKey::signal(onDone), logDispatchBatchFuture));

		Void _ = wait(taskBucket->finish(tr, task));
		return Void();
	}

	// Create a new BackupLogsDispatchTask whose batch starts at beginVersion, returning its key.
	ACTOR static Future<Key> addTask(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> taskBucket, Reference<Task> parentTask, Version beginVersion, TaskCompletionKey completionKey, Reference<TaskFuture> waitFor = Reference<TaskFuture>()) {
		Key key = wait(addBackupTask(BackupLogsDispatchTask::name,
									 BackupLogsDispatchTask::version,
									 tr, taskBucket, completionKey,
									 BackupConfig(parentTask),
									 waitFor,
									 [=](Reference<Task> task) {
										 Params.beginVersion().set(task, beginVersion);
									 }));
		return key;
	}

	StringRef getName() const { return name; };

	// All work happens in the finish phase; there is no execute phase.
	Future<Void> execute(Database cx, Reference<TaskBucket> tb, Reference<FutureBucket> fb, Reference<Task> task) { return Void(); };
	Future<Void> finish(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> tb, Reference<FutureBucket> fb, Reference<Task> task) { return _finish(tr, tb, fb, task); };
};
2017-12-18 06:29:57 +08:00
// Static identity and registration of the logs dispatch task with the task bucket system.
StringRef BackupLogsDispatchTask::name = LiteralStringRef("file_backup_logs");
const uint32_t BackupLogsDispatchTask::version = 1;
REGISTER_TASKFUNC(BackupLogsDispatchTask);
2017-05-26 04:48:44 +08:00
2017-11-16 05:33:09 +08:00
// Final task of a backup that stops when done: removes the backup's mutation log
// configuration and buffered log data from the database and marks the backup completed.
struct FinishedFullBackupTaskFunc : BackupTaskFuncBase {
	static StringRef name;
	static const uint32_t version;

	StringRef getName() const { return name; };

	ACTOR static Future<Void> _finish(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> taskBucket, Reference<FutureBucket> futureBucket, Reference<Task> task) {
		Void _ = wait(checkTaskVersion(tr->getDatabase(), task, FinishedFullBackupTaskFunc::name, FinishedFullBackupTaskFunc::version));

		state BackupConfig backup(task);
		state UID uid = backup.getUid();

		// Prefixes of this backup's log range configuration and its buffered mutation log data.
		state Key configPath = uidPrefixKey(logRangesRange.begin, uid);
		state Key logsPath = uidPrefixKey(backupLogKeys.begin, uid);

		tr->setOption(FDBTransactionOptions::COMMIT_ON_FIRST_PROXY);
		tr->clear(KeyRangeRef(configPath, strinc(configPath)));
		tr->clear(KeyRangeRef(logsPath, strinc(logsPath)));

		backup.stateEnum().set(tr, EBackupState::STATE_COMPLETED);

		Void _ = wait(taskBucket->finish(tr, task));
		return Void();
	}

	// Create a new FinishedFullBackupTaskFunc task, returning its key.
	ACTOR static Future<Key> addTask(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> taskBucket, Reference<Task> parentTask, TaskCompletionKey completionKey, Reference<TaskFuture> waitFor = Reference<TaskFuture>()) {
		Key key = wait(addBackupTask(FinishedFullBackupTaskFunc::name,
									 FinishedFullBackupTaskFunc::version,
									 tr, taskBucket, completionKey,
									 BackupConfig(parentTask), waitFor));
		return key;
	}

	// All work happens in the finish phase; there is no execute phase.
	Future<Void> execute(Database cx, Reference<TaskBucket> tb, Reference<FutureBucket> fb, Reference<Task> task) { return Void(); };
	Future<Void> finish(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> tb, Reference<FutureBucket> fb, Reference<Task> task) { return _finish(tr, tb, fb, task); };
};
// Static identity and registration of the backup completion task with the task bucket system.
StringRef FinishedFullBackupTaskFunc::name = LiteralStringRef("file_finished_full_backup");
const uint32_t FinishedFullBackupTaskFunc::version = 1;
REGISTER_TASKFUNC(FinishedFullBackupTaskFunc);
2017-12-18 06:29:57 +08:00
// TODO: Register a task that will finish/delete any tasks of these types:
//LiteralStringRef("file_backup_diff_logs");
//LiteralStringRef("file_finish_full_backup");

// Task which writes the keyspace snapshot manifest: a file in the backup container listing
// a non-overlapping set of range files that together cover the entire backed-up keyspace.
// Its finish phase then either starts the next snapshot or signals snapshot completion.
struct BackupSnapshotManifest : BackupTaskFuncBase {
	static StringRef name;
	static const uint32_t version;

	static struct {
		// Greatest version of any range file included in the manifest; set by the execute phase.
		static TaskParam<Version> endVersion() { return LiteralStringRef(__FUNCTION__); }
	} Params;

	ACTOR static Future<Void> _execute(Database cx, Reference<TaskBucket> taskBucket, Reference<FutureBucket> futureBucket, Reference<Task> task) {
		state BackupConfig config(task);
		state Reference<IBackupContainer> bc;

		state Reference<ReadYourWritesTransaction> tr(new ReadYourWritesTransaction(cx));

		// Read the entire range file map into memory, then walk it backwards from its last entry to produce a list of non overlapping key range files
		state std::map<Key, BackupConfig::RangeSlice> localmap;
		state Key startKey;
		state int batchSize = BUGGIFY ? 1 : 1000000;

		loop {
			try {
				tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
				tr->setOption(FDBTransactionOptions::LOCK_AWARE);

				Void _ = wait(taskBucket->keepRunning(tr, task));

				if(!bc) {
					// Backup container must be present if we're still here
					Void _ = wait(store(config.backupContainer().getOrThrow(tr), bc));
				}

				BackupConfig::RangeFileMapT::PairsType rangeresults = wait(config.snapshotRangeFileMap().getRange(tr, startKey, {}, batchSize));

				for(auto &p : rangeresults) {
					localmap.insert(p);
				}

				// A short page means the scan of the range file map is complete.
				if(rangeresults.size() < batchSize)
					break;

				startKey = keyAfter(rangeresults.back().first);
				tr->reset();
			} catch(Error &e) {
				Void _ = wait(tr->onError(e));
			}
		}

		std::vector<std::string> files;
		state Version maxVer = 0;
		state Version minVer = std::numeric_limits<Version>::max();
		state int64_t totalBytes = 0;

		if(!localmap.empty()) {
			// Get iterator that points to greatest key, start there.
			auto ri = localmap.rbegin();
			auto i = (++ri).base();

			while(1) {
				const BackupConfig::RangeSlice &r = i->second;

				// Add file to final file list
				files.push_back(r.fileName);

				// Update version range seen
				if(r.version < minVer)
					minVer = r.version;
				if(r.version > maxVer)
					maxVer = r.version;

				// Update total bytes counted.
				totalBytes += r.fileSize;

				// Jump to file that either ends where this file begins or has the greatest end that is less than
				// the begin of this file.  In other words find the map key that is <= begin of this file.  To do this
				// find the first end strictly greater than begin and then back up one.
				i = localmap.upper_bound(i->second.begin);
				// If we get begin then we're done, there are no more ranges that end at or before the last file's begin
				if(i == localmap.begin())
					break;
				--i;
			}
		}

		Params.endVersion().set(task, maxVer);

		Void _ = wait(bc->writeKeyspaceSnapshotFile(files, totalBytes));

		TraceEvent(SevInfo, "FileBackupWroteSnapshotManifest")
			.detail("BackupUID", config.getUid())
			.detail("BackupURL", bc->getURL())
			.detail("BeginVersion", minVer)
			.detail("EndVersion", maxVer)
			.detail("TotalBytes", totalBytes);

		return Void();
	}

	ACTOR static Future<Void> _finish(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> taskBucket, Reference<FutureBucket> futureBucket, Reference<Task> task) {
		Void _ = wait(checkTaskVersion(tr->getDatabase(), task, BackupSnapshotManifest::name, BackupSnapshotManifest::version));

		state BackupConfig config(task);

		// Set the latest snapshot end version, which was set during the execute phase
		config.latestSnapshotEndVersion().set(tr, Params.endVersion().get(task));

		state bool stopWhenDone;
		state EBackupState backupState;
		state Optional<Version> restorableVersion;

		Void _ = wait(store(config.stopWhenDone().getOrThrow(tr), stopWhenDone)
					&& store(config.stateEnum().getOrThrow(tr), backupState)
					&& store(config.getLatestRestorableVersion(tr), restorableVersion));

		// If the backup is restorable and the state isn't differential the set state to differential
		if(restorableVersion.present() && backupState != BackupAgentBase::STATE_DIFFERENTIAL)
			config.stateEnum().set(tr, BackupAgentBase::STATE_DIFFERENTIAL);

		// Unless we are to stop, start the next snapshot using the default interval
		Reference<TaskFuture> snapshotDoneFuture = task->getDoneFuture(futureBucket);
		if(!stopWhenDone) {
			Void _ = wait(config.initNewSnapshot(tr) && success(BackupSnapshotDispatchTask::addTask(tr, taskBucket, task, TaskCompletionKey::signal(snapshotDoneFuture))));
		} else {
			// Set the done future as the snapshot is now complete.
			Void _ = wait(snapshotDoneFuture->set(tr, taskBucket));
		}

		Void _ = wait(taskBucket->finish(tr, task));
		return Void();
	}

	// Create a new BackupSnapshotManifest task, returning its key.
	ACTOR static Future<Key> addTask(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> taskBucket, Reference<Task> parentTask, TaskCompletionKey completionKey, Reference<TaskFuture> waitFor = Reference<TaskFuture>()) {
		Key key = wait(addBackupTask(BackupSnapshotManifest::name,
									 BackupSnapshotManifest::version,
									 tr, taskBucket, completionKey,
									 BackupConfig(parentTask), waitFor));
		return key;
	}

	StringRef getName() const { return name; };

	Future<Void> execute(Database cx, Reference<TaskBucket> tb, Reference<FutureBucket> fb, Reference<Task> task) { return _execute(cx, tb, fb, task); };
	Future<Void> finish(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> tb, Reference<FutureBucket> fb, Reference<Task> task) { return _finish(tr, tb, fb, task); };
};
2017-12-18 06:29:57 +08:00
// Static identity and registration of the snapshot manifest task with the task bucket system.
StringRef BackupSnapshotManifest::name = LiteralStringRef("file_backup_snapshot_manifest");
const uint32_t BackupSnapshotManifest::version = 1;
REGISTER_TASKFUNC(BackupSnapshotManifest);
// Out-of-line definition of the wrapper declared inside BackupSnapshotDispatchTask; it must
// appear after BackupSnapshotManifest is defined because the two task types reference each other.
Future<Key> BackupSnapshotDispatchTask::addSnapshotManifestTask(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> taskBucket, Reference<Task> parentTask, TaskCompletionKey completionKey, Reference<TaskFuture> waitFor) {
	return BackupSnapshotManifest::addTask(tr, taskBucket, parentTask, completionKey, waitFor);
}
2017-05-26 04:48:44 +08:00
2017-11-16 05:33:09 +08:00
struct StartFullBackupTaskFunc : BackupTaskFuncBase {
2017-05-26 04:48:44 +08:00
static StringRef name ;
static const uint32_t version ;
2017-09-02 04:50:38 +08:00
static struct {
static TaskParam < Version > beginVersion ( ) { return LiteralStringRef ( __FUNCTION__ ) ; }
} Params ;
2017-05-26 04:48:44 +08:00
	// Execute phase: read the database's current version and record it as the backup's
	// begin version, from which mutation log backup will start.
	ACTOR static Future<Void> _execute(Database cx, Reference<TaskBucket> taskBucket, Reference<FutureBucket> futureBucket, Reference<Task> task) {
		Void _ = wait(checkTaskVersion(cx, task, StartFullBackupTaskFunc::name, StartFullBackupTaskFunc::version));

		loop {
			// A fresh transaction per attempt; only the read version is needed.
			state Reference<ReadYourWritesTransaction> tr(new ReadYourWritesTransaction(cx));
			try {
				tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
				tr->setOption(FDBTransactionOptions::LOCK_AWARE);
				Version startVersion = wait(tr->getReadVersion());

				Params.beginVersion().set(task, startVersion);
				break;
			}
			catch(Error &e) {
				Void _ = wait(tr->onError(e));
			}
		}

		return Void();
	}
	// Finish phase: turn on mutation logging for the configured ranges, move the backup to
	// the running state, and create the snapshot dispatch, log dispatch, and completion tasks.
	ACTOR static Future<Void> _finish(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> taskBucket, Reference<FutureBucket> futureBucket, Reference<Task> task) {
		state BackupConfig config(task);
		state Version beginVersion = Params.beginVersion().get(task);

		state std::vector<KeyRange> backupRanges = wait(config.backupRanges().getOrThrow(tr));

		// Start logging the mutations for the specified ranges of the tag
		for (auto &backupRange : backupRanges) {
			config.startMutationLogs(tr, backupRange);
		}

		config.stateEnum().set(tr, EBackupState::STATE_BACKUP);

		state Reference<TaskFuture> backupFinished = futureBucket->future(tr);

		// Initialize the initial snapshot and create tasks to continually write logs and snapshots
		// The initial snapshot has a desired duration of 0, meaning go as fast as possible.
		Void _ = wait(config.initNewSnapshot(tr, 0));

		Key _ = wait(BackupSnapshotDispatchTask::addTask(tr, taskBucket, task, TaskCompletionKey::joinWith(backupFinished)));
		Key _ = wait(BackupLogsDispatchTask::addTask(tr, taskBucket, task, beginVersion, TaskCompletionKey::joinWith(backupFinished)));

		// If a clean stop is requested, the log and snapshot tasks will quit after the backup is restorable, then the following
		// task will clean up and set the completed state.
		Key _ = wait(FinishedFullBackupTaskFunc::addTask(tr, taskBucket, task, TaskCompletionKey::noSignal(), backupFinished));

		Void _ = wait(taskBucket->finish(tr, task));
		return Void();
	}
2017-09-09 07:09:18 +08:00
ACTOR static Future < Key > addTask ( Reference < ReadYourWritesTransaction > tr , Reference < TaskBucket > taskBucket , UID uid , TaskCompletionKey completionKey , Reference < TaskFuture > waitFor = Reference < TaskFuture > ( ) )
2017-05-26 04:48:44 +08:00
{
2017-09-12 02:14:30 +08:00
Key key = wait ( addBackupTask ( StartFullBackupTaskFunc : : name ,
StartFullBackupTaskFunc : : version ,
tr , taskBucket , completionKey ,
BackupConfig ( uid ) , waitFor ) ) ;
return key ;
2017-05-26 04:48:44 +08:00
}
StringRef getName ( ) const { return name ; } ;
Future < Void > execute ( Database cx , Reference < TaskBucket > tb , Reference < FutureBucket > fb , Reference < Task > task ) { return _execute ( cx , tb , fb , task ) ; } ;
Future < Void > finish ( Reference < ReadYourWritesTransaction > tr , Reference < TaskBucket > tb , Reference < FutureBucket > fb , Reference < Task > task ) { return _finish ( tr , tb , fb , task ) ; } ;
} ;
StringRef StartFullBackupTaskFunc : : name = LiteralStringRef ( " file_start_full_backup " ) ;
const uint32_t StartFullBackupTaskFunc : : version = 1 ;
REGISTER_TASKFUNC ( StartFullBackupTaskFunc ) ;
2017-11-16 05:33:09 +08:00
// Final task of a restore: marks the restore COMPLETED, clears the restore's
// file map and applyMutations state, and unlocks the database.
struct RestoreCompleteTaskFunc : RestoreTaskFuncBase {
2017-05-26 04:48:44 +08:00
ACTOR static Future < Void > _finish ( Reference < ReadYourWritesTransaction > tr , Reference < TaskBucket > taskBucket , Reference < FutureBucket > futureBucket , Reference < Task > task ) {
2017-09-09 07:09:18 +08:00
Void _ = wait ( checkTaskVersion ( tr - > getDatabase ( ) , task , name , version ) ) ;
2017-05-26 04:48:44 +08:00
state RestoreConfig restore ( task ) ;
restore . stateEnum ( ) . set ( tr , ERestoreState : : COMPLETED ) ;
// Clear the file map now since it could be huge.
2017-11-15 15:33:17 +08:00
restore . fileSet ( ) . clear ( tr ) ;
2017-05-26 04:48:44 +08:00
// TODO: Validate that the range version map has exactly the restored ranges in it. This means that for any restore operation
// the ranges to restore must be within the backed up ranges, otherwise from the restore perspective it will appear that some
// key ranges were missing and so the backup set is incomplete and the restore has failed.
// This validation cannot be done currently because Restore only supports a single restore range but backups can have many ranges.
// Clear the applyMutations stuff, including any unapplied mutations from versions beyond the restored version.
restore . clearApplyMutationsKeys ( tr ) ;
Void _ = wait ( taskBucket - > finish ( tr , task ) ) ;
// The database was locked under the restore's UID at restore start; release it.
Void _ = wait ( unlockDatabase ( tr , restore . getUid ( ) ) ) ;
return Void ( ) ;
}
// Add this completion task, copying the restore config from parentTask.
ACTOR static Future < Key > addTask ( Reference < ReadYourWritesTransaction > tr , Reference < TaskBucket > taskBucket , Reference < Task > parentTask , TaskCompletionKey completionKey , Reference < TaskFuture > waitFor = Reference < TaskFuture > ( ) ) {
Key doneKey = wait ( completionKey . get ( tr , taskBucket ) ) ;
state Reference < Task > task ( new Task ( RestoreCompleteTaskFunc : : name , RestoreCompleteTaskFunc : : version , doneKey ) ) ;
// Get restore config from parent task and bind it to new task
Void _ = wait ( RestoreConfig ( parentTask ) . toTask ( tr , task ) ) ;
if ( ! waitFor ) {
return taskBucket - > addTask ( tr , task ) ;
}
// Defer adding the task until waitFor is set; returns a placeholder key.
Void _ = wait ( waitFor - > onSetAddTask ( tr , taskBucket , task ) ) ;
return LiteralStringRef ( " OnSetAddTask " ) ;
}
static StringRef name ;
static const uint32_t version ;
StringRef getName ( ) const { return name ; } ;
// No work outside the transaction; everything happens in _finish.
Future < Void > execute ( Database cx , Reference < TaskBucket > tb , Reference < FutureBucket > fb , Reference < Task > task ) { return Void ( ) ; } ;
Future < Void > finish ( Reference < ReadYourWritesTransaction > tr , Reference < TaskBucket > tb , Reference < FutureBucket > fb , Reference < Task > task ) { return _finish ( tr , tb , fb , task ) ; } ;
} ;
StringRef RestoreCompleteTaskFunc : : name = LiteralStringRef ( " restore_complete " ) ;
const uint32_t RestoreCompleteTaskFunc : : version = 1 ;
REGISTER_TASKFUNC ( RestoreCompleteTaskFunc ) ;
2017-11-16 05:33:09 +08:00
// Common base for restore tasks that read a block from a single backup file.
// Declares the shared task parameters (file descriptor, read offset/length)
// and a toString() used for logging/debugging task instances.
struct RestoreFileTaskFuncBase : RestoreTaskFuncBase {
struct InputParams {
// The backup file this task reads from.
static TaskParam < RestoreFile > inputFile ( ) { return LiteralStringRef ( __FUNCTION__ ) ; }
// Byte offset of the block to read within the file.
static TaskParam < int64_t > readOffset ( ) { return LiteralStringRef ( __FUNCTION__ ) ; }
// Number of bytes to read starting at readOffset.
static TaskParam < int64_t > readLen ( ) { return LiteralStringRef ( __FUNCTION__ ) ; }
} Params ;
2017-11-19 20:39:18 +08:00
// Human-readable description of this task's file/offset/length parameters.
std : : string toString ( Reference < Task > task ) {
return format ( " fileName '%s' readLen %lld readOffset %lld " ,
2017-11-16 05:33:09 +08:00
Params . inputFile ( ) . get ( task ) . fileName . c_str ( ) ,
Params . readLen ( ) . get ( task ) ,
Params . readOffset ( ) . get ( task ) ) ;
}
} ;
// Task which restores one block of a snapshot range file: reads the block,
// trims its key/value pairs to the restore range, translates keys through
// removePrefix/addPrefix, and writes them to the database in size-limited
// transactions. _finish records the block's version into the apply-mutations
// key range version map.
struct RestoreRangeTaskFunc : RestoreFileTaskFuncBase {
static struct : InputParams {
2017-11-15 15:33:17 +08:00
// The range of data that the (possibly empty) data represented, which is set if it intersects the target restore range
2017-05-26 04:48:44 +08:00
static TaskParam < KeyRange > originalFileRange ( ) { return LiteralStringRef ( __FUNCTION__ ) ; }
} Params ;
2017-11-19 20:39:18 +08:00
std : : string toString ( Reference < Task > task ) {
2017-11-25 16:46:16 +08:00
return RestoreFileTaskFuncBase : : toString ( task ) + format ( " originalFileRange '%s' " , printable ( Params . originalFileRange ( ) . get ( task ) ) . c_str ( ) ) ;
2017-11-19 20:39:18 +08:00
}
2017-05-26 04:48:44 +08:00
ACTOR static Future < Void > _execute ( Database cx , Reference < TaskBucket > taskBucket , Reference < FutureBucket > futureBucket , Reference < Task > task ) {
state RestoreConfig restore ( task ) ;
2017-11-16 05:33:09 +08:00
state RestoreFile rangeFile = Params . inputFile ( ) . get ( task ) ;
state int64_t readOffset = Params . readOffset ( ) . get ( task ) ;
state int64_t readLen = Params . readLen ( ) . get ( task ) ;
TraceEvent ( " FileRestoreRangeStart " )
2017-12-20 07:27:04 +08:00
. detail ( " RestoreUID " , restore . getUid ( ) )
2017-11-16 05:33:09 +08:00
. detail ( " FileName " , rangeFile . fileName )
. detail ( " FileVersion " , rangeFile . version )
. detail ( " FileSize " , rangeFile . fileSize )
. detail ( " ReadOffset " , readOffset )
. detail ( " ReadLen " , readLen )
. detail ( " TaskInstance " , ( uint64_t ) this ) ;
state Reference < ReadYourWritesTransaction > tr ( new ReadYourWritesTransaction ( cx ) ) ;
// Restore parameters fetched concurrently from the restore config below.
state Future < Reference < IBackupContainer > > bc ;
state Future < KeyRange > restoreRange ;
state Future < Key > addPrefix ;
state Future < Key > removePrefix ;
2017-11-15 15:33:17 +08:00
2017-11-16 05:33:09 +08:00
// Retry loop: read the restore configuration and verify the task is still valid.
loop {
try {
tr - > setOption ( FDBTransactionOptions : : ACCESS_SYSTEM_KEYS ) ;
tr - > setOption ( FDBTransactionOptions : : LOCK_AWARE ) ;
2017-05-26 04:48:44 +08:00
2017-11-16 05:33:09 +08:00
bc = restore . sourceContainer ( ) . getOrThrow ( tr ) ;
restoreRange = restore . restoreRange ( ) . getD ( tr ) ;
addPrefix = restore . addPrefix ( ) . getD ( tr ) ;
removePrefix = restore . removePrefix ( ) . getD ( tr ) ;
2017-05-26 04:48:44 +08:00
2017-12-01 09:18:57 +08:00
Void _ = wait ( taskBucket - > keepRunning ( tr , task ) ) ;
2017-11-25 16:46:16 +08:00
Void _ = wait ( success ( bc ) & & success ( restoreRange ) & & success ( addPrefix ) & & success ( removePrefix ) & & checkTaskVersion ( tr - > getDatabase ( ) , task , name , version ) ) ;
2017-11-16 05:33:09 +08:00
break ;
2017-05-26 04:48:44 +08:00
2017-11-16 05:33:09 +08:00
} catch ( Error & e ) {
Void _ = wait ( tr - > onError ( e ) ) ;
2017-05-26 04:48:44 +08:00
}
2017-11-16 05:33:09 +08:00
}
2017-05-26 04:48:44 +08:00
2017-11-16 05:33:09 +08:00
state Reference < IAsyncFile > inFile = wait ( bc . get ( ) - > readFile ( rangeFile . fileName ) ) ;
state Standalone < VectorRef < KeyValueRef > > blockData = wait ( decodeRangeFileBlock ( inFile , readOffset , readLen ) ) ;
2017-05-26 04:48:44 +08:00
2017-11-16 05:33:09 +08:00
// First and last key are the range for this file
state KeyRange fileRange = KeyRangeRef ( blockData . front ( ) . key , blockData . back ( ) . key ) ;
2017-05-26 04:48:44 +08:00
2017-11-16 05:33:09 +08:00
// If fileRange doesn't intersect restore range then we're done.
if ( ! fileRange . intersects ( restoreRange . get ( ) ) )
return Void ( ) ;
2017-05-26 04:48:44 +08:00
2017-11-16 05:33:09 +08:00
// We know the file range intersects the restore range but there could still be keys outside the restore range.
// Find the subvector of kv pairs that intersect the restore range. Note that the first and last keys are just the range endpoints for this file
int rangeStart = 1 ;
int rangeEnd = blockData . size ( ) - 1 ;
// Slide start forward, stop if something in range is found
while ( rangeStart < rangeEnd & & ! restoreRange . get ( ) . contains ( blockData [ rangeStart ] . key ) )
+ + rangeStart ;
// Slide end backward, stop if something in range is found
while ( rangeEnd > rangeStart & & ! restoreRange . get ( ) . contains ( blockData [ rangeEnd - 1 ] . key ) )
- - rangeEnd ;
state VectorRef < KeyValueRef > data = blockData . slice ( rangeStart , rangeEnd ) ;
// Shrink file range to be entirely within restoreRange and translate it to the new prefix
// First, use the untranslated file range to create the shrunk original file range which must be used in the kv range version map for applying mutations
state KeyRange originalFileRange = KeyRangeRef ( std : : max ( fileRange . begin , restoreRange . get ( ) . begin ) , std : : min ( fileRange . end , restoreRange . get ( ) . end ) ) ;
Params . originalFileRange ( ) . set ( task , originalFileRange ) ;
// Now shrink and translate fileRange
Key fileEnd = std : : min ( fileRange . end , restoreRange . get ( ) . end ) ;
// Special case: if the end is exactly the end of the removePrefix keyspace, map it to the end of the addPrefix keyspace.
if ( fileEnd = = ( removePrefix . get ( ) = = StringRef ( ) ? normalKeys . end : strinc ( removePrefix . get ( ) ) ) ) {
fileEnd = addPrefix . get ( ) = = StringRef ( ) ? normalKeys . end : strinc ( addPrefix . get ( ) ) ;
} else {
fileEnd = fileEnd . removePrefix ( removePrefix . get ( ) ) . withPrefix ( addPrefix . get ( ) ) ;
}
fileRange = KeyRangeRef ( std : : max ( fileRange . begin , restoreRange . get ( ) . begin ) . removePrefix ( removePrefix . get ( ) ) . withPrefix ( addPrefix . get ( ) ) , fileEnd ) ;
2017-05-26 04:48:44 +08:00
2017-11-16 05:33:09 +08:00
state int start = 0 ;
state int end = data . size ( ) ;
// Per-transaction write size budget; halved on transaction_too_large below.
state int dataSizeLimit = BUGGIFY ? g_random - > randomInt ( 256 * 1024 , 10e6 ) : CLIENT_KNOBS - > RESTORE_WRITE_TX_SIZE ;
2017-05-26 04:48:44 +08:00
2017-12-20 09:33:45 +08:00
tr - > reset ( ) ;
2017-11-16 05:33:09 +08:00
// Write the data in batches, each in its own transaction, resuming from
// `start` after each successful commit.
loop {
try {
tr - > setOption ( FDBTransactionOptions : : ACCESS_SYSTEM_KEYS ) ;
tr - > setOption ( FDBTransactionOptions : : LOCK_AWARE ) ;
2017-05-26 04:48:44 +08:00
2017-11-16 05:33:09 +08:00
state int i = start ;
state int txBytes = 0 ;
state int iend = start ;
2017-05-26 04:48:44 +08:00
2017-11-16 05:33:09 +08:00
// find iend that results in the desired transaction size
for ( ; iend < end & & txBytes < dataSizeLimit ; + + iend ) {
txBytes + = data [ iend ] . key . expectedSize ( ) ;
txBytes + = data [ iend ] . value . expectedSize ( ) ;
}
2017-05-26 04:48:44 +08:00
2017-11-16 05:33:09 +08:00
// Clear the range we are about to set.
// If start == 0 then use fileBegin for the start of the range, else data[start]
// If iend == end then use fileEnd for the end of the range, else data[iend]
state KeyRange trRange = KeyRangeRef ( ( start = = 0 ) ? fileRange . begin : data [ start ] . key . removePrefix ( removePrefix . get ( ) ) . withPrefix ( addPrefix . get ( ) )
, ( iend = = end ) ? fileRange . end : data [ iend ] . key . removePrefix ( removePrefix . get ( ) ) . withPrefix ( addPrefix . get ( ) ) ) ;
2017-05-26 04:48:44 +08:00
2017-11-16 05:33:09 +08:00
tr - > clear ( trRange ) ;
2017-05-26 04:48:44 +08:00
2017-11-16 05:33:09 +08:00
for ( ; i < iend ; + + i ) {
tr - > setOption ( FDBTransactionOptions : : NEXT_WRITE_NO_WRITE_CONFLICT_RANGE ) ;
tr - > set ( data [ i ] . key . removePrefix ( removePrefix . get ( ) ) . withPrefix ( addPrefix . get ( ) ) , data [ i ] . value ) ;
}
2017-05-26 04:48:44 +08:00
2017-11-16 05:33:09 +08:00
// Add to bytes written count
restore . bytesWritten ( ) . atomicOp ( tr , txBytes , MutationRef : : Type : : AddValue ) ;
2017-05-26 04:48:44 +08:00
2017-11-16 05:33:09 +08:00
// Verify the database is still locked under this restore's UID before committing.
state Future < Void > checkLock = checkDatabaseLock ( tr , restore . getUid ( ) ) ;
2017-05-26 04:48:44 +08:00
2017-12-01 09:18:57 +08:00
Void _ = wait ( taskBucket - > keepRunning ( tr , task ) ) ;
2017-05-26 04:48:44 +08:00
2017-11-16 05:33:09 +08:00
Void _ = wait ( checkLock ) ;
2017-05-26 04:48:44 +08:00
2017-11-16 05:33:09 +08:00
Void _ = wait ( tr - > commit ( ) ) ;
2017-05-26 04:48:44 +08:00
2017-11-16 05:33:09 +08:00
TraceEvent ( " FileRestoreCommittedRange " )
2017-12-20 07:27:04 +08:00
. detail ( " RestoreUID " , restore . getUid ( ) )
2017-11-16 05:33:09 +08:00
. detail ( " FileName " , rangeFile . fileName )
. detail ( " FileVersion " , rangeFile . version )
. detail ( " FileSize " , rangeFile . fileSize )
. detail ( " ReadOffset " , readOffset )
. detail ( " ReadLen " , readLen )
. detail ( " CommitVersion " , tr - > getCommittedVersion ( ) )
. detail ( " BeginRange " , printable ( trRange . begin ) )
. detail ( " EndRange " , printable ( trRange . end ) )
. detail ( " StartIndex " , start )
. detail ( " EndIndex " , i )
. detail ( " DataSize " , data . size ( ) )
. detail ( " Bytes " , txBytes )
. detail ( " OriginalFileRange " , printable ( originalFileRange ) )
. detail ( " TaskInstance " , ( uint64_t ) this ) ;
2017-05-26 04:48:44 +08:00
2017-11-16 05:33:09 +08:00
// Commit succeeded, so advance starting point
start = i ;
2017-05-26 04:48:44 +08:00
2017-11-16 05:33:09 +08:00
if ( start = = end )
return Void ( ) ;
2017-12-07 06:11:40 +08:00
tr - > reset ( ) ;
2017-11-16 05:33:09 +08:00
} catch ( Error & e ) {
TraceEvent ( SevWarn , " FileRestoreErrorRangeWrite " )
2017-12-20 07:27:04 +08:00
. detail ( " RestoreUID " , restore . getUid ( ) )
2017-11-16 05:33:09 +08:00
. detail ( " FileName " , rangeFile . fileName )
. detail ( " FileVersion " , rangeFile . version )
. detail ( " FileSize " , rangeFile . fileSize )
. detail ( " ReadOffset " , readOffset )
. detail ( " ReadLen " , readLen )
. detail ( " BeginRange " , printable ( trRange . begin ) )
. detail ( " EndRange " , printable ( trRange . end ) )
. detail ( " StartIndex " , start )
. detail ( " EndIndex " , i )
. detail ( " DataSize " , data . size ( ) )
. detail ( " Bytes " , txBytes )
. error ( e )
. detail ( " TaskInstance " , ( uint64_t ) this ) ;
2017-05-26 04:48:44 +08:00
2017-11-16 05:33:09 +08:00
// Shrink the batch and retry if the transaction was too large; otherwise
// defer to standard transaction error handling (retry or rethrow).
if ( e . code ( ) = = error_code_transaction_too_large )
dataSizeLimit / = 2 ;
else
Void _ = wait ( tr - > onError ( e ) ) ;
2017-05-26 04:48:44 +08:00
}
}
}
// Count this block as finished and, if data intersected the restore range,
// record the block's version for its original key range in the version map.
ACTOR static Future < Void > _finish ( Reference < ReadYourWritesTransaction > tr , Reference < TaskBucket > taskBucket , Reference < FutureBucket > futureBucket , Reference < Task > task ) {
state RestoreConfig restore ( task ) ;
restore . fileBlocksFinished ( ) . atomicOp ( tr , 1 , MutationRef : : Type : : AddValue ) ;
// Update the KV range map if originalFileRange is set
Future < Void > updateMap = Void ( ) ;
if ( Params . originalFileRange ( ) . exists ( task ) ) {
2017-11-16 05:33:09 +08:00
Value versionEncoded = BinaryWriter : : toValue ( Params . inputFile ( ) . get ( task ) . version , Unversioned ( ) ) ;
2017-05-26 04:48:44 +08:00
updateMap = krmSetRange ( tr , restore . applyMutationsMapPrefix ( ) , Params . originalFileRange ( ) . get ( task ) , versionEncoded ) ;
}
state Reference < TaskFuture > taskFuture = futureBucket - > unpack ( task - > params [ Task : : reservedTaskParamKeyDone ] ) ;
Void _ = wait ( taskFuture - > set ( tr , taskBucket ) & &
taskBucket - > finish ( tr , task ) & & updateMap ) ;
return Void ( ) ;
}
2017-11-15 15:33:17 +08:00
// Add a restore-range task for one block (offset/len) of file rf, bound to parentTask's restore config.
ACTOR static Future < Key > addTask ( Reference < ReadYourWritesTransaction > tr , Reference < TaskBucket > taskBucket , Reference < Task > parentTask , RestoreFile rf , int64_t offset , int64_t len , TaskCompletionKey completionKey , Reference < TaskFuture > waitFor = Reference < TaskFuture > ( ) ) {
2017-05-26 04:48:44 +08:00
Key doneKey = wait ( completionKey . get ( tr , taskBucket ) ) ;
state Reference < Task > task ( new Task ( RestoreRangeTaskFunc : : name , RestoreRangeTaskFunc : : version , doneKey ) ) ;
// Create a restore config from the current task and bind it to the new task.
Void _ = wait ( RestoreConfig ( parentTask ) . toTask ( tr , task ) ) ;
2017-11-15 15:33:17 +08:00
2017-11-16 05:33:09 +08:00
Params . inputFile ( ) . set ( task , rf ) ;
2017-11-15 15:33:17 +08:00
Params . readOffset ( ) . set ( task , offset ) ;
Params . readLen ( ) . set ( task , len ) ;
2017-05-26 04:48:44 +08:00
if ( ! waitFor ) {
return taskBucket - > addTask ( tr , task ) ;
}
// Defer adding the task until waitFor is set; returns a placeholder key.
Void _ = wait ( waitFor - > onSetAddTask ( tr , taskBucket , task ) ) ;
return LiteralStringRef ( " OnSetAddTask " ) ;
}
static StringRef name ;
static const uint32_t version ;
StringRef getName ( ) const { return name ; } ;
Future < Void > execute ( Database cx , Reference < TaskBucket > tb , Reference < FutureBucket > fb , Reference < Task > task ) { return _execute ( cx , tb , fb , task ) ; } ;
Future < Void > finish ( Reference < ReadYourWritesTransaction > tr , Reference < TaskBucket > tb , Reference < FutureBucket > fb , Reference < Task > task ) { return _finish ( tr , tb , fb , task ) ; } ;
} ;
StringRef RestoreRangeTaskFunc : : name = LiteralStringRef ( " restore_range_data " ) ;
const uint32_t RestoreRangeTaskFunc : : version = 1 ;
REGISTER_TASKFUNC ( RestoreRangeTaskFunc ) ;
2017-11-16 05:33:09 +08:00
// Task which restores one block of a mutation log file: reads the block and
// writes its key/value pairs (prefixed with the restore's mutation log prefix)
// into the database in size-limited transactions, for later application by the
// applyMutations machinery.
struct RestoreLogDataTaskFunc : RestoreFileTaskFuncBase {
2017-05-26 04:48:44 +08:00
static StringRef name ;
static const uint32_t version ;
StringRef getName ( ) const { return name ; } ;
2017-11-16 05:33:09 +08:00
// No additional parameters beyond the shared file/offset/length params.
static struct : InputParams {
2017-05-26 04:48:44 +08:00
} Params ;
ACTOR static Future < Void > _execute ( Database cx , Reference < TaskBucket > taskBucket , Reference < FutureBucket > futureBucket , Reference < Task > task ) {
state RestoreConfig restore ( task ) ;
2017-11-16 05:33:09 +08:00
state RestoreFile logFile = Params . inputFile ( ) . get ( task ) ;
state int64_t readOffset = Params . readOffset ( ) . get ( task ) ;
state int64_t readLen = Params . readLen ( ) . get ( task ) ;
TraceEvent ( " FileRestoreLogStart " )
2017-12-20 07:27:04 +08:00
. detail ( " RestoreUID " , restore . getUid ( ) )
2017-11-16 05:33:09 +08:00
. detail ( " FileName " , logFile . fileName )
. detail ( " FileBeginVersion " , logFile . version )
. detail ( " FileEndVersion " , logFile . endVersion )
. detail ( " FileSize " , logFile . fileSize )
. detail ( " ReadOffset " , readOffset )
. detail ( " ReadLen " , readLen )
. detail ( " TaskInstance " , ( uint64_t ) this ) ;
state Reference < ReadYourWritesTransaction > tr ( new ReadYourWritesTransaction ( cx ) ) ;
state Reference < IBackupContainer > bc ;
2017-11-15 15:33:17 +08:00
2017-11-16 05:33:09 +08:00
// Retry loop: fetch the source backup container and validate the task.
loop {
try {
tr - > setOption ( FDBTransactionOptions : : ACCESS_SYSTEM_KEYS ) ;
tr - > setOption ( FDBTransactionOptions : : LOCK_AWARE ) ;
2017-05-26 04:48:44 +08:00
2017-11-16 05:33:09 +08:00
Reference < IBackupContainer > _bc = wait ( restore . sourceContainer ( ) . getOrThrow ( tr ) ) ;
bc = _bc ;
2017-05-26 04:48:44 +08:00
2017-11-16 05:33:09 +08:00
Void _ = wait ( checkTaskVersion ( tr - > getDatabase ( ) , task , name , version ) ) ;
2017-12-01 09:18:57 +08:00
Void _ = wait ( taskBucket - > keepRunning ( tr , task ) ) ;
2017-05-26 04:48:44 +08:00
2017-11-16 05:33:09 +08:00
break ;
} catch ( Error & e ) {
Void _ = wait ( tr - > onError ( e ) ) ;
2017-05-26 04:48:44 +08:00
}
2017-11-16 05:33:09 +08:00
}
2017-05-26 04:48:44 +08:00
2017-11-16 05:33:09 +08:00
state Key mutationLogPrefix = restore . mutationLogPrefix ( ) ;
state Reference < IAsyncFile > inFile = wait ( bc - > readFile ( logFile . fileName ) ) ;
state Standalone < VectorRef < KeyValueRef > > data = wait ( decodeLogFileBlock ( inFile , readOffset , readLen ) ) ;
2017-05-26 04:48:44 +08:00
2017-11-16 05:33:09 +08:00
state int start = 0 ;
state int end = data . size ( ) ;
// Per-transaction write size budget; halved on transaction_too_large below.
state int dataSizeLimit = BUGGIFY ? g_random - > randomInt ( 256 * 1024 , 10e6 ) : CLIENT_KNOBS - > RESTORE_WRITE_TX_SIZE ;
2017-05-26 04:48:44 +08:00
2017-12-20 09:33:45 +08:00
tr - > reset ( ) ;
2017-11-16 05:33:09 +08:00
// Write the log data in batches, each in its own transaction, resuming
// from `start` after each successful commit.
loop {
try {
if ( start = = end )
return Void ( ) ;
2017-05-26 04:48:44 +08:00
2017-11-16 05:33:09 +08:00
tr - > setOption ( FDBTransactionOptions : : ACCESS_SYSTEM_KEYS ) ;
tr - > setOption ( FDBTransactionOptions : : LOCK_AWARE ) ;
2017-05-26 04:48:44 +08:00
2017-11-16 05:33:09 +08:00
state int i = start ;
state int txBytes = 0 ;
for ( ; i < end & & txBytes < dataSizeLimit ; + + i ) {
Key k = data [ i ] . key . withPrefix ( mutationLogPrefix ) ;
ValueRef v = data [ i ] . value ;
tr - > set ( k , v ) ;
txBytes + = k . expectedSize ( ) ;
txBytes + = v . expectedSize ( ) ;
}
2017-05-26 04:48:44 +08:00
2017-11-16 05:33:09 +08:00
// Verify the database is still locked under this restore's UID before committing.
state Future < Void > checkLock = checkDatabaseLock ( tr , restore . getUid ( ) ) ;
2017-05-26 04:48:44 +08:00
2017-12-01 09:18:57 +08:00
Void _ = wait ( taskBucket - > keepRunning ( tr , task ) ) ;
2017-11-16 05:33:09 +08:00
Void _ = wait ( checkLock ) ;
2017-05-26 04:48:44 +08:00
2017-11-16 05:33:09 +08:00
// Add to bytes written count
restore . bytesWritten ( ) . atomicOp ( tr , txBytes , MutationRef : : Type : : AddValue ) ;
2017-05-26 04:48:44 +08:00
2017-11-16 05:33:09 +08:00
Void _ = wait ( tr - > commit ( ) ) ;
2017-05-26 04:48:44 +08:00
2017-11-16 05:33:09 +08:00
TraceEvent ( " FileRestoreCommittedLog " )
2017-12-20 07:27:04 +08:00
. detail ( " RestoreUID " , restore . getUid ( ) )
2017-11-16 05:33:09 +08:00
. detail ( " FileName " , logFile . fileName )
. detail ( " FileBeginVersion " , logFile . version )
. detail ( " FileEndVersion " , logFile . endVersion )
. detail ( " FileSize " , logFile . fileSize )
. detail ( " ReadOffset " , readOffset )
. detail ( " ReadLen " , readLen )
. detail ( " CommitVersion " , tr - > getCommittedVersion ( ) )
. detail ( " StartIndex " , start )
. detail ( " EndIndex " , i )
. detail ( " DataSize " , data . size ( ) )
. detail ( " Bytes " , txBytes )
. detail ( " TaskInstance " , ( uint64_t ) this ) ;
2017-05-26 04:48:44 +08:00
2017-11-16 05:33:09 +08:00
// Commit succeeded, so advance starting point
start = i ;
2017-12-07 06:11:40 +08:00
tr - > reset ( ) ;
2017-11-16 05:33:09 +08:00
} catch ( Error & e ) {
TraceEvent ( SevWarn , " FileRestoreErrorLogWrite " )
2017-12-20 07:27:04 +08:00
. detail ( " RestoreUID " , restore . getUid ( ) )
2017-11-16 05:33:09 +08:00
. detail ( " FileName " , logFile . fileName )
. detail ( " FileBeginVersion " , logFile . version )
. detail ( " FileEndVersion " , logFile . endVersion )
. detail ( " FileSize " , logFile . fileSize )
. detail ( " ReadOffset " , readOffset )
. detail ( " ReadLen " , readLen )
. detail ( " StartIndex " , start )
. detail ( " EndIndex " , i )
. detail ( " DataSize " , data . size ( ) )
. detail ( " Bytes " , txBytes )
. error ( e )
. detail ( " TaskInstance " , ( uint64_t ) this ) ;
2017-05-26 04:48:44 +08:00
2017-11-16 05:33:09 +08:00
// Shrink the batch and retry if the transaction was too large; otherwise
// defer to standard transaction error handling (retry or rethrow).
if ( e . code ( ) = = error_code_transaction_too_large )
dataSizeLimit / = 2 ;
else
Void _ = wait ( tr - > onError ( e ) ) ;
2017-05-26 04:48:44 +08:00
}
}
}
// Count this block as finished and signal the task's done future.
ACTOR static Future < Void > _finish ( Reference < ReadYourWritesTransaction > tr , Reference < TaskBucket > taskBucket , Reference < FutureBucket > futureBucket , Reference < Task > task ) {
RestoreConfig ( task ) . fileBlocksFinished ( ) . atomicOp ( tr , 1 , MutationRef : : Type : : AddValue ) ;
state Reference < TaskFuture > taskFuture = futureBucket - > unpack ( task - > params [ Task : : reservedTaskParamKeyDone ] ) ;
// TODO: Check to see if there is a leak in the FutureBucket since an invalid task (validation key fails) will never set its taskFuture.
Void _ = wait ( taskFuture - > set ( tr , taskBucket ) & &
taskBucket - > finish ( tr , task ) ) ;
return Void ( ) ;
}
2017-11-15 15:33:17 +08:00
// Add a restore-log task for one block (offset/len) of file lf, bound to parentTask's restore config.
ACTOR static Future < Key > addTask ( Reference < ReadYourWritesTransaction > tr , Reference < TaskBucket > taskBucket , Reference < Task > parentTask , RestoreFile lf , int64_t offset , int64_t len , TaskCompletionKey completionKey , Reference < TaskFuture > waitFor = Reference < TaskFuture > ( ) ) {
2017-05-26 04:48:44 +08:00
Key doneKey = wait ( completionKey . get ( tr , taskBucket ) ) ;
state Reference < Task > task ( new Task ( RestoreLogDataTaskFunc : : name , RestoreLogDataTaskFunc : : version , doneKey ) ) ;
// Create a restore config from the current task and bind it to the new task.
Void _ = wait ( RestoreConfig ( parentTask ) . toTask ( tr , task ) ) ;
2017-11-16 05:33:09 +08:00
Params . inputFile ( ) . set ( task , lf ) ;
2017-11-15 15:33:17 +08:00
Params . readOffset ( ) . set ( task , offset ) ;
Params . readLen ( ) . set ( task , len ) ;
2017-05-26 04:48:44 +08:00
if ( ! waitFor ) {
return taskBucket - > addTask ( tr , task ) ;
}
// Defer adding the task until waitFor is set; returns a placeholder key.
Void _ = wait ( waitFor - > onSetAddTask ( tr , taskBucket , task ) ) ;
return LiteralStringRef ( " OnSetAddTask " ) ;
}
Future < Void > execute ( Database cx , Reference < TaskBucket > tb , Reference < FutureBucket > fb , Reference < Task > task ) { return _execute ( cx , tb , fb , task ) ; } ;
Future < Void > finish ( Reference < ReadYourWritesTransaction > tr , Reference < TaskBucket > tb , Reference < FutureBucket > fb , Reference < Task > task ) { return _finish ( tr , tb , fb , task ) ; } ;
} ;
StringRef RestoreLogDataTaskFunc : : name = LiteralStringRef ( " restore_log_data " ) ;
const uint32_t RestoreLogDataTaskFunc : : version = 1 ;
REGISTER_TASKFUNC ( RestoreLogDataTaskFunc ) ;
2017-11-16 05:33:09 +08:00
struct RestoreDispatchTaskFunc : RestoreTaskFuncBase {
2017-05-26 04:48:44 +08:00
static StringRef name ;
static const uint32_t version ;
StringRef getName ( ) const { return name ; } ;
static struct {
static TaskParam < Version > beginVersion ( ) { return LiteralStringRef ( __FUNCTION__ ) ; }
2017-11-15 15:33:17 +08:00
static TaskParam < std : : string > beginFile ( ) { return LiteralStringRef ( __FUNCTION__ ) ; }
2017-05-26 04:48:44 +08:00
static TaskParam < int64_t > beginBlock ( ) { return LiteralStringRef ( __FUNCTION__ ) ; }
static TaskParam < int64_t > batchSize ( ) { return LiteralStringRef ( __FUNCTION__ ) ; }
static TaskParam < int64_t > remainingInBatch ( ) { return LiteralStringRef ( __FUNCTION__ ) ; }
} Params ;
ACTOR static Future < Void > _finish ( Reference < ReadYourWritesTransaction > tr , Reference < TaskBucket > taskBucket , Reference < FutureBucket > futureBucket , Reference < Task > task ) {
state RestoreConfig restore ( task ) ;
2017-11-16 14:38:31 +08:00
state Version beginVersion = Params . beginVersion ( ) . get ( task ) ;
state Reference < TaskFuture > onDone = futureBucket - > unpack ( task - > params [ Task : : reservedTaskParamKeyDone ] ) ;
2017-05-26 04:48:44 +08:00
2017-11-16 14:38:31 +08:00
state int64_t remainingInBatch = Params . remainingInBatch ( ) . get ( task ) ;
state bool addingToExistingBatch = remainingInBatch > 0 ;
2017-12-19 09:48:18 +08:00
state Version restoreVersion ;
2017-12-20 10:11:40 +08:00
Void _ = wait ( store ( restore . restoreVersion ( ) . getOrThrow ( tr ) , restoreVersion )
& & checkTaskVersion ( tr - > getDatabase ( ) , task , name , version ) ) ;
2017-05-26 04:48:44 +08:00
2017-11-16 14:38:31 +08:00
// If not adding to an existing batch then update the apply mutations end version so the mutations from the
2017-12-19 07:56:57 +08:00
// previous batch can be applied. Only do this once beginVersion is > 0 (it will be 0 for the initial dispatch).
2017-12-20 07:27:04 +08:00
if ( ! addingToExistingBatch & & beginVersion > 0 ) {
2017-12-19 09:48:18 +08:00
restore . setApplyEndVersion ( tr , std : : min ( beginVersion , restoreVersion ) ) ;
2017-12-20 07:27:04 +08:00
}
2017-05-26 04:48:44 +08:00
2017-12-20 10:11:40 +08:00
// The applyLag must be retrieved AFTER potentially updating the apply end version.
state int64_t applyLag = wait ( restore . getApplyVersionLag ( tr ) ) ;
2017-11-16 14:38:31 +08:00
state int64_t batchSize = Params . batchSize ( ) . get ( task ) ;
2017-05-26 04:48:44 +08:00
2017-11-16 14:38:31 +08:00
// If starting a new batch and the apply lag is too large then re-queue and wait
if ( ! addingToExistingBatch & & applyLag > ( BUGGIFY ? 1 : CLIENT_KNOBS - > CORE_VERSIONSPERSECOND * 300 ) ) {
// Wait a small amount of time and then re-add this same task.
Void _ = wait ( delay ( FLOW_KNOBS - > PREVENT_FAST_SPIN_DELAY ) ) ;
Key _ = wait ( RestoreDispatchTaskFunc : : addTask ( tr , taskBucket , task , beginVersion , " " , 0 , batchSize , remainingInBatch ) ) ;
TraceEvent ( " FileRestoreDispatch " )
2017-12-20 07:27:04 +08:00
. detail ( " RestoreUID " , restore . getUid ( ) )
2017-11-16 14:38:31 +08:00
. detail ( " BeginVersion " , beginVersion )
. detail ( " ApplyLag " , applyLag )
. detail ( " BatchSize " , batchSize )
. detail ( " Decision " , " too_far_behind " )
. detail ( " TaskInstance " , ( uint64_t ) this ) ;
Void _ = wait ( taskBucket - > finish ( tr , task ) ) ;
return Void ( ) ;
}
state std : : string beginFile = Params . beginFile ( ) . getOrDefault ( task ) ;
// Get a batch of files. We're targeting batchSize blocks being dispatched so query for batchSize files (each of which is 0 or more blocks).
state RestoreConfig : : FileSetT : : Values files = wait ( restore . fileSet ( ) . getRange ( tr , { beginVersion , beginFile } , { } , CLIENT_KNOBS - > RESTORE_DISPATCH_ADDTASK_SIZE ) ) ;
// allPartsDone will be set once all block tasks in the current batch are finished.
state Reference < TaskFuture > allPartsDone ;
// If adding to existing batch then join the new block tasks to the existing batch future
if ( addingToExistingBatch ) {
Key fKey = wait ( restore . batchFuture ( ) . getD ( tr ) ) ;
allPartsDone = Reference < TaskFuture > ( new TaskFuture ( futureBucket , fKey ) ) ;
}
else {
// Otherwise create a new future for the new batch
allPartsDone = futureBucket - > future ( tr ) ;
restore . batchFuture ( ) . set ( tr , allPartsDone - > pack ( ) ) ;
// Set batch quota remaining to batch size
remainingInBatch = batchSize ;
}
// If there were no files to load then this batch is done and restore is almost done.
if ( files . size ( ) = = 0 ) {
// If adding to existing batch then blocks could be in progress so create a new Dispatch task that waits for them to finish
if ( addingToExistingBatch ) {
2017-12-19 09:48:18 +08:00
// Setting next begin to restoreVersion + 1 so that any files in the file map at the restore version won't be dispatched again.
Key _ = wait ( RestoreDispatchTaskFunc : : addTask ( tr , taskBucket , task , restoreVersion + 1 , " " , 0 , batchSize , 0 , TaskCompletionKey : : noSignal ( ) , allPartsDone ) ) ;
2017-05-26 04:48:44 +08:00
TraceEvent ( " FileRestoreDispatch " )
2017-12-20 07:27:04 +08:00
. detail ( " RestoreUID " , restore . getUid ( ) )
2017-05-26 04:48:44 +08:00
. detail ( " BeginVersion " , beginVersion )
2017-11-16 14:38:31 +08:00
. detail ( " BeginFile " , Params . beginFile ( ) . get ( task ) )
. detail ( " BeginBlock " , Params . beginBlock ( ) . get ( task ) )
. detail ( " RestoreVersion " , restoreVersion )
2017-05-26 04:48:44 +08:00
. detail ( " ApplyLag " , applyLag )
2017-11-16 14:38:31 +08:00
. detail ( " Decision " , " end_of_final_batch " )
2017-05-26 04:48:44 +08:00
. detail ( " TaskInstance " , ( uint64_t ) this ) ;
}
2017-11-16 14:38:31 +08:00
else if ( beginVersion < restoreVersion ) {
// If beginVersion is less than restoreVersion then do one more dispatch task to get there
Key _ = wait ( RestoreDispatchTaskFunc : : addTask ( tr , taskBucket , task , restoreVersion , " " , 0 , batchSize ) ) ;
2017-05-26 04:48:44 +08:00
2017-11-16 14:38:31 +08:00
TraceEvent ( " FileRestoreDispatch " )
2017-12-20 07:27:04 +08:00
. detail ( " RestoreUID " , restore . getUid ( ) )
2017-11-16 14:38:31 +08:00
. detail ( " BeginVersion " , beginVersion )
. detail ( " BeginFile " , Params . beginFile ( ) . get ( task ) )
. detail ( " BeginBlock " , Params . beginBlock ( ) . get ( task ) )
. detail ( " RestoreVersion " , restoreVersion )
. detail ( " ApplyLag " , applyLag )
. detail ( " Decision " , " apply_to_restore_version " )
. detail ( " TaskInstance " , ( uint64_t ) this ) ;
2017-05-26 04:48:44 +08:00
}
2017-11-16 14:38:31 +08:00
else if ( applyLag = = 0 ) {
// If apply lag is 0 then we are done so create the completion task
Key _ = wait ( RestoreCompleteTaskFunc : : addTask ( tr , taskBucket , task , TaskCompletionKey : : noSignal ( ) ) ) ;
2017-05-26 04:48:44 +08:00
2017-11-16 14:38:31 +08:00
TraceEvent ( " FileRestoreDispatch " )
2017-12-20 07:27:04 +08:00
. detail ( " RestoreUID " , restore . getUid ( ) )
2017-11-16 14:38:31 +08:00
. detail ( " BeginVersion " , beginVersion )
. detail ( " BeginFile " , Params . beginFile ( ) . get ( task ) )
. detail ( " BeginBlock " , Params . beginBlock ( ) . get ( task ) )
. detail ( " ApplyLag " , applyLag )
. detail ( " Decision " , " restore_complete " )
. detail ( " TaskInstance " , ( uint64_t ) this ) ;
} else {
// Applying of mutations is not yet finished so wait a small amount of time and then re-add this same task.
Void _ = wait ( delay ( FLOW_KNOBS - > PREVENT_FAST_SPIN_DELAY ) ) ;
Key _ = wait ( RestoreDispatchTaskFunc : : addTask ( tr , taskBucket , task , beginVersion , " " , 0 , batchSize ) ) ;
2017-05-26 04:48:44 +08:00
2017-11-16 14:38:31 +08:00
TraceEvent ( " FileRestoreDispatch " )
2017-12-20 07:27:04 +08:00
. detail ( " RestoreUID " , restore . getUid ( ) )
2017-11-16 14:38:31 +08:00
. detail ( " BeginVersion " , beginVersion )
. detail ( " ApplyLag " , applyLag )
. detail ( " Decision " , " apply_still_behind " )
. detail ( " TaskInstance " , ( uint64_t ) this ) ;
2017-05-26 04:48:44 +08:00
}
2017-11-16 14:38:31 +08:00
// If adding to existing batch then task is joined with a batch future so set done future
// Note that this must be done after joining at least one task with the batch future in case all other blockers already finished.
Future < Void > setDone = addingToExistingBatch ? onDone - > set ( tr , taskBucket ) : Void ( ) ;
2017-05-26 04:48:44 +08:00
2017-11-16 14:38:31 +08:00
Void _ = wait ( taskBucket - > finish ( tr , task ) & & setDone ) ;
return Void ( ) ;
}
2017-05-26 04:48:44 +08:00
2017-11-16 14:38:31 +08:00
// Start moving through the file list and queuing up blocks. Only queue up to RESTORE_DISPATCH_ADDTASK_SIZE blocks per Dispatch task
// and target batchSize total per batch but a batch must end on a complete version boundary so exceed the limit if necessary
// to reach the end of a version of files.
state std : : vector < Future < Key > > addTaskFutures ;
state Version endVersion = files [ 0 ] . version ;
state int blocksDispatched = 0 ;
state int64_t beginBlock = Params . beginBlock ( ) . getOrDefault ( task ) ;
state int i = 0 ;
for ( ; i < files . size ( ) ; + + i ) {
RestoreConfig : : RestoreFile & f = files [ i ] ;
// Here we are "between versions" (prior to adding the first block of the first file of a new version) so this is an opportunity
// to end the current dispatch batch (which must end on a version boundary) if the batch size has been reached or exceeded
if ( f . version ! = endVersion & & remainingInBatch < = 0 ) {
// Next start will be at the first version after endVersion at the first file first block
+ + endVersion ;
beginFile = " " ;
beginBlock = 0 ;
break ;
}
2017-05-26 04:48:44 +08:00
2017-11-16 14:38:31 +08:00
// Set the starting point for the next task in case we stop inside this file
endVersion = f . version ;
beginFile = f . fileName ;
2017-05-26 04:48:44 +08:00
2017-11-16 14:38:31 +08:00
state int64_t j = beginBlock * f . blockSize ;
// For each block of the file
for ( ; j < f . fileSize ; j + = f . blockSize ) {
2017-05-26 04:48:44 +08:00
// Stop if we've reached the addtask limit
if ( blocksDispatched = = CLIENT_KNOBS - > RESTORE_DISPATCH_ADDTASK_SIZE )
break ;
2017-11-16 14:38:31 +08:00
if ( f . isRange ) {
addTaskFutures . push_back ( RestoreRangeTaskFunc : : addTask ( tr , taskBucket , task ,
f , j , std : : min < int64_t > ( f . blockSize , f . fileSize - j ) ,
TaskCompletionKey : : joinWith ( allPartsDone ) ) ) ;
}
else {
addTaskFutures . push_back ( RestoreLogDataTaskFunc : : addTask ( tr , taskBucket , task ,
f , j , std : : min < int64_t > ( f . blockSize , f . fileSize - j ) ,
TaskCompletionKey : : joinWith ( allPartsDone ) ) ) ;
2017-05-26 04:48:44 +08:00
}
2017-11-16 14:38:31 +08:00
// Increment beginBlock for the file and total blocks dispatched for this task
+ + beginBlock ;
+ + blocksDispatched ;
- - remainingInBatch ;
2017-05-26 04:48:44 +08:00
}
2017-11-16 14:38:31 +08:00
// Stop if we've reached the addtask limit
if ( blocksDispatched = = CLIENT_KNOBS - > RESTORE_DISPATCH_ADDTASK_SIZE )
break ;
2017-05-26 04:48:44 +08:00
2017-11-16 14:38:31 +08:00
// We just completed an entire file so the next task should start at the file after this one within endVersion (or later)
// if this iteration ends up being the last for this task
beginFile = beginFile + ' \x00 ' ;
beginBlock = 0 ;
2017-05-26 04:48:44 +08:00
2017-12-20 07:27:04 +08:00
//TraceEvent("FileRestoreDispatchedFile").detail("RestoreUID", restore.getUid()).detail("FileName", fi.filename).detail("TaskInstance", (uint64_t)this);
2017-11-16 14:38:31 +08:00
}
2017-05-26 04:48:44 +08:00
2017-11-16 14:38:31 +08:00
// If no blocks were dispatched then the next dispatch task should run now and be joined with the allPartsDone future
if ( blocksDispatched = = 0 ) {
std : : string decision ;
2017-05-26 04:48:44 +08:00
2017-11-16 14:38:31 +08:00
// If no files were dispatched either then the batch size wasn't large enough to catch all of the files at the next lowest non-dispatched
// version, so increase the batch size.
if ( i = = 0 ) {
batchSize * = 2 ;
decision = " increased_batch_size " ;
}
else
decision = " all_files_were_empty " ;
2017-05-26 04:48:44 +08:00
TraceEvent ( " FileRestoreDispatch " )
2017-12-20 07:27:04 +08:00
. detail ( " RestoreUID " , restore . getUid ( ) )
2017-05-26 04:48:44 +08:00
. detail ( " BeginVersion " , beginVersion )
2017-11-15 15:33:17 +08:00
. detail ( " BeginFile " , Params . beginFile ( ) . get ( task ) )
2017-05-26 04:48:44 +08:00
. detail ( " BeginBlock " , Params . beginBlock ( ) . get ( task ) )
. detail ( " EndVersion " , endVersion )
. detail ( " ApplyLag " , applyLag )
. detail ( " BatchSize " , batchSize )
2017-11-16 14:38:31 +08:00
. detail ( " Decision " , decision )
2017-05-26 04:48:44 +08:00
. detail ( " TaskInstance " , ( uint64_t ) this )
. detail ( " RemainingInBatch " , remainingInBatch ) ;
2017-11-16 14:38:31 +08:00
Void _ = wait ( success ( RestoreDispatchTaskFunc : : addTask ( tr , taskBucket , task , endVersion , beginFile , beginBlock , batchSize , remainingInBatch , TaskCompletionKey : : joinWith ( ( allPartsDone ) ) ) ) ) ;
// If adding to existing batch then task is joined with a batch future so set done future.
// Note that this must be done after joining at least one task with the batch future in case all other blockers already finished.
Future < Void > setDone = addingToExistingBatch ? onDone - > set ( tr , taskBucket ) : Void ( ) ;
Void _ = wait ( setDone & & taskBucket - > finish ( tr , task ) ) ;
return Void ( ) ;
2017-05-26 04:48:44 +08:00
}
2017-11-16 14:38:31 +08:00
// If adding to existing batch then task is joined with a batch future so set done future.
Future < Void > setDone = addingToExistingBatch ? onDone - > set ( tr , taskBucket ) : Void ( ) ;
// Increment the number of blocks dispatched in the restore config
restore . filesBlocksDispatched ( ) . atomicOp ( tr , blocksDispatched , MutationRef : : Type : : AddValue ) ;
// If beginFile is not empty then we had to stop in the middle of a version (possibly within a file) so we cannot end
// the batch here because we do not know if we got all of the files and blocks from the last version queued, so
// make sure remainingInBatch is at least 1.
2017-12-19 07:56:57 +08:00
if ( ! beginFile . empty ( ) )
2017-11-16 14:38:31 +08:00
remainingInBatch = std : : max < int64_t > ( 1 , remainingInBatch ) ;
// If more blocks need to be dispatched in this batch then add a follow-on task that is part of the allPartsDone group which will won't wait
// to run and will add more block tasks.
if ( remainingInBatch > 0 )
addTaskFutures . push_back ( RestoreDispatchTaskFunc : : addTask ( tr , taskBucket , task , endVersion , beginFile , beginBlock , batchSize , remainingInBatch , TaskCompletionKey : : joinWith ( allPartsDone ) ) ) ;
else // Otherwise, add a follow-on task to continue after all previously dispatched blocks are done
addTaskFutures . push_back ( RestoreDispatchTaskFunc : : addTask ( tr , taskBucket , task , endVersion , beginFile , beginBlock , batchSize , 0 , TaskCompletionKey : : noSignal ( ) , allPartsDone ) ) ;
Void _ = wait ( setDone & & waitForAll ( addTaskFutures ) & & taskBucket - > finish ( tr , task ) ) ;
TraceEvent ( " FileRestoreDispatch " )
2017-12-20 07:27:04 +08:00
. detail ( " RestoreUID " , restore . getUid ( ) )
2017-11-16 14:38:31 +08:00
. detail ( " BeginVersion " , beginVersion )
. detail ( " BeginFile " , Params . beginFile ( ) . get ( task ) )
. detail ( " BeginBlock " , Params . beginBlock ( ) . get ( task ) )
. detail ( " EndVersion " , endVersion )
. detail ( " ApplyLag " , applyLag )
. detail ( " BatchSize " , batchSize )
. detail ( " Decision " , " dispatched_files " )
. detail ( " FilesDispatched " , i )
. detail ( " BlocksDispatched " , blocksDispatched )
. detail ( " TaskInstance " , ( uint64_t ) this )
. detail ( " RemainingInBatch " , remainingInBatch ) ;
2017-05-26 04:48:44 +08:00
return Void ( ) ;
}
2017-11-15 15:33:17 +08:00
	// Schedule a new RestoreDispatch task that resumes dispatching restore work at the cursor
	// (beginVersion, beginFile, beginBlock).  batchSize is the target number of file blocks per
	// dispatch batch; remainingInBatch is the quota left in the batch currently being filled
	// (0 means this task starts a new batch).  If waitFor is set, the task is registered to run
	// only after that future is set, instead of being added to the bucket immediately.
	// Returns the key of the newly added task, or the literal "OnSetAddTask" in the deferred case.
	ACTOR static Future<Key> addTask(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> taskBucket, Reference<Task> parentTask, Version beginVersion, std::string beginFile, int64_t beginBlock, int64_t batchSize, int64_t remainingInBatch = 0, TaskCompletionKey completionKey = TaskCompletionKey::noSignal(), Reference<TaskFuture> waitFor = Reference<TaskFuture>()) {
		Key doneKey = wait(completionKey.get(tr, taskBucket));

		// Use high priority for dispatch tasks that have to queue more blocks for the current batch
		unsigned int priority = (remainingInBatch > 0) ? 1 : 0;

		state Reference<Task> task(new Task(RestoreDispatchTaskFunc::name, RestoreDispatchTaskFunc::version, doneKey, priority));

		// Create a config from the parent task and bind it to the new task
		Void _ = wait(RestoreConfig(parentTask).toTask(tr, task));

		// Persist the dispatch cursor and batch accounting as task parameters
		Params.beginVersion().set(task, beginVersion);
		Params.batchSize().set(task, batchSize);
		Params.remainingInBatch().set(task, remainingInBatch);
		Params.beginBlock().set(task, beginBlock);
		Params.beginFile().set(task, beginFile);

		if (!waitFor) {
			return taskBucket->addTask(tr, task);
		}

		// Deferred add: run the task when waitFor is set; no real task key to return.
		Void _ = wait(waitFor->onSetAddTask(tr, taskBucket, task));
		return LiteralStringRef("OnSetAddTask");
	}
	// All dispatch work happens transactionally in _finish(); the execute phase is a no-op.
	Future<Void> execute(Database cx, Reference<TaskBucket> tb, Reference<FutureBucket> fb, Reference<Task> task) { return Void(); };
	// TaskFuncBase hook: delegate to the actor implementation above.
	Future<Void> finish(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> tb, Reference<FutureBucket> fb, Reference<Task> task) { return _finish(tr, tb, fb, task); };
} ;
// Bind the persistent task type name/version and register the task function so
// TaskBucket can instantiate RestoreDispatchTaskFunc for stored "restore_dispatch" tasks.
StringRef RestoreDispatchTaskFunc::name = LiteralStringRef("restore_dispatch");
const uint32_t RestoreDispatchTaskFunc::version = 1;
REGISTER_TASKFUNC(RestoreDispatchTaskFunc);
// Build a human-readable status report for restore tags.  If tagName is empty, every
// restore tag is reported; otherwise only the named tag.  Each tag's full status text
// (from RestoreConfig::getFullStatus) is appended, separated by blank lines.
ACTOR Future<std::string> restoreStatus(Reference<ReadYourWritesTransaction> tr, Key tagName) {
	tr->setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);
	tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
	tr->setOption(FDBTransactionOptions::LOCK_AWARE);

	state std::vector<KeyBackedTag> tags;
	if (tagName.size() == 0) {
		// No tag specified: report on all restore tags
		std::vector<KeyBackedTag> t = wait(getAllRestoreTags(tr));
		tags = t;
	}
	else
		tags.push_back(makeRestoreTag(tagName.toString()));

	state std::string result;
	state int i = 0;
	for (; i < tags.size(); ++i) {
		// Resolve the tag to its restore UID, then fetch that restore's full status text
		UidAndAbortedFlagT u = wait(tags[i].getD(tr));
		std::string s = wait(RestoreConfig(u.first).getFullStatus(tr));
		result.append(s);
		result.append("\n\n");
	}

	return result;
}
// Transactional abort of the restore identified by tagName.  Returns UNITIALIZED if the
// tag does not exist, the current state if the restore is not runnable (nothing to abort),
// or ABORTED after marking the restore aborted, clearing its apply-mutations state,
// cancelling its tasks, and unlocking the database.
ACTOR Future<ERestoreState> abortRestore(Reference<ReadYourWritesTransaction> tr, Key tagName) {
	tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
	tr->setOption(FDBTransactionOptions::LOCK_AWARE);
	tr->setOption(FDBTransactionOptions::COMMIT_ON_FIRST_PROXY);

	state KeyBackedTag tag = makeRestoreTag(tagName.toString());
	state Optional<UidAndAbortedFlagT> current = wait(tag.get(tr));
	if (!current.present())
		return ERestoreState::UNITIALIZED;

	state RestoreConfig restore(current.get().first);
	state ERestoreState status = wait(restore.stateEnum().getD(tr));
	state bool runnable = wait(restore.isRunnable(tr));

	// Not runnable (already completed/aborted): report the existing state unchanged
	if (!runnable)
		return status;

	restore.stateEnum().set(tr, ERestoreState::ABORTED);

	// Clear all of the ApplyMutations stuff
	restore.clearApplyMutationsKeys(tr);

	// Cancel the backup tasks on this tag
	Void _ = wait(tag.cancel(tr));

	Void _ = wait(unlockDatabase(tr, current.get().first));

	return ERestoreState::ABORTED;
}
2017-12-05 09:21:43 +08:00
// Database-level abort: runs the transactional abortRestore in a retry loop, then commits
// a dummy conflicting transaction to ensure the mutation applier has observed the abort
// before returning.  Returns the restore's state if it was not abortable, else ABORTED.
ACTOR Future<ERestoreState> abortRestore(Database cx, Key tagName) {
	state Reference<ReadYourWritesTransaction> tr = Reference<ReadYourWritesTransaction>(new ReadYourWritesTransaction(cx));

	loop {
		try {
			ERestoreState estate = wait(abortRestore(tr, tagName));
			if(estate != ERestoreState::ABORTED) {
				// Nothing was aborted (missing tag or already finished) — return as-is
				return estate;
			}
			Void _ = wait(tr->commit());
			break;
		} catch(Error &e) {
			Void _ = wait(tr->onError(e));
		}
	}

	tr = Reference<ReadYourWritesTransaction>(new ReadYourWritesTransaction(cx));

	//Commit a dummy transaction before returning success, to ensure the mutation applier has stopped submitting mutations
	loop {
		try {
			tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
			tr->setOption(FDBTransactionOptions::LOCK_AWARE);
			tr->setOption(FDBTransactionOptions::COMMIT_ON_FIRST_PROXY);
			// Self-conflicting no-op write forces a real commit through the proxy
			tr->addReadConflictRange(singleKeyRange(KeyRef()));
			tr->addWriteConflictRange(singleKeyRange(KeyRef()));
			Void _ = wait(tr->commit());
			return ERestoreState::ABORTED;
		} catch( Error &e ) {
			Void _ = wait(tr->onError(e));
		}
	}
}
2017-11-16 05:33:09 +08:00
// Task that initializes a full restore: validates state, loads the restorable file set from
// the backup container into the restore config's file map, then (in _finish) kicks off the
// first RestoreDispatch task.
struct StartFullRestoreTaskFunc : RestoreTaskFuncBase {
	static StringRef name;
	static const uint32_t version;

	// Task parameters persisted between execute and finish phases
	static struct {
		// First version for which log data should be applied (snapshot begin version)
		static TaskParam<Version> firstVersion() { return LiteralStringRef(__FUNCTION__); }
	} Params;

	// Execute phase: read the restorable set from the backup container and write the list of
	// restore files (ranges and logs) into the restore config, in batches of ~1MB per transaction.
	ACTOR static Future<Void> _execute(Database cx, Reference<TaskBucket> taskBucket, Reference<FutureBucket> futureBucket, Reference<Task> task) {
		state Reference<ReadYourWritesTransaction> tr(new ReadYourWritesTransaction(cx));
		state RestoreConfig restore(task);
		state Version restoreVersion;
		state Reference<IBackupContainer> bc;

		// First transaction (retried): verify state, move to STARTING, clear any stale file
		// data from a previous attempt, and capture the source container and restore version.
		loop {
			try {
				tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
				tr->setOption(FDBTransactionOptions::LOCK_AWARE);

				Void _ = wait(checkTaskVersion(tr->getDatabase(), task, name, version));
				Version _restoreVersion = wait(restore.restoreVersion().getOrThrow(tr));
				restoreVersion = _restoreVersion;
				Void _ = wait(taskBucket->keepRunning(tr, task));

				ERestoreState oldState = wait(restore.stateEnum().getD(tr));
				if(oldState != ERestoreState::QUEUED && oldState != ERestoreState::STARTING) {
					// Unexpected state: log the error and bail without changing anything
					Void _ = wait(restore.logError(cx, restore_error(), format("StartFullRestore: Encountered unexpected state(%d)", oldState), this));
					return Void();
				}
				restore.stateEnum().set(tr, ERestoreState::STARTING);
				restore.fileSet().clear(tr);
				restore.fileBlockCount().clear(tr);
				restore.fileCount().clear(tr);
				Reference<IBackupContainer> _bc = wait(restore.sourceContainer().getOrThrow(tr));
				bc = _bc;

				Void _ = wait(tr->commit());
				break;
			} catch(Error &e) {
				Void _ = wait(tr->onError(e));
			}
		}

		Optional<RestorableFileSet> restorable = wait(bc->getRestoreSet(restoreVersion));

		if(!restorable.present())
			throw restore_missing_data();

		// First version for which log data should be applied
		Params.firstVersion().set(task, restorable.get().snapshot.beginVersion);

		// Convert the two lists in restorable (logs and ranges) to a single list of RestoreFiles.
		// Order does not matter, they will be put in order when written to the restoreFileMap below.
		state std::vector<RestoreConfig::RestoreFile> files;

		for(const RangeFile &f : restorable.get().ranges) {
			files.push_back({f.version, f.fileName, true, f.blockSize, f.fileSize});
		}
		for(const LogFile &f : restorable.get().logs) {
			files.push_back({f.beginVersion, f.fileName, false, f.blockSize, f.fileSize, f.endVersion});
		}

		state std::vector<RestoreConfig::RestoreFile>::iterator start = files.begin();
		state std::vector<RestoreConfig::RestoreFile>::iterator end = files.end();

		tr->reset();
		// Write files to the restore config in chunks, each transaction capped around 1MB
		while(start != end) {
			try {
				tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
				tr->setOption(FDBTransactionOptions::LOCK_AWARE);

				Void _ = wait(taskBucket->keepRunning(tr, task));

				state std::vector<RestoreConfig::RestoreFile>::iterator i = start;

				state int txBytes = 0;
				state int nFileBlocks = 0;
				state int nFiles = 0;
				auto fileSet = restore.fileSet();
				for(; i != end && txBytes < 1e6; ++i) {
					txBytes += fileSet.insert(tr, *i);
					// Count whole blocks, rounding the final partial block up
					nFileBlocks += (i->fileSize + i->blockSize - 1) / i->blockSize;
					++nFiles;
				}

				// Increment counts
				restore.fileCount().atomicOp(tr, nFiles, MutationRef::Type::AddValue);
				restore.fileBlockCount().atomicOp(tr, nFileBlocks, MutationRef::Type::AddValue);

				Void _ = wait(tr->commit());

				TraceEvent("FileRestoreLoadedFiles")
					.detail("RestoreUID", restore.getUid())
					.detail("FileCount", nFiles)
					.detail("FileBlockCount", nFileBlocks)
					.detail("Bytes", txBytes)
					.detail("TaskInstance", (uint64_t)this);

				start = i;
				tr->reset();
			} catch(Error &e) {
				Void _ = wait(tr->onError(e));
			}
		}

		return Void();
	}

	// Finish phase: mark the restore RUNNING, initialize apply-mutations versions, and add
	// the first RestoreDispatch task.  If no data was found, log and abort the restore.
	ACTOR static Future<Void> _finish(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> taskBucket, Reference<FutureBucket> futureBucket, Reference<Task> task) {
		state RestoreConfig restore(task);

		state Version firstVersion = Params.firstVersion().getOrDefault(task, invalidVersion);
		if(firstVersion == invalidVersion) {
			// _execute never found a restorable set, so abort this restore by tag
			Void _ = wait(restore.logError(tr->getDatabase(), restore_missing_data(), "StartFullRestore: The backup had no data.", this));
			std::string tag = wait(restore.tag().getD(tr));
			ERestoreState _ = wait(abortRestore(tr, StringRef(tag)));
			return Void();
		}

		restore.stateEnum().set(tr, ERestoreState::RUNNING);

		// Set applyMutation versions
		restore.setApplyBeginVersion(tr, firstVersion);
		restore.setApplyEndVersion(tr, firstVersion);

		// Apply range data and log data in order
		Key _ = wait(RestoreDispatchTaskFunc::addTask(tr, taskBucket, task, 0, "", 0, CLIENT_KNOBS->RESTORE_DISPATCH_BATCH_SIZE));

		Void _ = wait(taskBucket->finish(tr, task));
		return Void();
	}

	// Create the restore_start task bound to the restore identified by uid.  If waitFor is
	// given the task runs after that future is set; otherwise it is added immediately.
	ACTOR static Future<Key> addTask(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> taskBucket, UID uid, TaskCompletionKey completionKey, Reference<TaskFuture> waitFor = Reference<TaskFuture>())
	{
		tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
		tr->setOption(FDBTransactionOptions::LOCK_AWARE);

		Key doneKey = wait(completionKey.get(tr, taskBucket));
		state Reference<Task> task(new Task(StartFullRestoreTaskFunc::name, StartFullRestoreTaskFunc::version, doneKey));

		state RestoreConfig restore(uid);
		// Bind the restore config to the new task
		Void _ = wait(restore.toTask(tr, task));

		if (!waitFor) {
			return taskBucket->addTask(tr, task);
		}

		Void _ = wait(waitFor->onSetAddTask(tr, taskBucket, task));
		return LiteralStringRef("OnSetAddTask");
	}

	StringRef getName() const { return name; };

	// TaskFuncBase hooks delegating to the actor implementations above
	Future<Void> execute(Database cx, Reference<TaskBucket> tb, Reference<FutureBucket> fb, Reference<Task> task) { return _execute(cx, tb, fb, task); };
	Future<Void> finish(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> tb, Reference<FutureBucket> fb, Reference<Task> task) { return _finish(tr, tb, fb, task); };
};
// Bind the persistent task type name/version and register the task function so
// TaskBucket can instantiate StartFullRestoreTaskFunc for stored "restore_start" tasks.
StringRef StartFullRestoreTaskFunc::name = LiteralStringRef("restore_start");
const uint32_t StartFullRestoreTaskFunc::version = 1;
REGISTER_TASKFUNC(StartFullRestoreTaskFunc);
}
// Reference-counted record pairing a log file handle with its name, the version range it
// covers, and the current write offset within the file.
struct LogInfo : public ReferenceCounted<LogInfo> {
	std::string fileName;            // file name within the backup container (presumably; set by users of this struct)
	Reference<IAsyncFile> logFile;   // open async file handle
	Version beginVersion;            // first version covered by this log file
	Version endVersion;              // end of the version range covered by this log file
	int64_t offset;                  // current write offset into logFile

	// Fix: previously only offset was initialized, leaving beginVersion and endVersion
	// indeterminate on a default-constructed LogInfo.  Zero-initialize all scalars.
	LogInfo() : beginVersion(0), endVersion(0), offset(0) {};
};
class FileBackupAgentImpl {
public :
static const int MAX_RESTORABLE_FILE_METASECTION_BYTES = 1024 * 8 ;
// This method will return the final status of the backup
2017-09-06 02:38:40 +08:00
ACTOR static Future < int > waitBackup ( FileBackupAgent * backupAgent , Database cx , std : : string tagName , bool stopWhenDone ) {
2017-05-26 04:48:44 +08:00
state std : : string backTrace ;
2017-08-30 02:49:40 +08:00
state KeyBackedTag tag = makeBackupTag ( tagName ) ;
2017-05-26 04:48:44 +08:00
loop {
state Reference < ReadYourWritesTransaction > tr ( new ReadYourWritesTransaction ( cx ) ) ;
tr - > setOption ( FDBTransactionOptions : : ACCESS_SYSTEM_KEYS ) ;
tr - > setOption ( FDBTransactionOptions : : LOCK_AWARE ) ;
try {
2017-08-30 02:49:40 +08:00
state Optional < UidAndAbortedFlagT > oldUidAndAborted = wait ( tag . get ( tr ) ) ;
if ( ! oldUidAndAborted . present ( ) ) {
return EBackupState : : STATE_NEVERRAN ;
}
state BackupConfig config ( oldUidAndAborted . get ( ) . first ) ;
state EBackupState status = wait ( config . stateEnum ( ) . getD ( tr , EBackupState : : STATE_NEVERRAN ) ) ;
2017-05-26 04:48:44 +08:00
// Break, if no longer runnable
2017-09-07 11:06:32 +08:00
if ( ! FileBackupAgent : : isRunnable ( status ) ) {
2017-05-26 04:48:44 +08:00
return status ;
}
// Break, if in differential mode (restorable) and stopWhenDone is not enabled
if ( ( ! stopWhenDone ) & & ( BackupAgentBase : : STATE_DIFFERENTIAL = = status ) ) {
return status ;
}
2017-08-30 02:49:40 +08:00
state Future < Void > watchFuture = tr - > watch ( config . stateEnum ( ) . key ) ;
2017-05-26 04:48:44 +08:00
Void _ = wait ( tr - > commit ( ) ) ;
Void _ = wait ( watchFuture ) ;
}
catch ( Error & e ) {
Void _ = wait ( tr - > onError ( e ) ) ;
}
}
}
2017-12-14 17:44:38 +08:00
	// Submit a new backup on tagName into outContainer covering backupRanges, taking snapshots
	// every snapshotIntervalSeconds.  Throws backup_duplicate if a runnable backup already
	// exists on the tag, and backup_error if the container cannot be created or the recorded
	// last-backup timestamp is in the future.  On success the tag points at a fresh backup
	// UID and a StartFullBackup task has been queued.
	ACTOR static Future<Void> submitBackup(FileBackupAgent* backupAgent, Reference<ReadYourWritesTransaction> tr, Key outContainer, int snapshotIntervalSeconds, std::string tagName, Standalone<VectorRef<KeyRangeRef>> backupRanges, bool stopWhenDone) {
		tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
		tr->setOption(FDBTransactionOptions::LOCK_AWARE);

		TraceEvent(SevInfo, "FBA_submitBackup")
			.detail("tagName", tagName.c_str())
			.detail("stopWhenDone", stopWhenDone)
			.detail("outContainer", outContainer.toString());

		state KeyBackedTag tag = makeBackupTag(tagName);
		Optional<UidAndAbortedFlagT> uidAndAbortedFlag = wait(tag.get(tr));
		if (uidAndAbortedFlag.present()) {
			// A previous backup exists on this tag; refuse if it is still runnable
			state BackupConfig prevConfig(uidAndAbortedFlag.get().first);
			state EBackupState prevBackupStatus = wait(prevConfig.stateEnum().getD(tr, EBackupState::STATE_NEVERRAN));
			if (FileBackupAgent::isRunnable(prevBackupStatus)) {
				throw backup_duplicate();
			}

			// Now is time to clear prev backup config space. We have no more use for it.
			prevConfig.clear(tr);
		}

		state BackupConfig config(g_random->randomUniqueID());
		state UID uid = config.getUid();

		// This check will ensure that current backupUid is later than the last backup Uid
		state Standalone<StringRef> nowStr = BackupAgentBase::getCurrentTime();
		state std::string backupContainer = outContainer.toString();

		// To be consistent with directory handling behavior since FDB backup was first released, if the container string
		// describes a local directory then "/backup-<timestamp>" will be added to it.
		if (backupContainer.find("file://") == 0) {
			backupContainer = joinPath(backupContainer, std::string("backup-") + nowStr.toString());
		}

		state Reference<IBackupContainer> bc = IBackupContainer::openContainer(backupContainer);
		try {
			// Verify the container is reachable/creatable before recording anything
			Void _ = wait(timeoutError(bc->create(), 30));
		} catch(Error &e) {
			if(e.code() == error_code_actor_cancelled)
				throw;
			fprintf(stderr, "ERROR: Could not create backup container: %s\n", e.what());
			throw backup_error();
		}

		Optional<Value> lastBackupTimestamp = wait(backupAgent->lastBackupTimestamp().get(tr));

		if ((lastBackupTimestamp.present()) && (lastBackupTimestamp.get() >= nowStr)) {
			// Clock skew or stale data: refuse rather than create out-of-order backup names
			fprintf(stderr, "ERROR: The last backup `%s' happened in the future.\n", printable(lastBackupTimestamp.get()).c_str());
			throw backup_error();
		}

		// Coalesce the requested ranges into a normalized, non-overlapping set
		KeyRangeMap<int> backupRangeSet;
		for (auto& backupRange : backupRanges) {
			backupRangeSet.insert(backupRange, 1);
		}

		backupRangeSet.coalesce(allKeys);
		state std::vector<KeyRange> normalizedRanges;

		for (auto& backupRange : backupRangeSet.ranges()) {
			if (backupRange.value()) {
				normalizedRanges.push_back(KeyRange(KeyRangeRef(backupRange.range().begin, backupRange.range().end)));
			}
		}

		config.clear(tr);

		// Point the tag to this new uid
		tag.set(tr, {uid, false});

		backupAgent->lastBackupTimestamp().set(tr, nowStr);

		// Set the backup keys
		config.tag().set(tr, tagName);
		config.stateEnum().set(tr, EBackupState::STATE_SUBMITTED);
		config.backupContainer().set(tr, bc);
		config.stopWhenDone().set(tr, stopWhenDone);
		config.backupRanges().set(tr, normalizedRanges);
		config.snapshotIntervalSeconds().set(tr, snapshotIntervalSeconds);

		Key taskKey = wait(fileBackup::StartFullBackupTaskFunc::addTask(tr, backupAgent->taskBucket, uid, TaskCompletionKey::noSignal()));

		return Void();
	}
	// Submits a new restore operation under tagName from the backup at backupURL,
	// restoring restoreRange up to restoreVersion, rewriting keys by stripping
	// removePrefix and prepending addPrefix. Must run inside a transaction the
	// caller commits. Throws restore_duplicate_uid if this uid was previously
	// aborted, restore_duplicate_tag if another restore on this tag is still
	// runnable, and restore_destination_not_empty if the target range has data.
	ACTOR static Future<Void> submitRestore(FileBackupAgent* backupAgent, Reference<ReadYourWritesTransaction> tr, Key tagName, Key backupURL, Version restoreVersion, Key addPrefix, Key removePrefix, KeyRange restoreRange, bool lockDB, UID uid) {
		ASSERT(restoreRange.contains(removePrefix) || removePrefix.size() == 0);

		tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
		tr->setOption(FDBTransactionOptions::LOCK_AWARE);

		// Get old restore config for this tag
		state KeyBackedTag tag = makeRestoreTag(tagName.toString());
		state Optional<UidAndAbortedFlagT> oldUidAndAborted = wait(tag.get(tr));
		if(oldUidAndAborted.present()) {
			if (oldUidAndAborted.get().first == uid) {
				if (oldUidAndAborted.get().second) {
					// This exact uid was submitted before and then aborted; it cannot be reused.
					throw restore_duplicate_uid();
				}
				else {
					// This transaction already committed under this uid; retry is a no-op.
					return Void();
				}
			}

			state RestoreConfig oldRestore(oldUidAndAborted.get().first);

			// Make sure old restore for this tag is not runnable
			bool runnable = wait(oldRestore.isRunnable(tr));

			if (runnable) {
				throw restore_duplicate_tag();
			}

			// Clear the old restore config
			oldRestore.clear(tr);
		}

		// The destination range, after prefix rewriting, must contain no existing rows.
		KeyRange restoreIntoRange = KeyRangeRef(restoreRange.begin, restoreRange.end).removePrefix(removePrefix).withPrefix(addPrefix);
		Standalone<RangeResultRef> existingRows = wait(tr->getRange(restoreIntoRange, 1));
		if (existingRows.size() > 0) {
			throw restore_destination_not_empty();
		}

		// Make new restore config
		state RestoreConfig restore(uid);

		// Point the tag to the new uid
		tag.set(tr, {uid, false});

		Reference<IBackupContainer> bc = IBackupContainer::openContainer(backupURL.toString());

		// Configure the new restore
		restore.tag().set(tr, tagName.toString());
		restore.sourceContainer().set(tr, bc);
		restore.stateEnum().set(tr, ERestoreState::QUEUED);
		restore.restoreVersion().set(tr, restoreVersion);
		restore.restoreRange().set(tr, restoreRange);
		// this also sets restore.add/removePrefix.
		restore.initApplyMutations(tr, addPrefix, removePrefix);

		Key taskKey = wait(fileBackup::StartFullRestoreTaskFunc::addTask(tr, backupAgent->taskBucket, uid, TaskCompletionKey::noSignal()));

		if (lockDB)
			Void _ = wait(lockDatabase(tr, uid));
		else
			Void _ = wait(checkDatabaseLock(tr, uid));

		return Void();
	}
	// This method will return the final status of the backup
	// Polls the restore under tagName until it is no longer runnable, optionally
	// printing progress each iteration, and returns its terminal ERestoreState.
	// Returns UNITIALIZED immediately if no restore was ever submitted on the tag.
	ACTOR static Future<ERestoreState> waitRestore(Database cx, Key tagName, bool verbose) {
		loop {
			try {
				state Reference<ReadYourWritesTransaction> tr(new ReadYourWritesTransaction(cx));
				tr->setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);
				tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
				tr->setOption(FDBTransactionOptions::LOCK_AWARE);

				state KeyBackedTag tag = makeRestoreTag(tagName.toString());
				Optional<UidAndAbortedFlagT> current = wait(tag.get(tr));
				if(!current.present()) {
					// Nothing was ever submitted under this tag.
					if(verbose)
						printf("Tag: %s State: %s\n", tagName.toString().c_str(), FileBackupAgent::restoreStateText(ERestoreState::UNITIALIZED).toString().c_str());
					return ERestoreState::UNITIALIZED;
				}

				state RestoreConfig restore(current.get().first);

				if(verbose) {
					state std::string details = wait(restore.getProgress(tr));
					printf("%s\n", details.c_str());
				}

				state ERestoreState status = wait(restore.stateEnum().getD(tr));
				state bool runnable = wait(restore.isRunnable(tr));

				// State won't change from here
				if (!runnable)
					break;

				// Wait for a change
				state Future<Void> watchFuture = tr->watch(restore.stateEnum().key);
				Void _ = wait(tr->commit());
				if(verbose)
					// In verbose mode wake up at least once a second to reprint progress.
					Void _ = wait(watchFuture || delay(1));
				else
					Void _ = wait(watchFuture);
			}
			catch (Error &e) {
				Void _ = wait(tr->onError(e));
			}
		}

		// status is a state variable, so it remains valid across the break above.
		return status;
	}
	// Marks the running backup on tagName to stop once restorable by setting its
	// stopWhenDone flag. Throws backup_unneeded if no runnable backup exists for
	// the tag, or backup_duplicate if stopWhenDone was already set.
	ACTOR static Future<Void> discontinueBackup(FileBackupAgent* backupAgent, Reference<ReadYourWritesTransaction> tr, Key tagName) {
		tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
		tr->setOption(FDBTransactionOptions::LOCK_AWARE);

		state KeyBackedTag tag = makeBackupTag(tagName.toString());
		// getOrThrow with backup_unneeded() converts "tag not found" into that error.
		state UidAndAbortedFlagT current = wait(tag.getOrThrow(tr, false, backup_unneeded()));
		state BackupConfig config(current.first);
		state EBackupState status = wait(config.stateEnum().getD(tr, EBackupState::STATE_NEVERRAN));

		if (!FileBackupAgent::isRunnable(status)) {
			throw backup_unneeded();
		}

		TraceEvent(SevInfo, "FBA_discontinueBackup")
				.detail("tagName", tag.tagName.c_str())
				.detail("status", BackupAgentBase::getStateText(status));

		state bool stopWhenDone = wait(config.stopWhenDone().getOrThrow(tr));

		if (stopWhenDone) {
			// Discontinue was already requested for this backup.
			throw backup_duplicate();
		}

		config.stopWhenDone().set(tr, true);

		return Void();
	}
2017-09-06 02:38:40 +08:00
	// Aborts the backup on tagName: cancels its task via the tag, clears its
	// uid-scoped config and mutation log keyspaces, and sets its state to ABORTED.
	// Throws backup_unneeded if no runnable backup exists for the tag.
	ACTOR static Future<Void> abortBackup(FileBackupAgent* backupAgent, Reference<ReadYourWritesTransaction> tr, std::string tagName) {
		tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
		tr->setOption(FDBTransactionOptions::LOCK_AWARE);

		state KeyBackedTag tag = makeBackupTag(tagName);
		state UidAndAbortedFlagT current = wait(tag.getOrThrow(tr, false, backup_unneeded()));

		state BackupConfig config(current.first);
		EBackupState status = wait(config.stateEnum().getD(tr, EBackupState::STATE_NEVERRAN));

		if (!backupAgent->isRunnable((BackupAgentBase::enumState)status)) {
			throw backup_unneeded();
		}

		TraceEvent(SevInfo, "FBA_abortBackup")
				.detail("tagName", tagName.c_str())
				.detail("status", BackupAgentBase::getStateText(status));

		// Cancel backup task through tag
		Void _ = wait(tag.cancel(tr));

		// Clear the per-uid config range and the per-uid backup mutation log range.
		Key configPath = uidPrefixKey(logRangesRange.begin, config.getUid());
		Key logsPath = uidPrefixKey(backupLogKeys.begin, config.getUid());

		tr->clear(KeyRangeRef(configPath, strinc(configPath)));
		tr->clear(KeyRangeRef(logsPath, strinc(logsPath)));

		config.stateEnum().set(tr, EBackupState::STATE_ABORTED);

		return Void();
	}
2017-09-06 02:38:40 +08:00
	// Builds a human-readable status report for the backup on tagName, including
	// snapshot progress when a snapshot is in flight, up to errorLimit reported
	// agent errors, and whether all backup agents are paused. Runs its own
	// retry loop against the database and returns the assembled text.
	ACTOR static Future<std::string> getStatus(FileBackupAgent* backupAgent, Database cx, int errorLimit, std::string tagName) {
		state Reference<ReadYourWritesTransaction> tr(new ReadYourWritesTransaction(cx));
		state std::string statusText;

		loop {
			try {
				tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
				tr->setOption(FDBTransactionOptions::LOCK_AWARE);

				state KeyBackedTag tag;
				state BackupConfig config;
				state EBackupState backupState;

				// Reset accumulated text in case this is a retry.
				statusText = "";
				tag = makeBackupTag(tagName);
				state Optional<UidAndAbortedFlagT> uidAndAbortedFlag = wait(tag.get(tr));
				// Start the pause-key read now so it resolves in parallel with the reads below.
				state Future<Optional<Value>> fPaused = tr->get(backupAgent->taskBucket->getPauseKey());
				if (uidAndAbortedFlag.present()) {
					config = BackupConfig(uidAndAbortedFlag.get().first);
					EBackupState status = wait(config.stateEnum().getD(tr, EBackupState::STATE_NEVERRAN));
					backupState = status;
				}

				if (!uidAndAbortedFlag.present() || backupState == EBackupState::STATE_NEVERRAN) {
					statusText += "No previous backups found.\n";
				} else {
					state std::string backupStatus(BackupAgentBase::getStateText(backupState));
					state Reference<IBackupContainer> bc = wait(config.backupContainer().getOrThrow(tr));
					state Optional<Version> stopVersion = wait(config.getLatestRestorableVersion(tr));

					// Set below for states in which a snapshot is actively progressing.
					bool snapshotProgress = false;

					switch (backupState) {
						case BackupAgentBase::STATE_SUBMITTED:
							statusText += "The backup on tag `" + tagName + "' is in progress (just started) to " + bc->getURL() + ".\n";
							break;
						case BackupAgentBase::STATE_BACKUP:
							statusText += "The backup on tag `" + tagName + "' is in progress to " + bc->getURL() + ".\n";
							snapshotProgress = true;
							break;
						case BackupAgentBase::STATE_DIFFERENTIAL:
							statusText += "The backup on tag `" + tagName + "' is restorable but continuing to " + bc->getURL() + ".\n";
							snapshotProgress = true;
							break;
						case BackupAgentBase::STATE_COMPLETED:
							statusText += "The previous backup on tag `" + tagName + "' at " + bc->getURL() + " completed at version " + format("%lld", stopVersion.orDefault(-1)) + ".\n";
							break;
						default:
							statusText += "The previous backup on tag `" + tagName + "' at " + bc->getURL() + " " + backupStatus + ".\n";
							break;
					}

					if (snapshotProgress) {
						state int64_t snapshotInterval;
						state Version recentReadVersion;
						state Version snapshotBeginVersion;
						state Version snapshotTargetEndVersion;

						// Fetch all snapshot progress values concurrently.
						Void _ = wait(store(config.snapshotBeginVersion().getOrThrow(tr), snapshotBeginVersion)
									&& store(config.snapshotTargetEndVersion().getOrThrow(tr), snapshotTargetEndVersion)
									&& store(config.snapshotIntervalSeconds().getOrThrow(tr), snapshotInterval)
									&& store(tr->getReadVersion(), recentReadVersion));

						statusText += format("Snapshot interval is %lld seconds. ", snapshotInterval);
						if (backupState == BackupAgentBase::STATE_DIFFERENTIAL)
							// Progress estimated by where the current read version falls in the target window.
							statusText += format("Current snapshot progress target is %3.2f%%\n", 100.0 * (recentReadVersion - snapshotBeginVersion) / (snapshotTargetEndVersion - snapshotBeginVersion));
						else
							statusText += "The initial snapshot is still running.\n";
					}
				}

				// Append the errors, if requested
				if (errorLimit > 0 && config.getUid().isValid()) {
					Optional<std::pair<std::string, int64_t>> errMsg = wait(config.lastError().get(tr));
					if (errMsg.present()) {
						statusText += "WARNING: Some backup agents have reported issues:\n";
						statusText += format("[%lld]: %s\n", errMsg.get().second, errMsg.get().first.c_str());
					}
				}

				Optional<Value> paused = wait(fPaused);
				if (paused.present()) {
					statusText += format("\nAll backup agents have been paused.\n");
				}

				break;
			}
			catch (Error &e) {
				Void _ = wait(tr->onError(e));
			}
		}

		return statusText;
	}
	// Reads the last-restorable version recorded for tagName, or 0 if none exists.
	ACTOR static Future<Version> getLastRestorable(FileBackupAgent* backupAgent, Reference<ReadYourWritesTransaction> tr, Key tagName) {
		tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
		tr->setOption(FDBTransactionOptions::LOCK_AWARE);
		state Optional<Value> version = wait(tr->get(backupAgent->lastRestorable.pack(tagName)));

		return (version.present()) ? BinaryReader::fromStringRef<Version>(version.get(), Unversioned()) : 0;
	}
static StringRef read ( StringRef & data , int bytes ) {
if ( bytes > data . size ( ) ) throw restore_error ( ) ;
StringRef r = data . substr ( 0 , bytes ) ;
data = data . substr ( bytes ) ;
return r ;
}
	// Determines a restorable target version from the backup at url (defaulting to
	// the backup's max restorable version when targetVersion is invalidVersion),
	// submits a restore under tagName, and optionally waits for completion.
	// Returns the version restored to; throws restore_invalid_version when the
	// requested version cannot be restored from this container.
	ACTOR static Future<Version> restore(FileBackupAgent* backupAgent, Database cx, Key tagName, Key url, bool waitForComplete, Version targetVersion, bool verbose, KeyRange range, Key addPrefix, Key removePrefix, bool lockDB, UID randomUid) {
		state Reference<IBackupContainer> bc = IBackupContainer::openContainer(url.toString());

		BackupDescription desc = wait(bc->describeBackup());
		printf("Backup Description\n%s", desc.toString().c_str());
		// Default to the latest restorable version in the backup when none was given.
		if(targetVersion == invalidVersion && desc.maxRestorableVersion.present())
			targetVersion = desc.maxRestorableVersion.get();

		Optional<RestorableFileSet> restoreSet = wait(bc->getRestoreSet(targetVersion));

		if(!restoreSet.present()) {
			TraceEvent(SevWarn, "FileBackupAgentRestoreNotPossible")
				.detail("BackupContainer", bc->getURL())
				.detail("TargetVersion", targetVersion);
			fprintf(stderr, "ERROR: Restore version %lld is not possible from %s\n", targetVersion, bc->getURL().c_str());
			throw restore_invalid_version();
		}

		if (verbose) {
			printf("Restoring backup to version: %lld\n", (long long) targetVersion);
		}

		state Reference<ReadYourWritesTransaction> tr(new ReadYourWritesTransaction(cx));
		loop {
			try {
				tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
				tr->setOption(FDBTransactionOptions::LOCK_AWARE);
				Void _ = wait(submitRestore(backupAgent, tr, tagName, url, targetVersion, addPrefix, removePrefix, range, lockDB, randomUid));
				Void _ = wait(tr->commit());
				break;
			} catch(Error &e) {
				// restore_duplicate_tag is deliberately not passed to onError; the loop
				// resubmits directly (submitRestore is idempotent for the same uid).
				if(e.code() != error_code_restore_duplicate_tag) {
					Void _ = wait(tr->onError(e));
				}
			}
		}

		if(waitForComplete) {
			ERestoreState finalState = wait(waitRestore(cx, tagName, verbose));
			if(finalState != ERestoreState::COMPLETED)
				throw restore_error();
		}

		return targetVersion;
	}
	//used for correctness only, locks the database before discontinuing the backup and that same lock is then used while doing the restore.
	//the tagname of the backup must be the same as the restore.
	ACTOR static Future<Version> atomicRestore(FileBackupAgent* backupAgent, Database cx, Key tagName, KeyRange range, Key addPrefix, Key removePrefix) {
		state Reference<ReadYourWritesTransaction> ryw_tr = Reference<ReadYourWritesTransaction>(new ReadYourWritesTransaction(cx));
		state BackupConfig backupConfig;

		// The backup on this tag must already be in differential mode before we can switch over.
		loop {
			try {
				ryw_tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
				ryw_tr->setOption(FDBTransactionOptions::LOCK_AWARE);
				state KeyBackedTag tag = makeBackupTag(tagName.toString());
				UidAndAbortedFlagT uidFlag = wait(tag.getOrThrow(ryw_tr));
				backupConfig = BackupConfig(uidFlag.first);
				state EBackupState status = wait(backupConfig.stateEnum().getOrThrow(ryw_tr));

				if (status != BackupAgentBase::STATE_DIFFERENTIAL) {
					throw backup_duplicate();
				}

				break;
			} catch(Error &e) {
				Void _ = wait(ryw_tr->onError(e));
			}
		}

		//Lock src, record commit version
		state Transaction tr(cx);
		state Version commitVersion;
		state UID randomUid = g_random->randomUniqueID();
		loop {
			try {
				Void _ = wait(lockDatabase(&tr, randomUid));
				Void _ = wait(tr.commit());
				commitVersion = tr.getCommittedVersion();
				break;
			} catch(Error &e) {
				Void _ = wait(tr.onError(e));
			}
		}

		// Ask the backup to stop once restorable; already-stopped conditions are fine.
		ryw_tr->reset();
		loop {
			try {
				Void _ = wait(discontinueBackup(backupAgent, ryw_tr, tagName));
				Void _ = wait(ryw_tr->commit());
				break;
			} catch(Error &e) {
				if(e.code() == error_code_backup_unneeded || e.code() == error_code_backup_duplicate) {
					break;
				}
				Void _ = wait(ryw_tr->onError(e));
			}
		}

		// Block until the backup reaches its final state.
		int _ = wait(waitBackup(backupAgent, cx, tagName.toString(), true));

		// Clear the restore destination range before restoring into it.
		ryw_tr->reset();
		loop {
			try {
				ryw_tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
				ryw_tr->setOption(FDBTransactionOptions::LOCK_AWARE);
				ryw_tr->addReadConflictRange(range);
				ryw_tr->clear(range);
				Void _ = wait(ryw_tr->commit());
				break;
			} catch(Error &e) {
				Void _ = wait(ryw_tr->onError(e));
			}
		}

		// Restore from the container the backup wrote to, reusing randomUid so the
		// restore runs under the database lock taken above.
		Reference<IBackupContainer> bc = wait(backupConfig.backupContainer().getOrThrow(cx));

		Version ver = wait(restore(backupAgent, cx, tagName, KeyRef(bc->getURL()), true, -1, true, range, addPrefix, removePrefix, true, randomUid));
		return ver;
	}
} ;
2017-09-06 02:38:40 +08:00
// Tag name used when the caller does not supply one.
const std::string BackupAgentBase::defaultTagName = "default";
// Fixed byte sizes of the mutation log entry header and backup data file footer.
// NOTE(review): the exact field layouts these sizes correspond to are defined
// where the header/footer are produced and consumed — confirm there.
const int BackupAgentBase::logHeaderSize = 12;
const int FileBackupAgent::dataFooterSize = 20;
// Public restore entry point: generates a fresh UID for this restore attempt
// and forwards to the implementation.
Future<Version> FileBackupAgent::restore(Database cx, Key tagName, Key url, bool waitForComplete, Version targetVersion, bool verbose, KeyRange range, Key addPrefix, Key removePrefix, bool lockDB) {
	UID restoreUid = g_random->randomUniqueID();
	return FileBackupAgentImpl::restore(this, cx, tagName, url, waitForComplete, targetVersion, verbose, range, addPrefix, removePrefix, lockDB, restoreUid);
}
// Thin forwarding wrappers from the public FileBackupAgent interface to the
// implementation class and the fileBackup namespace helpers.
// (Fix: removed a stray semicolon that followed waitRestore's closing brace.)

Future<Version> FileBackupAgent::atomicRestore(Database cx, Key tagName, KeyRange range, Key addPrefix, Key removePrefix) {
	return FileBackupAgentImpl::atomicRestore(this, cx, tagName, range, addPrefix, removePrefix);
}

Future<ERestoreState> FileBackupAgent::abortRestore(Reference<ReadYourWritesTransaction> tr, Key tagName) {
	return fileBackup::abortRestore(tr, tagName);
}

Future<ERestoreState> FileBackupAgent::abortRestore(Database cx, Key tagName) {
	return fileBackup::abortRestore(cx, tagName);
}

Future<std::string> FileBackupAgent::restoreStatus(Reference<ReadYourWritesTransaction> tr, Key tagName) {
	return fileBackup::restoreStatus(tr, tagName);
}

Future<ERestoreState> FileBackupAgent::waitRestore(Database cx, Key tagName, bool verbose) {
	return FileBackupAgentImpl::waitRestore(cx, tagName, verbose);
}
2017-12-14 17:44:38 +08:00
// Forwarding wrappers to the FileBackupAgentImpl backup-control actors.

Future<Void> FileBackupAgent::submitBackup(Reference<ReadYourWritesTransaction> tr, Key outContainer, int snapshotIntervalSeconds, std::string tagName, Standalone<VectorRef<KeyRangeRef>> backupRanges, bool stopWhenDone) {
	return FileBackupAgentImpl::submitBackup(this, tr, outContainer, snapshotIntervalSeconds, tagName, backupRanges, stopWhenDone);
}

Future<Void> FileBackupAgent::discontinueBackup(Reference<ReadYourWritesTransaction> tr, Key tagName) {
	return FileBackupAgentImpl::discontinueBackup(this, tr, tagName);
}

Future<Void> FileBackupAgent::abortBackup(Reference<ReadYourWritesTransaction> tr, std::string tagName) {
	return FileBackupAgentImpl::abortBackup(this, tr, tagName);
}
2017-09-06 02:38:40 +08:00
// Forwarding wrappers to the FileBackupAgentImpl status/query actors.

Future<std::string> FileBackupAgent::getStatus(Database cx, int errorLimit, std::string tagName) {
	return FileBackupAgentImpl::getStatus(this, cx, errorLimit, tagName);
}

Future<Version> FileBackupAgent::getLastRestorable(Reference<ReadYourWritesTransaction> tr, Key tagName) {
	return FileBackupAgentImpl::getLastRestorable(this, tr, tagName);
}
2017-11-15 15:33:17 +08:00
void FileBackupAgent : : setLastRestorable ( Reference < ReadYourWritesTransaction > tr , Key tagName , Version version ) {
tr - > setOption ( FDBTransactionOptions : : ACCESS_SYSTEM_KEYS ) ;
tr - > setOption ( FDBTransactionOptions : : LOCK_AWARE ) ;
tr - > set ( lastRestorable . pack ( tagName ) , BinaryWriter : : toValue < Version > ( version , Unversioned ( ) ) ) ;
2017-05-26 04:48:44 +08:00
}
2017-11-15 15:33:17 +08:00
// Forwarding wrapper to the FileBackupAgentImpl waitBackup actor.
Future<int> FileBackupAgent::waitBackup(Database cx, std::string tagName, bool stopWhenDone) {
	return FileBackupAgentImpl::waitBackup(this, cx, tagName, stopWhenDone);
}
// Describes the backup in `container`; if defaultVersion is non-null and the
// backup has a max restorable version, writes that version through the pointer.
// Returns the description as text.
Future<std::string> FileBackupAgent::getBackupInfo(std::string container, Version* defaultVersion) {
	Reference<IBackupContainer> bc = IBackupContainer::openContainer(container);
	return map(bc->describeBackup(), [defaultVersion](BackupDescription const &desc) {
		if (defaultVersion != nullptr && desc.maxRestorableVersion.present())
			*defaultVersion = desc.maxRestorableVersion.get();
		return desc.toString();
	});
}