2017-05-26 04:48:44 +08:00
/*
* BackupAgentBase . actor . cpp
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013 - 2018 Apple Inc . and the FoundationDB project authors
2018-02-22 02:25:11 +08:00
*
2017-05-26 04:48:44 +08:00
* Licensed under the Apache License , Version 2.0 ( the " License " ) ;
* you may not use this file except in compliance with the License .
* You may obtain a copy of the License at
2018-02-22 02:25:11 +08:00
*
2017-05-26 04:48:44 +08:00
* http : //www.apache.org/licenses/LICENSE-2.0
2018-02-22 02:25:11 +08:00
*
2017-05-26 04:48:44 +08:00
* Unless required by applicable law or agreed to in writing , software
* distributed under the License is distributed on an " AS IS " BASIS ,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND , either express or implied .
* See the License for the specific language governing permissions and
* limitations under the License .
*/
2019-03-20 16:36:25 +08:00
#include <cstring>
#include <iomanip>
#include <time.h>

#include "fdbclient/BackupAgent.actor.h"
#include "fdbrpc/simulator.h"
#include "flow/ActorCollection.h"
#include "flow/actorcompiler.h" // has to be last include
2017-05-26 04:48:44 +08:00
2021-07-09 20:41:33 +08:00
// Definitions for the strongly-typed boolean parameters declared in
// BackupAgent.actor.h, kept in alphabetical order for easy scanning.
FDB_DEFINE_BOOLEAN_PARAM(AbortOldBackup);
FDB_DEFINE_BOOLEAN_PARAM(CheckBackupUID);
FDB_DEFINE_BOOLEAN_PARAM(DeleteData);
FDB_DEFINE_BOOLEAN_PARAM(DstOnly);
FDB_DEFINE_BOOLEAN_PARAM(ForceAction);
FDB_DEFINE_BOOLEAN_PARAM(InconsistentSnapshotOnly);
FDB_DEFINE_BOOLEAN_PARAM(LockDB);
FDB_DEFINE_BOOLEAN_PARAM(PartialBackup);
FDB_DEFINE_BOOLEAN_PARAM(SetValidation);
FDB_DEFINE_BOOLEAN_PARAM(ShowErrors);
FDB_DEFINE_BOOLEAN_PARAM(StopWhenDone);
FDB_DEFINE_BOOLEAN_PARAM(Terminator);
FDB_DEFINE_BOOLEAN_PARAM(UnlockDB);
FDB_DEFINE_BOOLEAN_PARAM(UsePartitionedLog);
FDB_DEFINE_BOOLEAN_PARAM(Verbose);
FDB_DEFINE_BOOLEAN_PARAM(WaitForComplete);
FDB_DEFINE_BOOLEAN_PARAM(WaitForDestUID);
2021-07-05 04:14:25 +08:00
2019-03-20 16:18:37 +08:00
std : : string BackupAgentBase : : formatTime ( int64_t epochs ) {
time_t curTime = ( time_t ) epochs ;
char buffer [ 30 ] ;
struct tm timeinfo ;
getLocalTime ( & curTime , & timeinfo ) ;
strftime ( buffer , 30 , " %Y/%m/%d.%H:%M:%S%z " , & timeinfo ) ;
return buffer ;
}
int64_t BackupAgentBase : : parseTime ( std : : string timestamp ) {
2019-03-20 17:39:49 +08:00
struct tm out ;
2019-03-27 00:00:45 +08:00
out . tm_isdst = - 1 ; // This field is not set by strptime. -1 tells mktime to determine whether DST is in effect
2019-03-22 10:38:07 +08:00
std : : string timeOnly = timestamp . substr ( 0 , 19 ) ;
// TODO: Use std::get_time implementation for all platforms once supported
// It would be nice to read the timezone using %z, but it seems not all get_time()
// or strptime() implementations handle it correctly in all environments so we
2019-03-23 02:02:38 +08:00
// will read the date and time independent of timezone at first and then adjust it.
2019-03-20 17:39:49 +08:00
# ifdef _WIN32
2019-03-22 10:38:07 +08:00
std : : istringstream s ( timeOnly ) ;
2019-03-20 16:18:37 +08:00
s . imbue ( std : : locale ( setlocale ( LC_TIME , nullptr ) ) ) ;
s > > std : : get_time ( & out , " %Y/%m/%d.%H:%M:%S " ) ;
if ( s . fail ( ) ) {
return - 1 ;
}
2019-03-22 10:38:07 +08:00
# else
2021-03-11 02:06:03 +08:00
if ( strptime ( timeOnly . c_str ( ) , " %Y/%m/%d.%H:%M:%S " , & out ) = = nullptr ) {
2019-03-22 10:38:07 +08:00
return - 1 ;
}
# endif
2019-03-20 16:18:37 +08:00
// Read timezone offset in +/-HHMM format then convert to seconds
int tzHH ;
int tzMM ;
2021-03-11 02:06:03 +08:00
if ( sscanf ( timestamp . substr ( 19 , 5 ) . c_str ( ) , " %3d%2d " , & tzHH , & tzMM ) ! = 2 ) {
2019-03-20 16:18:37 +08:00
return - 1 ;
}
2021-03-11 02:06:03 +08:00
if ( tzHH < 0 ) {
2019-03-20 16:18:37 +08:00
tzMM = - tzMM ;
}
2019-03-21 02:53:24 +08:00
// tzOffset is the number of seconds EAST of GMT
2019-03-20 16:18:37 +08:00
int tzOffset = tzHH * 60 * 60 + tzMM * 60 ;
2021-03-11 02:06:03 +08:00
// The goal is to convert the timestamp string to epoch seconds assuming the date/time was expressed in the timezone
// at the end of the string. However, mktime() will ONLY return epoch seconds assuming the date/time is expressed in
// local time (based on locale / environment) mktime() will set out.tm_gmtoff when available
2019-03-20 16:18:37 +08:00
int64_t ts = mktime ( & out ) ;
2019-03-21 03:36:08 +08:00
// localTZOffset is the number of seconds EAST of GMT
long localTZOffset ;
2019-03-21 02:53:24 +08:00
# ifdef _WIN32
2019-03-21 03:36:08 +08:00
// _get_timezone() returns the number of seconds WEST of GMT
2021-03-11 02:06:03 +08:00
if ( _get_timezone ( & localTZOffset ) ! = 0 ) {
2019-03-21 03:36:08 +08:00
return - 1 ;
}
// Negate offset to match the orientation of tzOffset
localTZOffset = - localTZOffset ;
2019-03-21 02:53:24 +08:00
# else
// tm.tm_gmtoff is the number of seconds EAST of GMT
2019-03-21 03:36:08 +08:00
localTZOffset = out . tm_gmtoff ;
2019-03-21 02:53:24 +08:00
# endif
2019-03-21 03:36:08 +08:00
2021-03-11 02:06:03 +08:00
// Add back the difference between the local timezone assumed by mktime() and the intended timezone from the input
// string
2019-03-21 03:36:08 +08:00
ts + = ( localTZOffset - tzOffset ) ;
2019-03-20 16:18:37 +08:00
return ts ;
}
2021-07-05 10:07:52 +08:00
// Well-known key names used by the backup/DR task framework.

// Per-backup configuration keys.
const Key BackupAgentBase::keyFolderId = "config_folderid"_sr;
const Key BackupAgentBase::keyConfigBackupTag = "config_backup_tag"_sr;
const Key BackupAgentBase::keyConfigLogUid = "config_log_uid"_sr;
const Key BackupAgentBase::keyConfigBackupRanges = "config_backup_ranges"_sr;
const Key BackupAgentBase::keyConfigStopWhenDoneKey = "config_stop_when_done"_sr;

// Version bookkeeping keys.
const Key BackupAgentBase::keyBeginVersion = "beginVersion"_sr;
const Key BackupAgentBase::keyEndVersion = "endVersion"_sr;
const Key BackupAgentBase::keyPrevBeginVersion = "prevBeginVersion"_sr;
const Key BackupAgentBase::keyStateLogBeginVersion = "last_begin_version"_sr;
const Key BackupAgentBase::keyDrVersion = "drVersion"_sr;
const Key BackupAgentBase::backupStartVersion = "backupStartVersion"_sr;

// State and identity keys.
const Key BackupAgentBase::keyStateStop = "state_stop"_sr;
const Key BackupAgentBase::keyStateStatus = "state_status"_sr;
const Key BackupAgentBase::keyLastUid = "last_uid"_sr;
const Key BackupAgentBase::keyBeginKey = "beginKey"_sr;
const Key BackupAgentBase::keyEndKey = "endKey"_sr;
const Key BackupAgentBase::destUid = "destUid"_sr;
const Key BackupAgentBase::keyTagName = "tagname"_sr;

// Keyspace section names.
const Key BackupAgentBase::keyStates = "state"_sr;
const Key BackupAgentBase::keyConfig = "config"_sr;
const Key BackupAgentBase::keyErrors = "errors"_sr;
const Key BackupAgentBase::keyRanges = "ranges"_sr;
const Key BackupAgentBase::keyTasks = "tasks"_sr;
const Key BackupAgentBase::keyFutures = "futures"_sr;
const Key BackupAgentBase::keySourceStates = "source_states"_sr;
const Key BackupAgentBase::keySourceTagName = "source_tagname"_sr;
2017-05-26 04:48:44 +08:00
// Copy the parameter stored under 'key' from 'source' task to 'dest' task.
// Returns true iff 'source' was non-null and the copy was performed.
bool copyParameter(Reference<Task> source, Reference<Task> dest, Key key) {
	if (!source) {
		return false;
	}
	dest->params[key] = source->params[key];
	return true;
}
// Parse a decimal version number from 'value'. Throws restore_invalid_version
// if the string is not a version number or has trailing garbage.
Version getVersionFromString(std::string const& value) {
	Version version = invalidVersion;
	int charsConsumed = 0;
	// The %n check rejects strings with anything after the number.
	bool parsed = sscanf(value.c_str(), "%lld%n", (long long*)&version, &charsConsumed) == 1;
	if (!parsed || charsConsumed != value.size()) {
		TraceEvent(SevWarnAlways, "GetVersionFromString").detail("InvalidVersion", value);
		throw restore_invalid_version();
	}
	return version;
}
// Transaction log data is stored by the FoundationDB core in the
2021-03-31 00:26:48 +08:00
// "backupLogKeys" (i.e., \xff\x02/blog/) keyspace in a funny order for
// performance reasons.
// Returns the ranges of keys that contain the data for the given range
2017-05-26 04:48:44 +08:00
// of versions.
2020-06-16 18:59:47 +08:00
// assert CLIENT_KNOBS->LOG_RANGE_BLOCK_SIZE % blocksize = 0. Otherwise calculation of hash will be incorrect
2021-03-11 02:06:03 +08:00
Standalone < VectorRef < KeyRangeRef > > getLogRanges ( Version beginVersion ,
Version endVersion ,
Key destUidValue ,
int blockSize ) {
2017-05-26 04:48:44 +08:00
Standalone < VectorRef < KeyRangeRef > > ret ;
2018-02-21 05:22:31 +08:00
Key baLogRangePrefix = destUidValue . withPrefix ( backupLogKeys . begin ) ;
2017-05-26 04:48:44 +08:00
2019-04-04 09:02:03 +08:00
//TraceEvent("GetLogRanges").detail("DestUidValue", destUidValue).detail("Prefix", baLogRangePrefix);
2017-05-26 04:48:44 +08:00
for ( int64_t vblock = beginVersion / blockSize ; vblock < ( endVersion + blockSize - 1 ) / blockSize ; + + vblock ) {
int64_t tb = vblock * blockSize / CLIENT_KNOBS - > LOG_RANGE_BLOCK_SIZE ;
uint64_t bv = bigEndian64 ( std : : max ( beginVersion , vblock * blockSize ) ) ;
uint64_t ev = bigEndian64 ( std : : min ( endVersion , ( vblock + 1 ) * blockSize ) ) ;
uint32_t data = tb & 0xffffffff ;
uint8_t hash = ( uint8_t ) hashlittle ( & data , sizeof ( uint32_t ) , 0 ) ;
Key vblockPrefix = StringRef ( & hash , sizeof ( uint8_t ) ) . withPrefix ( baLogRangePrefix ) ;
2021-03-11 02:06:03 +08:00
ret . push_back_deep ( ret . arena ( ) ,
KeyRangeRef ( StringRef ( ( uint8_t * ) & bv , sizeof ( uint64_t ) ) . withPrefix ( vblockPrefix ) ,
StringRef ( ( uint8_t * ) & ev , sizeof ( uint64_t ) ) . withPrefix ( vblockPrefix ) ) ) ;
2017-05-26 04:48:44 +08:00
}
return ret ;
}
// Same key layout as getLogRanges(), but for the applyLogKeys keyspace and
// with the block size fixed at CLIENT_KNOBS->APPLY_BLOCK_SIZE.
Standalone<VectorRef<KeyRangeRef>> getApplyRanges(Version beginVersion, Version endVersion, Key backupUid) {
	Standalone<VectorRef<KeyRangeRef>> ret;
	Key baLogRangePrefix = backupUid.withPrefix(applyLogKeys.begin);

	//TraceEvent("GetLogRanges").detail("BackupUid", backupUid).detail("Prefix", baLogRangePrefix);

	for (int64_t vblock = beginVersion / CLIENT_KNOBS->APPLY_BLOCK_SIZE;
	     vblock < (endVersion + CLIENT_KNOBS->APPLY_BLOCK_SIZE - 1) / CLIENT_KNOBS->APPLY_BLOCK_SIZE;
	     ++vblock) {
		int64_t hashBlock = vblock * CLIENT_KNOBS->APPLY_BLOCK_SIZE / CLIENT_KNOBS->LOG_RANGE_BLOCK_SIZE;
		// Clamped per-block begin/end versions, big-endian so byte order
		// matches version order.
		uint64_t beginBE = bigEndian64(std::max(beginVersion, vblock * CLIENT_KNOBS->APPLY_BLOCK_SIZE));
		uint64_t endBE = bigEndian64(std::min(endVersion, (vblock + 1) * CLIENT_KNOBS->APPLY_BLOCK_SIZE));
		uint32_t hashInput = hashBlock & 0xffffffff;
		uint8_t hash = (uint8_t)hashlittle(&hashInput, sizeof(uint32_t), 0);
		Key vblockPrefix = StringRef(&hash, sizeof(uint8_t)).withPrefix(baLogRangePrefix);

		ret.push_back_deep(ret.arena(),
		                   KeyRangeRef(StringRef((uint8_t*)&beginBE, sizeof(uint64_t)).withPrefix(vblockPrefix),
		                               StringRef((uint8_t*)&endBE, sizeof(uint64_t)).withPrefix(vblockPrefix)));
	}
	return ret;
}
2021-03-11 02:06:03 +08:00
// Build the exact applyLogKeys key for 'version' under 'backupUid':
// applyLogKeys.begin + uid + hash(block index) + bigEndian64(version).
Key getApplyKey(Version version, Key backupUid) {
	int64_t vblock = (version - 1) / CLIENT_KNOBS->LOG_RANGE_BLOCK_SIZE;
	uint64_t versionBE = bigEndian64(version);
	uint32_t hashInput = vblock & 0xffffffff;
	uint8_t hash = (uint8_t)hashlittle(&hashInput, sizeof(uint32_t), 0);
	Key versionWithHash = StringRef((uint8_t*)&versionBE, sizeof(uint64_t)).withPrefix(StringRef(&hash, sizeof(uint8_t)));
	Key withUid = versionWithHash.withPrefix(backupUid);
	return withUid.withPrefix(applyLogKeys.begin);
}
2021-08-10 05:36:07 +08:00
// Build the exact backupLogKeys key for 'version' under 'backupUid':
// backupLogKeys.begin + uid + hash(block index) + bigEndian64(version).
Key getLogKey(Version version, Key backupUid) {
	int64_t vblock = (version - 1) / CLIENT_KNOBS->LOG_RANGE_BLOCK_SIZE;
	uint64_t versionBE = bigEndian64(version);
	uint32_t hashInput = vblock & 0xffffffff;
	uint8_t hash = (uint8_t)hashlittle(&hashInput, sizeof(uint32_t), 0);
	Key versionWithHash = StringRef((uint8_t*)&versionBE, sizeof(uint64_t)).withPrefix(StringRef(&hash, sizeof(uint8_t)));
	Key withUid = versionWithHash.withPrefix(backupUid);
	return withUid.withPrefix(backupLogKeys.begin);
}
2020-06-16 18:59:47 +08:00
// Extract the big-endian version number embedded in a backup log key
// (layout: prefix + UID + 1-byte hash + bigEndian64(version) + ...).
Version getLogKeyVersion(Key key) {
	// Use memcpy rather than dereferencing a cast pointer: the version field
	// sits at an arbitrary byte offset, so a direct *(int64_t*) read is an
	// unaligned, strict-aliasing-violating access (undefined behavior).
	int64_t versionBE;
	memcpy(&versionBE, key.begin() + backupLogPrefixBytes + sizeof(UID) + sizeof(uint8_t), sizeof(versionBE));
	return bigEndian64(versionBE);
}
2021-03-11 02:06:03 +08:00
// Given a key from one of the ranges returned by get_log_ranges,
// returns(version, part) where version is the database version number of
// the transaction log data in the value, and part is 0 for the first such
// data for a given version, 1 for the second block of data, etc.
2019-09-13 06:40:14 +08:00
std : : pair < Version , uint32_t > decodeBKMutationLogKey ( Key key ) {
2021-03-11 02:06:03 +08:00
return std : : make_pair (
getLogKeyVersion ( key ) ,
bigEndian32 ( * ( int32_t * ) ( key . begin ( ) + backupLogPrefixBytes + sizeof ( UID ) + sizeof ( uint8_t ) + sizeof ( int64_t ) ) ) ) ;
2017-05-26 04:48:44 +08:00
}
2021-03-11 02:06:03 +08:00
// Decode one backup-log value blob into mutations, appending them to 'result'
// (allocated from 'arena') and accumulating their size into 'mutationSize'.
// Mutations are filtered by 'key_version' (only keys whose recorded version is
// valid and older than 'version' are applied) and rewritten from
// 'removePrefix' to 'addPrefix'. Throws incompatible_protocol_version,
// restore_missing_data, or restore_corrupted_data on malformed input.
void decodeBackupLogValue(Arena& arena,
                          VectorRef<MutationRef>& result,
                          int& mutationSize,
                          StringRef value,
                          StringRef addPrefix,
                          StringRef removePrefix,
                          Version version,
                          Reference<KeyRangeMap<Version>> key_version) {
	try {
		// Header: 8-byte protocol version followed by a 4-byte payload length.
		uint64_t offset(0);
		uint64_t protocolVersion = 0;
		memcpy(&protocolVersion, value.begin(), sizeof(uint64_t));
		offset += sizeof(uint64_t);
		if (protocolVersion <= 0x0FDB00A200090001) {
			TraceEvent(SevError, "DecodeBackupLogValue")
			    .detail("IncompatibleProtocolVersion", protocolVersion)
			    .detail("ValueSize", value.size())
			    .detail("Value", value);
			throw incompatible_protocol_version();
		}

		uint32_t totalBytes = 0;
		memcpy(&totalBytes, value.begin() + offset, sizeof(uint32_t));
		offset += sizeof(uint32_t);
		uint32_t consumed = 0;

		if (totalBytes + offset > value.size())
			throw restore_missing_data();

		int originalOffset = offset;

		while (consumed < totalBytes) {
			// Each mutation is encoded as (type, len1, len2) followed by the
			// param1 and param2 byte strings.
			uint32_t type = 0;
			memcpy(&type, value.begin() + offset, sizeof(uint32_t));
			offset += sizeof(uint32_t);
			uint32_t len1 = 0;
			memcpy(&len1, value.begin() + offset, sizeof(uint32_t));
			offset += sizeof(uint32_t);
			uint32_t len2 = 0;
			memcpy(&len2, value.begin() + offset, sizeof(uint32_t));
			offset += sizeof(uint32_t);

			ASSERT(offset + len1 + len2 <= value.size() && isValidMutationType(type));

			MutationRef logValue;
			Arena tempArena;
			logValue.type = type;
			logValue.param1 = value.substr(offset, len1);
			offset += len1;
			logValue.param2 = value.substr(offset, len2);
			offset += len2;

			if (logValue.type == MutationRef::ClearRange) {
				// A clear can span several tracked ranges; emit one clamped
				// clear per intersecting range that is eligible at 'version'.
				KeyRangeRef range(logValue.param1, logValue.param2);
				auto ranges = key_version->intersectingRanges(range);
				for (auto r : ranges) {
					if (version > r.value() && r.value() != invalidVersion) {
						KeyRef minKey = std::min(r.range().end, range.end);
						if (minKey == (removePrefix == StringRef() ? normalKeys.end : strinc(removePrefix))) {
							// The clear reaches the end of the restorable
							// keyspace; express the end bound in terms of the
							// destination prefix instead of translating it.
							logValue.param1 = std::max(r.range().begin, range.begin);
							if (removePrefix.size()) {
								logValue.param1 = logValue.param1.removePrefix(removePrefix);
							}
							if (addPrefix.size()) {
								logValue.param1 = logValue.param1.withPrefix(addPrefix, tempArena);
							}
							logValue.param2 = addPrefix == StringRef() ? normalKeys.end : strinc(addPrefix, tempArena);
							result.push_back_deep(arena, logValue);
							mutationSize += logValue.expectedSize();
						} else {
							logValue.param1 = std::max(r.range().begin, range.begin);
							logValue.param2 = minKey;
							if (removePrefix.size()) {
								logValue.param1 = logValue.param1.removePrefix(removePrefix);
								logValue.param2 = logValue.param2.removePrefix(removePrefix);
							}
							if (addPrefix.size()) {
								logValue.param1 = logValue.param1.withPrefix(addPrefix, tempArena);
								logValue.param2 = logValue.param2.withPrefix(addPrefix, tempArena);
							}
							result.push_back_deep(arena, logValue);
							mutationSize += logValue.expectedSize();
						}
					}
				}
			} else {
				// Point mutation: apply only if its key's tracked version is
				// valid and older than the mutation's version.
				Version ver = key_version->rangeContaining(logValue.param1).value();
				//TraceEvent("ApplyMutation").detail("LogValue", logValue).detail("Version", version).detail("Ver", ver).detail("Apply", version > ver && ver != invalidVersion);
				if (version > ver && ver != invalidVersion) {
					if (removePrefix.size()) {
						logValue.param1 = logValue.param1.removePrefix(removePrefix);
					}
					if (addPrefix.size()) {
						logValue.param1 = logValue.param1.withPrefix(addPrefix, tempArena);
					}
					result.push_back_deep(arena, logValue);
					mutationSize += logValue.expectedSize();
				}
			}

			consumed += BackupAgentBase::logHeaderSize + len1 + len2;
		}

		ASSERT(consumed == totalBytes);
		if (value.size() != offset) {
			TraceEvent(SevError, "BA_DecodeBackupLogValue")
			    .detail("UnexpectedExtraDataSize", value.size())
			    .detail("Offset", offset)
			    .detail("TotalBytes", totalBytes)
			    .detail("Consumed", consumed)
			    .detail("OriginalOffset", originalOffset);
			throw restore_corrupted_data();
		}
	} catch (Error& e) {
		TraceEvent(e.code() == error_code_restore_missing_data ? SevWarn : SevError, "BA_DecodeBackupLogValue")
		    .error(e)
		    .GetLastError()
		    .detail("ValueSize", value.size())
		    .detail("Value", value);
		throw;
	}
}
2019-09-13 06:40:14 +08:00
2017-05-26 04:48:44 +08:00
static double lastErrorTime = 0 ;
2019-09-13 06:40:14 +08:00
2018-09-11 01:51:41 +08:00
void logErrorWorker ( Reference < ReadYourWritesTransaction > tr , Key keyErrors , std : : string message ) {
2017-05-26 04:48:44 +08:00
tr - > setOption ( FDBTransactionOptions : : ACCESS_SYSTEM_KEYS ) ;
tr - > setOption ( FDBTransactionOptions : : LOCK_AWARE ) ;
2021-03-11 02:06:03 +08:00
if ( now ( ) - lastErrorTime > CLIENT_KNOBS - > BACKUP_ERROR_DELAY ) {
2019-03-19 06:03:43 +08:00
TraceEvent ( " BA_LogError " ) . detail ( " Key " , keyErrors ) . detail ( " Message " , message ) ;
2017-05-26 04:48:44 +08:00
lastErrorTime = now ( ) ;
}
tr - > set ( keyErrors , message ) ;
}
// Write 'message' under 'keyErrors' in its own retry-looped transaction.
Future<Void> logError(Database cx, Key keyErrors, const std::string& message) {
	auto writeError = [=](Reference<ReadYourWritesTransaction> tr) {
		logErrorWorker(tr, keyErrors, message);
		return Future<Void>(Void());
	};
	return runRYWTransaction(cx, writeError);
}
// Convenience overload: log the error against the database that owns 'tr'.
// Note the write happens in a separate transaction, not in 'tr' itself.
Future<Void> logError(Reference<ReadYourWritesTransaction> tr, Key keyErrors, const std::string& message) {
	Database cx = tr->getDatabase();
	return logError(cx, keyErrors, message);
}
2021-03-11 02:06:03 +08:00
// Stream the contents of 'range' to 'results' as (RangeResult, readVersion)
// pairs. Flow control is provided by 'lock': this actor acquires budget before
// each read and leaves the per-batch remainder for the consumer to release.
// When 'terminator' is set, end_of_stream is sent once the range is exhausted.
ACTOR Future<Void> readCommitted(Database cx,
                                 PromiseStream<RangeResultWithVersion> results,
                                 Reference<FlowLock> lock,
                                 KeyRangeRef range,
                                 Terminator terminator,
                                 AccessSystemKeys systemAccess,
                                 LockAware lockAware) {
	state KeySelector begin = firstGreaterOrEqual(range.begin);
	state KeySelector end = firstGreaterOrEqual(range.end);
	state Transaction tr(cx);
	state FlowLock::Releaser releaser;

	loop {
		try {
			// Use a smaller byte limit in (non-sped-up) simulation to exercise
			// the continuation path more often.
			state GetRangeLimits limits(GetRangeLimits::ROW_LIMIT_UNLIMITED,
			                            (g_network->isSimulated() && !g_simulator.speedUpSimulation)
			                                ? CLIENT_KNOBS->BACKUP_SIMULATED_LIMIT_BYTES
			                                : CLIENT_KNOBS->BACKUP_GET_RANGE_LIMIT_BYTES);

			if (systemAccess)
				tr.setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
			if (lockAware)
				tr.setOption(FDBTransactionOptions::LOCK_AWARE);

			// Acquire flow-control budget for the worst-case batch size before
			// issuing the read, releasing any budget held from the prior loop.
			releaser.release();
			wait(lock->take(TaskPriority::DefaultYield,
			                limits.bytes + CLIENT_KNOBS->VALUE_SIZE_LIMIT + CLIENT_KNOBS->SYSTEM_KEY_SIZE_LIMIT));
			releaser = FlowLock::Releaser(
			    *lock, limits.bytes + CLIENT_KNOBS->VALUE_SIZE_LIMIT + CLIENT_KNOBS->SYSTEM_KEY_SIZE_LIMIT);

			state RangeResult values = wait(tr.getRange(begin, end, limits));

			// When this buggify line is enabled, if there are more than 1 result then use half of the results.
			// Copy the data instead of messing with the results directly to avoid TSS issues.
			if (values.size() > 1 && BUGGIFY) {
				RangeResult firstHalf;
				for (int i = 0; i < values.size() / 2; i++) {
					firstHalf.push_back_deep(firstHalf.arena(), values[i]);
				}
				values = firstHalf;
				values.more = true;
				// Half of the time wait for this tr to expire so that the next read is at a different version
				if (deterministicRandom()->random01() < 0.5)
					wait(delay(6.0));
			}

			// Keep only the unsent budget; releasing the consumed portion is
			// the receiver's responsibility from here on.
			releaser.remaining -= values.expectedSize();
			ASSERT(releaser.remaining >= 0);

			results.send(RangeResultWithVersion(values, tr.getReadVersion().get()));

			if (values.size() > 0)
				begin = firstGreaterThan(values.end()[-1].key);

			if (!values.more && !limits.isReached()) {
				if (terminator)
					results.sendError(end_of_stream());
				return Void();
			}
		} catch (Error& e) {
			if (e.code() == error_code_transaction_too_old) {
				// We are using this transaction until it's too old and then resetting to a fresh one,
				// so we don't need to delay.
				tr.fullReset();
			} else {
				wait(tr.onError(e));
			}
		}
	}
}
2021-03-11 02:06:03 +08:00
ACTOR Future < Void > readCommitted ( Database cx ,
PromiseStream < RCGroup > results ,
Future < Void > active ,
Reference < FlowLock > lock ,
KeyRangeRef range ,
std : : function < std : : pair < uint64_t , uint32_t > ( Key key ) > groupBy ,
2021-07-05 04:14:25 +08:00
Terminator terminator ,
AccessSystemKeys systemAccess ,
2021-07-03 12:41:50 +08:00
LockAware lockAware ) {
2017-05-26 04:48:44 +08:00
state KeySelector nextKey = firstGreaterOrEqual ( range . begin ) ;
state KeySelector end = firstGreaterOrEqual ( range . end ) ;
state RCGroup rcGroup = RCGroup ( ) ;
state uint64_t skipGroup ( ULLONG_MAX ) ;
2018-03-08 05:56:34 +08:00
state Transaction tr ( cx ) ;
2017-05-26 04:48:44 +08:00
state FlowLock : : Releaser releaser ;
2021-03-11 02:06:03 +08:00
loop {
2017-05-26 04:48:44 +08:00
try {
2020-07-05 03:03:47 +08:00
state GetRangeLimits limits ( GetRangeLimits : : ROW_LIMIT_UNLIMITED ,
( g_network - > isSimulated ( ) & & ! g_simulator . speedUpSimulation )
? CLIENT_KNOBS - > BACKUP_SIMULATED_LIMIT_BYTES
: CLIENT_KNOBS - > BACKUP_GET_RANGE_LIMIT_BYTES ) ;
2017-12-22 09:21:05 +08:00
2017-05-26 04:48:44 +08:00
if ( systemAccess )
2018-03-08 05:56:34 +08:00
tr . setOption ( FDBTransactionOptions : : ACCESS_SYSTEM_KEYS ) ;
2017-05-26 04:48:44 +08:00
if ( lockAware )
2018-03-08 05:56:34 +08:00
tr . setOption ( FDBTransactionOptions : : LOCK_AWARE ) ;
2017-05-26 04:48:44 +08:00
2021-05-04 04:14:16 +08:00
state RangeResult rangevalue = wait ( tr . getRange ( nextKey , end , limits ) ) ;
2017-05-26 04:48:44 +08:00
2021-03-06 03:28:15 +08:00
// When this buggify line is enabled, if there are more than 1 result then use half of the results.
// Copy the data instead of messing with the results directly to avoid TSS issues.
2021-03-11 02:06:03 +08:00
if ( rangevalue . size ( ) > 1 & & BUGGIFY ) {
2021-05-13 02:53:20 +08:00
RangeResult copy ;
2021-03-06 03:28:15 +08:00
// only copy first half of rangevalue into copy
for ( int i = 0 ; i < rangevalue . size ( ) / 2 ; i + + ) {
copy . push_back_deep ( copy . arena ( ) , rangevalue [ i ] ) ;
}
rangevalue = copy ;
2017-05-26 04:48:44 +08:00
rangevalue . more = true ;
// Half of the time wait for this tr to expire so that the next read is at a different version
2021-03-11 02:06:03 +08:00
if ( deterministicRandom ( ) - > random01 ( ) < 0.5 )
2018-08-11 04:57:10 +08:00
wait ( delay ( 6.0 ) ) ;
2017-05-26 04:48:44 +08:00
}
2021-03-11 02:06:03 +08:00
// add lock
2018-08-11 04:57:10 +08:00
wait ( active ) ;
2017-05-26 04:48:44 +08:00
releaser . release ( ) ;
2019-06-25 17:47:35 +08:00
wait ( lock - > take ( TaskPriority : : DefaultYield , rangevalue . expectedSize ( ) + rcGroup . items . expectedSize ( ) ) ) ;
2017-05-26 04:48:44 +08:00
releaser = FlowLock : : Releaser ( * lock , rangevalue . expectedSize ( ) + rcGroup . items . expectedSize ( ) ) ;
2021-03-11 02:06:03 +08:00
for ( auto & s : rangevalue ) {
2017-05-26 04:48:44 +08:00
uint64_t groupKey = groupBy ( s . key ) . first ;
2019-03-19 06:03:43 +08:00
//TraceEvent("Log_ReadCommitted").detail("GroupKey", groupKey).detail("SkipGroup", skipGroup).detail("NextKey", nextKey.key).detail("End", end.key).detail("Valuesize", value.size()).detail("Index",index++).detail("Size",s.value.size());
2021-03-11 02:06:03 +08:00
if ( groupKey ! = skipGroup ) {
if ( rcGroup . version = = - 1 ) {
2018-03-08 05:56:34 +08:00
rcGroup . version = tr . getReadVersion ( ) . get ( ) ;
2017-05-26 04:48:44 +08:00
rcGroup . groupKey = groupKey ;
2021-03-11 02:06:03 +08:00
} else if ( rcGroup . groupKey ! = groupKey ) {
2018-06-09 04:57:00 +08:00
//TraceEvent("Log_ReadCommitted").detail("SendGroup0", rcGroup.groupKey).detail("ItemSize", rcGroup.items.size()).detail("DataLength",rcGroup.items[0].value.size());
2021-03-11 02:06:03 +08:00
// state uint32_t len(0);
// for (size_t j = 0; j < rcGroup.items.size(); ++j) {
2017-05-26 04:48:44 +08:00
// len += rcGroup.items[j].value.size();
//}
2018-06-09 02:11:08 +08:00
//TraceEvent("SendGroup").detail("GroupKey", rcGroup.groupKey).detail("Version", rcGroup.version).detail("Length", len).detail("Releaser.remaining", releaser.remaining);
2021-03-11 02:06:03 +08:00
releaser . remaining - =
rcGroup . items
. expectedSize ( ) ; // its the responsibility of the caller to release after this point
2017-05-26 04:48:44 +08:00
ASSERT ( releaser . remaining > = 0 ) ;
results . send ( rcGroup ) ;
nextKey = firstGreaterThan ( rcGroup . items . end ( ) [ - 1 ] . key ) ;
skipGroup = rcGroup . groupKey ;
rcGroup = RCGroup ( ) ;
2018-03-08 05:56:34 +08:00
rcGroup . version = tr . getReadVersion ( ) . get ( ) ;
2017-05-26 04:48:44 +08:00
rcGroup . groupKey = groupKey ;
}
rcGroup . items . push_back_deep ( rcGroup . items . arena ( ) , s ) ;
}
}
if ( ! rangevalue . more ) {
2021-03-11 02:06:03 +08:00
if ( rcGroup . version ! = - 1 ) {
releaser . remaining - =
rcGroup . items
. expectedSize ( ) ; // its the responsibility of the caller to release after this point
2017-05-26 04:48:44 +08:00
ASSERT ( releaser . remaining > = 0 ) ;
2018-06-09 02:20:06 +08:00
//TraceEvent("Log_ReadCommitted").detail("SendGroup1", rcGroup.groupKey).detail("ItemSize", rcGroup.items.size()).detail("DataLength", rcGroup.items[0].value.size());
2017-05-26 04:48:44 +08:00
results . send ( rcGroup ) ;
}
2021-03-11 02:06:03 +08:00
if ( terminator )
2017-11-15 15:33:17 +08:00
results . sendError ( end_of_stream ( ) ) ;
2017-05-26 04:48:44 +08:00
return Void ( ) ;
}
nextKey = firstGreaterThan ( rangevalue . end ( ) [ - 1 ] . key ) ;
2021-03-11 02:06:03 +08:00
} catch ( Error & e ) {
2019-04-09 05:21:24 +08:00
if ( e . code ( ) = = error_code_transaction_too_old ) {
// We are using this transaction until it's too old and then resetting to a fresh one,
// so we don't need to delay.
tr . fullReset ( ) ;
2021-03-11 02:06:03 +08:00
} else {
2019-04-09 05:21:24 +08:00
wait ( tr . onError ( e ) ) ;
}
2017-05-26 04:48:44 +08:00
}
}
}
2021-03-11 02:06:03 +08:00
// Convenience overload: grouped read over the whole range with system-key
// access, lock awareness, and a terminating end_of_stream, active immediately.
Future<Void> readCommitted(Database cx,
                           PromiseStream<RCGroup> results,
                           Reference<FlowLock> lock,
                           KeyRangeRef range,
                           std::function<std::pair<uint64_t, uint32_t>(Key key)> groupBy) {
	return readCommitted(
	    cx, results, Void(), lock, range, groupBy, Terminator::True, AccessSystemKeys::True, LockAware::True);
}
2021-03-11 02:06:03 +08:00
// Drain RCGroups from 'results', decode their backup-log mutations, and submit
// them in batched commit requests (capped at BACKUP_LOG_WRITE_BATCH_MAX_SIZE
// per commit). Each commit also advances the apply-begin version marker and
// clears the consumed portion of the mutation log. Returns the total decoded
// mutation bytes once the input stream ends.
ACTOR Future<int> dumpData(Database cx,
                           PromiseStream<RCGroup> results,
                           Reference<FlowLock> lock,
                           Key uid,
                           Key addPrefix,
                           Key removePrefix,
                           RequestStream<CommitTransactionRequest> commit,
                           NotifiedVersion* committedVersion,
                           Optional<Version> endVersion,
                           Key rangeBegin,
                           PromiseStream<Future<Void>> addActor,
                           FlowLock* commitLock,
                           Reference<KeyRangeMap<Version>> keyVersion) {
	state Version lastVersion = invalidVersion;
	state bool endOfStream = false;
	state int totalBytes = 0;
	loop {
		state CommitTransactionRequest req;
		state Version newBeginVersion = invalidVersion;
		state int mutationSize = 0;

		// Accumulate decoded groups into one commit request until the batch
		// size cap is reached or the stream ends.
		loop {
			try {
				RCGroup group = waitNext(results.getFuture());
				lock->release(group.items.expectedSize());

				// Reassemble the (possibly multi-part) log value for this
				// version before decoding it.
				BinaryWriter bw(Unversioned());
				for (int i = 0; i < group.items.size(); ++i) {
					bw.serializeBytes(group.items[i].value);
				}
				decodeBackupLogValue(req.arena,
				                     req.transaction.mutations,
				                     mutationSize,
				                     bw.toValue(),
				                     addPrefix,
				                     removePrefix,
				                     group.groupKey,
				                     keyVersion);
				newBeginVersion = group.groupKey + 1;
				if (mutationSize >= CLIENT_KNOBS->BACKUP_LOG_WRITE_BATCH_MAX_SIZE) {
					break;
				}
			} catch (Error& e) {
				if (e.code() == error_code_end_of_stream) {
					if (endVersion.present() && endVersion.get() > lastVersion && endVersion.get() > newBeginVersion) {
						newBeginVersion = endVersion.get();
					}
					if (newBeginVersion == invalidVersion)
						return totalBytes;
					endOfStream = true;
					break;
				}
				throw;
			}
		}

		// Alongside the decoded mutations: advance the apply-begin marker and
		// clear the log keyspace that this batch has consumed.
		Key applyBegin = uid.withPrefix(applyMutationsBeginRange.begin);
		Key versionKey = BinaryWriter::toValue(newBeginVersion, Unversioned());
		Key rangeEnd = getApplyKey(newBeginVersion, uid);

		req.transaction.mutations.push_back_deep(req.arena, MutationRef(MutationRef::SetValue, applyBegin, versionKey));
		req.transaction.write_conflict_ranges.push_back_deep(req.arena, singleKeyRange(applyBegin));
		req.transaction.mutations.push_back_deep(req.arena, MutationRef(MutationRef::ClearRange, rangeBegin, rangeEnd));
		req.transaction.write_conflict_ranges.push_back_deep(req.arena, singleKeyRange(rangeBegin));

		// The commit request contains no read conflict ranges, so regardless of what read version we
		// choose, it's impossible for us to get a transaction_too_old error back, and it's impossible
		// for our transaction to be aborted due to conflicts.
		req.transaction.read_snapshot = committedVersion->get();
		req.flags = req.flags | CommitTransactionRequest::FLAG_IS_LOCK_AWARE;

		totalBytes += mutationSize;
		wait(commitLock->take(TaskPriority::DefaultYield, mutationSize));
		addActor.send(commitLock->releaseWhen(success(commit.getReply(req)), mutationSize));

		if (endOfStream) {
			return totalBytes;
		}
	}
}
2021-03-11 02:06:03 +08:00
// Builds and submits a single transaction that trims the applyMutationsKeyVersionMap cache for
// backup `uid`: when two adjacent entries in keyVersion both hold valid versions older than
// endVersion, the later entry is redundant and its map key is cleared. The cached entry count
// under applyMutationsKeyVersionCountRange is adjusted by the same amount via an atomic AddValue.
ACTOR Future<Void> coalesceKeyVersionCache(Key uid,
                                           Version endVersion,
                                           Reference<KeyRangeMap<Version>> keyVersion,
                                           RequestStream<CommitTransactionRequest> commit,
                                           NotifiedVersion* committedVersion,
                                           PromiseStream<Future<Void>> addActor,
                                           FlowLock* commitLock) {
	Version lastVersion = -1000; // sentinel meaning "no previous range seen yet"
	int64_t removed = 0; // negative count of cleared map entries (added atomically to the count key)
	state CommitTransactionRequest req;
	state int64_t mutationSize = 0;
	Key mapPrefix = uid.withPrefix(applyMutationsKeyVersionMapRange.begin);
	for (auto it : keyVersion->ranges()) {
		if (lastVersion == -1000) {
			lastVersion = it.value();
		} else {
			Version ver = it.value();
			if (ver < endVersion && lastVersion < endVersion && ver != invalidVersion &&
			    lastVersion != invalidVersion) {
				// Both this range and its predecessor are already applied past endVersion; the map
				// entry at this range's begin key carries no information and can be cleared.
				Key removeKey = it.range().begin.withPrefix(mapPrefix);
				Key removeEnd = keyAfter(removeKey);
				req.transaction.mutations.push_back_deep(req.arena,
				                                         MutationRef(MutationRef::ClearRange, removeKey, removeEnd));
				mutationSize += removeKey.size() + removeEnd.size();
				removed--;
			} else {
				lastVersion = ver;
			}
		}
	}
	if (removed != 0) {
		// Atomically decrement the cached entry count to match the keys cleared above.
		Key countKey = uid.withPrefix(applyMutationsKeyVersionCountRange.begin);
		req.transaction.write_conflict_ranges.push_back_deep(req.arena, singleKeyRange(countKey));
		req.transaction.mutations.push_back_deep(
		    req.arena, MutationRef(MutationRef::AddValue, countKey, StringRef((uint8_t*)&removed, 8)));
		req.transaction.read_snapshot = committedVersion->get();
		req.flags = req.flags | CommitTransactionRequest::FLAG_IS_LOCK_AWARE;
		// Throttle by mutation bytes; the lock is released once the commit reply arrives.
		wait(commitLock->take(TaskPriority::DefaultYield, mutationSize));
		addActor.send(commitLock->releaseWhen(success(commit.getReply(req)), mutationSize));
	}
	return Void();
}
2021-03-11 02:06:03 +08:00
// Drives application of backup-log mutations for backup `uid` to the database, starting at
// beginVersion and continuing until *endVersion (which the caller may advance while this actor
// runs). Work proceeds in APPLY_BLOCK_SIZE-aligned version windows: each window's log ranges are
// read via readCommitted() and pushed through dumpData(), which commits them on the `commit`
// stream. keyVersion tracks per-key-range restored versions and is coalesced after each window.
// maxBytes adaptively sizes the window based on observed mutation volume.
ACTOR Future<Void> applyMutations(Database cx,
                                  Key uid,
                                  Key addPrefix,
                                  Key removePrefix,
                                  Version beginVersion,
                                  Version* endVersion,
                                  RequestStream<CommitTransactionRequest> commit,
                                  NotifiedVersion* committedVersion,
                                  Reference<KeyRangeMap<Version>> keyVersion) {
	state FlowLock commitLock(CLIENT_KNOBS->BACKUP_LOCK_BYTES);
	state PromiseStream<Future<Void>> addActor;
	// Collects outstanding commit-release futures; error.isError() reports any failure among them.
	state Future<Void> error = actorCollection(addActor.getFuture());
	state int maxBytes = CLIENT_KNOBS->APPLY_MIN_LOCK_BYTES;

	keyVersion->insert(metadataVersionKey, 0);

	try {
		loop {
			if (beginVersion >= *endVersion) {
				// Drain all in-flight commits by taking the whole lock, then re-check:
				// *endVersion may have been advanced by the caller in the meantime.
				wait(commitLock.take(TaskPriority::DefaultYield, CLIENT_KNOBS->BACKUP_LOCK_BYTES));
				commitLock.release(CLIENT_KNOBS->BACKUP_LOCK_BYTES);
				if (beginVersion >= *endVersion) {
					return Void();
				}
			}

			int rangeCount = std::max(1, CLIENT_KNOBS->APPLY_MAX_LOCK_BYTES / maxBytes);
			state Version newEndVersion = std::min(*endVersion,
			                                       ((beginVersion / CLIENT_KNOBS->APPLY_BLOCK_SIZE) + rangeCount) *
			                                           CLIENT_KNOBS->APPLY_BLOCK_SIZE);
			state Standalone<VectorRef<KeyRangeRef>> ranges = getApplyRanges(beginVersion, newEndVersion, uid);
			state size_t idx;
			state std::vector<PromiseStream<RCGroup>> results;
			state std::vector<Future<Void>> rc;
			state std::vector<Reference<FlowLock>> locks;
			for (int i = 0; i < ranges.size(); ++i) {
				results.push_back(PromiseStream<RCGroup>());
				locks.push_back(makeReference<FlowLock>(
				    std::max(CLIENT_KNOBS->APPLY_MAX_LOCK_BYTES / ranges.size(), CLIENT_KNOBS->APPLY_MIN_LOCK_BYTES)));
				rc.push_back(readCommitted(cx, results[i], locks[i], ranges[i], decodeBKMutationLogKey));
			}

			// Decay the window-size estimate, bounded below by the minimum lock size.
			maxBytes = std::max<int>(maxBytes * CLIENT_KNOBS->APPLY_MAX_DECAY_RATE, CLIENT_KNOBS->APPLY_MIN_LOCK_BYTES);
			for (idx = 0; idx < ranges.size(); ++idx) {
				// Only the last range gets newEndVersion so dumpData can finalize the window.
				int bytes = wait(dumpData(cx,
				                          results[idx],
				                          locks[idx],
				                          uid,
				                          addPrefix,
				                          removePrefix,
				                          commit,
				                          committedVersion,
				                          idx == ranges.size() - 1 ? newEndVersion : Optional<Version>(),
				                          ranges[idx].begin,
				                          addActor,
				                          &commitLock,
				                          keyVersion));
				maxBytes = std::max<int>(CLIENT_KNOBS->APPLY_MAX_INCREASE_FACTOR * bytes, maxBytes);
				if (error.isError())
					throw error.getError();
			}

			wait(coalesceKeyVersionCache(
			    uid, newEndVersion, keyVersion, commit, committedVersion, addActor, &commitLock));
			beginVersion = newEndVersion;
			if (BUGGIFY) {
				wait(delay(2.0));
			}
		}
	} catch (Error& e) {
		// restore_missing_data is an expected condition (warn); anything else is a hard error.
		TraceEvent(e.code() == error_code_restore_missing_data ? SevWarnAlways : SevError, "ApplyMutationsError")
		    .error(e);
		throw;
	}
}
2018-02-21 05:22:31 +08:00
2021-03-11 02:06:03 +08:00
// Erases mutation-log data (blog/[destUid]) belonging to the backup/DR identified by logUidValue.
// If endVersion is present, only log data before endVersion is cleared and this tag's
// latest-version bookkeeping is advanced; otherwise the tag's version-history entry is removed
// entirely. Log data still needed by other backups/DRs sharing the same destUid is preserved.
// When checkBackupUid is set, nothing is done if a newer backup (larger folder id) owns the tag.
ACTOR static Future<Void> _eraseLogData(Reference<ReadYourWritesTransaction> tr,
                                        Key logUidValue,
                                        Key destUidValue,
                                        Optional<Version> endVersion,
                                        CheckBackupUID checkBackupUid,
                                        Version backupUid) {
	state Key backupLatestVersionsPath = destUidValue.withPrefix(backupLatestVersionsPrefix);
	state Key backupLatestVersionsKey = logUidValue.withPrefix(backupLatestVersionsPath);

	if (!destUidValue.size()) {
		return Void();
	}

	tr->setOption(FDBTransactionOptions::LOCK_AWARE);
	tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);

	if (checkBackupUid) {
		Subspace sourceStates =
		    Subspace(databaseBackupPrefixRange.begin).get(BackupAgentBase::keySourceStates).get(logUidValue);
		Optional<Value> v = wait(tr->get(sourceStates.pack(DatabaseBackupAgent::keyFolderId)));
		// A newer backup has taken over this tag; leave its data alone.
		if (v.present() && BinaryReader::fromStringRef<Version>(v.get(), Unversioned()) > backupUid)
			return Void();
	}

	state RangeResult backupVersions = wait(
	    tr->getRange(KeyRangeRef(backupLatestVersionsPath, strinc(backupLatestVersionsPath)), CLIENT_KNOBS->TOO_MANY));

	// Make sure the version history key does exist and find this tag's begin version.
	state Version currBeginVersion = invalidVersion;
	for (auto backupVersion : backupVersions) {
		Key currLogUidValue = backupVersion.key.removePrefix(backupLatestVersionsPrefix).removePrefix(destUidValue);

		if (currLogUidValue == logUidValue) {
			currBeginVersion = BinaryReader::fromStringRef<Version>(backupVersion.value, Unversioned());
			break;
		}
	}

	// Do not clear anything if the version history key cannot be found.
	if (currBeginVersion == invalidVersion) {
		return Void();
	}

	state Version currEndVersion = std::numeric_limits<Version>::max();
	if (endVersion.present()) {
		currEndVersion = std::min(currEndVersion, endVersion.get());
	}

	state Version nextSmallestVersion = currEndVersion;
	bool clearLogRangesRequired = true;

	// More than one backup/DR shares this destUid; determine how much log data is safe to clear.
	if (backupVersions.size() > 1) {
		for (auto backupVersion : backupVersions) {
			Key currLogUidValue = backupVersion.key.removePrefix(backupLatestVersionsPrefix).removePrefix(destUidValue);
			Version currVersion = BinaryReader::fromStringRef<Version>(backupVersion.value, Unversioned());
			if (currLogUidValue == logUidValue) {
				continue;
			} else if (currVersion > currBeginVersion) {
				nextSmallestVersion = std::min(currVersion, nextSmallestVersion);
			} else {
				// If we can find a version less than or equal to beginVersion, clearing log ranges is not required
				clearLogRangesRequired = false;
				break;
			}
		}
	}

	if (endVersion.present() || backupVersions.size() != 1 || BUGGIFY) {
		if (!endVersion.present()) {
			// Clear current backup version history
			tr->clear(backupLatestVersionsKey);
			if (backupVersions.size() == 1) {
				// Last tag on this destUid: also stop new mutations being logged into blog.
				tr->clear(prefixRange(destUidValue.withPrefix(logRangesRange.begin)));
			}
		} else {
			// Update current backup latest version
			tr->set(backupLatestVersionsKey, BinaryWriter::toValue<Version>(currEndVersion, Unversioned()));
		}

		// Clear log ranges if needed
		if (clearLogRangesRequired) {
			if ((nextSmallestVersion - currBeginVersion) / CLIENT_KNOBS->LOG_RANGE_BLOCK_SIZE >=
			        std::numeric_limits<uint8_t>::max() ||
			    BUGGIFY) {
				// Too many version blocks to enumerate individually; instead clear
				// [version 0, nextSmallestVersion) within each of the 256 hash-prefixed log spaces.
				Key baLogRangePrefix = destUidValue.withPrefix(backupLogKeys.begin);

				for (int h = 0; h <= std::numeric_limits<uint8_t>::max(); h++) {
					uint64_t bv = bigEndian64(Version(0));
					uint64_t ev = bigEndian64(nextSmallestVersion);
					uint8_t h1 = h;
					Key vblockPrefix = StringRef(&h1, sizeof(uint8_t)).withPrefix(baLogRangePrefix);
					tr->clear(KeyRangeRef(StringRef((uint8_t*)&bv, sizeof(uint64_t)).withPrefix(vblockPrefix),
					                      StringRef((uint8_t*)&ev, sizeof(uint64_t)).withPrefix(vblockPrefix)));
				}
			} else {
				Standalone<VectorRef<KeyRangeRef>> ranges =
				    getLogRanges(currBeginVersion, nextSmallestVersion, destUidValue);
				for (auto& range : ranges) {
					tr->clear(range);
				}
			}
		}
	} else {
		// Single tag, no endVersion: wipe everything for this destUid.
		// Clear version history
		tr->clear(prefixRange(backupLatestVersionsPath));

		// Clear everything under blog/[destUid]
		tr->clear(prefixRange(destUidValue.withPrefix(backupLogKeys.begin)));

		// Disable committing mutations into blog
		tr->clear(prefixRange(destUidValue.withPrefix(logRangesRange.begin)));
	}

	if (!endVersion.present() && backupVersions.size() == 1) {
		// Last user of this destUid is gone; remove its destUidLookup entries so the uid can be reused.
		RangeResult existingDestUidValues =
		    wait(tr->getRange(KeyRangeRef(destUidLookupPrefix, strinc(destUidLookupPrefix)), CLIENT_KNOBS->TOO_MANY));
		for (auto it : existingDestUidValues) {
			if (it.value == destUidValue) {
				tr->clear(it.key);
			}
		}
	}

	return Void();
}
2018-03-17 06:40:59 +08:00
2021-03-11 02:06:03 +08:00
// Public entry point for erasing a tag's mutation-log data; forwards to _eraseLogData (above).
Future<Void> eraseLogData(Reference<ReadYourWritesTransaction> tr,
                          Key logUidValue,
                          Key destUidValue,
                          Optional<Version> endVersion,
                          CheckBackupUID checkBackupUid,
                          Version backupUid) {
	return _eraseLogData(tr, logUidValue, destUidValue, endVersion, checkBackupUid, backupUid);
}
2018-03-14 02:21:24 +08:00
2019-09-28 09:32:27 +08:00
// For every backup/DR tag logging into destUidValue's mutation log, prints how far behind it is
// (classifying each tag as backup, DR, both, or unknown). When deleteData is set and the
// most-behind tag exceeds the MIN_CLEANUP_SECONDS threshold, erases that tag's log data.
// The whole body retries on transaction errors via tr->onError().
ACTOR Future<Void> cleanupLogMutations(Database cx, Value destUidValue, bool deleteData) {
	state Key backupLatestVersionsPath = destUidValue.withPrefix(backupLatestVersionsPrefix);

	state Reference<ReadYourWritesTransaction> tr(new ReadYourWritesTransaction(cx));
	state Optional<Key> removingLogUid; // tag chosen for removal on an earlier (retried) iteration
	state std::set<Key> loggedLogUids; // tags already reported, to avoid duplicate output on retry
	loop {
		try {
			tr->setOption(FDBTransactionOptions::LOCK_AWARE);
			tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
			state RangeResult backupVersions = wait(tr->getRange(
			    KeyRangeRef(backupLatestVersionsPath, strinc(backupLatestVersionsPath)), CLIENT_KNOBS->TOO_MANY));
			state Version readVer = tr->getReadVersion().get();

			state Version minVersion = std::numeric_limits<Version>::max();
			state Key minVersionLogUid;

			state int backupIdx = 0;
			for (; backupIdx < backupVersions.size(); backupIdx++) {
				state Version currVersion =
				    BinaryReader::fromStringRef<Version>(backupVersions[backupIdx].value, Unversioned());
				state Key currLogUid =
				    backupVersions[backupIdx].key.removePrefix(backupLatestVersionsPrefix).removePrefix(destUidValue);
				if (currVersion < minVersion) {
					minVersionLogUid = currLogUid;
					minVersion = currVersion;
				}

				if (!loggedLogUids.count(currLogUid)) {
					// Probe both the DR state space and the file-backup config space to classify this tag.
					state Future<Optional<Value>> foundDRKey = tr->get(Subspace(databaseBackupPrefixRange.begin)
					                                                       .get(BackupAgentBase::keySourceStates)
					                                                       .get(currLogUid)
					                                                       .pack(DatabaseBackupAgent::keyStateStatus));
					state Future<Optional<Value>> foundBackupKey =
					    tr->get(Subspace(currLogUid.withPrefix(LiteralStringRef("uid->config/"))
					                         .withPrefix(fileBackupPrefixRange.begin))
					                .pack(LiteralStringRef("stateEnum")));
					wait(success(foundDRKey) && success(foundBackupKey));

					if (foundDRKey.get().present() && foundBackupKey.get().present()) {
						printf("WARNING: Found a tag that looks like both a backup and a DR. This tag is %.4f hours "
						       "behind.\n",
						       (readVer - currVersion) / (3600.0 * CLIENT_KNOBS->CORE_VERSIONSPERSECOND));
					} else if (foundDRKey.get().present() && !foundBackupKey.get().present()) {
						printf("Found a DR that is %.4f hours behind.\n",
						       (readVer - currVersion) / (3600.0 * CLIENT_KNOBS->CORE_VERSIONSPERSECOND));
					} else if (!foundDRKey.get().present() && foundBackupKey.get().present()) {
						printf("Found a Backup that is %.4f hours behind.\n",
						       (readVer - currVersion) / (3600.0 * CLIENT_KNOBS->CORE_VERSIONSPERSECOND));
					} else {
						printf("WARNING: Found an unknown tag that is %.4f hours behind.\n",
						       (readVer - currVersion) / (3600.0 * CLIENT_KNOBS->CORE_VERSIONSPERSECOND));
					}
					loggedLogUids.insert(currLogUid);
				}
			}

			if (deleteData) {
				if (readVer - minVersion > CLIENT_KNOBS->MIN_CLEANUP_SECONDS * CLIENT_KNOBS->CORE_VERSIONSPERSECOND &&
				    (!removingLogUid.present() || minVersionLogUid == removingLogUid.get())) {
					// Stick with the tag picked on the first attempt: if the minimum changed across a
					// retry, the earlier erase may already have committed.
					removingLogUid = minVersionLogUid;
					wait(eraseLogData(tr, minVersionLogUid, destUidValue));
					wait(tr->commit());
					printf("\nSuccessfully removed the tag that was %.4f hours behind.\n\n",
					       (readVer - minVersion) / (3600.0 * CLIENT_KNOBS->CORE_VERSIONSPERSECOND));
				} else if (removingLogUid.present() && minVersionLogUid != removingLogUid.get()) {
					printf("\nWARNING: The oldest tag was possibly removed, run again without `--delete_data' to "
					       "check.\n\n");
				} else {
					printf("\nWARNING: Did not delete data because the tag is not at least %.4f hours behind. Change "
					       "`--min_cleanup_seconds' to adjust this threshold.\n\n",
					       CLIENT_KNOBS->MIN_CLEANUP_SECONDS / 3600.0);
				}
			} else if (readVer - minVersion >
			           CLIENT_KNOBS->MIN_CLEANUP_SECONDS * CLIENT_KNOBS->CORE_VERSIONSPERSECOND) {
				printf("\nPassing `--delete_data' would delete the tag that is %.4f hours behind.\n\n",
				       (readVer - minVersion) / (3600.0 * CLIENT_KNOBS->CORE_VERSIONSPERSECOND));
			} else {
				printf("\nPassing `--delete_data' would not delete the tag that is %.4f hours behind. Change "
				       "`--min_cleanup_seconds' to adjust the cleanup threshold.\n\n",
				       (readVer - minVersion) / (3600.0 * CLIENT_KNOBS->CORE_VERSIONSPERSECOND));
			}

			return Void();
		} catch (Error& e) {
			wait(tr->onError(e));
		}
	}
}
2021-07-05 09:30:55 +08:00
// Scans the destUidLookup space and runs cleanupLogMutations() for every destination UID
// found, forwarding the deleteData flag. The scan transaction retries via tr->onError().
ACTOR Future<Void> cleanupBackup(Database cx, DeleteData deleteData) {
	state Reference<ReadYourWritesTransaction> tr = makeReference<ReadYourWritesTransaction>(cx);
	loop {
		try {
			tr->setOption(FDBTransactionOptions::LOCK_AWARE);
			tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);

			state RangeResult destUids = wait(
			    tr->getRange(KeyRangeRef(destUidLookupPrefix, strinc(destUidLookupPrefix)), CLIENT_KNOBS->TOO_MANY));

			// Clean up each destination UID in turn.
			state int uidIdx = 0;
			for (; uidIdx < destUids.size(); ++uidIdx) {
				wait(cleanupLogMutations(cx, destUids[uidIdx].value, deleteData));
			}
			return Void();
		} catch (Error& e) {
			wait(tr->onError(e));
		}
	}
}
2021-07-05 10:07:52 +08:00
// Convert a status phrase (as produced by getStateText()) back to its enumerated state.
// An empty string means the backup never ran; any unrecognized text maps to STATE_ERRORED.
BackupAgentBase::EnumState BackupAgentBase::getState(std::string const& stateText) {
	// Phrases below must stay in sync with getStateText().
	if (stateText.empty()) {
		return EnumState::STATE_NEVERRAN;
	}
	if (stateText == "has been submitted") {
		return EnumState::STATE_SUBMITTED;
	}
	if (stateText == "has been started") {
		return EnumState::STATE_RUNNING;
	}
	if (stateText == "is differential") {
		return EnumState::STATE_RUNNING_DIFFERENTIAL;
	}
	if (stateText == "has been completed") {
		return EnumState::STATE_COMPLETED;
	}
	if (stateText == "has been aborted") {
		return EnumState::STATE_ABORTED;
	}
	if (stateText == "has been partially aborted") {
		return EnumState::STATE_PARTIALLY_ABORTED;
	}
	return EnumState::STATE_ERRORED;
}
const char * BackupAgentBase : : getStateText ( EnumState enState ) {
const char * stateText ;
switch ( enState ) {
case EnumState : : STATE_ERRORED :
stateText = " has errored " ;
break ;
case EnumState : : STATE_NEVERRAN :
stateText = " has never been started " ;
break ;
case EnumState : : STATE_SUBMITTED :
stateText = " has been submitted " ;
break ;
case EnumState : : STATE_RUNNING :
stateText = " has been started " ;
break ;
case EnumState : : STATE_RUNNING_DIFFERENTIAL :
stateText = " is differential " ;
break ;
case EnumState : : STATE_COMPLETED :
stateText = " has been completed " ;
break ;
case EnumState : : STATE_ABORTED :
stateText = " has been aborted " ;
break ;
case EnumState : : STATE_PARTIALLY_ABORTED :
stateText = " has been partially aborted " ;
break ;
default :
stateText = " <undefined> " ;
break ;
}
return stateText ;
}
const char * BackupAgentBase : : getStateName ( EnumState enState ) {
switch ( enState ) {
case EnumState : : STATE_ERRORED :
return " Errored " ;
case EnumState : : STATE_NEVERRAN :
return " NeverRan " ;
case EnumState : : STATE_SUBMITTED :
return " Submitted " ;
break ;
case EnumState : : STATE_RUNNING :
return " Running " ;
case EnumState : : STATE_RUNNING_DIFFERENTIAL :
return " RunningDifferentially " ;
case EnumState : : STATE_COMPLETED :
return " Completed " ;
case EnumState : : STATE_ABORTED :
return " Aborted " ;
case EnumState : : STATE_PARTIALLY_ABORTED :
return " Aborting " ;
default :
return " <undefined> " ;
}
}
bool BackupAgentBase : : isRunnable ( EnumState enState ) {
switch ( enState ) {
case EnumState : : STATE_SUBMITTED :
case EnumState : : STATE_RUNNING :
case EnumState : : STATE_RUNNING_DIFFERENTIAL :
case EnumState : : STATE_PARTIALLY_ABORTED :
return true ;
default :
return false ;
}
}
// Format the current time (flow's now()) as "YYYY-mm-dd-HH-MM-SS.uuuuuu", where the suffix is
// the fractional-second part of now() in microseconds.
// NOTE(review): localtime() returns a shared static buffer and is not reentrant — presumably
// safe because flow runs this on a single thread; confirm before calling from other threads.
Standalone<StringRef> BackupAgentBase::getCurrentTime() {
	double t = now();
	time_t curTime = t; // truncates toward zero; fractional part recovered below
	char buffer[128];
	struct tm* timeinfo;
	timeinfo = localtime(&curTime);
	strftime(buffer, 128, "%Y-%m-%d-%H-%M-%S", timeinfo);

	std::string time(buffer);
	return StringRef(time + format(".%06d", (int)(1e6 * (t - curTime))));
}
std : : string const BackupAgentBase : : defaultTagName = " default " ;