/*
 * RestoreApplier.actor.cpp
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
// This file defines the functions used by the RestoreApplier role.
// The RestoreApplier role starts at the restoreApplierCore actor.
# include "fdbclient/NativeAPI.actor.h"
# include "fdbclient/SystemData.h"
# include "fdbclient/BackupAgent.actor.h"
# include "fdbclient/ManagementAPI.actor.h"
# include "fdbclient/MutationList.h"
# include "fdbclient/BackupContainer.h"
# include "fdbserver/RestoreCommon.actor.h"
# include "fdbserver/RestoreUtil.h"
# include "fdbserver/RestoreRoleCommon.actor.h"
# include "fdbserver/RestoreApplier.actor.h"
# include "flow/actorcompiler.h" // This must be the last #include.
ACTOR Future<Void> handleGetApplierKeyRangeRequest(RestoreGetApplierKeyRangeRequest req, Reference<RestoreApplierData> self);
ACTOR Future<Void> handleSetApplierKeyRangeRequest(RestoreSetApplierKeyRangeRequest req, Reference<RestoreApplierData> self);
ACTOR Future<Void> handleCalculateApplierKeyRangeRequest(RestoreCalculateApplierKeyRangeRequest req, Reference<RestoreApplierData> self);
ACTOR Future<Void> handleSendSampleMutationVectorRequest(RestoreSendMutationVectorRequest req, Reference<RestoreApplierData> self);
ACTOR Future<Void> handleSendMutationVectorRequest(RestoreSendMutationVectorRequest req, Reference<RestoreApplierData> self);
ACTOR Future<Void> handleSendMutationVectorVersionedRequest(RestoreSendMutationVectorVersionedRequest req, Reference<RestoreApplierData> self);
ACTOR Future<Void> handleApplyToDBRequest(RestoreSimpleRequest req, Reference<RestoreApplierData> self, Database cx);
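
// restoreApplierCore is the main loop of the RestoreApplier role: it dispatches each incoming request
// to the corresponding handler actor and exits once the finishRestore handler (exitRole) completes.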
ACTOR Future<Void> restoreApplierCore(Reference<RestoreApplierData> self, RestoreApplierInterface applierInterf, Database cx) {
	state ActorCollection actors(false);
	state Future<Void> exitRole = Never();
	state double lastLoopTopTime = now();
	loop {
		double loopTopTime = now();
		double elapsedTime = loopTopTime - lastLoopTopTime;
		if (elapsedTime > 0.050) {
			if (g_random->random01() < 0.01)
				TraceEvent(SevWarn, "SlowRestoreApplierLoopx100").detail("NodeDesc", self->describeNode()).detail("Elapsed", elapsedTime);
		}
		lastLoopTopTime = loopTopTime;
		state std::string requestTypeStr = "[Init]";

		try {
			choose {
				when ( RestoreSimpleRequest req = waitNext(applierInterf.heartbeat.getFuture()) ) {
					requestTypeStr = "heartbeat";
					actors.add( handleHeartbeat(req, applierInterf.id()) );
				}
				when ( RestoreSetApplierKeyRangeRequest req = waitNext(applierInterf.setApplierKeyRangeRequest.getFuture()) ) {
					requestTypeStr = "setApplierKeyRangeRequest";
					actors.add( handleSetApplierKeyRangeRequest(req, self) );
				}
				when ( RestoreSendMutationVectorVersionedRequest req = waitNext(applierInterf.sendMutationVector.getFuture()) ) {
					requestTypeStr = "sendMutationVector";
					//actors.add( handleSendMutationVectorRequest(req, self) );
					actors.add( handleSendMutationVectorVersionedRequest(req, self) );
				}
				when ( RestoreSimpleRequest req = waitNext(applierInterf.applyToDB.getFuture()) ) {
					requestTypeStr = "applyToDB";
					actors.add( handleApplyToDBRequest(req, self, cx) );
				}
				when ( RestoreVersionBatchRequest req = waitNext(applierInterf.initVersionBatch.getFuture()) ) {
					requestTypeStr = "initVersionBatch";
					actors.add( handleInitVersionBatchRequest(req, self) );
				}
				when ( RestoreSimpleRequest req = waitNext(applierInterf.finishRestore.getFuture()) ) {
					requestTypeStr = "finishRestore";
					exitRole = handlerFinishRestoreRequest(req, self, cx);
				}
				when ( wait(exitRole) ) {
					break;
				}
			}
		} catch (Error& e) {
			fprintf(stdout, "[ERROR] Applier handle received request:%s error. error code:%d, error message:%s\n",
					requestTypeStr.c_str(), e.code(), e.what());

			if ( requestTypeStr.find("[Init]") != std::string::npos ) {
				printf("Exit due to error at requestType:%s", requestTypeStr.c_str());
				break;
			}
		}
	}

	return Void();
}
// Based on the number of sampled mutations in the key space, split the key space evenly into k applier key ranges.
// If the number of split key ranges is smaller than k, some appliers will not be used.
ACTOR Future<Void> handleCalculateApplierKeyRangeRequest(RestoreCalculateApplierKeyRangeRequest req, Reference<RestoreApplierData> self) {
	state int numMutations = 0;
	state std::vector<Standalone<KeyRef>> keyRangeLowerBounds;

	while (self->isInProgress(RestoreCommandEnum::Calculate_Applier_KeyRange)) {
		printf("[DEBUG] NODE:%s Calculate_Applier_KeyRange wait for 5s\n", self->describeNode().c_str());
		wait(delay(5.0));
	}
	wait( delay(1.0) );

	// Handle duplicate message.
	// We must recalculate the value for a duplicate message, because the reply to the duplicate message may arrive earlier!
	if (self->isCmdProcessed(req.cmdID) && !keyRangeLowerBounds.empty() ) {
		printf("[DEBUG] Node:%s skip duplicate cmd:%s\n", self->describeNode().c_str(), req.cmdID.toString().c_str());
		req.reply.send(GetKeyRangeNumberReply(keyRangeLowerBounds.size()));
		return Void();
	}
	self->setInProgressFlag(RestoreCommandEnum::Calculate_Applier_KeyRange);

	// Applier will calculate the applier key ranges
	printf("[INFO][Applier] CMD:%s, Node:%s Calculate key ranges for %d appliers\n",
			req.cmdID.toString().c_str(), self->describeNode().c_str(), req.numAppliers);

	if ( keyRangeLowerBounds.empty() ) {
		keyRangeLowerBounds = self->calculateAppliersKeyRanges(req.numAppliers); // req.numAppliers is the number of key ranges requested
		self->keyRangeLowerBounds = keyRangeLowerBounds;
	}

	printf("[INFO][Applier] CMD:%s, NodeID:%s: num of key ranges:%ld\n",
			req.cmdID.toString().c_str(), self->describeNode().c_str(), keyRangeLowerBounds.size());
	req.reply.send(GetKeyRangeNumberReply(keyRangeLowerBounds.size()));
	self->processedCmd[req.cmdID] = 1; // We should not skip this command in the following phase. Otherwise, the handler in other phases may return a wrong number of appliers
	self->clearInProgressFlag(RestoreCommandEnum::Calculate_Applier_KeyRange);

	return Void();
}
// Reply with the key range for the applier at req.applierIndex.
// This actor cannot return until the applier has calculated the key ranges for all appliers.
ACTOR Future<Void> handleGetApplierKeyRangeRequest(RestoreGetApplierKeyRangeRequest req, Reference<RestoreApplierData> self) {
	state int numMutations = 0;
	//state std::vector<Standalone<KeyRef>> keyRangeLowerBounds = self->keyRangeLowerBounds;

	while (self->isInProgress(RestoreCommandEnum::Get_Applier_KeyRange)) {
		printf("[DEBUG] NODE:%s Get_Applier_KeyRange wait for 5s\n", self->describeNode().c_str());
		wait(delay(5.0));
	}
	wait( delay(1.0) );

	// NOTE: Must reply with a valid lowerBound and upperBound! Otherwise, the master will receive an invalid value!
	// if ( self->isCmdProcessed(req.cmdID) ) {
	// 	printf("[DEBUG] Node:%s skip duplicate cmd:%s\n", self->describeNode().c_str(), req.cmdID.toString().c_str());
	// 	req.reply.send(GetKeyRangeReply(workerInterf.id(), req.cmdID)); // Must wait until the previous command returns
	// 	return Void();
	// }
	self->setInProgressFlag(RestoreCommandEnum::Get_Applier_KeyRange);

	if ( req.applierIndex < 0 || req.applierIndex >= self->keyRangeLowerBounds.size() ) {
		printf("[INFO][Applier] NodeID:%s Get_Applier_KeyRange keyRangeIndex is out of range. keyIndex:%d keyRangeSize:%ld\n",
				self->describeNode().c_str(), req.applierIndex, self->keyRangeLowerBounds.size());
	}

	printf("[INFO][Applier] NodeID:%s replies Get_Applier_KeyRange. keyRangeIndex:%d lower_bound_of_keyRange:%s\n",
			self->describeNode().c_str(), req.applierIndex, getHexString(self->keyRangeLowerBounds[req.applierIndex]).c_str());

	KeyRef lowerBound = self->keyRangeLowerBounds[req.applierIndex];
	KeyRef upperBound = (req.applierIndex + 1) < self->keyRangeLowerBounds.size() ? self->keyRangeLowerBounds[req.applierIndex + 1] : normalKeys.end;

	req.reply.send(GetKeyRangeReply(self->id(), req.cmdID, req.applierIndex, lowerBound, upperBound));
	self->clearInProgressFlag(RestoreCommandEnum::Get_Applier_KeyRange);

	return Void();
}
// Assign a key range to the applier req.applierID.
// Idempotent operation: it is OK to re-execute a duplicate cmd.
// The applier should remember the key range it is responsible for.
ACTOR Future<Void> handleSetApplierKeyRangeRequest(RestoreSetApplierKeyRangeRequest req, Reference<RestoreApplierData> self) {
	while (self->isInProgress(RestoreCommandEnum::Assign_Applier_KeyRange)) {
		printf("[DEBUG] NODE:%s handleSetApplierKeyRangeRequest wait for 1s\n", self->describeNode().c_str());
		wait(delay(1.0));
	}
	if ( self->isCmdProcessed(req.cmdID) ) {
		req.reply.send(RestoreCommonReply(self->id(), req.cmdID));
		return Void();
	}
	self->setInProgressFlag(RestoreCommandEnum::Assign_Applier_KeyRange);

	self->range2Applier[req.range.begin] = req.applierID;
	self->processedCmd.clear(); // The Loader_Register_Mutation_to_Applier command can be sent in both sampling and actual loading phases
	self->processedCmd[req.cmdID] = 1;
	self->clearInProgressFlag(RestoreCommandEnum::Assign_Applier_KeyRange);

	req.reply.send(RestoreCommonReply(self->id(), req.cmdID));
	return Void();
}
// Applier receives a vector of mutations from a loader and caches them by commit version.
ACTOR Future<Void> handleSendMutationVectorRequest(RestoreSendMutationVectorRequest req, Reference<RestoreApplierData> self) {
	state int numMutations = 0;

	if ( debug_verbose ) {
		printf("[VERBOSE_DEBUG] Node:%s receive mutation number:%d\n", self->describeNode().c_str(), req.mutations.size());
	}

	// NOTE: We insert into self->kvOps. For the same worker, we should only allow one actor of this kind to run at any time!
	// Otherwise, a race condition may happen!
	while (self->isInProgress(RestoreCommandEnum::Loader_Send_Mutations_To_Applier)) {
		printf("[DEBUG] NODE:%s sendMutation wait for 1s\n", self->describeNode().c_str());
		wait(delay(1.0));
	}

	// Handle duplicate cmd
	if ( self->isCmdProcessed(req.cmdID) ) {
		printf("[DEBUG] NODE:%s handleSendMutationVectorRequest skip duplicate cmd:%s\n", self->describeNode().c_str(), req.cmdID.toString().c_str());
		req.reply.send(RestoreCommonReply(self->id(), req.cmdID));
		return Void();
	}
	self->setInProgressFlag(RestoreCommandEnum::Loader_Send_Mutations_To_Applier);

	// Applier will cache the mutations at each version. Once it receives all mutations, the applier will apply them to the DB.
	state uint64_t commitVersion = req.commitVersion;
	VectorRef<MutationRef> mutations(req.mutations);
	printf("[DEBUG] Node:%s receive %d mutations at version:%ld\n", self->describeNode().c_str(), mutations.size(), commitVersion);
	if ( self->kvOps.find(commitVersion) == self->kvOps.end() ) {
		self->kvOps.insert(std::make_pair(commitVersion, VectorRef<MutationRef>()));
	}
	state int mIndex = 0;
	for (mIndex = 0; mIndex < mutations.size(); mIndex++) {
		MutationRef mutation = mutations[mIndex];
		self->kvOps[commitVersion].push_back_deep(self->kvOps[commitVersion].arena(), mutation);
		numMutations++;
		//if ( numMutations % 100000 == 1 ) { // Should be a different value in simulation and in real mode
			printf("[INFO][Applier] Node:%s Receives %d mutations. cur_mutation:%s\n",
					self->describeNode().c_str(), numMutations, mutation.toString().c_str());
		//}
	}

	req.reply.send(RestoreCommonReply(self->id(), req.cmdID));
	// Avoid race condition when this actor is called twice on the same command
	self->processedCmd[req.cmdID] = 1;
	self->clearInProgressFlag(RestoreCommandEnum::Loader_Send_Mutations_To_Applier);

	return Void();
}
// ATTENTION: If a loader sends mutations from range files and log files at the same time,
// could a race condition happen in this actor?
// MX: Probably not, because all actors run on a single thread;
// as long as we do not wait or yield while operating on the shared data, it should be fine.
ACTOR Future<Void> handleSendMutationVectorVersionedRequest(RestoreSendMutationVectorVersionedRequest req, Reference<RestoreApplierData> self) {
	state int numMutations = 0;

	if ( debug_verbose ) {
		// NOTE: Printing the current version along with the received req is helpful in debugging
		printf("[VERBOSE_DEBUG] handleSendMutationVectorVersionedRequest Node:%s at rangeVersion:%ld logVersion:%ld receive mutation number:%d, req:%s\n",
				self->describeNode().c_str(), self->rangeVersion.get(), self->logVersion.get(), req.mutations.size(), req.toString().c_str());
	}

	if ( req.isRangeFile ) {
		wait( self->rangeVersion.whenAtLeast(req.prevVersion) );
	} else {
		wait( self->logVersion.whenAtLeast(req.prevVersion) );
	}

	// ASSUME: Log files are processed before range files. We do NOT mix range and log files.
	//ASSERT_WE_THINK( self->rangeVersion.get() > 0 && req.isRangeFile );

	if ( (req.isRangeFile && self->rangeVersion.get() == req.prevVersion) ||
	     (!req.isRangeFile && self->logVersion.get() == req.prevVersion) ) { // Not a duplicate (check relies on no waiting between here and self->version.set() below!)
		// Applier will cache the mutations at each version. Once it receives all mutations, the applier will apply them to the DB.
		state Version commitVersion = req.version;
		VectorRef<MutationRef> mutations(req.mutations);
		printf("[DEBUG] Node:%s receive %d mutations at version:%ld\n", self->describeNode().c_str(), mutations.size(), commitVersion);
		if ( self->kvOps.find(commitVersion) == self->kvOps.end() ) {
			self->kvOps.insert(std::make_pair(commitVersion, VectorRef<MutationRef>()));
		}
		state int mIndex = 0;
		for (mIndex = 0; mIndex < mutations.size(); mIndex++) {
			MutationRef mutation = mutations[mIndex];
			self->kvOps[commitVersion].push_back_deep(self->kvOps[commitVersion].arena(), mutation);
			numMutations++;
			//if ( numMutations % 100000 == 1 ) { // Should be a different value in simulation and in real mode
				printf("[INFO][Applier] Node:%s Receives %d mutations. cur_mutation:%s\n",
						self->describeNode().c_str(), numMutations, mutation.toString().c_str());
			//}
		}

		// Notify the same actor and unblock the request at the next version
		if ( req.isRangeFile ) {
			self->rangeVersion.set(req.version);
		} else {
			self->logVersion.set(req.version);
		}
	}

	req.reply.send(RestoreCommonReply(self->id(), req.cmdID));
	return Void();
}
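
// Receive sampled mutations from a loader during the sampling phase and count the operations per key
// (self->keyOpsCount); these per-key counts are the basis for splitting the key space among appliers.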
ACTOR Future<Void> handleSendSampleMutationVectorRequest(RestoreSendMutationVectorRequest req, Reference<RestoreApplierData> self) {
	state int numMutations = 0;
	self->numSampledMutations = 0;

	// NOTE: We update the shared per-key counters (self->keyOpsCount). For the same worker, we should only allow one actor of this kind to run at any time!
	// Otherwise, a race condition may happen!
	while (self->isInProgress(RestoreCommandEnum::Loader_Send_Sample_Mutation_To_Applier)) {
		printf("[DEBUG] NODE:%s handleSendSampleMutationVectorRequest wait for 1s\n", self->describeNode().c_str());
		wait(delay(1.0));
	}

	// Handle duplicate message
	if ( self->isCmdProcessed(req.cmdID) ) {
		printf("[DEBUG] NODE:%s skip duplicate cmd:%s\n", self->describeNode().c_str(), req.cmdID.toString().c_str());
		req.reply.send(RestoreCommonReply(self->id(), req.cmdID));
		return Void();
	}
	self->setInProgressFlag(RestoreCommandEnum::Loader_Send_Sample_Mutation_To_Applier);

	// Applier will cache the mutations at each version. Once it receives all mutations, the applier will apply them to the DB.
	state uint64_t commitVersion = req.commitVersion;
	// TODO: Change the req.mutation to a vector of mutations
	VectorRef<MutationRef> mutations(req.mutations);

	state int mIndex = 0;
	for (mIndex = 0; mIndex < mutations.size(); mIndex++) {
		MutationRef mutation = mutations[mIndex];
		if ( self->keyOpsCount.find(mutation.param1) == self->keyOpsCount.end() ) {
			self->keyOpsCount.insert(std::make_pair(mutation.param1, 0));
		}
		// NOTE: We may receive the same mutation more than once due to lost network packets.
		// Since sampling is just an estimation and the network should be stable enough, we do NOT handle the duplication for now.
		// In a very unreliable network, we may get many duplicate messages and a bad key-range split for the appliers,
		// but the restore should still work, just more slowly.
		self->keyOpsCount[mutation.param1]++;
		self->numSampledMutations++;

		if ( debug_verbose && self->numSampledMutations % 1000 == 1 ) {
			printf("[Sampling][Applier] Node:%s Receives %d sampled mutations. cur_mutation:%s\n",
					self->describeNode().c_str(), self->numSampledMutations, mutation.toString().c_str());
		}
	}

	req.reply.send(RestoreCommonReply(self->id(), req.cmdID));
	self->processedCmd[req.cmdID] = 1;
	self->clearInProgressFlag(RestoreCommandEnum::Loader_Send_Sample_Mutation_To_Applier);

	return Void();
}
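
// Apply the mutations cached in self->kvOps to the destination database in increasing version order,
// batching them into transactions of roughly transactionBatchSizeThreshold bytes each.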
ACTOR Future<Void> handleApplyToDBRequest(RestoreSimpleRequest req, Reference<RestoreApplierData> self, Database cx) {
	state bool isPrint = false; // Debug message
	state std::string typeStr = "";

	// Wait in case the applyToDB request was delivered twice
	while (self->inProgressApplyToDB) {
		printf("[DEBUG] NODE:%s inProgressApplyToDB wait for 5s\n", self->describeNode().c_str());
		wait(delay(5.0));
	}

	if ( self->isCmdProcessed(req.cmdID) ) {
		printf("[DEBUG] NODE:%s skip duplicate cmd:%s\n", self->describeNode().c_str(), req.cmdID.toString().c_str());
		req.reply.send(RestoreCommonReply(self->id(), req.cmdID));
		return Void();
	}
	self->inProgressApplyToDB = true;

	// Assume the process will not crash while it applies mutations to the DB. The reply message can be lost though.
	if (self->kvOps.empty()) {
		printf("Node:%s kvOps is empty. No-op for apply to DB\n", self->describeNode().c_str());
		req.reply.send(RestoreCommonReply(self->id(), req.cmdID));
		self->processedCmd[req.cmdID] = 1;
		self->inProgressApplyToDB = false;
		return Void();
	}

	self->sanityCheckMutationOps();

	if ( debug_verbose ) {
		TraceEvent("ApplyKVOPsToDB").detail("MapSize", self->kvOps.size());
		printf("ApplyKVOPsToDB num_of_version:%ld\n", self->kvOps.size());
	}
	state std::map<Version, Standalone<VectorRef<MutationRef>>>::iterator it = self->kvOps.begin();
	state std::map<Version, Standalone<VectorRef<MutationRef>>>::iterator prevIt = it;
	state int index = 0;
	state int prevIndex = index;
	state int count = 0;
	state Reference<ReadYourWritesTransaction> tr(new ReadYourWritesTransaction(cx));
	state int numVersion = 0;
	state double transactionSize = 0;
	loop {
		try {
			tr->reset();
			tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
			tr->setOption(FDBTransactionOptions::LOCK_AWARE);
			transactionSize = 0;

			for ( ; it != self->kvOps.end(); ++it ) {
				numVersion++;
				if ( debug_verbose ) {
					TraceEvent("ApplyKVOPsToDB\t").detail("Version", it->first).detail("OpNum", it->second.size());
				}
				//printf("ApplyKVOPsToDB numVersion:%d Version:%08lx num_of_ops:%d, \n", numVersion, it->first, it->second.size());

				state MutationRef m;
				for ( ; index < it->second.size(); ++index ) {
					m = it->second[index];
					if ( m.type >= MutationRef::Type::SetValue && m.type <= MutationRef::Type::MAX_ATOMIC_OP )
						typeStr = typeString[m.type];
					else {
						printf("ApplyKVOPsToDB MutationType:%d is out of range\n", m.type);
					}

					if ( debug_verbose && count % 1000 == 0 ) {
						printf("ApplyKVOPsToDB Node:%s num_mutation:%d Version:%08lx num_of_ops to apply:%d\n",
								self->describeNode().c_str(), count, it->first, it->second.size());
					}

					if ( debug_verbose || true ) {
						printf("[VERBOSE_DEBUG] Node:%s apply mutation:%s\n", self->describeNode().c_str(), m.toString().c_str());
					}

					if ( m.type == MutationRef::SetValue ) {
						tr->set(m.param1, m.param2);
					} else if ( m.type == MutationRef::ClearRange ) {
						KeyRangeRef mutationRange(m.param1, m.param2);
						tr->clear(mutationRange);
					} else if ( isAtomicOp((MutationRef::Type) m.type) ) {
						// Handle atomic operations here
						// TODO: Have not de-duplicated the mutations for multiple network deliveries
						// ATOMIC_MASK = (1 << AddValue) | (1 << And) | (1 << Or) | (1 << Xor) | (1 << AppendIfFits) | (1 << Max) | (1 << Min) | (1 << SetVersionstampedKey) | (1 << SetVersionstampedValue) | (1 << ByteMin) | (1 << ByteMax) | (1 << MinV2) | (1 << AndV2),
						//atomicOp( const KeyRef& key, const ValueRef& operand, uint32_t operationType )
						tr->atomicOp(m.param1, m.param2, m.type);
					} else {
						printf("[WARNING] mtype:%d (%s) unhandled\n", m.type, typeStr.c_str());
					}
					++count;
					transactionSize += m.expectedSize();

					if ( transactionSize >= transactionBatchSizeThreshold ) { // Commit once the batch exceeds the size threshold
						wait(tr->commit());
						tr->reset();
						tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
						tr->setOption(FDBTransactionOptions::LOCK_AWARE);
						prevIt = it;
						prevIndex = index;
						transactionSize = 0;
					}

					if ( isPrint ) {
						printf("\tApplyKVOPsToDB Version:%016lx MType:%s K:%s, V:%s K_size:%d V_size:%d\n", it->first, typeStr.c_str(),
								getHexString(m.param1).c_str(), getHexString(m.param2).c_str(), m.param1.size(), m.param2.size());

						TraceEvent("ApplyKVOPsToDB\t\t").detail("Version", it->first)
								.detail("MType", m.type).detail("MTypeStr", typeStr)
								.detail("MKey", getHexString(m.param1))
								.detail("MValueSize", m.param2.size())
								.detail("MValue", getHexString(m.param2));
					}
				}

				if ( transactionSize > 0 ) { // The commit batch should NOT span versions
					wait(tr->commit());
					tr->reset();
					tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
					tr->setOption(FDBTransactionOptions::LOCK_AWARE);
					prevIt = it;
					prevIndex = index;
					transactionSize = 0;
				}

				index = 0;
			}
			// Last transaction
			if ( transactionSize > 0 ) {
				wait(tr->commit());
			}
			break;
		} catch (Error& e) {
			printf("ApplyKVOPsToDB transaction error:%s.\n", e.what());
			wait(tr->onError(e));
			it = prevIt;
			index = prevIndex;
			transactionSize = 0;
		}
	}

	self->kvOps.clear();
	printf("Node:%s ApplyKVOPsToDB number of kv mutations:%d\n", self->describeNode().c_str(), count);

	req.reply.send(RestoreCommonReply(self->id(), req.cmdID));
	printf("self->processedCmd size:%ld req.cmdID:%s\n", self->processedCmd.size(), req.cmdID.toString().c_str());
	self->processedCmd[req.cmdID] = 1;
	self->inProgressApplyToDB = false;

	return Void();
}