/*
 * VersionedBTree.actor.cpp
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "flow/flow.h"
#include "fdbserver/IVersionedStore.h"
#include "fdbserver/IPager.h"
#include "fdbclient/Tuple.h"
#include "flow/serialize.h"
#include "flow/genericactors.actor.h"
#include "flow/UnitTest.h"
#include "fdbserver/MemoryPager.h"
#include "fdbserver/IndirectShadowPager.h"
#include <map>
#include <vector>
#include "fdbclient/CommitTransaction.h"
#include "fdbserver/IKeyValueStore.h"
#include "fdbserver/DeltaTree.h"
#include <string.h>
#include "flow/actorcompiler.h"
#include <cinttypes>
// TODO: Move this to a flow header once it is mature.
struct SplitStringRef {
	StringRef a;
	StringRef b;

	SplitStringRef(StringRef a = StringRef(), StringRef b = StringRef()) : a(a), b(b) {
	}

	SplitStringRef(Arena &arena, const SplitStringRef &toCopy)
	  : a(toCopy.toStringRef(arena)), b() {
	}

	SplitStringRef prefix(int len) const {
		if(len <= a.size()) {
			return SplitStringRef(a.substr(0, len));
		}
		len -= a.size();
		return SplitStringRef(a, b.substr(0, len));
	}

	StringRef toStringRef(Arena &arena) const {
		StringRef c = makeString(size(), arena);
		memcpy(mutateString(c), a.begin(), a.size());
		memcpy(mutateString(c) + a.size(), b.begin(), b.size());
		return c;
	}

	Standalone<StringRef> toStringRef() const {
		Arena a;
		return Standalone<StringRef>(toStringRef(a), a);
	}

	int size() const {
		return a.size() + b.size();
	}

	int expectedSize() const {
		return size();
	}

	std::string toString() const {
		return format("%s%s", a.toString().c_str(), b.toString().c_str());
	}

	std::string toHexString() const {
		return format("%s%s", a.toHexString().c_str(), b.toHexString().c_str());
	}

	struct const_iterator {
		const uint8_t *ptr;
		const uint8_t *end;
		const uint8_t *next;

		inline bool operator==(const const_iterator &rhs) const {
			return ptr == rhs.ptr;
		}

		inline const_iterator & operator++() {
			++ptr;
			if(ptr == end) {
				ptr = next;
			}
			return *this;
		}

		inline const_iterator & operator+(int n) {
			ptr += n;
			if(ptr >= end) {
				ptr = next + (ptr - end);
			}
			return *this;
		}

		inline uint8_t operator *() const {
			return *ptr;
		}
	};

	inline const_iterator begin() const {
		return {a.begin(), a.end(), b.begin()};
	}

	inline const_iterator end() const {
		return {b.end()};
	}

	template<typename StringT>
	int compare(const StringT &rhs) const {
		auto j = begin();
		auto k = rhs.begin();
		auto jEnd = end();
		auto kEnd = rhs.end();

		while(j != jEnd && k != kEnd) {
			int cmp = *j - *k;
			if(cmp != 0) {
				return cmp;
			}
			// Advance both iterators; without this the loop would never terminate on equal bytes.
			++j;
			++k;
		}

		// If we've reached the end of *this, then values are equal if rhs is also exhausted, otherwise *this is less than rhs
		if(j == jEnd) {
			return k == kEnd ? 0 : -1;
		}

		return 1;
	}
};
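
// Illustrative usage sketch for SplitStringRef (added for clarity, not part of the original file;
// the string contents below are hypothetical). A SplitStringRef presents two fragments as one
// logical byte string for size(), iteration, and compare(), and only concatenates when
// toStringRef() is called:
//
//   StringRef head = LiteralStringRef("key_prefix_");
//   StringRef tail = LiteralStringRef("suffix");
//   SplitStringRef s(head, tail);
//   ASSERT(s.size() == head.size() + tail.size());
//   ASSERT(s.compare(LiteralStringRef("key_prefix_suffix")) == 0);
//   Arena arena;
//   StringRef flat = s.toStringRef(arena);   // contiguous copy allocated from arena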

#define STR(x) LiteralStringRef(x)

struct RedwoodRecordRef {
	typedef uint8_t byte;

	RedwoodRecordRef(KeyRef key = KeyRef(), Version ver = 0, Optional<ValueRef> value = {}, uint32_t chunkTotal = 0, uint32_t chunkStart = 0)
	  : key(key), version(ver), value(value), chunk({chunkTotal, chunkStart})
	{
	}

	RedwoodRecordRef(Arena &arena, const RedwoodRecordRef &toCopy)
	  : key(arena, toCopy.key), version(toCopy.version), chunk(toCopy.chunk)
	{
		if(toCopy.value.present()) {
			value = ValueRef(arena, toCopy.value.get());
		}
	}

	RedwoodRecordRef(KeyRef key, Optional<ValueRef> value, const byte intFields[14])
	  : key(key), value(value)
	{
		deserializeIntFields(intFields);
	}

	// TODO: This probably is not actually needed, it only helps make debug output more clear
	RedwoodRecordRef withoutValue() const {
		return RedwoodRecordRef(key, version, {}, chunk.total, chunk.start);
	}

	// Returns how many bytes are in common between the integer fields of *this and other, assuming that
	// all values are BigEndian, version is 64 bits, chunk total is 24 bits, and chunk start is 24 bits
	int getCommonIntFieldPrefix(const RedwoodRecordRef &other) const {
		if(version != other.version) {
			return clzll(version ^ other.version) >> 3;
		}

		if(chunk.total != other.chunk.total) {
			// the -1 is because we are only considering the lower 3 bytes
			return 8 + (clz(chunk.total ^ other.chunk.total) >> 3) - 1;
		}

		if(chunk.start != other.chunk.start) {
			// the -1 is because we are only considering the lower 3 bytes
			return 11 + (clz(chunk.start ^ other.chunk.start) >> 3) - 1;
		}

		return 14;
	}
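
	// Illustrative example (not from the original source): for two records with equal keys and
	// versions 0x0102 and 0x0103, version ^ other.version == 1, so clzll() returns 63 and
	// 63 >> 3 == 7, meaning the first 7 of the 14 big endian int field bytes are identical.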
	// Find the common prefix between two records, assuming that the first
	// skip bytes are the same.
	inline int getCommonPrefixLen(const RedwoodRecordRef &other, int skip) const {
		int skipStart = std::min(skip, key.size());
		int common = skipStart + commonPrefixLength(key.begin() + skipStart, other.key.begin() + skipStart, std::min(other.key.size(), key.size()) - skipStart);

		if(common == key.size() && key.size() == other.key.size()) {
			common += getCommonIntFieldPrefix(other);
		}

		return common;
	}

	static const int intFieldArraySize = 14;

	// Write big endian values of version (64 bits), total (24 bits), and start (24 bits) fields
	// to an array of 14 bytes
	void serializeIntFields(byte *dst) const {
		*(uint32_t *)(dst + 10) = bigEndian32(chunk.start);
		*(uint32_t *)(dst + 7) = bigEndian32(chunk.total);
		*(uint64_t *)dst = bigEndian64(version);
	}

	// Initialize int fields from the array format that serializeIntFields produces
	void deserializeIntFields(const byte *src) {
		version = bigEndian64(*(uint64_t *)src);
		chunk.total = bigEndian32(*(uint32_t *)(src + 7)) & 0xffffff;
		chunk.start = bigEndian32(*(uint32_t *)(src + 10)) & 0xffffff;
	}
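
	// Layout sketch for the 14 byte int field array above (illustrative, derived from the code
	// rather than taken from the original source). The three big endian writes intentionally
	// overlap: each 32-bit write of a 24-bit chunk field leaves a zero high byte that the next
	// write overwrites.
	//   bytes [0..7]   version     (64-bit big endian)
	//   bytes [8..10]  chunk.total (low 24 bits, big endian)
	//   bytes [11..13] chunk.start (low 24 bits, big endian)
	// For example, version=1 with chunk total/start of 0 serializes to
	//   00 00 00 00 00 00 00 01  00 00 00  00 00 00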
	// TODO: Use SplitStringRef (unless it ends up being slower)
	KeyRef key;
	Optional<ValueRef> value;
	Version version;
	struct {
		uint32_t total;
		// TODO: Change start to chunk number.
		uint32_t start;
	} chunk;

	int expectedSize() const {
		return key.expectedSize() + value.expectedSize();
	}

	bool isMultiPart() const {
		return chunk.total != 0;
	}

	// Generate a kv shard from a complete kv
	RedwoodRecordRef split(int start, int len) {
		ASSERT(!isMultiPart());
		return RedwoodRecordRef(key, version, value.get().substr(start, len), value.get().size(), start);
	}

	class Writer {
	public:
		Writer(byte *ptr) : wptr(ptr) {}

		byte *wptr;

		template<typename T> void write(const T &in) {
			*(T *)wptr = in;
			wptr += sizeof(T);
		}

		// Write a big endian 1 or 2 byte integer using the high bit of the first byte as an "extension" bit.
		// Values > 15 bits in length are not valid input but this is not checked for.
		void writeVarInt(int x) {
			if(x >= 128) {
				*wptr++ = (uint8_t)((x >> 8) | 0x80);
			}
			*wptr++ = (uint8_t)x;
		}

		void writeString(StringRef s) {
			memcpy(wptr, s.begin(), s.size());
			wptr += s.size();
		}
	};

	class Reader {
	public:
		Reader(const void *ptr) : rptr((const byte *)ptr) {}

		const byte *rptr;

		template<typename T> T read() {
			T r = *(const T *)rptr;
			rptr += sizeof(T);
			return r;
		}

		// Read a big endian 1 or 2 byte integer using the high bit of the first byte as an "extension" bit.
		int readVarInt() {
			int x = *rptr++;
			// If the high bit is set
			if(x & 0x80) {
				// Clear the high bit
				x &= 0x7f;
				// Shift low byte left
				x <<= 8;
				// Read the new low byte and OR it in
				x |= *rptr++;
			}
			return x;
		}

		StringRef readString(int len) {
			StringRef s(rptr, len);
			rptr += len;
			return s;
		}

		const byte * readBytes(int len) {
			const byte *b = rptr;
			rptr += len;
			return b;
		}
	};
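
	// Illustrative varint encodings for Writer::writeVarInt() / Reader::readVarInt() above
	// (examples added for clarity, not from the original source):
	//   x = 5    -> 1 byte:  05          (values < 128 fit in one byte with the high bit clear)
	//   x = 130  -> 2 bytes: 80 82       (0x80 | (130 >> 8) = 0x80, then 130 & 0xff = 0x82)
	//   x = 300  -> 2 bytes: 81 2C       (0x80 | (300 >> 8) = 0x81, then 300 & 0xff = 0x2C)
	// The reader reverses this: if the first byte's high bit is set, keep its low 7 bits,
	// shift left 8, and OR in the next byte.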

#pragma pack(push,1)
	struct Delta {
		// Serialized Format
		//
		// 1 byte for Flags + a 4 bit length
		//    borrow source is prev ancestor - 0 or 1
		//    has_key_suffix
		//    has_value
		//    has_version
		//    other_fields suffix len - 4 bits
		//
		// If has value and value is not 4 bytes
		//    1 byte value length
		//
		// 1 or 2 bytes for Prefix Borrow Length (hi bit indicates presence of second byte)
		//
		// IF has_key_suffix is set
		//    1 or 2 bytes for Key Suffix Length
		//
		// Key suffix bytes
		// Meta suffix bytes
		// Value bytes
		//
		// For a series of RedwoodRecordRef's containing shards of the same KV pair where the key size is < 104 bytes,
		// the overhead per middle chunk is 7 bytes:
		//   4 bytes of child pointers in the DeltaTree Node
		//   1 flag byte
		//   1 prefix borrow length byte
		//   1 meta suffix byte describing chunk start position
		enum EFlags {
			PREFIX_SOURCE = 0x80,
			HAS_KEY_SUFFIX = 0x40,
			HAS_VALUE = 0x20,
			HAS_VERSION = 0x10,
			INT_FIELD_SUFFIX_BITS = 0x0f
		};
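
		// Illustrative encoding (a sketch derived from writeDelta()/apply() below, not from the
		// original source), assuming a record ("applesauce", version 0, value "v") encoded against
		// a base record ("apple", version 0, no value):
		//   flags = HAS_VALUE | HAS_KEY_SUFFIX = 0x60   (no version bit, int field suffix len = 0)
		//   data  = 05                prefix borrow length varint ("apple" is reused)
		//           01                value length
		//           05                key suffix length varint
		//           73 61 75 63 65    key suffix "sauce"
		//           76                value "v"
		// for a total Delta size of 10 bytes including the flags byte.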

		uint8_t flags;
		byte data[];

		void setPrefixSource(bool val) {
			if(val) {
				flags |= PREFIX_SOURCE;
			}
			else {
				flags &= ~PREFIX_SOURCE;
			}
		}

		bool getPrefixSource() const {
			return flags & PREFIX_SOURCE;
		}

		RedwoodRecordRef apply(const RedwoodRecordRef &base, Arena &arena) const {
			Reader r(data);

			int intFieldSuffixLen = flags & INT_FIELD_SUFFIX_BITS;
			int prefixLen = r.readVarInt();
			int valueLen = (flags & HAS_VALUE) ? r.read<uint8_t>() : 0;

			StringRef k;

			int keyPrefixLen = std::min(prefixLen, base.key.size());
			int intFieldPrefixLen = prefixLen - keyPrefixLen;
			int keySuffixLen = (flags & HAS_KEY_SUFFIX) ? r.readVarInt() : 0;

			if(keySuffixLen > 0) {
				k = makeString(keyPrefixLen + keySuffixLen, arena);
				memcpy(mutateString(k), base.key.begin(), keyPrefixLen);
				memcpy(mutateString(k) + keyPrefixLen, r.readString(keySuffixLen).begin(), keySuffixLen);
			}
			else {
				k = base.key.substr(0, keyPrefixLen);
			}

			// Now decode the integer fields
			const byte *intFieldSuffix = r.readBytes(intFieldSuffixLen);

			// Create big endian array in which to reassemble the integer fields from prefix and suffix bytes
			byte intFields[intFieldArraySize];

			// If borrowing any bytes, get the source's integer field array
			if(intFieldPrefixLen > 0) {
				base.serializeIntFields(intFields);
			}
			else {
				memset(intFields, 0, intFieldArraySize);
			}

			// Version offset is used to skip the version bytes in the int field array when version is missing (aka 0)
			int versionOffset = ( (intFieldPrefixLen == 0) && (~flags & HAS_VERSION) ) ? 8 : 0;

			// If there are suffix bytes, copy those into place after the prefix
			if(intFieldSuffixLen > 0) {
				memcpy(intFields + versionOffset + intFieldPrefixLen, intFieldSuffix, intFieldSuffixLen);
			}

			// Zero out any remaining bytes if the array was initialized from base
			if(intFieldPrefixLen > 0) {
				for(int i = versionOffset + intFieldPrefixLen + intFieldSuffixLen; i < intFieldArraySize; ++i) {
					intFields[i] = 0;
				}
			}

			return RedwoodRecordRef(k, flags & HAS_VALUE ? r.readString(valueLen) : Optional<ValueRef>(), intFields);
		}

		int size() const {
			Reader r(data);

			int intFieldSuffixLen = flags & INT_FIELD_SUFFIX_BITS;
			int prefixLen = r.readVarInt();
			int valueLen = (flags & HAS_VALUE) ? r.read<uint8_t>() : 0;
			int keySuffixLen = (flags & HAS_KEY_SUFFIX) ? r.readVarInt() : 0;

			return sizeof(Delta) + r.rptr - data + intFieldSuffixLen + valueLen + keySuffixLen;
		}

		// The full record cannot be shown here because a Delta cannot be applied without the
		// RedwoodRecordRef it is based on, so this only prints the raw delta fields.
		std::string toString() const {
			Reader r(data);

			std::string flagString;
			if(flags & PREFIX_SOURCE) flagString += "prefixSource ";
			if(flags & HAS_KEY_SUFFIX) flagString += "keySuffix ";
			if(flags & HAS_VERSION) flagString += "Version ";
			if(flags & HAS_VALUE) flagString += "Value ";

			int intFieldSuffixLen = flags & INT_FIELD_SUFFIX_BITS;
			int prefixLen = r.readVarInt();
			int valueLen = (flags & HAS_VALUE) ? r.read<uint8_t>() : 0;
			int keySuffixLen = (flags & HAS_KEY_SUFFIX) ? r.readVarInt() : 0;

			return format("flags: %s prefixLen: %d keySuffixLen: %d intFieldSuffix: %d valueLen %d raw: %s",
				flagString.c_str(), prefixLen, keySuffixLen, intFieldSuffixLen, valueLen, StringRef((const uint8_t *)this, size()).toHexString().c_str());
		}
	};
#pragma pack(pop)
	int compare(const RedwoodRecordRef &rhs) const {
		int cmp = key.compare(rhs.key);
		if(cmp == 0) {
			cmp = version - rhs.version;
			if(cmp == 0) {
				// It is assumed that in any data set there will never be more than one
				// unique chunk total size for the same key and version, so sort by start, total
				// Chunked (represented by chunk.total > 0) sorts higher than whole
				cmp = chunk.start - rhs.chunk.start;
				if(cmp == 0) {
					cmp = chunk.total - rhs.chunk.total;
					if(cmp == 0) {
						// No-value sorts AFTER having a value.
						cmp = (value.present() ? 0 : 1) - (rhs.value.present() ? 0 : 1);
					}
				}
			}
		}
		return cmp;
	}

	bool operator==(const RedwoodRecordRef &rhs) const {
		return compare(rhs) == 0;
	}

	bool operator<(const RedwoodRecordRef &rhs) const {
		return compare(rhs) < 0;
	}

	bool operator>(const RedwoodRecordRef &rhs) const {
		return compare(rhs) > 0;
	}

	bool operator<=(const RedwoodRecordRef &rhs) const {
		return compare(rhs) <= 0;
	}

	bool operator>=(const RedwoodRecordRef &rhs) const {
		return compare(rhs) >= 0;
	}

	int deltaSize(const RedwoodRecordRef &base) const {
		int size = sizeof(Delta);

		if(value.present()) {
			size += value.get().size();
			++size;
		}

		int prefixLen = getCommonPrefixLen(base, 0);
		size += (prefixLen >= 128) ? 2 : 1;

		int intFieldPrefixLen;

		// Currently using a worst-case guess where int fields in the suffix are stored in their entirety if nonzero.
		if(prefixLen < key.size()) {
			int keySuffixLen = key.size() - prefixLen;
			size += (keySuffixLen >= 128) ? 2 : 1;
			size += keySuffixLen;
			intFieldPrefixLen = 0;
		}
		else {
			intFieldPrefixLen = prefixLen - key.size();
		}

		if(version == 0 && chunk.total == 0 && chunk.start == 0) {
			// No int field suffix needed
		}
		else {
			byte fields[intFieldArraySize];
			serializeIntFields(fields);

			const byte *end = fields + intFieldArraySize - 1;
			int trailingNulls = 0;
			while(*end-- == 0) {
				++trailingNulls;
			}

			size += std::max(0, intFieldArraySize - intFieldPrefixLen - trailingNulls);
			if(intFieldPrefixLen == 0 && version == 0) {
				size -= 8;
			}
		}

		return size;
	}

	// commonPrefix between *this and base can be passed if known
	int writeDelta(Delta &d, const RedwoodRecordRef &base, int commonPrefix = -1) const {
		d.flags = version == 0 ? 0 : Delta::HAS_VERSION;

		if(commonPrefix < 0) {
			commonPrefix = getCommonPrefixLen(base, 0);
		}

		Writer w(d.data);

		// prefixLen
		w.writeVarInt(commonPrefix);

		// valueLen
		if(value.present()) {
			d.flags |= Delta::HAS_VALUE;
			w.write<uint8_t>(value.get().size());
		}

		// keySuffixLen
		if(key.size() > commonPrefix) {
			d.flags |= Delta::HAS_KEY_SUFFIX;

			StringRef keySuffix = key.substr(commonPrefix);
			w.writeVarInt(keySuffix.size());

			// keySuffix
			w.writeString(keySuffix);
		}

		// This is a common case, where no int suffix is needed
		if(version == 0 && chunk.total == 0 && chunk.start == 0) {
			// The suffixLen bits in flags are already zero, so nothing to do here.
		}
		else {
			byte fields[intFieldArraySize];
			serializeIntFields(fields);

			// Find the position just past the last non-zero byte, skipping trailing zero bytes.
			// This for loop has no endPos > 0 check because it is known that the array contains non-null bytes.
			int endPos;
			for(endPos = intFieldArraySize; fields[endPos - 1] == 0; --endPos);

			// Start copying after any prefix bytes that matched the int fields of the base
			int intFieldPrefixLen = std::max(0, commonPrefix - key.size());
			int startPos = intFieldPrefixLen + (intFieldPrefixLen == 0 && version == 0 ? 8 : 0);
			int suffixLen = std::max(0, endPos - startPos);

			if(suffixLen > 0) {
				w.writeString(StringRef(fields + startPos, suffixLen));
				d.flags |= suffixLen;
			}
		}

		if(value.present()) {
			w.writeString(value.get());
		}

		return w.wptr - d.data + sizeof(Delta);
	}

	template<typename StringRefT>
	static std::string kvformat(StringRefT s, int hexLimit = -1) {
		bool hex = false;

		for(auto c : s) {
			if(!isprint(c)) {
				hex = true;
				break;
			}
		}

		return hex ? s.toHexString(hexLimit) : s.toString();
	}

	std::string toString(int hexLimit = 15) const {
		std::string r;
		r += format("'%s' @%" PRId64 " ", kvformat(key, hexLimit).c_str(), version);
		r += format("[%d/%d] ", chunk.start, chunk.total);
		if(value.present()) {
			r += format("-> '%s'", kvformat(value.get(), hexLimit).c_str());
		}
		else {
			r += "-> <not_present>";
		}
		return r;
	}
};
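
// Illustrative round-trip sketch (added for clarity, not part of the original file; the records
// below are hypothetical). deltaSize() gives a conservative size estimate, writeDelta() serializes
// a record as a delta against a base record, and Delta::apply() reconstructs it:
//
//   RedwoodRecordRef base(LiteralStringRef("apple"));
//   RedwoodRecordRef rec(LiteralStringRef("applesauce"), 0, LiteralStringRef("v"));
//
//   int maxSize = rec.deltaSize(base);
//   std::vector<uint8_t> buf(maxSize);
//   RedwoodRecordRef::Delta &d = *(RedwoodRecordRef::Delta *)buf.data();
//
//   int written = rec.writeDelta(d, base);     // written <= maxSize
//   Arena arena;
//   RedwoodRecordRef rebuilt = d.apply(base, arena);
//   ASSERT(rebuilt == rec);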

struct BTreePage {
	enum EPageFlags { IS_LEAF = 1 };

	typedef DeltaTree<RedwoodRecordRef> BinaryTree;

#pragma pack(push,1)
	struct {
		uint8_t flags;
		uint16_t count;
		uint32_t kvBytes;
		uint8_t extensionPageCount;
		LogicalPageID extensionPages[0];
	};
#pragma pack(pop)

	int size() const {
		const BinaryTree *t = &tree();
		return (uint8_t *)t - (uint8_t *)this + t->size();
	}

	bool isLeaf() const {
		return flags & IS_LEAF;
	}

	BinaryTree & tree() {
		return *(BinaryTree *)(extensionPages + extensionPageCount);
	}

	const BinaryTree & tree() const {
		return *(const BinaryTree *)(extensionPages + extensionPageCount);
	}
	static inline int GetHeaderSize(int extensionPages = 0) {
		// The header is the fixed fields above plus one LogicalPageID per extension page.
		return sizeof(BTreePage) + extensionPages * sizeof(LogicalPageID);
	}
	std::string toString(bool write, LogicalPageID id, Version ver, const RedwoodRecordRef *lowerBound, const RedwoodRecordRef *upperBound) const {
		std::string r;
		r += format("BTreePage op=%s id=%d ver=%" PRId64 " ptr=%p flags=0x%X count=%d kvBytes=%d extPages=%d\n  lowerBound: %s\n  upperBound: %s\n",
					write ? "write" : "read", id, ver, this, (int)flags, (int)count, (int)kvBytes, (int)extensionPageCount,
					lowerBound->toString().c_str(), upperBound->toString().c_str());
		try {
			if(count > 0) {
				// This doesn't use the cached reader for the page but it is only for debugging purposes
				BinaryTree::Reader reader(&tree(), lowerBound, upperBound);
				BinaryTree::Cursor c = reader.getCursor();

				c.moveFirst();
				ASSERT(c.valid());

				do {
					r += "  ";
					if(!(flags & IS_LEAF)) {
						RedwoodRecordRef rec = c.get();
						std::string val;
						if(rec.value.present()) {
							ASSERT(rec.value.get().size() == sizeof(uint32_t));
							uint32_t id = *(const uint32_t *)rec.value.get().begin();
							val = format("[Page id=%u]", id);
							rec.value = val;
						}
						r += rec.toString();
					}
					else {
						r += c.get().toString();
					}

					r += "\n";

					ASSERT(c.get().key >= lowerBound->key && c.get().key <= upperBound->key);
				} while(c.moveNext());
			}
		} catch(Error &e) {
			debug_printf("BTreePage::toString ERROR: %s\n", e.what());
			debug_printf("BTreePage::toString partial result: %s\n", r.c_str());
			throw;
		}

		return r;
	}
};

static void makeEmptyPage(Reference<IPage> page, uint8_t newFlags, int pageSize) {
	VALGRIND_MAKE_MEM_DEFINED(page->begin(), page->size());
	BTreePage *btpage = (BTreePage *)page->begin();
	btpage->flags = newFlags;
	btpage->kvBytes = 0;
	btpage->count = 0;
	btpage->extensionPageCount = 0;
	btpage->tree().build(nullptr, nullptr, nullptr, nullptr);
}

BTreePage::BinaryTree::Reader * getReader(Reference<const IPage> page) {
	return (BTreePage::BinaryTree::Reader *)page->userData;
}

struct BoundaryAndPage {
	Standalone<RedwoodRecordRef> lowerBound;
	// Only firstPage or multiPage will be in use at once
	Reference<IPage> firstPage;
	std::vector<Reference<IPage>> extPages;
};

// Returns a std::vector of BoundaryAndPage, pairing each encoded page (and any extension pages)
// with the lower boundary record of the entries it contains.
// TODO: Refactor this as an accumulator you add sorted keys to which makes pages.
template<typename Allocator>
static std::vector<BoundaryAndPage> buildPages(bool minimalBoundaries, const RedwoodRecordRef &lowerBound, const RedwoodRecordRef &upperBound, std::vector<RedwoodRecordRef> entries, uint8_t newFlags, Allocator const &newBlockFn, int usableBlockSize) {
	// TODO: Figure out how to do minimal boundaries with RedwoodRecordRef
	minimalBoundaries = false;

	// This is how much space for the binary tree exists in the page, after the header
	int pageSize = usableBlockSize - BTreePage::GetHeaderSize();

	// Each new block adds (usableBlockSize - sizeof(LogicalPageID)) more net usable space *for the binary tree* to pageSize.
	int netTreeBlockSize = usableBlockSize - sizeof(LogicalPageID);

	int blockCount = 1;
	std::vector<BoundaryAndPage> pages;

	int kvBytes = 0;
	int compressedBytes = BTreePage::BinaryTree::GetTreeOverhead();

	int start = 0;
	int i = 0;
	const int iEnd = entries.size();

	// Lower bound of the page being added to
	RedwoodRecordRef pageLowerBound = lowerBound.withoutValue();
	RedwoodRecordRef pageUpperBound;

	while(i <= iEnd) {
		bool end = i == iEnd;
		bool flush = end;

		// If not the end, add i to the page if necessary
		if(end) {
			pageUpperBound = upperBound.withoutValue();
		}
		else {
			// Get delta from previous record
			const RedwoodRecordRef &entry = entries[i];
			int deltaSize = entry.deltaSize((i == start) ? pageLowerBound : entries[i - 1]);
			int keySize = entry.key.size();
			int valueSize = entry.value.present() ? entry.value.get().size() : 0;

			int spaceNeeded = sizeof(BTreePage::BinaryTree::Node) + deltaSize;

			debug_printf("Trying to add record %3d of %3lu (i=%3d)  klen %4d  vlen %3d  deltaSize %4d  spaceNeeded %4d  compressed %4d / page %4d bytes  %s\n",
				i + 1, entries.size(), i, keySize, valueSize, deltaSize,
				spaceNeeded, compressedBytes, pageSize, entry.toString().c_str());

			int spaceAvailable = pageSize - compressedBytes;

			// Does it fit?
			bool fits = spaceAvailable >= spaceNeeded;

			// If it doesn't fit, either end the current page or increase the page size
			if(!fits) {
				// For leaf level where minimal boundaries are used require at least 1 entry, otherwise require 4 to enforce a minimum branching factor
				int minimumEntries = minimalBoundaries ? 1 : 4;
				int count = i - start;

				// If not enough entries or page less than half full, increase page size to make the entry fit
				if(count < minimumEntries || spaceAvailable > pageSize / 2) {
					// Figure out how many additional whole or partial blocks are needed
					int newBlocks = 1 + (spaceNeeded - spaceAvailable - 1) / netTreeBlockSize;
					int newPageSize = pageSize + (newBlocks * netTreeBlockSize);
					if(newPageSize <= BTreePage::BinaryTree::MaximumTreeSize()) {
						blockCount += newBlocks;
						pageSize = newPageSize;
						fits = true;
					}
				}
				if(!fits) {
					// Flush page
					if(minimalBoundaries) {
						// TODO: Write minimal boundaries
						// Note that prefixLen is guaranteed to be < entry.key.size() because entries are in increasing order and cannot repeat.
						// int len = prefixLen + 1;
						// if(entry.key[prefixLen] == 0)
						//     len = std::min(len + 1, entry.key.size());
						// pageUpperBound = entry.key.substr(0, len);
					}
					else {
						pageUpperBound = entry.withoutValue();
					}
				}
			}

			// If the record fits then add it to the page set
			if(fits) {
				kvBytes += keySize + valueSize;
				compressedBytes += spaceNeeded;
				++i;
			}

			flush = !fits;
		}

		// If flush then write a page using records from start to i.  It's guaranteed that pageUpperBound has been set above.
		if(flush) {
			end = i == iEnd;  // i could have been moved above

			int count = i - start;
			// If not writing the final page, reduce entry count of page by a third
			if(!end) {
				i -= count / 3;
				pageUpperBound = entries[i].withoutValue();
			}

			debug_printf("Flushing page start=%d i=%d count=%d\nlower: %s\nupper: %s\n", start, i, count, pageLowerBound.toString().c_str(), pageUpperBound.toString().c_str());

#if REDWOOD_DEBUG
			for(int j = start; j < i; ++j) {
				debug_printf(" %3d: %s\n", j, entries[j].toString().c_str());
				if(j > start) {
					//ASSERT(entries[j] > entries[j - 1]);
				}
			}
			ASSERT(pageLowerBound.key <= pageUpperBound.key);
#endif

			union {
				BTreePage *btPage;
				uint8_t *btPageMem;
			};

			int allocatedSize;
			if(blockCount == 1) {
				Reference<IPage> page = newBlockFn();
				VALGRIND_MAKE_MEM_DEFINED(page->begin(), page->size());
				btPageMem = page->mutate();
				allocatedSize = page->size();
				pages.push_back({pageLowerBound, page});
			}
			else {
				ASSERT(blockCount > 1);
				allocatedSize = usableBlockSize * blockCount;
				btPageMem = new uint8_t[allocatedSize];
				VALGRIND_MAKE_MEM_DEFINED(btPageMem, allocatedSize);
			}

			btPage->flags = newFlags;
			btPage->kvBytes = kvBytes;
			btPage->count = i - start;
			btPage->extensionPageCount = blockCount - 1;

			int written = btPage->tree().build(&entries[start], &entries[i], &pageLowerBound, &pageUpperBound);
			if(written > pageSize) {
				fprintf(stderr, "ERROR:  Wrote %d bytes to %d byte page (%d blocks). recs %d  kvBytes %d  compressed %d\n", written, pageSize, blockCount, i - start, kvBytes, compressedBytes);
				ASSERT(false);
			}

			if(blockCount != 1) {
				Reference<IPage> page = newBlockFn();
				VALGRIND_MAKE_MEM_DEFINED(page->begin(), page->size());

				const uint8_t *rptr = btPageMem;
				memcpy(page->mutate(), rptr, usableBlockSize);
				rptr += usableBlockSize;

				std::vector<Reference<IPage>> extPages;
				for(int b = 1; b < blockCount; ++b) {
					Reference<IPage> extPage = newBlockFn();
					VALGRIND_MAKE_MEM_DEFINED(extPage->begin(), extPage->size());

					//debug_printf("block %d write offset %d\n", b, firstBlockSize + (b - 1) * usableBlockSize);
					memcpy(extPage->mutate(), rptr, usableBlockSize);
					rptr += usableBlockSize;
					extPages.push_back(std::move(extPage));
				}

				pages.push_back({std::move(pageLowerBound), std::move(page), std::move(extPages)});
				delete [] btPageMem;
			}

			if(end)
				break;
			start = i;
			kvBytes = 0;
			compressedBytes = BTreePage::BinaryTree::GetTreeOverhead();
			pageLowerBound = pageUpperBound.withoutValue();
		}
	}

	//debug_printf("buildPages: returning pages.size %lu, kvpairs %lu\n", pages.size(), kvPairs.size());
	return pages;
}

#define NOT_IMPLEMENTED { UNSTOPPABLE_ASSERT(false); }

class VersionedBTree : public IVersionedStore {
public:
	// The first possible internal record in the tree
	static RedwoodRecordRef dbBegin;
	// A record which is greater than the last possible record in the tree
	static RedwoodRecordRef dbEnd;

	struct Counts {
		Counts() {
			memset(this, 0, sizeof(Counts));
		}

		void clear() {
			*this = Counts();
		}

		int64_t pageReads;
		int64_t extPageReads;
		int64_t setBytes;
		int64_t pageWrites;
		int64_t extPageWrites;
		int64_t sets;
		int64_t clears;
		int64_t commits;
		int64_t gets;
		int64_t getRanges;
		int64_t commitToPage;
		int64_t commitToPageStart;

		std::string toString(bool clearAfter = false) {
			std::string s = format("set=%" PRId64 " clear=%" PRId64 " get=%" PRId64 " getRange=%" PRId64 " commit=%" PRId64 " pageRead=%" PRId64 " extPageRead=%" PRId64 " pageWrite=%" PRId64 " extPageWrite=%" PRId64 " commitPage=%" PRId64 " commitPageStart=%" PRId64,
				sets, clears, gets, getRanges, commits, pageReads, extPageReads, pageWrites, extPageWrites, commitToPage, commitToPageStart);
			if(clearAfter) {
				clear();
			}
			return s;
		}
	};

	// Using a static for metrics because a single process shouldn't normally have multiple storage engines
	static Counts counts;

	// All async ops on the btree are based on pager reads, writes, and commits, so
	// we can mostly forward these next few functions to the pager
	virtual Future<Void> getError() {
		return m_pager->getError();
	}

	virtual Future<Void> onClosed() {
		return m_pager->onClosed();
	}

	void close_impl(bool dispose) {
		IPager *pager = m_pager;
		delete this;
		if(dispose)
			pager->dispose();
		else
			pager->close();
	}

	virtual void dispose() {
		return close_impl(true);
	}

	virtual void close() {
		return close_impl(false);
	}

	virtual KeyValueStoreType getType() NOT_IMPLEMENTED
	virtual bool supportsMutation(int op) NOT_IMPLEMENTED

	virtual StorageBytes getStorageBytes() {
		return m_pager->getStorageBytes();
	}
	// Writes are provided in an ordered stream.
	// A write is considered part of (a change leading to) the version determined by the previous call to setWriteVersion()
	// A write shall not become durable until the following call to commit() begins, and shall be durable once the following call to commit() returns
	virtual void set(KeyValueRef keyValue) {
		++counts.sets;
		SingleKeyMutationsByVersion &changes = insertMutationBoundary(keyValue.key)->second.startKeyMutations;

		if(singleVersion) {
			if(changes.empty()) {
				changes[0] = SingleKeyMutation(keyValue.value);
			}
			else {
				changes.begin()->second = SingleKeyMutation(keyValue.value);
			}
		}
		else {
			// Add the set if the changes set is empty or the last entry isn't a set to exactly the same value
			if(changes.empty() || !changes.rbegin()->second.equalToSet(keyValue.value)) {
				changes[m_writeVersion] = SingleKeyMutation(keyValue.value);
			}
		}
	}

	virtual void clear(KeyRangeRef range) {
		++counts.clears;
		MutationBufferT::iterator iBegin = insertMutationBoundary(range.begin);
		MutationBufferT::iterator iEnd = insertMutationBoundary(range.end);

		// In single version mode, clear all pending updates in the affected range
		if(singleVersion) {
			RangeMutation &range = iBegin->second;
			range.startKeyMutations.clear();
			range.startKeyMutations[0] = SingleKeyMutation();
			range.rangeClearVersion = 0;
			++iBegin;
			m_pBuffer->erase(iBegin, iEnd);
		}
		else {
			// For each boundary in the cleared range
			while(iBegin != iEnd) {
				RangeMutation &range = iBegin->second;

				// Set the rangeClearedVersion if not set
				if(!range.rangeClearVersion.present())
					range.rangeClearVersion = m_writeVersion;

				// Add a clear to the startKeyMutations map if it's empty or the last item is not a clear
				if(range.startKeyMutations.empty() || !range.startKeyMutations.rbegin()->second.isClear())
					range.startKeyMutations[m_writeVersion] = SingleKeyMutation();

				++iBegin;
			}
		}
	}
	virtual void mutate(int op, StringRef param1, StringRef param2) NOT_IMPLEMENTED

	// Versions [begin, end) no longer readable
	virtual void forgetVersions(Version begin, Version end) NOT_IMPLEMENTED

	virtual Future<Version> getLatestVersion() {
		if(m_writeVersion != invalidVersion)
			return m_writeVersion;
		return m_pager->getLatestVersion();
	}

	Version getWriteVersion() {
		return m_writeVersion;
	}

	Version getLastCommittedVersion() {
		return m_lastCommittedVersion;
	}

	VersionedBTree(IPager *pager, std::string name, bool singleVersion = false, int target_page_size = -1)
	  : m_pager(pager),
		m_writeVersion(invalidVersion),
		m_usablePageSizeOverride(pager->getUsablePageSize()),
		m_lastCommittedVersion(invalidVersion),
		m_pBuffer(nullptr),
		m_name(name),
		singleVersion(singleVersion)
	{
		if(target_page_size > 0 && target_page_size < m_usablePageSizeOverride)
			m_usablePageSizeOverride = target_page_size;
		m_init = init_impl(this);
		m_latestCommit = m_init;
	}
	ACTOR static Future<Void> init_impl(VersionedBTree *self) {
		self->m_root = 0;
		state Version latest = wait(self->m_pager->getLatestVersion());
		if(latest == 0) {
			++latest;
			Reference<IPage> page = self->m_pager->newPageBuffer();
			makeEmptyPage(page, BTreePage::IS_LEAF, self->m_usablePageSizeOverride);
			self->writePage(self->m_root, page, latest, &dbBegin, &dbEnd);
			self->m_pager->setLatestVersion(latest);
			wait(self->m_pager->commit());
		}
		self->m_lastCommittedVersion = latest;
		return Void();
	}

	Future<Void> init() { return m_init; }

	virtual ~VersionedBTree() {
		// This probably shouldn't be called directly (meaning deleting an instance directly) but it should be safe,
		// it will cancel init and commit and leave the pager alive but with potentially an incomplete set of
		// uncommitted writes so it should not be committed.
		m_init.cancel();
		m_latestCommit.cancel();
	}
	// readAtVersion() may only be called on a version which has previously been passed to setWriteVersion() and never previously passed
	//   to forgetVersion.  The returned results when violating this precondition are unspecified; the store is not required to be able to detect violations.
	// The returned read cursor provides a consistent snapshot of the versioned store, corresponding to all the writes done with write versions less
	//   than or equal to the given version.
	// If readAtVersion() is called on the *current* write version, the given read cursor MAY reflect subsequent writes at the same
	//   write version, OR it may represent a snapshot as of the call to readAtVersion().
	virtual Reference<IStoreCursor> readAtVersion(Version v) {
		// TODO: Use the buffer to return uncommitted data
		// For now, only committed versions can be read.
		Version recordVersion = singleVersion ? 0 : v;
		ASSERT(v <= m_lastCommittedVersion);
		if(singleVersion) {
			ASSERT(v == m_lastCommittedVersion);
		}
		return Reference<IStoreCursor>(new Cursor(m_pager->getReadSnapshot(v), m_root, recordVersion, m_usablePageSizeOverride));
	}

	// Must be nondecreasing
	virtual void setWriteVersion(Version v) {
		ASSERT(v > m_lastCommittedVersion);
		// If there was no current mutation buffer, create one in the buffer map and update m_pBuffer
		if(m_pBuffer == nullptr) {
			// When starting a new mutation buffer its start version must be greater than the last write version
			ASSERT(v > m_writeVersion);
			m_pBuffer = &m_mutationBuffers[v];
			// Create range representing the entire keyspace.  This reduces edge cases to applying mutations
			// because now all existing keys are within some range in the mutation map.
			(*m_pBuffer)[dbBegin.key];
			(*m_pBuffer)[dbEnd.key];
		}
		else {
			// It's OK to set the write version to the same version repeatedly so long as m_pBuffer is not null
			ASSERT(v >= m_writeVersion);
		}
		m_writeVersion = v;
	}

	virtual Future<Void> commit() {
		if(m_pBuffer == nullptr)
			return m_latestCommit;
		return commit_impl(this);
	}

	bool isSingleVersion() const {
		return singleVersion;
	}
private:
	void writePage(LogicalPageID id, Reference<IPage> page, Version ver, const RedwoodRecordRef *pageLowerBound, const RedwoodRecordRef *pageUpperBound) {
		debug_printf("writePage(): %s\n", ((const BTreePage *)page->begin())->toString(true, id, ver, pageLowerBound, pageUpperBound).c_str());
		m_pager->writePage(id, page, ver);
	}

	LogicalPageID m_root;

	typedef std::pair<Standalone<RedwoodRecordRef>, LogicalPageID> KeyPagePairT;
	typedef std::pair<Version, std::vector<KeyPagePairT>> VersionedKeyToPageSetT;
	typedef std::vector<VersionedKeyToPageSetT> VersionedChildrenT;

	static std::string toString(const KeyPagePairT &c) {
		return format("(%s, Page %u)", c.first.toString(-1).c_str(), c.second);
	}

	static std::string toString(const VersionedKeyToPageSetT &c) {
		std::string r = format("Version %" PRId64 " => [", c.first);
		for(auto &o : c.second) {
			r += toString(o) + " ";
		}
		return r + "]";
	}

	static std::string toString(const VersionedChildrenT &c) {
		std::string r = "{ ";
		for(auto &o : c) {
			r += toString(o) + ", ";
		}
		return r + " }";
	}
	// Represents a change to a single key - set, clear, or atomic op
	struct SingleKeyMutation {
		// Clear
		SingleKeyMutation() : op(MutationRef::ClearRange) {}
		// Set
		SingleKeyMutation(Value val) : op(MutationRef::SetValue), value(val) {}
		// Atomic Op
		SingleKeyMutation(MutationRef::Type op, Value val) : op(op), value(val) {}

		MutationRef::Type op;
		Value value;

		inline bool isClear() const { return op == MutationRef::ClearRange; }
		inline bool isSet() const { return op == MutationRef::SetValue; }
		inline bool isAtomicOp() const { return !isSet() && !isClear(); }

		inline bool equalToSet(ValueRef val) { return isSet() && value == val; }

		inline RedwoodRecordRef toRecord(KeyRef userKey, Version version) const {
			// No point in serializing an atomic op, it needs to be coalesced to a real value.
			ASSERT(!isAtomicOp());

			if(isClear())
				return RedwoodRecordRef(userKey, version);

			return RedwoodRecordRef(userKey, version, value);
		}

		std::string toString() const {
			return format("op=%d val='%s'", op, printable(value).c_str());
		}
	};

	// Represents mutations on a single key and a possible clear to a range that begins
	// immediately after that key
	typedef std::map<Version, SingleKeyMutation> SingleKeyMutationsByVersion;
	struct RangeMutation {
		// Mutations for exactly the start key
		SingleKeyMutationsByVersion startKeyMutations;
		// A clear range version, if cleared, for the range starting immediately AFTER the start key
		Optional<Version> rangeClearVersion;

		// Returns true if this RangeMutation doesn't actually mutate anything
		bool noChanges() const {
			return !rangeClearVersion.present() && startKeyMutations.empty();
		}

		std::string toString() const {
			std::string result;
			result.append("rangeClearVersion: ");
			if(rangeClearVersion.present())
				result.append(format("%" PRId64, rangeClearVersion.get()));
			else
				result.append("<not present>");
			result.append("  startKeyMutations: ");
			for(SingleKeyMutationsByVersion::value_type const &m : startKeyMutations)
				result.append(format("[%" PRId64 " => %s] ", m.first, m.second.toString().c_str()));
			return result;
		}
	};
	typedef std::map<Key, RangeMutation> MutationBufferT;

	/* Mutation Buffer Overview
	 *
	 * This structure's organization is meant to put pending updates for the btree in an order
	 * that makes it efficient to query all pending mutations across all pending versions which are
	 * relevant to a particular subtree of the btree.
	 *
	 * At the top level, it is a map from the start key of a range being modified to a RangeMutation.
	 * The end of each range is the next range start key in the map.
	 *
	 * - The buffer starts out with the keys dbBegin.key ('') and dbEnd.key already populated.
	 *
	 * - When a new key is inserted into the buffer map, it is by definition
	 *   splitting an existing range so it should take on the rangeClearVersion of
	 *   the immediately preceding key which is the start of that range
	 *
	 * - Keys are inserted into the buffer map for every individual operation (set/clear/atomic)
	 *   key and for both the start and end of a range clear.
	 *
	 * - To apply a single clear, add it to the individual ops only if the last entry is not also a clear.
	 *
	 * - To apply a range clear, after inserting the new range boundaries do the following to the start
	 *   boundary and all successive boundaries < end
	 *    - set the range clear version if not already set
	 *    - add a clear to the startKeyMutations if the final entry is not a clear.
	 *
	 * - Note that there are actually TWO valid ways to represent
	 *       set c = val1 at version 1
	 *       clear c\x00 to z at version 2
	 *   with this model.  Either
	 *      c =     { rangeClearVersion = 2, startKeyMutations = { 1 => val1 } }
	 *      z =     { rangeClearVersion = <not present>, startKeyMutations = {} }
	 *   OR
	 *      c =     { rangeClearVersion = <not present>, startKeyMutations = { 1 => val1 } }
	 *      c\x00 = { rangeClearVersion = 2, startKeyMutations = { 2 => <not present> } }
	 *      z =     { rangeClearVersion = <not present>, startKeyMutations = {} }
	 *
	 *   This is because the rangeClearVersion applies to a range beginning with the first
	 *   key AFTER the start key, so that the logic for reading the start key is more simple
	 *   as it only involves consulting startKeyMutations.  When adding a clear range, the
	 *   boundary key insert/split described above is valid, and is what is currently done,
	 *   but it would also be valid to see if the last key before startKey is equal to
	 *   keyBefore(startKey), and if so that mutation buffer boundary key can be used instead
	 *   without adding an additional key to the buffer.
	 *
	 * TODO: A possible optimization here could be to only use existing btree leaf page boundaries as keys,
	 * with mutation point keys being stored in an unsorted structure under those boundary map keys,
	 * to be sorted later just before being merged into the existing leaf page.
	 */

	IPager *m_pager;
	MutationBufferT *m_pBuffer;
	std::map<Version, MutationBufferT> m_mutationBuffers;

	Version m_writeVersion;
	Version m_lastCommittedVersion;
	Future<Void> m_latestCommit;
	int m_usablePageSizeOverride;
	Future<Void> m_init;
	std::string m_name;
	bool singleVersion;

	void printMutationBuffer(MutationBufferT::const_iterator begin, MutationBufferT::const_iterator end) const {
#if REDWOOD_DEBUG
		debug_printf("-------------------------------------\n");
		debug_printf("BUFFER\n");
		while(begin != end) {
			debug_printf("'%s':  %s\n", printable(begin->first).c_str(), begin->second.toString().c_str());
			++begin;
		}
		debug_printf("-------------------------------------\n");
#endif
	}

	void printMutationBuffer(MutationBufferT *buf) const {
		return printMutationBuffer(buf->begin(), buf->end());
	}

	// Find or create a mutation buffer boundary for bound and return an iterator to it
	MutationBufferT::iterator insertMutationBoundary(Key boundary) {
		ASSERT(m_pBuffer != nullptr);

		// Find the first split point in buffer that is >= key
		MutationBufferT::iterator ib = m_pBuffer->lower_bound(boundary);

		// Since the initial state of the mutation buffer contains the range '' through
		// the maximum possible key, our search had to have found something.
		ASSERT(ib != m_pBuffer->end());

		// If we found the boundary we are looking for, return its iterator
		if(ib->first == boundary)
			return ib;

		// ib is our insert hint.  Insert the new boundary and set ib to its entry
		ib = m_pBuffer->insert(ib, {boundary, RangeMutation()});

		// ib is certainly > begin() because it is guaranteed that the empty string
		// boundary exists and the only way to have found that is to look explicitly
		// for it in which case we would have returned above.
		MutationBufferT::iterator iPrevious = ib;
		--iPrevious;
		if(iPrevious->second.rangeClearVersion.present()) {
			ib->second.rangeClearVersion = iPrevious->second.rangeClearVersion;
			ib->second.startKeyMutations[iPrevious->second.rangeClearVersion.get()] = SingleKeyMutation();
		}

		return ib;
	}
void buildNewRoot ( Version version , std : : vector < BoundaryAndPage > & pages , std : : vector < LogicalPageID > & logicalPageIDs , const BTreePage * pPage ) {
2018-08-29 04:46:14 +08:00
//debug_printf("buildNewRoot start %lu\n", pages.size());
2017-07-14 02:32:14 +08:00
// While there are multiple child pages for this version we must write new tree levels.
while ( pages . size ( ) > 1 ) {
2019-02-21 18:46:30 +08:00
std : : vector < RedwoodRecordRef > childEntries ;
for ( int i = 0 ; i < pages . size ( ) ; i + + ) {
RedwoodRecordRef entry = pages [ i ] . lowerBound ;
entry . value = StringRef ( ( unsigned char * ) & logicalPageIDs [ i ] , sizeof ( uint32_t ) ) ;
childEntries . push_back ( entry ) ;
}
2017-07-14 02:32:14 +08:00
2017-07-14 13:11:48 +08:00
int oldPages = pages . size ( ) ;
2019-02-21 18:46:30 +08:00
pages = buildPages ( false , dbBegin , dbEnd , childEntries , 0 , [ = ] ( ) { return m_pager - > newPageBuffer ( ) ; } , m_usablePageSizeOverride ) ;
2017-07-14 02:32:14 +08:00
2019-05-29 21:23:32 +08:00
debug_printf ( " Writing a new root level at version % " PRId64 " with %lu children across %lu pages \n " , version , childEntries . size ( ) , pages . size ( ) ) ;
2017-07-14 02:32:14 +08:00
2019-02-21 18:46:30 +08:00
logicalPageIDs = writePages ( pages , version , m_root , pPage , & dbEnd , nullptr ) ;
2018-09-19 15:32:39 +08:00
}
}
	std::vector<LogicalPageID> writePages(std::vector<BoundaryAndPage> pages, Version version, LogicalPageID originalID, const BTreePage *originalPage, const RedwoodRecordRef *upperBound, void *actor_debug) {
		debug_printf("%p: writePages(): %u @%" PRId64 " -> %lu replacement pages\n", actor_debug, originalID, version, pages.size());

		ASSERT(version != 0 || pages.size() == 1);

		std::vector<LogicalPageID> primaryLogicalPageIDs;

		// Reuse original primary page ID if it's not the root or if only one page is being written.
		if(originalID != m_root || pages.size() == 1)
			primaryLogicalPageIDs.push_back(originalID);

		// Allocate a primary page ID for each page to be written
		while(primaryLogicalPageIDs.size() < pages.size()) {
			primaryLogicalPageIDs.push_back(m_pager->allocateLogicalPage());
		}

		debug_printf("%p: writePages(): Writing %lu replacement pages for %d at version %" PRId64 "\n", actor_debug, pages.size(), originalID, version);
		for(int i = 0; i < pages.size(); i++) {
			++counts.pageWrites;

			// Allocate page number for main page first
			LogicalPageID id = primaryLogicalPageIDs[i];

			// Check for extension pages, if they exist assign IDs for them and write them at version
			auto const &extPages = pages[i].extPages;
			// If there are extension pages, write all pages using pager directly because this->writePage() is for whole primary pages
			if(extPages.size() != 0) {
				BTreePage *newPage = (BTreePage *)pages[i].firstPage->mutate();
				ASSERT(newPage->extensionPageCount == extPages.size());

				for(int e = 0, eEnd = extPages.size(); e < eEnd; ++e) {
					LogicalPageID eid = m_pager->allocateLogicalPage();
					debug_printf("%p: writePages(): Writing extension page op=write id=%u @%" PRId64 " (%d of %lu) referencePage=%u\n", actor_debug, eid, version, e + 1, extPages.size(), id);
					newPage->extensionPages[e] = eid;
					// If replacing the primary page below (version == 0) then pass the primary page's ID as the reference page ID
					m_pager->writePage(eid, extPages[e], version, (version == 0) ? id : invalidLogicalPageID);
					++counts.extPageWrites;
				}

				debug_printf("%p: writePages(): Writing primary page op=write id=%u @%" PRId64 " (+%lu extension pages)\n", actor_debug, id, version, extPages.size());
				m_pager->writePage(id, pages[i].firstPage, version);
			}
			else {
				debug_printf("%p: writePages(): Writing normal page op=write id=%u @%" PRId64 "\n", actor_debug, id, version);
				writePage(id, pages[i].firstPage, version, &pages[i].lowerBound, (i == pages.size() - 1) ? upperBound : &pages[i + 1].lowerBound);
			}
		}

		// Free the old extension pages now that all replacement pages have been written
		for(int i = 0; i < originalPage->extensionPageCount; ++i) {
			//debug_printf("%p: writePages(): Freeing old extension op=del id=%u @latest\n", actor_debug, originalPage->extensionPages[i]);
			//m_pager->freeLogicalPage(originalPage->extensionPages[i], version);
		}

		return primaryLogicalPageIDs;
	}
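	// Note: version 0 is used above to mean "replace the latest version of the original page in place",
	// which is why a multi-page replacement requires version != 0 and why extension pages written at
	// version 0 pass the primary page's ID as their reference page (presumably so the pager can tie
	// their lifetime to the page being replaced).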
	class SuperPage : public IPage, ReferenceCounted<SuperPage> {
	public:
		SuperPage(std::vector<Reference<const IPage>> pages, int usablePageSize)
		  : m_size(pages.size() * usablePageSize) {
			m_data = new uint8_t[m_size];
			uint8_t *wptr = m_data;
			for(auto &p : pages) {
				memcpy(wptr, p->begin(), usablePageSize);
				wptr += usablePageSize;
			}
		}

		virtual ~SuperPage() {
			delete [] m_data;
		}

		virtual void addref() const {
			ReferenceCounted<SuperPage>::addref();
		}

		virtual void delref() const {
			ReferenceCounted<SuperPage>::delref();
		}

		virtual int size() const {
			return m_size;
		}

		virtual uint8_t const* begin() const {
			return m_data;
		}

		virtual uint8_t* mutate() {
			return m_data;
		}

	private:
		uint8_t *m_data;
		const int m_size;
	};
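	// Usage sketch (mirrors readPage() below): a SuperPage stitches a primary page and its extension
	// pages into one contiguous buffer so the BTreePage code can treat them as a single larger page.
	//
	//     std::vector<Reference<const IPage>> parts = {primary, ext0, ext1};
	//     Reference<const IPage> big(new SuperPage(parts, usablePageSize));
	//     // big->size() == parts.size() * usablePageSize
	//
	// The data is copied because the BTreePage format assumes one contiguous memory region.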
	ACTOR static Future<Reference<const IPage>> readPage(Reference<IPagerSnapshot> snapshot, LogicalPageID id, int usablePageSize, const RedwoodRecordRef *lowerBound, const RedwoodRecordRef *upperBound) {
		debug_printf("readPage() op=read id=%u @%" PRId64 " lower=%s upper=%s\n", id, snapshot->getVersion(), lowerBound->toString().c_str(), upperBound->toString().c_str());
		wait(delay(0, TaskDiskRead));

		state Reference<const IPage> result = wait(snapshot->getPhysicalPage(id));
		++counts.pageReads;
		state const BTreePage *pTreePage = (const BTreePage *)result->begin();

		if(pTreePage->extensionPageCount == 0) {
			debug_printf("readPage() Found normal page for op=read id=%u @%" PRId64 "\n", id, snapshot->getVersion());
		}
		else {
			std::vector<Future<Reference<const IPage>>> pageGets;
			pageGets.push_back(std::move(result));

			for(int i = 0; i < pTreePage->extensionPageCount; ++i) {
				debug_printf("readPage() Reading extension page op=read id=%u @%" PRId64 " ext=%d/%d\n", pTreePage->extensionPages[i], snapshot->getVersion(), i + 1, (int)pTreePage->extensionPageCount);
				pageGets.push_back(snapshot->getPhysicalPage(pTreePage->extensionPages[i]));
			}

			std::vector<Reference<const IPage>> pages = wait(getAll(pageGets));
			counts.extPageReads += pTreePage->extensionPageCount;
			result = Reference<const IPage>(new SuperPage(pages, usablePageSize));
			pTreePage = (const BTreePage *)result->begin();
		}

		if(result->userData == nullptr) {
			debug_printf("readPage() Creating Reader for page id=%u @%" PRId64 " lower=%s upper=%s\n", id, snapshot->getVersion(), lowerBound->toString().c_str(), upperBound->toString().c_str());
			result->userData = new BTreePage::BinaryTree::Reader(&pTreePage->tree(), lowerBound, upperBound);
			result->userDataDestructor = [](void *ptr) { delete (BTreePage::BinaryTree::Reader *)ptr; };
		}

		debug_printf("readPage() %s\n", pTreePage->toString(false, id, snapshot->getVersion(), lowerBound, upperBound).c_str());

		// Nothing should attempt to read bytes in the page outside the BTreePage structure
		VALGRIND_MAKE_MEM_UNDEFINED(result->begin() + pTreePage->size(), result->size() - pTreePage->size());

		return result;
	}
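	// Note: the decoded BinaryTree::Reader is cached on the IPage via userData so repeated reads of the
	// same physical page with the same boundaries do not rebuild it; the userDataDestructor lambda frees
	// it when the page object is destroyed.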
	// Returns list of (version, list of (lower_bound, list of children))
	// TODO: Probably should pass prev/next records by pointer in many places
	ACTOR static Future<VersionedChildrenT> commitSubtree(VersionedBTree *self, MutationBufferT *mutationBuffer, Reference<IPagerSnapshot> snapshot, LogicalPageID root, const RedwoodRecordRef *lowerBound, const RedwoodRecordRef *upperBound, const RedwoodRecordRef *decodeLowerBound, const RedwoodRecordRef *decodeUpperBound) {
		debug_printf("%p commitSubtree: root=%d lower=%s upper=%s\n", THIS, root, lowerBound->toString().c_str(), upperBound->toString().c_str());
		debug_printf("%p commitSubtree: root=%d decodeLower=%s decodeUpper=%s\n", THIS, root, decodeLowerBound->toString().c_str(), decodeUpperBound->toString().c_str());
		self->counts.commitToPageStart++;

		// If a boundary changed, the page must be rewritten regardless of KV mutations
		state bool boundaryChanged = (lowerBound != decodeLowerBound) || (upperBound != decodeUpperBound);
		debug_printf("%p id=%u boundaryChanged=%d\n", THIS, root, boundaryChanged);

		// Find the slice of the mutation buffer that is relevant to this subtree
		// TODO: Rather than two lower_bound searches, perhaps just compare each mutation to the upperBound key while iterating
		state MutationBufferT::const_iterator iMutationBoundary = mutationBuffer->upper_bound(lowerBound->key);
		--iMutationBoundary;
		state MutationBufferT::const_iterator iMutationBoundaryEnd = mutationBuffer->lower_bound(upperBound->key);

		if(REDWOOD_DEBUG) {
			self->printMutationBuffer(iMutationBoundary, iMutationBoundaryEnd);
		}

		// If the boundary range iterators are the same then lowerBound and upperBound have the same key.
		// If that key is being mutated, then remove this subtree.
		if(iMutationBoundary == iMutationBoundaryEnd) {
			if(!iMutationBoundary->second.startKeyMutations.empty()) {
				VersionedChildrenT c;
				debug_printf("%p id=%u lower and upper bound key/version match and key is modified so deleting page, returning %s\n", THIS, root, toString(c).c_str());
				return c;
			}

			// If there are no forced boundary changes then this subtree is unchanged.
			if(!boundaryChanged) {
				VersionedChildrenT c({ {0, {{*lowerBound, root}}} });
				debug_printf("%p id=%d page contains a single key '%s' which is not changing, returning %s\n", THIS, root, lowerBound->key.toString().c_str(), toString(c).c_str());
				return c;
			}
		}
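		// Note: lowerBound/upperBound are the key boundaries this subtree covers in the new tree being
		// written, while decodeLowerBound/decodeUpperBound are the boundaries the existing page was
		// originally written with.  The page's prefix-compressed records can only be decoded against the
		// latter, which is why both pairs are passed separately and why a mismatch (boundaryChanged)
		// forces a rewrite of the page.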
		// Another way to have no mutations is to have a single mutation range cover this
		// subtree but have no changes in it
		MutationBufferT::const_iterator iMutationBoundaryNext = iMutationBoundary;
		++iMutationBoundaryNext;
		if(!boundaryChanged && iMutationBoundaryNext == iMutationBoundaryEnd &&
			(iMutationBoundary->second.noChanges() ||
			 (!iMutationBoundary->second.rangeClearVersion.present() &&
			  iMutationBoundary->first < lowerBound->key)
			)
		) {
			VersionedChildrenT c({ {0, {{*lowerBound, root}}} });
			debug_printf("%p no changes because sole mutation range was not cleared, returning %s\n", THIS, toString(c).c_str());
			return c;
		}

		self->counts.commitToPage++;
		state Reference<const IPage> rawPage = wait(readPage(snapshot, root, self->m_usablePageSizeOverride, decodeLowerBound, decodeUpperBound));
		state BTreePage *page = (BTreePage *)rawPage->begin();
		debug_printf("%p commitSubtree(): %s\n", THIS, page->toString(false, root, snapshot->getVersion(), decodeLowerBound, decodeUpperBound).c_str());

		BTreePage::BinaryTree::Cursor cursor = getReader(rawPage)->getCursor();
		cursor.moveFirst();
// Leaf Page
2018-06-08 18:32:34 +08:00
if ( page - > flags & BTreePage : : IS_LEAF ) {
2017-06-10 05:56:41 +08:00
VersionedChildrenT results ;
2019-02-21 18:46:30 +08:00
std : : vector < RedwoodRecordRef > merged ;
2017-06-10 05:56:41 +08:00
2019-05-22 10:16:32 +08:00
debug_printf ( " %p id=%u MERGING EXISTING DATA WITH MUTATIONS: \n " , THIS , root ) ;
2019-03-15 15:46:09 +08:00
if ( REDWOOD_DEBUG ) {
self - > printMutationBuffer ( iMutationBoundary , iMutationBoundaryEnd ) ;
}
2017-08-26 06:48:32 +08:00
2017-08-22 13:29:57 +08:00
// It's a given that the mutation map is not empty so it's safe to do this
2017-08-25 08:25:53 +08:00
Key mutationRangeStart = iMutationBoundary - > first ;
2017-08-22 13:29:57 +08:00
// If replacement pages are written they will be at the minimum version seen in the mutations for this leaf
2017-08-28 16:57:01 +08:00
Version minVersion = invalidVersion ;
2019-03-15 15:46:09 +08:00
int changes = 0 ;
2017-08-22 13:29:57 +08:00
2017-08-25 08:25:53 +08:00
// Now, process each mutation range and merge changes with existing data.
while ( iMutationBoundary ! = iMutationBoundaryEnd ) {
2019-03-28 04:42:49 +08:00
debug_printf ( " %p New mutation boundary: '%s': %s \n " , THIS , printable ( iMutationBoundary - > first ) . c_str ( ) , iMutationBoundary - > second . toString ( ) . c_str ( ) ) ;
2017-08-23 02:30:44 +08:00
2017-08-25 08:25:53 +08:00
SingleKeyMutationsByVersion : : const_iterator iMutations ;
2017-08-22 13:29:57 +08:00
2017-08-28 21:28:49 +08:00
// If the mutation boundary key is less than the lower bound key then skip startKeyMutations for
// this boundary; we're only processing this mutation range here to apply any clears to existing data.
2019-04-30 08:00:29 +08:00
if ( iMutationBoundary - > first < lowerBound - > key ) {
2017-08-28 21:28:49 +08:00
iMutations = iMutationBoundary - > second . startKeyMutations . end ( ) ;
2019-04-30 08:00:29 +08:00
}
2017-08-25 08:25:53 +08:00
// If the mutation boundary key is the same as the page lowerBound key then start reading single
2019-02-21 18:46:30 +08:00
// key mutations at the first version greater than the lowerBound key's version.
2019-04-30 08:00:29 +08:00
else if ( ! self - > singleVersion & & iMutationBoundary - > first = = lowerBound - > key ) {
2019-02-21 18:46:30 +08:00
iMutations = iMutationBoundary - > second . startKeyMutations . upper_bound ( lowerBound - > version ) ;
2019-04-30 08:00:29 +08:00
}
else {
2017-08-25 08:25:53 +08:00
iMutations = iMutationBoundary - > second . startKeyMutations . begin ( ) ;
2019-04-30 08:00:29 +08:00
}
2017-08-22 13:29:57 +08:00
2017-08-25 08:25:53 +08:00
SingleKeyMutationsByVersion : : const_iterator iMutationsEnd = iMutationBoundary - > second . startKeyMutations . end ( ) ;
2019-04-30 08:00:29 +08:00
// Iterate over old versions of the mutation boundary key, outputting if necessary
2019-02-21 18:46:30 +08:00
while ( cursor . valid ( ) & & cursor . get ( ) . key = = iMutationBoundary - > first ) {
2019-04-30 08:00:29 +08:00
// If not in single version mode or there were no changes to the key
if ( ! self - > singleVersion | | iMutationBoundary - > second . noChanges ( ) ) {
merged . push_back ( cursor . get ( ) ) ;
debug_printf ( " %p: Added %s [existing, boundary start] \n " , THIS , merged . back ( ) . toString ( ) . c_str ( ) ) ;
}
else {
ASSERT ( self - > singleVersion ) ;
debug_printf ( " %p: Skipped %s [existing, boundary start, singleVersion mode] \n " , THIS , cursor . get ( ) . toString ( ) . c_str ( ) ) ;
minVersion = 0 ;
}
2019-02-21 18:46:30 +08:00
cursor . moveNext ( ) ;
2017-08-25 08:25:53 +08:00
}
2017-06-10 05:56:41 +08:00
2018-07-18 18:19:35 +08:00
// TODO: If a mutation set is equal to the previous existing value of the key, maybe don't write it.
2017-08-25 08:25:53 +08:00
// Output mutations for the mutation boundary start key
2017-08-22 13:29:57 +08:00
while ( iMutations ! = iMutationsEnd ) {
2017-09-06 07:59:31 +08:00
const SingleKeyMutation & m = iMutations - > second ;
2018-09-19 15:32:39 +08:00
int maxPartSize = std : : min ( 255 , self - > m_usablePageSizeOverride / 5 ) ;
2017-09-20 04:03:30 +08:00
if ( m . isClear ( ) | | m . value . size ( ) < = maxPartSize ) {
2017-09-16 08:27:13 +08:00
if ( iMutations - > first < minVersion | | minVersion = = invalidVersion )
minVersion = iMutations - > first ;
2019-03-15 15:46:09 +08:00
+ + changes ;
2019-05-22 10:16:32 +08:00
merged . push_back ( m . toRecord ( iMutationBoundary - > first , iMutations - > first ) ) ;
2019-03-28 04:42:49 +08:00
debug_printf ( " %p: Added non-split %s [mutation, boundary start] \n " , THIS , merged . back ( ) . toString ( ) . c_str ( ) ) ;
2017-09-06 07:59:31 +08:00
}
else {
2018-07-23 18:09:13 +08:00
if ( iMutations - > first < minVersion | | minVersion = = invalidVersion )
minVersion = iMutations - > first ;
2019-03-15 15:46:09 +08:00
+ + changes ;
2017-09-06 07:59:31 +08:00
int bytesLeft = m . value . size ( ) ;
2018-07-23 18:09:13 +08:00
int start = 0 ;
2019-02-21 18:46:30 +08:00
RedwoodRecordRef whole ( iMutationBoundary - > first , iMutations - > first , m . value ) ;
2017-09-06 07:59:31 +08:00
while ( bytesLeft > 0 ) {
int partSize = std : : min ( bytesLeft , maxPartSize ) ;
2018-07-23 18:09:13 +08:00
// Don't copy the value chunk because this page will stay in memory until after we've built new version(s) of it
2019-02-21 18:46:30 +08:00
merged . push_back ( whole . split ( start , partSize ) ) ;
2018-07-25 17:29:17 +08:00
bytesLeft - = partSize ;
start + = partSize ;
2019-03-28 04:42:49 +08:00
debug_printf ( " %p: Added split %s [mutation, boundary start] \n " , THIS , merged . back ( ) . toString ( ) . c_str ( ) ) ;
2017-09-06 07:59:31 +08:00
}
}
2017-08-22 13:29:57 +08:00
+ + iMutations ;
}
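// Example of the split-value case above, assuming maxPartSize == 255: a 600 byte value for key 'k'
// set at version 7 is emitted as three partial records sharing the same key and version,
//     whole.split(0,   255)  -> ('k', 7, bytes [0, 255))
//     whole.split(255, 255)  -> ('k', 7, bytes [255, 510))
//     whole.split(510, 90)   -> ('k', 7, bytes [510, 600))
// and the read path (Cursor, below) reassembles the chunks when the key is read.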
2017-06-10 05:56:41 +08:00
2017-08-25 08:25:53 +08:00
// Get the clear version for this range, which is the last thing that we need from it.
Optional<Version> clearRangeVersion = iMutationBoundary->second.rangeClearVersion;

// Advance to the next boundary because we need to know the end key for the current range.
++iMutationBoundary;
2019-03-28 04:42:49 +08:00
debug_printf ( " %p Mutation range end: '%s' \n " , THIS , printable ( iMutationBoundary - > first ) . c_str ( ) ) ;
2017-08-29 08:26:53 +08:00
2017-08-25 08:25:53 +08:00
// Write existing keys which are less than the next mutation boundary key, clearing if needed.
2019-02-21 18:46:30 +08:00
while ( cursor . valid ( ) & & cursor . get ( ) . key < iMutationBoundary - > first ) {
2019-04-30 08:00:29 +08:00
// TODO: Remove old versions that are too old
2019-02-21 18:46:30 +08:00
2019-04-30 08:00:29 +08:00
bool remove = self - > singleVersion & & clearRangeVersion . present ( ) ;
if ( ! remove ) {
merged . push_back ( cursor . get ( ) ) ;
debug_printf ( " %p: Added %s [existing, middle] \n " , THIS , merged . back ( ) . toString ( ) . c_str ( ) ) ;
}
else {
ASSERT ( self - > singleVersion ) ;
debug_printf ( " %p: Skipped %s [existing, boundary start, singleVersion mode] \n " , THIS , cursor . get ( ) . toString ( ) . c_str ( ) ) ;
2017-08-28 16:57:01 +08:00
Version clearVersion = clearRangeVersion . get ( ) ;
if ( clearVersion < minVersion | | minVersion = = invalidVersion )
minVersion = clearVersion ;
2017-08-22 13:29:57 +08:00
}
2019-04-30 08:00:29 +08:00
// If keeping version history, write clears for records that exist in this range if the range was cleared
if ( ! self - > singleVersion ) {
// Write a clear of this key if needed. A clear is required if clearRangeVersion is set and the next cursor
// key is different than the current one. If the last cursor key in the page is different from the
// first key in the right sibling page then the page's upper bound will reflect that.
auto nextCursor = cursor ;
nextCursor . moveNext ( ) ;
if ( clearRangeVersion . present ( ) & & cursor . get ( ) . key ! = nextCursor . getOrUpperBound ( ) . key ) {
Version clearVersion = clearRangeVersion . get ( ) ;
if ( clearVersion < minVersion | | minVersion = = invalidVersion )
minVersion = clearVersion ;
+ + changes ;
merged . push_back ( RedwoodRecordRef ( cursor . get ( ) . key , clearVersion ) ) ;
debug_printf ( " %p: Added %s [existing, middle clear] \n " , THIS , merged . back ( ) . toString ( ) . c_str ( ) ) ;
}
cursor = nextCursor ;
}
else {
cursor . moveNext ( ) ;
}
2017-08-22 13:29:57 +08:00
}
2017-08-26 06:48:32 +08:00
}
2017-06-10 05:56:41 +08:00
2017-08-26 06:48:32 +08:00
// Write any remaining existing keys, which are not subject to clears as they are beyond the cleared range.
2019-02-21 18:46:30 +08:00
while ( cursor . valid ( ) ) {
merged . push_back ( cursor . get ( ) ) ;
2019-03-28 04:42:49 +08:00
debug_printf ( " %p: Added %s [existing, tail] \n " , THIS , merged . back ( ) . toString ( ) . c_str ( ) ) ;
2019-02-21 18:46:30 +08:00
cursor . moveNext ( ) ;
2017-06-10 05:56:41 +08:00
}
2017-08-25 08:25:53 +08:00
2019-03-28 04:42:49 +08:00
debug_printf ( " %p Done merging mutations into existing leaf contents, made %d changes \n " , THIS , changes ) ;
2017-08-28 18:53:29 +08:00
2019-03-15 15:46:09 +08:00
// No changes were actually made. This could happen if the only mutations are clear ranges which do not match any records.
2019-05-22 10:16:32 +08:00
if ( ! boundaryChanged & & minVersion = = invalidVersion ) {
2019-04-30 08:00:29 +08:00
VersionedChildrenT c ( { { 0 , { { * lowerBound , root } } } } ) ;
debug_printf ( " %p No changes were made during mutation merge, returning %s \n " , THIS , toString ( c ) . c_str ( ) ) ;
2019-03-15 15:46:09 +08:00
ASSERT ( changes = = 0 ) ;
2019-04-30 08:00:29 +08:00
return c ;
2017-08-28 18:53:29 +08:00
}
2017-08-28 16:57:01 +08:00
2018-09-19 15:32:39 +08:00
// TODO: Make version and key splits based on contents of merged list, if keeping history
2017-06-10 05:56:41 +08:00
2019-04-30 08:00:29 +08:00
// If everything in the page was deleted then this page should be deleted as of the new version
// Note that if a single range clear covered the entire page then we should not get this far
2019-05-22 10:16:32 +08:00
if ( merged . empty ( ) & & root ! = 0 ) {
2019-04-30 08:00:29 +08:00
// TODO: For multi version mode only delete this page as of the new version
VersionedChildrenT c ( { } ) ;
2019-05-22 10:16:32 +08:00
debug_printf ( " %p id=%u All leaf page contents were cleared, returning %s \n " , THIS , root , toString ( c ) . c_str ( ) ) ;
2019-04-30 08:00:29 +08:00
return c ;
}
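// Note: page id 0 (the root) is excluded above because the root must always exist; if its contents are
// entirely cleared it is rewritten as an empty page instead of being deleted.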
2017-06-10 05:56:41 +08:00
IPager * pager = self - > m_pager ;
2019-02-21 18:46:30 +08:00
std : : vector < BoundaryAndPage > pages = buildPages ( true , * lowerBound , * upperBound , merged , BTreePage : : IS_LEAF , [ pager ] ( ) { return pager - > newPageBuffer ( ) ; } , self - > m_usablePageSizeOverride ) ;
2017-06-10 05:56:41 +08:00
2019-04-30 08:00:29 +08:00
if ( ! self - > singleVersion ) {
// If there isn't still just a single page of data then this page became too large and was split.
// The new split pages will be valid as of minVersion, but the old page remains valid at the old version
if ( pages . size ( ) ! = 1 ) {
results . push_back ( { 0 , { { * lowerBound , root } } } ) ;
2019-05-22 10:16:32 +08:00
debug_printf ( " %p Added versioned child set #1: %s \n " , THIS , toString ( results . back ( ) ) . c_str ( ) ) ;
2019-04-30 08:00:29 +08:00
}
else {
// The page was updated but not size-split or version-split so the last page version's data
// can be replaced with the new page contents
if ( pages . size ( ) = = 1 )
minVersion = 0 ;
}
2017-07-05 14:41:48 +08:00
}
2017-06-10 05:56:41 +08:00
2018-09-19 15:32:39 +08:00
// Write page(s), get new page IDs
2019-04-30 08:00:29 +08:00
Version writeVersion = self - > singleVersion ? self - > getLastCommittedVersion ( ) + 1 : minVersion ;
std : : vector < LogicalPageID > newPageIDs = self - > writePages ( pages , writeVersion , root , page , upperBound , THIS ) ;
2017-06-10 05:56:41 +08:00
2017-07-14 02:32:14 +08:00
// If this commitSubtree() is operating on the root, write new levels if needed until we're returning a single page
2018-09-19 15:32:39 +08:00
if ( root = = self - > m_root & & pages . size ( ) > 1 ) {
2019-03-28 04:42:49 +08:00
debug_printf ( " %p Building new root \n " , THIS ) ;
2019-04-30 08:00:29 +08:00
self - > buildNewRoot ( writeVersion , pages , newPageIDs , page ) ;
2017-08-28 21:28:49 +08:00
}
2017-07-14 02:32:14 +08:00
2019-04-30 08:00:29 +08:00
results . push_back ( { writeVersion , { } } ) ;
2017-06-10 05:56:41 +08:00
for ( int i = 0 ; i < pages . size ( ) ; i + + ) {
2017-08-28 16:57:01 +08:00
// The lower bound of the first page is the lower bound of the subtree, not the first entry in the page
2019-02-21 18:46:30 +08:00
const RedwoodRecordRef & lower = ( i = = 0 ) ? * lowerBound : pages [ i ] . lowerBound ;
2019-05-22 10:16:32 +08:00
debug_printf ( " %p Adding page to results: %s => Page %d \n " , THIS , lower . toString ( ) . c_str ( ) , newPageIDs [ i ] ) ;
2019-02-21 18:46:30 +08:00
results . back ( ) . second . push_back ( { lower , newPageIDs [ i ] } ) ;
2017-06-10 05:56:41 +08:00
}
2019-05-22 10:16:32 +08:00
debug_printf ( " %p Added versioned child set #2: %s \n " , THIS , toString ( results . back ( ) ) . c_str ( ) ) ;
2017-06-10 05:56:41 +08:00
2019-03-28 04:42:49 +08:00
debug_printf ( " %p DONE. \n " , THIS ) ;
2017-06-10 05:56:41 +08:00
return results ;
}
else {
2018-08-29 04:46:14 +08:00
// Internal Page
2019-05-22 10:16:32 +08:00
// TODO: Combine these into one vector and/or do something more elegant
2018-06-08 18:32:34 +08:00
state std : : vector < Future < VersionedChildrenT > > futureChildren ;
state std : : vector < LogicalPageID > childPageIDs ;
2019-05-22 10:16:32 +08:00
state std : : vector < const RedwoodRecordRef * > lowerBoundaries ;
state std : : vector < const RedwoodRecordRef * > decodeLowerBoundaries ;
// Track whether or not any child has had its boundaries forcibly changed
state bool childBoundariesChanged = false ;
2018-06-08 18:32:34 +08:00
2018-09-19 15:32:39 +08:00
// TODO: Make this much more efficient with a skip-merge through the two sorted sets (mutations, existing cursor)
2018-06-15 08:52:25 +08:00
bool first = true ;
2019-02-21 18:46:30 +08:00
while ( cursor . valid ( ) ) {
// The lower bound for the first child is the lowerBound arg
const RedwoodRecordRef & childLowerBound = first ? * lowerBound : cursor . get ( ) ;
2019-05-22 10:16:32 +08:00
first = false ;
// Skip over any children that do not link to a page. They exist to preserve the ancestors from
// which adjacent children can borrow prefix bytes.
// If there are any, then the first valid child page will incur a boundary change to move
// its lower bound to the left so we can delete the non-linking entry from this page to free up space.
while ( ! cursor . get ( ) . value . present ( ) ) {
// There should never be an internal page written that has no valid child pages, so this loop will
// find the first valid child link; the ASSERT below fails if there is none.
ASSERT ( cursor . moveNext ( ) ) ;
childBoundariesChanged = true ;
}
2018-06-08 18:32:34 +08:00
2019-05-22 10:16:32 +08:00
ASSERT ( cursor . valid ( ) ) ;
const RedwoodRecordRef & decodeChildLowerBound = cursor . get ( ) ;
const Optional < ValueRef > & childValue = cursor . get ( ) . value ;
uint32_t pageID = * ( uint32_t * ) childValue . get ( ) . begin ( ) ;
2018-08-29 04:46:14 +08:00
ASSERT ( pageID ! = 0 ) ;
2018-06-08 18:32:34 +08:00
2019-05-22 10:16:32 +08:00
const RedwoodRecordRef & decodeChildUpperBound = cursor . moveNext ( ) ? cursor . get ( ) : * upperBound ;
// Skip over any next-children which do not actually link to child pages
while ( cursor . valid ( ) & & ! cursor . get ( ) . value . present ( ) ) {
cursor . moveNext ( ) ;
childBoundariesChanged = true ;
}
const RedwoodRecordRef & childUpperBound = cursor . valid ( ) ? cursor . get ( ) : * upperBound ;
2017-08-28 16:57:01 +08:00
2019-05-22 10:16:32 +08:00
debug_printf ( " %p internal page id=%d child page id=%u lower=%s upper=%s decodeLower=%s decodeUpper=%s \n " ,
THIS , root , pageID , childLowerBound . toString ( ) . c_str ( ) , childUpperBound . toString ( ) . c_str ( ) , decodeChildLowerBound . toString ( ) . c_str ( ) , decodeChildUpperBound . toString ( ) . c_str ( ) ) ;
2017-08-28 18:53:29 +08:00
2019-04-30 08:00:29 +08:00
/*
// TODO: If lower bound and upper bound have the same key, do something intelligent if possible
//
if ( childLowerBound . key = = childUpperBound . key ) {
if ( key is modified or cleared ) {
if ( self - > singleVersion ) {
// In single version mode, don't keep any records with the old key if the key is modified, so return
// an empty page set to replace the child page
futureChildren . push_back ( VersionedChildrenT ( { { 0 , { } } } ) ) ;
}
else {
// In versioned mode, there is no need to recurse to this page because new versions of key
// will go in the right most page that has the same lowerBound key, but since the key is
// being changed the new version of this page should exclude the old subtree
}
else {
// Return the child page as-is, no need to visit it
futureChildren . push_back ( VersionedChildrenT ( { { 0 , { { childLowerBound , pageID } } } } ) ) ;
}
}
else {
// No changes
futureChildren . push_back ( VersionedChildrenT ( { { 0 , { { childLowerBound , pageID } } } } ) ) ;
}
}
else {
futureChildren . push_back ( self - > commitSubtree ( self , mutationBuffer , snapshot , pageID , & childLowerBound , & childUpperBound ) ) ;
}
*/
2019-05-22 10:16:32 +08:00
futureChildren . push_back ( self - > commitSubtree ( self , mutationBuffer , snapshot , pageID , & childLowerBound , & childUpperBound , & decodeChildLowerBound , & decodeChildUpperBound ) ) ;
2018-06-08 18:32:34 +08:00
childPageIDs . push_back ( pageID ) ;
2019-05-22 10:16:32 +08:00
lowerBoundaries . push_back ( & childLowerBound ) ;
decodeLowerBoundaries . push_back ( & decodeChildLowerBound ) ;
2017-06-10 05:56:41 +08:00
}
2019-03-15 15:46:09 +08:00
// Waiting one at a time makes debugging easier
// TODO: Is it better to use waitForAll()?
2019-02-21 18:46:30 +08:00
state int k ;
for ( k = 0 ; k < futureChildren . size ( ) ; + + k ) {
wait ( success ( futureChildren [ k ] ) ) ;
}
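// Note: each element of futureChildren is a VersionedChildrenT, i.e. a list of
// (version, list of (lower bound record, page id)) pairs describing what replaced a child subtree.
// An empty list means the child subtree was deleted; a single {0, {{decodeLowerBound, originalPageID}}}
// entry means the child was left unchanged.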
2017-06-10 05:56:41 +08:00
2019-05-22 10:16:32 +08:00
// Were any children modified?
2017-07-05 14:41:48 +08:00
bool modified = false ;
2019-05-22 10:16:32 +08:00
// Were all children deleted?
bool all_deleted = true ;
2019-04-30 08:00:29 +08:00
if ( REDWOOD_DEBUG ) {
debug_printf ( " %p Subtree update results for id=%d \n " , THIS , root ) ;
for ( int i = 0 ; i < futureChildren . size ( ) ; + + i ) {
const VersionedChildrenT & children = futureChildren [ i ] . get ( ) ;
2019-05-22 10:16:32 +08:00
debug_printf ( " %p subtree for child page id=%u lowerBound=%s: %s \n " , THIS , childPageIDs [ i ] , lowerBoundaries [ i ] - > toString ( 50 ) . c_str ( ) , toString ( children ) . c_str ( ) ) ;
2019-04-30 08:00:29 +08:00
}
}
2018-06-08 18:32:34 +08:00
for ( int i = 0 ; i < futureChildren . size ( ) ; + + i ) {
const VersionedChildrenT & children = futureChildren [ i ] . get ( ) ;
2019-04-30 08:00:29 +08:00
2019-05-22 10:16:32 +08:00
if ( children . empty ( ) ) {
2017-07-05 14:41:48 +08:00
modified = true ;
2019-05-22 10:16:32 +08:00
}
else {
// Not all children were deleted since this branch has a child
all_deleted = false ;
// If the merge resulted in 1 versioned child set with exactly one child
// page, and its page ID and lower bound are the same as the original, then no changes were made.
// Otherwise, a change was made so we know the page was modified and we can stop iterating.
if ( ! ( children . size ( ) = = 1
& & children . front ( ) . second . size ( ) = = 1
& & children . front ( ) . second . front ( ) . second = = childPageIDs [ i ]
& & children . front ( ) . second . front ( ) . first = = * decodeLowerBoundaries [ i ]
)
)
{
modified = true ;
break ;
}
}
}
if ( childBoundariesChanged ) {
modified = true ;
}
if ( all_deleted ) {
ASSERT ( modified ) ;
if ( root = = 0 ) {
Reference < IPage > page = self - > m_pager - > newPageBuffer ( ) ;
makeEmptyPage ( page , BTreePage : : IS_LEAF , self - > m_usablePageSizeOverride ) ;
self - > writePage ( 0 , page , self - > getLastCommittedVersion ( ) + 1 , & dbBegin , & dbEnd ) ;
VersionedChildrenT c ( { { 0 , { { dbBegin , 0 } } } } ) ;
debug_printf ( " %p id=%u All root page children were deleted, rewrote root as leaf, returning %s \n " , THIS , root , toString ( c ) . c_str ( ) ) ;
return c ;
}
else {
VersionedChildrenT c ( { } ) ;
debug_printf ( " %p id=%u All internal page children were deleted #1 so deleting this page too, returning %s \n " , THIS , root , toString ( c ) . c_str ( ) ) ;
return c ;
2017-06-10 05:56:41 +08:00
}
}
2019-05-22 10:16:32 +08:00
if ( ! boundaryChanged & & ! modified ) {
2019-04-30 08:00:29 +08:00
VersionedChildrenT c ( { { 0 , { { * lowerBound , root } } } } ) ;
debug_printf ( " %p not modified, returning %s \n " , THIS , toString ( c ) . c_str ( ) ) ;
return c ;
2017-07-14 02:32:14 +08:00
}
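// At this point at least one child changed or a boundary was forced to change, so replacement versions
// of this internal page are built below: one per version split in multi-version mode, or a single
// replacement at the next commit version in single-version mode.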
2017-06-10 05:56:41 +08:00
Version version = 0 ;
VersionedChildrenT result ;
2019-04-30 08:00:29 +08:00
debug_printf ( " %p: Internal page %u modified, creating replacements. \n " , THIS , root ) ;
2017-06-10 05:56:41 +08:00
loop { // over version splits of this page
Version nextVersion = std : : numeric_limits < Version > : : max ( ) ;
2019-02-21 18:46:30 +08:00
std : : vector < RedwoodRecordRef > childEntries ;
2017-06-10 05:56:41 +08:00
// For each Future<VersionedChildrenT>
2019-05-29 21:23:32 +08:00
debug_printf ( " %p creating replacement pages for id=%d at Version % " PRId64 " \n " , THIS , root , version ) ;
2017-07-05 14:41:48 +08:00
2019-05-22 10:16:32 +08:00
// In multi-version mode, if we're writing version 0 there is a chance that we don't have to write ourselves, provided there are no changes in any child subtrees
bool modified = self - > singleVersion | | version ! = 0 ;
2017-06-10 05:56:41 +08:00
2018-06-08 18:32:34 +08:00
for ( int i = 0 ; i < futureChildren . size ( ) ; + + i ) {
const VersionedChildrenT & children = futureChildren [ i ] . get ( ) ;
2019-04-30 08:00:29 +08:00
if ( children . empty ( ) ) {
2019-05-22 10:16:32 +08:00
// Subtree was deleted, but due to prefix dependencies the boundary key might still need to exist
2019-04-30 08:00:29 +08:00
modified = true ;
2019-05-22 10:16:32 +08:00
// If there is a previous child and it has a subtree then this boundary key must still exist
// to continue being the upper bound of the previous child
if ( ! childEntries . empty ( ) & & childEntries . back ( ) . value . present ( ) ) {
RedwoodRecordRef entry ( * lowerBoundaries [ i ] ) ;
entry . value = Optional < ValueRef > ( ) ;
childEntries . push_back ( entry ) ;
}
2019-04-30 08:00:29 +08:00
continue ;
}
2017-07-05 14:41:48 +08:00
2019-05-22 15:23:31 +08:00
debug_printf ( " %p Versioned page set that replaced Page id=%d: %lu versions \n " , THIS , childPageIDs [ i ] , children . size ( ) ) ;
2019-04-30 08:00:29 +08:00
if ( REDWOOD_DEBUG ) {
for ( auto & versionedPageSet : children ) {
2019-05-29 21:23:32 +08:00
debug_printf ( " %p version % " PRId64 " \n " , THIS , versionedPageSet . first ) ;
2019-04-30 08:00:29 +08:00
for ( auto & boundaryPage : versionedPageSet . second ) {
debug_printf ( " %p '%s' -> Page id=%u \n " , THIS , boundaryPage . first . toString ( ) . c_str ( ) , boundaryPage . second ) ;
}
2017-06-10 05:56:41 +08:00
}
}
2019-04-30 08:00:29 +08:00
vector < VersionedBTree : : VersionedKeyToPageSetT > : : const_iterator cv ;
2017-06-10 05:56:41 +08:00
2019-04-30 08:00:29 +08:00
if ( self - > singleVersion ) {
ASSERT ( children . size ( ) = = 1 ) ;
cv = children . begin ( ) ;
nextVersion = std : : numeric_limits < Version > : : max ( ) ;
2017-06-10 05:56:41 +08:00
}
2019-04-30 08:00:29 +08:00
else {
// Find the first version greater than the current version we are writing
cv = std : : upper_bound ( children . begin ( ) , children . end ( ) , version , [ ] ( Version a , VersionedChildrenT : : value_type const & b ) { return a < b . first ; } ) ;
// If there are no versions before the one we found, just update nextVersion and continue.
if ( cv = = children . begin ( ) ) {
2019-05-29 21:23:32 +08:00
debug_printf ( " %p First version (% " PRId64 " ) in set is greater than current, setting nextVersion and continuing \n " , THIS , cv - > first ) ;
2019-04-30 08:00:29 +08:00
nextVersion = std : : min ( nextVersion , cv - > first ) ;
2019-05-29 21:23:32 +08:00
debug_printf ( " %p curr % " PRId64 " next % " PRId64 " \n " , THIS , version , nextVersion ) ;
2019-04-30 08:00:29 +08:00
continue ;
}
2017-06-10 05:56:41 +08:00
2019-04-30 08:00:29 +08:00
// If a version greater than the current version being written was found, update nextVersion
if ( cv ! = children . end ( ) ) {
nextVersion = std : : min ( nextVersion , cv - > first ) ;
2019-05-29 21:23:32 +08:00
debug_printf ( " %p curr % " PRId64 " next % " PRId64 " \n " , THIS , version , nextVersion ) ;
2019-04-30 08:00:29 +08:00
}
2017-06-10 05:56:41 +08:00
2019-04-30 08:00:29 +08:00
// Go back one to the last version that was valid prior to or at the current version we are writing
- - cv ;
}
2017-06-10 05:56:41 +08:00
2019-05-29 21:23:32 +08:00
debug_printf ( " %p Using children for version % " PRId64 " from this set, building version % " PRId64 " \n " , THIS , cv - > first , version ) ;
2017-07-05 14:41:48 +08:00
// If page count isn't 1 then the root is definitely modified
modified = modified | | cv - > second . size ( ) ! = 1 ;
2017-06-10 05:56:41 +08:00
2019-04-30 08:00:29 +08:00
// TODO: If page ID changed in singleVersion mode then root is modified
2017-06-10 05:56:41 +08:00
// Add the children at this version to the child entries list for the current version being built.
for ( auto & childPage : cv - > second ) {
2019-02-21 18:46:30 +08:00
RedwoodRecordRef entry = childPage . first ;
entry . value = StringRef ( ( unsigned char * ) & childPage . second , sizeof ( uint32_t ) ) ;
2019-05-22 10:16:32 +08:00
debug_printf ( " %p Adding child page %s \n " , THIS , entry . toString ( ) . c_str ( ) ) ;
2019-02-21 18:46:30 +08:00
childEntries . push_back ( entry ) ;
2017-06-10 05:56:41 +08:00
}
}
2019-05-29 21:23:32 +08:00
debug_printf ( " %p Finished pass through futurechildren. childEntries=%lu version=% " PRId64 " nextVersion=% " PRId64 " \n " , THIS , childEntries . size ( ) , version , nextVersion ) ;
2017-06-10 05:56:41 +08:00
2017-07-05 14:41:48 +08:00
if ( modified ) {
2019-04-30 08:00:29 +08:00
// If all children were deleted then this page should be deleted as of the new version
// Note that if a single range clear covered the entire page then we should not get this far
if ( childEntries . empty ( ) ) {
2019-05-22 10:16:32 +08:00
if ( self - > singleVersion ) {
2019-05-29 21:23:32 +08:00
debug_printf ( " %p All internal page children were deleted #2 at version % " PRId64 " \n " , THIS , version ) ;
2019-05-22 10:16:32 +08:00
}
else {
VersionedKeyToPageSetT c ( { version , { } } ) ;
2019-05-29 21:23:32 +08:00
debug_printf ( " %p All internal page children were deleted #3 at version % " PRId64 " , adding %s \n " , THIS , version , toString ( c ) . c_str ( ) ) ;
2019-05-22 10:16:32 +08:00
result . push_back ( c ) ;
}
2019-04-30 08:00:29 +08:00
}
2019-05-22 10:16:32 +08:00
else {
// TODO: Track split points across iterations of this loop, so that they don't shift unnecessarily and
// cause unnecessary path copying
2019-04-30 08:00:29 +08:00
2019-05-22 10:16:32 +08:00
IPager * pager = self - > m_pager ;
std : : vector < BoundaryAndPage > pages = buildPages ( false , * lowerBound , * upperBound , childEntries , 0 , [ pager ] ( ) { return pager - > newPageBuffer ( ) ; } , self - > m_usablePageSizeOverride ) ;
2017-06-10 05:56:41 +08:00
2019-05-22 10:16:32 +08:00
// Write page(s), use version 0 to replace latest version if only writing one page
Version writeVersion = self - > singleVersion ? self - > getLastCommittedVersion ( ) + 1 : version ;
std : : vector < LogicalPageID > newPageIDs = self - > writePages ( pages , writeVersion , root , page , upperBound , THIS ) ;
2017-06-10 05:56:41 +08:00
2019-05-22 10:16:32 +08:00
// If this commitSubtree() is operating on the root, write new levels if needed until we're returning a single page
if ( root = = self - > m_root ) {
self - > buildNewRoot ( writeVersion , pages , newPageIDs , page ) ;
}
2017-07-14 02:32:14 +08:00
2019-05-22 10:16:32 +08:00
result . resize ( result . size ( ) + 1 ) ;
result . back ( ) . first = writeVersion ;
2017-06-10 05:56:41 +08:00
2019-05-22 10:16:32 +08:00
for ( int i = 0 ; i < pages . size ( ) ; i + + )
result . back ( ) . second . push_back ( { pages [ i ] . lowerBound , newPageIDs [ i ] } ) ;
2017-06-10 05:56:41 +08:00
2019-05-22 10:16:32 +08:00
debug_printf ( " %p Added versioned child set #3: %s \n " , THIS , toString ( result . back ( ) ) . c_str ( ) ) ;
2017-07-05 14:41:48 +08:00
}
}
else {
2019-03-28 04:42:49 +08:00
debug_printf ( " %p Version 0 has no changes \n " , THIS ) ;
2019-02-21 18:46:30 +08:00
result . push_back ( { 0 , { { * lowerBound , root } } } ) ;
2019-05-22 10:16:32 +08:00
debug_printf ( " %p Added versioned child set #4: %s \n " , THIS , toString ( result . back ( ) ) . c_str ( ) ) ;
2017-07-05 14:41:48 +08:00
}
2017-06-10 05:56:41 +08:00
if ( nextVersion = = std : : numeric_limits < Version > : : max ( ) )
break ;
version = nextVersion ;
}
2019-03-28 04:42:49 +08:00
debug_printf ( " %p DONE. \n " , THIS ) ;
2017-06-10 05:56:41 +08:00
return result ;
}
}
	ACTOR static Future<Void> commit_impl(VersionedBTree *self) {
		state MutationBufferT *mutations = self->m_pBuffer;

		// No more mutations are allowed to be written to this mutation buffer, which we will commit
		// at m_writeVersion; that version must be saved locally because it could change during commit.
		self->m_pBuffer = nullptr;
		state Version writeVersion = self->m_writeVersion;

		// The latest mutation buffer start version is the one we will now (or eventually) commit.
		state Version mutationBufferStartVersion = self->m_mutationBuffers.rbegin()->first;

		// Replace the lastCommit future with a new one and then wait on the old one
		state Promise<Void> committed;
		Future<Void> previousCommit = self->m_latestCommit;
		self->m_latestCommit = committed.getFuture();

		// Wait for the latest commit that started to be finished.
		wait(previousCommit);
		debug_printf("%s: Beginning commit of version %" PRId64 "\n", self->m_name.c_str(), writeVersion);

		// Get the latest version from the pager, which is what we will read at
		Version latestVersion = wait(self->m_pager->getLatestVersion());
		debug_printf("%s: pager latestVersion %" PRId64 "\n", self->m_name.c_str(), latestVersion);

		if(REDWOOD_DEBUG) {
			self->printMutationBuffer(mutations);
		}

		VersionedChildrenT newRoot = wait(commitSubtree(self, mutations, self->m_pager->getReadSnapshot(latestVersion), self->m_root, &dbBegin, &dbEnd, &dbBegin, &dbEnd));

		self->m_pager->setLatestVersion(writeVersion);
		debug_printf("%s: Committing pager %" PRId64 "\n", self->m_name.c_str(), writeVersion);
		wait(self->m_pager->commit());
		debug_printf("%s: Committed version %" PRId64 "\n", self->m_name.c_str(), writeVersion);

		// Now that everything is committed we must delete the mutation buffer.
		// Our buffer's start version should be the oldest mutation buffer version in the map.
		ASSERT(mutationBufferStartVersion == self->m_mutationBuffers.begin()->first);
		self->m_mutationBuffers.erase(self->m_mutationBuffers.begin());

		self->m_lastCommittedVersion = writeVersion;
		++self->counts.commits;
		printf("\nCommitted: %s\n", self->counts.toString(true).c_str());
		committed.send(Void());

		return Void();
	}
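	// Usage sketch of the commit cycle (assuming the IVersionedStore interface used elsewhere in this file):
	//
	//     btree->setWriteVersion(btree->getLastCommittedVersion() + 1);
	//     btree->set(KeyValueRef(key, value));   // buffered in m_pBuffer, not applied to pages yet
	//     wait(btree->commit());                 // runs commit_impl(), which applies the buffer via commitSubtree()
	//
	// Mutations only reach btree pages during commit_impl(); set() and clear() just record them in the
	// current mutation buffer.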
2019-02-21 18:46:30 +08:00
// InternalCursor is for seeking to and iterating over the 'internal' records (not user-visible) in the Btree.
// These records are versioned and they can represent deletedness or partial values.
struct InternalCursor {
private :
// Each InternalCursor's position is represented by a reference counted PageCursor, which links
// to its parent PageCursor, up to a PageCursor representing a cursor on the root page.
// PageCursors can be shared by many InternalCursors, making InternalCursor copying low overhead
struct PageCursor : ReferenceCounted < PageCursor > , FastAllocated < PageCursor > {
Reference < PageCursor > parent ;
LogicalPageID pageID ; // Only needed for debugging purposes
Reference < const IPage > page ;
BTreePage : : BinaryTree : : Cursor cursor ;
2017-09-15 20:19:39 +08:00
2019-02-21 18:46:30 +08:00
PageCursor ( LogicalPageID id , Reference < const IPage > page , Reference < PageCursor > parent = { } )
: pageID ( id ) , page ( page ) , parent ( parent ) , cursor ( getReader ( ) . getCursor ( ) )
{
}
2017-09-15 20:19:39 +08:00
2019-02-21 18:46:30 +08:00
PageCursor ( const PageCursor & toCopy ) : parent ( toCopy . parent ) , pageID ( toCopy . pageID ) , page ( toCopy . page ) , cursor ( toCopy . cursor ) {
}
// Convenience method for copying a PageCursor
Reference < PageCursor > copy ( ) const {
return Reference < PageCursor > ( new PageCursor ( * this ) ) ;
}
// Multiple InternalCursors can share a Page
BTreePage : : BinaryTree : : Reader & getReader ( ) const {
return * ( BTreePage : : BinaryTree : : Reader * ) page - > userData ;
}
bool isLeaf ( ) const {
const BTreePage * p = ( ( const BTreePage * ) page - > begin ( ) ) ;
return p - > isLeaf ( ) ;
}
Future < Reference < PageCursor > > getChild ( Reference < IPagerSnapshot > pager , int usablePageSizeOverride ) {
ASSERT ( ! isLeaf ( ) ) ;
BTreePage : : BinaryTree : : Cursor next = cursor ;
next . moveNext ( ) ;
const RedwoodRecordRef & rec = cursor . get ( ) ;
LogicalPageID id = * ( LogicalPageID * ) rec . value . get ( ) . begin ( ) ;
Future < Reference < const IPage > > child = readPage ( pager , id , usablePageSizeOverride , & rec , & next . getOrUpperBound ( ) ) ;
return map ( child , [ = ] ( Reference < const IPage > page ) {
return Reference < PageCursor > ( new PageCursor ( id , page , Reference < PageCursor > : : addRef ( this ) ) ) ;
} ) ;
}
std : : string toString ( ) const {
return format ( " Page %lu, %s " , pageID , cursor . valid ( ) ? cursor . get ( ) . toString ( ) . c_str ( ) : " <invalid> " ) ;
}
} ;
LogicalPageID rootPageID ;
int usablePageSizeOverride ;
Reference < IPagerSnapshot > pager ;
Reference < PageCursor > pageCursor ;
2017-09-15 20:19:39 +08:00
2019-02-21 18:46:30 +08:00
public :
InternalCursor ( ) {
2017-09-15 20:19:39 +08:00
}
2019-02-21 18:46:30 +08:00
InternalCursor ( Reference < IPagerSnapshot > pager , LogicalPageID root , int usablePageSizeOverride )
: pager ( pager ) , rootPageID ( root ) , usablePageSizeOverride ( usablePageSizeOverride ) {
}
2017-09-15 20:19:39 +08:00
2019-02-21 18:46:30 +08:00
std : : string toString ( ) const {
2018-06-12 16:43:19 +08:00
std : : string r ;
2019-04-30 08:00:29 +08:00
2019-02-21 18:46:30 +08:00
Reference < PageCursor > c = pageCursor ;
2019-04-30 08:00:29 +08:00
int maxDepth = 0 ;
2019-02-21 18:46:30 +08:00
while ( c ) {
2019-04-30 08:00:29 +08:00
c = c - > parent ;
+ + maxDepth ;
}
c = pageCursor ;
int depth = maxDepth ;
while ( c ) {
r = format ( " [%d/%d: %s] " , depth - - , maxDepth , c - > toString ( ) . c_str ( ) ) + r ;
2019-02-21 18:46:30 +08:00
c = c - > parent ;
2018-06-12 16:43:19 +08:00
}
return r ;
2017-09-15 20:19:39 +08:00
}
2019-02-21 18:46:30 +08:00
// Returns true if cursor position is a valid leaf page record
bool valid ( ) const {
return pageCursor & & pageCursor - > isLeaf ( ) & & pageCursor - > cursor . valid ( ) ;
}
2017-09-15 20:19:39 +08:00
2019-02-21 18:46:30 +08:00
// Returns true if cursor position is valid() and has a present record value
bool present ( ) {
return valid ( ) & & pageCursor - > cursor . get ( ) . value . present ( ) ;
}
2018-07-15 04:37:52 +08:00
2019-02-21 18:46:30 +08:00
// Returns true if cursor position is present() and has an effective version <= v
bool presentAtVersion ( Version v ) {
return present ( ) & & pageCursor - > cursor . get ( ) . version < = v ;
}
2017-09-15 20:19:39 +08:00
2019-02-21 18:46:30 +08:00
// Returns true if cursor position is valid() and has an effective version <= v
bool validAtVersion ( Version v ) {
return valid ( ) & & pageCursor - > cursor . get ( ) . version < = v ;
}
2017-09-15 20:19:39 +08:00
2019-02-21 18:46:30 +08:00
const RedwoodRecordRef & get ( ) const {
return pageCursor - > cursor . get ( ) ;
2017-09-15 20:19:39 +08:00
}
2019-02-21 18:46:30 +08:00
// Ensure that pageCursor is not shared with other cursors so we can modify it
void ensureUnshared ( ) {
if ( ! pageCursor - > isSoleOwner ( ) ) {
pageCursor = pageCursor - > copy ( ) ;
2017-09-15 20:19:39 +08:00
}
}
2019-02-21 18:46:30 +08:00
Future < Void > moveToRoot ( ) {
// If pageCursor exists follow parent links to the root
if ( pageCursor ) {
while ( pageCursor - > parent ) {
pageCursor = pageCursor - > parent ;
}
return Void ( ) ;
}
2017-09-15 20:19:39 +08:00
2019-02-21 18:46:30 +08:00
// Otherwise read the root page
Future < Reference < const IPage > > root = readPage ( pager , rootPageID , usablePageSizeOverride , & dbBegin , & dbEnd ) ;
return map ( root , [ = ] ( Reference < const IPage > p ) {
pageCursor = Reference < PageCursor > ( new PageCursor ( rootPageID , p ) ) ;
return Void ( ) ;
} ) ;
}
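// Note: because PageCursor nodes are reference counted and only link upward to their parents,
// many InternalCursors can share the upper levels of a path.  ensureUnshared() copies just the
// current node before it is modified, a simple copy-on-write scheme.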
2018-06-12 16:43:19 +08:00
2019-02-21 18:46:30 +08:00
ACTOR Future < bool > seekLessThanOrEqual_impl ( InternalCursor * self , RedwoodRecordRef query ) {
Future < Void > f = self - > moveToRoot ( ) ;
2017-09-15 20:19:39 +08:00
2019-02-21 18:46:30 +08:00
// f will almost always be ready
if ( ! f . isReady ( ) ) {
wait ( f ) ;
}
2017-09-15 20:19:39 +08:00
2019-02-21 18:46:30 +08:00
self - > ensureUnshared ( ) ;
loop {
2019-05-22 10:16:32 +08:00
bool success = self - > pageCursor - > cursor . seekLessThanOrEqual ( query ) ;
// Skip backwards over internal page entries that do not link to child pages
if ( ! self - > pageCursor - > isLeaf ( ) ) {
// While record has no value, move again
while ( success & & ! self - > pageCursor - > cursor . get ( ) . value . present ( ) ) {
success = self - > pageCursor - > cursor . movePrev ( ) ;
}
}
if ( success ) {
2019-02-21 18:46:30 +08:00
// If we found a record <= query at a leaf page then return success
if ( self - > pageCursor - > isLeaf ( ) ) {
return true ;
2017-09-15 20:19:39 +08:00
}
2019-02-21 18:46:30 +08:00
Reference < PageCursor > child = wait ( self - > pageCursor - > getChild ( self - > pager , self - > usablePageSizeOverride ) ) ;
self - > pageCursor = child ;
2017-09-15 20:19:39 +08:00
}
else {
2019-02-21 18:46:30 +08:00
// No records <= query on this page, so move to immediate previous record at leaf level
bool success = wait ( self - > move ( false ) ) ;
return success ;
2017-09-15 20:19:39 +08:00
}
}
}
2019-02-21 18:46:30 +08:00
Future < bool > seekLTE ( RedwoodRecordRef query ) {
return seekLessThanOrEqual_impl ( this , query ) ;
}
2017-09-15 20:19:39 +08:00
2019-02-21 18:46:30 +08:00
ACTOR Future < bool > move_impl ( InternalCursor * self , bool forward ) {
// Try to move pageCursor; if that fails, move up to the parent and repeat until a move succeeds or the root cursor can't be moved
while ( 1 ) {
self - > ensureUnshared ( ) ;
bool success = self - > pageCursor - > cursor . valid ( ) & & ( forward ? self - > pageCursor - > cursor . moveNext ( ) : self - > pageCursor - > cursor . movePrev ( ) ) ;
2017-09-15 20:19:39 +08:00
2019-05-22 10:16:32 +08:00
// Skip over internal page entries that do not link to child pages
if ( ! self - > pageCursor - > isLeaf ( ) ) {
// While record has no value, move again
while ( success & & ! self - > pageCursor - > cursor . get ( ) . value . present ( ) ) {
success = forward ? self - > pageCursor - > cursor . moveNext ( ) : self - > pageCursor - > cursor . movePrev ( ) ;
}
}
2019-02-21 18:46:30 +08:00
// Stop if successful or there's no parent to move to
if ( success | | ! self - > pageCursor - > parent ) {
break ;
2018-06-12 16:43:19 +08:00
}
2019-02-21 18:46:30 +08:00
// Move to parent
self - > pageCursor = self - > pageCursor - > parent ;
2018-06-08 18:32:34 +08:00
}
2019-02-21 18:46:30 +08:00
// If pageCursor not valid we've reached an end of the tree
if ( ! self - > pageCursor - > cursor . valid ( ) ) {
return false ;
2017-09-15 20:19:39 +08:00
}
2019-02-21 18:46:30 +08:00
// While not on a leaf page, move down to get to one.
while ( ! self - > pageCursor - > isLeaf ( ) ) {
2019-05-22 10:16:32 +08:00
// Skip over internal page entries that do not link to child pages
while ( ! self - > pageCursor - > cursor . get ( ) . value . present ( ) ) {
bool success = forward ? self - > pageCursor - > cursor . moveNext ( ) : self - > pageCursor - > cursor . movePrev ( ) ;
if ( ! success ) {
return false ;
}
}
2019-02-21 18:46:30 +08:00
Reference < PageCursor > child = wait ( self - > pageCursor - > getChild ( self - > pager , self - > usablePageSizeOverride ) ) ;
bool success = forward ? child - > cursor . moveFirst ( ) : child - > cursor . moveLast ( ) ;
self - > pageCursor = child ;
2017-09-15 20:19:39 +08:00
}
2019-02-21 18:46:30 +08:00
return true ;
}
2017-09-15 20:19:39 +08:00
2019-02-21 18:46:30 +08:00
Future < bool > move ( bool forward ) {
return move_impl ( this , forward ) ;
}
2017-09-15 20:19:39 +08:00
2019-02-21 18:46:30 +08:00
Future < bool > moveNext ( ) {
return move_impl ( this , true ) ;
}
Future < bool > movePrev ( ) {
return move_impl ( this , false ) ;
}
2018-08-29 04:46:14 +08:00
2019-02-21 18:46:30 +08:00
// Move to the first or last record of the database.
ACTOR Future < bool > move_end ( InternalCursor * self , bool begin ) {
Future < Void > f = self - > moveToRoot ( ) ;
2017-09-15 20:19:39 +08:00
2019-02-21 18:46:30 +08:00
// f will almost always be ready
if ( ! f . isReady ( ) ) {
wait ( f ) ;
2018-06-12 16:43:19 +08:00
}
2017-09-15 20:19:39 +08:00
2019-02-21 18:46:30 +08:00
self - > ensureUnshared ( ) ;
2018-06-08 18:32:34 +08:00
2019-02-21 18:46:30 +08:00
loop {
// Move to first or last record in the page
bool success = begin ? self - > pageCursor - > cursor . moveFirst ( ) : self - > pageCursor - > cursor . moveLast ( ) ;
2019-05-22 10:16:32 +08:00
// Skip over internal page entries that do not link to child pages
if ( ! self - > pageCursor - > isLeaf ( ) ) {
// While record has no value, move past it
while ( success & & ! self - > pageCursor - > cursor . get ( ) . value . present ( ) ) {
success = begin ? self - > pageCursor - > cursor . moveNext ( ) : self - > pageCursor - > cursor . movePrev ( ) ;
}
}
2019-02-21 18:46:30 +08:00
// If it worked, return true if we've reached a leaf page otherwise go to the next child
if ( success ) {
if ( self - > pageCursor - > isLeaf ( ) ) {
return true ;
}
2019-05-22 10:16:32 +08:00
2019-02-21 18:46:30 +08:00
Reference < PageCursor > child = wait ( self - > pageCursor - > getChild ( self - > pager , self - > usablePageSizeOverride ) ) ;
self - > pageCursor = child ;
}
else {
return false ;
}
}
2017-09-15 20:19:39 +08:00
}
2019-02-21 18:46:30 +08:00
Future < bool > moveFirst ( ) {
return move_end ( this , true ) ;
}
Future < bool > moveLast ( ) {
return move_end ( this , false ) ;
}
2017-09-15 20:19:39 +08:00
} ;
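// Rough usage sketch for the Cursor class below, assuming a cursor obtained for read version v:
//
//     wait(cursor->findFirstEqualOrGreater(key, true, 0));
//     if(cursor->isValid()) {
//         KeyRef k = cursor->getKey();
//         ValueRef val = cursor->getValue();
//     }
//
// m_cur1 is positioned at the newest internal record for the key visible at m_version, and m_cur2
// tracks a nearby position (the next record, or the first chunk of a split value) so that multi-part
// values can be detected and reassembled into m_arena.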
// Cursor is for reading and iterating over user-visible KV pairs at a specific version
2019-02-21 18:46:30 +08:00
// KeyValueRefs returned become invalid once the cursor is moved
class Cursor : public IStoreCursor , public ReferenceCounted < Cursor > , public FastAllocated < Cursor > , NonCopyable {
2017-06-10 05:56:41 +08:00
public :
2019-04-30 08:00:29 +08:00
Cursor ( Reference < IPagerSnapshot > pageSource , LogicalPageID root , Version recordVersion , int usablePageSizeOverride )
: m_version ( recordVersion ) ,
2019-02-21 18:46:30 +08:00
m_cur1 ( pageSource , root , usablePageSizeOverride ) ,
m_cur2 ( m_cur1 )
{
2017-06-10 05:56:41 +08:00
}
2019-02-21 18:46:30 +08:00
void addref ( ) { ReferenceCounted < Cursor > : : addref ( ) ; }
void delref ( ) { ReferenceCounted < Cursor > : : delref ( ) ; }
2017-09-09 16:29:25 +08:00
2019-02-21 18:46:30 +08:00
private :
Version m_version ;
// If kv is valid
// - kv.key references memory held by cur1
// - If cur1 points to a non split KV pair
// - kv.value references memory held by cur1
// - cur2 points to the next internal record after cur1
// Else
// - kv.value references memory in arena
// - cur2 points to the first internal record of the split KV pair
InternalCursor m_cur1 ;
InternalCursor m_cur2 ;
Arena m_arena ;
Optional < KeyValueRef > m_kv ;
public :
virtual Future < Void > findEqual ( KeyRef key ) { return find_impl ( this , key , true , 0 ) ; }
virtual Future < Void > findFirstEqualOrGreater ( KeyRef key , bool needValue , int prefetchNextBytes ) { return find_impl ( this , key , needValue , 1 ) ; }
virtual Future < Void > findLastLessOrEqual ( KeyRef key , bool needValue , int prefetchPriorBytes ) { return find_impl ( this , key , needValue , - 1 ) ; }
virtual Future < Void > next ( bool needValue ) { return move ( this , true , needValue ) ; }
virtual Future < Void > prev ( bool needValue ) { return move ( this , false , needValue ) ; }
2017-06-10 05:56:41 +08:00
virtual bool isValid ( ) {
return m_kv . present ( ) ;
}
virtual KeyRef getKey ( ) {
return m_kv . get ( ) . key ;
}
2019-02-21 18:46:30 +08:00
2017-06-10 05:56:41 +08:00
//virtual StringRef getCompressedKey() = 0;
virtual ValueRef getValue ( ) {
return m_kv . get ( ) . value ;
}
2018-09-19 15:32:39 +08:00
// TODO: Either remove this method or change the contract so that key and value strings returned are still valid after the cursor is
// moved and allocate them in some arena that this method resets.
2017-06-10 05:56:41 +08:00
virtual void invalidateReturnedStrings ( ) {
}
2019-02-21 18:46:30 +08:00
std : : string toString ( ) const {
2018-06-12 16:43:19 +08:00
std : : string r ;
2019-05-29 21:23:32 +08:00
r + = format ( " Cursor(%p) ver: % " PRId64 " " , this , m_version ) ;
2019-02-24 19:47:32 +08:00
if ( m_kv . present ( ) ) {
r + = format ( " KV: '%s' -> '%s' \n " , m_kv . get ( ) . key . printable ( ) . c_str ( ) , m_kv . get ( ) . value . printable ( ) . c_str ( ) ) ;
}
else {
2019-04-30 08:00:29 +08:00
r + = " KV: <np> \n " ;
2019-02-24 19:47:32 +08:00
}
2019-02-21 18:46:30 +08:00
r + = format ( " Cur1: %s \n " , m_cur1 . toString ( ) . c_str ( ) ) ;
r + = format ( " Cur2: %s \n " , m_cur2 . toString ( ) . c_str ( ) ) ;
2019-02-24 19:47:32 +08:00
2018-06-12 16:43:19 +08:00
return r ;
}
2017-09-15 20:19:39 +08:00
private :
2018-07-23 18:09:13 +08:00
// find key in tree closest to or equal to key (at this cursor's version)
2017-09-15 20:19:39 +08:00
// for less than or equal use cmp < 0
// for greater than or equal use cmp > 0
// for equal use cmp == 0
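// Illustrative restatement (no new behavior) of how the public entry points above select cmp:
//   findEqual(k)                          -> find_impl(this, k, true, 0)        // cmp == 0
//   findFirstEqualOrGreater(k, needValue) -> find_impl(this, k, needValue, 1)   // cmp > 0
//   findLastLessOrEqual(k, needValue)     -> find_impl(this, k, needValue, -1)  // cmp < 0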
2019-02-21 18:46:30 +08:00
ACTOR static Future < Void > find_impl ( Cursor * self , KeyRef key , bool needValue , int cmp ) {
2018-07-23 18:09:13 +08:00
// Search for the last key at or before (key, version, \xff)
2019-05-22 10:16:32 +08:00
state RedwoodRecordRef query ( key , self - > m_version , { } , 0 , std : : numeric_limits < int32_t > : : max ( ) ) ;
2019-02-21 18:46:30 +08:00
self - > m_kv . reset ( ) ;
2017-06-10 05:56:41 +08:00
2019-02-21 18:46:30 +08:00
wait ( success ( self - > m_cur1 . seekLTE ( query ) ) ) ;
debug_printf ( " find%sE(%s): %s \n " , cmp > 0 ? " GT " : ( cmp = = 0 ? " " : " LT " ) , query . toString ( ) . c_str ( ) , self - > toString ( ) . c_str ( ) ) ;
2017-06-10 05:56:41 +08:00
2019-02-21 18:46:30 +08:00
// If we found the target key with a present value then return it as it is valid for any cmp type
if ( self - > m_cur1 . present ( ) & & self - > m_cur1 . get ( ) . key = = key ) {
debug_printf ( " Target key found, reading full KV pair. Cursor: %s \n " , self - > toString ( ) . c_str ( ) ) ;
2018-09-20 18:39:55 +08:00
wait ( self - > readFullKVPair ( self ) ) ;
2017-09-15 20:19:39 +08:00
return Void ( ) ;
2017-09-09 16:29:25 +08:00
}
2019-02-21 18:46:30 +08:00
// Mode is ==, so if we're still here we didn't find it.
2017-09-15 20:19:39 +08:00
if ( cmp = = 0 ) {
2017-09-09 16:29:25 +08:00
return Void ( ) ;
}
2019-02-21 18:46:30 +08:00
// Mode is >=, so if we're here we have to go to the next present record at the target version
// because the seek done above was <= query
2017-09-15 20:19:39 +08:00
if ( cmp > 0 ) {
2019-02-21 18:46:30 +08:00
// cur1 is at a record < query, or is invalid.
// If cursor is invalid, try to go to start of tree
if ( ! self - > m_cur1 . valid ( ) ) {
bool valid = wait ( self - > m_cur1 . moveFirst ( ) ) ;
if ( ! valid ) {
self - > m_kv . reset ( ) ;
return Void ( ) ;
}
}
else {
loop {
bool valid = wait ( self - > m_cur1 . move ( true ) ) ;
if ( ! valid ) {
self - > m_kv . reset ( ) ;
return Void ( ) ;
}
if ( self - > m_cur1 . get ( ) . key > key ) {
break ;
}
}
2017-09-16 16:45:39 +08:00
}
2019-02-21 18:46:30 +08:00
2017-09-16 16:45:39 +08:00
// Get the next present key at the target version. Handles invalid cursor too.
2018-09-20 18:39:55 +08:00
wait ( self - > next ( needValue ) ) ;
2017-09-09 16:29:25 +08:00
}
2017-09-16 16:45:39 +08:00
else if ( cmp < 0 ) {
2019-02-21 18:46:30 +08:00
// Mode is <=, which is the same as the seekLTE(query) already done above
if ( ! self - > m_cur1 . valid ( ) ) {
self - > m_kv . reset ( ) ;
return Void ( ) ;
}
2017-09-17 19:38:01 +08:00
// Move to previous present kv pair at the target version
2018-09-20 18:39:55 +08:00
wait ( self - > prev ( needValue ) ) ;
2017-09-15 20:19:39 +08:00
}
2017-09-09 16:29:25 +08:00
return Void ( ) ;
}
2019-02-21 18:46:30 +08:00
// TODO: use needValue
ACTOR static Future < Void > move ( Cursor * self , bool fwd , bool needValue ) {
debug_printf ( " Cursor::move(%d): Cursor = %s \n " , fwd , self - > toString ( ) . c_str ( ) ) ;
ASSERT ( self - > m_cur1 . valid ( ) ) ;
2017-09-15 20:19:39 +08:00
2019-02-21 18:46:30 +08:00
// If kv is present then the key/version at cur1 was already returned so move to a new key
// Move cur1 until failure or a new key is found, keeping prior record visited in cur2
2017-09-16 16:45:39 +08:00
if ( self - > m_kv . present ( ) ) {
2019-02-21 18:46:30 +08:00
ASSERT ( self - > m_cur1 . valid ( ) ) ;
loop {
self - > m_cur2 = self - > m_cur1 ;
bool valid = wait ( self - > m_cur1 . move ( fwd ) ) ;
if ( ! valid | | self - > m_cur1 . get ( ) . key ! = self - > m_cur2 . get ( ) . key ) {
break ;
}
2017-09-16 16:45:39 +08:00
}
}
2019-02-21 18:46:30 +08:00
// Given two consecutive cursors c1 and c2, c1 represents a returnable record if
// c1.presentAtVersion(v) && (!c2.validAtVersion(v) || c2.get().key != c1.get().key)
// Note the distinction between 'present' and 'valid'. Present means the value for the key
// exists at the version (but could be the empty string) while valid just means the internal
// record is in effect at that version but it could indicate that the key was cleared and
// no longer exists from the user's perspective at that version
//
2019-02-24 19:47:32 +08:00
// cur2 must be the record immediately after cur1
// TODO: This may already be the case, store state to track this condition and avoid the reset here
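// Illustrative example of the rule above (hypothetical records, for intuition only):
//   c1 = ('a' @5 -> 'x')    c2 = ('a' @9 -> cleared), with c2 immediately after c1.
// Reading at v=7: c1 is present at 7 and c2 is not yet valid at 7 (9 > 7), so the pair
// 'a' -> 'x' is returnable. Reading at v=10: c2 is valid at 10 and has the same key,
// meaning a later change to 'a' (here a clear) supersedes c1, so c1 is not returnable.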
if ( self - > m_cur1 . valid ( ) ) {
2019-02-21 18:46:30 +08:00
self - > m_cur2 = self - > m_cur1 ;
wait ( success ( self - > m_cur2 . move ( true ) ) ) ;
}
while ( self - > m_cur1 . valid ( ) ) {
if ( self - > m_cur1 . presentAtVersion ( self - > m_version ) & &
( ! self - > m_cur2 . validAtVersion ( self - > m_version ) | |
self - > m_cur2 . get ( ) . key ! = self - > m_cur1 . get ( ) . key )
2017-09-16 08:27:13 +08:00
) {
2018-09-20 18:39:55 +08:00
wait ( readFullKVPair ( self ) ) ;
2017-09-15 20:19:39 +08:00
return Void ( ) ;
2017-06-10 05:56:41 +08:00
}
2019-02-21 18:46:30 +08:00
if ( fwd ) {
// Moving forward, move cur2 forward and keep cur1 pointing to the prior (predecessor) record
2019-02-24 19:47:32 +08:00
debug_printf ( " Cursor::move(%d): Moving forward, Cursor = %s \n " , fwd , self - > toString ( ) . c_str ( ) ) ;
2019-02-21 18:46:30 +08:00
self - > m_cur1 = self - > m_cur2 ;
wait ( success ( self - > m_cur2 . move ( true ) ) ) ;
}
else {
// Moving backward, move cur1 backward and keep cur2 pointing to the prior (successor) record
2019-02-24 19:47:32 +08:00
debug_printf ( " Cursor::move(%d): Moving backward, Cursor = %s \n " , fwd , self - > toString ( ) . c_str ( ) ) ;
2019-02-21 18:46:30 +08:00
self - > m_cur2 = self - > m_cur1 ;
wait ( success ( self - > m_cur1 . move ( false ) ) ) ;
}
2017-06-10 05:56:41 +08:00
}
2017-09-16 08:27:13 +08:00
2019-02-21 18:46:30 +08:00
self - > m_kv . reset ( ) ;
2019-02-24 19:47:32 +08:00
debug_printf ( " Cursor::move(%d): Exit, end of db reached. Cursor = %s \n " , fwd , self - > toString ( ) . c_str ( ) ) ;
2017-09-16 08:27:13 +08:00
return Void ( ) ;
2017-06-10 05:56:41 +08:00
}
2019-02-21 18:46:30 +08:00
// Read all of the current key-value record starting at cur1 into kv
ACTOR static Future < Void > readFullKVPair ( Cursor * self ) {
self - > m_arena = Arena ( ) ;
const RedwoodRecordRef & rec = self - > m_cur1 . get ( ) ;
debug_printf ( " readFullKVPair: Starting at %s \n " , self - > toString ( ) . c_str ( ) ) ;
2017-09-17 19:38:01 +08:00
2019-02-21 18:46:30 +08:00
// Unsplit value, cur1 will hold the key and value memory
if ( ! rec . isMultiPart ( ) ) {
2019-02-24 19:47:32 +08:00
self - > m_kv = KeyValueRef ( rec . key , rec . value . get ( ) ) ;
2019-02-21 18:46:30 +08:00
debug_printf ( " readFullKVPair: Unsplit, exit. %s \n " , self - > toString ( ) . c_str ( ) ) ;
2017-09-17 19:38:01 +08:00
2019-02-21 18:46:30 +08:00
return Void ( ) ;
2017-09-17 19:38:01 +08:00
}
2019-02-21 18:46:30 +08:00
// Split value, need to coalesce split value parts into a buffer in arena,
// after which cur1 will point to the first part and kv.key will reference its key
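// Illustrative layout (hypothetical sizes): a 30000 byte value stored as three chunks with
// chunk.start = 0, 10000, 20000 and chunk.total = 30000. cur1 is expected to be positioned
// on the last chunk (hence the assert below that start + size == total); each loop iteration
// copies its part into dst at offset chunk.start and then moves backward until all bytes are
// filled in.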
2019-04-30 08:00:29 +08:00
ASSERT ( rec . chunk . start + rec . value . get ( ) . size ( ) = = rec . chunk . total ) ;
2017-09-17 19:38:01 +08:00
2019-04-30 08:00:29 +08:00
debug_printf ( " readFullKVPair: Split, totalsize %d %s \n " , rec . chunk . total , self - > toString ( ) . c_str ( ) ) ;
2017-09-17 19:38:01 +08:00
2019-02-21 18:46:30 +08:00
// Allocate space for the entire value in the same arena as the key
2019-04-30 08:00:29 +08:00
state int bytesLeft = rec . chunk . total ;
2019-02-21 18:46:30 +08:00
state StringRef dst = makeString ( bytesLeft , self - > m_arena ) ;
2018-07-23 18:09:13 +08:00
2019-02-21 18:46:30 +08:00
loop {
const RedwoodRecordRef & rec = self - > m_cur1 . get ( ) ;
2017-09-09 16:29:25 +08:00
2019-02-21 18:46:30 +08:00
debug_printf ( " readFullKVPair: Adding chunk %s \n " , rec . toString ( ) . c_str ( ) ) ;
int partSize = rec . value . get ( ) . size ( ) ;
2019-04-30 08:00:29 +08:00
memcpy ( mutateString ( dst ) + rec . chunk . start , rec . value . get ( ) . begin ( ) , partSize ) ;
2019-02-21 18:46:30 +08:00
bytesLeft - = partSize ;
if ( bytesLeft = = 0 ) {
self - > m_kv = KeyValueRef ( rec . key , dst ) ;
return Void ( ) ;
2017-09-09 16:29:25 +08:00
}
2019-02-21 18:46:30 +08:00
ASSERT ( bytesLeft > 0 ) ;
// Move backward
bool success = wait ( self - > m_cur1 . move ( false ) ) ;
ASSERT ( success ) ;
2017-09-09 16:29:25 +08:00
}
2017-06-10 05:56:41 +08:00
}
} ;
2019-02-21 18:46:30 +08:00
2017-06-10 05:56:41 +08:00
} ;
2019-02-21 18:46:30 +08:00
RedwoodRecordRef VersionedBTree : : dbBegin ( StringRef ( ) , 0 ) ;
RedwoodRecordRef VersionedBTree : : dbEnd ( LiteralStringRef ( " \xff \xff \xff \xff " ) , std : : numeric_limits < Version > : : max ( ) ) ;
2019-03-15 15:46:09 +08:00
VersionedBTree : : Counts VersionedBTree : : counts ;
2017-08-23 02:30:44 +08:00
2017-10-02 18:32:22 +08:00
ACTOR template < class T >
Future < T > catchError ( Promise < Void > error , Future < T > f ) {
try {
T result = wait ( f ) ;
return result ;
} catch ( Error & e ) {
2018-10-25 06:57:06 +08:00
if ( e . code ( ) ! = error_code_actor_cancelled & & error . canBeSet ( ) )
2017-10-02 18:32:22 +08:00
error . sendError ( e ) ;
throw ;
}
}
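// Usage sketch (illustrative): forward an operation's failure to a shared error Promise so a
// later getError() caller can observe it, while still rethrowing to the immediate waiter.
// This mirrors the member catchError() wrapper used by the Redwood store below.
//   Promise<Void> err;
//   Future<Optional<Value>> v = catchError(err, readValue_impl(self, key, debugID));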
2017-09-22 14:51:55 +08:00
class KeyValueStoreRedwoodUnversioned : public IKeyValueStore {
2017-09-21 19:43:49 +08:00
public :
2017-09-22 14:51:55 +08:00
KeyValueStoreRedwoodUnversioned ( std : : string filePrefix , UID logID ) : m_filePrefix ( filePrefix ) {
2018-10-25 06:57:06 +08:00
// TODO: This constructor should really just take an IVersionedStore
IPager * pager = new IndirectShadowPager ( filePrefix ) ;
2019-04-30 08:00:29 +08:00
m_tree = new VersionedBTree ( pager , filePrefix , true , pager - > getUsablePageSize ( ) ) ;
2018-10-25 06:57:06 +08:00
m_init = catchError ( init_impl ( this ) ) ;
2017-09-21 19:43:49 +08:00
}
2017-09-22 14:51:55 +08:00
virtual Future < Void > init ( ) {
return m_init ;
}
ACTOR Future < Void > init_impl ( KeyValueStoreRedwoodUnversioned * self ) {
2018-10-25 06:57:06 +08:00
TraceEvent ( SevInfo , " RedwoodInit " ) . detail ( " FilePrefix " , self - > m_filePrefix ) ;
2018-09-20 18:39:55 +08:00
wait ( self - > m_tree - > init ( ) ) ;
2017-09-21 19:43:49 +08:00
Version v = wait ( self - > m_tree - > getLatestVersion ( ) ) ;
self - > m_tree - > setWriteVersion ( v + 1 ) ;
2018-10-25 06:57:06 +08:00
TraceEvent ( SevInfo , " RedwoodInitComplete " ) . detail ( " FilePrefix " , self - > m_filePrefix ) ;
2017-09-21 19:43:49 +08:00
return Void ( ) ;
}
2017-10-02 18:32:22 +08:00
ACTOR void shutdown ( KeyValueStoreRedwoodUnversioned * self , bool dispose ) {
2018-07-04 06:39:32 +08:00
TraceEvent ( SevInfo , " RedwoodShutdown " ) . detail ( " FilePrefix " , self - > m_filePrefix ) . detail ( " Dispose " , dispose ) ;
2018-10-25 06:57:06 +08:00
if ( self - > m_error . canBeSet ( ) ) {
self - > m_error . sendError ( actor_cancelled ( ) ) ; // Ideally this should be shutdown_in_progress
}
2017-09-23 08:18:28 +08:00
self - > m_init . cancel ( ) ;
2018-10-25 06:57:06 +08:00
Future < Void > closedFuture = self - > m_tree - > onClosed ( ) ;
2017-10-02 18:32:22 +08:00
if ( dispose )
2018-10-25 06:57:06 +08:00
self - > m_tree - > dispose ( ) ;
2017-10-02 18:32:22 +08:00
else
2018-10-25 06:57:06 +08:00
self - > m_tree - > close ( ) ;
2018-09-20 18:39:55 +08:00
wait ( closedFuture ) ;
2017-09-21 19:43:49 +08:00
self - > m_closed . send ( Void ( ) ) ;
2018-07-04 06:39:32 +08:00
TraceEvent ( SevInfo , " RedwoodShutdownComplete " ) . detail ( " FilePrefix " , self - > m_filePrefix ) . detail ( " Dispose " , dispose ) ;
2017-10-02 18:32:22 +08:00
delete self ;
2017-09-21 19:43:49 +08:00
}
virtual void close ( ) {
2017-10-02 18:32:22 +08:00
shutdown ( this , false ) ;
2017-09-21 19:43:49 +08:00
}
virtual void dispose ( ) {
2017-10-02 18:32:22 +08:00
shutdown ( this , true ) ;
2017-09-21 19:43:49 +08:00
}
virtual Future < Void > onClosed ( ) {
return m_closed . getFuture ( ) ;
}
Future < Void > commit ( bool sequential = false ) {
2017-10-10 04:24:16 +08:00
Future < Void > c = m_tree - > commit ( ) ;
m_tree - > setWriteVersion ( m_tree - > getWriteVersion ( ) + 1 ) ;
2018-10-25 06:57:06 +08:00
return catchError ( c ) ;
2017-09-21 19:43:49 +08:00
}
virtual KeyValueStoreType getType ( ) {
2017-09-22 14:51:55 +08:00
return KeyValueStoreType : : SSD_REDWOOD_V1 ;
2017-09-21 19:43:49 +08:00
}
virtual StorageBytes getStorageBytes ( ) {
2018-10-25 06:57:06 +08:00
return m_tree - > getStorageBytes ( ) ;
2017-09-21 19:43:49 +08:00
}
2018-10-25 06:57:06 +08:00
virtual Future < Void > getError ( ) {
return delayed ( m_error . getFuture ( ) ) ;
} ;
2017-09-21 19:43:49 +08:00
void clear ( KeyRangeRef range , const Arena * arena = 0 ) {
2019-03-15 15:46:09 +08:00
debug_printf ( " CLEAR %s \n " , printable ( range ) . c_str ( ) ) ;
2017-09-21 19:43:49 +08:00
m_tree - > clear ( range ) ;
}
virtual void set ( KeyValueRef keyValue , const Arena * arena = NULL ) {
2019-03-15 15:46:09 +08:00
debug_printf ( " SET %s \n " , keyValue . key . printable ( ) . c_str ( ) ) ;
2017-09-21 19:43:49 +08:00
m_tree - > set ( keyValue ) ;
}
2019-03-15 15:46:09 +08:00
virtual Future < Standalone < VectorRef < KeyValueRef > > > readRange ( KeyRangeRef keys , int rowLimit = 1 < < 30 , int byteLimit = 1 < < 30 ) {
debug_printf ( " READRANGE %s \n " , printable ( keys ) . c_str ( ) ) ;
return catchError ( readRange_impl ( this , keys , rowLimit , byteLimit ) ) ;
}
2018-10-26 10:48:31 +08:00
ACTOR static Future < Standalone < VectorRef < KeyValueRef > > > readRange_impl ( KeyValueStoreRedwoodUnversioned * self , KeyRange keys , int rowLimit , int byteLimit ) {
2019-03-15 15:46:09 +08:00
self - > m_tree - > counts . getRanges + + ;
2017-09-21 19:43:49 +08:00
state Standalone < VectorRef < KeyValueRef > > result ;
state int accumulatedBytes = 0 ;
ASSERT ( byteLimit > 0 ) ;
state Reference < IStoreCursor > cur = self - > m_tree - > readAtVersion ( self - > m_tree - > getLastCommittedVersion ( ) ) ;
2017-10-02 18:32:22 +08:00
2017-09-21 19:43:49 +08:00
if ( rowLimit > = 0 ) {
2018-09-20 18:39:55 +08:00
wait ( cur - > findFirstEqualOrGreater ( keys . begin , true , 0 ) ) ;
2017-09-21 19:43:49 +08:00
while ( cur - > isValid ( ) & & cur - > getKey ( ) < keys . end ) {
KeyValueRef kv ( KeyRef ( result . arena ( ) , cur - > getKey ( ) ) , ValueRef ( result . arena ( ) , cur - > getValue ( ) ) ) ;
accumulatedBytes + = kv . expectedSize ( ) ;
result . push_back ( result . arena ( ) , kv ) ;
2018-10-25 06:57:06 +08:00
if ( - - rowLimit = = 0 | | accumulatedBytes > = byteLimit ) {
2017-09-21 19:43:49 +08:00
break ;
2018-10-25 06:57:06 +08:00
}
2018-09-20 18:39:55 +08:00
wait ( cur - > next ( true ) ) ;
2017-09-21 19:43:49 +08:00
}
} else {
2018-09-20 18:39:55 +08:00
wait ( cur - > findLastLessOrEqual ( keys . end , true , 0 ) ) ;
2017-09-21 19:43:49 +08:00
if ( cur - > isValid ( ) & & cur - > getKey ( ) = = keys . end )
2018-09-20 18:39:55 +08:00
wait ( cur - > prev ( true ) ) ;
2017-09-21 19:43:49 +08:00
while ( cur - > isValid ( ) & & cur - > getKey ( ) > = keys . begin ) {
KeyValueRef kv ( KeyRef ( result . arena ( ) , cur - > getKey ( ) ) , ValueRef ( result . arena ( ) , cur - > getValue ( ) ) ) ;
accumulatedBytes + = kv . expectedSize ( ) ;
result . push_back ( result . arena ( ) , kv ) ;
2018-10-25 06:57:06 +08:00
if ( - - rowLimit = = 0 | | accumulatedBytes > = byteLimit ) {
2017-09-21 19:43:49 +08:00
break ;
2018-10-25 06:57:06 +08:00
}
2018-09-20 18:39:55 +08:00
wait ( cur - > prev ( true ) ) ;
2017-09-21 19:43:49 +08:00
}
}
return result ;
}
2018-10-26 10:48:31 +08:00
ACTOR static Future < Optional < Value > > readValue_impl ( KeyValueStoreRedwoodUnversioned * self , Key key , Optional < UID > debugID ) {
2019-03-15 15:46:09 +08:00
self - > m_tree - > counts . gets + + ;
2017-09-21 19:43:49 +08:00
state Reference < IStoreCursor > cur = self - > m_tree - > readAtVersion ( self - > m_tree - > getLastCommittedVersion ( ) ) ;
2018-09-20 18:39:55 +08:00
wait ( cur - > findEqual ( key ) ) ;
2017-10-02 18:32:22 +08:00
if ( cur - > isValid ( ) ) {
2017-09-21 19:43:49 +08:00
return cur - > getValue ( ) ;
2017-10-02 18:32:22 +08:00
}
2017-09-21 19:43:49 +08:00
return Optional < Value > ( ) ;
}
virtual Future < Optional < Value > > readValue ( KeyRef key , Optional < UID > debugID = Optional < UID > ( ) ) {
2018-10-25 06:57:06 +08:00
return catchError ( readValue_impl ( this , key , debugID ) ) ;
2017-09-21 19:43:49 +08:00
}
2018-10-26 10:48:31 +08:00
ACTOR static Future < Optional < Value > > readValuePrefix_impl ( KeyValueStoreRedwoodUnversioned * self , Key key , int maxLength , Optional < UID > debugID ) {
2019-03-15 15:46:09 +08:00
self - > m_tree - > counts . gets + + ;
2017-09-21 19:43:49 +08:00
state Reference < IStoreCursor > cur = self - > m_tree - > readAtVersion ( self - > m_tree - > getLastCommittedVersion ( ) ) ;
2018-09-20 18:39:55 +08:00
wait ( cur - > findEqual ( key ) ) ;
2017-09-21 19:43:49 +08:00
if ( cur - > isValid ( ) ) {
Value v = cur - > getValue ( ) ;
int len = std : : min ( v . size ( ) , maxLength ) ;
return Value ( cur - > getValue ( ) . substr ( 0 , len ) ) ;
}
return Optional < Value > ( ) ;
}
virtual Future < Optional < Value > > readValuePrefix ( KeyRef key , int maxLength , Optional < UID > debugID = Optional < UID > ( ) ) {
2018-10-25 06:57:06 +08:00
return catchError ( readValuePrefix_impl ( this , key , maxLength , debugID ) ) ;
2017-09-21 19:43:49 +08:00
}
2017-09-22 14:51:55 +08:00
virtual ~ KeyValueStoreRedwoodUnversioned ( ) {
2017-09-21 19:43:49 +08:00
} ;
private :
std : : string m_filePrefix ;
VersionedBTree * m_tree ;
Future < Void > m_init ;
Promise < Void > m_closed ;
2017-10-02 18:32:22 +08:00
Promise < Void > m_error ;
2018-10-25 06:57:06 +08:00
template < typename T > inline Future < T > catchError ( Future < T > f ) {
return : : catchError ( m_error , f ) ;
}
2017-09-21 19:43:49 +08:00
} ;
2017-09-22 14:51:55 +08:00
IKeyValueStore * keyValueStoreRedwoodV1 ( std : : string const & filename , UID logID ) {
return new KeyValueStoreRedwoodUnversioned ( filename , logID ) ;
2017-09-21 19:43:49 +08:00
}
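// Usage sketch (illustrative, from within an ACTOR; the file prefix and key/value literals
// are made up):
//   state IKeyValueStore *store = keyValueStoreRedwoodV1("test_redwood", UID());
//   wait(store->init());
//   store->set(KeyValueRef(LiteralStringRef("k"), LiteralStringRef("v")));
//   wait(store->commit());
//   Optional<Value> val = wait(store->readValue(LiteralStringRef("k")));
//   store->dispose();  // or close(); both routes drain through shutdown()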
2018-09-28 07:07:29 +08:00
int randomSize ( int max ) {
2019-02-21 18:46:30 +08:00
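// Cubing random01() biases the result toward small sizes: for example, a uniform draw of 0.5
// yields n = 0.125 * max, so about half of all results fall below max / 8.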
int n = pow ( g_random - > random01 ( ) , 3 ) * max ;
2018-09-28 07:07:29 +08:00
return n ;
}
2017-09-21 19:43:49 +08:00
2017-07-14 13:11:48 +08:00
KeyValue randomKV ( int keySize = 10 , int valueSize = 5 ) {
2018-09-28 07:07:29 +08:00
int kLen = randomSize ( 1 + keySize ) ;
2018-10-05 14:46:37 +08:00
int vLen = valueSize > 0 ? randomSize ( valueSize ) : 0 ;
2017-06-10 05:56:41 +08:00
KeyValue kv ;
kv . key = makeString ( kLen , kv . arena ( ) ) ;
kv . value = makeString ( vLen , kv . arena ( ) ) ;
for ( int i = 0 ; i < kLen ; + + i )
mutateString ( kv . key ) [ i ] = ( uint8_t ) g_random - > randomInt ( ' a ' , ' m ' ) ;
for ( int i = 0 ; i < vLen ; + + i )
mutateString ( kv . value ) [ i ] = ( uint8_t ) g_random - > randomInt ( ' n ' , ' z ' ) ;
return kv ;
}
2019-04-30 08:00:29 +08:00
ACTOR Future < int > verifyRange ( VersionedBTree * btree , Key start , Key end , Version v , std : : map < std : : pair < std : : string , Version > , Optional < std : : string > > * written , int * pErrorCount ) {
2017-09-15 20:19:39 +08:00
state int errors = 0 ;
if ( end < = start )
end = keyAfter ( start ) ;
2017-09-16 16:45:39 +08:00
state std : : map < std : : pair < std : : string , Version > , Optional < std : : string > > : : const_iterator i = written - > lower_bound ( std : : make_pair ( start . toString ( ) , 0 ) ) ;
2017-09-15 20:19:39 +08:00
state std : : map < std : : pair < std : : string , Version > , Optional < std : : string > > : : const_iterator iEnd = written - > upper_bound ( std : : make_pair ( end . toString ( ) , 0 ) ) ;
2017-09-16 08:27:13 +08:00
state std : : map < std : : pair < std : : string , Version > , Optional < std : : string > > : : const_iterator iLast ;
2017-09-15 20:19:39 +08:00
2017-09-17 19:38:01 +08:00
state Reference < IStoreCursor > cur = btree - > readAtVersion ( v ) ;
2019-05-29 21:23:32 +08:00
debug_printf ( " VerifyRange(@% " PRId64 " , %s, %s): Start cur=%p \n " , v , start . toString ( ) . c_str ( ) , end . toString ( ) . c_str ( ) , cur . getPtr ( ) ) ;
2017-09-17 19:38:01 +08:00
// Randomly use the cursor for something else first.
if ( g_random - > coinflip ( ) ) {
2017-09-21 15:58:56 +08:00
state Key randomKey = randomKV ( ) . key ;
2019-05-29 21:23:32 +08:00
debug_printf ( " VerifyRange(@% " PRId64 " , %s, %s): Dummy seek to '%s' \n " , v , start . toString ( ) . c_str ( ) , end . toString ( ) . c_str ( ) , randomKey . toString ( ) . c_str ( ) ) ;
2018-09-20 18:39:55 +08:00
wait ( g_random - > coinflip ( ) ? cur - > findFirstEqualOrGreater ( randomKey , true , 0 ) : cur - > findLastLessOrEqual ( randomKey , true , 0 ) ) ;
2017-09-17 19:38:01 +08:00
}
2018-06-14 19:15:14 +08:00
2019-05-29 21:23:32 +08:00
debug_printf ( " VerifyRange(@% " PRId64 " , %s, %s): Actual seek \n " , v , start . toString ( ) . c_str ( ) , end . toString ( ) . c_str ( ) ) ;
2018-09-20 18:39:55 +08:00
wait ( cur - > findFirstEqualOrGreater ( start , true , 0 ) ) ;
2017-09-17 19:38:01 +08:00
state std : : vector < KeyValue > results ;
2017-09-15 20:19:39 +08:00
while ( cur - > isValid ( ) & & cur - > getKey ( ) < end ) {
// Find the next written kv pair that would be present at this version
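// An entry iLast in the written map is the visible write for its key at version v when
// iLast.version <= v, iLast is a set (value present), and the following entry i is either
// for a different key or has a version > v, i.e. iLast is that key's latest write at or
// before v. The loop below advances until it finds such an entry or runs out of range.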
while ( 1 ) {
iLast = i ;
2017-09-16 08:27:13 +08:00
if ( i = = iEnd )
break ;
+ + i ;
2019-05-22 10:16:32 +08:00
2017-09-16 08:27:13 +08:00
if ( iLast - > first . second < = v
& & iLast - > second . present ( )
& & (
i = = iEnd
| | i - > first . first ! = iLast - > first . first
| | i - > first . second > v
)
2019-05-22 10:16:32 +08:00
) {
2019-05-29 21:23:32 +08:00
debug_printf ( " VerifyRange(@% " PRId64 " , %s, %s) Found key in written map: %s \n " , v , start . toString ( ) . c_str ( ) , end . toString ( ) . c_str ( ) , iLast - > first . first . c_str ( ) ) ;
2017-09-16 08:27:13 +08:00
break ;
2019-05-22 10:16:32 +08:00
}
2017-09-15 20:19:39 +08:00
}
2017-09-16 08:27:13 +08:00
2017-09-15 20:19:39 +08:00
if ( iLast = = iEnd ) {
2019-02-24 19:47:32 +08:00
+ + errors ;
+ + * pErrorCount ;
2019-05-29 21:23:32 +08:00
printf ( " VerifyRange(@% " PRId64 " , %s, %s) ERROR: Tree key '%s' vs nothing in written map. \n " , v , start . toString ( ) . c_str ( ) , end . toString ( ) . c_str ( ) , cur - > getKey ( ) . toString ( ) . c_str ( ) ) ;
2017-09-15 20:19:39 +08:00
break ;
}
2017-09-16 08:27:13 +08:00
2017-09-15 20:19:39 +08:00
if ( cur - > getKey ( ) ! = iLast - > first . first ) {
2019-02-24 19:47:32 +08:00
+ + errors ;
+ + * pErrorCount ;
2019-05-29 21:23:32 +08:00
printf ( " VerifyRange(@% " PRId64 " , %s, %s) ERROR: Tree key '%s' vs written '%s' \n " , v , start . toString ( ) . c_str ( ) , end . toString ( ) . c_str ( ) , cur - > getKey ( ) . toString ( ) . c_str ( ) , iLast - > first . first . c_str ( ) ) ;
2017-09-15 20:19:39 +08:00
break ;
}
if ( cur - > getValue ( ) ! = iLast - > second . get ( ) ) {
2019-02-24 19:47:32 +08:00
+ + errors ;
+ + * pErrorCount ;
2019-05-29 21:23:32 +08:00
printf ( " VerifyRange(@% " PRId64 " , %s, %s) ERROR: Tree key '%s' has tree value '%s' vs written '%s' \n " , v , start . toString ( ) . c_str ( ) , end . toString ( ) . c_str ( ) , cur - > getKey ( ) . toString ( ) . c_str ( ) , cur - > getValue ( ) . toString ( ) . c_str ( ) , iLast - > second . get ( ) . c_str ( ) ) ;
2017-09-15 20:19:39 +08:00
break ;
}
2017-09-17 19:38:01 +08:00
2019-05-22 10:16:32 +08:00
ASSERT ( errors = = 0 ) ;
2017-09-17 19:38:01 +08:00
results . push_back ( KeyValue ( KeyValueRef ( cur - > getKey ( ) , cur - > getValue ( ) ) ) ) ;
2018-09-20 18:39:55 +08:00
wait ( cur - > next ( true ) ) ;
2017-09-15 20:19:39 +08:00
}
2017-09-16 08:27:13 +08:00
// Make sure there are no further written kv pairs that would be present at this version.
while ( 1 ) {
iLast = i ;
if ( i = = iEnd )
break ;
+ + i ;
if ( iLast - > first . second < = v
& & iLast - > second . present ( )
& & (
i = = iEnd
| | i - > first . first ! = iLast - > first . first
| | i - > first . second > v
)
)
break ;
}
if ( iLast ! = iEnd ) {
2019-02-24 19:47:32 +08:00
+ + errors ;
+ + * pErrorCount ;
2019-05-29 21:23:32 +08:00
printf ( " VerifyRange(@% " PRId64 " , %s, %s) ERROR: Tree range ended but written has @% " PRId64 " '%s' \n " , v , start . toString ( ) . c_str ( ) , end . toString ( ) . c_str ( ) , iLast - > first . second , iLast - > first . first . c_str ( ) ) ;
2017-09-16 08:27:13 +08:00
}
2017-09-16 16:45:39 +08:00
2019-05-29 21:23:32 +08:00
debug_printf ( " VerifyRangeReverse(@% " PRId64 " , %s, %s): start \n " , v , start . toString ( ) . c_str ( ) , end . toString ( ) . c_str ( ) ) ;
2019-04-30 08:00:29 +08:00
// Randomly use a new cursor for the reverse range read but only if version history is available
if ( ! btree - > isSingleVersion ( ) & & g_random - > coinflip ( ) ) {
2017-09-17 19:38:01 +08:00
cur = btree - > readAtVersion ( v ) ;
}
// Now read the range from the tree in reverse order and compare to the saved results
2018-09-20 18:39:55 +08:00
wait ( cur - > findLastLessOrEqual ( end , true , 0 ) ) ;
2017-09-17 19:38:01 +08:00
if ( cur - > isValid ( ) & & cur - > getKey ( ) = = end )
2018-09-20 18:39:55 +08:00
wait ( cur - > prev ( true ) ) ;
2017-09-17 19:38:01 +08:00
state std : : vector < KeyValue > : : const_reverse_iterator r = results . rbegin ( ) ;
while ( cur - > isValid ( ) & & cur - > getKey ( ) > = start ) {
if ( r = = results . rend ( ) ) {
2019-02-24 19:47:32 +08:00
+ + errors ;
+ + * pErrorCount ;
2019-05-29 21:23:32 +08:00
printf ( " VerifyRangeReverse(@% " PRId64 " , %s, %s) ERROR: Tree key '%s' vs nothing in written map. \n " , v , start . toString ( ) . c_str ( ) , end . toString ( ) . c_str ( ) , cur - > getKey ( ) . toString ( ) . c_str ( ) ) ;
2017-09-17 19:38:01 +08:00
break ;
}
if ( cur - > getKey ( ) ! = r - > key ) {
2019-02-24 19:47:32 +08:00
+ + errors ;
+ + * pErrorCount ;
2019-05-29 21:23:32 +08:00
printf ( " VerifyRangeReverse(@% " PRId64 " , %s, %s) ERROR: Tree key '%s' vs written '%s' \n " , v , start . toString ( ) . c_str ( ) , end . toString ( ) . c_str ( ) , cur - > getKey ( ) . toString ( ) . c_str ( ) , r - > key . toString ( ) . c_str ( ) ) ;
2017-09-17 19:38:01 +08:00
break ;
}
if ( cur - > getValue ( ) ! = r - > value ) {
2019-02-24 19:47:32 +08:00
+ + errors ;
+ + * pErrorCount ;
2019-05-29 21:23:32 +08:00
printf ( " VerifyRangeReverse(@% " PRId64 " , %s, %s) ERROR: Tree key '%s' has tree value '%s' vs written '%s' \n " , v , start . toString ( ) . c_str ( ) , end . toString ( ) . c_str ( ) , cur - > getKey ( ) . toString ( ) . c_str ( ) , cur - > getValue ( ) . toString ( ) . c_str ( ) , r - > value . toString ( ) . c_str ( ) ) ;
2017-09-17 19:38:01 +08:00
break ;
}
+ + r ;
2018-09-20 18:39:55 +08:00
wait ( cur - > prev ( true ) ) ;
2017-09-17 19:38:01 +08:00
}
if ( r ! = results . rend ( ) ) {
2019-02-24 19:47:32 +08:00
+ + errors ;
+ + * pErrorCount ;
2019-05-29 21:23:32 +08:00
printf ( " VerifyRangeReverse(@% " PRId64 " , %s, %s) ERROR: Tree range ended but written has '%s' \n " , v , start . toString ( ) . c_str ( ) , end . toString ( ) . c_str ( ) , r - > key . toString ( ) . c_str ( ) ) ;
2017-09-17 19:38:01 +08:00
}
2017-09-15 20:19:39 +08:00
return errors ;
}
2019-02-24 19:47:32 +08:00
ACTOR Future < int > verifyAll ( VersionedBTree * btree , Version maxCommittedVersion , std : : map < std : : pair < std : : string , Version > , Optional < std : : string > > * written , int * pErrorCount ) {
2018-09-28 15:35:03 +08:00
// Read back every key at every version at which it was set or cleared, and verify the result.
state std : : map < std : : pair < std : : string , Version > , Optional < std : : string > > : : const_iterator i = written - > cbegin ( ) ;
state std : : map < std : : pair < std : : string , Version > , Optional < std : : string > > : : const_iterator iEnd = written - > cend ( ) ;
state int errors = 0 ;
while ( i ! = iEnd ) {
state std : : string key = i - > first . first ;
state Version ver = i - > first . second ;
if ( ver < = maxCommittedVersion ) {
state Optional < std : : string > val = i - > second ;
state Reference < IStoreCursor > cur = btree - > readAtVersion ( ver ) ;
2019-05-29 21:23:32 +08:00
debug_printf ( " Verifying @% " PRId64 " '%s' \n " , ver , key . c_str ( ) ) ;
2019-02-21 18:46:30 +08:00
state Arena arena ;
wait ( cur - > findEqual ( KeyRef ( arena , key ) ) ) ;
2018-09-28 15:35:03 +08:00
if ( val . present ( ) ) {
if ( ! ( cur - > isValid ( ) & & cur - > getKey ( ) = = key & & cur - > getValue ( ) = = val . get ( ) ) ) {
+ + errors ;
2019-02-24 19:47:32 +08:00
+ + * pErrorCount ;
2018-09-28 15:35:03 +08:00
if ( ! cur - > isValid ( ) )
2019-05-05 01:52:02 +08:00
printf ( " Verify ERROR: key_not_found: '%s' -> '%s' @% " PRId64 " \n " , key . c_str ( ) , val . get ( ) . c_str ( ) , ver ) ;
2018-09-28 15:35:03 +08:00
else if ( cur - > getKey ( ) ! = key )
2019-05-05 01:52:02 +08:00
printf ( " Verify ERROR: key_incorrect: found '%s' expected '%s' @% " PRId64 " \n " , cur - > getKey ( ) . toString ( ) . c_str ( ) , key . c_str ( ) , ver ) ;
2018-09-28 15:35:03 +08:00
else if ( cur - > getValue ( ) ! = val . get ( ) )
2019-05-05 01:52:02 +08:00
printf ( " Verify ERROR: value_incorrect: for '%s' found '%s' expected '%s' @% " PRId64 " \n " , cur - > getKey ( ) . toString ( ) . c_str ( ) , cur - > getValue ( ) . toString ( ) . c_str ( ) , val . get ( ) . c_str ( ) , ver ) ;
2018-09-28 15:35:03 +08:00
}
} else {
if ( cur - > isValid ( ) & & cur - > getKey ( ) = = key ) {
+ + errors ;
2019-02-24 19:47:32 +08:00
+ + * pErrorCount ;
2019-05-29 21:23:32 +08:00
printf ( " Verify ERROR: cleared_key_found: '%s' -> '%s' @% " PRId64 " \n " , key . c_str ( ) , cur - > getValue ( ) . toString ( ) . c_str ( ) , ver ) ;
2018-09-28 15:35:03 +08:00
}
}
}
+ + i ;
}
return errors ;
}
2019-05-22 10:16:32 +08:00
ACTOR Future < Void > verify ( VersionedBTree * btree , FutureStream < Version > vStream , std : : map < std : : pair < std : : string , Version > , Optional < std : : string > > * written , int * pErrorCount , bool serial ) {
2019-04-30 08:00:29 +08:00
state Future < int > vall ;
state Future < int > vrange ;
2018-09-28 15:35:03 +08:00
try {
loop {
2018-10-02 07:51:57 +08:00
state Version v = waitNext ( vStream ) ;
2018-09-28 15:35:03 +08:00
2019-04-30 08:00:29 +08:00
if ( btree - > isSingleVersion ( ) ) {
v = btree - > getLastCommittedVersion ( ) ;
2019-05-29 21:23:32 +08:00
debug_printf ( " Verifying at latest committed version % " PRId64 " \n " , v ) ;
2019-04-30 08:00:29 +08:00
vall = verifyRange ( btree , LiteralStringRef ( " " ) , LiteralStringRef ( " \xff \xff " ) , v , written , pErrorCount ) ;
2019-05-22 10:16:32 +08:00
if ( serial ) {
wait ( success ( vall ) ) ;
}
2019-04-30 08:00:29 +08:00
vrange = verifyRange ( btree , randomKV ( ) . key , randomKV ( ) . key , v , written , pErrorCount ) ;
2019-05-22 10:16:32 +08:00
if ( serial ) {
wait ( success ( vrange ) ) ;
}
2019-04-30 08:00:29 +08:00
}
else {
2019-05-29 21:23:32 +08:00
debug_printf ( " Verifying through version % " PRId64 " \n " , v ) ;
2019-04-30 08:00:29 +08:00
vall = verifyAll ( btree , v , written , pErrorCount ) ;
2019-05-22 10:16:32 +08:00
if ( serial ) {
wait ( success ( vall ) ) ;
}
2019-04-30 08:00:29 +08:00
vrange = verifyRange ( btree , randomKV ( ) . key , randomKV ( ) . key , g_random - > randomInt ( 1 , v + 1 ) , written , pErrorCount ) ;
2019-05-22 10:16:32 +08:00
if ( serial ) {
wait ( success ( vrange ) ) ;
}
2019-04-30 08:00:29 +08:00
}
2018-09-28 15:35:03 +08:00
wait ( success ( vall ) & & success ( vrange ) ) ;
2018-10-02 07:51:57 +08:00
int errors = vall . get ( ) + vrange . get ( ) ;
2019-05-29 21:23:32 +08:00
debug_printf ( " Verified through version % " PRId64 " , %d errors \n " , v , errors ) ;
2018-09-28 15:35:03 +08:00
if ( * pErrorCount ! = 0 )
break ;
}
} catch ( Error & e ) {
if ( e . code ( ) ! = error_code_end_of_stream ) {
throw ;
}
}
return Void ( ) ;
}
2018-10-05 14:46:37 +08:00
// Does random range reads; does not trap or report errors
ACTOR Future < Void > randomReader ( VersionedBTree * btree ) {
state Reference < IStoreCursor > cur ;
loop {
wait ( yield ( ) ) ;
if ( ! cur | | g_random - > random01 ( ) > .1 ) {
2019-04-30 08:00:29 +08:00
Version v = btree - > getLastCommittedVersion ( ) ;
if ( ! btree - > isSingleVersion ( ) ) {
v = g_random - > randomInt ( 1 , v + 1 ) ;
}
2018-10-05 14:46:37 +08:00
cur = btree - > readAtVersion ( v ) ;
}
2019-02-21 18:46:30 +08:00
state KeyValue kv = randomKV ( 10 , 0 ) ;
wait ( cur - > findFirstEqualOrGreater ( kv . key , true , 0 ) ) ;
2018-10-05 14:46:37 +08:00
state int c = g_random - > randomInt ( 0 , 100 ) ;
while ( cur - > isValid ( ) & & c - - > 0 ) {
wait ( success ( cur - > next ( true ) ) ) ;
wait ( yield ( ) ) ;
}
}
}
2018-08-29 04:46:14 +08:00
2019-02-21 18:46:30 +08:00
struct IntIntPair {
IntIntPair ( ) { }
IntIntPair ( int k , int v ) : k ( k ) , v ( v ) { }
IntIntPair ( Arena & arena , const IntIntPair & toCopy ) {
* this = toCopy ;
}
struct Delta {
2019-05-29 21:23:32 +08:00
bool prefixSource ;
2019-02-21 18:46:30 +08:00
int dk ;
int dv ;
2019-05-29 21:23:32 +08:00
IntIntPair apply ( const IntIntPair & base , Arena & arena ) {
return { base . k + dk , base . v + dv } ;
}
void setPrefixSource ( bool val ) {
prefixSource = val ;
}
bool getPrefixSource ( ) const {
return prefixSource ;
2019-02-21 18:46:30 +08:00
}
int size ( ) const {
return sizeof ( Delta ) ;
}
std : : string toString ( ) const {
2019-05-29 21:23:32 +08:00
return format ( " DELTA{prefixSource=%d dk=%d(0x%x) dv = % d ( 0 x % x ) } " , prefixSource, dk, dk, dv, dv) ;
2019-02-21 18:46:30 +08:00
}
} ;
int compare ( const IntIntPair & rhs ) const {
//printf("compare %s to %s\n", toString().c_str(), rhs.toString().c_str());
return k - rhs . k ;
}
bool operator = = ( const IntIntPair & rhs ) const {
return k = = rhs . k ;
}
2019-05-29 21:23:32 +08:00
int getCommonPrefixLen ( const IntIntPair & other , int skip ) const {
return 0 ;
}
2019-02-21 18:46:30 +08:00
int deltaSize ( const IntIntPair & base ) const {
return sizeof ( Delta ) ;
}
2019-05-29 21:23:32 +08:00
int writeDelta ( Delta & d , const IntIntPair & base , int commonPrefix = - 1 ) const {
d . dk = k - base . k ;
d . dv = v - base . v ;
return sizeof ( Delta ) ;
2019-02-21 18:46:30 +08:00
}
int k ;
int v ;
std : : string toString ( ) const {
return format ( " {k=%d(0x%x) v = % d ( 0 x % x ) } " , k, k, v, v) ;
}
} ;
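// Minimal usage sketch for DeltaTree with IntIntPair (mirrors the unit test further below;
// the buffer size here is an arbitrary illustration, not a computed bound):
//   std::vector<IntIntPair> items = {{10, 1000}, {20, 2000}, {30, 3000}};
//   IntIntPair prev = {0, 0}, next = {1000, 0};
//   DeltaTree<IntIntPair> *tree = (DeltaTree<IntIntPair> *)new uint8_t[1000];
//   tree->build(&items[0], &items[items.size()], &prev, &next);
//   DeltaTree<IntIntPair>::Reader r(tree, &prev, &next);
//   DeltaTree<IntIntPair>::Cursor c = r.getCursor();
//   ASSERT(c.moveFirst() && c.get() == items[0]);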
2019-05-29 21:23:32 +08:00
int getCommonIntFieldPrefix2 ( const RedwoodRecordRef & a , const RedwoodRecordRef & b ) {
RedwoodRecordRef : : byte aFields [ RedwoodRecordRef : : intFieldArraySize ] ;
RedwoodRecordRef : : byte bFields [ RedwoodRecordRef : : intFieldArraySize ] ;
a . serializeIntFields ( aFields ) ;
b . serializeIntFields ( bFields ) ;
//printf("a: %s\n", StringRef(aFields, RedwoodRecordRef::intFieldArraySize).toHexString().c_str());
//printf("b: %s\n", StringRef(bFields, RedwoodRecordRef::intFieldArraySize).toHexString().c_str());
int i = 0 ;
while ( i < RedwoodRecordRef : : intFieldArraySize & & aFields [ i ] = = bFields [ i ] ) {
+ + i ;
}
//printf("%d\n", i);
return i ;
}
void deltaTest ( RedwoodRecordRef rec , RedwoodRecordRef base ) {
char buf [ 500 ] ;
RedwoodRecordRef : : Delta & d = * ( RedwoodRecordRef : : Delta * ) buf ;
Arena mem ;
2019-05-30 07:47:53 +08:00
int expectedSize = rec . deltaSize ( base ) ;
int deltaSize = rec . writeDelta ( d , base ) ;
2019-05-29 21:23:32 +08:00
RedwoodRecordRef decoded = d . apply ( base , mem ) ;
2019-05-30 09:06:11 +08:00
if ( decoded ! = rec | | expectedSize ! = deltaSize ) {
printf ( " \n " ) ;
2019-05-30 07:47:53 +08:00
printf ( " Base: %s \n " , base . toString ( ) . c_str ( ) ) ;
printf ( " ExpectedSize: %d \n " , expectedSize ) ;
printf ( " DeltaSize: %d \n " , deltaSize ) ;
printf ( " Delta: %s \n " , d . toString ( ) . c_str ( ) ) ;
printf ( " Record: %s \n " , rec . toString ( ) . c_str ( ) ) ;
printf ( " Decoded: %s \n " , decoded . toString ( ) . c_str ( ) ) ;
2019-05-30 07:26:58 +08:00
printf ( " RedwoodRecordRef::Delta test failure! \n " ) ;
2019-05-29 21:23:32 +08:00
ASSERT ( false ) ;
}
}
2019-05-30 07:26:58 +08:00
Standalone < RedwoodRecordRef > randomRedwoodRecordRef ( int maxKeySize = 3 , int maxValueSize = 255 ) {
RedwoodRecordRef rec ;
KeyValue kv = randomKV ( 3 , 10 ) ;
rec . key = kv . key ;
if ( g_random - > random01 ( ) < .9 ) {
rec . value = kv . value ;
}
rec . version = g_random - > coinflip ( ) ? 0 : g_random - > randomInt64 ( 0 , std : : numeric_limits < Version > : : max ( ) ) ;
if ( g_random - > coinflip ( ) ) {
rec . chunk . total = g_random - > randomInt ( 1 , 100000 ) ;
rec . chunk . start = g_random - > randomInt ( 0 , rec . chunk . total ) ;
}
return Standalone < RedwoodRecordRef > ( rec , kv . arena ( ) ) ;
}
2019-05-29 21:23:32 +08:00
TEST_CASE ( " !/redwood/correctness/unit/RedwoodRecordRef " ) {
// Testing common prefix calculation for integer fields using the member function that calculates this directly
// and by serializing the integer fields to arrays and finding the common prefix length of the two arrays
deltaTest ( RedwoodRecordRef ( LiteralStringRef ( " " ) , 0 , LiteralStringRef ( " " ) , 0 , 0 ) ,
RedwoodRecordRef ( LiteralStringRef ( " " ) , 0 , LiteralStringRef ( " " ) , 0 , 0 )
) ;
deltaTest ( RedwoodRecordRef ( LiteralStringRef ( " abc " ) , 0 , LiteralStringRef ( " " ) , 0 , 0 ) ,
RedwoodRecordRef ( LiteralStringRef ( " abc " ) , 0 , LiteralStringRef ( " " ) , 0 , 0 )
) ;
deltaTest ( RedwoodRecordRef ( LiteralStringRef ( " abc " ) , 0 , LiteralStringRef ( " " ) , 0 , 0 ) ,
RedwoodRecordRef ( LiteralStringRef ( " abcd " ) , 0 , LiteralStringRef ( " " ) , 0 , 0 )
) ;
deltaTest ( RedwoodRecordRef ( LiteralStringRef ( " abc " ) , 2 , LiteralStringRef ( " " ) , 0 , 0 ) ,
RedwoodRecordRef ( LiteralStringRef ( " abc " ) , 2 , LiteralStringRef ( " " ) , 0 , 0 )
) ;
deltaTest ( RedwoodRecordRef ( LiteralStringRef ( " abc " ) , 2 , LiteralStringRef ( " " ) , 0 , 0 ) ,
RedwoodRecordRef ( LiteralStringRef ( " ab " ) , 2 , LiteralStringRef ( " " ) , 1 , 3 )
) ;
deltaTest ( RedwoodRecordRef ( LiteralStringRef ( " abc " ) , 2 , LiteralStringRef ( " " ) , 5 , 0 ) ,
RedwoodRecordRef ( LiteralStringRef ( " abc " ) , 2 , LiteralStringRef ( " " ) , 5 , 1 )
) ;
2019-05-30 07:47:53 +08:00
RedwoodRecordRef : : byte varInts [ 100 ] ;
RedwoodRecordRef : : Writer w ( varInts ) ;
RedwoodRecordRef : : Reader r ( varInts ) ;
w . writeVarInt ( 1 ) ;
w . writeVarInt ( 128 ) ;
w . writeVarInt ( 32000 ) ;
ASSERT ( r . readVarInt ( ) = = 1 ) ;
ASSERT ( r . readVarInt ( ) = = 128 ) ;
ASSERT ( r . readVarInt ( ) = = 32000 ) ;
2019-05-29 21:23:32 +08:00
RedwoodRecordRef rec1 ;
RedwoodRecordRef rec2 ;
rec1 . version = 0x12345678 ;
rec2 . version = 0x12995678 ;
ASSERT ( rec1 . getCommonIntFieldPrefix ( rec2 ) = = 5 ) ;
ASSERT ( rec1 . getCommonIntFieldPrefix ( rec2 ) = = getCommonIntFieldPrefix2 ( rec1 , rec2 ) ) ;
rec1 . version = 0x12345678 ;
rec2 . version = 0x12345678 ;
ASSERT ( rec1 . getCommonIntFieldPrefix ( rec2 ) = = 14 ) ;
ASSERT ( rec1 . getCommonIntFieldPrefix ( rec2 ) = = getCommonIntFieldPrefix2 ( rec1 , rec2 ) ) ;
rec1 . version = invalidVersion ;
rec2 . version = 0 ;
ASSERT ( rec1 . getCommonIntFieldPrefix ( rec2 ) = = 0 ) ;
ASSERT ( rec1 . getCommonIntFieldPrefix ( rec2 ) = = getCommonIntFieldPrefix2 ( rec1 , rec2 ) ) ;
rec1 . version = 0x12345678 ;
rec2 . version = 0x12345678 ;
rec1 . chunk . total = 4 ;
rec2 . chunk . total = 4 ;
ASSERT ( rec1 . getCommonIntFieldPrefix ( rec2 ) = = 14 ) ;
ASSERT ( rec1 . getCommonIntFieldPrefix ( rec2 ) = = getCommonIntFieldPrefix2 ( rec1 , rec2 ) ) ;
rec1 . version = 0x12345678 ;
rec2 . version = 0x12345678 ;
rec1 . chunk . start = 4 ;
rec2 . chunk . start = 4 ;
ASSERT ( rec1 . getCommonIntFieldPrefix ( rec2 ) = = 14 ) ;
ASSERT ( rec1 . getCommonIntFieldPrefix ( rec2 ) = = getCommonIntFieldPrefix2 ( rec1 , rec2 ) ) ;
rec1 . version = 0x12345678 ;
rec2 . version = 0x12345678 ;
rec1 . chunk . start = 4 ;
rec2 . chunk . start = 5 ;
ASSERT ( rec1 . getCommonIntFieldPrefix ( rec2 ) = = 13 ) ;
ASSERT ( rec1 . getCommonIntFieldPrefix ( rec2 ) = = getCommonIntFieldPrefix2 ( rec1 , rec2 ) ) ;
rec1 . version = 0x12345678 ;
rec2 . version = 0x12345678 ;
rec1 . chunk . total = 256 ;
rec2 . chunk . total = 512 ;
ASSERT ( rec1 . getCommonIntFieldPrefix ( rec2 ) = = 9 ) ;
ASSERT ( rec1 . getCommonIntFieldPrefix ( rec2 ) = = getCommonIntFieldPrefix2 ( rec1 , rec2 ) ) ;
Arena mem ;
double start ;
uint64_t total ;
uint64_t count ;
uint64_t i ;
start = timer ( ) ;
total = 0 ;
2019-05-30 07:26:58 +08:00
count = 1e9 ;
2019-05-29 21:23:32 +08:00
for ( i = 0 ; i < count ; + + i ) {
rec1 . chunk . total = i & 0xffffff ;
rec2 . chunk . total = i & 0xffffff ;
rec1 . chunk . start = i & 0xffffff ;
rec2 . chunk . start = ( i + 1 ) & 0xffffff ;
total + = rec1 . getCommonIntFieldPrefix ( rec2 ) ;
}
printf ( " % " PRId64 " getCommonIntFieldPrefix() %g M/s \n " , total , count / ( timer ( ) - start ) / 1e6 ) ;
rec1 . key = LiteralStringRef ( " alksdfjaklsdfjlkasdjflkasdjfklajsdflk;ajsdflkajdsflkjadsf " ) ;
rec2 . key = LiteralStringRef ( " alksdfjaklsdfjlkasdjflkasdjfklajsdflk;ajsdflkajdsflkjadsf " ) ;
start = timer ( ) ;
total = 0 ;
2019-05-30 07:26:58 +08:00
count = 1e9 ;
2019-05-29 21:23:32 +08:00
for ( i = 0 ; i < count ; + + i ) {
RedwoodRecordRef : : byte fields [ RedwoodRecordRef : : intFieldArraySize ] ;
rec1 . chunk . start = i & 0xffffff ;
rec2 . chunk . start = ( i + 1 ) & 0xffffff ;
rec1 . serializeIntFields ( fields ) ;
total + = fields [ RedwoodRecordRef : : intFieldArraySize - 1 ] ;
}
printf ( " % " PRId64 " serializeIntFields() %g M/s \n " , total , count / ( timer ( ) - start ) / 1e6 ) ;
start = timer ( ) ;
total = 0 ;
2019-05-30 07:26:58 +08:00
count = 100e6 ;
2019-05-29 21:23:32 +08:00
for ( i = 0 ; i < count ; + + i ) {
rec1 . chunk . start = i & 0xffffff ;
rec2 . chunk . start = ( i + 1 ) & 0xffffff ;
total + = rec1 . getCommonPrefixLen ( rec2 , 50 ) ;
}
printf ( " % " PRId64 " getCommonPrefixLen(skip=50) %g M/s \n " , total , count / ( timer ( ) - start ) / 1e6 ) ;
start = timer ( ) ;
total = 0 ;
2019-05-30 07:26:58 +08:00
count = 100e6 ;
2019-05-29 21:23:32 +08:00
for ( i = 0 ; i < count ; + + i ) {
rec1 . chunk . start = i & 0xffffff ;
rec2 . chunk . start = ( i + 1 ) & 0xffffff ;
total + = rec1 . getCommonPrefixLen ( rec2 , 0 ) ;
}
printf ( " % " PRId64 " getCommonPrefixLen(skip=0) %g M/s \n " , total , count / ( timer ( ) - start ) / 1e6 ) ;
char buf [ 1000 ] ;
RedwoodRecordRef : : Delta & d = * ( RedwoodRecordRef : : Delta * ) buf ;
start = timer ( ) ;
total = 0 ;
2019-05-30 07:26:58 +08:00
count = 100e6 ;
2019-05-29 21:23:32 +08:00
int commonPrefix = rec1 . getCommonPrefixLen ( rec2 , 0 ) ;
for ( i = 0 ; i < count ; + + i ) {
rec1 . chunk . start = i & 0xffffff ;
rec2 . chunk . start = ( i + 1 ) & 0xffffff ;
total + = rec1 . writeDelta ( d , rec2 , commonPrefix ) ;
}
printf ( " % " PRId64 " writeDelta(commonPrefix=%d) %g M/s \n " , total , commonPrefix , count / ( timer ( ) - start ) / 1e6 ) ;
start = timer ( ) ;
total = 0 ;
2019-05-30 07:26:58 +08:00
count = 10e6 ;
2019-05-29 21:23:32 +08:00
for ( i = 0 ; i < count ; + + i ) {
rec1 . chunk . start = i & 0xffffff ;
rec2 . chunk . start = ( i + 1 ) & 0xffffff ;
total + = rec1 . writeDelta ( d , rec2 ) ;
}
printf ( " % " PRId64 " writeDelta() %g M/s \n " , total , count / ( timer ( ) - start ) / 1e6 ) ;
2019-05-30 07:26:58 +08:00
start = timer ( ) ;
total = 0 ;
2019-05-30 07:47:53 +08:00
count = 1e6 ;
2019-05-30 07:26:58 +08:00
for ( i = 0 ; i < count ; + + i ) {
Standalone < RedwoodRecordRef > a = randomRedwoodRecordRef ( ) ;
Standalone < RedwoodRecordRef > b = randomRedwoodRecordRef ( ) ;
deltaTest ( a , b ) ;
}
printf ( " Random deltaTest() %g M/s \n " , count / ( timer ( ) - start ) / 1e6 ) ;
2019-05-29 21:23:32 +08:00
return Void ( ) ;
}
TEST_CASE ( " !/redwood/correctness/unit/deltaTree/RedwoodRecordRef " ) {
2019-02-21 18:46:30 +08:00
const int N = 200 ;
RedwoodRecordRef prev ;
RedwoodRecordRef next ( LiteralStringRef ( " \xff \xff \xff \xff " ) ) ;
Arena arena ;
std : : vector < RedwoodRecordRef > items ;
for ( int i = 0 ; i < N ; + + i ) {
std : : string k = g_random - > randomAlphaNumeric ( 30 ) ;
std : : string v = g_random - > randomAlphaNumeric ( 30 ) ;
RedwoodRecordRef rec ;
rec . key = StringRef ( arena , k ) ;
rec . version = g_random - > coinflip ( ) ? g_random - > randomInt64 ( 0 , std : : numeric_limits < Version > : : max ( ) ) : invalidVersion ;
if ( g_random - > coinflip ( ) ) {
rec . value = StringRef ( arena , v ) ;
if ( g_random - > coinflip ( ) ) {
2019-05-29 21:23:32 +08:00
rec . chunk . start = g_random - > randomInt ( 0 , 100000 ) ;
rec . chunk . total = rec . chunk . start + v . size ( ) + g_random - > randomInt ( 0 , 100000 ) ;
2019-02-21 18:46:30 +08:00
}
}
items . push_back ( rec ) ;
//printf("i=%d %s\n", i, items.back().toString().c_str());
}
std : : sort ( items . begin ( ) , items . end ( ) ) ;
DeltaTree < RedwoodRecordRef > * tree = ( DeltaTree < RedwoodRecordRef > * ) new uint8_t [ N * 100 ] ;
tree - > build ( & items [ 0 ] , & items [ items . size ( ) ] , & prev , & next ) ;
printf ( " Count=%d Size=%d InitialDepth=%d \n " , ( int ) items . size ( ) , ( int ) tree - > size ( ) , ( int ) tree - > initialDepth ) ;
debug_printf ( " Data(%p): %s \n " , tree , StringRef ( ( uint8_t * ) tree , tree - > size ( ) ) . toHexString ( ) . c_str ( ) ) ;
DeltaTree < RedwoodRecordRef > : : Reader r ( tree , & prev , & next ) ;
DeltaTree < RedwoodRecordRef > : : Cursor fwd = r . getCursor ( ) ;
DeltaTree < RedwoodRecordRef > : : Cursor rev = r . getCursor ( ) ;
ASSERT ( fwd . moveFirst ( ) ) ;
ASSERT ( rev . moveLast ( ) ) ;
int i = 0 ;
while ( 1 ) {
if ( fwd . get ( ) ! = items [ i ] ) {
printf ( " forward iterator i=%d \n %s found \n %s expected \n " , i , fwd . get ( ) . toString ( ) . c_str ( ) , items [ i ] . toString ( ) . c_str ( ) ) ;
printf ( " Delta: %s \n " , fwd . node - > raw - > delta - > toString ( ) . c_str ( ) ) ;
ASSERT ( false ) ;
}
if ( rev . get ( ) ! = items [ items . size ( ) - 1 - i ] ) {
printf ( " reverse iterator i=%d \n %s found \n %s expected \n " , i , rev . get ( ) . toString ( ) . c_str ( ) , items [ items . size ( ) - 1 - i ] . toString ( ) . c_str ( ) ) ;
printf ( " Delta: %s \n " , rev . node - > raw - > delta - > toString ( ) . c_str ( ) ) ;
ASSERT ( false ) ;
}
+ + i ;
ASSERT ( fwd . moveNext ( ) = = rev . movePrev ( ) ) ;
ASSERT ( fwd . valid ( ) = = rev . valid ( ) ) ;
if ( ! fwd . valid ( ) ) {
break ;
}
}
ASSERT ( i = = items . size ( ) ) ;
double start = timer ( ) ;
DeltaTree < RedwoodRecordRef > : : Cursor c = r . getCursor ( ) ;
for ( int i = 0 ; i < 20000000 ; + + i ) {
const RedwoodRecordRef & query = items [ g_random - > randomInt ( 0 , items . size ( ) ) ] ;
if ( ! c . seekLessThanOrEqual ( query ) ) {
printf ( " Not found! query=%s \n " , query . toString ( ) . c_str ( ) ) ;
ASSERT ( false ) ;
}
if ( c . get ( ) ! = query ) {
printf ( " Found incorrect node! query=%s found=%s \n " , query . toString ( ) . c_str ( ) , c . get ( ) . toString ( ) . c_str ( ) ) ;
ASSERT ( false ) ;
}
}
double elapsed = timer ( ) - start ;
printf ( " Elapsed %f \n " , elapsed ) ;
return Void ( ) ;
}
2019-05-29 21:23:32 +08:00
TEST_CASE ( " !/redwood/correctness/unit/deltaTree/IntIntPair " ) {
2019-02-21 18:46:30 +08:00
const int N = 200 ;
IntIntPair prev = { 0 , 0 } ;
IntIntPair next = { 1000 , 0 } ;
std : : vector < IntIntPair > items ;
for ( int i = 0 ; i < N ; + + i ) {
items . push_back ( { i * 10 , i * 1000 } ) ;
//printf("i=%d %s\n", i, items.back().toString().c_str());
}
DeltaTree < IntIntPair > * tree = ( DeltaTree < IntIntPair > * ) new uint8_t [ 10000 ] ;
tree - > build ( & items [ 0 ] , & items [ items . size ( ) ] , & prev , & next ) ;
printf ( " Count=%d Size=%d InitialDepth=%d \n " , ( int ) items . size ( ) , ( int ) tree - > size ( ) , ( int ) tree - > initialDepth ) ;
debug_printf ( " Data(%p): %s \n " , tree , StringRef ( ( uint8_t * ) tree , tree - > size ( ) ) . toHexString ( ) . c_str ( ) ) ;
DeltaTree < IntIntPair > : : Reader r ( tree , & prev , & next ) ;
DeltaTree < IntIntPair > : : Cursor fwd = r . getCursor ( ) ;
DeltaTree < IntIntPair > : : Cursor rev = r . getCursor ( ) ;
ASSERT ( fwd . moveFirst ( ) ) ;
ASSERT ( rev . moveLast ( ) ) ;
int i = 0 ;
while ( 1 ) {
if ( fwd . get ( ) ! = items [ i ] ) {
printf ( " forward iterator i=%d \n %s found \n %s expected \n " , i , fwd . get ( ) . toString ( ) . c_str ( ) , items [ i ] . toString ( ) . c_str ( ) ) ;
ASSERT ( false ) ;
}
if ( rev . get ( ) ! = items [ items . size ( ) - 1 - i ] ) {
printf ( " reverse iterator i=%d \n %s found \n %s expected \n " , i , rev . get ( ) . toString ( ) . c_str ( ) , items [ items . size ( ) - 1 - i ] . toString ( ) . c_str ( ) ) ;
ASSERT ( false ) ;
}
+ + i ;
ASSERT ( fwd . moveNext ( ) = = rev . movePrev ( ) ) ;
ASSERT ( fwd . valid ( ) = = rev . valid ( ) ) ;
if ( ! fwd . valid ( ) ) {
break ;
}
}
ASSERT ( i = = items . size ( ) ) ;
DeltaTree < IntIntPair > : : Cursor c = r . getCursor ( ) ;
double start = timer ( ) ;
for ( int i = 0 ; i < 20000000 ; + + i ) {
IntIntPair p ( { g_random - > randomInt ( 0 , items . size ( ) * 10 ) , 0 } ) ;
if ( ! c . seekLessThanOrEqual ( p ) ) {
printf ( " Not found! query=%s \n " , p . toString ( ) . c_str ( ) ) ;
ASSERT ( false ) ;
}
if ( c . get ( ) . k ! = ( p . k - ( p . k % 10 ) ) ) {
printf ( " Found incorrect node! query=%s found=%s \n " , p . toString ( ) . c_str ( ) , c . get ( ) . toString ( ) . c_str ( ) ) ;
ASSERT ( false ) ;
}
}
double elapsed = timer ( ) - start ;
printf ( " Elapsed %f \n " , elapsed ) ;
return Void ( ) ;
}
struct SimpleCounter {
SimpleCounter ( ) : x ( 0 ) , xt ( 0 ) , t ( timer ( ) ) , start ( t ) { }
void operator + = ( int n ) { x + = n ; }
void operator + + ( ) { x + + ; }
int64_t get ( ) { return x ; }
double rate ( ) {
double t2 = timer ( ) ;
int r = ( x - xt ) / ( t2 - t ) ;
xt = x ;
t = t2 ;
return r ;
}
double avgRate ( ) { return x / ( timer ( ) - start ) ; }
int64_t x ;
double t ;
double start ;
int64_t xt ;
2019-05-29 21:23:32 +08:00
std : : string toString ( ) { return format ( " % " PRId64 " /%.2f/%.2f " , x , rate ( ) / 1e6 , avgRate ( ) / 1e6 ) ; }
2019-02-21 18:46:30 +08:00
} ;
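// Usage sketch (illustrative; matches how the btree test below reports progress):
//   SimpleCounter mutationBytes;
//   mutationBytes += kv.expectedSize();
//   // toString() prints the total plus rate() and avgRate() divided by 1e6
//   printf("mutation bytes: %s\n", mutationBytes.toString().c_str());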
2019-05-29 21:23:32 +08:00
TEST_CASE ( " !/redwood/correctness/btree " ) {
2018-09-20 10:16:18 +08:00
state bool useDisk = true ; // MemoryPager is not being maintained currently.
2017-07-14 13:11:48 +08:00
2018-07-05 12:12:09 +08:00
state std : : string pagerFile = " unittest_pageFile " ;
2018-10-15 18:43:43 +08:00
IPager * pager ;
2018-08-29 04:46:14 +08:00
2019-05-22 10:16:32 +08:00
state bool serialTest = g_random - > coinflip ( ) ;
state bool shortTest = g_random - > coinflip ( ) ;
state bool singleVersion = true ; // Multi-version mode is broken / not finished
2019-05-22 13:19:14 +08:00
state double startTime = now ( ) ;
2019-05-22 10:16:32 +08:00
printf ( " serialTest: %d shortTest: %d singleVersion: %d \n " , serialTest , shortTest , singleVersion ) ;
2018-10-15 18:43:43 +08:00
if ( useDisk ) {
2019-02-24 19:47:32 +08:00
printf ( " Deleting existing test data... \n " ) ;
2018-10-15 18:43:43 +08:00
deleteFile ( pagerFile ) ;
deleteFile ( pagerFile + " 0.pagerlog " ) ;
deleteFile ( pagerFile + " 1.pagerlog " ) ;
2017-09-22 14:51:55 +08:00
pager = new IndirectShadowPager ( pagerFile ) ;
2018-10-15 18:43:43 +08:00
}
2017-07-14 13:11:48 +08:00
else
pager = createMemoryPager ( ) ;
2019-02-24 19:47:32 +08:00
printf ( " Initializing... \n " ) ;
2019-05-22 10:16:32 +08:00
state int pageSize = shortTest ? 200 : ( g_random - > coinflip ( ) ? pager - > getUsablePageSize ( ) : g_random - > randomInt ( 200 , 400 ) ) ;
2019-04-30 08:00:29 +08:00
state VersionedBTree * btree = new VersionedBTree ( pager , pagerFile , singleVersion , pageSize ) ;
2018-09-20 18:39:55 +08:00
wait ( btree - > init ( ) ) ;
2017-06-10 05:56:41 +08:00
2017-09-06 07:59:31 +08:00
// We must be able to fit at least any two keys plus overhead in a page to prevent
// a situation where the tree cannot be grown upward with decreasing level size.
2018-06-12 16:43:19 +08:00
// TODO: Handle arbitrarily large keys
2019-02-21 18:46:30 +08:00
state int maxKeySize = g_random - > randomInt ( 4 , pageSize * 2 ) ;
2019-05-22 10:16:32 +08:00
state int maxValueSize = g_random - > randomInt ( 0 , pageSize * 4 ) ;
state int maxCommitSize = shortTest ? 1000 : randomSize ( 10e6 ) ;
state int mutationBytesTarget = shortTest ? 5000 : randomSize ( 50e6 ) ;
state double clearChance = g_random - > random01 ( ) * .01 ; // at most 1 in 100
2019-02-21 18:46:30 +08:00
2019-03-15 15:46:09 +08:00
printf ( " Using page size %d, max key size %d, max value size %d, clearchance %f, total mutation byte target %d \n " , pageSize , maxKeySize , maxValueSize , clearChance , mutationBytesTarget ) ;
2017-09-06 07:59:31 +08:00
2017-08-22 13:29:57 +08:00
state std : : map < std : : pair < std : : string , Version > , Optional < std : : string > > written ;
state std : : set < Key > keys ;

	state Version lastVer = wait(btree->getLatestVersion());
	printf("Starting from version: %" PRId64 "\n", lastVer);

	state Version version = lastVer + 1;
	btree->setWriteVersion(version);

	state SimpleCounter mutationBytes;
	state SimpleCounter keyBytesInserted;
	state SimpleCounter valueBytesInserted;
	state SimpleCounter sets;
	state SimpleCounter rangeClears;
	state SimpleCounter keyBytesCleared;
	state int errorCount = 0;  // Incremented by the verifier when a mismatch is found
	state int mutationBytesThisCommit = 0;
	state int mutationBytesTargetThisCommit = randomSize(maxCommitSize);

	state PromiseStream<Version> committedVersions;
	state Future<Void> verifyTask = verify(btree, committedVersions.getFuture(), &written, &errorCount, serialTest);
	state Future<Void> randomTask = serialTest ? Void() : (randomReader(btree) || btree->getError());

	state Future<Void> commit = Void();
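
	// Main mutation loop.  The verifier consumes committed versions from committedVersions and
	// checks the btree against 'written'; randomTask issues unvalidated concurrent reads to
	// exercise the btree under read/write concurrency.  In serialTest mode, verification is
	// instead performed synchronously after each commit.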
	while(mutationBytes.get() < mutationBytesTarget) {
		// Cap the test's run time by lowering the target to what has already been written
		if(now() - startTime > 600) {
			mutationBytesTarget = mutationBytes.get();
		}

		// Sometimes advance the version
		if(g_random->random01() < 0.10) {
			++version;
			btree->setWriteVersion(version);
		}

		// Sometimes do a clear range
		if(g_random->random01() < clearChance) {
			Key start = randomKV(maxKeySize, 1).key;
			Key end = (g_random->random01() < .01) ? keyAfter(start) : randomKV(maxKeySize, 1).key;

			// Sometimes replace start and/or end with a close actual (previously used) value
			if(g_random->random01() < .10) {
				auto i = keys.upper_bound(start);
				if(i != keys.end())
					start = *i;
			}
			if(g_random->random01() < .10) {
				auto i = keys.upper_bound(end);
				if(i != keys.end())
					end = *i;
			}

			if(end == start)
				end = keyAfter(start);
			else if(end < start) {
				std::swap(end, start);
			}

			++rangeClears;
			KeyRangeRef range(start, end);
			debug_printf("Mutation:  Clear '%s' to '%s' @%" PRId64 "\n", start.toString().c_str(), end.toString().c_str(), version);

			auto e = written.lower_bound(std::make_pair(start.toString(), 0));
			if(e != written.end()) {
				auto last = e;
				auto eEnd = written.lower_bound(std::make_pair(end.toString(), 0));
				while(e != eEnd) {
					auto w = *e;
					++e;
					// If e key is different from last and last was present then insert clear for last's key at version
					if(last != eEnd && ((e == eEnd || e->first.first != last->first.first) && last->second.present())) {
						debug_printf("Mutation:    Clearing key '%s' @%" PRId64 "\n", last->first.first.c_str(), version);

						keyBytesCleared += last->first.first.size();
						mutationBytes += last->first.first.size();
						mutationBytesThisCommit += last->first.first.size();

						// If the last set was at version then just make it not present
						if(last->first.second == version) {
							last->second.reset();
						}
						else {
							written[std::make_pair(last->first.first, version)].reset();
						}
					}
					last = e;
				}
			}

			btree->clear(range);
		}
		else {
			// Set a key
			KeyValue kv = randomKV(maxKeySize, maxValueSize);
			// Sometimes change key to a close previously used key
			if(g_random->random01() < .01) {
				auto i = keys.upper_bound(kv.key);
				if(i != keys.end())
					kv.key = StringRef(kv.arena(), *i);
			}

			debug_printf("Mutation:  Set '%s' -> '%s' @%" PRId64 "\n", kv.key.toString().c_str(), kv.value.toString().c_str(), version);

			++sets;
			keyBytesInserted += kv.key.size();
			valueBytesInserted += kv.value.size();
			mutationBytes += (kv.key.size() + kv.value.size());
			mutationBytesThisCommit += (kv.key.size() + kv.value.size());

			btree->set(kv);
			written[std::make_pair(kv.key.toString(), version)] = kv.value.toString();
			keys.insert(kv.key);
		}
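
		// Both branches above record their effects in 'written' at the current write version, so
		// the shadow state always reflects exactly what the btree should contain at each version
		// that gets committed.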

		// Commit at end or after this commit's mutation bytes are reached
		if(mutationBytes.get() >= mutationBytesTarget || mutationBytesThisCommit >= mutationBytesTargetThisCommit) {
			// Wait for previous commit to finish
			wait(commit);
			printf("Committed.  Next commit %d bytes, %" PRId64 "/%d (%.2f%%)  Stats: Insert %.2f MB/s  ClearedKeys %.2f MB/s  Total %.2f MB/s\n",
				mutationBytesThisCommit,
				mutationBytes.get(),
				mutationBytesTarget,
				(double)mutationBytes.get() / mutationBytesTarget * 100,
				(keyBytesInserted.rate() + valueBytesInserted.rate()) / 1e6,
				keyBytesCleared.rate() / 1e6,
				mutationBytes.rate() / 1e6
			);

			Version v = version;  // Avoid capture of version as a member of *this
			commit = map(btree->commit(), [=](Void) {
				// Notify the background verifier that version is committed and therefore readable
				committedVersions.send(v);
				return Void();
			});

			if(serialTest) {
				// Wait for commit, wait for verification, then start new verification
				wait(commit);
				committedVersions.sendError(end_of_stream());
				debug_printf("Waiting for verification to complete.\n");
				wait(verifyTask);
				committedVersions = PromiseStream<Version>();
				verifyTask = verify(btree, committedVersions.getFuture(), &written, &errorCount, serialTest);
			}

			mutationBytesThisCommit = 0;
			mutationBytesTargetThisCommit = randomSize(maxCommitSize);

			// Recover from disk at random
			if(!serialTest && useDisk && g_random->random01() < .02) {
				printf("Recovering from disk.\n");

				// Wait for outstanding commit
				debug_printf("Waiting for outstanding commit\n");
				wait(commit);

				// Stop and wait for the verifier task
				committedVersions.sendError(end_of_stream());
				debug_printf("Waiting for verification to complete.\n");
				wait(verifyTask);

				Future<Void> closedFuture = btree->onClosed();
				btree->close();
				wait(closedFuture);

				debug_printf("Reopening btree\n");
				IPager *pager = new IndirectShadowPager(pagerFile);
				btree = new VersionedBTree(pager, pagerFile, singleVersion, pageSize);
				wait(btree->init());

				Version v = wait(btree->getLatestVersion());
				ASSERT(v == version);
				printf("Recovered from disk.  Latest version %" PRId64 "\n", v);

				// Create new promise stream and start the verifier again
				committedVersions = PromiseStream<Version>();
				verifyTask = verify(btree, committedVersions.getFuture(), &written, &errorCount, serialTest);
				randomTask = randomReader(btree) || btree->getError();
			}

			++version;
			btree->setWriteVersion(version);
		}
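
		// errorCount is incremented asynchronously by the verifier; fail the test as soon as a
		// mismatch has been observed rather than waiting for the end of the run.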

		// Check for errors
		if(errorCount != 0)
			throw internal_error();
	}

	debug_printf("Waiting for outstanding commit\n");
	wait(commit);
	committedVersions.sendError(end_of_stream());
	debug_printf("Waiting for verification to complete.\n");
	wait(verifyTask);

	// Check for errors
	if(errorCount != 0)
		throw internal_error();

	Future<Void> closedFuture = btree->onClosed();
	btree->close();
	wait(closedFuture);

	return Void();
}
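
// Issues 'count' random point reads against the latest committed version of the btree.  Keys are
// 4-byte big-endian integers in [0, nodeCount), matching the key prefixes written by the
// performance test below, and read throughput is reported at the end.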
ACTOR Future<Void> pointReads(VersionedBTree *btree, int count, int nodeCount) {
	state Version readVer = wait(btree->getLatestVersion());
	state int c = 0;
	state double readStart = timer();
	printf("Executing %d point reads\n", count);
	state Key k = makeString(4);
	state Reference<IStoreCursor> cur = btree->readAtVersion(readVer);
	while(c < count) {
		//cur = btree->readAtVersion(readVer);
		*(uint32_t *)k.begin() = bigEndian32(g_random->randomInt(0, nodeCount));
		wait(success(cur->findFirstEqualOrGreater(k, true, 0)));
		++c;
	}
	double elapsed = timer() - readStart;
	printf("Point read speed %d/s\n", int(count / elapsed));
	return Void();
}
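
// Bulk write performance test:  loads roughly kvBytesTarget bytes of key/value data in ~1 MB
// commit batches, overlapping mutation generation for the next batch with the in-flight commit,
// then measures point read throughput with three concurrent pointReads() actors.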
TEST_CASE("!/redwood/performance/set") {
	state std::string pagerFile = "unittest_pageFile";
	printf("Deleting old test data\n");
	deleteFile(pagerFile);
	deleteFile(pagerFile + "0.pagerlog");
	deleteFile(pagerFile + "1.pagerlog");

	IPager *pager = new IndirectShadowPager(pagerFile);
	state bool singleVersion = true;
	state VersionedBTree *btree = new VersionedBTree(pager, "unittest_pageFile", singleVersion);
	wait(btree->init());

	state int nodeCount = 1e9;
	state int maxChangesPerVersion = 1000;
	state int64_t kvBytesTarget = 200e6;
	int maxKeySize = 50;
	int maxValueSize = 100;
	state int maxConsecutiveRun = 6;
	state std::string key(maxKeySize, 'k');
	state std::string value(maxValueSize, 'v');
	state int64_t kvBytes = 0;
	state int64_t kvBytesTotal = 0;
	state int records = 0;
	state Future<Void> commit = Void();

	printf("Starting.\n");
	state double intervalStart = timer();
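
	// Each outer iteration advances to a new write version and sets a random number of key/value
	// pairs.  Keys are maxKeySize bytes of 'k' filler with the first 4 bytes overwritten by a
	// big-endian integer; baseKey is incremented to produce runs of roughly maxConsecutiveRun
	// consecutive keys before jumping to a new random position, mixing sequential and random
	// insertion patterns.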
	while(kvBytesTotal < kvBytesTarget) {
		Version lastVer = wait(btree->getLatestVersion());
		state Version version = lastVer + 1;
		btree->setWriteVersion(version);
		int changes = g_random->randomInt(0, maxChangesPerVersion);
		int baseKey = g_random->randomInt(0, nodeCount);

		while(changes--) {
			KeyValue kv;
			// Change first 4 bytes of key to an int
			*(uint32_t *)key.data() = bigEndian32(baseKey++);
			if(g_random->randomInt(0, maxConsecutiveRun) == 0) {
				baseKey = g_random->randomInt(0, nodeCount);
			}
			kv.key = StringRef((uint8_t *)key.data(), g_random->randomInt(10, key.size()));
			kv.value = StringRef((uint8_t *)value.data(), g_random->randomInt(0, value.size()));
			btree->set(kv);

			kvBytes += kv.key.size() + kv.value.size();
			kvBytesTotal += kv.key.size() + kv.value.size();
			++records;
		}
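
		// Commit roughly every 1 MB of key/value bytes (or at the very end).  The previous commit
		// is awaited first, so at most one commit is in flight while the next batch is generated.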
		if(kvBytes > 1e6 || kvBytesTotal >= kvBytesTarget) {
			wait(commit);

			// Avoid capturing this to freeze counter values
			int recs = records;
			int kvb = kvBytes;
			commit = map(btree->commit(), [=](Void result) {
				double elapsed = timer() - intervalStart;
				printf("Committed %d kvBytes in %d records in %f seconds, %.2f MB/s\n", kvb, recs, elapsed, kvb / elapsed / 1e6);
				intervalStart = timer();
				return Void();
			});
			records = 0;
			kvBytes = 0;
		}
	}

	wait(commit);

	state int reads = 3e5;
	wait(pointReads(btree, reads, nodeCount) && pointReads(btree, reads, nodeCount) && pointReads(btree, reads, nodeCount));

	Future<Void> closedFuture = btree->onClosed();
	btree->close();
	wait(closedFuture);

	return Void();
}