merge master

commit e068c478b5
@@ -62,6 +62,6 @@ testers = {
     'ruby': Tester('ruby', _absolute_path('ruby/tests/tester.rb'), 2040, 23, MAX_API_VERSION),
     'java': Tester('java', _java_cmd + 'StackTester', 2040, 510, MAX_API_VERSION, types=ALL_TYPES),
     'java_async': Tester('java', _java_cmd + 'AsyncStackTester', 2040, 510, MAX_API_VERSION, types=ALL_TYPES),
-    'go': Tester('go', _absolute_path('go/build/bin/_stacktester'), 2040, 200, MAX_API_VERSION),
+    'go': Tester('go', _absolute_path('go/build/bin/_stacktester'), 2040, 200, MAX_API_VERSION, types=ALL_TYPES),
     'flow': Tester('flow', _absolute_path('flow/bin/fdb_flow_tester'), 63, 500, MAX_API_VERSION, directory_snapshot_ops_enabled=False),
 }

@@ -80,7 +80,7 @@ FOR /F "tokens=1" %%i in ('hg.exe id') do copy /Y "$(TargetPath)" "$(TargetPath)
   <ClCompile>
     <WarningLevel>Level3</WarningLevel>
     <Optimization>Disabled</Optimization>
-    <PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=_WIN32_WINNT_WS03;WINVER=_WIN32_WINNT_WS03;NTDDI_VERSION=NTDDI_WS03;_DEBUG;_HAS_ITERATOR_DEBUGGING=0;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+    <PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=_WIN32_WINNT_WS03;BOOST_ALL_NO_LIB;WINVER=_WIN32_WINNT_WS03;NTDDI_VERSION=NTDDI_WS03;_DEBUG;_HAS_ITERATOR_DEBUGGING=0;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
     <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
     <AdditionalOptions>/bigobj @..\..\flow/no_intellisense.opt %(AdditionalOptions)</AdditionalOptions>
   </ClCompile>
@@ -95,7 +95,7 @@ FOR /F "tokens=1" %%i in ('hg.exe id') do copy /Y "$(TargetPath)" "$(TargetPath)
     <Optimization>MaxSpeed</Optimization>
     <FunctionLevelLinking>true</FunctionLevelLinking>
     <IntrinsicFunctions>true</IntrinsicFunctions>
-    <PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=_WIN32_WINNT_WS03;WINVER=_WIN32_WINNT_WS03;NTDDI_VERSION=NTDDI_WS03;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+    <PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=_WIN32_WINNT_WS03;BOOST_ALL_NO_LIB;WINVER=_WIN32_WINNT_WS03;NTDDI_VERSION=NTDDI_WS03;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
     <RuntimeLibrary>MultiThreaded</RuntimeLibrary>
     <AdditionalOptions>/bigobj @..\..\flow/no_intellisense.opt %(AdditionalOptions)</AdditionalOptions>
   </ClCompile>

@@ -95,7 +95,7 @@
     <Optimization>Disabled</Optimization>
     <BasicRuntimeChecks>EnableFastChecks</BasicRuntimeChecks>
     <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
-    <PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=0x0502;WINVER=0x0502;NTDDI_VERSION=0x05020000;_DEBUG;_HAS_ITERATOR_DEBUGGING=0;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+    <PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=0x0502;WINVER=0x0502;BOOST_ALL_NO_LIB;NTDDI_VERSION=0x05020000;_DEBUG;_HAS_ITERATOR_DEBUGGING=0;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
     <AdditionalIncludeDirectories>%(AdditionalIncludeDirectories);..\c</AdditionalIncludeDirectories>
     <MultiProcessorCompilation>true</MultiProcessorCompilation>
     <AdditionalOptions>/bigobj @..\..\flow/no_intellisense.opt %(AdditionalOptions)</AdditionalOptions>
@@ -118,7 +118,7 @@
     <Optimization>Full</Optimization>
     <RuntimeLibrary>MultiThreaded</RuntimeLibrary>
     <IntrinsicFunctions>true</IntrinsicFunctions>
-    <PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=0x0502;WINVER=0x0502;NTDDI_VERSION=0x05020000;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+    <PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=0x0502;WINVER=0x0502;BOOST_ALL_NO_LIB;NTDDI_VERSION=0x05020000;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
     <AdditionalIncludeDirectories>%(AdditionalIncludeDirectories);..\c</AdditionalIncludeDirectories>
     <EnableEnhancedInstructionSet>NotSet</EnableEnhancedInstructionSet>
     <EnablePREfast>false</EnablePREfast>

@@ -77,7 +77,7 @@
     </PrecompiledHeader>
     <WarningLevel>Level3</WarningLevel>
     <Optimization>Disabled</Optimization>
-    <PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=_WIN32_WINNT_WS03;WINVER=_WIN32_WINNT_WS03;NTDDI_VERSION=NTDDI_WS03;_DEBUG;_HAS_ITERATOR_DEBUGGING=0;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+    <PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=_WIN32_WINNT_WS03;BOOST_ALL_NO_LIB;WINVER=_WIN32_WINNT_WS03;NTDDI_VERSION=NTDDI_WS03;_DEBUG;_HAS_ITERATOR_DEBUGGING=0;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
     <MultiProcessorCompilation>true</MultiProcessorCompilation>
     <MinimalRebuild>false</MinimalRebuild>
     <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>

@@ -25,8 +25,6 @@ import (
 	"encoding/binary"
 	"encoding/hex"
 	"fmt"
-	"github.com/apple/foundationdb/bindings/go/src/fdb"
-	"github.com/apple/foundationdb/bindings/go/src/fdb/tuple"
 	"log"
 	"math/big"
 	"os"

@@ -37,6 +35,9 @@ import (
 	"strings"
 	"sync"
 	"time"
+
+	"github.com/apple/foundationdb/bindings/go/src/fdb"
+	"github.com/apple/foundationdb/bindings/go/src/fdb/tuple"
 )
 
 const verbose bool = false

@@ -104,7 +105,7 @@ func (sm *StackMachine) waitAndPop() (ret stackEntry) {
 	switch el := ret.item.(type) {
 	case []byte:
 		ret.item = el
-	case int64, uint64, *big.Int, string, bool, tuple.UUID, float32, float64, tuple.Tuple:
+	case int64, uint64, *big.Int, string, bool, tuple.UUID, float32, float64, tuple.Tuple, tuple.Versionstamp:
 		ret.item = el
 	case fdb.Key:
 		ret.item = []byte(el)
@@ -661,6 +662,24 @@ func (sm *StackMachine) processInst(idx int, inst tuple.Tuple) {
 			t = append(t, sm.waitAndPop().item)
 		}
 		sm.store(idx, []byte(t.Pack()))
+	case op == "TUPLE_PACK_WITH_VERSIONSTAMP":
+		var t tuple.Tuple
+
+		prefix := sm.waitAndPop().item.([]byte)
+		c := sm.waitAndPop().item.(int64)
+		for i := 0; i < int(c); i++ {
+			t = append(t, sm.waitAndPop().item)
+		}
+
+		packed, err := t.PackWithVersionstamp(prefix)
+		if err != nil && strings.Contains(err.Error(), "No incomplete") {
+			sm.store(idx, []byte("ERROR: NONE"))
+		} else if err != nil {
+			sm.store(idx, []byte("ERROR: MULTIPLE"))
+		} else {
+			sm.store(idx, []byte("OK"))
+			sm.store(idx, packed)
+		}
 	case op == "TUPLE_UNPACK":
 		t, e := tuple.Unpack(fdb.Key(sm.waitAndPop().item.([]byte)))
 		if e != nil {
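For context on the three results the tester stores above, here is a minimal sketch of how the two PackWithVersionstamp error cases surface, assuming the Go bindings' tuple package and that an API version has been selected (the program still needs the FoundationDB client library in order to load):

package main

import (
	"fmt"

	"github.com/apple/foundationdb/bindings/go/src/fdb"
	"github.com/apple/foundationdb/bindings/go/src/fdb/tuple"
)

func main() {
	fdb.MustAPIVersion(610)

	// No incomplete versionstamp in the tuple -> the "No incomplete ..." error,
	// which the tester maps to "ERROR: NONE".
	_, err := tuple.Tuple{"a"}.PackWithVersionstamp(nil)
	fmt.Println(err)

	// Two incomplete versionstamps -> the "can only contain one" error, which
	// the tester maps to "ERROR: MULTIPLE".
	two := tuple.Tuple{tuple.IncompleteVersionstamp(0), tuple.IncompleteVersionstamp(1)}
	_, err = two.PackWithVersionstamp(nil)
	fmt.Println(err)
}
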
@@ -812,7 +831,8 @@ func (sm *StackMachine) processInst(idx int, inst tuple.Tuple) {
 		tr.Options().SetRetryLimit(50)
 		tr.Options().SetMaxRetryDelay(100)
 		tr.Options().SetUsedDuringCommitProtectionDisable()
-		tr.Options().SetTransactionLoggingEnable("my_transaction")
+		tr.Options().SetDebugTransactionIdentifier("my_transaction")
+		tr.Options().SetLogTransaction()
 		tr.Options().SetReadLockAware()
 		tr.Options().SetLockAware()
 

@@ -39,6 +39,7 @@ package tuple
 import (
 	"bytes"
 	"encoding/binary"
+	"errors"
 	"fmt"
 	"math"
 	"math/big"
@@ -72,6 +73,39 @@ type Tuple []TupleElement
 // an instance of this type.
 type UUID [16]byte
+
+// Versionstamp is a struct for a FoundationDB versionstamp. Versionstamps are
+// 12 bytes long, composed of a 10 byte transaction version and a 2 byte user
+// version. The transaction version is filled in at commit time and the user
+// version is provided by the application to order results within a transaction.
+type Versionstamp struct {
+	TransactionVersion [10]byte
+	UserVersion        uint16
+}
+
+var incompleteTransactionVersion = [10]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}
+
+const versionstampLength = 12
+
+// IncompleteVersionstamp is the constructor you should use to make
+// an incomplete versionstamp to use in a tuple.
+func IncompleteVersionstamp(userVersion uint16) Versionstamp {
+	return Versionstamp{
+		TransactionVersion: incompleteTransactionVersion,
+		UserVersion:        userVersion,
+	}
+}
+
+// Bytes converts a Versionstamp struct to a byte slice for encoding in a tuple.
+func (v Versionstamp) Bytes() []byte {
+	var scratch [versionstampLength]byte
+
+	copy(scratch[:], v.TransactionVersion[:])
+
+	binary.BigEndian.PutUint16(scratch[10:], v.UserVersion)
+
+	return scratch[:]
+}
 
 // Type codes: These prefix the different elements in a packed Tuple
 // to indicate what type they are.
 const nilCode = 0x00
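A minimal sketch of what the Bytes() encoding above produces, assuming this tuple package (no cluster needed beyond loading the client library):

package main

import (
	"fmt"

	"github.com/apple/foundationdb/bindings/go/src/fdb/tuple"
)

func main() {
	// Incomplete: the 10-byte transaction version is all 0xFF until commit
	// time; the user version (1) is big-endian in the last two bytes.
	v := tuple.IncompleteVersionstamp(1)
	fmt.Printf("% x\n", v.Bytes()) // ff ff ff ff ff ff ff ff ff ff 00 01

	// A complete versionstamp can be built directly from a known transaction version.
	c := tuple.Versionstamp{TransactionVersion: [10]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 42}, UserVersion: 7}
	fmt.Println(len(c.Bytes())) // 12
}
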
@@ -86,6 +120,7 @@ const doubleCode = 0x21
 const falseCode = 0x26
 const trueCode = 0x27
 const uuidCode = 0x30
+const versionstampCode = 0x33
 
 var sizeLimits = []uint64{
 	1<<(0*8) - 1,
@@ -122,7 +157,15 @@ func adjustFloatBytes(b []byte, encode bool) {
 }
 
 type packer struct {
-	buf []byte
+	versionstampPos int32
+	buf             []byte
 }
+
+func newPacker() *packer {
+	return &packer{
+		versionstampPos: -1,
+		buf:             make([]byte, 0, 64),
+	}
+}
 
 func (p *packer) putByte(b byte) {
@@ -249,7 +292,22 @@ func (p *packer) encodeUUID(u UUID) {
 	p.putBytes(u[:])
 }
 
-func (p *packer) encodeTuple(t Tuple, nested bool) {
+func (p *packer) encodeVersionstamp(v Versionstamp) {
+	p.putByte(versionstampCode)
+
+	isIncomplete := v.TransactionVersion == incompleteTransactionVersion
+	if isIncomplete {
+		if p.versionstampPos != -1 {
+			panic("Tuple can only contain one incomplete versionstamp")
+		}
+
+		p.versionstampPos = int32(len(p.buf))
+	}
+
+	p.putBytes(v.Bytes())
+}
+
+func (p *packer) encodeTuple(t Tuple, nested bool, versionstamps bool) {
 	if nested {
 		p.putByte(nestedCode)
 	}
@@ -257,7 +315,7 @@ func (p *packer) encodeTuple(t Tuple, nested bool) {
 	for i, e := range t {
 		switch e := e.(type) {
 		case Tuple:
-			p.encodeTuple(e, true)
+			p.encodeTuple(e, true, versionstamps)
 		case nil:
 			p.putByte(nilCode)
 			if nested {
@@ -293,6 +351,12 @@ func (p *packer) encodeTuple(t Tuple, nested bool) {
 			}
 		case UUID:
 			p.encodeUUID(e)
+		case Versionstamp:
+			if !versionstamps && e.TransactionVersion == incompleteTransactionVersion {
+				panic("Incomplete Versionstamp included in vanilla tuple pack")
+			}
+
+			p.encodeVersionstamp(e)
 		default:
 			panic(fmt.Sprintf("unencodable element at index %d (%v, type %T)", i, t[i], t[i]))
 		}
@@ -306,19 +370,103 @@ func (p *packer) encodeTuple(t Tuple, nested bool) {
 // Pack returns a new byte slice encoding the provided tuple. Pack will panic if
 // the tuple contains an element of any type other than []byte,
 // fdb.KeyConvertible, string, int64, int, uint64, uint, *big.Int, big.Int, float32,
-// float64, bool, tuple.UUID, nil, or a Tuple with elements of valid types. It will
-// also panic if an integer is specified with a value outside the range
-// [-2**2040+1, 2**2040-1]
+// float64, bool, tuple.UUID, tuple.Versionstamp, nil, or a Tuple with elements of
+// valid types. It will also panic if an integer is specified with a value outside
+// the range [-2**2040+1, 2**2040-1]
 //
 // Tuple satisfies the fdb.KeyConvertible interface, so it is not necessary to
 // call Pack when using a Tuple with a FoundationDB API function that requires a
 // key.
+//
+// This method will panic if the tuple contains an incomplete Versionstamp. Use
+// PackWithVersionstamp instead.
+//
 func (t Tuple) Pack() []byte {
-	p := packer{buf: make([]byte, 0, 64)}
-	p.encodeTuple(t, false)
+	p := newPacker()
+	p.encodeTuple(t, false, false)
 	return p.buf
 }
+
+// PackWithVersionstamp packs the specified tuple into a key for versionstamp
+// operations. See Pack for more information. This function will return an error
+// if you attempt to pack a tuple with more than one versionstamp. It will also
+// return an error if the versionstamp position does not fit in a uint16 and the
+// API version is less than 520.
+func (t Tuple) PackWithVersionstamp(prefix []byte) ([]byte, error) {
+	hasVersionstamp, err := t.HasIncompleteVersionstamp()
+	if err != nil {
+		return nil, err
+	}
+
+	apiVersion, err := fdb.GetAPIVersion()
+	if err != nil {
+		return nil, err
+	}
+
+	if !hasVersionstamp {
+		return nil, errors.New("No incomplete versionstamp included in tuple pack with versionstamp")
+	}
+
+	p := newPacker()
+
+	if prefix != nil {
+		p.putBytes(prefix)
+	}
+
+	p.encodeTuple(t, false, true)
+
+	if hasVersionstamp {
+		var scratch [4]byte
+		var offsetIndex int
+		if apiVersion < 520 {
+			if p.versionstampPos > math.MaxUint16 {
+				return nil, errors.New("Versionstamp position too large")
+			}
+
+			offsetIndex = 2
+			binary.LittleEndian.PutUint16(scratch[:], uint16(p.versionstampPos))
+		} else {
+			offsetIndex = 4
+			binary.LittleEndian.PutUint32(scratch[:], uint32(p.versionstampPos))
+		}
+
+		p.putBytes(scratch[0:offsetIndex])
+	}
+
+	return p.buf, nil
+}
+
+// HasIncompleteVersionstamp determines if there is at least one incomplete
+// versionstamp in a tuple. This function will return an error if this tuple has
+// more than one versionstamp.
+func (t Tuple) HasIncompleteVersionstamp() (bool, error) {
+	incompleteCount := t.countIncompleteVersionstamps()
+
+	var err error
+	if incompleteCount > 1 {
+		err = errors.New("Tuple can only contain one incomplete versionstamp")
+	}
+
+	return incompleteCount >= 1, err
+}
+
+func (t Tuple) countIncompleteVersionstamps() int {
+	incompleteCount := 0
+
+	for _, el := range t {
+		switch e := el.(type) {
+		case Versionstamp:
+			if e.TransactionVersion == incompleteTransactionVersion {
+				incompleteCount++
+			}
+		case Tuple:
+			incompleteCount += e.countIncompleteVersionstamps()
+		}
+	}
+
+	return incompleteCount
+}
 
 func findTerminator(b []byte) int {
 	bp := b
 	var length int
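To illustrate how PackWithVersionstamp is meant to be combined with the SET_VERSIONSTAMPED_KEY atomic operation, here is a sketch assuming API version 610, a reachable cluster, and an illustrative key layout:

package main

import (
	"log"

	"github.com/apple/foundationdb/bindings/go/src/fdb"
	"github.com/apple/foundationdb/bindings/go/src/fdb/tuple"
)

func main() {
	fdb.MustAPIVersion(610)
	db := fdb.MustOpenDefault()

	// The trailing offset bytes appended by PackWithVersionstamp tell the
	// atomic operation where to splice in the commit version.
	t := tuple.Tuple{"events", tuple.IncompleteVersionstamp(0)}
	key, err := t.PackWithVersionstamp(nil)
	if err != nil {
		log.Fatal(err)
	}

	_, err = db.Transact(func(tr fdb.Transaction) (interface{}, error) {
		tr.SetVersionstampedKey(fdb.Key(key), []byte("payload"))
		return nil, nil
	})
	if err != nil {
		log.Fatal(err)
	}
}
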
@@ -438,6 +586,20 @@ func decodeUUID(b []byte) (UUID, int) {
 	return u, 17
 }
 
+func decodeVersionstamp(b []byte) (Versionstamp, int) {
+	var transactionVersion [10]byte
+	var userVersion uint16
+
+	copy(transactionVersion[:], b[1:11])
+
+	userVersion = binary.BigEndian.Uint16(b[11:])
+
+	return Versionstamp{
+		TransactionVersion: transactionVersion,
+		UserVersion:        userVersion,
+	}, versionstampLength + 1
+}
+
 func decodeTuple(b []byte, nested bool) (Tuple, int, error) {
 	var t Tuple
 
@@ -489,6 +651,11 @@ func decodeTuple(b []byte, nested bool) (Tuple, int, error) {
 				return nil, i, fmt.Errorf("insufficient bytes to decode UUID starting at position %d of byte array for tuple", i)
 			}
 			el, off = decodeUUID(b[i:])
+		case b[i] == versionstampCode:
+			if i+versionstampLength+1 > len(b) {
+				return nil, i, fmt.Errorf("insufficient bytes to decode Versionstamp starting at position %d of byte array for tuple", i)
+			}
+			el, off = decodeVersionstamp(b[i:])
 		case b[i] == nestedCode:
 			var err error
 			el, off, err = decodeTuple(b[i+1:], true)

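A sketch of the decode path above via a Pack/Unpack round trip of a complete versionstamp, assuming this tuple package (no cluster needed beyond loading the client library):

package main

import (
	"fmt"

	"github.com/apple/foundationdb/bindings/go/src/fdb"
	"github.com/apple/foundationdb/bindings/go/src/fdb/tuple"
)

func main() {
	v := tuple.Versionstamp{TransactionVersion: [10]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, UserVersion: 3}

	// Complete versionstamps are legal in a vanilla Pack; only incomplete
	// ones require PackWithVersionstamp.
	packed := tuple.Tuple{v}.Pack()

	decoded, err := tuple.Unpack(fdb.Key(packed))
	if err != nil {
		panic(err)
	}
	fmt.Println(decoded[0].(tuple.Versionstamp).UserVersion) // 3
}
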
@@ -60,7 +60,7 @@
     <WarningLevel>Level3</WarningLevel>
     <Optimization>Disabled</Optimization>
     <AdditionalIncludeDirectories>%(AdditionalIncludeDirectories);$(SolutionDir)bindings\c</AdditionalIncludeDirectories>
-    <PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=_WIN32_WINNT_WS03;WINVER=_WIN32_WINNT_WS03;NTDDI_VERSION=NTDDI_WS03;_DEBUG;_HAS_ITERATOR_DEBUGGING=0;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+    <PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=_WIN32_WINNT_WS03;BOOST_ALL_NO_LIB;WINVER=_WIN32_WINNT_WS03;NTDDI_VERSION=NTDDI_WS03;_DEBUG;_HAS_ITERATOR_DEBUGGING=0;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
     <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
     <AdditionalOptions>/bigobj "@$(SolutionDir)flow/no_intellisense.opt" %(AdditionalOptions)</AdditionalOptions>
   </ClCompile>

@@ -75,7 +75,7 @@
     <FunctionLevelLinking>true</FunctionLevelLinking>
     <IntrinsicFunctions>true</IntrinsicFunctions>
     <AdditionalIncludeDirectories>%(AdditionalIncludeDirectories);$(SolutionDir)bindings\c</AdditionalIncludeDirectories>
-    <PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=_WIN32_WINNT_WS03;WINVER=_WIN32_WINNT_WS03;NTDDI_VERSION=NTDDI_WS03;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+    <PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=_WIN32_WINNT_WS03;BOOST_ALL_NO_LIB;WINVER=_WIN32_WINNT_WS03;NTDDI_VERSION=NTDDI_WS03;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
     <RuntimeLibrary>MultiThreaded</RuntimeLibrary>
     <AdditionalOptions>/bigobj "@$(SolutionDir)flow/no_intellisense.opt" %(AdditionalOptions)</AdditionalOptions>
   </ClCompile>

@@ -40,11 +40,26 @@ class FDBTransaction extends NativeObjectWrapper implements Transaction, OptionC
 	public final ReadTransaction snapshot;
 
 	class ReadSnapshot implements ReadTransaction {
+		@Override
+		public boolean isSnapshot() {
+			return true;
+		}
+
+		@Override
+		public ReadTransaction snapshot() {
+			return this;
+		}
+
 		@Override
 		public CompletableFuture<Long> getReadVersion() {
 			return FDBTransaction.this.getReadVersion();
 		}
 
+		@Override
+		public void setReadVersion(long version) {
+			FDBTransaction.this.setReadVersion(version);
+		}
+
 		@Override
 		public CompletableFuture<byte[]> get(byte[] key) {
 			return get_internal(key, true);
@@ -126,6 +141,18 @@ class FDBTransaction extends NativeObjectWrapper implements Transaction, OptionC
 			return getRange(range, ReadTransaction.ROW_LIMIT_UNLIMITED);
 		}
 
+		@Override
+		public boolean addReadConflictRangeIfNotSnapshot(byte[] keyBegin, byte[] keyEnd) {
+			// This is a snapshot transaction; do not add the conflict range.
+			return false;
+		}
+
+		@Override
+		public boolean addReadConflictKeyIfNotSnapshot(byte[] key) {
+			// This is a snapshot transaction; do not add the conflict key.
+			return false;
+		}
+
 		@Override
 		public TransactionOptions options() {
 			return FDBTransaction.this.options();
@@ -157,6 +184,11 @@ class FDBTransaction extends NativeObjectWrapper implements Transaction, OptionC
 		transactionOwner = true;
 	}
 
+	@Override
+	public boolean isSnapshot() {
+		return false;
+	}
+
 	@Override
 	public ReadTransaction snapshot() {
 		return snapshot;
@@ -321,11 +353,23 @@ class FDBTransaction extends NativeObjectWrapper implements Transaction, OptionC
 		}
 	}
 
+	@Override
+	public boolean addReadConflictRangeIfNotSnapshot(byte[] keyBegin, byte[] keyEnd) {
+		addReadConflictRange(keyBegin, keyEnd);
+		return true;
+	}
+
 	@Override
 	public void addReadConflictRange(byte[] keyBegin, byte[] keyEnd) {
 		addConflictRange(keyBegin, keyEnd, ConflictRangeType.READ);
 	}
 
+	@Override
+	public boolean addReadConflictKeyIfNotSnapshot(byte[] key) {
+		addReadConflictKey(key);
+		return true;
+	}
+
 	@Override
 	public void addReadConflictKey(byte[] key) {
 		addConflictRange(key, ByteArrayUtil.join(key, new byte[]{(byte) 0}), ConflictRangeType.READ);

@@ -32,7 +32,7 @@ import com.apple.foundationdb.tuple.Tuple;
  * <br>
  * <b>Note:</b> Client must call {@link Transaction#commit()} and wait on the result on all transactions,
  * even ones that only read. This is done automatically when using the retry loops from
- * {@link Database#run(Function)}. This is explained more in the intro to {@link Transaction}.
+ * {@link Database#run(java.util.function.Function)}. This is explained more in the intro to {@link Transaction}.
  *
  * @see Transaction
  */
@@ -43,12 +43,73 @@ public interface ReadTransaction extends ReadTransactionContext {
 	 */
 	int ROW_LIMIT_UNLIMITED = 0;
 
+	/**
+	 * Gets whether this transaction is a snapshot view of the database. In other words, this returns
+	 * whether read conflict ranges are omitted for any reads done through this {@code ReadTransaction}.
+	 * <br>
+	 * For more information about how to use snapshot reads correctly, see
+	 * <a href="/foundationdb/developer-guide.html#using-snapshot-reads" target="_blank">Using snapshot reads</a>.
+	 *
+	 * @return whether this is a snapshot view of the database with relaxed isolation properties
+	 * @see #snapshot()
+	 */
+	boolean isSnapshot();
+
+	/**
+	 * Return a special-purpose, read-only view of the database. Reads done through this interface are known as "snapshot reads".
+	 * Snapshot reads selectively relax FoundationDB's isolation property, reducing
+	 * <a href="/foundationdb/developer-guide.html#transaction-conflicts" target="_blank">Transaction conflicts</a>
+	 * but making reasoning about concurrency harder.<br>
+	 * <br>
+	 * For more information about how to use snapshot reads correctly, see
+	 * <a href="/foundationdb/developer-guide.html#using-snapshot-reads" target="_blank">Using snapshot reads</a>.
+	 *
+	 * @return a read-only view of this {@code ReadTransaction} with relaxed isolation properties
+	 */
+	ReadTransaction snapshot();
+
 	/**
 	 * Gets the version at which the reads for this {@code Transaction} will access the database.
 	 * @return the version for database reads
 	 */
 	CompletableFuture<Long> getReadVersion();
 
+	/**
+	 * Directly sets the version of the database at which to execute reads. The
+	 * normal operation of a transaction is to determine an appropriately recent
+	 * version; this call overrides that behavior. If the version is set too
+	 * far in the past, {@code past_version} errors will be thrown from read operations.
+	 * <i>Infrequently used.</i>
+	 *
+	 * @param version the version at which to read from the database
+	 */
+	void setReadVersion(long version);
+
+	/**
+	 * Adds the read conflict range that this {@code ReadTransaction} would have added as if it had read
+	 * the given key range. If this is a {@linkplain #snapshot() snapshot} view of the database, this will
+	 * not add the conflict range. This mirrors how reading a range through a snapshot view
+	 * of the database does not add a conflict range for the read keys.
+	 *
+	 * @param keyBegin the first key in the range (inclusive)
+	 * @param keyEnd the last key in the range (exclusive)
+	 * @return {@code true} if the read conflict range was added and {@code false} otherwise
+	 * @see Transaction#addReadConflictRange(byte[], byte[])
+	 */
+	boolean addReadConflictRangeIfNotSnapshot(byte[] keyBegin, byte[] keyEnd);
+
+	/**
+	 * Adds the read conflict range that this {@code ReadTransaction} would have added as if it had read
+	 * the given key. If this is a {@linkplain #snapshot() snapshot} view of the database, this will
+	 * not add the conflict range. This mirrors how reading a key through a snapshot view
+	 * of the database does not add a conflict range for the read key.
+	 *
+	 * @param key the key to add to the read conflict range set (if this is not a snapshot view of the database)
+	 * @return {@code true} if the read conflict key was added and {@code false} otherwise
+	 * @see Transaction#addReadConflictKey(byte[])
+	 */
+	boolean addReadConflictKeyIfNotSnapshot(byte[] key);
+
 	/**
 	 * Gets a value from the database. The call will return {@code null} if the key is not
 	 * present in the database.

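The Java interface above has a direct analogue in the Go bindings. A sketch of the snapshot-read semantics it documents, assuming API version 610, a reachable cluster, and an illustrative key:

package main

import (
	"fmt"
	"log"

	"github.com/apple/foundationdb/bindings/go/src/fdb"
)

func main() {
	fdb.MustAPIVersion(610)
	db := fdb.MustOpenDefault()

	v, err := db.Transact(func(tr fdb.Transaction) (interface{}, error) {
		// A snapshot read adds no read conflict range, so a concurrent write
		// to this key will not force this transaction to retry.
		b, err := tr.Snapshot().Get(fdb.Key("some-key")).Get()
		if err != nil {
			return nil, err
		}
		return b, nil
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%v\n", v)
}
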
@@ -76,31 +76,6 @@ import com.apple.foundationdb.tuple.Tuple;
  */
 public interface Transaction extends AutoCloseable, ReadTransaction, TransactionContext {
 
-	/**
-	 * Return special-purpose, read-only view of the database. Reads done through this interface are known as "snapshot reads".
-	 * Snapshot reads selectively relax FoundationDB's isolation property, reducing
-	 * <a href="/foundationdb/developer-guide.html#transaction-conflicts" target="_blank">Transaction conflicts</a>
-	 * but making reasoning about concurrency harder.<br>
-	 * <br>
-	 * For more information about how to use snapshot reads correctly, see
-	 * <a href="/foundationdb/developer-guide.html#using-snapshot-reads" target="_blank">Using snapshot reads</a>.
-	 *
-	 * @return a read-only view of this {@code Transaction} with relaxed isolation properties
-	 */
-	ReadTransaction snapshot();
-
-	/**
-	 * Directly sets the version of the database at which to execute reads. The
-	 * normal operation of a transaction is to determine an appropriately recent
-	 * version; this call overrides that behavior. If the version is set too
-	 * far in the past, {@code past_version} errors will be thrown from read operations.
-	 * <i>Infrequently used.</i>
-	 *
-	 * @param version the version at which to read from the database
-	 */
-	void setReadVersion(long version);
-
-
 	/**
 	 * Adds a range of keys to the transaction's read conflict ranges as if you
 	 * had read the range. As a result, other transactions that write a key in
@@ -116,7 +91,7 @@ public interface Transaction extends AutoCloseable, ReadTransaction, Transaction
 	 * the key. As a result, other transactions that concurrently write this key
 	 * could cause the transaction to fail with a conflict.
 	 *
-	 * @param key the key to be added to the range
+	 * @param key the key to be added to the read conflict range set
 	 */
 	void addReadConflictKey(byte[] key);
 

@@ -482,7 +482,8 @@ public class AsyncStackTester {
 	tr.options().setRetryLimit(50);
 	tr.options().setMaxRetryDelay(100);
 	tr.options().setUsedDuringCommitProtectionDisable();
-	tr.options().setTransactionLoggingEnable("my_transaction");
+	tr.options().setDebugTransactionIdentifier("my_transaction");
+	tr.options().setLogTransaction();
 	tr.options().setReadLockAware();
 	tr.options().setLockAware();
 

@@ -0,0 +1,209 @@
+/*
+ * SnapshotTransactionTest.java
+ *
+ * This source file is part of the FoundationDB open source project
+ *
+ * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.apple.foundationdb.test;
+
+import java.util.UUID;
+import java.util.concurrent.CompletionException;
+
+import com.apple.foundationdb.Database;
+import com.apple.foundationdb.FDB;
+import com.apple.foundationdb.FDBException;
+import com.apple.foundationdb.ReadTransaction;
+import com.apple.foundationdb.Transaction;
+import com.apple.foundationdb.subspace.Subspace;
+import com.apple.foundationdb.tuple.Tuple;
+
+/**
+ * Some tests regarding conflict ranges to make sure they do what we expect.
+ */
+public class SnapshotTransactionTest {
+	private static final int CONFLICT_CODE = 1020;
+	private static final Subspace SUBSPACE = new Subspace(Tuple.from("test", "conflict_ranges"));
+
+	public static void main(String[] args) {
+		FDB fdb = FDB.selectAPIVersion(610);
+		try(Database db = fdb.open()) {
+			snapshotReadShouldNotConflict(db);
+			snapshotShouldNotAddConflictRange(db);
+			snapshotOnSnapshot(db);
+		}
+	}
+
+	// Adding a random write conflict key makes it so the transaction conflicts are actually resolved.
+	public static void addUUIDConflicts(Transaction... trs) {
+		for(Transaction tr : trs) {
+			tr.options().setTimeout(1000);
+			tr.getReadVersion().join();
+			byte[] key = SUBSPACE.pack(Tuple.from("uuids", UUID.randomUUID()));
+			tr.addReadConflictKey(key);
+			tr.addWriteConflictKey(key);
+		}
+	}
+
+	public static <E extends Exception> void validateConflict(E e) throws E {
+		FDBException fdbE = null;
+		Throwable current = e;
+		while(current != null && fdbE == null) {
+			if(current instanceof FDBException) {
+				fdbE = (FDBException)current;
+			}
+			else {
+				current = current.getCause();
+			}
+		}
+		if(fdbE == null) {
+			System.err.println("Error was not caused by FDBException");
+			throw e;
+		}
+		else {
+			int errorCode = fdbE.getCode();
+			if(errorCode != CONFLICT_CODE) {
+				System.err.println("FDB error was not caused by a transaction conflict");
+				throw e;
+			}
+		}
+	}
+
+	public static void snapshotReadShouldNotConflict(Database db) {
+		try(Transaction tr1 = db.createTransaction(); Transaction tr2 = db.createTransaction(); Transaction tr3 = db.createTransaction()) {
+			addUUIDConflicts(tr1, tr2, tr3);
+
+			// Verify reading a *range* causes a conflict
+			tr1.addWriteConflictKey(SUBSPACE.pack(Tuple.from("foo", 0L)));
+			tr2.snapshot().getRange(SUBSPACE.range(Tuple.from("foo"))).asList().join();
+			tr3.getRange(SUBSPACE.range(Tuple.from("foo"))).asList().join();
+
+			// Two successful commits
+			tr1.commit().join();
+			tr2.commit().join();
+
+			// Read from tr3 should conflict with update from tr1.
+			try {
+				tr3.commit().join();
+				throw new RuntimeException("tr3 did not conflict");
+			} catch(CompletionException e) {
+				validateConflict(e);
+			}
+		}
+		try(Transaction tr1 = db.createTransaction(); Transaction tr2 = db.createTransaction(); Transaction tr3 = db.createTransaction()) {
+			addUUIDConflicts(tr1, tr2, tr3);
+
+			// Verify reading a *key* causes a conflict
+			byte[] key = SUBSPACE.pack(Tuple.from("foo", 1066L));
+			tr1.addWriteConflictKey(key);
+			tr2.snapshot().get(key);
+			tr3.get(key).join();
+
+			tr1.commit().join();
+			tr2.commit().join();
+
+			try {
+				tr3.commit().join();
+				throw new RuntimeException("tr3 did not conflict");
+			}
+			catch(CompletionException e) {
+				validateConflict(e);
+			}
+		}
+	}
+
+	public static void snapshotShouldNotAddConflictRange(Database db) {
+		try(Transaction tr1 = db.createTransaction(); Transaction tr2 = db.createTransaction(); Transaction tr3 = db.createTransaction()) {
+			addUUIDConflicts(tr1, tr2, tr3);
+
+			// Verify adding a read conflict *range* causes a conflict.
+			Subspace fooSubspace = SUBSPACE.subspace(Tuple.from("foo"));
+			tr1.addWriteConflictKey(fooSubspace.pack(Tuple.from(0L)));
+			byte[] beginKey = fooSubspace.range().begin;
+			byte[] endKey = fooSubspace.range().end;
+			if(tr2.snapshot().addReadConflictRangeIfNotSnapshot(beginKey, endKey)) {
+				throw new RuntimeException("snapshot read said it added a conflict range");
+			}
+			if(!tr3.addReadConflictRangeIfNotSnapshot(beginKey, endKey)) {
+				throw new RuntimeException("non-snapshot read said it did not add a conflict range");
+			}
+
+			// Two successful commits
+			tr1.commit().join();
+			tr2.commit().join();
+
+			// Read from tr3 should conflict with update from tr1.
+			try {
+				tr3.commit().join();
+				throw new RuntimeException("tr3 did not conflict");
+			}
+			catch(CompletionException e) {
+				validateConflict(e);
+			}
+		}
+		try(Transaction tr1 = db.createTransaction(); Transaction tr2 = db.createTransaction(); Transaction tr3 = db.createTransaction()) {
+			addUUIDConflicts(tr1, tr2, tr3);
+
+			// Verify adding a read conflict *key* causes a conflict.
+			byte[] key = SUBSPACE.pack(Tuple.from("foo", 1066L));
+			tr1.addWriteConflictKey(key);
+			if(tr2.snapshot().addReadConflictKeyIfNotSnapshot(key)) {
+				throw new RuntimeException("snapshot read said it added a conflict range");
+			}
+			if(!tr3.addReadConflictKeyIfNotSnapshot(key)) {
+				throw new RuntimeException("non-snapshot read said it did not add a conflict range");
+			}
+
+			// Two successful commits
+			tr1.commit().join();
+			tr2.commit().join();
+
+			// Read from tr3 should conflict with update from tr1.
+			try {
+				tr3.commit().join();
+				throw new RuntimeException("tr3 did not conflict");
+			}
+			catch(CompletionException e) {
+				validateConflict(e);
+			}
+		}
+	}
+
+	private static void snapshotOnSnapshot(Database db) {
+		try(Transaction tr = db.createTransaction()) {
+			if(tr.isSnapshot()) {
+				throw new RuntimeException("new transaction is a snapshot transaction");
+			}
+			ReadTransaction snapshotTr = tr.snapshot();
+			if(!snapshotTr.isSnapshot()) {
+				throw new RuntimeException("snapshot transaction is not a snapshot transaction");
+			}
+			if(snapshotTr == tr) {
+				throw new RuntimeException("snapshot and regular transaction are pointer-equal");
+			}
+			ReadTransaction snapshotSnapshotTr = snapshotTr.snapshot();
+			if(!snapshotSnapshotTr.isSnapshot()) {
+				throw new RuntimeException("snapshot transaction is not a snapshot transaction");
+			}
+			if(snapshotSnapshotTr != snapshotTr) {
+				throw new RuntimeException("calling snapshot on a snapshot transaction produced a different transaction");
+			}
+		}
+	}
+
+	private SnapshotTransactionTest() {}
+}

@@ -436,7 +436,8 @@ public class StackTester {
 	tr.options().setRetryLimit(50);
 	tr.options().setMaxRetryDelay(100);
 	tr.options().setUsedDuringCommitProtectionDisable();
-	tr.options().setTransactionLoggingEnable("my_transaction");
+	tr.options().setDebugTransactionIdentifier("my_transaction");
+	tr.options().setLogTransaction();
 	tr.options().setReadLockAware();
 	tr.options().setLockAware();
 

@@ -62,7 +62,7 @@ endif()
 set(package_file_name foundationdb-${FDB_VERSION}.tar.gz)
 set(package_file ${CMAKE_BINARY_DIR}/packages/${package_file_name})
 add_custom_command(OUTPUT ${package_file}
-  COMMAND $<TARGET_FILE:Python::Interpreter> setup.py sdist &&
+  COMMAND $<TARGET_FILE:Python::Interpreter> setup.py sdist --formats=gztar &&
           ${CMAKE_COMMAND} -E copy dist/${package_file_name} ${package_file}
   WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
   COMMENT "Create Python sdist package")

@@ -136,7 +136,8 @@ def test_options(tr):
     tr.options.set_retry_limit(50)
     tr.options.set_max_retry_delay(100)
     tr.options.set_used_during_commit_protection_disable()
-    tr.options.set_transaction_logging_enable('my_transaction')
+    tr.options.set_debug_transaction_identifier('my_transaction')
+    tr.options.set_log_transaction()
     tr.options.set_read_lock_aware()
     tr.options.set_lock_aware()
 

@@ -466,7 +466,8 @@ class Tester
     tr.options.set_retry_limit(50)
     tr.options.set_max_retry_delay(100)
     tr.options.set_used_during_commit_protection_disable
-    tr.options.set_transaction_logging_enable('my_transaction')
+    tr.options.set_debug_transaction_identifier('my_transaction')
+    tr.options.set_log_transaction()
     tr.options.set_read_lock_aware()
     tr.options.set_lock_aware()
 

@@ -6,6 +6,4 @@ set(error_msg
 if(EXISTS "${FILE}")
   list(JOIN error_msg " " err)
   message(FATAL_ERROR "${err}")
-else()
-  message(STATUS "${FILE} does not exist")
 endif()

@@ -44,7 +44,11 @@ set(CMAKE_REQUIRED_LIBRARIES c)
 
 
 if(WIN32)
+  # see: https://docs.microsoft.com/en-us/windows/desktop/WinProg/using-the-windows-headers
+  # this sets the windows target version to Windows 7
+  set(WINDOWS_TARGET 0x0601)
   add_compile_options(/W3 /EHsc /std:c++14 /bigobj $<$<CONFIG:Release>:/Zi>)
+  add_compile_definitions(_WIN32_WINNT=${WINDOWS_TARGET} BOOST_ALL_NO_LIB)
 else()
   if(USE_GOLD_LINKER)
     set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fuse-ld=gold -Wl,--disable-new-dtags")

@@ -142,10 +142,10 @@
     A transaction is not permitted to read any transformed key or value previously set within that transaction, and an attempt to do so will result in an error.
 
 .. |atomic-versionstamps-tuple-warning-key| replace::
-    At this time, versionstamped keys are not compatible with the Tuple layer except in Java and Python. Note that this implies versionstamped keys may not be used with the Subspace and Directory layers except in those languages.
+    At this time, versionstamped keys are not compatible with the Tuple layer except in Java, Python, and Go. Note that this implies versionstamped keys may not be used with the Subspace and Directory layers except in those languages.
 
 .. |atomic-versionstamps-tuple-warning-value| replace::
-    At this time, versionstamped values are not compatible with the Tuple layer except in Java and Python. Note that this implies versionstamped values may not be used with the Subspace and Directory layers except in those languages.
+    At this time, versionstamped values are not compatible with the Tuple layer except in Java, Python, and Go. Note that this implies versionstamped values may not be used with the Subspace and Directory layers except in those languages.
 
 .. |api-version| replace:: 610
 

@@ -39,7 +39,7 @@ Tools
 There are 5 command line tools for working with Backup and DR operations:
 
 ``fdbbackup``
-    This command line tool is used to control (but not execute) backup jobs and manage backup data. It can ``start`` or ``abort`` a backup, ``discontinue`` a continuous backup, get the ``status`` of an ongoing backup, or ``wait`` for a backup to complete. It can also ``describe``, ``delete``, ``expire`` data in a backup, or ``list`` the backups at a destination folder URL.
+    This command line tool is used to control (but not execute) backup jobs and manage backup data. It can ``start``, ``modify`` or ``abort`` a backup, ``discontinue`` a continuous backup, get the ``status`` of an ongoing backup, or ``wait`` for a backup to complete. It can also ``describe``, ``delete``, ``expire`` data in a backup, or ``list`` the backups at a destination folder URL.
 
 ``fdbrestore``
     This command line tool is used to control (but not execute) restore jobs. It can ``start`` or ``abort`` a restore, get the ``status`` of current and recent restore tasks, or ``wait`` for a restore task to complete while printing ongoing progress details.
@@ -100,7 +100,7 @@ If <secret> is not specified, it will be looked up in :ref:`blob credential sour
 
 An example blob store Backup URL would be ``blobstore://myKey:mySecret@something.domain.com:80/dec_1_2017_0400?bucket=backups``.
 
-Blob store Backup URLs can have optional parameters at the end which set various limits on interactions with the blob store. All values must be positive decimal integers. The default values are not very restrictive. The most likely parameter a user would want to change is ``max_send_bytes_per_second`` (or ``sbps`` for short) which determines the upload speed to the blob service.
+Blob store Backup URLs can have optional parameters at the end which set various limits or options used when communicating with the store. All values must be positive decimal integers unless otherwise specified. The speed related default values are not very restrictive. The most likely parameter a user would want to change is ``max_send_bytes_per_second`` (or ``sbps`` for short) which determines the upload speed to the blob service.
 
 Here is a complete list of valid parameters:
 
@@ -130,7 +130,11 @@ Here is a complete list of valid parameters:
 
 *max_send_bytes_per_second* (or *sbps*) - Max send bytes per second for all requests combined.
 
-*max_recv_bytes_per_second* (or *rbps*) - Max receive bytes per second for all requests combined
+*max_recv_bytes_per_second* (or *rbps*) - Max receive bytes per second for all requests combined.
+
+*header* - Add an additional HTTP header to each blob store REST API request. Can be specified multiple times. Format is *header=<FieldName>:<FieldValue>* where both strings are non-empty.
+
+**Example**: The URL parameter *header=x-amz-storage-class:REDUCED_REDUNDANCY* would send the HTTP header required to use the reduced redundancy storage option in the S3 API.
 
 .. _blob-credential-files:
 
@@ -193,7 +197,7 @@ The following options apply to most subcommands:
   Path to the cluster file that should be used to connect to the FoundationDB cluster you want to use. If not specified, a :ref:`default cluster file <default-cluster-file>` will be used.
 
 ``-d <BACKUP_URL>``
-  The Backup URL which the subcommand should read, write, or modify. For ``start`` operations, the Backup URL must be accessible by the ``backup_agent`` processes.
+  The Backup URL which the subcommand should read, write, or modify. For ``start`` and ``modify`` operations, the Backup URL must be accessible by the ``backup_agent`` processes.
 
 ``-t <TAG>``
   A "tag" is a named slot in which a backup task executes. Backups on different named tags make progress and are controlled independently, though their executions are handled by the same set of backup agent processes. Any number of unique backup tags can be active at once. If the tag is not specified, the default tag name "default" is used.
@@ -231,6 +235,29 @@ The ``start`` subcommand is used to start a backup. If there is already a backu
    user@host$ fdbbackup start -k 'apple bananna' -k 'mango pineapple' -d <BACKUP_URL>
    user@host$ fdbbackup start -k '@pp1e b*n*nn*' -k '#an&0 p^n3app!e' -d <BACKUP_URL>
 
+.. program:: fdbbackup modify
+
+``modify``
+----------
+
+The ``modify`` subcommand is used to modify parameters of a running backup. All specified changes are made in a single transaction.
+
+::
+
+   user@host$ fdbbackup modify [-t <TAG>] [-d <BACKUP_URL>] [-s <DURATION>] [--active_snapshot_interval <DURATION>] [--verify_uid <UID>]
+
+``-d <BACKUP_URL>``
+  Sets a new Backup URL for the backup to write to. This is most likely to be used to change only URL parameters or account information. However, it can also be used to start writing to a new destination mid-backup. The old location will cease gaining any additional restorability, while the new location will not be restorable until a new snapshot begins and completes. Full restorability would be regained, however, if the contents of the two destinations were to be combined by the user.
+
+``-s <DURATION>`` or ``--snapshot_interval <DURATION>``
+  Sets a new duration for backup snapshots, in seconds.
+
+``--active_snapshot_interval <DURATION>``
+  Sets a new duration for the backup's currently active snapshot, in seconds, relative to the start of the snapshot.
+
+``--verify_uid <UID>``
+  Specifies a UID to verify against the BackupUID of the running backup. If provided, the UID is verified in the same transaction which sets the new backup parameters (if the UID matches).
+
 .. program:: fdbbackup abort
 
 ``abort``
@@ -339,6 +339,7 @@ cluster.messages log_servers_error Time
 cluster.messages transaction_start_timeout          Unable to start transaction after __ seconds.
 cluster.messages unreachable_master_worker          Unable to locate the master worker.
 cluster.messages unreachable_dataDistributor_worker Unable to locate the data distributor worker.
+cluster.messages unreachable_ratekeeper_worker      Unable to locate the ratekeeper worker.
 cluster.messages unreachable_processes              The cluster has some unreachable processes.
 cluster.messages unreadable_configuration           Unable to read database configuration.
 cluster.messages layer_status_incomplete            Some or all of the layers subdocument could not be read.

@@ -7,7 +7,9 @@ Release Notes
 
 Features
 --------
-* Improved replication mechanism, a new hierarchical replication technique that further significantly reduces the frequency of data loss events even when multiple machines (e.g., fault-tolerant zones in the current code) permanently fail at the same time. `(PR #964) <https://github.com/apple/foundationdb/pull/964>`.
+* Improved replication mechanism, a new hierarchical replication technique that further significantly reduces the frequency of data loss events even when multiple machines (e.g., fault-tolerant zones in the current code) permanently fail at the same time. `(PR #964) <https://github.com/apple/foundationdb/pull/964>`_.
+
+* Added background actor to remove redundant teams from team collection so that the healthy team number is guaranteed not to exceed the desired number. `(PR #1139) <https://github.com/apple/foundationdb/pull/1139>`_
 
 * Get read version, read, and commit requests are counted and aggregated by server-side latency in configurable latency bands and output in JSON status. `(PR #1084) <https://github.com/apple/foundationdb/pull/1084>`_
@@ -16,6 +18,8 @@ Features
 * Batch priority transactions are now limited separately by ratekeeper and will be throttled at lower levels of cluster saturation. This makes it possible to run a more intense background load at saturation without significantly affecting normal priority transactions. It is still recommended not to run excessive loads at batch priority. `(PR #1198) <https://github.com/apple/foundationdb/pull/1198>`_
 * Restore now requires the destination cluster to be specified explicitly to avoid confusion. `(PR #1240) <https://github.com/apple/foundationdb/pull/1240>`_
 * Restore target version can now be specified by timestamp if the original cluster is available. `(PR #1240) <https://github.com/apple/foundationdb/pull/1240>`_
+* Separate data distribution out from master as a new role. `(PR #1062) <https://github.com/apple/foundationdb/pull/1062>`_
+* Separate rate keeper out from data distribution as a new role. `(PR #1176) <https://github.com/apple/foundationdb/pull/1176>`_
 
 Performance
 -----------
|
@ -39,9 +43,15 @@ Bindings
|
|||
* Python: Removed ``fdb.init``, ``fdb.create_cluster``, and ``fdb.Cluster``. ``fdb.open`` no longer accepts a ``database_name`` parameter. `(PR #942) <https://github.com/apple/foundationdb/pull/942>`_
|
||||
* Java: Deprecated ``FDB.createCluster`` and ``Cluster``. The preferred way to get a ``Database`` is by using ``FDB.open``, which should work in both new and old API versions. `(PR #942) <https://github.com/apple/foundationdb/pull/942>`_
|
||||
* Java: Removed ``Cluster(long cPtr, Executor executor)`` constructor. This is API breaking for any code that has subclassed the ``Cluster`` class and is not protected by API versioning. `(PR #942) <https://github.com/apple/foundationdb/pull/942>`_
|
||||
* Java: Several methods relevant to read-only transactions have been moved into the ``ReadTransaction`` interface.
|
||||
* Ruby: Removed ``FDB.init``, ``FDB.create_cluster``, and ``FDB.Cluster``. ``FDB.open`` no longer accepts a ``database_name`` parameter. `(PR #942) <https://github.com/apple/foundationdb/pull/942>`_
|
||||
* Golang: Deprecated ``fdb.StartNetwork``, ``fdb.Open``, ``fdb.MustOpen``, and ``fdb.CreateCluster`` and added ``fdb.OpenDatabase`` and ``fdb.MustOpenDatabase``. The preferred way to start the network and get a ``Database`` is by using ``FDB.OpenDatabase`` or ``FDB.OpenDefault``. `(PR #942) <https://github.com/apple/foundationdb/pull/942>`_
|
||||
* Flow: Deprecated ``API::createCluster`` and ``Cluster`` and added ``API::createDatabase``. The preferred way to get a ``Database`` is by using ``API::createDatabase``. `(PR #942) <https://github.com/apple/foundationdb/pull/942>`_
|
||||
* Flow: Removed ``API::createCluster`` and ``Cluster`` and added ``API::createDatabase``. The new way to get a ``Database`` is by using ``API::createDatabase``. `(PR #942) <https://github.com/apple/foundationdb/pull/942>`_ `(PR #1215) <https://github.com/apple/foundationdb/pull/1215>`_
|
||||
* Flow: Changed ``DatabaseContext`` to ``Database``, and ``API::createDatabase`` returns ``Reference<Database>`` instead of ``Reference<<DatabaseContext>``. `(PR #1215) <https://github.com/apple/foundationdb/pull/1215>`_
|
||||
* Flow: Converted ``Transaction`` into an interface and moved its implementation into an internal class. Transactions should now be created using ``Database::createTransaction(db)``. `(PR #1215) <https://github.com/apple/foundationdb/pull/1215>`_
|
||||
* Flow: Added ``ReadTransaction`` interface that allows only read operations on a transaction. The ``Transaction`` interface inherits from ``ReadTransaction`` and can be used when a ``ReadTransaction`` is required. `(PR #1215) <https://github.com/apple/foundationdb/pull/1215>`_
|
||||
* Flow: Changed ``Transaction::setVersion`` to ``Transaction::setReadVersion``. `(PR #1215) <https://github.com/apple/foundationdb/pull/1215>`_
|
||||
* Flow: On update to this version of the Flow bindings, client code will fail to build due to the changes in the API, irrespective of the API version used. Client code must be updated to use the new bindings API. These changes affect the bindings only and won't impact compatibility with different versions of the cluster. `(PR #1215) <https://github.com/apple/foundationdb/pull/1215>`_
|
||||
* Golang: Added ``fdb.Printable`` to print a human-readable string for a given byte array. Add ``Key.String()``, which converts the ``Key`` to a ``string`` using the ``Printable`` function. `(PR #1010) <https://github.com/apple/foundationdb/pull/1010>`_
|
||||
* Python: Python signal handling didn't work when waiting on a future. In particular, pressing Ctrl-C would not successfully interrupt the program. `(PR #1138) <https://github.com/apple/foundationdb/pull/1138>`_
|
||||
|
||||
|
|
|
@@ -76,7 +76,7 @@ enum enumProgramExe {
 };
 
 enum enumBackupType {
-	BACKUP_UNDEFINED=0, BACKUP_START, BACKUP_STATUS, BACKUP_ABORT, BACKUP_WAIT, BACKUP_DISCONTINUE, BACKUP_PAUSE, BACKUP_RESUME, BACKUP_EXPIRE, BACKUP_DELETE, BACKUP_DESCRIBE, BACKUP_LIST, BACKUP_DUMP
+	BACKUP_UNDEFINED=0, BACKUP_START, BACKUP_MODIFY, BACKUP_STATUS, BACKUP_ABORT, BACKUP_WAIT, BACKUP_DISCONTINUE, BACKUP_PAUSE, BACKUP_RESUME, BACKUP_EXPIRE, BACKUP_DELETE, BACKUP_DESCRIBE, BACKUP_LIST, BACKUP_DUMP
 };
 
 enum enumDBType {
@@ -99,6 +99,9 @@ enum {
 	// Backup and Restore constants
 	OPT_TAGNAME, OPT_BACKUPKEYS, OPT_WAITFORDONE,
 
+	// Backup Modify
+	OPT_MOD_ACTIVE_INTERVAL, OPT_MOD_VERIFY_UID,
+
 	// Restore constants
 	OPT_RESTORECONTAINER, OPT_RESTORE_VERSION, OPT_RESTORE_TIMESTAMP, OPT_PREFIX_ADD, OPT_PREFIX_REMOVE, OPT_RESTORE_CLUSTERFILE_DEST, OPT_RESTORE_CLUSTERFILE_ORIG,
 
@@ -189,6 +192,40 @@ CSimpleOpt::SOption g_rgBackupStartOptions[] = {
 	SO_END_OF_OPTIONS
 };
 
+CSimpleOpt::SOption g_rgBackupModifyOptions[] = {
+#ifdef _WIN32
+	{ OPT_PARENTPID, "--parentpid", SO_REQ_SEP },
+#endif
+	{ OPT_TRACE, "--log", SO_NONE },
+	{ OPT_TRACE_DIR, "--logdir", SO_REQ_SEP },
+	{ OPT_TRACE_LOG_GROUP, "--loggroup", SO_REQ_SEP },
+	{ OPT_QUIET, "-q", SO_NONE },
+	{ OPT_QUIET, "--quiet", SO_NONE },
+	{ OPT_VERSION, "-v", SO_NONE },
+	{ OPT_VERSION, "--version", SO_NONE },
+	{ OPT_CRASHONERROR, "--crash", SO_NONE },
+	{ OPT_MEMLIMIT, "-m", SO_REQ_SEP },
+	{ OPT_MEMLIMIT, "--memory", SO_REQ_SEP },
+	{ OPT_HELP, "-?", SO_NONE },
+	{ OPT_HELP, "-h", SO_NONE },
+	{ OPT_HELP, "--help", SO_NONE },
+	{ OPT_DEVHELP, "--dev-help", SO_NONE },
+	{ OPT_BLOB_CREDENTIALS, "--blob_credentials", SO_REQ_SEP },
+	{ OPT_KNOB, "--knob_", SO_REQ_SEP },
+	{ OPT_CLUSTERFILE, "-C", SO_REQ_SEP },
+	{ OPT_CLUSTERFILE, "--cluster_file", SO_REQ_SEP },
+	{ OPT_TAGNAME, "-t", SO_REQ_SEP },
+	{ OPT_TAGNAME, "--tagname", SO_REQ_SEP },
+	{ OPT_MOD_VERIFY_UID, "--verify_uid", SO_REQ_SEP },
+	{ OPT_DESTCONTAINER, "-d", SO_REQ_SEP },
+	{ OPT_DESTCONTAINER, "--destcontainer", SO_REQ_SEP },
+	{ OPT_SNAPSHOTINTERVAL, "-s", SO_REQ_SEP },
+	{ OPT_SNAPSHOTINTERVAL, "--snapshot_interval", SO_REQ_SEP },
+	{ OPT_MOD_ACTIVE_INTERVAL, "--active_snapshot_interval", SO_REQ_SEP },
+
+	SO_END_OF_OPTIONS
+};
+
 CSimpleOpt::SOption g_rgBackupStatusOptions[] = {
 #ifdef _WIN32
 	{ OPT_PARENTPID, "--parentpid", SO_REQ_SEP },
@@ -830,7 +867,7 @@ static void printBackupUsage(bool devhelp) {
 	       "                 FDB_CLUSTER_FILE environment variable, then `./fdb.cluster',\n"
 	       "                 then `%s'.\n", platform::getDefaultClusterFilePath().c_str());
 	printf("  -d, --destcontainer URL\n"
-	       "                 The Backup container URL for start, describe, expire, and delete operations.\n");
+	       "                 The Backup container URL for start, modify, describe, expire, and delete operations.\n");
 	printBackupContainerInfo();
 	printf("  -b, --base_url BASEURL\n"
 	       "                 Base backup URL for list operations. This looks like a Backup URL but without a backup name.\n");
@@ -855,7 +892,12 @@ static void printBackupUsage(bool devhelp) {
 	printf("  For describe operations, lookup versions in the database to obtain timestamps. A cluster file is required.\n");
 	printf("  -f, --force    For expire operations, force expiration even if minimum restorability would be violated.\n");
 	printf("  -s, --snapshot_interval DURATION\n"
-	       "                 For start operations, specifies the backup's target snapshot interval as DURATION seconds. Defaults to %d.\n", CLIENT_KNOBS->BACKUP_DEFAULT_SNAPSHOT_INTERVAL_SEC);
+	       "                 For start or modify operations, specifies the backup's default target snapshot interval as DURATION seconds. Defaults to %d for start operations.\n", CLIENT_KNOBS->BACKUP_DEFAULT_SNAPSHOT_INTERVAL_SEC);
+	printf("  --active_snapshot_interval DURATION\n"
+	       "                 For modify operations, sets the desired interval for the backup's currently active snapshot, relative to the start of the snapshot.\n");
+	printf("  --verify_uid UID\n"
+	       "                 Specifies a UID to verify against the BackupUID of the running backup. If provided, the UID is verified in the same transaction\n"
+	       "                 which sets the new backup parameters (if the UID matches).\n");
 	printf("  -e ERRORLIMIT  The maximum number of errors printed by status (default is 10).\n");
 	printf("  -k KEYS        List of key ranges to backup.\n"
 	       "                 If not specified, the entire database will be backed up.\n");
@ -1053,6 +1095,9 @@ enumProgramExe getProgramType(std::string programExe)
|
|||
}
|
||||
}
|
||||
#endif
|
||||
// For debugging convenience, remove .debug suffix if present.
|
||||
if(StringRef(programExe).endsWith(LiteralStringRef(".debug")))
|
||||
programExe = programExe.substr(0, programExe.size() - 6);
|
||||
|
||||
// Check if backup agent
|
||||
if ((programExe.length() >= exeAgent.size()) &&
|
||||
|
@ -1113,6 +1158,7 @@ enumBackupType getBackupType(std::string backupType)
|
|||
values["describe"] = BACKUP_DESCRIBE;
|
||||
values["list"] = BACKUP_LIST;
|
||||
values["dump"] = BACKUP_DUMP;
|
||||
values["modify"] = BACKUP_MODIFY;
|
||||
}
|
||||
|
||||
auto i = values.find(backupType);
|
||||
|
@ -1943,7 +1989,7 @@ ACTOR Future<Void> runRestore(std::string destClusterFile, std::string originalC
|
|||
state Database db = Database::createDatabase(destClusterFile, Database::API_VERSION_LATEST);
|
||||
state FileBackupAgent backupAgent;
|
||||
|
||||
state Reference<IBackupContainer> bc = IBackupContainer::openContainer(container);
|
||||
state Reference<IBackupContainer> bc = openBackupContainer(exeRestore.toString().c_str(), container);
|
||||
|
||||
// If targetVersion is unset then use the maximum restorable version from the backup description
|
||||
if(targetVersion == invalidVersion) {
|
||||
|
@ -2143,13 +2189,102 @@ ACTOR Future<Void> listBackup(std::string baseUrl) {
|
|||
}
|
||||
}
|
||||
catch (Error& e) {
|
||||
fprintf(stderr, "ERROR: %s\n", e.what());
|
||||
std::string msg = format("ERROR: %s", e.what());
|
||||
if(e.code() == error_code_backup_invalid_url && !IBackupContainer::lastOpenError.empty()) {
|
||||
msg += format(": %s", IBackupContainer::lastOpenError.c_str());
|
||||
}
|
||||
fprintf(stderr, "%s\n", msg.c_str());
|
||||
throw;
|
||||
}
|
||||
|
||||
return Void();
|
||||
}
|
||||
|
||||
struct BackupModifyOptions {
|
||||
Optional<std::string> verifyUID;
|
||||
Optional<std::string> destURL;
|
||||
Optional<int> snapshotIntervalSeconds;
|
||||
Optional<int> activeSnapshotIntervalSeconds;
|
||||
bool hasChanges() const {
|
||||
return destURL.present() || snapshotIntervalSeconds.present() || activeSnapshotIntervalSeconds.present();
|
||||
}
|
||||
};
|
||||
|
||||
ACTOR Future<Void> modifyBackup(Database db, std::string tagName, BackupModifyOptions options) {
|
||||
if(!options.hasChanges()) {
|
||||
fprintf(stderr, "No changes were specified, nothing to do!\n");
|
||||
throw backup_error();
|
||||
}
|
||||
|
||||
state KeyBackedTag tag = makeBackupTag(tagName);
|
||||
|
||||
state Reference<IBackupContainer> bc;
|
||||
if(options.destURL.present()) {
|
||||
bc = openBackupContainer(exeBackup.toString().c_str(), options.destURL.get());
|
||||
try {
|
||||
wait(timeoutError(bc->create(), 30));
|
||||
} catch(Error &e) {
|
||||
if(e.code() == error_code_actor_cancelled)
|
||||
throw;
|
||||
fprintf(stderr, "ERROR: Could not create backup container at '%s': %s\n", options.destURL.get().c_str(), e.what());
|
||||
throw backup_error();
|
||||
}
|
||||
}
|
||||
|
||||
state Reference<ReadYourWritesTransaction> tr(new ReadYourWritesTransaction(db));
|
||||
loop {
|
||||
try {
|
||||
tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
|
||||
tr->setOption(FDBTransactionOptions::LOCK_AWARE);
|
||||
|
||||
state Optional<UidAndAbortedFlagT> uidFlag = wait(tag.get(db));
|
||||
|
||||
if(!uidFlag.present()) {
|
||||
fprintf(stderr, "No backup exists on tag '%s'\n", tagName.c_str());
|
||||
throw backup_error();
|
||||
}
|
||||
|
||||
if(uidFlag.get().second) {
|
||||
fprintf(stderr, "Cannot modify aborted backup on tag '%s'\n", tagName.c_str());
|
||||
throw backup_error();
|
||||
}
|
||||
|
||||
state BackupConfig config(uidFlag.get().first);
|
||||
EBackupState s = wait(config.stateEnum().getOrThrow(tr, false, backup_invalid_info()));
|
||||
if(!FileBackupAgent::isRunnable(s)) {
|
||||
fprintf(stderr, "Backup on tag '%s' is not runnable.\n", tagName.c_str());
|
||||
throw backup_error();
|
||||
}
|
||||
|
||||
if(options.verifyUID.present() && options.verifyUID.get() != uidFlag.get().first.toString()) {
|
||||
fprintf(stderr, "UID verification failed, backup on tag '%s' is '%s' but '%s' was specified.\n", tagName.c_str(), uidFlag.get().first.toString().c_str(), options.verifyUID.get().c_str());
|
||||
throw backup_error();
|
||||
}
|
||||
|
||||
if(options.snapshotIntervalSeconds.present()) {
|
||||
config.snapshotIntervalSeconds().set(tr, options.snapshotIntervalSeconds.get());
|
||||
}
|
||||
|
||||
if(options.activeSnapshotIntervalSeconds.present()) {
|
||||
Version begin = wait(config.snapshotBeginVersion().getOrThrow(tr, false, backup_error()));
|
||||
config.snapshotTargetEndVersion().set(tr, begin + ((int64_t)options.activeSnapshotIntervalSeconds.get() * CLIENT_KNOBS->CORE_VERSIONSPERSECOND));
|
||||
}
|
||||
|
||||
if(options.destURL.present()) {
|
||||
config.backupContainer().set(tr, bc);
|
||||
}
|
||||
|
||||
wait(tr->commit());
|
||||
break;
|
||||
}
|
||||
catch (Error& e) {
|
||||
wait(tr->onError(e));
|
||||
}
|
||||
}
|
||||
|
||||
return Void();
|
||||
}
|
||||
|
||||
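A note on the pattern above: modifyBackup validates the tag, its state, and (optionally) the UID inside the same retryable transaction that writes the new parameters, so a concurrent abort or restart cannot slip in between the check and the update. A minimal standalone sketch of that verify-then-apply rule, using a hypothetical in-memory config in place of BackupConfig (illustrative only, not FDB code):

#include <cstdio>
#include <optional>
#include <stdexcept>
#include <string>

struct FakeConfig {
    std::string uid;                  // stands in for the backup's UID
    int snapshotIntervalSeconds = 0;
};

// Verify the UID and apply the change as one atomic step; in the real actor
// this body runs inside a ReadYourWritesTransaction retry loop.
void applyModify(FakeConfig& cfg, std::optional<std::string> verifyUID, std::optional<int> newInterval) {
    if (verifyUID && *verifyUID != cfg.uid)
        throw std::runtime_error("UID verification failed");
    if (newInterval)
        cfg.snapshotIntervalSeconds = *newInterval;
}

int main() {
    FakeConfig cfg{"abc123", 600};
    applyModify(cfg, std::string("abc123"), 86400);
    printf("interval is now %d\n", cfg.snapshotIntervalSeconds);
}
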
static std::vector<std::vector<StringRef>> parseLine(std::string &line, bool& err, bool& partial)
{
err = false;
@@ -2407,6 +2542,9 @@ int main(int argc, char* argv[]) {
case BACKUP_LIST:
args = new CSimpleOpt(argc - 1, &argv[1], g_rgBackupListOptions, SO_O_EXACT);
break;
case BACKUP_MODIFY:
args = new CSimpleOpt(argc - 1, &argv[1], g_rgBackupModifyOptions, SO_O_EXACT);
break;
case BACKUP_UNDEFINED:
default:
// Display help, if requested
@@ -2548,6 +2686,8 @@ int main(int argc, char* argv[]) {
std::string restoreClusterFileDest;
std::string restoreClusterFileOrig;

BackupModifyOptions modifyOptions;

if( argc == 1 ) {
printUsage(programExe, false);
return FDB_EXIT_ERROR;
@@ -2731,16 +2871,30 @@ int main(int argc, char* argv[]) {
// If the url starts with '/' then prepend "file://" for backwards compatibility
if(StringRef(destinationContainer).startsWith(LiteralStringRef("/")))
destinationContainer = std::string("file://") + destinationContainer;
modifyOptions.destURL = destinationContainer;
break;
case OPT_SNAPSHOTINTERVAL: {
case OPT_SNAPSHOTINTERVAL:
case OPT_MOD_ACTIVE_INTERVAL:
{
const char* a = args->OptionArg();
if (!sscanf(a, "%d", &snapshotIntervalSeconds)) {
int seconds;
if (!sscanf(a, "%d", &seconds)) {
fprintf(stderr, "ERROR: Could not parse snapshot interval `%s'\n", a);
printHelpTeaser(argv[0]);
return FDB_EXIT_ERROR;
}
if(optId == OPT_SNAPSHOTINTERVAL) {
snapshotIntervalSeconds = seconds;
modifyOptions.snapshotIntervalSeconds = seconds;
}
else if(optId == OPT_MOD_ACTIVE_INTERVAL) {
modifyOptions.activeSnapshotIntervalSeconds = seconds;
}
break;
}
case OPT_MOD_VERIFY_UID:
modifyOptions.verifyUID = args->OptionArg();
break;
case OPT_WAITFORDONE:
waitForDone = true;
break;
@@ -3142,6 +3296,15 @@ int main(int argc, char* argv[]) {
break;
}

case BACKUP_MODIFY:
{
if(!initCluster())
return FDB_EXIT_ERROR;

f = stopAfter( modifyBackup(db, tagName, modifyOptions) );
break;
}

case BACKUP_STATUS:
if(!initCluster())
return FDB_EXIT_ERROR;

@@ -78,7 +78,7 @@
<Optimization>Disabled</Optimization>
<BasicRuntimeChecks>EnableFastChecks</BasicRuntimeChecks>
<RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=0x0502;WINVER=0x0502;NTDDI_VERSION=0x05020000;_DEBUG;_HAS_ITERATOR_DEBUGGING=0;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=0x0502;WINVER=0x0502;BOOST_ALL_NO_LIB;NTDDI_VERSION=0x05020000;_DEBUG;_HAS_ITERATOR_DEBUGGING=0;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<MultiProcessorCompilation>true</MultiProcessorCompilation>
<AdditionalOptions>/bigobj @../flow/no_intellisense.opt %(AdditionalOptions)</AdditionalOptions>
@@ -98,7 +98,7 @@
<Optimization>Full</Optimization>
<RuntimeLibrary>MultiThreaded</RuntimeLibrary>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=0x0502;WINVER=0x0502;NTDDI_VERSION=0x05020000;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=0x0502;WINVER=0x0502;BOOST_ALL_NO_LIB;NTDDI_VERSION=0x05020000;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<EnableEnhancedInstructionSet>NotSet</EnableEnhancedInstructionSet>
<EnablePREfast>false</EnablePREfast>

@@ -1817,10 +1817,6 @@ ACTOR Future<bool> coordinators( Database db, std::vector<StringRef> tokens, boo
try {
// SOMEDAY: Check for keywords
auto const& addr = NetworkAddress::parse( t->toString() );
if (addresses.size() > 0 && addr.isTLS() != addresses.begin()->isTLS()) {
printf("ERROR: cannot use coordinators with different TLS states: `%s'\n", t->toString().c_str());
return true;
}
if (addresses.count(addr)){
printf("ERROR: passed redundant coordinators: `%s'\n", addr.toString().c_str());
return true;

@@ -81,7 +81,7 @@
</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=0x0502;WINVER=0x0502;NTDDI_VERSION=0x05020000;_DEBUG;_HAS_ITERATOR_DEBUGGING=0;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=0x0502;WINVER=0x0502;BOOST_ALL_NO_LIB;NTDDI_VERSION=0x05020000;_DEBUG;_HAS_ITERATOR_DEBUGGING=0;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\zookeeper\win32;..\zookeeper\generated;..\zookeeper\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<MultiProcessorCompilation>true</MultiProcessorCompilation>
<MinimalRebuild>false</MinimalRebuild>
@@ -105,7 +105,7 @@
</PrecompiledHeader>
<Optimization>Full</Optimization>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=0x0502;WINVER=0x0502;NTDDI_VERSION=0x05020000;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=0x0502;WINVER=0x0502;BOOST_ALL_NO_LIB;NTDDI_VERSION=0x05020000;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\zookeeper\win32;..\zookeeper\generated;..\zookeeper\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<MultiProcessorCompilation>true</MultiProcessorCompilation>
<FavorSizeOrSpeed>Speed</FavorSizeOrSpeed>

@@ -723,11 +723,6 @@ public:
return configSpace.pack(LiteralStringRef(__FUNCTION__));
}

// Get the backup container URL only without creating a backup container instance.
KeyBackedProperty<Reference<IBackupContainer>> backupContainerURL() {
return configSpace.pack(LiteralStringRef("backupContainer"));
}

// Stop differential logging if already started or don't start after completing KV ranges
KeyBackedProperty<bool> stopWhenDone() {
return configSpace.pack(LiteralStringRef(__FUNCTION__));

@@ -586,6 +586,8 @@ ACTOR Future<Void> applyMutations(Database cx, Key uid, Key addPrefix, Key remov
state Future<Void> error = actorCollection( addActor.getFuture() );
state int maxBytes = CLIENT_KNOBS->APPLY_MIN_LOCK_BYTES;

keyVersion->insert(metadataVersionKey, 0);

try {
loop {
if(beginVersion >= *endVersion) {

@@ -1329,6 +1329,7 @@ public:
continue;
}
TraceEvent(SevWarn, "BackupContainerBlobStoreInvalidParameter").detail("Name", printable(kv.first)).detail("Value", printable(kv.second));
IBackupContainer::lastOpenError = format("Unknown URL parameter: '%s'", kv.first.c_str());
throw backup_invalid_url();
}
}
@@ -1359,7 +1360,7 @@ public:
BlobStoreEndpoint::ListResult contents = wait(bstore->listBucket(bucket, basePath));
std::vector<std::string> results;
for(auto &f : contents.objects) {
results.push_back(bstore->getResourceURL(f.name.substr(basePath.size())));
results.push_back(bstore->getResourceURL(f.name.substr(basePath.size()), format("bucket=%s", bucket.c_str())));
}
return results;
}

@@ -147,10 +147,14 @@ Reference<BlobStoreEndpoint> BlobStoreEndpoint::fromString(std::string const &ur
StringRef t(url);
StringRef prefix = t.eat("://");
if(prefix != LiteralStringRef("blobstore"))
throw std::string("Invalid blobstore URL.");
StringRef cred = t.eat("@");
StringRef hostPort = t.eat("/");
StringRef resource = t.eat("?");
throw format("Invalid blobstore URL prefix '%s'", prefix.toString().c_str());
StringRef cred = t.eat("@");
uint8_t foundSeparator = 0;
StringRef hostPort = t.eatAny("/?", &foundSeparator);
StringRef resource;
if(foundSeparator == '/') {
resource = t.eat("?");
}

// hostPort is at least a host or IP address, optionally followed by :portNumber or :serviceName
StringRef h(hostPort);
@@ -161,12 +165,31 @@ Reference<BlobStoreEndpoint> BlobStoreEndpoint::fromString(std::string const &ur
StringRef service = h.eat();

BlobKnobs knobs;
HTTP::Headers extraHeaders;
while(1) {
StringRef name = t.eat("=");
if(name.size() == 0)
break;
StringRef value = t.eat("&");

// Special case for header
if(name == LiteralStringRef("header")) {
StringRef originalValue = value;
StringRef headerFieldName = value.eat(":");
StringRef headerFieldValue = value;
if(headerFieldName.size() == 0 || headerFieldValue.size() == 0) {
throw format("'%s' is not a valid value for '%s' parameter. Format is <FieldName>:<FieldValue> where strings are not empty.", originalValue.toString().c_str(), name.toString().c_str());
}
std::string &fieldValue = extraHeaders[headerFieldName.toString()];
// RFC 2616 section 4.2 says header field names can repeat but only if it is valid to concatenate their values with comma separation
if(!fieldValue.empty()) {
fieldValue.append(",");
}
fieldValue.append(headerFieldValue.toString());
continue;
}

// See if the parameter is a knob
// First try setting a dummy value (all knobs are currently numeric) just to see if this parameter is known to BlobStoreEndpoint.
// If it is, then we will set it to a good value or throw below, so the dummy set has no bad side effects.
bool known = knobs.set(name, 0);
@@ -197,7 +220,7 @@ Reference<BlobStoreEndpoint> BlobStoreEndpoint::fromString(std::string const &ur
StringRef key = c.eat(":");
StringRef secret = c.eat();

return Reference<BlobStoreEndpoint>(new BlobStoreEndpoint(host.toString(), service.toString(), key.toString(), secret.toString(), knobs));
return Reference<BlobStoreEndpoint>(new BlobStoreEndpoint(host.toString(), service.toString(), key.toString(), secret.toString(), knobs, extraHeaders));

} catch(std::string &err) {
if(error != nullptr)
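
The header= URL parameter handling above merges repeated field names by comma-concatenation, as permitted by RFC 2616 section 4.2. A standalone sketch of just that merging rule (plain std::map in place of FDB's HTTP::Headers; illustrative only):

#include <cstdio>
#include <map>
#include <stdexcept>
#include <string>

using Headers = std::map<std::string, std::string>;

// Parse one "header=<FieldName>:<FieldValue>" value and fold it into h.
void addHeaderParam(Headers &h, const std::string &value) {
    auto colon = value.find(':');
    if (colon == std::string::npos || colon == 0 || colon + 1 == value.size())
        throw std::runtime_error("expected <FieldName>:<FieldValue> with non-empty strings");
    std::string name = value.substr(0, colon);
    std::string val = value.substr(colon + 1);
    std::string &field = h[name];
    if (!field.empty())
        field.append(",");          // repeated names merge with a comma
    field.append(val);
}

int main() {
    Headers h;
    addHeaderParam(h, "X-Trace:abc");
    addHeaderParam(h, "X-Trace:def");   // merges into "abc,def"
    printf("X-Trace: %s\n", h["X-Trace"].c_str());
}
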
@@ -207,7 +230,7 @@ Reference<BlobStoreEndpoint> BlobStoreEndpoint::fromString(std::string const &ur
}
}

std::string BlobStoreEndpoint::getResourceURL(std::string resource) {
std::string BlobStoreEndpoint::getResourceURL(std::string resource, std::string params) {
std::string hostPort = host;
if(!service.empty()) {
hostPort.append(":");
@@ -220,9 +243,29 @@ std::string BlobStoreEndpoint::getResourceURL(std::string resource) {
s = std::string(":") + secret;

std::string r = format("blobstore://%s%s@%s/%s", key.c_str(), s.c_str(), hostPort.c_str(), resource.c_str());
std::string p = knobs.getURLParameters();
if(!p.empty())
r.append("?").append(p);

// Get params that are deviations from knob defaults
std::string knobParams = knobs.getURLParameters();
if(!knobParams.empty()) {
if(!params.empty()) {
params.append("&");
}
params.append(knobParams);
}

for(auto &kv : extraHeaders) {
if(!params.empty()) {
params.append("&");
}
params.append("header=");
params.append(HTTP::urlEncode(kv.first));
params.append(":");
params.append(HTTP::urlEncode(kv.second));
}

if(!params.empty())
r.append("?").append(params);

return r;
}
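
getResourceURL now folds three sources into one query string: caller-supplied params, knob values that deviate from defaults, and URL-encoded extra headers. A standalone sketch of the accumulation step, assuming the individual parameter strings are already encoded (illustrative, not the FDB implementation):

#include <cstdio>
#include <string>
#include <vector>

// Join pre-encoded key=value fragments with '&' and attach as a single query.
std::string buildURL(std::string base, const std::vector<std::string> &paramList) {
    std::string params;
    for (auto &p : paramList) {
        if (!params.empty()) params.append("&");
        params.append(p);
    }
    if (!params.empty()) base.append("?").append(params);
    return base;
}

int main() {
    printf("%s\n", buildURL("blobstore://key@host/path",
                            {"bucket=backups", "header=X-Trace:abc"}).c_str());
}
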
@@ -495,6 +538,16 @@ ACTOR Future<Reference<HTTP::Response>> doRequest_impl(Reference<BlobStoreEndpoi
headers["Content-Length"] = format("%d", contentLen);
headers["Host"] = bstore->host;
headers["Accept"] = "application/xml";

// Merge extraHeaders into headers
for(auto &kv : bstore->extraHeaders) {
std::string &fieldValue = headers[kv.first];
if(!fieldValue.empty()) {
fieldValue.append(",");
}
fieldValue.append(kv.second);
}

wait(bstore->concurrentRequests.take());
state FlowLock::Releaser globalReleaser(bstore->concurrentRequests, 1);

@@ -102,8 +102,8 @@ public:
}
};

BlobStoreEndpoint(std::string const &host, std::string service, std::string const &key, std::string const &secret, BlobKnobs const &knobs = BlobKnobs())
: host(host), service(service), key(key), secret(secret), lookupSecret(secret.empty()), knobs(knobs),
BlobStoreEndpoint(std::string const &host, std::string service, std::string const &key, std::string const &secret, BlobKnobs const &knobs = BlobKnobs(), HTTP::Headers extraHeaders = HTTP::Headers())
: host(host), service(service), key(key), secret(secret), lookupSecret(secret.empty()), knobs(knobs), extraHeaders(extraHeaders),
requestRate(new SpeedLimit(knobs.requests_per_second, 1)),
requestRateList(new SpeedLimit(knobs.list_requests_per_second, 1)),
requestRateWrite(new SpeedLimit(knobs.write_requests_per_second, 1)),
@@ -133,8 +133,8 @@ public:
// the unconsumed parameters will be added to it.
static Reference<BlobStoreEndpoint> fromString(std::string const &url, std::string *resourceFromURL = nullptr, std::string *error = nullptr, ParametersT *ignored_parameters = nullptr);

// Get a normalized version of this URL with the given resource and any non-default BlobKnob values as URL parameters.
std::string getResourceURL(std::string resource);
// Get a normalized version of this URL with the given resource and any non-default BlobKnob values as URL parameters in addition to the passed params string
std::string getResourceURL(std::string resource, std::string params);

struct ReusableConnection {
Reference<IConnection> conn;
@@ -150,6 +150,7 @@ public:
std::string secret;
bool lookupSecret;
BlobKnobs knobs;
HTTP::Headers extraHeaders;

// Speed and concurrency limits
Reference<IRateControl> requestRate;

@@ -1528,8 +1528,8 @@ namespace dbBackup {
}
}

state Reference<ReadYourWritesTransaction> srcTr2(new ReadYourWritesTransaction(taskBucket->src));
loop {
state Reference<ReadYourWritesTransaction> srcTr2(new ReadYourWritesTransaction(taskBucket->src));
try {
srcTr2->setOption(FDBTransactionOptions::LOCK_AWARE);
srcTr2->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
@@ -1559,6 +1559,21 @@ namespace dbBackup {
}
}

state Reference<ReadYourWritesTransaction> srcTr3(new ReadYourWritesTransaction(taskBucket->src));
loop {
try {
srcTr3->setOption(FDBTransactionOptions::LOCK_AWARE);
srcTr3->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);

srcTr3->atomicOp(metadataVersionKey, metadataVersionRequiredValue, MutationRef::SetVersionstampedValue);

wait(srcTr3->commit());
break;
} catch(Error &e) {
wait(srcTr3->onError(e));
}
}

return Void();
}

@@ -161,6 +161,9 @@ public:

int apiVersion;

int mvCacheInsertLocation;
std::vector<std::pair<Version, Optional<Value>>> metadataVersionCache;

HealthMetrics healthMetrics;
double healthMetricsLastUpdated;
double detailedHealthMetricsLastUpdated;

@@ -737,6 +737,7 @@ struct HealthMetrics {
int64_t worstStorageDurabilityLag;
int64_t worstTLogQueue;
double tpsLimit;
bool batchLimited;
std::map<UID, StorageStats> storageStats;
std::map<UID, int64_t> tLogQueue;

@@ -745,6 +746,7 @@ struct HealthMetrics {
, worstStorageDurabilityLag(0)
, worstTLogQueue(0)
, tpsLimit(0.0)
, batchLimited(false)
{}

void update(const HealthMetrics& hm, bool detailedInput, bool detailedOutput)
@@ -753,6 +755,7 @@ struct HealthMetrics {
worstStorageDurabilityLag = hm.worstStorageDurabilityLag;
worstTLogQueue = hm.worstTLogQueue;
tpsLimit = hm.tpsLimit;
batchLimited = hm.batchLimited;

if (!detailedOutput) {
storageStats.clear();
@@ -769,13 +772,14 @@ struct HealthMetrics {
worstStorageDurabilityLag == r.worstStorageDurabilityLag &&
worstTLogQueue == r.worstTLogQueue &&
storageStats == r.storageStats &&
tLogQueue == r.tLogQueue
tLogQueue == r.tLogQueue &&
batchLimited == r.batchLimited
);
}

template <class Ar>
void serialize(Ar& ar) {
serializer(ar, worstStorageQueue, worstStorageDurabilityLag, worstTLogQueue, tpsLimit, storageStats, tLogQueue);
serializer(ar, worstStorageQueue, worstStorageDurabilityLag, worstTLogQueue, tpsLimit, batchLimited, storageStats, tLogQueue);
}
};

@@ -1316,6 +1316,7 @@ namespace fileBackup {
state Reference<FlowLock> lock(new FlowLock(CLIENT_KNOBS->BACKUP_LOCK_BYTES));
wait(checkTaskVersion(cx, task, name, version));

state double startTime = timer();
state Reference<ReadYourWritesTransaction> tr(new ReadYourWritesTransaction(cx));

// The shard map will use 3 value classes. Exactly SKIP, exactly DONE, then any number >= NOT_DONE_MIN which will mean not done.
@@ -1709,7 +1710,8 @@ namespace fileBackup {
.detail("SnapshotBeginVersion", snapshotBeginVersion)
.detail("SnapshotTargetEndVersion", snapshotTargetEndVersion)
.detail("CurrentVersion", recentReadVersion)
.detail("SnapshotIntervalSeconds", snapshotIntervalSeconds);
.detail("SnapshotIntervalSeconds", snapshotIntervalSeconds)
.detail("DispatchTimeSeconds", timer() - startTime);
Params.snapshotFinished().set(task, true);
}

@@ -2428,6 +2430,7 @@ namespace fileBackup {

state RestoreConfig restore(task);
restore.stateEnum().set(tr, ERestoreState::COMPLETED);
tr->atomicOp(metadataVersionKey, metadataVersionRequiredValue, MutationRef::SetVersionstampedValue);
// Clear the file map now since it could be huge.
restore.fileSet().clear(tr);

@@ -3326,6 +3329,25 @@ namespace fileBackup {
}
}

tr->reset();
loop {
try {
tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
tr->setOption(FDBTransactionOptions::LOCK_AWARE);
Version destVersion = wait(tr->getReadVersion());
TraceEvent("FileRestoreVersionUpgrade").detail("RestoreVersion", restoreVersion).detail("Dest", destVersion);
if (destVersion <= restoreVersion) {
TEST(true); // Forcing restored cluster to higher version
tr->set(minRequiredCommitVersionKey, BinaryWriter::toValue(restoreVersion+1, Unversioned()));
wait(tr->commit());
} else {
break;
}
} catch( Error &e ) {
wait(tr->onError(e));
}
}

Optional<RestorableFileSet> restorable = wait(bc->getRestoreSet(restoreVersion));

if(!restorable.present())
@@ -3875,6 +3897,8 @@ public:
statusText += "The previous backup on tag `" + tagName + "' at " + bc->getURL() + " " + backupStatus + ".\n";
break;
}
statusText += format("BackupUID: %s\n", uidAndAbortedFlag.get().first.toString().c_str());
statusText += format("BackupURL: %s\n", bc->getURL().c_str());

if(snapshotProgress) {
state int64_t snapshotInterval;

@@ -31,7 +31,7 @@ namespace HTTP {
o.reserve(s.size() * 3);
char buf[4];
for(auto c : s)
if(std::isalnum(c) || c == '?' || c == '/' || c == '-' || c == '_' || c == '.')
if(std::isalnum(c) || c == '?' || c == '/' || c == '-' || c == '_' || c == '.' || c == ',' || c == ':')
o.append(&c, 1);
else {
sprintf(buf, "%%%.02X", c);
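
The whitelist change above lets ',' and ':' pass through unescaped, which keeps encoded header parameters readable in URLs. A standalone version of the same loop (snprintf in place of sprintf; otherwise the logic mirrors the diff):

#include <cctype>
#include <cstdio>
#include <string>

std::string urlEncode(const std::string &s) {
    std::string o;
    o.reserve(s.size() * 3);
    char buf[4];
    for (unsigned char c : s) {
        // Pass-through set now includes ',' and ':'
        if (std::isalnum(c) || c == '?' || c == '/' || c == '-' || c == '_' ||
            c == '.' || c == ',' || c == ':')
            o.append(1, (char)c);
        else {
            snprintf(buf, sizeof(buf), "%%%02X", c);  // percent-encode the rest
            o.append(buf);
        }
    }
    return o;
}

int main() { printf("%s\n", urlEncode("X-Trace:a b,c").c_str()); }
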
@@ -394,8 +394,15 @@ namespace HTTP {
event.detail("RequestIDReceived", responseID);
if(requestID != responseID) {
err = http_bad_request_id();

// Log a non-debug error
TraceEvent(SevError, "HTTPRequestFailedIDMismatch")
Severity sev = SevError;
// If the response code is 5xx (server error) and the responseID is empty then just warn
if(responseID.empty() && r->code >= 500 && r->code < 600) {
sev = SevWarnAlways;
}

TraceEvent(sev, "HTTPRequestFailedIDMismatch")
.detail("DebugID", conn->getDebugID())
.detail("RemoteAddress", conn->getPeerAddress())
.detail("Verb", verb)

@@ -58,6 +58,7 @@ ClientKnobs::ClientKnobs(bool randomize) {
init( SYSTEM_KEY_SIZE_LIMIT, 3e4 );
init( VALUE_SIZE_LIMIT, 1e5 );
init( SPLIT_KEY_SIZE_LIMIT, KEY_SIZE_LIMIT/2 ); if( randomize && BUGGIFY ) SPLIT_KEY_SIZE_LIMIT = KEY_SIZE_LIMIT - serverKeysPrefixFor(UID()).size() - 1;
init( METADATA_VERSION_CACHE_SIZE, 1000 );

init( MAX_BATCH_SIZE, 20 ); if( randomize && BUGGIFY ) MAX_BATCH_SIZE = 1; // Note that SERVER_KNOBS->START_TRANSACTION_MAX_BUDGET_SIZE is set to match this value
init( GRV_BATCH_TIMEOUT, 0.005 ); if( randomize && BUGGIFY ) GRV_BATCH_TIMEOUT = 0.1;
@@ -106,7 +107,7 @@ ClientKnobs::ClientKnobs(bool randomize) {
init( BACKUP_LOCK_BYTES, 1e8 );
init( BACKUP_RANGE_TIMEOUT, TASKBUCKET_TIMEOUT_VERSIONS/CORE_VERSIONSPERSECOND/2.0 );
init( BACKUP_RANGE_MINWAIT, std::max(1.0, BACKUP_RANGE_TIMEOUT/2.0));
init( BACKUP_SNAPSHOT_DISPATCH_INTERVAL_SEC, 60 * 60 ); // 1 hour
init( BACKUP_SNAPSHOT_DISPATCH_INTERVAL_SEC, 10 * 60 ); // 10 minutes
init( BACKUP_DEFAULT_SNAPSHOT_INTERVAL_SEC, 3600 * 24 * 10); // 10 days
init( BACKUP_SHARD_TASK_LIMIT, 1000 ); if( randomize && BUGGIFY ) BACKUP_SHARD_TASK_LIMIT = 4;
init( BACKUP_AGGREGATE_POLL_RATE_UPDATE_INTERVAL, 60);

@@ -56,6 +56,7 @@ public:
int64_t SYSTEM_KEY_SIZE_LIMIT;
int64_t VALUE_SIZE_LIMIT;
int64_t SPLIT_KEY_SIZE_LIMIT;
int METADATA_VERSION_CACHE_SIZE;

int MAX_BATCH_SIZE;
double GRV_BATCH_TIMEOUT;

@@ -71,14 +71,15 @@ struct MasterProxyInterface {
struct CommitID {
Version version; // returns invalidVersion if transaction conflicts
uint16_t txnBatchId;
Optional<Value> metadataVersion;

template <class Ar>
void serialize(Ar& ar) {
serializer(ar, version, txnBatchId);
serializer(ar, version, txnBatchId, metadataVersion);
}

CommitID() : version(invalidVersion), txnBatchId(0) {}
CommitID( Version version, uint16_t txnBatchId ) : version(version), txnBatchId(txnBatchId) {}
CommitID( Version version, uint16_t txnBatchId, const Optional<Value>& metadataVersion ) : version(version), txnBatchId(txnBatchId), metadataVersion(metadataVersion) {}
};

struct CommitTransactionRequest : TimedRequest {
@@ -120,10 +121,11 @@ static inline int getBytes( CommitTransactionRequest const& r ) {
struct GetReadVersionReply {
Version version;
bool locked;
Optional<Value> metadataVersion;

template <class Ar>
void serialize(Ar& ar) {
serializer(ar, version, locked);
serializer(ar, version, locked, metadataVersion);
}
};
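
Both reply structs gain an Optional metadataVersion appended after all existing fields in serialization order. A standalone sketch of that append-at-the-end layout using a hand-rolled byte writer (flow's serializer differs in detail; this only illustrates the ordering):

#include <cstddef>
#include <cstdio>
#include <optional>
#include <string>
#include <vector>

struct Writer {
    std::vector<unsigned char> bytes;
    void put(const void *p, size_t n) {
        auto b = (const unsigned char *)p;
        bytes.insert(bytes.end(), b, b + n);
    }
};

// Existing fields first, the new optional field last, preserving field order.
void serializeReply(Writer &w, long long version, bool locked,
                    const std::optional<std::string> &metadataVersion) {
    w.put(&version, sizeof(version));
    w.put(&locked, sizeof(locked));
    bool present = metadataVersion.has_value();   // new field appended at the end
    w.put(&present, sizeof(present));
    if (present) {
        size_t n = metadataVersion->size();
        w.put(&n, sizeof(n));
        w.put(metadataVersion->data(), n);
    }
}

int main() {
    Writer w;
    serializeReply(w, 42, false, std::string("\x00\x00", 2));
    printf("%zu bytes\n", w.bytes.size());
}
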
@@ -176,12 +176,6 @@ ClusterConnectionString::ClusterConnectionString( std::string const& connectionS
coord = NetworkAddress::parseList(addrs);
ASSERT( coord.size() > 0 ); // parseList() always returns at least one address if it doesn't throw

bool isTLS = coord[0].isTLS();
for( auto const& server : coord ) {
if( server.isTLS() != isTLS )
throw connection_string_invalid();
}

std::sort( coord.begin(), coord.end() );
// Check that there are no duplicate addresses
if ( std::unique( coord.begin(), coord.end() ) != coord.end() )

@@ -514,9 +514,10 @@ DatabaseContext::DatabaseContext(
transactionReadVersions(0), transactionLogicalReads(0), transactionPhysicalReads(0), transactionCommittedMutations(0), transactionCommittedMutationBytes(0),
transactionsCommitStarted(0), transactionsCommitCompleted(0), transactionsTooOld(0), transactionsFutureVersions(0), transactionsNotCommitted(0),
transactionsMaybeCommitted(0), transactionsResourceConstrained(0), outstandingWatches(0),
latencies(1000), readLatencies(1000), commitLatencies(1000), GRVLatencies(1000), mutationsPerCommit(1000), bytesPerCommit(1000),
latencies(1000), readLatencies(1000), commitLatencies(1000), GRVLatencies(1000), mutationsPerCommit(1000), bytesPerCommit(1000), mvCacheInsertLocation(0),
healthMetricsLastUpdated(0), detailedHealthMetricsLastUpdated(0)
{
metadataVersionCache.resize(CLIENT_KNOBS->METADATA_VERSION_CACHE_SIZE);
maxOutstandingWatches = CLIENT_KNOBS->DEFAULT_MAX_OUTSTANDING_WATCHES;

logger = databaseLogger( this );
@@ -1934,6 +1935,7 @@ void Transaction::operator=(Transaction&& r) BOOST_NOEXCEPT {
cx = std::move(r.cx);
tr = std::move(r.tr);
readVersion = std::move(r.readVersion);
metadataVersion = std::move(r.metadataVersion);
extraConflictRanges = std::move(r.extraConflictRanges);
commitResult = std::move(r.commitResult);
committing = std::move(r.committing);
@@ -1980,6 +1982,36 @@ Future<Optional<Value>> Transaction::get( const Key& key, bool snapshot ) {
if( !snapshot )
tr.transaction.read_conflict_ranges.push_back(tr.arena, singleKeyRange(key, tr.arena));

if(key == metadataVersionKey) {
if(!ver.isReady() || metadataVersion.isSet()) {
return metadataVersion.getFuture();
} else {
if(ver.isError()) return ver.getError();
if(ver.get() == cx->metadataVersionCache[cx->mvCacheInsertLocation].first) {
return cx->metadataVersionCache[cx->mvCacheInsertLocation].second;
}

Version v = ver.get();
int hi = cx->mvCacheInsertLocation;
int lo = (cx->mvCacheInsertLocation+1)%cx->metadataVersionCache.size();

while(hi!=lo) {
int cu = hi > lo ? (hi + lo)/2 : ((hi + cx->metadataVersionCache.size() + lo)/2)%cx->metadataVersionCache.size();
if(v == cx->metadataVersionCache[cu].first) {
return cx->metadataVersionCache[cu].second;
}
if(cu == lo) {
break;
}
if(v < cx->metadataVersionCache[cu].first) {
hi = cu;
} else {
lo = (cu+1)%cx->metadataVersionCache.size();
}
}
}
}

return getValue( ver, key, cx, info, trLogInfo );
}
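
The lookup above treats metadataVersionCache as a ring buffer ordered by version, with the newest entry at mvCacheInsertLocation, and binary-searches across the wrap point. A standalone sketch of the same structure, using plain std:: types in place of Version and Optional<Value> (illustrative only):

#include <cstdio>
#include <optional>
#include <string>
#include <utility>
#include <vector>

struct VersionCache {
    std::vector<std::pair<long long, std::string>> entries;
    int insertLocation = 0;
    explicit VersionCache(int n) : entries(n, std::make_pair(-1LL, std::string())) {}

    // Only advance for strictly newer versions, as the commit/GRV paths do.
    void insert(long long v, std::string value) {
        if (v > entries[insertLocation].first) {
            insertLocation = (insertLocation + 1) % (int)entries.size();
            entries[insertLocation] = {v, std::move(value)};
        }
    }

    // Binary search the circular window from oldest (insertLocation+1) to newest.
    std::optional<std::string> find(long long v) const {
        int n = (int)entries.size();
        int hi = insertLocation;
        int lo = (insertLocation + 1) % n;
        while (hi != lo) {
            int cu = hi > lo ? (hi + lo) / 2 : ((hi + n + lo) / 2) % n;
            if (entries[cu].first == v) return entries[cu].second;
            if (cu == lo) break;
            if (v < entries[cu].first) hi = cu;
            else lo = (cu + 1) % n;
        }
        return std::nullopt;
    }
};

int main() {
    VersionCache c(8);
    c.insert(100, "a"); c.insert(200, "b"); c.insert(300, "c");
    auto r = c.find(200);
    printf("%s\n", r ? r->c_str() : "miss");
}
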
@@ -2285,6 +2317,7 @@ double Transaction::getBackoff(int errCode) {
void Transaction::reset() {
tr = CommitTransactionRequest();
readVersion = Future<Version>();
metadataVersion = Promise<Optional<Key>>();
extraConflictRanges.clear();
versionstampPromise = Promise<Standalone<StringRef>>();
commitResult = Promise<Void>();
@@ -2500,6 +2533,10 @@ ACTOR static Future<Void> tryCommit( Database cx, Reference<TransactionLogInfo>
if (info.debugID.present())
TraceEvent(interval.end()).detail("CommittedVersion", v);
*pCommittedVersion = v;
if(v > cx->metadataVersionCache[cx->mvCacheInsertLocation].first) {
cx->mvCacheInsertLocation = (cx->mvCacheInsertLocation + 1)%cx->metadataVersionCache.size();
cx->metadataVersionCache[cx->mvCacheInsertLocation] = std::make_pair(v, ci.metadataVersion);
}

Standalone<StringRef> ret = makeString(10);
placeVersionstamp(mutateString(ret), v, ci.txnBatchId);
@@ -2874,7 +2911,7 @@ ACTOR Future<Void> readVersionBatcher( DatabaseContext *cx, FutureStream< std::p
}
}

ACTOR Future<Version> extractReadVersion(DatabaseContext* cx, Reference<TransactionLogInfo> trLogInfo, Future<GetReadVersionReply> f, bool lockAware, double startTime) {
ACTOR Future<Version> extractReadVersion(DatabaseContext* cx, Reference<TransactionLogInfo> trLogInfo, Future<GetReadVersionReply> f, bool lockAware, double startTime, Promise<Optional<Value>> metadataVersion) {
GetReadVersionReply rep = wait(f);
double latency = now() - startTime;
cx->GRVLatencies.addSample(latency);
@@ -2883,6 +2920,12 @@ ACTOR Future<Version> extractReadVersion(DatabaseContext* cx, Reference<Transact
if(rep.locked && !lockAware)
throw database_locked();

if(rep.version > cx->metadataVersionCache[cx->mvCacheInsertLocation].first) {
cx->mvCacheInsertLocation = (cx->mvCacheInsertLocation + 1)%cx->metadataVersionCache.size();
cx->metadataVersionCache[cx->mvCacheInsertLocation] = std::make_pair(rep.version, rep.metadataVersion);
}

metadataVersion.send(rep.metadataVersion);
return rep.version;
}

@@ -2898,7 +2941,7 @@ Future<Version> Transaction::getReadVersion(uint32_t flags) {
Promise<GetReadVersionReply> p;
batcher.stream.send( std::make_pair( p, info.debugID ) );
startTime = now();
readVersion = extractReadVersion( cx.getPtr(), trLogInfo, p.getFuture(), options.lockAware, startTime);
readVersion = extractReadVersion( cx.getPtr(), trLogInfo, p.getFuture(), options.lockAware, startTime, metadataVersion);
}
return readVersion;
}

@@ -317,6 +317,7 @@ private:
Version committedVersion;
CommitTransactionRequest tr;
Future<Version> readVersion;
Promise<Optional<Value>> metadataVersion;
vector<Future<std::pair<Key, Key>>> extraConflictRanges;
Promise<Void> commitResult;
Future<Void> committing;

@@ -1046,7 +1046,7 @@ public:
return Void();
}

ryw->writeRangeToNativeTransaction(KeyRangeRef(StringRef(), ryw->getMaxWriteKey()));
ryw->writeRangeToNativeTransaction(KeyRangeRef(StringRef(), allKeys.end));

auto conflictRanges = ryw->readConflicts.ranges();
for( auto iter = conflictRanges.begin(); iter != conflictRanges.end(); ++iter ) {
@@ -1222,7 +1222,7 @@ Future< Optional<Value> > ReadYourWritesTransaction::get( const Key& key, bool s
if( resetPromise.isSet() )
return resetPromise.getFuture().getError();

if(key >= getMaxReadKey())
if(key >= getMaxReadKey() && key != metadataVersionKey)
return key_outside_legal_range();

//There are no keys in the database with size greater than KEY_SIZE_LIMIT
@@ -1499,8 +1499,14 @@ void ReadYourWritesTransaction::atomicOp( const KeyRef& key, const ValueRef& ope
throw used_during_commit();
}

if(key >= getMaxWriteKey())
if (key == metadataVersionKey) {
if(operationType != MutationRef::SetVersionstampedValue || operand != metadataVersionRequiredValue) {
throw client_invalid_operation();
}
}
else if(key >= getMaxWriteKey()) {
throw key_outside_legal_range();
}

if(!isValidMutationType(operationType) || !isAtomicOp((MutationRef::Type) operationType))
throw invalid_mutation_type();
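
The guard above admits exactly one mutation on the metadata version key: SetVersionstampedValue with metadataVersionRequiredValue (14 zero bytes, per the SystemData definition later in this diff) as the operand. A standalone sketch of just the check (hypothetical enum, not FDB's MutationRef):

#include <cstdio>
#include <stdexcept>
#include <string>

enum class OpType { SetVersionstampedValue, Add, Max };
const std::string kRequiredOperand(14, '\0');   // mirrors metadataVersionRequiredValue

// Reject anything other than the single permitted (op, operand) pair.
void checkMetadataVersionOp(OpType t, const std::string &operand) {
    if (t != OpType::SetVersionstampedValue || operand != kRequiredOperand)
        throw std::invalid_argument("client_invalid_operation");
}

int main() {
    checkMetadataVersionOp(OpType::SetVersionstampedValue, kRequiredOperand);
    printf("allowed\n");
    try {
        checkMetadataVersionOp(OpType::Add, "\x01");
    } catch (const std::exception &e) {
        printf("rejected: %s\n", e.what());
    }
}
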
@@ -1565,7 +1571,10 @@ void ReadYourWritesTransaction::set( const KeyRef& key, const ValueRef& value )
BinaryReader::fromStringRef<ClientWorkerInterface>(value, IncludeVersion()).reboot.send( RebootRequest(false, true) );
return;
}

if (key == metadataVersionKey) {
throw client_invalid_operation();
}

bool addWriteConflict = !options.getAndResetWriteConflictDisabled();

if(checkUsedDuringCommit()) {
@@ -1665,7 +1674,7 @@ Future<Void> ReadYourWritesTransaction::watch(const Key& key) {
if( options.readYourWritesDisabled )
return watches_disabled();

if(key >= allKeys.end || (key >= getMaxReadKey() && tr.apiVersionAtLeast(300)))
if(key >= allKeys.end || (key >= getMaxReadKey() && key != metadataVersionKey && tr.apiVersionAtLeast(300)))
return key_outside_legal_range();

if (key.size() > (key.startsWith(systemKeys.begin) ? CLIENT_KNOBS->SYSTEM_KEY_SIZE_LIMIT : CLIENT_KNOBS->KEY_SIZE_LIMIT))

@@ -326,6 +326,7 @@ const KeyRef JSONSchemas::statusSchema = LiteralStringRef(R"statusSchema(
"$enum":[
"unreachable_master_worker",
"unreachable_dataDistributor_worker",
"unreachable_ratekeeper_worker",
"unreadable_configuration",
"full_replication_timeout",
"client_issues",

@@ -586,6 +586,8 @@ std::pair<MetricNameRef, KeyRef> decodeMetricConfKey( KeyRef const& prefix, KeyR
const KeyRef maxUIDKey = LiteralStringRef("\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff");

const KeyRef databaseLockedKey = LiteralStringRef("\xff/dbLocked");
const KeyRef metadataVersionKey = LiteralStringRef("\xff/metadataVersion");
const KeyRef metadataVersionRequiredValue = LiteralStringRef("\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00");
const KeyRef mustContainSystemMutationsKey = LiteralStringRef("\xff/mustContainSystemMutations");

const KeyRangeRef monitorConfKeys(

@@ -265,6 +265,8 @@ extern const KeyRef metricConfPrefix;
extern const KeyRef maxUIDKey;

extern const KeyRef databaseLockedKey;
extern const KeyRef metadataVersionKey;
extern const KeyRef metadataVersionRequiredValue;
extern const KeyRef mustContainSystemMutationsKey;

// Key range reserved for storing changes to monitor conf files

@@ -186,7 +186,7 @@
<Optimization>Disabled</Optimization>
<BasicRuntimeChecks>EnableFastChecks</BasicRuntimeChecks>
<RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=0x0502;WINVER=0x0502;NTDDI_VERSION=0x05020000;_DEBUG;_HAS_ITERATOR_DEBUGGING=0;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=0x0502;WINVER=0x0502;BOOST_ALL_NO_LIB;NTDDI_VERSION=0x05020000;_DEBUG;_HAS_ITERATOR_DEBUGGING=0;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<MultiProcessorCompilation>true</MultiProcessorCompilation>
<AdditionalOptions>/bigobj @../flow/no_intellisense.opt %(AdditionalOptions)</AdditionalOptions>
@@ -209,7 +209,7 @@
<Optimization>Full</Optimization>
<RuntimeLibrary>MultiThreaded</RuntimeLibrary>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=0x0502;WINVER=0x0502;NTDDI_VERSION=0x05020000;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=0x0502;WINVER=0x0502;BOOST_ALL_NO_LIB;NTDDI_VERSION=0x05020000;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<EnableEnhancedInstructionSet>NotSet</EnableEnhancedInstructionSet>
<EnablePREfast>false</EnablePREfast>

@@ -251,10 +251,10 @@ description is not currently required but encouraged.
description="Performs a little-endian comparison of byte strings. If the existing value in the database is not present, then ``param`` is stored in the database. If the existing value in the database is shorter than ``param``, it is first extended to the length of ``param`` with zero bytes. If ``param`` is shorter than the existing value in the database, the existing value is truncated to match the length of ``param``. The smaller of the two values is then stored in the database."/>
<Option name="set_versionstamped_key" code="14"
paramType="Bytes" paramDescription="value to which to set the transformed key"
description="Transforms ``key`` using a versionstamp for the transaction. Sets the transformed key in the database to ``param``. The key is transformed by removing the final four bytes from the key and reading those as a little-Endian 32-bit integer to get a position ``pos``. The 10 bytes of the key from ``pos`` to ``pos + 10`` are replaced with the versionstamp of the transaction used. The first byte of the key is position 0. A versionstamp is a 10 byte, unique, monotonically (but not sequentially) increasing value for each committed transaction. The first 8 bytes are the committed version of the database (serialized in big-Endian order). The last 2 bytes are monotonic in the serialization order for transactions. WARNING: At this time, versionstamps are compatible with the Tuple layer only in the Java and Python bindings. Also, note that prior to API version 520, the offset was computed from only the final two bytes rather than the final four bytes." />
description="Transforms ``key`` using a versionstamp for the transaction. Sets the transformed key in the database to ``param``. The key is transformed by removing the final four bytes from the key and reading those as a little-Endian 32-bit integer to get a position ``pos``. The 10 bytes of the key from ``pos`` to ``pos + 10`` are replaced with the versionstamp of the transaction used. The first byte of the key is position 0. A versionstamp is a 10 byte, unique, monotonically (but not sequentially) increasing value for each committed transaction. The first 8 bytes are the committed version of the database (serialized in big-Endian order). The last 2 bytes are monotonic in the serialization order for transactions. WARNING: At this time, versionstamps are compatible with the Tuple layer only in the Java, Python, and Go bindings. Also, note that prior to API version 520, the offset was computed from only the final two bytes rather than the final four bytes." />
<Option name="set_versionstamped_value" code="15"
paramType="Bytes" paramDescription="value to versionstamp and set"
description="Transforms ``param`` using a versionstamp for the transaction. Sets the ``key`` given to the transformed ``param``. The parameter is transformed by removing the final four bytes from ``param`` and reading those as a little-Endian 32-bit integer to get a position ``pos``. The 10 bytes of the parameter from ``pos`` to ``pos + 10`` are replaced with the versionstamp of the transaction used. The first byte of the parameter is position 0. A versionstamp is a 10 byte, unique, monotonically (but not sequentially) increasing value for each committed transaction. The first 8 bytes are the committed version of the database (serialized in big-Endian order). The last 2 bytes are monotonic in the serialization order for transactions. WARNING: At this time, versionstamps are compatible with the Tuple layer only in the Java and Python bindings. Also, note that prior to API version 520, the versionstamp was always placed at the beginning of the parameter rather than computing an offset." />
description="Transforms ``param`` using a versionstamp for the transaction. Sets the ``key`` given to the transformed ``param``. The parameter is transformed by removing the final four bytes from ``param`` and reading those as a little-Endian 32-bit integer to get a position ``pos``. The 10 bytes of the parameter from ``pos`` to ``pos + 10`` are replaced with the versionstamp of the transaction used. The first byte of the parameter is position 0. A versionstamp is a 10 byte, unique, monotonically (but not sequentially) increasing value for each committed transaction. The first 8 bytes are the committed version of the database (serialized in big-Endian order). The last 2 bytes are monotonic in the serialization order for transactions. WARNING: At this time, versionstamps are compatible with the Tuple layer only in the Java, Python, and Go bindings. Also, note that prior to API version 520, the versionstamp was always placed at the beginning of the parameter rather than computing an offset." />
<Option name="byte_min" code="16"
paramType="Bytes" paramDescription="value to check against database value"
description="Performs lexicographic comparison of byte strings. If the existing value in the database is not present, then ``param`` is stored. Otherwise the smaller of the two values is then stored in the database."/>

@@ -76,4 +76,4 @@
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
<ImportGroup Label="ExtensionTargets">
</ImportGroup>
</Project>
</Project>

@@ -164,26 +164,35 @@ ProcessClass::Fitness ProcessClass::machineClassFitness( ClusterRole role ) cons
}
case ProcessClass::DataDistributor:
switch( _class ) {
case ProcessClass::DataDistributorClass:
return ProcessClass::BestFit;
case ProcessClass::StatelessClass:
return ProcessClass::GoodFit;
case ProcessClass::MasterClass:
return ProcessClass::OkayFit;
case ProcessClass::ResolutionClass:
return ProcessClass::OkayFit;
case ProcessClass::TransactionClass:
return ProcessClass::OkayFit;
case ProcessClass::ProxyClass:
return ProcessClass::OkayFit;
case ProcessClass::UnsetClass:
return ProcessClass::UnsetFit;
case ProcessClass::CoordinatorClass:
return ProcessClass::NeverAssign;
case ProcessClass::TesterClass:
return ProcessClass::NeverAssign;
default:
return ProcessClass::WorstFit;
case ProcessClass::DataDistributorClass:
return ProcessClass::BestFit;
case ProcessClass::StatelessClass:
return ProcessClass::GoodFit;
case ProcessClass::MasterClass:
return ProcessClass::OkayFit;
case ProcessClass::UnsetClass:
return ProcessClass::UnsetFit;
case ProcessClass::CoordinatorClass:
case ProcessClass::TesterClass:
return ProcessClass::NeverAssign;
default:
return ProcessClass::WorstFit;
}
case ProcessClass::RateKeeper:
switch( _class ) {
case ProcessClass::RateKeeperClass:
return ProcessClass::BestFit;
case ProcessClass::StatelessClass:
return ProcessClass::GoodFit;
case ProcessClass::MasterClass:
return ProcessClass::OkayFit;
case ProcessClass::UnsetClass:
return ProcessClass::UnsetFit;
case ProcessClass::CoordinatorClass:
case ProcessClass::TesterClass:
return ProcessClass::NeverAssign;
default:
return ProcessClass::WorstFit;
}
default:
return ProcessClass::NeverAssign;

@@ -26,9 +26,9 @@

struct ProcessClass {
// This enum is stored in restartInfo.ini for upgrade tests, so be very careful about changing the existing items!
enum ClassType { UnsetClass, StorageClass, TransactionClass, ResolutionClass, TesterClass, ProxyClass, MasterClass, StatelessClass, LogClass, ClusterControllerClass, LogRouterClass, DataDistributorClass, CoordinatorClass, InvalidClass = -1 };
enum ClassType { UnsetClass, StorageClass, TransactionClass, ResolutionClass, TesterClass, ProxyClass, MasterClass, StatelessClass, LogClass, ClusterControllerClass, LogRouterClass, DataDistributorClass, CoordinatorClass, RateKeeperClass, InvalidClass = -1 };
enum Fitness { BestFit, GoodFit, UnsetFit, OkayFit, WorstFit, ExcludeFit, NeverAssign }; //cannot be larger than 7 because of leader election mask
enum ClusterRole { Storage, TLog, Proxy, Master, Resolver, LogRouter, ClusterController, DataDistributor, NoRole };
enum ClusterRole { Storage, TLog, Proxy, Master, Resolver, LogRouter, ClusterController, DataDistributor, RateKeeper, NoRole };
enum ClassSource { CommandLineSource, AutoSource, DBSource, InvalidSource = -1 };
int16_t _class;
int16_t _source;
@@ -50,6 +50,7 @@ public:
else if (s=="cluster_controller") _class = ClusterControllerClass;
else if (s=="data_distributor") _class = DataDistributorClass;
else if (s=="coordinator") _class = CoordinatorClass;
else if (s=="ratekeeper") _class = RateKeeperClass;
else _class = InvalidClass;
}

@@ -67,6 +68,7 @@ public:
else if (classStr=="cluster_controller") _class = ClusterControllerClass;
else if (classStr=="data_distributor") _class = DataDistributorClass;
else if (classStr=="coordinator") _class = CoordinatorClass;
else if (classStr=="ratekeeper") _class = RateKeeperClass;
else _class = InvalidClass;

if (sourceStr=="command_line") _source = CommandLineSource;
@@ -99,6 +101,7 @@ public:
case ClusterControllerClass: return "cluster_controller";
case DataDistributorClass: return "data_distributor";
case CoordinatorClass: return "coordinator";
case RateKeeperClass: return "ratekeeper";
default: return "invalid";
}
}

@@ -177,7 +177,7 @@
<Optimization>Disabled</Optimization>
<BasicRuntimeChecks>EnableFastChecks</BasicRuntimeChecks>
<RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=0x0502;WINVER=0x0502;NTDDI_VERSION=0x05020000;_DEBUG;_HAS_ITERATOR_DEBUGGING=0;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=0x0502;WINVER=0x0502;BOOST_ALL_NO_LIB;NTDDI_VERSION=0x05020000;_DEBUG;_HAS_ITERATOR_DEBUGGING=0;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<MultiProcessorCompilation>true</MultiProcessorCompilation>
<AdditionalOptions>/bigobj @../flow/no_intellisense.opt %(AdditionalOptions)</AdditionalOptions>
@@ -201,7 +201,7 @@
<Optimization>Full</Optimization>
<RuntimeLibrary>MultiThreaded</RuntimeLibrary>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=0x0502;WINVER=0x0502;NTDDI_VERSION=0x05020000;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;FDB_CLEAN_BUILD;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=0x0502;WINVER=0x0502;BOOST_ALL_NO_LIB;NTDDI_VERSION=0x05020000;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;FDB_CLEAN_BUILD;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<EnableEnhancedInstructionSet>NotSet</EnableEnhancedInstructionSet>
<EnablePREfast>false</EnablePREfast>

@@ -99,6 +99,7 @@ public:
case ProcessClass::LogRouterClass: return false;
case ProcessClass::ClusterControllerClass: return false;
case ProcessClass::DataDistributorClass: return false;
case ProcessClass::RateKeeperClass: return false;
default: return false;
}
}

@@ -178,7 +178,7 @@ static void applyMetadataMutations(UID const& dbgid, Arena &arena, VectorRef<Mut
}
}
}
} else if( m.param1 == databaseLockedKey || m.param1 == mustContainSystemMutationsKey || m.param1.startsWith(applyMutationsBeginRange.begin) ||
} else if( m.param1 == databaseLockedKey || m.param1 == metadataVersionKey || m.param1 == mustContainSystemMutationsKey || m.param1.startsWith(applyMutationsBeginRange.begin) ||
m.param1.startsWith(applyMutationsAddPrefixRange.begin) || m.param1.startsWith(applyMutationsRemovePrefixRange.begin) || m.param1.startsWith(tagLocalityListPrefix) || m.param1.startsWith(serverTagHistoryPrefix) ) {
if(!initialCommit) txnStateStore->set(KeyValueRef(m.param1, m.param2));
}
@@ -225,6 +225,9 @@ static void applyMetadataMutations(UID const& dbgid, Arena &arena, VectorRef<Mut
for (auto& logRange : vecBackupKeys->modify(KeyRangeRef(logRangeBegin, logRangeEnd))) {
logRange->value().insert(logDestination);
}
for (auto& logRange : vecBackupKeys->modify(singleKeyRange(metadataVersionKey))) {
logRange->value().insert(logDestination);
}

// Log the modification
TraceEvent("LogRangeAdd").detail("LogRanges", vecBackupKeys->size()).detail("MutationKey", printable(m.param1))
@@ -345,6 +348,9 @@ static void applyMetadataMutations(UID const& dbgid, Arena &arena, VectorRef<Mut
if (range.contains(databaseLockedKey)) {
if(!initialCommit) txnStateStore->clear(singleKeyRange(databaseLockedKey));
}
if (range.contains(metadataVersionKey)) {
if(!initialCommit) txnStateStore->clear(singleKeyRange(metadataVersionKey));
}
if (range.contains(mustContainSystemMutationsKey)) {
if(!initialCommit) txnStateStore->clear(singleKeyRange(mustContainSystemMutationsKey));
}
@@ -415,6 +421,21 @@ static void applyMetadataMutations(UID const& dbgid, Arena &arena, VectorRef<Mut
// Remove the backup name from the range
logRangeMap.erase(logDestination);
}

bool foundKey = false;
for(auto &it : vecBackupKeys->intersectingRanges(normalKeys)) {
if(it.value().count(logDestination) > 0) {
foundKey = true;
break;
}
}
if(!foundKey) {
auto logRanges = vecBackupKeys->modify(singleKeyRange(metadataVersionKey));
for (auto logRange : logRanges) {
auto &logRangeMap = logRange->value();
logRangeMap.erase(logDestination);
}
}
}

// Coalesce the entire range
@@ -56,7 +56,7 @@ set(FDBSERVER_SRCS
QuietDatabase.actor.cpp
QuietDatabase.h
Ratekeeper.actor.cpp
Ratekeeper.h
RatekeeperInterface.h
RecoveryState.h
Restore.actor.cpp
RestoreInterface.h

@@ -3,7 +3,7 @@
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
* Copyright 2013-2019 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -30,6 +30,7 @@
#include "fdbserver/LogSystemConfig.h"
#include "fdbserver/WaitFailure.h"
#include "fdbserver/ClusterRecruitmentInterface.h"
#include "fdbserver/RatekeeperInterface.h"
#include "fdbserver/ServerDBInfo.h"
#include "fdbserver/Status.h"
#include "fdbserver/LatencyBandConfig.h"
@@ -108,17 +109,28 @@ public:
{
}

void setDistributor(const DataDistributorInterface& distributorInterf) {
void setDistributor(const DataDistributorInterface& interf) {
ServerDBInfo newInfo = serverInfo->get();
newInfo.id = g_random->randomUniqueID();
newInfo.distributor = distributorInterf;
newInfo.distributor = interf;
serverInfo->set( newInfo );
}

void clearDistributor() {
void setRatekeeper(const RatekeeperInterface& interf) {
ServerDBInfo newInfo = serverInfo->get();
newInfo.id = g_random->randomUniqueID();
newInfo.distributor = Optional<DataDistributorInterface>();
newInfo.ratekeeper = interf;
serverInfo->set( newInfo );
}

void clearInterf(ProcessClass::ClassType t) {
ServerDBInfo newInfo = serverInfo->get();
newInfo.id = g_random->randomUniqueID();
if (t == ProcessClass::DataDistributorClass) {
newInfo.distributor = Optional<DataDistributorInterface>();
} else if (t == ProcessClass::RateKeeperClass) {
newInfo.ratekeeper = Optional<RatekeeperInterface>();
}
serverInfo->set( newInfo );
}
};
@@ -534,6 +546,9 @@ public:
if (db.serverInfo->get().distributor.present()) {
(*id_used)[db.serverInfo->get().distributor.get().locality.processId()]++;
}
if (db.serverInfo->get().ratekeeper.present()) {
(*id_used)[db.serverInfo->get().ratekeeper.get().locality.processId()]++;
}
}

RecruitRemoteFromConfigurationReply findRemoteWorkersForConfiguration( RecruitRemoteFromConfigurationRequest const& req ) {
@@ -931,6 +946,9 @@ public:
if (db.serverInfo->get().distributor.present()) {
id_used[db.serverInfo->get().distributor.get().locality.processId()]++;
}
if (db.serverInfo->get().ratekeeper.present()) {
id_used[db.serverInfo->get().ratekeeper.get().locality.processId()]++;
}
WorkerFitnessInfo mworker = getWorkerForRoleInDatacenter(clusterControllerDcId, ProcessClass::Master, ProcessClass::NeverAssign, db.config, id_used, true);

if ( oldMasterFit < mworker.fitness )
@@ -1116,6 +1134,9 @@ ACTOR Future<Void> clusterWatchDatabase( ClusterControllerData* cluster, Cluster
if (cluster->db.serverInfo->get().distributor.present()) {
id_used[cluster->db.serverInfo->get().distributor.get().locality.processId()]++;
}
if (cluster->db.serverInfo->get().ratekeeper.present()) {
id_used[cluster->db.serverInfo->get().ratekeeper.get().locality.processId()]++;
}
state WorkerFitnessInfo masterWorker = cluster->getWorkerForRoleInDatacenter(cluster->clusterControllerDcId, ProcessClass::Master, ProcessClass::NeverAssign, db->config, id_used);
if( ( masterWorker.worker.processClass.machineClassFitness( ProcessClass::Master ) > SERVER_KNOBS->EXPECTED_MASTER_FITNESS || masterWorker.worker.interf.locality.processId() == cluster->clusterControllerProcessId )
&& now() - cluster->startTime < SERVER_KNOBS->WAIT_FOR_GOOD_RECRUITMENT_DELAY ) {
@@ -1151,6 +1172,7 @@ ACTOR Future<Void> clusterWatchDatabase( ClusterControllerData* cluster, Cluster
++dbInfo.masterLifetime;
dbInfo.clusterInterface = db->serverInfo->get().clusterInterface;
dbInfo.distributor = db->serverInfo->get().distributor;
dbInfo.ratekeeper = db->serverInfo->get().ratekeeper;

TraceEvent("CCWDB", cluster->id).detail("Lifetime", dbInfo.masterLifetime.toString()).detail("ChangeID", dbInfo.id);
db->serverInfo->set( dbInfo );
@@ -1762,7 +1784,12 @@ void registerWorker( RegisterWorkerRequest req, ClusterControllerData *self ) {
if ( req.distributorInterf.present() && !self->db.serverInfo->get().distributor.present() ) {
const DataDistributorInterface& di = req.distributorInterf.get();
TraceEvent("ClusterController_RegisterDataDistributor", self->id).detail("DDID", di.id());
self->db.setDistributor( di );
self->db.setDistributor(di);
}
if ( req.ratekeeperInterf.present() && !self->db.serverInfo->get().ratekeeper.present() ) {
const RatekeeperInterface& rki = req.ratekeeperInterf.get();
TraceEvent("ClusterController_RegisterRatekeeper", self->id).detail("RKID", rki.id());
self->db.setRatekeeper(rki);
}
if( info == self->id_worker.end() ) {
self->id_worker[w.locality.processId()] = WorkerInfo( workerAvailabilityWatch( w, newProcessClass, self ), req.reply, req.generation, w, req.initialClass, newProcessClass, newPriorityInfo, req.degraded );
@@ -2319,8 +2346,6 @@ ACTOR Future<Void> handleForcedRecoveries( ClusterControllerData *self, ClusterC
}

ACTOR Future<DataDistributorInterface> startDataDistributor( ClusterControllerData *self ) {
state Optional<Key> dcId = self->clusterControllerDcId;
state InitializeDataDistributorRequest req;
while ( !self->clusterControllerProcessId.present() || !self->masterProcessId.present() ) {
wait( delay(SERVER_KNOBS->WAIT_FOR_GOOD_RECRUITMENT_DELAY) );
}
@@ -2332,18 +2357,18 @@ ACTOR Future<DataDistributorInterface> startDataDistributor( ClusterControllerDa
}

std::map<Optional<Standalone<StringRef>>, int> id_used = self->getUsedIds();
state WorkerFitnessInfo data_distributor = self->getWorkerForRoleInDatacenter(dcId, ProcessClass::DataDistributor, ProcessClass::NeverAssign, self->db.config, id_used);
req.reqId = g_random->randomUniqueID();
TraceEvent("ClusterController_DataDistributorRecruit", req.reqId).detail("Addr", data_distributor.worker.interf.address());
state WorkerFitnessInfo data_distributor = self->getWorkerForRoleInDatacenter(self->clusterControllerDcId, ProcessClass::DataDistributor, ProcessClass::NeverAssign, self->db.config, id_used);
state InitializeDataDistributorRequest req(g_random->randomUniqueID());
TraceEvent("ClusterController_DataDistributorRecruit", self->id).detail("Addr", data_distributor.worker.interf.address());

ErrorOr<DataDistributorInterface> distributor = wait( data_distributor.worker.interf.dataDistributor.getReplyUnlessFailedFor(req, SERVER_KNOBS->WAIT_FOR_DISTRIBUTOR_JOIN_DELAY, 0) );
if (distributor.present()) {
TraceEvent("ClusterController_DataDistributorRecruited", req.reqId).detail("Addr", data_distributor.worker.interf.address());
TraceEvent("ClusterController_DataDistributorRecruited", self->id).detail("Addr", data_distributor.worker.interf.address());
return distributor.get();
}
}
catch (Error& e) {
TraceEvent("ClusterController_DataDistributorRecruitError", req.reqId).error(e);
TraceEvent("ClusterController_DataDistributorRecruitError", self->id).error(e);
if ( e.code() != error_code_no_more_servers ) {
throw;
}
@@ -2352,7 +2377,7 @@ ACTOR Future<DataDistributorInterface> startDataDistributor( ClusterControllerDa
}
}

ACTOR Future<Void> waitDDRejoinOrStartDD( ClusterControllerData *self, ClusterControllerFullInterface *clusterInterface ) {
ACTOR Future<Void> monitorDataDistributor(ClusterControllerData *self) {
state Future<Void> initialDelay = delay(SERVER_KNOBS->WAIT_FOR_DISTRIBUTOR_JOIN_DELAY);

// wait for a while to see if existing data distributor will join.
@@ -2372,10 +2397,66 @@ ACTOR Future<Void> waitDDRejoinOrStartDD( ClusterControllerData *self, ClusterCo
wait( waitFailureClient( self->db.serverInfo->get().distributor.get().waitFailure, SERVER_KNOBS->DD_FAILURE_TIME ) );
TraceEvent("ClusterController", self->id)
.detail("DataDistributorDied", self->db.serverInfo->get().distributor.get().id());
self->db.clearDistributor();
self->db.clearInterf(ProcessClass::DataDistributorClass);
} else {
DataDistributorInterface distributorInterf = wait( startDataDistributor(self) );
self->db.setDistributor( distributorInterf );
self->db.setDistributor(distributorInterf);
}
}
}

ACTOR Future<RatekeeperInterface> startRatekeeper(ClusterControllerData *self) {
loop {
try {
while ( self->db.serverInfo->get().recoveryState < RecoveryState::ACCEPTING_COMMITS ) {
wait( self->db.serverInfo->onChange() );
}

std::map<Optional<Standalone<StringRef>>, int> id_used = self->getUsedIds();
state WorkerFitnessInfo rkWorker = self->getWorkerForRoleInDatacenter(self->clusterControllerDcId, ProcessClass::RateKeeper, ProcessClass::NeverAssign, self->db.config, id_used);
state InitializeRatekeeperRequest req(g_random->randomUniqueID());
TraceEvent("ClusterController_RecruitRatekeeper", self->id).detail("Addr", rkWorker.worker.interf.address());

ErrorOr<RatekeeperInterface> interf = wait( rkWorker.worker.interf.ratekeeper.getReplyUnlessFailedFor(req, SERVER_KNOBS->WAIT_FOR_RATEKEEPER_JOIN_DELAY, 0) );
if (interf.present()) {
TraceEvent("ClusterController_RatekeeperRecruited", self->id).detail("Addr", rkWorker.worker.interf.address());
return interf.get();
}
}
catch (Error& e) {
TraceEvent("ClusterController_RatekeeperRecruitError", self->id).error(e);
if ( e.code() != error_code_no_more_servers ) {
throw;
}
}
wait( delay(SERVER_KNOBS->ATTEMPT_RECRUITMENT_DELAY) );
}
}

ACTOR Future<Void> monitorRatekeeper(ClusterControllerData *self) {
state Future<Void> initialDelay = delay(SERVER_KNOBS->WAIT_FOR_RATEKEEPER_JOIN_DELAY);

// wait for a while to see if an existing ratekeeper will join.
loop choose {
when ( wait(initialDelay) ) { break; }
when ( wait(self->db.serverInfo->onChange()) ) { // Rejoins via worker registration
if ( self->db.serverInfo->get().ratekeeper.present() ) {
TraceEvent("ClusterController_GotRateKeeper", self->id)
.detail("RKID", self->db.serverInfo->get().ratekeeper.get().id());
break;
}
}
}

loop {
if ( self->db.serverInfo->get().ratekeeper.present() ) {
wait( waitFailureClient( self->db.serverInfo->get().ratekeeper.get().waitFailure, SERVER_KNOBS->RATEKEEPER_FAILURE_TIME ) );
TraceEvent("ClusterController_RateKeeperDied", self->id)
.detail("RKID", self->db.serverInfo->get().ratekeeper.get().id());
self->db.clearInterf(ProcessClass::RateKeeperClass);
} else {
RatekeeperInterface rkInterf = wait( startRatekeeper(self) );
self->db.setRatekeeper(rkInterf);
}
}
}
@@ -2396,8 +2477,9 @@ ACTOR Future<Void> clusterControllerCore( ClusterControllerFullInterface interf,
self.addActor.send( updatedChangingDatacenters(&self) );
self.addActor.send( updatedChangedDatacenters(&self) );
self.addActor.send( updateDatacenterVersionDifference(&self) );
self.addActor.send( waitDDRejoinOrStartDD(&self, &interf) );
self.addActor.send( handleForcedRecoveries(&self, interf) );
self.addActor.send( monitorDataDistributor(&self) );
self.addActor.send( monitorRatekeeper(&self) );
//printf("%s: I am the cluster controller\n", g_network->getLocalAddress().toString().c_str());

loop choose {

@@ -168,16 +168,17 @@ struct RegisterWorkerRequest {
ClusterControllerPriorityInfo priorityInfo;
Generation generation;
Optional<DataDistributorInterface> distributorInterf;
Optional<RatekeeperInterface> ratekeeperInterf;
ReplyPromise<RegisterWorkerReply> reply;
bool degraded;

RegisterWorkerRequest() : priorityInfo(ProcessClass::UnsetFit, false, ClusterControllerPriorityInfo::FitnessUnknown), degraded(false) {}
RegisterWorkerRequest(WorkerInterface wi, ProcessClass initialClass, ProcessClass processClass, ClusterControllerPriorityInfo priorityInfo, Generation generation, Optional<DataDistributorInterface> ddInterf, bool degraded) :
wi(wi), initialClass(initialClass), processClass(processClass), priorityInfo(priorityInfo), generation(generation), distributorInterf(ddInterf), degraded(degraded) {}
RegisterWorkerRequest(WorkerInterface wi, ProcessClass initialClass, ProcessClass processClass, ClusterControllerPriorityInfo priorityInfo, Generation generation, Optional<DataDistributorInterface> ddInterf, Optional<RatekeeperInterface> rkInterf, bool degraded) :
wi(wi), initialClass(initialClass), processClass(processClass), priorityInfo(priorityInfo), generation(generation), distributorInterf(ddInterf), ratekeeperInterf(rkInterf), degraded(degraded) {}

template <class Ar>
void serialize( Ar& ar ) {
serializer(ar, wi, initialClass, processClass, priorityInfo, generation, distributorInterf, reply, degraded);
serializer(ar, wi, initialClass, processClass, priorityInfo, generation, distributorInterf, ratekeeperInterf, reply, degraded);
}
};

@@ -29,7 +29,6 @@
#include "fdbserver/WaitFailure.h"
#include "fdbserver/ServerDBInfo.h"
#include "fdbserver/IKeyValueStore.h"
#include "fdbserver/Ratekeeper.h"
#include "fdbclient/ManagementAPI.actor.h"
#include "fdbrpc/Replication.h"
#include "flow/UnitTest.h"
@@ -570,7 +569,6 @@ struct DDTeamCollection : ReferenceCounted<DDTeamCollection> {
PromiseStream<UID> removedServers;
std::set<UID> recruitingIds; // The IDs of the SS which are being recruited
std::set<NetworkAddress> recruitingLocalities;
Optional<PromiseStream< std::pair<UID, Optional<StorageServerInterface>> >> serverChanges;
Future<Void> initialFailureReactionDelay;
Future<Void> initializationDoneActor;
Promise<Void> serverTrackerErrorOut;
@@ -629,13 +627,12 @@ struct DDTeamCollection : ReferenceCounted<DDTeamCollection> {
Reference<ShardsAffectedByTeamFailure> const& shardsAffectedByTeamFailure,
DatabaseConfiguration configuration, std::vector<Optional<Key>> includedDCs,
Optional<std::vector<Optional<Key>>> otherTrackedDCs,
Optional<PromiseStream<std::pair<UID, Optional<StorageServerInterface>>>> const& serverChanges,
Future<Void> readyToStart, Reference<AsyncVar<bool>> zeroHealthyTeams, bool primary,
Reference<AsyncVar<bool>> processingUnhealthy)
: cx(cx), distributorId(distributorId), lock(lock), output(output),
shardsAffectedByTeamFailure(shardsAffectedByTeamFailure), doBuildTeams(true), teamBuilder(Void()),
badTeamRemover(Void()), redundantTeamRemover(Void()), configuration(configuration),
serverChanges(serverChanges), readyToStart(readyToStart),
readyToStart(readyToStart),
checkTeamDelay(delay(SERVER_KNOBS->CHECK_TEAM_DELAY, TaskDataDistribution)),
initialFailureReactionDelay(
delayed(readyToStart, SERVER_KNOBS->INITIAL_FAILURE_REACTION_DELAY, TaskDataDistribution)),
@@ -2839,10 +2836,6 @@ ACTOR Future<Void> storageServerTracker(
state Future<KeyValueStoreType> storeTracker = keyValueStoreTypeTracker( self, server );
state bool hasWrongStoreTypeOrDC = false;

if(self->serverChanges.present()) {
self->serverChanges.get().send( std::make_pair(server->id, server->lastKnownInterface) );
}

try {
loop {
status.isUndesired = false;
@@ -2933,9 +2926,6 @@ ACTOR Future<Void> storageServerTracker(
when( wait( failureTracker ) ) {
// The server is failed AND all data has been removed from it, so permanently remove it.
TraceEvent("StatusMapChange", self->distributorId).detail("ServerID", server->id).detail("Status", "Removing");
if(self->serverChanges.present()) {
self->serverChanges.get().send( std::make_pair(server->id, Optional<StorageServerInterface>()) );
}

if(server->updated.canBeSet()) {
server->updated.send(Void());
@@ -3040,9 +3030,6 @@ ACTOR Future<Void> storageServerTracker(
}

interfaceChanged = server->onInterfaceChanged;
if(self->serverChanges.present()) {
self->serverChanges.get().send( std::make_pair(server->id, server->lastKnownInterface) );
}
// We rely on the old failureTracker being actorCancelled since the old actor now has a pointer to an invalid location
status = ServerStatus( status.isFailed, status.isUndesired, server->lastKnownInterface.locality );
@@ -3460,13 +3447,39 @@ ACTOR Future<Void> pollMoveKeysLock( Database cx, MoveKeysLock lock ) {
}
}

ACTOR Future<Void> dataDistribution(
Reference<AsyncVar<struct ServerDBInfo>> db,
UID myId,
PromiseStream< std::pair<UID, Optional<StorageServerInterface>> > serverChanges,
double* lastLimited)
struct DataDistributorData : NonCopyable, ReferenceCounted<DataDistributorData> {
Reference<AsyncVar<struct ServerDBInfo>> dbInfo;
UID ddId;
PromiseStream<Future<Void>> addActor;

DataDistributorData(Reference<AsyncVar<ServerDBInfo>> const& db, UID id) : dbInfo(db), ddId(id) {}
};

ACTOR Future<Void> monitorBatchLimitedTime(Reference<AsyncVar<ServerDBInfo>> db, double* lastLimited) {
loop {
wait( delay(SERVER_KNOBS->METRIC_UPDATE_RATE) );

state Reference<ProxyInfo> proxies(new ProxyInfo(db->get().client.proxies, db->get().myLocality));

choose {
when (wait(db->onChange())) {}
when (GetHealthMetricsReply reply = wait(proxies->size() ?
loadBalance(proxies, &MasterProxyInterface::getHealthMetrics, GetHealthMetricsRequest(false))
: Never())) {
if (reply.healthMetrics.batchLimited) {
*lastLimited = now();
}
}
}
}
}

ACTOR Future<Void> dataDistribution(Reference<DataDistributorData> self)
{
state Database cx = openDBOnServer(db, TaskDataDistributionLaunch, true, true);
state double lastLimited = 0;
self->addActor.send( monitorBatchLimitedTime(self->dbInfo, &lastLimited) );

state Database cx = openDBOnServer(self->dbInfo, TaskDataDistributionLaunch, true, true);
cx->locationCacheSize = SERVER_KNOBS->DD_LOCATION_CACHE_SIZE;

//cx->setOption( FDBDatabaseOptions::LOCATION_CACHE_SIZE, StringRef((uint8_t*) &SERVER_KNOBS->DD_LOCATION_CACHE_SIZE, 8) );
@@ -3481,10 +3494,10 @@ ACTOR Future<Void> dataDistribution(
loop {
try {
loop {
TraceEvent("DDInitTakingMoveKeysLock", myId);
MoveKeysLock lock_ = wait( takeMoveKeysLock( cx, myId ) );
TraceEvent("DDInitTakingMoveKeysLock", self->ddId);
MoveKeysLock lock_ = wait( takeMoveKeysLock( cx, self->ddId ) );
lock = lock_;
TraceEvent("DDInitTookMoveKeysLock", myId);
TraceEvent("DDInitTookMoveKeysLock", self->ddId);

DatabaseConfiguration configuration_ = wait( getDatabaseConfiguration(cx) );
configuration = configuration_;
@@ -3498,7 +3511,7 @@ ACTOR Future<Void> dataDistribution(
remoteDcIds.push_back( regions[1].dcId );
}

TraceEvent("DDInitGotConfiguration", myId).detail("Conf", configuration.toString());
TraceEvent("DDInitGotConfiguration", self->ddId).detail("Conf", configuration.toString());

state Transaction tr(cx);
loop {
@@ -3528,24 +3541,24 @@ ACTOR Future<Void> dataDistribution(
}
}

TraceEvent("DDInitUpdatedReplicaKeys", myId);
Reference<InitialDataDistribution> initData_ = wait( getInitialDataDistribution(cx, myId, lock, configuration.usableRegions > 1 ? remoteDcIds : std::vector<Optional<Key>>() ) );
TraceEvent("DDInitUpdatedReplicaKeys", self->ddId);
Reference<InitialDataDistribution> initData_ = wait( getInitialDataDistribution(cx, self->ddId, lock, configuration.usableRegions > 1 ? remoteDcIds : std::vector<Optional<Key>>() ) );
initData = initData_;
if(initData->shards.size() > 1) {
TraceEvent("DDInitGotInitialDD", myId)
TraceEvent("DDInitGotInitialDD", self->ddId)
.detail("B", printable(initData->shards.end()[-2].key))
.detail("E", printable(initData->shards.end()[-1].key))
.detail("Src", describe(initData->shards.end()[-2].primarySrc))
.detail("Dest", describe(initData->shards.end()[-2].primaryDest))
.trackLatest("InitialDD");
} else {
TraceEvent("DDInitGotInitialDD", myId).detail("B","").detail("E", "").detail("Src", "[no items]").detail("Dest", "[no items]").trackLatest("InitialDD");
TraceEvent("DDInitGotInitialDD", self->ddId).detail("B","").detail("E", "").detail("Src", "[no items]").detail("Dest", "[no items]").trackLatest("InitialDD");
}

if (initData->mode) break; // mode may be set true by system operator using fdbcli
TraceEvent("DataDistributionDisabled", myId);
TraceEvent("DataDistributionDisabled", self->ddId);

TraceEvent("MovingData", myId)
TraceEvent("MovingData", self->ddId)
.detail( "InFlight", 0 )
.detail( "InQueue", 0 )
.detail( "AverageShardSize", -1 )
@@ -3554,8 +3567,8 @@ ACTOR Future<Void> dataDistribution(
.detail( "HighestPriority", 0 )
.trackLatest( "MovingData" );

TraceEvent("TotalDataInFlight", myId).detail("Primary", true).detail("TotalBytes", 0).detail("UnhealthyServers", 0).detail("HighestPriority", 0).trackLatest("TotalDataInFlight");
TraceEvent("TotalDataInFlight", myId).detail("Primary", false).detail("TotalBytes", 0).detail("UnhealthyServers", 0).detail("HighestPriority", configuration.usableRegions > 1 ? 0 : -1).trackLatest("TotalDataInFlightRemote");
TraceEvent("TotalDataInFlight", self->ddId).detail("Primary", true).detail("TotalBytes", 0).detail("UnhealthyServers", 0).detail("HighestPriority", 0).trackLatest("TotalDataInFlight");
TraceEvent("TotalDataInFlight", self->ddId).detail("Primary", false).detail("TotalBytes", 0).detail("UnhealthyServers", 0).detail("HighestPriority", configuration.usableRegions > 1 ? 0 : -1).trackLatest("TotalDataInFlightRemote");

wait( waitForDataDistributionEnabled(cx) );
TraceEvent("DataDistributionEnabled");
@@ -3573,12 +3586,12 @@ ACTOR Future<Void> dataDistribution(
state Reference<ShardsAffectedByTeamFailure> shardsAffectedByTeamFailure( new ShardsAffectedByTeamFailure );

state int shard = 0;
for(; shard<initData->shards.size() - 1; shard++) {
for (; shard < initData->shards.size() - 1; shard++) {
KeyRangeRef keys = KeyRangeRef(initData->shards[shard].key, initData->shards[shard+1].key);
shardsAffectedByTeamFailure->defineShard(keys);
std::vector<ShardsAffectedByTeamFailure::Team> teams;
teams.push_back(ShardsAffectedByTeamFailure::Team(initData->shards[shard].primarySrc, true));
if(configuration.usableRegions > 1) {
if (configuration.usableRegions > 1) {
teams.push_back(ShardsAffectedByTeamFailure::Team(initData->shards[shard].remoteSrc, false));
}
if(g_network->isSimulated()) {
@@ -3587,11 +3600,11 @@ ACTOR Future<Void> dataDistribution(
}

shardsAffectedByTeamFailure->moveShard(keys, teams);
if(initData->shards[shard].hasDest) {
if (initData->shards[shard].hasDest) {
// This shard is already in flight. Ideally we should use dest in sABTF and generate a dataDistributionRelocator directly in
// DataDistributionQueue to track it, but it's easier to just (with low priority) schedule it for movement.
bool unhealthy = initData->shards[shard].primarySrc.size() != configuration.storageTeamSize;
if(!unhealthy && configuration.usableRegions > 1) {
if (!unhealthy && configuration.usableRegions > 1) {
unhealthy = initData->shards[shard].remoteSrc.size() != configuration.storageTeamSize;
}
output.send( RelocateShard( keys, unhealthy ? PRIORITY_TEAM_UNHEALTHY : PRIORITY_RECOVER_MOVE ) );
@@ -3620,20 +3633,20 @@ ACTOR Future<Void> dataDistribution(
}

actors.push_back( pollMoveKeysLock(cx, lock) );
actors.push_back( reportErrorsExcept( dataDistributionTracker( initData, cx, output, shardsAffectedByTeamFailure, getShardMetrics, getAverageShardBytes.getFuture(), readyToStart, anyZeroHealthyTeams, myId ), "DDTracker", myId, &normalDDQueueErrors() ) );
actors.push_back( reportErrorsExcept( dataDistributionQueue( cx, output, input.getFuture(), getShardMetrics, processingUnhealthy, tcis, shardsAffectedByTeamFailure, lock, getAverageShardBytes, myId, storageTeamSize, lastLimited ), "DDQueue", myId, &normalDDQueueErrors() ) );
actors.push_back( reportErrorsExcept( dataDistributionTracker( initData, cx, output, shardsAffectedByTeamFailure, getShardMetrics, getAverageShardBytes.getFuture(), readyToStart, anyZeroHealthyTeams, self->ddId ), "DDTracker", self->ddId, &normalDDQueueErrors() ) );
actors.push_back( reportErrorsExcept( dataDistributionQueue( cx, output, input.getFuture(), getShardMetrics, processingUnhealthy, tcis, shardsAffectedByTeamFailure, lock, getAverageShardBytes, self->ddId, storageTeamSize, &lastLimited ), "DDQueue", self->ddId, &normalDDQueueErrors() ) );

vector<DDTeamCollection*> teamCollectionsPtrs;
Reference<DDTeamCollection> primaryTeamCollection( new DDTeamCollection(cx, myId, lock, output, shardsAffectedByTeamFailure, configuration, primaryDcId, configuration.usableRegions > 1 ? remoteDcIds : std::vector<Optional<Key>>(), serverChanges, readyToStart.getFuture(), zeroHealthyTeams[0], true, processingUnhealthy) );
Reference<DDTeamCollection> primaryTeamCollection( new DDTeamCollection(cx, self->ddId, lock, output, shardsAffectedByTeamFailure, configuration, primaryDcId, configuration.usableRegions > 1 ? remoteDcIds : std::vector<Optional<Key>>(), readyToStart.getFuture(), zeroHealthyTeams[0], true, processingUnhealthy) );
teamCollectionsPtrs.push_back(primaryTeamCollection.getPtr());
if (configuration.usableRegions > 1) {
Reference<DDTeamCollection> remoteTeamCollection( new DDTeamCollection(cx, myId, lock, output, shardsAffectedByTeamFailure, configuration, remoteDcIds, Optional<std::vector<Optional<Key>>>(), serverChanges, readyToStart.getFuture() && remoteRecovered(db), zeroHealthyTeams[1], false, processingUnhealthy) );
Reference<DDTeamCollection> remoteTeamCollection( new DDTeamCollection(cx, self->ddId, lock, output, shardsAffectedByTeamFailure, configuration, remoteDcIds, Optional<std::vector<Optional<Key>>>(), readyToStart.getFuture() && remoteRecovered(self->dbInfo), zeroHealthyTeams[1], false, processingUnhealthy) );
teamCollectionsPtrs.push_back(remoteTeamCollection.getPtr());
remoteTeamCollection->teamCollections = teamCollectionsPtrs;
actors.push_back( reportErrorsExcept( dataDistributionTeamCollection( remoteTeamCollection, initData, tcis[1], db ), "DDTeamCollectionSecondary", myId, &normalDDQueueErrors() ) );
actors.push_back( reportErrorsExcept( dataDistributionTeamCollection( remoteTeamCollection, initData, tcis[1], self->dbInfo ), "DDTeamCollectionSecondary", self->ddId, &normalDDQueueErrors() ) );
}
primaryTeamCollection->teamCollections = teamCollectionsPtrs;
actors.push_back( reportErrorsExcept( dataDistributionTeamCollection( primaryTeamCollection, initData, tcis[0], db ), "DDTeamCollectionPrimary", myId, &normalDDQueueErrors() ) );
actors.push_back( reportErrorsExcept( dataDistributionTeamCollection( primaryTeamCollection, initData, tcis[0], self->dbInfo ), "DDTeamCollectionPrimary", self->ddId, &normalDDQueueErrors() ) );
actors.push_back(yieldPromiseStream(output.getFuture(), input));

wait( waitForAll( actors ) );
@@ -3651,15 +3664,6 @@ ACTOR Future<Void> dataDistribution(
}
}

struct DataDistributorData : NonCopyable, ReferenceCounted<DataDistributorData> {
Reference<AsyncVar<struct ServerDBInfo>> dbInfo;
UID ddId;
PromiseStream< std::pair<UID, Optional<StorageServerInterface>> > ddStorageServerChanges;
PromiseStream<Future<Void>> addActor;

DataDistributorData(Reference<AsyncVar<ServerDBInfo>> const& db, UID id) : dbInfo(db), ddId(id) {}
};

static std::set<int> const& normalDataDistributorErrors() {
static std::set<int> s;
if (s.empty()) {
@@ -3672,31 +3676,14 @@ static std::set<int> const& normalDataDistributorErrors() {
return s;
}

static std::set<int> const& normalRateKeeperErrors() {
static std::set<int> s;
if (s.empty()) {
s.insert( error_code_worker_removed );
s.insert( error_code_broken_promise );
s.insert( error_code_actor_cancelled );
s.insert( error_code_please_reboot );
}
return s;
}

ACTOR Future<Void> dataDistributor(DataDistributorInterface di, Reference<AsyncVar<struct ServerDBInfo>> db ) {
state UID lastClusterControllerID(0,0);
state Reference<DataDistributorData> self( new DataDistributorData(db, di.id()) );
state Future<Void> collection = actorCollection( self->addActor.getFuture() );

TraceEvent("DataDistributor_Starting", di.id());
self->addActor.send( waitFailureServer(di.waitFailure.getFuture()) );

try {
TraceEvent("DataDistributor_Running", di.id());
state PromiseStream< std::pair<UID, Optional<StorageServerInterface>> > ddStorageServerChanges;
state double lastLimited = 0;
state Future<Void> distributor = reportErrorsExcept( dataDistribution( self->dbInfo, di.id(), ddStorageServerChanges, &lastLimited ), "DataDistribution", di.id(), &normalDataDistributorErrors() );
self->addActor.send( reportErrorsExcept( rateKeeper( self->dbInfo, ddStorageServerChanges, di.getRateInfo.getFuture(), &lastLimited ), "Ratekeeper", di.id(), &normalRateKeeperErrors() ) );
self->addActor.send( waitFailureServer(di.waitFailure.getFuture()) );
state Future<Void> distributor = reportErrorsExcept( dataDistribution(self), "DataDistribution", di.id(), &normalDataDistributorErrors() );

wait( distributor || collection );
}
@@ -3732,7 +3719,6 @@ DDTeamCollection* testTeamCollection(int teamSize, IRepPolicyRef policy, int pro
conf,
{},
{},
PromiseStream<std::pair<UID, Optional<StorageServerInterface>>>(),
Future<Void>(Void()),
Reference<AsyncVar<bool>>( new AsyncVar<bool>(true) ),
true,
@@ -3765,7 +3751,7 @@ DDTeamCollection* testMachineTeamCollection(int teamSize, IRepPolicyRef policy,
DDTeamCollection* collection =
new DDTeamCollection(database, UID(0, 0), MoveKeysLock(), PromiseStream<RelocateShard>(),
Reference<ShardsAffectedByTeamFailure>(new ShardsAffectedByTeamFailure()), conf, {}, {},
PromiseStream<std::pair<UID, Optional<StorageServerInterface>>>(), Future<Void>(Void()),
Future<Void>(Void()),
Reference<AsyncVar<bool>>(new AsyncVar<bool>(true)), true,
Reference<AsyncVar<bool>>(new AsyncVar<bool>(false)));

@@ -253,5 +253,6 @@ int64_t getMaxShardSize( double dbSizeEstimate );
class DDTeamCollection;
ACTOR Future<Void> teamRemover(DDTeamCollection* self);
ACTOR Future<Void> teamRemoverPeriodic(DDTeamCollection* self);
ACTOR Future<vector<std::pair<StorageServerInterface, ProcessClass>>> getServerListAndProcessClasses(Transaction* tr);

#endif

@@ -21,21 +21,19 @@
#ifndef FDBSERVER_DATADISTRIBUTORINTERFACE_H
#define FDBSERVER_DATADISTRIBUTORINTERFACE_H

#include "fdbclient/FDBTypes.h"
#include "fdbrpc/fdbrpc.h"
#include "fdbrpc/Locality.h"

struct DataDistributorInterface {
RequestStream<ReplyPromise<Void>> waitFailure;
RequestStream<struct GetRateInfoRequest> getRateInfo;
struct LocalityData locality;

DataDistributorInterface() {}
explicit DataDistributorInterface(const struct LocalityData& l) : locality(l) {}

void initEndpoints() {}
UID id() const { return getRateInfo.getEndpoint().token; }
NetworkAddress address() const { return getRateInfo.getEndpoint().getPrimaryAddress(); }
UID id() const { return waitFailure.getEndpoint().token; }
NetworkAddress address() const { return waitFailure.getEndpoint().getPrimaryAddress(); }
bool operator== (const DataDistributorInterface& r) const {
return id() == r.id();
}
@@ -45,36 +43,7 @@ struct DataDistributorInterface {

template <class Archive>
void serialize(Archive& ar) {
serializer(ar, waitFailure, getRateInfo, locality);
}
};

struct GetRateInfoRequest {
UID requesterID;
int64_t totalReleasedTransactions;
int64_t batchReleasedTransactions;
bool detailed;
ReplyPromise<struct GetRateInfoReply> reply;

GetRateInfoRequest() {}
GetRateInfoRequest(UID const& requesterID, int64_t totalReleasedTransactions, int64_t batchReleasedTransactions, bool detailed)
: requesterID(requesterID), totalReleasedTransactions(totalReleasedTransactions), batchReleasedTransactions(batchReleasedTransactions), detailed(detailed) {}

template <class Ar>
void serialize(Ar& ar) {
serializer(ar, requesterID, totalReleasedTransactions, batchReleasedTransactions, detailed, reply);
}
};

struct GetRateInfoReply {
double transactionRate;
double batchTransactionRate;
double leaseDuration;
HealthMetrics healthMetrics;

template <class Ar>
void serialize(Ar& ar) {
serializer(ar, transactionRate, batchTransactionRate, leaseDuration, healthMetrics);
serializer(ar, waitFailure, locality);
}
};

@@ -310,11 +310,13 @@ ServerKnobs::ServerKnobs(bool randomize, ClientKnobs* clientKnobs) {
init( WAIT_FOR_GOOD_REMOTE_RECRUITMENT_DELAY, 5.0 );
init( ATTEMPT_RECRUITMENT_DELAY, 0.035 );
init( WAIT_FOR_DISTRIBUTOR_JOIN_DELAY, 1.0 );
init( WAIT_FOR_RATEKEEPER_JOIN_DELAY, 1.0 );
init( WORKER_FAILURE_TIME, 1.0 ); if( randomize && BUGGIFY ) WORKER_FAILURE_TIME = 10.0;
init( CHECK_OUTSTANDING_INTERVAL, 0.5 ); if( randomize && BUGGIFY ) CHECK_OUTSTANDING_INTERVAL = 0.001;
init( VERSION_LAG_METRIC_INTERVAL, 0.5 ); if( randomize && BUGGIFY ) VERSION_LAG_METRIC_INTERVAL = 10.0;
init( MAX_VERSION_DIFFERENCE, 20 * VERSIONS_PER_SECOND );
init( FORCE_RECOVERY_CHECK_DELAY, 5.0 );
init( RATEKEEPER_FAILURE_TIME, 1.0 );

init( INCOMPATIBLE_PEERS_LOGGING_INTERVAL, 600 ); if( randomize && BUGGIFY ) INCOMPATIBLE_PEERS_LOGGING_INTERVAL = 60.0;
init( EXPECTED_MASTER_FITNESS, ProcessClass::UnsetFit );

@@ -251,12 +251,14 @@ public:
double WAIT_FOR_GOOD_REMOTE_RECRUITMENT_DELAY;
double ATTEMPT_RECRUITMENT_DELAY;
double WAIT_FOR_DISTRIBUTOR_JOIN_DELAY;
double WAIT_FOR_RATEKEEPER_JOIN_DELAY;
double WORKER_FAILURE_TIME;
double CHECK_OUTSTANDING_INTERVAL;
double INCOMPATIBLE_PEERS_LOGGING_INTERVAL;
double VERSION_LAG_METRIC_INTERVAL;
int64_t MAX_VERSION_DIFFERENCE;
double FORCE_RECOVERY_CHECK_DELAY;
double RATEKEEPER_FAILURE_TIME;

// Knobs used to select the best policy (via monte carlo)
int POLICY_RATING_TESTS; // number of tests per policy (in order to compare)

@@ -76,17 +76,6 @@ struct ProxyStats {
}
};

ACTOR template <class T>
Future<Void> forwardValue(Promise<T> out, Future<T> in)
{
// Like forwardPromise, but throws on error
T t = wait(in);
out.send(t);
return Void();
}

int getBytes(Promise<Version> const& r) { return 0; }

ACTOR Future<Void> getRate(UID myID, Reference<AsyncVar<ServerDBInfo>> db, int64_t* inTransactionCount, int64_t* inBatchTransactionCount, double* outTransactionRate,
double* outBatchTransactionRate, GetHealthMetricsReply* healthMetricsReply, GetHealthMetricsReply* detailedHealthMetricsReply) {
state Future<Void> nextRequestTimer = Never();
@@ -94,21 +83,17 @@ ACTOR Future<Void> getRate(UID myID, Reference<AsyncVar<ServerDBInfo>> db, int64
state Future<GetRateInfoReply> reply = Never();
state double lastDetailedReply = 0.0; // request detailed metrics immediately
state bool expectingDetailedReply = false;

state int64_t lastTC = 0;

if (db->get().distributor.present()) {
nextRequestTimer = Void();
}

if (db->get().ratekeeper.present()) nextRequestTimer = Void();
loop choose {
when ( wait( db->onChange() ) ) {
if ( db->get().distributor.present() ) {
TraceEvent("Proxy_DataDistributorChanged", myID)
.detail("DDID", db->get().distributor.get().id());
nextRequestTimer = Void(); // trigger GetRate request
if ( db->get().ratekeeper.present() ) {
TraceEvent("Proxy_RatekeeperChanged", myID)
.detail("RKID", db->get().ratekeeper.get().id());
nextRequestTimer = Void(); // trigger GetRate request
} else {
TraceEvent("Proxy_DataDistributorDied", myID);
TraceEvent("Proxy_RatekeeperDied", myID);
nextRequestTimer = Never();
reply = Never();
}
@@ -116,7 +101,7 @@ ACTOR Future<Void> getRate(UID myID, Reference<AsyncVar<ServerDBInfo>> db, int64
when ( wait( nextRequestTimer ) ) {
nextRequestTimer = Never();
bool detailed = now() - lastDetailedReply > SERVER_KNOBS->DETAILED_METRIC_UPDATE_RATE;
reply = brokenPromiseToNever(db->get().distributor.get().getRateInfo.getReply(GetRateInfoRequest(myID, *inTransactionCount, *inBatchTransactionCount, detailed)));
reply = brokenPromiseToNever(db->get().ratekeeper.get().getRateInfo.getReply(GetRateInfoRequest(myID, *inTransactionCount, *inBatchTransactionCount, detailed)));
expectingDetailedReply = detailed;
}
when ( GetRateInfoReply rep = wait(reply) ) {
@@ -222,6 +207,7 @@ struct ProxyCommitData {
bool firstProxy;
double lastCoalesceTime;
bool locked;
Optional<Value> metadataVersion;
double commitBatchInterval;

int64_t localCommitBatchesStarted;
@@ -658,6 +644,8 @@ ACTOR Future<Void> commitBatch(
lockedKey = self->txnStateStore->readValue(databaseLockedKey).get();
state bool lockedAfter = lockedKey.present() && lockedKey.get().size();

state Optional<Value> metadataVersionAfter = self->txnStateStore->readValue(metadataVersionKey).get();

auto fcm = self->logAdapter->getCommitMessage();
storeCommits.push_back(std::make_pair(fcm, self->txnStateStore->commit()));
self->version = commitVersion;
@@ -764,54 +752,36 @@ ACTOR Future<Void> commitBatch(
else
UNREACHABLE();

// Check on backing up key, if backup ranges are defined and a normal key
if ((self->vecBackupKeys.size() > 1) && normalKeys.contains(m.param1)) {

if (isAtomicOp((MutationRef::Type)m.type)) {

// Check on backing up key, if backup ranges are defined and a normal key
if (self->vecBackupKeys.size() > 1 && (normalKeys.contains(m.param1) || m.param1 == metadataVersionKey)) {
if (m.type != MutationRef::Type::ClearRange) {
// Add the mutation to the relevant backup tag
for (auto backupName : self->vecBackupKeys[m.param1]) {
logRangeMutations[backupName].push_back_deep(logRangeMutationsArena, m);
}
}
else {
switch (m.type)
KeyRangeRef mutationRange(m.param1, m.param2);
KeyRangeRef intersectionRange;

// Identify and add the intersecting ranges of the mutation to the array of mutations to serialize
for (auto backupRange : self->vecBackupKeys.intersectingRanges(mutationRange))
{
// Backup the mutation, if within a backup range
case MutationRef::Type::SetValue:
// Get the backup sub range
const auto& backupSubrange = backupRange.range();

// Determine the intersecting range
intersectionRange = mutationRange & backupSubrange;

// Create the custom mutation for the specific backup tag
MutationRef backupMutation(MutationRef::Type::ClearRange, intersectionRange.begin, intersectionRange.end);

// Add the mutation to the relevant backup tag
for (auto backupName : self->vecBackupKeys[m.param1]) {
logRangeMutations[backupName].push_back_deep(logRangeMutationsArena, m);
for (auto backupName : backupRange.value()) {
logRangeMutations[backupName].push_back_deep(logRangeMutationsArena, backupMutation);
}
break;

case MutationRef::Type::ClearRange:
{
KeyRangeRef mutationRange(m.param1, m.param2);
KeyRangeRef intersectionRange;

// Identify and add the intersecting ranges of the mutation to the array of mutations to serialize
for (auto backupRange : self->vecBackupKeys.intersectingRanges(mutationRange))
{
// Get the backup sub range
const auto& backupSubrange = backupRange.range();

// Determine the intersecting range
intersectionRange = mutationRange & backupSubrange;

// Create the custom mutation for the specific backup tag
MutationRef backupMutation(MutationRef::Type::ClearRange, intersectionRange.begin, intersectionRange.end);

// Add the mutation to the relevant backup tag
for (auto backupName : backupRange.value()) {
logRangeMutations[backupName].push_back_deep(logRangeMutationsArena, backupMutation);
}
}
}
break;

default:
UNREACHABLE();
break;
}
}
}
@@ -896,6 +866,7 @@ ACTOR Future<Void> commitBatch(
when(GetReadVersionReply v = wait(self->getConsistentReadVersion.getReply(GetReadVersionRequest(0, GetReadVersionRequest::PRIORITY_SYSTEM_IMMEDIATE | GetReadVersionRequest::FLAG_CAUSAL_READ_RISKY)))) {
if(v.version > self->committedVersion.get()) {
self->locked = v.locked;
self->metadataVersion = v.metadataVersion;
self->committedVersion.set(v.version);
}

@@ -980,6 +951,7 @@ ACTOR Future<Void> commitBatch(
TEST(self->committedVersion.get() > commitVersion); // A later version was reported committed first
if( commitVersion > self->committedVersion.get() ) {
self->locked = lockedAfter;
self->metadataVersion = metadataVersionAfter;
self->committedVersion.set(commitVersion);
}

@@ -993,7 +965,7 @@ ACTOR Future<Void> commitBatch(
for (int t = 0; t < trs.size(); t++) {
if (committed[t] == ConflictBatch::TransactionCommitted && (!locked || trs[t].isLockAware())) {
ASSERT_WE_THINK(commitVersion != invalidVersion);
trs[t].reply.send(CommitID(commitVersion, t));
trs[t].reply.send(CommitID(commitVersion, t, metadataVersionAfter));
}
else if (committed[t] == ConflictBatch::TransactionTooOld) {
trs[t].reply.sendError(transaction_too_old());
@@ -1068,7 +1040,8 @@ ACTOR Future<GetReadVersionReply> getLiveCommittedVersion(ProxyCommitData* commi
GetReadVersionReply rep;
rep.version = commitData->committedVersion.get();
rep.locked = commitData->locked;

rep.metadataVersion = commitData->metadataVersion;

for (auto v : versions) {
if(v.version > rep.version) {
rep = v;
@@ -1565,6 +1538,7 @@ ACTOR Future<Void> masterProxyServerCore(
g_traceBatch.addEvent("TransactionDebug", req.debugID.get().first(), "MasterProxyServer.masterProxyServerCore.GetRawCommittedVersion");
GetReadVersionReply rep;
rep.locked = commitData.locked;
rep.metadataVersion = commitData.metadataVersion;
rep.version = commitData.committedVersion.get();
req.reply.send(rep);
}
@@ -1647,6 +1621,7 @@ ACTOR Future<Void> masterProxyServerCore(

auto lockedKey = commitData.txnStateStore->readValue(databaseLockedKey).get();
commitData.locked = lockedKey.present() && lockedKey.get().size();
commitData.metadataVersion = commitData.txnStateStore->readValue(metadataVersionKey).get();

commitData.txnStateStore->enableSnapshot();
}

@@ -3,7 +3,7 @@
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
* Copyright 2013-2019 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -19,13 +19,14 @@
*/

#include "flow/IndexedSet.h"
#include "fdbserver/Ratekeeper.h"
#include "fdbrpc/FailureMonitor.h"
#include "fdbserver/Knobs.h"
#include "fdbrpc/Smoother.h"
#include "fdbserver/ServerDBInfo.h"
#include "fdbrpc/simulator.h"
#include "fdbclient/ReadYourWrites.h"
#include "fdbserver/Knobs.h"
#include "fdbserver/DataDistribution.actor.h"
#include "fdbserver/ServerDBInfo.h"
#include "fdbserver/WaitFailure.h"
#include "flow/actorcompiler.h" // This must be the last #include.

enum limitReason_t {
@@ -146,7 +147,7 @@ struct TransactionCounts {
TransactionCounts() : total(0), batch(0), time(0) {}
};

struct Ratekeeper {
struct RatekeeperData {
Map<UID, StorageQueueInfo> storageQueueInfo;
Map<UID, TLogQueueInfo> tlogQueueInfo;
@@ -154,16 +155,16 @@ struct Ratekeeper {
Smoother smoothReleasedTransactions, smoothBatchReleasedTransactions, smoothTotalDurableBytes;
HealthMetrics healthMetrics;
DatabaseConfiguration configuration;
PromiseStream<Future<Void>> addActor;

Int64MetricHandle actualTpsMetric;

double lastWarning;
double* lastLimited;

RatekeeperLimits normalLimits;
RatekeeperLimits batchLimits;

Ratekeeper() : smoothReleasedTransactions(SERVER_KNOBS->SMOOTHING_AMOUNT), smoothBatchReleasedTransactions(SERVER_KNOBS->SMOOTHING_AMOUNT), smoothTotalDurableBytes(SERVER_KNOBS->SLOW_SMOOTHING_AMOUNT),
RatekeeperData() : smoothReleasedTransactions(SERVER_KNOBS->SMOOTHING_AMOUNT), smoothBatchReleasedTransactions(SERVER_KNOBS->SMOOTHING_AMOUNT), smoothTotalDurableBytes(SERVER_KNOBS->SLOW_SMOOTHING_AMOUNT),
actualTpsMetric(LiteralStringRef("Ratekeeper.ActualTPS")),
lastWarning(0),
normalLimits("", SERVER_KNOBS->TARGET_BYTES_PER_STORAGE_SERVER, SERVER_KNOBS->SPRING_BYTES_STORAGE_SERVER, SERVER_KNOBS->TARGET_BYTES_PER_TLOG, SERVER_KNOBS->SPRING_BYTES_TLOG, SERVER_KNOBS->MAX_TL_SS_VERSION_DIFFERENCE),
@@ -172,7 +173,7 @@ struct Ratekeeper {
};

//SOMEDAY: template trackStorageServerQueueInfo and trackTLogQueueInfo into one function
ACTOR Future<Void> trackStorageServerQueueInfo( Ratekeeper* self, StorageServerInterface ssi ) {
ACTOR Future<Void> trackStorageServerQueueInfo( RatekeeperData* self, StorageServerInterface ssi ) {
self->storageQueueInfo.insert( mapPair(ssi.id(), StorageQueueInfo(ssi.id(), ssi.locality) ) );
state Map<UID, StorageQueueInfo>::iterator myQueueInfo = self->storageQueueInfo.find(ssi.id());
TraceEvent("RkTracking", ssi.id());
@@ -217,7 +218,7 @@ ACTOR Future<Void> trackStorageServerQueueInfo( Ratekeeper* self, StorageServerI
}
}

ACTOR Future<Void> trackTLogQueueInfo( Ratekeeper* self, TLogInterface tli ) {
ACTOR Future<Void> trackTLogQueueInfo( RatekeeperData* self, TLogInterface tli ) {
self->tlogQueueInfo.insert( mapPair(tli.id(), TLogQueueInfo(tli.id()) ) );
state Map<UID, TLogQueueInfo>::iterator myQueueInfo = self->tlogQueueInfo.find(tli.id());
TraceEvent("RkTracking", tli.id());
@@ -270,7 +271,7 @@ ACTOR Future<Void> splitError( Future<Void> in, Promise<Void> errOut ) {
}

ACTOR Future<Void> trackEachStorageServer(
Ratekeeper* self,
RatekeeperData* self,
FutureStream< std::pair<UID, Optional<StorageServerInterface>> > serverChanges )
{
state Map<UID, Future<Void>> actors;
@@ -289,7 +290,47 @@ ACTOR Future<Void> trackEachStorageServer(
}
}

void updateRate( Ratekeeper* self, RatekeeperLimits &limits ) {
ACTOR Future<Void> monitorServerListChange(
Reference<AsyncVar<ServerDBInfo>> dbInfo,
PromiseStream< std::pair<UID, Optional<StorageServerInterface>> > serverChanges) {
state Database db = openDBOnServer(dbInfo, TaskRateKeeper, true, true);
state std::map<UID, StorageServerInterface> oldServers;
state Transaction tr(db);

loop {
try {
vector<std::pair<StorageServerInterface, ProcessClass>> results = wait(getServerListAndProcessClasses(&tr));

std::map<UID, StorageServerInterface> newServers;
for (int i = 0; i < results.size(); i++) {
const StorageServerInterface& ssi = results[i].first;
const UID serverId = ssi.id();
newServers[serverId] = ssi;

if (oldServers.count(serverId)) {
if (ssi.getValue.getEndpoint() != oldServers[serverId].getValue.getEndpoint()) {
serverChanges.send( std::make_pair(serverId, Optional<StorageServerInterface>(ssi)) );
}
oldServers.erase(serverId);
} else {
serverChanges.send( std::make_pair(serverId, Optional<StorageServerInterface>(ssi)) );
}
}

for (const auto& it : oldServers) {
serverChanges.send( std::make_pair(it.first, Optional<StorageServerInterface>()) );
}

oldServers.swap(newServers);
tr = Transaction(db);
wait(delay(SERVER_KNOBS->SERVER_LIST_DELAY));
} catch(Error& e) {
wait( tr.onError(e) );
}
}
}

void updateRate(RatekeeperData* self, RatekeeperLimits* limits) {
//double controlFactor = ; // dt / eFoldingTime

double actualTps = self->smoothReleasedTransactions.smoothRate();
@@ -297,7 +338,7 @@ void updateRate( Ratekeeper* self, RatekeeperLimits &limits ) {
// SOMEDAY: Remove the max( 1.0, ... ) since the below calculations _should_ be able to recover back up from this value
actualTps = std::max( std::max( 1.0, actualTps ), self->smoothTotalDurableBytes.smoothRate() / CLIENT_KNOBS->TRANSACTION_SIZE_LIMIT );

limits.tpsLimit = std::numeric_limits<double>::infinity();
limits->tpsLimit = std::numeric_limits<double>::infinity();
UID reasonID = UID();
limitReason_t limitReason = limitReason_t::unlimited;
@@ -323,9 +364,9 @@ void updateRate( Ratekeeper* self, RatekeeperLimits &limits ) {

worstFreeSpaceStorageServer = std::min(worstFreeSpaceStorageServer, (int64_t)ss.smoothFreeSpace.smoothTotal() - minFreeSpace);

int64_t springBytes = std::max<int64_t>(1, std::min<int64_t>(limits.storageSpringBytes, (ss.smoothFreeSpace.smoothTotal() - minFreeSpace) * 0.2));
int64_t targetBytes = std::max<int64_t>(1, std::min(limits.storageTargetBytes, (int64_t)ss.smoothFreeSpace.smoothTotal() - minFreeSpace));
if (targetBytes != limits.storageTargetBytes) {
int64_t springBytes = std::max<int64_t>(1, std::min<int64_t>(limits->storageSpringBytes, (ss.smoothFreeSpace.smoothTotal() - minFreeSpace) * 0.2));
int64_t targetBytes = std::max<int64_t>(1, std::min(limits->storageTargetBytes, (int64_t)ss.smoothFreeSpace.smoothTotal() - minFreeSpace));
if (targetBytes != limits->storageTargetBytes) {
if (minFreeSpace == SERVER_KNOBS->MIN_FREE_SPACE) {
ssLimitReason = limitReason_t::storage_server_min_free_space;
} else {
@@ -389,9 +430,9 @@ void updateRate( Ratekeeper* self, RatekeeperLimits &limits ) {

storageTpsLimitReverseIndex.insert(std::make_pair(limitTps, &ss));

if(limitTps < limits.tpsLimit && (ssLimitReason == limitReason_t::storage_server_min_free_space || ssLimitReason == limitReason_t::storage_server_min_free_space_ratio)) {
if (limitTps < limits->tpsLimit && (ssLimitReason == limitReason_t::storage_server_min_free_space || ssLimitReason == limitReason_t::storage_server_min_free_space_ratio)) {
reasonID = ss.id;
limits.tpsLimit = limitTps;
limits->tpsLimit = limitTps;
limitReason = ssLimitReason;
}
@@ -402,19 +443,19 @@ void updateRate( Ratekeeper* self, RatekeeperLimits &limits ) {
self->healthMetrics.worstStorageDurabilityLag = worstStorageDurabilityLagStorageServer;

std::set<Optional<Standalone<StringRef>>> ignoredMachines;
for(auto ss = storageTpsLimitReverseIndex.begin(); ss != storageTpsLimitReverseIndex.end() && ss->first < limits.tpsLimit; ++ss) {
if(ignoredMachines.size() < std::min(self->configuration.storageTeamSize - 1, SERVER_KNOBS->MAX_MACHINES_FALLING_BEHIND)) {
for (auto ss = storageTpsLimitReverseIndex.begin(); ss != storageTpsLimitReverseIndex.end() && ss->first < limits->tpsLimit; ++ss) {
if (ignoredMachines.size() < std::min(self->configuration.storageTeamSize - 1, SERVER_KNOBS->MAX_MACHINES_FALLING_BEHIND)) {
ignoredMachines.insert(ss->second->locality.zoneId());
continue;
}
if(ignoredMachines.count(ss->second->locality.zoneId()) > 0) {
if (ignoredMachines.count(ss->second->locality.zoneId()) > 0) {
continue;
}

limitingStorageQueueStorageServer = ss->second->lastReply.bytesInput - ss->second->smoothDurableBytes.smoothTotal();
limits.tpsLimit = ss->first;
limitReason = ssReasons[storageTpsLimitReverseIndex.begin()->second->id];
limits->tpsLimit = ss->first;
reasonID = storageTpsLimitReverseIndex.begin()->second->id; // Although we aren't controlling based on the worst SS, we still report it as the limiting process
limitReason = ssReasons[reasonID];

break;
}
@@ -426,27 +467,27 @@ void updateRate( Ratekeeper* self, RatekeeperLimits &limits ) {
{
Version minSSVer = std::numeric_limits<Version>::max();
Version minLimitingSSVer = std::numeric_limits<Version>::max();
for(auto i = self->storageQueueInfo.begin(); i != self->storageQueueInfo.end(); ++i) {
auto& ss = i->value;
for (const auto& it : self->storageQueueInfo) {
auto& ss = it.value;
if (!ss.valid) continue;

minSSVer = std::min(minSSVer, ss.lastReply.version);

// Machines that ratekeeper isn't controlling can fall arbitrarily far behind
if(ignoredMachines.count(i->value.locality.zoneId()) == 0) {
if (ignoredMachines.count(it.value.locality.zoneId()) == 0) {
minLimitingSSVer = std::min(minLimitingSSVer, ss.lastReply.version);
}
}

Version maxTLVer = std::numeric_limits<Version>::min();
for(auto i = self->tlogQueueInfo.begin(); i != self->tlogQueueInfo.end(); ++i) {
auto& tl = i->value;
for(const auto& it : self->tlogQueueInfo) {
auto& tl = it.value;
if (!tl.valid) continue;
maxTLVer = std::max(maxTLVer, tl.lastReply.v);
}

// writeToReadLatencyLimit: 0 = infinte speed; 1 = TL durable speed ; 2 = half TL durable speed
writeToReadLatencyLimit = ((maxTLVer - minLimitingSSVer) - limits.maxVersionDifference/2) / (limits.maxVersionDifference/4);
writeToReadLatencyLimit = ((maxTLVer - minLimitingSSVer) - limits->maxVersionDifference/2) / (limits->maxVersionDifference/4);
worstVersionLag = std::max((Version)0, maxTLVer - minSSVer);
limitingVersionLag = std::max((Version)0, maxTLVer - minLimitingSSVer);
}
@@ -454,8 +495,8 @@ void updateRate( Ratekeeper* self, RatekeeperLimits &limits ) {
int64_t worstFreeSpaceTLog = std::numeric_limits<int64_t>::max();
int64_t worstStorageQueueTLog = 0;
int tlcount = 0;
for(auto i = self->tlogQueueInfo.begin(); i != self->tlogQueueInfo.end(); ++i) {
auto& tl = i->value;
for (auto& it : self->tlogQueueInfo) {
auto& tl = it.value;
if (!tl.valid) continue;
++tlcount;
@@ -465,9 +506,9 @@ void updateRate( Ratekeeper* self, RatekeeperLimits &limits ) {

worstFreeSpaceTLog = std::min(worstFreeSpaceTLog, (int64_t)tl.smoothFreeSpace.smoothTotal() - minFreeSpace);

int64_t springBytes = std::max<int64_t>(1, std::min<int64_t>(limits.logSpringBytes, (tl.smoothFreeSpace.smoothTotal() - minFreeSpace) * 0.2));
int64_t targetBytes = std::max<int64_t>(1, std::min(limits.logTargetBytes, (int64_t)tl.smoothFreeSpace.smoothTotal() - minFreeSpace));
if (targetBytes != limits.logTargetBytes) {
int64_t springBytes = std::max<int64_t>(1, std::min<int64_t>(limits->logSpringBytes, (tl.smoothFreeSpace.smoothTotal() - minFreeSpace) * 0.2));
int64_t targetBytes = std::max<int64_t>(1, std::min(limits->logTargetBytes, (int64_t)tl.smoothFreeSpace.smoothTotal() - minFreeSpace));
if (targetBytes != limits->logTargetBytes) {
if (minFreeSpace == SERVER_KNOBS->MIN_FREE_SPACE) {
tlogLimitReason = limitReason_t::log_server_min_free_space;
} else {
@@ -487,7 +528,7 @@ void updateRate( Ratekeeper* self, RatekeeperLimits &limits ) {
}
reasonID = tl.id;
limitReason = limitReason_t::log_server_min_free_space;
limits.tpsLimit = 0.0;
limits->tpsLimit = 0.0;
|
||||
}
|
||||
|
||||
double targetRateRatio = std::min( ( b + springBytes ) / (double)springBytes, 2.0 );
|
||||
|
@ -505,8 +546,8 @@ void updateRate( Ratekeeper* self, RatekeeperLimits &limits ) {
|
|||
if (targetRateRatio < .75) //< FIXME: KNOB for 2.0
|
||||
x = std::max(x, 0.95);
|
||||
double lim = actualTps * x;
|
||||
if (lim < limits.tpsLimit){
|
||||
limits.tpsLimit = lim;
|
||||
if (lim < limits->tpsLimit){
|
||||
limits->tpsLimit = lim;
|
||||
reasonID = tl.id;
|
||||
limitReason = tlogLimitReason;
|
||||
}
|
||||
|
@ -515,8 +556,8 @@ void updateRate( Ratekeeper* self, RatekeeperLimits &limits ) {
|
|||
// Don't let any tlogs use up its target bytes faster than its MVCC window!
|
||||
double x = ((targetBytes - springBytes) / ((((double)SERVER_KNOBS->MAX_READ_TRANSACTION_LIFE_VERSIONS)/SERVER_KNOBS->VERSIONS_PER_SECOND) + 2.0)) / inputRate;
|
||||
double lim = actualTps * x;
|
||||
if (lim < limits.tpsLimit){
|
||||
limits.tpsLimit = lim;
|
||||
if (lim < limits->tpsLimit){
|
||||
limits->tpsLimit = lim;
|
||||
reasonID = tl.id;
|
||||
limitReason = limitReason_t::log_server_mvcc_write_bandwidth;
|
||||
}
|
||||
|
@ -525,10 +566,10 @@ void updateRate( Ratekeeper* self, RatekeeperLimits &limits ) {
|
|||
|
||||
self->healthMetrics.worstTLogQueue = worstStorageQueueTLog;
|
||||
|
||||
limits.tpsLimit = std::max(limits.tpsLimit, 0.0);
|
||||
limits->tpsLimit = std::max(limits->tpsLimit, 0.0);
|
||||
|
||||
if(g_network->isSimulated() && g_simulator.speedUpSimulation) {
|
||||
limits.tpsLimit = std::max(limits.tpsLimit, 100.0);
|
||||
limits->tpsLimit = std::max(limits->tpsLimit, 100.0);
|
||||
}
|
||||
|
||||
int64_t totalDiskUsageBytes = 0;
|
||||
|
@ -539,13 +580,13 @@ void updateRate( Ratekeeper* self, RatekeeperLimits &limits ) {
|
|||
if (s.value.valid)
|
||||
totalDiskUsageBytes += s.value.lastReply.storageBytes.used;
|
||||
|
||||
limits.tpsLimitMetric = std::min(limits.tpsLimit, 1e6);
|
||||
limits.reasonMetric = limitReason;
|
||||
limits->tpsLimitMetric = std::min(limits->tpsLimit, 1e6);
|
||||
limits->reasonMetric = limitReason;
|
||||
|
||||
if (g_random->random01() < 0.1) {
|
||||
std::string name = "RkUpdate" + limits.context;
|
||||
std::string name = "RkUpdate" + limits->context;
|
||||
TraceEvent(name.c_str())
|
||||
.detail("TPSLimit", limits.tpsLimit)
|
||||
.detail("TPSLimit", limits->tpsLimit)
|
||||
.detail("Reason", limitReason)
|
||||
.detail("ReasonServerID", reasonID)
|
||||
.detail("ReleasedTPS", self->smoothReleasedTransactions.smoothRate())
|
||||
|
@ -566,7 +607,7 @@ void updateRate( Ratekeeper* self, RatekeeperLimits &limits ) {
|
|||
}
|
||||
}
|
||||
|
||||
ACTOR Future<Void> configurationMonitor( Ratekeeper* self, Reference<AsyncVar<ServerDBInfo>> dbInfo ) {
|
||||
ACTOR Future<Void> configurationMonitor(Reference<AsyncVar<ServerDBInfo>> dbInfo, DatabaseConfiguration* conf) {
|
||||
state Database cx = openDBOnServer(dbInfo, TaskDefaultEndpoint, true, true);
|
||||
loop {
|
||||
state ReadYourWritesTransaction tr(cx);
|
||||
|
@ -578,7 +619,7 @@ ACTOR Future<Void> configurationMonitor( Ratekeeper* self, Reference<AsyncVar<Se
|
|||
Standalone<RangeResultRef> results = wait( tr.getRange( configKeys, CLIENT_KNOBS->TOO_MANY ) );
|
||||
ASSERT( !results.more && results.size() < CLIENT_KNOBS->TOO_MANY );
|
||||
|
||||
self->configuration.fromKeyValues( (VectorRef<KeyValueRef>) results );
|
||||
conf->fromKeyValues( (VectorRef<KeyValueRef>) results );
|
||||
|
||||
state Future<Void> watchFuture = tr.watch(moveKeysLockOwnerKey);
|
||||
wait( tr.commit() );
|
||||
|
@ -591,21 +632,21 @@ ACTOR Future<Void> configurationMonitor( Ratekeeper* self, Reference<AsyncVar<Se
|
|||
}
|
||||
}
|
||||
|
||||
ACTOR Future<Void> rateKeeper(
|
||||
Reference<AsyncVar<ServerDBInfo>> dbInfo,
|
||||
PromiseStream< std::pair<UID, Optional<StorageServerInterface>> > serverChanges,
|
||||
FutureStream< struct GetRateInfoRequest > getRateInfo,
|
||||
double* lastLimited)
|
||||
{
|
||||
state Ratekeeper self;
|
||||
state Future<Void> track = trackEachStorageServer( &self, serverChanges.getFuture() );
|
||||
ACTOR Future<Void> rateKeeper(RatekeeperInterface rkInterf, Reference<AsyncVar<ServerDBInfo>> dbInfo) {
|
||||
state RatekeeperData self;
|
||||
state Future<Void> timeout = Void();
|
||||
state std::vector<Future<Void>> actors;
|
||||
state std::vector<Future<Void>> tlogTrackers;
|
||||
state std::vector<TLogInterface> tlogInterfs;
|
||||
state Promise<Void> err;
|
||||
state Future<Void> configMonitor = configurationMonitor(&self, dbInfo);
|
||||
self.lastLimited = lastLimited;
|
||||
state Future<Void> collection = actorCollection( self.addActor.getFuture() );
|
||||
|
||||
TraceEvent("Ratekeeper_Starting", rkInterf.id());
|
||||
self.addActor.send( waitFailureServer(rkInterf.waitFailure.getFuture()) );
|
||||
self.addActor.send( configurationMonitor(dbInfo, &self.configuration) );
|
||||
|
||||
PromiseStream< std::pair<UID, Optional<StorageServerInterface>> > serverChanges;
|
||||
self.addActor.send( monitorServerListChange(dbInfo, serverChanges) );
|
||||
self.addActor.send( trackEachStorageServer(&self, serverChanges.getFuture()) );
|
||||
|
||||
TraceEvent("RkTLogQueueSizeParameters").detail("Target", SERVER_KNOBS->TARGET_BYTES_PER_TLOG).detail("Spring", SERVER_KNOBS->SPRING_BYTES_TLOG)
|
||||
.detail("Rate", (SERVER_KNOBS->TARGET_BYTES_PER_TLOG - SERVER_KNOBS->SPRING_BYTES_TLOG) / ((((double)SERVER_KNOBS->MAX_READ_TRANSACTION_LIFE_VERSIONS) / SERVER_KNOBS->VERSIONS_PER_SECOND) + 2.0));
|
||||
|
@ -617,18 +658,14 @@ ACTOR Future<Void> rateKeeper(
|
|||
for( int i = 0; i < tlogInterfs.size(); i++ )
|
||||
tlogTrackers.push_back( splitError( trackTLogQueueInfo(&self, tlogInterfs[i]), err ) );
|
||||
|
||||
loop{
|
||||
choose {
|
||||
when (wait( track )) { break; }
|
||||
try {
|
||||
state bool lastLimited = false;
|
||||
loop choose {
|
||||
when (wait( timeout )) {
|
||||
updateRate(&self, self.normalLimits);
|
||||
updateRate(&self, self.batchLimits);
|
||||
|
||||
if(self.smoothReleasedTransactions.smoothRate() > SERVER_KNOBS->LAST_LIMITED_RATIO * self.batchLimits.tpsLimit) {
|
||||
*self.lastLimited = now();
|
||||
}
|
||||
|
||||
updateRate(&self, &self.normalLimits);
|
||||
updateRate(&self, &self.batchLimits);
|
||||
|
||||
lastLimited = self.smoothReleasedTransactions.smoothRate() > SERVER_KNOBS->LAST_LIMITED_RATIO * self.batchLimits.tpsLimit;
|
||||
double tooOld = now() - 1.0;
|
||||
for(auto p=self.proxy_transactionCounts.begin(); p!=self.proxy_transactionCounts.end(); ) {
|
||||
if (p->second.time < tooOld)
|
||||
|
@ -638,7 +675,7 @@ ACTOR Future<Void> rateKeeper(
|
|||
}
|
||||
timeout = delayJittered(SERVER_KNOBS->METRIC_UPDATE_RATE);
|
||||
}
|
||||
when (GetRateInfoRequest req = waitNext(getRateInfo)) {
|
||||
when (GetRateInfoRequest req = waitNext(rkInterf.getRateInfo.getFuture())) {
|
||||
GetRateInfoReply reply;
|
||||
|
||||
auto& p = self.proxy_transactionCounts[ req.requesterID ];
|
||||
|
@ -660,6 +697,7 @@ ACTOR Future<Void> rateKeeper(
|
|||
|
||||
reply.healthMetrics.update(self.healthMetrics, true, req.detailed);
|
||||
reply.healthMetrics.tpsLimit = self.normalLimits.tpsLimit;
|
||||
reply.healthMetrics.batchLimited = lastLimited;
|
||||
|
||||
req.reply.send( reply );
|
||||
}
|
||||
|
@ -672,8 +710,14 @@ ACTOR Future<Void> rateKeeper(
|
|||
tlogTrackers.push_back( splitError( trackTLogQueueInfo(&self, tlogInterfs[i]), err ) );
|
||||
}
|
||||
}
|
||||
when(wait(configMonitor)) {}
|
||||
when ( wait(collection) ) {
|
||||
ASSERT(false);
|
||||
throw internal_error();
|
||||
}
|
||||
}
|
||||
}
|
||||
catch (Error& err) {
|
||||
TraceEvent("Ratekeeper_Died", rkInterf.id()).error(err, true);
|
||||
}
|
||||
return Void();
|
||||
}
|
||||
|
|
|
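The rewritten rateKeeper above supervises its helpers with flow's actor-collection idiom: each long-lived child actor is pushed into a PromiseStream of futures and a single actorCollection future watches them all, so a failure in any child surfaces in the catch block and tears the whole role down. A minimal sketch of that idiom, assuming an invented ExampleData struct and child actor in place of RatekeeperData's real members:

struct ExampleData {
	PromiseStream<Future<Void>> addActor; // children register themselves here
};

ACTOR Future<Void> exampleRole(ExampleData* self) {
	// actorCollection() never completes normally; it only throws if a child fails.
	state Future<Void> collection = actorCollection(self->addActor.getFuture());
	self->addActor.send(someChildActor(self)); // someChildActor is hypothetical
	loop choose {
		when(wait(collection)) {
			ASSERT(false); // unreachable: collection only finishes by throwing
			throw internal_error();
		}
	}
}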
@@ -1,35 +0,0 @@
-/*
- * Ratekeeper.h
- *
- * This source file is part of the FoundationDB open source project
- *
- * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef FDBSERVER_RATEKEEPER_H
-#define FDBSERVER_RATEKEEPER_H
-#pragma once
-
-#include "fdbserver/MasterInterface.h"
-#include "fdbserver/TLogInterface.h"
-#include "fdbclient/DatabaseConfiguration.h"
-
-Future<Void> rateKeeper(
-	Reference<AsyncVar<struct ServerDBInfo>> const& dbInfo,
-	PromiseStream< std::pair<UID, Optional<StorageServerInterface>> > const& serverChanges, // actually an input, but we don't want broken_promise
-	FutureStream< struct GetRateInfoRequest > const& getRateInfo,
-	double* const& lastLimited);
-
-#endif
@@ -0,0 +1,81 @@
+/*
+ * RatekeeperInterface.h
+ *
+ * This source file is part of the FoundationDB open source project
+ *
+ * Copyright 2013-2019 Apple Inc. and the FoundationDB project authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FDBSERVER_RATEKEEPERINTERFACE_H
+#define FDBSERVER_RATEKEEPERINTERFACE_H
+
+#include "fdbclient/FDBTypes.h"
+#include "fdbrpc/fdbrpc.h"
+#include "fdbrpc/Locality.h"
+
+struct RatekeeperInterface {
+	RequestStream<ReplyPromise<Void>> waitFailure;
+	RequestStream<struct GetRateInfoRequest> getRateInfo;
+	struct LocalityData locality;
+
+	RatekeeperInterface() {}
+	explicit RatekeeperInterface(const struct LocalityData& l) : locality(l) {}
+
+	void initEndpoints() {}
+	UID id() const { return getRateInfo.getEndpoint().token; }
+	NetworkAddress address() const { return getRateInfo.getEndpoint().getPrimaryAddress(); }
+	bool operator== (const RatekeeperInterface& r) const {
+		return id() == r.id();
+	}
+	bool operator!= (const RatekeeperInterface& r) const {
+		return !(*this == r);
+	}
+
+	template <class Archive>
+	void serialize(Archive& ar) {
+		serializer(ar, waitFailure, getRateInfo, locality);
+	}
+};
+
+struct GetRateInfoRequest {
+	UID requesterID;
+	int64_t totalReleasedTransactions;
+	int64_t batchReleasedTransactions;
+	bool detailed;
+	ReplyPromise<struct GetRateInfoReply> reply;
+
+	GetRateInfoRequest() {}
+	GetRateInfoRequest(UID const& requesterID, int64_t totalReleasedTransactions, int64_t batchReleasedTransactions, bool detailed)
+		: requesterID(requesterID), totalReleasedTransactions(totalReleasedTransactions), batchReleasedTransactions(batchReleasedTransactions), detailed(detailed) {}
+
+	template <class Ar>
+	void serialize(Ar& ar) {
+		serializer(ar, requesterID, totalReleasedTransactions, batchReleasedTransactions, detailed, reply);
+	}
+};
+
+struct GetRateInfoReply {
+	double transactionRate;
+	double batchTransactionRate;
+	double leaseDuration;
+	HealthMetrics healthMetrics;
+
+	template <class Ar>
+	void serialize(Ar& ar) {
+		serializer(ar, transactionRate, batchTransactionRate, leaseDuration, healthMetrics);
+	}
+};
+
+#endif //FDBSERVER_RATEKEEPERINTERFACE_H
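For orientation, a consumer of the new interface (a proxy, in practice) would ask Ratekeeper for a rate roughly like the sketch below. The actor name, the released-transaction bookkeeping, and the half-lease polling interval are illustrative assumptions; GetRateInfoRequest and GetRateInfoReply are exactly the types added above.

ACTOR Future<Void> pollRateInfo(RatekeeperInterface rk, UID myID) {
	state int64_t totalReleased = 0; // would be updated as transactions are started
	state int64_t batchReleased = 0;
	loop {
		GetRateInfoReply reply = wait(rk.getRateInfo.getReply(
			GetRateInfoRequest(myID, totalReleased, batchReleased, /*detailed=*/false)));
		// reply.transactionRate is this requester's TPS budget, and
		// reply.leaseDuration bounds how long that budget may be used
		// before a fresh reply is required.
		wait(delay(reply.leaseDuration / 2.0));
	}
}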
@@ -26,6 +26,7 @@
 #include "fdbserver/DataDistributorInterface.h"
 #include "fdbserver/MasterInterface.h"
 #include "fdbserver/LogSystemConfig.h"
+#include "fdbserver/RatekeeperInterface.h"
 #include "fdbserver/RecoveryState.h"
 #include "fdbserver/LatencyBandConfig.h"

@@ -39,6 +40,7 @@ struct ServerDBInfo {
 	ClientDBInfo client; // After a successful recovery, eventually proxies that communicate with it
 	Optional<DataDistributorInterface> distributor; // The best guess of current data distributor.
 	MasterInterface master; // The best guess as to the most recent master, which might still be recovering
+	Optional<RatekeeperInterface> ratekeeper;
 	vector<ResolverInterface> resolvers;
 	DBRecoveryCount recoveryCount; // A recovery count from DBCoreState. A successful master recovery increments it twice; unsuccessful recoveries may increment it once. Depending on where the current master is in its recovery process, this might not have been written by the current master.
 	RecoveryState recoveryState;

@@ -55,7 +57,7 @@ struct ServerDBInfo {

 	template <class Ar>
 	void serialize( Ar& ar ) {
-		serializer(ar, id, clusterInterface, client, distributor, master, resolvers, recoveryCount, recoveryState, masterLifetime, logSystemConfig, priorCommittedLogServers, latencyBandConfig);
+		serializer(ar, id, clusterInterface, client, distributor, master, ratekeeper, resolvers, recoveryCount, recoveryState, masterLifetime, logSystemConfig, priorCommittedLogServers, latencyBandConfig);
 	}
 };
@@ -1389,7 +1389,7 @@ JsonBuilderObject getPerfLimit(TraceEventFields const& ratekeeper, double transP
 	return perfLimit;
 }

-ACTOR static Future<JsonBuilderObject> workloadStatusFetcher(Reference<AsyncVar<struct ServerDBInfo>> db, vector<WorkerDetails> workers, WorkerDetails mWorker, WorkerDetails ddWorker,
+ACTOR static Future<JsonBuilderObject> workloadStatusFetcher(Reference<AsyncVar<struct ServerDBInfo>> db, vector<WorkerDetails> workers, WorkerDetails mWorker, WorkerDetails rkWorker,
 	JsonBuilderObject *qos, JsonBuilderObject *data_overlay, std::set<std::string> *incomplete_reasons, Future<ErrorOr<vector<std::pair<StorageServerInterface, EventMap>>>> storageServerFuture)
 {
 	state JsonBuilderObject statusObj;

@@ -1441,8 +1441,8 @@ ACTOR static Future<JsonBuilderObject> workloadStatusFetcher(Reference<AsyncVar<

 	// Transactions
 	try {
-		state TraceEventFields ratekeeper = wait( timeoutError(ddWorker.interf.eventLogRequest.getReply( EventLogRequest(LiteralStringRef("RkUpdate") ) ), 1.0) );
-		TraceEventFields batchRatekeeper = wait( timeoutError(ddWorker.interf.eventLogRequest.getReply( EventLogRequest(LiteralStringRef("RkUpdateBatch") ) ), 1.0) );
+		state TraceEventFields ratekeeper = wait( timeoutError(rkWorker.interf.eventLogRequest.getReply( EventLogRequest(LiteralStringRef("RkUpdate") ) ), 1.0) );
+		TraceEventFields batchRatekeeper = wait( timeoutError(rkWorker.interf.eventLogRequest.getReply( EventLogRequest(LiteralStringRef("RkUpdateBatch") ) ), 1.0) );

 		double tpsLimit = ratekeeper.getDouble("TPSLimit");
 		double batchTpsLimit = batchRatekeeper.getDouble("TPSLimit");

@@ -1818,6 +1818,7 @@ ACTOR Future<StatusReply> clusterGetStatus(
 	state std::set<std::string> status_incomplete_reasons;
 	state WorkerDetails mWorker;
 	state WorkerDetails ddWorker; // DataDistributor worker
+	state WorkerDetails rkWorker; // RateKeeper worker

 	try {
 		// Get the master Worker interface

@@ -1839,6 +1840,18 @@ ACTOR Future<StatusReply> clusterGetStatus(
 			ddWorker = _ddWorker.get();
 		}

+		// Get the RateKeeper worker interface
+		Optional<WorkerDetails> _rkWorker;
+		if (db->get().ratekeeper.present()) {
+			_rkWorker = getWorker( workers, db->get().ratekeeper.get().address() );
+		}
+
+		if (!db->get().ratekeeper.present() || !_rkWorker.present()) {
+			messages.push_back(JsonString::makeMessage("unreachable_ratekeeper_worker", "Unable to locate the ratekeeper worker."));
+		} else {
+			rkWorker = _rkWorker.get();
+		}
+
 		// Get latest events for various event types from ALL workers
 		// WorkerEvents is a map of worker's NetworkAddress to its event string
 		// The pair represents worker responses and a set of worker NetworkAddress strings which did not respond

@@ -1942,7 +1955,7 @@ ACTOR Future<StatusReply> clusterGetStatus(
 		state int minReplicasRemaining = -1;
 		std::vector<Future<JsonBuilderObject>> futures2;
 		futures2.push_back(dataStatusFetcher(ddWorker, &minReplicasRemaining));
-		futures2.push_back(workloadStatusFetcher(db, workers, mWorker, ddWorker, &qos, &data_overlay, &status_incomplete_reasons, storageServerFuture));
+		futures2.push_back(workloadStatusFetcher(db, workers, mWorker, rkWorker, &qos, &data_overlay, &status_incomplete_reasons, storageServerFuture));
 		futures2.push_back(layerStatusFetcher(cx, &messages, &status_incomplete_reasons));
 		futures2.push_back(lockedStatusFetcher(db, &messages, &status_incomplete_reasons));
@@ -28,6 +28,7 @@
 #include "fdbserver/DataDistributorInterface.h"
 #include "fdbserver/MasterInterface.h"
 #include "fdbserver/TLogInterface.h"
+#include "fdbserver/RatekeeperInterface.h"
 #include "fdbserver/ResolverInterface.h"
 #include "fdbclient/StorageServerInterface.h"
 #include "fdbserver/TesterInterface.actor.h"

@@ -46,6 +47,7 @@ struct WorkerInterface {
 	RequestStream< struct RecruitMasterRequest > master;
 	RequestStream< struct InitializeMasterProxyRequest > masterProxy;
 	RequestStream< struct InitializeDataDistributorRequest > dataDistributor;
+	RequestStream< struct InitializeRatekeeperRequest > ratekeeper;
 	RequestStream< struct InitializeResolverRequest > resolver;
 	RequestStream< struct InitializeStorageRequest > storage;
 	RequestStream< struct InitializeLogRouterRequest > logRouter;

@@ -68,7 +70,7 @@ struct WorkerInterface {

 	template <class Ar>
 	void serialize(Ar& ar) {
-		serializer(ar, clientInterface, locality, tLog, master, masterProxy, dataDistributor, resolver, storage, logRouter, debugPing, coordinationPing, waitFailure, setMetricsRate, eventLogRequest, traceBatchDumpRequest, testerInterface, diskStoreRequest);
+		serializer(ar, clientInterface, locality, tLog, master, masterProxy, dataDistributor, ratekeeper, resolver, storage, logRouter, debugPing, coordinationPing, waitFailure, setMetricsRate, eventLogRequest, traceBatchDumpRequest, testerInterface, diskStoreRequest);
 	}
 };

@@ -159,12 +161,26 @@ struct InitializeDataDistributorRequest {
 	UID reqId;
 	ReplyPromise<DataDistributorInterface> reply;

 	InitializeDataDistributorRequest() {}
 	explicit InitializeDataDistributorRequest(UID uid) : reqId(uid) {}
 	template <class Ar>
 	void serialize( Ar& ar ) {
 		serializer(ar, reqId, reply);
 	}
 };

+struct InitializeRatekeeperRequest {
+	UID reqId;
+	ReplyPromise<RatekeeperInterface> reply;
+
+	InitializeRatekeeperRequest() {}
+	explicit InitializeRatekeeperRequest(UID uid) : reqId(uid) {}
+	template <class Ar>
+	void serialize(Ar& ar) {
+		serializer(ar, reqId, reply);
+	}
+};
+
 struct InitializeResolverRequest {
 	uint64_t recoveryCount;
 	int proxyCount;

@@ -314,6 +330,7 @@ struct Role {
 	static const Role TESTER;
 	static const Role LOG_ROUTER;
 	static const Role DATA_DISTRIBUTOR;
+	static const Role RATE_KEEPER;

 	std::string roleName;
 	std::string abbreviation;

@@ -375,6 +392,7 @@ ACTOR Future<Void> resolver(ResolverInterface proxy, InitializeResolverRequest i
 ACTOR Future<Void> logRouter(TLogInterface interf, InitializeLogRouterRequest req,
 		Reference<AsyncVar<ServerDBInfo>> db);
 ACTOR Future<Void> dataDistributor(DataDistributorInterface ddi, Reference<AsyncVar<ServerDBInfo>> db);
+ACTOR Future<Void> rateKeeper(RatekeeperInterface rki, Reference<AsyncVar<ServerDBInfo>> db);

 void registerThreadForProfiling();
 void updateCpuProfiler(ProfilerRequest req);
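With the new request stream in place, recruiting a ratekeeper from the cluster controller side reduces to sending an InitializeRatekeeperRequest to a chosen worker and waiting for the RatekeeperInterface it replies with. A hedged sketch; the `worker` variable and any surrounding retry logic are illustrative assumptions, while the request and reply types are the ones defined above:

// `worker` is a hypothetical WorkerInterface chosen by the cluster controller.
InitializeRatekeeperRequest req(g_random->randomUniqueID());
Future<RatekeeperInterface> recruited = worker.ratekeeper.getReply(req);
// Once `recruited` is ready, the interface would be published in ServerDBInfo
// so proxies and status can find the new ratekeeper.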
@@ -759,13 +759,6 @@ std::pair<NetworkAddressList, NetworkAddressList> buildNetworkAddresses(const Cl

 	const NetworkAddressList& coordinators = connectionFile.getConnectionString().coordinators();
 	ASSERT(coordinators.size() > 0);
-	bool clusterIsTLS = coordinators[0].isTLS();
-	for (int ii = 1; ii < coordinators.size(); ++ii) {
-		if (coordinators[ii].isTLS() != clusterIsTLS) {
-			fprintf(stderr, "ERROR: coordinators cannot have mixed TLS state.\n");
-			flushAndExit(FDB_EXIT_ERROR);
-		}
-	}

 	int numTLSAddress = 0;
@@ -191,7 +191,7 @@
 			<EnableCompile>false</EnableCompile>
 		</ActorCompiler>
 	<ClInclude Include="QuietDatabase.h" />
-	<ClInclude Include="Ratekeeper.h" />
+	<ClInclude Include="RatekeeperInterface.h" />
 	<ClInclude Include="RecoveryState.h" />
 	<ClInclude Include="ResolverInterface.h" />
 	<ClInclude Include="RestoreInterface.h" />

@@ -280,7 +280,7 @@
 	<Optimization>Disabled</Optimization>
 	<BasicRuntimeChecks>EnableFastChecks</BasicRuntimeChecks>
 	<RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
-	<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=0x0502;WINVER=0x0502;NTDDI_VERSION=0x05020000;_DEBUG;_HAS_ITERATOR_DEBUGGING=0;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+	<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=0x0502;WINVER=0x0502;BOOST_ALL_NO_LIB;NTDDI_VERSION=0x05020000;_DEBUG;_HAS_ITERATOR_DEBUGGING=0;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
 	<AdditionalIncludeDirectories>%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
 	<MultiProcessorCompilation>true</MultiProcessorCompilation>
 	<AdditionalOptions>/bigobj @../flow/no_intellisense.opt %(AdditionalOptions)</AdditionalOptions>

@@ -301,7 +301,7 @@
 	<Optimization>Full</Optimization>
 	<RuntimeLibrary>MultiThreaded</RuntimeLibrary>
 	<IntrinsicFunctions>true</IntrinsicFunctions>
-	<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=0x0502;WINVER=0x0502;NTDDI_VERSION=0x05020000;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+	<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=0x0502;WINVER=0x0502;BOOST_ALL_NO_LIB;NTDDI_VERSION=0x05020000;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
 	<AdditionalIncludeDirectories>%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
 	<EnableEnhancedInstructionSet>NotSet</EnableEnhancedInstructionSet>
 	<EnablePREfast>false</EnablePREfast>

@@ -310,6 +310,7 @@
 	<ItemGroup>
 	<ClInclude Include="ConflictSet.h" />
 	<ClInclude Include="DataDistribution.actor.h" />
+	<ClInclude Include="DataDistributorInterface.h" />
 	<ClInclude Include="MoveKeys.actor.h" />
 	<ClInclude Include="pubsub.h" />
 	<ClInclude Include="Knobs.h" />

@@ -343,7 +344,7 @@
 	</ClInclude>
 	<ClInclude Include="LeaderElection.h" />
 	<ClInclude Include="StorageMetrics.h" />
-	<ClInclude Include="Ratekeeper.h" />
+	<ClInclude Include="RatekeeperInterface.h" />
 	<ClInclude Include="Status.h" />
 	<ClInclude Include="IDiskQueue.h" />
 	<ClInclude Include="CoroFlow.h" />
@@ -31,7 +31,6 @@
 #include <iterator>
 #include "fdbserver/WaitFailure.h"
 #include "fdbserver/WorkerInterface.actor.h"
-#include "fdbserver/Ratekeeper.h"
 #include "fdbserver/ClusterRecruitmentInterface.h"
 #include "fdbserver/ServerDBInfo.h"
 #include "fdbserver/CoordinatedState.h"

@@ -493,6 +492,8 @@ ACTOR Future<Standalone<CommitTransactionRef>> provisionalMaster( Reference<Mast
 	auto lockedKey = parent->txnStateStore->readValue(databaseLockedKey).get();
 	state bool locked = lockedKey.present() && lockedKey.get().size();

+	state Optional<Value> metadataVersion = parent->txnStateStore->readValue(metadataVersionKey).get();
+
 	// We respond to a minimal subset of the master proxy protocol. Our sole purpose is to receive a single write-only transaction
 	// which might repair our configuration, and return it.
 	loop choose {

@@ -501,6 +502,7 @@ ACTOR Future<Standalone<CommitTransactionRef>> provisionalMaster( Reference<Mast
 			GetReadVersionReply rep;
 			rep.version = parent->lastEpochEnd;
 			rep.locked = locked;
+			rep.metadataVersion = metadataVersion;
 			req.reply.send( rep );
 		} else
 			req.reply.send(Never()); // We can't perform causally consistent reads without recovering
@@ -350,6 +350,7 @@ ACTOR Future<Void> registrationClient(
 		Reference<AsyncVar<ClusterControllerPriorityInfo>> asyncPriorityInfo,
 		ProcessClass initialClass,
 		Reference<AsyncVar<Optional<DataDistributorInterface>>> ddInterf,
+		Reference<AsyncVar<Optional<RatekeeperInterface>>> rkInterf,
 		Reference<AsyncVar<bool>> degraded) {
 	// Keeps the cluster controller (as it may be re-elected) informed that this worker exists
 	// The cluster controller uses waitFailureClient to find out if we die, and returns from registrationReply (requiring us to re-register)

@@ -357,7 +358,7 @@ ACTOR Future<Void> registrationClient(
 	state Generation requestGeneration = 0;
 	state ProcessClass processClass = initialClass;
 	loop {
-		RegisterWorkerRequest request(interf, initialClass, processClass, asyncPriorityInfo->get(), requestGeneration++, ddInterf->get(), degraded->get());
+		RegisterWorkerRequest request(interf, initialClass, processClass, asyncPriorityInfo->get(), requestGeneration++, ddInterf->get(), rkInterf->get(), degraded->get());
 		Future<RegisterWorkerReply> registrationReply = ccInterface->get().present() ? brokenPromiseToNever( ccInterface->get().get().registerWorker.getReply(request) ) : Never();
 		choose {
 			when ( RegisterWorkerReply reply = wait( registrationReply )) {

@@ -366,6 +367,7 @@ ACTOR Future<Void> registrationClient(
 			}
 			when ( wait( ccInterface->onChange() )) {}
 			when ( wait( ddInterf->onChange() ) ) {}
+			when ( wait( rkInterf->onChange() ) ) {}
 			when ( wait( degraded->onChange() ) ) {}
 		}
 	}

@@ -612,6 +614,7 @@ ACTOR Future<Void> workerServer( Reference<ClusterConnectionFile> connFile, Refe
 		Reference<AsyncVar<ClusterControllerPriorityInfo>> asyncPriorityInfo, ProcessClass initialClass, std::string folder, int64_t memoryLimit, std::string metricsConnFile, std::string metricsPrefix, Promise<Void> recoveredDiskFiles) {
 	state PromiseStream< ErrorInfo > errors;
 	state Reference<AsyncVar<Optional<DataDistributorInterface>>> ddInterf( new AsyncVar<Optional<DataDistributorInterface>>() );
+	state Reference<AsyncVar<Optional<RatekeeperInterface>>> rkInterf( new AsyncVar<Optional<RatekeeperInterface>>() );
 	state Future<Void> handleErrors = workerHandleErrors( errors.getFuture() ); // Needs to be stopped last
 	state ActorCollection errorForwarders(false);
 	state Future<Void> loggingTrigger = Void();

@@ -760,7 +763,7 @@ ACTOR Future<Void> workerServer( Reference<ClusterConnectionFile> connFile, Refe
 		wait(waitForAll(recoveries));
 		recoveredDiskFiles.send(Void());

-		errorForwarders.add( registrationClient( ccInterface, interf, asyncPriorityInfo, initialClass, ddInterf, degraded ) );
+		errorForwarders.add( registrationClient( ccInterface, interf, asyncPriorityInfo, initialClass, ddInterf, rkInterf, degraded ) );

 		TraceEvent("RecoveriesComplete", interf.id());

@@ -833,6 +836,7 @@ ACTOR Future<Void> workerServer( Reference<ClusterConnectionFile> connFile, Refe
 				TEST(true); // Recruited while already a data distributor.
 			} else {
 				startRole( Role::DATA_DISTRIBUTOR, recruited.id(), interf.id() );
+				DUMPTOKEN( recruited.waitFailure );

 				Future<Void> dataDistributorProcess = dataDistributor( recruited, dbInfo );
 				errorForwarders.add( forwardError( errors, Role::DATA_DISTRIBUTOR, recruited.id(), setWhenDoneOrError( dataDistributorProcess, ddInterf, Optional<DataDistributorInterface>() ) ) );

@@ -841,6 +845,25 @@ ACTOR Future<Void> workerServer( Reference<ClusterConnectionFile> connFile, Refe
 			TraceEvent("DataDistributorReceived", req.reqId).detail("DataDistributorId", recruited.id());
 			req.reply.send(recruited);
 		}
+		when ( InitializeRatekeeperRequest req = waitNext(interf.ratekeeper.getFuture()) ) {
+			RatekeeperInterface recruited(locality);
+			recruited.initEndpoints();
+
+			if (rkInterf->get().present()) {
+				recruited = rkInterf->get().get();
+				TEST(true); // Recruited while already a ratekeeper.
+			} else {
+				startRole(Role::RATE_KEEPER, recruited.id(), interf.id());
+				DUMPTOKEN( recruited.waitFailure );
+				DUMPTOKEN( recruited.getRateInfo );
+
+				Future<Void> ratekeeper = rateKeeper( recruited, dbInfo );
+				errorForwarders.add( forwardError( errors, Role::RATE_KEEPER, recruited.id(), setWhenDoneOrError( ratekeeper, rkInterf, Optional<RatekeeperInterface>() ) ) );
+				rkInterf->set(Optional<RatekeeperInterface>(recruited));
+			}
+			TraceEvent("Ratekeeper_InitRequest", req.reqId).detail("RatekeeperId", recruited.id());
+			req.reply.send(recruited);
+		}
 		when( InitializeTLogRequest req = waitNext(interf.tLog.getFuture()) ) {
 			// For now, there's a one-to-one mapping of spill type to TLogVersion.
 			// With future work, a particular version of the TLog can support multiple

@@ -1248,3 +1271,4 @@ const Role Role::CLUSTER_CONTROLLER("ClusterController", "CC");
 const Role Role::TESTER("Tester", "TS");
 const Role Role::LOG_ROUTER("LogRouter", "LR");
 const Role Role::DATA_DISTRIBUTOR("DataDistributor", "DD");
+const Role Role::RATE_KEEPER("RateKeeper", "RK");
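The recruitment arm above pairs forwardError with setWhenDoneOrError so that the advertised interface is cleared once the role's actor finishes or fails, which is what lets a later InitializeRatekeeperRequest start a fresh ratekeeper. The real helper is defined elsewhere in this file and is not part of this diff; the following is an assumed sketch of its shape:

ACTOR template <class T>
Future<Void> setWhenDoneOrError(Future<Void> condition, Reference<AsyncVar<T>> var, T val) {
	try {
		wait(condition);
	} catch (Error& e) {
		if (e.code() == error_code_actor_cancelled) throw; // don't clobber on cancellation
	}
	var->set(val); // e.g. publish Optional<RatekeeperInterface>() after the role dies
	return Void();
}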
@@ -39,6 +39,8 @@ struct VersionStampWorkload : TestWorkload {
 	bool validateExtraDB;
 	std::map<Key, std::vector<std::pair<Version, Standalone<StringRef>>>> key_commit;
 	std::map<Key, std::vector<std::pair<Version, Standalone<StringRef>>>> versionStampKey_commit;
+	int apiVersion;
+	bool soleOwnerOfMetadataVersionKey;

 	VersionStampWorkload(WorkloadContext const& wcx)
 		: TestWorkload(wcx)

@@ -52,6 +54,7 @@ struct VersionStampWorkload : TestWorkload {
 		vsKeyPrefix = LiteralStringRef("K_").withPrefix(prefix);
 		vsValuePrefix = LiteralStringRef("V_").withPrefix(prefix);
 		validateExtraDB = getOption(options, LiteralStringRef("validateExtraDB"), false);
+		soleOwnerOfMetadataVersionKey = getOption(options, LiteralStringRef("soleOwnerOfMetadataVersionKey"), false);
 	}

 	virtual std::string description() { return "VersionStamp"; }

@@ -62,7 +65,6 @@ struct VersionStampWorkload : TestWorkload {
 		// Versionstamp behavior changed starting with API version 520, so
 		// choose a version to check compatibility.
 		double choice = g_random->random01();
-		int apiVersion;
 		if (choice < 0.1) {
 			apiVersion = 500;
 		}

@@ -83,6 +85,10 @@ struct VersionStampWorkload : TestWorkload {
 	}

 	Key keyForIndex(uint64_t index) {
+		if((apiVersion >= 610 || apiVersion == Database::API_VERSION_LATEST) && index == 0) {
+			return metadataVersionKey;
+		}
+
 		Key result = makeString(keyBytes);
 		uint8_t* data = mutateString(result);
 		memset(data, '.', keyBytes);

@@ -157,11 +163,35 @@ struct VersionStampWorkload : TestWorkload {
 		// We specifically wish to grab the smalles read version that we can get and maintain it, to
 		// have the strictest check we can on versionstamps monotonically increasing.
 		state Version readVersion = wait(tr.getReadVersion());
+
+		if(BUGGIFY) {
+			if(g_random->random01() < 0.5) {
+				loop {
+					try {
+						tr.makeSelfConflicting();
+						wait(tr.commit());
+						readVersion = tr.getCommittedVersion() - 1;
+						break;
+					} catch( Error &e ) {
+						wait( tr.onError(e) );
+					}
+				}
+			}
+			tr.reset();
+			tr.setVersion(readVersion);
+		}
+
+		state Standalone<RangeResultRef> result;
 		loop{
 			try {
-				Standalone<RangeResultRef> result = wait(tr.getRange(KeyRangeRef(self->vsValuePrefix, endOfRange(self->vsValuePrefix)), self->nodeCount + 1));
+				Standalone<RangeResultRef> result_ = wait(tr.getRange(KeyRangeRef(self->vsValuePrefix, endOfRange(self->vsValuePrefix)), self->nodeCount + 1));
+				result = result_;
+				if((self->apiVersion >= 610 || self->apiVersion == Database::API_VERSION_LATEST) && self->key_commit.count(metadataVersionKey)) {
+					Optional<Value> mVal = wait(tr.get(metadataVersionKey));
+					if(mVal.present()) {
+						result.push_back_deep(result.arena(), KeyValueRef(metadataVersionKey,mVal.get()));
+					}
+				}
 				ASSERT(result.size() <= self->nodeCount);
 				if (self->failIfDataLost) {
 					ASSERT(result.size() == self->key_commit.size());

@@ -171,29 +201,40 @@ struct VersionStampWorkload : TestWorkload {

 				//TraceEvent("VST_Check0").detail("Size", result.size()).detail("NodeCount", self->nodeCount).detail("KeyCommit", self->key_commit.size()).detail("ReadVersion", readVersion);
 				for (auto it : result) {
-					const Standalone<StringRef> key = it.key.removePrefix(self->vsValuePrefix);
+					const Standalone<StringRef> key = it.key == metadataVersionKey ? metadataVersionKey : it.key.removePrefix(self->vsValuePrefix);
 					Version parsedVersion;
 					Standalone<StringRef> parsedVersionstamp;
 					std::tie(parsedVersion, parsedVersionstamp) = versionFromValue(it.value);
+					ASSERT(parsedVersion <= readVersion);

 					//TraceEvent("VST_Check0a").detail("ItKey", printable(it.key)).detail("ItValue", printable(it.value)).detail("ParsedVersion", parsedVersion);
 					const auto& all_values_iter = self->key_commit.find(key);
 					ASSERT(all_values_iter != self->key_commit.end()); // Reading a key that we didn't commit.
 					const auto& all_values = all_values_iter->second;

-					const auto& value_pair_iter = std::find_if(all_values.cbegin(), all_values.cend(),
-						[parsedVersion](const std::pair<Version, Standalone<StringRef>>& pair) { return pair.first == parsedVersion; });
-					ASSERT(value_pair_iter != all_values.cend()); // The key exists, but we never wrote the timestamp.
-					if (self->failIfDataLost) {
-						auto last_element_iter = all_values.cend(); last_element_iter--;
-						ASSERT(value_pair_iter == last_element_iter);
-					}
-					Version commitVersion = value_pair_iter->first;
-					Standalone<StringRef> commitVersionstamp = value_pair_iter->second;
+					if(it.key == metadataVersionKey && !self->soleOwnerOfMetadataVersionKey) {
+						if(self->failIfDataLost) {
+							for(auto& it : all_values) {
+								ASSERT(it.first <= parsedVersion);
+								if(it.first == parsedVersion) {
+									ASSERT(it.second.compare(parsedVersionstamp) == 0);
+								}
+							}
+						}
+					} else {
+						const auto& value_pair_iter = std::find_if(all_values.cbegin(), all_values.cend(),
+							[parsedVersion](const std::pair<Version, Standalone<StringRef>>& pair) { return pair.first == parsedVersion; });
+						ASSERT(value_pair_iter != all_values.cend()); // The key exists, but we never wrote the timestamp.
+						if (self->failIfDataLost) {
+							auto last_element_iter = all_values.cend(); last_element_iter--;
+							ASSERT(value_pair_iter == last_element_iter);
+						}
+						Version commitVersion = value_pair_iter->first;
+						Standalone<StringRef> commitVersionstamp = value_pair_iter->second;

-					//TraceEvent("VST_Check0b").detail("Version", commitVersion).detail("CommitVersion", printable(commitVersionstamp));
-					ASSERT(parsedVersion <= readVersion);
-					ASSERT(commitVersionstamp.compare(parsedVersionstamp) == 0);
+						//TraceEvent("VST_Check0b").detail("Version", commitVersion).detail("CommitVersion", printable(commitVersionstamp));
+						ASSERT(commitVersionstamp.compare(parsedVersionstamp) == 0);
+					}
 				}

 				Standalone<RangeResultRef> result__ = wait(tr.getRange(KeyRangeRef(self->vsKeyPrefix, endOfRange(self->vsKeyPrefix)), self->nodeCount + 1));

@@ -270,7 +311,11 @@ struct VersionStampWorkload : TestWorkload {
 		state Version committedVersion;

 		state Value versionStampValue;
-		if (oldVSFormat) {
+
+		if(key == metadataVersionKey) {
+			value = metadataVersionRequiredValue;
+			versionStampValue = value;
+		} else if (oldVSFormat) {
 			versionStampValue = value;
 		} else {
 			versionStampValue = value.withSuffix(LiteralStringRef("\x00\x00\x00\x00"));

@@ -280,8 +325,13 @@ struct VersionStampWorkload : TestWorkload {
 		state bool error = false;
 		state Error err;
 		//TraceEvent("VST_CommitBegin").detail("Key", printable(key)).detail("VsKey", printable(versionStampKey)).detail("Clear", printable(range));
+		state Key testKey;
 		try {
 			tr.atomicOp(key, versionStampValue, MutationRef::SetVersionstampedValue);
+			if(key == metadataVersionKey) {
+				testKey = "testKey" + g_random->randomUniqueID().toString();
+				tr.atomicOp(testKey, versionStampValue, MutationRef::SetVersionstampedValue);
+			}
 			tr.clear(range);
 			tr.atomicOp(versionStampKey, value, MutationRef::SetVersionstampedKey);
 			state Future<Standalone<StringRef>> fTrVs = tr.getVersionstamp();

@@ -304,14 +354,14 @@ struct VersionStampWorkload : TestWorkload {
 				state ReadYourWritesTransaction cur_tr(cx_is_primary ? cx : extraDB);
 				cur_tr.setOption(FDBTransactionOptions::LOCK_AWARE);
 				try {
-					Optional<Value> vs_value = wait(cur_tr.get(key));
+					Optional<Value> vs_value = wait(cur_tr.get(key == metadataVersionKey ? testKey : key));
 					if (!vs_value.present()) {
 						error = true;
 						break;
 					}
 					const Version value_version = versionFromValue(vs_value.get()).first;
 					//TraceEvent("VST_CommitUnknownRead").detail("VsValue", vs_value.present() ? printable(vs_value.get()) : "did not exist");
-					const auto& value_ts = self->key_commit[key.removePrefix(self->vsValuePrefix)];
+					const auto& value_ts = self->key_commit[key == metadataVersionKey ? metadataVersionKey : key.removePrefix(self->vsValuePrefix)];
 					const auto& iter = std::find_if(value_ts.cbegin(), value_ts.cend(),
 						[value_version](const std::pair<Version, Standalone<StringRef>>& pair) {
 							return value_version == pair.first;

@@ -343,7 +393,7 @@ struct VersionStampWorkload : TestWorkload {
 				const Standalone<StringRef> vsKeyKey = versionStampKey.removePrefix(self->vsKeyPrefix).substr(4, 16);
 				const auto& committedVersionPair = std::make_pair(committedVersion, committedVersionStamp);
 				//TraceEvent("VST_CommitSuccess").detail("Key", printable(key)).detail("VsKey", printable(versionStampKey)).detail("VsKeyKey", printable(vsKeyKey)).detail("Clear", printable(range)).detail("Version", tr.getCommittedVersion()).detail("VsValue", printable(committedVersionPair.second));
-				self->key_commit[key.removePrefix(self->vsValuePrefix)].push_back(committedVersionPair);
+				self->key_commit[key == metadataVersionKey ? metadataVersionKey : key.removePrefix(self->vsValuePrefix)].push_back(committedVersionPair);
 				self->versionStampKey_commit[vsKeyKey].push_back(committedVersionPair);
 				break;
 			}
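For context on what the workload now exercises: the metadata version key is written through a SetVersionstampedValue atomic op carrying the required value, which is exactly what the `key == metadataVersionKey` branches above arrange. A minimal client-side sketch; the actor wrapper and database handle are illustrative, while the calls themselves all appear in the diff:

ACTOR Future<Void> bumpMetadataVersion(Database cx) {
	state ReadYourWritesTransaction tr(cx);
	loop {
		try {
			// The versionstamp fills in the commit version, making the value unique per commit.
			tr.atomicOp(metadataVersionKey, metadataVersionRequiredValue, MutationRef::SetVersionstampedValue);
			wait(tr.commit());
			return Void();
		} catch (Error& e) {
			wait(tr.onError(e));
		}
	}
}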
@@ -59,7 +59,7 @@
 	<WarningLevel>Level3</WarningLevel>
 	<Optimization>Disabled</Optimization>
 	<RuntimeLibrary>MultiThreaded</RuntimeLibrary>
-	<PreprocessorDefinitions>_WIN32_WINNT=0x0502;WINVER=0x0502;NTDDI_VERSION=0x05020000;_MBCS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+	<PreprocessorDefinitions>_WIN32_WINNT=0x0502;WINVER=0x0502;BOOST_ALL_NO_LIB;NTDDI_VERSION=0x05020000;_MBCS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
 	</ClCompile>
 	<Link>
 	<GenerateDebugInformation>true</GenerateDebugInformation>

@@ -73,7 +73,7 @@
 	<FunctionLevelLinking>true</FunctionLevelLinking>
 	<IntrinsicFunctions>true</IntrinsicFunctions>
 	<RuntimeLibrary>MultiThreaded</RuntimeLibrary>
-	<PreprocessorDefinitions>_WIN32_WINNT=0x0502;WINVER=0x0502;NTDDI_VERSION=0x05020000;_MBCS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+	<PreprocessorDefinitions>_WIN32_WINNT=0x0502;WINVER=0x0502;BOOST_ALL_NO_LIB;NTDDI_VERSION=0x05020000;_MBCS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
 	</ClCompile>
 	<Link>
 	<GenerateDebugInformation>true</GenerateDebugInformation>

@@ -93,4 +93,4 @@
 	<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
 	<ImportGroup Label="ExtensionTargets">
 	</ImportGroup>
-</Project>
+</Project>
flow/Arena.h

@@ -527,6 +527,25 @@ public:
 	StringRef eat(const char *sep) {
 		return eat(StringRef((const uint8_t *)sep, strlen(sep)));
 	}
+	// Return StringRef of bytes from begin() up to but not including the first byte matching any byte in sep,
+	// and remove that sequence (including the sep byte) from *this
+	// Returns and removes all bytes from *this if no bytes within sep were found
+	StringRef eatAny(StringRef sep, uint8_t *foundSeparator) {
+		auto iSep = std::find_first_of(begin(), end(), sep.begin(), sep.end());
+		if(iSep != end()) {
+			if(foundSeparator != nullptr) {
+				*foundSeparator = *iSep;
+			}
+			const int i = iSep - begin();
+			StringRef token = substr(0, i);
+			*this = substr(i + 1);
+			return token;
+		}
+		return eat();
+	}
+	StringRef eatAny(const char *sep, uint8_t *foundSeparator) {
+		return eatAny(StringRef((const uint8_t *)sep, strlen(sep)), foundSeparator);
+	}

 private:
 	// Unimplemented; blocks conversion through std::string
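A quick illustration of the new helper: tokenizing a StringRef on either of two separator bytes while recording which one ended each token. The sample data and variable names are illustrative; the calls are the ones added above.

StringRef s = LiteralStringRef("a,b;c");
while (s.size()) {
	uint8_t sep = 0;
	StringRef token = s.eatAny(",;", &sep);
	// yields "a" (sep == ','), then "b" (sep == ';'), then "c"
	// (sep untouched, since the final eat() consumes the remainder).
	printf("%.*s\n", token.size(), (const char*)token.begin());
}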
@@ -154,7 +154,7 @@
 	<Optimization>Disabled</Optimization>
 	<BasicRuntimeChecks>EnableFastChecks</BasicRuntimeChecks>
 	<RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
-	<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=0x0502;WINVER=0x0502;NTDDI_VERSION=0x05020000;_DEBUG;_HAS_ITERATOR_DEBUGGING=0;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+	<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=0x0502;WINVER=0x0502;BOOST_ALL_NO_LIB;NTDDI_VERSION=0x05020000;_DEBUG;_HAS_ITERATOR_DEBUGGING=0;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
 	<AdditionalIncludeDirectories>%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
 	<MultiProcessorCompilation>true</MultiProcessorCompilation>
 	<AdditionalOptions>/bigobj @../flow/no_intellisense.opt %(AdditionalOptions)</AdditionalOptions>

@@ -177,7 +177,7 @@
 	<Optimization>Full</Optimization>
 	<RuntimeLibrary>MultiThreaded</RuntimeLibrary>
 	<IntrinsicFunctions>true</IntrinsicFunctions>
-	<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=0x0502;WINVER=0x0502;NTDDI_VERSION=0x05020000;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;FDB_CLEAN_BUILD;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+	<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=0x0502;WINVER=0x0502;BOOST_ALL_NO_LIB;NTDDI_VERSION=0x05020000;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;FDB_CLEAN_BUILD;%(PreprocessorDefinitions)</PreprocessorDefinitions>
 	<AdditionalIncludeDirectories>%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
 	<EnableEnhancedInstructionSet>NotSet</EnableEnhancedInstructionSet>
 	<EnablePREfast>false</EnablePREfast>
@@ -67,6 +67,7 @@ enum {
 	TaskUnknownEndpoint = 4000,
 	TaskMoveKeys = 3550,
 	TaskDataDistributionLaunch = 3530,
+	TaskRateKeeper = 3510,
 	TaskDataDistribution = 3500,
 	TaskDiskWrite = 3010,
 	TaskUpdateStorage = 3000,
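TaskRateKeeper slots the new role's work between data distribution launch (3530) and data distribution (3500) in flow's task-priority ordering, where larger values run first. Inside an actor, yielding at that priority would look like this one-liner (illustrative):

wait( delay(0.0, TaskRateKeeper) );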
@@ -1,2 +1,3 @@
 testTitle=VersionStamp
-testName=VersionStamp
+testName=VersionStamp
+soleOwnerOfMetadataVersionKey=true