Merge remote-tracking branch 'apple/master' into ratekeeper
Commit 9f6fe5f649
@@ -92,9 +92,9 @@ func (o NetworkOptions) SetTraceLogGroup(param string) error {
    return o.setOpt(33, []byte(param))
}

// Selects trace output format for this client. xml (the default) and json are supported.
// Select the format of the log files. xml (the default) and json are supported.
//
// Parameter: trace format
// Parameter: Format of trace files
func (o NetworkOptions) SetTraceFormat(param string) error {
    return o.setOpt(34, []byte(param))
}
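
The Java bindings generate the corresponding setter from the same options file; a hedged sketch of enabling JSON trace output (the API version and log directory are illustrative, not taken from this commit):

import com.apple.foundationdb.FDB;

public class TraceFormatExample {
    public static void main(String[] args) {
        FDB fdb = FDB.selectAPIVersion(610); // assumed API version for this era
        // Trace logging must be enabled before the format matters.
        fdb.options().setTraceEnable("/var/log/fdb"); // hypothetical output directory
        fdb.options().setTraceFormat("json");         // "xml" is the default
        fdb.open();
    }
}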
@@ -351,13 +351,25 @@ func (o TransactionOptions) SetDebugRetryLogging(param string) error {
    return o.setOpt(401, []byte(param))
}

// Enables tracing for this transaction and logs results to the client trace logs. Client trace logging must be enabled to get log output.
// Deprecated
//
// Parameter: String identifier to be used in the logs when tracing this transaction. The identifier must not exceed 100 characters.
func (o TransactionOptions) SetTransactionLoggingEnable(param string) error {
    return o.setOpt(402, []byte(param))
}

// Sets a client provided identifier for the transaction that will be used in scenarios like tracing or profiling. Client trace logging or transaction profiling must be separately enabled.
//
// Parameter: String identifier to be used when tracing or profiling this transaction. The identifier must not exceed 100 characters.
func (o TransactionOptions) SetDebugTransactionIdentifier(param string) error {
    return o.setOpt(403, []byte(param))
}

// Enables tracing for this transaction and logs results to the client trace logs. The DEBUG_TRANSACTION_IDENTIFIER option must be set before using this option, and client trace logging must be enabled to get log output.
func (o TransactionOptions) SetLogTransaction() error {
    return o.setOpt(404, nil)
}

// Set a timeout in milliseconds which, when elapsed, will cause the transaction to be automatically cancelled. Valid parameter values are ``[0, INT_MAX]``. If set to 0, all timeouts will be disabled. All pending and any future uses of the transaction will throw an exception. The transaction can be used again after it is reset. Like all transaction options, a timeout must be reset after a call to onError. This behavior allows the user to make the timeout dynamic.
//
// Parameter: value in milliseconds of timeout
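
Taken together, these options wire transaction-level tracing into the client logs; a hedged Java-side sketch of how they might be combined (the identifier and key names are illustrative):

import com.apple.foundationdb.Database;
import com.apple.foundationdb.FDB;

public class TransactionOptionsExample {
    public static void main(String[] args) {
        FDB fdb = FDB.selectAPIVersion(610); // assumed API version
        try(Database db = fdb.open()) {
            db.run(tr -> {
                // db.run() re-invokes this lambda after onError(), so the timeout
                // is re-applied on every attempt -- the "dynamic" behavior above.
                tr.options().setTimeout(5000); // milliseconds
                tr.options().setDebugTransactionIdentifier("checkout-txn"); // <= 100 chars
                tr.options().setLogTransaction(); // needs client trace logging enabled
                return tr.get("some-key".getBytes()).join();
            });
        }
    }
}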
@@ -512,12 +524,12 @@ func (t Transaction) Min(key KeyConvertible, param []byte) {
    t.atomicOp(key.FDBKey(), param, 13)
}

// Transforms ``key`` using a versionstamp for the transaction. Sets the transformed key in the database to ``param``. The key is transformed by removing the final four bytes from the key and reading those as a little-Endian 32-bit integer to get a position ``pos``. The 10 bytes of the key from ``pos`` to ``pos + 10`` are replaced with the versionstamp of the transaction used. The first byte of the key is position 0. A versionstamp is a 10 byte, unique, monotonically (but not sequentially) increasing value for each committed transaction. The first 8 bytes are the committed version of the database (serialized in big-Endian order). The last 2 bytes are monotonic in the serialization order for transactions. WARNING: At this time, versionstamps are compatible with the Tuple layer only in the Java and Python bindings. Also, note that prior to API version 520, the offset was computed from only the final two bytes rather than the final four bytes.
// Transforms ``key`` using a versionstamp for the transaction. Sets the transformed key in the database to ``param``. The key is transformed by removing the final four bytes from the key and reading those as a little-Endian 32-bit integer to get a position ``pos``. The 10 bytes of the key from ``pos`` to ``pos + 10`` are replaced with the versionstamp of the transaction used. The first byte of the key is position 0. A versionstamp is a 10 byte, unique, monotonically (but not sequentially) increasing value for each committed transaction. The first 8 bytes are the committed version of the database (serialized in big-Endian order). The last 2 bytes are monotonic in the serialization order for transactions. WARNING: At this time, versionstamps are compatible with the Tuple layer only in the Java, Python, and Go bindings. Also, note that prior to API version 520, the offset was computed from only the final two bytes rather than the final four bytes.
func (t Transaction) SetVersionstampedKey(key KeyConvertible, param []byte) {
    t.atomicOp(key.FDBKey(), param, 14)
}

// Transforms ``param`` using a versionstamp for the transaction. Sets the ``key`` given to the transformed ``param``. The parameter is transformed by removing the final four bytes from ``param`` and reading those as a little-Endian 32-bit integer to get a position ``pos``. The 10 bytes of the parameter from ``pos`` to ``pos + 10`` are replaced with the versionstamp of the transaction used. The first byte of the parameter is position 0. A versionstamp is a 10 byte, unique, monotonically (but not sequentially) increasing value for each committed transaction. The first 8 bytes are the committed version of the database (serialized in big-Endian order). The last 2 bytes are monotonic in the serialization order for transactions. WARNING: At this time, versionstamps are compatible with the Tuple layer only in the Java and Python bindings. Also, note that prior to API version 520, the versionstamp was always placed at the beginning of the parameter rather than computing an offset.
// Transforms ``param`` using a versionstamp for the transaction. Sets the ``key`` given to the transformed ``param``. The parameter is transformed by removing the final four bytes from ``param`` and reading those as a little-Endian 32-bit integer to get a position ``pos``. The 10 bytes of the parameter from ``pos`` to ``pos + 10`` are replaced with the versionstamp of the transaction used. The first byte of the parameter is position 0. A versionstamp is a 10 byte, unique, monotonically (but not sequentially) increasing value for each committed transaction. The first 8 bytes are the committed version of the database (serialized in big-Endian order). The last 2 bytes are monotonic in the serialization order for transactions. WARNING: At this time, versionstamps are compatible with the Tuple layer only in the Java, Python, and Go bindings. Also, note that prior to API version 520, the versionstamp was always placed at the beginning of the parameter rather than computing an offset.
func (t Transaction) SetVersionstampedValue(key KeyConvertible, param []byte) {
    t.atomicOp(key.FDBKey(), param, 15)
}
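
The four-byte offset convention is easier to see spelled out. A minimal sketch of the key layout that SetVersionstampedKey expects (the Tuple layer's packWithVersionstamp does this bookkeeping for you; this is just the raw wire format):

import java.nio.ByteBuffer;
import java.nio.ByteOrder;

public class VersionstampKeyLayout {
    // prefix + 10 placeholder bytes + little-endian int32 position of the placeholder
    static byte[] versionstampedKey(byte[] prefix) {
        ByteBuffer buf = ByteBuffer.allocate(prefix.length + 10 + 4)
                .order(ByteOrder.LITTLE_ENDIAN);
        buf.put(prefix);
        buf.put(new byte[10]);     // overwritten with the versionstamp at commit
        buf.putInt(prefix.length); // pos: where the 10-byte versionstamp lands
        return buf.array();
    }
}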
@@ -532,6 +544,11 @@ func (t Transaction) ByteMax(key KeyConvertible, param []byte) {
    t.atomicOp(key.FDBKey(), param, 17)
}

// Performs an atomic ``compare and clear`` operation. If the existing value in the database is equal to the given value, then the given key is cleared.
func (t Transaction) CompareAndClear(key KeyConvertible, param []byte) {
    t.atomicOp(key.FDBKey(), param, 20)
}

type conflictRangeType int

const (
@@ -54,6 +54,7 @@ set(JAVA_BINDING_SRCS
  src/main/com/apple/foundationdb/tuple/ByteArrayUtil.java
  src/main/com/apple/foundationdb/tuple/IterableComparator.java
  src/main/com/apple/foundationdb/tuple/package-info.java
  src/main/com/apple/foundationdb/tuple/StringUtil.java
  src/main/com/apple/foundationdb/tuple/Tuple.java
  src/main/com/apple/foundationdb/tuple/TupleUtil.java
  src/main/com/apple/foundationdb/tuple/Versionstamp.java)
@@ -88,6 +89,7 @@ set(JAVA_TESTS_SRCS
  src/test/com/apple/foundationdb/test/StackUtils.java
  src/test/com/apple/foundationdb/test/TesterArgs.java
  src/test/com/apple/foundationdb/test/TestResult.java
  src/test/com/apple/foundationdb/test/TuplePerformanceTest.java
  src/test/com/apple/foundationdb/test/TupleTest.java
  src/test/com/apple/foundationdb/test/VersionstampSmokeTest.java
  src/test/com/apple/foundationdb/test/WatchTest.java
@@ -46,8 +46,8 @@ import com.apple.foundationdb.tuple.Versionstamp;
 * </p>
 */
public class Subspace {
    static final Tuple EMPTY_TUPLE = Tuple.from();
    static final byte[] EMPTY_BYTES = new byte[0];
    private static final Tuple EMPTY_TUPLE = Tuple.from();
    private static final byte[] EMPTY_BYTES = new byte[0];

    private final byte[] rawPrefix;

@@ -248,8 +248,7 @@ public class Subspace {
     * @return the {@link Range} of keyspace corresponding to {@code tuple}
     */
    public Range range(Tuple tuple) {
        Range p = tuple.range();
        return new Range(join(rawPrefix, p.begin), join(rawPrefix, p.end));
        return tuple.range(rawPrefix);
    }

    /**
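
The new body delegates to Tuple.range(byte[]) (added later in this change set), so the subspace prefix is folded in during packing rather than joined on afterwards. Usage is unchanged; a small sketch with illustrative names:

import com.apple.foundationdb.Range;
import com.apple.foundationdb.subspace.Subspace;
import com.apple.foundationdb.tuple.Tuple;

public class SubspaceRangeExample {
    public static void main(String[] args) {
        Subspace users = new Subspace(Tuple.from("users"));
        // All keys that encode ("users", "alice", ...):
        Range r = users.range(Tuple.from("alice"));
        System.out.println(r);
    }
}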
@@ -20,7 +20,6 @@

package com.apple.foundationdb.tuple;

import java.math.BigInteger;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.util.Arrays;
@@ -154,7 +153,10 @@ public class ByteArrayUtil {
     * @return a newly created array where {@code pattern} replaced with {@code replacement}
     */
    public static byte[] replace(byte[] src, byte[] pattern, byte[] replacement) {
        return join(replacement, split(src, pattern));
        if(src == null) {
            return null;
        }
        return replace(src, 0, src.length, pattern, replacement);
    }

    /**
@@ -171,7 +173,75 @@ public class ByteArrayUtil {
     */
    public static byte[] replace(byte[] src, int offset, int length,
            byte[] pattern, byte[] replacement) {
        return join(replacement, split(src, offset, length, pattern));
        if(offset < 0 || offset > src.length) {
            throw new IllegalArgumentException("Invalid offset for array pattern replacement");
        }
        if(length < 0 || offset + length > src.length) {
            throw new IllegalArgumentException("Invalid length for array pattern replacement");
        }
        if(pattern == null || pattern.length == 0) {
            return Arrays.copyOfRange(src, offset, offset + length);
        }
        ByteBuffer dest;
        if(replacement == null || replacement.length != pattern.length) {
            // Array might change size. This is the "tricky" case.
            int newLength = replace(src, offset, length, pattern, replacement, null);
            if(newLength != length) {
                dest = ByteBuffer.allocate(newLength);
            }
            else {
                // If the array size didn't change, as the pattern and replacement lengths
                // differ, it must be the case that there weren't any occurrences of pattern in src
                // between offset and offset + length, so we can just return a copy.
                return Arrays.copyOfRange(src, offset, offset + length);
            }
        }
        else {
            // No matter what, the array will stay the same size as replacement.length = pattern.length
            dest = ByteBuffer.allocate(length);
        }
        replace(src, offset, length, pattern, replacement, dest);
        return dest.array();
    }

    // Replace any occurrences of pattern in src between offset and offset + length with replacement.
    // The new array is serialized into dest and the new length is returned.
    static int replace(byte[] src, int offset, int length, byte[] pattern, byte[] replacement, ByteBuffer dest) {
        if(pattern == null || pattern.length == 0) {
            if(dest != null) {
                dest.put(src, offset, length);
            }
            return length;
        }
        byte patternFirst = pattern[0];
        int lastPosition = offset;
        int currentPosition = offset;
        int newLength = 0;
        int replacementLength = replacement == null ? 0 : replacement.length;

        while(currentPosition < offset + length) {
            if(src[currentPosition] == patternFirst && regionEquals(src, currentPosition, pattern)) {
                if(dest != null) {
                    dest.put(src, lastPosition, currentPosition - lastPosition);
                    if(replacement != null) {
                        dest.put(replacement);
                    }
                }
                newLength += currentPosition - lastPosition + replacementLength;
                currentPosition += pattern.length;
                lastPosition = currentPosition;
            }
            else {
                currentPosition++;
            }
        }

        newLength += currentPosition - lastPosition;
        if(dest != null) {
            dest.put(src, lastPosition, currentPosition - lastPosition);
        }

        return newLength;
    }

    /**
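
The two-pass shape above (first call with a null buffer to size the result, second call to fill it) replaces the old join(split(...)) implementation and avoids building an intermediate list of fragments. A quick usage sketch of the public entry point, with made-up input bytes:

import com.apple.foundationdb.tuple.ByteArrayUtil;

public class ReplaceDemo {
    public static void main(String[] args) {
        byte[] src = {1, 2, 0, 1, 2};
        byte[] out = ByteArrayUtil.replace(src, new byte[]{1, 2}, new byte[]{9});
        // Expected: \x09\x00\x09 -- both occurrences of {1, 2} replaced by {9}.
        System.out.println(ByteArrayUtil.printable(out));
    }
}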
@@ -203,7 +273,7 @@ public class ByteArrayUtil {
     * @return a list of byte arrays from {@code src} now not containing {@code delimiter}
     */
    public static List<byte[]> split(byte[] src, int offset, int length, byte[] delimiter) {
        List<byte[]> parts = new LinkedList<byte[]>();
        List<byte[]> parts = new LinkedList<>();
        int idx = offset;
        int lastSplitEnd = offset;
        while(idx <= (offset+length) - delimiter.length) {
@@ -225,14 +295,6 @@ public class ByteArrayUtil {
        return parts;
    }

    static int bisectLeft(BigInteger[] arr, BigInteger i) {
        int n = Arrays.binarySearch(arr, i);
        if(n >= 0)
            return n;
        int ip = (n + 1) * -1;
        return ip;
    }

    /**
     * Compare byte arrays for equality and ordering purposes. Elements in the array
     * are interpreted and compared as unsigned bytes. Neither parameter
@@ -277,61 +339,6 @@ public class ByteArrayUtil {
        return true;
    }

    /**
     * Scan through an array of bytes to find the first occurrence of a specific value.
     *
     * @param src array to scan. Must not be {@code null}.
     * @param what the value for which to search.
     * @param start the index at which to start the search. If this is at or after
     *  the end of {@code src}, the result will always be {@code -1}.
     * @param end the index one past the last entry at which to search
     *
     * @return return the location of the first instance of {@code value}, or
     *  {@code -1} if not found.
     */
    static int findNext(byte[] src, byte what, int start, int end) {
        for(int i = start; i < end; i++) {
            if(src[i] == what)
                return i;
        }
        return -1;
    }

    /**
     * Gets the index of the first element after the next occurrence of the byte sequence [nm]
     * @param v the bytes to scan through
     * @param n first character to find
     * @param m second character to find
     * @param start the index at which to start the scan
     *
     * @return the index after the next occurrence of [nm]
     */
    static int findTerminator(byte[] v, byte n, byte m, int start) {
        return findTerminator(v, n, m, start, v.length);
    }

    /**
     * Gets the index of the first element after the next occurrence of the byte sequence [nm]
     * @param v the bytes to scan through
     * @param n first character to find
     * @param m second character to find
     * @param start the index at which to start the scan
     * @param end the index at which to stop the search (exclusive)
     *
     * @return the index after the next occurrence of [nm]
     */
    static int findTerminator(byte[] v, byte n, byte m, int start, int end) {
        int pos = start;
        while(true) {
            pos = findNext(v, n, pos, end);
            if(pos < 0)
                return end;
            if(pos + 1 == end || v[pos+1] != m)
                return pos;
            pos += 2;
        }
    }

    /**
     * Computes the first key that would sort outside the range prefixed by {@code key}.
     * {@code key} must be non-null, and contain at least some character that is not
@@ -418,5 +425,14 @@ public class ByteArrayUtil {
        return s.toString();
    }

    static int nullCount(byte[] val) {
        int nulls = 0;
        for(int i = 0; i < val.length; i++) {
            if(val[i] == 0x00)
                nulls += 1;
        }
        return nulls;
    }

    private ByteArrayUtil() {}
}
@@ -0,0 +1,118 @@
/*
 * StringUtil.java
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.apple.foundationdb.tuple;

final class StringUtil {
    private static final char SURROGATE_COUNT = Character.MAX_LOW_SURROGATE - Character.MIN_HIGH_SURROGATE + 1;
    private static final char ABOVE_SURROGATES = Character.MAX_VALUE - Character.MAX_LOW_SURROGATE;

    static char adjustForSurrogates(char c, String s, int pos) {
        if(c > Character.MAX_LOW_SURROGATE) {
            return (char)(c - SURROGATE_COUNT);
        }
        else {
            // Validate the UTF-16 string as this can do weird things on invalid strings
            if((Character.isHighSurrogate(c) && (pos + 1 >= s.length() || !Character.isLowSurrogate(s.charAt(pos + 1)))) ||
                    (Character.isLowSurrogate(c) && (pos == 0 || !Character.isHighSurrogate(s.charAt(pos - 1))))) {
                throw new IllegalArgumentException("malformed UTF-16 string does not follow high surrogate with low surrogate");
            }
            return (char)(c + ABOVE_SURROGATES);
        }
    }

    // Compare two strings based on their UTF-8 code point values. Note that Java stores strings
    // using UTF-16. However, {@link Tuple}s are encoded using UTF-8. Using unsigned byte comparison,
    // UTF-8 strings will sort based on their Unicode codepoints. However, UTF-16 strings <em>almost</em>,
    // but not quite, sort that way. This can be addressed by fixing up surrogates. There are 0x800 surrogate
    // values and about 0x2000 code points above the maximum surrogate value. For anything that is a surrogate,
    // shift it up by 0x2000, and anything that is above the maximum surrogate value, shift it down by 0x800.
    // This makes all surrogates sort after all non-surrogates.
    //
    // See: https://ssl.icu-project.org/docs/papers/utf16_code_point_order.html
    static int compareUtf8(String s1, String s2) {
        // Ignore common prefix at the beginning which will compare equal regardless of encoding
        int pos = 0;
        while(pos < s1.length() && pos < s2.length() && s1.charAt(pos) == s2.charAt(pos)) {
            pos++;
        }
        if(pos >= s1.length() || pos >= s2.length()) {
            // One string is the prefix of another, so return based on length.
            return Integer.compare(s1.length(), s2.length());
        }
        // Compare first different character
        char c1 = s1.charAt(pos);
        char c2 = s2.charAt(pos);
        // Apply "fix up" for surrogates
        if(c1 >= Character.MIN_HIGH_SURROGATE) {
            c1 = adjustForSurrogates(c1, s1, pos);
        }
        if(c2 >= Character.MIN_HIGH_SURROGATE) {
            c2 = adjustForSurrogates(c2, s2, pos);
        }
        return Character.compare(c1, c2);
    }

    static int packedSize(String s) {
        final int strLength = s.length();
        int size = 0;
        int pos = 0;

        while(pos < strLength) {
            char c = s.charAt(pos);
            if(c == '\0') {
                // Null is encoded as \x00\xff
                size += 2;
            }
            else if(c <= 0x7f) {
                // ASCII code point. Only 1 byte.
                size += 1;
            }
            else if(c <= 0x07ff) {
                // 2 byte code point
                size += 2;
            }
            else if(Character.isHighSurrogate(c)) {
                if(pos + 1 < s.length() && Character.isLowSurrogate(s.charAt(pos + 1))) {
                    // High surrogate followed by low surrogate means the code point
                    // is between U+10000 and U+10FFFF, so it requires 4 bytes.
                    size += 4;
                    pos += 1;
                }
                else {
                    throw new IllegalArgumentException("malformed UTF-16 has high surrogate not followed by low surrogate");
                }
            }
            else if(Character.isLowSurrogate(c)) {
                throw new IllegalArgumentException("malformed UTF-16 has low surrogate without prior high surrogate");
            }
            else {
                // 3 byte code point
                size += 3;
            }
            pos += 1;
        }

        return size;
    }

    private StringUtil() {}
}
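
To make the fix-up concrete: U+FFFF is a single char above the surrogate range, while U+10000 is stored as a surrogate pair, so raw UTF-16 order and UTF-8 code-point order disagree about them. A small hypothetical harness (it would have to live in com.apple.foundationdb.tuple, since StringUtil is package-private):

package com.apple.foundationdb.tuple;

public class StringSortDemo {
    public static void main(String[] args) {
        String bmp = "\uFFFF";          // U+FFFF: one char above the surrogates
        String astral = "\uD800\uDC00"; // U+10000: a surrogate pair
        System.out.println(bmp.compareTo(astral) > 0);               // true: UTF-16 order
        System.out.println(StringUtil.compareUtf8(bmp, astral) < 0); // true: code-point order
        // packedSize counts tuple-encoded UTF-8 bytes: 3 for U+FFFF, 4 for U+10000.
        System.out.println(StringUtil.packedSize(bmp));    // 3
        System.out.println(StringUtil.packedSize(astral)); // 4
    }
}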
@@ -21,11 +21,11 @@
package com.apple.foundationdb.tuple;

import java.math.BigInteger;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.UUID;
import java.util.stream.Collectors;
@@ -68,18 +68,40 @@ import com.apple.foundationdb.Range;
 * This class is not thread safe.
 */
public class Tuple implements Comparable<Tuple>, Iterable<Object> {
    private static IterableComparator comparator = new IterableComparator();
    private static final IterableComparator comparator = new IterableComparator();
    private static final byte[] EMPTY_BYTES = new byte[0];

    private List<Object> elements;
    List<Object> elements;
    private byte[] packed = null;
    private int memoizedHash = 0;
    private int memoizedPackedSize = -1;
    private final boolean incompleteVersionstamp;

    private Tuple(List<? extends Object> elements, Object newItem) {
        this(elements);
    private Tuple(Tuple original, Object newItem, boolean itemHasIncompleteVersionstamp) {
        this.elements = new ArrayList<>(original.elements.size() + 1);
        this.elements.addAll(original.elements);
        this.elements.add(newItem);
        incompleteVersionstamp = original.incompleteVersionstamp || itemHasIncompleteVersionstamp;
    }

    private Tuple(List<? extends Object> elements) {
        this.elements = new ArrayList<>(elements);
    private Tuple(List<Object> elements) {
        this.elements = elements;
        incompleteVersionstamp = TupleUtil.hasIncompleteVersionstamp(elements.stream());
    }

    /**
     * Construct a new empty {@code Tuple}. After creation, items can be added
     * with calls to the variations of {@code add()}.
     *
     * @see #from(Object...)
     * @see #fromBytes(byte[])
     * @see #fromItems(Iterable)
     */
    public Tuple() {
        elements = Collections.emptyList();
        packed = EMPTY_BYTES;
        memoizedPackedSize = 0;
        incompleteVersionstamp = false;
    }

    /**
@@ -105,7 +127,10 @@ public class Tuple implements Comparable<Tuple>, Iterable<Object> {
                !(o instanceof Versionstamp)) {
            throw new IllegalArgumentException("Parameter type (" + o.getClass().getName() + ") not recognized");
        }
        return new Tuple(this.elements, o);
        return new Tuple(this, o,
                (o instanceof Versionstamp && !((Versionstamp)o).isComplete()) ||
                (o instanceof List<?> && TupleUtil.hasIncompleteVersionstamp(((List)o).stream())) ||
                (o instanceof Tuple && ((Tuple) o).hasIncompleteVersionstamp()));
    }

    /**
@@ -116,7 +141,7 @@ public class Tuple implements Comparable<Tuple>, Iterable<Object> {
     * @return a newly created {@code Tuple}
     */
    public Tuple add(String s) {
        return new Tuple(this.elements, s);
        return new Tuple(this, s, false);
    }

    /**
@@ -127,7 +152,7 @@ public class Tuple implements Comparable<Tuple>, Iterable<Object> {
     * @return a newly created {@code Tuple}
     */
    public Tuple add(long l) {
        return new Tuple(this.elements, l);
        return new Tuple(this, l, false);
    }

    /**
@@ -138,7 +163,7 @@ public class Tuple implements Comparable<Tuple>, Iterable<Object> {
     * @return a newly created {@code Tuple}
     */
    public Tuple add(byte[] b) {
        return new Tuple(this.elements, b);
        return new Tuple(this, b, false);
    }

    /**
@@ -149,7 +174,7 @@ public class Tuple implements Comparable<Tuple>, Iterable<Object> {
     * @return a newly created {@code Tuple}
     */
    public Tuple add(boolean b) {
        return new Tuple(this.elements, b);
        return new Tuple(this, b, false);
    }

    /**
@@ -160,7 +185,7 @@ public class Tuple implements Comparable<Tuple>, Iterable<Object> {
     * @return a newly created {@code Tuple}
     */
    public Tuple add(UUID uuid) {
        return new Tuple(this.elements, uuid);
        return new Tuple(this, uuid, false);
    }

    /**
@@ -176,7 +201,7 @@ public class Tuple implements Comparable<Tuple>, Iterable<Object> {
        if(bi == null) {
            throw new NullPointerException("Number types in Tuple cannot be null");
        }
        return new Tuple(this.elements, bi);
        return new Tuple(this, bi, false);
    }

    /**
@@ -187,7 +212,7 @@ public class Tuple implements Comparable<Tuple>, Iterable<Object> {
     * @return a newly created {@code Tuple}
     */
    public Tuple add(float f) {
        return new Tuple(this.elements, f);
        return new Tuple(this, f, false);
    }

    /**
@@ -198,7 +223,7 @@ public class Tuple implements Comparable<Tuple>, Iterable<Object> {
     * @return a newly created {@code Tuple}
     */
    public Tuple add(double d) {
        return new Tuple(this.elements, d);
        return new Tuple(this, d, false);
    }

    /**
@@ -210,11 +235,11 @@ public class Tuple implements Comparable<Tuple>, Iterable<Object> {
     * @return a newly created {@code Tuple}
     */
    public Tuple add(Versionstamp v) {
        return new Tuple(this.elements, v);
        return new Tuple(this, v, !v.isComplete());
    }

    /**
     * Creates a copy of this {@code Tuple} with an {@link List} appended as the last element.
     * Creates a copy of this {@code Tuple} with a {@link List} appended as the last element.
     * This does not add the elements individually (for that, use {@link Tuple#addAll(List) Tuple.addAll}).
     * This adds the list as a single element nested within the outer {@code Tuple}.
     *
@@ -222,8 +247,8 @@ public class Tuple implements Comparable<Tuple>, Iterable<Object> {
     *
     * @return a newly created {@code Tuple}
     */
    public Tuple add(List<? extends Object> l) {
        return new Tuple(this.elements, l);
    public Tuple add(List<?> l) {
        return new Tuple(this, l, TupleUtil.hasIncompleteVersionstamp(l.stream()));
    }

    /**
@@ -236,7 +261,7 @@ public class Tuple implements Comparable<Tuple>, Iterable<Object> {
     * @return a newly created {@code Tuple}
     */
    public Tuple add(Tuple t) {
        return new Tuple(this.elements, t);
        return new Tuple(this, t, t.hasIncompleteVersionstamp());
    }

    /**
@@ -249,7 +274,7 @@ public class Tuple implements Comparable<Tuple>, Iterable<Object> {
     * @return a newly created {@code Tuple}
     */
    public Tuple add(byte[] b, int offset, int length) {
        return new Tuple(this.elements, Arrays.copyOfRange(b, offset, offset + length));
        return new Tuple(this, Arrays.copyOfRange(b, offset, offset + length), false);
    }

    /**
@@ -260,8 +285,8 @@ public class Tuple implements Comparable<Tuple>, Iterable<Object> {
     *
     * @return a newly created {@code Tuple}
     */
    public Tuple addAll(List<? extends Object> o) {
        List<Object> merged = new ArrayList<Object>(o.size() + this.elements.size());
    public Tuple addAll(List<?> o) {
        List<Object> merged = new ArrayList<>(o.size() + this.elements.size());
        merged.addAll(this.elements);
        merged.addAll(o);
        return new Tuple(merged);
@@ -275,32 +300,88 @@ public class Tuple implements Comparable<Tuple>, Iterable<Object> {
     * @return a newly created {@code Tuple}
     */
    public Tuple addAll(Tuple other) {
        List<Object> merged = new ArrayList<Object>(this.size() + other.size());
        List<Object> merged = new ArrayList<>(this.size() + other.size());
        merged.addAll(this.elements);
        merged.addAll(other.peekItems());
        return new Tuple(merged);
        merged.addAll(other.elements);
        Tuple t = new Tuple(merged);
        if(!t.hasIncompleteVersionstamp() && packed != null && other.packed != null) {
            t.packed = ByteArrayUtil.join(packed, other.packed);
        }
        if(memoizedPackedSize >= 0 && other.memoizedPackedSize >= 0) {
            t.memoizedPackedSize = memoizedPackedSize + other.memoizedPackedSize;
        }
        return t;
    }

    /**
     * Get an encoded representation of this {@code Tuple}. Each element is encoded to
     * {@code byte}s and concatenated.
     * {@code byte}s and concatenated. Note that once a {@code Tuple} has been packed, its
     * serialized representation is stored internally so that future calls to this function
     * are faster than the initial call.
     *
     * @return a serialized representation of this {@code Tuple}.
     * @return a packed representation of this {@code Tuple}
     */
    public byte[] pack() {
        return pack(null);
        return packInternal(null, true);
    }

    /**
     * Get an encoded representation of this {@code Tuple}. Each element is encoded to
     * {@code byte}s and concatenated, and then the prefix supplied is prepended to
     * the array.
     * the array. Note that once a {@code Tuple} has been packed, its serialized representation
     * is stored internally so that future calls to this function are faster than the
     * initial call.
     *
     * @param prefix additional byte-array prefix to prepend to serialized bytes.
     * @return a serialized representation of this {@code Tuple} prepended by the {@code prefix}.
     * @param prefix additional byte-array prefix to prepend to the packed bytes
     * @return a packed representation of this {@code Tuple} prepended by the {@code prefix}
     */
    public byte[] pack(byte[] prefix) {
        return TupleUtil.pack(elements, prefix);
        return packInternal(prefix, true);
    }

    byte[] packInternal(byte[] prefix, boolean copy) {
        if(hasIncompleteVersionstamp()) {
            throw new IllegalArgumentException("Incomplete Versionstamp included in vanilla tuple pack");
        }
        if(packed == null) {
            packed = TupleUtil.pack(elements, getPackedSize());
        }
        boolean hasPrefix = prefix != null && prefix.length > 0;
        if(hasPrefix) {
            return ByteArrayUtil.join(prefix, packed);
        }
        else if(copy) {
            return Arrays.copyOf(packed, packed.length);
        }
        else {
            return packed;
        }
    }

    /**
     * Pack an encoded representation of this {@code Tuple} onto the end of the given {@link ByteBuffer}.
     * It is up to the caller to ensure that there is enough space allocated within the buffer
     * to avoid {@link java.nio.BufferOverflowException}s. The client may call {@link #getPackedSize()}
     * to determine how large this {@code Tuple} will be once packed in order to allocate sufficient memory.
     * Note that unlike {@link #pack()}, the serialized representation of this {@code Tuple} is not stored, so
     * calling this function multiple times with the same {@code Tuple} requires serializing the {@code Tuple}
     * multiple times.
     * <br>
     * <br>
     * This method will throw an error if there are any incomplete {@link Versionstamp}s in this {@code Tuple}.
     *
     * @param dest the destination {@link ByteBuffer} for the encoded {@code Tuple}
     */
    public void packInto(ByteBuffer dest) {
        if(hasIncompleteVersionstamp()) {
            throw new IllegalArgumentException("Incomplete Versionstamp included in vanilla tuple pack");
        }
        if(packed == null) {
            TupleUtil.pack(dest, elements);
        }
        else {
            dest.put(packed);
        }
    }

    /**
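
The memoization described in the updated javadoc is observable from outside; a short sketch (class name illustrative):

import java.nio.ByteBuffer;
import java.util.Arrays;
import com.apple.foundationdb.tuple.Tuple;

public class PackDemo {
    public static void main(String[] args) {
        Tuple t = Tuple.from("hello", 42L);
        byte[] first = t.pack();  // serializes and caches the bytes
        byte[] second = t.pack(); // served from the cache as a defensive copy
        System.out.println(first != second);              // true: distinct arrays
        System.out.println(Arrays.equals(first, second)); // true: same contents

        // packInto() writes into a caller-supplied buffer and caches nothing.
        ByteBuffer buf = ByteBuffer.allocate(t.getPackedSize());
        t.packInto(buf);
    }
}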
@@ -309,7 +390,7 @@ public class Tuple implements Comparable<Tuple>, Iterable<Object> {
     * This works the same as the {@link #packWithVersionstamp(byte[]) one-parameter version of this method},
     * but it does not add any prefix to the array.
     *
     * @return a serialized representation of this {@code Tuple} for use with versionstamp ops.
     * @return a packed representation of this {@code Tuple} for use with versionstamp ops.
     * @throws IllegalArgumentException if there is not exactly one incomplete {@link Versionstamp} included in this {@code Tuple}
     */
    public byte[] packWithVersionstamp() {
@@ -322,19 +403,58 @@ public class Tuple implements Comparable<Tuple>, Iterable<Object> {
     * There must be exactly one incomplete {@link Versionstamp} instance within this
     * {@code Tuple} or this will throw an {@link IllegalArgumentException}.
     * Each element is encoded to {@code byte}s and concatenated, the prefix
     * is then prepended to the array, and then the index of the serialized incomplete
     * is then prepended to the array, and then the index of the packed incomplete
     * {@link Versionstamp} is appended as a little-endian integer. This can then be passed
     * as the key to
     * {@link com.apple.foundationdb.Transaction#mutate(com.apple.foundationdb.MutationType, byte[], byte[]) Transaction.mutate()}
     * with the {@code SET_VERSIONSTAMPED_KEY} {@link com.apple.foundationdb.MutationType}, and the transaction's
     * version will then be filled in at commit time.
     * <br>
     * <br>
     * Note that once a {@code Tuple} has been packed, its serialized representation is stored internally so that
     * future calls to this function are faster than the initial call.
     *
     * @param prefix additional byte-array prefix to prepend to serialized bytes.
     * @return a serialized representation of this {@code Tuple} for use with versionstamp ops.
     * @param prefix additional byte-array prefix to prepend to packed bytes.
     * @return a packed representation of this {@code Tuple} for use with versionstamp ops.
     * @throws IllegalArgumentException if there is not exactly one incomplete {@link Versionstamp} included in this {@code Tuple}
     */
    public byte[] packWithVersionstamp(byte[] prefix) {
        return TupleUtil.packWithVersionstamp(elements, prefix);
        return packWithVersionstampInternal(prefix, true);
    }

    byte[] packWithVersionstampInternal(byte[] prefix, boolean copy) {
        if(!hasIncompleteVersionstamp()) {
            throw new IllegalArgumentException("No incomplete Versionstamp included in tuple pack with versionstamp");
        }
        if(packed == null) {
            packed = TupleUtil.packWithVersionstamp(elements, getPackedSize());
        }
        boolean hasPrefix = prefix != null && prefix.length > 0;
        if(hasPrefix) {
            byte[] withPrefix = ByteArrayUtil.join(prefix, packed);
            TupleUtil.adjustVersionPosition(withPrefix, prefix.length);
            return withPrefix;
        }
        else if(copy) {
            return Arrays.copyOf(packed, packed.length);
        }
        else {
            return packed;
        }
    }

    byte[] packMaybeVersionstamp() {
        if(packed == null) {
            if(hasIncompleteVersionstamp()) {
                return packWithVersionstampInternal(null, false);
            }
            else {
                return packInternal(null, false);
            }
        }
        else {
            return packed;
        }
    }

    /**
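
A hedged end-to-end sketch of the versionstamp path this javadoc describes (database opening details and key names are illustrative; Transaction.mutate and MutationType.SET_VERSIONSTAMPED_KEY are the API surface named above):

import com.apple.foundationdb.Database;
import com.apple.foundationdb.FDB;
import com.apple.foundationdb.MutationType;
import com.apple.foundationdb.tuple.Tuple;
import com.apple.foundationdb.tuple.Versionstamp;

public class VersionstampWriteDemo {
    public static void main(String[] args) {
        FDB fdb = FDB.selectAPIVersion(610); // assumed API version
        try(Database db = fdb.open()) {
            db.run(tr -> {
                // The incomplete versionstamp is filled in by the cluster at commit.
                Tuple key = Tuple.from("log", Versionstamp.incomplete());
                tr.mutate(MutationType.SET_VERSIONSTAMPED_KEY,
                        key.packWithVersionstamp(), Tuple.from("payload").pack());
                return null;
            });
        }
    }
}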
@@ -343,7 +463,7 @@ public class Tuple implements Comparable<Tuple>, Iterable<Object> {
     * @return the elements that make up this {@code Tuple}.
     */
    public List<Object> getItems() {
        return new ArrayList<Object>(elements);
        return new ArrayList<>(elements);
    }

    /**
@@ -355,16 +475,6 @@ public class Tuple implements Comparable<Tuple>, Iterable<Object> {
        return elements.stream();
    }

    /**
     * Returns the internal elements that make up this tuple. For internal use only, as
     * modifications to the result will mean that this Tuple is modified.
     *
     * @return the elements of this Tuple, without copying
     */
    private List<Object> peekItems() {
        return this.elements;
    }

    /**
     * Gets an {@code Iterator} over the {@code Objects} in this {@code Tuple}. This {@code Iterator} is
     * unmodifiable and will throw an exception if {@link Iterator#remove() remove()} is called.
@@ -376,25 +486,16 @@ public class Tuple implements Comparable<Tuple>, Iterable<Object> {
        return Collections.unmodifiableList(this.elements).iterator();
    }

    /**
     * Construct a new empty {@code Tuple}. After creation, items can be added
     * with calls the the variations of {@code add()}.
     *
     * @see #from(Object...)
     * @see #fromBytes(byte[])
     * @see #fromItems(Iterable)
     */
    public Tuple() {
        this.elements = new LinkedList<Object>();
    }

    /**
     * Construct a new {@code Tuple} with elements decoded from a supplied {@code byte} array.
     * The passed byte array must not be {@code null}.
     * The passed byte array must not be {@code null}. This will throw an exception if the passed byte
     * array does not represent a valid {@code Tuple}. For example, this will throw an error if it
     * encounters an unknown type code or if there is a packed element that appears to be truncated.
     *
     * @param bytes encoded {@code Tuple} source
     *
     * @return a new {@code Tuple} constructed by deserializing the provided {@code byte} array
     * @throws IllegalArgumentException if {@code bytes} does not represent a valid {@code Tuple}
     */
    public static Tuple fromBytes(byte[] bytes) {
        return fromBytes(bytes, 0, bytes.length);
@@ -402,17 +503,29 @@ public class Tuple implements Comparable<Tuple>, Iterable<Object> {

    /**
     * Construct a new {@code Tuple} with elements decoded from a supplied {@code byte} array.
     * The passed byte array must not be {@code null}.
     * The passed byte array must not be {@code null}. This will throw an exception if the specified slice of
     * the passed byte array does not represent a valid {@code Tuple}. For example, this will throw an error
     * if it encounters an unknown type code or if there is a packed element that appears to be truncated.
     *
     * @param bytes encoded {@code Tuple} source
     * @param offset starting offset of byte array of encoded data
     * @param length length of encoded data within the source
     *
     * @return a new {@code Tuple} constructed by deserializing the specified slice of the provided {@code byte} array
     * @throws IllegalArgumentException if {@code offset} or {@code length} are negative or would exceed the size of
     *  the array or if {@code bytes} does not represent a valid {@code Tuple}
     */
    public static Tuple fromBytes(byte[] bytes, int offset, int length) {
        Tuple t = new Tuple();
        t.elements = TupleUtil.unpack(bytes, offset, length);
        if(offset < 0 || offset > bytes.length) {
            throw new IllegalArgumentException("Invalid offset for Tuple deserialization");
        }
        if(length < 0 || offset + length > bytes.length) {
            throw new IllegalArgumentException("Invalid length for Tuple deserialization");
        }
        byte[] packed = Arrays.copyOfRange(bytes, offset, offset + length);
        Tuple t = new Tuple(TupleUtil.unpack(packed));
        t.packed = packed;
        t.memoizedPackedSize = length;
        return t;
    }

@@ -623,13 +736,14 @@ public class Tuple implements Comparable<Tuple>, Iterable<Object> {
        Object o = this.elements.get(index);
        if(o == null) {
            return null;
        } else if(o instanceof Tuple) {
        }
        else if(o instanceof Tuple) {
            return ((Tuple)o).getItems();
        } else if(o instanceof List<?>) {
            List<Object> ret = new LinkedList<Object>();
            ret.addAll((List<? extends Object>)o);
            return ret;
        } else {
        }
        else if(o instanceof List<?>) {
            return new ArrayList<>((List<?>) o);
        }
        else {
            throw new ClassCastException("Cannot convert item of type " + o.getClass() + " to list");
        }
    }
@@ -650,11 +764,14 @@ public class Tuple implements Comparable<Tuple>, Iterable<Object> {
        Object o = this.elements.get(index);
        if(o == null) {
            return null;
        } else if(o instanceof Tuple) {
        }
        else if(o instanceof Tuple) {
            return (Tuple)o;
        } else if(o instanceof List<?>) {
            return Tuple.fromItems((List<? extends Object>)o);
        } else {
        }
        else if(o instanceof List<?>) {
            return Tuple.fromList((List<?>)o);
        }
        else {
            throw new ClassCastException("Cannot convert item of type " + o.getClass() + " to tuple");
        }
    }
@@ -678,15 +795,10 @@ public class Tuple implements Comparable<Tuple>, Iterable<Object> {
     * @throws IllegalStateException if this {@code Tuple} is empty
     */
    public Tuple popFront() {
        if(elements.size() == 0)
        if(elements.isEmpty())
            throw new IllegalStateException("Tuple contains no elements");

        List<Object> items = new ArrayList<Object>(elements.size() - 1);
        for(int i = 1; i < this.elements.size(); i++) {
            items.add(this.elements.get(i));
        }
        return new Tuple(items);
        return new Tuple(elements.subList(1, elements.size()));
    }

    /**
@@ -697,15 +809,10 @@ public class Tuple implements Comparable<Tuple>, Iterable<Object> {
     * @throws IllegalStateException if this {@code Tuple} is empty
     */
    public Tuple popBack() {
        if(elements.size() == 0)
        if(elements.isEmpty())
            throw new IllegalStateException("Tuple contains no elements");

        List<Object> items = new ArrayList<Object>(elements.size() - 1);
        for(int i = 0; i < this.elements.size() - 1; i++) {
            items.add(this.elements.get(i));
        }
        return new Tuple(items);
        return new Tuple(elements.subList(0, elements.size() - 1));
    }

    /**
@@ -718,15 +825,43 @@ public class Tuple implements Comparable<Tuple>, Iterable<Object> {
     * Tuple t = Tuple.from("a", "b");
     * Range r = t.range();</pre>
     * {@code r} includes all tuples ("a", "b", ...)
     * <br>
     * This function will throw an error if this {@code Tuple} contains an incomplete
     * {@link Versionstamp}.
     *
     * @return the range of keys containing all {@code Tuple}s that have this {@code Tuple}
     *  as a prefix
     * @return the range of keys containing all possible keys that have this {@code Tuple}
     *  as a strict prefix
     */
    public Range range() {
        byte[] p = pack();
        //System.out.println("Packed tuple is: " + ByteArrayUtil.printable(p));
        return range(null);
    }

    /**
     * Returns a range representing all keys that encode {@code Tuple}s strictly starting
     * with the given prefix followed by this {@code Tuple}.
     * <br>
     * <br>
     * For example:
     * <pre>
     * Tuple t = Tuple.from("a", "b");
     * Range r = t.range(Tuple.from("c").pack());</pre>
     * {@code r} contains all tuples ("c", "a", "b", ...)
     * <br>
     * This function will throw an error if this {@code Tuple} contains an incomplete
     * {@link Versionstamp}.
     *
     * @param prefix a byte prefix to precede all elements in the range
     *
     * @return the range of keys containing all possible keys that have {@code prefix}
     *  followed by this {@code Tuple} as a strict prefix
     */
    public Range range(byte[] prefix) {
        if(hasIncompleteVersionstamp()) {
            throw new IllegalStateException("Tuple with incomplete versionstamp used for range");
        }
        byte[] p = packInternal(prefix, false);
        return new Range(ByteArrayUtil.join(p, new byte[] {0x0}),
                ByteArrayUtil.join(p, new byte[] {(byte)0xff}));
    }

    /**
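
Concretely, range() brackets the tuple's own encoding with a 0x00 and a 0xff byte, which is exactly the span of keys that have the tuple as a strict prefix; a small sketch:

import com.apple.foundationdb.Range;
import com.apple.foundationdb.tuple.ByteArrayUtil;
import com.apple.foundationdb.tuple.Tuple;

public class TupleRangeDemo {
    public static void main(String[] args) {
        Range r = Tuple.from("a", "b").range();
        // begin = pack("a","b") + 0x00, end = pack("a","b") + 0xff:
        // matches every key encoding ("a", "b", ...) but not ("a", "b") itself.
        System.out.println(ByteArrayUtil.printable(r.begin));
        System.out.println(ByteArrayUtil.printable(r.end));
    }
}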
@@ -739,7 +874,41 @@ public class Tuple implements Comparable<Tuple>, Iterable<Object> {
     * {@code Tuple}
     */
    public boolean hasIncompleteVersionstamp() {
        return TupleUtil.hasIncompleteVersionstamp(stream());
        return incompleteVersionstamp;
    }

    /**
     * Get the number of bytes in the packed representation of this {@code Tuple}. This is done by summing
     * the serialized sizes of all of the elements of this {@code Tuple} and does not pack everything
     * into a single {@code Tuple}. The return value of this function is stored within this {@code Tuple}
     * after this function has been called so that subsequent calls on the same object are fast. This method
     * does not validate that there is not more than one incomplete {@link Versionstamp} in this {@code Tuple}.
     *
     * @return the number of bytes in the packed representation of this {@code Tuple}
     */
    public int getPackedSize() {
        if(memoizedPackedSize < 0) {
            memoizedPackedSize = getPackedSize(false);
        }
        return memoizedPackedSize;
    }

    int getPackedSize(boolean nested) {
        if(memoizedPackedSize >= 0) {
            if(!nested) {
                return memoizedPackedSize;
            }
            int nullCount = 0;
            for(Object elem : elements) {
                if(elem == null) {
                    nullCount++;
                }
            }
            return memoizedPackedSize + nullCount;
        }
        else {
            return TupleUtil.getPackedSize(elements, nested);
        }
    }

    /**
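
The nested-null adjustment is the subtle part: inside a nested tuple a null is escaped as 0x00 0xFF (one extra byte per null), which is why the memoized top-level size cannot be reused verbatim when nested. A sketch of the observable effect, with expected values derived from the tuple encoding:

import com.apple.foundationdb.tuple.Tuple;

public class PackedSizeDemo {
    public static void main(String[] args) {
        Tuple inner = Tuple.from((Object)null);
        System.out.println(inner.getPackedSize()); // 1: null packs as 0x00 at top level
        // Nested, the null becomes 0x00 0xFF, plus a 0x05 type code and 0x00 terminator.
        Tuple outer = Tuple.from(inner);
        System.out.println(outer.getPackedSize()); // 4
    }
}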
@@ -756,7 +925,14 @@ public class Tuple implements Comparable<Tuple>, Iterable<Object> {
     */
    @Override
    public int compareTo(Tuple t) {
        return comparator.compare(elements, t.elements);
        // If either tuple has an incomplete versionstamp, then there is a possibility that the byte order
        // is not the semantic comparison order.
        if(packed != null && t.packed != null && !hasIncompleteVersionstamp() && !t.hasIncompleteVersionstamp()) {
            return ByteArrayUtil.compareUnsigned(packed, t.packed);
        }
        else {
            return comparator.compare(elements, t.elements);
        }
    }

    /**
@@ -772,14 +948,7 @@ public class Tuple implements Comparable<Tuple>, Iterable<Object> {
    @Override
    public int hashCode() {
        if(memoizedHash == 0) {
            byte[] packed;
            if(hasIncompleteVersionstamp()) {
                packed = packWithVersionstamp(null);
            }
            else {
                packed = pack();
            }
            memoizedHash = Arrays.hashCode(packed);
            memoizedHash = Arrays.hashCode(packMaybeVersionstamp());
        }
        return memoizedHash;
    }
@@ -857,12 +1026,15 @@ public class Tuple implements Comparable<Tuple>, Iterable<Object> {
     *
     * @return a new {@code Tuple} with the given items as its elements
     */
    public static Tuple fromItems(Iterable<? extends Object> items) {
        Tuple t = new Tuple();
        for(Object o : items) {
            t = t.addObject(o);
    public static Tuple fromItems(Iterable<?> items) {
        if(items instanceof List<?>) {
            return Tuple.fromList((List<?>)items);
        }
        return t;
        List<Object> elements = new ArrayList<>();
        for(Object o : items) {
            elements.add(o);
        }
        return new Tuple(elements);
    }

    /**
@@ -875,8 +1047,9 @@ public class Tuple implements Comparable<Tuple>, Iterable<Object> {
     *
     * @return a new {@code Tuple} with the given items as its elements
     */
    public static Tuple fromList(List<? extends Object> items) {
        return new Tuple(items);
    public static Tuple fromList(List<?> items) {
        List<Object> elements = new ArrayList<>(items);
        return new Tuple(elements);
    }

    /**
@@ -890,10 +1063,8 @@ public class Tuple implements Comparable<Tuple>, Iterable<Object> {
     *
     * @return a new {@code Tuple} with the given items as its elements
     */
    public static Tuple fromStream(Stream<? extends Object> items) {
        Tuple t = new Tuple();
        t.elements = items.collect(Collectors.toList());
        return t;
    public static Tuple fromStream(Stream<?> items) {
        return new Tuple(items.collect(Collectors.toList()));
    }

    /**
@@ -907,7 +1078,7 @@ public class Tuple implements Comparable<Tuple>, Iterable<Object> {
     * @return a new {@code Tuple} with the given items as its elements
     */
    public static Tuple from(Object... items) {
        return fromList(Arrays.asList(items));
        return new Tuple(Arrays.asList(items));
    }

    static void main(String[] args) {
@@ -1011,7 +1182,7 @@ public class Tuple implements Comparable<Tuple>, Iterable<Object> {
    }

    private static Tuple createTuple(int items) {
        List<Object> elements = new ArrayList<Object>(items);
        List<Object> elements = new ArrayList<>(items);
        for(int i = 0; i < items; i++) {
            elements.add(new byte[]{99});
        }
[File diff suppressed because it is too large]
@@ -94,8 +94,8 @@ public class Versionstamp implements Comparable<Versionstamp> {
    private static final byte[] UNSET_TRANSACTION_VERSION = {(byte)0xff, (byte)0xff, (byte)0xff, (byte)0xff, (byte)0xff,
            (byte)0xff, (byte)0xff, (byte)0xff, (byte)0xff, (byte)0xff};

    private boolean complete;
    private byte[] versionBytes;
    private final boolean complete;
    private final byte[] versionBytes;

    /**
     * From a byte array, unpack the user version starting at the given position.
@@ -412,7 +412,11 @@ public class AsyncStackTester {
        return inst.popParams(listSize).thenAcceptAsync(rawElements -> {
            List<Tuple> tuples = new ArrayList<>(listSize);
            for(Object o : rawElements) {
                tuples.add(Tuple.fromBytes((byte[])o));
                // Unpacking a tuple keeps around the serialized representation and uses
                // it for comparison if it's available. To test semantic comparison, recreate
                // the tuple from the item list.
                Tuple t = Tuple.fromBytes((byte[])o);
                tuples.add(Tuple.fromList(t.getItems()));
            }
            Collections.sort(tuples);
            for(Tuple t : tuples) {
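
The comment the testers add is worth demonstrating: fromBytes() carries the packed bytes along (enabling the byte-wise compareTo fast path), while rebuilding from getItems() drops them and forces the element-wise comparator. Both orders agree; the rebuild simply exercises the semantic path. A sketch:

import com.apple.foundationdb.tuple.Tuple;

public class ComparisonPathDemo {
    public static void main(String[] args) {
        byte[] packed = Tuple.from("a", 1L).pack();
        Tuple byBytes = Tuple.fromBytes(packed);            // memoizes the packed form
        Tuple byItems = Tuple.fromList(byBytes.getItems()); // no memoized bytes
        // Same value either way; only the internal comparison path differs.
        System.out.println(byBytes.compareTo(byItems)); // 0
    }
}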
@@ -368,9 +368,13 @@ public class StackTester {
        else if (op == StackOperation.TUPLE_SORT) {
            int listSize = StackUtils.getInt(inst.popParam().join());
            List<Object> rawElements = inst.popParams(listSize).join();
            List<Tuple> tuples = new ArrayList<Tuple>(listSize);
            List<Tuple> tuples = new ArrayList<>(listSize);
            for(Object o : rawElements) {
                tuples.add(Tuple.fromBytes((byte[])o));
                // Unpacking a tuple keeps around the serialized representation and uses
                // it for comparison if it's available. To test semantic comparison, recreate
                // the tuple from the item list.
                Tuple t = Tuple.fromBytes((byte[])o);
                tuples.add(Tuple.fromList(t.getItems()));
            }
            Collections.sort(tuples);
            for(Tuple t : tuples) {
@@ -13,17 +13,26 @@ import com.apple.foundationdb.tuple.Versionstamp;

public class TuplePerformanceTest {

    private enum GeneratedTypes {
        ALL,
        LONG,
        FLOATING_POINT,
        STRING_LIKE
    }

    private final Random r;
    private final int ignoreIterations;
    private final int iterations;
    private final GeneratedTypes generatedTypes;

    public TuplePerformanceTest(Random r, int ignoreIterations, int iterations) {
    public TuplePerformanceTest(Random r, int ignoreIterations, int iterations, GeneratedTypes generatedTypes) {
        this.r = r;
        this.ignoreIterations = ignoreIterations;
        this.iterations = iterations;
        this.generatedTypes = generatedTypes;
    }

    public Tuple createTuple(int length) {
    public Tuple createMultiTypeTuple(int length) {
        List<Object> values = new ArrayList<>(length);
        for(int i = 0; i < length; i++) {
            double choice = r.nextDouble();
@@ -38,7 +47,7 @@ public class TuplePerformanceTest {
            else if(choice < 0.3) {
                char[] chars = new char[r.nextInt(20)];
                for (int j = 0; j < chars.length; j++) {
                    chars[j] = (char)('a' + r.nextInt(26));
                    chars[j] = (char) ('a' + r.nextInt(26));
                }
                values.add(new String(chars));
            }
@ -69,7 +78,91 @@ public class TuplePerformanceTest {
|
|||
values.add(nested);
|
||||
}
|
||||
}
|
||||
return Tuple.from(values);
|
||||
return Tuple.fromList(values);
|
||||
}

    public Tuple createLongsTuple(int length) {
        List<Object> values = new ArrayList<>(length);
        for(int i = 0; i < length; i++) {
            int byteLength = r.nextInt(Long.BYTES + 1);
            long val = 0L;
            for(int x = 0; x < byteLength; x++) {
                int nextBytes = r.nextInt(256);
                val = (val << 8) + nextBytes;
            }
            values.add(val);
        }
        return Tuple.fromList(values);
    }

    public Tuple createFloatingPointTuple(int length) {
        List<Object> values = new ArrayList<>(length);
        for(int i = 0; i < length; i++) {
            double choice = r.nextDouble();
            if(choice < 0.40) {
                values.add(r.nextFloat());
            }
            else if(choice < 0.80) {
                values.add(r.nextDouble());
            }
            // These last two are more likely to produce NaN values
            else if(choice < 0.90) {
                values.add(Float.intBitsToFloat(r.nextInt()));
            }
            else {
                values.add(Double.longBitsToDouble(r.nextLong()));
            }
        }
        return Tuple.fromList(values);
    }

    public Tuple createStringLikeTuple(int length) {
        List<Object> values = new ArrayList<>(length);
        for(int i = 0; i < length; i++) {
            double choice = r.nextDouble();
            if(choice < 0.4) {
                byte[] arr = new byte[r.nextInt(20)];
                r.nextBytes(arr);
                values.add(arr);
            }
            else if(choice < 0.8) {
                // Random ASCII codepoints
                int[] codepoints = new int[r.nextInt(20)];
                for(int x = 0; x < codepoints.length; x++) {
                    codepoints[x] = r.nextInt(0x7F);
                }
                values.add(new String(codepoints, 0, codepoints.length));
            }
            else if(choice < 0.9) {
                // All zeroes
                byte[] zeroes = new byte[r.nextInt(20)];
                values.add(zeroes);
            }
            else {
                // Random Unicode codepoints
                int[] codepoints = new int[r.nextInt(20)];
                for(int x = 0; x < codepoints.length; x++) {
                    codepoints[x] = r.nextInt(0x10FFFF);
                }
                values.add(new String(codepoints, 0, codepoints.length));
            }
        }
        return Tuple.fromList(values);
    }

    public Tuple createTuple(int length) {
        switch (generatedTypes) {
            case ALL:
                return createMultiTypeTuple(length);
            case LONG:
                return createLongsTuple(length);
            case FLOATING_POINT:
                return createFloatingPointTuple(length);
            case STRING_LIKE:
                return createStringLikeTuple(length);
            default:
                throw new IllegalStateException("unknown generated types " + generatedTypes);
        }
    }

    public void run() {

@@ -86,6 +179,8 @@ public class TuplePerformanceTest {
        long packNanos = 0L;
        long unpackNanos = 0L;
        long equalsNanos = 0L;
        long equalsArrayNanos = 0L;
        long sizeNanos = 0L;
        long hashNanos = 0L;
        long secondHashNanos = 0L;
        long subspacePackNanos = 0L;

@@ -93,6 +188,9 @@ public class TuplePerformanceTest {
        long totalLength = 0L;
        long totalBytes = 0L;
        for(int i = 0; i < iterations; i++) {
            if(i % 100_000 == 0) {
                System.out.println("  iteration " + i);
            }
            int length = r.nextInt(20);
            Tuple t = createTuple(length);

@@ -100,20 +198,39 @@ public class TuplePerformanceTest {
            byte[] serialized = t.pack();
            long endNanos = System.nanoTime();
            packNanos += endNanos - startNanos;
            totalLength += length;
            totalBytes += serialized.length;
            totalLength += t.size();
            totalBytes += t.getPackedSize();

            startNanos = System.nanoTime();
            Tuple t2 = Tuple.fromBytes(serialized);
            endNanos = System.nanoTime();
            unpackNanos += endNanos - startNanos;

            // Copy the items into fresh tuples; if both tuples are already packed, equals() just compares their memoized byte arrays
            Tuple tCopy = Tuple.fromList(t.getItems());
            Tuple t2Copy = Tuple.fromList(t2.getItems());
            startNanos = System.nanoTime();
            if (!tCopy.equals(t2Copy)) {
                throw new RuntimeException("deserialized did not match serialized: " + t + " -- " + t2);
            }
            endNanos = System.nanoTime();
            equalsNanos += endNanos - startNanos;

            startNanos = System.nanoTime();
            if(!t.equals(t2)) {
                throw new RuntimeException("deserialized did not match serialized: " + t + " -- " + t2);
            }
            endNanos = System.nanoTime();
            equalsNanos += endNanos - startNanos;
            equalsArrayNanos += endNanos - startNanos;

            tCopy = Tuple.fromList(t.getItems());
            startNanos = System.nanoTime();
            int size = tCopy.getPackedSize();
            endNanos = System.nanoTime();
            if (size != t.pack().length) {
                throw new RuntimeException("packed size did not match actual packed length: " + t + " -- " + " " + tCopy.getPackedSize() + " instead of " + t.getPackedSize());
            }
            sizeNanos += endNanos - startNanos;

            startNanos = System.nanoTime();
            byte[] subspacePacked = subspace.pack(t);

@@ -126,7 +243,7 @@ public class TuplePerformanceTest {
            startNanos = System.nanoTime();
            Tuple t3 = subspace.unpack(subspacePacked);
            endNanos = System.nanoTime();
            if(!t.equals(t3)) {
            if (!Tuple.fromList(t.getItems()).equals(Tuple.fromList(t3.getItems())) || !t.equals(t3)) {
                throw new RuntimeException("does not unpack equally from subspace");
            }
            if(!Arrays.equals(t.pack(), t3.pack())) {

@@ -149,29 +266,33 @@ public class TuplePerformanceTest {
        }

        System.out.println("Test ended.");
        System.out.printf("  Total elements: %d%n", totalLength);
        System.out.printf("  Total bytes: %d kB%n", totalBytes / 1000);
        System.out.printf("  Bytes per tuple: %f B%n", totalBytes * 1.0 / iterations);
        System.out.printf("  Pack time: %f s%n", packNanos * 1e-9);
        System.out.printf("  Pack time per tuple: %f \u03BCs%n", packNanos * 1e-3 / iterations);
        System.out.printf("  Pack time per kB: %f \u03BCs%n", packNanos * 1.0 / totalBytes);
        System.out.printf("  Serialization rate: %f objects / \u03BCs%n", totalLength * 1000.0 / packNanos);
        System.out.printf("  Unpack time: %f s%n", unpackNanos * 1e-9);
        System.out.printf("  Unpack time per tuple: %f \u03BCs%n", unpackNanos * 1e-3 / iterations);
        System.out.printf("  Equals time: %f s%n", equalsNanos * 1e-9);
        System.out.printf("  Equals time per tuple: %f \u03BCs%n", equalsNanos * 1e-3 / iterations);
        System.out.printf("  Subspace pack time: %f s%n", subspacePackNanos * 1e-9);
        System.out.printf("  Subspace pack time per tuple: %f \u03BCs%n", subspacePackNanos * 1e-3 / iterations);
        System.out.printf("  Subspace unpack time: %f s%n", subspaceUnpackNanos * 1e-9);
        System.out.printf("  Subspace unpack time per tuple: %f \u03BCs%n", subspaceUnpackNanos * 1e-3 / iterations);
        System.out.printf("  Hash time: %f s%n", hashNanos * 1e-9);
        System.out.printf("  Hash time per tuple: %f \u03BCs%n", hashNanos * 1e-3 / iterations);
        System.out.printf("  Second hash time: %f s%n", secondHashNanos * 1e-9);
        System.out.printf("  Second hash time per tuple: %f \u03BCs%n", secondHashNanos * 1e-3 / iterations);
        System.out.printf("  Total elements: %d%n", totalLength);
        System.out.printf("  Total bytes: %d kB%n", totalBytes / 1000);
        System.out.printf("  Bytes per tuple: %f B%n", totalBytes * 1.0 / iterations);
        System.out.printf("  Pack time: %f s%n", packNanos * 1e-9);
        System.out.printf("  Pack time per tuple: %f \u03BCs%n", packNanos * 1e-3 / iterations);
        System.out.printf("  Pack time per kB: %f \u03BCs%n", packNanos * 1.0 / totalBytes);
        System.out.printf("  Serialization rate: %f objects / \u03BCs%n", totalLength * 1000.0 / packNanos);
        System.out.printf("  Unpack time: %f s%n", unpackNanos * 1e-9);
        System.out.printf("  Unpack time per tuple: %f \u03BCs%n", unpackNanos * 1e-3 / iterations);
        System.out.printf("  Equals time: %f s%n", equalsNanos * 1e-9);
        System.out.printf("  Equals time per tuple: %f \u03BCs%n", equalsNanos * 1e-3 / iterations);
        System.out.printf("  Equals time (using packed): %f s%n", equalsArrayNanos * 1e-9);
        System.out.printf("  Equals time (using packed) per tuple: %f \u03BCs%n", equalsArrayNanos * 1e-3 / iterations);
        System.out.printf("  Size time: %f s%n", sizeNanos * 1e-9);
        System.out.printf("  Size time per tuple: %f \u03BCs%n", sizeNanos * 1e-3 / iterations);
        System.out.printf("  Subspace pack time: %f s%n", subspacePackNanos * 1e-9);
        System.out.printf("  Subspace pack time per tuple: %f \u03BCs%n", subspacePackNanos * 1e-3 / iterations);
        System.out.printf("  Subspace unpack time: %f s%n", subspaceUnpackNanos * 1e-9);
        System.out.printf("  Subspace unpack time per tuple: %f \u03BCs%n", subspaceUnpackNanos * 1e-3 / iterations);
        System.out.printf("  Hash time: %f s%n", hashNanos * 1e-9);
        System.out.printf("  Hash time per tuple: %f \u03BCs%n", hashNanos * 1e-3 / iterations);
        System.out.printf("  Second hash time: %f s%n", secondHashNanos * 1e-9);
        System.out.printf("  Second hash time per tuple: %f \u03BCs%n", secondHashNanos * 1e-3 / iterations);
    }

    public static void main(String[] args) {
        TuplePerformanceTest tester = new TuplePerformanceTest(new Random(), 100_000, 10_000);
        TuplePerformanceTest tester = new TuplePerformanceTest(new Random(), 100_000, 10_000_000, GeneratedTypes.ALL);
        tester.run();
    }
}
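
A usage sketch (an illustration, not part of the commit): pointing the benchmark at a single workload only requires choosing a different ``GeneratedTypes`` value; the counts below are arbitrary:

    // Exercise just the long-integer workload instead of the mixed one.
    TuplePerformanceTest longsTester =
            new TuplePerformanceTest(new Random(), 10_000, 1_000_000, GeneratedTypes.LONG);
    longsTester.run();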

@@ -20,16 +20,42 @@

package com.apple.foundationdb.test;

import java.math.BigInteger;
import java.nio.BufferOverflowException;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Objects;
import java.util.UUID;
import java.util.stream.Stream;

import com.apple.foundationdb.Database;
import com.apple.foundationdb.FDB;
import com.apple.foundationdb.TransactionContext;
import com.apple.foundationdb.subspace.Subspace;
import com.apple.foundationdb.tuple.ByteArrayUtil;
import com.apple.foundationdb.tuple.Tuple;
import com.apple.foundationdb.tuple.Versionstamp;

public class TupleTest {
    private static final byte FF = (byte)0xff;

    public static void main(String[] args) throws InterruptedException {
        final int reps = 1000;
        try {
            FDB fdb = FDB.selectAPIVersion(610);
            addMethods();
            comparisons();
            emptyTuple();
            incompleteVersionstamps();
            intoBuffer();
            offsetsAndLengths();
            malformedBytes();
            replaceTests();
            serializedForms();
            try(Database db = fdb.open()) {
                runTests(reps, db);
            }

@@ -38,6 +64,896 @@ public class TupleTest {
        }
    }

    private static class TupleSerialization {
        private final Tuple tuple;
        private final byte[] serialization;

        TupleSerialization(Tuple tuple, byte[] serialization) {
            this.tuple = tuple;
            this.serialization = serialization;
        }

        static void addAll(List<TupleSerialization> list, Object... args) {
            for(int i = 0; i < args.length; i += 2) {
                TupleSerialization serialization = new TupleSerialization((Tuple)args[i], (byte[])args[i + 1]);
                list.add(serialization);
            }
        }
    }

    private static void serializedForms() {
        List<TupleSerialization> serializations = new ArrayList<>();
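        // For reference: the leading type codes in the expected bytes below follow the tuple
        // encoding spec -- 0x01 byte string, 0x02 UTF-8 string, 0x05 nested tuple, 0x0c-0x1c
        // variable-width integers (0x14 encodes zero), 0x20/0x21 float/double with
        // sign-adjusted IEEE bits, 0x26/0x27 false/true, 0x30 UUID, 0x33 versionstamp.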
        TupleSerialization.addAll(serializations,
                Tuple.from(), new byte[0],
                Tuple.from(0L), new byte[]{0x14},
                Tuple.from(BigInteger.ZERO), new byte[]{0x14},
                Tuple.from(1L), new byte[]{0x15, 0x01},
                Tuple.from(BigInteger.ONE), new byte[]{0x15, 0x01},
                Tuple.from(-1L), new byte[]{0x13, FF - 1},
                Tuple.from(BigInteger.ONE.negate()), new byte[]{0x13, FF - 1},
                Tuple.from(255L), new byte[]{0x15, FF},
                Tuple.from(BigInteger.valueOf(255)), new byte[]{0x15, FF},
                Tuple.from(-255L), new byte[]{0x13, 0x00},
                Tuple.from(BigInteger.valueOf(-255)), new byte[]{0x13, 0x00},
                Tuple.from(256L), new byte[]{0x16, 0x01, 0x00},
                Tuple.from(BigInteger.valueOf(256)), new byte[]{0x16, 0x01, 0x00},
                Tuple.from(-256L), new byte[]{0x12, FF - 1, FF},
                Tuple.from(BigInteger.valueOf(-256)), new byte[]{0x12, FF - 1, FF},
                Tuple.from(65536), new byte[]{0x17, 0x01, 0x00, 0x00},
                Tuple.from(-65536), new byte[]{0x11, FF - 1, FF, FF},
                Tuple.from(Long.MAX_VALUE), new byte[]{0x1C, 0x7f, FF, FF, FF, FF, FF, FF, FF},
                Tuple.from(BigInteger.valueOf(Long.MAX_VALUE)), new byte[]{0x1C, 0x7f, FF, FF, FF, FF, FF, FF, FF},
                Tuple.from(BigInteger.valueOf(Long.MAX_VALUE).add(BigInteger.ONE)), new byte[]{0x1C, (byte)0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
                Tuple.from(BigInteger.ONE.shiftLeft(64).subtract(BigInteger.ONE)), new byte[]{0x1C, FF, FF, FF, FF, FF, FF, FF, FF},
                Tuple.from(BigInteger.ONE.shiftLeft(64)), new byte[]{0x1D, 0x09, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
                Tuple.from(-((1L << 32) - 1)), new byte[]{0x10, 0x00, 0x00, 0x00, 0x00},
                Tuple.from(BigInteger.ONE.shiftLeft(32).subtract(BigInteger.ONE).negate()), new byte[]{0x10, 0x00, 0x00, 0x00, 0x00},
                Tuple.from(Long.MIN_VALUE + 2), new byte[]{0x0C, (byte)0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01},
                Tuple.from(Long.MIN_VALUE + 1), new byte[]{0x0C, (byte)0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
                Tuple.from(BigInteger.valueOf(Long.MIN_VALUE).add(BigInteger.ONE)), new byte[]{0x0C, (byte)0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
                Tuple.from(Long.MIN_VALUE), new byte[]{0x0C, 0x7f, FF, FF, FF, FF, FF, FF, FF},
                Tuple.from(BigInteger.valueOf(Long.MIN_VALUE)), new byte[]{0x0C, 0x7f, FF, FF, FF, FF, FF, FF, FF},
                Tuple.from(BigInteger.valueOf(Long.MIN_VALUE).subtract(BigInteger.ONE)), new byte[]{0x0C, 0x7f, FF, FF, FF, FF, FF, FF, FF - 1},
                Tuple.from(BigInteger.ONE.shiftLeft(64).subtract(BigInteger.ONE).negate()), new byte[]{0x0C, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
                Tuple.from(3.14f), new byte[]{0x20, (byte)0xc0, 0x48, (byte)0xf5, (byte)0xc3},
                Tuple.from(-3.14f), new byte[]{0x20, (byte)0x3f, (byte)0xb7, (byte)0x0a, (byte)0x3c},
                Tuple.from(3.14), new byte[]{0x21, (byte)0xc0, (byte)0x09, (byte)0x1e, (byte)0xb8, (byte)0x51, (byte)0xeb, (byte)0x85, (byte)0x1f},
                Tuple.from(-3.14), new byte[]{0x21, (byte)0x3f, (byte)0xf6, (byte)0xe1, (byte)0x47, (byte)0xae, (byte)0x14, (byte)0x7a, (byte)0xe0},
                Tuple.from(0.0f), new byte[]{0x20, (byte)0x80, 0x00, 0x00, 0x00},
                Tuple.from(-0.0f), new byte[]{0x20, 0x7f, FF, FF, FF},
                Tuple.from(0.0), new byte[]{0x21, (byte)0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
                Tuple.from(-0.0), new byte[]{0x21, 0x7f, FF, FF, FF, FF, FF, FF, FF},
                Tuple.from(Float.POSITIVE_INFINITY), new byte[]{0x20, FF, (byte)0x80, 0x00, 0x00},
                Tuple.from(Float.NEGATIVE_INFINITY), new byte[]{0x20, 0x00, 0x7f, FF, FF},
                Tuple.from(Double.POSITIVE_INFINITY), new byte[]{0x21, FF, (byte)0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
                Tuple.from(Double.NEGATIVE_INFINITY), new byte[]{0x21, 0x00, 0x0f, FF, FF, FF, FF, FF, FF},
                Tuple.from(Float.intBitsToFloat(Integer.MAX_VALUE)), new byte[]{0x20, FF, FF, FF, FF},
                Tuple.from(Double.longBitsToDouble(Long.MAX_VALUE)), new byte[]{0x21, FF, FF, FF, FF, FF, FF, FF, FF},
                Tuple.from(Float.intBitsToFloat(~0)), new byte[]{0x20, 0x00, 0x00, 0x00, 0x00},
                Tuple.from(Double.longBitsToDouble(~0L)), new byte[]{0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
                Tuple.from((Object)new byte[0]), new byte[]{0x01, 0x00},
                Tuple.from((Object)new byte[]{0x01, 0x02, 0x03}), new byte[]{0x01, 0x01, 0x02, 0x03, 0x00},
                Tuple.from((Object)new byte[]{0x00, 0x00, 0x00, 0x04}), new byte[]{0x01, 0x00, FF, 0x00, FF, 0x00, FF, 0x04, 0x00},
                Tuple.from(""), new byte[]{0x02, 0x00},
                Tuple.from("hello"), new byte[]{0x02, 'h', 'e', 'l', 'l', 'o', 0x00},
                Tuple.from("\u4e2d\u6587"), new byte[]{0x02, (byte)0xe4, (byte)0xb8, (byte)0xad, (byte)0xe6, (byte)0x96, (byte)0x87, 0x00},
                Tuple.from("\u03bc\u03ac\u03b8\u03b7\u03bc\u03b1"), new byte[]{0x02, (byte)0xce, (byte)0xbc, (byte)0xce, (byte)0xac, (byte)0xce, (byte)0xb8, (byte)0xce, (byte)0xb7, (byte)0xce, (byte)0xbc, (byte)0xce, (byte)0xb1, 0x00},
                Tuple.from(new String(new int[]{0x1f525}, 0, 1)), new byte[]{0x02, (byte)0xf0, (byte)0x9f, (byte)0x94, (byte)0xa5, 0x00},
                Tuple.from("\ud83d\udd25"), new byte[]{0x02, (byte)0xf0, (byte)0x9f, (byte)0x94, (byte)0xa5, 0x00},
                Tuple.from("\ud83e\udd6f"), new byte[]{0x02, (byte)0xf0, (byte)0x9f, (byte)0xa5, (byte)0xaf, 0x00},
                Tuple.from("\ud83d"), new byte[]{0x02, 0x3f, 0x00},
                Tuple.from("\udd25\ud83e\udd6f"), new byte[]{0x02, 0x3f, (byte)0xf0, (byte)0x9f, (byte)0xa5, (byte)0xaf, 0x00}, // malformed string - low surrogate without high surrogate
                Tuple.from("a\udd25\ud83e\udd6f"), new byte[]{0x02, 'a', 0x3f, (byte)0xf0, (byte)0x9f, (byte)0xa5, (byte)0xaf, 0x00}, // malformed string - low surrogate without high surrogate
                Tuple.from(Tuple.from((Object)null)), new byte[]{0x05, 0x00, FF, 0x00},
                Tuple.from(Tuple.from(null, "hello")), new byte[]{0x05, 0x00, FF, 0x02, 'h', 'e', 'l', 'l', 'o', 0x00, 0x00},
                Tuple.from(Arrays.asList(null, "hello")), new byte[]{0x05, 0x00, FF, 0x02, 'h', 'e', 'l', 'l', 'o', 0x00, 0x00},
                Tuple.from(Tuple.from(null, "hell\0")), new byte[]{0x05, 0x00, FF, 0x02, 'h', 'e', 'l', 'l', 0x00, FF, 0x00, 0x00},
                Tuple.from(Arrays.asList(null, "hell\0")), new byte[]{0x05, 0x00, FF, 0x02, 'h', 'e', 'l', 'l', 0x00, FF, 0x00, 0x00},
                Tuple.from(Tuple.from((Object)null), "hello"), new byte[]{0x05, 0x00, FF, 0x00, 0x02, 'h', 'e', 'l', 'l', 'o', 0x00},
                Tuple.from(Tuple.from((Object)null), "hello", new byte[]{0x01, 0x00}, new byte[0]), new byte[]{0x05, 0x00, FF, 0x00, 0x02, 'h', 'e', 'l', 'l', 'o', 0x00, 0x01, 0x01, 0x00, FF, 0x00, 0x01, 0x00},
                Tuple.from(new UUID(0xba5eba11, 0x5ca1ab1e)), new byte[]{0x30, FF, FF, FF, FF, (byte)0xba, 0x5e, (byte)0xba, 0x11, 0x00, 0x00, 0x00, 0x00, 0x5c, (byte)0xa1, (byte)0xab, 0x1e},
                Tuple.from(false), new byte[]{0x26},
                Tuple.from(true), new byte[]{0x27},
                Tuple.from((short)0x3019), new byte[]{0x16, 0x30, 0x19},
                Tuple.from((byte)0x03), new byte[]{0x15, 0x03},
                Tuple.from(Versionstamp.complete(new byte[]{(byte)0xaa, (byte)0xbb, (byte)0xcc, (byte)0xdd, (byte)0xee, FF, 0x00, 0x01, 0x02, 0x03})), new byte[]{0x33, (byte)0xaa, (byte)0xbb, (byte)0xcc, (byte)0xdd, (byte)0xee, FF, 0x00, 0x01, 0x02, 0x03, 0x00, 0x00},
                Tuple.from(Versionstamp.complete(new byte[]{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a}, 657)), new byte[]{0x33, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x02, (byte)0x91}
        );
        Tuple bigTuple = new Tuple();
        List<byte[]> serializedForms = new ArrayList<>();
        for(TupleSerialization serialization : serializations) {
            bigTuple = bigTuple.addAll(serialization.tuple);
            serializedForms.add(serialization.serialization);
        }
        serializations.add(new TupleSerialization(bigTuple, ByteArrayUtil.join(null, serializedForms)));

        for(TupleSerialization serialization : serializations) {
            System.out.println("Packing " + serialization.tuple + " (expecting: " + ByteArrayUtil.printable(serialization.serialization) + ")");
            if(serialization.tuple.getPackedSize() != serialization.serialization.length) {
                throw new RuntimeException("Tuple " + serialization.tuple + " packed size " + serialization.tuple.getPackedSize() + " does not match expected packed size " + serialization.serialization.length);
            }
            if(!Arrays.equals(serialization.tuple.pack(), serialization.serialization)) {
                throw new RuntimeException("Tuple " + serialization.tuple + " has serialization " + ByteArrayUtil.printable(serialization.tuple.pack()) +
                        " which does not match expected serialization " + ByteArrayUtil.printable(serialization.serialization));
            }
            if(!Objects.equals(serialization.tuple, Tuple.fromItems(Tuple.fromBytes(serialization.serialization).getItems()))) {
                throw new RuntimeException("Tuple " + serialization.tuple + " does not match deserialization " + Tuple.fromBytes(serialization.serialization) +
                        " which comes from serialization " + ByteArrayUtil.printable(serialization.serialization));
            }
        }
        System.out.println("All tuples had matching serializations");
    }

    private static void comparisons() {
        List<Tuple> tuples = Arrays.asList(
                Tuple.from(0L),
                Tuple.from(BigInteger.ZERO),
                Tuple.from(1L),
                Tuple.from(BigInteger.ONE),
                Tuple.from(-1L),
                Tuple.from(BigInteger.ONE.negate()),
                Tuple.from(Long.MAX_VALUE),
                Tuple.from(Long.MIN_VALUE),
                Tuple.from(BigInteger.valueOf(Long.MIN_VALUE).subtract(BigInteger.ONE)),
                Tuple.from(BigInteger.valueOf(Long.MIN_VALUE).shiftLeft(1)),
                Tuple.from(-0.0f),
                Tuple.from(0.0f),
                Tuple.from(-0.0),
                Tuple.from(0.0),
                Tuple.from(Float.NEGATIVE_INFINITY),
                Tuple.from(Double.NEGATIVE_INFINITY),
                Tuple.from(Float.NaN),
                Tuple.from(Double.NaN),
                Tuple.from(Float.intBitsToFloat(Float.floatToIntBits(Float.NaN) + 1)),
                Tuple.from(Double.longBitsToDouble(Double.doubleToLongBits(Double.NaN) + 1)),
                Tuple.from(Float.intBitsToFloat(Float.floatToIntBits(Float.NaN) + 2)),
                Tuple.from(Double.longBitsToDouble(Double.doubleToLongBits(Double.NaN) + 2)),
                Tuple.from(Float.intBitsToFloat(Float.floatToIntBits(Float.NaN) ^ Integer.MIN_VALUE)),
                Tuple.from(Double.longBitsToDouble(Double.doubleToLongBits(Double.NaN) ^ Long.MIN_VALUE)),
                Tuple.from(Float.intBitsToFloat(Float.floatToIntBits(Float.NaN) ^ Integer.MIN_VALUE + 1)),
                Tuple.from(Double.longBitsToDouble(Double.doubleToLongBits(Double.NaN) ^ Long.MIN_VALUE + 1)),
                Tuple.from(Float.POSITIVE_INFINITY),
                Tuple.from(Double.POSITIVE_INFINITY),
                Tuple.from((Object)new byte[0]),
                Tuple.from((Object)new byte[]{0x00}),
                Tuple.from((Object)new byte[]{0x00, FF}),
                Tuple.from((Object)new byte[]{0x7f}),
                Tuple.from((Object)new byte[]{(byte)0x80}),
                Tuple.from(null, new byte[0]),
                Tuple.from(null, new byte[]{0x00}),
                Tuple.from(null, new byte[]{0x00, FF}),
                Tuple.from(null, new byte[]{0x7f}),
                Tuple.from(null, new byte[]{(byte)0x80}),
                Tuple.from(Tuple.from(null, new byte[0])),
                Tuple.from(Tuple.from(null, new byte[]{0x00})),
                Tuple.from(Tuple.from(null, new byte[]{0x00, FF})),
                Tuple.from(Tuple.from(null, new byte[]{0x7f})),
                Tuple.from(Tuple.from(null, new byte[]{(byte)0x80})),
                Tuple.from("a"),
                Tuple.from("\u03bc\u03ac\u03b8\u03b7\u03bc\u03b1"),
                Tuple.from("\u03bc\u03b1\u0301\u03b8\u03b7\u03bc\u03b1"),
                Tuple.from("\u4e2d\u6587"),
                Tuple.from("\u4e2d\u570B"),
                Tuple.from("\ud83d\udd25"),
                Tuple.from("\ud83e\udd6f"),
                Tuple.from("a\ud83d\udd25"),
                Tuple.from("\ufb49"),
                Tuple.from("\ud83d\udd25\ufb49"),
                Tuple.from("\ud8ed\ud8ed"), // malformed string -- two high surrogates
                Tuple.from("\ud8ed\ud8eda"), // malformed string -- two high surrogates
                Tuple.from("\udd25\udd25"), // malformed string -- two low surrogates
                Tuple.from("a\udd25\ud8ed"), // malformed string -- low surrogate followed by high surrogate
                Tuple.from("\udd25\ud83e\udd6f"), // malformed string -- low surrogate followed by high then low surrogate
                Tuple.from("\udd6f\ud83e\udd6f"), // malformed string -- low surrogate followed by high then low surrogate
                Tuple.from(new UUID(-1, 0)),
                Tuple.from(new UUID(-1, -1)),
                Tuple.from(new UUID(1, -1)),
                Tuple.from(new UUID(1, 1)),
                Tuple.from(false),
                Tuple.from(true),
                Tuple.from(Arrays.asList(0, 1, 2)),
                Tuple.from(Arrays.asList(0, 1), "hello"),
                Tuple.from(Arrays.asList(0, 1), "help"),
                Tuple.from(Versionstamp.complete(new byte[]{0x0a, (byte)0xbb, (byte)0xcc, (byte)0xdd, (byte)0xee, FF, 0x00, 0x01, 0x02, 0x03})),
                Tuple.from(Versionstamp.complete(new byte[]{(byte)0xaa, (byte)0xbb, (byte)0xcc, (byte)0xdd, (byte)0xee, FF, 0x00, 0x01, 0x02, 0x03})),
                Tuple.from(Versionstamp.complete(new byte[]{(byte)0xaa, (byte)0xbb, (byte)0xcc, (byte)0xdd, (byte)0xee, FF, 0x00, 0x01, 0x02, 0x03}, 1)),
                Tuple.from(Versionstamp.complete(new byte[]{(byte)0xaa, (byte)0xbb, (byte)0xcc, (byte)0xdd, (byte)0xee, FF, 0x00, 0x01, 0x02, 0x03}, 0xa101)),
                Tuple.from(Versionstamp.complete(new byte[]{(byte)0xaa, (byte)0xbb, (byte)0xcc, (byte)0xdd, (byte)0xee, FF, 0x00, 0x01, 0x02, 0x03}, 65535))
        );

        for(Tuple t1 : tuples) {
            for(Tuple t2 : tuples) {
                System.out.println("Comparing " + t1 + " and " + t2);
                // Copy the items over to new tuples to avoid having them use the memoized packed representations
                Tuple t1copy = Tuple.fromList(t1.getItems());
                Tuple t2copy = Tuple.fromList(t2.getItems());
                int semanticComparison = t1copy.compareTo(t2copy);
                int byteComparison = ByteArrayUtil.compareUnsigned(t1.pack(), t2.pack());
                if(Integer.signum(semanticComparison) != Integer.signum(byteComparison)) {
                    throw new RuntimeException("Tuple t1 and t2 comparison mismatched: semantic = " + semanticComparison + " while byte order = " + byteComparison);
                }
                int implicitByteComparison = t1.compareTo(t2);
                if(Integer.signum(semanticComparison) != Integer.signum(implicitByteComparison)) {
                    throw new RuntimeException("Tuple t1 and t2 comparison mismatched: semantic = " + semanticComparison + " while implicit byte order = " + implicitByteComparison);
                }
            }
        }
    }

    private static void emptyTuple() {
        Tuple t = new Tuple();
        if(!t.isEmpty()) {
            throw new RuntimeException("empty tuple is not empty");
        }
        if(t.getPackedSize() != 0) {
            throw new RuntimeException("empty tuple packed size is not 0");
        }
        if(t.pack().length != 0) {
            throw new RuntimeException("empty tuple is not packed to the empty byte string");
        }
    }

    private static void addMethods() {
        List<Tuple> baseTuples = Arrays.asList(
                new Tuple(),
                Tuple.from(),
                Tuple.from((Object)null),
                Tuple.from("prefix"),
                Tuple.from("prefix", null),
                Tuple.from(new UUID(100, 1000)),
                Tuple.from(Versionstamp.incomplete(1)),
                Tuple.from(Tuple.from(Versionstamp.incomplete(2))),
                Tuple.from(Collections.singletonList(Versionstamp.incomplete(3)))
        );
        List<Object> toAdd = Arrays.asList(
                null,
                1066L,
                BigInteger.valueOf(1066),
                -3.14f,
                2.71828,
                new byte[]{0x01, 0x02, 0x03},
                new byte[]{0x01, 0x00, 0x02, 0x00, 0x03},
                "hello there",
                "hell\0 there",
                "\ud83d\udd25",
                "\ufb14",
                false,
                true,
                Float.NaN,
                Float.intBitsToFloat(Integer.MAX_VALUE),
                Double.NaN,
                Double.longBitsToDouble(Long.MAX_VALUE),
                Versionstamp.complete(new byte[]{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09}, 100),
                Versionstamp.incomplete(4),
                new UUID(-1, 1),
                Tuple.from((Object)null),
                Tuple.from("suffix", "tuple"),
                Tuple.from("s\0ffix", "tuple"),
                Arrays.asList("suffix", "tuple"),
                Arrays.asList("suffix", null, "tuple"),
                Tuple.from("suffix", null, "tuple"),
                Tuple.from("suffix", Versionstamp.incomplete(4), "tuple"),
                Arrays.asList("suffix", Arrays.asList("inner", Versionstamp.incomplete(5), "tuple"), "tuple")
        );

        for(Tuple baseTuple : baseTuples) {
            for(Object newItem : toAdd) {
                int baseSize = baseTuple.size();
                Tuple freshTuple = Tuple.fromStream(Stream.concat(baseTuple.stream(), Stream.of(newItem)));
                if(freshTuple.size() != baseSize + 1) {
                    throw new RuntimeException("freshTuple size was not one larger than base size");
                }
                Tuple withObjectAdded = baseTuple.addObject(newItem);
                if(withObjectAdded.size() != baseSize + 1) {
                    throw new RuntimeException("withObjectAdded size was not one larger than the base size");
                }
                // Use the appropriate "add" overload.
                Tuple withValueAdded;
                if(newItem == null) {
                    withValueAdded = baseTuple.addObject(null);
                }
                else if(newItem instanceof byte[]) {
                    withValueAdded = baseTuple.add((byte[])newItem);
                }
                else if(newItem instanceof String) {
                    withValueAdded = baseTuple.add((String)newItem);
                }
                else if(newItem instanceof Long) {
                    withValueAdded = baseTuple.add((Long)newItem);
                }
                else if(newItem instanceof BigInteger) {
                    withValueAdded = baseTuple.add((BigInteger)newItem);
                }
                else if(newItem instanceof Float) {
                    withValueAdded = baseTuple.add((Float)newItem);
                }
                else if(newItem instanceof Double) {
                    withValueAdded = baseTuple.add((Double)newItem);
                }
                else if(newItem instanceof Boolean) {
                    withValueAdded = baseTuple.add((Boolean)newItem);
                }
                else if(newItem instanceof UUID) {
                    withValueAdded = baseTuple.add((UUID)newItem);
                }
                else if(newItem instanceof Versionstamp) {
                    withValueAdded = baseTuple.add((Versionstamp)newItem);
                }
                else if(newItem instanceof List<?>) {
                    withValueAdded = baseTuple.add((List<?>)newItem);
                }
                else if(newItem instanceof Tuple) {
                    withValueAdded = baseTuple.add((Tuple)newItem);
                }
                else {
                    throw new RuntimeException("unknown type for tuple serialization " + newItem.getClass());
                }
                // Use Tuple.addAll, which has optimizations if both tuples have been packed already
                // Getting their hash codes memoizes the packed representation.
                Tuple newItemTuple = Tuple.from(newItem);
                baseTuple.hashCode();
                newItemTuple.hashCode();
                Tuple withTupleAddedAll = baseTuple.addAll(newItemTuple);
                Tuple withListAddedAll = baseTuple.addAll(Collections.singletonList(newItem));
                List<Tuple> allTuples = Arrays.asList(freshTuple, withObjectAdded, withValueAdded, withTupleAddedAll, withListAddedAll);

                int basePlusNewSize = baseTuple.getPackedSize() + Tuple.from(newItem).getPackedSize();
                int freshTuplePackedSize = freshTuple.getPackedSize();
                int withObjectAddedPackedSize = withObjectAdded.getPackedSize();
                int withValueAddedPackedSize = withValueAdded.getPackedSize();
                int withTupleAddedAllPackedSize = withTupleAddedAll.getPackedSize();
                int withListAddAllPackedSize = withListAddedAll.getPackedSize();
                if(basePlusNewSize != freshTuplePackedSize || basePlusNewSize != withObjectAddedPackedSize ||
                        basePlusNewSize != withValueAddedPackedSize || basePlusNewSize != withTupleAddedAllPackedSize ||
                        basePlusNewSize != withListAddAllPackedSize) {
                    throw new RuntimeException("packed sizes not equivalent");
                }
                byte[] concatPacked;
                byte[] prefixPacked;
                byte[] freshPacked;
                byte[] objectAddedPacked;
                byte[] valueAddedPacked;
                byte[] tupleAddedAllPacked;
                byte[] listAddedAllPacked;
                if(!baseTuple.hasIncompleteVersionstamp() && !Tuple.from(newItem).hasIncompleteVersionstamp()) {
                    concatPacked = ByteArrayUtil.join(baseTuple.pack(), Tuple.from(newItem).pack());
                    prefixPacked = Tuple.from(newItem).pack(baseTuple.pack());
                    freshPacked = freshTuple.pack();
                    objectAddedPacked = withObjectAdded.pack();
                    valueAddedPacked = withValueAdded.pack();
                    tupleAddedAllPacked = withTupleAddedAll.pack();
                    listAddedAllPacked = withListAddedAll.pack();

                    for(Tuple t : allTuples) {
                        try {
                            t.packWithVersionstamp();
                            throw new RuntimeException("able to pack tuple without incomplete versionstamp using packWithVersionstamp");
                        }
                        catch(IllegalArgumentException e) {
                            // eat
                        }
                    }
                }
                else if(!baseTuple.hasIncompleteVersionstamp() && Tuple.from(newItem).hasIncompleteVersionstamp()) {
                    concatPacked = newItemTuple.packWithVersionstamp(baseTuple.pack());
                    try {
                        prefixPacked = Tuple.from(newItem).packWithVersionstamp(baseTuple.pack());
                    }
                    catch(NullPointerException e) {
                        prefixPacked = Tuple.from(newItem).packWithVersionstamp(baseTuple.pack());
                    }
                    freshPacked = freshTuple.packWithVersionstamp();
                    objectAddedPacked = withObjectAdded.packWithVersionstamp();
                    valueAddedPacked = withValueAdded.packWithVersionstamp();
                    tupleAddedAllPacked = withTupleAddedAll.packWithVersionstamp();
                    listAddedAllPacked = withListAddedAll.packWithVersionstamp();

                    for(Tuple t : allTuples) {
                        try {
                            t.pack();
                            throw new RuntimeException("able to pack tuple with incomplete versionstamp");
                        }
                        catch(IllegalArgumentException e) {
                            // eat
                        }
                    }
                }
                else if(baseTuple.hasIncompleteVersionstamp() && !Tuple.from(newItem).hasIncompleteVersionstamp()) {
                    concatPacked = baseTuple.addAll(Tuple.from(newItem)).packWithVersionstamp();
                    prefixPacked = baseTuple.addObject(newItem).packWithVersionstamp();
                    freshPacked = freshTuple.packWithVersionstamp();
                    objectAddedPacked = withObjectAdded.packWithVersionstamp();
                    valueAddedPacked = withValueAdded.packWithVersionstamp();
                    tupleAddedAllPacked = withTupleAddedAll.packWithVersionstamp();
                    listAddedAllPacked = withListAddedAll.packWithVersionstamp();

                    for(Tuple t : allTuples) {
                        try {
                            t.pack();
                            throw new RuntimeException("able to pack tuple with incomplete versionstamp");
                        }
                        catch(IllegalArgumentException e) {
                            // eat
                        }
                    }
                }
                else {
                    for(Tuple t : allTuples) {
                        try {
                            t.pack();
                            throw new RuntimeException("able to pack tuple with two versionstamps using pack");
                        }
                        catch(IllegalArgumentException e) {
                            // eat
                        }
                        try {
                            t.packWithVersionstamp();
                            throw new RuntimeException("able to pack tuple with two versionstamps using packWithVersionstamp");
                        }
                        catch(IllegalArgumentException e) {
                            // eat
                        }
                        try {
                            t.hashCode();
                            throw new RuntimeException("able to get hash code of tuple with two versionstamps");
                        }
                        catch(IllegalArgumentException e) {
                            // eat
                        }
                    }
                    concatPacked = null;
                    prefixPacked = null;
                    freshPacked = null;
                    objectAddedPacked = null;
                    valueAddedPacked = null;
                    tupleAddedAllPacked = null;
                    listAddedAllPacked = null;
                }
                if(!Arrays.equals(concatPacked, freshPacked) ||
                        !Arrays.equals(freshPacked, prefixPacked) ||
                        !Arrays.equals(freshPacked, objectAddedPacked) ||
                        !Arrays.equals(freshPacked, valueAddedPacked) ||
                        !Arrays.equals(freshPacked, tupleAddedAllPacked) ||
                        !Arrays.equals(freshPacked, listAddedAllPacked)) {
                    throw new RuntimeException("packed values are not concatenation of original packings");
                }
                if(freshPacked != null && freshPacked.length != basePlusNewSize) {
                    throw new RuntimeException("packed length did not match expectation");
                }
                if(freshPacked != null) {
                    if(freshTuple.hashCode() != Arrays.hashCode(freshPacked)) {
                        throw new IllegalArgumentException("hash code does not match fresh packed");
                    }
                    for(Tuple t : allTuples) {
                        if(t.hashCode() != freshTuple.hashCode()) {
                            throw new IllegalArgumentException("hash code mismatch");
                        }
                        if(Tuple.fromItems(t.getItems()).hashCode() != freshTuple.hashCode()) {
                            throw new IllegalArgumentException("hash code mismatch after re-compute");
                        }
                    }
                }
            }
        }
    }

    private static void incompleteVersionstamps() {
        if(FDB.instance().getAPIVersion() < 520) {
            throw new IllegalStateException("cannot run test with API version " + FDB.instance().getAPIVersion());
        }
        // This is a tricky case where there are two tuples with identical representations but different semantics.
        byte[] arr = new byte[0x0100fe];
        Arrays.fill(arr, (byte)0x7f); // The actual value doesn't matter, but it can't be zero.
        Tuple t1 = Tuple.from(arr, Versionstamp.complete(new byte[]{FF, FF, FF, FF, FF, FF, FF, FF, FF, FF}), new byte[]{0x01, 0x01});
        Tuple t2 = Tuple.from(arr, Versionstamp.incomplete());
        if(t1.equals(t2)) {
            throw new RuntimeException("tuples " + t1 + " and " + t2 + " compared equal");
        }
        byte[] bytes1 = t1.pack();
        byte[] bytes2 = t2.packWithVersionstamp();
        if(!Arrays.equals(bytes1, bytes2)) {
            throw new RuntimeException("tuples " + t1 + " and " + t2 + " did not have matching representations");
        }
        if(t1.equals(t2)) {
            throw new RuntimeException("tuples " + t1 + " and " + t2 + " compared equal with memoized packed representations");
        }

        // Make sure position information adjustment works.
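        // At API version 520 and later, packWithVersionstamp() appends the position of the
        // incomplete versionstamp as a 4-byte little-endian integer -- hence the Integer.BYTES
        // terms below -- whereas older API versions wrote a 2-byte offset; see
        // incompleteVersionstamps300() further down.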
        Tuple t3 = Tuple.from(Versionstamp.incomplete(1));
        if(t3.getPackedSize() != 1 + Versionstamp.LENGTH + Integer.BYTES) {
            throw new RuntimeException("incomplete versionstamp has incorrect packed size " + t3.getPackedSize());
        }
        byte[] bytes3 = t3.packWithVersionstamp();
        if(ByteBuffer.wrap(bytes3, bytes3.length - Integer.BYTES, Integer.BYTES).order(ByteOrder.LITTLE_ENDIAN).getInt() != 1) {
            throw new RuntimeException("incomplete versionstamp has incorrect position");
        }
        if(!Tuple.fromBytes(bytes3, 0, bytes3.length - Integer.BYTES).equals(Tuple.from(Versionstamp.incomplete(1)))) {
            throw new RuntimeException("unpacked bytes did not match");
        }
        Subspace subspace = new Subspace(Tuple.from("prefix"));
        byte[] bytes4 = subspace.packWithVersionstamp(t3);
        if(ByteBuffer.wrap(bytes4, bytes4.length - Integer.BYTES, Integer.BYTES).order(ByteOrder.LITTLE_ENDIAN).getInt() != 1 + subspace.getKey().length) {
            throw new RuntimeException("incomplete versionstamp has incorrect position with prefix");
        }
        if(!Tuple.fromBytes(bytes4, 0, bytes4.length - Integer.BYTES).equals(Tuple.from("prefix", Versionstamp.incomplete(1)))) {
            throw new RuntimeException("unpacked bytes with subspace did not match");
        }
        try {
            // At this point, the representation is cached, so an easy bug would be to have it return the already serialized value
            t3.pack();
            throw new RuntimeException("was able to pack versionstamp with incomplete versionstamp");
        } catch(IllegalArgumentException e) {
            // eat
        }

        // Tuples with two incomplete versionstamps somewhere.
        List<Tuple> twoIncompleteList = Arrays.asList(
                Tuple.from(Versionstamp.incomplete(1), Versionstamp.incomplete(2)),
                Tuple.from(Tuple.from(Versionstamp.incomplete(3)), Tuple.from(Versionstamp.incomplete(4))),
                new Tuple().add(Versionstamp.incomplete()).add(Versionstamp.incomplete()),
                new Tuple().add(Versionstamp.incomplete()).add(3L).add(Versionstamp.incomplete()),
                Tuple.from(Tuple.from(Versionstamp.incomplete()), "dummy_string").add(Tuple.from(Versionstamp.incomplete())),
                Tuple.from(Arrays.asList(Versionstamp.incomplete(), "dummy_string")).add(Tuple.from(Versionstamp.incomplete())),
                Tuple.from(Tuple.from(Versionstamp.incomplete()), "dummy_string").add(Collections.singletonList(Versionstamp.incomplete()))
        );
        for(Tuple t : twoIncompleteList) {
            if(!t.hasIncompleteVersionstamp()) {
                throw new RuntimeException("tuple doesn't think it has incomplete versionstamp");
            }
            if(t.getPackedSize() < 2 * (1 + Versionstamp.LENGTH + Integer.BYTES)) {
                throw new RuntimeException("tuple packed size " + t.getPackedSize() + " is smaller than expected");
            }
            try {
                t.pack();
                throw new RuntimeException("no error thrown when packing any incomplete versionstamps");
            }
            catch(IllegalArgumentException e) {
                // eat
            }
            try {
                t.packWithVersionstamp();
                throw new RuntimeException("no error thrown when packing with versionstamp with two incompletes");
            }
            catch(IllegalArgumentException e) {
                // eat
            }
        }
    }

    // Assumes API version < 520
    private static void incompleteVersionstamps300() {
        if(FDB.instance().getAPIVersion() >= 520) {
            throw new IllegalStateException("cannot run test with API version " + FDB.instance().getAPIVersion());
        }
        Tuple t1 = Tuple.from(Versionstamp.complete(new byte[]{FF, FF, FF, FF, FF, FF, FF, FF, FF, FF}), new byte[]{});
        Tuple t2 = Tuple.from(Versionstamp.incomplete());
        if(t1.equals(t2)) {
            throw new RuntimeException("tuples " + t1 + " and " + t2 + " compared equal");
        }
        byte[] bytes1 = t1.pack();
        byte[] bytes2 = t2.packWithVersionstamp();
        if(!Arrays.equals(bytes1, bytes2)) {
            throw new RuntimeException("tuples " + t1 + " and " + t2 + " did not have matching representations");
        }
        if(t1.equals(t2)) {
            throw new RuntimeException("tuples " + t1 + " and " + t2 + " compared equal with memoized packed representations");
        }

        // Make sure position information adjustment works.
        Tuple t3 = Tuple.from(Versionstamp.incomplete(1));
        if(t3.getPackedSize() != 1 + Versionstamp.LENGTH + Short.BYTES) {
            throw new RuntimeException("incomplete versionstamp has incorrect packed size " + t3.getPackedSize());
        }
        byte[] bytes3 = t3.packWithVersionstamp();
        if(ByteBuffer.wrap(bytes3, bytes3.length - Short.BYTES, Short.BYTES).order(ByteOrder.LITTLE_ENDIAN).getShort() != 1) {
            throw new RuntimeException("incomplete versionstamp has incorrect position");
        }
        if(!Tuple.fromBytes(bytes3, 0, bytes3.length - Short.BYTES).equals(Tuple.from(Versionstamp.incomplete(1)))) {
            throw new RuntimeException("unpacked bytes did not match");
        }
        Subspace subspace = new Subspace(Tuple.from("prefix"));
        byte[] bytes4 = subspace.packWithVersionstamp(t3);
        if(ByteBuffer.wrap(bytes4, bytes4.length - Short.BYTES, Short.BYTES).order(ByteOrder.LITTLE_ENDIAN).getShort() != 1 + subspace.getKey().length) {
            throw new RuntimeException("incomplete versionstamp has incorrect position with prefix");
        }
        if(!Tuple.fromBytes(bytes4, 0, bytes4.length - Short.BYTES).equals(Tuple.from("prefix", Versionstamp.incomplete(1)))) {
            throw new RuntimeException("unpacked bytes with subspace did not match");
        }

        // Make sure an offset > 0xFFFF throws an error.
        Tuple t4 = Tuple.from(Versionstamp.incomplete(2));
        byte[] bytes5 = t4.packWithVersionstamp(); // Get bytes memoized.
        if(ByteBuffer.wrap(bytes5, bytes5.length - Short.BYTES, Short.BYTES).order(ByteOrder.LITTLE_ENDIAN).getShort() != 1) {
            throw new RuntimeException("incomplete versionstamp has incorrect position with prefix");
        }
        byte[] bytes6 = t4.packWithVersionstamp(new byte[0xfffe]); // Offset is 0xffff
        if(!Arrays.equals(Arrays.copyOfRange(bytes5, 0, 1 + Versionstamp.LENGTH), Arrays.copyOfRange(bytes6, 0xfffe, 0xffff + Versionstamp.LENGTH))) {
            throw new RuntimeException("area before versionstamp offset did not match");
        }
        if((ByteBuffer.wrap(bytes6, bytes6.length - Short.BYTES, Short.BYTES).order(ByteOrder.LITTLE_ENDIAN).getShort() & 0xffff) != 0xffff) {
            throw new RuntimeException("incomplete versionstamp has incorrect position with prefix");
        }
        try {
            t4.packWithVersionstamp(new byte[0xffff]); // Offset is 0x10000
            throw new RuntimeException("able to pack versionstamp with offset that is too large");
        }
        catch(IllegalArgumentException e) {
            // eat
        }
        // Same as before, but packed representation is not memoized.
        try {
            Tuple.from(Versionstamp.incomplete(3)).packWithVersionstamp(new byte[0xffff]); // Offset is 0x10000
            throw new RuntimeException("able to pack versionstamp with offset that is too large");
        }
        catch(IllegalArgumentException e) {
            // eat
        }
    }

    private static void malformedBytes() {
        List<byte[]> malformedSequences = Arrays.asList(
                new byte[]{0x01, (byte)0xde, (byte)0xad, (byte)0xc0, (byte)0xde}, // no termination character for byte array
                new byte[]{0x01, (byte)0xde, (byte)0xad, 0x00, FF, (byte)0xc0, (byte)0xde}, // no termination character but null in middle
                new byte[]{0x02, 'h', 'e', 'l', 'l', 'o'}, // no termination character for string
                new byte[]{0x02, 'h', 'e', 'l', 0x00, FF, 'l', 'o'}, // no termination character but null in the middle
                // Invalid UTF-8 is decoded as U+FFFD rather than throwing an error
                // new byte[]{0x02, 'u', 't', 'f', 0x08, (byte)0x80, 0x00}, // invalid utf-8 code point start character
                // new byte[]{0x02, 'u', 't', 'f', 0x08, (byte)0xc0, 0x01, 0x00}, // invalid utf-8 code point second character
                new byte[]{0x05, 0x02, 'h', 'e', 'l', 'l', 'o', 0x00}, // no termination character for nested tuple
                new byte[]{0x05, 0x02, 'h', 'e', 'l', 'l', 'o', 0x00, 0x00, FF, 0x02, 't', 'h', 'e', 'r', 'e', 0x00}, // no termination character for nested tuple but null in the middle
                new byte[]{0x16, 0x01}, // integer truncation
                new byte[]{0x12, 0x01}, // integer truncation
                new byte[]{0x1d, 0x09, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08}, // integer truncation
                new byte[]{0x0b, 0x09 ^ FF, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08}, // integer truncation
                new byte[]{0x20, 0x01, 0x02, 0x03}, // float truncation
                new byte[]{0x21, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07}, // double truncation
                new byte[]{0x30, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e}, // UUID truncation
                new byte[]{0x33, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b}, // versionstamp truncation
                new byte[]{FF} // unknown start code
        );
        for(byte[] sequence : malformedSequences) {
            try {
                Tuple t = Tuple.fromBytes(sequence);
                throw new RuntimeException("Able to unpack " + ByteArrayUtil.printable(sequence) + " into " + t);
            }
            catch(IllegalArgumentException e) {
                System.out.println("Error for " + ByteArrayUtil.printable(sequence) + ": " + e.getMessage());
            }
        }

        // Perfectly good byte sequences, but using the offset and length to remove terminal bytes
        List<byte[]> wellFormedSequences = Arrays.asList(
                Tuple.from((Object)new byte[]{0x01, 0x02}).pack(),
                Tuple.from("hello").pack(),
                Tuple.from("hell\0").pack(),
                Tuple.from(1066L).pack(),
                Tuple.from(-1066L).pack(),
                Tuple.from(BigInteger.ONE.shiftLeft(Long.SIZE + 1)).pack(),
                Tuple.from(BigInteger.ONE.shiftLeft(Long.SIZE + 1).negate()).pack(),
                Tuple.from(-3.14f).pack(),
                Tuple.from(2.71828).pack(),
                Tuple.from(new UUID(1066L, 1415L)).pack(),
                Tuple.from(Versionstamp.fromBytes(new byte[]{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c})).pack()
        );
        for(byte[] sequence : wellFormedSequences) {
            try {
                Tuple t = Tuple.fromBytes(sequence, 0, sequence.length - 1);
                throw new RuntimeException("Able to unpack " + ByteArrayUtil.printable(sequence) + " into " + t + " without last character");
            }
            catch(IllegalArgumentException e) {
                System.out.println("Error for " + ByteArrayUtil.printable(sequence) + ": " + e.getMessage());
            }
        }
    }

    private static void offsetsAndLengths() {
        List<Tuple> tuples = Arrays.asList(
                new Tuple(),
                Tuple.from((Object)null),
                Tuple.from(null, new byte[]{0x10, 0x66}),
                Tuple.from("dummy_string"),
                Tuple.from(1066L)
        );
        Tuple allTuples = tuples.stream().reduce(new Tuple(), Tuple::addAll);
        byte[] allTupleBytes = allTuples.pack();

        // Unpack each tuple individually using their lengths
        int offset = 0;
        for(Tuple t : tuples) {
            int length = t.getPackedSize();
            Tuple unpacked = Tuple.fromBytes(allTupleBytes, offset, length);
            if(!unpacked.equals(t)) {
                throw new RuntimeException("unpacked tuple " + unpacked + " does not match serialized tuple " + t);
            }
            offset += length;
        }

        // Unpack successive pairs of tuples.
        offset = 0;
        for(int i = 0; i < tuples.size() - 1; i++) {
            Tuple combinedTuple = tuples.get(i).addAll(tuples.get(i + 1));
            Tuple unpacked = Tuple.fromBytes(allTupleBytes, offset, combinedTuple.getPackedSize());
            if(!unpacked.equals(combinedTuple)) {
                throw new RuntimeException("unpacked tuple " + unpacked + " does not match combined tuple " + combinedTuple);
            }
            offset += tuples.get(i).getPackedSize();
        }

        // Allow an offset to equal the length of the array, but essentially only a zero-length is allowed there.
        Tuple emptyAtEndTuple = Tuple.fromBytes(allTupleBytes, allTupleBytes.length, 0);
        if(!emptyAtEndTuple.isEmpty()) {
            throw new RuntimeException("tuple with no bytes is not empty");
        }

        try {
            Tuple.fromBytes(allTupleBytes, -1, 4);
            throw new RuntimeException("able to give negative offset to fromBytes");
        }
        catch(IllegalArgumentException e) {
            // eat
        }
        try {
            Tuple.fromBytes(allTupleBytes, allTupleBytes.length + 1, 4);
            throw new RuntimeException("able to give offset larger than array to fromBytes");
        }
        catch(IllegalArgumentException e) {
            // eat
        }
        try {
            Tuple.fromBytes(allTupleBytes, 0, -1);
            throw new RuntimeException("able to give negative length to fromBytes");
        }
        catch(IllegalArgumentException e) {
            // eat
        }
        try {
            Tuple.fromBytes(allTupleBytes, 0, allTupleBytes.length + 1);
            throw new RuntimeException("able to give length larger than array to fromBytes");
        }
        catch(IllegalArgumentException e) {
            // eat
        }
        try {
            Tuple.fromBytes(allTupleBytes, allTupleBytes.length / 2, allTupleBytes.length / 2 + 2);
            throw new RuntimeException("able to exceed array length in fromBytes");
        }
        catch(IllegalArgumentException e) {
            // eat
        }
    }

    private static void intoBuffer() {
        Tuple t = Tuple.from("hello", 3.14f, "world");
        ByteBuffer buffer = ByteBuffer.allocate("hello".length() + 2 + Float.BYTES + 1 + "world".length() + 2);
        t.packInto(buffer);
        if(!Arrays.equals(t.pack(), buffer.array())) {
            throw new RuntimeException("buffer and tuple do not match");
        }

        buffer = ByteBuffer.allocate(t.getPackedSize() + 2);
        buffer.order(ByteOrder.LITTLE_ENDIAN);
        t.packInto(buffer);
        if(!Arrays.equals(ByteArrayUtil.join(t.pack(), new byte[]{0x00, 0x00}), buffer.array())) {
            throw new RuntimeException("buffer and tuple do not match");
        }
        if(!buffer.order().equals(ByteOrder.LITTLE_ENDIAN)) {
            throw new RuntimeException("byte order changed");
        }

        buffer = ByteBuffer.allocate(t.getPackedSize() + 2);
        buffer.put((byte)0x01).put((byte)0x02);
        t.packInto(buffer);
        if(!Arrays.equals(t.pack(new byte[]{0x01, 0x02}), buffer.array())) {
            throw new RuntimeException("buffer and tuple do not match");
        }

        buffer = ByteBuffer.allocate(t.getPackedSize() - 1);
        try {
            t.packInto(buffer);
            throw new RuntimeException("able to pack into buffer that was too small");
        }
        catch(BufferOverflowException e) {
            // eat
        }

        Tuple tCopy = Tuple.fromItems(t.getItems()); // remove memoized stuff
        buffer = ByteBuffer.allocate(t.getPackedSize() - 1);
        try {
            tCopy.packInto(buffer);
            throw new RuntimeException("able to pack into buffer that was too small");
        }
        catch(BufferOverflowException e) {
            // eat
        }

        Tuple tWithIncomplete = Tuple.from(Versionstamp.incomplete(3));
        buffer = ByteBuffer.allocate(tWithIncomplete.getPackedSize());
        try {
            tWithIncomplete.packInto(buffer);
            throw new RuntimeException("able to pack incomplete versionstamp into buffer");
        }
        catch(IllegalArgumentException e) {
            // eat
        }
        if(buffer.arrayOffset() != 0) {
            throw new RuntimeException("offset changed after unsuccessful pack with incomplete versionstamp");
        }
    }

    // These should be in ArrayUtilTest, but those can't be run at the moment, so here they go.
    private static void replaceTests() {
        List<byte[]> arrays = Arrays.asList(
                new byte[]{0x01, 0x02, 0x01, 0x02}, new byte[]{0x01, 0x02}, new byte[]{0x03, 0x04}, new byte[]{0x03, 0x04, 0x03, 0x04},
                new byte[]{0x01, 0x02, 0x01, 0x02}, new byte[]{0x01, 0x02}, new byte[]{0x03}, new byte[]{0x03, 0x03},
                new byte[]{0x01, 0x02, 0x01, 0x02}, new byte[]{0x01, 0x02}, new byte[]{0x03, 0x04, 0x05}, new byte[]{0x03, 0x04, 0x05, 0x03, 0x04, 0x05},
                new byte[]{0x00, 0x01, 0x02, 0x00, 0x01, 0x02, 0x00}, new byte[]{0x01, 0x02}, new byte[]{0x03, 0x04, 0x05}, new byte[]{0x00, 0x03, 0x04, 0x05, 0x00, 0x03, 0x04, 0x05, 0x00},
                new byte[]{0x01, 0x01, 0x01, 0x01}, new byte[]{0x01, 0x02}, new byte[]{0x03, 0x04}, new byte[]{0x01, 0x01, 0x01, 0x01},
                new byte[]{0x01, 0x01, 0x01, 0x01}, new byte[]{0x01, 0x02}, new byte[]{0x03}, new byte[]{0x01, 0x01, 0x01, 0x01},
                new byte[]{0x01, 0x01, 0x01, 0x01}, new byte[]{0x01, 0x02}, new byte[]{0x03, 0x04, 0x05}, new byte[]{0x01, 0x01, 0x01, 0x01},
                new byte[]{0x01, 0x01, 0x01, 0x01, 0x01}, new byte[]{0x01, 0x01}, new byte[]{0x03, 0x04, 0x05}, new byte[]{0x03, 0x04, 0x05, 0x03, 0x04, 0x05, 0x01},
                new byte[]{0x01, 0x01, 0x01, 0x01, 0x01}, new byte[]{0x01, 0x01}, new byte[]{0x03, 0x04}, new byte[]{0x03, 0x04, 0x03, 0x04, 0x01},
                new byte[]{0x01, 0x01, 0x01, 0x01, 0x01}, new byte[]{0x01, 0x01}, new byte[]{0x03}, new byte[]{0x03, 0x03, 0x01},
                new byte[]{0x01, 0x02, 0x01, 0x02}, new byte[]{0x01, 0x02}, null, new byte[0],
                new byte[]{0x01, 0x02, 0x01, 0x02}, new byte[]{0x01, 0x02}, new byte[0], new byte[0],
                new byte[]{0x01, 0x02, 0x01, 0x02}, null, new byte[]{0x04}, new byte[]{0x01, 0x02, 0x01, 0x02},
                new byte[]{0x01, 0x02, 0x01, 0x02}, new byte[0], new byte[]{0x04}, new byte[]{0x01, 0x02, 0x01, 0x02},
                null, new byte[]{0x01, 0x02}, new byte[]{0x04}, null
        );
        for(int i = 0; i < arrays.size(); i += 4) {
            byte[] src = arrays.get(i);
            byte[] pattern = arrays.get(i + 1);
            byte[] replacement = arrays.get(i + 2);
            byte[] expectedResults = arrays.get(i + 3);
            byte[] results = ByteArrayUtil.replace(src, pattern, replacement);
            if(!Arrays.equals(results, expectedResults)) {
                throw new RuntimeException("results " + ByteArrayUtil.printable(results) + " did not match expected results " +
                        ByteArrayUtil.printable(expectedResults) + " when replacing " + ByteArrayUtil.printable(pattern) +
                        " with " + ByteArrayUtil.printable(replacement) + " in " + ByteArrayUtil.printable(src));
            }
            if(src != null && src == results) {
                throw new RuntimeException("src and results array are pointer-equal when replacing " + ByteArrayUtil.printable(pattern) +
                        " with " + ByteArrayUtil.printable(replacement) + " in " + ByteArrayUtil.printable(src));
            }
        }

        try {
            ByteArrayUtil.replace(null, 0, 1, new byte[]{0x00}, new byte[]{0x00, FF});
            throw new RuntimeException("able to replace null bytes");
        }
        catch(NullPointerException e) {
            // eat
        }
        try {
            ByteArrayUtil.replace(new byte[]{0x00, 0x01}, -1, 2, new byte[]{0x00}, new byte[]{0x00, FF});
            throw new RuntimeException("able to use negative offset");
        }
        catch(IllegalArgumentException e) {
            // eat
        }
        try {
            ByteArrayUtil.replace(new byte[]{0x00, 0x01}, 3, 2, new byte[]{0x00}, new byte[]{0x00, FF});
            throw new RuntimeException("able to use offset after end of array");
        }
        catch(IllegalArgumentException e) {
            // eat
        }
        try {
            ByteArrayUtil.replace(new byte[]{0x00, 0x01}, 1, -1, new byte[]{0x00}, new byte[]{0x00, FF});
            throw new RuntimeException("able to use negative length");
        }
        catch(IllegalArgumentException e) {
            // eat
        }
        try {
            ByteArrayUtil.replace(new byte[]{0x00, 0x01}, 1, 2, new byte[]{0x00}, new byte[]{0x00, FF});
            throw new RuntimeException("able to give length that exceeds end of the array");
        }
        catch(IllegalArgumentException e) {
            // eat
        }
    }

    private static void runTests(final int reps, TransactionContext db) {
        System.out.println("Running tests...");
        long start = System.currentTimeMillis();

@@ -141,6 +141,21 @@ Any client connected to FoundationDB can access information about its cluster file
* To get the path to the cluster file, read the key ``\xFF\xFF/cluster_file_path``.
* To get the contents of the cluster file, read the key ``\xFF\xFF/connection_string``.

.. _ipv6-support:

IPv6 Support
============

FoundationDB (since v6.1) can accept network connections from clients connecting over IPv6. An IPv6 address/port pair is represented as ``[IP]:PORT``, e.g. "[::1]:4800", "[abcd::dead:beef]:4500".

1) The cluster file can contain a mix of IPv4 and IPv6 addresses. For example::

     description:ID@127.0.0.1:4500,[::1]:4500,...

2) Starting ``fdbserver`` with IPv6::

     $ /path/to/fdbserver -C fdb.cluster -p \[::1\]:4500

.. _adding-machines-to-a-cluster:

Adding machines to a cluster


@@ -53,7 +53,7 @@ Python API
Installation
============

The FoundationDB Python API is compatible with Python 2.7 - 3.6. You will need to have a Python version within this range on your system before the FoundationDB Python API can be installed.
The FoundationDB Python API is compatible with Python 2.7 - 3.7. You will need to have a Python version within this range on your system before the FoundationDB Python API can be installed. Also please note that Python 3.7 no longer bundles a full copy of libffi, which is used for building the _ctypes module on non-macOS UNIX platforms. Hence, if you are using Python 3.7, you should make sure libffi is already installed on your system.

On macOS, the FoundationDB Python API is installed as part of the FoundationDB installation (see :ref:`installing-client-binaries`). On Ubuntu or RHEL/CentOS, you will need to install the FoundationDB Python API manually.

@@ -334,7 +334,7 @@ The ``expire`` subcommand will remove data from a backup prior to some point in
The expiration CUTOFF must be specified by one of the two following arguments:

``--expire_before_timestamp <DATETIME>``
   Specifies the expiration cutoff to DATETIME. Requires a cluster file and will use version/timestamp metadata in the database to convert DATETIME to a database commit version. DATETIME must be in the form "YYYY-MM-DD.HH:MI:SS" in UTC.
   Specifies the expiration cutoff to DATETIME. Requires a cluster file and will use version/timestamp metadata in the database to convert DATETIME to a database commit version. DATETIME must be in the form "YYYY/MM/DD.HH:MI:SS+hhmm", for example "2018/12/31.23:59:59-0800".

``--expire_before_version <VERSION>``
   Specifies the cutoff by a database commit version.

@@ -342,7 +342,7 @@ The expiration CUTOFF must be specified by one of the two following arguments:
Optionally, the user can specify a minimum RESTORABILITY guarantee with one of the following options.

``--restorable_after_timestamp <DATETIME>``
   Specifies that the backup must be restorable to DATETIME and later. Requires a cluster file and will use version/timestamp metadata in the database to convert DATETIME to a database commit version. DATETIME must be in the form "YYYY-MM-DD.HH:MI:SS" in UTC.
   Specifies that the backup must be restorable to DATETIME and later. Requires a cluster file and will use version/timestamp metadata in the database to convert DATETIME to a database commit version. DATETIME must be in the form "YYYY/MM/DD.HH:MI:SS+hhmm", for example "2018/12/31.23:59:59-0800".

``--restorable_after_version <VERSION>``
   Specifies that the backup must be restorable as of VERSION and later.

@@ -446,8 +446,8 @@ The ``start`` command will start a new restore on the specified (or default) tag
``-v <VERSION>``
   Instead of the latest version the backup can be restored to, restore to VERSION.

``--timestamp <YYYY-MM-DD.HH:MI:SS>``
   Instead of the latest version the backup can be restored to, restore to a version from approximately the given timestamp. Requires orig_cluster_file to be specified.
``--timestamp <DATETIME>``
   Instead of the latest version the backup can be restored to, restore to a version from approximately the given timestamp. Requires orig_cluster_file to be specified. DATETIME must be in the form "YYYY/MM/DD.HH:MI:SS+hhmm", for example "2018/12/31.23:59:59-0800".

``--orig_cluster_file <CONNFILE>``
   The cluster file for the original database from which the backup was created. The original database is only needed to convert a --timestamp argument to a database version.

@@ -80,7 +80,8 @@ The following format informally describes the JSON containing the status data. T
   "connected_clients": [
      {
         "address": "127.0.0.1:1234",
         "log_group": "default"
         "log_group": "default",
         "connected_coordinators": 2
      }
   ],
   "count": 1,

@@ -7,22 +7,23 @@ Release Notes

Features
--------

* Improved replication mechanism, a new hierarchical replication technique that significantly reduces the frequency of data loss events even when multiple machines (e.g., fault-tolerant zones in the current code) permanently fail at the same time. `(PR #964) <https://github.com/apple/foundationdb/pull/964>`_.

* Added background actor to remove redundant teams from team collection so that the number of healthy teams is guaranteed not to exceed the desired number. `(PR #1139) <https://github.com/apple/foundationdb/pull/1139>`_

* Show the number of connected coordinators per client in JSON status `(PR #1222) <https://github.com/apple/foundationdb/pull/1222>`_
* Get read version, read, and commit requests are counted and aggregated by server-side latency in configurable latency bands and output in JSON status. `(PR #1084) <https://github.com/apple/foundationdb/pull/1084>`_
* Added configuration option to choose log spilling implementation `(PR #1160) <https://github.com/apple/foundationdb/pull/1160>`_
* Added configuration option to choose log system implementation `(PR #1160) <https://github.com/apple/foundationdb/pull/1160>`_
* Batch priority transactions are now limited separately by ratekeeper and will be throttled at lower levels of cluster saturation. This makes it possible to run a more intense background load at saturation without significantly affecting normal priority transactions. It is still recommended not to run excessive loads at batch priority. `(PR #1198) <https://github.com/apple/foundationdb/pull/1198>`_
* Restore now requires the destination cluster to be specified explicitly to avoid confusion. `(PR #1240) <https://github.com/apple/foundationdb/pull/1240>`_
* Restore target version can now be specified by timestamp if the original cluster is available. `(PR #1240) <https://github.com/apple/foundationdb/pull/1240>`_
* Backup status and describe commands now have a --json output option. `(PR #1248) <https://github.com/apple/foundationdb/pull/1248>`_
* Separate data distribution out from master as a new role. `(PR #1062) <https://github.com/apple/foundationdb/pull/1062>`_
* Separate rate keeper out from data distribution as a new role. `(PR #1176) <https://github.com/apple/foundationdb/pull/1176>`_
* Added a new atomic op `CompareAndClear`. `(PR #1105) <https://github.com/apple/foundationdb/pull/1105>`_
* Added support for IPv6. `(PR #1178) <https://github.com/apple/foundationdb/pull/1178>`_
* FDB can now simultaneously listen to TLS and unencrypted ports to facilitate smoother migration to TLS. `(PR #1157) <https://github.com/apple/foundationdb/pull/1157>`_
* Added `DISABLE_POSIX_KERNEL_AIO` knob to fall back to libeio instead of kernel async I/O (KAIO) for systems that do not support KAIO or the O_DIRECT flag. `(PR #1283) <https://github.com/apple/foundationdb/pull/1283>`_

Performance
-----------

@@ -33,7 +34,8 @@ Fixes
-----

* Python: Creating a ``SingleFloat`` for the tuple layer didn't work with integers. `(PR #1216) <https://github.com/apple/foundationdb/pull/1216>`_
* Added `DISABLE_POSIX_KERNEL_AIO` knob to fall back to libeio instead of kernel async I/O (KAIO) for systems that do not support KAIO or the O_DIRECT flag. `(PR #1283) <https://github.com/apple/foundationdb/pull/1283>`_
* In some cases, calling ``OnError`` with a non-retryable error would partially reset a transaction. As of API version 610, the transaction will no longer be reset in these cases and will instead put the transaction into an error state. `(PR #1298) <https://github.com/apple/foundationdb/pull/1298>`_
* Standardized datetime string format across all backup and restore command options and outputs. `(PR #1248) <https://github.com/apple/foundationdb/pull/1248>`_

Status
------

@@ -48,6 +50,10 @@ Bindings
* Java: Deprecated ``FDB.createCluster`` and ``Cluster``. The preferred way to get a ``Database`` is by using ``FDB.open``, which should work in both new and old API versions. `(PR #942) <https://github.com/apple/foundationdb/pull/942>`_
* Java: Removed ``Cluster(long cPtr, Executor executor)`` constructor. This is API breaking for any code that has subclassed the ``Cluster`` class and is not protected by API versioning. `(PR #942) <https://github.com/apple/foundationdb/pull/942>`_
* Java: Several methods relevant to read-only transactions have been moved into the ``ReadTransaction`` interface.
* Java: Tuples now cache previous hash codes and equality checking no longer requires packing the underlying Tuples. `(PR #1166) <https://github.com/apple/foundationdb/pull/1166>`_
* Java: Tuple performance has been improved to use fewer allocations when packing and unpacking. `(Issue #1206) <https://github.com/apple/foundationdb/issues/1206>`_
* Java: Unpacking a Tuple with a byte array or string that is missing the end-of-string character now throws an error. `(Issue #671) <https://github.com/apple/foundationdb/issues/671>`_
* Java: Unpacking a Tuple constrained to a subset of the underlying array now throws an error when it encounters a truncated integer. `(Issue #672) <https://github.com/apple/foundationdb/issues/672>`_
* Ruby: Removed ``FDB.init``, ``FDB.create_cluster``, and ``FDB.Cluster``. ``FDB.open`` no longer accepts a ``database_name`` parameter. `(PR #942) <https://github.com/apple/foundationdb/pull/942>`_
* Golang: Deprecated ``fdb.StartNetwork``, ``fdb.Open``, ``fdb.MustOpen``, and ``fdb.CreateCluster`` and added ``fdb.OpenDatabase`` and ``fdb.MustOpenDatabase``. The preferred way to start the network and get a ``Database`` is by using ``FDB.OpenDatabase`` or ``FDB.OpenDefault``. `(PR #942) <https://github.com/apple/foundationdb/pull/942>`_
* Flow: Removed ``API::createCluster`` and ``Cluster`` and added ``API::createDatabase``. The new way to get a ``Database`` is by using ``API::createDatabase``. `(PR #942) <https://github.com/apple/foundationdb/pull/942>`_ `(PR #1215) <https://github.com/apple/foundationdb/pull/1215>`_

@@ -57,6 +63,7 @@ Bindings
* Flow: Changed ``Transaction::setVersion`` to ``Transaction::setReadVersion``. `(PR #1215) <https://github.com/apple/foundationdb/pull/1215>`_
* Flow: On update to this version of the Flow bindings, client code will fail to build due to the changes in the API, irrespective of the API version used. Client code must be updated to use the new bindings API. These changes affect the bindings only and won't impact compatibility with different versions of the cluster. `(PR #1215) <https://github.com/apple/foundationdb/pull/1215>`_
* Golang: Added ``fdb.Printable`` to print a human-readable string for a given byte array. Add ``Key.String()``, which converts the ``Key`` to a ``string`` using the ``Printable`` function. `(PR #1010) <https://github.com/apple/foundationdb/pull/1010>`_
* Golang: Tuples now support ``Versionstamp`` operations. `(PR #1187) <https://github.com/apple/foundationdb/pull/1187>`_
* Python: Python signal handling didn't work when waiting on a future. In particular, pressing Ctrl-C would not successfully interrupt the program. `(PR #1138) <https://github.com/apple/foundationdb/pull/1138>`_

Other Changes

@@ -29,10 +29,52 @@ This will configure the new cluster to communicate with TLS.

.. note:: Depending on your operating system, version and configuration, there may be a firewall in place that prevents external access to certain ports. If necessary, please consult the appropriate documentation for your OS and ensure that all machines in your cluster can reach the ports configured in your :ref:`configuration file <foundationdb-conf>`.

.. _converting-existing-cluster:
.. _converting-existing-cluster-after-6.1:

Converting an existing cluster to use TLS
=========================================
Converting an existing cluster to use TLS (since v6.1)
======================================================

Since version 6.1, FoundationDB clusters can be converted to TLS without downtime. A FoundationDB server can listen for TLS and unencrypted traffic simultaneously on two separate ports. As a result, FDB clusters can live migrate to TLS:

1) Restart each FoundationDB server individually, but with an additional listen address for TLS traffic::

     /path/to/fdbserver -C fdb.cluster -p 127.0.0.1:4500 -p 127.0.0.1:4600:tls

   Since the server still listens for unencrypted traffic and the cluster file still contains the old address, the rest of the processes will be able to talk to this new process.

2) Once all processes are listening for both TLS and unencrypted traffic, switch one or more coordinators to use TLS. For example, if the old coordinator list was ``127.0.0.1:4500,127.0.0.1:4501,127.0.0.1:4502``, the new one would be something like ``127.0.0.1:4600:tls,127.0.0.1:4501,127.0.0.1:4502``. Switching a few coordinators to TLS at a time allows a smoother migration and a window to find clients that do not yet have TLS configured. The number of coordinators each client can connect to can be seen via ``fdbstatus`` (look for the ``connected_coordinators`` field in ``clients``)::

     "clients" : {
        "count" : 2,
        "supported_versions" : [
           {
              "client_version" : "6.1.0",
              "connected_clients" : [
                 {
                    "address" : "127.0.0.1:42916",
                    "connected_coordinators": 3,
                    "log_group" : "default"
                 },
                 {
                    "address" : "127.0.0.1:42918",
                    "connected_coordinators": 2,
                    "log_group" : "default"
                 }
              ]
           }, ...
        ]
     }

3) If there exists a client (e.g., the client 127.0.0.1:42918 in the above example) that cannot connect to all coordinators after a coordinator is switched to TLS, it means that client has not set up its TLS configuration correctly. The system operator should notify the client's owner to correct it; otherwise, once all coordinators are switched to TLS ports, the client will lose its connection.

4) Repeat (2) and (3) until all the addresses in the coordinator list are TLS.

5) Restart each FoundationDB server, but only with one public address that listens to TLS traffic only.

.. _converting-existing-cluster-before-6.1:

Converting an existing cluster to use TLS (< v6.1)
==================================================

Enabling TLS on an existing (non-TLS) cluster cannot be accomplished without downtime because all processes must have TLS enabled to communicate. At startup, each server process enables TLS if the addresses in its cluster file are TLS-enabled. As a result, server processes must be stopped and restarted to convert them to use TLS. To convert the cluster to TLS in the most conservative way:

@@ -94,7 +94,7 @@ enum {
    OPT_EXPIRE_BEFORE_VERSION, OPT_EXPIRE_BEFORE_DATETIME, OPT_EXPIRE_DELETE_BEFORE_DAYS,
    OPT_EXPIRE_RESTORABLE_AFTER_VERSION, OPT_EXPIRE_RESTORABLE_AFTER_DATETIME, OPT_EXPIRE_MIN_RESTORABLE_DAYS,
    OPT_BASEURL, OPT_BLOB_CREDENTIALS, OPT_DESCRIBE_DEEP, OPT_DESCRIBE_TIMESTAMPS,
    OPT_DUMP_BEGIN, OPT_DUMP_END,
    OPT_DUMP_BEGIN, OPT_DUMP_END, OPT_JSON,

    // Backup and Restore constants
    OPT_TAGNAME, OPT_BACKUPKEYS, OPT_WAITFORDONE,

@@ -251,6 +251,7 @@ CSimpleOpt::SOption g_rgBackupStatusOptions[] = {
    { OPT_HELP, "-h", SO_NONE },
    { OPT_HELP, "--help", SO_NONE },
    { OPT_DEVHELP, "--dev-help", SO_NONE },
    { OPT_JSON, "--json", SO_NONE},
#ifndef TLS_DISABLED
    TLS_OPTION_FLAGS
#endif

@@ -470,6 +471,7 @@ CSimpleOpt::SOption g_rgBackupDescribeOptions[] = {
    { OPT_KNOB, "--knob_", SO_REQ_SEP },
    { OPT_DESCRIBE_DEEP, "--deep", SO_NONE },
    { OPT_DESCRIBE_TIMESTAMPS, "--version_timestamps", SO_NONE },
    { OPT_JSON, "--json", SO_NONE},
#ifndef TLS_DISABLED
    TLS_OPTION_FLAGS
#endif

@@ -875,7 +877,7 @@ static void printBackupUsage(bool devhelp) {
        " File containing blob credentials in JSON format. Can be specified multiple times for multiple files. See below for more details.\n");
    printf(" --expire_before_timestamp DATETIME\n"
        " Datetime cutoff for expire operations. Requires a cluster file and will use version/timestamp metadata\n"
        " in the database to obtain a cutoff version very close to the timestamp given in YYYY-MM-DD.HH:MI:SS format (UTC).\n");
        " in the database to obtain a cutoff version very close to the timestamp given in %s.\n", BackupAgentBase::timeFormat().c_str());
    printf(" --expire_before_version VERSION\n"
        " Version cutoff for expire operations. Deletes data files containing no data at or after VERSION.\n");
    printf(" --delete_before_days NUM_DAYS\n"

@@ -953,7 +955,7 @@ static void printRestoreUsage(bool devhelp ) {
    printf(TLS_HELP);
#endif
    printf(" -v DBVERSION The version at which the database will be restored.\n");
    printf(" --timestamp Instead of a numeric version, use this to specify a timestamp in YYYY-MM-DD.HH:MI:SS format (UTC)\n");
    printf(" --timestamp Instead of a numeric version, use this to specify a timestamp in %s\n", BackupAgentBase::timeFormat().c_str());
    printf(" and it will be converted to a version from that time using metadata in orig_cluster_file.\n");
    printf(" --orig_cluster_file CONNFILE\n");
    printf(" The cluster file for the original database from which the backup was created. The original database\n");

@@ -1296,8 +1298,8 @@ ACTOR Future<std::string> getLayerStatus(Reference<ReadYourWritesTransaction> tr
    tagRoot.create("current_status") = statusText;
    tagRoot.create("last_restorable_version") = tagLastRestorableVersions[j].get();
    tagRoot.create("last_restorable_seconds_behind") = last_restorable_seconds_behind;
    tagRoot.create("running_backup") = (status == BackupAgentBase::STATE_DIFFERENTIAL || status == BackupAgentBase::STATE_BACKUP);
    tagRoot.create("running_backup_is_restorable") = (status == BackupAgentBase::STATE_DIFFERENTIAL);
    tagRoot.create("running_backup") = (status == BackupAgentBase::STATE_RUNNING_DIFFERENTIAL || status == BackupAgentBase::STATE_RUNNING);
    tagRoot.create("running_backup_is_restorable") = (status == BackupAgentBase::STATE_RUNNING_DIFFERENTIAL);
    tagRoot.create("range_bytes_written") = tagRangeBytes[j].get();
    tagRoot.create("mutation_log_bytes_written") = tagLogBytes[j].get();
    tagRoot.create("mutation_stream_id") = backupTagUids[j].toString();

@@ -1340,8 +1342,8 @@ ACTOR Future<std::string> getLayerStatus(Reference<ReadYourWritesTransaction> tr
    BackupAgentBase::enumState status = (BackupAgentBase::enumState)backupStatus[i].get();

    JSONDoc tagRoot = tagsRoot.create(tagName);
    tagRoot.create("running_backup") = (status == BackupAgentBase::STATE_DIFFERENTIAL || status == BackupAgentBase::STATE_BACKUP);
    tagRoot.create("running_backup_is_restorable") = (status == BackupAgentBase::STATE_DIFFERENTIAL);
    tagRoot.create("running_backup") = (status == BackupAgentBase::STATE_RUNNING_DIFFERENTIAL || status == BackupAgentBase::STATE_RUNNING);
    tagRoot.create("running_backup_is_restorable") = (status == BackupAgentBase::STATE_RUNNING_DIFFERENTIAL);
    tagRoot.create("range_bytes_written") = tagRangeBytesDR[i].get();
    tagRoot.create("mutation_log_bytes_written") = tagLogBytesDR[i].get();
    tagRoot.create("mutation_stream_id") = drTagUids[i].toString();

@@ -1748,12 +1750,12 @@ ACTOR Future<Void> statusDBBackup(Database src, Database dest, std::string tagNa
    return Void();
}

ACTOR Future<Void> statusBackup(Database db, std::string tagName, bool showErrors) {
ACTOR Future<Void> statusBackup(Database db, std::string tagName, bool showErrors, bool json) {
    try
    {
        state FileBackupAgent backupAgent;

        std::string statusText = wait(backupAgent.getStatus(db, showErrors, tagName));
        std::string statusText = wait(json ? backupAgent.getStatusJSON(db, tagName) : backupAgent.getStatus(db, showErrors, tagName));
        printf("%s\n", statusText.c_str());
    }
    catch (Error& e) {

@@ -2163,13 +2165,13 @@ ACTOR Future<Void> deleteBackupContainer(const char *name, std::string destinati
    return Void();
}

ACTOR Future<Void> describeBackup(const char *name, std::string destinationContainer, bool deep, Optional<Database> cx) {
ACTOR Future<Void> describeBackup(const char *name, std::string destinationContainer, bool deep, Optional<Database> cx, bool json) {
    try {
        Reference<IBackupContainer> c = openBackupContainer(name, destinationContainer);
        state BackupDescription desc = wait(c->describeBackup(deep));
        if(cx.present())
            wait(desc.resolveVersionTimes(cx.get()));
        printf("%s\n", desc.toString().c_str());
        printf("%s\n", (json ? desc.toJSON() : desc.toString()).c_str());
    }
    catch (Error& e) {
        if(e.code() == error_code_actor_cancelled)

@@ -2685,6 +2687,7 @@ int main(int argc, char* argv[]) {
    Version dumpEnd = std::numeric_limits<Version>::max();
    std::string restoreClusterFileDest;
    std::string restoreClusterFileOrig;
    bool jsonOutput = false;

    BackupModifyOptions modifyOptions;

@@ -2998,6 +3001,9 @@ int main(int argc, char* argv[]) {
            case OPT_DUMP_END:
                dumpEnd = parseVersion(args->OptionArg());
                break;
            case OPT_JSON:
                jsonOutput = true;
                break;
        }
    }

@@ -3308,7 +3314,7 @@ int main(int argc, char* argv[]) {
    case BACKUP_STATUS:
        if(!initCluster())
            return FDB_EXIT_ERROR;
        f = stopAfter( statusBackup(db, tagName, true) );
        f = stopAfter( statusBackup(db, tagName, true, jsonOutput) );
        break;

    case BACKUP_ABORT:

@@ -3363,7 +3369,7 @@ int main(int argc, char* argv[]) {
        return FDB_EXIT_ERROR;

    // Only pass a database if one may be needed: Describe will look up version timestamps if a cluster file was given, but quietly skip them if not.
    f = stopAfter( describeBackup(argv[0], destinationContainer, describeDeep, describeTimestamps ? Optional<Database>(db) : Optional<Database>()) );
    f = stopAfter( describeBackup(argv[0], destinationContainer, describeDeep, describeTimestamps ? Optional<Database>(db) : Optional<Database>(), jsonOutput) );
    break;

    case BACKUP_LIST:

@@ -38,13 +38,35 @@

class BackupAgentBase : NonCopyable {
public:
    // Time formatter for anything backup or restore related
    static std::string formatTime(int64_t epochs) {
        time_t curTime = (time_t)epochs;
        char buffer[128];
        struct tm timeinfo;
        getLocalTime(&curTime, &timeinfo);
        strftime(buffer, 128, "%Y/%m/%d.%H:%M:%S%z", &timeinfo);
        return buffer;
    }

    static std::string timeFormat() {
        return "YYYY/MM/DD.HH:MI:SS[+/-]HHMM";
    }

    static int64_t parseTime(std::string timestamp) {
        struct tm out;
        if (strptime(timestamp.c_str(), "%Y/%m/%d.%H:%M:%S%z", &out) == nullptr) {
            return -1;
        }
        return (int64_t) mktime(&out);
    }
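
As a quick, self-contained check of the datetime format round trip (a POSIX-only sketch, separate from the diff; ``strptime``/``mktime`` here mirror ``parseTime()`` above, including the caveat that ``mktime`` interprets the parsed fields as local time):

    #include <cstdio>
    #include <ctime>

    int main() {
        const char* fmt = "%Y/%m/%d.%H:%M:%S%z";   // same format string as formatTime()/parseTime()
        struct tm parsed = {};
        if (strptime("2018/12/31.23:59:59-0800", fmt, &parsed) == nullptr)
            return 1;                              // parse failure, like parseTime() returning -1
        time_t epoch = mktime(&parsed);            // interprets the fields as local time
        struct tm local;
        localtime_r(&epoch, &local);
        char buf[128];
        strftime(buf, sizeof(buf), fmt, &local);   // formats back, like formatTime()
        printf("%lld -> %s\n", (long long)epoch, buf);
        return 0;
    }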

    // Type of program being executed
    enum enumActionResult {
        RESULT_SUCCESSFUL = 0, RESULT_ERRORED = 1, RESULT_DUPLICATE = 2, RESULT_UNNEEDED = 3
    };

    enum enumState {
        STATE_ERRORED = 0, STATE_SUBMITTED = 1, STATE_BACKUP = 2, STATE_DIFFERENTIAL = 3, STATE_COMPLETED = 4, STATE_NEVERRAN = 5, STATE_ABORTED = 6, STATE_PARTIALLY_ABORTED = 7
        STATE_ERRORED = 0, STATE_SUBMITTED = 1, STATE_RUNNING = 2, STATE_RUNNING_DIFFERENTIAL = 3, STATE_COMPLETED = 4, STATE_NEVERRAN = 5, STATE_ABORTED = 6, STATE_PARTIALLY_ABORTED = 7
    };

    static const Key keyFolderId;

@@ -90,11 +112,11 @@ public:
        }

        else if (!stateText.compare("has been started")) {
            enState = STATE_BACKUP;
            enState = STATE_RUNNING;
        }

        else if (!stateText.compare("is differential")) {
            enState = STATE_DIFFERENTIAL;
            enState = STATE_RUNNING_DIFFERENTIAL;
        }

        else if (!stateText.compare("has been completed")) {

@@ -112,7 +134,7 @@ public:
        return enState;
    }

    // Convert the status text to an enumerated value
    // Convert the status enum to a text description
    static const char* getStateText(enumState enState)
    {
        const char* stateText;

@@ -128,10 +150,10 @@ public:
        case STATE_SUBMITTED:
            stateText = "has been submitted";
            break;
        case STATE_BACKUP:
        case STATE_RUNNING:
            stateText = "has been started";
            break;
        case STATE_DIFFERENTIAL:
        case STATE_RUNNING_DIFFERENTIAL:
            stateText = "is differential";
            break;
        case STATE_COMPLETED:

@@ -151,6 +173,45 @@ public:
        return stateText;
    }

    // Convert the status enum to a name
    static const char* getStateName(enumState enState)
    {
        const char* s;

        switch (enState)
        {
        case STATE_ERRORED:
            s = "Errored";
            break;
        case STATE_NEVERRAN:
            s = "NeverRan";
            break;
        case STATE_SUBMITTED:
            s = "Submitted";
            break;
        case STATE_RUNNING:
            s = "Running";
            break;
        case STATE_RUNNING_DIFFERENTIAL:
            s = "RunningDifferentially";
            break;
        case STATE_COMPLETED:
            s = "Completed";
            break;
        case STATE_ABORTED:
            s = "Aborted";
            break;
        case STATE_PARTIALLY_ABORTED:
            s = "Aborting";
            break;
        default:
            s = "<undefined>";
            break;
        }

        return s;
    }

    // Determine if the specified state is runnable
    static bool isRunnable(enumState enState)
    {

@@ -159,8 +220,8 @@ public:
        switch (enState)
        {
        case STATE_SUBMITTED:
        case STATE_BACKUP:
        case STATE_DIFFERENTIAL:
        case STATE_RUNNING:
        case STATE_RUNNING_DIFFERENTIAL:
        case STATE_PARTIALLY_ABORTED:
            isRunnable = true;
            break;

@@ -179,6 +240,7 @@ public:
        return defaultTagName;
    }

    // This is only used for automatic backup name generation
    static Standalone<StringRef> getCurrentTime() {
        double t = now();
        time_t curTime = t;

@@ -283,6 +345,7 @@ public:
    }

    Future<std::string> getStatus(Database cx, bool showErrors, std::string tagName);
    Future<std::string> getStatusJSON(Database cx, std::string tagName);

    Future<Version> getLastRestorable(Reference<ReadYourWritesTransaction> tr, Key tagName);
    void setLastRestorable(Reference<ReadYourWritesTransaction> tr, Key tagName, Version version);

@@ -679,6 +742,14 @@ public:
        return configSpace.pack(LiteralStringRef(__FUNCTION__));
    }

    KeyBackedProperty<int64_t> snapshotDispatchLastShardsBehind() {
        return configSpace.pack(LiteralStringRef(__FUNCTION__));
    }

    KeyBackedProperty<Version> snapshotDispatchLastVersion() {
        return configSpace.pack(LiteralStringRef(__FUNCTION__));
    }
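
The ``LiteralStringRef(__FUNCTION__)`` idiom above derives each property's key from the name of its accessor, so every ``KeyBackedProperty`` gets a distinct, stable key with no copy-paste risk. A standalone illustration of the mechanism (hypothetical function, not from the diff):

    #include <cstdio>

    const char* snapshotDispatchLastShardsBehind() { return __FUNCTION__; }

    int main() {
        // Prints "snapshotDispatchLastShardsBehind": the function's own name becomes the key material.
        printf("%s\n", snapshotDispatchLastShardsBehind());
        return 0;
    }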

    Future<Void> initNewSnapshot(Reference<ReadYourWritesTransaction> tr, int64_t intervalSeconds = -1) {
        BackupConfig &copy = *this; // Capture this by value instead of this ptr

@@ -702,6 +773,8 @@ public:
        copy.snapshotBeginVersion().set(tr, beginVersion.get());
        copy.snapshotTargetEndVersion().set(tr, endVersion);
        copy.snapshotRangeFileCount().set(tr, 0);
        copy.snapshotDispatchLastVersion().clear(tr);
        copy.snapshotDispatchLastShardsBehind().clear(tr);

        return Void();
    });

@@ -19,6 +19,8 @@
 */

#include "fdbclient/BackupContainer.h"
#include "fdbclient/BackupAgent.actor.h"
#include "fdbclient/JsonBuilder.h"
#include "flow/Trace.h"
#include "flow/UnitTest.h"
#include "flow/Hash3.h"

@@ -68,15 +70,6 @@ void BackupFileList::toStream(FILE *fout) const {
    }
}

std::string formatTime(int64_t t) {
    time_t curTime = (time_t)t;
    char buffer[128];
    struct tm timeinfo;
    getLocalTime(&curTime, &timeinfo);
    strftime(buffer, 128, "%Y-%m-%d %H:%M:%S", &timeinfo);
    return buffer;
}

Future<Void> fetchTimes(Reference<ReadYourWritesTransaction> tr, std::map<Version, int64_t> *pVersionTimeMap) {
    std::vector<Future<Void>> futures;

@@ -127,7 +120,7 @@ std::string BackupDescription::toString() const {
        if(!versionTimeMap.empty()) {
            auto i = versionTimeMap.find(v);
            if(i != versionTimeMap.end())
                s = format("%lld (%s)", v, formatTime(i->second).c_str());
                s = format("%lld (%s)", v, BackupAgentBase::formatTime(i->second).c_str());
            else
                s = format("%lld (unknown)", v);
        }

@@ -142,8 +135,8 @@ std::string BackupDescription::toString() const {
    };

    for(const KeyspaceSnapshotFile &m : snapshots) {
        info.append(format("Snapshot: startVersion=%s endVersion=%s totalBytes=%lld restorable=%s\n",
            formatVersion(m.beginVersion).c_str(), formatVersion(m.endVersion).c_str(), m.totalSize, m.restorable.orDefault(false) ? "true" : "false"));
        info.append(format("Snapshot: startVersion=%s endVersion=%s totalBytes=%lld restorable=%s expiredPct=%.2f\n",
            formatVersion(m.beginVersion).c_str(), formatVersion(m.endVersion).c_str(), m.totalSize, m.restorable.orDefault(false) ? "true" : "false", m.expiredPct(expiredEndVersion)));
    }

    info.append(format("SnapshotBytes: %lld\n", snapshotBytes));

@@ -169,6 +162,65 @@ std::string BackupDescription::toString() const {
    return info;
}

std::string BackupDescription::toJSON() const {
    JsonBuilderObject doc;

    doc.setKey("SchemaVersion", "1.0.0");
    doc.setKey("URL", url.c_str());
    doc.setKey("Restorable", maxRestorableVersion.present());

    auto formatVersion = [&](Version v) {
        JsonBuilderObject doc;
        doc.setKey("Version", v);
        if(!versionTimeMap.empty()) {
            auto i = versionTimeMap.find(v);
            if(i != versionTimeMap.end()) {
                doc.setKey("Timestamp", BackupAgentBase::formatTime(i->second));
                doc.setKey("EpochSeconds", i->second);
            }
        }
        else if(maxLogEnd.present()) {
            double days = double(v - maxLogEnd.get()) / (CLIENT_KNOBS->CORE_VERSIONSPERSECOND * 24 * 60 * 60);
            doc.setKey("RelativeDays", days);
        }
        return doc;
    };

    JsonBuilderArray snapshotsArray;
    for(const KeyspaceSnapshotFile &m : snapshots) {
        JsonBuilderObject snapshotDoc;
        snapshotDoc.setKey("Start", formatVersion(m.beginVersion));
        snapshotDoc.setKey("End", formatVersion(m.endVersion));
        snapshotDoc.setKey("Restorable", m.restorable.orDefault(false));
        snapshotDoc.setKey("TotalBytes", m.totalSize);
        snapshotDoc.setKey("PercentageExpired", m.expiredPct(expiredEndVersion));
        snapshotsArray.push_back(snapshotDoc);
    }
    doc.setKey("Snapshots", snapshotsArray);

    doc.setKey("TotalSnapshotBytes", snapshotBytes);

    if(expiredEndVersion.present())
        doc.setKey("ExpiredEnd", formatVersion(expiredEndVersion.get()));
    if(unreliableEndVersion.present())
        doc.setKey("UnreliableEnd", formatVersion(unreliableEndVersion.get()));
    if(minLogBegin.present())
        doc.setKey("MinLogBegin", formatVersion(minLogBegin.get()));
    if(contiguousLogEnd.present())
        doc.setKey("ContiguousLogEnd", formatVersion(contiguousLogEnd.get()));
    if(maxLogEnd.present())
        doc.setKey("MaxLogEnd", formatVersion(maxLogEnd.get()));
    if(minRestorableVersion.present())
        doc.setKey("MinRestorablePoint", formatVersion(minRestorableVersion.get()));
    if(maxRestorableVersion.present())
        doc.setKey("MaxRestorablePoint", formatVersion(maxRestorableVersion.get()));

    if(!extendedDetail.empty())
        doc.setKey("ExtendedDetail", extendedDetail);

    return doc.getJson();
}

/* BackupContainerFileSystem implements a backup container which stores files in a nested folder structure.
 * Inheritors must only define methods for writing, reading, deleting, sizing, and listing files.
 *

@@ -1578,20 +1630,11 @@ ACTOR Future<Version> timeKeeperVersionFromDatetime(std::string datetime, Databa
    state KeyBackedMap<int64_t, Version> versionMap(timeKeeperPrefixRange.begin);
    state Reference<ReadYourWritesTransaction> tr = Reference<ReadYourWritesTransaction>(new ReadYourWritesTransaction(db));

    int year, month, day, hour, minute, second;
    if (sscanf(datetime.c_str(), "%d-%d-%d.%d:%d:%d", &year, &month, &day, &hour, &minute, &second) != 6) {
        fprintf(stderr, "ERROR: Incorrect date/time format.\n");
    state int64_t time = BackupAgentBase::parseTime(datetime);
    if(time < 0) {
        fprintf(stderr, "ERROR: Incorrect date/time or format. Format is %s.\n", BackupAgentBase::timeFormat().c_str());
        throw backup_error();
    }
    struct tm expDateTime = {0};
    expDateTime.tm_year = year - 1900;
    expDateTime.tm_mon = month - 1;
    expDateTime.tm_mday = day;
    expDateTime.tm_hour = hour;
    expDateTime.tm_min = minute;
    expDateTime.tm_sec = second;
    expDateTime.tm_isdst = -1;
    state int64_t time = (int64_t) mktime(&expDateTime);

    loop {
        try {

@@ -89,6 +89,21 @@ struct KeyspaceSnapshotFile {
    std::string fileName;
    int64_t totalSize;
    Optional<bool> restorable; // Whether or not the snapshot can be used in a restore, if known
    bool isSingleVersion() const {
        return beginVersion == endVersion;
    }
    double expiredPct(Optional<Version> expiredEnd) const {
        double pctExpired = 0;
        if(expiredEnd.present() && expiredEnd.get() > beginVersion) {
            if(isSingleVersion()) {
                pctExpired = 1;
            }
            else {
                pctExpired = double(std::min(endVersion, expiredEnd.get()) - beginVersion) / (endVersion - beginVersion);
            }
        }
        return pctExpired * 100;
    }
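
A standalone restatement of the ``expiredPct()`` arithmetic with a few worked values (the version numbers are illustrative, and ``Optional`` is dropped for brevity):

    #include <algorithm>
    #include <cstdio>

    double expiredPct(long long begin, long long end, long long expiredEnd) {
        double pct = 0;
        if (expiredEnd > begin)
            pct = (begin == end) ? 1
                                 : double(std::min(end, expiredEnd) - begin) / (end - begin);
        return pct * 100;
    }

    int main() {
        printf("%.1f\n", expiredPct(100, 200, 150)); // 50.0: half of [100, 200) precedes expiredEnd
        printf("%.1f\n", expiredPct(100, 200, 300)); // 100.0: the whole snapshot range is expired
        printf("%.1f\n", expiredPct(100, 200, 100)); // 0.0: expiredEnd has not passed beginVersion
        return 0;
    }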

    // Order by beginVersion, break ties with endVersion
    bool operator< (const KeyspaceSnapshotFile &rhs) const {

@@ -132,6 +147,7 @@ struct BackupDescription {
    std::map<Version, int64_t> versionTimeMap;

    std::string toString() const;
    std::string toJSON() const;
};

struct RestorableFileSet {

@@ -119,13 +119,14 @@ struct OpenDatabaseRequest {
    Arena arena;
    StringRef issues, traceLogGroup;
    VectorRef<ClientVersionRef> supportedVersions;
    int connectedCoordinatorsNum; // Number of coordinators connected by the client
    UID knownClientInfoID;
    ReplyPromise< struct ClientDBInfo > reply;

    template <class Ar>
    void serialize(Ar& ar) {
        ASSERT( ar.protocolVersion() >= 0x0FDB00A400040001LL );
        serializer(ar, issues, supportedVersions, traceLogGroup, knownClientInfoID, reply, arena);
        serializer(ar, issues, supportedVersions, connectedCoordinatorsNum, traceLogGroup, knownClientInfoID, reply, arena);
    }
};

@@ -1376,7 +1376,7 @@ namespace dbBackup {
    try {
        tr.setOption(FDBTransactionOptions::LOCK_AWARE);
        tr.addReadConflictRange(singleKeyRange(sourceStates.pack(DatabaseBackupAgent::keyStateStatus)));
        tr.set(sourceStates.pack(DatabaseBackupAgent::keyStateStatus), StringRef(BackupAgentBase::getStateText(BackupAgentBase::STATE_DIFFERENTIAL)));
        tr.set(sourceStates.pack(DatabaseBackupAgent::keyStateStatus), StringRef(BackupAgentBase::getStateText(BackupAgentBase::STATE_RUNNING_DIFFERENTIAL)));

        Key versionKey = task->params[DatabaseBackupAgent::keyConfigLogUid].withPrefix(task->params[BackupAgentBase::destUid]).withPrefix(backupLatestVersionsPrefix);
        Optional<Key> prevBeginVersion = wait(tr.get(versionKey));

@@ -1418,7 +1418,7 @@ namespace dbBackup {
        wait(success(FinishedFullBackupTaskFunc::addTask(tr, taskBucket, task, TaskCompletionKey::noSignal())));
    }
    else { // Start the writing of logs, if differential
        tr->set(states.pack(DatabaseBackupAgent::keyStateStatus), StringRef(BackupAgentBase::getStateText(BackupAgentBase::STATE_DIFFERENTIAL)));
        tr->set(states.pack(DatabaseBackupAgent::keyStateStatus), StringRef(BackupAgentBase::getStateText(BackupAgentBase::STATE_RUNNING_DIFFERENTIAL)));

        allPartsDone = futureBucket->future(tr);

@@ -1544,7 +1544,7 @@ namespace dbBackup {

    srcTr2->set( Subspace(databaseBackupPrefixRange.begin).get(BackupAgentBase::keySourceTagName).pack(task->params[BackupAgentBase::keyTagName]), logUidValue );
    srcTr2->set( sourceStates.pack(DatabaseBackupAgent::keyFolderId), task->params[DatabaseBackupAgent::keyFolderId] );
    srcTr2->set( sourceStates.pack(DatabaseBackupAgent::keyStateStatus), StringRef(BackupAgentBase::getStateText(BackupAgentBase::STATE_BACKUP)));
    srcTr2->set( sourceStates.pack(DatabaseBackupAgent::keyStateStatus), StringRef(BackupAgentBase::getStateText(BackupAgentBase::STATE_RUNNING)));

    state Key destPath = destUidValue.withPrefix(backupLogKeys.begin);
    // Start logging the mutations for the specified ranges of the tag

@@ -1587,7 +1587,7 @@ namespace dbBackup {

    tr->set(logUidValue.withPrefix(applyMutationsBeginRange.begin), BinaryWriter::toValue(beginVersion, Unversioned()));
    tr->set(logUidValue.withPrefix(applyMutationsEndRange.begin), BinaryWriter::toValue(beginVersion, Unversioned()));
    tr->set(states.pack(DatabaseBackupAgent::keyStateStatus), StringRef(BackupAgentBase::getStateText(BackupAgentBase::STATE_BACKUP)));
    tr->set(states.pack(DatabaseBackupAgent::keyStateStatus), StringRef(BackupAgentBase::getStateText(BackupAgentBase::STATE_RUNNING)));

    state Reference<TaskFuture> kvBackupRangeComplete = futureBucket->future(tr);
    state Reference<TaskFuture> kvBackupComplete = futureBucket->future(tr);

@@ -1791,7 +1791,7 @@ public:
    }

    // Break, if in differential mode (restorable) and stopWhenDone is not enabled
    if ((!stopWhenDone) && (BackupAgentBase::STATE_DIFFERENTIAL == status)) {
    if ((!stopWhenDone) && (BackupAgentBase::STATE_RUNNING_DIFFERENTIAL == status)) {
        return status;
    }

@@ -1954,7 +1954,7 @@ public:
    state int status = wait(backupAgent->getStateValue(dest, destlogUid));

    TraceEvent("DBA_SwitchoverStart").detail("Status", status);
    if (status != BackupAgentBase::STATE_DIFFERENTIAL && status != BackupAgentBase::STATE_COMPLETED) {
    if (status != BackupAgentBase::STATE_RUNNING_DIFFERENTIAL && status != BackupAgentBase::STATE_COMPLETED) {
        throw backup_duplicate();
    }

@@ -2311,10 +2311,10 @@ public:
    case BackupAgentBase::STATE_SUBMITTED:
        statusText += "The DR on tag `" + tagNameDisplay + "' is NOT a complete copy of the primary database (just started).\n";
        break;
    case BackupAgentBase::STATE_BACKUP:
    case BackupAgentBase::STATE_RUNNING:
        statusText += "The DR on tag `" + tagNameDisplay + "' is NOT a complete copy of the primary database.\n";
        break;
    case BackupAgentBase::STATE_DIFFERENTIAL:
    case BackupAgentBase::STATE_RUNNING_DIFFERENTIAL:
        statusText += "The DR on tag `" + tagNameDisplay + "' is a complete copy of the primary database.\n";
        break;
    case BackupAgentBase::STATE_COMPLETED:

@@ -38,7 +38,7 @@ void DatabaseConfiguration::resetInternal() {
    autoDesiredTLogCount = CLIENT_KNOBS->DEFAULT_AUTO_LOGS;
    usableRegions = 1;
    regions.clear();
    tLogPolicy = storagePolicy = remoteTLogPolicy = IRepPolicyRef();
    tLogPolicy = storagePolicy = remoteTLogPolicy = Reference<IReplicationPolicy>();
    remoteDesiredTLogCount = -1;
    remoteTLogReplicationFactor = repopulateRegionAntiQuorum = 0;
}

@@ -48,7 +48,7 @@ void parse( int* i, ValueRef const& v ) {
    *i = atoi(v.toString().c_str());
}

void parseReplicationPolicy(IRepPolicyRef* policy, ValueRef const& v) {
void parseReplicationPolicy(Reference<IReplicationPolicy>* policy, ValueRef const& v) {
    BinaryReader reader(v, IncludeVersion());
    serializeReplicationPolicy(reader, *policy);
}

@@ -91,35 +91,35 @@ void parse( std::vector<RegionInfo>* regions, ValueRef const& v ) {
    info.satelliteTLogReplicationFactor = 1;
    info.satelliteTLogUsableDcs = 1;
    info.satelliteTLogWriteAntiQuorum = 0;
    info.satelliteTLogPolicy = IRepPolicyRef(new PolicyOne());
    info.satelliteTLogPolicy = Reference<IReplicationPolicy>(new PolicyOne());
} else if(satelliteReplication == "one_satellite_double") {
    info.satelliteTLogReplicationFactor = 2;
    info.satelliteTLogUsableDcs = 1;
    info.satelliteTLogWriteAntiQuorum = 0;
    info.satelliteTLogPolicy = IRepPolicyRef(new PolicyAcross(2, "zoneid", IRepPolicyRef(new PolicyOne())));
    info.satelliteTLogPolicy = Reference<IReplicationPolicy>(new PolicyAcross(2, "zoneid", Reference<IReplicationPolicy>(new PolicyOne())));
} else if(satelliteReplication == "one_satellite_triple") {
    info.satelliteTLogReplicationFactor = 3;
    info.satelliteTLogUsableDcs = 1;
    info.satelliteTLogWriteAntiQuorum = 0;
    info.satelliteTLogPolicy = IRepPolicyRef(new PolicyAcross(3, "zoneid", IRepPolicyRef(new PolicyOne())));
    info.satelliteTLogPolicy = Reference<IReplicationPolicy>(new PolicyAcross(3, "zoneid", Reference<IReplicationPolicy>(new PolicyOne())));
} else if(satelliteReplication == "two_satellite_safe") {
    info.satelliteTLogReplicationFactor = 4;
    info.satelliteTLogUsableDcs = 2;
    info.satelliteTLogWriteAntiQuorum = 0;
    info.satelliteTLogPolicy = IRepPolicyRef(new PolicyAcross(2, "dcid", IRepPolicyRef(new PolicyAcross(2, "zoneid", IRepPolicyRef(new PolicyOne())))));
    info.satelliteTLogPolicy = Reference<IReplicationPolicy>(new PolicyAcross(2, "dcid", Reference<IReplicationPolicy>(new PolicyAcross(2, "zoneid", Reference<IReplicationPolicy>(new PolicyOne())))));
    info.satelliteTLogReplicationFactorFallback = 2;
    info.satelliteTLogUsableDcsFallback = 1;
    info.satelliteTLogWriteAntiQuorumFallback = 0;
    info.satelliteTLogPolicyFallback = IRepPolicyRef(new PolicyAcross(2, "zoneid", IRepPolicyRef(new PolicyOne())));
    info.satelliteTLogPolicyFallback = Reference<IReplicationPolicy>(new PolicyAcross(2, "zoneid", Reference<IReplicationPolicy>(new PolicyOne())));
} else if(satelliteReplication == "two_satellite_fast") {
    info.satelliteTLogReplicationFactor = 4;
    info.satelliteTLogUsableDcs = 2;
    info.satelliteTLogWriteAntiQuorum = 2;
    info.satelliteTLogPolicy = IRepPolicyRef(new PolicyAcross(2, "dcid", IRepPolicyRef(new PolicyAcross(2, "zoneid", IRepPolicyRef(new PolicyOne())))));
    info.satelliteTLogPolicy = Reference<IReplicationPolicy>(new PolicyAcross(2, "dcid", Reference<IReplicationPolicy>(new PolicyAcross(2, "zoneid", Reference<IReplicationPolicy>(new PolicyOne())))));
    info.satelliteTLogReplicationFactorFallback = 2;
    info.satelliteTLogUsableDcsFallback = 1;
    info.satelliteTLogWriteAntiQuorumFallback = 0;
    info.satelliteTLogPolicyFallback = IRepPolicyRef(new PolicyAcross(2, "zoneid", IRepPolicyRef(new PolicyOne())));
    info.satelliteTLogPolicyFallback = Reference<IReplicationPolicy>(new PolicyAcross(2, "zoneid", Reference<IReplicationPolicy>(new PolicyOne())));
} else {
    throw invalid_option();
}

@@ -141,20 +141,20 @@ void parse( std::vector<RegionInfo>* regions, ValueRef const& v ) {

void DatabaseConfiguration::setDefaultReplicationPolicy() {
    if(!storagePolicy) {
        storagePolicy = IRepPolicyRef(new PolicyAcross(storageTeamSize, "zoneid", IRepPolicyRef(new PolicyOne())));
        storagePolicy = Reference<IReplicationPolicy>(new PolicyAcross(storageTeamSize, "zoneid", Reference<IReplicationPolicy>(new PolicyOne())));
    }
    if(!tLogPolicy) {
        tLogPolicy = IRepPolicyRef(new PolicyAcross(tLogReplicationFactor, "zoneid", IRepPolicyRef(new PolicyOne())));
        tLogPolicy = Reference<IReplicationPolicy>(new PolicyAcross(tLogReplicationFactor, "zoneid", Reference<IReplicationPolicy>(new PolicyOne())));
    }
    if(remoteTLogReplicationFactor > 0 && !remoteTLogPolicy) {
        remoteTLogPolicy = IRepPolicyRef(new PolicyAcross(remoteTLogReplicationFactor, "zoneid", IRepPolicyRef(new PolicyOne())));
        remoteTLogPolicy = Reference<IReplicationPolicy>(new PolicyAcross(remoteTLogReplicationFactor, "zoneid", Reference<IReplicationPolicy>(new PolicyOne())));
    }
    for(auto& r : regions) {
        if(r.satelliteTLogReplicationFactor > 0 && !r.satelliteTLogPolicy) {
            r.satelliteTLogPolicy = IRepPolicyRef(new PolicyAcross(r.satelliteTLogReplicationFactor, "zoneid", IRepPolicyRef(new PolicyOne())));
            r.satelliteTLogPolicy = Reference<IReplicationPolicy>(new PolicyAcross(r.satelliteTLogReplicationFactor, "zoneid", Reference<IReplicationPolicy>(new PolicyOne())));
        }
        if(r.satelliteTLogReplicationFactorFallback > 0 && !r.satelliteTLogPolicyFallback) {
            r.satelliteTLogPolicyFallback = IRepPolicyRef(new PolicyAcross(r.satelliteTLogReplicationFactorFallback, "zoneid", IRepPolicyRef(new PolicyOne())));
            r.satelliteTLogPolicyFallback = Reference<IReplicationPolicy>(new PolicyAcross(r.satelliteTLogReplicationFactorFallback, "zoneid", Reference<IReplicationPolicy>(new PolicyOne())));
        }
    }
}
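
Reading the ``PolicyAcross``/``PolicyOne`` compositions above from the outside in: ``PolicyAcross(N, key, inner)`` requires the inner policy to be satisfied in N distinct values of the given locality key, and ``PolicyOne`` places a single copy. So triple replication across zones is spelled (exactly the pattern used above):

    Reference<IReplicationPolicy> triple(
        new PolicyAcross(3, "zoneid", Reference<IReplicationPolicy>(new PolicyOne())));

and the two-satellite modes nest a second ``PolicyAcross`` on ``dcid`` outside the ``zoneid`` one.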

@@ -49,13 +49,13 @@ struct RegionInfo {
    Key dcId;
    int32_t priority;

    IRepPolicyRef satelliteTLogPolicy;
    Reference<IReplicationPolicy> satelliteTLogPolicy;
    int32_t satelliteDesiredTLogCount;
    int32_t satelliteTLogReplicationFactor;
    int32_t satelliteTLogWriteAntiQuorum;
    int32_t satelliteTLogUsableDcs;

    IRepPolicyRef satelliteTLogPolicyFallback;
    Reference<IReplicationPolicy> satelliteTLogPolicyFallback;
    int32_t satelliteTLogReplicationFactorFallback;
    int32_t satelliteTLogWriteAntiQuorumFallback;
    int32_t satelliteTLogUsableDcsFallback;

@@ -157,7 +157,7 @@ struct DatabaseConfiguration {
    int32_t autoResolverCount;

    // TLogs
    IRepPolicyRef tLogPolicy;
    Reference<IReplicationPolicy> tLogPolicy;
    int32_t desiredTLogCount;
    int32_t autoDesiredTLogCount;
    int32_t tLogWriteAntiQuorum;

@@ -167,7 +167,7 @@ struct DatabaseConfiguration {
    TLogSpillType tLogSpillType;

    // Storage Servers
    IRepPolicyRef storagePolicy;
    Reference<IReplicationPolicy> storagePolicy;
    int32_t storageTeamSize;
    KeyValueStoreType storageServerStoreType;

@@ -175,7 +175,7 @@ struct DatabaseConfiguration {
    int32_t desiredLogRouterCount;
    int32_t remoteDesiredTLogCount;
    int32_t remoteTLogReplicationFactor;
    IRepPolicyRef remoteTLogPolicy;
    Reference<IReplicationPolicy> remoteTLogPolicy;

    //Data centers
    int32_t usableRegions;

@@ -195,7 +195,7 @@ struct DatabaseConfiguration {
        if(desired == -1) return autoDesiredTLogCount; return desired;
    }
    int32_t getRemoteTLogReplicationFactor() const { if(remoteTLogReplicationFactor == 0) return tLogReplicationFactor; return remoteTLogReplicationFactor; }
    IRepPolicyRef getRemoteTLogPolicy() const { if(remoteTLogReplicationFactor == 0) return tLogPolicy; return remoteTLogPolicy; }
    Reference<IReplicationPolicy> getRemoteTLogPolicy() const { if(remoteTLogReplicationFactor == 0) return tLogPolicy; return remoteTLogPolicy; }

    bool operator == ( DatabaseConfiguration const& rhs ) const {
        const_cast<DatabaseConfiguration*>(this)->makeConfigurationImmutable();

@@ -46,8 +46,12 @@ private:
typedef MultiInterface<ReferencedInterface<StorageServerInterface>> LocationInfo;
typedef MultiInterface<MasterProxyInterface> ProxyInfo;

class DatabaseContext : public ReferenceCounted<DatabaseContext>, NonCopyable {
class DatabaseContext : public ReferenceCounted<DatabaseContext>, public FastAllocated<DatabaseContext>, NonCopyable {
public:
    static DatabaseContext* allocateOnForeignThread() {
        return (DatabaseContext*)DatabaseContext::operator new(sizeof(DatabaseContext));
    }
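
``allocateOnForeignThread()`` above hands back raw, uninitialized storage: calling ``operator new`` directly allocates without running a constructor, so the object can be constructed later on the proper thread. A generic, self-contained sketch of that allocate-now, construct-later pattern (not FDB-specific code):

    #include <new>

    struct Widget {
        int x;
        Widget(int x) : x(x) {}
    };

    int main() {
        void* mem = ::operator new(sizeof(Widget)); // allocation only; no constructor runs
        Widget* w = new (mem) Widget(42);           // construct later, in place
        w->~Widget();                               // placement-new objects need explicit destruction
        ::operator delete(mem);
        return 0;
    }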

    // For internal (fdbserver) use only
    static Database create( Reference<AsyncVar<Optional<ClusterInterface>>> clusterInterface, Reference<ClusterConnectionFile> connFile, LocalityData const& clientLocality );
    static Database create( Reference<AsyncVar<ClientDBInfo>> clientInfo, Future<Void> clientInfoMonitor, LocalityData clientLocality, bool enableLocalityLoadBalance, int taskID=TaskDefaultEndpoint, bool lockAware=false, int apiVersion=Database::API_VERSION_LATEST );

@@ -24,6 +24,7 @@
#include "fdbclient/ManagementAPI.actor.h"
#include "fdbclient/Status.h"
#include "fdbclient/KeyBackedTypes.h"
#include "fdbclient/JsonBuilder.h"

#include <ctime>
#include <climits>

@@ -46,15 +47,10 @@ static std::string versionToString(Optional<Version> version) {
    return "N/A";
}

static std::string timeStampToString(Optional<int64_t> ts) {
    if (!ts.present())
static std::string timeStampToString(Optional<int64_t> epochs) {
    if (!epochs.present())
        return "N/A";
    time_t curTs = ts.get();
    char buffer[128];
    struct tm* timeinfo;
    timeinfo = localtime(&curTs);
    strftime(buffer, 128, "%D %T", timeinfo);
    return std::string(buffer);
    return BackupAgentBase::formatTime(epochs.get());
}

static Future<Optional<int64_t>> getTimestampFromVersion(Optional<Version> ver, Reference<ReadYourWritesTransaction> tr) {

@@ -1281,6 +1277,10 @@ namespace fileBackup {
    static const uint32_t version;

    static struct {
        // Set by Execute, used by Finish
        static TaskParam<int64_t> shardsBehind() {
            return LiteralStringRef(__FUNCTION__);
        }
        // Set by Execute, used by Finish
        static TaskParam<bool> snapshotFinished() {
            return LiteralStringRef(__FUNCTION__);

@@ -1378,8 +1378,11 @@ namespace fileBackup {
    && store(recentReadVersion, tr->getReadVersion())
    && taskBucket->keepRunning(tr, task));

    // If the snapshot batch future key does not exist, create it, set it, and commit
    // Also initialize the target snapshot end version if it is not yet set.
    // If the snapshot batch future key does not exist, this is the first execution of this dispatch task so
    // - create and set the snapshot batch future key
    // - initialize the batch size to 0
    // - initialize the target snapshot end version if it is not yet set
    // - commit
    if(!snapshotBatchFutureKey.present()) {
        snapshotBatchFuture = futureBucket->future(tr);
        config.snapshotBatchFuture().set(tr, snapshotBatchFuture->pack());

@@ -1549,14 +1552,38 @@
    // Calculate number of shards that should be done before the next interval end
    // timeElapsed is between 0 and 1 and represents what portion of the shards we should have completed by now
    double timeElapsed;
    Version snapshotScheduledVersionInterval = snapshotTargetEndVersion - snapshotBeginVersion;
    if(snapshotTargetEndVersion > snapshotBeginVersion)
        timeElapsed = std::min(1.0, (double)(nextDispatchVersion - snapshotBeginVersion) / (snapshotTargetEndVersion - snapshotBeginVersion));
        timeElapsed = std::min(1.0, (double)(nextDispatchVersion - snapshotBeginVersion) / (snapshotScheduledVersionInterval));
    else
        timeElapsed = 1.0;

    state int countExpectedShardsDone = countAllShards * timeElapsed;
    state int countShardsToDispatch = std::max<int>(0, countExpectedShardsDone - countShardsDone);

    // Calculate the number of shards that would have been dispatched by a normal (on-schedule) BackupSnapshotDispatchTask given
    // the dispatch window and the start and expected-end versions of the current snapshot.
    int64_t dispatchWindow = nextDispatchVersion - recentReadVersion;

    // If the scheduled snapshot interval is 0 (such as for an initial, as-fast-as-possible snapshot) then all shards are considered late
    int countShardsExpectedPerNormalWindow;
    if(snapshotScheduledVersionInterval == 0) {
        countShardsExpectedPerNormalWindow = 0;
    }
    else {
        // A dispatchWindow of 0 means the target end version is <= now which also results in all shards being considered late
        countShardsExpectedPerNormalWindow = (double(dispatchWindow) / snapshotScheduledVersionInterval) * countAllShards;
    }

    // countShardsThisDispatch is how many total shards are to be dispatched by this dispatch cycle.
    // Since this dispatch cycle can span many incrementally progressing separate executions of the BackupSnapshotDispatchTask
    // instance, this is calculated as the number of shards dispatched so far in the dispatch batch plus the number of shards
    // the current execution is going to attempt to do.
    int countShardsThisDispatch = countShardsToDispatch + snapshotBatchSize.get();
    // The number of shards 'behind' the snapshot is the count of how many additional shards beyond normal are being dispatched, if any.
    int countShardsBehind = std::max<int64_t>(0, countShardsToDispatch + snapshotBatchSize.get() - countShardsExpectedPerNormalWindow);
    Params.shardsBehind().set(task, countShardsBehind);

    TraceEvent("FileBackupSnapshotDispatchStats")
        .detail("BackupUID", config.getUid())
        .detail("AllShards", countAllShards)
|
||||
|
@ -1564,6 +1591,7 @@ namespace fileBackup {
	.detail("ShardsNotDone", countShardsNotDone)
	.detail("ExpectedShardsDone", countExpectedShardsDone)
	.detail("ShardsToDispatch", countShardsToDispatch)
	.detail("ShardsBehind", countShardsBehind)
	.detail("SnapshotBeginVersion", snapshotBeginVersion)
	.detail("SnapshotTargetEndVersion", snapshotTargetEndVersion)
	.detail("NextDispatchVersion", nextDispatchVersion)

@ -1636,6 +1664,8 @@ namespace fileBackup {
ASSERT(snapshotBatchSize.get() == oldBatchSize);
config.snapshotBatchSize().set(tr, newBatchSize);
snapshotBatchSize = newBatchSize;
config.snapshotDispatchLastShardsBehind().set(tr, Params.shardsBehind().get(task));
config.snapshotDispatchLastVersion().set(tr, tr->getReadVersion().get());
}

state std::vector<Future<Void>> addTaskFutures;

@ -1739,6 +1769,10 @@ namespace fileBackup {
config.snapshotBatchDispatchDoneKey().clear(tr);
config.snapshotBatchSize().clear(tr);

// Update shardsBehind here again in case the execute phase did not actually have to create any shard tasks
config.snapshotDispatchLastShardsBehind().set(tr, Params.shardsBehind().getOrDefault(task, 0));
config.snapshotDispatchLastVersion().set(tr, tr->getReadVersion().get());

state Reference<TaskFuture> snapshotFinishedFuture = task->getDoneFuture(futureBucket);

// If the snapshot is finished, the next task is to write a snapshot manifest, otherwise it's another snapshot dispatch task.

@ -2069,8 +2103,8 @@ namespace fileBackup {
}

// If the backup is restorable but the state is not differential then set state to differential
if(restorableVersion.present() && backupState != BackupAgentBase::STATE_DIFFERENTIAL)
	config.stateEnum().set(tr, BackupAgentBase::STATE_DIFFERENTIAL);
if(restorableVersion.present() && backupState != BackupAgentBase::STATE_RUNNING_DIFFERENTIAL)
	config.stateEnum().set(tr, BackupAgentBase::STATE_RUNNING_DIFFERENTIAL);

// If stopWhenDone is set and there is a restorable version, set the done future and do not create further tasks.
if(stopWhenDone && restorableVersion.present()) {

@ -2305,8 +2339,8 @@ namespace fileBackup {
}

// If the backup is restorable and the state isn't differential then set state to differential
if(restorableVersion.present() && backupState != BackupAgentBase::STATE_DIFFERENTIAL)
	config.stateEnum().set(tr, BackupAgentBase::STATE_DIFFERENTIAL);
if(restorableVersion.present() && backupState != BackupAgentBase::STATE_RUNNING_DIFFERENTIAL)
	config.stateEnum().set(tr, BackupAgentBase::STATE_RUNNING_DIFFERENTIAL);

// Unless we are to stop, start the next snapshot using the default interval
Reference<TaskFuture> snapshotDoneFuture = task->getDoneFuture(futureBucket);

@ -2386,7 +2420,7 @@ namespace fileBackup {
config.startMutationLogs(tr, backupRange, destUidValue);
}

config.stateEnum().set(tr, EBackupState::STATE_BACKUP);
config.stateEnum().set(tr, EBackupState::STATE_RUNNING);

state Reference<TaskFuture> backupFinished = futureBucket->future(tr);

@ -3504,7 +3538,7 @@ public:
// Break, if one of the following is true
// - no longer runnable
// - in differential mode (restorable) and stopWhenDone is not enabled
if( !FileBackupAgent::isRunnable(status) || ((!stopWhenDone) && (BackupAgentBase::STATE_DIFFERENTIAL == status) )) {
if( !FileBackupAgent::isRunnable(status) || ((!stopWhenDone) && (BackupAgentBase::STATE_RUNNING_DIFFERENTIAL == status) )) {

if(pContainer != nullptr) {
	Reference<IBackupContainer> c = wait(config.backupContainer().getOrThrow(tr, false, backup_invalid_info()));
@ -3840,6 +3874,176 @@ public:
	return Void();
}

struct TimestampedVersion {
	Optional<Version> version;
	Optional<int64_t> epochs;

	bool present() const {
		return version.present();
	}

	JsonBuilderObject toJSON() const {
		JsonBuilderObject doc;
		if(version.present()) {
			doc.setKey("Version", version.get());
			if(epochs.present()) {
				doc.setKey("EpochSeconds", epochs.get());
				doc.setKey("Timestamp", timeStampToString(epochs));
			}
		}
		return doc;
	}
};

// Helper actor for generating status
// If f is present, look up epochs using timekeeper and tr, and return a TimestampedVersion
ACTOR static Future<TimestampedVersion> getTimestampedVersion(Reference<ReadYourWritesTransaction> tr, Future<Optional<Version>> f) {
	state TimestampedVersion tv;
	wait(store(tv.version, f));
	if(tv.version.present()) {
		wait(store(tv.epochs, timeKeeperEpochsFromVersion(tv.version.get(), tr)));
	}
	return tv;
}

ACTOR static Future<std::string> getStatusJSON(FileBackupAgent* backupAgent, Database cx, std::string tagName) {
	state Reference<ReadYourWritesTransaction> tr(new ReadYourWritesTransaction(cx));

	loop {
		try {
			state JsonBuilderObject doc;
			doc.setKey("SchemaVersion", "1.0.0");

			tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
			tr->setOption(FDBTransactionOptions::LOCK_AWARE);

			state KeyBackedTag tag = makeBackupTag(tagName);
			state Optional<UidAndAbortedFlagT> uidAndAbortedFlag;
			state Optional<Value> paused;
			state Version recentReadVersion;

			wait( store(paused, tr->get(backupAgent->taskBucket->getPauseKey())) && store(uidAndAbortedFlag, tag.get(tr)) && store(recentReadVersion, tr->getReadVersion()) );

			doc.setKey("BackupAgentsPaused", paused.present());
			doc.setKey("Tag", tag.tagName);

			if(uidAndAbortedFlag.present()) {
				state BackupConfig config(uidAndAbortedFlag.get().first);

				state EBackupState backupState = wait(config.stateEnum().getD(tr, false, EBackupState::STATE_NEVERRAN));
				JsonBuilderObject statusDoc;
				statusDoc.setKey("Name", BackupAgentBase::getStateName(backupState));
				statusDoc.setKey("Description", BackupAgentBase::getStateText(backupState));
				statusDoc.setKey("Completed", backupState == BackupAgentBase::STATE_COMPLETED);
				statusDoc.setKey("Running", BackupAgentBase::isRunnable(backupState));
				doc.setKey("Status", statusDoc);

				state Future<Void> done = Void();

				if(backupState != BackupAgentBase::STATE_NEVERRAN) {
					state Reference<IBackupContainer> bc;
					state TimestampedVersion latestRestorable;

					wait( store(latestRestorable, getTimestampedVersion(tr, config.getLatestRestorableVersion(tr)))
						&& store(bc, config.backupContainer().getOrThrow(tr))
					);

					doc.setKey("Restorable", latestRestorable.present());

					if(latestRestorable.present()) {
						JsonBuilderObject o = latestRestorable.toJSON();
						if(backupState != BackupAgentBase::STATE_COMPLETED) {
							o.setKey("LagSeconds", (recentReadVersion - latestRestorable.version.get()) / CLIENT_KNOBS->CORE_VERSIONSPERSECOND);
						}
						doc.setKey("LatestRestorablePoint", o);
					}
					doc.setKey("DestinationURL", bc->getURL());
				}

				if(backupState == BackupAgentBase::STATE_RUNNING_DIFFERENTIAL || backupState == BackupAgentBase::STATE_RUNNING) {
					state int64_t snapshotInterval;
					state int64_t logBytesWritten;
					state int64_t rangeBytesWritten;
					state bool stopWhenDone;
					state TimestampedVersion snapshotBegin;
					state TimestampedVersion snapshotTargetEnd;
					state TimestampedVersion latestLogEnd;
					state TimestampedVersion latestSnapshotEnd;
					state TimestampedVersion snapshotLastDispatch;
					state Optional<int64_t> snapshotLastDispatchShardsBehind;

					wait( store(snapshotInterval, config.snapshotIntervalSeconds().getOrThrow(tr))
						&& store(logBytesWritten, config.logBytesWritten().getD(tr))
						&& store(rangeBytesWritten, config.rangeBytesWritten().getD(tr))
						&& store(stopWhenDone, config.stopWhenDone().getOrThrow(tr))
						&& store(snapshotBegin, getTimestampedVersion(tr, config.snapshotBeginVersion().get(tr)))
						&& store(snapshotTargetEnd, getTimestampedVersion(tr, config.snapshotTargetEndVersion().get(tr)))
						&& store(latestLogEnd, getTimestampedVersion(tr, config.latestLogEndVersion().get(tr)))
						&& store(latestSnapshotEnd, getTimestampedVersion(tr, config.latestSnapshotEndVersion().get(tr)))
						&& store(snapshotLastDispatch, getTimestampedVersion(tr, config.snapshotDispatchLastVersion().get(tr)))
						&& store(snapshotLastDispatchShardsBehind, config.snapshotDispatchLastShardsBehind().get(tr))
					);

					doc.setKey("StopAfterSnapshot", stopWhenDone);
					doc.setKey("SnapshotIntervalSeconds", snapshotInterval);
					doc.setKey("LogBytesWritten", logBytesWritten);
					doc.setKey("RangeBytesWritten", rangeBytesWritten);

					if(latestLogEnd.present()) {
						doc.setKey("LatestLogEnd", latestLogEnd.toJSON());
					}

					if(latestSnapshotEnd.present()) {
						doc.setKey("LatestSnapshotEnd", latestSnapshotEnd.toJSON());
					}

					JsonBuilderObject snapshot;

					if(snapshotBegin.present()) {
						snapshot.setKey("Begin", snapshotBegin.toJSON());

						if(snapshotTargetEnd.present()) {
							snapshot.setKey("EndTarget", snapshotTargetEnd.toJSON());

							Version interval = snapshotTargetEnd.version.get() - snapshotBegin.version.get();
							snapshot.setKey("IntervalSeconds", interval / CLIENT_KNOBS->CORE_VERSIONSPERSECOND);

							Version elapsed = recentReadVersion - snapshotBegin.version.get();
							double progress = (interval > 0) ? (100.0 * elapsed / interval) : 100;
							snapshot.setKey("ExpectedProgress", progress);
						}

						JsonBuilderObject dispatchDoc = snapshotLastDispatch.toJSON();
						if(snapshotLastDispatchShardsBehind.present()) {
							dispatchDoc.setKey("ShardsBehind", snapshotLastDispatchShardsBehind.get());
						}
						snapshot.setKey("LastDispatch", dispatchDoc);
					}

					doc.setKey("CurrentSnapshot", snapshot);
				}

				KeyBackedMap<int64_t, std::pair<std::string, Version>>::PairsType errors = wait(config.lastErrorPerType().getRange(tr, 0, std::numeric_limits<int>::max(), CLIENT_KNOBS->TOO_MANY));
				JsonBuilderArray errorList;
				for(auto &e : errors) {
					std::string msg = e.second.first;
					Version ver = e.second.second;

					JsonBuilderObject errDoc;
					errDoc.setKey("Message", msg.c_str());
					errDoc.setKey("RelativeSeconds", (ver - recentReadVersion) / CLIENT_KNOBS->CORE_VERSIONSPERSECOND);
					errorList.push_back(errDoc);
				}
				doc.setKey("Errors", errorList);
			}

			return doc.getJson();
		}
		catch (Error &e) {
			wait(tr->onError(e));
		}
	}
}
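The new status document is reachable through the FileBackupAgent::getStatusJSON wrapper declared further below. A minimal caller sketch (the actor name and tag value are illustrative):

	// Sketch: fetch and print the JSON status for a backup tag.
	ACTOR Future<Void> printBackupStatusJSON(Database cx, std::string tagName) {
		state FileBackupAgent backupAgent;
		std::string json = wait(backupAgent.getStatusJSON(cx, tagName));
		printf("%s\n", json.c_str());
		return Void();
	}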
ACTOR static Future<std::string> getStatus(FileBackupAgent* backupAgent, Database cx, bool showErrors, std::string tagName) {
	state Reference<ReadYourWritesTransaction> tr(new ReadYourWritesTransaction(cx));
	state std::string statusText;

@ -3882,11 +4086,11 @@ public:
case BackupAgentBase::STATE_SUBMITTED:
	statusText += "The backup on tag `" + tagName + "' is in progress (just started) to " + bc->getURL() + ".\n";
	break;
case BackupAgentBase::STATE_BACKUP:
case BackupAgentBase::STATE_RUNNING:
	statusText += "The backup on tag `" + tagName + "' is in progress to " + bc->getURL() + ".\n";
	snapshotProgress = true;
	break;
case BackupAgentBase::STATE_DIFFERENTIAL:
case BackupAgentBase::STATE_RUNNING_DIFFERENTIAL:
	statusText += "The backup on tag `" + tagName + "' is restorable but continuing to " + bc->getURL() + ".\n";
	snapshotProgress = true;
	break;

@ -3931,7 +4135,7 @@ public:
);

statusText += format("Snapshot interval is %lld seconds. ", snapshotInterval);
if(backupState == BackupAgentBase::STATE_DIFFERENTIAL)
if(backupState == BackupAgentBase::STATE_RUNNING_DIFFERENTIAL)
	statusText += format("Current snapshot progress target is %3.2f%% (>100%% means the snapshot is supposed to be done)\n", 100.0 * (recentReadVersion - snapshotBeginVersion) / (snapshotTargetEndVersion - snapshotBeginVersion));
else
	statusText += "The initial snapshot is still running.\n";

@ -4076,7 +4280,7 @@ public:
backupConfig = BackupConfig(uidFlag.first);
state EBackupState status = wait(backupConfig.stateEnum().getOrThrow(ryw_tr));

if (status != BackupAgentBase::STATE_DIFFERENTIAL ) {
if (status != BackupAgentBase::STATE_RUNNING_DIFFERENTIAL ) {
	throw backup_duplicate();
}

@ -4208,6 +4412,10 @@ Future<std::string> FileBackupAgent::getStatus(Database cx, bool showErrors, std
	return FileBackupAgentImpl::getStatus(this, cx, showErrors, tagName);
}

Future<std::string> FileBackupAgent::getStatusJSON(Database cx, std::string tagName) {
	return FileBackupAgentImpl::getStatusJSON(this, cx, tagName);
}

Future<Version> FileBackupAgent::getLastRestorable(Reference<ReadYourWritesTransaction> tr, Key tagName) {
	return FileBackupAgentImpl::getLastRestorable(this, tr, tagName);
}

@ -192,4 +192,7 @@ ClientKnobs::ClientKnobs(bool randomize) {
	init( CONSISTENCY_CHECK_RATE_LIMIT_MAX, 50e6 );
	init( CONSISTENCY_CHECK_ONE_ROUND_TARGET_COMPLETION_TIME, 7 * 24 * 60 * 60 ); // 7 days
	init( CONSISTENCY_CHECK_RATE_WINDOW, 1.0 );

	// TLS related
	init( CHECK_CONNECTED_COORDINATOR_NUM_DELAY, 1.0 ); if( randomize && BUGGIFY ) CHECK_CONNECTED_COORDINATOR_NUM_DELAY = g_random->random01() * 60.0; // In seconds
}
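CHECK_CONNECTED_COORDINATOR_NUM_DELAY follows the usual client-knob idiom: a fixed production default that simulation may randomize via BUGGIFY to shake out timing assumptions. A distilled sketch of the idiom (EXAMPLE_DELAY is a hypothetical knob, not one defined here):

	// Sketch: fixed default in production, randomized in simulation.
	init( EXAMPLE_DELAY, 1.0 );
	if( randomize && BUGGIFY )
		EXAMPLE_DELAY = g_random->random01() * 60.0; // anywhere from 0 to 60 seconds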
@ -183,6 +183,9 @@ public:
	int CONSISTENCY_CHECK_ONE_ROUND_TARGET_COMPLETION_TIME;
	int CONSISTENCY_CHECK_RATE_WINDOW;

	// TLS related
	int CHECK_CONNECTED_COORDINATOR_NUM_DELAY;

	ClientKnobs(bool randomize = false);
};

@ -99,42 +99,42 @@ std::map<std::string, std::string> configForToken( std::string const& mode ) {
	}

	std::string redundancy, log_replicas;
	IRepPolicyRef storagePolicy;
	IRepPolicyRef tLogPolicy;
	Reference<IReplicationPolicy> storagePolicy;
	Reference<IReplicationPolicy> tLogPolicy;

	bool redundancySpecified = true;
	if (mode == "single") {
		redundancy="1";
		log_replicas="1";
		storagePolicy = tLogPolicy = IRepPolicyRef(new PolicyOne());
		storagePolicy = tLogPolicy = Reference<IReplicationPolicy>(new PolicyOne());

	} else if(mode == "double" || mode == "fast_recovery_double") {
		redundancy="2";
		log_replicas="2";
		storagePolicy = tLogPolicy = IRepPolicyRef(new PolicyAcross(2, "zoneid", IRepPolicyRef(new PolicyOne())));
		storagePolicy = tLogPolicy = Reference<IReplicationPolicy>(new PolicyAcross(2, "zoneid", Reference<IReplicationPolicy>(new PolicyOne())));
	} else if(mode == "triple" || mode == "fast_recovery_triple") {
		redundancy="3";
		log_replicas="3";
		storagePolicy = tLogPolicy = IRepPolicyRef(new PolicyAcross(3, "zoneid", IRepPolicyRef(new PolicyOne())));
		storagePolicy = tLogPolicy = Reference<IReplicationPolicy>(new PolicyAcross(3, "zoneid", Reference<IReplicationPolicy>(new PolicyOne())));
	} else if(mode == "three_datacenter" || mode == "multi_dc") {
		redundancy="6";
		log_replicas="4";
		storagePolicy = IRepPolicyRef(new PolicyAcross(3, "dcid",
			IRepPolicyRef(new PolicyAcross(2, "zoneid", IRepPolicyRef(new PolicyOne())))
		storagePolicy = Reference<IReplicationPolicy>(new PolicyAcross(3, "dcid",
			Reference<IReplicationPolicy>(new PolicyAcross(2, "zoneid", Reference<IReplicationPolicy>(new PolicyOne())))
		));
		tLogPolicy = IRepPolicyRef(new PolicyAcross(2, "dcid",
			IRepPolicyRef(new PolicyAcross(2, "zoneid", IRepPolicyRef(new PolicyOne())))
		tLogPolicy = Reference<IReplicationPolicy>(new PolicyAcross(2, "dcid",
			Reference<IReplicationPolicy>(new PolicyAcross(2, "zoneid", Reference<IReplicationPolicy>(new PolicyOne())))
		));
	} else if(mode == "three_datacenter_fallback") {
		redundancy="4";
		log_replicas="4";
		storagePolicy = tLogPolicy = IRepPolicyRef(new PolicyAcross(2, "dcid", IRepPolicyRef(new PolicyAcross(2, "zoneid", IRepPolicyRef(new PolicyOne())))));
		storagePolicy = tLogPolicy = Reference<IReplicationPolicy>(new PolicyAcross(2, "dcid", Reference<IReplicationPolicy>(new PolicyAcross(2, "zoneid", Reference<IReplicationPolicy>(new PolicyOne())))));
	} else if(mode == "three_data_hall") {
		redundancy="3";
		log_replicas="4";
		storagePolicy = IRepPolicyRef(new PolicyAcross(3, "data_hall", IRepPolicyRef(new PolicyOne())));
		tLogPolicy = IRepPolicyRef(new PolicyAcross(2, "data_hall",
			IRepPolicyRef(new PolicyAcross(2, "zoneid", IRepPolicyRef(new PolicyOne())))
		storagePolicy = Reference<IReplicationPolicy>(new PolicyAcross(3, "data_hall", Reference<IReplicationPolicy>(new PolicyOne())));
		tLogPolicy = Reference<IReplicationPolicy>(new PolicyAcross(2, "data_hall",
			Reference<IReplicationPolicy>(new PolicyAcross(2, "zoneid", Reference<IReplicationPolicy>(new PolicyOne())))
		));
	} else
		redundancySpecified = false;

@ -154,29 +154,29 @@ std::map<std::string, std::string> configForToken( std::string const& mode ) {
	}

	std::string remote_redundancy, remote_log_replicas;
	IRepPolicyRef remoteTLogPolicy;
	Reference<IReplicationPolicy> remoteTLogPolicy;
	bool remoteRedundancySpecified = true;
	if (mode == "remote_default") {
		remote_redundancy="0";
		remote_log_replicas="0";
		remoteTLogPolicy = IRepPolicyRef();
		remoteTLogPolicy = Reference<IReplicationPolicy>();
	} else if (mode == "remote_single") {
		remote_redundancy="1";
		remote_log_replicas="1";
		remoteTLogPolicy = IRepPolicyRef(new PolicyOne());
		remoteTLogPolicy = Reference<IReplicationPolicy>(new PolicyOne());
	} else if(mode == "remote_double") {
		remote_redundancy="2";
		remote_log_replicas="2";
		remoteTLogPolicy = IRepPolicyRef(new PolicyAcross(2, "zoneid", IRepPolicyRef(new PolicyOne())));
		remoteTLogPolicy = Reference<IReplicationPolicy>(new PolicyAcross(2, "zoneid", Reference<IReplicationPolicy>(new PolicyOne())));
	} else if(mode == "remote_triple") {
		remote_redundancy="3";
		remote_log_replicas="3";
		remoteTLogPolicy = IRepPolicyRef(new PolicyAcross(3, "zoneid", IRepPolicyRef(new PolicyOne())));
		remoteTLogPolicy = Reference<IReplicationPolicy>(new PolicyAcross(3, "zoneid", Reference<IReplicationPolicy>(new PolicyOne())));
	} else if(mode == "remote_three_data_hall") { //FIXME: not tested in simulation
		remote_redundancy="3";
		remote_log_replicas="4";
		remoteTLogPolicy = IRepPolicyRef(new PolicyAcross(2, "data_hall",
			IRepPolicyRef(new PolicyAcross(2, "zoneid", IRepPolicyRef(new PolicyOne())))
		remoteTLogPolicy = Reference<IReplicationPolicy>(new PolicyAcross(2, "data_hall",
			Reference<IReplicationPolicy>(new PolicyAcross(2, "zoneid", Reference<IReplicationPolicy>(new PolicyOne())))
		));
	} else
		remoteRedundancySpecified = false;
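All of these modes compose the same two primitives; the rename above only spells out the Reference type that the old alias hid. A minimal standalone sketch building and serializing a three_data_hall-style storage policy with the new spelling (standalone, outside configForToken):

	// Sketch: compose and serialize a replication policy.
	Reference<IReplicationPolicy> storagePolicy =
		Reference<IReplicationPolicy>(new PolicyAcross(3, "data_hall", Reference<IReplicationPolicy>(new PolicyOne())));

	BinaryWriter policyWriter(IncludeVersion());
	serializeReplicationPolicy(policyWriter, storagePolicy); // same round-trip exercised by testPolicySerialization below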
@ -212,7 +212,7 @@ ConfigurationResult::Type buildConfiguration( std::vector<StringRef> const& mode
	auto p = configKeysPrefix.toString();
	if(!outConf.count(p + "storage_replication_policy") && outConf.count(p + "storage_replicas")) {
		int storageCount = stoi(outConf[p + "storage_replicas"]);
		IRepPolicyRef storagePolicy = IRepPolicyRef(new PolicyAcross(storageCount, "zoneid", IRepPolicyRef(new PolicyOne())));
		Reference<IReplicationPolicy> storagePolicy = Reference<IReplicationPolicy>(new PolicyAcross(storageCount, "zoneid", Reference<IReplicationPolicy>(new PolicyOne())));
		BinaryWriter policyWriter(IncludeVersion());
		serializeReplicationPolicy(policyWriter, storagePolicy);
		outConf[p+"storage_replication_policy"] = policyWriter.toStringRef().toString();

@ -220,7 +220,7 @@ ConfigurationResult::Type buildConfiguration( std::vector<StringRef> const& mode

	if(!outConf.count(p + "log_replication_policy") && outConf.count(p + "log_replicas")) {
		int logCount = stoi(outConf[p + "log_replicas"]);
		IRepPolicyRef logPolicy = IRepPolicyRef(new PolicyAcross(logCount, "zoneid", IRepPolicyRef(new PolicyOne())));
		Reference<IReplicationPolicy> logPolicy = Reference<IReplicationPolicy>(new PolicyAcross(logCount, "zoneid", Reference<IReplicationPolicy>(new PolicyOne())));
		BinaryWriter policyWriter(IncludeVersion());
		serializeReplicationPolicy(policyWriter, logPolicy);
		outConf[p+"log_replication_policy"] = policyWriter.toStringRef().toString();

@ -324,9 +324,17 @@ ClientLeaderRegInterface::ClientLeaderRegInterface( INetwork* local ) {
	getLeader.makeWellKnownEndpoint( WLTOKEN_CLIENTLEADERREG_GETLEADER, TaskCoordination );
}

ACTOR Future<Void> monitorNominee( Key key, ClientLeaderRegInterface coord, AsyncTrigger* nomineeChange, Optional<LeaderInfo> *info, int generation ) {
// A nominee is a worker that a given coordinator currently considers to be the leader.
// This function contacts the coordinator 'coord' to ask whether the worker is considered a leader
// (i.e., whether the worker is a nominee).
ACTOR Future<Void> monitorNominee( Key key, ClientLeaderRegInterface coord, AsyncTrigger* nomineeChange, Optional<LeaderInfo> *info, int generation, Reference<AsyncVar<int>> connectedCoordinatorsNum ) {
	state bool hasCounted = false;
	loop {
		state Optional<LeaderInfo> li = wait( retryBrokenPromise( coord.getLeader, GetLeaderRequest( key, info->present() ? info->get().changeID : UID() ), TaskCoordinationReply ) );
		if (li.present() && !hasCounted && connectedCoordinatorsNum.isValid()) {
			connectedCoordinatorsNum->set(connectedCoordinatorsNum->get() + 1);
			hasCounted = true;
		}
		wait( Future<Void>(Void()) ); // Make sure we weren't cancelled

		TraceEvent("GetLeaderReply").suppressFor(1.0).detail("Coordinator", coord.getLeader.getEndpoint().getPrimaryAddress()).detail("Nominee", li.present() ? li.get().changeID : UID()).detail("Generation", generation);
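The counting pattern above is worth isolating: each monitorNominee increments a shared AsyncVar at most once per generation, so the var converges on the number of reachable coordinators. A distilled sketch of the pattern (checkReachable is a hypothetical probe standing in for the getLeader reply above):

	// Sketch: increment a shared counter at most once, mirroring the hasCounted idiom.
	ACTOR Future<Void> countIfReachable(Reference<AsyncVar<int>> connectedCount) {
		state bool hasCounted = false;
		loop {
			bool reachable = wait( checkReachable() ); // hypothetical probe
			if(reachable && !hasCounted && connectedCount.isValid()) {
				connectedCount->set(connectedCount->get() + 1);
				hasCounted = true;
			}
		}
	}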
@ -401,7 +409,8 @@ struct MonitorLeaderInfo {
	explicit MonitorLeaderInfo( Reference<ClusterConnectionFile> intermediateConnFile ) : intermediateConnFile(intermediateConnFile), hasConnected(false), generation(0) {}
};

ACTOR Future<MonitorLeaderInfo> monitorLeaderOneGeneration( Reference<ClusterConnectionFile> connFile, Reference<AsyncVar<Value>> outSerializedLeaderInfo, MonitorLeaderInfo info ) {
// The leader is the process that the coordinators will elect as the cluster controller.
ACTOR Future<MonitorLeaderInfo> monitorLeaderOneGeneration( Reference<ClusterConnectionFile> connFile, Reference<AsyncVar<Value>> outSerializedLeaderInfo, MonitorLeaderInfo info, Reference<AsyncVar<int>> connectedCoordinatorsNum) {
	state ClientCoordinators coordinators( info.intermediateConnFile );
	state AsyncTrigger nomineeChange;
	state std::vector<Optional<LeaderInfo>> nominees;

@ -410,8 +419,9 @@ ACTOR Future<MonitorLeaderInfo> monitorLeaderOneGeneration( Reference<ClusterCon
	nominees.resize(coordinators.clientLeaderServers.size());

	std::vector<Future<Void>> actors;
	// Ask each coordinator whether it considers the worker to be a leader (i.e., a leader nominee).
	for(int i=0; i<coordinators.clientLeaderServers.size(); i++)
		actors.push_back( monitorNominee( coordinators.clusterKey, coordinators.clientLeaderServers[i], &nomineeChange, &nominees[i], info.generation ) );
		actors.push_back( monitorNominee( coordinators.clusterKey, coordinators.clientLeaderServers[i], &nomineeChange, &nominees[i], info.generation, connectedCoordinatorsNum) );
	allActors = waitForAll(actors);

	loop {

@ -442,11 +452,14 @@ ACTOR Future<MonitorLeaderInfo> monitorLeaderOneGeneration( Reference<ClusterCon
	}
}

ACTOR Future<Void> monitorLeaderInternal( Reference<ClusterConnectionFile> connFile, Reference<AsyncVar<Value>> outSerializedLeaderInfo ) {
ACTOR Future<Void> monitorLeaderInternal( Reference<ClusterConnectionFile> connFile, Reference<AsyncVar<Value>> outSerializedLeaderInfo, Reference<AsyncVar<int>> connectedCoordinatorsNum ) {
	state MonitorLeaderInfo info(connFile);
	loop {
		MonitorLeaderInfo _info = wait( monitorLeaderOneGeneration( connFile, outSerializedLeaderInfo, info) );
		// Reset the connected-coordinator count to 0 so each generation recounts it
		if (connectedCoordinatorsNum.isValid()) connectedCoordinatorsNum->set(0);
		MonitorLeaderInfo _info = wait( monitorLeaderOneGeneration( connFile, outSerializedLeaderInfo, info, connectedCoordinatorsNum) );
		info = _info;
		info.generation++;

	}
}

@ -30,19 +30,19 @@
class ClientCoordinators;

template <class LeaderInterface>
Future<Void> monitorLeader( Reference<ClusterConnectionFile> const& connFile, Reference<AsyncVar<Optional<LeaderInterface>>> const& outKnownLeader );
Future<Void> monitorLeader( Reference<ClusterConnectionFile> const& connFile, Reference<AsyncVar<Optional<LeaderInterface>>> const& outKnownLeader, Reference<AsyncVar<int>> connectedCoordinatorsNum = Reference<AsyncVar<int>>() );
// Monitors the given coordination group's leader election process and provides a best current guess
// of the current leader. If a leader is elected for long enough and communication with a quorum of
// coordinators is possible, eventually outKnownLeader will be that leader's interface.

#pragma region Implementation

Future<Void> monitorLeaderInternal( Reference<ClusterConnectionFile> const& connFile, Reference<AsyncVar<Value>> const& outSerializedLeaderInfo );
Future<Void> monitorLeaderInternal( Reference<ClusterConnectionFile> const& connFile, Reference<AsyncVar<Value>> const& outSerializedLeaderInfo, Reference<AsyncVar<int>> const& connectedCoordinatorsNum );

template <class LeaderInterface>
Future<Void> monitorLeader( Reference<ClusterConnectionFile> const& connFile, Reference<AsyncVar<Optional<LeaderInterface>>> const& outKnownLeader ) {
Future<Void> monitorLeader( Reference<ClusterConnectionFile> const& connFile, Reference<AsyncVar<Optional<LeaderInterface>>> const& outKnownLeader, Reference<AsyncVar<int>> connectedCoordinatorsNum ) {
	Reference<AsyncVar<Value>> serializedInfo( new AsyncVar<Value> );
	Future<Void> m = monitorLeaderInternal( connFile, serializedInfo );
	Future<Void> m = monitorLeaderInternal( connFile, serializedInfo, connectedCoordinatorsNum );
	return m || asyncDeserialize( serializedInfo, outKnownLeader );
}

@ -534,13 +534,14 @@ DatabaseContext::DatabaseContext(

DatabaseContext::DatabaseContext( const Error &err ) : deferredError(err), latencies(1000), readLatencies(1000), commitLatencies(1000), GRVLatencies(1000), mutationsPerCommit(1000), bytesPerCommit(1000) {}

ACTOR static Future<Void> monitorClientInfo( Reference<AsyncVar<Optional<ClusterInterface>>> clusterInterface, Reference<ClusterConnectionFile> ccf, Reference<AsyncVar<ClientDBInfo>> outInfo ) {
ACTOR static Future<Void> monitorClientInfo( Reference<AsyncVar<Optional<ClusterInterface>>> clusterInterface, Reference<ClusterConnectionFile> ccf, Reference<AsyncVar<ClientDBInfo>> outInfo, Reference<AsyncVar<int>> connectedCoordinatorsNumDelayed ) {
	try {
		state Optional<double> incorrectTime;
		loop {
			OpenDatabaseRequest req;
			req.knownClientInfoID = outInfo->get().id;
			req.supportedVersions = VectorRef<ClientVersionRef>(req.arena, networkOptions.supportedVersions);
			req.connectedCoordinatorsNum = connectedCoordinatorsNumDelayed->get();
			req.traceLogGroup = StringRef(req.arena, networkOptions.traceLogGroup);

			ClusterConnectionString fileConnectionString;

@ -571,6 +572,7 @@ ACTOR static Future<Void> monitorClientInfo( Reference<AsyncVar<Optional<Cluster
				if(clusterInterface->get().present())
					TraceEvent("ClientInfo_CCInterfaceChange").detail("CCID", clusterInterface->get().get().id());
			}
			when( wait( connectedCoordinatorsNumDelayed->onChange() ) ) {}
		}
	}
} catch( Error& e ) {

@ -583,10 +585,14 @@ ACTOR static Future<Void> monitorClientInfo( Reference<AsyncVar<Optional<Cluster
	}
}

// Create a database context and monitor the cluster status;
// notify the client when cluster info (e.g., the cluster controller) changes
Database DatabaseContext::create(Reference<AsyncVar<Optional<ClusterInterface>>> clusterInterface, Reference<ClusterConnectionFile> connFile, LocalityData const& clientLocality) {
	Reference<Cluster> cluster(new Cluster(connFile, clusterInterface));
	Reference<AsyncVar<int>> connectedCoordinatorsNum(new AsyncVar<int>(0));
	Reference<AsyncVar<int>> connectedCoordinatorsNumDelayed(new AsyncVar<int>(0));
	Reference<Cluster> cluster(new Cluster(connFile, clusterInterface, connectedCoordinatorsNum));
	Reference<AsyncVar<ClientDBInfo>> clientInfo(new AsyncVar<ClientDBInfo>());
	Future<Void> clientInfoMonitor = monitorClientInfo(clusterInterface, connFile, clientInfo);
	Future<Void> clientInfoMonitor = delayedAsyncVar(connectedCoordinatorsNum, connectedCoordinatorsNumDelayed, CLIENT_KNOBS->CHECK_CONNECTED_COORDINATOR_NUM_DELAY) || monitorClientInfo(clusterInterface, connFile, clientInfo, connectedCoordinatorsNumDelayed);

	return Database(new DatabaseContext(cluster, clientInfo, clientInfoMonitor, LiteralStringRef(""), TaskDefaultEndpoint, clientLocality, true, false));
}
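delayedAsyncVar above debounces the raw coordinator count so the cluster controller is not re-contacted on every transient change. A plausible shape for such a helper, shown only to illustrate the intended semantics (the real implementation is provided by flow's generic actors and may differ):

	// Sketch: propagate 'in' to 'out', re-publishing at most once per 'time' seconds.
	ACTOR template <class T>
	Future<Void> delayedAsyncVarSketch( Reference<AsyncVar<T>> in, Reference<AsyncVar<T>> out, double time ) {
		loop {
			wait( delay( time ) );   // rate-limit propagation
			out->set( in->get() );   // publish the latest value
			wait( in->onChange() );  // sleep until the input changes again
		}
	}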
@ -750,12 +756,22 @@ Reference<ClusterConnectionFile> DatabaseContext::getConnectionFile() {
	return cluster->getConnectionFile();
}

Database Database::createDatabase( Reference<ClusterConnectionFile> connFile, int apiVersion, LocalityData const& clientLocality ) {
	Reference<Cluster> cluster(new Cluster(connFile, apiVersion));
Database Database::createDatabase( Reference<ClusterConnectionFile> connFile, int apiVersion, LocalityData const& clientLocality, DatabaseContext *preallocatedDb ) {
	Reference<AsyncVar<int>> connectedCoordinatorsNum(new AsyncVar<int>(0)); // Number of connected coordinators for the client
	Reference<AsyncVar<int>> connectedCoordinatorsNumDelayed(new AsyncVar<int>(0));
	Reference<Cluster> cluster(new Cluster(connFile, connectedCoordinatorsNum, apiVersion));
	Reference<AsyncVar<ClientDBInfo>> clientInfo(new AsyncVar<ClientDBInfo>());
	Future<Void> clientInfoMonitor = monitorClientInfo(cluster->getClusterInterface(), connFile, clientInfo);
	Future<Void> clientInfoMonitor = delayedAsyncVar(connectedCoordinatorsNum, connectedCoordinatorsNumDelayed, CLIENT_KNOBS->CHECK_CONNECTED_COORDINATOR_NUM_DELAY) || monitorClientInfo(cluster->getClusterInterface(), connFile, clientInfo, connectedCoordinatorsNumDelayed);

	return Database( new DatabaseContext( cluster, clientInfo, clientInfoMonitor, LiteralStringRef(""), TaskDefaultEndpoint, clientLocality, true, false, apiVersion ) );
	DatabaseContext *db;
	if(preallocatedDb) {
		db = new (preallocatedDb) DatabaseContext(cluster, clientInfo, clientInfoMonitor, LiteralStringRef(""), TaskDefaultEndpoint, clientLocality, true, false, apiVersion);
	}
	else {
		db = new DatabaseContext(cluster, clientInfo, clientInfoMonitor, LiteralStringRef(""), TaskDefaultEndpoint, clientLocality, true, false, apiVersion);
	}

	return Database(db);
}

Database Database::createDatabase( std::string connFileName, int apiVersion, LocalityData const& clientLocality ) {

@ -765,19 +781,19 @@ Database Database::createDatabase( std::string connFileName, int apiVersion, Loc

extern IPAddress determinePublicIPAutomatically(ClusterConnectionString const& ccs);

Cluster::Cluster( Reference<ClusterConnectionFile> connFile, int apiVersion )
Cluster::Cluster( Reference<ClusterConnectionFile> connFile, Reference<AsyncVar<int>> connectedCoordinatorsNum, int apiVersion )
	: clusterInterface(new AsyncVar<Optional<ClusterInterface>>())
{
	init(connFile, true, apiVersion);
	init(connFile, true, connectedCoordinatorsNum, apiVersion);
}

Cluster::Cluster( Reference<ClusterConnectionFile> connFile, Reference<AsyncVar<Optional<ClusterInterface>>> clusterInterface)
Cluster::Cluster( Reference<ClusterConnectionFile> connFile, Reference<AsyncVar<Optional<ClusterInterface>>> clusterInterface, Reference<AsyncVar<int>> connectedCoordinatorsNum)
	: clusterInterface(clusterInterface)
{
	init(connFile, true);
	init(connFile, true, connectedCoordinatorsNum);
}

void Cluster::init( Reference<ClusterConnectionFile> connFile, bool startClientInfoMonitor, int apiVersion ) {
void Cluster::init( Reference<ClusterConnectionFile> connFile, bool startClientInfoMonitor, Reference<AsyncVar<int>> connectedCoordinatorsNum, int apiVersion ) {
	connectionFile = connFile;
	connected = clusterInterface->onChange();

@ -811,7 +827,7 @@ void Cluster::init( Reference<ClusterConnectionFile> connFile, bool startClientI
		uncancellable( recurring( &systemMonitor, CLIENT_KNOBS->SYSTEM_MONITOR_INTERVAL, TaskFlushTrace ) );
	}

	leaderMon = monitorLeader( connFile, clusterInterface );
	leaderMon = monitorLeader( connFile, clusterInterface, connectedCoordinatorsNum );
	failMon = failureMonitorClient( clusterInterface, false );
}
}

@ -68,7 +68,7 @@ class Database {
public:
	enum { API_VERSION_LATEST = -1 };

	static Database createDatabase( Reference<ClusterConnectionFile> connFile, int apiVersion, LocalityData const& clientLocality=LocalityData() );
	static Database createDatabase( Reference<ClusterConnectionFile> connFile, int apiVersion, LocalityData const& clientLocality=LocalityData(), DatabaseContext *preallocatedDb=nullptr );
	static Database createDatabase( std::string connFileName, int apiVersion, LocalityData const& clientLocality=LocalityData() );

	Database() {} // an uninitialized database can be destructed or reassigned safely; that's it

@ -115,8 +115,8 @@ void stopNetwork();
*/
class Cluster : public ReferenceCounted<Cluster>, NonCopyable {
public:
	Cluster(Reference<ClusterConnectionFile> connFile, int apiVersion=Database::API_VERSION_LATEST);
	Cluster(Reference<ClusterConnectionFile> connFile, Reference<AsyncVar<Optional<struct ClusterInterface>>> clusterInterface);
	Cluster(Reference<ClusterConnectionFile> connFile, Reference<AsyncVar<int>> connectedCoordinatorsNum, int apiVersion=Database::API_VERSION_LATEST);
	Cluster(Reference<ClusterConnectionFile> connFile, Reference<AsyncVar<Optional<struct ClusterInterface>>> clusterInterface, Reference<AsyncVar<int>> connectedCoordinatorsNum);

	~Cluster();

@ -126,7 +126,7 @@ public:
	Future<Void> onConnected();

private:
	void init(Reference<ClusterConnectionFile> connFile, bool startClientInfoMonitor, int apiVersion=Database::API_VERSION_LATEST);
	void init(Reference<ClusterConnectionFile> connFile, bool startClientInfoMonitor, Reference<AsyncVar<int>> connectedCoordinatorsNum, int apiVersion=Database::API_VERSION_LATEST);

	Reference<AsyncVar<Optional<struct ClusterInterface>>> clusterInterface;
	Reference<ClusterConnectionFile> connectionFile;

@ -1097,7 +1097,12 @@ public:
		return Void();
	} catch( Error &e ) {
		if ( !ryw->resetPromise.isSet() ) {
			ryw->resetRyow();
			if(ryw->tr.apiVersionAtLeast(610)) {
				ryw->resetPromise.sendError(transaction_cancelled());
			}
			else {
				ryw->resetRyow();
			}
		}
		if( e.code() == error_code_broken_promise )
			throw transaction_cancelled();
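The hunk above gates a semantic change on API version 610: when the reset promise is broken during cancellation, newer clients see an explicit transaction_cancelled error instead of a silent reset. The same branch, annotated as a sketch of the decision (no behavior beyond the hunk is assumed):

	// Sketch: version-gated behavior change, as in the hunk above.
	if(ryw->tr.apiVersionAtLeast(610)) {
		// 610 and later: surface the cancellation so pending waiters fail loudly
		ryw->resetPromise.sendError(transaction_cancelled());
	}
	else {
		// pre-610: preserve the old quiet-reset behavior for compatibility
		ryw->resetRyow();
	}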
@ -301,7 +301,8 @@ const KeyRef JSONSchemas::statusSchema = LiteralStringRef(R"statusSchema(
	"connected_clients":[
		{
			"address":"127.0.0.1:9898",
			"log_group":"default"
			"log_group":"default",
			"connected_coordinators":2
		}
	],
	"count" : 1,

@ -30,7 +30,8 @@
// Therefore, it is unsafe to call (explicitly or implicitly) this->addRef in any of these functions.

ThreadFuture<Void> ThreadSafeDatabase::onConnected() {
	return onMainThread( [this]() -> Future<Void> {
	DatabaseContext *db = this->db;
	return onMainThread( [db]() -> Future<Void> {
		db->checkDeferredError();
		return db->onConnected();
	} );

@ -50,24 +51,30 @@ Reference<ITransaction> ThreadSafeDatabase::createTransaction() {
}

void ThreadSafeDatabase::setOption( FDBDatabaseOptions::Option option, Optional<StringRef> value) {
	DatabaseContext *db = this->db;
	Standalone<Optional<StringRef>> passValue = value;
	onMainThreadVoid( [this, option, passValue](){ db->setOption(option, passValue.contents()); }, &db->deferredError );
	onMainThreadVoid( [db, option, passValue](){
		db->checkDeferredError();
		db->setOption(option, passValue.contents());
	}, &db->deferredError );
}

ThreadSafeDatabase::ThreadSafeDatabase(std::string connFilename, int apiVersion) {
	db = NULL; // All accesses to db happen on the main thread, so this pointer will be set by the time anybody uses it

	Reference<ClusterConnectionFile> connFile = Reference<ClusterConnectionFile>(new ClusterConnectionFile(ClusterConnectionFile::lookupClusterFileName(connFilename).first));
	onMainThreadVoid([this, connFile, apiVersion](){

	// Allocate memory for the Database from this thread (so the pointer is known for subsequent method calls)
	// but run its constructor on the main thread
	DatabaseContext *db = this->db = DatabaseContext::allocateOnForeignThread();

	onMainThreadVoid([db, connFile, apiVersion](){
		try {
			Database db = Database::createDatabase(connFile, apiVersion);
			this->db = db.extractPtr();
			Database::createDatabase(connFile, apiVersion, LocalityData(), db).extractPtr();
		}
		catch(Error &e) {
			this->db = new DatabaseContext(e);
			new (db) DatabaseContext(e);
		}
		catch(...) {
			this->db = new DatabaseContext(unknown_error());
			new (db) DatabaseContext(unknown_error());
		}
	}, NULL);
}
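The ThreadSafeDatabase constructor above separates allocation from construction: storage for the DatabaseContext is obtained on the calling thread, so this->db is immediately a stable pointer, while the constructor runs later on the network thread via placement new. A distilled, self-contained sketch of that idiom (types and names here are illustrative):

	// Sketch: allocate on thread A, construct later on thread B with placement new.
	#include <new>

	struct Context { int value; Context(int v) : value(v) {} };

	Context* allocateOnForeignThread() {
		return (Context*)operator new(sizeof(Context)); // raw storage only; no constructor runs
	}

	void constructOnMainThread(Context* preallocated) {
		new (preallocated) Context(42); // placement new: construct into the existing storage
	}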
@ -343,6 +343,7 @@ struct Peer : NonCopyable {
			break;
		}
	}

	if ( !destination.isPublic() || outgoingConnectionIdle || destination > compatibleAddr ) {
		// Keep the new connection
		TraceEvent("IncomingConnection", conn->getDebugID())

@ -36,23 +36,23 @@ public:
	virtual void delref() { ReferenceCounted<LocalitySet>::delref(); }

	bool selectReplicas(
		IRepPolicyRef const& policy,
		Reference<IReplicationPolicy> const& policy,
		std::vector<LocalityEntry> const& alsoServers,
		std::vector<LocalityEntry> & results)
	{
		LocalitySetRef fromServers = LocalitySetRef::addRef(this);
		Reference<LocalitySet> fromServers = Reference<LocalitySet>::addRef(this);
		return policy->selectReplicas(fromServers, alsoServers, results);
	}

	bool selectReplicas(
		IRepPolicyRef const& policy,
		Reference<IReplicationPolicy> const& policy,
		std::vector<LocalityEntry> & results)
	{ return selectReplicas(policy, std::vector<LocalityEntry>(), results); }

	bool validate(
		IRepPolicyRef const& policy) const
		Reference<IReplicationPolicy> const& policy) const
	{
		LocalitySetRef const solutionSet = LocalitySetRef::addRef((LocalitySet*) this);
		Reference<LocalitySet> const solutionSet = Reference<LocalitySet>::addRef((LocalitySet*) this);
		return policy->validate(solutionSet);
	}

@ -159,7 +159,7 @@ public:
	}

	static void staticDisplayEntries(
		LocalitySetRef const& fromServers,
		Reference<LocalitySet> const& fromServers,
		std::vector<LocalityEntry> const& entryArray,
		const char* name = "zone")
	{

@ -174,8 +174,8 @@ public:
	// the specified value for the given key
	// The returned LocalitySet contains the LocalityRecords that have the same value as
	// the indexValue under the same indexKey (e.g., zoneid)
	LocalitySetRef restrict(AttribKey indexKey, AttribValue indexValue ) {
		LocalitySetRef localitySet;
	Reference<LocalitySet> restrict(AttribKey indexKey, AttribValue indexValue ) {
		Reference<LocalitySet> localitySet;
		LocalityCacheRecord searchRecord(AttribRecord(indexKey, indexValue), localitySet);
		auto itKeyValue = std::lower_bound(_cacheArray.begin(), _cacheArray.end(), searchRecord, LocalityCacheRecord::compareKeyValue);

@ -185,7 +185,7 @@ public:
			localitySet = itKeyValue->_resultset;
		}
		else {
			localitySet = LocalitySetRef(new LocalitySet(*_localitygroup));
			localitySet = Reference<LocalitySet>(new LocalitySet(*_localitygroup));
			_cachemisses ++;
			// If the key is not within the current key set, skip it because no items within
			// the current entry array have the key

@ -213,8 +213,8 @@ public:
	}

	// This function is used to create a subset containing the specified entries
	LocalitySetRef restrict(std::vector<LocalityEntry> const& entryArray) {
		LocalitySetRef localitySet(new LocalitySet(*_localitygroup));
	Reference<LocalitySet> restrict(std::vector<LocalityEntry> const& entryArray) {
		Reference<LocalitySet> localitySet(new LocalitySet(*_localitygroup));
		for (auto& entry : entryArray) {
			localitySet->add(getRecordViaEntry(entry), *this);
		}

@ -453,8 +453,8 @@ protected:
	// This class stores the cache record for each entry within the locality set
	struct LocalityCacheRecord {
		AttribRecord _attribute;
		LocalitySetRef _resultset;
		LocalityCacheRecord(AttribRecord const& attribute, LocalitySetRef resultset):_attribute(attribute),_resultset(resultset){}
		Reference<LocalitySet> _resultset;
		LocalityCacheRecord(AttribRecord const& attribute, Reference<LocalitySet> resultset):_attribute(attribute),_resultset(resultset){}
		LocalityCacheRecord(LocalityCacheRecord const& source):_attribute(source._attribute),_resultset(source._resultset){}
		virtual ~LocalityCacheRecord(){}
		LocalityCacheRecord& operator=(LocalityCacheRecord const& source) {

@ -584,7 +584,7 @@ struct LocalityMap : public LocalityGroup {
	virtual ~LocalityMap() {}

	bool selectReplicas(
		IRepPolicyRef const& policy,
		Reference<IReplicationPolicy> const& policy,
		std::vector<LocalityEntry> const& alsoServers,
		std::vector<LocalityEntry>& entryResults,
		std::vector<V*> & results)

@ -601,7 +601,7 @@ struct LocalityMap : public LocalityGroup {
	}

	bool selectReplicas(
		IRepPolicyRef const& policy,
		Reference<IReplicationPolicy> const& policy,
		std::vector<LocalityEntry> const& alsoServers,
		std::vector<V*> & results)
	{

@ -610,7 +610,7 @@ struct LocalityMap : public LocalityGroup {
	}

	bool selectReplicas(
		IRepPolicyRef const& policy,
		Reference<IReplicationPolicy> const& policy,
		std::vector<V*> & results)
	{ return selectReplicas(policy, std::vector<LocalityEntry>(), results); }
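LocalityMap<V> is the usual consumer of these overloads: objects are registered with their LocalityData, and a policy then picks replicas over them. A minimal sketch, assuming LocalityMap::add(LocalityData, V*) and LocalityData::set behave as in fdbrpc (zone values are illustrative):

	// Sketch: choose 2 replicas across distinct zones from a small map.
	LocalityMap<int> serverMap;
	int ids[4] = {0, 1, 2, 3};
	LocalityData locality;
	locality.set(LiteralStringRef("zoneid"), LiteralStringRef("z1"));
	serverMap.add(locality, &ids[0]);
	serverMap.add(locality, &ids[1]);
	locality.set(LiteralStringRef("zoneid"), LiteralStringRef("z2"));
	serverMap.add(locality, &ids[2]);
	serverMap.add(locality, &ids[3]);

	Reference<IReplicationPolicy> policy(new PolicyAcross(2, "zoneid", Reference<IReplicationPolicy>(new PolicyOne())));
	std::vector<int*> replicas;
	bool satisfied = serverMap.selectReplicas(policy, replicas); // true: z1 and z2 can each supply one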
@ -24,14 +24,14 @@
|
|||
|
||||
|
||||
bool IReplicationPolicy::selectReplicas(
|
||||
LocalitySetRef & fromServers,
|
||||
Reference<LocalitySet> & fromServers,
|
||||
std::vector<LocalityEntry> & results )
|
||||
{
|
||||
return selectReplicas(fromServers, std::vector<LocalityEntry>(), results);
|
||||
}
|
||||
|
||||
bool IReplicationPolicy::validate(
|
||||
LocalitySetRef const& solutionSet ) const
|
||||
Reference<LocalitySet> const& solutionSet ) const
|
||||
{
|
||||
return validate(solutionSet->getEntries(), solutionSet);
|
||||
}
|
||||
|
@ -40,7 +40,7 @@ bool IReplicationPolicy::validateFull(
|
|||
bool solved,
|
||||
std::vector<LocalityEntry> const& solutionSet,
|
||||
std::vector<LocalityEntry> const& alsoServers,
|
||||
LocalitySetRef const& fromServers )
|
||||
Reference<LocalitySet> const& fromServers )
|
||||
{
|
||||
bool valid = true;
|
||||
std::vector<LocalityEntry> totalSolution(solutionSet);
|
||||
|
@ -105,7 +105,7 @@ bool IReplicationPolicy::validateFull(
|
|||
}
|
||||
|
||||
bool PolicyOne::selectReplicas(
|
||||
LocalitySetRef & fromServers,
|
||||
Reference<LocalitySet> & fromServers,
|
||||
std::vector<LocalityEntry> const& alsoServers,
|
||||
std::vector<LocalityEntry> & results )
|
||||
{
|
||||
|
@ -131,12 +131,12 @@ bool PolicyOne::selectReplicas(
|
|||
|
||||
bool PolicyOne::validate(
|
||||
std::vector<LocalityEntry> const& solutionSet,
|
||||
LocalitySetRef const& fromServers ) const
|
||||
Reference<LocalitySet> const& fromServers ) const
|
||||
{
|
||||
return ((solutionSet.size() > 0) && (fromServers->size() > 0));
|
||||
}
|
||||
|
||||
PolicyAcross::PolicyAcross(int count, std::string const& attribKey, IRepPolicyRef const policy):
|
||||
PolicyAcross::PolicyAcross(int count, std::string const& attribKey, Reference<IReplicationPolicy> const policy):
|
||||
_count(count),_attribKey(attribKey),_policy(policy)
|
||||
{
|
||||
return;
|
||||
|
@ -150,7 +150,7 @@ PolicyAcross::~PolicyAcross()
|
|||
// Debug purpose only
|
||||
// Trace all record entries to help debug
|
||||
// fromServers is the servers locality to be printed out.
|
||||
void IReplicationPolicy::traceLocalityRecords(LocalitySetRef const& fromServers) {
|
||||
void IReplicationPolicy::traceLocalityRecords(Reference<LocalitySet> const& fromServers) {
|
||||
std::vector<Reference<LocalityRecord>> const& recordArray = fromServers->getRecordArray();
|
||||
TraceEvent("LocalityRecordArray").detail("Size", recordArray.size());
|
||||
for (auto& record : recordArray) {
|
||||
|
@ -158,7 +158,7 @@ void IReplicationPolicy::traceLocalityRecords(LocalitySetRef const& fromServers)
|
|||
}
|
||||
}
|
||||
|
||||
void IReplicationPolicy::traceOneLocalityRecord(Reference<LocalityRecord> record, LocalitySetRef const& fromServers) {
|
||||
void IReplicationPolicy::traceOneLocalityRecord(Reference<LocalityRecord> record, Reference<LocalitySet> const& fromServers) {
|
||||
int localityEntryIndex = record->_entryIndex._id;
|
||||
Reference<KeyValueMap> const& dataMap = record->_dataMap;
|
||||
std::vector<AttribRecord> const& keyValueArray = dataMap->_keyvaluearray;
|
||||
|
@ -185,7 +185,7 @@ void IReplicationPolicy::traceOneLocalityRecord(Reference<LocalityRecord> record
|
|||
// return true if the team satisfies the policy; false otherwise
|
||||
bool PolicyAcross::validate(
|
||||
std::vector<LocalityEntry> const& solutionSet,
|
||||
LocalitySetRef const& fromServers ) const
|
||||
Reference<LocalitySet> const& fromServers ) const
|
||||
{
|
||||
bool valid = true;
|
||||
int count = 0;
|
||||
|
@ -262,7 +262,7 @@ bool PolicyAcross::validate(
|
|||
// that should be excluded from being selected as replicas.
|
||||
// FIXME: Simplify this function, such as removing unnecessary printf
|
||||
bool PolicyAcross::selectReplicas(
|
||||
LocalitySetRef & fromServers,
|
||||
Reference<LocalitySet> & fromServers,
|
||||
std::vector<LocalityEntry> const& alsoServers,
|
||||
std::vector<LocalityEntry> & results )
|
||||
{
|
||||
|
@ -437,7 +437,7 @@ bool PolicyAcross::selectReplicas(
|
|||
|
||||
bool PolicyAnd::validate(
|
||||
std::vector<LocalityEntry> const& solutionSet,
|
||||
LocalitySetRef const& fromServers ) const
|
||||
Reference<LocalitySet> const& fromServers ) const
|
||||
{
|
||||
bool valid = true;
|
||||
for (auto& policy : _policies) {
|
||||
|
@ -450,7 +450,7 @@ bool PolicyAnd::validate(
|
|||
}
|
||||
|
||||
bool PolicyAnd::selectReplicas(
|
||||
LocalitySetRef & fromServers,
|
||||
Reference<LocalitySet> & fromServers,
|
||||
std::vector<LocalityEntry> const& alsoServers,
|
||||
std::vector<LocalityEntry> & results )
|
||||
{
|
||||
|
@ -486,26 +486,26 @@ bool PolicyAnd::selectReplicas(
|
|||
return passed;
|
||||
}
|
||||
|
||||
void testPolicySerialization(IRepPolicyRef& policy) {
|
||||
void testPolicySerialization(Reference<IReplicationPolicy>& policy) {
|
||||
std::string policyInfo = policy->info();
|
||||
|
||||
BinaryWriter writer(IncludeVersion());
|
||||
serializeReplicationPolicy(writer, policy);
|
||||
|
||||
BinaryReader reader(writer.getData(), writer.getLength(), IncludeVersion());
|
||||
IRepPolicyRef copy;
|
||||
Reference<IReplicationPolicy> copy;
|
||||
serializeReplicationPolicy(reader, copy);
|
||||
|
||||
ASSERT(policy->info() == copy->info());
|
||||
}
|
||||
|
||||
void testReplicationPolicy(int nTests) {
|
||||
IRepPolicyRef policy = IRepPolicyRef(new PolicyAcross(1, "data_hall", IRepPolicyRef(new PolicyOne())));
|
||||
Reference<IReplicationPolicy> policy = Reference<IReplicationPolicy>(new PolicyAcross(1, "data_hall", Reference<IReplicationPolicy>(new PolicyOne())));
|
||||
testPolicySerialization(policy);
|
||||
|
||||
policy = IRepPolicyRef(new PolicyAnd({
|
||||
IRepPolicyRef(new PolicyAcross(2, "data_center", IRepPolicyRef(new PolicyAcross(3, "rack", IRepPolicyRef(new PolicyOne()))))),
|
||||
IRepPolicyRef(new PolicyAcross(2, "data_center", IRepPolicyRef(new PolicyAcross(2, "data_hall", IRepPolicyRef(new PolicyOne())))))
|
||||
policy = Reference<IReplicationPolicy>(new PolicyAnd({
|
||||
Reference<IReplicationPolicy>(new PolicyAcross(2, "data_center", Reference<IReplicationPolicy>(new PolicyAcross(3, "rack", Reference<IReplicationPolicy>(new PolicyOne()))))),
|
||||
Reference<IReplicationPolicy>(new PolicyAcross(2, "data_center", Reference<IReplicationPolicy>(new PolicyAcross(2, "data_hall", Reference<IReplicationPolicy>(new PolicyOne())))))
|
||||
}));
|
||||
|
||||
testPolicySerialization(policy);
|
||||
|
|
|
@ -26,7 +26,7 @@
|
|||
#include "fdbrpc/ReplicationTypes.h"
|
||||
|
||||
template <class Ar>
|
||||
void serializeReplicationPolicy(Ar& ar, IRepPolicyRef& policy);
|
||||
void serializeReplicationPolicy(Ar& ar, Reference<IReplicationPolicy>& policy);
|
||||
extern void testReplicationPolicy(int nTests);
|
||||
|
||||
|
||||
|
@ -40,36 +40,36 @@ struct IReplicationPolicy : public ReferenceCounted<IReplicationPolicy> {
|
|||
virtual int maxResults() const = 0;
|
||||
virtual int depth() const = 0;
|
||||
virtual bool selectReplicas(
|
||||
LocalitySetRef & fromServers,
|
||||
Reference<LocalitySet> & fromServers,
|
||||
std::vector<LocalityEntry> const& alsoServers,
|
||||
std::vector<LocalityEntry> & results ) = 0;
|
||||
virtual void traceLocalityRecords(LocalitySetRef const& fromServers);
|
||||
virtual void traceOneLocalityRecord(Reference<LocalityRecord> record, LocalitySetRef const& fromServers);
|
||||
virtual void traceLocalityRecords(Reference<LocalitySet> const& fromServers);
|
||||
virtual void traceOneLocalityRecord(Reference<LocalityRecord> record, Reference<LocalitySet> const& fromServers);
|
||||
virtual bool validate(
|
||||
std::vector<LocalityEntry> const& solutionSet,
|
||||
LocalitySetRef const& fromServers ) const = 0;
|
||||
Reference<LocalitySet> const& fromServers ) const = 0;
|
||||
|
||||
bool operator == ( const IReplicationPolicy& r ) const { return info() == r.info(); }
|
||||
bool operator != ( const IReplicationPolicy& r ) const { return info() != r.info(); }
|
||||
|
||||
template <class Ar>
|
||||
void serialize(Ar& ar) {
|
||||
IRepPolicyRef refThis(this);
|
||||
Reference<IReplicationPolicy> refThis(this);
|
||||
serializeReplicationPolicy(ar, refThis);
|
||||
refThis->delref_no_destroy();
|
||||
}
|
||||
|
||||
// Utility functions
|
||||
bool selectReplicas(
|
||||
LocalitySetRef & fromServers,
|
||||
Reference<LocalitySet> & fromServers,
|
||||
std::vector<LocalityEntry> & results );
|
||||
bool validate(
|
||||
LocalitySetRef const& solutionSet ) const;
|
||||
Reference<LocalitySet> const& solutionSet ) const;
|
||||
bool validateFull(
|
||||
bool solved,
|
||||
std::vector<LocalityEntry> const& solutionSet,
|
||||
std::vector<LocalityEntry> const& alsoServers,
|
||||
LocalitySetRef const& fromServers );
|
||||
Reference<LocalitySet> const& fromServers );
|
||||
|
||||
// Returns a set of the attributes that this policy uses in selection and validation.
|
||||
std::set<std::string> attributeKeys() const
|
||||
|
@ -78,7 +78,7 @@ struct IReplicationPolicy : public ReferenceCounted<IReplicationPolicy> {
|
|||
};
|
||||
|
||||
template <class Archive>
|
||||
inline void load( Archive& ar, IRepPolicyRef& value ) {
|
||||
inline void load( Archive& ar, Reference<IReplicationPolicy>& value ) {
|
||||
bool present = (value.getPtr());
|
||||
ar >> present;
|
||||
if (present) {
|
||||
|
@ -90,11 +90,11 @@ inline void load( Archive& ar, IRepPolicyRef& value ) {
 }

 template <class Archive>
-inline void save( Archive& ar, const IRepPolicyRef& value ) {
+inline void save( Archive& ar, const Reference<IReplicationPolicy>& value ) {
     bool present = (value.getPtr());
     ar << present;
     if (present) {
-        serializeReplicationPolicy(ar, (IRepPolicyRef&) value);
+        serializeReplicationPolicy(ar, (Reference<IReplicationPolicy>&) value);
     }
 }

@ -107,9 +107,9 @@ struct PolicyOne : IReplicationPolicy, public ReferenceCounted<PolicyOne> {
     virtual int depth() const { return 1; }
     virtual bool validate(
         std::vector<LocalityEntry> const& solutionSet,
-        LocalitySetRef const& fromServers ) const;
+        Reference<LocalitySet> const& fromServers ) const;
     virtual bool selectReplicas(
-        LocalitySetRef & fromServers,
+        Reference<LocalitySet> & fromServers,
         std::vector<LocalityEntry> const& alsoServers,
         std::vector<LocalityEntry> & results );
     template <class Ar>

@ -119,7 +119,7 @@ struct PolicyOne : IReplicationPolicy, public ReferenceCounted<PolicyOne> {
 };

 struct PolicyAcross : IReplicationPolicy, public ReferenceCounted<PolicyAcross> {
-    PolicyAcross(int count, std::string const& attribKey, IRepPolicyRef const policy);
+    PolicyAcross(int count, std::string const& attribKey, Reference<IReplicationPolicy> const policy);
     virtual ~PolicyAcross();
     virtual std::string name() const { return "Across"; }
     virtual std::string info() const

@ -128,9 +128,9 @@ struct PolicyAcross : IReplicationPolicy, public ReferenceCounted<PolicyAcross>
     virtual int depth() const { return 1 + _policy->depth(); }
     virtual bool validate(
         std::vector<LocalityEntry> const& solutionSet,
-        LocalitySetRef const& fromServers ) const;
+        Reference<LocalitySet> const& fromServers ) const;
     virtual bool selectReplicas(
-        LocalitySetRef & fromServers,
+        Reference<LocalitySet> & fromServers,
         std::vector<LocalityEntry> const& alsoServers,
         std::vector<LocalityEntry> & results );

@ -149,18 +149,18 @@ struct PolicyAcross : IReplicationPolicy, public ReferenceCounted<PolicyAcross>
 protected:
     int _count;
     std::string _attribKey;
-    IRepPolicyRef _policy;
+    Reference<IReplicationPolicy> _policy;

     // Cache temporary members
     std::vector<AttribValue> _usedValues;
     std::vector<LocalityEntry> _newResults;
-    LocalitySetRef _selected;
+    Reference<LocalitySet> _selected;
     VectorRef<std::pair<int,int>> _addedResults;
     Arena _arena;
 };

 struct PolicyAnd : IReplicationPolicy, public ReferenceCounted<PolicyAnd> {
-    PolicyAnd(std::vector<IRepPolicyRef> policies): _policies(policies), _sortedPolicies(policies)
+    PolicyAnd(std::vector<Reference<IReplicationPolicy>> policies): _policies(policies), _sortedPolicies(policies)
     {
         // Sort the policy array
         std::sort(_sortedPolicies.begin(), _sortedPolicies.end(), PolicyAnd::comparePolicy);

@ -194,14 +194,14 @@ struct PolicyAnd : IReplicationPolicy, public ReferenceCounted<PolicyAnd> {
     }
     virtual bool validate(
         std::vector<LocalityEntry> const& solutionSet,
-        LocalitySetRef const& fromServers ) const;
+        Reference<LocalitySet> const& fromServers ) const;

     virtual bool selectReplicas(
-        LocalitySetRef & fromServers,
+        Reference<LocalitySet> & fromServers,
         std::vector<LocalityEntry> const& alsoServers,
         std::vector<LocalityEntry> & results );

-    static bool comparePolicy(const IRepPolicyRef& rhs, const IRepPolicyRef& lhs)
+    static bool comparePolicy(const Reference<IReplicationPolicy>& rhs, const Reference<IReplicationPolicy>& lhs)
     { return (lhs->maxResults() < rhs->maxResults()) || (!(rhs->maxResults() < lhs->maxResults()) && (lhs->depth() < rhs->depth())); }

     template <class Ar>

@ -219,18 +219,18 @@ struct PolicyAnd : IReplicationPolicy, public ReferenceCounted<PolicyAnd> {
     }

     virtual void attributeKeys(std::set<std::string> *set) const override
-    { for (const IRepPolicyRef& r : _policies) { r->attributeKeys(set); } }
+    { for (const Reference<IReplicationPolicy>& r : _policies) { r->attributeKeys(set); } }

 protected:
-    std::vector<IRepPolicyRef> _policies;
-    std::vector<IRepPolicyRef> _sortedPolicies;
+    std::vector<Reference<IReplicationPolicy>> _policies;
+    std::vector<Reference<IReplicationPolicy>> _sortedPolicies;
 };

 extern int testReplication();


 template <class Ar>
-void serializeReplicationPolicy(Ar& ar, IRepPolicyRef& policy) {
+void serializeReplicationPolicy(Ar& ar, Reference<IReplicationPolicy>& policy) {
     if(Ar::isDeserializing) {
         StringRef name;
         serializer(ar, name);

@ -238,20 +238,20 @@ void serializeReplicationPolicy(Ar& ar, IRepPolicyRef& policy) {
         if(name == LiteralStringRef("One")) {
             PolicyOne* pointer = new PolicyOne();
             pointer->serialize(ar);
-            policy = IRepPolicyRef(pointer);
+            policy = Reference<IReplicationPolicy>(pointer);
         }
         else if(name == LiteralStringRef("Across")) {
-            PolicyAcross* pointer = new PolicyAcross(0, "", IRepPolicyRef());
+            PolicyAcross* pointer = new PolicyAcross(0, "", Reference<IReplicationPolicy>());
             pointer->serialize(ar);
-            policy = IRepPolicyRef(pointer);
+            policy = Reference<IReplicationPolicy>(pointer);
         }
         else if(name == LiteralStringRef("And")) {
             PolicyAnd* pointer = new PolicyAnd({});
             pointer->serialize(ar);
-            policy = IRepPolicyRef(pointer);
+            policy = Reference<IReplicationPolicy>(pointer);
         }
         else if(name == LiteralStringRef("None")) {
-            policy = IRepPolicyRef();
+            policy = Reference<IReplicationPolicy>();
         }
         else {
             TraceEvent(SevError, "SerializingInvalidPolicyType")
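The deserialization branch above is a hand-rolled tag dispatch: the writer records the concrete policy's name first, and the reader switches on that name to construct the right subclass before handing it the archive. A minimal standalone sketch of the same idea; the Shape/Circle/Square types here are illustrative, not FoundationDB code:

#include <memory>
#include <string>

struct Shape { virtual ~Shape() {} virtual std::string name() const = 0; };
struct Circle : Shape { std::string name() const override { return "Circle"; } };
struct Square : Shape { std::string name() const override { return "Square"; } };

// Reader side: the tag read from the stream picks the concrete type,
// mirroring the LiteralStringRef("One"/"Across"/"And") chain above.
std::unique_ptr<Shape> loadShape(const std::string& tag) {
    if (tag == "Circle") return std::make_unique<Circle>();
    if (tag == "Square") return std::make_unique<Square>();
    return nullptr; // unknown tag, like the SerializingInvalidPolicyType branch
}

Note that the "None" case maps to a null reference, which is why a default-constructed Reference<IReplicationPolicy>() is a meaningful "no policy" value throughout these files.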
@ -34,9 +34,6 @@ struct LocalityRecord;
 struct StringToIntMap;
 struct IReplicationPolicy;

-typedef Reference<LocalitySet> LocalitySetRef;
-typedef Reference<IReplicationPolicy> IRepPolicyRef;
-
 extern int g_replicationdebug;

 struct AttribKey {
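This small hunk is the root of the whole rename: with the LocalitySetRef and IRepPolicyRef typedefs deleted, every caller now spells out the underlying smart-pointer type, which is what the mechanical edits across the surrounding files do. For example, a "triple across zones" policy is now written exactly as it appears in the data-distribution tests later in this diff:

Reference<IReplicationPolicy> policy =
    Reference<IReplicationPolicy>(new PolicyAcross(3, "zoneid",
        Reference<IReplicationPolicy>(new PolicyOne())));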
@ -27,8 +27,8 @@


 double ratePolicy(
-    LocalitySetRef & localitySet,
-    IRepPolicyRef const& policy,
+    Reference<LocalitySet> & localitySet,
+    Reference<IReplicationPolicy> const& policy,
     unsigned int nTestTotal)
 {
     double rating = -1.0;

@ -85,14 +85,14 @@ double ratePolicy(

 bool findBestPolicySet(
     std::vector<LocalityEntry>& bestResults,
-    LocalitySetRef & localitySet,
-    IRepPolicyRef const& policy,
+    Reference<LocalitySet> & localitySet,
+    Reference<IReplicationPolicy> const& policy,
     unsigned int nMinItems,
     unsigned int nSelectTests,
     unsigned int nPolicyTests)
 {
     bool bSucceeded = true;
-    LocalitySetRef bestLocalitySet, testLocalitySet;
+    Reference<LocalitySet> bestLocalitySet, testLocalitySet;
     std::vector<LocalityEntry> results;
     double testRate, bestRate = -1.0;

@ -162,15 +162,15 @@ bool findBestPolicySet(

 bool findBestUniquePolicySet(
     std::vector<LocalityEntry>& bestResults,
-    LocalitySetRef & localitySet,
-    IRepPolicyRef const& policy,
+    Reference<LocalitySet> & localitySet,
+    Reference<IReplicationPolicy> const& policy,
     StringRef localityUniquenessKey,
     unsigned int nMinItems,
     unsigned int nSelectTests,
     unsigned int nPolicyTests)
 {
     bool bSucceeded = true;
-    LocalitySetRef bestLocalitySet, testLocalitySet;
+    Reference<LocalitySet> bestLocalitySet, testLocalitySet;
     std::vector<LocalityEntry> results;
     double testRate, bestRate = -1.0;

@ -262,7 +262,7 @@ bool findBestUniquePolicySet(
 bool validateAllCombinations(
     std::vector<LocalityData> & offendingCombo,
     LocalityGroup const& localitySet,
-    IRepPolicyRef const& policy,
+    Reference<IReplicationPolicy> const& policy,
     std::vector<LocalityData> const& newItems,
     unsigned int nCombinationSize,
     bool bCheckIfValid)
@ -286,25 +286,39 @@ bool validateAllCombinations(
     }
     else
     {
         bool bIsValidGroup;
-        LocalityGroup localityGroup;
+        Reference<LocalitySet> localSet = Reference<LocalitySet>( new LocalityGroup() );
+        LocalityGroup* localGroup = (LocalityGroup*) localSet.getPtr();
+        localGroup->deep_copy(localitySet);
+
+        std::vector<LocalityEntry> localityGroupEntries = localGroup->getEntries();
+        int originalSize = localityGroupEntries.size();
+
+        for (int i = 0; i < newItems.size(); ++i) {
+            localGroup->add(newItems[i]);
+        }
+
         std::string bitmask(nCombinationSize, 1); // K leading 1's
-
         bitmask.resize(newItems.size(), 0); // N-K trailing 0's

+        std::vector<LocalityEntry> resultEntries;
         do
         {
-            localityGroup.deep_copy(localitySet);
-
+            localityGroupEntries.resize(originalSize);
             // [0..N-1] integers
-            for (int i = 0; i < newItems.size(); ++i) {
+            for (int i = 0; i < bitmask.size(); ++i) {
                 if (bitmask[i]) {
-                    localityGroup.add(newItems[i]);
+                    localityGroupEntries.push_back(localGroup->getEntry(originalSize + i));
                 }
             }

-            // Check if the group combination passes validation
-            bIsValidGroup = localityGroup.validate(policy);
+            resultEntries.clear();
+
+            // Run the policy, assert if unable to satisfy
+            bool result = localSet->selectReplicas(policy, localityGroupEntries, resultEntries);
+            ASSERT(result);
+
+            bIsValidGroup = resultEntries.size() == 0;

             if (((bCheckIfValid) &&
                 (!bIsValidGroup) ) ||
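The rewritten loop keeps the old enumeration strategy: a mask of nCombinationSize leading 1s and trailing 0s selects which of the newItems to test, and each iteration now asks selectReplicas for a satisfying set instead of calling validate on a throwaway LocalityGroup. The loop driver is not visible in this hunk, but the 1s/0s mask is the standard std::prev_permutation idiom for visiting every K-of-N combination; a self-contained sketch, assuming that driver:

#include <algorithm>
#include <cstdio>
#include <string>

// Enumerate all K-of-N index combinations using the same trick as above:
// a string of K leading 1s and N-K trailing 0s, stepped with
// std::prev_permutation, which visits every distinct arrangement once.
int main() {
    const int N = 4, K = 2;
    std::string bitmask(K, 1);   // K leading 1's
    bitmask.resize(N, 0);        // N-K trailing 0's
    do {
        for (int i = 0; i < N; ++i)
            if (bitmask[i]) printf("%d ", i);   // indexes selected this round
        printf("\n");
    } while (std::prev_permutation(bitmask.begin(), bitmask.end()));
    return 0;
}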
@ -319,7 +333,7 @@ bool validateAllCombinations(
     }
     if (g_replicationdebug > 2) {
         printf("Invalid group\n");
-        localityGroup.DisplayEntries();
+        localGroup->DisplayEntries();
     }
     if (g_replicationdebug > 3) {
         printf("Full set\n");

@ -337,7 +351,7 @@ bool validateAllCombinations(

 bool validateAllCombinations(
     LocalityGroup const& localitySet,
-    IRepPolicyRef const& policy,
+    Reference<IReplicationPolicy> const& policy,
     std::vector<LocalityData> const& newItems,
     unsigned int nCombinationSize,
     bool bCheckIfValid)

@ -358,10 +372,10 @@ repTestType convertToTestType(int iValue) {
     return sValue;
 }

-LocalitySetRef createTestLocalityMap(std::vector<repTestType>& indexes, int dcTotal,
+Reference<LocalitySet> createTestLocalityMap(std::vector<repTestType>& indexes, int dcTotal,
     int szTotal, int rackTotal, int slotTotal, int independentItems, int independentTotal)
 {
-    LocalitySetRef buildServer(new LocalityMap<repTestType>());
+    Reference<LocalitySet> buildServer(new LocalityMap<repTestType>());
     LocalityMap<repTestType>* serverMap = (LocalityMap<repTestType>*) buildServer.getPtr();
     int serverValue, dcLoop, szLoop, rackLoop, slotLoop;
     std::string dcText, szText, rackText, slotText, independentName, independentText;

@ -442,8 +456,8 @@ LocalitySetRef createTestLocalityMap(std::vector<repTestType>& indexes, int dcTo
 }

 bool testPolicy(
-    LocalitySetRef servers,
-    IRepPolicyRef const& policy,
+    Reference<LocalitySet> servers,
+    Reference<IReplicationPolicy> const& policy,
     std::vector<LocalityEntry> const& including,
     bool validate)
 {
@ -506,109 +520,109 @@ bool testPolicy(
 }

 bool testPolicy(
-    LocalitySetRef servers,
-    IRepPolicyRef const& policy,
+    Reference<LocalitySet> servers,
+    Reference<IReplicationPolicy> const& policy,
     bool validate)
 {
     return testPolicy(servers, policy, emptyEntryArray, validate);
 }


-std::vector<IRepPolicyRef> const& getStaticPolicies()
+std::vector<Reference<IReplicationPolicy>> const& getStaticPolicies()
 {
-    static std::vector<IRepPolicyRef> staticPolicies;
+    static std::vector<Reference<IReplicationPolicy>> staticPolicies;

     if (staticPolicies.empty())
     {
         staticPolicies = {

-            IRepPolicyRef( new PolicyOne() ),
+            Reference<IReplicationPolicy>( new PolicyOne() ),

             // 1 'dc^2 x 1'
-            IRepPolicyRef( new PolicyAcross(2, "dc", IRepPolicyRef( new PolicyOne() ) ) ),
+            Reference<IReplicationPolicy>( new PolicyAcross(2, "dc", Reference<IReplicationPolicy>( new PolicyOne() ) ) ),

             // 2 'dc^3 x 1'
-            IRepPolicyRef( new PolicyAcross(3, "dc", IRepPolicyRef( new PolicyOne() ) ) ),
+            Reference<IReplicationPolicy>( new PolicyAcross(3, "dc", Reference<IReplicationPolicy>( new PolicyOne() ) ) ),

             // 3 'sz^3 x 1'
-            IRepPolicyRef( new PolicyAcross(3, "sz", IRepPolicyRef( new PolicyOne() ) ) ),
+            Reference<IReplicationPolicy>( new PolicyAcross(3, "sz", Reference<IReplicationPolicy>( new PolicyOne() ) ) ),

             // 4 'dc^1 x az^3 x 1'
-            IRepPolicyRef( new PolicyAcross(1, "dc", IRepPolicyRef( new PolicyAcross(3, "az", IRepPolicyRef( new PolicyOne() ))) ) ),
+            Reference<IReplicationPolicy>( new PolicyAcross(1, "dc", Reference<IReplicationPolicy>( new PolicyAcross(3, "az", Reference<IReplicationPolicy>( new PolicyOne() ))) ) ),

             // 5 '(sz^3 x rack^2 x 1) + (dc^2 x az^3 x 1)'
-            IRepPolicyRef( new PolicyAnd( { IRepPolicyRef(new PolicyAcross(3, "sz", IRepPolicyRef(new PolicyAcross(2, "rack", IRepPolicyRef(new PolicyOne() ))))), IRepPolicyRef(new PolicyAcross(2, "dc", IRepPolicyRef(new PolicyAcross(3, "az", IRepPolicyRef(new PolicyOne()) ))) )} ) ),
+            Reference<IReplicationPolicy>( new PolicyAnd( { Reference<IReplicationPolicy>(new PolicyAcross(3, "sz", Reference<IReplicationPolicy>(new PolicyAcross(2, "rack", Reference<IReplicationPolicy>(new PolicyOne() ))))), Reference<IReplicationPolicy>(new PolicyAcross(2, "dc", Reference<IReplicationPolicy>(new PolicyAcross(3, "az", Reference<IReplicationPolicy>(new PolicyOne()) ))) )} ) ),

             // 6 '(sz^1 x 1)'
-            IRepPolicyRef( new PolicyAcross(1, "sz", IRepPolicyRef(new PolicyOne())) ),
+            Reference<IReplicationPolicy>( new PolicyAcross(1, "sz", Reference<IReplicationPolicy>(new PolicyOne())) ),

             // 7 '(sz^1 x 1) + (sz^1 x 1)'
-            IRepPolicyRef( new PolicyAnd( { IRepPolicyRef(new PolicyAcross(1, "sz", IRepPolicyRef(new PolicyOne()))), IRepPolicyRef(new PolicyAcross(1, "sz", IRepPolicyRef(new PolicyOne()))) } ) ),
+            Reference<IReplicationPolicy>( new PolicyAnd( { Reference<IReplicationPolicy>(new PolicyAcross(1, "sz", Reference<IReplicationPolicy>(new PolicyOne()))), Reference<IReplicationPolicy>(new PolicyAcross(1, "sz", Reference<IReplicationPolicy>(new PolicyOne()))) } ) ),

             // 8 '(sz^2 x 1) + (sz^2 x 1)'
-            IRepPolicyRef( new PolicyAnd( { IRepPolicyRef(new PolicyAcross(2, "sz", IRepPolicyRef(new PolicyOne()))), IRepPolicyRef(new PolicyAcross(2, "sz", IRepPolicyRef(new PolicyOne()))) } ) ),
+            Reference<IReplicationPolicy>( new PolicyAnd( { Reference<IReplicationPolicy>(new PolicyAcross(2, "sz", Reference<IReplicationPolicy>(new PolicyOne()))), Reference<IReplicationPolicy>(new PolicyAcross(2, "sz", Reference<IReplicationPolicy>(new PolicyOne()))) } ) ),

             // 9 '(dc^1 x sz^2 x 1)'
-            IRepPolicyRef( new PolicyAcross(1, "dc", IRepPolicyRef( new PolicyAcross(2, "sz", IRepPolicyRef(new PolicyOne()))))),
+            Reference<IReplicationPolicy>( new PolicyAcross(1, "dc", Reference<IReplicationPolicy>( new PolicyAcross(2, "sz", Reference<IReplicationPolicy>(new PolicyOne()))))),

             //10 '(dc^2 x sz^2 x 1)'
-            IRepPolicyRef( new PolicyAcross(2, "dc", IRepPolicyRef( new PolicyAcross(2, "sz", IRepPolicyRef(new PolicyOne()))))),
+            Reference<IReplicationPolicy>( new PolicyAcross(2, "dc", Reference<IReplicationPolicy>( new PolicyAcross(2, "sz", Reference<IReplicationPolicy>(new PolicyOne()))))),

             //11 '(dc^1 x sz^2 x 1) + (dc^2 x sz^2 x 1)'
-            IRepPolicyRef( new PolicyAnd( { IRepPolicyRef(new PolicyAcross(1, "dc", IRepPolicyRef( new PolicyAcross(2, "sz", IRepPolicyRef(new PolicyOne()))))), IRepPolicyRef(new PolicyAcross(2, "dc", IRepPolicyRef( new PolicyAcross(2, "sz", IRepPolicyRef(new PolicyOne()))))) } ) ),
+            Reference<IReplicationPolicy>( new PolicyAnd( { Reference<IReplicationPolicy>(new PolicyAcross(1, "dc", Reference<IReplicationPolicy>( new PolicyAcross(2, "sz", Reference<IReplicationPolicy>(new PolicyOne()))))), Reference<IReplicationPolicy>(new PolicyAcross(2, "dc", Reference<IReplicationPolicy>( new PolicyAcross(2, "sz", Reference<IReplicationPolicy>(new PolicyOne()))))) } ) ),

             //12 '(dc^2 x sz^2 x 1) + (dc^1 x sz^2 x 1)'
-            IRepPolicyRef( new PolicyAnd( { IRepPolicyRef(new PolicyAcross(2, "dc", IRepPolicyRef( new PolicyAcross(2, "sz", IRepPolicyRef(new PolicyOne()))))), IRepPolicyRef(new PolicyAcross(1, "dc", IRepPolicyRef( new PolicyAcross(2, "sz", IRepPolicyRef(new PolicyOne()))))) } ) ),
+            Reference<IReplicationPolicy>( new PolicyAnd( { Reference<IReplicationPolicy>(new PolicyAcross(2, "dc", Reference<IReplicationPolicy>( new PolicyAcross(2, "sz", Reference<IReplicationPolicy>(new PolicyOne()))))), Reference<IReplicationPolicy>(new PolicyAcross(1, "dc", Reference<IReplicationPolicy>( new PolicyAcross(2, "sz", Reference<IReplicationPolicy>(new PolicyOne()))))) } ) ),

             //13 '(sz^2 x 1) + (dc^1 x sz^2 x 1)'
-            IRepPolicyRef( new PolicyAnd( { IRepPolicyRef(new PolicyAcross(2, "sz", IRepPolicyRef(new PolicyOne()))), IRepPolicyRef(new PolicyAcross(1, "dc", IRepPolicyRef( new PolicyAcross(2, "sz", IRepPolicyRef(new PolicyOne()))))) } ) ),
+            Reference<IReplicationPolicy>( new PolicyAnd( { Reference<IReplicationPolicy>(new PolicyAcross(2, "sz", Reference<IReplicationPolicy>(new PolicyOne()))), Reference<IReplicationPolicy>(new PolicyAcross(1, "dc", Reference<IReplicationPolicy>( new PolicyAcross(2, "sz", Reference<IReplicationPolicy>(new PolicyOne()))))) } ) ),

             //14 '(sz^2 x 1) + (dc^2 x sz^2 x 1)'
-            IRepPolicyRef( new PolicyAnd( { IRepPolicyRef(new PolicyAcross(2, "sz", IRepPolicyRef(new PolicyOne()))), IRepPolicyRef(new PolicyAcross(2, "dc", IRepPolicyRef( new PolicyAcross(2, "sz", IRepPolicyRef(new PolicyOne()))))) } ) ),
+            Reference<IReplicationPolicy>( new PolicyAnd( { Reference<IReplicationPolicy>(new PolicyAcross(2, "sz", Reference<IReplicationPolicy>(new PolicyOne()))), Reference<IReplicationPolicy>(new PolicyAcross(2, "dc", Reference<IReplicationPolicy>( new PolicyAcross(2, "sz", Reference<IReplicationPolicy>(new PolicyOne()))))) } ) ),

             //15 '(sz^3 x 1) + (dc^2 x sz^2 x 1)'
-            IRepPolicyRef( new PolicyAnd( { IRepPolicyRef(new PolicyAcross(3, "sz", IRepPolicyRef(new PolicyOne()))), IRepPolicyRef(new PolicyAcross(2, "dc", IRepPolicyRef( new PolicyAcross(2, "sz", IRepPolicyRef(new PolicyOne()))))) } ) ),
+            Reference<IReplicationPolicy>( new PolicyAnd( { Reference<IReplicationPolicy>(new PolicyAcross(3, "sz", Reference<IReplicationPolicy>(new PolicyOne()))), Reference<IReplicationPolicy>(new PolicyAcross(2, "dc", Reference<IReplicationPolicy>( new PolicyAcross(2, "sz", Reference<IReplicationPolicy>(new PolicyOne()))))) } ) ),

             //16 '(sz^1 x 1) + (sz^2 x 1)'
-            IRepPolicyRef( new PolicyAnd( { IRepPolicyRef(new PolicyAcross(1, "sz", IRepPolicyRef(new PolicyOne()))), IRepPolicyRef(new PolicyAcross(2, "sz", IRepPolicyRef(new PolicyOne()))) } ) ),
+            Reference<IReplicationPolicy>( new PolicyAnd( { Reference<IReplicationPolicy>(new PolicyAcross(1, "sz", Reference<IReplicationPolicy>(new PolicyOne()))), Reference<IReplicationPolicy>(new PolicyAcross(2, "sz", Reference<IReplicationPolicy>(new PolicyOne()))) } ) ),

             //17 '(sz^2 x 1) + (sz^3 x 1)'
-            IRepPolicyRef( new PolicyAnd( { IRepPolicyRef(new PolicyAcross(2, "sz", IRepPolicyRef(new PolicyOne()))), IRepPolicyRef(new PolicyAcross(3, "sz", IRepPolicyRef(new PolicyOne()))) } ) ),
+            Reference<IReplicationPolicy>( new PolicyAnd( { Reference<IReplicationPolicy>(new PolicyAcross(2, "sz", Reference<IReplicationPolicy>(new PolicyOne()))), Reference<IReplicationPolicy>(new PolicyAcross(3, "sz", Reference<IReplicationPolicy>(new PolicyOne()))) } ) ),

             //18 '(sz^1 x 1) + (sz^2 x 1) + (sz^3 x 1)'
-            IRepPolicyRef( new PolicyAnd( { IRepPolicyRef(new PolicyAcross(1, "sz", IRepPolicyRef(new PolicyOne()))), IRepPolicyRef(new PolicyAcross(2, "sz", IRepPolicyRef(new PolicyOne()))), IRepPolicyRef(new PolicyAcross(3, "sz", IRepPolicyRef(new PolicyOne()))) } ) ),
+            Reference<IReplicationPolicy>( new PolicyAnd( { Reference<IReplicationPolicy>(new PolicyAcross(1, "sz", Reference<IReplicationPolicy>(new PolicyOne()))), Reference<IReplicationPolicy>(new PolicyAcross(2, "sz", Reference<IReplicationPolicy>(new PolicyOne()))), Reference<IReplicationPolicy>(new PolicyAcross(3, "sz", Reference<IReplicationPolicy>(new PolicyOne()))) } ) ),

             //19 '(sz^1 x 1) + (machine^1 x 1)'
-            IRepPolicyRef( new PolicyAnd( { IRepPolicyRef(new PolicyAcross(1, "sz", IRepPolicyRef(new PolicyOne()))), IRepPolicyRef(new PolicyAcross(1, "zoneid", IRepPolicyRef(new PolicyOne()))) } ) ),
+            Reference<IReplicationPolicy>( new PolicyAnd( { Reference<IReplicationPolicy>(new PolicyAcross(1, "sz", Reference<IReplicationPolicy>(new PolicyOne()))), Reference<IReplicationPolicy>(new PolicyAcross(1, "zoneid", Reference<IReplicationPolicy>(new PolicyOne()))) } ) ),

             // '(dc^1 x 1) + (sz^1 x 1) + (machine^1 x 1)'
-            // IRepPolicyRef( new PolicyAnd( { IRepPolicyRef(new PolicyAcross(1, "dc", IRepPolicyRef(new PolicyOne()))), IRepPolicyRef(new PolicyAcross(1, "sz", IRepPolicyRef(new PolicyOne()))), IRepPolicyRef(new PolicyAcross(1, "zoneid", IRepPolicyRef(new PolicyOne()))) } ) ),
+            // Reference<IReplicationPolicy>( new PolicyAnd( { Reference<IReplicationPolicy>(new PolicyAcross(1, "dc", Reference<IReplicationPolicy>(new PolicyOne()))), Reference<IReplicationPolicy>(new PolicyAcross(1, "sz", Reference<IReplicationPolicy>(new PolicyOne()))), Reference<IReplicationPolicy>(new PolicyAcross(1, "zoneid", Reference<IReplicationPolicy>(new PolicyOne()))) } ) ),

             // '(dc^1 x sz^3 x 1)'
-            IRepPolicyRef( new PolicyAcross(1, "dc", IRepPolicyRef( new PolicyAcross(3, "sz", IRepPolicyRef(new PolicyOne())))) ),
+            Reference<IReplicationPolicy>( new PolicyAcross(1, "dc", Reference<IReplicationPolicy>( new PolicyAcross(3, "sz", Reference<IReplicationPolicy>(new PolicyOne())))) ),

             // '(dc^2 x sz^3 x 1)'
-            IRepPolicyRef( new PolicyAcross(2, "dc", IRepPolicyRef( new PolicyAcross(3, "sz", IRepPolicyRef(new PolicyOne())))) ),
+            Reference<IReplicationPolicy>( new PolicyAcross(2, "dc", Reference<IReplicationPolicy>( new PolicyAcross(3, "sz", Reference<IReplicationPolicy>(new PolicyOne())))) ),

             // '(dc^2 x az^3 x 1)'
-            IRepPolicyRef( new PolicyAcross(2, "dc", IRepPolicyRef( new PolicyAcross(3, "az", IRepPolicyRef(new PolicyOne())))) ),
+            Reference<IReplicationPolicy>( new PolicyAcross(2, "dc", Reference<IReplicationPolicy>( new PolicyAcross(3, "az", Reference<IReplicationPolicy>(new PolicyOne())))) ),

             // '(sz^1 x 1) + (dc^2 x az^3 x 1)'
-            IRepPolicyRef( new PolicyAnd({IRepPolicyRef(new PolicyAcross(1, "sz", IRepPolicyRef(new PolicyOne()))), IRepPolicyRef(new PolicyAcross(2, "dc", IRepPolicyRef( new PolicyAcross(3, "az", IRepPolicyRef(new PolicyOne())))))}) ),
+            Reference<IReplicationPolicy>( new PolicyAnd({Reference<IReplicationPolicy>(new PolicyAcross(1, "sz", Reference<IReplicationPolicy>(new PolicyOne()))), Reference<IReplicationPolicy>(new PolicyAcross(2, "dc", Reference<IReplicationPolicy>( new PolicyAcross(3, "az", Reference<IReplicationPolicy>(new PolicyOne())))))}) ),

             // 'dc^1 x (az^2 x 1) + (sz^2 x 1)'
-            // IRepPolicyRef( new PolicyAcross(1, "dc", IRepPolicyRef(new PolicyAnd({IRepPolicyRef(new PolicyAcross(2, "az", IRepPolicyRef(new PolicyOne()))), IRepPolicyRef(new PolicyAcross(2, "sz", IRepPolicyRef(new PolicyOne())))}))) ),
+            // Reference<IReplicationPolicy>( new PolicyAcross(1, "dc", Reference<IReplicationPolicy>(new PolicyAnd({Reference<IReplicationPolicy>(new PolicyAcross(2, "az", Reference<IReplicationPolicy>(new PolicyOne()))), Reference<IReplicationPolicy>(new PolicyAcross(2, "sz", Reference<IReplicationPolicy>(new PolicyOne())))}))) ),

             // Require backtracking
-            IRepPolicyRef( new PolicyAcross(8, "zoneid", IRepPolicyRef(new PolicyAcross(1, "az", IRepPolicyRef(new PolicyOne()))) ) ),
-            IRepPolicyRef( new PolicyAcross(8, "zoneid", IRepPolicyRef(new PolicyAcross(1, "sz", IRepPolicyRef(new PolicyOne()))) ) )
+            Reference<IReplicationPolicy>( new PolicyAcross(8, "zoneid", Reference<IReplicationPolicy>(new PolicyAcross(1, "az", Reference<IReplicationPolicy>(new PolicyOne()))) ) ),
+            Reference<IReplicationPolicy>( new PolicyAcross(8, "zoneid", Reference<IReplicationPolicy>(new PolicyAcross(1, "sz", Reference<IReplicationPolicy>(new PolicyOne()))) ) )
         };
     }
     return staticPolicies;
 }


-IRepPolicyRef const randomAcrossPolicy(LocalitySet const& serverSet)
+Reference<IReplicationPolicy> const randomAcrossPolicy(LocalitySet const& serverSet)
 {
     int usedKeyTotal, keysUsed, keyIndex, valueTotal, maxValueTotal, maxKeyTotal, skips, lastKeyIndex;
     std::vector<std::string> keyArray(serverSet.getGroupKeyMap()->_lookuparray);
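The inline comments above use a compact notation for these test policies: 'a^n x P' means PolicyAcross(n, "a", P), i.e. replicate across n distinct values of attribute a and apply P within each; '1' is PolicyOne (a single replica); and '+' combines requirements with PolicyAnd. So entry 10, '(dc^2 x sz^2 x 1)', is the following nested constructor call, repeated here from the list above in its new spelling:

// two datacenters; within each, two distinct 'sz' values; one replica per sz
Reference<IReplicationPolicy> policy(
    new PolicyAcross(2, "dc",
        Reference<IReplicationPolicy>(new PolicyAcross(2, "sz",
            Reference<IReplicationPolicy>(new PolicyOne())))));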
@ -616,7 +630,7 @@ IRepPolicyRef const randomAcrossPolicy(LocalitySet const& serverSet)
     AttribKey indexKey;
     Optional<AttribValue> keyValue;
     std::string keyText;
-    IRepPolicyRef policy(new PolicyOne());
+    Reference<IReplicationPolicy> policy(new PolicyOne());

     // Determine the number of keys to used within the policy
     usedKeyTotal = g_random->randomInt(1, keyArray.size()+1);

@ -669,7 +683,7 @@ IRepPolicyRef const randomAcrossPolicy(LocalitySet const& serverSet)
     }
     valueTotal = g_random->randomInt(1, valueSet.size()+2);
     if ((valueTotal > maxValueTotal) && (g_random->random01() > .25)) valueTotal = maxValueTotal;
-    policy = IRepPolicyRef( new PolicyAcross(valueTotal, keyText, policy) );
+    policy = Reference<IReplicationPolicy>( new PolicyAcross(valueTotal, keyText, policy) );
     if (g_replicationdebug > 1) {
         printf("  item%3d: (%3d =>%3d) %-10s =>%4d\n", keysUsed+1, keyIndex, indexKey._id, keyText.c_str(), valueTotal);
     }

@ -725,8 +739,8 @@ int testReplication()
     int policyMin = policyMinEnv ? atoi(policyMinEnv) : 2;
     int policyIndex, testCounter, alsoSize, debugBackup, maxAlsoSize;
     std::vector<repTestType> serverIndexes;
-    LocalitySetRef testServers;
-    std::vector<IRepPolicyRef> policies;
+    Reference<LocalitySet> testServers;
+    std::vector<Reference<IReplicationPolicy>> policies;
     std::vector<LocalityEntry> alsoServers, bestSet;
     int totalErrors = 0;

@ -819,12 +833,12 @@ void filterLocalityDataForPolicy(const std::set<std::string>& keys, LocalityData
     }
 }

-void filterLocalityDataForPolicy(IRepPolicyRef policy, LocalityData* ld) {
+void filterLocalityDataForPolicy(Reference<IReplicationPolicy> policy, LocalityData* ld) {
     if (!policy) return;
     filterLocalityDataForPolicy(policy->attributeKeys(), ld);
 }

-void filterLocalityDataForPolicy(IRepPolicyRef policy, std::vector<LocalityData>* vld) {
+void filterLocalityDataForPolicy(Reference<IReplicationPolicy> policy, std::vector<LocalityData>* vld) {
     if (!policy) return;
     std::set<std::string> keys = policy->attributeKeys();
     for (LocalityData& ld : *vld) {
@ -34,22 +34,22 @@ extern repTestType convertToTestType(int iValue);
 extern int testReplication();

 extern double ratePolicy(
-    LocalitySetRef & localitySet,
-    IRepPolicyRef const& policy,
+    Reference<LocalitySet> & localitySet,
+    Reference<IReplicationPolicy> const& policy,
     unsigned int nSelectTests);

 extern bool findBestPolicySet(
     std::vector<LocalityEntry>& bestResults,
-    LocalitySetRef & localitySet,
-    IRepPolicyRef const& policy,
+    Reference<LocalitySet> & localitySet,
+    Reference<IReplicationPolicy> const& policy,
     unsigned int nMinItems,
     unsigned int nSelectTests,
     unsigned int nPolicyTests);

 extern bool findBestUniquePolicySet(
     std::vector<LocalityEntry>& bestResults,
-    LocalitySetRef & localitySet,
-    IRepPolicyRef const& policy,
+    Reference<LocalitySet> & localitySet,
+    Reference<IReplicationPolicy> const& policy,
     StringRef localityUniquenessKey,
     unsigned int nMinItems,
     unsigned int nSelectTests,

@ -60,20 +60,20 @@ extern bool findBestUniquePolicySet(
 extern bool validateAllCombinations(
     std::vector<LocalityData> & offendingCombo,
     LocalityGroup const& localitySet,
-    IRepPolicyRef const& policy,
+    Reference<IReplicationPolicy> const& policy,
     std::vector<LocalityData> const& newItems,
     unsigned int nCombinationSize,
     bool bCheckIfValid = true);

 extern bool validateAllCombinations(
     LocalityGroup const& localitySet,
-    IRepPolicyRef const& policy,
+    Reference<IReplicationPolicy> const& policy,
     std::vector<LocalityData> const& newItems,
     unsigned int nCombinationSize,
     bool bCheckIfValid = true);

 /// Remove all pieces of locality information from the LocalityData that will not be used when validating the policy.
-void filterLocalityDataForPolicy(IRepPolicyRef policy, LocalityData* ld);
-void filterLocalityDataForPolicy(IRepPolicyRef policy, std::vector<LocalityData>* vld);
+void filterLocalityDataForPolicy(Reference<IReplicationPolicy> policy, LocalityData* ld);
+void filterLocalityDataForPolicy(Reference<IReplicationPolicy> policy, std::vector<LocalityData>* vld);

 #endif
@ -280,11 +280,11 @@ public:
     std::set<NetworkAddress> protectedAddresses;
     std::map<NetworkAddress, ProcessInfo*> currentlyRebootingProcesses;
     class ClusterConnectionString* extraDB;
-    IRepPolicyRef storagePolicy;
-    IRepPolicyRef tLogPolicy;
+    Reference<IReplicationPolicy> storagePolicy;
+    Reference<IReplicationPolicy> tLogPolicy;
     int32_t tLogWriteAntiQuorum;
     Optional<Standalone<StringRef>> primaryDcId;
-    IRepPolicyRef remoteTLogPolicy;
+    Reference<IReplicationPolicy> remoteTLogPolicy;
     int32_t usableRegions;
     std::string disablePrimary;
     std::string disableRemote;

@ -292,8 +292,8 @@ public:
     bool allowLogSetKills;
     Optional<Standalone<StringRef>> remoteDcId;
     bool hasSatelliteReplication;
-    IRepPolicyRef satelliteTLogPolicy;
-    IRepPolicyRef satelliteTLogPolicyFallback;
+    Reference<IReplicationPolicy> satelliteTLogPolicy;
+    Reference<IReplicationPolicy> satelliteTLogPolicyFallback;
     int32_t satelliteTLogWriteAntiQuorum;
     int32_t satelliteTLogWriteAntiQuorumFallback;
     std::vector<Optional<Standalone<StringRef>>> primarySatelliteDcIds;
@ -91,7 +91,7 @@ public:
     ProcessIssuesMap clientsWithIssues, workersWithIssues;
     std::map<NetworkAddress, double> incompatibleConnections;
     ClientVersionMap clientVersionMap;
-    std::map<NetworkAddress, std::string> traceLogGroupMap;
+    std::map<NetworkAddress, ClientStatusInfo> clientStatusInfoMap;
     AsyncTrigger forceMasterFailure;
     int64_t masterRegistrationCount;
     bool recoveryStalled;

@ -234,10 +234,10 @@ public:
         throw no_more_servers();
     }

-    std::vector<WorkerDetails> getWorkersForSeedServers( DatabaseConfiguration const& conf, IRepPolicyRef const& policy, Optional<Optional<Standalone<StringRef>>> const& dcId = Optional<Optional<Standalone<StringRef>>>() ) {
+    std::vector<WorkerDetails> getWorkersForSeedServers( DatabaseConfiguration const& conf, Reference<IReplicationPolicy> const& policy, Optional<Optional<Standalone<StringRef>>> const& dcId = Optional<Optional<Standalone<StringRef>>>() ) {
         std::map<ProcessClass::Fitness, vector<WorkerDetails>> fitness_workers;
         std::vector<WorkerDetails> results;
-        LocalitySetRef logServerSet = Reference<LocalitySet>(new LocalityMap<WorkerDetails>());
+        Reference<LocalitySet> logServerSet = Reference<LocalitySet>(new LocalityMap<WorkerDetails>());
         LocalityMap<WorkerDetails>* logServerMap = (LocalityMap<WorkerDetails>*) logServerSet.getPtr();
         bool bCompleted = false;

@ -275,11 +275,11 @@ public:
         return results;
     }

-    std::vector<WorkerDetails> getWorkersForTlogs( DatabaseConfiguration const& conf, int32_t required, int32_t desired, IRepPolicyRef const& policy, std::map< Optional<Standalone<StringRef>>, int>& id_used, bool checkStable = false, std::set<Optional<Key>> dcIds = std::set<Optional<Key>>() ) {
+    std::vector<WorkerDetails> getWorkersForTlogs( DatabaseConfiguration const& conf, int32_t required, int32_t desired, Reference<IReplicationPolicy> const& policy, std::map< Optional<Standalone<StringRef>>, int>& id_used, bool checkStable = false, std::set<Optional<Key>> dcIds = std::set<Optional<Key>>() ) {
         std::map<std::pair<ProcessClass::Fitness,bool>, vector<WorkerDetails>> fitness_workers;
         std::vector<WorkerDetails> results;
         std::vector<LocalityData> unavailableLocals;
-        LocalitySetRef logServerSet;
+        Reference<LocalitySet> logServerSet;
         LocalityMap<WorkerDetails>* logServerMap;
         bool bCompleted = false;

@ -1253,6 +1253,7 @@ ACTOR Future<Void> clusterOpenDatabase(
     UID knownClientInfoID,
     std::string issues,
     Standalone<VectorRef<ClientVersionRef>> supportedVersions,
+    int connectedCoordinatorsNum,
     Standalone<StringRef> traceLogGroup,
     ReplyPromise<ClientDBInfo> reply)
 {

@ -1264,7 +1265,8 @@ ACTOR Future<Void> clusterOpenDatabase(
         db->clientVersionMap[reply.getEndpoint().getPrimaryAddress()] = supportedVersions;
     }

-    db->traceLogGroupMap[reply.getEndpoint().getPrimaryAddress()] = traceLogGroup.toString();
+    db->clientStatusInfoMap[reply.getEndpoint().getPrimaryAddress()] = ClientStatusInfo(traceLogGroup.toString(), connectedCoordinatorsNum);

     while (db->clientInfo->get().id == knownClientInfoID) {
         choose {

@ -1275,7 +1277,7 @@ ACTOR Future<Void> clusterOpenDatabase(

     removeIssue( db->clientsWithIssues, reply.getEndpoint().getPrimaryAddress(), issues, issueID );
     db->clientVersionMap.erase(reply.getEndpoint().getPrimaryAddress());
-    db->traceLogGroupMap.erase(reply.getEndpoint().getPrimaryAddress());
+    db->clientStatusInfoMap.erase(reply.getEndpoint().getPrimaryAddress());

     reply.send( db->clientInfo->get() );
     return Void();

@ -1945,7 +1947,8 @@ ACTOR Future<Void> statusServer(FutureStream< StatusRequest> requests,
         }
     }

-    state ErrorOr<StatusReply> result = wait(errorOr(clusterGetStatus(self->db.serverInfo, self->cx, workers, self->db.workersWithIssues, self->db.clientsWithIssues, self->db.clientVersionMap, self->db.traceLogGroupMap, coordinators, incompatibleConnections, self->datacenterVersionDifference)));
+    state ErrorOr<StatusReply> result = wait(errorOr(clusterGetStatus(self->db.serverInfo, self->cx, workers, self->db.workersWithIssues, self->db.clientsWithIssues, self->db.clientVersionMap, self->db.clientStatusInfoMap, coordinators, incompatibleConnections, self->datacenterVersionDifference)));

     if (result.isError() && result.getError().code() == error_code_actor_cancelled)
         throw result.getError();

@ -2541,7 +2544,7 @@ ACTOR Future<Void> clusterControllerCore( ClusterControllerFullInterface interf,
             return Void();
         }
         when( OpenDatabaseRequest req = waitNext( interf.clientInterface.openDatabase.getFuture() ) ) {
-            self.addActor.send( clusterOpenDatabase( &self.db, req.knownClientInfoID, req.issues.toString(), req.supportedVersions, req.traceLogGroup, req.reply ) );
+            self.addActor.send( clusterOpenDatabase( &self.db, req.knownClientInfoID, req.issues.toString(), req.supportedVersions, req.connectedCoordinatorsNum, req.traceLogGroup, req.reply ) );
         }
         when( RecruitFromConfigurationRequest req = waitNext( interf.recruitFromConfiguration.getFuture() ) ) {
             self.addActor.send( clusterRecruitFromConfiguration( &self, req ) );
@ -41,7 +41,7 @@ struct CoreTLogSet {
     int32_t tLogWriteAntiQuorum; // The write anti quorum previously used to write to tLogs, which might be different from the anti quorum suggested by the current configuration going forward!
     int32_t tLogReplicationFactor; // The replication factor previously used to write to tLogs, which might be different from the current configuration
     std::vector< LocalityData > tLogLocalities; // Stores the localities of the log servers
-    IRepPolicyRef tLogPolicy;
+    Reference<IReplicationPolicy> tLogPolicy;
     bool isLocal;
     int8_t locality;
     Version startVersion;
@ -3708,7 +3708,7 @@ ACTOR Future<Void> dataDistributor(DataDistributorInterface di, Reference<AsyncV
     return Void();
 }

-DDTeamCollection* testTeamCollection(int teamSize, IRepPolicyRef policy, int processCount) {
+DDTeamCollection* testTeamCollection(int teamSize, Reference<IReplicationPolicy> policy, int processCount) {
     Database database = DatabaseContext::create(
         Reference<AsyncVar<ClientDBInfo>>(new AsyncVar<ClientDBInfo>()),
         Never(),

@ -3750,7 +3750,7 @@ DDTeamCollection* testTeamCollection(int teamSize, IRepPolicyRef policy, int pro
     return collection;
 }

-DDTeamCollection* testMachineTeamCollection(int teamSize, IRepPolicyRef policy, int processCount) {
+DDTeamCollection* testMachineTeamCollection(int teamSize, Reference<IReplicationPolicy> policy, int processCount) {
     Database database = DatabaseContext::create(Reference<AsyncVar<ClientDBInfo>>(new AsyncVar<ClientDBInfo>()),
                                                 Never(), LocalityData(), false);

@ -3802,7 +3802,7 @@ TEST_CASE("DataDistribution/AddTeamsBestOf/UseMachineID") {
     int desiredTeams = SERVER_KNOBS->DESIRED_TEAMS_PER_SERVER * processSize;
     int maxTeams = SERVER_KNOBS->MAX_TEAMS_PER_SERVER * processSize;

-    IRepPolicyRef policy = IRepPolicyRef(new PolicyAcross(teamSize, "zoneid", IRepPolicyRef(new PolicyOne())));
+    Reference<IReplicationPolicy> policy = Reference<IReplicationPolicy>(new PolicyAcross(teamSize, "zoneid", Reference<IReplicationPolicy>(new PolicyOne())));
     state DDTeamCollection* collection = testMachineTeamCollection(teamSize, policy, processSize);

     int result = collection->addTeamsBestOf(30, desiredTeams, maxTeams);

@ -3822,7 +3822,7 @@ TEST_CASE("DataDistribution/AddTeamsBestOf/NotUseMachineID") {
     int desiredTeams = SERVER_KNOBS->DESIRED_TEAMS_PER_SERVER * processSize;
     int maxTeams = SERVER_KNOBS->MAX_TEAMS_PER_SERVER * processSize;

-    IRepPolicyRef policy = IRepPolicyRef(new PolicyAcross(teamSize, "zoneid", IRepPolicyRef(new PolicyOne())));
+    Reference<IReplicationPolicy> policy = Reference<IReplicationPolicy>(new PolicyAcross(teamSize, "zoneid", Reference<IReplicationPolicy>(new PolicyOne())));
     state DDTeamCollection* collection = testMachineTeamCollection(teamSize, policy, processSize);

     if (collection == NULL) {

@ -3840,7 +3840,7 @@ TEST_CASE("DataDistribution/AddTeamsBestOf/NotUseMachineID") {
 }

 TEST_CASE("DataDistribution/AddAllTeams/isExhaustive") {
-    IRepPolicyRef policy = IRepPolicyRef(new PolicyAcross(3, "zoneid", IRepPolicyRef(new PolicyOne())));
+    Reference<IReplicationPolicy> policy = Reference<IReplicationPolicy>(new PolicyAcross(3, "zoneid", Reference<IReplicationPolicy>(new PolicyOne())));
     state int processSize = 10;
     state int desiredTeams = SERVER_KNOBS->DESIRED_TEAMS_PER_SERVER * processSize;
     state int maxTeams = SERVER_KNOBS->MAX_TEAMS_PER_SERVER * processSize;

@ -3859,7 +3859,7 @@ TEST_CASE("DataDistribution/AddAllTeams/isExhaustive") {
 }

 TEST_CASE("/DataDistribution/AddAllTeams/withLimit") {
-    IRepPolicyRef policy = IRepPolicyRef(new PolicyAcross(3, "zoneid", IRepPolicyRef(new PolicyOne())));
+    Reference<IReplicationPolicy> policy = Reference<IReplicationPolicy>(new PolicyAcross(3, "zoneid", Reference<IReplicationPolicy>(new PolicyOne())));
     state int processSize = 10;
     state int desiredTeams = SERVER_KNOBS->DESIRED_TEAMS_PER_SERVER * processSize;
     state int maxTeams = SERVER_KNOBS->MAX_TEAMS_PER_SERVER * processSize;

@ -3877,7 +3877,7 @@ TEST_CASE("/DataDistribution/AddAllTeams/withLimit") {

 TEST_CASE("/DataDistribution/AddTeamsBestOf/SkippingBusyServers") {
     wait(Future<Void>(Void()));
-    IRepPolicyRef policy = IRepPolicyRef(new PolicyAcross(3, "zoneid", IRepPolicyRef(new PolicyOne())));
+    Reference<IReplicationPolicy> policy = Reference<IReplicationPolicy>(new PolicyAcross(3, "zoneid", Reference<IReplicationPolicy>(new PolicyOne())));
     state int processSize = 10;
     state int desiredTeams = SERVER_KNOBS->DESIRED_TEAMS_PER_SERVER * processSize;
     state int maxTeams = SERVER_KNOBS->MAX_TEAMS_PER_SERVER * processSize;

@ -3907,7 +3907,7 @@ TEST_CASE("/DataDistribution/AddTeamsBestOf/SkippingBusyServers") {
 TEST_CASE("/DataDistribution/AddTeamsBestOf/NotEnoughServers") {
     wait(Future<Void>(Void()));

-    IRepPolicyRef policy = IRepPolicyRef(new PolicyAcross(3, "zoneid", IRepPolicyRef(new PolicyOne())));
+    Reference<IReplicationPolicy> policy = Reference<IReplicationPolicy>(new PolicyAcross(3, "zoneid", Reference<IReplicationPolicy>(new PolicyOne())));
     state int processSize = 5;
     state int desiredTeams = SERVER_KNOBS->DESIRED_TEAMS_PER_SERVER * processSize;
     state int maxTeams = SERVER_KNOBS->MAX_TEAMS_PER_SERVER * processSize;
@ -382,6 +382,7 @@ public:
     int64_t TIME_KEEPER_DELAY;
     int64_t TIME_KEEPER_MAX_ENTRIES;

+
     ServerKnobs(bool randomize = false, ClientKnobs* clientKnobs = NULL);
 };

@ -40,8 +40,8 @@ public:
     int32_t tLogReplicationFactor;
     std::vector< LocalityData > tLogLocalities; // Stores the localities of the log servers
     TLogVersion tLogVersion;
-    IRepPolicyRef tLogPolicy;
-    LocalitySetRef logServerSet;
+    Reference<IReplicationPolicy> tLogPolicy;
+    Reference<LocalitySet> logServerSet;
     std::vector<int> logIndexArray;
     std::vector<LocalityEntry> logEntryArray;
     bool isLocal;

@ -84,7 +84,7 @@ public:
         used_servers.insert(std::make_pair(0,i));
     }

-    LocalitySetRef serverSet = Reference<LocalitySet>(new LocalityMap<std::pair<int,int>>());
+    Reference<LocalitySet> serverSet = Reference<LocalitySet>(new LocalityMap<std::pair<int,int>>());
     LocalityMap<std::pair<int,int>>* serverMap = (LocalityMap<std::pair<int,int>>*) serverSet.getPtr();
     std::vector<std::pair<int,int>> resultPairs;
     for(int loc = 0; loc < satelliteTagLocations.size(); loc++) {

@ -189,7 +189,7 @@ public:
     void updateLocalitySet( vector<LocalityData> const& localities ) {
         LocalityMap<int>* logServerMap;

-        logServerSet = LocalitySetRef(new LocalityMap<int>());
+        logServerSet = Reference<LocalitySet>(new LocalityMap<int>());
         logServerMap = (LocalityMap<int>*) logServerSet.getPtr();

         logEntryArray.clear();

@ -412,7 +412,7 @@ struct ILogSystem {
         int tLogReplicationFactor;

         MergedPeekCursor( vector< Reference<ILogSystem::IPeekCursor> > const& serverCursors, Version begin );
-        MergedPeekCursor( std::vector<Reference<AsyncVar<OptionalInterface<TLogInterface>>>> const& logServers, int bestServer, int readQuorum, Tag tag, Version begin, Version end, bool parallelGetMore, std::vector<LocalityData> const& tLogLocalities, IRepPolicyRef const tLogPolicy, int tLogReplicationFactor );
+        MergedPeekCursor( std::vector<Reference<AsyncVar<OptionalInterface<TLogInterface>>>> const& logServers, int bestServer, int readQuorum, Tag tag, Version begin, Version end, bool parallelGetMore, std::vector<LocalityData> const& tLogLocalities, Reference<IReplicationPolicy> const tLogPolicy, int tLogReplicationFactor );
         MergedPeekCursor( vector< Reference<IPeekCursor> > const& serverCursors, LogMessageVersion const& messageVersion, int bestServer, int readQuorum, Optional<LogMessageVersion> nextVersion, Reference<LogSet> logSet, int tLogReplicationFactor );

         virtual Reference<IPeekCursor> cloneNoMore();
@ -61,7 +61,7 @@ struct TLogSet {
     int32_t tLogWriteAntiQuorum, tLogReplicationFactor;
     std::vector< LocalityData > tLogLocalities; // Stores the localities of the log servers
     TLogVersion tLogVersion;
-    IRepPolicyRef tLogPolicy;
+    Reference<IReplicationPolicy> tLogPolicy;
     bool isLocal;
     int8_t locality;
     Version startVersion;

@ -273,7 +273,7 @@ ILogSystem::MergedPeekCursor::MergedPeekCursor( vector< Reference<ILogSystem::IP
 }

 ILogSystem::MergedPeekCursor::MergedPeekCursor( std::vector<Reference<AsyncVar<OptionalInterface<TLogInterface>>>> const& logServers, int bestServer, int readQuorum, Tag tag, Version begin, Version end,
-    bool parallelGetMore, std::vector< LocalityData > const& tLogLocalities, IRepPolicyRef const tLogPolicy, int tLogReplicationFactor )
+    bool parallelGetMore, std::vector< LocalityData > const& tLogLocalities, Reference<IReplicationPolicy> const tLogPolicy, int tLogReplicationFactor )
     : bestServer(bestServer), readQuorum(readQuorum), tag(tag), currentCursor(0), hasNextMessage(false), messageVersion(begin), randomID(g_random->randomUniqueID()), tLogReplicationFactor(tLogReplicationFactor) {
     if(tLogPolicy) {
         logSet = Reference<LogSet>( new LogSet() );
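The `if(tLogPolicy)` guard works because Reference<T> is FoundationDB's intrusive smart pointer and a default-constructed one holds a null pointer and tests false; that is also how the MergedPeekCursor call sites elsewhere in this diff pass Reference<IReplicationPolicy>() to mean "no policy". A small sketch of the convention:

Reference<IReplicationPolicy> noPolicy;               // default-constructed: null
ASSERT(!noPolicy);                                    // tests false: "no policy"
Reference<IReplicationPolicy> one(new PolicyOne());   // non-null
ASSERT(one);                                          // policy present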
@ -810,7 +810,8 @@ ACTOR static Future<JsonBuilderObject> processStatusFetcher(
     return processMap;
 }

-static JsonBuilderObject clientStatusFetcher(ClientVersionMap clientVersionMap, std::map<NetworkAddress, std::string> traceLogGroupMap) {
+static JsonBuilderObject clientStatusFetcher(ClientVersionMap clientVersionMap,
+                                             std::map<NetworkAddress, ClientStatusInfo> clientStatusInfoMap) {
     JsonBuilderObject clientStatus;

     clientStatus["count"] = (int64_t)clientVersionMap.size();

@ -834,7 +835,9 @@ static JsonBuilderObject clientStatusFetcher(ClientVersionMap clientVersionMap,
     for(auto client : cv.second) {
         JsonBuilderObject cli;
         cli["address"] = client.toString();
-        cli["log_group"] = traceLogGroupMap[client];
+        ASSERT(clientStatusInfoMap.find(client) != clientStatusInfoMap.end());
+        cli["log_group"] = clientStatusInfoMap[client].traceLogGroup;
+        cli["connected_coordinators"] = (int) clientStatusInfoMap[client].connectedCoordinatorsNum;
         clients.push_back(cli);
     }

@ -1806,7 +1809,7 @@ ACTOR Future<StatusReply> clusterGetStatus(
     ProcessIssuesMap workerIssues,
     ProcessIssuesMap clientIssues,
     ClientVersionMap clientVersionMap,
-    std::map<NetworkAddress, std::string> traceLogGroupMap,
+    std::map<NetworkAddress, ClientStatusInfo> clientStatusInfoMap,
     ServerCoordinators coordinators,
     std::vector<NetworkAddress> incompatibleConnections,
     Version datacenterVersionDifference )

@ -2039,7 +2042,7 @@ ACTOR Future<StatusReply> clusterGetStatus(

     JsonBuilderObject processStatus = wait(processStatusFetcher(db, workers, pMetrics, mMetrics, latestError, traceFileOpenErrors, programStarts, processIssues, storageServers, tLogs, proxies, cx, configuration, &status_incomplete_reasons));
     statusObj["processes"] = processStatus;
-    statusObj["clients"] = clientStatusFetcher(clientVersionMap, traceLogGroupMap);
+    statusObj["clients"] = clientStatusFetcher(clientVersionMap, clientStatusInfoMap);

     JsonBuilderArray incompatibleConnectionsArray;
     for(auto it : incompatibleConnections) {
@ -30,8 +30,16 @@
 typedef std::map< NetworkAddress, std::pair<std::string,UID> > ProcessIssuesMap;
 typedef std::map< NetworkAddress, Standalone<VectorRef<ClientVersionRef>> > ClientVersionMap;

+struct ClientStatusInfo {
+    std::string traceLogGroup;
+    int connectedCoordinatorsNum;
+
+    ClientStatusInfo() : connectedCoordinatorsNum(0) {}
+    ClientStatusInfo(std::string const& traceLogGroup, int const connectedCoordinatorsNum) : traceLogGroup(traceLogGroup), connectedCoordinatorsNum(connectedCoordinatorsNum) {}
+};
+
 Future<StatusReply> clusterGetStatus( Reference<AsyncVar<struct ServerDBInfo>> const& db, Database const& cx, vector<WorkerDetails> const& workers,
-    ProcessIssuesMap const& workerIssues, ProcessIssuesMap const& clientIssues, ClientVersionMap const& clientVersionMap, std::map<NetworkAddress, std::string> const& traceLogGroupMap,
+    ProcessIssuesMap const& workerIssues, ProcessIssuesMap const& clientIssues, ClientVersionMap const& clientVersionMap, std::map<NetworkAddress, struct ClientStatusInfo> const& clientStatusInfoMap,
     ServerCoordinators const& coordinators, std::vector<NetworkAddress> const& incompatibleConnections, Version const& datacenterVersionDifference );

 #endif
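ClientStatusInfo bundles the two per-client facts the status path now reports: the trace log group the client asked for and how many coordinators it is connected to. A minimal usage sketch mirroring clusterOpenDatabase and clientStatusFetcher above; the address value is illustrative:

std::map<NetworkAddress, ClientStatusInfo> clientStatusInfoMap;

// On open: record the client's trace log group and coordinator-connection count.
NetworkAddress addr = NetworkAddress::parse("10.0.0.1:4500");
clientStatusInfoMap[addr] = ClientStatusInfo("default", /*connectedCoordinatorsNum=*/3);

// In status: these become the "log_group" and "connected_coordinators" fields
// of each entry under the "clients" object.
const ClientStatusInfo& info = clientStatusInfoMap[addr];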
@ -531,12 +531,12 @@ struct TagPartitionedLogSystem : ILogSystem, ReferenceCounted<TagPartitionedLogS
         }
         if(begin >= lastBegin) {
             TraceEvent("TLogPeekRemoteBestOnly", dbgid).detail("Tag", tag.toString()).detail("Begin", begin).detail("BestSet", bestSet).detail("BestSetStart", lastBegin).detail("LogRouterIds", tLogs[bestSet]->logRouterString());
-            return Reference<ILogSystem::MergedPeekCursor>( new ILogSystem::MergedPeekCursor( tLogs[bestSet]->logRouters, -1, (int)tLogs[bestSet]->logRouters.size(), tag, begin, getPeekEnd(), false, std::vector<LocalityData>(), IRepPolicyRef(), 0 ) );
+            return Reference<ILogSystem::MergedPeekCursor>( new ILogSystem::MergedPeekCursor( tLogs[bestSet]->logRouters, -1, (int)tLogs[bestSet]->logRouters.size(), tag, begin, getPeekEnd(), false, std::vector<LocalityData>(), Reference<IReplicationPolicy>(), 0 ) );
         } else {
             std::vector< Reference<ILogSystem::IPeekCursor> > cursors;
             std::vector< LogMessageVersion > epochEnds;
             TraceEvent("TLogPeekRemoteAddingBest", dbgid).detail("Tag", tag.toString()).detail("Begin", begin).detail("BestSet", bestSet).detail("BestSetStart", lastBegin).detail("LogRouterIds", tLogs[bestSet]->logRouterString());
-            cursors.push_back( Reference<ILogSystem::MergedPeekCursor>( new ILogSystem::MergedPeekCursor( tLogs[bestSet]->logRouters, -1, (int)tLogs[bestSet]->logRouters.size(), tag, lastBegin, getPeekEnd(), false, std::vector<LocalityData>(), IRepPolicyRef(), 0 ) ) );
+            cursors.push_back( Reference<ILogSystem::MergedPeekCursor>( new ILogSystem::MergedPeekCursor( tLogs[bestSet]->logRouters, -1, (int)tLogs[bestSet]->logRouters.size(), tag, lastBegin, getPeekEnd(), false, std::vector<LocalityData>(), Reference<IReplicationPolicy>(), 0 ) ) );
             int i = 0;
             while(begin < lastBegin) {
                 if(i == oldLogData.size()) {

@ -565,7 +565,7 @@ struct TagPartitionedLogSystem : ILogSystem, ReferenceCounted<TagPartitionedLogS
                 TraceEvent("TLogPeekRemoteAddingOldBest", dbgid).detail("Tag", tag.toString()).detail("Begin", begin).detail("BestOldSet", bestOldSet).detail("LogRouterIds", oldLogData[i].tLogs[bestOldSet]->logRouterString())
                     .detail("LastBegin", lastBegin).detail("ThisBegin", thisBegin).detail("BestStartVer", oldLogData[i].tLogs[bestOldSet]->startVersion);
                 cursors.push_back( Reference<ILogSystem::MergedPeekCursor>( new ILogSystem::MergedPeekCursor( oldLogData[i].tLogs[bestOldSet]->logRouters, -1, (int)oldLogData[i].tLogs[bestOldSet]->logRouters.size(), tag,
-                    thisBegin, lastBegin, false, std::vector<LocalityData>(), IRepPolicyRef(), 0 ) ) );
+                    thisBegin, lastBegin, false, std::vector<LocalityData>(), Reference<IReplicationPolicy>(), 0 ) ) );
                 epochEnds.push_back(LogMessageVersion(lastBegin));
                 lastBegin = thisBegin;
             }

@ -959,24 +959,17 @@ struct TagPartitionedLogSystem : ILogSystem, ReferenceCounted<TagPartitionedLogS

         wait( quorum( alive, std::min(logSet->tLogReplicationFactor, numPresent - logSet->tLogWriteAntiQuorum) ) );

-        state Reference<LocalityGroup> locked(new LocalityGroup());
-        state std::vector<bool> responded(alive.size());
-        for (int i = 0; i < alive.size(); i++) {
-            responded[i] = false;
-        }
+        state std::vector<LocalityEntry> aliveEntries;
+        state std::vector<bool> responded(alive.size(), false);
         loop {
             for (int i = 0; i < alive.size(); i++) {
                 if (!responded[i] && alive[i].isReady() && !alive[i].isError()) {
-                    locked->add(logSet->tLogLocalities[i]);
+                    aliveEntries.push_back(logSet->logEntryArray[i]);
                     responded[i] = true;
                 }
             }
-            bool quorum_obtained = locked->validate(logSet->tLogPolicy);
-            // We intentionally skip considering antiquorums, as the CPU cost of doing so is prohibitive.
-            if (logSet->tLogReplicationFactor == 1 && locked->size() > 0) {
-                ASSERT(quorum_obtained);
-            }
-            if (quorum_obtained) {
+
+            if (logSet->satisfiesPolicy(aliveEntries)) {
                 return Void();
             }

@ -1566,7 +1559,7 @@ struct TagPartitionedLogSystem : ILogSystem, ReferenceCounted<TagPartitionedLogS
     }

     ACTOR static Future<Void> recruitOldLogRouters( TagPartitionedLogSystem* self, vector<WorkerInterface> workers, LogEpoch recoveryCount, int8_t locality, Version startVersion,
-        std::vector<LocalityData> tLogLocalities, IRepPolicyRef tLogPolicy, bool forRemote ) {
+        std::vector<LocalityData> tLogLocalities, Reference<IReplicationPolicy> tLogPolicy, bool forRemote ) {
         state vector<vector<Future<TLogInterface>>> logRouterInitializationReplies;
         state vector<Future<TLogInterface>> allReplies;
         int nextRouter = 0;

@ -120,7 +120,7 @@ struct InitializeLogRouterRequest {
     Tag routerTag;
     Version startVersion;
     std::vector<LocalityData> tLogLocalities;
-    IRepPolicyRef tLogPolicy;
+    Reference<IReplicationPolicy> tLogPolicy;
     int8_t locality;
     ReplyPromise<struct TLogInterface> reply;

@ -192,6 +192,8 @@ struct BackupAndRestoreCorrectnessWorkload : TestWorkload {
             loop {
                 std::string status = wait(agent.getStatus(cx, true, tag));
                 puts(status.c_str());
+                std::string statusJSON = wait(agent.getStatusJSON(cx, tag));
+                puts(statusJSON.c_str());
                 wait(delay(2.0));
             }
         }
@ -625,7 +625,7 @@ struct SendBuffer {

 struct PacketBuffer : SendBuffer, FastAllocated<PacketBuffer> {
     int reference_count;
-    enum { DATA_SIZE = 4096 - 28 };
+    enum { DATA_SIZE = 4096 - 28 }; //28 is the size of the PacketBuffer fields
     uint8_t data[ DATA_SIZE ];

     PacketBuffer() : reference_count(1) {
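The new comment documents the arithmetic: DATA_SIZE is chosen so a PacketBuffer occupies exactly one 4 KiB allocation, with 28 bytes taken by the inherited SendBuffer fields plus reference_count. A sketch of the accounting on a typical 64-bit platform; the field layout below is illustrative, not the actual flow definition:

#include <cstdint>

struct SendBufferSketch {
    uint8_t const* data;      //  8 bytes on a 64-bit platform
    SendBufferSketch* next;   //  8
    int bytes_written;        //  4
    int bytes_sent;           //  4   -> 24 bytes of SendBuffer fields
};

struct PacketBufferSketch : SendBufferSketch {
    int reference_count;      //  4   -> 28 bytes of fields in total
    enum { DATA_SIZE = 4096 - 28 };
    uint8_t payload[DATA_SIZE];
};

static_assert(sizeof(PacketBufferSketch) == 4096, "one PacketBuffer fills a 4 KiB page exactly");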