Merge branch 'release-6.0' of github.com:apple/foundationdb into fix-fdbmonitor-kill-behavior
commit 30b16917ff
@@ -58,14 +58,17 @@ class Result:
         self.key_tuple = subspace.unpack(key)
         self.values = values
 
-    def matches(self, rhs, specification):
+    def matches_key(self, rhs, specification):
         if not isinstance(rhs, Result):
             return False
 
         left_key = self.key_tuple[specification.key_start_index:]
-        right_key = self.key_tuple[specification.key_start_index:]
+        right_key = rhs.key_tuple[specification.key_start_index:]
 
-        if len(left_key) != len(right_key) or left_key != right_key:
+        return left_key == right_key
+
+    def matches(self, rhs, specification):
+        if not self.matches_key(rhs, specification):
             return False
 
         for value in self.values:
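Note on the fix above: the old ``matches`` compared ``self.key_tuple`` against itself, so differing keys could never fail the check, and tuple equality already covers the dropped length test. A minimal standalone sketch (illustrative names):

```python
def keys_match(left_tuple, right_tuple, key_start_index):
    # Compare only the part of each key tuple after the subspace prefix.
    # Tuple equality already implies equal length, so one comparison suffices.
    return left_tuple[key_start_index:] == right_tuple[key_start_index:]

assert keys_match(('stack', 1, 'a'), ('stack2', 1, 'a'), 1)
assert not keys_match(('stack', 1, 'a'), ('stack', 2, 'a'), 1)
```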
@@ -90,7 +90,7 @@ class ResultSet(object):
             if any([s is not None for s in sequence_nums]):
                 results = {i: r for i, r in results.items() if r.sequence_num(self.specification) == min(sequence_nums)}
             else:
-                results = {i: r for i, r in results.items() if r.matches(min(results.values()), self.specification)}
+                results = {i: r for i, r in results.items() if r.matches_key(min(results.values()), self.specification)}
 
             for i in results.keys():
                 indices[i] += 1
@@ -181,8 +181,8 @@ class InstructionSet(TestInstructions, list):
             tr[subspace.pack((start + i,))] = instruction.to_value()
 
     def insert_operations(self, db, subspace):
-        for i in range(0, int(math.ceil(len(self) / 1000.0))):
-            self._insert_operations_transactional(db, subspace, i * 1000, 1000)
+        for i in range(0, int(math.ceil(len(self) / 5000.0))):
+            self._insert_operations_transactional(db, subspace, i * 5000, 5000)
 
 
 class ThreadedInstructionSet(TestInstructions):
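The chunk size caps how many operations land in each transaction. A minimal sketch of the same chunking pattern, with hypothetical names (``items``, ``insert_chunk``):

```python
import math

def insert_in_chunks(db, items, chunk_size=5000):
    # One transaction per chunk keeps each commit well under
    # FoundationDB's 10,000,000-byte transaction size limit.
    for i in range(int(math.ceil(len(items) / float(chunk_size)))):
        chunk = items[i * chunk_size:(i + 1) * chunk_size]
        insert_chunk(db, chunk)  # hypothetical transactional helper
```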
@@ -0,0 +1,91 @@
+#
+# tuple.py
+#
+# This source file is part of the FoundationDB open source project
+#
+# Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+import random
+import struct
+
+import fdb
+import fdb.tuple
+
+from bindingtester import FDB_API_VERSION
+from bindingtester import util
+
+from bindingtester.tests import Test, Instruction, InstructionSet, ResultSpecification
+from bindingtester.tests import test_util
+
+fdb.api_version(FDB_API_VERSION)
+
+
+class TupleTest(Test):
+    def __init__(self, subspace):
+        super(TupleTest, self).__init__(subspace)
+        self.workspace = self.subspace['workspace']  # The keys and values here must match between subsequent runs of the same test
+        self.stack_subspace = self.subspace['stack']
+
+    def setup(self, args):
+        self.max_int_bits = args.max_int_bits
+        self.api_version = args.api_version
+
+    def generate(self, args, thread_number):
+        instructions = InstructionSet()
+
+        min_value = -2**self.max_int_bits+1
+        max_value = 2**self.max_int_bits-1
+
+        instructions.append('NEW_TRANSACTION')
+
+        # Test integer encoding
+        mutations = 0
+        for i in range(0, self.max_int_bits+1):
+            for sign in [-1, 1]:
+                sign_str = '' if sign == 1 else '-'
+                for offset in range(-10, 11):
+                    val = (2**i) * sign + offset
+                    if val >= min_value and val <= max_value:
+                        if offset == 0:
+                            add_str = ''
+                        elif offset > 0:
+                            add_str = '+%d' % offset
+                        else:
+                            add_str = '%d' % offset
+
+                        instructions.push_args(1, val)
+                        instructions.append('TUPLE_PACK')
+                        instructions.push_args(self.workspace.pack(('%s2^%d%s' % (sign_str, i, add_str),)))
+                        instructions.append('SET')
+                        mutations += 1
+
+            if mutations >= 5000:
+                test_util.blocking_commit(instructions)
+                mutations = 0
+
+        instructions.begin_finalization()
+
+        test_util.blocking_commit(instructions)
+        instructions.push_args(self.stack_subspace.key())
+        instructions.append('LOG_STACK')
+
+        test_util.blocking_commit(instructions)
+
+        return instructions
+
+    def get_result_specifications(self):
+        return [
+            ResultSpecification(self.workspace, global_error_filter=[1007, 1021]),
+            ResultSpecification(self.stack_subspace, key_start_index=1, ordering_index=1, global_error_filter=[1007, 1021]),
+        ]
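The generator sweeps every power-of-two boundary that the tuple layer's variable-length integer encoding must get right. A hedged standalone sketch of the same sweep, round-tripping through ``fdb.tuple`` (API version 600 chosen only as an example):

```python
import fdb
fdb.api_version(600)  # example version; pick the one your cluster supports
import fdb.tuple

def boundary_values(max_int_bits):
    # Values of the form +/- 2**i + offset, clamped to the representable range.
    lo, hi = -2**max_int_bits + 1, 2**max_int_bits - 1
    for i in range(max_int_bits + 1):
        for sign in (-1, 1):
            for offset in range(-10, 11):
                val = sign * 2**i + offset
                if lo <= val <= hi:
                    yield val

for v in boundary_values(64):
    assert fdb.tuple.unpack(fdb.tuple.pack((v,)))[0] == v
```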
@@ -7,7 +7,7 @@ This package requires:
 
 - Go 1.1+ with CGO enabled
 - [Mono](http://www.mono-project.com/) (macOS or Windows) or [Visual Studio](https://www.visualstudio.com/) (Windows) (build-time only)
-- FoundationDB C API 2.0.x, 3.0.x, or 4.x.y (part of the [FoundationDB clients package](https://apple.github.io/foundationdb/downloads.html#c))
+- FoundationDB C API 2.0.x-6.0.x (part of the [FoundationDB clients package](https://apple.github.io/foundationdb/downloads.html#c))
 
 Use of this package requires the selection of a FoundationDB API version at runtime. This package currently supports FoundationDB API versions 200-600.
@@ -87,6 +87,13 @@ func fdb_future_block_until_ready(f *C.FDBFuture) {
 		return
 	}
 
+	// The mutex here is used as a signal that the callback is complete.
+	// We first lock it, then pass it to the callback, and then lock it
+	// again. The second call to lock won't return until the callback has
+	// fired.
+	//
+	// See https://groups.google.com/forum/#!topic/golang-nuts/SPjQEcsdORA
+	// for the history of why this pattern came to be used.
 	m := &sync.Mutex{}
 	m.Lock()
 	C.go_set_callback(unsafe.Pointer(f), unsafe.Pointer(m))
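The same lock-handoff idiom works anywhere mutexes exist; a minimal Python rendering of the pattern the new comment describes, with the callback simulated by a thread:

```python
import threading

m = threading.Lock()
m.acquire()  # take the lock before handing it to the callback

def callback():
    m.release()  # the callback signals completion by releasing

threading.Thread(target=callback).start()
m.acquire()  # blocks until the callback has fired
```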
@@ -286,6 +286,8 @@ func (ri *RangeIterator) MustGet() KeyValue {
 	return kv
 }
 
+// Strinc returns the first key that would sort outside the range prefixed by
+// prefix, or an error if prefix is empty or contains only 0xFF bytes.
 func Strinc(prefix []byte) ([]byte, error) {
 	for i := len(prefix) - 1; i >= 0; i-- {
 		if prefix[i] != 0xFF {
@@ -311,7 +313,7 @@ func PrefixRange(prefix []byte) (KeyRange, error) {
 	copy(begin, prefix)
 	end, e := Strinc(begin)
 	if e != nil {
-		return KeyRange{}, nil
+		return KeyRange{}, e
 	}
 	return KeyRange{Key(begin), Key(end)}, nil
 }
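Before this fix, a prefix consisting entirely of 0xFF bytes made ``PrefixRange`` swallow the ``Strinc`` error and return an empty range with a nil error. For intuition, a hedged Python sketch of the "strinc" computation itself:

```python
def strinc(prefix: bytes) -> bytes:
    # Strip trailing 0xFF bytes; incrementing the last remaining byte yields
    # the first key that sorts after everything with the given prefix.
    stripped = prefix.rstrip(b'\xff')
    if not stripped:
        raise ValueError('prefix is empty or contains only 0xFF bytes')
    return stripped[:-1] + bytes([stripped[-1] + 1])

assert strinc(b'apple') == b'applf'
```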
@@ -1524,15 +1524,20 @@ def init(event_model=None):
                 pass
 
             class ThreadEvent(object):
+                has_async_ = hasattr(gevent.get_hub().loop, 'async_')
                 def __init__(self):
-                    self.async = gevent.get_hub().loop.async()
-                    self.async.start(nullf)
+                    if ThreadEvent.has_async_:
+                        self.gevent_async = gevent.get_hub().loop.async_()
+                    else:
+                        self.gevent_async = getattr(gevent.get_hub().loop, 'async')()
+
+                    self.gevent_async.start(nullf)
 
                 def set(self):
-                    self.async.send()
+                    self.gevent_async.send()
 
                 def wait(self):
-                    gevent.get_hub().wait(self.async)
+                    gevent.get_hub().wait(self.gevent_async)
         else:
             # gevent 0.x doesn't have async, so use a pipe. This doesn't work on Windows.
             if platform.system() == 'Windows':
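``async`` became a reserved word in Python 3.7, so ``self.async`` and ``loop.async()`` no longer even parse; the rename plus ``getattr`` keeps older gevent versions working. A minimal illustration (toy ``Loop`` class):

```python
class Loop(object):
    pass

loop = Loop()
setattr(loop, 'async', lambda: 'watcher')  # attribute name that is now a keyword

# loop.async() is a SyntaxError on Python 3.7+; getattr still works everywhere.
watcher = getattr(loop, 'async')()
assert watcher == 'watcher'
```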
@@ -242,7 +242,7 @@
 
 .. |option-tls-plugin-blurb| replace::
 
-    Sets the :ref:`TLS plugin <configuring-tls-plugin>` to load. This option, if used, must be set before any other TLS options.
+    Sets the :ref:`TLS plugin <configuring-tls>` to load. This option, if used, must be set before any other TLS options.
 
 .. |option-tls-cert-path-blurb| replace::
@@ -1064,7 +1064,7 @@ the most part, this also implies that ``T == fdb.tuple.unpack(fdb.tuple.pack(T))
 .. method:: pack(tuple, prefix=b'')
 
     Returns a key (byte string) encoding the specified tuple. If ``prefix`` is set, it will prefix the serialized
-    bytes with the prefix string. This throws an error if any of the tuple's items are incomplete `Versionstamp`
+    bytes with the prefix string. This throws an error if any of the tuple's items are incomplete :class:`Versionstamp`
     instances.
 
 .. method:: pack_with_versionstamp(tuple, prefix=b'')
@@ -1074,8 +1074,8 @@ the most part, this also implies that ``T == fdb.tuple.unpack(fdb.tuple.pack(T))
     recurse down nested tuples if there are any to find one.) If so, it will produce a byte string
     that can be fed into :meth:`fdb.Transaction.set_versionstamped_key` and correctly fill in the
     versionstamp information at commit time so that when the key is re-read and deserialized, the
-    only difference is that the `Versionstamp` instance is complete and has the transaction version
-    filled in. This throws an error if there are no incomplete `Versionstamp` instances in the tuple
+    only difference is that the :class:`Versionstamp` instance is complete and has the transaction version
+    filled in. This throws an error if there are no incomplete :class:`Versionstamp` instances in the tuple
     or if there is more than one.
 
 .. method:: unpack(key)
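For context, a minimal sketch of the documented flow, assuming an open database handle and treating the key prefix and payload as placeholders (API version 600 is only an example):

```python
import fdb
import fdb.tuple
fdb.api_version(600)

@fdb.transactional
def log_event(tr, subspace_prefix, payload):
    # Pack a tuple containing one incomplete Versionstamp; the stamp's
    # position inside the key is encoded for set_versionstamped_key.
    key = fdb.tuple.pack_with_versionstamp(
        (fdb.tuple.Versionstamp(),), prefix=subspace_prefix)
    tr.set_versionstamped_key(key, payload)

db = fdb.open()
log_event(db, b'events/', b'hello')  # illustrative prefix and payload
```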
@@ -870,7 +870,7 @@ All future objects are a subclass of the :class:`Future` type.
 
 |future-cancel-blurb|
 
-.. classmethod:: Future.wait_for_any(*futures) -> Fixnum
+.. classmethod:: Future.wait_for_any(\*futures) -> Fixnum
 
     Does not return until at least one of the given future objects is ready. Returns the index in the parameter list of a ready future object.
@@ -1,8 +1,8 @@
 .. _backups:
 
-######################
+######################################################
 Backup, Restore, and Replication for Disaster Recovery
-######################
+######################################################
 
 .. include:: guide-common.rst.inc
@@ -323,7 +323,7 @@ Optionally, the user can specify a minimum RESTORABILITY guarantee with one of t
 .. program:: fdbbackup describe
 
 ``describe``
-----------
+------------
 
 The ``describe`` subcommand will analyze the given backup and print a summary of the snapshot and mutation data versions it contains as well as the version range of restorability the backup can currently provide.
@@ -99,7 +99,7 @@ For large clusters, you can manually set the allocated number of processes of a
 
 Set the process using ``configure [proxies|resolvers|logs]=<N>``, where ``<N>`` is an integer greater than 0, or -1 to reset the value to its default.
 
-For recommendations on appropriate values for process types in large clusters, see :ref:`configuration-large-cluster-performance`.
+For recommendations on appropriate values for process types in large clusters, see :ref:`guidelines-process-class-config`.
 
 coordinators
 ------------
@@ -263,7 +263,8 @@ Contains default parameters for all fdbserver processes on this machine. These s
 * ``locality_dcid``: Data center identifier key. All processes physically located in a data center should share the id. No default value. If you are depending on data center based replication this must be set on all processes.
 * ``locality_data_hall``: Data hall identifier key. All processes physically located in a data hall should share the id. No default value. If you are depending on data hall based replication this must be set on all processes.
 * ``io_trust_seconds``: Time in seconds that a read or write operation is allowed to take before timing out with an error. If an operation times out, all future operations on that file will fail with an error as well. Only has an effect when using AsyncFileKAIO in Linux. If unset, defaults to 0 which means timeout is disabled.
-.. note:: In addition to the options above, TLS settings as described for the :ref:`TLS plugin <configuring-tls-plugin>` can be specified in the [fdbserver] section.
+
+.. note:: In addition to the options above, TLS settings as described for the :ref:`TLS plugin <configuring-tls>` can be specified in the [fdbserver] section.
 
 ``[fdbserver.<ID>]`` section(s)
 ---------------------------------
@@ -10,38 +10,38 @@ macOS
 
 The macOS installation package is supported on macOS 10.7+. It includes the client and (optionally) the server.
 
-* `FoundationDB-6.0.11.pkg <https://www.foundationdb.org/downloads/6.0.11/macOS/installers/FoundationDB-6.0.11.pkg>`_
+* `FoundationDB-6.0.14.pkg <https://www.foundationdb.org/downloads/6.0.14/macOS/installers/FoundationDB-6.0.14.pkg>`_
 
 Ubuntu
 ------
 
 The Ubuntu packages are supported on 64-bit Ubuntu 12.04+, but beware of the Linux kernel bug in Ubuntu 12.x.
 
-* `foundationdb-clients-6.0.11-1_amd64.deb <https://www.foundationdb.org/downloads/6.0.11/ubuntu/installers/foundationdb-clients_6.0.11-1_amd64.deb>`_
-* `foundationdb-server-6.0.11-1_amd64.deb <https://www.foundationdb.org/downloads/6.0.11/ubuntu/installers/foundationdb-server_6.0.11-1_amd64.deb>`_ (depends on the clients package)
+* `foundationdb-clients-6.0.14-1_amd64.deb <https://www.foundationdb.org/downloads/6.0.14/ubuntu/installers/foundationdb-clients_6.0.14-1_amd64.deb>`_
+* `foundationdb-server-6.0.14-1_amd64.deb <https://www.foundationdb.org/downloads/6.0.14/ubuntu/installers/foundationdb-server_6.0.14-1_amd64.deb>`_ (depends on the clients package)
 
 RHEL/CentOS EL6
 ---------------
 
 The RHEL/CentOS EL6 packages are supported on 64-bit RHEL/CentOS 6.x.
 
-* `foundationdb-clients-6.0.11-1.el6.x86_64.rpm <https://www.foundationdb.org/downloads/6.0.11/rhel6/installers/foundationdb-clients-6.0.11-1.el6.x86_64.rpm>`_
-* `foundationdb-server-6.0.11-1.el6.x86_64.rpm <https://www.foundationdb.org/downloads/6.0.11/rhel6/installers/foundationdb-server-6.0.11-1.el6.x86_64.rpm>`_ (depends on the clients package)
+* `foundationdb-clients-6.0.14-1.el6.x86_64.rpm <https://www.foundationdb.org/downloads/6.0.14/rhel6/installers/foundationdb-clients-6.0.14-1.el6.x86_64.rpm>`_
+* `foundationdb-server-6.0.14-1.el6.x86_64.rpm <https://www.foundationdb.org/downloads/6.0.14/rhel6/installers/foundationdb-server-6.0.14-1.el6.x86_64.rpm>`_ (depends on the clients package)
 
 RHEL/CentOS EL7
 ---------------
 
 The RHEL/CentOS EL7 packages are supported on 64-bit RHEL/CentOS 7.x.
 
-* `foundationdb-clients-6.0.11-1.el7.x86_64.rpm <https://www.foundationdb.org/downloads/6.0.11/rhel7/installers/foundationdb-clients-6.0.11-1.el7.x86_64.rpm>`_
-* `foundationdb-server-6.0.11-1.el7.x86_64.rpm <https://www.foundationdb.org/downloads/6.0.11/rhel7/installers/foundationdb-server-6.0.11-1.el7.x86_64.rpm>`_ (depends on the clients package)
+* `foundationdb-clients-6.0.14-1.el7.x86_64.rpm <https://www.foundationdb.org/downloads/6.0.14/rhel7/installers/foundationdb-clients-6.0.14-1.el7.x86_64.rpm>`_
+* `foundationdb-server-6.0.14-1.el7.x86_64.rpm <https://www.foundationdb.org/downloads/6.0.14/rhel7/installers/foundationdb-server-6.0.14-1.el7.x86_64.rpm>`_ (depends on the clients package)
 
 Windows
 -------
 
 The Windows installer is supported on 64-bit Windows XP and later. It includes the client and (optionally) the server.
 
-* `foundationdb-6.0.11-x64.msi <https://www.foundationdb.org/downloads/6.0.11/windows/installers/foundationdb-6.0.11-x64.msi>`_
+* `foundationdb-6.0.14-x64.msi <https://www.foundationdb.org/downloads/6.0.14/windows/installers/foundationdb-6.0.14-x64.msi>`_
 
 API Language Bindings
 =====================
@@ -58,18 +58,18 @@ On macOS and Windows, the FoundationDB Python API bindings are installed as part
 
 If you need to use the FoundationDB Python API from other Python installations or paths, download the Python package:
 
-* `foundationdb-6.0.11.tar.gz <https://www.foundationdb.org/downloads/6.0.11/bindings/python/foundationdb-6.0.11.tar.gz>`_
+* `foundationdb-6.0.14.tar.gz <https://www.foundationdb.org/downloads/6.0.14/bindings/python/foundationdb-6.0.14.tar.gz>`_
 
 Ruby 1.9.3/2.0.0+
 -----------------
 
-* `fdb-6.0.11.gem <https://www.foundationdb.org/downloads/6.0.11/bindings/ruby/fdb-6.0.11.gem>`_
+* `fdb-6.0.14.gem <https://www.foundationdb.org/downloads/6.0.14/bindings/ruby/fdb-6.0.14.gem>`_
 
 Java 8+
 -------
 
-* `fdb-java-6.0.11.jar <https://www.foundationdb.org/downloads/6.0.11/bindings/java/fdb-java-6.0.11.jar>`_
-* `fdb-java-6.0.11-javadoc.jar <https://www.foundationdb.org/downloads/6.0.11/bindings/java/fdb-java-6.0.11-javadoc.jar>`_
+* `fdb-java-6.0.14.jar <https://www.foundationdb.org/downloads/6.0.14/bindings/java/fdb-java-6.0.14.jar>`_
+* `fdb-java-6.0.14-javadoc.jar <https://www.foundationdb.org/downloads/6.0.14/bindings/java/fdb-java-6.0.14-javadoc.jar>`_
 
 Go 1.1+
 -------
@@ -31,7 +31,7 @@
 ``coordinators auto`` selects processes based on IP address. If your cluster has processes on the same machine with different IP addresses, ``coordinators auto`` may select a set of coordinators that are not fault tolerant. To ensure maximal fault tolerance, we recommend selecting coordinators according to the criteria in :ref:`configuration-choosing-coordination-servers` and setting them manually.
 
 .. |conf-file-change-detection| replace::
-    Whenever the ``foundationdb.conf`` file changes, the ``fdbmonitor`` daemon automatically detects the changes and starts, stops, or restarts child processes as necessary.
+    Whenever the ``foundationdb.conf`` file changes, the ``fdbmonitor`` daemon automatically detects the changes and starts, stops, or restarts child processes as necessary. Note that changes to the configuration file contents must be made *atomically*. It is recommended to save the modified file to a temporary filename and then move/rename it into place, replacing the original. Some text editors do this automatically when saving.
 
 .. |package-deb-clients| replace::
     foundationdb-clients\_\ |release|\ -1\_amd64.deb
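The write-then-rename recipe recommended by the new note, sketched in Python (the config path is illustrative; ``os.replace`` renames atomically when source and destination share a filesystem):

```python
import os
import tempfile

def atomic_write(path, data):
    # Write the new contents to a temp file in the same directory,
    # then atomically rename it over the original.
    d = os.path.dirname(path) or '.'
    fd, tmp = tempfile.mkstemp(dir=d)
    try:
        with os.fdopen(fd, 'w') as f:
            f.write(data)
            f.flush()
            os.fsync(f.fileno())
        os.replace(tmp, path)  # fdbmonitor never sees a half-written file
    except BaseException:
        os.remove(tmp)
        raise

atomic_write('/etc/foundationdb/foundationdb.conf', new_contents)
```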
@@ -14,7 +14,7 @@ Documentation
 
 FoundationDB is a robust choice for a broad range of use cases:
 
-**Developers can store all types of data.** FoundationDB is multi-model, meaning you can store many types data in a single database. All data is safely stored, distributed, and replicated in FoundationDB.
+**Developers can store all types of data.** FoundationDB is multi-model, meaning you can store many types of data in a single database. All data is safely stored, distributed, and replicated in FoundationDB.
 
 **Administrators easily scale and handle hardware failures.** FoundationDB is easy to install, grow, and manage. It has a distributed architecture that gracefully scales out and handles faults while acting like a single ACID database.
@@ -25,7 +25,7 @@ These limitations come from fundamental design decisions and are unlikely to cha
 Large transactions
 ------------------
 
-Transaction size cannot exceed 10,000,000 bytes of affected data. Keys, values, and ranges that you read or write are all included as affected data. Likewise, conflict ranges that you :ref:`add <api-python-conflict-ranges>` or remove (using a :ref:`snapshot read <api-python-snapshot-reads>` or a :ref:`transaction option <api-python-no-write-conflict-range>`) are also added or removed from the scope of affected data.
+Transaction size cannot exceed 10,000,000 bytes of affected data. Keys, values, and ranges that you write are included as affected data. Keys and ranges that you read are also included as affected data, but values that you read are not. Likewise, conflict ranges that you :ref:`add <api-python-conflict-ranges>` or remove (using a :ref:`snapshot read <api-python-snapshot-reads>` or a :ref:`transaction option <api-python-no-write-conflict-range>`) are also added or removed from the scope of affected data.
 
 If any single transaction exceeds one megabyte of affected data, you should modify your design. In the current version, these large transactions can cause performance issues and database availability can (briefly) be impacted.
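To make the accounting concrete, a hedged sketch with the Python binding (assumes an open database; sizes are illustrative):

```python
import fdb
fdb.api_version(600)  # example version
db = fdb.open()

@fdb.transactional
def example(tr):
    tr[b'k1'] = b'v' * 1000   # key, value, and write conflict range all count
    v1 = tr[b'k2']            # key and read conflict range count; the value read does not
    v2 = tr.snapshot[b'k3']   # snapshot read: no read conflict range is added
```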
@@ -76,7 +76,7 @@ Anyone who can connect to a FoundationDB cluster can read and write every key in
 Current limitations
 ===================
 
-These limitations do not reflect fundamental aspects of our design and are likely be resolved or mitigated in future versions. Administrators should be aware of these issues, but longer-term application development should be less driven by them.
+These limitations do not reflect fundamental aspects of our design and are likely to be resolved or mitigated in future versions. Administrators should be aware of these issues, but longer-term application development should be less driven by them.
 
 .. _long-transactions:
@@ -5,7 +5,7 @@ Local Development
 Download the FoundationDB package
 =================================
 
-:doc:`Download the FoundationDB package <downloads>` for macOS (FoundationDB-*.pkg) onto your local development machine.
+:doc:`Download the FoundationDB package <downloads>` for macOS (FoundationDB-\*.pkg) onto your local development machine.
 
 Install the FoundationDB binaries
 =================================
@@ -10,7 +10,7 @@ Language support
 
 * FoundationDB now supports :doc:`Ruby </api-ruby>`
 
-* FoundationDB now supports :doc:`Node.js </api-node>`
+* FoundationDB now supports Node.js
 
 * FoundationDB now supports `Java </javadoc/index.html>`_ and other JVM languages.
@@ -223,12 +223,12 @@ Node
 ----
 
 * Support for API version 200 and backwards compatibility with previous API versions.
 * New APIs for allocating and managing keyspace (:ref:`Directory <developer-guide-directories>`).
-* Support for the :ref:`Promise/A+ specification <api-node-promises>` with supporting utilities.
+* Support for the Promise/A+ specification with supporting utilities.
 * Futures can take multiple callbacks. Callbacks can be added if the original function was called with a callback. The Future type is exposed in our binding.
 * Added ``as_foundationdb_key`` and ``as_foundationdb_value`` support.
 * Node prints a stack trace if an error occurs in a callback from V8.
 * Snapshot transactions can be used in retry loops.
-* The :ref:`methods <api-node-setAndWatch>` ``db.setAndWatch`` and ``db.clearAndWatch`` now return an object with a watch member instead of a future.
+* The methods ``db.setAndWatch`` and ``db.clearAndWatch`` now return an object with a watch member instead of a future.
 * Fix: Could not use the ``'this'`` pointer with the retry decorator.
 * Fix: Node transactional decorator didn't return a result to the caller if the function was called with a transaction.
 * Fix: The program could sometimes crash when watches were manually cancelled.
@@ -47,7 +47,7 @@ Fixes
 Java
 ----
 
-* The `ReadTransaction` interface supports the ability to set transaction options.
+* The ``ReadTransaction`` interface supports the ability to set transaction options.
 
 Other Changes
 -------------
@@ -2,8 +2,8 @@
 Release Notes
 #############
 
-6.0.12
-=====
+6.0.14
+======
 
 Features
 --------
@@ -27,6 +27,9 @@ Performance
 * Significantly improved the CPU efficiency of copy mutations to transaction logs during recovery. [6.0.2] `(PR #595) <https://github.com/apple/foundationdb/pull/595>`_
 * Significantly improved the CPU efficiency of generating status on the cluster controller. [6.0.11] `(PR #758) <https://github.com/apple/foundationdb/pull/758>`_
 * Reduced CPU cost of truncating files that are being cached. [6.0.12] `(PR #816) <https://github.com/apple/foundationdb/pull/816>`_
+* Significantly reduced master recovery times for clusters with large amounts of data. [6.0.14] `(PR #836) <https://github.com/apple/foundationdb/pull/836>`_
+* Reduced read and commit latencies for clusters which are processing transactions larger than 1MB. [6.0.14] `(PR #851) <https://github.com/apple/foundationdb/pull/851>`_
+* Significantly reduced recovery times when executing rollbacks on the memory storage engine. [6.0.14] `(PR #821) <https://github.com/apple/foundationdb/pull/821>`_
 
 Fixes
 -----
@@ -55,6 +58,7 @@ Fixes
 * Restoring a backup to the exact version a snapshot ends did not apply mutations done at the final version. [6.0.12] `(PR #787) <https://github.com/apple/foundationdb/pull/787>`_
 * Excluding a process that was both the cluster controller and something else would cause two recoveries instead of one. [6.0.12] `(PR #784) <https://github.com/apple/foundationdb/pull/784>`_
 * Configuring from ``three_datacenter`` to ``three_datacenter_fallback`` would cause a lot of unnecessary data movement. [6.0.12] `(PR #782) <https://github.com/apple/foundationdb/pull/782>`_
+* Very rarely, backup snapshots would stop making progress. [6.0.14] `(PR #837) <https://github.com/apple/foundationdb/pull/837>`_
 
 Fixes only impacting 6.0.0+
 ---------------------------
@@ -69,6 +73,7 @@ Fixes only impacting 6.0.0+
 * The transaction logs would leak memory when serving peek requests to log routers. [6.0.12] `(PR #801) <https://github.com/apple/foundationdb/pull/801>`_
 * The transaction logs were doing a lot of unnecessary disk writes. [6.0.12] `(PR #784) <https://github.com/apple/foundationdb/pull/784>`_
 * The master will recover the transaction state store from local transaction logs if possible. [6.0.12] `(PR #801) <https://github.com/apple/foundationdb/pull/801>`_
+* A bug in status collection led to various workload metrics being missing and the cluster reporting unhealthy. [6.0.13] `(PR #834) <https://github.com/apple/foundationdb/pull/834>`_
 
 Status
 ------
@@ -86,6 +91,9 @@ Bindings
 * C API calls made on the network thread could be reordered with calls made from other threads. [6.0.2] `(Issue #518) <https://github.com/apple/foundationdb/issues/518>`_
 * The TLS_PLUGIN option is now a no-op and has been deprecated. [6.0.10] `(PR #710) <https://github.com/apple/foundationdb/pull/710>`_
 * Java: the `Versionstamp::getUserVersion() </javadoc/com/apple/foundationdb/tuple/Versionstamp.html#getUserVersion-->`_ method did not handle user versions greater than ``0x00FF`` due to operator precedence errors. [6.0.11] `(Issue #761) <https://github.com/apple/foundationdb/issues/761>`_
+* Python: bindings didn't work with Python 3.7 because of the new ``async`` keyword. [6.0.13] `(Issue #830) <https://github.com/apple/foundationdb/issues/830>`_
+* Go: ``PrefixRange`` didn't correctly return an error if it failed to generate the range. [6.0.15] `(PR #878) <https://github.com/apple/foundationdb/pull/878>`_
+
 
 Other Changes
 -------------
@@ -80,7 +80,7 @@ Default Values
 Certificate file default location
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
-The default behavior when the certificate or key file is not specified is to look for a file named ``fdb.pem`` in the current working directory. If this file is not present, an attempt is made to load a file from a system-dependent location:
+The default behavior when the certificate or key file is not specified is to look for a file named ``fdb.pem`` in the current working directory. If this file is not present, an attempt is made to load a file from a system-dependent location as follows:
 
 * Linux: ``/etc/foundationdb/fdb.pem``
 * macOS: ``/usr/local/etc/foundationdb/fdb.pem``
@@ -96,20 +96,15 @@ Default Password
 
 There is no default password. If no password is specified, it is assumed that the private key is unencrypted.
 
-CA file default location
-^^^^^^^^^^^^^^^^^^^^^^^^^
-
-If a value is not specified, the software searches for certs in the default openssl certs location.
-
 Parameters and client bindings
 ------------------------------
 
 The default LibreSSL-based implementation
-=================================
+=========================================
 
 FoundationDB offers TLS based on the LibreSSL library. By default, it will be enabled automatically when participating in a TLS-enabled cluster.
 
-For TLS to operate, each process (both server and client) must have an X509 certificate, its corresponding private key, and potentially the certificates with which is was signed. When a process begins to communicate with a FoundationDB server process, the peer's certificate is checked to see if it is trusted and the fields of the peer certificate are verified. Peers must share the same root trusted certificate, and they must both present certificates whose signing chain includes this root certificate.
+For TLS to operate, each process (both server and client) must have an X509 certificate, its corresponding private key, and the certificates with which it was signed. When a process begins to communicate with a FoundationDB server process, the peer's certificate is checked to see if it is trusted and the fields of the peer certificate are verified. Peers must share the same root trusted certificate, and they must both present certificates whose signing chain includes this root certificate.
 
 If the local certificate and chain is invalid, a FoundationDB server process bound to a TLS address will not start. In the case of invalid certificates on a client, the client will be able to start but will be unable to connect any TLS-enabled cluster.
@@ -235,7 +230,7 @@ Field              Well known name
 ``subjectAltName`` Subject Alternative Name
 ================== ========================
 
-Within a subject alternative name requirement, the value specified is required to have the form ``prefix:value``, where the prefix specifies the type of value being matched against. The following prefixes are supported.
+Within a subject alternative name requirement, the value specified is required to have the form ``prefix:value``, where the prefix specifies the type of value being matched against. The following prefixes are supported:
 
 ====== ===========================
 Prefix Well known name
@@ -244,7 +239,7 @@ DNS    Domain Name
 URI    Uniform Resource Identifier
 IP     IP Address
 EMAIL  Email Address
-====== ============================
+====== ===========================
 
 The following operators are supported:
@@ -30,12 +30,12 @@
 #include "fdbclient/Status.h"
 #include "fdbclient/BackupContainer.h"
 #include "fdbclient/KeyBackedTypes.h"
-
 #include "fdbclient/RunTransaction.actor.h"
-#include "fdbrpc/Platform.h"
-#include "fdbrpc/BlobStore.h"
+#include "fdbclient/BlobStore.h"
 #include "fdbclient/json_spirit/json_spirit_writer_template.h"
 
+#include "fdbrpc/Platform.h"
+
 #include <stdarg.h>
 #include <stdio.h>
 #include <algorithm>  // std::transform
@@ -1080,8 +1080,8 @@ ACTOR Future<std::string> getLayerStatus(Reference<ReadYourWritesTransaction> tr
 			backupTagUids.push_back(config.getUid());
 
 			tagStates.push_back(config.stateEnum().getOrThrow(tr));
-			tagRangeBytes.push_back(config.rangeBytesWritten().getD(tr, 0));
-			tagLogBytes.push_back(config.logBytesWritten().getD(tr, 0));
+			tagRangeBytes.push_back(config.rangeBytesWritten().getD(tr, false, 0));
+			tagLogBytes.push_back(config.logBytesWritten().getD(tr, false, 0));
 			tagContainers.push_back(config.backupContainer().getOrThrow(tr));
 			tagLastRestorableVersions.push_back(fba.getLastRestorable(tr, StringRef(tag->tagName)));
 		}
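The new middle argument threads a snapshot flag through ``getD``. A hedged Python analogue of a defaulted, optionally-snapshot read (API version 600 is only an example):

```python
import fdb
fdb.api_version(600)

@fdb.transactional
def get_d(tr, key, default=b'', snapshot=False):
    # Read 'key', returning 'default' when absent; a snapshot read adds
    # no read conflict range, mirroring the extra flag threaded through getD.
    value = (tr.snapshot if snapshot else tr)[key]
    return value if value.present() else default
```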
@@ -18,8 +18,8 @@
  * limitations under the License.
  */
 
-#include "AsyncFileBlobStore.actor.h"
-#include "AsyncFileReadAhead.actor.h"
+#include "fdbclient/AsyncFileBlobStore.actor.h"
+#include "fdbrpc/AsyncFileReadAhead.actor.h"
 #include "flow/UnitTest.h"
 
 Future<int64_t> AsyncFileBlobStoreRead::size() {
@@ -30,13 +30,13 @@
 #include <sstream>
 #include <time.h>
 
-#include "IAsyncFile.h"
+#include "fdbrpc/IAsyncFile.h"
 #include "flow/serialize.h"
 #include "flow/Net2Packet.h"
-#include "IRateControl.h"
-#include "BlobStore.h"
-#include "md5/md5.h"
-#include "libb64/encode.h"
+#include "fdbrpc/IRateControl.h"
+#include "fdbclient/BlobStore.h"
+#include "fdbclient/md5/md5.h"
+#include "fdbclient/libb64/encode.h"
 
 ACTOR template<typename T> static Future<T> joinErrorGroup(Future<T> f, Promise<Void> p) {
 	try {
@@ -22,9 +22,9 @@
 #include "flow/Trace.h"
 #include "flow/UnitTest.h"
 #include "flow/Hash3.h"
-#include "fdbrpc/AsyncFileBlobStore.actor.h"
 #include "fdbrpc/AsyncFileReadAhead.actor.h"
 #include "fdbrpc/Platform.h"
+#include "fdbclient/AsyncFileBlobStore.actor.h"
 #include "fdbclient/Status.h"
 #include "fdbclient/SystemData.h"
 #include "fdbclient/ReadYourWrites.h"
@@ -20,13 +20,13 @@
 
 #include "BlobStore.h"
 
-#include "md5/md5.h"
-#include "libb64/encode.h"
-#include "sha1/SHA1.h"
+#include "fdbclient/md5/md5.h"
+#include "fdbclient/libb64/encode.h"
+#include "fdbclient/sha1/SHA1.h"
 #include "time.h"
 #include <boost/algorithm/string/split.hpp>
 #include <boost/algorithm/string/classification.hpp>
-#include "IAsyncFile.h"
+#include "fdbrpc/IAsyncFile.h"
 
 json_spirit::mObject BlobStoreEndpoint::Stats::getJSON() {
 	json_spirit::mObject o;
@@ -25,9 +25,9 @@
 #include "flow/flow.h"
 #include "flow/Net2Packet.h"
 #include "fdbclient/Knobs.h"
-#include "IRateControl.h"
-#include "HTTP.h"
-#include "JSONDoc.h"
+#include "fdbrpc/IRateControl.h"
+#include "fdbclient/HTTP.h"
+#include "fdbclient/JSONDoc.h"
 
 // Representation of all the things you need to connect to a blob store instance with some credentials.
 // Reference counted because a very large number of them could be needed.
@@ -806,7 +806,7 @@ namespace fileBackup {
 		state UidAndAbortedFlagT current = wait(tag.getOrThrow(tr, false, backup_unneeded()));
 
 		state BackupConfig config(current.first);
-		EBackupState status = wait(config.stateEnum().getD(tr, EBackupState::STATE_NEVERRAN));
+		EBackupState status = wait(config.stateEnum().getD(tr, false, EBackupState::STATE_NEVERRAN));
 
 		if (!backupAgent->isRunnable((BackupAgentBase::enumState)status)) {
 			throw backup_unneeded();
@@ -3375,7 +3375,7 @@ public:
 		}
 
 		state BackupConfig config(oldUidAndAborted.get().first);
-		state EBackupState status = wait(config.stateEnum().getD(tr, EBackupState::STATE_NEVERRAN));
+		state EBackupState status = wait(config.stateEnum().getD(tr, false, EBackupState::STATE_NEVERRAN));
 
 		// Break, if no longer runnable
 		if (!FileBackupAgent::isRunnable(status)) {
@@ -3410,7 +3410,7 @@ public:
 		Optional<UidAndAbortedFlagT> uidAndAbortedFlag = wait(tag.get(tr));
 		if (uidAndAbortedFlag.present()) {
 			state BackupConfig prevConfig(uidAndAbortedFlag.get().first);
-			state EBackupState prevBackupStatus = wait(prevConfig.stateEnum().getD(tr, EBackupState::STATE_NEVERRAN));
+			state EBackupState prevBackupStatus = wait(prevConfig.stateEnum().getD(tr, false, EBackupState::STATE_NEVERRAN));
 			if (FileBackupAgent::isRunnable(prevBackupStatus)) {
 				throw backup_duplicate();
 			}
@@ -3617,7 +3617,7 @@ public:
 		state KeyBackedTag tag = makeBackupTag(tagName.toString());
 		state UidAndAbortedFlagT current = wait(tag.getOrThrow(tr, false, backup_unneeded()));
 		state BackupConfig config(current.first);
-		state EBackupState status = wait(config.stateEnum().getD(tr, EBackupState::STATE_NEVERRAN));
+		state EBackupState status = wait(config.stateEnum().getD(tr, false, EBackupState::STATE_NEVERRAN));
 
 		if (!FileBackupAgent::isRunnable(status)) {
 			throw backup_unneeded();
@@ -3668,7 +3668,7 @@ public:
 
 		state BackupConfig config(current.first);
 		state Key destUidValue = wait(config.destUidValue().getOrThrow(tr));
-		EBackupState status = wait(config.stateEnum().getD(tr, EBackupState::STATE_NEVERRAN));
+		EBackupState status = wait(config.stateEnum().getD(tr, false, EBackupState::STATE_NEVERRAN));
 
 		if (!backupAgent->isRunnable((BackupAgentBase::enumState)status)) {
 			throw backup_unneeded();
@@ -3707,7 +3707,7 @@ public:
 			state Future<Optional<Value>> fPaused = tr->get(backupAgent->taskBucket->getPauseKey());
 			if (uidAndAbortedFlag.present()) {
 				config = BackupConfig(uidAndAbortedFlag.get().first);
-				EBackupState status = wait(config.stateEnum().getD(tr, EBackupState::STATE_NEVERRAN));
+				EBackupState status = wait(config.stateEnum().getD(tr, false, EBackupState::STATE_NEVERRAN));
 				backupState = status;
 			}
@@ -18,11 +18,12 @@
  * limitations under the License.
  */
 
-#include "HTTP.h"
-#include "md5/md5.h"
-#include "libb64/encode.h"
+#include "fdbclient/HTTP.h"
+#include "fdbclient/md5/md5.h"
+#include "fdbclient/libb64/encode.h"
+#include "fdbclient/xml2json.hpp"
 
 #include <cctype>
-#include "xml2json.hpp"
 
 namespace HTTP {
@@ -20,7 +20,7 @@
 
 #include "flow/flow.h"
 #include "flow/Net2Packet.h"
-#include "IRateControl.h"
+#include "fdbrpc/IRateControl.h"
 #include "fdbclient/Knobs.h"
 
 namespace HTTP {
@@ -5,7 +5,7 @@
 #include <cmath>
 #include "flow/flow.h"
 #include "flow/Trace.h"
-#include "fdbrpc/JSONDoc.h"
+#include "fdbclient/JSONDoc.h"
 
 class JsonBuilder;
 class JsonBuilderObject;
@@ -21,7 +21,7 @@
 #ifndef FDBCLIENT_STATUS_H
 #define FDBCLIENT_STATUS_H
 
-#include "../fdbrpc/JSONDoc.h"
+#include "fdbclient/JSONDoc.h"
 
 // Reads the entire string s as a JSON value
 // Throws if no value can be parsed or if s contains data after the first JSON value
@@ -734,6 +734,11 @@ public:
 		else if(newTimeoutVersion <= version)  // Ensure that the time extension is to the future
 			newTimeoutVersion = version + 1;
 
+		// This can happen if extendTimeout is called shortly after task start and the task's timeout was jittered to be longer
+		if(newTimeoutVersion <= task->timeoutVersion) {
+			newTimeoutVersion = task->timeoutVersion + 1;
+		}
+
 		// This is where the task definition is being moved to
 		state Subspace newTimeoutSpace = taskBucket->timeouts.get(newTimeoutVersion).get(task->key);
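The added clamp keeps a task's timeout version strictly increasing even when the original timeout was jittered past the requested extension. The same guard reduced to its arithmetic, as a hedged Python sketch:

```python
def extend_timeout(new_timeout_version, current_version, task_timeout_version):
    # The extension must land in the future...
    if new_timeout_version <= current_version:
        new_timeout_version = current_version + 1
    # ...and must never move the task's timeout backwards, which could
    # otherwise happen when the original timeout was jittered longer.
    if new_timeout_version <= task_timeout_version:
        new_timeout_version = task_timeout_version + 1
    return new_timeout_version

assert extend_timeout(100, 50, 120) == 121  # jittered timeout wins
```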
@@ -20,13 +20,14 @@
     </ProjectConfiguration>
   </ItemGroup>
   <ItemGroup>
-    <ActorCompiler Include="KeyRangeMap.actor.cpp" />
-    <ActorCompiler Include="NativeAPI.actor.cpp" />
-  </ItemGroup>
-  <ItemGroup>
+    <ActorCompiler Include="AsyncFileBlobStore.actor.h">
+      <EnableCompile Condition="'$(Configuration)|$(Platform)'=='Debug|X64'">false</EnableCompile>
+      <EnableCompile Condition="'$(Configuration)|$(Platform)'=='Release|X64'">false</EnableCompile>
+    </ActorCompiler>
     <ClInclude Include="Atomic.h" />
     <ClInclude Include="BackupContainer.h" />
     <ClInclude Include="BackupAgent.h" />
+    <ClInclude Include="BlobStore.h" />
     <ClInclude Include="ClientDBInfo.h" />
     <ClInclude Include="ClientLogEvents.h" />
     <ClInclude Include="ClientWorkerInterface.h" />
@@ -34,19 +35,21 @@
     <ClInclude Include="CommitTransaction.h" />
     <ClInclude Include="CoordinationInterface.h" />
     <ClInclude Include="DatabaseConfiguration.h" />
-    <ActorCompiler Include="DatabaseContext.h" />
+    <ClInclude Include="DatabaseContext.h" />
     <ActorCompiler Include="EventTypes.actor.h">
       <EnableCompile Condition="'$(Configuration)|$(Platform)'=='Debug|X64'">false</EnableCompile>
       <EnableCompile Condition="'$(Configuration)|$(Platform)'=='Release|X64'">false</EnableCompile>
     </ActorCompiler>
-    <ClInclude Include="KeyBackedTypes.h" />
-    <ClInclude Include="MetricLogger.h" />
-    <ActorCompiler Include="MetricLogger.actor.cpp" />
-    <ClInclude Include="FailureMonitorClient.h" />
     <ClInclude Include="FDBOptions.g.h" />
     <ClInclude Include="FDBOptions.h" />
     <ClInclude Include="FDBTypes.h" />
+    <ClInclude Include="HTTP.h" />
+    <ClInclude Include="KeyBackedTypes.h" />
+    <ClInclude Include="MetricLogger.h" />
+    <ClInclude Include="FailureMonitorClient.h" />
     <ClInclude Include="IClientApi.h" />
+    <ClInclude Include="JsonBuilder.h" />
+    <ClInclude Include="JSONDoc.h" />
     <ClInclude Include="json_spirit\json_spirit_error_position.h" />
     <ClInclude Include="json_spirit\json_spirit_reader_template.h" />
     <ClInclude Include="json_spirit\json_spirit_value.h" />
@@ -54,8 +57,13 @@
     <ClInclude Include="json_spirit\json_spirit_writer_template.h" />
     <ClInclude Include="KeyRangeMap.h" />
     <ClInclude Include="Knobs.h" />
+    <ClInclude Include="libb64\cdecode.h" />
+    <ClInclude Include="libb64\cencode.h" />
+    <ClInclude Include="libb64\decode.h" />
+    <ClInclude Include="libb64\encode.h" />
     <ClInclude Include="ManagementAPI.h" />
     <ClInclude Include="MasterProxyInterface.h" />
+    <ClInclude Include="md5\md5.h" />
     <ClInclude Include="MonitorLeader.h" />
     <ClInclude Include="MultiVersionAssignmentVars.h" />
     <ClInclude Include="MultiVersionTransaction.h" />
@@ -66,41 +74,52 @@
     <ActorCompiler Include="RunTransaction.actor.h" />
     <ClInclude Include="RYWIterator.h" />
     <ClInclude Include="Schemas.h" />
+    <ClInclude Include="sha1\SHA1.h" />
     <ClInclude Include="SnapshotCache.h" />
     <ClInclude Include="Status.h" />
     <ClInclude Include="StatusClient.h" />
     <ClInclude Include="StorageServerInterface.h" />
+    <ClInclude Include="Subspace.h" />
     <ClInclude Include="SystemData.h" />
     <ClInclude Include="TaskBucket.h" />
     <ClInclude Include="ThreadSafeTransaction.h" />
+    <ClInclude Include="Tuple.h" />
     <ActorCompiler Include="VersionedMap.actor.h">
       <EnableCompile Condition="'$(Configuration)|$(Platform)'=='Debug|X64'">false</EnableCompile>
       <EnableCompile Condition="'$(Configuration)|$(Platform)'=='Release|X64'">false</EnableCompile>
     </ActorCompiler>
     <ClInclude Include="VersionedMap.h" />
     <ClInclude Include="WriteMap.h" />
-    <ClInclude Include="Subspace.h" />
-    <ClInclude Include="Tuple.h" />
-    <ClInclude Include="JsonBuilder.h" />
+    <ClInclude Include="xml2json.hpp" />
   </ItemGroup>
   <ItemGroup>
-    <ActorCompiler Include="FailureMonitorClient.actor.cpp" />
-    <ActorCompiler Include="ReadYourWrites.actor.cpp" />
+    <ActorCompiler Include="AsyncFileBlobStore.actor.cpp" />
+    <ClCompile Include="AutoPublicAddress.cpp" />
     <ActorCompiler Include="BackupAgentBase.actor.cpp" />
     <ActorCompiler Include="BackupContainer.actor.cpp" />
+    <ActorCompiler Include="BlobStore.actor.cpp" />
     <ActorCompiler Include="DatabaseBackupAgent.actor.cpp" />
     <ClCompile Include="DatabaseConfiguration.cpp" />
-    <ClCompile Include="AutoPublicAddress.cpp" />
+    <ActorCompiler Include="FailureMonitorClient.actor.cpp" />
     <ClCompile Include="FDBOptions.g.cpp" />
     <ActorCompiler Include="FileBackupAgent.actor.cpp" />
+    <ActorCompiler Include="HTTP.actor.cpp" />
+    <ActorCompiler Include="KeyRangeMap.actor.cpp" />
     <ClCompile Include="Knobs.cpp" />
+    <ClCompile Include="libb64\cdecode.c" />
+    <ClCompile Include="libb64\cencode.c" />
+    <ClCompile Include="md5\md5.c" />
+    <ActorCompiler Include="MetricLogger.actor.cpp" />
     <ActorCompiler Include="MonitorLeader.actor.cpp" />
     <ActorCompiler Include="ManagementAPI.actor.cpp" />
     <ActorCompiler Include="MultiVersionTransaction.actor.cpp" />
+    <ActorCompiler Include="NativeAPI.actor.cpp" />
+    <ActorCompiler Include="ReadYourWrites.actor.cpp" />
     <ClCompile Include="RYWIterator.cpp" />
     <ActorCompiler Include="StatusClient.actor.cpp" />
     <ClCompile Include="Schemas.cpp" />
     <ClCompile Include="SystemData.cpp" />
+    <ClCompile Include="sha1\SHA1.cpp" />
     <ActorCompiler Include="ThreadSafeTransaction.actor.cpp" />
     <ActorCompiler Include="TaskBucket.actor.cpp" />
     <ClCompile Include="Subspace.cpp" />
@@ -9,19 +9,19 @@
 #include <string>
 #include <cctype>
 
-#include "rapidxml/rapidxml.hpp"
-#include "rapidxml/rapidxml_utils.hpp"
-#include "rapidxml/rapidxml_print.hpp"
+#include "fdbclient/rapidxml/rapidxml.hpp"
+#include "fdbclient/rapidxml/rapidxml_utils.hpp"
+#include "fdbclient/rapidxml/rapidxml_print.hpp"
 
-#include "rapidjson/document.h"
-#include "rapidjson/prettywriter.h"
-#include "rapidjson/encodedstream.h"
-#include "rapidjson/stringbuffer.h"
-#include "rapidjson/reader.h"
-#include "rapidjson/writer.h"
-#include "rapidjson/filereadstream.h"
-#include "rapidjson/filewritestream.h"
-#include "rapidjson/error/en.h"
+#include "fdbclient/rapidjson/document.h"
+#include "fdbclient/rapidjson/prettywriter.h"
+#include "fdbclient/rapidjson/encodedstream.h"
+#include "fdbclient/rapidjson/stringbuffer.h"
+#include "fdbclient/rapidjson/reader.h"
+#include "fdbclient/rapidjson/writer.h"
+#include "fdbclient/rapidjson/filereadstream.h"
+#include "fdbclient/rapidjson/filewritestream.h"
+#include "fdbclient/rapidjson/error/en.h"
 
 /* [Start] This part is configurable */
 static const char xml2json_text_additional_name[] = "#text";
@@ -177,45 +177,45 @@ Future<Void> AsyncFileCached::truncate( int64_t size ) {
 
 		pageOffset += pageCache->pageSize;
 	}
 
 	// if this call to truncate results in a larger file, there is no
 	// need to erase any pages
-
-	// Iterating through all pages results in better cache locality than
-	// looking up pages one by one in the hash table. However, if we only need
-	// to truncate a small portion of data, looking up pages one by one should
-	// be faster. So for now we do single key lookup for each page if it results
-	// in less than a fixed percentage of the unordered map being accessed.
-	int64_t numLookups = (oldLength + (pageCache->pageSize-1) - pageOffset) / pageCache->pageSize;
-	if(numLookups < pages.size() * FLOW_KNOBS->PAGE_CACHE_TRUNCATE_LOOKUP_FRACTION) {
-		for(int64_t offset = pageOffset; offset < oldLength; offset += pageCache->pageSize) {
-			auto iter = pages.find(offset);
-			if(iter != pages.end()) {
-				auto f = iter->second->truncate();
-				if(!f.isReady() || f.isError()) {
-					actors.push_back(f);
-				}
-				pages.erase(iter);
-			}
-		}
-	}
-	else {
-		for(auto p = pages.begin(); p != pages.end();) {
-			if(p->first >= pageOffset) {
-				auto f = p->second->truncate();
-				if(!f.isReady() || f.isError()) {
-					actors.push_back(f);
-				}
-				auto last = p;
-				++p;
-				pages.erase(last);
-			}
-			else {
-				++p;
-			}
-		}
-	}
+	if(oldLength > pageOffset) {
+		// Iterating through all pages results in better cache locality than
+		// looking up pages one by one in the hash table. However, if we only need
+		// to truncate a small portion of data, looking up pages one by one should
+		// be faster. So for now we do single key lookup for each page if it results
+		// in less than a fixed percentage of the unordered map being accessed.
+		int64_t numLookups = (oldLength + (pageCache->pageSize-1) - pageOffset) / pageCache->pageSize;
+		if(numLookups < pages.size() * FLOW_KNOBS->PAGE_CACHE_TRUNCATE_LOOKUP_FRACTION) {
+			for(int64_t offset = pageOffset; offset < oldLength; offset += pageCache->pageSize) {
+				auto iter = pages.find(offset);
+				if(iter != pages.end()) {
+					auto f = iter->second->truncate();
+					if(!f.isReady() || f.isError()) {
+						actors.push_back(f);
+					}
+					pages.erase(iter);
+				}
+			}
+		}
+		else {
+			for(auto p = pages.begin(); p != pages.end();) {
+				if(p->first >= pageOffset) {
+					auto f = p->second->truncate();
+					if(!f.isReady() || f.isError()) {
+						actors.push_back(f);
+					}
+					auto last = p;
+					++p;
+					pages.erase(last);
+				}
+				else {
+					++p;
+				}
+			}
+		}
+	}
 
 	return truncate_impl( this, size, waitForAll( actors ) );
 }
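The wrapped heuristic trades a full scan of the page map against point lookups on page-aligned offsets. A hedged Python sketch of the same decision (the fraction mirrors ``PAGE_CACHE_TRUNCATE_LOOKUP_FRACTION``; the value shown is illustrative):

```python
def pages_to_truncate(pages, page_size, start, old_length, fraction=0.1):
    # 'pages' maps page-aligned offsets to cached pages. If only a small
    # slice of the map would be touched, probe offsets directly; otherwise
    # a full scan has better cache locality.
    num_lookups = (old_length + page_size - 1 - start) // page_size
    if num_lookups < len(pages) * fraction:
        offsets = range(start, old_length, page_size)
        return [off for off in offsets if off in pages]
    return [off for off in pages if off >= start]
```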
@@ -12,9 +12,6 @@
   </ItemGroup>
   <ItemGroup>
     <ActorCompiler Include="ActorFuzz.actor.cpp" />
-    <ActorCompiler Include="AsyncFileBlobStore.actor.cpp" />
-    <ActorCompiler Include="BlobStore.actor.cpp" />
-    <ActorCompiler Include="HTTP.actor.cpp" />
     <ActorCompiler Include="AsyncFileCached.actor.cpp" />
     <ActorCompiler Include="AsyncFileNonDurable.actor.cpp" />
     <ActorCompiler Include="dsltest.actor.cpp" />
@@ -25,10 +22,6 @@
     <ActorCompiler Include="IAsyncFile.actor.cpp" />
     <ClCompile Include="crc32c.cpp" />
     <ClCompile Include="generated-constants.cpp" />
-    <ClCompile Include="sha1\SHA1.cpp" />
-    <ClCompile Include="libb64\cencode.c" />
-    <ClCompile Include="libb64\cdecode.c" />
-    <ClCompile Include="md5\md5.c" />
     <ClCompile Include="Platform.cpp" />
     <ClCompile Include="AsyncFileWriteChecker.cpp" />
     <ClCompile Include="libcoroutine\Common.c" />
@@ -60,9 +53,6 @@
   </ItemGroup>
   <ItemGroup>
     <ClInclude Include="ActorFuzz.h" />
-    <ActorCompiler Include="AsyncFileBlobStore.actor.h">
-      <EnableCompile>false</EnableCompile>
-    </ActorCompiler>
     <ActorCompiler Include="AsyncFileEIO.actor.h">
       <EnableCompile>false</EnableCompile>
     </ActorCompiler>
@@ -95,22 +85,13 @@
     <ActorCompiler Include="genericactors.actor.h">
       <EnableCompile>false</EnableCompile>
     </ActorCompiler>
-    <ClInclude Include="JSONDoc.h" />
     <ClInclude Include="linux_kaio.h" />
     <ClInclude Include="LoadPlugin.h" />
-    <ClInclude Include="sha1\SHA1.h" />
-    <ClInclude Include="libb64\encode.h" />
-    <ClInclude Include="libb64\cencode.h" />
-    <ClInclude Include="libb64\decode.h" />
-    <ClInclude Include="libb64\cdecode.h" />
-    <ClInclude Include="md5\md5.h" />
     <ClInclude Include="IAsyncFile.h" />
     <ClInclude Include="IRateControl.h" />
     <ClInclude Include="Platform.h" />
     <ClInclude Include="fdbrpc.h" />
     <ClInclude Include="FlowTransport.h" />
-    <ClInclude Include="BlobStore.h" />
-    <ClInclude Include="HTTP.h" />
     <ClInclude Include="ITLSPlugin.h" />
     <ClInclude Include="libcoroutine\Base.h" />
     <ClInclude Include="libcoroutine\Common.h" />
@@ -131,7 +112,6 @@
     <ClInclude Include="Smoother.h" />
     <ClInclude Include="TLSConnection.h" />
     <ClInclude Include="TraceFileIO.h" />
-    <ClInclude Include="xml2json.hpp" />
     <ClInclude Include="zlib\zlib.h" />
     <ClInclude Include="zlib\deflate.h" />
     <ClInclude Include="zlib\gzguts.h" />
@@ -19,10 +19,6 @@
     <ActorCompiler Include="FlowTransport.actor.cpp" />
     <ActorCompiler Include="ActorFuzz.actor.cpp" />
     <ActorCompiler Include="batcher.actor.h" />
-    <ActorCompiler Include="AsyncFileBlobStore.actor.cpp" />
-    <ActorCompiler Include="BlobStore.actor.cpp" />
-    <ActorCompiler Include="HTTP.actor.cpp" />
-    <ActorCompiler Include="AsyncFileBlobStore.actor.h" />
     <ActorCompiler Include="AsyncFileReadAhead.actor.h" />
     <ActorCompiler Include="IAsyncFile.actor.cpp" />
   </ItemGroup>
@@ -78,10 +74,6 @@
     <ClCompile Include="Platform.cpp" />
     <ClCompile Include="QueueModel.cpp" />
     <ClCompile Include="TraceFileIO.cpp" />
-    <ClCompile Include="sha1\SHA1.cpp" />
-    <ClCompile Include="libb64\cencode.c" />
-    <ClCompile Include="libb64\cdecode.c" />
-    <ClCompile Include="md5\md5.c" />
     <ClCompile Include="Replication.cpp" />
     <ClCompile Include="ReplicationTypes.cpp" />
     <ClCompile Include="ReplicationPolicy.cpp" />
@@ -154,23 +146,13 @@
     <ClInclude Include="Smoother.h" />
     <ClInclude Include="TraceFileIO.h" />
     <ClInclude Include="TLSConnection.h" />
-    <ClInclude Include="sha1\SHA1.h" />
-    <ClInclude Include="libb64\encode.h" />
-    <ClInclude Include="libb64\cencode.h" />
-    <ClInclude Include="libb64\decode.h" />
-    <ClInclude Include="libb64\cdecode.h" />
-    <ClInclude Include="md5\md5.h" />
     <ClInclude Include="IRateControl.h" />
-    <ClInclude Include="BlobStore.h" />
-    <ClInclude Include="HTTP.h" />
     <ClInclude Include="Replication.h" />
     <ClInclude Include="ReplicationTypes.h" />
     <ClInclude Include="ReplicationPolicy.h" />
     <ClInclude Include="crc32c.h" />
     <ClInclude Include="ReplicationUtils.h" />
-    <ClInclude Include="xml2json.hpp" />
     <ClInclude Include="AsyncFileWriteChecker.h" />
-    <ClInclude Include="JSONDoc.h" />
     <ClInclude Include="linux_kaio.h" />
     <ClInclude Include="LoadPlugin.h" />
   </ItemGroup>
@@ -182,4 +164,4 @@
       <UniqueIdentifier>{b79fbb2a-5d80-4135-b363-f6de83e62e73}</UniqueIdentifier>
     </Filter>
   </ItemGroup>
-</Project>
+</Project>
@@ -28,17 +28,10 @@
 #include "flow/FaultInjection.h"
 #include "flow/network.h"
 #include "Net2FileSystem.h"
 #include "fdbclient/FDBTypes.h"
 #include "fdbrpc/Replication.h"
 #include "fdbrpc/ReplicationUtils.h"
 #include "AsyncFileWriteChecker.h"
 
-
-using std::min;
-using std::max;
-using std::pair;
-using std::make_pair;
-
 bool simulator_should_inject_fault( const char* context, const char* file, int line, int error_code ) {
 	if (!g_network->isSimulated()) return false;
@@ -124,7 +117,7 @@ struct SimClogging {
 	}
 
 	double getRecvDelay( NetworkAddress from, NetworkAddress to ) {
-		auto pair = make_pair( from.ip, to.ip );
+		auto pair = std::make_pair( from.ip, to.ip );
 
 		double tnow = now();
 		double t = tnow + halfLatency();
@@ -141,7 +134,7 @@ struct SimClogging {
	}

	void clogPairFor( uint32_t from, uint32_t to, double t ) {
-		auto& u = clogPairUntil[ make_pair( from, to ) ];
+		auto& u = clogPairUntil[ std::make_pair( from, to ) ];
		u = std::max(u, now() + t);
	}
	void clogSendFor( uint32_t from, double t ) {
@@ -153,9 +146,9 @@ struct SimClogging {
		u = std::max(u, now() + t);
	}
	double setPairLatencyIfNotSet( uint32_t from, uint32_t to, double t ) {
-		auto i = clogPairLatency.find( make_pair(from,to) );
+		auto i = clogPairLatency.find( std::make_pair(from,to) );
		if (i == clogPairLatency.end())
-			i = clogPairLatency.insert( make_pair( make_pair(from,to), t ) ).first;
+			i = clogPairLatency.insert( std::make_pair( std::make_pair(from,to), t ) ).first;
		return i->second;
	}
private:
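The three SimClogging hunks above, together with the removal of `using std::pair;` and `using std::make_pair;` in the include hunk, qualify every `make_pair` call explicitly. A minimal stand-alone C++ sketch (not FDB code) of why the qualification is required once the using-declaration is gone:

```cpp
// Without `using std::make_pair;`, an unqualified make_pair call only compiles
// if argument-dependent lookup can find it. For built-in key types such as
// uint32_t there is no associated namespace, so the call must be qualified.
#include <cstdint>
#include <map>
#include <utility>

int main() {
    std::map<std::pair<uint32_t, uint32_t>, double> clogPairUntil;
    auto& u = clogPairUntil[std::make_pair(1u, 2u)]; // qualified, as in the hunk
    u = 3.5;
    return clogPairUntil.size() == 1 ? 0 : 1;
}
```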
@@ -1194,12 +1187,12 @@ public:
	TEST( kt == InjectFaults ); // Simulated machine was killed with faults

	if (kt == KillInstantly) {
-		TraceEvent(SevWarn, "FailMachine").detail("Name", machine->name).detail("Address", machine->address).detailext("ZoneId", machine->locality.zoneId()).detail("Process", describe(*machine)).detail("Rebooting", machine->rebooting).detail("Protected", protectedAddresses.count(machine->address)).backtrace();
+		TraceEvent(SevWarn, "FailMachine").detail("Name", machine->name).detail("Address", machine->address).detailext("ZoneId", machine->locality.zoneId()).detail("Process", machine->toString()).detail("Rebooting", machine->rebooting).detail("Protected", protectedAddresses.count(machine->address)).backtrace();
		// This will remove all the "tracked" messages that came from the machine being killed
		latestEventCache.clear();
		machine->failed = true;
	} else if (kt == InjectFaults) {
-		TraceEvent(SevWarn, "FaultMachine").detail("Name", machine->name).detail("Address", machine->address).detailext("ZoneId", machine->locality.zoneId()).detail("Process", describe(*machine)).detail("Rebooting", machine->rebooting).detail("Protected", protectedAddresses.count(machine->address)).backtrace();
+		TraceEvent(SevWarn, "FaultMachine").detail("Name", machine->name).detail("Address", machine->address).detailext("ZoneId", machine->locality.zoneId()).detail("Process", machine->toString()).detail("Rebooting", machine->rebooting).detail("Protected", protectedAddresses.count(machine->address)).backtrace();
		should_inject_fault = simulator_should_inject_fault;
		machine->fault_injection_r = g_random->randomUniqueID().first();
		machine->fault_injection_p1 = 0.1;
@@ -1315,19 +1308,19 @@ public:
	else if ((kt == KillInstantly) || (kt == InjectFaults)) {
		TraceEvent("DeadMachine").detailext("ZoneId", zoneId).detail("KillType", kt).detail("ProcessesLeft", processesLeft.size()).detail("ProcessesDead", processesDead.size()).detail("TotalProcesses", machines.size()).detail("ProcessesPerMachine", processesPerMachine).detail("TLogPolicy", tLogPolicy->info()).detail("StoragePolicy", storagePolicy->info());
		for (auto process : processesLeft) {
-			TraceEvent("DeadMachineSurvivors").detailext("ZoneId", zoneId).detail("KillType", kt).detail("ProcessesLeft", processesLeft.size()).detail("ProcessesDead", processesDead.size()).detail("SurvivingProcess", describe(*process));
+			TraceEvent("DeadMachineSurvivors").detailext("ZoneId", zoneId).detail("KillType", kt).detail("ProcessesLeft", processesLeft.size()).detail("ProcessesDead", processesDead.size()).detail("SurvivingProcess", process->toString());
		}
		for (auto process : processesDead) {
-			TraceEvent("DeadMachineVictims").detailext("ZoneId", zoneId).detail("KillType", kt).detail("ProcessesLeft", processesLeft.size()).detail("ProcessesDead", processesDead.size()).detail("VictimProcess", describe(*process));
+			TraceEvent("DeadMachineVictims").detailext("ZoneId", zoneId).detail("KillType", kt).detail("ProcessesLeft", processesLeft.size()).detail("ProcessesDead", processesDead.size()).detail("VictimProcess", process->toString());
		}
	}
	else {
		TraceEvent("ClearMachine").detailext("ZoneId", zoneId).detail("KillType", kt).detail("ProcessesLeft", processesLeft.size()).detail("ProcessesDead", processesDead.size()).detail("TotalProcesses", machines.size()).detail("ProcessesPerMachine", processesPerMachine).detail("TLogPolicy", tLogPolicy->info()).detail("StoragePolicy", storagePolicy->info());
		for (auto process : processesLeft) {
-			TraceEvent("ClearMachineSurvivors").detailext("ZoneId", zoneId).detail("KillType", kt).detail("ProcessesLeft", processesLeft.size()).detail("ProcessesDead", processesDead.size()).detail("SurvivingProcess", describe(*process));
+			TraceEvent("ClearMachineSurvivors").detailext("ZoneId", zoneId).detail("KillType", kt).detail("ProcessesLeft", processesLeft.size()).detail("ProcessesDead", processesDead.size()).detail("SurvivingProcess", process->toString());
		}
		for (auto process : processesDead) {
-			TraceEvent("ClearMachineVictims").detailext("ZoneId", zoneId).detail("KillType", kt).detail("ProcessesLeft", processesLeft.size()).detail("ProcessesDead", processesDead.size()).detail("VictimProcess", describe(*process));
+			TraceEvent("ClearMachineVictims").detailext("ZoneId", zoneId).detail("KillType", kt).detail("ProcessesLeft", processesLeft.size()).detail("ProcessesDead", processesDead.size()).detail("VictimProcess", process->toString());
		}
	}
}
@@ -1392,7 +1385,7 @@ public:
	if ((kt != Reboot) && (protectedAddresses.count(procRecord->address))) {
		kt = Reboot;
		TraceEvent(SevWarn, "DcKillChanged").detailext("DataCenter", dcId).detail("KillType", kt).detail("OrigKillType", ktOrig)
-			.detail("Reason", "Datacenter has protected process").detail("ProcessAddress", procRecord->address).detail("Failed", procRecord->failed).detail("Rebooting", procRecord->rebooting).detail("Excluded", procRecord->excluded).detail("Cleared", procRecord->cleared).detail("Process", describe(*procRecord));
+			.detail("Reason", "Datacenter has protected process").detail("ProcessAddress", procRecord->address).detail("Failed", procRecord->failed).detail("Rebooting", procRecord->rebooting).detail("Excluded", procRecord->excluded).detail("Cleared", procRecord->cleared).detail("Process", procRecord->toString());
	}
	datacenterZones[processZoneId.get()] ++;
	dcProcesses ++;
@@ -1422,11 +1415,11 @@ public:
	TraceEvent("DeadDataCenter").detailext("DataCenter", dcId).detail("KillType", kt).detail("DcZones", datacenterZones.size()).detail("DcProcesses", dcProcesses).detail("ProcessesDead", processesDead.size()).detail("ProcessesLeft", processesLeft.size()).detail("TLogPolicy", tLogPolicy->info()).detail("StoragePolicy", storagePolicy->info());
	for (auto process : processesLeft) {
		auto zoneId = process->locality.zoneId();
-		TraceEvent("DeadDcSurvivors").detailext("ZoneId", zoneId).detail("KillType", kt).detail("ProcessesLeft", processesLeft.size()).detail("ProcessesDead", processesDead.size()).detail("SurvivingProcess", describe(*process));
+		TraceEvent("DeadDcSurvivors").detailext("ZoneId", zoneId).detail("KillType", kt).detail("ProcessesLeft", processesLeft.size()).detail("ProcessesDead", processesDead.size()).detail("SurvivingProcess", process->toString());
	}
	for (auto process : processesDead) {
		auto zoneId = process->locality.zoneId();
-		TraceEvent("DeadDcVictims").detailext("ZoneId", zoneId).detail("KillType", kt).detail("ProcessesLeft", processesLeft.size()).detail("ProcessesDead", processesDead.size()).detail("VictimProcess", describe(*process));
+		TraceEvent("DeadDcVictims").detailext("ZoneId", zoneId).detail("KillType", kt).detail("ProcessesLeft", processesLeft.size()).detail("ProcessesDead", processesDead.size()).detail("VictimProcess", process->toString());
	}
}
@@ -1612,7 +1605,6 @@ public:
	std::map<Optional<Standalone<StringRef>>, MachineInfo > machines;
	std::map<NetworkAddress, ProcessInfo*> addressMap;
	std::map<ProcessInfo*, Promise<Void>> filesDeadMap;
	std::set<AddressExclusion> exclusionSet;

	//tasks is guarded by ISimulator::mutex
	std::priority_queue<Task, std::vector<Task>> tasks;

@@ -1383,12 +1383,13 @@ ACTOR Future<Void> teamTracker( DDTeamCollection *self, Reference<TCTeamInfo> te
	state bool firstCheck = true;

	Void _ = wait( yield() );
-	TraceEvent("TeamTrackerStarting", self->masterId).detail("Reason", "Initial wait complete (sc)").detail("Team", team->getDesc());
+	if(!wrongSize) {
+		TraceEvent("TeamTrackerStarting", self->masterId).detail("Reason", "Initial wait complete (sc)").detail("Team", team->getDesc());
+	}
	self->priority_teams[team->getPriority()]++;

	try {
		loop {
			TraceEvent("TeamHealthChangeDetected", self->masterId).detail("IsReady", self->initialFailureReactionDelay.isReady() );
			// Check if the number of degraded machines has changed
			state vector<Future<Void>> change;
			auto servers = team->getServerIDs();
@@ -1437,11 +1438,13 @@ ACTOR Future<Void> teamTracker( DDTeamCollection *self, Reference<TCTeamInfo> te
	lastOptimal = optimal;
}

-if( serversLeft != lastServersLeft || anyUndesired != lastAnyUndesired || anyWrongConfiguration != lastWrongConfiguration || wrongSize || recheck ) {
-	TraceEvent("TeamHealthChanged", self->masterId)
-		.detail("Team", team->getDesc()).detail("ServersLeft", serversLeft)
-		.detail("LastServersLeft", lastServersLeft).detail("ContainsUndesiredServer", anyUndesired)
-		.detail("HealthyTeamsCount", self->healthyTeamCount).detail("IsWrongConfiguration", anyWrongConfiguration);
+if( serversLeft != lastServersLeft || anyUndesired != lastAnyUndesired || anyWrongConfiguration != lastWrongConfiguration || recheck ) {
+	if(!wrongSize) {
+		TraceEvent("TeamHealthChanged", self->masterId)
+			.detail("Team", team->getDesc()).detail("ServersLeft", serversLeft)
+			.detail("LastServersLeft", lastServersLeft).detail("ContainsUndesiredServer", anyUndesired)
+			.detail("HealthyTeamsCount", self->healthyTeamCount).detail("IsWrongConfiguration", anyWrongConfiguration);
+	}

	team->setWrongConfiguration( anyWrongConfiguration );

@@ -1474,7 +1477,6 @@ ACTOR Future<Void> teamTracker( DDTeamCollection *self, Reference<TCTeamInfo> te
	lastServersLeft = serversLeft;
	lastAnyUndesired = anyUndesired;
	lastWrongConfiguration = anyWrongConfiguration;
-	wrongSize = false;

	state int lastPriority = team->getPriority();
	if( serversLeft < self->configuration.storageTeamSize ) {
@@ -1499,7 +1501,9 @@ ACTOR Future<Void> teamTracker( DDTeamCollection *self, Reference<TCTeamInfo> te
	self->priority_teams[team->getPriority()]++;
}

-TraceEvent("TeamPriorityChange", self->masterId).detail("Priority", team->getPriority());
+if(!wrongSize) {
+	TraceEvent("TeamPriorityChange", self->masterId).detail("Priority", team->getPriority());
+}

lastZeroHealthy = self->zeroHealthyTeams->get(); //set this again in case it changed from this teams health changing
if( self->initialFailureReactionDelay.isReady() && !self->zeroHealthyTeams->get() ) {
@@ -1550,7 +1554,9 @@ ACTOR Future<Void> teamTracker( DDTeamCollection *self, Reference<TCTeamInfo> te
			}
		}
	} else {
-		TraceEvent("TeamHealthNotReady", self->masterId).detail("HealthyTeamCount", self->healthyTeamCount);
+		if(!wrongSize) {
+			TraceEvent("TeamHealthNotReady", self->masterId).detail("HealthyTeamCount", self->healthyTeamCount);
+		}
	}
}

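The recurring `if(!wrongSize)` guards in these teamTracker hunks share one idea: suppress per-team trace events while a team is known to have the wrong size. A minimal sketch of that gating pattern, using a hypothetical printf stand-in for TraceEvent:

```cpp
#include <cstdio>

// Hypothetical stand-in for TraceEvent: emit team-health logging only when
// the team is not flagged as wrongly sized, cutting log noise for teams that
// are expected to be rebuilt anyway.
void traceTeamHealthChanged(bool wrongSize, int serversLeft, int lastServersLeft) {
    if (!wrongSize) {
        std::printf("TeamHealthChanged ServersLeft=%d LastServersLeft=%d\n",
                    serversLeft, lastServersLeft);
    }
}

int main() {
    traceTeamHealthChanged(false, 2, 3); // logs
    traceTeamHealthChanged(true, 2, 3);  // suppressed
}
```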
@@ -164,27 +164,26 @@ public:
			fullSnapshot(data);
			resetSnapshot = true;
			committedWriteBytes = notifiedCommittedWriteBytes.get();
+			overheadWriteBytes = 0;

			if(disableSnapshot) {
				return Void();
			}
			log_op(OpCommit, StringRef(), StringRef());
		}
		else {
			int64_t bytesWritten = commit_queue(queue, !disableSnapshot, sequential);
-			if(!disableSnapshot) {
-				committedWriteBytes += bytesWritten + OP_DISK_OVERHEAD; //OP_DISK_OVERHEAD is for the following log_op(OpCommit)
-
-				//If there have been no mutations since the last commit, do nothing
-				if( notifiedCommittedWriteBytes.get() == committedWriteBytes )
-					return Void();
-
-				notifiedCommittedWriteBytes.set(committedWriteBytes);
-			}

			if(disableSnapshot) {
				return Void();
			}

-			log_op(OpCommit, StringRef(), StringRef());
-			if(!transactionIsLarge) {
-				committedWriteBytes += log->getCommitOverhead();
+			if(bytesWritten > 0 || committedWriteBytes > notifiedCommittedWriteBytes.get()) {
+				committedWriteBytes += bytesWritten + overheadWriteBytes + OP_DISK_OVERHEAD; //OP_DISK_OVERHEAD is for the following log_op(OpCommit)
+				notifiedCommittedWriteBytes.set(committedWriteBytes); //This set will cause snapshot items to be written, so it must happen before the OpCommit
+				log_op(OpCommit, StringRef(), StringRef());
+				overheadWriteBytes = log->getCommitOverhead();
			}
		}

		auto c = log->commit();
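The rewritten commit path introduces `overheadWriteBytes` so the log's commit overhead is charged to the *next* commit's byte accounting, and the notify/log_op step is skipped entirely when nothing was written. A simplified sketch of that accounting, assuming a fixed per-commit overhead (hypothetical stand-in types, not the actual IKeyValueStore interface):

```cpp
#include <cstdint>
#include <cstdio>

// Deferred overhead accounting: a commit that wrote no bytes (and whose
// counters already match) leaves everything untouched; otherwise the previous
// commit's overhead is folded into this commit's byte total.
struct Accounting {
    int64_t committedWriteBytes = 0;
    int64_t overheadWriteBytes = 0;
    int64_t notifiedCommittedWriteBytes = 0;

    void commit(int64_t bytesWritten, int64_t commitOverhead, int64_t opDiskOverhead) {
        if (bytesWritten > 0 || committedWriteBytes > notifiedCommittedWriteBytes) {
            committedWriteBytes += bytesWritten + overheadWriteBytes + opDiskOverhead;
            notifiedCommittedWriteBytes = committedWriteBytes;
            overheadWriteBytes = commitOverhead; // charged to the next commit
        }
    }
};

int main() {
    Accounting a;
    a.commit(100, 8, 4);
    a.commit(0, 8, 4); // no-op: nothing written and counters already match
    std::printf("%lld %lld\n", (long long)a.committedWriteBytes,
                (long long)a.notifiedCommittedWriteBytes);
}
```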
@@ -347,6 +346,7 @@ private:
	IDiskQueue *log;
	Future<Void> recovering, snapshotting;
	int64_t committedWriteBytes;
+	int64_t overheadWriteBytes;
	NotifiedVersion notifiedCommittedWriteBytes;
	Key recoveredSnapshotKey; // After recovery, the next key in the currently uncompleted snapshot
	IDiskQueue::location currentSnapshotEnd; //The end of the most recently completed snapshot (this snapshot cannot be discarded)
@@ -710,7 +710,7 @@ private:
};

KeyValueStoreMemory::KeyValueStoreMemory( IDiskQueue* log, UID id, int64_t memoryLimit, bool disableSnapshot, bool replaceContent, bool exactRecovery )
-	: log(log), id(id), previousSnapshotEnd(-1), currentSnapshotEnd(-1), resetSnapshot(false), memoryLimit(memoryLimit), committedWriteBytes(0),
+	: log(log), id(id), previousSnapshotEnd(-1), currentSnapshotEnd(-1), resetSnapshot(false), memoryLimit(memoryLimit), committedWriteBytes(0), overheadWriteBytes(0),
	  committedDataSize(0), transactionSize(0), transactionIsLarge(false), disableSnapshot(disableSnapshot), replaceContent(replaceContent), snapshotCount(0), firstCommitWithSnapshot(true)
{
	recovering = recover( this, exactRecovery );

@@ -42,6 +42,8 @@ ServerKnobs::ServerKnobs(bool randomize, ClientKnobs* clientKnobs) {
	init( BUGGIFY_TLOG_STORAGE_MIN_UPDATE_INTERVAL, 30 );
	init( UNFLUSHED_DATA_RATIO, 0.05 ); if( randomize && BUGGIFY ) UNFLUSHED_DATA_RATIO = 0.0;
	init( DESIRED_TOTAL_BYTES, 150000 ); if( randomize && BUGGIFY ) DESIRED_TOTAL_BYTES = 10000;
+	init( DESIRED_UPDATE_BYTES, 2*DESIRED_TOTAL_BYTES );
+	init( UPDATE_DELAY, 0.001 );
	init( MAXIMUM_PEEK_BYTES, 10e6 );
	init( APPLY_MUTATION_BYTES, 1e6 );
	init( RECOVERY_DATA_BYTE_LIMIT, 100000 );

@@ -46,6 +46,8 @@ public:
	double BUGGIFY_TLOG_STORAGE_MIN_UPDATE_INTERVAL;
	double UNFLUSHED_DATA_RATIO;
	int DESIRED_TOTAL_BYTES;
+	int DESIRED_UPDATE_BYTES;
+	double UPDATE_DELAY;
	int MAXIMUM_PEEK_BYTES;
	int APPLY_MUTATION_BYTES;
	int RECOVERY_DATA_BYTE_LIMIT;

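Both knob hunks follow the declare-then-init pattern visible here: a field in Knobs.h plus an `init(...)` in the ServerKnobs constructor, where one knob may be derived from another (`DESIRED_UPDATE_BYTES = 2*DESIRED_TOTAL_BYTES`). A simplified sketch of that shape, with plain assignments standing in for init() and BUGGIFY:

```cpp
#include <cstdio>

// Simplified stand-in for the ServerKnobs pattern above; the real class uses
// an init() helper plus BUGGIFY-driven randomization in simulation.
struct MiniServerKnobs {
    int DESIRED_TOTAL_BYTES;
    int DESIRED_UPDATE_BYTES;
    double UPDATE_DELAY;

    explicit MiniServerKnobs(bool randomize) {
        DESIRED_TOTAL_BYTES  = randomize ? 10000 : 150000; // BUGGIFY value vs. default
        DESIRED_UPDATE_BYTES = 2 * DESIRED_TOTAL_BYTES;    // derived, as in the hunk
        UPDATE_DELAY         = 0.001;
    }
};

int main() {
    MiniServerKnobs knobs(false);
    std::printf("%d %d %g\n", knobs.DESIRED_TOTAL_BYTES,
                knobs.DESIRED_UPDATE_BYTES, knobs.UPDATE_DELAY);
}
```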
@@ -566,19 +566,29 @@ ACTOR Future<Void> commitBatch(
		}

		// This second pass through committed transactions assigns the actual mutations to the appropriate storage servers' tags
-		int mutationCount = 0, mutationBytes = 0;
+		state int mutationCount = 0;
+		state int mutationBytes = 0;

		state std::map<Key, MutationListRef> logRangeMutations;
		state Arena logRangeMutationsArena;
		state uint32_t v = commitVersion / CLIENT_KNOBS->LOG_RANGE_BLOCK_SIZE;
+		state int transactionNum = 0;
+		state int yieldBytes = 0;

-		for (int t = 0; t<trs.size(); t++) {
-			if (committed[t] == ConflictBatch::TransactionCommitted && (!locked || trs[t].isLockAware())) {
-				for (auto m : trs[t].transaction.mutations) {
+		for (; transactionNum<trs.size(); transactionNum++) {
+			if (committed[transactionNum] == ConflictBatch::TransactionCommitted && (!locked || trs[transactionNum].isLockAware())) {
+				state int mutationNum = 0;
+				state VectorRef<MutationRef>* pMutations = &trs[transactionNum].transaction.mutations;
+				for (; mutationNum < pMutations->size(); mutationNum++) {
+					if(yieldBytes > SERVER_KNOBS->DESIRED_TOTAL_BYTES) {
+						yieldBytes = 0;
+						Void _ = wait(yield());
+					}
+
+					auto& m = (*pMutations)[mutationNum];
					mutationCount++;
					mutationBytes += m.expectedSize();
+					yieldBytes += m.expectedSize();
					// Determine the set of tags (responsible storage servers) for the mutation, splitting it
					// if necessary. Serialize (splits of) the mutation into the message buffer and add the tags.

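The commitBatch hunk turns the loop's locals into `state` variables precisely so the actor can `wait(yield())` mid-loop: once roughly DESIRED_TOTAL_BYTES of mutations have been scanned, it yields to keep the event loop responsive. A plain-C++ sketch of the byte-budgeted yield idea, where yieldNow() is a hypothetical stand-in for the actor's wait(yield()):

```cpp
#include <cstddef>
#include <string>
#include <vector>

// Hypothetical stand-in for wait(yield()) in a flow actor.
void yieldNow() { /* let other tasks run */ }

size_t processMutations(const std::vector<std::string>& mutations, size_t yieldThreshold) {
    size_t yieldBytes = 0;
    size_t mutationBytes = 0;
    for (const auto& m : mutations) {
        if (yieldBytes > yieldThreshold) { // budget exhausted: yield and reset
            yieldBytes = 0;
            yieldNow();
        }
        mutationBytes += m.size();
        yieldBytes += m.size();
        // ... assign tags and serialize the mutation here ...
    }
    return mutationBytes;
}
```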
@@ -840,7 +850,7 @@ ACTOR Future<Void> commitBatch(
	Void _ = wait(yield());

	if(!self->txsPopVersions.size() || msg.popTo > self->txsPopVersions.back().second) {
-		if(self->txsPopVersions.size() > SERVER_KNOBS->MAX_TXS_POP_VERSION_HISTORY) {
+		if(self->txsPopVersions.size() >= SERVER_KNOBS->MAX_TXS_POP_VERSION_HISTORY) {
			TraceEvent(SevWarnAlways, "DiscardingTxsPopHistory").suppressFor(1.0);
			self->txsPopVersions.pop_front();
		}
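Switching `>` to `>=` fixes an off-by-one in the txsPopVersions cap: with `>`, the deque could hold MAX_TXS_POP_VERSION_HISTORY + 1 entries before the oldest was dropped. A small generic sketch of the bounded-history invariant (not the actor code):

```cpp
#include <cassert>
#include <cstdint>
#include <deque>
#include <utility>

// Keep at most maxSize entries: with >=, the pop happens while the deque is
// already at the cap, so the push below can never exceed it.
void pushVersion(std::deque<std::pair<uint64_t, uint64_t>>& hist,
                 std::pair<uint64_t, uint64_t> v, size_t maxSize) {
    if (hist.size() >= maxSize)
        hist.pop_front();
    hist.push_back(v);
    assert(hist.size() <= maxSize);
}
```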
@@ -1228,7 +1238,7 @@ ACTOR Future<Void> monitorRemoteCommitted(ProxyCommitData* self, Reference<Async
	for(auto &it : remoteLogs.get()) {
		replies.push_back(brokenPromiseToNever( it.interf().getQueuingMetrics.getReply( TLogQueuingMetricsRequest() ) ));
	}
-	Void _ = wait( waitForAll(replies) );
+	Void _ = wait( waitForAll(replies) || onChange );

	if(onChange.isReady()) {
		break;

@@ -414,7 +414,7 @@ struct RolesInfo {
	obj["input_bytes"] = StatusCounter(metrics.getValue("BytesInput")).getStatus();
	obj["durable_bytes"] = StatusCounter(metrics.getValue("BytesDurable")).getStatus();
	obj.setKeyRawNumber("query_queue_max", metrics.getValue("QueryQueueMax"));
-	obj["total_queries"] = StatusCounter(metrics.getValue("AllQueries")).getStatus();
+	obj["total_queries"] = StatusCounter(metrics.getValue("QueryQueue")).getStatus();
	obj["finished_queries"] = StatusCounter(metrics.getValue("FinishedQueries")).getStatus();
	obj["bytes_queried"] = StatusCounter(metrics.getValue("BytesQueried")).getStatus();
	obj["keys_queried"] = StatusCounter(metrics.getValue("RowsQueried")).getStatus();
@@ -1398,7 +1398,7 @@ ACTOR static Future<JsonBuilderObject> workloadStatusFetcher(Reference<AsyncVar<
	StatusCounter readBytes;

	for(auto &ss : storageServers.get()) {
-		readRequests.updateValues( StatusCounter(ss.second.getValue("AllQueries")));
+		readRequests.updateValues( StatusCounter(ss.second.getValue("QueryQueue")));
		reads.updateValues( StatusCounter(ss.second.getValue("FinishedQueries")));
		readKeys.updateValues( StatusCounter(ss.second.getValue("RowsQueried")));
		readBytes.updateValues( StatusCounter(ss.second.getValue("BytesQueried")));