Merge branch 'release-5.1' into merge-release-5.1-into-release-5.2
# Conflicts: # Makefile # documentation/sphinx/Makefile # documentation/sphinx/source/administration.rst # documentation/sphinx/source/anti-features.rst # documentation/sphinx/source/api-general.rst # documentation/sphinx/source/building-cluster.rst # documentation/sphinx/source/class-scheduling-go.rst # documentation/sphinx/source/class-scheduling-java.rst # documentation/sphinx/source/class-scheduling-ruby.rst # documentation/sphinx/source/class-scheduling.rst # documentation/sphinx/source/command-line-interface.rst # documentation/sphinx/source/configuration.rst # documentation/sphinx/source/downloads.rst # documentation/sphinx/source/fault-tolerance.rst # documentation/sphinx/source/features.rst # documentation/sphinx/source/getting-started-linux.rst # documentation/sphinx/source/getting-started-mac.rst # documentation/sphinx/source/guide-common.rst.inc # documentation/sphinx/source/hierarchical-documents-java.rst # documentation/sphinx/source/index.rst # documentation/sphinx/source/known-limitations.rst # documentation/sphinx/source/multimaps-java.rst # documentation/sphinx/source/performance.rst # documentation/sphinx/source/segmented-range-reads-java.rst # documentation/sphinx/source/simple-indexes-java.rst # documentation/sphinx/source/spatial-indexing-java.rst # documentation/sphinx/source/subspace-indirection-java.rst # documentation/sphinx/source/tables-java.rst # documentation/sphinx/source/vector-java.rst # versions.target
This commit is contained in:
commit
e2d3afb621
9
Makefile
9
Makefile
|
@ -180,8 +180,15 @@ docpreview: javadoc godoc
|
|||
docpreview_clean:
|
||||
CLEAN_TARGETS= $(MAKE) -C documentation docpreview_clean
|
||||
|
||||
docpackage: javadoc godoc
|
||||
packages/foundationdb-docs-$(VERSION)-$(PKGRELEASE).tar.gz: FORCE javadoc godoc
|
||||
TARGETS= $(MAKE) -C documentation docpackage
|
||||
@mkdir -p packages
|
||||
@rm -f packages/foundationdb-docs-$(VERSION)-$(PKGRELEASE).tar.gz
|
||||
@cp documentation/sphinx/.dist/foundationdb-docs-$(VERSION)-$(PKGRELEASE).tar.gz packages/foundationdb-docs-$(VERSION)-$(PKGRELEASE).tar.gz
|
||||
|
||||
docpackage: packages/foundationdb-docs-$(VERSION)-$(PKGRELEASE).tar.gz
|
||||
|
||||
FORCE:
|
||||
|
||||
.SECONDEXPANSION:
|
||||
|
||||
|
|
|
@ -340,9 +340,9 @@ class DirectoryTest(Test):
|
|||
# errors += directory_util.check_for_duplicate_prefixes(db, self.prefix_log)
|
||||
return errors
|
||||
|
||||
def get_result_specfications(self):
|
||||
def get_result_specifications(self):
|
||||
return [
|
||||
ResultSpecification(self.stack, key_start_index=1, ordering_index=1),
|
||||
ResultSpecification(self.stack_subspace, key_start_index=1, ordering_index=1),
|
||||
ResultSpecification(self.directory_log, ordering_index=0),
|
||||
ResultSpecification(self.subspace_log, ordering_index=0)
|
||||
]
|
||||
|
|
|
@ -1,12 +1,12 @@
|
|||
fdb-go
|
||||
======
|
||||
|
||||
[Go language](http://golang.org) bindings for [FoundationDB](http://foundationdb.org/documentation/), a distributed key-value store with ACID transactions.
|
||||
[Go language](http://golang.org) bindings for [FoundationDB](https://www.foundationdb.org/documentation/), a distributed key-value store with ACID transactions.
|
||||
|
||||
This package requires:
|
||||
|
||||
- Go 1.1+ with CGO enabled
|
||||
- FoundationDB C API 2.0.x, 3.0.x, or 4.x.y (part of the [FoundationDB clients package](https://files.foundationdb.org/fdb-c/))
|
||||
- FoundationDB C API 2.0.x, 3.0.x, or 4.x.y (part of the [FoundationDB clients package](https://www.foundationdb.org/downloads/fdb-c/))
|
||||
|
||||
Use of this package requires the selection of a FoundationDB API version at runtime. This package currently supports FoundationDB API versions 200-520.
|
||||
|
||||
|
@ -27,5 +27,5 @@ of downloading from the remote repository.
|
|||
Documentation
|
||||
-------------
|
||||
|
||||
* [API documentation](https://foundationdb.org/documentation/godoc/fdb.html)
|
||||
* [Tutorial](https://foundationdb.org/documentation/class-scheduling-go.html)
|
||||
* [API documentation](https://www.foundationdb.org/documentation/godoc/fdb.html)
|
||||
* [Tutorial](https://www.foundationdb.org/documentation/class-scheduling-go.html)
|
||||
|
|
|
@ -25,7 +25,7 @@ GOPATH := $(CURDIR)/bindings/go/build
|
|||
GO_IMPORT_PATH := github.com/apple/foundationdb/bindings/go/src
|
||||
GO_DEST := $(GOPATH)/src/$(GO_IMPORT_PATH)
|
||||
|
||||
.PHONY: fdb_go fdb_go_path fdb_go_tester fdb_go_tester_clean godoc godoc_clean
|
||||
.PHONY: fdb_go fdb_go_path fdb_go_fmt fdb_go_fmt_check fdb_go_tester fdb_go_tester_clean godoc godoc_clean
|
||||
|
||||
# We only override if the environment didn't set it (this is used by
|
||||
# the fdbwebsite documentation build process)
|
||||
|
@ -49,7 +49,7 @@ GO_PACKAGE_OBJECTS := $(addprefix $(GO_PACKAGE_OUTDIR)/,$(GO_PACKAGES:=.a))
|
|||
|
||||
GO_SRC := $(shell find $(CURDIR)/bindings/go/src -name '*.go')
|
||||
|
||||
fdb_go: $(GO_PACKAGE_OBJECTS) $(GO_SRC)
|
||||
fdb_go: $(GO_PACKAGE_OBJECTS) $(GO_SRC) fdb_go_fmt_check
|
||||
|
||||
fdb_go_fmt: $(GO_SRC)
|
||||
@echo "Formatting fdb_go"
|
||||
|
@ -59,10 +59,13 @@ fdb_go_fmt_check: $(GO_SRC)
|
|||
@echo "Checking fdb_go"
|
||||
@bash -c 'fmtoutstr=$$(gofmt -l $(GO_SRC)) ; if [[ -n "$${fmtoutstr}" ]] ; then echo "Detected go formatting violations for the following files:" ; echo "$${fmtoutstr}" ; echo "Try running: make fdb_go_fmt"; exit 1 ; fi'
|
||||
|
||||
fdb_go_path: $(GO_SRC)
|
||||
$(GO_DEST)/.stamp: $(GO_SRC)
|
||||
@echo "Creating fdb_go_path"
|
||||
@mkdir -p $(GO_DEST)
|
||||
@cp -r bindings/go/src/* $(GO_DEST)
|
||||
@touch $(GO_DEST)/.stamp
|
||||
|
||||
fdb_go_path: $(GO_DEST)/.stamp
|
||||
|
||||
fdb_go_clean:
|
||||
@echo "Cleaning fdb_go"
|
||||
|
@ -74,31 +77,31 @@ fdb_go_tester_clean:
|
|||
@echo "Cleaning fdb_go_tester"
|
||||
@rm -rf $(GOPATH)/bin
|
||||
|
||||
$(GOPATH)/bin/_stacktester: fdb_go_path fdb_go_fmt_check $(GO_SRC) $(GO_PACKAGE_OBJECTS) $(GO_DEST)/fdb/generated.go
|
||||
$(GOPATH)/bin/_stacktester: $(GO_DEST)/.stamp $(GO_SRC) $(GO_PACKAGE_OBJECTS) $(GO_DEST)/fdb/generated.go
|
||||
@echo "Compiling $(basename $(notdir $@))"
|
||||
@go install $(GO_IMPORT_PATH)/_stacktester
|
||||
|
||||
$(GO_PACKAGE_OUTDIR)/fdb/tuple.a: fdb_go_path fdb_go_fmt_check $(GO_SRC) $(GO_PACKAGE_OUTDIR)/fdb.a $(GO_DEST)/fdb/generated.go
|
||||
$(GO_PACKAGE_OUTDIR)/fdb/tuple.a: $(GO_DEST)/.stamp $(GO_SRC) $(GO_PACKAGE_OUTDIR)/fdb.a $(GO_DEST)/fdb/generated.go
|
||||
@echo "Compiling fdb/tuple"
|
||||
@go install $(GO_IMPORT_PATH)/fdb/tuple
|
||||
|
||||
$(GO_PACKAGE_OUTDIR)/fdb/subspace.a: fdb_go_path fdb_go_fmt_check $(GO_SRC) $(GO_PACKAGE_OUTDIR)/fdb.a $(GO_PACKAGE_OUTDIR)/fdb/tuple.a $(GO_DEST)/fdb/generated.go
|
||||
$(GO_PACKAGE_OUTDIR)/fdb/subspace.a: $(GO_DEST)/.stamp $(GO_SRC) $(GO_PACKAGE_OUTDIR)/fdb.a $(GO_PACKAGE_OUTDIR)/fdb/tuple.a $(GO_DEST)/fdb/generated.go
|
||||
@echo "Compiling fdb/subspace"
|
||||
@go install $(GO_IMPORT_PATH)/fdb/subspace
|
||||
|
||||
$(GO_PACKAGE_OUTDIR)/fdb/directory.a: fdb_go_path fdb_go_fmt_check $(GO_SRC) $(GO_PACKAGE_OUTDIR)/fdb.a $(GO_PACKAGE_OUTDIR)/fdb/tuple.a $(GO_PACKAGE_OUTDIR)/fdb/subspace.a $(GO_DEST)/fdb/generated.go
|
||||
$(GO_PACKAGE_OUTDIR)/fdb/directory.a: $(GO_DEST)/.stamp $(GO_SRC) $(GO_PACKAGE_OUTDIR)/fdb.a $(GO_PACKAGE_OUTDIR)/fdb/tuple.a $(GO_PACKAGE_OUTDIR)/fdb/subspace.a $(GO_DEST)/fdb/generated.go
|
||||
@echo "Compiling fdb/directory"
|
||||
@go install $(GO_IMPORT_PATH)/fdb/directory
|
||||
|
||||
$(GO_PACKAGE_OUTDIR)/fdb.a: fdb_go_path fdb_go_fmt_check $(GO_SRC) $(GO_DEST)/fdb/generated.go
|
||||
$(GO_PACKAGE_OUTDIR)/fdb.a: $(GO_DEST)/.stamp lib/libfdb_c.$(DLEXT) $(GO_SRC) $(GO_DEST)/fdb/generated.go
|
||||
@echo "Compiling fdb"
|
||||
@go install $(GO_IMPORT_PATH)/fdb
|
||||
|
||||
$(GO_DEST)/fdb/generated.go: fdb_go_path fdb_go_fmt_check lib/libfdb_c.$(DLEXT) bindings/go/src/_util/translate_fdb_options.go fdbclient/vexillographer/fdb.options
|
||||
$(GO_DEST)/fdb/generated.go: $(GO_DEST)/.stamp bindings/go/src/_util/translate_fdb_options.go fdbclient/vexillographer/fdb.options
|
||||
@echo "Building $@"
|
||||
@go run bindings/go/src/_util/translate_fdb_options.go < fdbclient/vexillographer/fdb.options > $@
|
||||
|
||||
godoc: fdb_go_path $(GO_SRC)
|
||||
godoc: fdb_go_path $(GO_SRC) $(GO_DEST)/fdb/generated.go
|
||||
@echo "Generating Go Documentation"
|
||||
@rm -rf $(GODOC_DIR)/godoc
|
||||
@mkdir -p $(GODOC_DIR)/godoc
|
||||
|
@ -113,6 +116,12 @@ godoc: fdb_go_path $(GO_SRC)
|
|||
@(sed -i -e 's_a href="tuple/"_a href="fdb.tuple.html"_' $(GODOC_DIR)/godoc/fdb.html)
|
||||
@(sed -i -e 's_a href="subspace/"_a href="fdb.subspace.html"_' $(GODOC_DIR)/godoc/fdb.html)
|
||||
@(sed -i -e 's_a href="directory/"_a href="fdb.directory.html"_' $(GODOC_DIR)/godoc/fdb.html)
|
||||
|
||||
@(sed -i -e 's_a href="/pkg/builtin_a href="https://godoc.org/pkg/builtin_g;s_a href="/src/github.com/apple/foundationdb_a href="https://github.com/apple/foundationdb/tree/master_g;s_a href="/pkg/github.com/apple/foundationdb/bindings/go/src/fdb/_a href="./fdb.html_g' $(GODOC_DIR)/godoc/fdb.html)
|
||||
@(sed -i -e 's_a href="/pkg/builtin_a href="https://godoc.org/pkg/builtin_g;s_a href="/src/github.com/apple/foundationdb_a href="https://github.com/apple/foundationdb/tree/master_g;s_a href="/pkg/github.com/apple/foundationdb/bindings/go/src/fdb/_a href="./fdb.html_g' $(GODOC_DIR)/godoc/fdb.directory.html)
|
||||
@(sed -i -e 's_a href="/pkg/builtin_a href="https://godoc.org/pkg/builtin_g;s_a href="/src/github.com/apple/foundationdb_a href="https://github.com/apple/foundationdb/tree/master_g;s_a href="/pkg/github.com/apple/foundationdb/bindings/go/src/fdb/_a href="./fdb.html_g' $(GODOC_DIR)/godoc/fdb.subspace.html)
|
||||
@(sed -i -e 's_a href="/pkg/builtin_a href="https://godoc.org/pkg/builtin_g;s_a href="/src/github.com/apple/foundationdb_a href="https://github.com/apple/foundationdb/tree/master_g;s_a href="/pkg/github.com/apple/foundationdb/bindings/go/src/fdb/_a href="./fdb.html_g' $(GODOC_DIR)/godoc/fdb.tuple.html)
|
||||
|
||||
|
||||
godoc_clean:
|
||||
@echo "Cleaning Go Documentation"
|
||||
|
|
|
@ -26,7 +26,7 @@
|
|||
//
|
||||
// For general guidance on directory usage, see the Directories section of the
|
||||
// Developer Guide
|
||||
// (https://foundationdb.org/documentation/developer-guide.html#developer-guide-directories).
|
||||
// (https://www.foundationdb.org/documentation/developer-guide.html#developer-guide-directories).
|
||||
//
|
||||
// Directories are identified by hierarchical paths analogous to the paths in a
|
||||
// Unix-like file system. A path is represented as a slice of strings. Each
|
||||
|
|
|
@ -25,12 +25,12 @@ Package fdb provides an interface to FoundationDB databases (version 2.0 or high
|
|||
|
||||
To build and run programs using this package, you must have an installed copy of
|
||||
the FoundationDB client libraries (version 2.0.0 or later), available for Linux,
|
||||
Windows and OS X at https://files.foundationdb.org/fdb-c/.
|
||||
Windows and OS X at https://www.foundationdb.org/downloads/fdb-c/.
|
||||
|
||||
This documentation specifically applies to the FoundationDB Go binding. For more
|
||||
extensive guidance to programming with FoundationDB, as well as API
|
||||
documentation for the other FoundationDB interfaces, please see
|
||||
https://foundationdb.org/documentation/index.html.
|
||||
https://www.foundationdb.org/documentation/index.html.
|
||||
|
||||
Basic Usage
|
||||
|
||||
|
@ -198,7 +198,7 @@ operations perform different transformations. Like other database operations, an
|
|||
atomic operation is used within a transaction.
|
||||
|
||||
For more information on atomic operations in FoundationDB, please see
|
||||
https://foundationdb.org/documentation/developer-guide.html#atomic-operations. The
|
||||
https://www.foundationdb.org/documentation/developer-guide.html#atomic-operations. The
|
||||
operands to atomic operations in this API must be provided as appropriately
|
||||
encoded byte slices. To convert a Go type to a byte slice, see the binary
|
||||
package.
|
||||
|
|
|
@ -37,7 +37,7 @@ import (
|
|||
// as a panic from any FoundationDB API function whose name ends with OrPanic.
|
||||
//
|
||||
// You may compare the Code field of an Error against the list of FoundationDB
|
||||
// error codes at https://foundationdb.org/documentation/api-error-codes.html,
|
||||
// error codes at https://www.foundationdb.org/documentation/api-error-codes.html,
|
||||
// but generally an Error should be passed to (Transaction).OnError. When using
|
||||
// (Database).Transact, non-fatal errors will be retried automatically.
|
||||
type Error struct {
|
||||
|
|
|
@ -34,7 +34,7 @@ type Selectable interface {
|
|||
//
|
||||
// The most common key selectors are constructed with the functions documented
|
||||
// below. For details of how KeySelectors are specified and resolved, see
|
||||
// https://foundationdb.org/documentation/developer-guide.html#key-selectors.
|
||||
// https://www.foundationdb.org/documentation/developer-guide.html#key-selectors.
|
||||
type KeySelector struct {
|
||||
Key KeyConvertible
|
||||
OrEqual bool
|
||||
|
|
|
@ -28,7 +28,7 @@ package fdb
|
|||
// transaction conflicts but making it harder to reason about concurrency.
|
||||
//
|
||||
// For more information on snapshot reads, see
|
||||
// https://foundationdb.org/documentation/developer-guide.html#snapshot-reads.
|
||||
// https://www.foundationdb.org/documentation/developer-guide.html#snapshot-reads.
|
||||
type Snapshot struct {
|
||||
*transaction
|
||||
}
|
||||
|
|
|
@ -29,7 +29,7 @@
|
|||
// As a best practice, API clients should use at least one subspace for
|
||||
// application data. For general guidance on subspace usage, see the Subspaces
|
||||
// section of the Developer Guide
|
||||
// (https://foundationdb.org/documentation/developer-guide.html#developer-guide-sub-keyspaces).
|
||||
// (https://www.foundationdb.org/documentation/developer-guide.html#developer-guide-sub-keyspaces).
|
||||
package subspace
|
||||
|
||||
import (
|
||||
|
|
|
@ -171,7 +171,7 @@ func (t Transaction) SetReadVersion(version int64) {
|
|||
// but making it harder to reason about concurrency.
|
||||
//
|
||||
// For more information on snapshot reads, see
|
||||
// https://foundationdb.org/documentation/developer-guide.html#using-snapshot-reads.
|
||||
// https://www.foundationdb.org/documentation/developer-guide.html#using-snapshot-reads.
|
||||
func (t Transaction) Snapshot() Snapshot {
|
||||
return Snapshot{t.transaction}
|
||||
}
|
||||
|
@ -196,7 +196,7 @@ func (t Transaction) OnError(e Error) FutureNil {
|
|||
// As with other client/server databases, in some failure scenarios a client may
|
||||
// be unable to determine whether a transaction succeeded. For more information,
|
||||
// see
|
||||
// https://foundationdb.org/documentation/developer-guide.html#developer-guide-unknown-results.
|
||||
// https://www.foundationdb.org/documentation/developer-guide.html#developer-guide-unknown-results.
|
||||
func (t Transaction) Commit() FutureNil {
|
||||
return &futureNil{newFuture(C.fdb_transaction_commit(t.ptr))}
|
||||
}
|
||||
|
@ -396,7 +396,7 @@ func addConflictRange(t *transaction, er ExactRange, crtype conflictRangeType) e
|
|||
// conflict.
|
||||
//
|
||||
// For more information on conflict ranges, see
|
||||
// https://foundationdb.org/documentation/developer-guide.html#conflict-ranges.
|
||||
// https://www.foundationdb.org/documentation/developer-guide.html#conflict-ranges.
|
||||
func (t Transaction) AddReadConflictRange(er ExactRange) error {
|
||||
return addConflictRange(t.transaction, er, conflictRangeTypeRead)
|
||||
}
|
||||
|
@ -413,7 +413,7 @@ func copyAndAppend(orig []byte, b byte) []byte {
|
|||
// this key could cause the transaction to fail with a conflict.
|
||||
//
|
||||
// For more information on conflict ranges, see
|
||||
// https://foundationdb.org/documentation/developer-guide.html#conflict-ranges.
|
||||
// https://www.foundationdb.org/documentation/developer-guide.html#conflict-ranges.
|
||||
func (t Transaction) AddReadConflictKey(key KeyConvertible) error {
|
||||
return addConflictRange(t.transaction, KeyRange{key, Key(copyAndAppend(key.FDBKey(), 0x00))}, conflictRangeTypeRead)
|
||||
}
|
||||
|
@ -424,7 +424,7 @@ func (t Transaction) AddReadConflictKey(key KeyConvertible) error {
|
|||
// conflict.
|
||||
//
|
||||
// For more information on conflict ranges, see
|
||||
// https://foundationdb.org/documentation/developer-guide.html#conflict-ranges.
|
||||
// https://www.foundationdb.org/documentation/developer-guide.html#conflict-ranges.
|
||||
func (t Transaction) AddWriteConflictRange(er ExactRange) error {
|
||||
return addConflictRange(t.transaction, er, conflictRangeTypeWrite)
|
||||
}
|
||||
|
@ -434,7 +434,7 @@ func (t Transaction) AddWriteConflictRange(er ExactRange) error {
|
|||
// read this key could fail with a conflict.
|
||||
//
|
||||
// For more information on conflict ranges, see
|
||||
// https://foundationdb.org/documentation/developer-guide.html#conflict-ranges.
|
||||
// https://www.foundationdb.org/documentation/developer-guide.html#conflict-ranges.
|
||||
func (t Transaction) AddWriteConflictKey(key KeyConvertible) error {
|
||||
return addConflictRange(t.transaction, KeyRange{key, Key(copyAndAppend(key.FDBKey(), 0x00))}, conflictRangeTypeWrite)
|
||||
}
|
||||
|
|
|
@ -27,7 +27,7 @@
|
|||
// of higher-level data models.
|
||||
//
|
||||
// For general guidance on tuple usage, see the Tuple section of Data Modeling
|
||||
// (https://foundationdb.org/documentation/data-modeling.html#data-modeling-tuples).
|
||||
// (https://www.foundationdb.org/documentation/data-modeling.html#data-modeling-tuples).
|
||||
//
|
||||
// FoundationDB tuples can currently encode byte and unicode strings, integers
|
||||
// and NULL values. In Go these are represented as []byte, string, int64 and
|
||||
|
|
|
@ -38,13 +38,14 @@ else
|
|||
endif
|
||||
|
||||
ifeq ($(PLATFORM),linux)
|
||||
fdb_java_CFLAGS += -I/usr/lib/jvm/java-8-openjdk-amd64/include -I/usr/lib/jvm/java-8-openjdk-amd64/include/linux
|
||||
JAVA_HOME ?= /usr/lib/jvm/java-8-openjdk-amd64
|
||||
fdb_java_CFLAGS += -I$(JAVA_HOME)/include -I$(JAVA_HOME)/include/linux
|
||||
fdb_java_LDFLAGS += -static-libgcc
|
||||
|
||||
java_ARCH := amd64
|
||||
else ifeq ($(PLATFORM),osx)
|
||||
# FIXME: Surely there is a better way to grab the JNI headers on any version of macOS.
|
||||
fdb_java_CFLAGS += -I/System/Library/Frameworks/JavaVM.framework/Versions/A/Headers -I/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.11.sdk/System/Library/Frameworks/JavaVM.framework/Versions/A/Headers
|
||||
JAVA_HOME ?= $(shell /usr/libexec/java_home)
|
||||
fdb_java_CFLAGS += -I$(JAVA_HOME)/include -I$(JAVA_HOME)/include/darwin
|
||||
|
||||
java_ARCH := x86_64
|
||||
endif
|
||||
|
|
|
@ -10,13 +10,13 @@
|
|||
<packaging>jar</packaging>
|
||||
|
||||
<name>foundationdb-java</name>
|
||||
<description>Java bindings for the FoundationDB database. These bindings require the FoundationDB client, which is under a different license. The client can be obtained from https://files.foundationdb.org/fdb-c/.</description>
|
||||
<description>Java bindings for the FoundationDB database. These bindings require the FoundationDB client, which is under a different license. The client can be obtained from https://www.foundationdb.org/downloads/fdb-c/.</description>
|
||||
<inceptionYear>2010</inceptionYear>
|
||||
<url>http://foundationdb.org</url>
|
||||
<url>https://www.foundationdb.org</url>
|
||||
|
||||
<organization>
|
||||
<name>FoundationDB</name>
|
||||
<url>http://foundationdb.org</url>
|
||||
<url>https://www.foundationdb.org</url>
|
||||
</organization>
|
||||
|
||||
<developers>
|
||||
|
|
|
@ -214,7 +214,7 @@ public class FDB {
|
|||
|
||||
/**
|
||||
* Connects to the cluster specified by the
|
||||
* <a href="/foundationdb/api-general.html#default-cluster-file" target="_blank">default fdb.cluster file</a>.
|
||||
* <a href="/administration.html#default-cluster-file" target="_blank">default fdb.cluster file</a>.
|
||||
* If the FoundationDB network has not been started, it will be started in the course of this call
|
||||
* as if {@link FDB#startNetwork()} had been called.
|
||||
*
|
||||
|
@ -233,9 +233,9 @@ public class FDB {
|
|||
* {@link #startNetwork()} had been called.
|
||||
*
|
||||
* @param clusterFilePath the
|
||||
* <a href="/foundationdb/api-general.html#foundationdb-cluster-file" target="_blank">cluster file</a>
|
||||
* <a href="/administration.html#foundationdb-cluster-file" target="_blank">cluster file</a>
|
||||
* defining the FoundationDB cluster. This can be {@code null} if the
|
||||
* <a href="/foundationdb/api-general.html#default-cluster-file" target="_blank">default fdb.cluster file</a>
|
||||
* <a href="/administration.html#default-cluster-file" target="_blank">default fdb.cluster file</a>
|
||||
* is to be used.
|
||||
*
|
||||
* @return a {@code CompletableFuture} that will be set to a FoundationDB {@code Cluster}.
|
||||
|
@ -254,9 +254,9 @@ public class FDB {
|
|||
* are produced from using the resulting {@link Cluster}.
|
||||
*
|
||||
* @param clusterFilePath the
|
||||
* <a href="/foundationdb/api-general.html#foundationdb-cluster-file" target="_blank">cluster file</a>
|
||||
* <a href="/administration.html#foundationdb-cluster-file" target="_blank">cluster file</a>
|
||||
* defining the FoundationDB cluster. This can be {@code null} if the
|
||||
* <a href="/foundationdb/api-general.html#default-cluster-file" target="_blank">default fdb.cluster file</a>
|
||||
* <a href="/administration.html#default-cluster-file" target="_blank">default fdb.cluster file</a>
|
||||
* is to be used.
|
||||
* @param e used to run the FDB network thread
|
||||
*
|
||||
|
@ -279,7 +279,7 @@ public class FDB {
|
|||
|
||||
/**
|
||||
* Initializes networking, connects with the
|
||||
* <a href="/foundationdb/api-general.html#default-cluster-file" target="_blank">default fdb.cluster file</a>,
|
||||
* <a href="/administration.html#default-cluster-file" target="_blank">default fdb.cluster file</a>,
|
||||
* and opens the database.
|
||||
*
|
||||
* @return a {@code CompletableFuture} that will be set to a FoundationDB {@link Database}
|
||||
|
@ -293,9 +293,9 @@ public class FDB {
|
|||
* and opens the database.
|
||||
*
|
||||
* @param clusterFilePath the
|
||||
* <a href="/foundationdb/api-general.html#foundationdb-cluster-file" target="_blank">cluster file</a>
|
||||
* <a href="/administration.html#foundationdb-cluster-file" target="_blank">cluster file</a>
|
||||
* defining the FoundationDB cluster. This can be {@code null} if the
|
||||
* <a href="/foundationdb/api-general.html#default-cluster-file" target="_blank">default fdb.cluster file</a>
|
||||
* <a href="/administration.html#default-cluster-file" target="_blank">default fdb.cluster file</a>
|
||||
* is to be used.
|
||||
*
|
||||
* @return a {@code CompletableFuture} that will be set to a FoundationDB {@link Database}
|
||||
|
@ -309,9 +309,9 @@ public class FDB {
|
|||
* and opens the database.
|
||||
*
|
||||
* @param clusterFilePath the
|
||||
* <a href="/foundationdb/api-general.html#foundationdb-cluster-file" target="_blank">cluster file</a>
|
||||
* <a href="/administration.html#foundationdb-cluster-file" target="_blank">cluster file</a>
|
||||
* defining the FoundationDB cluster. This can be {@code null} if the
|
||||
* <a href="/foundationdb/api-general.html#default-cluster-file" target="_blank">default fdb.cluster file</a>
|
||||
* <a href="/administration.html#default-cluster-file" target="_blank">default fdb.cluster file</a>
|
||||
* is to be used.
|
||||
* @param e the {@link Executor} to use to execute asynchronous callbacks
|
||||
*
|
||||
|
|
|
@ -57,7 +57,7 @@ import java.util.Arrays;
|
|||
*
|
||||
* <pre>
|
||||
* <code>
|
||||
* {@code CompletableFuture<byte[]>} trVersionFuture = db.run((Transaction tr) -> {
|
||||
* CompletableFuture<byte[]> trVersionFuture = db.run((Transaction tr) -> {
|
||||
* // The incomplete Versionstamp will be overwritten with tr's version information when committed.
|
||||
* Tuple t = Tuple.from("prefix", Versionstamp.incomplete());
|
||||
* tr.mutate(MutationType.SET_VERSIONSTAMPED_KEY, t.packWithVersionstamp(), new byte[0]);
|
||||
|
@ -66,7 +66,7 @@ import java.util.Arrays;
|
|||
*
|
||||
* byte[] trVersion = trVersionFuture.get();
|
||||
*
|
||||
* Versionstamp v = db.run((Transaction tr) -> {
|
||||
* Versionstamp v = db.run((Transaction tr) -> {
|
||||
* Subspace subspace = new Subspace(Tuple.from("prefix"));
|
||||
* byte[] serialized = tr.getRange(subspace.range(), 1).iterator().next().getKey();
|
||||
* Tuple t = subspace.unpack(serialized);
|
||||
|
|
|
@ -5,7 +5,7 @@ This documents the client API for using FoundationDB from Java.<br>
|
|||
<h3>Installation</h3>
|
||||
FoundationDB's Java bindings rely on native libraries that are installed as part of the
|
||||
FoundationDB client binaries installation (see
|
||||
<a href="/foundationdb/api-general.html#installing-client-binaries" target="_blank">
|
||||
<a href="/api-general.html#installing-client-binaries" target="_blank">
|
||||
Installing FoundationDB client binaries</a>). The FoundationDB Java bindings are available
|
||||
through Artifactory. To use them in your Maven-enabled project, add a dependency to your
|
||||
pom.xml like: <br>
|
||||
|
@ -19,7 +19,7 @@ pom.xml like: <br>
|
|||
}
|
||||
</pre>
|
||||
Alternatively, simply download the JAR from
|
||||
<a href="https://files.foundationdb.org/fdb-java/">Artifactory</a>
|
||||
<a href="https://www.foundationdb.org/downloads/fdb-java/">Artifactory</a>
|
||||
and add it to your classpath.<br>
|
||||
<br>
|
||||
<h3>Getting started</h3>
|
||||
|
@ -30,7 +30,7 @@ With this API object you can then open {@link com.apple.foundationdb.Cluster Clu
|
|||
{@link com.apple.foundationdb.Database Database}s and start using
|
||||
{@link com.apple.foundationdb.Transaction Transaction}s.
|
||||
Here we give an example. The example relies on a cluster file at the
|
||||
<a href="/foundationdb/api-general.html#default-cluster-file">default location</a>
|
||||
<a href="/administration.html#default-cluster-file">default location</a>
|
||||
for your platform and a running server.<br>
|
||||
<br>
|
||||
<pre>
|
||||
|
|
|
@ -1,3 +1,3 @@
|
|||
Complete documentation of the FoundationDB Node.js API can be found at [https://foundationdb.org/documentation/api-node.html](https://foundationdb.org/documentation/api-node.html).
|
||||
Complete documentation of the FoundationDB Node.js API can be found at [https://www.foundationdb.org/documentation/api-node.html](https://www.foundationdb.org/documentation/api-node.html).
|
||||
|
||||
These bindings require the FoundationDB client. The client can be obtained from [https://files.foundationdb.org/fdb-c/](https://files.foundationdb.org/fdb-c/).
|
||||
These bindings require the FoundationDB client. The client can be obtained from [https://www.foundationdb.org/downloads/fdb-c/](https://www.foundationdb.org/downloads/fdb-c/).
|
||||
|
|
|
@ -4,7 +4,7 @@
|
|||
"registry": "https://registry.npmjs.org"
|
||||
},
|
||||
"version": "VERSION",
|
||||
"author": "FoundationDB <fdbopensource@apple.com> (http://foundationdb.org)",
|
||||
"author": "FoundationDB <fdb-dist@apple.com> (https://www.foundationdb.org)",
|
||||
"description": "Node.js bindings for the FoundationDB database",
|
||||
"keywords": [ "FoundationDB", "database", "NoSQL", "ACID" ],
|
||||
"homepage": "http://17.199.145.104",
|
||||
|
|
|
@ -1,3 +1,3 @@
|
|||
Complete documentation of the FoundationDB Python API can be found at https://foundationdb.org/documentation/api-python.html.
|
||||
Complete documentation of the FoundationDB Python API can be found at https://www.foundationdb.org/documentation/api-python.html.
|
||||
|
||||
These bindings require the FoundationDB client. The client can be obtained from https://files.foundationdb.org/fdb-c/.
|
||||
These bindings require the FoundationDB client. The client can be obtained from https://www.foundationdb.org/downloads/fdb-c/.
|
||||
|
|
|
@ -21,7 +21,7 @@
|
|||
# FoundationDB Python API
|
||||
|
||||
"""Documentation for this API can be found at
|
||||
https://foundationdb.org/documentation/api-python.html"""
|
||||
https://www.foundationdb.org/documentation/api-python.html"""
|
||||
|
||||
|
||||
def open(*args, **kwargs):
|
||||
|
|
|
@ -75,7 +75,7 @@ class HighContentionAllocator (object):
|
|||
count = tr.snapshot[self.counters[start]]
|
||||
|
||||
if count != None:
|
||||
count = struct.unpack("<q", str(count))[0]
|
||||
count = struct.unpack("<q", bytes(count))[0]
|
||||
else:
|
||||
count = 0
|
||||
|
||||
|
|
|
@ -21,7 +21,7 @@
|
|||
# FoundationDB Python API
|
||||
|
||||
"""Documentation for this API can be found at
|
||||
https://foundationdb.org/documentation/api-python.html"""
|
||||
https://www.foundationdb.org/documentation/api-python.html"""
|
||||
|
||||
from fdb import impl as _impl
|
||||
|
||||
|
|
|
@ -9,9 +9,9 @@ except:
|
|||
setup(name="foundationdb",
|
||||
version="VERSION",
|
||||
author="FoundationDB",
|
||||
author_email="fdbopensource@apple.com",
|
||||
author_email="fdb-dist@apple.com",
|
||||
description="Python bindings for the FoundationDB database",
|
||||
url="http://foundationdb.org",
|
||||
url="https://www.foundationdb.org",
|
||||
packages=['fdb'],
|
||||
package_data={'fdb': ["fdb/*.py"]},
|
||||
long_description=long_desc,
|
||||
|
|
|
@ -9,14 +9,14 @@ Gem::Specification.new do |s|
|
|||
Ruby bindings for the FoundationDB database.
|
||||
|
||||
Complete documentation of the FoundationDB Ruby API can be found at:
|
||||
https://foundationdb.org/documentation/api-ruby.html.
|
||||
https://www.foundationdb.org/documentation/api-ruby.html.
|
||||
EOF
|
||||
s.authors = ["FoundationDB"]
|
||||
s.email = 'fdbopensource@apple.com'
|
||||
s.email = 'fdb-dist@apple.com'
|
||||
s.files = ["LICENSE", "lib/fdb.rb", "lib/fdbdirectory.rb", "lib/fdbimpl.rb", "lib/fdblocality.rb", "lib/fdboptions.rb", "lib/fdbsubspace.rb", "lib/fdbtuple.rb"]
|
||||
s.homepage = 'http://foundationdb.org'
|
||||
s.homepage = 'https://www.foundationdb.org'
|
||||
s.license = 'Apache v2'
|
||||
s.add_dependency('ffi', '>= 1.1.5')
|
||||
s.required_ruby_version = '>= 1.9.3'
|
||||
s.requirements << 'These bindings require the FoundationDB client. The client can be obtained from https://files.foundationdb.org/fdb-c/.'
|
||||
s.requirements << 'These bindings require the FoundationDB client. The client can be obtained from https://www.foundationdb.org/downloads/fdb-c/.'
|
||||
end
|
||||
|
|
|
@ -21,7 +21,7 @@
|
|||
# FoundationDB Ruby API
|
||||
|
||||
# Documentation for this API can be found at
|
||||
# https://foundationdb.org/documentation/api-ruby.html
|
||||
# https://www.foundationdb.org/documentation/api-ruby.html
|
||||
|
||||
module FDB
|
||||
@@chosen_version = -1
|
||||
|
|
|
@ -23,7 +23,7 @@
|
|||
# FoundationDB Ruby API
|
||||
|
||||
# Documentation for this API can be found at
|
||||
# https://foundationdb.org/documentation/api-ruby.html
|
||||
# https://www.foundationdb.org/documentation/api-ruby.html
|
||||
|
||||
require 'thread'
|
||||
|
||||
|
|
|
@ -23,7 +23,7 @@
|
|||
# FoundationDB Ruby API
|
||||
|
||||
# Documentation for this API can be found at
|
||||
# https://foundationdb.org/documentation/api-ruby.html
|
||||
# https://www.foundationdb.org/documentation/api-ruby.html
|
||||
|
||||
require 'ffi'
|
||||
|
||||
|
|
|
@ -23,7 +23,7 @@
|
|||
# FoundationDB Ruby API
|
||||
|
||||
# Documentation for this API can be found at
|
||||
# https://foundationdb.org/documentation/api-ruby.html
|
||||
# https://www.foundationdb.org/documentation/api-ruby.html
|
||||
|
||||
module FDB
|
||||
module Locality
|
||||
|
|
|
@ -23,7 +23,7 @@
|
|||
# FoundationDB Ruby API
|
||||
|
||||
# Documentation for this API can be found at
|
||||
# https://foundationdb.org/documentation/api-ruby.html
|
||||
# https://www.foundationdb.org/documentation/api-ruby.html
|
||||
|
||||
require_relative 'fdbtuple'
|
||||
|
||||
|
|
|
@ -23,7 +23,7 @@
|
|||
# FoundationDB Ruby API
|
||||
|
||||
# Documentation for this API can be found at
|
||||
# https://foundationdb.org/documentation/api-ruby.html
|
||||
# https://www.foundationdb.org/documentation/api-ruby.html
|
||||
|
||||
module FDB
|
||||
module Tuple
|
||||
|
|
|
@ -147,10 +147,8 @@ info:
|
|||
@echo "Package: $(PACKAGE_NAME)"
|
||||
@echo "Version ID: $(VERSION_ID)"
|
||||
@echo "Package ID: $(PKGRELEASE)"
|
||||
@echo "Source Control: $(SOURCE_CONTROL)"
|
||||
@echo "SC Branch: $(SCBRANCH)"
|
||||
@echo "Git Dir: $(GITPRESENT)"
|
||||
@echo "Mercurial Dir: $(HGPRESENT)"
|
||||
@echo "Make Dir: $(MAKEDIR)"
|
||||
@echo "Foundation Dir: $(FDBDIR)"
|
||||
@echo "Fdb Dir Base: $(FDBDIRBASE)"
|
||||
|
|
|
@ -2,20 +2,12 @@
|
|||
#
|
||||
# local vars:
|
||||
PROJECT_NAME := foundationdb-docs
|
||||
#VERSION := $(shell cat version)
|
||||
|
||||
ifeq ($(RELEASE_BUILD),true)
|
||||
RELEASE := $(VERSION)
|
||||
else
|
||||
RELEASE := $(VERSION)-SNAPSHOT
|
||||
endif
|
||||
|
||||
GIT_HEAD_REF := $(shell git rev-parse --short HEAD)
|
||||
GIT_BRANCH := $(shell git symbolic-ref --short HEAD)
|
||||
GIT_REPO_URL := $(shell git config --get remote.origin.url)
|
||||
|
||||
# You can set these variables from the command line.
|
||||
#VERSIONOPTS := -D version=$(VERSION) -D release=$(RELEASE)
|
||||
SPHINXOPTS := -c .
|
||||
PAPER =
|
||||
ROOTDIR := $(CURDIR)
|
||||
|
@ -110,4 +102,5 @@ livehtml: html
|
|||
# removed html prerequisite because it is previously explictly invoked
|
||||
package:
|
||||
mkdir -p $(DISTDIR)
|
||||
cd $(BUILDDIR)/html && tar czf $(DISTDIR)/$(PROJECT_NAME)-$(RELEASE).tar.gz .
|
||||
rm -f $(DISTDIR)/$(PROJECT_NAME)-$(VERSION)-$(PKGRELEASE).tar.gz
|
||||
cd $(BUILDDIR)/html && tar czf $(DISTDIR)/$(PROJECT_NAME)-$(VERSION)-$(PKGRELEASE).tar.gz .
|
||||
|
|
|
@ -97,8 +97,6 @@ This automatic determination of a cluster file makes it easy to write code using
|
|||
|
||||
.. warning:: A cluster file must have the :ref:`required permissions <cluster_file_permissions>` in order to be used.
|
||||
|
||||
.. warning:: If an explicitly provided file has been set to an invalid value (such as an empty value, a file that does not exist, or a file that is not a valid cluster file), an error will result. FoundationDB will not fall back to another file.
|
||||
|
||||
.. warning:: If ``FDB_CLUSTER_FILE`` is read and has been set to an invalid value (such as an empty value, a file that does not exist, or a file that is not a valid cluster file), an error will result. FoundationDB will not fall back to another file.
|
||||
|
||||
.. _cluster_file_permissions:
|
||||
|
@ -225,39 +223,46 @@ Use the ``status`` command of ``fdbcli`` to determine if the cluster is up and r
|
|||
The database is available.
|
||||
|
||||
Welcome to the fdbcli. For help, type `help'.
|
||||
fdb> status
|
||||
fdb> status
|
||||
|
||||
The ``status`` command displays general information about the FoundationDB cluster::
|
||||
Configuration:
|
||||
Redundancy mode - triple
|
||||
Storage engine - ssd-2
|
||||
Coordinators - 5
|
||||
Desired Proxies - 5
|
||||
Desired Logs - 8
|
||||
|
||||
Configuration:
|
||||
Redundancy mode - triple
|
||||
Storage engine - ssd
|
||||
Coordinators - 3
|
||||
Cluster:
|
||||
FoundationDB processes - 272
|
||||
Machines - 16
|
||||
Memory availability - 14.5 GB per process on machine with least available
|
||||
Retransmissions rate - 20 Hz
|
||||
Fault Tolerance - 2 machines
|
||||
Server time - 03/19/18 08:51:52
|
||||
|
||||
Cluster:
|
||||
FoundationDB processes - 3
|
||||
Machines - 3
|
||||
Memory availability - 4.1 GB per process on machine with least available
|
||||
Fault Tolerance - 0 machines
|
||||
Server time - Wed Oct 8 14:41:34 2014
|
||||
Data:
|
||||
Replication health - Healthy
|
||||
Moving data - 0.000 GB
|
||||
Sum of key-value sizes - 3.298 TB
|
||||
Disk space used - 15.243 TB
|
||||
|
||||
Data:
|
||||
Replication health - Healthy
|
||||
Moving data - 0.000 GB
|
||||
Sum of key-value sizes - 0 MB
|
||||
Operating space:
|
||||
Storage server - 1656.2 GB free on most full server
|
||||
Log server - 1794.7 GB free on most full server
|
||||
|
||||
Operating space:
|
||||
Storage server - 1.0 GB free on most full server
|
||||
Log server - 1.0 GB free on most full server
|
||||
Workload:
|
||||
Read rate - 55990 Hz
|
||||
Write rate - 14946 Hz
|
||||
Transactions started - 6321 Hz
|
||||
Transactions committed - 1132 Hz
|
||||
Conflict rate - 0 Hz
|
||||
|
||||
Workload:
|
||||
Read rate - 2 Hz
|
||||
Write rate - 0 Hz
|
||||
Transactions started - 2 Hz
|
||||
Transactions committed - 0 Hz
|
||||
Conflict rate - 0 Hz
|
||||
Backup and DR:
|
||||
Running backups - 1
|
||||
Running DRs - 1 as primary
|
||||
|
||||
Client time: Thu Nov 20 09:50:45 2014
|
||||
Client time: 03/19/18 08:51:51
|
||||
|
||||
|
||||
The summary fields are interpreted as follows:
|
||||
|
||||
|
@ -265,14 +270,18 @@ The summary fields are interpreted as follows:
|
|||
Redundancy mode The currently configured redundancy mode (see the section :ref:`configuration-choosing-redundancy-mode`)
|
||||
Storage engine The currently configured storage engine (see the section :ref:`configuration-configuring-storage-subsystem`)
|
||||
Coordinators The number of FoundationDB coordination servers
|
||||
Desired Proxies Number of proxies desired. If replication mode is 3 then default number of proxies is 3
|
||||
Desired Logs Number of logs desired. If replication mode is 3 then default number of logs is 3
|
||||
FoundationDB processes Number of FoundationDB processes participating in the cluster
|
||||
Machines Number of physical machines running at least one FoundationDB process that is participating in the cluster
|
||||
Memory availability RAM per process on machine with least available (see details below)
|
||||
Retransmissions rate Ratio of retransmitted packets to the total number of packets.
|
||||
Fault tolerance Maximum number of machines that can fail without losing data or availability (number for losing data will be reported separately if lower)
|
||||
Server time Timestamp from the server
|
||||
Replication health A qualitative estimate of the health of data replication
|
||||
Moving data Amount of data currently in movement between machines
|
||||
Sum of key-value sizes Estimated total size of keys and values stored (not including any overhead or replication)
|
||||
Disk space used Overall disk space used by the cluster
|
||||
Storage server Free space for storage on the server with least available. For ``ssd`` storage engine, includes only disk; for ``memory`` storage engine, includes both RAM and disk.
|
||||
Log server Free space for log server on the server with least available.
|
||||
Read rate The current number of reads per second
|
||||
|
@ -280,6 +289,8 @@ Write rate The current number of writes per second
|
|||
Transaction started The current number of transactions started per second
|
||||
Transaction committed The current number of transactions committed per second
|
||||
Conflict rate The current number of conflicts per second
|
||||
Running backups Number of backups currently running. Different backups could be backing up to different prefixes and/or to different targets.
|
||||
Running DRs Number of DRs currently running. Different DRs could be streaming different prefixes and/or to different DR clusters.
|
||||
====================== ==========================================================================================================
|
||||
|
||||
The "Memory availability" is a conservative estimate of the minimal RAM available to any ``fdbserver`` process across all machines in the cluster. This value is calculated in two steps. Memory available per process is first calculated *for each machine* by taking:
|
||||
|
@ -310,53 +321,139 @@ The ``status`` command can provide detailed statistics about the cluster and the
|
|||
Welcome to the fdbcli. For help, type `help'.
|
||||
fdb> status details
|
||||
|
||||
Configuration:
|
||||
Redundancy mode - triple
|
||||
Storage engine - ssd
|
||||
Coordinators - 3
|
||||
|
||||
Cluster:
|
||||
FoundationDB processes - 3
|
||||
Machines - 3
|
||||
Memory availability - 4.1 GB per process on machine with least available
|
||||
Fault Tolerance - 0 machines
|
||||
Server time - Wed Oct 8 14:41:34 2014
|
||||
Configuration:
|
||||
Redundancy mode - triple
|
||||
Storage engine - ssd-2
|
||||
Coordinators - 5
|
||||
|
||||
Data:
|
||||
Replication health - Healthy
|
||||
Moving data - 0.000 GB
|
||||
Sum of key-value sizes - 0 MB
|
||||
Cluster:
|
||||
FoundationDB processes - 85
|
||||
Machines - 5
|
||||
Memory availability - 7.4 GB per process on machine with least available
|
||||
Retransmissions rate - 5 Hz
|
||||
Fault Tolerance - 2 machines
|
||||
Server time - 03/19/18 08:59:37
|
||||
|
||||
Operating space:
|
||||
Storage server - 1.0 GB free on most full server
|
||||
Log server - 1.0 GB free on most full server
|
||||
Data:
|
||||
Replication health - Healthy
|
||||
Moving data - 0.000 GB
|
||||
Sum of key-value sizes - 87.068 GB
|
||||
Disk space used - 327.819 GB
|
||||
|
||||
Workload:
|
||||
Read rate - 2 Hz
|
||||
Write rate - 0 Hz
|
||||
Transactions started - 2 Hz
|
||||
Transactions committed - 0 Hz
|
||||
Conflict rate - 0 Hz
|
||||
Operating space:
|
||||
Storage server - 888.2 GB free on most full server
|
||||
Log server - 897.3 GB free on most full server
|
||||
|
||||
Process performance details:
|
||||
10.0.4.1:4500 ( 3% cpu; 2% machine; 0.004 Gbps; 0% disk; 2.5 GB / 4.1 GB RAM )
|
||||
10.0.4.2:4500 ( 1% cpu; 2% machine; 0.004 Gbps; 0% disk; 2.5 GB / 4.1 GB RAM )
|
||||
10.0.4.3:4500 ( 1% cpu; 2% machine; 0.004 Gbps; 0% disk; 2.5 GB / 4.1 GB RAM )
|
||||
Workload:
|
||||
Read rate - 117 Hz
|
||||
Write rate - 0 Hz
|
||||
Transactions started - 43 Hz
|
||||
Transactions committed - 1 Hz
|
||||
Conflict rate - 0 Hz
|
||||
|
||||
Coordination servers:
|
||||
10.0.4.1:4500
|
||||
10.0.4.2:4500
|
||||
10.0.4.3:4500
|
||||
Process performance details:
|
||||
10.0.4.1:4500 ( 2% cpu; 2% machine; 0.010 Gbps; 0% disk IO; 3.2 GB / 7.4 GB RAM )
|
||||
10.0.4.1:4501 ( 1% cpu; 2% machine; 0.010 Gbps; 3% disk IO; 2.6 GB / 7.4 GB RAM )
|
||||
10.0.4.1:4502 ( 2% cpu; 2% machine; 0.010 Gbps; 0% disk IO; 2.6 GB / 7.4 GB RAM )
|
||||
10.0.4.1:4503 ( 0% cpu; 2% machine; 0.010 Gbps; 0% disk IO; 2.7 GB / 7.4 GB RAM )
|
||||
10.0.4.1:4504 ( 0% cpu; 2% machine; 0.010 Gbps; 0% disk IO; 2.6 GB / 7.4 GB RAM )
|
||||
10.0.4.1:4505 ( 2% cpu; 2% machine; 0.010 Gbps; 0% disk IO; 2.7 GB / 7.4 GB RAM )
|
||||
10.0.4.1:4506 ( 2% cpu; 2% machine; 0.010 Gbps; 0% disk IO; 2.7 GB / 7.4 GB RAM )
|
||||
10.0.4.1:4507 ( 2% cpu; 2% machine; 0.010 Gbps; 0% disk IO; 2.7 GB / 7.4 GB RAM )
|
||||
10.0.4.1:4508 ( 2% cpu; 2% machine; 0.010 Gbps; 1% disk IO; 2.6 GB / 7.4 GB RAM )
|
||||
10.0.4.1:4509 ( 2% cpu; 2% machine; 0.010 Gbps; 1% disk IO; 2.6 GB / 7.4 GB RAM )
|
||||
10.0.4.1:4510 ( 1% cpu; 2% machine; 0.010 Gbps; 1% disk IO; 2.7 GB / 7.4 GB RAM )
|
||||
10.0.4.1:4511 ( 0% cpu; 2% machine; 0.010 Gbps; 0% disk IO; 2.6 GB / 7.4 GB RAM )
|
||||
10.0.4.1:4512 ( 0% cpu; 2% machine; 0.010 Gbps; 0% disk IO; 2.6 GB / 7.4 GB RAM )
|
||||
10.0.4.1:4513 ( 0% cpu; 2% machine; 0.010 Gbps; 0% disk IO; 2.7 GB / 7.4 GB RAM )
|
||||
10.0.4.1:4514 ( 0% cpu; 2% machine; 0.010 Gbps; 0% disk IO; 0.2 GB / 7.4 GB RAM )
|
||||
10.0.4.1:4515 ( 12% cpu; 2% machine; 0.010 Gbps; 0% disk IO; 0.2 GB / 7.4 GB RAM )
|
||||
10.0.4.1:4516 ( 0% cpu; 2% machine; 0.010 Gbps; 0% disk IO; 0.3 GB / 7.4 GB RAM )
|
||||
10.0.4.2:4500 ( 2% cpu; 3% machine; 0.124 Gbps; 0% disk IO; 3.2 GB / 7.4 GB RAM )
|
||||
10.0.4.2:4501 ( 15% cpu; 3% machine; 0.124 Gbps; 19% disk IO; 2.6 GB / 7.4 GB RAM )
|
||||
10.0.4.2:4502 ( 2% cpu; 3% machine; 0.124 Gbps; 0% disk IO; 2.6 GB / 7.4 GB RAM )
|
||||
10.0.4.2:4503 ( 2% cpu; 3% machine; 0.124 Gbps; 0% disk IO; 2.6 GB / 7.4 GB RAM )
|
||||
10.0.4.2:4504 ( 2% cpu; 3% machine; 0.124 Gbps; 1% disk IO; 2.6 GB / 7.4 GB RAM )
|
||||
10.0.4.2:4505 ( 18% cpu; 3% machine; 0.124 Gbps; 18% disk IO; 2.6 GB / 7.4 GB RAM )
|
||||
10.0.4.2:4506 ( 2% cpu; 3% machine; 0.124 Gbps; 0% disk IO; 2.6 GB / 7.4 GB RAM )
|
||||
10.0.4.2:4507 ( 2% cpu; 3% machine; 0.124 Gbps; 0% disk IO; 2.6 GB / 7.4 GB RAM )
|
||||
10.0.4.2:4508 ( 2% cpu; 3% machine; 0.124 Gbps; 19% disk IO; 2.6 GB / 7.4 GB RAM )
|
||||
10.0.4.2:4509 ( 0% cpu; 3% machine; 0.124 Gbps; 0% disk IO; 2.6 GB / 7.4 GB RAM )
|
||||
10.0.4.2:4510 ( 0% cpu; 3% machine; 0.124 Gbps; 0% disk IO; 2.6 GB / 7.4 GB RAM )
|
||||
10.0.4.2:4511 ( 2% cpu; 3% machine; 0.124 Gbps; 1% disk IO; 2.6 GB / 7.4 GB RAM )
|
||||
10.0.4.2:4512 ( 2% cpu; 3% machine; 0.124 Gbps; 19% disk IO; 2.7 GB / 7.4 GB RAM )
|
||||
10.0.4.2:4513 ( 0% cpu; 3% machine; 0.124 Gbps; 0% disk IO; 2.6 GB / 7.4 GB RAM )
|
||||
10.0.4.2:4514 ( 0% cpu; 3% machine; 0.124 Gbps; 0% disk IO; 0.2 GB / 7.4 GB RAM )
|
||||
10.0.4.2:4515 ( 11% cpu; 3% machine; 0.124 Gbps; 0% disk IO; 0.2 GB / 7.4 GB RAM )
|
||||
10.0.4.2:4516 ( 0% cpu; 3% machine; 0.124 Gbps; 0% disk IO; 0.6 GB / 7.4 GB RAM )
|
||||
10.0.4.3:4500 ( 14% cpu; 3% machine; 0.284 Gbps; 26% disk IO; 3.0 GB / 7.4 GB RAM )
|
||||
10.0.4.3:4501 ( 2% cpu; 3% machine; 0.284 Gbps; 0% disk IO; 2.8 GB / 7.4 GB RAM )
|
||||
10.0.4.3:4502 ( 2% cpu; 3% machine; 0.284 Gbps; 0% disk IO; 2.8 GB / 7.4 GB RAM )
|
||||
10.0.4.3:4503 ( 2% cpu; 3% machine; 0.284 Gbps; 0% disk IO; 2.7 GB / 7.4 GB RAM )
|
||||
10.0.4.3:4504 ( 7% cpu; 3% machine; 0.284 Gbps; 12% disk IO; 2.6 GB / 7.4 GB RAM )
|
||||
10.0.4.3:4505 ( 2% cpu; 3% machine; 0.284 Gbps; 0% disk IO; 2.7 GB / 7.4 GB RAM )
|
||||
10.0.4.3:4506 ( 2% cpu; 3% machine; 0.284 Gbps; 0% disk IO; 2.6 GB / 7.4 GB RAM )
|
||||
10.0.4.3:4507 ( 2% cpu; 3% machine; 0.284 Gbps; 26% disk IO; 2.6 GB / 7.4 GB RAM )
|
||||
10.0.4.3:4508 ( 2% cpu; 3% machine; 0.284 Gbps; 0% disk IO; 2.7 GB / 7.4 GB RAM )
|
||||
10.0.4.3:4509 ( 2% cpu; 3% machine; 0.284 Gbps; 0% disk IO; 2.6 GB / 7.4 GB RAM )
|
||||
10.0.4.3:4510 ( 2% cpu; 3% machine; 0.284 Gbps; 0% disk IO; 2.7 GB / 7.4 GB RAM )
|
||||
10.0.4.3:4511 ( 2% cpu; 3% machine; 0.284 Gbps; 12% disk IO; 2.6 GB / 7.4 GB RAM )
|
||||
10.0.4.3:4512 ( 2% cpu; 3% machine; 0.284 Gbps; 3% disk IO; 2.7 GB / 7.4 GB RAM )
|
||||
10.0.4.3:4513 ( 2% cpu; 3% machine; 0.284 Gbps; 0% disk IO; 2.6 GB / 7.4 GB RAM )
|
||||
10.0.4.3:4514 ( 0% cpu; 3% machine; 0.284 Gbps; 0% disk IO; 0.1 GB / 7.4 GB RAM )
|
||||
10.0.4.3:4515 ( 0% cpu; 3% machine; 0.284 Gbps; 0% disk IO; 0.1 GB / 7.4 GB RAM )
|
||||
10.0.4.3:4516 ( 0% cpu; 3% machine; 0.284 Gbps; 0% disk IO; 0.1 GB / 7.4 GB RAM )
|
||||
10.0.4.4:4500 ( 2% cpu; 4% machine; 0.065 Gbps; 0% disk IO; 3.2 GB / 7.4 GB RAM )
|
||||
10.0.4.4:4501 ( 2% cpu; 4% machine; 0.065 Gbps; 0% disk IO; 2.6 GB / 7.4 GB RAM )
|
||||
10.0.4.4:4502 ( 0% cpu; 4% machine; 0.065 Gbps; 0% disk IO; 2.6 GB / 7.4 GB RAM )
|
||||
10.0.4.4:4503 ( 2% cpu; 4% machine; 0.065 Gbps; 16% disk IO; 2.6 GB / 7.4 GB RAM )
|
||||
10.0.4.4:4504 ( 2% cpu; 4% machine; 0.065 Gbps; 0% disk IO; 2.7 GB / 7.4 GB RAM )
|
||||
10.0.4.4:4505 ( 0% cpu; 4% machine; 0.065 Gbps; 0% disk IO; 2.6 GB / 7.4 GB RAM )
|
||||
10.0.4.4:4506 ( 0% cpu; 4% machine; 0.065 Gbps; 0% disk IO; 2.6 GB / 7.4 GB RAM )
|
||||
10.0.4.4:4507 ( 2% cpu; 4% machine; 0.065 Gbps; 0% disk IO; 2.6 GB / 7.4 GB RAM )
|
||||
10.0.4.4:4508 ( 0% cpu; 4% machine; 0.065 Gbps; 0% disk IO; 2.6 GB / 7.4 GB RAM )
|
||||
10.0.4.4:4509 ( 2% cpu; 4% machine; 0.065 Gbps; 0% disk IO; 2.6 GB / 7.4 GB RAM )
|
||||
10.0.4.4:4510 ( 24% cpu; 4% machine; 0.065 Gbps; 15% disk IO; 2.6 GB / 7.4 GB RAM )
|
||||
10.0.4.4:4511 ( 2% cpu; 4% machine; 0.065 Gbps; 0% disk IO; 2.8 GB / 7.4 GB RAM )
|
||||
10.0.4.4:4512 ( 2% cpu; 4% machine; 0.065 Gbps; 0% disk IO; 2.7 GB / 7.4 GB RAM )
|
||||
10.0.4.4:4513 ( 0% cpu; 4% machine; 0.065 Gbps; 0% disk IO; 2.6 GB / 7.4 GB RAM )
|
||||
10.0.4.4:4514 ( 0% cpu; 4% machine; 0.065 Gbps; 1% disk IO; 0.2 GB / 7.4 GB RAM )
|
||||
10.0.4.4:4515 ( 0% cpu; 4% machine; 0.065 Gbps; 1% disk IO; 0.2 GB / 7.4 GB RAM )
|
||||
10.0.4.4:4516 ( 0% cpu; 4% machine; 0.065 Gbps; 1% disk IO; 0.6 GB / 7.4 GB RAM )
|
||||
10.0.4.5:4500 ( 6% cpu; 2% machine; 0.076 Gbps; 7% disk IO; 3.2 GB / 7.4 GB RAM )
|
||||
10.0.4.5:4501 ( 2% cpu; 2% machine; 0.076 Gbps; 19% disk IO; 2.6 GB / 7.4 GB RAM )
|
||||
10.0.4.5:4502 ( 1% cpu; 2% machine; 0.076 Gbps; 0% disk IO; 2.6 GB / 7.4 GB RAM )
|
||||
10.0.4.5:4503 ( 0% cpu; 2% machine; 0.076 Gbps; 0% disk IO; 2.6 GB / 7.4 GB RAM )
|
||||
10.0.4.5:4504 ( 2% cpu; 2% machine; 0.076 Gbps; 0% disk IO; 2.7 GB / 7.4 GB RAM )
|
||||
10.0.4.5:4505 ( 2% cpu; 2% machine; 0.076 Gbps; 0% disk IO; 2.7 GB / 7.4 GB RAM )
|
||||
10.0.4.5:4506 ( 0% cpu; 2% machine; 0.076 Gbps; 0% disk IO; 2.6 GB / 7.4 GB RAM )
|
||||
10.0.4.5:4507 ( 2% cpu; 2% machine; 0.076 Gbps; 6% disk IO; 2.6 GB / 7.4 GB RAM )
|
||||
10.0.4.5:4508 ( 31% cpu; 2% machine; 0.076 Gbps; 8% disk IO; 2.7 GB / 7.4 GB RAM )
|
||||
10.0.4.5:4509 ( 0% cpu; 2% machine; 0.076 Gbps; 0% disk IO; 2.6 GB / 7.4 GB RAM )
|
||||
10.0.4.5:4510 ( 2% cpu; 2% machine; 0.076 Gbps; 0% disk IO; 2.7 GB / 7.4 GB RAM )
|
||||
10.0.4.5:4511 ( 2% cpu; 2% machine; 0.076 Gbps; 0% disk IO; 2.6 GB / 7.4 GB RAM )
|
||||
10.0.4.5:4512 ( 2% cpu; 2% machine; 0.076 Gbps; 0% disk IO; 2.6 GB / 7.4 GB RAM )
|
||||
10.0.4.5:4513 ( 0% cpu; 2% machine; 0.076 Gbps; 3% disk IO; 2.6 GB / 7.4 GB RAM )
|
||||
10.0.4.5:4514 ( 0% cpu; 2% machine; 0.076 Gbps; 0% disk IO; 0.2 GB / 7.4 GB RAM )
|
||||
10.0.4.5:4515 ( 0% cpu; 2% machine; 0.076 Gbps; 0% disk IO; 0.2 GB / 7.4 GB RAM )
|
||||
10.0.4.5:4516 ( 0% cpu; 2% machine; 0.076 Gbps; 0% disk IO; 0.6 GB / 7.4 GB RAM )
|
||||
|
||||
Client time: Thu Nov 20 09:56:52 2014
|
||||
Coordination servers:
|
||||
10.0.4.1:4500 (reachable)
|
||||
10.0.4.2:4500 (reachable)
|
||||
10.0.4.3:4500 (reachable)
|
||||
10.0.4.4:4500 (reachable)
|
||||
10.0.4.5:4500 (reachable)
|
||||
|
||||
Client time: 03/19/18 08:59:37
|
||||
Several details about individual FoundationDB processes are displayed in a list format in parenthesis after the IP address and port:
|
||||
|
||||
======= =========================================================================
|
||||
cpu CPU utilization of the individual process
|
||||
machine CPU utilization of the machine the process is running on (over all cores)
|
||||
Gbps Total input + output network traffic, in Gbps
|
||||
disk Percentage busy time of the disk subsystem on which the data resides
|
||||
disk IO Percentage busy time of the disk subsystem on which the data resides
|
||||
REXMIT! Displayed only if there have been more than 10 TCP segments retransmitted in last 5s
|
||||
RAM Total physical memory used by process / memory available per process
|
||||
======= =========================================================================
|
||||
|
@ -380,6 +477,8 @@ To make configuring, starting, stopping, and restarting ``fdbserver`` processes
|
|||
|
||||
During normal operation, ``fdbmonitor`` is transparent, and you interact with it only by modifying the configuration in :ref:`foundationdb.conf <foundationdb-conf>` and perhaps occasionally by :ref:`starting and stopping <administration-running-foundationdb>` it manually. If some problem prevents an ``fdbserver`` or ``backup-agent`` process from starting or causes it to stop unexpectedly, ``fdbmonitor`` will log errors to the system log.
|
||||
|
||||
If kill_on_configuration_change parameter is unset or set to `true` in foundationdb.conf then fdbmonitor will restart on changes automatically. If this parameter is set to `false` it will not restart on changes.
|
||||
|
||||
.. _administration-managing-trace-files:
|
||||
|
||||
Managing trace files
|
||||
|
@ -415,7 +514,7 @@ FoundationDB's storage space requirements depend on which storage engine is used
|
|||
|
||||
Using the ``ssd`` storage engine, data is stored in B-trees that add some overhead.
|
||||
|
||||
* For key-value pairs larger than about 100 bytes, overhead should usually be less than 2x per replica. In a triple-replicated configuration, the raw capacity required might be 5x the size of the data. However, SSDs often require over-provisioning (e.g. keeping the drive less than 75% full) for best performance, so 10x would be a reasonable number. For example, 100GB of raw key-values would require 1TB of raw capacity.
|
||||
* For key-value pairs larger than about 100 bytes, overhead should usually be less than 2x per replica. In a triple-replicated configuration, the raw capacity required might be 5x the size of the data. However, SSDs often require over-provisioning (e.g. keeping the drive less than 75% full) for best performance, so 7x would be a reasonable number. For example, 100GB of raw key-values would require 700GB of raw capacity.
|
||||
|
||||
* For very small key-value pairs, the overhead can be a large factor but not usually more than about 40 bytes per replica. Therefore, with triple replication and SSD over-provisioning, allowing 200 bytes of raw storage capacity for each very small key-value pair would be a reasonable guess. For example, 1 billion very small key-value pairs would require 200GB of raw storage.
|
||||
|
||||
|
@ -425,10 +524,12 @@ Using the ``memory`` storage engine, both memory and disk space need to be consi
|
|||
|
||||
* Disk space usage is about 8x the original data size. The memory storage engine interleaves a snapshot on disk with a transaction log, with the resulting snapshot 2x the data size. A snapshot can't be dropped from its log until the next snapshot is completely written, so 2 snapshots must be kept at 4x the data size. The two-file durable queue can't overwrite data in one file until all the data in the other file has been dropped, resulting in 8x the data size. Finally, it should be noted that disk space is not reclaimed when key-value pairs are cleared.
|
||||
|
||||
For either storage engine, there is possible additional overhead when running backup or DR. In usual operation, the overhead is negligible but if backup is unable to write or a secondary cluster is unavailable, mutation logs will build up until copying can resume, occupying space in your cluster.
|
||||
|
||||
Running out of storage space
|
||||
----------------------------
|
||||
|
||||
FoundationDB is aware of the free storage space on each node. It attempts to load all nodes equally so that no node runs out of space before the others. The database attempts to gracefully stop writes as storage space decreases to 100 MB, refusing to start new transactions with priorities other than ``SYSTEM_IMMEDIATE``. This lower bound on free space leaves space to allow you to use ``SYSTEM_IMMEDIATE`` transactions to remove data.
|
||||
FoundationDB is aware of the free storage space on each node. It attempts to distribute data equally on all the nodes so that no node runs out of space before the others. The database attempts to gracefully stop writes as storage space decreases to 100 MB, refusing to start new transactions with priorities other than ``SYSTEM_IMMEDIATE``. This lower bound on free space leaves space to allow you to use ``SYSTEM_IMMEDIATE`` transactions to remove data.
|
||||
|
||||
The measure of free space depends on the storage engine. For the memory storage engine, which is the default after installation, total space is limited to the lesser of the ``storage_memory`` configuration parameter (1 GB in the default configuration) or a fraction of the free disk space.
|
||||
|
||||
|
@ -437,14 +538,14 @@ If the disk is rapidly filled by other programs, trace files, etc., FoundationDB
|
|||
Virtual machines
|
||||
----------------
|
||||
|
||||
Processes running in different VMs on a single machine will appear to FoundationDB as being hardware isolated. FoundationDB takes pains to assure that data replication is protected from hardware-correlated failures. If FoundationDB is run in multiple VMs on a single machine this protection will be subverted. An administrator can inform FoundationDB of this hardware sharing, however, by specifying a machine ID using the ``machine_id`` parameter in :ref:`foundationdb.conf <foundationdb-conf>`. All processes on VMs that share hardware should specify the same ``machine_id``.
|
||||
Processes running in different VMs on a single machine will appear to FoundationDB as being hardware isolated. FoundationDB takes pains to assure that data replication is protected from hardware-correlated failures. If FoundationDB is run in multiple VMs on a single machine this protection will be subverted. An administrator can inform FoundationDB of this hardware sharing, however, by specifying a machine ID using the ``locality_machineid`` parameter in :ref:`foundationdb.conf <foundationdb-conf>`. All processes on VMs that share hardware should specify the same ``locality_machineid``.
|
||||
|
||||
Datacenters
|
||||
------------
|
||||
|
||||
FoundationDB is datacenter aware and supports operation across datacenters. In a multiple-datacenter configuration, it is recommended that you set the :ref:`redundancy mode <configuration-choosing-redundancy-mode>` to ``three_datacenter`` and that you set the ``datacenter_id`` parameter for all FoundationDB processes in :ref:`foundationdb.conf <foundationdb-conf>`.
|
||||
FoundationDB is datacenter aware and supports operation across datacenters. In a multiple-datacenter configuration, it is recommended that you set the :ref:`redundancy mode <configuration-choosing-redundancy-mode>` to ``three_datacenter`` and that you set the ``locality_dcid`` parameter for all FoundationDB processes in :ref:`foundationdb.conf <foundationdb-conf>`.
|
||||
|
||||
If you specify the ``-a`` option to any FoundationDB process in your cluster, you should specify it to all such processes. Processes which do not have a specified datacenter ID on the command line are considered part of a default "unset" datacenter. FoundationDB will incorrectly believe that these processes are failure-isolated from other datacenters, which can reduce performance and fault tolerance.
|
||||
If you specify the ``--datacenter_id`` option to any FoundationDB process in your cluster, you should specify it to all such processes. Processes which do not have a specified datacenter ID on the command line are considered part of a default "unset" datacenter. FoundationDB will incorrectly believe that these processes are failure-isolated from other datacenters, which can reduce performance and fault tolerance.
|
||||
|
||||
.. _administration-removing:
|
||||
|
||||
|
@ -483,13 +584,16 @@ To upgrade a FoundationDB cluster, you must install the updated version of Found
|
|||
|
||||
.. warning:: |development-use-only-warning|
|
||||
|
||||
Install updated client binaries
|
||||
-------------------------------
|
||||
|
||||
Apart from patch version upgrades, you should install the new client binary on all your clients and restart them to ensure they can reconnect after the upgrade. See :ref:`multi-version-client-api` for more information. Running ``status json`` will show you which versions clients are connecting with so you can verify before upgrading that clients are correctly configured.
|
||||
|
||||
Stage the packages
|
||||
------------------
|
||||
|
||||
Go to :doc:`downloads` and select Ubuntu or RHEL/CentOS, as appropriate for your system. Download both the client and server packages and copy them to each machine in your cluster.
|
||||
|
||||
.. warning:: |upgrade-client-server-warning|
|
||||
|
||||
Perform the upgrade
|
||||
-------------------
|
||||
|
||||
|
@ -514,16 +618,21 @@ Test the database
|
|||
|
||||
Test the database to verify that it is operating normally by running ``fdbcli`` and :ref:`reviewing the cluster status <administration-monitoring-cluster-status>`.
|
||||
|
||||
Restart your application clients
|
||||
--------------------------------
|
||||
Remove old client library versions
|
||||
----------------------------------
|
||||
|
||||
Stop and restart all application clients to reload the upgraded FoundationDB dynamic libraries.
|
||||
You can now remove old client library versions from your clients. This is only to stop creating unnecessary connections.
|
||||
|
||||
.. _version-specific-upgrading:
|
||||
|
||||
Version-specific notes on upgrading
|
||||
===================================
|
||||
|
||||
Upgrading from 5.1.x
|
||||
--------------------
|
||||
|
||||
Upgrades from 5.0.x will keep all your old data and configuration settings. 5.1 has a new backup format so backups will need to be restarted after upgrading.
|
||||
|
||||
Upgrading from 5.0.x
|
||||
--------------------
|
||||
|
||||
|
|
|
@ -37,17 +37,10 @@ The rise of mobile computing has led to the model of *disconnected operation* in
|
|||
|
||||
While a central server running FoundationDB could be used as a database for a mobile application to connect and sync to from time to time, FoundationDB's core does not itself directly provide disconnected operation. Because it would sacrifice ACID properties, we believe that in those applications where disconnected operation is needed, the database is the wrong tier to implement it.
|
||||
|
||||
Long-running transactions
|
||||
=========================
|
||||
Long-running read/write transactions
|
||||
====================================
|
||||
|
||||
FoundationDB aims to provide low latencies across a range of metrics. Transaction latencies, in particular, are typically under 15 milliseconds. Some applications require very large operations that require several seconds or more, several orders of magnitude larger than our usual transaction latency. Large operations of this kind are best approached in FoundationDB by decomposition into a set of smaller transactions.
|
||||
|
||||
FoundationDB does not support *long-running transactions*, currently defined as those
|
||||
:ref:`lasting over five seconds <long-transactions>`. The system employs multiversion concurrency control and maintains older versions of the database for a five second period. A transaction that is kept open longer will not be able to commit. If you have a requirement to support large operations, we would be happy to assist you to implement a decomposition strategy within a layer.
|
||||
|
||||
Content delivery networks (CDN)
|
||||
===============================
|
||||
|
||||
A *content delivery network* (CDN) employs geographically dispersed datacenters to serve data with high performance to similarly dispersed end-users. While FoundationDB does support multiple datacenters, it has not been designed as a CDN. The FoundationDB core does not locate data in a geographically aware manner and does not aim to provide low write latencies (e.g., under 5 milliseconds) over large geographic distances.
|
||||
|
||||
In FoundationDB's configuration for multiple datacenters, each datacenter contains a complete, up-to-date copy of the database. Each client will have a primary datacenter, with other datacenters acting in a secondary mode to support minimal downtime if a datacenter becomes unavailable.
|
||||
FoundationDB does not support *long-running read/write transactions*, currently defined as those
|
||||
:ref:`lasting over five seconds <long-transactions>`. The system employs multiversion concurrency control and maintains conflict information for a five second period. A transaction that is kept open longer will not be able to commit.
|
||||
|
|
|
@ -80,7 +80,7 @@ The multi-version client API adds a new ``cluster_version_changed`` error that i
|
|||
|
||||
.. warning:: Setting an API version that is not supported by a particular client library will prevent that client from being used to connect to the cluster. In particular, you should not advance the API version of your application after upgrading your client until the cluster has also been upgraded.
|
||||
|
||||
.. warning:: You should avoid including multiple protocol-compatible clients in the external client libraries list. While the client will still work, it will consume more resources than necessary. Additionally, different patch releases of the same version (e.g. ``x.y.z`` and ``x.y.w``) are generally protocol compatible, and including multiple may result in not using the most recent compatible client.
|
||||
.. warning:: You should avoid including multiple protocol-compatible clients in the external client libraries list. While the client will still work, it will consume more resources than necessary. Additionally, different patch releases of the same version (e.g. ``x.y.z`` and ``x.y.w``) are protocol compatible, and including multiple may result in not using the most recent compatible client.
|
||||
|
||||
.. note:: It is recommended that you not include more external clients than necessary. For example, a client that has been upgraded to a newer version than its cluster may need to include a single external client that matches the version of the cluster, but it generally won't require a copy of every prior version.
|
||||
|
||||
|
|
|
@ -71,12 +71,13 @@ At this point and after each subsequent step, it is a good idea to test the data
|
|||
Machines - 1
|
||||
Memory availability - 4.1 GB per process on machine with least available
|
||||
Fault Tolerance - 0 machines
|
||||
Server time - Wed Oct 8 14:41:34 2014
|
||||
Server time - Thu Mar 15 14:41:34 2018
|
||||
|
||||
Data:
|
||||
Replication health - Healthy
|
||||
Moving data - 0.000 GB
|
||||
Sum of key-value sizes - 0 MB
|
||||
Sum of key-value sizes - 8 MB
|
||||
Disk space used - 103 MB
|
||||
|
||||
Operating space:
|
||||
Storage server - 1.0 GB free on most full server
|
||||
|
@ -89,7 +90,11 @@ At this point and after each subsequent step, it is a good idea to test the data
|
|||
Transactions committed - 0 Hz
|
||||
Conflict rate - 0 Hz
|
||||
|
||||
Client time: Thu Nov 20 09:50:45 2014
|
||||
Backup and DR:
|
||||
Running backups - 0
|
||||
Running DRs - 0
|
||||
|
||||
Client time: Thu Mar 15 14:41:34 2018
|
||||
|
||||
.. note:: If the database is not operational the status command will provide diagnostic information to help you resolve the issue. For more help, please post a question (and the results of the status command) on the `community forums <https://forums.foundationdb.org>`_.
|
||||
|
||||
|
|
|
@ -112,7 +112,7 @@ Requirements
|
|||
------------
|
||||
|
||||
We'll need to let users list available classes and track which students have signed up for which classes. Here's a first cut at the functions we'll need to implement::
|
||||
|
||||
|
||||
availableClasses() // returns list of classes
|
||||
signup(studentID, class) // signs up a student for a class
|
||||
drop(studentID, class) // drops a student from a class
|
||||
|
@ -123,13 +123,13 @@ Data model
|
|||
----------
|
||||
|
||||
First, we need to design a :doc:`data model <data-modeling>`. A data model is just a method for storing our application data using keys and values in FoundationDB. We seem to have two main types of data: (1) a list of classes and (2) a record of which students will attend which classes. Let's keep attending data like this::
|
||||
|
||||
|
||||
// ("attends", student, class) = ""
|
||||
|
||||
We'll just store the key with a blank value to indicate that a student is signed up for a particular class. For this application, we're going to think about a key-value pair's key as a :ref:`tuple <data-modeling-tuples>`. Encoding a tuple of data elements into a key is a very common pattern for an ordered key-value store.
|
||||
|
||||
We'll keep data about classes like this::
|
||||
|
||||
|
||||
// ("class", class_name) = seatsAvailable
|
||||
|
||||
Similarly, each such key will represent an available class. We'll use ``seatsAvailable`` to record the number of seats available.
|
||||
|
@ -235,7 +235,7 @@ Let's make some sample classes and put them in the ``classNames`` variable. We'l
|
|||
|
||||
var levels = []string{"intro", "for dummies", "remedial", "101", "201", "301", "mastery", "lab", "seminar"}
|
||||
var types = []string{"chem", "bio", "cs", "geometry", "calc", "alg", "film", "music", "art", "dance"}
|
||||
var times = []string{"2:00", "3:00", "4:00", "5:00", "6:00", "7:00", "8:00", "9:00", "10:00", "11:00",
|
||||
var times = []string{"2:00", "3:00", "4:00", "5:00", "6:00", "7:00", "8:00", "9:00", "10:00", "11:00",
|
||||
"12:00", "13:00", "14:00", "15:00", "16:00", "17:00", "18:00", "19:00"}
|
||||
|
||||
classes := make([]string, len(levels) * len(types) * len(times))
|
||||
|
@ -253,7 +253,7 @@ Initializing the database
|
|||
Next, we initialize the database with our class list:
|
||||
|
||||
.. code-block:: go
|
||||
|
||||
|
||||
_, err = db.Transact(func (tr fdb.Transaction) (interface{}, error) {
|
||||
tr.ClearRange(schedulingDir)
|
||||
|
||||
|
@ -301,7 +301,7 @@ Signing up for a class
|
|||
We finally get to the crucial function (which we saw before when looking at :func:`Transact`). A student has decided on a class (by name) and wants to sign up. The ``signup`` function will take a ``studentID`` and a ``class``:
|
||||
|
||||
.. code-block:: go
|
||||
|
||||
|
||||
func signup(t fdb.Transactor, studentID, class string) (err error) {
|
||||
SCKey := attendSS.Pack(tuple.Tuple{studentID, class})
|
||||
|
||||
|
@ -336,7 +336,7 @@ Of course, to actually drop the student from the class, we need to be able to de
|
|||
Done?
|
||||
-----
|
||||
|
||||
We report back to the project leader that our application is done---students can sign up for, drop, and list classes. Unfortunately, we learn that there has been a bit of scope creep in the mean time. Popular classes are getting over-subscribed, and our application is going to need to enforce the class size constraint as students add and drop classes.
|
||||
We report back to the project leader that our application is done---students can sign up for, drop, and list classes. Unfortunately, we learn that a new problem has been discovered: popular classes are being over-subscribed. Our application now needs to enforce the class size constraint as students add and drop classes.
|
||||
|
||||
Seats are limited!
|
||||
------------------
|
||||
|
@ -497,7 +497,7 @@ Fortunately, we decided on a data model that keeps all of the attending records
|
|||
Composing transactions
|
||||
----------------------
|
||||
|
||||
Oh, just one last feature, we're told. We have students that are trying to switch from one popular class to another. By the time they drop one class to free up a slot for themselves, the open slot in the other class is gone. By the time they see this and try to re-add their old class, that slot is gone too! So, can we make it so that a student can switch from one class to another without this worry?
|
||||
Oh, just one last feature, we're told. We have students that are trying to switch from one popular class to another. By the time they drop one class to free up a slot for themselves, the open slot in the other class is gone. By the time they see this and try to re-add their old class, that slot is gone too! So, can we make it so that a student can switch from one class to another without this worry?
|
||||
|
||||
Fortunately, we have FoundationDB, and this sounds an awful lot like the transactional property of atomicity---the all-or-nothing behavior that we already rely on. All we need to do is to *compose* the ``drop`` and ``signup`` functions into a new ``swap`` function. This makes the ``swap`` function exceptionally easy:
|
||||
|
||||
|
@ -524,7 +524,7 @@ Also note that, if an exception is raised, for example, in ``signup``, the excep
|
|||
Are we done?
|
||||
------------
|
||||
|
||||
Yep, we're done. Fortunately, our UI team built an awesome UI while we were working on our back end, and we are ready to deploy. If you want to see this entire application in one place plus some concurrent testing code, look at the :ref:`class-sched-go-appendix`, below.
|
||||
Yep, we’re done and ready to deploy. If you want to see this entire application in one place plus some multithreaded testing code to simulate concurrency, look at the :ref:`class-sched-go-appendix`, below.
|
||||
|
||||
Deploying and scaling
|
||||
---------------------
|
||||
|
@ -536,7 +536,7 @@ Next steps
|
|||
|
||||
* See :doc:`data-modeling` for guidance on using tuple and subspaces to enable effective storage and retrieval of data.
|
||||
* See :doc:`developer-guide` for general guidance on development using FoundationDB.
|
||||
* See the :doc:`API References <api-reference>` for detailed API documentation.
|
||||
* See the :doc:`API References <api-reference>` for detailed API documentation.
|
||||
|
||||
.. _class-sched-go-appendix:
|
||||
|
||||
|
@ -674,7 +674,7 @@ Here's the code for the scheduling tutorial:
|
|||
|
||||
var levels = []string{"intro", "for dummies", "remedial", "101", "201", "301", "mastery", "lab", "seminar"}
|
||||
var types = []string{"chem", "bio", "cs", "geometry", "calc", "alg", "film", "music", "art", "dance"}
|
||||
var times = []string{"2:00", "3:00", "4:00", "5:00", "6:00", "7:00", "8:00", "9:00", "10:00", "11:00",
|
||||
var times = []string{"2:00", "3:00", "4:00", "5:00", "6:00", "7:00", "8:00", "9:00", "10:00", "11:00",
|
||||
"12:00", "13:00", "14:00", "15:00", "16:00", "17:00", "18:00", "19:00"}
|
||||
|
||||
classes := make([]string, len(levels) * len(types) * len(times))
|
||||
|
|
|
@ -26,8 +26,8 @@ Before using the API, we need to specify the API version. This allows programs t
|
|||
|
||||
.. code-block:: java
|
||||
|
||||
private static final FDB fdb;
|
||||
private static final Database db;
|
||||
private static final FDB fdb;
|
||||
private static final Database db;
|
||||
|
||||
static {
|
||||
fdb = FDB.selectAPIVersion(510);
|
||||
|
@ -62,8 +62,8 @@ If this is all working, it looks like we are ready to start building a real appl
|
|||
|
||||
public class HelloWorld {
|
||||
|
||||
private static final FDB fdb;
|
||||
private static final Database db;
|
||||
private static final FDB fdb;
|
||||
private static final Database db;
|
||||
|
||||
static {
|
||||
fdb = FDB.selectAPIVersion(510);
|
||||
|
@ -94,7 +94,7 @@ Requirements
|
|||
------------
|
||||
|
||||
We'll need to let users list available classes and track which students have signed up for which classes. Here's a first cut at the functions we'll need to implement::
|
||||
|
||||
|
||||
availableClasses() // returns list of classes
|
||||
signup(studentID, class) // signs up a student for a class
|
||||
drop(studentID, class) // drops a student from a class
|
||||
|
@ -105,13 +105,13 @@ Data model
|
|||
----------
|
||||
|
||||
First, we need to design a :doc:`data model <data-modeling>`. A data model is just a method for storing our application data using keys and values in FoundationDB. We seem to have two main types of data: (1) a list of classes and (2) a record of which students will attend which classes. Let's keep attending data like this::
|
||||
|
||||
|
||||
// ("attends", student, class) = ""
|
||||
|
||||
We'll just store the key with a blank value to indicate that a student is signed up for a particular class. For this application, we're going to think about a key-value pair's key as a :ref:`tuple <data-modeling-tuples>`. Encoding a tuple of data elements into a key is a very common pattern for an ordered key-value store.
|
||||
|
||||
We'll keep data about classes like this::
|
||||
|
||||
|
||||
// ("class", class_name) = seatsAvailable
|
||||
|
||||
Similarly, each such key will represent an available class. We'll use ``seatsAvailable`` to record the number of seats available.
|
||||
|
@ -188,7 +188,7 @@ Initializing the database
|
|||
We initialize the database with our class list:
|
||||
|
||||
.. code-block:: java
|
||||
|
||||
|
||||
private static void init(Database db) {
|
||||
db.run((Transaction tr) -> {
|
||||
tr.clear(Tuple.from("attends").range());
|
||||
|
@ -225,7 +225,7 @@ Signing up for a class
|
|||
We finally get to the crucial function. A student has decided on a class (by name) and wants to sign up. The ``signup`` function will take a student (``s``) and a class (``c``):
|
||||
|
||||
.. code-block:: java
|
||||
|
||||
|
||||
private static void signup(TransactionContext db, final String s, final String c) {
|
||||
db.run((Transaction tr) -> {
|
||||
byte[] rec = Tuple.from("attends", s, c).pack();
|
||||
|
@ -256,7 +256,7 @@ Of course, to actually drop the student from the class, we need to be able to de
|
|||
Done?
|
||||
-----
|
||||
|
||||
We report back to the project leader that our application is done---students can sign up for, drop, and list classes. Unfortunately, we learn that there has been a bit of scope creep in the mean time. Popular classes are getting over-subscribed, and our application is going to need to enforce the class size constraint as students add and drop classes.
|
||||
We report back to the project leader that our application is done---students can sign up for, drop, and list classes. Unfortunately, we learn that a new problem has been discovered: popular classes are being over-subscribed. Our application now needs to enforce the class size constraint as students add and drop classes.
|
||||
|
||||
Seats are limited!
|
||||
------------------
|
||||
|
@ -370,7 +370,7 @@ Fortunately, we decided on a data model that keeps all of the attending records
|
|||
Composing transactions
|
||||
----------------------
|
||||
|
||||
Oh, just one last feature, we're told. We have students that are trying to switch from one popular class to another. By the time they drop one class to free up a slot for themselves, the open slot in the other class is gone. By the time they see this and try to re-add their old class, that slot is gone too! So, can we make it so that a student can switch from one class to another without this worry?
|
||||
Oh, just one last feature, we're told. We have students that are trying to switch from one popular class to another. By the time they drop one class to free up a slot for themselves, the open slot in the other class is gone. By the time they see this and try to re-add their old class, that slot is gone too! So, can we make it so that a student can switch from one class to another without this worry?
|
||||
|
||||
Fortunately, we have FoundationDB, and this sounds an awful lot like the transactional property of atomicity---the all-or-nothing behavior that we already rely on. All we need to do is to *compose* the ``drop`` and ``signup`` functions into a new ``switchClasses`` function. This makes the ``switchClasses`` function exceptionally easy:
|
||||
|
||||
|
@ -393,7 +393,7 @@ Also note that, if an exception is raised, for example, in ``signup``, the excep
|
|||
Are we done?
|
||||
------------
|
||||
|
||||
Yep, we're done. Fortunately, our UI team built an awesome UI while we were working on our back end, and we are ready to deploy. If you want to see this entire application in one place plus some multithreaded testing code to simulate concurrency, look at the :ref:`class-sched-java-appendix`, below.
|
||||
Yep, we’re done and ready to deploy. If you want to see this entire application in one place plus some multithreaded testing code to simulate concurrency, look at the :ref:`class-sched-java-appendix`, below.
|
||||
|
||||
Deploying and scaling
|
||||
---------------------
|
||||
|
@ -405,7 +405,7 @@ Next steps
|
|||
|
||||
* See :doc:`data-modeling` for guidance on using tuple and subspaces to enable effective storage and retrieval of data.
|
||||
* See :doc:`developer-guide` for general guidance on development using FoundationDB.
|
||||
* See the :doc:`API References <api-reference>` for detailed API documentation.
|
||||
* See the :doc:`API References <api-reference>` for detailed API documentation.
|
||||
|
||||
.. _class-sched-java-appendix:
|
||||
|
||||
|
@ -432,8 +432,8 @@ Here's the code for the scheduling tutorial:
|
|||
|
||||
public class ClassScheduling {
|
||||
|
||||
private static final FDB fdb;
|
||||
private static final Database db;
|
||||
private static final FDB fdb;
|
||||
private static final Database db;
|
||||
|
||||
static {
|
||||
fdb = FDB.selectAPIVersion(510);
|
||||
|
|
|
@ -19,27 +19,27 @@ Open a Ruby interactive interpreter and import the FoundationDB API module::
|
|||
|
||||
$ irb
|
||||
> require 'fdb'
|
||||
=> true
|
||||
=> true
|
||||
|
||||
Before using the API, we need to specify the API version. This allows programs to maintain compatibility even if the API is modified in future versions::
|
||||
|
||||
> FDB.api_version 510
|
||||
=> nil
|
||||
=> nil
|
||||
|
||||
Next, we open a FoundationDB database. The API will connect to the FoundationDB cluster indicated by the :ref:`default cluster file <default-cluster-file>`. ::
|
||||
|
||||
> @db = FDB.open
|
||||
=> #<FDB::Database:0x007fc2309751e0 @dpointer=#<FFI::Pointer address=0x007fc231c139c0>, @options=#<FDB::DatabaseOptions:0x007fc230975168 @setfunc=#<Proc:0x007fc230975190@/Users/stephenpimentel/.rvm/gems/ruby-2.0.0-p247/gems/fdb-1.0.0/lib/fdbimpl.rb:510 (lambda)>>>
|
||||
=> #<FDB::Database:0x007fc2309751e0 @dpointer=#<FFI::Pointer address=0x007fc231c139c0>, @options=#<FDB::DatabaseOptions:0x007fc230975168 @setfunc=#<Proc:0x007fc230975190@/Users/someone/.rvm/gems/ruby-2.0.0-p247/gems/fdb-1.0.0/lib/fdbimpl.rb:510 (lambda)>>>
|
||||
|
||||
We are ready to use the database. In Ruby, using the ``[]`` operator on the database object is a convenient syntax for performing a read or write on the database. First, let's simply write a key-value pair::
|
||||
|
||||
> @db['hello'] = 'world'
|
||||
=> "world"
|
||||
=> "world"
|
||||
|
||||
When this command returns without exception, the modification is durably stored in FoundationDB! Under the covers, this function creates a transaction with a single modification. We'll see later how to do multiple operations in a single transaction. For now, let's read back the data::
|
||||
|
||||
>>> print 'hello ', @db['hello']
|
||||
hello world => nil
|
||||
hello world => nil
|
||||
|
||||
If this is all working, it looks like we are ready to start building a real application. For reference, here's the full code for "hello world":
|
||||
|
||||
|
@ -60,7 +60,7 @@ Requirements
|
|||
------------
|
||||
|
||||
We'll need to let users list available classes and track which students have signed up for which classes. Here's a first cut at the functions we'll need to implement::
|
||||
|
||||
|
||||
available_classes() # returns list of classes
|
||||
signup(studentID, class) # signs up a student for a class
|
||||
drop(studentID, class) # drops a student from a class
|
||||
|
@ -71,13 +71,13 @@ Data model
|
|||
----------
|
||||
|
||||
First, we need to design a :doc:`data model <data-modeling>`. A data model is just a method for storing our application data using keys and values in FoundationDB. We seem to have two main types of data: (1) a list of classes and (2) a record of which students will attend which classes. Let's keep attending data like this::
|
||||
|
||||
|
||||
# ['attends', student, class] = ''
|
||||
|
||||
We'll just store the key with a blank value to indicate that a student is signed up for a particular class. For this application, we're going to think about a key-value pair's key as a :ref:`tuple <data-modeling-tuples>`. Encoding a tuple of data elements into a key is a very common pattern for an ordered key-value store.
|
||||
|
||||
We'll keep data about classes like this::
|
||||
|
||||
|
||||
# ['class', class_name] = seats_available
|
||||
|
||||
Similarly, each such key will represent an available class. We'll use ``seats_available`` to record the number of seats available.
|
||||
|
@ -131,7 +131,7 @@ Let's make some sample classes and put them in the ``@class_names`` variable. We
|
|||
.. code-block:: ruby
|
||||
|
||||
# Generate 1,620 classes like '9:00 chem for dummies'
|
||||
levels = ['intro', 'for dummies', 'remedial', '101',
|
||||
levels = ['intro', 'for dummies', 'remedial', '101',
|
||||
'201', '301', 'mastery', 'lab', 'seminar']
|
||||
types = ['chem', 'bio', 'cs', 'geometry', 'calc',
|
||||
'alg', 'film', 'music', 'art', 'dance']
|
||||
|
@ -144,7 +144,7 @@ Initializing the database
|
|||
We initialize the database with our class list:
|
||||
|
||||
.. code-block:: ruby
|
||||
|
||||
|
||||
def init(db_or_tr)
|
||||
db_or_tr.transact do |tr|
|
||||
tr.clear_range_start_with(FDB::Tuple.pack(['attends']))
|
||||
|
@ -179,7 +179,7 @@ Signing up for a class
|
|||
We finally get to the crucial function. A student has decided on a class (by name) and wants to sign up. The ``signup`` function will take a student (``s``) and a class (``c``):
|
||||
|
||||
.. code-block:: ruby
|
||||
|
||||
|
||||
def signup(db_or_tr, s, c)
|
||||
db_or_tr.transact do |tr|
|
||||
rec = FDB::Tuple.pack(['attends', s, c])
|
||||
|
@ -208,7 +208,7 @@ Of course, to actually drop the student from the class, we need to be able to de
|
|||
Done?
|
||||
-----
|
||||
|
||||
We report back to the project leader that our application is done---students can sign up for, drop, and list classes. Unfortunately, we learn that there has been a bit of scope creep in the mean time. Popular classes are getting over-subscribed, and our application is going to need to enforce the class size constraint as students add and drop classes.
|
||||
We report back to the project leader that our application is done---students can sign up for, drop, and list classes. Unfortunately, we learn that a new problem has been discovered: popular classes are being over-subscribed. Our application now needs to enforce the class size constraint as students add and drop classes.
|
||||
|
||||
Seats are limited!
|
||||
------------------
|
||||
|
@ -237,7 +237,7 @@ This is easy -- we simply add a condition to check that the value is non-zero. L
|
|||
end
|
||||
|
||||
seats_left = tr[FDB::Tuple.pack(['class', c])].to_i
|
||||
if seats_left == 0
|
||||
if seats_left == 0
|
||||
raise 'No remaining seats'
|
||||
end
|
||||
|
||||
|
@ -301,13 +301,13 @@ Of course, as soon as our new version of the system goes live, we hear of a tric
|
|||
end
|
||||
|
||||
seats_left = tr[FDB::Tuple.pack(['class', c])].to_i
|
||||
if seats_left == 0
|
||||
if seats_left == 0
|
||||
raise 'No remaining seats'
|
||||
end
|
||||
|
||||
r = FDB::Tuple.range(['attends', s])
|
||||
classes = tr.get_range(r[0], r[1])
|
||||
if classes.count == 5
|
||||
if classes.count == 5
|
||||
raise 'Too many classes'
|
||||
end
|
||||
|
||||
|
@ -321,7 +321,7 @@ Fortunately, we decided on a data model that keeps all of the attending records
|
|||
Composing transactions
|
||||
----------------------
|
||||
|
||||
Oh, just one last feature, we're told. We have students that are trying to switch from one popular class to another. By the time they drop one class to free up a slot for themselves, the open slot in the other class is gone. By the time they see this and try to re-add their old class, that slot is gone too! So, can we make it so that a student can switch from one class to another without this worry?
|
||||
Oh, just one last feature, we're told. We have students that are trying to switch from one popular class to another. By the time they drop one class to free up a slot for themselves, the open slot in the other class is gone. By the time they see this and try to re-add their old class, that slot is gone too! So, can we make it so that a student can switch from one class to another without this worry?
|
||||
|
||||
Fortunately, we have FoundationDB, and this sounds an awful lot like the transactional property of atomicity---the all-or-nothing behavior that we already rely on. All we need to do is to *compose* the ``drop`` and ``signup`` functions into a new ``switch`` function. This makes the ``switch`` function exceptionally easy:
|
||||
|
||||
|
@ -343,7 +343,7 @@ Also note that, if an exception is raised, for example, in ``signup``, the excep
|
|||
Are we done?
|
||||
------------
|
||||
|
||||
Yep, we're done. Fortunately, our UI team built an awesome UI while we were working on our back end, and we are ready to deploy. If you want to see this entire application in one place plus some multithreaded testing code to simulate concurrency, look at the :ref:`class-sched-ruby-appendix`, below.
|
||||
Yep, we’re done and ready to deploy. If you want to see this entire application in one place plus some multithreaded testing code to simulate concurrency, look at the :ref:`class-sched-ruby-appendix`, below.
|
||||
|
||||
Deploying and scaling
|
||||
---------------------
|
||||
|
@ -355,7 +355,7 @@ Next steps
|
|||
|
||||
* See :doc:`data-modeling` for guidance on using tuple and subspaces to enable effective storage and retrieval of data.
|
||||
* See :doc:`developer-guide` for general guidance on development using FoundationDB.
|
||||
* See the :doc:`API References <api-reference>` for detailed API documentation.
|
||||
* See the :doc:`API References <api-reference>` for detailed API documentation.
|
||||
|
||||
.. _class-sched-ruby-appendix:
|
||||
|
||||
|
@ -387,7 +387,7 @@ Here's the code for the scheduling tutorial:
|
|||
end
|
||||
|
||||
# Generate 1,620 classes like '9:00 chem for dummies'
|
||||
levels = ['intro', 'for dummies', 'remedial', '101',
|
||||
levels = ['intro', 'for dummies', 'remedial', '101',
|
||||
'201', '301', 'mastery', 'lab', 'seminar']
|
||||
types = ['chem', 'bio', 'cs', 'geometry', 'calc',
|
||||
'alg', 'film', 'music', 'art', 'dance']
|
||||
|
@ -420,13 +420,13 @@ Here's the code for the scheduling tutorial:
|
|||
end
|
||||
|
||||
seats_left = tr[FDB::Tuple.pack(['class', c])].to_i
|
||||
if seats_left == 0
|
||||
if seats_left == 0
|
||||
raise 'No remaining seats'
|
||||
end
|
||||
|
||||
r = FDB::Tuple.range(['attends', s])
|
||||
classes = tr.get_range(r[0], r[1])
|
||||
if classes.count == 5
|
||||
if classes.count == 5
|
||||
raise 'Too many classes'
|
||||
end
|
||||
|
||||
|
|
|
@ -62,7 +62,7 @@ Requirements
|
|||
------------
|
||||
|
||||
We'll need to let users list available classes and track which students have signed up for which classes. Here's a first cut at the functions we'll need to implement::
|
||||
|
||||
|
||||
available_classes() # returns list of classes
|
||||
signup(studentID, class) # signs up a student for a class
|
||||
drop(studentID, class) # drops a student from a class
|
||||
|
@ -73,13 +73,13 @@ Data model
|
|||
----------
|
||||
|
||||
First, we need to design a :doc:`data model <data-modeling>`. A data model is just a method for storing our application data using keys and values in FoundationDB. We seem to have two main types of data: (1) a list of classes and (2) a record of which students will attend which classes. Let's keep attending data like this::
|
||||
|
||||
|
||||
# ('attends', student, class) = ''
|
||||
|
||||
We'll just store the key with a blank value to indicate that a student is signed up for a particular class. For this application, we're going to think about a key-value pair's key as a :ref:`tuple <data-modeling-tuples>`. Encoding a tuple of data elements into a key is a very common pattern for an ordered key-value store.
|
||||
|
||||
We'll keep data about classes like this::
|
||||
|
||||
|
||||
# ('class', class_name) = seats_available
|
||||
|
||||
Similarly, each such key will represent an available class. We'll use ``seats_available`` to record the number of seats available.
|
||||
|
@ -139,9 +139,9 @@ Making some sample classes
|
|||
Let's make some sample classes and put them in the ``class_names`` variable. The Python ``itertools`` module is used to make individual classes from combinations of class types, levels, and times::
|
||||
|
||||
import itertools
|
||||
|
||||
|
||||
# Generate 1,620 classes like '9:00 chem for dummies'
|
||||
levels = ['intro', 'for dummies', 'remedial', '101',
|
||||
levels = ['intro', 'for dummies', 'remedial', '101',
|
||||
'201', '301', 'mastery', 'lab', 'seminar']
|
||||
types = ['chem', 'bio', 'cs', 'geometry', 'calc',
|
||||
'alg', 'film', 'music', 'art', 'dance']
|
||||
|
@ -152,7 +152,7 @@ Let's make some sample classes and put them in the ``class_names`` variable. The
|
|||
Initializing the database
|
||||
-------------------------
|
||||
We initialize the database with our class list::
|
||||
|
||||
|
||||
@fdb.transactional
|
||||
def init(tr):
|
||||
del tr[scheduling.range(())] # Clear the directory
|
||||
|
@ -176,7 +176,7 @@ Signing up for a class
|
|||
----------------------
|
||||
|
||||
We finally get to the crucial function. A student has decided on a class (by name) and wants to sign up. The ``signup`` function will take a student (``s``) and a class (``c``)::
|
||||
|
||||
|
||||
@fdb.transactional
|
||||
def signup(tr, s, c):
|
||||
rec = attends.pack((s, c))
|
||||
|
@ -199,7 +199,7 @@ Of course, to actually drop the student from the class, we need to be able to de
|
|||
Done?
|
||||
-----
|
||||
|
||||
We report back to the project leader that our application is done---students can sign up for, drop, and list classes. Unfortunately, we learn that there has been a bit of scope creep in the mean time. Popular classes are getting over-subscribed, and our application is going to need to enforce the class size constraint as students add and drop classes.
|
||||
We report back to the project leader that our application is done---students can sign up for, drop, and list classes. Unfortunately, we learn that a new problem has been discovered: popular classes are being over-subscribed. Our application now needs to enforce the class size constraint as students add and drop classes.
|
||||
|
||||
Seats are limited!
|
||||
------------------
|
||||
|
@ -211,7 +211,7 @@ Let's go back to the data model. Remember that we stored the number of seats in
|
|||
|
||||
@fdb.transactional
|
||||
def available_classes(tr):
|
||||
return [fdb.tuple.unpack(k)[2] for k, v in tr[course.range(())]
|
||||
return [course.unpack(k)[0] for k, v in tr[course.range(())]
|
||||
if int(v)]
|
||||
|
||||
This is easy -- we simply add a condition to check that the value is non-zero. Let's check out ``signup`` next:
|
||||
|
@ -291,7 +291,7 @@ Fortunately, we decided on a data model that keeps all of the attending records
|
|||
Composing transactions
|
||||
----------------------
|
||||
|
||||
Oh, just one last feature, we're told. We have students that are trying to switch from one popular class to another. By the time they drop one class to free up a slot for themselves, the open slot in the other class is gone. By the time they see this and try to re-add their old class, that slot is gone too! So, can we make it so that a student can switch from one class to another without this worry?
|
||||
Oh, just one last feature, we're told. We have students that are trying to switch from one popular class to another. By the time they drop one class to free up a slot for themselves, the open slot in the other class is gone. By the time they see this and try to re-add their old class, that slot is gone too! So, can we make it so that a student can switch from one class to another without this worry?
|
||||
|
||||
Fortunately, we have FoundationDB, and this sounds an awful lot like the transactional property of atomicity---the all-or-nothing behavior that we already rely on. All we need to do is to *compose* the ``drop`` and ``signup`` functions into a new ``switch`` function. This makes the ``switch`` function exceptionally easy::
|
||||
|
||||
|
@ -309,7 +309,7 @@ Also note that, if an exception is raised, for example, in ``signup``, the excep
|
|||
Are we done?
|
||||
------------
|
||||
|
||||
Yep, we're done. Fortunately, our UI team built an awesome UI while we were working on our back end, and we are ready to deploy. If you want to see this entire application in one place plus some multithreaded testing code to simulate concurrency, look at the :ref:`tutorial-appendix`, below.
|
||||
Yep, we’re done and ready to deploy. If you want to see this entire application in one place plus some multithreaded testing code to simulate concurrency, look at the :ref:`tutorial-appendix`, below.
|
||||
|
||||
Deploying and scaling
|
||||
---------------------
|
||||
|
@ -321,7 +321,7 @@ Next steps
|
|||
|
||||
* See :doc:`data-modeling` for guidance on using tuple and subspaces to enable effective storage and retrieval of data.
|
||||
* See :doc:`developer-guide` for general guidance on development using FoundationDB.
|
||||
* See the :doc:`API References <api-reference>` for detailed API documentation.
|
||||
* See the :doc:`API References <api-reference>` for detailed API documentation.
|
||||
|
||||
.. _tutorial-appendix:
|
||||
|
||||
|
@ -355,7 +355,7 @@ Here's the code for the scheduling tutorial::
|
|||
tr[course.pack((c,))] = bytes(100)
|
||||
|
||||
# Generate 1,620 classes like '9:00 chem for dummies'
|
||||
levels = ['intro', 'for dummies', 'remedial', '101',
|
||||
levels = ['intro', 'for dummies', 'remedial', '101',
|
||||
'201', '301', 'mastery', 'lab', 'seminar']
|
||||
types = ['chem', 'bio', 'cs', 'geometry', 'calc',
|
||||
'alg', 'film', 'music', 'art', 'dance']
|
||||
|
@ -377,7 +377,7 @@ Here's the code for the scheduling tutorial::
|
|||
|
||||
@fdb.transactional
|
||||
def available_classes(tr):
|
||||
return [course.unpack(k)[0] for k, v in tr[course.range(())]
|
||||
return [course.unpack(k)[0] for k, v in tr[course.range(())]
|
||||
if int(v)]
|
||||
|
||||
|
||||
|
|
|
@ -21,7 +21,7 @@ You can invoke ``fdbcli`` at the command line simply by typing it. For example::
|
|||
The database is available.
|
||||
|
||||
Welcome to the fdbcli. For help, type `help'.
|
||||
fdb>
|
||||
fdb>
|
||||
|
||||
This will result in ``fdbcli`` connecting to the :ref:`default cluster file <default-cluster-file>` (``/etc/foundationdb/fdb.cluster`` for Linux.) You can also specify a cluster file as an argument to ``fdbcli`` using the ``-C`` option. For further information, see :ref:`specifying-a-cluster-file`.
|
||||
|
||||
|
@ -59,7 +59,7 @@ The ``commit`` command commits the current transaction. Any sets or clears execu
|
|||
configure
|
||||
---------
|
||||
|
||||
The ``configure`` command changes the database configuration. Its syntax is ``configure [new] [single|double|triple|three_data_hall|three_datacenter] [ssd|memory] [proxies=<N>] [resolvers=<N>] [logs=<N>]``.
|
||||
The ``configure`` command changes the database configuration. Its syntax is ``configure [new] [single|double|triple|three_data_hall|multi_dc] [ssd|memory] [proxies=<N>] [resolvers=<N>] [logs=<N>]``.
|
||||
|
||||
The ``new`` option, if present, initializes a new database with the given configuration rather than changing the configuration of an existing one. When ``new`` is used, both a redundancy mode and a storage engine must be specified.
|
||||
|
||||
|
@ -71,8 +71,8 @@ Redundancy modes define storage requirements, required cluster size, and resilie
|
|||
* ``single``
|
||||
* ``double``
|
||||
* ``triple``
|
||||
* ``three_datacenter``
|
||||
* ``three_data_hall``
|
||||
* ``multi_dc``
|
||||
|
||||
For descriptions of redundancy modes, see :ref:`configuration-choosing-redundancy-mode`.
|
||||
|
||||
|
@ -127,7 +127,7 @@ For more information on excluding servers, see :ref:`removing-machines-from-a-cl
|
|||
|
||||
exit
|
||||
----
|
||||
|
||||
|
||||
The ``exit`` command exits ``fdbcli``.
|
||||
|
||||
get
|
||||
|
@ -180,7 +180,7 @@ The following options are available for use with the ``option`` command:
|
|||
|
||||
``ACCESS_SYSTEM_KEYS`` - Allows this transaction to read and modify system keys (those that start with the byte ``0xFF``).
|
||||
|
||||
``CAUSAL_READ_RISKY`` - The read version will be committed. It will usually be the latest committed but might not be in the event of a fault or partition.
|
||||
``CAUSAL_READ_RISKY`` - In the event of a fault or partition, the read version returned may not be the last committed version, potentially causing you to read outdated data.
|
||||
|
||||
``CAUSAL_WRITE_RISKY`` - The transaction, if not self-conflicting, may be committed a second time after commit succeeds, in the event of a fault.
|
||||
|
||||
|
|
|
@ -10,7 +10,7 @@ This document contains *reference* information for configuring a new FoundationD
|
|||
|
||||
.. note:: In FoundationDB, a "cluster" refers to one or more FoundationDB processes spread across one or more physical machines that together host a FoundationDB database.
|
||||
|
||||
To plan an externally accessible cluster, you need to understand some basic aspects of the system. You can start by reviewing the :ref:`system requirements <system-requirements>`, then how to :ref:`choose <configuration-choosing-coordination-servers>` and :ref:`change coordination servers <configuration-changing-coordination-servers>`. Next, you should look at the :ref:`configuration file <foundationdb-conf>`, which controls most other aspects of the system. Then, you should understand how to :ref:`choose a redundancy mode <configuration-choosing-redundancy-mode>` and :ref:`configure the storage subsystem <configuration-configuring-storage-subsystem>`. Finally, there are some configurations you can adjust to improve performance if your :ref:`cluster is large <configuration-large-cluster-performance>`.
|
||||
To plan an externally accessible cluster, you need to understand some basic aspects of the system. You can start by reviewing the :ref:`system requirements <system-requirements>`, then how to :ref:`choose <configuration-choosing-coordination-servers>` and :ref:`change coordination servers <configuration-changing-coordination-servers>`. Next, you should look at the :ref:`configuration file <foundationdb-conf>`, which controls most other aspects of the system. Then, you should understand how to :ref:`choose a redundancy mode <configuration-choosing-redundancy-mode>` and :ref:`configure the storage subsystem <configuration-configuring-storage-subsystem>`. Finally, there are some guidelines for setting :ref:`process class configurations <guidelines-process-class-config>`.
|
||||
|
||||
.. _system-requirements:
|
||||
|
||||
|
@ -98,12 +98,13 @@ After running this command, you can check that it completed successfully by usin
|
|||
Machines - 3
|
||||
Memory availability - 4.1 GB per process on machine with least available
|
||||
Fault Tolerance - 0 machines
|
||||
Server time - Wed Oct 8 14:41:34 2014
|
||||
Server time - Thu Mar 15 14:41:34 2018
|
||||
|
||||
Data:
|
||||
Replication health - Healthy
|
||||
Moving data - 0.000 GB
|
||||
Sum of key-value sizes - 0 MB
|
||||
Sum of key-value sizes - 8 MB
|
||||
Disk space used - 103 MB
|
||||
|
||||
Operating space:
|
||||
Storage server - 1.0 GB free on most full server
|
||||
|
@ -116,6 +117,10 @@ After running this command, you can check that it completed successfully by usin
|
|||
Transactions committed - 0 Hz
|
||||
Conflict rate - 0 Hz
|
||||
|
||||
Backup and DR:
|
||||
Running backups - 0
|
||||
Running DRs - 0
|
||||
|
||||
Process performance details:
|
||||
10.0.4.1:4500 ( 3% cpu; 2% machine; 0.004 Gbps; 0% disk; 2.5 GB / 4.1 GB RAM )
|
||||
10.0.4.2:4500 ( 1% cpu; 2% machine; 0.004 Gbps; 0% disk; 2.5 GB / 4.1 GB RAM )
|
||||
|
@ -126,7 +131,7 @@ After running this command, you can check that it completed successfully by usin
|
|||
10.0.4.2:4500
|
||||
10.0.4.3:4500
|
||||
|
||||
Client time: Thu Nov 20 09:50:45 2014
|
||||
Client time: Thu Mar 15 14:41:34 2018
|
||||
|
||||
The list of coordinators verifies that the coordinator change succeeded. A few things might cause this process to not go smoothly:
|
||||
|
||||
|
@ -229,15 +234,19 @@ Contains settings applicable to all processes (e.g. fdbserver, backup_agent). Th
|
|||
logdir = /var/log/foundationdb
|
||||
# logsize = 10MiB
|
||||
# maxlogssize = 100MiB
|
||||
# machine_id =
|
||||
# datacenter_id =
|
||||
# class =
|
||||
# memory = 8GiB
|
||||
# storage_memory = 1GiB
|
||||
# locality_machineid =
|
||||
# locality_zoneid =
|
||||
# locality_data_hall =
|
||||
# locality_dcid =
|
||||
# io_trust_seconds = 20
|
||||
|
||||
Contains default parameters for all fdbserver processes on this machine. These same options can be overridden for individual processes in their respective ``[fdbserver.<ID>]`` sections. In this section, the ID of the individual fdbserver can be substituted by using the ``$ID`` variable in the value. For example, ``public_address = auto:$ID`` makes each fdbserver listen on a port equal to its ID.
|
||||
|
||||
.. note:: |multiplicative-suffixes|
|
||||
.. note:: In general, locality IDs are used to specify the location of processes, which in turn is used to determine fault and replication domains.
|
||||
|
||||
* ``command``: The location of the ``fdbserver`` binary.
|
||||
* ``public_address``: The publicly visible IP:Port of the process. If ``auto``, the address will be the one used to communicate with the coordination servers.
|
||||
|
@ -246,11 +255,14 @@ Contains default parameters for all fdbserver processes on this machine. These s
|
|||
* ``logdir``: A writable directory (by root or by the user set in the [fdbmonitor] section) where FoundationDB will store log files.
|
||||
* ``logsize``: Roll over to a new log file after the current log file reaches the specified size. The default value is 10MiB.
|
||||
* ``maxlogssize``: Delete the oldest log file when the total size of all log files exceeds the specified size. If set to 0B, old log files will not be deleted. The default value is 100MiB.
|
||||
* ``machine_id``: Machine identifier key. Processes that share a key are considered non-unique for the purposes of data replication. By default, processes on a machine determine a unique key to share. This does not generally need to be set. The ID can be up to 16 hexadecimal digits.
|
||||
* ``datacenter_id``: Data center identifier key. All processes physically located in a data center should share the id. If unset, defaults to a special "default" data center. If you are depending on data center based replication this must be set on all processes. The ID can be up to 16 hexadecimal digits.
|
||||
* ``class``: Machine class specifying the roles that will be taken in the cluster. Valid options are ``storage``, ``transaction``, ``resolution``. See :ref:`configuration-large-cluster-performance` for machine class recommendations in large clusters.
|
||||
* ``class``: Process class specifying the roles that will be taken in the cluster. Recommended options are ``storage``, ``transaction``, ``stateless``. See :ref:`guidelines-process-class-config` for process class config recommendations.
|
||||
* ``memory``: Maximum memory used by the process. The default value is 8GiB. When specified without a unit, MiB is assumed. This parameter does not change the memory allocation of the program. Rather, it sets a hard limit beyond which the process will kill itself and be restarted. The default value of 8GiB is double the intended memory usage in the default configuration (providing an emergency buffer to deal with memory leaks or similar problems). It is *not* recommended to decrease the value of this parameter below its default value. It may be *increased* if you wish to allocate a very large amount of storage engine memory or cache. In particular, when the ``storage_memory`` parameter is increased, the ``memory`` parameter should be increased by an equal amount.
|
||||
* ``storage_memory``: Maximum memory used for data storage. This parameter is used *only* with the memory storage engine, not the ssd storage engine. The default value is 1GiB. When specified without a unit, MB is assumed. Clusters will be restricted to using this amount of memory per process for purposes of data storage. Memory overhead associated with storing the data is counted against this total. If you increase the ``storage_memory``, you should also increase the ``memory`` parameter by the same amount.
|
||||
* ``locality_machineid``: Machine identifier key. All processes on a machine should share a unique id. By default, processes on a machine determine a unique id to share. This does not generally need to be set.
|
||||
* ``locality_zoneid``: Zone identifier key. Processes that share a zone id are considered non-unique for the purposes of data replication. If unset, defaults to machine id.
|
||||
* ``locality_dcid``: Data center identifier key. All processes physically located in a data center should share the id. No default value. If you are depending on data center based replication this must be set on all processes.
|
||||
* ``locality_data_hall``: Data hall identifier key. All processes physically located in a data hall should share the id. No default value. If you are depending on data hall based replication this must be set on all processes.
|
||||
* ``io_trust_seconds``: Time in seconds that a read or write operation is allowed to take before timing out with an error. If an operation times out, all future operations on that file will fail with an error as well. Only has an effect when using AsyncFileKAIO in Linux. If unset, defaults to 0 which means timeout is disabled.
|
||||
|
||||
``[fdbserver.<ID>]`` section(s)
|
||||
---------------------------------
|
||||
|
@ -502,27 +514,22 @@ When creating a partition for use with FoundationDB using the standard Linux fdi
|
|||
|
||||
For an SSD with a single partition, the partition should typically begin at sector 2048 (512 byte sectors yields 1024 KiB alignment).
|
||||
|
||||
.. _configuration-large-cluster-performance:
|
||||
.. _guidelines-process-class-config:
|
||||
|
||||
Large cluster performance
|
||||
=========================
|
||||
Guidelines for setting process class
|
||||
====================================
|
||||
|
||||
.. note:: For small-to-medium clusters (32 processes or fewer), FoundationDB's default behavior generally provides the best performance, and you should ignore this section. Further configuration is recommended only for large clusters (> 32 processes) or if you have special latency requirements.
|
||||
In a FoundationDB cluster, each of the ``fdbserver`` processes performs different tasks. Each process is recruited to do a particular task based on its process ``class``. For example, processes with ``class=storage`` are given preference to be recruited for doing storage server tasks, ``class=transaction`` for log server processes, and ``class=stateless`` for stateless processes like proxies, resolvers, etc.
|
||||
|
||||
In a FoundationDB cluster, each of the ``fdbserver`` processes perform different tasks. FoundationDB automatically assigns each machine in the cluster a ``class`` that specifies the tasks it will perform. For large clusters, FoundationDB also provides the ability to tune cluster performance by manually assigning the ``class`` of some machines.
|
||||
The recommended minimum number of ``class=transaction`` (log server) processes is 8 (active) + 2 (standby) and the recommended minimum number for ``class=stateless`` processes is 4 (proxy) + 1 (resolver) + 1 (cluster controller) + 1 (master) + 2 (standby). It is better to spread the transaction and stateless processes across as many machines as possible.
|
||||
|
||||
To assign machine classes manually, set the ``class=transaction`` parameter in :ref:`foundationdb.conf <foundationdb-conf>` on all processes on selected machines. The ratio of total processes to ``class``-specified processes should be about 8:1. For example, if you have 64 processes on 16 machines, you would set ``class=transaction`` for 8 processes on 2 machines.
|
||||
``fdbcli`` is used to set the desired number of processes of a particular process type. To do so, you would issue the ``fdbcli`` commands::
|
||||
|
||||
For large clusters with high write workloads (greater than 100,000 writes/second), you can increase performance by increasing the number of proxies, resolvers, and log servers. These are set using ``fdbcli`` in equal (1:1:1) proportions among the processes on machines set to ``class=transaction``.
|
||||
fdb> configure proxies=5
|
||||
fdb> configure logs=8
|
||||
|
||||
For example, if you have 384 processes on 96 machines, and a workload greater than 100,000 writes per second, you would set ``class=transaction`` for 48 processes on 12 machines. Of the latter, you would set 16 processes on 4 machines each as *proxies*, *resolvers*, and *log servers*. To do so, you would issue the ``fdbcli`` commands::
|
||||
.. note:: In the present release, the default value for proxies and log servers is 3 and for resolvers is 1. You should not set the value of a process type to less than its default.
|
||||
|
||||
fdb> configure proxies=16
|
||||
fdb> configure resolvers=16
|
||||
fdb> configure logs=16
|
||||
|
||||
.. note:: In the present release, the default value for proxies and log servers is 3 and for resolvers is 1. The ratios discussed above are guidelines; regardless of them, you should not set the value of a process type to less than its default. For example, on clusters ranging from 36 to 60 processes with high write workloads, you may choose to increase the number of resolvers to 2. In this case, you would nevertheless leave the number of proxies and log servers at their default values of 3.
|
||||
|
||||
.. warning:: The conflict-resolution algorithm used by FoundationDB is conservative: it guarantees that no conflicting transactions will be committed, but it may fail to commit some transactions that theoretically could have been. The effects of this conservatism may increase as you increase the number of proxies. It is therefore important to employ the recommended techniques for :ref:`minimizing conflicts <developer-guide-transaction-conflicts>` when increasing the number of proxies.
|
||||
.. warning:: The conflict-resolution algorithm used by FoundationDB is conservative: it guarantees that no conflicting transactions will be committed, but it may fail to commit some transactions that theoretically could have been. The effects of this conservatism may increase as you increase the number of resolvers. It is therefore important to employ the recommended techniques for :ref:`minimizing conflicts <developer-guide-transaction-conflicts>` when increasing the number of resolvers.
|
||||
|
||||
You can contact us on the `community forums <https://forums.foundationdb.org>`_ if you are interested in more details or if you are benchmarking or performance-tuning on large clusters. Also see our `performance benchmarks </performance>`_ for a baseline of how a well-configured cluster should perform.
|
||||
|
|
|
@ -5,12 +5,43 @@ Downloads
|
|||
Client & Server Packages
|
||||
========================
|
||||
|
||||
FoundationDB packages are available on Artifactory for the following operating systems:
|
||||
macOS
|
||||
-----
|
||||
|
||||
* `macOS <https://files.foundationdb.org/artifacts/5.1.0/release/osx/>`_. Supported on macOS >= 10.7. Installs client and (optionally) server.
|
||||
* `Ubuntu <https://files.foundationdb.org/artifacts/5.1.0/release/ubuntu/>`_. The server depends on the client. Supported on 64-bit Ubuntu >= 12.04, but beware of the Linux kernel bug in Ubuntu 12.x.
|
||||
* `RHEL/CentOS EL6 <https://files.foundationdb.org/artifacts/5.1.0/release/rhel6/>`_. The server depends on the client. Supported on 64-bit RHEL/CentOS (6.x).
|
||||
* `RHEL/CentOS EL7 <https://files.foundationdb.org/artifacts/5.1.0/release/rhel7/>`_. The server depends on the client. Supported on 64-bit RHEL/CentOS (7.x).
|
||||
The macOS installation package is supported on macOS 10.7+. It includes the client and (optionally) the server.
|
||||
|
||||
* `FoundationDB-5.1.0.pkg <https://www.foundationdb.org/downloads/5.1.0/macOS/installers/FoundationDB-5.1.0.pkg>`_
|
||||
|
||||
Ubuntu
|
||||
------
|
||||
|
||||
The Ubuntu packages are supported on 64-bit Ubuntu 12.04+, but beware of the Linux kernel bug in Ubuntu 12.x.
|
||||
|
||||
* `foundationdb-clients-5.1.0-1_amd64.deb <https://www.foundationdb.org/downloads/5.1.0/ubuntu/installers/foundationdb-clients_5.1.0-1_amd64.deb>`_
|
||||
* `foundationdb-server-5.1.0-1_amd64.deb <https://www.foundationdb.org/downloads/5.1.0/ubuntu/installers/foundationdb-server_5.1.0-1_amd64.deb>`_ (depends on the clients package)
|
||||
|
||||
RHEL/CentOS EL6
|
||||
---------------
|
||||
|
||||
The RHEL/CentOS EL6 packages are supported on 64-bit RHEL/CentOS 6.x.
|
||||
|
||||
* `foundationdb-clients-5.1.0-1.el6.x86_64.rpm <https://www.foundationdb.org/downloads/5.1.0/rhel6/installers/foundationdb-clients-5.1.0-1.el6.x86_64.rpm>`_
|
||||
* `foundationdb-server-5.1.0-1.el6.x86_64.rpm <https://www.foundationdb.org/downloads/5.1.0/rhel6/installers/foundationdb-server-5.1.0-1.el6.x86_64.rpm>`_ (depends on the clients package)
|
||||
|
||||
RHEL/CentOS EL7
|
||||
---------------
|
||||
|
||||
The RHEL/CentOS EL7 packages are supported on 64-bit RHEL/CentOS 7.x.
|
||||
|
||||
* `foundationdb-clients-5.1.0-1.el7.x86_64.rpm <https://www.foundationdb.org/downloads/5.1.0/rhel7/installers/foundationdb-clients-5.1.0-1.el7.x86_64.rpm>`_
|
||||
* `foundationdb-server-5.1.0-1.el7.x86_64.rpm <https://www.foundationdb.org/downloads/5.1.0/rhel7/installers/foundationdb-server-5.1.0-1.el7.x86_64.rpm>`_ (depends on the clients package)
|
||||
|
||||
Windows
|
||||
-------
|
||||
|
||||
The Windows installer is supported on 64-bit Windows XP and later. It includes the client and (optionally) the server.
|
||||
|
||||
* `foundationdb-5.1.0-x64.msi <https://www.foundationdb.org/downloads/5.1.0/windows/installers/foundationdb-5.1.0-x64.msi>`_
|
||||
|
||||
API Language Bindings
|
||||
=====================
|
||||
|
@ -23,27 +54,24 @@ FoundationDB's C bindings are installed with the FoundationDB client binaries. Y
|
|||
Python 2.7 - 3.4
|
||||
----------------
|
||||
|
||||
The FoundationDB Python API is installed as part of your FoundationDB installation.
|
||||
On macOS and Windows, the FoundationDB Python API bindings are installed as part of your FoundationDB installation.
|
||||
|
||||
If you need to use the FoundationDB Python API from other Python installations or paths, download the `package <https://files.foundationdb.org/artifacts/5.1.0/release/python/>`_.
|
||||
If you need to use the FoundationDB Python API from other Python installations or paths, download the Python package:
|
||||
|
||||
Ruby 1.9.3/3.0.2
|
||||
----------------
|
||||
* `foundationdb-5.1.0.tar.gz <https://www.foundationdb.org/downloads/5.1.0/bindings/python/foundationdb-5.1.0.tar.gz>`_
|
||||
|
||||
Download the `gem <https://files.foundationdb.org/artifacts/5.1.0/release/ruby/>`_.
|
||||
|
||||
Java JRE 1.8+
|
||||
-------------
|
||||
|
||||
Download the `jar and javadoc.jar <https://files.foundationdb.org/artifacts/5.1.0/release/java/>`_.
|
||||
|
||||
Node 0.8.x/0.10.x
|
||||
Ruby 1.9.3/2.0.0+
|
||||
-----------------
|
||||
|
||||
Download the `node package <https://files.foundationdb.org/artifacts/5.1.0/release/nodejs/>`_.
|
||||
* `fdb-5.1.0.gem <https://www.foundationdb.org/downloads/5.1.0/bindings/ruby/fdb-5.1.0.gem>`_
|
||||
|
||||
Java 8+
|
||||
-------
|
||||
|
||||
* `fdb-java-5.1.0.jar <https://www.foundationdb.org/downloads/5.1.0/bindings/java/fdb-java-5.1.0.jar>`_
|
||||
* `fdb-java-5.1.0-javadoc.jar <https://www.foundationdb.org/downloads/5.1.0/bindings/java/fdb-java-5.1.0-javadoc.jar>`_
|
||||
|
||||
Go 1.1+
|
||||
-------
|
||||
|
||||
The FoundationDB Go package is available on `github <https://github.com/apple/foundationdb/tree/master/bindings/go>`_
|
||||
The FoundationDB Go package is available on `GitHub <https://github.com/apple/foundationdb/tree/master/bindings/go>`_.
|
||||
|
|
|
@ -23,10 +23,12 @@ Any distributed system faces some basic probabilistic constraints. For example,
|
|||
|
||||
FoundationDB improves these probabilities by selecting "teams" of machines on which to distribute data. Instead of putting each chunk of data on a different set of machines, each machine can participate in multiple teams. In the above example, by selecting only 450 teams of 4 machines that each chunk of data can be on, the chance of data unavailability is reduced to about 0.5%.
|
||||
|
||||
The number of machines in each team is based on the replication mode; the total number of teams increases with the size of the cluster.
|
||||
|
||||
Independence assumptions
|
||||
========================
|
||||
|
||||
As a further refinement, FoundationDB can be made aware that certain machines might tend to fail together. For example, every machine in a rack might share a network and power connection. If either failed, then the entire rack of machines would fail. We use this knowledge when choosing teams, taking care not to place any two machines in a team that would have a tendency to fail together. Pieces of data can then be intelligently distributed across racks or even datacenters, so that characteristic multimachine failures (for example, based on rack configuration) do not cause service interruption or data loss. Using this method, FoundationDB can continuously operate through a failure of an entire datacenter.
|
||||
As a further refinement, FoundationDB can be made aware that certain machines might tend to fail together by specifying the locality of each process. For example, every machine in a rack might share a network and power connection. If either failed, then the entire rack of machines would fail. We use this knowledge when choosing teams, taking care not to place any two machines in a team that would have a tendency to fail together. Pieces of data can then be intelligently distributed across racks or even datacenters, so that characteristic multimachine failures (for example, based on rack configuration) do not cause service interruption or data loss. Our ``three_data_hall`` and ``multi_dc`` configurations use this technique to continuously operate through a failure of a data hall or datacenter respectively.
|
||||
|
||||
Other types of failure
|
||||
======================
|
||||
|
|
|
@ -40,7 +40,7 @@ Clients can create transactional watches on keys to ensure that they are notifie
|
|||
Atomic Operations
|
||||
-----------------
|
||||
|
||||
FoundationDB includes support for specific "atomic operations" (e.g. Add) within a transaction to manipulate the value of a key without requiring the client to actually read the value. This makes these operations "zero-latency" and enables a variety of advanced data structures to be implemented more efficiently as layers.
|
||||
FoundationDB includes support for specific "atomic operations" (e.g. Add) within a transaction to manipulate the value of a key without requiring the client to actually read the value. This makes these operations low-latency and enables a variety of advanced data structures to be implemented more efficiently as layers.
|
||||
|
||||
OLTP and OLAP
|
||||
-------------
|
||||
|
@ -82,7 +82,7 @@ FoundationDB uses multiversion concurrency control to provide transactionally is
|
|||
Concurrent Connections
|
||||
----------------------
|
||||
|
||||
FoundationDB is not slowed by large numbers of concurrent client connections. Because it uses a threadless communications and concurrency model, FoundationDB does not have to create a thread per connection. This allows full performance even with hundreds of thousands of in-flight requests.
|
||||
FoundationDB is able to handle large numbers of concurrent client connections. Because it uses a threadless communications and concurrency model, FoundationDB does not have to create a thread per connection. This allows full performance even with hundreds of thousands of in-flight requests.
|
||||
|
||||
Interactive Transactions
|
||||
------------------------
|
||||
|
@ -100,7 +100,7 @@ A FoundationDB database can start on a single machine and be expanded to a clust
|
|||
Datacenter Failover
|
||||
-------------------
|
||||
|
||||
FoundationDB can be configured to run multiple live redundant clusters in geographically diverse datacenters. Each datacenter contains a complete and fully up-to-date copy of all data in the system, allowing for minimal downtime even when an entire datacenter becomes unavailable.
|
||||
FoundationDB can be configured to run multiple geographically diverse datacenters through our Multi DC mode. Each piece of data is replicated into three data centers, and clients can read data from their local data center at low latencies. In the event of a data center failure, the two remaining data centers will continue accepting writes, allowing for minimal downtime.
|
||||
|
||||
Self Tuning
|
||||
-----------
|
||||
|
|
|
@ -72,12 +72,13 @@ To verify that the local FoundationDB database is operational, open the command
|
|||
Machines - 1
|
||||
Memory availability - 4.1 GB per process on machine with least available
|
||||
Fault Tolerance - 0 machines
|
||||
Server time - Wed Oct 8 14:41:34 2014
|
||||
Server time - Thu Mar 15 14:41:34 2018
|
||||
|
||||
Data:
|
||||
Replication health - Healthy
|
||||
Moving data - 0.000 GB
|
||||
Sum of key-value sizes - 0 MB
|
||||
Sum of key-value sizes - 8 MB
|
||||
Disk space used - 103 MB
|
||||
|
||||
Operating space:
|
||||
Storage server - 1.0 GB free on most full server
|
||||
|
@ -90,7 +91,11 @@ To verify that the local FoundationDB database is operational, open the command
|
|||
Transactions committed - 0 Hz
|
||||
Conflict rate - 0 Hz
|
||||
|
||||
Client time: Thu Nov 20 09:50:45 2014
|
||||
Backup and DR:
|
||||
Running backups - 0
|
||||
Running DRs - 0
|
||||
|
||||
Client time: Thu Mar 15 14:41:34 2018
|
||||
|
||||
If these steps were successful you have installed and validated FoundationDB. You can now start using the database!
|
||||
|
||||
|
|
|
@ -63,12 +63,13 @@ To verify that the local FoundationDB database is operational, open the command
|
|||
Machines - 1
|
||||
Memory availability - 4.1 GB per process on machine with least available
|
||||
Fault Tolerance - 0 machines
|
||||
Server time - Wed Oct 8 14:41:34 2014
|
||||
Server time - Thu Mar 15 14:41:34 2018
|
||||
|
||||
Data:
|
||||
Replication health - Healthy
|
||||
Moving data - 0.000 GB
|
||||
Sum of key-value sizes - 0 MB
|
||||
Sum of key-value sizes - 8 MB
|
||||
Disk space used - 103 MB
|
||||
|
||||
Operating space:
|
||||
Storage server - 1.0 GB free on most full server
|
||||
|
@ -81,7 +82,11 @@ To verify that the local FoundationDB database is operational, open the command
|
|||
Transactions committed - 0 Hz
|
||||
Conflict rate - 0 Hz
|
||||
|
||||
Client time: Thu Nov 20 09:56:52 2014
|
||||
Backup and DR:
|
||||
Running backups - 0
|
||||
Running DRs - 0
|
||||
|
||||
Client time: Thu Mar 15 14:41:34 2018
|
||||
|
||||
If these steps were successful you have installed and validated FoundationDB. You can now start using the database!
|
||||
|
||||
|
|
|
@ -7,7 +7,7 @@
|
|||
The ``description`` is a logical description of the database using alphanumeric characters (a-z, A-Z, 0-9) and underscores.
|
||||
|
||||
.. |cluster-file-rule2| replace::
|
||||
The ``ID`` is an arbitrary value containing alphanumeric characters (a-z, A-Z, 0-9). We recommend using a random eight-character identifier (such as the output of ``mktemp -u XXXXXXXX``).
|
||||
The ``ID`` is an arbitrary value containing alphanumeric characters (a-z, A-Z, 0-9). We recommend using a random eight-character identifier (such as the output of ``mktemp -u XXXXXXXX``). Note that the ``ID`` will change when coordinators change.
|
||||
|
||||
.. |cluster-file-rule3| replace::
|
||||
The list of ``IP:PORT`` pairs specify the set of coordination servers. A majority of these servers must be available for the database to be operational so they should be chosen carefully. The number of coordination servers should therefore be odd and must be more than one to support fault-tolerance. We recommend using five coordination servers when using ``triple`` mode to maintain the ability to tolerate two simultaneous machine failures.
|
||||
|
|
|
@ -179,17 +179,15 @@ Here’s a basic implementation of the recipe.
|
|||
}
|
||||
|
||||
public static Object insertDoc(TransactionContext tcx, final Map<Object,Object> doc){
|
||||
return tcx.run(new Function<Transaction,Object>() {
|
||||
public Object apply(Transaction tr){
|
||||
if(!doc.containsKey("doc_id")){
|
||||
doc.put("doc_id", getNewID(tr));
|
||||
}
|
||||
for(Tuple t : toTuples(doc)){
|
||||
tr.set(docSpace.pack(Tuple.from(doc.get("doc_id")).addAll(t.popBack())),
|
||||
Tuple.from(t.get(t.size() - 1)).pack());
|
||||
}
|
||||
return doc.get("doc_id");
|
||||
return tcx.run(tr -> {
|
||||
if(!doc.containsKey("doc_id")){
|
||||
doc.put("doc_id", getNewID(tr));
|
||||
}
|
||||
for(Tuple t : toTuples(doc)){
|
||||
tr.set(docSpace.pack(Tuple.from(doc.get("doc_id")).addAll(t.popBack())),
|
||||
Tuple.from(t.get(t.size() - 1)).pack());
|
||||
}
|
||||
return doc.get("doc_id");
|
||||
});
|
||||
}
|
||||
|
||||
|
@ -198,43 +196,38 @@ Here’s a basic implementation of the recipe.
|
|||
}
|
||||
|
||||
public static Object getDoc(TransactionContext tcx, final Object ID, final Tuple prefix){
|
||||
return tcx.run(new Function<Transaction,Object>() {
|
||||
public Object apply(Transaction tr){
|
||||
Future<byte[]> v = tr.get(docSpace.pack(Tuple.from(ID).addAll(prefix)));
|
||||
if(v.get() != null){
|
||||
// One single item.
|
||||
ArrayList<Tuple> vals = new ArrayList<Tuple>();
|
||||
vals.add(prefix.addAll(Tuple.fromBytes(v.get())));
|
||||
return fromTuples(vals);
|
||||
} else {
|
||||
// Multiple items.
|
||||
ArrayList<Tuple> vals = new ArrayList<Tuple>();
|
||||
for(KeyValue kv : tr.getRange(docSpace.range(Tuple.from(ID).addAll(prefix)))){
|
||||
vals.add(docSpace.unpack(kv.getKey()).popFront().addAll(Tuple.fromBytes(kv.getValue())));
|
||||
}
|
||||
return fromTuples(vals);
|
||||
return tcx.run(tr -> {
|
||||
Future<byte[]> v = tr.get(docSpace.pack(Tuple.from(ID).addAll(prefix)));
|
||||
if(v.get() != null){
|
||||
// One single item.
|
||||
ArrayList<Tuple> vals = new ArrayList<Tuple>();
|
||||
vals.add(prefix.addAll(Tuple.fromBytes(v.get())));
|
||||
return fromTuples(vals);
|
||||
} else {
|
||||
// Multiple items.
|
||||
ArrayList<Tuple> vals = new ArrayList<Tuple>();
|
||||
for(KeyValue kv : tr.getRange(docSpace.range(Tuple.from(ID).addAll(prefix)))){
|
||||
vals.add(docSpace.unpack(kv.getKey()).popFront().addAll(Tuple.fromBytes(kv.getValue())));
|
||||
}
|
||||
return fromTuples(vals);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
private static int getNewID(TransactionContext tcx){
|
||||
return tcx.run(new Function<Transaction,Integer>() {
|
||||
@SuppressWarnings("unused")
|
||||
public Integer apply(Transaction tr){
|
||||
boolean found = false;
|
||||
int newID;
|
||||
do {
|
||||
newID = (int)(Math.random()*100000000);
|
||||
found = true;
|
||||
for(KeyValue kv : tr.getRange(docSpace.range(Tuple.from(newID)))){
|
||||
// If not empty, this is false.
|
||||
found = false;
|
||||
break;
|
||||
}
|
||||
} while(!found);
|
||||
return newID;
|
||||
}
|
||||
return tcx.run(tr -> {
|
||||
boolean found = false;
|
||||
int newID;
|
||||
do {
|
||||
newID = (int)(Math.random()*100000000);
|
||||
found = true;
|
||||
for(KeyValue kv : tr.getRange(docSpace.range(Tuple.from(newID)))){
|
||||
// If not empty, this is false.
|
||||
found = false;
|
||||
break;
|
||||
}
|
||||
} while(!found);
|
||||
return newID;
|
||||
});
|
||||
}
|
||||
}
|
||||
|
|
|
@ -7,12 +7,7 @@ FoundationDB |version|
|
|||
Overview
|
||||
========
|
||||
|
||||
FoundationDB is a distributed database designed to handle large volumes of structured data across clusters of commodity servers. It organizes data as an ordered key-value store and employs :doc:`ACID transactions <transaction-manifesto>` for all operations. It is especially well-suited for read/write workloads but also has excellent :doc:`performance <performance>` for write-intensive workloads. Users interact with the database using a :doc:`API language binding <api-reference>`.
|
||||
|
||||
Local Development
|
||||
=================
|
||||
|
||||
FoundationDB runs on clusters in Apple data centers, but you can :doc:`begin local development <local-dev>` now.
|
||||
FoundationDB is a distributed database designed to handle large volumes of structured data across clusters of commodity servers. It organizes data as an ordered key-value store and employs :doc:`ACID transactions <transaction-manifesto>` for all operations. It is especially well-suited for read/write workloads but also has excellent :doc:`performance <performance>` for write-intensive workloads. Users interact with the database using a :doc:`API language binding <api-reference>`. You can :doc:`begin local development <local-dev>` today.
|
||||
|
||||
Documentation
|
||||
=============
|
||||
|
@ -41,6 +36,8 @@ The latest changes are detailed in :doc:`release-notes`. The documentation has t
|
|||
|
||||
* :doc:`tutorials` provide simple examples of client design using FoundationDB.
|
||||
|
||||
* :doc:`administration` contains documentation on administering FoundationDB.
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 1
|
||||
:titlesonly:
|
||||
|
|
|
@ -20,28 +20,6 @@ Design limitations
|
|||
|
||||
These limitations come from fundamental design decisions and are unlikely to change in the short term. Applications using FoundationDB should plan to work around these limitations. See :doc:`anti-features` for related discussion of our design approach to the FoundationDB core.
|
||||
|
||||
.. _long-transactions:
|
||||
|
||||
Long transactions
|
||||
-----------------
|
||||
|
||||
FoundationDB currently does not support transactions running for over five seconds. In particular, after 5 seconds from the first read in a transaction:
|
||||
|
||||
* subsequent reads that go to the database will usually raise a ``past_version`` :doc:`error <api-error-codes>` (although reads cached by the client will not);
|
||||
* a commit with any write will raise a ``past_version`` or ``not_committed`` :doc:`error <api-error-codes>`.
|
||||
|
||||
Clients need to avoid these cases. For the design reasons behind this limitation, see the discussion in :doc:`anti-features`.
|
||||
|
||||
.. admonition:: Workarounds
|
||||
|
||||
The effect of long and large transactions can be achieved using short and small transactions with a variety of techniques, depending on the desired behavior:
|
||||
|
||||
* If an application wants long transactions because of an external process in the loop, it can perform optimistic validation itself at a higher layer.
|
||||
* If it needs long-running read snapshots, it can perform versioning in a layer.
|
||||
* If it needs large bulk inserts, it can use a level of indirection to swap in the inserted data quickly.
|
||||
|
||||
As with all data modeling problems, please ask for help on the community site (or via e-mail) with your specific needs.
|
||||
|
||||
.. _large-transactions:
|
||||
|
||||
Large transactions
|
||||
|
@ -90,12 +68,38 @@ The current version of FoundationDB resolves key selectors with large offsets in
|
|||
|
||||
The RankedSet layer provides a data structure in which large offsets and counting operations require only O(log N) time. It is a good choice for applications such as large leaderboards that require such functionality.
|
||||
|
||||
Not a security boundary
|
||||
-----------------------
|
||||
|
||||
Anyone who can connect to a FoundationDB cluster can read and write every key in the database. There is no user-level access control. External protections must be put into place to protect your database.
|
||||
|
||||
Current limitations
|
||||
===================
|
||||
|
||||
These limitations do not reflect fundamental aspects of our design and are likely be resolved or mitigated in future versions. Administrators should be aware of these issues, but longer-term application development should be less driven by them.
|
||||
|
||||
.. _long-transactions:
|
||||
|
||||
Long running transactions
|
||||
-------------------------
|
||||
|
||||
FoundationDB currently does not support transactions running for over five seconds. In particular, after 5 seconds from the first read in a transaction:
|
||||
|
||||
* subsequent reads that go to the database will usually raise a ``transaction_too_old`` :doc:`error <api-error-codes>` (although reads cached by the client will not);
|
||||
* a commit with any write will raise a ``transaction_too_old`` or ``not_committed`` :doc:`error <api-error-codes>`.
|
||||
|
||||
Long running read/write transactions are a design limitation, see the discussion in :doc:`anti-features`.
|
||||
|
||||
.. admonition:: Workarounds
|
||||
|
||||
The effect of long and large transactions can be achieved using short and small transactions with a variety of techniques, depending on the desired behavior:
|
||||
|
||||
* If an application wants long transactions because of an external process in the loop, it can perform optimistic validation itself at a higher layer.
|
||||
* If it needs long-running read snapshots, it can perform versioning in a layer.
|
||||
* If it needs large bulk inserts, it can use a level of indirection to swap in the inserted data quickly.
|
||||
|
||||
.. _cluster-size:
|
||||
|
||||
Cluster size
|
||||
------------
|
||||
|
||||
|
|
|
@ -80,16 +80,14 @@ Here’s a simple implementation of multimaps with multisets as described:
|
|||
}
|
||||
|
||||
private static void addHelp(TransactionContext tcx, final byte[] key, final long amount){
|
||||
tcx.run(new Function<Transaction,Void>() {
|
||||
public Void apply(Transaction tr){
|
||||
ByteBuffer b = ByteBuffer.allocate(8);
|
||||
b.order(ByteOrder.LITTLE_ENDIAN);
|
||||
b.putLong(amount);
|
||||
|
||||
tr.mutate(MutationType.ADD, key, b.array());
|
||||
|
||||
return null;
|
||||
}
|
||||
tcx.run(tr -> {
|
||||
ByteBuffer b = ByteBuffer.allocate(8);
|
||||
b.order(ByteOrder.LITTLE_ENDIAN);
|
||||
b.putLong(amount);
|
||||
|
||||
tr.mutate(MutationType.ADD, key, b.array());
|
||||
|
||||
return null;
|
||||
});
|
||||
}
|
||||
|
||||
|
@ -102,66 +100,56 @@ Here’s a simple implementation of multimaps with multisets as described:
|
|||
|
||||
public static void add(TransactionContext tcx, final String index,
|
||||
final Object value){
|
||||
tcx.run(new Function<Transaction,Void>() {
|
||||
public Void apply(Transaction tr){
|
||||
addHelp(tr, multi.subspace(Tuple.from(index,value)).getKey(),1l);
|
||||
return null;
|
||||
}
|
||||
tcx.run(tr -> {
|
||||
addHelp(tr, multi.subspace(Tuple.from(index,value)).getKey(),1l);
|
||||
return null;
|
||||
});
|
||||
}
|
||||
|
||||
public static void subtract(TransactionContext tcx, final String index,
|
||||
final Object value){
|
||||
tcx.run(new Function<Transaction,Void>() {
|
||||
public Void apply(Transaction tr){
|
||||
Future<byte[]> v = tr.get(multi.subspace(
|
||||
Tuple.from(index,value)).getKey());
|
||||
|
||||
if(v.get() != null && getLong(v.get()) > 1l){
|
||||
addHelp(tr, multi.subspace(Tuple.from(index,value)).getKey(), -1l);
|
||||
} else {
|
||||
tr.clear(multi.subspace(Tuple.from(index,value)).getKey());
|
||||
}
|
||||
return null;
|
||||
tcx.run(tr -> {
|
||||
Future<byte[]> v = tr.get(multi.subspace(
|
||||
Tuple.from(index,value)).getKey());
|
||||
|
||||
if(v.get() != null && getLong(v.get()) > 1l){
|
||||
addHelp(tr, multi.subspace(Tuple.from(index,value)).getKey(), -1l);
|
||||
} else {
|
||||
tr.clear(multi.subspace(Tuple.from(index,value)).getKey());
|
||||
}
|
||||
return null;
|
||||
});
|
||||
}
|
||||
|
||||
public static ArrayList<Object> get(TransactionContext tcx, final String index){
|
||||
return tcx.run(new Function<Transaction,ArrayList<Object> >() {
|
||||
public ArrayList<Object> apply(Transaction tr){
|
||||
ArrayList<Object> vals = new ArrayList<Object>();
|
||||
for(KeyValue kv : tr.getRange(multi.subspace(
|
||||
Tuple.from(index)).range())){
|
||||
vals.add(multi.unpack(kv.getKey()).get(1));
|
||||
}
|
||||
return vals;
|
||||
return tcx.run(tr -> {
|
||||
ArrayList<Object> vals = new ArrayList<Object>();
|
||||
for(KeyValue kv : tr.getRange(multi.subspace(
|
||||
Tuple.from(index)).range())){
|
||||
vals.add(multi.unpack(kv.getKey()).get(1));
|
||||
}
|
||||
return vals;
|
||||
});
|
||||
}
|
||||
|
||||
public static HashMap<Object,Long> getCounts(TransactionContext tcx,
|
||||
final String index){
|
||||
return tcx.run(new Function<Transaction,HashMap<Object,Long> >() {
|
||||
public HashMap<Object,Long> apply(Transaction tr){
|
||||
HashMap<Object,Long> vals = new HashMap<Object,Long>();
|
||||
for(KeyValue kv : tr.getRange(multi.subspace(
|
||||
Tuple.from(index)).range())){
|
||||
vals.put(multi.unpack(kv.getKey()).get(1),
|
||||
getLong(kv.getValue()));
|
||||
}
|
||||
return vals;
|
||||
return tcx.run(tr -> {
|
||||
HashMap<Object,Long> vals = new HashMap<Object,Long>();
|
||||
for(KeyValue kv : tr.getRange(multi.subspace(
|
||||
Tuple.from(index)).range())){
|
||||
vals.put(multi.unpack(kv.getKey()).get(1),
|
||||
getLong(kv.getValue()));
|
||||
}
|
||||
return vals;
|
||||
});
|
||||
}
|
||||
|
||||
public static boolean isElement(TransactionContext tcx, final String index,
|
||||
final Object value){
|
||||
return tcx.run(new Function<Transaction,Boolean>() {
|
||||
public Boolean apply(Transaction tr){
|
||||
return tr.get(multi.subspace(
|
||||
Tuple.from(index, value)).getKey()).get() != null;
|
||||
}
|
||||
return tcx.run(tr -> {
|
||||
return tr.get(multi.subspace(
|
||||
Tuple.from(index, value)).getKey()).get() != null;
|
||||
});
|
||||
}
|
||||
}
|
||||
|
|
|
@ -2,21 +2,23 @@
|
|||
Performance
|
||||
###########
|
||||
|
||||
FoundationDB uses commodity hardware to provide your applications with millions of database operations per penny.
|
||||
FoundationDB uses commodity hardware to provide high throughputs and low latencies to your application at a variety of scales.
|
||||
|
||||
Scaling
|
||||
=======
|
||||
|
||||
FoundationDB has an unmatched ability to scale linearly as you add more cores to a cluster.
|
||||
FoundationDB scales linearly with the number of cores in a cluster over a wide range of sizes.
|
||||
|
||||
.. image:: /images/scaling.png
|
||||
|
||||
Here, a cluster of commodity hardware scales to **8.2 million** operations/sec doing a 90% read and 10% write workload.
|
||||
Here, a cluster of commodity hardware scales to **8.2 million** operations/sec doing a 90% read and 10% write workload with 16 byte keys and values between 8 and 100 bytes.
|
||||
|
||||
The scaling graph uses a 24-machine EC2 c3.8xlarge cluster in which each machine has a 16-core processor. We ran a FoundationDB server process on each core, yielding a 384-process cluster for the largest test, and scaled the cluster down for each smaller test.
|
||||
|
||||
Scaling is the ability to efficiently deliver operations at different scales. For FoundationDB, the relevant operations are reads and writes, measured in operations per sec. Scale is measured in the number of processes, which will usually track the number of available cores. FoundationDB offers scalability from partial utilization of a single core on a single machine to full utilization of dozens of powerful multi-core machines in a cluster.
|
||||
|
||||
See our :ref:`known limitations <cluster-size>` for details about the limits of cluster scalability.
|
||||
|
||||
Latency
|
||||
=======
|
||||
|
||||
|
@ -42,9 +44,9 @@ For FoundationDB, the significant latencies are those experienced by a Foundatio
|
|||
|
||||
* **Transaction start**. This latency will be experienced as part of the first read in a transaction as the read version is obtained. It will typically be a few milliseconds under moderate load, but under high write loads FoundationDB tries to concentrate most transaction latency here.
|
||||
|
||||
* **Reads**. Individual reads should take under 1 ms with moderate loads. If a transaction performs many reads by waiting for each to complete before starting the next, however, these small latencies can add up. You can thus reduce total latency (and potentially conflicts) by doing as many of your reads as possible in parallel. FoundationDB supports non-blocking reads, so it's easy to perform reads without waiting on them.
|
||||
* **Reads**. Individual reads should take under 1 ms with moderate loads. If a transaction performs many reads by waiting for each to complete before starting the next, however, these small latencies can add up. You can thus reduce total latency (and potentially :ref:`conflicts <conflict-ranges>`) by doing as many of your reads as possible in parallel. FoundationDB supports non-blocking reads, so it's easy to perform reads without waiting on them.
|
||||
|
||||
* **Commit**. Transactions that perform writes must be committed, and the commit will not succeed until the transaction is durable with full replication. This latency will average under 3 ms with moderate loads. Only a small part of this latency impacts transaction conflicts.
|
||||
* **Commit**. Transactions that perform writes must be committed, and the commit will not succeed until the transaction is durable with full replication. This latency should average under 3 ms with moderate loads. Only a small part of this latency impacts transaction :ref:`conflicts <conflict-ranges>`.
|
||||
|
||||
Throughput (per core)
|
||||
=====================
|
||||
|
@ -53,7 +55,7 @@ FoundationDB provides good throughput for the full range of read and write workl
|
|||
|
||||
.. image:: /images/throughput.png
|
||||
|
||||
FoundationDB offers two :ref:`storage engines <configuration-storage-engine>`, optimized for distinct use cases, both of which write to disk before reporting transactions committed. For each storage engine, the graph shows throughput of a single FoundationDB process running on a **single core** with saturating read/write workloads ranging from 100% reads to 100% writes. Throughput for the unmixed workloads is about:
|
||||
FoundationDB offers two :ref:`storage engines <configuration-storage-engine>`, optimized for distinct use cases, both of which write to disk before reporting transactions committed. For each storage engine, the graph shows throughput of a single FoundationDB process running on a **single core** with saturating read/write workloads ranging from 100% reads to 100% writes, all with 16 byte keys and values between 8 and 100 bytes. Throughput for the unmixed workloads is about:
|
||||
|
||||
========= ========== ==============
|
||||
workload ssd engine memory engine
|
||||
|
@ -79,7 +81,7 @@ FoundationDB is designed to achieve great performance under high concurrency fro
|
|||
|
||||
.. image:: /images/concurrency.png
|
||||
|
||||
Its asynchronous design allows it to handle very high concurrency, and for a typical workload with 90% reads and 10% writes, maximum throughput is reached at about 200 concurrent operations, achieved with **20** concurrent transactions per FoundationDB process for a workload using 10 ops/transaction.
|
||||
Its asynchronous design allows it to handle very high concurrency, and for a typical workload with 90% reads and 10% writes, maximum throughput is reached at about 200 concurrent operations. This number of operations was achieved with **20** concurrent transactions per FoundationDB process each running 10 operations with 16 byte keys and values between 8 and 100 bytes.
|
||||
|
||||
The concurrency graph uses a single FoundationDB server process on a single core (E3-1240).
|
||||
|
||||
|
@ -102,3 +104,5 @@ A lot of things affect the simple first-order model of performance you see here.
|
|||
* Not all CPUs are the same speed.
|
||||
* To keep up with the performance modeled above, your disk subsystem will need to do a little over 1 IOPS per write, and about 1 IOPS per (uncached) read.
|
||||
* Network performance tuning at the operating system level can be very important for both latency and throughput, especially in larger clusters.
|
||||
* Running DR and/or backup requires applying each mutation multiple times and then reading those mutations from the database. Using either feature will reduce throughput.
|
||||
* See our :ref:`known limitations <known-limitations>` for other considerations which may affect performance.
|
||||
|
|
|
@ -56,32 +56,30 @@ Here’s a basic function that successively reads sub-ranges of a size determine
|
|||
.. code-block:: java
|
||||
|
||||
public static void getRangeLimited(TransactionContext tcx, final KeySelector begin, final KeySelector end){
|
||||
tcx.run(new Function<Transaction,Void>() {
|
||||
public Void apply(Transaction tr){
|
||||
boolean keysToCheck = true;
|
||||
ArrayList<Tuple> keysFound = new ArrayList<Tuple>();
|
||||
KeySelector n_begin = new KeySelector(begin.getKey(),true,begin.getOffset());
|
||||
while(keysToCheck){
|
||||
keysToCheck = false;
|
||||
for(KeyValue kv : tr.getRange(n_begin, end, LIMIT)){
|
||||
keysToCheck = true;
|
||||
Tuple t = Tuple.fromBytes(kv.getKey());
|
||||
if(keysFound.size() == 0
|
||||
|| !t.equals(keysFound.get(keysFound.size()-1))){
|
||||
keysFound.add(t);
|
||||
}
|
||||
}
|
||||
if(keysToCheck){
|
||||
n_begin = KeySelector.firstGreaterThan(keysFound.get(keysFound.size()-1).pack());
|
||||
ArrayList<Object> readableFound = new ArrayList<Object>();
|
||||
for(Tuple t : keysFound){
|
||||
readableFound.add(t.get(1));
|
||||
}
|
||||
System.out.println(readableFound);
|
||||
keysFound = new ArrayList<Tuple>();
|
||||
tcx.run(tr -> {
|
||||
boolean keysToCheck = true;
|
||||
ArrayList<Tuple> keysFound = new ArrayList<Tuple>();
|
||||
KeySelector n_begin = new KeySelector(begin.getKey(),true,begin.getOffset());
|
||||
while(keysToCheck){
|
||||
keysToCheck = false;
|
||||
for(KeyValue kv : tr.getRange(n_begin, end, LIMIT)){
|
||||
keysToCheck = true;
|
||||
Tuple t = Tuple.fromBytes(kv.getKey());
|
||||
if(keysFound.size() == 0
|
||||
|| !t.equals(keysFound.get(keysFound.size()-1))){
|
||||
keysFound.add(t);
|
||||
}
|
||||
}
|
||||
return null;
|
||||
if(keysToCheck){
|
||||
n_begin = KeySelector.firstGreaterThan(keysFound.get(keysFound.size()-1).pack());
|
||||
ArrayList<Object> readableFound = new ArrayList<Object>();
|
||||
for(Tuple t : keysFound){
|
||||
readableFound.add(t.get(1));
|
||||
}
|
||||
System.out.println(readableFound);
|
||||
keysFound = new ArrayList<Tuple>();
|
||||
}
|
||||
}
|
||||
return null;
|
||||
});
|
||||
}
|
||||
|
|
|
@ -96,38 +96,32 @@ In this example, we’re storing user data based on user ID but sometimes need t
|
|||
// TODO These three methods (setUser, getUser, and getUserIDsInRegion)
|
||||
// are all in the recipe book.
|
||||
public static void setUser(TransactionContext tcx, final String ID, final String name, final String zipcode){
|
||||
tcx.run(new Function<Transaction,Void>() {
|
||||
public Void apply(Transaction tr){
|
||||
tr.set(main.pack(Tuple.from(ID,zipcode)), Tuple.from(name).pack());
|
||||
tr.set(index.pack(Tuple.from(zipcode,ID)), Tuple.from().pack());
|
||||
return null;
|
||||
}
|
||||
tcx.run(tr ->
|
||||
tr.set(main.pack(Tuple.from(ID,zipcode)), Tuple.from(name).pack());
|
||||
tr.set(index.pack(Tuple.from(zipcode,ID)), Tuple.from().pack());
|
||||
return null;
|
||||
});
|
||||
}
|
||||
|
||||
// Normal lookup.
|
||||
public static String getUser(TransactionContext tcx, final String ID){
|
||||
return tcx.run(new Function<Transaction,String>() {
|
||||
public String apply(Transaction tr){
|
||||
for(KeyValue kv : tr.getRange(main.subspace(Tuple.from(ID)).range(), 1)){
|
||||
// Return user with correct ID (if exists).
|
||||
return Tuple.fromBytes(kv.getValue()).getString(0);
|
||||
}
|
||||
return "";
|
||||
return tcx.run(tr -> {
|
||||
for(KeyValue kv : tr.getRange(main.subspace(Tuple.from(ID)).range(), 1)){
|
||||
// Return user with correct ID (if exists).
|
||||
return Tuple.fromBytes(kv.getValue()).getString(0);
|
||||
}
|
||||
return "";
|
||||
});
|
||||
}
|
||||
|
||||
// Index lookup.
|
||||
public static ArrayList<String> getUserIDsInRegion(TransactionContext tcx, final String zipcode){
|
||||
return tcx.run(new Function<Transaction,ArrayList<String>>() {
|
||||
public ArrayList<String> apply(Transaction tr){
|
||||
ArrayList<String> IDs = new ArrayList<String>();
|
||||
for(KeyValue kv : tr.getRange(index.subspace(Tuple.from(zipcode)).range())){
|
||||
IDs.add(index.unpack(kv.getKey()).getString(1));
|
||||
}
|
||||
return IDs;
|
||||
return tcx.run(tr -> {
|
||||
ArrayList<String> IDs = new ArrayList<String>();
|
||||
for(KeyValue kv : tr.getRange(index.subspace(Tuple.from(zipcode)).range())){
|
||||
IDs.add(index.unpack(kv.getKey()).getString(1));
|
||||
}
|
||||
return IDs;
|
||||
});
|
||||
}
|
||||
}
|
||||
|
|
|
@ -55,19 +55,17 @@ The spatial index will use a pair of subspaces: one, ``z_label``, to give us eff
|
|||
.. code-block:: java
|
||||
|
||||
public void setLocation(TransactionContext tcx, final String label, final long[] pos){
|
||||
tcx.run(new Function<Transaction,Void>() {
|
||||
public Void apply(Transaction tr){
|
||||
long z = xyToZ(pos);
|
||||
long previous;
|
||||
// Read labelZ.subspace(Tuple.from(label)) to find previous z.
|
||||
if(/* there is a previous z */){
|
||||
tr.clear(labelZ.pack(Tuple.from(label,previous)));
|
||||
tr.clear(zLabel.pack(Tuple.from(previous,label)));
|
||||
}
|
||||
tr.set(labelZ.pack(Tuple.from(label,z)),Tuple.from().pack());
|
||||
tr.set(zLabel.pack(Tuple.from(z,label)),Tuple.from().pack());
|
||||
return null;
|
||||
tcx.run(tr -> {
|
||||
long z = xyToZ(pos);
|
||||
long previous;
|
||||
// Read labelZ.subspace(Tuple.from(label)) to find previous z.
|
||||
if(/* there is a previous z */){
|
||||
tr.clear(labelZ.pack(Tuple.from(label,previous)));
|
||||
tr.clear(zLabel.pack(Tuple.from(previous,label)));
|
||||
}
|
||||
tr.set(labelZ.pack(Tuple.from(label,z)),Tuple.from().pack());
|
||||
tr.set(zLabel.pack(Tuple.from(z,label)),Tuple.from().pack());
|
||||
return null;
|
||||
});
|
||||
}
|
||||
|
||||
|
|
|
@ -45,12 +45,10 @@ The client performs transactions on data in the subspace current in the usual ma
|
|||
final DirectorySubspace newspace = workspace.getNew().get();
|
||||
try {
|
||||
clearSubspace(db, newspace);
|
||||
db.run(new Function<Transaction,Void>() {
|
||||
public Void apply(Transaction tr){
|
||||
tr.set(newspace.pack(Tuple.from(3)),Tuple.from("c").pack());
|
||||
tr.set(newspace.pack(Tuple.from(4)), Tuple.from("d").pack());
|
||||
return null;
|
||||
}
|
||||
db.run(tr -> {
|
||||
tr.set(newspace.pack(Tuple.from(3)),Tuple.from("c").pack());
|
||||
tr.set(newspace.pack(Tuple.from(4)), Tuple.from("d").pack());
|
||||
return null;
|
||||
});
|
||||
} finally {
|
||||
// Asynchronous operation--wait until result is reached.
|
||||
|
@ -87,16 +85,12 @@ Here's a simple Workspace class for swapping in a new workspace supporting the b
|
|||
return dir.createOrOpen(this.db, PathUtil.from("new"));
|
||||
}
|
||||
public Future<DirectorySubspace> replaceWithNew() {
|
||||
return this.db.runAsync(new Function<Transaction,Future<DirectorySubspace>>() {
|
||||
public Future<DirectorySubspace> apply(final Transaction tr){
|
||||
return dir.remove(tr, PathUtil.from("current")) // Clear the old current.
|
||||
.flatMap(new Function<Void,Future<DirectorySubspace>>() {
|
||||
public Future<DirectorySubspace> apply(Void arg0) {
|
||||
// Replace the old directory with the new one.
|
||||
return dir.move(tr, PathUtil.from("new"), PathUtil.from("current"));
|
||||
}
|
||||
});
|
||||
}
|
||||
return this.db.runAsync(tr -> {
|
||||
return dir.remove(tr, PathUtil.from("current")) // Clear the old current.
|
||||
.flatMap(() -> {
|
||||
// Replace the old directory with the new one.
|
||||
return dir.move(tr, PathUtil.from("new"), PathUtil.from("current"));
|
||||
});
|
||||
});
|
||||
}
|
||||
}
|
||||
|
|
|
@ -80,87 +80,75 @@ Here’s a simple implementation of the basic table pattern:
|
|||
|
||||
public static void setCell(TransactionContext tcx, final String row,
|
||||
final String column, final Object value){
|
||||
tcx.run(new Function<Transaction, Void>() {
|
||||
public Void apply(Transaction tr){
|
||||
tr.set(rowIndex.subspace(Tuple.from(row, column)).getKey(),
|
||||
pack(value));
|
||||
tr.set(colIndex.subspace(Tuple.from(column,row)).getKey(),
|
||||
pack(value));
|
||||
|
||||
return null;
|
||||
}
|
||||
tcx.run(tr -> {
|
||||
tr.set(rowIndex.subspace(Tuple.from(row, column)).getKey(),
|
||||
pack(value));
|
||||
tr.set(colIndex.subspace(Tuple.from(column,row)).getKey(),
|
||||
pack(value));
|
||||
|
||||
return null;
|
||||
});
|
||||
}
|
||||
|
||||
public static Object getCell(TransactionContext tcx, final String row,
|
||||
final String column){
|
||||
return tcx.run(new Function<Transaction, Object>() {
|
||||
public Object apply(Transaction tr){
|
||||
return unpack(tr.get(rowIndex.subspace(
|
||||
Tuple.from(row,column)).getKey()).get());
|
||||
}
|
||||
return tcx.run(tr -> {
|
||||
return unpack(tr.get(rowIndex.subspace(
|
||||
Tuple.from(row,column)).getKey()).get());
|
||||
});
|
||||
}
|
||||
|
||||
public static void setRow(TransactionContext tcx, final String row,
|
||||
final Map<String,Object> cols){
|
||||
tcx.run(new Function<Transaction, Void>() {
|
||||
public Void apply(Transaction tr){
|
||||
tr.clear(rowIndex.subspace(Tuple.from(row)).range());
|
||||
|
||||
for(Map.Entry<String,Object> cv : cols.entrySet()){
|
||||
setCell(tr, row, cv.getKey(), cv.getValue());
|
||||
}
|
||||
return null;
|
||||
tcx.run(tr -> {
|
||||
tr.clear(rowIndex.subspace(Tuple.from(row)).range());
|
||||
|
||||
for(Map.Entry<String,Object> cv : cols.entrySet()){
|
||||
setCell(tr, row, cv.getKey(), cv.getValue());
|
||||
}
|
||||
return null;
|
||||
});
|
||||
}
|
||||
|
||||
public static void setColumn(TransactionContext tcx, final String column,
|
||||
final Map<String,Object> rows){
|
||||
tcx.run(new Function<Transaction,Void>() {
|
||||
public Void apply(Transaction tr){
|
||||
tr.clear(colIndex.subspace(Tuple.from(column)).range());
|
||||
for(Map.Entry<String,Object> rv : rows.entrySet()){
|
||||
setCell(tr, rv.getKey(), column, rv.getValue());
|
||||
}
|
||||
return null;
|
||||
tcx.run(tr -> {
|
||||
tr.clear(colIndex.subspace(Tuple.from(column)).range());
|
||||
for(Map.Entry<String,Object> rv : rows.entrySet()){
|
||||
setCell(tr, rv.getKey(), column, rv.getValue());
|
||||
}
|
||||
return null;
|
||||
});
|
||||
}
|
||||
|
||||
public static TreeMap<String,Object> getRow(TransactionContext tcx,
|
||||
final String row){
|
||||
return tcx.run(new Function<Transaction,TreeMap<String,Object> >() {
|
||||
public TreeMap<String,Object> apply(Transaction tr){
|
||||
TreeMap<String,Object> cols = new TreeMap<String,Object>();
|
||||
|
||||
for(KeyValue kv : tr.getRange(
|
||||
rowIndex.subspace(Tuple.from(row)).range())){
|
||||
cols.put(rowIndex.unpack(kv.getKey()).getString(1),
|
||||
unpack(kv.getValue()));
|
||||
}
|
||||
|
||||
return cols;
|
||||
return tcx.run(tr -> {
|
||||
TreeMap<String,Object> cols = new TreeMap<String,Object>();
|
||||
|
||||
for(KeyValue kv : tr.getRange(
|
||||
rowIndex.subspace(Tuple.from(row)).range())){
|
||||
cols.put(rowIndex.unpack(kv.getKey()).getString(1),
|
||||
unpack(kv.getValue()));
|
||||
}
|
||||
|
||||
return cols;
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
public static TreeMap<String,Object> getColumn(TransactionContext tcx,
|
||||
final String column){
|
||||
return tcx.run(new Function<Transaction,TreeMap<String,Object> >() {
|
||||
public TreeMap<String,Object> apply(Transaction tr){
|
||||
TreeMap<String,Object> rows = new TreeMap<String,Object>();
|
||||
|
||||
for(KeyValue kv : tr.getRange(
|
||||
colIndex.subspace(Tuple.from(column)).range())){
|
||||
rows.put(colIndex.unpack(kv.getKey()).getString(1),
|
||||
unpack(kv.getValue()));
|
||||
}
|
||||
|
||||
return rows;
|
||||
return tcx.run(tr -> {
|
||||
TreeMap<String,Object> rows = new TreeMap<String,Object>();
|
||||
|
||||
for(KeyValue kv : tr.getRange(
|
||||
colIndex.subspace(Tuple.from(column)).range())){
|
||||
rows.put(colIndex.unpack(kv.getKey()).getString(1),
|
||||
unpack(kv.getValue()));
|
||||
}
|
||||
|
||||
return rows;
|
||||
});
|
||||
}
|
||||
}
|
||||
|
|
|
@ -83,20 +83,16 @@ Here’s the basic pattern:
|
|||
}
|
||||
|
||||
public static Object get(TransactionContext tcx, final long index){
|
||||
return tcx.run(new Function<Transaction,Object>() {
|
||||
public Object apply(Transaction tr){
|
||||
return Tuple.fromBytes(tr.get(vector.pack(
|
||||
Tuple.from(index))).get()).get(0);
|
||||
}
|
||||
return tcx.run(tr -> {
|
||||
return Tuple.fromBytes(tr.get(vector.pack(
|
||||
Tuple.from(index))).get()).get(0);
|
||||
});
|
||||
}
|
||||
|
||||
public static void set(TransactionContext tcx, final long index, final Object value){
|
||||
tcx.run(new Function<Transaction,Void>() {
|
||||
public Void apply(Transaction tr){
|
||||
tr.set(vector.pack(Tuple.from(index)), Tuple.from(value).pack());
|
||||
return null;
|
||||
}
|
||||
tcx.run(tr -> {
|
||||
tr.set(vector.pack(Tuple.from(index)), Tuple.from(value).pack());
|
||||
return null;
|
||||
});
|
||||
}
|
||||
}
|
||||
|
|
|
@ -79,7 +79,7 @@ namespace vexillographer
|
|||
# THE SOFTWARE.
|
||||
|
||||
# Documentation for this API can be found at
|
||||
# https://foundationdb.org/documentation/api-ruby.html
|
||||
# https://www.foundationdb.org/documentation/api-ruby.html
|
||||
|
||||
module FDB");
|
||||
foreach (Scope s in Enum.GetValues(typeof(Scope)))
|
||||
|
|
|
@ -5,8 +5,8 @@ Priority: optional
|
|||
Architecture: amd64
|
||||
Conflicts: foundationdb (<< 0.1.4)
|
||||
Depends: libc6 (>= 2.11), adduser
|
||||
Maintainer: FoundationDB <fdbdist@apple.com>
|
||||
Homepage: https://foundationdb.org
|
||||
Maintainer: FoundationDB <fdb-dist@apple.com>
|
||||
Homepage: https://www.foundationdb.org
|
||||
Description: FoundationDB clients and library
|
||||
FoundationDB is a scalable, fault-tolerant, ordered key-value store
|
||||
with full ACID transactions.
|
||||
|
|
|
@ -5,8 +5,8 @@ Priority: optional
|
|||
Architecture: amd64
|
||||
Conflicts: foundationdb (<< 0.1.4)
|
||||
Depends: foundationdb-clients (= VERSION-RELEASE), adduser, libc6 (>= 2.11), python (>= 2.6)
|
||||
Maintainer: FoundationDB <fdbdist@apple.com>
|
||||
Homepage: https://foundationdb.org
|
||||
Maintainer: FoundationDB <fdb-dist@apple.com>
|
||||
Homepage: https://www.foundationdb.org
|
||||
Description: FoundationDB server
|
||||
FoundationDB is a scalable, fault-tolerant, ordered key-value store
|
||||
with full ACID transactions.
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
##
|
||||
## Configuration file for FoundationDB server processes
|
||||
## Full documentation is available at
|
||||
## https://foundationdb.org/documentation/configuration.html#foundationdb-conf
|
||||
## https://www.foundationdb.org/documentation/configuration.html#foundationdb-conf
|
||||
|
||||
[fdbmonitor]
|
||||
user = foundationdb
|
||||
|
|
|
@ -38,7 +38,7 @@ def getOrValidateAddress(address):
|
|||
if address is None:
|
||||
try:
|
||||
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
|
||||
s.connect(('foundationdb.org', 80))
|
||||
s.connect(('www.foundationdb.org', 80))
|
||||
return s.getsockname()[0]
|
||||
except Exception as e:
|
||||
print 'ERROR: Could not determine an address'
|
||||
|
|
|
@ -297,7 +297,7 @@
|
|||
<Feature Id='FeatureClient' Title='FoundationDB Clients'
|
||||
Description='Installs the FoundationDB command line interface, client library (required by all bindings) and C header files.'
|
||||
Level='1' Absent='disallow' AllowAdvertise='no' TypicalDefault='install'>
|
||||
<!-- This is the equivalent of the 'clients' package on linux, except without Python -->
|
||||
<!-- This is the equivalent of the 'clients' package on linux -->
|
||||
<ComponentRef Id='CreateClusterFileDir'/> <!-- In a client only install, we don't make any files here, but want it to be easy to drop fdb.cluster here -->
|
||||
<ComponentRef Id='PathAddition'/>
|
||||
<ComponentRef Id='InstallPathEnvVar'/>
|
||||
|
@ -390,7 +390,7 @@
|
|||
</InstallExecuteSequence>
|
||||
|
||||
<Property Id="WIXUI_EXITDIALOGOPTIONALTEXT"
|
||||
Value="Thank you for installing FoundationDB. For documentation, please visit https://foundationdb.org/documentation.
|
||||
Value="Thank you for installing FoundationDB. For documentation, please visit https://www.foundationdb.org/documentation.
|
||||
|
||||
To allow path variables to update, please restart your IDE and any open terminal sessions." />
|
||||
<UIRef Id='WixUI_FeatureTree' />
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
##
|
||||
## Configuration file for FoundationDB server processes
|
||||
## Full documentation is available at
|
||||
## https://foundationdb.org/documentation/configuration.html#foundationdb-conf
|
||||
## https://www.foundationdb.org/documentation/configuration.html#foundationdb-conf
|
||||
|
||||
[fdbmonitor]
|
||||
restart_delay = 20
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
##
|
||||
## Configuration file for FoundationDB server processes
|
||||
## Full documentation is available at
|
||||
## https://foundationdb.org/documentation/configuration.html#foundationdb-conf
|
||||
## https://www.foundationdb.org/documentation/configuration.html#foundationdb-conf
|
||||
|
||||
[general]
|
||||
restart_delay = 60
|
||||
|
|
|
@ -3,4 +3,4 @@
|
|||
{\colortbl;\red255\green255\blue255;}
|
||||
\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\pardirnatural
|
||||
|
||||
\f0\fs26 \cf0 Thank you for installing FoundationDB. For documentation, please visit {\field{\*\fldinst HYPERLINK "https://foundationdb.org/documentation"}{\fldrslt https://foundationdb.org/documentation}}.}
|
||||
\f0\fs26 \cf0 Thank you for installing FoundationDB. For documentation, please visit {\field{\*\fldinst HYPERLINK "https://www.foundationdb.org/documentation"}{\fldrslt https://www.foundationdb.org/documentation}}.}
|
|
@ -2,9 +2,9 @@ Name: foundationdb
|
|||
Version: FDBVERSION
|
||||
Release: FDBRELEASE
|
||||
Group: Applications/Databases
|
||||
License: FoundationDB Community License Agreement (https://foundationdb.org/license)
|
||||
URL: https://foundationdb.org
|
||||
Packager: FoundationDB <fdbdist@apple.com>
|
||||
License: FoundationDB Community License Agreement (https://www.foundationdb.org/license)
|
||||
URL: https://www.foundationdb.org
|
||||
Packager: FoundationDB <fdb-dist@apple.com>
|
||||
BuildArch: x86_64
|
||||
Summary: Ordered key-value store with full ACID transactions
|
||||
Vendor: FoundationDB
|
||||
|
|
Loading…
Reference in New Issue