Merge pull request #920 from etschannen/master

merge 6.0 into master
Evan Tschannen (committed by GitHub) · 2018-11-13 16:09:12 -08:00 · commit cb59571273
22 changed files with 480 additions and 460 deletions


@ -59,9 +59,9 @@ _java_cmd = 'java -ea -cp %s:%s com.apple.foundationdb.test.' % (
testers = {
'python': Tester('python', 'python ' + _absolute_path('python/tests/tester.py'), 2040, 23, MAX_API_VERSION, types=ALL_TYPES),
'python3': Tester('python3', 'python3 ' + _absolute_path('python/tests/tester.py'), 2040, 23, MAX_API_VERSION, types=ALL_TYPES),
'ruby': Tester('ruby', _absolute_path('ruby/tests/tester.rb'), 64, 23, MAX_API_VERSION),
'ruby': Tester('ruby', _absolute_path('ruby/tests/tester.rb'), 2040, 23, MAX_API_VERSION),
'java': Tester('java', _java_cmd + 'StackTester', 2040, 510, MAX_API_VERSION, types=ALL_TYPES),
'java_async': Tester('java', _java_cmd + 'AsyncStackTester', 2040, 510, MAX_API_VERSION, types=ALL_TYPES),
'go': Tester('go', _absolute_path('go/build/bin/_stacktester'), 63, 200, MAX_API_VERSION),
'go': Tester('go', _absolute_path('go/build/bin/_stacktester'), 2040, 200, MAX_API_VERSION),
'flow': Tester('flow', _absolute_path('flow/bin/fdb_flow_tester'), 63, 500, MAX_API_VERSION, directory_snapshot_ops_enabled=False),
}
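For context: the third argument to Tester appears to be the maximum integer width in bits, so raising ruby and go from 64 and 63 to 2040 matches the Tuple layer's new 255-byte integer limit (255 × 8 = 2040). A quick boundary check in Go, as a sketch (not part of the diff):

package main

import (
    "fmt"
    "math/big"
)

func main() {
    // Largest tuple-encodable integer under the new limit: 2^2040 - 1.
    max := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 2040), big.NewInt(1))
    fmt.Println(max.BitLen(), len(max.Bytes())) // prints: 2040 255
}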


@ -24,7 +24,7 @@ fdb_c_CFLAGS := $(fdbclient_CFLAGS)
fdb_c_LDFLAGS := $(fdbrpc_LDFLAGS)
fdb_c_LIBS := lib/libfdbclient.a lib/libfdbrpc.a lib/libflow.a $(FDB_TLS_LIB)
fdb_c_STATIC_LIBS := $(TLS_LIBS)
fdb_c_tests_LIBS := -Llib -lfdb_c
fdb_c_tests_LIBS := -shared -Llib -lfdb_c
fdb_c_tests_HEADERS := -Ibindings/c
CLEAN_TARGETS += fdb_c_tests_clean


@ -28,6 +28,7 @@ import (
"github.com/apple/foundationdb/bindings/go/src/fdb"
"github.com/apple/foundationdb/bindings/go/src/fdb/tuple"
"log"
"math/big"
"os"
"reflect"
"runtime"
@ -103,7 +104,7 @@ func (sm *StackMachine) waitAndPop() (ret stackEntry) {
switch el := ret.item.(type) {
case []byte:
ret.item = el
case int64, string, bool, tuple.UUID, float32, float64, tuple.Tuple:
case int64, uint64, *big.Int, string, bool, tuple.UUID, float32, float64, tuple.Tuple:
ret.item = el
case fdb.Key:
ret.item = []byte(el)
@ -174,8 +175,10 @@ func tupleToString(t tuple.Tuple) string {
buffer.WriteString(", ")
}
switch el := el.(type) {
case int64:
case int64, uint64:
buffer.WriteString(fmt.Sprintf("%d", el))
case *big.Int:
buffer.WriteString(fmt.Sprintf("%s", el))
case []byte:
buffer.WriteString(fmt.Sprintf("%+q", string(el)))
case string:
@ -184,9 +187,7 @@ func tupleToString(t tuple.Tuple) string {
buffer.WriteString(fmt.Sprintf("%t", el))
case tuple.UUID:
buffer.WriteString(hex.EncodeToString(el[:]))
case float32:
buffer.WriteString(fmt.Sprintf("%f", el))
case float64:
case float32, float64:
buffer.WriteString(fmt.Sprintf("%f", el))
case nil:
buffer.WriteString("nil")
@ -205,8 +206,10 @@ func (sm *StackMachine) dumpStack() {
fmt.Printf(" %d.", sm.stack[i].idx)
el := sm.stack[i].item
switch el := el.(type) {
case int64:
case int64, uint64:
fmt.Printf(" %d", el)
case *big.Int:
fmt.Printf(" %s", el)
case fdb.FutureNil:
fmt.Printf(" FutureNil")
case fdb.FutureByteSlice:
@ -225,9 +228,7 @@ func (sm *StackMachine) dumpStack() {
fmt.Printf(" %s", tupleToString(el))
case tuple.UUID:
fmt.Printf(" %s", hex.EncodeToString(el[:]))
case float32:
fmt.Printf(" %f", el)
case float64:
case float32, float64:
fmt.Printf(" %f", el)
case nil:
fmt.Printf(" nil")
@ -490,7 +491,27 @@ func (sm *StackMachine) processInst(idx int, inst tuple.Tuple) {
case op == "POP":
sm.stack = sm.stack[:len(sm.stack)-1]
case op == "SUB":
sm.store(idx, sm.waitAndPop().item.(int64)-sm.waitAndPop().item.(int64))
var x, y *big.Int
switch x1 := sm.waitAndPop().item.(type) {
case *big.Int:
x = x1
case int64:
x = big.NewInt(x1)
case uint64:
x = new(big.Int)
x.SetUint64(x1)
}
switch y1 := sm.waitAndPop().item.(type) {
case *big.Int:
y = y1
case int64:
y = big.NewInt(y1)
case uint64:
y = new(big.Int)
y.SetUint64(y1)
}
sm.store(idx, x.Sub(x, y))
case op == "CONCAT":
str1 := sm.waitAndPop().item
str2 := sm.waitAndPop().item
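Both type switches in the new SUB case perform the same int64/uint64/*big.Int normalization; a hypothetical helper, toBigInt (not in the diff), shows the intent in isolation:

package main

import (
    "fmt"
    "math/big"
)

// toBigInt mirrors the normalization done on each popped SUB operand:
// any supported integer representation becomes a *big.Int.
func toBigInt(item interface{}) *big.Int {
    switch v := item.(type) {
    case *big.Int:
        return v
    case int64:
        return big.NewInt(v)
    case uint64:
        return new(big.Int).SetUint64(v)
    }
    return nil // unreachable for valid stack entries
}

func main() {
    x := toBigInt(uint64(1) << 63)
    y := toBigInt(int64(1))
    fmt.Println(new(big.Int).Sub(x, y)) // prints: 9223372036854775807
}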


@ -30,9 +30,10 @@
// (https://apple.github.io/foundationdb/data-modeling.html#tuples).
//
// FoundationDB tuples can currently encode byte and unicode strings, integers,
// floats, doubles, booleans, UUIDs, tuples, and NULL values. In Go these are
// represented as []byte (or fdb.KeyConvertible), string, int64 (or int),
// float32, float64, bool, UUID, Tuple, and nil.
// large integers, floats, doubles, booleans, UUIDs, tuples, and NULL values.
// In Go these are represented as []byte (or fdb.KeyConvertible), string, int64
// (or int, uint, uint64), *big.Int (or big.Int), float32, float64, bool,
// UUID, Tuple, and nil.
package tuple
import (
@ -40,6 +41,7 @@ import (
"encoding/binary"
"fmt"
"math"
"math/big"
"github.com/apple/foundationdb/bindings/go/src/fdb"
)
@ -50,7 +52,8 @@ import (
// result in a runtime panic).
//
// The valid types for TupleElement are []byte (or fdb.KeyConvertible), string,
// int64 (or int), float, double, bool, UUID, Tuple, and nil.
// int64 (or int, uint, uint64), *big.Int (or big.Int), float, double, bool,
// UUID, Tuple, and nil.
type TupleElement interface{}
// Tuple is a slice of objects that can be encoded as FoundationDB tuples. If
@ -59,7 +62,7 @@ type TupleElement interface{}
//
// Given a Tuple T containing objects only of these types, then T will be
// identical to the Tuple returned by unpacking the byte slice obtained by
// packing T (modulo type normalization to []byte and int64).
// packing T (modulo type normalization to []byte, uint64, and int64).
type Tuple []TupleElement
// UUID wraps a basic byte array as a UUID. We do not provide any special
@ -76,8 +79,8 @@ const bytesCode = 0x01
const stringCode = 0x02
const nestedCode = 0x05
const intZeroCode = 0x14
const posIntEnd = 0x1c
const negIntStart = 0x0c
const posIntEnd = 0x1d
const negIntStart = 0x0b
const floatCode = 0x20
const doubleCode = 0x21
const falseCode = 0x26
@ -96,6 +99,8 @@ var sizeLimits = []uint64{
1<<(8*8) - 1,
}
var minInt64BigInt = big.NewInt(math.MinInt64)
func bisectLeft(u uint64) int {
var n int
for sizeLimits[n] < u {
@ -148,30 +153,79 @@ func (p *packer) encodeBytes(code byte, b []byte) {
p.putByte(0x00)
}
func (p *packer) encodeInt(i int64) {
func (p *packer) encodeUint(i uint64) {
if i == 0 {
p.putByte(0x14)
p.putByte(intZeroCode)
return
}
var n int
n := bisectLeft(i)
var scratch [8]byte
switch {
case i > 0:
n = bisectLeft(uint64(i))
p.putByte(byte(intZeroCode + n))
binary.BigEndian.PutUint64(scratch[:], uint64(i))
case i < 0:
n = bisectLeft(uint64(-i))
p.putByte(byte(0x14 - n))
offsetEncoded := int64(sizeLimits[n]) + i
binary.BigEndian.PutUint64(scratch[:], uint64(offsetEncoded))
}
p.putByte(byte(intZeroCode + n))
binary.BigEndian.PutUint64(scratch[:], i)
p.putBytes(scratch[8-n:])
}
func (p *packer) encodeInt(i int64) {
if i >= 0 {
p.encodeUint(uint64(i))
return
}
n := bisectLeft(uint64(-i))
var scratch [8]byte
p.putByte(byte(intZeroCode - n))
offsetEncoded := int64(sizeLimits[n]) + i
binary.BigEndian.PutUint64(scratch[:], uint64(offsetEncoded))
p.putBytes(scratch[8-n:])
}
func (p *packer) encodeBigInt(i *big.Int) {
length := len(i.Bytes())
if length > 0xff {
panic(fmt.Sprintf("Integer magnitude is too large (more than 255 bytes)"))
}
if i.Sign() >= 0 {
intBytes := i.Bytes()
if length > 8 {
p.putByte(byte(posIntEnd))
p.putByte(byte(len(intBytes)))
} else {
p.putByte(byte(intZeroCode + length))
}
p.putBytes(intBytes)
} else {
add := new(big.Int).Lsh(big.NewInt(1), uint(length*8))
add.Sub(add, big.NewInt(1))
transformed := new(big.Int)
transformed.Add(i, add)
intBytes := transformed.Bytes()
if length > 8 {
p.putByte(byte(negIntStart))
p.putByte(byte(length ^ 0xff))
} else {
p.putByte(byte(intZeroCode - length))
}
// For large negative numbers whose absolute value begins with 0xff bytes,
// the transformed bytes may begin with 0x00 bytes. However, intBytes
// will only contain the non-zero suffix, so this loop is needed to make
// the value written be the correct length.
for i := len(intBytes); i < length; i++ {
p.putByte(0x00)
}
p.putBytes(intBytes)
}
}
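// Worked example for encodeBigInt (not part of the diff): i = -65280 (-0xff00)
// has a 2-byte magnitude, so add = 2^16 - 1 = 65535 and transformed = 255.
// transformed.Bytes() is just {0xff}, one byte short of length, so the padding
// loop emits a leading 0x00. Final encoding: 0x12 0x00 0xff
// (0x12 = intZeroCode - 2).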
func (p *packer) encodeFloat(f float32) {
var scratch [4]byte
binary.BigEndian.PutUint32(scratch[:], math.Float32bits(f))
@ -209,10 +263,18 @@ func (p *packer) encodeTuple(t Tuple, nested bool) {
if nested {
p.putByte(0xff)
}
case int64:
p.encodeInt(e)
case int:
p.encodeInt(int64(e))
case int64:
p.encodeInt(e)
case uint:
p.encodeUint(uint64(e))
case uint64:
p.encodeUint(e)
case *big.Int:
p.encodeBigInt(e)
case big.Int:
p.encodeBigInt(&e)
case []byte:
p.encodeBytes(bytesCode, e)
case fdb.KeyConvertible:
@ -243,8 +305,10 @@ func (p *packer) encodeTuple(t Tuple, nested bool) {
// Pack returns a new byte slice encoding the provided tuple. Pack will panic if
// the tuple contains an element of any type other than []byte,
// fdb.KeyConvertible, string, int64, int, float32, float64, bool, tuple.UUID,
// nil, or a Tuple with elements of valid types.
// fdb.KeyConvertible, string, int64, int, uint64, uint, *big.Int, big.Int, float32,
// float64, bool, tuple.UUID, nil, or a Tuple with elements of valid types. It will
// also panic if an integer is specified with a value outside the range
// [-2**2040+1, 2**2040-1]
//
// Tuple satisfies the fdb.KeyConvertible interface, so it is not necessary to
// call Pack when using a Tuple with a FoundationDB API function that requires a
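A minimal round-trip sketch of the expanded integer support (not part of the diff; assumes the bindings import path shown earlier):

package main

import (
    "fmt"
    "math/big"

    "github.com/apple/foundationdb/bindings/go/src/fdb/tuple"
)

func main() {
    huge, _ := new(big.Int).SetString("18446744073709551616", 10) // 2^64, a 9-byte magnitude
    b := tuple.Tuple{int64(-5), uint64(1) << 63, huge}.Pack()
    t, err := tuple.Unpack(b)
    // -5 still fits in int64, 2^63 only fits in uint64, and 2^64 needs *big.Int.
    fmt.Println(t, err) // [-5 9223372036854775808 18446744073709551616] <nil>
}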
@ -282,9 +346,9 @@ func decodeString(b []byte) (string, int) {
return string(bp), idx
}
func decodeInt(b []byte) (int64, int) {
func decodeInt(b []byte) (interface{}, int) {
if b[0] == intZeroCode {
return 0, 1
return int64(0), 1
}
var neg bool
@ -299,14 +363,55 @@ func decodeInt(b []byte) (int64, int) {
copy(bp[8-n:], b[1:n+1])
var ret int64
binary.Read(bytes.NewBuffer(bp), binary.BigEndian, &ret)
if neg {
ret -= int64(sizeLimits[n])
return ret - int64(sizeLimits[n]), n + 1
}
return ret, n + 1
if ret > 0 {
return ret, n + 1
}
// The encoded value claimed to be positive yet when put in an int64
// produced a negative value. This means that the number must be a positive
// 64-bit value that uses the most significant bit. This can be fit in a
// uint64, so return that. Note that this is the *only* time we return
// a uint64.
return uint64(ret), n + 1
}
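// Worked example for decodeInt (not part of the diff):
// b = {0x1c, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} has code
// intZeroCode+8, so n = 8 and the payload 0x8000000000000000 overflows int64
// to a negative value; the function therefore returns uint64(1)<<63.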
func decodeBigInt(b []byte) (interface{}, int) {
val := new(big.Int)
offset := 1
var length int
if b[0] == negIntStart || b[0] == posIntEnd {
length = int(b[1])
if b[0] == negIntStart {
length ^= 0xff
}
offset += 1
} else {
// Must be a negative 8 byte integer
length = 8
}
val.SetBytes(b[offset : length+offset])
if b[0] < intZeroCode {
sub := new(big.Int).Lsh(big.NewInt(1), uint(length)*8)
sub.Sub(sub, big.NewInt(1))
val.Sub(val, sub)
}
// This is the only value that fits in an int64 or uint64 that is decoded with this function
if val.Cmp(minInt64BigInt) == 0 {
return val.Int64(), length + offset
}
return val, length + offset
}
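// Worked example for decodeBigInt (not part of the diff):
// b = {0x1d, 0x09, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} uses
// posIntEnd with an explicit length byte: offset becomes 2, length = 9, and
// SetBytes reads 0x01 followed by eight zero bytes, decoding 2^64 as a *big.Int.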
func decodeFloat(b []byte) (float32, int) {
@ -357,8 +462,12 @@ func decodeTuple(b []byte, nested bool) (Tuple, int, error) {
el, off = decodeBytes(b[i:])
case b[i] == stringCode:
el, off = decodeString(b[i:])
case negIntStart <= b[i] && b[i] <= posIntEnd:
case negIntStart+1 < b[i] && b[i] < posIntEnd:
el, off = decodeInt(b[i:])
case negIntStart+1 == b[i] && (b[i+1]&0x80 != 0):
el, off = decodeInt(b[i:])
case negIntStart <= b[i] && b[i] <= posIntEnd:
el, off = decodeBigInt(b[i:])
case b[i] == floatCode:
if i+5 > len(b) {
return nil, i, fmt.Errorf("insufficient bytes to decode float starting at position %d of byte array for tuple", i)


@ -170,7 +170,7 @@ module FDB
Proc.new do || @setfunc.call(v[0], nil) end
when String then
Proc.new do |opt=nil| @setfunc.call(v[0], (opt.nil? ? opt : opt.encode('UTF-8')) ) end
when Fixnum then
when Integer then
Proc.new do |opt| @setfunc.call(v[0], [opt].pack("q<")) end
else
raise ArgumentError, "Don't know how to set options of type #{v[2].class}"


@ -35,8 +35,8 @@ module FDB
@@STRING_CODE = 0x02
@@NESTED_CODE = 0x05
@@INT_ZERO_CODE = 0x14
@@POS_INT_END = 0x1c
@@NEG_INT_START = 0x0c
@@POS_INT_END = 0x1d
@@NEG_INT_START = 0x0b
@@FLOAT_CODE = 0x20
@@DOUBLE_CODE = 0x21
@@FALSE_CODE = 0x26
@ -117,12 +117,28 @@ module FDB
elsif code == @@STRING_CODE
epos = find_terminator(v, pos+1)
[v.slice(pos+1, epos-pos-1).gsub("\x00\xFF", "\x00").force_encoding("UTF-8"), epos+1]
elsif code >= @@INT_ZERO_CODE && code <= @@POS_INT_END
elsif code >= @@INT_ZERO_CODE && code < @@POS_INT_END
n = code - @@INT_ZERO_CODE
[("\x00" * (8-n) + v.slice(pos+1, n)).unpack("Q>")[0], pos+n+1]
elsif code >= @@NEG_INT_START and code < @@INT_ZERO_CODE
elsif code > @@NEG_INT_START and code < @@INT_ZERO_CODE
n = @@INT_ZERO_CODE - code
[("\x00" * (8-n) + v.slice(pos+1, n)).unpack("Q>")[0]-@@size_limits[n], pos+n+1]
elsif code == @@POS_INT_END
length = v.getbyte(pos+1)
val = 0
length.times do |i|
val = val << 8
val += v.getbyte(pos+2+i)
end
[val, pos+length+2]
elsif code == @@NEG_INT_START
length = v.getbyte(pos+1) ^ 0xff
val = 0
length.times do |i|
val = val << 8
val += v.getbyte(pos+2+i)
end
[val - (1 << (length*8)) + 1, pos+length+2]
elsif code == @@FALSE_CODE
[false, pos+1]
elsif code == @@TRUE_CODE
@ -182,15 +198,34 @@ module FDB
raise ArgumentError, "unsupported encoding #{v.encoding.name}"
end
elsif v.kind_of? Integer
raise RangeError, "value outside inclusive range -2**64+1 to 2**64-1" if v < -2**64+1 || v > 2**64-1
raise RangeError, "Integer magnitude is too large (more than 255 bytes)" if v < -2**2040+1 || v > 2**2040-1
if v == 0
@@INT_ZERO_CODE.chr
elsif v > 0
n = bisect_left( @@size_limits, v )
(20+n).chr + [v].pack("Q>").slice(8-n, n)
if v > @@size_limits[-1]
length = (v.bit_length + 7) / 8
result = @@POS_INT_END.chr + length.chr
length.times do |i|
result << ((v >> (8 * (length-i-1))) & 0xff)
end
result
else
n = bisect_left( @@size_limits, v )
(@@INT_ZERO_CODE+n).chr + [v].pack("Q>").slice(8-n, n)
end
else
n = bisect_left( @@size_limits, -v )
(20-n).chr + [@@size_limits[n]+v].pack("Q>").slice(8-n, n)
if -v > @@size_limits[-1]
length = ((-v).bit_length + 7) / 8
v += (1 << (length * 8)) - 1
result = @@NEG_INT_START.chr + (length ^ 0xff).chr
length.times do |i|
result << ((v >> (8 * (length-i-1))) & 0xff)
end
result
else
n = bisect_left( @@size_limits, -v )
(@@INT_ZERO_CODE-n).chr + [@@size_limits[n]+v].pack("Q>").slice(8-n, n)
end
end
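# Worked example for the encoding above (not part of the diff): v = 2**64 has
# bit_length 65, so length = (65 + 7) / 8 = 9 bytes, which exceeds
# @@size_limits[-1]. The encoding is @@POS_INT_END.chr (0x1d), the length
# byte 0x09, then the nine big-endian magnitude bytes 0x01 0x00 ... 0x00.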
elsif v.kind_of? TrueClass
@@TRUE_CODE.chr

bindings/ruby/tests/tester.rb Normal file → Executable file


@ -1,8 +1,9 @@
FROM ubuntu:15.04
LABEL version=0.0.2
RUN sed -i -e 's/archive.ubuntu.com\|security.ubuntu.com/old-releases.ubuntu.com/g' -e 's/us\.old/old/g' /etc/apt/sources.list && apt-get clean
RUN apt-get update && apt-get --no-install-recommends install -y --force-yes bzip2 ca-certificates=20141019 adduser apt base-files base-passwd bash binutils build-essential cpp cpp-4.9 dpkg dos2unix fakeroot findutils g++=4:4.9.2-2ubuntu2 g++-4.9=4.9.2-10ubuntu13 gawk=1:4.1.1+dfsg-1 gcc-5-base gcc=4:4.9.2-2ubuntu2 gcc-4.9=4.9.2-10ubuntu13 gcc-4.9-base:amd64=4.9.2-10ubuntu13 gcc-5-base:amd64=5.1~rc1-0ubuntu1 gdb git golang golang-go golang-go-linux-amd64 golang-src grep gzip hostname java-common libasan1 liblsan0 libtsan0 libubsan0 libcilkrts5 libgcc-4.9-dev libstdc++-4.9-dev libgl1-mesa-dri libgl1-mesa-glx libmono-system-xml-linq4.0-cil libmono-system-data-datasetextensions4.0-cil libstdc++-4.9-pic locales login m4 make makedev mawk mono-dmcs npm openjdk-8-jdk passwd python-distlib python-gevent python-greenlet python-html5lib python-minimal python-pip python-pkg-resources python-requests python-setuptools python-six python-urllib3 python-yaml python2.7 python2.7-minimal rpm rpm2cpio ruby ruby2.1 rubygems-integration sed tar texinfo tzdata-java udev unzip util-linux valgrind vim wget golang-go.tools curl sphinx-common gnupg
RUN apt-get update && apt-get --no-install-recommends install -y --force-yes bzip2 ca-certificates=20141019 adduser apt base-files base-passwd bash binutils build-essential cpp cpp-4.9 dpkg dos2unix fakeroot findutils g++=4:4.9.2-2ubuntu2 g++-4.9=4.9.2-10ubuntu13 gawk=1:4.1.1+dfsg-1 gcc-5-base gcc=4:4.9.2-2ubuntu2 gcc-4.9=4.9.2-10ubuntu13 gcc-4.9-base:amd64=4.9.2-10ubuntu13 gcc-5-base:amd64=5.1~rc1-0ubuntu1 gdb git golang golang-go golang-go-linux-amd64 golang-src grep gzip hostname java-common libasan1 liblsan0 libtsan0 libubsan0 libcilkrts5 libgcc-4.9-dev libstdc++-4.9-dev libgl1-mesa-dri libgl1-mesa-glx libmono-system-xml-linq4.0-cil libmono-system-data-datasetextensions4.0-cil libstdc++-4.9-pic locales login m4 make makedev mawk mono-dmcs npm openjdk-8-jdk passwd python-distlib python-gevent python-greenlet python-html5lib python-minimal python-pip python-pkg-resources python-requests python-setuptools python-six python-urllib3 python-yaml python2.7 python2.7-minimal rpm rpm2cpio ruby ruby2.1 rubygems-integration sed tar texinfo tzdata-java udev unzip util-linux valgrind vim wget golang-go.tools curl sphinx-common gnupg python-dev
RUN adduser --disabled-password --gecos '' fdb && chown -R fdb /opt && chmod -R 0777 /opt

build/docker-compose.yaml Normal file

@ -0,0 +1,40 @@
version: "3"
services:
common: &common
image: foundationdb-build:0.0.2
build:
context: .
dockerfile: Dockerfile
build-setup: &build-setup
<<: *common
depends_on: [common]
volumes:
- ..:/foundationdb
working_dir: /foundationdb
build-docs:
<<: *build-setup
command: make docpackage
build-release: &build-release
<<: *build-setup
environment:
- RELEASE=true
command: make packages
build-snapshot: &build-snapshot
<<: *build-setup
environment:
- RELEASE=false
command: make packages
build-prb:
<<: *build-snapshot
shell:
<<: *build-setup
volumes:
- ..:/foundationdb
entrypoint: /bin/bash
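Presumably these services are driven with docker-compose from the build directory; a hypothetical invocation (not part of the diff):

docker-compose run --rm build-snapshot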


@ -983,8 +983,8 @@ In the FoundationDB Ruby API, a tuple is an :class:`Enumerable` of elements of t
| Unicode string | Any value ``v`` where ``v.kind_of? String == true`` and ``v.encoding`` is | ``String`` with encoding ``Encoding::UTF_8`` |
| | ``Encoding::UTF_8`` | |
+----------------------+-----------------------------------------------------------------------------+------------------------------------------------------------------------------+
| 64-bit signed integer| Any value ``v`` where ``v.kind_of? Integer == true`` and ``-2**64+1 <= v <= | ``Fixnum`` or ``Bignum`` (depending on the magnitude of the value) |
| | 2**64-1`` | |
| Integer | Any value ``v`` where ``v.kind_of? Integer == true`` and | ``Integer`` |
| | ``-2**2040+1 <= v <= 2**2040-1`` | |
+----------------------+-----------------------------------------------------------------------------+------------------------------------------------------------------------------+
| Floating point number| Any value ``v`` where ``v.kind_of? FDB::Tuple::SingleFloat`` where | :class:`FDB::Tuple::SingleFloat` |
| (single-precision) | ``v.value.kind_of? Float`` and ``v.value`` fits inside an IEEE 754 32-bit | |


@ -83,19 +83,22 @@ For blob store backup locations, the Backup URL format is
::
blobstore://<api_key>[:<secret>]@<hostname>[:<port>]/<name>[?<param>=<value>[&<param>=<value>]...]
blobstore://<api_key>[:<secret>]@<hostname>[:<port>]/<name>?bucket=<bucket_name>[&<param>=<value>]...
<api_key> - API key to use for authentication
<secret> - API key's secret. Optional.
<hostname> - Remote hostname or IP address to connect to
<port> - Remote port to connect to. Optional. Default is 80.
<name> - Name of backup. It can contain '/' characters, to place backups into a folder-like structure.
<name> - Name of the backup within the backup bucket. It can contain '/' characters in order to organize backups into a folder-like structure.
<bucket_name> - Name of the bucket to use for backup data.
<param>=<value> - Optional URL parameters. See below for details.
A single bucket (specified by <bucket_name>) can hold any number of backups, each with a different <name>.
If <secret> is not specified, it will be looked up in :ref:`blob credential sources<blob-credential-files>`.
An example blob store Backup URL would be ``blobstore://myKey:mySecret@something.domain.com:80/dec_1_2017_0400``.
An example blob store Backup URL would be ``blobstore://myKey:mySecret@something.domain.com:80/dec_1_2017_0400?bucket=backups``.
Blob store Backup URLs can have optional parameters at the end which set various limits on interactions with the blob store. All values must be positive decimal integers. The default values are not very restrictive. The most likely parameter a user would want to change is ``max_send_bytes_per_second`` (or ``sbps`` for short) which determines the upload speed to the blob service.
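For example, a Backup URL that also caps upload speed via the ``sbps`` parameter might look like this (illustrative only):

::

blobstore://myKey:mySecret@something.domain.com:80/dec_1_2017_0400?bucket=backups&sbps=10000000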
@ -347,7 +350,7 @@ The ``list`` subcommand will list the backups at a given 'base' or shortened Bac
user@host$ fdbbackup list -b <BASE_URL>
``-b <BASE_URL>`` or ``--base_url <BASE_URL>``
This is a shortened Backup URL which looks just like a Backup URL but without the backup name so that the list command will discover and list all of the backups under that base URL.
This is a shortened Backup URL which looks just like a Backup URL but without the backup <name> so that the list command will discover and list all of the backups in the bucket.
``fdbrestore`` command line tool


@ -10,38 +10,38 @@ macOS
The macOS installation package is supported on macOS 10.7+. It includes the client and (optionally) the server.
* `FoundationDB-6.0.14.pkg <https://www.foundationdb.org/downloads/6.0.14/macOS/installers/FoundationDB-6.0.14.pkg>`_
* `FoundationDB-6.0.15.pkg <https://www.foundationdb.org/downloads/6.0.15/macOS/installers/FoundationDB-6.0.15.pkg>`_
Ubuntu
------
The Ubuntu packages are supported on 64-bit Ubuntu 12.04+, but beware of the Linux kernel bug in Ubuntu 12.x.
* `foundationdb-clients-6.0.14-1_amd64.deb <https://www.foundationdb.org/downloads/6.0.14/ubuntu/installers/foundationdb-clients_6.0.14-1_amd64.deb>`_
* `foundationdb-server-6.0.14-1_amd64.deb <https://www.foundationdb.org/downloads/6.0.14/ubuntu/installers/foundationdb-server_6.0.14-1_amd64.deb>`_ (depends on the clients package)
* `foundationdb-clients-6.0.15-1_amd64.deb <https://www.foundationdb.org/downloads/6.0.15/ubuntu/installers/foundationdb-clients_6.0.15-1_amd64.deb>`_
* `foundationdb-server-6.0.15-1_amd64.deb <https://www.foundationdb.org/downloads/6.0.15/ubuntu/installers/foundationdb-server_6.0.15-1_amd64.deb>`_ (depends on the clients package)
RHEL/CentOS EL6
---------------
The RHEL/CentOS EL6 packages are supported on 64-bit RHEL/CentOS 6.x.
* `foundationdb-clients-6.0.14-1.el6.x86_64.rpm <https://www.foundationdb.org/downloads/6.0.14/rhel6/installers/foundationdb-clients-6.0.14-1.el6.x86_64.rpm>`_
* `foundationdb-server-6.0.14-1.el6.x86_64.rpm <https://www.foundationdb.org/downloads/6.0.14/rhel6/installers/foundationdb-server-6.0.14-1.el6.x86_64.rpm>`_ (depends on the clients package)
* `foundationdb-clients-6.0.15-1.el6.x86_64.rpm <https://www.foundationdb.org/downloads/6.0.15/rhel6/installers/foundationdb-clients-6.0.15-1.el6.x86_64.rpm>`_
* `foundationdb-server-6.0.15-1.el6.x86_64.rpm <https://www.foundationdb.org/downloads/6.0.15/rhel6/installers/foundationdb-server-6.0.15-1.el6.x86_64.rpm>`_ (depends on the clients package)
RHEL/CentOS EL7
---------------
The RHEL/CentOS EL7 packages are supported on 64-bit RHEL/CentOS 7.x.
* `foundationdb-clients-6.0.14-1.el7.x86_64.rpm <https://www.foundationdb.org/downloads/6.0.14/rhel7/installers/foundationdb-clients-6.0.14-1.el7.x86_64.rpm>`_
* `foundationdb-server-6.0.14-1.el7.x86_64.rpm <https://www.foundationdb.org/downloads/6.0.14/rhel7/installers/foundationdb-server-6.0.14-1.el7.x86_64.rpm>`_ (depends on the clients package)
* `foundationdb-clients-6.0.15-1.el7.x86_64.rpm <https://www.foundationdb.org/downloads/6.0.15/rhel7/installers/foundationdb-clients-6.0.15-1.el7.x86_64.rpm>`_
* `foundationdb-server-6.0.15-1.el7.x86_64.rpm <https://www.foundationdb.org/downloads/6.0.15/rhel7/installers/foundationdb-server-6.0.15-1.el7.x86_64.rpm>`_ (depends on the clients package)
Windows
-------
The Windows installer is supported on 64-bit Windows XP and later. It includes the client and (optionally) the server.
* `foundationdb-6.0.14-x64.msi <https://www.foundationdb.org/downloads/6.0.14/windows/installers/foundationdb-6.0.14-x64.msi>`_
* `foundationdb-6.0.15-x64.msi <https://www.foundationdb.org/downloads/6.0.15/windows/installers/foundationdb-6.0.15-x64.msi>`_
API Language Bindings
=====================
@ -58,18 +58,18 @@ On macOS and Windows, the FoundationDB Python API bindings are installed as part
If you need to use the FoundationDB Python API from other Python installations or paths, download the Python package:
* `foundationdb-6.0.14.tar.gz <https://www.foundationdb.org/downloads/6.0.14/bindings/python/foundationdb-6.0.14.tar.gz>`_
* `foundationdb-6.0.15.tar.gz <https://www.foundationdb.org/downloads/6.0.15/bindings/python/foundationdb-6.0.15.tar.gz>`_
Ruby 1.9.3/2.0.0+
-----------------
* `fdb-6.0.14.gem <https://www.foundationdb.org/downloads/6.0.14/bindings/ruby/fdb-6.0.14.gem>`_
* `fdb-6.0.15.gem <https://www.foundationdb.org/downloads/6.0.15/bindings/ruby/fdb-6.0.15.gem>`_
Java 8+
-------
* `fdb-java-6.0.14.jar <https://www.foundationdb.org/downloads/6.0.14/bindings/java/fdb-java-6.0.14.jar>`_
* `fdb-java-6.0.14-javadoc.jar <https://www.foundationdb.org/downloads/6.0.14/bindings/java/fdb-java-6.0.14-javadoc.jar>`_
* `fdb-java-6.0.15.jar <https://www.foundationdb.org/downloads/6.0.15/bindings/java/fdb-java-6.0.15.jar>`_
* `fdb-java-6.0.15-javadoc.jar <https://www.foundationdb.org/downloads/6.0.15/bindings/java/fdb-java-6.0.15-javadoc.jar>`_
Go 1.1+
-------


@ -2,7 +2,7 @@
Release Notes
#############
6.0.14
6.0.15
======
Features
@ -15,6 +15,7 @@ Features
* TLS peer verification now supports suffix matching by field. `(Issue #515) <https://github.com/apple/foundationdb/issues/515>`_
* TLS certificates are automatically reloaded after being updated. [6.0.5] `(Issue #505) <https://github.com/apple/foundationdb/issues/505>`_
* Added the ``fileconfigure`` command to fdbcli, which configures a database from a JSON document. [6.0.10] `(PR #713) <https://github.com/apple/foundationdb/pull/713>`_
* Backup-to-blobstore now accepts a "bucket" URL parameter for setting the bucket name where backup data will be read/written. [6.0.15] `(PR #914) <https://github.com/apple/foundationdb/pull/914>`_
Performance
-----------
@ -30,6 +31,8 @@ Performance
* Significantly reduced master recovery times for clusters with large amounts of data. [6.0.14] `(PR #836) <https://github.com/apple/foundationdb/pull/836>`_
* Reduced read and commit latencies for clusters which are processing transactions larger than 1MB. [6.0.14] `(PR #851) <https://github.com/apple/foundationdb/pull/851>`_
* Significantly reduced recovery times when executing rollbacks on the memory storage engine. [6.0.14] `(PR #821) <https://github.com/apple/foundationdb/pull/821>`_
* Clients update their key location cache much more efficiently after storage server reboots. [6.0.15] `(PR #892) <https://github.com/apple/foundationdb/pull/892>`_
* Tuned multiple resolver configurations to do a better job balancing work between each resolver. [6.0.15] `(PR #911) <https://github.com/apple/foundationdb/pull/911>`_
Fixes
-----
@ -59,6 +62,12 @@ Fixes
* Excluding a process that was both the cluster controller and something else would cause two recoveries instead of one. [6.0.12] `(PR #784) <https://github.com/apple/foundationdb/pull/784>`_
* Configuring from ``three_datacenter`` to ``three_datacenter_fallback`` would cause a lot of unnecessary data movement. [6.0.12] `(PR #782) <https://github.com/apple/foundationdb/pull/782>`_
* Very rarely, backup snapshots would stop making progress. [6.0.14] `(PR #837) <https://github.com/apple/foundationdb/pull/837>`_
* Sometimes data distribution calculated the size of a shard incorrectly. [6.0.15] `(PR #892) <https://github.com/apple/foundationdb/pull/892>`_
* Changing the storage engine configuration would not affect which storage engine was used by the transaction logs. [6.0.15] `(PR #892) <https://github.com/apple/foundationdb/pull/892>`_
* On exit, fdbmonitor will only kill its child processes instead of its process group when run without the daemonize option. [6.0.15] `(PR #826) <https://github.com/apple/foundationdb/pull/826>`_
* HTTP client used by backup-to-blobstore now correctly treats response header field names as case insensitive. [6.0.15] `(PR #904) <https://github.com/apple/foundationdb/pull/904>`_
* Blobstore REST client was not following the S3 API in several ways (bucket name, date, and response formats). [6.0.15] `(PR #914) <https://github.com/apple/foundationdb/pull/914>`_
* Data distribution could queue shard movements for restoring replication at a low priority. [6.0.15] `(PR #907) <https://github.com/apple/foundationdb/pull/907>`_
Fixes only impacting 6.0.0+
---------------------------
@ -74,6 +83,10 @@ Fixes only impacting 6.0.0+
* The transaction logs were doing a lot of unnecessary disk writes. [6.0.12] `(PR #784) <https://github.com/apple/foundationdb/pull/784>`_
* The master will recover the transaction state store from local transaction logs if possible. [6.0.12] `(PR #801) <https://github.com/apple/foundationdb/pull/801>`_
* A bug in status collection led to various workload metrics being missing and the cluster reporting unhealthy. [6.0.13] `(PR #834) <https://github.com/apple/foundationdb/pull/834>`_
* Data distribution did not stop tracking certain unhealthy teams, leading to incorrect status reporting. [6.0.15] `(PR #892) <https://github.com/apple/foundationdb/pull/892>`_
* Fixed a variety of problems related to changing between different region configurations. [6.0.15] `(PR #892) <https://github.com/apple/foundationdb/pull/892>`_ `(PR #907) <https://github.com/apple/foundationdb/pull/907>`_
* fdbcli protects against configuration changes which could cause irreversible damage to a cluster. [6.0.15] `(PR #892) <https://github.com/apple/foundationdb/pull/892>`_ `(PR #907) <https://github.com/apple/foundationdb/pull/907>`_
* Significantly reduced both client and server memory usage in clusters with large amounts of data and usable_regions=2. [6.0.15] `(PR #892) <https://github.com/apple/foundationdb/pull/892>`_
Status
------
@ -91,8 +104,12 @@ Bindings
* C API calls made on the network thread could be reordered with calls made from other threads. [6.0.2] `(Issue #518) <https://github.com/apple/foundationdb/issues/518>`_
* The TLS_PLUGIN option is now a no-op and has been deprecated. [6.0.10] `(PR #710) <https://github.com/apple/foundationdb/pull/710>`_
* Java: the `Versionstamp::getUserVersion() </javadoc/com/apple/foundationdb/tuple/Versionstamp.html#getUserVersion-->`_ method did not handle user versions greater than ``0x00FF`` due to operator precedence errors. [6.0.11] `(Issue #761) <https://github.com/apple/foundationdb/issues/761>`_
* Python bindings didn't work with Python 3.7 because of the new `async` keyword. [6.0.13] `(Issue #830) <https://github.com/apple/foundationdb/issues/830>`_
* Python: bindings didn't work with Python 3.7 because of the new ``async`` keyword. [6.0.13] `(Issue #830) <https://github.com/apple/foundationdb/issues/830>`_
* Go: ``PrefixRange`` didn't correctly return an error if it failed to generate the range. [6.0.15] `(PR #878) <https://github.com/apple/foundationdb/pull/878>`_
* Go: Add Tuple layer support for ``uint``, ``uint64``, and ``*big.Int`` integers up to 255 bytes. Integer values will be decoded into the first of ``int64``, ``uint64``, or ``*big.Int`` in which they fit. `(PR #915) <https://github.com/apple/foundationdb/pull/915>`_ [6.0.15]
* Ruby: Add Tuple layer support for integers up to 255 bytes. `(PR #915) <https://github.com/apple/foundationdb/pull/915>`_ [6.0.15]
Other Changes
-------------


@ -992,9 +992,6 @@ private:
class BackupContainerBlobStore : public BackupContainerFileSystem, ReferenceCounted<BackupContainerBlobStore> {
private:
// All backup data goes into a single bucket
static const std::string BUCKET;
// Backup files go under a single folder prefix with subfolders for each named backup
static const std::string DATAFOLDER;
@ -1005,6 +1002,9 @@ private:
Reference<BlobStoreEndpoint> m_bstore;
std::string m_name;
// All backup data goes into a single bucket
std::string m_bucket;
std::string dataPath(const std::string path) {
return DATAFOLDER + "/" + m_name + "/" + path;
}
@ -1015,21 +1015,33 @@ private:
}
public:
BackupContainerBlobStore(Reference<BlobStoreEndpoint> bstore, std::string name)
: m_bstore(bstore), m_name(name) {
BackupContainerBlobStore(Reference<BlobStoreEndpoint> bstore, std::string name, const BlobStoreEndpoint::ParametersT &params)
: m_bstore(bstore), m_name(name), m_bucket("FDB_BACKUPS_V2") {
// Currently only one parameter is supported, "bucket"
for(auto &kv : params) {
if(kv.first == "bucket") {
m_bucket = kv.second;
continue;
}
TraceEvent(SevWarn, "BackupContainerBlobStoreInvalidParameter").detail("Name", printable(kv.first)).detail("Value", printable(kv.second));
throw backup_invalid_url();
}
}
void addref() { return ReferenceCounted<BackupContainerBlobStore>::addref(); }
void delref() { return ReferenceCounted<BackupContainerBlobStore>::delref(); }
static std::string getURLFormat() { return BlobStoreEndpoint::getURLFormat(true); }
static std::string getURLFormat() {
return BlobStoreEndpoint::getURLFormat(true) + " (Note: The 'bucket' parameter is required.)";
}
virtual ~BackupContainerBlobStore() {}
Future<Reference<IAsyncFile>> readFile(std::string path) {
return Reference<IAsyncFile>(
new AsyncFileReadAheadCache(
Reference<IAsyncFile>(new AsyncFileBlobStoreRead(m_bstore, BUCKET, dataPath(path))),
Reference<IAsyncFile>(new AsyncFileBlobStoreRead(m_bstore, m_bucket, dataPath(path))),
m_bstore->knobs.read_block_size,
m_bstore->knobs.read_ahead_blocks,
m_bstore->knobs.concurrent_reads_per_file,
@ -1038,9 +1050,9 @@ public:
);
}
ACTOR static Future<std::vector<std::string>> listURLs(Reference<BlobStoreEndpoint> bstore) {
ACTOR static Future<std::vector<std::string>> listURLs(Reference<BlobStoreEndpoint> bstore, std::string bucket) {
state std::string basePath = INDEXFOLDER + '/';
BlobStoreEndpoint::ListResult contents = wait(bstore->listBucket(BUCKET, basePath));
BlobStoreEndpoint::ListResult contents = wait(bstore->listBucket(bucket, basePath));
std::vector<std::string> results;
for(auto &f : contents.objects) {
results.push_back(bstore->getResourceURL(f.name.substr(basePath.size())));
@ -1070,11 +1082,11 @@ public:
};
Future<Reference<IBackupFile>> writeFile(std::string path) {
return Reference<IBackupFile>(new BackupFile(path, Reference<IAsyncFile>(new AsyncFileBlobStoreWrite(m_bstore, BUCKET, dataPath(path)))));
return Reference<IBackupFile>(new BackupFile(path, Reference<IAsyncFile>(new AsyncFileBlobStoreWrite(m_bstore, m_bucket, dataPath(path)))));
}
Future<Void> deleteFile(std::string path) {
return m_bstore->deleteObject(BUCKET, dataPath(path));
return m_bstore->deleteObject(m_bucket, dataPath(path));
}
ACTOR static Future<FilesAndSizesT> listFiles_impl(Reference<BackupContainerBlobStore> bc, std::string path, std::function<bool(std::string const &)> pathFilter) {
@ -1086,7 +1098,7 @@ public:
return pathFilter(folderPath.substr(prefixTrim));
};
state BlobStoreEndpoint::ListResult result = wait(bc->m_bstore->listBucket(BUCKET, bc->dataPath(path), '/', std::numeric_limits<int>::max(), rawPathFilter));
state BlobStoreEndpoint::ListResult result = wait(bc->m_bstore->listBucket(bc->m_bucket, bc->dataPath(path), '/', std::numeric_limits<int>::max(), rawPathFilter));
FilesAndSizesT files;
for(auto &o : result.objects) {
ASSERT(o.name.size() >= prefixTrim);
@ -1100,12 +1112,12 @@ public:
}
ACTOR static Future<Void> create_impl(Reference<BackupContainerBlobStore> bc) {
wait(bc->m_bstore->createBucket(BUCKET));
wait(bc->m_bstore->createBucket(bc->m_bucket));
// Check/create the index entry
bool exists = wait(bc->m_bstore->objectExists(BUCKET, bc->indexEntry()));
bool exists = wait(bc->m_bstore->objectExists(bc->m_bucket, bc->indexEntry()));
if(!exists) {
wait(bc->m_bstore->writeEntireFile(BUCKET, bc->indexEntry(), ""));
wait(bc->m_bstore->writeEntireFile(bc->m_bucket, bc->indexEntry(), ""));
}
return Void();
@ -1117,10 +1129,10 @@ public:
ACTOR static Future<Void> deleteContainer_impl(Reference<BackupContainerBlobStore> bc, int *pNumDeleted) {
// First delete everything under the data prefix in the bucket
wait(bc->m_bstore->deleteRecursively(BUCKET, bc->dataPath(""), pNumDeleted));
wait(bc->m_bstore->deleteRecursively(bc->m_bucket, bc->dataPath(""), pNumDeleted));
// Now that all files are deleted, delete the index entry
wait(bc->m_bstore->deleteObject(BUCKET, bc->indexEntry()));
wait(bc->m_bstore->deleteObject(bc->m_bucket, bc->indexEntry()));
return Void();
}
@ -1128,9 +1140,12 @@ public:
Future<Void> deleteContainer(int *pNumDeleted) {
return deleteContainer_impl(Reference<BackupContainerBlobStore>::addRef(this), pNumDeleted);
}
std::string getBucket() const {
return m_bucket;
}
};
const std::string BackupContainerBlobStore::BUCKET = "FDB_BACKUPS_V2";
const std::string BackupContainerBlobStore::DATAFOLDER = "data";
const std::string BackupContainerBlobStore::INDEXFOLDER = "backups";
@ -1158,13 +1173,17 @@ Reference<IBackupContainer> IBackupContainer::openContainer(std::string url)
r = Reference<IBackupContainer>(new BackupContainerLocalDirectory(url));
else if(u.startsWith(LiteralStringRef("blobstore://"))) {
std::string resource;
Reference<BlobStoreEndpoint> bstore = BlobStoreEndpoint::fromString(url, &resource, &lastOpenError);
// The URL parameters contain blobstore endpoint tunables as well as possible backup-specific options.
BlobStoreEndpoint::ParametersT backupParams;
Reference<BlobStoreEndpoint> bstore = BlobStoreEndpoint::fromString(url, &resource, &lastOpenError, &backupParams);
if(resource.empty())
throw backup_invalid_url();
for(auto c : resource)
if(!isalnum(c) && c != '_' && c != '-' && c != '.' && c != '/')
throw backup_invalid_url();
r = Reference<IBackupContainer>(new BackupContainerBlobStore(bstore, resource));
r = Reference<IBackupContainer>(new BackupContainerBlobStore(bstore, resource, backupParams));
}
else {
lastOpenError = "invalid URL prefix";
@ -1197,13 +1216,19 @@ ACTOR Future<std::vector<std::string>> listContainers_impl(std::string baseURL)
}
else if(u.startsWith(LiteralStringRef("blobstore://"))) {
std::string resource;
Reference<BlobStoreEndpoint> bstore = BlobStoreEndpoint::fromString(baseURL, &resource, &IBackupContainer::lastOpenError);
BlobStoreEndpoint::ParametersT backupParams;
Reference<BlobStoreEndpoint> bstore = BlobStoreEndpoint::fromString(baseURL, &resource, &IBackupContainer::lastOpenError, &backupParams);
if(!resource.empty()) {
TraceEvent(SevWarn, "BackupContainer").detail("Description", "Invalid backup container base URL, resource aka path should be blank.").detail("URL", baseURL);
throw backup_invalid_url();
}
std::vector<std::string> results = wait(BackupContainerBlobStore::listURLs(bstore));
// Create a dummy container to parse the backup-specific parameters from the URL and get a final bucket name
BackupContainerBlobStore dummy(bstore, "dummy", backupParams);
std::vector<std::string> results = wait(BackupContainerBlobStore::listURLs(bstore, dummy.getBucket()));
return results;
}
else {


@ -27,6 +27,9 @@
#include <boost/algorithm/string/split.hpp>
#include <boost/algorithm/string/classification.hpp>
#include "fdbrpc/IAsyncFile.h"
#include "rapidxml/rapidxml.hpp"
using namespace rapidxml;
json_spirit::mObject BlobStoreEndpoint::Stats::getJSON() {
json_spirit::mObject o;
@ -135,7 +138,7 @@ std::string BlobStoreEndpoint::BlobKnobs::getURLParameters() const {
return r;
}
Reference<BlobStoreEndpoint> BlobStoreEndpoint::fromString(std::string const &url, std::string *resourceFromURL, std::string *error) {
Reference<BlobStoreEndpoint> BlobStoreEndpoint::fromString(std::string const &url, std::string *resourceFromURL, std::string *error, ParametersT *ignored_parameters) {
if(resourceFromURL)
resourceFromURL->clear();
@ -162,12 +165,28 @@ Reference<BlobStoreEndpoint> BlobStoreEndpoint::fromString(std::string const &ur
if(name.size() == 0)
break;
StringRef value = t.eat("&");
// First try setting a dummy value (all knobs are currently numeric) just to see if this parameter is known to BlobStoreEndpoint.
// If it is, then we will set it to a good value or throw below, so the dummy set has no bad side effects.
bool known = knobs.set(name, 0);
// If the parameter is not known to BlobStoreEndpoint then throw unless there is an ignored_parameters set to add it to
if(!known) {
if(ignored_parameters == nullptr) {
throw format("%s is not a valid parameter name", name.toString().c_str());
}
(*ignored_parameters)[name.toString()] = value.toString();
continue;
}
// The parameter is known to BlobStoreEndpoint so it must be numeric and valid.
char *valueEnd;
int ivalue = strtol(value.toString().c_str(), &valueEnd, 10);
if(*valueEnd || (ivalue == 0 && value.toString() != "0"))
throw format("%s is not a valid value for %s", value.toString().c_str(), name.toString().c_str());
if(!knobs.set(name, ivalue))
throw format("%s is not a valid parameter name", name.toString().c_str());
// It should not be possible for this set to fail now since the dummy set above had to have worked.
ASSERT(knobs.set(name, ivalue));
}
if(resourceFromURL != nullptr)
@ -448,6 +467,7 @@ ACTOR Future<Reference<HTTP::Response>> doRequest_impl(Reference<BlobStoreEndpoi
headers["Content-Length"] = format("%d", contentLen);
headers["Host"] = bstore->host;
headers["Accept"] = "application/xml";
wait(bstore->concurrentRequests.take());
state FlowLock::Releaser globalReleaser(bstore->concurrentRequests, 1);
@ -489,7 +509,6 @@ ACTOR Future<Reference<HTTP::Response>> doRequest_impl(Reference<BlobStoreEndpoi
remoteAddress = rconn.conn->getPeerAddress();
wait(bstore->requestRate->getAllowance(1));
state Reference<HTTP::Response> r = wait(timeoutError(HTTP::doRequest(rconn.conn, verb, resource, headers, &contentCopy, contentLen, bstore->sendRate, &bstore->s_stats.bytes_sent, bstore->recvRate), bstore->knobs.request_timeout));
r->convertToJSONifXML();
// Since the response was parsed successfully (which is why we are here) reuse the connection unless we received the "Connection: close" header.
if(r->headers["Connection"] != "close")
@ -628,82 +647,92 @@ ACTOR Future<Void> listBucketStream_impl(Reference<BlobStoreEndpoint> bstore, st
listReleaser.release();
try {
BlobStoreEndpoint::ListResult result;
// Parse the json assuming it is valid and contains the right stuff. If any exceptions are thrown, throw http_bad_response
json_spirit::mValue json;
json_spirit::read_string(r->content, json);
JSONDoc doc(json);
BlobStoreEndpoint::ListResult listResult;
xml_document<> doc;
std::string isTruncated;
if (!doc.tryGet("truncated", more)) {
doc.get("ListBucketResult.IsTruncated", isTruncated);
more = isTruncated == "false" ? false : true;
// Copy content because rapidxml will modify it during parse
std::string content = r->content;
doc.parse<0>((char *)content.c_str());
// There should be exactly one node
xml_node<> *result = doc.first_node();
if(result == nullptr || strcmp(result->name(), "ListBucketResult") != 0) {
throw http_bad_response();
}
if (doc.has("results")) {
for (auto &jsonObject : doc.at("results").get_array()) {
JSONDoc objectDoc(jsonObject);
BlobStoreEndpoint::ObjectInfo object;
objectDoc.get("size", object.size);
objectDoc.get("key", object.name);
result.objects.push_back(std::move(object));
}
}
if(doc.has("ListBucketResult.Contents")) {
if (doc.at("ListBucketResult.Contents").type() == json_spirit::array_type) {
for (auto &jsonObject : doc.at("ListBucketResult.Contents").get_array()) {
JSONDoc objectDoc(jsonObject);
BlobStoreEndpoint::ObjectInfo object;
std::string sizeVal;
objectDoc.get("Size", sizeVal);
object.size = strtoll(sizeVal.c_str(), NULL, 10);
objectDoc.get("Key", object.name);
result.objects.push_back(std::move(object));
xml_node<> *n = result->first_node();
while(n != nullptr) {
const char *name = n->name();
if(strcmp(name, "IsTruncated") == 0) {
const char *val = n->value();
if(strcmp(val, "true") == 0) {
more = true;
}
else if(strcmp(val, "false") == 0) {
more = false;
}
else {
throw http_bad_response();
}
}
else {
auto jsonObject = doc.at("ListBucketResult.Contents");
JSONDoc objectDoc(jsonObject);
else if(strcmp(name, "Contents") == 0) {
BlobStoreEndpoint::ObjectInfo object;
std::string sizeVal;
objectDoc.get("Size", sizeVal);
object.size = strtoll(sizeVal.c_str(), NULL, 10);
objectDoc.get("Key", object.name);
result.objects.push_back(std::move(object));
}
}
if(doc.has("CommonPrefixes")) {
for(auto &jsonObject : doc.at("CommonPrefixes").get_array()) {
JSONDoc objectDoc(jsonObject);
std::string p;
objectDoc.get("Prefix", p);
// If recursing, queue a sub-request, otherwise add the common prefix to the result.
if(maxDepth > 0) {
// If there is no recurse filter or the filter returns true then start listing the subfolder
if(!recurseFilter || recurseFilter(p))
subLists.push_back(bstore->listBucketStream(bucket, results, p, delimiter, maxDepth - 1, recurseFilter));
if(more)
lastFile = std::move(p);
xml_node<> *key = n->first_node("Key");
if(key == nullptr) {
throw http_bad_response();
}
else
result.commonPrefixes.push_back(std::move(p));
object.name = key->value();
xml_node<> *size = n->first_node("Size");
if(size == nullptr) {
throw http_bad_response();
}
object.size = strtoull(size->value(), NULL, 10);
listResult.objects.push_back(object);
}
else if(strcmp(name, "CommonPrefixes") == 0) {
xml_node<> *prefixNode = n->first_node("Prefix");
while(prefixNode != nullptr) {
const char *prefix = prefixNode->value();
// If recursing, queue a sub-request, otherwise add the common prefix to the result.
if(maxDepth > 0) {
// If there is no recurse filter or the filter returns true then start listing the subfolder
if(!recurseFilter || recurseFilter(prefix)) {
subLists.push_back(bstore->listBucketStream(bucket, results, prefix, delimiter, maxDepth - 1, recurseFilter));
}
// Since prefix will not be in the final listResult below we have to set lastFile here in case it's greater than the last object
lastFile = prefix;
}
else {
listResult.commonPrefixes.push_back(prefix);
}
prefixNode = prefixNode->next_sibling("Prefix");
}
}
n = n->next_sibling();
}
results.send(result);
results.send(listResult);
if(more) {
// lastFile will be the last commonprefix for which a sublist was started, if any
if(!result.objects.empty() && lastFile < result.objects.back().name)
lastFile = result.objects.back().name;
if(!result.commonPrefixes.empty() && lastFile < result.commonPrefixes.back())
lastFile = result.commonPrefixes.back();
// lastFile will be the last commonprefix for which a sublist was started, if any.
// If there are any objects and the last one is greater than lastFile then make it the new lastFile.
if(!listResult.objects.empty() && lastFile < listResult.objects.back().name) {
lastFile = listResult.objects.back().name;
}
// If there are any common prefixes and the last one is greater than lastFile then make it the new lastFile.
if(!listResult.commonPrefixes.empty() && lastFile < listResult.commonPrefixes.back()) {
lastFile = listResult.commonPrefixes.back();
}
// If lastFile is empty at this point, something has gone wrong.
if(lastFile.empty()) {
TraceEvent(SevWarn, "BlobStoreEndpointListNoNextMarker").suppressFor(60).detail("Resource", fullResource);
throw backup_error();
throw http_bad_response();
}
}
} catch(Error &e) {
@ -779,10 +808,15 @@ std::string BlobStoreEndpoint::hmac_sha1(std::string const &msg) {
}
void BlobStoreEndpoint::setAuthHeaders(std::string const &verb, std::string const &resource, HTTP::Headers& headers) {
std::string &date = headers["Date"];
char dateBuf[20];
time_t ts;
time(&ts);
std::string &date = headers["Date"];
date = std::string(asctime(gmtime(&ts)), 24) + " GMT"; // asctime() returns a 24 character string plus a \n and null terminator.
// ISO 8601 format YYYYMMDD'T'HHMMSS'Z'
strftime(dateBuf, 20, "%Y%m%dT%H%M%SZ", gmtime(&ts));
date = dateBuf;
std::string msg;
StringRef x;
msg.append(verb);
@ -921,14 +955,25 @@ ACTOR static Future<std::string> beginMultiPartUpload_impl(Reference<BlobStoreEn
std::string resource = std::string("/") + bucket + "/" + object + "?uploads";
HTTP::Headers headers;
Reference<HTTP::Response> r = wait(bstore->doRequest("POST", resource, headers, NULL, 0, {200}));
int start = r->content.find("<UploadId>");
if(start == std::string::npos)
throw http_bad_response();
start += 10;
int end = r->content.find("</UploadId>", start);
if(end == std::string::npos)
throw http_bad_response();
return r->content.substr(start, end - start);
try {
xml_document<> doc;
// Copy content because rapidxml will modify it during parse
std::string content = r->content;
doc.parse<0>((char *)content.c_str());
// There should be exactly one node
xml_node<> *result = doc.first_node();
if(result != nullptr && strcmp(result->name(), "InitiateMultipartUploadResult") == 0) {
xml_node<> *id = result->first_node("UploadId");
if(id != nullptr) {
return id->value();
}
}
} catch(...) {
}
throw http_bad_response();
}
Future<std::string> BlobStoreEndpoint::beginMultiPartUpload(std::string const &bucket, std::string const &object) {


@ -125,7 +125,13 @@ public:
resource = "<name>";
return format("blobstore://<api_key>:<secret>@<host>[:<port>]/%s[?<param>=<value>[&<param>=<value>]...]", resource);
}
static Reference<BlobStoreEndpoint> fromString(std::string const &url, std::string *resourceFromURL = nullptr, std::string *error = nullptr);
typedef std::map<std::string, std::string> ParametersT;
// Parse url and return a BlobStoreEndpoint
// If the url has parameters that BlobStoreEndpoint can't consume, then an error will be thrown unless ignored_parameters is given, in which case
// the unconsumed parameters will be added to it.
static Reference<BlobStoreEndpoint> fromString(std::string const &url, std::string *resourceFromURL = nullptr, std::string *error = nullptr, ParametersT *ignored_parameters = nullptr);
// Get a normalized version of this URL with the given resource and any non-default BlobKnob values as URL parameters.
std::string getResourceURL(std::string resource);


@ -60,15 +60,6 @@ namespace HTTP {
return !fail_if_header_missing;
}
void Response::convertToJSONifXML() {
auto i = headers.find("Content-Type");
if (i != headers.end() && i->second == "application/xml") {
content = xml2json(content.c_str());
contentLen = content.length();
headers["Content-Type"] = "application/json";
}
}
std::string Response::toString() {
std::string r = format("Response Code: %d\n", code);
r += format("Response ContentLen: %lld\n", contentLen);


@ -45,7 +45,6 @@ namespace HTTP {
int64_t contentLen;
bool verifyMD5(bool fail_if_header_missing, Optional<std::string> content_sum = Optional<std::string>());
void convertToJSONifXML();
};
// Prepend the HTTP request header to the given PacketBuffer, returning the new head of the buffer chain


@ -90,7 +90,6 @@
</ActorCompiler>
<ClInclude Include="VersionedMap.h" />
<ClInclude Include="WriteMap.h" />
<ClInclude Include="xml2json.hpp" />
</ItemGroup>
<ItemGroup>
<ActorCompiler Include="AsyncFileBlobStore.actor.cpp" />


@ -1,273 +0,0 @@
#ifndef XML2JSON_HPP_INCLUDED
#define XML2JSON_HPP_INCLUDED
// Copyright (C) 2015 Alan Zhuang (Cheedoong) HKUST. [Updated to the latest version of rapidjson]
// Copyright (C) 2013 Alan Zhuang (Cheedoong) Tencent, Inc.
#include <iostream>
#include <map>
#include <string>
#include <cctype>
#include "fdbclient/rapidxml/rapidxml.hpp"
#include "fdbclient/rapidxml/rapidxml_utils.hpp"
#include "fdbclient/rapidxml/rapidxml_print.hpp"
#include "fdbclient/rapidjson/document.h"
#include "fdbclient/rapidjson/prettywriter.h"
#include "fdbclient/rapidjson/encodedstream.h"
#include "fdbclient/rapidjson/stringbuffer.h"
#include "fdbclient/rapidjson/reader.h"
#include "fdbclient/rapidjson/writer.h"
#include "fdbclient/rapidjson/filereadstream.h"
#include "fdbclient/rapidjson/filewritestream.h"
#include "fdbclient/rapidjson/error/en.h"
/* [Start] This part is configurable */
static const char xml2json_text_additional_name[] = "#text";
static const char xml2json_attribute_name_prefix[] = "@";
/* Example:
<node_name attribute_name="attribute_value">value</node_name> ---> "node_name":{"#text":"value","@attribute_name":"attribute_value"}
*/
static const bool xml2json_numeric_support = false;
/* Example:
xml2json_numeric_support = false:
<number>26.026</number> ---> "number":"26.026"
xml2json_numeric_support = true:
<number>26.026</number> ---> "number":26.026
*/
/* [End] This part is configurable */
// Avoided any namespace pollution.
static bool xml2json_has_digits_only(const char * input, bool *hasDecimal)
{
if (input == nullptr)
return false; // treat empty input as a string (probably will be an empty string)
const char * runPtr = input;
*hasDecimal = false;
while (*runPtr != '\0')
{
if (*runPtr == '.')
{
if (!(*hasDecimal))
*hasDecimal = true;
else
return false; // we found two dots - not a number
}
else if (isalpha(*runPtr))
{
return false;
}
runPtr++;
}
return true;
}
void xml2json_to_array_form(const char *name, rapidjson::Value &jsvalue, rapidjson::Value &jsvalue_chd, rapidjson::Document::AllocatorType& allocator)
{
rapidjson::Value jsvalue_target; // target to do some operation
rapidjson::Value jn; // this is a must, partially because of the latest version of rapidjson
jn.SetString(name, allocator);
jsvalue_target = jsvalue.FindMember(name)->value;
if(jsvalue_target.IsArray())
{
jsvalue_target.PushBack(jsvalue_chd, allocator);
jsvalue.RemoveMember(name);
jsvalue.AddMember(jn, jsvalue_target, allocator);
}
else
{
rapidjson::Value jsvalue_array;
//jsvalue_array = jsvalue_target;
jsvalue_array.SetArray();
jsvalue_array.PushBack(jsvalue_target, allocator);
jsvalue_array.PushBack(jsvalue_chd, allocator);
jsvalue.RemoveMember(name);
jsvalue.AddMember(jn, jsvalue_array, allocator);
}
}
void xml2json_add_attributes(rapidxml::xml_node<> *xmlnode, rapidjson::Value &jsvalue, rapidjson::Document::AllocatorType& allocator)
{
rapidxml::xml_attribute<> *myattr;
for(myattr = xmlnode->first_attribute(); myattr; myattr = myattr->next_attribute())
{
rapidjson::Value jn, jv;
jn.SetString((std::string(xml2json_attribute_name_prefix) + myattr->name()).c_str(), allocator);
if (xml2json_numeric_support == false)
{
jv.SetString(myattr->value(), allocator);
}
else
{
bool hasDecimal;
if (xml2json_has_digits_only(myattr->value(), &hasDecimal) == false)
{
jv.SetString(myattr->value(), allocator);
}
else
{
if (hasDecimal)
{
double value = std::strtod(myattr->value(),nullptr);
jv.SetDouble(value);
}
else
{
long int value = std::strtol(myattr->value(), nullptr, 0);
jv.SetInt(value);
}
}
}
jsvalue.AddMember(jn, jv, allocator);
}
}
void xml2json_traverse_node(rapidxml::xml_node<> *xmlnode, rapidjson::Value &jsvalue, rapidjson::Document::AllocatorType& allocator)
{
//cout << "this: " << xmlnode->type() << " name: " << xmlnode->name() << " value: " << xmlnode->value() << endl;
rapidjson::Value jsvalue_chd;
jsvalue.SetObject();
jsvalue_chd.SetObject();
rapidxml::xml_node<> *xmlnode_chd;
// classified discussion:
if((xmlnode->type() == rapidxml::node_data || xmlnode->type() == rapidxml::node_cdata) && xmlnode->value())
{
// case: pure_text
jsvalue.SetString(xmlnode->value(), allocator); // then addmember("#text" , jsvalue, allocator)
}
else if(xmlnode->type() == rapidxml::node_element)
{
if(xmlnode->first_attribute())
{
if(xmlnode->first_node() && xmlnode->first_node()->type() == rapidxml::node_data && count_children(xmlnode) == 1)
{
// case: <e attr="xxx">text</e>
rapidjson::Value jn, jv;
jn.SetString(xml2json_text_additional_name, allocator);
jv.SetString(xmlnode->first_node()->value(), allocator);
jsvalue.AddMember(jn, jv, allocator);
xml2json_add_attributes(xmlnode, jsvalue, allocator);
return;
}
else
{
// case: <e attr="xxx">...</e>
xml2json_add_attributes(xmlnode, jsvalue, allocator);
}
}
else
{
if(!xmlnode->first_node())
{
// case: <e />
jsvalue.SetNull();
return;
}
else if(xmlnode->first_node()->type() == rapidxml::node_data && count_children(xmlnode) == 1)
{
// case: <e>text</e>
if (xml2json_numeric_support == false)
{
jsvalue.SetString(rapidjson::StringRef(xmlnode->first_node()->value()), allocator);
}
else
{
bool hasDecimal;
if (xml2json_has_digits_only(xmlnode->first_node()->value(), &hasDecimal) == false)
{
jsvalue.SetString(rapidjson::StringRef(xmlnode->first_node()->value()), allocator);
}
else
{
if (hasDecimal)
{
double value = std::strtod(xmlnode->first_node()->value(), nullptr);
jsvalue.SetDouble(value);
}
else
{
long int value = std::strtol(xmlnode->first_node()->value(), nullptr, 0);
jsvalue.SetInt(value);
}
}
}
return;
}
}
if(xmlnode->first_node())
{
// case: complex else...
std::map<std::string, int> name_count;
for(xmlnode_chd = xmlnode->first_node(); xmlnode_chd; xmlnode_chd = xmlnode_chd->next_sibling())
{
std::string current_name;
const char *name_ptr = NULL;
rapidjson::Value jn, jv;
if(xmlnode_chd->type() == rapidxml::node_data || xmlnode_chd->type() == rapidxml::node_cdata)
{
current_name = xml2json_text_additional_name;
name_count[current_name]++;
jv.SetString(xml2json_text_additional_name, allocator);
name_ptr = jv.GetString();
}
else if(xmlnode_chd->type() == rapidxml::node_element)
{
current_name = xmlnode_chd->name();
name_count[current_name]++;
name_ptr = xmlnode_chd->name();
}
xml2json_traverse_node(xmlnode_chd, jsvalue_chd, allocator);
if(name_count[current_name] > 1 && name_ptr)
xml2json_to_array_form(name_ptr, jsvalue, jsvalue_chd, allocator);
else
{
jn.SetString(name_ptr, allocator);
jsvalue.AddMember(jn, jsvalue_chd, allocator);
}
}
}
}
else
{
std::cerr << "err data!!" << std::endl;
}
}
std::string xml2json(const char *xml_str)
{
//file<> fdoc("track_orig.xml"); // could serve another use case
rapidxml::xml_document<> *xml_doc = new rapidxml::xml_document<>();
xml_doc->parse<0> (const_cast<char *>(xml_str));
rapidjson::Document js_doc;
js_doc.SetObject();
rapidjson::Document::AllocatorType& allocator = js_doc.GetAllocator();
rapidxml::xml_node<> *xmlnode_chd;
for(xmlnode_chd = xml_doc->first_node(); xmlnode_chd; xmlnode_chd = xmlnode_chd->next_sibling())
{
rapidjson::Value jsvalue_chd;
jsvalue_chd.SetObject();
//rapidjson::Value jsvalue_name(xmlnode_chd->name(), allocator);
//js_doc.AddMember(jsvalue_name, jsvalue_chd, allocator);
xml2json_traverse_node(xmlnode_chd, jsvalue_chd, allocator);
js_doc.AddMember(rapidjson::StringRef(xmlnode_chd->name()), jsvalue_chd, allocator);
}
rapidjson::StringBuffer buffer;
rapidjson::Writer<rapidjson::StringBuffer> writer(buffer);
js_doc.Accept(writer);
delete xml_doc;
return buffer.GetString();
}
#endif


@ -1270,8 +1270,10 @@ ACTOR Future<Void> masterCore( Reference<MasterData> self ) {
for(auto& dc : self->primaryDcId) {
tr.set(recoveryCommitRequest.arena, tLogDatacentersKeyFor(dc), StringRef());
}
for(auto& dc : self->remoteDcIds) {
tr.set(recoveryCommitRequest.arena, tLogDatacentersKeyFor(dc), StringRef());
if(self->configuration.usableRegions > 1) {
for(auto& dc : self->remoteDcIds) {
tr.set(recoveryCommitRequest.arena, tLogDatacentersKeyFor(dc), StringRef());
}
}
applyMetadataMutations(self->dbgid, recoveryCommitRequest.arena, tr.mutations.slice(mmApplied, tr.mutations.size()), self->txnStateStore, NULL, NULL);


@ -32,7 +32,7 @@
<Wix xmlns='http://schemas.microsoft.com/wix/2006/wi'>
<Product Name='$(var.Title)'
Id='{34D46755-0BA0-4CA5-8287-EA759D5FAF41}'
Id='{0EDB0964-987A-4CDC-8CC4-D059C20201DB}'
UpgradeCode='{A95EA002-686E-4164-8356-C715B7F8B1C8}'
Version='$(var.Version)'
Manufacturer='$(var.Manufacturer)'