Merge branch 'master' of https://github.com/apple/foundationdb into feature-backup-json

Stephen Atherton 2019-03-12 03:35:03 -07:00
commit f0eae0295f
139 changed files with 2214 additions and 1139 deletions

View File

@ -36,6 +36,7 @@ ifeq ($(NIGHTLY),true)
CFLAGS += -DFDB_CLEAN_BUILD
endif
BOOST_BASENAME ?= boost_1_67_0
ifeq ($(PLATFORM),Linux)
PLATFORM := linux
@ -44,7 +45,7 @@ ifeq ($(PLATFORM),Linux)
CXXFLAGS += -std=c++0x
BOOSTDIR ?= /opt/boost_1_52_0
BOOST_BASEDIR ?= /opt
TLS_LIBDIR ?= /usr/local/lib
DLEXT := so
java_DLEXT := so
@ -60,13 +61,14 @@ else ifeq ($(PLATFORM),Darwin)
.LIBPATTERNS := lib%.dylib lib%.a
BOOSTDIR ?= $(HOME)/boost_1_52_0
BOOST_BASEDIR ?= ${HOME}
TLS_LIBDIR ?= /usr/local/lib
DLEXT := dylib
java_DLEXT := jnilib
else
$(error Not prepared to compile on platform $(PLATFORM))
endif
BOOSTDIR ?= ${BOOST_BASEDIR}/${BOOST_BASENAME}
CCACHE := $(shell which ccache)
ifneq ($(CCACHE),)

View File

@ -37,7 +37,7 @@ become the only build system available.
1. Check out this repo on your Mac.
1. Install the Xcode command-line tools.
1. Download version 1.52 of [Boost](https://sourceforge.net/projects/boost/files/boost/1.52.0/).
1. Download version 1.67.0 of [Boost](https://sourceforge.net/projects/boost/files/boost/1.67.0/).
1. Set the `BOOSTDIR` environment variable to the location containing this boost installation.
1. Install [Mono](http://www.mono-project.com/download/stable/).
1. Install a [JDK](http://www.oracle.com/technetwork/java/javase/downloads/index.html). FoundationDB currently builds with Java 8.
@ -192,6 +192,7 @@ that Visual Studio is used to compile.
1. Install a [JDK](http://www.oracle.com/technetwork/java/javase/downloads/index.html). FoundationDB currently builds with Java 8.
1. Set `JAVA_HOME` to the unpacked location and `JAVA_COMPILE` to
`$JAVA_HOME/bin/javac`.
1. Install [Python](https://www.python.org/downloads/) if it is not already installed by Visual Studio.
1. (Optional) Install [WIX](http://wixtoolset.org/). Without it Visual Studio
won't build the Windows installer.
1. Create a build directory (you can have the build directory anywhere you

View File

@ -62,6 +62,6 @@ testers = {
'ruby': Tester('ruby', _absolute_path('ruby/tests/tester.rb'), 2040, 23, MAX_API_VERSION),
'java': Tester('java', _java_cmd + 'StackTester', 2040, 510, MAX_API_VERSION, types=ALL_TYPES),
'java_async': Tester('java', _java_cmd + 'AsyncStackTester', 2040, 510, MAX_API_VERSION, types=ALL_TYPES),
'go': Tester('go', _absolute_path('go/build/bin/_stacktester'), 2040, 200, MAX_API_VERSION),
'go': Tester('go', _absolute_path('go/build/bin/_stacktester'), 2040, 200, MAX_API_VERSION, types=ALL_TYPES),
'flow': Tester('flow', _absolute_path('flow/bin/fdb_flow_tester'), 63, 500, MAX_API_VERSION, directory_snapshot_ops_enabled=False),
}

View File

@ -59,18 +59,16 @@ if(NOT WIN32)
target_link_libraries(fdb_c_ryw_benchmark PRIVATE fdb_c)
endif()
if(NOT OPEN_FOR_IDE)
# TODO: re-enable once the old vcxproj-based build system is removed.
#generate_export_header(fdb_c EXPORT_MACRO_NAME "DLLEXPORT"
# EXPORT_FILE_NAME ${CMAKE_CURRENT_BINARY_DIR}/foundationdb/fdb_c_export.h)
install(TARGETS fdb_c
EXPORT fdbc
DESTINATION ${FDB_LIB_DIR}
COMPONENT clients)
install(
FILES foundationdb/fdb_c.h
${CMAKE_CURRENT_BINARY_DIR}/foundationdb/fdb_c_options.g.h
${CMAKE_SOURCE_DIR}/fdbclient/vexillographer/fdb.options
DESTINATION ${FDB_INCLUDE_INSTALL_DIR}/foundationdb COMPONENT clients)
#install(EXPORT fdbc DESTINATION ${FDB_LIB_DIR}/foundationdb COMPONENT clients)
endif()
# TODO: re-enable once the old vcxproj-based build system is removed.
#generate_export_header(fdb_c EXPORT_MACRO_NAME "DLLEXPORT"
# EXPORT_FILE_NAME ${CMAKE_CURRENT_BINARY_DIR}/foundationdb/fdb_c_export.h)
fdb_install(TARGETS fdb_c
EXPORT fdbc
DESTINATION lib
COMPONENT clients)
fdb_install(
FILES foundationdb/fdb_c.h
${CMAKE_CURRENT_BINARY_DIR}/foundationdb/fdb_c_options.g.h
${CMAKE_SOURCE_DIR}/fdbclient/vexillographer/fdb.options
DESTINATION include COMPONENT clients)
#install(EXPORT fdbc DESTINATION ${FDB_LIB_DIR}/foundationdb COMPONENT clients)

View File

@ -67,20 +67,20 @@ FOR /F "tokens=1" %%i in ('hg.exe id') do copy /Y "$(TargetPath)" "$(TargetPath)
</PostBuildEvent>
</ItemDefinitionGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<IncludePath>..\..\;C:\Program Files\boost_1_52_0;$(IncludePath)</IncludePath>
<IncludePath>..\..\;C:\Program Files\boost_1_67_0;$(IncludePath)</IncludePath>
<OutDir>$(SolutionDir)bin\$(Configuration)\</OutDir>
<IntDir>$(SystemDrive)\temp\msvcfdb\$(Platform)$(Configuration)\$(MSBuildProjectName)\</IntDir>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<OutDir>$(SolutionDir)bin\$(Configuration)\</OutDir>
<IntDir>$(SystemDrive)\temp\msvcfdb\$(Platform)$(Configuration)\$(MSBuildProjectName)\</IntDir>
<IncludePath>..\..\;C:\Program Files\boost_1_52_0;$(IncludePath)</IncludePath>
<IncludePath>..\..\;C:\Program Files\boost_1_67_0;$(IncludePath)</IncludePath>
</PropertyGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<ClCompile>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=_WIN32_WINNT_WS03;WINVER=_WIN32_WINNT_WS03;NTDDI_VERSION=NTDDI_WS03;_DEBUG;_HAS_ITERATOR_DEBUGGING=0;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=_WIN32_WINNT_WS03;BOOST_ALL_NO_LIB;WINVER=_WIN32_WINNT_WS03;NTDDI_VERSION=NTDDI_WS03;_DEBUG;_HAS_ITERATOR_DEBUGGING=0;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
<AdditionalOptions>/bigobj @..\..\flow/no_intellisense.opt %(AdditionalOptions)</AdditionalOptions>
</ClCompile>
@ -95,7 +95,7 @@ FOR /F "tokens=1" %%i in ('hg.exe id') do copy /Y "$(TargetPath)" "$(TargetPath)
<Optimization>MaxSpeed</Optimization>
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=_WIN32_WINNT_WS03;WINVER=_WIN32_WINNT_WS03;NTDDI_VERSION=NTDDI_WS03;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=_WIN32_WINNT_WS03;BOOST_ALL_NO_LIB;WINVER=_WIN32_WINNT_WS03;NTDDI_VERSION=NTDDI_WS03;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<RuntimeLibrary>MultiThreaded</RuntimeLibrary>
<AdditionalOptions>/bigobj @..\..\flow/no_intellisense.opt %(AdditionalOptions)</AdditionalOptions>
</ClCompile>

View File

@ -35,7 +35,7 @@ THREAD_FUNC networkThread(void* fdb) {
ACTOR Future<Void> _test() {
API *fdb = FDB::API::selectAPIVersion(610);
auto db = fdb->createDatabase();
state Reference<Transaction> tr( new Transaction(db) );
state Reference<Transaction> tr = db->createTransaction();
// tr->setVersion(1);
@ -98,6 +98,81 @@ void fdb_flow_test() {
}
namespace FDB {
class DatabaseImpl : public Database, NonCopyable {
public:
virtual ~DatabaseImpl() { fdb_database_destroy(db); }
Reference<Transaction> createTransaction() override;
void setDatabaseOption(FDBDatabaseOption option, Optional<StringRef> value = Optional<StringRef>()) override;
private:
FDBDatabase* db;
explicit DatabaseImpl(FDBDatabase* db) : db(db) {}
friend class API;
};
class TransactionImpl : public Transaction, private NonCopyable, public FastAllocated<TransactionImpl> {
friend class DatabaseImpl;
public:
virtual ~TransactionImpl() {
if (tr) {
fdb_transaction_destroy(tr);
}
}
void setReadVersion(Version v) override;
Future<Version> getReadVersion() override;
Future<Optional<FDBStandalone<ValueRef>>> get(const Key& key, bool snapshot = false) override;
Future<FDBStandalone<KeyRef>> getKey(const KeySelector& key, bool snapshot = false) override;
Future<Void> watch(const Key& key) override;
using Transaction::getRange;
Future<FDBStandalone<RangeResultRef>> getRange(const KeySelector& begin, const KeySelector& end,
GetRangeLimits limits = GetRangeLimits(), bool snapshot = false,
bool reverse = false,
FDBStreamingMode streamingMode = FDB_STREAMING_MODE_SERIAL) override;
void addReadConflictRange(KeyRangeRef const& keys) override;
void addReadConflictKey(KeyRef const& key) override;
void addWriteConflictRange(KeyRangeRef const& keys) override;
void addWriteConflictKey(KeyRef const& key) override;
void atomicOp(const KeyRef& key, const ValueRef& operand, FDBMutationType operationType) override;
void set(const KeyRef& key, const ValueRef& value) override;
void clear(const KeyRangeRef& range) override;
void clear(const KeyRef& key) override;
Future<Void> commit() override;
Version getCommittedVersion() override;
Future<FDBStandalone<StringRef>> getVersionstamp() override;
void setOption(FDBTransactionOption option, Optional<StringRef> value = Optional<StringRef>()) override;
Future<Void> onError(Error const& e) override;
void cancel() override;
void reset() override;
TransactionImpl() : tr(NULL) {}
TransactionImpl(TransactionImpl&& r) BOOST_NOEXCEPT {
tr = r.tr;
r.tr = NULL;
}
TransactionImpl& operator=(TransactionImpl&& r) BOOST_NOEXCEPT {
tr = r.tr;
r.tr = NULL;
return *this;
}
private:
FDBTransaction* tr;
explicit TransactionImpl(FDBDatabase* db);
};
static inline void throw_on_error( fdb_error_t e ) {
if (e)
@ -187,40 +262,36 @@ namespace FDB {
return fdb_error_predicate( pred, e.code() );
}
Reference<Cluster> API::createCluster( std::string const& connFilename ) {
return Reference<Cluster>(new Cluster(connFilename));
}
Reference<DatabaseContext> API::createDatabase(std::string const& connFilename) {
Reference<Database> API::createDatabase(std::string const& connFilename) {
FDBDatabase *db;
throw_on_error(fdb_create_database(connFilename.c_str(), &db));
return Reference<DatabaseContext>(new DatabaseContext(db));
return Reference<Database>(new DatabaseImpl(db));
}
int API::getAPIVersion() const {
return version;
}
Reference<DatabaseContext> Cluster::createDatabase() {
return API::getInstance()->createDatabase(connFilename.c_str());
Reference<Transaction> DatabaseImpl::createTransaction() {
return Reference<Transaction>(new TransactionImpl(db));
}
void DatabaseContext::setDatabaseOption(FDBDatabaseOption option, Optional<StringRef> value) {
void DatabaseImpl::setDatabaseOption(FDBDatabaseOption option, Optional<StringRef> value) {
if (value.present())
throw_on_error(fdb_database_set_option(db, option, value.get().begin(), value.get().size()));
else
throw_on_error(fdb_database_set_option(db, option, NULL, 0));
}
Transaction::Transaction( Reference<DatabaseContext> const& db ) {
throw_on_error( fdb_database_create_transaction( db->db, &tr ) );
TransactionImpl::TransactionImpl(FDBDatabase* db) {
throw_on_error(fdb_database_create_transaction(db, &tr));
}
void Transaction::setVersion( Version v ) {
void TransactionImpl::setReadVersion(Version v) {
fdb_transaction_set_read_version( tr, v );
}
Future<Version> Transaction::getReadVersion() {
Future<Version> TransactionImpl::getReadVersion() {
return backToFuture<Version>( fdb_transaction_get_read_version( tr ), [](Reference<CFuture> f){
Version value;
@ -230,7 +301,7 @@ namespace FDB {
} );
}
Future< Optional<FDBStandalone<ValueRef>> > Transaction::get( const Key& key, bool snapshot ) {
Future<Optional<FDBStandalone<ValueRef>>> TransactionImpl::get(const Key& key, bool snapshot) {
return backToFuture< Optional<FDBStandalone<ValueRef>> >( fdb_transaction_get( tr, key.begin(), key.size(), snapshot ), [](Reference<CFuture> f) {
fdb_bool_t present;
uint8_t const* value;
@ -246,14 +317,14 @@ namespace FDB {
} );
}
Future< Void > Transaction::watch( const Key& key ) {
Future<Void> TransactionImpl::watch(const Key& key) {
return backToFuture< Void >( fdb_transaction_watch( tr, key.begin(), key.size() ), [](Reference<CFuture> f) {
throw_on_error( fdb_future_get_error( f->f ) );
return Void();
} );
}
Future< FDBStandalone<KeyRef> > Transaction::getKey( const KeySelector& key, bool snapshot ) {
Future<FDBStandalone<KeyRef>> TransactionImpl::getKey(const KeySelector& key, bool snapshot) {
return backToFuture< FDBStandalone<KeyRef> >( fdb_transaction_get_key( tr, key.key.begin(), key.key.size(), key.orEqual, key.offset, snapshot ), [](Reference<CFuture> f) {
uint8_t const* key;
int key_length;
@ -264,7 +335,7 @@ namespace FDB {
} );
}
Future< FDBStandalone<RangeResultRef> > Transaction::getRange( const KeySelector& begin, const KeySelector& end, GetRangeLimits limits, bool snapshot, bool reverse, FDBStreamingMode streamingMode ) {
Future<FDBStandalone<RangeResultRef>> TransactionImpl::getRange(const KeySelector& begin, const KeySelector& end, GetRangeLimits limits, bool snapshot, bool reverse, FDBStreamingMode streamingMode) {
// FIXME: iteration
return backToFuture< FDBStandalone<RangeResultRef> >( fdb_transaction_get_range( tr, begin.key.begin(), begin.key.size(), begin.orEqual, begin.offset, end.key.begin(), end.key.size(), end.orEqual, end.offset, limits.rows, limits.bytes, streamingMode, 1, snapshot, reverse ), [](Reference<CFuture> f) {
FDBKeyValue const* kv;
@ -277,64 +348,64 @@ namespace FDB {
} );
}
void Transaction::addReadConflictRange( KeyRangeRef const& keys ) {
void TransactionImpl::addReadConflictRange(KeyRangeRef const& keys) {
throw_on_error( fdb_transaction_add_conflict_range( tr, keys.begin.begin(), keys.begin.size(), keys.end.begin(), keys.end.size(), FDB_CONFLICT_RANGE_TYPE_READ ) );
}
void Transaction::addReadConflictKey( KeyRef const& key ) {
void TransactionImpl::addReadConflictKey(KeyRef const& key) {
return addReadConflictRange(KeyRange(KeyRangeRef(key, keyAfter(key))));
}
void Transaction::addWriteConflictRange( KeyRangeRef const& keys ) {
void TransactionImpl::addWriteConflictRange(KeyRangeRef const& keys) {
throw_on_error( fdb_transaction_add_conflict_range( tr, keys.begin.begin(), keys.begin.size(), keys.end.begin(), keys.end.size(), FDB_CONFLICT_RANGE_TYPE_WRITE ) );
}
void Transaction::addWriteConflictKey( KeyRef const& key ) {
void TransactionImpl::addWriteConflictKey(KeyRef const& key) {
return addWriteConflictRange(KeyRange(KeyRangeRef(key, keyAfter(key))));
}
void Transaction::atomicOp( const KeyRef& key, const ValueRef& operand, FDBMutationType operationType ) {
void TransactionImpl::atomicOp(const KeyRef& key, const ValueRef& operand, FDBMutationType operationType) {
fdb_transaction_atomic_op( tr, key.begin(), key.size(), operand.begin(), operand.size(), operationType );
}
void Transaction::set( const KeyRef& key, const ValueRef& value ) {
void TransactionImpl::set(const KeyRef& key, const ValueRef& value) {
fdb_transaction_set( tr, key.begin(), key.size(), value.begin(), value.size() );
}
void Transaction::clear( const KeyRangeRef& range ) {
void TransactionImpl::clear(const KeyRangeRef& range) {
fdb_transaction_clear_range( tr, range.begin.begin(), range.begin.size(), range.end.begin(), range.end.size() );
}
void Transaction::clear( const KeyRef& key ) {
void TransactionImpl::clear(const KeyRef& key) {
fdb_transaction_clear( tr, key.begin(), key.size() );
}
Future<Void> Transaction::commit() {
Future<Void> TransactionImpl::commit() {
return backToFuture< Void >( fdb_transaction_commit( tr ), [](Reference<CFuture> f) {
throw_on_error( fdb_future_get_error( f->f ) );
return Void();
} );
}
Version Transaction::getCommittedVersion() {
Version TransactionImpl::getCommittedVersion() {
Version v;
throw_on_error( fdb_transaction_get_committed_version( tr, &v ) );
return v;
}
Future<FDBStandalone<StringRef>> Transaction::getVersionstamp() {
return backToFuture< FDBStandalone<KeyRef> >( fdb_transaction_get_versionstamp( tr ), [](Reference<CFuture> f) {
Future<FDBStandalone<StringRef>> TransactionImpl::getVersionstamp() {
return backToFuture<FDBStandalone<KeyRef>>(fdb_transaction_get_versionstamp(tr), [](Reference<CFuture> f) {
uint8_t const* key;
int key_length;
throw_on_error( fdb_future_get_key( f->f, &key, &key_length ) );
return FDBStandalone<StringRef>( f, StringRef( key, key_length ) );
} );
});
}
void Transaction::setOption( FDBTransactionOption option, Optional<StringRef> value ) {
void TransactionImpl::setOption(FDBTransactionOption option, Optional<StringRef> value) {
if ( value.present() ) {
throw_on_error( fdb_transaction_set_option( tr, option, value.get().begin(), value.get().size() ) );
} else {
@ -342,18 +413,18 @@ namespace FDB {
}
}
Future<Void> Transaction::onError( Error const& e ) {
Future<Void> TransactionImpl::onError(Error const& e) {
return backToFuture< Void >( fdb_transaction_on_error( tr, e.code() ), [](Reference<CFuture> f) {
throw_on_error( fdb_future_get_error( f->f ) );
return Void();
} );
}
void Transaction::cancel() {
void TransactionImpl::cancel() {
fdb_transaction_cancel( tr );
}
void Transaction::reset() {
void TransactionImpl::reset() {
fdb_transaction_reset( tr );
}

View File

@ -30,68 +30,9 @@
#include "FDBLoanerTypes.h"
namespace FDB {
class DatabaseContext : public ReferenceCounted<DatabaseContext>, NonCopyable {
friend class Cluster;
friend class Transaction;
public:
~DatabaseContext() {
fdb_database_destroy( db );
}
void setDatabaseOption(FDBDatabaseOption option, Optional<StringRef> value = Optional<StringRef>());
private:
FDBDatabase* db;
explicit DatabaseContext( FDBDatabase* db ) : db(db) {}
friend class API;
};
// Deprecated: Use createDatabase instead.
class Cluster : public ReferenceCounted<Cluster>, NonCopyable {
public:
~Cluster() {}
Reference<DatabaseContext> createDatabase();
private:
explicit Cluster( std::string connFilename ) : connFilename(connFilename) {}
std::string connFilename;
friend class API;
};
class API {
public:
static API* selectAPIVersion(int apiVersion);
static API* getInstance();
static bool isAPIVersionSelected();
void setNetworkOption(FDBNetworkOption option, Optional<StringRef> value = Optional<StringRef>());
void setupNetwork();
void runNetwork();
void stopNetwork();
// Deprecated: Use createDatabase instead.
Reference<Cluster> createCluster( std::string const& connFilename );
Reference<DatabaseContext> createDatabase( std::string const& connFilename="" );
bool evaluatePredicate(FDBErrorPredicate pred, Error const& e);
int getAPIVersion() const;
private:
static API* instance;
API(int version);
int version;
};
struct CFuture : NonCopyable, ReferenceCounted<CFuture>, FastAllocated<CFuture> {
CFuture() : f(NULL) {}
explicit CFuture( FDBFuture* f ) : f(f) {}
explicit CFuture(FDBFuture* f) : f(f) {}
~CFuture() {
if (f) {
fdb_future_destroy(f);
@ -107,83 +48,102 @@ namespace FDB {
class FDBStandalone : public T {
public:
FDBStandalone() {}
FDBStandalone( Reference<CFuture> f, T const& t ) : T(t), f(f) {}
FDBStandalone( FDBStandalone const& o ) : T((T const&)o), f(o.f) {}
FDBStandalone(Reference<CFuture> f, T const& t) : T(t), f(f) {}
FDBStandalone(FDBStandalone const& o) : T((T const&)o), f(o.f) {}
private:
Reference<CFuture> f;
};
class Transaction : public ReferenceCounted<Transaction>, private NonCopyable, public FastAllocated<Transaction> {
class ReadTransaction : public ReferenceCounted<ReadTransaction> {
public:
explicit Transaction( Reference<DatabaseContext> const& db );
~Transaction() {
if (tr) {
fdb_transaction_destroy(tr);
}
virtual ~ReadTransaction(){};
virtual void setReadVersion(Version v) = 0;
virtual Future<Version> getReadVersion() = 0;
virtual Future<Optional<FDBStandalone<ValueRef>>> get(const Key& key, bool snapshot = false) = 0;
virtual Future<FDBStandalone<KeyRef>> getKey(const KeySelector& key, bool snapshot = false) = 0;
virtual Future<Void> watch(const Key& key) = 0;
virtual Future<FDBStandalone<RangeResultRef>> getRange(
const KeySelector& begin, const KeySelector& end, GetRangeLimits limits = GetRangeLimits(),
bool snapshot = false, bool reverse = false,
FDBStreamingMode streamingMode = FDB_STREAMING_MODE_SERIAL) = 0;
virtual Future<FDBStandalone<RangeResultRef>> getRange(
const KeySelector& begin, const KeySelector& end, int limit, bool snapshot = false, bool reverse = false,
FDBStreamingMode streamingMode = FDB_STREAMING_MODE_SERIAL) {
return getRange(begin, end, GetRangeLimits(limit), snapshot, reverse, streamingMode);
}
virtual Future<FDBStandalone<RangeResultRef>> getRange(
const KeyRange& keys, int limit, bool snapshot = false, bool reverse = false,
FDBStreamingMode streamingMode = FDB_STREAMING_MODE_SERIAL) {
return getRange(KeySelector(firstGreaterOrEqual(keys.begin), keys.arena()),
KeySelector(firstGreaterOrEqual(keys.end), keys.arena()), limit, snapshot, reverse,
streamingMode);
}
virtual Future<FDBStandalone<RangeResultRef>> getRange(
const KeyRange& keys, GetRangeLimits limits = GetRangeLimits(), bool snapshot = false, bool reverse = false,
FDBStreamingMode streamingMode = FDB_STREAMING_MODE_SERIAL) {
return getRange(KeySelector(firstGreaterOrEqual(keys.begin), keys.arena()),
KeySelector(firstGreaterOrEqual(keys.end), keys.arena()), limits, snapshot, reverse,
streamingMode);
}
void setVersion( Version v );
Future<Version> getReadVersion();
virtual void addReadConflictRange(KeyRangeRef const& keys) = 0;
virtual void addReadConflictKey(KeyRef const& key) = 0;
Future< Optional<FDBStandalone<ValueRef>> > get( const Key& key, bool snapshot = false );
Future< Void > watch( const Key& key );
Future< FDBStandalone<KeyRef> > getKey( const KeySelector& key, bool snapshot = false );
Future< FDBStandalone<RangeResultRef> > getRange( const KeySelector& begin, const KeySelector& end, GetRangeLimits limits = GetRangeLimits(), bool snapshot = false, bool reverse = false, FDBStreamingMode streamingMode = FDB_STREAMING_MODE_SERIAL);
Future< FDBStandalone<RangeResultRef> > getRange( const KeySelector& begin, const KeySelector& end, int limit, bool snapshot = false, bool reverse = false, FDBStreamingMode streamingMode = FDB_STREAMING_MODE_SERIAL ) {
return getRange( begin, end, GetRangeLimits(limit), snapshot, reverse, streamingMode );
}
Future< FDBStandalone<RangeResultRef> > getRange( const KeyRange& keys, int limit, bool snapshot = false, bool reverse = false, FDBStreamingMode streamingMode = FDB_STREAMING_MODE_SERIAL ) {
return getRange( KeySelector( firstGreaterOrEqual(keys.begin), keys.arena() ),
KeySelector( firstGreaterOrEqual(keys.end), keys.arena() ),
limit, snapshot, reverse, streamingMode );
}
Future< FDBStandalone<RangeResultRef> > getRange( const KeyRange& keys, GetRangeLimits limits = GetRangeLimits(), bool snapshot = false, bool reverse = false, FDBStreamingMode streamingMode = FDB_STREAMING_MODE_SERIAL ) {
return getRange( KeySelector( firstGreaterOrEqual(keys.begin), keys.arena() ),
KeySelector( firstGreaterOrEqual(keys.end), keys.arena() ),
limits, snapshot, reverse, streamingMode );
}
virtual void setOption(FDBTransactionOption option, Optional<StringRef> value = Optional<StringRef>()) = 0;
// Future< Standalone<VectorRef<const char*>> > getAddressesForKey(const Key& key);
virtual Future<Void> onError(Error const& e) = 0;
void addReadConflictRange( KeyRangeRef const& keys );
void addReadConflictKey( KeyRef const& key );
void addWriteConflictRange( KeyRangeRef const& keys );
void addWriteConflictKey( KeyRef const& key );
// void makeSelfConflicting() { tr.makeSelfConflicting(); }
void atomicOp( const KeyRef& key, const ValueRef& operand, FDBMutationType operationType );
void set( const KeyRef& key, const ValueRef& value );
void clear( const KeyRangeRef& range );
void clear( const KeyRef& key );
Future<Void> commit();
Version getCommittedVersion();
Future<FDBStandalone<StringRef>> getVersionstamp();
void setOption( FDBTransactionOption option, Optional<StringRef> value = Optional<StringRef>() );
Future<Void> onError( Error const& e );
void cancel();
void reset();
// double getBackoff() { return tr.getBackoff(); }
// void debugTransaction(UID dID) { tr.debugTransaction(dID); }
Transaction() : tr(NULL) {}
Transaction( Transaction&& r ) noexcept(true) {
tr = r.tr;
r.tr = NULL;
}
Transaction& operator=( Transaction&& r ) noexcept(true) {
tr = r.tr;
r.tr = NULL;
return *this;
}
private:
FDBTransaction* tr;
virtual void cancel() = 0;
virtual void reset() = 0;
};
}
class Transaction : public ReadTransaction {
public:
virtual void addWriteConflictRange(KeyRangeRef const& keys) = 0;
virtual void addWriteConflictKey(KeyRef const& key) = 0;
#endif
virtual void atomicOp(const KeyRef& key, const ValueRef& operand, FDBMutationType operationType) = 0;
virtual void set(const KeyRef& key, const ValueRef& value) = 0;
virtual void clear(const KeyRangeRef& range) = 0;
virtual void clear(const KeyRef& key) = 0;
virtual Future<Void> commit() = 0;
virtual Version getCommittedVersion() = 0;
virtual Future<FDBStandalone<StringRef>> getVersionstamp() = 0;
};
class Database : public ReferenceCounted<Database> {
public:
virtual ~Database(){};
virtual Reference<Transaction> createTransaction() = 0;
virtual void setDatabaseOption(FDBDatabaseOption option, Optional<StringRef> value = Optional<StringRef>()) = 0;
};
class API {
public:
static API* selectAPIVersion(int apiVersion);
static API* getInstance();
static bool isAPIVersionSelected();
void setNetworkOption(FDBNetworkOption option, Optional<StringRef> value = Optional<StringRef>());
void setupNetwork();
void runNetwork();
void stopNetwork();
Reference<Database> createDatabase(std::string const& connFilename = "");
bool evaluatePredicate(FDBErrorPredicate pred, Error const& e);
int getAPIVersion() const;
private:
static API* instance;
API(int version);
int version;
};
} // namespace FDB
#endif // FDB_FLOW_FDB_FLOW_H

View File

@ -79,11 +79,11 @@
<PropertyGroup Label="UserMacros" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|X64'">
<LinkIncremental>true</LinkIncremental>
<IncludePath>..\..\;C:\Program Files\boost_1_52_0;$(IncludePath)</IncludePath>
<IncludePath>..\..\;C:\Program Files\boost_1_67_0;$(IncludePath)</IncludePath>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|X64'">
<LinkIncremental>false</LinkIncremental>
<IncludePath>..\..\;C:\Program Files\boost_1_52_0;$(IncludePath)</IncludePath>
<IncludePath>..\..\;C:\Program Files\boost_1_67_0;$(IncludePath)</IncludePath>
</PropertyGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|X64'">
<ClCompile>
@ -95,7 +95,7 @@
<Optimization>Disabled</Optimization>
<BasicRuntimeChecks>EnableFastChecks</BasicRuntimeChecks>
<RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=0x0502;WINVER=0x0502;NTDDI_VERSION=0x05020000;_DEBUG;_HAS_ITERATOR_DEBUGGING=0;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=0x0502;WINVER=0x0502;BOOST_ALL_NO_LIB;NTDDI_VERSION=0x05020000;_DEBUG;_HAS_ITERATOR_DEBUGGING=0;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>%(AdditionalIncludeDirectories);..\c</AdditionalIncludeDirectories>
<MultiProcessorCompilation>true</MultiProcessorCompilation>
<AdditionalOptions>/bigobj @..\..\flow/no_intellisense.opt %(AdditionalOptions)</AdditionalOptions>
@ -118,7 +118,7 @@
<Optimization>Full</Optimization>
<RuntimeLibrary>MultiThreaded</RuntimeLibrary>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=0x0502;WINVER=0x0502;NTDDI_VERSION=0x05020000;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=0x0502;WINVER=0x0502;BOOST_ALL_NO_LIB;NTDDI_VERSION=0x05020000;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>%(AdditionalIncludeDirectories);..\c</AdditionalIncludeDirectories>
<EnableEnhancedInstructionSet>NotSet</EnableEnhancedInstructionSet>
<EnablePREfast>false</EnablePREfast>

View File

@ -41,7 +41,8 @@ std::map<Standalone<StringRef>, Reference<Transaction>> trMap;
const int ITERATION_PROGRESSION[] = { 256, 1000, 4096, 6144, 9216, 13824, 20736, 31104, 46656, 69984, 80000 };
const int MAX_ITERATION = sizeof(ITERATION_PROGRESSION)/sizeof(int);
static Future<Void> runTest(Reference<FlowTesterData> const& data, Reference<DatabaseContext> const& db, StringRef const& prefix);
static Future<Void> runTest(Reference<FlowTesterData> const& data, Reference<Database> const& db,
StringRef const& prefix);
THREAD_FUNC networkThread( void* api ) {
// This is the fdb_flow network we're running on a thread
@ -388,7 +389,7 @@ struct LogStackFunc : InstructionFunc {
ACTOR static Future<Void> logStack(Reference<FlowTesterData> data, std::map<int, StackItem> entries, Standalone<StringRef> prefix) {
loop {
state Reference<Transaction> tr(new Transaction(data->db));
state Reference<Transaction> tr = data->db->createTransaction();
try {
for(auto it : entries) {
Tuple tk;
@ -534,7 +535,7 @@ struct NewTransactionFunc : InstructionFunc {
static const char* name;
static Future<Void> call(Reference<FlowTesterData> const& data, Reference<InstructionData> const& instruction) {
trMap[data->trName] = Reference<Transaction>(new Transaction(data->db));
trMap[data->trName] = data->db->createTransaction();
return Void();
}
};
@ -550,7 +551,7 @@ struct UseTransactionFunc : InstructionFunc {
data->trName = name;
if(trMap.count(data->trName) == 0) {
trMap[data->trName] = Reference<Transaction>(new Transaction(data->db));
trMap[data->trName] = data->db->createTransaction();
}
return Void();
}
@ -681,7 +682,7 @@ struct SetReadVersionFunc : InstructionFunc {
static const char* name;
static Future<Void> call(Reference<FlowTesterData> const& data, Reference<InstructionData> const& instruction) {
instruction->tr->setVersion(data->lastVersion);
instruction->tr->setReadVersion(data->lastVersion);
return Void();
}
};
@ -1323,6 +1324,20 @@ struct StartThreadFunc : InstructionFunc {
const char* StartThreadFunc::name = "START_THREAD";
REGISTER_INSTRUCTION_FUNC(StartThreadFunc);
ACTOR template <class Function>
Future<decltype(fake<Function>()(Reference<ReadTransaction>()).getValue())> read(Reference<Database> db,
Function func) {
state Reference<ReadTransaction> tr = db->createTransaction();
loop {
try {
state decltype(fake<Function>()(Reference<ReadTransaction>()).getValue()) result = wait(func(tr));
return result;
} catch (Error& e) {
wait(tr->onError(e));
}
}
}
// WAIT_EMPTY
struct WaitEmptyFunc : InstructionFunc {
static const char* name;
@ -1333,25 +1348,23 @@ struct WaitEmptyFunc : InstructionFunc {
return Void();
Standalone<StringRef> s1 = wait(items[0].value);
state Standalone<StringRef> prefix = Tuple::unpack(s1).getString(0);
Standalone<StringRef> prefix = Tuple::unpack(s1).getString(0);
// printf("=========WAIT_EMPTY:%s\n", printable(prefix).c_str());
state Reference<Transaction> tr(new Transaction(data->db));
loop {
try {
FDBStandalone<RangeResultRef> results = wait(tr->getRange(KeyRangeRef(prefix, strinc(prefix)), 1));
if(results.size() > 0) {
throw not_committed();
}
break;
}
catch(Error &e) {
wait(tr->onError(e));
}
}
wait(read(data->db,
[=](Reference<ReadTransaction> tr) -> Future<Void> { return checkEmptyPrefix(tr, prefix); }));
return Void();
}
private:
ACTOR static Future<Void> checkEmptyPrefix(Reference<ReadTransaction> tr, Standalone<StringRef> prefix) {
FDBStandalone<RangeResultRef> results = wait(tr->getRange(KeyRangeRef(prefix, strinc(prefix)), 1));
if (results.size() > 0) {
throw not_committed();
}
return Void();
}
};
const char* WaitEmptyFunc::name = "WAIT_EMPTY";
REGISTER_INSTRUCTION_FUNC(WaitEmptyFunc);
@ -1529,7 +1542,7 @@ struct UnitTestsFunc : InstructionFunc {
}
API::selectAPIVersion(fdb->getAPIVersion());
state Reference<Transaction> tr(new Transaction(data->db));
state Reference<Transaction> tr = data->db->createTransaction();
tr->setOption(FDBTransactionOption::FDB_TR_OPTION_PRIORITY_SYSTEM_IMMEDIATE);
tr->setOption(FDBTransactionOption::FDB_TR_OPTION_PRIORITY_SYSTEM_IMMEDIATE);
tr->setOption(FDBTransactionOption::FDB_TR_OPTION_PRIORITY_BATCH);
@ -1560,7 +1573,7 @@ const char* UnitTestsFunc::name = "UNIT_TESTS";
REGISTER_INSTRUCTION_FUNC(UnitTestsFunc);
ACTOR static Future<Void> getInstructions(Reference<FlowTesterData> data, StringRef prefix) {
state Reference<Transaction> tr(new Transaction(data->db));
state Reference<Transaction> tr = data->db->createTransaction();
// get test instructions
state Tuple testSpec;
@ -1610,7 +1623,7 @@ ACTOR static Future<Void> doInstructions(Reference<FlowTesterData> data) {
state Reference<InstructionData> instruction = Reference<InstructionData>(new InstructionData(isDatabase, isSnapshot, data->instructions[idx].value, Reference<Transaction>()));
if (isDatabase) {
state Reference<Transaction> tr(new Transaction(data->db));
state Reference<Transaction> tr = data->db->createTransaction();
instruction->tr = tr;
}
else {
@ -1644,7 +1657,7 @@ ACTOR static Future<Void> doInstructions(Reference<FlowTesterData> data) {
return Void();
}
ACTOR static Future<Void> runTest(Reference<FlowTesterData> data, Reference<DatabaseContext> db, StringRef prefix) {
ACTOR static Future<Void> runTest(Reference<FlowTesterData> data, Reference<Database> db, StringRef prefix) {
ASSERT(data);
try {
data->db = db;
@ -1744,7 +1757,7 @@ ACTOR void _test_versionstamp() {
startThread(networkThread, fdb);
auto db = fdb->createDatabase();
state Reference<Transaction> tr(new Transaction(db));
state Reference<Transaction> tr = db->createTransaction();
state Future<FDBStandalone<StringRef>> ftrVersion = tr->getVersionstamp();

View File

@ -199,7 +199,7 @@ struct DirectoryTesterData {
struct FlowTesterData : public ReferenceCounted<FlowTesterData> {
FDB::API *api;
Reference<FDB::DatabaseContext> db;
Reference<FDB::Database> db;
Standalone<FDB::RangeResultRef> instructions;
Standalone<StringRef> trName;
FlowTesterStack stack;

View File

@ -58,13 +58,13 @@
<LinkIncremental>true</LinkIncremental>
<OutDir>$(SolutionDir)bin\$(Configuration)\</OutDir>
<IntDir>$(SystemDrive)\temp\msvcfdb\$(Platform)$(Configuration)\$(MSBuildProjectName)\</IntDir>
<IncludePath>$(IncludePath);../../../;C:\Program Files\boost_1_52_0</IncludePath>
<IncludePath>$(IncludePath);../../../;C:\Program Files\boost_1_67_0</IncludePath>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|X64'">
<LinkIncremental>false</LinkIncremental>
<OutDir>$(SolutionDir)bin\$(Configuration)\</OutDir>
<IntDir>$(SystemDrive)\temp\msvcfdb\$(Platform)$(Configuration)\$(MSBuildProjectName)\</IntDir>
<IncludePath>$(IncludePath);../;C:\Program Files\boost_1_52_0</IncludePath>
<IncludePath>$(IncludePath);../;C:\Program Files\boost_1_67_0</IncludePath>
</PropertyGroup>
<ItemDefinitionGroup>
<ClCompile>
@ -77,7 +77,7 @@
</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=_WIN32_WINNT_WS03;WINVER=_WIN32_WINNT_WS03;NTDDI_VERSION=NTDDI_WS03;_DEBUG;_HAS_ITERATOR_DEBUGGING=0;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=_WIN32_WINNT_WS03;BOOST_ALL_NO_LIB;WINVER=_WIN32_WINNT_WS03;NTDDI_VERSION=NTDDI_WS03;BOOST_ALL_NO_LIB;_DEBUG;_HAS_ITERATOR_DEBUGGING=0;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<MultiProcessorCompilation>true</MultiProcessorCompilation>
<MinimalRebuild>false</MinimalRebuild>
<RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>

View File

@ -25,8 +25,6 @@ import (
"encoding/binary"
"encoding/hex"
"fmt"
"github.com/apple/foundationdb/bindings/go/src/fdb"
"github.com/apple/foundationdb/bindings/go/src/fdb/tuple"
"log"
"math/big"
"os"
@ -37,6 +35,9 @@ import (
"strings"
"sync"
"time"
"github.com/apple/foundationdb/bindings/go/src/fdb"
"github.com/apple/foundationdb/bindings/go/src/fdb/tuple"
)
const verbose bool = false
@ -104,7 +105,7 @@ func (sm *StackMachine) waitAndPop() (ret stackEntry) {
switch el := ret.item.(type) {
case []byte:
ret.item = el
case int64, uint64, *big.Int, string, bool, tuple.UUID, float32, float64, tuple.Tuple:
case int64, uint64, *big.Int, string, bool, tuple.UUID, float32, float64, tuple.Tuple, tuple.Versionstamp:
ret.item = el
case fdb.Key:
ret.item = []byte(el)
@ -661,6 +662,24 @@ func (sm *StackMachine) processInst(idx int, inst tuple.Tuple) {
t = append(t, sm.waitAndPop().item)
}
sm.store(idx, []byte(t.Pack()))
case op == "TUPLE_PACK_WITH_VERSIONSTAMP":
var t tuple.Tuple
prefix := sm.waitAndPop().item.([]byte)
c := sm.waitAndPop().item.(int64)
for i := 0; i < int(c); i++ {
t = append(t, sm.waitAndPop().item)
}
packed, err := t.PackWithVersionstamp(prefix)
if err != nil && strings.Contains(err.Error(), "No incomplete") {
sm.store(idx, []byte("ERROR: NONE"))
} else if err != nil {
sm.store(idx, []byte("ERROR: MULTIPLE"))
} else {
sm.store(idx, []byte("OK"))
sm.store(idx, packed)
}
case op == "TUPLE_UNPACK":
t, e := tuple.Unpack(fdb.Key(sm.waitAndPop().item.([]byte)))
if e != nil {
@ -812,7 +831,8 @@ func (sm *StackMachine) processInst(idx int, inst tuple.Tuple) {
tr.Options().SetRetryLimit(50)
tr.Options().SetMaxRetryDelay(100)
tr.Options().SetUsedDuringCommitProtectionDisable()
tr.Options().SetTransactionLoggingEnable("my_transaction")
tr.Options().SetDebugTransactionIdentifier("my_transaction")
tr.Options().SetLogTransaction()
tr.Options().SetReadLockAware()
tr.Options().SetLockAware()

View File

@ -39,6 +39,7 @@ package tuple
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"math"
"math/big"
@ -72,6 +73,39 @@ type Tuple []TupleElement
// an instance of this type.
type UUID [16]byte
// Versionstamp is a struct for a FoundationDB versionstamp. Versionstamps are
// 12 bytes long, composed of a 10-byte transaction version and a 2-byte user
// version. The transaction version is filled in at commit time and the user
// version is provided by the application to order results within a transaction.
type Versionstamp struct {
TransactionVersion [10]byte
UserVersion uint16
}
var incompleteTransactionVersion = [10]byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}
const versionstampLength = 12
// IncompleteVersionstamp is the constructor you should use to make
// an incomplete versionstamp to use in a tuple.
func IncompleteVersionstamp(userVersion uint16) Versionstamp {
return Versionstamp{
TransactionVersion: incompleteTransactionVersion,
UserVersion: userVersion,
}
}
// Bytes converts a Versionstamp struct to a byte slice for encoding in a tuple.
func (v Versionstamp) Bytes() []byte {
var scratch [versionstampLength]byte
copy(scratch[:], v.TransactionVersion[:])
binary.BigEndian.PutUint16(scratch[10:], v.UserVersion)
return scratch[:]
}
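For orientation, a minimal sketch of how the new type is constructed; this is not part of the commit and relies only on IncompleteVersionstamp and Bytes defined above:

package main

import (
	"fmt"

	"github.com/apple/foundationdb/bindings/go/src/fdb/tuple"
)

func main() {
	// An incomplete versionstamp carries the all-0xFF transaction version
	// placeholder until commit time; the user version orders results within
	// a single transaction.
	v := tuple.IncompleteVersionstamp(1)

	// Bytes() is always 12 bytes: the 10-byte transaction version followed
	// by the big-endian 2-byte user version.
	fmt.Printf("%x (%d bytes)\n", v.Bytes(), len(v.Bytes()))
}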
// Type codes: These prefix the different elements in a packed Tuple
// to indicate what type they are.
const nilCode = 0x00
@ -86,6 +120,7 @@ const doubleCode = 0x21
const falseCode = 0x26
const trueCode = 0x27
const uuidCode = 0x30
const versionstampCode = 0x33
var sizeLimits = []uint64{
1<<(0*8) - 1,
@ -122,7 +157,15 @@ func adjustFloatBytes(b []byte, encode bool) {
}
type packer struct {
buf []byte
versionstampPos int32
buf []byte
}
func newPacker() *packer {
return &packer{
versionstampPos: -1,
buf: make([]byte, 0, 64),
}
}
func (p *packer) putByte(b byte) {
@ -249,7 +292,22 @@ func (p *packer) encodeUUID(u UUID) {
p.putBytes(u[:])
}
func (p *packer) encodeTuple(t Tuple, nested bool) {
func (p *packer) encodeVersionstamp(v Versionstamp) {
p.putByte(versionstampCode)
isIncomplete := v.TransactionVersion == incompleteTransactionVersion
if isIncomplete {
if p.versionstampPos != -1 {
panic(fmt.Sprintf("Tuple can only contain one incomplete versionstamp"))
}
p.versionstampPos = int32(len(p.buf))
}
p.putBytes(v.Bytes())
}
func (p *packer) encodeTuple(t Tuple, nested bool, versionstamps bool) {
if nested {
p.putByte(nestedCode)
}
@ -257,7 +315,7 @@ func (p *packer) encodeTuple(t Tuple, nested bool) {
for i, e := range t {
switch e := e.(type) {
case Tuple:
p.encodeTuple(e, true)
p.encodeTuple(e, true, versionstamps)
case nil:
p.putByte(nilCode)
if nested {
@ -293,6 +351,12 @@ func (p *packer) encodeTuple(t Tuple, nested bool) {
}
case UUID:
p.encodeUUID(e)
case Versionstamp:
if versionstamps == false && e.TransactionVersion == incompleteTransactionVersion {
panic(fmt.Sprintf("Incomplete Versionstamp included in vanilla tuple pack"))
}
p.encodeVersionstamp(e)
default:
panic(fmt.Sprintf("unencodable element at index %d (%v, type %T)", i, t[i], t[i]))
}
@ -306,19 +370,103 @@ func (p *packer) encodeTuple(t Tuple, nested bool) {
// Pack returns a new byte slice encoding the provided tuple. Pack will panic if
// the tuple contains an element of any type other than []byte,
// fdb.KeyConvertible, string, int64, int, uint64, uint, *big.Int, big.Int, float32,
// float64, bool, tuple.UUID, nil, or a Tuple with elements of valid types. It will
// also panic if an integer is specified with a value outside the range
// [-2**2040+1, 2**2040-1]
// float64, bool, tuple.UUID, tuple.Versionstamp, nil, or a Tuple with elements of
// valid types. It will also panic if an integer is specified with a value outside
// the range [-2**2040+1, 2**2040-1]
//
// Tuple satisfies the fdb.KeyConvertible interface, so it is not necessary to
// call Pack when using a Tuple with a FoundationDB API function that requires a
// key.
//
// This method will panic if the tuple contains an incomplete Versionstamp. Use
// PackWithVersionstamp instead.
//
func (t Tuple) Pack() []byte {
p := packer{buf: make([]byte, 0, 64)}
p.encodeTuple(t, false)
p := newPacker()
p.encodeTuple(t, false, false)
return p.buf
}
// PackWithVersionstamp packs the specified tuple into a key for versionstamp
// operations. See Pack for more information. This function will return an error
// if the tuple contains more than one incomplete versionstamp, or if the API
// version is less than 520 and the versionstamp position does not fit in a uint16.
func (t Tuple) PackWithVersionstamp(prefix []byte) ([]byte, error) {
hasVersionstamp, err := t.HasIncompleteVersionstamp()
if err != nil {
return nil, err
}
apiVersion, err := fdb.GetAPIVersion()
if err != nil {
return nil, err
}
if hasVersionstamp == false {
return nil, errors.New("No incomplete versionstamp included in tuple pack with versionstamp")
}
p := newPacker()
if prefix != nil {
p.putBytes(prefix)
}
p.encodeTuple(t, false, true)
if hasVersionstamp {
var scratch [4]byte
var offsetIndex int
if apiVersion < 520 {
if p.versionstampPos > math.MaxUint16 {
return nil, errors.New("Versionstamp position too large")
}
offsetIndex = 2
binary.LittleEndian.PutUint16(scratch[:], uint16(p.versionstampPos))
} else {
offsetIndex = 4
binary.LittleEndian.PutUint32(scratch[:], uint32(p.versionstampPos))
}
p.putBytes(scratch[0:offsetIndex])
}
return p.buf, nil
}
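A hedged end-to-end sketch of how PackWithVersionstamp is meant to be consumed; this is not part of the commit and assumes a reachable cluster plus the Go bindings' SetVersionstampedKey atomic operation, which consumes the trailing offset appended above and splices the commit version into the key:

package main

import (
	"log"

	"github.com/apple/foundationdb/bindings/go/src/fdb"
	"github.com/apple/foundationdb/bindings/go/src/fdb/tuple"
)

func main() {
	fdb.MustAPIVersion(610)
	db := fdb.MustOpenDefault()

	// A tuple may carry at most one incomplete versionstamp; plain Pack()
	// would panic on it, so PackWithVersionstamp is used instead.
	t := tuple.Tuple{"log", tuple.IncompleteVersionstamp(0)}
	key, err := t.PackWithVersionstamp([]byte("app/"))
	if err != nil {
		log.Fatal(err)
	}

	_, err = db.Transact(func(tr fdb.Transaction) (interface{}, error) {
		// The versionstamped-key mutation fills in the 10-byte transaction
		// version at commit time, using the offset encoded at the end of key.
		tr.SetVersionstampedKey(fdb.Key(key), []byte("value"))
		return nil, nil
	})
	if err != nil {
		log.Fatal(err)
	}
}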
// HasIncompleteVersionstamp determines if there is at least one incomplete
// versionstamp in a tuple. This function will return an error if the tuple
// contains more than one incomplete versionstamp.
func (t Tuple) HasIncompleteVersionstamp() (bool, error) {
incompleteCount := t.countIncompleteVersionstamps()
var err error
if incompleteCount > 1 {
err = errors.New("Tuple can only contain one incomplete versionstamp")
}
return incompleteCount >= 1, err
}
func (t Tuple) countIncompleteVersionstamps() int {
incompleteCount := 0
for _, el := range t {
switch e := el.(type) {
case Versionstamp:
if e.TransactionVersion == incompleteTransactionVersion {
incompleteCount++
}
case Tuple:
incompleteCount += e.countIncompleteVersionstamps()
}
}
return incompleteCount
}
func findTerminator(b []byte) int {
bp := b
var length int
@ -438,6 +586,20 @@ func decodeUUID(b []byte) (UUID, int) {
return u, 17
}
func decodeVersionstamp(b []byte) (Versionstamp, int) {
var transactionVersion [10]byte
var userVersion uint16
copy(transactionVersion[:], b[1:11])
userVersion = binary.BigEndian.Uint16(b[11:])
return Versionstamp{
TransactionVersion: transactionVersion,
UserVersion: userVersion,
}, versionstampLength + 1
}
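A small sketch of the decode path, also not part of this commit: decodeVersionstamp is reached through the public Unpack, and a complete versionstamp round-trips through Pack and Unpack:

package main

import (
	"fmt"

	"github.com/apple/foundationdb/bindings/go/src/fdb/tuple"
)

func main() {
	// A complete versionstamp (transaction version already known).
	var v tuple.Versionstamp
	copy(v.TransactionVersion[:], []byte("0123456789"))
	v.UserVersion = 7

	packed := tuple.Tuple{v}.Pack()
	elems, err := tuple.Unpack(packed)
	if err != nil {
		panic(err)
	}
	fmt.Println(elems[0].(tuple.Versionstamp).UserVersion) // prints 7
}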
func decodeTuple(b []byte, nested bool) (Tuple, int, error) {
var t Tuple
@ -489,6 +651,11 @@ func decodeTuple(b []byte, nested bool) (Tuple, int, error) {
return nil, i, fmt.Errorf("insufficient bytes to decode UUID starting at position %d of byte array for tuple", i)
}
el, off = decodeUUID(b[i:])
case b[i] == versionstampCode:
if i+versionstampLength+1 > len(b) {
return nil, i, fmt.Errorf("insufficient bytes to decode Versionstamp starting at position %d of byte array for tuple", i)
}
el, off = decodeVersionstamp(b[i:])
case b[i] == nestedCode:
var err error
el, off, err = decodeTuple(b[i+1:], true)

View File

@ -206,7 +206,11 @@ JNIEXPORT jthrowable JNICALL Java_com_apple_foundationdb_NativeFuture_Future_1ge
return JNI_NULL;
}
FDBFuture *sav = (FDBFuture *)future;
return getThrowable( jenv, fdb_future_get_error( sav ) );
fdb_error_t err = fdb_future_get_error( sav );
if( err )
return getThrowable( jenv, err );
else
return JNI_NULL;
}
JNIEXPORT jboolean JNICALL Java_com_apple_foundationdb_NativeFuture_Future_1isReady(JNIEnv *jenv, jobject, jlong future) {

View File

@ -45,7 +45,7 @@
</ImportGroup>
<PropertyGroup Label="UserMacros" />
<PropertyGroup>
<IncludePath>..\..\;C:\Program Files\Java\jdk6\include\win32;C:\Program Files\Java\jdk6\include;C:\Program Files\boost_1_52_0;$(IncludePath)</IncludePath>
<IncludePath>..\..\;C:\Program Files\Java\jdk6\include\win32;C:\Program Files\Java\jdk6\include;C:\Program Files\boost_1_67_0;$(IncludePath)</IncludePath>
<OutDir>$(SolutionDir)bin\$(Configuration)\</OutDir>
<IntDir>$(SystemDrive)\temp\msvcfdb\$(Platform)$(Configuration)\$(MSBuildProjectName)\</IntDir>
</PropertyGroup>
@ -60,7 +60,7 @@
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<AdditionalIncludeDirectories>%(AdditionalIncludeDirectories);$(SolutionDir)bindings\c</AdditionalIncludeDirectories>
<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=_WIN32_WINNT_WS03;WINVER=_WIN32_WINNT_WS03;NTDDI_VERSION=NTDDI_WS03;_DEBUG;_HAS_ITERATOR_DEBUGGING=0;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=_WIN32_WINNT_WS03;BOOST_ALL_NO_LIB;WINVER=_WIN32_WINNT_WS03;NTDDI_VERSION=NTDDI_WS03;_DEBUG;_HAS_ITERATOR_DEBUGGING=0;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
<AdditionalOptions>/bigobj "@$(SolutionDir)flow/no_intellisense.opt" %(AdditionalOptions)</AdditionalOptions>
</ClCompile>
@ -75,7 +75,7 @@
<FunctionLevelLinking>true</FunctionLevelLinking>
<IntrinsicFunctions>true</IntrinsicFunctions>
<AdditionalIncludeDirectories>%(AdditionalIncludeDirectories);$(SolutionDir)bindings\c</AdditionalIncludeDirectories>
<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=_WIN32_WINNT_WS03;WINVER=_WIN32_WINNT_WS03;NTDDI_VERSION=NTDDI_WS03;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=_WIN32_WINNT_WS03;BOOST_ALL_NO_LIB;WINVER=_WIN32_WINNT_WS03;NTDDI_VERSION=NTDDI_WS03;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<RuntimeLibrary>MultiThreaded</RuntimeLibrary>
<AdditionalOptions>/bigobj "@$(SolutionDir)flow/no_intellisense.opt" %(AdditionalOptions)</AdditionalOptions>
</ClCompile>

View File

@ -40,11 +40,26 @@ class FDBTransaction extends NativeObjectWrapper implements Transaction, OptionC
public final ReadTransaction snapshot;
class ReadSnapshot implements ReadTransaction {
@Override
public boolean isSnapshot() {
return true;
}
@Override
public ReadTransaction snapshot() {
return this;
}
@Override
public CompletableFuture<Long> getReadVersion() {
return FDBTransaction.this.getReadVersion();
}
@Override
public void setReadVersion(long version) {
FDBTransaction.this.setReadVersion(version);
}
@Override
public CompletableFuture<byte[]> get(byte[] key) {
return get_internal(key, true);
@ -126,6 +141,18 @@ class FDBTransaction extends NativeObjectWrapper implements Transaction, OptionC
return getRange(range, ReadTransaction.ROW_LIMIT_UNLIMITED);
}
@Override
public boolean addReadConflictRangeIfNotSnapshot(byte[] keyBegin, byte[] keyEnd) {
// This is a snapshot transaction; do not add the conflict range.
return false;
}
@Override
public boolean addReadConflictKeyIfNotSnapshot(byte[] key) {
// This is a snapshot transaction; do not add the conflict key.
return false;
}
@Override
public TransactionOptions options() {
return FDBTransaction.this.options();
@ -157,6 +184,11 @@ class FDBTransaction extends NativeObjectWrapper implements Transaction, OptionC
transactionOwner = true;
}
@Override
public boolean isSnapshot() {
return false;
}
@Override
public ReadTransaction snapshot() {
return snapshot;
@ -321,11 +353,23 @@ class FDBTransaction extends NativeObjectWrapper implements Transaction, OptionC
}
}
@Override
public boolean addReadConflictRangeIfNotSnapshot(byte[] keyBegin, byte[] keyEnd) {
addReadConflictRange(keyBegin, keyEnd);
return true;
}
@Override
public void addReadConflictRange(byte[] keyBegin, byte[] keyEnd) {
addConflictRange(keyBegin, keyEnd, ConflictRangeType.READ);
}
@Override
public boolean addReadConflictKeyIfNotSnapshot(byte[] key) {
addReadConflictKey(key);
return true;
}
@Override
public void addReadConflictKey(byte[] key) {
addConflictRange(key, ByteArrayUtil.join(key, new byte[]{(byte) 0}), ConflictRangeType.READ);

View File

@ -37,7 +37,7 @@ class FutureResults extends NativeFuture<RangeResultInfo> {
protected RangeResultInfo getIfDone_internal(long cPtr) throws FDBException {
FDBException err = Future_getError(cPtr);
if(!err.isSuccess()) {
if(err != null && !err.isSuccess()) {
throw err;
}

View File

@ -34,7 +34,7 @@ class FutureVoid extends NativeFuture<Void> {
// with a get on the error and throw if the error is not success.
FDBException err = Future_getError(cPtr);
if(!err.isSuccess()) {
if(err != null && !err.isSuccess()) {
throw err;
}
return null;

View File

@ -32,7 +32,7 @@ import com.apple.foundationdb.tuple.Tuple;
* <br>
* <b>Note:</b> Client must call {@link Transaction#commit()} and wait on the result on all transactions,
* even ones that only read. This is done automatically when using the retry loops from
* {@link Database#run(Function)}. This is explained more in the intro to {@link Transaction}.
* {@link Database#run(java.util.function.Function)}. This is explained more in the intro to {@link Transaction}.
*
* @see Transaction
*/
@ -43,12 +43,73 @@ public interface ReadTransaction extends ReadTransactionContext {
*/
int ROW_LIMIT_UNLIMITED = 0;
/**
* Gets whether this transaction is a snapshot view of the database. In other words, this returns
* whether read conflict ranges are omitted for any reads done through this {@code ReadTransaction}.
* <br>
* For more information about how to use snapshot reads correctly, see
* <a href="/foundationdb/developer-guide.html#using-snapshot-reads" target="_blank">Using snapshot reads</a>.
*
* @return whether this is a snapshot view of the database with relaxed isolation properties
* @see #snapshot()
*/
boolean isSnapshot();
/**
* Return a special-purpose, read-only view of the database. Reads done through this interface are known as "snapshot reads".
* Snapshot reads selectively relax FoundationDB's isolation property, reducing
* <a href="/foundationdb/developer-guide.html#transaction-conflicts" target="_blank">Transaction conflicts</a>
* but making reasoning about concurrency harder.<br>
* <br>
* For more information about how to use snapshot reads correctly, see
* <a href="/foundationdb/developer-guide.html#using-snapshot-reads" target="_blank">Using snapshot reads</a>.
*
* @return a read-only view of this {@code ReadTransaction} with relaxed isolation properties
*/
ReadTransaction snapshot();
/**
* Gets the version at which the reads for this {@code Transaction} will access the database.
* @return the version for database reads
*/
CompletableFuture<Long> getReadVersion();
/**
* Directly sets the version of the database at which to execute reads. The
* normal operation of a transaction is to determine an appropriately recent
* version; this call overrides that behavior. If the version is set too
* far in the past, {@code past_version} errors will be thrown from read operations.
* <i>Infrequently used.</i>
*
* @param version the version at which to read from the database
*/
void setReadVersion(long version);
/**
* Adds the read conflict range that this {@code ReadTransaction} would have added as if it had read
* the given key range. If this is a {@linkplain #snapshot() snapshot} view of the database, this will
* not add the conflict range. This mirrors how reading a range through a snapshot view
* of the database does not add a conflict range for the read keys.
*
* @param keyBegin the first key in the range (inclusive)
* @param keyEnd the last key in the range (exclusive)
* @return {@code true} if the read conflict range was added and {@code false} otherwise
* @see Transaction#addReadConflictRange(byte[], byte[])
*/
boolean addReadConflictRangeIfNotSnapshot(byte[] keyBegin, byte[] keyEnd);
/**
* Adds the read conflict range that this {@code ReadTransaction} would have added as if it had read
* the given key. If this is a {@linkplain #snapshot() snapshot} view of the database, this will
* not add the conflict range. This mirrors how reading a key through a snapshot view
* of the database does not add a conflict range for the read key.
*
* @param key the key to add to the read conflict range set (if this is not a snapshot view of the database)
* @return {@code true} if the read conflict key was added and {@code false} otherwise
* @see Transaction#addReadConflictKey(byte[])
*/
boolean addReadConflictKeyIfNotSnapshot(byte[] key);
/**
* Gets a value from the database. The call will return {@code null} if the key is not
* present in the database.

View File

@ -76,31 +76,6 @@ import com.apple.foundationdb.tuple.Tuple;
*/
public interface Transaction extends AutoCloseable, ReadTransaction, TransactionContext {
/**
* Return special-purpose, read-only view of the database. Reads done through this interface are known as "snapshot reads".
* Snapshot reads selectively relax FoundationDB's isolation property, reducing
* <a href="/foundationdb/developer-guide.html#transaction-conflicts" target="_blank">Transaction conflicts</a>
* but making reasoning about concurrency harder.<br>
* <br>
* For more information about how to use snapshot reads correctly, see
* <a href="/foundationdb/developer-guide.html#using-snapshot-reads" target="_blank">Using snapshot reads</a>.
*
* @return a read-only view of this {@code Transaction} with relaxed isolation properties
*/
ReadTransaction snapshot();
/**
* Directly sets the version of the database at which to execute reads. The
* normal operation of a transaction is to determine an appropriately recent
* version; this call overrides that behavior. If the version is set too
* far in the past, {@code past_version} errors will be thrown from read operations.
* <i>Infrequently used.</i>
*
* @param version the version at which to read from the database
*/
void setReadVersion(long version);
/**
* Adds a range of keys to the transaction's read conflict ranges as if you
* had read the range. As a result, other transactions that write a key in
@ -116,7 +91,7 @@ public interface Transaction extends AutoCloseable, ReadTransaction, Transaction
* the key. As a result, other transactions that concurrently write this key
* could cause the transaction to fail with a conflict.
*
* @param key the key to be added to the range
* @param key the key to be added to the read conflict range set
*/
void addReadConflictKey(byte[] key);

View File

@ -482,7 +482,8 @@ public class AsyncStackTester {
tr.options().setRetryLimit(50);
tr.options().setMaxRetryDelay(100);
tr.options().setUsedDuringCommitProtectionDisable();
tr.options().setTransactionLoggingEnable("my_transaction");
tr.options().setDebugTransactionIdentifier("my_transaction");
tr.options().setLogTransaction();
tr.options().setReadLockAware();
tr.options().setLockAware();

View File

@ -0,0 +1,209 @@
/*
* SnapshotTransactionTest.java
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.apple.foundationdb.test;
import java.util.UUID;
import java.util.concurrent.CompletionException;
import com.apple.foundationdb.Database;
import com.apple.foundationdb.FDB;
import com.apple.foundationdb.FDBException;
import com.apple.foundationdb.ReadTransaction;
import com.apple.foundationdb.Transaction;
import com.apple.foundationdb.subspace.Subspace;
import com.apple.foundationdb.tuple.Tuple;
/**
* Some tests regarding conflict ranges to make sure they do what we expect.
*/
public class SnapshotTransactionTest {
private static final int CONFLICT_CODE = 1020;
private static final Subspace SUBSPACE = new Subspace(Tuple.from("test", "conflict_ranges"));
public static void main(String[] args) {
FDB fdb = FDB.selectAPIVersion(610);
try(Database db = fdb.open()) {
snapshotReadShouldNotConflict(db);
snapshotShouldNotAddConflictRange(db);
snapshotOnSnapshot(db);
}
}
// Adding a random write conflict key ensures these transactions are not read-only, so their conflicts are actually resolved at commit.
public static void addUUIDConflicts(Transaction... trs) {
for(Transaction tr : trs) {
tr.options().setTimeout(1000);
tr.getReadVersion().join();
byte[] key = SUBSPACE.pack(Tuple.from("uuids", UUID.randomUUID()));
tr.addReadConflictKey(key);
tr.addWriteConflictKey(key);
}
}
public static <E extends Exception> void validateConflict(E e) throws E {
FDBException fdbE = null;
Throwable current = e;
while(current != null && fdbE == null) {
if(current instanceof FDBException) {
fdbE = (FDBException)current;
}
else {
current = current.getCause();
}
}
if(fdbE == null) {
System.err.println("Error was not caused by FDBException");
throw e;
}
else {
int errorCode = fdbE.getCode();
if(errorCode != CONFLICT_CODE) {
System.err.println("FDB error was not caused by a transaction conflict");
throw e;
}
}
}
public static void snapshotReadShouldNotConflict(Database db) {
try(Transaction tr1 = db.createTransaction(); Transaction tr2 = db.createTransaction(); Transaction tr3 = db.createTransaction()) {
addUUIDConflicts(tr1, tr2, tr3);
// Verify reading a *range* causes a conflict
tr1.addWriteConflictKey(SUBSPACE.pack(Tuple.from("foo", 0L)));
tr2.snapshot().getRange(SUBSPACE.range(Tuple.from("foo"))).asList().join();
tr3.getRange(SUBSPACE.range(Tuple.from("foo"))).asList().join();
// Two successful commits
tr1.commit().join();
tr2.commit().join();
// Read from tr3 should conflict with update from tr1.
try {
tr3.commit().join();
throw new RuntimeException("tr3 did not conflict");
} catch(CompletionException e) {
validateConflict(e);
}
}
try(Transaction tr1 = db.createTransaction(); Transaction tr2 = db.createTransaction(); Transaction tr3 = db.createTransaction()) {
addUUIDConflicts(tr1, tr2, tr3);
// Verify reading a *key* causes a conflict
byte[] key = SUBSPACE.pack(Tuple.from("foo", 1066L));
tr1.addWriteConflictKey(key);
tr2.snapshot().get(key).join();
tr3.get(key).join();
tr1.commit().join();
tr2.commit().join();
try {
tr3.commit().join();
throw new RuntimeException("tr3 did not conflict");
}
catch(CompletionException e) {
validateConflict(e);
}
}
}
public static void snapshotShouldNotAddConflictRange(Database db) {
try(Transaction tr1 = db.createTransaction(); Transaction tr2 = db.createTransaction(); Transaction tr3 = db.createTransaction()) {
addUUIDConflicts(tr1, tr2, tr3);
// Verify adding a read conflict *range* causes a conflict.
Subspace fooSubspace = SUBSPACE.subspace(Tuple.from("foo"));
tr1.addWriteConflictKey(fooSubspace.pack(Tuple.from(0L)));
byte[] beginKey = fooSubspace.range().begin;
byte[] endKey = fooSubspace.range().end;
if(tr2.snapshot().addReadConflictRangeIfNotSnapshot(beginKey, endKey)) {
throw new RuntimeException("snapshot read said it added a conflict range");
}
if(!tr3.addReadConflictRangeIfNotSnapshot(beginKey, endKey)) {
throw new RuntimeException("non-snapshot read said it did not add a conflict range");
}
// Two successful commits
tr1.commit().join();
tr2.commit().join();
// Read from tr3 should conflict with update from tr1.
try {
tr3.commit().join();
throw new RuntimeException("tr3 did not conflict");
}
catch(CompletionException e) {
validateConflict(e);
}
}
try(Transaction tr1 = db.createTransaction(); Transaction tr2 = db.createTransaction(); Transaction tr3 = db.createTransaction()) {
addUUIDConflicts(tr1, tr2, tr3);
// Verify adding a read conflict *key* causes a conflict.
byte[] key = SUBSPACE.pack(Tuple.from("foo", 1066L));
tr1.addWriteConflictKey(key);
if(tr2.snapshot().addReadConflictKeyIfNotSnapshot(key)) {
throw new RuntimeException("snapshot read said it added a conflict range");
}
if(!tr3.addReadConflictKeyIfNotSnapshot(key)) {
throw new RuntimeException("non-snapshot read said it did not add a conflict range");
}
// Two successful commits
tr1.commit().join();
tr2.commit().join();
// Read from tr3 should conflict with update from tr1.
try {
tr3.commit().join();
throw new RuntimeException("tr3 did not conflict");
}
catch(CompletionException e) {
validateConflict(e);
}
}
}
private static void snapshotOnSnapshot(Database db) {
try(Transaction tr = db.createTransaction()) {
if(tr.isSnapshot()) {
throw new RuntimeException("new transaction is a snapshot transaction");
}
ReadTransaction snapshotTr = tr.snapshot();
if(!snapshotTr.isSnapshot()) {
throw new RuntimeException("snapshot transaction is not a snapshot transaction");
}
if(snapshotTr == tr) {
throw new RuntimeException("snapshot and regular transaction are pointer-equal");
}
ReadTransaction snapshotSnapshotTr = snapshotTr.snapshot();
if(!snapshotSnapshotTr.isSnapshot()) {
throw new RuntimeException("snapshot transaction is not a snapshot transaction");
}
if(snapshotSnapshotTr != snapshotTr) {
throw new RuntimeException("calling snapshot on a snapshot transaction produced a different transaction");
}
}
}
private SnapshotTransactionTest() {}
}

View File

@ -436,7 +436,8 @@ public class StackTester {
tr.options().setRetryLimit(50);
tr.options().setMaxRetryDelay(100);
tr.options().setUsedDuringCommitProtectionDisable();
tr.options().setTransactionLoggingEnable("my_transaction");
tr.options().setDebugTransactionIdentifier("my_transaction");
tr.options().setLogTransaction();
tr.options().setReadLockAware();
tr.options().setLockAware();

View File

@ -62,7 +62,7 @@ endif()
set(package_file_name foundationdb-${FDB_VERSION}.tar.gz)
set(package_file ${CMAKE_BINARY_DIR}/packages/${package_file_name})
add_custom_command(OUTPUT ${package_file}
COMMAND $<TARGET_FILE:Python::Interpreter> setup.py sdist &&
COMMAND $<TARGET_FILE:Python::Interpreter> setup.py sdist --formats=gztar &&
${CMAKE_COMMAND} -E copy dist/${package_file_name} ${package_file}
WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
COMMENT "Create Python sdist package")

View File

@ -136,7 +136,8 @@ def test_options(tr):
tr.options.set_retry_limit(50)
tr.options.set_max_retry_delay(100)
tr.options.set_used_during_commit_protection_disable()
tr.options.set_transaction_logging_enable('my_transaction')
tr.options.set_debug_transaction_identifier('my_transaction')
tr.options.set_log_transaction()
tr.options.set_read_lock_aware()
tr.options.set_lock_aware()

View File

@ -466,7 +466,8 @@ class Tester
tr.options.set_retry_limit(50)
tr.options.set_max_retry_delay(100)
tr.options.set_used_during_commit_protection_disable
tr.options.set_transaction_logging_enable('my_transaction')
tr.options.set_debug_transaction_identifier('my_transaction')
tr.options.set_log_transaction()
tr.options.set_read_lock_aware()
tr.options.set_lock_aware()

View File

@ -10,12 +10,11 @@ RUN adduser --disabled-password --gecos '' fdb && chown -R fdb /opt && chmod -R
USER fdb
# wget of bintray without forcing UTF-8 encoding results in 403 Forbidden
RUN cd /opt/ && wget http://downloads.sourceforge.net/project/boost/boost/1.52.0/boost_1_52_0.tar.bz2 &&\
RUN cd /opt/ &&\
wget --local-encoding=UTF-8 https://dl.bintray.com/boostorg/release/1.67.0/source/boost_1_67_0.tar.bz2 &&\
echo '2684c972994ee57fc5632e03bf044746f6eb45d4920c343937a465fd67a5adba boost_1_67_0.tar.bz2' | sha256sum -c - &&\
tar -xjf boost_1_52_0.tar.bz2 &&\
tar -xjf boost_1_67_0.tar.bz2 &&\
rm boost_1_52_0.tar.bz2 boost_1_67_0.tar.bz2
rm boost_1_67_0.tar.bz2
USER root

View File

@ -38,7 +38,7 @@ configure() {
local __res=0
for _ in 1
do
cmake ../foundationdb
cmake ../foundationdb ${CMAKE_EXTRA_ARGS}
__res=$?
if [ ${__res} -ne 0 ]
then
@ -87,6 +87,8 @@ package_fast() {
for _ in 1
do
make -j`nproc` packages
cpack
cpack -G RPM -D GENERATE_EL6=ON
__res=$?
if [ ${__res} -ne 0 ]
then
@ -100,7 +102,7 @@ package() {
local __res=0
for _ in 1
do
configure
build
__res=$?
if [ ${__res} -ne 0 ]
then
@ -120,7 +122,7 @@ rpm() {
local __res=0
for _ in 1
do
cmake -DINSTALL_LAYOUT=RPM ../foundationdb
configure
__res=$?
if [ ${__res} -ne 0 ]
then
@ -132,7 +134,8 @@ rpm() {
then
break
fi
fakeroot cpack
fakeroot cpack -G RPM -D GENERATE_EL6=ON
fakeroot cpack -G RPM
__res=$?
if [ ${__res} -ne 0 ]
then
@ -146,7 +149,7 @@ deb() {
local __res=0
for _ in 1
do
cmake -DINSTALL_LAYOUT=DEB ../foundationdb
configure
__res=$?
if [ ${__res} -ne 0 ]
then
@ -158,7 +161,7 @@ deb() {
then
break
fi
fakeroot cpack
fakeroot cpack -G DEB
__res=$?
if [ ${__res} -ne 0 ]
then
@ -172,7 +175,7 @@ test-fast() {
local __res=0
for _ in 1
do
ctest -j`nproc`
ctest -j`nproc` ${CTEST_EXTRA_ARGS}
__res=$?
if [ ${__res} -ne 0 ]
then

View File

@ -1,3 +0,0 @@
FROM centos:6
RUN yum install -y yum-utils

View File

@ -0,0 +1,3 @@
FROM centos:6
RUN yum install -y yum-utils upstart initscripts

View File

@ -0,0 +1,3 @@
FROM centos:7
RUN yum install -y yum-utils systemd sysvinit-tools

View File

@ -1,3 +1,4 @@
FROM ubuntu:16.04
RUN apt-get update
RUN apt-get install -y systemd

View File

@ -59,6 +59,10 @@ services:
<<: *build-setup
command: scl enable devtoolset-7 rh-ruby24 rh-python36 python27 -- bash ../foundationdb/build/cmake/build.sh test
snowflake-ci: &snowflake-ci
<<: *build-setup
command: scl enable devtoolset-7 rh-ruby24 rh-python36 python27 -- bash ../foundationdb/build/cmake/build.sh package test-fast
shell:
<<: *build-setup
command: scl enable devtoolset-7 rh-ruby24 rh-python36 python27 -- bash

View File

@ -1,7 +1,17 @@
[RPM_1]
name = fdb-centos
location = centos-test
[centos6]
name = fdb-centos6
location = centos6-test
packages = ^.*el6((?!debuginfo).)*\.rpm$
format = rpm
[DEB_1]
[centos7]
name = fdb-centos7
location = centos7-test
packages = ^.*el7((?!debuginfo).)*\.rpm$
format = rpm
[ubuntu_16_04]
name = fdb-debian
location = debian-test
packages = ^.*\.deb$
format = deb

View File

@ -2,6 +2,7 @@
source_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
source ${source_dir}/modules/globals.sh
source ${source_dir}/modules/util.sh
source ${source_dir}/modules/deb.sh
source ${source_dir}/modules/tests.sh

View File

@ -17,10 +17,8 @@ then
fi
# parse the ini file and read it into an
# associative array
declare -gA ini_name
declare -gA ini_location
eval "$(awk -F ' *= *' '{ if ($1 ~ /^\[/) section=$1; else if ($1 !~ /^$/) print "ini_" $1 section "=" "\"" $2 "\"" }' ${docker_file})"
eval "$(awk -F ' *= *' '{ if ($1 ~ /^\[/) section=$1; else if ($1 !~ /^$/) printf "ini_%s%s=\47%s\47\n", $1, section, $2 }' ${docker_file})"
vms=( "${!ini_name[@]}" )
if [ $? -ne 0 ]
then
echo "ERROR: Could not parse config-file ${docker_file}"
@ -112,15 +110,6 @@ then
then
break
fi
if [ -z ${fdb_packages+x} ]
then
config_find_packages
if [ $? -ne 0 ]
then
__res=1
break
fi
fi
config_load_vms
__res=$?
if [ ${__res} -ne 0 ]

View File

@ -10,7 +10,7 @@ then
local __res=0
enterfun
echo "Install FoundationDB"
cd /build
cd /build/packages
package_names=()
for f in "${package_files[@]}"
do

View File

@ -9,56 +9,55 @@ then
failed_tests=()
docker_threads=()
docker_ids=()
docker_threads=()
docker_logs=()
docker_error_logs=()
docker_wait_any() {
# docker wait waits on all containers (unlike what is documented)
# so we need to do polling
success=0
while [ "${success}" -eq 0 ]
local __res=0
enterfun
while [ "${#docker_threads[@]}" -gt 0 ]
do
for ((i=0;i<"${#docker_ids[@]}";++i))
IFS=";" read -ra res <${pipe_file}
docker_id=${res[0]}
result=${res[1]}
i=0
for (( idx=0; idx<${#docker_ids[@]}; idx++ ))
do
docker_id="${docker_ids[$i]}"
status="$(docker ps -a -f id=${docker_id} --format '{{.Status}}' | awk '{print $1;}')"
if [ "${status}" = "Exited" ]
if [ "${docker_id}" = "${docker_ids[idx]}" ]
then
success=1
ret_code="$(docker wait ${docker_id})"
if [ "${ret_code}" -ne 0 ]
then
failed_tests+=( "${docker_threads[$i]}" )
echo -e "${RED}Test failed: ${docker_threads[$i]} ${NC}"
else
echo -e "${GREEN}Test succeeded: ${docker_threads[$i]} ${NC}"
fi
# remove it
n=$((i+1))
docker_ids=( "${docker_ids[@]:0:$i}" "${docker_ids[@]:$n}" )
docker_threads=( "${docker_threads[@]:0:$i}" "${docker_threads[@]:$n}" )
# prune
set -x
if [ "${pruning_strategy}" = "ALL" ]
then
docker container rm "${docker_id}" > /dev/null
elif [ "${ret_code}" -eq 0 ] && [ "${pruning_strategy}" = "SUCCEEDED" ]
then
docker container rm "${docker_id}" > /dev/null
elif [ "${ret_code}" -ne 0 ] && [ "${pruning_strategy}" = "FAILED" ]
then
docker container rm "${docker_id}" > /dev/null
fi
set +x
i=idx
break
fi
done
sleep 1
if [ "${result}" -eq 0 ]
then
echo -e "${GREEN}Test succeeded: ${docker_threads[$i]}"
echo -e "\tDocker-ID: ${docker_ids[$i]} "
echo -e "\tLog-File: ${docker_logs[$i]}"
echo -e "\tErr-File: ${docker_error_logs[$i]} ${NC}"
else
echo -e "${RED}Test FAILED: ${docker_threads[$i]}"
echo -e "\tDocker-ID: ${docker_ids[$i]} "
echo -e "\tLog-File: ${docker_logs[$i]}"
echo -e "\tErr-File: ${docker_error_logs[$i]} ${NC}"
failed_tests+=( "${docker_threads[$i]}" )
fi
n=$((i+1))
docker_ids=( "${docker_ids[@]:0:$i}" "${docker_ids[@]:$n}" )
docker_threads=( "${docker_threads[@]:0:$i}" "${docker_threads[@]:$n}" )
docker_logs=( "${docker_logs[@]:0:$i}" "${docker_logs[@]:$n}" )
docker_error_logs=( "${docker_error_logs[@]:0:$i}" "${docker_error_logs[@]:$n}" )
break
done
exitfun
return "${__res}"
}
docker_wait_all() {
local __res=0
while [ "${#docker_ids[@]}" -gt 0 ]
while [ "${#docker_threads[@]}" -gt 0 ]
do
docker_wait_any
if [ "$?" -ne 0 ]
@ -69,158 +68,104 @@ then
return ${__res}
}
docker_build_and_run() {
local __res=0
enterfun
for _ in 1
do
if [[ "$location" = /* ]]
then
cd "${location}"
else
cd ${source_dir}/../${location}
fi
docker_logs="${log_dir}/docker_build_${name}"
docker build . -t ${name} 1> "${docker_logs}.log" 2> "${docker_logs}.err"
successOr "Building Docker image ${name} failed - see ${docker_logs}.log and ${docker_logs}.err"
# we start docker in interactive mode, otherwise CTRL-C won't work
if [ ! -z "${tests_to_run+x}"]
then
tests=()
IFS=';' read -ra tests <<< "${tests_to_run}"
fi
for t in "${tests[@]}"
do
if [ "${#docker_ids[@]}" -ge "${docker_parallelism}" ]
then
docker_wait_any
fi
echo "Starting Test ${PKG,,}_${t}"
docker_id=$( docker run -d -v "${fdb_source}:/foundationdb"\
-v "${fdb_build}:/build"\
${name}\
bash /foundationdb/build/cmake/package_tester/${PKG,,}_tests.sh -n ${t} ${packages_to_test[@]} )
docker_ids+=( "${docker_id}" )
docker_threads+=( "${PKG} - ${t} (ID: ${docker_id})" )
done
done
exitfun
return ${__res}
}
docker_run_tests() {
local __res=0
enterfun
counter=1
while true
do
if [ -z "${ini_name[${PKG}_${counter}]+x}" ]
then
# we are done
break
fi
name="${ini_name[${PKG}_${counter}]}"
location="${ini_location[${PKG}_${counter}]}"
docker_build_and_run
__res=$?
counter=$((counter+1))
if [ ${__res} -ne 0 ]
then
break
fi
done
if [ ${counter} -eq 1 ]
then
echo -e "${YELLOW}WARNING: No docker config found!${NC}"
fi
exitfun
return ${__res}
}
docker_debian_tests() {
local __res=0
enterfun
PKG=DEB
packages_to_test=("${deb_packages[@]}")
docker_run_tests
__res=$?
exitfun
return ${__res}
}
docker_rpm_tests() {
local __res=0
enterfun
PKG=RPM
packages_to_test=("${rpm_packages[@]}")
docker_run_tests
__res=$?
exitfun
return ${__res}
}
docker_run() {
local __res=0
enterfun
for _ in 1
do
log_dir="${fdb_build}/pkg_tester"
mkdir -p "${log_dir}"
# create list of package files to test
IFS=':' read -ra packages <<< "${fdb_packages}"
deb_packages=()
rpm_packages=()
for i in "${packages[@]}"
echo "Testing the following:"
echo "======================"
for K in "${vms[@]}"
do
if [[ "${i}" =~ .*".deb" ]]
then
if [ ${run_deb_tests} -ne 0 ]
then
deb_packages+=("${i}")
fi
else
if [ ${run_rpm_tests} -ne 0 ]
then
rpm_packages+=("${i}")
fi
fi
done
do_deb_tests=0
do_rpm_tests=0
if [ "${#deb_packages[@]}" -gt 0 ]
then
do_deb_tests=1
echo "Will test the following debian packages:"
echo "========================================"
for i in "${deb_packages[@]}"
curr_packages=( $(cd ${fdb_build}/packages; ls | grep -P ${ini_packages[${K}]} ) )
echo "Will test the following ${#curr_packages[@]} packages in docker-image ${K}:"
for p in "${curr_packages[@]}"
do
echo " - ${i}"
echo " ${p}"
done
echo
fi
if [ "${#rpm_packages[@]}" -gt 0 ]
done
log_dir="${fdb_build}/pkg_tester"
pipe_file="${fdb_build}/pkg_tester.pipe"
lock_file="${fdb_build}/pkg_tester.lock"
if [ -p "${pipe_file}" ]
then
do_rpm_tests=1
echo "Will test the following rpm packages"
echo "===================================="
for i in "${rpm_packages[@]}"
rm "${pipe_file}"
successOr "Could not delete old pipe file"
fi
if [ -f "${lock_file}" ]
then
rm "${lock_file}"
successOr "Could not delete old pipe file"
fi
touch "${lock_file}"
successOr "Could not create lock file"
mkfifo "${pipe_file}"
successOr "Could not create pipe file"
mkdir -p "${log_dir}"
# setup the containers
# TODO: shall we make this parallel as well?
for vm in "${vms[@]}"
do
curr_name="${ini_name[$vm]}"
curr_location="${ini_location[$vm]}"
if [[ "$curr_location" = /* ]]
then
cd "${curr_location}"
else
cd ${source_dir}/../${curr_location}
fi
docker_build_logs="${log_dir}/docker_build_${curr_name}"
docker build . -t ${curr_name} 1> "${docker_build_logs}.log" 2> "${docker_build_logs}.err"
successOr "Building Docker image ${curr_name} failed - see ${docker_build_logs}.log and ${docker_build_logs}.err"
done
if [ ! -z "${tests_to_run+x}"]
then
tests=()
IFS=';' read -ra tests <<< "${tests_to_run}"
fi
for vm in "${vms[@]}"
do
curr_name="${ini_name[$vm]}"
curr_format="${ini_format[$vm]}"
curr_packages=( $(cd ${fdb_build}/packages; ls | grep -P ${ini_packages[${vm}]} ) )
for curr_test in "${tests[@]}"
do
echo " - ${i}"
if [ "${#docker_ids[@]}" -ge "${docker_parallelism}" ]
then
docker_wait_any
fi
echo "Starting Test ${curr_name}/${curr_test}"
log_file="${log_dir}/${curr_name}_${curr_test}.log"
err_file="${log_dir}/${curr_name}_${curr_test}.err"
docker_id=$( docker run -d -v "${fdb_source}:/foundationdb"\
-v "${fdb_build}:/build"\
${curr_name} /sbin/init )
{
docker exec "${docker_id}" bash \
/foundationdb/build/cmake/package_tester/${curr_format}_tests.sh -n ${curr_test} ${curr_packages[@]}\
2> ${err_file} 1> ${log_file}
res=$?
if [ "${pruning_strategy}" = "ALL" ]
then
docker kill "${docker_id}" > /dev/null
elif [ "${res}" -eq 0 ] && [ "${pruning_strategy}" = "SUCCEEDED" ]
then
docker kill "${docker_id}" > /dev/null
elif [ "${res}" -ne 0 ] && [ "${pruning_strategy}" = "FAILED" ]
then
docker kill "${docker_id}" > /dev/null
fi
flock "${lock_file}" echo "${docker_id};${res}" >> "${pipe_file}"
} &
docker_ids+=( "${docker_id}" )
docker_threads+=( "${curr_name}/${curr_test}" )
docker_logs+=( "${log_file}" )
docker_error_logs+=( "${err_file}" )
done
fi
if [ "${do_deb_tests}" -eq 0 ] && [ "${do_rpm_tests}" -eq 0 ]
then
echo "nothing to do"
fi
if [ "${do_deb_tests}" -ne 0 ]
then
docker_debian_tests
fi
if [ "${do_rpm_tests}" -ne 0 ]
then
docker_rpm_tests
fi
done
docker_wait_all
rm ${pipe_file}
if [ "${#failed_tests[@]}" -eq 0 ]
then
echo -e "${GREEN}SUCCESS${NC}"
@ -235,6 +180,6 @@ then
fi
done
exitfun
return ${__res}
return "${__res}"
}
fi

View File

@ -0,0 +1,23 @@
#!/usr/bin/env bash
# This module has to be included first and only once.
# This is because of a limitation of older bash versions
# that don't allow us to declare associative arrays
# globally.
if [ -z "${global_sh_included+x}"]
then
global_sh_included=1
else
echo "global.sh can only be included once"
exit 1
fi
declare -A ini_name
declare -A ini_location
declare -A ini_packages
declare -A ini_format
declare -A test_start_state
declare -A test_exit_state
declare -a tests
declare -a vms

View File

@ -6,11 +6,13 @@ then
source ${source_dir}/modules/util.sh
conf_save_extension=".rpmsave"
install() {
local __res=0
enterfun
cd /build
declare -ga package_names
cd /build/packages
package_names=()
for f in "${package_files[@]}"
do
package_names+=( "$(rpm -qp ${f})" )

View File

@ -25,11 +25,12 @@ EOF
do
case ${opt} in
h )
arguments_usage
test_args_usage
__res=2
break
;;
n )
echo "test_name=${OPTARG}"
test_name="${OPTARG}"
;;
\? )

View File

@ -28,10 +28,6 @@
# build directory can be found in `/build`, the
# source code will be located in `/foundationdb`
declare -A test_start_state
declare -A test_exit_state
declare -a tests
if [ -z "${tests_sh_included}" ]
then
tests_sh_included=1
@ -106,13 +102,23 @@ then
uninstall
# make sure config didn't get deleted
if [ ! -f /etc/foundationdb/fdb.cluster ] || [ ! -f /etc/foundationdb/foundationdb.conf ]
# RPM, however, renames the file on remove, so we need to check for this
conffile="/etc/foundationdb/foundationdb.conf${conf_save_extension}"
if [ ! -f /etc/foundationdb/fdb.cluster ] || [ ! -f "${conffile}" ]
then
fail "Uninstall removed configuration"
fi
differences="$(diff /tmp/foundationdb.conf ${conffile})"
if [ -n "${differences}" ]
then
fail "${conffile} changed during remove"
fi
differences="$(diff /tmp/fdb.cluster /etc/foundationdb/fdb.cluster)"
if [ -n "${differences}" ]
then
fail "/etc/foundationdb/fdb.cluster changed during remove"
fi
rm /tmp/fdb.cluster
rm /tmp/foundationdb.conf
return 0
}
fi

View File

@ -24,7 +24,7 @@ then
successOr ${@:1}
}
successOrOr() {
successOr() {
local __res=$?
if [ ${__res} -ne 0 ]
then

View File

@ -2,6 +2,7 @@
source_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
source ${source_dir}/modules/globals.sh
source ${source_dir}/modules/util.sh
source ${source_dir}/modules/rpm.sh
source ${source_dir}/modules/tests.sh

View File

@ -2,6 +2,7 @@
source_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
source ${source_dir}/modules/globals.sh
source ${source_dir}/modules/config.sh
source ${source_dir}/modules/util.sh
source ${source_dir}/modules/arguments.sh

View File

@ -0,0 +1,9 @@
set(error_msg
${CMAKE_SOURCE_DIR}/versions.h exists. This usually means that
you ran `make` "(the old build system)" in this directory before.
This can result in unexpected behavior. Run `make clean` in the
source directory to continue)
if(EXISTS "${FILE}")
list(JOIN error_msg " " err)
message(FATAL_ERROR "${err}")
endif()

cmake/CPackConfig.cmake Normal file
View File

@ -0,0 +1,33 @@
# RPM specifics
if(CPACK_GENERATOR MATCHES "RPM")
set(CPACK_PACKAGING_INSTALL_PREFIX "/")
if(GENERATE_EL6)
set(CPACK_COMPONENTS_ALL clients-el6 server-el6)
else()
set(CPACK_COMPONENTS_ALL clients-el7 server-el7)
endif()
set(CPACK_RESOURCE_FILE_README ${CMAKE_SOURCE_DIR}/README.md)
set(CPACK_RESOURCE_FILE_LICENSE ${CMAKE_SOURCE_DIR}/LICENSE)
elseif(CPACK_GENERATOR MATCHES "DEB")
set(CPACK_PACKAGING_INSTALL_PREFIX "/")
set(CPACK_COMPONENTS_ALL clients-deb server-deb)
set(CPACK_RESOURCE_FILE_README ${CMAKE_SOURCE_DIR}/README.md)
set(CPACK_RESOURCE_FILE_LICENSE ${CMAKE_SOURCE_DIR}/LICENSE)
elseif(CPACK_GENERATOR MATCHES "PackageMaker")
set(CPACK_PACKAGING_INSTALL_PREFIX "/")
set(CPACK_COMPONENTS_ALL clients-pm server-pm)
set(CPACK_STRIP_FILES TRUE)
set(CPACK_PREFLIGHT_SERVER_SCRIPT ${CMAKE_SOURCE_DIR}/packaging/osx/scripts-server/preinstall)
set(CPACK_POSTFLIGHT_SERVER_SCRIPT ${CMAKE_SOURCE_DIR}/packaging/osx/scripts-server/postinstall)
set(CPACK_POSTFLIGHT_CLIENTS_SCRIPT ${CMAKE_SOURCE_DIR}/packaging/osx/scripts-server/preinstall)
set(CPACK_RESOURCE_FILE_README ${CMAKE_SOURCE_DIR}/packaging/osx/resources/conclusion.rtf)
set(CPACK_PRODUCTBUILD_RESOURCES_DIR ${CMAKE_SOURCE_DIR}/packaging/osx/resources)
set(CPACK_RESOURCE_FILE_LICENSE ${CMAKE_BINARY_DIR}/License.txt)
elseif(CPACK_GENERATOR MATCHES "TGZ")
set(CPACK_STRIP_FILES TRUE)
set(CPACK_COMPONENTS_ALL clients-tgz server-tgz)
set(CPACK_RESOURCE_FILE_README ${CMAKE_SOURCE_DIR}/README.md)
set(CPACK_RESOURCE_FILE_LICENSE ${CMAKE_SOURCE_DIR}/LICENSE)
else()
message(FATAL_ERROR "Unsupported package format ${CPACK_GENERATOR}")
endif()

View File

@ -44,7 +44,11 @@ set(CMAKE_REQUIRED_LIBRARIES c)
if(WIN32)
add_compile_options(/W3 /EHsc /std:c++14 /bigobj)
# see: https://docs.microsoft.com/en-us/windows/desktop/WinProg/using-the-windows-headers
# this sets the windows target version to Windows 7
set(WINDOWS_TARGET 0x0601)
add_compile_options(/W3 /EHsc /std:c++14 /bigobj $<$<CONFIG:Release>:/Zi>)
add_compile_definitions(_WIN32_WINNT=${WINDOWS_TARGET} BOOST_ALL_NO_LIB)
else()
if(USE_GOLD_LINKER)
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fuse-ld=gold -Wl,--disable-new-dtags")

View File

@ -68,6 +68,24 @@ function(generate_coverage_xml)
add_dependencies(${target_name} coverage_${target_name})
endfunction()
# This function asserts that `versions.h` does not exist in the source
# directory. It does this in the prebuild phase of the target.
# This is an ugly hack that should make sure that cmake isn't used with
# a source directory in which FDB was previously built with `make`.
function(assert_no_version_h target)
message(STATUS "Check versions.h on ${target}")
set(target_name "${target}_versions_h_check")
add_custom_target("${target_name}"
COMMAND "${CMAKE_COMMAND}" -DFILE="${CMAKE_SOURCE_DIR}/versions.h"
-P "${CMAKE_SOURCE_DIR}/cmake/AssertFileDoesntExist.cmake"
COMMAND echo
"${CMAKE_COMMAND}" -P "${CMAKE_SOURCE_DIR}/cmake/AssertFileDoesntExist.cmake"
-DFILE="${CMAKE_SOURCE_DIR}/versions.h"
COMMENT "Check old build system wasn't used in source dir")
add_dependencies(${target} ${target_name})
endfunction()
function(add_flow_target)
set(options EXECUTABLE STATIC_LIBRARY
DYNAMIC_LIBRARY)
@ -138,6 +156,7 @@ function(add_flow_target)
add_custom_target(${AFT_NAME}_actors DEPENDS ${generated_files})
add_dependencies(${AFT_NAME} ${AFT_NAME}_actors)
assert_no_version_h(${AFT_NAME}_actors)
generate_coverage_xml(${AFT_NAME})
endif()
target_include_directories(${AFT_NAME} PUBLIC ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR})

View File

@ -2,11 +2,11 @@
# Helper Functions
################################################################################
function(install_symlink)
function(install_symlink_impl)
if (NOT WIN32)
set(options "")
set(one_value_options COMPONENT TO DESTINATION)
set(multi_value_options)
set(one_value_options TO DESTINATION)
set(multi_value_options COMPONENTS)
cmake_parse_arguments(SYM "${options}" "${one_value_options}" "${multi_value_options}" "${ARGN}")
file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/symlinks)
@ -14,95 +14,143 @@ function(install_symlink)
get_filename_component(dest_dir ${SYM_DESTINATION} DIRECTORY)
set(sl ${CMAKE_CURRENT_BINARY_DIR}/symlinks/${fname})
execute_process(COMMAND ${CMAKE_COMMAND} -E create_symlink ${SYM_TO} ${sl})
install(FILES ${sl} DESTINATION ${dest_dir} COMPONENT ${SYM_COMPONENT})
foreach(component IN LISTS SYM_COMPONENTS)
install(FILES ${sl} DESTINATION ${dest_dir} COMPONENT ${component})
endforeach()
endif()
endfunction()
if(NOT INSTALL_LAYOUT)
if(WIN32)
set(DEFAULT_INSTALL_LAYOUT "WIN")
else()
set(DEFAULT_INSTALL_LAYOUT "STANDALONE")
function(install_symlink)
if(NOT WIN32 AND NOT OPEN_FOR_IDE)
set(options "")
set(one_value_options COMPONENT LINK_DIR FILE_DIR LINK_NAME FILE_NAME)
set(multi_value_options "")
cmake_parse_arguments(IN "${options}" "${one_value_options}" "${multi_value_options}" "${ARGN}")
set(rel_path "")
string(REGEX MATCHALL "\\/" slashes "${IN_LINK_NAME}")
foreach(ignored IN LISTS slashes)
set(rel_path "../${rel_path}")
endforeach()
if("${IN_FILE_DIR}" MATCHES "bin")
if("${IN_LINK_DIR}" MATCHES "lib")
install_symlink_impl(
TO "../${rel_path}bin/${IN_FILE_NAME}"
DESTINATION "lib/${IN_LINK_NAME}"
COMPONENTS "${IN_COMPONENT}-tgz")
install_symlink_impl(
TO "../${rel_path}bin/${IN_FILE_NAME}"
DESTINATION "usr/lib64/${IN_LINK_NAME}"
COMPONENTS "${IN_COMPONENT}-el6"
"${IN_COMPONENT}-el7"
"${IN_COMPONENT}-deb")
install_symlink_impl(
TO "../${rel_path}bin/${IN_FILE_NAME}"
DESTINATION "usr/lib64/${IN_LINK_NAME}"
COMPONENTS "${IN_COMPONENT}-deb")
elseif("${IN_LINK_DIR}" MATCHES "bin")
install_symlink_impl(
TO "../${rel_path}bin/${IN_FILE_NAME}"
DESTINATION "bin/${IN_LINK_NAME}"
COMPONENTS "${IN_COMPONENT}-tgz")
install_symlink_impl(
TO "../${rel_path}bin/${IN_FILE_NAME}"
DESTINATION "usr/bin/${IN_LINK_NAME}"
COMPONENTS "${IN_COMPONENT}-el6"
"${IN_COMPONENT}-el7"
"${IN_COMPONENT}-deb")
elseif("${IN_LINK_DIR}" MATCHES "fdbmonitor")
install_symlink_impl(
TO "../../${rel_path}bin/${IN_FILE_NAME}"
DESTINATION "lib/foundationdb/${IN_LINK_NAME}"
COMPONENTS "${IN_COMPONENT}-tgz")
install_symlink_impl(
TO "../../${rel_path}bin/${IN_FILE_NAME}"
DESTINATION "usr/lib/foundationdb/${IN_LINK_NAME}"
COMPONENTS "${IN_COMPONENT}-el6"
"${IN_COMPONENT}-el7"
"${IN_COMPONENT}-deb")
else()
message(FATAL_ERROR "Unknown LINK_DIR ${IN_LINK_DIR}")
endif()
else()
message(FATAL_ERROR "Unknown FILE_DIR ${IN_FILE_DIR}")
endif()
endif()
endif()
set(INSTALL_LAYOUT "${DEFAULT_INSTALL_LAYOUT}"
CACHE STRING "Installation directory layout. Options are: TARGZ (as in tar.gz installer), WIN, STANDALONE, RPM, DEB, OSX")
endfunction()
set(DIR_LAYOUT ${INSTALL_LAYOUT})
if(DIR_LAYOUT MATCHES "TARGZ")
set(DIR_LAYOUT "STANDALONE")
function(fdb_install)
if(NOT WIN32 AND NOT OPEN_FOR_IDE)
set(options EXPORT)
set(one_value_options COMPONENT DESTINATION)
set(multi_value_options TARGETS FILES DIRECTORY)
cmake_parse_arguments(IN "${options}" "${one_value_options}" "${multi_value_options}" "${ARGN}")
if(IN_TARGETS)
set(args TARGETS ${IN_TARGETS})
elseif(IN_FILES)
set(args FILES ${IN_FILES})
elseif(IN_DIRECTORY)
set(args DIRECTORY ${IN_DIRECTORY})
else()
message(FATAL_ERROR "Expected FILES or TARGETS")
endif()
if(IN_EXPORT)
set(args EXPORT)
endif()
if("${IN_DESTINATION}" STREQUAL "bin")
install(${args} DESTINATION "bin" COMPONENT "${IN_COMPONENT}-tgz")
install(${args} DESTINATION "usr/bin" COMPONENT "${IN_COMPONENT}-deb")
install(${args} DESTINATION "usr/bin" COMPONENT "${IN_COMPONENT}-el6")
install(${args} DESTINATION "usr/bin" COMPONENT "${IN_COMPONENT}-el7")
install(${args} DESTINATION "usr/local/bin" COMPONENT "${IN_COMPONENT}-pm")
elseif("${IN_DESTINATION}" STREQUAL "sbin")
install(${args} DESTINATION "sbin" COMPONENT "${IN_COMPONENT}-tgz")
install(${args} DESTINATION "usr/sbin" COMPONENT "${IN_COMPONENT}-deb")
install(${args} DESTINATION "usr/sbin" COMPONENT "${IN_COMPONENT}-el6")
install(${args} DESTINATION "usr/sbin" COMPONENT "${IN_COMPONENT}-el7")
install(${args} DESTINATION "usr/local/libexec" COMPONENT "${IN_COMPONENT}-pm")
elseif("${IN_DESTINATION}" STREQUAL "fdbmonitor")
install(${args} DESTINATION "libexec" COMPONENT "${IN_COMPONENT}-tgz")
install(${args} DESTINATION "usr/lib/foundationdb" COMPONENT "${IN_COMPONENT}-deb")
install(${args} DESTINATION "usr/lib/foundationdb" COMPONENT "${IN_COMPONENT}-el6")
install(${args} DESTINATION "usr/lib/foundationdb" COMPONENT "${IN_COMPONENT}-el7")
install(${args} DESTINATION "usr/local/libexec" COMPONENT "${IN_COMPONENT}-pm")
elseif("${IN_DESTINATION}" STREQUAL "include")
install(${args} DESTINATION "include" COMPONENT "${IN_COMPONENT}-tgz")
install(${args} DESTINATION "usr/include" COMPONENT "${IN_COMPONENT}-deb")
install(${args} DESTINATION "usr/include" COMPONENT "${IN_COMPONENT}-el6")
install(${args} DESTINATION "usr/include" COMPONENT "${IN_COMPONENT}-el7")
install(${args} DESTINATION "usr/local/include" COMPONENT "${IN_COMPONENT}-pm")
elseif("${IN_DESTINATION}" STREQUAL "etc")
install(${args} DESTINATION "etc/foundationdb" COMPONENT "${IN_COMPONENT}-tgz")
install(${args} DESTINATION "etc/foundationdb" COMPONENT "${IN_COMPONENT}-deb")
install(${args} DESTINATION "etc/foundationdb" COMPONENT "${IN_COMPONENT}-el6")
install(${args} DESTINATION "etc/foundationdb" COMPONENT "${IN_COMPONENT}-el7")
install(${args} DESTINATION "usr/local/etc/foundationdb" COMPONENT "${IN_COMPONENT}-pm")
elseif("${IN_DESTINATION}" STREQUAL "log")
install(${args} DESTINATION "log/foundationdb" COMPONENT "${IN_COMPONENT}-tgz")
install(${args} DESTINATION "var/log/foundationdb" COMPONENT "${IN_COMPONENT}-deb")
install(${args} DESTINATION "var/log/foundationdb" COMPONENT "${IN_COMPONENT}-el6")
install(${args} DESTINATION "var/log/foundationdb" COMPONENT "${IN_COMPONENT}-el7")
elseif("${IN_DESTINATION}" STREQUAL "data")
install(${args} DESTINATION "lib/foundationdb" COMPONENT "${IN_COMPONENT}-tgz")
install(${args} DESTINATION "var/lib/foundationdb/data" COMPONENT "${IN_COMPONENT}-deb")
install(${args} DESTINATION "var/lib/foundationdb/data" COMPONENT "${IN_COMPONENT}-el6")
install(${args} DESTINATION "var/lib/foundationdb/data" COMPONENT "${IN_COMPONENT}-el7")
endif()
endif()
endfunction()
if(APPLE)
set(CPACK_GENERATOR TGZ PackageMaker)
else()
set(CPACK_GENERATOR RPM DEB TGZ)
endif()
get_property(LIB64 GLOBAL PROPERTY FIND_LIBRARY_USE_LIB64_PATHS)
set(CPACK_PACKAGE_CHECKSUM SHA256)
set(FDB_CONFIG_DIR "etc/foundationdb")
if("${LIB64}" STREQUAL "TRUE")
set(LIBSUFFIX 64)
else()
set(LIBSUFFIX "")
endif()
set(FDB_LIB_NOSUFFIX "lib")
if(DIR_LAYOUT MATCHES "STANDALONE")
set(FDB_LIB_DIR "lib${LIBSUFFIX}")
set(FDB_LIBEXEC_DIR "${FDB_LIB_DIR}")
set(FDB_BIN_DIR "bin")
set(FDB_SBIN_DIR "sbin")
set(FDB_INCLUDE_INSTALL_DIR "include")
set(FDB_PYTHON_INSTALL_DIR "${FDB_LIB_DIR}/python2.7/site-packages/fdb")
set(FDB_SHARE_DIR "share")
elseif(DIR_LAYOUT MATCHES "WIN")
set(CPACK_GENERATOR "ZIP")
set(FDB_CONFIG_DIR "etc")
set(FDB_LIB_DIR "lib")
set(FDB_LIB_NOSUFFIX "lib")
set(FDB_LIBEXEC_DIR "bin")
set(FDB_SHARE_DIR "share")
set(FDB_BIN_DIR "bin")
set(FDB_SBIN_DIR "bin")
set(FDB_INCLUDE_INSTALL_DIR "include")
set(FDB_PYTHON_INSTALL_DIR "${FDB_LIB_DIR}/python2.7/site-packages/fdb")
set(FDB_SHARE_DIR "share")
elseif(DIR_LAYOUT MATCHES "OSX")
set(CPACK_GENERATOR productbuild)
set(CPACK_PACKAGING_INSTALL_PREFIX "/")
set(FDB_CONFIG_DIR "usr/local/etc/foundationdb")
set(FDB_LIB_DIR "usr/local/lib")
set(FDB_LIB_NOSUFFIX "usr/local/lib")
set(FDB_LIBEXEC_DIR "usr/local/libexec")
set(FDB_BIN_DIR "usr/local/bin")
set(FDB_SBIN_DIR "usr/local/libexec")
set(FDB_INCLUDE_INSTALL_DIR "usr/local/include")
set(FDB_PYTHON_INSTALL_DIR "Library/Python/2.7/site-packages/fdb")
set(FDB_SHARE_DIR "usr/local/share")
else()
if(DIR_LAYOUT MATCHES "RPM")
set(CPACK_GENERATOR RPM)
else()
# DEB
set(CPACK_GENERATOR "DEB")
set(LIBSUFFIX "")
endif()
set(CMAKE_INSTALL_PREFIX "/")
set(CPACK_PACKAGING_INSTALL_PREFIX "/")
set(FDB_CONFIG_DIR "etc/foundationdb")
set(FDB_LIB_DIR "usr/lib${LIBSUFFIX}")
set(FDB_LIB_NOSUFFIX "usr/lib")
set(FDB_LIBEXEC_DIR ${FDB_LIB_DIR})
set(FDB_BIN_DIR "usr/bin")
set(FDB_SBIN_DIR "usr/sbin")
set(FDB_INCLUDE_INSTALL_DIR "usr/include")
set(FDB_PYTHON_INSTALL_DIR "${FDB_LIB_DIR}/python2.7/site-packages/fdb")
set(FDB_SHARE_DIR "usr/share")
endif()
if(INSTALL_LAYOUT MATCHES "OSX")
set(FDBMONITOR_INSTALL_LOCATION "${FDB_LIBEXEC_DIR}")
else()
set(FDBMONITOR_INSTALL_LOCATION "${FDB_LIB_NOSUFFIX}/foundationdb")
endif()
set(CPACK_PROJECT_CONFIG_FILE "${CMAKE_SOURCE_DIR}/cmake/CPackConfig.cmake")
################################################################################
# Version information
@ -130,104 +178,169 @@ set(CPACK_PACKAGE_DESCRIPTION_SUMMARY
"FoundationDB is a scalable, fault-tolerant, ordered key-value store with full ACID transactions.")
set(CPACK_PACKAGE_ICON ${CMAKE_SOURCE_DIR}/packaging/foundationdb.ico)
set(CPACK_PACKAGE_CONTACT "The FoundationDB Community")
set(CPACK_COMPONENT_server_DEPENDS clients)
if (INSTALL_LAYOUT MATCHES "OSX")
# MacOS needs a file extension for the LICENSE file
set(CPACK_RESOURCE_FILE_README ${CMAKE_SOURCE_DIR}/packaging/osx/resources/conclusion.rtf)
set(CPACK_PRODUCTBUILD_RESOURCES_DIR ${CMAKE_SOURCE_DIR}/packaging/osx/resources)
configure_file(${CMAKE_SOURCE_DIR}/LICENSE ${CMAKE_BINARY_DIR}/License.txt COPYONLY)
set(CPACK_RESOURCE_FILE_LICENSE ${CMAKE_BINARY_DIR}/License.txt)
else()
set(CPACK_RESOURCE_FILE_README ${CMAKE_SOURCE_DIR}/README.md)
set(CPACK_RESOURCE_FILE_LICENSE ${CMAKE_SOURCE_DIR}/LICENSE)
set(CPACK_COMPONENT_SERVER-EL6_DEPENDS clients-el6)
set(CPACK_COMPONENT_SERVER-EL7_DEPENDS clients-el7)
set(CPACK_COMPONENT_SERVER-DEB_DEPENDS clients-deb)
set(CPACK_COMPONENT_SERVER-TGZ_DEPENDS clients-tgz)
set(CPACK_COMPONENT_SERVER-PM_DEPENDS clients-pm)
set(CPACK_COMPONENT_SERVER-EL6_DISPLAY_NAME "foundationdb-server")
set(CPACK_COMPONENT_SERVER-EL7_DISPLAY_NAME "foundationdb-server")
set(CPACK_COMPONENT_SERVER-DEB_DISPLAY_NAME "foundationdb-server")
set(CPACK_COMPONENT_SERVER-TGZ_DISPLAY_NAME "foundationdb-server")
set(CPACK_COMPONENT_SERVER-PM_DISPLAY_NAME "foundationdb-server")
set(CPACK_COMPONENT_CLIENTS-EL6_DISPLAY_NAME "foundationdb-clients")
set(CPACK_COMPONENT_CLIENTS-EL7_DISPLAY_NAME "foundationdb-clients")
set(CPACK_COMPONENT_CLIENTS-DEB_DISPLAY_NAME "foundationdb-clients")
set(CPACK_COMPONENT_CLIENTS-TGZ_DISPLAY_NAME "foundationdb-clients")
set(CPACK_COMPONENT_CLIENTS-PM_DISPLAY_NAME "foundationdb-clients")
# MacOS needs a file extension for the LICENSE file
configure_file(${CMAKE_SOURCE_DIR}/LICENSE ${CMAKE_BINARY_DIR}/License.txt COPYONLY)
################################################################################
# Filename of packages
################################################################################
if(NOT FDB_RELEASE)
set(prerelease_string ".PRERELEASE")
endif()
set(clients-filename "foundationdb-clients-${PROJECT_VERSION}.${CURRENT_GIT_VERSION}${prerelease_string}")
set(server-filename "foundationdb-server-${PROJECT_VERSION}.${CURRENT_GIT_VERSION}${prerelease_string}")
################################################################################
# Configuration for RPM
################################################################################
if(UNIX AND NOT APPLE)
install(DIRECTORY DESTINATION "var/log/foundationdb" COMPONENT server)
install(DIRECTORY DESTINATION "var/lib/foundationdb/data" COMPONENT server)
endif()
set(CPACK_RPM_PACKAGE_LICENSE "Apache 2.0")
if(INSTALL_LAYOUT MATCHES "RPM")
set(CPACK_RPM_server_USER_FILELIST
"%config(noreplace) /etc/foundationdb/foundationdb.conf"
"%attr(0700,foundationdb,foundationdb) /var/log/foundationdb"
"%attr(0700, foundationdb, foundationdb) /var/lib/foundationdb")
set(CPACK_RPM_EXCLUDE_FROM_AUTO_FILELIST_ADDITION
"/usr/sbin"
"/usr/share/java"
"/usr/lib64/python2.7"
"/usr/lib64/python2.7/site-packages"
"/var"
"/var/log"
"/var/lib"
"/lib"
"/lib/systemd"
"/lib/systemd/system"
"/etc/rc.d/init.d")
set(CPACK_RPM_server_DEBUGINFO_PACKAGE ON)
set(CPACK_RPM_clients_DEBUGINFO_PACKAGE ON)
set(CPACK_RPM_BUILD_SOURCE_DIRS_PREFIX /usr/src)
set(CPACK_RPM_COMPONENT_INSTALL ON)
set(CPACK_RPM_clients_PRE_INSTALL_SCRIPT_FILE
${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/preclients.sh)
set(CPACK_RPM_clients_POST_INSTALL_SCRIPT_FILE
${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/postclients.sh)
set(CPACK_RPM_server_PRE_INSTALL_SCRIPT_FILE
${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/preserver.sh)
set(CPACK_RPM_server_POST_INSTALL_SCRIPT_FILE
${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/postserver.sh)
set(CPACK_RPM_server_PRE_UNINSTALL_SCRIPT_FILE
${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/preunserver.sh)
set(CPACK_RPM_server_PACKAGE_REQUIRES
"foundationdb-clients = ${FDB_MAJOR}.${FDB_MINOR}.${FDB_PATCH}, initscripts >= 9.03")
set(CPACK_RPM_server_PACKAGE_RE)
#set(CPACK_RPM_java_PACKAGE_REQUIRES
# "foundationdb-clients = ${FDB_MAJOR}.${FDB_MINOR}.${FDB_PATCH}")
set(CPACK_RPM_python_PACKAGE_REQUIRES
"foundationdb-clients = ${FDB_MAJOR}.${FDB_MINOR}.${FDB_PATCH}")
set(CPACK_RPM_PACKAGE_NAME "foundationdb")
set(CPACK_RPM_CLIENTS-EL6_PACKAGE_NAME "foundationdb-clients")
set(CPACK_RPM_CLIENTS-EL7_PACKAGE_NAME "foundationdb-clients")
set(CPACK_RPM_SERVER-EL6_PACKAGE_NAME "foundationdb-server")
set(CPACK_RPM_SERVER-EL7_PACKAGE_NAME "foundationdb-server")
set(CPACK_RPM_CLIENTS-EL6_FILE_NAME "${clients-filename}.el6.x86_64.rpm")
set(CPACK_RPM_CLIENTS-EL7_FILE_NAME "${clients-filename}.el7.x86_64.rpm")
set(CPACK_RPM_SERVER-EL6_FILE_NAME "${server-filename}.el6.x86_64.rpm")
set(CPACK_RPM_SERVER-EL7_FILE_NAME "${server-filename}.el7.x86_64.rpm")
set(CPACK_RPM_CLIENTS-EL6_DEBUGINFO_FILE_NAME "${clients-filename}.el6-debuginfo.x86_64.rpm")
set(CPACK_RPM_CLIENTS-EL7_DEBUGINFO_FILE_NAME "${clients-filename}.el7-debuginfo.x86_64.rpm")
set(CPACK_RPM_SERVER-EL6_DEBUGINFO_FILE_NAME "${server-filename}.el6-debuginfo.x86_64.rpm")
set(CPACK_RPM_SERVER-EL7_DEBUGINFO_FILE_NAME "${server-filename}.el7-debuginfo.x86_64.rpm")
file(MAKE_DIRECTORY "${CMAKE_BINARY_DIR}/packaging/emptydir")
fdb_install(DIRECTORY "${CMAKE_BINARY_DIR}/packaging/emptydir/" DESTINATION data COMPONENT server)
fdb_install(DIRECTORY "${CMAKE_BINARY_DIR}/packaging/emptydir/" DESTINATION log COMPONENT server)
set(CPACK_RPM_SERVER-EL6_USER_FILELIST
"%config(noreplace) /etc/foundationdb/foundationdb.conf"
"%attr(0700,foundationdb,foundationdb) /var/log/foundationdb"
"%attr(0700, foundationdb, foundationdb) /var/lib/foundationdb")
set(CPACK_RPM_SERVER-EL7_USER_FILELIST
"%config(noreplace) /etc/foundationdb/foundationdb.conf"
"%attr(0700,foundationdb,foundationdb) /var/log/foundationdb"
"%attr(0700, foundationdb, foundationdb) /var/lib/foundationdb")
set(CPACK_RPM_EXCLUDE_FROM_AUTO_FILELIST_ADDITION
"/usr/sbin"
"/usr/share/java"
"/usr/lib64/python2.7"
"/usr/lib64/python2.7/site-packages"
"/var"
"/var/log"
"/var/lib"
"/lib"
"/lib/systemd"
"/lib/systemd/system"
"/etc/rc.d/init.d")
set(CPACK_RPM_DEBUGINFO_PACKAGE ON)
#set(CPACK_RPM_BUILD_SOURCE_DIRS_PREFIX /usr/src)
set(CPACK_RPM_COMPONENT_INSTALL ON)
set(CPACK_RPM_CLIENTS-EL6_PRE_INSTALL_SCRIPT_FILE
${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/preclients.sh)
set(CPACK_RPM_CLIENTS-EL7_PRE_INSTALL_SCRIPT_FILE
${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/preclients.sh)
set(CPACK_RPM_CLIENTS-EL6_POST_INSTALL_SCRIPT_FILE
${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/postclients.sh)
set(CPACK_RPM_CLIENTS-EL7_POST_INSTALL_SCRIPT_FILE
${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/postclients.sh)
set(CPACK_RPM_SERVER-EL6_PRE_INSTALL_SCRIPT_FILE
${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/preserver.sh)
set(CPACK_RPM_SERVER-EL7_PRE_INSTALL_SCRIPT_FILE
${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/preserver.sh)
set(CPACK_RPM_SERVER-EL6_POST_INSTALL_SCRIPT_FILE
${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/postserver-el6.sh)
set(CPACK_RPM_SERVER-EL7_POST_INSTALL_SCRIPT_FILE
${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/postserver.sh)
set(CPACK_RPM_SERVER-EL6_PRE_UNINSTALL_SCRIPT_FILE
${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/preunserver.sh)
set(CPACK_RPM_SERVER-EL7_PRE_UNINSTALL_SCRIPT_FILE
${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/preunserver.sh)
set(CPACK_RPM_SERVER-EL6_PACKAGE_REQUIRES
"foundationdb-clients = ${FDB_MAJOR}.${FDB_MINOR}.${FDB_PATCH}")
set(CPACK_RPM_SERVER-EL7_PACKAGE_REQUIRES
"foundationdb-clients = ${FDB_MAJOR}.${FDB_MINOR}.${FDB_PATCH}")
#set(CPACK_RPM_java_PACKAGE_REQUIRES
# "foundationdb-clients = ${FDB_MAJOR}.${FDB_MINOR}.${FDB_PATCH}")
#set(CPACK_RPM_python_PACKAGE_REQUIRES
# "foundationdb-clients = ${FDB_MAJOR}.${FDB_MINOR}.${FDB_PATCH}")
################################################################################
# Configuration for DEB
################################################################################
set(CPACK_DEBIAN_CLIENTS-DEB_FILE_NAME "${clients-filename}_amd64.deb")
set(CPACK_DEBIAN_SERVER-DEB_FILE_NAME "${server-filename}_amd64.deb")
set(CPACK_DEB_COMPONENT_INSTALL ON)
set(CPACK_DEBIAN_DEBUGINFO_PACKAGE ON)
set(CPACK_DEBIAN_PACKAGE_SECTION "database")
set(CPACK_DEBIAN_ENABLE_COMPONENT_DEPENDS ON)
set(CPACK_DEBIAN_SERVER-DEB_PACKAGE_NAME "foundationdb-server")
set(CPACK_DEBIAN_CLIENTS-DEB_PACKAGE_NAME "foundationdb-clients")
set(CPACK_DEBIAN_SERVER-DEB_PACKAGE_DEPENDS "adduser, libc6 (>= 2.12), python (>= 2.6), foundationdb-clients (= ${FDB_VERSION})")
set(CPACK_DEBIAN_CLIENTS-DEB_PACKAGE_DEPENDS "adduser, libc6 (>= 2.12)")
set(CPACK_DEBIAN_PACKAGE_HOMEPAGE "https://www.foundationdb.org")
set(CPACK_DEBIAN_CLIENTS-DEB_PACKAGE_CONTROL_EXTRA
${CMAKE_SOURCE_DIR}/packaging/deb/DEBIAN-foundationdb-clients/postinst)
set(CPACK_DEBIAN_SERVER-DEB_PACKAGE_CONTROL_EXTRA
${CMAKE_SOURCE_DIR}/packaging/deb/DEBIAN-foundationdb-server/conffiles
${CMAKE_SOURCE_DIR}/packaging/deb/DEBIAN-foundationdb-server/preinst
${CMAKE_SOURCE_DIR}/packaging/deb/DEBIAN-foundationdb-server/postinst
${CMAKE_SOURCE_DIR}/packaging/deb/DEBIAN-foundationdb-server/prerm
${CMAKE_SOURCE_DIR}/packaging/deb/DEBIAN-foundationdb-server/postrm)
################################################################################
# MacOS configuration
################################################################################
if(NOT WIN32)
install(PROGRAMS ${CMAKE_SOURCE_DIR}/packaging/osx/uninstall-FoundationDB.sh
DESTINATION "usr/local/foundationdb"
COMPONENT clients-pm)
install(FILES ${CMAKE_SOURCE_DIR}/packaging/osx/com.foundationdb.fdbmonitor.plist
DESTINATION "Library/LaunchDaemons"
COMPONENT server-pm)
endif()
################################################################################
# Configuration for DEB
################################################################################
if(INSTALL_LAYOUT MATCHES "DEB")
set(CPACK_DEB_COMPONENT_INSTALL ON)
set(CPACK_DEBIAN_PACKAGE_SECTION "database")
set(CPACK_DEBIAN_ENABLE_COMPONENT_DEPENDS ON)
set(CPACK_DEBIAN_SERVER_PACKAGE_DEPENDS "adduser, libc6 (>= 2.12), python (>= 2.6), foundationdb-clients (= ${FDB_VERSION})")
set(CPACK_DEBIAN_CLIENTS_PACKAGE_DEPENDS "adduser, libc6 (>= 2.12)")
set(CPACK_DEBIAN_PACKAGE_HOMEPAGE "https://www.foundationdb.org")
set(CPACK_DEBIAN_CLIENTS_PACKAGE_CONTROL_EXTRA
${CMAKE_SOURCE_DIR}/packaging/deb/DEBIAN-foundationdb-clients/postinst)
set(CPACK_DEBIAN_SERVER_PACKAGE_CONTROL_EXTRA
${CMAKE_SOURCE_DIR}/packaging/deb/DEBIAN-foundationdb-server/conffiles
${CMAKE_SOURCE_DIR}/packaging/deb/DEBIAN-foundationdb-server/preinst
${CMAKE_SOURCE_DIR}/packaging/deb/DEBIAN-foundationdb-server/postinst
${CMAKE_SOURCE_DIR}/packaging/deb/DEBIAN-foundationdb-server/prerm
${CMAKE_SOURCE_DIR}/packaging/deb/DEBIAN-foundationdb-server/postrm)
endif()
################################################################################
# MacOS configuration
################################################################################
if(INSTALL_LAYOUT MATCHES "OSX")
set(CPACK_PREFLIGHT_SERVER_SCRIPT ${CMAKE_SOURCE_DIR}/packaging/osx/scripts-server/preinstall)
set(CPACK_POSTFLIGHT_SERVER_SCRIPT ${CMAKE_SOURCE_DIR}/packaging/osx/scripts-server/postinstall)
set(CPACK_POSTFLIGHT_CLIENTS_SCRIPT ${CMAKE_SOURCE_DIR}/packaging/osx/scripts-server/preinstall)
install(PROGRAMS ${CMAKE_SOURCE_DIR}/packaging/osx/uninstall-FoundationDB.sh
DESTINATION "usr/local/foundationdb"
COMPONENT clients)
install(FILES ${CMAKE_SOURCE_DIR}/packaging/osx/com.foundationdb.fdbmonitor.plist
DESTINATION "Library/LaunchDaemons"
COMPONENT server)
endif()
set(CPACK_ARCHIVE_COMPONENT_INSTALL ON)
set(CPACK_ARCHIVE_CLIENTS-TGZ_FILE_NAME "${clients-filename}.x86_64")
set(CPACK_ARCHIVE_SERVER-TGZ_FILE_NAME "${server-filename}.x86_64")
################################################################################
# Server configuration
@ -239,54 +352,33 @@ set(CLUSTER_DESCRIPTION1 ${description1} CACHE STRING "Cluster description")
set(CLUSTER_DESCRIPTION2 ${description2} CACHE STRING "Cluster description")
if(NOT WIN32)
if(INSTALL_LAYOUT MATCHES "OSX")
install(FILES ${CMAKE_SOURCE_DIR}/packaging/osx/foundationdb.conf.new
DESTINATION ${FDB_CONFIG_DIR}
COMPONENT server)
else()
install(FILES ${CMAKE_SOURCE_DIR}/packaging/foundationdb.conf
DESTINATION ${FDB_CONFIG_DIR}
COMPONENT server)
endif()
install(FILES ${CMAKE_SOURCE_DIR}/packaging/osx/foundationdb.conf.new
DESTINATION "usr/local/etc"
COMPONENT server-pm)
fdb_install(FILES ${CMAKE_SOURCE_DIR}/packaging/foundationdb.conf
DESTINATION etc
COMPONENT server)
install(FILES ${CMAKE_SOURCE_DIR}/packaging/argparse.py
DESTINATION "${FDB_LIB_NOSUFFIX}/foundationdb"
COMPONENT server)
DESTINATION "usr/lib/foundationdb"
COMPONENT server-el6)
install(FILES ${CMAKE_SOURCE_DIR}/packaging/make_public.py
DESTINATION "${FDB_LIB_NOSUFFIX}/foundationdb"
COMPONENT server)
else()
install(FILES ${CMAKE_BINARY_DIR}/fdb.cluster
DESTINATION "etc"
COMPONENT server)
endif()
if((INSTALL_LAYOUT MATCHES "RPM") OR (INSTALL_LAYOUT MATCHES "DEB"))
file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/packaging/foundationdb
${CMAKE_BINARY_DIR}/packaging/rpm)
install(
DIRECTORY ${CMAKE_BINARY_DIR}/packaging/foundationdb
DESTINATION "var/log"
COMPONENT server)
install(
DIRECTORY ${CMAKE_BINARY_DIR}/packaging/foundationdb
DESTINATION "var/lib"
COMPONENT server)
execute_process(
COMMAND pidof systemd
RESULT_VARIABLE IS_SYSTEMD
OUTPUT_QUIET
ERROR_QUIET)
DESTINATION "usr/lib/foundationdb"
COMPONENT server-el6)
install(FILES ${CMAKE_SOURCE_DIR}/packaging/argparse.py
DESTINATION "usr/lib/foundationdb"
COMPONENT server-deb)
install(FILES ${CMAKE_SOURCE_DIR}/packaging/make_public.py
DESTINATION "usr/lib/foundationdb"
COMPONENT server-deb)
install(FILES ${CMAKE_SOURCE_DIR}/packaging/rpm/foundationdb.service
DESTINATION "lib/systemd/system"
COMPONENT server)
if(INSTALL_LAYOUT MATCHES "RPM")
install(PROGRAMS ${CMAKE_SOURCE_DIR}/packaging/rpm/foundationdb-init
DESTINATION "etc/rc.d/init.d"
RENAME "foundationdb"
COMPONENT server)
else()
install(PROGRAMS ${CMAKE_SOURCE_DIR}/packaging/deb/foundationdb-init
DESTINATION "etc/init.d"
RENAME "foundationdb"
COMPONENT server)
endif()
COMPONENT server-el7)
install(PROGRAMS ${CMAKE_SOURCE_DIR}/packaging/rpm/foundationdb-init
DESTINATION "etc/rc.d/init.d"
RENAME "foundationdb"
COMPONENT server-el6)
install(PROGRAMS ${CMAKE_SOURCE_DIR}/packaging/deb/foundationdb-init
DESTINATION "etc/init.d"
RENAME "foundationdb"
COMPONENT server-deb)
endif()

View File

@ -142,10 +142,10 @@
A transaction is not permitted to read any transformed key or value previously set within that transaction, and an attempt to do so will result in an error.
.. |atomic-versionstamps-tuple-warning-key| replace::
At this time, versionstamped keys are not compatible with the Tuple layer except in Java and Python. Note that this implies versionstamped keys may not be used with the Subspace and Directory layers except in those languages.
At this time, versionstamped keys are not compatible with the Tuple layer except in Java, Python, and Go. Note that this implies versionstamped keys may not be used with the Subspace and Directory layers except in those languages.
.. |atomic-versionstamps-tuple-warning-value| replace::
At this time, versionstamped values are not compatible with the Tuple layer except in Java and Python. Note that this implies versionstamped values may not be used with the Subspace and Directory layers except in those languages.
At this time, versionstamped values are not compatible with the Tuple layer except in Java, Python, and Go. Note that this implies versionstamped values may not be used with the Subspace and Directory layers except in those languages.
.. |api-version| replace:: 610

View File

@ -127,7 +127,7 @@ The following format informally describes the JSON containing the status data. T
"name": < "initializing"
| "missing_data"
| "healing"
| "removing_redundant_teams"
| "optimizing_team_collections"
| "healthy_repartitioning"
| "healthy_removing_server"
| "healthy_rebalancing"

View File

@ -7,7 +7,7 @@ Release Notes
Features
--------
Improved replication mechanism, a new hierarchical replication technique that further significantly reduces the frequency of data loss events even when multiple machines (e.g., fault-tolerant zones in the current code) permanently fail at the same time. `(PR #964) <https://github.com/apple/foundationdb/pull/964>`.
* Improved replication mechanism, a new hierarchical replication technique that further significantly reduces the frequency of data loss events even when multiple machines (e.g., fault-tolerant zones in the current code) permanently fail at the same time. `(PR #964) <https://github.com/apple/foundationdb/pull/964>`.
* Get read version, read, and commit requests are counted and aggregated by server-side latency in configurable latency bands and output in JSON status. `(PR #1084) <https://github.com/apple/foundationdb/pull/1084>`_
@ -21,6 +21,8 @@ Improved replication mechanism, a new hierarchical replication technique that fu
Performance
-----------
* Java: Successful commits and range reads no longer create ``FDBException`` objects to reduce memory pressure. `(Issue #1235) <https://github.com/apple/foundationdb/issues/1235>`_
Fixes
-----
@ -39,6 +41,7 @@ Bindings
* Python: Removed ``fdb.init``, ``fdb.create_cluster``, and ``fdb.Cluster``. ``fdb.open`` no longer accepts a ``database_name`` parameter. `(PR #942) <https://github.com/apple/foundationdb/pull/942>`_
* Java: Deprecated ``FDB.createCluster`` and ``Cluster``. The preferred way to get a ``Database`` is by using ``FDB.open``, which should work in both new and old API versions. `(PR #942) <https://github.com/apple/foundationdb/pull/942>`_
* Java: Removed ``Cluster(long cPtr, Executor executor)`` constructor. This is API breaking for any code that has subclassed the ``Cluster`` class and is not protected by API versioning. `(PR #942) <https://github.com/apple/foundationdb/pull/942>`_
* Java: Several methods relevant to read-only transactions have been moved into the ``ReadTransaction`` interface.
* Ruby: Removed ``FDB.init``, ``FDB.create_cluster``, and ``FDB.Cluster``. ``FDB.open`` no longer accepts a ``database_name`` parameter. `(PR #942) <https://github.com/apple/foundationdb/pull/942>`_
* Golang: Deprecated ``fdb.StartNetwork``, ``fdb.Open``, ``fdb.MustOpen``, and ``fdb.CreateCluster`` and added ``fdb.OpenDatabase`` and ``fdb.MustOpenDatabase``. The preferred way to start the network and get a ``Database`` is by using ``FDB.OpenDatabase`` or ``FDB.OpenDefault``. `(PR #942) <https://github.com/apple/foundationdb/pull/942>`_
* Flow: Deprecated ``API::createCluster`` and ``Cluster`` and added ``API::createDatabase``. The preferred way to get a ``Database`` is by using ``API::createDatabase``. `(PR #942) <https://github.com/apple/foundationdb/pull/942>`_

View File

@ -5,21 +5,29 @@ add_flow_target(EXECUTABLE NAME fdbbackup SRCS ${FDBBACKUP_SRCS})
target_link_libraries(fdbbackup PRIVATE fdbclient)
if(NOT OPEN_FOR_IDE)
install(TARGETS fdbbackup DESTINATION ${FDB_BIN_DIR} COMPONENT clients)
fdb_install(TARGETS fdbbackup DESTINATION bin COMPONENT clients)
install_symlink(
TO /${FDB_BIN_DIR}/fdbbackup
DESTINATION ${FDB_LIB_DIR}/foundationdb/backup_agent/backup_agent
COMPONENT clients)
COMPONENT clients
FILE_DIR bin
LINK_DIR fdbmonitor
FILE_NAME fdbbackup
LINK_NAME backup_agent/backup_agent)
install_symlink(
TO /${FDB_BIN_DIR}/fdbbackup
DESTINATION ${FDB_BIN_DIR}/fdbrestore
COMPONENT clients)
COMPONENT clients
FILE_DIR bin
LINK_DIR bin
FILE_NAME fdbbackup
LINK_NAME fdbrestore)
install_symlink(
TO /${FDB_BIN_DIR}/fdbbackup
DESTINATION ${FDB_BIN_DIR}/dr_agent
COMPONENT clients)
COMPONENT clients
FILE_DIR bin
LINK_DIR bin
FILE_NAME fdbbackup
LINK_NAME dr_agent)
install_symlink(
TO /${FDB_BIN_DIR}/fdbbackup
DESTINATION ${FDB_BIN_DIR}/fdbdr
COMPONENT clients)
COMPONENT clients
FILE_DIR bin
LINK_DIR bin
FILE_NAME fdbbackup
LINK_NAME fdbdr)
endif()

View File

@ -3324,7 +3324,8 @@ int main(int argc, char* argv[]) {
<< FastAllocator<512>::pageCount << " "
<< FastAllocator<1024>::pageCount << " "
<< FastAllocator<2048>::pageCount << " "
<< FastAllocator<4096>::pageCount << endl;
<< FastAllocator<4096>::pageCount << " "
<< FastAllocator<8192>::pageCount << endl;
vector< std::pair<std::string, const char*> > typeNames;
for( auto i = allocInstr.begin(); i != allocInstr.end(); ++i ) {

View File

@ -53,11 +53,11 @@
<PropertyGroup Label="UserMacros" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|X64'">
<LinkIncremental>true</LinkIncremental>
<IncludePath>$(IncludePath);../;C:\Program Files\boost_1_52_0</IncludePath>
<IncludePath>$(IncludePath);../;C:\Program Files\boost_1_67_0</IncludePath>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|X64'">
<LinkIncremental>false</LinkIncremental>
<IncludePath>$(IncludePath);../;C:\Program Files\boost_1_52_0</IncludePath>
<IncludePath>$(IncludePath);../;C:\Program Files\boost_1_67_0</IncludePath>
<CustomBuildBeforeTargets>PreBuildEvent</CustomBuildBeforeTargets>
</PropertyGroup>
<ItemDefinitionGroup>
@ -78,7 +78,7 @@
<Optimization>Disabled</Optimization>
<BasicRuntimeChecks>EnableFastChecks</BasicRuntimeChecks>
<RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=0x0502;WINVER=0x0502;NTDDI_VERSION=0x05020000;_DEBUG;_HAS_ITERATOR_DEBUGGING=0;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=0x0502;WINVER=0x0502;BOOST_ALL_NO_LIB;NTDDI_VERSION=0x05020000;_DEBUG;_HAS_ITERATOR_DEBUGGING=0;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<MultiProcessorCompilation>true</MultiProcessorCompilation>
<AdditionalOptions>/bigobj @../flow/no_intellisense.opt %(AdditionalOptions)</AdditionalOptions>
@ -98,7 +98,7 @@
<Optimization>Full</Optimization>
<RuntimeLibrary>MultiThreaded</RuntimeLibrary>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=0x0502;WINVER=0x0502;NTDDI_VERSION=0x05020000;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=0x0502;WINVER=0x0502;BOOST_ALL_NO_LIB;NTDDI_VERSION=0x05020000;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<EnableEnhancedInstructionSet>NotSet</EnableEnhancedInstructionSet>
<EnablePREfast>false</EnablePREfast>

View File

@ -11,6 +11,4 @@ endif()
add_flow_target(EXECUTABLE NAME fdbcli SRCS ${FDBCLI_SRCS})
target_link_libraries(fdbcli PRIVATE fdbclient)
if(NOT OPEN_FOR_IDE)
install(TARGETS fdbcli DESTINATION ${FDB_BIN_DIR} COMPONENT clients)
endif()
fdb_install(TARGETS fdbcli DESTINATION bin COMPONENT clients)

View File

@ -2031,7 +2031,7 @@ ACTOR Future<bool> exclude( Database db, std::vector<StringRef> tokens, Referenc
wait( makeInterruptable(waitForExcludedServers(db,addresses)) );
std::vector<ProcessData> workers = wait( makeInterruptable(getWorkers(db)) );
std::map<uint32_t, std::set<uint16_t>> workerPorts;
std::map<IPAddress, std::set<uint16_t>> workerPorts;
for(auto addr : workers)
workerPorts[addr.address.ip].insert(addr.address.port);
@ -2050,7 +2050,7 @@ ACTOR Future<bool> exclude( Database db, std::vector<StringRef> tokens, Referenc
"excluded the correct machines or processes before removing them from the cluster:\n");
for(auto addr : absentExclusions) {
if(addr.port == 0)
printf(" %s\n", toIPString(addr.ip).c_str());
printf(" %s\n", addr.ip.toString().c_str());
else
printf(" %s\n", addr.toString().c_str());
}

View File

@ -62,13 +62,13 @@
<LinkIncremental>true</LinkIncremental>
<OutDir>$(SolutionDir)bin\$(Configuration)\</OutDir>
<IntDir>$(SystemDrive)\temp\msvcfdb\$(Platform)$(Configuration)\$(MSBuildProjectName)\</IntDir>
<IncludePath>$(IncludePath);../;C:\Program Files\boost_1_52_0</IncludePath>
<IncludePath>$(IncludePath);../;C:\Program Files\boost_1_67_0</IncludePath>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|X64'">
<LinkIncremental>false</LinkIncremental>
<OutDir>$(SolutionDir)bin\$(Configuration)\</OutDir>
<IntDir>$(SystemDrive)\temp\msvcfdb\$(Platform)$(Configuration)\$(MSBuildProjectName)\</IntDir>
<IncludePath>$(IncludePath);../;C:\Program Files\boost_1_52_0</IncludePath>
<IncludePath>$(IncludePath);../;C:\Program Files\boost_1_67_0</IncludePath>
</PropertyGroup>
<ItemDefinitionGroup>
<ClCompile>
@ -81,7 +81,7 @@
</PrecompiledHeader>
<WarningLevel>Level3</WarningLevel>
<Optimization>Disabled</Optimization>
<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=0x0502;WINVER=0x0502;NTDDI_VERSION=0x05020000;_DEBUG;_HAS_ITERATOR_DEBUGGING=0;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=0x0502;WINVER=0x0502;BOOST_ALL_NO_LIB;NTDDI_VERSION=0x05020000;_DEBUG;_HAS_ITERATOR_DEBUGGING=0;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\zookeeper\win32;..\zookeeper\generated;..\zookeeper\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<MultiProcessorCompilation>true</MultiProcessorCompilation>
<MinimalRebuild>false</MinimalRebuild>
@ -105,7 +105,7 @@
</PrecompiledHeader>
<Optimization>Full</Optimization>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=0x0502;WINVER=0x0502;NTDDI_VERSION=0x05020000;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=0x0502;WINVER=0x0502;BOOST_ALL_NO_LIB;NTDDI_VERSION=0x05020000;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>..\zookeeper\win32;..\zookeeper\generated;..\zookeeper\include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<MultiProcessorCompilation>true</MultiProcessorCompilation>
<FavorSizeOrSpeed>Speed</FavorSizeOrSpeed>

View File

@ -28,13 +28,21 @@
#include "fdbclient/CoordinationInterface.h"
uint32_t determinePublicIPAutomatically( ClusterConnectionString const& ccs ) {
IPAddress determinePublicIPAutomatically(ClusterConnectionString const& ccs) {
try {
boost::asio::io_service ioService;
boost::asio::ip::udp::socket socket(ioService);
boost::asio::ip::udp::endpoint endpoint(boost::asio::ip::address_v4(ccs.coordinators()[0].ip), ccs.coordinators()[0].port);
using namespace boost::asio;
io_service ioService;
ip::udp::socket socket(ioService);
const auto& coordAddr = ccs.coordinators()[0];
const auto boostIp = coordAddr.ip.isV6() ? ip::address(ip::address_v6(coordAddr.ip.toV6()))
: ip::address(ip::address_v4(coordAddr.ip.toV4()));
ip::udp::endpoint endpoint(boostIp, coordAddr.port);
socket.connect(endpoint);
auto ip = socket.local_endpoint().address().to_v4().to_ulong();
IPAddress ip = coordAddr.ip.isV6() ? IPAddress(socket.local_endpoint().address().to_v6().to_bytes())
: IPAddress(socket.local_endpoint().address().to_v4().to_ulong());
socket.close();
return ip;
@ -43,4 +51,4 @@ uint32_t determinePublicIPAutomatically( ClusterConnectionString const& ccs ) {
fprintf(stderr, "Error determining public address: %s\n", e.what());
throw bind_failed();
}
}
}
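
Read outside the interleaved old/new lines, the change above keeps the original trick for discovering the public address and merely adds IPv6 support: connect a datagram socket to the first coordinator and report the socket's local endpoint. A rough Go sketch of the same idea, using a placeholder coordinator address and assuming nothing about FoundationDB itself:

    package main

    import (
    	"fmt"
    	"log"
    	"net"
    )

    // publicIPTowards reports the local IP the OS would pick to reach coordAddr.
    // No packet is sent; "connecting" a UDP socket only performs route selection.
    func publicIPTowards(coordAddr string) (net.IP, error) {
    	conn, err := net.Dial("udp", coordAddr) // works for "10.0.0.1:4500" and "[::1]:4500"
    	if err != nil {
    		return nil, err
    	}
    	defer conn.Close()
    	return conn.LocalAddr().(*net.UDPAddr).IP, nil
    }

    func main() {
    	// "10.0.0.1:4500" is a placeholder coordinator address, not a real one.
    	ip, err := publicIPTowards("10.0.0.1:4500")
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Println("public-facing IP:", ip)
    }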

View File

@ -261,14 +261,14 @@ class FileBackupAgent : public BackupAgentBase {
public:
FileBackupAgent();
FileBackupAgent( FileBackupAgent&& r ) noexcept(true) :
FileBackupAgent( FileBackupAgent&& r ) BOOST_NOEXCEPT :
subspace( std::move(r.subspace) ),
config( std::move(r.config) ),
lastRestorable( std::move(r.lastRestorable) ),
taskBucket( std::move(r.taskBucket) ),
futureBucket( std::move(r.futureBucket) ) {}
void operator=( FileBackupAgent&& r ) noexcept(true) {
void operator=( FileBackupAgent&& r ) BOOST_NOEXCEPT {
subspace = std::move(r.subspace);
config = std::move(r.config);
lastRestorable = std::move(r.lastRestorable),
@ -378,7 +378,7 @@ public:
DatabaseBackupAgent();
explicit DatabaseBackupAgent(Database src);
DatabaseBackupAgent( DatabaseBackupAgent&& r ) noexcept(true) :
DatabaseBackupAgent( DatabaseBackupAgent&& r ) BOOST_NOEXCEPT :
subspace( std::move(r.subspace) ),
states( std::move(r.states) ),
config( std::move(r.config) ),
@ -390,7 +390,7 @@ public:
sourceStates( std::move(r.sourceStates) ),
sourceTagNames( std::move(r.sourceTagNames) ) {}
void operator=( DatabaseBackupAgent&& r ) noexcept(true) {
void operator=( DatabaseBackupAgent&& r ) BOOST_NOEXCEPT {
subspace = std::move(r.subspace);
states = std::move(r.states);
config = std::move(r.config);

View File

@ -633,33 +633,21 @@ struct LogMessageVersion {
};
struct AddressExclusion {
uint32_t ip;
IPAddress ip;
int port;
AddressExclusion() : ip(0), port(0) {}
explicit AddressExclusion( uint32_t ip ) : ip(ip), port(0) {}
explicit AddressExclusion( uint32_t ip, int port ) : ip(ip), port(port) {}
explicit AddressExclusion(const IPAddress& ip) : ip(ip), port(0) {}
explicit AddressExclusion(const IPAddress& ip, int port) : ip(ip), port(port) {}
explicit AddressExclusion (std::string s) {
int a,b,c,d,p,count=-1;
if (sscanf(s.c_str(), "%d.%d.%d.%d:%d%n", &a,&b,&c,&d, &p, &count) == 5 && count == s.size()) {
ip = (a<<24)+(b<<16)+(c<<8)+d;
port = p;
}
else if (sscanf(s.c_str(), "%d.%d.%d.%d%n", &a,&b,&c,&d, &count) == 4 && count == s.size()) {
ip = (a<<24)+(b<<16)+(c<<8)+d;
port = 0;
}
else {
throw connection_string_invalid();
}
bool operator<(AddressExclusion const& r) const {
if (ip != r.ip) return ip < r.ip;
return port < r.port;
}
bool operator< (AddressExclusion const& r) const { if (ip != r.ip) return ip < r.ip; return port<r.port; }
bool operator== (AddressExclusion const& r) const { return ip == r.ip && port == r.port; }
bool operator==(AddressExclusion const& r) const { return ip == r.ip && port == r.port; }
bool isWholeMachine() const { return port == 0; }
bool isValid() const { return ip != 0 || port != 0; }
bool isValid() const { return ip.isValid() || port != 0; }
bool excludes( NetworkAddress const& addr ) const {
if(isWholeMachine())
@ -669,17 +657,16 @@ struct AddressExclusion {
// This is for debugging and IS NOT to be used for serialization to persistent state
std::string toString() const {
std::string as = format( "%d.%d.%d.%d", (ip>>24)&0xff, (ip>>16)&0xff, (ip>>8)&0xff, ip&0xff );
if (!isWholeMachine())
as += format(":%d", port);
return as;
return formatIpPort(ip, port);
return ip.toString();
}
static AddressExclusion parse( StringRef const& );
template <class Ar>
void serialize(Ar& ar) {
ar.serializeBinaryItem(*this);
serializer(ar, ip, port);
}
};

View File

@ -168,7 +168,11 @@ public:
return configSpace.pack(LiteralStringRef(__FUNCTION__));
}
ACTOR static Future<std::vector<KeyRange>> getRestoreRangesOrDefault(RestoreConfig *self, Reference<ReadYourWritesTransaction> tr) {
Future<std::vector<KeyRange>> getRestoreRangesOrDefault(Reference<ReadYourWritesTransaction> tr) {
return getRestoreRangesOrDefault_impl(this, tr);
}
ACTOR static Future<std::vector<KeyRange>> getRestoreRangesOrDefault_impl(RestoreConfig *self, Reference<ReadYourWritesTransaction> tr) {
state std::vector<KeyRange> ranges = wait(self->restoreRanges().getD(tr));
if (ranges.empty()) {
state KeyRange range = wait(self->restoreRange().getD(tr));
@ -374,7 +378,7 @@ ACTOR Future<std::string> RestoreConfig::getFullStatus_impl(RestoreConfig restor
tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
tr->setOption(FDBTransactionOptions::LOCK_AWARE);
state Future<std::vector<KeyRange>> ranges = RestoreConfig::getRestoreRangesOrDefault(&restore, tr);
state Future<std::vector<KeyRange>> ranges = restore.getRestoreRangesOrDefault(tr);
state Future<Key> addPrefix = restore.addPrefix().getD(tr);
state Future<Key> removePrefix = restore.removePrefix().getD(tr);
state Future<Key> url = restore.sourceContainerURL().getD(tr);
@ -2572,7 +2576,7 @@ namespace fileBackup {
tr->setOption(FDBTransactionOptions::LOCK_AWARE);
bc = restore.sourceContainer().getOrThrow(tr);
restoreRanges = RestoreConfig::getRestoreRangesOrDefault(&restore, tr);
restoreRanges = restore.getRestoreRangesOrDefault(tr);
addPrefix = restore.addPrefix().getD(tr);
removePrefix = restore.removePrefix().getD(tr);
@ -3681,8 +3685,9 @@ public:
oldRestore.clear(tr);
}
for (auto &restoreRange : restoreRanges) {
KeyRange restoreIntoRange = KeyRangeRef(restoreRange.begin, restoreRange.end).removePrefix(removePrefix).withPrefix(addPrefix);
state int index;
for (index = 0; index < restoreRanges.size(); index++) {
KeyRange restoreIntoRange = KeyRangeRef(restoreRanges[index].begin, restoreRanges[index].end).removePrefix(removePrefix).withPrefix(addPrefix);
Standalone<RangeResultRef> existingRows = wait(tr->getRange(restoreIntoRange, 1));
if (existingRows.size() > 0) {
throw restore_destination_not_empty();

View File

@ -36,7 +36,7 @@ template <class Val, class Metric=int, class MetricFunc = ConstantMetric<Metric>
class KeyRangeMap : public RangeMap<Key,Val,KeyRangeRef,Metric,MetricFunc>, NonCopyable, public ReferenceCounted<KeyRangeMap<Val>> {
public:
explicit KeyRangeMap(Val v=Val(), Key endKey = allKeys.end) : RangeMap<Key,Val,KeyRangeRef,Metric,MetricFunc>(endKey, v), mapEnd(endKey) {}
void operator=(KeyRangeMap&& r) noexcept(true) { mapEnd = std::move(r.mapEnd); RangeMap<Key,Val,KeyRangeRef,Metric,MetricFunc>::operator=(std::move(r)); }
void operator=(KeyRangeMap&& r) BOOST_NOEXCEPT { mapEnd = std::move(r.mapEnd); RangeMap<Key,Val,KeyRangeRef,Metric,MetricFunc>::operator=(std::move(r)); }
void insert( const KeyRangeRef& keys, const Val& value ) { RangeMap<Key,Val,KeyRangeRef,Metric,MetricFunc>::insert(keys, value); }
void insert( const KeyRef& key, const Val& value ) { RangeMap<Key,Val,KeyRangeRef,Metric,MetricFunc>::insert( singleKeyRange(key), value); }
std::vector<KeyRangeWith<Val>> getAffectedRangesAfterInsertion( const KeyRangeRef& keys, const Val &insertionValue = Val());
@ -67,7 +67,7 @@ template <class Val, class Metric=int, class MetricFunc = ConstantMetric<Metric>
class CoalescedKeyRefRangeMap : public RangeMap<KeyRef,Val,KeyRangeRef,Metric,MetricFunc>, NonCopyable {
public:
explicit CoalescedKeyRefRangeMap(Val v=Val(), Key endKey = allKeys.end) : RangeMap<KeyRef,Val,KeyRangeRef,Metric,MetricFunc>(endKey, v), mapEnd(endKey) {}
void operator=(CoalescedKeyRefRangeMap&& r) noexcept(true) { mapEnd = std::move(r.mapEnd); RangeMap<KeyRef, Val, KeyRangeRef,Metric,MetricFunc>::operator=(std::move(r)); }
void operator=(CoalescedKeyRefRangeMap&& r) BOOST_NOEXCEPT { mapEnd = std::move(r.mapEnd); RangeMap<KeyRef, Val, KeyRangeRef,Metric,MetricFunc>::operator=(std::move(r)); }
void insert( const KeyRangeRef& keys, const Val& value );
void insert( const KeyRef& key, const Val& value, Arena& arena );
Key mapEnd;
@ -77,7 +77,7 @@ template <class Val, class Metric=int, class MetricFunc = ConstantMetric<Metric>
class CoalescedKeyRangeMap : public RangeMap<Key,Val,KeyRangeRef,Metric,MetricFunc>, NonCopyable {
public:
explicit CoalescedKeyRangeMap(Val v=Val(), Key endKey = allKeys.end) : RangeMap<Key,Val,KeyRangeRef,Metric,MetricFunc>(endKey, v), mapEnd(endKey) {}
void operator=(CoalescedKeyRangeMap&& r) noexcept(true) { mapEnd = std::move(r.mapEnd); RangeMap<Key,Val,KeyRangeRef,Metric,MetricFunc>::operator=(std::move(r)); }
void operator=(CoalescedKeyRangeMap&& r) BOOST_NOEXCEPT { mapEnd = std::move(r.mapEnd); RangeMap<Key,Val,KeyRangeRef,Metric,MetricFunc>::operator=(std::move(r)); }
void insert( const KeyRangeRef& keys, const Val& value );
void insert( const KeyRef& key, const Val& value );
Key mapEnd;

View File

@ -1730,7 +1730,7 @@ TEST_CASE("/ManagementAPI/AutoQuorumChange/checkLocality") {
data.locality.set(LiteralStringRef("rack"), StringRef(rack));
data.locality.set(LiteralStringRef("zoneid"), StringRef(rack));
data.locality.set(LiteralStringRef("machineid"), StringRef(machineId));
data.address.ip = i;
data.address.ip = IPAddress(i);
workers.push_back(data);
}
@ -1749,8 +1749,8 @@ TEST_CASE("/ManagementAPI/AutoQuorumChange/checkLocality") {
LiteralStringRef("machineid")
});
for(auto worker = chosen.begin(); worker != chosen.end(); worker++) {
ASSERT(worker->ip < workers.size());
LocalityData data = workers[worker->ip].locality;
ASSERT(worker->ip.toV4() < workers.size());
LocalityData data = workers[worker->ip.toV4()].locality;
for(auto field = fields.begin(); field != fields.end(); field++) {
chosenValues[*field].insert(data.get(*field).get());
}

View File

@ -214,6 +214,28 @@ TEST_CASE("/fdbclient/MonitorLeader/parseConnectionString/basic") {
ASSERT( input == cs.toString() );
}
{
input = "0xxdeadbeef:100100100@[::1]:1234,[::1]:1235";
std::string commented("#start of comment\n");
commented += input;
commented += "\n";
commented += "# asdfasdf ##";
ClusterConnectionString cs(commented);
ASSERT(input == cs.toString());
}
{
input = "0xxdeadbeef:100100100@[abcd:dcba::1]:1234,[abcd:dcba::abcd:1]:1234";
std::string commented("#start of comment\n");
commented += input;
commented += "\n";
commented += "# asdfasdf ##";
ClusterConnectionString cs(commented);
ASSERT(input == cs.toString());
}
return Void();
}

View File

@ -762,7 +762,7 @@ Database Database::createDatabase( std::string connFileName, int apiVersion, Loc
return Database::createDatabase(rccf, apiVersion, clientLocality);
}
extern uint32_t determinePublicIPAutomatically( ClusterConnectionString const& ccs );
extern IPAddress determinePublicIPAutomatically(ClusterConnectionString const& ccs);
Cluster::Cluster( Reference<ClusterConnectionFile> connFile, int apiVersion )
: clusterInterface(new AsyncVar<Optional<ClusterInterface>>())
@ -804,7 +804,7 @@ void Cluster::init( Reference<ClusterConnectionFile> connFile, bool startClientI
.detailf("ImageOffset", "%p", platform::getImageOffset())
.trackLatest("ClientStart");
initializeSystemMonitorMachineState(SystemMonitorMachineState(publicIP));
initializeSystemMonitorMachineState(SystemMonitorMachineState(IPAddress(publicIP)));
systemMonitor();
uncancellable( recurring( &systemMonitor, CLIENT_KNOBS->SYSTEM_MONITOR_INTERVAL, TaskFlushTrace ) );
@ -1068,24 +1068,27 @@ bool GetRangeLimits::hasSatisfiedMinRows() {
return hasByteLimit() && minRows == 0;
}
AddressExclusion AddressExclusion::parse( StringRef const& key ) {
//Must not change: serialized to the database!
std::string s = key.toString();
int a,b,c,d,port,count=-1;
if (sscanf(s.c_str(), "%d.%d.%d.%d%n", &a,&b,&c,&d, &count)<4) {
auto parsedIp = IPAddress::parse(key.toString());
if (parsedIp.present()) {
return AddressExclusion(parsedIp.get());
}
// Not a whole machine, includes `port'.
try {
auto addr = NetworkAddress::parse(key.toString());
if (addr.isTLS()) {
TraceEvent(SevWarnAlways, "AddressExclusionParseError")
.detail("String", printable(key))
.detail("Description", "Address inclusion string should not include `:tls' suffix.");
return AddressExclusion();
}
return AddressExclusion(addr.ip, addr.port);
} catch (Error& e) {
TraceEvent(SevWarnAlways, "AddressExclusionParseError").detail("String", printable(key));
return AddressExclusion();
}
s = s.substr(count);
uint32_t ip = (a<<24)+(b<<16)+(c<<8)+d;
if (!s.size())
return AddressExclusion( ip );
if (sscanf( s.c_str(), ":%d%n", &port, &count ) < 1 || count != s.size()) {
TraceEvent(SevWarnAlways, "AddressExclusionParseError").detail("String", printable(key));
return AddressExclusion();
}
return AddressExclusion( ip, port );
}
Future<Standalone<RangeResultRef>> getRange(
@ -1926,7 +1929,7 @@ Transaction::~Transaction() {
cancelWatches();
}
void Transaction::operator=(Transaction&& r) noexcept(true) {
void Transaction::operator=(Transaction&& r) BOOST_NOEXCEPT {
flushTrLogsIfEnabled();
cx = std::move(r.cx);
tr = std::move(r.tr);
@ -2040,7 +2043,7 @@ ACTOR Future< Standalone< VectorRef< const char*>>> getAddressesForKeyActor( Key
Standalone<VectorRef<const char*>> addresses;
for (auto i : ssi) {
std::string ipString = toIPString(i.address().ip);
std::string ipString = i.address().ip.toString();
char* c_string = new (addresses.arena()) char[ipString.length()+1];
strcpy(c_string, ipString.c_str());
addresses.push_back(addresses.arena(), c_string);

View File

@ -74,8 +74,8 @@ public:
Database() {} // an uninitialized database can be destructed or reassigned safely; that's it
void operator= ( Database const& rhs ) { db = rhs.db; }
Database( Database const& rhs ) : db(rhs.db) {}
Database(Database&& r) noexcept(true) : db(std::move(r.db)) {}
void operator= (Database&& r) noexcept(true) { db = std::move(r.db); }
Database(Database&& r) BOOST_NOEXCEPT : db(std::move(r.db)) {}
void operator= (Database&& r) BOOST_NOEXCEPT { db = std::move(r.db); }
// For internal use by the native client:
explicit Database(Reference<DatabaseContext> cx) : db(cx) {}
@ -280,7 +280,7 @@ public:
// These are to permit use as state variables in actors:
Transaction() : info( TaskDefaultEndpoint ) {}
void operator=(Transaction&& r) noexcept(true);
void operator=(Transaction&& r) BOOST_NOEXCEPT;
void reset();
void fullReset();

View File

@ -66,8 +66,8 @@ struct NotifiedVersion {
set( v );
}
NotifiedVersion(NotifiedVersion&& r) noexcept(true) : waiting(std::move(r.waiting)), val(std::move(r.val)) {}
void operator=(NotifiedVersion&& r) noexcept(true) { waiting = std::move(r.waiting); val = std::move(r.val); }
NotifiedVersion(NotifiedVersion&& r) BOOST_NOEXCEPT : waiting(std::move(r.waiting)), val(std::move(r.val)) {}
void operator=(NotifiedVersion&& r) BOOST_NOEXCEPT { waiting = std::move(r.waiting); val = std::move(r.val); }
private:
typedef std::pair<Version,Promise<Void>> Item;

View File

@ -1805,7 +1805,7 @@ void ReadYourWritesTransaction::setOption( FDBTransactionOptions::Option option,
tr.setOption( option, value );
}
void ReadYourWritesTransaction::operator=(ReadYourWritesTransaction&& r) noexcept(true) {
void ReadYourWritesTransaction::operator=(ReadYourWritesTransaction&& r) BOOST_NOEXCEPT {
cache = std::move( r.cache );
writes = std::move( r.writes );
arena = std::move( r.arena );
@ -1826,7 +1826,7 @@ void ReadYourWritesTransaction::operator=(ReadYourWritesTransaction&& r) noexcep
writes.arena = &arena;
}
ReadYourWritesTransaction::ReadYourWritesTransaction(ReadYourWritesTransaction&& r) noexcept(true) :
ReadYourWritesTransaction::ReadYourWritesTransaction(ReadYourWritesTransaction&& r) BOOST_NOEXCEPT :
cache( std::move(r.cache) ),
writes( std::move(r.writes) ),
arena( std::move(r.arena) ),

View File

@ -111,8 +111,8 @@ public:
// These are to permit use as state variables in actors:
ReadYourWritesTransaction() : cache(&arena), writes(&arena) {}
void operator=(ReadYourWritesTransaction&& r) noexcept(true);
ReadYourWritesTransaction(ReadYourWritesTransaction&& r) noexcept(true);
void operator=(ReadYourWritesTransaction&& r) BOOST_NOEXCEPT;
ReadYourWritesTransaction(ReadYourWritesTransaction&& r) BOOST_NOEXCEPT;
virtual void addref() { ReferenceCounted<ReadYourWritesTransaction>::addref(); }
virtual void delref() { ReferenceCounted<ReadYourWritesTransaction>::delref(); }

View File

@ -521,7 +521,7 @@ const KeyRef JSONSchemas::statusSchema = LiteralStringRef(R"statusSchema(
"initializing",
"missing_data",
"healing",
"removing_redundant_teams",
"optimizing_team_collections",
"healthy_repartitioning",
"healthy_removing_server",
"healthy_rebalancing",
@ -554,7 +554,7 @@ const KeyRef JSONSchemas::statusSchema = LiteralStringRef(R"statusSchema(
"initializing",
"missing_data",
"healing",
"removing_redundant_teams",
"optimizing_team_collections",
"healthy_repartitioning",
"healthy_removing_server",
"healthy_rebalancing",

View File

@ -277,8 +277,8 @@ public:
entries.insert( Entry( allKeys.end, afterAllKeys, VectorRef<KeyValueRef>() ), NoMetric(), true );
}
// Visual Studio refuses to generate these, apparently despite the standard
SnapshotCache(SnapshotCache&& r) noexcept(true) : entries(std::move(r.entries)), arena(r.arena) {}
SnapshotCache& operator=(SnapshotCache&& r) noexcept(true) { entries = std::move(r.entries); arena = r.arena; return *this; }
SnapshotCache(SnapshotCache&& r) BOOST_NOEXCEPT : entries(std::move(r.entries)), arena(r.arena) {}
SnapshotCache& operator=(SnapshotCache&& r) BOOST_NOEXCEPT { entries = std::move(r.entries); arena = r.arena; return *this; }
bool empty() const {
// Returns true iff anything is known about the contents of the snapshot

View File

@ -374,11 +374,7 @@ const AddressExclusion decodeExcludedServersKey( KeyRef const& key ) {
}
std::string encodeExcludedServersKey( AddressExclusion const& addr ) {
//FIXME: make sure what's persisted here is not affected by innocent changes elsewhere
std::string as = format( "%d.%d.%d.%d", (addr.ip>>24)&0xff, (addr.ip>>16)&0xff, (addr.ip>>8)&0xff, addr.ip&0xff );
//ASSERT( StringRef(as).endsWith(LiteralStringRef(":0")) == (addr.port == 0) );
if (!addr.isWholeMachine())
as += format(":%d", addr.port);
return excludedServersPrefix.toString() + as;
return excludedServersPrefix.toString() + addr.toString();
}
const KeyRangeRef workerListKeys( LiteralStringRef("\xff/worker/"), LiteralStringRef("\xff/worker0") );

View File

@ -286,12 +286,12 @@ ThreadFuture<Void> ThreadSafeTransaction::onError( Error const& e ) {
return onMainThread( [tr, e](){ return tr->onError(e); } );
}
void ThreadSafeTransaction::operator=(ThreadSafeTransaction&& r) noexcept(true) {
void ThreadSafeTransaction::operator=(ThreadSafeTransaction&& r) BOOST_NOEXCEPT {
tr = r.tr;
r.tr = NULL;
}
ThreadSafeTransaction::ThreadSafeTransaction(ThreadSafeTransaction&& r) noexcept(true) {
ThreadSafeTransaction::ThreadSafeTransaction(ThreadSafeTransaction&& r) BOOST_NOEXCEPT {
tr = r.tr;
r.tr = NULL;
}

View File

@ -96,8 +96,8 @@ public:
// These are to permit use as state variables in actors:
ThreadSafeTransaction() : tr(NULL) {}
void operator=(ThreadSafeTransaction&& r) noexcept(true);
ThreadSafeTransaction(ThreadSafeTransaction&& r) noexcept(true);
void operator=(ThreadSafeTransaction&& r) BOOST_NOEXCEPT;
ThreadSafeTransaction(ThreadSafeTransaction&& r) BOOST_NOEXCEPT;
void reset();

View File

@ -489,10 +489,10 @@ public:
VersionedMap() : oldestVersion(0), latestVersion(0) {
latestRoot = &roots[0];
}
VersionedMap( VersionedMap&& v ) noexcept(true) : oldestVersion(v.oldestVersion), latestVersion(v.latestVersion), roots(std::move(v.roots)) {
VersionedMap( VersionedMap&& v ) BOOST_NOEXCEPT : oldestVersion(v.oldestVersion), latestVersion(v.latestVersion), roots(std::move(v.roots)) {
latestRoot = &roots[latestVersion];
}
void operator = (VersionedMap && v) noexcept(true) {
void operator = (VersionedMap && v) BOOST_NOEXCEPT {
oldestVersion = v.oldestVersion;
latestVersion = v.latestVersion;
roots = std::move(v.roots);

View File

@ -128,8 +128,8 @@ public:
PTreeImpl::insert( writes, ver, WriteMapEntry( afterAllKeys, OperationStack(), false, false, false, false, false ) );
}
WriteMap(WriteMap&& r) noexcept(true) : writeMapEmpty(r.writeMapEmpty), writes(std::move(r.writes)), ver(r.ver), scratch_iterator(std::move(r.scratch_iterator)), arena(r.arena) {}
WriteMap& operator=(WriteMap&& r) noexcept(true) { writeMapEmpty = r.writeMapEmpty; writes = std::move(r.writes); ver = r.ver; scratch_iterator = std::move(r.scratch_iterator); arena = r.arena; return *this; }
WriteMap(WriteMap&& r) BOOST_NOEXCEPT : writeMapEmpty(r.writeMapEmpty), writes(std::move(r.writes)), ver(r.ver), scratch_iterator(std::move(r.scratch_iterator)), arena(r.arena) {}
WriteMap& operator=(WriteMap&& r) BOOST_NOEXCEPT { writeMapEmpty = r.writeMapEmpty; writes = std::move(r.writes); ver = r.ver; scratch_iterator = std::move(r.scratch_iterator); arena = r.arena; return *this; }
//a write with addConflict false on top of an existing write with a conflict range will not remove the conflict
void mutate( KeyRef key, MutationRef::Type operation, ValueRef param, bool addConflict ) {

View File

@ -165,11 +165,11 @@
<PropertyGroup Label="UserMacros" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|X64'">
<LinkIncremental>true</LinkIncremental>
<IncludePath>$(IncludePath);../;C:\Program Files\boost_1_52_0</IncludePath>
<IncludePath>$(IncludePath);../;C:\Program Files\boost_1_67_0</IncludePath>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|X64'">
<LinkIncremental>false</LinkIncremental>
<IncludePath>$(IncludePath);../;C:\Program Files\boost_1_52_0</IncludePath>
<IncludePath>$(IncludePath);../;C:\Program Files\boost_1_67_0</IncludePath>
</PropertyGroup>
<ItemDefinitionGroup>
<ClCompile>
@ -186,7 +186,7 @@
<Optimization>Disabled</Optimization>
<BasicRuntimeChecks>EnableFastChecks</BasicRuntimeChecks>
<RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=0x0502;WINVER=0x0502;NTDDI_VERSION=0x05020000;_DEBUG;_HAS_ITERATOR_DEBUGGING=0;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=0x0502;WINVER=0x0502;BOOST_ALL_NO_LIB;NTDDI_VERSION=0x05020000;_DEBUG;_HAS_ITERATOR_DEBUGGING=0;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<MultiProcessorCompilation>true</MultiProcessorCompilation>
<AdditionalOptions>/bigobj @../flow/no_intellisense.opt %(AdditionalOptions)</AdditionalOptions>
@ -209,7 +209,7 @@
<Optimization>Full</Optimization>
<RuntimeLibrary>MultiThreaded</RuntimeLibrary>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=0x0502;WINVER=0x0502;NTDDI_VERSION=0x05020000;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=0x0502;WINVER=0x0502;BOOST_ALL_NO_LIB;NTDDI_VERSION=0x05020000;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<EnableEnhancedInstructionSet>NotSet</EnableEnhancedInstructionSet>
<EnablePREfast>false</EnablePREfast>

View File

@ -251,10 +251,10 @@ description is not currently required but encouraged.
description="Performs a little-endian comparison of byte strings. If the existing value in the database is not present, then ``param`` is stored in the database. If the existing value in the database is shorter than ``param``, it is first extended to the length of ``param`` with zero bytes. If ``param`` is shorter than the existing value in the database, the existing value is truncated to match the length of ``param``. The smaller of the two values is then stored in the database."/>
<Option name="set_versionstamped_key" code="14"
paramType="Bytes" paramDescription="value to which to set the transformed key"
description="Transforms ``key`` using a versionstamp for the transaction. Sets the transformed key in the database to ``param``. The key is transformed by removing the final four bytes from the key and reading those as a little-Endian 32-bit integer to get a position ``pos``. The 10 bytes of the key from ``pos`` to ``pos + 10`` are replaced with the versionstamp of the transaction used. The first byte of the key is position 0. A versionstamp is a 10 byte, unique, monotonically (but not sequentially) increasing value for each committed transaction. The first 8 bytes are the committed version of the database (serialized in big-Endian order). The last 2 bytes are monotonic in the serialization order for transactions. WARNING: At this time, versionstamps are compatible with the Tuple layer only in the Java and Python bindings. Also, note that prior to API version 520, the offset was computed from only the final two bytes rather than the final four bytes." />
description="Transforms ``key`` using a versionstamp for the transaction. Sets the transformed key in the database to ``param``. The key is transformed by removing the final four bytes from the key and reading those as a little-Endian 32-bit integer to get a position ``pos``. The 10 bytes of the key from ``pos`` to ``pos + 10`` are replaced with the versionstamp of the transaction used. The first byte of the key is position 0. A versionstamp is a 10 byte, unique, monotonically (but not sequentially) increasing value for each committed transaction. The first 8 bytes are the committed version of the database (serialized in big-Endian order). The last 2 bytes are monotonic in the serialization order for transactions. WARNING: At this time, versionstamps are compatible with the Tuple layer only in the Java, Python, and Go bindings. Also, note that prior to API version 520, the offset was computed from only the final two bytes rather than the final four bytes." />
<Option name="set_versionstamped_value" code="15"
paramType="Bytes" paramDescription="value to versionstamp and set"
description="Transforms ``param`` using a versionstamp for the transaction. Sets the ``key`` given to the transformed ``param``. The parameter is transformed by removing the final four bytes from ``param`` and reading those as a little-Endian 32-bit integer to get a position ``pos``. The 10 bytes of the parameter from ``pos`` to ``pos + 10`` are replaced with the versionstamp of the transaction used. The first byte of the parameter is position 0. A versionstamp is a 10 byte, unique, monotonically (but not sequentially) increasing value for each committed transaction. The first 8 bytes are the committed version of the database (serialized in big-Endian order). The last 2 bytes are monotonic in the serialization order for transactions. WARNING: At this time, versionstamps are compatible with the Tuple layer only in the Java and Python bindings. Also, note that prior to API version 520, the versionstamp was always placed at the beginning of the parameter rather than computing an offset." />
description="Transforms ``param`` using a versionstamp for the transaction. Sets the ``key`` given to the transformed ``param``. The parameter is transformed by removing the final four bytes from ``param`` and reading those as a little-Endian 32-bit integer to get a position ``pos``. The 10 bytes of the parameter from ``pos`` to ``pos + 10`` are replaced with the versionstamp of the transaction used. The first byte of the parameter is position 0. A versionstamp is a 10 byte, unique, monotonically (but not sequentially) increasing value for each committed transaction. The first 8 bytes are the committed version of the database (serialized in big-Endian order). The last 2 bytes are monotonic in the serialization order for transactions. WARNING: At this time, versionstamps are compatible with the Tuple layer only in the Java, Python, and Go bindings. Also, note that prior to API version 520, the versionstamp was always placed at the beginning of the parameter rather than computing an offset." />
<Option name="byte_min" code="16"
paramType="Bytes" paramDescription="value to check against database value"
description="Performs lexicographic comparison of byte strings. If the existing value in the database is not present, then ``param`` is stored. Otherwise the smaller of the two values is then stored in the database."/>

View File

@ -1,6 +1,7 @@
set(FDBMONITOR_SRCS ConvertUTF.h SimpleIni.h fdbmonitor.cpp)
add_executable(fdbmonitor ${FDBMONITOR_SRCS})
assert_no_version_h(fdbmonitor)
if(UNIX AND NOT APPLE)
target_link_libraries(fdbmonitor rt)
endif()
@ -8,6 +9,4 @@ endif()
# as soon as we get rid of the old build system
target_include_directories(fdbmonitor PRIVATE ${CMAKE_BINARY_DIR}/fdbclient)
if(NOT OPEN_FOR_IDE)
install(TARGETS fdbmonitor DESTINATION "${FDBMONITOR_INSTALL_LOCATION}" COMPONENT server)
endif()
fdb_install(TARGETS fdbmonitor DESTINATION fdbmonitor COMPONENT server)

View File

@ -76,4 +76,4 @@
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
<ImportGroup Label="ExtensionTargets">
</ImportGroup>
</Project>
</Project>

View File

@ -77,7 +77,7 @@ struct OpenFileInfo : NonCopyable {
Future<Reference<IAsyncFile>> opened; // Only valid until the file is fully opened
OpenFileInfo() : f(0) {}
OpenFileInfo(OpenFileInfo && r) noexcept(true) : f(r.f), opened(std::move(r.opened)) { r.f = 0; }
OpenFileInfo(OpenFileInfo && r) BOOST_NOEXCEPT : f(r.f), opened(std::move(r.opened)) { r.f = 0; }
Future<Reference<IAsyncFile>> get() {
if (f) return Reference<IAsyncFile>::addRef(f);

View File

@ -194,27 +194,67 @@ public:
};
#define CONNECT_PACKET_V0 0x0FDB00A444020001LL
#define CONNECT_PACKET_V1 0x0FDB00A446030001LL
#define CONNECT_PACKET_V0_SIZE 14
#define CONNECT_PACKET_V1_SIZE 22
#define CONNECT_PACKET_V2_SIZE 26
#pragma pack( push, 1 )
struct ConnectPacket {
uint32_t connectPacketLength; // sizeof(ConnectPacket)-sizeof(uint32_t), or perhaps greater in later protocol versions
// The value does not include the size of `connectPacketLength` itself,
// but only the other fields of this structure.
uint32_t connectPacketLength;
uint64_t protocolVersion; // Expect currentProtocolVersion
uint16_t canonicalRemotePort; // Port number to reconnect to the originating process
uint64_t connectionId; // Multi-version clients will use the same Id for both connections, other connections will set this to zero. Added at protocol Version 0x0FDB00A444020001.
uint32_t canonicalRemoteIp; // IP Address to reconnect to the originating process
size_t minimumSize() {
if (protocolVersion < CONNECT_PACKET_V0) return CONNECT_PACKET_V0_SIZE;
if (protocolVersion < CONNECT_PACKET_V1) return CONNECT_PACKET_V1_SIZE;
return CONNECT_PACKET_V2_SIZE;
// IP Address to reconnect to the originating process. Only one of these must be populated.
uint32_t canonicalRemoteIp4;
enum ConnectPacketFlags {
FLAG_IPV6 = 1
};
uint16_t flags;
uint8_t canonicalRemoteIp6[16];
IPAddress canonicalRemoteIp() const {
if (isIPv6()) {
IPAddress::IPAddressStore store;
memcpy(store.data(), canonicalRemoteIp6, sizeof(canonicalRemoteIp6));
return IPAddress(store);
} else {
return IPAddress(canonicalRemoteIp4);
}
}
void setCanonicalRemoteIp(const IPAddress& ip) {
if (ip.isV6()) {
flags = flags | FLAG_IPV6;
memcpy(&canonicalRemoteIp6, ip.toV6().data(), 16);
} else {
flags = flags & ~FLAG_IPV6;
canonicalRemoteIp4 = ip.toV4();
}
}
bool isIPv6() const { return flags & FLAG_IPV6; }
uint32_t totalPacketSize() const { return connectPacketLength + sizeof(connectPacketLength); }
template <class Ar>
void serialize(Ar& ar) {
serializer(ar, connectPacketLength);
ASSERT(connectPacketLength <= sizeof(ConnectPacket));
serializer(ar, protocolVersion, canonicalRemotePort, connectionId, canonicalRemoteIp4);
if (ar.isDeserializing && ar.protocolVersion() < 0x0FDB00B061030001LL) {
flags = 0;
} else {
// We can send everything in the serialized packet, since the current version of ConnectPacket
// is backward compatible with CONNECT_PACKET_V0.
serializer(ar, flags);
ar.serializeBytes(&canonicalRemoteIp6, sizeof(canonicalRemoteIp6));
}
}
};
static_assert( sizeof(ConnectPacket) == CONNECT_PACKET_V2_SIZE, "ConnectPacket packed incorrectly" );
#pragma pack( pop )
ACTOR static Future<Void> connectionReader(TransportData* transport, Reference<IConnection> conn, Peer* peer,
@ -256,23 +296,23 @@ struct Peer : NonCopyable {
for(auto& addr : transport->localAddresses) {
if(addr.isTLS() == destination.isTLS()) {
pkt.canonicalRemotePort = addr.port;
pkt.canonicalRemoteIp = addr.ip;
pkt.setCanonicalRemoteIp(addr.ip);
found = true;
break;
}
}
if (!found) {
pkt.canonicalRemotePort = 0; // a "mixed" TLS/non-TLS connection is like a client/server connection - there's no way to reverse it
pkt.canonicalRemoteIp = 0;
pkt.setCanonicalRemoteIp(IPAddress(0));
}
pkt.connectPacketLength = sizeof(pkt)-sizeof(pkt.connectPacketLength);
pkt.connectPacketLength = sizeof(pkt) - sizeof(pkt.connectPacketLength);
pkt.protocolVersion = currentProtocolVersion;
pkt.connectionId = transport->transportId;
PacketBuffer* pb_first = new PacketBuffer;
PacketWriter wr( pb_first, NULL, Unversioned() );
wr.serializeBinaryItem(pkt);
pkt.serialize(wr);
unsent.prependWriteBuffer(pb_first, wr.finish());
}
@ -647,29 +687,31 @@ ACTOR static Future<Void> connectionReader(
if (expectConnectPacket && unprocessed_end-unprocessed_begin>=CONNECT_PACKET_V0_SIZE) {
// At the beginning of a connection, we expect to receive a packet containing the protocol version and the listening port of the remote process
ConnectPacket* p = (ConnectPacket*)unprocessed_begin;
uint64_t connectionId = 0;
int32_t connectPacketSize = p->minimumSize();
int32_t connectPacketSize = ((ConnectPacket*)unprocessed_begin)->totalPacketSize();
if ( unprocessed_end-unprocessed_begin >= connectPacketSize ) {
if(p->protocolVersion >= 0x0FDB00A444020001) {
connectionId = p->connectionId;
}
uint64_t protocolVersion = ((ConnectPacket*)unprocessed_begin)->protocolVersion;
BinaryReader pktReader(unprocessed_begin, connectPacketSize, AssumeVersion(protocolVersion));
ConnectPacket pkt;
serializer(pktReader, pkt);
if( (p->protocolVersion & compatibleProtocolVersionMask) != (currentProtocolVersion & compatibleProtocolVersionMask) ) {
incompatibleProtocolVersionNewer = p->protocolVersion > currentProtocolVersion;
NetworkAddress addr = p->canonicalRemotePort ? NetworkAddress( p->canonicalRemoteIp, p->canonicalRemotePort ) : conn->getPeerAddress();
uint64_t connectionId = pkt.connectionId;
if( (pkt.protocolVersion & compatibleProtocolVersionMask) != (currentProtocolVersion & compatibleProtocolVersionMask) ) {
incompatibleProtocolVersionNewer = pkt.protocolVersion > currentProtocolVersion;
NetworkAddress addr = pkt.canonicalRemotePort
? NetworkAddress(pkt.canonicalRemoteIp(), pkt.canonicalRemotePort)
: conn->getPeerAddress();
if(connectionId != 1) addr.port = 0;
if(!transport->multiVersionConnections.count(connectionId)) {
if(now() - transport->lastIncompatibleMessage > FLOW_KNOBS->CONNECTION_REJECTED_MESSAGE_DELAY) {
TraceEvent(SevWarn, "ConnectionRejected", conn->getDebugID())
.detail("Reason", "IncompatibleProtocolVersion")
.detail("LocalVersion", currentProtocolVersion)
.detail("RejectedVersion", p->protocolVersion)
.detail("VersionMask", compatibleProtocolVersionMask)
.detail("Peer", p->canonicalRemotePort ? NetworkAddress( p->canonicalRemoteIp, p->canonicalRemotePort ) : conn->getPeerAddress())
.detail("ConnectionId", connectionId);
.detail("Reason", "IncompatibleProtocolVersion")
.detail("LocalVersion", currentProtocolVersion)
.detail("RejectedVersion", pkt.protocolVersion)
.detail("VersionMask", compatibleProtocolVersionMask)
.detail("Peer", pkt.canonicalRemotePort ? NetworkAddress(pkt.canonicalRemoteIp(), pkt.canonicalRemotePort)
: conn->getPeerAddress())
.detail("ConnectionId", connectionId);
transport->lastIncompatibleMessage = now();
}
if(!transport->incompatiblePeers.count(addr)) {
@ -680,7 +722,7 @@ ACTOR static Future<Void> connectionReader(
}
compatible = false;
if(p->protocolVersion < 0x0FDB00A551000000LL) {
if(protocolVersion < 0x0FDB00A551000000LL) {
// Older versions expected us to hang up. It may work even if we don't hang up here, but it's safer to keep the old behavior.
throw incompatible_protocol_version();
}
@ -699,20 +741,23 @@ ACTOR static Future<Void> connectionReader(
unprocessed_begin += connectPacketSize;
expectConnectPacket = false;
peerProtocolVersion = p->protocolVersion;
peerProtocolVersion = protocolVersion;
if (peer != nullptr) {
// Outgoing connection; port information should be what we expect
TraceEvent("ConnectedOutgoing").suppressFor(1.0).detail("PeerAddr", NetworkAddress( p->canonicalRemoteIp, p->canonicalRemotePort ) );
TraceEvent("ConnectedOutgoing")
.suppressFor(1.0)
.detail("PeerAddr", NetworkAddress(pkt.canonicalRemoteIp(), pkt.canonicalRemotePort));
peer->compatible = compatible;
peer->incompatibleProtocolVersionNewer = incompatibleProtocolVersionNewer;
if (!compatible) {
peer->transport->numIncompatibleConnections++;
incompatiblePeerCounted = true;
}
ASSERT( p->canonicalRemotePort == peerAddress.port );
ASSERT( pkt.canonicalRemotePort == peerAddress.port );
} else {
if (p->canonicalRemotePort) {
peerAddress = NetworkAddress( p->canonicalRemoteIp, p->canonicalRemotePort, true, peerAddress.isTLS() );
if (pkt.canonicalRemotePort) {
peerAddress = NetworkAddress(pkt.canonicalRemoteIp(), pkt.canonicalRemotePort, true,
peerAddress.isTLS());
}
peer = transport->getPeer(peerAddress);
peer->compatible = compatible;

View File

@ -149,7 +149,7 @@ public:
void coalesce( const Range& k );
void validateCoalesced();
void operator=(RangeMap&& r) noexcept(true) { map = std::move(r.map); }
void operator=(RangeMap&& r) BOOST_NOEXCEPT { map = std::move(r.map); }
//void clear( const Val& value ) { ranges.clear(); ranges.insert(std::make_pair(Key(),value)); }
void insert( const Range& keys, const Val& value );

View File

@ -177,7 +177,7 @@ Future<Reference<IConnection>> TLSNetworkConnections::connect( NetworkAddress to
// addresses against certificates, so we have our own peer verifying logic
// to use. For FDB<->external system connections, we can use the standard
// hostname-based certificate verification logic.
if (host.empty() || host == toIPString(toAddr.ip))
if (host.empty() || host == toAddr.ip.toString())
return wrap(options->get_policy(TLSOptions::POLICY_VERIFY_PEERS), true, network->connect(clearAddr), std::string(""));
else
return wrap( options->get_policy(TLSOptions::POLICY_NO_VERIFY_PEERS), true, network->connect( clearAddr ), host );

View File

@ -868,7 +868,7 @@ template<> Future<int> chain<0>( Future<int> const& x ) {
return x;
}
Future<int> chain2( Future<int> const& x, int const& i );
ACTOR Future<int> chain2(Future<int> x, int i);
ACTOR Future<int> chain2( Future<int> x, int i ) {
if (i>1) {
@ -1017,7 +1017,7 @@ ACTOR void cycle(FutureStream<Void> in, PromiseStream<Void> out, int* ptotal){
loop{
waitNext(in);
(*ptotal)++;
out.send(_);
out.send(Void());
}
}

View File

@ -112,7 +112,7 @@ public:
bool isValid() const { return sav != NULL; }
ReplyPromise() : sav(new NetSAV<T>(0, 1)) {}
ReplyPromise(const ReplyPromise& rhs) : sav(rhs.sav) { sav->addPromiseRef(); }
ReplyPromise(ReplyPromise&& rhs) noexcept(true) : sav(rhs.sav) { rhs.sav = 0; }
ReplyPromise(ReplyPromise&& rhs) BOOST_NOEXCEPT : sav(rhs.sav) { rhs.sav = 0; }
~ReplyPromise() { if (sav) sav->delPromiseRef(); }
ReplyPromise(const Endpoint& endpoint) : sav(new NetSAV<T>(0, 1, endpoint)) {}
@ -123,7 +123,7 @@ public:
if (sav) sav->delPromiseRef();
sav = rhs.sav;
}
void operator=(ReplyPromise && rhs) noexcept(true) {
void operator=(ReplyPromise && rhs) BOOST_NOEXCEPT {
if (sav != rhs.sav) {
if (sav) sav->delPromiseRef();
sav = rhs.sav;
@ -323,13 +323,13 @@ public:
FutureStream<T> getFuture() const { queue->addFutureRef(); return FutureStream<T>(queue); }
RequestStream() : queue(new NetNotifiedQueue<T>(0, 1)) {}
RequestStream(const RequestStream& rhs) : queue(rhs.queue) { queue->addPromiseRef(); }
RequestStream(RequestStream&& rhs) noexcept(true) : queue(rhs.queue) { rhs.queue = 0; }
RequestStream(RequestStream&& rhs) BOOST_NOEXCEPT : queue(rhs.queue) { rhs.queue = 0; }
void operator=(const RequestStream& rhs) {
rhs.queue->addPromiseRef();
if (queue) queue->delPromiseRef();
queue = rhs.queue;
}
void operator=(RequestStream&& rhs) noexcept(true) {
void operator=(RequestStream&& rhs) BOOST_NOEXCEPT {
if (queue != rhs.queue) {
if (queue) queue->delPromiseRef();
queue = rhs.queue;

View File

@ -154,11 +154,11 @@
<PropertyGroup Label="UserMacros" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|X64'">
<LinkIncremental>true</LinkIncremental>
<IncludePath>$(IncludePath);../;C:\Program Files\boost_1_52_0</IncludePath>
<IncludePath>$(IncludePath);../;C:\Program Files\boost_1_67_0</IncludePath>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|X64'">
<LinkIncremental>false</LinkIncremental>
<IncludePath>$(IncludePath);../;C:\Program Files\boost_1_52_0</IncludePath>
<IncludePath>$(IncludePath);../;C:\Program Files\boost_1_67_0</IncludePath>
</PropertyGroup>
<ItemDefinitionGroup>
<CustomBuildStep>
@ -177,7 +177,7 @@
<Optimization>Disabled</Optimization>
<BasicRuntimeChecks>EnableFastChecks</BasicRuntimeChecks>
<RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=0x0502;WINVER=0x0502;NTDDI_VERSION=0x05020000;_DEBUG;_HAS_ITERATOR_DEBUGGING=0;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=0x0502;WINVER=0x0502;BOOST_ALL_NO_LIB;NTDDI_VERSION=0x05020000;_DEBUG;_HAS_ITERATOR_DEBUGGING=0;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<MultiProcessorCompilation>true</MultiProcessorCompilation>
<AdditionalOptions>/bigobj @../flow/no_intellisense.opt %(AdditionalOptions)</AdditionalOptions>
@ -201,7 +201,7 @@
<Optimization>Full</Optimization>
<RuntimeLibrary>MultiThreaded</RuntimeLibrary>
<IntrinsicFunctions>true</IntrinsicFunctions>
<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=0x0502;WINVER=0x0502;NTDDI_VERSION=0x05020000;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;FDB_CLEAN_BUILD;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<PreprocessorDefinitions>TLS_DISABLED;WIN32;_WIN32_WINNT=0x0502;WINVER=0x0502;BOOST_ALL_NO_LIB;NTDDI_VERSION=0x05020000;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;FDB_CLEAN_BUILD;%(PreprocessorDefinitions)</PreprocessorDefinitions>
<AdditionalIncludeDirectories>%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
<EnableEnhancedInstructionSet>NotSet</EnableEnhancedInstructionSet>
<EnablePREfast>false</EnablePREfast>

View File

@ -135,28 +135,29 @@ struct SimClogging {
return t - tnow;
}
void clogPairFor( uint32_t from, uint32_t to, double t ) {
void clogPairFor(const IPAddress& from, const IPAddress& to, double t) {
auto& u = clogPairUntil[ std::make_pair( from, to ) ];
u = std::max(u, now() + t);
}
void clogSendFor( uint32_t from, double t ) {
void clogSendFor(const IPAddress& from, double t) {
auto& u = clogSendUntil[from];
u = std::max(u, now() + t);
}
void clogRecvFor( uint32_t from, double t ) {
void clogRecvFor(const IPAddress& from, double t) {
auto& u = clogRecvUntil[from];
u = std::max(u, now() + t);
}
double setPairLatencyIfNotSet( uint32_t from, uint32_t to, double t ) {
double setPairLatencyIfNotSet(const IPAddress& from, const IPAddress& to, double t) {
auto i = clogPairLatency.find( std::make_pair(from,to) );
if (i == clogPairLatency.end())
i = clogPairLatency.insert( std::make_pair( std::make_pair(from,to), t ) ).first;
return i->second;
}
private:
std::map< uint32_t, double > clogSendUntil, clogRecvUntil;
std::map< std::pair<uint32_t, uint32_t>, double > clogPairUntil;
std::map< std::pair<uint32_t, uint32_t>, double > clogPairLatency;
std::map<IPAddress, double> clogSendUntil, clogRecvUntil;
std::map<std::pair<IPAddress, IPAddress>, double> clogPairUntil;
std::map<std::pair<IPAddress, IPAddress>, double> clogPairLatency;
double halfLatency() {
double a = g_random->random01();
const double pFast = 0.999;
@ -790,8 +791,16 @@ public:
Reference<Sim2Conn> peerc( new Sim2Conn( peerp ) );
myc->connect(peerc, toAddr);
peerc->connect(myc, NetworkAddress( getCurrentProcess()->address.ip + g_random->randomInt(0,256),
g_random->randomInt(40000, 60000) ));
IPAddress localIp;
if (getCurrentProcess()->address.ip.isV6()) {
IPAddress::IPAddressStore store = getCurrentProcess()->address.ip.toV6();
uint16_t* ipParts = (uint16_t*)store.data();
ipParts[7] += g_random->randomInt(0, 256);
localIp = IPAddress(store);
} else {
localIp = IPAddress(getCurrentProcess()->address.ip.toV4() + g_random->randomInt(0, 256));
}
peerc->connect(myc, NetworkAddress(localIp, g_random->randomInt(40000, 60000)));
((Sim2Listener*)peerp->getListener(toAddr).getPtr())->incomingConnection( 0.5*g_random->random01(), Reference<IConnection>(peerc) );
return onConnect( ::delay(0.5*g_random->random01()), myc );
@ -966,17 +975,21 @@ public:
virtual void run() {
_run(this);
}
virtual ProcessInfo* newProcess(const char* name, uint32_t ip, uint16_t port, uint16_t listenPerProcess,
LocalityData locality, ProcessClass startingClass, const char* dataFolder, const char* coordinationFolder) {
virtual ProcessInfo* newProcess(const char* name, IPAddress ip, uint16_t port, uint16_t listenPerProcess,
LocalityData locality, ProcessClass startingClass, const char* dataFolder,
const char* coordinationFolder) {
ASSERT( locality.machineId().present() );
MachineInfo& machine = machines[ locality.machineId().get() ];
if (!machine.machineId.present())
machine.machineId = locality.machineId();
for( int i = 0; i < machine.processes.size(); i++ ) {
if( machine.processes[i]->locality.machineId() != locality.machineId() ) { // SOMEDAY: compute ip from locality to avoid this check
TraceEvent("Sim2Mismatch").detail("IP", format("%x", ip))
.detailext("MachineId", locality.machineId()).detail("NewName", name)
.detailext("ExistingMachineId", machine.processes[i]->locality.machineId()).detail("ExistingName", machine.processes[i]->name);
TraceEvent("Sim2Mismatch")
.detail("IP", format("%s", ip.toString().c_str()))
.detailext("MachineId", locality.machineId())
.detail("NewName", name)
.detailext("ExistingMachineId", machine.processes[i]->locality.machineId())
.detail("ExistingName", machine.processes[i]->name);
ASSERT( false );
}
ASSERT( machine.processes[i]->address.port != port );
@ -1499,22 +1512,24 @@ public:
return (kt == ktMin);
}
virtual void clogInterface( uint32_t ip, double seconds, ClogMode mode = ClogDefault ) {
virtual void clogInterface(const IPAddress& ip, double seconds, ClogMode mode = ClogDefault) {
if (mode == ClogDefault) {
double a = g_random->random01();
if ( a < 0.3 ) mode = ClogSend;
else if (a < 0.6 ) mode = ClogReceive;
else mode = ClogAll;
}
TraceEvent("ClogInterface").detail("IP", toIPString(ip)).detail("Delay", seconds)
.detail("Queue", mode==ClogSend?"Send":mode==ClogReceive?"Receive":"All");
TraceEvent("ClogInterface")
.detail("IP", ip.toString())
.detail("Delay", seconds)
.detail("Queue", mode == ClogSend ? "Send" : mode == ClogReceive ? "Receive" : "All");
if (mode == ClogSend || mode==ClogAll)
g_clogging.clogSendFor( ip, seconds );
if (mode == ClogReceive || mode==ClogAll)
g_clogging.clogRecvFor( ip, seconds );
}
virtual void clogPair( uint32_t from, uint32_t to, double seconds ) {
virtual void clogPair(const IPAddress& from, const IPAddress& to, double seconds) {
g_clogging.clogPairFor( from, to, seconds );
}
virtual std::vector<ProcessInfo*> getAllProcesses() const {
@ -1569,10 +1584,10 @@ public:
Promise<Void> action;
Task( double time, int taskID, uint64_t stable, ProcessInfo* machine, Promise<Void>&& action ) : time(time), taskID(taskID), stable(stable), machine(machine), action(std::move(action)) {}
Task( double time, int taskID, uint64_t stable, ProcessInfo* machine, Future<Void>& future ) : time(time), taskID(taskID), stable(stable), machine(machine) { future = action.getFuture(); }
Task(Task&& rhs) noexcept(true) : time(rhs.time), taskID(rhs.taskID), stable(rhs.stable), machine(rhs.machine), action(std::move(rhs.action)) {}
Task(Task&& rhs) BOOST_NOEXCEPT : time(rhs.time), taskID(rhs.taskID), stable(rhs.stable), machine(rhs.machine), action(std::move(rhs.action)) {}
void operator= ( Task const& rhs ) { taskID = rhs.taskID; time = rhs.time; stable = rhs.stable; machine = rhs.machine; action = rhs.action; }
Task( Task const& rhs ) : taskID(rhs.taskID), time(rhs.time), stable(rhs.stable), machine(rhs.machine), action(rhs.action) {}
void operator= (Task&& rhs) noexcept(true) { time = rhs.time; taskID = rhs.taskID; stable = rhs.stable; machine = rhs.machine; action = std::move(rhs.action); }
void operator= (Task&& rhs) BOOST_NOEXCEPT { time = rhs.time; taskID = rhs.taskID; stable = rhs.stable; machine = rhs.machine; action = std::move(rhs.action); }
bool operator < (Task const& rhs) const {
// Ordering is reversed for priority_queue
@ -1653,7 +1668,7 @@ public:
INetwork *net2;
//Map from machine IP -> machine disk space info
std::map<uint32_t, SimDiskSpace> diskSpaceMap;
std::map<IPAddress, SimDiskSpace> diskSpaceMap;
//Whether or not yield has returned true during the current iteration of the run loop
bool yielded;

View File

@ -114,8 +114,12 @@ public:
std::string toString() const {
const NetworkAddress& address = addresses[0];
return format("name: %s address: %d.%d.%d.%d:%d zone: %s datahall: %s class: %s excluded: %d cleared: %d",
name, (address.ip>>24)&0xff, (address.ip>>16)&0xff, (address.ip>>8)&0xff, address.ip&0xff, address.port, (locality.zoneId().present() ? locality.zoneId().get().printable().c_str() : "[unset]"), (locality.dataHallId().present() ? locality.dataHallId().get().printable().c_str() : "[unset]"), startingClass.toString().c_str(), excluded, cleared);
return format(
"name: %s address: %s zone: %s datahall: %s class: %s excluded: %d cleared: %d", name,
formatIpPort(address.ip, address.port).c_str(),
(locality.zoneId().present() ? locality.zoneId().get().printable().c_str() : "[unset]"),
(locality.dataHallId().present() ? locality.dataHallId().get().printable().c_str() : "[unset]"),
startingClass.toString().c_str(), excluded, cleared);
}
// Members not for external use
@ -138,7 +142,9 @@ public:
virtual Future<Void> onProcess( ISimulator::ProcessInfo *process, int taskID = -1 ) = 0;
virtual Future<Void> onMachine( ISimulator::ProcessInfo *process, int taskID = -1 ) = 0;
virtual ProcessInfo* newProcess(const char* name, uint32_t ip, uint16_t port, uint16_t listenPerProcess, LocalityData locality, ProcessClass startingClass, const char* dataFolder, const char* coordinationFolder) = 0;
virtual ProcessInfo* newProcess(const char* name, IPAddress ip, uint16_t port, uint16_t listenPerProcess,
LocalityData locality, ProcessClass startingClass, const char* dataFolder,
const char* coordinationFolder) = 0;
virtual void killProcess( ProcessInfo* machine, KillType ) = 0;
virtual void rebootProcess(Optional<Standalone<StringRef>> zoneId, bool allProcesses ) = 0;
virtual void rebootProcess( ProcessInfo* process, KillType kt ) = 0;
@ -256,8 +262,8 @@ public:
allSwapsDisabled = true;
}
virtual void clogInterface( uint32_t ip, double seconds, ClogMode mode = ClogDefault ) = 0;
virtual void clogPair( uint32_t from, uint32_t to, double seconds ) = 0;
virtual void clogInterface(const IPAddress& ip, double seconds, ClogMode mode = ClogDefault) = 0;
virtual void clogPair(const IPAddress& from, const IPAddress& to, double seconds) = 0;
virtual std::vector<ProcessInfo*> getAllProcesses() const = 0;
virtual ProcessInfo* getProcessByAddress( NetworkAddress const& address ) = 0;
virtual MachineInfo* getMachineByNetworkAddress(NetworkAddress const& address) = 0;
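
clogInterface and clogPair now take IPAddress instead of a raw uint32_t, matching the diskSpaceMap change above, so the simulator can address IPv6 interfaces as well as IPv4. A hedged sketch of what keying a std::map on such a type requires (AddressKey is a stand-in, not the real fdbrpc IPAddress):

#include <array>
#include <cstdint>
#include <map>
#include <tuple>

// Stand-in for an address that can hold either an IPv4 word or IPv6 bytes.
struct AddressKey {
	bool isV6 = false;
	uint32_t v4 = 0;
	std::array<uint8_t, 16> v6{};

	// std::map needs a strict weak ordering over its key type.
	bool operator<(const AddressKey& rhs) const {
		return std::tie(isV6, v4, v6) < std::tie(rhs.isV6, rhs.v4, rhs.v6);
	}
};

struct SimDiskSpaceStub { int64_t totalSpace = 0; int64_t baseFreeSpace = 0; };

int main() {
	std::map<AddressKey, SimDiskSpaceStub> diskSpaceMap; // previously keyed by uint32_t
	AddressKey a;
	a.v4 = 0x0a000001; // 10.0.0.1
	diskSpaceMap[a].totalSpace = int64_t(1) << 30;
	return 0;
}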


@ -10,7 +10,7 @@ set(FDBSERVER_SRCS
CoroFlow.actor.cpp
CoroFlow.h
DataDistribution.actor.cpp
DataDistribution.h
DataDistribution.actor.h
DataDistributionQueue.actor.cpp
DataDistributionTracker.actor.cpp
DataDistributorInterface.h
@ -130,6 +130,7 @@ set(FDBSERVER_SRCS
workloads/IndexScan.actor.cpp
workloads/Inventory.actor.cpp
workloads/KVStoreTest.actor.cpp
workloads/KillRegion.actor.cpp
workloads/LockDatabase.actor.cpp
workloads/LogMetrics.actor.cpp
workloads/LowLatency.actor.cpp
@ -184,6 +185,4 @@ target_include_directories(fdbserver PRIVATE
${CMAKE_CURRENT_SOURCE_DIR}/workloads)
target_link_libraries(fdbserver PRIVATE fdbclient)
if(NOT OPEN_FOR_IDE)
install(TARGETS fdbserver DESTINATION ${FDB_SBIN_DIR} COMPONENT server)
endif()
fdb_install(TARGETS fdbserver DESTINATION sbin COMPONENT server)


@ -60,9 +60,9 @@ struct WorkerInfo : NonCopyable {
WorkerInfo( Future<Void> watcher, ReplyPromise<RegisterWorkerReply> reply, Generation gen, WorkerInterface interf, ProcessClass initialClass, ProcessClass processClass, ClusterControllerPriorityInfo priorityInfo ) :
watcher(watcher), reply(reply), gen(gen), reboots(0), lastAvailableTime(now()), interf(interf), initialClass(initialClass), processClass(processClass), priorityInfo(priorityInfo) {}
WorkerInfo( WorkerInfo&& r ) noexcept(true) : watcher(std::move(r.watcher)), reply(std::move(r.reply)), gen(r.gen),
WorkerInfo( WorkerInfo&& r ) BOOST_NOEXCEPT : watcher(std::move(r.watcher)), reply(std::move(r.reply)), gen(r.gen),
reboots(r.reboots), lastAvailableTime(r.lastAvailableTime), interf(std::move(r.interf)), initialClass(r.initialClass), processClass(r.processClass), priorityInfo(r.priorityInfo) {}
void operator=( WorkerInfo&& r ) noexcept(true) {
void operator=( WorkerInfo&& r ) BOOST_NOEXCEPT {
watcher = std::move(r.watcher);
reply = std::move(r.reply);
gen = r.gen;
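
The noexcept(true) -> BOOST_NOEXCEPT substitutions in this diff swap the C++11 specifier for the Boost.Config macro, which expands to noexcept on compilers that support it and to nothing otherwise, presumably to keep older toolchains building. A minimal sketch of the pattern on a hypothetical type:

#include <boost/config.hpp>
#include <utility>
#include <vector>

// Hypothetical resource-holding type, for illustration only.
struct Buffer {
	std::vector<char> data;

	Buffer() = default;
	Buffer(Buffer&& rhs) BOOST_NOEXCEPT : data(std::move(rhs.data)) {}
	Buffer& operator=(Buffer&& rhs) BOOST_NOEXCEPT {
		data = std::move(rhs.data);
		return *this;
	}
};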


@ -19,7 +19,7 @@
*/
#include "flow/ActorCollection.h"
#include "fdbserver/DataDistribution.h"
#include "fdbserver/DataDistribution.actor.h"
#include "fdbclient/SystemData.h"
#include "fdbclient/DatabaseContext.h"
#include "fdbserver/MoveKeys.actor.h"


@ -1,5 +1,5 @@
/*
* DataDistribution.h
* DataDistribution.actor.h
*
* This source file is part of the FoundationDB open source project
*
@ -18,10 +18,17 @@
* limitations under the License.
*/
#if defined(NO_INTELLISENSE) && !defined(FDBSERVER_DATA_DISTRIBUTION_ACTOR_G_H)
#define FDBSERVER_DATA_DISTRIBUTION_ACTOR_G_H
#include "fdbserver/DataDistribution.actor.g.h"
#elif !defined(FDBSERVER_DATA_DISTRIBUTION_ACTOR_H)
#define FDBSERVER_DATA_DISTRIBUTION_ACTOR_H
#include "fdbclient/NativeAPI.actor.h"
#include "fdbserver/ClusterRecruitmentInterface.h"
#include "fdbserver/MoveKeys.actor.h"
#include "fdbserver/LogSystem.h"
#include "flow/actorcompiler.h" // This must be the last #include.
struct RelocateShard {
KeyRange keys;
@ -244,5 +251,7 @@ ShardSizeBounds getShardSizeBounds(KeyRangeRef shard, int64_t maxShardSize);
int64_t getMaxShardSize( double dbSizeEstimate );
class DDTeamCollection;
Future<Void> teamRemover(DDTeamCollection* const& self);
Future<Void> teamRemoverPeriodic(DDTeamCollection* const& self);
ACTOR Future<Void> teamRemover(DDTeamCollection* self);
ACTOR Future<Void> teamRemoverPeriodic(DDTeamCollection* self);
#endif
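
The rename from DataDistribution.h to DataDistribution.actor.h follows the flow actor-compiler convention shown in the hunk above: a two-stage include guard pulls in the generated .actor.g.h for real builds, flow/actorcompiler.h comes last, and ACTOR declarations (here teamRemover and teamRemoverPeriodic) can then live in the header. A hedged skeleton for a hypothetical Example.actor.h:

// Example.actor.h -- hypothetical header, shown only to illustrate the guard pattern.
#if defined(NO_INTELLISENSE) && !defined(FDBSERVER_EXAMPLE_ACTOR_G_H)
#define FDBSERVER_EXAMPLE_ACTOR_G_H
#include "fdbserver/Example.actor.g.h" // actor-compiler output generated from this header
#elif !defined(FDBSERVER_EXAMPLE_ACTOR_H)
#define FDBSERVER_EXAMPLE_ACTOR_H

#include "flow/flow.h"
#include "flow/actorcompiler.h" // This must be the last #include.

// ACTOR declarations may now appear in the header and be shared across .cpp files.
ACTOR Future<Void> exampleActor(int value);

#endif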


@ -25,7 +25,7 @@
#include "flow/Util.h"
#include "fdbrpc/sim_validation.h"
#include "fdbclient/SystemData.h"
#include "fdbserver/DataDistribution.h"
#include "fdbserver/DataDistribution.actor.h"
#include "fdbclient/DatabaseContext.h"
#include "fdbserver/MoveKeys.actor.h"
#include "fdbserver/Knobs.h"


@ -20,7 +20,7 @@
#include "fdbrpc/FailureMonitor.h"
#include "fdbclient/SystemData.h"
#include "fdbserver/DataDistribution.h"
#include "fdbserver/DataDistribution.actor.h"
#include "fdbserver/Knobs.h"
#include "fdbclient/DatabaseContext.h"
#include "flow/ActorCollection.h"


@ -42,8 +42,8 @@ struct LogRouterData {
TagData( Tag tag, Version popped, Version durableKnownCommittedVersion ) : tag(tag), popped(popped), durableKnownCommittedVersion(durableKnownCommittedVersion) {}
TagData(TagData&& r) noexcept(true) : version_messages(std::move(r.version_messages)), tag(r.tag), popped(r.popped), durableKnownCommittedVersion(r.durableKnownCommittedVersion) {}
void operator= (TagData&& r) noexcept(true) {
TagData(TagData&& r) BOOST_NOEXCEPT : version_messages(std::move(r.version_messages)), tag(r.tag), popped(r.popped), durableKnownCommittedVersion(r.durableKnownCommittedVersion) {}
void operator= (TagData&& r) BOOST_NOEXCEPT {
version_messages = std::move(r.version_messages);
tag = r.tag;
popped = r.popped;


@ -323,8 +323,8 @@ namespace oldTLog_4_6 {
TagData( Version popped, bool nothing_persistent, bool popped_recently, OldTag tag ) : nothing_persistent(nothing_persistent), popped(popped), popped_recently(popped_recently), update_version_sizes(tag != txsTagOld) {}
TagData(TagData&& r) noexcept(true) : version_messages(std::move(r.version_messages)), nothing_persistent(r.nothing_persistent), popped_recently(r.popped_recently), popped(r.popped), update_version_sizes(r.update_version_sizes) {}
void operator= (TagData&& r) noexcept(true) {
TagData(TagData&& r) BOOST_NOEXCEPT : version_messages(std::move(r.version_messages)), nothing_persistent(r.nothing_persistent), popped_recently(r.popped_recently), popped(r.popped), update_version_sizes(r.update_version_sizes) {}
void operator= (TagData&& r) BOOST_NOEXCEPT {
version_messages = std::move(r.version_messages);
nothing_persistent = r.nothing_persistent;
popped_recently = r.popped_recently;


@ -294,8 +294,8 @@ struct LogData : NonCopyable, public ReferenceCounted<LogData> {
TagData( Tag tag, Version popped, bool nothingPersistent, bool poppedRecently, bool unpoppedRecovered ) : tag(tag), nothingPersistent(nothingPersistent), popped(popped), poppedRecently(poppedRecently), unpoppedRecovered(unpoppedRecovered) {}
TagData(TagData&& r) noexcept(true) : versionMessages(std::move(r.versionMessages)), nothingPersistent(r.nothingPersistent), poppedRecently(r.poppedRecently), popped(r.popped), tag(r.tag), unpoppedRecovered(r.unpoppedRecovered) {}
void operator= (TagData&& r) noexcept(true) {
TagData(TagData&& r) BOOST_NOEXCEPT : versionMessages(std::move(r.versionMessages)), nothingPersistent(r.nothingPersistent), poppedRecently(r.poppedRecently), popped(r.popped), tag(r.tag), unpoppedRecovered(r.unpoppedRecovered) {}
void operator= (TagData&& r) BOOST_NOEXCEPT {
versionMessages = std::move(r.versionMessages);
nothingPersistent = r.nothingPersistent;
poppedRecently = r.poppedRecently;
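
Keeping these TagData move operations non-throwing is what lets containers relocate them cheaply: for a copyable type, std::vector only moves elements during reallocation when the move constructor cannot throw, and falls back to copying otherwise. A small self-contained illustration (hypothetical type, not TagData; plain noexcept stands in for BOOST_NOEXCEPT):

#include <cstdio>
#include <deque>
#include <type_traits>
#include <utility>
#include <vector>

struct Messages {
	std::deque<int> versionMessages;

	Messages() = default;
	Messages(const Messages&) = default;
	Messages(Messages&& r) noexcept : versionMessages(std::move(r.versionMessages)) {}
};

int main() {
	// std::vector growth uses std::move_if_noexcept: elements are moved only
	// when the move constructor is non-throwing, otherwise they are copied.
	printf("moved on reallocation: %s\n",
	       std::is_nothrow_move_constructible<Messages>::value ? "yes" : "no");
	std::vector<Messages> v(4);
	v.resize(64); // reallocation here moves the deques instead of copying them
	return 0;
}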

Some files were not shown because too many files have changed in this diff.