Merge branch 'master' into feature-metadata-version

This commit is contained in:
Evan Tschannen 2019-03-10 21:13:28 -07:00
commit 2627bcd35e
127 changed files with 2112 additions and 1284 deletions

View File

@ -36,6 +36,7 @@ ifeq ($(NIGHTLY),true)
CFLAGS += -DFDB_CLEAN_BUILD
endif
BOOST_BASENAME ?= boost_1_67_0
ifeq ($(PLATFORM),Linux)
PLATFORM := linux
@ -44,7 +45,7 @@ ifeq ($(PLATFORM),Linux)
CXXFLAGS += -std=c++0x
BOOSTDIR ?= /opt/boost_1_52_0
BOOST_BASEDIR ?= /opt
TLS_LIBDIR ?= /usr/local/lib
DLEXT := so
java_DLEXT := so
@ -60,13 +61,14 @@ else ifeq ($(PLATFORM),Darwin)
.LIBPATTERNS := lib%.dylib lib%.a
BOOSTDIR ?= $(HOME)/boost_1_52_0
BOOST_BASEDIR ?= ${HOME}
TLS_LIBDIR ?= /usr/local/lib
DLEXT := dylib
java_DLEXT := jnilib
else
$(error Not prepared to compile on platform $(PLATFORM))
endif
BOOSTDIR ?= ${BOOST_BASEDIR}/${BOOST_BASENAME}
CCACHE := $(shell which ccache)
ifneq ($(CCACHE),)

View File

@ -37,7 +37,7 @@ become the only build system available.
1. Check out this repo on your Mac.
1. Install the Xcode command-line tools.
1. Download version 1.52 of [Boost](https://sourceforge.net/projects/boost/files/boost/1.52.0/).
1. Download version 1.67.0 of [Boost](https://sourceforge.net/projects/boost/files/boost/1.67.0/).
1. Set the `BOOSTDIR` environment variable to the location containing this boost installation.
1. Install [Mono](http://www.mono-project.com/download/stable/).
1. Install a [JDK](http://www.oracle.com/technetwork/java/javase/downloads/index.html). FoundationDB currently builds with Java 8.
@ -192,6 +192,7 @@ that Visual Studio is used to compile.
1. Install a [JDK](http://www.oracle.com/technetwork/java/javase/downloads/index.html). FoundationDB currently builds with Java 8.
1. Set `JAVA_HOME` to the unpacked location and JAVA_COMPILE to
`$JAVA_HOME/bin/javac`.
1. Install [Python](https://www.python.org/downloads/) if it is not already installed by Visual Studio.
1. (Optional) Install [WIX](http://wixtoolset.org/). Without it Visual Studio
won't build the Windows installer.
1. Create a build directory (you can have the build directory anywhere you

View File

@ -59,18 +59,16 @@ if(NOT WIN32)
target_link_libraries(fdb_c_ryw_benchmark PRIVATE fdb_c)
endif()
if(NOT OPEN_FOR_IDE)
# TODO: re-enable once the old vcxproj-based build system is removed.
#generate_export_header(fdb_c EXPORT_MACRO_NAME "DLLEXPORT"
# EXPORT_FILE_NAME ${CMAKE_CURRENT_BINARY_DIR}/foundationdb/fdb_c_export.h)
install(TARGETS fdb_c
EXPORT fdbc
DESTINATION ${FDB_LIB_DIR}
COMPONENT clients)
install(
FILES foundationdb/fdb_c.h
${CMAKE_CURRENT_BINARY_DIR}/foundationdb/fdb_c_options.g.h
${CMAKE_SOURCE_DIR}/fdbclient/vexillographer/fdb.options
DESTINATION ${FDB_INCLUDE_INSTALL_DIR}/foundationdb COMPONENT clients)
#install(EXPORT fdbc DESTINATION ${FDB_LIB_DIR}/foundationdb COMPONENT clients)
endif()
# TODO: re-enable once the old vcxproj-based build system is removed.
#generate_export_header(fdb_c EXPORT_MACRO_NAME "DLLEXPORT"
# EXPORT_FILE_NAME ${CMAKE_CURRENT_BINARY_DIR}/foundationdb/fdb_c_export.h)
fdb_install(TARGETS fdb_c
EXPORT fdbc
DESTINATION lib
COMPONENT clients)
fdb_install(
FILES foundationdb/fdb_c.h
${CMAKE_CURRENT_BINARY_DIR}/foundationdb/fdb_c_options.g.h
${CMAKE_SOURCE_DIR}/fdbclient/vexillographer/fdb.options
DESTINATION include COMPONENT clients)
#install(EXPORT fdbc DESTINATION ${FDB_LIB_DIR}/foundationdb COMPONENT clients)

View File

@ -67,14 +67,14 @@ FOR /F "tokens=1" %%i in ('hg.exe id') do copy /Y "$(TargetPath)" "$(TargetPath)
</PostBuildEvent>
</ItemDefinitionGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
<IncludePath>..\..\;C:\Program Files\boost_1_52_0;$(IncludePath)</IncludePath>
<IncludePath>..\..\;C:\Program Files\boost_1_67_0;$(IncludePath)</IncludePath>
<OutDir>$(SolutionDir)bin\$(Configuration)\</OutDir>
<IntDir>$(SystemDrive)\temp\msvcfdb\$(Platform)$(Configuration)\$(MSBuildProjectName)\</IntDir>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<OutDir>$(SolutionDir)bin\$(Configuration)\</OutDir>
<IntDir>$(SystemDrive)\temp\msvcfdb\$(Platform)$(Configuration)\$(MSBuildProjectName)\</IntDir>
<IncludePath>..\..\;C:\Program Files\boost_1_52_0;$(IncludePath)</IncludePath>
<IncludePath>..\..\;C:\Program Files\boost_1_67_0;$(IncludePath)</IncludePath>
</PropertyGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
<ClCompile>

View File

@ -35,7 +35,7 @@ THREAD_FUNC networkThread(void* fdb) {
ACTOR Future<Void> _test() {
API *fdb = FDB::API::selectAPIVersion(610);
auto db = fdb->createDatabase();
state Reference<Transaction> tr( new Transaction(db) );
state Reference<Transaction> tr = db->createTransaction();
// tr->setVersion(1);
@ -98,6 +98,81 @@ void fdb_flow_test() {
}
namespace FDB {
class DatabaseImpl : public Database, NonCopyable {
public:
virtual ~DatabaseImpl() { fdb_database_destroy(db); }
Reference<Transaction> createTransaction() override;
void setDatabaseOption(FDBDatabaseOption option, Optional<StringRef> value = Optional<StringRef>()) override;
private:
FDBDatabase* db;
explicit DatabaseImpl(FDBDatabase* db) : db(db) {}
friend class API;
};
class TransactionImpl : public Transaction, private NonCopyable, public FastAllocated<TransactionImpl> {
friend class DatabaseImpl;
public:
virtual ~TransactionImpl() {
if (tr) {
fdb_transaction_destroy(tr);
}
}
void setReadVersion(Version v) override;
Future<Version> getReadVersion() override;
Future<Optional<FDBStandalone<ValueRef>>> get(const Key& key, bool snapshot = false) override;
Future<FDBStandalone<KeyRef>> getKey(const KeySelector& key, bool snapshot = false) override;
Future<Void> watch(const Key& key) override;
using Transaction::getRange;
Future<FDBStandalone<RangeResultRef>> getRange(const KeySelector& begin, const KeySelector& end,
GetRangeLimits limits = GetRangeLimits(), bool snapshot = false,
bool reverse = false,
FDBStreamingMode streamingMode = FDB_STREAMING_MODE_SERIAL) override;
void addReadConflictRange(KeyRangeRef const& keys) override;
void addReadConflictKey(KeyRef const& key) override;
void addWriteConflictRange(KeyRangeRef const& keys) override;
void addWriteConflictKey(KeyRef const& key) override;
void atomicOp(const KeyRef& key, const ValueRef& operand, FDBMutationType operationType) override;
void set(const KeyRef& key, const ValueRef& value) override;
void clear(const KeyRangeRef& range) override;
void clear(const KeyRef& key) override;
Future<Void> commit() override;
Version getCommittedVersion() override;
Future<FDBStandalone<StringRef>> getVersionstamp() override;
void setOption(FDBTransactionOption option, Optional<StringRef> value = Optional<StringRef>()) override;
Future<Void> onError(Error const& e) override;
void cancel() override;
void reset() override;
TransactionImpl() : tr(NULL) {}
TransactionImpl(TransactionImpl&& r) BOOST_NOEXCEPT {
tr = r.tr;
r.tr = NULL;
}
TransactionImpl& operator=(TransactionImpl&& r) BOOST_NOEXCEPT {
tr = r.tr;
r.tr = NULL;
return *this;
}
private:
FDBTransaction* tr;
explicit TransactionImpl(FDBDatabase* db);
};
static inline void throw_on_error( fdb_error_t e ) {
if (e)
@ -187,40 +262,36 @@ namespace FDB {
return fdb_error_predicate( pred, e.code() );
}
Reference<Cluster> API::createCluster( std::string const& connFilename ) {
return Reference<Cluster>(new Cluster(connFilename));
}
Reference<DatabaseContext> API::createDatabase(std::string const& connFilename) {
Reference<Database> API::createDatabase(std::string const& connFilename) {
FDBDatabase *db;
throw_on_error(fdb_create_database(connFilename.c_str(), &db));
return Reference<DatabaseContext>(new DatabaseContext(db));
return Reference<Database>(new DatabaseImpl(db));
}
int API::getAPIVersion() const {
return version;
}
Reference<DatabaseContext> Cluster::createDatabase() {
return API::getInstance()->createDatabase(connFilename.c_str());
Reference<Transaction> DatabaseImpl::createTransaction() {
return Reference<Transaction>(new TransactionImpl(db));
}
void DatabaseContext::setDatabaseOption(FDBDatabaseOption option, Optional<StringRef> value) {
void DatabaseImpl::setDatabaseOption(FDBDatabaseOption option, Optional<StringRef> value) {
if (value.present())
throw_on_error(fdb_database_set_option(db, option, value.get().begin(), value.get().size()));
else
throw_on_error(fdb_database_set_option(db, option, NULL, 0));
}
Transaction::Transaction( Reference<DatabaseContext> const& db ) {
throw_on_error( fdb_database_create_transaction( db->db, &tr ) );
TransactionImpl::TransactionImpl(FDBDatabase* db) {
throw_on_error(fdb_database_create_transaction(db, &tr));
}
void Transaction::setVersion( Version v ) {
void TransactionImpl::setReadVersion(Version v) {
fdb_transaction_set_read_version( tr, v );
}
Future<Version> Transaction::getReadVersion() {
Future<Version> TransactionImpl::getReadVersion() {
return backToFuture<Version>( fdb_transaction_get_read_version( tr ), [](Reference<CFuture> f){
Version value;
@ -230,7 +301,7 @@ namespace FDB {
} );
}
Future< Optional<FDBStandalone<ValueRef>> > Transaction::get( const Key& key, bool snapshot ) {
Future<Optional<FDBStandalone<ValueRef>>> TransactionImpl::get(const Key& key, bool snapshot) {
return backToFuture< Optional<FDBStandalone<ValueRef>> >( fdb_transaction_get( tr, key.begin(), key.size(), snapshot ), [](Reference<CFuture> f) {
fdb_bool_t present;
uint8_t const* value;
@ -246,14 +317,14 @@ namespace FDB {
} );
}
Future< Void > Transaction::watch( const Key& key ) {
Future<Void> TransactionImpl::watch(const Key& key) {
return backToFuture< Void >( fdb_transaction_watch( tr, key.begin(), key.size() ), [](Reference<CFuture> f) {
throw_on_error( fdb_future_get_error( f->f ) );
return Void();
} );
}
Future< FDBStandalone<KeyRef> > Transaction::getKey( const KeySelector& key, bool snapshot ) {
Future<FDBStandalone<KeyRef>> TransactionImpl::getKey(const KeySelector& key, bool snapshot) {
return backToFuture< FDBStandalone<KeyRef> >( fdb_transaction_get_key( tr, key.key.begin(), key.key.size(), key.orEqual, key.offset, snapshot ), [](Reference<CFuture> f) {
uint8_t const* key;
int key_length;
@ -264,7 +335,7 @@ namespace FDB {
} );
}
Future< FDBStandalone<RangeResultRef> > Transaction::getRange( const KeySelector& begin, const KeySelector& end, GetRangeLimits limits, bool snapshot, bool reverse, FDBStreamingMode streamingMode ) {
Future<FDBStandalone<RangeResultRef>> TransactionImpl::getRange(const KeySelector& begin, const KeySelector& end, GetRangeLimits limits, bool snapshot, bool reverse, FDBStreamingMode streamingMode) {
// FIXME: iteration
return backToFuture< FDBStandalone<RangeResultRef> >( fdb_transaction_get_range( tr, begin.key.begin(), begin.key.size(), begin.orEqual, begin.offset, end.key.begin(), end.key.size(), end.orEqual, end.offset, limits.rows, limits.bytes, streamingMode, 1, snapshot, reverse ), [](Reference<CFuture> f) {
FDBKeyValue const* kv;
@ -277,64 +348,64 @@ namespace FDB {
} );
}
void Transaction::addReadConflictRange( KeyRangeRef const& keys ) {
void TransactionImpl::addReadConflictRange(KeyRangeRef const& keys) {
throw_on_error( fdb_transaction_add_conflict_range( tr, keys.begin.begin(), keys.begin.size(), keys.end.begin(), keys.end.size(), FDB_CONFLICT_RANGE_TYPE_READ ) );
}
void Transaction::addReadConflictKey( KeyRef const& key ) {
void TransactionImpl::addReadConflictKey(KeyRef const& key) {
return addReadConflictRange(KeyRange(KeyRangeRef(key, keyAfter(key))));
}
void Transaction::addWriteConflictRange( KeyRangeRef const& keys ) {
void TransactionImpl::addWriteConflictRange(KeyRangeRef const& keys) {
throw_on_error( fdb_transaction_add_conflict_range( tr, keys.begin.begin(), keys.begin.size(), keys.end.begin(), keys.end.size(), FDB_CONFLICT_RANGE_TYPE_WRITE ) );
}
void Transaction::addWriteConflictKey( KeyRef const& key ) {
void TransactionImpl::addWriteConflictKey(KeyRef const& key) {
return addWriteConflictRange(KeyRange(KeyRangeRef(key, keyAfter(key))));
}
void Transaction::atomicOp( const KeyRef& key, const ValueRef& operand, FDBMutationType operationType ) {
void TransactionImpl::atomicOp(const KeyRef& key, const ValueRef& operand, FDBMutationType operationType) {
fdb_transaction_atomic_op( tr, key.begin(), key.size(), operand.begin(), operand.size(), operationType );
}
void Transaction::set( const KeyRef& key, const ValueRef& value ) {
void TransactionImpl::set(const KeyRef& key, const ValueRef& value) {
fdb_transaction_set( tr, key.begin(), key.size(), value.begin(), value.size() );
}
void Transaction::clear( const KeyRangeRef& range ) {
void TransactionImpl::clear(const KeyRangeRef& range) {
fdb_transaction_clear_range( tr, range.begin.begin(), range.begin.size(), range.end.begin(), range.end.size() );
}
void Transaction::clear( const KeyRef& key ) {
void TransactionImpl::clear(const KeyRef& key) {
fdb_transaction_clear( tr, key.begin(), key.size() );
}
Future<Void> Transaction::commit() {
Future<Void> TransactionImpl::commit() {
return backToFuture< Void >( fdb_transaction_commit( tr ), [](Reference<CFuture> f) {
throw_on_error( fdb_future_get_error( f->f ) );
return Void();
} );
}
Version Transaction::getCommittedVersion() {
Version TransactionImpl::getCommittedVersion() {
Version v;
throw_on_error( fdb_transaction_get_committed_version( tr, &v ) );
return v;
}
Future<FDBStandalone<StringRef>> Transaction::getVersionstamp() {
return backToFuture< FDBStandalone<KeyRef> >( fdb_transaction_get_versionstamp( tr ), [](Reference<CFuture> f) {
Future<FDBStandalone<StringRef>> TransactionImpl::getVersionstamp() {
return backToFuture<FDBStandalone<KeyRef>>(fdb_transaction_get_versionstamp(tr), [](Reference<CFuture> f) {
uint8_t const* key;
int key_length;
throw_on_error( fdb_future_get_key( f->f, &key, &key_length ) );
return FDBStandalone<StringRef>( f, StringRef( key, key_length ) );
} );
});
}
void Transaction::setOption( FDBTransactionOption option, Optional<StringRef> value ) {
void TransactionImpl::setOption(FDBTransactionOption option, Optional<StringRef> value) {
if ( value.present() ) {
throw_on_error( fdb_transaction_set_option( tr, option, value.get().begin(), value.get().size() ) );
} else {
@ -342,18 +413,18 @@ namespace FDB {
}
}
Future<Void> Transaction::onError( Error const& e ) {
Future<Void> TransactionImpl::onError(Error const& e) {
return backToFuture< Void >( fdb_transaction_on_error( tr, e.code() ), [](Reference<CFuture> f) {
throw_on_error( fdb_future_get_error( f->f ) );
return Void();
} );
}
void Transaction::cancel() {
void TransactionImpl::cancel() {
fdb_transaction_cancel( tr );
}
void Transaction::reset() {
void TransactionImpl::reset() {
fdb_transaction_reset( tr );
}

View File

@ -30,68 +30,9 @@
#include "FDBLoanerTypes.h"
namespace FDB {
class DatabaseContext : public ReferenceCounted<DatabaseContext>, NonCopyable {
friend class Cluster;
friend class Transaction;
public:
~DatabaseContext() {
fdb_database_destroy( db );
}
void setDatabaseOption(FDBDatabaseOption option, Optional<StringRef> value = Optional<StringRef>());
private:
FDBDatabase* db;
explicit DatabaseContext( FDBDatabase* db ) : db(db) {}
friend class API;
};
// Deprecated: Use createDatabase instead.
class Cluster : public ReferenceCounted<Cluster>, NonCopyable {
public:
~Cluster() {}
Reference<DatabaseContext> createDatabase();
private:
explicit Cluster( std::string connFilename ) : connFilename(connFilename) {}
std::string connFilename;
friend class API;
};
class API {
public:
static API* selectAPIVersion(int apiVersion);
static API* getInstance();
static bool isAPIVersionSelected();
void setNetworkOption(FDBNetworkOption option, Optional<StringRef> value = Optional<StringRef>());
void setupNetwork();
void runNetwork();
void stopNetwork();
// Deprecated: Use createDatabase instead.
Reference<Cluster> createCluster( std::string const& connFilename );
Reference<DatabaseContext> createDatabase( std::string const& connFilename="" );
bool evaluatePredicate(FDBErrorPredicate pred, Error const& e);
int getAPIVersion() const;
private:
static API* instance;
API(int version);
int version;
};
struct CFuture : NonCopyable, ReferenceCounted<CFuture>, FastAllocated<CFuture> {
CFuture() : f(NULL) {}
explicit CFuture( FDBFuture* f ) : f(f) {}
explicit CFuture(FDBFuture* f) : f(f) {}
~CFuture() {
if (f) {
fdb_future_destroy(f);
@ -107,83 +48,102 @@ namespace FDB {
class FDBStandalone : public T {
public:
FDBStandalone() {}
FDBStandalone( Reference<CFuture> f, T const& t ) : T(t), f(f) {}
FDBStandalone( FDBStandalone const& o ) : T((T const&)o), f(o.f) {}
FDBStandalone(Reference<CFuture> f, T const& t) : T(t), f(f) {}
FDBStandalone(FDBStandalone const& o) : T((T const&)o), f(o.f) {}
private:
Reference<CFuture> f;
};
class Transaction : public ReferenceCounted<Transaction>, private NonCopyable, public FastAllocated<Transaction> {
class ReadTransaction : public ReferenceCounted<ReadTransaction> {
public:
explicit Transaction( Reference<DatabaseContext> const& db );
~Transaction() {
if (tr) {
fdb_transaction_destroy(tr);
}
virtual ~ReadTransaction(){};
virtual void setReadVersion(Version v) = 0;
virtual Future<Version> getReadVersion() = 0;
virtual Future<Optional<FDBStandalone<ValueRef>>> get(const Key& key, bool snapshot = false) = 0;
virtual Future<FDBStandalone<KeyRef>> getKey(const KeySelector& key, bool snapshot = false) = 0;
virtual Future<Void> watch(const Key& key) = 0;
virtual Future<FDBStandalone<RangeResultRef>> getRange(
const KeySelector& begin, const KeySelector& end, GetRangeLimits limits = GetRangeLimits(),
bool snapshot = false, bool reverse = false,
FDBStreamingMode streamingMode = FDB_STREAMING_MODE_SERIAL) = 0;
virtual Future<FDBStandalone<RangeResultRef>> getRange(
const KeySelector& begin, const KeySelector& end, int limit, bool snapshot = false, bool reverse = false,
FDBStreamingMode streamingMode = FDB_STREAMING_MODE_SERIAL) {
return getRange(begin, end, GetRangeLimits(limit), snapshot, reverse, streamingMode);
}
virtual Future<FDBStandalone<RangeResultRef>> getRange(
const KeyRange& keys, int limit, bool snapshot = false, bool reverse = false,
FDBStreamingMode streamingMode = FDB_STREAMING_MODE_SERIAL) {
return getRange(KeySelector(firstGreaterOrEqual(keys.begin), keys.arena()),
KeySelector(firstGreaterOrEqual(keys.end), keys.arena()), limit, snapshot, reverse,
streamingMode);
}
virtual Future<FDBStandalone<RangeResultRef>> getRange(
const KeyRange& keys, GetRangeLimits limits = GetRangeLimits(), bool snapshot = false, bool reverse = false,
FDBStreamingMode streamingMode = FDB_STREAMING_MODE_SERIAL) {
return getRange(KeySelector(firstGreaterOrEqual(keys.begin), keys.arena()),
KeySelector(firstGreaterOrEqual(keys.end), keys.arena()), limits, snapshot, reverse,
streamingMode);
}
void setVersion( Version v );
Future<Version> getReadVersion();
virtual void addReadConflictRange(KeyRangeRef const& keys) = 0;
virtual void addReadConflictKey(KeyRef const& key) = 0;
Future< Optional<FDBStandalone<ValueRef>> > get( const Key& key, bool snapshot = false );
Future< Void > watch( const Key& key );
Future< FDBStandalone<KeyRef> > getKey( const KeySelector& key, bool snapshot = false );
Future< FDBStandalone<RangeResultRef> > getRange( const KeySelector& begin, const KeySelector& end, GetRangeLimits limits = GetRangeLimits(), bool snapshot = false, bool reverse = false, FDBStreamingMode streamingMode = FDB_STREAMING_MODE_SERIAL);
Future< FDBStandalone<RangeResultRef> > getRange( const KeySelector& begin, const KeySelector& end, int limit, bool snapshot = false, bool reverse = false, FDBStreamingMode streamingMode = FDB_STREAMING_MODE_SERIAL ) {
return getRange( begin, end, GetRangeLimits(limit), snapshot, reverse, streamingMode );
}
Future< FDBStandalone<RangeResultRef> > getRange( const KeyRange& keys, int limit, bool snapshot = false, bool reverse = false, FDBStreamingMode streamingMode = FDB_STREAMING_MODE_SERIAL ) {
return getRange( KeySelector( firstGreaterOrEqual(keys.begin), keys.arena() ),
KeySelector( firstGreaterOrEqual(keys.end), keys.arena() ),
limit, snapshot, reverse, streamingMode );
}
Future< FDBStandalone<RangeResultRef> > getRange( const KeyRange& keys, GetRangeLimits limits = GetRangeLimits(), bool snapshot = false, bool reverse = false, FDBStreamingMode streamingMode = FDB_STREAMING_MODE_SERIAL ) {
return getRange( KeySelector( firstGreaterOrEqual(keys.begin), keys.arena() ),
KeySelector( firstGreaterOrEqual(keys.end), keys.arena() ),
limits, snapshot, reverse, streamingMode );
}
virtual void setOption(FDBTransactionOption option, Optional<StringRef> value = Optional<StringRef>()) = 0;
// Future< Standalone<VectorRef<const char*>> > getAddressesForKey(const Key& key);
virtual Future<Void> onError(Error const& e) = 0;
void addReadConflictRange( KeyRangeRef const& keys );
void addReadConflictKey( KeyRef const& key );
void addWriteConflictRange( KeyRangeRef const& keys );
void addWriteConflictKey( KeyRef const& key );
// void makeSelfConflicting() { tr.makeSelfConflicting(); }
void atomicOp( const KeyRef& key, const ValueRef& operand, FDBMutationType operationType );
void set( const KeyRef& key, const ValueRef& value );
void clear( const KeyRangeRef& range );
void clear( const KeyRef& key );
Future<Void> commit();
Version getCommittedVersion();
Future<FDBStandalone<StringRef>> getVersionstamp();
void setOption( FDBTransactionOption option, Optional<StringRef> value = Optional<StringRef>() );
Future<Void> onError( Error const& e );
void cancel();
void reset();
// double getBackoff() { return tr.getBackoff(); }
// void debugTransaction(UID dID) { tr.debugTransaction(dID); }
Transaction() : tr(NULL) {}
Transaction( Transaction&& r ) noexcept(true) {
tr = r.tr;
r.tr = NULL;
}
Transaction& operator=( Transaction&& r ) noexcept(true) {
tr = r.tr;
r.tr = NULL;
return *this;
}
private:
FDBTransaction* tr;
virtual void cancel() = 0;
virtual void reset() = 0;
};
}
class Transaction : public ReadTransaction {
public:
virtual void addWriteConflictRange(KeyRangeRef const& keys) = 0;
virtual void addWriteConflictKey(KeyRef const& key) = 0;
#endif
virtual void atomicOp(const KeyRef& key, const ValueRef& operand, FDBMutationType operationType) = 0;
virtual void set(const KeyRef& key, const ValueRef& value) = 0;
virtual void clear(const KeyRangeRef& range) = 0;
virtual void clear(const KeyRef& key) = 0;
virtual Future<Void> commit() = 0;
virtual Version getCommittedVersion() = 0;
virtual Future<FDBStandalone<StringRef>> getVersionstamp() = 0;
};
class Database : public ReferenceCounted<Database> {
public:
virtual ~Database(){};
virtual Reference<Transaction> createTransaction() = 0;
virtual void setDatabaseOption(FDBDatabaseOption option, Optional<StringRef> value = Optional<StringRef>()) = 0;
};
class API {
public:
static API* selectAPIVersion(int apiVersion);
static API* getInstance();
static bool isAPIVersionSelected();
void setNetworkOption(FDBNetworkOption option, Optional<StringRef> value = Optional<StringRef>());
void setupNetwork();
void runNetwork();
void stopNetwork();
Reference<Database> createDatabase(std::string const& connFilename = "");
bool evaluatePredicate(FDBErrorPredicate pred, Error const& e);
int getAPIVersion() const;
private:
static API* instance;
API(int version);
int version;
};
} // namespace FDB
#endif // FDB_FLOW_FDB_FLOW_H

View File

@ -79,11 +79,11 @@
<PropertyGroup Label="UserMacros" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|X64'">
<LinkIncremental>true</LinkIncremental>
<IncludePath>..\..\;C:\Program Files\boost_1_52_0;$(IncludePath)</IncludePath>
<IncludePath>..\..\;C:\Program Files\boost_1_67_0;$(IncludePath)</IncludePath>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|X64'">
<LinkIncremental>false</LinkIncremental>
<IncludePath>..\..\;C:\Program Files\boost_1_52_0;$(IncludePath)</IncludePath>
<IncludePath>..\..\;C:\Program Files\boost_1_67_0;$(IncludePath)</IncludePath>
</PropertyGroup>
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|X64'">
<ClCompile>

View File

@ -41,7 +41,8 @@ std::map<Standalone<StringRef>, Reference<Transaction>> trMap;
const int ITERATION_PROGRESSION[] = { 256, 1000, 4096, 6144, 9216, 13824, 20736, 31104, 46656, 69984, 80000 };
const int MAX_ITERATION = sizeof(ITERATION_PROGRESSION)/sizeof(int);
static Future<Void> runTest(Reference<FlowTesterData> const& data, Reference<DatabaseContext> const& db, StringRef const& prefix);
static Future<Void> runTest(Reference<FlowTesterData> const& data, Reference<Database> const& db,
StringRef const& prefix);
THREAD_FUNC networkThread( void* api ) {
// This is the fdb_flow network we're running on a thread
@ -388,7 +389,7 @@ struct LogStackFunc : InstructionFunc {
ACTOR static Future<Void> logStack(Reference<FlowTesterData> data, std::map<int, StackItem> entries, Standalone<StringRef> prefix) {
loop {
state Reference<Transaction> tr(new Transaction(data->db));
state Reference<Transaction> tr = data->db->createTransaction();
try {
for(auto it : entries) {
Tuple tk;
@ -534,7 +535,7 @@ struct NewTransactionFunc : InstructionFunc {
static const char* name;
static Future<Void> call(Reference<FlowTesterData> const& data, Reference<InstructionData> const& instruction) {
trMap[data->trName] = Reference<Transaction>(new Transaction(data->db));
trMap[data->trName] = data->db->createTransaction();
return Void();
}
};
@ -550,7 +551,7 @@ struct UseTransactionFunc : InstructionFunc {
data->trName = name;
if(trMap.count(data->trName) == 0) {
trMap[data->trName] = Reference<Transaction>(new Transaction(data->db));
trMap[data->trName] = data->db->createTransaction();
}
return Void();
}
@ -681,7 +682,7 @@ struct SetReadVersionFunc : InstructionFunc {
static const char* name;
static Future<Void> call(Reference<FlowTesterData> const& data, Reference<InstructionData> const& instruction) {
instruction->tr->setVersion(data->lastVersion);
instruction->tr->setReadVersion(data->lastVersion);
return Void();
}
};
@ -1323,6 +1324,20 @@ struct StartThreadFunc : InstructionFunc {
const char* StartThreadFunc::name = "START_THREAD";
REGISTER_INSTRUCTION_FUNC(StartThreadFunc);
ACTOR template <class Function>
Future<decltype(fake<Function>()(Reference<ReadTransaction>()).getValue())> read(Reference<Database> db,
Function func) {
state Reference<ReadTransaction> tr = db->createTransaction();
loop {
try {
state decltype(fake<Function>()(Reference<ReadTransaction>()).getValue()) result = wait(func(tr));
return result;
} catch (Error& e) {
wait(tr->onError(e));
}
}
}
// WAIT_EMPTY
struct WaitEmptyFunc : InstructionFunc {
static const char* name;
@ -1333,25 +1348,23 @@ struct WaitEmptyFunc : InstructionFunc {
return Void();
Standalone<StringRef> s1 = wait(items[0].value);
state Standalone<StringRef> prefix = Tuple::unpack(s1).getString(0);
Standalone<StringRef> prefix = Tuple::unpack(s1).getString(0);
// printf("=========WAIT_EMPTY:%s\n", printable(prefix).c_str());
state Reference<Transaction> tr(new Transaction(data->db));
loop {
try {
FDBStandalone<RangeResultRef> results = wait(tr->getRange(KeyRangeRef(prefix, strinc(prefix)), 1));
if(results.size() > 0) {
throw not_committed();
}
break;
}
catch(Error &e) {
wait(tr->onError(e));
}
}
wait(read(data->db,
[=](Reference<ReadTransaction> tr) -> Future<Void> { return checkEmptyPrefix(tr, prefix); }));
return Void();
}
private:
ACTOR static Future<Void> checkEmptyPrefix(Reference<ReadTransaction> tr, Standalone<StringRef> prefix) {
FDBStandalone<RangeResultRef> results = wait(tr->getRange(KeyRangeRef(prefix, strinc(prefix)), 1));
if (results.size() > 0) {
throw not_committed();
}
return Void();
}
};
const char* WaitEmptyFunc::name = "WAIT_EMPTY";
REGISTER_INSTRUCTION_FUNC(WaitEmptyFunc);
@ -1529,7 +1542,7 @@ struct UnitTestsFunc : InstructionFunc {
}
API::selectAPIVersion(fdb->getAPIVersion());
state Reference<Transaction> tr(new Transaction(data->db));
state Reference<Transaction> tr = data->db->createTransaction();
tr->setOption(FDBTransactionOption::FDB_TR_OPTION_PRIORITY_SYSTEM_IMMEDIATE);
tr->setOption(FDBTransactionOption::FDB_TR_OPTION_PRIORITY_SYSTEM_IMMEDIATE);
tr->setOption(FDBTransactionOption::FDB_TR_OPTION_PRIORITY_BATCH);
@ -1560,7 +1573,7 @@ const char* UnitTestsFunc::name = "UNIT_TESTS";
REGISTER_INSTRUCTION_FUNC(UnitTestsFunc);
ACTOR static Future<Void> getInstructions(Reference<FlowTesterData> data, StringRef prefix) {
state Reference<Transaction> tr(new Transaction(data->db));
state Reference<Transaction> tr = data->db->createTransaction();
// get test instructions
state Tuple testSpec;
@ -1610,7 +1623,7 @@ ACTOR static Future<Void> doInstructions(Reference<FlowTesterData> data) {
state Reference<InstructionData> instruction = Reference<InstructionData>(new InstructionData(isDatabase, isSnapshot, data->instructions[idx].value, Reference<Transaction>()));
if (isDatabase) {
state Reference<Transaction> tr(new Transaction(data->db));
state Reference<Transaction> tr = data->db->createTransaction();
instruction->tr = tr;
}
else {
@ -1644,7 +1657,7 @@ ACTOR static Future<Void> doInstructions(Reference<FlowTesterData> data) {
return Void();
}
ACTOR static Future<Void> runTest(Reference<FlowTesterData> data, Reference<DatabaseContext> db, StringRef prefix) {
ACTOR static Future<Void> runTest(Reference<FlowTesterData> data, Reference<Database> db, StringRef prefix) {
ASSERT(data);
try {
data->db = db;
@ -1744,7 +1757,7 @@ ACTOR void _test_versionstamp() {
startThread(networkThread, fdb);
auto db = fdb->createDatabase();
state Reference<Transaction> tr(new Transaction(db));
state Reference<Transaction> tr = db->createTransaction();
state Future<FDBStandalone<StringRef>> ftrVersion = tr->getVersionstamp();

View File

@ -199,7 +199,7 @@ struct DirectoryTesterData {
struct FlowTesterData : public ReferenceCounted<FlowTesterData> {
FDB::API *api;
Reference<FDB::DatabaseContext> db;
Reference<FDB::Database> db;
Standalone<FDB::RangeResultRef> instructions;
Standalone<StringRef> trName;
FlowTesterStack stack;

View File

@ -58,13 +58,13 @@
<LinkIncremental>true</LinkIncremental>
<OutDir>$(SolutionDir)bin\$(Configuration)\</OutDir>
<IntDir>$(SystemDrive)\temp\msvcfdb\$(Platform)$(Configuration)\$(MSBuildProjectName)\</IntDir>
<IncludePath>$(IncludePath);../../../;C:\Program Files\boost_1_52_0</IncludePath>
<IncludePath>$(IncludePath);../../../;C:\Program Files\boost_1_67_0</IncludePath>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|X64'">
<LinkIncremental>false</LinkIncremental>
<OutDir>$(SolutionDir)bin\$(Configuration)\</OutDir>
<IntDir>$(SystemDrive)\temp\msvcfdb\$(Platform)$(Configuration)\$(MSBuildProjectName)\</IntDir>
<IncludePath>$(IncludePath);../;C:\Program Files\boost_1_52_0</IncludePath>
<IncludePath>$(IncludePath);../;C:\Program Files\boost_1_67_0</IncludePath>
</PropertyGroup>
<ItemDefinitionGroup>
<ClCompile>

View File

@ -206,7 +206,11 @@ JNIEXPORT jthrowable JNICALL Java_com_apple_foundationdb_NativeFuture_Future_1ge
return JNI_NULL;
}
FDBFuture *sav = (FDBFuture *)future;
return getThrowable( jenv, fdb_future_get_error( sav ) );
fdb_error_t err = fdb_future_get_error( sav );
if( err )
return getThrowable( jenv, err );
else
return JNI_NULL;
}
JNIEXPORT jboolean JNICALL Java_com_apple_foundationdb_NativeFuture_Future_1isReady(JNIEnv *jenv, jobject, jlong future) {

View File

@ -45,7 +45,7 @@
</ImportGroup>
<PropertyGroup Label="UserMacros" />
<PropertyGroup>
<IncludePath>..\..\;C:\Program Files\Java\jdk6\include\win32;C:\Program Files\Java\jdk6\include;C:\Program Files\boost_1_52_0;$(IncludePath)</IncludePath>
<IncludePath>..\..\;C:\Program Files\Java\jdk6\include\win32;C:\Program Files\Java\jdk6\include;C:\Program Files\boost_1_67_0;$(IncludePath)</IncludePath>
<OutDir>$(SolutionDir)bin\$(Configuration)\</OutDir>
<IntDir>$(SystemDrive)\temp\msvcfdb\$(Platform)$(Configuration)\$(MSBuildProjectName)\</IntDir>
</PropertyGroup>

View File

@ -37,7 +37,7 @@ class FutureResults extends NativeFuture<RangeResultInfo> {
protected RangeResultInfo getIfDone_internal(long cPtr) throws FDBException {
FDBException err = Future_getError(cPtr);
if(!err.isSuccess()) {
if(err != null && !err.isSuccess()) {
throw err;
}

View File

@ -34,7 +34,7 @@ class FutureVoid extends NativeFuture<Void> {
// with a get on the error and throw if the error is not success.
FDBException err = Future_getError(cPtr);
if(!err.isSuccess()) {
if(err != null && !err.isSuccess()) {
throw err;
}
return null;

View File

@ -10,12 +10,11 @@ RUN adduser --disabled-password --gecos '' fdb && chown -R fdb /opt && chmod -R
USER fdb
# wget of bintray without forcing UTF-8 encoding results in 403 Forbidden
RUN cd /opt/ && wget http://downloads.sourceforge.net/project/boost/boost/1.52.0/boost_1_52_0.tar.bz2 &&\
RUN cd /opt/ &&\
wget --local-encoding=UTF-8 https://dl.bintray.com/boostorg/release/1.67.0/source/boost_1_67_0.tar.bz2 &&\
echo '2684c972994ee57fc5632e03bf044746f6eb45d4920c343937a465fd67a5adba boost_1_67_0.tar.bz2' | sha256sum -c - &&\
tar -xjf boost_1_52_0.tar.bz2 &&\
tar -xjf boost_1_67_0.tar.bz2 &&\
rm boost_1_52_0.tar.bz2 boost_1_67_0.tar.bz2
rm boost_1_67_0.tar.bz2
USER root

View File

@ -38,7 +38,7 @@ configure() {
local __res=0
for _ in 1
do
cmake ../foundationdb
cmake ../foundationdb ${CMAKE_EXTRA_ARGS}
__res=$?
if [ ${__res} -ne 0 ]
then
@ -87,6 +87,8 @@ package_fast() {
for _ in 1
do
make -j`nproc` packages
cpack
cpack -G RPM -D GENERATE_EL6=ON
__res=$?
if [ ${__res} -ne 0 ]
then
@ -100,7 +102,7 @@ package() {
local __res=0
for _ in 1
do
configure
build
__res=$?
if [ ${__res} -ne 0 ]
then
@ -120,7 +122,7 @@ rpm() {
local __res=0
for _ in 1
do
cmake -DINSTALL_LAYOUT=RPM ../foundationdb
configure
__res=$?
if [ ${__res} -ne 0 ]
then
@ -132,7 +134,8 @@ rpm() {
then
break
fi
fakeroot cpack
fakeroot cpack -G RPM -D GENERATE_EL6=ON
fakeroot cpack -G RPM
__res=$?
if [ ${__res} -ne 0 ]
then
@ -146,7 +149,7 @@ deb() {
local __res=0
for _ in 1
do
cmake -DINSTALL_LAYOUT=DEB ../foundationdb
configure
__res=$?
if [ ${__res} -ne 0 ]
then
@ -158,7 +161,7 @@ deb() {
then
break
fi
fakeroot cpack
fakeroot cpack -G DEB
__res=$?
if [ ${__res} -ne 0 ]
then
@ -172,7 +175,7 @@ test-fast() {
local __res=0
for _ in 1
do
ctest -j`nproc`
ctest -j`nproc` ${CTEST_EXTRA_ARGS}
__res=$?
if [ ${__res} -ne 0 ]
then

View File

@ -1,3 +0,0 @@
FROM centos:6
RUN yum install -y yum-utils

View File

@ -0,0 +1,3 @@
FROM centos:6
RUN yum install -y yum-utils upstart initscripts

View File

@ -0,0 +1,3 @@
FROM centos:7
RUN yum install -y yum-utils systemd sysvinit-tools

View File

@ -1,3 +1,4 @@
FROM ubuntu:16.04
RUN apt-get update
RUN apt-get install -y systemd

View File

@ -59,6 +59,10 @@ services:
<<: *build-setup
command: scl enable devtoolset-7 rh-ruby24 rh-python36 python27 -- bash ../foundationdb/build/cmake/build.sh test
snowflake-ci: &snowflake-ci
<<: *build-setup
command: scl enable devtoolset-7 rh-ruby24 rh-python36 python27 -- bash ../foundationdb/build/cmake/build.sh package test-fast
shell:
<<: *build-setup
command: scl enable devtoolset-7 rh-ruby24 rh-python36 python27 -- bash

View File

@ -1,7 +1,17 @@
[RPM_1]
name = fdb-centos
location = centos-test
[centos6]
name = fdb-centos6
location = centos6-test
packages = ^.*el6((?!debuginfo).)*\.rpm$
format = rpm
[DEB_1]
[centos7]
name = fdb-centos7
location = centos7-test
packages = ^.*el7((?!debuginfo).)*\.rpm$
format = rpm
[ubuntu_16_04]
name = fdb-debian
location = debian-test
packages = ^.*\.deb$
format = deb

View File

@ -2,6 +2,7 @@
source_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
source ${source_dir}/modules/globals.sh
source ${source_dir}/modules/util.sh
source ${source_dir}/modules/deb.sh
source ${source_dir}/modules/tests.sh

View File

@ -17,10 +17,8 @@ then
fi
# parse the ini file and read it into an
# associative array
declare -gA ini_name
declare -gA ini_location
eval "$(awk -F ' *= *' '{ if ($1 ~ /^\[/) section=$1; else if ($1 !~ /^$/) print "ini_" $1 section "=" "\"" $2 "\"" }' ${docker_file})"
eval "$(awk -F ' *= *' '{ if ($1 ~ /^\[/) section=$1; else if ($1 !~ /^$/) printf "ini_%s%s=\47%s\47\n", $1, section, $2 }' ${docker_file})"
vms=( "${!ini_name[@]}" )
if [ $? -ne 0 ]
then
echo "ERROR: Could not parse config-file ${docker_file}"
@ -112,15 +110,6 @@ then
then
break
fi
if [ -z ${fdb_packages+x} ]
then
config_find_packages
if [ $? -ne 0 ]
then
__res=1
break
fi
fi
config_load_vms
__res=$?
if [ ${__res} -ne 0 ]

View File

@ -10,7 +10,7 @@ then
local __res=0
enterfun
echo "Install FoundationDB"
cd /build
cd /build/packages
package_names=()
for f in "${package_files[@]}"
do

View File

@ -9,56 +9,55 @@ then
failed_tests=()
docker_threads=()
docker_ids=()
docker_threads=()
docker_logs=()
docker_error_logs=()
docker_wait_any() {
# docker wait waits on all containers (unlike what is documented)
# so we need to do polling
success=0
while [ "${success}" -eq 0 ]
local __res=0
enterfun
while [ "${#docker_threads[@]}" -gt 0 ]
do
for ((i=0;i<"${#docker_ids[@]}";++i))
IFS=";" read -ra res <${pipe_file}
docker_id=${res[0]}
result=${res[1]}
i=0
for (( idx=0; idx<${#docker_ids[@]}; idx++ ))
do
docker_id="${docker_ids[$i]}"
status="$(docker ps -a -f id=${docker_id} --format '{{.Status}}' | awk '{print $1;}')"
if [ "${status}" = "Exited" ]
if [ "${docker_id}" = "${docker_ids[idx]}" ]
then
success=1
ret_code="$(docker wait ${docker_id})"
if [ "${ret_code}" -ne 0 ]
then
failed_tests+=( "${docker_threads[$i]}" )
echo -e "${RED}Test failed: ${docker_threads[$i]} ${NC}"
else
echo -e "${GREEN}Test succeeded: ${docker_threads[$i]} ${NC}"
fi
# remove it
n=$((i+1))
docker_ids=( "${docker_ids[@]:0:$i}" "${docker_ids[@]:$n}" )
docker_threads=( "${docker_threads[@]:0:$i}" "${docker_threads[@]:$n}" )
# prune
set -x
if [ "${pruning_strategy}" = "ALL" ]
then
docker container rm "${docker_id}" > /dev/null
elif [ "${ret_code}" -eq 0 ] && [ "${pruning_strategy}" = "SUCCEEDED" ]
then
docker container rm "${docker_id}" > /dev/null
elif [ "${ret_code}" -ne 0 ] && [ "${pruning_strategy}" = "FAILED" ]
then
docker container rm "${docker_id}" > /dev/null
fi
set +x
i=idx
break
fi
done
sleep 1
if [ "${result}" -eq 0 ]
then
echo -e "${GREEN}Test succeeded: ${docker_threads[$i]}"
echo -e "\tDocker-ID: ${docker_ids[$i]} "
echo -e "\tLog-File: ${docker_logs[$i]}"
echo -e "\tErr-File: ${docker_error_logs[$i]} ${NC}"
else
echo -e "${RED}Test FAILED: ${docker_threads[$i]}"
echo -e "\tDocker-ID: ${docker_ids[$i]} "
echo -e "\tLog-File: ${docker_logs[$i]}"
echo -e "\tErr-File: ${docker_error_logs[$i]} ${NC}"
failed_tests+=( "${docker_threads[$i]}" )
fi
n=$((i+1))
docker_ids=( "${docker_ids[@]:0:$i}" "${docker_ids[@]:$n}" )
docker_threads=( "${docker_threads[@]:0:$i}" "${docker_threads[@]:$n}" )
docker_logs=( "${docker_logs[@]:0:$i}" "${docker_logs[@]:$n}" )
docker_error_logs=( "${docker_error_logs[@]:0:$i}" "${docker_error_logs[@]:$n}" )
break
done
exitfun
return "${__res}"
}
docker_wait_all() {
local __res=0
while [ "${#docker_ids[@]}" -gt 0 ]
while [ "${#docker_threads[@]}" -gt 0 ]
do
docker_wait_any
if [ "$?" -ne 0 ]
@ -69,158 +68,104 @@ then
return ${__res}
}
docker_build_and_run() {
local __res=0
enterfun
for _ in 1
do
if [[ "$location" = /* ]]
then
cd "${location}"
else
cd ${source_dir}/../${location}
fi
docker_logs="${log_dir}/docker_build_${name}"
docker build . -t ${name} 1> "${docker_logs}.log" 2> "${docker_logs}.err"
successOr "Building Docker image ${name} failed - see ${docker_logs}.log and ${docker_logs}.err"
# we start docker in interactive mode, otherwise CTRL-C won't work
if [ ! -z "${tests_to_run+x}"]
then
tests=()
IFS=';' read -ra tests <<< "${tests_to_run}"
fi
for t in "${tests[@]}"
do
if [ "${#docker_ids[@]}" -ge "${docker_parallelism}" ]
then
docker_wait_any
fi
echo "Starting Test ${PKG,,}_${t}"
docker_id=$( docker run -d -v "${fdb_source}:/foundationdb"\
-v "${fdb_build}:/build"\
${name}\
bash /foundationdb/build/cmake/package_tester/${PKG,,}_tests.sh -n ${t} ${packages_to_test[@]} )
docker_ids+=( "${docker_id}" )
docker_threads+=( "${PKG} - ${t} (ID: ${docker_id})" )
done
done
exitfun
return ${__res}
}
docker_run_tests() {
local __res=0
enterfun
counter=1
while true
do
if [ -z "${ini_name[${PKG}_${counter}]+x}" ]
then
# we are done
break
fi
name="${ini_name[${PKG}_${counter}]}"
location="${ini_location[${PKG}_${counter}]}"
docker_build_and_run
__res=$?
counter=$((counter+1))
if [ ${__res} -ne 0 ]
then
break
fi
done
if [ ${counter} -eq 1 ]
then
echo -e "${YELLOW}WARNING: No docker config found!${NC}"
fi
exitfun
return ${__res}
}
docker_debian_tests() {
local __res=0
enterfun
PKG=DEB
packages_to_test=("${deb_packages[@]}")
docker_run_tests
__res=$?
exitfun
return ${__res}
}
docker_rpm_tests() {
local __res=0
enterfun
PKG=RPM
packages_to_test=("${rpm_packages[@]}")
docker_run_tests
__res=$?
exitfun
return ${__res}
}
docker_run() {
local __res=0
enterfun
for _ in 1
do
log_dir="${fdb_build}/pkg_tester"
mkdir -p "${log_dir}"
# create list of package files to test
IFS=':' read -ra packages <<< "${fdb_packages}"
deb_packages=()
rpm_packages=()
for i in "${packages[@]}"
echo "Testing the following:"
echo "======================"
for K in "${vms[@]}"
do
if [[ "${i}" =~ .*".deb" ]]
then
if [ ${run_deb_tests} -ne 0 ]
then
deb_packages+=("${i}")
fi
else
if [ ${run_rpm_tests} -ne 0 ]
then
rpm_packages+=("${i}")
fi
fi
done
do_deb_tests=0
do_rpm_tests=0
if [ "${#deb_packages[@]}" -gt 0 ]
then
do_deb_tests=1
echo "Will test the following debian packages:"
echo "========================================"
for i in "${deb_packages[@]}"
curr_packages=( $(cd ${fdb_build}/packages; ls | grep -P ${ini_packages[${K}]} ) )
echo "Will test the following ${#curr_packages[@]} packages in docker-image ${K}:"
for p in "${curr_packages[@]}"
do
echo " - ${i}"
echo " ${p}"
done
echo
fi
if [ "${#rpm_packages[@]}" -gt 0 ]
done
log_dir="${fdb_build}/pkg_tester"
pipe_file="${fdb_build}/pkg_tester.pipe"
lock_file="${fdb_build}/pkg_tester.lock"
if [ -p "${pipe_file}" ]
then
do_rpm_tests=1
echo "Will test the following rpm packages"
echo "===================================="
for i in "${rpm_packages[@]}"
rm "${pipe_file}"
successOr "Could not delete old pipe file"
fi
if [ -f "${lock_file}" ]
then
rm "${lock_file}"
successOr "Could not delete old pipe file"
fi
touch "${lock_file}"
successOr "Could not create lock file"
mkfifo "${pipe_file}"
successOr "Could not create pipe file"
mkdir -p "${log_dir}"
# setup the containers
# TODO: shall we make this parallel as well?
for vm in "${vms[@]}"
do
curr_name="${ini_name[$vm]}"
curr_location="${ini_location[$vm]}"
if [[ "$curr_location" = /* ]]
then
cd "${curr_location}"
else
cd ${source_dir}/../${curr_location}
fi
docker_buid_logs="${log_dir}/docker_build_${curr_name}"
docker build . -t ${curr_name} 1> "${docker_buid_logs}.log" 2> "${docker_buid_logs}.err"
successOr "Building Docker image ${name} failed - see ${docker_buid_logs}.log and ${docker_buid_logs}.err"
done
if [ ! -z "${tests_to_run+x}"]
then
tests=()
IFS=';' read -ra tests <<< "${tests_to_run}"
fi
for vm in "${vms[@]}"
do
curr_name="${ini_name[$vm]}"
curr_format="${ini_format[$vm]}"
curr_packages=( $(cd ${fdb_build}/packages; ls | grep -P ${ini_packages[${vm}]} ) )
for curr_test in "${tests[@]}"
do
echo " - ${i}"
if [ "${#docker_ids[@]}" -ge "${docker_parallelism}" ]
then
docker_wait_any
fi
echo "Starting Test ${curr_name}/${curr_test}"
log_file="${log_dir}/${curr_name}_${curr_test}.log"
err_file="${log_dir}/${curr_name}_${curr_test}.err"
docker_id=$( docker run -d -v "${fdb_source}:/foundationdb"\
-v "${fdb_build}:/build"\
${curr_name} /sbin/init )
{
docker exec "${docker_id}" bash \
/foundationdb/build/cmake/package_tester/${curr_format}_tests.sh -n ${curr_test} ${curr_packages[@]}\
2> ${err_file} 1> ${log_file}
res=$?
if [ "${pruning_strategy}" = "ALL" ]
then
docker kill "${docker_id}" > /dev/null
elif [ "${res}" -eq 0 ] && [ "${pruning_strategy}" = "SUCCEEDED" ]
then
docker kill "${docker_id}" > /dev/null
elif [ "${res}" -ne 0 ] && [ "${pruning_strategy}" = "FAILED" ]
then
docker kill "${docker_id}" > /dev/null
fi
flock "${lock_file}" echo "${docker_id};${res}" >> "${pipe_file}"
} &
docker_ids+=( "${docker_id}" )
docker_threads+=( "${curr_name}/${curr_test}" )
docker_logs+=( "${log_file}" )
docker_error_logs+=( "${err_file}" )
done
fi
if [ "${do_deb_tests}" -eq 0 ] && [ "${do_rpm_tests}" -eq 0 ]
then
echo "nothing to do"
fi
if [ "${do_deb_tests}" -ne 0 ]
then
docker_debian_tests
fi
if [ "${do_rpm_tests}" -ne 0 ]
then
docker_rpm_tests
fi
done
docker_wait_all
rm ${pipe_file}
if [ "${#failed_tests[@]}" -eq 0 ]
then
echo -e "${GREEN}SUCCESS${NC}"
@ -235,6 +180,6 @@ then
fi
done
exitfun
return ${__res}
return "${__res}"
}
fi

View File

@ -0,0 +1,23 @@
#!/usr/bin/env bash
# This module has to be included first and only once.
# This is because of a limitation of older bash versions
# that doesn't allow us to declare associative arrays
# globally.
if [ -z "${global_sh_included+x}"]
then
global_sh_included=1
else
echo "global.sh can only be included once"
exit 1
fi
declare -A ini_name
declare -A ini_location
declare -A ini_packages
declare -A ini_format
declare -A test_start_state
declare -A test_exit_state
declare -a tests
declare -a vms

View File

@ -6,11 +6,13 @@ then
source ${source_dir}/modules/util.sh
conf_save_extension=".rpmsave"
install() {
local __res=0
enterfun
cd /build
declare -ga package_names
cd /build/packages
package_names=()
for f in "${package_files[@]}"
do
package_names+=( "$(rpm -qp ${f})" )

View File

@ -25,11 +25,12 @@ EOF
do
case ${opt} in
h )
arguments_usage
test_args_usage
__res=2
break
;;
n )
echo "test_name=${OPTARG}"
test_name="${OPTARG}"
;;
\? )

View File

@ -28,10 +28,6 @@
# build directory can be found in `/build`, the
# source code will be located in `/foundationdb`
declare -A test_start_state
declare -A test_exit_state
declare -a tests
if [ -z "${tests_sh_included}" ]
then
tests_sh_included=1
@ -106,13 +102,23 @@ then
uninstall
# make sure config didn't get deleted
if [ ! -f /etc/foundationdb/fdb.cluster ] || [ ! -f /etc/foundationdb/foundationdb.conf ]
# RPM, however, renames the file on remove, so we need to check for this
conffile="/etc/foundationdb/foundationdb.conf${conf_save_extension}"
if [ ! -f /etc/foundationdb/fdb.cluster ] || [ ! -f "${conffile}" ]
then
fail "Uninstall removed configuration"
fi
differences="$(diff /tmp/foundationdb.conf ${conffile})"
if [ -n "${differences}" ]
then
fail "${conffile} changed during remove"
fi
differences="$(diff /tmp/fdb.cluster /etc/foundationdb/fdb.cluster)"
if [ -n "${differences}" ]
then
fail "/etc/foundationdb/fdb.cluster changed during remove"
fi
rm /tmp/fdb.cluster
rm /tmp/foundationdb.conf
return 0
}
fi

View File

@ -24,7 +24,7 @@ then
successOr ${@:1}
}
successOrOr() {
successOr() {
local __res=$?
if [ ${__res} -ne 0 ]
then

View File

@ -2,6 +2,7 @@
source_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
source ${source_dir}/modules/globals.sh
source ${source_dir}/modules/util.sh
source ${source_dir}/modules/rpm.sh
source ${source_dir}/modules/tests.sh

View File

@ -2,6 +2,7 @@
source_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
source ${source_dir}/modules/globals.sh
source ${source_dir}/modules/config.sh
source ${source_dir}/modules/util.sh
source ${source_dir}/modules/arguments.sh

View File

@ -0,0 +1,11 @@
set(error_msg
${CMAKE_SOURCE_DIR}/versions.h exists. This usually means that
you did run `make` "(the old build system)" in this directory before.
This can result in unexpected behavior. run `make clean` in the
source directory to continue)
if(EXISTS "${FILE}")
list(JOIN error_msg " " err)
message(FATAL_ERROR "${err}")
else()
message(STATUS "${FILE} does not exist")
endif()

33
cmake/CPackConfig.cmake Normal file
View File

@ -0,0 +1,33 @@
# RPM specifics
if(CPACK_GENERATOR MATCHES "RPM")
set(CPACK_PACKAGING_INSTALL_PREFIX "/")
if(GENERATE_EL6)
set(CPACK_COMPONENTS_ALL clients-el6 server-el6)
else()
set(CPACK_COMPONENTS_ALL clients-el7 server-el7)
endif()
set(CPACK_RESOURCE_FILE_README ${CMAKE_SOURCE_DIR}/README.md)
set(CPACK_RESOURCE_FILE_LICENSE ${CMAKE_SOURCE_DIR}/LICENSE)
elseif(CPACK_GENERATOR MATCHES "DEB")
set(CPACK_PACKAGING_INSTALL_PREFIX "/")
set(CPACK_COMPONENTS_ALL clients-deb server-deb)
set(CPACK_RESOURCE_FILE_README ${CMAKE_SOURCE_DIR}/README.md)
set(CPACK_RESOURCE_FILE_LICENSE ${CMAKE_SOURCE_DIR}/LICENSE)
elseif(CPACK_GENERATOR MATCHES "PackageMaker")
set(CPACK_PACKAGING_INSTALL_PREFIX "/")
set(CPACK_COMPONENTS_ALL clients-pm server-pm)
set(CPACK_STRIP_FILES TRUE)
set(CPACK_PREFLIGHT_SERVER_SCRIPT ${CMAKE_SOURCE_DIR}/packaging/osx/scripts-server/preinstall)
set(CPACK_POSTFLIGHT_SERVER_SCRIPT ${CMAKE_SOURCE_DIR}/packaging/osx/scripts-server/postinstall)
set(CPACK_POSTFLIGHT_CLIENTS_SCRIPT ${CMAKE_SOURCE_DIR}/packaging/osx/scripts-server/preinstall)
set(CPACK_RESOURCE_FILE_README ${CMAKE_SOURCE_DIR}/packaging/osx/resources/conclusion.rtf)
set(CPACK_PRODUCTBUILD_RESOURCES_DIR ${CMAKE_SOURCE_DIR}/packaging/osx/resources)
set(CPACK_RESOURCE_FILE_LICENSE ${CMAKE_BINARY_DIR}/License.txt)
elseif(CPACK_GENERATOR MATCHES "TGZ")
set(CPACK_STRIP_FILES TRUE)
set(CPACK_COMPONENTS_ALL clients-tgz server-tgz)
set(CPACK_RESOURCE_FILE_README ${CMAKE_SOURCE_DIR}/README.md)
set(CPACK_RESOURCE_FILE_LICENSE ${CMAKE_SOURCE_DIR}/LICENSE)
else()
message(FATAL_ERROR "Unsupported package format ${CPACK_GENERATOR}")
endif()

View File

@ -44,7 +44,7 @@ set(CMAKE_REQUIRED_LIBRARIES c)
if(WIN32)
add_compile_options(/W3 /EHsc /std:c++14 /bigobj)
add_compile_options(/W3 /EHsc /std:c++14 /bigobj $<$<CONFIG:Release>:/Zi>)
else()
if(USE_GOLD_LINKER)
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fuse-ld=gold -Wl,--disable-new-dtags")

View File

@ -68,6 +68,24 @@ function(generate_coverage_xml)
add_dependencies(${target_name} coverage_${target_name})
endfunction()
# This function asserts that `versions.h` does not exist in the source
# directory. It does this in the prebuild phase of the target.
# This is an ugly hack that should make sure that cmake isn't used with
# a source directory in which FDB was previously built with `make`.
function(assert_no_version_h target)
message(STATUS "Check versions.h on ${target}")
set(target_name "${target}_versions_h_check")
add_custom_target("${target_name}"
COMMAND "${CMAKE_COMMAND}" -DFILE="${CMAKE_SOURCE_DIR}/versions.h"
-P "${CMAKE_SOURCE_DIR}/cmake/AssertFileDoesntExist.cmake"
COMMAND echo
"${CMAKE_COMMAND}" -P "${CMAKE_SOURCE_DIR}/cmake/AssertFileDoesntExist.cmake"
-DFILE="${CMAKE_SOURCE_DIR}/versions.h"
COMMENT "Check old build system wasn't used in source dir")
add_dependencies(${target} ${target_name})
endfunction()
function(add_flow_target)
set(options EXECUTABLE STATIC_LIBRARY
DYNAMIC_LIBRARY)
@ -138,6 +156,7 @@ function(add_flow_target)
add_custom_target(${AFT_NAME}_actors DEPENDS ${generated_files})
add_dependencies(${AFT_NAME} ${AFT_NAME}_actors)
assert_no_version_h(${AFT_NAME}_actors)
generate_coverage_xml(${AFT_NAME})
endif()
target_include_directories(${AFT_NAME} PUBLIC ${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR})

View File

@ -2,11 +2,11 @@
# Helper Functions
################################################################################
function(install_symlink)
function(install_symlink_impl)
if (NOT WIN32)
set(options "")
set(one_value_options COMPONENT TO DESTINATION)
set(multi_value_options)
set(one_value_options TO DESTINATION)
set(multi_value_options COMPONENTS)
cmake_parse_arguments(SYM "${options}" "${one_value_options}" "${multi_value_options}" "${ARGN}")
file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/symlinks)
@ -14,95 +14,143 @@ function(install_symlink)
get_filename_component(dest_dir ${SYM_DESTINATION} DIRECTORY)
set(sl ${CMAKE_CURRENT_BINARY_DIR}/symlinks/${fname})
execute_process(COMMAND ${CMAKE_COMMAND} -E create_symlink ${SYM_TO} ${sl})
install(FILES ${sl} DESTINATION ${dest_dir} COMPONENT ${SYM_COMPONENT})
foreach(component IN LISTS SYM_COMPONENTS)
install(FILES ${sl} DESTINATION ${dest_dir} COMPONENT ${component})
endforeach()
endif()
endfunction()
if(NOT INSTALL_LAYOUT)
if(WIN32)
set(DEFAULT_INSTALL_LAYOUT "WIN")
else()
set(DEFAULT_INSTALL_LAYOUT "STANDALONE")
function(install_symlink)
if(NOT WIN32 AND NOT OPEN_FOR_IDE)
set(options "")
set(one_value_options COMPONENT LINK_DIR FILE_DIR LINK_NAME FILE_NAME)
set(multi_value_options "")
cmake_parse_arguments(IN "${options}" "${one_value_options}" "${multi_value_options}" "${ARGN}")
set(rel_path "")
string(REGEX MATCHALL "\\/" slashes "${IN_LINK_NAME}")
foreach(ignored IN LISTS slashes)
set(rel_path "../${rel_path}")
endforeach()
if("${IN_FILE_DIR}" MATCHES "bin")
if("${IN_LINK_DIR}" MATCHES "lib")
install_symlink_impl(
TO "../${rel_path}bin/${IN_FILE_NAME}"
DESTINATION "lib/${IN_LINK_NAME}"
COMPONENTS "${IN_COMPONENT}-tgz")
install_symlink_impl(
TO "../${rel_path}bin/${IN_FILE_NAME}"
DESTINATION "usr/lib64/${IN_LINK_NAME}"
COMPONENTS "${IN_COMPONENT}-el6"
"${IN_COMPONENT}-el7"
"${IN_COMPONENT}-deb")
install_symlink_impl(
TO "../${rel_path}bin/${IN_FILE_NAME}"
DESTINATION "usr/lib64/${IN_LINK_NAME}"
COMPONENTS "${IN_COMPONENT}-deb")
elseif("${IN_LINK_DIR}" MATCHES "bin")
install_symlink_impl(
TO "../${rel_path}bin/${IN_FILE_NAME}"
DESTINATION "bin/${IN_LINK_NAME}"
COMPONENTS "${IN_COMPONENT}-tgz")
install_symlink_impl(
TO "../${rel_path}bin/${IN_FILE_NAME}"
DESTINATION "usr/bin/${IN_LINK_NAME}"
COMPONENTS "${IN_COMPONENT}-el6"
"${IN_COMPONENT}-el7"
"${IN_COMPONENT}-deb")
elseif("${IN_LINK_DIR}" MATCHES "fdbmonitor")
install_symlink_impl(
TO "../../${rel_path}bin/${IN_FILE_NAME}"
DESTINATION "lib/foundationdb/${IN_LINK_NAME}"
COMPONENTS "${IN_COMPONENT}-tgz")
install_symlink_impl(
TO "../../${rel_path}bin/${IN_FILE_NAME}"
DESTINATION "usr/lib/foundationdb/${IN_LINK_NAME}"
COMPONENTS "${IN_COMPONENT}-el6"
"${IN_COMPONENT}-el7"
"${IN_COMPONENT}-deb")
else()
message(FATAL_ERROR "Unknown LINK_DIR ${IN_LINK_DIR}")
endif()
else()
message(FATAL_ERROR "Unknown FILE_DIR ${IN_FILE_DIR}")
endif()
endif()
endif()
set(INSTALL_LAYOUT "${DEFAULT_INSTALL_LAYOUT}"
CACHE STRING "Installation directory layout. Options are: TARGZ (as in tar.gz installer), WIN, STANDALONE, RPM, DEB, OSX")
endfunction()
set(DIR_LAYOUT ${INSTALL_LAYOUT})
if(DIR_LAYOUT MATCHES "TARGZ")
set(DIR_LAYOUT "STANDALONE")
function(fdb_install)
if(NOT WIN32 AND NOT OPEN_FOR_IDE)
set(options EXPORT)
set(one_value_options COMPONENT DESTINATION)
set(multi_value_options TARGETS FILES DIRECTORY)
cmake_parse_arguments(IN "${options}" "${one_value_options}" "${multi_value_options}" "${ARGN}")
if(IN_TARGETS)
set(args TARGETS ${IN_TARGETS})
elseif(IN_FILES)
set(args FILES ${IN_FILES})
elseif(IN_DIRECTORY)
set(args DIRECTORY ${IN_DIRECTORY})
else()
message(FATAL_ERROR "Expected FILES or TARGETS")
endif()
if(IN_EXPORT)
set(args EXPORT)
endif()
if("${IN_DESTINATION}" STREQUAL "bin")
install(${args} DESTINATION "bin" COMPONENT "${IN_COMPONENT}-tgz")
install(${args} DESTINATION "usr/bin" COMPONENT "${IN_COMPONENT}-deb")
install(${args} DESTINATION "usr/bin" COMPONENT "${IN_COMPONENT}-el6")
install(${args} DESTINATION "usr/bin" COMPONENT "${IN_COMPONENT}-el7")
install(${args} DESTINATION "usr/local/bin" COMPONENT "${IN_COMPONENT}-pm")
elseif("${IN_DESTINATION}" STREQUAL "sbin")
install(${args} DESTINATION "sbin" COMPONENT "${IN_COMPONENT}-tgz")
install(${args} DESTINATION "usr/sbin" COMPONENT "${IN_COMPONENT}-deb")
install(${args} DESTINATION "usr/sbin" COMPONENT "${IN_COMPONENT}-el6")
install(${args} DESTINATION "usr/sbin" COMPONENT "${IN_COMPONENT}-el7")
install(${args} DESTINATION "usr/local/libexec" COMPONENT "${IN_COMPONENT}-pm")
elseif("${IN_DESTINATION}" STREQUAL "fdbmonitor")
install(${args} DESTINATION "libexec" COMPONENT "${IN_COMPONENT}-tgz")
install(${args} DESTINATION "usr/lib/foundationdb" COMPONENT "${IN_COMPONENT}-deb")
install(${args} DESTINATION "usr/lib/foundationdb" COMPONENT "${IN_COMPONENT}-el6")
install(${args} DESTINATION "usr/lib/foundationdb" COMPONENT "${IN_COMPONENT}-el7")
install(${args} DESTINATION "usr/local/libexec" COMPONENT "${IN_COMPONENT}-pm")
elseif("${IN_DESTINATION}" STREQUAL "include")
install(${args} DESTINATION "include" COMPONENT "${IN_COMPONENT}-tgz")
install(${args} DESTINATION "usr/include" COMPONENT "${IN_COMPONENT}-deb")
install(${args} DESTINATION "usr/include" COMPONENT "${IN_COMPONENT}-el6")
install(${args} DESTINATION "usr/include" COMPONENT "${IN_COMPONENT}-el7")
install(${args} DESTINATION "usr/local/include" COMPONENT "${IN_COMPONENT}-pm")
elseif("${IN_DESTINATION}" STREQUAL "etc")
install(${args} DESTINATION "etc/foundationdb" COMPONENT "${IN_COMPONENT}-tgz")
install(${args} DESTINATION "etc/foundationdb" COMPONENT "${IN_COMPONENT}-deb")
install(${args} DESTINATION "etc/foundationdb" COMPONENT "${IN_COMPONENT}-el6")
install(${args} DESTINATION "etc/foundationdb" COMPONENT "${IN_COMPONENT}-el7")
install(${args} DESTINATION "usr/local/etc/foundationdb" COMPONENT "${IN_COMPONENT}-pm")
elseif("${IN_DESTINATION}" STREQUAL "log")
install(${args} DESTINATION "log/foundationdb" COMPONENT "${IN_COMPONENT}-tgz")
install(${args} DESTINATION "var/log/foundationdb" COMPONENT "${IN_COMPONENT}-deb")
install(${args} DESTINATION "var/log/foundationdb" COMPONENT "${IN_COMPONENT}-el6")
install(${args} DESTINATION "var/log/foundationdb" COMPONENT "${IN_COMPONENT}-el7")
elseif("${IN_DESTINATION}" STREQUAL "data")
install(${args} DESTINATION "lib/foundationdb" COMPONENT "${IN_COMPONENT}-tgz")
install(${args} DESTINATION "var/lib/foundationdb/data" COMPONENT "${IN_COMPONENT}-deb")
install(${args} DESTINATION "var/lib/foundationdb/data" COMPONENT "${IN_COMPONENT}-el6")
install(${args} DESTINATION "var/lib/foundationdb/data" COMPONENT "${IN_COMPONENT}-el7")
endif()
endif()
endfunction()
if(APPLE)
set(CPACK_GENERATOR TGZ PackageMaker)
else()
set(CPACK_GENERATOR RPM DEB TGZ)
endif()
get_property(LIB64 GLOBAL PROPERTY FIND_LIBRARY_USE_LIB64_PATHS)
set(CPACK_PACKAGE_CHECKSUM SHA256)
set(FDB_CONFIG_DIR "etc/foundationdb")
if("${LIB64}" STREQUAL "TRUE")
set(LIBSUFFIX 64)
else()
set(LIBSUFFIX "")
endif()
set(FDB_LIB_NOSUFFIX "lib")
if(DIR_LAYOUT MATCHES "STANDALONE")
set(FDB_LIB_DIR "lib${LIBSUFFIX}")
set(FDB_LIBEXEC_DIR "${FDB_LIB_DIR}")
set(FDB_BIN_DIR "bin")
set(FDB_SBIN_DIR "sbin")
set(FDB_INCLUDE_INSTALL_DIR "include")
set(FDB_PYTHON_INSTALL_DIR "${FDB_LIB_DIR}/python2.7/site-packages/fdb")
set(FDB_SHARE_DIR "share")
elseif(DIR_LAYOUT MATCHES "WIN")
set(CPACK_GENERATOR "ZIP")
set(FDB_CONFIG_DIR "etc")
set(FDB_LIB_DIR "lib")
set(FDB_LIB_NOSUFFIX "lib")
set(FDB_LIBEXEC_DIR "bin")
set(FDB_SHARE_DIR "share")
set(FDB_BIN_DIR "bin")
set(FDB_SBIN_DIR "bin")
set(FDB_INCLUDE_INSTALL_DIR "include")
set(FDB_PYTHON_INSTALL_DIR "${FDB_LIB_DIR}/python2.7/site-packages/fdb")
set(FDB_SHARE_DIR "share")
elseif(DIR_LAYOUT MATCHES "OSX")
set(CPACK_GENERATOR productbuild)
set(CPACK_PACKAGING_INSTALL_PREFIX "/")
set(FDB_CONFIG_DIR "usr/local/etc/foundationdb")
set(FDB_LIB_DIR "usr/local/lib")
set(FDB_LIB_NOSUFFIX "usr/local/lib")
set(FDB_LIBEXEC_DIR "usr/local/libexec")
set(FDB_BIN_DIR "usr/local/bin")
set(FDB_SBIN_DIR "usr/local/libexec")
set(FDB_INCLUDE_INSTALL_DIR "usr/local/include")
set(FDB_PYTHON_INSTALL_DIR "Library/Python/2.7/site-packages/fdb")
set(FDB_SHARE_DIR "usr/local/share")
else()
if(DIR_LAYOUT MATCHES "RPM")
set(CPACK_GENERATOR RPM)
else()
# DEB
set(CPACK_GENERATOR "DEB")
set(LIBSUFFIX "")
endif()
set(CMAKE_INSTALL_PREFIX "/")
set(CPACK_PACKAGING_INSTALL_PREFIX "/")
set(FDB_CONFIG_DIR "etc/foundationdb")
set(FDB_LIB_DIR "usr/lib${LIBSUFFIX}")
set(FDB_LIB_NOSUFFIX "usr/lib")
set(FDB_LIBEXEC_DIR ${FDB_LIB_DIR})
set(FDB_BIN_DIR "usr/bin")
set(FDB_SBIN_DIR "usr/sbin")
set(FDB_INCLUDE_INSTALL_DIR "usr/include")
set(FDB_PYTHON_INSTALL_DIR "${FDB_LIB_DIR}/python2.7/site-packages/fdb")
set(FDB_SHARE_DIR "usr/share")
endif()
if(INSTALL_LAYOUT MATCHES "OSX")
set(FDBMONITOR_INSTALL_LOCATION "${FDB_LIBEXEC_DIR}")
else()
set(FDBMONITOR_INSTALL_LOCATION "${FDB_LIB_NOSUFFIX}/foundationdb")
endif()
set(CPACK_PROJECT_CONFIG_FILE "${CMAKE_SOURCE_DIR}/cmake/CPackConfig.cmake")
################################################################################
# Version information
@ -130,104 +178,169 @@ set(CPACK_PACKAGE_DESCRIPTION_SUMMARY
"FoundationDB is a scalable, fault-tolerant, ordered key-value store with full ACID transactions.")
set(CPACK_PACKAGE_ICON ${CMAKE_SOURCE_DIR}/packaging/foundationdb.ico)
set(CPACK_PACKAGE_CONTACT "The FoundationDB Community")
set(CPACK_COMPONENT_server_DEPENDS clients)
if (INSTALL_LAYOUT MATCHES "OSX")
# macOS needs a file extension for the LICENSE file
set(CPACK_RESOURCE_FILE_README ${CMAKE_SOURCE_DIR}/packaging/osx/resources/conclusion.rtf)
set(CPACK_PRODUCTBUILD_RESOURCES_DIR ${CMAKE_SOURCE_DIR}/packaging/osx/resources)
configure_file(${CMAKE_SOURCE_DIR}/LICENSE ${CMAKE_BINARY_DIR}/License.txt COPYONLY)
set(CPACK_RESOURCE_FILE_LICENSE ${CMAKE_BINARY_DIR}/License.txt)
else()
set(CPACK_RESOURCE_FILE_README ${CMAKE_SOURCE_DIR}/README.md)
set(CPACK_RESOURCE_FILE_LICENSE ${CMAKE_SOURCE_DIR}/LICENSE)
set(CPACK_COMPONENT_SERVER-EL6_DEPENDS clients-el6)
set(CPACK_COMPONENT_SERVER-EL7_DEPENDS clients-el7)
set(CPACK_COMPONENT_SERVER-DEB_DEPENDS clients-deb)
set(CPACK_COMPONENT_SERVER-TGZ_DEPENDS clients-tgz)
set(CPACK_COMPONENT_SERVER-PM_DEPENDS clients-pm)
set(CPACK_COMPONENT_SERVER-EL6_DISPLAY_NAME "foundationdb-server")
set(CPACK_COMPONENT_SERVER-EL7_DISPLAY_NAME "foundationdb-server")
set(CPACK_COMPONENT_SERVER-DEB_DISPLAY_NAME "foundationdb-server")
set(CPACK_COMPONENT_SERVER-TGZ_DISPLAY_NAME "foundationdb-server")
set(CPACK_COMPONENT_SERVER-PM_DISPLAY_NAME "foundationdb-server")
set(CPACK_COMPONENT_CLIENTS-EL6_DISPLAY_NAME "foundationdb-clients")
set(CPACK_COMPONENT_CLIENTS-EL7_DISPLAY_NAME "foundationdb-clients")
set(CPACK_COMPONENT_CLIENTS-DEB_DISPLAY_NAME "foundationdb-clients")
set(CPACK_COMPONENT_CLIENTS-TGZ_DISPLAY_NAME "foundationdb-clients")
set(CPACK_COMPONENT_CLIENTS-PM_DISPLAY_NAME "foundationdb-clients")
# macOS needs a file extension for the LICENSE file
configure_file(${CMAKE_SOURCE_DIR}/LICENSE ${CMAKE_BINARY_DIR}/License.txt COPYONLY)
################################################################################
# Filename of packages
################################################################################
if(NOT FDB_RELEASE)
set(prerelease_string ".PRERELEASE")
endif()
set(clients-filename "foundationdb-clients-${PROJECT_VERSION}.${CURRENT_GIT_VERSION}${prerelease_string}")
set(server-filename "foundationdb-server-${PROJECT_VERSION}.${CURRENT_GIT_VERSION}${prerelease_string}")
################################################################################
# Configuration for RPM
################################################################################
if(UNIX AND NOT APPLE)
install(DIRECTORY DESTINATION "var/log/foundationdb" COMPONENT server)
install(DIRECTORY DESTINATION "var/lib/foundationdb/data" COMPONENT server)
endif()
set(CPACK_RPM_PACKAGE_LICENSE "Apache 2.0")
if(INSTALL_LAYOUT MATCHES "RPM")
set(CPACK_RPM_server_USER_FILELIST
"%config(noreplace) /etc/foundationdb/foundationdb.conf"
"%attr(0700,foundationdb,foundationdb) /var/log/foundationdb"
"%attr(0700, foundationdb, foundationdb) /var/lib/foundationdb")
set(CPACK_RPM_EXCLUDE_FROM_AUTO_FILELIST_ADDITION
"/usr/sbin"
"/usr/share/java"
"/usr/lib64/python2.7"
"/usr/lib64/python2.7/site-packages"
"/var"
"/var/log"
"/var/lib"
"/lib"
"/lib/systemd"
"/lib/systemd/system"
"/etc/rc.d/init.d")
set(CPACK_RPM_server_DEBUGINFO_PACKAGE ON)
set(CPACK_RPM_clients_DEBUGINFO_PACKAGE ON)
set(CPACK_RPM_BUILD_SOURCE_DIRS_PREFIX /usr/src)
set(CPACK_RPM_COMPONENT_INSTALL ON)
set(CPACK_RPM_clients_PRE_INSTALL_SCRIPT_FILE
${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/preclients.sh)
set(CPACK_RPM_clients_POST_INSTALL_SCRIPT_FILE
${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/postclients.sh)
set(CPACK_RPM_server_PRE_INSTALL_SCRIPT_FILE
${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/preserver.sh)
set(CPACK_RPM_server_POST_INSTALL_SCRIPT_FILE
${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/postserver.sh)
set(CPACK_RPM_server_PRE_UNINSTALL_SCRIPT_FILE
${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/preunserver.sh)
set(CPACK_RPM_server_PACKAGE_REQUIRES
"foundationdb-clients = ${FDB_MAJOR}.${FDB_MINOR}.${FDB_PATCH}, initscripts >= 9.03")
set(CPACK_RPM_server_PACKAGE_RE)
#set(CPACK_RPM_java_PACKAGE_REQUIRES
# "foundationdb-clients = ${FDB_MAJOR}.${FDB_MINOR}.${FDB_PATCH}")
set(CPACK_RPM_python_PACKAGE_REQUIRES
"foundationdb-clients = ${FDB_MAJOR}.${FDB_MINOR}.${FDB_PATCH}")
set(CPACK_RPM_PACKAGE_NAME "foundationdb")
set(CPACK_RPM_CLIENTS-EL6_PACKAGE_NAME "foundationdb-clients")
set(CPACK_RPM_CLIENTS-EL7_PACKAGE_NAME "foundationdb-clients")
set(CPACK_RPM_SERVER-EL6_PACKAGE_NAME "foundationdb-server")
set(CPACK_RPM_SERVER-EL7_PACKAGE_NAME "foundationdb-server")
set(CPACK_RPM_CLIENTS-EL6_FILE_NAME "${clients-filename}.el6.x86_64.rpm")
set(CPACK_RPM_CLIENTS-EL7_FILE_NAME "${clients-filename}.el7.x86_64.rpm")
set(CPACK_RPM_SERVER-EL6_FILE_NAME "${server-filename}.el6.x86_64.rpm")
set(CPACK_RPM_SERVER-EL7_FILE_NAME "${server-filename}.el7.x86_64.rpm")
set(CPACK_RPM_CLIENTS-EL6_DEBUGINFO_FILE_NAME "${clients-filename}.el6-debuginfo.x86_64.rpm")
set(CPACK_RPM_CLIENTS-EL7_DEBUGINFO_FILE_NAME "${clients-filename}.el7-debuginfo.x86_64.rpm")
set(CPACK_RPM_SERVER-EL6_DEBUGINFO_FILE_NAME "${server-filename}.el6-debuginfo.x86_64.rpm")
set(CPACK_RPM_SERVER-EL7_DEBUGINFO_FILE_NAME "${server-filename}.el7-debuginfo.x86_64.rpm")
file(MAKE_DIRECTORY "${CMAKE_BINARY_DIR}/packaging/emptydir")
fdb_install(DIRECTORY "${CMAKE_BINARY_DIR}/packaging/emptydir/" DESTINATION data COMPONENT server)
fdb_install(DIRECTORY "${CMAKE_BINARY_DIR}/packaging/emptydir/" DESTINATION log COMPONENT server)
set(CPACK_RPM_SERVER-EL6_USER_FILELIST
"%config(noreplace) /etc/foundationdb/foundationdb.conf"
"%attr(0700,foundationdb,foundationdb) /var/log/foundationdb"
"%attr(0700, foundationdb, foundationdb) /var/lib/foundationdb")
set(CPACK_RPM_SERVER-EL7_USER_FILELIST
"%config(noreplace) /etc/foundationdb/foundationdb.conf"
"%attr(0700,foundationdb,foundationdb) /var/log/foundationdb"
"%attr(0700, foundationdb, foundationdb) /var/lib/foundationdb")
set(CPACK_RPM_EXCLUDE_FROM_AUTO_FILELIST_ADDITION
"/usr/sbin"
"/usr/share/java"
"/usr/lib64/python2.7"
"/usr/lib64/python2.7/site-packages"
"/var"
"/var/log"
"/var/lib"
"/lib"
"/lib/systemd"
"/lib/systemd/system"
"/etc/rc.d/init.d")
set(CPACK_RPM_DEBUGINFO_PACKAGE ON)
#set(CPACK_RPM_BUILD_SOURCE_DIRS_PREFIX /usr/src)
set(CPACK_RPM_COMPONENT_INSTALL ON)
set(CPACK_RPM_CLIENTS-EL6_PRE_INSTALL_SCRIPT_FILE
${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/preclients.sh)
set(CPACK_RPM_clients-el7_PRE_INSTALL_SCRIPT_FILE
${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/preclients.sh)
set(CPACK_RPM_CLIENTS-EL6_POST_INSTALL_SCRIPT_FILE
${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/postclients.sh)
set(CPACK_RPM_CLIENTS-EL7_POST_INSTALL_SCRIPT_FILE
${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/postclients.sh)
set(CPACK_RPM_SERVER-EL6_PRE_INSTALL_SCRIPT_FILE
${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/preserver.sh)
set(CPACK_RPM_SERVER-EL7_PRE_INSTALL_SCRIPT_FILE
${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/preserver.sh)
set(CPACK_RPM_SERVER-EL6_POST_INSTALL_SCRIPT_FILE
${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/postserver-el6.sh)
set(CPACK_RPM_SERVER-EL7_POST_INSTALL_SCRIPT_FILE
${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/postserver.sh)
set(CPACK_RPM_SERVER-EL6_PRE_UNINSTALL_SCRIPT_FILE
${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/preunserver.sh)
set(CPACK_RPM_SERVER-EL7_PRE_UNINSTALL_SCRIPT_FILE
${CMAKE_SOURCE_DIR}/packaging/rpm/scripts/preunserver.sh)
set(CPACK_RPM_SERVER-EL6_PACKAGE_REQUIRES
"foundationdb-clients = ${FDB_MAJOR}.${FDB_MINOR}.${FDB_PATCH}")
set(CPACK_RPM_SERVER-EL7_PACKAGE_REQUIRES
"foundationdb-clients = ${FDB_MAJOR}.${FDB_MINOR}.${FDB_PATCH}")
#set(CPACK_RPM_java_PACKAGE_REQUIRES
# "foundationdb-clients = ${FDB_MAJOR}.${FDB_MINOR}.${FDB_PATCH}")
#set(CPACK_RPM_python_PACKAGE_REQUIRES
# "foundationdb-clients = ${FDB_MAJOR}.${FDB_MINOR}.${FDB_PATCH}")
################################################################################
# Configuration for DEB
################################################################################
set(CPACK_DEBIAN_CLIENTS-DEB_FILE_NAME "${clients-filename}_amd64.deb")
set(CPACK_DEBIAN_SERVER-DEB_FILE_NAME "${server-filename}_amd64.deb")
set(CPACK_DEB_COMPONENT_INSTALL ON)
set(CPACK_DEBIAN_DEBUGINFO_PACKAGE ON)
set(CPACK_DEBIAN_PACKAGE_SECTION "database")
set(CPACK_DEBIAN_ENABLE_COMPONENT_DEPENDS ON)
set(CPACK_DEBIAN_SERVER-DEB_PACKAGE_NAME "foundationdb-server")
set(CPACK_DEBIAN_CLIENTS-DEB_PACKAGE_NAME "foundationdb-clients")
set(CPACK_DEBIAN_SERVER-DEB_PACKAGE_DEPENDS "adduser, libc6 (>= 2.12), python (>= 2.6), foundationdb-clients (= ${FDB_VERSION})")
set(CPACK_DEBIAN_CLIENTS-DEB_PACKAGE_DEPENDS "adduser, libc6 (>= 2.12)")
set(CPACK_DEBIAN_PACKAGE_HOMEPAGE "https://www.foundationdb.org")
set(CPACK_DEBIAN_CLIENTS-DEB_PACKAGE_CONTROL_EXTRA
${CMAKE_SOURCE_DIR}/packaging/deb/DEBIAN-foundationdb-clients/postinst)
set(CPACK_DEBIAN_SERVER-DEB_PACKAGE_CONTROL_EXTRA
${CMAKE_SOURCE_DIR}/packaging/deb/DEBIAN-foundationdb-server/conffiles
${CMAKE_SOURCE_DIR}/packaging/deb/DEBIAN-foundationdb-server/preinst
${CMAKE_SOURCE_DIR}/packaging/deb/DEBIAN-foundationdb-server/postinst
${CMAKE_SOURCE_DIR}/packaging/deb/DEBIAN-foundationdb-server/prerm
${CMAKE_SOURCE_DIR}/packaging/deb/DEBIAN-foundationdb-server/postrm)
################################################################################
# MacOS configuration
################################################################################
# macOS productbuild ("-pm") components: ship the uninstall script with the
# clients package and the fdbmonitor launchd daemon plist with the server package.
if(NOT WIN32)
install(PROGRAMS ${CMAKE_SOURCE_DIR}/packaging/osx/uninstall-FoundationDB.sh
DESTINATION "usr/local/foundationdb"
COMPONENT clients-pm)
install(FILES ${CMAKE_SOURCE_DIR}/packaging/osx/com.foundationdb.fdbmonitor.plist
DESTINATION "Library/LaunchDaemons"
COMPONENT server-pm)
endif()
################################################################################
# Configuration for DEB
################################################################################
# Debian package configuration (only active for the DEB install layout).
if(INSTALL_LAYOUT MATCHES "DEB")
# Build one .deb per component (clients, server) instead of a monolithic package.
set(CPACK_DEB_COMPONENT_INSTALL ON)
set(CPACK_DEBIAN_PACKAGE_SECTION "database")
set(CPACK_DEBIAN_ENABLE_COMPONENT_DEPENDS ON)
# The server package requires the exactly matching clients package version.
set(CPACK_DEBIAN_SERVER_PACKAGE_DEPENDS "adduser, libc6 (>= 2.12), python (>= 2.6), foundationdb-clients (= ${FDB_VERSION})")
set(CPACK_DEBIAN_CLIENTS_PACKAGE_DEPENDS "adduser, libc6 (>= 2.12)")
set(CPACK_DEBIAN_PACKAGE_HOMEPAGE "https://www.foundationdb.org")
# Debian maintainer scripts (postinst/preinst/prerm/postrm) and conffiles list.
set(CPACK_DEBIAN_CLIENTS_PACKAGE_CONTROL_EXTRA
${CMAKE_SOURCE_DIR}/packaging/deb/DEBIAN-foundationdb-clients/postinst)
set(CPACK_DEBIAN_SERVER_PACKAGE_CONTROL_EXTRA
${CMAKE_SOURCE_DIR}/packaging/deb/DEBIAN-foundationdb-server/conffiles
${CMAKE_SOURCE_DIR}/packaging/deb/DEBIAN-foundationdb-server/preinst
${CMAKE_SOURCE_DIR}/packaging/deb/DEBIAN-foundationdb-server/postinst
${CMAKE_SOURCE_DIR}/packaging/deb/DEBIAN-foundationdb-server/prerm
${CMAKE_SOURCE_DIR}/packaging/deb/DEBIAN-foundationdb-server/postrm)
endif()
################################################################################
# MacOS configuration
################################################################################
# macOS installer configuration (only active for the OSX install layout).
if(INSTALL_LAYOUT MATCHES "OSX")
# Pre/post-install scripts run by the macOS installer for each component.
# NOTE(review): the clients postflight points at scripts-server/preinstall —
# looks like a copy/paste; confirm a clients-specific script isn't intended.
set(CPACK_PREFLIGHT_SERVER_SCRIPT ${CMAKE_SOURCE_DIR}/packaging/osx/scripts-server/preinstall)
set(CPACK_POSTFLIGHT_SERVER_SCRIPT ${CMAKE_SOURCE_DIR}/packaging/osx/scripts-server/postinstall)
set(CPACK_POSTFLIGHT_CLIENTS_SCRIPT ${CMAKE_SOURCE_DIR}/packaging/osx/scripts-server/preinstall)
install(PROGRAMS ${CMAKE_SOURCE_DIR}/packaging/osx/uninstall-FoundationDB.sh
DESTINATION "usr/local/foundationdb"
COMPONENT clients)
install(FILES ${CMAKE_SOURCE_DIR}/packaging/osx/com.foundationdb.fdbmonitor.plist
DESTINATION "Library/LaunchDaemons"
COMPONENT server)
endif()
set(CPACK_ARCHIVE_COMPONENT_INSTALL ON)
set(CPACK_ARCHIVE_CLIENTS-TGZ_FILE_NAME "${clients-filename}.x86_64")
set(CPACK_ARCHIVE_SERVER-TGZ_FILE_NAME "${server-filename}.x86_64")
################################################################################
# Server configuration
@ -239,54 +352,33 @@ set(CLUSTER_DESCRIPTION1 ${description1} CACHE STRING "Cluster description")
set(CLUSTER_DESCRIPTION2 ${description2} CACHE STRING "Cluster description")
if(NOT WIN32)
if(INSTALL_LAYOUT MATCHES "OSX")
install(FILES ${CMAKE_SOURCE_DIR}/packaging/osx/foundationdb.conf.new
DESTINATION ${FDB_CONFIG_DIR}
COMPONENT server)
else()
install(FILES ${CMAKE_SOURCE_DIR}/packaging/foundationdb.conf
DESTINATION ${FDB_CONFIG_DIR}
COMPONENT server)
endif()
install(FILES ${CMAKE_SOURCE_DIR}/packaging/osx/foundationdb.conf.new
DESTINATION "usr/local/etc"
COMPONENT server-pm)
fdb_install(FILES ${CMAKE_SOURCE_DIR}/packaging/foundationdb.conf
DESTINATION etc
COMPONENT server)
install(FILES ${CMAKE_SOURCE_DIR}/packaging/argparse.py
DESTINATION "${FDB_LIB_NOSUFFIX}/foundationdb"
COMPONENT server)
DESTINATION "usr/lib/foundationdb"
COMPONENT server-el6)
install(FILES ${CMAKE_SOURCE_DIR}/packaging/make_public.py
DESTINATION "${FDB_LIB_NOSUFFIX}/foundationdb"
COMPONENT server)
else()
install(FILES ${CMAKE_BINARY_DIR}/fdb.cluster
DESTINATION "etc"
COMPONENT server)
endif()
if((INSTALL_LAYOUT MATCHES "RPM") OR (INSTALL_LAYOUT MATCHES "DEB"))
file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/packaging/foundationdb
${CMAKE_BINARY_DIR}/packaging/rpm)
install(
DIRECTORY ${CMAKE_BINARY_DIR}/packaging/foundationdb
DESTINATION "var/log"
COMPONENT server)
install(
DIRECTORY ${CMAKE_BINARY_DIR}/packaging/foundationdb
DESTINATION "var/lib"
COMPONENT server)
execute_process(
COMMAND pidof systemd
RESULT_VARIABLE IS_SYSTEMD
OUTPUT_QUIET
ERROR_QUIET)
DESTINATION "usr/lib/foundationdb"
COMPONENT server-el6)
install(FILES ${CMAKE_SOURCE_DIR}/packaging/argparse.py
DESTINATION "usr/lib/foundationdb"
COMPONENT server-deb)
install(FILES ${CMAKE_SOURCE_DIR}/packaging/make_public.py
DESTINATION "usr/lib/foundationdb"
COMPONENT server-deb)
install(FILES ${CMAKE_SOURCE_DIR}/packaging/rpm/foundationdb.service
DESTINATION "lib/systemd/system"
COMPONENT server)
if(INSTALL_LAYOUT MATCHES "RPM")
install(PROGRAMS ${CMAKE_SOURCE_DIR}/packaging/rpm/foundationdb-init
DESTINATION "etc/rc.d/init.d"
RENAME "foundationdb"
COMPONENT server)
else()
install(PROGRAMS ${CMAKE_SOURCE_DIR}/packaging/deb/foundationdb-init
DESTINATION "etc/init.d"
RENAME "foundationdb"
COMPONENT server)
endif()
COMPONENT server-el7)
install(PROGRAMS ${CMAKE_SOURCE_DIR}/packaging/rpm/foundationdb-init
DESTINATION "etc/rc.d/init.d"
RENAME "foundationdb"
COMPONENT server-el6)
install(PROGRAMS ${CMAKE_SOURCE_DIR}/packaging/deb/foundationdb-init
DESTINATION "etc/init.d"
RENAME "foundationdb"
COMPONENT server-deb)
endif()

View File

@ -376,12 +376,14 @@ The following options apply to all commands:
.. warning:: If multiple restore tasks are in progress they should be restoring to different prefixes or the result is undefined.
``-C <CLUSTER_FILE>``
Path to the cluster file that should be used to connect to the FoundationDB cluster you want to use. If not specified, a :ref:`default cluster file <default-cluster-file>` will be used.
``--blob_credentials <FILE>``
Use FILE as a :ref:`Blob Credential File<blob-credential-files>`. Can be used multiple times.
The following options apply to all commands except ``start``:
``-C <CLUSTER_FILE>``
Path to the cluster file that should be used to connect to the FoundationDB cluster you want to use. If not specified, a :ref:`default cluster file <default-cluster-file>` will be used.
.. _restore-start:
``start``
@ -395,6 +397,10 @@ The ``start`` command will start a new restore on the specified (or default) tag
``-r <BACKUP_URL>``
Required. Specifies the Backup URL for the source backup data to restore to the database. The source data must be accessible by the ``backup_agent`` processes for the cluster.
``--dest_cluster_file <CONNFILE>``
Required. The backup data will be restored into this cluster.
``-w``
Wait for the restore to reach a final state (such as complete) before exiting. Prints a progress update every few seconds. Behavior is identical to that of the wait command.
@ -413,6 +419,12 @@ The ``start`` command will start a new restore on the specified (or default) tag
``-v <VERSION>``
Instead of the latest version the backup can be restored to, restore to VERSION.
``--timestamp <YYYY-MM-DD.HH:MI:SS>``
Instead of the latest version the backup can be restored to, restore to a version from approximately the given timestamp. Requires orig_cluster_file to be specified.
``--orig_cluster_file <CONNFILE>``
The cluster file for the original database from which the backup was created. The original database is only needed to convert a --timestamp argument to a database version.
.. program:: fdbrestore abort
``abort``

View File

@ -127,7 +127,7 @@ The following format informally describes the JSON containing the status data. T
"name": < "initializing"
| "missing_data"
| "healing"
| "removing_redundant_teams"
| "optimizing_team_collections"
| "healthy_repartitioning"
| "healthy_removing_server"
| "healthy_rebalancing"

View File

@ -7,17 +7,21 @@ Release Notes
Features
--------
Improved replication mechanism, a new hierarchical replication technique that further significantly reduces the frequency of data loss events even when multiple machines (e.g., fault-tolerant zones in the current code) permanently fail at the same time. `(PR #964) <https://github.com/apple/foundationdb/pull/964>`.
* Improved replication mechanism, a new hierarchical replication technique that further significantly reduces the frequency of data loss events even when multiple machines (e.g., fault-tolerant zones in the current code) permanently fail at the same time. `(PR #964) <https://github.com/apple/foundationdb/pull/964>`.
* Get read version, read, and commit requests are counted and aggregated by server-side latency in configurable latency bands and output in JSON status. `(PR #1084) <https://github.com/apple/foundationdb/pull/1084>`_
* Added configuration option to choose log spilling implementation `(PR #1160) <https://github.com/apple/foundationdb/pull/1160>`_
* Added configuration option to choose log system implementation `(PR #1160) <https://github.com/apple/foundationdb/pull/1160>`_
* Batch priority transactions are now limited separately by ratekeeper and will be throttled at lower levels of cluster saturation. This makes it possible to run a more intense background load at saturation without significantly affecting normal priority transactions. It is still recommended not to run excessive loads at batch priority. `(PR #1198) <https://github.com/apple/foundationdb/pull/1198>`_
* Restore now requires the destination cluster to be specified explicitly to avoid confusion. `(PR #1240) <https://github.com/apple/foundationdb/pull/1240>`_
* Restore target version can now be specified by timestamp if the original cluster is available. `(PR #1240) <https://github.com/apple/foundationdb/pull/1240>`_
Performance
-----------
* Java: Successful commits and range reads no longer create ``FDBException`` objects to reduce memory pressure. `(Issue #1235) <https://github.com/apple/foundationdb/issues/1235>`_
Fixes
-----

View File

@ -5,21 +5,29 @@ add_flow_target(EXECUTABLE NAME fdbbackup SRCS ${FDBBACKUP_SRCS})
target_link_libraries(fdbbackup PRIVATE fdbclient)
if(NOT OPEN_FOR_IDE)
install(TARGETS fdbbackup DESTINATION ${FDB_BIN_DIR} COMPONENT clients)
fdb_install(TARGETS fdbbackup DESTINATION bin COMPONENT clients)
install_symlink(
TO /${FDB_BIN_DIR}/fdbbackup
DESTINATION ${FDB_LIB_DIR}/foundationdb/backup_agent/backup_agent
COMPONENT clients)
COMPONENT clients
FILE_DIR bin
LINK_DIR fdbmonitor
FILE_NAME fdbbackup
LINK_NAME backup_agent/backup_agent)
install_symlink(
TO /${FDB_BIN_DIR}/fdbbackup
DESTINATION ${FDB_BIN_DIR}/fdbrestore
COMPONENT clients)
COMPONENT clients
FILE_DIR bin
LINK_DIR bin
FILE_NAME fdbbackup
LINK_NAME fdbrestore)
install_symlink(
TO /${FDB_BIN_DIR}/fdbbackup
DESTINATION ${FDB_BIN_DIR}/dr_agent
COMPONENT clients)
COMPONENT clients
FILE_DIR bin
LINK_DIR bin
FILE_NAME fdbbackup
LINK_NAME dr_agent)
install_symlink(
TO /${FDB_BIN_DIR}/fdbbackup
DESTINATION ${FDB_BIN_DIR}/fdbdr
COMPONENT clients)
COMPONENT clients
FILE_DIR bin
LINK_DIR bin
FILE_NAME fdbbackup
LINK_NAME fdbdr)
endif()

View File

@ -100,7 +100,7 @@ enum {
OPT_TAGNAME, OPT_BACKUPKEYS, OPT_WAITFORDONE,
// Restore constants
OPT_RESTORECONTAINER, OPT_DBVERSION, OPT_PREFIX_ADD, OPT_PREFIX_REMOVE,
OPT_RESTORECONTAINER, OPT_RESTORE_VERSION, OPT_RESTORE_TIMESTAMP, OPT_PREFIX_ADD, OPT_PREFIX_REMOVE, OPT_RESTORE_CLUSTERFILE_DEST, OPT_RESTORE_CLUSTERFILE_ORIG,
// Shared constants
OPT_CLUSTERFILE, OPT_QUIET, OPT_DRYRUN, OPT_FORCE,
@ -504,7 +504,9 @@ CSimpleOpt::SOption g_rgRestoreOptions[] = {
#ifdef _WIN32
{ OPT_PARENTPID, "--parentpid", SO_REQ_SEP },
#endif
{ OPT_CLUSTERFILE, "-C", SO_REQ_SEP },
{ OPT_RESTORE_CLUSTERFILE_DEST, "--dest_cluster_file", SO_REQ_SEP },
{ OPT_RESTORE_CLUSTERFILE_ORIG, "--orig_cluster_file", SO_REQ_SEP },
{ OPT_RESTORE_TIMESTAMP, "--timestamp", SO_REQ_SEP },
{ OPT_KNOB, "--knob_", SO_REQ_SEP },
{ OPT_RESTORECONTAINER,"-r", SO_REQ_SEP },
{ OPT_PREFIX_ADD, "-add_prefix", SO_REQ_SEP },
@ -513,11 +515,10 @@ CSimpleOpt::SOption g_rgRestoreOptions[] = {
{ OPT_TAGNAME, "--tagname", SO_REQ_SEP },
{ OPT_BACKUPKEYS, "-k", SO_REQ_SEP },
{ OPT_BACKUPKEYS, "--keys", SO_REQ_SEP },
{ OPT_WAITFORDONE, "-w", SO_NONE },
{ OPT_WAITFORDONE, "--waitfordone", SO_NONE },
{ OPT_CLUSTERFILE, "--cluster_file", SO_REQ_SEP },
{ OPT_DBVERSION, "--version", SO_REQ_SEP },
{ OPT_DBVERSION, "-v", SO_REQ_SEP },
{ OPT_WAITFORDONE, "-w", SO_NONE },
{ OPT_WAITFORDONE, "--waitfordone", SO_NONE },
{ OPT_RESTORE_VERSION, "--version", SO_REQ_SEP },
{ OPT_RESTORE_VERSION, "-v", SO_REQ_SEP },
{ OPT_TRACE, "--log", SO_NONE },
{ OPT_TRACE_DIR, "--logdir", SO_REQ_SEP },
{ OPT_TRACE_FORMAT, "--trace_format", SO_REQ_SEP },
@ -891,25 +892,30 @@ static void printRestoreUsage(bool devhelp ) {
printf("Usage: %s (start | status | abort | wait) [OPTIONS]\n\n", exeRestore.toString().c_str());
//printf(" FOLDERS Paths to folders containing the backup files.\n");
printf("Options for all commands:\n\n");
printf(" -C CONNFILE The path of a file containing the connection string for the\n"
" FoundationDB cluster. The default is first the value of the\n"
" FDB_CLUSTER_FILE environment variable, then `./fdb.cluster',\n"
" then `%s'.\n", platform::getDefaultClusterFilePath().c_str());
printf(" -t TAGNAME The restore tag to act on. Default is 'default'\n");
printf(" --tagname TAGNAME\n\n");
printf(" Options for start:\n\n");
printf(" --dest_cluster_file CONNFILE\n");
printf(" The cluster file to restore data into.\n");
printf(" -t, --tagname TAGNAME\n");
printf(" The restore tag to act on. Default is 'default'\n");
printf("Options for start:\n\n");
printf(" -r URL The Backup URL for the restore to read from.\n");
printBackupContainerInfo();
printf(" -w Wait for the restore to complete before exiting. Prints progress updates.\n");
printf(" --waitfordone\n");
printf(" -k KEYS List of key ranges from the backup to restore\n");
printf(" --remove_prefix PREFIX prefix to remove from the restored keys\n");
printf(" --add_prefix PREFIX prefix to add to the restored keys\n");
printf(" -n, --dryrun Perform a trial run with no changes made.\n");
printf(" -w, --waitfordone\n");
printf(" Wait for the restore to complete before exiting. Prints progress updates.\n");
printf(" -k KEYS List of key ranges from the backup to restore.\n");
printf(" --remove_prefix PREFIX\n");
printf(" Prefix to remove from the restored keys.\n");
printf(" --add_prefix PREFIX\n");
printf(" Prefix to add to the restored keys\n");
printf(" -n, --dryrun Perform a trial run with no changes made.\n");
#ifndef TLS_DISABLED
printf(TLS_HELP);
#endif
printf(" -v DBVERSION The version at which the database will be restored.\n");
printf(" --timestamp Instead of a numeric version, use this to specify a timestamp in YYYY-MM-DD.HH:MI:SS format (UTC)\n");
printf(" and it will be converted to a version from that time using metadata in orig_cluster_file.\n");
printf(" --orig_cluster_file CONNFILE\n");
printf(" The cluster file for the original database from which the backup was created. The original database\n");
printf(" is only needed to convert a --timestamp argument to a database version.\n");
printf(" -h, --help Display this help and exit.\n");
if( devhelp ) {
@ -1868,17 +1874,74 @@ ACTOR Future<Void> changeDBBackupResumed(Database src, Database dest, bool pause
return Void();
}
ACTOR Future<Void> runRestore(Database db, std::string tagName, std::string container, Standalone<VectorRef<KeyRangeRef>> ranges, Version targetVersion, bool performRestore, bool verbose, bool waitForDone, std::string addPrefix, std::string removePrefix) {
try
{
state FileBackupAgent backupAgent;
Reference<IBackupContainer> openBackupContainer(const char *name, std::string destinationContainer) {
// Error, if no dest container was specified
if (destinationContainer.empty()) {
fprintf(stderr, "ERROR: No backup destination was specified.\n");
printHelpTeaser(name);
throw backup_error();
}
if(ranges.size() > 1) {
fprintf(stderr, "Currently only a single restore range is supported!\n");
Reference<IBackupContainer> c;
try {
c = IBackupContainer::openContainer(destinationContainer);
}
catch (Error& e) {
std::string msg = format("ERROR: '%s' on URL '%s'", e.what(), destinationContainer.c_str());
if(e.code() == error_code_backup_invalid_url && !IBackupContainer::lastOpenError.empty()) {
msg += format(": %s", IBackupContainer::lastOpenError.c_str());
}
fprintf(stderr, "%s\n", msg.c_str());
printHelpTeaser(name);
throw;
}
return c;
}
ACTOR Future<Void> runRestore(std::string destClusterFile, std::string originalClusterFile, std::string tagName, std::string container, Standalone<VectorRef<KeyRangeRef>> ranges, Version targetVersion, std::string targetTimestamp, bool performRestore, bool verbose, bool waitForDone, std::string addPrefix, std::string removePrefix) {
if(ranges.empty()) {
ranges.push_back_deep(ranges.arena(), normalKeys);
}
if(targetVersion != invalidVersion && !targetTimestamp.empty()) {
fprintf(stderr, "Restore target version and target timestamp cannot both be specified\n");
throw restore_error();
}
if(destClusterFile.empty()) {
fprintf(stderr, "Restore destination cluster file must be specified explicitly.\n");
throw restore_error();
}
if(!fileExists(destClusterFile)) {
fprintf(stderr, "Restore destination cluster file '%s' does not exist.\n", destClusterFile.c_str());
throw restore_error();
}
state Optional<Database> origDb;
// Resolve targetTimestamp if given
if(!targetTimestamp.empty()) {
if(originalClusterFile.empty()) {
fprintf(stderr, "An original cluster file must be given in order to resolve restore target timestamp '%s'\n", targetTimestamp.c_str());
throw restore_error();
}
state KeyRange range = (ranges.size() == 0) ? normalKeys : ranges.front();
if(!fileExists(originalClusterFile)) {
fprintf(stderr, "Original source database cluster file '%s' does not exist.\n", originalClusterFile.c_str());
throw restore_error();
}
origDb = Database::createDatabase(originalClusterFile, Database::API_VERSION_LATEST);
Version v = wait(timeKeeperVersionFromDatetime(targetTimestamp, origDb.get()));
printf("Timestamp '%s' resolves to version %lld\n", targetTimestamp.c_str(), v);
targetVersion = v;
}
try {
state Database db = Database::createDatabase(destClusterFile, Database::API_VERSION_LATEST);
state FileBackupAgent backupAgent;
state Reference<IBackupContainer> bc = IBackupContainer::openContainer(container);
@ -1901,7 +1964,7 @@ ACTOR Future<Void> runRestore(Database db, std::string tagName, std::string cont
}
if (performRestore) {
Version restoredVersion = wait(backupAgent.restore(db, KeyRef(tagName), KeyRef(container), waitForDone, targetVersion, verbose, range, KeyRef(addPrefix), KeyRef(removePrefix)));
Version restoredVersion = wait(backupAgent.restore(db, origDb, KeyRef(tagName), KeyRef(container), ranges, waitForDone, targetVersion, verbose, KeyRef(addPrefix), KeyRef(removePrefix)));
if(waitForDone && verbose) {
// If restore is now complete then report version restored
@ -1930,30 +1993,6 @@ ACTOR Future<Void> runRestore(Database db, std::string tagName, std::string cont
return Void();
}
// Open a backup container by URL with CLI-friendly error reporting.
//   name                 - executable name, used for the "--help" teaser printed on error
//   destinationContainer - backup container URL (e.g. file://..., blobstore://...)
// Returns the opened container.  Throws backup_error() when no URL was given and
// rethrows any error from IBackupContainer::openContainer() after printing it.
Reference<IBackupContainer> openBackupContainer(const char *name, std::string destinationContainer) {
	// Error, if no dest container was specified
	if (destinationContainer.empty()) {
		fprintf(stderr, "ERROR: No backup destination was specified.\n");
		printHelpTeaser(name);
		throw backup_error();
	}

	Reference<IBackupContainer> c;
	try {
		c = IBackupContainer::openContainer(destinationContainer);
	}
	catch (Error& e) {
		// Fix: the old code declared a local std::string that was never assigned,
		// so the "[...]" detail branch was dead code and the message always ended
		// with an empty field.  Build the message directly and, for invalid URLs,
		// append the more specific reason the container layer recorded.
		std::string msg = format("ERROR: '%s' on URL '%s'", e.what(), destinationContainer.c_str());
		if(e.code() == error_code_backup_invalid_url && !IBackupContainer::lastOpenError.empty()) {
			msg += format(": %s", IBackupContainer::lastOpenError.c_str());
		}
		fprintf(stderr, "%s\n", msg.c_str());
		printHelpTeaser(name);
		throw;
	}
	return c;
}
ACTOR Future<Void> dumpBackupData(const char *name, std::string destinationContainer, Version beginVersion, Version endVersion) {
state Reference<IBackupContainer> c = openBackupContainer(name, destinationContainer);
@ -2485,7 +2524,8 @@ int main(int argc, char* argv[]) {
std::string removePrefix;
Standalone<VectorRef<KeyRangeRef>> backupKeys;
int maxErrors = 20;
Version dbVersion = invalidVersion;
Version restoreVersion = invalidVersion;
std::string restoreTimestamp;
bool waitForDone = false;
bool stopWhenDone = true;
bool forceAction = false;
@ -2505,6 +2545,8 @@ int main(int argc, char* argv[]) {
std::string tlsCertPath, tlsKeyPath, tlsCAPath, tlsPassword, tlsVerifyPeers;
Version dumpBegin = 0;
Version dumpEnd = std::numeric_limits<Version>::max();
std::string restoreClusterFileDest;
std::string restoreClusterFileOrig;
if( argc == 1 ) {
printUsage(programExe, false);
@ -2641,9 +2683,18 @@ int main(int argc, char* argv[]) {
expireRestorableAfterVersion = ver;
break;
}
case OPT_RESTORE_TIMESTAMP:
restoreTimestamp = args->OptionArg();
break;
case OPT_BASEURL:
baseUrl = args->OptionArg();
break;
case OPT_RESTORE_CLUSTERFILE_DEST:
restoreClusterFileDest = args->OptionArg();
break;
case OPT_RESTORE_CLUSTERFILE_ORIG:
restoreClusterFileOrig = args->OptionArg();
break;
case OPT_CLUSTERFILE:
clusterFile = args->OptionArg();
break;
@ -2723,15 +2774,15 @@ int main(int argc, char* argv[]) {
}
break;
}
case OPT_DBVERSION: {
case OPT_RESTORE_VERSION: {
const char* a = args->OptionArg();
long long dbVersionValue = 0;
if (!sscanf(a, "%lld", &dbVersionValue)) {
long long ver = 0;
if (!sscanf(a, "%lld", &ver)) {
fprintf(stderr, "ERROR: Could not parse database version `%s'\n", a);
printHelpTeaser(argv[0]);
return FDB_EXIT_ERROR;
}
dbVersion = dbVersionValue;
restoreVersion = ver;
break;
}
#ifdef _WIN32
@ -3175,13 +3226,13 @@ int main(int argc, char* argv[]) {
if(dryRun) {
initTraceFile();
}
else if(!initCluster()) {
else if(restoreType != RESTORE_START && !initCluster()) {
return FDB_EXIT_ERROR;
}
switch(restoreType) {
case RESTORE_START:
f = stopAfter( runRestore(db, tagName, restoreContainer, backupKeys, dbVersion, !dryRun, !quietDisplay, waitForDone, addPrefix, removePrefix) );
f = stopAfter( runRestore(restoreClusterFileDest, restoreClusterFileOrig, tagName, restoreContainer, backupKeys, restoreVersion, restoreTimestamp, !dryRun, !quietDisplay, waitForDone, addPrefix, removePrefix) );
break;
case RESTORE_WAIT:
f = stopAfter( success(ba.waitRestore(db, KeyRef(tagName), true)) );
@ -3193,7 +3244,6 @@ int main(int argc, char* argv[]) {
}) );
break;
case RESTORE_STATUS:
// If no tag is specifically provided then print all tag status, don't just use "default"
if(tagProvided)
tag = tagName;
@ -3268,7 +3318,8 @@ int main(int argc, char* argv[]) {
<< FastAllocator<512>::pageCount << " "
<< FastAllocator<1024>::pageCount << " "
<< FastAllocator<2048>::pageCount << " "
<< FastAllocator<4096>::pageCount << endl;
<< FastAllocator<4096>::pageCount << " "
<< FastAllocator<8192>::pageCount << endl;
vector< std::pair<std::string, const char*> > typeNames;
for( auto i = allocInstr.begin(); i != allocInstr.end(); ++i ) {

View File

@ -53,11 +53,11 @@
<PropertyGroup Label="UserMacros" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|X64'">
<LinkIncremental>true</LinkIncremental>
<IncludePath>$(IncludePath);../;C:\Program Files\boost_1_52_0</IncludePath>
<IncludePath>$(IncludePath);../;C:\Program Files\boost_1_67_0</IncludePath>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|X64'">
<LinkIncremental>false</LinkIncremental>
<IncludePath>$(IncludePath);../;C:\Program Files\boost_1_52_0</IncludePath>
<IncludePath>$(IncludePath);../;C:\Program Files\boost_1_67_0</IncludePath>
<CustomBuildBeforeTargets>PreBuildEvent</CustomBuildBeforeTargets>
</PropertyGroup>
<ItemDefinitionGroup>

View File

@ -11,6 +11,4 @@ endif()
add_flow_target(EXECUTABLE NAME fdbcli SRCS ${FDBCLI_SRCS})
target_link_libraries(fdbcli PRIVATE fdbclient)
if(NOT OPEN_FOR_IDE)
install(TARGETS fdbcli DESTINATION ${FDB_BIN_DIR} COMPONENT clients)
endif()
fdb_install(TARGETS fdbcli DESTINATION bin COMPONENT clients)

View File

@ -2031,7 +2031,7 @@ ACTOR Future<bool> exclude( Database db, std::vector<StringRef> tokens, Referenc
wait( makeInterruptable(waitForExcludedServers(db,addresses)) );
std::vector<ProcessData> workers = wait( makeInterruptable(getWorkers(db)) );
std::map<uint32_t, std::set<uint16_t>> workerPorts;
std::map<IPAddress, std::set<uint16_t>> workerPorts;
for(auto addr : workers)
workerPorts[addr.address.ip].insert(addr.address.port);
@ -2050,7 +2050,7 @@ ACTOR Future<bool> exclude( Database db, std::vector<StringRef> tokens, Referenc
"excluded the correct machines or processes before removing them from the cluster:\n");
for(auto addr : absentExclusions) {
if(addr.port == 0)
printf(" %s\n", toIPString(addr.ip).c_str());
printf(" %s\n", addr.ip.toString().c_str());
else
printf(" %s\n", addr.toString().c_str());
}

View File

@ -62,13 +62,13 @@
<LinkIncremental>true</LinkIncremental>
<OutDir>$(SolutionDir)bin\$(Configuration)\</OutDir>
<IntDir>$(SystemDrive)\temp\msvcfdb\$(Platform)$(Configuration)\$(MSBuildProjectName)\</IntDir>
<IncludePath>$(IncludePath);../;C:\Program Files\boost_1_52_0</IncludePath>
<IncludePath>$(IncludePath);../;C:\Program Files\boost_1_67_0</IncludePath>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|X64'">
<LinkIncremental>false</LinkIncremental>
<OutDir>$(SolutionDir)bin\$(Configuration)\</OutDir>
<IntDir>$(SystemDrive)\temp\msvcfdb\$(Platform)$(Configuration)\$(MSBuildProjectName)\</IntDir>
<IncludePath>$(IncludePath);../;C:\Program Files\boost_1_52_0</IncludePath>
<IncludePath>$(IncludePath);../;C:\Program Files\boost_1_67_0</IncludePath>
</PropertyGroup>
<ItemDefinitionGroup>
<ClCompile>

View File

@ -28,13 +28,21 @@
#include "fdbclient/CoordinationInterface.h"
uint32_t determinePublicIPAutomatically( ClusterConnectionString const& ccs ) {
IPAddress determinePublicIPAutomatically(ClusterConnectionString const& ccs) {
try {
boost::asio::io_service ioService;
boost::asio::ip::udp::socket socket(ioService);
boost::asio::ip::udp::endpoint endpoint(boost::asio::ip::address_v4(ccs.coordinators()[0].ip), ccs.coordinators()[0].port);
using namespace boost::asio;
io_service ioService;
ip::udp::socket socket(ioService);
const auto& coordAddr = ccs.coordinators()[0];
const auto boostIp = coordAddr.ip.isV6() ? ip::address(ip::address_v6(coordAddr.ip.toV6()))
: ip::address(ip::address_v4(coordAddr.ip.toV4()));
ip::udp::endpoint endpoint(boostIp, coordAddr.port);
socket.connect(endpoint);
auto ip = socket.local_endpoint().address().to_v4().to_ulong();
IPAddress ip = coordAddr.ip.isV6() ? IPAddress(socket.local_endpoint().address().to_v6().to_bytes())
: IPAddress(socket.local_endpoint().address().to_v4().to_ulong());
socket.close();
return ip;
@ -43,4 +51,4 @@ uint32_t determinePublicIPAutomatically( ClusterConnectionString const& ccs ) {
fprintf(stderr, "Error determining public address: %s\n", e.what());
throw bind_failed();
}
}
}

View File

@ -199,14 +199,14 @@ class FileBackupAgent : public BackupAgentBase {
public:
FileBackupAgent();
FileBackupAgent( FileBackupAgent&& r ) noexcept(true) :
FileBackupAgent( FileBackupAgent&& r ) BOOST_NOEXCEPT :
subspace( std::move(r.subspace) ),
config( std::move(r.config) ),
lastRestorable( std::move(r.lastRestorable) ),
taskBucket( std::move(r.taskBucket) ),
futureBucket( std::move(r.futureBucket) ) {}
void operator=( FileBackupAgent&& r ) noexcept(true) {
void operator=( FileBackupAgent&& r ) BOOST_NOEXCEPT {
subspace = std::move(r.subspace);
config = std::move(r.config);
lastRestorable = std::move(r.lastRestorable),
@ -233,9 +233,18 @@ public:
// - submit a restore on the given tagName
// - Optionally wait for the restore's completion. Will restore_error if restore fails or is aborted.
// restore() will return the targetVersion which will be either the valid version passed in or the max restorable version for the given url.
Future<Version> restore(Database cx, Key tagName, Key url, bool waitForComplete = true, Version targetVersion = -1, bool verbose = true, KeyRange range = normalKeys, Key addPrefix = Key(), Key removePrefix = Key(), bool lockDB = true);
Future<Version> atomicRestore(Database cx, Key tagName, KeyRange range = normalKeys, Key addPrefix = Key(), Key removePrefix = Key());
Future<Version> restore(Database cx, Optional<Database> cxOrig, Key tagName, Key url, Standalone<VectorRef<KeyRangeRef>> ranges, bool waitForComplete = true, Version targetVersion = -1, bool verbose = true, Key addPrefix = Key(), Key removePrefix = Key(), bool lockDB = true);
Future<Version> restore(Database cx, Optional<Database> cxOrig, Key tagName, Key url, bool waitForComplete = true, Version targetVersion = -1, bool verbose = true, KeyRange range = normalKeys, Key addPrefix = Key(), Key removePrefix = Key(), bool lockDB = true) {
Standalone<VectorRef<KeyRangeRef>> rangeRef;
rangeRef.push_back_deep(rangeRef.arena(), range);
return restore(cx, cxOrig, tagName, url, rangeRef, waitForComplete, targetVersion, verbose, addPrefix, removePrefix, lockDB);
}
Future<Version> atomicRestore(Database cx, Key tagName, Standalone<VectorRef<KeyRangeRef>> ranges, Key addPrefix = Key(), Key removePrefix = Key());
Future<Version> atomicRestore(Database cx, Key tagName, KeyRange range = normalKeys, Key addPrefix = Key(), Key removePrefix = Key()) {
Standalone<VectorRef<KeyRangeRef>> rangeRef;
rangeRef.push_back_deep(rangeRef.arena(), range);
return atomicRestore(cx, tagName, rangeRef, addPrefix, removePrefix);
}
// Tries to abort the restore for a tag. Returns the final (stable) state of the tag.
Future<ERestoreState> abortRestore(Reference<ReadYourWritesTransaction> tr, Key tagName);
Future<ERestoreState> abortRestore(Database cx, Key tagName);
@ -306,7 +315,7 @@ public:
DatabaseBackupAgent();
explicit DatabaseBackupAgent(Database src);
DatabaseBackupAgent( DatabaseBackupAgent&& r ) noexcept(true) :
DatabaseBackupAgent( DatabaseBackupAgent&& r ) BOOST_NOEXCEPT :
subspace( std::move(r.subspace) ),
states( std::move(r.states) ),
config( std::move(r.config) ),
@ -318,7 +327,7 @@ public:
sourceStates( std::move(r.sourceStates) ),
sourceTagNames( std::move(r.sourceTagNames) ) {}
void operator=( DatabaseBackupAgent&& r ) noexcept(true) {
void operator=( DatabaseBackupAgent&& r ) BOOST_NOEXCEPT {
subspace = std::move(r.subspace);
states = std::move(r.states);
config = std::move(r.config);

View File

@ -633,33 +633,21 @@ struct LogMessageVersion {
};
struct AddressExclusion {
uint32_t ip;
IPAddress ip;
int port;
AddressExclusion() : ip(0), port(0) {}
explicit AddressExclusion( uint32_t ip ) : ip(ip), port(0) {}
explicit AddressExclusion( uint32_t ip, int port ) : ip(ip), port(port) {}
explicit AddressExclusion(const IPAddress& ip) : ip(ip), port(0) {}
explicit AddressExclusion(const IPAddress& ip, int port) : ip(ip), port(port) {}
explicit AddressExclusion (std::string s) {
int a,b,c,d,p,count=-1;
if (sscanf(s.c_str(), "%d.%d.%d.%d:%d%n", &a,&b,&c,&d, &p, &count) == 5 && count == s.size()) {
ip = (a<<24)+(b<<16)+(c<<8)+d;
port = p;
}
else if (sscanf(s.c_str(), "%d.%d.%d.%d%n", &a,&b,&c,&d, &count) == 4 && count == s.size()) {
ip = (a<<24)+(b<<16)+(c<<8)+d;
port = 0;
}
else {
throw connection_string_invalid();
}
bool operator<(AddressExclusion const& r) const {
if (ip != r.ip) return ip < r.ip;
return port < r.port;
}
bool operator< (AddressExclusion const& r) const { if (ip != r.ip) return ip < r.ip; return port<r.port; }
bool operator== (AddressExclusion const& r) const { return ip == r.ip && port == r.port; }
bool operator==(AddressExclusion const& r) const { return ip == r.ip && port == r.port; }
bool isWholeMachine() const { return port == 0; }
bool isValid() const { return ip != 0 || port != 0; }
bool isValid() const { return ip.isValid() || port != 0; }
bool excludes( NetworkAddress const& addr ) const {
if(isWholeMachine())
@ -669,17 +657,16 @@ struct AddressExclusion {
// This is for debugging and IS NOT to be used for serialization to persistent state
std::string toString() const {
std::string as = format( "%d.%d.%d.%d", (ip>>24)&0xff, (ip>>16)&0xff, (ip>>8)&0xff, ip&0xff );
if (!isWholeMachine())
as += format(":%d", port);
return as;
return formatIpPort(ip, port);
return ip.toString();
}
static AddressExclusion parse( StringRef const& );
template <class Ar>
void serialize(Ar& ar) {
ar.serializeBinaryItem(*this);
serializer(ar, ip, port);
}
};

View File

@ -129,9 +129,13 @@ public:
KeyBackedProperty<Key> removePrefix() {
return configSpace.pack(LiteralStringRef(__FUNCTION__));
}
// XXX: Remove restoreRange() once it is safe to remove. It has been changed to restoreRanges
KeyBackedProperty<KeyRange> restoreRange() {
return configSpace.pack(LiteralStringRef(__FUNCTION__));
}
KeyBackedProperty<std::vector<KeyRange>> restoreRanges() {
return configSpace.pack(LiteralStringRef(__FUNCTION__));
}
KeyBackedProperty<Key> batchFuture() {
return configSpace.pack(LiteralStringRef(__FUNCTION__));
}
@ -168,6 +172,19 @@ public:
return configSpace.pack(LiteralStringRef(__FUNCTION__));
}
Future<std::vector<KeyRange>> getRestoreRangesOrDefault(Reference<ReadYourWritesTransaction> tr) {
return getRestoreRangesOrDefault_impl(this, tr);
}
ACTOR static Future<std::vector<KeyRange>> getRestoreRangesOrDefault_impl(RestoreConfig *self, Reference<ReadYourWritesTransaction> tr) {
state std::vector<KeyRange> ranges = wait(self->restoreRanges().getD(tr));
if (ranges.empty()) {
state KeyRange range = wait(self->restoreRange().getD(tr));
ranges.push_back(range);
}
return ranges;
}
// Describes a file to load blocks from during restore. Ordered by version and then fileName to enable
// incrementally advancing through the map, saving the version and path of the next starting point.
struct RestoreFile {
@ -365,7 +382,7 @@ ACTOR Future<std::string> RestoreConfig::getFullStatus_impl(RestoreConfig restor
tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
tr->setOption(FDBTransactionOptions::LOCK_AWARE);
state Future<KeyRange> range = restore.restoreRange().getD(tr);
state Future<std::vector<KeyRange>> ranges = restore.getRestoreRangesOrDefault(tr);
state Future<Key> addPrefix = restore.addPrefix().getD(tr);
state Future<Key> removePrefix = restore.removePrefix().getD(tr);
state Future<Key> url = restore.sourceContainerURL().getD(tr);
@ -374,17 +391,19 @@ ACTOR Future<std::string> RestoreConfig::getFullStatus_impl(RestoreConfig restor
// restore might no longer be valid after the first wait so make sure it is not needed anymore.
state UID uid = restore.getUid();
wait(success(range) && success(addPrefix) && success(removePrefix) && success(url) && success(restoreVersion) && success(progress));
wait(success(ranges) && success(addPrefix) && success(removePrefix) && success(url) && success(restoreVersion) && success(progress));
return format("%s URL: %s Begin: '%s' End: '%s' AddPrefix: '%s' RemovePrefix: '%s' Version: %lld",
progress.get().c_str(),
url.get().toString().c_str(),
printable(range.get().begin).c_str(),
printable(range.get().end).c_str(),
printable(addPrefix.get()).c_str(),
printable(removePrefix.get()).c_str(),
restoreVersion.get()
);
std::string returnStr;
returnStr = format("%s URL: %s", progress.get().c_str(), url.get().toString().c_str());
for (auto &range : ranges.get()) {
returnStr += format(" Range: '%s'-'%s'", printable(range.begin).c_str(), printable(range.end).c_str());
}
returnStr += format(" AddPrefix: '%s' RemovePrefix: '%s' Version: %lld",
printable(addPrefix.get()).c_str(),
printable(removePrefix.get()).c_str(),
restoreVersion.get()
);
return returnStr;
}
@ -2473,10 +2492,26 @@ namespace fileBackup {
static struct : InputParams {
// The range of data that the (possibly empty) data represented, which is set if it intersects the target restore range
static TaskParam<KeyRange> originalFileRange() { return LiteralStringRef(__FUNCTION__); }
static TaskParam<std::vector<KeyRange>> originalFileRanges() { return LiteralStringRef(__FUNCTION__); }
static std::vector<KeyRange> getOriginalFileRanges(Reference<Task> task) {
if (originalFileRanges().exists(task)) {
return Params.originalFileRanges().get(task);
}
else {
std::vector<KeyRange> range;
if (originalFileRange().exists(task))
range.push_back(Params.originalFileRange().get(task));
return range;
}
}
} Params;
std::string toString(Reference<Task> task) {
return RestoreFileTaskFuncBase::toString(task) + format(" originalFileRange '%s'", printable(Params.originalFileRange().get(task)).c_str());
std::string returnStr = RestoreFileTaskFuncBase::toString(task);
for(auto &range : Params.getOriginalFileRanges(task))
returnStr += format(" originalFileRange '%s'", printable(range).c_str());
return returnStr;
}
ACTOR static Future<Void> _execute(Database cx, Reference<TaskBucket> taskBucket, Reference<FutureBucket> futureBucket, Reference<Task> task) {
@ -2496,28 +2531,29 @@ namespace fileBackup {
.detail("ReadLen", readLen)
.detail("TaskInstance", THIS_ADDR);
state Reference<ReadYourWritesTransaction> tr( new ReadYourWritesTransaction(cx) );
state Reference<ReadYourWritesTransaction> tr(new ReadYourWritesTransaction(cx));
state Future<Reference<IBackupContainer>> bc;
state Future<KeyRange> restoreRange;
state Future<std::vector<KeyRange>> restoreRanges;
state Future<Key> addPrefix;
state Future<Key> removePrefix;
loop {
loop{
try {
tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
tr->setOption(FDBTransactionOptions::LOCK_AWARE);
bc = restore.sourceContainer().getOrThrow(tr);
restoreRange = restore.restoreRange().getD(tr);
restoreRanges = restore.getRestoreRangesOrDefault(tr);
addPrefix = restore.addPrefix().getD(tr);
removePrefix = restore.removePrefix().getD(tr);
wait(taskBucket->keepRunning(tr, task));
wait(success(bc) && success(restoreRange) && success(addPrefix) && success(removePrefix) && checkTaskVersion(tr->getDatabase(), task, name, version));
wait(success(bc) && success(restoreRanges) && success(addPrefix) && success(removePrefix) && checkTaskVersion(tr->getDatabase(), task, name, version));
break;
} catch(Error &e) {
}
catch (Error &e) {
wait(tr->onError(e));
}
}
@ -2527,113 +2563,128 @@ namespace fileBackup {
// First and last key are the range for this file
state KeyRange fileRange = KeyRangeRef(blockData.front().key, blockData.back().key);
state std::vector<KeyRange> originalFileRanges;
// If fileRange doesn't intersect restore range then we're done.
if(!fileRange.intersects(restoreRange.get()))
return Void();
state int index;
for (index = 0; index < restoreRanges.get().size(); index++) {
auto &restoreRange = restoreRanges.get()[index];
if (!fileRange.intersects(restoreRange))
continue;
// We know the file range intersects the restore range but there could still be keys outside the restore range.
// Find the subvector of kv pairs that intersect the restore range. Note that the first and last keys are just the range endpoints for this file
int rangeStart = 1;
int rangeEnd = blockData.size() - 1;
// Slide start forward, stop if something in range is found
while(rangeStart < rangeEnd && !restoreRange.get().contains(blockData[rangeStart].key))
++rangeStart;
// Slide end backward, stop if something in range is found
while(rangeEnd > rangeStart && !restoreRange.get().contains(blockData[rangeEnd - 1].key))
--rangeEnd;
// We know the file range intersects the restore range but there could still be keys outside the restore range.
// Find the subvector of kv pairs that intersect the restore range. Note that the first and last keys are just the range endpoints for this file
int rangeStart = 1;
int rangeEnd = blockData.size() - 1;
// Slide start forward, stop if something in range is found
while (rangeStart < rangeEnd && !restoreRange.contains(blockData[rangeStart].key))
++rangeStart;
// Slide end backward, stop if something in range is found
while (rangeEnd > rangeStart && !restoreRange.contains(blockData[rangeEnd - 1].key))
--rangeEnd;
state VectorRef<KeyValueRef> data = blockData.slice(rangeStart, rangeEnd);
state VectorRef<KeyValueRef> data = blockData.slice(rangeStart, rangeEnd);
// Shrink file range to be entirely within restoreRange and translate it to the new prefix
// First, use the untranslated file range to create the shrunk original file range which must be used in the kv range version map for applying mutations
state KeyRange originalFileRange = KeyRangeRef(std::max(fileRange.begin, restoreRange.get().begin), std::min(fileRange.end, restoreRange.get().end));
Params.originalFileRange().set(task, originalFileRange);
// Shrink file range to be entirely within restoreRange and translate it to the new prefix
// First, use the untranslated file range to create the shrunk original file range which must be used in the kv range version map for applying mutations
state KeyRange originalFileRange = KeyRangeRef(std::max(fileRange.begin, restoreRange.begin), std::min(fileRange.end, restoreRange.end));
originalFileRanges.push_back(originalFileRange);
// Now shrink and translate fileRange
Key fileEnd = std::min(fileRange.end, restoreRange.get().end);
if(fileEnd == (removePrefix.get() == StringRef() ? normalKeys.end : strinc(removePrefix.get())) ) {
fileEnd = addPrefix.get() == StringRef() ? normalKeys.end : strinc(addPrefix.get());
} else {
fileEnd = fileEnd.removePrefix(removePrefix.get()).withPrefix(addPrefix.get());
}
fileRange = KeyRangeRef(std::max(fileRange.begin, restoreRange.get().begin).removePrefix(removePrefix.get()).withPrefix(addPrefix.get()),fileEnd);
// Now shrink and translate fileRange
Key fileEnd = std::min(fileRange.end, restoreRange.end);
if (fileEnd == (removePrefix.get() == StringRef() ? normalKeys.end : strinc(removePrefix.get()))) {
fileEnd = addPrefix.get() == StringRef() ? normalKeys.end : strinc(addPrefix.get());
}
else {
fileEnd = fileEnd.removePrefix(removePrefix.get()).withPrefix(addPrefix.get());
}
fileRange = KeyRangeRef(std::max(fileRange.begin, restoreRange.begin).removePrefix(removePrefix.get()).withPrefix(addPrefix.get()), fileEnd);
state int start = 0;
state int end = data.size();
state int dataSizeLimit = BUGGIFY ? g_random->randomInt(256 * 1024, 10e6) : CLIENT_KNOBS->RESTORE_WRITE_TX_SIZE;
state int start = 0;
state int end = data.size();
state int dataSizeLimit = BUGGIFY ? g_random->randomInt(256 * 1024, 10e6) : CLIENT_KNOBS->RESTORE_WRITE_TX_SIZE;
tr->reset();
loop {
try {
tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
tr->setOption(FDBTransactionOptions::LOCK_AWARE);
tr->reset();
loop{
try {
tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
tr->setOption(FDBTransactionOptions::LOCK_AWARE);
state int i = start;
state int txBytes = 0;
state int iend = start;
state int i = start;
state int txBytes = 0;
state int iend = start;
// find iend that results in the desired transaction size
for(; iend < end && txBytes < dataSizeLimit; ++iend) {
txBytes += data[iend].key.expectedSize();
txBytes += data[iend].value.expectedSize();
// find iend that results in the desired transaction size
for (; iend < end && txBytes < dataSizeLimit; ++iend) {
txBytes += data[iend].key.expectedSize();
txBytes += data[iend].value.expectedSize();
}
// Clear the range we are about to set.
// If start == 0 then use fileBegin for the start of the range, else data[start]
// If iend == end then use fileEnd for the end of the range, else data[iend]
state KeyRange trRange = KeyRangeRef((start == 0) ? fileRange.begin : data[start].key.removePrefix(removePrefix.get()).withPrefix(addPrefix.get())
, (iend == end) ? fileRange.end : data[iend].key.removePrefix(removePrefix.get()).withPrefix(addPrefix.get()));
tr->clear(trRange);
for (; i < iend; ++i) {
tr->setOption(FDBTransactionOptions::NEXT_WRITE_NO_WRITE_CONFLICT_RANGE);
tr->set(data[i].key.removePrefix(removePrefix.get()).withPrefix(addPrefix.get()), data[i].value);
}
// Add to bytes written count
restore.bytesWritten().atomicOp(tr, txBytes, MutationRef::Type::AddValue);
state Future<Void> checkLock = checkDatabaseLock(tr, restore.getUid());
wait(taskBucket->keepRunning(tr, task));
wait(checkLock);
wait(tr->commit());
TraceEvent("FileRestoreCommittedRange")
.suppressFor(60)
.detail("RestoreUID", restore.getUid())
.detail("FileName", rangeFile.fileName)
.detail("FileVersion", rangeFile.version)
.detail("FileSize", rangeFile.fileSize)
.detail("ReadOffset", readOffset)
.detail("ReadLen", readLen)
.detail("CommitVersion", tr->getCommittedVersion())
.detail("BeginRange", printable(trRange.begin))
.detail("EndRange", printable(trRange.end))
.detail("StartIndex", start)
.detail("EndIndex", i)
.detail("DataSize", data.size())
.detail("Bytes", txBytes)
.detail("OriginalFileRange", printable(originalFileRange))
.detail("TaskInstance", THIS_ADDR);
// Commit succeeded, so advance starting point
start = i;
if (start == end)
break;
tr->reset();
}
// Clear the range we are about to set.
// If start == 0 then use fileBegin for the start of the range, else data[start]
// If iend == end then use fileEnd for the end of the range, else data[iend]
state KeyRange trRange = KeyRangeRef((start == 0 ) ? fileRange.begin : data[start].key.removePrefix(removePrefix.get()).withPrefix(addPrefix.get())
, (iend == end) ? fileRange.end : data[iend ].key.removePrefix(removePrefix.get()).withPrefix(addPrefix.get()));
tr->clear(trRange);
for(; i < iend; ++i) {
tr->setOption(FDBTransactionOptions::NEXT_WRITE_NO_WRITE_CONFLICT_RANGE);
tr->set(data[i].key.removePrefix(removePrefix.get()).withPrefix(addPrefix.get()), data[i].value);
}
// Add to bytes written count
restore.bytesWritten().atomicOp(tr, txBytes, MutationRef::Type::AddValue);
state Future<Void> checkLock = checkDatabaseLock(tr, restore.getUid());
wait(taskBucket->keepRunning(tr, task));
wait( checkLock );
wait(tr->commit());
TraceEvent("FileRestoreCommittedRange")
.suppressFor(60)
.detail("RestoreUID", restore.getUid())
.detail("FileName", rangeFile.fileName)
.detail("FileVersion", rangeFile.version)
.detail("FileSize", rangeFile.fileSize)
.detail("ReadOffset", readOffset)
.detail("ReadLen", readLen)
.detail("CommitVersion", tr->getCommittedVersion())
.detail("BeginRange", printable(trRange.begin))
.detail("EndRange", printable(trRange.end))
.detail("StartIndex", start)
.detail("EndIndex", i)
.detail("DataSize", data.size())
.detail("Bytes", txBytes)
.detail("OriginalFileRange", printable(originalFileRange))
.detail("TaskInstance", THIS_ADDR);
// Commit succeeded, so advance starting point
start = i;
if(start == end)
return Void();
tr->reset();
} catch(Error &e) {
if(e.code() == error_code_transaction_too_large)
catch (Error &e) {
if (e.code() == error_code_transaction_too_large)
dataSizeLimit /= 2;
else
wait(tr->onError(e));
}
}
}
if (!originalFileRanges.empty()) {
if (BUGGIFY && restoreRanges.get().size() == 1) {
Params.originalFileRange().set(task, originalFileRanges[0]);
}
else {
Params.originalFileRanges().set(task, originalFileRanges);
}
}
return Void();
}
ACTOR static Future<Void> _finish(Reference<ReadYourWritesTransaction> tr, Reference<TaskBucket> taskBucket, Reference<FutureBucket> futureBucket, Reference<Task> task) {
@ -2641,15 +2692,16 @@ namespace fileBackup {
restore.fileBlocksFinished().atomicOp(tr, 1, MutationRef::Type::AddValue);
// Update the KV range map if originalFileRange is set
Future<Void> updateMap = Void();
if(Params.originalFileRange().exists(task)) {
std::vector<Future<Void>> updateMap;
std::vector<KeyRange> ranges = Params.getOriginalFileRanges(task);
for (auto &range : ranges) {
Value versionEncoded = BinaryWriter::toValue(Params.inputFile().get(task).version, Unversioned());
updateMap = krmSetRange(tr, restore.applyMutationsMapPrefix(), Params.originalFileRange().get(task), versionEncoded);
updateMap.push_back(krmSetRange(tr, restore.applyMutationsMapPrefix(), range, versionEncoded));
}
state Reference<TaskFuture> taskFuture = futureBucket->unpack(task->params[Task::reservedTaskParamKeyDone]);
wait(taskFuture->set(tr, taskBucket) &&
taskBucket->finish(tr, task) && updateMap);
taskBucket->finish(tr, task) && waitForAll(updateMap));
return Void();
}
@ -3575,8 +3627,20 @@ public:
return Void();
}
ACTOR static Future<Void> submitRestore(FileBackupAgent* backupAgent, Reference<ReadYourWritesTransaction> tr, Key tagName, Key backupURL, Version restoreVersion, Key addPrefix, Key removePrefix, KeyRange restoreRange, bool lockDB, UID uid) {
ASSERT(restoreRange.contains(removePrefix) || removePrefix.size() == 0);
ACTOR static Future<Void> submitRestore(FileBackupAgent* backupAgent, Reference<ReadYourWritesTransaction> tr, Key tagName, Key backupURL, Standalone<VectorRef<KeyRangeRef>> ranges, Version restoreVersion, Key addPrefix, Key removePrefix, bool lockDB, UID uid) {
KeyRangeMap<int> restoreRangeSet;
for (auto& range : ranges) {
restoreRangeSet.insert(range, 1);
}
restoreRangeSet.coalesce(allKeys);
state std::vector<KeyRange> restoreRanges;
for (auto& restoreRange : restoreRangeSet.ranges()) {
if (restoreRange.value()) {
restoreRanges.push_back(KeyRange(KeyRangeRef(restoreRange.range().begin, restoreRange.range().end)));
}
}
for (auto &restoreRange : restoreRanges)
ASSERT(restoreRange.contains(removePrefix) || removePrefix.size() == 0);
tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
tr->setOption(FDBTransactionOptions::LOCK_AWARE);
@ -3606,13 +3670,15 @@ public:
// Clear the old restore config
oldRestore.clear(tr);
}
KeyRange restoreIntoRange = KeyRangeRef(restoreRange.begin, restoreRange.end).removePrefix(removePrefix).withPrefix(addPrefix);
Standalone<RangeResultRef> existingRows = wait(tr->getRange(restoreIntoRange, 1));
if (existingRows.size() > 0) {
throw restore_destination_not_empty();
}
state int index;
for (index = 0; index < restoreRanges.size(); index++) {
KeyRange restoreIntoRange = KeyRangeRef(restoreRanges[index].begin, restoreRanges[index].end).removePrefix(removePrefix).withPrefix(addPrefix);
Standalone<RangeResultRef> existingRows = wait(tr->getRange(restoreIntoRange, 1));
if (existingRows.size() > 0) {
throw restore_destination_not_empty();
}
}
// Make new restore config
state RestoreConfig restore(uid);
@ -3626,7 +3692,12 @@ public:
restore.sourceContainer().set(tr, bc);
restore.stateEnum().set(tr, ERestoreState::QUEUED);
restore.restoreVersion().set(tr, restoreVersion);
restore.restoreRange().set(tr, restoreRange);
if (BUGGIFY && restoreRanges.size() == 1) {
restore.restoreRange().set(tr, restoreRanges[0]);
}
else {
restore.restoreRanges().set(tr, restoreRanges);
}
// this also sets restore.add/removePrefix.
restore.initApplyMutations(tr, addPrefix, removePrefix);
@ -3937,10 +4008,13 @@ public:
return r;
}
ACTOR static Future<Version> restore(FileBackupAgent* backupAgent, Database cx, Key tagName, Key url, bool waitForComplete, Version targetVersion, bool verbose, KeyRange range, Key addPrefix, Key removePrefix, bool lockDB, UID randomUid) {
ACTOR static Future<Version> restore(FileBackupAgent* backupAgent, Database cx, Optional<Database> cxOrig, Key tagName, Key url, Standalone<VectorRef<KeyRangeRef>> ranges, bool waitForComplete, Version targetVersion, bool verbose, Key addPrefix, Key removePrefix, bool lockDB, UID randomUid) {
state Reference<IBackupContainer> bc = IBackupContainer::openContainer(url.toString());
state BackupDescription desc = wait(bc->describeBackup());
wait(desc.resolveVersionTimes(cx));
if(cxOrig.present()) {
wait(desc.resolveVersionTimes(cxOrig.get()));
}
printf("Backup Description\n%s", desc.toString().c_str());
if(targetVersion == invalidVersion && desc.maxRestorableVersion.present())
@ -3965,7 +4039,7 @@ public:
try {
tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
tr->setOption(FDBTransactionOptions::LOCK_AWARE);
wait(submitRestore(backupAgent, tr, tagName, url, targetVersion, addPrefix, removePrefix, range, lockDB, randomUid));
wait(submitRestore(backupAgent, tr, tagName, url, ranges, targetVersion, addPrefix, removePrefix, lockDB, randomUid));
wait(tr->commit());
break;
} catch(Error &e) {
@ -3986,7 +4060,7 @@ public:
//used for correctness only, locks the database before discontinuing the backup and that same lock is then used while doing the restore.
//the tagname of the backup must be the same as the restore.
ACTOR static Future<Version> atomicRestore(FileBackupAgent* backupAgent, Database cx, Key tagName, KeyRange range, Key addPrefix, Key removePrefix) {
ACTOR static Future<Version> atomicRestore(FileBackupAgent* backupAgent, Database cx, Key tagName, Standalone<VectorRef<KeyRangeRef>> ranges, Key addPrefix, Key removePrefix) {
state Reference<ReadYourWritesTransaction> ryw_tr = Reference<ReadYourWritesTransaction>(new ReadYourWritesTransaction(cx));
state BackupConfig backupConfig;
loop {
@ -4065,9 +4139,11 @@ public:
loop {
try {
ryw_tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
ryw_tr->setOption(FDBTransactionOptions::LOCK_AWARE);
ryw_tr->addReadConflictRange(range);
ryw_tr->clear(range);
ryw_tr->setOption(FDBTransactionOptions::LOCK_AWARE);
for (auto &range : ranges) {
ryw_tr->addReadConflictRange(range);
ryw_tr->clear(range);
}
wait( ryw_tr->commit() );
TraceEvent("AS_ClearedRange");
break;
@ -4079,7 +4155,7 @@ public:
Reference<IBackupContainer> bc = wait(backupConfig.backupContainer().getOrThrow(cx));
TraceEvent("AS_StartRestore");
Version ver = wait( restore(backupAgent, cx, tagName, KeyRef(bc->getURL()), true, -1, true, range, addPrefix, removePrefix, true, randomUid) );
Version ver = wait( restore(backupAgent, cx, cx, tagName, KeyRef(bc->getURL()), ranges, true, -1, true, addPrefix, removePrefix, true, randomUid) );
return ver;
}
};
@ -4088,12 +4164,12 @@ const std::string BackupAgentBase::defaultTagName = "default";
const int BackupAgentBase::logHeaderSize = 12;
const int FileBackupAgent::dataFooterSize = 20;
Future<Version> FileBackupAgent::restore(Database cx, Key tagName, Key url, bool waitForComplete, Version targetVersion, bool verbose, KeyRange range, Key addPrefix, Key removePrefix, bool lockDB) {
return FileBackupAgentImpl::restore(this, cx, tagName, url, waitForComplete, targetVersion, verbose, range, addPrefix, removePrefix, lockDB, g_random->randomUniqueID());
Future<Version> FileBackupAgent::restore(Database cx, Optional<Database> cxOrig, Key tagName, Key url, Standalone<VectorRef<KeyRangeRef>> ranges, bool waitForComplete, Version targetVersion, bool verbose, Key addPrefix, Key removePrefix, bool lockDB) {
return FileBackupAgentImpl::restore(this, cx, cxOrig, tagName, url, ranges, waitForComplete, targetVersion, verbose, addPrefix, removePrefix, lockDB, g_random->randomUniqueID());
}
Future<Version> FileBackupAgent::atomicRestore(Database cx, Key tagName, KeyRange range, Key addPrefix, Key removePrefix) {
return FileBackupAgentImpl::atomicRestore(this, cx, tagName, range, addPrefix, removePrefix);
Future<Version> FileBackupAgent::atomicRestore(Database cx, Key tagName, Standalone<VectorRef<KeyRangeRef>> ranges, Key addPrefix, Key removePrefix) {
return FileBackupAgentImpl::atomicRestore(this, cx, tagName, ranges, addPrefix, removePrefix);
}
Future<ERestoreState> FileBackupAgent::abortRestore(Reference<ReadYourWritesTransaction> tr, Key tagName) {

View File

@ -36,7 +36,7 @@ template <class Val, class Metric=int, class MetricFunc = ConstantMetric<Metric>
class KeyRangeMap : public RangeMap<Key,Val,KeyRangeRef,Metric,MetricFunc>, NonCopyable, public ReferenceCounted<KeyRangeMap<Val>> {
public:
explicit KeyRangeMap(Val v=Val(), Key endKey = allKeys.end) : RangeMap<Key,Val,KeyRangeRef,Metric,MetricFunc>(endKey, v), mapEnd(endKey) {}
void operator=(KeyRangeMap&& r) noexcept(true) { mapEnd = std::move(r.mapEnd); RangeMap<Key,Val,KeyRangeRef,Metric,MetricFunc>::operator=(std::move(r)); }
void operator=(KeyRangeMap&& r) BOOST_NOEXCEPT { mapEnd = std::move(r.mapEnd); RangeMap<Key,Val,KeyRangeRef,Metric,MetricFunc>::operator=(std::move(r)); }
void insert( const KeyRangeRef& keys, const Val& value ) { RangeMap<Key,Val,KeyRangeRef,Metric,MetricFunc>::insert(keys, value); }
void insert( const KeyRef& key, const Val& value ) { RangeMap<Key,Val,KeyRangeRef,Metric,MetricFunc>::insert( singleKeyRange(key), value); }
std::vector<KeyRangeWith<Val>> getAffectedRangesAfterInsertion( const KeyRangeRef& keys, const Val &insertionValue = Val());
@ -67,7 +67,7 @@ template <class Val, class Metric=int, class MetricFunc = ConstantMetric<Metric>
class CoalescedKeyRefRangeMap : public RangeMap<KeyRef,Val,KeyRangeRef,Metric,MetricFunc>, NonCopyable {
public:
explicit CoalescedKeyRefRangeMap(Val v=Val(), Key endKey = allKeys.end) : RangeMap<KeyRef,Val,KeyRangeRef,Metric,MetricFunc>(endKey, v), mapEnd(endKey) {}
void operator=(CoalescedKeyRefRangeMap&& r) noexcept(true) { mapEnd = std::move(r.mapEnd); RangeMap<KeyRef, Val, KeyRangeRef,Metric,MetricFunc>::operator=(std::move(r)); }
void operator=(CoalescedKeyRefRangeMap&& r) BOOST_NOEXCEPT { mapEnd = std::move(r.mapEnd); RangeMap<KeyRef, Val, KeyRangeRef,Metric,MetricFunc>::operator=(std::move(r)); }
void insert( const KeyRangeRef& keys, const Val& value );
void insert( const KeyRef& key, const Val& value, Arena& arena );
Key mapEnd;
@ -77,7 +77,7 @@ template <class Val, class Metric=int, class MetricFunc = ConstantMetric<Metric>
class CoalescedKeyRangeMap : public RangeMap<Key,Val,KeyRangeRef,Metric,MetricFunc>, NonCopyable {
public:
explicit CoalescedKeyRangeMap(Val v=Val(), Key endKey = allKeys.end) : RangeMap<Key,Val,KeyRangeRef,Metric,MetricFunc>(endKey, v), mapEnd(endKey) {}
void operator=(CoalescedKeyRangeMap&& r) noexcept(true) { mapEnd = std::move(r.mapEnd); RangeMap<Key,Val,KeyRangeRef,Metric,MetricFunc>::operator=(std::move(r)); }
void operator=(CoalescedKeyRangeMap&& r) BOOST_NOEXCEPT { mapEnd = std::move(r.mapEnd); RangeMap<Key,Val,KeyRangeRef,Metric,MetricFunc>::operator=(std::move(r)); }
void insert( const KeyRangeRef& keys, const Val& value );
void insert( const KeyRef& key, const Val& value );
Key mapEnd;

View File

@ -1730,7 +1730,7 @@ TEST_CASE("/ManagementAPI/AutoQuorumChange/checkLocality") {
data.locality.set(LiteralStringRef("rack"), StringRef(rack));
data.locality.set(LiteralStringRef("zoneid"), StringRef(rack));
data.locality.set(LiteralStringRef("machineid"), StringRef(machineId));
data.address.ip = i;
data.address.ip = IPAddress(i);
workers.push_back(data);
}
@ -1749,8 +1749,8 @@ TEST_CASE("/ManagementAPI/AutoQuorumChange/checkLocality") {
LiteralStringRef("machineid")
});
for(auto worker = chosen.begin(); worker != chosen.end(); worker++) {
ASSERT(worker->ip < workers.size());
LocalityData data = workers[worker->ip].locality;
ASSERT(worker->ip.toV4() < workers.size());
LocalityData data = workers[worker->ip.toV4()].locality;
for(auto field = fields.begin(); field != fields.end(); field++) {
chosenValues[*field].insert(data.get(*field).get());
}

View File

@ -214,6 +214,28 @@ TEST_CASE("/fdbclient/MonitorLeader/parseConnectionString/basic") {
ASSERT( input == cs.toString() );
}
{
input = "0xxdeadbeef:100100100@[::1]:1234,[::1]:1235";
std::string commented("#start of comment\n");
commented += input;
commented += "\n";
commented += "# asdfasdf ##";
ClusterConnectionString cs(commented);
ASSERT(input == cs.toString());
}
{
input = "0xxdeadbeef:100100100@[abcd:dcba::1]:1234,[abcd:dcba::abcd:1]:1234";
std::string commented("#start of comment\n");
commented += input;
commented += "\n";
commented += "# asdfasdf ##";
ClusterConnectionString cs(commented);
ASSERT(input == cs.toString());
}
return Void();
}

View File

@ -763,7 +763,7 @@ Database Database::createDatabase( std::string connFileName, int apiVersion, Loc
return Database::createDatabase(rccf, apiVersion, clientLocality);
}
extern uint32_t determinePublicIPAutomatically( ClusterConnectionString const& ccs );
extern IPAddress determinePublicIPAutomatically(ClusterConnectionString const& ccs);
Cluster::Cluster( Reference<ClusterConnectionFile> connFile, int apiVersion )
: clusterInterface(new AsyncVar<Optional<ClusterInterface>>())
@ -805,7 +805,7 @@ void Cluster::init( Reference<ClusterConnectionFile> connFile, bool startClientI
.detailf("ImageOffset", "%p", platform::getImageOffset())
.trackLatest("ClientStart");
initializeSystemMonitorMachineState(SystemMonitorMachineState(publicIP));
initializeSystemMonitorMachineState(SystemMonitorMachineState(IPAddress(publicIP)));
systemMonitor();
uncancellable( recurring( &systemMonitor, CLIENT_KNOBS->SYSTEM_MONITOR_INTERVAL, TaskFlushTrace ) );
@ -1069,24 +1069,27 @@ bool GetRangeLimits::hasSatisfiedMinRows() {
return hasByteLimit() && minRows == 0;
}
AddressExclusion AddressExclusion::parse( StringRef const& key ) {
//Must not change: serialized to the database!
std::string s = key.toString();
int a,b,c,d,port,count=-1;
if (sscanf(s.c_str(), "%d.%d.%d.%d%n", &a,&b,&c,&d, &count)<4) {
auto parsedIp = IPAddress::parse(key.toString());
if (parsedIp.present()) {
return AddressExclusion(parsedIp.get());
}
// Not a whole machine, includes `port'.
try {
auto addr = NetworkAddress::parse(key.toString());
if (addr.isTLS()) {
TraceEvent(SevWarnAlways, "AddressExclusionParseError")
.detail("String", printable(key))
.detail("Description", "Address inclusion string should not include `:tls' suffix.");
return AddressExclusion();
}
return AddressExclusion(addr.ip, addr.port);
} catch (Error& e) {
TraceEvent(SevWarnAlways, "AddressExclusionParseError").detail("String", printable(key));
return AddressExclusion();
}
s = s.substr(count);
uint32_t ip = (a<<24)+(b<<16)+(c<<8)+d;
if (!s.size())
return AddressExclusion( ip );
if (sscanf( s.c_str(), ":%d%n", &port, &count ) < 1 || count != s.size()) {
TraceEvent(SevWarnAlways, "AddressExclusionParseError").detail("String", printable(key));
return AddressExclusion();
}
return AddressExclusion( ip, port );
}
Future<Standalone<RangeResultRef>> getRange(
@ -1927,7 +1930,7 @@ Transaction::~Transaction() {
cancelWatches();
}
void Transaction::operator=(Transaction&& r) noexcept(true) {
void Transaction::operator=(Transaction&& r) BOOST_NOEXCEPT {
flushTrLogsIfEnabled();
cx = std::move(r.cx);
tr = std::move(r.tr);
@ -2072,7 +2075,7 @@ ACTOR Future< Standalone< VectorRef< const char*>>> getAddressesForKeyActor( Key
Standalone<VectorRef<const char*>> addresses;
for (auto i : ssi) {
std::string ipString = toIPString(i.address().ip);
std::string ipString = i.address().ip.toString();
char* c_string = new (addresses.arena()) char[ipString.length()+1];
strcpy(c_string, ipString.c_str());
addresses.push_back(addresses.arena(), c_string);

View File

@ -74,8 +74,8 @@ public:
Database() {} // an uninitialized database can be destructed or reassigned safely; that's it
void operator= ( Database const& rhs ) { db = rhs.db; }
Database( Database const& rhs ) : db(rhs.db) {}
Database(Database&& r) noexcept(true) : db(std::move(r.db)) {}
void operator= (Database&& r) noexcept(true) { db = std::move(r.db); }
Database(Database&& r) BOOST_NOEXCEPT : db(std::move(r.db)) {}
void operator= (Database&& r) BOOST_NOEXCEPT { db = std::move(r.db); }
// For internal use by the native client:
explicit Database(Reference<DatabaseContext> cx) : db(cx) {}
@ -280,7 +280,7 @@ public:
// These are to permit use as state variables in actors:
Transaction() : info( TaskDefaultEndpoint ) {}
void operator=(Transaction&& r) noexcept(true);
void operator=(Transaction&& r) BOOST_NOEXCEPT;
void reset();
void fullReset();

View File

@ -66,8 +66,8 @@ struct NotifiedVersion {
set( v );
}
NotifiedVersion(NotifiedVersion&& r) noexcept(true) : waiting(std::move(r.waiting)), val(std::move(r.val)) {}
void operator=(NotifiedVersion&& r) noexcept(true) { waiting = std::move(r.waiting); val = std::move(r.val); }
NotifiedVersion(NotifiedVersion&& r) BOOST_NOEXCEPT : waiting(std::move(r.waiting)), val(std::move(r.val)) {}
void operator=(NotifiedVersion&& r) BOOST_NOEXCEPT { waiting = std::move(r.waiting); val = std::move(r.val); }
private:
typedef std::pair<Version,Promise<Void>> Item;

View File

@ -1818,7 +1818,7 @@ void ReadYourWritesTransaction::setOption( FDBTransactionOptions::Option option,
tr.setOption( option, value );
}
void ReadYourWritesTransaction::operator=(ReadYourWritesTransaction&& r) noexcept(true) {
void ReadYourWritesTransaction::operator=(ReadYourWritesTransaction&& r) BOOST_NOEXCEPT {
cache = std::move( r.cache );
writes = std::move( r.writes );
arena = std::move( r.arena );
@ -1839,7 +1839,7 @@ void ReadYourWritesTransaction::operator=(ReadYourWritesTransaction&& r) noexcep
writes.arena = &arena;
}
ReadYourWritesTransaction::ReadYourWritesTransaction(ReadYourWritesTransaction&& r) noexcept(true) :
ReadYourWritesTransaction::ReadYourWritesTransaction(ReadYourWritesTransaction&& r) BOOST_NOEXCEPT :
cache( std::move(r.cache) ),
writes( std::move(r.writes) ),
arena( std::move(r.arena) ),

View File

@ -111,8 +111,8 @@ public:
// These are to permit use as state variables in actors:
ReadYourWritesTransaction() : cache(&arena), writes(&arena) {}
void operator=(ReadYourWritesTransaction&& r) noexcept(true);
ReadYourWritesTransaction(ReadYourWritesTransaction&& r) noexcept(true);
void operator=(ReadYourWritesTransaction&& r) BOOST_NOEXCEPT;
ReadYourWritesTransaction(ReadYourWritesTransaction&& r) BOOST_NOEXCEPT;
virtual void addref() { ReferenceCounted<ReadYourWritesTransaction>::addref(); }
virtual void delref() { ReferenceCounted<ReadYourWritesTransaction>::delref(); }

View File

@ -521,7 +521,7 @@ const KeyRef JSONSchemas::statusSchema = LiteralStringRef(R"statusSchema(
"initializing",
"missing_data",
"healing",
"removing_redundant_teams",
"optimizing_team_collections",
"healthy_repartitioning",
"healthy_removing_server",
"healthy_rebalancing",
@ -554,7 +554,7 @@ const KeyRef JSONSchemas::statusSchema = LiteralStringRef(R"statusSchema(
"initializing",
"missing_data",
"healing",
"removing_redundant_teams",
"optimizing_team_collections",
"healthy_repartitioning",
"healthy_removing_server",
"healthy_rebalancing",

View File

@ -277,8 +277,8 @@ public:
entries.insert( Entry( allKeys.end, afterAllKeys, VectorRef<KeyValueRef>() ), NoMetric(), true );
}
// Visual Studio refuses to generate these, apparently despite the standard
SnapshotCache(SnapshotCache&& r) noexcept(true) : entries(std::move(r.entries)), arena(r.arena) {}
SnapshotCache& operator=(SnapshotCache&& r) noexcept(true) { entries = std::move(r.entries); arena = r.arena; return *this; }
SnapshotCache(SnapshotCache&& r) BOOST_NOEXCEPT : entries(std::move(r.entries)), arena(r.arena) {}
SnapshotCache& operator=(SnapshotCache&& r) BOOST_NOEXCEPT { entries = std::move(r.entries); arena = r.arena; return *this; }
bool empty() const {
// Returns true iff anything is known about the contents of the snapshot

View File

@ -374,11 +374,7 @@ const AddressExclusion decodeExcludedServersKey( KeyRef const& key ) {
}
std::string encodeExcludedServersKey( AddressExclusion const& addr ) {
//FIXME: make sure what's persisted here is not affected by innocent changes elsewhere
std::string as = format( "%d.%d.%d.%d", (addr.ip>>24)&0xff, (addr.ip>>16)&0xff, (addr.ip>>8)&0xff, addr.ip&0xff );
//ASSERT( StringRef(as).endsWith(LiteralStringRef(":0")) == (addr.port == 0) );
if (!addr.isWholeMachine())
as += format(":%d", addr.port);
return excludedServersPrefix.toString() + as;
return excludedServersPrefix.toString() + addr.toString();
}
const KeyRangeRef workerListKeys( LiteralStringRef("\xff/worker/"), LiteralStringRef("\xff/worker0") );

View File

@ -286,12 +286,12 @@ ThreadFuture<Void> ThreadSafeTransaction::onError( Error const& e ) {
return onMainThread( [tr, e](){ return tr->onError(e); } );
}
void ThreadSafeTransaction::operator=(ThreadSafeTransaction&& r) noexcept(true) {
void ThreadSafeTransaction::operator=(ThreadSafeTransaction&& r) BOOST_NOEXCEPT {
tr = r.tr;
r.tr = NULL;
}
ThreadSafeTransaction::ThreadSafeTransaction(ThreadSafeTransaction&& r) noexcept(true) {
ThreadSafeTransaction::ThreadSafeTransaction(ThreadSafeTransaction&& r) BOOST_NOEXCEPT {
tr = r.tr;
r.tr = NULL;
}

View File

@ -96,8 +96,8 @@ public:
// These are to permit use as state variables in actors:
ThreadSafeTransaction() : tr(NULL) {}
void operator=(ThreadSafeTransaction&& r) noexcept(true);
ThreadSafeTransaction(ThreadSafeTransaction&& r) noexcept(true);
void operator=(ThreadSafeTransaction&& r) BOOST_NOEXCEPT;
ThreadSafeTransaction(ThreadSafeTransaction&& r) BOOST_NOEXCEPT;
void reset();

View File

@ -489,10 +489,10 @@ public:
VersionedMap() : oldestVersion(0), latestVersion(0) {
latestRoot = &roots[0];
}
VersionedMap( VersionedMap&& v ) noexcept(true) : oldestVersion(v.oldestVersion), latestVersion(v.latestVersion), roots(std::move(v.roots)) {
VersionedMap( VersionedMap&& v ) BOOST_NOEXCEPT : oldestVersion(v.oldestVersion), latestVersion(v.latestVersion), roots(std::move(v.roots)) {
latestRoot = &roots[latestVersion];
}
void operator = (VersionedMap && v) noexcept(true) {
void operator = (VersionedMap && v) BOOST_NOEXCEPT {
oldestVersion = v.oldestVersion;
latestVersion = v.latestVersion;
roots = std::move(v.roots);

View File

@ -128,8 +128,8 @@ public:
PTreeImpl::insert( writes, ver, WriteMapEntry( afterAllKeys, OperationStack(), false, false, false, false, false ) );
}
WriteMap(WriteMap&& r) noexcept(true) : writeMapEmpty(r.writeMapEmpty), writes(std::move(r.writes)), ver(r.ver), scratch_iterator(std::move(r.scratch_iterator)), arena(r.arena) {}
WriteMap& operator=(WriteMap&& r) noexcept(true) { writeMapEmpty = r.writeMapEmpty; writes = std::move(r.writes); ver = r.ver; scratch_iterator = std::move(r.scratch_iterator); arena = r.arena; return *this; }
WriteMap(WriteMap&& r) BOOST_NOEXCEPT : writeMapEmpty(r.writeMapEmpty), writes(std::move(r.writes)), ver(r.ver), scratch_iterator(std::move(r.scratch_iterator)), arena(r.arena) {}
WriteMap& operator=(WriteMap&& r) BOOST_NOEXCEPT { writeMapEmpty = r.writeMapEmpty; writes = std::move(r.writes); ver = r.ver; scratch_iterator = std::move(r.scratch_iterator); arena = r.arena; return *this; }
//a write with addConflict false on top of an existing write with a conflict range will not remove the conflict
void mutate( KeyRef key, MutationRef::Type operation, ValueRef param, bool addConflict ) {

View File

@ -165,11 +165,11 @@
<PropertyGroup Label="UserMacros" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|X64'">
<LinkIncremental>true</LinkIncremental>
<IncludePath>$(IncludePath);../;C:\Program Files\boost_1_52_0</IncludePath>
<IncludePath>$(IncludePath);../;C:\Program Files\boost_1_67_0</IncludePath>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|X64'">
<LinkIncremental>false</LinkIncremental>
<IncludePath>$(IncludePath);../;C:\Program Files\boost_1_52_0</IncludePath>
<IncludePath>$(IncludePath);../;C:\Program Files\boost_1_67_0</IncludePath>
</PropertyGroup>
<ItemDefinitionGroup>
<ClCompile>

View File

@ -1,6 +1,7 @@
set(FDBMONITOR_SRCS ConvertUTF.h SimpleIni.h fdbmonitor.cpp)
add_executable(fdbmonitor ${FDBMONITOR_SRCS})
assert_no_version_h(fdbmonitor)
if(UNIX AND NOT APPLE)
target_link_libraries(fdbmonitor rt)
endif()
@ -8,6 +9,4 @@ endif()
# as soon as we get rid of the old build system
target_include_directories(fdbmonitor PRIVATE ${CMAKE_BINARY_DIR}/fdbclient)
if(NOT OPEN_FOR_IDE)
install(TARGETS fdbmonitor DESTINATION "${FDBMONITOR_INSTALL_LOCATION}" COMPONENT server)
endif()
fdb_install(TARGETS fdbmonitor DESTINATION fdbmonitor COMPONENT server)

View File

@ -77,7 +77,7 @@ struct OpenFileInfo : NonCopyable {
Future<Reference<IAsyncFile>> opened; // Only valid until the file is fully opened
OpenFileInfo() : f(0) {}
OpenFileInfo(OpenFileInfo && r) noexcept(true) : f(r.f), opened(std::move(r.opened)) { r.f = 0; }
OpenFileInfo(OpenFileInfo && r) BOOST_NOEXCEPT : f(r.f), opened(std::move(r.opened)) { r.f = 0; }
Future<Reference<IAsyncFile>> get() {
if (f) return Reference<IAsyncFile>::addRef(f);

View File

@ -194,27 +194,67 @@ public:
};
#define CONNECT_PACKET_V0 0x0FDB00A444020001LL
#define CONNECT_PACKET_V1 0x0FDB00A446030001LL
#define CONNECT_PACKET_V0_SIZE 14
#define CONNECT_PACKET_V1_SIZE 22
#define CONNECT_PACKET_V2_SIZE 26
#pragma pack( push, 1 )
struct ConnectPacket {
uint32_t connectPacketLength; // sizeof(ConnectPacket)-sizeof(uint32_t), or perhaps greater in later protocol versions
// The value does not inclueds the size of `connectPacketLength` itself,
// but only the other fields of this structure.
uint32_t connectPacketLength;
uint64_t protocolVersion; // Expect currentProtocolVersion
uint16_t canonicalRemotePort; // Port number to reconnect to the originating process
uint64_t connectionId; // Multi-version clients will use the same Id for both connections, other connections will set this to zero. Added at protocol Version 0x0FDB00A444020001.
uint32_t canonicalRemoteIp; // IP Address to reconnect to the originating process
size_t minimumSize() {
if (protocolVersion < CONNECT_PACKET_V0) return CONNECT_PACKET_V0_SIZE;
if (protocolVersion < CONNECT_PACKET_V1) return CONNECT_PACKET_V1_SIZE;
return CONNECT_PACKET_V2_SIZE;
// IP Address to reconnect to the originating process. Only one of these must be populated.
uint32_t canonicalRemoteIp4;
enum ConnectPacketFlags {
FLAG_IPV6 = 1
};
uint16_t flags;
uint8_t canonicalRemoteIp6[16];
IPAddress canonicalRemoteIp() const {
if (isIPv6()) {
IPAddress::IPAddressStore store;
memcpy(store.data(), canonicalRemoteIp6, sizeof(canonicalRemoteIp6));
return IPAddress(store);
} else {
return IPAddress(canonicalRemoteIp4);
}
}
void setCanonicalRemoteIp(const IPAddress& ip) {
if (ip.isV6()) {
flags = flags | FLAG_IPV6;
memcpy(&canonicalRemoteIp6, ip.toV6().data(), 16);
} else {
flags = flags & ~FLAG_IPV6;
canonicalRemoteIp4 = ip.toV4();
}
}
bool isIPv6() const { return flags & FLAG_IPV6; }
uint32_t totalPacketSize() const { return connectPacketLength + sizeof(connectPacketLength); }
template <class Ar>
void serialize(Ar& ar) {
serializer(ar, connectPacketLength);
ASSERT(connectPacketLength <= sizeof(ConnectPacket));
serializer(ar, protocolVersion, canonicalRemotePort, connectionId, canonicalRemoteIp4);
if (ar.isDeserializing && ar.protocolVersion() < 0x0FDB00B061030001LL) {
flags = 0;
} else {
// We can send everything in serialized packet, since the current version of ConnectPacket
// is backward compatible with CONNECT_PACKET_V0.
serializer(ar, flags);
ar.serializeBytes(&canonicalRemoteIp6, sizeof(canonicalRemoteIp6));
}
}
};
static_assert( sizeof(ConnectPacket) == CONNECT_PACKET_V2_SIZE, "ConnectPacket packed incorrectly" );
#pragma pack( pop )
ACTOR static Future<Void> connectionReader(TransportData* transport, Reference<IConnection> conn, Peer* peer,
@ -256,23 +296,23 @@ struct Peer : NonCopyable {
for(auto& addr : transport->localAddresses) {
if(addr.isTLS() == destination.isTLS()) {
pkt.canonicalRemotePort = addr.port;
pkt.canonicalRemoteIp = addr.ip;
pkt.setCanonicalRemoteIp(addr.ip);
found = true;
break;
}
}
if (!found) {
pkt.canonicalRemotePort = 0; // a "mixed" TLS/non-TLS connection is like a client/server connection - there's no way to reverse it
pkt.canonicalRemoteIp = 0;
pkt.setCanonicalRemoteIp(IPAddress(0));
}
pkt.connectPacketLength = sizeof(pkt)-sizeof(pkt.connectPacketLength);
pkt.connectPacketLength = sizeof(pkt) - sizeof(pkt.connectPacketLength);
pkt.protocolVersion = currentProtocolVersion;
pkt.connectionId = transport->transportId;
PacketBuffer* pb_first = new PacketBuffer;
PacketWriter wr( pb_first, NULL, Unversioned() );
wr.serializeBinaryItem(pkt);
pkt.serialize(wr);
unsent.prependWriteBuffer(pb_first, wr.finish());
}
@ -647,29 +687,31 @@ ACTOR static Future<Void> connectionReader(
if (expectConnectPacket && unprocessed_end-unprocessed_begin>=CONNECT_PACKET_V0_SIZE) {
// At the beginning of a connection, we expect to receive a packet containing the protocol version and the listening port of the remote process
ConnectPacket* p = (ConnectPacket*)unprocessed_begin;
uint64_t connectionId = 0;
int32_t connectPacketSize = p->minimumSize();
int32_t connectPacketSize = ((ConnectPacket*)unprocessed_begin)->totalPacketSize();
if ( unprocessed_end-unprocessed_begin >= connectPacketSize ) {
if(p->protocolVersion >= 0x0FDB00A444020001) {
connectionId = p->connectionId;
}
uint64_t protocolVersion = ((ConnectPacket*)unprocessed_begin)->protocolVersion;
BinaryReader pktReader(unprocessed_begin, connectPacketSize, AssumeVersion(protocolVersion));
ConnectPacket pkt;
serializer(pktReader, pkt);
if( (p->protocolVersion & compatibleProtocolVersionMask) != (currentProtocolVersion & compatibleProtocolVersionMask) ) {
incompatibleProtocolVersionNewer = p->protocolVersion > currentProtocolVersion;
NetworkAddress addr = p->canonicalRemotePort ? NetworkAddress( p->canonicalRemoteIp, p->canonicalRemotePort ) : conn->getPeerAddress();
uint64_t connectionId = pkt.connectionId;
if( (pkt.protocolVersion & compatibleProtocolVersionMask) != (currentProtocolVersion & compatibleProtocolVersionMask) ) {
incompatibleProtocolVersionNewer = pkt.protocolVersion > currentProtocolVersion;
NetworkAddress addr = pkt.canonicalRemotePort
? NetworkAddress(pkt.canonicalRemoteIp(), pkt.canonicalRemotePort)
: conn->getPeerAddress();
if(connectionId != 1) addr.port = 0;
if(!transport->multiVersionConnections.count(connectionId)) {
if(now() - transport->lastIncompatibleMessage > FLOW_KNOBS->CONNECTION_REJECTED_MESSAGE_DELAY) {
TraceEvent(SevWarn, "ConnectionRejected", conn->getDebugID())
.detail("Reason", "IncompatibleProtocolVersion")
.detail("LocalVersion", currentProtocolVersion)
.detail("RejectedVersion", p->protocolVersion)
.detail("VersionMask", compatibleProtocolVersionMask)
.detail("Peer", p->canonicalRemotePort ? NetworkAddress( p->canonicalRemoteIp, p->canonicalRemotePort ) : conn->getPeerAddress())
.detail("ConnectionId", connectionId);
.detail("Reason", "IncompatibleProtocolVersion")
.detail("LocalVersion", currentProtocolVersion)
.detail("RejectedVersion", pkt.protocolVersion)
.detail("VersionMask", compatibleProtocolVersionMask)
.detail("Peer", pkt.canonicalRemotePort ? NetworkAddress(pkt.canonicalRemoteIp(), pkt.canonicalRemotePort)
: conn->getPeerAddress())
.detail("ConnectionId", connectionId);
transport->lastIncompatibleMessage = now();
}
if(!transport->incompatiblePeers.count(addr)) {
@ -680,7 +722,7 @@ ACTOR static Future<Void> connectionReader(
}
compatible = false;
if(p->protocolVersion < 0x0FDB00A551000000LL) {
if(protocolVersion < 0x0FDB00A551000000LL) {
// Older versions expected us to hang up. It may work even if we don't hang up here, but it's safer to keep the old behavior.
throw incompatible_protocol_version();
}
@ -699,20 +741,23 @@ ACTOR static Future<Void> connectionReader(
unprocessed_begin += connectPacketSize;
expectConnectPacket = false;
peerProtocolVersion = p->protocolVersion;
peerProtocolVersion = protocolVersion;
if (peer != nullptr) {
// Outgoing connection; port information should be what we expect
TraceEvent("ConnectedOutgoing").suppressFor(1.0).detail("PeerAddr", NetworkAddress( p->canonicalRemoteIp, p->canonicalRemotePort ) );
TraceEvent("ConnectedOutgoing")
.suppressFor(1.0)
.detail("PeerAddr", NetworkAddress(pkt.canonicalRemoteIp(), pkt.canonicalRemotePort));
peer->compatible = compatible;
peer->incompatibleProtocolVersionNewer = incompatibleProtocolVersionNewer;
if (!compatible) {
peer->transport->numIncompatibleConnections++;
incompatiblePeerCounted = true;
}
ASSERT( p->canonicalRemotePort == peerAddress.port );
ASSERT( pkt.canonicalRemotePort == peerAddress.port );
} else {
if (p->canonicalRemotePort) {
peerAddress = NetworkAddress( p->canonicalRemoteIp, p->canonicalRemotePort, true, peerAddress.isTLS() );
if (pkt.canonicalRemotePort) {
peerAddress = NetworkAddress(pkt.canonicalRemoteIp(), pkt.canonicalRemotePort, true,
peerAddress.isTLS());
}
peer = transport->getPeer(peerAddress);
peer->compatible = compatible;

View File

@ -149,7 +149,7 @@ public:
void coalesce( const Range& k );
void validateCoalesced();
void operator=(RangeMap&& r) noexcept(true) { map = std::move(r.map); }
void operator=(RangeMap&& r) BOOST_NOEXCEPT { map = std::move(r.map); }
//void clear( const Val& value ) { ranges.clear(); ranges.insert(std::make_pair(Key(),value)); }
void insert( const Range& keys, const Val& value );

View File

@ -177,7 +177,7 @@ Future<Reference<IConnection>> TLSNetworkConnections::connect( NetworkAddress to
// addresses against certificates, so we have our own peer verifying logic
// to use. For FDB<->external system connections, we can use the standard
// hostname-based certificate verification logic.
if (host.empty() || host == toIPString(toAddr.ip))
if (host.empty() || host == toAddr.ip.toString())
return wrap(options->get_policy(TLSOptions::POLICY_VERIFY_PEERS), true, network->connect(clearAddr), std::string(""));
else
return wrap( options->get_policy(TLSOptions::POLICY_NO_VERIFY_PEERS), true, network->connect( clearAddr ), host );

View File

@ -868,7 +868,7 @@ template<> Future<int> chain<0>( Future<int> const& x ) {
return x;
}
Future<int> chain2( Future<int> const& x, int const& i );
ACTOR Future<int> chain2(Future<int> x, int i);
ACTOR Future<int> chain2( Future<int> x, int i ) {
if (i>1) {
@ -1017,7 +1017,7 @@ ACTOR void cycle(FutureStream<Void> in, PromiseStream<Void> out, int* ptotal){
loop{
waitNext(in);
(*ptotal)++;
out.send(_);
out.send(Void());
}
}

View File

@ -112,7 +112,7 @@ public:
bool isValid() const { return sav != NULL; }
ReplyPromise() : sav(new NetSAV<T>(0, 1)) {}
ReplyPromise(const ReplyPromise& rhs) : sav(rhs.sav) { sav->addPromiseRef(); }
ReplyPromise(ReplyPromise&& rhs) noexcept(true) : sav(rhs.sav) { rhs.sav = 0; }
ReplyPromise(ReplyPromise&& rhs) BOOST_NOEXCEPT : sav(rhs.sav) { rhs.sav = 0; }
~ReplyPromise() { if (sav) sav->delPromiseRef(); }
ReplyPromise(const Endpoint& endpoint) : sav(new NetSAV<T>(0, 1, endpoint)) {}
@ -123,7 +123,7 @@ public:
if (sav) sav->delPromiseRef();
sav = rhs.sav;
}
void operator=(ReplyPromise && rhs) noexcept(true) {
void operator=(ReplyPromise && rhs) BOOST_NOEXCEPT {
if (sav != rhs.sav) {
if (sav) sav->delPromiseRef();
sav = rhs.sav;
@ -323,13 +323,13 @@ public:
FutureStream<T> getFuture() const { queue->addFutureRef(); return FutureStream<T>(queue); }
RequestStream() : queue(new NetNotifiedQueue<T>(0, 1)) {}
RequestStream(const RequestStream& rhs) : queue(rhs.queue) { queue->addPromiseRef(); }
RequestStream(RequestStream&& rhs) noexcept(true) : queue(rhs.queue) { rhs.queue = 0; }
RequestStream(RequestStream&& rhs) BOOST_NOEXCEPT : queue(rhs.queue) { rhs.queue = 0; }
void operator=(const RequestStream& rhs) {
rhs.queue->addPromiseRef();
if (queue) queue->delPromiseRef();
queue = rhs.queue;
}
void operator=(RequestStream&& rhs) noexcept(true) {
void operator=(RequestStream&& rhs) BOOST_NOEXCEPT {
if (queue != rhs.queue) {
if (queue) queue->delPromiseRef();
queue = rhs.queue;

View File

@ -154,11 +154,11 @@
<PropertyGroup Label="UserMacros" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|X64'">
<LinkIncremental>true</LinkIncremental>
<IncludePath>$(IncludePath);../;C:\Program Files\boost_1_52_0</IncludePath>
<IncludePath>$(IncludePath);../;C:\Program Files\boost_1_67_0</IncludePath>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|X64'">
<LinkIncremental>false</LinkIncremental>
<IncludePath>$(IncludePath);../;C:\Program Files\boost_1_52_0</IncludePath>
<IncludePath>$(IncludePath);../;C:\Program Files\boost_1_67_0</IncludePath>
</PropertyGroup>
<ItemDefinitionGroup>
<CustomBuildStep>

View File

@ -135,28 +135,29 @@ struct SimClogging {
return t - tnow;
}
void clogPairFor( uint32_t from, uint32_t to, double t ) {
void clogPairFor(const IPAddress& from, const IPAddress& to, double t) {
auto& u = clogPairUntil[ std::make_pair( from, to ) ];
u = std::max(u, now() + t);
}
void clogSendFor( uint32_t from, double t ) {
void clogSendFor(const IPAddress& from, double t) {
auto& u = clogSendUntil[from];
u = std::max(u, now() + t);
}
void clogRecvFor( uint32_t from, double t ) {
void clogRecvFor(const IPAddress& from, double t) {
auto& u = clogRecvUntil[from];
u = std::max(u, now() + t);
}
double setPairLatencyIfNotSet( uint32_t from, uint32_t to, double t ) {
double setPairLatencyIfNotSet(const IPAddress& from, const IPAddress& to, double t) {
auto i = clogPairLatency.find( std::make_pair(from,to) );
if (i == clogPairLatency.end())
i = clogPairLatency.insert( std::make_pair( std::make_pair(from,to), t ) ).first;
return i->second;
}
private:
std::map< uint32_t, double > clogSendUntil, clogRecvUntil;
std::map< std::pair<uint32_t, uint32_t>, double > clogPairUntil;
std::map< std::pair<uint32_t, uint32_t>, double > clogPairLatency;
std::map<IPAddress, double> clogSendUntil, clogRecvUntil;
std::map<std::pair<IPAddress, IPAddress>, double> clogPairUntil;
std::map<std::pair<IPAddress, IPAddress>, double> clogPairLatency;
double halfLatency() {
double a = g_random->random01();
const double pFast = 0.999;
@ -790,8 +791,16 @@ public:
Reference<Sim2Conn> peerc( new Sim2Conn( peerp ) );
myc->connect(peerc, toAddr);
peerc->connect(myc, NetworkAddress( getCurrentProcess()->address.ip + g_random->randomInt(0,256),
g_random->randomInt(40000, 60000) ));
IPAddress localIp;
if (getCurrentProcess()->address.ip.isV6()) {
IPAddress::IPAddressStore store = getCurrentProcess()->address.ip.toV6();
uint16_t* ipParts = (uint16_t*)store.data();
ipParts[7] += g_random->randomInt(0, 256);
localIp = IPAddress(store);
} else {
localIp = IPAddress(getCurrentProcess()->address.ip.toV4() + g_random->randomInt(0, 256));
}
peerc->connect(myc, NetworkAddress(localIp, g_random->randomInt(40000, 60000)));
((Sim2Listener*)peerp->getListener(toAddr).getPtr())->incomingConnection( 0.5*g_random->random01(), Reference<IConnection>(peerc) );
return onConnect( ::delay(0.5*g_random->random01()), myc );
@ -966,17 +975,21 @@ public:
virtual void run() {
_run(this);
}
virtual ProcessInfo* newProcess(const char* name, uint32_t ip, uint16_t port, uint16_t listenPerProcess,
LocalityData locality, ProcessClass startingClass, const char* dataFolder, const char* coordinationFolder) {
virtual ProcessInfo* newProcess(const char* name, IPAddress ip, uint16_t port, uint16_t listenPerProcess,
LocalityData locality, ProcessClass startingClass, const char* dataFolder,
const char* coordinationFolder) {
ASSERT( locality.machineId().present() );
MachineInfo& machine = machines[ locality.machineId().get() ];
if (!machine.machineId.present())
machine.machineId = locality.machineId();
for( int i = 0; i < machine.processes.size(); i++ ) {
if( machine.processes[i]->locality.machineId() != locality.machineId() ) { // SOMEDAY: compute ip from locality to avoid this check
TraceEvent("Sim2Mismatch").detail("IP", format("%x", ip))
.detailext("MachineId", locality.machineId()).detail("NewName", name)
.detailext("ExistingMachineId", machine.processes[i]->locality.machineId()).detail("ExistingName", machine.processes[i]->name);
TraceEvent("Sim2Mismatch")
.detail("IP", format("%s", ip.toString().c_str()))
.detailext("MachineId", locality.machineId())
.detail("NewName", name)
.detailext("ExistingMachineId", machine.processes[i]->locality.machineId())
.detail("ExistingName", machine.processes[i]->name);
ASSERT( false );
}
ASSERT( machine.processes[i]->address.port != port );
@ -1499,22 +1512,24 @@ public:
return (kt == ktMin);
}
virtual void clogInterface( uint32_t ip, double seconds, ClogMode mode = ClogDefault ) {
virtual void clogInterface(const IPAddress& ip, double seconds, ClogMode mode = ClogDefault) {
if (mode == ClogDefault) {
double a = g_random->random01();
if ( a < 0.3 ) mode = ClogSend;
else if (a < 0.6 ) mode = ClogReceive;
else mode = ClogAll;
}
TraceEvent("ClogInterface").detail("IP", toIPString(ip)).detail("Delay", seconds)
.detail("Queue", mode==ClogSend?"Send":mode==ClogReceive?"Receive":"All");
TraceEvent("ClogInterface")
.detail("IP", ip.toString())
.detail("Delay", seconds)
.detail("Queue", mode == ClogSend ? "Send" : mode == ClogReceive ? "Receive" : "All");
if (mode == ClogSend || mode==ClogAll)
g_clogging.clogSendFor( ip, seconds );
if (mode == ClogReceive || mode==ClogAll)
g_clogging.clogRecvFor( ip, seconds );
}
virtual void clogPair( uint32_t from, uint32_t to, double seconds ) {
virtual void clogPair(const IPAddress& from, const IPAddress& to, double seconds) {
g_clogging.clogPairFor( from, to, seconds );
}
virtual std::vector<ProcessInfo*> getAllProcesses() const {
@ -1569,10 +1584,10 @@ public:
Promise<Void> action;
Task( double time, int taskID, uint64_t stable, ProcessInfo* machine, Promise<Void>&& action ) : time(time), taskID(taskID), stable(stable), machine(machine), action(std::move(action)) {}
Task( double time, int taskID, uint64_t stable, ProcessInfo* machine, Future<Void>& future ) : time(time), taskID(taskID), stable(stable), machine(machine) { future = action.getFuture(); }
Task(Task&& rhs) noexcept(true) : time(rhs.time), taskID(rhs.taskID), stable(rhs.stable), machine(rhs.machine), action(std::move(rhs.action)) {}
Task(Task&& rhs) BOOST_NOEXCEPT : time(rhs.time), taskID(rhs.taskID), stable(rhs.stable), machine(rhs.machine), action(std::move(rhs.action)) {}
void operator= ( Task const& rhs ) { taskID = rhs.taskID; time = rhs.time; stable = rhs.stable; machine = rhs.machine; action = rhs.action; }
Task( Task const& rhs ) : taskID(rhs.taskID), time(rhs.time), stable(rhs.stable), machine(rhs.machine), action(rhs.action) {}
void operator= (Task&& rhs) noexcept(true) { time = rhs.time; taskID = rhs.taskID; stable = rhs.stable; machine = rhs.machine; action = std::move(rhs.action); }
void operator= (Task&& rhs) BOOST_NOEXCEPT { time = rhs.time; taskID = rhs.taskID; stable = rhs.stable; machine = rhs.machine; action = std::move(rhs.action); }
bool operator < (Task const& rhs) const {
// Ordering is reversed for priority_queue
@ -1653,7 +1668,7 @@ public:
INetwork *net2;
//Map from machine IP -> machine disk space info
std::map<uint32_t, SimDiskSpace> diskSpaceMap;
std::map<IPAddress, SimDiskSpace> diskSpaceMap;
//Whether or not yield has returned true during the current iteration of the run loop
bool yielded;

View File

@ -114,8 +114,12 @@ public:
std::string toString() const {
const NetworkAddress& address = addresses[0];
return format("name: %s address: %d.%d.%d.%d:%d zone: %s datahall: %s class: %s excluded: %d cleared: %d",
name, (address.ip>>24)&0xff, (address.ip>>16)&0xff, (address.ip>>8)&0xff, address.ip&0xff, address.port, (locality.zoneId().present() ? locality.zoneId().get().printable().c_str() : "[unset]"), (locality.dataHallId().present() ? locality.dataHallId().get().printable().c_str() : "[unset]"), startingClass.toString().c_str(), excluded, cleared);
return format(
"name: %s address: %s zone: %s datahall: %s class: %s excluded: %d cleared: %d", name,
formatIpPort(address.ip, address.port).c_str(),
(locality.zoneId().present() ? locality.zoneId().get().printable().c_str() : "[unset]"),
(locality.dataHallId().present() ? locality.dataHallId().get().printable().c_str() : "[unset]"),
startingClass.toString().c_str(), excluded, cleared);
}
// Members not for external use
@ -138,7 +142,9 @@ public:
virtual Future<Void> onProcess( ISimulator::ProcessInfo *process, int taskID = -1 ) = 0;
virtual Future<Void> onMachine( ISimulator::ProcessInfo *process, int taskID = -1 ) = 0;
virtual ProcessInfo* newProcess(const char* name, uint32_t ip, uint16_t port, uint16_t listenPerProcess, LocalityData locality, ProcessClass startingClass, const char* dataFolder, const char* coordinationFolder) = 0;
virtual ProcessInfo* newProcess(const char* name, IPAddress ip, uint16_t port, uint16_t listenPerProcess,
LocalityData locality, ProcessClass startingClass, const char* dataFolder,
const char* coordinationFolder) = 0;
virtual void killProcess( ProcessInfo* machine, KillType ) = 0;
virtual void rebootProcess(Optional<Standalone<StringRef>> zoneId, bool allProcesses ) = 0;
virtual void rebootProcess( ProcessInfo* process, KillType kt ) = 0;
@ -256,8 +262,8 @@ public:
allSwapsDisabled = true;
}
virtual void clogInterface( uint32_t ip, double seconds, ClogMode mode = ClogDefault ) = 0;
virtual void clogPair( uint32_t from, uint32_t to, double seconds ) = 0;
virtual void clogInterface(const IPAddress& ip, double seconds, ClogMode mode = ClogDefault) = 0;
virtual void clogPair(const IPAddress& from, const IPAddress& to, double seconds) = 0;
virtual std::vector<ProcessInfo*> getAllProcesses() const = 0;
virtual ProcessInfo* getProcessByAddress( NetworkAddress const& address ) = 0;
virtual MachineInfo* getMachineByNetworkAddress(NetworkAddress const& address) = 0;

View File

@ -10,7 +10,7 @@ set(FDBSERVER_SRCS
CoroFlow.actor.cpp
CoroFlow.h
DataDistribution.actor.cpp
DataDistribution.h
DataDistribution.actor.h
DataDistributionQueue.actor.cpp
DataDistributionTracker.actor.cpp
DataDistributorInterface.h
@ -130,6 +130,7 @@ set(FDBSERVER_SRCS
workloads/IndexScan.actor.cpp
workloads/Inventory.actor.cpp
workloads/KVStoreTest.actor.cpp
workloads/KillRegion.actor.cpp
workloads/LockDatabase.actor.cpp
workloads/LogMetrics.actor.cpp
workloads/LowLatency.actor.cpp
@ -184,6 +185,4 @@ target_include_directories(fdbserver PRIVATE
${CMAKE_CURRENT_SOURCE_DIR}/workloads)
target_link_libraries(fdbserver PRIVATE fdbclient)
if(NOT OPEN_FOR_IDE)
install(TARGETS fdbserver DESTINATION ${FDB_SBIN_DIR} COMPONENT server)
endif()
fdb_install(TARGETS fdbserver DESTINATION sbin COMPONENT server)

View File

@ -60,9 +60,9 @@ struct WorkerInfo : NonCopyable {
WorkerInfo( Future<Void> watcher, ReplyPromise<RegisterWorkerReply> reply, Generation gen, WorkerInterface interf, ProcessClass initialClass, ProcessClass processClass, ClusterControllerPriorityInfo priorityInfo ) :
watcher(watcher), reply(reply), gen(gen), reboots(0), lastAvailableTime(now()), interf(interf), initialClass(initialClass), processClass(processClass), priorityInfo(priorityInfo) {}
WorkerInfo( WorkerInfo&& r ) noexcept(true) : watcher(std::move(r.watcher)), reply(std::move(r.reply)), gen(r.gen),
WorkerInfo( WorkerInfo&& r ) BOOST_NOEXCEPT : watcher(std::move(r.watcher)), reply(std::move(r.reply)), gen(r.gen),
reboots(r.reboots), lastAvailableTime(r.lastAvailableTime), interf(std::move(r.interf)), initialClass(r.initialClass), processClass(r.processClass), priorityInfo(r.priorityInfo) {}
void operator=( WorkerInfo&& r ) noexcept(true) {
void operator=( WorkerInfo&& r ) BOOST_NOEXCEPT {
watcher = std::move(r.watcher);
reply = std::move(r.reply);
gen = r.gen;

View File

@ -19,7 +19,7 @@
*/
#include "flow/ActorCollection.h"
#include "fdbserver/DataDistribution.h"
#include "fdbserver/DataDistribution.actor.h"
#include "fdbclient/SystemData.h"
#include "fdbclient/DatabaseContext.h"
#include "fdbserver/MoveKeys.actor.h"

View File

@ -1,5 +1,5 @@
/*
* DataDistribution.h
* DataDistribution.actor.h
*
* This source file is part of the FoundationDB open source project
*
@ -18,10 +18,17 @@
* limitations under the License.
*/
#if defined(NO_INTELLISENSE) && !defined(FDBSERVER_DATA_DISTRIBUTION_ACTOR_G_H)
#define FDBSERVER_DATA_DISTRIBUTION_ACTOR_G_H
#include "fdbserver/DataDistribution.actor.g.h"
#elif !defined(FDBSERVER_DATA_DISTRIBUTION_ACTOR_H)
#define FDBSERVER_DATA_DISTRIBUTION_ACTOR_H
#include "fdbclient/NativeAPI.actor.h"
#include "fdbserver/ClusterRecruitmentInterface.h"
#include "fdbserver/MoveKeys.actor.h"
#include "fdbserver/LogSystem.h"
#include "flow/actorcompiler.h" // This must be the last #include.
struct RelocateShard {
KeyRange keys;
@ -244,5 +251,7 @@ ShardSizeBounds getShardSizeBounds(KeyRangeRef shard, int64_t maxShardSize);
int64_t getMaxShardSize( double dbSizeEstimate );
class DDTeamCollection;
Future<Void> teamRemover(DDTeamCollection* const& self);
Future<Void> teamRemoverPeriodic(DDTeamCollection* const& self);
ACTOR Future<Void> teamRemover(DDTeamCollection* self);
ACTOR Future<Void> teamRemoverPeriodic(DDTeamCollection* self);
#endif

View File

@ -25,7 +25,7 @@
#include "flow/Util.h"
#include "fdbrpc/sim_validation.h"
#include "fdbclient/SystemData.h"
#include "fdbserver/DataDistribution.h"
#include "fdbserver/DataDistribution.actor.h"
#include "fdbclient/DatabaseContext.h"
#include "fdbserver/MoveKeys.actor.h"
#include "fdbserver/Knobs.h"

View File

@ -20,7 +20,7 @@
#include "fdbrpc/FailureMonitor.h"
#include "fdbclient/SystemData.h"
#include "fdbserver/DataDistribution.h"
#include "fdbserver/DataDistribution.actor.h"
#include "fdbserver/Knobs.h"
#include "fdbclient/DatabaseContext.h"
#include "flow/ActorCollection.h"

View File

@ -42,8 +42,8 @@ struct LogRouterData {
TagData( Tag tag, Version popped, Version durableKnownCommittedVersion ) : tag(tag), popped(popped), durableKnownCommittedVersion(durableKnownCommittedVersion) {}
TagData(TagData&& r) noexcept(true) : version_messages(std::move(r.version_messages)), tag(r.tag), popped(r.popped), durableKnownCommittedVersion(r.durableKnownCommittedVersion) {}
void operator= (TagData&& r) noexcept(true) {
TagData(TagData&& r) BOOST_NOEXCEPT : version_messages(std::move(r.version_messages)), tag(r.tag), popped(r.popped), durableKnownCommittedVersion(r.durableKnownCommittedVersion) {}
void operator= (TagData&& r) BOOST_NOEXCEPT {
version_messages = std::move(r.version_messages);
tag = r.tag;
popped = r.popped;

View File

@ -323,8 +323,8 @@ namespace oldTLog_4_6 {
TagData( Version popped, bool nothing_persistent, bool popped_recently, OldTag tag ) : nothing_persistent(nothing_persistent), popped(popped), popped_recently(popped_recently), update_version_sizes(tag != txsTagOld) {}
TagData(TagData&& r) noexcept(true) : version_messages(std::move(r.version_messages)), nothing_persistent(r.nothing_persistent), popped_recently(r.popped_recently), popped(r.popped), update_version_sizes(r.update_version_sizes) {}
void operator= (TagData&& r) noexcept(true) {
TagData(TagData&& r) BOOST_NOEXCEPT : version_messages(std::move(r.version_messages)), nothing_persistent(r.nothing_persistent), popped_recently(r.popped_recently), popped(r.popped), update_version_sizes(r.update_version_sizes) {}
void operator= (TagData&& r) BOOST_NOEXCEPT {
version_messages = std::move(r.version_messages);
nothing_persistent = r.nothing_persistent;
popped_recently = r.popped_recently;

View File

@ -294,8 +294,8 @@ struct LogData : NonCopyable, public ReferenceCounted<LogData> {
TagData( Tag tag, Version popped, bool nothingPersistent, bool poppedRecently, bool unpoppedRecovered ) : tag(tag), nothingPersistent(nothingPersistent), popped(popped), poppedRecently(poppedRecently), unpoppedRecovered(unpoppedRecovered) {}
TagData(TagData&& r) noexcept(true) : versionMessages(std::move(r.versionMessages)), nothingPersistent(r.nothingPersistent), poppedRecently(r.poppedRecently), popped(r.popped), tag(r.tag), unpoppedRecovered(r.unpoppedRecovered) {}
void operator= (TagData&& r) noexcept(true) {
TagData(TagData&& r) BOOST_NOEXCEPT : versionMessages(std::move(r.versionMessages)), nothingPersistent(r.nothingPersistent), poppedRecently(r.poppedRecently), popped(r.popped), tag(r.tag), unpoppedRecovered(r.unpoppedRecovered) {}
void operator= (TagData&& r) BOOST_NOEXCEPT {
versionMessages = std::move(r.versionMessages);
nothingPersistent = r.nothingPersistent;
poppedRecently = r.poppedRecently;

View File

@ -191,22 +191,13 @@ ACTOR Future<Void> runDr( Reference<ClusterConnectionFile> connFile ) {
// SOMEDAY: when a process can be rebooted in isolation from the other on that machine,
// a loop{} will be needed around the waiting on simulatedFDBD(). For now this simply
// takes care of house-keeping such as context switching and file closing.
ACTOR Future<ISimulator::KillType> simulatedFDBDRebooter(
Reference<ClusterConnectionFile> connFile,
uint32_t ip,
bool sslEnabled,
Reference<TLSOptions> tlsOptions,
uint16_t port,
uint16_t listenPerProcess,
LocalityData localities,
ProcessClass processClass,
std::string* dataFolder,
std::string* coordFolder,
std::string baseFolder,
ClusterConnectionString connStr,
bool useSeedFile,
bool runBackupAgents)
{
ACTOR Future<ISimulator::KillType> simulatedFDBDRebooter(Reference<ClusterConnectionFile> connFile, IPAddress ip,
bool sslEnabled, Reference<TLSOptions> tlsOptions,
uint16_t port, uint16_t listenPerProcess,
LocalityData localities, ProcessClass processClass,
std::string* dataFolder, std::string* coordFolder,
std::string baseFolder, ClusterConnectionString connStr,
bool useSeedFile, bool runBackupAgents) {
state ISimulator::ProcessInfo *simProcess = g_simulator.getCurrentProcess();
state UID randomId = g_nondeterministic_random->randomUniqueID();
state int cycles = 0;
@ -363,19 +354,10 @@ std::string describe(int const& val) {
// Since a datacenter kill is considered to be the same as killing a machine, files cannot be swapped across datacenters
std::map< Optional<Standalone<StringRef>>, std::vector< std::vector< std::string > > > availableFolders;
// process count is no longer needed because it is now the length of the vector of ip's, because it was one ip per process
ACTOR Future<Void> simulatedMachine(
ClusterConnectionString connStr,
std::vector<uint32_t> ips,
bool sslEnabled,
Reference<TLSOptions> tlsOptions,
LocalityData localities,
ProcessClass processClass,
std::string baseFolder,
bool restarting,
bool useSeedFile,
bool runBackupAgents,
bool sslOnly)
{
ACTOR Future<Void> simulatedMachine(ClusterConnectionString connStr, std::vector<IPAddress> ips, bool sslEnabled,
Reference<TLSOptions> tlsOptions, LocalityData localities,
ProcessClass processClass, std::string baseFolder, bool restarting,
bool useSeedFile, bool runBackupAgents, bool sslOnly) {
state int bootCount = 0;
state std::vector<std::string> myFolders;
state std::vector<std::string> coordFolders;
@ -603,6 +585,20 @@ ACTOR Future<Void> simulatedMachine(
}
}
IPAddress makeIPAddressForSim(bool isIPv6, std::array<int, 4> parts) {
if (isIPv6) {
IPAddress::IPAddressStore addrStore{ 0xAB, 0xCD };
uint16_t* ptr = (uint16_t*)addrStore.data();
ptr[4] = (uint16_t)(parts[0] << 8);
ptr[5] = (uint16_t)(parts[1] << 8);
ptr[6] = (uint16_t)(parts[2] << 8);
ptr[7] = (uint16_t)(parts[3] << 8);
return IPAddress(addrStore);
} else {
return IPAddress(parts[0] << 24 | parts[1] << 16 | parts[2] << 8 | parts[3]);
}
}
#include "fdbclient/MonitorLeader.h"
ACTOR Future<Void> restartSimulatedSystem(
@ -658,21 +654,43 @@ ACTOR Future<Void> restartSimulatedSystem(
dcIds.push_back(dcUIDini);
}
std::vector<uint32_t> ipAddrs;
std::vector<IPAddress> ipAddrs;
int processes = atoi(ini.GetValue(machineIdString.c_str(), "processes"));
auto ip = ini.GetValue(machineIdString.c_str(), "ipAddr");
// Helper to translate the IP address stored in INI file to out IPAddress representation.
// After IPv6 work, we store the actual string representation of IP address, however earlier, it was
// instead the 32 bit integer value.
auto parseIp = [](const char* ipStr) -> IPAddress {
Optional<IPAddress> parsedIp = IPAddress::parse(ipStr);
if (parsedIp.present()) {
return parsedIp.get();
} else {
return IPAddress(strtoul(ipStr, NULL, 10));
}
};
if( ip == NULL ) {
for (int i = 0; i < processes; i++){
ipAddrs.push_back(strtoul(ini.GetValue(machineIdString.c_str(), format("ipAddr%d", i*listenersPerProcess).c_str()), NULL, 10));
for (int i = 0; i < processes; i++) {
const char* val =
ini.GetValue(machineIdString.c_str(), format("ipAddr%d", i * listenersPerProcess).c_str());
ipAddrs.push_back(parseIp(val));
}
}
else {
// old way
ipAddrs.push_back(strtoul(ip, NULL, 10));
ipAddrs.push_back(parseIp(ip));
for (int i = 1; i < processes; i++){
ipAddrs.push_back(ipAddrs.back() + 1);
if (ipAddrs.back().isV6()) {
IPAddress::IPAddressStore store = ipAddrs.back().toV6();
uint16_t* ptr = (uint16_t*)store.data();
ptr[7] += 1;
ipAddrs.push_back(IPAddress(store));
} else {
ipAddrs.push_back(IPAddress(ipAddrs.back().toV4() + 1));
}
}
}
@ -1057,10 +1075,9 @@ void SimulationConfig::generateNormalConfig(int minimumReplication, int minimumR
}
}
void setupSimulatedSystem( vector<Future<Void>> *systemActors, std::string baseFolder,
int* pTesterCount, Optional<ClusterConnectionString> *pConnString,
Standalone<StringRef> *pStartingConfiguration, int extraDB, int minimumReplication, int minimumRegions, Reference<TLSOptions> tlsOptions)
{
void setupSimulatedSystem(vector<Future<Void>>* systemActors, std::string baseFolder, int* pTesterCount,
Optional<ClusterConnectionString>* pConnString, Standalone<StringRef>* pStartingConfiguration,
int extraDB, int minimumReplication, int minimumRegions, Reference<TLSOptions> tlsOptions) {
// SOMEDAY: this does not test multi-interface configurations
SimulationConfig simconfig(extraDB, minimumReplication, minimumRegions);
StatusObject startingConfigJSON = simconfig.db.toJSON(true);
@ -1137,6 +1154,11 @@ void setupSimulatedSystem( vector<Future<Void>> *systemActors, std::string baseF
TEST( sslEnabled ); // SSL enabled
TEST( !sslEnabled ); // SSL disabled
// Use IPv6 25% of the time
bool useIPv6 = g_random->random01() < 0.25;
TEST( useIPv6 );
TEST( !useIPv6 );
vector<NetworkAddress> coordinatorAddresses;
if(minimumRegions > 1) {
//do not put coordinators in the primary region so that we can kill that region safely
@ -1144,7 +1166,7 @@ void setupSimulatedSystem( vector<Future<Void>> *systemActors, std::string baseF
for( int dc = 1; dc < dataCenters; dc+=2 ) {
int dcCoordinators = coordinatorCount / nonPrimaryDcs + ((dc-1)/2 < coordinatorCount%nonPrimaryDcs);
for(int m = 0; m < dcCoordinators; m++) {
uint32_t ip = 2<<24 | dc<<16 | 1<<8 | m;
auto ip = makeIPAddressForSim(useIPv6, { 2, dc, 1, m });
coordinatorAddresses.push_back(NetworkAddress(ip, sslEnabled && !sslOnly ? 2 : 1, true, sslEnabled && sslOnly));
TraceEvent("SelectedCoordinator").detail("Address", coordinatorAddresses.back());
}
@ -1160,10 +1182,16 @@ void setupSimulatedSystem( vector<Future<Void>> *systemActors, std::string baseF
int machines = machineCount / dataCenters + (dc < machineCount % dataCenters);
for(int m = 0; m < dcCoordinators; m++) {
if(coordinatorCount>4 && (assignedMachines==4 || (m+1==dcCoordinators && assignedMachines<4 && assignedMachines+machines-dcCoordinators>=4))) {
uint32_t ip = 2<<24 | dc<<16 | 1<<8 | m;
TraceEvent("SkippedCoordinator").detail("Address", ip).detail("M", m).detail("Machines", machines).detail("Assigned", assignedMachines).detail("DcCoord", dcCoordinators).detail("CoordinatorCount", coordinatorCount);
auto ip = makeIPAddressForSim(useIPv6, { 2, dc, 1, m });
TraceEvent("SkippedCoordinator")
.detail("Address", ip.toString())
.detail("M", m)
.detail("Machines", machines)
.detail("Assigned", assignedMachines)
.detail("DcCoord", dcCoordinators)
.detail("CoordinatorCount", coordinatorCount);
} else {
uint32_t ip = 2<<24 | dc<<16 | 1<<8 | m;
auto ip = makeIPAddressForSim(useIPv6, { 2, dc, 1, m });
coordinatorAddresses.push_back(NetworkAddress(ip, sslEnabled && !sslOnly ? 2 : 1, true, sslEnabled && sslOnly));
TraceEvent("SelectedCoordinator").detail("Address", coordinatorAddresses.back()).detail("M", m).detail("Machines", machines).detail("Assigned", assignedMachines).detail("DcCoord", dcCoordinators).detail("P1", (m+1==dcCoordinators)).detail("P2", (assignedMachines<4)).detail("P3", (assignedMachines+machines-dcCoordinators>=4)).detail("CoordinatorCount", coordinatorCount);
}
@ -1175,10 +1203,13 @@ void setupSimulatedSystem( vector<Future<Void>> *systemActors, std::string baseF
g_random->randomShuffle(coordinatorAddresses);
for(int i = 0; i < (coordinatorAddresses.size()/2)+1; i++) {
TraceEvent("ProtectCoordinator").detail("Address", coordinatorAddresses[i]).detail("Coordinators", describe(coordinatorAddresses)).backtrace();
g_simulator.protectedAddresses.insert(NetworkAddress(coordinatorAddresses[i].ip,coordinatorAddresses[i].port,true,false));
TraceEvent("ProtectCoordinator")
.detail("Address", coordinatorAddresses[i])
.detail("Coordinators", describe(coordinatorAddresses));
g_simulator.protectedAddresses.insert(
NetworkAddress(coordinatorAddresses[i].ip, coordinatorAddresses[i].port, true, false));
if(coordinatorAddresses[i].port==2) {
g_simulator.protectedAddresses.insert(NetworkAddress(coordinatorAddresses[i].ip,1,true,false));
g_simulator.protectedAddresses.insert(NetworkAddress(coordinatorAddresses[i].ip, 1, true, false));
}
}
g_random->randomShuffle(coordinatorAddresses);
@ -1234,9 +1265,9 @@ void setupSimulatedSystem( vector<Future<Void>> *systemActors, std::string baseF
nonVersatileMachines++;
}
std::vector<uint32_t> ips;
for (int i = 0; i < processesPerMachine; i++){
ips.push_back(2 << 24 | dc << 16 | g_random->randomInt(1, i+2) << 8 | machine);
std::vector<IPAddress> ips;
for (int i = 0; i < processesPerMachine; i++) {
ips.push_back(makeIPAddressForSim(useIPv6, { 2, dc, g_random->randomInt(1, i + 2), machine }));
}
// check the sslEnablementMap using only one ip(
LocalityData localities(Optional<Standalone<StringRef>>(), zoneId, machineId, dcUID);
@ -1245,9 +1276,9 @@ void setupSimulatedSystem( vector<Future<Void>> *systemActors, std::string baseF
localities, processClass, baseFolder, false, machine == useSeedForMachine, true, sslOnly ), "SimulatedMachine"));
if (extraDB && g_simulator.extraDB->toString() != conn.toString()) {
std::vector<uint32_t> extraIps;
std::vector<IPAddress> extraIps;
for (int i = 0; i < processesPerMachine; i++){
extraIps.push_back(4 << 24 | dc << 16 | g_random->randomInt(1, i + 2) << 8 | machine);
extraIps.push_back(makeIPAddressForSim(useIPv6, { 4, dc, g_random->randomInt(1, i + 2), machine }));
}
Standalone<StringRef> newMachineId(g_random->randomUniqueID().toString());
@ -1277,8 +1308,8 @@ void setupSimulatedSystem( vector<Future<Void>> *systemActors, std::string baseF
int testerCount = *pTesterCount = g_random->randomInt(4, 9);
int useSeedForMachine = g_random->randomInt(0, testerCount);
for(int i=0; i<testerCount; i++) {
std::vector<uint32_t> ips;
ips.push_back(0x03040301 + i);
std::vector<IPAddress> ips;
ips.push_back(makeIPAddressForSim(useIPv6, { 3, 4, 3, i + 1 }));
Standalone<StringRef> newZoneId = Standalone<StringRef>(g_random->randomUniqueID().toString());
LocalityData localities(Optional<Standalone<StringRef>>(), newZoneId, newZoneId, Optional<Standalone<StringRef>>());
systemActors->push_back( reportErrors( simulatedMachine(
@ -1350,8 +1381,15 @@ ACTOR void setupAndRun(std::string dataFolder, const char *testFile, bool reboot
state int minimumRegions = 0;
checkExtraDB(testFile, extraDB, minimumReplication, minimumRegions);
wait( g_simulator.onProcess( g_simulator.newProcess(
"TestSystem", 0x01010101, 1, 1, LocalityData(Optional<Standalone<StringRef>>(), Standalone<StringRef>(g_random->randomUniqueID().toString()), Standalone<StringRef>(g_random->randomUniqueID().toString()), Optional<Standalone<StringRef>>()), ProcessClass(ProcessClass::TesterClass, ProcessClass::CommandLineSource), "", "" ), TaskDefaultYield ) );
// TODO (IPv6) Use IPv6?
wait(g_simulator.onProcess(
g_simulator.newProcess("TestSystem", IPAddress(0x01010101), 1, 1,
LocalityData(Optional<Standalone<StringRef>>(),
Standalone<StringRef>(g_random->randomUniqueID().toString()),
Standalone<StringRef>(g_random->randomUniqueID().toString()),
Optional<Standalone<StringRef>>()),
ProcessClass(ProcessClass::TesterClass, ProcessClass::CommandLineSource), "", ""),
TaskDefaultYield));
Sim2FileSystem::newFileSystem();
FlowTransport::createInstance(1);
if (tlsOptions->enabled()) {
@ -1367,7 +1405,8 @@ ACTOR void setupAndRun(std::string dataFolder, const char *testFile, bool reboot
}
else {
g_expect_full_pointermap = 1;
setupSimulatedSystem( &systemActors, dataFolder, &testerCount, &connFile, &startingConfiguration, extraDB, minimumReplication, minimumRegions, tlsOptions );
setupSimulatedSystem(&systemActors, dataFolder, &testerCount, &connFile, &startingConfiguration, extraDB,
minimumReplication, minimumRegions, tlsOptions);
wait( delay(1.0) ); // FIXME: WHY!!! //wait for machines to boot
}
std::string clusterFileDir = joinPath( dataFolder, g_random->randomUniqueID().toString() );

View File

@ -494,12 +494,12 @@ public:
~SkipList() {
destroy();
}
SkipList(SkipList&& other) noexcept(true)
SkipList(SkipList&& other) BOOST_NOEXCEPT
: header(other.header)
{
other.header = NULL;
}
void operator=(SkipList&& other) noexcept(true) {
void operator=(SkipList&& other) BOOST_NOEXCEPT {
destroy();
header = other.header;
other.header = NULL;

View File

@ -27,7 +27,7 @@
#include "fdbserver/ClusterRecruitmentInterface.h"
#include <time.h>
#include "fdbserver/CoordinationInterface.h"
#include "fdbserver/DataDistribution.h"
#include "fdbserver/DataDistribution.actor.h"
#include "flow/UnitTest.h"
#include "fdbserver/QuietDatabase.h"
#include "fdbserver/RecoveryState.h"
@ -289,7 +289,7 @@ static JsonBuilderObject machineStatusFetcher(WorkerEvents mMetrics, vector<std:
const TraceEventFields& event = it->second;
try {
std::string address = toIPString(it->first.ip);
std::string address = it->first.ip.toString();
// We will use the "physical" caluculated machine ID here to limit exposure to machineID repurposing
std::string machineId = event.getValue("MachineID");
@ -1204,8 +1204,8 @@ ACTOR static Future<JsonBuilderObject> dataStatusFetcher(std::pair<WorkerInterfa
}
else if (highestPriority >= PRIORITY_TEAM_REDUNDANT) {
stateSectionObj["healthy"] = true;
stateSectionObj["name"] = "removing_redundant_teams";
stateSectionObj["description"] = "Removing redundant machine teams";
stateSectionObj["name"] = "optimizing_team_collections";
stateSectionObj["description"] = "Optimizing team collections";
}
else if (highestPriority >= PRIORITY_MERGE_SHARD) {
stateSectionObj["healthy"] = true;
@ -1254,9 +1254,15 @@ namespace std
{
size_t operator()(const NetworkAddress& na) const
{
return (na.ip << 16) + na.port;
}
};
int result = 0;
if (na.ip.isV6()) {
result = hashlittle(na.ip.toV6().data(), 16, 0);
} else {
result = na.ip.toV4();
}
return (result << 16) + na.port;
}
};
}
ACTOR template <class iface>
@ -1667,7 +1673,7 @@ static JsonBuilderArray getClientIssuesAsMessages( ProcessIssuesMap const& _issu
std::map<std::string, std::vector<std::string>> deduplicatedIssues;
for(auto i : issues) {
deduplicatedIssues[i.second.first].push_back(format("%s:%d", toIPString(i.first.ip).c_str(), i.first.port));
deduplicatedIssues[i.second.first].push_back(formatIpPort(i.first.ip, i.first.port));
}
for (auto i : deduplicatedIssues) {

View File

@ -321,8 +321,8 @@ struct LogData : NonCopyable, public ReferenceCounted<LogData> {
TagData( Tag tag, Version popped, bool nothingPersistent, bool poppedRecently, bool unpoppedRecovered ) : tag(tag), nothingPersistent(nothingPersistent), popped(popped), poppedRecently(poppedRecently), unpoppedRecovered(unpoppedRecovered) {}
TagData(TagData&& r) noexcept(true) : versionMessages(std::move(r.versionMessages)), nothingPersistent(r.nothingPersistent), poppedRecently(r.poppedRecently), popped(r.popped), tag(r.tag), unpoppedRecovered(r.unpoppedRecovered) {}
void operator= (TagData&& r) noexcept(true) {
TagData(TagData&& r) BOOST_NOEXCEPT : versionMessages(std::move(r.versionMessages)), nothingPersistent(r.nothingPersistent), poppedRecently(r.poppedRecently), popped(r.popped), tag(r.tag), unpoppedRecovered(r.unpoppedRecovered) {}
void operator= (TagData&& r) BOOST_NOEXCEPT {
versionMessages = std::move(r.versionMessages);
nothingPersistent = r.nothingPersistent;
poppedRecently = r.poppedRecently;

View File

@ -38,7 +38,7 @@
#include "fdbserver/ServerDBInfo.h"
#include "fdbserver/MoveKeys.actor.h"
#include "fdbserver/ConflictSet.h"
#include "fdbserver/DataDistribution.h"
#include "fdbserver/DataDistribution.actor.h"
#include "fdbserver/NetworkTest.h"
#include "fdbserver/IKeyValueStore.h"
#include <algorithm>
@ -170,7 +170,7 @@ extern void copyTest();
extern void versionedMapTest();
extern void createTemplateDatabase();
// FIXME: this really belongs in a header somewhere since it is actually used.
extern uint32_t determinePublicIPAutomatically( ClusterConnectionString const& ccs );
extern IPAddress determinePublicIPAutomatically(ClusterConnectionString const& ccs);
extern const char* getHGVersion();
@ -776,7 +776,7 @@ std::pair<NetworkAddressList, NetworkAddressList> buildNetworkAddresses(const Cl
if (autoPublicAddress) {
try {
const NetworkAddress& parsedAddress = NetworkAddress::parse("0.0.0.0:" + publicAddressStr.substr(5));
uint32_t publicIP = determinePublicIPAutomatically(connectionFile.getConnectionString());
const IPAddress publicIP = determinePublicIPAutomatically(connectionFile.getConnectionString());
publicNetworkAddresses.emplace_back(publicIP, parsedAddress.port, true, parsedAddress.isTLS());
} catch (Error& e) {
fprintf(stderr, "ERROR: could not determine public address automatically from `%s': %s\n", publicAddressStr.c_str(), e.what());
@ -793,7 +793,7 @@ std::pair<NetworkAddressList, NetworkAddressList> buildNetworkAddresses(const Cl
const NetworkAddress& currentPublicAddress = publicNetworkAddresses.back();
if (!currentPublicAddress.isValid()) {
fprintf(stderr, "ERROR: %s is not valid a public ip address\n");
fprintf(stderr, "ERROR: %s is not a valid IP address\n", currentPublicAddress.toString().c_str());
flushAndExit(FDB_EXIT_ERROR);
}
@ -1749,7 +1749,8 @@ int main(int argc, char* argv[]) {
<< FastAllocator<512>::pageCount << " "
<< FastAllocator<1024>::pageCount << " "
<< FastAllocator<2048>::pageCount << " "
<< FastAllocator<4096>::pageCount << std::endl;
<< FastAllocator<4096>::pageCount << " "
<< FastAllocator<8192>::pageCount << std::endl;
vector< std::pair<std::string, const char*> > typeNames;
for( auto i = allocInstr.begin(); i != allocInstr.end(); ++i ) {

View File

@ -159,7 +159,9 @@
<ClInclude Include="CoordinatedState.h" />
<ClInclude Include="CoordinationInterface.h" />
<ClInclude Include="CoroFlow.h" />
<ClInclude Include="DataDistribution.h" />
<ActorCompiler Include="DataDistribution.actor.h">
<EnableCompile>false</EnableCompile>
</ActorCompiler>
<ClInclude Include="DataDistributorInterface.h" />
<ClInclude Include="DBCoreState.h" />
<ClInclude Include="IDiskQueue.h" />
@ -253,11 +255,11 @@
<PropertyGroup Label="UserMacros" />
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|X64'">
<LinkIncremental>true</LinkIncremental>
<IncludePath>$(IncludePath);../;C:\Program Files\boost_1_52_0</IncludePath>
<IncludePath>$(IncludePath);../;C:\Program Files\boost_1_67_0</IncludePath>
</PropertyGroup>
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|X64'">
<LinkIncremental>false</LinkIncremental>
<IncludePath>$(IncludePath);../;C:\Program Files\boost_1_52_0</IncludePath>
<IncludePath>$(IncludePath);../;C:\Program Files\boost_1_67_0</IncludePath>
<CustomBuildBeforeTargets>PreBuildEvent</CustomBuildBeforeTargets>
</PropertyGroup>
<ItemDefinitionGroup>

View File

@ -309,7 +309,7 @@
</ItemGroup>
<ItemGroup>
<ClInclude Include="ConflictSet.h" />
<ClInclude Include="DataDistribution.h" />
<ClInclude Include="DataDistribution.actor.h" />
<ClInclude Include="MoveKeys.actor.h" />
<ClInclude Include="pubsub.h" />
<ClInclude Include="Knobs.h" />

View File

@ -26,7 +26,7 @@
#include "fdbclient/Notified.h"
#include "fdbclient/SystemData.h"
#include "fdbserver/ConflictSet.h"
#include "fdbserver/DataDistribution.h"
#include "fdbserver/DataDistribution.actor.h"
#include "fdbserver/Knobs.h"
#include <iterator>
#include "fdbserver/WaitFailure.h"
@ -53,8 +53,8 @@ struct ProxyVersionReplies {
std::map<uint64_t, GetCommitVersionReply> replies;
NotifiedVersion latestRequestNum;
ProxyVersionReplies(ProxyVersionReplies&& r) noexcept(true) : replies(std::move(r.replies)), latestRequestNum(std::move(r.latestRequestNum)) {}
void operator=(ProxyVersionReplies&& r) noexcept(true) { replies = std::move(r.replies); latestRequestNum = std::move(r.latestRequestNum); }
ProxyVersionReplies(ProxyVersionReplies&& r) BOOST_NOEXCEPT : replies(std::move(r.replies)), latestRequestNum(std::move(r.latestRequestNum)) {}
void operator=(ProxyVersionReplies&& r) BOOST_NOEXCEPT { replies = std::move(r.replies); latestRequestNum = std::move(r.latestRequestNum); }
ProxyVersionReplies() : latestRequestNum(0) {}
};

View File

@ -81,9 +81,12 @@ struct AtomicRestoreWorkload : TestWorkload {
loop {
std::vector<Future<Version>> restores;
for (auto &range : self->backupRanges) {
restores.push_back(backupAgent.atomicRestore(cx, BackupAgentBase::getDefaultTag(), range, StringRef(), StringRef()));
if (g_random->random01() < 0.5) {
for (auto &range : self->backupRanges)
restores.push_back(backupAgent.atomicRestore(cx, BackupAgentBase::getDefaultTag(), range, StringRef(), StringRef()));
}
else {
restores.push_back(backupAgent.atomicRestore(cx, BackupAgentBase::getDefaultTag(), self->backupRanges, StringRef(), StringRef()));
}
try {
wait(waitForAll(restores));

View File

@ -34,10 +34,14 @@ struct BackupAndRestoreCorrectnessWorkload : TestWorkload {
int backupRangesCount, backupRangeLengthMax;
bool differentialBackup, performRestore, agentRequest;
Standalone<VectorRef<KeyRangeRef>> backupRanges;
std::vector<std::string> prefixesMandatory;
Standalone<VectorRef<KeyRangeRef>> skipRestoreRanges;
Standalone<VectorRef<KeyRangeRef>> restoreRanges;
static int backupAgentRequests;
bool locked;
bool allowPauses;
bool shareLogRange;
bool shouldSkipRestoreRanges;
BackupAndRestoreCorrectnessWorkload(WorkloadContext const& wcx)
: TestWorkload(wcx) {
@ -55,11 +59,12 @@ struct BackupAndRestoreCorrectnessWorkload : TestWorkload {
agentRequest = getOption(options, LiteralStringRef("simBackupAgents"), true);
allowPauses = getOption(options, LiteralStringRef("allowPauses"), true);
shareLogRange = getOption(options, LiteralStringRef("shareLogRange"), false);
KeyRef beginRange;
KeyRef endRange;
prefixesMandatory = getOption(options, LiteralStringRef("prefixesMandatory"), std::vector<std::string>());
shouldSkipRestoreRanges = g_random->random01() < 0.3 ? true : false;
TraceEvent("BARW_ClientId").detail("Id", wcx.clientId);
UID randomID = g_nondeterministic_random->randomUniqueID();
TraceEvent("BARW_PerformRestore", randomID).detail("Value", performRestore);
if (shareLogRange) {
bool beforePrefix = sharedRandomNumber & 1;
if (beforePrefix)
@ -83,10 +88,34 @@ struct BackupAndRestoreCorrectnessWorkload : TestWorkload {
backupRanges.push_back_deep(backupRanges.arena(), KeyRangeRef(start, *i));
// Track the added range
TraceEvent("BARW_BackupCorrectnessRange", randomID).detail("RangeBegin", (beginRange < endRange) ? printable(beginRange) : printable(endRange))
.detail("RangeEnd", (beginRange < endRange) ? printable(endRange) : printable(beginRange));
TraceEvent("BARW_BackupCorrectnessRange", randomID).detail("RangeBegin", start).detail("RangeEnd", *i);
}
}
if (performRestore && !prefixesMandatory.empty() && shouldSkipRestoreRanges) {
for (auto &range : backupRanges) {
bool intersection = false;
for (auto &prefix : prefixesMandatory) {
KeyRange mandatoryRange(KeyRangeRef(prefix, strinc(prefix)));
if (range.intersects(mandatoryRange))
intersection = true;
TraceEvent("BARW_PrefixSkipRangeDetails").detail("PrefixMandatory", printable(mandatoryRange)).detail("BackUpRange", printable(range)).detail("Intersection", intersection);
}
if (!intersection && g_random->random01() < 0.5)
skipRestoreRanges.push_back(skipRestoreRanges.arena(), range);
else
restoreRanges.push_back(restoreRanges.arena(), range);
}
}
else {
restoreRanges = backupRanges;
}
for (auto &range : restoreRanges) {
TraceEvent("BARW_RestoreRange", randomID).detail("RangeBegin", printable(range.begin)).detail("RangeEnd", printable(range.end));
}
for (auto &range : skipRestoreRanges) {
TraceEvent("BARW_SkipRange", randomID).detail("RangeBegin", printable(range.begin)).detail("RangeEnd", printable(range.end));
}
}
virtual std::string description() {
@ -117,6 +146,32 @@ struct BackupAndRestoreCorrectnessWorkload : TestWorkload {
}
// Only client 0 performs verification work; every other client reports success
// immediately so the check is not duplicated across the cluster.
virtual Future<bool> check(Database const& cx) {
	return (clientId == 0) ? _check(cx, this) : Future<bool>(true);
}
// Verifies that every range the workload decided to skip during restore is
// empty in the database: a correct restore must not have written any keys into
// a skipped range. Returns false (after logging a SevError trace event) if any
// skipped range contains data; retries the read transaction on retryable errors.
//
// Fix: the original body constructed a per-iteration `restoreTag` string that
// was never used; the dead local has been removed.
ACTOR static Future<bool> _check(Database cx, BackupAndRestoreCorrectnessWorkload *self) {
	state Transaction tr(cx);
	loop {
		try {
			state int restoreIndex;
			for (restoreIndex = 0; restoreIndex < self->skipRestoreRanges.size(); restoreIndex++) {
				state KeyRangeRef range = self->skipRestoreRanges[restoreIndex];
				// Unlimited row limit: any key at all in a skipped range is a failure.
				Standalone<RangeResultRef> res = wait(tr.getRange(range, GetRangeLimits::ROW_LIMIT_UNLIMITED));
				if (!res.empty()) {
					TraceEvent(SevError, "BARW_UnexpectedRangePresent").detail("Range", printable(range));
					return false;
				}
			}
			break;
		}
		catch (Error& e) {
			// Standard FDB retry loop: onError backs off / resets as appropriate.
			wait(tr.onError(e));
		}
	}
	return true;
}
@ -289,7 +344,7 @@ struct BackupAndRestoreCorrectnessWorkload : TestWorkload {
// Try doing a restore without clearing the keys
if (rowCount > 0) {
try {
wait(success(backupAgent->restore(cx, self->backupTag, KeyRef(lastBackupContainer), true, -1, true, normalKeys, Key(), Key(), self->locked)));
wait(success(backupAgent->restore(cx, cx, self->backupTag, KeyRef(lastBackupContainer), true, -1, true, normalKeys, Key(), Key(), self->locked)));
TraceEvent(SevError, "BARW_RestoreAllowedOverwrittingDatabase", randomID);
ASSERT(false);
}
@ -402,28 +457,51 @@ struct BackupAndRestoreCorrectnessWorkload : TestWorkload {
state std::vector<Future<Version>> restores;
state std::vector<Standalone<StringRef>> restoreTags;
state int restoreIndex;
for (restoreIndex = 0; restoreIndex < self->backupRanges.size(); restoreIndex++) {
auto range = self->backupRanges[restoreIndex];
state bool multipleRangesInOneTag = false;
state int restoreIndex = 0;
if (g_random->random01() < 0.5) {
for (restoreIndex = 0; restoreIndex < self->restoreRanges.size(); restoreIndex++) {
auto range = self->restoreRanges[restoreIndex];
Standalone<StringRef> restoreTag(self->backupTag.toString() + "_" + std::to_string(restoreIndex));
restoreTags.push_back(restoreTag);
restores.push_back(backupAgent.restore(cx, cx, restoreTag, KeyRef(lastBackupContainer->getURL()), true, targetVersion, true, range, Key(), Key(), self->locked));
}
}
else {
multipleRangesInOneTag = true;
Standalone<StringRef> restoreTag(self->backupTag.toString() + "_" + std::to_string(restoreIndex));
restoreTags.push_back(restoreTag);
restores.push_back(backupAgent.restore(cx, restoreTag, KeyRef(lastBackupContainer->getURL()), true, targetVersion, true, range, Key(), Key(), self->locked));
restores.push_back(backupAgent.restore(cx, cx, restoreTag, KeyRef(lastBackupContainer->getURL()), self->restoreRanges, true, targetVersion, true, Key(), Key(), self->locked));
}
// Sometimes kill and restart the restore
if(BUGGIFY) {
if (BUGGIFY) {
wait(delay(g_random->randomInt(0, 10)));
for(restoreIndex = 0; restoreIndex < restores.size(); restoreIndex++) {
FileBackupAgent::ERestoreState rs = wait(backupAgent.abortRestore(cx, restoreTags[restoreIndex]));
if (multipleRangesInOneTag) {
FileBackupAgent::ERestoreState rs = wait(backupAgent.abortRestore(cx, restoreTags[0]));
// The restore may have already completed, or the abort may have been done before the restore
// was even able to start. Only run a new restore if the previous one was actually aborted.
if (rs == FileBackupAgent::ERestoreState::ABORTED) {
wait(runRYWTransaction(cx, [=](Reference<ReadYourWritesTransaction> tr) -> Future<Void> {
tr->clear(self->backupRanges[restoreIndex]);
for(auto &range : self->restoreRanges)
tr->clear(range);
return Void();
}));
restores[restoreIndex] = backupAgent.restore(cx, restoreTags[restoreIndex], KeyRef(lastBackupContainer->getURL()), true, -1, true, self->backupRanges[restoreIndex], Key(), Key(), self->locked);
restores[restoreIndex] = backupAgent.restore(cx, cx, restoreTags[restoreIndex], KeyRef(lastBackupContainer->getURL()), self->restoreRanges, true, -1, true, Key(), Key(), self->locked);
}
}
else {
for (restoreIndex = 0; restoreIndex < restores.size(); restoreIndex++) {
FileBackupAgent::ERestoreState rs = wait(backupAgent.abortRestore(cx, restoreTags[restoreIndex]));
// The restore may have already completed, or the abort may have been done before the restore
// was even able to start. Only run a new restore if the previous one was actually aborted.
if (rs == FileBackupAgent::ERestoreState::ABORTED) {
wait(runRYWTransaction(cx, [=](Reference<ReadYourWritesTransaction> tr) -> Future<Void> {
tr->clear(self->restoreRanges[restoreIndex]);
return Void();
}));
restores[restoreIndex] = backupAgent.restore(cx, cx, restoreTags[restoreIndex], KeyRef(lastBackupContainer->getURL()), true, -1, true, self->restoreRanges[restoreIndex], Key(), Key(), self->locked);
}
}
}
}

View File

@ -28,7 +28,7 @@
#include "fdbrpc/simulator.h"
#include "fdbserver/Knobs.h"
#include "fdbserver/StorageMetrics.h"
#include "fdbserver/DataDistribution.h"
#include "fdbserver/DataDistribution.actor.h"
#include "fdbserver/QuietDatabase.h"
#include "flow/DeterministicRandom.h"
#include "fdbclient/ManagementAPI.actor.h"

View File

@ -90,7 +90,8 @@ struct CpuProfilerWorkload : TestWorkload
req.duration = 0; //unused
//The profiler output name will be the ip.port.prof
req.outputFile = StringRef(toIPString(self->profilingWorkers[i].address().ip) + "." + format("%d", self->profilingWorkers[i].address().port) + ".profile.bin");
req.outputFile = StringRef(self->profilingWorkers[i].address().ip.toString() + "." +
format("%d", self->profilingWorkers[i].address().port) + ".profile.bin");
replies.push_back(self->profilingWorkers[i].clientInterface.profiler.tryGetReply(req));
}

View File

@ -152,9 +152,8 @@ struct MemoryLifetime : KVWorkload {
tr = ReadYourWritesTransaction(cx);
wait( delay(0.01) );
//we cannot check the contents like other operations so just touch all the values to make sure we dont crash
for(int i = 0; i < getAddress_res1.size(); i++) {
int a,b,c,d,count=-1;
ASSERT(sscanf(getAddress_res1[i], "%d.%d.%d.%d%n", &a,&b,&c,&d, &count)==4 && count == strlen(getAddress_res1[i]));
for (int i = 0; i < getAddress_res1.size(); i++) {
ASSERT(IPAddress::parse(getAddress_res1[i]).present());
}
}
if(now() - startTime > self->testDuration)

View File

@ -65,7 +65,7 @@ struct RemoveServersSafelyWorkload : TestWorkload {
std::map<Optional<Standalone<StringRef>>, AddressExclusion> machinesMap; // Locality Zone Id -> ip address
std::vector<AddressExclusion> processAddrs; // IF (killProcesses) THEN ip:port ELSE ip addresses unique list of the machines
std::map<uint32_t, Optional<Standalone<StringRef>>> ip_dcid;
std::map<IPAddress, Optional<Standalone<StringRef>>> ip_dcid;
auto processes = getServers();
for(auto& it : processes) {
AddressExclusion machineIp(it->address.ip);

Some files were not shown because too many files have changed in this diff Show More