diff --git a/build/link-validate.sh b/build/link-validate.sh
index 54b22192da..ac2c799893 100755
--- a/build/link-validate.sh
+++ b/build/link-validate.sh
@@ -15,13 +15,22 @@ fi
 
 # Step 1: glibc version
 
+FAILED=0
 for i in $(objdump -T "$1" | awk '{print $5}' | grep GLIBC | sed 's/ *$//g' | sed 's/GLIBC_//' | sort | uniq); do
     if ! verlte "$i" "$2"; then
-        echo "!!! WARNING: DEPENDENCY ON NEWER LIBC DETECTED !!!"
-        exit 1
+        if [[ $FAILED == 0 ]]; then
+            echo "!!! WARNING: DEPENDENCY ON NEWER LIBC DETECTED !!!"
+        fi
+
+        objdump -T "$1" | grep GLIBC_$i | awk '{print $5 " " $6}' | grep "^GLIBC" | sort | awk '$0="\t"$0'
+        FAILED=1
     fi
 done
 
+if [[ $FAILED == 1 ]]; then
+    exit 1
+fi
+
 # Step 2: Other dynamic dependencies
 
 for j in $(objdump -p "$1" | grep NEEDED | awk '{print $2}'); do
diff --git a/documentation/sphinx/source/backups.rst b/documentation/sphinx/source/backups.rst
index fdfcbc4ffb..5600522f9f 100644
--- a/documentation/sphinx/source/backups.rst
+++ b/documentation/sphinx/source/backups.rst
@@ -406,10 +406,8 @@ The following options apply to all commands:
 
 ``--blob_credentials <FILE>``
   Use FILE as a :ref:`Blob Credential File`. Can be used multiple times.
 
-The following options apply to all commands except ``start``:
-
-``-C <CLUSTER_FILE>``
-  Path to the cluster file that should be used to connect to the FoundationDB cluster you want to use. If not specified, a :ref:`default cluster file` will be used.
+``--dest_cluster_file <CONNFILE>``
+  Required. Path to the cluster file that should be used to connect to the FoundationDB cluster you are restoring to.
 
 .. _restore-start:
 
@@ -424,10 +422,6 @@ The ``start`` command will start a new restore on the specified (or default) tag
 
 ``-r <BACKUP_URL>``
   Required. Specifies the Backup URL for the source backup data to restore to the database. The source data must be accessible by the ``backup_agent`` processes for the cluster.
 
-``--dest_cluster_file <CONNFILE>``
-  Required. The backup data will be restored into this cluster.
-
-
 ``-w``
   Wait for the restore to reach a final state (such as complete) before exiting. Prints a progress update every few seconds. Behavior is identical to that of the wait command.
diff --git a/documentation/sphinx/source/downloads.rst b/documentation/sphinx/source/downloads.rst
index 5f08800996..0ecaf87413 100644
--- a/documentation/sphinx/source/downloads.rst
+++ b/documentation/sphinx/source/downloads.rst
@@ -10,38 +10,38 @@ macOS
 
 The macOS installation package is supported on macOS 10.7+. It includes the client and (optionally) the server.
 
-* `FoundationDB-6.1.11.pkg `_
+* `FoundationDB-6.1.12.pkg `_
 
 Ubuntu
 ------
 
 The Ubuntu packages are supported on 64-bit Ubuntu 12.04+, but beware of the Linux kernel bug in Ubuntu 12.x.
 
-* `foundationdb-clients-6.1.11-1_amd64.deb `_
-* `foundationdb-server-6.1.11-1_amd64.deb `_ (depends on the clients package)
+* `foundationdb-clients-6.1.12-1_amd64.deb `_
+* `foundationdb-server-6.1.12-1_amd64.deb `_ (depends on the clients package)
 
 RHEL/CentOS EL6
 ---------------
 
 The RHEL/CentOS EL6 packages are supported on 64-bit RHEL/CentOS 6.x.
 
-* `foundationdb-clients-6.1.11-1.el6.x86_64.rpm `_
-* `foundationdb-server-6.1.11-1.el6.x86_64.rpm `_ (depends on the clients package)
+* `foundationdb-clients-6.1.12-1.el6.x86_64.rpm `_
+* `foundationdb-server-6.1.12-1.el6.x86_64.rpm `_ (depends on the clients package)
 
 RHEL/CentOS EL7
 ---------------
 
 The RHEL/CentOS EL7 packages are supported on 64-bit RHEL/CentOS 7.x.
 
-* `foundationdb-clients-6.1.11-1.el7.x86_64.rpm `_
-* `foundationdb-server-6.1.11-1.el7.x86_64.rpm `_ (depends on the clients package)
+* `foundationdb-clients-6.1.12-1.el7.x86_64.rpm `_
+* `foundationdb-server-6.1.12-1.el7.x86_64.rpm `_ (depends on the clients package)
 
 Windows
 -------
 
 The Windows installer is supported on 64-bit Windows XP and later. It includes the client and (optionally) the server.
 
-* `foundationdb-6.1.11-x64.msi `_
+* `foundationdb-6.1.12-x64.msi `_
 
 API Language Bindings
 =====================
@@ -58,18 +58,18 @@ On macOS and Windows, the FoundationDB Python API bindings are installed as part
 
 If you need to use the FoundationDB Python API from other Python installations or paths, download the Python package:
 
-* `foundationdb-6.1.11.tar.gz `_
+* `foundationdb-6.1.12.tar.gz `_
 
 Ruby 1.9.3/2.0.0+
 -----------------
 
-* `fdb-6.1.11.gem `_
+* `fdb-6.1.12.gem `_
 
 Java 8+
 -------
 
-* `fdb-java-6.1.11.jar `_
-* `fdb-java-6.1.11-javadoc.jar `_
+* `fdb-java-6.1.12.jar `_
+* `fdb-java-6.1.12-javadoc.jar `_
 
 Go 1.11+
 --------
diff --git a/documentation/sphinx/source/old-release-notes/release-notes-610.rst b/documentation/sphinx/source/old-release-notes/release-notes-610.rst
index 2a43886f82..ebaefbdeb1 100644
--- a/documentation/sphinx/source/old-release-notes/release-notes-610.rst
+++ b/documentation/sphinx/source/old-release-notes/release-notes-610.rst
@@ -2,6 +2,15 @@
 Release Notes
 #############
 
+6.1.12
+======
+
+Fixes
+-----
+
+* Fixed a thread safety issue while writing large keys or values. `(Issue #1846) <https://github.com/apple/foundationdb/issues/1846>`_
+* An untracked data distributor could prevent a newly recruited data distributor from being started. `(PR #1849) <https://github.com/apple/foundationdb/pull/1849>`_
+
 6.1.11
 ======
 
diff --git a/fdbbackup/backup.actor.cpp b/fdbbackup/backup.actor.cpp
index 931f5e9472..a4b81e7b5f 100644
--- a/fdbbackup/backup.actor.cpp
+++ b/fdbbackup/backup.actor.cpp
@@ -944,7 +944,7 @@ static void printBackupUsage(bool devhelp) {
     printf("  -e ERRORLIMIT  The maximum number of errors printed by status (default is 10).\n");
     printf("  -k KEYS        List of key ranges to backup.\n"
            "                 If not specified, the entire database will be backed up.\n");
-    printf("  -n, --dryrun   For start or restore operations, performs a trial run with no actual changes made.\n");
+    printf("  -n, --dryrun   For backup start or restore start, performs a trial run with no actual changes made.\n");
     printf("  --log          Enables trace file logging for the CLI session.\n"
            "  --logdir PATH  Specifes the output directory for trace files. If\n"
           "                 unspecified, defaults to the current directory. Has\n"
@@ -3485,27 +3485,31 @@ int main(int argc, char* argv[]) {
         break;
     case EXE_RESTORE:
         if(dryRun) {
+            if(restoreType != RESTORE_START) {
+                fprintf(stderr, "Restore dry run only works for 'start' command\n");
+                return FDB_EXIT_ERROR;
+            }
+
+            // Must explicitly call trace file options handling if not calling Database::createDatabase()
             initTraceFile();
         }
-        else if(restoreType != RESTORE_START && !initCluster()) {
-            return FDB_EXIT_ERROR;
-        }
+        else {
+            if(restoreClusterFileDest.empty()) {
+                fprintf(stderr, "Restore destination cluster file must be specified explicitly.\n");
+                return FDB_EXIT_ERROR;
+            }
 
-        if(restoreClusterFileDest.empty()) {
-            fprintf(stderr, "Restore destination cluster file must be specified explicitly.\n");
-            return FDB_EXIT_ERROR;
-        }
+            if(!fileExists(restoreClusterFileDest)) {
+                fprintf(stderr, "Restore destination cluster file '%s' does not exist.\n", restoreClusterFileDest.c_str());
+                return FDB_EXIT_ERROR;
+            }
 
-        if(!fileExists(restoreClusterFileDest)) {
-            fprintf(stderr, "Restore destination cluster file '%s' does not exist.\n", restoreClusterFileDest.c_str());
-            return FDB_EXIT_ERROR;
-        }
-
-        try {
-            db = Database::createDatabase(restoreClusterFileDest, Database::API_VERSION_LATEST);
-        } catch(Error &e) {
-            fprintf(stderr, "Restore destination cluster file '%s' invalid: %s\n", restoreClusterFileDest.c_str(), e.what());
-            return FDB_EXIT_ERROR;
+            try {
+                db = Database::createDatabase(restoreClusterFileDest, Database::API_VERSION_LATEST);
+            } catch(Error &e) {
+                fprintf(stderr, "Restore destination cluster file '%s' invalid: %s\n", restoreClusterFileDest.c_str(), e.what());
+                return FDB_EXIT_ERROR;
+            }
         }
 
         switch(restoreType) {
diff --git a/fdbserver/MoveKeys.actor.cpp b/fdbserver/MoveKeys.actor.cpp
index 6a979e3cc5..6cdf86c1cb 100644
--- a/fdbserver/MoveKeys.actor.cpp
+++ b/fdbserver/MoveKeys.actor.cpp
@@ -28,7 +28,7 @@
 using std::min;
 using std::max;
 
-ACTOR Future<MoveKeysLock> takeMoveKeysLock( Database cx, UID masterId ) {
+ACTOR Future<MoveKeysLock> takeMoveKeysLock(Database cx, UID ddId) {
     state Transaction tr(cx);
     loop {
         try {
@@ -36,7 +36,7 @@ ACTOR Future<MoveKeysLock> takeMoveKeysLock( Database cx, UID masterId ) {
             tr.setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);
             if( !g_network->isSimulated() ) {
                 UID id(deterministicRandom()->randomUniqueID());
-                TraceEvent("TakeMoveKeysLockTransaction", masterId)
+                TraceEvent("TakeMoveKeysLockTransaction", ddId)
                     .detail("TransactionUID", id);
                 tr.debugTransaction( id );
             }
@@ -49,6 +49,8 @@ ACTOR Future<MoveKeysLock> takeMoveKeysLock( Database cx, UID masterId ) {
                 lock.prevWrite = readVal.present() ? BinaryReader::fromStringRef<UID>(readVal.get(), Unversioned()) : UID();
             }
             lock.myOwner = deterministicRandom()->randomUniqueID();
+            tr.set(moveKeysLockOwnerKey, BinaryWriter::toValue(lock.myOwner, Unversioned()));
+            wait(tr.commit());
             return lock;
         } catch (Error &e){
             wait(tr.onError(e));
diff --git a/fdbserver/MoveKeys.actor.h b/fdbserver/MoveKeys.actor.h
index 9e44af3076..2e483d15d8 100644
--- a/fdbserver/MoveKeys.actor.h
+++ b/fdbserver/MoveKeys.actor.h
@@ -37,15 +37,14 @@ struct MoveKeysLock {
     void serialize(Ar& ar) { serializer(ar, prevOwner, myOwner, prevWrite); }
 };
 
-ACTOR Future<MoveKeysLock> takeMoveKeysLock(Database cx, UID masterId);
 // Calling moveKeys, etc with the return value of this actor ensures that no movekeys, etc
-// has been executed by a different locker since takeMoveKeysLock().
-// takeMoveKeysLock itself is a read-only operation - it does not conflict with other
-// attempts to take the lock.
+// has been executed by a different locker since takeMoveKeysLock(), as calling
+// takeMoveKeysLock() updates "moveKeysLockOwnerKey" to a random UID.
+ACTOR Future<MoveKeysLock> takeMoveKeysLock(Database cx, UID ddId);
 
-Future<Void> checkMoveKeysLockReadOnly( Transaction* tr, MoveKeysLock lock );
 // Checks that the a moveKeysLock has not changed since having taken it
 // This does not modify the moveKeysLock
+Future<Void> checkMoveKeysLockReadOnly(Transaction* tr, MoveKeysLock lock);
 
 void seedShardServers(
     Arena& trArena,
diff --git a/flow/Arena.h b/flow/Arena.h
index b956b195b0..b761bdb9ff 100644
--- a/flow/Arena.h
+++ b/flow/Arena.h
@@ -259,7 +259,7 @@ struct ArenaBlock : NonCopyable, ThreadSafeReferenceCounted<ArenaBlock>
             if(FLOW_KNOBS && g_trace_depth == 0 && nondeterministicRandom()->random01() < (reqSize / FLOW_KNOBS->HUGE_ARENA_LOGGING_BYTES)) {
                 hugeArenaSample(reqSize);
             }
-            g_hugeArenaMemory += reqSize;
+            g_hugeArenaMemory.fetch_add(reqSize);
 
             // If the new block has less free space than the old block, make the old block depend on it
             if (next && !next->isTiny() && next->unused() >= reqSize-dataSize) {
@@ -296,7 +296,7 @@ struct ArenaBlock : NonCopyable, ThreadSafeReferenceCounted<ArenaBlock>
 #ifdef ALLOC_INSTRUMENTATION
             allocInstr[ "ArenaHugeKB" ].dealloc( (bigSize+1023)>>10 );
 #endif
-            g_hugeArenaMemory -= bigSize;
+            g_hugeArenaMemory.fetch_sub(bigSize);
             delete[] (uint8_t*)this;
         }
     }
diff --git a/flow/FastAlloc.cpp b/flow/FastAlloc.cpp
index e909c470ae..ac5f4c79b1 100644
--- a/flow/FastAlloc.cpp
+++ b/flow/FastAlloc.cpp
@@ -82,21 +82,23 @@ void setFastAllocatorThreadInitFunction( ThreadInitFunction f ) {
     threadInitFunction = f;
 }
 
-int64_t g_hugeArenaMemory = 0;
+std::atomic<int64_t> g_hugeArenaMemory(0);
 
 double hugeArenaLastLogged = 0;
 std::map<std::string, std::pair<int, int64_t>> hugeArenaTraces;
 
 void hugeArenaSample(int size) {
-    auto& info = hugeArenaTraces[platform::get_backtrace()];
-    info.first++;
-    info.second+=size;
-    if(now() - hugeArenaLastLogged > FLOW_KNOBS->HUGE_ARENA_LOGGING_INTERVAL) {
-        for(auto& it : hugeArenaTraces) {
-            TraceEvent("HugeArenaSample").detail("Count", it.second.first).detail("Size", it.second.second).detail("Backtrace", it.first);
+    if(TraceEvent::isNetworkThread()) {
+        auto& info = hugeArenaTraces[platform::get_backtrace()];
+        info.first++;
+        info.second+=size;
+        if(now() - hugeArenaLastLogged > FLOW_KNOBS->HUGE_ARENA_LOGGING_INTERVAL) {
+            for(auto& it : hugeArenaTraces) {
+                TraceEvent("HugeArenaSample").detail("Count", it.second.first).detail("Size", it.second.second).detail("Backtrace", it.first);
+            }
+            hugeArenaLastLogged = now();
+            hugeArenaTraces.clear();
         }
-        hugeArenaLastLogged = now();
-        hugeArenaTraces.clear();
     }
 }
diff --git a/flow/FastAlloc.h b/flow/FastAlloc.h
index 1959816e54..28609eca67 100644
--- a/flow/FastAlloc.h
+++ b/flow/FastAlloc.h
@@ -40,6 +40,7 @@
 #include "flow/Hash3.h"
 
 #include
+#include <atomic>
 #include
 #include
 #include
@@ -152,7 +153,7 @@ private:
     static void releaseMagazine(void*);
 };
 
-extern int64_t g_hugeArenaMemory;
+extern std::atomic<int64_t> g_hugeArenaMemory;
 void hugeArenaSample(int size);
 void releaseAllThreadMagazines();
 int64_t getTotalUnusedAllocatedMemory();
diff --git a/flow/SystemMonitor.cpp b/flow/SystemMonitor.cpp
index c391c93db1..1cc537cb9b 100644
--- a/flow/SystemMonitor.cpp
+++ b/flow/SystemMonitor.cpp
@@ -115,7 +115,7 @@ SystemStatistics customSystemMonitor(std::string eventName, StatisticsState *sta
             .DETAILALLOCATORMEMUSAGE(2048)
             .DETAILALLOCATORMEMUSAGE(4096)
             .DETAILALLOCATORMEMUSAGE(8192)
-            .detail("HugeArenaMemory", g_hugeArenaMemory);
+            .detail("HugeArenaMemory", g_hugeArenaMemory.load());
 
         TraceEvent n("NetworkMetrics");
         n
diff --git a/flow/Trace.cpp b/flow/Trace.cpp
index 7abd7504db..41fa71ae73 100644
--- a/flow/Trace.cpp
+++ b/flow/Trace.cpp
@@ -43,7 +43,7 @@
 #undef min
 #endif
 
-int g_trace_depth = 0;
+thread_local int g_trace_depth = 0;
 
 class DummyThreadPool : public IThreadPool, ReferenceCounted<DummyThreadPool> {
 public:
diff --git a/flow/Trace.h b/flow/Trace.h
index b3e91e380c..da2d39fc55 100644
--- a/flow/Trace.h
+++ b/flow/Trace.h
@@ -42,7 +42,7 @@ inline int fastrand() {
 
 //inline static bool TRACE_SAMPLE() { return fastrand()<16; }
 inline static bool TRACE_SAMPLE() { return false; }
-extern int g_trace_depth;
+extern thread_local int g_trace_depth;
 
 enum Severity {
     SevSample=1,
diff --git a/packaging/msi/FDBInstaller.wxs b/packaging/msi/FDBInstaller.wxs
index 486cd85ea0..ff932587a4 100644
--- a/packaging/msi/FDBInstaller.wxs
+++ b/packaging/msi/FDBInstaller.wxs
@@ -32,7 +32,7 @@
+ true -PRERELEASE .0 + true .1