From 7fbc4d739169484c6ba8ebe8fe5263c0bdccac58 Mon Sep 17 00:00:00 2001 From: Andrew Noyes Date: Fri, 4 Dec 2020 23:58:42 +0000 Subject: [PATCH] Resolve conflicts --- fdbclient/NativeAPI.actor.cpp | 4 -- fdbrpc/LoadBalance.actor.h | 8 ---- fdbserver/DataDistribution.actor.h | 40 +++++------------- fdbserver/DataDistributionQueue.actor.cpp | 11 ----- fdbserver/KeyValueStoreRocksDB.actor.cpp | 4 -- fdbserver/KeyValueStoreSQLite.actor.cpp | 8 ---- fdbserver/TLogServer.actor.cpp | 5 --- fdbserver/storageserver.actor.cpp | 11 ++--- fdbserver/workloads/LowLatency.actor.cpp | 6 +-- flow/CMakeLists.txt | 6 --- flow/Trace.cpp | 6 --- flow/error_definitions.h | 3 -- tests/CMakeLists.txt | 49 +---------------------- versions.target | 4 +- 14 files changed, 17 insertions(+), 148 deletions(-) diff --git a/fdbclient/NativeAPI.actor.cpp b/fdbclient/NativeAPI.actor.cpp index a1027930a2..9b6b2a343f 100644 --- a/fdbclient/NativeAPI.actor.cpp +++ b/fdbclient/NativeAPI.actor.cpp @@ -1822,16 +1822,12 @@ ACTOR Future>>> getKeyRangeLocatio } } -<<<<<<< HEAD -// Returns a vector of pairs. -======= // Get the SS locations for each shard in the 'keys' key-range; // Returned vector size is the number of shards in the input keys key-range. // Returned vector element is pairs, where // ShardRange is the whole shard key-range, not a part of the given key range. // Example: If query the function with key range (b, d), the returned list of pairs could be something like: // [([a, b1), locationInfo), ([b1, c), locationInfo), ([c, d1), locationInfo)]. ->>>>>>> release-6.3 template Future< vector< pair> > > getKeyRangeLocations( Database const& cx, KeyRange const& keys, int limit, bool reverse, F StorageServerInterface::*member, TransactionInfo const& info ) { ASSERT (!keys.empty()); diff --git a/fdbrpc/LoadBalance.actor.h b/fdbrpc/LoadBalance.actor.h index abc8c81774..98f3d75671 100644 --- a/fdbrpc/LoadBalance.actor.h +++ b/fdbrpc/LoadBalance.actor.h @@ -171,7 +171,6 @@ void addLaggingRequest(Future> reply, Promise requestFinis // failMon's information for load balancing and avoiding failed servers // If ALL the servers are failed and the list of servers is not fresh, throws an exception to let the caller refresh the list of servers ACTOR template -<<<<<<< HEAD Future< REPLY_TYPE(Request) > loadBalance( Reference> alternatives, RequestStream Interface::* channel, @@ -180,13 +179,6 @@ Future< REPLY_TYPE(Request) > loadBalance( bool atMostOnce = false, // if true, throws request_maybe_delivered() instead of retrying automatically QueueModel* model = nullptr) { -======= -Future loadBalance( - Reference> alternatives, RequestStream Interface::*channel, - Request request = Request(), TaskPriority taskID = TaskPriority::DefaultPromiseEndpoint, - bool atMostOnce = false, // if true, throws request_maybe_delivered() instead of retrying automatically - QueueModel* model = NULL) { ->>>>>>> release-6.3 state Future> firstRequest; state Optional firstRequestEndpoint; state Future> secondRequest; diff --git a/fdbserver/DataDistribution.actor.h b/fdbserver/DataDistribution.actor.h index 3ff4da49e5..d58c0ff40e 100644 --- a/fdbserver/DataDistribution.actor.h +++ b/fdbserver/DataDistribution.actor.h @@ -233,19 +233,15 @@ struct ShardTrackedData { Reference>> stats; }; -<<<<<<< HEAD -ACTOR Future dataDistributionTracker( - Reference initData, - Database cx, - PromiseStream output, - Reference shardsAffectedByTeamFailure, - PromiseStream getShardMetrics, - PromiseStream getShardMetricsList, - FutureStream> getAverageShardBytes, - Promise 
readyToStart, - Reference> zeroHealthyTeams, - UID distributorId, - KeyRangeMap* shards); +ACTOR Future dataDistributionTracker(Reference initData, Database cx, + PromiseStream output, + Reference shardsAffectedByTeamFailure, + PromiseStream getShardMetrics, + PromiseStream getShardMetricsList, + FutureStream> getAverageShardBytes, + Promise readyToStart, Reference> zeroHealthyTeams, + UID distributorId, KeyRangeMap* shards, + bool const* trackerCancelled); ACTOR Future dataDistributionQueue( Database cx, @@ -262,24 +258,6 @@ ACTOR Future dataDistributionQueue( int singleRegionTeamSize, double* lastLimited, const DDEnabledState* ddEnabledState); -======= -ACTOR Future dataDistributionTracker(Reference initData, Database cx, - PromiseStream output, - Reference shardsAffectedByTeamFailure, - PromiseStream getShardMetrics, - PromiseStream getShardMetricsList, - FutureStream> getAverageShardBytes, - Promise readyToStart, Reference> anyZeroHealthyTeams, - UID distributorId, KeyRangeMap* shards, - bool const* trackerCancelled); - -ACTOR Future dataDistributionQueue( - Database cx, PromiseStream output, FutureStream input, - PromiseStream getShardMetrics, Reference> processingUnhealthy, - vector teamCollection, Reference shardsAffectedByTeamFailure, - MoveKeysLock lock, PromiseStream> getAverageShardBytes, UID distributorId, int teamSize, - int singleRegionTeamSize, double* lastLimited); ->>>>>>> release-6.3 //Holds the permitted size and IO Bounds for a shard struct ShardSizeBounds { diff --git a/fdbserver/DataDistributionQueue.actor.cpp b/fdbserver/DataDistributionQueue.actor.cpp index f3108388d9..b248d6de86 100644 --- a/fdbserver/DataDistributionQueue.actor.cpp +++ b/fdbserver/DataDistributionQueue.actor.cpp @@ -83,15 +83,9 @@ struct RelocateData { bool operator!=(const RelocateData& rhs) const { return !(*this == rhs); } }; -<<<<<<< HEAD class ParallelTCInfo final : public ReferenceCounted, public IDataDistributionTeam { - vector> teams; - vector tempServerIDs; -======= -class ParallelTCInfo : public ReferenceCounted, public IDataDistributionTeam { std::vector> teams; std::vector tempServerIDs; ->>>>>>> release-6.3 int64_t sum(std::function func) const { int64_t result = 0; @@ -191,13 +185,8 @@ public: return all([minRatio](IDataDistributionTeam const& team) { return team.hasHealthyAvailableSpace(minRatio); }); } -<<<<<<< HEAD Future updateStorageMetrics() override { - vector> futures; -======= - virtual Future updateStorageMetrics() { std::vector> futures; ->>>>>>> release-6.3 for (auto& team : teams) { futures.push_back(team->updateStorageMetrics()); diff --git a/fdbserver/KeyValueStoreRocksDB.actor.cpp b/fdbserver/KeyValueStoreRocksDB.actor.cpp index a84a34da40..955d1e35d7 100644 --- a/fdbserver/KeyValueStoreRocksDB.actor.cpp +++ b/fdbserver/KeyValueStoreRocksDB.actor.cpp @@ -404,14 +404,10 @@ struct RocksDBKeyValueStore : IKeyValueStore { return res; } -<<<<<<< HEAD StorageBytes getStorageBytes() const override { -======= - StorageBytes getStorageBytes() override { uint64_t live = 0; ASSERT(db->GetIntProperty(rocksdb::DB::Properties::kEstimateLiveDataSize, &live)); ->>>>>>> release-6.3 int64_t free; int64_t total; g_network->getDiskBytes(path, free, total); diff --git a/fdbserver/KeyValueStoreSQLite.actor.cpp b/fdbserver/KeyValueStoreSQLite.actor.cpp index 035c1c003b..1beee67c81 100644 --- a/fdbserver/KeyValueStoreSQLite.actor.cpp +++ b/fdbserver/KeyValueStoreSQLite.actor.cpp @@ -112,14 +112,10 @@ struct PageChecksumCodec { // some chance the page was written with another checksum 
algorithm crc32Sum.part1 = 0; crc32Sum.part2 = crc32c_append(0xfdbeefdb, static_cast(data), dataLen); -<<<<<<< HEAD if (crc32Sum == *pSumInPage) { TEST(true); // Read CRC32 checksum return true; } -======= - if (crc32Sum == *pSumInPage) return true; ->>>>>>> release-6.3 } // Try xxhash3 @@ -132,14 +128,10 @@ struct PageChecksumCodec { auto xxHash3 = XXH3_64bits(data, dataLen); xxHash3Sum.part1 = static_cast((xxHash3 >> 32) & 0x00ffffff); xxHash3Sum.part2 = static_cast(xxHash3 & 0xffffffff); -<<<<<<< HEAD - if (xxHash3Sum == *pSumInPage) return true; -======= if (xxHash3Sum == *pSumInPage) { TEST(true); // Read xxHash3 checksum return true; } ->>>>>>> release-6.3 } // Try hashlittle2 diff --git a/fdbserver/TLogServer.actor.cpp b/fdbserver/TLogServer.actor.cpp index a1270ccf89..3c7b9d0a22 100644 --- a/fdbserver/TLogServer.actor.cpp +++ b/fdbserver/TLogServer.actor.cpp @@ -463,17 +463,12 @@ struct LogData : NonCopyable, public ReferenceCounted { bool stopped, initialized; DBRecoveryCount recoveryCount; -<<<<<<< HEAD - VersionMetricHandle persistentDataVersion, persistentDataDurableVersion; // The last version number in the portion of the log (written|durable) to persistentData - NotifiedVersion version, queueCommittedVersion; -======= // If persistentDataVersion != persistentDurableDataVersion, // then spilling is happening from persistentDurableDataVersion to persistentDataVersion. // Data less than persistentDataDurableVersion is spilled on disk (or fully popped from the TLog); VersionMetricHandle persistentDataVersion, persistentDataDurableVersion; // The last version number in the portion of the log (written|durable) to persistentData NotifiedVersion version; NotifiedVersion queueCommittedVersion; // The disk queue has committed up until the queueCommittedVersion version. ->>>>>>> release-6.3 Version queueCommittingVersion; Version knownCommittedVersion; // The maximum version that a proxy has told us that is committed (all TLogs have // ack'd a commit for this version). 
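The LogData comments kept from release-6.3 in the TLogServer hunk above describe how the TLog's version watermarks relate to spilling and the disk queue. Below is a minimal, hypothetical sketch of just that relationship, with plain int64_t fields standing in for the real NotifiedVersion/VersionMetricHandle members; it is not FoundationDB's actual LogData type.

// Hypothetical stand-in for the watermarks named in the LogData comments above.
#include <cassert>
#include <cstdint>

struct TLogVersionWatermarksSketch {
	int64_t version = 0;                      // newest version this TLog has received
	int64_t queueCommittedVersion = 0;        // disk queue is committed up to this version
	int64_t persistentDataVersion = 0;        // written into persistentData up to this version
	int64_t persistentDataDurableVersion = 0; // durably spilled (or fully popped) below this version

	// Per the comment: spilling is in progress exactly when the written and
	// durable spill watermarks differ.
	bool spillInProgress() const { return persistentDataVersion != persistentDataDurableVersion; }
};

int main() {
	TLogVersionWatermarksSketch w;
	w.version = 100;
	w.queueCommittedVersion = 90;
	w.persistentDataVersion = 50;        // spill has been written up to 50...
	w.persistentDataDurableVersion = 40; // ...but is only durable below 40
	assert(w.persistentDataDurableVersion <= w.persistentDataVersion);
	return w.spillInProgress() ? 0 : 1;  // spilling from 40 to 50 in this example
}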
diff --git a/fdbserver/storageserver.actor.cpp b/fdbserver/storageserver.actor.cpp index fc29357af9..1ef0e1266a 100644 --- a/fdbserver/storageserver.actor.cpp +++ b/fdbserver/storageserver.actor.cpp @@ -25,19 +25,14 @@ #include "fdbrpc/fdbrpc.h" #include "fdbrpc/LoadBalance.h" -<<<<<<< HEAD -#include "flow/Arena.h" -#include "flow/IRandom.h" -#include "flow/Tracing.h" -#include "flow/IndexedSet.h" -#include "flow/Hash3.h" -======= ->>>>>>> release-6.3 #include "flow/ActorCollection.h" +#include "flow/Arena.h" #include "flow/Hash3.h" #include "flow/Histogram.h" +#include "flow/IRandom.h" #include "flow/IndexedSet.h" #include "flow/SystemMonitor.h" +#include "flow/Tracing.h" #include "flow/Util.h" #include "fdbclient/Atomic.h" #include "fdbclient/DatabaseContext.h" diff --git a/fdbserver/workloads/LowLatency.actor.cpp b/fdbserver/workloads/LowLatency.actor.cpp index 7af641779c..3c8b21eac4 100644 --- a/fdbserver/workloads/LowLatency.actor.cpp +++ b/fdbserver/workloads/LowLatency.actor.cpp @@ -49,17 +49,13 @@ struct LowLatencyWorkload : TestWorkload { std::string description() const override { return "LowLatency"; } -<<<<<<< HEAD - Future setup(Database const& cx) override { return Void(); } -======= - virtual Future setup( Database const& cx ) { + Future setup(Database const& cx) override { if (g_network->isSimulated()) { ASSERT(const_cast(SERVER_KNOBS)->setKnob("min_delay_cc_worst_fit_candidacy_seconds", "5")); ASSERT(const_cast(SERVER_KNOBS)->setKnob("max_delay_cc_worst_fit_candidacy_seconds", "10")); } return Void(); } ->>>>>>> release-6.3 Future start(Database const& cx) override { if( clientId == 0 ) diff --git a/flow/CMakeLists.txt b/flow/CMakeLists.txt index 6f6b2d9fbc..f791a35831 100644 --- a/flow/CMakeLists.txt +++ b/flow/CMakeLists.txt @@ -88,7 +88,6 @@ set(FLOW_SRCS serialize.h stacktrace.amalgamation.cpp stacktrace.h -<<<<<<< HEAD test_memcpy.cpp test_memcpy_perf.cpp version.cpp @@ -98,11 +97,6 @@ set(FLOW_SRCS if(UNIX AND NOT APPLE) list(APPEND FLOW_SRCS folly_memcpy.S) endif() -======= - version.cpp - xxhash.c - xxhash.h) ->>>>>>> release-6.3 configure_file(${CMAKE_CURRENT_SOURCE_DIR}/SourceVersion.h.cmake ${CMAKE_CURRENT_BINARY_DIR}/SourceVersion.h) diff --git a/flow/Trace.cpp b/flow/Trace.cpp index 37cd51a0ff..4dd36e0b1e 100644 --- a/flow/Trace.cpp +++ b/flow/Trace.cpp @@ -1062,13 +1062,7 @@ TraceEvent& TraceEvent::GetLastError() { #endif } -<<<<<<< HEAD unsigned long TraceEvent::eventCounts[NUM_MAJOR_LEVELS_OF_EVENTS] = {0, 0, 0, 0, 0}; -======= -// We're cheating in counting, as in practice, we only use {10,20,30,40}. 
-static_assert(SevMaxUsed / 10 + 1 == 5, "Please bump eventCounts[5] to SevMaxUsed/10+1"); -unsigned long TraceEvent::eventCounts[5] = { 0, 0, 0, 0, 0 }; ->>>>>>> release-6.3 unsigned long TraceEvent::CountEventsLoggedAt(Severity sev) { ASSERT(sev <= SevMaxUsed); diff --git a/flow/error_definitions.h b/flow/error_definitions.h index b805ddad10..20d9855e44 100755 --- a/flow/error_definitions.h +++ b/flow/error_definitions.h @@ -94,10 +94,7 @@ ERROR( master_resolver_failed, 1210, "Master terminating because a Resolver fail ERROR( server_overloaded, 1211, "Server is under too much load and cannot respond" ) ERROR( master_backup_worker_failed, 1212, "Master terminating because a backup worker failed") ERROR( tag_throttled, 1213, "Transaction tag is being throttled" ) -<<<<<<< HEAD ERROR( grv_proxy_failed, 1214, "Master terminating because a GRV CommitProxy failed" ) -======= ->>>>>>> release-6.3 ERROR( dd_tracker_cancelled, 1215, "The data distribution tracker has been cancelled" ) // 15xx Platform errors diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index f5267a126d..69575ebcaa 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -200,12 +200,11 @@ if(WITH_PYTHON) TEST_FILES restarting/from_5.2.0/ClientTransactionProfilingCorrectness-1.txt restarting/from_5.2.0/ClientTransactionProfilingCorrectness-2.txt) add_fdb_test( -<<<<<<< HEAD TEST_FILES restarting/from_7.0.0/UpgradeAndBackupRestore-1.toml restarting/from_7.0.0/UpgradeAndBackupRestore-2.toml) add_fdb_test( - TEST_FILES restarting/to_6.3.5/CycleTestRestart-1.txt - restarting/to_6.3.5/CycleTestRestart-2.txt IGNORE) + TEST_FILES restarting/to_6.3.10/CycleTestRestart-1.txt + restarting/to_6.3.10/CycleTestRestart-2.txt) add_fdb_test(TEST_FILES slow/ApiCorrectness.toml) add_fdb_test(TEST_FILES slow/ApiCorrectnessAtomicRestore.toml) add_fdb_test(TEST_FILES slow/ApiCorrectnessSwitchover.toml) @@ -246,50 +245,6 @@ if(WITH_PYTHON) add_fdb_test(TEST_FILES slow/ParallelRestoreOldBackupCorrectnessMultiCycles.toml) add_fdb_test(TEST_FILES slow/ParallelRestoreOldBackupWriteDuringReadAtomicRestore.toml) add_fdb_test(TEST_FILES ParallelRestoreOldBackupApiCorrectnessAtomicRestore.toml IGNORE) -======= - TEST_FILES restarting/to_6.3.10/CycleTestRestart-1.txt - restarting/to_6.3.10/CycleTestRestart-2.txt) - add_fdb_test(TEST_FILES slow/ApiCorrectness.txt) - add_fdb_test(TEST_FILES slow/ApiCorrectnessAtomicRestore.txt) - add_fdb_test(TEST_FILES slow/ApiCorrectnessSwitchover.txt) - add_fdb_test(TEST_FILES slow/ClogWithRollbacks.txt) - add_fdb_test(TEST_FILES slow/CloggedCycleTest.txt) - add_fdb_test(TEST_FILES slow/CloggedStorefront.txt) - add_fdb_test(TEST_FILES slow/CommitBug.txt) - add_fdb_test(TEST_FILES slow/ConfigureTest.txt) - add_fdb_test(TEST_FILES slow/CycleRollbackPlain.txt) - add_fdb_test(TEST_FILES slow/DDBalanceAndRemove.txt) - add_fdb_test(TEST_FILES slow/DDBalanceAndRemoveStatus.txt) - add_fdb_test(TEST_FILES slow/DifferentClustersSameRV.txt) - add_fdb_test(TEST_FILES slow/FastTriggeredWatches.txt) - add_fdb_test(TEST_FILES slow/LowLatencyWithFailures.txt) - add_fdb_test(TEST_FILES slow/MoveKeysClean.txt) - add_fdb_test(TEST_FILES slow/MoveKeysSideband.txt) - add_fdb_test(TEST_FILES slow/RyowCorrectness.txt) - add_fdb_test(TEST_FILES slow/Serializability.txt) - add_fdb_test(TEST_FILES slow/SharedBackupCorrectness.txt) - add_fdb_test(TEST_FILES slow/SharedBackupToDBCorrectness.txt) - add_fdb_test(TEST_FILES slow/StorefrontTest.txt) - add_fdb_test(TEST_FILES slow/SwizzledApiCorrectness.txt) - add_fdb_test(TEST_FILES 
slow/SwizzledCycleTest.txt) - add_fdb_test(TEST_FILES slow/SwizzledDdBalance.txt) - add_fdb_test(TEST_FILES slow/SwizzledRollbackTimeLapse.txt) - add_fdb_test(TEST_FILES slow/SwizzledRollbackTimeLapseIncrement.txt) - add_fdb_test(TEST_FILES slow/VersionStampBackupToDB.txt) - add_fdb_test(TEST_FILES slow/VersionStampSwitchover.txt) - add_fdb_test(TEST_FILES slow/WriteDuringReadAtomicRestore.txt) - add_fdb_test(TEST_FILES slow/WriteDuringReadSwitchover.txt) - add_fdb_test(TEST_FILES slow/ddbalance.txt) - add_fdb_test(TEST_FILES slow/ParallelRestoreNewBackupCorrectnessAtomicOp.txt) - add_fdb_test(TEST_FILES slow/ParallelRestoreNewBackupCorrectnessCycle.txt) - add_fdb_test(TEST_FILES slow/ParallelRestoreNewBackupCorrectnessMultiCycles.txt) - add_fdb_test(TEST_FILES slow/ParallelRestoreNewBackupWriteDuringReadAtomicRestore.txt) - add_fdb_test(TEST_FILES slow/ParallelRestoreOldBackupCorrectnessAtomicOp.txt) - add_fdb_test(TEST_FILES slow/ParallelRestoreOldBackupCorrectnessCycle.txt) - add_fdb_test(TEST_FILES slow/ParallelRestoreOldBackupCorrectnessMultiCycles.txt) - add_fdb_test(TEST_FILES slow/ParallelRestoreOldBackupWriteDuringReadAtomicRestore.txt) - add_fdb_test(TEST_FILES ParallelRestoreOldBackupApiCorrectnessAtomicRestore.txt IGNORE) ->>>>>>> release-6.3 # Note that status tests are not deterministic. add_fdb_test(TEST_FILES status/invalid_proc_addresses.txt) add_fdb_test(TEST_FILES status/local_6_machine_no_replicas_remain.txt) diff --git a/versions.target b/versions.target index 8b1467c77e..1b42c29961 100644 --- a/versions.target +++ b/versions.target @@ -1,7 +1,7 @@ - 6.3.10 - 6.3 + 7.0.0 + 7.0
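Earlier in this patch, the PageChecksumCodec hunks in KeyValueStoreSQLite.actor.cpp end up keeping both coverage probes (TEST(true) for the CRC32 path from HEAD and for the xxHash3 path from release-6.3), because a page being read may carry a checksum written by an older format. The following is a self-contained sketch of that probe-the-formats-in-turn pattern; the hash functions are trivial stand-ins, not the real crc32c, xxHash3, or hashlittle2.

// Hypothetical sketch: verify a page against several supported checksum
// formats, newest first, and report which one matched (analogous to the
// TEST(true) coverage probes in the real codec).
#include <cstdint>
#include <cstdio>
#include <functional>
#include <string>
#include <utility>
#include <vector>

using ChecksumFn = std::function<uint64_t(const void*, size_t)>;

static uint64_t standInHashA(const void* data, size_t len) {
	uint64_t h = 1469598103934665603ull; // FNV-1a style stand-in, not a real page checksum
	for (size_t i = 0; i < len; ++i) h = (h ^ static_cast<const uint8_t*>(data)[i]) * 1099511628211ull;
	return h;
}

static uint64_t standInHashB(const void* data, size_t len) {
	uint64_t h = 0; // simple polynomial stand-in for a legacy format
	for (size_t i = 0; i < len; ++i) h = h * 131 + static_cast<const uint8_t*>(data)[i];
	return h;
}

// Returns true if the stored checksum matches any supported format, checking
// the current format first so the common case stays cheap.
bool verifyPage(const void* data, size_t len, uint64_t storedSum, std::string* matchedFormat) {
	const std::vector<std::pair<std::string, ChecksumFn>> formats = {
		{ "formatA (current)", standInHashA },
		{ "formatB (legacy)", standInHashB },
	};
	for (const auto& [name, fn] : formats) {
		if (fn(data, len) == storedSum) {
			if (matchedFormat) *matchedFormat = name;
			return true;
		}
	}
	return false;
}

int main() {
	const char page[] = "example page contents";
	uint64_t sum = standInHashB(page, sizeof(page)); // a page written by an older version
	std::string fmt;
	bool ok = verifyPage(page, sizeof(page), sum, &fmt);
	std::printf("verified=%d via %s\n", ok ? 1 : 0, ok ? fmt.c_str() : "none");
	return ok ? 0 : 1;
}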