Resolve conflicts
parent 877997632d
commit 7fbc4d7391
@@ -1822,16 +1822,12 @@ ACTOR Future<vector<pair<KeyRange, Reference<LocationInfo>>>> getKeyRangeLocatio
}
}

<<<<<<< HEAD
// Returns a vector of <ShardRange, storage server location info> pairs.
=======
// Get the SS locations for each shard in the 'keys' key-range;
// Returned vector size is the number of shards in the input keys key-range.
// Returned vector element is <ShardRange, storage server location info> pairs, where
// ShardRange is the whole shard key-range, not a part of the given key range.
// Example: If query the function with key range (b, d), the returned list of pairs could be something like:
// [([a, b1), locationInfo), ([b1, c), locationInfo), ([c, d1), locationInfo)].
>>>>>>> release-6.3
template <class F>
Future< vector< pair<KeyRange,Reference<LocationInfo>> > > getKeyRangeLocations( Database const& cx, KeyRange const& keys, int limit, bool reverse, F StorageServerInterface::*member, TransactionInfo const& info ) {
ASSERT (!keys.empty());
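
The release-6.3 comment block above pins down a subtle contract: the returned ranges are whole shard boundaries, so the first and last entries can extend past the queried keys. A minimal sketch (plain C++ with simplified stand-in types, not FoundationDB's actual API) of how a caller would clip the results back to the query:

#include <algorithm>
#include <string>
#include <utility>
#include <vector>

struct LocationInfo {};                                // stand-in for Reference<LocationInfo>
using KeyRange = std::pair<std::string, std::string>;  // [begin, end)

// Intersect each whole-shard range with the queried range, dropping empties.
std::vector<std::pair<KeyRange, LocationInfo>> clipToQuery(
    const std::vector<std::pair<KeyRange, LocationInfo>>& shards, const KeyRange& query) {
    std::vector<std::pair<KeyRange, LocationInfo>> out;
    for (const auto& [range, loc] : shards) {
        KeyRange clipped{ std::max(range.first, query.first), std::min(range.second, query.second) };
        if (clipped.first < clipped.second) out.push_back({ clipped, loc });
    }
    return out;
}

Applied to the comment's example, clipping [a, b1), [b1, c), [c, d1) to [b, d) yields [b, b1), [b1, c), [c, d): each shard's locations, restricted to the queried keys.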
@@ -171,7 +171,6 @@ void addLaggingRequest(Future<Optional<Reply>> reply, Promise<Void> requestFinis
// failMon's information for load balancing and avoiding failed servers
// If ALL the servers are failed and the list of servers is not fresh, throws an exception to let the caller refresh the list of servers
ACTOR template <class Interface, class Request, class Multi>
<<<<<<< HEAD
Future< REPLY_TYPE(Request) > loadBalance(
Reference<MultiInterface<Multi>> alternatives,
RequestStream<Request> Interface::* channel,

@@ -180,13 +179,6 @@ Future< REPLY_TYPE(Request) > loadBalance(
bool atMostOnce = false, // if true, throws request_maybe_delivered() instead of retrying automatically
QueueModel* model = nullptr)
{
=======
Future<REPLY_TYPE(Request)> loadBalance(
Reference<MultiInterface<Multi>> alternatives, RequestStream<Request> Interface::*channel,
Request request = Request(), TaskPriority taskID = TaskPriority::DefaultPromiseEndpoint,
bool atMostOnce = false, // if true, throws request_maybe_delivered() instead of retrying automatically
QueueModel* model = NULL) {
>>>>>>> release-6.3
state Future<Optional<REPLY_TYPE(Request)>> firstRequest;
state Optional<uint64_t> firstRequestEndpoint;
state Future<Optional<REPLY_TYPE(Request)>> secondRequest;
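
The firstRequest/secondRequest state variables are the heart of loadBalance: if the first replica is slow to answer, the same request is duplicated to a second alternative and whichever reply arrives first wins. A stripped-down sketch of that idea using std::async (hypothetical and simplified; the real implementation is a flow ACTOR wired into failure monitoring and the QueueModel):

#include <chrono>
#include <functional>
#include <future>
#include <string>

std::string hedgedGet(std::function<std::string()> primary,
                      std::function<std::string()> backup,
                      std::chrono::milliseconds hedgeAfter) {
    auto first = std::async(std::launch::async, primary);
    // Fast path: the first replica answered within the hedge delay.
    if (first.wait_for(hedgeAfter) == std::future_status::ready) return first.get();
    // Slow path: issue a duplicate request and take whichever finishes first.
    auto second = std::async(std::launch::async, backup);
    while (true) {
        if (first.wait_for(std::chrono::milliseconds(1)) == std::future_status::ready) return first.get();
        if (second.wait_for(std::chrono::milliseconds(1)) == std::future_status::ready) return second.get();
    }
}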
@@ -233,19 +233,15 @@ struct ShardTrackedData {
Reference<AsyncVar<Optional<ShardMetrics>>> stats;
};

<<<<<<< HEAD
ACTOR Future<Void> dataDistributionTracker(
Reference<InitialDataDistribution> initData,
Database cx,
PromiseStream<RelocateShard> output,
Reference<ShardsAffectedByTeamFailure> shardsAffectedByTeamFailure,
PromiseStream<GetMetricsRequest> getShardMetrics,
PromiseStream<GetMetricsListRequest> getShardMetricsList,
FutureStream<Promise<int64_t>> getAverageShardBytes,
Promise<Void> readyToStart,
Reference<AsyncVar<bool>> zeroHealthyTeams,
UID distributorId,
KeyRangeMap<ShardTrackedData>* shards);
ACTOR Future<Void> dataDistributionTracker(Reference<InitialDataDistribution> initData, Database cx,
PromiseStream<RelocateShard> output,
Reference<ShardsAffectedByTeamFailure> shardsAffectedByTeamFailure,
PromiseStream<GetMetricsRequest> getShardMetrics,
PromiseStream<GetMetricsListRequest> getShardMetricsList,
FutureStream<Promise<int64_t>> getAverageShardBytes,
Promise<Void> readyToStart, Reference<AsyncVar<bool>> zeroHealthyTeams,
UID distributorId, KeyRangeMap<ShardTrackedData>* shards,
bool const* trackerCancelled);

ACTOR Future<Void> dataDistributionQueue(
Database cx,

@@ -262,24 +258,6 @@ ACTOR Future<Void> dataDistributionQueue(
int singleRegionTeamSize,
double* lastLimited,
const DDEnabledState* ddEnabledState);
=======
ACTOR Future<Void> dataDistributionTracker(Reference<InitialDataDistribution> initData, Database cx,
PromiseStream<RelocateShard> output,
Reference<ShardsAffectedByTeamFailure> shardsAffectedByTeamFailure,
PromiseStream<GetMetricsRequest> getShardMetrics,
PromiseStream<GetMetricsListRequest> getShardMetricsList,
FutureStream<Promise<int64_t>> getAverageShardBytes,
Promise<Void> readyToStart, Reference<AsyncVar<bool>> anyZeroHealthyTeams,
UID distributorId, KeyRangeMap<ShardTrackedData>* shards,
bool const* trackerCancelled);

ACTOR Future<Void> dataDistributionQueue(
Database cx, PromiseStream<RelocateShard> output, FutureStream<RelocateShard> input,
PromiseStream<GetMetricsRequest> getShardMetrics, Reference<AsyncVar<bool>> processingUnhealthy,
vector<TeamCollectionInterface> teamCollection, Reference<ShardsAffectedByTeamFailure> shardsAffectedByTeamFailure,
MoveKeysLock lock, PromiseStream<Promise<int64_t>> getAverageShardBytes, UID distributorId, int teamSize,
int singleRegionTeamSize, double* lastLimited);
>>>>>>> release-6.3

//Holds the permitted size and IO Bounds for a shard
struct ShardSizeBounds {
@@ -83,15 +83,9 @@ struct RelocateData {
bool operator!=(const RelocateData& rhs) const { return !(*this == rhs); }
};

<<<<<<< HEAD
class ParallelTCInfo final : public ReferenceCounted<ParallelTCInfo>, public IDataDistributionTeam {
vector<Reference<IDataDistributionTeam>> teams;
vector<UID> tempServerIDs;
=======
class ParallelTCInfo : public ReferenceCounted<ParallelTCInfo>, public IDataDistributionTeam {
std::vector<Reference<IDataDistributionTeam>> teams;
std::vector<UID> tempServerIDs;
>>>>>>> release-6.3

int64_t sum(std::function<int64_t(IDataDistributionTeam const&)> func) const {
int64_t result = 0;

@@ -191,13 +185,8 @@ public:
return all([minRatio](IDataDistributionTeam const& team) { return team.hasHealthyAvailableSpace(minRatio); });
}

<<<<<<< HEAD
Future<Void> updateStorageMetrics() override {
vector<Future<Void>> futures;
=======
virtual Future<Void> updateStorageMetrics() {
std::vector<Future<Void>> futures;
>>>>>>> release-6.3

for (auto& team : teams) {
futures.push_back(team->updateStorageMetrics());
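
Both sides of the ParallelTCInfo conflict share the same design: a composite that presents a vector of teams as a single team, folding per-team metrics through a std::function combinator such as sum(). A simplified sketch of the pattern (stand-in Team type, not the real IDataDistributionTeam interface):

#include <cstdint>
#include <functional>
#include <memory>
#include <vector>

struct Team { int64_t loadBytes = 0; };

struct ParallelTeams {
    std::vector<std::shared_ptr<Team>> teams;

    // Fold a per-team metric over every sub-team.
    int64_t sum(std::function<int64_t(const Team&)> func) const {
        int64_t result = 0;
        for (const auto& t : teams) result += func(*t);
        return result;
    }

    int64_t getLoadBytes() const {
        return sum([](const Team& t) { return t.loadBytes; });
    }
};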
@@ -404,14 +404,10 @@ struct RocksDBKeyValueStore : IKeyValueStore {
return res;
}

<<<<<<< HEAD
StorageBytes getStorageBytes() const override {
=======
StorageBytes getStorageBytes() override {
uint64_t live = 0;
ASSERT(db->GetIntProperty(rocksdb::DB::Properties::kEstimateLiveDataSize, &live));

>>>>>>> release-6.3
int64_t free;
int64_t total;
g_network->getDiskBytes(path, free, total);
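
The release-6.3 side shows where the numbers come from: RocksDB's kEstimateLiveDataSize property supplies an estimate of live data, and g_network->getDiskBytes supplies filesystem free/total. A rough standalone sketch of the same computation (statvfs substituted for g_network, and the StorageBytes field layout is an assumption for illustration):

#include <cassert>
#include <cstdint>
#include <rocksdb/db.h>
#include <sys/statvfs.h>

struct StorageBytes { int64_t free, total, used, available; };  // assumed layout

StorageBytes getStorageBytes(rocksdb::DB* db, const char* path) {
    uint64_t live = 0;
    // RocksDB's estimate of live (non-garbage) data in the store.
    assert(db->GetIntProperty(rocksdb::DB::Properties::kEstimateLiveDataSize, &live));

    struct statvfs vfs;
    assert(statvfs(path, &vfs) == 0);
    int64_t free = int64_t(vfs.f_bavail) * int64_t(vfs.f_frsize);   // bytes usable by this process
    int64_t total = int64_t(vfs.f_blocks) * int64_t(vfs.f_frsize);  // filesystem capacity
    return StorageBytes{ free, total, int64_t(live), free };
}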
@@ -112,14 +112,10 @@ struct PageChecksumCodec {
// some chance the page was written with another checksum algorithm
crc32Sum.part1 = 0;
crc32Sum.part2 = crc32c_append(0xfdbeefdb, static_cast<uint8_t*>(data), dataLen);
<<<<<<< HEAD
if (crc32Sum == *pSumInPage) {
TEST(true); // Read CRC32 checksum
return true;
}
=======
if (crc32Sum == *pSumInPage) return true;
>>>>>>> release-6.3
}

// Try xxhash3

@@ -132,14 +128,10 @@ struct PageChecksumCodec {
auto xxHash3 = XXH3_64bits(data, dataLen);
xxHash3Sum.part1 = static_cast<uint32_t>((xxHash3 >> 32) & 0x00ffffff);
xxHash3Sum.part2 = static_cast<uint32_t>(xxHash3 & 0xffffffff);
<<<<<<< HEAD
if (xxHash3Sum == *pSumInPage) return true;
=======
if (xxHash3Sum == *pSumInPage) {
TEST(true); // Read xxHash3 checksum
return true;
}
>>>>>>> release-6.3
}

// Try hashlittle2
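
Taken together, the two hunks show the codec's verification ladder: compute each supported checksum over the page and accept the page if any of them matches the stored sum (crc32c seeded with 0xfdbeefdb, then xxHash3, then hashlittle2). A standalone sketch of the xxHash3 leg, mirroring the bit-folding shown above (stand-in SumType, not the codec's real types):

#include <cstdint>
#define XXH_INLINE_ALL
#include "xxhash.h"

struct SumType { uint32_t part1, part2; };
bool operator==(const SumType& a, const SumType& b) { return a.part1 == b.part1 && a.part2 == b.part2; }

bool checkXXHash3(const void* data, int dataLen, const SumType& sumInPage) {
    const uint64_t h = XXH3_64bits(data, dataLen);
    SumType s;
    s.part1 = static_cast<uint32_t>((h >> 32) & 0x00ffffff);  // keep only the low 24 bits of the high word
    s.part2 = static_cast<uint32_t>(h & 0xffffffff);
    return s == sumInPage;
}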
@@ -463,17 +463,12 @@ struct LogData : NonCopyable, public ReferenceCounted<LogData> {
bool stopped, initialized;
DBRecoveryCount recoveryCount;

<<<<<<< HEAD
VersionMetricHandle persistentDataVersion, persistentDataDurableVersion; // The last version number in the portion of the log (written|durable) to persistentData
NotifiedVersion version, queueCommittedVersion;
=======
// If persistentDataVersion != persistentDurableDataVersion,
// then spilling is happening from persistentDurableDataVersion to persistentDataVersion.
// Data less than persistentDataDurableVersion is spilled on disk (or fully popped from the TLog);
VersionMetricHandle persistentDataVersion, persistentDataDurableVersion; // The last version number in the portion of the log (written|durable) to persistentData
NotifiedVersion version;
NotifiedVersion queueCommittedVersion; // The disk queue has committed up until the queueCommittedVersion version.
>>>>>>> release-6.3
Version queueCommittingVersion;
Version knownCommittedVersion; // The maximum version that a proxy has told us that is committed (all TLogs have
// ack'd a commit for this version).
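
The release-6.3 comments define a precise window: everything below persistentDataDurableVersion is already spilled to disk (or popped), and a spill is in flight exactly while the two versions differ. A tiny sketch of that invariant (Version as a stand-in alias):

#include <cstdint>

using Version = int64_t;

// Spilling moves data in [persistentDataDurableVersion, persistentDataVersion)
// onto disk; the two versions converge again once the spill completes.
bool spillInProgress(Version persistentDataVersion, Version persistentDataDurableVersion) {
    return persistentDataVersion != persistentDataDurableVersion;
}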
@@ -25,19 +25,14 @@

#include "fdbrpc/fdbrpc.h"
#include "fdbrpc/LoadBalance.h"
<<<<<<< HEAD
#include "flow/Arena.h"
#include "flow/IRandom.h"
#include "flow/Tracing.h"
#include "flow/IndexedSet.h"
#include "flow/Hash3.h"
=======
>>>>>>> release-6.3
#include "flow/ActorCollection.h"
#include "flow/Arena.h"
#include "flow/Hash3.h"
#include "flow/Histogram.h"
#include "flow/IRandom.h"
#include "flow/IndexedSet.h"
#include "flow/SystemMonitor.h"
#include "flow/Tracing.h"
#include "flow/Util.h"
#include "fdbclient/Atomic.h"
#include "fdbclient/DatabaseContext.h"
@@ -49,17 +49,13 @@ struct LowLatencyWorkload : TestWorkload {

std::string description() const override { return "LowLatency"; }

<<<<<<< HEAD
Future<Void> setup(Database const& cx) override { return Void(); }
=======
virtual Future<Void> setup( Database const& cx ) {
Future<Void> setup(Database const& cx) override {
if (g_network->isSimulated()) {
ASSERT(const_cast<ServerKnobs*>(SERVER_KNOBS)->setKnob("min_delay_cc_worst_fit_candidacy_seconds", "5"));
ASSERT(const_cast<ServerKnobs*>(SERVER_KNOBS)->setKnob("max_delay_cc_worst_fit_candidacy_seconds", "10"));
}
return Void();
}
>>>>>>> release-6.3

Future<Void> start(Database const& cx) override {
if( clientId == 0 )
@@ -88,7 +88,6 @@ set(FLOW_SRCS
serialize.h
stacktrace.amalgamation.cpp
stacktrace.h
<<<<<<< HEAD
test_memcpy.cpp
test_memcpy_perf.cpp
version.cpp

@@ -98,11 +97,6 @@ set(FLOW_SRCS
if(UNIX AND NOT APPLE)
list(APPEND FLOW_SRCS folly_memcpy.S)
endif()
=======
version.cpp
xxhash.c
xxhash.h)
>>>>>>> release-6.3

configure_file(${CMAKE_CURRENT_SOURCE_DIR}/SourceVersion.h.cmake ${CMAKE_CURRENT_BINARY_DIR}/SourceVersion.h)

@@ -1062,13 +1062,7 @@ TraceEvent& TraceEvent::GetLastError() {
#endif
}

<<<<<<< HEAD
unsigned long TraceEvent::eventCounts[NUM_MAJOR_LEVELS_OF_EVENTS] = {0, 0, 0, 0, 0};
=======
// We're cheating in counting, as in practice, we only use {10,20,30,40}.
static_assert(SevMaxUsed / 10 + 1 == 5, "Please bump eventCounts[5] to SevMaxUsed/10+1");
unsigned long TraceEvent::eventCounts[5] = { 0, 0, 0, 0, 0 };
>>>>>>> release-6.3

unsigned long TraceEvent::CountEventsLoggedAt(Severity sev) {
ASSERT(sev <= SevMaxUsed);
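
The static_assert guards the "cheating" the comment admits to: severity levels are multiples of 10 ({10,20,30,40} in practice), so sev / 10 indexes a fixed five-slot counter array. A compact sketch of the scheme (the Severity values are stand-ins mirroring that convention):

#include <cassert>

enum Severity { SevDebug = 5, SevInfo = 10, SevWarn = 20, SevWarnAlways = 30, SevError = 40, SevMaxUsed = SevError };

unsigned long eventCounts[SevMaxUsed / 10 + 1] = {};  // 5 buckets covering 0..40

void countEvent(Severity sev) {
    assert(sev <= SevMaxUsed);
    ++eventCounts[sev / 10];  // e.g. SevError (40) lands in bucket 4
}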
@@ -94,10 +94,7 @@ ERROR( master_resolver_failed, 1210, "Master terminating because a Resolver fail
ERROR( server_overloaded, 1211, "Server is under too much load and cannot respond" )
ERROR( master_backup_worker_failed, 1212, "Master terminating because a backup worker failed")
ERROR( tag_throttled, 1213, "Transaction tag is being throttled" )
<<<<<<< HEAD
ERROR( grv_proxy_failed, 1214, "Master terminating because a GRV CommitProxy failed" )
=======
>>>>>>> release-6.3
ERROR( dd_tracker_cancelled, 1215, "The data distribution tracker has been cancelled" )

// 15xx Platform errors
@@ -200,12 +200,11 @@ if(WITH_PYTHON)
TEST_FILES restarting/from_5.2.0/ClientTransactionProfilingCorrectness-1.txt
restarting/from_5.2.0/ClientTransactionProfilingCorrectness-2.txt)
add_fdb_test(
<<<<<<< HEAD
TEST_FILES restarting/from_7.0.0/UpgradeAndBackupRestore-1.toml
restarting/from_7.0.0/UpgradeAndBackupRestore-2.toml)
add_fdb_test(
TEST_FILES restarting/to_6.3.5/CycleTestRestart-1.txt
restarting/to_6.3.5/CycleTestRestart-2.txt IGNORE)
TEST_FILES restarting/to_6.3.10/CycleTestRestart-1.txt
restarting/to_6.3.10/CycleTestRestart-2.txt)
add_fdb_test(TEST_FILES slow/ApiCorrectness.toml)
add_fdb_test(TEST_FILES slow/ApiCorrectnessAtomicRestore.toml)
add_fdb_test(TEST_FILES slow/ApiCorrectnessSwitchover.toml)

@@ -246,50 +245,6 @@ if(WITH_PYTHON)
add_fdb_test(TEST_FILES slow/ParallelRestoreOldBackupCorrectnessMultiCycles.toml)
add_fdb_test(TEST_FILES slow/ParallelRestoreOldBackupWriteDuringReadAtomicRestore.toml)
add_fdb_test(TEST_FILES ParallelRestoreOldBackupApiCorrectnessAtomicRestore.toml IGNORE)
=======
TEST_FILES restarting/to_6.3.10/CycleTestRestart-1.txt
restarting/to_6.3.10/CycleTestRestart-2.txt)
add_fdb_test(TEST_FILES slow/ApiCorrectness.txt)
add_fdb_test(TEST_FILES slow/ApiCorrectnessAtomicRestore.txt)
add_fdb_test(TEST_FILES slow/ApiCorrectnessSwitchover.txt)
add_fdb_test(TEST_FILES slow/ClogWithRollbacks.txt)
add_fdb_test(TEST_FILES slow/CloggedCycleTest.txt)
add_fdb_test(TEST_FILES slow/CloggedStorefront.txt)
add_fdb_test(TEST_FILES slow/CommitBug.txt)
add_fdb_test(TEST_FILES slow/ConfigureTest.txt)
add_fdb_test(TEST_FILES slow/CycleRollbackPlain.txt)
add_fdb_test(TEST_FILES slow/DDBalanceAndRemove.txt)
add_fdb_test(TEST_FILES slow/DDBalanceAndRemoveStatus.txt)
add_fdb_test(TEST_FILES slow/DifferentClustersSameRV.txt)
add_fdb_test(TEST_FILES slow/FastTriggeredWatches.txt)
add_fdb_test(TEST_FILES slow/LowLatencyWithFailures.txt)
add_fdb_test(TEST_FILES slow/MoveKeysClean.txt)
add_fdb_test(TEST_FILES slow/MoveKeysSideband.txt)
add_fdb_test(TEST_FILES slow/RyowCorrectness.txt)
add_fdb_test(TEST_FILES slow/Serializability.txt)
add_fdb_test(TEST_FILES slow/SharedBackupCorrectness.txt)
add_fdb_test(TEST_FILES slow/SharedBackupToDBCorrectness.txt)
add_fdb_test(TEST_FILES slow/StorefrontTest.txt)
add_fdb_test(TEST_FILES slow/SwizzledApiCorrectness.txt)
add_fdb_test(TEST_FILES slow/SwizzledCycleTest.txt)
add_fdb_test(TEST_FILES slow/SwizzledDdBalance.txt)
add_fdb_test(TEST_FILES slow/SwizzledRollbackTimeLapse.txt)
add_fdb_test(TEST_FILES slow/SwizzledRollbackTimeLapseIncrement.txt)
add_fdb_test(TEST_FILES slow/VersionStampBackupToDB.txt)
add_fdb_test(TEST_FILES slow/VersionStampSwitchover.txt)
add_fdb_test(TEST_FILES slow/WriteDuringReadAtomicRestore.txt)
add_fdb_test(TEST_FILES slow/WriteDuringReadSwitchover.txt)
add_fdb_test(TEST_FILES slow/ddbalance.txt)
add_fdb_test(TEST_FILES slow/ParallelRestoreNewBackupCorrectnessAtomicOp.txt)
add_fdb_test(TEST_FILES slow/ParallelRestoreNewBackupCorrectnessCycle.txt)
add_fdb_test(TEST_FILES slow/ParallelRestoreNewBackupCorrectnessMultiCycles.txt)
add_fdb_test(TEST_FILES slow/ParallelRestoreNewBackupWriteDuringReadAtomicRestore.txt)
add_fdb_test(TEST_FILES slow/ParallelRestoreOldBackupCorrectnessAtomicOp.txt)
add_fdb_test(TEST_FILES slow/ParallelRestoreOldBackupCorrectnessCycle.txt)
add_fdb_test(TEST_FILES slow/ParallelRestoreOldBackupCorrectnessMultiCycles.txt)
add_fdb_test(TEST_FILES slow/ParallelRestoreOldBackupWriteDuringReadAtomicRestore.txt)
add_fdb_test(TEST_FILES ParallelRestoreOldBackupApiCorrectnessAtomicRestore.txt IGNORE)
>>>>>>> release-6.3
# Note that status tests are not deterministic.
add_fdb_test(TEST_FILES status/invalid_proc_addresses.txt)
add_fdb_test(TEST_FILES status/local_6_machine_no_replicas_remain.txt)
@@ -1,7 +1,7 @@
<?xml version="1.0"?>
<Project xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<PropertyGroup>
<Version>6.3.10</Version>
<PackageName>6.3</PackageName>
<Version>7.0.0</Version>
<PackageName>7.0</PackageName>
</PropertyGroup>
</Project>