/*
 * BlobManager.actor.cpp
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <sstream>
#include <queue>
#include <vector>
#include <unordered_map>
#include <unordered_set> // used by BlobManagerData::workerAddresses and deadWorkers

#include "contrib/fmt-8.0.1/include/fmt/format.h"
#include "fdbclient/BackupContainerFileSystem.h"
#include "fdbclient/BlobGranuleCommon.h"
#include "fdbclient/BlobWorkerInterface.h"
#include "fdbclient/KeyRangeMap.h"
#include "fdbclient/ReadYourWrites.h"
#include "fdbclient/SystemData.h"
#include "fdbserver/BlobManagerInterface.h"
#include "fdbserver/Knobs.h"
#include "fdbserver/BlobGranuleServerCommon.actor.h"
#include "fdbserver/QuietDatabase.h"
#include "fdbserver/WaitFailure.h"
#include "fdbserver/WorkerInterface.actor.h"
#include "flow/Error.h"
#include "flow/IRandom.h"
#include "flow/UnitTest.h"

#include "flow/actorcompiler.h" // has to be last include

#define BM_DEBUG true
// DO NOT CHANGE THIS
// Special key where the value means the epoch + sequence number of the split, instead of the actual boundary
// Chosen because this should not be a start or end key in any split
static Key splitBoundarySpecialKey = "\xff\xff\xff"_sr;
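// Diffs a single [rangeStart, rangeEnd) -> rangeActive entry read from the database against the current
// knownBlobRanges map: every overlapping sub-range whose active state changes is appended (deep-copied into ar)
// to rangesToAdd or rangesToRemove, and knownBlobRanges is then updated to the new state.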
// TODO add comments + documentation
void handleClientBlobRange(KeyRangeMap<bool>* knownBlobRanges,
                           Arena& ar,
                           VectorRef<KeyRangeRef>* rangesToAdd,
                           VectorRef<KeyRangeRef>* rangesToRemove,
                           KeyRef rangeStart,
                           KeyRef rangeEnd,
                           bool rangeActive) {
    if (BM_DEBUG) {
        fmt::print(
            "db range [{0} - {1}): {2}\n", rangeStart.printable(), rangeEnd.printable(), rangeActive ? "T" : "F");
    }
    KeyRange keyRange(KeyRangeRef(rangeStart, rangeEnd));
    auto allRanges = knownBlobRanges->intersectingRanges(keyRange);
    for (auto& r : allRanges) {
        if (r.value() != rangeActive) {
            KeyRef overlapStart = (r.begin() > keyRange.begin) ? r.begin() : keyRange.begin;
            KeyRef overlapEnd = (keyRange.end < r.end()) ? keyRange.end : r.end();
            KeyRangeRef overlap(overlapStart, overlapEnd);
            if (rangeActive) {
                if (BM_DEBUG) {
                    fmt::print("BM Adding client range [{0} - {1})\n",
                               overlapStart.printable().c_str(),
                               overlapEnd.printable().c_str());
                }
                rangesToAdd->push_back_deep(ar, overlap);
            } else {
                if (BM_DEBUG) {
                    fmt::print("BM Removing client range [{0} - {1})\n",
                               overlapStart.printable().c_str(),
                               overlapEnd.printable().c_str());
                }
                rangesToRemove->push_back_deep(ar, overlap);
            }
        }
    }
    knownBlobRanges->insert(keyRange, rangeActive);
}
void updateClientBlobRanges(KeyRangeMap<bool>* knownBlobRanges,
                            RangeResult dbBlobRanges,
                            Arena& ar,
                            VectorRef<KeyRangeRef>* rangesToAdd,
                            VectorRef<KeyRangeRef>* rangesToRemove) {
    if (BM_DEBUG) {
        fmt::print("Updating {0} client blob ranges", dbBlobRanges.size() / 2);
        for (int i = 0; i < dbBlobRanges.size() - 1; i += 2) {
            fmt::print(" [{0} - {1})", dbBlobRanges[i].key.printable(), dbBlobRanges[i + 1].key.printable());
        }
        printf("\n");
    }
    // essentially do merge diff of current known blob ranges and new ranges, to assign new ranges to
    // workers and revoke old ranges from workers

    // basically, for any range that is set in results that isn't set in ranges, assign the range to the
    // worker. for any range that isn't set in results that is set in ranges, revoke the range from the
    // worker. and, update ranges to match results as you go

    // FIXME: could change this to O(N) instead of O(NLogN) by doing a sorted merge instead of requesting the
    // intersection for each insert, but this operation is pretty infrequent so it's probably not necessary
    if (dbBlobRanges.size() == 0) {
        // special case. Nothing in the DB, reset knownBlobRanges and revoke all existing ranges from workers
        handleClientBlobRange(
            knownBlobRanges, ar, rangesToAdd, rangesToRemove, normalKeys.begin, normalKeys.end, false);
    } else {
        if (dbBlobRanges[0].key > normalKeys.begin) {
            handleClientBlobRange(
                knownBlobRanges, ar, rangesToAdd, rangesToRemove, normalKeys.begin, dbBlobRanges[0].key, false);
        }
        for (int i = 0; i < dbBlobRanges.size() - 1; i++) {
            if (dbBlobRanges[i].key >= normalKeys.end) {
                if (BM_DEBUG) {
                    fmt::print("Found invalid blob range start {0}\n", dbBlobRanges[i].key.printable());
                }
                break;
            }
            bool active = dbBlobRanges[i].value == LiteralStringRef("1");
            if (active) {
                if (BM_DEBUG) {
                    fmt::print("BM sees client range [{0} - {1})\n",
                               dbBlobRanges[i].key.printable(),
                               dbBlobRanges[i + 1].key.printable());
                }
            }
            KeyRef endKey = dbBlobRanges[i + 1].key;
            if (endKey > normalKeys.end) {
                if (BM_DEBUG) {
                    fmt::print("Removing system keyspace from blob range [{0} - {1})\n",
                               dbBlobRanges[i].key.printable(),
                               endKey.printable());
                }
                endKey = normalKeys.end;
            }
            handleClientBlobRange(
                knownBlobRanges, ar, rangesToAdd, rangesToRemove, dbBlobRanges[i].key, endKey, active);
        }
        if (dbBlobRanges[dbBlobRanges.size() - 1].key < normalKeys.end) {
            handleClientBlobRange(knownBlobRanges,
                                  ar,
                                  rangesToAdd,
                                  rangesToRemove,
                                  dbBlobRanges[dbBlobRanges.size() - 1].key,
                                  normalKeys.end,
                                  false);
        }
    }
    knownBlobRanges->coalesce(normalKeys);
}
void getRanges(std::vector<std::pair<KeyRangeRef, bool>>& results, KeyRangeMap<bool>& knownBlobRanges) {
    if (BM_DEBUG) {
        printf("Getting ranges:\n");
    }
    auto allRanges = knownBlobRanges.ranges();
    for (auto& r : allRanges) {
        results.emplace_back(r.range(), r.value());
        if (BM_DEBUG) {
            fmt::print("  [{0} - {1}): {2}\n", r.begin().printable(), r.end().printable(), r.value() ? "T" : "F");
        }
    }
}
struct RangeAssignmentData {
    AssignRequestType type;

    RangeAssignmentData() : type(AssignRequestType::Normal) {}
    RangeAssignmentData(AssignRequestType type) : type(type) {}
};

struct RangeRevokeData {
    bool dispose;

    RangeRevokeData() {}
    RangeRevokeData(bool dispose) : dispose(dispose) {}
};
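// A single unit of work for the range assigner: either an assignment (assign present) or a revoke
// (revoke present) of keyRange, optionally pinned to a specific blob worker.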
struct RangeAssignment {
    bool isAssign;
    KeyRange keyRange;
    Optional<UID> worker;

    // I tried doing this with a union and it was just kind of messy
    Optional<RangeAssignmentData> assign;
    Optional<RangeRevokeData> revoke;
};

// TODO: track worker's reads/writes eventually
struct BlobWorkerStats {
    int numGranulesAssigned;

    BlobWorkerStats(int numGranulesAssigned = 0) : numGranulesAssigned(numGranulesAssigned) {}
};
struct BlobManagerData : NonCopyable, ReferenceCounted<BlobManagerData> {
    UID id;
    Database db;
    Optional<Key> dcId;
    PromiseStream<Future<Void>> addActor;
    Promise<Void> doLockCheck;

    Reference<BackupContainerFileSystem> bstore;

    std::unordered_map<UID, BlobWorkerInterface> workersById;
    std::unordered_map<UID, BlobWorkerStats> workerStats; // mapping between workerID -> workerStats
    std::unordered_set<NetworkAddress> workerAddresses;
    std::unordered_set<UID> deadWorkers;
    KeyRangeMap<UID> workerAssignments;
    KeyRangeActorMap assignsInProgress;
    KeyRangeMap<bool> knownBlobRanges;

    AsyncTrigger startRecruiting;
    Debouncer restartRecruiting;
    std::set<NetworkAddress> recruitingLocalities; // the addrs of the workers being recruited on
    AsyncVar<int> recruitingStream;
    Promise<Void> foundBlobWorkers;

    int64_t epoch = -1;
    int64_t seqNo = 1;

    Promise<Void> iAmReplaced;

    // The order maintained here is important: the order in which ranges are put into the promise stream is the
    // order in which they get assigned sequence numbers
    PromiseStream<RangeAssignment> rangesToAssign;

    BlobManagerData(UID id, Database db, Optional<Key> dcId)
      : id(id), db(db), dcId(dcId), knownBlobRanges(false, normalKeys.end),
        restartRecruiting(SERVER_KNOBS->DEBOUNCE_RECRUITING_DELAY), recruitingStream(0) {}

    // TODO REMOVE
    ~BlobManagerData() {
        if (BM_DEBUG) {
            fmt::print("Destroying blob manager data for {0} {1}\n", epoch, id.toString());
        }
    }
};
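// Proposes granule boundaries for the given range based on storage server metrics: if the estimated size is
// above BG_SNAPSHOT_FILE_TARGET_BYTES the range is split into roughly target-sized pieces (potentially over
// multiple splitStorageMetrics calls for very large ranges), otherwise just [begin, end] is returned.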
ACTOR Future<Standalone<VectorRef<KeyRef>>> splitRange(Reference<ReadYourWritesTransaction> tr, KeyRange range) {
    // TODO is it better to just pass empty metrics to estimated?
    // redo split if previous txn failed to calculate it
    loop {
        try {
            if (BM_DEBUG) {
                fmt::print("Splitting new range [{0} - {1})\n", range.begin.printable(), range.end.printable());
            }
            state StorageMetrics estimated =
                wait(tr->getTransaction().getStorageMetrics(range, CLIENT_KNOBS->TOO_MANY));

            if (BM_DEBUG) {
                fmt::print("Estimated bytes for [{0} - {1}): {2}\n",
                           range.begin.printable(),
                           range.end.printable(),
                           estimated.bytes);
            }

            if (estimated.bytes > SERVER_KNOBS->BG_SNAPSHOT_FILE_TARGET_BYTES) {
                // printf(" Splitting range\n");
                // only split on bytes
                state Standalone<VectorRef<KeyRef>> keys;
                state StorageMetrics splitMetrics;
                splitMetrics.bytes = SERVER_KNOBS->BG_SNAPSHOT_FILE_TARGET_BYTES;
                splitMetrics.bytesPerKSecond = splitMetrics.infinity;
                splitMetrics.iosPerKSecond = splitMetrics.infinity;
                splitMetrics.bytesReadPerKSecond = splitMetrics.infinity; // Don't split by readBandwidth

                while (keys.empty() || keys.back() < range.end) {
                    // allow partial in case we have a large split
                    Standalone<VectorRef<KeyRef>> newKeys =
                        wait(tr->getTransaction().splitStorageMetrics(range, splitMetrics, estimated, true));
                    ASSERT(!newKeys.empty());
                    if (keys.empty()) {
                        keys = newKeys;
                    } else {
                        TEST(true); // large split that requires multiple rounds
                        // start key was repeated with last request, so don't include it
                        ASSERT(newKeys[0] == keys.back());
                        keys.append_deep(keys.arena(), newKeys.begin() + 1, newKeys.size() - 1);
                    }
                    range = KeyRangeRef(keys.back(), range.end);
                }
                ASSERT(keys.size() >= 2);
                return keys;
            } else {
                // printf(" Not splitting range\n");
                Standalone<VectorRef<KeyRef>> keys;
                keys.push_back_deep(keys.arena(), range.begin);
                keys.push_back_deep(keys.arena(), range.end);
                return keys;
            }
        } catch (Error& e) {
            wait(tr->onError(e));
        }
    }
}
2021-09-02 11:36:44 +08:00
|
|
|
// Picks a worker with the fewest number of already assigned ranges.
|
|
|
|
// If there is a tie, picks one such worker at random.
|
2022-01-21 02:33:15 +08:00
|
|
|
ACTOR Future<UID> pickWorkerForAssign(Reference<BlobManagerData> bmData) {
|
2021-10-23 10:41:19 +08:00
|
|
|
// wait until there are BWs to pick from
|
2021-10-22 05:39:38 +08:00
|
|
|
while (bmData->workerStats.size() == 0) {
|
2022-01-06 02:48:53 +08:00
|
|
|
// TODO REMOVE
|
|
|
|
if (BM_DEBUG) {
|
2022-01-16 00:05:43 +08:00
|
|
|
fmt::print("BM {0} waiting for blob workers before assigning granules\n", bmData->epoch);
|
2022-01-06 02:48:53 +08:00
|
|
|
}
|
2021-10-23 10:41:19 +08:00
|
|
|
bmData->restartRecruiting.trigger();
|
2022-01-22 00:29:54 +08:00
|
|
|
wait(bmData->recruitingStream.onChange() || bmData->foundBlobWorkers.getFuture());
|
2021-10-22 05:39:38 +08:00
|
|
|
}
|
2021-09-04 04:13:26 +08:00
|
|
|
|
2021-10-27 09:39:41 +08:00
|
|
|
int minGranulesAssigned = INT_MAX;
|
|
|
|
std::vector<UID> eligibleWorkers;
|
|
|
|
|
2021-09-04 04:13:26 +08:00
|
|
|
for (auto const& worker : bmData->workerStats) {
|
2021-09-02 11:36:44 +08:00
|
|
|
UID currId = worker.first;
|
|
|
|
int granulesAssigned = worker.second.numGranulesAssigned;
|
|
|
|
|
|
|
|
if (granulesAssigned < minGranulesAssigned) {
|
|
|
|
eligibleWorkers.resize(0);
|
|
|
|
minGranulesAssigned = granulesAssigned;
|
|
|
|
eligibleWorkers.emplace_back(currId);
|
|
|
|
} else if (granulesAssigned == minGranulesAssigned) {
|
|
|
|
eligibleWorkers.emplace_back(currId);
|
|
|
|
}
|
2021-09-01 01:30:43 +08:00
|
|
|
}
|
2021-08-31 02:07:25 +08:00
|
|
|
|
2021-09-02 11:36:44 +08:00
|
|
|
// pick a random worker out of the eligible workers
|
|
|
|
ASSERT(eligibleWorkers.size() > 0);
|
|
|
|
int idx = deterministicRandom()->randomInt(0, eligibleWorkers.size());
|
2021-09-01 01:30:43 +08:00
|
|
|
if (BM_DEBUG) {
|
2022-01-15 07:10:53 +08:00
|
|
|
fmt::print("picked worker {0}, which has a minimal number ({1}) of granules assigned\n",
|
|
|
|
eligibleWorkers[idx].toString(),
|
|
|
|
minGranulesAssigned);
|
2021-09-01 01:30:43 +08:00
|
|
|
}
|
2021-09-02 11:36:44 +08:00
|
|
|
|
|
|
|
return eligibleWorkers[idx];
|
2021-08-31 02:07:25 +08:00
|
|
|
}
|
|
|
|
|
2022-01-21 02:33:15 +08:00
|
|
|
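// Sends a single assign or revoke request to the given blob worker and handles failures: an assignment
// conflict triggers a manager lock check, a failed assign is re-queued as a normal assign (preceded by a
// revoke), and a failed revoke is only retried if it was a dispose.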
ACTOR Future<Void> doRangeAssignment(Reference<BlobManagerData> bmData,
|
|
|
|
RangeAssignment assignment,
|
|
|
|
UID workerID,
|
|
|
|
int64_t seqNo) {
|
2021-08-24 03:14:48 +08:00
|
|
|
|
2021-09-01 01:30:43 +08:00
|
|
|
if (BM_DEBUG) {
|
2022-01-15 07:10:53 +08:00
|
|
|
fmt::print("BM {0} {1} range [{2} - {3}) @ ({4}, {5}) to {6}\n",
|
|
|
|
bmData->epoch,
|
2021-11-18 09:03:32 +08:00
|
|
|
assignment.isAssign ? "assigning" : "revoking",
|
|
|
|
assignment.keyRange.begin.printable(),
|
|
|
|
assignment.keyRange.end.printable(),
|
|
|
|
bmData->epoch,
|
2022-01-15 07:10:53 +08:00
|
|
|
seqNo,
|
|
|
|
workerID.toString());
|
2021-09-01 01:30:43 +08:00
|
|
|
}
|
2021-08-24 03:14:48 +08:00
|
|
|
|
2021-08-31 02:07:25 +08:00
|
|
|
try {
|
2021-09-04 04:13:26 +08:00
|
|
|
if (assignment.isAssign) {
|
|
|
|
ASSERT(assignment.assign.present());
|
|
|
|
ASSERT(!assignment.revoke.present());
|
|
|
|
|
|
|
|
AssignBlobRangeRequest req;
|
|
|
|
req.keyRange = KeyRangeRef(StringRef(req.arena, assignment.keyRange.begin),
|
|
|
|
StringRef(req.arena, assignment.keyRange.end));
|
|
|
|
req.managerEpoch = bmData->epoch;
|
|
|
|
req.managerSeqno = seqNo;
|
2021-10-23 10:41:19 +08:00
|
|
|
req.type = assignment.assign.get().type;
|
2021-10-01 23:08:00 +08:00
|
|
|
|
|
|
|
// if that worker isn't alive anymore, add the range back into the stream
|
|
|
|
if (bmData->workersById.count(workerID) == 0) {
|
2021-10-14 02:56:17 +08:00
|
|
|
throw no_more_servers();
|
2021-10-01 23:08:00 +08:00
|
|
|
}
|
2022-01-21 01:43:34 +08:00
|
|
|
wait(bmData->workersById[workerID].assignBlobRangeRequest.getReply(req));
|
2021-09-04 04:13:26 +08:00
|
|
|
} else {
|
|
|
|
ASSERT(!assignment.assign.present());
|
|
|
|
ASSERT(assignment.revoke.present());
|
|
|
|
|
|
|
|
RevokeBlobRangeRequest req;
|
|
|
|
req.keyRange = KeyRangeRef(StringRef(req.arena, assignment.keyRange.begin),
|
|
|
|
StringRef(req.arena, assignment.keyRange.end));
|
|
|
|
req.managerEpoch = bmData->epoch;
|
|
|
|
req.managerSeqno = seqNo;
|
|
|
|
req.dispose = assignment.revoke.get().dispose;
|
|
|
|
|
2021-10-01 23:08:00 +08:00
|
|
|
// if that worker isn't alive anymore, this is a noop
|
|
|
|
if (bmData->workersById.count(workerID)) {
|
2022-01-21 01:43:34 +08:00
|
|
|
wait(bmData->workersById[workerID].revokeBlobRangeRequest.getReply(req));
|
2021-10-01 23:08:00 +08:00
|
|
|
} else {
|
|
|
|
return Void();
|
|
|
|
}
|
2021-09-04 04:13:26 +08:00
|
|
|
}
|
2021-08-31 02:07:25 +08:00
|
|
|
} catch (Error& e) {
|
2021-10-23 10:41:19 +08:00
|
|
|
if (e.code() == error_code_operation_cancelled) {
|
|
|
|
throw;
|
|
|
|
}
|
2022-01-21 01:43:34 +08:00
|
|
|
if (e.code() == error_code_blob_manager_replaced) {
|
|
|
|
if (bmData->iAmReplaced.canBeSet()) {
|
|
|
|
bmData->iAmReplaced.send(Void());
|
|
|
|
}
|
|
|
|
return Void();
|
|
|
|
}
|
2022-01-16 00:05:43 +08:00
|
|
|
if (e.code() == error_code_granule_assignment_conflict) {
|
|
|
|
// Another blob worker already owns the range, don't retry.
|
|
|
|
// And, if it was us that sent the request to another worker for this range, this actor should have been
|
|
|
|
// cancelled. So if it wasn't, it's likely that the conflict is from a new blob manager. Trigger the lock
|
|
|
|
// check to make sure, and die if so.
|
2022-01-15 07:10:53 +08:00
|
|
|
if (BM_DEBUG) {
|
2022-01-16 00:05:43 +08:00
|
|
|
fmt::print("BM {0} got conflict assigning [{1} - {2}) to worker {3}, ignoring\n",
|
2022-01-15 07:10:53 +08:00
|
|
|
bmData->epoch,
|
|
|
|
assignment.keyRange.begin.printable(),
|
|
|
|
assignment.keyRange.end.printable(),
|
|
|
|
workerID.toString());
|
|
|
|
}
|
2022-01-16 00:05:43 +08:00
|
|
|
if (bmData->doLockCheck.canBeSet()) {
|
|
|
|
bmData->doLockCheck.send(Void());
|
|
|
|
}
|
2022-01-15 04:55:35 +08:00
|
|
|
return Void();
|
|
|
|
}
|
2022-01-16 00:05:43 +08:00
|
|
|
|
2021-08-31 02:07:25 +08:00
|
|
|
// TODO confirm: using reliable delivery this should only trigger if the worker is marked as failed, right?
|
|
|
|
// So assignment needs to be retried elsewhere, and a revoke is trivially complete
|
|
|
|
if (assignment.isAssign) {
|
2021-09-01 01:30:43 +08:00
|
|
|
if (BM_DEBUG) {
|
2022-01-21 06:45:13 +08:00
|
|
|
fmt::print("BM got error {0} assigning range [{1} - {2}) to worker {3}, requeueing\n",
|
|
|
|
e.name(),
|
2022-01-15 07:10:53 +08:00
|
|
|
assignment.keyRange.begin.printable(),
|
|
|
|
assignment.keyRange.end.printable(),
|
|
|
|
workerID.toString());
|
2021-09-01 01:30:43 +08:00
|
|
|
}
|
2022-01-21 06:28:08 +08:00
|
|
|
|
2021-08-31 02:07:25 +08:00
|
|
|
// re-send revoke to queue to handle range being un-assigned from that worker before the new one
|
2021-09-04 04:13:26 +08:00
|
|
|
RangeAssignment revokeOld;
|
|
|
|
revokeOld.isAssign = false;
|
|
|
|
revokeOld.worker = workerID;
|
|
|
|
revokeOld.keyRange = assignment.keyRange;
|
|
|
|
revokeOld.revoke = RangeRevokeData(false);
|
|
|
|
bmData->rangesToAssign.send(revokeOld);
|
|
|
|
|
|
|
|
// send assignment back to queue as is, clearing designated worker if present
|
2022-01-21 06:45:13 +08:00
|
|
|
// if we failed to send continue or reassign to the worker we thought owned the shard, it should be retried
|
|
|
|
// as a normal assign
|
|
|
|
ASSERT(assignment.assign.present());
|
|
|
|
assignment.assign.get().type = AssignRequestType::Normal;
|
2021-10-23 10:41:19 +08:00
|
|
|
assignment.worker.reset();
|
2021-08-31 02:07:25 +08:00
|
|
|
bmData->rangesToAssign.send(assignment);
|
|
|
|
// FIXME: improvement would be to add history of failed workers to assignment so it can try other ones first
|
2021-09-04 04:13:26 +08:00
|
|
|
} else {
|
|
|
|
if (BM_DEBUG) {
|
2022-01-15 07:10:53 +08:00
|
|
|
fmt::print("BM got error revoking range [{0} - {1}) from worker",
|
|
|
|
assignment.keyRange.begin.printable(),
|
|
|
|
assignment.keyRange.end.printable());
|
2021-09-04 04:13:26 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
if (assignment.revoke.get().dispose) {
|
|
|
|
if (BM_DEBUG) {
|
|
|
|
printf(", retrying for dispose\n");
|
|
|
|
}
|
|
|
|
// send assignment back to queue as is, clearing designated worker if present
|
|
|
|
assignment.worker.reset();
|
|
|
|
bmData->rangesToAssign.send(assignment);
|
|
|
|
//
|
|
|
|
} else {
|
|
|
|
if (BM_DEBUG) {
|
|
|
|
printf(", ignoring\n");
|
|
|
|
}
|
|
|
|
}
|
2021-08-31 02:07:25 +08:00
|
|
|
}
|
2021-08-28 05:33:07 +08:00
|
|
|
}
|
2021-08-24 03:14:48 +08:00
|
|
|
return Void();
|
|
|
|
}
|
|
|
|
|
2022-01-21 02:33:15 +08:00
|
|
|
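// Sole consumer of bmData->rangesToAssign: stamps each request with the next sequence number, updates the
// in-memory worker assignment map and per-worker granule counts, and launches doRangeAssignment against the
// chosen (or newly picked) blob worker.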
ACTOR Future<Void> rangeAssigner(Reference<BlobManagerData> bmData) {
|
2021-08-31 02:07:25 +08:00
|
|
|
loop {
|
2021-09-14 23:19:15 +08:00
|
|
|
// inject delay into range assignments
|
|
|
|
if (BUGGIFY_WITH_PROB(0.05)) {
|
|
|
|
wait(delay(deterministicRandom()->random01()));
|
|
|
|
}
|
2021-10-22 05:39:38 +08:00
|
|
|
state RangeAssignment assignment = waitNext(bmData->rangesToAssign.getFuture());
|
|
|
|
state int64_t seqNo = bmData->seqNo;
|
2021-08-31 02:07:25 +08:00
|
|
|
bmData->seqNo++;
|
|
|
|
|
|
|
|
// modify the in-memory assignment data structures, and send request off to worker
|
2021-10-22 05:39:38 +08:00
|
|
|
state UID workerId;
|
2021-08-31 02:07:25 +08:00
|
|
|
if (assignment.isAssign) {
|
|
|
|
// Ensure range isn't currently assigned anywhere, and there is only 1 intersecting range
|
|
|
|
auto currentAssignments = bmData->workerAssignments.intersectingRanges(assignment.keyRange);
|
|
|
|
int count = 0;
|
2021-10-27 22:40:03 +08:00
|
|
|
for (auto i = currentAssignments.begin(); i != currentAssignments.end(); ++i) {
|
2021-10-27 09:39:41 +08:00
|
|
|
/* TODO: rethink asserts here
|
2021-10-23 10:41:19 +08:00
|
|
|
if (assignment.assign.get().type == AssignRequestType::Continue) {
|
|
|
|
ASSERT(assignment.worker.present());
|
|
|
|
ASSERT(it.value() == assignment.worker.get());
|
2021-09-04 04:13:26 +08:00
|
|
|
} else {
|
2021-10-23 10:41:19 +08:00
|
|
|
ASSERT(it.value() == UID());
|
2021-09-04 04:13:26 +08:00
|
|
|
}
|
2021-10-23 10:41:19 +08:00
|
|
|
*/
|
2021-08-31 02:07:25 +08:00
|
|
|
count++;
|
|
|
|
}
|
|
|
|
ASSERT(count == 1);
|
|
|
|
|
2021-10-22 05:39:38 +08:00
|
|
|
if (assignment.worker.present() && assignment.worker.get().isValid()) {
|
2021-10-23 10:41:19 +08:00
|
|
|
if (BM_DEBUG) {
|
2022-01-15 07:10:53 +08:00
|
|
|
fmt::print("BW {0} already chosen for seqno {1} in BM {2}\n",
|
|
|
|
assignment.worker.get().toString(),
|
|
|
|
seqNo,
|
|
|
|
bmData->id.toString());
|
2021-10-23 10:41:19 +08:00
|
|
|
}
|
2022-01-15 07:10:53 +08:00
|
|
|
workerId = assignment.worker.get();
|
|
|
|
} else {
|
2021-10-22 05:39:38 +08:00
|
|
|
UID _workerId = wait(pickWorkerForAssign(bmData));
|
2021-10-23 10:41:19 +08:00
|
|
|
if (BM_DEBUG) {
|
2022-01-15 07:10:53 +08:00
|
|
|
fmt::print("Chose BW {0} for seqno {1} in BM {2}\n", _workerId.toString(), seqNo, bmData->epoch);
|
2021-10-23 10:41:19 +08:00
|
|
|
}
|
2021-10-22 05:39:38 +08:00
|
|
|
workerId = _workerId;
|
|
|
|
}
|
2021-08-31 02:07:25 +08:00
|
|
|
bmData->workerAssignments.insert(assignment.keyRange, workerId);
|
|
|
|
|
2021-10-23 10:41:19 +08:00
|
|
|
// If we know about the worker and this is not a continue, then this is a new range for the worker
|
|
|
|
if (bmData->workerStats.count(workerId) && assignment.assign.get().type != AssignRequestType::Continue) {
|
|
|
|
bmData->workerStats[workerId].numGranulesAssigned += 1;
|
2021-10-01 23:08:00 +08:00
|
|
|
}
|
|
|
|
|
2021-08-31 02:07:25 +08:00
|
|
|
// FIXME: if range is assign, have some sort of semaphore for outstanding assignments so we don't assign
|
|
|
|
// a ton of ranges at once and blow up FDB with reading initial snapshots.
|
2022-01-16 00:05:43 +08:00
|
|
|
bmData->assignsInProgress.insert(assignment.keyRange,
|
|
|
|
doRangeAssignment(bmData, assignment, workerId, seqNo));
|
2021-08-31 02:07:25 +08:00
|
|
|
} else {
|
|
|
|
// Revoking a range could be a large range that contains multiple ranges.
|
|
|
|
auto currentAssignments = bmData->workerAssignments.intersectingRanges(assignment.keyRange);
|
|
|
|
for (auto& it : currentAssignments) {
|
|
|
|
// ensure range doesn't truncate existing ranges
|
|
|
|
ASSERT(it.begin() >= assignment.keyRange.begin);
|
2021-08-31 02:59:53 +08:00
|
|
|
ASSERT(it.end() <= assignment.keyRange.end);
|
2021-08-31 02:07:25 +08:00
|
|
|
|
|
|
|
// It is fine for multiple disjoint sub-ranges to have the same sequence number since they were part of
|
|
|
|
// the same logical change
|
2021-10-01 23:08:00 +08:00
|
|
|
|
|
|
|
if (bmData->workerStats.count(it.value())) {
|
|
|
|
bmData->workerStats[it.value()].numGranulesAssigned -= 1;
|
|
|
|
}
|
|
|
|
|
2022-01-16 00:05:43 +08:00
|
|
|
bmData->assignsInProgress.cancel(assignment.keyRange);
|
|
|
|
|
2021-10-01 23:08:00 +08:00
|
|
|
// revoke the range for the worker that owns it, not the worker specified in the revoke
|
|
|
|
bmData->addActor.send(doRangeAssignment(bmData, assignment, it.value(), seqNo));
|
2021-08-31 02:07:25 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
bmData->workerAssignments.insert(assignment.keyRange, UID());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-01-21 02:33:15 +08:00
|
|
|
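// Verifies this manager still owns the epoch lock. If a newer epoch is found, signals iAmReplaced and throws
// blob_manager_replaced; otherwise adds a read conflict range on the lock key so the check holds at commit time.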
ACTOR Future<Void> checkManagerLock(Reference<ReadYourWritesTransaction> tr, Reference<BlobManagerData> bmData) {
|
2021-09-04 04:13:26 +08:00
|
|
|
Optional<Value> currentLockValue = wait(tr->get(blobManagerEpochKey));
|
|
|
|
ASSERT(currentLockValue.present());
|
|
|
|
int64_t currentEpoch = decodeBlobManagerEpochValue(currentLockValue.get());
|
|
|
|
if (currentEpoch != bmData->epoch) {
|
|
|
|
ASSERT(currentEpoch > bmData->epoch);
|
|
|
|
|
2021-09-14 23:19:15 +08:00
|
|
|
if (BM_DEBUG) {
|
2021-11-18 09:03:32 +08:00
|
|
|
fmt::print(
|
|
|
|
"BM {0} found new epoch {1} > {2} in lock check\n", bmData->id.toString(), currentEpoch, bmData->epoch);
|
2021-09-14 23:19:15 +08:00
|
|
|
}
|
2021-09-04 04:13:26 +08:00
|
|
|
if (bmData->iAmReplaced.canBeSet()) {
|
|
|
|
bmData->iAmReplaced.send(Void());
|
|
|
|
}
|
|
|
|
|
2022-01-21 01:43:34 +08:00
|
|
|
throw blob_manager_replaced();
|
2021-09-04 04:13:26 +08:00
|
|
|
}
|
|
|
|
tr->addReadConflictRange(singleKeyRange(blobManagerEpochKey));
|
|
|
|
|
|
|
|
return Void();
|
|
|
|
}
|
|
|
|
|
2022-01-21 02:33:15 +08:00
|
|
|
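// Persists the initial granule boundaries into the blob granule mapping with an empty owner UID, batching the
// krmSetRange calls across multiple transactions so no single transaction gets too large.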
ACTOR Future<Void> writeInitialGranuleMapping(Reference<BlobManagerData> bmData,
|
|
|
|
Standalone<VectorRef<KeyRef>> boundaries) {
|
2022-01-06 02:48:53 +08:00
|
|
|
state Reference<ReadYourWritesTransaction> tr = makeReference<ReadYourWritesTransaction>(bmData->db);
|
|
|
|
// don't do too many in one transaction
|
|
|
|
state int i = 0;
|
|
|
|
state int transactionChunkSize = BUGGIFY ? deterministicRandom()->randomInt(2, 5) : 1000;
|
|
|
|
while (i < boundaries.size() - 1) {
|
|
|
|
TEST(i > 0); // multiple transactions for large granule split
|
|
|
|
tr->reset();
|
|
|
|
state int j = 0;
|
2022-01-06 04:46:01 +08:00
|
|
|
loop {
|
|
|
|
try {
|
2022-01-14 23:07:42 +08:00
|
|
|
tr->setOption(FDBTransactionOptions::Option::PRIORITY_SYSTEM_IMMEDIATE);
|
|
|
|
tr->setOption(FDBTransactionOptions::Option::ACCESS_SYSTEM_KEYS);
|
2022-01-06 04:46:01 +08:00
|
|
|
while (i + j < boundaries.size() - 1 && j < transactionChunkSize) {
|
|
|
|
// TODO REMOVE
|
|
|
|
if (BM_DEBUG) {
|
2022-01-15 07:10:53 +08:00
|
|
|
fmt::print("Persisting initial mapping for [{0} - {1})\n",
|
|
|
|
boundaries[i + j].printable(),
|
|
|
|
boundaries[i + j + 1].printable());
|
2022-01-06 04:46:01 +08:00
|
|
|
}
|
|
|
|
// set to empty UID - no worker assigned yet
|
|
|
|
wait(krmSetRange(tr,
|
|
|
|
blobGranuleMappingKeys.begin,
|
|
|
|
KeyRangeRef(boundaries[i + j], boundaries[i + j + 1]),
|
|
|
|
blobGranuleMappingValueFor(UID())));
|
|
|
|
j++;
|
|
|
|
}
|
|
|
|
wait(tr->commit());
|
2022-01-28 00:04:06 +08:00
|
|
|
if (BM_DEBUG) {
|
|
|
|
for (int k = 0; k < j; k++) {
|
|
|
|
fmt::print("Persisted initial mapping for [{0} - {1})\n",
|
|
|
|
boundaries[i + k].printable(),
|
|
|
|
boundaries[i + k + 1].printable());
|
|
|
|
}
|
|
|
|
}
|
2022-01-06 04:46:01 +08:00
|
|
|
break;
|
|
|
|
} catch (Error& e) {
|
2022-01-14 06:29:25 +08:00
|
|
|
if (BM_DEBUG) {
|
2022-01-15 07:10:53 +08:00
|
|
|
fmt::print("Persisting initial mapping got error {}\n", e.name());
|
2022-01-14 06:29:25 +08:00
|
|
|
}
|
2022-01-06 04:46:01 +08:00
|
|
|
wait(tr->onError(e));
|
|
|
|
j = 0;
|
2022-01-06 02:48:53 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
i += j;
|
|
|
|
}
|
|
|
|
return Void();
|
|
|
|
}
|
|
|
|
|
2021-10-26 00:19:28 +08:00
|
|
|
// FIXME: this does all logic in one transaction. Adding a giant range to an existing database to blobify would
|
2021-09-04 04:13:26 +08:00
|
|
|
// require doing a ton of storage metrics calls, which we should split up across multiple transactions likely.
|
2022-01-21 02:33:15 +08:00
|
|
|
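// Watches the blob range change key and, whenever it changes, diffs the configured client blob ranges against
// the known set: removed ranges are revoked (with dispose) and newly added ranges are split and assigned.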
ACTOR Future<Void> monitorClientRanges(Reference<BlobManagerData> bmData) {
|
2022-01-06 04:46:01 +08:00
|
|
|
state Optional<Value> lastChangeKeyValue;
|
2021-08-24 03:14:48 +08:00
|
|
|
loop {
|
2021-08-31 02:07:25 +08:00
|
|
|
state Reference<ReadYourWritesTransaction> tr = makeReference<ReadYourWritesTransaction>(bmData->db);
|
2021-08-24 03:14:48 +08:00
|
|
|
|
2021-09-01 01:30:43 +08:00
|
|
|
if (BM_DEBUG) {
|
|
|
|
printf("Blob manager checking for range updates\n");
|
|
|
|
}
|
2021-08-24 03:14:48 +08:00
|
|
|
loop {
|
|
|
|
try {
|
|
|
|
tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
|
|
|
|
tr->setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);
|
|
|
|
|
2022-01-06 04:46:01 +08:00
|
|
|
// read change key at this point along with ranges
|
|
|
|
state Optional<Value> ckvBegin = wait(tr->get(blobRangeChangeKey));
|
|
|
|
|
2021-08-24 03:14:48 +08:00
|
|
|
// TODO probably knobs here? This should always be pretty small though
|
|
|
|
RangeResult results = wait(krmGetRanges(
|
|
|
|
tr, blobRangeKeys.begin, KeyRange(normalKeys), 10000, GetRangeLimits::BYTE_LIMIT_UNLIMITED));
|
|
|
|
ASSERT(!results.more && results.size() < CLIENT_KNOBS->TOO_MANY);
|
|
|
|
|
|
|
|
state Arena ar;
|
|
|
|
ar.dependsOn(results.arena());
|
|
|
|
VectorRef<KeyRangeRef> rangesToAdd;
|
|
|
|
VectorRef<KeyRangeRef> rangesToRemove;
|
2021-08-31 02:07:25 +08:00
|
|
|
updateClientBlobRanges(&bmData->knownBlobRanges, results, ar, &rangesToAdd, &rangesToRemove);
|
2021-08-24 03:14:48 +08:00
|
|
|
|
|
|
|
for (KeyRangeRef range : rangesToRemove) {
|
2021-09-01 01:30:43 +08:00
|
|
|
if (BM_DEBUG) {
|
2022-01-15 07:10:53 +08:00
|
|
|
fmt::print(
|
|
|
|
"BM Got range to revoke [{0} - {1})\n", range.begin.printable(), range.end.printable());
|
2021-09-01 01:30:43 +08:00
|
|
|
}
|
2021-08-24 03:14:48 +08:00
|
|
|
|
2021-09-04 04:13:26 +08:00
|
|
|
RangeAssignment ra;
|
|
|
|
ra.isAssign = false;
|
|
|
|
ra.keyRange = range;
|
|
|
|
ra.revoke = RangeRevokeData(true); // dispose=true
|
|
|
|
bmData->rangesToAssign.send(ra);
|
2021-08-24 03:14:48 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
state std::vector<Future<Standalone<VectorRef<KeyRef>>>> splitFutures;
|
|
|
|
// Divide new ranges up into equal chunks by using SS byte sample
|
|
|
|
for (KeyRangeRef range : rangesToAdd) {
|
2021-09-04 04:13:26 +08:00
|
|
|
splitFutures.push_back(splitRange(tr, range));
|
2021-08-24 03:14:48 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
for (auto f : splitFutures) {
|
2022-01-06 02:48:53 +08:00
|
|
|
state Standalone<VectorRef<KeyRef>> splits = wait(f);
|
2021-09-01 01:30:43 +08:00
|
|
|
if (BM_DEBUG) {
|
2022-01-15 07:10:53 +08:00
|
|
|
fmt::print("Split client range [{0} - {1}) into {2} ranges:\n",
|
|
|
|
splits[0].printable(),
|
|
|
|
splits[splits.size() - 1].printable(),
|
|
|
|
splits.size() - 1);
|
2021-09-01 01:30:43 +08:00
|
|
|
}
|
2021-08-31 02:07:25 +08:00
|
|
|
|
2022-01-06 02:48:53 +08:00
|
|
|
// Write to DB BEFORE sending assign requests, so that if manager dies before/during, new manager
|
|
|
|
// picks up the same ranges
|
|
|
|
wait(writeInitialGranuleMapping(bmData, splits));
|
|
|
|
|
2021-08-24 03:14:48 +08:00
|
|
|
for (int i = 0; i < splits.size() - 1; i++) {
|
|
|
|
KeyRange range = KeyRange(KeyRangeRef(splits[i], splits[i + 1]));
|
2021-10-23 10:41:19 +08:00
|
|
|
// only add the client range if this is the first BM or it's not already assigned
|
2022-01-06 02:48:53 +08:00
|
|
|
if (BM_DEBUG) {
|
2022-01-15 07:10:53 +08:00
|
|
|
fmt::print(
|
|
|
|
" [{0} - {1})\n", range.begin.printable().c_str(), range.end.printable().c_str());
|
2021-09-01 01:30:43 +08:00
|
|
|
}
|
2022-01-06 02:48:53 +08:00
|
|
|
|
|
|
|
RangeAssignment ra;
|
|
|
|
ra.isAssign = true;
|
|
|
|
ra.keyRange = range;
|
|
|
|
ra.assign = RangeAssignmentData(); // type=normal
|
|
|
|
bmData->rangesToAssign.send(ra);
|
2021-08-24 03:14:48 +08:00
|
|
|
}
|
2022-01-06 04:46:01 +08:00
|
|
|
wait(bmData->rangesToAssign.onEmpty());
|
2021-08-24 03:14:48 +08:00
|
|
|
}
|
|
|
|
|
2022-01-06 04:46:01 +08:00
|
|
|
lastChangeKeyValue =
|
|
|
|
ckvBegin; // the version of the ranges we processed is the one read alongside the ranges
|
|
|
|
|
|
|
|
// do a new transaction, check for change in change key, watch if none
|
|
|
|
tr->reset();
|
|
|
|
tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
|
|
|
|
tr->setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);
|
|
|
|
state Future<Void> watchFuture;
|
|
|
|
|
|
|
|
Optional<Value> ckvEnd = wait(tr->get(blobRangeChangeKey));
|
|
|
|
|
|
|
|
if (ckvEnd == lastChangeKeyValue) {
|
|
|
|
watchFuture = tr->watch(blobRangeChangeKey); // watch for change in key
|
|
|
|
wait(tr->commit());
|
|
|
|
if (BM_DEBUG) {
|
|
|
|
printf("Blob manager done processing client ranges, awaiting update\n");
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
watchFuture = Future<Void>(Void()); // restart immediately
|
2021-09-01 01:30:43 +08:00
|
|
|
}
|
2022-01-06 04:46:01 +08:00
|
|
|
|
2021-08-31 02:07:25 +08:00
|
|
|
wait(watchFuture);
|
2021-08-24 03:14:48 +08:00
|
|
|
break;
|
|
|
|
} catch (Error& e) {
|
2021-09-01 01:30:43 +08:00
|
|
|
if (BM_DEBUG) {
|
2022-01-15 07:10:53 +08:00
|
|
|
fmt::print("Blob manager got error looking for range updates {}\n", e.name());
|
2021-09-01 01:30:43 +08:00
|
|
|
}
|
2021-08-24 03:14:48 +08:00
|
|
|
wait(tr->onError(e));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-01-12 23:52:55 +08:00
|
|
|
// split recursively in the middle to guarantee roughly equal splits across different parts of key space
|
|
|
|
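// For example, downsampling 9 proposed interior boundaries to 3 keeps the midpoint and then recurses into each
// half, so the surviving split points stay spread across the whole key range rather than clustering at one end.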
static void downsampleSplit(const Standalone<VectorRef<KeyRef>>& splits,
|
|
|
|
Standalone<VectorRef<KeyRef>>& out,
|
|
|
|
int startIdx,
|
|
|
|
int endIdx,
|
|
|
|
int remaining) {
|
|
|
|
ASSERT(endIdx - startIdx >= remaining);
|
|
|
|
ASSERT(remaining >= 0);
|
|
|
|
if (remaining == 0) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
if (endIdx - startIdx == remaining) {
|
|
|
|
out.append(out.arena(), splits.begin() + startIdx, remaining);
|
|
|
|
} else {
|
|
|
|
int mid = (startIdx + endIdx) / 2;
|
|
|
|
int startCount = (remaining - 1) / 2;
|
|
|
|
int endCount = remaining - startCount - 1;
|
|
|
|
// ensure no infinite recursion
|
|
|
|
ASSERT(mid != endIdx);
|
|
|
|
ASSERT(mid + 1 != startIdx);
|
|
|
|
downsampleSplit(splits, out, startIdx, mid, startCount);
|
|
|
|
out.push_back(out.arena(), splits[mid]);
|
|
|
|
downsampleSplit(splits, out, mid + 1, endIdx, endCount);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-01-21 02:33:15 +08:00
|
|
|
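// Handles a split request from a blob worker: recomputes boundaries for the granule and, if it is not big
// enough to split, simply re-assigns it (Continue) to the current worker. Otherwise it caps the split fanout,
// persists the split intent, boundaries, and history entries under the manager lock, then revokes the old
// granule from the current worker and assigns the new sub-granules.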
ACTOR Future<Void> maybeSplitRange(Reference<BlobManagerData> bmData,
|
2021-10-09 06:35:36 +08:00
|
|
|
UID currentWorkerId,
|
|
|
|
KeyRange granuleRange,
|
|
|
|
UID granuleID,
|
|
|
|
Version granuleStartVersion,
|
|
|
|
Version latestVersion) {
|
2021-09-04 04:13:26 +08:00
|
|
|
state Reference<ReadYourWritesTransaction> tr = makeReference<ReadYourWritesTransaction>(bmData->db);
|
|
|
|
state Standalone<VectorRef<KeyRef>> newRanges;
|
|
|
|
state int64_t newLockSeqno = -1;
|
|
|
|
|
|
|
|
// first get ranges to split
|
2021-11-12 03:50:19 +08:00
|
|
|
Standalone<VectorRef<KeyRef>> _newRanges = wait(splitRange(tr, granuleRange));
|
|
|
|
newRanges = _newRanges;
|
2021-09-04 04:13:26 +08:00
|
|
|
|
2022-01-20 07:03:57 +08:00
|
|
|
ASSERT(newRanges.size() >= 2);
|
2021-09-04 04:13:26 +08:00
|
|
|
if (newRanges.size() == 2) {
|
|
|
|
// not large enough to split, just reassign back to worker
|
|
|
|
if (BM_DEBUG) {
|
2022-01-15 07:10:53 +08:00
|
|
|
fmt::print("Not splitting existing range [{0} - {1}). Continuing assignment to {2}\n",
|
|
|
|
granuleRange.begin.printable(),
|
|
|
|
granuleRange.end.printable(),
|
|
|
|
currentWorkerId.toString());
|
2021-09-04 04:13:26 +08:00
|
|
|
}
|
|
|
|
RangeAssignment raContinue;
|
|
|
|
raContinue.isAssign = true;
|
|
|
|
raContinue.worker = currentWorkerId;
|
2021-10-09 06:35:36 +08:00
|
|
|
raContinue.keyRange = granuleRange;
|
2021-10-23 10:41:19 +08:00
|
|
|
raContinue.assign = RangeAssignmentData(AssignRequestType::Continue); // continue assignment and re-snapshot
|
2021-09-04 04:13:26 +08:00
|
|
|
bmData->rangesToAssign.send(raContinue);
|
|
|
|
return Void();
|
|
|
|
}
|
|
|
|
|
2022-01-12 23:52:55 +08:00
|
|
|
// TODO KNOB for this.
|
|
|
|
// Enforce max split fanout of 10 for performance reasons
|
|
|
|
int maxSplitFanout = 10;
|
|
|
|
if (newRanges.size() >= maxSplitFanout + 2) { // +2 because this is boundaries, so N granules would have N+1 boundaries.
|
|
|
|
TEST(true); // downsampling granule split because fanout too high
|
|
|
|
Standalone<VectorRef<KeyRef>> coalescedRanges;
|
|
|
|
coalescedRanges.arena().dependsOn(newRanges.arena());
|
|
|
|
coalescedRanges.push_back(coalescedRanges.arena(), newRanges.front());
|
|
|
|
|
|
|
|
// since we include start + end boundaries here, only need maxSplitFanout-1 split boundaries to produce
|
|
|
|
// maxSplitFanout granules
|
|
|
|
downsampleSplit(newRanges, coalescedRanges, 1, newRanges.size() - 1, maxSplitFanout - 1);
|
|
|
|
|
|
|
|
coalescedRanges.push_back(coalescedRanges.arena(), newRanges.back());
|
|
|
|
ASSERT(coalescedRanges.size() == maxSplitFanout + 1);
|
|
|
|
if (BM_DEBUG) {
|
|
|
|
fmt::print("Downsampled split from {0} -> {1} granules", newRanges.size() - 1, maxSplitFanout);
|
|
|
|
}
|
|
|
|
|
|
|
|
newRanges = coalescedRanges;
|
|
|
|
}
|
|
|
|
|
2021-11-12 03:50:19 +08:00
|
|
|
if (BM_DEBUG) {
|
2022-01-11 07:22:56 +08:00
|
|
|
fmt::print("Splitting range [{0} - {1}) into {2} granules @ {3}:\n",
|
|
|
|
granuleRange.begin.printable(),
|
|
|
|
granuleRange.end.printable(),
|
|
|
|
newRanges.size() - 1,
|
|
|
|
latestVersion);
|
2021-11-12 03:50:19 +08:00
|
|
|
for (int i = 0; i < newRanges.size(); i++) {
|
2022-01-11 07:22:56 +08:00
|
|
|
fmt::print(" {}\n", newRanges[i].printable());
|
2021-11-12 03:50:19 +08:00
|
|
|
}
|
|
|
|
}
|
2022-01-20 07:03:57 +08:00
|
|
|
ASSERT(granuleRange.begin == newRanges.front());
|
|
|
|
ASSERT(granuleRange.end == newRanges.back());
|
|
|
|
|
|
|
|
// Have to make set of granule ids deterministic across retries to not end up with extra UIDs in the split
|
|
|
|
// state, which could cause recovery to fail and resources to not be cleaned up.
|
|
|
|
// This entire transaction must be idempotent across retries for all splitting state
|
|
|
|
state std::vector<UID> newGranuleIDs;
|
|
|
|
newGranuleIDs.reserve(newRanges.size() - 1);
|
|
|
|
for (int i = 0; i < newRanges.size() - 1; i++) {
|
|
|
|
newGranuleIDs.push_back(deterministicRandom()->randomUniqueID());
|
|
|
|
}
|
2021-11-12 03:50:19 +08:00
|
|
|
|
2022-01-25 04:12:36 +08:00
|
|
|
state int64_t splitSeqno = bmData->seqNo;
|
|
|
|
bmData->seqNo++;
|
|
|
|
|
2022-01-20 07:03:57 +08:00
|
|
|
// Need to split range. Persist intent to split and split metadata to DB BEFORE sending split assignments to blob
|
|
|
|
// workers, so that nothing is lost on blob manager recovery
|
2021-09-04 04:13:26 +08:00
|
|
|
loop {
|
|
|
|
try {
|
|
|
|
tr->reset();
|
|
|
|
tr->setOption(FDBTransactionOptions::Option::PRIORITY_SYSTEM_IMMEDIATE);
|
|
|
|
tr->setOption(FDBTransactionOptions::Option::ACCESS_SYSTEM_KEYS);
|
2021-11-12 03:50:19 +08:00
|
|
|
ASSERT(newRanges.size() > 2);
|
2021-09-04 04:13:26 +08:00
|
|
|
|
|
|
|
// make sure we're still manager when this transaction gets committed
|
|
|
|
wait(checkManagerLock(tr, bmData));
|
|
|
|
|
|
|
|
// acquire lock for old granule to make sure nobody else modifies it
|
2021-10-09 06:35:36 +08:00
|
|
|
state Key lockKey = blobGranuleLockKeyFor(granuleRange);
|
2021-09-04 04:13:26 +08:00
|
|
|
Optional<Value> lockValue = wait(tr->get(lockKey));
|
|
|
|
ASSERT(lockValue.present());
|
|
|
|
std::tuple<int64_t, int64_t, UID> prevGranuleLock = decodeBlobGranuleLockValue(lockValue.get());
|
|
|
|
if (std::get<0>(prevGranuleLock) > bmData->epoch) {
|
2021-09-14 23:19:15 +08:00
|
|
|
if (BM_DEBUG) {
|
2021-11-18 09:03:32 +08:00
|
|
|
fmt::print("BM {0} found a higher epoch {1} than {2} for granule lock of [{3} - {4})\n",
|
|
|
|
bmData->id.toString(),
|
|
|
|
std::get<0>(prevGranuleLock),
|
|
|
|
bmData->epoch,
|
|
|
|
granuleRange.begin.printable(),
|
|
|
|
granuleRange.end.printable());
|
2021-09-14 23:19:15 +08:00
|
|
|
}
|
2021-09-04 04:13:26 +08:00
|
|
|
|
|
|
|
if (bmData->iAmReplaced.canBeSet()) {
|
|
|
|
bmData->iAmReplaced.send(Void());
|
|
|
|
}
|
|
|
|
return Void();
|
|
|
|
}
|
2022-01-21 22:28:36 +08:00
|
|
|
int64_t ownerEpoch = std::get<0>(prevGranuleLock);
|
|
|
|
int64_t ownerSeqno = std::get<1>(prevGranuleLock);
|
2021-09-04 04:13:26 +08:00
|
|
|
if (newLockSeqno == -1) {
|
|
|
|
newLockSeqno = bmData->seqNo;
|
|
|
|
bmData->seqNo++;
|
2022-01-21 22:28:36 +08:00
|
|
|
if (!(bmData->epoch > ownerEpoch || (bmData->epoch == ownerEpoch && newLockSeqno > ownerSeqno))) {
|
|
|
|
printf("BM seqno for granule [%s - %s) out of order for lock! manager: (%lld, %lld), owner: %lld, "
|
|
|
|
"%lld)\n",
|
|
|
|
granuleRange.begin.printable().c_str(),
|
|
|
|
granuleRange.end.printable().c_str(),
|
|
|
|
bmData->epoch,
|
|
|
|
newLockSeqno,
|
|
|
|
ownerEpoch,
|
|
|
|
ownerSeqno);
|
|
|
|
}
|
|
|
|
ASSERT(bmData->epoch > ownerEpoch || (bmData->epoch == ownerEpoch && newLockSeqno > ownerSeqno));
|
2021-09-04 04:13:26 +08:00
|
|
|
} else {
|
2022-01-21 22:28:36 +08:00
|
|
|
if (!(bmData->epoch > ownerEpoch || (bmData->epoch == ownerEpoch && newLockSeqno >= ownerSeqno))) {
|
|
|
|
printf("BM seqno for granule [%s - %s) out of order for lock on retry! manager: (%lld, %lld), "
|
|
|
|
"owner: %lld, "
|
|
|
|
"%lld)\n",
|
|
|
|
granuleRange.begin.printable().c_str(),
|
|
|
|
granuleRange.end.printable().c_str(),
|
|
|
|
bmData->epoch,
|
|
|
|
newLockSeqno,
|
|
|
|
ownerEpoch,
|
|
|
|
ownerSeqno);
|
|
|
|
}
|
|
|
|
// previous transaction could have succeeded but got commit_unknown_result, so use >= instead of > for
|
|
|
|
// seqno if epochs are equal
|
|
|
|
ASSERT(bmData->epoch > ownerEpoch || (bmData->epoch == ownerEpoch && newLockSeqno >= ownerSeqno));
|
2021-09-04 04:13:26 +08:00
|
|
|
}
|
|
|
|
|
2021-09-23 01:46:20 +08:00
|
|
|
// acquire granule lock so nobody else can make changes to this granule.
|
2021-09-04 04:13:26 +08:00
|
|
|
tr->set(lockKey, blobGranuleLockValueFor(bmData->epoch, newLockSeqno, std::get<2>(prevGranuleLock)));
|
|
|
|
|
|
|
|
// set up split metadata
|
2022-01-20 07:03:57 +08:00
|
|
|
/*fmt::print("Persisting granule split {0} [{1} - {2})\n",
|
|
|
|
granuleID.toString().substr(0, 6),
|
|
|
|
granuleRange.begin.printable(),
|
|
|
|
granuleRange.end.printable());*/
|
|
|
|
|
2022-01-25 04:12:36 +08:00
|
|
|
// first key in split boundaries is special: a key that doesn't occur normally, mapping to the (epoch, seqno) of the split
|
|
|
|
tr->set(blobGranuleSplitBoundaryKeyFor(granuleID, splitBoundarySpecialKey),
|
|
|
|
blobGranuleSplitBoundaryValueFor(bmData->epoch, splitSeqno));
|
2021-09-04 04:13:26 +08:00
|
|
|
for (int i = 0; i < newRanges.size() - 1; i++) {
|
2022-01-20 07:03:57 +08:00
|
|
|
/*fmt::print(" {0} [{1} - {2})\n",
|
|
|
|
newGranuleIDs[i].toString().substr(0, 6),
|
|
|
|
newRanges[i].printable(),
|
|
|
|
newRanges[i + 1].printable());*/
|
2021-10-09 06:35:36 +08:00
|
|
|
|
2022-01-20 07:03:57 +08:00
|
|
|
Key splitKey = blobGranuleSplitKeyFor(granuleID, newGranuleIDs[i]);
|
2022-01-19 04:22:34 +08:00
|
|
|
tr->set(blobGranuleSplitBoundaryKeyFor(granuleID, newRanges[i]), Value());
|
2021-10-09 06:35:36 +08:00
|
|
|
|
|
|
|
tr->atomicOp(splitKey,
|
2021-11-12 03:50:19 +08:00
|
|
|
blobGranuleSplitValueFor(BlobGranuleSplitState::Initialized),
|
2021-09-23 01:46:20 +08:00
|
|
|
MutationRef::SetVersionstampedValue);
|
|
|
|
|
2021-10-09 06:35:36 +08:00
|
|
|
Key historyKey = blobGranuleHistoryKeyFor(KeyRangeRef(newRanges[i], newRanges[i + 1]), latestVersion);
|
|
|
|
|
|
|
|
Standalone<BlobGranuleHistoryValue> historyValue;
|
2022-01-20 07:03:57 +08:00
|
|
|
historyValue.granuleID = newGranuleIDs[i];
|
2021-10-09 06:35:36 +08:00
|
|
|
historyValue.parentGranules.push_back(historyValue.arena(),
|
|
|
|
std::pair(granuleRange, granuleStartVersion));
|
|
|
|
|
2022-01-15 23:33:47 +08:00
|
|
|
/*fmt::print("Creating history entry [{0} - {1}) - [{2} - {3})\n",
|
|
|
|
newRanges[i].printable(),
|
|
|
|
newRanges[i + 1].printable(),
|
|
|
|
granuleStartVersion,
|
|
|
|
latestVersion);*/
|
2021-10-09 06:35:36 +08:00
|
|
|
tr->set(historyKey, blobGranuleHistoryValueFor(historyValue));
|
2021-09-04 04:13:26 +08:00
|
|
|
}
|
2022-01-19 04:22:34 +08:00
|
|
|
tr->set(blobGranuleSplitBoundaryKeyFor(granuleID, newRanges.back()), Value());
|
2021-09-23 01:46:20 +08:00
|
|
|
|
2021-09-04 04:13:26 +08:00
|
|
|
wait(tr->commit());
|
|
|
|
break;
|
|
|
|
} catch (Error& e) {
|
2022-01-20 07:03:57 +08:00
|
|
|
if (e.code() == error_code_operation_cancelled) {
|
|
|
|
throw;
|
|
|
|
}
|
|
|
|
if (BM_DEBUG) {
|
|
|
|
fmt::print("BM {0} Persisting granule split got error {1}\n", bmData->epoch, e.name());
|
|
|
|
}
|
2021-09-24 02:07:01 +08:00
|
|
|
if (e.code() == error_code_granule_assignment_conflict) {
|
|
|
|
if (bmData->iAmReplaced.canBeSet()) {
|
|
|
|
bmData->iAmReplaced.send(Void());
|
|
|
|
}
|
|
|
|
return Void();
|
|
|
|
}
|
2021-09-04 04:13:26 +08:00
|
|
|
wait(tr->onError(e));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// transaction committed, send range assignments
|
|
|
|
// revoke from current worker
|
|
|
|
RangeAssignment raRevoke;
|
|
|
|
raRevoke.isAssign = false;
|
|
|
|
raRevoke.worker = currentWorkerId;
|
2021-10-09 06:35:36 +08:00
|
|
|
raRevoke.keyRange = granuleRange;
|
2021-09-04 04:13:26 +08:00
|
|
|
raRevoke.revoke = RangeRevokeData(false); // not a dispose
|
|
|
|
bmData->rangesToAssign.send(raRevoke);
|
|
|
|
|
|
|
|
for (int i = 0; i < newRanges.size() - 1; i++) {
|
|
|
|
// reassign new range and do handover of previous range
|
|
|
|
RangeAssignment raAssignSplit;
|
|
|
|
raAssignSplit.isAssign = true;
|
|
|
|
raAssignSplit.keyRange = KeyRangeRef(newRanges[i], newRanges[i + 1]);
|
2021-10-23 10:41:19 +08:00
|
|
|
raAssignSplit.assign = RangeAssignmentData();
|
2021-09-04 04:13:26 +08:00
|
|
|
// don't care who this range gets assigned to
|
|
|
|
bmData->rangesToAssign.send(raAssignSplit);
|
|
|
|
}
|
|
|
|
|
|
|
|
return Void();
|
|
|
|
}
|
|
|
|
|
2022-01-21 02:33:15 +08:00
|
|
|
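// Removes a blob worker from the persisted blob worker list, retrying the transaction (under the manager lock)
// until it commits.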
ACTOR Future<Void> deregisterBlobWorker(Reference<BlobManagerData> bmData, BlobWorkerInterface interf) {
|
2021-10-20 23:54:19 +08:00
|
|
|
state Reference<ReadYourWritesTransaction> tr = makeReference<ReadYourWritesTransaction>(bmData->db);
|
|
|
|
loop {
|
|
|
|
tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
|
|
|
|
tr->setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);
|
|
|
|
try {
|
2022-01-21 09:20:11 +08:00
|
|
|
wait(checkManagerLock(tr, bmData));
|
2021-10-20 23:54:19 +08:00
|
|
|
Key blobWorkerListKey = blobWorkerListKeyFor(interf.id());
|
|
|
|
tr->addReadConflictRange(singleKeyRange(blobWorkerListKey));
|
|
|
|
tr->clear(blobWorkerListKey);
|
|
|
|
|
|
|
|
wait(tr->commit());
|
|
|
|
|
|
|
|
if (BM_DEBUG) {
|
2022-01-15 07:10:53 +08:00
|
|
|
fmt::print("Deregistered blob worker {0}\n", interf.id().toString());
|
2021-10-20 23:54:19 +08:00
|
|
|
}
|
|
|
|
return Void();
|
|
|
|
} catch (Error& e) {
|
|
|
|
if (BM_DEBUG) {
|
2022-01-15 07:10:53 +08:00
|
|
|
fmt::print("Deregistering blob worker {0} got error {1}\n", interf.id().toString(), e.name());
|
2021-10-20 23:54:19 +08:00
|
|
|
}
|
|
|
|
wait(tr->onError(e));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-01-21 08:37:25 +08:00
|
|
|
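// Best-effort halt of a blob worker: errors other than operation_cancelled are not propagated, and a
// blob_manager_replaced reply signals iAmReplaced.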
ACTOR Future<Void> haltBlobWorker(Reference<BlobManagerData> bmData, BlobWorkerInterface bwInterf) {
|
2022-01-21 01:43:34 +08:00
|
|
|
loop {
|
|
|
|
try {
|
|
|
|
wait(bwInterf.haltBlobWorker.getReply(HaltBlobWorkerRequest(bmData->epoch, bmData->id)));
|
2022-01-21 06:45:13 +08:00
|
|
|
break;
|
2022-01-21 01:43:34 +08:00
|
|
|
} catch (Error& e) {
|
|
|
|
// throw other errors instead of returning?
|
|
|
|
if (e.code() == error_code_operation_cancelled) {
|
|
|
|
throw;
|
|
|
|
}
|
|
|
|
// TODO REMOVE
|
|
|
|
fmt::print("BM {0} got error {1} trying to halt blob worker {2}\n",
|
|
|
|
bmData->epoch,
|
|
|
|
e.name(),
|
|
|
|
bwInterf.id().toString());
|
|
|
|
if (e.code() != error_code_blob_manager_replaced) {
|
2022-01-21 06:45:13 +08:00
|
|
|
break;
|
2022-01-21 01:43:34 +08:00
|
|
|
}
|
|
|
|
if (bmData->iAmReplaced.canBeSet()) {
|
|
|
|
bmData->iAmReplaced.send(Void());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2022-01-21 06:45:13 +08:00
|
|
|
|
|
|
|
return Void();
|
2022-01-21 01:43:34 +08:00
|
|
|
}
|
|
|
|
|
2022-01-21 02:33:15 +08:00
|
|
|
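// Tears down a (possibly failed) blob worker: drops it from the in-memory maps, deregisters it from the DB,
// revokes and re-queues all ranges it owned, sends it a halt, and finally restarts recruiting to replace it.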
ACTOR Future<Void> killBlobWorker(Reference<BlobManagerData> bmData, BlobWorkerInterface bwInterf, bool registered) {
|
2021-12-04 06:12:08 +08:00
|
|
|
state UID bwId = bwInterf.id();
|
2021-10-01 23:08:00 +08:00
|
|
|
|
|
|
|
// Remove blob worker from stats map so that when we try to find a worker to takeover the range,
|
|
|
|
// the one we just killed isn't considered.
|
2021-10-13 04:36:05 +08:00
|
|
|
// Remove it from workersById also since otherwise that worker addr will remain excluded
|
2021-10-01 23:08:00 +08:00
|
|
|
// when we try to recruit new blob workers.
|
2021-12-04 02:29:22 +08:00
|
|
|
|
|
|
|
if (registered) {
|
2021-12-04 06:12:08 +08:00
|
|
|
bmData->deadWorkers.insert(bwId);
|
2021-12-04 02:29:22 +08:00
|
|
|
bmData->workerStats.erase(bwId);
|
|
|
|
bmData->workersById.erase(bwId);
|
|
|
|
bmData->workerAddresses.erase(bwInterf.stableAddress());
|
|
|
|
}
|
2021-10-23 10:41:19 +08:00
|
|
|
|
2022-01-21 08:37:25 +08:00
|
|
|
// Remove blob worker from persisted list of blob workers
|
|
|
|
Future<Void> deregister = deregisterBlobWorker(bmData, bwInterf);
|
|
|
|
|
2021-10-01 23:08:00 +08:00
|
|
|
// for every range owned by this blob worker, we want to
|
2021-10-13 04:36:05 +08:00
|
|
|
// - send a revoke request for that range
|
2021-10-01 23:08:00 +08:00
|
|
|
// - add the range back to the stream of ranges to be assigned
|
2021-10-13 04:36:05 +08:00
|
|
|
if (BM_DEBUG) {
|
2022-01-15 07:10:53 +08:00
|
|
|
fmt::print("Taking back ranges from BW {0}\n", bwId.toString());
|
2021-10-13 04:36:05 +08:00
|
|
|
}
|
2021-12-11 02:25:42 +08:00
|
|
|
// copy ranges into vector before sending, because send then modifies workerAssignments
|
|
|
|
state std::vector<KeyRange> rangesToMove;
|
2021-10-01 23:08:00 +08:00
|
|
|
for (auto& it : bmData->workerAssignments.ranges()) {
|
|
|
|
if (it.cvalue() == bwId) {
|
2021-12-11 02:25:42 +08:00
|
|
|
rangesToMove.push_back(it.range());
|
2021-10-01 23:08:00 +08:00
|
|
|
}
|
|
|
|
}
|
2021-12-11 02:25:42 +08:00
|
|
|
for (auto& it : rangesToMove) {
|
|
|
|
// Send revoke request
|
|
|
|
RangeAssignment raRevoke;
|
|
|
|
raRevoke.isAssign = false;
|
|
|
|
raRevoke.keyRange = it;
|
|
|
|
raRevoke.revoke = RangeRevokeData(false);
|
|
|
|
bmData->rangesToAssign.send(raRevoke);
|
|
|
|
|
|
|
|
// Add range back into the stream of ranges to be assigned
|
|
|
|
RangeAssignment raAssign;
|
|
|
|
raAssign.isAssign = true;
|
|
|
|
raAssign.worker = Optional<UID>();
|
|
|
|
raAssign.keyRange = it;
|
|
|
|
raAssign.assign = RangeAssignmentData(); // not a continue
|
|
|
|
bmData->rangesToAssign.send(raAssign);
|
|
|
|
}
|
2021-10-01 23:08:00 +08:00
|
|
|
|
|
|
|
// Send halt to blob worker, with no expectation of hearing back
|
2021-10-13 04:36:05 +08:00
|
|
|
if (BM_DEBUG) {
|
2022-01-15 07:10:53 +08:00
|
|
|
fmt::print("Sending halt to BW {}\n", bwId.toString());
|
2021-10-13 04:36:05 +08:00
|
|
|
}
|
2022-01-21 09:20:11 +08:00
|
|
|
bmData->addActor.send(haltBlobWorker(bmData, bwInterf));
|
2021-12-04 06:12:08 +08:00
|
|
|
|
2022-01-22 00:29:54 +08:00
|
|
|
// wait for blob worker to be removed from DB and in-memory mapping to have reassigned all shards from this worker
|
|
|
|
// before removing it from deadWorkers, to avoid a race with checkBlobWorkerList
|
|
|
|
wait(deregister && bmData->rangesToAssign.onEmpty());
|
|
|
|
// delay(0) after onEmpty to yield back to the range assigner on the final pop to ensure it gets processed before
|
|
|
|
// deadWorkers.erase
|
|
|
|
wait(delay(0));
|
2022-01-21 08:37:25 +08:00
|
|
|
|
|
|
|
// restart recruiting to replace the dead blob worker
|
|
|
|
bmData->restartRecruiting.trigger();
|
|
|
|
|
|
|
|
if (registered) {
|
|
|
|
bmData->deadWorkers.erase(bwInterf.id());
|
|
|
|
}
|
|
|
|
|
2021-10-22 05:39:38 +08:00
|
|
|
return Void();
|
2021-10-01 23:08:00 +08:00
|
|
|
}
|
|
|
|
|
2022-01-21 02:33:15 +08:00
|
|
|
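// Listens on a blob worker's granule status stream, validating epochs and filtering stale or duplicate reports
// before acting on granule split requests; the stream is reconstructed (with backoff) on retryable errors.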
ACTOR Future<Void> monitorBlobWorkerStatus(Reference<BlobManagerData> bmData, BlobWorkerInterface bwInterf) {
|
2021-10-06 05:51:19 +08:00
|
|
|
state KeyRangeMap<std::pair<int64_t, int64_t>> lastSeenSeqno;
|
2021-10-06 22:24:48 +08:00
|
|
|
// outer loop handles reconstructing stream if it got a retryable error
|
2022-01-22 04:20:52 +08:00
|
|
|
// do backoff, we can get a lot of retries in a row
|
|
|
|
|
|
|
|
// TODO knob?
|
|
|
|
state double backoff = 0.1;
|
2021-10-06 05:51:19 +08:00
|
|
|
loop {
|
|
|
|
try {
|
|
|
|
state ReplyPromiseStream<GranuleStatusReply> statusStream =
|
|
|
|
bwInterf.granuleStatusStreamRequest.getReplyStream(GranuleStatusStreamRequest(bmData->epoch));
|
2021-10-06 22:24:48 +08:00
|
|
|
// read from stream until worker fails (should never get explicit end_of_stream)
|
2021-10-06 05:51:19 +08:00
|
|
|
loop {
|
|
|
|
GranuleStatusReply rep = waitNext(statusStream.getFuture());
|
2021-10-01 23:08:00 +08:00
|
|
|
|
2021-09-04 04:13:26 +08:00
|
|
|
if (BM_DEBUG) {
|
2021-11-18 09:03:32 +08:00
|
|
|
fmt::print("BM {0} got status of [{1} - {2}) @ ({3}, {4}) from BW {5}: {6}\n",
|
|
|
|
bmData->epoch,
|
|
|
|
rep.granuleRange.begin.printable(),
|
|
|
|
rep.granuleRange.end.printable(),
|
|
|
|
rep.epoch,
|
|
|
|
rep.seqno,
|
|
|
|
bwInterf.id().toString(),
|
|
|
|
rep.doSplit ? "split" : "");
|
2021-09-04 04:13:26 +08:00
|
|
|
}
|
2022-01-22 04:20:52 +08:00
|
|
|
// if we get a reply from the stream, reset backoff
|
|
|
|
backoff = 0.1;
|
2021-09-04 04:13:26 +08:00
|
|
|
if (rep.epoch > bmData->epoch) {
|
|
|
|
if (BM_DEBUG) {
|
2022-01-15 07:10:53 +08:00
|
|
|
fmt::print("BM heard from BW {0} that there is a new manager with higher epoch\n",
|
|
|
|
bwInterf.id().toString());
|
2021-09-04 04:13:26 +08:00
|
|
|
}
|
|
|
|
if (bmData->iAmReplaced.canBeSet()) {
|
|
|
|
bmData->iAmReplaced.send(Void());
|
|
|
|
}
|
2021-10-23 10:41:19 +08:00
|
|
|
} else if (rep.epoch < bmData->epoch) {
|
2021-12-11 02:25:42 +08:00
|
|
|
// TODO: revoke the range from that worker? and send optimistic halt req to other (zombie) BM?
|
|
|
|
// it's optimistic because such a BM is not necessarily a zombie. it could have gotten killed
|
|
|
|
// properly but the BW that sent this reply was behind (i.e. it started the req when the old BM
|
|
|
|
// was in charge and finished by the time the new BM took over)
|
2021-10-23 10:41:19 +08:00
|
|
|
continue;
|
2021-09-04 04:13:26 +08:00
|
|
|
}
|
|
|
|
|
2021-12-11 02:25:42 +08:00
|
|
|
// TODO maybe this won't be true eventually, but right now the only time the blob worker reports
|
|
|
|
// back is to split the range.
|
2021-09-04 04:13:26 +08:00
|
|
|
ASSERT(rep.doSplit);
|
|
|
|
|
2021-10-13 04:36:05 +08:00
|
|
|
// only evaluate for split if this worker currently owns the granule in this blob manager's mapping
|
2021-10-01 23:08:00 +08:00
|
|
|
auto currGranuleAssignment = bmData->workerAssignments.rangeContaining(rep.granuleRange.begin);
|
|
|
|
if (!(currGranuleAssignment.begin() == rep.granuleRange.begin &&
|
|
|
|
currGranuleAssignment.end() == rep.granuleRange.end &&
|
|
|
|
currGranuleAssignment.cvalue() == bwInterf.id())) {
|
|
|
|
continue;
|
|
|
|
}
|
2021-09-04 04:13:26 +08:00
|
|
|
|
|
|
|
auto lastReqForGranule = lastSeenSeqno.rangeContaining(rep.granuleRange.begin);
|
|
|
|
if (rep.granuleRange.begin == lastReqForGranule.begin() &&
|
|
|
|
rep.granuleRange.end == lastReqForGranule.end() && rep.epoch == lastReqForGranule.value().first &&
|
|
|
|
rep.seqno == lastReqForGranule.value().second) {
|
|
|
|
if (BM_DEBUG) {
|
2022-01-12 00:33:39 +08:00
|
|
|
fmt::print("Manager {0} received repeat status for the same granule [{1} - {2}), ignoring.\n",
|
2021-11-18 09:03:32 +08:00
|
|
|
bmData->epoch,
|
|
|
|
rep.granuleRange.begin.printable(),
|
|
|
|
rep.granuleRange.end.printable());
|
2021-09-04 04:13:26 +08:00
|
|
|
}
|
|
|
|
} else {
|
|
|
|
if (BM_DEBUG) {
|
2021-11-18 09:03:32 +08:00
|
|
|
fmt::print("Manager {0} evaluating [{1} - {2}) for split\n",
|
|
|
|
bmData->epoch,
|
|
|
|
rep.granuleRange.begin.printable().c_str(),
|
|
|
|
rep.granuleRange.end.printable().c_str());
|
2021-09-04 04:13:26 +08:00
|
|
|
}
|
|
|
|
lastSeenSeqno.insert(rep.granuleRange, std::pair(rep.epoch, rep.seqno));
|
2021-10-09 06:35:36 +08:00
|
|
|
bmData->addActor.send(maybeSplitRange(
|
|
|
|
bmData, bwInterf.id(), rep.granuleRange, rep.granuleID, rep.startVersion, rep.latestVersion));
|
2021-09-04 04:13:26 +08:00
|
|
|
}
|
|
|
|
}
|
2021-10-06 05:51:19 +08:00
|
|
|
} catch (Error& e) {
|
|
|
|
if (e.code() == error_code_operation_cancelled) {
|
|
|
|
throw e;
|
|
|
|
}
|
2021-10-23 10:41:19 +08:00
|
|
|
|
2022-01-08 01:21:05 +08:00
|
|
|
// on known network errors or stream close errors, throw
|
|
|
|
if (e.code() == error_code_broken_promise) {
|
2021-10-23 10:41:19 +08:00
|
|
|
throw e;
|
|
|
|
}
|
|
|
|
|
2022-01-21 01:43:34 +08:00
|
|
|
// if manager is replaced, die
|
|
|
|
if (e.code() == error_code_blob_manager_replaced) {
|
|
|
|
if (bmData->iAmReplaced.canBeSet()) {
|
|
|
|
bmData->iAmReplaced.send(Void());
|
|
|
|
}
|
|
|
|
return Void();
|
|
|
|
}
|
|
|
|
|
2021-10-06 22:24:48 +08:00
|
|
|
// if we got an error constructing or reading from stream that is retryable, wait and retry.
|
2022-01-08 01:21:05 +08:00
|
|
|
// Sometimes we get connection_failed without the failure monitor tripping. One example is simulation's
|
|
|
|
// rollRandomClose. In this case, just reconstruct the stream. If it was a transient failure, it works, and
|
|
|
|
// if it is permanent, the failure monitor will eventually trip.
|
2021-10-06 05:51:19 +08:00
|
|
|
ASSERT(e.code() != error_code_end_of_stream);
|
2022-01-08 01:21:05 +08:00
|
|
|
if (e.code() == error_code_request_maybe_delivered || e.code() == error_code_connection_failed) {
|
2022-01-22 04:20:52 +08:00
|
|
|
wait(delay(backoff));
|
|
|
|
backoff = std::min(backoff * 1.5, 5.0);
|
2021-10-06 05:51:19 +08:00
|
|
|
continue;
|
|
|
|
} else {
|
|
|
|
if (BM_DEBUG) {
|
2022-01-15 07:10:53 +08:00
|
|
|
fmt::print(
|
|
|
|
"BM got unexpected error {0} monitoring BW {1} status\n", e.name(), bwInterf.id().toString());
|
2021-10-06 05:51:19 +08:00
|
|
|
}
|
|
|
|
// TODO change back from SevError?
|
|
|
|
TraceEvent(SevError, "BWStatusMonitoringFailed", bmData->id)
|
|
|
|
.detail("BlobWorkerID", bwInterf.id())
|
|
|
|
.error(e);
|
|
|
|
throw e;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-01-21 02:33:15 +08:00
|
|
|
ACTOR Future<Void> monitorBlobWorker(Reference<BlobManagerData> bmData, BlobWorkerInterface bwInterf) {
|
2021-10-06 05:51:19 +08:00
|
|
|
try {
|
|
|
|
state Future<Void> waitFailure = waitFailureClient(bwInterf.waitFailure, SERVER_KNOBS->BLOB_WORKER_TIMEOUT);
|
|
|
|
state Future<Void> monitorStatus = monitorBlobWorkerStatus(bmData, bwInterf);
|
|
|
|
|
|
|
|
choose {
|
|
|
|
when(wait(waitFailure)) {
|
|
|
|
if (BM_DEBUG) {
|
2021-11-18 09:03:32 +08:00
|
|
|
fmt::print("BM {0} detected BW {1} is dead\n", bmData->epoch, bwInterf.id().toString());
|
2021-10-06 05:51:19 +08:00
|
|
|
}
|
|
|
|
TraceEvent("BlobWorkerFailed", bmData->id).detail("BlobWorkerID", bwInterf.id());
|
|
|
|
}
|
|
|
|
when(wait(monitorStatus)) {
|
2022-01-21 06:28:08 +08:00
|
|
|
// should only return when manager got replaced
|
|
|
|
ASSERT(!bmData->iAmReplaced.canBeSet());
|
2021-10-06 05:51:19 +08:00
|
|
|
}
|
2021-09-04 04:13:26 +08:00
|
|
|
}
|
|
|
|
} catch (Error& e) {
|
2021-10-20 23:54:19 +08:00
|
|
|
// will blob worker get cleaned up in this case?
|
2021-10-06 05:51:19 +08:00
|
|
|
if (e.code() == error_code_operation_cancelled) {
|
|
|
|
throw e;
|
|
|
|
}
|
2021-10-23 10:41:19 +08:00
|
|
|
|
2022-01-08 01:21:05 +08:00
|
|
|
if (BM_DEBUG) {
|
|
|
|
fmt::print(
|
|
|
|
"BM {0} got monitoring error {1} from BW {2}\n", bmData->epoch, e.name(), bwInterf.id().toString());
|
|
|
|
}
|
|
|
|
|
2021-10-23 10:41:19 +08:00
|
|
|
// TODO: re-evaluate the expected errors here once wait failure issue is resolved
|
2022-01-08 01:21:05 +08:00
|
|
|
// Expected errors here are: [broken_promise]
|
|
|
|
if (e.code() != error_code_broken_promise) {
|
2021-10-23 10:41:19 +08:00
|
|
|
if (BM_DEBUG) {
|
2022-01-15 07:10:53 +08:00
|
|
|
fmt::print("BM got unexpected error {0} monitoring BW {1}\n", e.name(), bwInterf.id().toString());
|
2021-10-23 10:41:19 +08:00
|
|
|
}
|
|
|
|
// TODO change back from SevError?
|
|
|
|
TraceEvent(SevError, "BWMonitoringFailed", bmData->id).detail("BlobWorkerID", bwInterf.id()).error(e);
|
|
|
|
throw e;
|
2021-09-04 04:13:26 +08:00
|
|
|
}
|
|
|
|
}
|
2021-10-01 23:08:00 +08:00
|
|
|
|
2021-10-13 04:36:05 +08:00
|
|
|
// kill the blob worker
|
2021-12-04 02:29:22 +08:00
|
|
|
wait(killBlobWorker(bmData, bwInterf, true));
|
2021-10-13 04:36:05 +08:00
|
|
|
|
|
|
|
if (BM_DEBUG) {
|
2022-01-15 07:10:53 +08:00
|
|
|
fmt::print("No longer monitoring BW {0}\n", bwInterf.id().toString());
|
2021-10-13 04:36:05 +08:00
|
|
|
}
|
2021-10-01 23:08:00 +08:00
|
|
|
return Void();
|
2021-09-04 04:13:26 +08:00
|
|
|
}
|
|
|
|
|
2022-01-21 02:33:15 +08:00
|
|
|
ACTOR Future<Void> checkBlobWorkerList(Reference<BlobManagerData> bmData, Promise<Void> workerListReady) {
|
2022-01-22 00:29:54 +08:00
|
|
|
|
|
|
|
try {
|
|
|
|
loop {
|
|
|
|
// Get list of last known blob workers
|
|
|
|
// note: the list will include every blob worker that the old manager knew about,
|
|
|
|
// but it might also contain blob workers that died while the new manager was being recruited
|
|
|
|
std::vector<BlobWorkerInterface> blobWorkers = wait(getBlobWorkers(bmData->db));
|
|
|
|
// add all blob workers to this new blob manager's records and start monitoring it
|
|
|
|
bool foundAnyNew = false;
|
|
|
|
for (auto& worker : blobWorkers) {
|
|
|
|
if (!bmData->deadWorkers.count(worker.id())) {
|
|
|
|
if (!bmData->workerAddresses.count(worker.stableAddress()) &&
|
|
|
|
worker.locality.dcId() == bmData->dcId) {
|
|
|
|
bmData->workerAddresses.insert(worker.stableAddress());
|
|
|
|
bmData->workersById[worker.id()] = worker;
|
|
|
|
bmData->workerStats[worker.id()] = BlobWorkerStats();
|
|
|
|
bmData->addActor.send(monitorBlobWorker(bmData, worker));
|
|
|
|
foundAnyNew = true;
|
|
|
|
} else if (!bmData->workersById.count(worker.id())) {
|
|
|
|
bmData->addActor.send(killBlobWorker(bmData, worker, false));
|
|
|
|
}
|
2021-12-04 06:12:08 +08:00
|
|
|
}
|
2021-12-04 02:29:22 +08:00
|
|
|
}
|
2022-01-22 00:29:54 +08:00
|
|
|
if (workerListReady.canBeSet()) {
|
|
|
|
workerListReady.send(Void());
|
|
|
|
}
|
|
|
|
// if any assigns are stuck on workers, and we have workers, wake them
|
|
|
|
if (foundAnyNew || !bmData->workersById.empty()) {
|
|
|
|
Promise<Void> hold = bmData->foundBlobWorkers;
|
|
|
|
bmData->foundBlobWorkers = Promise<Void>();
|
|
|
|
hold.send(Void());
|
|
|
|
}
|
|
|
|
wait(delay(SERVER_KNOBS->BLOB_WORKERLIST_FETCH_INTERVAL));
|
2021-12-04 02:29:22 +08:00
|
|
|
}
|
2022-01-22 00:29:54 +08:00
|
|
|
} catch (Error& e) {
|
|
|
|
if (BM_DEBUG) {
|
|
|
|
fmt::print("BM {0} got error {1} reading blob worker list!!\n", bmData->epoch, e.name());
|
2021-12-04 06:12:08 +08:00
|
|
|
}
|
2022-01-22 00:29:54 +08:00
|
|
|
throw e;
|
2021-12-04 02:29:22 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-01-21 02:33:15 +08:00
|
|
|
ACTOR Future<Void> recoverBlobManager(Reference<BlobManagerData> bmData) {
|
2021-12-04 06:12:08 +08:00
|
|
|
state Promise<Void> workerListReady;
|
|
|
|
bmData->addActor.send(checkBlobWorkerList(bmData, workerListReady));
|
|
|
|
wait(workerListReady.getFuture());
|
2021-12-04 02:29:22 +08:00
|
|
|
|
2021-10-23 10:41:19 +08:00
|
|
|
// Once we acknowledge the existing blob workers, we can go ahead and recruit new ones
|
|
|
|
bmData->startRecruiting.trigger();
|
2021-10-20 23:54:19 +08:00
|
|
|
|
2021-12-06 07:02:25 +08:00
|
|
|
// skip them rest of the algorithm for the first blob manager
|
|
|
|
if (bmData->epoch == 1) {
|
|
|
|
return Void();
|
|
|
|
}
|
|
|
|
|
2021-10-23 10:41:19 +08:00
|
|
|
// At this point, bmData->workersById is a list of all alive blob workers, but could also include some dead BWs.
|
|
|
|
// The algorithm below works as follows:
|
2022-01-25 04:12:36 +08:00
|
|
|
// 1. We get the ongoing split boundaries to construct the set of granules we should have. For these splits, we
|
|
|
|
// simply assign the range to the next best worker if it is not present in the assignment mapping. This is not
|
|
|
|
// any worse than what the old blob manager would have done. Details: Note that this means that if a worker we
|
|
|
|
// intended to give a splitted range to dies before the new BM recovers, then we'll simply assign the range to
|
|
|
|
// the next best worker.
|
|
|
|
//
|
|
|
|
// 2. We get the existing granule mappings that were persisted by blob workers who were assigned ranges and
|
2021-10-23 10:41:19 +08:00
|
|
|
// add them to bmData->granuleAssignments, which is a key range map.
|
|
|
|
// Details: re-assignments might have happened between the time the mapping was last updated and now.
|
|
|
|
// For example, suppose a blob manager sends requests to the range assigner stream to move a granule G.
|
|
|
|
// However, before sending those requests off to the workers, the BM dies. So the persisting mapping
|
|
|
|
// still has G->oldWorker. The following algorithm will re-assign G to oldWorker (as long as it is also still
|
2021-12-11 02:25:42 +08:00
|
|
|
// alive). Note that this is fine because it simply means that the range was not moved optimally, but it is
|
|
|
|
// still owned. In the above case, even if the revoke goes through, since we don't update the mapping during
|
|
|
|
// revokes, this is the same as the case above. Another case to consider is when a blob worker dies when the
|
|
|
|
// BM is recovering. Now the mapping at this time looks like G->deadBW. But the rangeAssigner handles this:
|
2022-01-25 04:12:36 +08:00
|
|
|
// we'll try to assign a range to a dead worker and fail and reassign it to the next best worker. It will also
|
|
|
|
// handle the case where the mapping does not reflect the desired set of granules based on the ongoing spits, and
|
|
|
|
// correct it.
|
2021-10-20 23:54:19 +08:00
|
|
|
//
|
2022-01-25 04:12:36 +08:00
|
|
|
// 3. For every range in our granuleAssignments, we send an assign request to the stream of requests,
|
2021-10-23 10:41:19 +08:00
|
|
|
// ultimately giving every range back to some worker (trying to mimic the state of the old BM).
|
|
|
|
// If the worker already had the range, this is a no-op. If the worker didn't have it, it will
|
|
|
|
// begin persisting it. The worker that had the same range before will now be at a lower seqno.
|
|
|
|
|
2021-11-16 07:19:14 +08:00
|
|
|
state KeyRangeMap<Optional<UID>> workerAssignments;
|
2021-10-23 10:41:19 +08:00
|
|
|
state Reference<ReadYourWritesTransaction> tr = makeReference<ReadYourWritesTransaction>(bmData->db);
|
2022-01-15 23:33:47 +08:00
|
|
|
|
|
|
|
// TODO KNOB
|
|
|
|
state int rowLimit = BUGGIFY ? deterministicRandom()->randomInt(2, 10) : 10000;
|
2021-10-23 10:41:19 +08:00
|
|
|
|
2022-01-15 23:33:47 +08:00
|
|
|
if (BM_DEBUG) {
|
2022-01-19 04:22:34 +08:00
|
|
|
fmt::print("BM {0} recovering:\n", bmData->epoch);
|
2022-01-25 04:12:36 +08:00
|
|
|
fmt::print("BM {0} found in progress splits:\n", bmData->epoch);
|
2021-10-20 23:54:19 +08:00
|
|
|
}
|
2021-10-22 05:39:38 +08:00
|
|
|
|
2022-01-19 04:22:34 +08:00
|
|
|
// TODO use range stream instead
|
2022-01-15 23:33:47 +08:00
|
|
|
|
2022-01-19 04:22:34 +08:00
|
|
|
state UID currentParentID = UID();
|
|
|
|
state Optional<UID> nextParentID;
|
|
|
|
state std::vector<Key> splitBoundaries;
|
2022-01-25 04:12:36 +08:00
|
|
|
state std::pair<int64_t, int64_t>
|
|
|
|
splitEpochSeqno; // used to order splits since we can have multiple splits of the same range in progress at once
|
2022-01-15 23:33:47 +08:00
|
|
|
|
2022-01-19 04:22:34 +08:00
|
|
|
state Key boundaryBeginKey = blobGranuleSplitBoundaryKeys.begin;
|
|
|
|
state RangeResult boundaryResult;
|
|
|
|
boundaryResult.readThrough = boundaryBeginKey;
|
|
|
|
boundaryResult.more = true;
|
|
|
|
state int boundaryResultIdx = 0;
|
2022-01-15 23:33:47 +08:00
|
|
|
|
2022-01-26 22:38:24 +08:00
|
|
|
// Step 2. Get the latest known split and merge state. Because we can have multiple splits in progress at the same
|
2022-01-25 04:12:36 +08:00
|
|
|
// time, and we don't know which parts of those are reflected in the current set of worker assignments we read, we
|
|
|
|
// have to construct the current desired set of granules from the set of ongoing splits and merges. Then, if any of
|
|
|
|
// those are not represented in the worker mapping, we must add them.
|
|
|
|
state KeyRangeMap<std::pair<int64_t, int64_t>> inProgressSplits;
|
|
|
|
|
2021-10-22 05:39:38 +08:00
|
|
|
tr->reset();
|
2022-01-19 04:22:34 +08:00
|
|
|
tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
|
|
|
|
tr->setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);
|
|
|
|
|
2021-10-20 23:54:19 +08:00
|
|
|
loop {
|
2022-01-19 04:22:34 +08:00
|
|
|
// Advance boundary reader
|
|
|
|
loop {
|
|
|
|
if (boundaryResultIdx >= boundaryResult.size()) {
|
|
|
|
if (!boundaryResult.more) {
|
|
|
|
break;
|
|
|
|
}
|
2022-01-20 07:49:19 +08:00
|
|
|
ASSERT(boundaryResult.readThrough.present() || boundaryResult.size() > 0);
|
|
|
|
boundaryBeginKey = boundaryResult.readThrough.present() ? boundaryResult.readThrough.get()
|
|
|
|
: keyAfter(boundaryResult.back().key);
|
2022-01-19 04:22:34 +08:00
|
|
|
loop {
|
|
|
|
try {
|
|
|
|
RangeResult r = wait(
|
|
|
|
tr->getRange(KeyRangeRef(boundaryBeginKey, blobGranuleSplitBoundaryKeys.end), rowLimit));
|
|
|
|
ASSERT(r.size() > 0 || !r.more);
|
|
|
|
boundaryResult = r;
|
|
|
|
boundaryResultIdx = 0;
|
|
|
|
break;
|
|
|
|
} catch (Error& e) {
|
|
|
|
if (BM_DEBUG) {
|
|
|
|
fmt::print("BM {0} got error advancing boundary cursor: {1}\n", bmData->epoch, e.name());
|
|
|
|
}
|
|
|
|
wait(tr->onError(e));
|
|
|
|
tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
|
|
|
|
tr->setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);
|
|
|
|
}
|
2022-01-15 23:33:47 +08:00
|
|
|
}
|
2022-01-19 04:22:34 +08:00
|
|
|
// if we got a response and there are zero rows, we are done
|
|
|
|
if (boundaryResult.empty()) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2022-01-19 08:47:42 +08:00
|
|
|
bool foundNext = false;
|
2022-01-19 04:22:34 +08:00
|
|
|
while (boundaryResultIdx < boundaryResult.size()) {
|
|
|
|
UID parentGranuleID;
|
|
|
|
Key boundaryKey;
|
2022-01-19 08:47:42 +08:00
|
|
|
|
2022-01-19 04:22:34 +08:00
|
|
|
std::tie(parentGranuleID, boundaryKey) =
|
|
|
|
decodeBlobGranuleSplitBoundaryKey(boundaryResult[boundaryResultIdx].key);
|
|
|
|
if (parentGranuleID != currentParentID) {
|
|
|
|
// nextParentID should have already been set by split reader
|
2022-01-25 04:12:36 +08:00
|
|
|
nextParentID = parentGranuleID;
|
2022-01-19 08:47:42 +08:00
|
|
|
foundNext = true;
|
2022-01-19 04:22:34 +08:00
|
|
|
break;
|
|
|
|
}
|
2022-01-19 08:47:42 +08:00
|
|
|
|
2022-01-25 04:12:36 +08:00
|
|
|
if (splitBoundarySpecialKey == boundaryKey) {
|
|
|
|
ASSERT(splitEpochSeqno.first == 0 && splitEpochSeqno.second == 0);
|
|
|
|
ASSERT(boundaryResult[boundaryResultIdx].value.size() > 0);
|
|
|
|
splitEpochSeqno = decodeBlobGranuleSplitBoundaryValue(boundaryResult[boundaryResultIdx].value);
|
|
|
|
ASSERT(splitEpochSeqno.first != 0 && splitEpochSeqno.second != 0);
|
|
|
|
} else {
|
|
|
|
ASSERT(boundaryResult[boundaryResultIdx].value.size() == 0);
|
|
|
|
splitBoundaries.push_back(boundaryKey);
|
|
|
|
}
|
|
|
|
|
2022-01-19 04:22:34 +08:00
|
|
|
boundaryResultIdx++;
|
|
|
|
}
|
2022-01-19 08:47:42 +08:00
|
|
|
if (foundNext) {
|
|
|
|
break;
|
|
|
|
}
|
2022-01-19 04:22:34 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// process this split
|
|
|
|
if (currentParentID != UID()) {
|
2022-01-19 08:47:42 +08:00
|
|
|
std::sort(splitBoundaries.begin(), splitBoundaries.end());
|
|
|
|
|
2022-01-19 04:22:34 +08:00
|
|
|
if (BM_DEBUG) {
|
2022-01-25 04:12:36 +08:00
|
|
|
fmt::print(" [{0} - {1}) {2} @ ({3}, {4}):\n",
|
2022-01-19 04:22:34 +08:00
|
|
|
splitBoundaries.front().printable(),
|
|
|
|
splitBoundaries.back().printable(),
|
2022-01-25 04:12:36 +08:00
|
|
|
currentParentID.toString().substr(0, 6),
|
|
|
|
splitEpochSeqno.first,
|
|
|
|
splitEpochSeqno.second);
|
2022-01-19 04:22:34 +08:00
|
|
|
}
|
2022-01-25 04:12:36 +08:00
|
|
|
for (int i = 0; i < splitBoundaries.size() - 1; i++) {
|
|
|
|
// if this split boundary has not been opened by a blob worker yet, or was not in the assignment list
|
|
|
|
// when we previously read it, we must ensure it gets assigned to one
|
2022-01-19 04:22:34 +08:00
|
|
|
KeyRange range = KeyRange(KeyRangeRef(splitBoundaries[i], splitBoundaries[i + 1]));
|
2022-01-26 22:38:24 +08:00
|
|
|
if (BM_DEBUG) {
|
|
|
|
fmt::print(" [{0} - {1})\n", range.begin.printable(), range.end.printable());
|
|
|
|
}
|
2022-01-15 23:33:47 +08:00
|
|
|
|
2022-01-25 04:12:36 +08:00
|
|
|
// same algorithm as worker map. If we read boundary changes from the log out of order, save the newer
|
|
|
|
// ones, apply this one, and re-apply the other ones over this one don't concurrently modify with
|
|
|
|
// iterator
|
|
|
|
std::vector<std::pair<KeyRange, std::pair<int64_t, int64_t>>> newer;
|
|
|
|
newer.reserve(splitBoundaries.size() - 1);
|
|
|
|
auto intersecting = inProgressSplits.intersectingRanges(range);
|
|
|
|
for (auto& it : intersecting) {
|
|
|
|
if (splitEpochSeqno.first < it.value().first ||
|
2022-01-26 22:38:24 +08:00
|
|
|
(splitEpochSeqno.first == it.value().first && splitEpochSeqno.second < it.value().second)) {
|
|
|
|
// range currently there is newer than this range.
|
2022-01-25 04:12:36 +08:00
|
|
|
newer.push_back(std::pair(it.range(), it.value()));
|
2022-01-15 23:33:47 +08:00
|
|
|
}
|
2022-01-25 04:12:36 +08:00
|
|
|
}
|
|
|
|
inProgressSplits.insert(range, splitEpochSeqno);
|
|
|
|
|
|
|
|
for (auto& it : newer) {
|
|
|
|
inProgressSplits.insert(it.first, it.second);
|
2022-01-19 04:22:34 +08:00
|
|
|
}
|
2021-10-27 09:39:41 +08:00
|
|
|
}
|
2022-01-19 04:22:34 +08:00
|
|
|
}
|
|
|
|
splitBoundaries.clear();
|
2022-01-25 04:12:36 +08:00
|
|
|
splitEpochSeqno = std::pair(0, 0);
|
2021-10-23 10:41:19 +08:00
|
|
|
|
2022-01-19 04:22:34 +08:00
|
|
|
if (!nextParentID.present()) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
currentParentID = nextParentID.get();
|
|
|
|
nextParentID.reset();
|
|
|
|
}
|
2021-10-27 09:39:41 +08:00
|
|
|
|
2022-01-25 04:12:36 +08:00
|
|
|
if (BM_DEBUG) {
|
|
|
|
fmt::print("BM {0} found old assignments:\n", bmData->epoch);
|
|
|
|
}
|
|
|
|
// TODO could populate most/all of this list by just asking existing blob workers for their range sets to reduce DB
|
2022-01-26 22:38:24 +08:00
|
|
|
// read load on BM restart
|
|
|
|
|
|
|
|
// Step 3. Get the latest known mapping of granules to blob workers (i.e. assignments)
|
2022-01-25 04:12:36 +08:00
|
|
|
// This must happen causally AFTER reading the split boundaries, since the blob workers can clear the split
|
|
|
|
// boundaries for a granule as part of persisting their assignment.
|
2022-01-28 00:04:06 +08:00
|
|
|
state KeyRef beginKey = blobGranuleMappingKeys.begin;
|
2022-01-25 04:12:36 +08:00
|
|
|
loop {
|
|
|
|
try {
|
|
|
|
tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
|
|
|
|
tr->setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);
|
|
|
|
|
|
|
|
// TODO: replace row limit with knob
|
2022-01-28 00:04:06 +08:00
|
|
|
KeyRange nextRange(KeyRangeRef(beginKey, blobGranuleMappingKeys.end));
|
|
|
|
// using the krm functions can produce incorrect behavior here as it does weird stuff with beginKey
|
|
|
|
state GetRangeLimits limits(rowLimit, GetRangeLimits::BYTE_LIMIT_UNLIMITED);
|
|
|
|
limits.minRows = 2;
|
|
|
|
RangeResult results = wait(tr->getRange(nextRange, limits));
|
2022-01-25 04:12:36 +08:00
|
|
|
|
|
|
|
// Add the mappings to our in memory key range map
|
|
|
|
for (int rangeIdx = 0; rangeIdx < results.size() - 1; rangeIdx++) {
|
2022-01-28 00:04:06 +08:00
|
|
|
Key granuleStartKey = results[rangeIdx].key.removePrefix(blobGranuleMappingKeys.begin);
|
|
|
|
Key granuleEndKey = results[rangeIdx + 1].key.removePrefix(blobGranuleMappingKeys.begin);
|
2022-01-25 04:12:36 +08:00
|
|
|
if (results[rangeIdx].value.size()) {
|
|
|
|
// note: if the old owner is dead, we handle this in rangeAssigner
|
|
|
|
UID existingOwner = decodeBlobGranuleMappingValue(results[rangeIdx].value);
|
|
|
|
workerAssignments.insert(KeyRangeRef(granuleStartKey, granuleEndKey), existingOwner);
|
|
|
|
bmData->knownBlobRanges.insert(KeyRangeRef(granuleStartKey, granuleEndKey), true);
|
|
|
|
if (BM_DEBUG) {
|
|
|
|
fmt::print(" [{0} - {1})={2}\n",
|
2022-01-28 00:26:21 +08:00
|
|
|
granuleStartKey.printable(),
|
|
|
|
granuleEndKey.printable(),
|
2022-01-25 04:12:36 +08:00
|
|
|
results[rangeIdx].value.printable());
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
if (BM_DEBUG) {
|
2022-01-28 00:26:21 +08:00
|
|
|
fmt::print(" [{0} - {1})\n", granuleStartKey.printable(), granuleEndKey.printable());
|
2022-01-25 04:12:36 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-01-28 00:04:06 +08:00
|
|
|
if (!results.more || results.size() <= 1) {
|
2022-01-25 04:12:36 +08:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2022-01-28 00:04:06 +08:00
|
|
|
// re-read last key to get range that starts there
|
|
|
|
beginKey = results.back().key;
|
2022-01-25 04:12:36 +08:00
|
|
|
} catch (Error& e) {
|
|
|
|
if (BM_DEBUG) {
|
|
|
|
fmt::print("BM {0} got error reading granule mapping during recovery: {1}\n", bmData->epoch, e.name());
|
|
|
|
}
|
|
|
|
wait(tr->onError(e));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (BM_DEBUG) {
|
|
|
|
printf("Splits overriding the following ranges:\n");
|
|
|
|
}
|
|
|
|
// Apply current granule boundaries to the assignment map. If they don't exactly match what is currently in the map,
|
|
|
|
// override and assign it to a new worker
|
|
|
|
auto splits = inProgressSplits.intersectingRanges(normalKeys);
|
|
|
|
for (auto& it : splits) {
|
|
|
|
if (it.value().first == 0 || it.value().second == 0) {
|
|
|
|
// no in-progress splits for this range
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
auto r = workerAssignments.rangeContaining(it.begin());
|
|
|
|
|
|
|
|
// if this range is at all different from the worker mapping, the mapping is out of date
|
|
|
|
if (r.begin() != it.begin() || r.end() != it.end()) {
|
|
|
|
// the empty UID signifies that we need to find an owner (worker) for this range
|
|
|
|
workerAssignments.insert(it.range(), UID());
|
|
|
|
fmt::print(" [{0} - {1})\n", it.begin().printable().c_str(), it.end().printable().c_str());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-01-19 04:22:34 +08:00
|
|
|
// Step 4. Send assign requests for all the granules and transfer assignments
|
|
|
|
// from local workerAssignments to bmData
|
|
|
|
// before we take ownership of all of the ranges, check the manager lock again
|
|
|
|
tr->reset();
|
|
|
|
loop {
|
|
|
|
try {
|
|
|
|
tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
|
|
|
|
tr->setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);
|
|
|
|
wait(checkManagerLock(tr, bmData));
|
|
|
|
break;
|
2021-10-20 23:54:19 +08:00
|
|
|
} catch (Error& e) {
|
2022-01-19 04:22:34 +08:00
|
|
|
if (BM_DEBUG) {
|
|
|
|
fmt::print("BM {0} got error checking lock after recovery: {1}\n", bmData->epoch, e.name());
|
|
|
|
}
|
2021-10-20 23:54:19 +08:00
|
|
|
wait(tr->onError(e));
|
|
|
|
}
|
|
|
|
}
|
2021-10-22 05:39:38 +08:00
|
|
|
|
2022-01-15 23:33:47 +08:00
|
|
|
if (BM_DEBUG) {
|
|
|
|
fmt::print("BM {0} final ranges:\n", bmData->epoch);
|
|
|
|
}
|
2022-01-19 04:22:34 +08:00
|
|
|
|
2021-11-16 07:19:14 +08:00
|
|
|
for (auto& range : workerAssignments.intersectingRanges(normalKeys)) {
|
|
|
|
if (!range.value().present()) {
|
2022-01-28 00:04:06 +08:00
|
|
|
/*if (BM_DEBUG) {
|
|
|
|
fmt::print(" [{0} - {1}) invalid\n", range.begin().printable(), range.end().printable());
|
|
|
|
}*/
|
2021-11-16 07:19:14 +08:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2022-01-15 23:33:47 +08:00
|
|
|
if (BM_DEBUG) {
|
|
|
|
fmt::print(" [{0} - {1})\n", range.begin().printable(), range.end().printable());
|
|
|
|
}
|
|
|
|
|
2021-11-16 07:19:14 +08:00
|
|
|
bmData->workerAssignments.insert(range.range(), range.value().get());
|
2021-11-16 05:44:59 +08:00
|
|
|
|
2021-10-22 05:39:38 +08:00
|
|
|
RangeAssignment raAssign;
|
|
|
|
raAssign.isAssign = true;
|
2021-11-16 07:19:14 +08:00
|
|
|
raAssign.worker = range.value().get();
|
2021-10-22 05:39:38 +08:00
|
|
|
raAssign.keyRange = range.range();
|
2021-10-23 10:41:19 +08:00
|
|
|
raAssign.assign = RangeAssignmentData(AssignRequestType::Reassign);
|
2021-10-22 05:39:38 +08:00
|
|
|
bmData->rangesToAssign.send(raAssign);
|
|
|
|
}
|
2021-10-20 23:54:19 +08:00
|
|
|
|
|
|
|
return Void();
|
|
|
|
}
|
|
|
|
|
2022-01-21 02:33:15 +08:00
|
|
|
ACTOR Future<Void> chaosRangeMover(Reference<BlobManagerData> bmData) {
|
2022-01-31 22:27:37 +08:00
|
|
|
// Only move each granule once during the test, otherwise it can cause availability issues
|
|
|
|
// KeyRange isn't hashable and this is only for simulation, so just use toString of range
|
|
|
|
state std::unordered_set<std::string> alreadyMoved;
|
2021-09-25 23:30:27 +08:00
|
|
|
ASSERT(g_network->isSimulated());
|
2021-08-31 02:59:53 +08:00
|
|
|
loop {
|
|
|
|
wait(delay(30.0));
|
|
|
|
|
2021-09-25 23:30:27 +08:00
|
|
|
if (g_simulator.speedUpSimulation) {
|
|
|
|
if (BM_DEBUG) {
|
|
|
|
printf("Range mover stopping\n");
|
|
|
|
}
|
|
|
|
return Void();
|
|
|
|
}
|
|
|
|
|
2021-08-31 02:59:53 +08:00
|
|
|
if (bmData->workersById.size() > 1) {
|
|
|
|
int tries = 10;
|
|
|
|
while (tries > 0) {
|
|
|
|
tries--;
|
|
|
|
auto randomRange = bmData->workerAssignments.randomRange();
|
2022-01-31 22:27:37 +08:00
|
|
|
if (randomRange.value() != UID() && !alreadyMoved.count(randomRange.range().toString())) {
|
2021-09-01 01:30:43 +08:00
|
|
|
if (BM_DEBUG) {
|
2022-01-15 07:10:53 +08:00
|
|
|
fmt::print("Range mover moving range [{0} - {1}): {2}\n",
|
|
|
|
randomRange.begin().printable().c_str(),
|
|
|
|
randomRange.end().printable().c_str(),
|
|
|
|
randomRange.value().toString().c_str());
|
2021-09-01 01:30:43 +08:00
|
|
|
}
|
2022-01-31 22:27:37 +08:00
|
|
|
alreadyMoved.insert(randomRange.range().toString());
|
2021-08-31 02:59:53 +08:00
|
|
|
|
2021-10-22 05:39:38 +08:00
|
|
|
// FIXME: with low probability, could immediately revoke it from the new assignment and move
|
|
|
|
// it back right after to test that race
|
2021-09-24 22:55:37 +08:00
|
|
|
|
2021-12-08 22:42:27 +08:00
|
|
|
state KeyRange range = randomRange.range();
|
2021-09-04 04:13:26 +08:00
|
|
|
RangeAssignment revokeOld;
|
|
|
|
revokeOld.isAssign = false;
|
2021-12-08 22:42:27 +08:00
|
|
|
revokeOld.keyRange = range;
|
2021-09-04 04:13:26 +08:00
|
|
|
revokeOld.revoke = RangeRevokeData(false);
|
|
|
|
bmData->rangesToAssign.send(revokeOld);
|
|
|
|
|
|
|
|
RangeAssignment assignNew;
|
|
|
|
assignNew.isAssign = true;
|
2021-12-08 22:42:27 +08:00
|
|
|
assignNew.keyRange = range;
|
2021-10-23 10:41:19 +08:00
|
|
|
assignNew.assign = RangeAssignmentData(); // not a continue
|
2021-09-04 04:13:26 +08:00
|
|
|
bmData->rangesToAssign.send(assignNew);
|
2021-08-31 02:59:53 +08:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2021-09-01 01:30:43 +08:00
|
|
|
if (tries == 0 && BM_DEBUG) {
|
2021-09-23 01:46:20 +08:00
|
|
|
printf("Range mover couldn't find random range to move, skipping\n");
|
2021-08-31 02:59:53 +08:00
|
|
|
}
|
2021-09-01 01:30:43 +08:00
|
|
|
} else if (BM_DEBUG) {
|
2022-01-15 07:10:53 +08:00
|
|
|
fmt::print("Range mover found {0} workers, skipping\n", bmData->workerAssignments.size());
|
2021-08-31 02:59:53 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-09-29 07:15:32 +08:00
|
|
|
// Returns the number of blob workers on addr
|
2022-01-21 02:33:15 +08:00
|
|
|
int numExistingBWOnAddr(Reference<BlobManagerData> self, const AddressExclusion& addr) {
|
2021-09-21 03:42:20 +08:00
|
|
|
int numExistingBW = 0;
|
|
|
|
for (auto& server : self->workersById) {
|
|
|
|
const NetworkAddress& netAddr = server.second.stableAddress();
|
|
|
|
AddressExclusion usedAddr(netAddr.ip, netAddr.port);
|
|
|
|
if (usedAddr == addr) {
|
|
|
|
++numExistingBW;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return numExistingBW;
|
|
|
|
}
|
|
|
|
|
2021-09-29 07:15:32 +08:00
|
|
|
// Tries to recruit a blob worker on the candidateWorker process
|
2022-01-21 02:33:15 +08:00
|
|
|
ACTOR Future<Void> initializeBlobWorker(Reference<BlobManagerData> self, RecruitBlobWorkerReply candidateWorker) {
|
2021-09-21 03:42:20 +08:00
|
|
|
const NetworkAddress& netAddr = candidateWorker.worker.stableAddress();
|
|
|
|
AddressExclusion workerAddr(netAddr.ip, netAddr.port);
|
2021-10-22 05:39:38 +08:00
|
|
|
self->recruitingStream.set(self->recruitingStream.get() + 1);
|
|
|
|
|
2021-09-21 03:42:20 +08:00
|
|
|
// Ask the candidateWorker to initialize a BW only if the worker does not have a pending request
|
|
|
|
if (numExistingBWOnAddr(self, workerAddr) == 0 &&
|
|
|
|
self->recruitingLocalities.count(candidateWorker.worker.stableAddress()) == 0) {
|
|
|
|
state UID interfaceId = deterministicRandom()->randomUniqueID();
|
|
|
|
|
|
|
|
state InitializeBlobWorkerRequest initReq;
|
|
|
|
initReq.reqId = deterministicRandom()->randomUniqueID();
|
|
|
|
initReq.interfaceId = interfaceId;
|
|
|
|
|
2021-09-29 07:15:32 +08:00
|
|
|
// acknowledge that this worker is currently being recruited on
|
2021-09-21 03:42:20 +08:00
|
|
|
self->recruitingLocalities.insert(candidateWorker.worker.stableAddress());
|
|
|
|
|
|
|
|
TraceEvent("BMRecruiting")
|
|
|
|
.detail("State", "Sending request to worker")
|
|
|
|
.detail("WorkerID", candidateWorker.worker.id())
|
|
|
|
.detail("WorkerLocality", candidateWorker.worker.locality.toString())
|
|
|
|
.detail("Interf", interfaceId)
|
|
|
|
.detail("Addr", candidateWorker.worker.address());
|
|
|
|
|
2021-09-29 07:15:32 +08:00
|
|
|
// send initialization request to worker (i.e. worker.actor.cpp)
|
|
|
|
// here, the worker will construct the blob worker at which point the BW will start!
|
2021-09-21 03:42:20 +08:00
|
|
|
Future<ErrorOr<InitializeBlobWorkerReply>> fRecruit =
|
|
|
|
candidateWorker.worker.blobWorker.tryGetReply(initReq, TaskPriority::BlobManager);
|
|
|
|
|
2021-09-29 07:15:32 +08:00
|
|
|
// wait on the reply to the request
|
2021-09-21 03:42:20 +08:00
|
|
|
state ErrorOr<InitializeBlobWorkerReply> newBlobWorker = wait(fRecruit);
|
|
|
|
|
2021-09-29 07:15:32 +08:00
|
|
|
// if the initialization failed in an unexpected way, then kill the BM.
|
|
|
|
// if it failed in an expected way, add some delay before we try to recruit again
|
|
|
|
// on this worker
|
2021-09-21 03:42:20 +08:00
|
|
|
if (newBlobWorker.isError()) {
|
|
|
|
TraceEvent(SevWarn, "BMRecruitmentError").error(newBlobWorker.getError());
|
|
|
|
if (!newBlobWorker.isError(error_code_recruitment_failed) &&
|
|
|
|
!newBlobWorker.isError(error_code_request_maybe_delivered)) {
|
|
|
|
throw newBlobWorker.getError();
|
|
|
|
}
|
|
|
|
wait(delay(SERVER_KNOBS->STORAGE_RECRUITMENT_DELAY, TaskPriority::BlobManager));
|
|
|
|
}
|
|
|
|
|
2021-09-29 07:15:32 +08:00
|
|
|
// if the initialization succeeded, add the blob worker's interface to
|
|
|
|
// the blob manager's data and start monitoring the blob worker
|
2021-09-21 03:42:20 +08:00
|
|
|
if (newBlobWorker.present()) {
|
|
|
|
BlobWorkerInterface bwi = newBlobWorker.get().interf;
|
|
|
|
|
2021-12-06 07:02:25 +08:00
|
|
|
if (!self->deadWorkers.count(bwi.id())) {
|
|
|
|
if (!self->workerAddresses.count(bwi.stableAddress()) && bwi.locality.dcId() == self->dcId) {
|
|
|
|
self->workerAddresses.insert(bwi.stableAddress());
|
|
|
|
self->workersById[bwi.id()] = bwi;
|
|
|
|
self->workerStats[bwi.id()] = BlobWorkerStats();
|
|
|
|
self->addActor.send(monitorBlobWorker(self, bwi));
|
|
|
|
} else if (!self->workersById.count(bwi.id())) {
|
|
|
|
self->addActor.send(killBlobWorker(self, bwi, false));
|
|
|
|
}
|
|
|
|
}
|
2021-09-21 03:42:20 +08:00
|
|
|
|
|
|
|
TraceEvent("BMRecruiting")
|
|
|
|
.detail("State", "Finished request")
|
|
|
|
.detail("WorkerID", candidateWorker.worker.id())
|
|
|
|
.detail("WorkerLocality", candidateWorker.worker.locality.toString())
|
|
|
|
.detail("Interf", interfaceId)
|
|
|
|
.detail("Addr", candidateWorker.worker.address());
|
|
|
|
}
|
2021-09-29 07:15:32 +08:00
|
|
|
|
|
|
|
// acknowledge that this worker is not actively being recruited on anymore.
|
|
|
|
// if the initialization did succeed, then this worker will still be excluded
|
|
|
|
// since it was added to workersById.
|
|
|
|
self->recruitingLocalities.erase(candidateWorker.worker.stableAddress());
|
2021-09-21 03:42:20 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// try to recruit more blob workers
|
2021-10-22 05:39:38 +08:00
|
|
|
self->recruitingStream.set(self->recruitingStream.get() - 1);
|
2021-09-21 03:42:20 +08:00
|
|
|
self->restartRecruiting.trigger();
|
|
|
|
return Void();
|
|
|
|
}
|
|
|
|
|
2021-09-29 07:15:32 +08:00
|
|
|
// Recruits blob workers in a loop
|
2021-09-21 03:42:20 +08:00
|
|
|
ACTOR Future<Void> blobWorkerRecruiter(
|
2022-01-21 02:33:15 +08:00
|
|
|
Reference<BlobManagerData> self,
|
2021-09-21 03:42:20 +08:00
|
|
|
Reference<IAsyncListener<RequestStream<RecruitBlobWorkerRequest>>> recruitBlobWorker) {
|
|
|
|
state Future<RecruitBlobWorkerReply> fCandidateWorker;
|
|
|
|
state RecruitBlobWorkerRequest lastRequest;
|
|
|
|
|
2021-10-23 10:41:19 +08:00
|
|
|
// wait until existing blob workers have been acknowledged so we don't break recruitment invariants
|
2021-10-22 05:39:38 +08:00
|
|
|
loop choose {
|
|
|
|
when(wait(self->startRecruiting.onTrigger())) { break; }
|
|
|
|
}
|
2021-10-23 10:41:19 +08:00
|
|
|
|
2021-09-21 03:42:20 +08:00
|
|
|
loop {
|
|
|
|
try {
|
|
|
|
state RecruitBlobWorkerRequest recruitReq;
|
|
|
|
|
2021-09-29 07:15:32 +08:00
|
|
|
// workers that are used by existing blob workers should be excluded
|
2021-09-21 03:42:20 +08:00
|
|
|
for (auto const& [bwId, bwInterf] : self->workersById) {
|
|
|
|
auto addr = bwInterf.stableAddress();
|
|
|
|
AddressExclusion addrExcl(addr.ip, addr.port);
|
|
|
|
recruitReq.excludeAddresses.emplace_back(addrExcl);
|
|
|
|
}
|
|
|
|
|
2021-09-29 07:15:32 +08:00
|
|
|
// workers that are used by blob workers that are currently being recruited should be excluded
|
2021-09-21 03:42:20 +08:00
|
|
|
for (auto addr : self->recruitingLocalities) {
|
|
|
|
recruitReq.excludeAddresses.emplace_back(AddressExclusion(addr.ip, addr.port));
|
|
|
|
}
|
|
|
|
|
|
|
|
TraceEvent("BMRecruiting").detail("State", "Sending request to CC");
|
|
|
|
|
|
|
|
if (!fCandidateWorker.isValid() || fCandidateWorker.isReady() ||
|
|
|
|
recruitReq.excludeAddresses != lastRequest.excludeAddresses) {
|
|
|
|
lastRequest = recruitReq;
|
2021-09-29 07:15:32 +08:00
|
|
|
// send req to cluster controller to get back a candidate worker we can recruit on
|
2021-09-21 03:42:20 +08:00
|
|
|
fCandidateWorker =
|
|
|
|
brokenPromiseToNever(recruitBlobWorker->get().getReply(recruitReq, TaskPriority::BlobManager));
|
|
|
|
}
|
|
|
|
|
|
|
|
choose {
|
2021-10-22 05:39:38 +08:00
|
|
|
// when we get back a worker we can use, we will try to initialize a blob worker onto that
|
|
|
|
// process
|
2021-09-21 03:42:20 +08:00
|
|
|
when(RecruitBlobWorkerReply candidateWorker = wait(fCandidateWorker)) {
|
|
|
|
self->addActor.send(initializeBlobWorker(self, candidateWorker));
|
|
|
|
}
|
2021-09-29 07:15:32 +08:00
|
|
|
|
|
|
|
// when the CC changes, so does the request stream so we need to restart recruiting here
|
2021-09-21 03:42:20 +08:00
|
|
|
when(wait(recruitBlobWorker->onChange())) { fCandidateWorker = Future<RecruitBlobWorkerReply>(); }
|
2021-09-29 07:15:32 +08:00
|
|
|
|
|
|
|
// signal used to restart the loop and try to recruit the next blob worker
|
2021-10-13 04:36:05 +08:00
|
|
|
when(wait(self->restartRecruiting.onTrigger())) {}
|
2021-09-21 03:42:20 +08:00
|
|
|
}
|
|
|
|
wait(delay(FLOW_KNOBS->PREVENT_FAST_SPIN_DELAY, TaskPriority::BlobManager));
|
|
|
|
} catch (Error& e) {
|
|
|
|
if (e.code() != error_code_timed_out) {
|
|
|
|
throw;
|
|
|
|
}
|
|
|
|
TEST(true); // Blob worker recruitment timed out
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-01-21 02:33:15 +08:00
|
|
|
ACTOR Future<Void> haltBlobGranules(Reference<BlobManagerData> bmData) {
|
2021-12-08 13:43:58 +08:00
|
|
|
std::vector<BlobWorkerInterface> blobWorkers = wait(getBlobWorkers(bmData->db));
|
|
|
|
std::vector<Future<Void>> deregisterBlobWorkers;
|
|
|
|
for (auto& worker : blobWorkers) {
|
|
|
|
// TODO: send a special req to blob workers so they clean up granules/CFs
|
2022-01-21 09:20:11 +08:00
|
|
|
bmData->addActor.send(haltBlobWorker(bmData, worker));
|
2021-12-08 13:43:58 +08:00
|
|
|
deregisterBlobWorkers.emplace_back(deregisterBlobWorker(bmData, worker));
|
|
|
|
}
|
|
|
|
waitForAll(deregisterBlobWorkers);
|
|
|
|
|
|
|
|
return Void();
|
|
|
|
}
|
|
|
|
|
2022-01-21 02:33:15 +08:00
|
|
|
ACTOR Future<GranuleFiles> loadHistoryFiles(Reference<BlobManagerData> bmData, UID granuleID) {
|
2021-11-23 22:32:12 +08:00
|
|
|
state Transaction tr(bmData->db);
|
|
|
|
state KeyRange range = blobGranuleFileKeyRangeFor(granuleID);
|
|
|
|
state Key startKey = range.begin;
|
|
|
|
state GranuleFiles files;
|
|
|
|
loop {
|
|
|
|
try {
|
2021-12-11 05:46:22 +08:00
|
|
|
wait(readGranuleFiles(&tr, &startKey, range.end, &files, granuleID, BM_DEBUG));
|
2021-11-23 22:32:12 +08:00
|
|
|
return files;
|
|
|
|
} catch (Error& e) {
|
|
|
|
wait(tr.onError(e));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Deletes all files pertaining to the granule with id granuleId and
|
|
|
|
* also removes the history entry for this granule from the system keyspace
|
2022-01-19 04:22:34 +08:00
|
|
|
* TODO ensure cannot fully delete granule that is still splitting!
|
2021-11-23 22:32:12 +08:00
|
|
|
*/
|
2022-01-21 02:33:15 +08:00
|
|
|
ACTOR Future<Void> fullyDeleteGranule(Reference<BlobManagerData> self, UID granuleId, KeyRef historyKey) {
|
2021-12-04 03:46:48 +08:00
|
|
|
if (BM_DEBUG) {
|
2022-01-15 07:10:53 +08:00
|
|
|
fmt::print("Fully deleting granule {0}: init\n", granuleId.toString());
|
2021-12-04 03:46:48 +08:00
|
|
|
}
|
|
|
|
|
2021-11-23 22:32:12 +08:00
|
|
|
// get files
|
2022-01-11 03:33:18 +08:00
|
|
|
GranuleFiles files = wait(loadHistoryFiles(self->db, granuleId, BM_DEBUG));
|
2021-11-23 04:48:30 +08:00
|
|
|
|
|
|
|
std::vector<Future<Void>> deletions;
|
2021-12-04 03:46:48 +08:00
|
|
|
std::vector<std::string> filesToDelete; // TODO: remove, just for debugging
|
2021-11-23 04:48:30 +08:00
|
|
|
|
|
|
|
for (auto snapshotFile : files.snapshotFiles) {
|
|
|
|
std::string fname = snapshotFile.filename;
|
|
|
|
deletions.emplace_back(self->bstore->deleteFile(fname));
|
2021-12-04 03:46:48 +08:00
|
|
|
filesToDelete.emplace_back(fname);
|
2021-11-23 04:48:30 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
for (auto deltaFile : files.deltaFiles) {
|
|
|
|
std::string fname = deltaFile.filename;
|
|
|
|
deletions.emplace_back(self->bstore->deleteFile(fname));
|
2021-12-04 03:46:48 +08:00
|
|
|
filesToDelete.emplace_back(fname);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (BM_DEBUG) {
|
2022-01-15 07:10:53 +08:00
|
|
|
fmt::print("Fully deleting granule {0}: deleting {1} files\n", granuleId.toString(), deletions.size());
|
2021-12-04 03:46:48 +08:00
|
|
|
for (auto filename : filesToDelete) {
|
2022-01-15 07:10:53 +08:00
|
|
|
fmt::print(" - {}\n", filename.c_str());
|
2021-12-04 03:46:48 +08:00
|
|
|
}
|
2021-11-23 04:48:30 +08:00
|
|
|
}
|
|
|
|
|
2021-12-04 03:46:48 +08:00
|
|
|
// delete the files before the corresponding metadata.
|
|
|
|
// this could lead to dangling pointers in fdb, but this granule should
|
|
|
|
// never be read again anyways, and we can clean up the keys the next time around.
|
|
|
|
// deleting files before corresponding metadata reduces the # of orphaned files.
|
2021-11-23 04:48:30 +08:00
|
|
|
wait(waitForAll(deletions));
|
|
|
|
|
|
|
|
// delete metadata in FDB (history entry and file keys)
|
2021-12-04 03:46:48 +08:00
|
|
|
if (BM_DEBUG) {
|
2022-01-15 07:10:53 +08:00
|
|
|
fmt::print("Fully deleting granule {0}: deleting history and file keys\n", granuleId.toString());
|
2021-12-04 03:46:48 +08:00
|
|
|
}
|
2022-01-11 03:33:18 +08:00
|
|
|
|
|
|
|
state Transaction tr(self->db);
|
|
|
|
tr.setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
|
|
|
|
tr.setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);
|
|
|
|
|
2021-11-23 04:48:30 +08:00
|
|
|
loop {
|
|
|
|
try {
|
|
|
|
KeyRange fileRangeKey = blobGranuleFileKeyRangeFor(granuleId);
|
|
|
|
tr.clear(historyKey);
|
|
|
|
tr.clear(fileRangeKey);
|
|
|
|
wait(tr.commit());
|
|
|
|
break;
|
|
|
|
} catch (Error& e) {
|
|
|
|
wait(tr.onError(e));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-12-04 03:46:48 +08:00
|
|
|
if (BM_DEBUG) {
|
2022-01-15 07:10:53 +08:00
|
|
|
fmt::print("Fully deleting granule {0}: success\n", granuleId.toString());
|
2021-12-04 03:46:48 +08:00
|
|
|
}
|
|
|
|
|
2021-11-23 04:48:30 +08:00
|
|
|
return Void();
|
2021-11-20 09:54:22 +08:00
|
|
|
}
|
|
|
|
|
2021-11-23 22:32:12 +08:00
|
|
|
/*
|
|
|
|
* For the granule with id granuleId, finds the first snapshot file at a
|
|
|
|
* version <= pruneVersion and deletes all files older than it.
|
2021-12-11 05:46:22 +08:00
|
|
|
*
|
|
|
|
* Assumption: this granule's startVersion might change because the first snapshot
|
|
|
|
* file might be deleted. We will need to ensure we don't rely on the granule's startVersion
|
|
|
|
* (that's persisted as part of the key), but rather use the granule's first snapshot's version when needed
|
2021-11-23 22:32:12 +08:00
|
|
|
*/
|
2022-01-21 02:33:15 +08:00
|
|
|
ACTOR Future<Void> partiallyDeleteGranule(Reference<BlobManagerData> self, UID granuleId, Version pruneVersion) {
|
2021-12-04 03:46:48 +08:00
|
|
|
if (BM_DEBUG) {
|
2022-01-15 07:10:53 +08:00
|
|
|
fmt::print("Partially deleting granule {0}: init\n", granuleId.toString());
|
2021-12-04 03:46:48 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
// get files
|
2022-01-11 03:33:18 +08:00
|
|
|
GranuleFiles files = wait(loadHistoryFiles(self->db, granuleId, BM_DEBUG));
|
2021-11-23 04:48:30 +08:00
|
|
|
|
2021-12-04 03:46:48 +08:00
|
|
|
// represents the version of the latest snapshot file in this granule with G.version < pruneVersion
|
|
|
|
Version latestSnapshotVersion = invalidVersion;
|
2021-11-23 04:48:30 +08:00
|
|
|
|
2021-12-04 03:46:48 +08:00
|
|
|
state std::vector<Future<Void>> deletions; // deletion work per file
|
|
|
|
state std::vector<Key> deletedFileKeys; // keys for deleted files
|
|
|
|
state std::vector<std::string> filesToDelete; // TODO: remove evenutally, just for debugging
|
2021-11-23 04:48:30 +08:00
|
|
|
|
2021-11-24 23:12:54 +08:00
|
|
|
// TODO: binary search these snapshot files for latestSnapshotVersion
|
2021-11-23 04:48:30 +08:00
|
|
|
for (int idx = files.snapshotFiles.size() - 1; idx >= 0; --idx) {
|
|
|
|
// if we already found the latestSnapshotVersion, this snapshot can be deleted
|
2021-12-04 03:46:48 +08:00
|
|
|
if (latestSnapshotVersion != invalidVersion) {
|
2021-11-23 04:48:30 +08:00
|
|
|
std::string fname = files.snapshotFiles[idx].filename;
|
|
|
|
deletions.emplace_back(self->bstore->deleteFile(fname));
|
2021-11-24 23:12:54 +08:00
|
|
|
deletedFileKeys.emplace_back(blobGranuleFileKeyFor(granuleId, 'S', files.snapshotFiles[idx].version));
|
2021-12-04 03:46:48 +08:00
|
|
|
filesToDelete.emplace_back(fname);
|
2021-11-23 22:32:12 +08:00
|
|
|
} else if (files.snapshotFiles[idx].version <= pruneVersion) {
|
2021-11-23 04:48:30 +08:00
|
|
|
// otherwise if this is the FIRST snapshot file with version < pruneVersion,
|
|
|
|
// then we found our latestSnapshotVersion (FIRST since we are traversing in reverse)
|
2021-12-04 03:46:48 +08:00
|
|
|
latestSnapshotVersion = files.snapshotFiles[idx].version;
|
2021-11-23 04:48:30 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-12-04 03:46:48 +08:00
|
|
|
// we would have only partially deleted the granule if such a snapshot existed
|
|
|
|
ASSERT(latestSnapshotVersion != invalidVersion);
|
2021-11-23 04:48:30 +08:00
|
|
|
|
|
|
|
// delete all delta files older than latestSnapshotVersion
|
|
|
|
for (auto deltaFile : files.deltaFiles) {
|
2021-12-04 03:46:48 +08:00
|
|
|
// traversing in fwd direction, so stop once we find the first delta file past the latestSnapshotVersion
|
|
|
|
if (deltaFile.version > latestSnapshotVersion) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
// otherwise deltaFile.version <= latestSnapshotVersion so delete it
|
|
|
|
// == should also be deleted because the last delta file before a snapshot would have the same version
|
|
|
|
std::string fname = deltaFile.filename;
|
|
|
|
deletions.emplace_back(self->bstore->deleteFile(fname));
|
|
|
|
deletedFileKeys.emplace_back(blobGranuleFileKeyFor(granuleId, 'D', deltaFile.version));
|
|
|
|
filesToDelete.emplace_back(fname);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (BM_DEBUG) {
|
2022-01-15 07:10:53 +08:00
|
|
|
fmt::print("Partially deleting granule {0}: deleting {1} files\n", granuleId.toString(), deletions.size());
|
2021-12-04 03:46:48 +08:00
|
|
|
for (auto filename : filesToDelete) {
|
2022-01-15 07:10:53 +08:00
|
|
|
fmt::print(" - {0}\n", filename);
|
2021-11-23 04:48:30 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-12-04 03:46:48 +08:00
|
|
|
// TODO: the following comment relies on the assumption that BWs will not get requests to
|
|
|
|
// read data that was already pruned. confirm assumption is fine. otherwise, we'd need
|
|
|
|
// to communicate with BWs here and have them ack the pruneVersion
|
|
|
|
|
|
|
|
// delete the files before the corresponding metadata.
|
|
|
|
// this could lead to dangling pointers in fdb, but we should never read data older than
|
|
|
|
// pruneVersion anyways, and we can clean up the keys the next time around.
|
|
|
|
// deleting files before corresponding metadata reduces the # of orphaned files.
|
2021-11-23 04:48:30 +08:00
|
|
|
wait(waitForAll(deletions));
|
2021-11-24 23:12:54 +08:00
|
|
|
|
|
|
|
// delete metadata in FDB (deleted file keys)
|
2021-12-04 03:46:48 +08:00
|
|
|
if (BM_DEBUG) {
|
2022-01-15 07:10:53 +08:00
|
|
|
fmt::print("Partially deleting granule {0}: deleting file keys\n", granuleId.toString());
|
2021-12-04 03:46:48 +08:00
|
|
|
}
|
|
|
|
|
2022-01-11 03:33:18 +08:00
|
|
|
state Transaction tr(self->db);
|
|
|
|
tr.setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
|
|
|
|
tr.setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);
|
|
|
|
|
2021-11-24 23:12:54 +08:00
|
|
|
loop {
|
|
|
|
try {
|
2021-12-04 03:46:48 +08:00
|
|
|
for (auto& key : deletedFileKeys) {
|
2021-11-24 23:12:54 +08:00
|
|
|
tr.clear(key);
|
|
|
|
}
|
|
|
|
wait(tr.commit());
|
|
|
|
break;
|
|
|
|
} catch (Error& e) {
|
|
|
|
wait(tr.onError(e));
|
|
|
|
}
|
|
|
|
}
|
2021-12-04 03:46:48 +08:00
|
|
|
|
|
|
|
if (BM_DEBUG) {
|
2022-01-15 07:10:53 +08:00
|
|
|
fmt::print("Partially deleting granule {0}: success\n", granuleId.toString());
|
2021-12-04 03:46:48 +08:00
|
|
|
}
|
2021-11-23 04:48:30 +08:00
|
|
|
return Void();
|
2021-11-20 09:54:22 +08:00
|
|
|
}
|
|
|
|
|
2021-11-23 22:32:12 +08:00
|
|
|
/*
|
|
|
|
* This method is used to prune the range [startKey, endKey) at (and including) pruneVersion.
|
|
|
|
* To do this, we do a BFS traversal starting at the active granules. Then we classify granules
|
|
|
|
* in the history as nodes that can be fully deleted (i.e. their files and history can be deleted)
|
|
|
|
* and nodes that can be partially deleted (i.e. some of their files can be deleted).
|
|
|
|
* Once all this is done, we finally clear the pruneIntent key, if possible, to indicate we are done
|
|
|
|
* processing this prune intent.
|
|
|
|
*/
|
2022-01-21 02:33:15 +08:00
|
|
|
ACTOR Future<Void> pruneRange(Reference<BlobManagerData> self,
|
|
|
|
KeyRef startKey,
|
|
|
|
KeyRef endKey,
|
|
|
|
Version pruneVersion,
|
|
|
|
bool force) {
|
2021-12-04 03:46:48 +08:00
|
|
|
if (BM_DEBUG) {
|
2022-01-15 07:10:53 +08:00
|
|
|
fmt::print("pruneRange starting for range [{0} - {1}) @ pruneVersion={2}, force={3}\n",
|
|
|
|
startKey.printable(),
|
|
|
|
endKey.printable(),
|
|
|
|
pruneVersion,
|
|
|
|
force);
|
2021-12-04 03:46:48 +08:00
|
|
|
}
|
|
|
|
|
2021-11-20 09:54:22 +08:00
|
|
|
// queue of <range, startVersion, endVersion> for BFS traversal of history
|
2021-11-23 22:32:12 +08:00
|
|
|
// TODO: consider using GranuleHistoryEntry, but that also makes it a little messy
|
2021-11-20 09:54:22 +08:00
|
|
|
state std::queue<std::tuple<KeyRange, Version, Version>> historyEntryQueue;
|
|
|
|
|
2021-11-23 22:32:12 +08:00
|
|
|
// stacks of <granuleId, historyKey> and <granuleId> to track which granules to delete
|
|
|
|
state std::vector<std::tuple<UID, KeyRef>> toFullyDelete;
|
|
|
|
state std::vector<UID> toPartiallyDelete;
|
2021-11-23 04:48:30 +08:00
|
|
|
|
2021-12-11 05:46:22 +08:00
|
|
|
// track which granules we have already added to traversal
|
|
|
|
// note: (startKey, startVersion) uniquely identifies a granule
|
|
|
|
state std::unordered_set<std::pair<const uint8_t*, Version>, boost::hash<std::pair<const uint8_t*, Version>>>
|
|
|
|
visited;
|
2021-11-23 04:48:30 +08:00
|
|
|
|
2021-11-24 23:12:54 +08:00
|
|
|
state KeyRange range(KeyRangeRef(startKey, endKey)); // range for [startKey, endKey)
|
2021-11-20 09:54:22 +08:00
|
|
|
|
|
|
|
// find all active granules (that comprise the range) and add to the queue
|
2021-11-24 23:12:54 +08:00
|
|
|
state KeyRangeMap<UID>::Ranges activeRanges = self->workerAssignments.intersectingRanges(range);
|
|
|
|
|
2021-11-20 09:54:22 +08:00
|
|
|
state Transaction tr(self->db);
|
|
|
|
tr.setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
|
|
|
|
tr.setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);
|
|
|
|
|
2021-11-24 23:12:54 +08:00
|
|
|
state KeyRangeMap<UID>::iterator activeRange;
|
|
|
|
for (activeRange = activeRanges.begin(); activeRange != activeRanges.end(); ++activeRange) {
|
2021-12-04 03:46:48 +08:00
|
|
|
if (BM_DEBUG) {
|
2022-01-15 07:10:53 +08:00
|
|
|
fmt::print("Checking if active range [{0} - {1}), owned by BW {2}, should be pruned\n",
|
|
|
|
activeRange.begin().printable(),
|
|
|
|
activeRange.end().printable(),
|
|
|
|
activeRange.value().toString());
|
2021-12-04 03:46:48 +08:00
|
|
|
}
|
2021-11-30 00:53:40 +08:00
|
|
|
|
2021-12-04 03:46:48 +08:00
|
|
|
// assumption: prune boundaries must respect granule boundaries
|
2021-11-30 00:53:40 +08:00
|
|
|
if (activeRange.begin() < startKey || activeRange.end() > endKey) {
|
|
|
|
continue;
|
|
|
|
}
|
2021-11-24 23:12:54 +08:00
|
|
|
|
2021-12-04 03:46:48 +08:00
|
|
|
// TODO: if this is a force prune, then revoke the assignment from the corresponding BW first
|
|
|
|
// so that it doesn't try to interact with the granule (i.e. force it to give up gLock).
|
|
|
|
// we'll need some way to ack that the revoke was successful
|
|
|
|
|
2021-11-23 22:32:12 +08:00
|
|
|
loop {
|
|
|
|
try {
|
2021-12-04 03:46:48 +08:00
|
|
|
if (BM_DEBUG) {
|
2022-01-15 07:10:53 +08:00
|
|
|
fmt::print("Fetching latest history entry for range [{0} - {1})\n",
|
|
|
|
activeRange.begin().printable(),
|
|
|
|
activeRange.end().printable());
|
2021-12-04 03:46:48 +08:00
|
|
|
}
|
2021-11-23 22:32:12 +08:00
|
|
|
Optional<GranuleHistory> history = wait(getLatestGranuleHistory(&tr, activeRange.range()));
|
2022-01-19 04:22:34 +08:00
|
|
|
// TODO: can we tell from the krm that this range is not valid, so that we don't need to do a
|
|
|
|
// get
|
2021-11-30 00:53:40 +08:00
|
|
|
if (history.present()) {
|
2021-12-04 03:46:48 +08:00
|
|
|
if (BM_DEBUG) {
|
|
|
|
printf("Adding range to history queue\n");
|
|
|
|
}
|
2021-12-11 05:46:22 +08:00
|
|
|
visited.insert({ activeRange.range().begin.begin(), history.get().version });
|
2021-11-30 00:53:40 +08:00
|
|
|
historyEntryQueue.push({ activeRange.range(), history.get().version, MAX_VERSION });
|
|
|
|
}
|
2021-11-23 22:32:12 +08:00
|
|
|
break;
|
|
|
|
} catch (Error& e) {
|
|
|
|
wait(tr.onError(e));
|
|
|
|
}
|
|
|
|
}
|
2021-11-20 09:54:22 +08:00
|
|
|
}
|
|
|
|
|
2021-12-04 03:46:48 +08:00
|
|
|
    if (BM_DEBUG) {
        printf("Beginning BFS traversal of history\n");
    }
    while (!historyEntryQueue.empty()) {
        // process the node at the front of the queue and remove it
        KeyRange currRange;
        state Version startVersion;
        state Version endVersion;
        std::tie(currRange, startVersion, endVersion) = historyEntryQueue.front();
        historyEntryQueue.pop();

        if (BM_DEBUG) {
            fmt::print("Processing history node [{0} - {1}) with versions [{2}, {3})\n",
                       currRange.begin.printable(),
                       currRange.end.printable(),
                       startVersion,
                       endVersion);
        }

        // get the persisted history entry for this granule
        state Standalone<BlobGranuleHistoryValue> currHistoryNode;
        state KeyRef historyKey = blobGranuleHistoryKeyFor(currRange, startVersion);
        loop {
            try {
                Optional<Value> persistedHistory = wait(tr.get(historyKey));
                ASSERT(persistedHistory.present());
                currHistoryNode = decodeBlobGranuleHistoryValue(persistedHistory.get());
                break;
            } catch (Error& e) {
                wait(tr.onError(e));
            }
        }

        if (BM_DEBUG) {
            fmt::print("Found history entry for this node. Its granuleID is {0}\n",
                       currHistoryNode.granuleID.toString());
        }

        // There are three cases this granule can fall into:
        // - if the granule's end version is at or before the prune version, or this is a force delete,
        //   this granule should be completely deleted
        // - else if startVersion < pruneVersion, then G.startVersion < pruneVersion < G.endVersion
        //   and so this granule should be partially deleted
        // - otherwise, this granule is still active, so don't schedule it for deletion
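        //
        // Worked example (illustrative, not taken from the code): with pruneVersion = 150 and force = false,
        // a granule whose history covers [100, 140) falls into the first case and is fully deleted, one
        // covering [100, 200) falls into the second case and is only partially deleted, and one covering
        // [160, MAX_VERSION) falls into the third case and is left untouched.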
        if (force || endVersion <= pruneVersion) {
            if (BM_DEBUG) {
                fmt::print("Granule {0} will be FULLY deleted\n", currHistoryNode.granuleID.toString());
            }
            toFullyDelete.push_back({ currHistoryNode.granuleID, historyKey });
        } else if (startVersion < pruneVersion) {
            if (BM_DEBUG) {
                fmt::print("Granule {0} will be partially deleted\n", currHistoryNode.granuleID.toString());
            }
            toPartiallyDelete.push_back({ currHistoryNode.granuleID });
        }

        // add all of the node's parents to the queue
        for (auto& parent : currHistoryNode.parentGranules) {
            // if we already added this node to the queue, skip it; otherwise, mark it as visited
            if (visited.count({ parent.first.begin.begin(), parent.second })) {
                if (BM_DEBUG) {
                    fmt::print("Already added {0} to queue, so skipping it\n", currHistoryNode.granuleID.toString());
                }
                continue;
            }
            visited.insert({ parent.first.begin.begin(), parent.second });

            if (BM_DEBUG) {
                fmt::print("Adding parent [{0} - {1}) with versions [{2} - {3}) to queue\n",
                           parent.first.begin.printable(),
                           parent.first.end.printable(),
                           parent.second,
                           startVersion);
            }

            // the parent's end version is this node's startVersion,
            // since this node must have started where its parent finished
            historyEntryQueue.push({ parent.first, parent.second, startVersion });
        }
    }
    // The tops of the stacks have the oldest ranges. This implies that for a granule located at
    // index i, its parent must be located at some index j, where j > i. For this reason,
    // we delete granules in reverse order; this way, we will never end up with unreachable
    // nodes in the persisted history. Moreover, for any node that must be fully deleted,
    // any node that must be partially deleted must occur later on in the history. Thus,
    // we delete the 'toFullyDelete' granules first.
    //
    // Unfortunately, we can't parallelize _full_ deletions because they might
    // race and we'd end up with unreachable nodes in the case of a crash.
    // Since partial deletions only occur for "leaves", they can be done in parallel.
    //
    // Note about file deletions: although we might be retrying a deletion of a granule,
    // we won't run into any issues with trying to "re-delete" a blob file, since deleting
    // a file that doesn't exist is considered successful.
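    //
    // Concretely (illustrative): toFullyDelete is filled newest-first by the BFS above, so iterating from the
    // back deletes the oldest ancestors first. If we crash part-way through, every history entry that survives
    // is still reachable from the latest entries, so a retried prune can find and finish the work; deleting a
    // newer node first could instead strand its ancestors as unreachable entries.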
    state int i;
    if (BM_DEBUG) {
        fmt::print("{0} granules to fully delete\n", toFullyDelete.size());
    }
    for (i = toFullyDelete.size() - 1; i >= 0; --i) {
        UID granuleId;
        KeyRef historyKey;
        std::tie(granuleId, historyKey) = toFullyDelete[i];
        // FIXME: consider batching into a single txn (need to take care of txn size limit)
        if (BM_DEBUG) {
            fmt::print("About to fully delete granule {0}\n", granuleId.toString());
        }
        wait(fullyDeleteGranule(self, granuleId, historyKey));
    }

    if (BM_DEBUG) {
        fmt::print("{0} granules to partially delete\n", toPartiallyDelete.size());
    }
    std::vector<Future<Void>> partialDeletions;
    for (i = toPartiallyDelete.size() - 1; i >= 0; --i) {
        UID granuleId = toPartiallyDelete[i];
        if (BM_DEBUG) {
            fmt::print("About to partially delete granule {0}\n", granuleId.toString());
        }
        partialDeletions.emplace_back(partiallyDeleteGranule(self, granuleId, pruneVersion));
    }

    wait(waitForAll(partialDeletions));
    // Now that all the necessary granules and their files have been deleted, we can
    // clear the pruneIntent key to signify that the work is done. However, there could have been
    // another pruneIntent that got written for this range while we were processing this one.
    // If that is the case, we should not clear the key. Otherwise, we can just clear the key.

    tr.reset();
    if (BM_DEBUG) {
        printf("About to clear prune intent\n");
    }
    loop {
        try {
            tr.setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
            tr.setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);

            state Key pruneIntentKey = blobGranulePruneKeys.begin.withSuffix(startKey);
            state Optional<Value> pruneIntentValue = wait(tr.get(pruneIntentKey));
            ASSERT(pruneIntentValue.present());

            Version currPruneVersion;
            bool currForce;
            std::tie(currPruneVersion, currForce) = decodeBlobGranulePruneValue(pruneIntentValue.get());

            if (currPruneVersion == pruneVersion && currForce == force) {
                tr.clear(pruneIntentKey);
                wait(tr.commit());
            }
            break;
        } catch (Error& e) {
            if (BM_DEBUG) {
                fmt::print("Attempt to clear prune intent got error {}\n", e.name());
            }
            wait(tr.onError(e));
        }
    }

    if (BM_DEBUG) {
        fmt::print("Successfully pruned range [{0} - {1}) at pruneVersion={2}\n",
                   startKey.printable(),
                   endKey.printable(),
                   pruneVersion);
    }
    return Void();
}
/*
 * This monitor watches for changes to a key K that gets updated whenever there is a new prune intent.
 * On this change, we scan through all blobGranulePruneKeys (which look like <startKey, endKey>=<prune_version,
 * force>) and prune any intents.
 *
 * Once the prune has succeeded, we clear the key IF the version is still the same one that was pruned.
 * That way, if another prune intent arrived for the same range while we were working on an older one,
 * we wouldn't end up clearing the intent.
 *
 * When watching for changes, we might end up in scenarios where we failed to do the work
 * for a prune intent even though the watch was triggered (maybe the BM had a blip). This is problematic
 * if the intent is a force and there isn't another prune intent for quite some time. To remedy this,
 * if we don't see a watch change in X (configurable) seconds, we will just sweep through the prune intents,
 * consolidating any work we might have missed before.
 *
 * Note: we could potentially use a changefeed here to get the exact pruneIntent that was added
 * rather than iterating through all of them, but this might have too much overhead for latency
 * improvements we don't really need here (also we need to go over all prune intents anyways in the
 * case that the timer is up before any new prune intents arrive).
 */
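// Rough sketch of the write side this monitor expects (an assumption based on the keys used below, not a
// verbatim copy of client code): a caller wanting [keyA, keyB) pruned up to version V would, in one
// transaction, record the intent with something like
//   wait(krmSetRange(tr, blobGranulePruneKeys.begin, KeyRangeRef(keyA, keyB), blobGranulePruneValueFor(V, force)));
// and then update blobGranulePruneChangeKey (e.g. to a fresh UID) so that the watch below fires.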
ACTOR Future<Void> monitorPruneKeys(Reference<BlobManagerData> self) {
    // setup bstore
    try {
        if (BM_DEBUG) {
            fmt::print("BM constructing backup container from {}\n", SERVER_KNOBS->BG_URL.c_str());
        }
        self->bstore = BackupContainerFileSystem::openContainerFS(SERVER_KNOBS->BG_URL);
        if (BM_DEBUG) {
            printf("BM constructed backup container\n");
        }
    } catch (Error& e) {
        if (BM_DEBUG) {
            fmt::print("BM got backup container init error {0}\n", e.name());
        }
        throw e;
    }

    try {
        state Value oldPruneWatchVal;
        loop {
            state Reference<ReadYourWritesTransaction> tr = makeReference<ReadYourWritesTransaction>(self->db);
            tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
            tr->setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);

            // Wait for the watch to change, or some time to expire (whichever comes first),
            // before checking through the prune intents. We write a UID into the change key value
            // so that we can still recognize when the watch key has been changed while we weren't
            // monitoring it.
            loop {
                try {
                    tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
                    tr->setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);

                    state Optional<Value> newPruneWatchVal = wait(tr->get(blobGranulePruneChangeKey));

                    // if the value at the change key has changed, that means there is new work to do
                    if (newPruneWatchVal.present() && oldPruneWatchVal != newPruneWatchVal.get()) {
                        oldPruneWatchVal = newPruneWatchVal.get();
                        if (BM_DEBUG) {
                            printf("the blobGranulePruneChangeKey changed\n");
                        }

                        // TODO: debugging code, remove it
                        /*
                        if (newPruneWatchVal.get().toString().substr(0, 6) == "prune=") {
                            state Reference<ReadYourWritesTransaction> dummy =
                                makeReference<ReadYourWritesTransaction>(self->db);
                            loop {
                                try {
                                    dummy->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
                                    dummy->setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);
                                    std::istringstream iss(newPruneWatchVal.get().toString().substr(6));
                                    Version version;
                                    iss >> version;
                                    dummy->set(blobGranulePruneKeys.begin.withSuffix(normalKeys.begin),
                                               blobGranulePruneValueFor(version, false));
                                    wait(dummy->commit());
                                    break;
                                } catch (Error& e) {
                                    wait(dummy->onError(e));
                                }
                            }
                        }
                        */
                        break;
                    }

                    // otherwise, there are no changes and we should wait until the next change (or timeout)
                    state Future<Void> watchPruneIntentsChange = tr->watch(blobGranulePruneChangeKey);
                    wait(tr->commit());

                    if (BM_DEBUG) {
                        printf("monitorPruneKeys waiting for change or timeout\n");
                    }

                    choose {
                        when(wait(watchPruneIntentsChange)) {
                            if (BM_DEBUG) {
                                printf("monitorPruneKeys saw a change\n");
                            }
                            tr->reset();
                        }
                        when(wait(delay(SERVER_KNOBS->BG_PRUNE_TIMEOUT))) {
                            if (BM_DEBUG) {
                                printf("monitorPruneKeys got a timeout\n");
                            }
                            break;
                        }
                    }
                } catch (Error& e) {
                    wait(tr->onError(e));
                }
            }

            tr->reset();

            if (BM_DEBUG) {
                printf("Looping over prune intents\n");
            }

            // loop through all prune intentions and do prune work accordingly
            try {
                state KeyRef beginKey = normalKeys.begin;
                loop {
                    tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
                    tr->setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);

                    state std::vector<Future<Void>> prunes;
                    try {
                        // TODO: replace 10000 with a knob
                        KeyRange nextRange(KeyRangeRef(beginKey, normalKeys.end));
                        state RangeResult pruneIntents = wait(krmGetRanges(
                            tr, blobGranulePruneKeys.begin, nextRange, 10000, GetRangeLimits::BYTE_LIMIT_UNLIMITED));
                        state Key lastEndKey;
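                        // krmGetRanges returns boundary key/value pairs: each consecutive pair of keys
                        // [pruneIntents[i].key, pruneIntents[i+1].key) describes one range, and an empty
                        // value means no prune intent is currently registered for that range.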
                        for (int rangeIdx = 0; rangeIdx < pruneIntents.size() - 1; ++rangeIdx) {
                            KeyRef rangeStartKey = pruneIntents[rangeIdx].key;
                            KeyRef rangeEndKey = pruneIntents[rangeIdx + 1].key;
                            lastEndKey = rangeEndKey;
                            if (pruneIntents[rangeIdx].value.size() == 0) {
                                continue;
                            }
                            KeyRange range(KeyRangeRef(rangeStartKey, rangeEndKey));
                            Version pruneVersion;
                            bool force;
                            std::tie(pruneVersion, force) = decodeBlobGranulePruneValue(pruneIntents[rangeIdx].value);

                            if (BM_DEBUG) {
                                fmt::print("about to prune range [{0} - {1}) @ {2}, force={3}\n",
                                           rangeStartKey.printable(),
                                           rangeEndKey.printable(),
                                           pruneVersion,
                                           force ? "T" : "F");
                            }
                            prunes.emplace_back(pruneRange(self, rangeStartKey, rangeEndKey, pruneVersion, force));
                        }

                        // wait for this set of prunes to complete before starting the next ones, since if we
                        // prune a range R at version V and, while we are doing that, the timer expires, we
                        // will end up trying to prune the same range again since the work isn't finished and
                        // the prunes will race
                        //
                        // TODO: this isn't that efficient though. Instead we could keep metadata as part of the
                        // BM's memory that tracks which prunes are active. Once done, we can mark that work as
                        // done. If the BM fails then all prunes will fail and so the next BM will have a clear
                        // set of metadata (i.e. no work in progress) so we will end up doing the work in the
                        // new BM
                        wait(waitForAll(prunes));

                        if (!pruneIntents.more) {
                            break;
                        }

                        beginKey = lastEndKey;
                    } catch (Error& e) {
                        wait(tr->onError(e));
                    }
                }
            } catch (Error& e) {
                if (e.code() == error_code_actor_cancelled) {
                    throw e;
                }
                if (BM_DEBUG) {
                    fmt::print("monitorPruneKeys for BM {0} saw error {1}\n", self->id.toString(), e.name());
                }
                // don't want to kill the blob manager for errors around pruning
                TraceEvent("MonitorPruneKeysError", self->id).detail("Error", e.name());
            }
            if (BM_DEBUG) {
                printf("Done pruning current set of prune intents.\n");
            }
        }
    } catch (Error& e) {
        if (BM_DEBUG) {
            fmt::print("monitorPruneKeys got error {}\n", e.name());
        }
        throw e;
    }
}
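// Summary of the actor below (descriptive, based only on this code): doLockChecks sleeps until
// bmData->doLockCheck is triggered (presumably set elsewhere when this manager suspects its lock may be
// stale), then re-verifies the manager lock via checkManagerLock. If the check reports
// granule_assignment_conflict, this manager has been superseded and it signals iAmReplaced; otherwise it
// re-arms the promise and waits for the next trigger.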
ACTOR Future<Void> doLockChecks(Reference<BlobManagerData> bmData) {
    loop {
        Promise<Void> check = bmData->doLockCheck;
        wait(check.getFuture());
        wait(delay(0.5)); // don't do this too often if there is a lot of conflict

        state Reference<ReadYourWritesTransaction> tr = makeReference<ReadYourWritesTransaction>(bmData->db);

        loop {
            try {
                tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
                tr->setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);
                wait(checkManagerLock(tr, bmData));
                break;
            } catch (Error& e) {
                if (e.code() == error_code_granule_assignment_conflict) {
                    if (BM_DEBUG) {
                        fmt::print("BM {0} got lock out of date in lock check on conflict! Dying\n", bmData->epoch);
                    }
                    if (bmData->iAmReplaced.canBeSet()) {
                        bmData->iAmReplaced.send(Void());
                    }
                    return Void();
                }
                wait(tr->onError(e));
                if (BM_DEBUG) {
                    fmt::print("BM {0} still ok after checking lock on conflict\n", bmData->epoch);
                }
            }
        }

        bmData->doLockCheck = Promise<Void>();
    }
}
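// Startup/shutdown flow of the main blob manager actor below: construct the BlobManagerData, start the blob
// worker recruiter, recover the previous manager's state, then launch the long-running actors (lock checks,
// client range monitoring, range assignment, prune key monitoring, and the chaos range mover under BUGGIFY).
// After that it only waits: it exits when it is replaced, when it is halted, or if the actor collection fails.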
ACTOR Future<Void> blobManager(BlobManagerInterface bmInterf,
                               Reference<AsyncVar<ServerDBInfo> const> dbInfo,
                               int64_t epoch) {
    state Reference<BlobManagerData> self =
        makeReference<BlobManagerData>(deterministicRandom()->randomUniqueID(),
                                       openDBOnServer(dbInfo, TaskPriority::DefaultEndpoint, LockAware::True),
                                       bmInterf.locality.dcId());

    state Future<Void> collection = actorCollection(self->addActor.getFuture());

    if (BM_DEBUG) {
        fmt::print("Blob manager {0} starting...\n", epoch);
    }
    TraceEvent("BlobManagerInit", bmInterf.id()).detail("Epoch", epoch).log();

    self->epoch = epoch;

    // although we start the recruiter, we wait until existing workers are ack'd
    auto recruitBlobWorker = IAsyncListener<RequestStream<RecruitBlobWorkerRequest>>::create(
        dbInfo, [](auto const& info) { return info.clusterInterface.recruitBlobWorker; });
    self->addActor.send(blobWorkerRecruiter(self, recruitBlobWorker));

    // we need to recover the old blob manager's state (e.g. granule assignments)
    // before the new blob manager does anything
    wait(recoverBlobManager(self));

    self->addActor.send(doLockChecks(self));
    self->addActor.send(monitorClientRanges(self));
    self->addActor.send(rangeAssigner(self));
    self->addActor.send(monitorPruneKeys(self));

    if (BUGGIFY) {
        self->addActor.send(chaosRangeMover(self));
    }

    // TODO probably other things here eventually
    try {
        loop choose {
            when(wait(self->iAmReplaced.getFuture())) {
                if (BM_DEBUG) {
                    printf("Blob Manager exiting because it is replaced\n");
                }
                break;
            }
            when(HaltBlobManagerRequest req = waitNext(bmInterf.haltBlobManager.getFuture())) {
                req.reply.send(Void());
                TraceEvent("BlobManagerHalted", bmInterf.id()).detail("ReqID", req.requesterID);
                break;
            }
            when(state HaltBlobGranulesRequest req = waitNext(bmInterf.haltBlobGranules.getFuture())) {
                wait(haltBlobGranules(self));
                req.reply.send(Void());
                TraceEvent("BlobGranulesHalted", bmInterf.id()).detail("ReqID", req.requesterID);
                break;
            }
            when(wait(collection)) {
                TraceEvent("BlobManagerActorCollectionError");
                ASSERT(false);
                throw internal_error();
            }
        }
    } catch (Error& err) {
        TraceEvent("BlobManagerDied", bmInterf.id()).error(err, true);
    }
    return Void();
}
// Test:
// start empty
// DB has [A - B). That should show up in knownBlobRanges and should be in added
// DB has nothing. knownBlobRanges should be empty and [A - B) should be in removed
// DB has [A - B) and [C - D). They should both show up in knownBlobRanges and added.
// DB has [A - D). It should show up coalesced in knownBlobRanges, and [B - C) should be in added.
// DB has [A - C). It should show up coalesced in knownBlobRanges, and [C - D) should be in removed.
// DB has [B - C). It should show up coalesced in knownBlobRanges, and [A - B) should be in removed.
// DB has [B - D). It should show up coalesced in knownBlobRanges, and [C - D) should be in added.
// DB has [A - D). It should show up coalesced in knownBlobRanges, and [A - B) should be in added.
// DB has [A - B) and [C - D). They should show up in knownBlobRanges, and [B - C) should be in removed.
// DB has [B - C). It should show up in knownBlobRanges, [B - C) should be in added, and [A - B) and [C - D)
// should be in removed.
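// The test below drives updateClientBlobRanges directly with hand-built RangeResults: an entry whose value is
// "1" marks the start of an active blob range and an entry with an empty value marks where it ends, matching
// the active/inactive encoding that updateClientBlobRanges expects.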
TEST_CASE(":/blobmanager/updateranges") {
    KeyRangeMap<bool> knownBlobRanges(false, normalKeys.end);
    Arena ar;

    VectorRef<KeyRangeRef> added;
    VectorRef<KeyRangeRef> removed;

    StringRef active = LiteralStringRef("1");
    StringRef inactive = StringRef();

    RangeResult dbDataEmpty;
    std::vector<std::pair<KeyRangeRef, bool>> kbrRanges;

    StringRef keyA = StringRef(ar, LiteralStringRef("A"));
    StringRef keyB = StringRef(ar, LiteralStringRef("B"));
    StringRef keyC = StringRef(ar, LiteralStringRef("C"));
    StringRef keyD = StringRef(ar, LiteralStringRef("D"));

    // db data setup
    RangeResult dbDataAB;
    dbDataAB.emplace_back(ar, keyA, active);
    dbDataAB.emplace_back(ar, keyB, inactive);

    RangeResult dbDataAC;
    dbDataAC.emplace_back(ar, keyA, active);
    dbDataAC.emplace_back(ar, keyC, inactive);

    RangeResult dbDataAD;
    dbDataAD.emplace_back(ar, keyA, active);
    dbDataAD.emplace_back(ar, keyD, inactive);

    RangeResult dbDataBC;
    dbDataBC.emplace_back(ar, keyB, active);
    dbDataBC.emplace_back(ar, keyC, inactive);

    RangeResult dbDataBD;
    dbDataBD.emplace_back(ar, keyB, active);
    dbDataBD.emplace_back(ar, keyD, inactive);

    RangeResult dbDataCD;
    dbDataCD.emplace_back(ar, keyC, active);
    dbDataCD.emplace_back(ar, keyD, inactive);

    RangeResult dbDataAB_CD;
    dbDataAB_CD.emplace_back(ar, keyA, active);
    dbDataAB_CD.emplace_back(ar, keyB, inactive);
    dbDataAB_CD.emplace_back(ar, keyC, active);
    dbDataAB_CD.emplace_back(ar, keyD, inactive);

    // key ranges setup
    KeyRangeRef rangeAB = KeyRangeRef(keyA, keyB);
    KeyRangeRef rangeAC = KeyRangeRef(keyA, keyC);
    KeyRangeRef rangeAD = KeyRangeRef(keyA, keyD);

    KeyRangeRef rangeBC = KeyRangeRef(keyB, keyC);
    KeyRangeRef rangeBD = KeyRangeRef(keyB, keyD);

    KeyRangeRef rangeCD = KeyRangeRef(keyC, keyD);

    KeyRangeRef rangeStartToA = KeyRangeRef(normalKeys.begin, keyA);
    KeyRangeRef rangeStartToB = KeyRangeRef(normalKeys.begin, keyB);
    KeyRangeRef rangeStartToC = KeyRangeRef(normalKeys.begin, keyC);
    KeyRangeRef rangeBToEnd = KeyRangeRef(keyB, normalKeys.end);
    KeyRangeRef rangeCToEnd = KeyRangeRef(keyC, normalKeys.end);
    KeyRangeRef rangeDToEnd = KeyRangeRef(keyD, normalKeys.end);

    // actual test

    getRanges(kbrRanges, knownBlobRanges);
    ASSERT(kbrRanges.size() == 1);
    ASSERT(kbrRanges[0].first == normalKeys);
    ASSERT(!kbrRanges[0].second);

    // DB has [A - B)
    kbrRanges.clear();
    added.clear();
    removed.clear();
    updateClientBlobRanges(&knownBlobRanges, dbDataAB, ar, &added, &removed);

    ASSERT(added.size() == 1);
    ASSERT(added[0] == rangeAB);

    ASSERT(removed.size() == 0);

    getRanges(kbrRanges, knownBlobRanges);
    ASSERT(kbrRanges.size() == 3);
    ASSERT(kbrRanges[0].first == rangeStartToA);
    ASSERT(!kbrRanges[0].second);
    ASSERT(kbrRanges[1].first == rangeAB);
    ASSERT(kbrRanges[1].second);
    ASSERT(kbrRanges[2].first == rangeBToEnd);
    ASSERT(!kbrRanges[2].second);

    // DB has nothing
    kbrRanges.clear();
    added.clear();
    removed.clear();
    updateClientBlobRanges(&knownBlobRanges, dbDataEmpty, ar, &added, &removed);

    ASSERT(added.size() == 0);

    ASSERT(removed.size() == 1);
    ASSERT(removed[0] == rangeAB);

    getRanges(kbrRanges, knownBlobRanges);
    ASSERT(kbrRanges[0].first == normalKeys);
    ASSERT(!kbrRanges[0].second);

    // DB has [A - B) and [C - D)
    kbrRanges.clear();
    added.clear();
    removed.clear();
    updateClientBlobRanges(&knownBlobRanges, dbDataAB_CD, ar, &added, &removed);

    ASSERT(added.size() == 2);
    ASSERT(added[0] == rangeAB);
    ASSERT(added[1] == rangeCD);

    ASSERT(removed.size() == 0);

    getRanges(kbrRanges, knownBlobRanges);
    ASSERT(kbrRanges.size() == 5);
    ASSERT(kbrRanges[0].first == rangeStartToA);
    ASSERT(!kbrRanges[0].second);
    ASSERT(kbrRanges[1].first == rangeAB);
    ASSERT(kbrRanges[1].second);
    ASSERT(kbrRanges[2].first == rangeBC);
    ASSERT(!kbrRanges[2].second);
    ASSERT(kbrRanges[3].first == rangeCD);
    ASSERT(kbrRanges[3].second);
    ASSERT(kbrRanges[4].first == rangeDToEnd);
    ASSERT(!kbrRanges[4].second);

    // DB has [A - D)
    kbrRanges.clear();
    added.clear();
    removed.clear();
    updateClientBlobRanges(&knownBlobRanges, dbDataAD, ar, &added, &removed);

    ASSERT(added.size() == 1);
    ASSERT(added[0] == rangeBC);

    ASSERT(removed.size() == 0);

    getRanges(kbrRanges, knownBlobRanges);
    ASSERT(kbrRanges.size() == 3);
    ASSERT(kbrRanges[0].first == rangeStartToA);
    ASSERT(!kbrRanges[0].second);
    ASSERT(kbrRanges[1].first == rangeAD);
    ASSERT(kbrRanges[1].second);
    ASSERT(kbrRanges[2].first == rangeDToEnd);
    ASSERT(!kbrRanges[2].second);

    // DB has [A - C)
    kbrRanges.clear();
    added.clear();
    removed.clear();
    updateClientBlobRanges(&knownBlobRanges, dbDataAC, ar, &added, &removed);

    ASSERT(added.size() == 0);

    ASSERT(removed.size() == 1);
    ASSERT(removed[0] == rangeCD);

    getRanges(kbrRanges, knownBlobRanges);
    ASSERT(kbrRanges.size() == 3);
    ASSERT(kbrRanges[0].first == rangeStartToA);
    ASSERT(!kbrRanges[0].second);
    ASSERT(kbrRanges[1].first == rangeAC);
    ASSERT(kbrRanges[1].second);
    ASSERT(kbrRanges[2].first == rangeCToEnd);
    ASSERT(!kbrRanges[2].second);

    // DB has [B - C)
    kbrRanges.clear();
    added.clear();
    removed.clear();
    updateClientBlobRanges(&knownBlobRanges, dbDataBC, ar, &added, &removed);

    ASSERT(added.size() == 0);

    ASSERT(removed.size() == 1);
    ASSERT(removed[0] == rangeAB);

    getRanges(kbrRanges, knownBlobRanges);
    ASSERT(kbrRanges.size() == 3);
    ASSERT(kbrRanges[0].first == rangeStartToB);
    ASSERT(!kbrRanges[0].second);
    ASSERT(kbrRanges[1].first == rangeBC);
    ASSERT(kbrRanges[1].second);
    ASSERT(kbrRanges[2].first == rangeCToEnd);
    ASSERT(!kbrRanges[2].second);

    // DB has [B - D)
    kbrRanges.clear();
    added.clear();
    removed.clear();
    updateClientBlobRanges(&knownBlobRanges, dbDataBD, ar, &added, &removed);

    ASSERT(added.size() == 1);
    ASSERT(added[0] == rangeCD);

    ASSERT(removed.size() == 0);

    getRanges(kbrRanges, knownBlobRanges);
    ASSERT(kbrRanges.size() == 3);
    ASSERT(kbrRanges[0].first == rangeStartToB);
    ASSERT(!kbrRanges[0].second);
    ASSERT(kbrRanges[1].first == rangeBD);
    ASSERT(kbrRanges[1].second);
    ASSERT(kbrRanges[2].first == rangeDToEnd);
    ASSERT(!kbrRanges[2].second);

    // DB has [A - D)
    kbrRanges.clear();
    added.clear();
    removed.clear();
    updateClientBlobRanges(&knownBlobRanges, dbDataAD, ar, &added, &removed);

    ASSERT(added.size() == 1);
    ASSERT(added[0] == rangeAB);

    ASSERT(removed.size() == 0);

    getRanges(kbrRanges, knownBlobRanges);
    ASSERT(kbrRanges.size() == 3);
    ASSERT(kbrRanges[0].first == rangeStartToA);
    ASSERT(!kbrRanges[0].second);
    ASSERT(kbrRanges[1].first == rangeAD);
    ASSERT(kbrRanges[1].second);
    ASSERT(kbrRanges[2].first == rangeDToEnd);
    ASSERT(!kbrRanges[2].second);

    // DB has [A - B) and [C - D)
    kbrRanges.clear();
    added.clear();
    removed.clear();
    updateClientBlobRanges(&knownBlobRanges, dbDataAB_CD, ar, &added, &removed);

    ASSERT(added.size() == 0);

    ASSERT(removed.size() == 1);
    ASSERT(removed[0] == rangeBC);

    getRanges(kbrRanges, knownBlobRanges);
    ASSERT(kbrRanges.size() == 5);
    ASSERT(kbrRanges[0].first == rangeStartToA);
    ASSERT(!kbrRanges[0].second);
    ASSERT(kbrRanges[1].first == rangeAB);
    ASSERT(kbrRanges[1].second);
    ASSERT(kbrRanges[2].first == rangeBC);
    ASSERT(!kbrRanges[2].second);
    ASSERT(kbrRanges[3].first == rangeCD);
    ASSERT(kbrRanges[3].second);
    ASSERT(kbrRanges[4].first == rangeDToEnd);
    ASSERT(!kbrRanges[4].second);

    // DB has [B - C)
    kbrRanges.clear();
    added.clear();
    removed.clear();
    updateClientBlobRanges(&knownBlobRanges, dbDataBC, ar, &added, &removed);

    ASSERT(added.size() == 1);
    ASSERT(added[0] == rangeBC);

    ASSERT(removed.size() == 2);
    ASSERT(removed[0] == rangeAB);
    ASSERT(removed[1] == rangeCD);

    getRanges(kbrRanges, knownBlobRanges);
    ASSERT(kbrRanges.size() == 3);
    ASSERT(kbrRanges[0].first == rangeStartToB);
    ASSERT(!kbrRanges[0].second);
    ASSERT(kbrRanges[1].first == rangeBC);
    ASSERT(kbrRanges[1].second);
    ASSERT(kbrRanges[2].first == rangeCToEnd);
    ASSERT(!kbrRanges[2].second);

    return Void();
}