/*
 * BlobGranuleReader.actor.cpp
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <map>
#include <vector>

#include "contrib/fmt-8.1.1/include/fmt/format.h"

#include "fdbclient/AsyncFileS3BlobStore.actor.h"
#include "fdbclient/BlobGranuleCommon.h"
#include "fdbclient/BlobGranuleFiles.h"
#include "fdbclient/BlobGranuleReader.actor.h"
#include "fdbclient/BlobWorkerCommon.h"
#include "fdbclient/BlobWorkerInterface.h"

#include "flow/actorcompiler.h" // This must be the last #include.

// TODO: use a more efficient data structure than std::map? A PTree is unnecessary since this isn't versioned, but
// some other sorted structure could work. If it used arenas, it would probably be more efficient with allocations,
// since everything else is in one arena and discarded at the end.

// TODO: the file reading code here and the delta file function could be refactored into another actor, which would
// also make this part testable.
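
// Reads the file region described by 'f' (filename, offset, length) from the backup container into a single
// arena-backed buffer and returns it.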
ACTOR Future<Standalone<StringRef>> readFile(Reference<BackupContainerFileSystem> bstore, BlobFilePointerRef f) {
	try {
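		// Read the byte range [f.offset, f.offset + f.length) into one buffer allocated from 'arena'.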
		state Arena arena;
		// printf("Starting read of snapshot file %s\n", filename.c_str());
		state Reference<IAsyncFile> reader = wait(bstore->readFile(f.filename.toString()));
		// printf("Got snapshot file size %lld\n", size);
		state uint8_t* data = new (arena) uint8_t[f.length];
		// printf("Reading %lld bytes from snapshot file %s\n", size, filename.c_str());
		int readSize = wait(reader->read(data, f.length, f.offset));
		// printf("Read %lld bytes from snapshot file %s\n", readSize, filename.c_str());
		ASSERT(f.length == readSize);

		StringRef dataRef(data, f.length);
		return Standalone<StringRef>(dataRef, arena);
	} catch (Error& e) {
		printf("Reading file %s got error %s\n", f.toString().c_str(), e.name());
		throw e;
	}
}

// TODO: improve the interface of this function so that it doesn't need to be passed the entire BlobWorkerStats
// object.

// FIXME: probably want to chunk this up with yields, by calling the sub-functions that BlobGranuleFiles actually
// exposes, to avoid a slow task when the blob worker re-snapshots.
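
// Reads a granule chunk's snapshot file and delta files from the blob store in parallel, then materializes the
// granule's rows in keyRange at readVersion, charging any blob store requests to 'stats' if present.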
ACTOR Future<RangeResult> readBlobGranule(BlobGranuleChunkRef chunk,
                                          KeyRangeRef keyRange,
                                          Version readVersion,
                                          Reference<BackupContainerFileSystem> bstore,
                                          Optional<BlobWorkerStats*> stats) {

	// TODO REMOVE with V2 of protocol
	ASSERT(readVersion == chunk.includedVersion);
	ASSERT(chunk.snapshotFile.present());

	state Arena arena;

	try {
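		// Kick off the snapshot read and all delta file reads in parallel before waiting on any of them.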
		Future<Standalone<StringRef>> readSnapshotFuture = readFile(bstore, chunk.snapshotFile.get());
		state std::vector<Future<Standalone<StringRef>>> readDeltaFutures;
		if (stats.present()) {
			++stats.get()->s3GetReqs;
		}

		readDeltaFutures.reserve(chunk.deltaFiles.size());
		for (BlobFilePointerRef deltaFile : chunk.deltaFiles) {
			readDeltaFutures.push_back(readFile(bstore, deltaFile));
			if (stats.present()) {
				++stats.get()->s3GetReqs;
			}
		}

		state Standalone<StringRef> snapshotData = wait(readSnapshotFuture);
		arena.dependsOn(snapshotData.arena());

		state int numDeltaFiles = chunk.deltaFiles.size();
		state StringRef* deltaData = new (arena) StringRef[numDeltaFiles];
		state int deltaIdx;

		// for (Future<Standalone<StringRef>> deltaFuture : readDeltaFutures) {
		for (deltaIdx = 0; deltaIdx < numDeltaFiles; deltaIdx++) {
			Standalone<StringRef> data = wait(readDeltaFutures[deltaIdx]);
			deltaData[deltaIdx] = data;
			arena.dependsOn(data.arena());
		}

		return materializeBlobGranule(chunk, keyRange, readVersion, snapshotData, deltaData);

	} catch (Error& e) {
		printf("Reading blob granule got error %s\n", e.name());
		throw e;
	}
}

// TODO probably should add things like limit/bytelimit at some point?
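
// Reads every chunk in 'reply' in order and streams each materialized RangeResult to 'results', finishing the
// stream with end_of_stream(), or forwarding the error if one occurs.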
ACTOR Future<Void> readBlobGranules(BlobGranuleFileRequest request,
                                    BlobGranuleFileReply reply,
                                    Reference<BackupContainerFileSystem> bstore,
                                    PromiseStream<RangeResult> results) {
	// TODO: for a large number of chunks, this should probably have some sort of buffer limit like
	// ReplyPromiseStream. Maybe just use ReplyPromiseStream instead of PromiseStream?
	try {
		state int i;
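		// Materialize each chunk in order and send its rows to the caller as soon as they are ready.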
		for (i = 0; i < reply.chunks.size(); i++) {
			/*printf("ReadBlobGranules processing chunk %d [%s - %s)\n",
			    i,
			    reply.chunks[i].keyRange.begin.printable().c_str(),
			    reply.chunks[i].keyRange.end.printable().c_str());*/
			RangeResult chunkResult =
			    wait(readBlobGranule(reply.chunks[i], request.keyRange, request.readVersion, bstore));
			results.send(std::move(chunkResult));
		}
		// printf("ReadBlobGranules done, sending EOS\n");
		results.sendError(end_of_stream());
	} catch (Error& e) {
		printf("ReadBlobGranules got error %s\n", e.name());
		results.sendError(e);
	}

	return Void();
}