Add g_traceBatch for prefetch query (#7730)

This commit is contained in:
Hao Fu 2022-07-28 17:54:31 -07:00 committed by GitHub
parent 8491c2530f
commit 62bee7ac9a
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
1 changed file with 23 additions and 1 deletion

View File

@@ -3818,6 +3818,9 @@ ACTOR Future<GetRangeReqAndResultRef> quickGetKeyValues(
state double getValuesStart = g_network->timer();
getRange.begin = firstGreaterOrEqual(KeyRef(*a, prefix));
getRange.end = firstGreaterOrEqual(strinc(prefix, *a));
if (pOriginalReq->debugID.present())
g_traceBatch.addEvent(
"TransactionDebug", pOriginalReq->debugID.get().first(), "storageserver.quickGetKeyValues.Before");
try {
// TODO: Use a lower level API may be better? Or tweak priorities?
GetKeyValuesRequest req;
@@ -3848,6 +3851,10 @@ ACTOR Future<GetRangeReqAndResultRef> quickGetKeyValues(
getRange.result = RangeResultRef(reply.data, reply.more);
const double duration = g_network->timer() - getValuesStart;
data->counters.mappedRangeLocalSample.addMeasurement(duration);
if (pOriginalReq->debugID.present())
g_traceBatch.addEvent("TransactionDebug",
pOriginalReq->debugID.get().first(),
"storageserver.quickGetKeyValues.AfterLocalFetch");
return getRange;
}
// Otherwise fallback.
@@ -3869,6 +3876,10 @@ ACTOR Future<GetRangeReqAndResultRef> quickGetKeyValues(
getRange.result = rangeResult;
const double duration = g_network->timer() - getValuesStart;
data->counters.mappedRangeRemoteSample.addMeasurement(duration);
if (pOriginalReq->debugID.present())
g_traceBatch.addEvent("TransactionDebug",
pOriginalReq->debugID.get().first(),
"storageserver.quickGetKeyValues.AfterRemoteFetch");
return getRange;
} else {
throw quick_get_key_values_miss();
@@ -4156,7 +4167,9 @@ ACTOR Future<GetMappedKeyValuesReply> mapKeyValues(StorageServer* data,
result.arena.dependsOn(input.arena);
result.data.reserve(result.arena, input.data.size());
if (pOriginalReq->debugID.present())
g_traceBatch.addEvent(
"TransactionDebug", pOriginalReq->debugID.get().first(), "storageserver.mapKeyValues.Start");
state Tuple mappedKeyFormatTuple;
state Tuple mappedKeyTuple;
@@ -4175,6 +4188,9 @@ ACTOR Future<GetMappedKeyValuesReply> mapKeyValues(StorageServer* data,
state std::vector<MappedKeyValueRef> kvms(k);
state std::vector<Future<Void>> subqueries;
state int offset = 0;
if (pOriginalReq->debugID.present())
g_traceBatch.addEvent(
"TransactionDebug", pOriginalReq->debugID.get().first(), "storageserver.mapKeyValues.BeforeLoop");
for (; offset < sz; offset += SERVER_KNOBS->MAX_PARALLEL_QUICK_GET_VALUE) {
// Divide into batches of MAX_PARALLEL_QUICK_GET_VALUE subqueries
for (int i = 0; i + offset < sz && i < SERVER_KNOBS->MAX_PARALLEL_QUICK_GET_VALUE; i++) {
@@ -4210,11 +4226,17 @@ ACTOR Future<GetMappedKeyValuesReply> mapKeyValues(StorageServer* data,
mappedKey));
}
wait(waitForAll(subqueries));
if (pOriginalReq->debugID.present())
g_traceBatch.addEvent(
"TransactionDebug", pOriginalReq->debugID.get().first(), "storageserver.mapKeyValues.AfterBatch");
subqueries.clear();
for (int i = 0; i + offset < sz && i < SERVER_KNOBS->MAX_PARALLEL_QUICK_GET_VALUE; i++) {
result.data.push_back(result.arena, kvms[i]);
}
}
if (pOriginalReq->debugID.present())
g_traceBatch.addEvent(
"TransactionDebug", pOriginalReq->debugID.get().first(), "storageserver.mapKeyValues.AfterAll");
return result;
}