Fix formatting in StorageMetrics.* files

Bharadwaj V.R 2022-08-29 10:02:29 -07:00
parent 8df9dd72bb
commit 63cb2841a5
2 changed files with 103 additions and 103 deletions

View File

@@ -56,10 +56,10 @@ KeyRef StorageMetricSample::splitEstimate(KeyRangeRef range, int64_t offset, boo
auto it = fwd_split;
++it;
- KeyRef split = keyBetween(
- KeyRangeRef(*fwd_split, it != sample.end() ? std::min<KeyRef>(*it, range.end) : range.end));
- if (front || (getEstimate(KeyRangeRef(split, range.end)) > 0 &&
- split.size() <= CLIENT_KNOBS->SPLIT_KEY_SIZE_LIMIT))
+ KeyRef split =
+ keyBetween(KeyRangeRef(*fwd_split, it != sample.end() ? std::min<KeyRef>(*it, range.end) : range.end));
+ if (front ||
+ (getEstimate(KeyRangeRef(split, range.end)) > 0 && split.size() <= CLIENT_KNOBS->SPLIT_KEY_SIZE_LIMIT))
return split;
fwd_split = it;
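
The hunk above only reflows splitEstimate's line wrapping; the underlying idea is to walk the byte sample until the cumulative estimate reaches the requested offset, then pick a split key near that point with keyBetween. A minimal standalone sketch of that idea, using a plain std::map in place of FDB's sampled IndexedSet and returning the sampled key itself rather than a keyBetween() result:

// Standalone sketch, not FDB code: estimate a split key from a sparse byte sample.
#include <cstdint>
#include <iostream>
#include <map>
#include <string>

std::string splitEstimate(const std::map<std::string, int64_t>& sample,
                          const std::string& begin,
                          const std::string& end,
                          int64_t offset) {
    int64_t sum = 0;
    for (auto it = sample.lower_bound(begin); it != sample.end() && it->first < end; ++it) {
        sum += it->second;
        if (sum >= offset)
            return it->first; // first sampled key at or past the requested byte offset
    }
    return end; // not enough sampled bytes before `end`; caller would fall back
}

int main() {
    std::map<std::string, int64_t> sample{ { "apple", 100 }, { "kiwi", 300 }, { "plum", 200 } };
    std::cout << splitEstimate(sample, "a", "z", 350) << "\n"; // prints "kiwi" (100 + 300 >= 350)
}
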
@@ -77,8 +77,7 @@ StorageMetrics StorageServerMetrics::getMetrics(KeyRangeRef const& keys) const {
result.bytes = byteSample.getEstimate(keys);
result.bytesPerKSecond =
bandwidthSample.getEstimate(keys) * SERVER_KNOBS->STORAGE_METRICS_AVERAGE_INTERVAL_PER_KSECONDS;
- result.iosPerKSecond =
- iopsSample.getEstimate(keys) * SERVER_KNOBS->STORAGE_METRICS_AVERAGE_INTERVAL_PER_KSECONDS;
+ result.iosPerKSecond = iopsSample.getEstimate(keys) * SERVER_KNOBS->STORAGE_METRICS_AVERAGE_INTERVAL_PER_KSECONDS;
result.bytesReadPerKSecond =
bytesReadSample.getEstimate(keys) * SERVER_KNOBS->STORAGE_METRICS_AVERAGE_INTERVAL_PER_KSECONDS;
return result;
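
For context on the knob multiplications above: the samples hold traffic observed over an averaging window, and multiplying by the *_PER_KSECONDS knob turns that into a per-kilosecond rate. A small illustration, under the assumption that the knob is simply 1000 divided by the window length in seconds; the numbers below are made up and are not FoundationDB knob defaults:

// Illustration only: converting a windowed byte count into a per-kilosecond rate.
#include <cstdint>
#include <iostream>

int main() {
    const double averageIntervalSeconds = 10.0;                         // assumed sampling window
    const double intervalPerKSeconds = 1000.0 / averageIntervalSeconds; // windows per kilosecond
    const int64_t bytesInWindow = 4'000'000;                            // bytes seen during one window
    // 4 MB per 10 s window scales to 400 MB per 1000 s.
    std::cout << "bytesPerKSecond = " << bytesInWindow * intervalPerKSeconds << "\n";
}
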
@@ -138,7 +137,8 @@ void StorageServerMetrics::notifyBytesReadPerKSecond(KeyRef key, int64_t in) {
// Called by StorageServerDisk when the size of a key in byteSample changes, to notify WaitMetricsRequest
// Should not be called for keys past allKeys.end
- void StorageServerMetrics::notifyBytes(RangeMap<Key, std::vector<PromiseStream<StorageMetrics>>, KeyRangeRef>::iterator shard,
+ void StorageServerMetrics::notifyBytes(
+ RangeMap<Key, std::vector<PromiseStream<StorageMetrics>>, KeyRangeRef>::iterator shard,
int64_t bytes) {
ASSERT(shard.end() <= allKeys.end);
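
notifyBytes, whose signature is rewrapped above, pushes a StorageMetrics delta to every WaitMetricsRequest waiting on the shard whose size changed. A rough stand-in using plain callbacks instead of FDB's RangeMap and PromiseStream types; the names and layout here are illustrative only:

// Rough stand-in, not FDB's types: notify every waiter registered on a shard
// when that shard's byte count changes.
#include <cstdint>
#include <functional>
#include <iostream>
#include <map>
#include <string>
#include <vector>

// Waiters keyed by shard-begin key (hypothetical layout, for illustration only).
using WaiterMap = std::map<std::string, std::vector<std::function<void(int64_t)>>>;

void notifyBytes(WaiterMap& waiters, const std::string& shardBegin, int64_t bytes) {
    auto it = waiters.find(shardBegin);
    if (it == waiters.end())
        return;
    for (auto& waiter : it->second)
        waiter(bytes); // the real code sends a StorageMetrics delta through each PromiseStream
}

int main() {
    WaiterMap waiters;
    waiters["userPrefix/"].push_back([](int64_t b) { std::cout << "shard grew by " << b << " bytes\n"; });
    notifyBytes(waiters, "userPrefix/", 4096);
}
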
@@ -347,7 +347,8 @@ void StorageServerMetrics::getStorageMetrics(GetStorageMetricsRequest req,
// Given a read hot shard, this function will divide the shard into chunks and find those chunks whose
// readBytes/sizeBytes exceeds the `readDensityRatio`. Please make sure to run unit tests
// `StorageMetricsSampleTests.txt` after change made.
- std::vector<ReadHotRangeWithMetrics> StorageServerMetrics::getReadHotRanges(KeyRangeRef shard,
+ std::vector<ReadHotRangeWithMetrics> StorageServerMetrics::getReadHotRanges(
+ KeyRangeRef shard,
double readDensityRatio,
int64_t baseChunkSize,
int64_t minShardReadBandwidthPerKSeconds) const {
@@ -364,8 +365,7 @@ std::vector<ReadHotRangeWithMetrics> StorageServerMetrics::getReadHotRanges(KeyR
if (bytesReadSample.getEstimate(shard) > (readDensityRatio * shardSize)) {
toReturn.emplace_back(shard,
bytesReadSample.getEstimate(shard) / shardSize,
- bytesReadSample.getEstimate(shard) /
- SERVER_KNOBS->STORAGE_METRICS_AVERAGE_INTERVAL);
+ bytesReadSample.getEstimate(shard) / SERVER_KNOBS->STORAGE_METRICS_AVERAGE_INTERVAL);
}
return toReturn;
}
@@ -392,14 +392,14 @@ std::vector<ReadHotRangeWithMetrics> StorageServerMetrics::getReadHotRanges(KeyR
range = KeyRangeRef(toReturn.back().keys.begin, *endKey);
toReturn.pop_back();
}
- toReturn.emplace_back(
- range,
- (double)bytesReadSample.getEstimate(range) / std::max(baseChunkSize, byteSample.getEstimate(range)),
+ toReturn.emplace_back(range,
+ (double)bytesReadSample.getEstimate(range) /
+ std::max(baseChunkSize, byteSample.getEstimate(range)),
bytesReadSample.getEstimate(range) / SERVER_KNOBS->STORAGE_METRICS_AVERAGE_INTERVAL);
}
beginKey = *endKey;
- endKey = byteSample.sample.index(byteSample.sample.sumTo(byteSample.sample.lower_bound(beginKey)) +
- baseChunkSize);
+ endKey =
+ byteSample.sample.index(byteSample.sample.sumTo(byteSample.sample.lower_bound(beginKey)) + baseChunkSize);
}
return toReturn;
}
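
As the comment before getReadHotRanges notes, the function walks the shard in roughly baseChunkSize pieces and keeps the chunks whose read bytes divided by stored bytes exceed readDensityRatio. A simplified sketch of that selection rule over pre-computed chunks; the real code derives the chunk boundaries from the byte sample, which is omitted here:

// Simplified sketch of the read-hot selection rule, not the actual FDB loop.
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

struct Chunk {
    int64_t sizeBytes;
    int64_t readBytes;
};

std::vector<int> readHotChunks(const std::vector<Chunk>& chunks, double readDensityRatio, int64_t baseChunkSize) {
    std::vector<int> hot;
    for (int i = 0; i < (int)chunks.size(); ++i) {
        // baseChunkSize acts as a floor so tiny chunks do not look artificially hot.
        double density = (double)chunks[i].readBytes / std::max(baseChunkSize, chunks[i].sizeBytes);
        if (density > readDensityRatio)
            hot.push_back(i);
    }
    return hot;
}

int main() {
    std::vector<Chunk> chunks{ { 1 << 20, 8 << 20 }, { 1 << 20, 1 << 18 } };
    for (int i : readHotChunks(chunks, 2.0, 1 << 20))
        std::cout << "chunk " << i << " is read-hot\n"; // only chunk 0 qualifies
}
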
@@ -426,7 +426,9 @@ void StorageServerMetrics::getSplitPoints(SplitRangeRequest req, Optional<Key> p
req.reply.send(reply);
}
- std::vector<KeyRef> StorageServerMetrics::getSplitPoints(KeyRangeRef range, int64_t chunkSize, Optional<Key> prefixToRemove) const {
+ std::vector<KeyRef> StorageServerMetrics::getSplitPoints(KeyRangeRef range,
+ int64_t chunkSize,
+ Optional<Key> prefixToRemove) const {
std::vector<KeyRef> toReturn;
KeyRef beginKey = range.begin;
IndexedSet<Key, int64_t>::const_iterator endKey =
@@ -445,8 +447,7 @@ std::vector<KeyRef> StorageServerMetrics::getSplitPoints(KeyRangeRef range, int6
}
toReturn.push_back(splitPoint);
beginKey = *endKey;
- endKey =
- byteSample.sample.index(byteSample.sample.sumTo(byteSample.sample.lower_bound(beginKey)) + chunkSize);
+ endKey = byteSample.sample.index(byteSample.sample.sumTo(byteSample.sample.lower_bound(beginKey)) + chunkSize);
}
return toReturn;
}
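
getSplitPoints, whose signature is reflowed in the previous hunk, chooses split keys so that each resulting chunk holds about chunkSize sampled bytes. A standalone approximation of that loop using a running prefix sum over a std::map in place of byteSample.sample.index()/sumTo():

// Standalone approximation of getSplitPoints: emit a split key whenever the
// running sampled byte count reaches chunkSize.
#include <cstdint>
#include <iostream>
#include <map>
#include <string>
#include <vector>

std::vector<std::string> getSplitPoints(const std::map<std::string, int64_t>& sample, int64_t chunkSize) {
    std::vector<std::string> splits;
    int64_t runningBytes = 0;
    for (const auto& [key, bytes] : sample) {
        runningBytes += bytes;
        if (runningBytes >= chunkSize) { // crossed a chunk boundary: split here
            splits.push_back(key);
            runningBytes = 0;
        }
    }
    return splits;
}

int main() {
    std::map<std::string, int64_t> sample{ { "a", 40 }, { "b", 40 }, { "c", 40 }, { "d", 40 } };
    for (const auto& key : getSplitPoints(sample, 70))
        std::cout << "split at " << key << "\n"; // prints "b" then "d"
}
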
@@ -496,11 +497,11 @@ void TransientStorageMetricSample::erase(KeyRangeRef keys) {
}
bool TransientStorageMetricSample::roll(KeyRef key, int64_t metric) const {
- return deterministicRandom()->random01() <
- (double)metric / metricUnitsPerSample; //< SOMEDAY: Better randomInt64?
+ return deterministicRandom()->random01() < (double)metric / metricUnitsPerSample; //< SOMEDAY: Better randomInt64?
}
- void TransientStorageMetricSample::poll(KeyRangeMap<std::vector<PromiseStream<StorageMetrics>>>& waitMap, StorageMetrics m) {
+ void TransientStorageMetricSample::poll(KeyRangeMap<std::vector<PromiseStream<StorageMetrics>>>& waitMap,
+ StorageMetrics m) {
double now = ::now();
while (queue.size() && queue.front().first <= now) {
KeyRef key = queue.front().second.first;
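
The roll() one-liner in the hunk above is the sampling rule for TransientStorageMetricSample: an update of `metric` units is kept with probability metric / metricUnitsPerSample, so roughly one sample survives per metricUnitsPerSample units of traffic. A self-contained check of that expectation; the knob value below is illustrative, not an FDB default:

// Self-contained check of the probabilistic sampling rule used by roll().
#include <cstdint>
#include <iostream>
#include <random>

bool roll(std::mt19937_64& rng, int64_t metric, int64_t metricUnitsPerSample) {
    std::uniform_real_distribution<double> random01(0.0, 1.0);
    return random01(rng) < (double)metric / metricUnitsPerSample;
}

int main() {
    std::mt19937_64 rng(42);
    const int64_t metricUnitsPerSample = 10000; // illustrative value only
    const int64_t writes = 100000, bytesPerWrite = 100;
    int64_t kept = 0;
    for (int64_t i = 0; i < writes; ++i)
        kept += roll(rng, bytesPerWrite, metricUnitsPerSample);
    // Expect roughly writes * bytesPerWrite / metricUnitsPerSample = 1000 kept samples.
    std::cout << "kept " << kept << " of " << writes << " writes\n";
}
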

View File

@@ -67,7 +67,6 @@ struct TransientStorageMetricSample : StorageMetricSample {
private:
bool roll(KeyRef key, int64_t metric) const;
int64_t add(KeyRef key, int64_t metric);
};
struct StorageServerMetrics {