Merge pull request #9007 from sfc-gh-tclinkenbeard/fix-clang-15-warnings
Fix clang 15 compiler warnings
commit 10070fd7f2

@@ -89,9 +89,9 @@ public:
 };

 class alignas(64) ThreadStatistics {
-    uint64_t conflicts;
-    uint64_t total_errors;
-    uint64_t total_timeouts;
+    uint64_t conflicts{ 0 };
+    uint64_t total_errors{ 0 };
+    uint64_t total_timeouts{ 0 };
     std::array<uint64_t, MAX_OP> ops;
     std::array<uint64_t, MAX_OP> errors;
     std::array<uint64_t, MAX_OP> timeouts;

@@ -101,7 +101,11 @@ class alignas(64) ThreadStatistics {

 public:
     ThreadStatistics() noexcept {
-        memset(this, 0, sizeof(ThreadStatistics));
+        std::fill(ops.begin(), ops.end(), 0);
+        std::fill(errors.begin(), errors.end(), 0);
+        std::fill(timeouts.begin(), timeouts.end(), 0);
+        std::fill(latency_samples.begin(), latency_samples.end(), 0);
+        std::fill(latency_us_total.begin(), latency_us_total.end(), 0);
         sketches.resize(MAX_OP);
     }

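Editor's note (not part of the diff): the two hunks above replace a whole-object memset with in-class initializers and explicit std::fill calls. Once ThreadStatistics owns a non-trivially-copyable member (the sketches vector resized in the constructor), zeroing the object byte-wise is undefined behavior, and clang 15 flags it via -Wclass-memaccess. A minimal self-contained sketch of the pattern, with hypothetical names:

    #include <algorithm>
    #include <array>
    #include <cstdint>
    #include <vector>

    class alignas(64) Stats {
        uint64_t conflicts{ 0 };     // scalar members: in-class initializers
        std::array<uint64_t, 8> ops; // aggregate member: zeroed with std::fill
        std::vector<int> sketches;   // non-trivial member: memset(this, ...) would be UB

    public:
        Stats() noexcept {
            // memset(this, 0, sizeof(Stats)); // clang 15: -Wclass-memaccess
            std::fill(ops.begin(), ops.end(), 0);
            sketches.resize(8);
        }
    };
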
@@ -3048,7 +3048,7 @@ static std::vector<std::vector<StringRef>> parseLine(std::string& line, bool& er

 static void addKeyRange(std::string optionValue, Standalone<VectorRef<KeyRangeRef>>& keyRanges) {
     bool err = false, partial = false;
-    int tokenArray = 0;
+    [[maybe_unused]] int tokenArray = 0;

     auto parsed = parseLine(optionValue, err, partial);

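Most of the remaining hunks deal with clang 15's -Wunused-but-set-variable, which now fires for C++: a variable that is written (typically incremented) but never read. Where the tally is worth keeping around, the commit annotates it with the C++17 [[maybe_unused]] attribute rather than deleting it. A minimal sketch:

    int parse(int tokens) {
        [[maybe_unused]] int tokenArray = 0; // written below, never read; the attribute
                                             // silences -Wunused-but-set-variable
        for (int i = 0; i < tokens; ++i)
            tokenArray++;
        return tokens;
    }
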
@@ -9694,15 +9694,11 @@ Version ChangeFeedData::getVersion() {
 // native api has consumed and processed, them, and then the fdb client has consumed all of the mutations.
 ACTOR Future<Void> changeFeedWaitLatest(Reference<ChangeFeedData> self, Version version) {
     // wait on SS to have sent up through version
-    int desired = 0;
-    int waiting = 0;
     std::vector<Future<Void>> allAtLeast;
     for (auto& it : self->storageData) {
         if (it->version.get() < version) {
-            waiting++;
             if (version > it->desired.get()) {
                 it->desired.set(version);
-                desired++;
             }
             allAtLeast.push_back(it->version.whenAtLeast(version));
         }

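Where a set-but-unused counter has no diagnostic value, the commit deletes it outright, as with desired and waiting above. A hypothetical snippet (not from the repo) showing what clang 15 now rejects under -Werror:

    #include <vector>

    void count(const std::vector<int>& xs) {
        int waiting = 0; // clang 15: variable 'waiting' set but not used
        for (int x : xs)
            if (x > 0)
                waiting++; // only ever written; deleting both lines is the fix
    }
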
@@ -112,7 +112,7 @@ void populateVersionVector(VersionVector& vv,
     int tagsPerLocality = tagCount / localityCount;

     // Populate localities.
-    for (int i = 0; localities.size() < (size_t)localityCount; i++) {
+    while (localities.size() < (size_t)localityCount) {
         int8_t locality = deterministicRandom()->randomInt(tagLocalityInvalid + 1, INT8_MAX);
         if (std::find(localities.begin(), localities.end(), locality) == localities.end()) {
             localities.push_back(locality);

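The hunk above shows the same warning arising from a loop induction variable: i was incremented but never read, so the for collapses into a while over the condition that actually terminates the loop. A minimal before/after sketch with hypothetical names:

    #include <cstddef>
    #include <set>

    void pickDistinct(std::set<int>& out, std::size_t target) {
        // before: for (int i = 0; out.size() < target; i++) { ... }  // 'i' unused
        while (out.size() < target) { // after: loop directly on the condition
            out.insert(static_cast<int>(out.size()));
        }
    }
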
@@ -80,8 +80,9 @@ public:
     Future<Void> done() { return reader; }

 private:
-    Version beginVersion, endVersion, currentBeginVersion;
-    unsigned pipelineDepth;
+    [[maybe_unused]] Version beginVersion;
+    Version endVersion, currentBeginVersion;
+    [[maybe_unused]] unsigned pipelineDepth;
     Future<Void> reader;
 };

@@ -77,13 +77,11 @@ bool PolicyOne::selectReplicas(Reference<LocalitySet>& fromServers,
                                std::vector<LocalityEntry> const& alsoServers,
                                std::vector<LocalityEntry>& results) {
     int totalUsed = 0;
-    int itemsUsed = 0;
     if (alsoServers.size()) {
         totalUsed++;
     } else if (fromServers->size()) {
         auto randomEntry = fromServers->random();
         results.push_back(randomEntry);
-        itemsUsed++;
         totalUsed++;
     }
     return (totalUsed > 0);

@@ -1150,7 +1150,7 @@ struct DDQueue : public IDDRelocationQueue {
     // canceled inflight relocateData. Launch the relocation for the rd.
     void launchQueuedWork(std::set<RelocateData, std::greater<RelocateData>> combined,
                           const DDEnabledState* ddEnabledState) {
-        int startedHere = 0;
+        [[maybe_unused]] int startedHere = 0;
         double startTime = now();
         // kick off relocators from items in the queue as need be
         std::set<RelocateData, std::greater<RelocateData>>::iterator it = combined.begin();

@@ -4007,7 +4007,6 @@ void DDTeamCollection::traceAllInfo(bool shouldPrint) const {

 void DDTeamCollection::rebuildMachineLocalityMap() {
     machineLocalityMap.clear();
-    int numHealthyMachine = 0;
     for (auto& [_, machine] : machine_info) {
         if (machine->serversOnMachine.empty()) {
             TraceEvent(SevWarn, "RebuildMachineLocalityMapError")

@@ -4028,7 +4027,6 @@ void DDTeamCollection::rebuildMachineLocalityMap() {
         }
         const LocalityEntry& localityEntry = machineLocalityMap.add(locality, &representativeServer->getId());
         machine->localityEntry = localityEntry;
-        ++numHealthyMachine;
     }
 }

@@ -5819,43 +5817,43 @@ TEST_CASE("/DataDistribution/GetTeam/DeprioritizeWigglePausedTeam") {
 }

 TEST_CASE("/DataDistribution/StorageWiggler/NextIdWithMinAge") {
-    state StorageWiggler wiggler(nullptr);
+    state Reference<StorageWiggler> wiggler = makeReference<StorageWiggler>(nullptr);
     state double startTime = now();
-    wiggler.addServer(UID(1, 0),
-                      StorageMetadataType(startTime - SERVER_KNOBS->DD_STORAGE_WIGGLE_MIN_SS_AGE_SEC + 5.0,
-                                          KeyValueStoreType::SSD_BTREE_V2));
-    wiggler.addServer(UID(2, 0),
-                      StorageMetadataType(
-                          startTime + SERVER_KNOBS->DD_STORAGE_WIGGLE_MIN_SS_AGE_SEC, KeyValueStoreType::MEMORY, true));
-    wiggler.addServer(UID(3, 0), StorageMetadataType(startTime - 5.0, KeyValueStoreType::SSD_ROCKSDB_V1, true));
-    wiggler.addServer(UID(4, 0),
-                      StorageMetadataType(startTime - SERVER_KNOBS->DD_STORAGE_WIGGLE_MIN_SS_AGE_SEC - 1.0,
-                                          KeyValueStoreType::SSD_BTREE_V2));
+    wiggler->addServer(UID(1, 0),
+                       StorageMetadataType(startTime - SERVER_KNOBS->DD_STORAGE_WIGGLE_MIN_SS_AGE_SEC + 5.0,
+                                           KeyValueStoreType::SSD_BTREE_V2));
+    wiggler->addServer(UID(2, 0),
+                       StorageMetadataType(startTime + SERVER_KNOBS->DD_STORAGE_WIGGLE_MIN_SS_AGE_SEC,
+                                           KeyValueStoreType::MEMORY,
+                                           true));
+    wiggler->addServer(UID(3, 0), StorageMetadataType(startTime - 5.0, KeyValueStoreType::SSD_ROCKSDB_V1, true));
+    wiggler->addServer(UID(4, 0),
+                       StorageMetadataType(startTime - SERVER_KNOBS->DD_STORAGE_WIGGLE_MIN_SS_AGE_SEC - 1.0,
+                                           KeyValueStoreType::SSD_BTREE_V2));
     std::vector<Optional<UID>> correctResult{ UID(3, 0), UID(2, 0), UID(4, 0), Optional<UID>() };
     for (int i = 0; i < 4; ++i) {
-        auto id = wiggler.getNextServerId();
+        auto id = wiggler->getNextServerId();
         ASSERT(id == correctResult[i]);
     }

     {
         std::cout << "Finish Initial Check. Start test getNextWigglingServerID() loop...\n";
         // test the getNextWigglingServerID() loop
-        UID id = wait(DDTeamCollectionImpl::getNextWigglingServerID(Reference<StorageWiggler>::addRef(&wiggler)));
+        UID id = wait(DDTeamCollectionImpl::getNextWigglingServerID(wiggler));
         ASSERT(id == UID(1, 0));
     }

     std::cout << "Test after addServer() ...\n";
-    state Future<UID> nextFuture =
-        DDTeamCollectionImpl::getNextWigglingServerID(Reference<StorageWiggler>::addRef(&wiggler));
+    state Future<UID> nextFuture = DDTeamCollectionImpl::getNextWigglingServerID(wiggler);
     ASSERT(!nextFuture.isReady());
     startTime = now();
     StorageMetadataType metadata(startTime + SERVER_KNOBS->DD_STORAGE_WIGGLE_MIN_SS_AGE_SEC + 100.0,
                                  KeyValueStoreType::SSD_BTREE_V2);
-    wiggler.addServer(UID(5, 0), metadata);
+    wiggler->addServer(UID(5, 0), metadata);
     ASSERT(!nextFuture.isReady());

     std::cout << "Test after updateServer() ...\n";
-    StorageWiggler* ptr = &wiggler;
+    StorageWiggler* ptr = wiggler.getPtr();
     wait(trigger(
         [ptr]() {
             ptr->updateMetadata(UID(5, 0),

@@ -5872,22 +5870,22 @@ TEST_CASE("/DataDistribution/StorageWiggler/NextIdWithMinAge") {
 TEST_CASE("/DataDistribution/StorageWiggler/NextIdWithTSS") {
     state std::unique_ptr<DDTeamCollection> collection =
         DDTeamCollectionUnitTest::testMachineTeamCollection(1, Reference<IReplicationPolicy>(new PolicyOne()), 5);
-    state StorageWiggler wiggler(collection.get());
+    state Reference<StorageWiggler> wiggler = makeReference<StorageWiggler>(collection.get());

     std::cout << "Test when need TSS ... \n";
     collection->configuration.usableRegions = 1;
     collection->configuration.desiredTSSCount = 1;
     state double startTime = now();
-    wiggler.addServer(UID(1, 0),
-                      StorageMetadataType(startTime + SERVER_KNOBS->DD_STORAGE_WIGGLE_MIN_SS_AGE_SEC + 150.0,
-                                          KeyValueStoreType::SSD_BTREE_V2));
-    wiggler.addServer(UID(2, 0),
-                      StorageMetadataType(startTime + SERVER_KNOBS->DD_STORAGE_WIGGLE_MIN_SS_AGE_SEC + 150.0,
-                                          KeyValueStoreType::SSD_BTREE_V2));
-    ASSERT(!wiggler.getNextServerId(true).present());
-    ASSERT(wiggler.getNextServerId(collection->reachTSSPairTarget()) == UID(1, 0));
-    UID id = wait(DDTeamCollectionImpl::getNextWigglingServerID(
-        Reference<StorageWiggler>::addRef(&wiggler), Optional<Value>(), Optional<Value>(), collection.get()));
+    wiggler->addServer(UID(1, 0),
+                       StorageMetadataType(startTime + SERVER_KNOBS->DD_STORAGE_WIGGLE_MIN_SS_AGE_SEC + 150.0,
+                                           KeyValueStoreType::SSD_BTREE_V2));
+    wiggler->addServer(UID(2, 0),
+                       StorageMetadataType(startTime + SERVER_KNOBS->DD_STORAGE_WIGGLE_MIN_SS_AGE_SEC + 150.0,
+                                           KeyValueStoreType::SSD_BTREE_V2));
+    ASSERT(!wiggler->getNextServerId(true).present());
+    ASSERT(wiggler->getNextServerId(collection->reachTSSPairTarget()) == UID(1, 0));
+    UID id = wait(
+        DDTeamCollectionImpl::getNextWigglingServerID(wiggler, Optional<Value>(), Optional<Value>(), collection.get()));
     ASSERT(now() - startTime < SERVER_KNOBS->DD_STORAGE_WIGGLE_MIN_SS_AGE_SEC + 150.0);
     ASSERT(id == UID(2, 0));
     return Void();

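The StorageWiggler test hunks are the one refactor that goes beyond deleting or annotating a variable: a stack-allocated StorageWiggler wrapped via Reference<StorageWiggler>::addRef(&wiggler) becomes a heap allocation through makeReference. Presumably this avoids handing an intrusive reference count a pointer it does not own; a Reference outliving the stack frame would decrement the count of an already-destroyed object. A rough sketch of the hazard using std::shared_ptr as a stand-in (hypothetical types, not the FDB API):

    #include <memory>

    struct Wiggler {
        int nextId = 0;
    };

    std::shared_ptr<Wiggler> makeHandle() {
        // before (hazard): Wiggler w; return wrap(&w); // handle outlives the stack object
        return std::make_shared<Wiggler>();             // after: ownership lives in the handle
    }
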
@@ -1912,14 +1912,11 @@ private:
             readThreads[i].clear();
     }
     void checkFreePages() {
-        int iterations = 0;
-
         int64_t freeListSize = freeListPages;
         while (!freeTableEmpty && freeListSize < SERVER_KNOBS->CHECK_FREE_PAGE_AMOUNT) {
             int deletedPages = cursor->lazyDelete(SERVER_KNOBS->CHECK_FREE_PAGE_AMOUNT);
             freeTableEmpty = (deletedPages != SERVER_KNOBS->CHECK_FREE_PAGE_AMOUNT);
             springCleaningStats.lazyDeletePages += deletedPages;
-            ++iterations;

             freeListSize = conn.freePages();
         }

@@ -639,7 +639,6 @@ int readRangeInDb(PhysicalShard* shard, const KeyRangeRef range, int rowLimit, i
         return 0;
     }

-    int accumulatedRows = 0;
     int accumulatedBytes = 0;
     rocksdb::Status s;

@@ -651,7 +650,6 @@ int readRangeInDb(PhysicalShard* shard, const KeyRangeRef range, int rowLimit, i
         cursor->Seek(toSlice(range.begin));
         while (cursor->Valid() && toStringRef(cursor->key()) < range.end) {
             KeyValueRef kv(toStringRef(cursor->key()), toStringRef(cursor->value()));
-            ++accumulatedRows;
             accumulatedBytes += sizeof(KeyValueRef) + kv.expectedSize();
             result->push_back_deep(result->arena(), kv);
             // Calling `cursor->Next()` is potentially expensive, so short-circut here just in case.

@@ -671,7 +669,6 @@ int readRangeInDb(PhysicalShard* shard, const KeyRangeRef range, int rowLimit, i
         }
         while (cursor->Valid() && toStringRef(cursor->key()) >= range.begin) {
             KeyValueRef kv(toStringRef(cursor->key()), toStringRef(cursor->value()));
-            ++accumulatedRows;
             accumulatedBytes += sizeof(KeyValueRef) + kv.expectedSize();
             result->push_back_deep(result->arena(), kv);
             // Calling `cursor->Prev()` is potentially expensive, so short-circut here just in case.

@@ -60,11 +60,8 @@ struct LogRouterData {
                                          TaskPriority taskID) {
         while (!self->version_messages.empty() && self->version_messages.front().first < before) {
             Version version = self->version_messages.front().first;
-            int64_t messagesErased = 0;
-
             while (!self->version_messages.empty() && self->version_messages.front().first == version) {
-                ++messagesErased;
                 self->version_messages.pop_front();
             }

@@ -284,12 +284,10 @@ ACTOR static Future<Void> getAndComputeStagingKeys(
         .detail("GetKeys", incompleteStagingKeys.size())
         .detail("DelayTime", delayTime);
     ASSERT(!g_network->isSimulated());
-    int i = 0;
     for (auto& key : incompleteStagingKeys) {
         MutationRef m(MutationRef::SetValue, key.first, "0"_sr);
         key.second->second.add(m, LogMessageVersion(1));
         key.second->second.precomputeResult("GetAndComputeStagingKeys", applierID, batchIndex);
-        i++;
     }
     return Void();
 }

@@ -2350,7 +2350,7 @@ void setupSimulatedSystem(std::vector<Future<Void>>* systemActors,
         .detail("ConfigString", startingConfigString);

     bool requiresExtraDBMachines = !g_simulator->extraDatabases.empty() && !useLocalDatabase;
-    int assignedMachines = 0, nonVersatileMachines = 0;
+    int assignedMachines = 0;
     bool gradualMigrationPossible = true;
     std::vector<ProcessClass::ClassType> processClassesSubSet = { ProcessClass::UnsetClass,
                                                                   ProcessClass::StatelessClass };

@@ -2404,10 +2404,7 @@ void setupSimulatedSystem(std::vector<Future<Void>>* systemActors,
             else
                 processClass = ProcessClass((ProcessClass::ClassType)deterministicRandom()->randomInt(0, 3),
                                             ProcessClass::CommandLineSource); // Unset, Storage, or Transaction
-            if (processClass ==
-                ProcessClass::StatelessClass) { // *can't* be assigned to other roles, even in an emergency
-                nonVersatileMachines++;
-            }

             if (processClass == ProcessClass::UnsetClass || processClass == ProcessClass::StorageClass) {
                 possible_ss++;
             }

@@ -2419,11 +2416,9 @@ void setupSimulatedSystem(std::vector<Future<Void>>* systemActors,
             if (machine >= machines) {
                 if (storageCacheMachines > 0 && dc == 0) {
                     processClass = ProcessClass(ProcessClass::StorageCacheClass, ProcessClass::CommandLineSource);
-                    nonVersatileMachines++;
                     storageCacheMachines--;
                 } else if (blobWorkerMachines > 0) { // add blob workers to every DC
                     processClass = ProcessClass(ProcessClass::BlobWorkerClass, ProcessClass::CommandLineSource);
-                    nonVersatileMachines++;
                     blobWorkerMachines--;
                 }
             }

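Note that removing nonVersatileMachines touches three hunks: the declaration and every increment site go together, since deleting only the declaration would turn the warning into hard compile errors at each remaining write. A compressed sketch of the shape of the change (hypothetical names):

    void assignClasses(bool storageCache, bool blobWorker) {
        // removed: int nonVersatileMachines = 0; // never read
        if (storageCache) {
            // removed: nonVersatileMachines++;   // every write site goes with it
        } else if (blobWorker) {
            // removed: nonVersatileMachines++;
        }
    }
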
@@ -6724,7 +6724,7 @@ private:
     debug_print(addPrefix(context, update->toString()));

     if (REDWOOD_DEBUG) {
-        int c = 0;
+        [[maybe_unused]] int c = 0;
         auto i = mBegin;
         while (1) {
             debug_printf("%s Mutation %4d '%s': %s\n",

@@ -670,8 +670,8 @@ struct BlobGranuleCorrectnessWorkload : TestWorkload {
         } else {
             int targetQueryBytes = (deterministicRandom()->randomInt(1, 20) * targetBytesReadPerQuery) / 10;
             int estimatedQueryBytes = 0;
-            for (int i = 0; estimatedQueryBytes < targetQueryBytes && endKeyIt != threadData->keyData.end();
-                 i++, endKeyIt++) {
+            for (; estimatedQueryBytes < targetQueryBytes && endKeyIt != threadData->keyData.end();
+                 endKeyIt++) {
                 // iterate forward until end or target keys have passed
                 estimatedQueryBytes += (1 + endKeyIt->second.writes.size() - endKeyIt->second.nextClearIdx) *
                                        threadData->targetValLength;

@@ -88,7 +88,6 @@ struct WatchAndWaitWorkload : TestWorkload {

     ACTOR Future<Void> _start(Database cx, WatchAndWaitWorkload* self) {
         state std::vector<Future<Void>> watches;
-        int watchCounter = 0;
         uint64_t endNode = (self->nodeCount * (self->clientId + 1)) / self->clientCount;
         uint64_t startNode = (self->nodeCount * self->clientId) / self->clientCount;
         uint64_t NodesPerWatch = self->nodeCount / self->watchCount;

@@ -100,7 +99,6 @@ struct WatchAndWaitWorkload : TestWorkload {
             .detail("Npw", NodesPerWatch);
         for (uint64_t i = startNode; i < endNode; i += NodesPerWatch) {
             watches.push_back(self->watchAndWait(cx, self, i));
-            watchCounter++;
         }
         wait(delay(self->testDuration)); // || waitForAll( watches )
         TraceEvent("WatchAndWaitEnd").detail("Duration", self->testDuration);

@@ -74,7 +74,7 @@ public:

     // Moves all timers that are scheduled to be executed at or before now to the ready queue.
     void processReadyTimers(double now) {
-        int numTimers = 0;
+        [[maybe_unused]] int numTimers = 0;
         while (!timers.empty() && timers.top().at <= now + INetwork::TIME_EPS) {
             ++numTimers;
             ++countTimers;

@@ -86,7 +86,7 @@ public:

     // Moves all tasks scheduled from a different thread to the ready queue.
     void processThreadReady() {
-        int numReady = 0;
+        [[maybe_unused]] int numReady = 0;
         while (true) {
             Optional<std::pair<TaskPriority, Task*>> t = threadReady.pop();
             if (!t.present())