Fix some clang warnings about unused variables
This commit is contained in:
parent aee717f5d6
commit c127bb1c30
@@ -42,7 +42,7 @@ ACTOR static Future<Void> produce(ParallelStream<ParallelStreamTest::TestValue>:
 }

 ACTOR static Future<Void> consume(FutureStream<ParallelStreamTest::TestValue> stream, int expected) {
-	state int next;
+	state int next = 0;
 	try {
 		loop {
 			ParallelStreamTest::TestValue value = waitNext(stream);
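For context: `state` variables in Flow actors are hoisted into the generated actor class, and clang can flag code paths where a local may be read before it is initialized. A minimal standalone sketch of that warning class in plain C++ (not actor-compiled code; the names are hypothetical):

	// clang -Wall (which enables the -Wuninitialized group) reports:
	// "variable 'next' is used uninitialized whenever 'if' condition is false"
	// [-Wsometimes-uninitialized]. Initializing at the declaration, as the
	// hunk above does with 'state int next = 0;', silences it.
	int consumeDemo(bool ready) {
		int next;
		if (ready)
			next = 1;
		return next;
	}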
@@ -284,7 +284,6 @@ Future<ConfigurationResult> changeConfig(Reference<DB> db, std::map<std::string,
 	state Key versionKey = BinaryWriter::toValue(deterministicRandom()->randomUniqueID(), Unversioned());
 	state bool oldReplicationUsesDcId = false;
 	state bool warnPPWGradual = false;
-	state bool warnChangeStorageNoMigrate = false;
 	state bool warnRocksDBIsExperimental = false;
 	state bool warnShardedRocksDBIsExperimental = false;
 	loop {
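The deleted flag was declared and initialized but never read again, which is exactly what clang's -Wunused-variable (enabled by -Wall) reports. A minimal standalone sketch of the warning named in the commit title, with hypothetical names:

	void changeConfigDemo() {
		bool warnDemo = false; // clang -Wall: unused variable 'warnDemo' [-Wunused-variable]
		// Nothing below reads warnDemo, so deleting the declaration, as in
		// the hunk above, fixes the warning without changing behavior.
	}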
@@ -3549,7 +3549,7 @@ ACTOR Future<Void> doBlobGranuleFileRequest(Reference<BlobWorkerData> bwData, Bl
 			continue;
 		}
 		state Reference<GranuleMetadata> metadata = m;
-		state Version granuleBeginVersion = req.beginVersion;
+		// state Version granuleBeginVersion = req.beginVersion;
 		// skip waiting for CF ready for recovery mode
 		if (!isFullRestoreMode()) {
 			choose {
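Here the declaration is commented out rather than deleted, presumably to document the intended future use. A hedged alternative worth knowing (not what this commit does): C++17's standard [[maybe_unused]] attribute keeps the declaration compiled and type-checked without triggering the warning. Sketch with hypothetical names:

	#include <cstdint>

	void granuleDemo(int64_t reqBeginVersion) {
		// Stays in the code and keeps type-checking against its initializer,
		// but clang will not report it under -Wunused-variable:
		[[maybe_unused]] int64_t granuleBeginVersion = reqBeginVersion;
	}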
@@ -382,7 +382,6 @@ ACTOR Future<bool> checkDataConsistency(Database cx,
 	// Note: this may cause some shards to be processed more than once or not at all in a non-quiescent database
 	state int effectiveClientCount = distributed ? clientCount : 1;
 	state int i = clientId * (shardSampleFactor + 1);
-	state int increment = (distributed && !firstClient) ? effectiveClientCount * shardSampleFactor : 1;
 	state int64_t rateLimitForThisRound =
 	    *bytesReadInPrevRound == 0
 	        ? maxRate
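One caveat when deleting unused variables like `increment` above: outright removal only preserves behavior because the initializer is pure arithmetic. A standalone sketch of the distinction, with hypothetical names:

	int nextId() {
		static int id = 0;
		return ++id; // an initializer with a side effect
	}

	void rateDemo(bool distributed, int clientCount, int factor) {
		// Pure arithmetic, never read: deleting the whole line, as in the
		// hunk above, is behavior-preserving.
		int increment = distributed ? clientCount * factor : 1;
		// Also unused, but its initializer mutates state: delete only the
		// variable and keep the call, i.e. rewrite as nextId();
		int ticket = nextId();
	}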
@@ -272,9 +272,6 @@ ACTOR Future<Void> trackShardMetrics(DataDistributionTracker::SafeAccessor self,
 	state double lastLowBandwidthStartTime =
 	    shardMetrics->get().present() ? shardMetrics->get().get().lastLowBandwidthStartTime : now();
 	state int shardCount = shardMetrics->get().present() ? shardMetrics->get().get().shardCount : 1;
-	state ReadBandwidthStatus readBandwidthStatus = shardMetrics->get().present()
-	                                                    ? getReadBandwidthStatus(shardMetrics->get().get().metrics)
-	                                                    : ReadBandwidthStatusNormal;
 	state bool initWithNewMetrics = whenDDInit;
 	wait(delay(0, TaskPriority::DataDistribution));

@@ -1518,8 +1518,6 @@ public:
 	                                           ServerStatus* status,
 	                                           Version addedVersion) {
 	state StorageServerInterface interf = server->getLastKnownInterface();
-	state int targetTeamNumPerServer =
-	    (SERVER_KNOBS->DESIRED_TEAMS_PER_SERVER * (self->configuration.storageTeamSize + 1)) / 2;
 	loop {
 		state bool inHealthyZone = false; // healthChanged actor will be Never() if this flag is true
 		if (self->healthyZone.get().present()) {
@@ -836,14 +836,13 @@ private:
 		useDelta = false;

 		auto thisSnapshotEnd = self->log_op(OpSnapshotEnd, StringRef(), StringRef());
-		//TraceEvent("SnapshotEnd", self->id)
-		//    .detail("LastKey", lastKey.present() ? lastKey.get() : "<none>"_sr)
-		//    .detail("CurrentSnapshotEndLoc", self->currentSnapshotEnd)
-		//    .detail("PreviousSnapshotEndLoc", self->previousSnapshotEnd)
-		//    .detail("ThisSnapshotEnd", thisSnapshotEnd)
-		//    .detail("Items", snapItems)
-		//    .detail("CommittedWrites", self->notifiedCommittedWriteBytes.get())
-		//    .detail("SnapshotSize", snapshotBytes);
+		DisabledTraceEvent("SnapshotEnd", self->id)
+		    .detail("CurrentSnapshotEndLoc", self->currentSnapshotEnd)
+		    .detail("PreviousSnapshotEndLoc", self->previousSnapshotEnd)
+		    .detail("ThisSnapshotEnd", thisSnapshotEnd)
+		    .detail("Items", snapItems)
+		    .detail("CommittedWrites", self->notifiedCommittedWriteBytes.get())
+		    .detail("SnapshotSize", snapshotBytes);

 		ASSERT(thisSnapshotEnd >= self->currentSnapshotEnd);
 		self->previousSnapshotEnd = self->currentSnapshotEnd;
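Swapping the commented-out TraceEvent for DisabledTraceEvent has a concrete benefit: the .detail() arguments keep compiling, so variables such as snapItems and snapshotBytes count as used and the expressions cannot silently rot. A rough sketch of the pattern, assuming DisabledTraceEvent behaves as a no-op drop-in (its real definition is in FDB's flow sources; the class below is a hypothetical stand-in, not the actual implementation):

	struct DisabledTraceEventSketch {
		template <class IdType>
		DisabledTraceEventSketch(const char* /*type*/, const IdType& /*id*/) {}

		template <class T>
		DisabledTraceEventSketch& detail(const char* /*key*/, const T& /*value*/) {
			return *this; // arguments are evaluated and type-checked, then discarded
		}
	};

Because the arguments are still evaluated, side-effect-free details like the fields above cost little and can be optimized away, while the call site stays ready to re-enable.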
@@ -2607,7 +2607,6 @@ TEST_CASE("noSim/ShardedRocksDB/Initialization") {

 	state IKeyValueStore* kvStore =
 	    new ShardedRocksDBKeyValueStore(rocksDBTestDir, deterministicRandom()->randomUniqueID());
-	state ShardedRocksDBKeyValueStore* rocksDB = dynamic_cast<ShardedRocksDBKeyValueStore*>(kvStore);
 	wait(kvStore->init());

 	Future<Void> closed = kvStore->onClosed();
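Here (and in the next hunk) the dynamic_cast result was stored but never read, so the declaration only produced a warning. If the cast was meant as a runtime type check, a hedged alternative (hypothetical types, not what the commit does) is to assert on the cast instead:

	#include <cassert>

	struct IKeyValueStoreDemo { virtual ~IKeyValueStoreDemo() = default; };
	struct ShardedRocksDemo : IKeyValueStoreDemo {};

	void initDemo(IKeyValueStoreDemo* kvStore) {
		// Keeps the dynamic-type check without leaving an unused variable behind:
		assert(dynamic_cast<ShardedRocksDemo*>(kvStore) != nullptr);
	}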
@@ -2622,7 +2621,6 @@ TEST_CASE("noSim/ShardedRocksDB/SingleShardRead") {

 	state IKeyValueStore* kvStore =
 	    new ShardedRocksDBKeyValueStore(rocksDBTestDir, deterministicRandom()->randomUniqueID());
-	state ShardedRocksDBKeyValueStore* rocksDB = dynamic_cast<ShardedRocksDBKeyValueStore*>(kvStore);
 	wait(kvStore->init());

 	KeyRangeRef range("a"_sr, "b"_sr);