Fix warnings on unused variables

Found by the -Wunused-variable flag.
Jingyu Zhou 2019-03-29 13:21:15 -07:00
parent a55f06e082
commit f7f8ddd894
19 changed files with 14 additions and 34 deletions
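
For context: -Wunused-variable is the GCC/Clang diagnostic for a local (or non-constant file-scope static) variable that is declared but never read. The sketch below, with hypothetical names not taken from the FoundationDB tree, shows the warning and the two remedies this commit applies: deleting the dead declaration outright, or commenting it out where the line is still useful as documentation.

    // warning_demo.cpp -- illustrative only; compile with: g++ -Wunused-variable -c warning_demo.cpp
    // (the flag is also enabled by -Wall)
    #include <chrono>

    static double now() {     // stand-in for flow's now(), defined here so the sketch is self-contained
        using namespace std::chrono;
        return duration<double>(steady_clock::now().time_since_epoch()).count();
    }

    int demo() {
        int nExceptions = 3;   // warning: unused variable 'nExceptions' [-Wunused-variable]
        double t = now();      // warning: unused variable 't' [-Wunused-variable]
        int used = 2;
        return used * used;
    }

    int demoFixed() {
        //double t = now();    // remedy used in e.g. the CoroFlow and IndexedSet hunks below: comment it out
        int used = 2;          // most other hunks simply delete the dead declaration
        return used * used;
    }

Where a discarded initializer still has a side effect that matters, only the named result is dropped and the call itself is kept, as in the strtol change below.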

@@ -959,9 +959,6 @@ void printStatus(StatusObjectReader statusObj, StatusClient::StatusLevel level,
StatusObjectReader machinesMap;
outputStringCache = outputString;
-// this bool removed code duplication when there's an else (usually due to a missing field) that should print some error message
-// which would be the same error message if the catch block was hit
-bool success = false;
try {
outputString += "\n FoundationDB processes - ";
if (statusObjCluster.get("processes", processesMap)) {

@@ -36,7 +36,7 @@ ACTOR static Future<vector<AddressExclusion>> getExcludedServers(Transaction* tr
bool isInteger(const std::string& s) {
if( s.empty() ) return false;
char *p;
-auto ign = strtol(s.c_str(), &p, 10);
+strtol(s.c_str(), &p, 10);
return (*p == 0);
}
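
The isInteger() change above keeps the strtol call purely for its side effect on the end pointer: strtol stores the address of the first unparsed character in p, so *p == 0 holds exactly when the whole string was consumed, and the numeric result itself is never needed. A standalone sketch of the idiom (not from the repo; isIntegerLike is a hypothetical name):

    // strtol_endptr_demo.cpp
    #include <cstdio>
    #include <cstdlib>
    #include <string>

    static bool isIntegerLike(const std::string& s) {
        if (s.empty()) return false;
        char* p;
        strtol(s.c_str(), &p, 10);  // return value discarded; p is set to the first unparsed character
        return *p == 0;             // true only if parsing reached the terminating NUL
    }

    int main() {
        // prints "1 0 0"; note that strtol still skips leading whitespace and accepts a sign
        printf("%d %d %d\n", (int)isIntegerLike("42"), (int)isIntegerLike("42x"), (int)isIntegerLike(""));
        return 0;
    }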

@@ -250,7 +250,6 @@ public:
{
bool bComplete = true;
int nItemsLeft = _mutableEntryArray.size();
-int nExceptions = exceptionArray.size();
while (nRandomItems > 0)
{

@@ -279,8 +279,7 @@ ACTOR void coroSwitcher( Future<Void> what, int taskID, Coro* coro ) {
void CoroThreadPool::waitFor( Future<Void> what ) {
ASSERT (current_coro != main_coro);
if (what.isReady()) return;
-Coro* c = current_coro;
-double t = now();
+//double t = now();
coroSwitcher( what, g_network->getCurrentTask(), current_coro );
Coro_switchTo_( swapCoro(main_coro), main_coro );
//if (g_network->isSimulated() && g_simulator.getCurrentProcess()->rebooting && now()!=t)

@@ -1191,7 +1191,6 @@ struct DDTeamCollection : ReferenceCounted<DDTeamCollection> {
TraceEvent("ServerInfo").detail("Size", server_info.size());
for (auto& server : server_info) {
-const UID& uid = server.first;
TraceEvent("ServerInfo")
.detail("ServerInfoIndex", i++)
.detail("ServerID", server.first.toString())
@@ -1314,7 +1313,6 @@ struct DDTeamCollection : ReferenceCounted<DDTeamCollection> {
// return number of added machine teams
int addBestMachineTeams(int targetMachineTeamsToBuild) {
int addedMachineTeams = 0;
-int totalServerIndex = 0;
int machineTeamsToBuild = 0;
ASSERT(targetMachineTeamsToBuild >= 0);
@@ -1827,7 +1825,6 @@ struct DDTeamCollection : ReferenceCounted<DDTeamCollection> {
ACTOR static Future<Void> buildTeams( DDTeamCollection* self ) {
state int desiredTeams;
int serverCount = 0;
-int uniqueDataCenters = 0;
int uniqueMachines = 0;
std::set<Optional<Standalone<StringRef>>> machines;
@@ -3805,7 +3802,7 @@ TEST_CASE("DataDistribution/AddTeamsBestOf/UseMachineID") {
Reference<IReplicationPolicy> policy = Reference<IReplicationPolicy>(new PolicyAcross(teamSize, "zoneid", Reference<IReplicationPolicy>(new PolicyOne())));
state DDTeamCollection* collection = testMachineTeamCollection(teamSize, policy, processSize);
-int result = collection->addTeamsBestOf(30, desiredTeams, maxTeams);
+collection->addTeamsBestOf(30, desiredTeams, maxTeams);
ASSERT(collection->sanityCheckTeams() == true);
@@ -3831,7 +3828,7 @@ TEST_CASE("DataDistribution/AddTeamsBestOf/NotUseMachineID") {
}
collection->addBestMachineTeams(30); // Create machine teams to help debug
-int result = collection->addTeamsBestOf(30, desiredTeams, maxTeams);
+collection->addTeamsBestOf(30, desiredTeams, maxTeams);
collection->sanityCheckTeams(); // Server team may happen to be on the same machine team, although unlikely
if (collection) delete (collection);
@@ -3916,7 +3913,7 @@ TEST_CASE("/DataDistribution/AddTeamsBestOf/NotEnoughServers") {
collection->addTeam(std::set<UID>({ UID(1, 0), UID(2, 0), UID(3, 0) }), true);
collection->addTeam(std::set<UID>({ UID(1, 0), UID(3, 0), UID(4, 0) }), true);
-int resultMachineTeams = collection->addBestMachineTeams(10);
+collection->addBestMachineTeams(10);
int result = collection->addTeamsBestOf(10, desiredTeams, maxTeams);
if (collection->machineTeams.size() != 10 || result != 8) {

@@ -761,7 +761,7 @@ struct RawCursor {
int fragmentedWaste = kv.key.size() * (fragments - 1);
// Total bytes used for the fragmented case
-int fragmentedTotal = kv.expectedSize() + fragmentedWaste;
+//int fragmentedTotal = kv.expectedSize() + fragmentedWaste;
// Calculate bytes saved by having extra key instances stored vs the original partial overflow page bytes.
int savings = unfragmentedWaste - fragmentedWaste;

@@ -57,7 +57,6 @@ struct LogRouterData {
int64_t messagesErased = 0;
while(!self->version_messages.empty() && self->version_messages.front().first == version) {
-auto const& m = self->version_messages.front();
++messagesErased;
self->version_messages.pop_front();

@@ -1152,8 +1152,6 @@ ACTOR static Future<Void> transactionStarter(
vector<vector<GetReadVersionRequest>> start(2); // start[0] is transactions starting with !(flags&CAUSAL_READ_RISKY), start[1] is transactions starting with flags&CAUSAL_READ_RISKY
Optional<UID> debugID;
-double leftToStart = 0;
-double batchLeftToStart = 0;
int requestsToStart = 0;
while (!transactionQueue.empty() && requestsToStart < SERVER_KNOBS->START_TRANSACTION_MAX_REQUESTS_TO_START) {
auto& req = transactionQueue.top().first;
@@ -1196,9 +1194,7 @@ ACTOR static Future<Void> transactionStarter(
.detail("NumSystemTransactionsStarted", systemTransactionsStarted[0] + systemTransactionsStarted[1])
.detail("NumNonSystemTransactionsStarted", transactionsStarted[0] + transactionsStarted[1] - systemTransactionsStarted[0] - systemTransactionsStarted[1])
.detail("TransactionBudget", transactionBudget)
.detail("BatchTransactionBudget", batchTransactionBudget)
.detail("LastLeftToStart", leftToStart)
.detail("LastBatchLeftToStart", batchLeftToStart);*/
.detail("BatchTransactionBudget", batchTransactionBudget);*/
transactionCount += transactionsStarted[0] + transactionsStarted[1];
batchTransactionCount += batchPriTransactionsStarted[0] + batchPriTransactionsStarted[1];

@@ -132,7 +132,6 @@ ACTOR Future<Void> resolveBatch(
vector<int> commitList;
vector<int> tooOldList;
-double commitTime = now();
// Detect conflicts
double expire = now() + SERVER_KNOBS->SAMPLE_EXPIRATION_TIME;

@@ -2303,7 +2303,6 @@ TEST_CASE("/status/json/builderPerf") {
int iterations = 200;
printf("Generating and serializing random document\n");
double start = timer();
int64_t bytes = 0;
double generated = 0;

@@ -223,14 +223,13 @@ static int VFSAsyncFileSize(sqlite3_file *pFile, sqlite_int64 *pSize){
}
static int asyncLock(sqlite3_file *pFile, int eLock){
-VFSAsyncFile *p = (VFSAsyncFile*)pFile;
+//VFSAsyncFile *p = (VFSAsyncFile*)pFile;
//TraceEvent("FileLock").detail("File", p->filename).detail("Fd", p->file->debugFD()).detail("PrevLockLevel", p->lockLevel).detail("Op", eLock).detail("LockCount", *p->pLockCount);
return eLock == EXCLUSIVE_LOCK ? SQLITE_BUSY : SQLITE_OK;
}
static int asyncUnlock(sqlite3_file *pFile, int eLock) {
VFSAsyncFile *p = (VFSAsyncFile*)pFile;
assert( eLock <= SHARED_LOCK );
return SQLITE_OK;

@@ -220,7 +220,6 @@ static std::vector<BoundaryAndPage> buildPages(bool minimalBoundaries, StringRef
// If flush then write a page using records from start to i. It's guaranteed that pageUpperBound has been set above.
if(flush) {
end = i == iEnd; // i could have been moved above
-int count = i - start;
debug_printf("Flushing page start=%d i=%d\nlower='%s'\nupper='%s'\n", start, i, pageLowerBound.toHexString(20).c_str(), pageUpperBound.toHexString(20).c_str());
ASSERT(pageLowerBound <= pageUpperBound);
for(int j = start; j < i; ++j) {

@@ -895,7 +895,7 @@ int main(int argc, char* argv[]) {
const char *targetKey = NULL;
uint64_t memLimit = 8LL << 30; // Nice to maintain the same default value for memLimit and SERVER_KNOBS->SERVER_MEM_LIMIT and SERVER_KNOBS->COMMIT_BATCHES_MEM_BYTES_HARD_LIMIT
uint64_t storageMemLimit = 1LL << 30;
-bool buggifyEnabled = false, machineIdOverride = false, restarting = false;
+bool buggifyEnabled = false, restarting = false;
Optional<Standalone<StringRef>> zoneId;
Optional<Standalone<StringRef>> dcId;
ProcessClass processClass = ProcessClass( ProcessClass::UnsetClass, ProcessClass::CommandLineSource );

@@ -703,7 +703,6 @@ ACTOR Future<DistributedTestResults> runWorkload( Database cx, std::vector< Test
for(int i= 0; i < workloads.size(); i++)
metricTasks.push_back( workloads[i].metrics.template getReply<vector<PerfMetric>>() );
wait( waitForAllReady( metricTasks ) );
-int failedMetrics = 0;
for(int i = 0; i < metricTasks.size(); i++) {
if(!metricTasks[i].isError())
metricsResults.push_back( metricTasks[i].get() );

@@ -55,7 +55,6 @@ TEST_CASE("/flow/Deque/queue") {
TEST_CASE("/flow/Deque/max_size") {
Deque<uint8_t> q;
-double begin = timer();
for (int i = 0; i < 10; i++)
q.push_back(i);
q.pop_front();

@@ -129,7 +129,7 @@ TEST_CASE("/flow/IndexedSet/erase 400k of 1M") {
is.testonly_assertBalanced();
int count = 0;
-for (auto i : is) ++count;
+for (auto& i : is) ++count;
ASSERT(count*3 == is.sumTo(is.end()));
@@ -232,7 +232,7 @@ TEST_CASE("/flow/IndexedSet/random ops") {
auto before = timer();
is.erase(ib, ie);
-auto erase_time = timer() - before;
+//auto erase_time = timer() - before;
is.testonly_assertBalanced();
int count = 0, incount = 0;
@@ -399,7 +399,7 @@ TEST_CASE("/flow/IndexedSet/all numbers") {
for (int i = 0; i<100000; i++) {
int b = g_random->randomInt(1, (int)allNumbers.size());
int64_t ntotal = int64_t(b)*(b - 1) / 2;
-int64_t nmax = int64_t(b + 1)*(b) / 2;
+//int64_t nmax = int64_t(b + 1)*(b) / 2;
int64_t n = ntotal;// + g_random->randomInt( 0, int(std::max<int64_t>(1<<30,nmax-ntotal)) );
auto ii = is.index(n);
int ib = ii != is.end() ? *ii : 1000000;

@@ -1499,7 +1499,6 @@ static void enableLargePages() {
}
static void *allocateInternal(size_t length, bool largePages) {
-void *block = NULL;
#ifdef _WIN32
DWORD allocType = MEM_COMMIT|MEM_RESERVE;

@@ -386,7 +386,7 @@ size_t raw_backtrace(void** addresses, int maxStackDepth);
std::string get_backtrace();
std::string format_backtrace(void **addresses, int numAddresses);
-}; // namespace platform
+} // namespace platform
#ifdef __linux__
typedef struct {

@@ -277,7 +277,7 @@ inline _IncludeVersion IncludeVersion( uint64_t defaultVersion = currentProtocol
inline _AssumeVersion AssumeVersion( uint64_t version ) { return _AssumeVersion(version); }
inline _Unversioned Unversioned() { return _Unversioned(); }
-static uint64_t size_limits[] = { 0ULL, 255ULL, 65535ULL, 16777215ULL, 4294967295ULL, 1099511627775ULL, 281474976710655ULL, 72057594037927935ULL, 18446744073709551615ULL };
+//static uint64_t size_limits[] = { 0ULL, 255ULL, 65535ULL, 16777215ULL, 4294967295ULL, 1099511627775ULL, 281474976710655ULL, 72057594037927935ULL, 18446744073709551615ULL };
class BinaryWriter : NonCopyable {
public:
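
The last hunk comments out a non-constant, internal-linkage array defined at header scope. Under GCC, a static like this that nothing reads triggers the "defined but not used" form of -Wunused-variable in every translation unit that includes the header, so commenting it out (or deleting it) clears a whole family of repeated warnings at once. A small sketch with hypothetical names:

    // limits_demo.h -- illustrative header, not from the repo
    #ifndef LIMITS_DEMO_H
    #define LIMITS_DEMO_H
    #include <cstdint>

    // Every .cpp that includes this header gets its own unused copy and a warning like:
    //   warning: 'kSizeLimits' defined but not used [-Wunused-variable]
    static uint64_t kSizeLimits[] = { 0ULL, 255ULL, 65535ULL, 16777215ULL };

    #endif // LIMITS_DEMO_H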