Use structured bindings in for loops
parent 0d4e81e6b4 · commit 5b2e88b187
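For readers skimming the diff, the pattern applied throughout is the same: iterator-based loops become range-based for loops, and loops over associative containers additionally use C++17 structured bindings so the key and value get real names instead of ->first/->second. A minimal, self-contained sketch of the before/after (illustrative only, not code from this repository; the option map and its contents are made up):

#include <cstdio>
#include <map>
#include <string>

int main() {
    std::map<std::string, std::string> options = { { "timeout", "60" }, { "trace", "on" } };

    // Before: explicit iterators and ->first / ->second.
    for (auto itr = options.begin(); itr != options.end(); ++itr)
        printf("%s=%s\n", itr->first.c_str(), itr->second.c_str());

    // After: a structured binding names the key and value directly,
    // and const auto& documents that the loop only reads the elements.
    for (const auto& [name, value] : options)
        printf("%s=%s\n", name.c_str(), value.c_str());

    return 0;
}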
@@ -1006,8 +1006,7 @@ static void printAgentUsage(bool devhelp) {
 void printBackupContainerInfo() {
     printf(" Backup URL forms:\n\n");
     std::vector<std::string> formats = IBackupContainer::getURLFormats();
-    for(auto &f : formats)
-        printf(" %s\n", f.c_str());
+    for (const auto& f : formats) printf(" %s\n", f.c_str());
     printf("\n");
 }
@@ -152,17 +152,16 @@ public:

     //Applies all enabled transaction options to the given transaction
     void apply(Reference<ReadYourWritesTransaction> tr) {
-        for(auto itr = transactionOptions.options.begin(); itr != transactionOptions.options.end(); ++itr)
-            tr->setOption(itr->first, itr->second.castTo<StringRef>());
+        for (const auto& [name, value] : transactionOptions.options) {
+            tr->setOption(name, value.castTo<StringRef>());
+        }
     }

     //Returns true if any options have been set
-    bool hasAnyOptionsEnabled() {
-        return !transactionOptions.options.empty();
-    }
+    bool hasAnyOptionsEnabled() const { return !transactionOptions.options.empty(); }

     //Prints a list of enabled options, along with their parameters (if any)
-    void print() {
+    void print() const {
         bool found = false;
         found = found || transactionOptions.print();

@@ -171,14 +170,10 @@ public:
     }

     //Returns a vector of the names of all documented options
-    std::vector<std::string> getValidOptions() {
-        return transactionOptions.getValidOptions();
-    }
+    std::vector<std::string> getValidOptions() const { return transactionOptions.getValidOptions(); }

     //Prints the help string obtained by invoking `help options'
-    void printHelpString() {
-        transactionOptions.printHelpString();
-    }
+    void printHelpString() const { transactionOptions.printHelpString(); }

 private:
     //Sets a transaction option. If intrans == true, then this option is also applied to the passed in transaction.

@@ -219,7 +214,7 @@ private:
     }

     //Prints a list of all enabled options in this group
-    bool print() {
+    bool print() const {
         bool found = false;

         for(auto itr = legalOptions.begin(); itr != legalOptions.end(); ++itr) {

@@ -238,7 +233,7 @@ private:
     }

     //Returns true if the specified option is documented
-    bool isDocumented(typename T::Option option) {
+    bool isDocumented(typename T::Option option) const {
         FDBOptionInfo info = T::optionInfo.getMustExist(option);

         std::string deprecatedStr = "Deprecated";

@@ -246,7 +241,7 @@ private:
     }

     //Returns a vector of the names of all documented options
-    std::vector<std::string> getValidOptions() {
+    std::vector<std::string> getValidOptions() const {
         std::vector<std::string> ret;

         for (auto itr = legalOptions.begin(); itr != legalOptions.end(); ++itr)

@@ -258,7 +253,7 @@ private:

     //Prints a help string for each option in this group. Any options with no comment
     //are excluded from this help string. Lines are wrapped to 80 characters.
-    void printHelpString() {
+    void printHelpString() const {
         for(auto itr = legalOptions.begin(); itr != legalOptions.end(); ++itr) {
             if(isDocumented(itr->second)) {
                 FDBOptionInfo info = T::optionInfo.getMustExist(itr->second);
@@ -629,12 +624,12 @@ void printVersion() {

 void printHelpOverview() {
     printf("\nList of commands:\n\n");
-    for (auto i = helpMap.begin(); i != helpMap.end(); ++i)
-        if (i->second.short_desc.size())
-            printf(" %s:\n %s\n", i->first.c_str(), i->second.short_desc.c_str());
-    printf("\nFor information on a specific command, type `help <command>'.");
-    printf("\nFor information on escaping keys and values, type `help escaping'.");
-    printf("\nFor information on available options, type `help options'.\n\n");
+    for (const auto& [command, help] : helpMap) {
+        if (help.short_desc.size()) printf(" %s:\n %s\n", command.c_str(), help.short_desc.c_str());
+        printf("\nFor information on a specific command, type `help <command>'.");
+        printf("\nFor information on escaping keys and values, type `help escaping'.");
+        printf("\nFor information on available options, type `help options'.\n\n");
+    }
 }

 void printHelp(StringRef command) {
@@ -2005,16 +2000,18 @@ ACTOR Future<bool> fileConfigure(Database db, std::string filePath, bool isNewDa
         configString = "new";
     }

-    for(auto kv : configJSON) {
+    for (const auto& [name, value] : configJSON) {
         if(!configString.empty()) {
             configString += " ";
         }
-        if( kv.second.type() == json_spirit::int_type ) {
-            configString += kv.first + ":=" + format("%d", kv.second.get_int());
-        } else if( kv.second.type() == json_spirit::str_type ) {
-            configString += kv.second.get_str();
-        } else if( kv.second.type() == json_spirit::array_type ) {
-            configString += kv.first + "=" + json_spirit::write_string(json_spirit::mValue(kv.second.get_array()), json_spirit::Output_options::none);
+        if (value.type() == json_spirit::int_type) {
+            configString += name + ":=" + format("%d", value.get_int());
+        } else if (value.type() == json_spirit::str_type) {
+            configString += value.get_str();
+        } else if (value.type() == json_spirit::array_type) {
+            configString +=
+                name + "=" +
+                json_spirit::write_string(json_spirit::mValue(value.get_array()), json_spirit::Output_options::none);
         } else {
             printUsage(LiteralStringRef("fileconfigure"));
             return true;

@@ -2229,8 +2226,7 @@ ACTOR Future<bool> exclude( Database db, std::vector<StringRef> tokens, Referenc
     }

     printf("There are currently %zu servers or processes being excluded from the database:\n", excl.size());
-    for(auto& e : excl)
-        printf(" %s\n", e.toString().c_str());
+    for (const auto& e : excl) printf(" %s\n", e.toString().c_str());

     printf("To find out whether it is safe to remove one or more of these\n"
            "servers from the cluster, type `exclude <addresses>'.\n"

@@ -2435,7 +2431,7 @@ ACTOR Future<bool> exclude( Database db, std::vector<StringRef> tokens, Referenc

     bool foundCoordinator = false;
     auto ccs = ClusterConnectionFile( ccf->getFilename() ).getConnectionString();
-    for( auto& c : ccs.coordinators()) {
+    for (const auto& c : ccs.coordinators()) {
         if (std::count(exclusionVector.begin(), exclusionVector.end(), AddressExclusion(c.ip, c.port)) ||
             std::count(exclusionVector.begin(), exclusionVector.end(), AddressExclusion(c.ip))) {
             printf("WARNING: %s is a coordinator!\n", c.toString().c_str());

@@ -2483,7 +2479,7 @@ ACTOR Future<bool> setClass( Database db, std::vector<StringRef> tokens ) {
     std::sort(workers.begin(), workers.end(), ProcessData::sort_by_address());

     printf("There are currently %zu processes in the database:\n", workers.size());
-    for(auto& w : workers)
+    for (const auto& w : workers)
         printf(" %s: %s (%s)\n", w.address.toString().c_str(), w.processClass.toString().c_str(), w.processClass.sourceString().c_str());
     return false;
 }

@@ -2841,22 +2837,25 @@ struct CLIOptions {
     ClientKnobs* clientKnobs = new ClientKnobs;
     CLIENT_KNOBS = clientKnobs;

-    for(auto k=knobs.begin(); k!=knobs.end(); ++k) {
+    for (const auto& [knob, value] : knobs) {
         try {
-            if (!flowKnobs->setKnob( k->first, k->second ) &&
-                !clientKnobs->setKnob( k->first, k->second ))
-            {
-                fprintf(stderr, "WARNING: Unrecognized knob option '%s'\n", k->first.c_str());
-                TraceEvent(SevWarnAlways, "UnrecognizedKnobOption").detail("Knob", printable(k->first));
+            if (!flowKnobs->setKnob(knob, value) && !clientKnobs->setKnob(knob, value)) {
+                fprintf(stderr, "WARNING: Unrecognized knob option '%s'\n", knob.c_str());
+                TraceEvent(SevWarnAlways, "UnrecognizedKnobOption").detail("Knob", printable(knob));
             }
         } catch (Error& e) {
             if (e.code() == error_code_invalid_option_value) {
-                fprintf(stderr, "WARNING: Invalid value '%s' for knob option '%s'\n", k->second.c_str(), k->first.c_str());
-                TraceEvent(SevWarnAlways, "InvalidKnobValue").detail("Knob", printable(k->first)).detail("Value", printable(k->second));
+                fprintf(stderr, "WARNING: Invalid value '%s' for knob option '%s'\n", value.c_str(), knob.c_str());
+                TraceEvent(SevWarnAlways, "InvalidKnobValue")
+                    .detail("Knob", printable(knob))
+                    .detail("Value", printable(value));
             }
             else {
-                fprintf(stderr, "ERROR: Failed to set knob option '%s': %s\n", k->first.c_str(), e.what());
-                TraceEvent(SevError, "FailedToSetKnob").detail("Knob", printable(k->first)).detail("Value", printable(k->second)).error(e);
+                fprintf(stderr, "ERROR: Failed to set knob option '%s': %s\n", knob.c_str(), e.what());
+                TraceEvent(SevError, "FailedToSetKnob")
+                    .detail("Knob", printable(knob))
+                    .detail("Value", printable(value))
+                    .error(e);
                 exit_code = FDB_EXIT_ERROR;
             }
         }
@@ -159,8 +159,7 @@ public:
     state int i;

     // Validate each filename, update version range
-    for (i = 0; i < fileNames.size(); ++i) {
-        auto const& f = fileNames[i];
+    for (const auto& f : fileNames) {
         if (pathToRangeFile(rf, f, 0)) {
             fileArray.push_back(f);
             if (rf.version < minVer) minVer = rf.version;
@@ -72,7 +72,7 @@ ACTOR static Future<BackupContainerFileSystem::FilesAndSizesT> listFiles_impl(st
                     [](std::string const& f) { return StringRef(f).endsWith(LiteralStringRef(".lnk")); }),
                 files.end());

-    for (auto& f : files) {
+    for (const auto& f : files) {
         // Hide .part or .temp files.
         StringRef s(f);
         if (!s.endsWith(LiteralStringRef(".part")) && !s.endsWith(LiteralStringRef(".temp")))

@@ -147,7 +147,7 @@ Future<std::vector<std::string>> BackupContainerLocalDirectory::listURLs(const s
     std::vector<std::string> dirs = platform::listDirectories(path);
     std::vector<std::string> results;

-    for (auto& r : dirs) {
+    for (const auto& r : dirs) {
         if (r == "." || r == "..") continue;
         results.push_back(std::string("file://") + joinPath(path, r));
     }
@@ -36,7 +36,7 @@ public:
     state std::string basePath = INDEXFOLDER + '/';
     S3BlobStoreEndpoint::ListResult contents = wait(bstore->listObjects(bucket, basePath));
     std::vector<std::string> results;
-    for (auto& f : contents.objects) {
+    for (const auto& f : contents.objects) {
         results.push_back(
             bstore->getResourceURL(f.name.substr(basePath.size()), format("bucket=%s", bucket.c_str())));
     }

@@ -82,7 +82,7 @@ public:
     state S3BlobStoreEndpoint::ListResult result = wait(bc->m_bstore->listObjects(
         bc->m_bucket, bc->dataPath(path), '/', std::numeric_limits<int>::max(), rawPathFilter));
     BackupContainerFileSystem::FilesAndSizesT files;
-    for (auto& o : result.objects) {
+    for (const auto& o : result.objects) {
         ASSERT(o.name.size() >= prefixTrim);
         files.push_back({ o.name.substr(prefixTrim), o.size });
     }

@@ -135,15 +135,13 @@ BackupContainerS3BlobStore::BackupContainerS3BlobStore(Reference<S3BlobStoreEndp
   : m_bstore(bstore), m_name(name), m_bucket("FDB_BACKUPS_V2") {

     // Currently only one parameter is supported, "bucket"
-    for (auto& kv : params) {
-        if (kv.first == "bucket") {
-            m_bucket = kv.second;
+    for (const auto& [name, value] : params) {
+        if (name == "bucket") {
+            m_bucket = value;
             continue;
         }
-        TraceEvent(SevWarn, "BackupContainerS3BlobStoreInvalidParameter")
-            .detail("Name", kv.first)
-            .detail("Value", kv.second);
-        IBackupContainer::lastOpenError = format("Unknown URL parameter: '%s'", kv.first.c_str());
+        TraceEvent(SevWarn, "BackupContainerS3BlobStoreInvalidParameter").detail("Name", name).detail("Value", value);
+        IBackupContainer::lastOpenError = format("Unknown URL parameter: '%s'", name.c_str());
         throw backup_invalid_url();
     }
 }
@@ -233,7 +233,7 @@ Reference<BlobStoreEndpoint> BlobStoreEndpoint::fromString(std::string const &ur
     }
 }

-std::string BlobStoreEndpoint::getResourceURL(std::string resource, std::string params) {
+std::string BlobStoreEndpoint::getResourceURL(std::string resource, std::string params) const {
     std::string hostPort = host;
     if(!service.empty()) {
         hostPort.append(":");

@@ -256,14 +256,14 @@ std::string BlobStoreEndpoint::getResourceURL(std::string resource, std::string
         params.append(knobParams);
     }

-    for(auto &kv : extraHeaders) {
+    for (const auto& [k, v] : extraHeaders) {
         if(!params.empty()) {
             params.append("&");
         }
         params.append("header=");
-        params.append(HTTP::urlEncode(kv.first));
+        params.append(HTTP::urlEncode(k));
         params.append(":");
-        params.append(HTTP::urlEncode(kv.second));
+        params.append(HTTP::urlEncode(v));
     }

     if(!params.empty())
@@ -1104,7 +1104,7 @@ bool DatabaseContext::getCachedLocations( const KeyRangeRef& range, vector<std::
 Reference<LocationInfo> DatabaseContext::setCachedLocation( const KeyRangeRef& keys, const vector<StorageServerInterface>& servers ) {
     vector<Reference<ReferencedInterface<StorageServerInterface>>> serverRefs;
     serverRefs.reserve(servers.size());
-    for(auto& interf : servers) {
+    for (const auto& interf : servers) {
         serverRefs.push_back( StorageServerInfo::getInterface( this, interf, clientLocality ) );
     }

@@ -1850,17 +1850,17 @@ Future< vector< pair<KeyRange,Reference<LocationInfo>> > > getKeyRangeLocations(
     }

     bool foundFailed = false;
-    for(auto& it : locations) {
+    for (const auto& [range, locInfo] : locations) {
         bool onlyEndpointFailed = false;
-        for(int i = 0; i < it.second->size(); i++) {
-            if( IFailureMonitor::failureMonitor().onlyEndpointFailed(it.second->get(i, member).getEndpoint()) ) {
+        for (int i = 0; i < locInfo->size(); i++) {
+            if (IFailureMonitor::failureMonitor().onlyEndpointFailed(locInfo->get(i, member).getEndpoint())) {
                 onlyEndpointFailed = true;
                 break;
             }
         }

         if( onlyEndpointFailed ) {
-            cx->invalidateCache( it.first.begin );
+            cx->invalidateCache(range.begin);
             foundFailed = true;
         }
     }
@@ -249,7 +249,7 @@ Reference<S3BlobStoreEndpoint> S3BlobStoreEndpoint::fromString(std::string const
     }
 }

-std::string S3BlobStoreEndpoint::getResourceURL(std::string resource, std::string params) {
+std::string S3BlobStoreEndpoint::getResourceURL(std::string resource, std::string params) const {
     std::string hostPort = host;
     if (!service.empty()) {
         hostPort.append(":");

@@ -271,14 +271,14 @@ std::string S3BlobStoreEndpoint::getResourceURL(std::string resource, std::strin
         params.append(knobParams);
     }

-    for (auto& kv : extraHeaders) {
+    for (const auto& [k, v] : extraHeaders) {
         if (!params.empty()) {
             params.append("&");
         }
         params.append("header=");
-        params.append(HTTP::urlEncode(kv.first));
+        params.append(HTTP::urlEncode(k));
         params.append(":");
-        params.append(HTTP::urlEncode(kv.second));
+        params.append(HTTP::urlEncode(v));
     }

     if (!params.empty()) r.append("?").append(params);

@@ -563,12 +563,12 @@ ACTOR Future<Reference<HTTP::Response>> doRequest_impl(Reference<S3BlobStoreEndp
     headers["Accept"] = "application/xml";

     // Merge extraHeaders into headers
-    for (auto& kv : bstore->extraHeaders) {
-        std::string& fieldValue = headers[kv.first];
+    for (const auto& [k, v] : bstore->extraHeaders) {
+        std::string& fieldValue = headers[k];
         if (!fieldValue.empty()) {
             fieldValue.append(",");
         }
-        fieldValue.append(kv.second);
+        fieldValue.append(v);
     }

     // For requests with content to upload, the request timeout should be at least twice the amount of time
@@ -123,7 +123,7 @@ public:

     // Get a normalized version of this URL with the given resource and any non-default BlobKnob values as URL
     // parameters in addition to the passed params string
-    std::string getResourceURL(std::string resource, std::string params);
+    std::string getResourceURL(std::string resource, std::string params) const;

     struct ReusableConnection {
         Reference<IConnection> conn;
@@ -70,7 +70,7 @@ public:
     void delref() { ReferenceCounted<BackupProgress>::delref(); }

 private:
-    std::set<Tag> enumerateLogRouterTags(int logRouterTags) {
+    std::set<Tag> enumerateLogRouterTags(int logRouterTags) const {
         std::set<Tag> tags;
         for (int i = 0; i < logRouterTags; i++) {
             tags.insert(Tag(tagLocalityLogRouter, i));
@@ -701,15 +701,15 @@ struct DDTeamCollection : ReferenceCounted<DDTeamCollection> {
         // The following kills a reference cycle between the teamTracker actor and the TCTeamInfo that both holds and is
         // held by the actor It also ensures that the trackers are done fiddling with healthyTeamCount before we free
         // this
-        for(int i=0; i < teams.size(); i++) {
-            teams[i]->tracker.cancel();
+        for (auto& team : teams) {
+            team->tracker.cancel();
         }
         // The commented TraceEvent log is useful in detecting what is running during the destruction
         // TraceEvent("DDTeamCollectionDestructed", distributorId)
         // .detail("Primary", primary)
         // .detail("TeamTrackerDestroyed", teams.size());
-        for(int i=0; i < badTeams.size(); i++) {
-            badTeams[i]->tracker.cancel();
+        for (auto& badTeam : badTeams) {
+            badTeam->tracker.cancel();
         }
         // TraceEvent("DDTeamCollectionDestructed", distributorId)
         // .detail("Primary", primary)
@@ -717,9 +717,9 @@ struct DDTeamCollection : ReferenceCounted<DDTeamCollection> {
         // The following makes sure that, even if a reference to a team is held in the DD Queue, the tracker will be
         // stopped
         // before the server_status map to which it has a pointer, is destroyed.
-        for(auto it = server_info.begin(); it != server_info.end(); ++it) {
-            it->second->tracker.cancel();
-            it->second->collection = nullptr;
+        for (auto& [_, info] : server_info) {
+            info->tracker.cancel();
+            info->collection = nullptr;
         }
         // TraceEvent("DDTeamCollectionDestructed", distributorId)
         // .detail("Primary", primary)
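A side note on the binding above: C++17 structured bindings have no language-level placeholder, so "_" is just an ordinary variable name used by convention for the map key the loop never reads. A small compilable sketch with invented stand-in types (not the actual DDTeamCollection members):

#include <cstdio>
#include <map>
#include <memory>

struct TrackerInfo {
    bool cancelled = false;
};

int main() {
    std::map<int, std::shared_ptr<TrackerInfo>> server_info;
    server_info[1] = std::make_shared<TrackerInfo>();
    server_info[2] = std::make_shared<TrackerInfo>();

    // "_" binds the key but is never used; only the mapped value is touched.
    for (auto& [_, info] : server_info) {
        info->cancelled = true;
    }
    printf("cancelled %zu trackers\n", server_info.size());
    return 0;
}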
@@ -799,9 +799,9 @@ struct DDTeamCollection : ReferenceCounted<DDTeamCollection> {
             self->lastMedianAvailableSpaceUpdate = now();
             std::vector<double> teamAvailableSpace;
             teamAvailableSpace.reserve(self->teams.size());
-            for( int i = 0; i < self->teams.size(); i++ ) {
-                if (self->teams[i]->isHealthy()) {
-                    teamAvailableSpace.push_back(self->teams[i]->getMinAvailableSpaceRatio());
+            for (const auto& team : self->teams) {
+                if (team->isHealthy()) {
+                    teamAvailableSpace.push_back(team->getMinAvailableSpaceRatio());
                 }
             }

@@ -1135,14 +1135,14 @@ struct DDTeamCollection : ReferenceCounted<DDTeamCollection> {
         double varTeams = 0;

         std::map<Optional<Standalone<StringRef>>, int> machineTeams;
-        for(auto s = server_info.begin(); s != server_info.end(); ++s) {
-            if(!server_status.get(s->first).isUnhealthy()) {
-                int stc = s->second->teams.size();
+        for (const auto& [id, info] : server_info) {
+            if (!server_status.get(id).isUnhealthy()) {
+                int stc = info->teams.size();
                 minTeams = std::min(minTeams, stc);
                 maxTeams = std::max(maxTeams, stc);
                 varTeams += (stc - teamsPerServer)*(stc - teamsPerServer);
                 // Use zoneId as server's machine id
-                machineTeams[s->second->lastKnownInterface.locality.zoneId()] += stc;
+                machineTeams[info->lastKnownInterface.locality.zoneId()] += stc;
             }
         }
         varTeams /= teamsPerServer*teamsPerServer;
@@ -1167,14 +1167,15 @@ struct DDTeamCollection : ReferenceCounted<DDTeamCollection> {
             .detail("MachineMaxTeams", maxMachineTeams);
     }

-    int overlappingMembers( vector<UID> &team ) {
+    int overlappingMembers(const vector<UID>& team) const {
         if (team.empty()) {
             return 0;
         }

         int maxMatchingServers = 0;
-        UID& serverID = team[0];
-        for (auto& usedTeam : server_info[serverID]->teams) {
+        const UID& serverID = team[0];
+        const auto& usedTeams = server_info.find(serverID)->second->teams;
+        for (const auto& usedTeam : usedTeams) {
             auto used = usedTeam->getServerIDs();
             int teamIdx = 0;
             int usedIdx = 0;
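The overlappingMembers change also swaps server_info[serverID] for server_info.find(serverID)->second. That is forced by the new const qualifier: std::map::operator[] may insert a missing key, so it has no const overload, while find() works on a const map (and, like the original operator[] use, this assumes the key is present). A compilable sketch with simplified stand-ins for the real members:

#include <map>
#include <vector>

struct Collection {
    std::map<int, std::vector<int>> server_info;

    // A const member function cannot call server_info[id]; find() is the usual replacement.
    int teamsFor(int id) const {
        auto it = server_info.find(id);
        return it == server_info.end() ? 0 : static_cast<int>(it->second.size());
    }
};

int main() {
    Collection c;
    c.server_info[7] = { 1, 2, 3 };
    return c.teamsFor(7) == 3 ? 0 : 1;
}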
@@ -319,7 +319,7 @@ ACTOR Future<Void> readHotDetector(DataDistributionTracker* self) {
     loop {
         try {
             Standalone<VectorRef<ReadHotRangeWithMetrics>> readHotRanges = wait(tr.getReadHotRanges(keys));
-            for (auto& keyRange : readHotRanges) {
+            for (const auto& keyRange : readHotRanges) {
                 TraceEvent("ReadHotRangeLog")
                     .detail("ReadDensity", keyRange.density)
                     .detail("ReadBandwidth", keyRange.readBandwidth)
@@ -394,12 +394,14 @@ ACTOR Future<Void> changeSizes( DataDistributionTracker* self, KeyRange keys, in
     wait( yield(TaskPriority::DataDistribution) );

     int64_t newShardsStartingSize = 0;
-    for ( int i = 0; i < sizes.size(); i++ )
-        newShardsStartingSize += sizes[i].get();
+    for (const auto& size : sizes) {
+        newShardsStartingSize += size.get();
+    }

     int64_t newSystemShardsStartingSize = 0;
-    for ( int i = 0; i < systemSizes.size(); i++ )
-        newSystemShardsStartingSize += systemSizes[i].get();
+    for (const auto& systemSize : systemSizes) {
+        newSystemShardsStartingSize += systemSize.get();
+    }

     int64_t totalSizeEstimate = self->dbSizeEstimate->get();
     /*TraceEvent("TrackerChangeSizes")
@@ -182,7 +182,7 @@ void commitMessages( LogRouterData* self, Version version, const std::vector<Tag
     }

     int msgSize = 0;
-    for(auto& i : taggedMessages) {
+    for (const auto& i : taggedMessages) {
         msgSize += i.message.size();
     }

@@ -199,7 +199,7 @@ void commitMessages( LogRouterData* self, Version version, const std::vector<Tag

     block.pop_front(block.size());

-    for(auto& msg : taggedMessages) {
+    for (const auto& msg : taggedMessages) {
         if(msg.message.size() > block.capacity() - block.size()) {
             self->messageBlocks.emplace_back(version, block);
             block = Standalone<VectorRef<uint8_t>>();

@@ -207,7 +207,7 @@ void commitMessages( LogRouterData* self, Version version, const std::vector<Tag
         }

         block.append(block.arena(), msg.message.begin(), msg.message.size());
-        for(auto& tag : msg.tags) {
+        for (const auto& tag : msg.tags) {
            auto tagData = self->getTagData(tag);
            if(!tagData) {
                tagData = self->createTagData(tag, 0, 0);
@@ -601,7 +601,7 @@ struct ResolutionRequestBuilder {
         ASSERT( transactionNumberInBatch >= 0 && transactionNumberInBatch < 32768 );

         bool isTXNStateTransaction = false;
-        for (auto & m : trIn.mutations) {
+        for (const auto& m : trIn.mutations) {
             if (m.type == MutationRef::SetVersionstampedKey) {
                 transformVersionstampMutation( m, &MutationRef::param1, requests[0].version, transactionNumberInBatch );
                 trIn.write_conflict_ranges.push_back( requests[0].arena, singleKeyRange( m.param1, requests[0].arena ) );
@@ -767,16 +767,16 @@ bool isWhitelisted(const vector<Standalone<StringRef>>& binPathVec, StringRef bi
     return std::find(binPathVec.begin(), binPathVec.end(), binPath) != binPathVec.end();
 }

-ACTOR Future<Void> addBackupMutations(ProxyCommitData* self, std::map<Key, MutationListRef>* logRangeMutations,
-                                      LogPushData* toCommit, Version commitVersion, double* computeDuration, double* computeStart) {
-    state std::map<Key, MutationListRef>::iterator logRangeMutation = logRangeMutations->begin();
+ACTOR Future<Void> addBackupMutations(ProxyCommitData* self, std::map<Key, MutationListRef> const* logRangeMutations,
+                                      LogPushData* toCommit, Version commitVersion, double* computeDuration,
+                                      double* computeStart) {
+    state std::map<Key, MutationListRef>::const_iterator logRangeMutation = logRangeMutations->begin();
     state int32_t version = commitVersion / CLIENT_KNOBS->LOG_RANGE_BLOCK_SIZE;
     state int yieldBytes = 0;
     state BinaryWriter valueWriter(Unversioned());

     // Serialize the log range mutations within the map
-    for (; logRangeMutation != logRangeMutations->end(); ++logRangeMutation)
-    {
+    for (; logRangeMutation != logRangeMutations->cend(); ++logRangeMutation) {
         //FIXME: this is re-implementing the serialize function of MutationListRef in order to have a yield
         valueWriter = BinaryWriter(IncludeVersion(ProtocolVersion::withBackupMutations()));
         valueWriter << logRangeMutation->second.totalSize();
@@ -114,7 +114,7 @@ private:
         m_is_inline = isInline;
     }

-    StringRef getKey() {
+    StringRef getKey() const {
         if (m_is_inline) {
             return StringRef(&key.inlineData[0], m_inline_length);
         } else {

@@ -122,9 +122,9 @@ private:
         }
     }

-    inline int getKeySize() { return m_is_inline ? m_inline_length : key.data.size(); }
+    inline int getKeySize() const { return m_is_inline ? m_inline_length : key.data.size(); }

-    inline int16_t getFirstByte() {
+    inline int16_t getFirstByte() const {
         if (m_is_inline) {
             return m_inline_length == 0 ? LEAF_BYTE : key.inlineData[0];
         } else {

@@ -132,7 +132,7 @@ private:
         }
     }

-    inline size_type getArenaSize() { return m_is_inline ? 0 : arena.getSize(); }
+    inline size_type getArenaSize() const { return m_is_inline ? 0 : arena.getSize(); }

     uint32_t m_is_leaf : 1;
     uint32_t m_is_fixed : 1; // if true, then we have fixed number of children (3)
@@ -729,8 +729,7 @@ ACTOR Future<Void> monitorServerListChange(
     self->lastSSListFetchedTimestamp = now();

     std::map<UID, StorageServerInterface> newServers;
-    for (int i = 0; i < results.size(); i++) {
-        const StorageServerInterface& ssi = results[i].first;
+    for (const auto& [ssi, _] : results) {
         const UID serverId = ssi.id();
         newServers[serverId] = ssi;
