fix teamSorter usage bug
parent e6893ba0b6
commit b811a62b65
@@ -821,9 +821,9 @@ struct ChangeFeedVersionUpdateRequest {
 struct GetStorageMetricsReply {
 	constexpr static FileIdentifier file_identifier = 15491478;
-	StorageMetrics load;
-	StorageMetrics available;
-	StorageMetrics capacity;
+	StorageMetrics load; // sum of key-value metrics (logical bytes)
+	StorageMetrics available; // physical bytes
+	StorageMetrics capacity; // physical bytes
 	double bytesInputRate;
 	int64_t versionLag;
 	double lastUpdate;

@@ -250,8 +250,8 @@ public:
 				        self->shardsAffectedByTeamFailure->hasShards(ShardsAffectedByTeamFailure::Team(
 				            self->teams[currentIndex]->getServerIDs(), self->primary))) &&
 				    // sort conditions
-				    (!bestOption.present() || req.lessCompare(bestOption.get(), self->teams[currentIndex]) ||
-				     !req.lessCompareByLoad(loadBytes, bestLoadBytes))) {
+				    (!bestOption.present() ||
+				     req.lessCompare(bestOption.get(), self->teams[currentIndex], bestLoadBytes, loadBytes))) {
 					bestLoadBytes = loadBytes;
 					bestOption = self->teams[currentIndex];
 					bestIndex = currentIndex;

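Note on the bug being fixed at these call sites: the old condition OR'ed the sorter comparison with an independent load comparison, so even when a custom teamSorter was installed and voted to keep the incumbent team, the trailing !req.lessCompareByLoad(...) term could still force a replacement, silently overriding the sorter. The fix folds the load comparison into lessCompare as a fallback that only runs when no sorter is set. A minimal self-contained sketch of the failure mode, modeling the preferLowerUtilization path; Team, Sorter, oldShouldReplace and newShouldReplace are illustrative names, not the FDB types:

#include <cassert>
#include <cstdint>
#include <functional>

struct Team { int64_t loadBytes; int hotness; };
using Sorter = std::function<bool(const Team&, const Team&)>;

// Old call-site shape: the sorter's verdict is OR'ed with an independent
// load check, so the load check can override the sorter.
bool oldShouldReplace(const Sorter& sorter, const Team& best, const Team& cand) {
	bool sorterSaysReplace = sorter ? sorter(best, cand) : false;
	bool candHasLessLoad = cand.loadBytes < best.loadBytes;
	return sorterSaysReplace || candHasLessLoad;
}

// Fixed shape: the load comparison is only a fallback when no sorter is set.
bool newShouldReplace(const Sorter& sorter, const Team& best, const Team& cand) {
	if (sorter)
		return sorter(best, cand);
	return cand.loadBytes < best.loadBytes;
}

int main() {
	// A sorter that prefers the hotter team: replace best when the candidate is hotter.
	Sorter byHotness = [](const Team& a, const Team& b) { return a.hotness < b.hotness; };
	Team best = { /*loadBytes*/ 100, /*hotness*/ 9 };
	Team cand = { /*loadBytes*/ 50, /*hotness*/ 1 };
	// The sorter wants to keep `best`, but the old OR'ed load term overrides it.
	assert(oldShouldReplace(byHotness, best, cand));  // bug: replaced anyway
	assert(!newShouldReplace(byHotness, best, cand)); // fixed: sorter decides
	return 0;
}
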
@@ -299,8 +299,8 @@ public:
 			for (int i = 0; i < randomTeams.size(); i++) {
 				int64_t loadBytes = randomTeams[i]->getLoadBytes(true, req.inflightPenalty);
-				if (!bestOption.present() || req.lessCompare(bestOption.get(), randomTeams[i]) ||
-				    !req.lessCompareByLoad(loadBytes, bestLoadBytes)) {
+				if (!bestOption.present() ||
+				    req.lessCompare(bestOption.get(), randomTeams[i], bestLoadBytes, loadBytes)) {
 					bestLoadBytes = loadBytes;
 					bestOption = randomTeams[i];
 				}

@@ -106,16 +106,17 @@ struct GetTeamRequest {
 	    teamMustHaveShards(teamMustHaveShards), inflightPenalty(inflightPenalty) {}
 
 	// return true if a.score < b.score
-	[[nodiscard]] bool lessCompare(TeamRef a, TeamRef b) const {
+	[[nodiscard]] bool lessCompare(TeamRef a, TeamRef b, int64_t aLoadBytes, int64_t bLoadBytes) const {
 		if (teamSorter) {
 			return teamSorter(a, b);
 		}
-		return false;
+		return lessCompareByLoad(aLoadBytes, bLoadBytes);
 	}
 
-	// return true if scoreWithLoadBytes < bestScoreWithBestLoadBytes
-	bool lessCompareByLoad(int64_t loadBytes, int64_t bestLoadBytes) const {
-		bool lessLoad = loadBytes < bestLoadBytes;
+	// return true if preferHigherUtil && aLoadBytes <= bLoadBytes (higher load bytes has larger score)
+	// or preferLowerUtil && aLoadBytes > bLoadBytes
+	bool lessCompareByLoad(int64_t aLoadBytes, int64_t bLoadBytes) const {
+		bool lessLoad = aLoadBytes <= bLoadBytes;
 		return preferLowerUtilization ? !lessLoad : lessLoad;
 	}
 

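A standalone sketch of the fixed comparator semantics (the types below are stand-ins, not the FDB interfaces): with no sorter installed, lessCompare now degrades to lessCompareByLoad, and preferLowerUtilization flips the direction of the load comparison. Note that the switch from < to <= also changes tie-breaking: under preferLowerUtilization an equal-load candidate no longer displaces the incumbent.

#include <cassert>
#include <cstdint>
#include <functional>

struct TeamSketch {};
using TeamRef = const TeamSketch&;

struct GetTeamRequestSketch {
	bool preferLowerUtilization = true;
	std::function<bool(TeamRef, TeamRef)> teamSorter; // optional custom order

	// true when incumbent a should yield to candidate b
	bool lessCompare(TeamRef a, TeamRef b, int64_t aLoadBytes, int64_t bLoadBytes) const {
		if (teamSorter)
			return teamSorter(a, b);
		return lessCompareByLoad(aLoadBytes, bLoadBytes);
	}

	bool lessCompareByLoad(int64_t aLoadBytes, int64_t bLoadBytes) const {
		bool lessLoad = aLoadBytes <= bLoadBytes;
		return preferLowerUtilization ? !lessLoad : lessLoad;
	}
};

int main() {
	TeamSketch t1, t2;
	GetTeamRequestSketch req;

	// preferLowerUtilization: a 10-byte candidate beats a 20-byte incumbent...
	req.preferLowerUtilization = true;
	assert(req.lessCompare(t1, t2, /*a*/ 20, /*b*/ 10));
	// ...and an equal-load candidate does not (the <= at work).
	assert(!req.lessCompare(t1, t2, /*a*/ 10, /*b*/ 10));

	// preferHigherUtilization: the more-loaded candidate wins.
	req.preferLowerUtilization = false;
	assert(req.lessCompare(t1, t2, /*a*/ 10, /*b*/ 20));
	return 0;
}
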
@@ -86,9 +86,10 @@ struct RelocateData {
 
 	bool operator==(const RelocateData& rhs) const {
 		return priority == rhs.priority && boundaryPriority == rhs.boundaryPriority &&
-		       healthPriority == rhs.healthPriority && keys == rhs.keys && startTime == rhs.startTime &&
-		       workFactor == rhs.workFactor && src == rhs.src && completeSources == rhs.completeSources &&
-		       wantsNewServers == rhs.wantsNewServers && randomId == rhs.randomId;
+		       healthPriority == rhs.healthPriority && reason == rhs.reason && keys == rhs.keys &&
+		       startTime == rhs.startTime && workFactor == rhs.workFactor && src == rhs.src &&
+		       completeSources == rhs.completeSources && wantsNewServers == rhs.wantsNewServers &&
+		       randomId == rhs.randomId;
 	}
 	bool operator!=(const RelocateData& rhs) const { return !(*this == rhs); }
 };

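Adding reason to operator== means two RelocateData entries covering the same key range but issued for different reasons (say, a disk-balance move and a read-balance move) no longer compare equal, so any queue logic that deduplicates or matches pending relocations by equality now treats them as distinct work items; that reading is inferred from this diff, since the code relying on the equality sits outside the hunk.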
@@ -1040,9 +1041,9 @@ ACTOR Future<Void> dataDistributionRelocator(DDQueueData* self, RelocateData rd,
 			req.src = rd.src;
 			req.completeSources = rd.completeSources;
 
-			// if (rd.reason == RelocateReason::REBALANCE_READ) {
-			// 	req.teamSorter = greaterReadLoad;
-			// }
+			if (rd.reason == RelocateReason::REBALANCE_READ) {
+				req.teamSorter = greaterReadLoad;
+			}
 			// bestTeam.second = false if the bestTeam in the teamCollection (in the DC) does not have any
 			// server that hosts the relocateData. This is possible, for example, in a fearless configuration
 			// when the remote DC is just brought up.

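With the previously commented-out block now live, read-rebalance relocations install greaterReadLoad as the request's teamSorter. Its definition is not shown in this diff; what follows is a plausible self-contained sketch of the intent, with every name and the readBandwidth metric assumed rather than taken from the FDB sources: a "less" comparator that ranks a team lower when it already serves more reads, so the best-team scan converges on the coldest destination.

#include <cstdio>
#include <functional>
#include <vector>

struct Team {
	const char* name;
	double readBandwidth; // bytes/sec of reads served (assumed metric)
};

using TeamSorter = std::function<bool(const Team&, const Team&)>;

// Assumed shape of greaterReadLoad: "a scores lower than b" when a already
// serves more reads, so hot teams lose to cold ones as destinations.
bool greaterReadLoadSketch(const Team& a, const Team& b) {
	return a.readBandwidth > b.readBandwidth;
}

int main() {
	std::vector<Team> teams = { { "t0", 5e6 }, { "t1", 9e6 }, { "t2", 2e6 } };
	TeamSorter sorter = greaterReadLoadSketch; // what the request would carry

	// The same replace-if-less scan the call sites above perform.
	const Team* best = &teams[0];
	for (const Team& t : teams)
		if (sorter(*best, t))
			best = &t;
	std::printf("coldest destination: %s\n", best->name); // t2
	return 0;
}
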
@@ -1343,13 +1344,13 @@ ACTOR Future<bool> rebalanceReadLoad(DDQueueData* self,
 		return false;
 	}
 	if (metrics.keys.present() && metrics.bytes > 0) {
 		// auto srcLoad = sourceTeam->getLoadReadBandwidth(), destLoad = destTeam->getLoadReadBandwidth();
 		// if (abs(srcLoad - destLoad) <=
 		//     3 * std::max(metrics.bytesReadPerKSecond, SERVER_KNOBS->SHARD_READ_HOT_BANDWITH_MIN_PER_KSECONDS)) {
 		// 	traceEvent->detail("SkipReason", "TeamTooSimilar");
 		// 	return false;
 		// }
 		// Verify the shard is still in ShardsAffectedByTeamFailure
 		shards = self->shardsAffectedByTeamFailure->getShardsFor(
 		    ShardsAffectedByTeamFailure::Team(sourceTeam->getServerIDs(), primary));
 		for (int i = 0; i < shards.size(); i++) {