Merge branch 'release-6.1' of https://github.com/apple/foundationdb into sqlite-grow-bigger
commit df0548503d
@@ -2,14 +2,6 @@
 Release Notes
 #############
 
-6.1.2
-=====
-
-Fixes
------
-
-* Consistency check could report inaccurate shard size estimates if there were enough keys with large values and a small number of keys with small values. `(PR #1468) <https://github.com/apple/foundationdb/pull/1468>`_.
-
 6.1.0
 =====
 
@@ -76,6 +68,7 @@ Fixes
 * Windows: Fixed a crash when deleting files. `(Issue #1380) <https://github.com/apple/foundationdb/issues/1380>`_ (by KrzysFR)
 * Starting a restore on a tag already in-use would hang and the process would eventually run out of memory. `(PR #1394) <https://github.com/apple/foundationdb/pull/1394>`_
 * The ``proxy_memory_limit_exceeded`` error was treated as retryable, but ``fdb_error_predicate`` returned that it is not retryable. `(PR #1438) <https://github.com/apple/foundationdb/pull/1438>`_.
+* Consistency check could report inaccurate shard size estimates if there were enough keys with large values and a small number of keys with small values. [6.1.3] `(PR #1468) <https://github.com/apple/foundationdb/pull/1468>`_.
 
 Status
 ------
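
The note on PR #1438 above concerns whether ``fdb_error_predicate`` classifies ``proxy_memory_limit_exceeded`` as retryable. For reference, a minimal sketch (not part of this diff) of how a client queries that predicate through the C API; error code 1007 (transaction_too_old) is used only as an example:

#define FDB_API_VERSION 610
#include <foundationdb/fdb_c.h>
#include <cstdio>

// Returns true when the C API reports the error code as retryable.
bool isRetryable(fdb_error_t err) {
	return fdb_error_predicate(FDB_ERROR_PREDICATE_RETRYABLE, err) != 0;
}

int main() {
	fdb_error_t err = 1007; // transaction_too_old, normally retryable
	std::printf("%s: %s\n", fdb_get_error(err), isRetryable(err) ? "retryable" : "not retryable");
	return 0;
}
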
@@ -122,6 +115,7 @@ Fixes only impacting 6.1.0+
 
 * The ``consistencycheck`` fdbserver role would repeatedly exit. [6.1.1] `(PR #1437) <https://github.com/apple/foundationdb/pull/1437>`_
 * The ``consistencycheck`` fdbserver role could proceed at a very slow rate after inserting data into an empty database. [6.1.2] `(PR #1452) <https://github.com/apple/foundationdb/pull/1452>`_
+* The background actor which removes redundant teams could leave data unbalanced. [6.1.3] `(PR #1479) <https://github.com/apple/foundationdb/pull/1479>`_
 
 Earlier release notes
 ---------------------
@@ -745,11 +745,7 @@ struct DDTeamCollection : ReferenceCounted<DDTeamCollection> {
 				sources.insert( req.sources[i] );
 
 			for( int i = 0; i < req.sources.size(); i++ ) {
-				if( !self->server_info.count( req.sources[i] ) ) {
-					TEST( true ); // GetSimilarTeams source server now unknown
-					TraceEvent(SevWarn, "GetTeam").detail("ReqSourceUnknown", req.sources[i]);
-				}
-				else {
+				if( self->server_info.count( req.sources[i] ) ) {
 					auto& teamList = self->server_info[ req.sources[i] ]->teams;
 					for( int j = 0; j < teamList.size(); j++ ) {
 						if( teamList[j]->isHealthy() && (!req.preferLowerUtilization || teamList[j]->hasHealthyFreeSpace())) {
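
The hunk above makes ``getTeam`` consider only source servers that are still present in ``server_info``. A rough standalone sketch of that lookup pattern, using plain STL containers and hypothetical names (``ServerInfo``, ``collectSourceTeams``) rather than the actual Flow types:

#include <map>
#include <memory>
#include <set>
#include <string>
#include <vector>
#include <iostream>

// Hypothetical stand-ins for the data-distribution types referenced in the hunk.
struct Team { std::string name; bool healthy; };
struct ServerInfo { std::vector<std::shared_ptr<Team>> teams; };

// Collect the teams of every *known* source server; unknown servers are skipped,
// mirroring the server_info membership check in the diff above.
std::vector<std::shared_ptr<Team>> collectSourceTeams(
		const std::map<std::string, std::shared_ptr<ServerInfo>>& server_info,
		const std::vector<std::string>& sources) {
	std::vector<std::shared_ptr<Team>> result;
	std::set<std::string> seen; // like the `sources` set built just before the loop
	for (const auto& id : sources) {
		if (!seen.insert(id).second) continue;   // de-duplicate source servers
		auto it = server_info.find(id);
		if (it == server_info.end()) continue;   // source server unknown: skip it
		for (const auto& team : it->second->teams)
			if (team->healthy) result.push_back(team); // only keep healthy teams
	}
	return result;
}

int main() {
	std::map<std::string, std::shared_ptr<ServerInfo>> server_info;
	server_info["s1"] = std::make_shared<ServerInfo>();
	server_info["s1"]->teams.push_back(std::make_shared<Team>(Team{"team-a", true}));
	auto teams = collectSourceTeams(server_info, {"s1", "s2", "s1"}); // "s2" is unknown
	std::cout << teams.size() << " candidate team(s)\n";              // prints 1
	return 0;
}
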
@@ -2325,6 +2321,8 @@ ACTOR Future<Void> teamRemover(DDTeamCollection* self) {
 				TEST(true);
 			}
 
+			self->doBuildTeams = true;
+
 			if (self->badTeamRemover.isReady()) {
 				self->badTeamRemover = removeBadTeams(self);
 				self->addActor.send(self->badTeamRemover);
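
The ``badTeamRemover`` guard in this hunk relaunches the cleanup actor only when its previous run has finished. A rough sketch of that "restart only when ready" pattern with standard futures; the names mirror the diff but none of this is Flow code:

#include <chrono>
#include <future>
#include <iostream>
#include <thread>

// Illustrative stand-in for the background actor that removes bad teams.
void removeBadTeams() {
	std::this_thread::sleep_for(std::chrono::milliseconds(50));
}

int main() {
	// Launch one run of the background job.
	std::future<void> badTeamRemover = std::async(std::launch::async, removeBadTeams);

	// Later, when new cleanup work shows up: relaunch only if the previous run
	// has already finished, like the isReady() check in the hunk above.
	bool ready = badTeamRemover.wait_for(std::chrono::seconds(0)) == std::future_status::ready;
	if (ready) {
		badTeamRemover = std::async(std::launch::async, removeBadTeams);
	}
	std::cout << (ready ? "relaunched" : "previous run still in flight") << "\n";

	badTeamRemover.wait(); // let the outstanding run finish before exiting
	return 0;
}
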
@@ -51,7 +51,8 @@ struct RelocateData {
 			rs.priority == PRIORITY_REBALANCE_SHARD ||
 			rs.priority == PRIORITY_REBALANCE_OVERUTILIZED_TEAM ||
 			rs.priority == PRIORITY_REBALANCE_UNDERUTILIZED_TEAM ||
-			rs.priority == PRIORITY_SPLIT_SHARD ), interval("QueuedRelocation") {}
+			rs.priority == PRIORITY_SPLIT_SHARD ||
+			rs.priority == PRIORITY_TEAM_REDUNDANT ), interval("QueuedRelocation") {}
 
 	bool operator> (const RelocateData& rhs) const {
 		return priority != rhs.priority ? priority > rhs.priority : ( startTime != rhs.startTime ? startTime < rhs.startTime : randomId > rhs.randomId );
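
The ``RelocateData`` change extends a priority-membership test so that ``PRIORITY_TEAM_REDUNDANT`` relocations are treated like the other housekeeping moves. A small self-contained sketch of that classification; the constant values and the function name are made up for illustration, only the shape of the check matches the diff:

#include <initializer_list>
#include <iostream>

// Illustrative priority constants; the real values live in the fdbserver headers.
enum {
	PRIORITY_REBALANCE_SHARD = 100,
	PRIORITY_REBALANCE_UNDERUTILIZED_TEAM = 120,
	PRIORITY_REBALANCE_OVERUTILIZED_TEAM = 121,
	PRIORITY_TEAM_REDUNDANT = 200,
	PRIORITY_SPLIT_SHARD = 950,
};

// Same shape as the initializer in the hunk: true when the priority is one of
// the housekeeping movements, now including PRIORITY_TEAM_REDUNDANT.
static bool isHousekeepingMove(int priority) {
	for (int p : { PRIORITY_REBALANCE_SHARD, PRIORITY_REBALANCE_OVERUTILIZED_TEAM,
	               PRIORITY_REBALANCE_UNDERUTILIZED_TEAM, PRIORITY_SPLIT_SHARD,
	               PRIORITY_TEAM_REDUNDANT })
		if (priority == p) return true;
	return false;
}

int main() {
	std::cout << isHousekeepingMove(PRIORITY_TEAM_REDUNDANT) << "\n"; // 1
	std::cout << isHousekeepingMove(42) << "\n";                      // 0
	return 0;
}
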
@@ -51,7 +51,6 @@ enum {
 	TaskTLogPeek = 8590,
 	TaskTLogCommitReply = 8580,
 	TaskTLogCommit = 8570,
-	TaskTLogSpilledPeekReply = 8567,
 	TaskProxyGetRawCommittedVersion = 8565,
 	TaskProxyResolverReply = 8560,
 	TaskProxyCommitBatcher = 8550,
@@ -73,6 +72,7 @@ enum {
 	TaskDataDistribution = 3500,
 	TaskDiskWrite = 3010,
 	TaskUpdateStorage = 3000,
+	TaskTLogSpilledPeekReply = 2800,
 	TaskLowPriority = 2000,
 
 	TaskMinPriority = 1000
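
These two hunks move ``TaskTLogSpilledPeekReply`` from 8567 down to 2800, below ``TaskUpdateStorage``. In this priority scheme a larger number is serviced sooner, so spilled peek replies now yield to storage updates. A minimal sketch of that ordering with a max-heap; the scheduling loop is illustrative, not the Flow scheduler:

#include <iostream>
#include <queue>
#include <string>
#include <utility>

int main() {
	// Numeric task priorities as in the hunk: a higher value is serviced earlier.
	const int TaskTLogCommit = 8570;
	const int TaskUpdateStorage = 3000;
	const int TaskTLogSpilledPeekReply = 2800; // 8567 on the old side of the diff
	const int TaskLowPriority = 2000;

	// A max-heap keyed on priority pops the most urgent task first.
	std::priority_queue<std::pair<int, std::string>> ready;
	ready.push({ TaskUpdateStorage, "update storage" });
	ready.push({ TaskTLogSpilledPeekReply, "spilled peek reply" });
	ready.push({ TaskTLogCommit, "tlog commit" });
	ready.push({ TaskLowPriority, "low priority" });

	// With the lowered value, the spilled peek reply now runs after storage updates.
	while (!ready.empty()) {
		std::cout << ready.top().second << " (" << ready.top().first << ")\n";
		ready.pop();
	}
	return 0;
}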