Merge pull request #3510 from apple/release-6.3
Merge Release 6.3 into master
Commit 29da89525f
@@ -10,38 +10,38 @@ macOS
 
 The macOS installation package is supported on macOS 10.7+. It includes the client and (optionally) the server.
 
-* `FoundationDB-6.3.2.pkg <https://www.foundationdb.org/downloads/6.3.2/macOS/installers/FoundationDB-6.3.2.pkg>`_
+* `FoundationDB-6.3.3.pkg <https://www.foundationdb.org/downloads/6.3.3/macOS/installers/FoundationDB-6.3.3.pkg>`_
 
 Ubuntu
 ------
 
 The Ubuntu packages are supported on 64-bit Ubuntu 12.04+, but beware of the Linux kernel bug in Ubuntu 12.x.
 
-* `foundationdb-clients-6.3.2-1_amd64.deb <https://www.foundationdb.org/downloads/6.3.2/ubuntu/installers/foundationdb-clients_6.3.2-1_amd64.deb>`_
-* `foundationdb-server-6.3.2-1_amd64.deb <https://www.foundationdb.org/downloads/6.3.2/ubuntu/installers/foundationdb-server_6.3.2-1_amd64.deb>`_ (depends on the clients package)
+* `foundationdb-clients-6.3.3-1_amd64.deb <https://www.foundationdb.org/downloads/6.3.3/ubuntu/installers/foundationdb-clients_6.3.3-1_amd64.deb>`_
+* `foundationdb-server-6.3.3-1_amd64.deb <https://www.foundationdb.org/downloads/6.3.3/ubuntu/installers/foundationdb-server_6.3.3-1_amd64.deb>`_ (depends on the clients package)
 
 RHEL/CentOS EL6
 ---------------
 
 The RHEL/CentOS EL6 packages are supported on 64-bit RHEL/CentOS 6.x.
 
-* `foundationdb-clients-6.3.2-1.el6.x86_64.rpm <https://www.foundationdb.org/downloads/6.3.2/rhel6/installers/foundationdb-clients-6.3.2-1.el6.x86_64.rpm>`_
-* `foundationdb-server-6.3.2-1.el6.x86_64.rpm <https://www.foundationdb.org/downloads/6.3.2/rhel6/installers/foundationdb-server-6.3.2-1.el6.x86_64.rpm>`_ (depends on the clients package)
+* `foundationdb-clients-6.3.3-1.el6.x86_64.rpm <https://www.foundationdb.org/downloads/6.3.3/rhel6/installers/foundationdb-clients-6.3.3-1.el6.x86_64.rpm>`_
+* `foundationdb-server-6.3.3-1.el6.x86_64.rpm <https://www.foundationdb.org/downloads/6.3.3/rhel6/installers/foundationdb-server-6.3.3-1.el6.x86_64.rpm>`_ (depends on the clients package)
 
 RHEL/CentOS EL7
 ---------------
 
 The RHEL/CentOS EL7 packages are supported on 64-bit RHEL/CentOS 7.x.
 
-* `foundationdb-clients-6.3.2-1.el7.x86_64.rpm <https://www.foundationdb.org/downloads/6.3.2/rhel7/installers/foundationdb-clients-6.3.2-1.el7.x86_64.rpm>`_
-* `foundationdb-server-6.3.2-1.el7.x86_64.rpm <https://www.foundationdb.org/downloads/6.3.2/rhel7/installers/foundationdb-server-6.3.2-1.el7.x86_64.rpm>`_ (depends on the clients package)
+* `foundationdb-clients-6.3.3-1.el7.x86_64.rpm <https://www.foundationdb.org/downloads/6.3.3/rhel7/installers/foundationdb-clients-6.3.3-1.el7.x86_64.rpm>`_
+* `foundationdb-server-6.3.3-1.el7.x86_64.rpm <https://www.foundationdb.org/downloads/6.3.3/rhel7/installers/foundationdb-server-6.3.3-1.el7.x86_64.rpm>`_ (depends on the clients package)
 
 Windows
 -------
 
 The Windows installer is supported on 64-bit Windows XP and later. It includes the client and (optionally) the server.
 
-* `foundationdb-6.3.2-x64.msi <https://www.foundationdb.org/downloads/6.3.2/windows/installers/foundationdb-6.3.2-x64.msi>`_
+* `foundationdb-6.3.3-x64.msi <https://www.foundationdb.org/downloads/6.3.3/windows/installers/foundationdb-6.3.3-x64.msi>`_
 
 API Language Bindings
 =====================
@@ -58,18 +58,18 @@ On macOS and Windows, the FoundationDB Python API bindings are installed as part
 
 If you need to use the FoundationDB Python API from other Python installations or paths, use the Python package manager ``pip`` (``pip install foundationdb``) or download the Python package:
 
-* `foundationdb-6.3.2.tar.gz <https://www.foundationdb.org/downloads/6.3.2/bindings/python/foundationdb-6.3.2.tar.gz>`_
+* `foundationdb-6.3.3.tar.gz <https://www.foundationdb.org/downloads/6.3.3/bindings/python/foundationdb-6.3.3.tar.gz>`_
 
 Ruby 1.9.3/2.0.0+
 -----------------
 
-* `fdb-6.3.2.gem <https://www.foundationdb.org/downloads/6.3.2/bindings/ruby/fdb-6.3.2.gem>`_
+* `fdb-6.3.3.gem <https://www.foundationdb.org/downloads/6.3.3/bindings/ruby/fdb-6.3.3.gem>`_
 
 Java 8+
 -------
 
-* `fdb-java-6.3.2.jar <https://www.foundationdb.org/downloads/6.3.2/bindings/java/fdb-java-6.3.2.jar>`_
-* `fdb-java-6.3.2-javadoc.jar <https://www.foundationdb.org/downloads/6.3.2/bindings/java/fdb-java-6.3.2-javadoc.jar>`_
+* `fdb-java-6.3.3.jar <https://www.foundationdb.org/downloads/6.3.3/bindings/java/fdb-java-6.3.3.jar>`_
+* `fdb-java-6.3.3-javadoc.jar <https://www.foundationdb.org/downloads/6.3.3/bindings/java/fdb-java-6.3.3-javadoc.jar>`_
 
 Go 1.11+
 --------
@@ -2,7 +2,7 @@
 Release Notes
 #############
 
-6.3.2
+6.3.3
 =====
 
 Features
@@ -104,10 +104,12 @@ Fixes from previous versions
 ----------------------------
 
 * The 6.3.1 patch release includes all fixes from the patch releases 6.2.21 and 6.2.22. :doc:`(6.2 Release Notes) </release-notes/release-notes-620>`
+* The 6.3.3 patch release includes all fixes from the patch release 6.2.23. :doc:`(6.2 Release Notes) </release-notes/release-notes-620>`
 
 Fixes only impacting 6.3.0+
 ---------------------------
 
+* Clients did not properly balance requests to the proxies. [6.3.3] `(PR #3377) <https://github.com/apple/foundationdb/pull/3377>`_
 * Renamed ``MIN_DELAY_STORAGE_CANDIDACY_SECONDS`` knob to ``MIN_DELAY_CC_WORST_FIT_CANDIDACY_SECONDS``. [6.3.2] `(PR #3327) <https://github.com/apple/foundationdb/pull/3327>`_
 * Refreshing TLS certificates could cause crashes. [6.3.2] `(PR #3352) <https://github.com/apple/foundationdb/pull/3352>`_
 * All storage class processes attempted to connect to the same coordinator. [6.3.2] `(PR #3361) <https://github.com/apple/foundationdb/pull/3361>`_
@@ -258,7 +258,14 @@ ACTOR Future<Void> getRate(UID myID, Reference<AsyncVar<ServerDBInfo>> db, int64
 		when ( wait( nextRequestTimer ) ) {
 			nextRequestTimer = Never();
 			bool detailed = now() - lastDetailedReply > SERVER_KNOBS->DETAILED_METRIC_UPDATE_RATE;
-			reply = brokenPromiseToNever(db->get().ratekeeper.get().getRateInfo.getReply(GetRateInfoRequest(myID, *inTransactionCount, *inBatchTransactionCount, *transactionTagCounter, detailed)));
+
+			TransactionTagMap<uint64_t> tagCounts;
+			for(auto itr : *throttledTags) {
+				for(auto priorityThrottles : itr.second) {
+					tagCounts[priorityThrottles.first] = (*transactionTagCounter)[priorityThrottles.first];
+				}
+			}
+			reply = brokenPromiseToNever(db->get().ratekeeper.get().getRateInfo.getReply(GetRateInfoRequest(myID, *inTransactionCount, *inBatchTransactionCount, tagCounts, detailed)));
 			transactionTagCounter->clear();
 			expectingDetailedReply = detailed;
 		}
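With this change the proxy reports transaction counts only for tags that are currently throttled, instead of forwarding its whole per-tag counter. A minimal standalone sketch of that filtering step, using plain std::map stand-ins for FoundationDB's tag map types (the type aliases and function name are illustrative assumptions, not the project's API):

#include <cstdint>
#include <map>
#include <string>

// Hypothetical stand-ins for TransactionTag / TransactionTagMap.
using Tag = std::string;
using TagCounts = std::map<Tag, uint64_t>;
using ThrottledTags = std::map<int /*priority*/, std::map<Tag, double /*rate*/>>;

// Keep only the counts for tags that are currently throttled, mirroring the
// nested loop added to getRate() above: untouched tags are dropped, and a
// throttled tag with no recorded requests reports zero.
TagCounts filterToThrottled(const TagCounts& allCounts, const ThrottledTags& throttled) {
    TagCounts filtered;
    for (const auto& priorityEntry : throttled) {
        for (const auto& tagEntry : priorityEntry.second) {
            auto it = allCounts.find(tagEntry.first);
            filtered[tagEntry.first] = (it != allCounts.end()) ? it->second : 0;
        }
    }
    return filtered;
}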
@@ -1505,9 +1512,14 @@ ACTOR Future<Void> sendGrvReplies(Future<GetReadVersionReply> replyFuture, std::
 		auto tagItr = priorityThrottledTags.find(tag.first);
 		if(tagItr != priorityThrottledTags.end()) {
 			if(tagItr->second.expiration > now()) {
+				if(tagItr->second.tpsRate == std::numeric_limits<double>::max()) {
+					TEST(true); // Auto TPS rate is unlimited
+				}
+				else {
 					TEST(true); // Proxy returning tag throttle
 					reply.tagThrottleInfo[tag.first] = tagItr->second;
+				}
 			}
 			else {
 				// This isn't required, but we might as well
 				TEST(true); // Proxy expiring tag throttle
@@ -149,7 +149,7 @@ private:
 		RkTagThrottleData() : clientRate(CLIENT_KNOBS->TAG_THROTTLE_SMOOTHING_WINDOW) {}
 
 		double getTargetRate(Optional<double> requestRate) {
-			if(limits.tpsRate == 0.0 || !requestRate.present() || requestRate.get() == 0.0) {
+			if(limits.tpsRate == 0.0 || !requestRate.present() || requestRate.get() == 0.0 || !rateSet) {
 				return limits.tpsRate;
 			}
 			else {
@@ -162,7 +162,7 @@ private:
 			double targetRate = getTargetRate(requestRate);
 			if(targetRate == std::numeric_limits<double>::max()) {
 				rateSet = false;
-				return Optional<double>();
+				return targetRate;
 			}
 			if(!rateSet) {
 				rateSet = true;
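These two hunks treat std::numeric_limits<double>::max() as an explicit "unlimited" rate and use the rateSet flag to re-seed the smoothed client rate the next time a finite limit appears. A minimal sketch of that flag logic, assuming simplified member names and a toy smoother in place of FoundationDB's Smoother:

#include <limits>
#include <optional>

// Simplified stand-in for RkTagThrottleData's client-rate bookkeeping.
struct RateTracker {
    static constexpr double kUnlimited = std::numeric_limits<double>::max();

    bool rateSet = false;       // has the smoother been seeded with a finite rate?
    double smoothedRate = 0.0;  // stands in for the real Smoother

    // Returns the rate to advertise; like the patched code, "unlimited" is
    // returned explicitly (as max()) rather than as an empty Optional.
    std::optional<double> update(double targetRate) {
        if (targetRate == kUnlimited) {
            rateSet = false;   // forget stale smoothed state while unlimited
            return targetRate;
        }
        if (!rateSet) {
            rateSet = true;
            smoothedRate = targetRate;  // re-seed instead of blending with old data
        } else {
            smoothedRate = 0.5 * smoothedRate + 0.5 * targetRate;  // toy smoothing
        }
        return smoothedRate;
    }
};

int main() {
    RateTracker tracker;
    tracker.update(RateTracker::kUnlimited);  // unlimited: flag cleared, max returned
    auto r = tracker.update(500.0);           // first finite rate re-seeds the smoother
    return (r && *r == 500.0) ? 0 : 1;
}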
@@ -184,6 +184,10 @@ private:
 		}
 	};
 
+	void initializeTag(TransactionTag const& tag) {
+		tagData.try_emplace(tag);
+	}
+
 public:
 	RkTagThrottleCollection() {}
 
@@ -217,17 +221,30 @@ public:
 		ASSERT(!expiration.present() || expiration.get() > now());
 
 		auto itr = autoThrottledTags.find(tag);
-		if(itr == autoThrottledTags.end() && autoThrottledTags.size() >= SERVER_KNOBS->MAX_AUTO_THROTTLED_TRANSACTION_TAGS) {
+		bool present = (itr != autoThrottledTags.end());
+		if(!present) {
+			if(autoThrottledTags.size() >= SERVER_KNOBS->MAX_AUTO_THROTTLED_TRANSACTION_TAGS) {
 				TEST(true); // Reached auto-throttle limit
 				return Optional<double>();
 			}
 
-			itr = autoThrottledTags.try_emplace(itr, tag);
+			itr = autoThrottledTags.try_emplace(tag).first;
+			initializeTag(tag);
+		}
+		else if(itr->second.limits.expiration <= now()) {
+			TEST(true); // Re-throttling expired tag that hasn't been cleaned up
+			present = false;
+			itr->second = RkTagThrottleData();
+		}
 
 		auto &throttle = itr->second;
 
 		if(!tpsRate.present()) {
 			if(now() <= throttle.created + SERVER_KNOBS->AUTO_TAG_THROTTLE_START_AGGREGATION_TIME) {
 				tpsRate = std::numeric_limits<double>::max();
+				if(present) {
+					return Optional<double>();
+				}
 			}
 			else if(now() <= throttle.lastUpdated + SERVER_KNOBS->AUTO_TAG_THROTTLE_UPDATE_FREQUENCY) {
 				TEST(true); // Tag auto-throttled too quickly
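Both the new initializeTag() helper and the rewritten lookup above rely on map::try_emplace, which inserts a default-constructed value only when the key is absent, leaves an existing entry untouched, and returns an iterator plus an insertion flag. A small self-contained illustration of that standard-library behavior (the struct and tag names are made up for the example):

#include <cassert>
#include <map>
#include <string>

struct ThrottleData {
    double tpsRate = 0.0;
};

int main() {
    std::map<std::string, ThrottleData> tags;

    // First call inserts a default-constructed ThrottleData and reports true.
    auto [first, inserted] = tags.try_emplace("tagA");
    assert(inserted);
    first->second.tpsRate = 100.0;

    // Second call finds the existing entry, does NOT overwrite it, reports false.
    auto [again, insertedAgain] = tags.try_emplace("tagA");
    assert(!insertedAgain);
    assert(again->second.tpsRate == 100.0);  // prior state preserved

    return 0;
}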
@@ -253,15 +270,19 @@ public:
 
 		ASSERT(tpsRate.present() && tpsRate.get() >= 0);
 
-		TraceEvent("RkSetAutoThrottle", id)
-			.detail("Tag", tag)
-			.detail("TargetRate", tpsRate.get())
-			.detail("Expiration", expiration.get() - now());
-
 		throttle.limits.tpsRate = tpsRate.get();
 		throttle.limits.expiration = expiration.get();
 
-		throttle.updateAndGetClientRate(getRequestRate(tag));
+		Optional<double> clientRate = throttle.updateAndGetClientRate(getRequestRate(tag));
+
+		TraceEvent("RkSetAutoThrottle", id)
+			.detail("Tag", tag)
+			.detail("TargetRate", tpsRate.get())
+			.detail("Expiration", expiration.get() - now())
+			.detail("ClientRate", clientRate)
+			.detail("Created", now()-throttle.created)
+			.detail("LastUpdate", now()-throttle.lastUpdated)
+			.detail("LastReduced", now()-throttle.lastReduced);
 
 		if(tpsRate.get() != std::numeric_limits<double>::max()) {
 			return tpsRate.get();
@@ -278,6 +299,7 @@ public:
 
 		auto &priorityThrottleMap = manualThrottledTags[tag];
 		auto result = priorityThrottleMap.try_emplace(priority);
+		initializeTag(tag);
 		ASSERT(result.second); // Updating to the map is done by copying the whole map
 
 		result.first->second.limits.tpsRate = tpsRate;
@@ -365,7 +387,8 @@ public:
 			Optional<double> autoClientRate = autoItr->second.updateAndGetClientRate(requestRate);
 			if(autoClientRate.present()) {
 				double adjustedRate = autoClientRate.get();
-				if(now() >= autoItr->second.lastReduced + SERVER_KNOBS->AUTO_TAG_THROTTLE_RAMP_UP_TIME) {
+				double rampStartTime = autoItr->second.lastReduced + SERVER_KNOBS->AUTO_TAG_THROTTLE_DURATION - SERVER_KNOBS->AUTO_TAG_THROTTLE_RAMP_UP_TIME;
+				if(now() >= rampStartTime && adjustedRate != std::numeric_limits<double>::max()) {
 					TEST(true); // Tag auto-throttle ramping up
 
 					double targetBusyness = SERVER_KNOBS->AUTO_THROTTLE_TARGET_TAG_BUSYNESS;
@@ -373,7 +396,7 @@ public:
 						targetBusyness = 0.01;
 					}
 
-					double rampLocation = (now() - autoItr->second.lastReduced - SERVER_KNOBS->AUTO_TAG_THROTTLE_RAMP_UP_TIME) / targetBusyness;
+					double rampLocation = (now() - rampStartTime) / SERVER_KNOBS->AUTO_TAG_THROTTLE_RAMP_UP_TIME;
 					adjustedRate = computeTargetTpsRate(targetBusyness, pow(targetBusyness, 1 - rampLocation), adjustedRate);
 				}
 
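The corrected formula measures progress through the final SERVER_KNOBS->AUTO_TAG_THROTTLE_RAMP_UP_TIME seconds of the throttle window: assuming the throttle expires AUTO_TAG_THROTTLE_DURATION seconds after it was last reduced, rampLocation runs from 0 when the ramp starts to 1 at expiration. A self-contained sketch of that calculation with made-up knob values (not FoundationDB's defaults):

#include <algorithm>
#include <cstdio>

int main() {
    // Illustrative values only; the real knobs live in SERVER_KNOBS.
    const double AUTO_TAG_THROTTLE_DURATION = 240.0;      // total throttle window (s)
    const double AUTO_TAG_THROTTLE_RAMP_UP_TIME = 120.0;  // ramp occupies its final portion (s)

    double lastReduced = 1000.0;  // time the throttle was last tightened
    double rampStartTime = lastReduced + AUTO_TAG_THROTTLE_DURATION - AUTO_TAG_THROTTLE_RAMP_UP_TIME;

    for (double now : {1100.0, 1120.0, 1180.0, 1240.0}) {
        if (now < rampStartTime) {
            std::printf("t=%.0f: before ramp, rate held\n", now);
            continue;
        }
        // Fraction of the ramp completed: 0 at the ramp start, 1 at expiration
        // (clamped here for the illustration).
        double rampLocation = std::min((now - rampStartTime) / AUTO_TAG_THROTTLE_RAMP_UP_TIME, 1.0);
        std::printf("t=%.0f: rampLocation=%.2f\n", now, rampLocation);
    }
    return 0;
}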
@@ -388,10 +411,16 @@ public:
 					clientRates[TransactionPriority::BATCH][tagItr->first] = ClientTagThrottleLimits(0, autoItr->second.limits.expiration);
 				}
 				else {
+					ASSERT(autoItr->second.limits.expiration <= now());
 					TEST(true); // Auto throttle expired
+					if(BUGGIFY) { // Temporarily extend the window between expiration and cleanup
+						tagPresent = true;
+					}
+					else {
 						autoThrottledTags.erase(autoItr);
+					}
 				}
 			}
 
 			if(!tagPresent) {
 				TEST(true); // All tag throttles expired
@@ -406,8 +435,8 @@ public:
 	}
 
 	void addRequests(TransactionTag const& tag, int requests) {
+		if(requests > 0) {
 			TEST(true); // Requests reported for throttled tag
-			ASSERT(requests > 0);
 
 			auto tagItr = tagData.try_emplace(tag);
 			tagItr.first->second.requestRate.addDelta(requests);
@@ -426,6 +455,7 @@ public:
 				}
 			}
+		}
 	}
 
 	Optional<double> getRequestRate(TransactionTag const& tag) {
 		auto itr = tagData.find(tag);
@@ -810,7 +840,7 @@ ACTOR Future<Void> monitorThrottlingChanges(RatekeeperData *self) {
 	}
 }
 
-void tryAutoThrottleTag(RatekeeperData *self, StorageQueueInfo const& ss, RkTagThrottleCollection& throttledTags) {
+void tryAutoThrottleTag(RatekeeperData *self, StorageQueueInfo const& ss) {
 	if(ss.busiestTag.present() && ss.busiestTagFractionalBusyness > SERVER_KNOBS->AUTO_THROTTLE_TARGET_TAG_BUSYNESS && ss.busiestTagRate > SERVER_KNOBS->MIN_TAG_COST) {
 		TEST(true); // Transaction tag auto-throttled
 
@@ -824,7 +854,7 @@ void tryAutoThrottleTag(RatekeeperData *self, StorageQueueInfo const& ss, RkTagT
 	}
 }
 
-void updateRate(RatekeeperData* self, RatekeeperLimits* limits, RkTagThrottleCollection& throttledTags) {
+void updateRate(RatekeeperData* self, RatekeeperLimits* limits) {
 	//double controlFactor = ; // dt / eFoldingTime
 
 	double actualTps = self->smoothReleasedTransactions.smoothRate();
 
@@ -892,7 +922,7 @@ void updateRate(RatekeeperData* self, RatekeeperLimits* limits, RkTagThrottleCol
 			double targetRateRatio = std::min(( storageQueue - targetBytes + springBytes ) / (double)springBytes, 2.0);
 
 			if(limits->priority == TransactionPriority::DEFAULT && (storageQueue > SERVER_KNOBS->AUTO_TAG_THROTTLE_STORAGE_QUEUE_BYTES || storageDurabilityLag > SERVER_KNOBS->AUTO_TAG_THROTTLE_DURABILITY_LAG_VERSIONS)) {
-				tryAutoThrottleTag(self, ss, throttledTags);
+				tryAutoThrottleTag(self, ss);
 			}
 
 			double inputRate = ss.smoothInputBytes.smoothRate();
 
@@ -1162,8 +1192,8 @@ void updateRate(RatekeeperData* self, RatekeeperLimits* limits, RkTagThrottleCol
 			.detail("LimitingStorageServerVersionLag", limitingVersionLag)
 			.detail("WorstStorageServerDurabilityLag", worstDurabilityLag)
 			.detail("LimitingStorageServerDurabilityLag", limitingDurabilityLag)
-			.detail("TagsAutoThrottled", throttledTags.autoThrottleCount())
-			.detail("TagsManuallyThrottled", throttledTags.manualThrottleCount())
+			.detail("TagsAutoThrottled", self->throttledTags.autoThrottleCount())
+			.detail("TagsManuallyThrottled", self->throttledTags.manualThrottleCount())
 			.trackLatest(name);
 	}
 }
@@ -1227,8 +1257,8 @@ ACTOR Future<Void> ratekeeper(RatekeeperInterface rkInterf, Reference<AsyncVar<S
 	state bool lastLimited = false;
 	loop choose {
 		when (wait( timeout )) {
-			updateRate(&self, &self.normalLimits, self.throttledTags);
-			updateRate(&self, &self.batchLimits, self.throttledTags);
+			updateRate(&self, &self.normalLimits);
+			updateRate(&self, &self.batchLimits);
 
 			lastLimited = self.smoothReleasedTransactions.smoothRate() > SERVER_KNOBS->LAST_LIMITED_RATIO * self.batchLimits.tpsLimit;
 			double tooOld = now() - 1.0;