Merge branch 'release-6.1'
# Conflicts:
#	documentation/sphinx/source/release-notes.rst
#	versions.target
commit 2d5043c665
@@ -79,22 +79,23 @@ def write_unix_asm(asmfile, functions, prefix):
     asmfile.write("\tjmp r11\n")
 
-with open(asm, 'w') as asmfile, open(h, 'w') as hfile:
-    hfile.write(
-        "void fdb_api_ptr_unimpl() { fprintf(stderr, \"UNIMPLEMENTED FDB API FUNCTION\\n\"); abort(); }\n\n")
-    hfile.write(
-        "void fdb_api_ptr_removed() { fprintf(stderr, \"REMOVED FDB API FUNCTION\\n\"); abort(); }\n\n")
-
-    if platform == "linux":
-        write_unix_asm(asmfile, functions, '')
-    elif platform == "osx":
-        write_unix_asm(asmfile, functions, '_')
-    elif platform == "windows":
-        write_windows_asm(asmfile, functions)
-
-    for f in functions:
-        if platform == "windows":
-            hfile.write("extern \"C\" ")
-        hfile.write("void* fdb_api_ptr_%s = (void*)&fdb_api_ptr_unimpl;\n" % f)
-        for v in functions[f]:
-            hfile.write("#define %s_v%d_PREV %s_v%d\n" % (f, v, f, v - 1))
+with open(asm, 'w') as asmfile:
+    if platform == "linux":
+        write_unix_asm(asmfile, functions, '')
+    elif platform == "osx":
+        write_unix_asm(asmfile, functions, '_')
+    elif platform == "windows":
+        write_windows_asm(asmfile, functions)
+
+with open(h, 'w') as hfile:
+    hfile.write(
+        "void fdb_api_ptr_unimpl() { fprintf(stderr, \"UNIMPLEMENTED FDB API FUNCTION\\n\"); abort(); }\n\n")
+    hfile.write(
+        "void fdb_api_ptr_removed() { fprintf(stderr, \"REMOVED FDB API FUNCTION\\n\"); abort(); }\n\n")
+
+    for f in functions:
+        if platform == "windows":
+            hfile.write("extern \"C\" ")
+        hfile.write("void* fdb_api_ptr_%s = (void*)&fdb_api_ptr_unimpl;\n" % f)
+        for v in functions[f]:
+            hfile.write("#define %s_v%d_PREV %s_v%d\n" % (f, v, f, v - 1))
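Reviewer note: to make the generator's output concrete, for a hypothetical exported function fdb_transaction_get with a single API version 620 (illustrative values, not taken from this diff), the header loop above emits:

    void* fdb_api_ptr_fdb_transaction_get = (void*)&fdb_api_ptr_unimpl;
    #define fdb_transaction_get_v620_PREV fdb_transaction_get_v619

with an extern "C" prefix added on Windows, while the assembly side exposes each exported symbol as an indirect jump through its fdb_api_ptr_<name> slot (the jmp r11 in the context line above).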
@@ -10,38 +10,38 @@ macOS
 
 The macOS installation package is supported on macOS 10.7+. It includes the client and (optionally) the server.
 
-* `FoundationDB-6.1.2.pkg <https://www.foundationdb.org/downloads/6.1.2/macOS/installers/FoundationDB-6.1.2.pkg>`_
+* `FoundationDB-6.1.4.pkg <https://www.foundationdb.org/downloads/6.1.4/macOS/installers/FoundationDB-6.1.4.pkg>`_
 
 Ubuntu
 ------
 
 The Ubuntu packages are supported on 64-bit Ubuntu 12.04+, but beware of the Linux kernel bug in Ubuntu 12.x.
 
-* `foundationdb-clients-6.1.2-1_amd64.deb <https://www.foundationdb.org/downloads/6.1.2/ubuntu/installers/foundationdb-clients_6.1.2-1_amd64.deb>`_
-* `foundationdb-server-6.1.2-1_amd64.deb <https://www.foundationdb.org/downloads/6.1.2/ubuntu/installers/foundationdb-server_6.1.2-1_amd64.deb>`_ (depends on the clients package)
+* `foundationdb-clients-6.1.4-1_amd64.deb <https://www.foundationdb.org/downloads/6.1.4/ubuntu/installers/foundationdb-clients_6.1.4-1_amd64.deb>`_
+* `foundationdb-server-6.1.4-1_amd64.deb <https://www.foundationdb.org/downloads/6.1.4/ubuntu/installers/foundationdb-server_6.1.4-1_amd64.deb>`_ (depends on the clients package)
 
 RHEL/CentOS EL6
 ---------------
 
 The RHEL/CentOS EL6 packages are supported on 64-bit RHEL/CentOS 6.x.
 
-* `foundationdb-clients-6.1.2-1.el6.x86_64.rpm <https://www.foundationdb.org/downloads/6.1.2/rhel6/installers/foundationdb-clients-6.1.2-1.el6.x86_64.rpm>`_
-* `foundationdb-server-6.1.2-1.el6.x86_64.rpm <https://www.foundationdb.org/downloads/6.1.2/rhel6/installers/foundationdb-server-6.1.2-1.el6.x86_64.rpm>`_ (depends on the clients package)
+* `foundationdb-clients-6.1.4-1.el6.x86_64.rpm <https://www.foundationdb.org/downloads/6.1.4/rhel6/installers/foundationdb-clients-6.1.4-1.el6.x86_64.rpm>`_
+* `foundationdb-server-6.1.4-1.el6.x86_64.rpm <https://www.foundationdb.org/downloads/6.1.4/rhel6/installers/foundationdb-server-6.1.4-1.el6.x86_64.rpm>`_ (depends on the clients package)
 
 RHEL/CentOS EL7
 ---------------
 
 The RHEL/CentOS EL7 packages are supported on 64-bit RHEL/CentOS 7.x.
 
-* `foundationdb-clients-6.1.2-1.el7.x86_64.rpm <https://www.foundationdb.org/downloads/6.1.2/rhel7/installers/foundationdb-clients-6.1.2-1.el7.x86_64.rpm>`_
-* `foundationdb-server-6.1.2-1.el7.x86_64.rpm <https://www.foundationdb.org/downloads/6.1.2/rhel7/installers/foundationdb-server-6.1.2-1.el7.x86_64.rpm>`_ (depends on the clients package)
+* `foundationdb-clients-6.1.4-1.el7.x86_64.rpm <https://www.foundationdb.org/downloads/6.1.4/rhel7/installers/foundationdb-clients-6.1.4-1.el7.x86_64.rpm>`_
++* `foundationdb-server-6.1.4-1.el7.x86_64.rpm <https://www.foundationdb.org/downloads/6.1.4/rhel7/installers/foundationdb-server-6.1.4-1.el7.x86_64.rpm>`_ (depends on the clients package)
 
 Windows
 -------
 
 The Windows installer is supported on 64-bit Windows XP and later. It includes the client and (optionally) the server.
 
-* `foundationdb-6.1.2-x64.msi <https://www.foundationdb.org/downloads/6.1.2/windows/installers/foundationdb-6.1.2-x64.msi>`_
+* `foundationdb-6.1.4-x64.msi <https://www.foundationdb.org/downloads/6.1.4/windows/installers/foundationdb-6.1.4-x64.msi>`_
 
 API Language Bindings
 =====================
@@ -58,18 +58,18 @@ On macOS and Windows, the FoundationDB Python API bindings are installed as part
 
 If you need to use the FoundationDB Python API from other Python installations or paths, download the Python package:
 
-* `foundationdb-6.1.2.tar.gz <https://www.foundationdb.org/downloads/6.1.2/bindings/python/foundationdb-6.1.2.tar.gz>`_
+* `foundationdb-6.1.4.tar.gz <https://www.foundationdb.org/downloads/6.1.4/bindings/python/foundationdb-6.1.4.tar.gz>`_
 
 Ruby 1.9.3/2.0.0+
 -----------------
 
-* `fdb-6.1.2.gem <https://www.foundationdb.org/downloads/6.1.2/bindings/ruby/fdb-6.1.2.gem>`_
+* `fdb-6.1.4.gem <https://www.foundationdb.org/downloads/6.1.4/bindings/ruby/fdb-6.1.4.gem>`_
 
 Java 8+
 -------
 
-* `fdb-java-6.1.2.jar <https://www.foundationdb.org/downloads/6.1.2/bindings/java/fdb-java-6.1.2.jar>`_
-* `fdb-java-6.1.2-javadoc.jar <https://www.foundationdb.org/downloads/6.1.2/bindings/java/fdb-java-6.1.2-javadoc.jar>`_
+* `fdb-java-6.1.4.jar <https://www.foundationdb.org/downloads/6.1.4/bindings/java/fdb-java-6.1.4.jar>`_
+* `fdb-java-6.1.4-javadoc.jar <https://www.foundationdb.org/downloads/6.1.4/bindings/java/fdb-java-6.1.4-javadoc.jar>`_
 
 Go 1.1+
 -------
@@ -2,15 +2,7 @@
 Release Notes
 #############
 
-6.1.2
-=====
-
-Fixes
------
-
-* Consistency check could report inaccurate shard size estimates if there were enough keys with large values and a small number of keys with small values. `(PR #1468) <https://github.com/apple/foundationdb/pull/1468>`_.
-
-6.1.0
+6.1.4
 =====
 
 Features
@@ -55,6 +47,9 @@ Performance
 
 * Increased the get read version batch size in the client. This change reduces the load on the proxies when doing many transactions with only a few operations per transaction. `(PR #1311) <https://github.com/apple/foundationdb/pull/1311>`_
 * Clients no longer attempt to connect to the master during recovery. `(PR #1317) <https://github.com/apple/foundationdb/pull/1317>`_
+* Increase the rate that deleted pages are made available for reuse in the SQLite storage engine. Rename and add knobs to provide more control over this process. [6.1.3] `(PR #1485) <https://github.com/apple/foundationdb/pull/1485>`_
+* SQLite page files now grow and shrink in chunks based on a knob which defaults to an effective chunk size of 100MB. [6.1.4] `(PR #1482) <https://github.com/apple/foundationdb/pull/1482>`_ `(PR #1499) <https://github.com/apple/foundationdb/pull/1499>`_
+* Reduced the rate at which data is moved between servers, to reduce the impact a failure has on cluster performance. [6.1.4] `(PR #1499) <https://github.com/apple/foundationdb/pull/1499>`_
 
 Fixes
 -----
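Reviewer note: the 100MB figure in the chunk-size bullet is consistent with the knob defaults added later in this diff: SQLITE_CHUNK_SIZE_PAGES = 25600 pages x 4096 bytes/page = 104,857,600 bytes (100MiB), and SQLITE_CHUNK_SIZE_PAGES_SIM = 1024 pages x 4096 bytes/page = 4MiB for simulation.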
@@ -76,6 +71,10 @@ Fixes
 * Windows: Fixed a crash when deleting files. `(Issue #1380) <https://github.com/apple/foundationdb/issues/1380>`_ (by KrzysFR)
 * Starting a restore on a tag already in-use would hang and the process would eventually run out of memory. `(PR #1394) <https://github.com/apple/foundationdb/pull/1394>`_
 * The ``proxy_memory_limit_exceeded`` error was treated as retryable, but ``fdb_error_predicate`` returned that it is not retryable. `(PR #1438) <https://github.com/apple/foundationdb/pull/1438>`_.
+* Consistency check could report inaccurate shard size estimates if there were enough keys with large values and a small number of keys with small values. [6.1.3] `(PR #1468) <https://github.com/apple/foundationdb/pull/1468>`_.
+* Storage servers could not rejoin the cluster when the proxies were saturated. [6.1.4] `(PR #1486) <https://github.com/apple/foundationdb/pull/1486>`_ `(PR #1499) <https://github.com/apple/foundationdb/pull/1499>`_
+* The ``configure`` command in ``fdbcli`` returned successfully even when the configuration was not changed for some error types. [6.1.4] `(PR #1509) <https://github.com/apple/foundationdb/pull/1509>`_
+* Safety protections in the ``configure`` command in ``fdbcli`` would trigger spuriously when changing between ``three_datacenter`` replication and a region configuration. [6.1.4] `(PR #1509) <https://github.com/apple/foundationdb/pull/1509>`_
 
 Status
 ------
@@ -122,6 +121,7 @@ Fixes only impacting 6.1.0+
 
 * The ``consistencycheck`` fdbserver role would repeatedly exit. [6.1.1] `(PR #1437) <https://github.com/apple/foundationdb/pull/1437>`_
 * The ``consistencycheck`` fdbserver role could proceed at a very slow rate after inserting data into an empty database. [6.1.2] `(PR #1452) <https://github.com/apple/foundationdb/pull/1452>`_
+* The background actor which removes redundant teams could leave data unbalanced. [6.1.3] `(PR #1479) <https://github.com/apple/foundationdb/pull/1479>`_
 
 Earlier release notes
 ---------------------
@@ -143,4 +143,4 @@ Earlier release notes
 * :doc:`Beta 2 (API Version 22) </old-release-notes/release-notes-022>`
 * :doc:`Beta 1 (API Version 21) </old-release-notes/release-notes-021>`
 * :doc:`Alpha 6 (API Version 16) </old-release-notes/release-notes-016>`
-* :doc:`Alpha 5 (API Version 14) </old-release-notes/release-notes-014>`
+* :doc:`Alpha 5 (API Version 14) </old-release-notes/release-notes-014>`
@@ -1665,32 +1665,42 @@ ACTOR Future<bool> configure( Database db, std::vector<StringRef> tokens, Refere
 		case ConfigurationResult::DATABASE_UNAVAILABLE:
 			printf("ERROR: The database is unavailable\n");
 			printf("Type `configure FORCE <TOKEN>*' to configure without this check\n");
-			ret=false;
+			ret=true;
 			break;
 		case ConfigurationResult::STORAGE_IN_UNKNOWN_DCID:
 			printf("ERROR: All storage servers must be in one of the known regions\n");
 			printf("Type `configure FORCE <TOKEN>*' to configure without this check\n");
-			ret=false;
+			ret=true;
 			break;
 		case ConfigurationResult::REGION_NOT_FULLY_REPLICATED:
 			printf("ERROR: When usable_regions > 1, all regions with priority >= 0 must be fully replicated before changing the configuration\n");
 			printf("Type `configure FORCE <TOKEN>*' to configure without this check\n");
-			ret=false;
+			ret=true;
 			break;
 		case ConfigurationResult::MULTIPLE_ACTIVE_REGIONS:
 			printf("ERROR: When changing usable_regions, only one region can have priority >= 0\n");
 			printf("Type `configure FORCE <TOKEN>*' to configure without this check\n");
-			ret=false;
+			ret=true;
 			break;
 		case ConfigurationResult::REGIONS_CHANGED:
 			printf("ERROR: The region configuration cannot be changed while simultaneously changing usable_regions\n");
 			printf("Type `configure FORCE <TOKEN>*' to configure without this check\n");
-			ret=false;
+			ret=true;
 			break;
 		case ConfigurationResult::NOT_ENOUGH_WORKERS:
 			printf("ERROR: Not enough processes exist to support the specified configuration\n");
 			printf("Type `configure FORCE <TOKEN>*' to configure without this check\n");
-			ret=false;
+			ret=true;
 			break;
+		case ConfigurationResult::REGION_REPLICATION_MISMATCH:
+			printf("ERROR: `three_datacenter' replication is incompatible with region configuration\n");
+			printf("Type `configure FORCE <TOKEN>*' to configure without this check\n");
+			ret=true;
+			break;
+		case ConfigurationResult::DCID_MISSING:
+			printf("ERROR: `No storage servers in one of the specified regions\n");
+			printf("Type `configure FORCE <TOKEN>*' to configure without this check\n");
+			ret=true;
+			break;
 		case ConfigurationResult::SUCCESS:
 			printf("Configuration changed\n");
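Reviewer note: in these fdbcli handlers the returned bool is the command's error flag, so flipping ret=false to ret=true makes each rejected configuration count as a failed command; this is the mechanism behind the 6.1.4 fix note above about configure previously "returning successfully" without changing anything. A minimal sketch of the assumed calling convention (hypothetical variable names, not verbatim from fdbcli):

    bool err = wait( configure( db, tokens, /* ... */ ) );
    if (err) is_error = true;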
@@ -1779,32 +1789,42 @@ ACTOR Future<bool> fileConfigure(Database db, std::string filePath, bool isNewDa
 		case ConfigurationResult::DATABASE_UNAVAILABLE:
 			printf("ERROR: The database is unavailable\n");
 			printf("Type `fileconfigure FORCE <FILENAME>' to configure without this check\n");
-			ret=false;
+			ret=true;
 			break;
 		case ConfigurationResult::STORAGE_IN_UNKNOWN_DCID:
 			printf("ERROR: All storage servers must be in one of the known regions\n");
 			printf("Type `fileconfigure FORCE <FILENAME>' to configure without this check\n");
-			ret=false;
+			ret=true;
 			break;
 		case ConfigurationResult::REGION_NOT_FULLY_REPLICATED:
 			printf("ERROR: When usable_regions > 1, All regions with priority >= 0 must be fully replicated before changing the configuration\n");
 			printf("Type `fileconfigure FORCE <FILENAME>' to configure without this check\n");
-			ret=false;
+			ret=true;
 			break;
 		case ConfigurationResult::MULTIPLE_ACTIVE_REGIONS:
 			printf("ERROR: When changing usable_regions, only one region can have priority >= 0\n");
 			printf("Type `fileconfigure FORCE <FILENAME>' to configure without this check\n");
-			ret=false;
+			ret=true;
 			break;
 		case ConfigurationResult::REGIONS_CHANGED:
 			printf("ERROR: The region configuration cannot be changed while simultaneously changing usable_regions\n");
 			printf("Type `fileconfigure FORCE <FILENAME>' to configure without this check\n");
-			ret=false;
+			ret=true;
 			break;
 		case ConfigurationResult::NOT_ENOUGH_WORKERS:
 			printf("ERROR: Not enough processes exist to support the specified configuration\n");
 			printf("Type `fileconfigure FORCE <FILENAME>' to configure without this check\n");
-			ret=false;
+			ret=true;
 			break;
+		case ConfigurationResult::REGION_REPLICATION_MISMATCH:
+			printf("ERROR: `three_datacenter' replication is incompatible with region configuration\n");
+			printf("Type `fileconfigure FORCE <TOKEN>*' to configure without this check\n");
+			ret=true;
+			break;
+		case ConfigurationResult::DCID_MISSING:
+			printf("ERROR: `No storage servers in one of the specified regions\n");
+			printf("Type `fileconfigure FORCE <TOKEN>*' to configure without this check\n");
+			ret=true;
+			break;
 		case ConfigurationResult::SUCCESS:
 			printf("Configuration changed\n");
@@ -304,6 +304,7 @@ ACTOR Future<ConfigurationResult::Type> changeConfig( Database cx, std::map<std:
 
 	state Future<Void> tooLong = delay(4.5);
 	state Key versionKey = BinaryWriter::toValue(g_random->randomUniqueID(),Unversioned());
+	state bool oldReplicationUsesDcId = false;
 	loop {
 		try {
 			tr.setOption( FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE );
@@ -330,6 +331,12 @@ ACTOR Future<ConfigurationResult::Type> changeConfig( Database cx, std::map<std:
 			if(!newConfig.isValid()) {
 				return ConfigurationResult::INVALID_CONFIGURATION;
 			}
 
+			if(newConfig.tLogPolicy->attributeKeys().count("dcid") && newConfig.regions.size()>0) {
+				return ConfigurationResult::REGION_REPLICATION_MISMATCH;
+			}
+
+			oldReplicationUsesDcId = oldReplicationUsesDcId || oldConfig.tLogPolicy->attributeKeys().count("dcid");
+
 			if(oldConfig.usableRegions != newConfig.usableRegions) {
 				//cannot change region configuration
@@ -358,21 +365,45 @@ ACTOR Future<ConfigurationResult::Type> changeConfig( Database cx, std::map<std:
 			state Future<Standalone<RangeResultRef>> fServerList = (newConfig.regions.size()) ? tr.getRange( serverListKeys, CLIENT_KNOBS->TOO_MANY ) : Future<Standalone<RangeResultRef>>();
 
 			if(newConfig.usableRegions==2) {
-				//all regions with priority >= 0 must be fully replicated
-				state std::vector<Future<Optional<Value>>> replicasFutures;
-				for(auto& it : newConfig.regions) {
-					if(it.priority >= 0) {
-						replicasFutures.push_back(tr.get(datacenterReplicasKeyFor(it.dcId)));
-					}
-				}
-				wait( waitForAll(replicasFutures) || tooLong );
-
-				for(auto& it : replicasFutures) {
-					if(!it.isReady()) {
-						return ConfigurationResult::DATABASE_UNAVAILABLE;
-					}
-					if(!it.get().present()) {
-						return ConfigurationResult::REGION_NOT_FULLY_REPLICATED;
-					}
-				}
-			}
+				if(oldReplicationUsesDcId) {
+					state Future<Standalone<RangeResultRef>> fLocalityList = tr.getRange( tagLocalityListKeys, CLIENT_KNOBS->TOO_MANY );
+					wait( success(fLocalityList) || tooLong );
+					if(!fLocalityList.isReady()) {
+						return ConfigurationResult::DATABASE_UNAVAILABLE;
+					}
+					Standalone<RangeResultRef> localityList = fLocalityList.get();
+					ASSERT( !localityList.more && localityList.size() < CLIENT_KNOBS->TOO_MANY );
+
+					std::set<Key> localityDcIds;
+					for(auto& s : localityList) {
+						auto dc = decodeTagLocalityListKey( s.key );
+						if(dc.present()) {
+							localityDcIds.insert(dc.get());
+						}
+					}
+
+					for(auto& it : newConfig.regions) {
+						if(localityDcIds.count(it.dcId) == 0) {
+							return ConfigurationResult::DCID_MISSING;
+						}
+					}
+				} else {
+					//all regions with priority >= 0 must be fully replicated
+					state std::vector<Future<Optional<Value>>> replicasFutures;
+					for(auto& it : newConfig.regions) {
+						if(it.priority >= 0) {
+							replicasFutures.push_back(tr.get(datacenterReplicasKeyFor(it.dcId)));
+						}
+					}
+					wait( waitForAll(replicasFutures) || tooLong );
+
+					for(auto& it : replicasFutures) {
+						if(!it.isReady()) {
+							return ConfigurationResult::DATABASE_UNAVAILABLE;
+						}
+						if(!it.get().present()) {
+							return ConfigurationResult::REGION_NOT_FULLY_REPLICATED;
+						}
+					}
+				}
+			}
@@ -390,12 +421,16 @@ ACTOR Future<ConfigurationResult::Type> changeConfig( Database cx, std::map<std:
 				for(auto& it : newConfig.regions) {
 					newDcIds.insert(it.dcId);
 				}
+				std::set<Key> missingDcIds;
 				for(auto& s : serverList) {
 					auto ssi = decodeServerListValue( s.value );
 					if ( !ssi.locality.dcId().present() || !newDcIds.count(ssi.locality.dcId().get()) ) {
-						return ConfigurationResult::STORAGE_IN_UNKNOWN_DCID;
+						missingDcIds.insert(ssi.locality.dcId().get());
 					}
 				}
+				if(missingDcIds.size() > (oldReplicationUsesDcId ? 1 : 0)) {
+					return ConfigurationResult::STORAGE_IN_UNKNOWN_DCID;
+				}
 			}
 
 			wait( success(fWorkers) || tooLong );
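Reviewer note: the rewritten check collects unknown datacenters instead of failing on the first one, then tolerates exactly one when the previous replication policy was keyed on dcid. Worked example: with oldReplicationUsesDcId true and storage servers spread across {dc1, dc2, dc3} while the new region configuration names only {dc1, dc2}, missingDcIds = {dc3}, so 1 > 1 is false and the change is accepted; a second unknown DC would still fail with STORAGE_IN_UNKNOWN_DCID. With oldReplicationUsesDcId false, the old strict behavior (any unknown DC fails) is preserved.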
@@ -59,6 +59,8 @@ public:
 		MULTIPLE_ACTIVE_REGIONS,
 		REGIONS_CHANGED,
 		NOT_ENOUGH_WORKERS,
+		REGION_REPLICATION_MISMATCH,
+		DCID_MISSING,
 		SUCCESS
 	};
 };
@@ -65,6 +65,7 @@ struct MasterProxyInterface {
 		getConsistentReadVersion.getEndpoint(TaskProxyGetConsistentReadVersion);
 		getRawCommittedVersion.getEndpoint(TaskProxyGetRawCommittedVersion);
 		commit.getEndpoint(TaskProxyCommitDispatcher);
+		getStorageServerRejoinInfo.getEndpoint(TaskProxyStorageRejoin);
 		//getKeyServersLocations.getEndpoint(TaskProxyGetKeyServersLocations); //do not increase the priority of these requests, because clients cans bring down the cluster with too many of these messages.
 	}
 };
@@ -259,6 +259,8 @@ public:
 		int result = -1;
 		KAIOLogEvent(logFile, id, OpLogEntry::TRUNCATE, OpLogEntry::START, size / 4096);
 		bool completed = false;
+		double begin = timer_monotonic();
+
 		if( ctx.fallocateSupported && size >= lastFileSize ) {
 			result = fallocate( fd, 0, 0, size);
 			if (result != 0) {
@@ -278,6 +280,12 @@ public:
 		if ( !completed )
 			result = ftruncate(fd, size);
 
+		double end = timer_monotonic();
+		if(g_nondeterministic_random->random01() < end-begin) {
+			TraceEvent("SlowKAIOTruncate")
+				.detail("TruncateTime", end - begin)
+				.detail("TruncateBytes", size - lastFileSize);
+		}
 		KAIOLogEvent(logFile, id, OpLogEntry::TRUNCATE, OpLogEntry::COMPLETE, size / 4096, result);
 
 		if(result != 0) {
@@ -745,11 +745,7 @@ struct DDTeamCollection : ReferenceCounted<DDTeamCollection> {
 				sources.insert( req.sources[i] );
 
 			for( int i = 0; i < req.sources.size(); i++ ) {
-				if( !self->server_info.count( req.sources[i] ) ) {
-					TEST( true ); // GetSimilarTeams source server now unknown
-					TraceEvent(SevWarn, "GetTeam").detail("ReqSourceUnknown", req.sources[i]);
-				}
-				else {
+				if( self->server_info.count( req.sources[i] ) ) {
 					auto& teamList = self->server_info[ req.sources[i] ]->teams;
 					for( int j = 0; j < teamList.size(); j++ ) {
 						if( teamList[j]->isHealthy() && (!req.preferLowerUtilization || teamList[j]->hasHealthyFreeSpace())) {
@@ -2322,6 +2318,8 @@ ACTOR Future<Void> teamRemover(DDTeamCollection* self) {
 				TEST(true);
 			}
 
+			self->doBuildTeams = true;
+
 			if (self->badTeamRemover.isReady()) {
 				self->badTeamRemover = removeBadTeams(self);
 				self->addActor.send(self->badTeamRemover);
@@ -51,7 +51,8 @@ struct RelocateData {
 			rs.priority == PRIORITY_REBALANCE_SHARD ||
 			rs.priority == PRIORITY_REBALANCE_OVERUTILIZED_TEAM ||
 			rs.priority == PRIORITY_REBALANCE_UNDERUTILIZED_TEAM ||
-			rs.priority == PRIORITY_SPLIT_SHARD ), interval("QueuedRelocation") {}
+			rs.priority == PRIORITY_SPLIT_SHARD ||
+			rs.priority == PRIORITY_TEAM_REDUNDANT ), interval("QueuedRelocation") {}
 
 	bool operator> (const RelocateData& rhs) const {
 		return priority != rhs.priority ? priority > rhs.priority : ( startTime != rhs.startTime ? startTime < rhs.startTime : randomId > rhs.randomId );
@@ -1325,6 +1325,16 @@ void SQLiteDB::open(bool writable) {
 	int result = sqlite3_open_v2(apath.c_str(), &db, (writable ? SQLITE_OPEN_READWRITE : SQLITE_OPEN_READONLY), NULL);
 	checkError("open", result);
 
+	int chunkSize;
+	if( !g_network->isSimulated() ) {
+		chunkSize = 4096 * SERVER_KNOBS->SQLITE_CHUNK_SIZE_PAGES;
+	} else if( BUGGIFY ) {
+		chunkSize = 4096 * g_random->randomInt(0, 100);
+	} else {
+		chunkSize = 4096 * SERVER_KNOBS->SQLITE_CHUNK_SIZE_PAGES_SIM;
+	}
+	checkError("setChunkSize", sqlite3_file_control(db, nullptr, SQLITE_FCNTL_CHUNK_SIZE, &chunkSize));
+
 	btree = db->aDb[0].pBt;
 	initPagerCodec();
 
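Reviewer note: SQLITE_FCNTL_CHUNK_SIZE is a stock SQLite file-control verb whose argument is a pointer to an int giving the chunk size in bytes; the VFS receiving it is expected to round file allocations up to multiples of that size, which is exactly the verb the VFSAsyncFileControl change later in this diff starts honoring. A standalone sketch of the same call against an ordinary sqlite3 handle (hypothetical db handle, for illustration):

    int chunkBytes = 25600 * 4096; // 100MB, mirroring SQLITE_CHUNK_SIZE_PAGES
    int rc = sqlite3_file_control(db, "main", SQLITE_FCNTL_CHUNK_SIZE, &chunkBytes);

Passing nullptr for the database name, as the diff does, also targets the main database.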
@@ -1431,7 +1441,12 @@ public:
 	KeyValueStoreSQLite(std::string const& filename, UID logID, KeyValueStoreType type, bool checkChecksums, bool checkIntegrity);
 	~KeyValueStoreSQLite();
 
-	Future<Void> doClean();
+	struct SpringCleaningWorkPerformed {
+		int lazyDeletePages = 0;
+		int vacuumedPages = 0;
+	};
+
+	Future<SpringCleaningWorkPerformed> doClean();
 	void startReadThreads();
 
 private:
@@ -1704,15 +1719,17 @@ private:
 	}
 
 	struct SpringCleaningAction : TypedAction<Writer, SpringCleaningAction>, FastAllocated<SpringCleaningAction> {
-		ThreadReturnPromise<Void> result;
-		virtual double getTimeEstimate() { return SERVER_KNOBS->SPRING_CLEANING_TIME_ESTIMATE; }
+		ThreadReturnPromise<SpringCleaningWorkPerformed> result;
+		virtual double getTimeEstimate() {
+			return std::max(SERVER_KNOBS->SPRING_CLEANING_LAZY_DELETE_TIME_ESTIMATE, SERVER_KNOBS->SPRING_CLEANING_VACUUM_TIME_ESTIMATE);
+		}
 	};
 	void action(SpringCleaningAction& a) {
 		double s = now();
-		double end = now() + SERVER_KNOBS->SPRING_CLEANING_TIME_ESTIMATE;
+		double lazyDeleteEnd = now() + SERVER_KNOBS->SPRING_CLEANING_LAZY_DELETE_TIME_ESTIMATE;
+		double vacuumEnd = now() + SERVER_KNOBS->SPRING_CLEANING_VACUUM_TIME_ESTIMATE;
 
-		int lazyDeletePages = 0;
-		int vacuumedPages = 0;
+		SpringCleaningWorkPerformed workPerformed;
 
 		double lazyDeleteTime = 0;
 		double vacuumTime = 0;
@@ -1722,8 +1739,13 @@ private:
 
 		loop {
 			double begin = now();
-			bool canDelete = !freeTableEmpty && (now() < end || lazyDeletePages < SERVER_KNOBS->SPRING_CLEANING_MIN_LAZY_DELETE_PAGES) && lazyDeletePages < SERVER_KNOBS->SPRING_CLEANING_MAX_LAZY_DELETE_PAGES;
-			bool canVacuum = !vacuumFinished && (now() < end || vacuumedPages < SERVER_KNOBS->SPRING_CLEANING_MIN_VACUUM_PAGES) && vacuumedPages < SERVER_KNOBS->SPRING_CLEANING_MAX_VACUUM_PAGES;
+			bool canDelete = !freeTableEmpty
+				&& (now() < lazyDeleteEnd || workPerformed.lazyDeletePages < SERVER_KNOBS->SPRING_CLEANING_MIN_LAZY_DELETE_PAGES)
+				&& workPerformed.lazyDeletePages < SERVER_KNOBS->SPRING_CLEANING_MAX_LAZY_DELETE_PAGES;
+
+			bool canVacuum = !vacuumFinished
+				&& (now() < vacuumEnd || workPerformed.vacuumedPages < SERVER_KNOBS->SPRING_CLEANING_MIN_VACUUM_PAGES)
+				&& workPerformed.vacuumedPages < SERVER_KNOBS->SPRING_CLEANING_MAX_VACUUM_PAGES;
 
 			if(!canDelete && !canVacuum) {
 				break;
@@ -1733,10 +1755,10 @@ private:
 				TEST(canVacuum); // SQLite lazy deletion when vacuuming is active
 				TEST(!canVacuum); // SQLite lazy deletion when vacuuming is inactive
 
-				int pagesToDelete = std::max(1, std::min(SERVER_KNOBS->SPRING_CLEANING_LAZY_DELETE_BATCH_SIZE, SERVER_KNOBS->SPRING_CLEANING_MAX_LAZY_DELETE_PAGES - lazyDeletePages));
+				int pagesToDelete = std::max(1, std::min(SERVER_KNOBS->SPRING_CLEANING_LAZY_DELETE_BATCH_SIZE, SERVER_KNOBS->SPRING_CLEANING_MAX_LAZY_DELETE_PAGES - workPerformed.lazyDeletePages));
 				int pagesDeleted = cursor->lazyDelete(pagesToDelete) ;
 				freeTableEmpty = (pagesDeleted != pagesToDelete);
-				lazyDeletePages += pagesDeleted;
+				workPerformed.lazyDeletePages += pagesDeleted;
 				lazyDeleteTime += now() - begin;
 			}
 			else {
@@ -1747,7 +1769,7 @@ private:
 
 				vacuumFinished = conn.vacuum();
 				if(!vacuumFinished) {
-					++vacuumedPages;
+					++workPerformed.vacuumedPages;
 				}
 
 				vacuumTime += now() - begin;
@@ -1758,19 +1780,19 @@ private:
 
 		freeListPages = conn.freePages();
 
-		TEST(lazyDeletePages > 0); // Pages lazily deleted
-		TEST(vacuumedPages > 0); // Pages vacuumed
+		TEST(workPerformed.lazyDeletePages > 0); // Pages lazily deleted
+		TEST(workPerformed.vacuumedPages > 0); // Pages vacuumed
 		TEST(vacuumTime > 0); // Time spent vacuuming
 		TEST(lazyDeleteTime > 0); // Time spent lazy deleting
 
 		++springCleaningStats.springCleaningCount;
-		springCleaningStats.lazyDeletePages += lazyDeletePages;
-		springCleaningStats.vacuumedPages += vacuumedPages;
+		springCleaningStats.lazyDeletePages += workPerformed.lazyDeletePages;
+		springCleaningStats.vacuumedPages += workPerformed.vacuumedPages;
 		springCleaningStats.springCleaningTime += now() - s;
 		springCleaningStats.vacuumTime += vacuumTime;
 		springCleaningStats.lazyDeleteTime += lazyDeleteTime;
 
-		a.result.send(Void());
+		a.result.send(workPerformed);
 		++writesComplete;
 		if (g_network->isSimulated() && g_simulator.getCurrentProcess()->rebooting)
 			TraceEvent("SpringCleaningActionFinished", dbgid).detail("Elapsed", now()-s);
@@ -1849,9 +1871,22 @@ IKeyValueStore* keyValueStoreSQLite( std::string const& filename, UID logID, Key
 }
 
 ACTOR Future<Void> cleanPeriodically( KeyValueStoreSQLite* self ) {
+	wait(delayJittered(SERVER_KNOBS->SPRING_CLEANING_NO_ACTION_INTERVAL));
 	loop {
-		wait( delayJittered(SERVER_KNOBS->CLEANING_INTERVAL) );
-		wait( self->doClean() );
+		KeyValueStoreSQLite::SpringCleaningWorkPerformed workPerformed = wait(self->doClean());
+
+		double duration = std::numeric_limits<double>::max();
+		if (workPerformed.lazyDeletePages >= SERVER_KNOBS->SPRING_CLEANING_LAZY_DELETE_BATCH_SIZE) {
+			duration = std::min(duration, SERVER_KNOBS->SPRING_CLEANING_LAZY_DELETE_INTERVAL);
+		}
+		if (workPerformed.vacuumedPages > 0) {
+			duration = std::min(duration, SERVER_KNOBS->SPRING_CLEANING_VACUUM_INTERVAL);
+		}
+		if (duration == std::numeric_limits<double>::max()) {
+			duration = SERVER_KNOBS->SPRING_CLEANING_NO_ACTION_INTERVAL;
+		}
+
+		wait(delayJittered(duration));
 	}
 }
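Reviewer note: with the knob defaults added below (SPRING_CLEANING_LAZY_DELETE_INTERVAL = 0.1, SPRING_CLEANING_VACUUM_INTERVAL = 1.0, SPRING_CLEANING_NO_ACTION_INTERVAL = 1.0), the rewritten loop polls ten times faster as long as each pass consumes a full lazy-delete batch, drops back to one-second polling while only vacuuming makes progress, and sleeps one second after an idle pass; the old loop always waited the fixed CLEANING_INTERVAL regardless of how much work the previous pass found.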
@@ -1957,7 +1992,7 @@ Future<Standalone<VectorRef<KeyValueRef>>> KeyValueStoreSQLite::readRange( KeyRa
 	readThreads->post(p);
 	return f;
 }
-Future<Void> KeyValueStoreSQLite::doClean() {
+Future<KeyValueStoreSQLite::SpringCleaningWorkPerformed> KeyValueStoreSQLite::doClean() {
 	++writesRequested;
 	auto p = new Writer::SpringCleaningAction;
 	auto f = p->result.getFuture();
@@ -81,7 +81,7 @@ ServerKnobs::ServerKnobs(bool randomize, ClientKnobs* clientKnobs) {
 	init( BEST_TEAM_STUCK_DELAY, 1.0 );
 	init( BG_DD_POLLING_INTERVAL, 10.0 );
 	init( DD_QUEUE_LOGGING_INTERVAL, 5.0 );
-	init( RELOCATION_PARALLELISM_PER_SOURCE_SERVER, 4 ); if( randomize && BUGGIFY ) RELOCATION_PARALLELISM_PER_SOURCE_SERVER = 1;
+	init( RELOCATION_PARALLELISM_PER_SOURCE_SERVER, 2 ); if( randomize && BUGGIFY ) RELOCATION_PARALLELISM_PER_SOURCE_SERVER = 1;
 	init( DD_QUEUE_MAX_KEY_SERVERS, 100 ); if( randomize && BUGGIFY ) DD_QUEUE_MAX_KEY_SERVERS = 1;
 	init( DD_REBALANCE_PARALLELISM, 50 );
 	init( DD_REBALANCE_RESET_AMOUNT, 30 );
@@ -195,6 +195,8 @@ ServerKnobs::ServerKnobs(bool randomize, ClientKnobs* clientKnobs) {
 
 	init( SQLITE_PAGE_SCAN_ERROR_LIMIT, 10000 );
 	init( SQLITE_BTREE_PAGE_USABLE, 4096 - 8); // pageSize - reserveSize for page checksum
+	init( SQLITE_CHUNK_SIZE_PAGES, 25600 ); // 100MB
+	init( SQLITE_CHUNK_SIZE_PAGES_SIM, 1024 ); // 4MB
 
 	// Maximum and minimum cell payload bytes allowed on primary page as calculated in SQLite.
 	// These formulas are copied from SQLite, using its hardcoded constants, so if you are
@@ -219,8 +221,11 @@ ServerKnobs::ServerKnobs(bool randomize, ClientKnobs* clientKnobs) {
 	init( SQLITE_FRAGMENT_MIN_SAVINGS, 0.20 );
 
 	// KeyValueStoreSqlite spring cleaning
-	init( CLEANING_INTERVAL, 1.0 );
-	init( SPRING_CLEANING_TIME_ESTIMATE, .010 );
+	init( SPRING_CLEANING_NO_ACTION_INTERVAL, 1.0 ); if( randomize && BUGGIFY ) SPRING_CLEANING_NO_ACTION_INTERVAL = g_random->coinflip() ? 0.1 : g_random->random01() * 5;
+	init( SPRING_CLEANING_LAZY_DELETE_INTERVAL, 0.1 ); if( randomize && BUGGIFY ) SPRING_CLEANING_LAZY_DELETE_INTERVAL = g_random->coinflip() ? 1.0 : g_random->random01() * 5;
+	init( SPRING_CLEANING_VACUUM_INTERVAL, 1.0 ); if( randomize && BUGGIFY ) SPRING_CLEANING_VACUUM_INTERVAL = g_random->coinflip() ? 0.1 : g_random->random01() * 5;
+	init( SPRING_CLEANING_LAZY_DELETE_TIME_ESTIMATE, .010 ); if( randomize && BUGGIFY ) SPRING_CLEANING_LAZY_DELETE_TIME_ESTIMATE = g_random->random01() * 5;
+	init( SPRING_CLEANING_VACUUM_TIME_ESTIMATE, .010 ); if( randomize && BUGGIFY ) SPRING_CLEANING_VACUUM_TIME_ESTIMATE = g_random->random01() * 5;
 	init( SPRING_CLEANING_VACUUMS_PER_LAZY_DELETE_PAGE, 0.0 ); if( randomize && BUGGIFY ) SPRING_CLEANING_VACUUMS_PER_LAZY_DELETE_PAGE = g_random->coinflip() ? 1e9 : g_random->random01() * 5;
 	init( SPRING_CLEANING_MIN_LAZY_DELETE_PAGES, 0 ); if( randomize && BUGGIFY ) SPRING_CLEANING_MIN_LAZY_DELETE_PAGES = g_random->randomInt(1, 100);
 	init( SPRING_CLEANING_MAX_LAZY_DELETE_PAGES, 1e9 ); if( randomize && BUGGIFY ) SPRING_CLEANING_MAX_LAZY_DELETE_PAGES = g_random->coinflip() ? 0 : g_random->randomInt(1, 1e4);
@@ -394,7 +399,7 @@ ServerKnobs::ServerKnobs(bool randomize, ClientKnobs* clientKnobs) {
 	init( STORAGE_LIMIT_BYTES, 500000 );
 	init( BUGGIFY_LIMIT_BYTES, 1000 );
 	init( FETCH_BLOCK_BYTES, 2e6 );
-	init( FETCH_KEYS_PARALLELISM_BYTES, 5e6 ); if( randomize && BUGGIFY ) FETCH_KEYS_PARALLELISM_BYTES = 4e6;
+	init( FETCH_KEYS_PARALLELISM_BYTES, 4e6 ); if( randomize && BUGGIFY ) FETCH_KEYS_PARALLELISM_BYTES = 3e6;
 	init( BUGGIFY_BLOCK_BYTES, 10000 );
 	init( STORAGE_COMMIT_BYTES, 10000000 ); if( randomize && BUGGIFY ) STORAGE_COMMIT_BYTES = 2000000;
 	init( STORAGE_COMMIT_INTERVAL, 0.5 ); if( randomize && BUGGIFY ) STORAGE_COMMIT_INTERVAL = 2.0;
@@ -166,10 +166,15 @@ public:
 	int SQLITE_FRAGMENT_PRIMARY_PAGE_USABLE;
 	int SQLITE_FRAGMENT_OVERFLOW_PAGE_USABLE;
 	double SQLITE_FRAGMENT_MIN_SAVINGS;
+	int SQLITE_CHUNK_SIZE_PAGES;
+	int SQLITE_CHUNK_SIZE_PAGES_SIM;
 
 	// KeyValueStoreSqlite spring cleaning
-	double CLEANING_INTERVAL;
-	double SPRING_CLEANING_TIME_ESTIMATE;
+	double SPRING_CLEANING_NO_ACTION_INTERVAL;
+	double SPRING_CLEANING_LAZY_DELETE_INTERVAL;
+	double SPRING_CLEANING_VACUUM_INTERVAL;
+	double SPRING_CLEANING_LAZY_DELETE_TIME_ESTIMATE;
+	double SPRING_CLEANING_VACUUM_TIME_ESTIMATE;
 	double SPRING_CLEANING_VACUUMS_PER_LAZY_DELETE_PAGE;
 	int SPRING_CLEANING_MIN_LAZY_DELETE_PAGES;
 	int SPRING_CLEANING_MAX_LAZY_DELETE_PAGES;
@@ -54,6 +54,7 @@ struct ProxyStats {
 	Counter mutationBytes;
 	Counter mutations;
 	Counter conflictRanges;
+	Counter keyServerLocationRequests;
 	Version lastCommitVersionAssigned;
 
 	LatencyBands commitLatencyBands;
@@ -65,8 +66,8 @@ struct ProxyStats {
 		: cc("ProxyStats", id.toString()),
 		  txnStartIn("TxnStartIn", cc), txnStartOut("TxnStartOut", cc), txnStartBatch("TxnStartBatch", cc), txnSystemPriorityStartIn("TxnSystemPriorityStartIn", cc), txnSystemPriorityStartOut("TxnSystemPriorityStartOut", cc), txnBatchPriorityStartIn("TxnBatchPriorityStartIn", cc), txnBatchPriorityStartOut("TxnBatchPriorityStartOut", cc),
 		  txnDefaultPriorityStartIn("TxnDefaultPriorityStartIn", cc), txnDefaultPriorityStartOut("TxnDefaultPriorityStartOut", cc), txnCommitIn("TxnCommitIn", cc), txnCommitVersionAssigned("TxnCommitVersionAssigned", cc), txnCommitResolving("TxnCommitResolving", cc), txnCommitResolved("TxnCommitResolved", cc), txnCommitOut("TxnCommitOut", cc),
-		  txnCommitOutSuccess("TxnCommitOutSuccess", cc), txnConflicts("TxnConflicts", cc), commitBatchIn("CommitBatchIn", cc), commitBatchOut("CommitBatchOut", cc), mutationBytes("MutationBytes", cc), mutations("Mutations", cc), conflictRanges("ConflictRanges", cc), lastCommitVersionAssigned(0),
-		  commitLatencyBands("CommitLatencyMetrics", id, SERVER_KNOBS->STORAGE_LOGGING_DELAY), grvLatencyBands("GRVLatencyMetrics", id, SERVER_KNOBS->STORAGE_LOGGING_DELAY)
+		  txnCommitOutSuccess("TxnCommitOutSuccess", cc), txnConflicts("TxnConflicts", cc), commitBatchIn("CommitBatchIn", cc), commitBatchOut("CommitBatchOut", cc), mutationBytes("MutationBytes", cc), mutations("Mutations", cc), conflictRanges("ConflictRanges", cc), keyServerLocationRequests("KeyServerLocationRequests", cc),
+		  lastCommitVersionAssigned(0), commitLatencyBands("CommitLatencyMetrics", id, SERVER_KNOBS->STORAGE_LOGGING_DELAY), grvLatencyBands("GRVLatencyMetrics", id, SERVER_KNOBS->STORAGE_LOGGING_DELAY)
 	{
 		specialCounter(cc, "LastAssignedCommitVersion", [this](){return this->lastCommitVersionAssigned;});
 		specialCounter(cc, "Version", [pVersion](){return *pVersion; });
@@ -1220,121 +1221,121 @@ ACTOR static Future<Void> transactionStarter(
 	}
 }
 
-ACTOR static Future<Void> readRequestServer(
-	MasterProxyInterface proxy,
-	ProxyCommitData* commitData
-	)
-{
+ACTOR static Future<Void> readRequestServer( MasterProxyInterface proxy, ProxyCommitData* commitData ) {
 	// Implement read-only parts of the proxy interface
 
 	// We can't respond to these requests until we have valid txnStateStore
 	wait(commitData->validState.getFuture());
 
 	TraceEvent("ProxyReadyForReads", proxy.id());
 
 	loop {
-		choose{
-			when(GetKeyServerLocationsRequest req = waitNext(proxy.getKeyServersLocations.getFuture())) {
-				GetKeyServerLocationsReply rep;
-				if(!req.end.present()) {
-					auto r = req.reverse ? commitData->keyInfo.rangeContainingKeyBefore(req.begin) : commitData->keyInfo.rangeContaining(req.begin);
-					vector<StorageServerInterface> ssis;
-					ssis.reserve(r.value().src_info.size());
-					for(auto& it : r.value().src_info) {
-						ssis.push_back(it->interf);
-					}
-					rep.results.push_back(std::make_pair(r.range(), ssis));
-				} else if(!req.reverse) {
-					int count = 0;
-					for(auto r = commitData->keyInfo.rangeContaining(req.begin); r != commitData->keyInfo.ranges().end() && count < req.limit && r.begin() < req.end.get(); ++r) {
-						vector<StorageServerInterface> ssis;
-						ssis.reserve(r.value().src_info.size());
-						for(auto& it : r.value().src_info) {
-							ssis.push_back(it->interf);
-						}
-						rep.results.push_back(std::make_pair(r.range(), ssis));
-						count++;
-					}
-				} else {
-					int count = 0;
-					auto r = commitData->keyInfo.rangeContainingKeyBefore(req.end.get());
-					while( count < req.limit && req.begin < r.end() ) {
-						vector<StorageServerInterface> ssis;
-						ssis.reserve(r.value().src_info.size());
-						for(auto& it : r.value().src_info) {
-							ssis.push_back(it->interf);
-						}
-						rep.results.push_back(std::make_pair(r.range(), ssis));
-						if(r == commitData->keyInfo.ranges().begin()) {
-							break;
-						}
-						count++;
-						--r;
-					}
-				}
-				req.reply.send(rep);
-			}
-			when(GetStorageServerRejoinInfoRequest req = waitNext(proxy.getStorageServerRejoinInfo.getFuture())) {
-				if (commitData->txnStateStore->readValue(serverListKeyFor(req.id)).get().present()) {
-					GetStorageServerRejoinInfoReply rep;
-					rep.version = commitData->version;
-					rep.tag = decodeServerTagValue( commitData->txnStateStore->readValue(serverTagKeyFor(req.id)).get().get() );
-					Standalone<VectorRef<KeyValueRef>> history = commitData->txnStateStore->readRange(serverTagHistoryRangeFor(req.id)).get();
-					for(int i = history.size()-1; i >= 0; i-- ) {
-						rep.history.push_back(std::make_pair(decodeServerTagHistoryKey(history[i].key), decodeServerTagValue(history[i].value)));
-					}
-					auto localityKey = commitData->txnStateStore->readValue(tagLocalityListKeyFor(req.dcId)).get();
-					if( localityKey.present() ) {
-						rep.newLocality = false;
-						int8_t locality = decodeTagLocalityListValue(localityKey.get());
-						if(locality != rep.tag.locality) {
-							uint16_t tagId = 0;
-							std::vector<uint16_t> usedTags;
-							auto tagKeys = commitData->txnStateStore->readRange(serverTagKeys).get();
-							for( auto& kv : tagKeys ) {
-								Tag t = decodeServerTagValue( kv.value );
-								if(t.locality == locality) {
-									usedTags.push_back(t.id);
-								}
-							}
-							auto historyKeys = commitData->txnStateStore->readRange(serverTagHistoryKeys).get();
-							for( auto& kv : historyKeys ) {
-								Tag t = decodeServerTagValue( kv.value );
-								if(t.locality == locality) {
-									usedTags.push_back(t.id);
-								}
-							}
-							std::sort(usedTags.begin(), usedTags.end());
-
-							int usedIdx = 0;
-							for(; usedTags.size() > 0 && tagId <= usedTags.end()[-1]; tagId++) {
-								if(tagId < usedTags[usedIdx]) {
-									break;
-								} else {
-									usedIdx++;
-								}
-							}
-							rep.newTag = Tag(locality, tagId);
-						}
-					} else {
-						rep.newLocality = true;
-						int8_t maxTagLocality = -1;
-						auto localityKeys = commitData->txnStateStore->readRange(tagLocalityListKeys).get();
-						for( auto& kv : localityKeys ) {
-							maxTagLocality = std::max(maxTagLocality, decodeTagLocalityListValue( kv.value ));
-						}
-						rep.newTag = Tag(maxTagLocality+1,0);
-					}
-					req.reply.send(rep);
-				} else {
-					req.reply.sendError(worker_removed());
-				}
-			}
-		}
+		GetKeyServerLocationsRequest req = waitNext(proxy.getKeyServersLocations.getFuture());
+		++commitData->stats.keyServerLocationRequests;
+		GetKeyServerLocationsReply rep;
+		if(!req.end.present()) {
+			auto r = req.reverse ? commitData->keyInfo.rangeContainingKeyBefore(req.begin) : commitData->keyInfo.rangeContaining(req.begin);
+			vector<StorageServerInterface> ssis;
+			ssis.reserve(r.value().src_info.size());
+			for(auto& it : r.value().src_info) {
+				ssis.push_back(it->interf);
+			}
+			rep.results.push_back(std::make_pair(r.range(), ssis));
+		} else if(!req.reverse) {
+			int count = 0;
+			for(auto r = commitData->keyInfo.rangeContaining(req.begin); r != commitData->keyInfo.ranges().end() && count < req.limit && r.begin() < req.end.get(); ++r) {
+				vector<StorageServerInterface> ssis;
+				ssis.reserve(r.value().src_info.size());
+				for(auto& it : r.value().src_info) {
+					ssis.push_back(it->interf);
+				}
+				rep.results.push_back(std::make_pair(r.range(), ssis));
+				count++;
+			}
+		} else {
+			int count = 0;
+			auto r = commitData->keyInfo.rangeContainingKeyBefore(req.end.get());
+			while( count < req.limit && req.begin < r.end() ) {
+				vector<StorageServerInterface> ssis;
+				ssis.reserve(r.value().src_info.size());
+				for(auto& it : r.value().src_info) {
+					ssis.push_back(it->interf);
+				}
+				rep.results.push_back(std::make_pair(r.range(), ssis));
+				if(r == commitData->keyInfo.ranges().begin()) {
+					break;
+				}
+				count++;
+				--r;
+			}
+		}
+		req.reply.send(rep);
+		wait(yield());
 	}
 }
 
+ACTOR static Future<Void> rejoinServer( MasterProxyInterface proxy, ProxyCommitData* commitData ) {
+	// We can't respond to these requests until we have valid txnStateStore
+	wait(commitData->validState.getFuture());
+
+	loop {
+		GetStorageServerRejoinInfoRequest req = waitNext(proxy.getStorageServerRejoinInfo.getFuture());
+		if (commitData->txnStateStore->readValue(serverListKeyFor(req.id)).get().present()) {
+			GetStorageServerRejoinInfoReply rep;
+			rep.version = commitData->version;
+			rep.tag = decodeServerTagValue( commitData->txnStateStore->readValue(serverTagKeyFor(req.id)).get().get() );
+			Standalone<VectorRef<KeyValueRef>> history = commitData->txnStateStore->readRange(serverTagHistoryRangeFor(req.id)).get();
+			for(int i = history.size()-1; i >= 0; i-- ) {
+				rep.history.push_back(std::make_pair(decodeServerTagHistoryKey(history[i].key), decodeServerTagValue(history[i].value)));
+			}
+			auto localityKey = commitData->txnStateStore->readValue(tagLocalityListKeyFor(req.dcId)).get();
+			if( localityKey.present() ) {
+				rep.newLocality = false;
+				int8_t locality = decodeTagLocalityListValue(localityKey.get());
+				if(locality != rep.tag.locality) {
+					uint16_t tagId = 0;
+					std::vector<uint16_t> usedTags;
+					auto tagKeys = commitData->txnStateStore->readRange(serverTagKeys).get();
+					for( auto& kv : tagKeys ) {
+						Tag t = decodeServerTagValue( kv.value );
+						if(t.locality == locality) {
+							usedTags.push_back(t.id);
+						}
+					}
+					auto historyKeys = commitData->txnStateStore->readRange(serverTagHistoryKeys).get();
+					for( auto& kv : historyKeys ) {
+						Tag t = decodeServerTagValue( kv.value );
+						if(t.locality == locality) {
+							usedTags.push_back(t.id);
+						}
+					}
+					std::sort(usedTags.begin(), usedTags.end());
+
+					int usedIdx = 0;
+					for(; usedTags.size() > 0 && tagId <= usedTags.end()[-1]; tagId++) {
+						if(tagId < usedTags[usedIdx]) {
+							break;
+						} else {
+							usedIdx++;
+						}
+					}
+					rep.newTag = Tag(locality, tagId);
+				}
+			} else {
+				rep.newLocality = true;
+				int8_t maxTagLocality = -1;
+				auto localityKeys = commitData->txnStateStore->readRange(tagLocalityListKeys).get();
+				for( auto& kv : localityKeys ) {
+					maxTagLocality = std::max(maxTagLocality, decodeTagLocalityListValue( kv.value ));
+				}
+				rep.newTag = Tag(maxTagLocality+1,0);
+			}
+			req.reply.send(rep);
+		} else {
+			req.reply.sendError(worker_removed());
+		}
+	}
+}
 
 ACTOR Future<Void> healthMetricsRequestServer(MasterProxyInterface proxy, GetHealthMetricsReply* healthMetricsReply, GetHealthMetricsReply* detailedHealthMetricsReply)
 {
 	loop {
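Reviewer note: splitting the old choose/when loop into separate readRequestServer and rejoinServer actors means a flood of key-location requests can no longer starve storage-server rejoin handling, and rejoin traffic now runs at its own elevated network priority via the TaskProxyStorageRejoin endpoint registered earlier in this diff. Together with the new wait(yield()) after each location reply, this appears to be the mechanism behind the 6.1.4 fix note about storage servers being unable to rejoin when the proxies were saturated.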
@@ -1464,6 +1465,7 @@ ACTOR Future<Void> masterProxyServerCore(
 	addActor.send(monitorRemoteCommitted(&commitData, db));
 	addActor.send(transactionStarter(proxy, db, addActor, &commitData, &healthMetricsReply, &detailedHealthMetricsReply));
 	addActor.send(readRequestServer(proxy, &commitData));
+	addActor.send(rejoinServer(proxy, &commitData));
 	addActor.send(healthMetricsRequestServer(proxy, &healthMetricsReply, &detailedHealthMetricsReply));
 
 	// wait for txnStateStore recovery
@@ -78,7 +78,9 @@ struct VFSAsyncFile {
 
 	int debug_zcrefs, debug_zcreads, debug_reads;
 
-	VFSAsyncFile(std::string const& filename, int flags) : filename(filename), flags(flags), pLockCount(&filename_lockCount_openCount[filename].first), debug_zcrefs(0), debug_zcreads(0), debug_reads(0) {
+	int chunkSize;
+
+	VFSAsyncFile(std::string const& filename, int flags) : filename(filename), flags(flags), pLockCount(&filename_lockCount_openCount[filename].first), debug_zcrefs(0), debug_zcreads(0), debug_reads(0), chunkSize(0) {
 		filename_lockCount_openCount[filename].second++;
 	}
 	~VFSAsyncFile();
@@ -183,6 +185,12 @@ static int asyncWrite(sqlite3_file *pFile, const void *zBuf, int iAmt, sqlite_in
 
 static int asyncTruncate(sqlite3_file *pFile, sqlite_int64 size){
 	VFSAsyncFile *p = (VFSAsyncFile*)pFile;
+
+	// Adjust size to a multiple of chunkSize if set
+	if(p->chunkSize != 0) {
+		size = ((size + p->chunkSize - 1) / p->chunkSize) * p->chunkSize;
+	}
+
 	try {
 		waitFor( p->file->truncate( size ) );
 		return SQLITE_OK;
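Reviewer note: the added rounding is the standard round-up-to-a-multiple idiom. A self-contained sketch checking the arithmetic (values illustrative):

    #include <cassert>
    #include <cstdint>

    int64_t roundUp(int64_t size, int64_t chunk) {
        return ((size + chunk - 1) / chunk) * chunk; // same formula as asyncTruncate above
    }

    int main() {
        int64_t chunk = 104857600;                      // 100MB chunk, as configured via SQLITE_FCNTL_CHUNK_SIZE
        assert(roundUp(1, chunk) == chunk);             // tiny files still occupy one full chunk
        assert(roundUp(chunk, chunk) == chunk);         // exact multiples are unchanged
        assert(roundUp(chunk + 1, chunk) == 2 * chunk); // anything over rounds up to the next chunk
    }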
@@ -242,7 +250,18 @@ static int asyncCheckReservedLock(sqlite3_file *pFile, int *pResOut){
 ** No xFileControl() verbs are implemented by this VFS.
 */
 static int VFSAsyncFileControl(sqlite3_file *pFile, int op, void *pArg){
-	return SQLITE_NOTFOUND;
+	VFSAsyncFile *p = (VFSAsyncFile*)pFile;
+	switch(op) {
+		case SQLITE_FCNTL_CHUNK_SIZE:
+			p->chunkSize = *(int *)pArg;
+			return SQLITE_OK;
+
+		case SQLITE_FCNTL_SIZE_HINT:
+			return asyncTruncate(pFile, *(int64_t *)pArg);
+
+		default:
+			return SQLITE_NOTFOUND;
+	};
 }
 
 static int asyncSectorSize(sqlite3_file *pFile){ return 512; } // SOMEDAY: Would 4K be better?
@@ -1413,15 +1413,16 @@ int main(int argc, char* argv[]) {
 						!clientKnobs->setKnob( k->first, k->second ) &&
 						!serverKnobs->setKnob( k->first, k->second ))
 					{
-						fprintf(stderr, "Unrecognized knob option '%s'\n", k->first.c_str());
-						flushAndExit(FDB_EXIT_ERROR);
+						fprintf(stderr, "WARNING: Unrecognized knob option '%s'\n", k->first.c_str());
+						TraceEvent(SevWarnAlways, "UnrecognizedKnobOption").detail("Knob", printable(k->first));
 					}
 				} catch (Error& e) {
 					if (e.code() == error_code_invalid_option_value) {
-						fprintf(stderr, "Invalid value '%s' for option '%s'\n", k->second.c_str(), k->first.c_str());
-						flushAndExit(FDB_EXIT_ERROR);
+						fprintf(stderr, "WARNING: Invalid value '%s' for option '%s'\n", k->second.c_str(), k->first.c_str());
+						TraceEvent(SevWarnAlways, "InvalidKnobValue").detail("Knob", printable(k->first)).detail("Value", printable(k->second));
+					} else {
+						throw;
 					}
-					throw;
 				}
 			}
 			if (!serverKnobs->setKnob("server_mem_limit", std::to_string(memLimit))) ASSERT(false);
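Reviewer note: after this change a mistyped or version-skewed knob name, or an invalid knob value, no longer aborts fdbserver startup through flushAndExit(FDB_EXIT_ERROR); both cases are downgraded to a stderr warning plus a SevWarnAlways trace event, while any other error from setKnob still propagates via the rethrow in the new else branch.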
@@ -44,6 +44,7 @@ enum {
 	TaskFailureMonitor = 8700,
 	TaskResolutionMetrics = 8700,
 	TaskClusterController = 8650,
+	TaskProxyStorageRejoin = 8645,
 	TaskProxyCommitDispatcher = 8640,
 	TaskTLogQueuingMetrics = 8620,
 	TaskTLogPop = 8610,
@@ -51,7 +52,6 @@ enum {
 	TaskTLogPeek = 8590,
 	TaskTLogCommitReply = 8580,
 	TaskTLogCommit = 8570,
-	TaskTLogSpilledPeekReply = 8567,
 	TaskProxyGetRawCommittedVersion = 8565,
 	TaskProxyResolverReply = 8560,
 	TaskProxyCommitBatcher = 8550,
@@ -73,6 +73,7 @@ enum {
 	TaskDataDistribution = 3500,
 	TaskDiskWrite = 3010,
 	TaskUpdateStorage = 3000,
+	TaskTLogSpilledPeekReply = 2800,
 	TaskLowPriority = 2000,
 
 	TaskMinPriority = 1000
@@ -22,8 +22,8 @@ FROM ubuntu:18.04
 # Install dependencies
 
 RUN apt-get update && \
-	apt-get install -y curl=7.58.0-2ubuntu3.5 \
-		dnsutils=1:9.11.3+dfsg-1ubuntu1.3 && \
+	apt-get install -y curl>=7.58.0-2ubuntu3.6 \
+		dnsutils>=1:9.11.3+dfsg-1ubuntu1.7 && \
 	rm -r /var/lib/apt/lists/*
 
 # Install FoundationDB Binaries
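Reviewer note: apt-get only understands exact pins of the form pkg=version; a >= constraint is not apt syntax, and because the tokens are unquoted inside a shell RUN, curl>=7.58.0-2ubuntu3.6 is parsed by the shell as the argument curl followed by an output redirection to a file named =7.58.0-2ubuntu3.6. The practical effect is that the latest available curl and dnsutils get installed and stray =... files are left in the image layer; enforcing a minimum version would need different tooling (or at least quoting), so the >= here reads as documentation of intent rather than an enforced constraint.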
@@ -32,7 +32,7 @@
 
 <Wix xmlns='http://schemas.microsoft.com/wix/2006/wi'>
     <Product Name='$(var.Title)'
-             Id='{C7ECCB4E-3522-45E7-BB07-20D873D941E5}'
+             Id='{94FC1149-B569-4F0C-886D-3C9D0A83A3E7}'
              UpgradeCode='{A95EA002-686E-4164-8356-C715B7F8B1C8}'
              Version='$(var.Version)'
              Manufacturer='$(var.Manufacturer)'
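Reviewer note: regenerating the Product Id GUID for each release while leaving UpgradeCode untouched is the standard Windows Installer major-upgrade pattern: MSI treats every release as a distinct product, but the shared UpgradeCode ties them into one upgrade family, which is why only the Id attribute changes for this version bump.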