Merge pull request #2006 from ajbeamon/add-coordinator-to-status-roles-list
Add 'coordinator' to the list of roles that a process can have in status
commit b7a4540a35
@@ -66,7 +66,8 @@
             "cluster_controller",
             "data_distributor",
             "ratekeeper",
-            "router"
+            "router",
+            "coordinator"
           ]
         },
         "data_version":12341234,
@@ -56,6 +56,7 @@ Status
 * ``connected_clients`` is now only a sample of the connected clients, rather than a complete list. `(PR #1902) <https://github.com/apple/foundationdb/pull/1902>`_.
 * Added ``max_protocol_clients`` to the ``supported_versions`` section, which provides a sample of connected clients which cannot connect to any higher protocol version. `(PR #1902) <https://github.com/apple/foundationdb/pull/1902>`_.
 * Clients which connect without specifying their supported versions are tracked as an ``Unknown`` version in the ``supported_versions`` section. [6.2.2] `(PR #1990) <https://github.com/apple/foundationdb/pull/1990>`_.
+* Added ``coordinator`` to the list of roles that can be reported for a process. [6.2.3] `(PR #2006) <https://github.com/apple/foundationdb/pull/2006>`_.
 * Added ``worst_durability_lag_storage_server`` and ``limiting_durability_lag_storage_server`` to the ``cluster.qos`` section, each with subfields ``versions`` and ``seconds``. These report the durability lag values being used by ratekeeper to potentially limit the transaction rate. [6.2.3] `(PR #2003) <https://github.com/apple/foundationdb/pull/2003>`_.
 * Added ``worst_data_lag_storage_server`` and ``limiting_data_lag_storage_server`` to the ``cluster.qos`` section, each with subfields ``versions`` and ``seconds``. These are meant to replace ``worst_version_lag_storage_server`` and ``limiting_version_lag_storage_server``, which are now deprecated. [6.2.3] `(PR #2003) <https://github.com/apple/foundationdb/pull/2003>`_.
@@ -86,7 +86,8 @@ const KeyRef JSONSchemas::statusSchema = LiteralStringRef(R"statusSchema(
             "cluster_controller",
             "data_distributor",
             "ratekeeper",
-            "router"
+            "router",
+            "coordinator"
           ]
         },
         "data_version":12341234,
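Note: the status JSON schema is embedded in the client as the JSONSchemas::statusSchema raw string literal, so the role names emitted by the status code and the names enumerated in the schema have to be kept in sync by hand; this hunk and the documentation hunk above make the same edit in both places. The sketch below is illustrative only (not FoundationDB code): it checks a reported role name against the enumerated set, now including "coordinator". The names kSchemaRoles and roleIsInSchema are made up for this example, and the set lists only the roles visible in the hunk (the real schema enumerates more).

#include <iostream>
#include <set>
#include <string>

// Role names visible in the schema hunk above (the full schema lists more).
static const std::set<std::string> kSchemaRoles = {
    "cluster_controller", "data_distributor", "ratekeeper",
    "router", "coordinator"
};

// Returns true if a reported role would match the schema's enumeration.
bool roleIsInSchema(const std::string& role) {
    return kSchemaRoles.count(role) != 0;
}

int main() {
    std::cout << std::boolalpha
              << roleIsInSchema("coordinator") << "\n"   // true after this change
              << roleIsInSchema("bookkeeper")  << "\n";  // false: not an enumerated role
}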
@@ -549,6 +549,11 @@ struct RolesInfo {
 	JsonBuilderObject& addRole(std::string const& role, InterfaceType& iface) {
 		return addRole(iface.address(), role, iface.id());
 	}
+	JsonBuilderObject& addCoordinatorRole(NetworkAddress addr) {
+		JsonBuilderObject obj;
+		obj["role"] = "coordinator";
+		return roles.insert(std::make_pair(addr, obj))->second;
+	}
 	JsonBuilderArray getStatusForAddress( NetworkAddress a ) {
 		JsonBuilderArray v;
 		auto it = roles.lower_bound(a);
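Note: unlike addRole, which records the role together with an id taken from a worker interface, addCoordinatorRole has only a NetworkAddress to work with, so the entry it inserts carries just a "role" field. RolesInfo keeps its entries keyed by address so one process can accumulate several roles, and getStatusForAddress later collects everything recorded for that address. A minimal standard-library sketch of that shape follows; RolesSketch, printRolesFor, and the example addresses are invented for illustration, while the real code uses NetworkAddress keys and JsonBuilderObject values.

#include <iostream>
#include <map>
#include <string>

// Illustrative sketch of the RolesInfo pattern: role entries are keyed by the
// process address, and one address may hold several entries.
struct RolesSketch {
    std::multimap<std::string, std::map<std::string, std::string>> roles;

    // A coordinator has no interface or id, so its entry is just {"role": "coordinator"}.
    void addCoordinatorRole(const std::string& addr) {
        roles.insert({addr, {{"role", "coordinator"}}});
    }

    // Other roles also record an id taken from the worker's interface.
    void addRole(const std::string& addr, const std::string& role, const std::string& id) {
        roles.insert({addr, {{"role", role}, {"id", id}}});
    }

    // Gather every role entry recorded for one address, as getStatusForAddress does.
    void printRolesFor(const std::string& addr) const {
        auto range = roles.equal_range(addr);
        for (auto it = range.first; it != range.second; ++it) {
            std::cout << addr << " -> " << it->second.at("role") << "\n";
        }
    }
};

int main() {
    RolesSketch r;
    r.addRole("10.0.0.1:4500", "storage", "abcd1234");
    r.addCoordinatorRole("10.0.0.1:4500");  // the same process also serves as a coordinator
    r.printRolesFor("10.0.0.1:4500");
}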
@@ -563,10 +568,11 @@ struct RolesInfo {
 ACTOR static Future<JsonBuilderObject> processStatusFetcher(
     Reference<AsyncVar<struct ServerDBInfo>> db, std::vector<WorkerDetails> workers, WorkerEvents pMetrics,
     WorkerEvents mMetrics, WorkerEvents nMetrics, WorkerEvents errors, WorkerEvents traceFileOpenErrors,
     WorkerEvents programStarts, std::map<std::string, std::vector<JsonBuilderObject>> processIssues,
     vector<std::pair<StorageServerInterface, EventMap>> storageServers,
     vector<std::pair<TLogInterface, EventMap>> tLogs, vector<std::pair<MasterProxyInterface, EventMap>> proxies,
-    Database cx, Optional<DatabaseConfiguration> configuration, Optional<Key> healthyZone, std::set<std::string>* incomplete_reasons) {
+    ServerCoordinators coordinators, Database cx, Optional<DatabaseConfiguration> configuration,
+    Optional<Key> healthyZone, std::set<std::string>* incomplete_reasons) {

 	state JsonBuilderObject processMap;
@@ -647,6 +653,10 @@ ACTOR static Future<JsonBuilderObject> processStatusFetcher(
 		}
 	}

+	for(auto& coordinator : coordinators.ccf->getConnectionString().coordinators()) {
+		roles.addCoordinatorRole(coordinator);
+	}
+
 	state std::vector<std::pair<MasterProxyInterface, EventMap>>::iterator proxy;
 	for(proxy = proxies.begin(); proxy != proxies.end(); ++proxy) {
 		roles.addRole( "proxy", proxy->first, proxy->second );
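Note: this loop walks the coordinator addresses recorded in the cluster file's connection string (reached through the ServerCoordinators parameter added above) and tags each one with a coordinator role, so coordinators appear in status even when they run no other role. As a rough illustration of where those addresses come from, the sketch below splits a connection string of the usual "description:id@host:port,host:port" form into its coordinator addresses. It is a simplification under that assumed format; the function name coordinatorAddresses and the example string are invented here, and the real parsing is done by FoundationDB's ClusterConnectionString.

#include <iostream>
#include <sstream>
#include <string>
#include <vector>

// Illustrative only: pull coordinator addresses out of a cluster-file style
// connection string ("description:id@host:port,host:port,...").
std::vector<std::string> coordinatorAddresses(const std::string& connectionString) {
    std::vector<std::string> addrs;
    auto at = connectionString.find('@');
    if (at == std::string::npos) return addrs;  // malformed: no address list

    std::stringstream list(connectionString.substr(at + 1));
    std::string addr;
    while (std::getline(list, addr, ',')) {
        if (!addr.empty()) addrs.push_back(addr);
    }
    return addrs;
}

int main() {
    // Each of these addresses would receive a {"role": "coordinator"} entry in status.
    for (const auto& addr : coordinatorAddresses("testdb:abc123@10.0.0.1:4500,10.0.0.2:4500")) {
        std::cout << addr << "\n";
    }
}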
@@ -2298,8 +2308,9 @@ ACTOR Future<StatusReply> clusterGetStatus(

 	JsonBuilderObject processStatus = wait(processStatusFetcher(db, workers, pMetrics, mMetrics, networkMetrics,
 	                                                            latestError, traceFileOpenErrors, programStarts,
-	                                                            processIssues, storageServers, tLogs, proxies, cx,
-	                                                            configuration, loadResult.present() ? loadResult.get().healthyZone : Optional<Key>(),
+	                                                            processIssues, storageServers, tLogs, proxies,
+	                                                            coordinators, cx, configuration,
+	                                                            loadResult.present() ? loadResult.get().healthyZone : Optional<Key>(),
 	                                                            &status_incomplete_reasons));
 	statusObj["processes"] = processStatus;
 	statusObj["clients"] = clientStatusFetcher(clientStatus);