Use snake_case to match status json convention
parent e1dfa410c1
commit 409ccf3be2
@@ -850,14 +850,14 @@ Reads in the metrics module are not transactional and may require rpcs to complete
     >>> for k, v in db.get_range_startswith('\xff\xff/metrics/data_distribution_stats/', limit=3):
     ...     print(k, v)
     ...
-    ('\xff\xff/metrics/data_distribution_stats/', '{"ShardBytes":3828000}')
-    ('\xff\xff/metrics/data_distribution_stats/mako00079', '{"ShardBytes":2013000}')
-    ('\xff\xff/metrics/data_distribution_stats/mako00126', '{"ShardBytes":3201000}')
+    ('\xff\xff/metrics/data_distribution_stats/', '{"shard_bytes":3828000}')
+    ('\xff\xff/metrics/data_distribution_stats/mako00079', '{"shard_bytes":2013000}')
+    ('\xff\xff/metrics/data_distribution_stats/mako00126', '{"shard_bytes":3201000}')
 
 ========================= ======== ===============
 **Field**                 **Type** **Description**
 ------------------------- -------- ---------------
-ShardBytes                number   An estimate of the sum of kv sizes for this shard.
+shard_bytes               number   An estimate of the sum of kv sizes for this shard.
 ========================= ======== ===============
 
 Keys starting with ``\xff\xff/metrics/health/`` represent stats about the health of the cluster, suitable for application-level throttling.
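For code that consumes this range, a minimal sketch of decoding the renamed field (assumes the Python bindings and an open database handle ``db``, as in the doctest above; values are UTF-8 JSON blobs):

    import json

    # Sum the estimated shard sizes across the data distribution metrics range.
    # The field was "ShardBytes" before this commit.
    total = 0
    for k, v in db.get_range_startswith('\xff\xff/metrics/data_distribution_stats/'):
        total += json.loads(v)['shard_bytes']
    print('estimated total kv bytes:', total)
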
@@ -866,23 +866,23 @@ Some of this information is also available in ``\xff\xff/status/json``, but thes
     >>> for k, v in db.get_range_startswith('\xff\xff/metrics/health/'):
     ...     print(k, v)
     ...
-    ('\xff\xff/metrics/health/aggregate', '{"batchLimited":false,"tpsLimit":483988.66315011407,"worstStorageDurabilityLag":5000001,"worstStorageQueue":2036,"worstTLogQueue":300}')
-    ('\xff\xff/metrics/health/log/e639a9ad0373367784cc550c615c469b', '{"tLogQueue":300}')
-    ('\xff\xff/metrics/health/storage/ab2ce4caf743c9c1ae57063629c6678a', '{"cpuUsage":2.398696781487125,"diskUsage":0.059995917598039405,"storageDurabilityLag":5000001,"storageQueue":2036}')
+    ('\xff\xff/metrics/health/aggregate', '{"batch_limited":false,"tps_limit":483988.66315011407,"worst_storage_durability_lag":5000001,"worst_storage_queue":2036,"worst_log_queue":300}')
+    ('\xff\xff/metrics/health/log/e639a9ad0373367784cc550c615c469b', '{"log_queue":300}')
+    ('\xff\xff/metrics/health/storage/ab2ce4caf743c9c1ae57063629c6678a', '{"cpu_usage":2.398696781487125,"disk_usage":0.059995917598039405,"storage_durability_lag":5000001,"storage_queue":2036}')
 
 ``\xff\xff/metrics/health/aggregate``
 
 Aggregate stats about cluster health. Reading this key alone is slightly cheaper than reading any of the per-process keys.
 
-========================= ======== ===============
-**Field**                 **Type** **Description**
-------------------------- -------- ---------------
-batchLimited              boolean  Whether or not the cluster is limiting batch priority transactions
-tpsLimit                  number   The rate at which normal priority transactions are allowed to start
-worstStorageDurabilityLag number   See the description for storageDurabilityLag
-worstStorageQueue         number   See the description for storageQueue
-worstTLogQueue            number   See the description for tLogQueue
-========================= ======== ===============
+============================ ======== ===============
+**Field**                    **Type** **Description**
+---------------------------- -------- ---------------
+batch_limited                boolean  Whether or not the cluster is limiting batch priority transactions
+tps_limit                    number   The rate at which normal priority transactions are allowed to start
+worst_storage_durability_lag number   See the description for storage_durability_lag
+worst_storage_queue          number   See the description for storage_queue
+worst_log_queue              number   See the description for log_queue
+============================ ======== ===============
 
 ``\xff\xff/metrics/health/log/<id>``
 
@@ -891,21 +891,21 @@ Stats about the health of a particular transaction log process
 ========================= ======== ===============
 **Field**                 **Type** **Description**
 ------------------------- -------- ---------------
-tLogQueue                 number   The number of bytes of mutations that need to be stored in memory on this transaction log process
+log_queue                 number   The number of bytes of mutations that need to be stored in memory on this transaction log process
 ========================= ======== ===============
 
 ``\xff\xff/metrics/health/storage/<id>``
 
 Stats about the health of a particular storage process
 
-========================= ======== ===============
-**Field**                 **Type** **Description**
-------------------------- -------- ---------------
-cpuUsage                  number   The cpu percentage used by this storage process
-diskUsage                 number   The disk IO percentage used by this storage process
-storageDurabilityLag      number   The difference between the newest version and the durable version on this storage process. On a lightly loaded cluster this will stay just above 5000000.
-storageQueue              number   The number of bytes of mutations that need to be stored in memory on this storage process
-========================= ======== ===============
+========================== ======== ===============
+**Field**                  **Type** **Description**
+-------------------------- -------- ---------------
+cpu_usage                  number   The cpu percentage used by this storage process
+disk_usage                 number   The disk IO percentage used by this storage process
+storage_durability_lag     number   The difference between the newest version and the durable version on this storage process. On a lightly loaded cluster this will stay just above 5000000.
+storage_queue              number   The number of bytes of mutations that need to be stored in memory on this storage process
+========================== ======== ===============
 
 Caveats
 ~~~~~~~
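Since these keys are pitched as suitable for application-level throttling, a minimal sketch of that pattern under the new names (assumes the Python bindings and an open handle ``db``; the queue threshold is a hypothetical application choice, not something prescribed by this commit):

    import json

    # Read only the aggregate key, which the docs note is cheaper than the
    # per-process keys, and back off when the cluster reports pressure.
    health = json.loads(db['\xff\xff/metrics/health/aggregate'])
    if health['batch_limited'] or health['worst_storage_queue'] > 900000000:
        pass  # e.g. delay or shed batch-priority work
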
@@ -557,11 +557,11 @@ static Standalone<RangeResultRef> healthMetricsToKVPairs(const HealthMetrics& me
 	if (CLIENT_BUGGIFY) return result;
 	if (kr.contains(LiteralStringRef("\xff\xff/metrics/health/aggregate")) && metrics.worstStorageDurabilityLag != 0) {
 		json_spirit::mObject statsObj;
-		statsObj["batchLimited"] = metrics.batchLimited;
-		statsObj["tpsLimit"] = metrics.tpsLimit;
-		statsObj["worstStorageDurabilityLag"] = metrics.worstStorageDurabilityLag;
-		statsObj["worstStorageQueue"] = metrics.worstStorageQueue;
-		statsObj["worstTLogQueue"] = metrics.worstTLogQueue;
+		statsObj["batch_limited"] = metrics.batchLimited;
+		statsObj["tps_limit"] = metrics.tpsLimit;
+		statsObj["worst_storage_durability_lag"] = metrics.worstStorageDurabilityLag;
+		statsObj["worst_storage_queue"] = metrics.worstStorageQueue;
+		statsObj["worst_log_queue"] = metrics.worstTLogQueue;
 		std::string statsString =
 		    json_spirit::write_string(json_spirit::mValue(statsObj), json_spirit::Output_options::raw_utf8);
 		ValueRef bytes(result.arena(), statsString);
@@ -580,7 +580,7 @@ static Standalone<RangeResultRef> healthMetricsToKVPairs(const HealthMetrics& me
 		if (phase == 1) {
 			if (k < kr.end) {
 				json_spirit::mObject statsObj;
-				statsObj["tLogQueue"] = logStats;
+				statsObj["log_queue"] = logStats;
 				std::string statsString =
 				    json_spirit::write_string(json_spirit::mValue(statsObj), json_spirit::Output_options::raw_utf8);
 				ValueRef bytes(result.arena(), statsString);
@@ -603,10 +603,10 @@ static Standalone<RangeResultRef> healthMetricsToKVPairs(const HealthMetrics& me
 		if (phase == 1) {
 			if (k < kr.end) {
 				json_spirit::mObject statsObj;
-				statsObj["storageDurabilityLag"] = storageStats.storageDurabilityLag;
-				statsObj["storageQueue"] = storageStats.storageQueue;
-				statsObj["cpuUsage"] = storageStats.cpuUsage;
-				statsObj["diskUsage"] = storageStats.diskUsage;
+				statsObj["storage_durability_lag"] = storageStats.storageDurabilityLag;
+				statsObj["storage_queue"] = storageStats.storageQueue;
+				statsObj["cpu_usage"] = storageStats.cpuUsage;
+				statsObj["disk_usage"] = storageStats.diskUsage;
 				std::string statsString =
 				    json_spirit::write_string(json_spirit::mValue(statsObj), json_spirit::Output_options::raw_utf8);
 				ValueRef bytes(result.arena(), statsString);
@@ -1458,9 +1458,7 @@ ACTOR Future< pair<KeyRange,Reference<LocationInfo>> > getKeyLocation_internal(
 }
 
 template <class F>
-Future<pair<KeyRange, Reference<LocationInfo>>> getKeyLocation(Database const& cx, Key const& key,
-                                                               F StorageServerInterface::*member,
-                                                               TransactionInfo const& info, bool isBackward = false) {
+Future<pair<KeyRange, Reference<LocationInfo>>> getKeyLocation( Database const& cx, Key const& key, F StorageServerInterface::*member, TransactionInfo const& info, bool isBackward = false ) {
 	auto ssi = cx->getCachedLocation( key, isBackward );
 	if (!ssi.second) {
 		return getKeyLocation_internal( cx, key, info, isBackward );
@@ -856,31 +856,31 @@ const KeyRef JSONSchemas::latencyBandConfigurationSchema = LiteralStringRef(R"co
 
 const KeyRef JSONSchemas::dataDistributionStatsSchema = LiteralStringRef(R"""(
 {
-  "ShardBytes": 1947000
+  "shard_bytes": 1947000
 }
 )""");
 
 const KeyRef JSONSchemas::logHealthSchema = LiteralStringRef(R"""(
 {
-  "tLogQueue": 156
+  "log_queue": 156
 }
 )""");
 
 const KeyRef JSONSchemas::storageHealthSchema = LiteralStringRef(R"""(
 {
-  "cpuUsage": 3.28629447047675,
-  "diskUsage": 0.19997897369207954,
-  "storageDurabilityLag": 5050809,
-  "storageQueue": 2030
+  "cpu_usage": 3.28629447047675,
+  "disk_usage": 0.19997897369207954,
+  "storage_durability_lag": 5050809,
+  "storage_queue": 2030
 }
 )""");
 
 const KeyRef JSONSchemas::aggregateHealthSchema = LiteralStringRef(R"""(
 {
-  "batchLimited": false,
-  "tpsLimit": 457082.8105811302,
-  "worstStorageDurabilityLag": 5050809,
-  "worstStorageQueue": 2030,
-  "worstTLogQueue": 156
+  "batch_limited": false,
+  "tps_limit": 457082.8105811302,
+  "worst_storage_durability_lag": 5050809,
+  "worst_storage_queue": 2030,
+  "worst_log_queue": 156
 }
 )""");
@@ -360,7 +360,7 @@ ACTOR Future<Standalone<RangeResultRef>> ddMetricsGetRangeActor(ReadYourWritesTr
 		KeyRef beginKey = ddMetricsRef.beginKey.withPrefix(ddStatsRange.begin, result.arena());
 		// Use json string encoded in utf-8 to encode the values, easy for adding more fields in the future
 		json_spirit::mObject statsObj;
-		statsObj["ShardBytes"] = ddMetricsRef.shardBytes;
+		statsObj["shard_bytes"] = ddMetricsRef.shardBytes;
 		std::string statsString =
 		    json_spirit::write_string(json_spirit::mValue(statsObj), json_spirit::Output_options::raw_utf8);
 		ValueRef bytes(result.arena(), statsString);
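Because every field emitted by these modules is renamed, existing consumers that parse the old camelCase names will break when they pick up this commit. A hypothetical client-side migration shim (the mapping is taken directly from this diff; the helper name is illustrative):

    # Map pre-commit camelCase field names onto the new snake_case ones.
    RENAMES = {
        'ShardBytes': 'shard_bytes',
        'tLogQueue': 'log_queue',
        'cpuUsage': 'cpu_usage',
        'diskUsage': 'disk_usage',
        'storageDurabilityLag': 'storage_durability_lag',
        'storageQueue': 'storage_queue',
        'batchLimited': 'batch_limited',
        'tpsLimit': 'tps_limit',
        'worstStorageDurabilityLag': 'worst_storage_durability_lag',
        'worstStorageQueue': 'worst_storage_queue',
        'worstTLogQueue': 'worst_log_queue',
    }

    def normalize(stats):
        # Accept either naming scheme and return snake_case keys.
        return {RENAMES.get(k, k): v for k, v in stats.items()}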