Add schemas, and check dataDistributionStatsSchema
This commit is contained in:
parent
f8bb76a607
commit
403274bba8
|
@ -853,3 +853,37 @@ const KeyRef JSONSchemas::latencyBandConfigurationSchema = LiteralStringRef(R"co
|
|||
"max_commit_bytes":0
|
||||
}
|
||||
})configSchema");
|
||||
|
||||
// Sample JSON document serving as the schema for per-shard data-distribution
// stats; validated against real results via schemaMatch() (see the workload
// change in this commit). Values are representative examples, not limits.
// "ShardBytes": sample shard size in bytes; "Storages": array of hex IDs —
// presumably the storage servers holding the shard, TODO confirm at producer.
const KeyRef JSONSchemas::dataDistributionStatsSchema = LiteralStringRef(R"""(
{
	"ShardBytes": 1947000,
	"Storages": [
		"697f39751849d70067eabd1b079478a5"
	]
}
)""");
|
||||
|
||||
// Sample JSON schema document for a TLog health report. "tLogQueue" is a
// sample integer queue size — units (bytes vs. entries) not established here;
// verify against the code that emits this document.
const KeyRef JSONSchemas::logHealthSchema = LiteralStringRef(R"""(
{
	"tLogQueue": 156
}
)""");
|
||||
|
||||
// Sample JSON schema document for a storage-server health report. Fields carry
// representative numeric sample values (two doubles, two integers); exact
// semantics/units are not established in this file — confirm at the producer.
// Presumably validated via schemaMatch() like dataDistributionStatsSchema.
const KeyRef JSONSchemas::storageHealthSchema = LiteralStringRef(R"""(
{
	"cpuUsage": 3.28629447047675,
	"diskUsage": 0.19997897369207954,
	"storageDurabilityLag": 5050809,
	"storageQueue": 2030
}
)""");
|
||||
|
||||
// Sample JSON schema document for a cluster-wide aggregated health report:
// a boolean ("batchLimited"), a double ("tpsLimit"), and three worst-case
// integer metrics ("worstStorageDurabilityLag", "worstStorageQueue",
// "worstTLogQueue"). Values are representative samples only; presumably used
// for type checking via schemaMatch() — confirm against callers.
const KeyRef JSONSchemas::aggregateHealthSchema = LiteralStringRef(R"""(
{
	"batchLimited": false,
	"tpsLimit": 457082.8105811302,
	"worstStorageDurabilityLag": 5050809,
	"worstStorageQueue": 2030,
	"worstTLogQueue": 156
}
)""");
|
||||
|
|
|
@ -30,6 +30,10 @@ struct JSONSchemas {
|
|||
static const KeyRef statusSchema;
|
||||
static const KeyRef clusterConfigurationSchema;
|
||||
static const KeyRef latencyBandConfigurationSchema;
|
||||
static const KeyRef dataDistributionStatsSchema;
|
||||
static const KeyRef logHealthSchema;
|
||||
static const KeyRef storageHealthSchema;
|
||||
static const KeyRef aggregateHealthSchema;
|
||||
};
|
||||
|
||||
#endif /* FDBCLIENT_SCHEMAS_H */
|
||||
|
|
|
@ -20,7 +20,9 @@
|
|||
|
||||
#include <boost/lexical_cast.hpp>
|
||||
|
||||
#include "fdbclient/ManagementAPI.actor.h"
|
||||
#include "fdbclient/ReadYourWrites.h"
|
||||
#include "fdbclient/Schemas.h"
|
||||
#include "fdbserver/workloads/workloads.actor.h"
|
||||
#include "flow/actorcompiler.h" // This must be the last include
|
||||
|
||||
|
@ -144,9 +146,16 @@ struct DataDistributionMetricsWorkload : KVWorkload {
|
|||
self->numShards = result.size();
|
||||
if (self->numShards < 1) return false;
|
||||
state int64_t totalBytes = 0;
|
||||
auto schema = readJSONStrictly(JSONSchemas::dataDistributionStatsSchema.toString()).get_obj();
|
||||
for (int i = 0; i < result.size(); ++i) {
|
||||
ASSERT(result[i].key.startsWith(ddStatsRange.begin));
|
||||
totalBytes += readJSONStrictly(result[i].value.toString()).get_obj()["ShardBytes"].get_int64();
|
||||
std::string errorStr;
|
||||
auto valueObj = readJSONStrictly(result[i].value.toString()).get_obj();
|
||||
if (!schemaMatch(schema, valueObj, errorStr, SevError, true))
|
||||
TraceEvent(SevError, "DataDistributionStatsSchemaValidationFailed")
|
||||
.detail("ErrorStr", errorStr.c_str())
|
||||
.detail("JSON", json_spirit::write_string(json_spirit::mValue(result[i].value.toString())));
|
||||
totalBytes += valueObj["ShardBytes"].get_int64();
|
||||
}
|
||||
self->avgBytes = totalBytes / self->numShards;
|
||||
// fetch data-distribution stats for a smaller range
|
||||
|
|
Loading…
Reference in New Issue