/*
 * tester.actor.cpp
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <boost/algorithm/string/predicate.hpp>
#include <cinttypes>
#include <fstream>
#include <functional>
#include <map>
#include <toml.hpp>

#include "flow/ActorCollection.h"
#include "fdbrpc/sim_validation.h"
#include "fdbrpc/simulator.h"
#include "fdbclient/ClusterInterface.h"
#include "fdbclient/NativeAPI.actor.h"
#include "fdbclient/SystemData.h"
#include "fdbserver/TesterInterface.actor.h"
#include "fdbserver/WorkerInterface.actor.h"
#include "fdbserver/workloads/workloads.actor.h"
#include "fdbserver/Status.h"
#include "fdbserver/QuietDatabase.h"
#include "fdbclient/MonitorLeader.h"
#include "fdbserver/CoordinationInterface.h"
#include "fdbclient/ManagementAPI.actor.h"
#include "fdbserver/WorkerInterface.actor.h"
#include "flow/actorcompiler.h" // This must be the last #include.

using namespace std;

WorkloadContext::WorkloadContext() {}

WorkloadContext::WorkloadContext(const WorkloadContext& r)
  : options(r.options), clientId(r.clientId), clientCount(r.clientCount), dbInfo(r.dbInfo),
    sharedRandomNumber(r.sharedRandomNumber) {}

WorkloadContext::~WorkloadContext() {}

const char HEX_CHAR_LOOKUP[16] = { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f' };
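
// Writes the 16-character hex encoding of index into data[offset .. offset + 15], most significant digit first.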
void emplaceIndex(uint8_t* data, int offset, int64_t index) {
    for (int i = 0; i < 16; i++) {
        data[(15 - i) + offset] = HEX_CHAR_LOOKUP[index & 0xf];
        index = index >> 4;
    }
}
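
// Test keys encode a double as the 16-character hex form of its bit pattern; testKeyToDouble() inverts the mapping
// by parsing those hex digits back into the original bits.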
Key doubleToTestKey(double p) {
    return StringRef(format("%016llx", *(uint64_t*)&p));
}

double testKeyToDouble(const KeyRef& p) {
    uint64_t x = 0;
    sscanf(p.toString().c_str(), "%" SCNx64, &x);
    return *(double*)&x;
}

Key doubleToTestKey(double p, const KeyRef& prefix) {
    return doubleToTestKey(p).withPrefix(prefix);
}

Key KVWorkload::getRandomKey() const {
    return getRandomKey(absentFrac);
}

Key KVWorkload::getRandomKey(double absentFrac) const {
    if (absentFrac > 0.0000001) {
        return getRandomKey(deterministicRandom()->random01() < absentFrac);
    } else {
        return getRandomKey(false);
    }
}

Key KVWorkload::getRandomKey(bool absent) const {
    return keyForIndex(deterministicRandom()->randomInt(0, nodeCount), absent);
}

Key KVWorkload::keyForIndex(uint64_t index) const {
    if (absentFrac > 0.0000001) {
        return keyForIndex(index, deterministicRandom()->random01() < absentFrac);
    } else {
        return keyForIndex(index, false);
    }
}

Key KVWorkload::keyForIndex(uint64_t index, bool absent) const {
    int adjustedKeyBytes = (absent) ? (keyBytes + 1) : keyBytes;
    Key result = makeString(adjustedKeyBytes);
    uint8_t* data = mutateString(result);
    memset(data, '.', adjustedKeyBytes);

    int idx = 0;
    if (nodePrefix > 0) {
        ASSERT(keyBytes >= 32);
        emplaceIndex(data, 0, nodePrefix);
        idx += 16;
    }
    ASSERT(keyBytes >= 16);
    double d = double(index) / nodeCount;
    emplaceIndex(data, idx, *(int64_t*)&d);

    return result;
}

double testKeyToDouble(const KeyRef& p, const KeyRef& prefix) {
    return testKeyToDouble(p.removePrefix(prefix));
}
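
// Advances *last by an exponentially distributed interval with the given mean and waits until that time, so
// successive calls approximate the arrival times of a Poisson process.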
ACTOR Future<Void> poisson(double* last, double meanInterval) {
    *last += meanInterval * -log(deterministicRandom()->random01());
    wait(delayUntil(*last));
    return Void();
}

ACTOR Future<Void> uniform(double* last, double meanInterval) {
    *last += meanInterval;
    wait(delayUntil(*last));
    return Void();
}
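
// The getOption() overloads below look up a key in a workload's option list and parse the value into the requested
// type. A consumed option has its value blanked so that checkAllOptionsConsumed() can later report any options the
// workload did not recognize.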
Value getOption(VectorRef<KeyValueRef> options, Key key, Value defaultValue) {
    for (int i = 0; i < options.size(); i++)
        if (options[i].key == key) {
            Value value = options[i].value;
            options[i].value = LiteralStringRef("");
            return value;
        }

    return defaultValue;
}

int getOption(VectorRef<KeyValueRef> options, Key key, int defaultValue) {
    for (int i = 0; i < options.size(); i++)
        if (options[i].key == key) {
            int r;
            if (sscanf(options[i].value.toString().c_str(), "%d", &r)) {
                options[i].value = LiteralStringRef("");
                return r;
            } else {
                TraceEvent(SevError, "InvalidTestOption").detail("OptionName", key);
                throw test_specification_invalid();
            }
        }

    return defaultValue;
}

uint64_t getOption(VectorRef<KeyValueRef> options, Key key, uint64_t defaultValue) {
    for (int i = 0; i < options.size(); i++)
        if (options[i].key == key) {
            uint64_t r;
            if (sscanf(options[i].value.toString().c_str(), "%" SCNu64, &r)) {
                options[i].value = LiteralStringRef("");
                return r;
            } else {
                TraceEvent(SevError, "InvalidTestOption").detail("OptionName", key);
                throw test_specification_invalid();
            }
        }

    return defaultValue;
}

int64_t getOption(VectorRef<KeyValueRef> options, Key key, int64_t defaultValue) {
    for (int i = 0; i < options.size(); i++)
        if (options[i].key == key) {
            int64_t r;
            if (sscanf(options[i].value.toString().c_str(), "%" SCNd64, &r)) {
                options[i].value = LiteralStringRef("");
                return r;
            } else {
                TraceEvent(SevError, "InvalidTestOption").detail("OptionName", key);
                throw test_specification_invalid();
            }
        }

    return defaultValue;
}

double getOption(VectorRef<KeyValueRef> options, Key key, double defaultValue) {
    for (int i = 0; i < options.size(); i++)
        if (options[i].key == key) {
            float r;
            if (sscanf(options[i].value.toString().c_str(), "%f", &r)) {
                options[i].value = LiteralStringRef("");
                return r;
            }
        }

    return defaultValue;
}

bool getOption(VectorRef<KeyValueRef> options, Key key, bool defaultValue) {
    Value p = getOption(options, key, defaultValue ? LiteralStringRef("true") : LiteralStringRef("false"));
    if (p == LiteralStringRef("true"))
        return true;
    if (p == LiteralStringRef("false"))
        return false;
    ASSERT(false);
    return false; // Assure that compiler is fine with the function
}

vector<std::string> getOption(VectorRef<KeyValueRef> options, Key key, vector<std::string> defaultValue) {
    for (int i = 0; i < options.size(); i++)
        if (options[i].key == key) {
            vector<std::string> v;
            int begin = 0;
            for (int c = 0; c < options[i].value.size(); c++)
                if (options[i].value[c] == ',') {
                    v.push_back(options[i].value.substr(begin, c - begin).toString());
                    begin = c + 1;
                }
            v.push_back(options[i].value.substr(begin).toString());
            options[i].value = LiteralStringRef("");
            return v;
        }
    return defaultValue;
}

// returns unconsumed options
Standalone<VectorRef<KeyValueRef>> checkAllOptionsConsumed(VectorRef<KeyValueRef> options) {
    static StringRef nothing = LiteralStringRef("");
    Standalone<VectorRef<KeyValueRef>> unconsumed;
    for (int i = 0; i < options.size(); i++)
        if (!(options[i].value == nothing)) {
            TraceEvent(SevError, "OptionNotConsumed")
                .detail("Key", options[i].key.toString().c_str())
                .detail("Value", options[i].value.toString().c_str());
            unconsumed.push_back_deep(unconsumed.arena(), options[i]);
        }
    return unconsumed;
}
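
// A workload that owns a list of nested workloads and fans each phase (setup, start, check, metrics) out to all of
// them, combining the results.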
struct CompoundWorkload : TestWorkload {
    vector<TestWorkload*> workloads;

    CompoundWorkload(WorkloadContext& wcx) : TestWorkload(wcx) {}
    CompoundWorkload* add(TestWorkload* w) {
        workloads.push_back(w);
        return this;
    }

    ~CompoundWorkload() override {
        for (int w = 0; w < workloads.size(); w++)
            delete workloads[w];
    }
    std::string description() const override {
        std::string d;
        for (int w = 0; w < workloads.size(); w++)
            d += workloads[w]->description() + (w == workloads.size() - 1 ? "" : ";");
        return d;
    }
    Future<Void> setup(Database const& cx) override {
        vector<Future<Void>> all;
        all.reserve(workloads.size());
        for (int w = 0; w < workloads.size(); w++)
            all.push_back(workloads[w]->setup(cx));
        return waitForAll(all);
    }
    Future<Void> start(Database const& cx) override {
        vector<Future<Void>> all;
        all.reserve(workloads.size());
        for (int w = 0; w < workloads.size(); w++)
            all.push_back(workloads[w]->start(cx));
        return waitForAll(all);
    }
    Future<bool> check(Database const& cx) override {
        vector<Future<bool>> all;
        all.reserve(workloads.size());
        for (int w = 0; w < workloads.size(); w++)
            all.push_back(workloads[w]->check(cx));
        return allTrue(all);
    }
    void getMetrics(vector<PerfMetric>& m) override {
        for (int w = 0; w < workloads.size(); w++) {
            vector<PerfMetric> p;
            workloads[w]->getMetrics(p);
            for (int i = 0; i < p.size(); i++)
                m.push_back(p[i].withPrefix(workloads[w]->description() + "."));
        }
    }
    double getCheckTimeout() const override {
        double m = 0;
        for (int w = 0; w < workloads.size(); w++)
            m = std::max(workloads[w]->getCheckTimeout(), m);
        return m;
    }
};
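
// Instantiates the workload named by the "testName" option through IWorkloadFactory and rejects the test
// specification if the workload could not be created or left any option unconsumed.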
TestWorkload* getWorkloadIface(WorkloadRequest work,
                               VectorRef<KeyValueRef> options,
                               Reference<AsyncVar<ServerDBInfo>> dbInfo) {
    Value testName = getOption(options, LiteralStringRef("testName"), LiteralStringRef("no-test-specified"));
    WorkloadContext wcx;
    wcx.clientId = work.clientId;
    wcx.clientCount = work.clientCount;
    wcx.dbInfo = dbInfo;
    wcx.options = options;
    wcx.sharedRandomNumber = work.sharedRandomNumber;

    TestWorkload* workload = IWorkloadFactory::create(testName.toString(), wcx);

    auto unconsumedOptions = checkAllOptionsConsumed(workload ? workload->options : VectorRef<KeyValueRef>());
    if (!workload || unconsumedOptions.size()) {
        TraceEvent evt(SevError, "TestCreationError");
        evt.detail("TestName", testName);
        if (!workload) {
            evt.detail("Reason", "Null workload");
            fprintf(stderr,
                    "ERROR: Workload could not be created, perhaps testName (%s) is not a valid workload\n",
                    printable(testName).c_str());
        } else {
            evt.detail("Reason", "Not all options consumed");
            fprintf(stderr, "ERROR: Workload had invalid options. The following were unrecognized:\n");
            for (int i = 0; i < unconsumedOptions.size(); i++)
                fprintf(stderr,
                        " '%s' = '%s'\n",
                        unconsumedOptions[i].key.toString().c_str(),
                        unconsumedOptions[i].value.toString().c_str());
            delete workload;
        }
        throw test_specification_invalid();
    }
    return workload;
}

TestWorkload* getWorkloadIface(WorkloadRequest work, Reference<AsyncVar<ServerDBInfo>> dbInfo) {
    if (work.options.size() < 1) {
        TraceEvent(SevError, "TestCreationError").detail("Reason", "No options provided");
        fprintf(stderr, "ERROR: No options were provided for workload.\n");
        throw test_specification_invalid();
    }
    if (work.options.size() == 1)
        return getWorkloadIface(work, work.options[0], dbInfo);

    WorkloadContext wcx;
    wcx.clientId = work.clientId;
    wcx.clientCount = work.clientCount;
    wcx.sharedRandomNumber = work.sharedRandomNumber;
    // FIXME: Other stuff not filled in; why isn't this constructed here and passed down to the other
    // getWorkloadIface()?
    CompoundWorkload* compound = new CompoundWorkload(wcx);
    for (int i = 0; i < work.options.size(); i++) {
        TestWorkload* workload = getWorkloadIface(work, work.options[i], dbInfo);
        compound->add(workload);
    }
    return compound;
}

/**
 * Only works in simulation. This method prints all simulated processes in a human readable form to stdout. It groups
 * processes by data center, data hall, zone, and machine (in this order).
 */
void printSimulatedTopology() {
    if (!g_network->isSimulated()) {
        return;
    }
    auto processes = g_simulator.getAllProcesses();
    std::sort(processes.begin(), processes.end(), [](ISimulator::ProcessInfo* lhs, ISimulator::ProcessInfo* rhs) {
        auto l = lhs->locality;
        auto r = rhs->locality;
        if (l.dcId() != r.dcId()) {
            return l.dcId() < r.dcId();
        }
        if (l.dataHallId() != r.dataHallId()) {
            return l.dataHallId() < r.dataHallId();
        }
        if (l.zoneId() != r.zoneId()) {
            return l.zoneId() < r.zoneId();
        }
        if (l.machineId() != r.machineId()) {
            return l.machineId() < r.machineId();
        }
        return lhs->address < rhs->address;
    });
    printf("Simulated Cluster Topology:\n");
    printf("===========================\n");
    Optional<Standalone<StringRef>> dcId, dataHallId, zoneId, machineId;
    for (auto p : processes) {
        std::string indent = "";
        if (dcId != p->locality.dcId()) {
            dcId = p->locality.dcId();
            printf("%sdcId: %s\n", indent.c_str(), p->locality.describeDcId().c_str());
        }
        indent += "  ";
        if (dataHallId != p->locality.dataHallId()) {
            dataHallId = p->locality.dataHallId();
            printf("%sdataHallId: %s\n", indent.c_str(), p->locality.describeDataHall().c_str());
        }
        indent += "  ";
        if (zoneId != p->locality.zoneId()) {
            zoneId = p->locality.zoneId();
            printf("%szoneId: %s\n", indent.c_str(), p->locality.describeZone().c_str());
        }
        indent += "  ";
        if (machineId != p->locality.machineId()) {
            machineId = p->locality.machineId();
            printf("%smachineId: %s\n", indent.c_str(), p->locality.describeMachineId().c_str());
        }
        indent += "  ";
        printf("%sAddress: %s\n", indent.c_str(), p->address.toString().c_str());
        indent += "  ";
        printf("%sClass: %s\n", indent.c_str(), p->startingClass.toString().c_str());
        printf("%sName: %s\n", indent.c_str(), p->name);
    }
}

ACTOR Future<Void> databaseWarmer(Database cx) {
    loop {
        state Transaction tr(cx);
        wait(success(tr.getReadVersion()));
        wait(delay(0.25));
    }
}

// Tries indefinitely to commit a simple, self-conflicting transaction
ACTOR Future<Void> pingDatabase(Database cx) {
    state Transaction tr(cx);
    loop {
        try {
            tr.setOption(FDBTransactionOptions::PRIORITY_SYSTEM_IMMEDIATE);
            tr.setOption(FDBTransactionOptions::LOCK_AWARE);
            Optional<Value> v =
                wait(tr.get(StringRef("/Liveness/" + deterministicRandom()->randomUniqueID().toString())));
            tr.makeSelfConflicting();
            wait(tr.commit());
            return Void();
        } catch (Error& e) {
            TraceEvent("PingingDatabaseTransactionError").error(e);
            wait(tr.onError(e));
        }
    }
}

ACTOR Future<Void> testDatabaseLiveness(Database cx,
                                        double databasePingDelay,
                                        string context,
                                        double startDelay = 0.0) {
    wait(delay(startDelay));
    loop {
        try {
            state double start = now();
            auto traceMsg = "PingingDatabaseLiveness_" + context;
            TraceEvent(traceMsg.c_str());
            wait(timeoutError(pingDatabase(cx), databasePingDelay));
            double pingTime = now() - start;
            ASSERT(pingTime > 0);
            TraceEvent(("PingingDatabaseLivenessDone_" + context).c_str()).detail("TimeTaken", pingTime);
            wait(delay(databasePingDelay - pingTime));
        } catch (Error& e) {
            if (e.code() != error_code_actor_cancelled)
                TraceEvent(SevError, ("PingingDatabaseLivenessError_" + context).c_str())
                    .error(e)
                    .detail("PingDelay", databasePingDelay);
            throw;
        }
    }
}

template <class T>
void sendResult(ReplyPromise<T>& reply, Optional<ErrorOr<T>> const& result) {
    auto& res = result.get();
    if (res.isError())
        reply.sendError(res.getError());
    else
        reply.send(res.get());
}
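
// Runs on a tester process and services the setup/start/check/metrics/stop requests for a single workload. The
// result of each phase is cached so that repeated requests for the same phase reply with the same outcome.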
ACTOR Future<Void> runWorkloadAsync(Database cx,
                                    WorkloadInterface workIface,
                                    TestWorkload* workload,
                                    double databasePingDelay) {
    state unique_ptr<TestWorkload> delw(workload);
    state Optional<ErrorOr<Void>> setupResult;
    state Optional<ErrorOr<Void>> startResult;
    state Optional<ErrorOr<CheckReply>> checkResult;
    state ReplyPromise<Void> setupReq;
    state ReplyPromise<Void> startReq;
    state ReplyPromise<CheckReply> checkReq;

    TraceEvent("TestBeginAsync", workIface.id())
        .detail("Workload", workload->description())
        .detail("DatabasePingDelay", databasePingDelay);

    state Future<Void> databaseError =
        databasePingDelay == 0.0 ? Never() : testDatabaseLiveness(cx, databasePingDelay, "RunWorkloadAsync");

    loop choose {
        when(ReplyPromise<Void> req = waitNext(workIface.setup.getFuture())) {
            printf("Test received trigger for setup...\n");
            TraceEvent("TestSetupBeginning", workIface.id()).detail("Workload", workload->description());
            setupReq = req;
            if (!setupResult.present()) {
                try {
                    wait(workload->setup(cx) || databaseError);
                    TraceEvent("TestSetupComplete", workIface.id()).detail("Workload", workload->description());
                    setupResult = Void();
                } catch (Error& e) {
                    setupResult = operation_failed();
                    TraceEvent(SevError, "TestSetupError", workIface.id())
                        .error(e)
                        .detail("Workload", workload->description());
                    if (e.code() == error_code_please_reboot || e.code() == error_code_please_reboot_delete)
                        throw;
                }
            }
            sendResult(setupReq, setupResult);
        }
        when(ReplyPromise<Void> req = waitNext(workIface.start.getFuture())) {
            startReq = req;
            if (!startResult.present()) {
                try {
                    TraceEvent("TestStarting", workIface.id()).detail("Workload", workload->description());
                    wait(workload->start(cx) || databaseError);
                    startResult = Void();
                } catch (Error& e) {
                    startResult = operation_failed();
                    if (e.code() == error_code_please_reboot || e.code() == error_code_please_reboot_delete)
                        throw;
                    TraceEvent(SevError, "TestFailure", workIface.id())
                        .error(e, true)
                        .detail("Reason", "Error starting workload")
                        .detail("Workload", workload->description());
                    // ok = false;
                }
                TraceEvent("TestComplete", workIface.id())
                    .detail("Workload", workload->description())
                    .detail("OK", !startResult.get().isError());
                printf("%s complete\n", workload->description().c_str());
            }
            sendResult(startReq, startResult);
        }
        when(ReplyPromise<CheckReply> req = waitNext(workIface.check.getFuture())) {
            checkReq = req;
            if (!checkResult.present()) {
                try {
                    TraceEvent("TestChecking", workIface.id()).detail("Workload", workload->description());
                    bool check = wait(timeoutError(workload->check(cx), workload->getCheckTimeout()));
                    checkResult = CheckReply{ (!startResult.present() || !startResult.get().isError()) && check };
                } catch (Error& e) {
                    checkResult = operation_failed(); // was: checkResult = false;
                    if (e.code() == error_code_please_reboot || e.code() == error_code_please_reboot_delete)
                        throw;
                    TraceEvent(SevError, "TestFailure", workIface.id())
                        .error(e)
                        .detail("Reason", "Error checking workload")
                        .detail("Workload", workload->description());
                    // ok = false;
                }
                TraceEvent("TestCheckComplete", workIface.id()).detail("Workload", workload->description());
            }

            sendResult(checkReq, checkResult);
        }
        when(ReplyPromise<vector<PerfMetric>> req = waitNext(workIface.metrics.getFuture())) {
            state ReplyPromise<vector<PerfMetric>> s_req = req;
            try {
                vector<PerfMetric> m;
                workload->getMetrics(m);
                TraceEvent("WorkloadSendMetrics", workIface.id()).detail("Count", m.size());
                req.send(m);
            } catch (Error& e) {
                if (e.code() == error_code_please_reboot || e.code() == error_code_please_reboot_delete)
                    throw;
                TraceEvent(SevError, "WorkloadSendMetrics", workIface.id()).error(e);
                s_req.sendError(operation_failed());
            }
        }
        when(ReplyPromise<Void> r = waitNext(workIface.stop.getFuture())) {
            r.send(Void());
            break;
        }
    }
    return Void();
}

ACTOR Future<Void> testerServerWorkload(WorkloadRequest work,
                                        Reference<ClusterConnectionFile> ccf,
                                        Reference<AsyncVar<struct ServerDBInfo>> dbInfo,
                                        LocalityData locality) {
    state WorkloadInterface workIface;
    state bool replied = false;
    state Database cx;
    try {
        std::map<std::string, std::string> details;
        details["WorkloadTitle"] = printable(work.title);
        details["ClientId"] = format("%d", work.clientId);
        details["ClientCount"] = format("%d", work.clientCount);
        details["WorkloadTimeout"] = format("%d", work.timeout);
        startRole(Role::TESTER, workIface.id(), UID(), details);

        if (work.useDatabase) {
            cx = Database::createDatabase(ccf, -1, true, locality);
            wait(delay(1.0));
        }

        // add test for "done" ?
        TraceEvent("WorkloadReceived", workIface.id()).detail("Title", work.title);
        TestWorkload* workload = getWorkloadIface(work, dbInfo);
        if (!workload) {
            TraceEvent("TestCreationError").detail("Reason", "Workload could not be created");
            fprintf(stderr, "ERROR: The workload could not be created.\n");
            throw test_specification_invalid();
        }
        Future<Void> test = runWorkloadAsync(cx, workIface, workload, work.databasePingDelay) ||
                            traceRole(Role::TESTER, workIface.id());
        work.reply.send(workIface);
        replied = true;

        if (work.timeout > 0) {
            test = timeoutError(test, work.timeout);
        }

        wait(test);

        endRole(Role::TESTER, workIface.id(), "Complete");
    } catch (Error& e) {
        if (!replied) {
            if (e.code() == error_code_test_specification_invalid)
                work.reply.sendError(e);
            else
                work.reply.sendError(operation_failed());
        }

        bool ok = e.code() == error_code_please_reboot || e.code() == error_code_please_reboot_delete ||
                  e.code() == error_code_actor_cancelled;
        endRole(Role::TESTER, workIface.id(), "Error", ok, e);

        if (e.code() != error_code_test_specification_invalid && e.code() != error_code_timed_out) {
            throw; // fatal errors will kill the testerServer as well
        }
    }
    return Void();
}
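
// Main loop of the tester role: accepts WorkloadRequests from the cluster and spawns testerServerWorkload() for each
// one; a fatal error surfaced through the actor collection takes down the whole tester.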
ACTOR Future<Void> testerServerCore(TesterInterface interf,
                                    Reference<ClusterConnectionFile> ccf,
                                    Reference<AsyncVar<struct ServerDBInfo>> dbInfo,
                                    LocalityData locality) {
    state PromiseStream<Future<Void>> addWorkload;
    state Future<Void> workerFatalError = actorCollection(addWorkload.getFuture());

    TraceEvent("StartingTesterServerCore", interf.id());
    loop choose {
        when(wait(workerFatalError)) {}
        when(WorkloadRequest work = waitNext(interf.recruitments.getFuture())) {
            addWorkload.send(testerServerWorkload(work, ccf, dbInfo, locality));
        }
    }
}

ACTOR Future<Void> clearData(Database cx) {
    state Transaction tr(cx);
    loop {
        try {
            // This transaction needs to be self-conflicting, but not conflict consistently with
            // any other transactions
            tr.clear(normalKeys);
            tr.makeSelfConflicting();
            wait(success(tr.getReadVersion())); // required since we use addReadConflictRange but not get
            wait(tr.commit());
            TraceEvent("TesterClearingDatabase").detail("AtVersion", tr.getCommittedVersion());
            break;
        } catch (Error& e) {
            TraceEvent(SevWarn, "TesterClearingDatabaseError").error(e);
            wait(tr.onError(e));
        }
    }
    return Void();
}

Future<Void> dumpDatabase(Database const& cx, std::string const& outputFilename, KeyRange const& range);

int passCount = 0;
int failCount = 0;
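
// Merges the per-client metric vectors by metric name: values are summed, and metrics flagged as averaged are
// divided by the number of clients that reported them.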
vector<PerfMetric> aggregateMetrics(vector<vector<PerfMetric>> metrics) {
    std::map<std::string, vector<PerfMetric>> metricMap;
    for (int i = 0; i < metrics.size(); i++) {
        vector<PerfMetric> workloadMetrics = metrics[i];
        TraceEvent("MetricsReturned").detail("Count", workloadMetrics.size());
        for (int m = 0; m < workloadMetrics.size(); m++) {
            printf("Metric (%d, %d): %s, %f, %s\n",
                   i,
                   m,
                   workloadMetrics[m].name().c_str(),
                   workloadMetrics[m].value(),
                   workloadMetrics[m].formatted().c_str());
            metricMap[workloadMetrics[m].name()].push_back(workloadMetrics[m]);
        }
    }
    TraceEvent("Metric")
        .detail("Name", "Reporting Clients")
        .detail("Value", (double)metrics.size())
        .detail("Formatted", format("%d", metrics.size()).c_str());

    vector<PerfMetric> result;
    std::map<std::string, vector<PerfMetric>>::iterator it;
    for (it = metricMap.begin(); it != metricMap.end(); it++) {
        auto& vec = it->second;
        if (!vec.size())
            continue;
        double sum = 0;
        for (int i = 0; i < vec.size(); i++)
            sum += vec[i].value();
        if (vec[0].averaged() && vec.size())
            sum /= vec.size();
        result.push_back(PerfMetric(vec[0].name(), sum, false, vec[0].format_code()));
    }
    return result;
}

void logMetrics(vector<PerfMetric> metrics) {
    for (int idx = 0; idx < metrics.size(); idx++)
        TraceEvent("Metric")
            .detail("Name", metrics[idx].name())
            .detail("Value", metrics[idx].value())
            .detail("Formatted", format(metrics[idx].format_code().c_str(), metrics[idx].value()));
}

template <class T>
void throwIfError(const std::vector<Future<ErrorOr<T>>>& futures, std::string errorMsg) {
    for (auto& future : futures) {
        if (future.get().isError()) {
            TraceEvent(SevError, errorMsg.c_str()).error(future.get().getError());
            throw future.get().getError();
        }
    }
}
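
// Distributes a TestSpec to every tester and drives the workload through its phases (setup, execution, check,
// metrics), returning aggregated metrics along with per-client pass/fail counts.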
ACTOR Future<DistributedTestResults> runWorkload(Database cx, std::vector<TesterInterface> testers, TestSpec spec) {
    TraceEvent("TestRunning")
        .detail("WorkloadTitle", spec.title)
        .detail("TesterCount", testers.size())
        .detail("Phases", spec.phases)
        .detail("TestTimeout", spec.timeout);

    state vector<Future<WorkloadInterface>> workRequests;
    state vector<vector<PerfMetric>> metricsResults;

    state int i = 0;
    state int success = 0;
    state int failure = 0;
    int64_t sharedRandom = deterministicRandom()->randomInt64(0, 10000000);
    for (; i < testers.size(); i++) {
        WorkloadRequest req;
        req.title = spec.title;
        req.useDatabase = spec.useDB;
        req.timeout = spec.timeout;
        req.databasePingDelay = spec.useDB ? spec.databasePingDelay : 0.0;
        req.options = spec.options;
        req.clientId = i;
        req.clientCount = testers.size();
        req.sharedRandomNumber = sharedRandom;
        workRequests.push_back(testers[i].recruitments.getReply(req));
    }

    state vector<WorkloadInterface> workloads = wait(getAll(workRequests));
    state double waitForFailureTime = g_network->isSimulated() ? 24 * 60 * 60 : 60;
    if (g_network->isSimulated() && spec.simCheckRelocationDuration)
        debug_setCheckRelocationDuration(true);

    if (spec.phases & TestWorkload::SETUP) {
        state std::vector<Future<ErrorOr<Void>>> setups;
        printf("setting up test (%s)...\n", printable(spec.title).c_str());
        TraceEvent("TestSetupStart").detail("WorkloadTitle", spec.title);
        setups.reserve(workloads.size());
        for (int i = 0; i < workloads.size(); i++)
            setups.push_back(workloads[i].setup.template getReplyUnlessFailedFor<Void>(waitForFailureTime, 0));
        wait(waitForAll(setups));
        throwIfError(setups, "SetupFailedForWorkload" + printable(spec.title));
        TraceEvent("TestSetupComplete").detail("WorkloadTitle", spec.title);
    }

    if (spec.phases & TestWorkload::EXECUTION) {
        TraceEvent("TestStarting").detail("WorkloadTitle", spec.title);
        printf("running test (%s)...\n", printable(spec.title).c_str());
        state std::vector<Future<ErrorOr<Void>>> starts;
        starts.reserve(workloads.size());
        for (int i = 0; i < workloads.size(); i++)
            starts.push_back(workloads[i].start.template getReplyUnlessFailedFor<Void>(waitForFailureTime, 0));
        wait(waitForAll(starts));
        throwIfError(starts, "StartFailedForWorkload" + printable(spec.title));
        printf("%s complete\n", printable(spec.title).c_str());
        TraceEvent("TestComplete").detail("WorkloadTitle", spec.title);
    }

    if (spec.phases & TestWorkload::CHECK) {
        if (spec.useDB && (spec.phases & TestWorkload::EXECUTION)) {
            wait(delay(3.0));
        }

        state std::vector<Future<ErrorOr<CheckReply>>> checks;
        TraceEvent("CheckingResults");

        printf("checking test (%s)...\n", printable(spec.title).c_str());

        checks.reserve(workloads.size());
        for (int i = 0; i < workloads.size(); i++)
            checks.push_back(workloads[i].check.template getReplyUnlessFailedFor<CheckReply>(waitForFailureTime, 0));
        wait(waitForAll(checks));

        throwIfError(checks, "CheckFailedForWorkload" + printable(spec.title));

        for (int i = 0; i < checks.size(); i++) {
            if (checks[i].get().get().value)
                success++;
            else
                failure++;
        }
    }

    if (spec.phases & TestWorkload::METRICS) {
        state std::vector<Future<ErrorOr<vector<PerfMetric>>>> metricTasks;
        printf("fetching metrics (%s)...\n", printable(spec.title).c_str());
        TraceEvent("TestFetchingMetrics").detail("WorkloadTitle", spec.title);
        metricTasks.reserve(workloads.size());
        for (int i = 0; i < workloads.size(); i++)
            metricTasks.push_back(
                workloads[i].metrics.template getReplyUnlessFailedFor<vector<PerfMetric>>(waitForFailureTime, 0));
        wait(waitForAll(metricTasks));
        throwIfError(metricTasks, "MetricFailedForWorkload" + printable(spec.title));
        for (int i = 0; i < metricTasks.size(); i++) {
            metricsResults.push_back(metricTasks[i].get().get());
        }
    }

    // Stopping the workloads is unreliable, but they have a timeout
    // FIXME: stop if one of the above phases throws an exception
    for (int i = 0; i < workloads.size(); i++)
        workloads[i].stop.send(ReplyPromise<Void>());

    return DistributedTestResults(aggregateMetrics(metricsResults), success, failure);
}

// Sets the database configuration by running the ChangeConfig workload
ACTOR Future<Void> changeConfiguration(Database cx, std::vector<TesterInterface> testers, StringRef configMode) {
    state TestSpec spec;
    Standalone<VectorRef<KeyValueRef>> options;
    spec.title = LiteralStringRef("ChangeConfig");
    options.push_back_deep(options.arena(),
                           KeyValueRef(LiteralStringRef("testName"), LiteralStringRef("ChangeConfig")));
    options.push_back_deep(options.arena(), KeyValueRef(LiteralStringRef("configMode"), configMode));
    spec.options.push_back_deep(spec.options.arena(), options);

    DistributedTestResults testResults = wait(runWorkload(cx, testers, spec));

    return Void();
}

// Runs the consistency check workload, which verifies that the database is in a consistent state
ACTOR Future<Void> checkConsistency(Database cx,
                                    std::vector<TesterInterface> testers,
                                    bool doQuiescentCheck,
                                    bool doCacheCheck,
                                    bool doTSSCheck,
                                    double quiescentWaitTimeout,
                                    double softTimeLimit,
                                    double databasePingDelay,
                                    Reference<AsyncVar<ServerDBInfo>> dbInfo) {
    state TestSpec spec;

    state double connectionFailures;
    if (g_network->isSimulated()) {
        connectionFailures = g_simulator.connectionFailuresDisableDuration;
        g_simulator.connectionFailuresDisableDuration = 1e6;
        g_simulator.speedUpSimulation = true;
    }

    Standalone<VectorRef<KeyValueRef>> options;
    StringRef performQuiescent = LiteralStringRef("false");
    StringRef performCacheCheck = LiteralStringRef("false");
    StringRef performTSSCheck = LiteralStringRef("false");
    if (doQuiescentCheck) {
        performQuiescent = LiteralStringRef("true");
    }
    if (doCacheCheck) {
        performCacheCheck = LiteralStringRef("true");
    }
    if (doTSSCheck) {
        performTSSCheck = LiteralStringRef("true");
    }
    spec.title = LiteralStringRef("ConsistencyCheck");
    spec.databasePingDelay = databasePingDelay;
    spec.timeout = 32000;
    options.push_back_deep(options.arena(),
                           KeyValueRef(LiteralStringRef("testName"), LiteralStringRef("ConsistencyCheck")));
    options.push_back_deep(options.arena(), KeyValueRef(LiteralStringRef("performQuiescentChecks"), performQuiescent));
    options.push_back_deep(options.arena(), KeyValueRef(LiteralStringRef("performCacheCheck"), performCacheCheck));
    options.push_back_deep(options.arena(), KeyValueRef(LiteralStringRef("performTSSCheck"), performTSSCheck));
    options.push_back_deep(options.arena(),
                           KeyValueRef(LiteralStringRef("quiescentWaitTimeout"),
                                       ValueRef(options.arena(), format("%f", quiescentWaitTimeout))));
    options.push_back_deep(options.arena(), KeyValueRef(LiteralStringRef("distributed"), LiteralStringRef("false")));
    spec.options.push_back_deep(spec.options.arena(), options);

    state double start = now();
    state bool lastRun = false;
    loop {
        DistributedTestResults testResults = wait(runWorkload(cx, testers, spec));
        if (testResults.ok() || lastRun) {
            if (g_network->isSimulated()) {
                g_simulator.connectionFailuresDisableDuration = connectionFailures;
            }
            return Void();
        }
        if (now() - start > softTimeLimit) {
            spec.options[0].push_back_deep(spec.options.arena(),
                                           KeyValueRef(LiteralStringRef("failureIsError"), LiteralStringRef("true")));
            lastRun = true;
        }

        wait(repairDeadDatacenter(cx, dbInfo, "ConsistencyCheck"));
    }
}

ACTOR Future<bool> runTest(Database cx,
                           std::vector<TesterInterface> testers,
                           TestSpec spec,
                           Reference<AsyncVar<ServerDBInfo>> dbInfo) {
    state DistributedTestResults testResults;

    try {
        Future<DistributedTestResults> fTestResults = runWorkload(cx, testers, spec);
        if (spec.timeout > 0) {
            fTestResults = timeoutError(fTestResults, spec.timeout);
        }
        DistributedTestResults _testResults = wait(fTestResults);
        testResults = _testResults;
        logMetrics(testResults.metrics);
    } catch (Error& e) {
        if (e.code() == error_code_timed_out) {
            TraceEvent(SevError, "TestFailure")
                .error(e)
                .detail("Reason", "Test timed out")
                .detail("Timeout", spec.timeout);
            fprintf(stderr, "ERROR: Test timed out after %d seconds.\n", spec.timeout);
            testResults.failures = testers.size();
            testResults.successes = 0;
        } else
            throw;
    }

    state bool ok = testResults.ok();

    if (spec.useDB) {
        if (spec.dumpAfterTest) {
            try {
                wait(timeoutError(dumpDatabase(cx, "dump after " + printable(spec.title) + ".html", allKeys), 30.0));
            } catch (Error& e) {
                TraceEvent(SevError, "TestFailure").error(e).detail("Reason", "Unable to dump database");
                ok = false;
            }

            wait(delay(1.0));
        }

        // Run the consistency check workload
        if (spec.runConsistencyCheck) {
            try {
                bool quiescent = g_network->isSimulated() ? !BUGGIFY : spec.waitForQuiescenceEnd;
                wait(timeoutError(checkConsistency(cx,
                                                   testers,
                                                   quiescent,
                                                   spec.runConsistencyCheckOnCache,
                                                   spec.runConsistencyCheckOnTSS,
                                                   10000.0,
                                                   18000,
                                                   spec.databasePingDelay,
                                                   dbInfo),
                                  20000.0));
            } catch (Error& e) {
                TraceEvent(SevError, "TestFailure").error(e).detail("Reason", "Unable to perform consistency check");
                ok = false;
            }
        }
    }

    TraceEvent(ok ? SevInfo : SevWarnAlways, "TestResults").detail("Workload", spec.title).detail("Passed", (int)ok);
    //.detail("Metrics", metricSummary);

    if (ok) {
        passCount++;
    } else {
        failCount++;
    }

    printf("%d test clients passed; %d test clients failed\n", testResults.successes, testResults.failures);

    if (spec.useDB && spec.clearAfterTest) {
        try {
            TraceEvent("TesterClearingDatabase");
            wait(timeoutError(clearData(cx), 1000.0));
        } catch (Error& e) {
            TraceEvent(SevError, "ErrorClearingDatabaseAfterTest").error(e);
            throw; // If we didn't do this, we don't want any later tests to run on this DB
        }

        wait(delay(1.0));
    }

    return ok;
}

std::map<std::string, std::function<void(const std::string&)>> testSpecGlobalKeys = {
    // These are read by SimulatedCluster and used before testers exist. Thus, they must
    // be recognized and accepted, but there's no point in placing them into a testSpec.
    { "extraDB", [](const std::string& value) { TraceEvent("TestParserTest").detail("ParsedExtraDB", ""); } },
    { "configureLocked",
      [](const std::string& value) { TraceEvent("TestParserTest").detail("ParsedConfigureLocked", ""); } },
    { "minimumReplication",
      [](const std::string& value) { TraceEvent("TestParserTest").detail("ParsedMinimumReplication", ""); } },
    { "minimumRegions",
      [](const std::string& value) { TraceEvent("TestParserTest").detail("ParsedMinimumRegions", ""); } },
    { "logAntiQuorum",
      [](const std::string& value) { TraceEvent("TestParserTest").detail("ParsedLogAntiQuorum", ""); } },
    { "buggify", [](const std::string& value) { TraceEvent("TestParserTest").detail("ParsedBuggify", ""); } },
    // The test harness handles NewSeverity events specially.
    { "StderrSeverity", [](const std::string& value) { TraceEvent("StderrSeverity").detail("NewSeverity", value); } },
    { "ClientInfoLogging",
      [](const std::string& value) {
          if (value == "false") {
              setNetworkOption(FDBNetworkOptions::DISABLE_CLIENT_STATISTICS_LOGGING);
          }
          // else { } It is enabled by default for tester
          TraceEvent("TestParserTest").detail("ClientInfoLogging", value);
      } },
    { "startIncompatibleProcess",
      [](const std::string& value) { TraceEvent("TestParserTest").detail("ParsedStartIncompatibleProcess", value); } },
    { "storageEngineExcludeTypes",
      [](const std::string& value) { TraceEvent("TestParserTest").detail("ParsedStorageEngineExcludeTypes", ""); } },
    { "maxTLogVersion",
      [](const std::string& value) { TraceEvent("TestParserTest").detail("ParsedMaxTLogVersion", ""); } }
};

std::map<std::string, std::function<void(const std::string& value, TestSpec* spec)>> testSpecTestKeys = {
    { "testTitle",
      [](const std::string& value, TestSpec* spec) {
          spec->title = value;
          TraceEvent("TestParserTest").detail("ParsedTest", spec->title);
      } },
    { "timeout",
      [](const std::string& value, TestSpec* spec) {
          sscanf(value.c_str(), "%d", &(spec->timeout));
          ASSERT(spec->timeout > 0);
          TraceEvent("TestParserTest").detail("ParsedTimeout", spec->timeout);
      } },
    { "databasePingDelay",
      [](const std::string& value, TestSpec* spec) {
          double databasePingDelay;
          sscanf(value.c_str(), "%lf", &databasePingDelay);
          ASSERT(databasePingDelay >= 0);
          if (!spec->useDB && databasePingDelay > 0) {
              TraceEvent(SevError, "TestParserError")
                  .detail("Reason", "Cannot have non-zero ping delay on test that does not use database")
                  .detail("PingDelay", databasePingDelay)
                  .detail("UseDB", spec->useDB);
              ASSERT(false);
          }
          spec->databasePingDelay = databasePingDelay;
          TraceEvent("TestParserTest").detail("ParsedPingDelay", spec->databasePingDelay);
      } },
    { "runSetup",
      [](const std::string& value, TestSpec* spec) {
          spec->phases = TestWorkload::EXECUTION | TestWorkload::CHECK | TestWorkload::METRICS;
          if (value == "true")
              spec->phases |= TestWorkload::SETUP;
          TraceEvent("TestParserTest").detail("ParsedSetupFlag", (spec->phases & TestWorkload::SETUP) != 0);
      } },
    { "dumpAfterTest",
      [](const std::string& value, TestSpec* spec) {
          spec->dumpAfterTest = (value == "true");
          TraceEvent("TestParserTest").detail("ParsedDumpAfter", spec->dumpAfterTest);
      } },
    { "clearAfterTest",
      [](const std::string& value, TestSpec* spec) {
          spec->clearAfterTest = (value == "true");
          TraceEvent("TestParserTest").detail("ParsedClearAfter", spec->clearAfterTest);
      } },
    { "useDB",
      [](const std::string& value, TestSpec* spec) {
          spec->useDB = (value == "true");
          TraceEvent("TestParserTest").detail("ParsedUseDB", spec->useDB);
          if (!spec->useDB)
              spec->databasePingDelay = 0.0;
      } },
    { "startDelay",
      [](const std::string& value, TestSpec* spec) {
          sscanf(value.c_str(), "%lf", &spec->startDelay);
          TraceEvent("TestParserTest").detail("ParsedStartDelay", spec->startDelay);
      } },
    { "runConsistencyCheck",
      [](const std::string& value, TestSpec* spec) {
          spec->runConsistencyCheck = (value == "true");
          TraceEvent("TestParserTest").detail("ParsedRunConsistencyCheck", spec->runConsistencyCheck);
      } },
    { "runConsistencyCheckOnCache",
      [](const std::string& value, TestSpec* spec) {
          spec->runConsistencyCheckOnCache = (value == "true");
          TraceEvent("TestParserTest").detail("ParsedRunConsistencyCheckOnCache", spec->runConsistencyCheckOnCache);
      } },
    { "runConsistencyCheckOnTSS",
      [](const std::string& value, TestSpec* spec) {
          spec->runConsistencyCheckOnTSS = (value == "true");
          TraceEvent("TestParserTest").detail("ParsedRunConsistencyCheckOnTSS", spec->runConsistencyCheckOnTSS);
      } },
    { "waitForQuiescence",
      [](const std::string& value, TestSpec* spec) {
          bool toWait = value == "true";
          spec->waitForQuiescenceBegin = toWait;
          spec->waitForQuiescenceEnd = toWait;
          TraceEvent("TestParserTest").detail("ParsedWaitForQuiescence", toWait);
      } },
    { "waitForQuiescenceBegin",
      [](const std::string& value, TestSpec* spec) {
          bool toWait = value == "true";
          spec->waitForQuiescenceBegin = toWait;
          TraceEvent("TestParserTest").detail("ParsedWaitForQuiescenceBegin", toWait);
      } },
    { "waitForQuiescenceEnd",
      [](const std::string& value, TestSpec* spec) {
          bool toWait = value == "true";
          spec->waitForQuiescenceEnd = toWait;
          TraceEvent("TestParserTest").detail("ParsedWaitForQuiescenceEnd", toWait);
      } },
    { "simCheckRelocationDuration",
      [](const std::string& value, TestSpec* spec) {
          spec->simCheckRelocationDuration = (value == "true");
          TraceEvent("TestParserTest").detail("ParsedSimCheckRelocationDuration", spec->simCheckRelocationDuration);
      } },
    { "connectionFailuresDisableDuration",
      [](const std::string& value, TestSpec* spec) {
          double connectionFailuresDisableDuration;
          sscanf(value.c_str(), "%lf", &connectionFailuresDisableDuration);
          ASSERT(connectionFailuresDisableDuration >= 0);
          spec->simConnectionFailuresDisableDuration = connectionFailuresDisableDuration;
          if (g_network->isSimulated())
              g_simulator.connectionFailuresDisableDuration = spec->simConnectionFailuresDisableDuration;
          TraceEvent("TestParserTest")
              .detail("ParsedSimConnectionFailuresDisableDuration", spec->simConnectionFailuresDisableDuration);
      } },
    { "simBackupAgents",
      [](const std::string& value, TestSpec* spec) {
          if (value == "BackupToFile" || value == "BackupToFileAndDB")
              spec->simBackupAgents = ISimulator::BackupAgentType::BackupToFile;
          else
              spec->simBackupAgents = ISimulator::BackupAgentType::NoBackupAgents;
          TraceEvent("TestParserTest").detail("ParsedSimBackupAgents", spec->simBackupAgents);

          if (value == "BackupToDB" || value == "BackupToFileAndDB")
              spec->simDrAgents = ISimulator::BackupAgentType::BackupToDB;
          else
              spec->simDrAgents = ISimulator::BackupAgentType::NoBackupAgents;
          TraceEvent("TestParserTest").detail("ParsedSimDrAgents", spec->simDrAgents);
      } },
    { "checkOnly",
      [](const std::string& value, TestSpec* spec) {
          if (value == "true")
              spec->phases = TestWorkload::CHECK;
      } },
};

vector<TestSpec> readTests(ifstream& ifs) {
    TestSpec spec;
    vector<TestSpec> result;
    Standalone<VectorRef<KeyValueRef>> workloadOptions;
    std::string cline;
    bool beforeFirstTest = true;
    bool parsingWorkloads = false;

    while (ifs.good()) {
        getline(ifs, cline);
        string line = removeWhitespace(string(cline));
        if (!line.size() || line.find(';') == 0)
            continue;

        size_t found = line.find('=');
        if (found == string::npos)
            // no '=' on this line; skip it
            continue;
        string attrib = removeWhitespace(line.substr(0, found));
        string value = removeWhitespace(line.substr(found + 1));

        if (attrib == "testTitle") {
            beforeFirstTest = false;
            parsingWorkloads = false;
            if (workloadOptions.size()) {
                spec.options.push_back_deep(spec.options.arena(), workloadOptions);
                workloadOptions = Standalone<VectorRef<KeyValueRef>>();
            }
            if (spec.options.size() && spec.title.size()) {
                result.push_back(spec);
                spec = TestSpec();
            }

            testSpecTestKeys[attrib](value, &spec);
        } else if (testSpecTestKeys.find(attrib) != testSpecTestKeys.end()) {
            if (parsingWorkloads)
                TraceEvent(SevError, "TestSpecTestParamInWorkload").detail("Attrib", attrib).detail("Value", value);
            testSpecTestKeys[attrib](value, &spec);
        } else if (testSpecGlobalKeys.find(attrib) != testSpecGlobalKeys.end()) {
            if (!beforeFirstTest)
                TraceEvent(SevError, "TestSpecGlobalParamInTest").detail("Attrib", attrib).detail("Value", value);
            testSpecGlobalKeys[attrib](value);
        } else {
            if (attrib == "testName") {
                parsingWorkloads = true;
                if (workloadOptions.size()) {
                    TraceEvent("TestParserFlush").detail("Reason", "new (compound) test");
                    spec.options.push_back_deep(spec.options.arena(), workloadOptions);
                    workloadOptions = Standalone<VectorRef<KeyValueRef>>();
                }
            }

            workloadOptions.push_back_deep(workloadOptions.arena(), KeyValueRef(StringRef(attrib), StringRef(value)));
            TraceEvent("TestParserOption").detail("ParsedKey", attrib).detail("ParsedValue", value);
        }
    }
    if (workloadOptions.size())
        spec.options.push_back_deep(spec.options.arena(), workloadOptions);
    if (spec.options.size() && spec.title.size()) {
        result.push_back(spec);
    }

    return result;
}
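
// Illustrative sketch only (the workload name and option keys below are assumptions, not taken from a real test
// file): readTests() above consumes a plain-text spec made of "key=value" lines. Lines starting with ';' are
// comments, a "testTitle" line begins a new test, keys recognized by testSpecTestKeys configure that test, a
// "testName" line begins a workload, and any other key/value pair is forwarded to that workload as an option.
// For example:
//
//     testTitle=SampleTest
//     timeout=3600
//     runSetup=true
//     testName=SomeWorkload
//     someWorkloadOption=10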

template <typename T>
std::string toml_to_string(const T& value) {
    // TOML formatting renders numbers exactly as they appear in the file, so the result matches the plain-text
    // testspec format. Strings, however, are emitted with surrounding quotes, which we must strip.
    if (value.type() == toml::value_t::string) {
        const std::string& formatted = toml::format(value);
        return formatted.substr(1, formatted.size() - 2);
    } else {
        return toml::format(value);
    }
}
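
// For illustration, assuming toml11's toml::format renders scalar values as they appear in the file: an integer
// 42 is returned as "42", while a string value "ssd" is formatted as "\"ssd\"" and the substr() above strips the
// surrounding quotes, yielding "ssd", the same form the plain-text parser in readTests() would have produced.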

std::vector<TestSpec> readTOMLTests_(std::string fileName) {
    TestSpec spec;
    Standalone<VectorRef<KeyValueRef>> workloadOptions;
    std::vector<TestSpec> result;

    const toml::value& conf = toml::parse(fileName);

    // Then parse each test
    const toml::array& tests = toml::find(conf, "test").as_array();
    for (const toml::value& test : tests) {
        spec = TestSpec();

        // First handle all test-level settings
        for (const auto& [k, v] : test.as_table()) {
            if (k == "workload") {
                continue;
            }
            if (testSpecTestKeys.find(k) != testSpecTestKeys.end()) {
                testSpecTestKeys[k](toml_to_string(v), &spec);
            } else {
                TraceEvent(SevError, "TestSpecUnrecognizedTestParam")
                    .detail("Attrib", k)
                    .detail("Value", toml_to_string(v));
            }
        }

        // And then copy the workload attributes to spec.options
        const toml::array& workloads = toml::find(test, "workload").as_array();
        for (const toml::value& workload : workloads) {
            workloadOptions = Standalone<VectorRef<KeyValueRef>>();
            TraceEvent("TestParserFlush").detail("Reason", "new (compound) test");
            for (const auto& [attrib, v] : workload.as_table()) {
                const std::string& value = toml_to_string(v);
                workloadOptions.push_back_deep(workloadOptions.arena(),
                                               KeyValueRef(StringRef(attrib), StringRef(value)));
                TraceEvent("TestParserOption").detail("ParsedKey", attrib).detail("ParsedValue", value);
            }
            spec.options.push_back_deep(spec.options.arena(), workloadOptions);
        }

        result.push_back(spec);
    }

    return result;
}
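
// Illustrative sketch only (apart from the [[test]] and [[test.workload]] tables, the key and workload names are
// assumptions): readTOMLTests_() expects a top-level array of [[test]] tables, each carrying test-level keys from
// testSpecTestKeys plus one or more [[test.workload]] tables whose key/value pairs become workload options:
//
//     [[test]]
//     testTitle = 'SampleTest'
//
//     [[test.workload]]
//     testName = 'SomeWorkload'
//     someWorkloadOption = 10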

// A hack to catch and log std::exception, because TOML11 has very useful
// error messages, but the actor framework can't handle std::exception.
std::vector<TestSpec> readTOMLTests(std::string fileName) {
    try {
        return readTOMLTests_(fileName);
    } catch (std::exception& e) {
        std::cerr << e.what() << std::endl;
        TraceEvent("TOMLParseError").detail("Error", printable(e.what()));
        // TODO: replace with toml_parse_error();
        throw unknown_error();
    }
}

ACTOR Future<Void> monitorServerDBInfo(Reference<AsyncVar<Optional<ClusterControllerFullInterface>>> ccInterface,
                                       LocalityData locality,
                                       Reference<AsyncVar<ServerDBInfo>> dbInfo) {
    // Initially most of the serverDBInfo is not known, but we know our locality right away
    ServerDBInfo localInfo;
    localInfo.myLocality = locality;
    dbInfo->set(localInfo);

    loop {
        GetServerDBInfoRequest req;
        req.knownServerInfoID = dbInfo->get().id;

        choose {
            when(ServerDBInfo _localInfo =
                     wait(ccInterface->get().present()
                              ? brokenPromiseToNever(ccInterface->get().get().getServerDBInfo.getReply(req))
                              : Never())) {
                ServerDBInfo localInfo = _localInfo;
                TraceEvent("GotServerDBInfoChange")
                    .detail("ChangeID", localInfo.id)
                    .detail("MasterID", localInfo.master.id())
                    .detail("RatekeeperID", localInfo.ratekeeper.present() ? localInfo.ratekeeper.get().id() : UID())
                    .detail("DataDistributorID",
                            localInfo.distributor.present() ? localInfo.distributor.get().id() : UID());

                localInfo.myLocality = locality;
                dbInfo->set(localInfo);
            }
            when(wait(ccInterface->onChange())) {
                if (ccInterface->get().present())
                    TraceEvent("GotCCInterfaceChange")
                        .detail("CCID", ccInterface->get().get().id())
                        .detail("CCMachine", ccInterface->get().get().getWorkers.getEndpoint().getPrimaryAddress());
            }
        }
    }
}

/**
 * \brief Test orchestrator: sends the test specifications to the testers in the right order and collects the results.
 *
 * There are multiple actors in this file with similar names (runTest, runTests) and slightly different signatures.
 *
 * This is the actual orchestrator. It reads the test specifications (from tests), prepares the cluster (by running the
 * configure command given in startingConfiguration) and then runs the workload.
 *
 * \param cc The cluster controller interface
 * \param ci Same as cc.clientInterface
 * \param testers The interfaces of the testers that should run the actual workloads
 * \param tests The test specifications to run
 * \param startingConfiguration If non-empty, the orchestrator will attempt to set this configuration before starting
 * the tests.
 * \param locality client locality (it seems this is unused?)
 *
 * \returns A future which will be set after all tests have finished.
 */
ACTOR Future<Void> runTests(Reference<AsyncVar<Optional<struct ClusterControllerFullInterface>>> cc,
                            Reference<AsyncVar<Optional<struct ClusterInterface>>> ci,
                            vector<TesterInterface> testers,
                            vector<TestSpec> tests,
                            StringRef startingConfiguration,
                            LocalityData locality) {
    state Database cx;
    state Reference<AsyncVar<ServerDBInfo>> dbInfo(new AsyncVar<ServerDBInfo>);
    state Future<Void> ccMonitor = monitorServerDBInfo(cc, LocalityData(), dbInfo); // FIXME: locality

    state bool useDB = false;
    state bool waitForQuiescenceBegin = false;
    state bool waitForQuiescenceEnd = false;
    state double startDelay = 0.0;
    state double databasePingDelay = 1e9;
    state ISimulator::BackupAgentType simBackupAgents = ISimulator::BackupAgentType::NoBackupAgents;
    state ISimulator::BackupAgentType simDrAgents = ISimulator::BackupAgentType::NoBackupAgents;
    state bool enableDD = false;
    if (tests.empty())
        useDB = true;
    for (auto iter = tests.begin(); iter != tests.end(); ++iter) {
        if (iter->useDB)
            useDB = true;
        if (iter->waitForQuiescenceBegin)
            waitForQuiescenceBegin = true;
        if (iter->waitForQuiescenceEnd)
            waitForQuiescenceEnd = true;
        startDelay = std::max(startDelay, iter->startDelay);
        databasePingDelay = std::min(databasePingDelay, iter->databasePingDelay);
        if (iter->simBackupAgents != ISimulator::BackupAgentType::NoBackupAgents)
            simBackupAgents = iter->simBackupAgents;

        if (iter->simDrAgents != ISimulator::BackupAgentType::NoBackupAgents) {
            simDrAgents = iter->simDrAgents;
        }
        enableDD = enableDD || getOption(iter->options[0], LiteralStringRef("enableDD"), false);
    }

    if (g_network->isSimulated()) {
        g_simulator.backupAgents = simBackupAgents;
        g_simulator.drAgents = simDrAgents;
    }

    // Turn off the database ping functionality if the suite of tests is not going to be using the database
    if (!useDB)
        databasePingDelay = 0.0;

    if (useDB) {
        cx = openDBOnServer(dbInfo);
    }

    state Future<Void> disabler = disableConnectionFailuresAfter(450, "Tester");

    // Change the configuration (and/or create the database) if necessary
    printf("startingConfiguration:%s start\n", startingConfiguration.toString().c_str());
    printSimulatedTopology();
    if (useDB && startingConfiguration != StringRef()) {
        try {
            wait(timeoutError(changeConfiguration(cx, testers, startingConfiguration), 2000.0));
            if (g_network->isSimulated() && enableDD) {
                wait(success(setDDMode(cx, 1)));
            }
        } catch (Error& e) {
            TraceEvent(SevError, "TestFailure").error(e).detail("Reason", "Unable to set starting configuration");
        }
    }

    if (useDB && waitForQuiescenceBegin) {
        TraceEvent("TesterStartingPreTestChecks")
            .detail("DatabasePingDelay", databasePingDelay)
            .detail("StartDelay", startDelay);
        try {
            wait(quietDatabase(cx, dbInfo, "Start") ||
                 (databasePingDelay == 0.0
                      ? Never()
                      : testDatabaseLiveness(cx, databasePingDelay, "QuietDatabaseStart", startDelay)));
        } catch (Error& e) {
            TraceEvent("QuietDatabaseStartExternalError").error(e);
            throw;
        }
    }

    TraceEvent("TestsExpectedToPass").detail("Count", tests.size());
    state int idx = 0;
    for (; idx < tests.size(); idx++) {
        printf("Run test:%s start\n", tests[idx].title.toString().c_str());
        wait(success(runTest(cx, testers, tests[idx], dbInfo)));
        printf("Run test:%s Done.\n", tests[idx].title.toString().c_str());
        // do we handle a failure here?
    }

    printf("\n%d tests passed; %d tests failed.\n", passCount, failCount);

    // If the database was deleted during the workload, we need to recreate the database
    if (tests.empty() || useDB) {
        if (waitForQuiescenceEnd) {
            printf("Waiting for DD to end...\n");
            try {
                wait(quietDatabase(cx, dbInfo, "End", 0, 2e6, 2e6) ||
                     (databasePingDelay == 0.0 ? Never()
                                               : testDatabaseLiveness(cx, databasePingDelay, "QuietDatabaseEnd")));
            } catch (Error& e) {
                TraceEvent("QuietDatabaseEndExternalError").error(e);
                throw;
            }
        }
    }
    printf("\n");

    return Void();
}

/**
 * \brief Proxy function that waits until enough testers are available and then calls into the orchestrator.
 *
 * There are multiple actors in this file with similar names (runTest, runTests) and slightly different signatures.
 *
 * This actor wraps the actual orchestrator (also called runTests). But before calling that actor, it waits for enough
 * testers to come up.
 *
 * \param cc The cluster controller interface
 * \param ci Same as cc.clientInterface
 * \param tests The test specifications to run
 * \param minTestersExpected The number of testers to expect. This actor will block until it can find this many
 * testers.
 * \param startingConfiguration If non-empty, the orchestrator will attempt to set this configuration before starting
 * the tests.
 * \param locality client locality (it seems this is unused?)
 *
 * \returns A future which will be set after all tests have finished.
 */
ACTOR Future<Void> runTests(Reference<AsyncVar<Optional<struct ClusterControllerFullInterface>>> cc,
                            Reference<AsyncVar<Optional<struct ClusterInterface>>> ci,
                            vector<TestSpec> tests,
                            test_location_t at,
                            int minTestersExpected,
                            StringRef startingConfiguration,
                            LocalityData locality) {
    state int flags = (at == TEST_ON_SERVERS ? 0 : GetWorkersRequest::TESTER_CLASS_ONLY) |
                      GetWorkersRequest::NON_EXCLUDED_PROCESSES_ONLY;
    state Future<Void> testerTimeout = delay(600.0); // wait 600 sec for testers to show up
    state vector<WorkerDetails> workers;

    loop {
        choose {
            when(vector<WorkerDetails> w =
                     wait(cc->get().present()
                              ? brokenPromiseToNever(cc->get().get().getWorkers.getReply(GetWorkersRequest(flags)))
                              : Never())) {
                if (w.size() >= minTestersExpected) {
                    workers = w;
                    break;
                }
                wait(delay(SERVER_KNOBS->WORKER_POLL_DELAY));
            }
            when(wait(cc->onChange())) {}
            when(wait(testerTimeout)) {
                TraceEvent(SevError, "TesterRecruitmentTimeout");
                throw timed_out();
            }
        }
    }

    vector<TesterInterface> ts;
    ts.reserve(workers.size());
    for (int i = 0; i < workers.size(); i++)
        ts.push_back(workers[i].interf.testerInterface);

    wait(runTests(cc, ci, ts, tests, startingConfiguration, locality));
    return Void();
}

/**
 * \brief Set up the testing environment and run the given tests on a cluster.
 *
 * There are multiple actors in this file with similar names (runTest, runTests) and slightly different signatures.
 *
 * This actor is usually the first entry point into the test environment. It itself doesn't implement too much
 * functionality. Its main purpose is to generate the test specification from passed arguments and then call into the
 * correct actor which will orchestrate the actual test.
 *
 * \param connFile A cluster connection file. Not all tests require a functional cluster but all tests require
 * a cluster file.
 * \param whatToRun TEST_TYPE_FROM_FILE to read the test description from a passed toml file or
 * TEST_TYPE_CONSISTENCY_CHECK to generate a test spec for consistency checking
 * \param at TEST_HERE: this process will act as a test client and execute the given workload. TEST_ON_SERVERS: Run a
 * test client on every worker in the cluster. TEST_ON_TESTERS: Run a test client on all servers with class Test
 * \param minTestersExpected If at is not TEST_HERE, the orchestrator will wait until it can find at least
 * minTestersExpected test clients. This is usually passed through from a command line argument. In simulation, the
 * simulator will pass the number of testers that it started.
 * \param fileName The path to the toml-file containing the test description. Is ignored if whatToRun !=
 * TEST_TYPE_FROM_FILE
 * \param startingConfiguration Can be used to configure a cluster before running the test. If this is an empty string,
 * it will be ignored, otherwise it will be passed to changeConfiguration.
 * \param locality The client locality to be used. This is only used if at == TEST_HERE
 *
 * \returns A future which will be set after all tests have finished.
 */
ACTOR Future<Void> runTests(Reference<ClusterConnectionFile> connFile,
                            test_type_t whatToRun,
                            test_location_t at,
                            int minTestersExpected,
                            std::string fileName,
                            StringRef startingConfiguration,
                            LocalityData locality,
                            UnitTestParameters testOptions) {
    state vector<TestSpec> testSpecs;
    auto cc = makeReference<AsyncVar<Optional<ClusterControllerFullInterface>>>();
    auto ci = makeReference<AsyncVar<Optional<ClusterInterface>>>();
    vector<Future<Void>> actors;
    if (connFile) {
        actors.push_back(reportErrors(monitorLeader(connFile, cc), "MonitorLeader"));
        actors.push_back(reportErrors(extractClusterInterface(cc, ci), "ExtractClusterInterface"));
    }

    if (whatToRun == TEST_TYPE_CONSISTENCY_CHECK) {
        TestSpec spec;
        Standalone<VectorRef<KeyValueRef>> options;
        spec.title = LiteralStringRef("ConsistencyCheck");
        spec.databasePingDelay = 0;
        spec.timeout = 0;
        spec.waitForQuiescenceBegin = false;
        spec.waitForQuiescenceEnd = false;
        std::string rateLimitMax = format("%d", CLIENT_KNOBS->CONSISTENCY_CHECK_RATE_LIMIT_MAX);
        options.push_back_deep(options.arena(),
                               KeyValueRef(LiteralStringRef("testName"), LiteralStringRef("ConsistencyCheck")));
        options.push_back_deep(options.arena(),
                               KeyValueRef(LiteralStringRef("performQuiescentChecks"), LiteralStringRef("false")));
        options.push_back_deep(options.arena(),
                               KeyValueRef(LiteralStringRef("distributed"), LiteralStringRef("false")));
        options.push_back_deep(options.arena(),
                               KeyValueRef(LiteralStringRef("failureIsError"), LiteralStringRef("true")));
        options.push_back_deep(options.arena(), KeyValueRef(LiteralStringRef("indefinite"), LiteralStringRef("true")));
        options.push_back_deep(options.arena(), KeyValueRef(LiteralStringRef("rateLimitMax"), StringRef(rateLimitMax)));
        options.push_back_deep(options.arena(),
                               KeyValueRef(LiteralStringRef("shuffleShards"), LiteralStringRef("true")));
        spec.options.push_back_deep(spec.options.arena(), options);
        testSpecs.push_back(spec);
    } else if (whatToRun == TEST_TYPE_UNIT_TESTS) {
        TestSpec spec;
        Standalone<VectorRef<KeyValueRef>> options;
        spec.title = LiteralStringRef("UnitTests");
        spec.startDelay = 0;
        spec.useDB = false;
        spec.timeout = 0;
        options.push_back_deep(options.arena(),
                               KeyValueRef(LiteralStringRef("testName"), LiteralStringRef("UnitTests")));
        options.push_back_deep(options.arena(), KeyValueRef(LiteralStringRef("testsMatching"), fileName));
        // Add unit test options as test spec options
        for (auto& kv : testOptions.params) {
            options.push_back_deep(options.arena(), KeyValueRef(kv.first, kv.second));
        }
        spec.options.push_back_deep(spec.options.arena(), options);
        testSpecs.push_back(spec);
    } else {
        ifstream ifs;
        ifs.open(fileName.c_str(), ifstream::in);
        if (!ifs.good()) {
            TraceEvent(SevError, "TestHarnessFail")
                .detail("Reason", "file open failed")
                .detail("File", fileName.c_str());
            fprintf(stderr, "ERROR: Could not open file `%s'\n", fileName.c_str());
            return Void();
        }
        enableClientInfoLogging(); // Enable Client Info logging by default for tester
        if (boost::algorithm::ends_with(fileName, ".txt")) {
            testSpecs = readTests(ifs);
        } else if (boost::algorithm::ends_with(fileName, ".toml")) {
            // TOML is weird about opening the file as binary on windows, so we
            // just let TOML re-open the file instead of using ifs.
            testSpecs = readTOMLTests(fileName);
        } else {
            TraceEvent(SevError, "TestHarnessFail")
                .detail("Reason", "unknown tests specification extension")
                .detail("File", fileName.c_str());
            return Void();
        }
        ifs.close();
    }

    Future<Void> tests;
    if (at == TEST_HERE) {
        auto db = makeReference<AsyncVar<ServerDBInfo>>();
        vector<TesterInterface> iTesters(1);
        actors.push_back(
            reportErrors(monitorServerDBInfo(cc, LocalityData(), db), "MonitorServerDBInfo")); // FIXME: Locality
        actors.push_back(reportErrors(testerServerCore(iTesters[0], connFile, db, locality), "TesterServerCore"));
        tests = runTests(cc, ci, iTesters, testSpecs, startingConfiguration, locality);
    } else {
        tests = reportErrors(runTests(cc, ci, testSpecs, at, minTestersExpected, startingConfiguration, locality),
                             "RunTests");
    }

    choose {
        when(wait(tests)) { return Void(); }
        when(wait(quorum(actors, 1))) {
            ASSERT(false);
            throw internal_error();
        }
    }
}