/*
 * Increment.actor.cpp
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "fdbclient/NativeAPI.actor.h"
#include "fdbserver/TesterInterface.actor.h"
#include "fdbserver/workloads/workloads.actor.h"
#include "fdbserver/workloads/BulkSetup.actor.h"
#include "flow/actorcompiler.h" // This must be the last #include.
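
// Increment workload: each client repeatedly commits a transaction that atomically adds 1 to one random key in
// the lower half of the test key space and 1 to one random key in the upper half. Because every committed
// transaction bumps exactly one key on each side, the sums of the two halves must stay equal; the check phase
// reads the whole range back and verifies that invariant, along with a minimum achieved transaction rate.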
struct Increment : TestWorkload {
    int actorCount, nodeCount;
    double testDuration, transactionsPerSecond, minExpectedTransactionsPerSecond;

    vector<Future<Void>> clients;
    PerfIntCounter transactions, retries, tooOldRetries, commitFailedRetries;
    PerfDoubleCounter totalLatency;
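
    // Workload parameters come from the test specification via getOption(); the value after each option name
    // below is the default used when the spec does not set it.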
    Increment(WorkloadContext const& wcx)
      : TestWorkload(wcx), transactions("Transactions"), retries("Retries"), tooOldRetries("Retries.too_old"),
        commitFailedRetries("Retries.commit_failed"), totalLatency("Latency") {
        testDuration = getOption(options, LiteralStringRef("testDuration"), 10.0);
        transactionsPerSecond = getOption(options, LiteralStringRef("transactionsPerSecond"), 5000.0);
        actorCount = getOption(options, LiteralStringRef("actorsPerClient"), transactionsPerSecond / 5);
        nodeCount = getOption(options, LiteralStringRef("nodeCount"), transactionsPerSecond * clientCount);
        minExpectedTransactionsPerSecond =
            transactionsPerSecond * getOption(options, LiteralStringRef("expectedRate"), 0.7);
    }

    std::string description() const override { return "IncrementWorkload"; }

    Future<Void> setup(Database const& cx) override { return Void(); }
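
    // Launch actorCount increment clients; each waits an average of actorCount / transactionsPerSecond seconds
    // between transactions, so together they target transactionsPerSecond. timeout() stops each client once
    // testDuration has elapsed.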
    Future<Void> start(Database const& cx) override {
        for (int c = 0; c < actorCount; c++)
            clients.push_back(
                timeout(incrementClient(cx->clone(), this, actorCount / transactionsPerSecond), testDuration, Void()));
        return delay(testDuration);
    }
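
    // Flag the test as failed if any client actor ended in an error, then hand off to incrementCheck() to
    // validate the data and the achieved transaction rate.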
    Future<bool> check(Database const& cx) override {
        int errors = 0;
        for (int c = 0; c < clients.size(); c++)
            errors += clients[c].isError();
        if (errors)
            TraceEvent(SevError, "TestFailure").detail("Reason", "There were client errors.");
        clients.clear();
        return incrementCheck(cx->clone(), this, !errors);
    }
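
    // Each committed transaction performs exactly two atomic increments (one per half of the key space), so the
    // approximate read and write row rates below are derived from the transaction count with a factor of two.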
    void getMetrics(vector<PerfMetric>& m) override {
        m.push_back(transactions.getMetric());
        m.push_back(retries.getMetric());
        m.push_back(tooOldRetries.getMetric());
        m.push_back(commitFailedRetries.getMetric());
        m.push_back(PerfMetric("Avg Latency (ms)", 1000 * totalLatency.getValue() / transactions.getValue(), true));
        m.push_back(PerfMetric("Read rows/simsec (approx)", transactions.getValue() * 2 / testDuration, false));
        m.push_back(PerfMetric("Write rows/simsec (approx)", transactions.getValue() * 2 / testDuration, false));
    }
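
    // Test keys are 16-digit zero-padded decimal integers, so lexicographic key order matches numeric order.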
    static Key intToTestKey(int i) { return StringRef(format("%016d", i)); }
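
    // Client loop: wait a Poisson-distributed interval, then commit one transaction that atomically adds 1 to a
    // random key in [0, nodeCount/2) and to a random key in [nodeCount/2, nodeCount), retrying via tr.onError()
    // and classifying retries by cause.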
    ACTOR Future<Void> incrementClient(Database cx, Increment* self, double delay) {
        state double lastTime = now();
        try {
            loop {
                wait(poisson(&lastTime, delay));

                state double tstart = now();
                state Transaction tr(cx);
                while (true) {
                    try {
                        tr.atomicOp(intToTestKey(deterministicRandom()->randomInt(0, self->nodeCount / 2)),
                                    LiteralStringRef("\x01"),
                                    MutationRef::AddValue);
                        tr.atomicOp(
                            intToTestKey(deterministicRandom()->randomInt(self->nodeCount / 2, self->nodeCount)),
                            LiteralStringRef("\x01"),
                            MutationRef::AddValue);
                        wait(tr.commit());
                        break;
                    } catch (Error& e) {
                        if (e.code() == error_code_transaction_too_old)
                            ++self->tooOldRetries;
                        else if (e.code() == error_code_not_committed)
                            ++self->commitFailedRetries;
                        wait(tr.onError(e));
                    }
                    ++self->retries;
                }
                ++self->transactions;
                self->totalLatency += now() - tstart;
            }
        } catch (Error& e) {
            TraceEvent(SevError, "IncrementClient").error(e);
            throw;
        }
    }
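
    // Validate the data read back by the checker: every committed transaction incremented one key in each half
    // of the key space, so the sum of the lower-half values must equal the sum of the upper-half values.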
    bool incrementCheckData(const VectorRef<KeyValueRef>& data, Version v, Increment* self) {
        TEST(self->transactions.getValue()); // incrementCheckData transaction has value
        if (self->transactions.getValue() && data.size() == 0) {
            TraceEvent(SevError, "TestFailure")
                .detail("Reason", "No successful increments")
                .detail("Before", nodeCount)
                .detail("After", data.size())
                .detail("Version", v);
            return false;
        }
        int firstSum = 0;
        int secondSum = 0;
        for (auto it : data) {
            ASSERT(it.value.size() <= sizeof(uint64_t));
            uint64_t intValue = 0;
            memcpy(&intValue, it.value.begin(), it.value.size());
            if (it.key < intToTestKey(nodeCount / 2)) {
                firstSum += intValue;
            } else {
                secondSum += intValue;
            }
        }
        if (firstSum != secondSum) {
            TraceEvent(SevError, "TestFailure")
                .detail("Reason", "Bad increments")
                .detail("A", firstSum)
                .detail("B", secondSum);
            return false;
        }
        return true;
    }
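
    // Final check: fail if the achieved transaction rate fell below minExpectedTransactionsPerSecond, and have
    // client 0 read the entire test key range at a single version to verify the increment sums.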
    ACTOR Future<bool> incrementCheck(Database cx, Increment* self, bool ok) {
        if (self->transactions.getMetric().value() < self->testDuration * self->minExpectedTransactionsPerSecond) {
            TraceEvent(SevWarnAlways, "TestFailure")
                .detail("Reason", "Rate below desired rate")
                .detail("File", __FILE__)
                .detail("Details",
                        format("%.2f",
                               self->transactions.getMetric().value() /
                                   (self->transactionsPerSecond * self->testDuration)))
                .detail("TransactionsAchieved", self->transactions.getMetric().value())
                .detail("MinTransactionsExpected", self->testDuration * self->minExpectedTransactionsPerSecond)
                .detail("TransactionGoal", self->transactionsPerSecond * self->testDuration);
            ok = false;
        }
        if (!self->clientId) {
            // One client checks the validity of the increments
            state Transaction tr(cx);
            state int retryCount = 0;
            loop {
                try {
                    state Version v = wait(tr.getReadVersion());
                    RangeResult data = wait(tr.getRange(firstGreaterOrEqual(intToTestKey(0)),
                                                        firstGreaterOrEqual(intToTestKey(self->nodeCount)),
                                                        self->nodeCount + 1));
                    ok = self->incrementCheckData(data, v, self) && ok;
                    break;
                } catch (Error& e) {
                    retryCount++;
                    TraceEvent(retryCount > 20 ? SevWarnAlways : SevWarn, "IncrementCheckError").error(e);
                    wait(tr.onError(e));
                }
            }
        }
        return ok;
    }
};
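
// Registering the workload with WorkloadFactory makes it selectable by name ("Increment") from a test
// specification. As a rough, illustrative sketch only (not a spec shipped with this file), a test that drives
// this workload might look like:
//
//     testTitle=IncrementTest
//         testName=Increment
//         testDuration=10.0
//         transactionsPerSecond=1000.0
//         nodeCount=1000
//         expectedRate=0.7
//
// The option names mirror the getOption() keys read in the constructor; the values shown are arbitrary.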
WorkloadFactory<Increment> IncrementWorkloadFactory("Increment");