renamed workloads and made code style adjustments

parent 1e9d31597c
commit f12a3909f3
@@ -2047,7 +2047,7 @@ ACTOR Future<Standalone<RangeResultRef>> getRange( Database cx, Reference<Transa
 }
 
 Future<Standalone<RangeResultRef>> getRange( Database const& cx, Future<Version> const& fVersion, KeySelector const& begin, KeySelector const& end,
-	GetRangeLimits const& limits, bool const& reverse, TransactionInfo const& info)
+	GetRangeLimits const& limits, bool const& reverse, TransactionInfo const& info )
 {
 	return getRange(cx, Reference<TransactionLogInfo>(), fVersion, begin, end, limits, Promise<std::pair<Key, Key>>(), true, reverse, info);
 }
@@ -116,6 +116,7 @@ set(FDBSERVER_SRCS
   workloads/Cycle.actor.cpp
   workloads/DDBalance.actor.cpp
   workloads/DDMetrics.actor.cpp
+  workloads/DDMetricsExclude.actor.cpp
   workloads/DiskDurability.actor.cpp
   workloads/DiskDurabilityTest.actor.cpp
   workloads/DummyWorkload.actor.cpp
@@ -411,7 +411,7 @@ ServerKnobs::ServerKnobs(bool randomize, ClientKnobs* clientKnobs) {
 	init( BUGGIFY_LIMIT_BYTES, 1000 );
 	init( FETCH_BLOCK_BYTES, 2e6 );
 	init( FETCH_KEYS_PARALLELISM_BYTES, 4e6 ); if( randomize && BUGGIFY ) FETCH_KEYS_PARALLELISM_BYTES = 3e6;
-	init( FETCH_KEYS_LOWER_PRIORITY, 0 );
+	init( FETCH_KEYS_LOWER_PRIORITY, 0 );
 	init( BUGGIFY_BLOCK_BYTES, 10000 );
 	init( STORAGE_COMMIT_BYTES, 10000000 ); if( randomize && BUGGIFY ) STORAGE_COMMIT_BYTES = 2000000;
 	init( STORAGE_DURABILITY_LAG_REJECT_THRESHOLD, 0.25 );
@@ -18,68 +18,47 @@
  * limitations under the License.
  */
 
-#include "fdbclient/ManagementAPI.actor.h"
 #include "fdbclient/NativeAPI.actor.h"
-#include "fdbclient/ReadYourWrites.h"
+#include "fdbserver/TesterInterface.actor.h"
+#include "fdbserver/Status.h"
+#include "fdbserver/QuietDatabase.h"
 #include "fdbserver/ServerDBInfo.h"
-#include "fdbclient/StatusClient.h"
 #include "fdbserver/workloads/workloads.actor.h"
 #include "flow/actorcompiler.h" // This must be the last #include.
 
 struct DDMetricsWorkload : TestWorkload {
-	double ddDone;
-	Value excludeIp;
-	int excludePort;
-	double peakMovingData;
-	double peakInQueue;
-	double peakInFlight;
-	double movingDataPerSec;
+	double startDelay, ddDone;
 
 	DDMetricsWorkload(WorkloadContext const& wcx)
-		: TestWorkload(wcx), ddDone(0.0), peakMovingData(0.0), peakInQueue(0.0), peakInFlight(0.0), movingDataPerSec(0.0)
+		: TestWorkload(wcx), ddDone( 0.0 )
 	{
-		excludeIp = getOption(options, LiteralStringRef("excludeIp"), Value(LiteralStringRef("127.0.0.1")));
-		excludePort = getOption(options, LiteralStringRef("excludePort"), 4500);
+		startDelay = getOption( options, LiteralStringRef("beginPoll"), 10.0 );
 	}
 
-	static Value getRandomValue() { return Standalone<StringRef>(format("Value/%080d", deterministicRandom()->randomInt(0, 10e6))); }
+	virtual std::string description() { return "Data Distribution Metrics"; }
 
-	ACTOR static Future<double> getMovingDataAmount(Database cx, DDMetricsWorkload* self) {
-		try {
-			StatusObject statusObj = wait(StatusClient::statusFetcher(cx->getConnectionFile()));
-			StatusObjectReader statusObjCluster;
-			((StatusObjectReader)statusObj).get("cluster", statusObjCluster);
-			StatusObjectReader statusObjData;
-			statusObjCluster.get("data", statusObjData);
-			if (statusObjData.has("moving_data")) {
-				StatusObjectReader movingData = statusObjData.last();
-				double dataInQueue, dataInFlight;
-				if (movingData.get("in_queue_bytes", dataInQueue) && movingData.get("in_flight_bytes", dataInFlight)) {
-					self->peakInQueue = std::max(self->peakInQueue, dataInQueue);
-					self->peakInFlight = std::max(self->peakInFlight, dataInFlight);
-					return dataInQueue + dataInFlight;
-				}
-			}
-		} catch(Error& e) {
-			TraceEvent("DDMetricsGetMovingDataError").error(e);
-			throw;
-		}
-		return -1.0;
+	ACTOR Future<int> getHighPriorityRelocationsInFlight( Database cx, DDMetricsWorkload *self ) {
+		WorkerInterface masterWorker = wait(getMasterWorker(cx, self->dbInfo));
+
+		TraceEvent("GetHighPriorityReliocationsInFlight").detail("Stage", "ContactingMaster");
+		TraceEventFields md = wait( timeoutError(masterWorker.eventLogRequest.getReply(
+			EventLogRequest( LiteralStringRef( "MovingData" ) ) ), 1.0 ) );
+		int relocations;
+		sscanf(md.getValue("HighPriorityRelocations").c_str(), "%d", &relocations);
+		return relocations;
 	}
 
-	ACTOR static Future<Void> _start(Database cx, DDMetricsWorkload *self) {
+	ACTOR Future<Void> work( Database cx, DDMetricsWorkload *self ) {
 		try {
-			state std::vector<AddressExclusion> excluded;
-			excluded.push_back(AddressExclusion(IPAddress::parse(self->excludeIp.toString()).get(), self->excludePort));
-			wait(excludeServers(cx, excluded));
+			TraceEvent("DDMetricsWaiting").detail("StartDelay", self->startDelay);
+			wait( delay( self->startDelay ) );
+			TraceEvent("DDMetricsStarting");
 			state double startTime = now();
 			loop {
 				wait( delay( 2.5 ) );
-				double movingData = wait( self->getMovingDataAmount( cx, self ) );
-				self->peakMovingData = std::max(self->peakMovingData, movingData);
-				TraceEvent("DDMetricsCheck")
-					.detail("movingData", movingData);
-				if( movingData == 0.0 ) {
+				int dif = wait( self->getHighPriorityRelocationsInFlight( cx, self ) );
+				TraceEvent("DDMetricsCheck").detail("DIF", dif);
+				if( dif == 0 ) {
 					self->ddDone = now() - startTime;
 					return Void();
 				}
@@ -90,20 +69,16 @@ struct DDMetricsWorkload : TestWorkload {
 		return Void();
 	}
 
-	virtual std::string description() { return "Data Distribution Metrics"; }
-	virtual Future<Void> setup( Database const& cx ) { return Void(); }
-	virtual Future<Void> start( Database const& cx ) { return _start(cx, this); }
+	virtual Future<Void> start( Database const& cx ) {
+		return clientId == 0 ? work( cx, this ) : Void();
+	}
+
 	virtual Future<bool> check( Database const& cx ) {
-		movingDataPerSec = peakMovingData / ddDone;
 		return true;
 	}
 
 	virtual void getMetrics( vector<PerfMetric>& m ) {
-		m.push_back( PerfMetric( "peakMovingData", peakMovingData, false));
-		m.push_back( PerfMetric( "peakInQueue", peakInQueue, false));
-		m.push_back( PerfMetric( "peakInFlight", peakInFlight, false));
 		m.push_back( PerfMetric( "DDDuration", ddDone, false ) );
-		m.push_back( PerfMetric( "movingDataPerSec", movingDataPerSec, false));
 	}
 
 };
@@ -0,0 +1,111 @@
+/*
+ * DDMetricsExclude.actor.cpp
+ *
+ * This source file is part of the FoundationDB open source project
+ *
+ * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "fdbclient/ManagementAPI.actor.h"
+#include "fdbclient/NativeAPI.actor.h"
+#include "fdbclient/ReadYourWrites.h"
+#include "fdbserver/ServerDBInfo.h"
+#include "fdbclient/StatusClient.h"
+#include "fdbserver/workloads/workloads.actor.h"
+#include "flow/actorcompiler.h" // This must be the last #include.
+
+struct DDMetricsExcludeWorkload : TestWorkload {
+	double ddDone;
+	Value excludeIp;
+	int excludePort;
+	double peakMovingData;
+	double peakInQueue;
+	double peakInFlight;
+	double movingDataPerSec;
+
+	DDMetricsExcludeWorkload(WorkloadContext const& wcx)
+		: TestWorkload(wcx), ddDone(0.0), peakMovingData(0.0), peakInQueue(0.0), peakInFlight(0.0), movingDataPerSec(0.0)
+	{
+		excludeIp = getOption(options, LiteralStringRef("excludeIp"), Value(LiteralStringRef("127.0.0.1")));
+		excludePort = getOption(options, LiteralStringRef("excludePort"), 4500);
+	}
+
+	static Value getRandomValue() { return Standalone<StringRef>(format("Value/%080d", deterministicRandom()->randomInt(0, 10e6))); }
+
+	ACTOR static Future<double> getMovingDataAmount(Database cx, DDMetricsExcludeWorkload* self) {
+		try {
+			StatusObject statusObj = wait(StatusClient::statusFetcher(cx->getConnectionFile()));
+			StatusObjectReader statusObjCluster;
+			((StatusObjectReader)statusObj).get("cluster", statusObjCluster);
+			StatusObjectReader statusObjData;
+			statusObjCluster.get("data", statusObjData);
+			if (statusObjData.has("moving_data")) {
+				StatusObjectReader movingData = statusObjData.last();
+				double dataInQueue, dataInFlight;
+				if (movingData.get("in_queue_bytes", dataInQueue) && movingData.get("in_flight_bytes", dataInFlight)) {
+					self->peakInQueue = std::max(self->peakInQueue, dataInQueue);
+					self->peakInFlight = std::max(self->peakInFlight, dataInFlight);
+					return dataInQueue + dataInFlight;
+				}
+			}
+		} catch(Error& e) {
+			TraceEvent("DDMetricsExcludeGetMovingDataError").error(e);
+			throw;
+		}
+		return -1.0;
+	}
+
+	ACTOR static Future<Void> _start(Database cx, DDMetricsExcludeWorkload *self) {
+		try {
+			state std::vector<AddressExclusion> excluded;
+			excluded.push_back(AddressExclusion(IPAddress::parse(self->excludeIp.toString()).get(), self->excludePort));
+			wait(excludeServers(cx, excluded));
+			state double startTime = now();
+			loop {
+				wait( delay( 2.5 ) );
+				double movingData = wait( self->getMovingDataAmount( cx, self ) );
+				self->peakMovingData = std::max(self->peakMovingData, movingData);
+				TraceEvent("DDMetricsExcludeCheck")
+					.detail("movingData", movingData);
+				if( movingData == 0.0 ) {
+					self->ddDone = now() - startTime;
+					return Void();
+				}
+			}
+		} catch( Error& e ) {
+			TraceEvent("DDMetricsExcludeError").error(e);
+		}
+		return Void();
+	}
+
+	virtual std::string description() { return "Data Distribution Metrics Exclude"; }
+	virtual Future<Void> setup( Database const& cx ) { return Void(); }
+	virtual Future<Void> start( Database const& cx ) { return _start(cx, this); }
+	virtual Future<bool> check( Database const& cx ) {
+		movingDataPerSec = peakMovingData / ddDone;
+		return true;
+	}
+
+	virtual void getMetrics( vector<PerfMetric>& m ) {
+		m.push_back( PerfMetric( "peakMovingData", peakMovingData, false));
+		m.push_back( PerfMetric( "peakInQueue", peakInQueue, false));
+		m.push_back( PerfMetric( "peakInFlight", peakInFlight, false));
+		m.push_back( PerfMetric( "DDDuration", ddDone, false ) );
+		m.push_back( PerfMetric( "movingDataPerSec", movingDataPerSec, false));
+	}
+
+};
+
+WorkloadFactory<DDMetricsExcludeWorkload> DDMetricsExcludeWorkloadFactory("DDMetricsExclude");
@@ -1,86 +0,0 @@
-/*
- * DDMetricsOld.actor.cpp
- *
- * This source file is part of the FoundationDB open source project
- *
- * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "fdbclient/NativeAPI.actor.h"
-#include "fdbserver/TesterInterface.actor.h"
-#include "fdbserver/Status.h"
-#include "fdbserver/QuietDatabase.h"
-#include "fdbserver/ServerDBInfo.h"
-#include "fdbserver/workloads/workloads.actor.h"
-#include "flow/actorcompiler.h" // This must be the last #include.
-
-struct DDMetricsOldWorkload : TestWorkload {
-	double startDelay, ddDone;
-
-	DDMetricsOldWorkload(WorkloadContext const& wcx)
-		: TestWorkload(wcx), ddDone( 0.0 )
-	{
-		startDelay = getOption( options, LiteralStringRef("beginPoll"), 10.0 );
-	}
-
-	virtual std::string description() { return "Data Distribution Metrics Old"; }
-
-	ACTOR Future<int> getHighPriorityRelocationsInFlight( Database cx, DDMetricsOldWorkload *self ) {
-		WorkerInterface masterWorker = wait(getMasterWorker(cx, self->dbInfo));
-
-		TraceEvent("GetHighPriorityReliocationsInFlight").detail("Stage", "ContactingMaster");
-		TraceEventFields md = wait( timeoutError(masterWorker.eventLogRequest.getReply(
-			EventLogRequest( LiteralStringRef( "MovingData" ) ) ), 1.0 ) );
-		int relocations;
-		sscanf(md.getValue("HighPriorityRelocations").c_str(), "%d", &relocations);
-		return relocations;
-	}
-
-	ACTOR Future<Void> work( Database cx, DDMetricsOldWorkload *self ) {
-		try {
-			TraceEvent("DDMetricsOldWaiting").detail("StartDelay", self->startDelay);
-			wait( delay( self->startDelay ) );
-			TraceEvent("DDMetricsOldStarting");
-			state double startTime = now();
-			loop {
-				wait( delay( 2.5 ) );
-				int dif = wait( self->getHighPriorityRelocationsInFlight( cx, self ) );
-				TraceEvent("DDMetricsOldCheck").detail("DIF", dif);
-				if( dif == 0 ) {
-					self->ddDone = now() - startTime;
-					return Void();
-				}
-			}
-		} catch( Error& e ) {
-			TraceEvent("DDMetricsOldError").error(e);
-		}
-		return Void();
-	}
-
-	virtual Future<Void> start( Database const& cx ) {
-		return clientId == 0 ? work( cx, this ) : Void();
-	}
-
-	virtual Future<bool> check( Database const& cx ) {
-		return true;
-	}
-
-	virtual void getMetrics( vector<PerfMetric>& m ) {
-		m.push_back( PerfMetric( "DDDuration", ddDone, false ) );
-	}
-
-};
-
-WorkloadFactory<DDMetricsOldWorkload> DDMetricsOldWorkloadFactory("DDMetricsOld");
@@ -176,7 +176,7 @@ add_fdb_test(TEST_FILES slow/ConfigureTest.txt)
 add_fdb_test(TEST_FILES slow/CycleRollbackPlain.txt)
 add_fdb_test(TEST_FILES slow/DDBalanceAndRemove.txt)
 add_fdb_test(TEST_FILES slow/DDBalanceAndRemoveStatus.txt)
-add_fdb_test(TEST_FILES slow/DDMetrics.txt)
+add_fdb_test(TEST_FILES slow/DDMetricsExclude.txt)
 add_fdb_test(TEST_FILES slow/FastTriggeredWatches.txt)
 add_fdb_test(TEST_FILES slow/LowLatencyWithFailures.txt)
 add_fdb_test(TEST_FILES slow/MoveKeysClean.txt)
@@ -1,4 +1,4 @@
-testTitle=DDMetrics_populate
+testTitle=DDMetricsExclude_populate
     testName=Mako
     testDuration=30.0
    transactionsPerSecond=100000
@@ -14,7 +14,7 @@ testTitle=DDMetrics_populate
     runBenchmark=true
     preserveData=true
 
-testTitle=DDMetrics_test
+testTitle=DDMetricsExclude_test
     testName=Mako
     testDuration=60.0
     transactionsPerSecond=100000
@@ -30,6 +30,6 @@ testTitle=DDMetrics_test
     runBenchmark=true
     preserveData=true
 
-    testName=DDMetrics
+    testName=DDMetricsExclude
     excludeIp=127.0.0.1
     excludePort=4500