Merge remote-tracking branch 'origin/master' into paxos-config-db
Commit ad99f025e3
@@ -104,43 +104,48 @@ def maintenance(logger):
 @enable_logging()
 def setclass(logger):
+    # get all processes' network addresses
     output1 = run_fdbcli_command('setclass')
-    class_type_line_1 = output1.split('\n')[-1]
-    logger.debug(class_type_line_1)
-    # check process' network address
-    assert '127.0.0.1' in class_type_line_1
-    network_address = ':'.join(class_type_line_1.split(':')[:2])
-    logger.debug("Network address: {}".format(network_address))
+    logger.debug(output1)
+    # except the first line, each line is one process
+    process_types = output1.split('\n')[1:]
+    assert len(process_types) == args.process_number
+    addresses = []
+    for line in process_types:
+        assert '127.0.0.1' in line
         # check class type
-    assert 'unset' in class_type_line_1
+        assert 'unset' in line
         # check class source
-    assert 'command_line' in class_type_line_1
+        assert 'command_line' in line
+        # check process' network address
+        network_address = ':'.join(line.split(':')[:2])
+        logger.debug("Network address: {}".format(network_address))
+        addresses.append(network_address)
+    random_address = random.choice(addresses)
+    logger.debug("Randomly selected address: {}".format(random_address))
     # set class to a random valid type
-    class_types = ['storage', 'storage', 'transaction', 'resolution',
+    class_types = ['storage', 'transaction', 'resolution',
                    'commit_proxy', 'grv_proxy', 'master', 'stateless', 'log',
                    'router', 'cluster_controller', 'fast_restore', 'data_distributor',
                    'coordinator', 'ratekeeper', 'storage_cache', 'backup'
                    ]
     random_class_type = random.choice(class_types)
     logger.debug("Change to type: {}".format(random_class_type))
-    run_fdbcli_command('setclass', network_address, random_class_type)
+    run_fdbcli_command('setclass', random_address, random_class_type)
     # check the set successful
     output2 = run_fdbcli_command('setclass')
-    class_type_line_2 = output2.split('\n')[-1]
-    logger.debug(class_type_line_2)
+    logger.debug(output2)
+    assert random_address in output2
+    process_types = output2.split('\n')[1:]
     # check process' network address
-    assert network_address in class_type_line_2
+    for line in process_types:
+        if random_address in line:
             # check class type changed to the specified value
-    assert random_class_type in class_type_line_2
+            assert random_class_type in line
             # check class source
-    assert 'set_class' in class_type_line_2
-    # set back to default
-    run_fdbcli_command('setclass', network_address, 'default')
-    # everything should be back to the same as before
-    output3 = run_fdbcli_command('setclass')
-    class_type_line_3 = output3.split('\n')[-1]
-    logger.debug(class_type_line_3)
-    assert class_type_line_3 == class_type_line_1
+            assert 'set_class' in line
+    # set back to unset
+    run_fdbcli_command('setclass', random_address, 'unset')
 
 
 @enable_logging()
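The updated setclass test drives everything off the text that the setclass command prints: the first line is treated as a summary, and every following line describes one process, starting with its ip:port address and containing the class type and class source. A minimal Python sketch of that parsing, using assumed sample output (the exact wording printed by fdbcli may differ):

# Sketch only: how the test's parsing of setclass output works.
# The sample text below is assumed for illustration; only the "ip:port"
# prefix and the class type / class source words matter to the assertions.
sample_output = (
    "There are currently 2 processes in the database:\n"  # header line (assumed wording)
    "  127.0.0.1:4000: unset (command_line)\n"            # per-process line (assumed format)
    "  127.0.0.1:4001: unset (command_line)"
)

addresses = []
for line in sample_output.split('\n')[1:]:  # skip the header line
    assert '127.0.0.1' in line
    # "ip:port" is everything before the second ':'
    addresses.append(':'.join(line.split(':')[:2]).strip())

print(addresses)  # ['127.0.0.1:4000', '127.0.0.1:4001']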
@@ -475,6 +480,36 @@ def wait_for_database_available(logger):
         time.sleep(1)
 
 
+@enable_logging()
+def profile(logger):
+    # profile list should return the same list as kill
+    addresses = get_fdb_process_addresses(logger)
+    output1 = run_fdbcli_command('profile', 'list')
+    assert output1.split('\n') == addresses
+    # check default output
+    default_profile_client_get_output = 'Client profiling rate is set to default and size limit is set to default.'
+    output2 = run_fdbcli_command('profile', 'client', 'get')
+    assert output2 == default_profile_client_get_output
+    # set rate and size limit
+    run_fdbcli_command('profile', 'client', 'set', '0.5', '1GB')
+    output3 = run_fdbcli_command('profile', 'client', 'get')
+    logger.debug(output3)
+    output3_list = output3.split(' ')
+    assert float(output3_list[6]) == 0.5
+    # size limit should be 1GB
+    assert output3_list[-1] == '1000000000.'
+    # change back to default value and check
+    run_fdbcli_command('profile', 'client', 'set', 'default', 'default')
+    assert run_fdbcli_command('profile', 'client', 'get') == default_profile_client_get_output
+
+
+@enable_logging()
+def triggerddteaminfolog(logger):
+    # this command is straightforward and only has one code path
+    output = run_fdbcli_command('triggerddteaminfolog')
+    assert output == 'Triggered team info logging in data distribution.'
+
+
 if __name__ == '__main__':
     parser = ArgumentParser(formatter_class=RawDescriptionHelpFormatter,
                             description="""
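The profile test's token indexing only makes sense against the shape of the "profile client get" message. A worked sketch, assuming the non-default message keeps the same sentence structure as the default string quoted in the test, with the numbers substituted in:

# Sketch: why the test reads tokens[6] and tokens[-1] after splitting on spaces.
# The concrete message below is an assumption based on the default string above.
msg = 'Client profiling rate is set to 0.5 and size limit is set to 1000000000.'
tokens = msg.split(' ')
assert float(tokens[6]) == 0.5        # the sampling rate
assert tokens[-1] == '1000000000.'    # 1GB in bytes, printed with a trailing '.'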
@@ -512,11 +547,13 @@ if __name__ == '__main__':
         kill()
         lockAndUnlock()
         maintenance()
-        setclass()
+        profile()
         suspend()
         transaction()
         throttle()
+        triggerddteaminfolog()
     else:
         assert args.process_number > 1, "Process number should be positive"
         coordinators()
         exclude()
+        setclass()
@@ -1570,23 +1570,23 @@ struct RedwoodMetrics {
         if (levelCounter > 0) {
             std::string levelString = "L" + std::to_string(levelCounter);
             level.buildFillPctSketch = Reference<Histogram>(new Histogram(
-                Reference<HistogramRegistry>(), "buildFillPct", levelString, Histogram::Unit::percentage));
+                Reference<HistogramRegistry>(), "buildFillPct", levelString, Histogram::Unit::percentageLinear));
             level.modifyFillPctSketch = Reference<Histogram>(new Histogram(
-                Reference<HistogramRegistry>(), "modifyFillPct", levelString, Histogram::Unit::percentage));
+                Reference<HistogramRegistry>(), "modifyFillPct", levelString, Histogram::Unit::percentageLinear));
             level.buildStoredPctSketch = Reference<Histogram>(new Histogram(
-                Reference<HistogramRegistry>(), "buildStoredPct", levelString, Histogram::Unit::percentage));
+                Reference<HistogramRegistry>(), "buildStoredPct", levelString, Histogram::Unit::percentageLinear));
             level.modifyStoredPctSketch = Reference<Histogram>(new Histogram(
-                Reference<HistogramRegistry>(), "modifyStoredPct", levelString, Histogram::Unit::percentage));
+                Reference<HistogramRegistry>(), "modifyStoredPct", levelString, Histogram::Unit::percentageLinear));
             level.buildItemCountSketch = Reference<Histogram>(new Histogram(Reference<HistogramRegistry>(),
                                                                             "buildItemCount",
                                                                             levelString,
-                                                                            Histogram::Unit::count,
+                                                                            Histogram::Unit::countLinear,
                                                                             0,
                                                                             maxRecordCount));
             level.modifyItemCountSketch = Reference<Histogram>(new Histogram(Reference<HistogramRegistry>(),
                                                                              "modifyItemCount",
                                                                              levelString,
-                                                                             Histogram::Unit::count,
+                                                                             Histogram::Unit::countLinear,
                                                                              0,
                                                                              maxRecordCount));
         }
@@ -10114,26 +10114,23 @@ TEST_CASE(":/redwood/performance/histogramThroughput") {
     {
         // Time needed to log 33 histograms.
         std::vector<Reference<Histogram>> histograms;
-        for(int i = 0; i<33; i++){
+        for (int i = 0; i < 33; i++) {
             std::string levelString = "L" + std::to_string(i);
-            histograms.push_back(
-                Histogram::getHistogram(
-                    LiteralStringRef("histogramTest"), LiteralStringRef("levelString"), Histogram::Unit::bytes)
-            );
+            histograms.push_back(Histogram::getHistogram(
+                LiteralStringRef("histogramTest"), LiteralStringRef("levelString"), Histogram::Unit::bytes));
         }
-        for(int i = 0; i<33; i++){
-            for(int j = 0; j<32; j++){
+        for (int i = 0; i < 33; i++) {
+            for (int j = 0; j < 32; j++) {
                 histograms[i]->sample(std::pow(2, j));
             }
         }
         auto t_start = std::chrono::high_resolution_clock::now();
-        for(int i = 0; i<33; i++){
+        for (int i = 0; i < 33; i++) {
             histograms[i]->writeToLog(30.0);
         }
         auto t_end = std::chrono::high_resolution_clock::now();
         double elapsed_time_ms = std::chrono::duration<double, std::milli>(t_end - t_start).count();
         std::cout << "Time needed to log 33 histograms (millisecond): " << elapsed_time_ms << std::endl;
-
     }
     {
         std::cout << "Histogram Unit bytes" << std::endl;
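The benchmark above follows a simple pattern: pre-populate the histograms, then time only the logging loop with a high-resolution clock and report milliseconds. The same pattern in Python, with a stand-in histogram class (not FoundationDB's), purely as an illustration:

import time

class FakeHistogram:
    # Stand-in for the C++ Histogram, just to make the timing pattern runnable.
    def __init__(self):
        self.buckets = [0] * 32
    def sample(self, v):
        self.buckets[min(int(v).bit_length(), 31)] += 1
    def write_to_log(self, elapsed):
        # Pretend to emit one log field per non-empty bucket.
        _ = [(i, c) for i, c in enumerate(self.buckets) if c]

histograms = [FakeHistogram() for _ in range(33)]
for h in histograms:
    for j in range(32):
        h.sample(2 ** j)

t_start = time.perf_counter()  # time only the logging loop
for h in histograms:
    h.write_to_log(30.0)
elapsed_ms = (time.perf_counter() - t_start) * 1000.0
print("Time needed to log 33 histograms (millisecond):", elapsed_ms)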
@@ -10158,7 +10155,7 @@ TEST_CASE(":/redwood/performance/histogramThroughput") {
         std::cout << "Histogram Unit percentage: " << std::endl;
         auto t_start = std::chrono::high_resolution_clock::now();
         Reference<Histogram> h = Histogram::getHistogram(
-            LiteralStringRef("histogramTest"), LiteralStringRef("counts"), Histogram::Unit::percentage);
+            LiteralStringRef("histogramTest"), LiteralStringRef("counts"), Histogram::Unit::percentageLinear);
         ASSERT(uniform.size() == inputSize);
         for (size_t i = 0; i < uniform.size(); i++) {
             h->samplePercentage((double)uniform[i] / UINT32_MAX);
@@ -110,7 +110,15 @@ struct UnitTestWorkload : TestWorkload {
                 tests.push_back(test);
             }
         }
 
+        fprintf(stdout, "Found %zu tests\n", tests.size());
+
+        if (tests.size() == 0) {
+            TraceEvent(SevError, "NoMatchingUnitTests").detail("TestPattern", self->testPattern);
+            ++self->testsFailed;
+            return Void();
+        }
+
         deterministicRandom()->randomShuffle(tests);
         if (self->testRunLimit > 0 && tests.size() > self->testRunLimit)
             tests.resize(self->testRunLimit);
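The added guard plus the existing shuffle/resize pair implement "fail if nothing matched, otherwise run a random subset of at most testRunLimit tests". The same selection logic, sketched in Python with illustrative names:

import random

def pick_tests(tests, run_limit):
    # Sketch of the selection above; names are illustrative, not from the workload.
    if not tests:
        raise RuntimeError("no matching unit tests")  # mirrors the SevError + early return
    chosen = list(tests)
    random.shuffle(chosen)                            # deterministicRandom()->randomShuffle(tests)
    if run_limit > 0 and len(chosen) > run_limit:
        chosen = chosen[:run_limit]                   # tests.resize(self->testRunLimit)
    return chosen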
@@ -129,17 +129,18 @@ void Histogram::writeToLog(double elapsed) {
         totalCount += buckets[i];
         switch (unit) {
         case Unit::microseconds:
-            e.detail(format("LessThan%u.%03u", value / 1000, value % 1000), buckets[i]);
+            e.detail(format("LessThan%u.%03u", int(value / 1000), int(value % 1000)), buckets[i]);
             break;
         case Unit::bytes:
         case Unit::bytes_per_second:
-            e.detail(format("LessThan%u", value), buckets[i]);
+            e.detail(format("LessThan%" PRIu64, value), buckets[i]);
             break;
-        case Unit::percentage:
+        case Unit::percentageLinear:
             e.detail(format("LessThan%f", (i + 1) * 0.04), buckets[i]);
             break;
-        case Unit::count:
-            e.detail(format("LessThan%f", (i + 1) * ((upperBound - lowerBound) / 31.0)), buckets[i]);
+        case Unit::countLinear:
+            value = uint64_t((i + 1) * ((upperBound - lowerBound) / 31.0));
+            e.detail(format("LessThan%" PRIu64, value), buckets[i]);
             break;
         case Unit::MAXHISTOGRAMUNIT:
             e.detail(format("Default%u", i), buckets[i]);
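For the new countLinear unit, writeToLog labels bucket i with the linear upper edge (i + 1) * (upperBound - lowerBound) / 31.0, truncated to an integer, instead of the old %f label. A small sketch of the labels this produces, using assumed bounds (lowerBound = 0, upperBound = 310) and assuming 32 buckets:

# Sketch of the "LessThan..." labels emitted for a countLinear histogram.
# The bounds and the bucket count are assumptions for illustration.
lowerBound, upperBound, num_buckets = 0, 310, 32
labels = [int((i + 1) * ((upperBound - lowerBound) / 31.0)) for i in range(num_buckets)]
print(labels[:3], labels[-2:])  # [10, 20, 30] [310, 320]; bucket i is logged as "LessThan<labels[i]>"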
@@ -58,7 +58,7 @@ HistogramRegistry& GetHistogramRegistry();
 */
 class Histogram final : public ReferenceCounted<Histogram> {
 public:
-    enum class Unit { microseconds = 0, bytes, bytes_per_second, percentage, count, MAXHISTOGRAMUNIT };
+    enum class Unit { microseconds = 0, bytes, bytes_per_second, percentageLinear, countLinear, MAXHISTOGRAMUNIT };
     static const char* const UnitToStringMapper[];
 
     Histogram(Reference<HistogramRegistry> regis,
@@ -73,7 +73,15 @@ RUN /var/fdb/scripts/download_multiversion_libraries.bash $FDB_WEBSITE $FDB_ADDI
 
 RUN rm -rf /mnt/website
 
-RUN mkdir -p logs
+RUN mkdir -p /var/fdb/logs
 
 VOLUME /var/fdb/data
 