[lit] Allow one test to report multiple micro-test results, to support microbenchmarks.

Summary:
These changes allow a Result object to contain nested Result objects, in order
to support microbenchmarks. Currently lit is restricted to reporting one
result object per test; this change supports tests that want to report
individual timings for individual kernels.
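
As an illustration of the new API, a test format's execute() could attach
nested results like this (a minimal sketch; the kernel names and timings are
invented for the example, and only addMicroResult/microResults are new in this
change):

    import lit.Test

    # One overall Result for the single test executable.
    result = lit.Test.Result(lit.Test.PASS, 'ran 2 kernels')
    result.addMetric('exec_time', lit.Test.RealMetricValue(12.5))

    # One nested Result per microbenchmark kernel.
    for kernel, seconds in [('PRESSURE_CALC', 0.031), ('ENERGY_CALC', 0.044)]:
        micro = lit.Test.Result(lit.Test.PASS)
        micro.addMetric('exec_time', lit.Test.RealMetricValue(seconds))
        result.addMicroResult(kernel, micro)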

This revision is the result of the discussions in
https://reviews.llvm.org/D32272#794759,
https://reviews.llvm.org/D37421#f8003b27 and https://reviews.llvm.org/D38496.
It separates out the changes proposed in https://reviews.llvm.org/D40077.

This change will enable adding LCALS (Livermore Compiler Analysis Loop Suite),
a collection of loop kernels, to the LLVM test suite using the Google Benchmark
library (https://reviews.llvm.org/D43319), with tracking of individual kernel
timings.

Previously, microbenchmarks were handled by using macros to group sets of
microbenchmarks together, building many executables while still getting a
grouped timing (MultiSource/TSVC). Recently, the Google Benchmark library was
added to the test suite and used via a litsupport plugin. However, the
one-test/one-result limitation restricted its use to passing a runtime option
so that each of several hand-written tests runs only one microbenchmark
(MicroBenchmarks/XRay); this runs the same executable many times, once per
hand-written test. I will update the litsupport plugin to utilize the new
functionality (https://reviews.llvm.org/D43316).

These changes allow lit to report micro-test results when desired, in order to
obtain many precise timing results from a single run of one test executable.
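
For example, a litsupport-style plugin could run the benchmark binary once with
Google Benchmark's JSON reporter (--benchmark_out=<file>
--benchmark_out_format=json) and attach one micro-test result per reported
benchmark. This is only a sketch of the mechanism (attach_micro_results is a
hypothetical helper, not part of this patch or of D43316):

    import json
    import lit.Test

    def attach_micro_results(result, json_path):
        # Parse the Google Benchmark JSON report and attach one nested
        # Result per benchmark entry, keyed by the benchmark's name.
        with open(json_path) as f:
            report = json.load(f)
        for bench in report['benchmarks']:
            micro = lit.Test.Result(lit.Test.PASS)
            micro.addMetric('exec_time',
                            lit.Test.RealMetricValue(bench['real_time']))
            result.addMicroResult(bench['name'], micro)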


Reviewers: MatzeB, hfinkel, rengolin, delcypher

Differential Revision: https://reviews.llvm.org/D43314

llvm-svn: 327422
Brian Homerding 2018-03-13 16:37:59 +00:00
parent 5182113f07
commit d5c558ff21
7 changed files with 201 additions and 0 deletions


@@ -135,6 +135,8 @@ class Result(object):
        self.elapsed = elapsed
        # The metrics reported by this test.
        self.metrics = {}
        # The micro-test results reported by this test.
        self.microResults = {}

    def addMetric(self, name, value):
        """
@@ -153,6 +155,24 @@ class Result(object):
            raise TypeError("unexpected metric value: %r" % (value,))
        self.metrics[name] = value

    def addMicroResult(self, name, microResult):
        """
        addMicroResult(name, microResult)

        Attach a micro-test result to the test result, with the given name and
        result. It is an error to attempt to attach a micro-test with the
        same name multiple times.

        Each micro-test result must be an instance of the Result class.
        """
        if name in self.microResults:
            raise ValueError("Result already includes microResult for %r" % (
                name,))
        if not isinstance(microResult, Result):
            raise TypeError("unexpected MicroResult value %r" % (microResult,))
        self.microResults[name] = microResult


# Test classes.

class TestSuite:


@@ -81,6 +81,18 @@ class TestingProgressDisplay(object):
                print('%s: %s ' % (metric_name, value.format()))
            print("*" * 10)

        # Report micro-tests, if present.
        if test.result.microResults:
            items = sorted(test.result.microResults.items())
            for micro_test_name, micro_test in items:
                print("%s MICRO-TEST: %s" %
                      ('*' * 3, micro_test_name))

                if micro_test.metrics:
                    sorted_metrics = sorted(micro_test.metrics.items())
                    for metric_name, value in sorted_metrics:
                        print('    %s: %s ' % (metric_name, value.format()))

        # Ensure the output is flushed.
        sys.stdout.flush()
@@ -113,6 +125,25 @@ def write_test_results(run, lit_config, testing_time, output_path):
            for key, value in test.result.metrics.items():
                metrics_data[key] = value.todata()

        # Report micro-tests separately, if present.
        if test.result.microResults:
            for key, micro_test in test.result.microResults.items():
                # Expand the parent test name with the micro test name.
                parent_name = test.getFullName()
                micro_full_name = parent_name + ':' + key
                micro_test_data = {
                    'name': micro_full_name,
                    'code': micro_test.code.name,
                    'output': micro_test.output,
                    'elapsed': micro_test.elapsed}
                if micro_test.metrics:
                    micro_test_data['metrics'] = micro_metrics_data = {}
                    for key, value in micro_test.metrics.items():
                        micro_metrics_data[key] = value.todata()
                tests_data.append(micro_test_data)

        tests_data.append(test_data)

    # Write the output.
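
Each micro test therefore appears in the JSON report as its own top-level
record alongside its parent; for the micro-tests.ini input below, one such
record looks like:

    {
      "code": "PASS",
      "elapsed": null,
      "metrics": {
        "micro_value0": 4,
        "micro_value1": 1.3
      },
      "name": "test-data-micro :: micro-tests.ini:test0",
      "output": ""
    }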


@@ -0,0 +1,52 @@
import os
try:
    import ConfigParser
except ImportError:
    import configparser as ConfigParser

import lit.formats
import lit.Test

class DummyFormat(lit.formats.FileBasedTest):
    def execute(self, test, lit_config):
        # In this dummy format, expect that each test file is actually just a
        # .ini format dump of the results to report.
        source_path = test.getSourcePath()

        cfg = ConfigParser.ConfigParser()
        cfg.read(source_path)

        # Create the basic test result.
        result_code = cfg.get('global', 'result_code')
        result_output = cfg.get('global', 'result_output')
        result = lit.Test.Result(getattr(lit.Test, result_code),
                                 result_output)

        # Load additional metrics.
        for key, value_str in cfg.items('results'):
            value = eval(value_str)
            if isinstance(value, int):
                metric = lit.Test.IntMetricValue(value)
            elif isinstance(value, float):
                metric = lit.Test.RealMetricValue(value)
            else:
                raise RuntimeError("unsupported result type")
            result.addMetric(key, metric)

        # Create micro test results.
        for key, micro_name in cfg.items('micro-tests'):
            micro_result = lit.Test.Result(getattr(lit.Test, result_code, ''))
            # Load micro test additional metrics.
            for key, value_str in cfg.items('micro-results'):
                value = eval(value_str)
                if isinstance(value, int):
                    metric = lit.Test.IntMetricValue(value)
                elif isinstance(value, float):
                    metric = lit.Test.RealMetricValue(value)
                else:
                    raise RuntimeError("unsupported result type")
                micro_result.addMetric(key, metric)
            result.addMicroResult(micro_name, micro_result)

        return result


@@ -0,0 +1,10 @@
import site
site.addsitedir(os.path.dirname(__file__))
import dummy_format

config.name = 'test-data-micro'
config.suffixes = ['.ini']
config.test_format = dummy_format.DummyFormat()
config.test_source_root = None
config.test_exec_root = None
config.target_triple = None


@@ -0,0 +1,16 @@
[global]
result_code = PASS
result_output = Test passed.

[results]
value0 = 1
value1 = 2.3456

[micro-tests]
microtest0 = test0
microtest1 = test1
microtest2 = test2

[micro-results]
micro_value0 = 4
micro_value1 = 1.3


@@ -0,0 +1,21 @@
# Test features related to formats which support reporting additional test data
# and multiple test results.

# RUN: %{lit} -j 1 -v %{inputs}/test-data-micro | FileCheck %s

# CHECK: -- Testing:
# CHECK: PASS: test-data-micro :: micro-tests.ini
# CHECK-NEXT: *** TEST 'test-data-micro :: micro-tests.ini' RESULTS ***
# CHECK-NEXT: value0: 1
# CHECK-NEXT: value1: 2.3456
# CHECK-NEXT: ***
# CHECK-NEXT: *** MICRO-TEST: test0
# CHECK-NEXT: micro_value0: 4
# CHECK-NEXT: micro_value1: 1.3
# CHECK-NEXT: *** MICRO-TEST: test1
# CHECK-NEXT: micro_value0: 4
# CHECK-NEXT: micro_value1: 1.3
# CHECK-NEXT: *** MICRO-TEST: test2
# CHECK-NEXT: micro_value0: 4
# CHECK-NEXT: micro_value1: 1.3


@@ -0,0 +1,51 @@
# RUN: %{lit} -j 1 -v %{inputs}/test-data-micro --output %t.results.out
# RUN: FileCheck < %t.results.out %s
# RUN: rm %t.results.out
# CHECK: {
# CHECK: "__version__"
# CHECK: "elapsed"
# CHECK-NEXT: "tests": [
# CHECK-NEXT: {
# CHECK-NEXT: "code": "PASS",
# CHECK-NEXT: "elapsed": null,
# CHECK-NEXT: "metrics": {
# CHECK-NEXT: "micro_value0": 4,
# CHECK-NEXT: "micro_value1": 1.3
# CHECK-NEXT: },
# CHECK-NEXT: "name": "test-data-micro :: micro-tests.ini:test{{[0-2]}}",
# CHECK-NEXT: "output": ""
# CHECK-NEXT: },
# CHECK-NEXT: {
# CHECK-NEXT: "code": "PASS",
# CHECK-NEXT: "elapsed": null,
# CHECK-NEXT: "metrics": {
# CHECK-NEXT: "micro_value0": 4,
# CHECK-NEXT: "micro_value1": 1.3
# CHECK-NEXT: },
# CHECK-NEXT: "name": "test-data-micro :: micro-tests.ini:test{{[0-2]}}",
# CHECK-NEXT: "output": ""
# CHECK-NEXT: },
# CHECK-NEXT: {
# CHECK-NEXT: "code": "PASS",
# CHECK-NEXT: "elapsed": null,
# CHECK-NEXT: "metrics": {
# CHECK-NEXT: "micro_value0": 4,
# CHECK-NEXT: "micro_value1": 1.3
# CHECK-NEXT: },
# CHECK-NEXT: "name": "test-data-micro :: micro-tests.ini:test{{[0-2]}}",
# CHECK-NEXT: "output": ""
# CHECK-NEXT: },
# CHECK-NEXT: {
# CHECK-NEXT: "code": "PASS",
# CHECK-NEXT: "elapsed": {{[0-9.]+}},
# CHECK-NEXT: "metrics": {
# CHECK-NEXT: "value0": 1,
# CHECK-NEXT: "value1": 2.3456
# CHECK-NEXT: },
# CHECK-NEXT: "name": "test-data-micro :: micro-tests.ini",
# CHECK-NEXT: "output": "Test passed."
# CHECK-NEXT: }
# CHECK-NEXT: ]
# CHECK-NEXT: }