[lit] Improve naming of test result categories

Improve consistency when printing test results:
Previously, we used different labels for group names (the header for the
list of, e.g., failing tests) and for summary count lines. For example:
"Failing Tests" vs. "Unexpected Failures". This commit changes lit to
label things consistently.

Improve wording of labels:
When talking about individual test results, the first word in
"Unexpected Failures", "Expected Passes", and "Individual Timeouts" is
superfluous.  Some labels contain the word "Tests" and some don't.
Let's simplify the names.

Before:
```
Failing Tests (1):
  ...

Expected Passes    : 3
Unexpected Failures: 1
```

After:
```
Failed Tests (1):
  ...

Passed: 3
Failed: 1
```
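
Mechanically, each category now carries a single label from which both output forms are derived. A minimal sketch of the idea (illustrative only, not lit's actual code):

```
label = 'Failed'
failed = ['allow-retries :: does-not-succeed-within-limit.py']

# Group header: the label plus " Tests (<count>):", then the test names.
print('%s Tests (%d):' % (label, len(failed)))
for name in failed:
    print('  %s' % name)

# Summary count line: the same label, reused verbatim.
print('%s: %d' % (label, len(failed)))
```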

Reviewed By: ldionne

Differential Revision: https://reviews.llvm.org/D77708
Julian Lettner 2020-04-07 22:48:39 -07:00
parent 9bcef270d7
commit 99d6e05e71
20 changed files with 71 additions and 72 deletions


@@ -264,12 +264,12 @@
 -- Testing: Testing: 2534 tests, 4 threads --
 Testing: 0 .. 10.. 20.. 30.. 40.. 50.. 60.. 70.. 80.. 90..
 Testing Time: 81.52s
-Expected Passes : 2503
-Expected Failures : 28
-Unsupported Tests : 3
+Passed : 2503
+Expectedly Failed: 28
+Unsupported : 3
 </pre>
-<p>The statistic, "Unexpected Failures" (not shown if all tests pass), is the important one.</p>
+<p>The statistic, "Failed" (not shown if all tests pass), is the important one.</p>
 <!--=====================================================================-->
 <h2 id="patches">Creating Patch Files</h2>


@@ -265,34 +265,33 @@ def print_histogram(tests):
 def add_result_category(result_code, label):
     assert isinstance(result_code, lit.Test.ResultCode)
-    category = (result_code, "%s Tests" % label, label)
+    category = (result_code, label)
     result_codes.append(category)
 
-# Status code, summary label, group label
 result_codes = [
     # Passes
-    (lit.Test.EXCLUDED, 'Excluded Tests', 'Excluded'),
-    (lit.Test.SKIPPED, 'Skipped Tests', 'Skipped'),
-    (lit.Test.UNSUPPORTED, 'Unsupported Tests', 'Unsupported'),
-    (lit.Test.PASS, 'Expected Passes', ''),
-    (lit.Test.FLAKYPASS, 'Passes With Retry', ''),
-    (lit.Test.XFAIL, 'Expected Failures', 'Expected Failing'),
+    (lit.Test.EXCLUDED, 'Excluded'),
+    (lit.Test.SKIPPED, 'Skipped'),
+    (lit.Test.UNSUPPORTED, 'Unsupported'),
+    (lit.Test.PASS, 'Passed'),
+    (lit.Test.FLAKYPASS, 'Passed With Retry'),
+    (lit.Test.XFAIL, 'Expectedly Failed'),
     # Failures
-    (lit.Test.UNRESOLVED, 'Unresolved Tests', 'Unresolved'),
-    (lit.Test.TIMEOUT, 'Individual Timeouts', 'Timed Out'),
-    (lit.Test.FAIL, 'Unexpected Failures', 'Failing'),
-    (lit.Test.XPASS, 'Unexpected Passes', 'Unexpected Passing')
+    (lit.Test.UNRESOLVED, 'Unresolved'),
+    (lit.Test.TIMEOUT, 'Timed Out'),
+    (lit.Test.FAIL, 'Failed'),
+    (lit.Test.XPASS, 'Unexpectedly Passed')
 ]
 
 def print_results(tests, elapsed, opts):
-    tests_by_code = {code: [] for (code, _, _) in result_codes}
+    tests_by_code = {code: [] for code, _ in result_codes}
     for test in tests:
         tests_by_code[test.result.code].append(test)
 
-    for (code, _, group_label) in result_codes:
-        print_group(code, group_label, tests_by_code[code], opts)
+    for (code, label) in result_codes:
+        print_group(code, label, tests_by_code[code], opts)
 
     print_summary(tests_by_code, opts.quiet, elapsed)
@@ -318,7 +317,7 @@ def print_summary(tests_by_code, quiet, elapsed):
         print('\nTesting Time: %.2fs' % elapsed)
 
     codes = [c for c in result_codes if not quiet or c.isFailure]
-    groups = [(label, len(tests_by_code[code])) for code, label, _ in codes]
+    groups = [(label, len(tests_by_code[code])) for code, label in codes]
     groups = [(label, count) for label, count in groups if count]
     if not groups:
         return
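
In the summary output, lit pads the labels to a common column so the counts line up (visible in the shtest-format expectations further down, e.g. "Passed : 7" alongside "Unexpectedly Passed: 1"). A hedged sketch of such alignment over the (label, count) groups computed above, assuming simple left-justification; the real print_summary may differ:

```
def print_aligned_counts(groups):
    # groups: (label, count) pairs, as built in print_summary above.
    # Pad every label to the width of the longest so the colons line up.
    width = max(len(label) for label, _ in groups)
    for label, count in groups:
        print('%s: %d' % (label.ljust(width), count))

print_aligned_counts([('Unsupported', 4), ('Passed', 7), ('Unexpectedly Passed', 1)])
# Unsupported        : 4
# Passed             : 7
# Unexpectedly Passed: 1
```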


@@ -5,18 +5,18 @@
 #
 # RUN: rm -f %t.counter
 # RUN: %{lit} -j 1 %{inputs}/allow-retries/succeeds-within-limit.py -Dcounter=%t.counter -Dpython=%{python} | FileCheck --check-prefix=CHECK-TEST1 %s
-# CHECK-TEST1: Passes With Retry: 1
+# CHECK-TEST1: Passed With Retry: 1
 
 # Test that a per-file ALLOW_RETRIES overwrites the config-wide test_retry_attempts property, if any.
 #
 # RUN: rm -f %t.counter
 # RUN: %{lit} -j 1 %{inputs}/allow-retries/succeeds-within-limit.py -Dtest_retry_attempts=2 -Dcounter=%t.counter -Dpython=%{python} | FileCheck --check-prefix=CHECK-TEST2 %s
-# CHECK-TEST2: Passes With Retry: 1
+# CHECK-TEST2: Passed With Retry: 1
 
 # This test does not succeed within the allowed retry limit
 #
 # RUN: not %{lit} -j 1 %{inputs}/allow-retries/does-not-succeed-within-limit.py | FileCheck --check-prefix=CHECK-TEST3 %s
-# CHECK-TEST3: Failing Tests (1):
+# CHECK-TEST3: Failed Tests (1):
 # CHECK-TEST3: allow-retries :: does-not-succeed-within-limit.py
 
 # This test should be UNRESOLVED since it has more than one ALLOW_RETRIES
@@ -38,4 +38,4 @@
 #
 # RUN: rm -f %t.counter
 # RUN: %{lit} -j 1 %{inputs}/test_retry_attempts/test.py -Dcounter=%t.counter -Dpython=%{python} | FileCheck --check-prefix=CHECK-TEST6 %s
-# CHECK-TEST6: Passes With Retry: 1
+# CHECK-TEST6: Passed With Retry: 1


@@ -6,10 +6,10 @@
 # CHECK: CUSTOM_PASS: custom-result-category :: test1.txt
 # CHECK: CUSTOM_FAILURE: custom-result-category :: test2.txt
 
-# TODO(yln): Passing tests shouldn't be printed by default.
+# TODO(yln): Passed tests shouldn't be printed by default.
 # CHECK: My Passed Tests (1)
 # CHECK: My Failed Tests (1)
 # CHECK: custom-result-category :: test2.txt
 
-# CHECK: My Passed Tests: 1
-# CHECK: My Failed Tests: 1
+# CHECK: My Passed: 1
+# CHECK: My Failed: 1
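
This test exercises the add_result_category hook changed in main.py above: a suite's lit configuration can register custom result codes with their own labels. A hedged lit.cfg sketch, assuming the ResultCode(name, isFailure) constructor of this lit version; the actual test input under custom-result-category/ may differ:

```
# Hypothetical lit.cfg sketch (assumes ResultCode(name, isFailure) and
# lit.main.add_result_category; the real test input may differ).
import lit.Test
import lit.main

CUSTOM_PASS = lit.Test.ResultCode('CUSTOM_PASS', False)       # not a failure
CUSTOM_FAILURE = lit.Test.ResultCode('CUSTOM_FAILURE', True)  # a failure

# One label per category now covers both the group header
# ("My Passed Tests (1)") and the summary line ("My Passed: 1").
lit.main.add_result_category(CUSTOM_PASS, 'My Passed')
lit.main.add_result_category(CUSTOM_FAILURE, 'My Failed')
```

With these two registrations, the new single-label scheme produces exactly the headers and counts that the CHECK lines above expect.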


@@ -5,6 +5,6 @@
 # CHECK: -- Testing:
-# CHECK: Failing Tests (1):
+# CHECK: Failed Tests (1):
 # CHECK: googletest-discovery-failed :: subdir/OneTest.py/failed_to_discover_tests_from_gtest
-# CHECK: Unexpected Failures: 1
+# CHECK: Failed: 1


@@ -17,7 +17,7 @@
 # CHECK: ***
 # CHECK: PASS: googletest-format :: {{[Dd]ummy[Ss]ub[Dd]ir}}/OneTest.py/ParameterizedTest/0.subTest
 # CHECK: PASS: googletest-format :: {{[Dd]ummy[Ss]ub[Dd]ir}}/OneTest.py/ParameterizedTest/1.subTest
-# CHECK: Failing Tests (1)
-# CHECK: Expected Passes : 3
-# CHECK: Unexpected Failures: 1
+# CHECK: Failed Tests (1)
+# CHECK: Passed: 3
+# CHECK: Failed: 1


@@ -16,8 +16,8 @@
 # CHECK: PASS: googletest-timeout :: {{[Dd]ummy[Ss]ub[Dd]ir}}/OneTest.py/FirstTest.subTestA
 # CHECK: TIMEOUT: googletest-timeout :: {{[Dd]ummy[Ss]ub[Dd]ir}}/OneTest.py/FirstTest.subTestB
 # CHECK: TIMEOUT: googletest-timeout :: {{[Dd]ummy[Ss]ub[Dd]ir}}/OneTest.py/FirstTest.subTestC
-# CHECK: Expected Passes : 1
-# CHECK: Individual Timeouts: 2
+# CHECK: Passed : 1
+# CHECK: Timed Out: 2
 
 # Test per test timeout via a config file and on the command line.
 # The value set on the command line should override the config file.


@@ -15,6 +15,6 @@
 # CHECK: ***
 # CHECK: PASS: googletest-upstream-format :: {{[Dd]ummy[Ss]ub[Dd]ir}}/OneTest.py/ParameterizedTest/0.subTest
 # CHECK: PASS: googletest-upstream-format :: {{[Dd]ummy[Ss]ub[Dd]ir}}/OneTest.py/ParameterizedTest/1.subTest
-# CHECK: Failing Tests (1)
-# CHECK: Expected Passes : 3
-# CHECK: Unexpected Failures: 1
+# CHECK: Failed Tests (1)
+# CHECK: Passed: 3
+# CHECK: Failed: 1


@@ -24,10 +24,10 @@
 # CHECK: Testing: 1 tests
 # CHECK-NOT: PASS
-# CHECK: Expected Passes: 1
+# CHECK: Passed: 1
 
 # SHOW-ALL: Testing: 1 tests
 # SHOW-ALL: PASS: lit-opts :: test.txt (1 of 1)
 # SHOW-ALL: {{^}}[[VAR]]
 # SHOW-ALL-NOT: PASS
-# SHOW-ALL: Expected Passes: 1
+# SHOW-ALL: Passed: 1


@@ -10,15 +10,15 @@
 #
 # CHECK-NOT: reached maximum number of test failures
-# CHECK-NOT: Skipped Tests
-# CHECK: Unexpected Failures: 3
+# CHECK-NOT: Skipped
+# CHECK: Failed: 3
 
 # CHECK: reached maximum number of test failures, skipping remaining tests
-# CHECK: Skipped Tests : 2
-# CHECK: Unexpected Failures: 1
+# CHECK: Skipped: 2
+# CHECK: Failed : 1
 
 # CHECK: reached maximum number of test failures, skipping remaining tests
-# CHECK: Skipped Tests : 1
-# CHECK: Unexpected Failures: 2
+# CHECK: Skipped: 1
+# CHECK: Failed : 2
 
 # CHECK: error: argument --max-failures: requires positive integer, but found '0'


@@ -5,5 +5,5 @@
 # RUN: %{lit} %{inputs}/max-time --max-time=5 2>&1 | FileCheck %s
 
 # CHECK: reached timeout, skipping remaining tests
-# CHECK: Skipped Tests : 1
-# CHECK: Expected Passes: 1
+# CHECK: Skipped: 1
+# CHECK: Passed : 1


@@ -15,4 +15,4 @@
 # CHECK: -- Testing: 2 tests, 2 workers --
 # CHECK-DAG: PASS: parallelism-groups :: test1.txt
 # CHECK-DAG: PASS: parallelism-groups :: test2.txt
-# CHECK: Expected Passes: 2
+# CHECK: Passed: 2


@@ -22,14 +22,14 @@
 # RUN: %{lit} --filter 'O[A-Z]E' %{inputs}/discovery | FileCheck --check-prefix=CHECK-FILTER %s
 # RUN: env LIT_FILTER='o[a-z]e' %{lit} %{inputs}/discovery | FileCheck --check-prefix=CHECK-FILTER %s
 # CHECK-FILTER: Testing: 2 of 5 tests
-# CHECK-FILTER: Excluded Tests : 3
+# CHECK-FILTER: Excluded: 3
 
 # Check that maximum counts work
 #
 # RUN: %{lit} --max-tests 3 %{inputs}/discovery | FileCheck --check-prefix=CHECK-MAX %s
 # CHECK-MAX: Testing: 3 of 5 tests
-# CHECK-MAX: Excluded Tests : 2
+# CHECK-MAX: Excluded: 2
# Check that sharding partitions the testsuite in a way that distributes the
@@ -40,7 +40,7 @@
 # RUN: FileCheck --check-prefix=CHECK-SHARD0-OUT < %t.out %s
 # CHECK-SHARD0-ERR: note: Selecting shard 1/3 = size 2/5 = tests #(3*k)+1 = [1, 4]
 # CHECK-SHARD0-OUT: Testing: 2 of 5 tests
-# CHECK-SHARD0-OUT: Excluded Tests : 3
+# CHECK-SHARD0-OUT: Excluded: 3
 #
 # RUN: %{lit} --num-shards 3 --run-shard 2 %{inputs}/discovery >%t.out 2>%t.err
 # RUN: FileCheck --check-prefix=CHECK-SHARD1-ERR < %t.err %s


@@ -93,6 +93,6 @@
 # CHECK: $ "env" "A_FOO=1" "-u" "FOO" "B_BAR=2" "-u" "BAR" "C_OOF=3" "{{[^"]*}}" "print_environment.py"
 # CHECK-NOT: ${{.*}}print_environment.py
 
-# CHECK: Expected Passes : 4
-# CHECK: Unexpected Failures: 12
+# CHECK: Passed: 4
+# CHECK: Failed: 12
 # CHECK-NOT: {{.}}


@@ -69,21 +69,21 @@
 # CHECK-NEXT: true
 # CHECK-NEXT: --
 
-# CHECK: Failing Tests (3)
+# CHECK: Failed Tests (3)
 # CHECK: shtest-format :: external_shell/fail.txt
 # CHECK: shtest-format :: external_shell/fail_with_bad_encoding.txt
 # CHECK: shtest-format :: fail.txt
 
-# CHECK: Unexpected Passing Tests (1)
+# CHECK: Unexpectedly Passed Tests (1)
 # CHECK: shtest-format :: xpass.txt
 
 # CHECK: Testing Time:
-# CHECK: Unsupported Tests : 4
-# CHECK: Expected Passes : 7
-# CHECK: Expected Failures : 4
-# CHECK: Unresolved Tests : 3
-# CHECK: Unexpected Failures: 3
-# CHECK: Unexpected Passes : 1
+# CHECK: Unsupported : 4
+# CHECK: Passed : 7
+# CHECK: Expectedly Failed : 4
+# CHECK: Unresolved : 3
+# CHECK: Failed : 3
+# CHECK: Unexpectedly Passed: 1
 
 # XUNIT: <?xml version="1.0" encoding="UTF-8"?>


@@ -11,7 +11,7 @@
 # CHECK-TEST1: THIS WAS
 # CHECK-TEST1: INJECTED
 #
-# CHECK-TEST1: Expected Passes: 1
+# CHECK-TEST1: Passed: 1
 
 # RUN: %{lit} -j 1 %{inputs}/shtest-inject/test-one.txt --show-all | FileCheck --check-prefix=CHECK-TEST2 %s
 #
@@ -26,7 +26,7 @@
 # CHECK-TEST2: INJECTED
 # CHECK-TEST2: IN THE FILE
 #
-# CHECK-TEST2: Expected Passes: 1
+# CHECK-TEST2: Passed: 1
 
 # RUN: %{lit} -j 1 %{inputs}/shtest-inject/test-many.txt --show-all | FileCheck --check-prefix=CHECK-TEST3 %s
 #
@@ -45,4 +45,4 @@
 # CHECK-TEST3: IF IT WORKS
 # CHECK-TEST3: AS EXPECTED
 #
-# CHECK-TEST3: Expected Passes: 1
+# CHECK-TEST3: Passed: 1


@@ -110,6 +110,6 @@
 # CHECK: Error: 'not --crash' cannot call 'rm'
 # CHECK: error: command failed with exit status: {{.*}}
 
-# CHECK: Expected Passes : 1
-# CHECK: Unexpected Failures: 12
+# CHECK: Passed: 1
+# CHECK: Failed: 12
 # CHECK-NOT: {{.}}


@@ -583,4 +583,4 @@
 # CHECK: ***
 # CHECK: PASS: shtest-shell :: valid-shell.txt
-# CHECK: Failing Tests (35)
+# CHECK: Failed Tests (35)


@@ -50,8 +50,8 @@
 # CHECK-OUT-COMMON: PASS: per_test_timeout :: short.py
-# CHECK-OUT-COMMON: Expected Passes{{ *}}: 1
-# CHECK-OUT-COMMON: Individual Timeouts{{ *}}: 1
+# CHECK-OUT-COMMON: Passed : 1
+# CHECK-OUT-COMMON: Timed Out: 1
 
 # Test per test timeout via a config file and on the command line.
 # The value set on the command line should override the config file.
@@ -71,5 +71,5 @@
 # CHECK-CMDLINE-OVERRIDE-OUT: PASS: per_test_timeout :: short.py
-# CHECK-CMDLINE-OVERRIDE-OUT: Expected Passes{{ *}}: 1
-# CHECK-CMDLINE-OVERRIDE-OUT: Individual Timeouts{{ *}}: 1
+# CHECK-CMDLINE-OVERRIDE-OUT: Passed : 1
+# CHECK-CMDLINE-OVERRIDE-OUT: Timed Out: 1


@@ -1,4 +1,4 @@
 # RUN: %cmake %mlir_src_root/examples/standalone -DCMAKE_CXX_COMPILER=%host_cxx -DCMAKE_C_COMPILER=%host_cc -DMLIR_DIR=%llvm_lib_dir/cmake/mlir ; %cmake --build . --target check-standalone | tee %t | FileCheck %s
-# CHECK: Expected Passes: 3
+# CHECK: Passed: 3
 # UNSUPPORTED: windows, android