[lit] Add EXCLUDED test result category

Track and print the number of tests that were discovered but not
executed due to test selection options:
  * --filter (regex filter)
  * --max-tests (limits number of tests)
  * sharding feature

With this change all discovered tests are accounted for: every
discovered test is included in one of the counts printed in the summary.

Reviewed By: jdenny

Differential Revision: https://reviews.llvm.org/D78078
This commit is contained in:
Julian Lettner 2020-04-13 13:01:46 -07:00
parent a3515ab8af
commit 8cb8fe909b
3 changed files with 26 additions and 12 deletions

View File

@@ -37,6 +37,7 @@ UNRESOLVED = ResultCode('UNRESOLVED', True)
UNSUPPORTED = ResultCode('UNSUPPORTED', False)
TIMEOUT = ResultCode('TIMEOUT', True)
SKIPPED = ResultCode('SKIPPED', False)
EXCLUDED = ResultCode('EXCLUDED', False)
# Test metric values.

View File

@@ -57,9 +57,11 @@ def main(builtin_params={}):
opts.maxIndividualTestTime))
lit_config.maxIndividualTestTime = opts.maxIndividualTestTime
filtered_tests = [t for t in discovered_tests if
determine_order(discovered_tests, opts.order)
selected_tests = [t for t in discovered_tests if
opts.filter.search(t.getFullName())]
if not filtered_tests:
if not selected_tests:
sys.stderr.write('error: filter did not match any tests '
'(of %d discovered). ' % len(discovered_tests))
if opts.allow_empty_runs:
@@ -71,30 +73,30 @@ def main(builtin_params={}):
'error.\n')
sys.exit(2)
determine_order(filtered_tests, opts.order)
if opts.shard:
(run, shards) = opts.shard
filtered_tests = filter_by_shard(filtered_tests, run, shards, lit_config)
if not filtered_tests:
selected_tests = filter_by_shard(selected_tests, run, shards, lit_config)
if not selected_tests:
sys.stderr.write('warning: shard does not contain any tests. '
'Consider decreasing the number of shards.\n')
sys.exit(0)
filtered_tests = filtered_tests[:opts.max_tests]
selected_tests = selected_tests[:opts.max_tests]
mark_excluded(discovered_tests, selected_tests)
start = time.time()
run_tests(filtered_tests, lit_config, opts, len(discovered_tests))
run_tests(selected_tests, lit_config, opts, len(discovered_tests))
elapsed = time.time() - start
# TODO(yln): eventually, all functions below should act on discovered_tests
executed_tests = [
t for t in filtered_tests if t.result.code != lit.Test.SKIPPED]
t for t in selected_tests if t.result.code != lit.Test.SKIPPED]
if opts.time_tests:
print_histogram(executed_tests)
print_results(filtered_tests, elapsed, opts)
print_results(discovered_tests, elapsed, opts)
if opts.output_path:
#TODO(yln): pass in discovered_tests
@@ -109,7 +111,7 @@ def main(builtin_params={}):
if lit_config.numWarnings:
sys.stderr.write('\n%d warning(s) in tests\n' % lit_config.numWarnings)
has_failure = any(t.isFailure() for t in executed_tests)
has_failure = any(t.isFailure() for t in discovered_tests)
if has_failure:
sys.exit(1)
@@ -187,6 +189,13 @@ def filter_by_shard(tests, run, shards, lit_config):
return selected_tests
def mark_excluded(discovered_tests, selected_tests):
    """Mark tests that were discovered but not selected for execution.

    Every test in `discovered_tests` that is absent from `selected_tests`
    (dropped by --filter, --max-tests, or sharding) is given an EXCLUDED
    result so that the summary accounts for all discovered tests.
    """
    excluded_tests = set(discovered_tests) - set(selected_tests)
    for t in excluded_tests:
        # Create a fresh Result per test: Result objects are mutable
        # (output, metrics, and timing can be attached later), so sharing
        # one instance across tests would couple their results.
        t.setResult(lit.Test.Result(lit.Test.EXCLUDED))
def run_tests(tests, lit_config, opts, discovered_tests):
workers = min(len(tests), opts.workers)
display = lit.display.create_display(opts, len(tests), discovered_tests,
@@ -261,6 +270,7 @@ def add_result_category(result_code, label):
# Status code, summary label, group label
result_codes = [
# Passes
(lit.Test.EXCLUDED, 'Excluded Tests', 'Excluded'),
(lit.Test.SKIPPED, 'Skipped Tests', 'Skipped'),
(lit.Test.UNSUPPORTED, 'Unsupported Tests', 'Unsupported'),
(lit.Test.PASS, 'Expected Passes', ''),
@@ -289,7 +299,7 @@ def print_group(code, label, tests, opts):
if not tests:
return
# TODO(yln): FLAKYPASS? Make this more consistent!
if code in {lit.Test.SKIPPED, lit.Test.PASS}:
if code in {lit.Test.EXCLUDED, lit.Test.SKIPPED, lit.Test.PASS}:
return
if (lit.Test.XFAIL == code and not opts.show_xfail) or \
(lit.Test.UNSUPPORTED == code and not opts.show_unsupported):

View File

@@ -22,12 +22,14 @@
# RUN: %{lit} --filter 'O[A-Z]E' %{inputs}/discovery | FileCheck --check-prefix=CHECK-FILTER %s
# RUN: env LIT_FILTER='o[a-z]e' %{lit} %{inputs}/discovery | FileCheck --check-prefix=CHECK-FILTER %s
# CHECK-FILTER: Testing: 2 of 5 tests
# CHECK-FILTER: Excluded Tests : 3
# Check that maximum counts work
#
# RUN: %{lit} --max-tests 3 %{inputs}/discovery | FileCheck --check-prefix=CHECK-MAX %s
# CHECK-MAX: Testing: 3 of 5 tests
# CHECK-MAX: Excluded Tests : 2
# Check that sharding partitions the testsuite in a way that distributes the
@@ -38,6 +40,7 @@
# RUN: FileCheck --check-prefix=CHECK-SHARD0-OUT < %t.out %s
# CHECK-SHARD0-ERR: note: Selecting shard 1/3 = size 2/5 = tests #(3*k)+1 = [1, 4]
# CHECK-SHARD0-OUT: Testing: 2 of 5 tests
# CHECK-SHARD0-OUT: Excluded Tests : 3
#
# RUN: %{lit} --num-shards 3 --run-shard 2 %{inputs}/discovery >%t.out 2>%t.err
# RUN: FileCheck --check-prefix=CHECK-SHARD1-ERR < %t.err %s