[lit] Break main into smaller functions

This change is purely mechanical.  I will do further cleanups of
parameter usages.

Reviewed By: rnk

Differential Revision: https://reviews.llvm.org/D68830

llvm-svn: 374452
Julian Lettner 2019-10-10 21:24:41 +00:00
parent 58417b3390
commit 8d0744a8b5
1 changed file with 198 additions and 180 deletions
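
For orientation before reading the diff: after this change, main_with_tmp simply delegates to the extracted module-level helpers. A condensed sketch of the resulting flow, with option parsing, test discovery, sharding, and execution elided; the output-path plumbing at the end is an assumption for illustration, not verbatim from the diff:

    def main_with_tmp(builtinParameters):
        # ... parse options, discover tests into `run` ...
        userParams = create_user_parameters(builtinParameters, opts)
        if opts.showSuites or opts.showTests:
            print_suites_or_tests(run, opts)
            return
        filter_tests(run, opts)
        order_tests(run, opts)
        increase_process_limit(litConfig, opts)
        # ... execute the tests ...
        if opts.output_path:  # assumed option name
            write_test_results(run, litConfig, testing_time, opts.output_path)
        if opts.xunit_output_file:
            write_test_results_xunit(run, opts)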


@@ -25,77 +25,6 @@ import lit.run
import lit.Test
import lit.util

def write_test_results(run, lit_config, testing_time, output_path):
    try:
        import json
    except ImportError:
        lit_config.fatal('test output unsupported with Python 2.5')

    # Construct the data we will write.
    data = {}
    # Encode the current lit version as a schema version.
    data['__version__'] = lit.__versioninfo__
    data['elapsed'] = testing_time
    # FIXME: Record some information on the lit configuration used?
    # FIXME: Record information from the individual test suites?

    # Encode the tests.
    data['tests'] = tests_data = []
    for test in run.tests:
        test_data = {
            'name' : test.getFullName(),
            'code' : test.result.code.name,
            'output' : test.result.output,
            'elapsed' : test.result.elapsed }

        # Add test metrics, if present.
        if test.result.metrics:
            test_data['metrics'] = metrics_data = {}
            for key, value in test.result.metrics.items():
                metrics_data[key] = value.todata()

        # Report micro-tests separately, if present
        if test.result.microResults:
            for key, micro_test in test.result.microResults.items():
                # Expand parent test name with micro test name
                parent_name = test.getFullName()
                micro_full_name = parent_name + ':' + key

                micro_test_data = {
                    'name' : micro_full_name,
                    'code' : micro_test.code.name,
                    'output' : micro_test.output,
                    'elapsed' : micro_test.elapsed }
                if micro_test.metrics:
                    micro_test_data['metrics'] = micro_metrics_data = {}
                    for key, value in micro_test.metrics.items():
                        micro_metrics_data[key] = value.todata()

                tests_data.append(micro_test_data)

        tests_data.append(test_data)

    # Write the output.
    f = open(output_path, 'w')
    try:
        json.dump(data, f, indent=2, sort_keys=True)
        f.write('\n')
    finally:
        f.close()

def update_incremental_cache(test):
    if not test.result.code.isFailure:
        return
    fname = test.getFilePath()
    os.utime(fname, None)

def by_mtime(test):
    fname = test.getFilePath()
    try:
        return os.path.getmtime(fname)
    except:
        return 0

def main(builtinParameters = {}):
    # Create a temp directory inside the normal temp directory so that we can
    # try to avoid temporary test file leaks. The user can avoid this behavior
@@ -132,14 +61,7 @@ def main_with_tmp(builtinParameters):
        print("lit %s" % (lit.__version__,))
        return

    # Create the user defined parameters.
    userParams = dict(builtinParameters)
    for entry in opts.userParameters:
        if '=' not in entry:
            name,val = entry,''
        else:
            name,val = entry.split('=', 1)
        userParams[name] = val
    userParams = create_user_parameters(builtinParameters, opts)
    # Decide what the requested maximum individual test time should be
    if opts.maxIndividualTestTime is not None:
@@ -186,57 +108,16 @@ def main_with_tmp(builtinParameters):
        litConfig.maxIndividualTestTime = opts.maxIndividualTestTime

    if opts.showSuites or opts.showTests:
        # Aggregate the tests by suite.
        suitesAndTests = {}
        for result_test in run.tests:
            if result_test.suite not in suitesAndTests:
                suitesAndTests[result_test.suite] = []
            suitesAndTests[result_test.suite].append(result_test)
        suitesAndTests = list(suitesAndTests.items())
        suitesAndTests.sort(key = lambda item: item[0].name)

        # Show the suites, if requested.
        if opts.showSuites:
            print('-- Test Suites --')
            for ts,ts_tests in suitesAndTests:
                print('  %s - %d tests' %(ts.name, len(ts_tests)))
                print('    Source Root: %s' % ts.source_root)
                print('    Exec Root  : %s' % ts.exec_root)
                if ts.config.available_features:
                    print('    Available Features : %s' % ' '.join(
                        sorted(ts.config.available_features)))

        # Show the tests, if requested.
        if opts.showTests:
            print('-- Available Tests --')
            for ts,ts_tests in suitesAndTests:
                ts_tests.sort(key = lambda test: test.path_in_suite)
                for test in ts_tests:
                    print('  %s' % (test.getFullName(),))

        # Exit.
        sys.exit(0)
        print_suites_or_tests(run, opts)
        return

    # Select and order the tests.
    numTotalTests = len(run.tests)

    # First, select based on the filter expression if given.
    if opts.filter:
        try:
            rex = re.compile(opts.filter)
        except:
            parser.error("invalid regular expression for --filter: %r" % (
                opts.filter))
        run.tests = [result_test for result_test in run.tests
                     if rex.search(result_test.getFullName())]
    filter_tests(run, opts)

    # Then select the order.
    if opts.shuffle:
        random.shuffle(run.tests)
    elif opts.incremental:
        run.tests.sort(key=by_mtime, reverse=True)
    else:
        run.tests.sort(key = lambda t: (not t.isEarlyTest(), t.getFullName()))
    order_tests(run, opts)

    # Then optionally restrict our attention to a shard of the tests.
    if (opts.numShards is not None) or (opts.runShard is not None):
@@ -262,27 +143,7 @@ def main_with_tmp(builtinParameters):
    # Don't create more workers than tests.
    opts.numWorkers = min(len(run.tests), opts.numWorkers)

    # Because some tests use threads internally, and at least on Linux each
    # of these threads counts toward the current process limit, try to
    # raise the (soft) process limit so that tests don't fail due to
    # resource exhaustion.
    try:
        cpus = lit.util.detectCPUs()
        desired_limit = opts.numWorkers * cpus * 2 # the 2 is a safety factor

        # Import the resource module here inside this try block because it
        # will likely fail on Windows.
        import resource

        max_procs_soft, max_procs_hard = resource.getrlimit(resource.RLIMIT_NPROC)
        desired_limit = min(desired_limit, max_procs_hard)

        if max_procs_soft < desired_limit:
            resource.setrlimit(resource.RLIMIT_NPROC, (desired_limit, max_procs_hard))
            litConfig.note('raised the process limit from %d to %d' % \
                (max_procs_soft, desired_limit))
    except:
        pass
    increase_process_limit(litConfig, opts)

    display = lit.display.create_display(opts, len(run.tests),
                                         numTotalTests, opts.numWorkers)
@@ -358,41 +219,7 @@ def main_with_tmp(builtinParameters):
            print('  %s: %d' % (name,N))

    if opts.xunit_output_file:
        # Collect the tests, indexed by test suite
        by_suite = {}
        for result_test in run.tests:
            suite = result_test.suite.config.name
            if suite not in by_suite:
                by_suite[suite] = {
                    'passes'   : 0,
                    'failures' : 0,
                    'skipped'  : 0,
                    'tests'    : [] }
            by_suite[suite]['tests'].append(result_test)
            if result_test.result.code.isFailure:
                by_suite[suite]['failures'] += 1
            elif result_test.result.code == lit.Test.UNSUPPORTED:
                by_suite[suite]['skipped'] += 1
            else:
                by_suite[suite]['passes'] += 1
        xunit_output_file = open(opts.xunit_output_file, "w")
        xunit_output_file.write("<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n")
        xunit_output_file.write("<testsuites>\n")
        for suite_name, suite in by_suite.items():
            safe_suite_name = quoteattr(suite_name.replace(".", "-"))
            xunit_output_file.write("<testsuite name=" + safe_suite_name)
            xunit_output_file.write(" tests=\"" + str(suite['passes'] +
                suite['failures'] + suite['skipped']) + "\"")
            xunit_output_file.write(" failures=\"" + str(suite['failures']) + "\"")
            xunit_output_file.write(" skipped=\"" + str(suite['skipped']) +
                "\">\n")
            for result_test in suite['tests']:
                result_test.writeJUnitXML(xunit_output_file)
                xunit_output_file.write("\n")
            xunit_output_file.write("</testsuite>\n")
        xunit_output_file.write("</testsuites>")
        xunit_output_file.close()
        write_test_results_xunit(run, opts)

    # If we encountered any additional errors, exit abnormally.
    if litConfig.numErrors:
@@ -407,5 +234,196 @@ def main_with_tmp(builtinParameters):
        sys.exit(1)

    sys.exit(0)
def create_user_parameters(builtinParameters, opts):
    userParams = dict(builtinParameters)
    for entry in opts.userParameters:
        if '=' not in entry:
            name,val = entry,''
        else:
            name,val = entry.split('=', 1)
        userParams[name] = val
    return userParams
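
Each entry in opts.userParameters comes from lit's --param flag and is either name=value or a bare name, which maps to the empty string. A quick illustration, using an argparse Namespace as a stand-in for the real parsed-options object:

    from argparse import Namespace

    opts = Namespace(userParameters=['build_mode=Release', 'long_tests'])
    create_user_parameters({'threads': '1'}, opts)
    # => {'threads': '1', 'build_mode': 'Release', 'long_tests': ''}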
def print_suites_or_tests(run, opts):
    # Aggregate the tests by suite.
    suitesAndTests = {}
    for result_test in run.tests:
        if result_test.suite not in suitesAndTests:
            suitesAndTests[result_test.suite] = []
        suitesAndTests[result_test.suite].append(result_test)
    suitesAndTests = list(suitesAndTests.items())
    suitesAndTests.sort(key = lambda item: item[0].name)

    # Show the suites, if requested.
    if opts.showSuites:
        print('-- Test Suites --')
        for ts,ts_tests in suitesAndTests:
            print('  %s - %d tests' %(ts.name, len(ts_tests)))
            print('    Source Root: %s' % ts.source_root)
            print('    Exec Root  : %s' % ts.exec_root)
            if ts.config.available_features:
                print('    Available Features : %s' % ' '.join(
                    sorted(ts.config.available_features)))

    # Show the tests, if requested.
    if opts.showTests:
        print('-- Available Tests --')
        for ts,ts_tests in suitesAndTests:
            ts_tests.sort(key = lambda test: test.path_in_suite)
            for test in ts_tests:
                print('  %s' % (test.getFullName(),))

    # Exit.
    sys.exit(0)
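
For reference, the --show-suites branch prints something along these lines (suite name and paths invented for illustration):

    -- Test Suites --
      check-lit - 42 tests
        Source Root: /src/llvm/utils/lit/tests
        Exec Root  : /build/utils/lit/tests
        Available Features : shell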
def filter_tests(run, opts):
    try:
        rex = re.compile(opts.filter)
    except:
        parser.error("invalid regular expression for --filter: %r" % (
            opts.filter))
    run.tests = [result_test for result_test in run.tests
                 if rex.search(result_test.getFullName())]
def order_tests(run, opts):
    if opts.shuffle:
        random.shuffle(run.tests)
    elif opts.incremental:
        run.tests.sort(key = by_mtime, reverse = True)
    else:
        run.tests.sort(key = lambda t: (not t.isEarlyTest(), t.getFullName()))

def by_mtime(test):
    fname = test.getFilePath()
    try:
        return os.path.getmtime(fname)
    except:
        return 0
def update_incremental_cache(test):
    if not test.result.code.isFailure:
        return
    fname = test.getFilePath()
    os.utime(fname, None)

def increase_process_limit(litConfig, opts):
    # Because some tests use threads internally, and at least on Linux each
    # of these threads counts toward the current process limit, try to
    # raise the (soft) process limit so that tests don't fail due to
    # resource exhaustion.
    try:
        cpus = lit.util.detectCPUs()
        desired_limit = opts.numWorkers * cpus * 2 # the 2 is a safety factor

        # Import the resource module here inside this try block because it
        # will likely fail on Windows.
        import resource

        max_procs_soft, max_procs_hard = resource.getrlimit(resource.RLIMIT_NPROC)
        desired_limit = min(desired_limit, max_procs_hard)

        if max_procs_soft < desired_limit:
            resource.setrlimit(resource.RLIMIT_NPROC, (desired_limit, max_procs_hard))
            litConfig.note('raised the process limit from %d to %d' % \
                (max_procs_soft, desired_limit))
    except:
        pass
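
The rlimit adjustment only applies where the resource module exists, hence the broad try/except. To inspect the values involved on a Unix-like host:

    import resource  # the import itself raises on Windows

    soft, hard = resource.getrlimit(resource.RLIMIT_NPROC)
    print(soft, hard)  # e.g. 31061 31061; actual numbers vary by system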
def write_test_results(run, lit_config, testing_time, output_path):
    try:
        import json
    except ImportError:
        lit_config.fatal('test output unsupported with Python 2.5')

    # Construct the data we will write.
    data = {}
    # Encode the current lit version as a schema version.
    data['__version__'] = lit.__versioninfo__
    data['elapsed'] = testing_time
    # FIXME: Record some information on the lit configuration used?
    # FIXME: Record information from the individual test suites?

    # Encode the tests.
    data['tests'] = tests_data = []
    for test in run.tests:
        test_data = {
            'name' : test.getFullName(),
            'code' : test.result.code.name,
            'output' : test.result.output,
            'elapsed' : test.result.elapsed }

        # Add test metrics, if present.
        if test.result.metrics:
            test_data['metrics'] = metrics_data = {}
            for key, value in test.result.metrics.items():
                metrics_data[key] = value.todata()

        # Report micro-tests separately, if present
        if test.result.microResults:
            for key, micro_test in test.result.microResults.items():
                # Expand parent test name with micro test name
                parent_name = test.getFullName()
                micro_full_name = parent_name + ':' + key

                micro_test_data = {
                    'name' : micro_full_name,
                    'code' : micro_test.code.name,
                    'output' : micro_test.output,
                    'elapsed' : micro_test.elapsed }
                if micro_test.metrics:
                    micro_test_data['metrics'] = micro_metrics_data = {}
                    for key, value in micro_test.metrics.items():
                        micro_metrics_data[key] = value.todata()

                tests_data.append(micro_test_data)

        tests_data.append(test_data)

    # Write the output.
    f = open(output_path, 'w')
    try:
        json.dump(data, f, indent=2, sort_keys=True)
        f.write('\n')
    finally:
        f.close()
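
The emitted JSON (keys sorted by json.dump) has roughly this shape; the concrete values below are invented for illustration:

    {
      "__version__": [0, 10, 0],
      "elapsed": 12.34,
      "tests": [
        {
          "code": "PASS",
          "elapsed": 0.25,
          "name": "check-lit :: usage.py",
          "output": ""
        }
      ]
    }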
def write_test_results_xunit(run, opts):
    # Collect the tests, indexed by test suite
    by_suite = {}
    for result_test in run.tests:
        suite = result_test.suite.config.name
        if suite not in by_suite:
            by_suite[suite] = {
                'passes'   : 0,
                'failures' : 0,
                'skipped'  : 0,
                'tests'    : [] }
        by_suite[suite]['tests'].append(result_test)
        if result_test.result.code.isFailure:
            by_suite[suite]['failures'] += 1
        elif result_test.result.code == lit.Test.UNSUPPORTED:
            by_suite[suite]['skipped'] += 1
        else:
            by_suite[suite]['passes'] += 1
    xunit_output_file = open(opts.xunit_output_file, "w")
    xunit_output_file.write("<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n")
    xunit_output_file.write("<testsuites>\n")
    for suite_name, suite in by_suite.items():
        safe_suite_name = quoteattr(suite_name.replace(".", "-"))
        xunit_output_file.write("<testsuite name=" + safe_suite_name)
        xunit_output_file.write(" tests=\"" + str(suite['passes'] +
            suite['failures'] + suite['skipped']) + "\"")
        xunit_output_file.write(" failures=\"" + str(suite['failures']) + "\"")
        xunit_output_file.write(" skipped=\"" + str(suite['skipped']) +
            "\">\n")
        for result_test in suite['tests']:
            result_test.writeJUnitXML(xunit_output_file)
            xunit_output_file.write("\n")
        xunit_output_file.write("</testsuite>\n")
    xunit_output_file.write("</testsuites>")
    xunit_output_file.close()
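
The resulting file is xUnit-style XML, roughly as below. The per-test elements come from writeJUnitXML and are elided here; the suite name and counts are invented for illustration. Note that quoteattr supplies the surrounding quotes and that dots in suite names become dashes:

    <?xml version="1.0" encoding="UTF-8" ?>
    <testsuites>
    <testsuite name="check-lit" tests="3" failures="1" skipped="0">
    ...per-test elements written by writeJUnitXML...
    </testsuite>
    </testsuites>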
if __name__=='__main__':
    main()