[Reland][lit] Use sharding for GoogleTest format

This improves lit unit test performance significantly, especially on Windows. The gain comes from launching one gtest executable for a shard of many subtests instead of one executable per subtest (the current situation).
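
Roughly, the invocation changes as sketched below (the binary name `SupportTests`, the shard count, and the JSON file name are made-up placeholders, not values from this patch):

  import os
  import subprocess

  # Before this patch: lit spawned one process per subtest, selected via --gtest_filter.
  subprocess.run(['./SupportTests', '--gtest_filter=CrashRecoveryTest.Basic'])

  # After this patch: lit spawns one process per shard. GoogleTest's built-in
  # sharding picks the subset of tests from the environment and writes a JSON report.
  env = dict(os.environ,
             GTEST_TOTAL_SHARDS='6',
             GTEST_SHARD_INDEX='0',
             GTEST_OUTPUT='json:SupportTests-0-6.json')
  subprocess.run(['./SupportTests'], env=env)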

The shards are executed by the test runner, and the results are stored in the
JSON format supported by GoogleTest. Later, in the test reporting stage, all
test results in the JSON files are retrieved to build the test summary and
reports.
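
A minimal sketch of that retrieval step, assuming the JSON layout GoogleTest emits (the same layout the test inputs in this patch use); the file name is a placeholder, error handling is omitted, and the patch's real logic lives in post_process_shard_results in googletest.py below:

  import json

  with open('SupportTests-0-6.json', encoding='utf-8') as f:
      report = json.load(f)

  for suite in report['testsuites']:
      for case in suite['testsuite']:
          name = suite['name'] + '.' + case['name']   # e.g. FirstTest.subTestA
          elapsed = float(case['time'][:-1])          # '0.001s' -> 0.001
          if case['result'] == 'SUPPRESSED':          # disabled test, ignore
              continue
          if 'failures' in case:
              print('FAIL', name, [f['failure'] for f in case['failures']])
          elif case['result'] == 'SKIPPED':
              print('SKIPPED', name)
          elif case['result'] == 'COMPLETED':
              print('PASS', name, elapsed)
          else:
              print('UNRESOLVED', name)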

On my Win10 desktop, before this patch: `check-clang-unit` 177s, `check-llvm-unit` 38s; after this patch: `check-clang-unit` 37s, `check-llvm-unit` 11s.
On my Linux machine, before this patch: `check-clang-unit` 46s, `check-llvm-unit` 8s; after this patch: `check-clang-unit` 7s, `check-llvm-unit` 4s.

Reviewed By: yln, rnk, abrachet

Differential Revision: https://reviews.llvm.org/D122251
Author: Yuanfang Chen
Date:   2022-04-12 12:09:34 -07:00
Parent: 163a9f4552
Commit: cd0a5889d7
23 changed files with 525 additions and 250 deletions


@ -178,6 +178,11 @@ TEST(CrashRecoveryTest, UnixCRCReturnCode) {
int Res = setenv("LLVM_CRC_UNIXCRCRETURNCODE", "1", 0);
ASSERT_EQ(Res, 0);
Res = unsetenv("GTEST_SHARD_INDEX");
ASSERT_EQ(Res, 0);
Res = unsetenv("GTEST_TOTAL_SHARDS");
ASSERT_EQ(Res, 0);
std::string Error;
bool ExecutionFailed;
int RetCode = ExecuteAndWait(Executable, argv, {}, {}, 0, 0, &Error,


@ -95,7 +95,9 @@ protected:
};
while (*EnvP != nullptr) {
EnvTable.emplace_back(prepareEnvVar(*EnvP));
auto S = prepareEnvVar(*EnvP);
if (!StringRef(S).startswith("GTEST_"))
EnvTable.emplace_back(S);
++EnvP;
}
}


@ -22,7 +22,7 @@ class LitConfig(object):
def __init__(self, progname, path, quiet,
useValgrind, valgrindLeakCheck, valgrindArgs,
noExecute, debug, isWindows,
noExecute, debug, isWindows, order,
params, config_prefix = None,
maxIndividualTestTime = 0,
parallelism_groups = {},
@ -38,6 +38,7 @@ class LitConfig(object):
self.noExecute = noExecute
self.debug = debug
self.isWindows = bool(isWindows)
self.order = order
self.params = dict(params)
self.bashPath = None


@ -52,6 +52,7 @@ def load_test_suite(inputs):
noExecute=False,
debug=False,
isWindows=windows,
order='smart',
params={})
# Perform test discovery.


@ -219,11 +219,12 @@ class TestSuite:
class Test:
"""Test - Information on a single test instance."""
def __init__(self, suite, path_in_suite, config, file_path = None):
def __init__(self, suite, path_in_suite, config, file_path = None, gtest_json_file = None):
self.suite = suite
self.path_in_suite = path_in_suite
self.config = config
self.file_path = file_path
self.gtest_json_file = gtest_json_file
# A list of conditions under which this test is expected to fail.
# Each condition is a boolean expression of features and target
@ -258,7 +259,7 @@ class Test:
# The previous test elapsed time, if applicable.
self.previous_elapsed = 0.0
if '/'.join(path_in_suite) in suite.test_times:
if suite.test_times and '/'.join(path_in_suite) in suite.test_times:
time = suite.test_times['/'.join(path_in_suite)]
self.previous_elapsed = abs(time)
self.previous_failure = time < 0


@ -28,7 +28,7 @@ class TestingConfig(object):
'TMPDIR', 'TMP', 'TEMP', 'TEMPDIR', 'AVRLIT_BOARD',
'AVRLIT_PORT', 'FILECHECK_OPTS', 'VCINSTALLDIR',
'VCToolsinstallDir', 'VSINSTALLDIR', 'WindowsSdkDir',
'WindowsSDKLibVersion', 'SOURCE_DATE_EPOCH']
'WindowsSDKLibVersion', 'SOURCE_DATE_EPOCH','GTEST_FILTER']
if sys.platform == 'win32':
pass_vars.append('COMSPEC')


@ -1,6 +1,7 @@
from __future__ import absolute_import
import json
import math
import os
import re
import shlex
import subprocess
import sys
@ -25,74 +26,25 @@ class GoogleTest(TestFormat):
self.test_suffixes = {exe_suffix, test_suffix + '.py'}
self.run_under = run_under
def getGTestTests(self, path, litConfig, localConfig):
"""getGTestTests(path) - [name]
Return the tests available in gtest executable.
Args:
path: String path to a gtest executable
litConfig: LitConfig instance
localConfig: TestingConfig instance"""
list_test_cmd = self.prepareCmd([path, '--gtest_list_tests'])
def get_num_tests(self, path, litConfig, localConfig):
list_test_cmd = self.prepareCmd(
[path, '--gtest_list_tests', '--gtest_filter=-*DISABLED_*'])
try:
output = subprocess.check_output(list_test_cmd,
env=localConfig.environment)
out = subprocess.check_output(list_test_cmd,
env=localConfig.environment)
except subprocess.CalledProcessError as exc:
litConfig.warning(
"unable to discover google-tests in %r: %s. Process output: %s"
% (path, sys.exc_info()[1], exc.output))
# This doesn't look like a valid gtest file. This can
# have a number of causes, none of them good. For
# instance, we could have created a broken executable.
# Alternatively, someone has cruft in their test
# directory. If we don't return a test here, then no
# failures will get reported, so return a dummy test name
# so that the failure is reported later.
yield 'failed_to_discover_tests_from_gtest'
return
return None
return sum(
map(lambda line: lit.util.to_string(line).startswith(' '),
out.splitlines(False)))
upstream_prefix = re.compile('Running main\(\) from .*gtest_main\.cc')
nested_tests = []
for ln in output.splitlines(False): # Don't keep newlines.
ln = lit.util.to_string(ln)
if upstream_prefix.fullmatch(ln):
# Upstream googletest prints this to stdout prior to running
# tests. LLVM removed that print statement in r61540, but we
# handle it here in case upstream googletest is being used.
continue
# The test name list includes trailing comments beginning with
# a '#' on some lines, so skip those. We don't support test names
# that use escaping to embed '#' into their name as the names come
# from C++ class and method names where such things are hard and
# uninteresting to support.
ln = ln.split('#', 1)[0].rstrip()
if not ln.lstrip():
continue
index = 0
while ln[index*2:index*2+2] == ' ':
index += 1
while len(nested_tests) > index:
nested_tests.pop()
ln = ln[index*2:]
if ln.endswith('.'):
nested_tests.append(ln)
elif any([name.startswith('DISABLED_')
for name in nested_tests + [ln]]):
# Gtest will internally skip these tests. No need to launch a
# child process for it.
continue
else:
yield ''.join(nested_tests) + ln
def getTestsInDirectory(self, testSuite, path_in_suite,
litConfig, localConfig):
def getTestsInDirectory(self, testSuite, path_in_suite, litConfig,
localConfig):
init_shard_size = 512 # number of tests in a shard
core_count = lit.util.usable_core_count()
source_path = testSuite.getSourcePath(path_in_suite)
for subdir in self.test_sub_dirs:
dir_path = os.path.join(source_path, subdir)
@ -102,13 +54,50 @@ class GoogleTest(TestFormat):
suffixes=self.test_suffixes):
# Discover the tests in this executable.
execpath = os.path.join(source_path, subdir, fn)
testnames = self.getGTestTests(execpath, litConfig, localConfig)
for testname in testnames:
testPath = path_in_suite + (subdir, fn, testname)
yield lit.Test.Test(testSuite, testPath, localConfig,
num_tests = self.get_num_tests(execpath, litConfig,
localConfig)
if num_tests is not None:
# Compute the number of shards.
shard_size = init_shard_size
nshard = int(math.ceil(num_tests / shard_size))
while nshard < core_count and shard_size > 1:
shard_size = shard_size // 2
nshard = int(math.ceil(num_tests / shard_size))
# Create one lit test for each shard.
for idx in range(nshard):
testPath = path_in_suite + (subdir, fn, str(idx),
str(nshard))
json_file = '-'.join([
execpath, testSuite.config.name,
str(os.getpid()),
str(idx),
str(nshard)
]) + '.json'
yield lit.Test.Test(testSuite,
testPath,
localConfig,
file_path=execpath,
gtest_json_file=json_file)
else:
# This doesn't look like a valid gtest file. This can
# have a number of causes, none of them good. For
# instance, we could have created a broken executable.
# Alternatively, someone has cruft in their test
# directory. If we don't return a test here, then no
# failures will get reported, so return a dummy test name
# so that the failure is reported later.
testPath = path_in_suite + (
subdir, fn, 'failed_to_discover_tests_from_gtest')
yield lit.Test.Test(testSuite,
testPath,
localConfig,
file_path=execpath)
def execute(self, test, litConfig):
if test.gtest_json_file is None:
return lit.Test.FAIL, ''
testPath,testName = os.path.split(test.getSourcePath())
while not os.path.exists(testPath):
# Handle GTest parametrized and typed tests, whose name includes
@ -116,7 +105,20 @@ class GoogleTest(TestFormat):
testPath, namePrefix = os.path.split(testPath)
testName = namePrefix + '/' + testName
cmd = [testPath, '--gtest_filter=' + testName]
testName,total_shards = os.path.split(testName)
testName,shard_idx = os.path.split(testName)
from lit.cl_arguments import TestOrder
use_shuffle = TestOrder(litConfig.order) == TestOrder.RANDOM
shard_env = {
'GTEST_COLOR': 'no',
'GTEST_SHUFFLE': '1' if use_shuffle else '0',
'GTEST_TOTAL_SHARDS': total_shards,
'GTEST_SHARD_INDEX': shard_idx,
'GTEST_OUTPUT': 'json:' + test.gtest_json_file
}
test.config.environment.update(shard_env)
cmd = [testPath]
cmd = self.prepareCmd(cmd)
if litConfig.useValgrind:
cmd = litConfig.valgrindArgs + cmd
@ -124,30 +126,52 @@ class GoogleTest(TestFormat):
if litConfig.noExecute:
return lit.Test.PASS, ''
header = f"Script:\n--\n{' '.join(cmd)}\n--\n"
def get_shard_header(shard_env):
shard_envs = '\n'.join([k + '=' + v for k, v in shard_env.items()])
return f"Script(shard):\n--\n%s\n%s\n--\n" % (shard_envs, ' '.join(cmd))
shard_header = get_shard_header(shard_env)
try:
out, err, exitCode = lit.util.executeCommand(
_, _, exitCode = lit.util.executeCommand(
cmd, env=test.config.environment,
timeout=litConfig.maxIndividualTestTime)
except lit.util.ExecuteCommandTimeoutException:
return (lit.Test.TIMEOUT,
f'{header}Reached timeout of '
return (lit.Test.TIMEOUT, f'{shard_header}Reached timeout of '
f'{litConfig.maxIndividualTestTime} seconds')
if exitCode:
return lit.Test.FAIL, header + out + err
if not os.path.exists(test.gtest_json_file):
errmsg = f"shard JSON output does not exist: %s" % (
test.gtest_json_file)
return lit.Test.FAIL, shard_header + errmsg
if '[ SKIPPED ] 1 test,' in out:
return lit.Test.SKIPPED, ''
if exitCode == 0:
return lit.Test.PASS, ''
passing_test_line = '[ PASSED ] 1 test.'
if passing_test_line not in out:
return (lit.Test.UNRESOLVED,
f'{header}Unable to find {passing_test_line} '
f'in gtest output:\n\n{out}{err}')
with open(test.gtest_json_file, encoding='utf-8') as f:
jf = json.load(f)
return lit.Test.PASS,''
if use_shuffle:
shard_env['GTEST_RANDOM_SEED'] = str(jf['random_seed'])
output = get_shard_header(shard_env) + '\n'
for testcase in jf['testsuites']:
for testinfo in testcase['testsuite']:
result = testinfo['result']
if result == 'SUPPRESSED' or result == 'SKIPPED':
continue
testname = testcase['name'] + '.' + testinfo['name']
header = f"Script:\n--\n%s --gtest_filter=%s\n--\n" % (
' '.join(cmd), testname)
if 'failures' in testinfo:
output += header
for fail in testinfo['failures']:
output += fail['failure'] + '\n'
output += '\n'
elif result != 'COMPLETED':
output += header
output += 'unresolved test result\n'
return lit.Test.FAIL, output
def prepareCmd(self, cmd):
"""Insert interpreter if needed.
@ -166,3 +190,65 @@ class GoogleTest(TestFormat):
else:
cmd = shlex.split(self.run_under) + cmd
return cmd
@staticmethod
def post_process_shard_results(selected_tests, discovered_tests):
def remove_gtest(tests):
return [t for t in tests if t.gtest_json_file is None]
discovered_tests = remove_gtest(discovered_tests)
gtests = [t for t in selected_tests if t.gtest_json_file]
selected_tests = remove_gtest(selected_tests)
for test in gtests:
# In case gtest has bugs such that no JSON file was emitted.
if not os.path.exists(test.gtest_json_file):
selected_tests.append(test)
discovered_tests.append(test)
continue
start_time = test.result.start
# Load json file to retrieve results.
with open(test.gtest_json_file, encoding='utf-8') as f:
testsuites = json.load(f)['testsuites']
for testcase in testsuites:
for testinfo in testcase['testsuite']:
# Ignore disabled tests.
if testinfo['result'] == 'SUPPRESSED':
continue
testPath = test.path_in_suite[:-2] + (testcase['name'],
testinfo['name'])
subtest = lit.Test.Test(test.suite, testPath,
test.config, test.file_path)
testname = testcase['name'] + '.' + testinfo['name']
header = f"Script:\n--\n%s --gtest_filter=%s\n--\n" % (
test.file_path, testname)
output = ''
if testinfo['result'] == 'SKIPPED':
returnCode = lit.Test.SKIPPED
elif 'failures' in testinfo:
returnCode = lit.Test.FAIL
output = header
for fail in testinfo['failures']:
output += fail['failure'] + '\n'
elif testinfo['result'] == 'COMPLETED':
returnCode = lit.Test.PASS
else:
returnCode = lit.Test.UNRESOLVED
output = header + 'unresolved test result\n'
elapsed_time = float(testinfo['time'][:-1])
res = lit.Test.Result(returnCode, output, elapsed_time)
res.pid = test.result.pid
res.start = start_time
start_time = start_time + elapsed_time
subtest.setResult(res)
selected_tests.append(subtest)
discovered_tests.append(subtest)
os.remove(test.gtest_json_file)
return selected_tests, discovered_tests


@ -18,6 +18,7 @@ import lit.reports
import lit.run
import lit.Test
import lit.util
from lit.formats.googletest import GoogleTest
from lit.TestTimes import record_test_times
@ -36,6 +37,7 @@ def main(builtin_params={}):
noExecute=opts.noExecute,
debug=opts.debug,
isWindows=is_windows,
order=opts.order,
params=params,
config_prefix=opts.configPrefix,
echo_all_commands=opts.echoAllCommands)
@ -86,11 +88,9 @@ def main(builtin_params={}):
# When running multiple shards, don't include skipped tests in the xunit
# output since merging the files will result in duplicates.
tests_for_report = discovered_tests
if opts.shard:
(run, shards) = opts.shard
selected_tests = filter_by_shard(selected_tests, run, shards, lit_config)
tests_for_report = selected_tests
if not selected_tests:
sys.stderr.write('warning: shard does not contain any tests. '
'Consider decreasing the number of shards.\n')
@ -108,11 +108,15 @@ def main(builtin_params={}):
record_test_times(selected_tests, lit_config)
selected_tests, discovered_tests = GoogleTest.post_process_shard_results(
selected_tests, discovered_tests)
if opts.time_tests:
print_histogram(discovered_tests)
print_results(discovered_tests, elapsed, opts)
tests_for_report = selected_tests if opts.shard else discovered_tests
for report in opts.reports:
report.write_results(tests_for_report, elapsed)


@ -0,0 +1,51 @@
import os
import sys
if len(sys.argv) == 3 and sys.argv[1] == "--gtest_list_tests":
if sys.argv[2] != '--gtest_filter=-*DISABLED_*':
raise ValueError("unexpected argument: %s" % (sys.argv[2]))
print("""\
FirstTest.
subTestA""")
sys.exit(0)
elif len(sys.argv) != 1:
# sharding and json output are specified using environment variables
raise ValueError("unexpected argument: %r" % (' '.join(sys.argv[1:])))
for e in ['GTEST_TOTAL_SHARDS', 'GTEST_SHARD_INDEX', 'GTEST_OUTPUT']:
if e not in os.environ:
raise ValueError("missing environment variables: " + e)
if not os.environ['GTEST_OUTPUT'].startswith('json:'):
raise ValueError("must emit json output: " + os.environ['GTEST_OUTPUT'])
output = """\
{
"testsuites": [
{
"name": "FirstTest",
"testsuite": [
{
"name": "subTestA",
"result": "COMPLETED",
"time": "0.001s"
}
]
}
]
}"""
dummy_output = """\
{
"testsuites": [
]
}"""
json_filename = os.environ['GTEST_OUTPUT'].split(':', 1)[1]
with open(json_filename, 'w') as f:
if os.environ['GTEST_SHARD_INDEX'] == '0':
f.write(output)
else:
f.write(dummy_output)
sys.exit(0)


@ -0,0 +1,3 @@
import lit.formats
config.name = 'googletest-cmd-wrapper'
config.test_format = lit.formats.GoogleTest('DummySubDir', 'Test' if 'win32' in sys.platform else '.exe', [sys.executable])


@ -0,0 +1,45 @@
#!/usr/bin/env python
import os
import sys
if len(sys.argv) == 3 and sys.argv[1] == "--gtest_list_tests":
if sys.argv[2] != '--gtest_filter=-*DISABLED_*':
raise ValueError("unexpected argument: %s" % (sys.argv[2]))
print("""\
FirstTest.
subTestA
subTestB
subTestC
subTestD
ParameterizedTest/0.
subTest
ParameterizedTest/1.
subTest""")
sys.exit(0)
elif len(sys.argv) != 1:
# sharding and json output are specified using environment variables
raise ValueError("unexpected argument: %r" % (' '.join(sys.argv[1:])))
for e in ['GTEST_TOTAL_SHARDS', 'GTEST_SHARD_INDEX', 'GTEST_OUTPUT']:
if e not in os.environ:
raise ValueError("missing environment variables: " + e)
if not os.environ['GTEST_OUTPUT'].startswith('json:'):
raise ValueError("must emit json output: " + os.environ['GTEST_OUTPUT'])
dummy_output = """\
{
"testsuites": [
]
}"""
if os.environ['GTEST_SHARD_INDEX'] == '0':
exit_code = 1
else:
json_filename = os.environ['GTEST_OUTPUT'].split(':', 1)[1]
with open(json_filename, 'w') as f:
f.write(dummy_output)
exit_code = 0
sys.exit(exit_code)


@ -1,3 +1,3 @@
import lit.formats
config.name = 'googletest-upstream-format'
config.name = 'googletest-crash'
config.test_format = lit.formats.GoogleTest('DummySubDir', 'Test')


@ -1,11 +1,11 @@
#!/usr/bin/env python
import os
import sys
if len(sys.argv) != 2:
raise ValueError("unexpected number of args")
if sys.argv[1] == "--gtest_list_tests":
if len(sys.argv) == 3 and sys.argv[1] == "--gtest_list_tests":
if sys.argv[2] != '--gtest_filter=-*DISABLED_*':
raise ValueError("unexpected argument: %s" % (sys.argv[2]))
print("""\
FirstTest.
subTestA
@ -17,31 +17,88 @@ ParameterizedTest/0.
ParameterizedTest/1.
subTest""")
sys.exit(0)
elif not sys.argv[1].startswith("--gtest_filter="):
raise ValueError("unexpected argument: %r" % (sys.argv[1]))
elif len(sys.argv) != 1:
# sharding and json output are specified using environment variables
raise ValueError("unexpected argument: %r" % (' '.join(sys.argv[1:])))
test_name = sys.argv[1].split('=',1)[1]
if test_name == 'FirstTest.subTestA':
print('I am subTest A, I PASS')
print('[ PASSED ] 1 test.')
sys.exit(0)
elif test_name == 'FirstTest.subTestB':
print('I am subTest B, I FAIL')
print('And I have two lines of output')
sys.exit(1)
elif test_name == 'FirstTest.subTestC':
print('I am subTest C, I am SKIPPED')
print('[ PASSED ] 0 tests.')
print('[ SKIPPED ] 1 test, listed below:')
print('[ SKIPPED ] FirstTest.subTestC')
sys.exit(0)
elif test_name == 'FirstTest.subTestD':
print('I am subTest D, I am UNRESOLVED')
sys.exit(0)
elif test_name in ('ParameterizedTest/0.subTest',
'ParameterizedTest/1.subTest'):
print('I am a parameterized test, I also PASS')
print('[ PASSED ] 1 test.')
sys.exit(0)
else:
raise SystemExit("error: invalid test name: %r" % (test_name,))
for e in ['GTEST_TOTAL_SHARDS', 'GTEST_SHARD_INDEX', 'GTEST_OUTPUT']:
if e not in os.environ:
raise ValueError("missing environment variables: " + e)
if not os.environ['GTEST_OUTPUT'].startswith('json:'):
raise ValueError("must emit json output: " + os.environ['GTEST_OUTPUT'])
output = """\
{
"random_seed": 123,
"testsuites": [
{
"name": "FirstTest",
"testsuite": [
{
"name": "subTestA",
"result": "COMPLETED",
"time": "0.001s"
},
{
"name": "subTestB",
"result": "COMPLETED",
"time": "0.001s",
"failures": [
{
"failure": "I am subTest B, I FAIL\\nAnd I have two lines of output",
"type": ""
}
]
},
{
"name": "subTestC",
"result": "SKIPPED",
"time": "0.001s"
},
{
"name": "subTestD",
"result": "UNRESOLVED",
"time": "0.001s"
}
]
},
{
"name": "ParameterizedTest/0",
"testsuite": [
{
"name": "subTest",
"result": "COMPLETED",
"time": "0.001s"
}
]
},
{
"name": "ParameterizedTest/1",
"testsuite": [
{
"name": "subTest",
"result": "COMPLETED",
"time": "0.001s"
}
]
}
]
}"""
dummy_output = """\
{
"testsuites": [
]
}"""
json_filename = os.environ['GTEST_OUTPUT'].split(':', 1)[1]
with open(json_filename, 'w') as f:
if os.environ['GTEST_SHARD_INDEX'] == '0':
f.write(output)
exit_code = 1
else:
f.write(dummy_output)
exit_code = 0
sys.exit(exit_code)


@ -1,29 +1,66 @@
#!/usr/bin/env python
import os
import sys
import time
if len(sys.argv) != 2:
raise ValueError("unexpected number of args")
if sys.argv[1] == "--gtest_list_tests":
if len(sys.argv) == 3 and sys.argv[1] == "--gtest_list_tests":
if sys.argv[2] != '--gtest_filter=-*DISABLED_*':
raise ValueError("unexpected argument: %s" % (sys.argv[2]))
print("""\
T.
QuickSubTest
InfiniteLoopSubTest
""")
sys.exit(0)
elif not sys.argv[1].startswith("--gtest_filter="):
raise ValueError("unexpected argument: %r" % (sys.argv[1]))
elif len(sys.argv) != 1:
# sharding and json output are specified using environment variables
raise ValueError("unexpected argument: %r" % (' '.join(sys.argv[1:])))
test_name = sys.argv[1].split('=',1)[1]
if test_name == 'T.QuickSubTest':
print('I am QuickSubTest, I PASS')
print('[ PASSED ] 1 test.')
sys.exit(0)
elif test_name == 'T.InfiniteLoopSubTest':
print('I am InfiniteLoopSubTest, I will hang')
while True:
pass
for e in ['GTEST_TOTAL_SHARDS', 'GTEST_SHARD_INDEX', 'GTEST_OUTPUT', 'GTEST_FILTER']:
if e not in os.environ:
raise ValueError("missing environment variables: " + e)
if not os.environ['GTEST_OUTPUT'].startswith('json:'):
raise ValueError("must emit json output: " + os.environ['GTEST_OUTPUT'])
output = """\
{
"testsuites": [
{
"name": "T",
"testsuite": [
{
"name": "QuickSubTest",
"result": "COMPLETED",
"time": "2s"
}
]
}
]
}"""
dummy_output = """\
{
"testsuites": [
]
}"""
json_filename = os.environ['GTEST_OUTPUT'].split(':', 1)[1]
if os.environ['GTEST_SHARD_INDEX'] == '0':
test_name = os.environ['GTEST_FILTER']
if test_name == 'QuickSubTest':
with open(json_filename, 'w') as f:
f.write(output)
exit_code = 0
elif test_name == 'InfiniteLoopSubTest':
while True:
pass
else:
raise SystemExit("error: invalid test name: %r" % (test_name,))
else:
raise SystemExit("error: invalid test name: %r" % (test_name,))
with open(json_filename, 'w') as f:
f.write(dummy_output)
exit_code = 0
sys.exit(exit_code)


@ -3,6 +3,7 @@ config.name = 'googletest-timeout'
config.test_format = lit.formats.GoogleTest('DummySubDir', 'Test')
configSetTimeout = lit_config.params.get('set_timeout', '0')
config.environment['GTEST_FILTER'] = lit_config.params.get('gtest_filter')
if configSetTimeout == '1':
# Try setting the max individual test time in the configuration


@ -1,50 +0,0 @@
#!/usr/bin/env python
import os
import sys
if len(sys.argv) != 2:
raise ValueError("unexpected number of args")
if sys.argv[1] == "--gtest_list_tests":
print(f"""\
Running main() from {os.getcwd()}/gtest_main.cc
FirstTest.
subTestA
subTestB
subTestC
subTestD
ParameterizedTest/0.
subTest
ParameterizedTest/1.
subTest""")
sys.exit(0)
elif not sys.argv[1].startswith("--gtest_filter="):
raise ValueError("unexpected argument: %r" % (sys.argv[1]))
test_name = sys.argv[1].split('=',1)[1]
print('Running main() from gtest_main.cc')
if test_name == 'FirstTest.subTestA':
print('I am subTest A, I PASS')
print('[ PASSED ] 1 test.')
sys.exit(0)
elif test_name == 'FirstTest.subTestB':
print('I am subTest B, I FAIL')
print('And I have two lines of output')
sys.exit(1)
elif test_name == 'FirstTest.subTestC':
print('I am subTest C, I am SKIPPED')
print('[ PASSED ] 0 tests.')
print('[ SKIPPED ] 1 test, listed below:')
print('[ SKIPPED ] FirstTest.subTestC')
sys.exit(0)
elif test_name == 'FirstTest.subTestD':
print('I am subTest D, I am UNRESOLVED')
sys.exit(0)
elif test_name in ('ParameterizedTest/0.subTest',
'ParameterizedTest/1.subTest'):
print('I am a parameterized test, I also PASS')
print('[ PASSED ] 1 test.')
sys.exit(0)
else:
raise SystemExit("error: invalid test name: %r" % (test_name,))


@ -0,0 +1,7 @@
# Check the GoogleTest format support command wrappers.
# RUN: %{lit} -v %{inputs}/googletest-cmd-wrapper | FileCheck %s
# CHECK: -- Testing:
# CHECK-NEXT: PASS: googletest-cmd-wrapper :: DummySubDir/OneTest.exe/0/1 (1 of 1)
# CHECK: Passed: 1


@ -0,0 +1,21 @@
# Check GoogleTest shard test crashes are handled.
# RUN: not %{lit} -v %{inputs}/googletest-crash | FileCheck %s
# CHECK: -- Testing:
# CHECK: FAIL: googletest-crash :: [[PATH:[Dd]ummy[Ss]ub[Dd]ir/]][[FILE:OneTest\.py]]/0
# CHECK: *** TEST 'googletest-crash :: [[PATH]][[FILE]]/0{{.*}} FAILED ***
# CHECK-NEXT: Script(shard):
# CHECK-NEXT: --
# CHECK-NEXT: GTEST_COLOR=no
# CHECK-NEXT: GTEST_SHUFFLE=0
# CHECK-NEXT: GTEST_TOTAL_SHARDS=6
# CHECK-NEXT: GTEST_SHARD_INDEX=0
# CHECK-NEXT: GTEST_OUTPUT=json:[[JSON:.*\.json]]
# CHECK-NEXT: [[FILE]]
# CHECK-NEXT: --
# CHECK-NEXT: shard JSON output does not exist: [[JSON]]
# CHECK-NEXT: ***
# CHECK: Failed Tests (1):
# CHECK-NEXT: googletest-crash :: [[PATH]][[FILE]]/0/6
# CHECK: Failed{{ *}}: 1


@ -1,6 +1,6 @@
# Check the various features of the GoogleTest format.
# RUN: not %{lit} -v %{inputs}/googletest-format > %t.out
# RUN: not %{lit} -v --order=random %{inputs}/googletest-format > %t.out
# FIXME: Temporarily dump test output so we can debug failing tests on
# buildbots.
# RUN: cat %t.out
@ -9,28 +9,37 @@
# END.
# CHECK: -- Testing:
# CHECK: PASS: googletest-format :: [[PATH:[Dd]ummy[Ss]ub[Dd]ir/]][[FILE:OneTest\.py]]/FirstTest.subTestA
# CHECK: FAIL: googletest-format :: [[PATH]][[FILE]]/[[TEST:FirstTest\.subTestB]]
# CHECK-NEXT: *** TEST 'googletest-format :: [[PATH]][[FILE]]/[[TEST]]' FAILED ***
# CHECK: FAIL: googletest-format :: [[PATH:[Dd]ummy[Ss]ub[Dd]ir/]][[FILE:OneTest\.py]]/0
# CHECK: *** TEST 'googletest-format :: [[PATH]][[FILE]]/0{{.*}} FAILED ***
# CHECK-NEXT: Script(shard):
# CHECK-NEXT: --
# CHECK-NEXT: GTEST_COLOR=no
# CHECK-NEXT: GTEST_SHUFFLE=1
# CHECK-NEXT: GTEST_TOTAL_SHARDS=6
# CHECK-NEXT: GTEST_SHARD_INDEX=0
# CHECK-NEXT: GTEST_OUTPUT=json:{{.*\.json}}
# CHECK-NEXT: GTEST_RANDOM_SEED=123
# CHECK-NEXT: [[FILE]]
# CHECK-NEXT: --
# CHECK-EMPTY:
# CHECK-NEXT: Script:
# CHECK-NEXT: --
# CHECK-NEXT: [[FILE]] --gtest_filter=[[TEST]]
# CHECK-NEXT: [[FILE]] --gtest_filter=FirstTest.subTestB
# CHECK-NEXT: --
# CHECK-NEXT: I am subTest B, I FAIL
# CHECK-NEXT: And I have two lines of output
# CHECK-EMPTY:
# CHECK: Script:
# CHECK-NEXT: --
# CHECK-NEXT: [[FILE]] --gtest_filter=FirstTest.subTestD
# CHECK-NEXT: --
# CHECK-NEXT: unresolved test result
# CHECK: ***
# CHECK: SKIPPED: googletest-format :: [[PATH]][[FILE]]/FirstTest.subTestC
# CHECK: UNRESOLVED: googletest-format :: [[PATH]][[FILE]]/[[TEST:FirstTest\.subTestD]]
# CHECK-NEXT: *** TEST 'googletest-format :: [[PATH]][[FILE]]/[[TEST]]' FAILED ***
# CHECK-NEXT: Script:
# CHECK-NEXT: --
# CHECK-NEXT: [[FILE]] --gtest_filter=[[TEST]]
# CHECK-NEXT: --
# CHECK-NEXT: Unable to find [ PASSED ] 1 test. in gtest output
# CHECK: I am subTest D, I am UNRESOLVED
# CHECK: PASS: googletest-format :: [[PATH]][[FILE]]/ParameterizedTest/0.subTest
# CHECK: PASS: googletest-format :: [[PATH]][[FILE]]/ParameterizedTest/1.subTest
# CHECK: Failed Tests (1)
# CHECK: Unresolved Tests (1):
# CHECK-NEXT: googletest-format :: [[PATH]][[FILE]]/FirstTest/subTestD
# CHECK: ***
# CHECK-NEXT: Failed Tests (1):
# CHECK-NEXT: googletest-format :: [[PATH]][[FILE]]/FirstTest/subTestB
# CHECK: Skipped{{ *}}: 1
# CHECK: Passed{{ *}}: 3
# CHECK: Unresolved{{ *}}: 1


@ -7,24 +7,30 @@
# Check that the per test timeout is enforced when running GTest tests.
#
# RUN: not %{lit} -v %{inputs}/googletest-timeout \
# RUN: --filter=InfiniteLoopSubTest --timeout=1 > %t.cmd.out
# RUN: --param gtest_filter=InfiniteLoopSubTest --timeout=1 > %t.cmd.out
# RUN: FileCheck --check-prefix=CHECK-INF < %t.cmd.out %s
# Check that the per test timeout is enforced when running GTest tests via
# the configuration file
#
# RUN: not %{lit} -v %{inputs}/googletest-timeout \
# RUN: --filter=InfiniteLoopSubTest --param set_timeout=1 \
# RUN: --param gtest_filter=InfiniteLoopSubTest --param set_timeout=1 \
# RUN: > %t.cfgset.out
# RUN: FileCheck --check-prefix=CHECK-INF < %t.cfgset.out %s
# CHECK-INF: -- Testing:
# CHECK-INF: TIMEOUT: googletest-timeout :: [[PATH:[Dd]ummy[Ss]ub[Dd]ir/]][[FILE:OneTest\.py]]/[[TEST:T\.InfiniteLoopSubTest]]
# CHECK-INF-NEXT: ******************** TEST 'googletest-timeout :: [[PATH]][[FILE]]/[[TEST]]' FAILED ********************
# CHECK-INF-NEXT: Script:
# CHECK-INF: TIMEOUT: googletest-timeout :: [[PATH:[Dd]ummy[Ss]ub[Dd]ir/]][[FILE:OneTest\.py]]/0/2
# CHECK-INF-NEXT: ******************** TEST 'googletest-timeout :: [[PATH]][[FILE]]/0/2' FAILED ********************
# CHECK-INF-NEXT: Script(shard):
# CHECK-INF-NEXT: --
# CHECK-INF-NEXT: [[FILE]] --gtest_filter=[[TEST]]
# CHECK-INF-NEXT: GTEST_COLOR=no
# CHECK-INF-NEXT: GTEST_SHUFFLE=0
# CHECK-INF-NEXT: GTEST_TOTAL_SHARDS=2
# CHECK-INF-NEXT: GTEST_SHARD_INDEX=0
# CHECK-INF-NEXT: GTEST_OUTPUT=json:{{.*\.json}}
# CHECK-INF-NEXT: [[FILE]]
# CHECK-INF-NEXT: --
# CHECK-INF-NEXT: Reached timeout of 1 seconds
# CHECK-INF: Timed Out: 1
###############################################################################
@ -35,15 +41,15 @@
###############################################################################
# RUN: %{lit} -v %{inputs}/googletest-timeout \
# RUN: --filter=QuickSubTest --timeout=3600 > %t.cmd.out
# RUN: --param gtest_filter=QuickSubTest --timeout=3600 > %t.cmd.out
# RUN: FileCheck --check-prefix=CHECK-QUICK < %t.cmd.out %s
# CHECK-QUICK: PASS: googletest-timeout :: {{[Dd]ummy[Ss]ub[Dd]ir}}/OneTest.py/T.QuickSubTest
# CHECK-QUICK: Passed : 1
# CHECK-QUICK: PASS: googletest-timeout :: {{[Dd]ummy[Ss]ub[Dd]ir}}/OneTest.py/0/2 {{.*}}
# CHECK-QUICK: Passed: 1
# Test per test timeout via a config file and on the command line.
# The value set on the command line should override the config file.
# RUN: %{lit} -v %{inputs}/googletest-timeout --filter=QuickSubTest \
# RUN: %{lit} -v %{inputs}/googletest-timeout --param gtest_filter=QuickSubTest \
# RUN: --param set_timeout=1 --timeout=3600 \
# RUN: > %t.cmdover.out 2> %t.cmdover.err
# RUN: FileCheck --check-prefix=CHECK-QUICK < %t.cmdover.out %s


@ -1,35 +0,0 @@
# Check the various features of the GoogleTest format.
# RUN: not %{lit} -v %{inputs}/googletest-upstream-format > %t.out
# RUN: FileCheck < %t.out %s
#
# END.
# CHECK: -- Testing:
# CHECK: PASS: googletest-upstream-format :: [[PATH:[Dd]ummy[Ss]ub[Dd]ir/]][[FILE:OneTest\.py]]/FirstTest.subTestA
# CHECK: FAIL: googletest-upstream-format :: [[PATH]][[FILE]]/[[TEST:FirstTest\.subTestB]]
# CHECK-NEXT: *** TEST 'googletest-upstream-format :: [[PATH]][[FILE]]/[[TEST]]' FAILED ***
# CHECK-NEXT: Script:
# CHECK-NEXT: --
# CHECK-NEXT: [[FILE]] --gtest_filter=[[TEST]]
# CHECK-NEXT: --
# CHECK-NEXT: Running main() from gtest_main.cc
# CHECK-NEXT: I am subTest B, I FAIL
# CHECK-NEXT: And I have two lines of output
# CHECK: SKIPPED: googletest-upstream-format :: [[PATH]][[FILE]]/FirstTest.subTestC
# CHECK: UNRESOLVED: googletest-upstream-format :: [[PATH]][[FILE]]/[[TEST:FirstTest\.subTestD]]
# CHECK-NEXT: *** TEST 'googletest-upstream-format :: [[PATH]][[FILE]]/[[TEST]]' FAILED ***
# CHECK-NEXT: Script:
# CHECK-NEXT: --
# CHECK-NEXT: [[FILE]] --gtest_filter=[[TEST]]
# CHECK-NEXT: --
# CHECK-NEXT: Unable to find [ PASSED ] 1 test. in gtest output
# CHECK: I am subTest D, I am UNRESOLVED
# CHECK: ***
# CHECK: PASS: googletest-upstream-format :: [[PATH]][[FILE]]/ParameterizedTest/0.subTest
# CHECK: PASS: googletest-upstream-format :: [[PATH]][[FILE]]/ParameterizedTest/1.subTest
# CHECK: Failed Tests (1)
# CHECK: Skipped{{ *}}: 1
# CHECK: Passed{{ *}}: 3
# CHECK: Unresolved{{ *}}: 1
# CHECK: Failed{{ *}}: 1


@ -1,5 +1,7 @@
# RUN: %{lit} -j 1 -v %{inputs}/test-data --resultdb-output %t.results.out > %t.out
# RUN: FileCheck < %t.results.out %s
# RUN: %{lit} -j 1 -v %{inputs}/googletest-cmd-wrapper --resultdb-output %t.results-unit.out > %t.out
# RUN: FileCheck < %t.results-unit.out --check-prefix=UNIT %s
# CHECK: {
# CHECK: "__version__"
@ -20,3 +22,23 @@
# CHECK-NEXT: }
# CHECK-NEXT: ]
# CHECK-NEXT: }
# UNIT: {
# UNIT: "__version__"
# UNIT: "elapsed"
# UNIT-NEXT: "tests": [
# UNIT-NEXT: {
# UNIT-NEXT: "artifacts": {
# UNIT-NEXT: "artifact-content-in-request": {
# UNIT-NEXT: "contents": ""
# UNIT-NEXT: }
# UNIT-NEXT: },
# UNIT-NEXT: "duration"
# UNIT-NEXT: "expected": true,
# UNIT-NEXT: "start_time"
# UNIT-NEXT: "status": "PASS",
# UNIT-NEXT: "summary_html": "<p><text-artifact artifact-id=\"artifact-content-in-request\"></p>",
# UNIT-NEXT: "testId": "googletest-cmd-wrapper :: DummySubDir/OneTest.exe/FirstTest/subTestA"
# UNIT-NEXT: }
# UNIT-NEXT: ]
# UNIT-NEXT: }


@ -34,6 +34,7 @@ class TestIntegratedTestKeywordParser(unittest.TestCase):
debug=False,
isWindows=(
platform.system() == 'Windows'),
order='smart',
params={})
TestIntegratedTestKeywordParser.litConfig = lit_config
# Perform test discovery.