[lit] Use sharding for GoogleTest format
This helps lit unit-test performance by a lot, especially on Windows. The gain comes from launching one GoogleTest executable for many subtests instead of one process per subtest, which is the current behavior. The shards are executed by the test runner, and their results are stored in the JSON format supported by GoogleTest; in the test-reporting stage, the results are read back from the JSON files to build the test summary.

On my Win10 desktop: `check-clang-unit` went from 177s to 37s, and `check-llvm-unit` from 38s to 11s. On my Linux machine: `check-clang-unit` went from 46s to 7s, and `check-llvm-unit` from 8s to 4s.

Reviewed By: yln, rnk

Differential Revision: https://reviews.llvm.org/D122251
This commit is contained in:
  parent f830392be7
  commit a87ba5c86d
Changed directories:
  llvm/
    unittests/Support
    utils/lit/
      lit
      tests/
        Inputs/
          googletest-crash
          googletest-format/DummySubDir
          googletest-upstream-format/DummySubDir
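Background for the diff below: GoogleTest's built-in sharding and JSON reporting are controlled entirely through environment variables, which is what this patch drives from lit. The following standalone Python sketch illustrates that protocol; the executable path, shard numbers, and output file name are hypothetical placeholders, not values taken from the patch.

import json
import os
import subprocess

# Hypothetical gtest binary and shard parameters, for illustration only.
executable = './SupportTests'
shard_index, total_shards = 0, 4
json_file = 'SupportTests-shard-0-of-4.json'

env = dict(os.environ)
env.update({
    'GTEST_COLOR': 'no',
    'GTEST_TOTAL_SHARDS': str(total_shards),  # run only every Nth test...
    'GTEST_SHARD_INDEX': str(shard_index),    # ...starting at this offset
    'GTEST_OUTPUT': 'json:' + json_file,      # write a JSON report for this shard
})
subprocess.run([executable], env=env)

# The JSON report lists per-test results, which lit later turns back into
# individual test records during reporting.
with open(json_file, encoding='utf-8') as f:
    for suite in json.load(f)['testsuites']:
        for test in suite['testsuite']:
            print(suite['name'] + '.' + test['name'], test['result'])
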
@@ -178,6 +178,11 @@ TEST(CrashRecoveryTest, UnixCRCReturnCode) {
   int Res = setenv("LLVM_CRC_UNIXCRCRETURNCODE", "1", 0);
   ASSERT_EQ(Res, 0);

+  Res = unsetenv("GTEST_SHARD_INDEX");
+  ASSERT_EQ(Res, 0);
+  Res = unsetenv("GTEST_TOTAL_SHARDS");
+  ASSERT_EQ(Res, 0);
+
   std::string Error;
   bool ExecutionFailed;
   int RetCode = ExecuteAndWait(Executable, argv, {}, {}, 0, 0, &Error,

@@ -95,7 +95,9 @@ protected:
   };

   while (*EnvP != nullptr) {
-    EnvTable.emplace_back(prepareEnvVar(*EnvP));
+    auto S = prepareEnvVar(*EnvP);
+    if (!StringRef(S).startswith("GTEST_"))
+      EnvTable.emplace_back(S);
     ++EnvP;
   }
 }

@@ -219,11 +219,12 @@ class TestSuite:
 class Test:
     """Test - Information on a single test instance."""

-    def __init__(self, suite, path_in_suite, config, file_path = None):
+    def __init__(self, suite, path_in_suite, config, file_path = None, gtest_json_file = None):
         self.suite = suite
         self.path_in_suite = path_in_suite
         self.config = config
         self.file_path = file_path
+        self.gtest_json_file = gtest_json_file

         # A list of conditions under which this test is expected to fail.
         # Each condition is a boolean expression of features and target

@@ -258,7 +259,7 @@ class Test:
         # The previous test elapsed time, if applicable.
         self.previous_elapsed = 0.0

-        if '/'.join(path_in_suite) in suite.test_times:
+        if suite.test_times and '/'.join(path_in_suite) in suite.test_times:
             time = suite.test_times['/'.join(path_in_suite)]
             self.previous_elapsed = abs(time)
             self.previous_failure = time < 0

@@ -28,7 +28,7 @@ class TestingConfig(object):
                      'TMPDIR', 'TMP', 'TEMP', 'TEMPDIR', 'AVRLIT_BOARD',
                      'AVRLIT_PORT', 'FILECHECK_OPTS', 'VCINSTALLDIR',
                      'VCToolsinstallDir', 'VSINSTALLDIR', 'WindowsSdkDir',
-                     'WindowsSDKLibVersion', 'SOURCE_DATE_EPOCH']
+                     'WindowsSDKLibVersion', 'SOURCE_DATE_EPOCH','GTEST_FILTER']

         if sys.platform == 'win32':
             pass_vars.append('COMSPEC')

@@ -1,8 +1,8 @@
 from __future__ import absolute_import
+import json
+import math
 import os
-import re
 import shlex
-import subprocess
 import sys

 import lit.Test

@@ -25,74 +25,19 @@ class GoogleTest(TestFormat):
         self.test_suffixes = {exe_suffix, test_suffix + '.py'}
         self.run_under = run_under

-    def getGTestTests(self, path, litConfig, localConfig):
-        """getGTestTests(path) - [name]
-
-        Return the tests available in gtest executable.
-
-        Args:
-          path: String path to a gtest executable
-          litConfig: LitConfig instance
-          localConfig: TestingConfig instance"""
-
-        list_test_cmd = self.prepareCmd([path, '--gtest_list_tests'])
-
-        try:
-            output = subprocess.check_output(list_test_cmd,
-                                             env=localConfig.environment)
-        except subprocess.CalledProcessError as exc:
-            litConfig.warning(
-                "unable to discover google-tests in %r: %s. Process output: %s"
-                % (path, sys.exc_info()[1], exc.output))
-            # This doesn't look like a valid gtest file. This can
-            # have a number of causes, none of them good. For
-            # instance, we could have created a broken executable.
-            # Alternatively, someone has cruft in their test
-            # directory. If we don't return a test here, then no
-            # failures will get reported, so return a dummy test name
-            # so that the failure is reported later.
-            yield 'failed_to_discover_tests_from_gtest'
-            return
-
-        upstream_prefix = re.compile('Running main\(\) from .*gtest_main\.cc')
-        nested_tests = []
-        for ln in output.splitlines(False): # Don't keep newlines.
-            ln = lit.util.to_string(ln)
-
-            if upstream_prefix.fullmatch(ln):
-                # Upstream googletest prints this to stdout prior to running
-                # tests. LLVM removed that print statement in r61540, but we
-                # handle it here in case upstream googletest is being used.
-                continue
-
-            # The test name list includes trailing comments beginning with
-            # a '#' on some lines, so skip those. We don't support test names
-            # that use escaping to embed '#' into their name as the names come
-            # from C++ class and method names where such things are hard and
-            # uninteresting to support.
-            ln = ln.split('#', 1)[0].rstrip()
-            if not ln.lstrip():
-                continue
-
-            index = 0
-            while ln[index*2:index*2+2] == '  ':
-                index += 1
-            while len(nested_tests) > index:
-                nested_tests.pop()
-
-            ln = ln[index*2:]
-            if ln.endswith('.'):
-                nested_tests.append(ln)
-            elif any([name.startswith('DISABLED_')
-                      for name in nested_tests + [ln]]):
-                # Gtest will internally skip these tests. No need to launch a
-                # child process for it.
-                continue
-            else:
-                yield ''.join(nested_tests) + ln
+    def get_num_tests(self, path, localConfig):
+        cmd = [path, '--gtest_list_tests', '--gtest_filter=-*DISABLED_*']
+        if cmd[0].endswith('.py'):
+            cmd = [sys.executable] + cmd
+        out, _, exitCode = lit.util.executeCommand(cmd, env=localConfig.environment)
+        if exitCode == 0:
+            return sum(map(lambda line: line.startswith('  '), out.splitlines()))
+        return None

     def getTestsInDirectory(self, testSuite, path_in_suite,
                             litConfig, localConfig):
+        init_shard_size = 512 # number of tests in a shard
+        core_count = lit.util.usable_core_count()
         source_path = testSuite.getSourcePath(path_in_suite)
         for subdir in self.test_sub_dirs:
             dir_path = os.path.join(source_path, subdir)

@@ -102,13 +47,40 @@ class GoogleTest(TestFormat):
                                              suffixes=self.test_suffixes):
                 # Discover the tests in this executable.
                 execpath = os.path.join(source_path, subdir, fn)
-                testnames = self.getGTestTests(execpath, litConfig, localConfig)
-                for testname in testnames:
-                    testPath = path_in_suite + (subdir, fn, testname)
-                    yield lit.Test.Test(testSuite, testPath, localConfig,
-                                        file_path=execpath)
+                num_tests = self.get_num_tests(execpath, localConfig)
+                if num_tests is not None:
+                    # Compute the number of shards.
+                    shard_size = init_shard_size
+                    nshard = int(math.ceil(num_tests/shard_size))
+                    while nshard < core_count and shard_size > 1:
+                        shard_size = shard_size//2
+                        nshard = int(math.ceil(num_tests/shard_size))
+
+                    # Create one lit test for each shard.
+                    for idx in range(nshard):
+                        testPath = path_in_suite + (subdir, fn,
+                                                    str(idx), str(nshard))
+                        json_file = '-'.join([execpath, testSuite.config.name,
+                                              str(os.getpid()), str(idx),
+                                              str(nshard)]) + '.json'
+                        yield lit.Test.Test(testSuite, testPath, localConfig,
+                                            file_path=execpath,
+                                            gtest_json_file=json_file)
+                else:
+                    # This doesn't look like a valid gtest file. This can
+                    # have a number of causes, none of them good. For
+                    # instance, we could have created a broken executable.
+                    # Alternatively, someone has cruft in their test
+                    # directory. If we don't return a test here, then no
+                    # failures will get reported, so return a dummy test name
+                    # so that the failure is reported later.
+                    testPath = path_in_suite + (subdir, fn, 'failed_to_discover_tests_from_gtest')
+                    yield lit.Test.Test(testSuite, testPath, localConfig, file_path=execpath)

     def execute(self, test, litConfig):
+        if test.gtest_json_file is None:
+            return lit.Test.FAIL, ''
+
         testPath,testName = os.path.split(test.getSourcePath())
         while not os.path.exists(testPath):
             # Handle GTest parametrized and typed tests, whose name includes

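A note on the shard-size computation added above: it starts from 512 tests per shard and halves the shard size until there are at least as many shards as usable cores (or the shard size reaches 1). Below is a standalone sketch of the same loop with made-up numbers, purely for illustration.

import math

# Hypothetical inputs: 1000 discovered tests on a 16-core machine.
num_tests, core_count = 1000, 16

shard_size = 512
nshard = int(math.ceil(num_tests / shard_size))
while nshard < core_count and shard_size > 1:
    shard_size = shard_size // 2
    nshard = int(math.ceil(num_tests / shard_size))

# Shard size shrinks 512 -> 256 -> 128 -> 64, giving 2, 4, 8, then 16 shards,
# which is enough to keep all 16 cores busy.
print(shard_size, nshard)  # 64 16
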
@@ -116,7 +88,12 @@ class GoogleTest(TestFormat):
             testPath, namePrefix = os.path.split(testPath)
             testName = namePrefix + '/' + testName

-        cmd = [testPath, '--gtest_filter=' + testName]
+        testName,total_shards = os.path.split(testName)
+        testName,shard_idx = os.path.split(testName)
+        shard_env = {'GTEST_COLOR':'no','GTEST_TOTAL_SHARDS':total_shards, 'GTEST_SHARD_INDEX':shard_idx, 'GTEST_OUTPUT':'json:'+test.gtest_json_file}
+        test.config.environment.update(shard_env)
+
+        cmd = [testPath]
         cmd = self.prepareCmd(cmd)
         if litConfig.useValgrind:
             cmd = litConfig.valgrindArgs + cmd

@@ -124,30 +101,43 @@ class GoogleTest(TestFormat):
         if litConfig.noExecute:
             return lit.Test.PASS, ''

-        header = f"Script:\n--\n{' '.join(cmd)}\n--\n"
+        shard_envs= '\n'.join([k + '=' + v for k, v in shard_env.items()])
+        shard_header = f"Script(shard):\n--\n{shard_envs}\n{' '.join(cmd)}\n--\n"

         try:
-            out, err, exitCode = lit.util.executeCommand(
+            _, _, exitCode = lit.util.executeCommand(
                 cmd, env=test.config.environment,
                 timeout=litConfig.maxIndividualTestTime)
         except lit.util.ExecuteCommandTimeoutException:
             return (lit.Test.TIMEOUT,
-                    f'{header}Reached timeout of '
+                    f'{shard_header}Reached timeout of '
                     f'{litConfig.maxIndividualTestTime} seconds')

+        if not os.path.exists(test.gtest_json_file):
+            errmsg = f"shard JSON output does not exist: %s" % (test.gtest_json_file)
+            return lit.Test.FAIL, shard_header + errmsg
+
         if exitCode:
-            return lit.Test.FAIL, header + out + err
-
-        if '[  SKIPPED ] 1 test,' in out:
-            return lit.Test.SKIPPED, ''
-
-        passing_test_line = '[  PASSED  ] 1 test.'
-        if passing_test_line not in out:
-            return (lit.Test.UNRESOLVED,
-                    f'{header}Unable to find {passing_test_line} '
-                    f'in gtest output:\n\n{out}{err}')
-
-        return lit.Test.PASS,''
+            output = shard_header + '\n'
+            with open(test.gtest_json_file, encoding='utf-8') as f:
+                testsuites = json.load(f)['testsuites']
+                for testcase in testsuites:
+                    for testinfo in testcase['testsuite']:
+                        if testinfo['result'] == 'SUPPRESSED' or testinfo['result'] == 'SKIPPED':
+                            continue
+                        testname = testcase['name'] + '.' + testinfo['name']
+                        header = f"Script:\n--\n{' '.join(cmd)} --gtest_filter={testname}\n--\n"
+                        if 'failures' in testinfo:
+                            output += header
+                            for fail in testinfo['failures']:
+                                output += fail['failure'] + '\n'
+                            output += '\n'
+                        elif testinfo['result'] != 'COMPLETED':
+                            output += header
+                            output += 'unresolved test result\n'
+            return lit.Test.FAIL, output
+        else:
+            return lit.Test.PASS, ''

     def prepareCmd(self, cmd):
         """Insert interpreter if needed.

@@ -166,3 +156,61 @@ class GoogleTest(TestFormat):
         else:
             cmd = shlex.split(self.run_under) + cmd
         return cmd
+
+    @staticmethod
+    def post_process_shard_results(selected_tests, discovered_tests):
+        def remove_gtest(tests):
+            idxs = []
+            for idx, t in enumerate(tests):
+                if t.gtest_json_file:
+                    idxs.append(idx)
+            for i in range(len(idxs)):
+                del tests[idxs[i]-i]
+
+        remove_gtest(discovered_tests)
+        gtests = [t for t in selected_tests if t.gtest_json_file]
+        remove_gtest(selected_tests)
+        for test in gtests:
+            # In case gtest has bugs such that no JSON file was emitted.
+            if not os.path.exists(test.gtest_json_file):
+                selected_tests.append(test)
+                discovered_tests.append(test)
+                continue
+
+            # Load json file to retrieve results.
+            with open(test.gtest_json_file, encoding='utf-8') as f:
+                testsuites = json.load(f)['testsuites']
+                for testcase in testsuites:
+                    for testinfo in testcase['testsuite']:
+                        # Ignore disabled tests.
+                        if testinfo['result'] == 'SUPPRESSED':
+                            continue
+
+                        testPath = test.path_in_suite[:-2] + (testcase['name'], testinfo['name'])
+                        subtest = lit.Test.Test(test.suite, testPath,
+                                                test.config, test.file_path)
+
+                        testname = testcase['name'] + '.' + testinfo['name']
+                        header = f"Script:\n--\n{test.file_path} --gtest_filter={testname}\n--\n"
+
+                        output = ''
+                        if testinfo['result'] == 'SKIPPED':
+                            returnCode = lit.Test.SKIPPED
+                        elif 'failures' in testinfo:
+                            returnCode = lit.Test.FAIL
+                            output = header
+                            for fail in testinfo['failures']:
+                                output += fail['failure'] + '\n'
+                        elif testinfo['result'] == 'COMPLETED':
+                            returnCode = lit.Test.PASS
+                        else:
+                            returnCode = lit.Test.UNRESOLVED
+                            output = header + 'unresolved test result\n'
+
+                        subtest.setResult(lit.Test.Result(returnCode, output, float(testinfo['time'][:-1])))
+
+                        selected_tests.append(subtest)
+                        discovered_tests.append(subtest)
+            os.remove(test.gtest_json_file)
+
+        return selected_tests, discovered_tests

@@ -18,6 +18,7 @@ import lit.reports
 import lit.run
 import lit.Test
 import lit.util
+from lit.formats.googletest import GoogleTest
 from lit.TestTimes import record_test_times


@@ -108,6 +109,9 @@ def main(builtin_params={}):

     record_test_times(selected_tests, lit_config)

+    selected_tests, discovered_tests = GoogleTest.post_process_shard_results(
+        selected_tests, discovered_tests)
+
     if opts.time_tests:
         print_histogram(discovered_tests)

@@ -0,0 +1,45 @@
+#!/usr/bin/env python
+
+import os
+import sys
+
+if len(sys.argv) == 3 and sys.argv[1] == "--gtest_list_tests":
+    if sys.argv[2] != '--gtest_filter=-*DISABLED_*':
+        raise ValueError("unexpected argument: %s" % (sys.argv[2]))
+    print("""\
+FirstTest.
+  subTestA
+  subTestB
+  subTestC
+  subTestD
+ParameterizedTest/0.
+  subTest
+ParameterizedTest/1.
+  subTest""")
+    sys.exit(0)
+elif len(sys.argv) != 1:
+    # sharding and json output are specified using environment variables
+    raise ValueError("unexpected argument: %r" % (' '.join(sys.argv[1:])))
+
+for e in ['GTEST_TOTAL_SHARDS', 'GTEST_SHARD_INDEX', 'GTEST_OUTPUT']:
+    if e not in os.environ:
+        raise ValueError("missing environment variables: " + e)
+
+if not os.environ['GTEST_OUTPUT'].startswith('json:'):
+    raise ValueError("must emit json output: " + os.environ['GTEST_OUTPUT'])
+
+dummy_output = """\
+{
+"testsuites": [
+]
+}"""
+
+if os.environ['GTEST_SHARD_INDEX'] == '0':
+    exit_code = 1
+else:
+    json_filename = os.environ['GTEST_OUTPUT'].split(':', 1)[1]
+    with open(json_filename, 'w') as f:
+        f.write(dummy_output)
+    exit_code = 0
+
+sys.exit(exit_code)

@@ -1,3 +1,3 @@
 import lit.formats
-config.name = 'googletest-upstream-format'
+config.name = 'googletest-crash'
 config.test_format = lit.formats.GoogleTest('DummySubDir', 'Test')

@@ -1,11 +1,11 @@
 #!/usr/bin/env python

 import os
 import sys

-if len(sys.argv) != 2:
-    raise ValueError("unexpected number of args")
-
-if sys.argv[1] == "--gtest_list_tests":
+if len(sys.argv) == 3 and sys.argv[1] == "--gtest_list_tests":
+    if sys.argv[2] != '--gtest_filter=-*DISABLED_*':
+        raise ValueError("unexpected argument: %s" % (sys.argv[2]))
     print("""\
 FirstTest.
   subTestA

@@ -17,31 +17,87 @@ ParameterizedTest/0.
 ParameterizedTest/1.
   subTest""")
     sys.exit(0)
-elif not sys.argv[1].startswith("--gtest_filter="):
-    raise ValueError("unexpected argument: %r" % (sys.argv[1]))
+elif len(sys.argv) != 1:
+    # sharding and json output are specified using environment variables
+    raise ValueError("unexpected argument: %r" % (' '.join(sys.argv[1:])))

-test_name = sys.argv[1].split('=',1)[1]
-if test_name == 'FirstTest.subTestA':
-    print('I am subTest A, I PASS')
-    print('[  PASSED  ] 1 test.')
-    sys.exit(0)
-elif test_name == 'FirstTest.subTestB':
-    print('I am subTest B, I FAIL')
-    print('And I have two lines of output')
-    sys.exit(1)
-elif test_name == 'FirstTest.subTestC':
-    print('I am subTest C, I am SKIPPED')
-    print('[  PASSED  ] 0 tests.')
-    print('[  SKIPPED ] 1 test, listed below:')
-    print('[  SKIPPED ] FirstTest.subTestC')
-    sys.exit(0)
-elif test_name == 'FirstTest.subTestD':
-    print('I am subTest D, I am UNRESOLVED')
-    sys.exit(0)
-elif test_name in ('ParameterizedTest/0.subTest',
-                   'ParameterizedTest/1.subTest'):
-    print('I am a parameterized test, I also PASS')
-    print('[  PASSED  ] 1 test.')
-    sys.exit(0)
-else:
-    raise SystemExit("error: invalid test name: %r" % (test_name,))
+for e in ['GTEST_TOTAL_SHARDS', 'GTEST_SHARD_INDEX', 'GTEST_OUTPUT']:
+    if e not in os.environ:
+        raise ValueError("missing environment variables: " + e)
+
+if not os.environ['GTEST_OUTPUT'].startswith('json:'):
+    raise ValueError("must emit json output: " + os.environ['GTEST_OUTPUT'])
+
+output = """\
+{
+"testsuites": [
+    {
+        "name": "FirstTest",
+        "testsuite": [
+            {
+                "name": "subTestA",
+                "result": "COMPLETED",
+                "time": "0.001s"
+            },
+            {
+                "name": "subTestB",
+                "result": "COMPLETED",
+                "time": "0.001s",
+                "failures": [
+                    {
+                        "failure": "I am subTest B, I FAIL\\nAnd I have two lines of output",
+                        "type": ""
+                    }
+                ]
+            },
+            {
+                "name": "subTestC",
+                "result": "SKIPPED",
+                "time": "0.001s"
+            },
+            {
+                "name": "subTestD",
+                "result": "UNRESOLVED",
+                "time": "0.001s"
+            }
+        ]
+    },
+    {
+        "name": "ParameterizedTest/0",
+        "testsuite": [
+            {
+                "name": "subTest",
+                "result": "COMPLETED",
+                "time": "0.001s"
+            }
+        ]
+    },
+    {
+        "name": "ParameterizedTest/1",
+        "testsuite": [
+            {
+                "name": "subTest",
+                "result": "COMPLETED",
+                "time": "0.001s"
+            }
+        ]
+    }
+]
+}"""
+
+dummy_output = """\
+{
+"testsuites": [
+]
+}"""
+
+json_filename = os.environ['GTEST_OUTPUT'].split(':', 1)[1]
+with open(json_filename, 'w') as f:
+    if os.environ['GTEST_SHARD_INDEX'] == '0':
+        f.write(output)
+        exit_code = 1
+    else:
+        f.write(dummy_output)
+        exit_code = 0
+
+sys.exit(exit_code)

@@ -1,50 +0,0 @@
-#!/usr/bin/env python
-
-import os
-import sys
-
-if len(sys.argv) != 2:
-    raise ValueError("unexpected number of args")
-
-if sys.argv[1] == "--gtest_list_tests":
-    print(f"""\
-Running main() from {os.getcwd()}/gtest_main.cc
-FirstTest.
-  subTestA
-  subTestB
-  subTestC
-  subTestD
-ParameterizedTest/0.
-  subTest
-ParameterizedTest/1.
-  subTest""")
-    sys.exit(0)
-elif not sys.argv[1].startswith("--gtest_filter="):
-    raise ValueError("unexpected argument: %r" % (sys.argv[1]))
-
-test_name = sys.argv[1].split('=',1)[1]
-print('Running main() from gtest_main.cc')
-if test_name == 'FirstTest.subTestA':
-    print('I am subTest A, I PASS')
-    print('[  PASSED  ] 1 test.')
-    sys.exit(0)
-elif test_name == 'FirstTest.subTestB':
-    print('I am subTest B, I FAIL')
-    print('And I have two lines of output')
-    sys.exit(1)
-elif test_name == 'FirstTest.subTestC':
-    print('I am subTest C, I am SKIPPED')
-    print('[  PASSED  ] 0 tests.')
-    print('[  SKIPPED ] 1 test, listed below:')
-    print('[  SKIPPED ] FirstTest.subTestC')
-    sys.exit(0)
-elif test_name == 'FirstTest.subTestD':
-    print('I am subTest D, I am UNRESOLVED')
-    sys.exit(0)
-elif test_name in ('ParameterizedTest/0.subTest',
-                   'ParameterizedTest/1.subTest'):
-    print('I am a parameterized test, I also PASS')
-    print('[  PASSED  ] 1 test.')
-    sys.exit(0)
-else:
-    raise SystemExit("error: invalid test name: %r" % (test_name,))

@@ -0,0 +1,20 @@
+# Check GoogleTest shard test crashes are handled.
+
+# RUN: not %{lit} -v %{inputs}/googletest-crash | FileCheck %s
+
+# CHECK: -- Testing:
+# CHECK: FAIL: googletest-crash :: [[PATH:[Dd]ummy[Ss]ub[Dd]ir/]][[FILE:OneTest\.py]]/0
+# CHECK: *** TEST 'googletest-crash :: [[PATH]][[FILE]]/0{{.*}} FAILED ***
+# CHECK-NEXT: Script(shard):
+# CHECK-NEXT: --
+# CHECK-NEXT: GTEST_COLOR=no
+# CHECK-NEXT: GTEST_TOTAL_SHARDS=6
+# CHECK-NEXT: GTEST_SHARD_INDEX=0
+# CHECK-NEXT: GTEST_OUTPUT=json:[[JSON:.*\.json]]
+# CHECK-NEXT: [[FILE]]
+# CHECK-NEXT: --
+# CHECK-NEXT: shard JSON output does not exist: [[JSON]]
+# CHECK-NEXT: ***
+# CHECK: Failed Tests (1):
+# CHECK-NEXT: googletest-crash :: [[PATH]][[FILE]]/0/6
+# CHECK: Failed{{ *}}: 1

@@ -9,28 +9,35 @@
 # END.

 # CHECK: -- Testing:
-# CHECK: PASS: googletest-format :: [[PATH:[Dd]ummy[Ss]ub[Dd]ir/]][[FILE:OneTest\.py]]/FirstTest.subTestA
-# CHECK: FAIL: googletest-format :: [[PATH]][[FILE]]/[[TEST:FirstTest\.subTestB]]
-# CHECK-NEXT: *** TEST 'googletest-format :: [[PATH]][[FILE]]/[[TEST]]' FAILED ***
+# CHECK: FAIL: googletest-format :: [[PATH:[Dd]ummy[Ss]ub[Dd]ir/]][[FILE:OneTest\.py]]/0
+# CHECK: *** TEST 'googletest-format :: [[PATH]][[FILE]]/0{{.*}} FAILED ***
+# CHECK-NEXT: Script(shard):
+# CHECK-NEXT: --
+# CHECK-NEXT: GTEST_COLOR=no
+# CHECK-NEXT: GTEST_TOTAL_SHARDS=6
+# CHECK-NEXT: GTEST_SHARD_INDEX=0
+# CHECK-NEXT: GTEST_OUTPUT=json:{{.*\.json}}
+# CHECK-NEXT: [[FILE]]
+# CHECK-NEXT: --
+# CHECK-EMPTY:
 # CHECK-NEXT: Script:
 # CHECK-NEXT: --
-# CHECK-NEXT: [[FILE]] --gtest_filter=[[TEST]]
+# CHECK-NEXT: [[FILE]] --gtest_filter=FirstTest.subTestB
 # CHECK-NEXT: --
 # CHECK-NEXT: I am subTest B, I FAIL
 # CHECK-NEXT: And I have two lines of output
+# CHECK-EMPTY:
+# CHECK: Script:
+# CHECK-NEXT: --
+# CHECK-NEXT: [[FILE]] --gtest_filter=FirstTest.subTestD
+# CHECK-NEXT: --
+# CHECK-NEXT: unresolved test result
+# CHECK: ***
-# CHECK: SKIPPED: googletest-format :: [[PATH]][[FILE]]/FirstTest.subTestC
-# CHECK: UNRESOLVED: googletest-format :: [[PATH]][[FILE]]/[[TEST:FirstTest\.subTestD]]
-# CHECK-NEXT: *** TEST 'googletest-format :: [[PATH]][[FILE]]/[[TEST]]' FAILED ***
-# CHECK-NEXT: Script:
-# CHECK-NEXT: --
-# CHECK-NEXT: [[FILE]] --gtest_filter=[[TEST]]
-# CHECK-NEXT: --
-# CHECK-NEXT: Unable to find [ PASSED ] 1 test. in gtest output
-# CHECK: I am subTest D, I am UNRESOLVED
-# CHECK: PASS: googletest-format :: [[PATH]][[FILE]]/ParameterizedTest/0.subTest
-# CHECK: PASS: googletest-format :: [[PATH]][[FILE]]/ParameterizedTest/1.subTest
-# CHECK: Failed Tests (1)
+# CHECK: Unresolved Tests (1):
+# CHECK-NEXT: googletest-format :: [[PATH]][[FILE]]/FirstTest/subTestD
+# CHECK: ***
+# CHECK-NEXT: Failed Tests (1):
+# CHECK-NEXT: googletest-format :: [[PATH]][[FILE]]/FirstTest/subTestB
 # CHECK: Skipped{{ *}}: 1
 # CHECK: Passed{{ *}}: 3
 # CHECK: Unresolved{{ *}}: 1

@@ -1,35 +0,0 @@
-# Check the various features of the GoogleTest format.
-
-# RUN: not %{lit} -v %{inputs}/googletest-upstream-format > %t.out
-# RUN: FileCheck < %t.out %s
-#
-# END.
-
-# CHECK: -- Testing:
-# CHECK: PASS: googletest-upstream-format :: [[PATH:[Dd]ummy[Ss]ub[Dd]ir/]][[FILE:OneTest\.py]]/FirstTest.subTestA
-# CHECK: FAIL: googletest-upstream-format :: [[PATH]][[FILE]]/[[TEST:FirstTest\.subTestB]]
-# CHECK-NEXT: *** TEST 'googletest-upstream-format :: [[PATH]][[FILE]]/[[TEST]]' FAILED ***
-# CHECK-NEXT: Script:
-# CHECK-NEXT: --
-# CHECK-NEXT: [[FILE]] --gtest_filter=[[TEST]]
-# CHECK-NEXT: --
-# CHECK-NEXT: Running main() from gtest_main.cc
-# CHECK-NEXT: I am subTest B, I FAIL
-# CHECK-NEXT: And I have two lines of output
-# CHECK: SKIPPED: googletest-upstream-format :: [[PATH]][[FILE]]/FirstTest.subTestC
-# CHECK: UNRESOLVED: googletest-upstream-format :: [[PATH]][[FILE]]/[[TEST:FirstTest\.subTestD]]
-# CHECK-NEXT: *** TEST 'googletest-upstream-format :: [[PATH]][[FILE]]/[[TEST]]' FAILED ***
-# CHECK-NEXT: Script:
-# CHECK-NEXT: --
-# CHECK-NEXT: [[FILE]] --gtest_filter=[[TEST]]
-# CHECK-NEXT: --
-# CHECK-NEXT: Unable to find [ PASSED ] 1 test. in gtest output
-# CHECK: I am subTest D, I am UNRESOLVED
-# CHECK: ***
-# CHECK: PASS: googletest-upstream-format :: [[PATH]][[FILE]]/ParameterizedTest/0.subTest
-# CHECK: PASS: googletest-upstream-format :: [[PATH]][[FILE]]/ParameterizedTest/1.subTest
-# CHECK: Failed Tests (1)
-# CHECK: Skipped{{ *}}: 1
-# CHECK: Passed{{ *}}: 3
-# CHECK: Unresolved{{ *}}: 1
-# CHECK: Failed{{ *}}: 1