Revert "Reland "[lit] Use sharding for GoogleTest format""

This reverts commit 948f3deca9.
Alex Brachet 2022-04-04 16:33:33 +00:00
parent aebd3f0799
commit 47f59df892
16 changed files with 252 additions and 398 deletions

View File

@@ -178,11 +178,6 @@ TEST(CrashRecoveryTest, UnixCRCReturnCode) {
int Res = setenv("LLVM_CRC_UNIXCRCRETURNCODE", "1", 0);
ASSERT_EQ(Res, 0);
Res = unsetenv("GTEST_SHARD_INDEX");
ASSERT_EQ(Res, 0);
Res = unsetenv("GTEST_TOTAL_SHARDS");
ASSERT_EQ(Res, 0);
std::string Error;
bool ExecutionFailed;
int RetCode = ExecuteAndWait(Executable, argv, {}, {}, 0, 0, &Error,
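For context, the two unsetenv calls being dropped here existed because the sharded GoogleTest format exports GTEST_SHARD_INDEX and GTEST_TOTAL_SHARDS into the test environment, and this test re-executes its own binary, which would otherwise inherit the sharding settings. A minimal sketch of the same scrubbing idea in Python (helper and variable names are illustrative, not from this commit):

import os
import subprocess

def run_child_without_shard_vars(executable, args):
    # Copy the current environment and drop any inherited gtest sharding
    # settings so the child runs the full binary, not a single shard.
    child_env = dict(os.environ)
    for var in ('GTEST_TOTAL_SHARDS', 'GTEST_SHARD_INDEX'):
        child_env.pop(var, None)
    return subprocess.run([executable, *args], env=child_env).returncode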

View File

@@ -95,9 +95,7 @@ protected:
};
while (*EnvP != nullptr) {
auto S = prepareEnvVar(*EnvP);
if (!StringRef(S).startswith("GTEST_"))
EnvTable.emplace_back(S);
EnvTable.emplace_back(prepareEnvVar(*EnvP));
++EnvP;
}
}

View File

@@ -219,12 +219,11 @@ class TestSuite:
class Test:
"""Test - Information on a single test instance."""
def __init__(self, suite, path_in_suite, config, file_path = None, gtest_json_file = None):
def __init__(self, suite, path_in_suite, config, file_path = None):
self.suite = suite
self.path_in_suite = path_in_suite
self.config = config
self.file_path = file_path
self.gtest_json_file = gtest_json_file
# A list of conditions under which this test is expected to fail.
# Each condition is a boolean expression of features and target
@@ -259,7 +258,7 @@ class Test:
# The previous test elapsed time, if applicable.
self.previous_elapsed = 0.0
if suite.test_times and '/'.join(path_in_suite) in suite.test_times:
if '/'.join(path_in_suite) in suite.test_times:
time = suite.test_times['/'.join(path_in_suite)]
self.previous_elapsed = abs(time)
self.previous_failure = time < 0
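The suite.test_times mapping consulted above stores a negated duration for a test that failed on its previous run, which is why the constructor takes abs(time) and derives previous_failure from the sign. A small sketch of that convention (the dictionary contents are illustrative):

# A negative stored time marks a test that failed last time; magnitude is seconds.
test_times = {'Foo/bar.test': 1.25, 'Foo/baz.test': -0.4}

for name, t in test_times.items():
    previous_elapsed = abs(t)    # how long the test took on the previous run
    previous_failure = t < 0     # True if the previous run failed
    print(name, previous_elapsed, previous_failure)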

View File

@@ -28,7 +28,7 @@ class TestingConfig(object):
'TMPDIR', 'TMP', 'TEMP', 'TEMPDIR', 'AVRLIT_BOARD',
'AVRLIT_PORT', 'FILECHECK_OPTS', 'VCINSTALLDIR',
'VCToolsinstallDir', 'VSINSTALLDIR', 'WindowsSdkDir',
'WindowsSDKLibVersion', 'SOURCE_DATE_EPOCH','GTEST_FILTER']
'WindowsSDKLibVersion', 'SOURCE_DATE_EPOCH']
if sys.platform == 'win32':
pass_vars.append('COMSPEC')

View File

@@ -1,8 +1,8 @@
from __future__ import absolute_import
import json
import math
import os
import re
import shlex
import subprocess
import sys
import lit.Test
@@ -25,19 +25,74 @@ class GoogleTest(TestFormat):
self.test_suffixes = {exe_suffix, test_suffix + '.py'}
self.run_under = run_under
def get_num_tests(self, path, localConfig):
cmd = [path, '--gtest_list_tests', '--gtest_filter=-*DISABLED_*']
if cmd[0].endswith('.py'):
cmd = [sys.executable] + cmd
out, _, exitCode = lit.util.executeCommand(cmd, env=localConfig.environment)
if exitCode == 0:
return sum(map(lambda line: line.startswith(' '), out.splitlines()))
return None
def getGTestTests(self, path, litConfig, localConfig):
"""getGTestTests(path) - [name]
Return the tests available in gtest executable.
Args:
path: String path to a gtest executable
litConfig: LitConfig instance
localConfig: TestingConfig instance"""
list_test_cmd = self.prepareCmd([path, '--gtest_list_tests'])
try:
output = subprocess.check_output(list_test_cmd,
env=localConfig.environment)
except subprocess.CalledProcessError as exc:
litConfig.warning(
"unable to discover google-tests in %r: %s. Process output: %s"
% (path, sys.exc_info()[1], exc.output))
# This doesn't look like a valid gtest file. This can
# have a number of causes, none of them good. For
# instance, we could have created a broken executable.
# Alternatively, someone has cruft in their test
# directory. If we don't return a test here, then no
# failures will get reported, so return a dummy test name
# so that the failure is reported later.
yield 'failed_to_discover_tests_from_gtest'
return
upstream_prefix = re.compile('Running main\(\) from .*gtest_main\.cc')
nested_tests = []
for ln in output.splitlines(False): # Don't keep newlines.
ln = lit.util.to_string(ln)
if upstream_prefix.fullmatch(ln):
# Upstream googletest prints this to stdout prior to running
# tests. LLVM removed that print statement in r61540, but we
# handle it here in case upstream googletest is being used.
continue
# The test name list includes trailing comments beginning with
# a '#' on some lines, so skip those. We don't support test names
# that use escaping to embed '#' into their name as the names come
# from C++ class and method names where such things are hard and
# uninteresting to support.
ln = ln.split('#', 1)[0].rstrip()
if not ln.lstrip():
continue
index = 0
while ln[index*2:index*2+2] == '  ':
index += 1
while len(nested_tests) > index:
nested_tests.pop()
ln = ln[index*2:]
if ln.endswith('.'):
nested_tests.append(ln)
elif any([name.startswith('DISABLED_')
for name in nested_tests + [ln]]):
# Gtest will internally skip these tests. No need to launch a
# child process for it.
continue
else:
yield ''.join(nested_tests) + ln
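The generator above rebuilds fully qualified test names from googletest's indented listing: suite lines end with '.', tests are indented two spaces per nesting level, and '#' starts a trailing comment. A standalone sketch of the same parsing on a sample listing (the listing text is illustrative):

listing = """\
FirstTest.
  subTestA
  subTestB  # GetParam() = 0
ParameterizedTest/0.
  subTest"""

nested, names = [], []
for ln in listing.splitlines():
    ln = ln.split('#', 1)[0].rstrip()        # drop trailing '#' comments
    if not ln.strip():
        continue
    depth = 0
    while ln[depth*2:depth*2+2] == '  ':     # two spaces per nesting level
        depth += 1
    del nested[depth:]                       # leave deeper levels behind
    ln = ln[depth*2:]
    if ln.endswith('.'):
        nested.append(ln)                    # suite prefix, e.g. 'FirstTest.'
    else:
        names.append(''.join(nested) + ln)

print(names)  # ['FirstTest.subTestA', 'FirstTest.subTestB', 'ParameterizedTest/0.subTest']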
def getTestsInDirectory(self, testSuite, path_in_suite,
litConfig, localConfig):
init_shard_size = 512 # number of tests in a shard
core_count = lit.util.usable_core_count()
source_path = testSuite.getSourcePath(path_in_suite)
for subdir in self.test_sub_dirs:
dir_path = os.path.join(source_path, subdir)
@@ -47,40 +102,13 @@ class GoogleTest(TestFormat):
suffixes=self.test_suffixes):
# Discover the tests in this executable.
execpath = os.path.join(source_path, subdir, fn)
num_tests = self.get_num_tests(execpath, localConfig)
if num_tests is not None:
# Compute the number of shards.
shard_size = init_shard_size
nshard = int(math.ceil(num_tests/shard_size))
while nshard < core_count and shard_size > 1:
shard_size = shard_size//2
nshard = int(math.ceil(num_tests/shard_size))
# Create one lit test for each shard.
for idx in range(nshard):
testPath = path_in_suite + (subdir, fn,
str(idx), str(nshard))
json_file = '-'.join([execpath, testSuite.config.name,
str(os.getpid()), str(idx),
str(nshard)]) + '.json'
yield lit.Test.Test(testSuite, testPath, localConfig,
file_path=execpath,
gtest_json_file=json_file)
else:
# This doesn't look like a valid gtest file. This can
# have a number of causes, none of them good. For
# instance, we could have created a broken executable.
# Alternatively, someone has cruft in their test
# directory. If we don't return a test here, then no
# failures will get reported, so return a dummy test name
# so that the failure is reported later.
testPath = path_in_suite + (subdir, fn, 'failed_to_discover_tests_from_gtest')
yield lit.Test.Test(testSuite, testPath, localConfig, file_path=execpath)
testnames = self.getGTestTests(execpath, litConfig, localConfig)
for testname in testnames:
testPath = path_in_suite + (subdir, fn, testname)
yield lit.Test.Test(testSuite, testPath, localConfig,
file_path=execpath)
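The sharding logic removed above starts from a shard of 512 tests and halves it until there is at least one shard per usable core (or the shard size reaches 1). A worked sketch of that computation (the test and core counts are illustrative):

import math

def compute_shards(num_tests, core_count, init_shard_size=512):
    shard_size = init_shard_size
    nshard = math.ceil(num_tests / shard_size)
    while nshard < core_count and shard_size > 1:
        shard_size //= 2
        nshard = math.ceil(num_tests / shard_size)
    return shard_size, nshard

# 800 tests on 16 usable cores: 512 -> 2 shards, 256 -> 4, 128 -> 7, 64 -> 13, 32 -> 25
print(compute_shards(800, 16))   # (32, 25)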
def execute(self, test, litConfig):
if test.gtest_json_file is None:
return lit.Test.FAIL, ''
testPath,testName = os.path.split(test.getSourcePath())
while not os.path.exists(testPath):
# Handle GTest parametrized and typed tests, whose name includes
@@ -88,12 +116,7 @@ class GoogleTest(TestFormat):
testPath, namePrefix = os.path.split(testPath)
testName = namePrefix + '/' + testName
testName,total_shards = os.path.split(testName)
testName,shard_idx = os.path.split(testName)
shard_env = {'GTEST_COLOR':'no','GTEST_TOTAL_SHARDS':total_shards, 'GTEST_SHARD_INDEX':shard_idx, 'GTEST_OUTPUT':'json:'+test.gtest_json_file}
test.config.environment.update(shard_env)
cmd = [testPath]
cmd = [testPath, '--gtest_filter=' + testName]
cmd = self.prepareCmd(cmd)
if litConfig.useValgrind:
cmd = litConfig.valgrindArgs + cmd
@@ -101,43 +124,30 @@ class GoogleTest(TestFormat):
if litConfig.noExecute:
return lit.Test.PASS, ''
shard_envs= '\n'.join([k + '=' + v for k, v in shard_env.items()])
shard_header = f"Script(shard):\n--\n{shard_envs}\n{' '.join(cmd)}\n--\n"
header = f"Script:\n--\n{' '.join(cmd)}\n--\n"
try:
_, _, exitCode = lit.util.executeCommand(
out, err, exitCode = lit.util.executeCommand(
cmd, env=test.config.environment,
timeout=litConfig.maxIndividualTestTime)
except lit.util.ExecuteCommandTimeoutException:
return (lit.Test.TIMEOUT,
f'{shard_header}Reached timeout of '
f'{header}Reached timeout of '
f'{litConfig.maxIndividualTestTime} seconds')
if not os.path.exists(test.gtest_json_file):
errmsg = f"shard JSON output does not exist: %s" % (test.gtest_json_file)
return lit.Test.FAIL, shard_header + errmsg
if exitCode:
output = shard_header + '\n'
with open(test.gtest_json_file, encoding='utf-8') as f:
testsuites = json.load(f)['testsuites']
for testcase in testsuites:
for testinfo in testcase['testsuite']:
if testinfo['result'] == 'SUPPRESSED' or testinfo['result'] == 'SKIPPED':
continue
testname = testcase['name'] + '.' + testinfo['name']
header = f"Script:\n--\n{' '.join(cmd)} --gtest_filter={testname}\n--\n"
if 'failures' in testinfo:
output += header
for fail in testinfo['failures']:
output += fail['failure'] + '\n'
output += '\n'
elif testinfo['result'] != 'COMPLETED':
output += header
output += 'unresolved test result\n'
return lit.Test.FAIL, output
else:
return lit.Test.PASS, ''
return lit.Test.FAIL, header + out + err
if '[ SKIPPED ] 1 test,' in out:
return lit.Test.SKIPPED, ''
passing_test_line = '[ PASSED ] 1 test.'
if passing_test_line not in out:
return (lit.Test.UNRESOLVED,
f'{header}Unable to find {passing_test_line} '
f'in gtest output:\n\n{out}{err}')
return lit.Test.PASS,''
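With sharding gone, execute() classifies each run from the exit code and gtest's summary banner: a nonzero exit is FAIL, an explicit skip banner is SKIPPED, a clean exit without the passing banner is UNRESOLVED, and everything else is PASS. A toy reproduction of that decision order (the sample output strings mirror the dummy scripts further down):

def classify(exit_code, out):
    if exit_code:
        return 'FAIL'
    if '[ SKIPPED ] 1 test,' in out:
        return 'SKIPPED'
    if '[ PASSED ] 1 test.' not in out:
        return 'UNRESOLVED'      # ran cleanly but never reported a pass
    return 'PASS'

print(classify(0, 'I am subTest A, I PASS\n[ PASSED ] 1 test.\n'))   # PASS
print(classify(0, 'I am subTest D, I am UNRESOLVED\n'))              # UNRESOLVED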
def prepareCmd(self, cmd):
"""Insert interpreter if needed.
@@ -156,61 +166,3 @@ class GoogleTest(TestFormat):
else:
cmd = shlex.split(self.run_under) + cmd
return cmd
@staticmethod
def post_process_shard_results(selected_tests, discovered_tests):
def remove_gtest(tests):
idxs = []
for idx, t in enumerate(tests):
if t.gtest_json_file:
idxs.append(idx)
for i in range(len(idxs)):
del tests[idxs[i]-i]
remove_gtest(discovered_tests)
gtests = [t for t in selected_tests if t.gtest_json_file]
remove_gtest(selected_tests)
for test in gtests:
# In case gtest has bugs such that no JSON file was emitted.
if not os.path.exists(test.gtest_json_file):
selected_tests.append(test)
discovered_tests.append(test)
continue
# Load json file to retrieve results.
with open(test.gtest_json_file, encoding='utf-8') as f:
testsuites = json.load(f)['testsuites']
for testcase in testsuites:
for testinfo in testcase['testsuite']:
# Ignore disabled tests.
if testinfo['result'] == 'SUPPRESSED':
continue
testPath = test.path_in_suite[:-2] + (testcase['name'], testinfo['name'])
subtest = lit.Test.Test(test.suite, testPath,
test.config, test.file_path)
testname = testcase['name'] + '.' + testinfo['name']
header = f"Script:\n--\n{test.file_path} --gtest_filter={testname}\n--\n"
output = ''
if testinfo['result'] == 'SKIPPED':
returnCode = lit.Test.SKIPPED
elif 'failures' in testinfo:
returnCode = lit.Test.FAIL
output = header
for fail in testinfo['failures']:
output += fail['failure'] + '\n'
elif testinfo['result'] == 'COMPLETED':
returnCode = lit.Test.PASS
else:
returnCode = lit.Test.UNRESOLVED
output = header + 'unresolved test result\n'
subtest.setResult(lit.Test.Result(returnCode, output, float(testinfo['time'][:-1])))
selected_tests.append(subtest)
discovered_tests.append(subtest)
os.remove(test.gtest_json_file)
return selected_tests, discovered_tests
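The removed post-processing reads the JSON report that googletest writes when GTEST_OUTPUT=json:<file> is set and expands each shard back into one result per test. A minimal sketch of walking such a report (the embedded JSON mirrors the dummy scripts below; only the shape matters):

import json

report = json.loads('''
{ "testsuites": [
    { "name": "FirstTest",
      "testsuite": [
        { "name": "subTestA", "result": "COMPLETED", "time": "0.001s" },
        { "name": "subTestB", "result": "COMPLETED", "time": "0.001s",
          "failures": [ { "failure": "boom", "type": "" } ] } ] } ] }
''')

for suite in report['testsuites']:
    for case in suite['testsuite']:
        name = suite['name'] + '.' + case['name']
        elapsed = float(case['time'][:-1])              # strip the trailing 's'
        verdict = 'FAIL' if 'failures' in case else 'PASS'
        print(name, verdict, elapsed)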

View File

@@ -18,7 +18,6 @@ import lit.reports
import lit.run
import lit.Test
import lit.util
from lit.formats.googletest import GoogleTest
from lit.TestTimes import record_test_times
@@ -109,9 +108,6 @@ def main(builtin_params={}):
record_test_times(selected_tests, lit_config)
selected_tests, discovered_tests = GoogleTest.post_process_shard_results(
selected_tests, discovered_tests)
if opts.time_tests:
print_histogram(discovered_tests)

View File

@@ -1,45 +0,0 @@
#!/usr/bin/env python
import os
import sys
if len(sys.argv) == 3 and sys.argv[1] == "--gtest_list_tests":
if sys.argv[2] != '--gtest_filter=-*DISABLED_*':
raise ValueError("unexpected argument: %s" % (sys.argv[2]))
print("""\
FirstTest.
subTestA
subTestB
subTestC
subTestD
ParameterizedTest/0.
subTest
ParameterizedTest/1.
subTest""")
sys.exit(0)
elif len(sys.argv) != 1:
# sharding and json output are specified using environment variables
raise ValueError("unexpected argument: %r" % (' '.join(sys.argv[1:])))
for e in ['GTEST_TOTAL_SHARDS', 'GTEST_SHARD_INDEX', 'GTEST_OUTPUT']:
if e not in os.environ:
raise ValueError("missing environment variables: " + e)
if not os.environ['GTEST_OUTPUT'].startswith('json:'):
raise ValueError("must emit json output: " + os.environ['GTEST_OUTPUT'])
dummy_output = """\
{
"testsuites": [
]
}"""
if os.environ['GTEST_SHARD_INDEX'] == '0':
exit_code = 1
else:
json_filename = os.environ['GTEST_OUTPUT'].split(':', 1)[1]
with open(json_filename, 'w') as f:
f.write(dummy_output)
exit_code = 0
sys.exit(exit_code)

View File

@@ -1,11 +1,11 @@
#!/usr/bin/env python
import os
import sys
if len(sys.argv) == 3 and sys.argv[1] == "--gtest_list_tests":
if sys.argv[2] != '--gtest_filter=-*DISABLED_*':
raise ValueError("unexpected argument: %s" % (sys.argv[2]))
if len(sys.argv) != 2:
raise ValueError("unexpected number of args")
if sys.argv[1] == "--gtest_list_tests":
print("""\
FirstTest.
subTestA
@@ -17,87 +17,31 @@ ParameterizedTest/0.
ParameterizedTest/1.
subTest""")
sys.exit(0)
elif len(sys.argv) != 1:
# sharding and json output are specified using environment variables
raise ValueError("unexpected argument: %r" % (' '.join(sys.argv[1:])))
elif not sys.argv[1].startswith("--gtest_filter="):
raise ValueError("unexpected argument: %r" % (sys.argv[1]))
for e in ['GTEST_TOTAL_SHARDS', 'GTEST_SHARD_INDEX', 'GTEST_OUTPUT']:
if e not in os.environ:
raise ValueError("missing environment variables: " + e)
if not os.environ['GTEST_OUTPUT'].startswith('json:'):
raise ValueError("must emit json output: " + os.environ['GTEST_OUTPUT'])
output = """\
{
"testsuites": [
{
"name": "FirstTest",
"testsuite": [
{
"name": "subTestA",
"result": "COMPLETED",
"time": "0.001s"
},
{
"name": "subTestB",
"result": "COMPLETED",
"time": "0.001s",
"failures": [
{
"failure": "I am subTest B, I FAIL\\nAnd I have two lines of output",
"type": ""
}
]
},
{
"name": "subTestC",
"result": "SKIPPED",
"time": "0.001s"
},
{
"name": "subTestD",
"result": "UNRESOLVED",
"time": "0.001s"
}
]
},
{
"name": "ParameterizedTest/0",
"testsuite": [
{
"name": "subTest",
"result": "COMPLETED",
"time": "0.001s"
}
]
},
{
"name": "ParameterizedTest/1",
"testsuite": [
{
"name": "subTest",
"result": "COMPLETED",
"time": "0.001s"
}
]
}
]
}"""
dummy_output = """\
{
"testsuites": [
]
}"""
json_filename = os.environ['GTEST_OUTPUT'].split(':', 1)[1]
with open(json_filename, 'w') as f:
if os.environ['GTEST_SHARD_INDEX'] == '0':
f.write(output)
exit_code = 1
else:
f.write(dummy_output)
exit_code = 0
sys.exit(exit_code)
test_name = sys.argv[1].split('=',1)[1]
if test_name == 'FirstTest.subTestA':
print('I am subTest A, I PASS')
print('[ PASSED ] 1 test.')
sys.exit(0)
elif test_name == 'FirstTest.subTestB':
print('I am subTest B, I FAIL')
print('And I have two lines of output')
sys.exit(1)
elif test_name == 'FirstTest.subTestC':
print('I am subTest C, I am SKIPPED')
print('[ PASSED ] 0 tests.')
print('[ SKIPPED ] 1 test, listed below:')
print('[ SKIPPED ] FirstTest.subTestC')
sys.exit(0)
elif test_name == 'FirstTest.subTestD':
print('I am subTest D, I am UNRESOLVED')
sys.exit(0)
elif test_name in ('ParameterizedTest/0.subTest',
'ParameterizedTest/1.subTest'):
print('I am a parameterized test, I also PASS')
print('[ PASSED ] 1 test.')
sys.exit(0)
else:
raise SystemExit("error: invalid test name: %r" % (test_name,))

View File

@@ -1,66 +1,29 @@
#!/usr/bin/env python
import os
import sys
import time
if len(sys.argv) == 3 and sys.argv[1] == "--gtest_list_tests":
if sys.argv[2] != '--gtest_filter=-*DISABLED_*':
raise ValueError("unexpected argument: %s" % (sys.argv[2]))
if len(sys.argv) != 2:
raise ValueError("unexpected number of args")
if sys.argv[1] == "--gtest_list_tests":
print("""\
T.
QuickSubTest
InfiniteLoopSubTest
""")
sys.exit(0)
elif len(sys.argv) != 1:
# sharding and json output are specified using environment variables
raise ValueError("unexpected argument: %r" % (' '.join(sys.argv[1:])))
elif not sys.argv[1].startswith("--gtest_filter="):
raise ValueError("unexpected argument: %r" % (sys.argv[1]))
for e in ['GTEST_TOTAL_SHARDS', 'GTEST_SHARD_INDEX', 'GTEST_OUTPUT', 'GTEST_FILTER']:
if e not in os.environ:
raise ValueError("missing environment variables: " + e)
if not os.environ['GTEST_OUTPUT'].startswith('json:'):
raise ValueError("must emit json output: " + os.environ['GTEST_OUTPUT'])
output = """\
{
"testsuites": [
{
"name": "T",
"testsuite": [
{
"name": "QuickSubTest",
"result": "COMPLETED",
"time": "2s"
}
]
}
]
}"""
dummy_output = """\
{
"testsuites": [
]
}"""
json_filename = os.environ['GTEST_OUTPUT'].split(':', 1)[1]
if os.environ['GTEST_SHARD_INDEX'] == '0':
test_name = os.environ['GTEST_FILTER']
if test_name == 'QuickSubTest':
with open(json_filename, 'w') as f:
f.write(output)
exit_code = 0
elif test_name == 'InfiniteLoopSubTest':
while True:
pass
else:
raise SystemExit("error: invalid test name: %r" % (test_name,))
test_name = sys.argv[1].split('=',1)[1]
if test_name == 'T.QuickSubTest':
print('I am QuickSubTest, I PASS')
print('[ PASSED ] 1 test.')
sys.exit(0)
elif test_name == 'T.InfiniteLoopSubTest':
print('I am InfiniteLoopSubTest, I will hang')
while True:
pass
else:
with open(json_filename, 'w') as f:
f.write(dummy_output)
exit_code = 0
sys.exit(exit_code)
raise SystemExit("error: invalid test name: %r" % (test_name,))

View File

@@ -3,7 +3,6 @@ config.name = 'googletest-timeout'
config.test_format = lit.formats.GoogleTest('DummySubDir', 'Test')
configSetTimeout = lit_config.params.get('set_timeout', '0')
config.environment['GTEST_FILTER'] = lit_config.params.get('gtest_filter')
if configSetTimeout == '1':
# Try setting the max individual test time in the configuration

View File

@@ -0,0 +1,50 @@
#!/usr/bin/env python
import os
import sys
if len(sys.argv) != 2:
raise ValueError("unexpected number of args")
if sys.argv[1] == "--gtest_list_tests":
print(f"""\
Running main() from {os.getcwd()}/gtest_main.cc
FirstTest.
subTestA
subTestB
subTestC
subTestD
ParameterizedTest/0.
subTest
ParameterizedTest/1.
subTest""")
sys.exit(0)
elif not sys.argv[1].startswith("--gtest_filter="):
raise ValueError("unexpected argument: %r" % (sys.argv[1]))
test_name = sys.argv[1].split('=',1)[1]
print('Running main() from gtest_main.cc')
if test_name == 'FirstTest.subTestA':
print('I am subTest A, I PASS')
print('[ PASSED ] 1 test.')
sys.exit(0)
elif test_name == 'FirstTest.subTestB':
print('I am subTest B, I FAIL')
print('And I have two lines of output')
sys.exit(1)
elif test_name == 'FirstTest.subTestC':
print('I am subTest C, I am SKIPPED')
print('[ PASSED ] 0 tests.')
print('[ SKIPPED ] 1 test, listed below:')
print('[ SKIPPED ] FirstTest.subTestC')
sys.exit(0)
elif test_name == 'FirstTest.subTestD':
print('I am subTest D, I am UNRESOLVED')
sys.exit(0)
elif test_name in ('ParameterizedTest/0.subTest',
'ParameterizedTest/1.subTest'):
print('I am a parameterized test, I also PASS')
print('[ PASSED ] 1 test.')
sys.exit(0)
else:
raise SystemExit("error: invalid test name: %r" % (test_name,))

View File

@@ -1,3 +1,3 @@
import lit.formats
config.name = 'googletest-crash'
config.name = 'googletest-upstream-format'
config.test_format = lit.formats.GoogleTest('DummySubDir', 'Test')

View File

@@ -1,20 +0,0 @@
# Check GoogleTest shard test crashes are handled.
# RUN: not %{lit} -v %{inputs}/googletest-crash | FileCheck %s
# CHECK: -- Testing:
# CHECK: FAIL: googletest-crash :: [[PATH:[Dd]ummy[Ss]ub[Dd]ir/]][[FILE:OneTest\.py]]/0
# CHECK: *** TEST 'googletest-crash :: [[PATH]][[FILE]]/0{{.*}} FAILED ***
# CHECK-NEXT: Script(shard):
# CHECK-NEXT: --
# CHECK-NEXT: GTEST_COLOR=no
# CHECK-NEXT: GTEST_TOTAL_SHARDS=6
# CHECK-NEXT: GTEST_SHARD_INDEX=0
# CHECK-NEXT: GTEST_OUTPUT=json:[[JSON:.*\.json]]
# CHECK-NEXT: [[FILE]]
# CHECK-NEXT: --
# CHECK-NEXT: shard JSON output does not exist: [[JSON]]
# CHECK-NEXT: ***
# CHECK: Failed Tests (1):
# CHECK-NEXT: googletest-crash :: [[PATH]][[FILE]]/0/6
# CHECK: Failed{{ *}}: 1

View File

@@ -9,35 +9,28 @@
# END.
# CHECK: -- Testing:
# CHECK: FAIL: googletest-format :: [[PATH:[Dd]ummy[Ss]ub[Dd]ir/]][[FILE:OneTest\.py]]/0
# CHECK: *** TEST 'googletest-format :: [[PATH]][[FILE]]/0{{.*}} FAILED ***
# CHECK-NEXT: Script(shard):
# CHECK-NEXT: --
# CHECK-NEXT: GTEST_COLOR=no
# CHECK-NEXT: GTEST_TOTAL_SHARDS=6
# CHECK-NEXT: GTEST_SHARD_INDEX=0
# CHECK-NEXT: GTEST_OUTPUT=json:{{.*\.json}}
# CHECK-NEXT: [[FILE]]
# CHECK-NEXT: --
# CHECK-EMPTY:
# CHECK: PASS: googletest-format :: [[PATH:[Dd]ummy[Ss]ub[Dd]ir/]][[FILE:OneTest\.py]]/FirstTest.subTestA
# CHECK: FAIL: googletest-format :: [[PATH]][[FILE]]/[[TEST:FirstTest\.subTestB]]
# CHECK-NEXT: *** TEST 'googletest-format :: [[PATH]][[FILE]]/[[TEST]]' FAILED ***
# CHECK-NEXT: Script:
# CHECK-NEXT: --
# CHECK-NEXT: [[FILE]] --gtest_filter=FirstTest.subTestB
# CHECK-NEXT: [[FILE]] --gtest_filter=[[TEST]]
# CHECK-NEXT: --
# CHECK-NEXT: I am subTest B, I FAIL
# CHECK-NEXT: And I have two lines of output
# CHECK-EMPTY:
# CHECK: Script:
# CHECK-NEXT: --
# CHECK-NEXT: [[FILE]] --gtest_filter=FirstTest.subTestD
# CHECK-NEXT: --
# CHECK-NEXT: unresolved test result
# CHECK: ***
# CHECK: Unresolved Tests (1):
# CHECK-NEXT: googletest-format :: [[PATH]][[FILE]]/FirstTest/subTestD
# CHECK: ***
# CHECK-NEXT: Failed Tests (1):
# CHECK-NEXT: googletest-format :: [[PATH]][[FILE]]/FirstTest/subTestB
# CHECK: SKIPPED: googletest-format :: [[PATH]][[FILE]]/FirstTest.subTestC
# CHECK: UNRESOLVED: googletest-format :: [[PATH]][[FILE]]/[[TEST:FirstTest\.subTestD]]
# CHECK-NEXT: *** TEST 'googletest-format :: [[PATH]][[FILE]]/[[TEST]]' FAILED ***
# CHECK-NEXT: Script:
# CHECK-NEXT: --
# CHECK-NEXT: [[FILE]] --gtest_filter=[[TEST]]
# CHECK-NEXT: --
# CHECK-NEXT: Unable to find [ PASSED ] 1 test. in gtest output
# CHECK: I am subTest D, I am UNRESOLVED
# CHECK: PASS: googletest-format :: [[PATH]][[FILE]]/ParameterizedTest/0.subTest
# CHECK: PASS: googletest-format :: [[PATH]][[FILE]]/ParameterizedTest/1.subTest
# CHECK: Failed Tests (1)
# CHECK: Skipped{{ *}}: 1
# CHECK: Passed{{ *}}: 3
# CHECK: Unresolved{{ *}}: 1

View File

@@ -7,29 +7,24 @@
# Check that the per test timeout is enforced when running GTest tests.
#
# RUN: not %{lit} -v %{inputs}/googletest-timeout \
# RUN: --param gtest_filter=InfiniteLoopSubTest --timeout=1 > %t.cmd.out
# RUN: --filter=InfiniteLoopSubTest --timeout=1 > %t.cmd.out
# RUN: FileCheck --check-prefix=CHECK-INF < %t.cmd.out %s
# Check that the per test timeout is enforced when running GTest tests via
# the configuration file
#
# RUN: not %{lit} -v %{inputs}/googletest-timeout \
# RUN: --param gtest_filter=InfiniteLoopSubTest --param set_timeout=1 \
# RUN: --filter=InfiniteLoopSubTest --param set_timeout=1 \
# RUN: > %t.cfgset.out
# RUN: FileCheck --check-prefix=CHECK-INF < %t.cfgset.out %s
# CHECK-INF: -- Testing:
# CHECK-INF: TIMEOUT: googletest-timeout :: [[PATH:[Dd]ummy[Ss]ub[Dd]ir/]][[FILE:OneTest\.py]]/0/2
# CHECK-INF-NEXT: ******************** TEST 'googletest-timeout :: [[PATH]][[FILE]]/0/2' FAILED ********************
# CHECK-INF-NEXT: Script(shard):
# CHECK-INF: TIMEOUT: googletest-timeout :: [[PATH:[Dd]ummy[Ss]ub[Dd]ir/]][[FILE:OneTest\.py]]/[[TEST:T\.InfiniteLoopSubTest]]
# CHECK-INF-NEXT: ******************** TEST 'googletest-timeout :: [[PATH]][[FILE]]/[[TEST]]' FAILED ********************
# CHECK-INF-NEXT: Script:
# CHECK-INF-NEXT: --
# CHECK-INF-NEXT: GTEST_COLOR=no
# CHECK-INF-NEXT: GTEST_TOTAL_SHARDS=2
# CHECK-INF-NEXT: GTEST_SHARD_INDEX=0
# CHECK-INF-NEXT: GTEST_OUTPUT=json:{{.*\.json}}
# CHECK-INF-NEXT: [[FILE]]
# CHECK-INF-NEXT: [[FILE]] --gtest_filter=[[TEST]]
# CHECK-INF-NEXT: --
# CHECK-INF-NEXT: Reached timeout of 1 seconds
# CHECK-INF: Timed Out: 1
###############################################################################
@@ -40,15 +35,15 @@
###############################################################################
# RUN: %{lit} -v %{inputs}/googletest-timeout \
# RUN: --param gtest_filter=QuickSubTest --timeout=3600 > %t.cmd.out
# RUN: --filter=QuickSubTest --timeout=3600 > %t.cmd.out
# RUN: FileCheck --check-prefix=CHECK-QUICK < %t.cmd.out %s
# CHECK-QUICK: PASS: googletest-timeout :: {{[Dd]ummy[Ss]ub[Dd]ir}}/OneTest.py/0/2 {{.*}}
# CHECK-QUICK: Passed: 1
# CHECK-QUICK: PASS: googletest-timeout :: {{[Dd]ummy[Ss]ub[Dd]ir}}/OneTest.py/T.QuickSubTest
# CHECK-QUICK: Passed : 1
# Test per test timeout via a config file and on the command line.
# The value set on the command line should override the config file.
# RUN: %{lit} -v %{inputs}/googletest-timeout --param gtest_filter=QuickSubTest \
# RUN: %{lit} -v %{inputs}/googletest-timeout --filter=QuickSubTest \
# RUN: --param set_timeout=1 --timeout=3600 \
# RUN: > %t.cmdover.out 2> %t.cmdover.err
# RUN: FileCheck --check-prefix=CHECK-QUICK < %t.cmdover.out %s

View File

@@ -0,0 +1,35 @@
# Check the various features of the GoogleTest format.
# RUN: not %{lit} -v %{inputs}/googletest-upstream-format > %t.out
# RUN: FileCheck < %t.out %s
#
# END.
# CHECK: -- Testing:
# CHECK: PASS: googletest-upstream-format :: [[PATH:[Dd]ummy[Ss]ub[Dd]ir/]][[FILE:OneTest\.py]]/FirstTest.subTestA
# CHECK: FAIL: googletest-upstream-format :: [[PATH]][[FILE]]/[[TEST:FirstTest\.subTestB]]
# CHECK-NEXT: *** TEST 'googletest-upstream-format :: [[PATH]][[FILE]]/[[TEST]]' FAILED ***
# CHECK-NEXT: Script:
# CHECK-NEXT: --
# CHECK-NEXT: [[FILE]] --gtest_filter=[[TEST]]
# CHECK-NEXT: --
# CHECK-NEXT: Running main() from gtest_main.cc
# CHECK-NEXT: I am subTest B, I FAIL
# CHECK-NEXT: And I have two lines of output
# CHECK: SKIPPED: googletest-upstream-format :: [[PATH]][[FILE]]/FirstTest.subTestC
# CHECK: UNRESOLVED: googletest-upstream-format :: [[PATH]][[FILE]]/[[TEST:FirstTest\.subTestD]]
# CHECK-NEXT: *** TEST 'googletest-upstream-format :: [[PATH]][[FILE]]/[[TEST]]' FAILED ***
# CHECK-NEXT: Script:
# CHECK-NEXT: --
# CHECK-NEXT: [[FILE]] --gtest_filter=[[TEST]]
# CHECK-NEXT: --
# CHECK-NEXT: Unable to find [ PASSED ] 1 test. in gtest output
# CHECK: I am subTest D, I am UNRESOLVED
# CHECK: ***
# CHECK: PASS: googletest-upstream-format :: [[PATH]][[FILE]]/ParameterizedTest/0.subTest
# CHECK: PASS: googletest-upstream-format :: [[PATH]][[FILE]]/ParameterizedTest/1.subTest
# CHECK: Failed Tests (1)
# CHECK: Skipped{{ *}}: 1
# CHECK: Passed{{ *}}: 3
# CHECK: Unresolved{{ *}}: 1
# CHECK: Failed{{ *}}: 1