Rename cxx-benchmark-unittests target and convert to LIT.

This patch renames the cxx-benchmark-unittests target to check-cxx-benchmarks
and converts the target to use LIT, so that the tests run faster and
produce better output.

In particular, this runs each benchmark in a suite one by one, allowing
more parallelism while ensuring the output isn't garbled by multiple
benchmarks writing at once.

Additionally, it adds the CMake flag '-DLIBCXX_BENCHMARK_TEST_ARGS=<list>'
to specify what options are passed when running the benchmarks.

llvm-svn: 346888
Eric Fiselier 2018-11-14 20:38:46 +00:00
parent 7f15568c40
commit 336a1a6811
7 changed files with 201 additions and 22 deletions
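To illustrate the mechanism, here is roughly the command each lit test boils
down to, based on the execute() method added below. This is a sketch: the
build path, executable name, and benchmark name are made up, and the trailing
argument is the LIBCXX_BENCHMARK_TEST_ARGS default.

# Sketch: how one discovered benchmark is invoked (hypothetical names).
import subprocess

cmd = ['./benchmarks/algorithms.libcxx.out',  # made-up benchmark binary
       '--benchmark_filter=BM_Sort$',         # anchored: run exactly this one
       '--benchmark_min_time=0.01']           # default LIBCXX_BENCHMARK_TEST_ARGS
print(subprocess.check_output(cmd).decode())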

libcxx/CMakeLists.txt

@@ -83,6 +83,11 @@ option(LIBCXX_INCLUDE_TESTS "Build the libc++ tests." ${LLVM_INCLUDE_TESTS})

# Benchmark options -----------------------------------------------------------
option(LIBCXX_INCLUDE_BENCHMARKS "Build the libc++ benchmarks and their dependencies" ON)

set(LIBCXX_BENCHMARK_TEST_ARGS_DEFAULT --benchmark_min_time=0.01)
set(LIBCXX_BENCHMARK_TEST_ARGS "${LIBCXX_BENCHMARK_TEST_ARGS_DEFAULT}" CACHE STRING
    "Arguments to pass when running the benchmarks using check-cxx-benchmarks")

set(LIBCXX_BENCHMARK_NATIVE_STDLIB "" CACHE STRING
    "Build the benchmarks against the specified native STL.
     The value must be one of libc++/libstdc++")
@@ -766,6 +771,18 @@ include_directories(include)
add_subdirectory(include)
add_subdirectory(lib)

set(LIBCXX_TEST_DEPS "")

if (LIBCXX_ENABLE_EXPERIMENTAL_LIBRARY)
  list(APPEND LIBCXX_TEST_DEPS cxx_experimental)
endif()

if (LIBCXX_ENABLE_FILESYSTEM)
  list(APPEND LIBCXX_TEST_DEPS cxx_filesystem)
endif()

if (LIBCXX_BUILD_EXTERNAL_THREAD_LIBRARY)
  list(APPEND LIBCXX_TEST_DEPS cxx_external_threads)
endif()

if (LIBCXX_INCLUDE_BENCHMARKS)
  add_subdirectory(benchmarks)

libcxx/benchmarks/CMakeLists.txt

@@ -180,15 +180,22 @@ foreach(test_path ${BENCHMARK_TESTS})
  add_benchmark_test(${test_name} ${test_file})
endforeach()

if (LIBCXX_INCLUDE_TESTS)
  include(AddLLVM)
  add_custom_target(cxx-benchmark-unittests)
  foreach(libcxx_tg ${libcxx_benchmark_targets})
    message("Adding test ${libcxx_tg}")
    # Add a target that runs the benchmark for the smallest possible time, simply so we get test
    # and sanitizer coverage on the targets.
    add_custom_target(${libcxx_tg}_test
        COMMAND ${libcxx_tg} --benchmark_min_time=0.01
        COMMENT "Running test ${libcxx_tg}"
    )
    add_dependencies(cxx-benchmark-unittests ${libcxx_tg}_test)
  endforeach()
  if (NOT DEFINED LIBCXX_TEST_DEPS)
    message(FATAL_ERROR "Expected LIBCXX_TEST_DEPS to be defined")
  endif()
  configure_lit_site_cfg(
    ${CMAKE_CURRENT_SOURCE_DIR}/lit.site.cfg.py.in
    ${CMAKE_CURRENT_BINARY_DIR}/lit.site.cfg.py)
  set(BENCHMARK_LIT_ARGS "--show-all --show-xfail --show-unsupported ${LIT_ARGS_DEFAULT}")
  add_lit_testsuite(check-cxx-benchmarks
    "Running libcxx benchmarks tests"
    ${CMAKE_CURRENT_BINARY_DIR}
    DEPENDS cxx-benchmarks ${LIBCXX_TEST_DEPS}
    ARGS ${BENCHMARK_LIT_ARGS})
endif()

libcxx/benchmarks/lit.cfg.py

@@ -0,0 +1,23 @@
# -*- Python -*- vim: set ft=python ts=4 sw=4 expandtab tw=79:
# Configuration file for the 'lit' test runner.
import os
import site

site.addsitedir(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'utils'))
from libcxx.test.googlebenchmark import GoogleBenchmark

# Tell pylint that we know config and lit_config exist somewhere.
if 'PYLINT_IMPORT' in os.environ:
    config = object()
    lit_config = object()

# name: The name of this test suite.
config.name = 'libc++ benchmarks'
config.suffixes = []

config.test_exec_root = os.path.join(config.libcxx_obj_root, 'benchmarks')
config.test_source_root = config.test_exec_root

config.test_format = GoogleBenchmark(test_sub_dirs='.',
                                     test_suffix='.libcxx.out',
                                     benchmark_args=config.benchmark_args)
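For context, a sketch of the discovery this configuration sets up, with
made-up file names: GoogleBenchmark (added below) scans the benchmarks build
directory for files matching the configured suffixes and turns every
benchmark inside each binary into its own lit test.

# Sketch: the suffix set GoogleBenchmark.__init__ builds from
# test_suffix='.libcxx.out' above, applied to hypothetical file names.
import sys

test_suffix = '.libcxx.out'
exe_suffix = test_suffix + ('.exe' if sys.platform in ['win32', 'cygwin'] else '')
test_suffixes = {exe_suffix, test_suffix + '.py'}  # '.py' kept for testing

for fn in ['algorithms.libcxx.out', 'filesystem.libcxx.out', 'CMakeCache.txt']:
    matched = any(fn.endswith(s) for s in test_suffixes)
    print(fn, '->', 'discovered' if matched else 'ignored')

Each benchmark inside a discovered binary then shows up as a lit test named
roughly 'libc++ benchmarks :: algorithms.libcxx.out/BM_Sort'.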

libcxx/benchmarks/lit.site.cfg.py.in

@@ -0,0 +1,10 @@
@LIT_SITE_CFG_IN_HEADER@
import sys
config.libcxx_src_root = "@LIBCXX_SOURCE_DIR@"
config.libcxx_obj_root = "@LIBCXX_BINARY_DIR@"
config.benchmark_args = "@LIBCXX_BENCHMARK_TEST_ARGS@".split(';')
# Let the main config do the real work.
lit_config.load_config(config, "@LIBCXX_SOURCE_DIR@/benchmarks/lit.cfg.py")
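One detail worth spelling out (a sketch; the second flag is a made-up
example): CMake renders a list variable as a single ';'-joined string, so
the site config splits it back into a Python list before handing it to the
test format.

# Sketch: how a CMake list value such as
#   -DLIBCXX_BENCHMARK_TEST_ARGS='--benchmark_min_time=0.01;--benchmark_color=false'
# arrives in Python after the '@LIBCXX_BENCHMARK_TEST_ARGS@' substitution.
benchmark_args = '--benchmark_min_time=0.01;--benchmark_color=false'.split(';')
assert benchmark_args == ['--benchmark_min_time=0.01', '--benchmark_color=false']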

libcxx/docs/BuildingLibcxx.rst

@@ -316,6 +316,15 @@ libc++ Feature Options
  Build the libc++ benchmark tests and the Google Benchmark library needed
  to support them.

.. option:: LIBCXX_BENCHMARK_TEST_ARGS:STRING

  **Default**: ``--benchmark_min_time=0.01``

  A semicolon-separated list of arguments to pass when running the libc++
  benchmarks using the ``check-cxx-benchmarks`` rule. By default the benchmarks
  run for a very short amount of time, since the primary use of
  ``check-cxx-benchmarks`` is to get test and sanitizer coverage, not to get
  accurate measurements.

.. option:: LIBCXX_BENCHMARK_NATIVE_STDLIB:STRING

  **Default**: ``""``

libcxx/test/CMakeLists.txt

@@ -55,17 +55,8 @@ set(LIBCXX_EXECUTOR "None" CACHE STRING
set(AUTO_GEN_COMMENT "## Autogenerated by libcxx configuration.\n# Do not edit!")

set(LIBCXX_TEST_DEPS "")
if (LIBCXX_ENABLE_EXPERIMENTAL_LIBRARY)
  list(APPEND LIBCXX_TEST_DEPS cxx_experimental)
endif()
if (LIBCXX_ENABLE_FILESYSTEM)
  list(APPEND LIBCXX_TEST_DEPS cxx_filesystem)
endif()
if (LIBCXX_BUILD_EXTERNAL_THREAD_LIBRARY)
  list(APPEND LIBCXX_TEST_DEPS cxx_external_threads)
if (NOT DEFINED LIBCXX_TEST_DEPS)
  message(FATAL_ERROR "Expected LIBCXX_TEST_DEPS to be defined")
endif()

if (LIBCXX_INCLUDE_TESTS)

libcxx/utils/libcxx/test/googlebenchmark.py

@@ -0,0 +1,122 @@
from __future__ import absolute_import
import os
import subprocess
import sys

import lit.Test
import lit.TestRunner
import lit.util
from lit.formats.base import TestFormat

kIsWindows = sys.platform in ['win32', 'cygwin']

class GoogleBenchmark(TestFormat):
    def __init__(self, test_sub_dirs, test_suffix, benchmark_args=[]):
        self.benchmark_args = list(benchmark_args)
        self.test_sub_dirs = os.path.normcase(str(test_sub_dirs)).split(';')

        # On Windows, assume tests will also end in '.exe'.
        exe_suffix = str(test_suffix)
        if kIsWindows:
            exe_suffix += '.exe'

        # Also check for .py files for testing purposes.
        self.test_suffixes = {exe_suffix, test_suffix + '.py'}

    def getBenchmarkTests(self, path, litConfig, localConfig):
        """getBenchmarkTests(path) - [name]

        Return the tests available in the benchmark executable.

        Args:
          path: String path to a benchmark executable
          litConfig: LitConfig instance
          localConfig: TestingConfig instance"""
        # TODO: allow splitting tests according to the "benchmark family" so
        # the output for a single family of tests all belongs to the same test
        # target.
        list_test_cmd = [path, '--benchmark_list_tests']
        try:
            output = subprocess.check_output(list_test_cmd,
                                             env=localConfig.environment)
        except subprocess.CalledProcessError as exc:
            litConfig.warning(
                "unable to discover google-benchmarks in %r: %s. Process output: %s"
                % (path, sys.exc_info()[1], exc.output))
            # Stop the generator without raising StopIteration, which is an
            # error inside a generator under PEP 479 (Python 3.7+).
            return

        nested_tests = []
        for ln in output.splitlines(False):  # Don't keep newlines.
            ln = lit.util.to_string(ln)
            if not ln.strip():
                continue

            # Two leading spaces mean one level of nesting in the listing.
            index = 0
            while ln[index*2:index*2+2] == '  ':
                index += 1
            while len(nested_tests) > index:
                nested_tests.pop()

            ln = ln[index*2:]
            if ln.endswith('.'):
                nested_tests.append(ln)
            elif any([name.startswith('DISABLED_')
                      for name in nested_tests + [ln]]):
                # Gtest will internally skip these tests. No need to launch a
                # child process for it.
                continue
            else:
                yield ''.join(nested_tests) + ln

    def getTestsInDirectory(self, testSuite, path_in_suite,
                            litConfig, localConfig):
        source_path = testSuite.getSourcePath(path_in_suite)
        for subdir in self.test_sub_dirs:
            dir_path = os.path.join(source_path, subdir)
            if not os.path.isdir(dir_path):
                continue
            for fn in lit.util.listdir_files(dir_path,
                                             suffixes=self.test_suffixes):
                # Discover the tests in this executable.
                execpath = os.path.join(source_path, subdir, fn)
                testnames = self.getBenchmarkTests(execpath, litConfig, localConfig)
                for testname in testnames:
                    testPath = path_in_suite + (subdir, fn, testname)
                    yield lit.Test.Test(testSuite, testPath, localConfig,
                                        file_path=execpath)

    def execute(self, test, litConfig):
        testPath, testName = os.path.split(test.getSourcePath())
        while not os.path.exists(testPath):
            # Handle GTest parametrized and typed tests, whose name includes
            # some '/'s.
            testPath, namePrefix = os.path.split(testPath)
            testName = namePrefix + '/' + testName

        cmd = [testPath, '--benchmark_filter=%s$' % testName] + self.benchmark_args

        if litConfig.noExecute:
            return lit.Test.PASS, ''

        try:
            out, err, exitCode = lit.util.executeCommand(
                cmd, env=test.config.environment,
                timeout=litConfig.maxIndividualTestTime)
        except lit.util.ExecuteCommandTimeoutException:
            return (lit.Test.TIMEOUT,
                    'Reached timeout of {} seconds'.format(
                        litConfig.maxIndividualTestTime))

        if exitCode:
            return lit.Test.FAIL, out + err

        passing_test_line = testName
        if passing_test_line not in out:
            msg = ('Unable to find %r in google benchmark output:\n\n%s%s' %
                   (passing_test_line, out, err))
            return lit.Test.UNRESOLVED, msg

        return lit.Test.PASS, err + out
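The indentation-based parsing in getBenchmarkTests is easiest to see on an
example. Google Benchmark's --benchmark_list_tests normally prints a flat
list of names, but the parser (borrowed from the googletest format) also
handles indented, gtest-style listings where a trailing '.' marks a group.
A runnable trace with made-up listing lines:

# Sketch: the nesting logic from getBenchmarkTests applied to a made-up
# listing. Two leading spaces mean one level of nesting; a trailing '.'
# pushes a name prefix.
listing = [
    'BM_Sort/1024',   # flat entry -> yielded as-is
    'Algorithms.',    # group name -> becomes a prefix
    '  BM_Copy',      # nested one level under 'Algorithms.'
]

nested, names = [], []
for ln in listing:
    index = 0
    while ln[index*2:index*2+2] == '  ':
        index += 1
    while len(nested) > index:
        nested.pop()
    ln = ln[index*2:]
    if ln.endswith('.'):
        nested.append(ln)
    else:
        names.append(''.join(nested) + ln)

print(names)  # ['BM_Sort/1024', 'Algorithms.BM_Copy']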