[lit] Limit parallelism of sanitizer tests on Darwin [llvm part, take 2]
Running lit tests and unit tests of ASan and TSan on macOS has very bad performance when running with a high number of threads. This is caused by xnu (the macOS kernel), which currently doesn't handle mapping and unmapping of sanitizer shadow regions (reserved VM regions that are several terabytes large) very well. The situation is so bad that increasing the number of threads can actually increase the total testing time.

The macOS buildbots are affected by this. Note that we can't easily limit the number of sanitizer testing threads without affecting the rest of the tests.

This patch adds a notion of a parallelism "group" to lit and limits the number of concurrently running tests in that group. This solves the contention problem while still allowing the other tests to run at full parallelism: running lit with -j8 will still use 8 threads, and parallelism is only limited for the sanitizer tests.

Differential Revision: https://reviews.llvm.org/D28420

llvm-svn: 292548
parent 680663931c
commit 30881272e1
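For context, here is a suite-side sketch of how the new hook could be used from a lit.cfg. The suite-side configuration is not part of this llvm-only patch, so the group name "darwin-shadow-memory" and the limit of 3 are illustrative assumptions; only the two attributes themselves (lit_config.parallelism_groups and config.parallelism_group) come from this change.

# Hypothetical lit.cfg snippet -- illustrative only, not part of this commit.
# Declare a named group with a concurrency cap on the global lit config;
# LitConfig.parallelism_groups maps group name -> max concurrent tests.
lit_config.parallelism_groups['darwin-shadow-memory'] = 3

# Tag this test suite so the runner throttles it through the semaphore
# created for that group.  TestingConfig.parallelism_group may also be a
# callable taking the test and returning a group name (or None).
config.parallelism_group = 'darwin-shadow-memory'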
@@ -24,7 +24,8 @@ class LitConfig(object):
                  noExecute, debug, isWindows,
                  params, config_prefix = None,
                  maxIndividualTestTime = 0,
-                 maxFailures = None):
+                 maxFailures = None,
+                 parallelism_groups = []):
         # The name of the test runner.
         self.progname = progname
         # The items to add to the PATH environment variable.
@@ -62,6 +63,7 @@ class LitConfig(object):
 
         self.maxIndividualTestTime = maxIndividualTestTime
         self.maxFailures = maxFailures
+        self.parallelism_groups = parallelism_groups
 
     @property
     def maxIndividualTestTime(self):
@@ -106,7 +106,7 @@ class TestingConfig:
                  environment, substitutions, unsupported,
                  test_exec_root, test_source_root, excludes,
                  available_features, pipefail, limit_to_features = [],
-                 is_early = False):
+                 is_early = False, parallelism_group = ""):
         self.parent = parent
         self.name = str(name)
         self.suffixes = set(suffixes)
@@ -125,6 +125,7 @@ class TestingConfig:
         self.limit_to_features = set(limit_to_features)
         # Whether the suite should be tested early in a given run.
         self.is_early = bool(is_early)
+        self.parallelism_group = parallelism_group
 
     def finish(self, litConfig):
         """finish() - Finish this config object, after loading is complete."""
@@ -335,7 +335,8 @@ def main_with_tmp(builtinParameters):
         params = userParams,
         config_prefix = opts.configPrefix,
         maxIndividualTestTime = maxIndividualTestTime,
-        maxFailures = opts.maxFailures)
+        maxFailures = opts.maxFailures,
+        parallelism_groups = {})
 
     # Perform test discovery.
     run = lit.run.Run(litConfig,
@@ -177,9 +177,15 @@ class Run(object):
         self.tests = tests
 
     def execute_test(self, test):
+        pg = test.config.parallelism_group
+        if callable(pg): pg = pg(test)
+
         result = None
-        start_time = time.time()
+        semaphore = None
         try:
+            if pg: semaphore = self.parallelism_semaphores[pg]
+            if semaphore: semaphore.acquire()
+            start_time = time.time()
             result = test.config.test_format.execute(test, self.lit_config)
 
             # Support deprecated result from execute() which returned the result
@@ -189,6 +195,8 @@ class Run(object):
                 result = lit.Test.Result(code, output)
             elif not isinstance(result, lit.Test.Result):
                 raise ValueError("unexpected result from test execution")
+
+            result.elapsed = time.time() - start_time
         except KeyboardInterrupt:
             raise
         except:
@@ -198,7 +206,8 @@ class Run(object):
             output += traceback.format_exc()
             output += '\n'
             result = lit.Test.Result(lit.Test.UNRESOLVED, output)
-        result.elapsed = time.time() - start_time
+            result.elapsed = time.time() - start_time
+        finally:
+            if semaphore: semaphore.release()
 
         test.setResult(result)
 
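The control flow added to execute_test above boils down to a simple pattern. Below is a minimal standalone sketch (run_one, test_fn and the group name are made-up names for illustration, not lit API): acquire the group's semaphore before running, restart the timer after the acquire so time spent waiting for a slot is not billed to the test, and release in a finally block so an exception cannot leak the slot. Unlike the diff, the sketch acquires before entering the try block, which keeps acquire and release balanced even if the test itself fails.

import time
import threading

def run_one(test_fn, group, semaphores):
    """Run test_fn under the concurrency cap of its parallelism group, if any."""
    semaphore = semaphores.get(group) if group else None
    if semaphore:
        semaphore.acquire()       # wait for a slot in the throttled group
    try:
        start = time.time()       # timer starts after the slot is held
        result = test_fn()
        return result, time.time() - start
    finally:
        if semaphore:
            semaphore.release()   # an exception in test_fn cannot leak the slot

# Example usage with an assumed group and limit.
sems = {'darwin-shadow-memory': threading.Semaphore(3)}
print(run_one(lambda: 'PASS', 'darwin-shadow-memory', sems))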
@@ -231,6 +240,7 @@ class Run(object):
             try:
                 task_impl = multiprocessing.Process
                 queue_impl = multiprocessing.Queue
+                sem_impl = multiprocessing.Semaphore
                 canceled_flag = multiprocessing.Value('i', 0)
                 consumer = MultiprocessResultsConsumer(self, display, jobs)
             except:
@@ -242,9 +252,13 @@ class Run(object):
         if not consumer:
             task_impl = threading.Thread
             queue_impl = queue.Queue
+            sem_impl = threading.Semaphore
             canceled_flag = LockedValue(0)
             consumer = ThreadResultsConsumer(display)
 
+        self.parallelism_semaphores = {k: sem_impl(v)
+            for k, v in self.lit_config.parallelism_groups.items()}
+
         # Create the test provider.
         provider = TestProvider(queue_impl, canceled_flag)
         handleFailures(provider, consumer, self.lit_config.maxFailures)
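As a footnote on the sem_impl selection in the last hunk: because threading.Semaphore and multiprocessing.Semaphore share the acquire()/release() interface, the same {group: limit} dictionary can back either worker implementation. A minimal sketch with assumed values (the group name, limit, and use_processes flag are illustrative, not taken from this patch):

import threading
import multiprocessing

parallelism_groups = {'darwin-shadow-memory': 3}   # assumed group -> limit

# Pick the semaphore flavor matching the worker implementation; the rest of
# the code is shared because both flavors expose the same interface.
use_processes = False                              # assumption for this sketch
sem_impl = multiprocessing.Semaphore if use_processes else threading.Semaphore

# One counting semaphore per named group, initialized to that group's cap,
# mirroring the dict comprehension in the Run class above.
parallelism_semaphores = {name: sem_impl(limit)
                          for name, limit in parallelism_groups.items()}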