# SPDX-License-Identifier: GPL-2.0
#
# Parses KTAP test results from a kernel dmesg log and incrementally prints
# results with reader-friendly format. Stores and returns test results in a
# Test object.
#
# Copyright (C) 2019, Google LLC.
# Author: Felix Guo <felixguoxiuping@gmail.com>
# Author: Brendan Higgins <brendanhiggins@google.com>
# Author: Rae Moar <rmoar@google.com>

from __future__ import annotations
from dataclasses import dataclass
import re
import textwrap

from enum import Enum, auto
from typing import Iterable, Iterator, List, Optional, Tuple

from kunit_printer import stdout

class Test:
	"""
	A class to represent a test parsed from KTAP results. All KTAP
	results within a test log are stored in a main Test object as
	subtests.

	Attributes:
	status : TestStatus - status of the test
	name : str - name of the test
	expected_count : Optional[int] - expected number of subtests (0 if
		single test case and None if unknown expected number of subtests)
	subtests : List[Test] - list of subtests
	log : List[str] - log of KTAP lines that correspond to the test
	counts : TestCounts - counts of the test statuses and errors of
		subtests or of the test itself if the test is a single
		test case.
	"""
	def __init__(self) -> None:
		"""Creates Test object with default attributes."""
		self.status = TestStatus.TEST_CRASHED
		self.name = ''
		self.expected_count = 0  # type: Optional[int]
		self.subtests = []  # type: List[Test]
		self.log = []  # type: List[str]
		self.counts = TestCounts()

	def __str__(self) -> str:
		"""Returns string representation of a Test class object."""
		return (f'Test({self.status}, {self.name}, {self.expected_count}, '
			f'{self.subtests}, {self.log}, {self.counts})')

	def __repr__(self) -> str:
		"""Returns string representation of a Test class object."""
		return str(self)

	def add_error(self, error_message: str) -> None:
		"""Records an error that occurred while parsing this test."""
		self.counts.errors += 1
		stdout.print_with_timestamp(stdout.red('[ERROR]') + f' Test: {self.name}: {error_message}')

	def ok_status(self) -> bool:
		"""Returns true if the status was ok, i.e. passed or skipped."""
		return self.status in (TestStatus.SUCCESS, TestStatus.SKIPPED)

class TestStatus(Enum):
	"""An enumeration class to represent the status of a test."""
	SUCCESS = auto()
	FAILURE = auto()
	SKIPPED = auto()
	TEST_CRASHED = auto()
	NO_TESTS = auto()
	FAILURE_TO_PARSE_TESTS = auto()

@dataclass
class TestCounts:
	"""
	Tracks the counts of statuses of all test cases and any errors within
	a Test.
	"""
	passed: int = 0
	failed: int = 0
	crashed: int = 0
	skipped: int = 0
	errors: int = 0

	def __str__(self) -> str:
		"""Returns the string representation of a TestCounts object."""
		statuses = [('passed', self.passed), ('failed', self.failed),
			('crashed', self.crashed), ('skipped', self.skipped),
			('errors', self.errors)]
		return f'Ran {self.total()} tests: ' + \
			', '.join(f'{s}: {n}' for s, n in statuses if n > 0)

	def total(self) -> int:
		"""Returns the total number of test cases within a test
		object, where a test case is a test with no subtests.
		"""
		return (self.passed + self.failed + self.crashed +
			self.skipped)

	def add_subtest_counts(self, counts: TestCounts) -> None:
		"""
		Adds the counts of another TestCounts object to the current
		TestCounts object. Used to add the counts of a subtest to the
		parent test.

		Parameters:
		counts - a different TestCounts object whose counts
			will be added to the counts of the TestCounts object
		"""
		self.passed += counts.passed
		self.failed += counts.failed
		self.crashed += counts.crashed
		self.skipped += counts.skipped
		self.errors += counts.errors

	def get_status(self) -> TestStatus:
		"""Returns the aggregated status of a Test using test
		counts.
		"""
		if self.total() == 0:
			return TestStatus.NO_TESTS
		if self.crashed:
			# Crashes should take priority.
			return TestStatus.TEST_CRASHED
		if self.failed:
			return TestStatus.FAILURE
		if self.passed:
			# No failures or crashes, looks good!
			return TestStatus.SUCCESS
		# We have only skipped tests.
		return TestStatus.SKIPPED

	def add_status(self, status: TestStatus) -> None:
		"""Increments the count for `status`."""
		if status == TestStatus.SUCCESS:
			self.passed += 1
		elif status == TestStatus.FAILURE:
			self.failed += 1
		elif status == TestStatus.SKIPPED:
			self.skipped += 1
		elif status != TestStatus.NO_TESTS:
			self.crashed += 1

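# Illustrative example of how TestCounts aggregates and renders (an assumed
# snippet, not executed by the tool; only non-zero statuses are printed and
# errors are not part of total()):
#   counts = TestCounts()
#   counts.add_status(TestStatus.SUCCESS)
#   counts.add_status(TestStatus.SUCCESS)
#   counts.add_status(TestStatus.SKIPPED)
#   str(counts)  # -> 'Ran 3 tests: passed: 2, skipped: 1'
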
class LineStream:
	"""
	A class to represent the lines of kernel output.
	Provides a lazy peek()/pop() interface over an iterator of
	(line#, text).
	"""
	_lines: Iterator[Tuple[int, str]]
	_next: Tuple[int, str]
	_need_next: bool
	_done: bool

	def __init__(self, lines: Iterator[Tuple[int, str]]):
		"""Creates a new LineStream that wraps the given iterator."""
		self._lines = lines
		self._done = False
		self._need_next = True
		self._next = (0, '')

	def _get_next(self) -> None:
		"""Advances the LineStream to the next line, if necessary."""
		if not self._need_next:
			return
		try:
			self._next = next(self._lines)
		except StopIteration:
			self._done = True
		finally:
			self._need_next = False

	def peek(self) -> str:
		"""Returns the current line, without advancing the LineStream.
		"""
		self._get_next()
		return self._next[1]

	def pop(self) -> str:
		"""Returns the current line and advances the LineStream to
		the next line.
		"""
		s = self.peek()
		if self._done:
			raise ValueError(f'LineStream: going past EOF, last line was {s}')
		self._need_next = True
		return s

	def __bool__(self) -> bool:
		"""Returns True if stream has more lines."""
		self._get_next()
		return not self._done

	# Only used by kunit_tool_test.py.
	def __iter__(self) -> Iterator[str]:
		"""Empties all lines stored in LineStream object into
		Iterator object and returns the Iterator object.
		"""
		while bool(self):
			yield self.pop()

	def line_number(self) -> int:
		"""Returns the line number of the current line."""
		self._get_next()
		return self._next[0]

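# A minimal usage sketch (assumed input, not part of the tool's normal flow):
#   stream = LineStream(iter([(1, 'KTAP version 1'), (2, '1..1')]))
#   stream.peek()         # -> 'KTAP version 1' (does not consume the line)
#   stream.pop()          # -> 'KTAP version 1' (consumes it)
#   stream.line_number()  # -> 2
#   bool(stream)          # -> True, one line remaining
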
# Parsing helper methods:

KTAP_START = re.compile(r'\s*KTAP version ([0-9]+)$')
TAP_START = re.compile(r'\s*TAP version ([0-9]+)$')
KTAP_END = re.compile(r'\s*(List of all partitions:|'
	'Kernel panic - not syncing: VFS:|reboot: System halted)')

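# For reference, examples of console lines these patterns are meant to find
# (assumed sample lines, matched via .search() in extract_tap_lines() below):
#   '[    0.060000] KTAP version 1'         -> KTAP_START
#   '[    0.060000] TAP version 14'         -> TAP_START
#   '[    0.220000] reboot: System halted'  -> KTAP_END
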
EXECUTOR_ERROR = re.compile(r'\s*kunit executor: (.*)$')

def extract_tap_lines(kernel_output: Iterable[str]) -> LineStream:
	"""Extracts KTAP lines from the kernel output."""
	def isolate_ktap_output(kernel_output: Iterable[str]) \
			-> Iterator[Tuple[int, str]]:
		line_num = 0
		started = False
		for line in kernel_output:
			line_num += 1
			line = line.rstrip()  # remove trailing \n
			if not started and KTAP_START.search(line):
				# start extracting KTAP lines and set prefix
				# to number of characters before version line
				prefix_len = len(
					line.split('KTAP version')[0])
				started = True
				yield line_num, line[prefix_len:]
			elif not started and TAP_START.search(line):
				# start extracting KTAP lines and set prefix
				# to number of characters before version line
				prefix_len = len(line.split('TAP version')[0])
				started = True
				yield line_num, line[prefix_len:]
			elif started and KTAP_END.search(line):
				# stop extracting KTAP lines
				break
			elif started:
				# remove the prefix, if any.
				line = line[prefix_len:]
				yield line_num, line
			elif EXECUTOR_ERROR.search(line):
				yield line_num, line
	return LineStream(lines=isolate_ktap_output(kernel_output))

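# Illustrative behaviour (an assumed three-line log, not real dmesg output):
# whatever precedes 'KTAP version' sets prefix_len, and the same number of
# characters is stripped from every following line before it is yielded.
#   lines = extract_tap_lines(['[0.1] KTAP version 1', '[0.1] 1..1', '[0.1] ok 1 suite'])
#   # the stream then yields 'KTAP version 1', '1..1', 'ok 1 suite'
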
KTAP_VERSIONS = [1]
TAP_VERSIONS = [13, 14]

def check_version(version_num: int, accepted_versions: List[int],
			version_type: str, test: Test) -> None:
	"""
	Adds error to test object if version number is too high or too
	low.

	Parameters:
	version_num - The inputted version number from the parsed KTAP or TAP
		header line
	accepted_versions - List of accepted KTAP or TAP versions
	version_type - 'KTAP' or 'TAP' depending on the type of
		version line.
	test - Test object for current test being parsed
	"""
	if version_num < min(accepted_versions):
		test.add_error(f'{version_type} version lower than expected!')
	elif version_num > max(accepted_versions):
		test.add_error(f'{version_type} version higher than expected!')

def parse_ktap_header(lines: LineStream, test: Test) -> bool:
	"""
	Parses KTAP/TAP header line and checks version number.
	Returns False if fails to parse KTAP/TAP header line.

	Accepted formats:
	- 'KTAP version [version number]'
	- 'TAP version [version number]'

	Parameters:
	lines - LineStream of KTAP output to parse
	test - Test object for current test being parsed

	Return:
	True if successfully parsed KTAP/TAP header line
	"""
	ktap_match = KTAP_START.match(lines.peek())
	tap_match = TAP_START.match(lines.peek())
	if ktap_match:
		version_num = int(ktap_match.group(1))
		check_version(version_num, KTAP_VERSIONS, 'KTAP', test)
	elif tap_match:
		version_num = int(tap_match.group(1))
		check_version(version_num, TAP_VERSIONS, 'TAP', test)
	else:
		return False
	lines.pop()
	return True

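# Header lines accepted here, for example (assumed inputs):
#   'KTAP version 1'  -> parsed; version checked against KTAP_VERSIONS
#   'TAP version 14'  -> parsed; version checked against TAP_VERSIONS
#   'TAP version 12'  -> still parsed, but check_version() records a
#                        'TAP version lower than expected!' error
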
TEST_HEADER = re.compile(r'^\s*# Subtest: (.*)$')

def parse_test_header(lines: LineStream, test: Test) -> bool:
	"""
	Parses test header and stores test name in test object.
	Returns False if fails to parse test header line.

	Accepted format:
	- '# Subtest: [test name]'

	Parameters:
	lines - LineStream of KTAP output to parse
	test - Test object for current test being parsed

	Return:
	True if successfully parsed test header line
	"""
	match = TEST_HEADER.match(lines.peek())
	if not match:
		return False
	test.name = match.group(1)
	lines.pop()
	return True

TEST_PLAN = re.compile(r'^\s*1\.\.([0-9]+)')

def parse_test_plan(lines: LineStream, test: Test) -> bool:
	"""
	Parses test plan line and stores the expected number of subtests in
	test object.
	Returns False and sets expected_count to None if there is no valid test
	plan.

	Accepted format:
	- '1..[number of subtests]'

	Parameters:
	lines - LineStream of KTAP output to parse
	test - Test object for current test being parsed

	Return:
	True if successfully parsed test plan line
	"""
	match = TEST_PLAN.match(lines.peek())
	if not match:
		test.expected_count = None
		return False
	expected_count = int(match.group(1))
	test.expected_count = expected_count
	lines.pop()
	return True

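# Example (assumed): a plan line of '1..4' sets expected_count to 4. If the
# next line is not a plan line, expected_count is set to None and the line is
# left in the stream for the caller to parse.
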
TEST_RESULT = re.compile(r'^\s*(ok|not ok) ([0-9]+) (- )?([^#]*)( # .*)?$')

TEST_RESULT_SKIP = re.compile(r'^\s*(ok|not ok) ([0-9]+) (- )?(.*) # SKIP(.*)$')

def peek_test_name_match(lines: LineStream, test: Test) -> bool:
	"""
	Matches current line with the format of a test result line and checks
	if the name matches the name of the current test.
	Returns False if fails to match format or name.

	Accepted format:
	- '[ok|not ok] [test number] [-] [test name] [optional skip
		directive]'

	Parameters:
	lines - LineStream of KTAP output to parse
	test - Test object for current test being parsed

	Return:
	True if matched a test result line and the name matching the
		expected test name
	"""
	line = lines.peek()
	match = TEST_RESULT.match(line)
	if not match:
		return False
	name = match.group(4)
	return name == test.name

def parse_test_result(lines: LineStream, test: Test,
		expected_num: int) -> bool:
	"""
	Parses test result line and stores the status and name in the test
	object. Reports an error if the test number does not match expected
	test number.
	Returns False if fails to parse test result line.

	Note that the SKIP directive is the only directive that causes a
	change in status.

	Accepted format:
	- '[ok|not ok] [test number] [-] [test name] [optional skip
		directive]'

	Parameters:
	lines - LineStream of KTAP output to parse
	test - Test object for current test being parsed
	expected_num - expected test number for current test

	Return:
	True if successfully parsed a test result line.
	"""
	line = lines.peek()
	match = TEST_RESULT.match(line)
	skip_match = TEST_RESULT_SKIP.match(line)

	# Check if line matches test result line format
	if not match:
		return False
	lines.pop()

	# Set name of test object
	if skip_match:
		test.name = skip_match.group(4)
	else:
		test.name = match.group(4)

	# Check test num
	num = int(match.group(2))
	if num != expected_num:
		test.add_error(f'Expected test number {expected_num} but found {num}')

	# Set status of test object
	status = match.group(1)
	if skip_match:
		test.status = TestStatus.SKIPPED
	elif status == 'ok':
		test.status = TestStatus.SUCCESS
	else:
		test.status = TestStatus.FAILURE
	return True

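# Example result lines and how they are classified (assumed inputs):
#   'ok 1 - example_test'               -> SUCCESS, name 'example_test'
#   'not ok 2 - example_test'           -> FAILURE
#   'ok 3 - example_test # SKIP reason' -> SKIPPED (TEST_RESULT_SKIP matches)
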
def parse_diagnostic(lines: LineStream) -> List[str]:
	"""
	Parse lines that do not match the format of a test result line or
	test header line and returns them in list.

	Line formats that are not parsed:
	- '# Subtest: [test name]'
	- '[ok|not ok] [test number] [-] [test name] [optional skip
		directive]'
	- 'KTAP version [version number]'

	Parameters:
	lines - LineStream of KTAP output to parse

	Return:
	Log of diagnostic lines
	"""
	log = []  # type: List[str]
	non_diagnostic_lines = [TEST_RESULT, TEST_HEADER, KTAP_START, TAP_START, TEST_PLAN]
	while lines and not any(re.match(lines.peek())
			for re in non_diagnostic_lines):
		log.append(lines.pop())
	return log

# Printing helper methods:

DIVIDER = '=' * 60

def format_test_divider(message: str, len_message: int) -> str:
	"""
	Returns string with message centered in fixed width divider.

	Example:
	'===================== message example ====================='

	Parameters:
	message - message to be centered in divider line
	len_message - length of the message to be printed such that
		any characters of the color codes are not counted

	Return:
	String containing message centered in fixed width divider
	"""
	default_count = 3  # default number of dashes
	len_1 = default_count
	len_2 = default_count
	difference = len(DIVIDER) - len_message - 2  # 2 spaces added
	if difference > 0:
		# calculate number of dashes for each side of the divider
		len_1 = int(difference / 2)
		len_2 = difference - len_1
	return ('=' * len_1) + f' {message} ' + ('=' * len_2)

def print_test_header(test: Test) -> None:
	"""
	Prints test header with test name and optionally the expected number
	of subtests.

	Example:
	'=================== example (2 subtests) ==================='

	Parameters:
	test - Test object representing current test being printed
	"""
	message = test.name
	if message != "":
		# Add a leading space before the subtest counts only if a test name
		# is provided using a "# Subtest" header line.
		message += " "
	if test.expected_count:
		if test.expected_count == 1:
			message += '(1 subtest)'
		else:
			message += f'({test.expected_count} subtests)'
	stdout.print_with_timestamp(format_test_divider(message, len(message)))

def print_log(log: Iterable[str]) -> None:
	"""Prints all strings in saved log for test in yellow."""
	formatted = textwrap.dedent('\n'.join(log))
	for line in formatted.splitlines():
		stdout.print_with_timestamp(stdout.yellow(line))

def format_test_result(test: Test) -> str:
	"""
	Returns string with formatted test result with colored status and test
	name.

	Example:
	'[PASSED] example'

	Parameters:
	test - Test object representing current test being printed

	Return:
	String containing formatted test result
	"""
	if test.status == TestStatus.SUCCESS:
		return stdout.green('[PASSED] ') + test.name
	if test.status == TestStatus.SKIPPED:
		return stdout.yellow('[SKIPPED] ') + test.name
	if test.status == TestStatus.NO_TESTS:
		return stdout.yellow('[NO TESTS RUN] ') + test.name
	if test.status == TestStatus.TEST_CRASHED:
		print_log(test.log)
		return stdout.red('[CRASHED] ') + test.name
	print_log(test.log)
	return stdout.red('[FAILED] ') + test.name

def print_test_result(test: Test) -> None:
	"""
	Prints result line with status of test.

	Example:
	'[PASSED] example'

	Parameters:
	test - Test object representing current test being printed
	"""
	stdout.print_with_timestamp(format_test_result(test))

def print_test_footer(test: Test) -> None:
	"""
	Prints test footer with status of test.

	Example:
	'===================== [PASSED] example ====================='

	Parameters:
	test - Test object representing current test being printed
	"""
	message = format_test_result(test)
	stdout.print_with_timestamp(format_test_divider(message,
		len(message) - stdout.color_len()))

def _summarize_failed_tests(test: Test) -> str:
	"""Tries to summarize all the failing subtests in `test`."""

	def failed_names(test: Test, parent_name: str) -> List[str]:
		# Note: we use 'main' internally for the top-level test.
		if not parent_name or parent_name == 'main':
			full_name = test.name
		else:
			full_name = parent_name + '.' + test.name

		if not test.subtests:  # this is a leaf node
			return [full_name]

		# If all the children failed, just say this subtest failed.
		# Don't summarize it down to "the top-level test failed", though.
		failed_subtests = [sub for sub in test.subtests if not sub.ok_status()]
		if parent_name and len(failed_subtests) == len(test.subtests):
			return [full_name]

		all_failures = []  # type: List[str]
		for t in failed_subtests:
			all_failures.extend(failed_names(t, full_name))
		return all_failures

	failures = failed_names(test, '')
	# If there are too many failures, printing them out will just be noisy.
	if len(failures) > 10:  # this is an arbitrary limit
		return ''

	return 'Failures: ' + ', '.join(failures)

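# Illustrative summary (assumed test tree): if every subtest of suite
# 'hw_breakpoint' failed and 'kcsan.test_barrier' is the only other failure,
# this returns 'Failures: hw_breakpoint, kcsan.test_barrier'.
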
def print_summary_line(test: Test) -> None:
	"""
	Prints summary line of test object. Color of line is dependent on
	status of test. Color is green if test passes, yellow if test is
	skipped, and red if the test fails or crashes. Summary line contains
	counts of the statuses of the test's subtests, or of the test itself
	if it has no subtests.

	Example:
	'Testing complete. Ran 2 tests: passed: 2'

	Parameters:
	test - Test object representing current test being printed
	"""
	if test.status == TestStatus.SUCCESS:
		color = stdout.green
	elif test.status in (TestStatus.SKIPPED, TestStatus.NO_TESTS):
		color = stdout.yellow
	else:
		color = stdout.red
	stdout.print_with_timestamp(color(f'Testing complete. {test.counts}'))

	# Summarize failures that might have gone off-screen since we had a lot
	# of tests (arbitrarily defined as >=100 for now).
	if test.ok_status() or test.counts.total() < 100:
		return
	summarized = _summarize_failed_tests(test)
	if not summarized:
		return
	stdout.print_with_timestamp(color(summarized))

# Other methods:

def bubble_up_test_results(test: Test) -> None:
	"""
	If the test has subtests, add the test counts of the subtests to the
	test and check if any of the tests crashed and if so set the test
	status to crashed. Otherwise if the test has no subtests add the
	status of the test to the test counts.

	Parameters:
	test - Test object for current test being parsed
	"""
	subtests = test.subtests
	counts = test.counts
	status = test.status
	for t in subtests:
		counts.add_subtest_counts(t.counts)
	if counts.total() == 0:
		counts.add_status(status)
	elif test.counts.get_status() == TestStatus.TEST_CRASHED:
		test.status = TestStatus.TEST_CRASHED

def parse_test(lines: LineStream, expected_num: int, log: List[str], is_subtest: bool) -> Test:
	"""
	Finds next test to parse in LineStream, creates new Test object,
	parses any subtests of the test, populates Test object with all
	information (status, name) about the test and the Test objects for
	any subtests, and then returns the Test object. The method accepts
	three formats of tests:

	Accepted test formats:

	- Main KTAP/TAP header

	Example:

	KTAP version 1
	1..4
	[subtests]

	- Subtest header (must include either the KTAP version line or
	  "# Subtest" header line)

	Example (preferred format with both KTAP version line and
	"# Subtest" line):

	KTAP version 1
	# Subtest: name
	1..3
	[subtests]
	ok 1 name

	Example (only "# Subtest" line):

	# Subtest: name
	1..3
	[subtests]
	ok 1 name

	Example (only KTAP version line, compliant with KTAP v1 spec):

	KTAP version 1
	1..3
	[subtests]
	ok 1 name

	- Test result line

	Example:

	ok 1 - test

	Parameters:
	lines - LineStream of KTAP output to parse
	expected_num - expected test number for test to be parsed
	log - list of strings containing any preceding diagnostic lines
		corresponding to the current test
	is_subtest - boolean indicating whether test is a subtest

	Return:
	Test object populated with characteristics and any subtests
	"""
	test = Test()
	test.log.extend(log)

	# Parse any errors prior to parsing tests
	err_log = parse_diagnostic(lines)
	test.log.extend(err_log)

	if not is_subtest:
		# If parsing the main/top-level test, parse KTAP version line and
		# test plan
		test.name = "main"
		ktap_line = parse_ktap_header(lines, test)
		test.log.extend(parse_diagnostic(lines))
		parse_test_plan(lines, test)
		parent_test = True
	else:
		# If not the main test, attempt to parse a test header containing
		# the KTAP version line and/or subtest header line
		ktap_line = parse_ktap_header(lines, test)
		subtest_line = parse_test_header(lines, test)
		parent_test = (ktap_line or subtest_line)
		if parent_test:
			# If KTAP version line and/or subtest header is found, attempt
			# to parse test plan and print test header
			test.log.extend(parse_diagnostic(lines))
			parse_test_plan(lines, test)
			print_test_header(test)
	expected_count = test.expected_count
	subtests = []
	test_num = 1
	while parent_test and (expected_count is None or test_num <= expected_count):
		# Loop to parse any subtests.
		# Break after parsing expected number of tests or
		# if expected number of tests is unknown break when test
		# result line with matching name to subtest header is found
		# or no more lines in stream.
		sub_log = parse_diagnostic(lines)
		sub_test = Test()
		if not lines or (peek_test_name_match(lines, test) and
				is_subtest):
			if expected_count and test_num <= expected_count:
				# If parser reaches end of test before
				# parsing expected number of subtests, print
				# crashed subtest and record error
				test.add_error('missing expected subtest!')
				sub_test.log.extend(sub_log)
				test.counts.add_status(
					TestStatus.TEST_CRASHED)
				print_test_result(sub_test)
			else:
				test.log.extend(sub_log)
				break
		else:
			sub_test = parse_test(lines, test_num, sub_log, True)
		subtests.append(sub_test)
		test_num += 1
	test.subtests = subtests
	if is_subtest:
		# If not main test, look for test result line
		test.log.extend(parse_diagnostic(lines))
		if test.name != "" and not peek_test_name_match(lines, test):
			test.add_error('missing subtest result line!')
		else:
			parse_test_result(lines, test, expected_num)

	# Check for there being no subtests within parent test
	if parent_test and len(subtests) == 0:
		# Don't override a bad status if this test had one reported.
		# Assumption: no subtests means CRASHED is from Test.__init__()
		if test.status in (TestStatus.TEST_CRASHED, TestStatus.SUCCESS):
			print_log(test.log)
			test.status = TestStatus.NO_TESTS
			test.add_error('0 tests run!')

	# Add statuses to TestCounts attribute in Test object
	bubble_up_test_results(test)
	if parent_test and is_subtest:
		# If test has subtests and is not the main test object, print
		# footer.
		print_test_footer(test)
	elif is_subtest:
		print_test_result(test)
	return test

def parse_run_tests(kernel_output: Iterable[str]) -> Test:
	"""
	Using kernel output, extract KTAP lines, parse the lines for test
	results and print condensed test results and summary line.

	Parameters:
	kernel_output - Iterable object containing lines of kernel output

	Return:
	Test - the main test object with all subtests.
	"""
	stdout.print_with_timestamp(DIVIDER)
	lines = extract_tap_lines(kernel_output)
	test = Test()
	if not lines:
		test.name = '<missing>'
		test.add_error('Could not find any KTAP output. Did any KUnit tests run?')
		test.status = TestStatus.FAILURE_TO_PARSE_TESTS
	else:
		test = parse_test(lines, 0, [], False)
		if test.status != TestStatus.NO_TESTS:
			test.status = test.counts.get_status()
	stdout.print_with_timestamp(DIVIDER)
	print_summary_line(test)
	return test

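# Minimal usage sketch (an assumption for illustration; kunit.py normally
# feeds this function the console output of the kernel under test):
#   with open('.kunit/test.log') as f:
#       result = parse_run_tests(f)
#   print(result.status, result.counts)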