author:    Daniel Latypov <dlatypov@google.com>  2022-10-28 14:02:56 -0700
committer: Shuah Khan <skhan@linuxfoundation.org>  2022-10-31 02:47:37 -0600
commit:    f19dd011d8de6f0c1d20abea5158aa4f5d9cea44 (patch)
tree:      cae8f5362941beabdf1080f33a17f79a70d9ca97 /tools/testing/kunit/kunit_parser.py
parent:    3ffdcf7e3b7dff04b055771c03c9646aa383cc1e (diff)
kunit: tool: print summary of failed tests if a few failed out of a lot
E.g. all the hw_breakpoint tests are failing right now. So if I run
`kunit.py run --alltests --arch=x86_64`, then I see
> Testing complete. Ran 408 tests: passed: 392, failed: 9, skipped: 7

Seeing which 9 tests failed out of the hundreds is annoying. If my
terminal doesn't have scrollback support, I have to resort to looking
at `.kunit/test.log` for the `not ok` lines.

Teach kunit.py to print a summarized list of failures if the # of tests
reaches an arbitrary threshold (>=100 tests).

To try and keep the output from being too long/noisy, this new logic
a) just reports "parent_test failed" if every child test failed
b) won't print anything if there are >10 failures (also arbitrary).

With this patch, we get an extra line of output showing:
> Testing complete. Ran 408 tests: passed: 392, failed: 9, skipped: 7
> Failures: hw_breakpoint

This also works with parameterized tests, e.g. if I add a fake failure:
> Failures: kcsan.test_atomic_builtins_missing_barrier.threads=6

Note: we didn't have enough tests for this to be a problem before. But
with commit 980ac3ad0512 ("kunit: tool: rename all_test_uml.config, use
it for --alltests"), --alltests works, and thus running >100 tests will
probably become more common.

Signed-off-by: Daniel Latypov <dlatypov@google.com>
Reviewed-by: David Gow <davidgow@google.com>
Signed-off-by: Shuah Khan <skhan@linuxfoundation.org>
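For illustration, the collapsing rule above can be sketched standalone
(a toy model with a made-up FakeTest type and invented leaf test names,
not the parser's real Test class):

    from dataclasses import dataclass, field
    from typing import List

    @dataclass
    class FakeTest:
        # Toy stand-in for kunit_parser.Test, for illustration only.
        name: str
        ok: bool = True
        subtests: List['FakeTest'] = field(default_factory=list)

    def failed_names(test: FakeTest, parent_name: str) -> List[str]:
        # Mirrors the patch's helper: if every child of a subtest
        # failed, report just the subtest's name.
        if not parent_name or parent_name == 'main':
            full_name = test.name  # 'main' is the internal top-level name
        else:
            full_name = parent_name + '.' + test.name
        if not test.subtests:
            return [full_name]
        failed = [s for s in test.subtests if not s.ok]
        if parent_name and len(failed) == len(test.subtests):
            return [full_name]
        out = []  # type: List[str]
        for t in failed:
            out.extend(failed_names(t, full_name))
        return out

    root = FakeTest('main', ok=False, subtests=[
        FakeTest('hw_breakpoint', ok=False, subtests=[
            FakeTest('test_a', ok=False),  # hypothetical leaf names
            FakeTest('test_b', ok=False),
        ]),
        FakeTest('kcsan', ok=True),
    ])
    print(failed_names(root, ''))  # -> ['hw_breakpoint']

Since both hw_breakpoint children failed, the suite is reported as one
entry rather than as two dotted names.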
Diffstat (limited to 'tools/testing/kunit/kunit_parser.py')
-rw-r--r--  tools/testing/kunit/kunit_parser.py  47
1 file changed, 47 insertions(+), 0 deletions(-)
diff --git a/tools/testing/kunit/kunit_parser.py b/tools/testing/kunit/kunit_parser.py
index 1ae873e3e341..94dba66feec5 100644
--- a/tools/testing/kunit/kunit_parser.py
+++ b/tools/testing/kunit/kunit_parser.py
@@ -58,6 +58,10 @@ class Test:
 		self.counts.errors += 1
 		stdout.print_with_timestamp(stdout.red('[ERROR]') + f' Test: {self.name}: {error_message}')
 
+	def ok_status(self) -> bool:
+		"""Returns true if the status was ok, i.e. passed or skipped."""
+		return self.status in (TestStatus.SUCCESS, TestStatus.SKIPPED)
+
 class TestStatus(Enum):
 	"""An enumeration class to represent the status of a test."""
 	SUCCESS = auto()
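The helper's semantics in isolation (a standalone sketch with its own
small Status enum; the real TestStatus has more values, but only
SUCCESS/SKIPPED count as "ok"):

    from enum import Enum, auto

    class Status(Enum):
        # Stand-in for kunit_parser.TestStatus, for illustration only.
        SUCCESS = auto()
        FAILURE = auto()
        SKIPPED = auto()

    def ok_status(status: Status) -> bool:
        # Same rule as Test.ok_status(): skips are "ok", so a run that
        # only skips will not trigger the failure summary.
        return status in (Status.SUCCESS, Status.SKIPPED)

    assert ok_status(Status.SKIPPED)
    assert not ok_status(Status.FAILURE)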
@@ -565,6 +569,40 @@ def print_test_footer(test: Test) -> None:
 	stdout.print_with_timestamp(format_test_divider(message,
 						 len(message) - stdout.color_len()))
 
+
+def _summarize_failed_tests(test: Test) -> str:
+	"""Tries to summarize all the failing subtests in `test`."""
+
+	def failed_names(test: Test, parent_name: str) -> List[str]:
+		# Note: we use 'main' internally for the top-level test.
+		if not parent_name or parent_name == 'main':
+			full_name = test.name
+		else:
+			full_name = parent_name + '.' + test.name
+
+		if not test.subtests:  # this is a leaf node
+			return [full_name]
+
+		# If all the children failed, just say this subtest failed.
+		# Don't summarize it down to "the top-level test failed", though.
+		failed_subtests = [sub for sub in test.subtests if not sub.ok_status()]
+		if parent_name and len(failed_subtests) == len(test.subtests):
+			return [full_name]
+
+		all_failures = []  # type: List[str]
+		for t in failed_subtests:
+			all_failures.extend(failed_names(t, full_name))
+		return all_failures
+
+	failures = failed_names(test, '')
+	# If there are too many failures, printing them out will just be noisy.
+	if len(failures) > 10:  # this is an arbitrary limit
+		return ''
+
+	return 'Failures: ' + ', '.join(failures)
+
+
 def print_summary_line(test: Test) -> None:
 	"""
 	Prints summary line of test object. Color of line is dependent on
@@ -587,6 +625,15 @@ def print_summary_line(test: Test) -> None:
 		color = stdout.red
 	stdout.print_with_timestamp(color(f'Testing complete. {test.counts}'))
 
+	# Summarize failures that might have gone off-screen since we had a lot
+	# of tests (arbitrarily defined as >=100 for now).
+	if test.ok_status() or test.counts.total() < 100:
+		return
+	summarized = _summarize_failed_tests(test)
+	if not summarized:
+		return
+	stdout.print_with_timestamp(color(summarized))
+
 # Other methods:
 def bubble_up_test_results(test: Test) -> None:
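Putting the thresholds together (a sketch of the gating decision above;
the function name and parameters here are invented, not kunit code):

    def should_print_failure_summary(total_tests: int,
                                     summarized_entries: int,
                                     overall_ok: bool) -> bool:
        # Mirrors the patch's two arbitrary limits: only summarize big
        # runs (>=100 tests), and stay quiet if the collapsed failure
        # list is still long (>10 entries).
        if overall_ok or total_tests < 100:
            return False
        return 1 <= summarized_entries <= 10

    assert should_print_failure_summary(408, 1, overall_ok=False)       # prints
    assert not should_print_failure_summary(50, 1, overall_ok=False)    # run too small
    assert not should_print_failure_summary(408, 25, overall_ok=False)  # too noisy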