OSDN Git Service

kunit: tool: Report an error if any test has no subtests
author: David Gow <davidgow@google.com>
Tue, 2 Nov 2021 07:30:12 +0000 (00:30 -0700)
committer: Shuah Khan <skhan@linuxfoundation.org>
Mon, 13 Dec 2021 20:36:15 +0000 (13:36 -0700)
It's possible for a test to have a subtest header, but zero valid
subtests. We used to error on this if the test plan had no subtests
listed, but it's possible to have subtests without a test plan (indeed,
this is how parameterised tests work).

Tests with 0 subtests now have the result NO_TESTS, and will report an
error (which does not halt test execution, but is printed in a scary red
colour and is noted in the results summary).

Signed-off-by: David Gow <davidgow@google.com>
Reviewed-by: Daniel Latypov <dlatypov@google.com>
Reviewed-by: Brendan Higgins <brendanhiggins@google.com>
Signed-off-by: Shuah Khan <skhan@linuxfoundation.org>
tools/testing/kunit/kunit_parser.py
tools/testing/kunit/kunit_tool_test.py
tools/testing/kunit/test_data/test_is_test_passed-no_tests_no_plan.log [new file with mode: 0644]

index 50ded55..68c847e 100644 (file)
@@ -360,9 +360,6 @@ def parse_test_plan(lines: LineStream, test: Test) -> bool:
        test.log.append(lines.pop())
        expected_count = int(match.group(1))
        test.expected_count = expected_count
-       if expected_count == 0:
-               test.status = TestStatus.NO_TESTS
-               test.add_error('0 tests run!')
        return True
 
 TEST_RESULT = re.compile(r'^(ok|not ok) ([0-9]+) (- )?([^#]*)( # .*)?$')
@@ -589,6 +586,8 @@ def format_test_result(test: Test) -> str:
                return (green('[PASSED] ') + test.name)
        elif test.status == TestStatus.SKIPPED:
                return (yellow('[SKIPPED] ') + test.name)
+       elif test.status == TestStatus.NO_TESTS:
+               return (yellow('[NO TESTS RUN] ') + test.name)
        elif test.status == TestStatus.TEST_CRASHED:
                print_log(test.log)
                return (red('[CRASHED] ') + test.name)
@@ -731,6 +730,7 @@ def parse_test(lines: LineStream, expected_num: int, log: List[str]) -> Test:
                # test plan
                test.name = "main"
                parse_test_plan(lines, test)
+               parent_test = True
        else:
                # If KTAP/TAP header is not found, test must be subtest
                # header or test result line so parse attempt to parser
@@ -744,7 +744,7 @@ def parse_test(lines: LineStream, expected_num: int, log: List[str]) -> Test:
        expected_count = test.expected_count
        subtests = []
        test_num = 1
-       while expected_count is None or test_num <= expected_count:
+       while parent_test and (expected_count is None or test_num <= expected_count):
                # Loop to parse any subtests.
                # Break after parsing expected number of tests or
                # if expected number of tests is unknown break when test
@@ -779,9 +779,15 @@ def parse_test(lines: LineStream, expected_num: int, log: List[str]) -> Test:
                        parse_test_result(lines, test, expected_num)
                else:
                        test.add_error('missing subtest result line!')
+
+       # Check for there being no tests
+       if parent_test and len(subtests) == 0:
+               test.status = TestStatus.NO_TESTS
+               test.add_error('0 tests run!')
+
        # Add statuses to TestCounts attribute in Test object
        bubble_up_test_results(test)
-       if parent_test:
+       if parent_test and not main:
                # If test has subtests and is not the main test object, print
                # footer.
                print_test_footer(test)
index 0fcdddf..1b93f11 100755 (executable)
@@ -209,6 +209,18 @@ class KUnitParserTest(unittest.TestCase):
                        kunit_parser.TestStatus.NO_TESTS,
                        result.status)
 
+       def test_no_tests_no_plan(self):
+               no_plan_log = test_data_path('test_is_test_passed-no_tests_no_plan.log')
+               with open(no_plan_log) as file:
+                       result = kunit_parser.parse_run_tests(
+                               kunit_parser.extract_tap_lines(file.readlines()))
+               self.assertEqual(0, len(result.test.subtests[0].subtests[0].subtests))
+               self.assertEqual(
+                       kunit_parser.TestStatus.NO_TESTS,
+                       result.test.subtests[0].subtests[0].status)
+               self.assertEqual(1, result.test.counts.errors)
+
+
        def test_no_kunit_output(self):
                crash_log = test_data_path('test_insufficient_memory.log')
                print_mock = mock.patch('builtins.print').start()
diff --git a/tools/testing/kunit/test_data/test_is_test_passed-no_tests_no_plan.log b/tools/testing/kunit/test_data/test_is_test_passed-no_tests_no_plan.log
new file mode 100644 (file)
index 0000000..dd873c9
--- /dev/null
@@ -0,0 +1,7 @@
+TAP version 14
+1..1
+  # Subtest: suite
+  1..1
+    # Subtest: case
+  ok 1 - case # SKIP
+ok 1 - suite