
[v2,2/2] kunit: tool: print failed tests only

Message ID 20241113222406.1590372-2-rmoar@google.com (mailing list archive)
State Accepted
Commit 3c67a2c09b3c32fd9fc5caf2afacd15267d08071
Delegated to: Brendan Higgins
Series [v2,1/2] kunit: tool: Only print the summary

Commit Message

Rae Moar Nov. 13, 2024, 10:24 p.m. UTC
Add a --failed flag to kunit.py to print only the failed tests. This printing
is done after the test run has completed.

This patch also adds a print_test() method that prints a given Test object.
Previously, all printing of tests occurred during parsing. This method could
be useful in the future when converting between KTAP and this pretty-printed
output.

Signed-off-by: Rae Moar <rmoar@google.com>
---
 tools/testing/kunit/kunit.py           | 14 ++++++++++++--
 tools/testing/kunit/kunit_parser.py    | 25 +++++++++++++++++++++++++
 tools/testing/kunit/kunit_tool_test.py |  6 +++---
 3 files changed, 40 insertions(+), 5 deletions(-)
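
As a usage sketch (an assumed invocation from a kernel tree, not taken from
the patch itself), the new flag is passed alongside the existing run options:

    ./tools/testing/kunit/kunit.py run --failed

With --failed set, per-test output goes to the null printer during parsing,
and print_test() is then called with failed_only=True, so only failed/crashed
tests and the final summary line reach stdout.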

Comments

David Gow Nov. 14, 2024, 5:58 a.m. UTC | #1
On Thu, 14 Nov 2024 at 06:24, Rae Moar <rmoar@google.com> wrote:
>
> Add a --failed flag to kunit.py to print only the failed tests. This printing
> is done after the test run has completed.
>
> This patch also adds a print_test() method that prints a given Test object.
> Previously, all printing of tests occurred during parsing. This method could
> be useful in the future when converting between KTAP and this pretty-printed
> output.
>
> Signed-off-by: Rae Moar <rmoar@google.com>
> ---

Thanks very much. This series looks good to me, now.

Reviewed-by: David Gow <davidgow@google.com>

Cheers,
-- David


Patch

diff --git a/tools/testing/kunit/kunit.py b/tools/testing/kunit/kunit.py
index 27c55a7fc1a0..676fa99a8b19 100755
--- a/tools/testing/kunit/kunit.py
+++ b/tools/testing/kunit/kunit.py
@@ -50,6 +50,7 @@  class KunitParseRequest:
 	raw_output: Optional[str]
 	json: Optional[str]
 	summary: bool
+	failed: bool
 
 @dataclass
 class KunitExecRequest(KunitParseRequest):
@@ -237,13 +238,15 @@  def parse_tests(request: KunitParseRequest, metadata: kunit_json.Metadata, input
 		return KunitResult(KunitStatus.SUCCESS, parse_time), fake_test
 
 	default_printer = stdout
-	if request.summary:
+	if request.summary or request.failed:
 		default_printer = null_printer
 
 	# Actually parse the test results.
 	test = kunit_parser.parse_run_tests(input_data, default_printer)
 	parse_time = time.time() - parse_start
 
+	if request.failed:
+		kunit_parser.print_test(test, request.failed, stdout)
 	kunit_parser.print_summary_line(test, stdout)
 
 	if request.json:
@@ -423,6 +426,10 @@  def add_parse_opts(parser: argparse.ArgumentParser) -> None:
 			    help='Prints only the summary line for parsed test results.'
 				'Does nothing if --raw_output is set.',
 			    action='store_true')
+	parser.add_argument('--failed',
+			    help='Prints only the failed parsed test results and summary line.'
+				'Does nothing if --raw_output is set.',
+			    action='store_true')
 
 
 def tree_from_args(cli_args: argparse.Namespace) -> kunit_kernel.LinuxSourceTree:
@@ -459,6 +466,7 @@  def run_handler(cli_args: argparse.Namespace) -> None:
 					raw_output=cli_args.raw_output,
 					json=cli_args.json,
 					summary=cli_args.summary,
+					failed=cli_args.failed,
 					timeout=cli_args.timeout,
 					filter_glob=cli_args.filter_glob,
 					filter=cli_args.filter,
@@ -507,6 +515,7 @@  def exec_handler(cli_args: argparse.Namespace) -> None:
 					build_dir=cli_args.build_dir,
 					json=cli_args.json,
 					summary=cli_args.summary,
+					failed=cli_args.failed,
 					timeout=cli_args.timeout,
 					filter_glob=cli_args.filter_glob,
 					filter=cli_args.filter,
@@ -532,7 +541,8 @@  def parse_handler(cli_args: argparse.Namespace) -> None:
 	# We know nothing about how the result was created!
 	metadata = kunit_json.Metadata()
 	request = KunitParseRequest(raw_output=cli_args.raw_output,
-					json=cli_args.json, summary=cli_args.summary)
+					json=cli_args.json, summary=cli_args.summary,
+					failed=cli_args.failed)
 	result, _ = parse_tests(request, metadata, kunit_output)
 	if result.status != KunitStatus.SUCCESS:
 		sys.exit(1)
diff --git a/tools/testing/kunit/kunit_parser.py b/tools/testing/kunit/kunit_parser.py
index 732f448263de..29fc27e8949b 100644
--- a/tools/testing/kunit/kunit_parser.py
+++ b/tools/testing/kunit/kunit_parser.py
@@ -574,7 +574,32 @@  def print_test_footer(test: Test, printer: Printer) -> None:
 	printer.print_with_timestamp(format_test_divider(message,
 		len(message) - printer.color_len()))
 
+def print_test(test: Test, failed_only: bool, printer: Printer) -> None:
+	"""
+	Prints Test object to given printer. For a child test, the result line is
+	printed. For a parent test, the test header, all child test results, and
+	the test footer are all printed. If failed_only is true, only failed/crashed
+	tests will be printed.
 
+	Parameters:
+	test - Test object to print
+	failed_only - True if only failed/crashed tests should be printed.
+	printer - Printer object to output results
+	"""
+	if test.name == "main":
+		printer.print_with_timestamp(DIVIDER)
+		for subtest in test.subtests:
+			print_test(subtest, failed_only, printer)
+		printer.print_with_timestamp(DIVIDER)
+	elif test.subtests != []:
+		if not failed_only or not test.ok_status():
+			print_test_header(test, printer)
+			for subtest in test.subtests:
+				print_test(subtest, failed_only, printer)
+			print_test_footer(test, printer)
+	else:
+		if not failed_only or not test.ok_status():
+			print_test_result(test, printer)
 
 def _summarize_failed_tests(test: Test) -> str:
 	"""Tries to summarize all the failing subtests in `test`."""
diff --git a/tools/testing/kunit/kunit_tool_test.py b/tools/testing/kunit/kunit_tool_test.py
index 02aa296d8850..0bcb0cc002f8 100755
--- a/tools/testing/kunit/kunit_tool_test.py
+++ b/tools/testing/kunit/kunit_tool_test.py
@@ -811,7 +811,7 @@  class KUnitMainTest(unittest.TestCase):
 		self.linux_source_mock.run_kernel.return_value = ['TAP version 14', 'init: random output'] + want
 
 		got = kunit._list_tests(self.linux_source_mock,
-				     kunit.KunitExecRequest(None, None, False, '.kunit', 300, 'suite*', '', None, None, 'suite', False, False))
+				     kunit.KunitExecRequest(None, None, False, False, '.kunit', 300, 'suite*', '', None, None, 'suite', False, False))
 		self.assertEqual(got, want)
 		# Should respect the user's filter glob when listing tests.
 		self.linux_source_mock.run_kernel.assert_called_once_with(
@@ -824,7 +824,7 @@  class KUnitMainTest(unittest.TestCase):
 
 		# Should respect the user's filter glob when listing tests.
 		mock_tests.assert_called_once_with(mock.ANY,
-				     kunit.KunitExecRequest(None, None, False, '.kunit', 300, 'suite*.test*', '', None, None, 'suite', False, False))
+				     kunit.KunitExecRequest(None, None, False, False, '.kunit', 300, 'suite*.test*', '', None, None, 'suite', False, False))
 		self.linux_source_mock.run_kernel.assert_has_calls([
 			mock.call(args=None, build_dir='.kunit', filter_glob='suite.test*', filter='', filter_action=None, timeout=300),
 			mock.call(args=None, build_dir='.kunit', filter_glob='suite2.test*', filter='', filter_action=None, timeout=300),
@@ -837,7 +837,7 @@  class KUnitMainTest(unittest.TestCase):
 
 		# Should respect the user's filter glob when listing tests.
 		mock_tests.assert_called_once_with(mock.ANY,
-				     kunit.KunitExecRequest(None, None, False, '.kunit', 300, 'suite*', '', None, None, 'test', False, False))
+				     kunit.KunitExecRequest(None, None, False, False, '.kunit', 300, 'suite*', '', None, None, 'test', False, False))
 		self.linux_source_mock.run_kernel.assert_has_calls([
 			mock.call(args=None, build_dir='.kunit', filter_glob='suite.test1', filter='', filter_action=None, timeout=300),
 			mock.call(args=None, build_dir='.kunit', filter_glob='suite.test2', filter='', filter_action=None, timeout=300),
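
For completeness, a usage sketch of the same flag with the standalone parse
subcommand; the .kunit/test.log path is an assumption based on the tool's
default build directory and output file name, not something stated in this
patch:

    ./tools/testing/kunit/kunit.py parse --failed .kunit/test.log

This reparses previously captured kernel output and, as above, prints only
the failed/crashed tests followed by the summary line.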