@@ -11,3 +11,4 @@
!Makefile
!Makefile.kvm
!*.test
+!*.py
new file mode 100644
@@ -0,0 +1,96 @@
+# SPDX-License-Identifier: GPL-2.0
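+"""KVM selftests runner.
+
+Example (run from tools/testing/selftests/kvm):
+
+    python3 runner --test_dirs testcases -j 5 -t 10 --output result
+"""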
+import pathlib
+import argparse
+import platform
+import logging
+import os
+import test_runner
+
+
+def cli():
+ parser = argparse.ArgumentParser(
+ prog="KVM Selftests Runner",
+ description="Run KVM selftests with different configurations",
+ formatter_class=argparse.RawTextHelpFormatter
+ )
+
+ parser.add_argument("--tests",
+ nargs="*",
+ default=[],
+                        help="Test cases to run. Provide space-separated test case file paths")
+
+ parser.add_argument("--test_dirs",
+ nargs="*",
+ default=[],
+                        help="Run tests in the given directories and all their subdirectories. Provide space-separated paths to add multiple directories.")
+
+ parser.add_argument("-j",
+ "--jobs",
+ default=1,
+ type=int,
+ help="Number of parallel test runners to start")
+
+ parser.add_argument("-t",
+ "--timeout",
+ default=120,
+ type=int,
+                        help="Seconds to wait for a single test to finish before killing it")
+
+ parser.add_argument("-o",
+ "--output",
+ nargs='?',
+ help="Output directory for test results.")
+
+ return parser.parse_args()
+
+
+def setup_logging(args):
+ output = args.output
+    if output is None:
+ logging.basicConfig(level=logging.INFO,
+ format="%(asctime)s | %(process)d | %(levelname)8s | %(message)s")
+ else:
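+        # Mirror log records to a file in the output directory and to
+        # the console.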
+ logging_file = os.path.join(output, "log")
+ pathlib.Path(output).mkdir(parents=True, exist_ok=True)
+ logging.basicConfig(level=logging.INFO,
+ format="%(asctime)s | %(process)d | %(levelname)8s | %(message)s",
+ handlers=[
+ logging.FileHandler(logging_file, mode='w'),
+ logging.StreamHandler()
+ ])
+
+
+def fetch_tests_from_dirs(scan_dirs, exclude_dirs):
+ test_files = []
+ for scan_dir in scan_dirs:
+ for root, dirs, files in os.walk(scan_dir):
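+            # Prune excluded directories in place so os.walk() does not
+            # descend into them (works because the walk is top-down).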
+            dirs[:] = [d for d in dirs if d not in exclude_dirs]
+ for file in files:
+ test_files.append(os.path.join(root, file))
+ return test_files
+
+
+def fetch_test_files(args):
+    exclude_dirs = ["aarch64", "x86_64", "riscv", "s390x"]
+    # Don't exclude tests of the current platform. Guard the removal so
+    # an unlisted machine type (e.g. "riscv64") doesn't raise ValueError.
+    machine = platform.machine()
+    if machine in exclude_dirs:
+        exclude_dirs.remove(machine)
+
+ test_files = args.tests
+ test_files.extend(fetch_tests_from_dirs(args.test_dirs, exclude_dirs))
+ # Remove duplicates
+ test_files = list(dict.fromkeys(test_files))
+ return test_files
+
+
+def main():
+ args = cli()
+ setup_logging(args)
+ test_files = fetch_test_files(args)
+ tr = test_runner.TestRunner(
+ test_files, args.output, args.timeout, args.jobs)
+ tr.start()
+
+
+if __name__ == "__main__":
+ main()
new file mode 100644
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: GPL-2.0
+import contextlib
+import subprocess
+import os
+import pathlib
+
+
+class Command:
+ """Executes a command
+
+ Just execute a command. Dump output to the directory if provided.
+
+ Returns the exit code of the command.
+ """
+
+ def __init__(self, command, timeout=None, output_dir=None):
+ self.command = command
+ self.timeout = timeout
+ self.output_dir = output_dir
+
+ def __run(self, output=None, error=None):
+ proc = subprocess.run(self.command, stdout=output,
+ stderr=error, universal_newlines=True,
+ shell=True, timeout=self.timeout)
+ return proc.returncode
+
+ def run(self):
+ if self.output_dir is not None:
+ pathlib.Path(self.output_dir).mkdir(parents=True, exist_ok=True)
+
+ output = None
+ error = None
+ with contextlib.ExitStack() as stack:
+ if self.output_dir is not None:
+ output_path = os.path.join(self.output_dir, "stdout")
+ output = stack.enter_context(
+ open(output_path, encoding="utf-8", mode="w"))
+
+ error_path = os.path.join(self.output_dir, "stderr")
+ error = stack.enter_context(
+ open(error_path, encoding="utf-8", mode="w"))
+ return self.__run(output, error)
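+
+
+# Illustrative usage (hypothetical values):
+#     rc = Command("echo hello", timeout=5, output_dir="out").run()
+# runs the command with a 5 second timeout and, because output_dir is
+# set, writes out/stdout and out/stderr.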
new file mode 100644
@@ -0,0 +1,49 @@
+# SPDX-License-Identifier: GPL-2.0
+import subprocess
+import command
+import pathlib
+import enum
+import os
+
+
+class SelftestStatus(str, enum.Enum):
+ PASSED = "Passed"
+ FAILED = "Failed"
+ SKIPPED = "Skipped"
+ TIMED_OUT = "Timed out"
+ NO_RUN = "No run"
+
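+    # Make str(member) return its value (e.g. "Passed") rather than
+    # the default "SelftestStatus.PASSED".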
+ def __str__(self):
+ return str.__str__(self)
+
+
+class Selftest:
+ """A single test.
+
+ A test which can be run on its own.
+ """
+
+    def __init__(self, test_path, output_dir=None, timeout=None):
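+        # The testcase file contains the shell command line that runs
+        # this selftest.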
+ test_command = pathlib.Path(test_path).read_text().strip()
+ if not test_command:
+ raise ValueError("Empty test command in " + test_path)
+
+ if output_dir is not None:
+ output_dir = os.path.join(output_dir, test_path)
+ self.test_path = test_path
+ self.command = command.Command(test_command, timeout, output_dir)
+ self.status = SelftestStatus.NO_RUN
+
+ def run(self):
+ try:
+ ret = self.command.run()
+ if ret == 0:
+ self.status = SelftestStatus.PASSED
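+            # Selftests exit with 4 (kselftest's KSFT_SKIP) when skipped.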
+ elif ret == 4:
+ self.status = SelftestStatus.SKIPPED
+ else:
+ self.status = SelftestStatus.FAILED
+        except subprocess.TimeoutExpired:
+ self.status = SelftestStatus.TIMED_OUT
new file mode 100644
@@ -0,0 +1,40 @@
+# SPDX-License-Identifier: GPL-2.0
+import concurrent.futures
+import logging
+import selftest
+
+
+class TestRunner:
+ def __init__(self, test_files, output_dir, timeout, parallelism):
+ self.parallelism = parallelism
+ self.tests = []
+
+ for test_file in test_files:
+ self.tests.append(selftest.Selftest(
+ test_file, output_dir, timeout))
+
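+    # Runs in a worker process; returning the test object sends its
+    # updated status back to the parent via pickling.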
+ def _run(self, test):
+ test.run()
+ return test
+
+ def start(self):
+ status = {x: 0 for x in selftest.SelftestStatus}
+ count = 0
+ with concurrent.futures.ProcessPoolExecutor(max_workers=self.parallelism) as executor:
+ all_futures = []
+ for test in self.tests:
+ future = executor.submit(self._run, test)
+ all_futures.append(future)
+
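+            # Log each result as soon as its test finishes, regardless
+            # of submission order.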
+ for future in concurrent.futures.as_completed(all_futures):
+ test = future.result()
+ logging.info(f"[{test.status}] {test.test_path}")
+ status[test.status] += 1
+ count += 1
+
+ logging.info(f"Tests ran: {count} tests")
+        for result, num in status.items():
+            logging.info(f"{result}: {num}")
Create a KVM selftest runner to run selftests with various options for
execution. The runner provides the following features:

1. --timeout/-t: Max time each test may take before it is killed.
2. --jobs/-j: Run this many tests in parallel.
3. --tests: Space-separated paths of the tests to execute.
4. --test_dirs: Directories to search for test files to run.
5. --output/-o: Create a directory with the given name and dump the
   output of each test in it hierarchically.
6. Print a summary at the end.

The runner needs the testcase files which are provided in the previous
patch.

Examples of starting the runner (cwd is tools/testing/selftests/kvm):

- Basic run:
  python3 runner --test_dirs testcases

- Run a specific test:
  python3 runner --tests ./testcases/dirty_log_perf_test/default.test

- Run tests in parallel:
  python3 runner --test_dirs testcases -j 10

- Run 5 tests in parallel at a time, with a timeout of 10 seconds each,
  and dump output in the "result" directory:
  python3 runner --test_dirs testcases -j 5 -t 10 --output result

Sample output from the above command:

python3_binary runner --test_dirs testcases -j 5 -t 10 --output result
2025-02-21 16:45:46,774 | 16809 | INFO | [Passed] testcases/guest_print_test/default.test
2025-02-21 16:45:47,040 | 16809 | INFO | [Passed] testcases/kvm_create_max_vcpus/default.test
2025-02-21 16:45:49,244 | 16809 | INFO | [Passed] testcases/dirty_log_perf_test/default.test
...
2025-02-21 16:46:07,225 | 16809 | INFO | [Passed] testcases/x86_64/pmu_event_filter_test/default.test
2025-02-21 16:46:08,020 | 16809 | INFO | [Passed] testcases/x86_64/vmx_preemption_timer_test/default.test
2025-02-21 16:46:09,734 | 16809 | INFO | [Timed out] testcases/x86_64/pmu_counters_test/default.test
2025-02-21 16:46:10,202 | 16809 | INFO | [Passed] testcases/hardware_disable_test/default.test
2025-02-21 16:46:10,203 | 16809 | INFO | Tests ran: 85 tests
2025-02-21 16:46:10,204 | 16809 | INFO | Passed: 61
2025-02-21 16:46:10,204 | 16809 | INFO | Failed: 4
2025-02-21 16:46:10,204 | 16809 | INFO | Skipped: 17
2025-02-21 16:46:10,204 | 16809 | INFO | Timed out: 3
2025-02-21 16:46:10,204 | 16809 | INFO | No run: 0

Output dumped in the result directory:

$ tree result/
result/
├── log
└── testcases
    ├── access_tracking_perf_test
    │   └── default.test
    │       ├── stderr
    │       └── stdout
    ├── coalesced_io_test
    │   └── default.test
    │       ├── stderr
    │       └── stdout
    ...

The result/log file has the status of each test like the one printed on
the console. Each stderr and stdout file holds the data from that
test's execution.

The runner is implemented in Python and needs at least version 3.6.

Signed-off-by: Vipin Sharma <vipinsh@google.com>
---
 tools/testing/selftests/kvm/.gitignore        |  1 +
 .../testing/selftests/kvm/runner/__main__.py  | 96 +++++++++++++++++++
 tools/testing/selftests/kvm/runner/command.py | 42 ++++++++
 .../testing/selftests/kvm/runner/selftest.py  | 49 ++++++++++
 .../selftests/kvm/runner/test_runner.py       | 40 ++++++++
 5 files changed, 228 insertions(+)
 create mode 100644 tools/testing/selftests/kvm/runner/__main__.py
 create mode 100644 tools/testing/selftests/kvm/runner/command.py
 create mode 100644 tools/testing/selftests/kvm/runner/selftest.py
 create mode 100644 tools/testing/selftests/kvm/runner/test_runner.py
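
(Each testcase file simply contains the shell command the runner should
execute. As a hypothetical illustration, a file like
testcases/dirty_log_perf_test/default.test could hold the single line
"./dirty_log_perf_test"; the real testcase files come from the previous
patch.)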