From ace9429bb58fd418f0c81d4c2835699bddf6bde6 Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Thu, 11 Apr 2024 10:27:49 +0200 Subject: Adding upstream version 6.6.15. Signed-off-by: Daniel Baumann --- tools/testing/kunit/.gitignore | 4 + tools/testing/kunit/configs/all_tests.config | 44 ++ tools/testing/kunit/configs/arch_uml.config | 8 + tools/testing/kunit/configs/coverage_uml.config | 11 + tools/testing/kunit/configs/default.config | 3 + tools/testing/kunit/kunit.py | 591 +++++++++++++++ tools/testing/kunit/kunit_config.py | 108 +++ tools/testing/kunit/kunit_json.py | 63 ++ tools/testing/kunit/kunit_kernel.py | 376 ++++++++++ tools/testing/kunit/kunit_parser.py | 823 ++++++++++++++++++++ tools/testing/kunit/kunit_printer.py | 48 ++ tools/testing/kunit/kunit_tool_test.py | 831 +++++++++++++++++++++ tools/testing/kunit/mypy.ini | 6 + tools/testing/kunit/qemu_config.py | 20 + tools/testing/kunit/qemu_configs/alpha.py | 10 + tools/testing/kunit/qemu_configs/arm.py | 13 + tools/testing/kunit/qemu_configs/arm64.py | 12 + tools/testing/kunit/qemu_configs/i386.py | 10 + tools/testing/kunit/qemu_configs/m68k.py | 10 + tools/testing/kunit/qemu_configs/powerpc.py | 12 + tools/testing/kunit/qemu_configs/riscv.py | 28 + tools/testing/kunit/qemu_configs/s390.py | 14 + tools/testing/kunit/qemu_configs/sh.py | 17 + tools/testing/kunit/qemu_configs/sparc.py | 10 + tools/testing/kunit/qemu_configs/x86_64.py | 10 + tools/testing/kunit/run_checks.py | 81 ++ .../kunit/test_data/test_config_printk_time.log | 32 + .../kunit/test_data/test_insufficient_memory.log | 0 .../test_data/test_interrupted_tap_output.log | 38 + .../test_data/test_is_test_passed-all_passed.log | 33 + .../test_is_test_passed-all_passed_nested.log | 34 + .../test_data/test_is_test_passed-failure.log | 37 + .../test_data/test_is_test_passed-kselftest.log | 14 + .../test_data/test_is_test_passed-missing_plan.log | 31 + .../test_is_test_passed-no_tests_no_plan.log | 7 + .../test_is_test_passed-no_tests_run_no_header.log | 75 ++ ...est_is_test_passed-no_tests_run_with_header.log | 2 + .../test_data/test_kernel_panic_interrupt.log | 26 + .../kunit/test_data/test_multiple_prefixes.log | 32 + .../test_data/test_output_isolated_correctly.log | 106 +++ .../test_output_with_prefix_isolated_correctly.log | 33 + .../kunit/test_data/test_parse_ktap_output.log | 8 + .../kunit/test_data/test_parse_subtest_header.log | 7 + .../kunit/test_data/test_pound_no_prefix.log | 34 + tools/testing/kunit/test_data/test_pound_sign.log | 34 + .../kunit/test_data/test_read_from_file.kconfig | 17 + .../kunit/test_data/test_skip_all_tests.log | 15 + tools/testing/kunit/test_data/test_skip_tests.log | 15 + .../testing/kunit/test_data/test_strip_hyphen.log | 16 + 49 files changed, 3809 insertions(+) create mode 100644 tools/testing/kunit/.gitignore create mode 100644 tools/testing/kunit/configs/all_tests.config create mode 100644 tools/testing/kunit/configs/arch_uml.config create mode 100644 tools/testing/kunit/configs/coverage_uml.config create mode 100644 tools/testing/kunit/configs/default.config create mode 100755 tools/testing/kunit/kunit.py create mode 100644 tools/testing/kunit/kunit_config.py create mode 100644 tools/testing/kunit/kunit_json.py create mode 100644 tools/testing/kunit/kunit_kernel.py create mode 100644 tools/testing/kunit/kunit_parser.py create mode 100644 tools/testing/kunit/kunit_printer.py create mode 100755 tools/testing/kunit/kunit_tool_test.py create mode 100644 tools/testing/kunit/mypy.ini create mode 100644 
tools/testing/kunit/qemu_config.py create mode 100644 tools/testing/kunit/qemu_configs/alpha.py create mode 100644 tools/testing/kunit/qemu_configs/arm.py create mode 100644 tools/testing/kunit/qemu_configs/arm64.py create mode 100644 tools/testing/kunit/qemu_configs/i386.py create mode 100644 tools/testing/kunit/qemu_configs/m68k.py create mode 100644 tools/testing/kunit/qemu_configs/powerpc.py create mode 100644 tools/testing/kunit/qemu_configs/riscv.py create mode 100644 tools/testing/kunit/qemu_configs/s390.py create mode 100644 tools/testing/kunit/qemu_configs/sh.py create mode 100644 tools/testing/kunit/qemu_configs/sparc.py create mode 100644 tools/testing/kunit/qemu_configs/x86_64.py create mode 100755 tools/testing/kunit/run_checks.py create mode 100644 tools/testing/kunit/test_data/test_config_printk_time.log create mode 100644 tools/testing/kunit/test_data/test_insufficient_memory.log create mode 100644 tools/testing/kunit/test_data/test_interrupted_tap_output.log create mode 100644 tools/testing/kunit/test_data/test_is_test_passed-all_passed.log create mode 100644 tools/testing/kunit/test_data/test_is_test_passed-all_passed_nested.log create mode 100644 tools/testing/kunit/test_data/test_is_test_passed-failure.log create mode 100644 tools/testing/kunit/test_data/test_is_test_passed-kselftest.log create mode 100644 tools/testing/kunit/test_data/test_is_test_passed-missing_plan.log create mode 100644 tools/testing/kunit/test_data/test_is_test_passed-no_tests_no_plan.log create mode 100644 tools/testing/kunit/test_data/test_is_test_passed-no_tests_run_no_header.log create mode 100644 tools/testing/kunit/test_data/test_is_test_passed-no_tests_run_with_header.log create mode 100644 tools/testing/kunit/test_data/test_kernel_panic_interrupt.log create mode 100644 tools/testing/kunit/test_data/test_multiple_prefixes.log create mode 100644 tools/testing/kunit/test_data/test_output_isolated_correctly.log create mode 100644 tools/testing/kunit/test_data/test_output_with_prefix_isolated_correctly.log create mode 100644 tools/testing/kunit/test_data/test_parse_ktap_output.log create mode 100644 tools/testing/kunit/test_data/test_parse_subtest_header.log create mode 100644 tools/testing/kunit/test_data/test_pound_no_prefix.log create mode 100644 tools/testing/kunit/test_data/test_pound_sign.log create mode 100644 tools/testing/kunit/test_data/test_read_from_file.kconfig create mode 100644 tools/testing/kunit/test_data/test_skip_all_tests.log create mode 100644 tools/testing/kunit/test_data/test_skip_tests.log create mode 100644 tools/testing/kunit/test_data/test_strip_hyphen.log (limited to 'tools/testing/kunit') diff --git a/tools/testing/kunit/.gitignore b/tools/testing/kunit/.gitignore new file mode 100644 index 0000000000..1c63e31f7e --- /dev/null +++ b/tools/testing/kunit/.gitignore @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0-only +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] \ No newline at end of file diff --git a/tools/testing/kunit/configs/all_tests.config b/tools/testing/kunit/configs/all_tests.config new file mode 100644 index 0000000000..3bf506d4a6 --- /dev/null +++ b/tools/testing/kunit/configs/all_tests.config @@ -0,0 +1,44 @@ +# This config enables as many tests as possible under UML. +# It is intended for use in continuous integration systems and similar for +# automated testing of as much as possible. +# The config is manually maintained, though it uses KUNIT_ALL_TESTS=y to enable +# any tests whose dependencies are already satisfied. 
Please feel free to add +# more options if they enable any new tests. + +CONFIG_KUNIT=y +CONFIG_KUNIT_EXAMPLE_TEST=y +CONFIG_KUNIT_ALL_TESTS=y + +CONFIG_FORTIFY_SOURCE=y + +CONFIG_IIO=y + +CONFIG_EXT4_FS=y + +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y + +CONFIG_PCI=y +CONFIG_USB4=y + +CONFIG_NET=y +CONFIG_MCTP=y + +CONFIG_INET=y +CONFIG_MPTCP=y + +CONFIG_DAMON=y +CONFIG_DAMON_VADDR=y +CONFIG_DAMON_PADDR=y +CONFIG_DEBUG_FS=y +CONFIG_DAMON_DBGFS=y + +CONFIG_REGMAP_BUILD=y + +CONFIG_SECURITY=y +CONFIG_SECURITY_APPARMOR=y + +CONFIG_SOUND=y +CONFIG_SND=y +CONFIG_SND_SOC=y +CONFIG_SND_SOC_TOPOLOGY_BUILD=y diff --git a/tools/testing/kunit/configs/arch_uml.config b/tools/testing/kunit/configs/arch_uml.config new file mode 100644 index 0000000000..54ad897268 --- /dev/null +++ b/tools/testing/kunit/configs/arch_uml.config @@ -0,0 +1,8 @@ +# Config options which are added to UML builds by default + +# Enable virtio/pci, as a lot of tests require it. +CONFIG_VIRTIO_UML=y +CONFIG_UML_PCI_OVER_VIRTIO=y + +# Enable FORTIFY_SOURCE for wider checking. +CONFIG_FORTIFY_SOURCE=y diff --git a/tools/testing/kunit/configs/coverage_uml.config b/tools/testing/kunit/configs/coverage_uml.config new file mode 100644 index 0000000000..bacb77664f --- /dev/null +++ b/tools/testing/kunit/configs/coverage_uml.config @@ -0,0 +1,11 @@ +# This config fragment enables coverage on UML, which is different from the +# normal gcov used in other arches (no debugfs). +# Example usage: +# ./tools/testing/kunit/kunit.py run \ +# --kunitconfig=tools/testing/kunit/configs/all_tests.config \ +# --kunitconfig=tools/testing/kunit/configs/coverage_uml.config + +CONFIG_DEBUG_KERNEL=y +CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y +CONFIG_GCOV=y diff --git a/tools/testing/kunit/configs/default.config b/tools/testing/kunit/configs/default.config new file mode 100644 index 0000000000..e67af7b9f1 --- /dev/null +++ b/tools/testing/kunit/configs/default.config @@ -0,0 +1,3 @@ +CONFIG_KUNIT=y +CONFIG_KUNIT_EXAMPLE_TEST=y +CONFIG_KUNIT_ALL_TESTS=y diff --git a/tools/testing/kunit/kunit.py b/tools/testing/kunit/kunit.py new file mode 100755 index 0000000000..bc74088c45 --- /dev/null +++ b/tools/testing/kunit/kunit.py @@ -0,0 +1,591 @@ +#!/usr/bin/env python3 +# SPDX-License-Identifier: GPL-2.0 +# +# A thin wrapper on top of the KUnit Kernel +# +# Copyright (C) 2019, Google LLC.
+# Author: Felix Guo +# Author: Brendan Higgins + +import argparse +import os +import re +import shlex +import sys +import time + +assert sys.version_info >= (3, 7), "Python version is too old" + +from dataclasses import dataclass +from enum import Enum, auto +from typing import Iterable, List, Optional, Sequence, Tuple + +import kunit_json +import kunit_kernel +import kunit_parser +from kunit_printer import stdout + +class KunitStatus(Enum): + SUCCESS = auto() + CONFIG_FAILURE = auto() + BUILD_FAILURE = auto() + TEST_FAILURE = auto() + +@dataclass +class KunitResult: + status: KunitStatus + elapsed_time: float + +@dataclass +class KunitConfigRequest: + build_dir: str + make_options: Optional[List[str]] + +@dataclass +class KunitBuildRequest(KunitConfigRequest): + jobs: int + +@dataclass +class KunitParseRequest: + raw_output: Optional[str] + json: Optional[str] + +@dataclass +class KunitExecRequest(KunitParseRequest): + build_dir: str + timeout: int + filter_glob: str + filter: str + filter_action: Optional[str] + kernel_args: Optional[List[str]] + run_isolated: Optional[str] + list_tests: bool + list_tests_attr: bool + +@dataclass +class KunitRequest(KunitExecRequest, KunitBuildRequest): + pass + + +def get_kernel_root_path() -> str: + path = sys.argv[0] if not __file__ else __file__ + parts = os.path.realpath(path).split('tools/testing/kunit') + if len(parts) != 2: + sys.exit(1) + return parts[0] + +def config_tests(linux: kunit_kernel.LinuxSourceTree, + request: KunitConfigRequest) -> KunitResult: + stdout.print_with_timestamp('Configuring KUnit Kernel ...') + + config_start = time.time() + success = linux.build_reconfig(request.build_dir, request.make_options) + config_end = time.time() + status = KunitStatus.SUCCESS if success else KunitStatus.CONFIG_FAILURE + return KunitResult(status, config_end - config_start) + +def build_tests(linux: kunit_kernel.LinuxSourceTree, + request: KunitBuildRequest) -> KunitResult: + stdout.print_with_timestamp('Building KUnit Kernel ...') + + build_start = time.time() + success = linux.build_kernel(request.jobs, + request.build_dir, + request.make_options) + build_end = time.time() + status = KunitStatus.SUCCESS if success else KunitStatus.BUILD_FAILURE + return KunitResult(status, build_end - build_start) + +def config_and_build_tests(linux: kunit_kernel.LinuxSourceTree, + request: KunitBuildRequest) -> KunitResult: + config_result = config_tests(linux, request) + if config_result.status != KunitStatus.SUCCESS: + return config_result + + return build_tests(linux, request) + +def _list_tests(linux: kunit_kernel.LinuxSourceTree, request: KunitExecRequest) -> List[str]: + args = ['kunit.action=list'] + + if request.kernel_args: + args.extend(request.kernel_args) + + output = linux.run_kernel(args=args, + timeout=request.timeout, + filter_glob=request.filter_glob, + filter=request.filter, + filter_action=request.filter_action, + build_dir=request.build_dir) + lines = kunit_parser.extract_tap_lines(output) + # Hack! Drop the dummy TAP version header that the executor prints out. + lines.pop() + + # Filter out any extraneous non-test output that might have gotten mixed in. 
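+	# Each test is listed as 'suite.test' (e.g. 'example.example_simple_test'), which is the shape the regex below keeps.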
+ return [l for l in lines if re.match(r'^[^\s.]+\.[^\s.]+$', l)] + +def _list_tests_attr(linux: kunit_kernel.LinuxSourceTree, request: KunitExecRequest) -> Iterable[str]: + args = ['kunit.action=list_attr'] + + if request.kernel_args: + args.extend(request.kernel_args) + + output = linux.run_kernel(args=args, + timeout=request.timeout, + filter_glob=request.filter_glob, + filter=request.filter, + filter_action=request.filter_action, + build_dir=request.build_dir) + lines = kunit_parser.extract_tap_lines(output) + # Hack! Drop the dummy TAP version header that the executor prints out. + lines.pop() + + # Filter out any extraneous non-test output that might have gotten mixed in. + return lines + +def _suites_from_test_list(tests: List[str]) -> List[str]: + """Extracts all the suites from an ordered list of tests.""" + suites = [] # type: List[str] + for t in tests: + parts = t.split('.', maxsplit=2) + if len(parts) != 2: + raise ValueError(f'internal KUnit error, test name should be of the form "<suite>.<test>", got "{t}"') + suite, _ = parts + if not suites or suites[-1] != suite: + suites.append(suite) + return suites + +def exec_tests(linux: kunit_kernel.LinuxSourceTree, request: KunitExecRequest) -> KunitResult: + filter_globs = [request.filter_glob] + if request.list_tests: + output = _list_tests(linux, request) + for line in output: + print(line.rstrip()) + return KunitResult(status=KunitStatus.SUCCESS, elapsed_time=0.0) + if request.list_tests_attr: + attr_output = _list_tests_attr(linux, request) + for line in attr_output: + print(line.rstrip()) + return KunitResult(status=KunitStatus.SUCCESS, elapsed_time=0.0) + if request.run_isolated: + tests = _list_tests(linux, request) + if request.run_isolated == 'test': + filter_globs = tests + elif request.run_isolated == 'suite': + filter_globs = _suites_from_test_list(tests) + # Apply the test-part of the user's glob, if present. + if '.' in request.filter_glob: + test_glob = request.filter_glob.split('.', maxsplit=2)[1] + filter_globs = [g + '.'+ test_glob for g in filter_globs] + + metadata = kunit_json.Metadata(arch=linux.arch(), build_dir=request.build_dir, def_config='kunit_defconfig') + + test_counts = kunit_parser.TestCounts() + exec_time = 0.0 + for i, filter_glob in enumerate(filter_globs): + stdout.print_with_timestamp('Starting KUnit Kernel ({}/{})...'.format(i+1, len(filter_globs))) + + test_start = time.time() + run_result = linux.run_kernel( + args=request.kernel_args, + timeout=request.timeout, + filter_glob=filter_glob, + filter=request.filter, + filter_action=request.filter_action, + build_dir=request.build_dir) + + _, test_result = parse_tests(request, metadata, run_result) + # run_kernel() doesn't block on the kernel exiting. + # That only happens after we get the last line of output from `run_result`. + # So exec_time here actually contains parsing + execution time, which is fine.
+ test_end = time.time() + exec_time += test_end - test_start + + test_counts.add_subtest_counts(test_result.counts) + + if len(filter_globs) == 1 and test_counts.crashed > 0: + bd = request.build_dir + print('The kernel seems to have crashed; you can decode the stack traces with:') + print('$ scripts/decode_stacktrace.sh {}/vmlinux {} < {} | tee {}/decoded.log | {} parse'.format( + bd, bd, kunit_kernel.get_outfile_path(bd), bd, sys.argv[0])) + + kunit_status = _map_to_overall_status(test_counts.get_status()) + return KunitResult(status=kunit_status, elapsed_time=exec_time) + +def _map_to_overall_status(test_status: kunit_parser.TestStatus) -> KunitStatus: + if test_status in (kunit_parser.TestStatus.SUCCESS, kunit_parser.TestStatus.SKIPPED): + return KunitStatus.SUCCESS + return KunitStatus.TEST_FAILURE + +def parse_tests(request: KunitParseRequest, metadata: kunit_json.Metadata, input_data: Iterable[str]) -> Tuple[KunitResult, kunit_parser.Test]: + parse_start = time.time() + + if request.raw_output: + # Treat unparsed results as one passing test. + fake_test = kunit_parser.Test() + fake_test.status = kunit_parser.TestStatus.SUCCESS + fake_test.counts.passed = 1 + + output: Iterable[str] = input_data + if request.raw_output == 'all': + pass + elif request.raw_output == 'kunit': + output = kunit_parser.extract_tap_lines(output) + for line in output: + print(line.rstrip()) + parse_time = time.time() - parse_start + return KunitResult(KunitStatus.SUCCESS, parse_time), fake_test + + + # Actually parse the test results. + test = kunit_parser.parse_run_tests(input_data) + parse_time = time.time() - parse_start + + if request.json: + json_str = kunit_json.get_json_result( + test=test, + metadata=metadata) + if request.json == 'stdout': + print(json_str) + else: + with open(request.json, 'w') as f: + f.write(json_str) + stdout.print_with_timestamp("Test results stored in %s" % + os.path.abspath(request.json)) + + if test.status != kunit_parser.TestStatus.SUCCESS: + return KunitResult(KunitStatus.TEST_FAILURE, parse_time), test + + return KunitResult(KunitStatus.SUCCESS, parse_time), test + +def run_tests(linux: kunit_kernel.LinuxSourceTree, + request: KunitRequest) -> KunitResult: + run_start = time.time() + + config_result = config_tests(linux, request) + if config_result.status != KunitStatus.SUCCESS: + return config_result + + build_result = build_tests(linux, request) + if build_result.status != KunitStatus.SUCCESS: + return build_result + + exec_result = exec_tests(linux, request) + + run_end = time.time() + + stdout.print_with_timestamp(( + 'Elapsed time: %.3fs total, %.3fs configuring, %.3fs ' + + 'building, %.3fs running\n') % ( + run_end - run_start, + config_result.elapsed_time, + build_result.elapsed_time, + exec_result.elapsed_time)) + return exec_result + +# Problem: +# $ kunit.py run --json +# works as one would expect and prints the parsed test results as JSON. +# $ kunit.py run --json suite_name +# would *not* pass suite_name as the filter_glob and print as json. +# argparse will consider it to be another way of writing +# $ kunit.py run --json=suite_name +# i.e. it would run all tests, and dump the json to a `suite_name` file. 
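+# (argparse lets a flag declared with nargs='?' greedily consume the next token as its value.)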
+# So we hackily automatically rewrite --json => --json=stdout +pseudo_bool_flag_defaults = { + '--json': 'stdout', + '--raw_output': 'kunit', +} +def massage_argv(argv: Sequence[str]) -> Sequence[str]: + def massage_arg(arg: str) -> str: + if arg not in pseudo_bool_flag_defaults: + return arg + return f'{arg}={pseudo_bool_flag_defaults[arg]}' + return list(map(massage_arg, argv)) + +def get_default_jobs() -> int: + return len(os.sched_getaffinity(0)) + +def add_common_opts(parser: argparse.ArgumentParser) -> None: + parser.add_argument('--build_dir', + help='As in the make command, it specifies the build ' 'directory.', + type=str, default='.kunit', metavar='DIR') + parser.add_argument('--make_options', + help='X=Y make option, can be repeated.', + action='append', metavar='X=Y') + parser.add_argument('--alltests', + help='Run all KUnit tests via tools/testing/kunit/configs/all_tests.config', + action='store_true') + parser.add_argument('--kunitconfig', + help='Path to Kconfig fragment that enables KUnit tests.' + ' If given a directory, (e.g. lib/kunit), "<dir>/.kunitconfig" ' 'will get automatically appended. If repeated, the files ' 'are blindly concatenated, which might not work in all cases.', + action='append', metavar='PATHS') + parser.add_argument('--kconfig_add', + help='Additional Kconfig options to append to the ' '.kunitconfig, e.g. CONFIG_KASAN=y. Can be repeated.', + action='append', metavar='CONFIG_X=Y') + + parser.add_argument('--arch', + help=('Specifies the architecture to run tests under. ' 'The architecture specified here must match the ' 'string passed to the ARCH make param, ' 'e.g. i386, x86_64, arm, um, etc. Non-UML ' 'architectures run on QEMU.'), + type=str, default='um', metavar='ARCH') + + parser.add_argument('--cross_compile', + help=('Sets make\'s CROSS_COMPILE variable; it should ' 'be set to a toolchain path prefix (the prefix ' 'of gcc and other tools in your toolchain, for ' 'example `sparc64-linux-gnu-` if you have the ' 'sparc toolchain installed on your system, or ' '`$HOME/toolchains/microblaze/gcc-9.2.0-nolibc/microblaze-linux/bin/microblaze-linux-` ' 'if you have downloaded the microblaze toolchain ' 'from the 0-day website to a directory in your ' 'home directory called `toolchains`).'), + metavar='PREFIX') + + parser.add_argument('--qemu_config', + help=('Takes a path to a file containing ' 'a QemuArchParams object.'), + type=str, metavar='FILE') + + parser.add_argument('--qemu_args', + help='Additional QEMU arguments, e.g. "-smp 8"', + action='append', metavar='') + +def add_build_opts(parser: argparse.ArgumentParser) -> None: + parser.add_argument('--jobs', + help='As in the make command, "Specifies the number of ' 'jobs (commands) to run simultaneously."', + type=int, default=get_default_jobs(), metavar='N') + +def add_exec_opts(parser: argparse.ArgumentParser) -> None: + parser.add_argument('--timeout', + help='maximum number of seconds to allow for all tests ' 'to run. This does not include time taken to build the ' 'tests.', + type=int, + default=300, + metavar='SECONDS') + parser.add_argument('filter_glob', + help='Filter which KUnit test suites/tests run at ' 'boot-time, e.g. list* or list*.*del_test', + type=str, + nargs='?', + default='', + metavar='filter_glob') + parser.add_argument('--filter', + help='Filter KUnit tests with attributes, ' 'e.g. 
module=example or speed>slow', + type=str, + default='') + parser.add_argument('--filter_action', + help='If set to skip, filtered tests will be skipped, ' 'e.g. --filter_action=skip. Otherwise they will not run.', + type=str, + choices=['skip']) + parser.add_argument('--kernel_args', + help='Kernel command-line parameters. May be repeated', + action='append', metavar='') + parser.add_argument('--run_isolated', help='If set, boot the kernel for each ' 'individual suite/test. This can be useful for debugging ' 'a non-hermetic test, one that might pass/fail based on ' 'what ran before it.', + type=str, + choices=['suite', 'test']) + parser.add_argument('--list_tests', help='If set, list all tests that will be ' 'run.', + action='store_true') + parser.add_argument('--list_tests_attr', help='If set, list all tests and test ' 'attributes.', + action='store_true') + +def add_parse_opts(parser: argparse.ArgumentParser) -> None: + parser.add_argument('--raw_output', help='If set, don\'t parse output from kernel. ' 'By default, filters to just KUnit output. Use ' '--raw_output=all to show everything', + type=str, nargs='?', const='all', default=None, choices=['all', 'kunit']) + parser.add_argument('--json', + nargs='?', + help='Prints parsed test results as JSON to stdout or a file if ' 'a filename is specified. Does nothing if --raw_output is set.', + type=str, const='stdout', default=None, metavar='FILE') + + +def tree_from_args(cli_args: argparse.Namespace) -> kunit_kernel.LinuxSourceTree: + """Returns a LinuxSourceTree based on the user's arguments.""" + # Allow users to specify multiple arguments in one string, e.g. '-smp 8' + qemu_args: List[str] = [] + if cli_args.qemu_args: + for arg in cli_args.qemu_args: + qemu_args.extend(shlex.split(arg)) + + kunitconfigs = cli_args.kunitconfig if cli_args.kunitconfig else [] + if cli_args.alltests: + # Prepend so user-specified options take priority if we ever allow + # --kunitconfig options to have differing options.
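+		# (For now, conflicting values across kunitconfig files are still an error; see get_parsed_kunitconfig() in kunit_kernel.py.)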
+ kunitconfigs = [kunit_kernel.ALL_TESTS_CONFIG_PATH] + kunitconfigs + + return kunit_kernel.LinuxSourceTree(cli_args.build_dir, + kunitconfig_paths=kunitconfigs, + kconfig_add=cli_args.kconfig_add, + arch=cli_args.arch, + cross_compile=cli_args.cross_compile, + qemu_config_path=cli_args.qemu_config, + extra_qemu_args=qemu_args) + + +def run_handler(cli_args: argparse.Namespace) -> None: + if not os.path.exists(cli_args.build_dir): + os.mkdir(cli_args.build_dir) + + linux = tree_from_args(cli_args) + request = KunitRequest(build_dir=cli_args.build_dir, + make_options=cli_args.make_options, + jobs=cli_args.jobs, + raw_output=cli_args.raw_output, + json=cli_args.json, + timeout=cli_args.timeout, + filter_glob=cli_args.filter_glob, + filter=cli_args.filter, + filter_action=cli_args.filter_action, + kernel_args=cli_args.kernel_args, + run_isolated=cli_args.run_isolated, + list_tests=cli_args.list_tests, + list_tests_attr=cli_args.list_tests_attr) + result = run_tests(linux, request) + if result.status != KunitStatus.SUCCESS: + sys.exit(1) + + +def config_handler(cli_args: argparse.Namespace) -> None: + if cli_args.build_dir and ( + not os.path.exists(cli_args.build_dir)): + os.mkdir(cli_args.build_dir) + + linux = tree_from_args(cli_args) + request = KunitConfigRequest(build_dir=cli_args.build_dir, + make_options=cli_args.make_options) + result = config_tests(linux, request) + stdout.print_with_timestamp(( + 'Elapsed time: %.3fs\n') % ( + result.elapsed_time)) + if result.status != KunitStatus.SUCCESS: + sys.exit(1) + + +def build_handler(cli_args: argparse.Namespace) -> None: + linux = tree_from_args(cli_args) + request = KunitBuildRequest(build_dir=cli_args.build_dir, + make_options=cli_args.make_options, + jobs=cli_args.jobs) + result = config_and_build_tests(linux, request) + stdout.print_with_timestamp(( + 'Elapsed time: %.3fs\n') % ( + result.elapsed_time)) + if result.status != KunitStatus.SUCCESS: + sys.exit(1) + + +def exec_handler(cli_args: argparse.Namespace) -> None: + linux = tree_from_args(cli_args) + exec_request = KunitExecRequest(raw_output=cli_args.raw_output, + build_dir=cli_args.build_dir, + json=cli_args.json, + timeout=cli_args.timeout, + filter_glob=cli_args.filter_glob, + filter=cli_args.filter, + filter_action=cli_args.filter_action, + kernel_args=cli_args.kernel_args, + run_isolated=cli_args.run_isolated, + list_tests=cli_args.list_tests, + list_tests_attr=cli_args.list_tests_attr) + result = exec_tests(linux, exec_request) + stdout.print_with_timestamp(( + 'Elapsed time: %.3fs\n') % (result.elapsed_time)) + if result.status != KunitStatus.SUCCESS: + sys.exit(1) + + +def parse_handler(cli_args: argparse.Namespace) -> None: + if cli_args.file is None: + sys.stdin.reconfigure(errors='backslashreplace') # type: ignore + kunit_output = sys.stdin # type: Iterable[str] + else: + with open(cli_args.file, 'r', errors='backslashreplace') as f: + kunit_output = f.read().splitlines() + # We know nothing about how the result was created! 
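+	# Fall back to the empty-string defaults for arch, def_config, and build_dir.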
+ metadata = kunit_json.Metadata() + request = KunitParseRequest(raw_output=cli_args.raw_output, + json=cli_args.json) + result, _ = parse_tests(request, metadata, kunit_output) + if result.status != KunitStatus.SUCCESS: + sys.exit(1) + + +subcommand_handlers_map = { + 'run': run_handler, + 'config': config_handler, + 'build': build_handler, + 'exec': exec_handler, + 'parse': parse_handler +} + + +def main(argv: Sequence[str]) -> None: + parser = argparse.ArgumentParser( + description='Helps writing and running KUnit tests.') + subparser = parser.add_subparsers(dest='subcommand') + + # The 'run' command will config, build, exec, and parse in one go. + run_parser = subparser.add_parser('run', help='Runs KUnit tests.') + add_common_opts(run_parser) + add_build_opts(run_parser) + add_exec_opts(run_parser) + add_parse_opts(run_parser) + + config_parser = subparser.add_parser('config', + help='Ensures that .config contains all of ' 'the options in .kunitconfig') + add_common_opts(config_parser) + + build_parser = subparser.add_parser('build', help='Builds a kernel with KUnit tests') + add_common_opts(build_parser) + add_build_opts(build_parser) + + exec_parser = subparser.add_parser('exec', help='Run a kernel with KUnit tests') + add_common_opts(exec_parser) + add_exec_opts(exec_parser) + add_parse_opts(exec_parser) + + # The 'parse' option is special, as it doesn't need the kernel source + # (therefore there is no need for a build_dir, hence no add_common_opts) + # and the '--file' argument is not relevant to 'run', so isn't in + # add_parse_opts() + parse_parser = subparser.add_parser('parse', + help='Parses KUnit results from a file, ' 'and prints the formatted results.') + add_parse_opts(parse_parser) + parse_parser.add_argument('file', + help='Specifies the file to read results from.', + type=str, nargs='?', metavar='input_file') + + cli_args = parser.parse_args(massage_argv(argv)) + + if get_kernel_root_path(): + os.chdir(get_kernel_root_path()) + + subcommand_handler = subcommand_handlers_map.get(cli_args.subcommand, None) + + if subcommand_handler is None: + parser.print_help() + return + + subcommand_handler(cli_args) + + +if __name__ == '__main__': + main(sys.argv[1:]) diff --git a/tools/testing/kunit/kunit_config.py b/tools/testing/kunit/kunit_config.py new file mode 100644 index 0000000000..eb5dd01210 --- /dev/null +++ b/tools/testing/kunit/kunit_config.py @@ -0,0 +1,108 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Builds a .config from a kunitconfig. +# +# Copyright (C) 2019, Google LLC.
+# Author: Felix Guo +# Author: Brendan Higgins + +from dataclasses import dataclass +import re +from typing import Any, Dict, Iterable, List, Tuple + +CONFIG_IS_NOT_SET_PATTERN = r'^# CONFIG_(\w+) is not set$' +CONFIG_PATTERN = r'^CONFIG_(\w+)=(\S+|".*")$' + +@dataclass(frozen=True) +class KconfigEntry: + name: str + value: str + + def __str__(self) -> str: + if self.value == 'n': + return f'# CONFIG_{self.name} is not set' + return f'CONFIG_{self.name}={self.value}' + + +class KconfigParseError(Exception): + """Error parsing Kconfig defconfig or .config.""" + + +class Kconfig: + """Represents defconfig or .config specified using the Kconfig language.""" + + def __init__(self) -> None: + self._entries = {} # type: Dict[str, str] + + def __eq__(self, other: Any) -> bool: + if not isinstance(other, self.__class__): + return False + return self._entries == other._entries + + def __repr__(self) -> str: + return ','.join(str(e) for e in self.as_entries()) + + def as_entries(self) -> Iterable[KconfigEntry]: + for name, value in self._entries.items(): + yield KconfigEntry(name, value) + + def add_entry(self, name: str, value: str) -> None: + self._entries[name] = value + + def is_subset_of(self, other: 'Kconfig') -> bool: + for name, value in self._entries.items(): + b = other._entries.get(name) + if b is None: + if value == 'n': + continue + return False + if value != b: + return False + return True + + def conflicting_options(self, other: 'Kconfig') -> List[Tuple[KconfigEntry, KconfigEntry]]: + diff = [] # type: List[Tuple[KconfigEntry, KconfigEntry]] + for name, value in self._entries.items(): + b = other._entries.get(name) + if b and value != b: + pair = (KconfigEntry(name, value), KconfigEntry(name, b)) + diff.append(pair) + return diff + + def merge_in_entries(self, other: 'Kconfig') -> None: + for name, value in other._entries.items(): + self._entries[name] = value + + def write_to_file(self, path: str) -> None: + with open(path, 'a+') as f: + for e in self.as_entries(): + f.write(str(e) + '\n') + +def parse_file(path: str) -> Kconfig: + with open(path, 'r') as f: + return parse_from_string(f.read()) + +def parse_from_string(blob: str) -> Kconfig: + """Parses a string containing Kconfig entries.""" + kconfig = Kconfig() + is_not_set_matcher = re.compile(CONFIG_IS_NOT_SET_PATTERN) + config_matcher = re.compile(CONFIG_PATTERN) + for line in blob.split('\n'): + line = line.strip() + if not line: + continue + + match = config_matcher.match(line) + if match: + kconfig.add_entry(match.group(1), match.group(2)) + continue + + empty_match = is_not_set_matcher.match(line) + if empty_match: + kconfig.add_entry(empty_match.group(1), 'n') + continue + + if line[0] == '#': + continue + raise KconfigParseError('Failed to parse: ' + line) + return kconfig diff --git a/tools/testing/kunit/kunit_json.py b/tools/testing/kunit/kunit_json.py new file mode 100644 index 0000000000..10ff65689d --- /dev/null +++ b/tools/testing/kunit/kunit_json.py @@ -0,0 +1,63 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Generates JSON from KUnit results according to +# KernelCI spec: https://github.com/kernelci/kernelci-doc/wiki/Test-API +# +# Copyright (C) 2020, Google LLC. 
+# Author: Heidi Fahim + +from dataclasses import dataclass +import json +from typing import Any, Dict + +from kunit_parser import Test, TestStatus + +@dataclass +class Metadata: + """Stores metadata about this run to include in get_json_result().""" + arch: str = '' + def_config: str = '' + build_dir: str = '' + +JsonObj = Dict[str, Any] + +_status_map: Dict[TestStatus, str] = { + TestStatus.SUCCESS: "PASS", + TestStatus.SKIPPED: "SKIP", + TestStatus.TEST_CRASHED: "ERROR", +} + +def _get_group_json(test: Test, common_fields: JsonObj) -> JsonObj: + sub_groups = [] # List[JsonObj] + test_cases = [] # List[JsonObj] + + for subtest in test.subtests: + if subtest.subtests: + sub_group = _get_group_json(subtest, common_fields) + sub_groups.append(sub_group) + continue + status = _status_map.get(subtest.status, "FAIL") + test_cases.append({"name": subtest.name, "status": status}) + + test_group = { + "name": test.name, + "sub_groups": sub_groups, + "test_cases": test_cases, + } + test_group.update(common_fields) + return test_group + +def get_json_result(test: Test, metadata: Metadata) -> str: + common_fields = { + "arch": metadata.arch, + "defconfig": metadata.def_config, + "build_environment": metadata.build_dir, + "lab_name": None, + "kernel": None, + "job": None, + "git_branch": "kselftest", + } + + test_group = _get_group_json(test, common_fields) + test_group["name"] = "KUnit Test Group" + return json.dumps(test_group, indent=4) diff --git a/tools/testing/kunit/kunit_kernel.py b/tools/testing/kunit/kunit_kernel.py new file mode 100644 index 0000000000..0b6488efed --- /dev/null +++ b/tools/testing/kunit/kunit_kernel.py @@ -0,0 +1,376 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Runs UML kernel, collects output, and handles errors. +# +# Copyright (C) 2019, Google LLC. 
+# Author: Felix Guo +# Author: Brendan Higgins + +import importlib.abc +import importlib.util +import logging +import subprocess +import os +import shlex +import shutil +import signal +import threading +from typing import Iterator, List, Optional, Tuple +from types import FrameType + +import kunit_config +import qemu_config + +KCONFIG_PATH = '.config' +KUNITCONFIG_PATH = '.kunitconfig' +OLD_KUNITCONFIG_PATH = 'last_used_kunitconfig' +DEFAULT_KUNITCONFIG_PATH = 'tools/testing/kunit/configs/default.config' +ALL_TESTS_CONFIG_PATH = 'tools/testing/kunit/configs/all_tests.config' +UML_KCONFIG_PATH = 'tools/testing/kunit/configs/arch_uml.config' +OUTFILE_PATH = 'test.log' +ABS_TOOL_PATH = os.path.abspath(os.path.dirname(__file__)) +QEMU_CONFIGS_DIR = os.path.join(ABS_TOOL_PATH, 'qemu_configs') + +class ConfigError(Exception): + """Represents an error trying to configure the Linux kernel.""" + + +class BuildError(Exception): + """Represents an error trying to build the Linux kernel.""" + + +class LinuxSourceTreeOperations: + """An abstraction over command line operations performed on a source tree.""" + + def __init__(self, linux_arch: str, cross_compile: Optional[str]): + self._linux_arch = linux_arch + self._cross_compile = cross_compile + + def make_mrproper(self) -> None: + try: + subprocess.check_output(['make', 'mrproper'], stderr=subprocess.STDOUT) + except OSError as e: + raise ConfigError('Could not call make command: ' + str(e)) + except subprocess.CalledProcessError as e: + raise ConfigError(e.output.decode()) + + def make_arch_config(self, base_kunitconfig: kunit_config.Kconfig) -> kunit_config.Kconfig: + return base_kunitconfig + + def make_olddefconfig(self, build_dir: str, make_options: Optional[List[str]]) -> None: + command = ['make', 'ARCH=' + self._linux_arch, 'O=' + build_dir, 'olddefconfig'] + if self._cross_compile: + command += ['CROSS_COMPILE=' + self._cross_compile] + if make_options: + command.extend(make_options) + print('Populating config with:\n$', ' '.join(command)) + try: + subprocess.check_output(command, stderr=subprocess.STDOUT) + except OSError as e: + raise ConfigError('Could not call make command: ' + str(e)) + except subprocess.CalledProcessError as e: + raise ConfigError(e.output.decode()) + + def make(self, jobs: int, build_dir: str, make_options: Optional[List[str]]) -> None: + command = ['make', 'ARCH=' + self._linux_arch, 'O=' + build_dir, '--jobs=' + str(jobs)] + if make_options: + command.extend(make_options) + if self._cross_compile: + command += ['CROSS_COMPILE=' + self._cross_compile] + print('Building with:\n$', ' '.join(command)) + try: + proc = subprocess.Popen(command, + stderr=subprocess.PIPE, + stdout=subprocess.DEVNULL) + except OSError as e: + raise BuildError('Could not execute make: ' + str(e)) + except subprocess.CalledProcessError as e: + raise BuildError(e.output) + _, stderr = proc.communicate() + if proc.returncode != 0: + raise BuildError(stderr.decode()) + if stderr: # likely only due to build warnings + print(stderr.decode()) + + def start(self, params: List[str], build_dir: str) -> subprocess.Popen: + raise RuntimeError('not implemented!') + + +class LinuxSourceTreeOperationsQemu(LinuxSourceTreeOperations): + + def __init__(self, qemu_arch_params: qemu_config.QemuArchParams, cross_compile: Optional[str]): + super().__init__(linux_arch=qemu_arch_params.linux_arch, + cross_compile=cross_compile) + self._kconfig = qemu_arch_params.kconfig + self._qemu_arch = qemu_arch_params.qemu_arch + self._kernel_path =
qemu_arch_params.kernel_path + self._kernel_command_line = qemu_arch_params.kernel_command_line + ' kunit_shutdown=reboot' + self._extra_qemu_params = qemu_arch_params.extra_qemu_params + self._serial = qemu_arch_params.serial + + def make_arch_config(self, base_kunitconfig: kunit_config.Kconfig) -> kunit_config.Kconfig: + kconfig = kunit_config.parse_from_string(self._kconfig) + kconfig.merge_in_entries(base_kunitconfig) + return kconfig + + def start(self, params: List[str], build_dir: str) -> subprocess.Popen: + kernel_path = os.path.join(build_dir, self._kernel_path) + qemu_command = ['qemu-system-' + self._qemu_arch, + '-nodefaults', + '-m', '1024', + '-kernel', kernel_path, + '-append', ' '.join(params + [self._kernel_command_line]), + '-no-reboot', + '-nographic', + '-serial', self._serial] + self._extra_qemu_params + # Note: shlex.join() does what we want, but requires python 3.8+. + print('Running tests with:\n$', ' '.join(shlex.quote(arg) for arg in qemu_command)) + return subprocess.Popen(qemu_command, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + text=True, errors='backslashreplace') + +class LinuxSourceTreeOperationsUml(LinuxSourceTreeOperations): + """An abstraction over command line operations performed on a source tree.""" + + def __init__(self, cross_compile: Optional[str]=None): + super().__init__(linux_arch='um', cross_compile=cross_compile) + + def make_arch_config(self, base_kunitconfig: kunit_config.Kconfig) -> kunit_config.Kconfig: + kconfig = kunit_config.parse_file(UML_KCONFIG_PATH) + kconfig.merge_in_entries(base_kunitconfig) + return kconfig + + def start(self, params: List[str], build_dir: str) -> subprocess.Popen: + """Runs the Linux UML binary. Must be named 'linux'.""" + linux_bin = os.path.join(build_dir, 'linux') + params.extend(['mem=1G', 'console=tty', 'kunit_shutdown=halt']) + return subprocess.Popen([linux_bin] + params, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + text=True, errors='backslashreplace') + +def get_kconfig_path(build_dir: str) -> str: + return os.path.join(build_dir, KCONFIG_PATH) + +def get_kunitconfig_path(build_dir: str) -> str: + return os.path.join(build_dir, KUNITCONFIG_PATH) + +def get_old_kunitconfig_path(build_dir: str) -> str: + return os.path.join(build_dir, OLD_KUNITCONFIG_PATH) + +def get_parsed_kunitconfig(build_dir: str, + kunitconfig_paths: Optional[List[str]]=None) -> kunit_config.Kconfig: + if not kunitconfig_paths: + path = get_kunitconfig_path(build_dir) + if not os.path.exists(path): + shutil.copyfile(DEFAULT_KUNITCONFIG_PATH, path) + return kunit_config.parse_file(path) + + merged = kunit_config.Kconfig() + + for path in kunitconfig_paths: + if os.path.isdir(path): + path = os.path.join(path, KUNITCONFIG_PATH) + if not os.path.exists(path): + raise ConfigError(f'Specified kunitconfig ({path}) does not exist') + + partial = kunit_config.parse_file(path) + diff = merged.conflicting_options(partial) + if diff: + diff_str = '\n\n'.join(f'{a}\n vs from {path}\n{b}' for a, b in diff) + raise ConfigError(f'Multiple values specified for {len(diff)} options in kunitconfig:\n{diff_str}') + merged.merge_in_entries(partial) + return merged + +def get_outfile_path(build_dir: str) -> str: + return os.path.join(build_dir, OUTFILE_PATH) + +def _default_qemu_config_path(arch: str) -> str: + config_path = os.path.join(QEMU_CONFIGS_DIR, arch + '.py') + if os.path.isfile(config_path): + return config_path + + options = [f[:-3] for f in os.listdir(QEMU_CONFIGS_DIR) if 
f.endswith('.py')] + raise ConfigError(arch + ' is not a valid arch, options are ' + str(sorted(options))) + +def _get_qemu_ops(config_path: str, + extra_qemu_args: Optional[List[str]], + cross_compile: Optional[str]) -> Tuple[str, LinuxSourceTreeOperations]: + # The module name/path has very little to do with where the actual file + # exists (I learned this through experimentation and could not find it + # anywhere in the Python documentation). + # + # Basically, we completely ignore the actual file location of the config + # we are loading and just tell Python that the module lives in the + # QEMU_CONFIGS_DIR for import purposes regardless of where it actually + # exists as a file. + module_path = '.' + os.path.join(os.path.basename(QEMU_CONFIGS_DIR), os.path.basename(config_path)) + spec = importlib.util.spec_from_file_location(module_path, config_path) + assert spec is not None + config = importlib.util.module_from_spec(spec) + # See https://github.com/python/typeshed/pull/2626 for context. + assert isinstance(spec.loader, importlib.abc.Loader) + spec.loader.exec_module(config) + + if not hasattr(config, 'QEMU_ARCH'): + raise ValueError('qemu_config module missing "QEMU_ARCH": ' + config_path) + params: qemu_config.QemuArchParams = config.QEMU_ARCH + if extra_qemu_args: + params.extra_qemu_params.extend(extra_qemu_args) + return params.linux_arch, LinuxSourceTreeOperationsQemu( + params, cross_compile=cross_compile) + +class LinuxSourceTree: + """Represents a Linux kernel source tree with KUnit tests.""" + + def __init__( + self, + build_dir: str, + kunitconfig_paths: Optional[List[str]]=None, + kconfig_add: Optional[List[str]]=None, + arch: Optional[str]=None, + cross_compile: Optional[str]=None, + qemu_config_path: Optional[str]=None, + extra_qemu_args: Optional[List[str]]=None) -> None: + signal.signal(signal.SIGINT, self.signal_handler) + if qemu_config_path: + self._arch, self._ops = _get_qemu_ops(qemu_config_path, extra_qemu_args, cross_compile) + else: + self._arch = 'um' if arch is None else arch + if self._arch == 'um': + self._ops = LinuxSourceTreeOperationsUml(cross_compile=cross_compile) + else: + qemu_config_path = _default_qemu_config_path(self._arch) + _, self._ops = _get_qemu_ops(qemu_config_path, extra_qemu_args, cross_compile) + + self._kconfig = get_parsed_kunitconfig(build_dir, kunitconfig_paths) + if kconfig_add: + kconfig = kunit_config.parse_from_string('\n'.join(kconfig_add)) + self._kconfig.merge_in_entries(kconfig) + + def arch(self) -> str: + return self._arch + + def clean(self) -> bool: + try: + self._ops.make_mrproper() + except ConfigError as e: + logging.error(e) + return False + return True + + def validate_config(self, build_dir: str) -> bool: + kconfig_path = get_kconfig_path(build_dir) + validated_kconfig = kunit_config.parse_file(kconfig_path) + if self._kconfig.is_subset_of(validated_kconfig): + return True + missing = set(self._kconfig.as_entries()) - set(validated_kconfig.as_entries()) + message = 'Not all Kconfig options selected in kunitconfig were in the generated .config.\n' \ 'This is probably due to unsatisfied dependencies.\n' \ 'Missing: ' + ', '.join(str(e) for e in missing) + if self._arch == 'um': + message += '\nNote: many Kconfig options aren\'t available on UML. You can try running ' \ 'on a different architecture with something like "--arch=x86_64".'
+ logging.error(message) + return False + + def build_config(self, build_dir: str, make_options: Optional[List[str]]) -> bool: + kconfig_path = get_kconfig_path(build_dir) + if build_dir and not os.path.exists(build_dir): + os.mkdir(build_dir) + try: + self._kconfig = self._ops.make_arch_config(self._kconfig) + self._kconfig.write_to_file(kconfig_path) + self._ops.make_olddefconfig(build_dir, make_options) + except ConfigError as e: + logging.error(e) + return False + if not self.validate_config(build_dir): + return False + + old_path = get_old_kunitconfig_path(build_dir) + if os.path.exists(old_path): + os.remove(old_path) # write_to_file appends to the file + self._kconfig.write_to_file(old_path) + return True + + def _kunitconfig_changed(self, build_dir: str) -> bool: + old_path = get_old_kunitconfig_path(build_dir) + if not os.path.exists(old_path): + return True + + old_kconfig = kunit_config.parse_file(old_path) + return old_kconfig != self._kconfig + + def build_reconfig(self, build_dir: str, make_options: Optional[List[str]]) -> bool: + """Creates a new .config if it is not a subset of the .kunitconfig.""" + kconfig_path = get_kconfig_path(build_dir) + if not os.path.exists(kconfig_path): + print('Generating .config ...') + return self.build_config(build_dir, make_options) + + existing_kconfig = kunit_config.parse_file(kconfig_path) + self._kconfig = self._ops.make_arch_config(self._kconfig) + + if self._kconfig.is_subset_of(existing_kconfig) and not self._kunitconfig_changed(build_dir): + return True + print('Regenerating .config ...') + os.remove(kconfig_path) + return self.build_config(build_dir, make_options) + + def build_kernel(self, jobs: int, build_dir: str, make_options: Optional[List[str]]) -> bool: + try: + self._ops.make_olddefconfig(build_dir, make_options) + self._ops.make(jobs, build_dir, make_options) + except (ConfigError, BuildError) as e: + logging.error(e) + return False + return self.validate_config(build_dir) + + def run_kernel(self, args: Optional[List[str]]=None, build_dir: str='', filter_glob: str='', filter: str='', filter_action: Optional[str]=None, timeout: Optional[int]=None) -> Iterator[str]: + if not args: + args = [] + if filter_glob: + args.append('kunit.filter_glob=' + filter_glob) + if filter: + args.append('kunit.filter="' + filter + '"') + if filter_action: + args.append('kunit.filter_action=' + filter_action) + args.append('kunit.enable=1') + + process = self._ops.start(args, build_dir) + assert process.stdout is not None # tell mypy it's set + + # Enforce the timeout in a background thread. + def _wait_proc() -> None: + try: + process.wait(timeout=timeout) + except Exception as e: + print(e) + process.terminate() + process.wait() + waiter = threading.Thread(target=_wait_proc) + waiter.start() + + output = open(get_outfile_path(build_dir), 'w') + try: + # Tee the output to the file and to our caller in real time. + for line in process.stdout: + output.write(line) + yield line + # This runs even if our caller doesn't consume every line. + finally: + # Flush any leftover output to the file + output.write(process.stdout.read()) + output.close() + process.stdout.close() + + waiter.join() + subprocess.call(['stty', 'sane']) + + def signal_handler(self, unused_sig: int, unused_frame: Optional[FrameType]) -> None: + logging.error('Build interruption occurred. 
Cleaning console.') + subprocess.call(['stty', 'sane']) diff --git a/tools/testing/kunit/kunit_parser.py b/tools/testing/kunit/kunit_parser.py new file mode 100644 index 0000000000..79d8832c86 --- /dev/null +++ b/tools/testing/kunit/kunit_parser.py @@ -0,0 +1,823 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Parses KTAP test results from a kernel dmesg log and incrementally prints +# results with reader-friendly format. Stores and returns test results in a +# Test object. +# +# Copyright (C) 2019, Google LLC. +# Author: Felix Guo +# Author: Brendan Higgins +# Author: Rae Moar + +from __future__ import annotations +from dataclasses import dataclass +import re +import textwrap + +from enum import Enum, auto +from typing import Iterable, Iterator, List, Optional, Tuple + +from kunit_printer import stdout + +class Test: + """ + A class to represent a test parsed from KTAP results. All KTAP + results within a test log are stored in a main Test object as + subtests. + + Attributes: + status : TestStatus - status of the test + name : str - name of the test + expected_count : int - expected number of subtests (0 if single + test case and None if unknown expected number of subtests) + subtests : List[Test] - list of subtests + log : List[str] - log of KTAP lines that correspond to the test + counts : TestCounts - counts of the test statuses and errors of + subtests or of the test itself if the test is a single + test case. + """ + def __init__(self) -> None: + """Creates Test object with default attributes.""" + self.status = TestStatus.TEST_CRASHED + self.name = '' + self.expected_count = 0 # type: Optional[int] + self.subtests = [] # type: List[Test] + self.log = [] # type: List[str] + self.counts = TestCounts() + + def __str__(self) -> str: + """Returns string representation of a Test class object.""" + return (f'Test({self.status}, {self.name}, {self.expected_count}, ' + f'{self.subtests}, {self.log}, {self.counts})') + + def __repr__(self) -> str: + """Returns string representation of a Test class object.""" + return str(self) + + def add_error(self, error_message: str) -> None: + """Records an error that occurred while parsing this test.""" + self.counts.errors += 1 + stdout.print_with_timestamp(stdout.red('[ERROR]') + f' Test: {self.name}: {error_message}') + + def ok_status(self) -> bool: + """Returns true if the status was ok, i.e. passed or skipped.""" + return self.status in (TestStatus.SUCCESS, TestStatus.SKIPPED) + +class TestStatus(Enum): + """An enumeration class to represent the status of a test.""" + SUCCESS = auto() + FAILURE = auto() + SKIPPED = auto() + TEST_CRASHED = auto() + NO_TESTS = auto() + FAILURE_TO_PARSE_TESTS = auto() + +@dataclass +class TestCounts: + """ + Tracks the counts of statuses of all test cases and any errors within + a Test. + """ + passed: int = 0 + failed: int = 0 + crashed: int = 0 + skipped: int = 0 + errors: int = 0 + + def __str__(self) -> str: + """Returns the string representation of a TestCounts object.""" + statuses = [('passed', self.passed), ('failed', self.failed), + ('crashed', self.crashed), ('skipped', self.skipped), + ('errors', self.errors)] + return f'Ran {self.total()} tests: ' + \ + ', '.join(f'{s}: {n}' for s, n in statuses if n > 0) + + def total(self) -> int: + """Returns the total number of test cases within a test + object, where a test case is a test with no subtests. 
+ """ + return (self.passed + self.failed + self.crashed + + self.skipped) + + def add_subtest_counts(self, counts: TestCounts) -> None: + """ + Adds the counts of another TestCounts object to the current + TestCounts object. Used to add the counts of a subtest to the + parent test. + + Parameters: + counts - a different TestCounts object whose counts + will be added to the counts of the TestCounts object + """ + self.passed += counts.passed + self.failed += counts.failed + self.crashed += counts.crashed + self.skipped += counts.skipped + self.errors += counts.errors + + def get_status(self) -> TestStatus: + """Returns the aggregated status of a Test using test + counts. + """ + if self.total() == 0: + return TestStatus.NO_TESTS + if self.crashed: + # Crashes should take priority. + return TestStatus.TEST_CRASHED + if self.failed: + return TestStatus.FAILURE + if self.passed: + # No failures or crashes, looks good! + return TestStatus.SUCCESS + # We have only skipped tests. + return TestStatus.SKIPPED + + def add_status(self, status: TestStatus) -> None: + """Increments the count for `status`.""" + if status == TestStatus.SUCCESS: + self.passed += 1 + elif status == TestStatus.FAILURE: + self.failed += 1 + elif status == TestStatus.SKIPPED: + self.skipped += 1 + elif status != TestStatus.NO_TESTS: + self.crashed += 1 + +class LineStream: + """ + A class to represent the lines of kernel output. + Provides a lazy peek()/pop() interface over an iterator of + (line#, text). + """ + _lines: Iterator[Tuple[int, str]] + _next: Tuple[int, str] + _need_next: bool + _done: bool + + def __init__(self, lines: Iterator[Tuple[int, str]]): + """Creates a new LineStream that wraps the given iterator.""" + self._lines = lines + self._done = False + self._need_next = True + self._next = (0, '') + + def _get_next(self) -> None: + """Advances the LineSteam to the next line, if necessary.""" + if not self._need_next: + return + try: + self._next = next(self._lines) + except StopIteration: + self._done = True + finally: + self._need_next = False + + def peek(self) -> str: + """Returns the current line, without advancing the LineStream. + """ + self._get_next() + return self._next[1] + + def pop(self) -> str: + """Returns the current line and advances the LineStream to + the next line. + """ + s = self.peek() + if self._done: + raise ValueError(f'LineStream: going past EOF, last line was {s}') + self._need_next = True + return s + + def __bool__(self) -> bool: + """Returns True if stream has more lines.""" + self._get_next() + return not self._done + + # Only used by kunit_tool_test.py. + def __iter__(self) -> Iterator[str]: + """Empties all lines stored in LineStream object into + Iterator object and returns the Iterator object. 
+ """ + while bool(self): + yield self.pop() + + def line_number(self) -> int: + """Returns the line number of the current line.""" + self._get_next() + return self._next[0] + +# Parsing helper methods: + +KTAP_START = re.compile(r'\s*KTAP version ([0-9]+)$') +TAP_START = re.compile(r'\s*TAP version ([0-9]+)$') +KTAP_END = re.compile(r'\s*(List of all partitions:|' + 'Kernel panic - not syncing: VFS:|reboot: System halted)') +EXECUTOR_ERROR = re.compile(r'\s*kunit executor: (.*)$') + +def extract_tap_lines(kernel_output: Iterable[str]) -> LineStream: + """Extracts KTAP lines from the kernel output.""" + def isolate_ktap_output(kernel_output: Iterable[str]) \ + -> Iterator[Tuple[int, str]]: + line_num = 0 + started = False + for line in kernel_output: + line_num += 1 + line = line.rstrip() # remove trailing \n + if not started and KTAP_START.search(line): + # start extracting KTAP lines and set prefix + # to number of characters before version line + prefix_len = len( + line.split('KTAP version')[0]) + started = True + yield line_num, line[prefix_len:] + elif not started and TAP_START.search(line): + # start extracting KTAP lines and set prefix + # to number of characters before version line + prefix_len = len(line.split('TAP version')[0]) + started = True + yield line_num, line[prefix_len:] + elif started and KTAP_END.search(line): + # stop extracting KTAP lines + break + elif started: + # remove the prefix, if any. + line = line[prefix_len:] + yield line_num, line + elif EXECUTOR_ERROR.search(line): + yield line_num, line + return LineStream(lines=isolate_ktap_output(kernel_output)) + +KTAP_VERSIONS = [1] +TAP_VERSIONS = [13, 14] + +def check_version(version_num: int, accepted_versions: List[int], + version_type: str, test: Test) -> None: + """ + Adds error to test object if version number is too high or too + low. + + Parameters: + version_num - The inputted version number from the parsed KTAP or TAP + header line + accepted_version - List of accepted KTAP or TAP versions + version_type - 'KTAP' or 'TAP' depending on the type of + version line. + test - Test object for current test being parsed + """ + if version_num < min(accepted_versions): + test.add_error(f'{version_type} version lower than expected!') + elif version_num > max(accepted_versions): + test.add_error(f'{version_type} version higer than expected!') + +def parse_ktap_header(lines: LineStream, test: Test) -> bool: + """ + Parses KTAP/TAP header line and checks version number. + Returns False if fails to parse KTAP/TAP header line. + + Accepted formats: + - 'KTAP version [version number]' + - 'TAP version [version number]' + + Parameters: + lines - LineStream of KTAP output to parse + test - Test object for current test being parsed + + Return: + True if successfully parsed KTAP/TAP header line + """ + ktap_match = KTAP_START.match(lines.peek()) + tap_match = TAP_START.match(lines.peek()) + if ktap_match: + version_num = int(ktap_match.group(1)) + check_version(version_num, KTAP_VERSIONS, 'KTAP', test) + elif tap_match: + version_num = int(tap_match.group(1)) + check_version(version_num, TAP_VERSIONS, 'TAP', test) + else: + return False + lines.pop() + return True + +TEST_HEADER = re.compile(r'^\s*# Subtest: (.*)$') + +def parse_test_header(lines: LineStream, test: Test) -> bool: + """ + Parses test header and stores test name in test object. + Returns False if fails to parse test header line. 
+
+	Accepted format:
+	- '# Subtest: [test name]'
+
+	Parameters:
+	lines - LineStream of KTAP output to parse
+	test - Test object for current test being parsed
+
+	Return:
+	True if successfully parsed test header line
+	"""
+	match = TEST_HEADER.match(lines.peek())
+	if not match:
+		return False
+	test.name = match.group(1)
+	lines.pop()
+	return True
+
+TEST_PLAN = re.compile(r'^\s*1\.\.([0-9]+)')
+
+def parse_test_plan(lines: LineStream, test: Test) -> bool:
+	"""
+	Parses the test plan line and stores the expected number of subtests
+	in the test object. Returns False and sets expected_count to None if
+	there is no valid test plan.
+
+	Accepted format:
+	- '1..[number of subtests]'
+
+	Parameters:
+	lines - LineStream of KTAP output to parse
+	test - Test object for current test being parsed
+
+	Return:
+	True if successfully parsed test plan line
+	"""
+	match = TEST_PLAN.match(lines.peek())
+	if not match:
+		test.expected_count = None
+		return False
+	expected_count = int(match.group(1))
+	test.expected_count = expected_count
+	lines.pop()
+	return True
+
+TEST_RESULT = re.compile(r'^\s*(ok|not ok) ([0-9]+) (- )?([^#]*)( # .*)?$')
+
+TEST_RESULT_SKIP = re.compile(r'^\s*(ok|not ok) ([0-9]+) (- )?(.*) # SKIP(.*)$')
+
+def peek_test_name_match(lines: LineStream, test: Test) -> bool:
+	"""
+	Matches the current line against the format of a test result line
+	and checks whether the name matches the name of the current test.
+	Returns False if it fails to match the format or the name.
+
+	Accepted format:
+	- '[ok|not ok] [test number] [-] [test name] [optional skip
+	directive]'
+
+	Parameters:
+	lines - LineStream of KTAP output to parse
+	test - Test object for current test being parsed
+
+	Return:
+	True if the line matches a test result line and the name matches
+	the expected test name
+	"""
+	line = lines.peek()
+	match = TEST_RESULT.match(line)
+	if not match:
+		return False
+	name = match.group(4)
+	return name == test.name
+
+def parse_test_result(lines: LineStream, test: Test,
+			expected_num: int) -> bool:
+	"""
+	Parses the test result line and stores the status and name in the
+	test object. Reports an error if the test number does not match the
+	expected test number.
+	Returns False if it fails to parse the test result line.
+
+	Note that the SKIP directive is the only directive that causes a
+	change in status.
+
+	Accepted format:
+	- '[ok|not ok] [test number] [-] [test name] [optional skip
+	directive]'
+
+	Parameters:
+	lines - LineStream of KTAP output to parse
+	test - Test object for current test being parsed
+	expected_num - expected test number for current test
+
+	Return:
+	True if successfully parsed a test result line.
+ """ + line = lines.peek() + match = TEST_RESULT.match(line) + skip_match = TEST_RESULT_SKIP.match(line) + + # Check if line matches test result line format + if not match: + return False + lines.pop() + + # Set name of test object + if skip_match: + test.name = skip_match.group(4) + else: + test.name = match.group(4) + + # Check test num + num = int(match.group(2)) + if num != expected_num: + test.add_error(f'Expected test number {expected_num} but found {num}') + + # Set status of test object + status = match.group(1) + if skip_match: + test.status = TestStatus.SKIPPED + elif status == 'ok': + test.status = TestStatus.SUCCESS + else: + test.status = TestStatus.FAILURE + return True + +def parse_diagnostic(lines: LineStream) -> List[str]: + """ + Parse lines that do not match the format of a test result line or + test header line and returns them in list. + + Line formats that are not parsed: + - '# Subtest: [test name]' + - '[ok|not ok] [test number] [-] [test name] [optional skip + directive]' + - 'KTAP version [version number]' + + Parameters: + lines - LineStream of KTAP output to parse + + Return: + Log of diagnostic lines + """ + log = [] # type: List[str] + non_diagnostic_lines = [TEST_RESULT, TEST_HEADER, KTAP_START, TAP_START] + while lines and not any(re.match(lines.peek()) + for re in non_diagnostic_lines): + log.append(lines.pop()) + return log + + +# Printing helper methods: + +DIVIDER = '=' * 60 + +def format_test_divider(message: str, len_message: int) -> str: + """ + Returns string with message centered in fixed width divider. + + Example: + '===================== message example =====================' + + Parameters: + message - message to be centered in divider line + len_message - length of the message to be printed such that + any characters of the color codes are not counted + + Return: + String containing message centered in fixed width divider + """ + default_count = 3 # default number of dashes + len_1 = default_count + len_2 = default_count + difference = len(DIVIDER) - len_message - 2 # 2 spaces added + if difference > 0: + # calculate number of dashes for each side of the divider + len_1 = int(difference / 2) + len_2 = difference - len_1 + return ('=' * len_1) + f' {message} ' + ('=' * len_2) + +def print_test_header(test: Test) -> None: + """ + Prints test header with test name and optionally the expected number + of subtests. + + Example: + '=================== example (2 subtests) ===================' + + Parameters: + test - Test object representing current test being printed + """ + message = test.name + if message != "": + # Add a leading space before the subtest counts only if a test name + # is provided using a "# Subtest" header line. + message += " " + if test.expected_count: + if test.expected_count == 1: + message += '(1 subtest)' + else: + message += f'({test.expected_count} subtests)' + stdout.print_with_timestamp(format_test_divider(message, len(message))) + +def print_log(log: Iterable[str]) -> None: + """Prints all strings in saved log for test in yellow.""" + formatted = textwrap.dedent('\n'.join(log)) + for line in formatted.splitlines(): + stdout.print_with_timestamp(stdout.yellow(line)) + +def format_test_result(test: Test) -> str: + """ + Returns string with formatted test result with colored status and test + name. 
+
+	Example:
+	'[PASSED] example'
+
+	Parameters:
+	test - Test object representing current test being printed
+
+	Return:
+	String containing formatted test result
+	"""
+	if test.status == TestStatus.SUCCESS:
+		return stdout.green('[PASSED] ') + test.name
+	if test.status == TestStatus.SKIPPED:
+		return stdout.yellow('[SKIPPED] ') + test.name
+	if test.status == TestStatus.NO_TESTS:
+		return stdout.yellow('[NO TESTS RUN] ') + test.name
+	if test.status == TestStatus.TEST_CRASHED:
+		print_log(test.log)
+		return stdout.red('[CRASHED] ') + test.name
+	print_log(test.log)
+	return stdout.red('[FAILED] ') + test.name
+
+def print_test_result(test: Test) -> None:
+	"""
+	Prints result line with status of test.
+
+	Example:
+	'[PASSED] example'
+
+	Parameters:
+	test - Test object representing current test being printed
+	"""
+	stdout.print_with_timestamp(format_test_result(test))
+
+def print_test_footer(test: Test) -> None:
+	"""
+	Prints test footer with status of test.
+
+	Example:
+	'===================== [PASSED] example ====================='
+
+	Parameters:
+	test - Test object representing current test being printed
+	"""
+	message = format_test_result(test)
+	stdout.print_with_timestamp(format_test_divider(message,
+		len(message) - stdout.color_len()))
+
+
+
+def _summarize_failed_tests(test: Test) -> str:
+	"""Tries to summarize all the failing subtests in `test`."""
+
+	def failed_names(test: Test, parent_name: str) -> List[str]:
+		# Note: we use 'main' internally for the top-level test.
+		if not parent_name or parent_name == 'main':
+			full_name = test.name
+		else:
+			full_name = parent_name + '.' + test.name
+
+		if not test.subtests:  # this is a leaf node
+			return [full_name]
+
+		# If all the children failed, just say this subtest failed.
+		# Don't summarize it down to "the top-level test failed",
+		# though.
+		failed_subtests = [sub for sub in test.subtests if not sub.ok_status()]
+		if parent_name and len(failed_subtests) == len(test.subtests):
+			return [full_name]
+
+		all_failures = []  # type: List[str]
+		for t in failed_subtests:
+			all_failures.extend(failed_names(t, full_name))
+		return all_failures
+
+	failures = failed_names(test, '')
+	# If there are too many failures, printing them out will just be noisy.
+	if len(failures) > 10:  # this is an arbitrary limit
+		return ''
+
+	return 'Failures: ' + ', '.join(failures)
+
+
+def print_summary_line(test: Test) -> None:
+	"""
+	Prints the summary line of a test object. The line is colored green
+	if the test passed, yellow if it was skipped, and red if it failed
+	or crashed. The summary line contains the counts of the statuses of
+	the test's subtests, or of the test itself if it has no subtests.
+
+	Example:
+	"Testing complete. Passed: 2, Failed: 0, Crashed: 0, Skipped: 0,
+	Errors: 0"
+
+	Parameters:
+	test - Test object representing current test being printed
+	"""
+	if test.status == TestStatus.SUCCESS:
+		color = stdout.green
+	elif test.status in (TestStatus.SKIPPED, TestStatus.NO_TESTS):
+		color = stdout.yellow
+	else:
+		color = stdout.red
+	stdout.print_with_timestamp(color(f'Testing complete. {test.counts}'))
+
+	# Summarize failures that might have gone off-screen since we had a lot
+	# of tests (arbitrarily defined as >=100 for now).
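+	# As an illustrative example (matching the format produced by
+	# _summarize_failed_tests() above), the extra line looks like:
+	#   Failures: all_failed_suite, some_failed_suite.test2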
+	if test.ok_status() or test.counts.total() < 100:
+		return
+	summarized = _summarize_failed_tests(test)
+	if not summarized:
+		return
+	stdout.print_with_timestamp(color(summarized))
+
+# Other methods:
+
+def bubble_up_test_results(test: Test) -> None:
+	"""
+	If the test has subtests, adds the counts of all subtests to the
+	test's counts and, if any subtest crashed, marks the test itself as
+	crashed. Otherwise, if the test has no subtests, adds the test's own
+	status to its counts.
+
+	Parameters:
+	test - Test object for current test being parsed
+	"""
+	subtests = test.subtests
+	counts = test.counts
+	status = test.status
+	for t in subtests:
+		counts.add_subtest_counts(t.counts)
+	if counts.total() == 0:
+		counts.add_status(status)
+	elif test.counts.get_status() == TestStatus.TEST_CRASHED:
+		test.status = TestStatus.TEST_CRASHED
+
+def parse_test(lines: LineStream, expected_num: int, log: List[str], is_subtest: bool) -> Test:
+	"""
+	Finds the next test to parse in the LineStream, creates a new Test
+	object, parses any subtests of the test, populates the Test object
+	with all information (status, name) about the test and the Test
+	objects for any subtests, and then returns the Test object. The
+	method accepts three formats of tests:
+
+	Accepted test formats:
+
+	- Main KTAP/TAP header
+
+	  Example:
+
+	  KTAP version 1
+	  1..4
+	  [subtests]
+
+	- Subtest header (must include either the KTAP version line or
+	  "# Subtest" header line)
+
+	  Example (preferred format with both KTAP version line and
+	  "# Subtest" line):
+
+	  KTAP version 1
+	  # Subtest: name
+	  1..3
+	  [subtests]
+	  ok 1 name
+
+	  Example (only "# Subtest" line):
+
+	  # Subtest: name
+	  1..3
+	  [subtests]
+	  ok 1 name
+
+	  Example (only KTAP version line, compliant with KTAP v1 spec):
+
+	  KTAP version 1
+	  1..3
+	  [subtests]
+	  ok 1 name
+
+	- Test result line
+
+	  Example:
+
+	  ok 1 - test
+
+	Parameters:
+	lines - LineStream of KTAP output to parse
+	expected_num - expected test number for test to be parsed
+	log - list of strings containing any preceding diagnostic lines
+	corresponding to the current test
+	is_subtest - boolean indicating whether test is a subtest
+
+	Return:
+	Test object populated with characteristics and any subtests
+	"""
+	test = Test()
+	test.log.extend(log)
+
+	# Parse any errors prior to parsing tests
+	err_log = parse_diagnostic(lines)
+	test.log.extend(err_log)
+
+	if not is_subtest:
+		# If parsing the main/top-level test, parse KTAP version line and
+		# test plan
+		test.name = "main"
+		ktap_line = parse_ktap_header(lines, test)
+		parse_test_plan(lines, test)
+		parent_test = True
+	else:
+		# If not the main test, attempt to parse a test header containing
+		# the KTAP version line and/or subtest header line
+		ktap_line = parse_ktap_header(lines, test)
+		subtest_line = parse_test_header(lines, test)
+		parent_test = (ktap_line or subtest_line)
+		if parent_test:
+			# If KTAP version line and/or subtest header is found, attempt
+			# to parse test plan and print test header
+			parse_test_plan(lines, test)
+			print_test_header(test)
+	expected_count = test.expected_count
+	subtests = []
+	test_num = 1
+	while parent_test and (expected_count is None or test_num <= expected_count):
+		# Loop to parse any subtests. Break after parsing the
+		# expected number of tests; if the expected number of tests
+		# is unknown, break when a test result line whose name
+		# matches the subtest header is found, or when there are no
+		# more lines in the stream.
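+		# As a sketch of what one pass over this loop consumes
+		# (using the subtest format from the docstring above): given
+		#   # Subtest: name
+		#   1..2
+		#   ok 1 sub1
+		#   ok 2 sub2
+		# each iteration parses one 'ok ...' line (plus any
+		# preceding diagnostics) into a sub_test, before the
+		# parent's own result line is handled below.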
+		sub_log = parse_diagnostic(lines)
+		sub_test = Test()
+		if not lines or (peek_test_name_match(lines, test) and
+				is_subtest):
+			if expected_count and test_num <= expected_count:
+				# If parser reaches end of test before
+				# parsing expected number of subtests, print
+				# crashed subtest and record error
+				test.add_error('missing expected subtest!')
+				sub_test.log.extend(sub_log)
+				test.counts.add_status(
+					TestStatus.TEST_CRASHED)
+				print_test_result(sub_test)
+			else:
+				test.log.extend(sub_log)
+				break
+		else:
+			sub_test = parse_test(lines, test_num, sub_log, True)
+		subtests.append(sub_test)
+		test_num += 1
+	test.subtests = subtests
+	if is_subtest:
+		# If not main test, look for test result line
+		test.log.extend(parse_diagnostic(lines))
+		if test.name != "" and not peek_test_name_match(lines, test):
+			test.add_error('missing subtest result line!')
+		else:
+			parse_test_result(lines, test, expected_num)
+
+	# Check for the case where the parent test has no subtests
+	if parent_test and len(subtests) == 0:
+		# Don't override a bad status if this test had one reported.
+		# Assumption: no subtests means CRASHED is from Test.__init__()
+		if test.status in (TestStatus.TEST_CRASHED, TestStatus.SUCCESS):
+			print_log(test.log)
+			test.status = TestStatus.NO_TESTS
+			test.add_error('0 tests run!')
+
+	# Add statuses to TestCounts attribute in Test object
+	bubble_up_test_results(test)
+	if parent_test and is_subtest:
+		# If test has subtests and is not the main test object, print
+		# footer.
+		print_test_footer(test)
+	elif is_subtest:
+		print_test_result(test)
+	return test
+
+def parse_run_tests(kernel_output: Iterable[str]) -> Test:
+	"""
+	Extracts KTAP lines from the kernel output, parses them for test
+	results, and prints condensed test results and a summary line.
+
+	Parameters:
+	kernel_output - Iterable object containing lines of kernel output
+
+	Return:
+	Test - the main test object with all subtests.
+	"""
+	stdout.print_with_timestamp(DIVIDER)
+	lines = extract_tap_lines(kernel_output)
+	test = Test()
+	if not lines:
+		test.name = ''
+		test.add_error('Could not find any KTAP output. Did any KUnit tests run?')
+		test.status = TestStatus.FAILURE_TO_PARSE_TESTS
+	else:
+		test = parse_test(lines, 0, [], False)
+		if test.status != TestStatus.NO_TESTS:
+			test.status = test.counts.get_status()
+	stdout.print_with_timestamp(DIVIDER)
+	print_summary_line(test)
+	return test
diff --git a/tools/testing/kunit/kunit_printer.py b/tools/testing/kunit/kunit_printer.py
new file mode 100644
index 0000000000..015adf87dc
--- /dev/null
+++ b/tools/testing/kunit/kunit_printer.py
@@ -0,0 +1,48 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+#
+# Utilities for printing and coloring output.
+#
+# Copyright (C) 2022, Google LLC.
+# Author: Daniel Latypov + +import datetime +import sys +import typing + +_RESET = '\033[0;0m' + +class Printer: + """Wraps a file object, providing utilities for coloring output, etc.""" + + def __init__(self, output: typing.IO[str]): + self._output = output + self._use_color = output.isatty() + + def print(self, message: str) -> None: + print(message, file=self._output) + + def print_with_timestamp(self, message: str) -> None: + ts = datetime.datetime.now().strftime('%H:%M:%S') + self.print(f'[{ts}] {message}') + + def _color(self, code: str, text: str) -> str: + if not self._use_color: + return text + return code + text + _RESET + + def red(self, text: str) -> str: + return self._color('\033[1;31m', text) + + def yellow(self, text: str) -> str: + return self._color('\033[1;33m', text) + + def green(self, text: str) -> str: + return self._color('\033[1;32m', text) + + def color_len(self) -> int: + """Returns the length of the color escape codes.""" + return len(self.red('')) + +# Provides a default instance that prints to stdout +stdout = Printer(sys.stdout) diff --git a/tools/testing/kunit/kunit_tool_test.py b/tools/testing/kunit/kunit_tool_test.py new file mode 100755 index 0000000000..b28c1510be --- /dev/null +++ b/tools/testing/kunit/kunit_tool_test.py @@ -0,0 +1,831 @@ +#!/usr/bin/env python3 +# SPDX-License-Identifier: GPL-2.0 +# +# A collection of tests for tools/testing/kunit/kunit.py +# +# Copyright (C) 2019, Google LLC. +# Author: Brendan Higgins + +import unittest +from unittest import mock + +import tempfile, shutil # Handling test_tmpdir + +import itertools +import json +import os +import signal +import subprocess +from typing import Iterable + +import kunit_config +import kunit_parser +import kunit_kernel +import kunit_json +import kunit + +test_tmpdir = '' +abs_test_data_dir = '' + +def setUpModule(): + global test_tmpdir, abs_test_data_dir + test_tmpdir = tempfile.mkdtemp() + abs_test_data_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), 'test_data')) + +def tearDownModule(): + shutil.rmtree(test_tmpdir) + +def test_data_path(path): + return os.path.join(abs_test_data_dir, path) + +class KconfigTest(unittest.TestCase): + + def test_is_subset_of(self): + kconfig0 = kunit_config.Kconfig() + self.assertTrue(kconfig0.is_subset_of(kconfig0)) + + kconfig1 = kunit_config.Kconfig() + kconfig1.add_entry('TEST', 'y') + self.assertTrue(kconfig1.is_subset_of(kconfig1)) + self.assertTrue(kconfig0.is_subset_of(kconfig1)) + self.assertFalse(kconfig1.is_subset_of(kconfig0)) + + def test_read_from_file(self): + kconfig_path = test_data_path('test_read_from_file.kconfig') + + kconfig = kunit_config.parse_file(kconfig_path) + + expected_kconfig = kunit_config.Kconfig() + expected_kconfig.add_entry('UML', 'y') + expected_kconfig.add_entry('MMU', 'y') + expected_kconfig.add_entry('TEST', 'y') + expected_kconfig.add_entry('EXAMPLE_TEST', 'y') + expected_kconfig.add_entry('MK8', 'n') + + self.assertEqual(kconfig, expected_kconfig) + + def test_write_to_file(self): + kconfig_path = os.path.join(test_tmpdir, '.config') + + expected_kconfig = kunit_config.Kconfig() + expected_kconfig.add_entry('UML', 'y') + expected_kconfig.add_entry('MMU', 'y') + expected_kconfig.add_entry('TEST', 'y') + expected_kconfig.add_entry('EXAMPLE_TEST', 'y') + expected_kconfig.add_entry('MK8', 'n') + + expected_kconfig.write_to_file(kconfig_path) + + actual_kconfig = kunit_config.parse_file(kconfig_path) + self.assertEqual(actual_kconfig, expected_kconfig) + +class KUnitParserTest(unittest.TestCase): + def 
setUp(self): + self.print_mock = mock.patch('kunit_printer.Printer.print').start() + self.addCleanup(mock.patch.stopall) + + def noPrintCallContains(self, substr: str): + for call in self.print_mock.mock_calls: + self.assertNotIn(substr, call.args[0]) + + def assertContains(self, needle: str, haystack: kunit_parser.LineStream): + # Clone the iterator so we can print the contents on failure. + copy, backup = itertools.tee(haystack) + for line in copy: + if needle in line: + return + raise AssertionError(f'"{needle}" not found in {list(backup)}!') + + def test_output_isolated_correctly(self): + log_path = test_data_path('test_output_isolated_correctly.log') + with open(log_path) as file: + result = kunit_parser.extract_tap_lines(file.readlines()) + self.assertContains('TAP version 14', result) + self.assertContains('# Subtest: example', result) + self.assertContains('1..2', result) + self.assertContains('ok 1 - example_simple_test', result) + self.assertContains('ok 2 - example_mock_test', result) + self.assertContains('ok 1 - example', result) + + def test_output_with_prefix_isolated_correctly(self): + log_path = test_data_path('test_pound_sign.log') + with open(log_path) as file: + result = kunit_parser.extract_tap_lines(file.readlines()) + self.assertContains('TAP version 14', result) + self.assertContains('# Subtest: kunit-resource-test', result) + self.assertContains('1..5', result) + self.assertContains('ok 1 - kunit_resource_test_init_resources', result) + self.assertContains('ok 2 - kunit_resource_test_alloc_resource', result) + self.assertContains('ok 3 - kunit_resource_test_destroy_resource', result) + self.assertContains('foo bar #', result) + self.assertContains('ok 4 - kunit_resource_test_cleanup_resources', result) + self.assertContains('ok 5 - kunit_resource_test_proper_free_ordering', result) + self.assertContains('ok 1 - kunit-resource-test', result) + self.assertContains('foo bar # non-kunit output', result) + self.assertContains('# Subtest: kunit-try-catch-test', result) + self.assertContains('1..2', result) + self.assertContains('ok 1 - kunit_test_try_catch_successful_try_no_catch', + result) + self.assertContains('ok 2 - kunit_test_try_catch_unsuccessful_try_does_catch', + result) + self.assertContains('ok 2 - kunit-try-catch-test', result) + self.assertContains('# Subtest: string-stream-test', result) + self.assertContains('1..3', result) + self.assertContains('ok 1 - string_stream_test_empty_on_creation', result) + self.assertContains('ok 2 - string_stream_test_not_empty_after_add', result) + self.assertContains('ok 3 - string_stream_test_get_string', result) + self.assertContains('ok 3 - string-stream-test', result) + + def test_parse_successful_test_log(self): + all_passed_log = test_data_path('test_is_test_passed-all_passed.log') + with open(all_passed_log) as file: + result = kunit_parser.parse_run_tests(file.readlines()) + self.assertEqual(kunit_parser.TestStatus.SUCCESS, result.status) + self.assertEqual(result.counts.errors, 0) + + def test_parse_successful_nested_tests_log(self): + all_passed_log = test_data_path('test_is_test_passed-all_passed_nested.log') + with open(all_passed_log) as file: + result = kunit_parser.parse_run_tests(file.readlines()) + self.assertEqual(kunit_parser.TestStatus.SUCCESS, result.status) + self.assertEqual(result.counts.errors, 0) + + def test_kselftest_nested(self): + kselftest_log = test_data_path('test_is_test_passed-kselftest.log') + with open(kselftest_log) as file: + result = kunit_parser.parse_run_tests(file.readlines()) + 
self.assertEqual(kunit_parser.TestStatus.SUCCESS, result.status)
+		self.assertEqual(result.counts.errors, 0)
+
+	def test_parse_failed_test_log(self):
+		failed_log = test_data_path('test_is_test_passed-failure.log')
+		with open(failed_log) as file:
+			result = kunit_parser.parse_run_tests(file.readlines())
+		self.assertEqual(kunit_parser.TestStatus.FAILURE, result.status)
+		self.assertEqual(result.counts.errors, 0)
+
+	def test_no_header(self):
+		empty_log = test_data_path('test_is_test_passed-no_tests_run_no_header.log')
+		with open(empty_log) as file:
+			result = kunit_parser.parse_run_tests(
+				kunit_parser.extract_tap_lines(file.readlines()))
+		self.assertEqual(0, len(result.subtests))
+		self.assertEqual(kunit_parser.TestStatus.FAILURE_TO_PARSE_TESTS, result.status)
+		self.assertEqual(result.counts.errors, 1)
+
+	def test_missing_test_plan(self):
+		missing_plan_log = test_data_path('test_is_test_passed-'
+			'missing_plan.log')
+		with open(missing_plan_log) as file:
+			result = kunit_parser.parse_run_tests(
+				kunit_parser.extract_tap_lines(
+				file.readlines()))
+		# A missing test plan is not an error.
+		self.assertEqual(result.counts, kunit_parser.TestCounts(passed=10, errors=0))
+		self.assertEqual(kunit_parser.TestStatus.SUCCESS, result.status)
+
+	def test_no_tests(self):
+		header_log = test_data_path('test_is_test_passed-no_tests_run_with_header.log')
+		with open(header_log) as file:
+			result = kunit_parser.parse_run_tests(
+				kunit_parser.extract_tap_lines(file.readlines()))
+		self.assertEqual(0, len(result.subtests))
+		self.assertEqual(kunit_parser.TestStatus.NO_TESTS, result.status)
+		self.assertEqual(result.counts.errors, 1)
+
+	def test_no_tests_no_plan(self):
+		no_plan_log = test_data_path('test_is_test_passed-no_tests_no_plan.log')
+		with open(no_plan_log) as file:
+			result = kunit_parser.parse_run_tests(
+				kunit_parser.extract_tap_lines(file.readlines()))
+		self.assertEqual(0, len(result.subtests[0].subtests[0].subtests))
+		self.assertEqual(
+			kunit_parser.TestStatus.NO_TESTS,
+			result.subtests[0].subtests[0].status)
+		self.assertEqual(result.counts, kunit_parser.TestCounts(passed=1, errors=1))
+
+
+	def test_no_kunit_output(self):
+		crash_log = test_data_path('test_insufficient_memory.log')
+		print_mock = mock.patch('kunit_printer.Printer.print').start()
+		with open(crash_log) as file:
+			result = kunit_parser.parse_run_tests(
+				kunit_parser.extract_tap_lines(file.readlines()))
+		print_mock.assert_any_call(StrContains('Could not find any KTAP output.'))
+		print_mock.stop()
+		self.assertEqual(0, len(result.subtests))
+		self.assertEqual(result.counts.errors, 1)
+
+	def test_skipped_test(self):
+		skipped_log = test_data_path('test_skip_tests.log')
+		with open(skipped_log) as file:
+			result = kunit_parser.parse_run_tests(file.readlines())
+
+		# A skipped test does not fail the whole suite.
+		self.assertEqual(kunit_parser.TestStatus.SUCCESS, result.status)
+		self.assertEqual(result.counts, kunit_parser.TestCounts(passed=4, skipped=1))
+
+	def test_skipped_all_tests(self):
+		skipped_log = test_data_path('test_skip_all_tests.log')
+		with open(skipped_log) as file:
+			result = kunit_parser.parse_run_tests(file.readlines())
+
+		self.assertEqual(kunit_parser.TestStatus.SKIPPED, result.status)
+		self.assertEqual(result.counts, kunit_parser.TestCounts(skipped=5))
+
+	def test_ignores_hyphen(self):
+		hyphen_log = test_data_path('test_strip_hyphen.log')
+		with open(hyphen_log) as file:
+			result = kunit_parser.parse_run_tests(file.readlines())
+
+		# The parser should strip any leading '- ' from test names.
+ self.assertEqual(kunit_parser.TestStatus.SUCCESS, result.status) + self.assertEqual( + "sysctl_test", + result.subtests[0].name) + self.assertEqual( + "example", + result.subtests[1].name) + + def test_ignores_prefix_printk_time(self): + prefix_log = test_data_path('test_config_printk_time.log') + with open(prefix_log) as file: + result = kunit_parser.parse_run_tests(file.readlines()) + self.assertEqual(kunit_parser.TestStatus.SUCCESS, result.status) + self.assertEqual('kunit-resource-test', result.subtests[0].name) + self.assertEqual(result.counts.errors, 0) + + def test_ignores_multiple_prefixes(self): + prefix_log = test_data_path('test_multiple_prefixes.log') + with open(prefix_log) as file: + result = kunit_parser.parse_run_tests(file.readlines()) + self.assertEqual(kunit_parser.TestStatus.SUCCESS, result.status) + self.assertEqual('kunit-resource-test', result.subtests[0].name) + self.assertEqual(result.counts.errors, 0) + + def test_prefix_mixed_kernel_output(self): + mixed_prefix_log = test_data_path('test_interrupted_tap_output.log') + with open(mixed_prefix_log) as file: + result = kunit_parser.parse_run_tests(file.readlines()) + self.assertEqual(kunit_parser.TestStatus.SUCCESS, result.status) + self.assertEqual('kunit-resource-test', result.subtests[0].name) + self.assertEqual(result.counts.errors, 0) + + def test_prefix_poundsign(self): + pound_log = test_data_path('test_pound_sign.log') + with open(pound_log) as file: + result = kunit_parser.parse_run_tests(file.readlines()) + self.assertEqual(kunit_parser.TestStatus.SUCCESS, result.status) + self.assertEqual('kunit-resource-test', result.subtests[0].name) + self.assertEqual(result.counts.errors, 0) + + def test_kernel_panic_end(self): + panic_log = test_data_path('test_kernel_panic_interrupt.log') + with open(panic_log) as file: + result = kunit_parser.parse_run_tests(file.readlines()) + self.assertEqual(kunit_parser.TestStatus.TEST_CRASHED, result.status) + self.assertEqual('kunit-resource-test', result.subtests[0].name) + self.assertGreaterEqual(result.counts.errors, 1) + + def test_pound_no_prefix(self): + pound_log = test_data_path('test_pound_no_prefix.log') + with open(pound_log) as file: + result = kunit_parser.parse_run_tests(file.readlines()) + self.assertEqual(kunit_parser.TestStatus.SUCCESS, result.status) + self.assertEqual('kunit-resource-test', result.subtests[0].name) + self.assertEqual(result.counts.errors, 0) + + def test_summarize_failures(self): + output = """ + KTAP version 1 + 1..2 + # Subtest: all_failed_suite + 1..2 + not ok 1 - test1 + not ok 2 - test2 + not ok 1 - all_failed_suite + # Subtest: some_failed_suite + 1..2 + ok 1 - test1 + not ok 2 - test2 + not ok 1 - some_failed_suite + """ + result = kunit_parser.parse_run_tests(output.splitlines()) + self.assertEqual(kunit_parser.TestStatus.FAILURE, result.status) + + self.assertEqual(kunit_parser._summarize_failed_tests(result), + 'Failures: all_failed_suite, some_failed_suite.test2') + + def test_ktap_format(self): + ktap_log = test_data_path('test_parse_ktap_output.log') + with open(ktap_log) as file: + result = kunit_parser.parse_run_tests(file.readlines()) + self.assertEqual(result.counts, kunit_parser.TestCounts(passed=3)) + self.assertEqual('suite', result.subtests[0].name) + self.assertEqual('case_1', result.subtests[0].subtests[0].name) + self.assertEqual('case_2', result.subtests[0].subtests[1].name) + + def test_parse_subtest_header(self): + ktap_log = test_data_path('test_parse_subtest_header.log') + with open(ktap_log) as file: + 
kunit_parser.parse_run_tests(file.readlines())
+		self.print_mock.assert_any_call(StrContains('suite (1 subtest)'))
+
+	def test_show_test_output_on_failure(self):
+		output = """
+		KTAP version 1
+		1..1
+		  Test output.
+		    Indented more.
+		not ok 1 test1
+		"""
+		result = kunit_parser.parse_run_tests(output.splitlines())
+		self.assertEqual(kunit_parser.TestStatus.FAILURE, result.status)
+
+		self.print_mock.assert_any_call(StrContains('Test output.'))
+		self.print_mock.assert_any_call(StrContains('  Indented more.'))
+		self.noPrintCallContains('not ok 1 test1')
+
+def line_stream_from_strs(strs: Iterable[str]) -> kunit_parser.LineStream:
+	return kunit_parser.LineStream(enumerate(strs, start=1))
+
+class LineStreamTest(unittest.TestCase):
+
+	def test_basic(self):
+		stream = line_stream_from_strs(['hello', 'world'])
+
+		self.assertTrue(stream, msg='Should be more input')
+		self.assertEqual(stream.line_number(), 1)
+		self.assertEqual(stream.peek(), 'hello')
+		self.assertEqual(stream.pop(), 'hello')
+
+		self.assertTrue(stream, msg='Should be more input')
+		self.assertEqual(stream.line_number(), 2)
+		self.assertEqual(stream.peek(), 'world')
+		self.assertEqual(stream.pop(), 'world')
+
+		self.assertFalse(stream, msg='Should be no more input')
+		with self.assertRaisesRegex(ValueError, 'LineStream: going past EOF'):
+			stream.pop()
+
+	def test_is_lazy(self):
+		called_times = 0
+		def generator():
+			nonlocal called_times
+			for _ in range(1, 5):
+				called_times += 1
+				yield called_times, str(called_times)
+
+		stream = kunit_parser.LineStream(generator())
+		self.assertEqual(called_times, 0)
+
+		self.assertEqual(stream.pop(), '1')
+		self.assertEqual(called_times, 1)
+
+		self.assertEqual(stream.pop(), '2')
+		self.assertEqual(called_times, 2)
+
+class LinuxSourceTreeTest(unittest.TestCase):
+
+	def setUp(self):
+		mock.patch.object(signal, 'signal').start()
+		self.addCleanup(mock.patch.stopall)
+
+	def test_invalid_kunitconfig(self):
+		with self.assertRaisesRegex(kunit_kernel.ConfigError, 'nonexistent.* does not exist'):
+			kunit_kernel.LinuxSourceTree('', kunitconfig_paths=['/nonexistent_file'])
+
+	def test_valid_kunitconfig(self):
+		with tempfile.NamedTemporaryFile('wt') as kunitconfig:
+			kunit_kernel.LinuxSourceTree('', kunitconfig_paths=[kunitconfig.name])
+
+	def test_dir_kunitconfig(self):
+		with tempfile.TemporaryDirectory('') as dir:
+			with open(os.path.join(dir, '.kunitconfig'), 'w'):
+				pass
+			kunit_kernel.LinuxSourceTree('', kunitconfig_paths=[dir])
+
+	def test_multiple_kunitconfig(self):
+		want_kconfig = kunit_config.Kconfig()
+		want_kconfig.add_entry('KUNIT', 'y')
+		want_kconfig.add_entry('KUNIT_TEST', 'm')
+
+		with tempfile.TemporaryDirectory('') as dir:
+			other = os.path.join(dir, 'otherkunitconfig')
+			with open(os.path.join(dir, '.kunitconfig'), 'w') as f:
+				f.write('CONFIG_KUNIT=y')
+			with open(other, 'w') as f:
+				f.write('CONFIG_KUNIT_TEST=m')
+
+			tree = kunit_kernel.LinuxSourceTree('', kunitconfig_paths=[dir, other])
+			self.assertTrue(want_kconfig.is_subset_of(tree._kconfig), msg=tree._kconfig)
+
+
+	def test_multiple_kunitconfig_invalid(self):
+		with tempfile.TemporaryDirectory('') as dir:
+			other = os.path.join(dir, 'otherkunitconfig')
+			with open(os.path.join(dir, '.kunitconfig'), 'w') as f:
+				f.write('CONFIG_KUNIT=y')
+			with open(other, 'w') as f:
+				f.write('CONFIG_KUNIT=m')
+
+			with self.assertRaisesRegex(kunit_kernel.ConfigError, '(?s)Multiple values.*CONFIG_KUNIT'):
+				kunit_kernel.LinuxSourceTree('', kunitconfig_paths=[dir, other])
+
+
+	def test_kconfig_add(self):
+		want_kconfig = kunit_config.Kconfig()
+		want_kconfig.add_entry('NOT_REAL', 'y')
+
+		tree = kunit_kernel.LinuxSourceTree('', kconfig_add=['CONFIG_NOT_REAL=y'])
+		self.assertTrue(want_kconfig.is_subset_of(tree._kconfig), msg=tree._kconfig)
+
+	def test_invalid_arch(self):
+		with self.assertRaisesRegex(kunit_kernel.ConfigError, 'not a valid arch, options are.*x86_64'):
+			kunit_kernel.LinuxSourceTree('', arch='invalid')
+
+	def test_run_kernel_hits_exception(self):
+		def fake_start(unused_args, unused_build_dir):
+			return subprocess.Popen(['echo "hi\nbye"'], shell=True, text=True, stdout=subprocess.PIPE)
+
+		with tempfile.TemporaryDirectory('') as build_dir:
+			tree = kunit_kernel.LinuxSourceTree(build_dir)
+			mock.patch.object(tree._ops, 'start', side_effect=fake_start).start()
+
+			with self.assertRaises(ValueError):
+				for line in tree.run_kernel(build_dir=build_dir):
+					self.assertEqual(line, 'hi\n')
+					raise ValueError('uh oh, did not read all output')
+
+			with open(kunit_kernel.get_outfile_path(build_dir), 'rt') as outfile:
+				self.assertEqual(outfile.read(), 'hi\nbye\n', msg='Missing some output')
+
+	def test_build_reconfig_no_config(self):
+		with tempfile.TemporaryDirectory('') as build_dir:
+			with open(kunit_kernel.get_kunitconfig_path(build_dir), 'w') as f:
+				f.write('CONFIG_KUNIT=y')
+
+			tree = kunit_kernel.LinuxSourceTree(build_dir)
+			# Stub out the source tree operations so that the
+			# defaults for any given architecture don't get in
+			# the way.
+			tree._ops = kunit_kernel.LinuxSourceTreeOperations('none', None)
+			mock_build_config = mock.patch.object(tree, 'build_config').start()
+
+			# Should generate the .config
+			self.assertTrue(tree.build_reconfig(build_dir, make_options=[]))
+			mock_build_config.assert_called_once_with(build_dir, [])
+
+	def test_build_reconfig_existing_config(self):
+		with tempfile.TemporaryDirectory('') as build_dir:
+			# Existing .config is a superset, should not touch it
+			with open(kunit_kernel.get_kunitconfig_path(build_dir), 'w') as f:
+				f.write('CONFIG_KUNIT=y')
+			with open(kunit_kernel.get_old_kunitconfig_path(build_dir), 'w') as f:
+				f.write('CONFIG_KUNIT=y')
+			with open(kunit_kernel.get_kconfig_path(build_dir), 'w') as f:
+				f.write('CONFIG_KUNIT=y\nCONFIG_KUNIT_TEST=y')
+
+			tree = kunit_kernel.LinuxSourceTree(build_dir)
+			# Stub out the source tree operations so that the
+			# defaults for any given architecture don't get in
+			# the way.
+			tree._ops = kunit_kernel.LinuxSourceTreeOperations('none', None)
+			mock_build_config = mock.patch.object(tree, 'build_config').start()
+
+			self.assertTrue(tree.build_reconfig(build_dir, make_options=[]))
+			self.assertEqual(mock_build_config.call_count, 0)
+
+	def test_build_reconfig_remove_option(self):
+		with tempfile.TemporaryDirectory('') as build_dir:
+			# We removed CONFIG_KUNIT_TEST=y from our .kunitconfig...
+			with open(kunit_kernel.get_kunitconfig_path(build_dir), 'w') as f:
+				f.write('CONFIG_KUNIT=y')
+			with open(kunit_kernel.get_old_kunitconfig_path(build_dir), 'w') as f:
+				f.write('CONFIG_KUNIT=y\nCONFIG_KUNIT_TEST=y')
+			with open(kunit_kernel.get_kconfig_path(build_dir), 'w') as f:
+				f.write('CONFIG_KUNIT=y\nCONFIG_KUNIT_TEST=y')
+
+			tree = kunit_kernel.LinuxSourceTree(build_dir)
+			# Stub out the source tree operations so that the
+			# defaults for any given architecture don't get in
+			# the way.
+			tree._ops = kunit_kernel.LinuxSourceTreeOperations('none', None)
+			mock_build_config = mock.patch.object(tree, 'build_config').start()
+
+			# ...
so we should trigger a call to build_config() + self.assertTrue(tree.build_reconfig(build_dir, make_options=[])) + mock_build_config.assert_called_once_with(build_dir, []) + + # TODO: add more test cases. + + +class KUnitJsonTest(unittest.TestCase): + def setUp(self): + self.print_mock = mock.patch('kunit_printer.Printer.print').start() + self.addCleanup(mock.patch.stopall) + + def _json_for(self, log_file): + with open(test_data_path(log_file)) as file: + test_result = kunit_parser.parse_run_tests(file) + json_obj = kunit_json.get_json_result( + test=test_result, + metadata=kunit_json.Metadata()) + return json.loads(json_obj) + + def test_failed_test_json(self): + result = self._json_for('test_is_test_passed-failure.log') + self.assertEqual( + {'name': 'example_simple_test', 'status': 'FAIL'}, + result["sub_groups"][1]["test_cases"][0]) + + def test_crashed_test_json(self): + result = self._json_for('test_kernel_panic_interrupt.log') + self.assertEqual( + {'name': '', 'status': 'ERROR'}, + result["sub_groups"][2]["test_cases"][1]) + + def test_skipped_test_json(self): + result = self._json_for('test_skip_tests.log') + self.assertEqual( + {'name': 'example_skip_test', 'status': 'SKIP'}, + result["sub_groups"][1]["test_cases"][1]) + + def test_no_tests_json(self): + result = self._json_for('test_is_test_passed-no_tests_run_with_header.log') + self.assertEqual(0, len(result['sub_groups'])) + + def test_nested_json(self): + result = self._json_for('test_is_test_passed-all_passed_nested.log') + self.assertEqual( + {'name': 'example_simple_test', 'status': 'PASS'}, + result["sub_groups"][0]["sub_groups"][0]["test_cases"][0]) + +class StrContains(str): + def __eq__(self, other): + return self in other + +class KUnitMainTest(unittest.TestCase): + def setUp(self): + path = test_data_path('test_is_test_passed-all_passed.log') + with open(path) as file: + all_passed_log = file.readlines() + + self.print_mock = mock.patch('kunit_printer.Printer.print').start() + self.addCleanup(mock.patch.stopall) + + self.mock_linux_init = mock.patch.object(kunit_kernel, 'LinuxSourceTree').start() + self.linux_source_mock = self.mock_linux_init.return_value + self.linux_source_mock.build_reconfig.return_value = True + self.linux_source_mock.build_kernel.return_value = True + self.linux_source_mock.run_kernel.return_value = all_passed_log + + def test_config_passes_args_pass(self): + kunit.main(['config', '--build_dir=.kunit']) + self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1) + self.assertEqual(self.linux_source_mock.run_kernel.call_count, 0) + + def test_build_passes_args_pass(self): + kunit.main(['build']) + self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1) + self.linux_source_mock.build_kernel.assert_called_once_with(kunit.get_default_jobs(), '.kunit', None) + self.assertEqual(self.linux_source_mock.run_kernel.call_count, 0) + + def test_exec_passes_args_pass(self): + kunit.main(['exec']) + self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 0) + self.assertEqual(self.linux_source_mock.run_kernel.call_count, 1) + self.linux_source_mock.run_kernel.assert_called_once_with( + args=None, build_dir='.kunit', filter_glob='', filter='', filter_action=None, timeout=300) + self.print_mock.assert_any_call(StrContains('Testing complete.')) + + def test_run_passes_args_pass(self): + kunit.main(['run']) + self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1) + self.assertEqual(self.linux_source_mock.run_kernel.call_count, 1) + 
self.linux_source_mock.run_kernel.assert_called_once_with( + args=None, build_dir='.kunit', filter_glob='', filter='', filter_action=None, timeout=300) + self.print_mock.assert_any_call(StrContains('Testing complete.')) + + def test_exec_passes_args_fail(self): + self.linux_source_mock.run_kernel = mock.Mock(return_value=[]) + with self.assertRaises(SystemExit) as e: + kunit.main(['exec']) + self.assertEqual(e.exception.code, 1) + + def test_run_passes_args_fail(self): + self.linux_source_mock.run_kernel = mock.Mock(return_value=[]) + with self.assertRaises(SystemExit) as e: + kunit.main(['run']) + self.assertEqual(e.exception.code, 1) + self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1) + self.assertEqual(self.linux_source_mock.run_kernel.call_count, 1) + self.print_mock.assert_any_call(StrContains('Could not find any KTAP output.')) + + def test_exec_no_tests(self): + self.linux_source_mock.run_kernel = mock.Mock(return_value=['TAP version 14', '1..0']) + with self.assertRaises(SystemExit) as e: + kunit.main(['run']) + self.assertEqual(e.exception.code, 1) + self.linux_source_mock.run_kernel.assert_called_once_with( + args=None, build_dir='.kunit', filter_glob='', filter='', filter_action=None, timeout=300) + self.print_mock.assert_any_call(StrContains(' 0 tests run!')) + + def test_exec_raw_output(self): + self.linux_source_mock.run_kernel = mock.Mock(return_value=[]) + kunit.main(['exec', '--raw_output']) + self.assertEqual(self.linux_source_mock.run_kernel.call_count, 1) + for call in self.print_mock.call_args_list: + self.assertNotEqual(call, mock.call(StrContains('Testing complete.'))) + self.assertNotEqual(call, mock.call(StrContains(' 0 tests run!'))) + + def test_run_raw_output(self): + self.linux_source_mock.run_kernel = mock.Mock(return_value=[]) + kunit.main(['run', '--raw_output']) + self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1) + self.assertEqual(self.linux_source_mock.run_kernel.call_count, 1) + for call in self.print_mock.call_args_list: + self.assertNotEqual(call, mock.call(StrContains('Testing complete.'))) + self.assertNotEqual(call, mock.call(StrContains(' 0 tests run!'))) + + def test_run_raw_output_kunit(self): + self.linux_source_mock.run_kernel = mock.Mock(return_value=[]) + kunit.main(['run', '--raw_output=kunit']) + self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1) + self.assertEqual(self.linux_source_mock.run_kernel.call_count, 1) + for call in self.print_mock.call_args_list: + self.assertNotEqual(call, mock.call(StrContains('Testing complete.'))) + self.assertNotEqual(call, mock.call(StrContains(' 0 tests run'))) + + def test_run_raw_output_invalid(self): + self.linux_source_mock.run_kernel = mock.Mock(return_value=[]) + with self.assertRaises(SystemExit) as e: + kunit.main(['run', '--raw_output=invalid']) + self.assertNotEqual(e.exception.code, 0) + + def test_run_raw_output_does_not_take_positional_args(self): + # --raw_output is a string flag, but we don't want it to consume + # any positional arguments, only ones after an '=' + self.linux_source_mock.run_kernel = mock.Mock(return_value=[]) + kunit.main(['run', '--raw_output', 'filter_glob']) + self.linux_source_mock.run_kernel.assert_called_once_with( + args=None, build_dir='.kunit', filter_glob='filter_glob', filter='', filter_action=None, timeout=300) + + def test_exec_timeout(self): + timeout = 3453 + kunit.main(['exec', '--timeout', str(timeout)]) + self.linux_source_mock.run_kernel.assert_called_once_with( + args=None, build_dir='.kunit', 
filter_glob='', filter='', filter_action=None, timeout=timeout) + self.print_mock.assert_any_call(StrContains('Testing complete.')) + + def test_run_timeout(self): + timeout = 3453 + kunit.main(['run', '--timeout', str(timeout)]) + self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1) + self.linux_source_mock.run_kernel.assert_called_once_with( + args=None, build_dir='.kunit', filter_glob='', filter='', filter_action=None, timeout=timeout) + self.print_mock.assert_any_call(StrContains('Testing complete.')) + + def test_run_builddir(self): + build_dir = '.kunit' + kunit.main(['run', '--build_dir=.kunit']) + self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1) + self.linux_source_mock.run_kernel.assert_called_once_with( + args=None, build_dir=build_dir, filter_glob='', filter='', filter_action=None, timeout=300) + self.print_mock.assert_any_call(StrContains('Testing complete.')) + + def test_config_builddir(self): + build_dir = '.kunit' + kunit.main(['config', '--build_dir', build_dir]) + self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1) + + def test_build_builddir(self): + build_dir = '.kunit' + jobs = kunit.get_default_jobs() + kunit.main(['build', '--build_dir', build_dir]) + self.linux_source_mock.build_kernel.assert_called_once_with(jobs, build_dir, None) + + def test_exec_builddir(self): + build_dir = '.kunit' + kunit.main(['exec', '--build_dir', build_dir]) + self.linux_source_mock.run_kernel.assert_called_once_with( + args=None, build_dir=build_dir, filter_glob='', filter='', filter_action=None, timeout=300) + self.print_mock.assert_any_call(StrContains('Testing complete.')) + + def test_run_kunitconfig(self): + kunit.main(['run', '--kunitconfig=mykunitconfig']) + # Just verify that we parsed and initialized it correctly here. + self.mock_linux_init.assert_called_once_with('.kunit', + kunitconfig_paths=['mykunitconfig'], + kconfig_add=None, + arch='um', + cross_compile=None, + qemu_config_path=None, + extra_qemu_args=[]) + + def test_config_kunitconfig(self): + kunit.main(['config', '--kunitconfig=mykunitconfig']) + # Just verify that we parsed and initialized it correctly here. + self.mock_linux_init.assert_called_once_with('.kunit', + kunitconfig_paths=['mykunitconfig'], + kconfig_add=None, + arch='um', + cross_compile=None, + qemu_config_path=None, + extra_qemu_args=[]) + + def test_config_alltests(self): + kunit.main(['config', '--kunitconfig=mykunitconfig', '--alltests']) + # Just verify that we parsed and initialized it correctly here. + self.mock_linux_init.assert_called_once_with('.kunit', + kunitconfig_paths=[kunit_kernel.ALL_TESTS_CONFIG_PATH, 'mykunitconfig'], + kconfig_add=None, + arch='um', + cross_compile=None, + qemu_config_path=None, + extra_qemu_args=[]) + + + @mock.patch.object(kunit_kernel, 'LinuxSourceTree') + def test_run_multiple_kunitconfig(self, mock_linux_init): + mock_linux_init.return_value = self.linux_source_mock + kunit.main(['run', '--kunitconfig=mykunitconfig', '--kunitconfig=other']) + # Just verify that we parsed and initialized it correctly here. + mock_linux_init.assert_called_once_with('.kunit', + kunitconfig_paths=['mykunitconfig', 'other'], + kconfig_add=None, + arch='um', + cross_compile=None, + qemu_config_path=None, + extra_qemu_args=[]) + + def test_run_kconfig_add(self): + kunit.main(['run', '--kconfig_add=CONFIG_KASAN=y', '--kconfig_add=CONFIG_KCSAN=y']) + # Just verify that we parsed and initialized it correctly here. 
+ self.mock_linux_init.assert_called_once_with('.kunit', + kunitconfig_paths=[], + kconfig_add=['CONFIG_KASAN=y', 'CONFIG_KCSAN=y'], + arch='um', + cross_compile=None, + qemu_config_path=None, + extra_qemu_args=[]) + + def test_run_qemu_args(self): + kunit.main(['run', '--arch=x86_64', '--qemu_args', '-m 2048']) + # Just verify that we parsed and initialized it correctly here. + self.mock_linux_init.assert_called_once_with('.kunit', + kunitconfig_paths=[], + kconfig_add=None, + arch='x86_64', + cross_compile=None, + qemu_config_path=None, + extra_qemu_args=['-m', '2048']) + + def test_run_kernel_args(self): + kunit.main(['run', '--kernel_args=a=1', '--kernel_args=b=2']) + self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1) + self.linux_source_mock.run_kernel.assert_called_once_with( + args=['a=1','b=2'], build_dir='.kunit', filter_glob='', filter='', filter_action=None, timeout=300) + self.print_mock.assert_any_call(StrContains('Testing complete.')) + + def test_list_tests(self): + want = ['suite.test1', 'suite.test2', 'suite2.test1'] + self.linux_source_mock.run_kernel.return_value = ['TAP version 14', 'init: random output'] + want + + got = kunit._list_tests(self.linux_source_mock, + kunit.KunitExecRequest(None, None, '.kunit', 300, 'suite*', '', None, None, 'suite', False, False)) + self.assertEqual(got, want) + # Should respect the user's filter glob when listing tests. + self.linux_source_mock.run_kernel.assert_called_once_with( + args=['kunit.action=list'], build_dir='.kunit', filter_glob='suite*', filter='', filter_action=None, timeout=300) + + @mock.patch.object(kunit, '_list_tests') + def test_run_isolated_by_suite(self, mock_tests): + mock_tests.return_value = ['suite.test1', 'suite.test2', 'suite2.test1'] + kunit.main(['exec', '--run_isolated=suite', 'suite*.test*']) + + # Should respect the user's filter glob when listing tests. + mock_tests.assert_called_once_with(mock.ANY, + kunit.KunitExecRequest(None, None, '.kunit', 300, 'suite*.test*', '', None, None, 'suite', False, False)) + self.linux_source_mock.run_kernel.assert_has_calls([ + mock.call(args=None, build_dir='.kunit', filter_glob='suite.test*', filter='', filter_action=None, timeout=300), + mock.call(args=None, build_dir='.kunit', filter_glob='suite2.test*', filter='', filter_action=None, timeout=300), + ]) + + @mock.patch.object(kunit, '_list_tests') + def test_run_isolated_by_test(self, mock_tests): + mock_tests.return_value = ['suite.test1', 'suite.test2', 'suite2.test1'] + kunit.main(['exec', '--run_isolated=test', 'suite*']) + + # Should respect the user's filter glob when listing tests. + mock_tests.assert_called_once_with(mock.ANY, + kunit.KunitExecRequest(None, None, '.kunit', 300, 'suite*', '', None, None, 'test', False, False)) + self.linux_source_mock.run_kernel.assert_has_calls([ + mock.call(args=None, build_dir='.kunit', filter_glob='suite.test1', filter='', filter_action=None, timeout=300), + mock.call(args=None, build_dir='.kunit', filter_glob='suite.test2', filter='', filter_action=None, timeout=300), + mock.call(args=None, build_dir='.kunit', filter_glob='suite2.test1', filter='', filter_action=None, timeout=300), + ]) + +if __name__ == '__main__': + unittest.main() diff --git a/tools/testing/kunit/mypy.ini b/tools/testing/kunit/mypy.ini new file mode 100644 index 0000000000..ddd288309e --- /dev/null +++ b/tools/testing/kunit/mypy.ini @@ -0,0 +1,6 @@ +[mypy] +strict = True + +# E.g. we can't write subprocess.Popen[str] until Python 3.9+. 
+# But kunit.py tries to support Python 3.7+, so let's disable it. +disable_error_code = type-arg diff --git a/tools/testing/kunit/qemu_config.py b/tools/testing/kunit/qemu_config.py new file mode 100644 index 0000000000..b1fba9016e --- /dev/null +++ b/tools/testing/kunit/qemu_config.py @@ -0,0 +1,20 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Collection of configs for building non-UML kernels and running them on QEMU. +# +# Copyright (C) 2021, Google LLC. +# Author: Brendan Higgins + +from dataclasses import dataclass +from typing import List + + +@dataclass(frozen=True) +class QemuArchParams: + linux_arch: str + kconfig: str + qemu_arch: str + kernel_path: str + kernel_command_line: str + extra_qemu_params: List[str] + serial: str = 'stdio' diff --git a/tools/testing/kunit/qemu_configs/alpha.py b/tools/testing/kunit/qemu_configs/alpha.py new file mode 100644 index 0000000000..3ac846e03a --- /dev/null +++ b/tools/testing/kunit/qemu_configs/alpha.py @@ -0,0 +1,10 @@ +from ..qemu_config import QemuArchParams + +QEMU_ARCH = QemuArchParams(linux_arch='alpha', + kconfig=''' +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y''', + qemu_arch='alpha', + kernel_path='arch/alpha/boot/vmlinux', + kernel_command_line='console=ttyS0', + extra_qemu_params=[]) diff --git a/tools/testing/kunit/qemu_configs/arm.py b/tools/testing/kunit/qemu_configs/arm.py new file mode 100644 index 0000000000..db21602005 --- /dev/null +++ b/tools/testing/kunit/qemu_configs/arm.py @@ -0,0 +1,13 @@ +from ..qemu_config import QemuArchParams + +QEMU_ARCH = QemuArchParams(linux_arch='arm', + kconfig=''' +CONFIG_ARCH_VIRT=y +CONFIG_SERIAL_AMBA_PL010=y +CONFIG_SERIAL_AMBA_PL010_CONSOLE=y +CONFIG_SERIAL_AMBA_PL011=y +CONFIG_SERIAL_AMBA_PL011_CONSOLE=y''', + qemu_arch='arm', + kernel_path='arch/arm/boot/zImage', + kernel_command_line='console=ttyAMA0', + extra_qemu_params=['-machine', 'virt']) diff --git a/tools/testing/kunit/qemu_configs/arm64.py b/tools/testing/kunit/qemu_configs/arm64.py new file mode 100644 index 0000000000..d3ff270247 --- /dev/null +++ b/tools/testing/kunit/qemu_configs/arm64.py @@ -0,0 +1,12 @@ +from ..qemu_config import QemuArchParams + +QEMU_ARCH = QemuArchParams(linux_arch='arm64', + kconfig=''' +CONFIG_SERIAL_AMBA_PL010=y +CONFIG_SERIAL_AMBA_PL010_CONSOLE=y +CONFIG_SERIAL_AMBA_PL011=y +CONFIG_SERIAL_AMBA_PL011_CONSOLE=y''', + qemu_arch='aarch64', + kernel_path='arch/arm64/boot/Image.gz', + kernel_command_line='console=ttyAMA0', + extra_qemu_params=['-machine', 'virt', '-cpu', 'max,pauth-impdef=on']) diff --git a/tools/testing/kunit/qemu_configs/i386.py b/tools/testing/kunit/qemu_configs/i386.py new file mode 100644 index 0000000000..4463ebefd5 --- /dev/null +++ b/tools/testing/kunit/qemu_configs/i386.py @@ -0,0 +1,10 @@ +from ..qemu_config import QemuArchParams + +QEMU_ARCH = QemuArchParams(linux_arch='i386', + kconfig=''' +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y''', + qemu_arch='i386', + kernel_path='arch/x86/boot/bzImage', + kernel_command_line='console=ttyS0', + extra_qemu_params=[]) diff --git a/tools/testing/kunit/qemu_configs/m68k.py b/tools/testing/kunit/qemu_configs/m68k.py new file mode 100644 index 0000000000..287fc386f8 --- /dev/null +++ b/tools/testing/kunit/qemu_configs/m68k.py @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: GPL-2.0-only +from ..qemu_config import QemuArchParams + +QEMU_ARCH = QemuArchParams(linux_arch='m68k', + kconfig=''' +CONFIG_VIRT=y''', + qemu_arch='m68k', + kernel_path='vmlinux', + kernel_command_line='console=hvc0', + extra_qemu_params=['-machine', 'virt']) 
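As a sketch of the pattern the per-arch modules in qemu_configs/ share (illustrative only, not part of the patch: 'myarch' and every path below are placeholders, not a real supported target), each file instantiates QemuArchParams from qemu_config.py at module scope under the name QEMU_ARCH:

    from ..qemu_config import QemuArchParams

    # Hypothetical example only: all 'myarch' values are placeholders.
    QEMU_ARCH = QemuArchParams(linux_arch='myarch',
                               kconfig='''
    CONFIG_SERIAL_8250=y
    CONFIG_SERIAL_8250_CONSOLE=y''',
                               qemu_arch='myarch',
                               kernel_path='arch/myarch/boot/bzImage',
                               kernel_command_line='console=ttyS0',
                               extra_qemu_params=[])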
diff --git a/tools/testing/kunit/qemu_configs/powerpc.py b/tools/testing/kunit/qemu_configs/powerpc.py new file mode 100644 index 0000000000..7ec38d4131 --- /dev/null +++ b/tools/testing/kunit/qemu_configs/powerpc.py @@ -0,0 +1,12 @@ +from ..qemu_config import QemuArchParams + +QEMU_ARCH = QemuArchParams(linux_arch='powerpc', + kconfig=''' +CONFIG_PPC64=y +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_HVC_CONSOLE=y''', + qemu_arch='ppc64', + kernel_path='vmlinux', + kernel_command_line='console=ttyS0', + extra_qemu_params=['-M', 'pseries', '-cpu', 'power8']) diff --git a/tools/testing/kunit/qemu_configs/riscv.py b/tools/testing/kunit/qemu_configs/riscv.py new file mode 100644 index 0000000000..12a1d52597 --- /dev/null +++ b/tools/testing/kunit/qemu_configs/riscv.py @@ -0,0 +1,28 @@ +from ..qemu_config import QemuArchParams +import os +import os.path +import sys + +OPENSBI_FILE = 'opensbi-riscv64-generic-fw_dynamic.bin' +OPENSBI_PATH = '/usr/share/qemu/' + OPENSBI_FILE + +if not os.path.isfile(OPENSBI_PATH): + print('\n\nOpenSBI bios was not found in "' + OPENSBI_PATH + '".\n' + 'Please ensure that qemu-system-riscv is installed, or edit the path in "qemu_configs/riscv.py"\n') + sys.exit() + +QEMU_ARCH = QemuArchParams(linux_arch='riscv', + kconfig=''' +CONFIG_SOC_VIRT=y +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_OF_PLATFORM=y +CONFIG_RISCV_SBI_V01=y +CONFIG_SERIAL_EARLYCON_RISCV_SBI=y''', + qemu_arch='riscv64', + kernel_path='arch/riscv/boot/Image', + kernel_command_line='console=ttyS0', + extra_qemu_params=[ + '-machine', 'virt', + '-cpu', 'rv64', + '-bios', OPENSBI_PATH]) diff --git a/tools/testing/kunit/qemu_configs/s390.py b/tools/testing/kunit/qemu_configs/s390.py new file mode 100644 index 0000000000..98fa4fb60c --- /dev/null +++ b/tools/testing/kunit/qemu_configs/s390.py @@ -0,0 +1,14 @@ +from ..qemu_config import QemuArchParams + +QEMU_ARCH = QemuArchParams(linux_arch='s390', + kconfig=''' +CONFIG_EXPERT=y +CONFIG_TUNE_ZEC12=y +CONFIG_NUMA=y +CONFIG_MODULES=y''', + qemu_arch='s390x', + kernel_path='arch/s390/boot/bzImage', + kernel_command_line='console=ttyS0', + extra_qemu_params=[ + '-machine', 's390-ccw-virtio', + '-cpu', 'qemu',]) diff --git a/tools/testing/kunit/qemu_configs/sh.py b/tools/testing/kunit/qemu_configs/sh.py new file mode 100644 index 0000000000..78a474a5b9 --- /dev/null +++ b/tools/testing/kunit/qemu_configs/sh.py @@ -0,0 +1,17 @@ +# SPDX-License-Identifier: GPL-2.0-only +from ..qemu_config import QemuArchParams + +QEMU_ARCH = QemuArchParams(linux_arch='sh', + kconfig=''' +CONFIG_CPU_SUBTYPE_SH7751R=y +CONFIG_MEMORY_START=0x0c000000 +CONFIG_SH_RTS7751R2D=y +CONFIG_RTS7751R2D_PLUS=y +CONFIG_SERIAL_SH_SCI=y''', + qemu_arch='sh4', + kernel_path='arch/sh/boot/zImage', + kernel_command_line='console=ttySC1', + serial='null', + extra_qemu_params=[ + '-machine', 'r2d', + '-serial', 'mon:stdio']) diff --git a/tools/testing/kunit/qemu_configs/sparc.py b/tools/testing/kunit/qemu_configs/sparc.py new file mode 100644 index 0000000000..e975c4331a --- /dev/null +++ b/tools/testing/kunit/qemu_configs/sparc.py @@ -0,0 +1,10 @@ +from ..qemu_config import QemuArchParams + +QEMU_ARCH = QemuArchParams(linux_arch='sparc', + kconfig=''' +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y''', + qemu_arch='sparc', + kernel_path='arch/sparc/boot/zImage', + kernel_command_line='console=ttyS0 mem=256M', + extra_qemu_params=['-m', '256']) diff --git a/tools/testing/kunit/qemu_configs/x86_64.py b/tools/testing/kunit/qemu_configs/x86_64.py new file mode 
100644 index 0000000000..dc79490768 --- /dev/null +++ b/tools/testing/kunit/qemu_configs/x86_64.py @@ -0,0 +1,10 @@ +from ..qemu_config import QemuArchParams + +QEMU_ARCH = QemuArchParams(linux_arch='x86_64', + kconfig=''' +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y''', + qemu_arch='x86_64', + kernel_path='arch/x86/boot/bzImage', + kernel_command_line='console=ttyS0', + extra_qemu_params=[]) diff --git a/tools/testing/kunit/run_checks.py b/tools/testing/kunit/run_checks.py new file mode 100755 index 0000000000..c6d494ea33 --- /dev/null +++ b/tools/testing/kunit/run_checks.py @@ -0,0 +1,81 @@ +#!/usr/bin/env python3 +# SPDX-License-Identifier: GPL-2.0 +# +# This file runs some basic checks to verify kunit works. +# It is only of interest if you're making changes to KUnit itself. +# +# Copyright (C) 2021, Google LLC. +# Author: Daniel Latypov + +from concurrent import futures +import datetime +import os +import shutil +import subprocess +import sys +import textwrap +from typing import Dict, List, Sequence + +ABS_TOOL_PATH = os.path.abspath(os.path.dirname(__file__)) +TIMEOUT = datetime.timedelta(minutes=5).total_seconds() + +commands: Dict[str, Sequence[str]] = { + 'kunit_tool_test.py': ['./kunit_tool_test.py'], + 'kunit smoke test': ['./kunit.py', 'run', '--kunitconfig=lib/kunit', '--build_dir=kunit_run_checks'], + 'pytype': ['/bin/sh', '-c', 'pytype *.py'], + 'mypy': ['mypy', '--config-file', 'mypy.ini', '--exclude', '_test.py$', '--exclude', 'qemu_configs/', '.'], +} + +# The user might not have mypy or pytype installed, skip them if so. +# Note: you can install both via `$ pip install mypy pytype` +necessary_deps : Dict[str, str] = { + 'pytype': 'pytype', + 'mypy': 'mypy', +} + +def main(argv: Sequence[str]) -> None: + if argv: + raise RuntimeError('This script takes no arguments') + + future_to_name: Dict[futures.Future[None], str] = {} + executor = futures.ThreadPoolExecutor(max_workers=len(commands)) + for name, argv in commands.items(): + if name in necessary_deps and shutil.which(necessary_deps[name]) is None: + print(f'{name}: SKIPPED, {necessary_deps[name]} not in $PATH') + continue + f = executor.submit(run_cmd, argv) + future_to_name[f] = name + + has_failures = False + print(f'Waiting on {len(future_to_name)} checks ({", ".join(future_to_name.values())})...') + for f in futures.as_completed(future_to_name.keys()): + name = future_to_name[f] + ex = f.exception() + if not ex: + print(f'{name}: PASSED') + continue + + has_failures = True + if isinstance(ex, subprocess.TimeoutExpired): + print(f'{name}: TIMED OUT') + elif isinstance(ex, subprocess.CalledProcessError): + print(f'{name}: FAILED') + else: + print(f'{name}: unexpected exception: {ex}') + continue + + output = ex.output + if output: + print(textwrap.indent(output.decode(), '> ')) + executor.shutdown() + + if has_failures: + sys.exit(1) + + +def run_cmd(argv: Sequence[str]) -> None: + subprocess.check_output(argv, stderr=subprocess.STDOUT, cwd=ABS_TOOL_PATH, timeout=TIMEOUT) + + +if __name__ == '__main__': + main(sys.argv[1:]) diff --git a/tools/testing/kunit/test_data/test_config_printk_time.log b/tools/testing/kunit/test_data/test_config_printk_time.log new file mode 100644 index 0000000000..6bdb57f76e --- /dev/null +++ b/tools/testing/kunit/test_data/test_config_printk_time.log @@ -0,0 +1,32 @@ +[ 0.060000] printk: console [mc-1] enabled +[ 0.060000] random: get_random_bytes called from init_oops_id+0x35/0x40 with crng_init=0 +[ 0.060000] TAP version 14 +[ 0.060000] 1..3 +[ 0.060000] # Subtest: 
kunit-resource-test +[ 0.060000] 1..5 +[ 0.060000] ok 1 - kunit_resource_test_init_resources +[ 0.060000] ok 2 - kunit_resource_test_alloc_resource +[ 0.060000] ok 3 - kunit_resource_test_destroy_resource +[ 0.060000] ok 4 - kunit_resource_test_cleanup_resources +[ 0.060000] ok 5 - kunit_resource_test_proper_free_ordering +[ 0.060000] ok 1 - kunit-resource-test +[ 0.060000] # Subtest: kunit-try-catch-test +[ 0.060000] 1..2 +[ 0.060000] ok 1 - kunit_test_try_catch_successful_try_no_catch +[ 0.060000] ok 2 - kunit_test_try_catch_unsuccessful_try_does_catch +[ 0.060000] ok 2 - kunit-try-catch-test +[ 0.060000] # Subtest: string-stream-test +[ 0.060000] 1..3 +[ 0.060000] ok 1 - string_stream_test_empty_on_creation +[ 0.060000] ok 2 - string_stream_test_not_empty_after_add +[ 0.060000] ok 3 - string_stream_test_get_string +[ 0.060000] ok 3 - string-stream-test +[ 0.060000] List of all partitions: +[ 0.060000] No filesystem could mount root, tried: +[ 0.060000] +[ 0.060000] Kernel panic - not syncing: VFS: Unable to mount root fs on unknown-block(98,0) +[ 0.060000] CPU: 0 PID: 1 Comm: swapper Not tainted 5.4.0-rc1-gea2dd7c0875e-dirty #2 +[ 0.060000] Stack: +[ 0.060000] 602086f8 601bc260 705c0000 705c0000 +[ 0.060000] 602086f8 6005fcec 705c0000 6002c6ab +[ 0.060000] 6005fcec 601bc260 705c0000 3000000010 diff --git a/tools/testing/kunit/test_data/test_insufficient_memory.log b/tools/testing/kunit/test_data/test_insufficient_memory.log new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tools/testing/kunit/test_data/test_interrupted_tap_output.log b/tools/testing/kunit/test_data/test_interrupted_tap_output.log new file mode 100644 index 0000000000..1fb677728a --- /dev/null +++ b/tools/testing/kunit/test_data/test_interrupted_tap_output.log @@ -0,0 +1,38 @@ +[ 0.060000] printk: console [mc-1] enabled +[ 0.060000] random: get_random_bytes called from init_oops_id+0x35/0x40 with crng_init=0 +[ 0.060000] TAP version 14 +[ 0.060000] 1..3 +[ 0.060000] # Subtest: kunit-resource-test +[ 0.060000] 1..5 +[ 0.060000] ok 1 - kunit_resource_test_init_resources +[ 0.060000] ok 2 - kunit_resource_test_alloc_resource +[ 0.060000] ok 3 - kunit_resource_test_destroy_resource +[ 0.060000] kAFS: Red Hat AFS client v0.1 registering. 
+[ 0.060000] FS-Cache: Netfs 'afs' registered for caching +[ 0.060000] *** VALIDATE kAFS *** +[ 0.060000] Btrfs loaded, crc32c=crc32c-generic, debug=on, assert=on, integrity-checker=on, ref-verify=on +[ 0.060000] BTRFS: selftest: sectorsize: 4096 nodesize: 4096 +[ 0.060000] BTRFS: selftest: running btrfs free space cache tests +[ 0.060000] ok 4 - kunit_resource_test_cleanup_resources +[ 0.060000] ok 5 - kunit_resource_test_proper_free_ordering +[ 0.060000] ok 1 - kunit-resource-test +[ 0.060000] # Subtest: kunit-try-catch-test +[ 0.060000] 1..2 +[ 0.060000] ok 1 - kunit_test_try_catch_successful_try_no_catch +[ 0.060000] ok 2 - kunit_test_try_catch_unsuccessful_try_does_catch +[ 0.060000] ok 2 - kunit-try-catch-test +[ 0.060000] # Subtest: string-stream-test +[ 0.060000] 1..3 +[ 0.060000] ok 1 - string_stream_test_empty_on_creation +[ 0.060000] ok 2 - string_stream_test_not_empty_after_add +[ 0.060000] ok 3 - string_stream_test_get_string +[ 0.060000] ok 3 - string-stream-test +[ 0.060000] List of all partitions: +[ 0.060000] No filesystem could mount root, tried: +[ 0.060000] +[ 0.060000] Kernel panic - not syncing: VFS: Unable to mount root fs on unknown-block(98,0) +[ 0.060000] CPU: 0 PID: 1 Comm: swapper Not tainted 5.4.0-rc1-gea2dd7c0875e-dirty #2 +[ 0.060000] Stack: +[ 0.060000] 602086f8 601bc260 705c0000 705c0000 +[ 0.060000] 602086f8 6005fcec 705c0000 6002c6ab +[ 0.060000] 6005fcec 601bc260 705c0000 3000000010 diff --git a/tools/testing/kunit/test_data/test_is_test_passed-all_passed.log b/tools/testing/kunit/test_data/test_is_test_passed-all_passed.log new file mode 100644 index 0000000000..bc0dc8fe35 --- /dev/null +++ b/tools/testing/kunit/test_data/test_is_test_passed-all_passed.log @@ -0,0 +1,33 @@ +TAP version 14 +1..2 + # Subtest: sysctl_test + 1..8 + # sysctl_test_dointvec_null_tbl_data: sysctl_test_dointvec_null_tbl_data passed + ok 1 - sysctl_test_dointvec_null_tbl_data + # sysctl_test_dointvec_table_maxlen_unset: sysctl_test_dointvec_table_maxlen_unset passed + ok 2 - sysctl_test_dointvec_table_maxlen_unset + # sysctl_test_dointvec_table_len_is_zero: sysctl_test_dointvec_table_len_is_zero passed + ok 3 - sysctl_test_dointvec_table_len_is_zero + # sysctl_test_dointvec_table_read_but_position_set: sysctl_test_dointvec_table_read_but_position_set passed + ok 4 - sysctl_test_dointvec_table_read_but_position_set + # sysctl_test_dointvec_happy_single_positive: sysctl_test_dointvec_happy_single_positive passed + ok 5 - sysctl_test_dointvec_happy_single_positive + # sysctl_test_dointvec_happy_single_negative: sysctl_test_dointvec_happy_single_negative passed + ok 6 - sysctl_test_dointvec_happy_single_negative + # sysctl_test_dointvec_single_less_int_min: sysctl_test_dointvec_single_less_int_min passed + ok 7 - sysctl_test_dointvec_single_less_int_min + # sysctl_test_dointvec_single_greater_int_max: sysctl_test_dointvec_single_greater_int_max passed + ok 8 - sysctl_test_dointvec_single_greater_int_max +kunit sysctl_test: all tests passed +ok 1 - sysctl_test + # Subtest: example + 1..2 +init_suite + # example_simple_test: initializing + # example_simple_test: example_simple_test passed + ok 1 - example_simple_test + # example_mock_test: initializing + # example_mock_test: example_mock_test passed + ok 2 - example_mock_test +kunit example: all tests passed +ok 2 - example diff --git a/tools/testing/kunit/test_data/test_is_test_passed-all_passed_nested.log b/tools/testing/kunit/test_data/test_is_test_passed-all_passed_nested.log new file mode 100644 index 0000000000..9d5b04fe43 --- 
/dev/null +++ b/tools/testing/kunit/test_data/test_is_test_passed-all_passed_nested.log @@ -0,0 +1,34 @@ +TAP version 14 +1..2 + # Subtest: sysctl_test + 1..4 + # sysctl_test_dointvec_null_tbl_data: sysctl_test_dointvec_null_tbl_data passed + ok 1 - sysctl_test_dointvec_null_tbl_data + # Subtest: example + 1..2 + init_suite + # example_simple_test: initializing + # example_simple_test: example_simple_test passed + ok 1 - example_simple_test + # example_mock_test: initializing + # example_mock_test: example_mock_test passed + ok 2 - example_mock_test + kunit example: all tests passed + ok 2 - example + # sysctl_test_dointvec_table_len_is_zero: sysctl_test_dointvec_table_len_is_zero passed + ok 3 - sysctl_test_dointvec_table_len_is_zero + # sysctl_test_dointvec_table_read_but_position_set: sysctl_test_dointvec_table_read_but_position_set passed + ok 4 - sysctl_test_dointvec_table_read_but_position_set +kunit sysctl_test: all tests passed +ok 1 - sysctl_test + # Subtest: example + 1..2 +init_suite + # example_simple_test: initializing + # example_simple_test: example_simple_test passed + ok 1 - example_simple_test + # example_mock_test: initializing + # example_mock_test: example_mock_test passed + ok 2 - example_mock_test +kunit example: all tests passed +ok 2 - example diff --git a/tools/testing/kunit/test_data/test_is_test_passed-failure.log b/tools/testing/kunit/test_data/test_is_test_passed-failure.log new file mode 100644 index 0000000000..7a416497e3 --- /dev/null +++ b/tools/testing/kunit/test_data/test_is_test_passed-failure.log @@ -0,0 +1,37 @@ +TAP version 14 +1..2 + # Subtest: sysctl_test + 1..8 + # sysctl_test_dointvec_null_tbl_data: sysctl_test_dointvec_null_tbl_data passed + ok 1 - sysctl_test_dointvec_null_tbl_data + # sysctl_test_dointvec_table_maxlen_unset: sysctl_test_dointvec_table_maxlen_unset passed + ok 2 - sysctl_test_dointvec_table_maxlen_unset + # sysctl_test_dointvec_table_len_is_zero: sysctl_test_dointvec_table_len_is_zero passed + ok 3 - sysctl_test_dointvec_table_len_is_zero + # sysctl_test_dointvec_table_read_but_position_set: sysctl_test_dointvec_table_read_but_position_set passed + ok 4 - sysctl_test_dointvec_table_read_but_position_set + # sysctl_test_dointvec_happy_single_positive: sysctl_test_dointvec_happy_single_positive passed + ok 5 - sysctl_test_dointvec_happy_single_positive + # sysctl_test_dointvec_happy_single_negative: sysctl_test_dointvec_happy_single_negative passed + ok 6 - sysctl_test_dointvec_happy_single_negative + # sysctl_test_dointvec_single_less_int_min: sysctl_test_dointvec_single_less_int_min passed + ok 7 - sysctl_test_dointvec_single_less_int_min + # sysctl_test_dointvec_single_greater_int_max: sysctl_test_dointvec_single_greater_int_max passed + ok 8 - sysctl_test_dointvec_single_greater_int_max +kunit sysctl_test: all tests passed +ok 1 - sysctl_test + # Subtest: example + 1..2 +init_suite + # example_simple_test: initializing + # example_simple_test: EXPECTATION FAILED at lib/kunit/example-test.c:30 + Expected 1 + 1 == 3, but + 1 + 1 == 2 + 3 == 3 + # example_simple_test: example_simple_test failed + not ok 1 - example_simple_test + # example_mock_test: initializing + # example_mock_test: example_mock_test passed + ok 2 - example_mock_test +kunit example: one or more tests failed +not ok 2 - example diff --git a/tools/testing/kunit/test_data/test_is_test_passed-kselftest.log b/tools/testing/kunit/test_data/test_is_test_passed-kselftest.log new file mode 100644 index 0000000000..65d3f27fea --- /dev/null +++ 
b/tools/testing/kunit/test_data/test_is_test_passed-kselftest.log @@ -0,0 +1,14 @@ +TAP version 13 +1..2 +# selftests: membarrier: membarrier_test_single_thread +# TAP version 13 +# 1..2 +# ok 1 sys_membarrier available +# ok 2 sys membarrier invalid command test: command = -1, flags = 0, errno = 22. Failed as expected +ok 1 selftests: membarrier: membarrier_test_single_thread +# selftests: membarrier: membarrier_test_multi_thread +# TAP version 13 +# 1..2 +# ok 1 sys_membarrier available +# ok 2 sys membarrier invalid command test: command = -1, flags = 0, errno = 22. Failed as expected +ok 2 selftests: membarrier: membarrier_test_multi_thread diff --git a/tools/testing/kunit/test_data/test_is_test_passed-missing_plan.log b/tools/testing/kunit/test_data/test_is_test_passed-missing_plan.log new file mode 100644 index 0000000000..5cd17b7f81 --- /dev/null +++ b/tools/testing/kunit/test_data/test_is_test_passed-missing_plan.log @@ -0,0 +1,31 @@ +KTAP version 1 + # Subtest: sysctl_test + # sysctl_test_dointvec_null_tbl_data: sysctl_test_dointvec_null_tbl_data passed + ok 1 - sysctl_test_dointvec_null_tbl_data + # sysctl_test_dointvec_table_maxlen_unset: sysctl_test_dointvec_table_maxlen_unset passed + ok 2 - sysctl_test_dointvec_table_maxlen_unset + # sysctl_test_dointvec_table_len_is_zero: sysctl_test_dointvec_table_len_is_zero passed + ok 3 - sysctl_test_dointvec_table_len_is_zero + # sysctl_test_dointvec_table_read_but_position_set: sysctl_test_dointvec_table_read_but_position_set passed + ok 4 - sysctl_test_dointvec_table_read_but_position_set + # sysctl_test_dointvec_happy_single_positive: sysctl_test_dointvec_happy_single_positive passed + ok 5 - sysctl_test_dointvec_happy_single_positive + # sysctl_test_dointvec_happy_single_negative: sysctl_test_dointvec_happy_single_negative passed + ok 6 - sysctl_test_dointvec_happy_single_negative + # sysctl_test_dointvec_single_less_int_min: sysctl_test_dointvec_single_less_int_min passed + ok 7 - sysctl_test_dointvec_single_less_int_min + # sysctl_test_dointvec_single_greater_int_max: sysctl_test_dointvec_single_greater_int_max passed + ok 8 - sysctl_test_dointvec_single_greater_int_max +kunit sysctl_test: all tests passed +ok 1 - sysctl_test + # Subtest: example + 1..2 +init_suite + # example_simple_test: initializing + # example_simple_test: example_simple_test passed + ok 1 - example_simple_test + # example_mock_test: initializing + # example_mock_test: example_mock_test passed + ok 2 - example_mock_test +kunit example: all tests passed +ok 2 - example diff --git a/tools/testing/kunit/test_data/test_is_test_passed-no_tests_no_plan.log b/tools/testing/kunit/test_data/test_is_test_passed-no_tests_no_plan.log new file mode 100644 index 0000000000..4f81876ee6 --- /dev/null +++ b/tools/testing/kunit/test_data/test_is_test_passed-no_tests_no_plan.log @@ -0,0 +1,7 @@ +TAP version 14 +1..1 + # Subtest: suite + 1..1 + # Subtest: case + ok 1 - case +ok 1 - suite diff --git a/tools/testing/kunit/test_data/test_is_test_passed-no_tests_run_no_header.log b/tools/testing/kunit/test_data/test_is_test_passed-no_tests_run_no_header.log new file mode 100644 index 0000000000..ba69f5c94b --- /dev/null +++ b/tools/testing/kunit/test_data/test_is_test_passed-no_tests_run_no_header.log @@ -0,0 +1,75 @@ +Core dump limits : + soft - 0 + hard - NONE +Checking environment variables for a tempdir...none found +Checking if /dev/shm is on tmpfs...OK +Checking PROT_EXEC mmap in /dev/shm...OK +Adding 24743936 bytes to physical memory to account for exec-shield gap +Linux 
version 4.12.0-rc3-00010-g7319eb35f493-dirty (brendanhiggins@mactruck.svl.corp.google.com) (gcc version 7.3.0 (Debian 7.3.0-5) ) #29 Thu Mar 15 14:57:19 PDT 2018 +Built 1 zonelists in Zone order, mobility grouping on. Total pages: 14038 +Kernel command line: root=98:0 +PID hash table entries: 256 (order: -1, 2048 bytes) +Dentry cache hash table entries: 8192 (order: 4, 65536 bytes) +Inode-cache hash table entries: 4096 (order: 3, 32768 bytes) +Memory: 27868K/56932K available (1681K kernel code, 480K rwdata, 400K rodata, 89K init, 205K bss, 29064K reserved, 0K cma-reserved) +SLUB: HWalign=64, Order=0-3, MinObjects=0, CPUs=1, Nodes=1 +NR_IRQS:15 +clocksource: timer: mask: 0xffffffffffffffff max_cycles: 0x1cd42e205, max_idle_ns: 881590404426 ns +Calibrating delay loop... 7384.26 BogoMIPS (lpj=36921344) +pid_max: default: 32768 minimum: 301 +Mount-cache hash table entries: 512 (order: 0, 4096 bytes) +Mountpoint-cache hash table entries: 512 (order: 0, 4096 bytes) +Checking that host ptys support output SIGIO...Yes +Checking that host ptys support SIGIO on close...No, enabling workaround +Using 2.6 host AIO +clocksource: jiffies: mask: 0xffffffff max_cycles: 0xffffffff, max_idle_ns: 19112604462750000 ns +futex hash table entries: 256 (order: 0, 6144 bytes) +clocksource: Switched to clocksource timer +console [stderr0] disabled +mconsole (version 2) initialized on /usr/local/google/home/brendanhiggins/.uml/6Ijecl/mconsole +Checking host MADV_REMOVE support...OK +workingset: timestamp_bits=62 max_order=13 bucket_order=0 +Block layer SCSI generic (bsg) driver version 0.4 loaded (major 254) +io scheduler noop registered +io scheduler deadline registered +io scheduler cfq registered (default) +io scheduler mq-deadline registered +io scheduler kyber registered +Initialized stdio console driver +Using a channel type which is configured out of UML +setup_one_line failed for device 1 : Configuration failed +Using a channel type which is configured out of UML +setup_one_line failed for device 2 : Configuration failed +Using a channel type which is configured out of UML +setup_one_line failed for device 3 : Configuration failed +Using a channel type which is configured out of UML +setup_one_line failed for device 4 : Configuration failed +Using a channel type which is configured out of UML +setup_one_line failed for device 5 : Configuration failed +Using a channel type which is configured out of UML +setup_one_line failed for device 6 : Configuration failed +Using a channel type which is configured out of UML +setup_one_line failed for device 7 : Configuration failed +Using a channel type which is configured out of UML +setup_one_line failed for device 8 : Configuration failed +Using a channel type which is configured out of UML +setup_one_line failed for device 9 : Configuration failed +Using a channel type which is configured out of UML +setup_one_line failed for device 10 : Configuration failed +Using a channel type which is configured out of UML +setup_one_line failed for device 11 : Configuration failed +Using a channel type which is configured out of UML +setup_one_line failed for device 12 : Configuration failed +Using a channel type which is configured out of UML +setup_one_line failed for device 13 : Configuration failed +Using a channel type which is configured out of UML +setup_one_line failed for device 14 : Configuration failed +Using a channel type which is configured out of UML +setup_one_line failed for device 15 : Configuration failed +Console initialized on /dev/tty0 +console [tty0] 
enabled +console [mc-1] enabled +List of all partitions: +No filesystem could mount root, tried: + +Kernel panic - not syncing: VFS: Unable to mount root fs on unknown-block(98,0) diff --git a/tools/testing/kunit/test_data/test_is_test_passed-no_tests_run_with_header.log b/tools/testing/kunit/test_data/test_is_test_passed-no_tests_run_with_header.log new file mode 100644 index 0000000000..5f48ee659d --- /dev/null +++ b/tools/testing/kunit/test_data/test_is_test_passed-no_tests_run_with_header.log @@ -0,0 +1,2 @@ +TAP version 14 +1..0 diff --git a/tools/testing/kunit/test_data/test_kernel_panic_interrupt.log b/tools/testing/kunit/test_data/test_kernel_panic_interrupt.log new file mode 100644 index 0000000000..a014ffe972 --- /dev/null +++ b/tools/testing/kunit/test_data/test_kernel_panic_interrupt.log @@ -0,0 +1,26 @@ +[ 0.060000] printk: console [mc-1] enabled +[ 0.060000] random: get_random_bytes called from init_oops_id+0x35/0x40 with crng_init=0 +[ 0.060000] TAP version 14 +[ 0.060000] 1..3 +[ 0.060000] # Subtest: kunit-resource-test +[ 0.060000] 1..5 +[ 0.060000] ok 1 - kunit_resource_test_init_resources +[ 0.060000] ok 2 - kunit_resource_test_alloc_resource +[ 0.060000] ok 3 - kunit_resource_test_destroy_resource +[ 0.060000] ok 4 - kunit_resource_test_cleanup_resources +[ 0.060000] ok 5 - kunit_resource_test_proper_free_ordering +[ 0.060000] ok 1 - kunit-resource-test +[ 0.060000] # Subtest: kunit-try-catch-test +[ 0.060000] 1..2 +[ 0.060000] ok 1 - kunit_test_try_catch_successful_try_no_catch +[ 0.060000] ok 2 - kunit_test_try_catch_unsuccessful_try_does_catch +[ 0.060000] ok 2 - kunit-try-catch-test +[ 0.060000] # Subtest: string-stream-test +[ 0.060000] 1..3 +[ 0.060000] ok 1 - string_stream_test_empty_on_creation +[ 0.060000] Kernel panic - not syncing: VFS: Unable to mount root fs on unknown-block(98,0) +[ 0.060000] CPU: 0 PID: 1 Comm: swapper Not tainted 5.4.0-rc1-gea2dd7c0875e-dirty #2 +[ 0.060000] Stack: +[ 0.060000] 602086f8 601bc260 705c0000 705c0000 +[ 0.060000] 602086f8 6005fcec 705c0000 6002c6ab +[ 0.060000] 6005fcec 601bc260 705c0000 3000000010 diff --git a/tools/testing/kunit/test_data/test_multiple_prefixes.log b/tools/testing/kunit/test_data/test_multiple_prefixes.log new file mode 100644 index 0000000000..0ad78481a0 --- /dev/null +++ b/tools/testing/kunit/test_data/test_multiple_prefixes.log @@ -0,0 +1,32 @@ +[ 0.060000][ T1] printk: console [mc-1] enabled +[ 0.060000][ T1] random: get_random_bytes called from init_oops_id+0x35/0x40 with crng_init=0 +[ 0.060000][ T1] TAP version 14 +[ 0.060000][ T1] 1..3 +[ 0.060000][ T1] # Subtest: kunit-resource-test +[ 0.060000][ T1] 1..5 +[ 0.060000][ T1] ok 1 - kunit_resource_test_init_resources +[ 0.060000][ T1] ok 2 - kunit_resource_test_alloc_resource +[ 0.060000][ T1] ok 3 - kunit_resource_test_destroy_resource +[ 0.060000][ T1] ok 4 - kunit_resource_test_cleanup_resources +[ 0.060000][ T1] ok 5 - kunit_resource_test_proper_free_ordering +[ 0.060000][ T1] ok 1 - kunit-resource-test +[ 0.060000][ T1] # Subtest: kunit-try-catch-test +[ 0.060000][ T1] 1..2 +[ 0.060000][ T1] ok 1 - kunit_test_try_catch_successful_try_no_catch +[ 0.060000][ T1] ok 2 - kunit_test_try_catch_unsuccessful_try_does_catch +[ 0.060000][ T1] ok 2 - kunit-try-catch-test +[ 0.060000][ T1] # Subtest: string-stream-test +[ 0.060000][ T1] 1..3 +[ 0.060000][ T1] ok 1 - string_stream_test_empty_on_creation +[ 0.060000][ T1] ok 2 - string_stream_test_not_empty_after_add +[ 0.060000][ T1] ok 3 - string_stream_test_get_string +[ 0.060000][ T1] ok 3 - 
string-stream-test +[ 0.060000][ T1] List of all partitions: +[ 0.060000][ T1] No filesystem could mount root, tried: +[ 0.060000][ T1] +[ 0.060000][ T1] Kernel panic - not syncing: VFS: Unable to mount root fs on unknown-block(98,0) +[ 0.060000][ T1] CPU: 0 PID: 1 Comm: swapper Not tainted 5.4.0-rc1-gea2dd7c0875e-dirty #2 +[ 0.060000][ T1] Stack: +[ 0.060000][ T1] 602086f8 601bc260 705c0000 705c0000 +[ 0.060000][ T1] 602086f8 6005fcec 705c0000 6002c6ab +[ 0.060000][ T1] 6005fcec 601bc260 705c0000 3000000010 diff --git a/tools/testing/kunit/test_data/test_output_isolated_correctly.log b/tools/testing/kunit/test_data/test_output_isolated_correctly.log new file mode 100644 index 0000000000..94a6b3aeaa --- /dev/null +++ b/tools/testing/kunit/test_data/test_output_isolated_correctly.log @@ -0,0 +1,106 @@ +Linux version 5.1.0-rc7-00061-g04652f1cb4aa0 (brendanhiggins@mactruck.svl.corp.google.com) (gcc version 7.3.0 (Debian 7.3.0-18)) #163 Wed May 8 16:18:20 PDT 2019 +Built 1 zonelists, mobility grouping on. Total pages: 69906 +Kernel command line: mem=256M root=98:0 +Dentry cache hash table entries: 65536 (order: 7, 524288 bytes) +Inode-cache hash table entries: 32768 (order: 6, 262144 bytes) +Memory: 254468K/283500K available (1734K kernel code, 489K rwdata, 396K rodata, 85K init, 216K bss, 29032K reserved, 0K cma-reserved) +SLUB: HWalign=64, Order=0-3, MinObjects=0, CPUs=1, Nodes=1 +NR_IRQS: 15 +clocksource: timer: mask: 0xffffffffffffffff max_cycles: 0x1cd42e205, max_idle_ns: 881590404426 ns +------------[ cut here ]------------ +WARNING: CPU: 0 PID: 0 at kernel/time/clockevents.c:458 clockevents_register_device+0x143/0x160 +posix-timer cpumask == cpu_all_mask, using cpu_possible_mask instead +CPU: 0 PID: 0 Comm: swapper Not tainted 5.1.0-rc7-00061-g04652f1cb4aa0 #163 +Stack: + 6005cc00 60233e18 60233e60 60233e18 + 60233e60 00000009 00000000 6002a1b4 + 1ca00000000 60071c23 60233e78 100000000000062 +Call Trace: + [<600214c5>] ? os_is_signal_stack+0x15/0x30 + [<6005c5ec>] ? printk+0x0/0x9b + [<6001597e>] ? show_stack+0xbe/0x1c0 + [<6005cc00>] ? __printk_safe_exit+0x0/0x40 + [<6002a1b4>] ? __warn+0x144/0x170 + [<60071c23>] ? clockevents_register_device+0x143/0x160 + [<60021440>] ? get_signals+0x0/0x10 + [<6005c5ec>] ? printk+0x0/0x9b + [<6002a27b>] ? warn_slowpath_fmt+0x9b/0xb0 + [<6005c5ec>] ? printk+0x0/0x9b + [<6002a1e0>] ? warn_slowpath_fmt+0x0/0xb0 + [<6005c5ec>] ? printk+0x0/0x9b + [<60021440>] ? get_signals+0x0/0x10 + [<600213f0>] ? block_signals+0x0/0x20 + [<60071c23>] ? clockevents_register_device+0x143/0x160 + [<60021440>] ? get_signals+0x0/0x10 + [<600213f0>] ? block_signals+0x0/0x20 + [<6005c5ec>] ? printk+0x0/0x9b + [<60001bc8>] ? start_kernel+0x477/0x56a + [<600036f1>] ? start_kernel_proc+0x46/0x4d + [<60014442>] ? new_thread_handler+0x82/0xc0 + +random: get_random_bytes called from print_oops_end_marker+0x4c/0x60 with crng_init=0 +---[ end trace c83434852b3702d3 ]--- +Calibrating delay loop... 
6958.28 BogoMIPS (lpj=34791424) +pid_max: default: 32768 minimum: 301 +Mount-cache hash table entries: 1024 (order: 1, 8192 bytes) +Mountpoint-cache hash table entries: 1024 (order: 1, 8192 bytes) +*** VALIDATE proc *** +Checking that host ptys support output SIGIO...Yes +Checking that host ptys support SIGIO on close...No, enabling workaround +clocksource: jiffies: mask: 0xffffffff max_cycles: 0xffffffff, max_idle_ns: 19112604462750000 ns +futex hash table entries: 256 (order: 0, 6144 bytes) +clocksource: Switched to clocksource timer +printk: console [stderr0] disabled +mconsole (version 2) initialized on /usr/local/google/home/brendanhiggins/.uml/VZ2qMm/mconsole +Checking host MADV_REMOVE support...OK +workingset: timestamp_bits=62 max_order=16 bucket_order=0 +Block layer SCSI generic (bsg) driver version 0.4 loaded (major 254) +io scheduler mq-deadline registered +io scheduler kyber registered +Initialized stdio console driver +Using a channel type which is configured out of UML +setup_one_line failed for device 1 : Configuration failed +Using a channel type which is configured out of UML +setup_one_line failed for device 2 : Configuration failed +Using a channel type which is configured out of UML +setup_one_line failed for device 3 : Configuration failed +Using a channel type which is configured out of UML +setup_one_line failed for device 4 : Configuration failed +Using a channel type which is configured out of UML +setup_one_line failed for device 5 : Configuration failed +Using a channel type which is configured out of UML +setup_one_line failed for device 6 : Configuration failed +Using a channel type which is configured out of UML +setup_one_line failed for device 7 : Configuration failed +Using a channel type which is configured out of UML +setup_one_line failed for device 8 : Configuration failed +Using a channel type which is configured out of UML +setup_one_line failed for device 9 : Configuration failed +Using a channel type which is configured out of UML +setup_one_line failed for device 10 : Configuration failed +Using a channel type which is configured out of UML +setup_one_line failed for device 11 : Configuration failed +Using a channel type which is configured out of UML +setup_one_line failed for device 12 : Configuration failed +Using a channel type which is configured out of UML +setup_one_line failed for device 13 : Configuration failed +Using a channel type which is configured out of UML +setup_one_line failed for device 14 : Configuration failed +Using a channel type which is configured out of UML +setup_one_line failed for device 15 : Configuration failed +Console initialized on /dev/tty0 +printk: console [tty0] enabled +printk: console [mc-1] enabled +TAP version 14 + # Subtest: example + 1..2 +init_suite + # example_simple_test: initializing + # example_simple_test: example_simple_test passed + ok 1 - example_simple_test + # example_mock_test: initializing + # example_mock_test: example_mock_test passed + ok 2 - example_mock_test +kunit example: all tests passed +ok 1 - example +List of all partitions: diff --git a/tools/testing/kunit/test_data/test_output_with_prefix_isolated_correctly.log b/tools/testing/kunit/test_data/test_output_with_prefix_isolated_correctly.log new file mode 100644 index 0000000000..0f87cdabeb --- /dev/null +++ b/tools/testing/kunit/test_data/test_output_with_prefix_isolated_correctly.log @@ -0,0 +1,33 @@ +[ 0.060000] printk: console [mc-1] enabled +[ 0.060000] random: get_random_bytes called from init_oops_id+0x35/0x40 with 
crng_init=0 +[ 0.060000] TAP version 14 +[ 0.060000] # Subtest: kunit-resource-test +[ 0.060000] 1..5 +[ 0.060000] ok 1 - kunit_resource_test_init_resources +[ 0.060000] ok 2 - kunit_resource_test_alloc_resource +[ 0.060000] ok 3 - kunit_resource_test_destroy_resource +[ 0.060000] foo bar # +[ 0.060000] ok 4 - kunit_resource_test_cleanup_resources +[ 0.060000] ok 5 - kunit_resource_test_proper_free_ordering +[ 0.060000] ok 1 - kunit-resource-test +[ 0.060000] foo bar # non-kunit output +[ 0.060000] # Subtest: kunit-try-catch-test +[ 0.060000] 1..2 +[ 0.060000] ok 1 - kunit_test_try_catch_successful_try_no_catch +[ 0.060000] ok 2 - kunit_test_try_catch_unsuccessful_try_does_catch +[ 0.060000] ok 2 - kunit-try-catch-test +[ 0.060000] # Subtest: string-stream-test +[ 0.060000] 1..3 +[ 0.060000] ok 1 - string_stream_test_empty_on_creation +[ 0.060000] ok 2 - string_stream_test_not_empty_after_add +[ 0.060000] ok 3 - string_stream_test_get_string +[ 0.060000] ok 3 - string-stream-test +[ 0.060000] List of all partitions: +[ 0.060000] No filesystem could mount root, tried: +[ 0.060000] +[ 0.060000] Kernel panic - not syncing: VFS: Unable to mount root fs on unknown-block(98,0) +[ 0.060000] CPU: 0 PID: 1 Comm: swapper Not tainted 5.4.0-rc1-gea2dd7c0875e-dirty #2 +[ 0.060000] Stack: +[ 0.060000] 602086f8 601bc260 705c0000 705c0000 +[ 0.060000] 602086f8 6005fcec 705c0000 6002c6ab +[ 0.060000] 6005fcec 601bc260 705c0000 3000000010 \ No newline at end of file diff --git a/tools/testing/kunit/test_data/test_parse_ktap_output.log b/tools/testing/kunit/test_data/test_parse_ktap_output.log new file mode 100644 index 0000000000..ccdf244e53 --- /dev/null +++ b/tools/testing/kunit/test_data/test_parse_ktap_output.log @@ -0,0 +1,8 @@ +KTAP version 1 +1..1 + KTAP version 1 + 1..3 + ok 1 case_1 + ok 2 case_2 + ok 3 case_3 +ok 1 suite diff --git a/tools/testing/kunit/test_data/test_parse_subtest_header.log b/tools/testing/kunit/test_data/test_parse_subtest_header.log new file mode 100644 index 0000000000..216631092e --- /dev/null +++ b/tools/testing/kunit/test_data/test_parse_subtest_header.log @@ -0,0 +1,7 @@ +KTAP version 1 +1..1 + KTAP version 1 + # Subtest: suite + 1..1 + ok 1 test +ok 1 suite \ No newline at end of file diff --git a/tools/testing/kunit/test_data/test_pound_no_prefix.log b/tools/testing/kunit/test_data/test_pound_no_prefix.log new file mode 100644 index 0000000000..dc4cf09a96 --- /dev/null +++ b/tools/testing/kunit/test_data/test_pound_no_prefix.log @@ -0,0 +1,34 @@ + printk: console [mc-1] enabled + random: get_random_bytes called from init_oops_id+0x35/0x40 with crng_init=0 + TAP version 14 + 1..3 + # Subtest: kunit-resource-test + 1..5 + ok 1 - kunit_resource_test_init_resources + ok 2 - kunit_resource_test_alloc_resource + ok 3 - kunit_resource_test_destroy_resource + foo bar # + ok 4 - kunit_resource_test_cleanup_resources + ok 5 - kunit_resource_test_proper_free_ordering + ok 1 - kunit-resource-test + foo bar # non-kunit output + # Subtest: kunit-try-catch-test + 1..2 + ok 1 - kunit_test_try_catch_successful_try_no_catch + ok 2 - kunit_test_try_catch_unsuccessful_try_does_catch + ok 2 - kunit-try-catch-test + # Subtest: string-stream-test + 1..3 + ok 1 - string_stream_test_empty_on_creation + ok 2 - string_stream_test_not_empty_after_add + ok 3 - string_stream_test_get_string + ok 3 - string-stream-test + List of all partitions: + No filesystem could mount root, tried: + + Kernel panic - not syncing: VFS: Unable to mount root fs on unknown-block(98,0) + CPU: 0 PID: 1 Comm: swapper Not 
tainted 5.4.0-rc1-gea2dd7c0875e-dirty #2 + Stack: + 602086f8 601bc260 705c0000 705c0000 + 602086f8 6005fcec 705c0000 6002c6ab + 6005fcec 601bc260 705c0000 3000000010 diff --git a/tools/testing/kunit/test_data/test_pound_sign.log b/tools/testing/kunit/test_data/test_pound_sign.log new file mode 100644 index 0000000000..3f358e3a7b --- /dev/null +++ b/tools/testing/kunit/test_data/test_pound_sign.log @@ -0,0 +1,34 @@ +[ 0.060000] printk: console [mc-1] enabled +[ 0.060000] random: get_random_bytes called from init_oops_id+0x35/0x40 with crng_init=0 +[ 0.060000] TAP version 14 +[ 0.060000] 1..3 +[ 0.060000] # Subtest: kunit-resource-test +[ 0.060000] 1..5 +[ 0.060000] ok 1 - kunit_resource_test_init_resources +[ 0.060000] ok 2 - kunit_resource_test_alloc_resource +[ 0.060000] ok 3 - kunit_resource_test_destroy_resource +[ 0.060000] foo bar # +[ 0.060000] ok 4 - kunit_resource_test_cleanup_resources +[ 0.060000] ok 5 - kunit_resource_test_proper_free_ordering +[ 0.060000] ok 1 - kunit-resource-test +[ 0.060000] foo bar # non-kunit output +[ 0.060000] # Subtest: kunit-try-catch-test +[ 0.060000] 1..2 +[ 0.060000] ok 1 - kunit_test_try_catch_successful_try_no_catch +[ 0.060000] ok 2 - kunit_test_try_catch_unsuccessful_try_does_catch +[ 0.060000] ok 2 - kunit-try-catch-test +[ 0.060000] # Subtest: string-stream-test +[ 0.060000] 1..3 +[ 0.060000] ok 1 - string_stream_test_empty_on_creation +[ 0.060000] ok 2 - string_stream_test_not_empty_after_add +[ 0.060000] ok 3 - string_stream_test_get_string +[ 0.060000] ok 3 - string-stream-test +[ 0.060000] List of all partitions: +[ 0.060000] No filesystem could mount root, tried: +[ 0.060000] +[ 0.060000] Kernel panic - not syncing: VFS: Unable to mount root fs on unknown-block(98,0) +[ 0.060000] CPU: 0 PID: 1 Comm: swapper Not tainted 5.4.0-rc1-gea2dd7c0875e-dirty #2 +[ 0.060000] Stack: +[ 0.060000] 602086f8 601bc260 705c0000 705c0000 +[ 0.060000] 602086f8 6005fcec 705c0000 6002c6ab +[ 0.060000] 6005fcec 601bc260 705c0000 3000000010 diff --git a/tools/testing/kunit/test_data/test_read_from_file.kconfig b/tools/testing/kunit/test_data/test_read_from_file.kconfig new file mode 100644 index 0000000000..d2a4928ac7 --- /dev/null +++ b/tools/testing/kunit/test_data/test_read_from_file.kconfig @@ -0,0 +1,17 @@ +# +# Automatically generated file; DO NOT EDIT. 
+# User Mode Linux/x86 4.12.0-rc3 Kernel Configuration +# +CONFIG_UML=y +CONFIG_MMU=y + +# +# UML-specific options +# + +# +# Host processor type and features +# +# CONFIG_MK8 is not set +CONFIG_TEST=y +CONFIG_EXAMPLE_TEST=y diff --git a/tools/testing/kunit/test_data/test_skip_all_tests.log b/tools/testing/kunit/test_data/test_skip_all_tests.log new file mode 100644 index 0000000000..2ea6e6d14f --- /dev/null +++ b/tools/testing/kunit/test_data/test_skip_all_tests.log @@ -0,0 +1,15 @@ +TAP version 14 +1..2 + # Subtest: string-stream-test + 1..3 + ok 1 - string_stream_test_empty_on_creation # SKIP all tests skipped + ok 2 - string_stream_test_not_empty_after_add # SKIP all tests skipped + ok 3 - string_stream_test_get_string # SKIP all tests skipped +ok 1 - string-stream-test # SKIP + # Subtest: example + 1..2 + # example_simple_test: initializing + ok 1 - example_simple_test # SKIP all tests skipped + # example_skip_test: initializing + ok 2 - example_skip_test # SKIP this test should be skipped +ok 2 - example # SKIP diff --git a/tools/testing/kunit/test_data/test_skip_tests.log b/tools/testing/kunit/test_data/test_skip_tests.log new file mode 100644 index 0000000000..79b326e312 --- /dev/null +++ b/tools/testing/kunit/test_data/test_skip_tests.log @@ -0,0 +1,15 @@ +TAP version 14 +1..2 + # Subtest: string-stream-test + 1..3 + ok 1 - string_stream_test_empty_on_creation + ok 2 - string_stream_test_not_empty_after_add + ok 3 - string_stream_test_get_string +ok 1 - string-stream-test + # Subtest: example + 1..2 + # example_simple_test: initializing + ok 1 - example_simple_test + # example_skip_test: initializing + ok 2 - example_skip_test # SKIP this test should be skipped +ok 2 - example diff --git a/tools/testing/kunit/test_data/test_strip_hyphen.log b/tools/testing/kunit/test_data/test_strip_hyphen.log new file mode 100644 index 0000000000..92ac7c24b3 --- /dev/null +++ b/tools/testing/kunit/test_data/test_strip_hyphen.log @@ -0,0 +1,16 @@ +KTAP version 1 +1..2 + # Subtest: sysctl_test + 1..1 + # sysctl_test_dointvec_null_tbl_data: sysctl_test_dointvec_null_tbl_data passed + ok 1 - sysctl_test_dointvec_null_tbl_data +kunit sysctl_test: all tests passed +ok 1 - sysctl_test + # Subtest: example + 1..1 +init_suite + # example_simple_test: initializing + # example_simple_test: example_simple_test passed + ok 1 example_simple_test +kunit example: all tests passed +ok 2 example -- cgit v1.2.3
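A closing note on the fixtures: test_strip_hyphen.log above deliberately mixes result lines with and without the ' - ' separator before the test name ('ok 1 - sysctl_test_dointvec_null_tbl_data' vs 'ok 1 example_simple_test'), so the parser must treat the separator as optional. A minimal sketch of a separator-tolerant match, where parse_result() and its regex are illustrative assumptions rather than kunit_parser.py's actual implementation:

import re
from typing import Optional, Tuple

# Accept both 'ok 1 - name' and 'ok 1 name': the ' - ' is optional.
TEST_RESULT = re.compile(r'^\s*(ok|not ok) ([0-9]+)(?: - )?([^#]*)(#.*)?$')

def parse_result(line: str) -> Optional[Tuple[bool, int, str]]:
    # Returns (passed, test number, test name), or None if the line is
    # not a KTAP result line at all.
    m = TEST_RESULT.match(line)
    if m is None:
        return None
    status, number, name, _directive = m.groups()
    return (status == 'ok', int(number), name.strip())

Both parse_result('ok 1 - example_simple_test') and parse_result('ok 1 example_simple_test') then yield (True, 1, 'example_simple_test').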