46 files changed, 2851 insertions, 1053 deletions
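Before the diff itself, a few representative invocations of the reworked tool, run from the top of a kernel checkout. Every flag shown here appears in the kunit.py changes below; the suite glob 'example.*' is only an illustrative filter_glob, not something this series requires:

	# Build and run under UML, filtering to one suite:
	./tools/testing/kunit/kunit.py run 'example.*'

	# Run on another architecture via QEMU and write JSON results to a file:
	./tools/testing/kunit/kunit.py run --arch=x86_64 --json=results.json

	# Boot the kernel once per suite, to debug a non-hermetic test:
	./tools/testing/kunit/kunit.py run --run_isolated=suite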
diff --git a/tools/testing/kunit/.gitattributes b/tools/testing/kunit/.gitattributes
deleted file mode 100644
index 5b7da1fc3b8f..000000000000
--- a/tools/testing/kunit/.gitattributes
+++ /dev/null
@@ -1 +0,0 @@
-test_data/* binary
diff --git a/tools/testing/kunit/configs/all_tests.config b/tools/testing/kunit/configs/all_tests.config
index 9235b7d42d38..aa5ec149f96c 100644
--- a/tools/testing/kunit/configs/all_tests.config
+++ b/tools/testing/kunit/configs/all_tests.config
@@ -1,3 +1,51 @@
+# This config enables as many tests as possible under UML.
+# It is intended for use in continuous integration systems and similar for
+# automated testing of as much as possible.
+# The config is manually maintained, though it uses KUNIT_ALL_TESTS=y to enable
+# any tests whose dependencies are already satisfied. Please feel free to add
+# more options if they enable any new tests.
+
 CONFIG_KUNIT=y
-CONFIG_KUNIT_TEST=y
 CONFIG_KUNIT_EXAMPLE_TEST=y
+CONFIG_KUNIT_ALL_TESTS=y
+
+CONFIG_FORTIFY_SOURCE=y
+
+CONFIG_IIO=y
+
+CONFIG_EXT4_FS=y
+
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+
+CONFIG_PCI=y
+CONFIG_USB4=y
+
+CONFIG_NET=y
+CONFIG_MCTP=y
+CONFIG_MCTP_FLOWS=y
+
+CONFIG_INET=y
+CONFIG_MPTCP=y
+
+CONFIG_CFG80211=y
+CONFIG_MAC80211=y
+CONFIG_WLAN_VENDOR_INTEL=y
+CONFIG_IWLWIFI=y
+
+CONFIG_DAMON=y
+CONFIG_DAMON_VADDR=y
+CONFIG_DAMON_PADDR=y
+CONFIG_DEBUG_FS=y
+CONFIG_DAMON_DBGFS=y
+
+CONFIG_REGMAP_BUILD=y
+
+CONFIG_SECURITY=y
+CONFIG_SECURITY_APPARMOR=y
+CONFIG_SECURITY_LANDLOCK=y
+
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_SOC=y
+CONFIG_SND_SOC_TOPOLOGY_BUILD=y
diff --git a/tools/testing/kunit/configs/arch_uml.config b/tools/testing/kunit/configs/arch_uml.config
new file mode 100644
index 000000000000..54ad8972681a
--- /dev/null
+++ b/tools/testing/kunit/configs/arch_uml.config
@@ -0,0 +1,8 @@
+# Config options which are added to UML builds by default
+
+# Enable virtio/pci, as a lot of tests require it.
+CONFIG_VIRTIO_UML=y
+CONFIG_UML_PCI_OVER_VIRTIO=y
+
+# Enable FORTIFY_SOURCE for wider checking.
+CONFIG_FORTIFY_SOURCE=y
diff --git a/tools/testing/kunit/configs/broken_on_uml.config b/tools/testing/kunit/configs/broken_on_uml.config
deleted file mode 100644
index a7f0603d33f6..000000000000
--- a/tools/testing/kunit/configs/broken_on_uml.config
+++ /dev/null
@@ -1,42 +0,0 @@
-# These are currently broken on UML and prevent allyesconfig from building
-# CONFIG_STATIC_LINK is not set
-# CONFIG_UML_NET_VECTOR is not set
-# CONFIG_UML_NET_VDE is not set
-# CONFIG_UML_NET_PCAP is not set
-# CONFIG_NET_PTP_CLASSIFY is not set
-# CONFIG_IP_VS is not set
-# CONFIG_BRIDGE_EBT_BROUTE is not set
-# CONFIG_BRIDGE_EBT_T_FILTER is not set
-# CONFIG_BRIDGE_EBT_T_NAT is not set
-# CONFIG_MTD_NAND_CADENCE is not set
-# CONFIG_MTD_NAND_NANDSIM is not set
-# CONFIG_BLK_DEV_NULL_BLK is not set
-# CONFIG_BLK_DEV_RAM is not set
-# CONFIG_SCSI_DEBUG is not set
-# CONFIG_NET_VENDOR_XILINX is not set
-# CONFIG_NULL_TTY is not set
-# CONFIG_PTP_1588_CLOCK is not set
-# CONFIG_PINCTRL_EQUILIBRIUM is not set
-# CONFIG_DMABUF_SELFTESTS is not set
-# CONFIG_COMEDI is not set
-# CONFIG_XIL_AXIS_FIFO is not set
-# CONFIG_EXFAT_FS is not set
-# CONFIG_STM_DUMMY is not set
-# CONFIG_FSI_MASTER_ASPEED is not set
-# CONFIG_JFS_FS is not set
-# CONFIG_UBIFS_FS is not set
-# CONFIG_CRAMFS is not set
-# CONFIG_CRYPTO_DEV_SAFEXCEL is not set
-# CONFIG_CRYPTO_DEV_AMLOGIC_GXL is not set
-# CONFIG_KCOV is not set
-# CONFIG_LKDTM is not set
-# CONFIG_REED_SOLOMON_TEST is not set
-# CONFIG_TEST_RHASHTABLE is not set
-# CONFIG_TEST_MEMINIT is not set
-# CONFIG_NETWORK_PHY_TIMESTAMPING is not set
-# CONFIG_DEBUG_INFO_BTF is not set
-# CONFIG_PTP_1588_CLOCK_INES is not set
-# CONFIG_QCOM_CPR is not set
-# CONFIG_RESET_BRCMSTB_RESCAL is not set
-# CONFIG_RESET_INTEL_GW is not set
-# CONFIG_ADI_AXI_ADC is not set
diff --git a/tools/testing/kunit/configs/coverage_uml.config b/tools/testing/kunit/configs/coverage_uml.config
new file mode 100644
index 000000000000..bacb77664fa8
--- /dev/null
+++ b/tools/testing/kunit/configs/coverage_uml.config
@@ -0,0 +1,11 @@
+# This config fragment enables coverage on UML, which is different from the
+# normal gcov used in other arches (no debugfs).
+# Example usage:
+# ./tools/testing/kunit/kunit.py run \
+#	--kunitconfig=tools/testing/kunit/configs/all_tests.config \
+#	--kunitconfig=tools/testing/kunit/configs/coverage_uml.config
+
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
+CONFIG_GCOV=y
diff --git a/tools/testing/kunit/configs/default.config b/tools/testing/kunit/configs/default.config
new file mode 100644
index 000000000000..e67af7b9f1bb
--- /dev/null
+++ b/tools/testing/kunit/configs/default.config
@@ -0,0 +1,3 @@
+CONFIG_KUNIT=y
+CONFIG_KUNIT_EXAMPLE_TEST=y
+CONFIG_KUNIT_ALL_TESTS=y
diff --git a/tools/testing/kunit/kunit.py b/tools/testing/kunit/kunit.py
index ebf5f5763dee..bc74088c458a 100755
--- a/tools/testing/kunit/kunit.py
+++ b/tools/testing/kunit/kunit.py
@@ -1,4 +1,4 @@
-#!/usr/bin/python3
+#!/usr/bin/env python3
 # SPDX-License-Identifier: GPL-2.0
 #
 # A thin wrapper on top of the KUnit Kernel
@@ -8,35 +8,22 @@
 # Author: Brendan Higgins <brendanhiggins@google.com>
 
 import argparse
-import sys
 import os
+import re
+import shlex
+import sys
 import time
-import shutil
 
-from collections import namedtuple
+assert sys.version_info >= (3, 7), "Python version is too old"
+
+from dataclasses import dataclass
 from enum import Enum, auto
+from typing import Iterable, List, Optional, Sequence, Tuple
 
-import kunit_config
 import kunit_json
 import kunit_kernel
 import kunit_parser
-
-KunitResult = namedtuple('KunitResult', ['status','result','elapsed_time'])
-
-KunitConfigRequest = namedtuple('KunitConfigRequest',
-				['build_dir', 'make_options'])
-KunitBuildRequest = namedtuple('KunitBuildRequest',
-			       ['jobs', 'build_dir', 'alltests',
-				'make_options'])
-KunitExecRequest = namedtuple('KunitExecRequest',
-			      ['timeout', 'build_dir', 'alltests'])
-KunitParseRequest = namedtuple('KunitParseRequest',
-			       ['raw_output', 'input_data', 'build_dir', 'json'])
-KunitRequest = namedtuple('KunitRequest', ['raw_output','timeout', 'jobs',
-					   'build_dir', 'alltests', 'json',
-					   'make_options'])
-
-KernelDirectoryPath = sys.argv[0].split('tools/testing/kunit/')[0]
+from kunit_printer import stdout
 
 class KunitStatus(Enum):
	SUCCESS = auto()
@@ -44,178 +31,511 @@
	BUILD_FAILURE = auto()
	TEST_FAILURE = auto()
 
-def create_default_kunitconfig():
-	if not os.path.exists(kunit_kernel.kunitconfig_path):
-		shutil.copyfile('arch/um/configs/kunit_defconfig',
-				kunit_kernel.kunitconfig_path)
-
-def get_kernel_root_path():
-	parts = sys.argv[0] if not __file__ else __file__
-	parts = os.path.realpath(parts).split('tools/testing/kunit')
+@dataclass
+class KunitResult:
+	status: KunitStatus
+	elapsed_time: float
+
+@dataclass
+class KunitConfigRequest:
+	build_dir: str
+	make_options: Optional[List[str]]
+
+@dataclass
+class KunitBuildRequest(KunitConfigRequest):
+	jobs: int
+
+@dataclass
+class KunitParseRequest:
+	raw_output: Optional[str]
+	json: Optional[str]
+
+@dataclass
+class KunitExecRequest(KunitParseRequest):
+	build_dir: str
+	timeout: int
+	filter_glob: str
+	filter: str
+	filter_action: Optional[str]
+	kernel_args: Optional[List[str]]
+	run_isolated: Optional[str]
+	list_tests: bool
+	list_tests_attr: bool
+
+@dataclass
+class KunitRequest(KunitExecRequest, KunitBuildRequest):
+	pass
+
+
+def get_kernel_root_path() -> str:
+	path = sys.argv[0] if not __file__ else __file__
+	parts = os.path.realpath(path).split('tools/testing/kunit')
	if len(parts) != 2:
		sys.exit(1)
	return parts[0]
 
 def config_tests(linux: kunit_kernel.LinuxSourceTree,
		 request: KunitConfigRequest) -> KunitResult:
-	kunit_parser.print_with_timestamp('Configuring KUnit Kernel ...')
+	stdout.print_with_timestamp('Configuring KUnit Kernel ...')
 
	config_start = time.time()
-	create_default_kunitconfig()
	success = linux.build_reconfig(request.build_dir, request.make_options)
	config_end = time.time()
-	if not success:
-		return KunitResult(KunitStatus.CONFIG_FAILURE,
-				   'could not configure kernel',
-				   config_end - config_start)
-	return KunitResult(KunitStatus.SUCCESS,
-			   'configured kernel successfully',
-			   config_end - config_start)
+	status = KunitStatus.SUCCESS if success else KunitStatus.CONFIG_FAILURE
+	return KunitResult(status, config_end - config_start)
 
 def build_tests(linux: kunit_kernel.LinuxSourceTree,
		request: KunitBuildRequest) -> KunitResult:
-	kunit_parser.print_with_timestamp('Building KUnit Kernel ...')
+	stdout.print_with_timestamp('Building KUnit Kernel ...')
 
	build_start = time.time()
-	success = linux.build_um_kernel(request.alltests,
-					request.jobs,
-					request.build_dir,
-					request.make_options)
+	success = linux.build_kernel(request.jobs,
+				     request.build_dir,
+				     request.make_options)
	build_end = time.time()
-	if not success:
-		return KunitResult(KunitStatus.BUILD_FAILURE,
-				   'could not build kernel',
-				   build_end - build_start)
-	if not success:
-		return KunitResult(KunitStatus.BUILD_FAILURE,
-				   'could not build kernel',
-				   build_end - build_start)
-	return KunitResult(KunitStatus.SUCCESS,
-			   'built kernel successfully',
-			   build_end - build_start)
-
-def exec_tests(linux: kunit_kernel.LinuxSourceTree,
-	       request: KunitExecRequest) -> KunitResult:
-	kunit_parser.print_with_timestamp('Starting KUnit Kernel ...')
-	test_start = time.time()
-	result = linux.run_kernel(
-		timeout=None if request.alltests else request.timeout,
-		build_dir=request.build_dir)
-
-	test_end = time.time()
-
-	return KunitResult(KunitStatus.SUCCESS,
-			   result,
-			   test_end - test_start)
-
-def parse_tests(request: KunitParseRequest) -> KunitResult:
-	parse_start = time.time()
+	status = KunitStatus.SUCCESS if success else KunitStatus.BUILD_FAILURE
+	return KunitResult(status, build_end - build_start)
+
+def config_and_build_tests(linux: kunit_kernel.LinuxSourceTree,
+			   request: KunitBuildRequest) -> KunitResult:
+	config_result = config_tests(linux, request)
+	if config_result.status != KunitStatus.SUCCESS:
+		return config_result
 
-	test_result = kunit_parser.TestResult(kunit_parser.TestStatus.SUCCESS,
-					      [],
-					      'Tests not Parsed.')
+	return build_tests(linux, request)
+
+def _list_tests(linux: kunit_kernel.LinuxSourceTree, request: KunitExecRequest) -> List[str]:
+	args = ['kunit.action=list']
+
+	if request.kernel_args:
+		args.extend(request.kernel_args)
+
+	output = linux.run_kernel(args=args,
+				  timeout=request.timeout,
+				  filter_glob=request.filter_glob,
+				  filter=request.filter,
+				  filter_action=request.filter_action,
+				  build_dir=request.build_dir)
+	lines = kunit_parser.extract_tap_lines(output)
+	# Hack! Drop the dummy TAP version header that the executor prints out.
+	lines.pop()
+
+	# Filter out any extraneous non-test output that might have gotten mixed in.
+ return [l for l in output if re.match(r'^[^\s.]+\.[^\s.]+$', l)] + +def _list_tests_attr(linux: kunit_kernel.LinuxSourceTree, request: KunitExecRequest) -> Iterable[str]: + args = ['kunit.action=list_attr'] + + if request.kernel_args: + args.extend(request.kernel_args) + + output = linux.run_kernel(args=args, + timeout=request.timeout, + filter_glob=request.filter_glob, + filter=request.filter, + filter_action=request.filter_action, + build_dir=request.build_dir) + lines = kunit_parser.extract_tap_lines(output) + # Hack! Drop the dummy TAP version header that the executor prints out. + lines.pop() + + # Filter out any extraneous non-test output that might have gotten mixed in. + return lines + +def _suites_from_test_list(tests: List[str]) -> List[str]: + """Extracts all the suites from an ordered list of tests.""" + suites = [] # type: List[str] + for t in tests: + parts = t.split('.', maxsplit=2) + if len(parts) != 2: + raise ValueError(f'internal KUnit error, test name should be of the form "<suite>.<test>", got "{t}"') + suite, _ = parts + if not suites or suites[-1] != suite: + suites.append(suite) + return suites + +def exec_tests(linux: kunit_kernel.LinuxSourceTree, request: KunitExecRequest) -> KunitResult: + filter_globs = [request.filter_glob] + if request.list_tests: + output = _list_tests(linux, request) + for line in output: + print(line.rstrip()) + return KunitResult(status=KunitStatus.SUCCESS, elapsed_time=0.0) + if request.list_tests_attr: + attr_output = _list_tests_attr(linux, request) + for line in attr_output: + print(line.rstrip()) + return KunitResult(status=KunitStatus.SUCCESS, elapsed_time=0.0) + if request.run_isolated: + tests = _list_tests(linux, request) + if request.run_isolated == 'test': + filter_globs = tests + elif request.run_isolated == 'suite': + filter_globs = _suites_from_test_list(tests) + # Apply the test-part of the user's glob, if present. + if '.' in request.filter_glob: + test_glob = request.filter_glob.split('.', maxsplit=2)[1] + filter_globs = [g + '.'+ test_glob for g in filter_globs] + + metadata = kunit_json.Metadata(arch=linux.arch(), build_dir=request.build_dir, def_config='kunit_defconfig') + + test_counts = kunit_parser.TestCounts() + exec_time = 0.0 + for i, filter_glob in enumerate(filter_globs): + stdout.print_with_timestamp('Starting KUnit Kernel ({}/{})...'.format(i+1, len(filter_globs))) + + test_start = time.time() + run_result = linux.run_kernel( + args=request.kernel_args, + timeout=request.timeout, + filter_glob=filter_glob, + filter=request.filter, + filter_action=request.filter_action, + build_dir=request.build_dir) + + _, test_result = parse_tests(request, metadata, run_result) + # run_kernel() doesn't block on the kernel exiting. + # That only happens after we get the last line of output from `run_result`. + # So exec_time here actually contains parsing + execution time, which is fine. 
+ test_end = time.time() + exec_time += test_end - test_start + + test_counts.add_subtest_counts(test_result.counts) + + if len(filter_globs) == 1 and test_counts.crashed > 0: + bd = request.build_dir + print('The kernel seems to have crashed; you can decode the stack traces with:') + print('$ scripts/decode_stacktrace.sh {}/vmlinux {} < {} | tee {}/decoded.log | {} parse'.format( + bd, bd, kunit_kernel.get_outfile_path(bd), bd, sys.argv[0])) + + kunit_status = _map_to_overall_status(test_counts.get_status()) + return KunitResult(status=kunit_status, elapsed_time=exec_time) + +def _map_to_overall_status(test_status: kunit_parser.TestStatus) -> KunitStatus: + if test_status in (kunit_parser.TestStatus.SUCCESS, kunit_parser.TestStatus.SKIPPED): + return KunitStatus.SUCCESS + return KunitStatus.TEST_FAILURE + +def parse_tests(request: KunitParseRequest, metadata: kunit_json.Metadata, input_data: Iterable[str]) -> Tuple[KunitResult, kunit_parser.Test]: + parse_start = time.time() if request.raw_output: - kunit_parser.raw_output(request.input_data) - else: - test_result = kunit_parser.parse_run_tests(request.input_data) - parse_end = time.time() + # Treat unparsed results as one passing test. + fake_test = kunit_parser.Test() + fake_test.status = kunit_parser.TestStatus.SUCCESS + fake_test.counts.passed = 1 + + output: Iterable[str] = input_data + if request.raw_output == 'all': + pass + elif request.raw_output == 'kunit': + output = kunit_parser.extract_tap_lines(output) + for line in output: + print(line.rstrip()) + parse_time = time.time() - parse_start + return KunitResult(KunitStatus.SUCCESS, parse_time), fake_test + + + # Actually parse the test results. + test = kunit_parser.parse_run_tests(input_data) + parse_time = time.time() - parse_start if request.json: - json_obj = kunit_json.get_json_result( - test_result=test_result, - def_config='kunit_defconfig', - build_dir=request.build_dir, - json_path=request.json) + json_str = kunit_json.get_json_result( + test=test, + metadata=metadata) if request.json == 'stdout': - print(json_obj) - - if test_result.status != kunit_parser.TestStatus.SUCCESS: - return KunitResult(KunitStatus.TEST_FAILURE, test_result, - parse_end - parse_start) + print(json_str) + else: + with open(request.json, 'w') as f: + f.write(json_str) + stdout.print_with_timestamp("Test results stored in %s" % + os.path.abspath(request.json)) - return KunitResult(KunitStatus.SUCCESS, test_result, - parse_end - parse_start) + if test.status != kunit_parser.TestStatus.SUCCESS: + return KunitResult(KunitStatus.TEST_FAILURE, parse_time), test + return KunitResult(KunitStatus.SUCCESS, parse_time), test def run_tests(linux: kunit_kernel.LinuxSourceTree, request: KunitRequest) -> KunitResult: run_start = time.time() - config_request = KunitConfigRequest(request.build_dir, - request.make_options) - config_result = config_tests(linux, config_request) + config_result = config_tests(linux, request) if config_result.status != KunitStatus.SUCCESS: return config_result - build_request = KunitBuildRequest(request.jobs, request.build_dir, - request.alltests, - request.make_options) - build_result = build_tests(linux, build_request) + build_result = build_tests(linux, request) if build_result.status != KunitStatus.SUCCESS: return build_result - exec_request = KunitExecRequest(request.timeout, request.build_dir, - request.alltests) - exec_result = exec_tests(linux, exec_request) - if exec_result.status != KunitStatus.SUCCESS: - return exec_result - - parse_request = 
KunitParseRequest(request.raw_output, - exec_result.result, - request.build_dir, - request.json) - parse_result = parse_tests(parse_request) + exec_result = exec_tests(linux, request) run_end = time.time() - kunit_parser.print_with_timestamp(( + stdout.print_with_timestamp(( 'Elapsed time: %.3fs total, %.3fs configuring, %.3fs ' + 'building, %.3fs running\n') % ( run_end - run_start, config_result.elapsed_time, build_result.elapsed_time, exec_result.elapsed_time)) - return parse_result - -def add_common_opts(parser): + return exec_result + +# Problem: +# $ kunit.py run --json +# works as one would expect and prints the parsed test results as JSON. +# $ kunit.py run --json suite_name +# would *not* pass suite_name as the filter_glob and print as json. +# argparse will consider it to be another way of writing +# $ kunit.py run --json=suite_name +# i.e. it would run all tests, and dump the json to a `suite_name` file. +# So we hackily automatically rewrite --json => --json=stdout +pseudo_bool_flag_defaults = { + '--json': 'stdout', + '--raw_output': 'kunit', +} +def massage_argv(argv: Sequence[str]) -> Sequence[str]: + def massage_arg(arg: str) -> str: + if arg not in pseudo_bool_flag_defaults: + return arg + return f'{arg}={pseudo_bool_flag_defaults[arg]}' + return list(map(massage_arg, argv)) + +def get_default_jobs() -> int: + return len(os.sched_getaffinity(0)) + +def add_common_opts(parser: argparse.ArgumentParser) -> None: parser.add_argument('--build_dir', help='As in the make command, it specifies the build ' 'directory.', - type=str, default='.kunit', metavar='build_dir') + type=str, default='.kunit', metavar='DIR') parser.add_argument('--make_options', help='X=Y make option, can be repeated.', - action='append') + action='append', metavar='X=Y') parser.add_argument('--alltests', - help='Run all KUnit tests through allyesconfig', + help='Run all KUnit tests via tools/testing/kunit/configs/all_tests.config', action='store_true') - -def add_build_opts(parser): + parser.add_argument('--kunitconfig', + help='Path to Kconfig fragment that enables KUnit tests.' + ' If given a directory, (e.g. lib/kunit), "/.kunitconfig" ' + 'will get automatically appended. If repeated, the files ' + 'blindly concatenated, which might not work in all cases.', + action='append', metavar='PATHS') + parser.add_argument('--kconfig_add', + help='Additional Kconfig options to append to the ' + '.kunitconfig, e.g. CONFIG_KASAN=y. Can be repeated.', + action='append', metavar='CONFIG_X=Y') + + parser.add_argument('--arch', + help=('Specifies the architecture to run tests under. ' + 'The architecture specified here must match the ' + 'string passed to the ARCH make param, ' + 'e.g. i386, x86_64, arm, um, etc. 
Non-UML ' + 'architectures run on QEMU.'), + type=str, default='um', metavar='ARCH') + + parser.add_argument('--cross_compile', + help=('Sets make\'s CROSS_COMPILE variable; it should ' + 'be set to a toolchain path prefix (the prefix ' + 'of gcc and other tools in your toolchain, for ' + 'example `sparc64-linux-gnu-` if you have the ' + 'sparc toolchain installed on your system, or ' + '`$HOME/toolchains/microblaze/gcc-9.2.0-nolibc/microblaze-linux/bin/microblaze-linux-` ' + 'if you have downloaded the microblaze toolchain ' + 'from the 0-day website to a directory in your ' + 'home directory called `toolchains`).'), + metavar='PREFIX') + + parser.add_argument('--qemu_config', + help=('Takes a path to a path to a file containing ' + 'a QemuArchParams object.'), + type=str, metavar='FILE') + + parser.add_argument('--qemu_args', + help='Additional QEMU arguments, e.g. "-smp 8"', + action='append', metavar='') + +def add_build_opts(parser: argparse.ArgumentParser) -> None: parser.add_argument('--jobs', help='As in the make command, "Specifies the number of ' 'jobs (commands) to run simultaneously."', - type=int, default=8, metavar='jobs') + type=int, default=get_default_jobs(), metavar='N') -def add_exec_opts(parser): +def add_exec_opts(parser: argparse.ArgumentParser) -> None: parser.add_argument('--timeout', help='maximum number of seconds to allow for all tests ' 'to run. This does not include time taken to build the ' 'tests.', type=int, default=300, - metavar='timeout') - -def add_parse_opts(parser): - parser.add_argument('--raw_output', help='don\'t format output from kernel', + metavar='SECONDS') + parser.add_argument('filter_glob', + help='Filter which KUnit test suites/tests run at ' + 'boot-time, e.g. list* or list*.*del_test', + type=str, + nargs='?', + default='', + metavar='filter_glob') + parser.add_argument('--filter', + help='Filter KUnit tests with attributes, ' + 'e.g. module=example or speed>slow', + type=str, + default='') + parser.add_argument('--filter_action', + help='If set to skip, filtered tests will be skipped, ' + 'e.g. --filter_action=skip. Otherwise they will not run.', + type=str, + choices=['skip']) + parser.add_argument('--kernel_args', + help='Kernel command-line parameters. Maybe be repeated', + action='append', metavar='') + parser.add_argument('--run_isolated', help='If set, boot the kernel for each ' + 'individual suite/test. This is can be useful for debugging ' + 'a non-hermetic test, one that might pass/fail based on ' + 'what ran before it.', + type=str, + choices=['suite', 'test']) + parser.add_argument('--list_tests', help='If set, list all tests that will be ' + 'run.', + action='store_true') + parser.add_argument('--list_tests_attr', help='If set, list all tests and test ' + 'attributes.', action='store_true') + +def add_parse_opts(parser: argparse.ArgumentParser) -> None: + parser.add_argument('--raw_output', help='If set don\'t parse output from kernel. ' + 'By default, filters to just KUnit output. Use ' + '--raw_output=all to show everything', + type=str, nargs='?', const='all', default=None, choices=['all', 'kunit']) parser.add_argument('--json', nargs='?', - help='Stores test results in a JSON, and either ' - 'prints to stdout or saves to file if a ' - 'filename is specified', - type=str, const='stdout', default=None) + help='Prints parsed test results as JSON to stdout or a file if ' + 'a filename is specified. 
Does nothing if --raw_output is set.', + type=str, const='stdout', default=None, metavar='FILE') + + +def tree_from_args(cli_args: argparse.Namespace) -> kunit_kernel.LinuxSourceTree: + """Returns a LinuxSourceTree based on the user's arguments.""" + # Allow users to specify multiple arguments in one string, e.g. '-smp 8' + qemu_args: List[str] = [] + if cli_args.qemu_args: + for arg in cli_args.qemu_args: + qemu_args.extend(shlex.split(arg)) + + kunitconfigs = cli_args.kunitconfig if cli_args.kunitconfig else [] + if cli_args.alltests: + # Prepend so user-specified options take prio if we ever allow + # --kunitconfig options to have differing options. + kunitconfigs = [kunit_kernel.ALL_TESTS_CONFIG_PATH] + kunitconfigs + + return kunit_kernel.LinuxSourceTree(cli_args.build_dir, + kunitconfig_paths=kunitconfigs, + kconfig_add=cli_args.kconfig_add, + arch=cli_args.arch, + cross_compile=cli_args.cross_compile, + qemu_config_path=cli_args.qemu_config, + extra_qemu_args=qemu_args) + + +def run_handler(cli_args: argparse.Namespace) -> None: + if not os.path.exists(cli_args.build_dir): + os.mkdir(cli_args.build_dir) + + linux = tree_from_args(cli_args) + request = KunitRequest(build_dir=cli_args.build_dir, + make_options=cli_args.make_options, + jobs=cli_args.jobs, + raw_output=cli_args.raw_output, + json=cli_args.json, + timeout=cli_args.timeout, + filter_glob=cli_args.filter_glob, + filter=cli_args.filter, + filter_action=cli_args.filter_action, + kernel_args=cli_args.kernel_args, + run_isolated=cli_args.run_isolated, + list_tests=cli_args.list_tests, + list_tests_attr=cli_args.list_tests_attr) + result = run_tests(linux, request) + if result.status != KunitStatus.SUCCESS: + sys.exit(1) -def main(argv, linux=None): + +def config_handler(cli_args: argparse.Namespace) -> None: + if cli_args.build_dir and ( + not os.path.exists(cli_args.build_dir)): + os.mkdir(cli_args.build_dir) + + linux = tree_from_args(cli_args) + request = KunitConfigRequest(build_dir=cli_args.build_dir, + make_options=cli_args.make_options) + result = config_tests(linux, request) + stdout.print_with_timestamp(( + 'Elapsed time: %.3fs\n') % ( + result.elapsed_time)) + if result.status != KunitStatus.SUCCESS: + sys.exit(1) + + +def build_handler(cli_args: argparse.Namespace) -> None: + linux = tree_from_args(cli_args) + request = KunitBuildRequest(build_dir=cli_args.build_dir, + make_options=cli_args.make_options, + jobs=cli_args.jobs) + result = config_and_build_tests(linux, request) + stdout.print_with_timestamp(( + 'Elapsed time: %.3fs\n') % ( + result.elapsed_time)) + if result.status != KunitStatus.SUCCESS: + sys.exit(1) + + +def exec_handler(cli_args: argparse.Namespace) -> None: + linux = tree_from_args(cli_args) + exec_request = KunitExecRequest(raw_output=cli_args.raw_output, + build_dir=cli_args.build_dir, + json=cli_args.json, + timeout=cli_args.timeout, + filter_glob=cli_args.filter_glob, + filter=cli_args.filter, + filter_action=cli_args.filter_action, + kernel_args=cli_args.kernel_args, + run_isolated=cli_args.run_isolated, + list_tests=cli_args.list_tests, + list_tests_attr=cli_args.list_tests_attr) + result = exec_tests(linux, exec_request) + stdout.print_with_timestamp(( + 'Elapsed time: %.3fs\n') % (result.elapsed_time)) + if result.status != KunitStatus.SUCCESS: + sys.exit(1) + + +def parse_handler(cli_args: argparse.Namespace) -> None: + if cli_args.file is None: + sys.stdin.reconfigure(errors='backslashreplace') # type: ignore + kunit_output = sys.stdin # type: Iterable[str] + else: + with 
open(cli_args.file, 'r', errors='backslashreplace') as f: + kunit_output = f.read().splitlines() + # We know nothing about how the result was created! + metadata = kunit_json.Metadata() + request = KunitParseRequest(raw_output=cli_args.raw_output, + json=cli_args.json) + result, _ = parse_tests(request, metadata, kunit_output) + if result.status != KunitStatus.SUCCESS: + sys.exit(1) + + +subcommand_handlers_map = { + 'run': run_handler, + 'config': config_handler, + 'build': build_handler, + 'exec': exec_handler, + 'parse': parse_handler +} + + +def main(argv: Sequence[str]) -> None: parser = argparse.ArgumentParser( description='Helps writing and running KUnit tests.') subparser = parser.add_subparsers(dest='subcommand') @@ -253,97 +573,19 @@ def main(argv, linux=None): help='Specifies the file to read results from.', type=str, nargs='?', metavar='input_file') - cli_args = parser.parse_args(argv) + cli_args = parser.parse_args(massage_argv(argv)) if get_kernel_root_path(): os.chdir(get_kernel_root_path()) - if cli_args.subcommand == 'run': - if not os.path.exists(cli_args.build_dir): - os.mkdir(cli_args.build_dir) - - if not os.path.exists(kunit_kernel.kunitconfig_path): - create_default_kunitconfig() - - if not linux: - linux = kunit_kernel.LinuxSourceTree() - - request = KunitRequest(cli_args.raw_output, - cli_args.timeout, - cli_args.jobs, - cli_args.build_dir, - cli_args.alltests, - cli_args.json, - cli_args.make_options) - result = run_tests(linux, request) - if result.status != KunitStatus.SUCCESS: - sys.exit(1) - elif cli_args.subcommand == 'config': - if cli_args.build_dir and ( - not os.path.exists(cli_args.build_dir)): - os.mkdir(cli_args.build_dir) - - if not os.path.exists(kunit_kernel.kunitconfig_path): - create_default_kunitconfig() - - if not linux: - linux = kunit_kernel.LinuxSourceTree() - - request = KunitConfigRequest(cli_args.build_dir, - cli_args.make_options) - result = config_tests(linux, request) - kunit_parser.print_with_timestamp(( - 'Elapsed time: %.3fs\n') % ( - result.elapsed_time)) - if result.status != KunitStatus.SUCCESS: - sys.exit(1) - elif cli_args.subcommand == 'build': - if not linux: - linux = kunit_kernel.LinuxSourceTree() - - request = KunitBuildRequest(cli_args.jobs, - cli_args.build_dir, - cli_args.alltests, - cli_args.make_options) - result = build_tests(linux, request) - kunit_parser.print_with_timestamp(( - 'Elapsed time: %.3fs\n') % ( - result.elapsed_time)) - if result.status != KunitStatus.SUCCESS: - sys.exit(1) - elif cli_args.subcommand == 'exec': - if not linux: - linux = kunit_kernel.LinuxSourceTree() - - exec_request = KunitExecRequest(cli_args.timeout, - cli_args.build_dir, - cli_args.alltests) - exec_result = exec_tests(linux, exec_request) - parse_request = KunitParseRequest(cli_args.raw_output, - exec_result.result, - cli_args.build_dir, - cli_args.json) - result = parse_tests(parse_request) - kunit_parser.print_with_timestamp(( - 'Elapsed time: %.3fs\n') % ( - exec_result.elapsed_time)) - if result.status != KunitStatus.SUCCESS: - sys.exit(1) - elif cli_args.subcommand == 'parse': - if cli_args.file == None: - kunit_output = sys.stdin - else: - with open(cli_args.file, 'r') as f: - kunit_output = f.read().splitlines() - request = KunitParseRequest(cli_args.raw_output, - kunit_output, - cli_args.build_dir, - cli_args.json) - result = parse_tests(request) - if result.status != KunitStatus.SUCCESS: - sys.exit(1) - else: + subcomand_handler = subcommand_handlers_map.get(cli_args.subcommand, None) + + if subcomand_handler is None: 
parser.print_help() + return + + subcomand_handler(cli_args) + if __name__ == '__main__': main(sys.argv[1:]) diff --git a/tools/testing/kunit/kunit_config.py b/tools/testing/kunit/kunit_config.py index 02ffc3a3e5dc..eb5dd01210b1 100644 --- a/tools/testing/kunit/kunit_config.py +++ b/tools/testing/kunit/kunit_config.py @@ -6,84 +6,103 @@ # Author: Felix Guo <felixguoxiuping@gmail.com> # Author: Brendan Higgins <brendanhiggins@google.com> -import collections +from dataclasses import dataclass import re +from typing import Any, Dict, Iterable, List, Tuple CONFIG_IS_NOT_SET_PATTERN = r'^# CONFIG_(\w+) is not set$' CONFIG_PATTERN = r'^CONFIG_(\w+)=(\S+|".*")$' -KconfigEntryBase = collections.namedtuple('KconfigEntry', ['name', 'value']) - -class KconfigEntry(KconfigEntryBase): +@dataclass(frozen=True) +class KconfigEntry: + name: str + value: str def __str__(self) -> str: if self.value == 'n': - return r'# CONFIG_%s is not set' % (self.name) - else: - return r'CONFIG_%s=%s' % (self.name, self.value) + return f'# CONFIG_{self.name} is not set' + return f'CONFIG_{self.name}={self.value}' class KconfigParseError(Exception): """Error parsing Kconfig defconfig or .config.""" -class Kconfig(object): +class Kconfig: """Represents defconfig or .config specified using the Kconfig language.""" - def __init__(self): - self._entries = [] + def __init__(self) -> None: + self._entries = {} # type: Dict[str, str] + + def __eq__(self, other: Any) -> bool: + if not isinstance(other, self.__class__): + return False + return self._entries == other._entries + + def __repr__(self) -> str: + return ','.join(str(e) for e in self.as_entries()) - def entries(self): - return set(self._entries) + def as_entries(self) -> Iterable[KconfigEntry]: + for name, value in self._entries.items(): + yield KconfigEntry(name, value) - def add_entry(self, entry: KconfigEntry) -> None: - self._entries.append(entry) + def add_entry(self, name: str, value: str) -> None: + self._entries[name] = value def is_subset_of(self, other: 'Kconfig') -> bool: - for a in self.entries(): - found = False - for b in other.entries(): - if a.name != b.name: + for name, value in self._entries.items(): + b = other._entries.get(name) + if b is None: + if value == 'n': continue - if a.value != b.value: - return False - found = True - if a.value != 'n' and found == False: + return False + if value != b: return False return True + def conflicting_options(self, other: 'Kconfig') -> List[Tuple[KconfigEntry, KconfigEntry]]: + diff = [] # type: List[Tuple[KconfigEntry, KconfigEntry]] + for name, value in self._entries.items(): + b = other._entries.get(name) + if b and value != b: + pair = (KconfigEntry(name, value), KconfigEntry(name, b)) + diff.append(pair) + return diff + + def merge_in_entries(self, other: 'Kconfig') -> None: + for name, value in other._entries.items(): + self._entries[name] = value + def write_to_file(self, path: str) -> None: - with open(path, 'w') as f: - for entry in self.entries(): - f.write(str(entry) + '\n') - - def parse_from_string(self, blob: str) -> None: - """Parses a string containing KconfigEntrys and populates this Kconfig.""" - self._entries = [] - is_not_set_matcher = re.compile(CONFIG_IS_NOT_SET_PATTERN) - config_matcher = re.compile(CONFIG_PATTERN) - for line in blob.split('\n'): - line = line.strip() - if not line: - continue - - match = config_matcher.match(line) - if match: - entry = KconfigEntry(match.group(1), match.group(2)) - self.add_entry(entry) - continue - - empty_match = is_not_set_matcher.match(line) - if 
empty_match: - entry = KconfigEntry(empty_match.group(1), 'n') - self.add_entry(entry) - continue - - if line[0] == '#': - continue - else: - raise KconfigParseError('Failed to parse: ' + line) - - def read_from_file(self, path: str) -> None: - with open(path, 'r') as f: - self.parse_from_string(f.read()) + with open(path, 'a+') as f: + for e in self.as_entries(): + f.write(str(e) + '\n') + +def parse_file(path: str) -> Kconfig: + with open(path, 'r') as f: + return parse_from_string(f.read()) + +def parse_from_string(blob: str) -> Kconfig: + """Parses a string containing Kconfig entries.""" + kconfig = Kconfig() + is_not_set_matcher = re.compile(CONFIG_IS_NOT_SET_PATTERN) + config_matcher = re.compile(CONFIG_PATTERN) + for line in blob.split('\n'): + line = line.strip() + if not line: + continue + + match = config_matcher.match(line) + if match: + kconfig.add_entry(match.group(1), match.group(2)) + continue + + empty_match = is_not_set_matcher.match(line) + if empty_match: + kconfig.add_entry(empty_match.group(1), 'n') + continue + + if line[0] == '#': + continue + raise KconfigParseError('Failed to parse: ' + line) + return kconfig diff --git a/tools/testing/kunit/kunit_json.py b/tools/testing/kunit/kunit_json.py index 624b31b2dbd6..10ff65689dd8 100644 --- a/tools/testing/kunit/kunit_json.py +++ b/tools/testing/kunit/kunit_json.py @@ -6,58 +6,58 @@ # Copyright (C) 2020, Google LLC. # Author: Heidi Fahim <heidifahim@google.com> +from dataclasses import dataclass import json -import os - -import kunit_parser - -from kunit_parser import TestStatus - -def get_json_result(test_result, def_config, build_dir, json_path): - sub_groups = [] - - # Each test suite is mapped to a KernelCI sub_group - for test_suite in test_result.suites: - sub_group = { - "name": test_suite.name, - "arch": "UM", - "defconfig": def_config, - "build_environment": build_dir, - "test_cases": [], - "lab_name": None, - "kernel": None, - "job": None, - "git_branch": "kselftest", - } - test_cases = [] - # TODO: Add attachments attribute in test_case with detailed - # failure message, see https://api.kernelci.org/schema-test-case.html#get - for case in test_suite.cases: - test_case = {"name": case.name, "status": "FAIL"} - if case.status == TestStatus.SUCCESS: - test_case["status"] = "PASS" - elif case.status == TestStatus.TEST_CRASHED: - test_case["status"] = "ERROR" - test_cases.append(test_case) - sub_group["test_cases"] = test_cases - sub_groups.append(sub_group) +from typing import Any, Dict + +from kunit_parser import Test, TestStatus + +@dataclass +class Metadata: + """Stores metadata about this run to include in get_json_result().""" + arch: str = '' + def_config: str = '' + build_dir: str = '' + +JsonObj = Dict[str, Any] + +_status_map: Dict[TestStatus, str] = { + TestStatus.SUCCESS: "PASS", + TestStatus.SKIPPED: "SKIP", + TestStatus.TEST_CRASHED: "ERROR", +} + +def _get_group_json(test: Test, common_fields: JsonObj) -> JsonObj: + sub_groups = [] # List[JsonObj] + test_cases = [] # List[JsonObj] + + for subtest in test.subtests: + if subtest.subtests: + sub_group = _get_group_json(subtest, common_fields) + sub_groups.append(sub_group) + continue + status = _status_map.get(subtest.status, "FAIL") + test_cases.append({"name": subtest.name, "status": status}) + test_group = { - "name": "KUnit Test Group", - "arch": "UM", - "defconfig": def_config, - "build_environment": build_dir, + "name": test.name, "sub_groups": sub_groups, + "test_cases": test_cases, + } + test_group.update(common_fields) + return test_group + +def 
get_json_result(test: Test, metadata: Metadata) -> str: + common_fields = { + "arch": metadata.arch, + "defconfig": metadata.def_config, + "build_environment": metadata.build_dir, "lab_name": None, "kernel": None, "job": None, "git_branch": "kselftest", } - json_obj = json.dumps(test_group, indent=4) - if json_path != 'stdout': - with open(json_path, 'w') as result_path: - result_path.write(json_obj) - root = __file__.split('tools/testing/kunit/')[0] - kunit_parser.print_with_timestamp( - "Test results stored in %s" % - os.path.join(root, result_path.name)) - return json_obj + + test_group = _get_group_json(test, common_fields) + test_group["name"] = "KUnit Test Group" + return json.dumps(test_group, indent=4) diff --git a/tools/testing/kunit/kunit_kernel.py b/tools/testing/kunit/kunit_kernel.py index b557b1e93f98..7254c110ff23 100644 --- a/tools/testing/kunit/kunit_kernel.py +++ b/tools/testing/kunit/kunit_kernel.py @@ -6,20 +6,30 @@ # Author: Felix Guo <felixguoxiuping@gmail.com> # Author: Brendan Higgins <brendanhiggins@google.com> - +import importlib.abc +import importlib.util import logging import subprocess import os +import shlex +import shutil import signal - -from contextlib import ExitStack +import threading +from typing import Iterator, List, Optional, Tuple +from types import FrameType import kunit_config -import kunit_parser +import qemu_config KCONFIG_PATH = '.config' -kunitconfig_path = '.kunitconfig' -BROKEN_ALLCONFIG_PATH = 'tools/testing/kunit/configs/broken_on_uml.config' +KUNITCONFIG_PATH = '.kunitconfig' +OLD_KUNITCONFIG_PATH = 'last_used_kunitconfig' +DEFAULT_KUNITCONFIG_PATH = 'tools/testing/kunit/configs/default.config' +ALL_TESTS_CONFIG_PATH = 'tools/testing/kunit/configs/all_tests.config' +UML_KCONFIG_PATH = 'tools/testing/kunit/configs/arch_uml.config' +OUTFILE_PATH = 'test.log' +ABS_TOOL_PATH = os.path.abspath(os.path.dirname(__file__)) +QEMU_CONFIGS_DIR = os.path.join(ABS_TOOL_PATH, 'qemu_configs') class ConfigError(Exception): """Represents an error trying to configure the Linux kernel.""" @@ -29,10 +39,14 @@ class BuildError(Exception): """Represents an error trying to build the Linux kernel.""" -class LinuxSourceTreeOperations(object): +class LinuxSourceTreeOperations: """An abstraction over command line operations performed on a source tree.""" - def make_mrproper(self): + def __init__(self, linux_arch: str, cross_compile: Optional[str]): + self._linux_arch = linux_arch + self._cross_compile = cross_compile + + def make_mrproper(self) -> None: try: subprocess.check_output(['make', 'mrproper'], stderr=subprocess.STDOUT) except OSError as e: @@ -40,12 +54,16 @@ class LinuxSourceTreeOperations(object): except subprocess.CalledProcessError as e: raise ConfigError(e.output.decode()) - def make_olddefconfig(self, build_dir, make_options): - command = ['make', 'ARCH=um', 'olddefconfig'] + def make_arch_config(self, base_kunitconfig: kunit_config.Kconfig) -> kunit_config.Kconfig: + return base_kunitconfig + + def make_olddefconfig(self, build_dir: str, make_options: Optional[List[str]]) -> None: + command = ['make', 'ARCH=' + self._linux_arch, 'O=' + build_dir, 'olddefconfig'] + if self._cross_compile: + command += ['CROSS_COMPILE=' + self._cross_compile] if make_options: command.extend(make_options) - if build_dir: - command += ['O=' + build_dir] + print('Populating config with:\n$', ' '.join(command)) try: subprocess.check_output(command, stderr=subprocess.STDOUT) except OSError as e: @@ -53,69 +71,191 @@ class LinuxSourceTreeOperations(object): except 
subprocess.CalledProcessError as e: raise ConfigError(e.output.decode()) - def make_allyesconfig(self, build_dir, make_options): - kunit_parser.print_with_timestamp( - 'Enabling all CONFIGs for UML...') - command = ['make', 'ARCH=um', 'allyesconfig'] + def make(self, jobs: int, build_dir: str, make_options: Optional[List[str]]) -> None: + command = ['make', 'ARCH=' + self._linux_arch, 'O=' + build_dir, '--jobs=' + str(jobs)] if make_options: command.extend(make_options) - if build_dir: - command += ['O=' + build_dir] - process = subprocess.Popen( - command, - stdout=subprocess.DEVNULL, - stderr=subprocess.STDOUT) - process.wait() - kunit_parser.print_with_timestamp( - 'Disabling broken configs to run KUnit tests...') - with ExitStack() as es: - config = open(get_kconfig_path(build_dir), 'a') - disable = open(BROKEN_ALLCONFIG_PATH, 'r').read() - config.write(disable) - kunit_parser.print_with_timestamp( - 'Starting Kernel with all configs takes a few minutes...') - - def make(self, jobs, build_dir, make_options): - command = ['make', 'ARCH=um', '--jobs=' + str(jobs)] - if make_options: - command.extend(make_options) - if build_dir: - command += ['O=' + build_dir] + if self._cross_compile: + command += ['CROSS_COMPILE=' + self._cross_compile] + print('Building with:\n$', ' '.join(command)) try: - subprocess.check_output(command, stderr=subprocess.STDOUT) + proc = subprocess.Popen(command, + stderr=subprocess.PIPE, + stdout=subprocess.DEVNULL) except OSError as e: raise BuildError('Could not call execute make: ' + str(e)) except subprocess.CalledProcessError as e: - raise BuildError(e.output.decode()) + raise BuildError(e.output) + _, stderr = proc.communicate() + if proc.returncode != 0: + raise BuildError(stderr.decode()) + if stderr: # likely only due to build warnings + print(stderr.decode()) + + def start(self, params: List[str], build_dir: str) -> subprocess.Popen: + raise RuntimeError('not implemented!') + + +class LinuxSourceTreeOperationsQemu(LinuxSourceTreeOperations): + + def __init__(self, qemu_arch_params: qemu_config.QemuArchParams, cross_compile: Optional[str]): + super().__init__(linux_arch=qemu_arch_params.linux_arch, + cross_compile=cross_compile) + self._kconfig = qemu_arch_params.kconfig + self._qemu_arch = qemu_arch_params.qemu_arch + self._kernel_path = qemu_arch_params.kernel_path + self._kernel_command_line = qemu_arch_params.kernel_command_line + ' kunit_shutdown=reboot' + self._extra_qemu_params = qemu_arch_params.extra_qemu_params + self._serial = qemu_arch_params.serial + + def make_arch_config(self, base_kunitconfig: kunit_config.Kconfig) -> kunit_config.Kconfig: + kconfig = kunit_config.parse_from_string(self._kconfig) + kconfig.merge_in_entries(base_kunitconfig) + return kconfig + + def start(self, params: List[str], build_dir: str) -> subprocess.Popen: + kernel_path = os.path.join(build_dir, self._kernel_path) + qemu_command = ['qemu-system-' + self._qemu_arch, + '-nodefaults', + '-m', '1024', + '-kernel', kernel_path, + '-append', ' '.join(params + [self._kernel_command_line]), + '-no-reboot', + '-nographic', + '-serial', self._serial] + self._extra_qemu_params + # Note: shlex.join() does what we want, but requires python 3.8+. 
+ print('Running tests with:\n$', ' '.join(shlex.quote(arg) for arg in qemu_command)) + return subprocess.Popen(qemu_command, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + text=True, errors='backslashreplace') - def linux_bin(self, params, timeout, build_dir, outfile): +class LinuxSourceTreeOperationsUml(LinuxSourceTreeOperations): + """An abstraction over command line operations performed on a source tree.""" + + def __init__(self, cross_compile: Optional[str]=None): + super().__init__(linux_arch='um', cross_compile=cross_compile) + + def make_arch_config(self, base_kunitconfig: kunit_config.Kconfig) -> kunit_config.Kconfig: + kconfig = kunit_config.parse_file(UML_KCONFIG_PATH) + kconfig.merge_in_entries(base_kunitconfig) + return kconfig + + def start(self, params: List[str], build_dir: str) -> subprocess.Popen: """Runs the Linux UML binary. Must be named 'linux'.""" - linux_bin = './linux' - if build_dir: - linux_bin = os.path.join(build_dir, 'linux') - with open(outfile, 'w') as output: - process = subprocess.Popen([linux_bin] + params, - stdout=output, - stderr=subprocess.STDOUT) - process.wait(timeout) - - -def get_kconfig_path(build_dir): - kconfig_path = KCONFIG_PATH - if build_dir: - kconfig_path = os.path.join(build_dir, KCONFIG_PATH) - return kconfig_path - -class LinuxSourceTree(object): + linux_bin = os.path.join(build_dir, 'linux') + params.extend(['mem=1G', 'console=tty', 'kunit_shutdown=halt']) + print('Running tests with:\n$', linux_bin, ' '.join(shlex.quote(arg) for arg in params)) + return subprocess.Popen([linux_bin] + params, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + text=True, errors='backslashreplace') + +def get_kconfig_path(build_dir: str) -> str: + return os.path.join(build_dir, KCONFIG_PATH) + +def get_kunitconfig_path(build_dir: str) -> str: + return os.path.join(build_dir, KUNITCONFIG_PATH) + +def get_old_kunitconfig_path(build_dir: str) -> str: + return os.path.join(build_dir, OLD_KUNITCONFIG_PATH) + +def get_parsed_kunitconfig(build_dir: str, + kunitconfig_paths: Optional[List[str]]=None) -> kunit_config.Kconfig: + if not kunitconfig_paths: + path = get_kunitconfig_path(build_dir) + if not os.path.exists(path): + shutil.copyfile(DEFAULT_KUNITCONFIG_PATH, path) + return kunit_config.parse_file(path) + + merged = kunit_config.Kconfig() + + for path in kunitconfig_paths: + if os.path.isdir(path): + path = os.path.join(path, KUNITCONFIG_PATH) + if not os.path.exists(path): + raise ConfigError(f'Specified kunitconfig ({path}) does not exist') + + partial = kunit_config.parse_file(path) + diff = merged.conflicting_options(partial) + if diff: + diff_str = '\n\n'.join(f'{a}\n vs from {path}\n{b}' for a, b in diff) + raise ConfigError(f'Multiple values specified for {len(diff)} options in kunitconfig:\n{diff_str}') + merged.merge_in_entries(partial) + return merged + +def get_outfile_path(build_dir: str) -> str: + return os.path.join(build_dir, OUTFILE_PATH) + +def _default_qemu_config_path(arch: str) -> str: + config_path = os.path.join(QEMU_CONFIGS_DIR, arch + '.py') + if os.path.isfile(config_path): + return config_path + + options = [f[:-3] for f in os.listdir(QEMU_CONFIGS_DIR) if f.endswith('.py')] + raise ConfigError(arch + ' is not a valid arch, options are ' + str(sorted(options))) + +def _get_qemu_ops(config_path: str, + extra_qemu_args: Optional[List[str]], + cross_compile: Optional[str]) -> Tuple[str, LinuxSourceTreeOperations]: + # The module name/path has very little to do with 
where the actual file + # exists (I learned this through experimentation and could not find it + # anywhere in the Python documentation). + # + # Bascially, we completely ignore the actual file location of the config + # we are loading and just tell Python that the module lives in the + # QEMU_CONFIGS_DIR for import purposes regardless of where it actually + # exists as a file. + module_path = '.' + os.path.join(os.path.basename(QEMU_CONFIGS_DIR), os.path.basename(config_path)) + spec = importlib.util.spec_from_file_location(module_path, config_path) + assert spec is not None + config = importlib.util.module_from_spec(spec) + # See https://github.com/python/typeshed/pull/2626 for context. + assert isinstance(spec.loader, importlib.abc.Loader) + spec.loader.exec_module(config) + + if not hasattr(config, 'QEMU_ARCH'): + raise ValueError('qemu_config module missing "QEMU_ARCH": ' + config_path) + params: qemu_config.QemuArchParams = config.QEMU_ARCH + if extra_qemu_args: + params.extra_qemu_params.extend(extra_qemu_args) + return params.linux_arch, LinuxSourceTreeOperationsQemu( + params, cross_compile=cross_compile) + +class LinuxSourceTree: """Represents a Linux kernel source tree with KUnit tests.""" - def __init__(self): - self._kconfig = kunit_config.Kconfig() - self._kconfig.read_from_file(kunitconfig_path) - self._ops = LinuxSourceTreeOperations() + def __init__( + self, + build_dir: str, + kunitconfig_paths: Optional[List[str]]=None, + kconfig_add: Optional[List[str]]=None, + arch: Optional[str]=None, + cross_compile: Optional[str]=None, + qemu_config_path: Optional[str]=None, + extra_qemu_args: Optional[List[str]]=None) -> None: signal.signal(signal.SIGINT, self.signal_handler) + if qemu_config_path: + self._arch, self._ops = _get_qemu_ops(qemu_config_path, extra_qemu_args, cross_compile) + else: + self._arch = 'um' if arch is None else arch + if self._arch == 'um': + self._ops = LinuxSourceTreeOperationsUml(cross_compile=cross_compile) + else: + qemu_config_path = _default_qemu_config_path(self._arch) + _, self._ops = _get_qemu_ops(qemu_config_path, extra_qemu_args, cross_compile) + + self._kconfig = get_parsed_kunitconfig(build_dir, kunitconfig_paths) + if kconfig_add: + kconfig = kunit_config.parse_from_string('\n'.join(kconfig_add)) + self._kconfig.merge_in_entries(kconfig) - def clean(self): + def arch(self) -> str: + return self._arch + + def clean(self) -> bool: try: self._ops.make_mrproper() except ConfigError as e: @@ -123,52 +263,67 @@ class LinuxSourceTree(object): return False return True - def validate_config(self, build_dir): + def validate_config(self, build_dir: str) -> bool: kconfig_path = get_kconfig_path(build_dir) - validated_kconfig = kunit_config.Kconfig() - validated_kconfig.read_from_file(kconfig_path) - if not self._kconfig.is_subset_of(validated_kconfig): - invalid = self._kconfig.entries() - validated_kconfig.entries() - message = 'Provided Kconfig is not contained in validated .config. 
Following fields found in kunitconfig, ' \ - 'but not in .config: %s' % ( - ', '.join([str(e) for e in invalid]) - ) - logging.error(message) - return False - return True + validated_kconfig = kunit_config.parse_file(kconfig_path) + if self._kconfig.is_subset_of(validated_kconfig): + return True + missing = set(self._kconfig.as_entries()) - set(validated_kconfig.as_entries()) + message = 'Not all Kconfig options selected in kunitconfig were in the generated .config.\n' \ + 'This is probably due to unsatisfied dependencies.\n' \ + 'Missing: ' + ', '.join(str(e) for e in missing) + if self._arch == 'um': + message += '\nNote: many Kconfig options aren\'t available on UML. You can try running ' \ + 'on a different architecture with something like "--arch=x86_64".' + logging.error(message) + return False - def build_config(self, build_dir, make_options): + def build_config(self, build_dir: str, make_options: Optional[List[str]]) -> bool: kconfig_path = get_kconfig_path(build_dir) if build_dir and not os.path.exists(build_dir): os.mkdir(build_dir) - self._kconfig.write_to_file(kconfig_path) try: + self._kconfig = self._ops.make_arch_config(self._kconfig) + self._kconfig.write_to_file(kconfig_path) self._ops.make_olddefconfig(build_dir, make_options) except ConfigError as e: logging.error(e) return False - return self.validate_config(build_dir) + if not self.validate_config(build_dir): + return False + + old_path = get_old_kunitconfig_path(build_dir) + if os.path.exists(old_path): + os.remove(old_path) # write_to_file appends to the file + self._kconfig.write_to_file(old_path) + return True + + def _kunitconfig_changed(self, build_dir: str) -> bool: + old_path = get_old_kunitconfig_path(build_dir) + if not os.path.exists(old_path): + return True - def build_reconfig(self, build_dir, make_options): + old_kconfig = kunit_config.parse_file(old_path) + return old_kconfig != self._kconfig + + def build_reconfig(self, build_dir: str, make_options: Optional[List[str]]) -> bool: """Creates a new .config if it is not a subset of the .kunitconfig.""" kconfig_path = get_kconfig_path(build_dir) - if os.path.exists(kconfig_path): - existing_kconfig = kunit_config.Kconfig() - existing_kconfig.read_from_file(kconfig_path) - if not self._kconfig.is_subset_of(existing_kconfig): - print('Regenerating .config ...') - os.remove(kconfig_path) - return self.build_config(build_dir, make_options) - else: - return True - else: + if not os.path.exists(kconfig_path): print('Generating .config ...') return self.build_config(build_dir, make_options) - def build_um_kernel(self, alltests, jobs, build_dir, make_options): + existing_kconfig = kunit_config.parse_file(kconfig_path) + self._kconfig = self._ops.make_arch_config(self._kconfig) + + if self._kconfig.is_subset_of(existing_kconfig) and not self._kunitconfig_changed(build_dir): + return True + print('Regenerating .config ...') + os.remove(kconfig_path) + return self.build_config(build_dir, make_options) + + def build_kernel(self, jobs: int, build_dir: str, make_options: Optional[List[str]]) -> bool: try: - if alltests: - self._ops.make_allyesconfig(build_dir, make_options) self._ops.make_olddefconfig(build_dir, make_options) self._ops.make(jobs, build_dir, make_options) except (ConfigError, BuildError) as e: @@ -176,15 +331,47 @@ class LinuxSourceTree(object): return False return self.validate_config(build_dir) - def run_kernel(self, args=[], build_dir='', timeout=None): - args.extend(['mem=1G']) - outfile = 'test.log' - self._ops.linux_bin(args, timeout, build_dir, 
outfile) - subprocess.call(['stty', 'sane']) - with open(outfile, 'r') as file: - for line in file: + def run_kernel(self, args: Optional[List[str]]=None, build_dir: str='', filter_glob: str='', filter: str='', filter_action: Optional[str]=None, timeout: Optional[int]=None) -> Iterator[str]: + if not args: + args = [] + if filter_glob: + args.append('kunit.filter_glob=' + filter_glob) + if filter: + args.append('kunit.filter="' + filter + '"') + if filter_action: + args.append('kunit.filter_action=' + filter_action) + args.append('kunit.enable=1') + + process = self._ops.start(args, build_dir) + assert process.stdout is not None # tell mypy it's set + + # Enforce the timeout in a background thread. + def _wait_proc() -> None: + try: + process.wait(timeout=timeout) + except Exception as e: + print(e) + process.terminate() + process.wait() + waiter = threading.Thread(target=_wait_proc) + waiter.start() + + output = open(get_outfile_path(build_dir), 'w') + try: + # Tee the output to the file and to our caller in real time. + for line in process.stdout: + output.write(line) yield line + # This runs even if our caller doesn't consume every line. + finally: + # Flush any leftover output to the file + output.write(process.stdout.read()) + output.close() + process.stdout.close() + + waiter.join() + subprocess.call(['stty', 'sane']) - def signal_handler(self, sig, frame): + def signal_handler(self, unused_sig: int, unused_frame: Optional[FrameType]) -> None: logging.error('Build interruption occurred. Cleaning console.') subprocess.call(['stty', 'sane']) diff --git a/tools/testing/kunit/kunit_parser.py b/tools/testing/kunit/kunit_parser.py index 8019e3dd4c32..ce34be15c929 100644 --- a/tools/testing/kunit/kunit_parser.py +++ b/tools/testing/kunit/kunit_parser.py @@ -1,359 +1,825 @@ # SPDX-License-Identifier: GPL-2.0 # -# Parses test results from a kernel dmesg log. +# Parses KTAP test results from a kernel dmesg log and incrementally prints +# results with reader-friendly format. Stores and returns test results in a +# Test object. # # Copyright (C) 2019, Google LLC. # Author: Felix Guo <felixguoxiuping@gmail.com> # Author: Brendan Higgins <brendanhiggins@google.com> +# Author: Rae Moar <rmoar@google.com> +from __future__ import annotations +from dataclasses import dataclass import re +import textwrap -from collections import namedtuple -from datetime import datetime from enum import Enum, auto -from functools import reduce -from typing import List - -TestResult = namedtuple('TestResult', ['status','suites','log']) - -class TestSuite(object): - def __init__(self): - self.status = None - self.name = None - self.cases = [] - - def __str__(self): - return 'TestSuite(' + self.status + ',' + self.name + ',' + str(self.cases) + ')' - - def __repr__(self): +from typing import Iterable, Iterator, List, Optional, Tuple + +from kunit_printer import stdout + +class Test: + """ + A class to represent a test parsed from KTAP results. All KTAP + results within a test log are stored in a main Test object as + subtests. + + Attributes: + status : TestStatus - status of the test + name : str - name of the test + expected_count : int - expected number of subtests (0 if single + test case and None if unknown expected number of subtests) + subtests : List[Test] - list of subtests + log : List[str] - log of KTAP lines that correspond to the test + counts : TestCounts - counts of the test statuses and errors of + subtests or of the test itself if the test is a single + test case. 
+ """ + def __init__(self) -> None: + """Creates Test object with default attributes.""" + self.status = TestStatus.TEST_CRASHED + self.name = '' + self.expected_count = 0 # type: Optional[int] + self.subtests = [] # type: List[Test] + self.log = [] # type: List[str] + self.counts = TestCounts() + + def __str__(self) -> str: + """Returns string representation of a Test class object.""" + return (f'Test({self.status}, {self.name}, {self.expected_count}, ' + f'{self.subtests}, {self.log}, {self.counts})') + + def __repr__(self) -> str: + """Returns string representation of a Test class object.""" return str(self) -class TestCase(object): - def __init__(self): - self.status = None - self.name = '' - self.log = [] + def add_error(self, error_message: str) -> None: + """Records an error that occurred while parsing this test.""" + self.counts.errors += 1 + stdout.print_with_timestamp(stdout.red('[ERROR]') + f' Test: {self.name}: {error_message}') - def __str__(self): - return 'TestCase(' + self.status + ',' + self.name + ',' + str(self.log) + ')' - - def __repr__(self): - return str(self) + def ok_status(self) -> bool: + """Returns true if the status was ok, i.e. passed or skipped.""" + return self.status in (TestStatus.SUCCESS, TestStatus.SKIPPED) class TestStatus(Enum): + """An enumeration class to represent the status of a test.""" SUCCESS = auto() FAILURE = auto() + SKIPPED = auto() TEST_CRASHED = auto() NO_TESTS = auto() FAILURE_TO_PARSE_TESTS = auto() -kunit_start_re = re.compile(r'TAP version [0-9]+$') -kunit_end_re = re.compile('(List of all partitions:|' - 'Kernel panic - not syncing: VFS:)') - -def isolate_kunit_output(kernel_output): - started = False - for line in kernel_output: - if kunit_start_re.search(line): - prefix_len = len(line.split('TAP version')[0]) - started = True - yield line[prefix_len:] if prefix_len > 0 else line - elif kunit_end_re.search(line): - break - elif started: - yield line[prefix_len:] if prefix_len > 0 else line - -def raw_output(kernel_output): - for line in kernel_output: - print(line) - yield line - -DIVIDER = '=' * 60 - -RESET = '\033[0;0m' - -def red(text): - return '\033[1;31m' + text + RESET - -def yellow(text): - return '\033[1;33m' + text + RESET - -def green(text): - return '\033[1;32m' + text + RESET - -def print_with_timestamp(message): - print('[%s] %s' % (datetime.now().strftime('%H:%M:%S'), message)) - -def format_suite_divider(message): - return '======== ' + message + ' ========' - -def print_suite_divider(message): - print_with_timestamp(DIVIDER) - print_with_timestamp(format_suite_divider(message)) - -def print_log(log): - for m in log: - print_with_timestamp(m) - -TAP_ENTRIES = re.compile(r'^(TAP|[\s]*ok|[\s]*not ok|[\s]*[0-9]+\.\.[0-9]+|[\s]*#).*$') - -def consume_non_diagnositic(lines: List[str]) -> None: - while lines and not TAP_ENTRIES.match(lines[0]): - lines.pop(0) +@dataclass +class TestCounts: + """ + Tracks the counts of statuses of all test cases and any errors within + a Test. 
+ """ + passed: int = 0 + failed: int = 0 + crashed: int = 0 + skipped: int = 0 + errors: int = 0 + + def __str__(self) -> str: + """Returns the string representation of a TestCounts object.""" + statuses = [('passed', self.passed), ('failed', self.failed), + ('crashed', self.crashed), ('skipped', self.skipped), + ('errors', self.errors)] + return f'Ran {self.total()} tests: ' + \ + ', '.join(f'{s}: {n}' for s, n in statuses if n > 0) + + def total(self) -> int: + """Returns the total number of test cases within a test + object, where a test case is a test with no subtests. + """ + return (self.passed + self.failed + self.crashed + + self.skipped) + + def add_subtest_counts(self, counts: TestCounts) -> None: + """ + Adds the counts of another TestCounts object to the current + TestCounts object. Used to add the counts of a subtest to the + parent test. + + Parameters: + counts - a different TestCounts object whose counts + will be added to the counts of the TestCounts object + """ + self.passed += counts.passed + self.failed += counts.failed + self.crashed += counts.crashed + self.skipped += counts.skipped + self.errors += counts.errors + + def get_status(self) -> TestStatus: + """Returns the aggregated status of a Test using test + counts. + """ + if self.total() == 0: + return TestStatus.NO_TESTS + if self.crashed: + # Crashes should take priority. + return TestStatus.TEST_CRASHED + if self.failed: + return TestStatus.FAILURE + if self.passed: + # No failures or crashes, looks good! + return TestStatus.SUCCESS + # We have only skipped tests. + return TestStatus.SKIPPED + + def add_status(self, status: TestStatus) -> None: + """Increments the count for `status`.""" + if status == TestStatus.SUCCESS: + self.passed += 1 + elif status == TestStatus.FAILURE: + self.failed += 1 + elif status == TestStatus.SKIPPED: + self.skipped += 1 + elif status != TestStatus.NO_TESTS: + self.crashed += 1 + +class LineStream: + """ + A class to represent the lines of kernel output. + Provides a lazy peek()/pop() interface over an iterator of + (line#, text). + """ + _lines: Iterator[Tuple[int, str]] + _next: Tuple[int, str] + _need_next: bool + _done: bool + + def __init__(self, lines: Iterator[Tuple[int, str]]): + """Creates a new LineStream that wraps the given iterator.""" + self._lines = lines + self._done = False + self._need_next = True + self._next = (0, '') + + def _get_next(self) -> None: + """Advances the LineSteam to the next line, if necessary.""" + if not self._need_next: + return + try: + self._next = next(self._lines) + except StopIteration: + self._done = True + finally: + self._need_next = False + + def peek(self) -> str: + """Returns the current line, without advancing the LineStream. + """ + self._get_next() + return self._next[1] + + def pop(self) -> str: + """Returns the current line and advances the LineStream to + the next line. + """ + s = self.peek() + if self._done: + raise ValueError(f'LineStream: going past EOF, last line was {s}') + self._need_next = True + return s + + def __bool__(self) -> bool: + """Returns True if stream has more lines.""" + self._get_next() + return not self._done + + # Only used by kunit_tool_test.py. + def __iter__(self) -> Iterator[str]: + """Empties all lines stored in LineStream object into + Iterator object and returns the Iterator object. 
+ """ + while bool(self): + yield self.pop() + + def line_number(self) -> int: + """Returns the line number of the current line.""" + self._get_next() + return self._next[0] + +# Parsing helper methods: + +KTAP_START = re.compile(r'\s*KTAP version ([0-9]+)$') +TAP_START = re.compile(r'\s*TAP version ([0-9]+)$') +KTAP_END = re.compile(r'\s*(List of all partitions:|' + 'Kernel panic - not syncing: VFS:|reboot: System halted)') +EXECUTOR_ERROR = re.compile(r'\s*kunit executor: (.*)$') + +def extract_tap_lines(kernel_output: Iterable[str]) -> LineStream: + """Extracts KTAP lines from the kernel output.""" + def isolate_ktap_output(kernel_output: Iterable[str]) \ + -> Iterator[Tuple[int, str]]: + line_num = 0 + started = False + for line in kernel_output: + line_num += 1 + line = line.rstrip() # remove trailing \n + if not started and KTAP_START.search(line): + # start extracting KTAP lines and set prefix + # to number of characters before version line + prefix_len = len( + line.split('KTAP version')[0]) + started = True + yield line_num, line[prefix_len:] + elif not started and TAP_START.search(line): + # start extracting KTAP lines and set prefix + # to number of characters before version line + prefix_len = len(line.split('TAP version')[0]) + started = True + yield line_num, line[prefix_len:] + elif started and KTAP_END.search(line): + # stop extracting KTAP lines + break + elif started: + # remove the prefix, if any. + line = line[prefix_len:] + yield line_num, line + elif EXECUTOR_ERROR.search(line): + yield line_num, line + return LineStream(lines=isolate_ktap_output(kernel_output)) + +KTAP_VERSIONS = [1] +TAP_VERSIONS = [13, 14] + +def check_version(version_num: int, accepted_versions: List[int], + version_type: str, test: Test) -> None: + """ + Adds error to test object if version number is too high or too + low. + + Parameters: + version_num - The inputted version number from the parsed KTAP or TAP + header line + accepted_version - List of accepted KTAP or TAP versions + version_type - 'KTAP' or 'TAP' depending on the type of + version line. + test - Test object for current test being parsed + """ + if version_num < min(accepted_versions): + test.add_error(f'{version_type} version lower than expected!') + elif version_num > max(accepted_versions): + test.add_error(f'{version_type} version higer than expected!') + +def parse_ktap_header(lines: LineStream, test: Test) -> bool: + """ + Parses KTAP/TAP header line and checks version number. + Returns False if fails to parse KTAP/TAP header line. 
+ + Accepted formats: + - 'KTAP version [version number]' + - 'TAP version [version number]' + + Parameters: + lines - LineStream of KTAP output to parse + test - Test object for current test being parsed + + Return: + True if successfully parsed KTAP/TAP header line + """ + ktap_match = KTAP_START.match(lines.peek()) + tap_match = TAP_START.match(lines.peek()) + if ktap_match: + version_num = int(ktap_match.group(1)) + check_version(version_num, KTAP_VERSIONS, 'KTAP', test) + elif tap_match: + version_num = int(tap_match.group(1)) + check_version(version_num, TAP_VERSIONS, 'TAP', test) + else: + return False + lines.pop() + return True -def save_non_diagnositic(lines: List[str], test_case: TestCase) -> None: - while lines and not TAP_ENTRIES.match(lines[0]): - test_case.log.append(lines[0]) - lines.pop(0) +TEST_HEADER = re.compile(r'^\s*# Subtest: (.*)$') -OkNotOkResult = namedtuple('OkNotOkResult', ['is_ok','description', 'text']) +def parse_test_header(lines: LineStream, test: Test) -> bool: + """ + Parses test header and stores test name in test object. + Returns False if fails to parse test header line. -OK_NOT_OK_SUBTEST = re.compile(r'^[\s]+(ok|not ok) [0-9]+ - (.*)$') + Accepted format: + - '# Subtest: [test name]' -OK_NOT_OK_MODULE = re.compile(r'^(ok|not ok) ([0-9]+) - (.*)$') + Parameters: + lines - LineStream of KTAP output to parse + test - Test object for current test being parsed -def parse_ok_not_ok_test_case(lines: List[str], test_case: TestCase) -> bool: - save_non_diagnositic(lines, test_case) - if not lines: - test_case.status = TestStatus.TEST_CRASHED - return True - line = lines[0] - match = OK_NOT_OK_SUBTEST.match(line) - while not match and lines: - line = lines.pop(0) - match = OK_NOT_OK_SUBTEST.match(line) - if match: - test_case.log.append(lines.pop(0)) - test_case.name = match.group(2) - if test_case.status == TestStatus.TEST_CRASHED: - return True - if match.group(1) == 'ok': - test_case.status = TestStatus.SUCCESS - else: - test_case.status = TestStatus.FAILURE - return True - else: + Return: + True if successfully parsed test header line + """ + match = TEST_HEADER.match(lines.peek()) + if not match: return False - -SUBTEST_DIAGNOSTIC = re.compile(r'^[\s]+# .*?: (.*)$') -DIAGNOSTIC_CRASH_MESSAGE = 'kunit test case crashed!' - -def parse_diagnostic(lines: List[str], test_case: TestCase) -> bool: - save_non_diagnositic(lines, test_case) - if not lines: + test.name = match.group(1) + lines.pop() + return True + +TEST_PLAN = re.compile(r'^\s*1\.\.([0-9]+)') + +def parse_test_plan(lines: LineStream, test: Test) -> bool: + """ + Parses test plan line and stores the expected number of subtests in + test object. Reports an error if expected count is 0. + Returns False and sets expected_count to None if there is no valid test + plan. 
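The three header parsers, parse_ktap_header(), parse_test_header(), and the test-plan parser documented here, are meant to be called in sequence on a subtest banner. A round-trip sketch under the same importability assumption:

import kunit_parser

lines = kunit_parser.extract_tap_lines([
	'KTAP version 1\n',
	'# Subtest: example\n',
	'1..2\n',
])
test = kunit_parser.Test()

print(kunit_parser.parse_ktap_header(lines, test))  # -> True, version 1 accepted
print(kunit_parser.parse_test_header(lines, test))  # -> True, test.name == 'example'
print(kunit_parser.parse_test_plan(lines, test))    # -> True, test.expected_count == 2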
+
+	Accepted format:
+	- '1..[number of subtests]'
+
+	Parameters:
+	lines - LineStream of KTAP output to parse
+	test - Test object for current test being parsed
+
+	Return:
+	True if successfully parsed test plan line
+	"""
+	match = TEST_PLAN.match(lines.peek())
+	if not match:
+		test.expected_count = None
 		return False
-	line = lines[0]
-	match = SUBTEST_DIAGNOSTIC.match(line)
-	if match:
-		test_case.log.append(lines.pop(0))
-		if match.group(1) == DIAGNOSTIC_CRASH_MESSAGE:
-			test_case.status = TestStatus.TEST_CRASHED
-		return True
-	else:
+	expected_count = int(match.group(1))
+	test.expected_count = expected_count
+	lines.pop()
+	return True
+
+TEST_RESULT = re.compile(r'^\s*(ok|not ok) ([0-9]+) (- )?([^#]*)( # .*)?$')
+
+TEST_RESULT_SKIP = re.compile(r'^\s*(ok|not ok) ([0-9]+) (- )?(.*) # SKIP(.*)$')
+
+def peek_test_name_match(lines: LineStream, test: Test) -> bool:
+	"""
+	Matches current line with the format of a test result line and checks
+	if the name matches the name of the current test.
+	Returns False if fails to match format or name.
+
+	Accepted format:
+	- '[ok|not ok] [test number] [-] [test name] [optional skip
+	directive]'
+
+	Parameters:
+	lines - LineStream of KTAP output to parse
+	test - Test object for current test being parsed
+
+	Return:
+	True if matched a test result line and the name matches the
+	expected test name
+	"""
+	line = lines.peek()
+	match = TEST_RESULT.match(line)
+	if not match:
+		return False
+	name = match.group(4)
+	return name == test.name
+
+def parse_test_result(lines: LineStream, test: Test,
+			expected_num: int) -> bool:
+	"""
+	Parses test result line and stores the status and name in the test
+	object. Reports an error if the test number does not match expected
+	test number.
+	Returns False if fails to parse test result line.
+
+	Note that the SKIP directive is the only directive that causes a
+	change in status.
+
+	Accepted format:
+	- '[ok|not ok] [test number] [-] [test name] [optional skip
+	directive]'
+
+	Parameters:
+	lines - LineStream of KTAP output to parse
+	test - Test object for current test being parsed
+	expected_num - expected test number for current test
+
+	Return:
+	True if successfully parsed a test result line.
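The grouping in TEST_RESULT above does the heavy lifting: group 1 is the status, group 2 the test number, group 4 the name, and group 5 any trailing directive, with regex backtracking releasing the space before a '#'. A quick check of the groups in isolation:

import re

TEST_RESULT = re.compile(r'^\s*(ok|not ok) ([0-9]+) (- )?([^#]*)( # .*)?$')

m = TEST_RESULT.match('ok 1 - example_simple_test')
print(m.group(1), m.group(2), repr(m.group(4)))
# -> ok 1 'example_simple_test'

m = TEST_RESULT.match('not ok 2 example_skip_test # SKIP all tests skipped')
print(m.group(1), repr(m.group(4)), repr(m.group(5)))
# -> not ok 'example_skip_test' ' # SKIP all tests skipped'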
+ """ + line = lines.peek() + match = TEST_RESULT.match(line) + skip_match = TEST_RESULT_SKIP.match(line) + + # Check if line matches test result line format + if not match: return False + lines.pop() -def parse_test_case(lines: List[str]) -> TestCase: - test_case = TestCase() - save_non_diagnositic(lines, test_case) - while parse_diagnostic(lines, test_case): - pass - if parse_ok_not_ok_test_case(lines, test_case): - return test_case + # Set name of test object + if skip_match: + test.name = skip_match.group(4) else: - return None - -SUBTEST_HEADER = re.compile(r'^[\s]+# Subtest: (.*)$') - -def parse_subtest_header(lines: List[str]) -> str: - consume_non_diagnositic(lines) - if not lines: - return None - match = SUBTEST_HEADER.match(lines[0]) - if match: - lines.pop(0) - return match.group(1) + test.name = match.group(4) + + # Check test num + num = int(match.group(2)) + if num != expected_num: + test.add_error(f'Expected test number {expected_num} but found {num}') + + # Set status of test object + status = match.group(1) + if skip_match: + test.status = TestStatus.SKIPPED + elif status == 'ok': + test.status = TestStatus.SUCCESS else: - return None + test.status = TestStatus.FAILURE + return True -SUBTEST_PLAN = re.compile(r'[\s]+[0-9]+\.\.([0-9]+)') +def parse_diagnostic(lines: LineStream) -> List[str]: + """ + Parse lines that do not match the format of a test result line or + test header line and returns them in list. -def parse_subtest_plan(lines: List[str]) -> int: - consume_non_diagnositic(lines) - match = SUBTEST_PLAN.match(lines[0]) - if match: - lines.pop(0) - return int(match.group(1)) - else: - return None - -def max_status(left: TestStatus, right: TestStatus) -> TestStatus: - if left == TestStatus.TEST_CRASHED or right == TestStatus.TEST_CRASHED: - return TestStatus.TEST_CRASHED - elif left == TestStatus.FAILURE or right == TestStatus.FAILURE: - return TestStatus.FAILURE - elif left != TestStatus.SUCCESS: - return left - elif right != TestStatus.SUCCESS: - return right - else: - return TestStatus.SUCCESS + Line formats that are not parsed: + - '# Subtest: [test name]' + - '[ok|not ok] [test number] [-] [test name] [optional skip + directive]' + - 'KTAP version [version number]' -def parse_ok_not_ok_test_suite(lines: List[str], - test_suite: TestSuite, - expected_suite_index: int) -> bool: - consume_non_diagnositic(lines) - if not lines: - test_suite.status = TestStatus.TEST_CRASHED - return False - line = lines[0] - match = OK_NOT_OK_MODULE.match(line) - if match: - lines.pop(0) - if match.group(1) == 'ok': - test_suite.status = TestStatus.SUCCESS - else: - test_suite.status = TestStatus.FAILURE - suite_index = int(match.group(2)) - if suite_index != expected_suite_index: - print_with_timestamp( - red('[ERROR] ') + 'expected_suite_index ' + - str(expected_suite_index) + ', but got ' + - str(suite_index)) - return True - else: - return False + Parameters: + lines - LineStream of KTAP output to parse -def bubble_up_errors(to_status, status_container_list) -> TestStatus: - status_list = map(to_status, status_container_list) - return reduce(max_status, status_list, TestStatus.SUCCESS) + Return: + Log of diagnostic lines + """ + log = [] # type: List[str] + non_diagnostic_lines = [TEST_RESULT, TEST_HEADER, KTAP_START, TAP_START, TEST_PLAN] + while lines and not any(re.match(lines.peek()) + for re in non_diagnostic_lines): + log.append(lines.pop()) + return log -def bubble_up_test_case_errors(test_suite: TestSuite) -> TestStatus: - max_test_case_status = bubble_up_errors(lambda x: 
x.status, test_suite.cases) - return max_status(max_test_case_status, test_suite.status) -def parse_test_suite(lines: List[str], expected_suite_index: int) -> TestSuite: - if not lines: - return None - consume_non_diagnositic(lines) - test_suite = TestSuite() - test_suite.status = TestStatus.SUCCESS - name = parse_subtest_header(lines) - if not name: - return None - test_suite.name = name - expected_test_case_num = parse_subtest_plan(lines) - if not expected_test_case_num: - return None - while expected_test_case_num > 0: - test_case = parse_test_case(lines) - if not test_case: - break - test_suite.cases.append(test_case) - expected_test_case_num -= 1 - if parse_ok_not_ok_test_suite(lines, test_suite, expected_suite_index): - test_suite.status = bubble_up_test_case_errors(test_suite) - return test_suite - elif not lines: - print_with_timestamp(red('[ERROR] ') + 'ran out of lines before end token') - return test_suite - else: - print('failed to parse end of suite' + lines[0]) - return None +# Printing helper methods: -TAP_HEADER = re.compile(r'^TAP version 14$') +DIVIDER = '=' * 60 -def parse_tap_header(lines: List[str]) -> bool: - consume_non_diagnositic(lines) - if TAP_HEADER.match(lines[0]): - lines.pop(0) - return True +def format_test_divider(message: str, len_message: int) -> str: + """ + Returns string with message centered in fixed width divider. + + Example: + '===================== message example =====================' + + Parameters: + message - message to be centered in divider line + len_message - length of the message to be printed such that + any characters of the color codes are not counted + + Return: + String containing message centered in fixed width divider + """ + default_count = 3 # default number of dashes + len_1 = default_count + len_2 = default_count + difference = len(DIVIDER) - len_message - 2 # 2 spaces added + if difference > 0: + # calculate number of dashes for each side of the divider + len_1 = int(difference / 2) + len_2 = difference - len_1 + return ('=' * len_1) + f' {message} ' + ('=' * len_2) + +def print_test_header(test: Test) -> None: + """ + Prints test header with test name and optionally the expected number + of subtests. + + Example: + '=================== example (2 subtests) ===================' + + Parameters: + test - Test object representing current test being printed + """ + message = test.name + if message != "": + # Add a leading space before the subtest counts only if a test name + # is provided using a "# Subtest" header line. + message += " " + if test.expected_count: + if test.expected_count == 1: + message += '(1 subtest)' + else: + message += f'({test.expected_count} subtests)' + stdout.print_with_timestamp(format_test_divider(message, len(message))) + +def print_log(log: Iterable[str]) -> None: + """Prints all strings in saved log for test in yellow.""" + formatted = textwrap.dedent('\n'.join(log)) + for line in formatted.splitlines(): + stdout.print_with_timestamp(stdout.yellow(line)) + +def format_test_result(test: Test) -> str: + """ + Returns string with formatted test result with colored status and test + name. 
+ + Example: + '[PASSED] example' + + Parameters: + test - Test object representing current test being printed + + Return: + String containing formatted test result + """ + if test.status == TestStatus.SUCCESS: + return stdout.green('[PASSED] ') + test.name + if test.status == TestStatus.SKIPPED: + return stdout.yellow('[SKIPPED] ') + test.name + if test.status == TestStatus.NO_TESTS: + return stdout.yellow('[NO TESTS RUN] ') + test.name + if test.status == TestStatus.TEST_CRASHED: + print_log(test.log) + return stdout.red('[CRASHED] ') + test.name + print_log(test.log) + return stdout.red('[FAILED] ') + test.name + +def print_test_result(test: Test) -> None: + """ + Prints result line with status of test. + + Example: + '[PASSED] example' + + Parameters: + test - Test object representing current test being printed + """ + stdout.print_with_timestamp(format_test_result(test)) + +def print_test_footer(test: Test) -> None: + """ + Prints test footer with status of test. + + Example: + '===================== [PASSED] example =====================' + + Parameters: + test - Test object representing current test being printed + """ + message = format_test_result(test) + stdout.print_with_timestamp(format_test_divider(message, + len(message) - stdout.color_len())) + + + +def _summarize_failed_tests(test: Test) -> str: + """Tries to summarize all the failing subtests in `test`.""" + + def failed_names(test: Test, parent_name: str) -> List[str]: + # Note: we use 'main' internally for the top-level test. + if not parent_name or parent_name == 'main': + full_name = test.name + else: + full_name = parent_name + '.' + test.name + + if not test.subtests: # this is a leaf node + return [full_name] + + # If all the children failed, just say this subtest failed. + # Don't summarize it down "the top-level test failed", though. + failed_subtests = [sub for sub in test.subtests if not sub.ok_status()] + if parent_name and len(failed_subtests) == len(test.subtests): + return [full_name] + + all_failures = [] # type: List[str] + for t in failed_subtests: + all_failures.extend(failed_names(t, full_name)) + return all_failures + + failures = failed_names(test, '') + # If there are too many failures, printing them out will just be noisy. + if len(failures) > 10: # this is an arbitrary limit + return '' + + return 'Failures: ' + ', '.join(failures) + + +def print_summary_line(test: Test) -> None: + """ + Prints summary line of test object. Color of line is dependent on + status of test. Color is green if test passes, yellow if test is + skipped, and red if the test fails or crashes. Summary line contains + counts of the statuses of the tests subtests or the test itself if it + has no subtests. + + Example: + "Testing complete. Passed: 2, Failed: 0, Crashed: 0, Skipped: 0, + Errors: 0" + + test - Test object representing current test being printed + """ + if test.status == TestStatus.SUCCESS: + color = stdout.green + elif test.status in (TestStatus.SKIPPED, TestStatus.NO_TESTS): + color = stdout.yellow else: - return False - -TEST_PLAN = re.compile(r'[0-9]+\.\.([0-9]+)') - -def parse_test_plan(lines: List[str]) -> int: - consume_non_diagnositic(lines) - match = TEST_PLAN.match(lines[0]) - if match: - lines.pop(0) - return int(match.group(1)) + color = stdout.red + stdout.print_with_timestamp(color(f'Testing complete. {test.counts}')) + + # Summarize failures that might have gone off-screen since we had a lot + # of tests (arbitrarily defined as >=100 for now). 
+ if test.ok_status() or test.counts.total() < 100: + return + summarized = _summarize_failed_tests(test) + if not summarized: + return + stdout.print_with_timestamp(color(summarized)) + +# Other methods: + +def bubble_up_test_results(test: Test) -> None: + """ + If the test has subtests, add the test counts of the subtests to the + test and check if any of the tests crashed and if so set the test + status to crashed. Otherwise if the test has no subtests add the + status of the test to the test counts. + + Parameters: + test - Test object for current test being parsed + """ + subtests = test.subtests + counts = test.counts + status = test.status + for t in subtests: + counts.add_subtest_counts(t.counts) + if counts.total() == 0: + counts.add_status(status) + elif test.counts.get_status() == TestStatus.TEST_CRASHED: + test.status = TestStatus.TEST_CRASHED + +def parse_test(lines: LineStream, expected_num: int, log: List[str], is_subtest: bool) -> Test: + """ + Finds next test to parse in LineStream, creates new Test object, + parses any subtests of the test, populates Test object with all + information (status, name) about the test and the Test objects for + any subtests, and then returns the Test object. The method accepts + three formats of tests: + + Accepted test formats: + + - Main KTAP/TAP header + + Example: + + KTAP version 1 + 1..4 + [subtests] + + - Subtest header (must include either the KTAP version line or + "# Subtest" header line) + + Example (preferred format with both KTAP version line and + "# Subtest" line): + + KTAP version 1 + # Subtest: name + 1..3 + [subtests] + ok 1 name + + Example (only "# Subtest" line): + + # Subtest: name + 1..3 + [subtests] + ok 1 name + + Example (only KTAP version line, compliant with KTAP v1 spec): + + KTAP version 1 + 1..3 + [subtests] + ok 1 name + + - Test result line + + Example: + + ok 1 - test + + Parameters: + lines - LineStream of KTAP output to parse + expected_num - expected test number for test to be parsed + log - list of strings containing any preceding diagnostic lines + corresponding to the current test + is_subtest - boolean indicating whether test is a subtest + + Return: + Test object populated with characteristics and any subtests + """ + test = Test() + test.log.extend(log) + + # Parse any errors prior to parsing tests + err_log = parse_diagnostic(lines) + test.log.extend(err_log) + + if not is_subtest: + # If parsing the main/top-level test, parse KTAP version line and + # test plan + test.name = "main" + ktap_line = parse_ktap_header(lines, test) + test.log.extend(parse_diagnostic(lines)) + parse_test_plan(lines, test) + parent_test = True else: - return None - -def bubble_up_suite_errors(test_suite_list: List[TestSuite]) -> TestStatus: - return bubble_up_errors(lambda x: x.status, test_suite_list) - -def parse_test_result(lines: List[str]) -> TestResult: - consume_non_diagnositic(lines) - if not lines or not parse_tap_header(lines): - return TestResult(TestStatus.NO_TESTS, [], lines) - expected_test_suite_num = parse_test_plan(lines) - if not expected_test_suite_num: - return TestResult(TestStatus.FAILURE_TO_PARSE_TESTS, [], lines) - test_suites = [] - for i in range(1, expected_test_suite_num + 1): - test_suite = parse_test_suite(lines, i) - if test_suite: - test_suites.append(test_suite) + # If not the main test, attempt to parse a test header containing + # the KTAP version line and/or subtest header line + ktap_line = parse_ktap_header(lines, test) + subtest_line = parse_test_header(lines, test) + parent_test = 
(ktap_line or subtest_line) + if parent_test: + # If KTAP version line and/or subtest header is found, attempt + # to parse test plan and print test header + test.log.extend(parse_diagnostic(lines)) + parse_test_plan(lines, test) + print_test_header(test) + expected_count = test.expected_count + subtests = [] + test_num = 1 + while parent_test and (expected_count is None or test_num <= expected_count): + # Loop to parse any subtests. + # Break after parsing expected number of tests or + # if expected number of tests is unknown break when test + # result line with matching name to subtest header is found + # or no more lines in stream. + sub_log = parse_diagnostic(lines) + sub_test = Test() + if not lines or (peek_test_name_match(lines, test) and + is_subtest): + if expected_count and test_num <= expected_count: + # If parser reaches end of test before + # parsing expected number of subtests, print + # crashed subtest and record error + test.add_error('missing expected subtest!') + sub_test.log.extend(sub_log) + test.counts.add_status( + TestStatus.TEST_CRASHED) + print_test_result(sub_test) + else: + test.log.extend(sub_log) + break else: - print_with_timestamp( - red('[ERROR] ') + ' expected ' + - str(expected_test_suite_num) + - ' test suites, but got ' + str(i - 2)) - break - test_suite = parse_test_suite(lines, -1) - if test_suite: - print_with_timestamp(red('[ERROR] ') + - 'got unexpected test suite: ' + test_suite.name) - if test_suites: - return TestResult(bubble_up_suite_errors(test_suites), test_suites, lines) - else: - return TestResult(TestStatus.NO_TESTS, [], lines) - -def print_and_count_results(test_result: TestResult) -> None: - total_tests = 0 - failed_tests = 0 - crashed_tests = 0 - for test_suite in test_result.suites: - if test_suite.status == TestStatus.SUCCESS: - print_suite_divider(green('[PASSED] ') + test_suite.name) - elif test_suite.status == TestStatus.TEST_CRASHED: - print_suite_divider(red('[CRASHED] ' + test_suite.name)) + sub_test = parse_test(lines, test_num, sub_log, True) + subtests.append(sub_test) + test_num += 1 + test.subtests = subtests + if is_subtest: + # If not main test, look for test result line + test.log.extend(parse_diagnostic(lines)) + if test.name != "" and not peek_test_name_match(lines, test): + test.add_error('missing subtest result line!') else: - print_suite_divider(red('[FAILED] ') + test_suite.name) - for test_case in test_suite.cases: - total_tests += 1 - if test_case.status == TestStatus.SUCCESS: - print_with_timestamp(green('[PASSED] ') + test_case.name) - elif test_case.status == TestStatus.TEST_CRASHED: - crashed_tests += 1 - print_with_timestamp(red('[CRASHED] ' + test_case.name)) - print_log(map(yellow, test_case.log)) - print_with_timestamp('') - else: - failed_tests += 1 - print_with_timestamp(red('[FAILED] ') + test_case.name) - print_log(map(yellow, test_case.log)) - print_with_timestamp('') - return total_tests, failed_tests, crashed_tests - -def parse_run_tests(kernel_output) -> TestResult: - total_tests = 0 - failed_tests = 0 - crashed_tests = 0 - test_result = parse_test_result(list(isolate_kunit_output(kernel_output))) - if test_result.status == TestStatus.NO_TESTS: - print(red('[ERROR] ') + yellow('no tests run!')) - elif test_result.status == TestStatus.FAILURE_TO_PARSE_TESTS: - print(red('[ERROR] ') + yellow('could not parse test results!')) + parse_test_result(lines, test, expected_num) + + # Check for there being no subtests within parent test + if parent_test and len(subtests) == 0: + # Don't override a bad 
status if this test had one reported. + # Assumption: no subtests means CRASHED is from Test.__init__() + if test.status in (TestStatus.TEST_CRASHED, TestStatus.SUCCESS): + print_log(test.log) + test.status = TestStatus.NO_TESTS + test.add_error('0 tests run!') + + # Add statuses to TestCounts attribute in Test object + bubble_up_test_results(test) + if parent_test and is_subtest: + # If test has subtests and is not the main test object, print + # footer. + print_test_footer(test) + elif is_subtest: + print_test_result(test) + return test + +def parse_run_tests(kernel_output: Iterable[str]) -> Test: + """ + Using kernel output, extract KTAP lines, parse the lines for test + results and print condensed test results and summary line. + + Parameters: + kernel_output - Iterable object contains lines of kernel output + + Return: + Test - the main test object with all subtests. + """ + stdout.print_with_timestamp(DIVIDER) + lines = extract_tap_lines(kernel_output) + test = Test() + if not lines: + test.name = '<missing>' + test.add_error('Could not find any KTAP output. Did any KUnit tests run?') + test.status = TestStatus.FAILURE_TO_PARSE_TESTS else: - (total_tests, - failed_tests, - crashed_tests) = print_and_count_results(test_result) - print_with_timestamp(DIVIDER) - fmt = green if test_result.status == TestStatus.SUCCESS else red - print_with_timestamp( - fmt('Testing complete. %d tests run. %d failed. %d crashed.' % - (total_tests, failed_tests, crashed_tests))) - return test_result + test = parse_test(lines, 0, [], False) + if test.status != TestStatus.NO_TESTS: + test.status = test.counts.get_status() + stdout.print_with_timestamp(DIVIDER) + print_summary_line(test) + return test diff --git a/tools/testing/kunit/kunit_printer.py b/tools/testing/kunit/kunit_printer.py new file mode 100644 index 000000000000..015adf87dc2c --- /dev/null +++ b/tools/testing/kunit/kunit_printer.py @@ -0,0 +1,48 @@ +#!/usr/bin/env python3 +# SPDX-License-Identifier: GPL-2.0 +# +# Utilities for printing and coloring output. +# +# Copyright (C) 2022, Google LLC. 
+# Author: Daniel Latypov <dlatypov@google.com> + +import datetime +import sys +import typing + +_RESET = '\033[0;0m' + +class Printer: + """Wraps a file object, providing utilities for coloring output, etc.""" + + def __init__(self, output: typing.IO[str]): + self._output = output + self._use_color = output.isatty() + + def print(self, message: str) -> None: + print(message, file=self._output) + + def print_with_timestamp(self, message: str) -> None: + ts = datetime.datetime.now().strftime('%H:%M:%S') + self.print(f'[{ts}] {message}') + + def _color(self, code: str, text: str) -> str: + if not self._use_color: + return text + return code + text + _RESET + + def red(self, text: str) -> str: + return self._color('\033[1;31m', text) + + def yellow(self, text: str) -> str: + return self._color('\033[1;33m', text) + + def green(self, text: str) -> str: + return self._color('\033[1;32m', text) + + def color_len(self) -> int: + """Returns the length of the color escape codes.""" + return len(self.red('')) + +# Provides a default instance that prints to stdout +stdout = Printer(sys.stdout) diff --git a/tools/testing/kunit/kunit_tool_test.py b/tools/testing/kunit/kunit_tool_test.py index 99c3c5671ea4..2beb7327e53f 100755 --- a/tools/testing/kunit/kunit_tool_test.py +++ b/tools/testing/kunit/kunit_tool_test.py @@ -1,4 +1,4 @@ -#!/usr/bin/python3 +#!/usr/bin/env python3 # SPDX-License-Identifier: GPL-2.0 # # A collection of tests for tools/testing/kunit/kunit.py @@ -11,8 +11,12 @@ from unittest import mock import tempfile, shutil # Handling test_tmpdir +import itertools import json import os +import signal +import subprocess +from typing import Iterable import kunit_config import kunit_parser @@ -21,16 +25,18 @@ import kunit_json import kunit test_tmpdir = '' +abs_test_data_dir = '' def setUpModule(): - global test_tmpdir + global test_tmpdir, abs_test_data_dir test_tmpdir = tempfile.mkdtemp() + abs_test_data_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), 'test_data')) def tearDownModule(): shutil.rmtree(test_tmpdir) -def get_absolute_path(path): - return os.path.join(os.path.dirname(__file__), path) +def test_data_path(path): + return os.path.join(abs_test_data_dir, path) class KconfigTest(unittest.TestCase): @@ -39,347 +45,803 @@ class KconfigTest(unittest.TestCase): self.assertTrue(kconfig0.is_subset_of(kconfig0)) kconfig1 = kunit_config.Kconfig() - kconfig1.add_entry(kunit_config.KconfigEntry('TEST', 'y')) + kconfig1.add_entry('TEST', 'y') self.assertTrue(kconfig1.is_subset_of(kconfig1)) self.assertTrue(kconfig0.is_subset_of(kconfig1)) self.assertFalse(kconfig1.is_subset_of(kconfig0)) def test_read_from_file(self): - kconfig = kunit_config.Kconfig() - kconfig_path = get_absolute_path( - 'test_data/test_read_from_file.kconfig') + kconfig_path = test_data_path('test_read_from_file.kconfig') - kconfig.read_from_file(kconfig_path) + kconfig = kunit_config.parse_file(kconfig_path) expected_kconfig = kunit_config.Kconfig() - expected_kconfig.add_entry( - kunit_config.KconfigEntry('UML', 'y')) - expected_kconfig.add_entry( - kunit_config.KconfigEntry('MMU', 'y')) - expected_kconfig.add_entry( - kunit_config.KconfigEntry('TEST', 'y')) - expected_kconfig.add_entry( - kunit_config.KconfigEntry('EXAMPLE_TEST', 'y')) - expected_kconfig.add_entry( - kunit_config.KconfigEntry('MK8', 'n')) - - self.assertEqual(kconfig.entries(), expected_kconfig.entries()) + expected_kconfig.add_entry('UML', 'y') + expected_kconfig.add_entry('MMU', 'y') + expected_kconfig.add_entry('TEST', 'y') + 
expected_kconfig.add_entry('EXAMPLE_TEST', 'y') + expected_kconfig.add_entry('MK8', 'n') + + self.assertEqual(kconfig, expected_kconfig) def test_write_to_file(self): kconfig_path = os.path.join(test_tmpdir, '.config') expected_kconfig = kunit_config.Kconfig() - expected_kconfig.add_entry( - kunit_config.KconfigEntry('UML', 'y')) - expected_kconfig.add_entry( - kunit_config.KconfigEntry('MMU', 'y')) - expected_kconfig.add_entry( - kunit_config.KconfigEntry('TEST', 'y')) - expected_kconfig.add_entry( - kunit_config.KconfigEntry('EXAMPLE_TEST', 'y')) - expected_kconfig.add_entry( - kunit_config.KconfigEntry('MK8', 'n')) + expected_kconfig.add_entry('UML', 'y') + expected_kconfig.add_entry('MMU', 'y') + expected_kconfig.add_entry('TEST', 'y') + expected_kconfig.add_entry('EXAMPLE_TEST', 'y') + expected_kconfig.add_entry('MK8', 'n') expected_kconfig.write_to_file(kconfig_path) - actual_kconfig = kunit_config.Kconfig() - actual_kconfig.read_from_file(kconfig_path) - - self.assertEqual(actual_kconfig.entries(), - expected_kconfig.entries()) + actual_kconfig = kunit_config.parse_file(kconfig_path) + self.assertEqual(actual_kconfig, expected_kconfig) class KUnitParserTest(unittest.TestCase): + def setUp(self): + self.print_mock = mock.patch('kunit_printer.Printer.print').start() + self.addCleanup(mock.patch.stopall) - def assertContains(self, needle, haystack): - for line in haystack: + def noPrintCallContains(self, substr: str): + for call in self.print_mock.mock_calls: + self.assertNotIn(substr, call.args[0]) + + def assertContains(self, needle: str, haystack: kunit_parser.LineStream): + # Clone the iterator so we can print the contents on failure. + copy, backup = itertools.tee(haystack) + for line in copy: if needle in line: return - raise AssertionError('"' + - str(needle) + '" not found in "' + str(haystack) + '"!') + raise AssertionError(f'"{needle}" not found in {list(backup)}!') def test_output_isolated_correctly(self): - log_path = get_absolute_path( - 'test_data/test_output_isolated_correctly.log') - file = open(log_path) - result = kunit_parser.isolate_kunit_output(file.readlines()) - self.assertContains('TAP version 14\n', result) - self.assertContains(' # Subtest: example', result) - self.assertContains(' 1..2', result) - self.assertContains(' ok 1 - example_simple_test', result) - self.assertContains(' ok 2 - example_mock_test', result) + log_path = test_data_path('test_output_isolated_correctly.log') + with open(log_path) as file: + result = kunit_parser.extract_tap_lines(file.readlines()) + self.assertContains('TAP version 14', result) + self.assertContains('# Subtest: example', result) + self.assertContains('1..2', result) + self.assertContains('ok 1 - example_simple_test', result) + self.assertContains('ok 2 - example_mock_test', result) self.assertContains('ok 1 - example', result) - file.close() def test_output_with_prefix_isolated_correctly(self): - log_path = get_absolute_path( - 'test_data/test_pound_sign.log') + log_path = test_data_path('test_pound_sign.log') with open(log_path) as file: - result = kunit_parser.isolate_kunit_output(file.readlines()) - self.assertContains('TAP version 14\n', result) - self.assertContains(' # Subtest: kunit-resource-test', result) - self.assertContains(' 1..5', result) - self.assertContains(' ok 1 - kunit_resource_test_init_resources', result) - self.assertContains(' ok 2 - kunit_resource_test_alloc_resource', result) - self.assertContains(' ok 3 - kunit_resource_test_destroy_resource', result) - self.assertContains(' foo bar #', result) - 
self.assertContains(' ok 4 - kunit_resource_test_cleanup_resources', result) - self.assertContains(' ok 5 - kunit_resource_test_proper_free_ordering', result) + result = kunit_parser.extract_tap_lines(file.readlines()) + self.assertContains('TAP version 14', result) + self.assertContains('# Subtest: kunit-resource-test', result) + self.assertContains('1..5', result) + self.assertContains('ok 1 - kunit_resource_test_init_resources', result) + self.assertContains('ok 2 - kunit_resource_test_alloc_resource', result) + self.assertContains('ok 3 - kunit_resource_test_destroy_resource', result) + self.assertContains('foo bar #', result) + self.assertContains('ok 4 - kunit_resource_test_cleanup_resources', result) + self.assertContains('ok 5 - kunit_resource_test_proper_free_ordering', result) self.assertContains('ok 1 - kunit-resource-test', result) - self.assertContains(' foo bar # non-kunit output', result) - self.assertContains(' # Subtest: kunit-try-catch-test', result) - self.assertContains(' 1..2', result) - self.assertContains(' ok 1 - kunit_test_try_catch_successful_try_no_catch', + self.assertContains('foo bar # non-kunit output', result) + self.assertContains('# Subtest: kunit-try-catch-test', result) + self.assertContains('1..2', result) + self.assertContains('ok 1 - kunit_test_try_catch_successful_try_no_catch', result) - self.assertContains(' ok 2 - kunit_test_try_catch_unsuccessful_try_does_catch', + self.assertContains('ok 2 - kunit_test_try_catch_unsuccessful_try_does_catch', result) self.assertContains('ok 2 - kunit-try-catch-test', result) - self.assertContains(' # Subtest: string-stream-test', result) - self.assertContains(' 1..3', result) - self.assertContains(' ok 1 - string_stream_test_empty_on_creation', result) - self.assertContains(' ok 2 - string_stream_test_not_empty_after_add', result) - self.assertContains(' ok 3 - string_stream_test_get_string', result) + self.assertContains('# Subtest: string-stream-test', result) + self.assertContains('1..3', result) + self.assertContains('ok 1 - string_stream_test_empty_on_creation', result) + self.assertContains('ok 2 - string_stream_test_not_empty_after_add', result) + self.assertContains('ok 3 - string_stream_test_get_string', result) self.assertContains('ok 3 - string-stream-test', result) def test_parse_successful_test_log(self): - all_passed_log = get_absolute_path( - 'test_data/test_is_test_passed-all_passed.log') - file = open(all_passed_log) - result = kunit_parser.parse_run_tests(file.readlines()) - self.assertEqual( - kunit_parser.TestStatus.SUCCESS, - result.status) - file.close() + all_passed_log = test_data_path('test_is_test_passed-all_passed.log') + with open(all_passed_log) as file: + result = kunit_parser.parse_run_tests(file.readlines()) + self.assertEqual(kunit_parser.TestStatus.SUCCESS, result.status) + self.assertEqual(result.counts.errors, 0) + + def test_parse_successful_nested_tests_log(self): + all_passed_log = test_data_path('test_is_test_passed-all_passed_nested.log') + with open(all_passed_log) as file: + result = kunit_parser.parse_run_tests(file.readlines()) + self.assertEqual(kunit_parser.TestStatus.SUCCESS, result.status) + self.assertEqual(result.counts.errors, 0) + + def test_kselftest_nested(self): + kselftest_log = test_data_path('test_is_test_passed-kselftest.log') + with open(kselftest_log) as file: + result = kunit_parser.parse_run_tests(file.readlines()) + self.assertEqual(kunit_parser.TestStatus.SUCCESS, result.status) + self.assertEqual(result.counts.errors, 0) def 
test_parse_failed_test_log(self): - failed_log = get_absolute_path( - 'test_data/test_is_test_passed-failure.log') - file = open(failed_log) - result = kunit_parser.parse_run_tests(file.readlines()) - self.assertEqual( - kunit_parser.TestStatus.FAILURE, - result.status) - file.close() + failed_log = test_data_path('test_is_test_passed-failure.log') + with open(failed_log) as file: + result = kunit_parser.parse_run_tests(file.readlines()) + self.assertEqual(kunit_parser.TestStatus.FAILURE, result.status) + self.assertEqual(result.counts.errors, 0) + + def test_no_header(self): + empty_log = test_data_path('test_is_test_passed-no_tests_run_no_header.log') + with open(empty_log) as file: + result = kunit_parser.parse_run_tests( + kunit_parser.extract_tap_lines(file.readlines())) + self.assertEqual(0, len(result.subtests)) + self.assertEqual(kunit_parser.TestStatus.FAILURE_TO_PARSE_TESTS, result.status) + self.assertEqual(result.counts.errors, 1) + + def test_missing_test_plan(self): + missing_plan_log = test_data_path('test_is_test_passed-' + 'missing_plan.log') + with open(missing_plan_log) as file: + result = kunit_parser.parse_run_tests( + kunit_parser.extract_tap_lines( + file.readlines())) + # A missing test plan is not an error. + self.assertEqual(result.counts, kunit_parser.TestCounts(passed=10, errors=0)) + self.assertEqual(kunit_parser.TestStatus.SUCCESS, result.status) def test_no_tests(self): - empty_log = get_absolute_path( - 'test_data/test_is_test_passed-no_tests_run.log') - file = open(empty_log) - result = kunit_parser.parse_run_tests( - kunit_parser.isolate_kunit_output(file.readlines())) - self.assertEqual(0, len(result.suites)) + header_log = test_data_path('test_is_test_passed-no_tests_run_with_header.log') + with open(header_log) as file: + result = kunit_parser.parse_run_tests( + kunit_parser.extract_tap_lines(file.readlines())) + self.assertEqual(0, len(result.subtests)) + self.assertEqual(kunit_parser.TestStatus.NO_TESTS, result.status) + self.assertEqual(result.counts.errors, 1) + + def test_no_tests_no_plan(self): + no_plan_log = test_data_path('test_is_test_passed-no_tests_no_plan.log') + with open(no_plan_log) as file: + result = kunit_parser.parse_run_tests( + kunit_parser.extract_tap_lines(file.readlines())) + self.assertEqual(0, len(result.subtests[0].subtests[0].subtests)) self.assertEqual( kunit_parser.TestStatus.NO_TESTS, - result.status) - file.close() + result.subtests[0].subtests[0].status) + self.assertEqual(result.counts, kunit_parser.TestCounts(passed=1, errors=1)) + def test_no_kunit_output(self): - crash_log = get_absolute_path( - 'test_data/test_insufficient_memory.log') - file = open(crash_log) - print_mock = mock.patch('builtins.print').start() - result = kunit_parser.parse_run_tests( - kunit_parser.isolate_kunit_output(file.readlines())) - print_mock.assert_any_call(StrContains("no kunit output detected")) + crash_log = test_data_path('test_insufficient_memory.log') + print_mock = mock.patch('kunit_printer.Printer.print').start() + with open(crash_log) as file: + result = kunit_parser.parse_run_tests( + kunit_parser.extract_tap_lines(file.readlines())) + print_mock.assert_any_call(StrContains('Could not find any KTAP output.')) print_mock.stop() - file.close() + self.assertEqual(0, len(result.subtests)) + self.assertEqual(result.counts.errors, 1) - def test_crashed_test(self): - crashed_log = get_absolute_path( - 'test_data/test_is_test_passed-crash.log') - file = open(crashed_log) - result = kunit_parser.parse_run_tests(file.readlines()) + def 
test_skipped_test(self): + skipped_log = test_data_path('test_skip_tests.log') + with open(skipped_log) as file: + result = kunit_parser.parse_run_tests(file.readlines()) + + # A skipped test does not fail the whole suite. + self.assertEqual(kunit_parser.TestStatus.SUCCESS, result.status) + self.assertEqual(result.counts, kunit_parser.TestCounts(passed=4, skipped=1)) + + def test_skipped_all_tests(self): + skipped_log = test_data_path('test_skip_all_tests.log') + with open(skipped_log) as file: + result = kunit_parser.parse_run_tests(file.readlines()) + + self.assertEqual(kunit_parser.TestStatus.SKIPPED, result.status) + self.assertEqual(result.counts, kunit_parser.TestCounts(skipped=5)) + + def test_ignores_hyphen(self): + hyphen_log = test_data_path('test_strip_hyphen.log') + with open(hyphen_log) as file: + result = kunit_parser.parse_run_tests(file.readlines()) + + # A skipped test does not fail the whole suite. + self.assertEqual(kunit_parser.TestStatus.SUCCESS, result.status) + self.assertEqual( + "sysctl_test", + result.subtests[0].name) self.assertEqual( - kunit_parser.TestStatus.TEST_CRASHED, - result.status) - file.close() + "example", + result.subtests[1].name) def test_ignores_prefix_printk_time(self): - prefix_log = get_absolute_path( - 'test_data/test_config_printk_time.log') + prefix_log = test_data_path('test_config_printk_time.log') with open(prefix_log) as file: result = kunit_parser.parse_run_tests(file.readlines()) - self.assertEqual('kunit-resource-test', result.suites[0].name) + self.assertEqual(kunit_parser.TestStatus.SUCCESS, result.status) + self.assertEqual('kunit-resource-test', result.subtests[0].name) + self.assertEqual(result.counts.errors, 0) def test_ignores_multiple_prefixes(self): - prefix_log = get_absolute_path( - 'test_data/test_multiple_prefixes.log') + prefix_log = test_data_path('test_multiple_prefixes.log') with open(prefix_log) as file: result = kunit_parser.parse_run_tests(file.readlines()) - self.assertEqual('kunit-resource-test', result.suites[0].name) + self.assertEqual(kunit_parser.TestStatus.SUCCESS, result.status) + self.assertEqual('kunit-resource-test', result.subtests[0].name) + self.assertEqual(result.counts.errors, 0) def test_prefix_mixed_kernel_output(self): - mixed_prefix_log = get_absolute_path( - 'test_data/test_interrupted_tap_output.log') + mixed_prefix_log = test_data_path('test_interrupted_tap_output.log') with open(mixed_prefix_log) as file: result = kunit_parser.parse_run_tests(file.readlines()) - self.assertEqual('kunit-resource-test', result.suites[0].name) + self.assertEqual(kunit_parser.TestStatus.SUCCESS, result.status) + self.assertEqual('kunit-resource-test', result.subtests[0].name) + self.assertEqual(result.counts.errors, 0) def test_prefix_poundsign(self): - pound_log = get_absolute_path('test_data/test_pound_sign.log') + pound_log = test_data_path('test_pound_sign.log') with open(pound_log) as file: result = kunit_parser.parse_run_tests(file.readlines()) - self.assertEqual('kunit-resource-test', result.suites[0].name) + self.assertEqual(kunit_parser.TestStatus.SUCCESS, result.status) + self.assertEqual('kunit-resource-test', result.subtests[0].name) + self.assertEqual(result.counts.errors, 0) def test_kernel_panic_end(self): - panic_log = get_absolute_path('test_data/test_kernel_panic_interrupt.log') + panic_log = test_data_path('test_kernel_panic_interrupt.log') with open(panic_log) as file: result = kunit_parser.parse_run_tests(file.readlines()) - self.assertEqual('kunit-resource-test', result.suites[0].name) + 
self.assertEqual(kunit_parser.TestStatus.TEST_CRASHED, result.status) + self.assertEqual('kunit-resource-test', result.subtests[0].name) + self.assertGreaterEqual(result.counts.errors, 1) def test_pound_no_prefix(self): - pound_log = get_absolute_path('test_data/test_pound_no_prefix.log') + pound_log = test_data_path('test_pound_no_prefix.log') with open(pound_log) as file: result = kunit_parser.parse_run_tests(file.readlines()) - self.assertEqual('kunit-resource-test', result.suites[0].name) + self.assertEqual(kunit_parser.TestStatus.SUCCESS, result.status) + self.assertEqual('kunit-resource-test', result.subtests[0].name) + self.assertEqual(result.counts.errors, 0) + + def test_summarize_failures(self): + output = """ + KTAP version 1 + 1..2 + # Subtest: all_failed_suite + 1..2 + not ok 1 - test1 + not ok 2 - test2 + not ok 1 - all_failed_suite + # Subtest: some_failed_suite + 1..2 + ok 1 - test1 + not ok 2 - test2 + not ok 1 - some_failed_suite + """ + result = kunit_parser.parse_run_tests(output.splitlines()) + self.assertEqual(kunit_parser.TestStatus.FAILURE, result.status) + + self.assertEqual(kunit_parser._summarize_failed_tests(result), + 'Failures: all_failed_suite, some_failed_suite.test2') + + def test_ktap_format(self): + ktap_log = test_data_path('test_parse_ktap_output.log') + with open(ktap_log) as file: + result = kunit_parser.parse_run_tests(file.readlines()) + self.assertEqual(result.counts, kunit_parser.TestCounts(passed=3)) + self.assertEqual('suite', result.subtests[0].name) + self.assertEqual('case_1', result.subtests[0].subtests[0].name) + self.assertEqual('case_2', result.subtests[0].subtests[1].name) + + def test_parse_subtest_header(self): + ktap_log = test_data_path('test_parse_subtest_header.log') + with open(ktap_log) as file: + kunit_parser.parse_run_tests(file.readlines()) + self.print_mock.assert_any_call(StrContains('suite (1 subtest)')) + + def test_parse_attributes(self): + ktap_log = test_data_path('test_parse_attributes.log') + with open(ktap_log) as file: + result = kunit_parser.parse_run_tests(file.readlines()) + + # Test should pass with no errors + self.assertEqual(result.counts, kunit_parser.TestCounts(passed=1, errors=0)) + self.assertEqual(kunit_parser.TestStatus.SUCCESS, result.status) + + # Ensure suite header is parsed correctly + self.print_mock.assert_any_call(StrContains('suite (1 subtest)')) + + # Ensure attributes in correct test log + self.assertContains('# module: example', result.subtests[0].log) + self.assertContains('# test.speed: slow', result.subtests[0].subtests[0].log) + + def test_show_test_output_on_failure(self): + output = """ + KTAP version 1 + 1..1 + Test output. + Indented more. 
+ not ok 1 test1 + """ + result = kunit_parser.parse_run_tests(output.splitlines()) + self.assertEqual(kunit_parser.TestStatus.FAILURE, result.status) + + self.print_mock.assert_any_call(StrContains('Test output.')) + self.print_mock.assert_any_call(StrContains(' Indented more.')) + self.noPrintCallContains('not ok 1 test1') + +def line_stream_from_strs(strs: Iterable[str]) -> kunit_parser.LineStream: + return kunit_parser.LineStream(enumerate(strs, start=1)) + +class LineStreamTest(unittest.TestCase): + + def test_basic(self): + stream = line_stream_from_strs(['hello', 'world']) + + self.assertTrue(stream, msg='Should be more input') + self.assertEqual(stream.line_number(), 1) + self.assertEqual(stream.peek(), 'hello') + self.assertEqual(stream.pop(), 'hello') + + self.assertTrue(stream, msg='Should be more input') + self.assertEqual(stream.line_number(), 2) + self.assertEqual(stream.peek(), 'world') + self.assertEqual(stream.pop(), 'world') + + self.assertFalse(stream, msg='Should be no more input') + with self.assertRaisesRegex(ValueError, 'LineStream: going past EOF'): + stream.pop() + + def test_is_lazy(self): + called_times = 0 + def generator(): + nonlocal called_times + for _ in range(1,5): + called_times += 1 + yield called_times, str(called_times) + + stream = kunit_parser.LineStream(generator()) + self.assertEqual(called_times, 0) + + self.assertEqual(stream.pop(), '1') + self.assertEqual(called_times, 1) + + self.assertEqual(stream.pop(), '2') + self.assertEqual(called_times, 2) + +class LinuxSourceTreeTest(unittest.TestCase): + + def setUp(self): + mock.patch.object(signal, 'signal').start() + self.addCleanup(mock.patch.stopall) + + def test_invalid_kunitconfig(self): + with self.assertRaisesRegex(kunit_kernel.ConfigError, 'nonexistent.* does not exist'): + kunit_kernel.LinuxSourceTree('', kunitconfig_paths=['/nonexistent_file']) + + def test_valid_kunitconfig(self): + with tempfile.NamedTemporaryFile('wt') as kunitconfig: + kunit_kernel.LinuxSourceTree('', kunitconfig_paths=[kunitconfig.name]) + + def test_dir_kunitconfig(self): + with tempfile.TemporaryDirectory('') as dir: + with open(os.path.join(dir, '.kunitconfig'), 'w'): + pass + kunit_kernel.LinuxSourceTree('', kunitconfig_paths=[dir]) + + def test_multiple_kunitconfig(self): + want_kconfig = kunit_config.Kconfig() + want_kconfig.add_entry('KUNIT', 'y') + want_kconfig.add_entry('KUNIT_TEST', 'm') + + with tempfile.TemporaryDirectory('') as dir: + other = os.path.join(dir, 'otherkunitconfig') + with open(os.path.join(dir, '.kunitconfig'), 'w') as f: + f.write('CONFIG_KUNIT=y') + with open(other, 'w') as f: + f.write('CONFIG_KUNIT_TEST=m') + pass + + tree = kunit_kernel.LinuxSourceTree('', kunitconfig_paths=[dir, other]) + self.assertTrue(want_kconfig.is_subset_of(tree._kconfig), msg=tree._kconfig) + + + def test_multiple_kunitconfig_invalid(self): + with tempfile.TemporaryDirectory('') as dir: + other = os.path.join(dir, 'otherkunitconfig') + with open(os.path.join(dir, '.kunitconfig'), 'w') as f: + f.write('CONFIG_KUNIT=y') + with open(other, 'w') as f: + f.write('CONFIG_KUNIT=m') + + with self.assertRaisesRegex(kunit_kernel.ConfigError, '(?s)Multiple values.*CONFIG_KUNIT'): + kunit_kernel.LinuxSourceTree('', kunitconfig_paths=[dir, other]) + + + def test_kconfig_add(self): + want_kconfig = kunit_config.Kconfig() + want_kconfig.add_entry('NOT_REAL', 'y') + + tree = kunit_kernel.LinuxSourceTree('', kconfig_add=['CONFIG_NOT_REAL=y']) + self.assertTrue(want_kconfig.is_subset_of(tree._kconfig), msg=tree._kconfig) + + def 
test_invalid_arch(self): + with self.assertRaisesRegex(kunit_kernel.ConfigError, 'not a valid arch, options are.*x86_64'): + kunit_kernel.LinuxSourceTree('', arch='invalid') + + def test_run_kernel_hits_exception(self): + def fake_start(unused_args, unused_build_dir): + return subprocess.Popen(['echo "hi\nbye"'], shell=True, text=True, stdout=subprocess.PIPE) + + with tempfile.TemporaryDirectory('') as build_dir: + tree = kunit_kernel.LinuxSourceTree(build_dir) + mock.patch.object(tree._ops, 'start', side_effect=fake_start).start() + + with self.assertRaises(ValueError): + for line in tree.run_kernel(build_dir=build_dir): + self.assertEqual(line, 'hi\n') + raise ValueError('uh oh, did not read all output') + + with open(kunit_kernel.get_outfile_path(build_dir), 'rt') as outfile: + self.assertEqual(outfile.read(), 'hi\nbye\n', msg='Missing some output') + + def test_build_reconfig_no_config(self): + with tempfile.TemporaryDirectory('') as build_dir: + with open(kunit_kernel.get_kunitconfig_path(build_dir), 'w') as f: + f.write('CONFIG_KUNIT=y') + + tree = kunit_kernel.LinuxSourceTree(build_dir) + # Stub out the source tree operations, so we don't have + # the defaults for any given architecture get in the + # way. + tree._ops = kunit_kernel.LinuxSourceTreeOperations('none', None) + mock_build_config = mock.patch.object(tree, 'build_config').start() + + # Should generate the .config + self.assertTrue(tree.build_reconfig(build_dir, make_options=[])) + mock_build_config.assert_called_once_with(build_dir, []) + + def test_build_reconfig_existing_config(self): + with tempfile.TemporaryDirectory('') as build_dir: + # Existing .config is a superset, should not touch it + with open(kunit_kernel.get_kunitconfig_path(build_dir), 'w') as f: + f.write('CONFIG_KUNIT=y') + with open(kunit_kernel.get_old_kunitconfig_path(build_dir), 'w') as f: + f.write('CONFIG_KUNIT=y') + with open(kunit_kernel.get_kconfig_path(build_dir), 'w') as f: + f.write('CONFIG_KUNIT=y\nCONFIG_KUNIT_TEST=y') + + tree = kunit_kernel.LinuxSourceTree(build_dir) + # Stub out the source tree operations, so we don't have + # the defaults for any given architecture get in the + # way. + tree._ops = kunit_kernel.LinuxSourceTreeOperations('none', None) + mock_build_config = mock.patch.object(tree, 'build_config').start() + + self.assertTrue(tree.build_reconfig(build_dir, make_options=[])) + self.assertEqual(mock_build_config.call_count, 0) + + def test_build_reconfig_remove_option(self): + with tempfile.TemporaryDirectory('') as build_dir: + # We removed CONFIG_KUNIT_TEST=y from our .kunitconfig... + with open(kunit_kernel.get_kunitconfig_path(build_dir), 'w') as f: + f.write('CONFIG_KUNIT=y') + with open(kunit_kernel.get_old_kunitconfig_path(build_dir), 'w') as f: + f.write('CONFIG_KUNIT=y\nCONFIG_KUNIT_TEST=y') + with open(kunit_kernel.get_kconfig_path(build_dir), 'w') as f: + f.write('CONFIG_KUNIT=y\nCONFIG_KUNIT_TEST=y') + + tree = kunit_kernel.LinuxSourceTree(build_dir) + # Stub out the source tree operations, so we don't have + # the defaults for any given architecture get in the + # way. + tree._ops = kunit_kernel.LinuxSourceTreeOperations('none', None) + mock_build_config = mock.patch.object(tree, 'build_config').start() + + # ... so we should trigger a call to build_config() + self.assertTrue(tree.build_reconfig(build_dir, make_options=[])) + mock_build_config.assert_called_once_with(build_dir, []) + + # TODO: add more test cases. 
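
The build_reconfig tests above all share one stubbing recipe: swap tree._ops for a bare LinuxSourceTreeOperations so per-arch defaults stay out of the way, activate a patch with mock.patch.object(...).start(), and let addCleanup(mock.patch.stopall) revert every started patch even when an assertion fails. A minimal, self-contained sketch of that start()/stopall pattern follows; FakeOps and the test names are hypothetical stand-ins, not part of this patch.

```python
import unittest
from unittest import mock


class FakeOps:
    """Hypothetical stand-in for LinuxSourceTreeOperations."""
    def make_olddefconfig(self, build_dir, make_options):
        raise AssertionError('tests are expected to stub this out')


class PatchObjectPatternTest(unittest.TestCase):
    def setUp(self):
        self.ops = FakeOps()
        # .start() applies the patch immediately (no context manager);
        # registering mock.patch.stopall as a cleanup reverts every
        # started patch, even if a test body raises halfway through.
        self.mock_make = mock.patch.object(self.ops, 'make_olddefconfig').start()
        self.addCleanup(mock.patch.stopall)

    def test_stub_records_calls(self):
        self.ops.make_olddefconfig('.kunit', [])
        self.mock_make.assert_called_once_with('.kunit', [])


if __name__ == '__main__':
    unittest.main()
```
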
+ class KUnitJsonTest(unittest.TestCase): + def setUp(self): + self.print_mock = mock.patch('kunit_printer.Printer.print').start() + self.addCleanup(mock.patch.stopall) def _json_for(self, log_file): - with(open(get_absolute_path(log_file))) as file: + with open(test_data_path(log_file)) as file: test_result = kunit_parser.parse_run_tests(file) json_obj = kunit_json.get_json_result( - test_result=test_result, - def_config='kunit_defconfig', - build_dir=None, - json_path='stdout') + test=test_result, + metadata=kunit_json.Metadata()) return json.loads(json_obj) def test_failed_test_json(self): - result = self._json_for( - 'test_data/test_is_test_passed-failure.log') + result = self._json_for('test_is_test_passed-failure.log') self.assertEqual( {'name': 'example_simple_test', 'status': 'FAIL'}, result["sub_groups"][1]["test_cases"][0]) def test_crashed_test_json(self): - result = self._json_for( - 'test_data/test_is_test_passed-crash.log') + result = self._json_for('test_kernel_panic_interrupt.log') self.assertEqual( - {'name': 'example_simple_test', 'status': 'ERROR'}, - result["sub_groups"][1]["test_cases"][0]) + {'name': '', 'status': 'ERROR'}, + result["sub_groups"][2]["test_cases"][1]) + + def test_skipped_test_json(self): + result = self._json_for('test_skip_tests.log') + self.assertEqual( + {'name': 'example_skip_test', 'status': 'SKIP'}, + result["sub_groups"][1]["test_cases"][1]) def test_no_tests_json(self): - result = self._json_for( - 'test_data/test_is_test_passed-no_tests_run.log') + result = self._json_for('test_is_test_passed-no_tests_run_with_header.log') self.assertEqual(0, len(result['sub_groups'])) + def test_nested_json(self): + result = self._json_for('test_is_test_passed-all_passed_nested.log') + self.assertEqual( + {'name': 'example_simple_test', 'status': 'PASS'}, + result["sub_groups"][0]["sub_groups"][0]["test_cases"][0]) + class StrContains(str): def __eq__(self, other): return self in other class KUnitMainTest(unittest.TestCase): def setUp(self): - path = get_absolute_path('test_data/test_is_test_passed-all_passed.log') - file = open(path) - all_passed_log = file.readlines() - self.print_patch = mock.patch('builtins.print') - self.print_mock = self.print_patch.start() - self.linux_source_mock = mock.Mock() - self.linux_source_mock.build_reconfig = mock.Mock(return_value=True) - self.linux_source_mock.build_um_kernel = mock.Mock(return_value=True) - self.linux_source_mock.run_kernel = mock.Mock(return_value=all_passed_log) - - def tearDown(self): - self.print_patch.stop() - pass + path = test_data_path('test_is_test_passed-all_passed.log') + with open(path) as file: + all_passed_log = file.readlines() + + self.print_mock = mock.patch('kunit_printer.Printer.print').start() + self.addCleanup(mock.patch.stopall) + + self.mock_linux_init = mock.patch.object(kunit_kernel, 'LinuxSourceTree').start() + self.linux_source_mock = self.mock_linux_init.return_value + self.linux_source_mock.build_reconfig.return_value = True + self.linux_source_mock.build_kernel.return_value = True + self.linux_source_mock.run_kernel.return_value = all_passed_log def test_config_passes_args_pass(self): - kunit.main(['config', '--build_dir=.kunit'], self.linux_source_mock) - assert self.linux_source_mock.build_reconfig.call_count == 1 - assert self.linux_source_mock.run_kernel.call_count == 0 + kunit.main(['config', '--build_dir=.kunit']) + self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1) + self.assertEqual(self.linux_source_mock.run_kernel.call_count, 0) def 
test_build_passes_args_pass(self): - kunit.main(['build'], self.linux_source_mock) - assert self.linux_source_mock.build_reconfig.call_count == 0 - self.linux_source_mock.build_um_kernel.assert_called_once_with(False, 8, '.kunit', None) - assert self.linux_source_mock.run_kernel.call_count == 0 + kunit.main(['build']) + self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1) + self.linux_source_mock.build_kernel.assert_called_once_with(kunit.get_default_jobs(), '.kunit', None) + self.assertEqual(self.linux_source_mock.run_kernel.call_count, 0) def test_exec_passes_args_pass(self): - kunit.main(['exec'], self.linux_source_mock) - assert self.linux_source_mock.build_reconfig.call_count == 0 - assert self.linux_source_mock.run_kernel.call_count == 1 - self.linux_source_mock.run_kernel.assert_called_once_with(build_dir='.kunit', timeout=300) + kunit.main(['exec']) + self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 0) + self.assertEqual(self.linux_source_mock.run_kernel.call_count, 1) + self.linux_source_mock.run_kernel.assert_called_once_with( + args=None, build_dir='.kunit', filter_glob='', filter='', filter_action=None, timeout=300) self.print_mock.assert_any_call(StrContains('Testing complete.')) def test_run_passes_args_pass(self): - kunit.main(['run'], self.linux_source_mock) - assert self.linux_source_mock.build_reconfig.call_count == 1 - assert self.linux_source_mock.run_kernel.call_count == 1 + kunit.main(['run']) + self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1) + self.assertEqual(self.linux_source_mock.run_kernel.call_count, 1) self.linux_source_mock.run_kernel.assert_called_once_with( - build_dir='.kunit', timeout=300) + args=None, build_dir='.kunit', filter_glob='', filter='', filter_action=None, timeout=300) self.print_mock.assert_any_call(StrContains('Testing complete.')) def test_exec_passes_args_fail(self): self.linux_source_mock.run_kernel = mock.Mock(return_value=[]) with self.assertRaises(SystemExit) as e: - kunit.main(['exec'], self.linux_source_mock) - assert type(e.exception) == SystemExit - assert e.exception.code == 1 + kunit.main(['exec']) + self.assertEqual(e.exception.code, 1) def test_run_passes_args_fail(self): self.linux_source_mock.run_kernel = mock.Mock(return_value=[]) with self.assertRaises(SystemExit) as e: - kunit.main(['run'], self.linux_source_mock) - assert type(e.exception) == SystemExit - assert e.exception.code == 1 - assert self.linux_source_mock.build_reconfig.call_count == 1 - assert self.linux_source_mock.run_kernel.call_count == 1 - self.print_mock.assert_any_call(StrContains(' 0 tests run')) + kunit.main(['run']) + self.assertEqual(e.exception.code, 1) + self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1) + self.assertEqual(self.linux_source_mock.run_kernel.call_count, 1) + self.print_mock.assert_any_call(StrContains('Could not find any KTAP output.')) + + def test_exec_no_tests(self): + self.linux_source_mock.run_kernel = mock.Mock(return_value=['TAP version 14', '1..0']) + with self.assertRaises(SystemExit) as e: + kunit.main(['run']) + self.assertEqual(e.exception.code, 1) + self.linux_source_mock.run_kernel.assert_called_once_with( + args=None, build_dir='.kunit', filter_glob='', filter='', filter_action=None, timeout=300) + self.print_mock.assert_any_call(StrContains(' 0 tests run!')) def test_exec_raw_output(self): self.linux_source_mock.run_kernel = mock.Mock(return_value=[]) - kunit.main(['exec', '--raw_output'], self.linux_source_mock) - assert 
self.linux_source_mock.run_kernel.call_count == 1 - for kall in self.print_mock.call_args_list: - assert kall != mock.call(StrContains('Testing complete.')) - assert kall != mock.call(StrContains(' 0 tests run')) + kunit.main(['exec', '--raw_output']) + self.assertEqual(self.linux_source_mock.run_kernel.call_count, 1) + for call in self.print_mock.call_args_list: + self.assertNotEqual(call, mock.call(StrContains('Testing complete.'))) + self.assertNotEqual(call, mock.call(StrContains(' 0 tests run!'))) def test_run_raw_output(self): self.linux_source_mock.run_kernel = mock.Mock(return_value=[]) - kunit.main(['run', '--raw_output'], self.linux_source_mock) - assert self.linux_source_mock.build_reconfig.call_count == 1 - assert self.linux_source_mock.run_kernel.call_count == 1 - for kall in self.print_mock.call_args_list: - assert kall != mock.call(StrContains('Testing complete.')) - assert kall != mock.call(StrContains(' 0 tests run')) + kunit.main(['run', '--raw_output']) + self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1) + self.assertEqual(self.linux_source_mock.run_kernel.call_count, 1) + for call in self.print_mock.call_args_list: + self.assertNotEqual(call, mock.call(StrContains('Testing complete.'))) + self.assertNotEqual(call, mock.call(StrContains(' 0 tests run!'))) + + def test_run_raw_output_kunit(self): + self.linux_source_mock.run_kernel = mock.Mock(return_value=[]) + kunit.main(['run', '--raw_output=kunit']) + self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1) + self.assertEqual(self.linux_source_mock.run_kernel.call_count, 1) + for call in self.print_mock.call_args_list: + self.assertNotEqual(call, mock.call(StrContains('Testing complete.'))) + self.assertNotEqual(call, mock.call(StrContains(' 0 tests run'))) + + def test_run_raw_output_invalid(self): + self.linux_source_mock.run_kernel = mock.Mock(return_value=[]) + with self.assertRaises(SystemExit) as e: + kunit.main(['run', '--raw_output=invalid']) + self.assertNotEqual(e.exception.code, 0) + + def test_run_raw_output_does_not_take_positional_args(self): + # --raw_output is a string flag, but we don't want it to consume + # any positional arguments, only ones after an '=' + self.linux_source_mock.run_kernel = mock.Mock(return_value=[]) + kunit.main(['run', '--raw_output', 'filter_glob']) + self.linux_source_mock.run_kernel.assert_called_once_with( + args=None, build_dir='.kunit', filter_glob='filter_glob', filter='', filter_action=None, timeout=300) def test_exec_timeout(self): timeout = 3453 - kunit.main(['exec', '--timeout', str(timeout)], self.linux_source_mock) - self.linux_source_mock.run_kernel.assert_called_once_with(build_dir='.kunit', timeout=timeout) + kunit.main(['exec', '--timeout', str(timeout)]) + self.linux_source_mock.run_kernel.assert_called_once_with( + args=None, build_dir='.kunit', filter_glob='', filter='', filter_action=None, timeout=timeout) self.print_mock.assert_any_call(StrContains('Testing complete.')) def test_run_timeout(self): timeout = 3453 - kunit.main(['run', '--timeout', str(timeout)], self.linux_source_mock) - assert self.linux_source_mock.build_reconfig.call_count == 1 + kunit.main(['run', '--timeout', str(timeout)]) + self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1) self.linux_source_mock.run_kernel.assert_called_once_with( - build_dir='.kunit', timeout=timeout) + args=None, build_dir='.kunit', filter_glob='', filter='', filter_action=None, timeout=timeout) self.print_mock.assert_any_call(StrContains('Testing complete.')) def 
test_run_builddir(self): build_dir = '.kunit' - kunit.main(['run', '--build_dir=.kunit'], self.linux_source_mock) - assert self.linux_source_mock.build_reconfig.call_count == 1 + kunit.main(['run', '--build_dir=.kunit']) + self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1) self.linux_source_mock.run_kernel.assert_called_once_with( - build_dir=build_dir, timeout=300) + args=None, build_dir=build_dir, filter_glob='', filter='', filter_action=None, timeout=300) self.print_mock.assert_any_call(StrContains('Testing complete.')) def test_config_builddir(self): build_dir = '.kunit' - kunit.main(['config', '--build_dir', build_dir], self.linux_source_mock) - assert self.linux_source_mock.build_reconfig.call_count == 1 + kunit.main(['config', '--build_dir', build_dir]) + self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1) def test_build_builddir(self): build_dir = '.kunit' - kunit.main(['build', '--build_dir', build_dir], self.linux_source_mock) - self.linux_source_mock.build_um_kernel.assert_called_once_with(False, 8, build_dir, None) + jobs = kunit.get_default_jobs() + kunit.main(['build', '--build_dir', build_dir]) + self.linux_source_mock.build_kernel.assert_called_once_with(jobs, build_dir, None) def test_exec_builddir(self): build_dir = '.kunit' - kunit.main(['exec', '--build_dir', build_dir], self.linux_source_mock) - self.linux_source_mock.run_kernel.assert_called_once_with(build_dir=build_dir, timeout=300) + kunit.main(['exec', '--build_dir', build_dir]) + self.linux_source_mock.run_kernel.assert_called_once_with( + args=None, build_dir=build_dir, filter_glob='', filter='', filter_action=None, timeout=300) + self.print_mock.assert_any_call(StrContains('Testing complete.')) + + def test_run_kunitconfig(self): + kunit.main(['run', '--kunitconfig=mykunitconfig']) + # Just verify that we parsed and initialized it correctly here. + self.mock_linux_init.assert_called_once_with('.kunit', + kunitconfig_paths=['mykunitconfig'], + kconfig_add=None, + arch='um', + cross_compile=None, + qemu_config_path=None, + extra_qemu_args=[]) + + def test_config_kunitconfig(self): + kunit.main(['config', '--kunitconfig=mykunitconfig']) + # Just verify that we parsed and initialized it correctly here. + self.mock_linux_init.assert_called_once_with('.kunit', + kunitconfig_paths=['mykunitconfig'], + kconfig_add=None, + arch='um', + cross_compile=None, + qemu_config_path=None, + extra_qemu_args=[]) + + def test_config_alltests(self): + kunit.main(['config', '--kunitconfig=mykunitconfig', '--alltests']) + # Just verify that we parsed and initialized it correctly here. + self.mock_linux_init.assert_called_once_with('.kunit', + kunitconfig_paths=[kunit_kernel.ALL_TESTS_CONFIG_PATH, 'mykunitconfig'], + kconfig_add=None, + arch='um', + cross_compile=None, + qemu_config_path=None, + extra_qemu_args=[]) + + + @mock.patch.object(kunit_kernel, 'LinuxSourceTree') + def test_run_multiple_kunitconfig(self, mock_linux_init): + mock_linux_init.return_value = self.linux_source_mock + kunit.main(['run', '--kunitconfig=mykunitconfig', '--kunitconfig=other']) + # Just verify that we parsed and initialized it correctly here. 
+ mock_linux_init.assert_called_once_with('.kunit', + kunitconfig_paths=['mykunitconfig', 'other'], + kconfig_add=None, + arch='um', + cross_compile=None, + qemu_config_path=None, + extra_qemu_args=[]) + + def test_run_kconfig_add(self): + kunit.main(['run', '--kconfig_add=CONFIG_KASAN=y', '--kconfig_add=CONFIG_KCSAN=y']) + # Just verify that we parsed and initialized it correctly here. + self.mock_linux_init.assert_called_once_with('.kunit', + kunitconfig_paths=[], + kconfig_add=['CONFIG_KASAN=y', 'CONFIG_KCSAN=y'], + arch='um', + cross_compile=None, + qemu_config_path=None, + extra_qemu_args=[]) + + def test_run_qemu_args(self): + kunit.main(['run', '--arch=x86_64', '--qemu_args', '-m 2048']) + # Just verify that we parsed and initialized it correctly here. + self.mock_linux_init.assert_called_once_with('.kunit', + kunitconfig_paths=[], + kconfig_add=None, + arch='x86_64', + cross_compile=None, + qemu_config_path=None, + extra_qemu_args=['-m', '2048']) + + def test_run_kernel_args(self): + kunit.main(['run', '--kernel_args=a=1', '--kernel_args=b=2']) + self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1) + self.linux_source_mock.run_kernel.assert_called_once_with( + args=['a=1','b=2'], build_dir='.kunit', filter_glob='', filter='', filter_action=None, timeout=300) self.print_mock.assert_any_call(StrContains('Testing complete.')) + def test_list_tests(self): + want = ['suite.test1', 'suite.test2', 'suite2.test1'] + self.linux_source_mock.run_kernel.return_value = ['TAP version 14', 'init: random output'] + want + + got = kunit._list_tests(self.linux_source_mock, + kunit.KunitExecRequest(None, None, '.kunit', 300, 'suite*', '', None, None, 'suite', False, False)) + self.assertEqual(got, want) + # Should respect the user's filter glob when listing tests. + self.linux_source_mock.run_kernel.assert_called_once_with( + args=['kunit.action=list'], build_dir='.kunit', filter_glob='suite*', filter='', filter_action=None, timeout=300) + + @mock.patch.object(kunit, '_list_tests') + def test_run_isolated_by_suite(self, mock_tests): + mock_tests.return_value = ['suite.test1', 'suite.test2', 'suite2.test1'] + kunit.main(['exec', '--run_isolated=suite', 'suite*.test*']) + + # Should respect the user's filter glob when listing tests. + mock_tests.assert_called_once_with(mock.ANY, + kunit.KunitExecRequest(None, None, '.kunit', 300, 'suite*.test*', '', None, None, 'suite', False, False)) + self.linux_source_mock.run_kernel.assert_has_calls([ + mock.call(args=None, build_dir='.kunit', filter_glob='suite.test*', filter='', filter_action=None, timeout=300), + mock.call(args=None, build_dir='.kunit', filter_glob='suite2.test*', filter='', filter_action=None, timeout=300), + ]) + + @mock.patch.object(kunit, '_list_tests') + def test_run_isolated_by_test(self, mock_tests): + mock_tests.return_value = ['suite.test1', 'suite.test2', 'suite2.test1'] + kunit.main(['exec', '--run_isolated=test', 'suite*']) + + # Should respect the user's filter glob when listing tests. 
+ mock_tests.assert_called_once_with(mock.ANY, + kunit.KunitExecRequest(None, None, '.kunit', 300, 'suite*', '', None, None, 'test', False, False)) + self.linux_source_mock.run_kernel.assert_has_calls([ + mock.call(args=None, build_dir='.kunit', filter_glob='suite.test1', filter='', filter_action=None, timeout=300), + mock.call(args=None, build_dir='.kunit', filter_glob='suite.test2', filter='', filter_action=None, timeout=300), + mock.call(args=None, build_dir='.kunit', filter_glob='suite2.test1', filter='', filter_action=None, timeout=300), + ]) + if __name__ == '__main__': unittest.main() diff --git a/tools/testing/kunit/mypy.ini b/tools/testing/kunit/mypy.ini new file mode 100644 index 000000000000..ddd288309efa --- /dev/null +++ b/tools/testing/kunit/mypy.ini @@ -0,0 +1,6 @@ +[mypy] +strict = True + +# E.g. we can't write subprocess.Popen[str] until Python 3.9+. +# But kunit.py tries to support Python 3.7+, so let's disable it. +disable_error_code = type-arg diff --git a/tools/testing/kunit/qemu_config.py b/tools/testing/kunit/qemu_config.py new file mode 100644 index 000000000000..b1fba9016eed --- /dev/null +++ b/tools/testing/kunit/qemu_config.py @@ -0,0 +1,20 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Collection of configs for building non-UML kernels and running them on QEMU. +# +# Copyright (C) 2021, Google LLC. +# Author: Brendan Higgins <brendanhiggins@google.com> + +from dataclasses import dataclass +from typing import List + + +@dataclass(frozen=True) +class QemuArchParams: + linux_arch: str + kconfig: str + qemu_arch: str + kernel_path: str + kernel_command_line: str + extra_qemu_params: List[str] + serial: str = 'stdio' diff --git a/tools/testing/kunit/qemu_configs/alpha.py b/tools/testing/kunit/qemu_configs/alpha.py new file mode 100644 index 000000000000..3ac846e03a6b --- /dev/null +++ b/tools/testing/kunit/qemu_configs/alpha.py @@ -0,0 +1,10 @@ +from ..qemu_config import QemuArchParams + +QEMU_ARCH = QemuArchParams(linux_arch='alpha', + kconfig=''' +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y''', + qemu_arch='alpha', + kernel_path='arch/alpha/boot/vmlinux', + kernel_command_line='console=ttyS0', + extra_qemu_params=[]) diff --git a/tools/testing/kunit/qemu_configs/arm.py b/tools/testing/kunit/qemu_configs/arm.py new file mode 100644 index 000000000000..db2160200566 --- /dev/null +++ b/tools/testing/kunit/qemu_configs/arm.py @@ -0,0 +1,13 @@ +from ..qemu_config import QemuArchParams + +QEMU_ARCH = QemuArchParams(linux_arch='arm', + kconfig=''' +CONFIG_ARCH_VIRT=y +CONFIG_SERIAL_AMBA_PL010=y +CONFIG_SERIAL_AMBA_PL010_CONSOLE=y +CONFIG_SERIAL_AMBA_PL011=y +CONFIG_SERIAL_AMBA_PL011_CONSOLE=y''', + qemu_arch='arm', + kernel_path='arch/arm/boot/zImage', + kernel_command_line='console=ttyAMA0', + extra_qemu_params=['-machine', 'virt']) diff --git a/tools/testing/kunit/qemu_configs/arm64.py b/tools/testing/kunit/qemu_configs/arm64.py new file mode 100644 index 000000000000..d3ff27024755 --- /dev/null +++ b/tools/testing/kunit/qemu_configs/arm64.py @@ -0,0 +1,12 @@ +from ..qemu_config import QemuArchParams + +QEMU_ARCH = QemuArchParams(linux_arch='arm64', + kconfig=''' +CONFIG_SERIAL_AMBA_PL010=y +CONFIG_SERIAL_AMBA_PL010_CONSOLE=y +CONFIG_SERIAL_AMBA_PL011=y +CONFIG_SERIAL_AMBA_PL011_CONSOLE=y''', + qemu_arch='aarch64', + kernel_path='arch/arm64/boot/Image.gz', + kernel_command_line='console=ttyAMA0', + extra_qemu_params=['-machine', 'virt', '-cpu', 'max,pauth-impdef=on']) diff --git a/tools/testing/kunit/qemu_configs/i386.py b/tools/testing/kunit/qemu_configs/i386.py 
new file mode 100644 index 000000000000..4463ebefd567 --- /dev/null +++ b/tools/testing/kunit/qemu_configs/i386.py @@ -0,0 +1,10 @@ +from ..qemu_config import QemuArchParams + +QEMU_ARCH = QemuArchParams(linux_arch='i386', + kconfig=''' +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y''', + qemu_arch='i386', + kernel_path='arch/x86/boot/bzImage', + kernel_command_line='console=ttyS0', + extra_qemu_params=[]) diff --git a/tools/testing/kunit/qemu_configs/m68k.py b/tools/testing/kunit/qemu_configs/m68k.py new file mode 100644 index 000000000000..287fc386f8a7 --- /dev/null +++ b/tools/testing/kunit/qemu_configs/m68k.py @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: GPL-2.0-only +from ..qemu_config import QemuArchParams + +QEMU_ARCH = QemuArchParams(linux_arch='m68k', + kconfig=''' +CONFIG_VIRT=y''', + qemu_arch='m68k', + kernel_path='vmlinux', + kernel_command_line='console=hvc0', + extra_qemu_params=['-machine', 'virt']) diff --git a/tools/testing/kunit/qemu_configs/powerpc.py b/tools/testing/kunit/qemu_configs/powerpc.py new file mode 100644 index 000000000000..7ec38d4131f7 --- /dev/null +++ b/tools/testing/kunit/qemu_configs/powerpc.py @@ -0,0 +1,12 @@ +from ..qemu_config import QemuArchParams + +QEMU_ARCH = QemuArchParams(linux_arch='powerpc', + kconfig=''' +CONFIG_PPC64=y +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_HVC_CONSOLE=y''', + qemu_arch='ppc64', + kernel_path='vmlinux', + kernel_command_line='console=ttyS0', + extra_qemu_params=['-M', 'pseries', '-cpu', 'power8']) diff --git a/tools/testing/kunit/qemu_configs/riscv.py b/tools/testing/kunit/qemu_configs/riscv.py new file mode 100644 index 000000000000..12a1d525978a --- /dev/null +++ b/tools/testing/kunit/qemu_configs/riscv.py @@ -0,0 +1,28 @@ +from ..qemu_config import QemuArchParams +import os +import os.path +import sys + +OPENSBI_FILE = 'opensbi-riscv64-generic-fw_dynamic.bin' +OPENSBI_PATH = '/usr/share/qemu/' + OPENSBI_FILE + +if not os.path.isfile(OPENSBI_PATH): + print('\n\nOpenSBI bios was not found in "' + OPENSBI_PATH + '".\n' + 'Please ensure that qemu-system-riscv is installed, or edit the path in "qemu_configs/riscv.py"\n') + sys.exit() + +QEMU_ARCH = QemuArchParams(linux_arch='riscv', + kconfig=''' +CONFIG_SOC_VIRT=y +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_OF_PLATFORM=y +CONFIG_RISCV_SBI_V01=y +CONFIG_SERIAL_EARLYCON_RISCV_SBI=y''', + qemu_arch='riscv64', + kernel_path='arch/riscv/boot/Image', + kernel_command_line='console=ttyS0', + extra_qemu_params=[ + '-machine', 'virt', + '-cpu', 'rv64', + '-bios', OPENSBI_PATH]) diff --git a/tools/testing/kunit/qemu_configs/s390.py b/tools/testing/kunit/qemu_configs/s390.py new file mode 100644 index 000000000000..98fa4fb60c0a --- /dev/null +++ b/tools/testing/kunit/qemu_configs/s390.py @@ -0,0 +1,14 @@ +from ..qemu_config import QemuArchParams + +QEMU_ARCH = QemuArchParams(linux_arch='s390', + kconfig=''' +CONFIG_EXPERT=y +CONFIG_TUNE_ZEC12=y +CONFIG_NUMA=y +CONFIG_MODULES=y''', + qemu_arch='s390x', + kernel_path='arch/s390/boot/bzImage', + kernel_command_line='console=ttyS0', + extra_qemu_params=[ + '-machine', 's390-ccw-virtio', + '-cpu', 'qemu',]) diff --git a/tools/testing/kunit/qemu_configs/sh.py b/tools/testing/kunit/qemu_configs/sh.py new file mode 100644 index 000000000000..78a474a5b95f --- /dev/null +++ b/tools/testing/kunit/qemu_configs/sh.py @@ -0,0 +1,17 @@ +# SPDX-License-Identifier: GPL-2.0-only +from ..qemu_config import QemuArchParams + +QEMU_ARCH = QemuArchParams(linux_arch='sh', + kconfig=''' 
+CONFIG_CPU_SUBTYPE_SH7751R=y +CONFIG_MEMORY_START=0x0c000000 +CONFIG_SH_RTS7751R2D=y +CONFIG_RTS7751R2D_PLUS=y +CONFIG_SERIAL_SH_SCI=y''', + qemu_arch='sh4', + kernel_path='arch/sh/boot/zImage', + kernel_command_line='console=ttySC1', + serial='null', + extra_qemu_params=[ + '-machine', 'r2d', + '-serial', 'mon:stdio']) diff --git a/tools/testing/kunit/qemu_configs/sparc.py b/tools/testing/kunit/qemu_configs/sparc.py new file mode 100644 index 000000000000..e975c4331a7c --- /dev/null +++ b/tools/testing/kunit/qemu_configs/sparc.py @@ -0,0 +1,10 @@ +from ..qemu_config import QemuArchParams + +QEMU_ARCH = QemuArchParams(linux_arch='sparc', + kconfig=''' +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y''', + qemu_arch='sparc', + kernel_path='arch/sparc/boot/zImage', + kernel_command_line='console=ttyS0 mem=256M', + extra_qemu_params=['-m', '256']) diff --git a/tools/testing/kunit/qemu_configs/x86_64.py b/tools/testing/kunit/qemu_configs/x86_64.py new file mode 100644 index 000000000000..dc7949076863 --- /dev/null +++ b/tools/testing/kunit/qemu_configs/x86_64.py @@ -0,0 +1,10 @@ +from ..qemu_config import QemuArchParams + +QEMU_ARCH = QemuArchParams(linux_arch='x86_64', + kconfig=''' +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y''', + qemu_arch='x86_64', + kernel_path='arch/x86/boot/bzImage', + kernel_command_line='console=ttyS0', + extra_qemu_params=[]) diff --git a/tools/testing/kunit/run_checks.py b/tools/testing/kunit/run_checks.py new file mode 100755 index 000000000000..c6d494ea3373 --- /dev/null +++ b/tools/testing/kunit/run_checks.py @@ -0,0 +1,81 @@ +#!/usr/bin/env python3 +# SPDX-License-Identifier: GPL-2.0 +# +# This file runs some basic checks to verify kunit works. +# It is only of interest if you're making changes to KUnit itself. +# +# Copyright (C) 2021, Google LLC. +# Author: Daniel Latypov <dlatypov@google.com> + +from concurrent import futures +import datetime +import os +import shutil +import subprocess +import sys +import textwrap +from typing import Dict, List, Sequence + +ABS_TOOL_PATH = os.path.abspath(os.path.dirname(__file__)) +TIMEOUT = datetime.timedelta(minutes=5).total_seconds() + +commands: Dict[str, Sequence[str]] = { + 'kunit_tool_test.py': ['./kunit_tool_test.py'], + 'kunit smoke test': ['./kunit.py', 'run', '--kunitconfig=lib/kunit', '--build_dir=kunit_run_checks'], + 'pytype': ['/bin/sh', '-c', 'pytype *.py'], + 'mypy': ['mypy', '--config-file', 'mypy.ini', '--exclude', '_test.py$', '--exclude', 'qemu_configs/', '.'], +} + +# The user might not have mypy or pytype installed, skip them if so. 
+# Note: you can install both via `$ pip install mypy pytype` +necessary_deps : Dict[str, str] = { + 'pytype': 'pytype', + 'mypy': 'mypy', +} + +def main(argv: Sequence[str]) -> None: + if argv: + raise RuntimeError('This script takes no arguments') + + future_to_name: Dict[futures.Future[None], str] = {} + executor = futures.ThreadPoolExecutor(max_workers=len(commands)) + for name, argv in commands.items(): + if name in necessary_deps and shutil.which(necessary_deps[name]) is None: + print(f'{name}: SKIPPED, {necessary_deps[name]} not in $PATH') + continue + f = executor.submit(run_cmd, argv) + future_to_name[f] = name + + has_failures = False + print(f'Waiting on {len(future_to_name)} checks ({", ".join(future_to_name.values())})...') + for f in futures.as_completed(future_to_name.keys()): + name = future_to_name[f] + ex = f.exception() + if not ex: + print(f'{name}: PASSED') + continue + + has_failures = True + if isinstance(ex, subprocess.TimeoutExpired): + print(f'{name}: TIMED OUT') + elif isinstance(ex, subprocess.CalledProcessError): + print(f'{name}: FAILED') + else: + print(f'{name}: unexpected exception: {ex}') + continue + + output = ex.output + if output: + print(textwrap.indent(output.decode(), '> ')) + executor.shutdown() + + if has_failures: + sys.exit(1) + + +def run_cmd(argv: Sequence[str]) -> None: + subprocess.check_output(argv, stderr=subprocess.STDOUT, cwd=ABS_TOOL_PATH, timeout=TIMEOUT) + + +if __name__ == '__main__': + main(sys.argv[1:]) diff --git a/tools/testing/kunit/test_data/test_config_printk_time.log b/tools/testing/kunit/test_data/test_config_printk_time.log index c02ca773946d..6bdb57f76eac 100644 --- a/tools/testing/kunit/test_data/test_config_printk_time.log +++ b/tools/testing/kunit/test_data/test_config_printk_time.log @@ -1,6 +1,7 @@ [ 0.060000] printk: console [mc-1] enabled [ 0.060000] random: get_random_bytes called from init_oops_id+0x35/0x40 with crng_init=0 [ 0.060000] TAP version 14 +[ 0.060000] 1..3 [ 0.060000] # Subtest: kunit-resource-test [ 0.060000] 1..5 [ 0.060000] ok 1 - kunit_resource_test_init_resources @@ -28,4 +29,4 @@ [ 0.060000] Stack: [ 0.060000] 602086f8 601bc260 705c0000 705c0000 [ 0.060000] 602086f8 6005fcec 705c0000 6002c6ab -[ 0.060000] 6005fcec 601bc260 705c0000 3000000010
\ No newline at end of file +[ 0.060000] 6005fcec 601bc260 705c0000 3000000010 diff --git a/tools/testing/kunit/test_data/test_interrupted_tap_output.log b/tools/testing/kunit/test_data/test_interrupted_tap_output.log index 5c73fb3a1c6f..1fb677728abe 100644 --- a/tools/testing/kunit/test_data/test_interrupted_tap_output.log +++ b/tools/testing/kunit/test_data/test_interrupted_tap_output.log @@ -1,6 +1,7 @@ [ 0.060000] printk: console [mc-1] enabled [ 0.060000] random: get_random_bytes called from init_oops_id+0x35/0x40 with crng_init=0 [ 0.060000] TAP version 14 +[ 0.060000] 1..3 [ 0.060000] # Subtest: kunit-resource-test [ 0.060000] 1..5 [ 0.060000] ok 1 - kunit_resource_test_init_resources @@ -34,4 +35,4 @@ [ 0.060000] Stack: [ 0.060000] 602086f8 601bc260 705c0000 705c0000 [ 0.060000] 602086f8 6005fcec 705c0000 6002c6ab -[ 0.060000] 6005fcec 601bc260 705c0000 3000000010
\ No newline at end of file +[ 0.060000] 6005fcec 601bc260 705c0000 3000000010 diff --git a/tools/testing/kunit/test_data/test_is_test_passed-all_passed_nested.log b/tools/testing/kunit/test_data/test_is_test_passed-all_passed_nested.log new file mode 100644 index 000000000000..9d5b04fe43a6 --- /dev/null +++ b/tools/testing/kunit/test_data/test_is_test_passed-all_passed_nested.log @@ -0,0 +1,34 @@ +TAP version 14 +1..2 + # Subtest: sysctl_test + 1..4 + # sysctl_test_dointvec_null_tbl_data: sysctl_test_dointvec_null_tbl_data passed + ok 1 - sysctl_test_dointvec_null_tbl_data + # Subtest: example + 1..2 + init_suite + # example_simple_test: initializing + # example_simple_test: example_simple_test passed + ok 1 - example_simple_test + # example_mock_test: initializing + # example_mock_test: example_mock_test passed + ok 2 - example_mock_test + kunit example: all tests passed + ok 2 - example + # sysctl_test_dointvec_table_len_is_zero: sysctl_test_dointvec_table_len_is_zero passed + ok 3 - sysctl_test_dointvec_table_len_is_zero + # sysctl_test_dointvec_table_read_but_position_set: sysctl_test_dointvec_table_read_but_position_set passed + ok 4 - sysctl_test_dointvec_table_read_but_position_set +kunit sysctl_test: all tests passed +ok 1 - sysctl_test + # Subtest: example + 1..2 +init_suite + # example_simple_test: initializing + # example_simple_test: example_simple_test passed + ok 1 - example_simple_test + # example_mock_test: initializing + # example_mock_test: example_mock_test passed + ok 2 - example_mock_test +kunit example: all tests passed +ok 2 - example diff --git a/tools/testing/kunit/test_data/test_is_test_passed-crash.log b/tools/testing/kunit/test_data/test_is_test_passed-crash.log deleted file mode 100644 index 4d97f6708c4a..000000000000 --- a/tools/testing/kunit/test_data/test_is_test_passed-crash.log +++ /dev/null @@ -1,70 +0,0 @@ -printk: console [tty0] enabled -printk: console [mc-1] enabled -TAP version 14 -1..2 - # Subtest: sysctl_test - 1..8 - # sysctl_test_dointvec_null_tbl_data: sysctl_test_dointvec_null_tbl_data passed - ok 1 - sysctl_test_dointvec_null_tbl_data - # sysctl_test_dointvec_table_maxlen_unset: sysctl_test_dointvec_table_maxlen_unset passed - ok 2 - sysctl_test_dointvec_table_maxlen_unset - # sysctl_test_dointvec_table_len_is_zero: sysctl_test_dointvec_table_len_is_zero passed - ok 3 - sysctl_test_dointvec_table_len_is_zero - # sysctl_test_dointvec_table_read_but_position_set: sysctl_test_dointvec_table_read_but_position_set passed - ok 4 - sysctl_test_dointvec_table_read_but_position_set - # sysctl_test_dointvec_happy_single_positive: sysctl_test_dointvec_happy_single_positive passed - ok 5 - sysctl_test_dointvec_happy_single_positive - # sysctl_test_dointvec_happy_single_negative: sysctl_test_dointvec_happy_single_negative passed - ok 6 - sysctl_test_dointvec_happy_single_negative - # sysctl_test_dointvec_single_less_int_min: sysctl_test_dointvec_single_less_int_min passed - ok 7 - sysctl_test_dointvec_single_less_int_min - # sysctl_test_dointvec_single_greater_int_max: sysctl_test_dointvec_single_greater_int_max passed - ok 8 - sysctl_test_dointvec_single_greater_int_max -kunit sysctl_test: all tests passed -ok 1 - sysctl_test - # Subtest: example - 1..2 -init_suite - # example_simple_test: initializing -Stack: - 6016f7db 6f81bd30 6f81bdd0 60021450 - 6024b0e8 60021440 60018bbe 16f81bdc0 - 00000001 6f81bd30 6f81bd20 6f81bdd0 -Call Trace: - [<6016f7db>] ? kunit_try_run_case+0xab/0xf0 - [<60021450>] ? set_signals+0x0/0x60 - [<60021440>] ? 
get_signals+0x0/0x10 - [<60018bbe>] ? kunit_um_run_try_catch+0x5e/0xc0 - [<60021450>] ? set_signals+0x0/0x60 - [<60021440>] ? get_signals+0x0/0x10 - [<60018bb3>] ? kunit_um_run_try_catch+0x53/0xc0 - [<6016f321>] ? kunit_run_case_catch_errors+0x121/0x1a0 - [<60018b60>] ? kunit_um_run_try_catch+0x0/0xc0 - [<600189e0>] ? kunit_um_throw+0x0/0x180 - [<6016f730>] ? kunit_try_run_case+0x0/0xf0 - [<6016f600>] ? kunit_catch_run_case+0x0/0x130 - [<6016edd0>] ? kunit_vprintk+0x0/0x30 - [<6016ece0>] ? kunit_fail+0x0/0x40 - [<6016eca0>] ? kunit_abort+0x0/0x40 - [<6016ed20>] ? kunit_printk_emit+0x0/0xb0 - [<6016f200>] ? kunit_run_case_catch_errors+0x0/0x1a0 - [<6016f46e>] ? kunit_run_tests+0xce/0x260 - [<6005b390>] ? unregister_console+0x0/0x190 - [<60175b70>] ? suite_kunit_initexample_test_suite+0x0/0x20 - [<60001cbb>] ? do_one_initcall+0x0/0x197 - [<60001d47>] ? do_one_initcall+0x8c/0x197 - [<6005cd20>] ? irq_to_desc+0x0/0x30 - [<60002005>] ? kernel_init_freeable+0x1b3/0x272 - [<6005c5ec>] ? printk+0x0/0x9b - [<601c0086>] ? kernel_init+0x26/0x160 - [<60014442>] ? new_thread_handler+0x82/0xc0 - - # example_simple_test: kunit test case crashed! - # example_simple_test: example_simple_test failed - not ok 1 - example_simple_test - # example_mock_test: initializing - # example_mock_test: example_mock_test passed - ok 2 - example_mock_test -kunit example: one or more tests failed -not ok 2 - example -List of all partitions: diff --git a/tools/testing/kunit/test_data/test_is_test_passed-kselftest.log b/tools/testing/kunit/test_data/test_is_test_passed-kselftest.log new file mode 100644 index 000000000000..65d3f27feaf2 --- /dev/null +++ b/tools/testing/kunit/test_data/test_is_test_passed-kselftest.log @@ -0,0 +1,14 @@ +TAP version 13 +1..2 +# selftests: membarrier: membarrier_test_single_thread +# TAP version 13 +# 1..2 +# ok 1 sys_membarrier available +# ok 2 sys membarrier invalid command test: command = -1, flags = 0, errno = 22. Failed as expected +ok 1 selftests: membarrier: membarrier_test_single_thread +# selftests: membarrier: membarrier_test_multi_thread +# TAP version 13 +# 1..2 +# ok 1 sys_membarrier available +# ok 2 sys membarrier invalid command test: command = -1, flags = 0, errno = 22. 
Failed as expected +ok 2 selftests: membarrier: membarrier_test_multi_thread diff --git a/tools/testing/kunit/test_data/test_is_test_passed-missing_plan.log b/tools/testing/kunit/test_data/test_is_test_passed-missing_plan.log new file mode 100644 index 000000000000..5cd17b7f818a --- /dev/null +++ b/tools/testing/kunit/test_data/test_is_test_passed-missing_plan.log @@ -0,0 +1,31 @@ +KTAP version 1 + # Subtest: sysctl_test + # sysctl_test_dointvec_null_tbl_data: sysctl_test_dointvec_null_tbl_data passed + ok 1 - sysctl_test_dointvec_null_tbl_data + # sysctl_test_dointvec_table_maxlen_unset: sysctl_test_dointvec_table_maxlen_unset passed + ok 2 - sysctl_test_dointvec_table_maxlen_unset + # sysctl_test_dointvec_table_len_is_zero: sysctl_test_dointvec_table_len_is_zero passed + ok 3 - sysctl_test_dointvec_table_len_is_zero + # sysctl_test_dointvec_table_read_but_position_set: sysctl_test_dointvec_table_read_but_position_set passed + ok 4 - sysctl_test_dointvec_table_read_but_position_set + # sysctl_test_dointvec_happy_single_positive: sysctl_test_dointvec_happy_single_positive passed + ok 5 - sysctl_test_dointvec_happy_single_positive + # sysctl_test_dointvec_happy_single_negative: sysctl_test_dointvec_happy_single_negative passed + ok 6 - sysctl_test_dointvec_happy_single_negative + # sysctl_test_dointvec_single_less_int_min: sysctl_test_dointvec_single_less_int_min passed + ok 7 - sysctl_test_dointvec_single_less_int_min + # sysctl_test_dointvec_single_greater_int_max: sysctl_test_dointvec_single_greater_int_max passed + ok 8 - sysctl_test_dointvec_single_greater_int_max +kunit sysctl_test: all tests passed +ok 1 - sysctl_test + # Subtest: example + 1..2 +init_suite + # example_simple_test: initializing + # example_simple_test: example_simple_test passed + ok 1 - example_simple_test + # example_mock_test: initializing + # example_mock_test: example_mock_test passed + ok 2 - example_mock_test +kunit example: all tests passed +ok 2 - example diff --git a/tools/testing/kunit/test_data/test_is_test_passed-no_tests_no_plan.log b/tools/testing/kunit/test_data/test_is_test_passed-no_tests_no_plan.log new file mode 100644 index 000000000000..4f81876ee6f1 --- /dev/null +++ b/tools/testing/kunit/test_data/test_is_test_passed-no_tests_no_plan.log @@ -0,0 +1,7 @@ +TAP version 14 +1..1 + # Subtest: suite + 1..1 + # Subtest: case + ok 1 - case +ok 1 - suite diff --git a/tools/testing/kunit/test_data/test_is_test_passed-no_tests_run.log b/tools/testing/kunit/test_data/test_is_test_passed-no_tests_run_no_header.log index ba69f5c94b75..ba69f5c94b75 100644 --- a/tools/testing/kunit/test_data/test_is_test_passed-no_tests_run.log +++ b/tools/testing/kunit/test_data/test_is_test_passed-no_tests_run_no_header.log diff --git a/tools/testing/kunit/test_data/test_is_test_passed-no_tests_run_with_header.log b/tools/testing/kunit/test_data/test_is_test_passed-no_tests_run_with_header.log new file mode 100644 index 000000000000..5f48ee659d40 --- /dev/null +++ b/tools/testing/kunit/test_data/test_is_test_passed-no_tests_run_with_header.log @@ -0,0 +1,2 @@ +TAP version 14 +1..0 diff --git a/tools/testing/kunit/test_data/test_kernel_panic_interrupt.log b/tools/testing/kunit/test_data/test_kernel_panic_interrupt.log index c045eee75f27..a014ffe9725e 100644 --- a/tools/testing/kunit/test_data/test_kernel_panic_interrupt.log +++ b/tools/testing/kunit/test_data/test_kernel_panic_interrupt.log @@ -1,6 +1,7 @@ [ 0.060000] printk: console [mc-1] enabled [ 0.060000] random: get_random_bytes called from init_oops_id+0x35/0x40 with 
crng_init=0 [ 0.060000] TAP version 14 +[ 0.060000] 1..3 [ 0.060000] # Subtest: kunit-resource-test [ 0.060000] 1..5 [ 0.060000] ok 1 - kunit_resource_test_init_resources @@ -22,4 +23,4 @@ [ 0.060000] Stack: [ 0.060000] 602086f8 601bc260 705c0000 705c0000 [ 0.060000] 602086f8 6005fcec 705c0000 6002c6ab -[ 0.060000] 6005fcec 601bc260 705c0000 3000000010
\ No newline at end of file +[ 0.060000] 6005fcec 601bc260 705c0000 3000000010 diff --git a/tools/testing/kunit/test_data/test_multiple_prefixes.log b/tools/testing/kunit/test_data/test_multiple_prefixes.log index bc48407dcc36..0ad78481a0b4 100644 --- a/tools/testing/kunit/test_data/test_multiple_prefixes.log +++ b/tools/testing/kunit/test_data/test_multiple_prefixes.log @@ -1,6 +1,7 @@ [ 0.060000][ T1] printk: console [mc-1] enabled [ 0.060000][ T1] random: get_random_bytes called from init_oops_id+0x35/0x40 with crng_init=0 [ 0.060000][ T1] TAP version 14 +[ 0.060000][ T1] 1..3 [ 0.060000][ T1] # Subtest: kunit-resource-test [ 0.060000][ T1] 1..5 [ 0.060000][ T1] ok 1 - kunit_resource_test_init_resources @@ -28,4 +29,4 @@ [ 0.060000][ T1] Stack: [ 0.060000][ T1] 602086f8 601bc260 705c0000 705c0000 [ 0.060000][ T1] 602086f8 6005fcec 705c0000 6002c6ab -[ 0.060000][ T1] 6005fcec 601bc260 705c0000 3000000010
\ No newline at end of file +[ 0.060000][ T1] 6005fcec 601bc260 705c0000 3000000010 diff --git a/tools/testing/kunit/test_data/test_parse_attributes.log b/tools/testing/kunit/test_data/test_parse_attributes.log new file mode 100644 index 000000000000..1a13c371fe9d --- /dev/null +++ b/tools/testing/kunit/test_data/test_parse_attributes.log @@ -0,0 +1,9 @@ +KTAP version 1 +1..1 + KTAP version 1 + # Subtest: suite + # module: example + 1..1 + # test.speed: slow + ok 1 test +ok 1 suite
\ No newline at end of file diff --git a/tools/testing/kunit/test_data/test_parse_ktap_output.log b/tools/testing/kunit/test_data/test_parse_ktap_output.log new file mode 100644 index 000000000000..ccdf244e5303 --- /dev/null +++ b/tools/testing/kunit/test_data/test_parse_ktap_output.log @@ -0,0 +1,8 @@ +KTAP version 1 +1..1 + KTAP version 1 + 1..3 + ok 1 case_1 + ok 2 case_2 + ok 3 case_3 +ok 1 suite diff --git a/tools/testing/kunit/test_data/test_parse_subtest_header.log b/tools/testing/kunit/test_data/test_parse_subtest_header.log new file mode 100644 index 000000000000..216631092e7b --- /dev/null +++ b/tools/testing/kunit/test_data/test_parse_subtest_header.log @@ -0,0 +1,7 @@ +KTAP version 1 +1..1 + KTAP version 1 + # Subtest: suite + 1..1 + ok 1 test +ok 1 suite
\ No newline at end of file diff --git a/tools/testing/kunit/test_data/test_pound_no_prefix.log b/tools/testing/kunit/test_data/test_pound_no_prefix.log index 2ceb360be7d5..dc4cf09a96d0 100644 --- a/tools/testing/kunit/test_data/test_pound_no_prefix.log +++ b/tools/testing/kunit/test_data/test_pound_no_prefix.log @@ -1,6 +1,7 @@ printk: console [mc-1] enabled random: get_random_bytes called from init_oops_id+0x35/0x40 with crng_init=0 TAP version 14 + 1..3 # Subtest: kunit-resource-test 1..5 ok 1 - kunit_resource_test_init_resources @@ -30,4 +31,4 @@ Stack: 602086f8 601bc260 705c0000 705c0000 602086f8 6005fcec 705c0000 6002c6ab - 6005fcec 601bc260 705c0000 3000000010
\ No newline at end of file + 6005fcec 601bc260 705c0000 3000000010 diff --git a/tools/testing/kunit/test_data/test_pound_sign.log b/tools/testing/kunit/test_data/test_pound_sign.log index 28ffa5ba03bf..3f358e3a7ba0 100644 --- a/tools/testing/kunit/test_data/test_pound_sign.log +++ b/tools/testing/kunit/test_data/test_pound_sign.log @@ -1,6 +1,7 @@ [ 0.060000] printk: console [mc-1] enabled [ 0.060000] random: get_random_bytes called from init_oops_id+0x35/0x40 with crng_init=0 [ 0.060000] TAP version 14 +[ 0.060000] 1..3 [ 0.060000] # Subtest: kunit-resource-test [ 0.060000] 1..5 [ 0.060000] ok 1 - kunit_resource_test_init_resources diff --git a/tools/testing/kunit/test_data/test_skip_all_tests.log b/tools/testing/kunit/test_data/test_skip_all_tests.log new file mode 100644 index 000000000000..2ea6e6d14fff --- /dev/null +++ b/tools/testing/kunit/test_data/test_skip_all_tests.log @@ -0,0 +1,15 @@ +TAP version 14 +1..2 + # Subtest: string-stream-test + 1..3 + ok 1 - string_stream_test_empty_on_creation # SKIP all tests skipped + ok 2 - string_stream_test_not_empty_after_add # SKIP all tests skipped + ok 3 - string_stream_test_get_string # SKIP all tests skipped +ok 1 - string-stream-test # SKIP + # Subtest: example + 1..2 + # example_simple_test: initializing + ok 1 - example_simple_test # SKIP all tests skipped + # example_skip_test: initializing + ok 2 - example_skip_test # SKIP this test should be skipped +ok 2 - example # SKIP diff --git a/tools/testing/kunit/test_data/test_skip_tests.log b/tools/testing/kunit/test_data/test_skip_tests.log new file mode 100644 index 000000000000..79b326e31274 --- /dev/null +++ b/tools/testing/kunit/test_data/test_skip_tests.log @@ -0,0 +1,15 @@ +TAP version 14 +1..2 + # Subtest: string-stream-test + 1..3 + ok 1 - string_stream_test_empty_on_creation + ok 2 - string_stream_test_not_empty_after_add + ok 3 - string_stream_test_get_string +ok 1 - string-stream-test + # Subtest: example + 1..2 + # example_simple_test: initializing + ok 1 - example_simple_test + # example_skip_test: initializing + ok 2 - example_skip_test # SKIP this test should be skipped +ok 2 - example diff --git a/tools/testing/kunit/test_data/test_strip_hyphen.log b/tools/testing/kunit/test_data/test_strip_hyphen.log new file mode 100644 index 000000000000..92ac7c24b374 --- /dev/null +++ b/tools/testing/kunit/test_data/test_strip_hyphen.log @@ -0,0 +1,16 @@ +KTAP version 1 +1..2 + # Subtest: sysctl_test + 1..1 + # sysctl_test_dointvec_null_tbl_data: sysctl_test_dointvec_null_tbl_data passed + ok 1 - sysctl_test_dointvec_null_tbl_data +kunit sysctl_test: all tests passed +ok 1 - sysctl_test + # Subtest: example + 1..1 +init_suite + # example_simple_test: initializing + # example_simple_test: example_simple_test passed + ok 1 example_simple_test +kunit example: all tests passed +ok 2 example
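
The skip logs and test_strip_hyphen.log above exercise parser corner cases: case- and suite-level `# SKIP` directives, and result lines written both with and without the `-` separator. The sketch below parses such result lines in a simplified, self-contained way; the regex and the Result type are illustrative assumptions, not kunit_parser's actual implementation.

```python
import re
from typing import NamedTuple, Optional

# Matches KTAP/TAP result lines like the ones in the logs above, e.g.
#   ok 1 - example_simple_test
#   ok 1 example_simple_test              (hyphen omitted)
#   ok 2 - example_skip_test # SKIP this test should be skipped
RESULT_RE = re.compile(
    r'^(ok|not ok) (\d+)(?: - )? ?([^#]*?)\s*(?:# (SKIP)\s*(.*))?$')


class Result(NamedTuple):
    passed: bool
    number: int
    name: str
    skipped: bool
    skip_reason: Optional[str]


def parse_result(line: str) -> Optional[Result]:
    m = RESULT_RE.match(line.strip())
    if not m:
        return None
    status, num, name, skip, reason = m.groups()
    return Result(passed=(status == 'ok'), number=int(num),
                  name=name.strip(), skipped=(skip == 'SKIP'),
                  skip_reason=reason or None)


# The skip case from test_skip_tests.log above:
print(parse_result('ok 2 - example_skip_test # SKIP this test should be skipped'))
```
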