Diffstat (limited to 'tools/testing/kunit')
-rw-r--r--  tools/testing/kunit/configs/all_tests.config                |  35
-rw-r--r--  tools/testing/kunit/configs/arch_uml.config                 |   5
-rw-r--r--  tools/testing/kunit/configs/broken_on_uml.config            |  44
-rw-r--r--  tools/testing/kunit/configs/coverage_uml.config             |  11
-rwxr-xr-x  tools/testing/kunit/kunit.py                                | 324
-rw-r--r--  tools/testing/kunit/kunit_config.py                         | 124
-rw-r--r--  tools/testing/kunit/kunit_json.py                           |  64
-rw-r--r--  tools/testing/kunit/kunit_kernel.py                         | 260
-rw-r--r--  tools/testing/kunit/kunit_parser.py                         | 244
-rw-r--r--  tools/testing/kunit/kunit_printer.py                        |  48
-rwxr-xr-x  tools/testing/kunit/kunit_tool_test.py                      | 407
-rw-r--r--  tools/testing/kunit/qemu_config.py                          |  17
-rw-r--r--  tools/testing/kunit/qemu_configs/alpha.py                   |   2
-rw-r--r--  tools/testing/kunit/qemu_configs/arm.py                     |   2
-rw-r--r--  tools/testing/kunit/qemu_configs/arm64.py                   |   2
-rw-r--r--  tools/testing/kunit/qemu_configs/i386.py                    |   4
-rw-r--r--  tools/testing/kunit/qemu_configs/powerpc.py                 |   2
-rw-r--r--  tools/testing/kunit/qemu_configs/riscv.py                   |  23
-rw-r--r--  tools/testing/kunit/qemu_configs/s390.py                    |   4
-rw-r--r--  tools/testing/kunit/qemu_configs/sparc.py                   |   2
-rw-r--r--  tools/testing/kunit/qemu_configs/x86_64.py                  |   2
-rwxr-xr-x  tools/testing/kunit/run_checks.py                           |  81
-rw-r--r--  tools/testing/kunit/test_data/test_is_test_passed-crash.log |  70
-rw-r--r--  tools/testing/kunit/test_data/test_is_test_passed-no_tests_no_plan.log | 7
24 files changed, 1013 insertions(+), 771 deletions(-)
diff --git a/tools/testing/kunit/configs/all_tests.config b/tools/testing/kunit/configs/all_tests.config
new file mode 100644
index 000000000000..f990cbb73250
--- /dev/null
+++ b/tools/testing/kunit/configs/all_tests.config
@@ -0,0 +1,35 @@
+# This config enables as many tests as possible under UML.
+# It is intended for use in continuous integration systems and similar, for
+# automated testing of as much of the kernel as possible.
+# The config is manually maintained, though it uses KUNIT_ALL_TESTS=y to enable
+# any tests whose dependencies are already satisfied. Please feel free to add
+# more options if they enable any new tests.
+
+CONFIG_KUNIT=y
+CONFIG_KUNIT_EXAMPLE_TEST=y
+CONFIG_KUNIT_ALL_TESTS=y
+
+CONFIG_IIO=y
+
+CONFIG_EXT4_FS=y
+
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+
+CONFIG_PCI=y
+CONFIG_USB4=y
+
+CONFIG_NET=y
+CONFIG_MCTP=y
+
+CONFIG_INET=y
+CONFIG_MPTCP=y
+
+CONFIG_DAMON=y
+CONFIG_DAMON_VADDR=y
+CONFIG_DAMON_PADDR=y
+CONFIG_DEBUG_FS=y
+CONFIG_DAMON_DBGFS=y
+
+CONFIG_SECURITY=y
+CONFIG_SECURITY_APPARMOR=y
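
For context: kunit.py consumes this file via the --alltests flag, which
prepends it to any user-supplied --kunitconfig arguments (see
tree_from_args() below). A minimal sketch of a CI wrapper, assuming a
kernel checkout at KERNEL_DIR (a hypothetical path, not part of this
patch):

    # Hypothetical CI wrapper: run every test enabled by
    # all_tests.config under UML and save a machine-readable report.
    import subprocess
    import sys

    KERNEL_DIR = '/path/to/linux'  # assumption: a kernel source checkout

    result = subprocess.run(
        ['./tools/testing/kunit/kunit.py', 'run', '--alltests',
         '--json=report.json'],
        cwd=KERNEL_DIR)
    sys.exit(result.returncode)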
diff --git a/tools/testing/kunit/configs/arch_uml.config b/tools/testing/kunit/configs/arch_uml.config
new file mode 100644
index 000000000000..e824ce43b05a
--- /dev/null
+++ b/tools/testing/kunit/configs/arch_uml.config
@@ -0,0 +1,5 @@
+# Config options which are added to UML builds by default
+
+# Enable virtio/pci, as a lot of tests require it.
+CONFIG_VIRTIO_UML=y
+CONFIG_UML_PCI_OVER_VIRTIO=y
diff --git a/tools/testing/kunit/configs/broken_on_uml.config b/tools/testing/kunit/configs/broken_on_uml.config
deleted file mode 100644
index 690870043ac0..000000000000
--- a/tools/testing/kunit/configs/broken_on_uml.config
+++ /dev/null
@@ -1,44 +0,0 @@
-# These are currently broken on UML and prevent allyesconfig from building
-# CONFIG_STATIC_LINK is not set
-# CONFIG_UML_NET_VECTOR is not set
-# CONFIG_UML_NET_VDE is not set
-# CONFIG_UML_NET_PCAP is not set
-# CONFIG_NET_PTP_CLASSIFY is not set
-# CONFIG_IP_VS is not set
-# CONFIG_BRIDGE_EBT_BROUTE is not set
-# CONFIG_BRIDGE_EBT_T_FILTER is not set
-# CONFIG_BRIDGE_EBT_T_NAT is not set
-# CONFIG_MTD_NAND_CADENCE is not set
-# CONFIG_MTD_NAND_NANDSIM is not set
-# CONFIG_BLK_DEV_NULL_BLK is not set
-# CONFIG_BLK_DEV_RAM is not set
-# CONFIG_SCSI_DEBUG is not set
-# CONFIG_NET_VENDOR_XILINX is not set
-# CONFIG_NULL_TTY is not set
-# CONFIG_PTP_1588_CLOCK is not set
-# CONFIG_PINCTRL_EQUILIBRIUM is not set
-# CONFIG_DMABUF_SELFTESTS is not set
-# CONFIG_COMEDI is not set
-# CONFIG_XIL_AXIS_FIFO is not set
-# CONFIG_EXFAT_FS is not set
-# CONFIG_STM_DUMMY is not set
-# CONFIG_FSI_MASTER_ASPEED is not set
-# CONFIG_JFS_FS is not set
-# CONFIG_UBIFS_FS is not set
-# CONFIG_CRAMFS is not set
-# CONFIG_CRYPTO_DEV_SAFEXCEL is not set
-# CONFIG_CRYPTO_DEV_AMLOGIC_GXL is not set
-# CONFIG_KCOV is not set
-# CONFIG_LKDTM is not set
-# CONFIG_REED_SOLOMON_TEST is not set
-# CONFIG_TEST_RHASHTABLE is not set
-# CONFIG_TEST_MEMINIT is not set
-# CONFIG_NETWORK_PHY_TIMESTAMPING is not set
-# CONFIG_DEBUG_INFO_BTF is not set
-# CONFIG_PTP_1588_CLOCK_INES is not set
-# CONFIG_QCOM_CPR is not set
-# CONFIG_RESET_BRCMSTB_RESCAL is not set
-# CONFIG_RESET_INTEL_GW is not set
-# CONFIG_ADI_AXI_ADC is not set
-# CONFIG_DEBUG_PAGEALLOC is not set
-# CONFIG_PAGE_POISONING is not set
diff --git a/tools/testing/kunit/configs/coverage_uml.config b/tools/testing/kunit/configs/coverage_uml.config
new file mode 100644
index 000000000000..bacb77664fa8
--- /dev/null
+++ b/tools/testing/kunit/configs/coverage_uml.config
@@ -0,0 +1,11 @@
+# This config fragment enables coverage on UML, which is different from the
+# normal gcov used in other arches (no debugfs).
+# Example usage:
+# ./tools/testing/kunit/kunit.py run \
+# --kunitconfig=tools/testing/kunit/configs/all_tests.config \
+# --kunitconfig=tools/testing/kunit/configs/coverage_uml.config
+
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
+CONFIG_GCOV=y
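
Once tests have run with this fragment, the gcov data is left in the build
directory; it is typically post-processed with lcov/genhtml. A sketch,
assuming both tools are installed and the default '.kunit' build dir:

    # Turn UML gcov output into an HTML coverage report. Flag usage
    # follows standard lcov/genhtml conventions; adjust paths as needed.
    import subprocess

    subprocess.run(['lcov', '-t', 'kunit', '-o', 'coverage.info',
                    '-c', '-d', '.kunit/'], check=True)
    subprocess.run(['genhtml', '-o', 'coverage_html', 'coverage.info'],
                   check=True)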
diff --git a/tools/testing/kunit/kunit.py b/tools/testing/kunit/kunit.py
index 68e6f461c758..4d4663fb578b 100755
--- a/tools/testing/kunit/kunit.py
+++ b/tools/testing/kunit/kunit.py
@@ -10,36 +10,20 @@
import argparse
import os
import re
+import shlex
import sys
import time
assert sys.version_info >= (3, 7), "Python version is too old"
-from collections import namedtuple
+from dataclasses import dataclass
from enum import Enum, auto
-from typing import Iterable, Sequence, List
+from typing import Iterable, List, Optional, Sequence, Tuple
import kunit_json
import kunit_kernel
import kunit_parser
-
-KunitResult = namedtuple('KunitResult', ['status','result','elapsed_time'])
-
-KunitConfigRequest = namedtuple('KunitConfigRequest',
- ['build_dir', 'make_options'])
-KunitBuildRequest = namedtuple('KunitBuildRequest',
- ['jobs', 'build_dir', 'alltests',
- 'make_options'])
-KunitExecRequest = namedtuple('KunitExecRequest',
- ['timeout', 'build_dir', 'alltests',
- 'filter_glob', 'kernel_args', 'run_isolated'])
-KunitParseRequest = namedtuple('KunitParseRequest',
- ['raw_output', 'build_dir', 'json'])
-KunitRequest = namedtuple('KunitRequest', ['raw_output','timeout', 'jobs',
- 'build_dir', 'alltests', 'filter_glob',
- 'kernel_args', 'run_isolated', 'json', 'make_options'])
-
-KernelDirectoryPath = sys.argv[0].split('tools/testing/kunit/')[0]
+from kunit_printer import stdout
class KunitStatus(Enum):
SUCCESS = auto()
@@ -47,6 +31,38 @@ class KunitStatus(Enum):
BUILD_FAILURE = auto()
TEST_FAILURE = auto()
+@dataclass
+class KunitResult:
+ status: KunitStatus
+ elapsed_time: float
+
+@dataclass
+class KunitConfigRequest:
+ build_dir: str
+ make_options: Optional[List[str]]
+
+@dataclass
+class KunitBuildRequest(KunitConfigRequest):
+ jobs: int
+
+@dataclass
+class KunitParseRequest:
+ raw_output: Optional[str]
+ json: Optional[str]
+
+@dataclass
+class KunitExecRequest(KunitParseRequest):
+ build_dir: str
+ timeout: int
+ filter_glob: str
+ kernel_args: Optional[List[str]]
+ run_isolated: Optional[str]
+
+@dataclass
+class KunitRequest(KunitExecRequest, KunitBuildRequest):
+ pass
+
+
def get_kernel_root_path() -> str:
path = sys.argv[0] if not __file__ else __file__
parts = os.path.realpath(path).split('tools/testing/kunit')
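
The payoff of this dataclass hierarchy is that KunitRequest inherits every
field of the config, build, parse, and exec requests, so run_tests() can
hand one object to each stage. A sketch of constructing one, assuming the
classes defined above (field values mirror the defaults used by main()):

    request = KunitRequest(build_dir='.kunit',
                           make_options=None,
                           jobs=8,
                           raw_output=None,
                           json=None,
                           timeout=300,
                           filter_glob='',
                           kernel_args=None,
                           run_isolated=None)
    # The same object satisfies KunitConfigRequest, KunitBuildRequest,
    # KunitParseRequest, and KunitExecRequest.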
@@ -56,48 +72,50 @@ def get_kernel_root_path() -> str:
def config_tests(linux: kunit_kernel.LinuxSourceTree,
request: KunitConfigRequest) -> KunitResult:
- kunit_parser.print_with_timestamp('Configuring KUnit Kernel ...')
+ stdout.print_with_timestamp('Configuring KUnit Kernel ...')
config_start = time.time()
success = linux.build_reconfig(request.build_dir, request.make_options)
config_end = time.time()
if not success:
return KunitResult(KunitStatus.CONFIG_FAILURE,
- 'could not configure kernel',
config_end - config_start)
return KunitResult(KunitStatus.SUCCESS,
- 'configured kernel successfully',
config_end - config_start)
def build_tests(linux: kunit_kernel.LinuxSourceTree,
request: KunitBuildRequest) -> KunitResult:
- kunit_parser.print_with_timestamp('Building KUnit Kernel ...')
+ stdout.print_with_timestamp('Building KUnit Kernel ...')
build_start = time.time()
- success = linux.build_kernel(request.alltests,
- request.jobs,
+ success = linux.build_kernel(request.jobs,
request.build_dir,
request.make_options)
build_end = time.time()
if not success:
return KunitResult(KunitStatus.BUILD_FAILURE,
- 'could not build kernel',
build_end - build_start)
return KunitResult(KunitStatus.SUCCESS,
- 'built kernel successfully',
build_end - build_start)
+def config_and_build_tests(linux: kunit_kernel.LinuxSourceTree,
+ request: KunitBuildRequest) -> KunitResult:
+ config_result = config_tests(linux, request)
+ if config_result.status != KunitStatus.SUCCESS:
+ return config_result
+
+ return build_tests(linux, request)
+
def _list_tests(linux: kunit_kernel.LinuxSourceTree, request: KunitExecRequest) -> List[str]:
args = ['kunit.action=list']
if request.kernel_args:
args.extend(request.kernel_args)
output = linux.run_kernel(args=args,
- timeout=None if request.alltests else request.timeout,
+ timeout=request.timeout,
filter_glob=request.filter_glob,
build_dir=request.build_dir)
lines = kunit_parser.extract_tap_lines(output)
@@ -105,7 +123,7 @@ def _list_tests(linux: kunit_kernel.LinuxSourceTree, request: KunitExecRequest)
lines.pop()
# Filter out any extraneous non-test output that might have gotten mixed in.
- return [l for l in lines if re.match('^[^\s.]+\.[^\s.]+$', l)]
+ return [l for l in lines if re.match(r'^[^\s.]+\.[^\s.]+$', l)]
def _suites_from_test_list(tests: List[str]) -> List[str]:
"""Extracts all the suites from an ordered list of tests."""
@@ -121,8 +139,7 @@ def _suites_from_test_list(tests: List[str]) -> List[str]:
-def exec_tests(linux: kunit_kernel.LinuxSourceTree, request: KunitExecRequest,
- parse_request: KunitParseRequest) -> KunitResult:
+def exec_tests(linux: kunit_kernel.LinuxSourceTree, request: KunitExecRequest) -> KunitResult:
filter_globs = [request.filter_glob]
if request.run_isolated:
tests = _list_tests(linux, request)
@@ -135,55 +152,58 @@ def exec_tests(linux: kunit_kernel.LinuxSourceTree, request: KunitExecRequest,
test_glob = request.filter_glob.split('.', maxsplit=2)[1]
filter_globs = [g + '.'+ test_glob for g in filter_globs]
+ metadata = kunit_json.Metadata(arch=linux.arch(), build_dir=request.build_dir, def_config='kunit_defconfig')
+
test_counts = kunit_parser.TestCounts()
exec_time = 0.0
for i, filter_glob in enumerate(filter_globs):
- kunit_parser.print_with_timestamp('Starting KUnit Kernel ({}/{})...'.format(i+1, len(filter_globs)))
+ stdout.print_with_timestamp('Starting KUnit Kernel ({}/{})...'.format(i+1, len(filter_globs)))
test_start = time.time()
run_result = linux.run_kernel(
args=request.kernel_args,
- timeout=None if request.alltests else request.timeout,
+ timeout=request.timeout,
filter_glob=filter_glob,
build_dir=request.build_dir)
- result = parse_tests(parse_request, run_result)
+ _, test_result = parse_tests(request, metadata, run_result)
# run_kernel() doesn't block on the kernel exiting.
# That only happens after we get the last line of output from `run_result`.
# So exec_time here actually contains parsing + execution time, which is fine.
test_end = time.time()
exec_time += test_end - test_start
- test_counts.add_subtest_counts(result.result.test.counts)
+ test_counts.add_subtest_counts(test_result.counts)
+
+ if len(filter_globs) == 1 and test_counts.crashed > 0:
+ bd = request.build_dir
+ print('The kernel seems to have crashed; you can decode the stack traces with:')
+ print('$ scripts/decode_stacktrace.sh {}/vmlinux {} < {} | tee {}/decoded.log | {} parse'.format(
+ bd, bd, kunit_kernel.get_outfile_path(bd), bd, sys.argv[0]))
kunit_status = _map_to_overall_status(test_counts.get_status())
- return KunitResult(status=kunit_status, result=result.result, elapsed_time=exec_time)
+ return KunitResult(status=kunit_status, elapsed_time=exec_time)
def _map_to_overall_status(test_status: kunit_parser.TestStatus) -> KunitStatus:
if test_status in (kunit_parser.TestStatus.SUCCESS, kunit_parser.TestStatus.SKIPPED):
return KunitStatus.SUCCESS
- else:
- return KunitStatus.TEST_FAILURE
+ return KunitStatus.TEST_FAILURE
-def parse_tests(request: KunitParseRequest, input_data: Iterable[str]) -> KunitResult:
+def parse_tests(request: KunitParseRequest, metadata: kunit_json.Metadata, input_data: Iterable[str]) -> Tuple[KunitResult, kunit_parser.Test]:
parse_start = time.time()
- test_result = kunit_parser.TestResult(kunit_parser.TestStatus.SUCCESS,
- kunit_parser.Test(),
- 'Tests not Parsed.')
+ test_result = kunit_parser.Test()
if request.raw_output:
# Treat unparsed results as one passing test.
- test_result.test.status = kunit_parser.TestStatus.SUCCESS
- test_result.test.counts.passed = 1
+ test_result.status = kunit_parser.TestStatus.SUCCESS
+ test_result.counts.passed = 1
output: Iterable[str] = input_data
if request.raw_output == 'all':
pass
elif request.raw_output == 'kunit':
- output = kunit_parser.extract_tap_lines(output)
- else:
- print(f'Unknown --raw_output option "{request.raw_output}"', file=sys.stderr)
+ output = kunit_parser.extract_tap_lines(output, lstrip=False)
for line in output:
print(line.rstrip())
@@ -192,50 +212,39 @@ def parse_tests(request: KunitParseRequest, input_data: Iterable[str]) -> KunitR
parse_end = time.time()
if request.json:
- json_obj = kunit_json.get_json_result(
- test_result=test_result,
- def_config='kunit_defconfig',
- build_dir=request.build_dir,
- json_path=request.json)
+ json_str = kunit_json.get_json_result(
+ test=test_result,
+ metadata=metadata)
if request.json == 'stdout':
- print(json_obj)
+ print(json_str)
+ else:
+ with open(request.json, 'w') as f:
+ f.write(json_str)
+ stdout.print_with_timestamp("Test results stored in %s" %
+ os.path.abspath(request.json))
if test_result.status != kunit_parser.TestStatus.SUCCESS:
- return KunitResult(KunitStatus.TEST_FAILURE, test_result,
- parse_end - parse_start)
+ return KunitResult(KunitStatus.TEST_FAILURE, parse_end - parse_start), test_result
- return KunitResult(KunitStatus.SUCCESS, test_result,
- parse_end - parse_start)
+ return KunitResult(KunitStatus.SUCCESS, parse_end - parse_start), test_result
def run_tests(linux: kunit_kernel.LinuxSourceTree,
request: KunitRequest) -> KunitResult:
run_start = time.time()
- config_request = KunitConfigRequest(request.build_dir,
- request.make_options)
- config_result = config_tests(linux, config_request)
+ config_result = config_tests(linux, request)
if config_result.status != KunitStatus.SUCCESS:
return config_result
- build_request = KunitBuildRequest(request.jobs, request.build_dir,
- request.alltests,
- request.make_options)
- build_result = build_tests(linux, build_request)
+ build_result = build_tests(linux, request)
if build_result.status != KunitStatus.SUCCESS:
return build_result
- exec_request = KunitExecRequest(request.timeout, request.build_dir,
- request.alltests, request.filter_glob,
- request.kernel_args, request.run_isolated)
- parse_request = KunitParseRequest(request.raw_output,
- request.build_dir,
- request.json)
-
- exec_result = exec_tests(linux, exec_request, parse_request)
+ exec_result = exec_tests(linux, request)
run_end = time.time()
- kunit_parser.print_with_timestamp((
+ stdout.print_with_timestamp((
'Elapsed time: %.3fs total, %.3fs configuring, %.3fs ' +
'building, %.3fs running\n') % (
run_end - run_start,
@@ -264,22 +273,30 @@ def massage_argv(argv: Sequence[str]) -> Sequence[str]:
return f'{arg}={pseudo_bool_flag_defaults[arg]}'
return list(map(massage_arg, argv))
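
The fragment above is the tail of the "pseudo-bool" flag handling: options
that may appear with or without a value get their default appended so that
argparse parses them consistently. A hedged reconstruction of the
surrounding code (the exact dict contents are an assumption, inferred from
the --json and --raw_output definitions in this file):

    from typing import Sequence

    pseudo_bool_flag_defaults = {
        '--json': 'stdout',
        '--raw_output': 'kunit',
    }

    def massage_argv(argv: Sequence[str]) -> Sequence[str]:
        def massage_arg(arg: str) -> str:
            if arg not in pseudo_bool_flag_defaults:
                return arg
            return f'{arg}={pseudo_bool_flag_defaults[arg]}'
        return list(map(massage_arg, argv))

    assert massage_argv(['run', '--json']) == ['run', '--json=stdout']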
+def get_default_jobs() -> int:
+ return len(os.sched_getaffinity(0))
+
def add_common_opts(parser) -> None:
parser.add_argument('--build_dir',
help='As in the make command, it specifies the build '
'directory.',
- type=str, default='.kunit', metavar='build_dir')
+ type=str, default='.kunit', metavar='DIR')
parser.add_argument('--make_options',
help='X=Y make option, can be repeated.',
- action='append')
+ action='append', metavar='X=Y')
parser.add_argument('--alltests',
- help='Run all KUnit tests through allyesconfig',
+ help='Run all KUnit tests via tools/testing/kunit/configs/all_tests.config',
action='store_true')
parser.add_argument('--kunitconfig',
help='Path to Kconfig fragment that enables KUnit tests.'
' If given a directory, (e.g. lib/kunit), "/.kunitconfig" '
- 'will get automatically appended.',
- metavar='kunitconfig')
+ 'will get automatically appended. If repeated, the files '
+ 'are blindly concatenated, which might not work in all cases.'
+ action='append', metavar='PATHS')
+ parser.add_argument('--kconfig_add',
+ help='Additional Kconfig options to append to the '
+ '.kunitconfig, e.g. CONFIG_KASAN=y. Can be repeated.',
+ action='append', metavar='CONFIG_X=Y')
parser.add_argument('--arch',
help=('Specifies the architecture to run tests under. '
@@ -287,7 +304,7 @@ def add_common_opts(parser) -> None:
'string passed to the ARCH make param, '
'e.g. i386, x86_64, arm, um, etc. Non-UML '
'architectures run on QEMU.'),
- type=str, default='um', metavar='arch')
+ type=str, default='um', metavar='ARCH')
parser.add_argument('--cross_compile',
help=('Sets make\'s CROSS_COMPILE variable; it should '
@@ -299,18 +316,22 @@ def add_common_opts(parser) -> None:
'if you have downloaded the microblaze toolchain '
'from the 0-day website to a directory in your '
'home directory called `toolchains`).'),
- metavar='cross_compile')
+ metavar='PREFIX')
parser.add_argument('--qemu_config',
help=('Takes a path to a file containing '
'a QemuArchParams object.'),
- type=str, metavar='qemu_config')
+ type=str, metavar='FILE')
+
+ parser.add_argument('--qemu_args',
+ help='Additional QEMU arguments, e.g. "-smp 8"',
+ action='append', metavar='')
def add_build_opts(parser) -> None:
parser.add_argument('--jobs',
help='As in the make command, "Specifies the number of '
'jobs (commands) to run simultaneously."',
- type=int, default=8, metavar='jobs')
+ type=int, default=get_default_jobs(), metavar='N')
def add_exec_opts(parser) -> None:
parser.add_argument('--timeout',
@@ -319,7 +340,7 @@ def add_exec_opts(parser) -> None:
'tests.',
type=int,
default=300,
- metavar='timeout')
+ metavar='SECONDS')
parser.add_argument('filter_glob',
help='Filter which KUnit test suites/tests run at '
'boot-time, e.g. list* or list*.*del_test',
@@ -329,26 +350,50 @@ def add_exec_opts(parser) -> None:
metavar='filter_glob')
parser.add_argument('--kernel_args',
help='Kernel command-line parameters. May be repeated.',
- action='append')
+ action='append', metavar='')
parser.add_argument('--run_isolated', help='If set, boot the kernel for each '
'individual suite/test. This can be useful for debugging '
'a non-hermetic test, one that might pass/fail based on '
'what ran before it.',
type=str,
- choices=['suite', 'test']),
+ choices=['suite', 'test'])
def add_parse_opts(parser) -> None:
parser.add_argument('--raw_output', help='If set, don\'t format output from the kernel. '
'If set to --raw_output=kunit, filters to just KUnit output.',
- type=str, nargs='?', const='all', default=None)
+ type=str, nargs='?', const='all', default=None, choices=['all', 'kunit'])
parser.add_argument('--json',
nargs='?',
help='Stores test results in a JSON, and either '
'prints to stdout or saves to file if a '
'filename is specified',
- type=str, const='stdout', default=None)
+ type=str, const='stdout', default=None, metavar='FILE')
+
+
+def tree_from_args(cli_args: argparse.Namespace) -> kunit_kernel.LinuxSourceTree:
+ """Returns a LinuxSourceTree based on the user's arguments."""
+ # Allow users to specify multiple arguments in one string, e.g. '-smp 8'
+ qemu_args: List[str] = []
+ if cli_args.qemu_args:
+ for arg in cli_args.qemu_args:
+ qemu_args.extend(shlex.split(arg))
+
+ kunitconfigs = cli_args.kunitconfig if cli_args.kunitconfig else []
+ if cli_args.alltests:
+ # Prepend so user-specified options take prio if we ever allow
+ # --kunitconfig options to have differing options.
+ kunitconfigs = [kunit_kernel.ALL_TESTS_CONFIG_PATH] + kunitconfigs
-def main(argv, linux=None):
+ return kunit_kernel.LinuxSourceTree(cli_args.build_dir,
+ kunitconfig_paths=kunitconfigs,
+ kconfig_add=cli_args.kconfig_add,
+ arch=cli_args.arch,
+ cross_compile=cli_args.cross_compile,
+ qemu_config_path=cli_args.qemu_config,
+ extra_qemu_args=qemu_args)
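
As the comment in tree_from_args() notes, a single --qemu_args value may
carry several QEMU arguments; shlex.split() handles the splitting while
respecting shell-style quoting:

    import shlex

    assert shlex.split('-smp 8') == ['-smp', '8']
    assert shlex.split('-append "a b"') == ['-append', 'a b']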
+
+
+def main(argv):
parser = argparse.ArgumentParser(
description='Helps writing and running KUnit tests.')
subparser = parser.add_subparsers(dest='subcommand')
@@ -395,23 +440,16 @@ def main(argv, linux=None):
if not os.path.exists(cli_args.build_dir):
os.mkdir(cli_args.build_dir)
- if not linux:
- linux = kunit_kernel.LinuxSourceTree(cli_args.build_dir,
- kunitconfig_path=cli_args.kunitconfig,
- arch=cli_args.arch,
- cross_compile=cli_args.cross_compile,
- qemu_config_path=cli_args.qemu_config)
-
- request = KunitRequest(cli_args.raw_output,
- cli_args.timeout,
- cli_args.jobs,
- cli_args.build_dir,
- cli_args.alltests,
- cli_args.filter_glob,
- cli_args.kernel_args,
- cli_args.run_isolated,
- cli_args.json,
- cli_args.make_options)
+ linux = tree_from_args(cli_args)
+ request = KunitRequest(build_dir=cli_args.build_dir,
+ make_options=cli_args.make_options,
+ jobs=cli_args.jobs,
+ raw_output=cli_args.raw_output,
+ json=cli_args.json,
+ timeout=cli_args.timeout,
+ filter_glob=cli_args.filter_glob,
+ kernel_args=cli_args.kernel_args,
+ run_isolated=cli_args.run_isolated)
result = run_tests(linux, request)
if result.status != KunitStatus.SUCCESS:
sys.exit(1)
@@ -420,72 +458,52 @@ def main(argv, linux=None):
not os.path.exists(cli_args.build_dir)):
os.mkdir(cli_args.build_dir)
- if not linux:
- linux = kunit_kernel.LinuxSourceTree(cli_args.build_dir,
- kunitconfig_path=cli_args.kunitconfig,
- arch=cli_args.arch,
- cross_compile=cli_args.cross_compile,
- qemu_config_path=cli_args.qemu_config)
-
- request = KunitConfigRequest(cli_args.build_dir,
- cli_args.make_options)
+ linux = tree_from_args(cli_args)
+ request = KunitConfigRequest(build_dir=cli_args.build_dir,
+ make_options=cli_args.make_options)
result = config_tests(linux, request)
- kunit_parser.print_with_timestamp((
+ stdout.print_with_timestamp((
'Elapsed time: %.3fs\n') % (
result.elapsed_time))
if result.status != KunitStatus.SUCCESS:
sys.exit(1)
elif cli_args.subcommand == 'build':
- if not linux:
- linux = kunit_kernel.LinuxSourceTree(cli_args.build_dir,
- kunitconfig_path=cli_args.kunitconfig,
- arch=cli_args.arch,
- cross_compile=cli_args.cross_compile,
- qemu_config_path=cli_args.qemu_config)
-
- request = KunitBuildRequest(cli_args.jobs,
- cli_args.build_dir,
- cli_args.alltests,
- cli_args.make_options)
- result = build_tests(linux, request)
- kunit_parser.print_with_timestamp((
+ linux = tree_from_args(cli_args)
+ request = KunitBuildRequest(build_dir=cli_args.build_dir,
+ make_options=cli_args.make_options,
+ jobs=cli_args.jobs)
+ result = config_and_build_tests(linux, request)
+ stdout.print_with_timestamp((
'Elapsed time: %.3fs\n') % (
result.elapsed_time))
if result.status != KunitStatus.SUCCESS:
sys.exit(1)
elif cli_args.subcommand == 'exec':
- if not linux:
- linux = kunit_kernel.LinuxSourceTree(cli_args.build_dir,
- kunitconfig_path=cli_args.kunitconfig,
- arch=cli_args.arch,
- cross_compile=cli_args.cross_compile,
- qemu_config_path=cli_args.qemu_config)
-
- exec_request = KunitExecRequest(cli_args.timeout,
- cli_args.build_dir,
- cli_args.alltests,
- cli_args.filter_glob,
- cli_args.kernel_args,
- cli_args.run_isolated)
- parse_request = KunitParseRequest(cli_args.raw_output,
- cli_args.build_dir,
- cli_args.json)
- result = exec_tests(linux, exec_request, parse_request)
- kunit_parser.print_with_timestamp((
+ linux = tree_from_args(cli_args)
+ exec_request = KunitExecRequest(raw_output=cli_args.raw_output,
+ build_dir=cli_args.build_dir,
+ json=cli_args.json,
+ timeout=cli_args.timeout,
+ filter_glob=cli_args.filter_glob,
+ kernel_args=cli_args.kernel_args,
+ run_isolated=cli_args.run_isolated)
+ result = exec_tests(linux, exec_request)
+ stdout.print_with_timestamp((
'Elapsed time: %.3fs\n') % (result.elapsed_time))
if result.status != KunitStatus.SUCCESS:
sys.exit(1)
elif cli_args.subcommand == 'parse':
- if cli_args.file == None:
+ if cli_args.file is None:
sys.stdin.reconfigure(errors='backslashreplace') # pytype: disable=attribute-error
kunit_output = sys.stdin
else:
with open(cli_args.file, 'r', errors='backslashreplace') as f:
kunit_output = f.read().splitlines()
- request = KunitParseRequest(cli_args.raw_output,
- None,
- cli_args.json)
- result = parse_tests(request, kunit_output)
+ # We know nothing about how the result was created!
+ metadata = kunit_json.Metadata()
+ request = KunitParseRequest(raw_output=cli_args.raw_output,
+ json=cli_args.json)
+ result, _ = parse_tests(request, metadata, kunit_output)
if result.status != KunitStatus.SUCCESS:
sys.exit(1)
else:
diff --git a/tools/testing/kunit/kunit_config.py b/tools/testing/kunit/kunit_config.py
index c77c7d2ef622..48b5f34b2e5d 100644
--- a/tools/testing/kunit/kunit_config.py
+++ b/tools/testing/kunit/kunit_config.py
@@ -6,89 +6,103 @@
# Author: Felix Guo <felixguoxiuping@gmail.com>
# Author: Brendan Higgins <brendanhiggins@google.com>
-import collections
+from dataclasses import dataclass
import re
-from typing import List, Set
+from typing import Dict, Iterable, List, Set, Tuple
CONFIG_IS_NOT_SET_PATTERN = r'^# CONFIG_(\w+) is not set$'
CONFIG_PATTERN = r'^CONFIG_(\w+)=(\S+|".*")$'
-KconfigEntryBase = collections.namedtuple('KconfigEntryBase', ['name', 'value'])
-
-class KconfigEntry(KconfigEntryBase):
+@dataclass(frozen=True)
+class KconfigEntry:
+ name: str
+ value: str
def __str__(self) -> str:
if self.value == 'n':
- return r'# CONFIG_%s is not set' % (self.name)
- else:
- return r'CONFIG_%s=%s' % (self.name, self.value)
+ return f'# CONFIG_{self.name} is not set'
+ return f'CONFIG_{self.name}={self.value}'
class KconfigParseError(Exception):
"""Error parsing Kconfig defconfig or .config."""
-class Kconfig(object):
+class Kconfig:
"""Represents defconfig or .config specified using the Kconfig language."""
def __init__(self) -> None:
- self._entries = [] # type: List[KconfigEntry]
+ self._entries = {} # type: Dict[str, str]
+
+ def __eq__(self, other) -> bool:
+ if not isinstance(other, self.__class__):
+ return False
+ return self._entries == other._entries
- def entries(self) -> Set[KconfigEntry]:
- return set(self._entries)
+ def __repr__(self) -> str:
+ return ','.join(str(e) for e in self.as_entries())
- def add_entry(self, entry: KconfigEntry) -> None:
- self._entries.append(entry)
+ def as_entries(self) -> Iterable[KconfigEntry]:
+ for name, value in self._entries.items():
+ yield KconfigEntry(name, value)
+
+ def add_entry(self, name: str, value: str) -> None:
+ self._entries[name] = value
def is_subset_of(self, other: 'Kconfig') -> bool:
- other_dict = {e.name: e.value for e in other.entries()}
- for a in self.entries():
- b = other_dict.get(a.name)
+ for name, value in self._entries.items():
+ b = other._entries.get(name)
if b is None:
- if a.value == 'n':
+ if value == 'n':
continue
return False
- elif a.value != b:
+ if value != b:
return False
return True
+ def conflicting_options(self, other: 'Kconfig') -> List[Tuple[KconfigEntry, KconfigEntry]]:
+ diff = [] # type: List[Tuple[KconfigEntry, KconfigEntry]]
+ for name, value in self._entries.items():
+ b = other._entries.get(name)
+ if b and value != b:
+ pair = (KconfigEntry(name, value), KconfigEntry(name, b))
+ diff.append(pair)
+ return diff
+
def merge_in_entries(self, other: 'Kconfig') -> None:
- if other.is_subset_of(self):
- return
- self._entries = list(self.entries().union(other.entries()))
+ for name, value in other._entries.items():
+ self._entries[name] = value
def write_to_file(self, path: str) -> None:
with open(path, 'a+') as f:
- for entry in self.entries():
- f.write(str(entry) + '\n')
-
- def parse_from_string(self, blob: str) -> None:
- """Parses a string containing KconfigEntrys and populates this Kconfig."""
- self._entries = []
- is_not_set_matcher = re.compile(CONFIG_IS_NOT_SET_PATTERN)
- config_matcher = re.compile(CONFIG_PATTERN)
- for line in blob.split('\n'):
- line = line.strip()
- if not line:
- continue
-
- match = config_matcher.match(line)
- if match:
- entry = KconfigEntry(match.group(1), match.group(2))
- self.add_entry(entry)
- continue
-
- empty_match = is_not_set_matcher.match(line)
- if empty_match:
- entry = KconfigEntry(empty_match.group(1), 'n')
- self.add_entry(entry)
- continue
-
- if line[0] == '#':
- continue
- else:
- raise KconfigParseError('Failed to parse: ' + line)
-
- def read_from_file(self, path: str) -> None:
- with open(path, 'r') as f:
- self.parse_from_string(f.read())
+ for e in self.as_entries():
+ f.write(str(e) + '\n')
+
+def parse_file(path: str) -> Kconfig:
+ with open(path, 'r') as f:
+ return parse_from_string(f.read())
+
+def parse_from_string(blob: str) -> Kconfig:
+ """Parses a string containing Kconfig entries."""
+ kconfig = Kconfig()
+ is_not_set_matcher = re.compile(CONFIG_IS_NOT_SET_PATTERN)
+ config_matcher = re.compile(CONFIG_PATTERN)
+ for line in blob.split('\n'):
+ line = line.strip()
+ if not line:
+ continue
+
+ match = config_matcher.match(line)
+ if match:
+ kconfig.add_entry(match.group(1), match.group(2))
+ continue
+
+ empty_match = is_not_set_matcher.match(line)
+ if empty_match:
+ kconfig.add_entry(empty_match.group(1), 'n')
+ continue
+
+ if line[0] == '#':
+ continue
+ raise KconfigParseError('Failed to parse: ' + line)
+ return kconfig
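
Taken together, the module now exposes a small functional API: parse_file()
and parse_from_string() construct a Kconfig, and the dict-backed entries
make merging and conflict detection cheap. A usage sketch, assuming this
module is imported as kunit_config:

    import kunit_config

    a = kunit_config.parse_from_string('CONFIG_KUNIT=y\n# CONFIG_FOO is not set')
    b = kunit_config.parse_from_string('CONFIG_KUNIT=m')

    # Same option, different values -> reported as a conflict.
    print(a.conflicting_options(b))
    # [(KconfigEntry(name='KUNIT', value='y'), KconfigEntry(name='KUNIT', value='m'))]

    a.merge_in_entries(b)   # later entries win; KUNIT is now 'm'
    assert b.is_subset_of(a)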
diff --git a/tools/testing/kunit/kunit_json.py b/tools/testing/kunit/kunit_json.py
index 746bec72b9ac..10ff65689dd8 100644
--- a/tools/testing/kunit/kunit_json.py
+++ b/tools/testing/kunit/kunit_json.py
@@ -6,58 +6,58 @@
# Copyright (C) 2020, Google LLC.
# Author: Heidi Fahim <heidifahim@google.com>
+from dataclasses import dataclass
import json
-import os
+from typing import Any, Dict
-import kunit_parser
+from kunit_parser import Test, TestStatus
-from kunit_parser import Test, TestResult, TestStatus
-from typing import Any, Dict, Optional
+@dataclass
+class Metadata:
+ """Stores metadata about this run to include in get_json_result()."""
+ arch: str = ''
+ def_config: str = ''
+ build_dir: str = ''
JsonObj = Dict[str, Any]
-def _get_group_json(test: Test, def_config: str,
- build_dir: Optional[str]) -> JsonObj:
+_status_map: Dict[TestStatus, str] = {
+ TestStatus.SUCCESS: "PASS",
+ TestStatus.SKIPPED: "SKIP",
+ TestStatus.TEST_CRASHED: "ERROR",
+}
+
+def _get_group_json(test: Test, common_fields: JsonObj) -> JsonObj:
sub_groups = [] # List[JsonObj]
test_cases = [] # List[JsonObj]
for subtest in test.subtests:
- if len(subtest.subtests):
- sub_group = _get_group_json(subtest, def_config,
- build_dir)
+ if subtest.subtests:
+ sub_group = _get_group_json(subtest, common_fields)
sub_groups.append(sub_group)
- else:
- test_case = {"name": subtest.name, "status": "FAIL"}
- if subtest.status == TestStatus.SUCCESS:
- test_case["status"] = "PASS"
- elif subtest.status == TestStatus.TEST_CRASHED:
- test_case["status"] = "ERROR"
- test_cases.append(test_case)
+ continue
+ status = _status_map.get(subtest.status, "FAIL")
+ test_cases.append({"name": subtest.name, "status": status})
test_group = {
"name": test.name,
- "arch": "UM",
- "defconfig": def_config,
- "build_environment": build_dir,
"sub_groups": sub_groups,
"test_cases": test_cases,
+ }
+ test_group.update(common_fields)
+ return test_group
+
+def get_json_result(test: Test, metadata: Metadata) -> str:
+ common_fields = {
+ "arch": metadata.arch,
+ "defconfig": metadata.def_config,
+ "build_environment": metadata.build_dir,
"lab_name": None,
"kernel": None,
"job": None,
"git_branch": "kselftest",
}
- return test_group
-def get_json_result(test_result: TestResult, def_config: str,
- build_dir: Optional[str], json_path: str) -> str:
- test_group = _get_group_json(test_result.test, def_config, build_dir)
+ test_group = _get_group_json(test, common_fields)
test_group["name"] = "KUnit Test Group"
- json_obj = json.dumps(test_group, indent=4)
- if json_path != 'stdout':
- with open(json_path, 'w') as result_path:
- result_path.write(json_obj)
- root = __file__.split('tools/testing/kunit/')[0]
- kunit_parser.print_with_timestamp(
- "Test results stored in %s" %
- os.path.join(root, result_path.name))
- return json_obj
+ return json.dumps(test_group, indent=4)
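
get_json_result() is now a pure function of the parsed test tree plus a
Metadata record, which makes the JSON path easy to exercise on its own. A
sketch, where parsed_test is a placeholder for a kunit_parser.Test produced
elsewhere:

    import kunit_json

    metadata = kunit_json.Metadata(arch='um',
                                   def_config='kunit_defconfig',
                                   build_dir='.kunit')
    json_str = kunit_json.get_json_result(test=parsed_test, metadata=metadata)
    print(json_str)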
diff --git a/tools/testing/kunit/kunit_kernel.py b/tools/testing/kunit/kunit_kernel.py
index 66095568bf32..53e90c335834 100644
--- a/tools/testing/kunit/kunit_kernel.py
+++ b/tools/testing/kunit/kunit_kernel.py
@@ -6,32 +6,31 @@
# Author: Felix Guo <felixguoxiuping@gmail.com>
# Author: Brendan Higgins <brendanhiggins@google.com>
+import importlib.abc
import importlib.util
import logging
import subprocess
import os
+import shlex
import shutil
import signal
import threading
from typing import Iterator, List, Optional, Tuple
import kunit_config
-import kunit_parser
+from kunit_printer import stdout
import qemu_config
KCONFIG_PATH = '.config'
KUNITCONFIG_PATH = '.kunitconfig'
+OLD_KUNITCONFIG_PATH = 'last_used_kunitconfig'
DEFAULT_KUNITCONFIG_PATH = 'tools/testing/kunit/configs/default.config'
-BROKEN_ALLCONFIG_PATH = 'tools/testing/kunit/configs/broken_on_uml.config'
+ALL_TESTS_CONFIG_PATH = 'tools/testing/kunit/configs/all_tests.config'
+UML_KCONFIG_PATH = 'tools/testing/kunit/configs/arch_uml.config'
OUTFILE_PATH = 'test.log'
ABS_TOOL_PATH = os.path.abspath(os.path.dirname(__file__))
QEMU_CONFIGS_DIR = os.path.join(ABS_TOOL_PATH, 'qemu_configs')
-def get_file_path(build_dir, default):
- if build_dir:
- default = os.path.join(build_dir, default)
- return default
-
class ConfigError(Exception):
"""Represents an error trying to configure the Linux kernel."""
@@ -40,7 +39,7 @@ class BuildError(Exception):
"""Represents an error trying to build the Linux kernel."""
-class LinuxSourceTreeOperations(object):
+class LinuxSourceTreeOperations:
"""An abstraction over command line operations performed on a source tree."""
def __init__(self, linux_arch: str, cross_compile: Optional[str]):
@@ -55,20 +54,15 @@ class LinuxSourceTreeOperations(object):
except subprocess.CalledProcessError as e:
raise ConfigError(e.output.decode())
- def make_arch_qemuconfig(self, kconfig: kunit_config.Kconfig) -> None:
- pass
-
- def make_allyesconfig(self, build_dir, make_options) -> None:
- raise ConfigError('Only the "um" arch is supported for alltests')
+ def make_arch_config(self, base_kunitconfig: kunit_config.Kconfig) -> kunit_config.Kconfig:
+ return base_kunitconfig
- def make_olddefconfig(self, build_dir, make_options) -> None:
- command = ['make', 'ARCH=' + self._linux_arch, 'olddefconfig']
+ def make_olddefconfig(self, build_dir: str, make_options) -> None:
+ command = ['make', 'ARCH=' + self._linux_arch, 'O=' + build_dir, 'olddefconfig']
if self._cross_compile:
command += ['CROSS_COMPILE=' + self._cross_compile]
if make_options:
command.extend(make_options)
- if build_dir:
- command += ['O=' + build_dir]
print('Populating config with:\n$', ' '.join(command))
try:
subprocess.check_output(command, stderr=subprocess.STDOUT)
@@ -77,14 +71,12 @@ class LinuxSourceTreeOperations(object):
except subprocess.CalledProcessError as e:
raise ConfigError(e.output.decode())
- def make(self, jobs, build_dir, make_options) -> None:
- command = ['make', 'ARCH=' + self._linux_arch, '--jobs=' + str(jobs)]
+ def make(self, jobs, build_dir: str, make_options) -> None:
+ command = ['make', 'ARCH=' + self._linux_arch, 'O=' + build_dir, '--jobs=' + str(jobs)]
if make_options:
command.extend(make_options)
if self._cross_compile:
command += ['CROSS_COMPILE=' + self._cross_compile]
- if build_dir:
- command += ['O=' + build_dir]
print('Building with:\n$', ' '.join(command))
try:
proc = subprocess.Popen(command,
@@ -115,10 +107,10 @@ class LinuxSourceTreeOperationsQemu(LinuxSourceTreeOperations):
self._kernel_command_line = qemu_arch_params.kernel_command_line + ' kunit_shutdown=reboot'
self._extra_qemu_params = qemu_arch_params.extra_qemu_params
- def make_arch_qemuconfig(self, base_kunitconfig: kunit_config.Kconfig) -> None:
- kconfig = kunit_config.Kconfig()
- kconfig.parse_from_string(self._kconfig)
- base_kunitconfig.merge_in_entries(kconfig)
+ def make_arch_config(self, base_kunitconfig: kunit_config.Kconfig) -> kunit_config.Kconfig:
+ kconfig = kunit_config.parse_from_string(self._kconfig)
+ kconfig.merge_in_entries(base_kunitconfig)
+ return kconfig
def start(self, params: List[str], build_dir: str) -> subprocess.Popen:
kernel_path = os.path.join(build_dir, self._kernel_path)
@@ -126,16 +118,17 @@ class LinuxSourceTreeOperationsQemu(LinuxSourceTreeOperations):
'-nodefaults',
'-m', '1024',
'-kernel', kernel_path,
- '-append', '\'' + ' '.join(params + [self._kernel_command_line]) + '\'',
+ '-append', ' '.join(params + [self._kernel_command_line]),
'-no-reboot',
'-nographic',
- '-serial stdio'] + self._extra_qemu_params
- print('Running tests with:\n$', ' '.join(qemu_command))
- return subprocess.Popen(' '.join(qemu_command),
- stdin=subprocess.PIPE,
- stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT,
- text=True, shell=True, errors='backslashreplace')
+ '-serial', 'stdio'] + self._extra_qemu_params
+ # Note: shlex.join() does what we want, but requires python 3.8+.
+ print('Running tests with:\n$', ' '.join(shlex.quote(arg) for arg in qemu_command))
+ return subprocess.Popen(qemu_command,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ text=True, errors='backslashreplace')
class LinuxSourceTreeOperationsUml(LinuxSourceTreeOperations):
"""An abstraction over command line operations performed on a source tree."""
@@ -143,59 +136,68 @@ class LinuxSourceTreeOperationsUml(LinuxSourceTreeOperations):
def __init__(self, cross_compile=None):
super().__init__(linux_arch='um', cross_compile=cross_compile)
- def make_allyesconfig(self, build_dir, make_options) -> None:
- kunit_parser.print_with_timestamp(
- 'Enabling all CONFIGs for UML...')
- command = ['make', 'ARCH=um', 'allyesconfig']
- if make_options:
- command.extend(make_options)
- if build_dir:
- command += ['O=' + build_dir]
- process = subprocess.Popen(
- command,
- stdout=subprocess.DEVNULL,
- stderr=subprocess.STDOUT)
- process.wait()
- kunit_parser.print_with_timestamp(
- 'Disabling broken configs to run KUnit tests...')
-
- with open(get_kconfig_path(build_dir), 'a') as config:
- with open(BROKEN_ALLCONFIG_PATH, 'r') as disable:
- config.write(disable.read())
- kunit_parser.print_with_timestamp(
- 'Starting Kernel with all configs takes a few minutes...')
+ def make_arch_config(self, base_kunitconfig: kunit_config.Kconfig) -> kunit_config.Kconfig:
+ kconfig = kunit_config.parse_file(UML_KCONFIG_PATH)
+ kconfig.merge_in_entries(base_kunitconfig)
+ return kconfig
def start(self, params: List[str], build_dir: str) -> subprocess.Popen:
"""Runs the Linux UML binary. Must be named 'linux'."""
- linux_bin = get_file_path(build_dir, 'linux')
+ linux_bin = os.path.join(build_dir, 'linux')
+ params.extend(['mem=1G', 'console=tty', 'kunit_shutdown=halt'])
return subprocess.Popen([linux_bin] + params,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
text=True, errors='backslashreplace')
-def get_kconfig_path(build_dir) -> str:
- return get_file_path(build_dir, KCONFIG_PATH)
+def get_kconfig_path(build_dir: str) -> str:
+ return os.path.join(build_dir, KCONFIG_PATH)
-def get_kunitconfig_path(build_dir) -> str:
- return get_file_path(build_dir, KUNITCONFIG_PATH)
+def get_kunitconfig_path(build_dir: str) -> str:
+ return os.path.join(build_dir, KUNITCONFIG_PATH)
-def get_outfile_path(build_dir) -> str:
- return get_file_path(build_dir, OUTFILE_PATH)
+def get_old_kunitconfig_path(build_dir: str) -> str:
+ return os.path.join(build_dir, OLD_KUNITCONFIG_PATH)
-def get_source_tree_ops(arch: str, cross_compile: Optional[str]) -> LinuxSourceTreeOperations:
+def get_parsed_kunitconfig(build_dir: str,
+ kunitconfig_paths: Optional[List[str]]=None) -> kunit_config.Kconfig:
+ if not kunitconfig_paths:
+ path = get_kunitconfig_path(build_dir)
+ if not os.path.exists(path):
+ shutil.copyfile(DEFAULT_KUNITCONFIG_PATH, path)
+ return kunit_config.parse_file(path)
+
+ merged = kunit_config.Kconfig()
+
+ for path in kunitconfig_paths:
+ if os.path.isdir(path):
+ path = os.path.join(path, KUNITCONFIG_PATH)
+ if not os.path.exists(path):
+ raise ConfigError(f'Specified kunitconfig ({path}) does not exist')
+
+ partial = kunit_config.parse_file(path)
+ diff = merged.conflicting_options(partial)
+ if diff:
+ diff_str = '\n\n'.join(f'{a}\n vs from {path}\n{b}' for a, b in diff)
+ raise ConfigError(f'Multiple values specified for {len(diff)} options in kunitconfig:\n{diff_str}')
+ merged.merge_in_entries(partial)
+ return merged
+
+def get_outfile_path(build_dir: str) -> str:
+ return os.path.join(build_dir, OUTFILE_PATH)
+
+def _default_qemu_config_path(arch: str) -> str:
config_path = os.path.join(QEMU_CONFIGS_DIR, arch + '.py')
- if arch == 'um':
- return LinuxSourceTreeOperationsUml(cross_compile=cross_compile)
- elif os.path.isfile(config_path):
- return get_source_tree_ops_from_qemu_config(config_path, cross_compile)[1]
+ if os.path.isfile(config_path):
+ return config_path
options = [f[:-3] for f in os.listdir(QEMU_CONFIGS_DIR) if f.endswith('.py')]
raise ConfigError(arch + ' is not a valid arch, options are ' + str(sorted(options)))
-def get_source_tree_ops_from_qemu_config(config_path: str,
- cross_compile: Optional[str]) -> Tuple[
- str, LinuxSourceTreeOperations]:
+def _get_qemu_ops(config_path: str,
+ extra_qemu_args: Optional[List[str]],
+ cross_compile: Optional[str]) -> Tuple[str, LinuxSourceTreeOperations]:
# The module name/path has very little to do with where the actual file
# exists (I learned this through experimentation and could not find it
# anywhere in the Python documentation).
@@ -206,6 +208,7 @@ def get_source_tree_ops_from_qemu_config(config_path: str,
# exists as a file.
module_path = '.' + os.path.join(os.path.basename(QEMU_CONFIGS_DIR), os.path.basename(config_path))
spec = importlib.util.spec_from_file_location(module_path, config_path)
+ assert spec is not None
config = importlib.util.module_from_spec(spec)
# See https://github.com/python/typeshed/pull/2626 for context.
assert isinstance(spec.loader, importlib.abc.Loader)
@@ -214,43 +217,41 @@ def get_source_tree_ops_from_qemu_config(config_path: str,
if not hasattr(config, 'QEMU_ARCH'):
raise ValueError('qemu_config module missing "QEMU_ARCH": ' + config_path)
params: qemu_config.QemuArchParams = config.QEMU_ARCH # type: ignore
+ if extra_qemu_args:
+ params.extra_qemu_params.extend(extra_qemu_args)
return params.linux_arch, LinuxSourceTreeOperationsQemu(
params, cross_compile=cross_compile)
-class LinuxSourceTree(object):
+class LinuxSourceTree:
"""Represents a Linux kernel source tree with KUnit tests."""
def __init__(
self,
build_dir: str,
- load_config=True,
- kunitconfig_path='',
+ kunitconfig_paths: Optional[List[str]]=None,
+ kconfig_add: Optional[List[str]]=None,
arch=None,
cross_compile=None,
- qemu_config_path=None) -> None:
+ qemu_config_path=None,
+ extra_qemu_args=None) -> None:
signal.signal(signal.SIGINT, self.signal_handler)
if qemu_config_path:
- self._arch, self._ops = get_source_tree_ops_from_qemu_config(
- qemu_config_path, cross_compile)
+ self._arch, self._ops = _get_qemu_ops(qemu_config_path, extra_qemu_args, cross_compile)
else:
self._arch = 'um' if arch is None else arch
- self._ops = get_source_tree_ops(self._arch, cross_compile)
-
- if not load_config:
- return
+ if self._arch == 'um':
+ self._ops = LinuxSourceTreeOperationsUml(cross_compile=cross_compile)
+ else:
+ qemu_config_path = _default_qemu_config_path(self._arch)
+ _, self._ops = _get_qemu_ops(qemu_config_path, extra_qemu_args, cross_compile)
- if kunitconfig_path:
- if os.path.isdir(kunitconfig_path):
- kunitconfig_path = os.path.join(kunitconfig_path, KUNITCONFIG_PATH)
- if not os.path.exists(kunitconfig_path):
- raise ConfigError(f'Specified kunitconfig ({kunitconfig_path}) does not exist')
- else:
- kunitconfig_path = get_kunitconfig_path(build_dir)
- if not os.path.exists(kunitconfig_path):
- shutil.copyfile(DEFAULT_KUNITCONFIG_PATH, kunitconfig_path)
+ self._kconfig = get_parsed_kunitconfig(build_dir, kunitconfig_paths)
+ if kconfig_add:
+ kconfig = kunit_config.parse_from_string('\n'.join(kconfig_add))
+ self._kconfig.merge_in_entries(kconfig)
- self._kconfig = kunit_config.Kconfig()
- self._kconfig.read_from_file(kunitconfig_path)
+ def arch(self) -> str:
+ return self._arch
def clean(self) -> bool:
try:
@@ -260,54 +261,67 @@ class LinuxSourceTree(object):
return False
return True
- def validate_config(self, build_dir) -> bool:
+ def validate_config(self, build_dir: str) -> bool:
kconfig_path = get_kconfig_path(build_dir)
- validated_kconfig = kunit_config.Kconfig()
- validated_kconfig.read_from_file(kconfig_path)
- if not self._kconfig.is_subset_of(validated_kconfig):
- invalid = self._kconfig.entries() - validated_kconfig.entries()
- message = 'Provided Kconfig is not contained in validated .config. Following fields found in kunitconfig, ' \
- 'but not in .config: %s' % (
- ', '.join([str(e) for e in invalid])
- )
- logging.error(message)
- return False
- return True
-
- def build_config(self, build_dir, make_options) -> bool:
+ validated_kconfig = kunit_config.parse_file(kconfig_path)
+ if self._kconfig.is_subset_of(validated_kconfig):
+ return True
+ missing = set(self._kconfig.as_entries()) - set(validated_kconfig.as_entries())
+ message = 'Not all Kconfig options selected in kunitconfig were in the generated .config.\n' \
+ 'This is probably due to unsatisfied dependencies.\n' \
+ 'Missing: ' + ', '.join(str(e) for e in missing)
+ if self._arch == 'um':
+ message += '\nNote: many Kconfig options aren\'t available on UML. You can try running ' \
+ 'on a different architecture with something like "--arch=x86_64".'
+ logging.error(message)
+ return False
+
+ def build_config(self, build_dir: str, make_options) -> bool:
kconfig_path = get_kconfig_path(build_dir)
if build_dir and not os.path.exists(build_dir):
os.mkdir(build_dir)
try:
- self._ops.make_arch_qemuconfig(self._kconfig)
+ self._kconfig = self._ops.make_arch_config(self._kconfig)
self._kconfig.write_to_file(kconfig_path)
self._ops.make_olddefconfig(build_dir, make_options)
except ConfigError as e:
logging.error(e)
return False
- return self.validate_config(build_dir)
+ if not self.validate_config(build_dir):
+ return False
+
+ old_path = get_old_kunitconfig_path(build_dir)
+ if os.path.exists(old_path):
+ os.remove(old_path) # write_to_file appends to the file
+ self._kconfig.write_to_file(old_path)
+ return True
+
+ def _kunitconfig_changed(self, build_dir: str) -> bool:
+ old_path = get_old_kunitconfig_path(build_dir)
+ if not os.path.exists(old_path):
+ return True
- def build_reconfig(self, build_dir, make_options) -> bool:
+ old_kconfig = kunit_config.parse_file(old_path)
+ return old_kconfig != self._kconfig
+
+ def build_reconfig(self, build_dir: str, make_options) -> bool:
"""Creates a new .config if it is not a subset of the .kunitconfig."""
kconfig_path = get_kconfig_path(build_dir)
- if os.path.exists(kconfig_path):
- existing_kconfig = kunit_config.Kconfig()
- existing_kconfig.read_from_file(kconfig_path)
- self._ops.make_arch_qemuconfig(self._kconfig)
- if not self._kconfig.is_subset_of(existing_kconfig):
- print('Regenerating .config ...')
- os.remove(kconfig_path)
- return self.build_config(build_dir, make_options)
- else:
- return True
- else:
+ if not os.path.exists(kconfig_path):
print('Generating .config ...')
return self.build_config(build_dir, make_options)
- def build_kernel(self, alltests, jobs, build_dir, make_options) -> bool:
+ existing_kconfig = kunit_config.parse_file(kconfig_path)
+ self._kconfig = self._ops.make_arch_config(self._kconfig)
+
+ if self._kconfig.is_subset_of(existing_kconfig) and not self._kunitconfig_changed(build_dir):
+ return True
+ print('Regenerating .config ...')
+ os.remove(kconfig_path)
+ return self.build_config(build_dir, make_options)
+
+ def build_kernel(self, jobs, build_dir: str, make_options) -> bool:
try:
- if alltests:
- self._ops.make_allyesconfig(build_dir, make_options)
self._ops.make_olddefconfig(build_dir, make_options)
self._ops.make(jobs, build_dir, make_options)
except (ConfigError, BuildError) as e:
@@ -318,9 +332,9 @@ class LinuxSourceTree(object):
def run_kernel(self, args=None, build_dir='', filter_glob='', timeout=None) -> Iterator[str]:
if not args:
args = []
- args.extend(['mem=1G', 'console=tty', 'kunit_shutdown=halt'])
if filter_glob:
args.append('kunit.filter_glob='+filter_glob)
+ args.append('kunit.enable=1')
process = self._ops.start(args, build_dir)
assert process.stdout is not None # tell mypy it's set
@@ -352,6 +366,6 @@ class LinuxSourceTree(object):
waiter.join()
subprocess.call(['stty', 'sane'])
- def signal_handler(self, sig, frame) -> None:
+ def signal_handler(self, unused_sig, unused_frame) -> None:
logging.error('Build interruption occurred. Cleaning console.')
subprocess.call(['stty', 'sane'])
diff --git a/tools/testing/kunit/kunit_parser.py b/tools/testing/kunit/kunit_parser.py
index 3355196d0515..1ae873e3e341 100644
--- a/tools/testing/kunit/kunit_parser.py
+++ b/tools/testing/kunit/kunit_parser.py
@@ -11,16 +11,14 @@
from __future__ import annotations
import re
+import sys
-from collections import namedtuple
-from datetime import datetime
from enum import Enum, auto
-from functools import reduce
from typing import Iterable, Iterator, List, Optional, Tuple
-TestResult = namedtuple('TestResult', ['status','test','log'])
+from kunit_printer import stdout
-class Test(object):
+class Test:
"""
A class to represent a test parsed from KTAP results. All KTAP
results within a test log are stored in a main Test object as
@@ -48,10 +46,8 @@ class Test(object):
def __str__(self) -> str:
"""Returns string representation of a Test class object."""
- return ('Test(' + str(self.status) + ', ' + self.name +
- ', ' + str(self.expected_count) + ', ' +
- str(self.subtests) + ', ' + str(self.log) + ', ' +
- str(self.counts) + ')')
+ return (f'Test({self.status}, {self.name}, {self.expected_count}, '
+ f'{self.subtests}, {self.log}, {self.counts})')
def __repr__(self) -> str:
"""Returns string representation of a Test class object."""
@@ -60,7 +56,7 @@ class Test(object):
def add_error(self, error_message: str) -> None:
"""Records an error that occurred while parsing this test."""
self.counts.errors += 1
- print_error('Test ' + self.name + ': ' + error_message)
+ stdout.print_with_timestamp(stdout.red('[ERROR]') + f' Test: {self.name}: {error_message}')
class TestStatus(Enum):
"""An enumeration class to represent the status of a test."""
@@ -94,13 +90,12 @@ class TestCounts:
self.errors = 0
def __str__(self) -> str:
- """Returns the string representation of a TestCounts object.
- """
- return ('Passed: ' + str(self.passed) +
- ', Failed: ' + str(self.failed) +
- ', Crashed: ' + str(self.crashed) +
- ', Skipped: ' + str(self.skipped) +
- ', Errors: ' + str(self.errors))
+ """Returns the string representation of a TestCounts object."""
+ statuses = [('passed', self.passed), ('failed', self.failed),
+ ('crashed', self.crashed), ('skipped', self.skipped),
+ ('errors', self.errors)]
+ return f'Ran {self.total()} tests: ' + \
+ ', '.join(f'{s}: {n}' for s, n in statuses if n > 0)
def total(self) -> int:
"""Returns the total number of test cases within a test
@@ -131,31 +126,19 @@ class TestCounts:
"""
if self.total() == 0:
return TestStatus.NO_TESTS
- elif self.crashed:
- # If one of the subtests crash, the expected status
- # of the Test is crashed.
+ if self.crashed:
+ # Crashes should take priority.
return TestStatus.TEST_CRASHED
- elif self.failed:
- # Otherwise if one of the subtests fail, the
- # expected status of the Test is failed.
+ if self.failed:
return TestStatus.FAILURE
- elif self.passed:
- # Otherwise if one of the subtests pass, the
- # expected status of the Test is passed.
+ if self.passed:
+ # No failures or crashes, looks good!
return TestStatus.SUCCESS
- else:
- # Finally, if none of the subtests have failed,
- # crashed, or passed, the expected status of the
- # Test is skipped.
- return TestStatus.SKIPPED
+ # We have only skipped tests.
+ return TestStatus.SKIPPED
def add_status(self, status: TestStatus) -> None:
- """
- Increments count of inputted status.
-
- Parameters:
- status - status to be added to the TestCounts object
- """
+ """Increments the count for `status`."""
if status == TestStatus.SUCCESS:
self.passed += 1
elif status == TestStatus.FAILURE:
@@ -168,42 +151,51 @@ class TestCounts:
class LineStream:
"""
A class to represent the lines of kernel output.
- Provides a peek()/pop() interface over an iterator of
+ Provides a lazy peek()/pop() interface over an iterator of
(line#, text).
"""
_lines: Iterator[Tuple[int, str]]
_next: Tuple[int, str]
+ _need_next: bool
_done: bool
def __init__(self, lines: Iterator[Tuple[int, str]]):
"""Creates a new LineStream that wraps the given iterator."""
self._lines = lines
self._done = False
+ self._need_next = True
self._next = (0, '')
- self._get_next()
def _get_next(self) -> None:
- """Advances the LineSteam to the next line."""
+ """Advances the LineSteam to the next line, if necessary."""
+ if not self._need_next:
+ return
try:
self._next = next(self._lines)
except StopIteration:
self._done = True
+ finally:
+ self._need_next = False
def peek(self) -> str:
"""Returns the current line, without advancing the LineStream.
"""
+ self._get_next()
return self._next[1]
def pop(self) -> str:
"""Returns the current line and advances the LineStream to
the next line.
"""
- n = self._next
- self._get_next()
- return n[1]
+ s = self.peek()
+ if self._done:
+ raise ValueError(f'LineStream: going past EOF, last line was {s}')
+ self._need_next = True
+ return s
def __bool__(self) -> bool:
"""Returns True if stream has more lines."""
+ self._get_next()
return not self._done
# Only used by kunit_tool_test.py.
@@ -216,6 +208,7 @@ class LineStream:
def line_number(self) -> int:
"""Returns the line number of the current line."""
+ self._get_next()
return self._next[0]
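
The laziness matters because constructing a LineStream used to consume the
first line of kernel output immediately; now nothing is read until the
parser asks. A sketch of the contract, assuming the class above:

    # LineStream wraps (line_number, text) pairs and only pulls from the
    # underlying iterator on peek()/pop()/bool()/line_number().
    lines = iter(enumerate(['TAP version 14', '1..1'], start=1))
    stream = LineStream(lines)                 # consumes nothing yet
    assert stream.peek() == 'TAP version 14'   # reads one line, no advance
    assert stream.pop() == 'TAP version 14'    # advances
    assert stream.line_number() == 2
    assert stream.pop() == '1..1'
    assert not stream                          # iterator exhausted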
# Parsing helper methods:
@@ -225,7 +218,7 @@ TAP_START = re.compile(r'TAP version ([0-9]+)$')
KTAP_END = re.compile('(List of all partitions:|'
'Kernel panic - not syncing: VFS:|reboot: System halted)')
-def extract_tap_lines(kernel_output: Iterable[str]) -> LineStream:
+def extract_tap_lines(kernel_output: Iterable[str], lstrip=True) -> LineStream:
"""Extracts KTAP lines from the kernel output."""
def isolate_ktap_output(kernel_output: Iterable[str]) \
-> Iterator[Tuple[int, str]]:
@@ -251,9 +244,11 @@ def extract_tap_lines(kernel_output: Iterable[str]) -> LineStream:
# stop extracting KTAP lines
break
elif started:
- # remove prefix and any indention and yield
- # line with line number
- line = line[prefix_len:].lstrip()
+ # remove the prefix and optionally any leading
+ # whitespace. Our parsing logic relies on this.
+ line = line[prefix_len:]
+ if lstrip:
+ line = line.lstrip()
yield line_num, line
return LineStream(lines=isolate_ktap_output(kernel_output))
@@ -275,11 +270,9 @@ def check_version(version_num: int, accepted_versions: List[int],
test - Test object for current test being parsed
"""
if version_num < min(accepted_versions):
- test.add_error(version_type +
- ' version lower than expected!')
+ test.add_error(f'{version_type} version lower than expected!')
elif version_num > max(accepted_versions):
- test.add_error(
- version_type + ' version higher than expected!')
+ test.add_error(f'{version_type} version higher than expected!')
def parse_ktap_header(lines: LineStream, test: Test) -> bool:
"""
@@ -340,8 +333,8 @@ def parse_test_plan(lines: LineStream, test: Test) -> bool:
"""
Parses test plan line and stores the expected number of subtests in
test object. Reports an error if expected count is 0.
- Returns False and reports missing test plan error if fails to parse
- test plan.
+ Returns False and sets expected_count to None if there is no valid test
+ plan.
Accepted format:
- '1..[number of subtests]'
@@ -356,14 +349,10 @@ def parse_test_plan(lines: LineStream, test: Test) -> bool:
match = TEST_PLAN.match(lines.peek())
if not match:
test.expected_count = None
- test.add_error('missing plan line!')
return False
test.log.append(lines.pop())
expected_count = int(match.group(1))
test.expected_count = expected_count
- if expected_count == 0:
- test.status = TestStatus.NO_TESTS
- test.add_error('0 tests run!')
return True
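A minimal sketch of the new contract (streams built as in the LineStream example above; assuming TEST_PLAN matches the '1..N' form from the docstring):

    test = Test()
    ok = parse_test_plan(LineStream(enumerate(['1..4'], start=1)), test)
    assert ok and test.expected_count == 4

    test = Test()
    ok = parse_test_plan(LineStream(enumerate(['ok 1 - case'], start=1)), test)
    assert not ok and test.expected_count is None  # no longer reported as an error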
TEST_RESULT = re.compile(r'^(ok|not ok) ([0-9]+) (- )?([^#]*)( # .*)?$')
@@ -393,7 +382,7 @@ def peek_test_name_match(lines: LineStream, test: Test) -> bool:
if not match:
return False
name = match.group(4)
- return (name == test.name)
+ return name == test.name
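For reference, a sketch of how TEST_RESULT splits a result line:

    m = TEST_RESULT.match('ok 1 - example_test # SKIP all skipped')
    # m.group(1) == 'ok'            (status)
    # m.group(2) == '1'             (test number)
    # m.group(4) == 'example_test'  (name, compared against test.name above)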
def parse_test_result(lines: LineStream, test: Test,
expected_num: int) -> bool:
@@ -436,8 +425,7 @@ def parse_test_result(lines: LineStream, test: Test,
# Check test num
num = int(match.group(2))
if num != expected_num:
- test.add_error('Expected test number ' +
- str(expected_num) + ' but found ' + str(num))
+ test.add_error(f'Expected test number {expected_num} but found {num}')
# Set status of test object
status = match.group(1)
@@ -471,51 +459,11 @@ def parse_diagnostic(lines: LineStream) -> List[str]:
log.append(lines.pop())
return log
-DIAGNOSTIC_CRASH_MESSAGE = re.compile(r'^# .*?: kunit test case crashed!$')
-
-def parse_crash_in_log(test: Test) -> bool:
- """
- Iterate through the lines of the log to parse for crash message.
- If crash message found, set status to crashed and return True.
- Otherwise return False.
-
- Parameters:
- test - Test object for current test being parsed
-
- Return:
- True if crash message found in log
- """
- for line in test.log:
- if DIAGNOSTIC_CRASH_MESSAGE.match(line):
- test.status = TestStatus.TEST_CRASHED
- return True
- return False
-
# Printing helper methods:
DIVIDER = '=' * 60
-RESET = '\033[0;0m'
-
-def red(text: str) -> str:
- """Returns inputted string with red color code."""
- return '\033[1;31m' + text + RESET
-
-def yellow(text: str) -> str:
- """Returns inputted string with yellow color code."""
- return '\033[1;33m' + text + RESET
-
-def green(text: str) -> str:
- """Returns inputted string with green color code."""
- return '\033[1;32m' + text + RESET
-
-ANSI_LEN = len(red(''))
-
-def print_with_timestamp(message: str) -> None:
- """Prints message with timestamp at beginning."""
- print('[%s] %s' % (datetime.now().strftime('%H:%M:%S'), message))
-
def format_test_divider(message: str, len_message: int) -> str:
"""
Returns string with message centered in fixed width divider.
@@ -539,7 +487,7 @@ def format_test_divider(message: str, len_message: int) -> str:
# calculate number of dashes for each side of the divider
len_1 = int(difference / 2)
len_2 = difference - len_1
- return ('=' * len_1) + ' ' + message + ' ' + ('=' * len_2)
+ return ('=' * len_1) + f' {message} ' + ('=' * len_2)
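For example, assuming the usual case where the message plus its two padding spaces fits within the 60-character DIVIDER:

    format_test_divider('example (1 subtest)', 19)
    # -> 19 '=' on the left + ' example (1 subtest) ' + 20 '=' on the right,
    #    60 characters total

len_message is passed separately so callers can give the visible length, excluding any ANSI color codes (see print_test_footer below).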
def print_test_header(test: Test) -> None:
"""
@@ -555,22 +503,15 @@ def print_test_header(test: Test) -> None:
message = test.name
if test.expected_count:
if test.expected_count == 1:
- message += (' (' + str(test.expected_count) +
- ' subtest)')
+ message += ' (1 subtest)'
else:
- message += (' (' + str(test.expected_count) +
- ' subtests)')
- print_with_timestamp(format_test_divider(message, len(message)))
+ message += f' ({test.expected_count} subtests)'
+ stdout.print_with_timestamp(format_test_divider(message, len(message)))
def print_log(log: Iterable[str]) -> None:
- """
- Prints all strings in saved log for test in yellow.
-
- Parameters:
- log - Iterable object with all strings saved in log for test
- """
+ """Prints all strings in saved log for test in yellow."""
for m in log:
- print_with_timestamp(yellow(m))
+ stdout.print_with_timestamp(stdout.yellow(m))
def format_test_result(test: Test) -> str:
"""
@@ -587,15 +528,16 @@ def format_test_result(test: Test) -> str:
String containing formatted test result
"""
if test.status == TestStatus.SUCCESS:
- return (green('[PASSED] ') + test.name)
- elif test.status == TestStatus.SKIPPED:
- return (yellow('[SKIPPED] ') + test.name)
- elif test.status == TestStatus.TEST_CRASHED:
+ return stdout.green('[PASSED] ') + test.name
+ if test.status == TestStatus.SKIPPED:
+ return stdout.yellow('[SKIPPED] ') + test.name
+ if test.status == TestStatus.NO_TESTS:
+ return stdout.yellow('[NO TESTS RUN] ') + test.name
+ if test.status == TestStatus.TEST_CRASHED:
print_log(test.log)
- return (red('[CRASHED] ') + test.name)
- else:
- print_log(test.log)
- return (red('[FAILED] ') + test.name)
+ return stdout.red('[CRASHED] ') + test.name
+ print_log(test.log)
+ return stdout.red('[FAILED] ') + test.name
def print_test_result(test: Test) -> None:
"""
@@ -607,7 +549,7 @@ def print_test_result(test: Test) -> None:
Parameters:
test - Test object representing current test being printed
"""
- print_with_timestamp(format_test_result(test))
+ stdout.print_with_timestamp(format_test_result(test))
def print_test_footer(test: Test) -> None:
"""
@@ -620,8 +562,8 @@ def print_test_footer(test: Test) -> None:
test - Test object representing current test being printed
"""
message = format_test_result(test)
- print_with_timestamp(format_test_divider(message,
- len(message) - ANSI_LEN))
+ stdout.print_with_timestamp(format_test_divider(message,
+ len(message) - stdout.color_len()))
def print_summary_line(test: Test) -> None:
"""
@@ -638,25 +580,12 @@ def print_summary_line(test: Test) -> None:
test - Test object representing current test being printed
"""
if test.status == TestStatus.SUCCESS:
- color = green
- elif test.status == TestStatus.SKIPPED or test.status == TestStatus.NO_TESTS:
- color = yellow
+ color = stdout.green
+ elif test.status in (TestStatus.SKIPPED, TestStatus.NO_TESTS):
+ color = stdout.yellow
else:
- color = red
- counts = test.counts
- print_with_timestamp(color('Testing complete. ' + str(counts)))
-
-def print_error(error_message: str) -> None:
- """
- Prints error message with error format.
-
- Example:
- "[ERROR] Test example: missing test plan!"
-
- Parameters:
- error_message - message describing error
- """
- print_with_timestamp(red('[ERROR] ') + error_message)
+ color = stdout.red
+ stdout.print_with_timestamp(color(f'Testing complete. {test.counts}'))
# Other methods:
@@ -670,7 +599,6 @@ def bubble_up_test_results(test: Test) -> None:
Parameters:
test - Test object for current test being parsed
"""
- parse_crash_in_log(test)
subtests = test.subtests
counts = test.counts
status = test.status
@@ -732,6 +660,7 @@ def parse_test(lines: LineStream, expected_num: int, log: List[str]) -> Test:
# test plan
test.name = "main"
parse_test_plan(lines, test)
+ parent_test = True
else:
		# If KTAP/TAP header is not found, this must be a subtest
		# header or a test result line, so try to parse a subtest
@@ -745,7 +674,7 @@ def parse_test(lines: LineStream, expected_num: int, log: List[str]) -> Test:
expected_count = test.expected_count
subtests = []
test_num = 1
- while expected_count is None or test_num <= expected_count:
+ while parent_test and (expected_count is None or test_num <= expected_count):
# Loop to parse any subtests.
		# Break after parsing the expected number of tests, or,
		# if the expected number is unknown, break when the test
@@ -780,9 +709,18 @@ def parse_test(lines: LineStream, expected_num: int, log: List[str]) -> Test:
parse_test_result(lines, test, expected_num)
else:
test.add_error('missing subtest result line!')
+
+ # Check for there being no tests
+ if parent_test and len(subtests) == 0:
+ # Don't override a bad status if this test had one reported.
+ # Assumption: no subtests means CRASHED is from Test.__init__()
+ if test.status in (TestStatus.TEST_CRASHED, TestStatus.SUCCESS):
+ test.status = TestStatus.NO_TESTS
+ test.add_error('0 tests run!')
+
# Add statuses to TestCounts attribute in Test object
bubble_up_test_results(test)
- if parent_test:
+ if parent_test and not main:
# If test has subtests and is not the main test object, print
# footer.
print_test_footer(test)
@@ -790,28 +728,28 @@ def parse_test(lines: LineStream, expected_num: int, log: List[str]) -> Test:
print_test_result(test)
return test
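A sketch of the new '0 tests run!' path (the same shape test_exec_no_tests exercises below):

    lines = extract_tap_lines(['TAP version 14', '1..0'])
    test = parse_test(lines, 0, [])
    # test.status == TestStatus.NO_TESTS, with a '0 tests run!' error attached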
-def parse_run_tests(kernel_output: Iterable[str]) -> TestResult:
+def parse_run_tests(kernel_output: Iterable[str]) -> Test:
"""
Using kernel output, extract KTAP lines, parse the lines for test
- results and print condensed test results and summary line .
+ results and print condensed test results and summary line.
Parameters:
	kernel_output - Iterable object containing lines of kernel output
Return:
- TestResult - Tuple containg status of main test object, main test
- object with all subtests, and log of all KTAP lines.
+ Test - the main test object with all subtests.
"""
- print_with_timestamp(DIVIDER)
+ stdout.print_with_timestamp(DIVIDER)
lines = extract_tap_lines(kernel_output)
test = Test()
if not lines:
- test.add_error('invalid KTAP input!')
+ test.name = '<missing>'
+ test.add_error('could not find any KTAP output!')
test.status = TestStatus.FAILURE_TO_PARSE_TESTS
else:
test = parse_test(lines, 0, [])
if test.status != TestStatus.NO_TESTS:
test.status = test.counts.get_status()
- print_with_timestamp(DIVIDER)
+ stdout.print_with_timestamp(DIVIDER)
print_summary_line(test)
- return TestResult(test.status, test, lines)
+ return test
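Callers that previously unpacked the TestResult tuple now use the returned Test directly; a hypothetical caller:

    test = parse_run_tests(kernel_output)
    if test.status != TestStatus.SUCCESS:
        ...  # handle failure
    # subtests, counts, and the log all hang off the returned object:
    # test.subtests, test.counts, test.log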
diff --git a/tools/testing/kunit/kunit_printer.py b/tools/testing/kunit/kunit_printer.py
new file mode 100644
index 000000000000..5f1cc55ecdf5
--- /dev/null
+++ b/tools/testing/kunit/kunit_printer.py
@@ -0,0 +1,48 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+#
+# Utilities for printing and coloring output.
+#
+# Copyright (C) 2022, Google LLC.
+# Author: Daniel Latypov <dlatypov@google.com>
+
+import datetime
+import sys
+import typing
+
+_RESET = '\033[0;0m'
+
+class Printer:
+ """Wraps a file object, providing utilities for coloring output, etc."""
+
+ def __init__(self, output: typing.IO):
+ self._output = output
+ self._use_color = output.isatty()
+
+ def print(self, message: str) -> None:
+ print(message, file=self._output)
+
+ def print_with_timestamp(self, message: str) -> None:
+ ts = datetime.datetime.now().strftime('%H:%M:%S')
+ self.print(f'[{ts}] {message}')
+
+ def _color(self, code: str, text: str) -> str:
+ if not self._use_color:
+ return text
+ return code + text + _RESET
+
+ def red(self, text: str) -> str:
+ return self._color('\033[1;31m', text)
+
+ def yellow(self, text: str) -> str:
+ return self._color('\033[1;33m', text)
+
+ def green(self, text: str) -> str:
+ return self._color('\033[1;32m', text)
+
+ def color_len(self) -> int:
+ """Returns the length of the color escape codes."""
+ return len(self.red(''))
+
+# Provides a default instance that prints to stdout
+stdout = Printer(sys.stdout)
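Hypothetical usage of the shared instance (color codes are only emitted when the wrapped stream is a TTY):

    from kunit_printer import stdout

    stdout.print_with_timestamp(stdout.green('[PASSED] ') + 'example')
    # on a TTY:   [13:37:00] [PASSED] example   (with '[PASSED] ' in green)
    # when piped: [13:37:00] [PASSED] example   (plain text, no escape codes)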
diff --git a/tools/testing/kunit/kunit_tool_test.py b/tools/testing/kunit/kunit_tool_test.py
index 9c4126731457..e2cd2cc2e98f 100755
--- a/tools/testing/kunit/kunit_tool_test.py
+++ b/tools/testing/kunit/kunit_tool_test.py
@@ -13,9 +13,10 @@ import tempfile, shutil # Handling test_tmpdir
import itertools
import json
+import os
import signal
import subprocess
-import os
+from typing import Iterable
import kunit_config
import kunit_parser
@@ -44,53 +45,39 @@ class KconfigTest(unittest.TestCase):
self.assertTrue(kconfig0.is_subset_of(kconfig0))
kconfig1 = kunit_config.Kconfig()
- kconfig1.add_entry(kunit_config.KconfigEntry('TEST', 'y'))
+ kconfig1.add_entry('TEST', 'y')
self.assertTrue(kconfig1.is_subset_of(kconfig1))
self.assertTrue(kconfig0.is_subset_of(kconfig1))
self.assertFalse(kconfig1.is_subset_of(kconfig0))
def test_read_from_file(self):
- kconfig = kunit_config.Kconfig()
kconfig_path = test_data_path('test_read_from_file.kconfig')
- kconfig.read_from_file(kconfig_path)
+ kconfig = kunit_config.parse_file(kconfig_path)
expected_kconfig = kunit_config.Kconfig()
- expected_kconfig.add_entry(
- kunit_config.KconfigEntry('UML', 'y'))
- expected_kconfig.add_entry(
- kunit_config.KconfigEntry('MMU', 'y'))
- expected_kconfig.add_entry(
- kunit_config.KconfigEntry('TEST', 'y'))
- expected_kconfig.add_entry(
- kunit_config.KconfigEntry('EXAMPLE_TEST', 'y'))
- expected_kconfig.add_entry(
- kunit_config.KconfigEntry('MK8', 'n'))
-
- self.assertEqual(kconfig.entries(), expected_kconfig.entries())
+ expected_kconfig.add_entry('UML', 'y')
+ expected_kconfig.add_entry('MMU', 'y')
+ expected_kconfig.add_entry('TEST', 'y')
+ expected_kconfig.add_entry('EXAMPLE_TEST', 'y')
+ expected_kconfig.add_entry('MK8', 'n')
+
+ self.assertEqual(kconfig, expected_kconfig)
def test_write_to_file(self):
kconfig_path = os.path.join(test_tmpdir, '.config')
expected_kconfig = kunit_config.Kconfig()
- expected_kconfig.add_entry(
- kunit_config.KconfigEntry('UML', 'y'))
- expected_kconfig.add_entry(
- kunit_config.KconfigEntry('MMU', 'y'))
- expected_kconfig.add_entry(
- kunit_config.KconfigEntry('TEST', 'y'))
- expected_kconfig.add_entry(
- kunit_config.KconfigEntry('EXAMPLE_TEST', 'y'))
- expected_kconfig.add_entry(
- kunit_config.KconfigEntry('MK8', 'n'))
+ expected_kconfig.add_entry('UML', 'y')
+ expected_kconfig.add_entry('MMU', 'y')
+ expected_kconfig.add_entry('TEST', 'y')
+ expected_kconfig.add_entry('EXAMPLE_TEST', 'y')
+ expected_kconfig.add_entry('MK8', 'n')
expected_kconfig.write_to_file(kconfig_path)
- actual_kconfig = kunit_config.Kconfig()
- actual_kconfig.read_from_file(kconfig_path)
-
- self.assertEqual(actual_kconfig.entries(),
- expected_kconfig.entries())
+ actual_kconfig = kunit_config.parse_file(kconfig_path)
+ self.assertEqual(actual_kconfig, expected_kconfig)
class KUnitParserTest(unittest.TestCase):
@@ -179,7 +166,7 @@ class KUnitParserTest(unittest.TestCase):
with open(empty_log) as file:
result = kunit_parser.parse_run_tests(
kunit_parser.extract_tap_lines(file.readlines()))
- self.assertEqual(0, len(result.test.subtests))
+ self.assertEqual(0, len(result.subtests))
self.assertEqual(
kunit_parser.TestStatus.FAILURE_TO_PARSE_TESTS,
result.status)
@@ -191,7 +178,10 @@ class KUnitParserTest(unittest.TestCase):
result = kunit_parser.parse_run_tests(
kunit_parser.extract_tap_lines(
file.readlines()))
- self.assertEqual(2, result.test.counts.errors)
+ # A missing test plan is not an error.
+ self.assertEqual(0, result.counts.errors)
+ # All tests should be accounted for.
+ self.assertEqual(10, result.counts.total())
self.assertEqual(
kunit_parser.TestStatus.SUCCESS,
result.status)
@@ -201,29 +191,32 @@ class KUnitParserTest(unittest.TestCase):
with open(header_log) as file:
result = kunit_parser.parse_run_tests(
kunit_parser.extract_tap_lines(file.readlines()))
- self.assertEqual(0, len(result.test.subtests))
+ self.assertEqual(0, len(result.subtests))
self.assertEqual(
kunit_parser.TestStatus.NO_TESTS,
result.status)
+ def test_no_tests_no_plan(self):
+ no_plan_log = test_data_path('test_is_test_passed-no_tests_no_plan.log')
+ with open(no_plan_log) as file:
+ result = kunit_parser.parse_run_tests(
+ kunit_parser.extract_tap_lines(file.readlines()))
+ self.assertEqual(0, len(result.subtests[0].subtests[0].subtests))
+ self.assertEqual(
+ kunit_parser.TestStatus.NO_TESTS,
+ result.subtests[0].subtests[0].status)
+ self.assertEqual(1, result.counts.errors)
+
+
def test_no_kunit_output(self):
crash_log = test_data_path('test_insufficient_memory.log')
- print_mock = mock.patch('builtins.print').start()
+ print_mock = mock.patch('kunit_printer.Printer.print').start()
with open(crash_log) as file:
result = kunit_parser.parse_run_tests(
kunit_parser.extract_tap_lines(file.readlines()))
- print_mock.assert_any_call(StrContains('invalid KTAP input!'))
+ print_mock.assert_any_call(StrContains('could not find any KTAP output!'))
print_mock.stop()
- self.assertEqual(0, len(result.test.subtests))
-
- def test_crashed_test(self):
- crashed_log = test_data_path('test_is_test_passed-crash.log')
- with open(crashed_log) as file:
- result = kunit_parser.parse_run_tests(
- file.readlines())
- self.assertEqual(
- kunit_parser.TestStatus.TEST_CRASHED,
- result.status)
+ self.assertEqual(0, len(result.subtests))
def test_skipped_test(self):
skipped_log = test_data_path('test_skip_tests.log')
@@ -246,8 +239,8 @@ class KUnitParserTest(unittest.TestCase):
def test_ignores_hyphen(self):
hyphen_log = test_data_path('test_strip_hyphen.log')
- file = open(hyphen_log)
- result = kunit_parser.parse_run_tests(file.readlines())
+ with open(hyphen_log) as file:
+ result = kunit_parser.parse_run_tests(file.readlines())
# A skipped test does not fail the whole suite.
self.assertEqual(
@@ -255,10 +248,10 @@ class KUnitParserTest(unittest.TestCase):
result.status)
self.assertEqual(
"sysctl_test",
- result.test.subtests[0].name)
+ result.subtests[0].name)
self.assertEqual(
"example",
- result.test.subtests[1].name)
+ result.subtests[1].name)
@@ -269,7 +262,7 @@ class KUnitParserTest(unittest.TestCase):
self.assertEqual(
kunit_parser.TestStatus.SUCCESS,
result.status)
- self.assertEqual('kunit-resource-test', result.test.subtests[0].name)
+ self.assertEqual('kunit-resource-test', result.subtests[0].name)
def test_ignores_multiple_prefixes(self):
prefix_log = test_data_path('test_multiple_prefixes.log')
@@ -278,7 +271,7 @@ class KUnitParserTest(unittest.TestCase):
self.assertEqual(
kunit_parser.TestStatus.SUCCESS,
result.status)
- self.assertEqual('kunit-resource-test', result.test.subtests[0].name)
+ self.assertEqual('kunit-resource-test', result.subtests[0].name)
def test_prefix_mixed_kernel_output(self):
mixed_prefix_log = test_data_path('test_interrupted_tap_output.log')
@@ -287,7 +280,7 @@ class KUnitParserTest(unittest.TestCase):
self.assertEqual(
kunit_parser.TestStatus.SUCCESS,
result.status)
- self.assertEqual('kunit-resource-test', result.test.subtests[0].name)
+ self.assertEqual('kunit-resource-test', result.subtests[0].name)
def test_prefix_poundsign(self):
pound_log = test_data_path('test_pound_sign.log')
@@ -296,7 +289,7 @@ class KUnitParserTest(unittest.TestCase):
self.assertEqual(
kunit_parser.TestStatus.SUCCESS,
result.status)
- self.assertEqual('kunit-resource-test', result.test.subtests[0].name)
+ self.assertEqual('kunit-resource-test', result.subtests[0].name)
def test_kernel_panic_end(self):
panic_log = test_data_path('test_kernel_panic_interrupt.log')
@@ -305,7 +298,7 @@ class KUnitParserTest(unittest.TestCase):
self.assertEqual(
kunit_parser.TestStatus.TEST_CRASHED,
result.status)
- self.assertEqual('kunit-resource-test', result.test.subtests[0].name)
+ self.assertEqual('kunit-resource-test', result.subtests[0].name)
def test_pound_no_prefix(self):
pound_log = test_data_path('test_pound_no_prefix.log')
@@ -314,7 +307,46 @@ class KUnitParserTest(unittest.TestCase):
self.assertEqual(
kunit_parser.TestStatus.SUCCESS,
result.status)
- self.assertEqual('kunit-resource-test', result.test.subtests[0].name)
+ self.assertEqual('kunit-resource-test', result.subtests[0].name)
+
+def line_stream_from_strs(strs: Iterable[str]) -> kunit_parser.LineStream:
+ return kunit_parser.LineStream(enumerate(strs, start=1))
+
+class LineStreamTest(unittest.TestCase):
+
+ def test_basic(self):
+ stream = line_stream_from_strs(['hello', 'world'])
+
+ self.assertTrue(stream, msg='Should be more input')
+ self.assertEqual(stream.line_number(), 1)
+ self.assertEqual(stream.peek(), 'hello')
+ self.assertEqual(stream.pop(), 'hello')
+
+ self.assertTrue(stream, msg='Should be more input')
+ self.assertEqual(stream.line_number(), 2)
+ self.assertEqual(stream.peek(), 'world')
+ self.assertEqual(stream.pop(), 'world')
+
+ self.assertFalse(stream, msg='Should be no more input')
+ with self.assertRaisesRegex(ValueError, 'LineStream: going past EOF'):
+ stream.pop()
+
+ def test_is_lazy(self):
+ called_times = 0
+ def generator():
+ nonlocal called_times
+		for _ in range(1, 5):
+ called_times += 1
+ yield called_times, str(called_times)
+
+ stream = kunit_parser.LineStream(generator())
+ self.assertEqual(called_times, 0)
+
+ self.assertEqual(stream.pop(), '1')
+ self.assertEqual(called_times, 1)
+
+ self.assertEqual(stream.pop(), '2')
+ self.assertEqual(called_times, 2)
class LinuxSourceTreeTest(unittest.TestCase):
@@ -324,17 +356,53 @@ class LinuxSourceTreeTest(unittest.TestCase):
def test_invalid_kunitconfig(self):
with self.assertRaisesRegex(kunit_kernel.ConfigError, 'nonexistent.* does not exist'):
- kunit_kernel.LinuxSourceTree('', kunitconfig_path='/nonexistent_file')
+ kunit_kernel.LinuxSourceTree('', kunitconfig_paths=['/nonexistent_file'])
def test_valid_kunitconfig(self):
with tempfile.NamedTemporaryFile('wt') as kunitconfig:
- kunit_kernel.LinuxSourceTree('', kunitconfig_path=kunitconfig.name)
+ kunit_kernel.LinuxSourceTree('', kunitconfig_paths=[kunitconfig.name])
def test_dir_kunitconfig(self):
with tempfile.TemporaryDirectory('') as dir:
with open(os.path.join(dir, '.kunitconfig'), 'w'):
pass
- kunit_kernel.LinuxSourceTree('', kunitconfig_path=dir)
+ kunit_kernel.LinuxSourceTree('', kunitconfig_paths=[dir])
+
+ def test_multiple_kunitconfig(self):
+ want_kconfig = kunit_config.Kconfig()
+ want_kconfig.add_entry('KUNIT', 'y')
+ want_kconfig.add_entry('KUNIT_TEST', 'm')
+
+ with tempfile.TemporaryDirectory('') as dir:
+ other = os.path.join(dir, 'otherkunitconfig')
+ with open(os.path.join(dir, '.kunitconfig'), 'w') as f:
+ f.write('CONFIG_KUNIT=y')
+ with open(other, 'w') as f:
+ f.write('CONFIG_KUNIT_TEST=m')
+
+ tree = kunit_kernel.LinuxSourceTree('', kunitconfig_paths=[dir, other])
+ self.assertTrue(want_kconfig.is_subset_of(tree._kconfig), msg=tree._kconfig)
+
+
+ def test_multiple_kunitconfig_invalid(self):
+ with tempfile.TemporaryDirectory('') as dir:
+ other = os.path.join(dir, 'otherkunitconfig')
+ with open(os.path.join(dir, '.kunitconfig'), 'w') as f:
+ f.write('CONFIG_KUNIT=y')
+ with open(other, 'w') as f:
+ f.write('CONFIG_KUNIT=m')
+
+ with self.assertRaisesRegex(kunit_kernel.ConfigError, '(?s)Multiple values.*CONFIG_KUNIT'):
+ kunit_kernel.LinuxSourceTree('', kunitconfig_paths=[dir, other])
+
+
+ def test_kconfig_add(self):
+ want_kconfig = kunit_config.Kconfig()
+ want_kconfig.add_entry('NOT_REAL', 'y')
+
+ tree = kunit_kernel.LinuxSourceTree('', kconfig_add=['CONFIG_NOT_REAL=y'])
+ self.assertTrue(want_kconfig.is_subset_of(tree._kconfig), msg=tree._kconfig)
def test_invalid_arch(self):
with self.assertRaisesRegex(kunit_kernel.ConfigError, 'not a valid arch, options are.*x86_64'):
@@ -345,7 +413,7 @@ class LinuxSourceTreeTest(unittest.TestCase):
return subprocess.Popen(['echo "hi\nbye"'], shell=True, text=True, stdout=subprocess.PIPE)
with tempfile.TemporaryDirectory('') as build_dir:
- tree = kunit_kernel.LinuxSourceTree(build_dir, load_config=False)
+ tree = kunit_kernel.LinuxSourceTree(build_dir)
mock.patch.object(tree._ops, 'start', side_effect=fake_start).start()
with self.assertRaises(ValueError):
@@ -356,6 +424,63 @@ class LinuxSourceTreeTest(unittest.TestCase):
with open(kunit_kernel.get_outfile_path(build_dir), 'rt') as outfile:
self.assertEqual(outfile.read(), 'hi\nbye\n', msg='Missing some output')
+ def test_build_reconfig_no_config(self):
+ with tempfile.TemporaryDirectory('') as build_dir:
+ with open(kunit_kernel.get_kunitconfig_path(build_dir), 'w') as f:
+ f.write('CONFIG_KUNIT=y')
+
+ tree = kunit_kernel.LinuxSourceTree(build_dir)
+			# Stub out the source tree operations, so the defaults
+			# for any given architecture don't get in the way.
+ tree._ops = kunit_kernel.LinuxSourceTreeOperations('none', None)
+ mock_build_config = mock.patch.object(tree, 'build_config').start()
+
+ # Should generate the .config
+ self.assertTrue(tree.build_reconfig(build_dir, make_options=[]))
+ mock_build_config.assert_called_once_with(build_dir, [])
+
+ def test_build_reconfig_existing_config(self):
+ with tempfile.TemporaryDirectory('') as build_dir:
+ # Existing .config is a superset, should not touch it
+ with open(kunit_kernel.get_kunitconfig_path(build_dir), 'w') as f:
+ f.write('CONFIG_KUNIT=y')
+ with open(kunit_kernel.get_old_kunitconfig_path(build_dir), 'w') as f:
+ f.write('CONFIG_KUNIT=y')
+ with open(kunit_kernel.get_kconfig_path(build_dir), 'w') as f:
+ f.write('CONFIG_KUNIT=y\nCONFIG_KUNIT_TEST=y')
+
+ tree = kunit_kernel.LinuxSourceTree(build_dir)
+			# Stub out the source tree operations, so the defaults
+			# for any given architecture don't get in the way.
+ tree._ops = kunit_kernel.LinuxSourceTreeOperations('none', None)
+ mock_build_config = mock.patch.object(tree, 'build_config').start()
+
+ self.assertTrue(tree.build_reconfig(build_dir, make_options=[]))
+ self.assertEqual(mock_build_config.call_count, 0)
+
+ def test_build_reconfig_remove_option(self):
+ with tempfile.TemporaryDirectory('') as build_dir:
+ # We removed CONFIG_KUNIT_TEST=y from our .kunitconfig...
+ with open(kunit_kernel.get_kunitconfig_path(build_dir), 'w') as f:
+ f.write('CONFIG_KUNIT=y')
+ with open(kunit_kernel.get_old_kunitconfig_path(build_dir), 'w') as f:
+ f.write('CONFIG_KUNIT=y\nCONFIG_KUNIT_TEST=y')
+ with open(kunit_kernel.get_kconfig_path(build_dir), 'w') as f:
+ f.write('CONFIG_KUNIT=y\nCONFIG_KUNIT_TEST=y')
+
+ tree = kunit_kernel.LinuxSourceTree(build_dir)
+			# Stub out the source tree operations, so the defaults
+			# for any given architecture don't get in the way.
+ tree._ops = kunit_kernel.LinuxSourceTreeOperations('none', None)
+ mock_build_config = mock.patch.object(tree, 'build_config').start()
+
+ # ... so we should trigger a call to build_config()
+ self.assertTrue(tree.build_reconfig(build_dir, make_options=[]))
+ mock_build_config.assert_called_once_with(build_dir, [])
+
# TODO: add more test cases.
@@ -365,10 +490,8 @@ class KUnitJsonTest(unittest.TestCase):
with open(test_data_path(log_file)) as file:
test_result = kunit_parser.parse_run_tests(file)
json_obj = kunit_json.get_json_result(
- test_result=test_result,
- def_config='kunit_defconfig',
- build_dir=None,
- json_path='stdout')
+ test=test_result,
+ metadata=kunit_json.Metadata())
return json.loads(json_obj)
def test_failed_test_json(self):
@@ -378,10 +501,16 @@ class KUnitJsonTest(unittest.TestCase):
result["sub_groups"][1]["test_cases"][0])
def test_crashed_test_json(self):
- result = self._json_for('test_is_test_passed-crash.log')
+ result = self._json_for('test_kernel_panic_interrupt.log')
self.assertEqual(
- {'name': 'example_simple_test', 'status': 'ERROR'},
- result["sub_groups"][1]["test_cases"][0])
+ {'name': '', 'status': 'ERROR'},
+ result["sub_groups"][2]["test_cases"][1])
+
+ def test_skipped_test_json(self):
+ result = self._json_for('test_skip_tests.log')
+ self.assertEqual(
+ {'name': 'example_skip_test', 'status': 'SKIP'},
+ result["sub_groups"][1]["test_cases"][1])
def test_no_tests_json(self):
result = self._json_for('test_is_test_passed-no_tests_run_with_header.log')
@@ -403,27 +532,28 @@ class KUnitMainTest(unittest.TestCase):
with open(path) as file:
all_passed_log = file.readlines()
- self.print_mock = mock.patch('builtins.print').start()
+ self.print_mock = mock.patch('kunit_printer.Printer.print').start()
self.addCleanup(mock.patch.stopall)
- self.linux_source_mock = mock.Mock()
- self.linux_source_mock.build_reconfig = mock.Mock(return_value=True)
- self.linux_source_mock.build_kernel = mock.Mock(return_value=True)
- self.linux_source_mock.run_kernel = mock.Mock(return_value=all_passed_log)
+ self.mock_linux_init = mock.patch.object(kunit_kernel, 'LinuxSourceTree').start()
+ self.linux_source_mock = self.mock_linux_init.return_value
+ self.linux_source_mock.build_reconfig.return_value = True
+ self.linux_source_mock.build_kernel.return_value = True
+ self.linux_source_mock.run_kernel.return_value = all_passed_log
def test_config_passes_args_pass(self):
- kunit.main(['config', '--build_dir=.kunit'], self.linux_source_mock)
+ kunit.main(['config', '--build_dir=.kunit'])
self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1)
self.assertEqual(self.linux_source_mock.run_kernel.call_count, 0)
def test_build_passes_args_pass(self):
- kunit.main(['build'], self.linux_source_mock)
- self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 0)
- self.linux_source_mock.build_kernel.assert_called_once_with(False, 8, '.kunit', None)
+ kunit.main(['build'])
+ self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1)
+ self.linux_source_mock.build_kernel.assert_called_once_with(kunit.get_default_jobs(), '.kunit', None)
self.assertEqual(self.linux_source_mock.run_kernel.call_count, 0)
def test_exec_passes_args_pass(self):
- kunit.main(['exec'], self.linux_source_mock)
+ kunit.main(['exec'])
self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 0)
self.assertEqual(self.linux_source_mock.run_kernel.call_count, 1)
self.linux_source_mock.run_kernel.assert_called_once_with(
@@ -431,7 +561,7 @@ class KUnitMainTest(unittest.TestCase):
self.print_mock.assert_any_call(StrContains('Testing complete.'))
def test_run_passes_args_pass(self):
- kunit.main(['run'], self.linux_source_mock)
+ kunit.main(['run'])
self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1)
self.assertEqual(self.linux_source_mock.run_kernel.call_count, 1)
self.linux_source_mock.run_kernel.assert_called_once_with(
@@ -441,29 +571,30 @@ class KUnitMainTest(unittest.TestCase):
def test_exec_passes_args_fail(self):
self.linux_source_mock.run_kernel = mock.Mock(return_value=[])
with self.assertRaises(SystemExit) as e:
- kunit.main(['exec'], self.linux_source_mock)
+ kunit.main(['exec'])
self.assertEqual(e.exception.code, 1)
def test_run_passes_args_fail(self):
self.linux_source_mock.run_kernel = mock.Mock(return_value=[])
with self.assertRaises(SystemExit) as e:
- kunit.main(['run'], self.linux_source_mock)
+ kunit.main(['run'])
self.assertEqual(e.exception.code, 1)
self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1)
self.assertEqual(self.linux_source_mock.run_kernel.call_count, 1)
- self.print_mock.assert_any_call(StrContains('invalid KTAP input!'))
+ self.print_mock.assert_any_call(StrContains('could not find any KTAP output!'))
def test_exec_no_tests(self):
self.linux_source_mock.run_kernel = mock.Mock(return_value=['TAP version 14', '1..0'])
with self.assertRaises(SystemExit) as e:
- kunit.main(['run'], self.linux_source_mock)
+ kunit.main(['run'])
+ self.assertEqual(e.exception.code, 1)
self.linux_source_mock.run_kernel.assert_called_once_with(
args=None, build_dir='.kunit', filter_glob='', timeout=300)
self.print_mock.assert_any_call(StrContains(' 0 tests run!'))
def test_exec_raw_output(self):
self.linux_source_mock.run_kernel = mock.Mock(return_value=[])
- kunit.main(['exec', '--raw_output'], self.linux_source_mock)
+ kunit.main(['exec', '--raw_output'])
self.assertEqual(self.linux_source_mock.run_kernel.call_count, 1)
for call in self.print_mock.call_args_list:
self.assertNotEqual(call, mock.call(StrContains('Testing complete.')))
@@ -471,7 +602,7 @@ class KUnitMainTest(unittest.TestCase):
def test_run_raw_output(self):
self.linux_source_mock.run_kernel = mock.Mock(return_value=[])
- kunit.main(['run', '--raw_output'], self.linux_source_mock)
+ kunit.main(['run', '--raw_output'])
self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1)
self.assertEqual(self.linux_source_mock.run_kernel.call_count, 1)
for call in self.print_mock.call_args_list:
@@ -480,31 +611,37 @@ class KUnitMainTest(unittest.TestCase):
def test_run_raw_output_kunit(self):
self.linux_source_mock.run_kernel = mock.Mock(return_value=[])
- kunit.main(['run', '--raw_output=kunit'], self.linux_source_mock)
+ kunit.main(['run', '--raw_output=kunit'])
self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1)
self.assertEqual(self.linux_source_mock.run_kernel.call_count, 1)
for call in self.print_mock.call_args_list:
self.assertNotEqual(call, mock.call(StrContains('Testing complete.')))
self.assertNotEqual(call, mock.call(StrContains(' 0 tests run')))
+ def test_run_raw_output_invalid(self):
+ self.linux_source_mock.run_kernel = mock.Mock(return_value=[])
+ with self.assertRaises(SystemExit) as e:
+ kunit.main(['run', '--raw_output=invalid'])
+ self.assertNotEqual(e.exception.code, 0)
+
def test_run_raw_output_does_not_take_positional_args(self):
# --raw_output is a string flag, but we don't want it to consume
# any positional arguments, only ones after an '='
self.linux_source_mock.run_kernel = mock.Mock(return_value=[])
- kunit.main(['run', '--raw_output', 'filter_glob'], self.linux_source_mock)
+ kunit.main(['run', '--raw_output', 'filter_glob'])
self.linux_source_mock.run_kernel.assert_called_once_with(
args=None, build_dir='.kunit', filter_glob='filter_glob', timeout=300)
def test_exec_timeout(self):
timeout = 3453
- kunit.main(['exec', '--timeout', str(timeout)], self.linux_source_mock)
+ kunit.main(['exec', '--timeout', str(timeout)])
self.linux_source_mock.run_kernel.assert_called_once_with(
args=None, build_dir='.kunit', filter_glob='', timeout=timeout)
self.print_mock.assert_any_call(StrContains('Testing complete.'))
def test_run_timeout(self):
timeout = 3453
- kunit.main(['run', '--timeout', str(timeout)], self.linux_source_mock)
+ kunit.main(['run', '--timeout', str(timeout)])
self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1)
self.linux_source_mock.run_kernel.assert_called_once_with(
args=None, build_dir='.kunit', filter_glob='', timeout=timeout)
@@ -512,7 +649,7 @@ class KUnitMainTest(unittest.TestCase):
def test_run_builddir(self):
build_dir = '.kunit'
- kunit.main(['run', '--build_dir=.kunit'], self.linux_source_mock)
+ kunit.main(['run', '--build_dir=.kunit'])
self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1)
self.linux_source_mock.run_kernel.assert_called_once_with(
args=None, build_dir=build_dir, filter_glob='', timeout=300)
@@ -520,45 +657,93 @@ class KUnitMainTest(unittest.TestCase):
def test_config_builddir(self):
build_dir = '.kunit'
- kunit.main(['config', '--build_dir', build_dir], self.linux_source_mock)
+ kunit.main(['config', '--build_dir', build_dir])
self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1)
def test_build_builddir(self):
build_dir = '.kunit'
- kunit.main(['build', '--build_dir', build_dir], self.linux_source_mock)
- self.linux_source_mock.build_kernel.assert_called_once_with(False, 8, build_dir, None)
+ jobs = kunit.get_default_jobs()
+ kunit.main(['build', '--build_dir', build_dir])
+ self.linux_source_mock.build_kernel.assert_called_once_with(jobs, build_dir, None)
def test_exec_builddir(self):
build_dir = '.kunit'
- kunit.main(['exec', '--build_dir', build_dir], self.linux_source_mock)
+ kunit.main(['exec', '--build_dir', build_dir])
self.linux_source_mock.run_kernel.assert_called_once_with(
args=None, build_dir=build_dir, filter_glob='', timeout=300)
self.print_mock.assert_any_call(StrContains('Testing complete.'))
- @mock.patch.object(kunit_kernel, 'LinuxSourceTree')
- def test_run_kunitconfig(self, mock_linux_init):
- mock_linux_init.return_value = self.linux_source_mock
+ def test_run_kunitconfig(self):
kunit.main(['run', '--kunitconfig=mykunitconfig'])
# Just verify that we parsed and initialized it correctly here.
- mock_linux_init.assert_called_once_with('.kunit',
- kunitconfig_path='mykunitconfig',
- arch='um',
- cross_compile=None,
- qemu_config_path=None)
+ self.mock_linux_init.assert_called_once_with('.kunit',
+ kunitconfig_paths=['mykunitconfig'],
+ kconfig_add=None,
+ arch='um',
+ cross_compile=None,
+ qemu_config_path=None,
+ extra_qemu_args=[])
+
+ def test_config_kunitconfig(self):
+ kunit.main(['config', '--kunitconfig=mykunitconfig'])
+ # Just verify that we parsed and initialized it correctly here.
+ self.mock_linux_init.assert_called_once_with('.kunit',
+ kunitconfig_paths=['mykunitconfig'],
+ kconfig_add=None,
+ arch='um',
+ cross_compile=None,
+ qemu_config_path=None,
+ extra_qemu_args=[])
+
+ def test_config_alltests(self):
+ kunit.main(['config', '--kunitconfig=mykunitconfig', '--alltests'])
+ # Just verify that we parsed and initialized it correctly here.
+ self.mock_linux_init.assert_called_once_with('.kunit',
+ kunitconfig_paths=[kunit_kernel.ALL_TESTS_CONFIG_PATH, 'mykunitconfig'],
+ kconfig_add=None,
+ arch='um',
+ cross_compile=None,
+ qemu_config_path=None,
+ extra_qemu_args=[])
+
@mock.patch.object(kunit_kernel, 'LinuxSourceTree')
- def test_config_kunitconfig(self, mock_linux_init):
+ def test_run_multiple_kunitconfig(self, mock_linux_init):
mock_linux_init.return_value = self.linux_source_mock
- kunit.main(['config', '--kunitconfig=mykunitconfig'])
+ kunit.main(['run', '--kunitconfig=mykunitconfig', '--kunitconfig=other'])
# Just verify that we parsed and initialized it correctly here.
mock_linux_init.assert_called_once_with('.kunit',
- kunitconfig_path='mykunitconfig',
+ kunitconfig_paths=['mykunitconfig', 'other'],
+ kconfig_add=None,
arch='um',
cross_compile=None,
- qemu_config_path=None)
+ qemu_config_path=None,
+ extra_qemu_args=[])
+
+ def test_run_kconfig_add(self):
+ kunit.main(['run', '--kconfig_add=CONFIG_KASAN=y', '--kconfig_add=CONFIG_KCSAN=y'])
+ # Just verify that we parsed and initialized it correctly here.
+ self.mock_linux_init.assert_called_once_with('.kunit',
+ kunitconfig_paths=[],
+ kconfig_add=['CONFIG_KASAN=y', 'CONFIG_KCSAN=y'],
+ arch='um',
+ cross_compile=None,
+ qemu_config_path=None,
+ extra_qemu_args=[])
+
+ def test_run_qemu_args(self):
+ kunit.main(['run', '--arch=x86_64', '--qemu_args', '-m 2048'])
+ # Just verify that we parsed and initialized it correctly here.
+ self.mock_linux_init.assert_called_once_with('.kunit',
+ kunitconfig_paths=[],
+ kconfig_add=None,
+ arch='x86_64',
+ cross_compile=None,
+ qemu_config_path=None,
+ extra_qemu_args=['-m', '2048'])
def test_run_kernel_args(self):
- kunit.main(['run', '--kernel_args=a=1', '--kernel_args=b=2'], self.linux_source_mock)
+ kunit.main(['run', '--kernel_args=a=1', '--kernel_args=b=2'])
self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1)
self.linux_source_mock.run_kernel.assert_called_once_with(
args=['a=1','b=2'], build_dir='.kunit', filter_glob='', timeout=300)
@@ -569,7 +754,7 @@ class KUnitMainTest(unittest.TestCase):
self.linux_source_mock.run_kernel.return_value = ['TAP version 14', 'init: random output'] + want
got = kunit._list_tests(self.linux_source_mock,
- kunit.KunitExecRequest(300, '.kunit', False, 'suite*', None, 'suite'))
+ kunit.KunitExecRequest(None, None, '.kunit', 300, 'suite*', None, 'suite'))
self.assertEqual(got, want)
# Should respect the user's filter glob when listing tests.
@@ -580,11 +765,11 @@ class KUnitMainTest(unittest.TestCase):
@mock.patch.object(kunit, '_list_tests')
def test_run_isolated_by_suite(self, mock_tests):
mock_tests.return_value = ['suite.test1', 'suite.test2', 'suite2.test1']
- kunit.main(['exec', '--run_isolated=suite', 'suite*.test*'], self.linux_source_mock)
+ kunit.main(['exec', '--run_isolated=suite', 'suite*.test*'])
# Should respect the user's filter glob when listing tests.
mock_tests.assert_called_once_with(mock.ANY,
- kunit.KunitExecRequest(300, '.kunit', False, 'suite*.test*', None, 'suite'))
+ kunit.KunitExecRequest(None, None, '.kunit', 300, 'suite*.test*', None, 'suite'))
self.linux_source_mock.run_kernel.assert_has_calls([
mock.call(args=None, build_dir='.kunit', filter_glob='suite.test*', timeout=300),
mock.call(args=None, build_dir='.kunit', filter_glob='suite2.test*', timeout=300),
@@ -593,11 +778,11 @@ class KUnitMainTest(unittest.TestCase):
@mock.patch.object(kunit, '_list_tests')
def test_run_isolated_by_test(self, mock_tests):
mock_tests.return_value = ['suite.test1', 'suite.test2', 'suite2.test1']
- kunit.main(['exec', '--run_isolated=test', 'suite*'], self.linux_source_mock)
+ kunit.main(['exec', '--run_isolated=test', 'suite*'])
# Should respect the user's filter glob when listing tests.
mock_tests.assert_called_once_with(mock.ANY,
- kunit.KunitExecRequest(300, '.kunit', False, 'suite*', None, 'test'))
+ kunit.KunitExecRequest(None, None, '.kunit', 300, 'suite*', None, 'test'))
self.linux_source_mock.run_kernel.assert_has_calls([
mock.call(args=None, build_dir='.kunit', filter_glob='suite.test1', timeout=300),
mock.call(args=None, build_dir='.kunit', filter_glob='suite.test2', timeout=300),
diff --git a/tools/testing/kunit/qemu_config.py b/tools/testing/kunit/qemu_config.py
index 1672f6184e95..0b6a80398ccc 100644
--- a/tools/testing/kunit/qemu_config.py
+++ b/tools/testing/kunit/qemu_config.py
@@ -5,12 +5,15 @@
# Copyright (C) 2021, Google LLC.
# Author: Brendan Higgins <brendanhiggins@google.com>
-from collections import namedtuple
+from dataclasses import dataclass
+from typing import List
-QemuArchParams = namedtuple('QemuArchParams', ['linux_arch',
- 'kconfig',
- 'qemu_arch',
- 'kernel_path',
- 'kernel_command_line',
- 'extra_qemu_params'])
+@dataclass(frozen=True)
+class QemuArchParams:
+ linux_arch: str
+ kconfig: str
+ qemu_arch: str
+ kernel_path: str
+ kernel_command_line: str
+ extra_qemu_params: List[str]
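The per-arch configs below change in tandem: extra_qemu_params entries become separate argv tokens ('-machine', 'virt') rather than space-joined strings ('-machine virt'), so they can be passed to QEMU via subprocess without shell-style splitting. A sketch with illustrative values:

    QEMU_ARCH = QemuArchParams(linux_arch='arm64',
                               kconfig='CONFIG_SERIAL_AMBA_PL011=y',
                               qemu_arch='aarch64',
                               kernel_path='arch/arm64/boot/Image.gz',
                               kernel_command_line='console=ttyAMA0',
                               extra_qemu_params=['-machine', 'virt'])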
diff --git a/tools/testing/kunit/qemu_configs/alpha.py b/tools/testing/kunit/qemu_configs/alpha.py
index 5d0c0cff03bd..3ac846e03a6b 100644
--- a/tools/testing/kunit/qemu_configs/alpha.py
+++ b/tools/testing/kunit/qemu_configs/alpha.py
@@ -7,4 +7,4 @@ CONFIG_SERIAL_8250_CONSOLE=y''',
qemu_arch='alpha',
kernel_path='arch/alpha/boot/vmlinux',
kernel_command_line='console=ttyS0',
- extra_qemu_params=[''])
+ extra_qemu_params=[])
diff --git a/tools/testing/kunit/qemu_configs/arm.py b/tools/testing/kunit/qemu_configs/arm.py
index b9c2a35e0296..db2160200566 100644
--- a/tools/testing/kunit/qemu_configs/arm.py
+++ b/tools/testing/kunit/qemu_configs/arm.py
@@ -10,4 +10,4 @@ CONFIG_SERIAL_AMBA_PL011_CONSOLE=y''',
qemu_arch='arm',
kernel_path='arch/arm/boot/zImage',
kernel_command_line='console=ttyAMA0',
- extra_qemu_params=['-machine virt'])
+ extra_qemu_params=['-machine', 'virt'])
diff --git a/tools/testing/kunit/qemu_configs/arm64.py b/tools/testing/kunit/qemu_configs/arm64.py
index 517c04459f47..67d04064f785 100644
--- a/tools/testing/kunit/qemu_configs/arm64.py
+++ b/tools/testing/kunit/qemu_configs/arm64.py
@@ -9,4 +9,4 @@ CONFIG_SERIAL_AMBA_PL011_CONSOLE=y''',
qemu_arch='aarch64',
kernel_path='arch/arm64/boot/Image.gz',
kernel_command_line='console=ttyAMA0',
- extra_qemu_params=['-machine virt', '-cpu cortex-a57'])
+ extra_qemu_params=['-machine', 'virt', '-cpu', 'cortex-a57'])
diff --git a/tools/testing/kunit/qemu_configs/i386.py b/tools/testing/kunit/qemu_configs/i386.py
index aed3ffd3937d..4463ebefd567 100644
--- a/tools/testing/kunit/qemu_configs/i386.py
+++ b/tools/testing/kunit/qemu_configs/i386.py
@@ -4,7 +4,7 @@ QEMU_ARCH = QemuArchParams(linux_arch='i386',
kconfig='''
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y''',
- qemu_arch='x86_64',
+ qemu_arch='i386',
kernel_path='arch/x86/boot/bzImage',
kernel_command_line='console=ttyS0',
- extra_qemu_params=[''])
+ extra_qemu_params=[])
diff --git a/tools/testing/kunit/qemu_configs/powerpc.py b/tools/testing/kunit/qemu_configs/powerpc.py
index 35e9de24f0db..7ec38d4131f7 100644
--- a/tools/testing/kunit/qemu_configs/powerpc.py
+++ b/tools/testing/kunit/qemu_configs/powerpc.py
@@ -9,4 +9,4 @@ CONFIG_HVC_CONSOLE=y''',
qemu_arch='ppc64',
kernel_path='vmlinux',
kernel_command_line='console=ttyS0',
- extra_qemu_params=['-M pseries', '-cpu power8'])
+ extra_qemu_params=['-M', 'pseries', '-cpu', 'power8'])
diff --git a/tools/testing/kunit/qemu_configs/riscv.py b/tools/testing/kunit/qemu_configs/riscv.py
index 9e528087cd7c..12a1d525978a 100644
--- a/tools/testing/kunit/qemu_configs/riscv.py
+++ b/tools/testing/kunit/qemu_configs/riscv.py
@@ -3,17 +3,13 @@ import os
import os.path
import sys
-GITHUB_OPENSBI_URL = 'https://github.com/qemu/qemu/raw/master/pc-bios/opensbi-riscv64-generic-fw_dynamic.bin'
-OPENSBI_FILE = os.path.basename(GITHUB_OPENSBI_URL)
+OPENSBI_FILE = 'opensbi-riscv64-generic-fw_dynamic.bin'
+OPENSBI_PATH = '/usr/share/qemu/' + OPENSBI_FILE
-if not os.path.isfile(OPENSBI_FILE):
- print('\n\nOpenSBI file is not in the current working directory.\n'
- 'Would you like me to download it for you from:\n' + GITHUB_OPENSBI_URL + ' ?\n')
- response = input('yes/[no]: ')
- if response.strip() == 'yes':
- os.system('wget ' + GITHUB_OPENSBI_URL)
- else:
- sys.exit()
+if not os.path.isfile(OPENSBI_PATH):
+	print('\n\nOpenSBI BIOS was not found in "' + OPENSBI_PATH + '".\n'
+ 'Please ensure that qemu-system-riscv is installed, or edit the path in "qemu_configs/riscv.py"\n')
+ sys.exit()
QEMU_ARCH = QemuArchParams(linux_arch='riscv',
kconfig='''
@@ -21,11 +17,12 @@ CONFIG_SOC_VIRT=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_RISCV_SBI_V01=y
CONFIG_SERIAL_EARLYCON_RISCV_SBI=y''',
qemu_arch='riscv64',
kernel_path='arch/riscv/boot/Image',
kernel_command_line='console=ttyS0',
extra_qemu_params=[
- '-machine virt',
- '-cpu rv64',
- '-bios opensbi-riscv64-generic-fw_dynamic.bin'])
+ '-machine', 'virt',
+ '-cpu', 'rv64',
+ '-bios', OPENSBI_PATH])
diff --git a/tools/testing/kunit/qemu_configs/s390.py b/tools/testing/kunit/qemu_configs/s390.py
index e310bd521113..98fa4fb60c0a 100644
--- a/tools/testing/kunit/qemu_configs/s390.py
+++ b/tools/testing/kunit/qemu_configs/s390.py
@@ -10,5 +10,5 @@ CONFIG_MODULES=y''',
kernel_path='arch/s390/boot/bzImage',
kernel_command_line='console=ttyS0',
extra_qemu_params=[
- '-machine s390-ccw-virtio',
- '-cpu qemu',])
+ '-machine', 's390-ccw-virtio',
+ '-cpu', 'qemu',])
diff --git a/tools/testing/kunit/qemu_configs/sparc.py b/tools/testing/kunit/qemu_configs/sparc.py
index 27f474e7ad6e..e975c4331a7c 100644
--- a/tools/testing/kunit/qemu_configs/sparc.py
+++ b/tools/testing/kunit/qemu_configs/sparc.py
@@ -7,4 +7,4 @@ CONFIG_SERIAL_8250_CONSOLE=y''',
qemu_arch='sparc',
kernel_path='arch/sparc/boot/zImage',
kernel_command_line='console=ttyS0 mem=256M',
- extra_qemu_params=['-m 256'])
+ extra_qemu_params=['-m', '256'])
diff --git a/tools/testing/kunit/qemu_configs/x86_64.py b/tools/testing/kunit/qemu_configs/x86_64.py
index 77ab1aeee8a3..dc7949076863 100644
--- a/tools/testing/kunit/qemu_configs/x86_64.py
+++ b/tools/testing/kunit/qemu_configs/x86_64.py
@@ -7,4 +7,4 @@ CONFIG_SERIAL_8250_CONSOLE=y''',
qemu_arch='x86_64',
kernel_path='arch/x86/boot/bzImage',
kernel_command_line='console=ttyS0',
- extra_qemu_params=[''])
+ extra_qemu_params=[])
diff --git a/tools/testing/kunit/run_checks.py b/tools/testing/kunit/run_checks.py
new file mode 100755
index 000000000000..066e6f938f6d
--- /dev/null
+++ b/tools/testing/kunit/run_checks.py
@@ -0,0 +1,81 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+#
+# This file runs some basic checks to verify kunit works.
+# It is only of interest if you're making changes to KUnit itself.
+#
+# Copyright (C) 2021, Google LLC.
+# Author: Daniel Latypov <dlatypov@google.com>
+
+from concurrent import futures
+import datetime
+import os
+import shutil
+import subprocess
+import sys
+import textwrap
+from typing import Dict, List, Sequence
+
+ABS_TOOL_PATH = os.path.abspath(os.path.dirname(__file__))
+TIMEOUT = datetime.timedelta(minutes=5).total_seconds()
+
+commands: Dict[str, Sequence[str]] = {
+ 'kunit_tool_test.py': ['./kunit_tool_test.py'],
+ 'kunit smoke test': ['./kunit.py', 'run', '--kunitconfig=lib/kunit', '--build_dir=kunit_run_checks'],
+ 'pytype': ['/bin/sh', '-c', 'pytype *.py'],
+ 'mypy': ['/bin/sh', '-c', 'mypy *.py'],
+}
+
+# The user might not have mypy or pytype installed, skip them if so.
+# Note: you can install both via `$ pip install mypy pytype`
+necessary_deps: Dict[str, str] = {
+ 'pytype': 'pytype',
+ 'mypy': 'mypy',
+}
+
+def main(argv: Sequence[str]) -> None:
+ if argv:
+ raise RuntimeError('This script takes no arguments')
+
+ future_to_name: Dict[futures.Future, str] = {}
+ executor = futures.ThreadPoolExecutor(max_workers=len(commands))
+	for name, cmd in commands.items():
+		if name in necessary_deps and shutil.which(necessary_deps[name]) is None:
+			print(f'{name}: SKIPPED, {necessary_deps[name]} not in $PATH')
+			continue
+		f = executor.submit(run_cmd, cmd)
+ future_to_name[f] = name
+
+ has_failures = False
+ print(f'Waiting on {len(future_to_name)} checks ({", ".join(future_to_name.values())})...')
+ for f in futures.as_completed(future_to_name.keys()):
+ name = future_to_name[f]
+ ex = f.exception()
+ if not ex:
+ print(f'{name}: PASSED')
+ continue
+
+ has_failures = True
+ if isinstance(ex, subprocess.TimeoutExpired):
+ print(f'{name}: TIMED OUT')
+ elif isinstance(ex, subprocess.CalledProcessError):
+ print(f'{name}: FAILED')
+ else:
+ print(f'{name}: unexpected exception: {ex}')
+ continue
+
+ output = ex.output
+ if output:
+ print(textwrap.indent(output.decode(), '> '))
+ executor.shutdown()
+
+ if has_failures:
+ sys.exit(1)
+
+
+def run_cmd(argv: Sequence[str]) -> None:
+ subprocess.check_output(argv, stderr=subprocess.STDOUT, cwd=ABS_TOOL_PATH, timeout=TIMEOUT)
+
+
+if __name__ == '__main__':
+ main(sys.argv[1:])
diff --git a/tools/testing/kunit/test_data/test_is_test_passed-crash.log b/tools/testing/kunit/test_data/test_is_test_passed-crash.log
deleted file mode 100644
index 4d97f6708c4a..000000000000
--- a/tools/testing/kunit/test_data/test_is_test_passed-crash.log
+++ /dev/null
@@ -1,70 +0,0 @@
-printk: console [tty0] enabled
-printk: console [mc-1] enabled
-TAP version 14
-1..2
- # Subtest: sysctl_test
- 1..8
- # sysctl_test_dointvec_null_tbl_data: sysctl_test_dointvec_null_tbl_data passed
- ok 1 - sysctl_test_dointvec_null_tbl_data
- # sysctl_test_dointvec_table_maxlen_unset: sysctl_test_dointvec_table_maxlen_unset passed
- ok 2 - sysctl_test_dointvec_table_maxlen_unset
- # sysctl_test_dointvec_table_len_is_zero: sysctl_test_dointvec_table_len_is_zero passed
- ok 3 - sysctl_test_dointvec_table_len_is_zero
- # sysctl_test_dointvec_table_read_but_position_set: sysctl_test_dointvec_table_read_but_position_set passed
- ok 4 - sysctl_test_dointvec_table_read_but_position_set
- # sysctl_test_dointvec_happy_single_positive: sysctl_test_dointvec_happy_single_positive passed
- ok 5 - sysctl_test_dointvec_happy_single_positive
- # sysctl_test_dointvec_happy_single_negative: sysctl_test_dointvec_happy_single_negative passed
- ok 6 - sysctl_test_dointvec_happy_single_negative
- # sysctl_test_dointvec_single_less_int_min: sysctl_test_dointvec_single_less_int_min passed
- ok 7 - sysctl_test_dointvec_single_less_int_min
- # sysctl_test_dointvec_single_greater_int_max: sysctl_test_dointvec_single_greater_int_max passed
- ok 8 - sysctl_test_dointvec_single_greater_int_max
-kunit sysctl_test: all tests passed
-ok 1 - sysctl_test
- # Subtest: example
- 1..2
-init_suite
- # example_simple_test: initializing
-Stack:
- 6016f7db 6f81bd30 6f81bdd0 60021450
- 6024b0e8 60021440 60018bbe 16f81bdc0
- 00000001 6f81bd30 6f81bd20 6f81bdd0
-Call Trace:
- [<6016f7db>] ? kunit_try_run_case+0xab/0xf0
- [<60021450>] ? set_signals+0x0/0x60
- [<60021440>] ? get_signals+0x0/0x10
- [<60018bbe>] ? kunit_um_run_try_catch+0x5e/0xc0
- [<60021450>] ? set_signals+0x0/0x60
- [<60021440>] ? get_signals+0x0/0x10
- [<60018bb3>] ? kunit_um_run_try_catch+0x53/0xc0
- [<6016f321>] ? kunit_run_case_catch_errors+0x121/0x1a0
- [<60018b60>] ? kunit_um_run_try_catch+0x0/0xc0
- [<600189e0>] ? kunit_um_throw+0x0/0x180
- [<6016f730>] ? kunit_try_run_case+0x0/0xf0
- [<6016f600>] ? kunit_catch_run_case+0x0/0x130
- [<6016edd0>] ? kunit_vprintk+0x0/0x30
- [<6016ece0>] ? kunit_fail+0x0/0x40
- [<6016eca0>] ? kunit_abort+0x0/0x40
- [<6016ed20>] ? kunit_printk_emit+0x0/0xb0
- [<6016f200>] ? kunit_run_case_catch_errors+0x0/0x1a0
- [<6016f46e>] ? kunit_run_tests+0xce/0x260
- [<6005b390>] ? unregister_console+0x0/0x190
- [<60175b70>] ? suite_kunit_initexample_test_suite+0x0/0x20
- [<60001cbb>] ? do_one_initcall+0x0/0x197
- [<60001d47>] ? do_one_initcall+0x8c/0x197
- [<6005cd20>] ? irq_to_desc+0x0/0x30
- [<60002005>] ? kernel_init_freeable+0x1b3/0x272
- [<6005c5ec>] ? printk+0x0/0x9b
- [<601c0086>] ? kernel_init+0x26/0x160
- [<60014442>] ? new_thread_handler+0x82/0xc0
-
- # example_simple_test: kunit test case crashed!
- # example_simple_test: example_simple_test failed
- not ok 1 - example_simple_test
- # example_mock_test: initializing
- # example_mock_test: example_mock_test passed
- ok 2 - example_mock_test
-kunit example: one or more tests failed
-not ok 2 - example
-List of all partitions:
diff --git a/tools/testing/kunit/test_data/test_is_test_passed-no_tests_no_plan.log b/tools/testing/kunit/test_data/test_is_test_passed-no_tests_no_plan.log
new file mode 100644
index 000000000000..4f81876ee6f1
--- /dev/null
+++ b/tools/testing/kunit/test_data/test_is_test_passed-no_tests_no_plan.log
@@ -0,0 +1,7 @@
+TAP version 14
+1..1
+ # Subtest: suite
+ 1..1
+ # Subtest: case
+ ok 1 - case
+ok 1 - suite