Diffstat (limited to 'tools')
146 files changed, 3815 insertions, 919 deletions
diff --git a/tools/bpf/resolve_btfids/main.c b/tools/bpf/resolve_btfids/main.c index a59cb0ee609c..73409e27be01 100644 --- a/tools/bpf/resolve_btfids/main.c +++ b/tools/bpf/resolve_btfids/main.c @@ -83,6 +83,7 @@ struct btf_id { int cnt; }; int addr_cnt; + bool is_set; Elf64_Addr addr[ADDR_CNT]; }; @@ -451,8 +452,10 @@ static int symbols_collect(struct object *obj) * in symbol's size, together with 'cnt' field hence * that - 1. */ - if (id) + if (id) { id->cnt = sym.st_size / sizeof(int) - 1; + id->is_set = true; + } } else { pr_err("FAILED unsupported prefix %s\n", prefix); return -1; @@ -568,9 +571,8 @@ static int id_patch(struct object *obj, struct btf_id *id) int *ptr = data->d_buf; int i; - if (!id->id) { + if (!id->id && !id->is_set) pr_err("WARN: resolve_btfids: unresolved symbol %s\n", id->name); - } for (i = 0; i < id->addr_cnt; i++) { unsigned long addr = id->addr[i]; diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature index 45a9a59828c3..ae61f464043a 100644 --- a/tools/build/Makefile.feature +++ b/tools/build/Makefile.feature @@ -48,7 +48,6 @@ FEATURE_TESTS_BASIC := \ numa_num_possible_cpus \ libperl \ libpython \ - libpython-version \ libslang \ libslang-include-subdir \ libtraceevent \ diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile index 0a3244ad9673..1480910c792e 100644 --- a/tools/build/feature/Makefile +++ b/tools/build/feature/Makefile @@ -32,7 +32,6 @@ FILES= \ test-numa_num_possible_cpus.bin \ test-libperl.bin \ test-libpython.bin \ - test-libpython-version.bin \ test-libslang.bin \ test-libslang-include-subdir.bin \ test-libtraceevent.bin \ @@ -227,9 +226,6 @@ $(OUTPUT)test-libperl.bin: $(OUTPUT)test-libpython.bin: $(BUILD) $(FLAGS_PYTHON_EMBED) -$(OUTPUT)test-libpython-version.bin: - $(BUILD) - $(OUTPUT)test-libbfd.bin: $(BUILD) -DPACKAGE='"perf"' -lbfd -ldl diff --git a/tools/build/feature/test-all.c b/tools/build/feature/test-all.c index 0b243ce842be..5ffafb967b6e 100644 --- a/tools/build/feature/test-all.c +++ b/tools/build/feature/test-all.c @@ -14,10 +14,6 @@ # include "test-libpython.c" #undef main -#define main main_test_libpython_version -# include "test-libpython-version.c" -#undef main - #define main main_test_libperl # include "test-libperl.c" #undef main @@ -177,7 +173,6 @@ int main(int argc, char *argv[]) { main_test_libpython(); - main_test_libpython_version(); main_test_libperl(); main_test_hello(); main_test_libelf(); diff --git a/tools/build/feature/test-libpython-version.c b/tools/build/feature/test-libpython-version.c deleted file mode 100644 index 47714b942d4d..000000000000 --- a/tools/build/feature/test-libpython-version.c +++ /dev/null @@ -1,11 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -#include <Python.h> - -#if PY_VERSION_HEX >= 0x03000000 - #error -#endif - -int main(void) -{ - return 0; -} diff --git a/tools/include/linux/debug_locks.h b/tools/include/linux/debug_locks.h deleted file mode 100644 index 72d595ce764a..000000000000 --- a/tools/include/linux/debug_locks.h +++ /dev/null @@ -1,14 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LIBLOCKDEP_DEBUG_LOCKS_H_ -#define _LIBLOCKDEP_DEBUG_LOCKS_H_ - -#include <stddef.h> -#include <linux/compiler.h> -#include <asm/bug.h> - -#define DEBUG_LOCKS_WARN_ON(x) WARN_ON(x) - -extern bool debug_locks; -extern bool debug_locks_silent; - -#endif diff --git a/tools/include/linux/hardirq.h b/tools/include/linux/hardirq.h deleted file mode 100644 index b25580b6a9be..000000000000 --- a/tools/include/linux/hardirq.h +++ /dev/null @@ -1,12 +0,0 @@ -/* 
SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LIBLOCKDEP_LINUX_HARDIRQ_H_ -#define _LIBLOCKDEP_LINUX_HARDIRQ_H_ - -#define SOFTIRQ_BITS 0UL -#define HARDIRQ_BITS 0UL -#define SOFTIRQ_SHIFT 0UL -#define HARDIRQ_SHIFT 0UL -#define hardirq_count() 0UL -#define softirq_count() 0UL - -#endif diff --git a/tools/include/linux/irqflags.h b/tools/include/linux/irqflags.h deleted file mode 100644 index 501262aee8ff..000000000000 --- a/tools/include/linux/irqflags.h +++ /dev/null @@ -1,39 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LIBLOCKDEP_LINUX_TRACE_IRQFLAGS_H_ -#define _LIBLOCKDEP_LINUX_TRACE_IRQFLAGS_H_ - -# define lockdep_hardirq_context() 0 -# define lockdep_softirq_context(p) 0 -# define lockdep_hardirqs_enabled() 0 -# define lockdep_softirqs_enabled(p) 0 -# define lockdep_hardirq_enter() do { } while (0) -# define lockdep_hardirq_exit() do { } while (0) -# define lockdep_softirq_enter() do { } while (0) -# define lockdep_softirq_exit() do { } while (0) -# define INIT_TRACE_IRQFLAGS - -# define stop_critical_timings() do { } while (0) -# define start_critical_timings() do { } while (0) - -#define raw_local_irq_disable() do { } while (0) -#define raw_local_irq_enable() do { } while (0) -#define raw_local_irq_save(flags) ((flags) = 0) -#define raw_local_irq_restore(flags) ((void)(flags)) -#define raw_local_save_flags(flags) ((flags) = 0) -#define raw_irqs_disabled_flags(flags) ((void)(flags)) -#define raw_irqs_disabled() 0 -#define raw_safe_halt() - -#define local_irq_enable() do { } while (0) -#define local_irq_disable() do { } while (0) -#define local_irq_save(flags) ((flags) = 0) -#define local_irq_restore(flags) ((void)(flags)) -#define local_save_flags(flags) ((flags) = 0) -#define irqs_disabled() (1) -#define irqs_disabled_flags(flags) ((void)(flags), 0) -#define safe_halt() do { } while (0) - -#define trace_lock_release(x, y) -#define trace_lock_acquire(a, b, c, d, e, f, g) - -#endif diff --git a/tools/include/linux/kernel.h b/tools/include/linux/kernel.h index 3e8df500cfbd..9701e8307db0 100644 --- a/tools/include/linux/kernel.h +++ b/tools/include/linux/kernel.h @@ -92,7 +92,9 @@ int vscnprintf(char *buf, size_t size, const char *fmt, va_list args); int scnprintf(char * buf, size_t size, const char * fmt, ...); int scnprintf_pad(char * buf, size_t size, const char * fmt, ...); +#ifndef ARRAY_SIZE #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr)) +#endif #define current_gfp_context(k) 0 #define synchronize_rcu() diff --git a/tools/include/linux/lockdep.h b/tools/include/linux/lockdep.h deleted file mode 100644 index e56997288f2b..000000000000 --- a/tools/include/linux/lockdep.h +++ /dev/null @@ -1,72 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LIBLOCKDEP_LOCKDEP_H_ -#define _LIBLOCKDEP_LOCKDEP_H_ - -#include <sys/prctl.h> -#include <sys/syscall.h> -#include <string.h> -#include <limits.h> -#include <linux/utsname.h> -#include <linux/compiler.h> -#include <linux/export.h> -#include <linux/kern_levels.h> -#include <linux/err.h> -#include <linux/rcu.h> -#include <linux/list.h> -#include <linux/hardirq.h> -#include <unistd.h> - -#define MAX_LOCK_DEPTH 63UL - -#define asmlinkage -#define __visible - -#include "../../../include/linux/lockdep.h" - -struct task_struct { - u64 curr_chain_key; - int lockdep_depth; - unsigned int lockdep_recursion; - struct held_lock held_locks[MAX_LOCK_DEPTH]; - gfp_t lockdep_reclaim_gfp; - int pid; - int state; - char comm[17]; -}; - -#define TASK_RUNNING 0 - -extern struct task_struct *__curr(void); - 
-#define current (__curr()) - -static inline int debug_locks_off(void) -{ - return 1; -} - -#define task_pid_nr(tsk) ((tsk)->pid) - -#define KSYM_NAME_LEN 128 -#define printk(...) dprintf(STDOUT_FILENO, __VA_ARGS__) -#define pr_err(format, ...) fprintf (stderr, format, ## __VA_ARGS__) -#define pr_warn pr_err -#define pr_cont pr_err - -#define list_del_rcu list_del - -#define atomic_t unsigned long -#define atomic_inc(x) ((*(x))++) - -#define print_tainted() "" -#define static_obj(x) 1 - -#define debug_show_all_locks() -extern void debug_check_no_locks_held(void); - -static __used bool __is_kernel_percpu_address(unsigned long addr, void *can_addr) -{ - return false; -} - -#endif diff --git a/tools/include/linux/proc_fs.h b/tools/include/linux/proc_fs.h deleted file mode 100644 index 8b3b03b64fda..000000000000 --- a/tools/include/linux/proc_fs.h +++ /dev/null @@ -1,4 +0,0 @@ -#ifndef _TOOLS_INCLUDE_LINUX_PROC_FS_H -#define _TOOLS_INCLUDE_LINUX_PROC_FS_H - -#endif /* _TOOLS_INCLUDE_LINUX_PROC_FS_H */ diff --git a/tools/include/linux/spinlock.h b/tools/include/linux/spinlock.h index c934572d935c..622266b197d0 100644 --- a/tools/include/linux/spinlock.h +++ b/tools/include/linux/spinlock.h @@ -37,6 +37,4 @@ static inline bool arch_spin_is_locked(arch_spinlock_t *mutex) return true; } -#include <linux/lockdep.h> - #endif diff --git a/tools/include/linux/stacktrace.h b/tools/include/linux/stacktrace.h deleted file mode 100644 index ae343ac35bfa..000000000000 --- a/tools/include/linux/stacktrace.h +++ /dev/null @@ -1,33 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LIBLOCKDEP_LINUX_STACKTRACE_H_ -#define _LIBLOCKDEP_LINUX_STACKTRACE_H_ - -#include <execinfo.h> - -struct stack_trace { - unsigned int nr_entries, max_entries; - unsigned long *entries; - int skip; -}; - -static inline void print_stack_trace(struct stack_trace *trace, int spaces) -{ - backtrace_symbols_fd((void **)trace->entries, trace->nr_entries, 1); -} - -#define save_stack_trace(trace) \ - ((trace)->nr_entries = \ - backtrace((void **)(trace)->entries, (trace)->max_entries)) - -static inline int dump_stack(void) -{ - void *array[64]; - size_t size; - - size = backtrace(array, 64); - backtrace_symbols_fd(array, size, 1); - - return 0; -} - -#endif diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config index afd144725a0b..3df74cf5651a 100644 --- a/tools/perf/Makefile.config +++ b/tools/perf/Makefile.config @@ -271,8 +271,6 @@ endif FEATURE_CHECK_CFLAGS-libpython := $(PYTHON_EMBED_CCOPTS) FEATURE_CHECK_LDFLAGS-libpython := $(PYTHON_EMBED_LDOPTS) -FEATURE_CHECK_CFLAGS-libpython-version := $(PYTHON_EMBED_CCOPTS) -FEATURE_CHECK_LDFLAGS-libpython-version := $(PYTHON_EMBED_LDOPTS) FEATURE_CHECK_LDFLAGS-libaio = -lrt diff --git a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl index 7bef917cc84e..15109af9d075 100644 --- a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl +++ b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl @@ -528,3 +528,4 @@ 446 common landlock_restrict_self sys_landlock_restrict_self # 447 reserved for memfd_secret 448 common process_mrelease sys_process_mrelease +449 common futex_waitv sys_futex_waitv diff --git a/tools/perf/arch/s390/entry/syscalls/syscall.tbl b/tools/perf/arch/s390/entry/syscalls/syscall.tbl index df5261e5cfe1..ed9c5c2eafad 100644 --- a/tools/perf/arch/s390/entry/syscalls/syscall.tbl +++ b/tools/perf/arch/s390/entry/syscalls/syscall.tbl @@ -451,3 +451,4 @@ 446 common landlock_restrict_self sys_landlock_restrict_self 
sys_landlock_restrict_self # 447 reserved for memfd_secret 448 common process_mrelease sys_process_mrelease sys_process_mrelease +449 common futex_waitv sys_futex_waitv sys_futex_waitv diff --git a/tools/perf/bench/sched-messaging.c b/tools/perf/bench/sched-messaging.c index fa0ff4ce2b74..488f6e6ba1a5 100644 --- a/tools/perf/bench/sched-messaging.c +++ b/tools/perf/bench/sched-messaging.c @@ -223,8 +223,6 @@ static unsigned int group(pthread_t *pth, snd_ctx->out_fds[i] = fds[1]; if (!thread_mode) close(fds[0]); - - free(ctx); } /* Now we have all the fds, fork the senders */ @@ -241,8 +239,6 @@ static unsigned int group(pthread_t *pth, for (i = 0; i < num_fds; i++) close(snd_ctx->out_fds[i]); - free(snd_ctx); - /* Return number of children to reap */ return num_fds * 2; } diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c index bc5259db5fd9..409b721666cb 100644 --- a/tools/perf/builtin-inject.c +++ b/tools/perf/builtin-inject.c @@ -755,12 +755,16 @@ static int parse_vm_time_correlation(const struct option *opt, const char *str, return inject->itrace_synth_opts.vm_tm_corr_args ? 0 : -ENOMEM; } +static int output_fd(struct perf_inject *inject) +{ + return inject->in_place_update ? -1 : perf_data__fd(&inject->output); +} + static int __cmd_inject(struct perf_inject *inject) { int ret = -EINVAL; struct perf_session *session = inject->session; - struct perf_data *data_out = &inject->output; - int fd = inject->in_place_update ? -1 : perf_data__fd(data_out); + int fd = output_fd(inject); u64 output_data_offset; signal(SIGINT, sig_handler); @@ -820,7 +824,7 @@ static int __cmd_inject(struct perf_inject *inject) inject->tool.ordered_events = true; inject->tool.ordering_requires_timestamps = true; /* Allow space in the header for new attributes */ - output_data_offset = 4096; + output_data_offset = roundup(8192 + session->header.data_offset, 4096); if (inject->strip) strip_init(inject); } @@ -1015,7 +1019,7 @@ int cmd_inject(int argc, const char **argv) } inject.session = __perf_session__new(&data, repipe, - perf_data__fd(&inject.output), + output_fd(&inject), &inject.tool); if (IS_ERR(inject.session)) { ret = PTR_ERR(inject.session); @@ -1078,7 +1082,8 @@ out_delete: zstd_fini(&(inject.session->zstd_data)); perf_session__delete(inject.session); out_close_output: - perf_data__close(&inject.output); + if (!inject.in_place_update) + perf_data__close(&inject.output); free(inject.itrace_synth_opts.vm_tm_corr_args); return ret; } diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c index 9434367af166..c82b033e8942 100644 --- a/tools/perf/builtin-script.c +++ b/tools/perf/builtin-script.c @@ -2473,7 +2473,7 @@ static int process_switch_event(struct perf_tool *tool, if (perf_event__process_switch(tool, event, sample, machine) < 0) return -1; - if (scripting_ops && scripting_ops->process_switch) + if (scripting_ops && scripting_ops->process_switch && !filter_cpu(sample)) scripting_ops->process_switch(event, sample, machine); if (!script->show_switch_events) diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index 0b52e08e558e..ef94388e8323 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c @@ -3925,6 +3925,7 @@ static int trace__run(struct trace *trace, int argc, const char **argv) bool draining = false; trace->live = true; + signal(SIGCHLD, sig_handler); if (!trace->raw_augmented_syscalls) { if (trace->trace_syscalls && trace__add_syscall_newtp(trace)) @@ -4873,7 +4874,6 @@ int cmd_trace(int argc, const char **argv) 
signal(SIGSEGV, sighandler_dump_stack); signal(SIGFPE, sighandler_dump_stack); - signal(SIGCHLD, sig_handler); signal(SIGINT, sig_handler); trace.evlist = evlist__new(); diff --git a/tools/perf/scripts/python/intel-pt-events.py b/tools/perf/scripts/python/intel-pt-events.py index 1d3a189a9a54..66452a8ec358 100644 --- a/tools/perf/scripts/python/intel-pt-events.py +++ b/tools/perf/scripts/python/intel-pt-events.py @@ -32,8 +32,7 @@ try: except: broken_pipe_exception = IOError -glb_switch_str = None -glb_switch_printed = True +glb_switch_str = {} glb_insn = False glb_disassembler = None glb_src = False @@ -70,6 +69,7 @@ def trace_begin(): ap = argparse.ArgumentParser(usage = "", add_help = False) ap.add_argument("--insn-trace", action='store_true') ap.add_argument("--src-trace", action='store_true') + ap.add_argument("--all-switch-events", action='store_true') global glb_args global glb_insn global glb_src @@ -256,10 +256,6 @@ def print_srccode(comm, param_dict, sample, symbol, dso, with_insn): print(start_str, src_str) def do_process_event(param_dict): - global glb_switch_printed - if not glb_switch_printed: - print(glb_switch_str) - glb_switch_printed = True event_attr = param_dict["attr"] sample = param_dict["sample"] raw_buf = param_dict["raw_buf"] @@ -274,6 +270,11 @@ def do_process_event(param_dict): dso = get_optional(param_dict, "dso") symbol = get_optional(param_dict, "symbol") + cpu = sample["cpu"] + if cpu in glb_switch_str: + print(glb_switch_str[cpu]) + del glb_switch_str[cpu] + if name[0:12] == "instructions": if glb_src: print_srccode(comm, param_dict, sample, symbol, dso, True) @@ -336,8 +337,6 @@ def auxtrace_error(typ, code, cpu, pid, tid, ip, ts, msg, cpumode, *x): sys.exit(1) def context_switch(ts, cpu, pid, tid, np_pid, np_tid, machine_pid, out, out_preempt, *x): - global glb_switch_printed - global glb_switch_str if out: out_str = "Switch out " else: @@ -350,6 +349,10 @@ def context_switch(ts, cpu, pid, tid, np_pid, np_tid, machine_pid, out, out_pree machine_str = "" else: machine_str = "machine PID %d" % machine_pid - glb_switch_str = "%16s %5d/%-5d [%03u] %9u.%09u %5d/%-5d %s %s" % \ + switch_str = "%16s %5d/%-5d [%03u] %9u.%09u %5d/%-5d %s %s" % \ (out_str, pid, tid, cpu, ts / 1000000000, ts %1000000000, np_pid, np_tid, machine_str, preempt_str) - glb_switch_printed = False + if glb_args.all_switch_events: + print(switch_str); + else: + global glb_switch_str + glb_switch_str[cpu] = switch_str diff --git a/tools/perf/tests/expr.c b/tools/perf/tests/expr.c index c895de481fe1..d54c5371c6a6 100644 --- a/tools/perf/tests/expr.c +++ b/tools/perf/tests/expr.c @@ -169,7 +169,9 @@ static int test__expr(struct test_suite *t __maybe_unused, int subtest __maybe_u TEST_ASSERT_VAL("#num_dies", expr__parse(&num_dies, ctx, "#num_dies") == 0); TEST_ASSERT_VAL("#num_cores >= #num_dies", num_cores >= num_dies); TEST_ASSERT_VAL("#num_packages", expr__parse(&num_packages, ctx, "#num_packages") == 0); - TEST_ASSERT_VAL("#num_dies >= #num_packages", num_dies >= num_packages); + + if (num_dies) // Some platforms do not have CPU die support, for example s390 + TEST_ASSERT_VAL("#num_dies >= #num_packages", num_dies >= num_packages); /* * Source count returns the number of events aggregating in a leader diff --git a/tools/perf/tests/parse-metric.c b/tools/perf/tests/parse-metric.c index 574b7e4efd3a..07b6f4ec024f 100644 --- a/tools/perf/tests/parse-metric.c +++ b/tools/perf/tests/parse-metric.c @@ -109,6 +109,7 @@ static void load_runtime_stat(struct runtime_stat *st, struct evlist *evlist, 
struct evsel *evsel; u64 count; + perf_stat__reset_shadow_stats(); evlist__for_each_entry(evlist, evsel) { count = find_value(evsel->name, vals); perf_stat__update_shadow_stats(evsel, count, 0, st); diff --git a/tools/perf/ui/tui/setup.c b/tools/perf/ui/tui/setup.c index e9bfe856a5de..b1be59b4e2a4 100644 --- a/tools/perf/ui/tui/setup.c +++ b/tools/perf/ui/tui/setup.c @@ -170,9 +170,11 @@ void ui__exit(bool wait_for_ok) "Press any key...", 0); SLtt_set_cursor_visibility(1); - SLsmg_refresh(); - SLsmg_reset_smg(); + if (!pthread_mutex_trylock(&ui__lock)) { + SLsmg_refresh(); + SLsmg_reset_smg(); + pthread_mutex_unlock(&ui__lock); + } SLang_reset_tty(); - perf_error__unregister(&perf_tui_eops); } diff --git a/tools/perf/util/bpf_skel/bperf.h b/tools/perf/util/bpf_skel/bperf.h deleted file mode 100644 index 186a5551ddb9..000000000000 --- a/tools/perf/util/bpf_skel/bperf.h +++ /dev/null @@ -1,14 +0,0 @@ -// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) -// Copyright (c) 2021 Facebook - -#ifndef __BPERF_STAT_H -#define __BPERF_STAT_H - -typedef struct { - __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); - __uint(key_size, sizeof(__u32)); - __uint(value_size, sizeof(struct bpf_perf_event_value)); - __uint(max_entries, 1); -} reading_map; - -#endif /* __BPERF_STAT_H */ diff --git a/tools/perf/util/bpf_skel/bperf_follower.bpf.c b/tools/perf/util/bpf_skel/bperf_follower.bpf.c index b8fa3cb2da23..f193998530d4 100644 --- a/tools/perf/util/bpf_skel/bperf_follower.bpf.c +++ b/tools/perf/util/bpf_skel/bperf_follower.bpf.c @@ -1,14 +1,23 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) // Copyright (c) 2021 Facebook -#include <linux/bpf.h> -#include <linux/perf_event.h> +#include "vmlinux.h" #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> -#include "bperf.h" #include "bperf_u.h" -reading_map diff_readings SEC(".maps"); -reading_map accum_readings SEC(".maps"); +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __uint(key_size, sizeof(__u32)); + __uint(value_size, sizeof(struct bpf_perf_event_value)); + __uint(max_entries, 1); +} diff_readings SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __uint(key_size, sizeof(__u32)); + __uint(value_size, sizeof(struct bpf_perf_event_value)); + __uint(max_entries, 1); +} accum_readings SEC(".maps"); struct { __uint(type, BPF_MAP_TYPE_HASH); diff --git a/tools/perf/util/bpf_skel/bperf_leader.bpf.c b/tools/perf/util/bpf_skel/bperf_leader.bpf.c index 4f70d1459e86..e2a2d4cd7779 100644 --- a/tools/perf/util/bpf_skel/bperf_leader.bpf.c +++ b/tools/perf/util/bpf_skel/bperf_leader.bpf.c @@ -1,10 +1,8 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) // Copyright (c) 2021 Facebook -#include <linux/bpf.h> -#include <linux/perf_event.h> +#include "vmlinux.h" #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> -#include "bperf.h" struct { __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY); @@ -13,8 +11,19 @@ struct { __uint(map_flags, BPF_F_PRESERVE_ELEMS); } events SEC(".maps"); -reading_map prev_readings SEC(".maps"); -reading_map diff_readings SEC(".maps"); +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __uint(key_size, sizeof(__u32)); + __uint(value_size, sizeof(struct bpf_perf_event_value)); + __uint(max_entries, 1); +} prev_readings SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); + __uint(key_size, sizeof(__u32)); + __uint(value_size, sizeof(struct bpf_perf_event_value)); + __uint(max_entries, 1); +} diff_readings SEC(".maps"); SEC("raw_tp/sched_switch") int BPF_PROG(on_switch) diff 
--git a/tools/perf/util/bpf_skel/bpf_prog_profiler.bpf.c b/tools/perf/util/bpf_skel/bpf_prog_profiler.bpf.c index ab12b4c4ece2..97037d3b3d9f 100644 --- a/tools/perf/util/bpf_skel/bpf_prog_profiler.bpf.c +++ b/tools/perf/util/bpf_skel/bpf_prog_profiler.bpf.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) // Copyright (c) 2020 Facebook -#include <linux/bpf.h> +#include "vmlinux.h" #include <bpf/bpf_helpers.h> #include <bpf/bpf_tracing.h> diff --git a/tools/perf/util/debug.c b/tools/perf/util/debug.c index c7a9fa0ffae9..2c06abf6dcd2 100644 --- a/tools/perf/util/debug.c +++ b/tools/perf/util/debug.c @@ -24,16 +24,6 @@ #include "util/parse-sublevel-options.h" #include <linux/ctype.h> -#include <traceevent/event-parse.h> - -#define MAKE_LIBTRACEEVENT_VERSION(a, b, c) ((a)*255*255+(b)*255+(c)) -#ifndef LIBTRACEEVENT_VERSION -/* - * If LIBTRACEEVENT_VERSION wasn't computed then set to version 1.1.0 that ships - * with the Linux kernel tools. - */ -#define LIBTRACEEVENT_VERSION MAKE_LIBTRACEEVENT_VERSION(1, 1, 0) -#endif int verbose; int debug_peo_args; @@ -238,15 +228,6 @@ int perf_debug_option(const char *str) /* Allow only verbose value in range (0, 10), otherwise set 0. */ verbose = (verbose < 0) || (verbose > 10) ? 0 : verbose; -#if MAKE_LIBTRACEEVENT_VERSION(1, 3, 0) <= LIBTRACEEVENT_VERSION - if (verbose == 1) - tep_set_loglevel(TEP_LOG_INFO); - else if (verbose == 2) - tep_set_loglevel(TEP_LOG_DEBUG); - else if (verbose >= 3) - tep_set_loglevel(TEP_LOG_ALL); -#endif - return 0; } diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h index 95ffed66369c..c59331eea1d9 100644 --- a/tools/perf/util/event.h +++ b/tools/perf/util/event.h @@ -44,13 +44,16 @@ struct perf_event_attr; /* perf sample has 16 bits size limit */ #define PERF_SAMPLE_MAX_SIZE (1 << 16) +/* number of register is bound by the number of bits in regs_dump::mask (64) */ +#define PERF_SAMPLE_REGS_CACHE_SIZE (8 * sizeof(u64)) + struct regs_dump { u64 abi; u64 mask; u64 *regs; /* Cached values/mask filled by first register access. */ - u64 cache_regs[PERF_REGS_MAX]; + u64 cache_regs[PERF_SAMPLE_REGS_CACHE_SIZE]; u64 cache_mask; }; diff --git a/tools/perf/util/expr.c b/tools/perf/util/expr.c index 1d532b9fed29..666b59baeb70 100644 --- a/tools/perf/util/expr.c +++ b/tools/perf/util/expr.c @@ -12,6 +12,7 @@ #include "expr-bison.h" #include "expr-flex.h" #include "smt.h" +#include <linux/err.h> #include <linux/kernel.h> #include <linux/zalloc.h> #include <ctype.h> @@ -65,7 +66,12 @@ static bool key_equal(const void *key1, const void *key2, struct hashmap *ids__new(void) { - return hashmap__new(key_hash, key_equal, NULL); + struct hashmap *hash; + + hash = hashmap__new(key_hash, key_equal, NULL); + if (IS_ERR(hash)) + return NULL; + return hash; } void ids__free(struct hashmap *ids) @@ -299,6 +305,10 @@ struct expr_parse_ctx *expr__ctx_new(void) return NULL; ctx->ids = hashmap__new(key_hash, key_equal, NULL); + if (IS_ERR(ctx->ids)) { + free(ctx); + return NULL; + } ctx->runtime = 0; return ctx; diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 79cce216727e..e3c1a532d059 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -2321,6 +2321,7 @@ out: #define FEAT_PROCESS_STR_FUN(__feat, __feat_env) \ static int process_##__feat(struct feat_fd *ff, void *data __maybe_unused) \ {\ + free(ff->ph->env.__feat_env); \ ff->ph->env.__feat_env = do_read_string(ff); \ return ff->ph->env.__feat_env ? 
0 : -ENOMEM; \ } @@ -4124,6 +4125,7 @@ int perf_event__process_feature(struct perf_session *session, struct perf_record_header_feature *fe = (struct perf_record_header_feature *)event; int type = fe->header.type; u64 feat = fe->feat_id; + int ret = 0; if (type < 0 || type >= PERF_RECORD_HEADER_MAX) { pr_warning("invalid record type %d in pipe-mode\n", type); @@ -4141,11 +4143,13 @@ int perf_event__process_feature(struct perf_session *session, ff.size = event->header.size - sizeof(*fe); ff.ph = &session->header; - if (feat_ops[feat].process(&ff, NULL)) - return -1; + if (feat_ops[feat].process(&ff, NULL)) { + ret = -1; + goto out; + } if (!feat_ops[feat].print || !tool->show_feat_hdr) - return 0; + goto out; if (!feat_ops[feat].full_only || tool->show_feat_hdr >= SHOW_FEAT_HEADER_FULL_INFO) { @@ -4154,8 +4158,9 @@ int perf_event__process_feature(struct perf_session *session, fprintf(stdout, "# %s info available, use -I to display\n", feat_ops[feat].name); } - - return 0; +out: + free_event_desc(ff.events); + return ret; } size_t perf_event__fprintf_event_update(union perf_event *event, FILE *fp) diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c index 5f83937bf8f3..0e013c2d9eb4 100644 --- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c +++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c @@ -1205,61 +1205,69 @@ out_no_progress: static bool intel_pt_fup_event(struct intel_pt_decoder *decoder) { + enum intel_pt_sample_type type = decoder->state.type; bool ret = false; + decoder->state.type &= ~INTEL_PT_BRANCH; + if (decoder->set_fup_tx_flags) { decoder->set_fup_tx_flags = false; decoder->tx_flags = decoder->fup_tx_flags; - decoder->state.type = INTEL_PT_TRANSACTION; + decoder->state.type |= INTEL_PT_TRANSACTION; if (decoder->fup_tx_flags & INTEL_PT_ABORT_TX) decoder->state.type |= INTEL_PT_BRANCH; - decoder->state.from_ip = decoder->ip; - decoder->state.to_ip = 0; decoder->state.flags = decoder->fup_tx_flags; - return true; + ret = true; } if (decoder->set_fup_ptw) { decoder->set_fup_ptw = false; - decoder->state.type = INTEL_PT_PTW; + decoder->state.type |= INTEL_PT_PTW; decoder->state.flags |= INTEL_PT_FUP_IP; - decoder->state.from_ip = decoder->ip; - decoder->state.to_ip = 0; decoder->state.ptw_payload = decoder->fup_ptw_payload; - return true; + ret = true; } if (decoder->set_fup_mwait) { decoder->set_fup_mwait = false; - decoder->state.type = INTEL_PT_MWAIT_OP; - decoder->state.from_ip = decoder->ip; - decoder->state.to_ip = 0; + decoder->state.type |= INTEL_PT_MWAIT_OP; decoder->state.mwait_payload = decoder->fup_mwait_payload; ret = true; } if (decoder->set_fup_pwre) { decoder->set_fup_pwre = false; decoder->state.type |= INTEL_PT_PWR_ENTRY; - decoder->state.type &= ~INTEL_PT_BRANCH; - decoder->state.from_ip = decoder->ip; - decoder->state.to_ip = 0; decoder->state.pwre_payload = decoder->fup_pwre_payload; ret = true; } if (decoder->set_fup_exstop) { decoder->set_fup_exstop = false; decoder->state.type |= INTEL_PT_EX_STOP; - decoder->state.type &= ~INTEL_PT_BRANCH; decoder->state.flags |= INTEL_PT_FUP_IP; - decoder->state.from_ip = decoder->ip; - decoder->state.to_ip = 0; ret = true; } if (decoder->set_fup_bep) { decoder->set_fup_bep = false; decoder->state.type |= INTEL_PT_BLK_ITEMS; - decoder->state.type &= ~INTEL_PT_BRANCH; + ret = true; + } + if (decoder->overflow) { + decoder->overflow = false; + if (!ret && !decoder->pge) { + if (decoder->hop) { + decoder->state.type = 0; + decoder->pkt_state = 
INTEL_PT_STATE_RESAMPLE; + } + decoder->pge = true; + decoder->state.type |= INTEL_PT_BRANCH | INTEL_PT_TRACE_BEGIN; + decoder->state.from_ip = 0; + decoder->state.to_ip = decoder->ip; + return true; + } + } + if (ret) { decoder->state.from_ip = decoder->ip; decoder->state.to_ip = 0; - ret = true; + } else { + decoder->state.type = type; } return ret; } @@ -1608,7 +1616,16 @@ static int intel_pt_overflow(struct intel_pt_decoder *decoder) intel_pt_clear_tx_flags(decoder); intel_pt_set_nr(decoder); decoder->timestamp_insn_cnt = 0; - decoder->pkt_state = INTEL_PT_STATE_ERR_RESYNC; + decoder->pkt_state = INTEL_PT_STATE_IN_SYNC; + decoder->state.from_ip = decoder->ip; + decoder->ip = 0; + decoder->pge = false; + decoder->set_fup_tx_flags = false; + decoder->set_fup_ptw = false; + decoder->set_fup_mwait = false; + decoder->set_fup_pwre = false; + decoder->set_fup_exstop = false; + decoder->set_fup_bep = false; decoder->overflow = true; return -EOVERFLOW; } @@ -2666,6 +2683,8 @@ static int intel_pt_scan_for_psb(struct intel_pt_decoder *decoder); /* Hop mode: Ignore TNT, do not walk code, but get ip from FUPs and TIPs */ static int intel_pt_hop_trace(struct intel_pt_decoder *decoder, bool *no_tip, int *err) { + *err = 0; + /* Leap from PSB to PSB, getting ip from FUP within PSB+ */ if (decoder->leap && !decoder->in_psb && decoder->packet.type != INTEL_PT_PSB) { *err = intel_pt_scan_for_psb(decoder); @@ -2678,6 +2697,7 @@ static int intel_pt_hop_trace(struct intel_pt_decoder *decoder, bool *no_tip, in return HOP_IGNORE; case INTEL_PT_TIP_PGD: + decoder->pge = false; if (!decoder->packet.count) { intel_pt_set_nr(decoder); return HOP_IGNORE; @@ -2705,18 +2725,21 @@ static int intel_pt_hop_trace(struct intel_pt_decoder *decoder, bool *no_tip, in if (!decoder->packet.count) return HOP_IGNORE; intel_pt_set_ip(decoder); - if (intel_pt_fup_event(decoder)) - return HOP_RETURN; - if (!decoder->branch_enable) + if (decoder->set_fup_mwait || decoder->set_fup_pwre) + *no_tip = true; + if (!decoder->branch_enable || !decoder->pge) *no_tip = true; if (*no_tip) { decoder->state.type = INTEL_PT_INSTRUCTION; decoder->state.from_ip = decoder->ip; decoder->state.to_ip = 0; + intel_pt_fup_event(decoder); return HOP_RETURN; } + intel_pt_fup_event(decoder); + decoder->state.type |= INTEL_PT_INSTRUCTION | INTEL_PT_BRANCH; *err = intel_pt_walk_fup_tip(decoder); - if (!*err) + if (!*err && decoder->state.to_ip) decoder->pkt_state = INTEL_PT_STATE_RESAMPLE; return HOP_RETURN; @@ -2897,7 +2920,7 @@ static bool intel_pt_psb_with_fup(struct intel_pt_decoder *decoder, int *err) { struct intel_pt_psb_info data = { .fup = false }; - if (!decoder->branch_enable || !decoder->pge) + if (!decoder->branch_enable) return false; intel_pt_pkt_lookahead(decoder, intel_pt_psb_lookahead_cb, &data); @@ -2924,6 +2947,7 @@ static int intel_pt_walk_trace(struct intel_pt_decoder *decoder) if (err) return err; next: + err = 0; if (decoder->cyc_threshold) { if (decoder->sample_cyc && last_packet_type != INTEL_PT_CYC) decoder->sample_cyc = false; @@ -2962,6 +2986,7 @@ next: case INTEL_PT_TIP_PGE: { decoder->pge = true; + decoder->overflow = false; intel_pt_mtc_cyc_cnt_pge(decoder); intel_pt_set_nr(decoder); if (decoder->packet.count == 0) { @@ -2999,7 +3024,7 @@ next: break; } intel_pt_set_last_ip(decoder); - if (!decoder->branch_enable) { + if (!decoder->branch_enable || !decoder->pge) { decoder->ip = decoder->last_ip; if (intel_pt_fup_event(decoder)) return 0; @@ -3467,10 +3492,10 @@ static int intel_pt_sync_ip(struct intel_pt_decoder *decoder) 
decoder->set_fup_pwre = false; decoder->set_fup_exstop = false; decoder->set_fup_bep = false; + decoder->overflow = false; if (!decoder->branch_enable) { decoder->pkt_state = INTEL_PT_STATE_IN_SYNC; - decoder->overflow = false; decoder->state.type = 0; /* Do not have a sample */ return 0; } @@ -3485,7 +3510,6 @@ static int intel_pt_sync_ip(struct intel_pt_decoder *decoder) decoder->pkt_state = INTEL_PT_STATE_RESAMPLE; else decoder->pkt_state = INTEL_PT_STATE_IN_SYNC; - decoder->overflow = false; decoder->state.from_ip = 0; decoder->state.to_ip = decoder->ip; @@ -3607,7 +3631,7 @@ static int intel_pt_sync(struct intel_pt_decoder *decoder) } decoder->have_last_ip = true; - decoder->pkt_state = INTEL_PT_STATE_NO_IP; + decoder->pkt_state = INTEL_PT_STATE_IN_SYNC; err = intel_pt_walk_psb(decoder); if (err) @@ -3704,7 +3728,8 @@ const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder) if (err) { decoder->state.err = intel_pt_ext_err(err); - decoder->state.from_ip = decoder->ip; + if (err != -EOVERFLOW) + decoder->state.from_ip = decoder->ip; intel_pt_update_sample_time(decoder); decoder->sample_tot_cyc_cnt = decoder->tot_cyc_cnt; intel_pt_set_nr(decoder); diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c index 556a893508da..e8613cbda331 100644 --- a/tools/perf/util/intel-pt.c +++ b/tools/perf/util/intel-pt.c @@ -2565,6 +2565,7 @@ static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp) ptq->sync_switch = false; intel_pt_next_tid(pt, ptq); } + ptq->timestamp = state->est_timestamp; if (pt->synth_opts.errors) { err = intel_ptq_synth_error(ptq, state); if (err) @@ -3624,6 +3625,7 @@ static int intel_pt_parse_vm_tm_corr_arg(struct intel_pt *pt, char **args) *args = p; return 0; } + p += 1; while (1) { vmcs = strtoull(p, &p, 0); if (errno) diff --git a/tools/perf/util/perf_regs.c b/tools/perf/util/perf_regs.c index 5ee47ae1509c..06a7461ba864 100644 --- a/tools/perf/util/perf_regs.c +++ b/tools/perf/util/perf_regs.c @@ -25,6 +25,9 @@ int perf_reg_value(u64 *valp, struct regs_dump *regs, int id) int i, idx = 0; u64 mask = regs->mask; + if ((u64)id >= PERF_SAMPLE_REGS_CACHE_SIZE) + return -EINVAL; + if (regs->cache_mask & (1ULL << id)) goto out; diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c index 6ae58406f4fc..8dfbba15aeb8 100644 --- a/tools/perf/util/pmu.c +++ b/tools/perf/util/pmu.c @@ -1659,6 +1659,21 @@ bool is_pmu_core(const char *name) return !strcmp(name, "cpu") || is_arm_pmu_core(name); } +static bool pmu_alias_is_duplicate(struct sevent *alias_a, + struct sevent *alias_b) +{ + /* Different names -> never duplicates */ + if (strcmp(alias_a->name, alias_b->name)) + return false; + + /* Don't remove duplicates for hybrid PMUs */ + if (perf_pmu__is_hybrid(alias_a->pmu) && + perf_pmu__is_hybrid(alias_b->pmu)) + return false; + + return true; +} + void print_pmu_events(const char *event_glob, bool name_only, bool quiet_flag, bool long_desc, bool details_flag, bool deprecated, const char *pmu_name) @@ -1744,12 +1759,8 @@ void print_pmu_events(const char *event_glob, bool name_only, bool quiet_flag, qsort(aliases, len, sizeof(struct sevent), cmp_sevent); for (j = 0; j < len; j++) { /* Skip duplicates */ - if (j > 0 && !strcmp(aliases[j].name, aliases[j - 1].name)) { - if (!aliases[j].pmu || !aliases[j - 1].pmu || - !strcmp(aliases[j].pmu, aliases[j - 1].pmu)) { - continue; - } - } + if (j > 0 && pmu_alias_is_duplicate(&aliases[j], &aliases[j - 1])) + continue; if (name_only) { printf("%s ", aliases[j].name); diff --git 
a/tools/perf/util/python.c b/tools/perf/util/python.c index 563a9ba8954f..7f782a31bda3 100644 --- a/tools/perf/util/python.c +++ b/tools/perf/util/python.c @@ -461,7 +461,7 @@ get_tracepoint_field(struct pyrf_event *pevent, PyObject *attr_name) struct tep_event *tp_format; tp_format = trace_event__tp_format_id(evsel->core.attr.config); - if (!tp_format) + if (IS_ERR_OR_NULL(tp_format)) return NULL; evsel->tp_format = tp_format; diff --git a/tools/perf/util/smt.c b/tools/perf/util/smt.c index 20bacd5972ad..34f1b1b1176c 100644 --- a/tools/perf/util/smt.c +++ b/tools/perf/util/smt.c @@ -15,7 +15,7 @@ int smt_on(void) if (cached) return cached_result; - if (sysfs__read_int("devices/system/cpu/smt/active", &cached_result) > 0) + if (sysfs__read_int("devices/system/cpu/smt/active", &cached_result) >= 0) goto done; ncpu = sysconf(_SC_NPROCESSORS_CONF); diff --git a/tools/power/acpi/Makefile.config b/tools/power/acpi/Makefile.config index 331f6d30f472..cd7106876a5f 100644 --- a/tools/power/acpi/Makefile.config +++ b/tools/power/acpi/Makefile.config @@ -69,6 +69,7 @@ KERNEL_INCLUDE := $(OUTPUT)include ACPICA_INCLUDE := $(srctree)/../../../drivers/acpi/acpica CFLAGS += -D_LINUX -I$(KERNEL_INCLUDE) -I$(ACPICA_INCLUDE) CFLAGS += $(WARNINGS) +MKDIR = mkdir ifeq ($(strip $(V)),false) QUIET=@ diff --git a/tools/power/acpi/Makefile.rules b/tools/power/acpi/Makefile.rules index 2a6c170b57cd..1d7616f5d0ae 100644 --- a/tools/power/acpi/Makefile.rules +++ b/tools/power/acpi/Makefile.rules @@ -21,6 +21,7 @@ $(KERNEL_INCLUDE): $(objdir)%.o: %.c $(KERNEL_INCLUDE) $(ECHO) " CC " $(subst $(OUTPUT),,$@) + $(QUIET) $(MKDIR) -p $(objdir) 2>/dev/null $(QUIET) $(CC) -c $(CFLAGS) -o $@ $< all: $(OUTPUT)$(TOOL) diff --git a/tools/testing/kunit/kunit.py b/tools/testing/kunit/kunit.py index 68e6f461c758..7a706f96f68d 100755 --- a/tools/testing/kunit/kunit.py +++ b/tools/testing/kunit/kunit.py @@ -15,38 +15,57 @@ import time assert sys.version_info >= (3, 7), "Python version is too old" -from collections import namedtuple +from dataclasses import dataclass from enum import Enum, auto -from typing import Iterable, Sequence, List +from typing import Any, Iterable, Sequence, List, Optional import kunit_json import kunit_kernel import kunit_parser -KunitResult = namedtuple('KunitResult', ['status','result','elapsed_time']) - -KunitConfigRequest = namedtuple('KunitConfigRequest', - ['build_dir', 'make_options']) -KunitBuildRequest = namedtuple('KunitBuildRequest', - ['jobs', 'build_dir', 'alltests', - 'make_options']) -KunitExecRequest = namedtuple('KunitExecRequest', - ['timeout', 'build_dir', 'alltests', - 'filter_glob', 'kernel_args', 'run_isolated']) -KunitParseRequest = namedtuple('KunitParseRequest', - ['raw_output', 'build_dir', 'json']) -KunitRequest = namedtuple('KunitRequest', ['raw_output','timeout', 'jobs', - 'build_dir', 'alltests', 'filter_glob', - 'kernel_args', 'run_isolated', 'json', 'make_options']) - -KernelDirectoryPath = sys.argv[0].split('tools/testing/kunit/')[0] - class KunitStatus(Enum): SUCCESS = auto() CONFIG_FAILURE = auto() BUILD_FAILURE = auto() TEST_FAILURE = auto() +@dataclass +class KunitResult: + status: KunitStatus + result: Any + elapsed_time: float + +@dataclass +class KunitConfigRequest: + build_dir: str + make_options: Optional[List[str]] + +@dataclass +class KunitBuildRequest(KunitConfigRequest): + jobs: int + alltests: bool + +@dataclass +class KunitParseRequest: + raw_output: Optional[str] + build_dir: str + json: Optional[str] + +@dataclass +class KunitExecRequest(KunitParseRequest): + 
timeout: int + alltests: bool + filter_glob: str + kernel_args: Optional[List[str]] + run_isolated: Optional[str] + +@dataclass +class KunitRequest(KunitExecRequest, KunitBuildRequest): + pass + + +KernelDirectoryPath = sys.argv[0].split('tools/testing/kunit/')[0] + def get_kernel_root_path() -> str: path = sys.argv[0] if not __file__ else __file__ parts = os.path.realpath(path).split('tools/testing/kunit') @@ -91,6 +110,14 @@ def build_tests(linux: kunit_kernel.LinuxSourceTree, 'built kernel successfully', build_end - build_start) +def config_and_build_tests(linux: kunit_kernel.LinuxSourceTree, + request: KunitBuildRequest) -> KunitResult: + config_result = config_tests(linux, request) + if config_result.status != KunitStatus.SUCCESS: + return config_result + + return build_tests(linux, request) + def _list_tests(linux: kunit_kernel.LinuxSourceTree, request: KunitExecRequest) -> List[str]: args = ['kunit.action=list'] if request.kernel_args: @@ -121,8 +148,7 @@ def _suites_from_test_list(tests: List[str]) -> List[str]: -def exec_tests(linux: kunit_kernel.LinuxSourceTree, request: KunitExecRequest, - parse_request: KunitParseRequest) -> KunitResult: +def exec_tests(linux: kunit_kernel.LinuxSourceTree, request: KunitExecRequest) -> KunitResult: filter_globs = [request.filter_glob] if request.run_isolated: tests = _list_tests(linux, request) @@ -147,17 +173,23 @@ def exec_tests(linux: kunit_kernel.LinuxSourceTree, request: KunitExecRequest, filter_glob=filter_glob, build_dir=request.build_dir) - result = parse_tests(parse_request, run_result) + result = parse_tests(request, run_result) # run_kernel() doesn't block on the kernel exiting. # That only happens after we get the last line of output from `run_result`. # So exec_time here actually contains parsing + execution time, which is fine. test_end = time.time() exec_time += test_end - test_start - test_counts.add_subtest_counts(result.result.test.counts) + test_counts.add_subtest_counts(result.result.counts) + + if len(filter_globs) == 1 and test_counts.crashed > 0: + bd = request.build_dir + print('The kernel seems to have crashed; you can decode the stack traces with:') + print('$ scripts/decode_stacktrace.sh {}/vmlinux {} < {} | tee {}/decoded.log | {} parse'.format( + bd, bd, kunit_kernel.get_outfile_path(bd), bd, sys.argv[0])) kunit_status = _map_to_overall_status(test_counts.get_status()) - return KunitResult(status=kunit_status, result=result.result, elapsed_time=exec_time) + return KunitResult(status=kunit_status, result=result, elapsed_time=exec_time) def _map_to_overall_status(test_status: kunit_parser.TestStatus) -> KunitStatus: if test_status in (kunit_parser.TestStatus.SUCCESS, kunit_parser.TestStatus.SKIPPED): @@ -168,14 +200,12 @@ def _map_to_overall_status(test_status: kunit_parser.TestStatus) -> KunitStatus: def parse_tests(request: KunitParseRequest, input_data: Iterable[str]) -> KunitResult: parse_start = time.time() - test_result = kunit_parser.TestResult(kunit_parser.TestStatus.SUCCESS, - kunit_parser.Test(), - 'Tests not Parsed.') + test_result = kunit_parser.Test() if request.raw_output: # Treat unparsed results as one passing test. 
- test_result.test.status = kunit_parser.TestStatus.SUCCESS - test_result.test.counts.passed = 1 + test_result.status = kunit_parser.TestStatus.SUCCESS + test_result.counts.passed = 1 output: Iterable[str] = input_data if request.raw_output == 'all': @@ -193,7 +223,7 @@ def parse_tests(request: KunitParseRequest, input_data: Iterable[str]) -> KunitR if request.json: json_obj = kunit_json.get_json_result( - test_result=test_result, + test=test_result, def_config='kunit_defconfig', build_dir=request.build_dir, json_path=request.json) @@ -211,27 +241,15 @@ def run_tests(linux: kunit_kernel.LinuxSourceTree, request: KunitRequest) -> KunitResult: run_start = time.time() - config_request = KunitConfigRequest(request.build_dir, - request.make_options) - config_result = config_tests(linux, config_request) + config_result = config_tests(linux, request) if config_result.status != KunitStatus.SUCCESS: return config_result - build_request = KunitBuildRequest(request.jobs, request.build_dir, - request.alltests, - request.make_options) - build_result = build_tests(linux, build_request) + build_result = build_tests(linux, request) if build_result.status != KunitStatus.SUCCESS: return build_result - exec_request = KunitExecRequest(request.timeout, request.build_dir, - request.alltests, request.filter_glob, - request.kernel_args, request.run_isolated) - parse_request = KunitParseRequest(request.raw_output, - request.build_dir, - request.json) - - exec_result = exec_tests(linux, exec_request, parse_request) + exec_result = exec_tests(linux, request) run_end = time.time() @@ -264,6 +282,9 @@ def massage_argv(argv: Sequence[str]) -> Sequence[str]: return f'{arg}={pseudo_bool_flag_defaults[arg]}' return list(map(massage_arg, argv)) +def get_default_jobs() -> int: + return len(os.sched_getaffinity(0)) + def add_common_opts(parser) -> None: parser.add_argument('--build_dir', help='As in the make command, it specifies the build ' @@ -280,6 +301,10 @@ def add_common_opts(parser) -> None: ' If given a directory, (e.g. lib/kunit), "/.kunitconfig" ' 'will get automatically appended.', metavar='kunitconfig') + parser.add_argument('--kconfig_add', + help='Additional Kconfig options to append to the ' + '.kunitconfig, e.g. CONFIG_KASAN=y. Can be repeated.', + action='append') parser.add_argument('--arch', help=('Specifies the architecture to run tests under. 
' @@ -310,7 +335,7 @@ def add_build_opts(parser) -> None: parser.add_argument('--jobs', help='As in the make command, "Specifies the number of ' 'jobs (commands) to run simultaneously."', - type=int, default=8, metavar='jobs') + type=int, default=get_default_jobs(), metavar='jobs') def add_exec_opts(parser) -> None: parser.add_argument('--timeout', @@ -398,20 +423,21 @@ def main(argv, linux=None): if not linux: linux = kunit_kernel.LinuxSourceTree(cli_args.build_dir, kunitconfig_path=cli_args.kunitconfig, + kconfig_add=cli_args.kconfig_add, arch=cli_args.arch, cross_compile=cli_args.cross_compile, qemu_config_path=cli_args.qemu_config) - request = KunitRequest(cli_args.raw_output, - cli_args.timeout, - cli_args.jobs, - cli_args.build_dir, - cli_args.alltests, - cli_args.filter_glob, - cli_args.kernel_args, - cli_args.run_isolated, - cli_args.json, - cli_args.make_options) + request = KunitRequest(build_dir=cli_args.build_dir, + make_options=cli_args.make_options, + jobs=cli_args.jobs, + alltests=cli_args.alltests, + raw_output=cli_args.raw_output, + json=cli_args.json, + timeout=cli_args.timeout, + filter_glob=cli_args.filter_glob, + kernel_args=cli_args.kernel_args, + run_isolated=cli_args.run_isolated) result = run_tests(linux, request) if result.status != KunitStatus.SUCCESS: sys.exit(1) @@ -423,12 +449,13 @@ def main(argv, linux=None): if not linux: linux = kunit_kernel.LinuxSourceTree(cli_args.build_dir, kunitconfig_path=cli_args.kunitconfig, + kconfig_add=cli_args.kconfig_add, arch=cli_args.arch, cross_compile=cli_args.cross_compile, qemu_config_path=cli_args.qemu_config) - request = KunitConfigRequest(cli_args.build_dir, - cli_args.make_options) + request = KunitConfigRequest(build_dir=cli_args.build_dir, + make_options=cli_args.make_options) result = config_tests(linux, request) kunit_parser.print_with_timestamp(( 'Elapsed time: %.3fs\n') % ( @@ -439,15 +466,16 @@ def main(argv, linux=None): if not linux: linux = kunit_kernel.LinuxSourceTree(cli_args.build_dir, kunitconfig_path=cli_args.kunitconfig, + kconfig_add=cli_args.kconfig_add, arch=cli_args.arch, cross_compile=cli_args.cross_compile, qemu_config_path=cli_args.qemu_config) - request = KunitBuildRequest(cli_args.jobs, - cli_args.build_dir, - cli_args.alltests, - cli_args.make_options) - result = build_tests(linux, request) + request = KunitBuildRequest(build_dir=cli_args.build_dir, + make_options=cli_args.make_options, + jobs=cli_args.jobs, + alltests=cli_args.alltests) + result = config_and_build_tests(linux, request) kunit_parser.print_with_timestamp(( 'Elapsed time: %.3fs\n') % ( result.elapsed_time)) @@ -457,20 +485,20 @@ def main(argv, linux=None): if not linux: linux = kunit_kernel.LinuxSourceTree(cli_args.build_dir, kunitconfig_path=cli_args.kunitconfig, + kconfig_add=cli_args.kconfig_add, arch=cli_args.arch, cross_compile=cli_args.cross_compile, qemu_config_path=cli_args.qemu_config) - exec_request = KunitExecRequest(cli_args.timeout, - cli_args.build_dir, - cli_args.alltests, - cli_args.filter_glob, - cli_args.kernel_args, - cli_args.run_isolated) - parse_request = KunitParseRequest(cli_args.raw_output, - cli_args.build_dir, - cli_args.json) - result = exec_tests(linux, exec_request, parse_request) + exec_request = KunitExecRequest(raw_output=cli_args.raw_output, + build_dir=cli_args.build_dir, + json=cli_args.json, + timeout=cli_args.timeout, + alltests=cli_args.alltests, + filter_glob=cli_args.filter_glob, + kernel_args=cli_args.kernel_args, + run_isolated=cli_args.run_isolated) + result = exec_tests(linux, 
exec_request) kunit_parser.print_with_timestamp(( 'Elapsed time: %.3fs\n') % (result.elapsed_time)) if result.status != KunitStatus.SUCCESS: @@ -482,9 +510,9 @@ def main(argv, linux=None): else: with open(cli_args.file, 'r', errors='backslashreplace') as f: kunit_output = f.read().splitlines() - request = KunitParseRequest(cli_args.raw_output, - None, - cli_args.json) + request = KunitParseRequest(raw_output=cli_args.raw_output, + build_dir='', + json=cli_args.json) result = parse_tests(request, kunit_output) if result.status != KunitStatus.SUCCESS: sys.exit(1) diff --git a/tools/testing/kunit/kunit_config.py b/tools/testing/kunit/kunit_config.py index c77c7d2ef622..677354546156 100644 --- a/tools/testing/kunit/kunit_config.py +++ b/tools/testing/kunit/kunit_config.py @@ -62,33 +62,34 @@ class Kconfig(object): for entry in self.entries(): f.write(str(entry) + '\n') - def parse_from_string(self, blob: str) -> None: - """Parses a string containing KconfigEntrys and populates this Kconfig.""" - self._entries = [] - is_not_set_matcher = re.compile(CONFIG_IS_NOT_SET_PATTERN) - config_matcher = re.compile(CONFIG_PATTERN) - for line in blob.split('\n'): - line = line.strip() - if not line: - continue - - match = config_matcher.match(line) - if match: - entry = KconfigEntry(match.group(1), match.group(2)) - self.add_entry(entry) - continue - - empty_match = is_not_set_matcher.match(line) - if empty_match: - entry = KconfigEntry(empty_match.group(1), 'n') - self.add_entry(entry) - continue - - if line[0] == '#': - continue - else: - raise KconfigParseError('Failed to parse: ' + line) - - def read_from_file(self, path: str) -> None: - with open(path, 'r') as f: - self.parse_from_string(f.read()) +def parse_file(path: str) -> Kconfig: + with open(path, 'r') as f: + return parse_from_string(f.read()) + +def parse_from_string(blob: str) -> Kconfig: + """Parses a string containing Kconfig entries.""" + kconfig = Kconfig() + is_not_set_matcher = re.compile(CONFIG_IS_NOT_SET_PATTERN) + config_matcher = re.compile(CONFIG_PATTERN) + for line in blob.split('\n'): + line = line.strip() + if not line: + continue + + match = config_matcher.match(line) + if match: + entry = KconfigEntry(match.group(1), match.group(2)) + kconfig.add_entry(entry) + continue + + empty_match = is_not_set_matcher.match(line) + if empty_match: + entry = KconfigEntry(empty_match.group(1), 'n') + kconfig.add_entry(entry) + continue + + if line[0] == '#': + continue + else: + raise KconfigParseError('Failed to parse: ' + line) + return kconfig diff --git a/tools/testing/kunit/kunit_json.py b/tools/testing/kunit/kunit_json.py index 746bec72b9ac..6862671709bc 100644 --- a/tools/testing/kunit/kunit_json.py +++ b/tools/testing/kunit/kunit_json.py @@ -11,7 +11,7 @@ import os import kunit_parser -from kunit_parser import Test, TestResult, TestStatus +from kunit_parser import Test, TestStatus from typing import Any, Dict, Optional JsonObj = Dict[str, Any] @@ -30,6 +30,8 @@ def _get_group_json(test: Test, def_config: str, test_case = {"name": subtest.name, "status": "FAIL"} if subtest.status == TestStatus.SUCCESS: test_case["status"] = "PASS" + elif subtest.status == TestStatus.SKIPPED: + test_case["status"] = "SKIP" elif subtest.status == TestStatus.TEST_CRASHED: test_case["status"] = "ERROR" test_cases.append(test_case) @@ -48,9 +50,9 @@ def _get_group_json(test: Test, def_config: str, } return test_group -def get_json_result(test_result: TestResult, def_config: str, +def get_json_result(test: Test, def_config: str, build_dir: Optional[str], 
json_path: str) -> str: - test_group = _get_group_json(test_result.test, def_config, build_dir) + test_group = _get_group_json(test, def_config, build_dir) test_group["name"] = "KUnit Test Group" json_obj = json.dumps(test_group, indent=4) if json_path != 'stdout': diff --git a/tools/testing/kunit/kunit_kernel.py b/tools/testing/kunit/kunit_kernel.py index 66095568bf32..44bbe54f25f1 100644 --- a/tools/testing/kunit/kunit_kernel.py +++ b/tools/testing/kunit/kunit_kernel.py @@ -21,6 +21,7 @@ import qemu_config KCONFIG_PATH = '.config' KUNITCONFIG_PATH = '.kunitconfig' +OLD_KUNITCONFIG_PATH = 'last_used_kunitconfig' DEFAULT_KUNITCONFIG_PATH = 'tools/testing/kunit/configs/default.config' BROKEN_ALLCONFIG_PATH = 'tools/testing/kunit/configs/broken_on_uml.config' OUTFILE_PATH = 'test.log' @@ -116,8 +117,7 @@ class LinuxSourceTreeOperationsQemu(LinuxSourceTreeOperations): self._extra_qemu_params = qemu_arch_params.extra_qemu_params def make_arch_qemuconfig(self, base_kunitconfig: kunit_config.Kconfig) -> None: - kconfig = kunit_config.Kconfig() - kconfig.parse_from_string(self._kconfig) + kconfig = kunit_config.parse_from_string(self._kconfig) base_kunitconfig.merge_in_entries(kconfig) def start(self, params: List[str], build_dir: str) -> subprocess.Popen: @@ -180,6 +180,9 @@ def get_kconfig_path(build_dir) -> str: def get_kunitconfig_path(build_dir) -> str: return get_file_path(build_dir, KUNITCONFIG_PATH) +def get_old_kunitconfig_path(build_dir) -> str: + return get_file_path(build_dir, OLD_KUNITCONFIG_PATH) + def get_outfile_path(build_dir) -> str: return get_file_path(build_dir, OUTFILE_PATH) @@ -206,6 +209,7 @@ def get_source_tree_ops_from_qemu_config(config_path: str, # exists as a file. module_path = '.' + os.path.join(os.path.basename(QEMU_CONFIGS_DIR), os.path.basename(config_path)) spec = importlib.util.spec_from_file_location(module_path, config_path) + assert spec is not None config = importlib.util.module_from_spec(spec) # See https://github.com/python/typeshed/pull/2626 for context. assert isinstance(spec.loader, importlib.abc.Loader) @@ -225,6 +229,7 @@ class LinuxSourceTree(object): build_dir: str, load_config=True, kunitconfig_path='', + kconfig_add: Optional[List[str]]=None, arch=None, cross_compile=None, qemu_config_path=None) -> None: @@ -249,8 +254,11 @@ class LinuxSourceTree(object): if not os.path.exists(kunitconfig_path): shutil.copyfile(DEFAULT_KUNITCONFIG_PATH, kunitconfig_path) - self._kconfig = kunit_config.Kconfig() - self._kconfig.read_from_file(kunitconfig_path) + self._kconfig = kunit_config.parse_file(kunitconfig_path) + if kconfig_add: + kconfig = kunit_config.parse_from_string('\n'.join(kconfig_add)) + self._kconfig.merge_in_entries(kconfig) + def clean(self) -> bool: try: @@ -262,17 +270,18 @@ class LinuxSourceTree(object): def validate_config(self, build_dir) -> bool: kconfig_path = get_kconfig_path(build_dir) - validated_kconfig = kunit_config.Kconfig() - validated_kconfig.read_from_file(kconfig_path) - if not self._kconfig.is_subset_of(validated_kconfig): - invalid = self._kconfig.entries() - validated_kconfig.entries() - message = 'Provided Kconfig is not contained in validated .config. 
Following fields found in kunitconfig, ' \ - 'but not in .config: %s' % ( - ', '.join([str(e) for e in invalid]) - ) - logging.error(message) - return False - return True + validated_kconfig = kunit_config.parse_file(kconfig_path) + if self._kconfig.is_subset_of(validated_kconfig): + return True + invalid = self._kconfig.entries() - validated_kconfig.entries() + message = 'Not all Kconfig options selected in kunitconfig were in the generated .config.\n' \ + 'This is probably due to unsatisfied dependencies.\n' \ + 'Missing: ' + ', '.join([str(e) for e in invalid]) + if self._arch == 'um': + message += '\nNote: many Kconfig options aren\'t available on UML. You can try running ' \ + 'on a different architecture with something like "--arch=x86_64".' + logging.error(message) + return False def build_config(self, build_dir, make_options) -> bool: kconfig_path = get_kconfig_path(build_dir) @@ -285,25 +294,38 @@ class LinuxSourceTree(object): except ConfigError as e: logging.error(e) return False - return self.validate_config(build_dir) + if not self.validate_config(build_dir): + return False + + old_path = get_old_kunitconfig_path(build_dir) + if os.path.exists(old_path): + os.remove(old_path) # write_to_file appends to the file + self._kconfig.write_to_file(old_path) + return True + + def _kunitconfig_changed(self, build_dir: str) -> bool: + old_path = get_old_kunitconfig_path(build_dir) + if not os.path.exists(old_path): + return True + + old_kconfig = kunit_config.parse_file(old_path) + return old_kconfig.entries() != self._kconfig.entries() def build_reconfig(self, build_dir, make_options) -> bool: """Creates a new .config if it is not a subset of the .kunitconfig.""" kconfig_path = get_kconfig_path(build_dir) - if os.path.exists(kconfig_path): - existing_kconfig = kunit_config.Kconfig() - existing_kconfig.read_from_file(kconfig_path) - self._ops.make_arch_qemuconfig(self._kconfig) - if not self._kconfig.is_subset_of(existing_kconfig): - print('Regenerating .config ...') - os.remove(kconfig_path) - return self.build_config(build_dir, make_options) - else: - return True - else: + if not os.path.exists(kconfig_path): print('Generating .config ...') return self.build_config(build_dir, make_options) + existing_kconfig = kunit_config.parse_file(kconfig_path) + self._ops.make_arch_qemuconfig(self._kconfig) + if self._kconfig.is_subset_of(existing_kconfig) and not self._kunitconfig_changed(build_dir): + return True + print('Regenerating .config ...') + os.remove(kconfig_path) + return self.build_config(build_dir, make_options) + def build_kernel(self, alltests, jobs, build_dir, make_options) -> bool: try: if alltests: diff --git a/tools/testing/kunit/kunit_parser.py b/tools/testing/kunit/kunit_parser.py index 3355196d0515..05ff334761dd 100644 --- a/tools/testing/kunit/kunit_parser.py +++ b/tools/testing/kunit/kunit_parser.py @@ -12,14 +12,11 @@ from __future__ import annotations import re -from collections import namedtuple -from datetime import datetime +import datetime from enum import Enum, auto from functools import reduce from typing import Iterable, Iterator, List, Optional, Tuple -TestResult = namedtuple('TestResult', ['status','test','log']) - class Test(object): """ A class to represent a test parsed from KTAP results. All KTAP @@ -168,42 +165,51 @@ class TestCounts: class LineStream: """ A class to represent the lines of kernel output. - Provides a peek()/pop() interface over an iterator of + Provides a lazy peek()/pop() interface over an iterator of (line#, text). 
""" _lines: Iterator[Tuple[int, str]] _next: Tuple[int, str] + _need_next: bool _done: bool def __init__(self, lines: Iterator[Tuple[int, str]]): """Creates a new LineStream that wraps the given iterator.""" self._lines = lines self._done = False + self._need_next = True self._next = (0, '') - self._get_next() def _get_next(self) -> None: - """Advances the LineSteam to the next line.""" + """Advances the LineSteam to the next line, if necessary.""" + if not self._need_next: + return try: self._next = next(self._lines) except StopIteration: self._done = True + finally: + self._need_next = False def peek(self) -> str: """Returns the current line, without advancing the LineStream. """ + self._get_next() return self._next[1] def pop(self) -> str: """Returns the current line and advances the LineStream to the next line. """ - n = self._next - self._get_next() - return n[1] + s = self.peek() + if self._done: + raise ValueError(f'LineStream: going past EOF, last line was {s}') + self._need_next = True + return s def __bool__(self) -> bool: """Returns True if stream has more lines.""" + self._get_next() return not self._done # Only used by kunit_tool_test.py. @@ -216,6 +222,7 @@ class LineStream: def line_number(self) -> int: """Returns the line number of the current line.""" + self._get_next() return self._next[0] # Parsing helper methods: @@ -340,8 +347,8 @@ def parse_test_plan(lines: LineStream, test: Test) -> bool: """ Parses test plan line and stores the expected number of subtests in test object. Reports an error if expected count is 0. - Returns False and reports missing test plan error if fails to parse - test plan. + Returns False and sets expected_count to None if there is no valid test + plan. Accepted format: - '1..[number of subtests]' @@ -356,14 +363,10 @@ def parse_test_plan(lines: LineStream, test: Test) -> bool: match = TEST_PLAN.match(lines.peek()) if not match: test.expected_count = None - test.add_error('missing plan line!') return False test.log.append(lines.pop()) expected_count = int(match.group(1)) test.expected_count = expected_count - if expected_count == 0: - test.status = TestStatus.NO_TESTS - test.add_error('0 tests run!') return True TEST_RESULT = re.compile(r'^(ok|not ok) ([0-9]+) (- )?([^#]*)( # .*)?$') @@ -514,7 +517,7 @@ ANSI_LEN = len(red('')) def print_with_timestamp(message: str) -> None: """Prints message with timestamp at beginning.""" - print('[%s] %s' % (datetime.now().strftime('%H:%M:%S'), message)) + print('[%s] %s' % (datetime.datetime.now().strftime('%H:%M:%S'), message)) def format_test_divider(message: str, len_message: int) -> str: """ @@ -590,6 +593,8 @@ def format_test_result(test: Test) -> str: return (green('[PASSED] ') + test.name) elif test.status == TestStatus.SKIPPED: return (yellow('[SKIPPED] ') + test.name) + elif test.status == TestStatus.NO_TESTS: + return (yellow('[NO TESTS RUN] ') + test.name) elif test.status == TestStatus.TEST_CRASHED: print_log(test.log) return (red('[CRASHED] ') + test.name) @@ -732,6 +737,7 @@ def parse_test(lines: LineStream, expected_num: int, log: List[str]) -> Test: # test plan test.name = "main" parse_test_plan(lines, test) + parent_test = True else: # If KTAP/TAP header is not found, test must be subtest # header or test result line so parse attempt to parser @@ -745,7 +751,7 @@ def parse_test(lines: LineStream, expected_num: int, log: List[str]) -> Test: expected_count = test.expected_count subtests = [] test_num = 1 - while expected_count is None or test_num <= expected_count: + while parent_test and 
(expected_count is None or test_num <= expected_count): # Loop to parse any subtests. # Break after parsing expected number of tests or # if expected number of tests is unknown break when test @@ -780,9 +786,15 @@ def parse_test(lines: LineStream, expected_num: int, log: List[str]) -> Test: parse_test_result(lines, test, expected_num) else: test.add_error('missing subtest result line!') + + # Check for there being no tests + if parent_test and len(subtests) == 0: + test.status = TestStatus.NO_TESTS + test.add_error('0 tests run!') + # Add statuses to TestCounts attribute in Test object bubble_up_test_results(test) - if parent_test: + if parent_test and not main: # If test has subtests and is not the main test object, print # footer. print_test_footer(test) @@ -790,7 +802,7 @@ def parse_test(lines: LineStream, expected_num: int, log: List[str]) -> Test: print_test_result(test) return test -def parse_run_tests(kernel_output: Iterable[str]) -> TestResult: +def parse_run_tests(kernel_output: Iterable[str]) -> Test: """ Using kernel output, extract KTAP lines, parse the lines for test results and print condensed test results and summary line . @@ -799,8 +811,7 @@ def parse_run_tests(kernel_output: Iterable[str]) -> TestResult: kernel_output - Iterable object contains lines of kernel output Return: - TestResult - Tuple containg status of main test object, main test - object with all subtests, and log of all KTAP lines. + Test - the main test object with all subtests. """ print_with_timestamp(DIVIDER) lines = extract_tap_lines(kernel_output) @@ -814,4 +825,4 @@ def parse_run_tests(kernel_output: Iterable[str]) -> TestResult: test.status = test.counts.get_status() print_with_timestamp(DIVIDER) print_summary_line(test) - return TestResult(test.status, test, lines) + return test diff --git a/tools/testing/kunit/kunit_tool_test.py b/tools/testing/kunit/kunit_tool_test.py index 9c4126731457..352369dffbd9 100755 --- a/tools/testing/kunit/kunit_tool_test.py +++ b/tools/testing/kunit/kunit_tool_test.py @@ -13,9 +13,10 @@ import tempfile, shutil # Handling test_tmpdir import itertools import json +import os import signal import subprocess -import os +from typing import Iterable import kunit_config import kunit_parser @@ -50,10 +51,9 @@ class KconfigTest(unittest.TestCase): self.assertFalse(kconfig1.is_subset_of(kconfig0)) def test_read_from_file(self): - kconfig = kunit_config.Kconfig() kconfig_path = test_data_path('test_read_from_file.kconfig') - kconfig.read_from_file(kconfig_path) + kconfig = kunit_config.parse_file(kconfig_path) expected_kconfig = kunit_config.Kconfig() expected_kconfig.add_entry( @@ -86,8 +86,7 @@ class KconfigTest(unittest.TestCase): expected_kconfig.write_to_file(kconfig_path) - actual_kconfig = kunit_config.Kconfig() - actual_kconfig.read_from_file(kconfig_path) + actual_kconfig = kunit_config.parse_file(kconfig_path) self.assertEqual(actual_kconfig.entries(), expected_kconfig.entries()) @@ -179,7 +178,7 @@ class KUnitParserTest(unittest.TestCase): with open(empty_log) as file: result = kunit_parser.parse_run_tests( kunit_parser.extract_tap_lines(file.readlines())) - self.assertEqual(0, len(result.test.subtests)) + self.assertEqual(0, len(result.subtests)) self.assertEqual( kunit_parser.TestStatus.FAILURE_TO_PARSE_TESTS, result.status) @@ -191,7 +190,10 @@ class KUnitParserTest(unittest.TestCase): result = kunit_parser.parse_run_tests( kunit_parser.extract_tap_lines( file.readlines())) - self.assertEqual(2, result.test.counts.errors) + # A missing test plan is not an error. 
+ self.assertEqual(0, result.counts.errors) + # All tests should be accounted for. + self.assertEqual(10, result.counts.total()) self.assertEqual( kunit_parser.TestStatus.SUCCESS, result.status) @@ -201,11 +203,23 @@ class KUnitParserTest(unittest.TestCase): with open(header_log) as file: result = kunit_parser.parse_run_tests( kunit_parser.extract_tap_lines(file.readlines())) - self.assertEqual(0, len(result.test.subtests)) + self.assertEqual(0, len(result.subtests)) self.assertEqual( kunit_parser.TestStatus.NO_TESTS, result.status) + def test_no_tests_no_plan(self): + no_plan_log = test_data_path('test_is_test_passed-no_tests_no_plan.log') + with open(no_plan_log) as file: + result = kunit_parser.parse_run_tests( + kunit_parser.extract_tap_lines(file.readlines())) + self.assertEqual(0, len(result.subtests[0].subtests[0].subtests)) + self.assertEqual( + kunit_parser.TestStatus.NO_TESTS, + result.subtests[0].subtests[0].status) + self.assertEqual(1, result.counts.errors) + + def test_no_kunit_output(self): crash_log = test_data_path('test_insufficient_memory.log') print_mock = mock.patch('builtins.print').start() @@ -214,7 +228,7 @@ class KUnitParserTest(unittest.TestCase): kunit_parser.extract_tap_lines(file.readlines())) print_mock.assert_any_call(StrContains('invalid KTAP input!')) print_mock.stop() - self.assertEqual(0, len(result.test.subtests)) + self.assertEqual(0, len(result.subtests)) def test_crashed_test(self): crashed_log = test_data_path('test_is_test_passed-crash.log') @@ -255,10 +269,10 @@ class KUnitParserTest(unittest.TestCase): result.status) self.assertEqual( "sysctl_test", - result.test.subtests[0].name) + result.subtests[0].name) self.assertEqual( "example", - result.test.subtests[1].name) + result.subtests[1].name) file.close() @@ -269,7 +283,7 @@ class KUnitParserTest(unittest.TestCase): self.assertEqual( kunit_parser.TestStatus.SUCCESS, result.status) - self.assertEqual('kunit-resource-test', result.test.subtests[0].name) + self.assertEqual('kunit-resource-test', result.subtests[0].name) def test_ignores_multiple_prefixes(self): prefix_log = test_data_path('test_multiple_prefixes.log') @@ -278,7 +292,7 @@ class KUnitParserTest(unittest.TestCase): self.assertEqual( kunit_parser.TestStatus.SUCCESS, result.status) - self.assertEqual('kunit-resource-test', result.test.subtests[0].name) + self.assertEqual('kunit-resource-test', result.subtests[0].name) def test_prefix_mixed_kernel_output(self): mixed_prefix_log = test_data_path('test_interrupted_tap_output.log') @@ -287,7 +301,7 @@ class KUnitParserTest(unittest.TestCase): self.assertEqual( kunit_parser.TestStatus.SUCCESS, result.status) - self.assertEqual('kunit-resource-test', result.test.subtests[0].name) + self.assertEqual('kunit-resource-test', result.subtests[0].name) def test_prefix_poundsign(self): pound_log = test_data_path('test_pound_sign.log') @@ -296,7 +310,7 @@ class KUnitParserTest(unittest.TestCase): self.assertEqual( kunit_parser.TestStatus.SUCCESS, result.status) - self.assertEqual('kunit-resource-test', result.test.subtests[0].name) + self.assertEqual('kunit-resource-test', result.subtests[0].name) def test_kernel_panic_end(self): panic_log = test_data_path('test_kernel_panic_interrupt.log') @@ -305,7 +319,7 @@ class KUnitParserTest(unittest.TestCase): self.assertEqual( kunit_parser.TestStatus.TEST_CRASHED, result.status) - self.assertEqual('kunit-resource-test', result.test.subtests[0].name) + self.assertEqual('kunit-resource-test', result.subtests[0].name) def test_pound_no_prefix(self): pound_log = 
test_data_path('test_pound_no_prefix.log') @@ -314,7 +328,46 @@ class KUnitParserTest(unittest.TestCase): self.assertEqual( kunit_parser.TestStatus.SUCCESS, result.status) - self.assertEqual('kunit-resource-test', result.test.subtests[0].name) + self.assertEqual('kunit-resource-test', result.subtests[0].name) + +def line_stream_from_strs(strs: Iterable[str]) -> kunit_parser.LineStream: + return kunit_parser.LineStream(enumerate(strs, start=1)) + +class LineStreamTest(unittest.TestCase): + + def test_basic(self): + stream = line_stream_from_strs(['hello', 'world']) + + self.assertTrue(stream, msg='Should be more input') + self.assertEqual(stream.line_number(), 1) + self.assertEqual(stream.peek(), 'hello') + self.assertEqual(stream.pop(), 'hello') + + self.assertTrue(stream, msg='Should be more input') + self.assertEqual(stream.line_number(), 2) + self.assertEqual(stream.peek(), 'world') + self.assertEqual(stream.pop(), 'world') + + self.assertFalse(stream, msg='Should be no more input') + with self.assertRaisesRegex(ValueError, 'LineStream: going past EOF'): + stream.pop() + + def test_is_lazy(self): + called_times = 0 + def generator(): + nonlocal called_times + for i in range(1,5): + called_times += 1 + yield called_times, str(called_times) + + stream = kunit_parser.LineStream(generator()) + self.assertEqual(called_times, 0) + + self.assertEqual(stream.pop(), '1') + self.assertEqual(called_times, 1) + + self.assertEqual(stream.pop(), '2') + self.assertEqual(called_times, 2) class LinuxSourceTreeTest(unittest.TestCase): @@ -336,6 +389,10 @@ class LinuxSourceTreeTest(unittest.TestCase): pass kunit_kernel.LinuxSourceTree('', kunitconfig_path=dir) + def test_kconfig_add(self): + tree = kunit_kernel.LinuxSourceTree('', kconfig_add=['CONFIG_NOT_REAL=y']) + self.assertIn(kunit_config.KconfigEntry('NOT_REAL', 'y'), tree._kconfig.entries()) + def test_invalid_arch(self): with self.assertRaisesRegex(kunit_kernel.ConfigError, 'not a valid arch, options are.*x86_64'): kunit_kernel.LinuxSourceTree('', arch='invalid') @@ -356,6 +413,51 @@ class LinuxSourceTreeTest(unittest.TestCase): with open(kunit_kernel.get_outfile_path(build_dir), 'rt') as outfile: self.assertEqual(outfile.read(), 'hi\nbye\n', msg='Missing some output') + def test_build_reconfig_no_config(self): + with tempfile.TemporaryDirectory('') as build_dir: + with open(kunit_kernel.get_kunitconfig_path(build_dir), 'w') as f: + f.write('CONFIG_KUNIT=y') + + tree = kunit_kernel.LinuxSourceTree(build_dir) + mock_build_config = mock.patch.object(tree, 'build_config').start() + + # Should generate the .config + self.assertTrue(tree.build_reconfig(build_dir, make_options=[])) + mock_build_config.assert_called_once_with(build_dir, []) + + def test_build_reconfig_existing_config(self): + with tempfile.TemporaryDirectory('') as build_dir: + # Existing .config is a superset, should not touch it + with open(kunit_kernel.get_kunitconfig_path(build_dir), 'w') as f: + f.write('CONFIG_KUNIT=y') + with open(kunit_kernel.get_old_kunitconfig_path(build_dir), 'w') as f: + f.write('CONFIG_KUNIT=y') + with open(kunit_kernel.get_kconfig_path(build_dir), 'w') as f: + f.write('CONFIG_KUNIT=y\nCONFIG_KUNIT_TEST=y') + + tree = kunit_kernel.LinuxSourceTree(build_dir) + mock_build_config = mock.patch.object(tree, 'build_config').start() + + self.assertTrue(tree.build_reconfig(build_dir, make_options=[])) + self.assertEqual(mock_build_config.call_count, 0) + + def test_build_reconfig_remove_option(self): + with tempfile.TemporaryDirectory('') as build_dir: + # We 
removed CONFIG_KUNIT_TEST=y from our .kunitconfig... + with open(kunit_kernel.get_kunitconfig_path(build_dir), 'w') as f: + f.write('CONFIG_KUNIT=y') + with open(kunit_kernel.get_old_kunitconfig_path(build_dir), 'w') as f: + f.write('CONFIG_KUNIT=y\nCONFIG_KUNIT_TEST=y') + with open(kunit_kernel.get_kconfig_path(build_dir), 'w') as f: + f.write('CONFIG_KUNIT=y\nCONFIG_KUNIT_TEST=y') + + tree = kunit_kernel.LinuxSourceTree(build_dir) + mock_build_config = mock.patch.object(tree, 'build_config').start() + + # ... so we should trigger a call to build_config() + self.assertTrue(tree.build_reconfig(build_dir, make_options=[])) + mock_build_config.assert_called_once_with(build_dir, []) + # TODO: add more test cases. @@ -365,7 +467,7 @@ class KUnitJsonTest(unittest.TestCase): with open(test_data_path(log_file)) as file: test_result = kunit_parser.parse_run_tests(file) json_obj = kunit_json.get_json_result( - test_result=test_result, + test=test_result, def_config='kunit_defconfig', build_dir=None, json_path='stdout') @@ -383,6 +485,12 @@ class KUnitJsonTest(unittest.TestCase): {'name': 'example_simple_test', 'status': 'ERROR'}, result["sub_groups"][1]["test_cases"][0]) + def test_skipped_test_json(self): + result = self._json_for('test_skip_tests.log') + self.assertEqual( + {'name': 'example_skip_test', 'status': 'SKIP'}, + result["sub_groups"][1]["test_cases"][1]) + def test_no_tests_json(self): result = self._json_for('test_is_test_passed-no_tests_run_with_header.log') self.assertEqual(0, len(result['sub_groups'])) @@ -418,8 +526,8 @@ class KUnitMainTest(unittest.TestCase): def test_build_passes_args_pass(self): kunit.main(['build'], self.linux_source_mock) - self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 0) - self.linux_source_mock.build_kernel.assert_called_once_with(False, 8, '.kunit', None) + self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1) + self.linux_source_mock.build_kernel.assert_called_once_with(False, kunit.get_default_jobs(), '.kunit', None) self.assertEqual(self.linux_source_mock.run_kernel.call_count, 0) def test_exec_passes_args_pass(self): @@ -525,8 +633,9 @@ class KUnitMainTest(unittest.TestCase): def test_build_builddir(self): build_dir = '.kunit' + jobs = kunit.get_default_jobs() kunit.main(['build', '--build_dir', build_dir], self.linux_source_mock) - self.linux_source_mock.build_kernel.assert_called_once_with(False, 8, build_dir, None) + self.linux_source_mock.build_kernel.assert_called_once_with(False, jobs, build_dir, None) def test_exec_builddir(self): build_dir = '.kunit' @@ -542,6 +651,7 @@ class KUnitMainTest(unittest.TestCase): # Just verify that we parsed and initialized it correctly here. mock_linux_init.assert_called_once_with('.kunit', kunitconfig_path='mykunitconfig', + kconfig_add=None, arch='um', cross_compile=None, qemu_config_path=None) @@ -553,6 +663,19 @@ class KUnitMainTest(unittest.TestCase): # Just verify that we parsed and initialized it correctly here. mock_linux_init.assert_called_once_with('.kunit', kunitconfig_path='mykunitconfig', + kconfig_add=None, + arch='um', + cross_compile=None, + qemu_config_path=None) + + @mock.patch.object(kunit_kernel, 'LinuxSourceTree') + def test_run_kconfig_add(self, mock_linux_init): + mock_linux_init.return_value = self.linux_source_mock + kunit.main(['run', '--kconfig_add=CONFIG_KASAN=y', '--kconfig_add=CONFIG_KCSAN=y']) + # Just verify that we parsed and initialized it correctly here. 
+ mock_linux_init.assert_called_once_with('.kunit', + kunitconfig_path=None, + kconfig_add=['CONFIG_KASAN=y', 'CONFIG_KCSAN=y'], arch='um', cross_compile=None, qemu_config_path=None) @@ -569,7 +692,7 @@ class KUnitMainTest(unittest.TestCase): self.linux_source_mock.run_kernel.return_value = ['TAP version 14', 'init: random output'] + want got = kunit._list_tests(self.linux_source_mock, - kunit.KunitExecRequest(300, '.kunit', False, 'suite*', None, 'suite')) + kunit.KunitExecRequest(None, '.kunit', None, 300, False, 'suite*', None, 'suite')) self.assertEqual(got, want) # Should respect the user's filter glob when listing tests. @@ -584,7 +707,7 @@ class KUnitMainTest(unittest.TestCase): # Should respect the user's filter glob when listing tests. mock_tests.assert_called_once_with(mock.ANY, - kunit.KunitExecRequest(300, '.kunit', False, 'suite*.test*', None, 'suite')) + kunit.KunitExecRequest(None, '.kunit', None, 300, False, 'suite*.test*', None, 'suite')) self.linux_source_mock.run_kernel.assert_has_calls([ mock.call(args=None, build_dir='.kunit', filter_glob='suite.test*', timeout=300), mock.call(args=None, build_dir='.kunit', filter_glob='suite2.test*', timeout=300), @@ -597,7 +720,7 @@ class KUnitMainTest(unittest.TestCase): # Should respect the user's filter glob when listing tests. mock_tests.assert_called_once_with(mock.ANY, - kunit.KunitExecRequest(300, '.kunit', False, 'suite*', None, 'test')) + kunit.KunitExecRequest(None, '.kunit', None, 300, False, 'suite*', None, 'test')) self.linux_source_mock.run_kernel.assert_has_calls([ mock.call(args=None, build_dir='.kunit', filter_glob='suite.test1', timeout=300), mock.call(args=None, build_dir='.kunit', filter_glob='suite.test2', timeout=300), diff --git a/tools/testing/kunit/run_checks.py b/tools/testing/kunit/run_checks.py new file mode 100755 index 000000000000..4f32133ed77c --- /dev/null +++ b/tools/testing/kunit/run_checks.py @@ -0,0 +1,81 @@ +#!/usr/bin/env python3 +# SPDX-License-Identifier: GPL-2.0 +# +# This file runs some basic checks to verify kunit works. +# It is only of interest if you're making changes to KUnit itself. +# +# Copyright (C) 2021, Google LLC. +# Author: Daniel Latypov <dlatypov@google.com.com> + +from concurrent import futures +import datetime +import os +import shutil +import subprocess +import sys +import textwrap +from typing import Dict, List, Sequence, Tuple + +ABS_TOOL_PATH = os.path.abspath(os.path.dirname(__file__)) +TIMEOUT = datetime.timedelta(minutes=5).total_seconds() + +commands: Dict[str, Sequence[str]] = { + 'kunit_tool_test.py': ['./kunit_tool_test.py'], + 'kunit smoke test': ['./kunit.py', 'run', '--kunitconfig=lib/kunit', '--build_dir=kunit_run_checks'], + 'pytype': ['/bin/sh', '-c', 'pytype *.py'], + 'mypy': ['/bin/sh', '-c', 'mypy *.py'], +} + +# The user might not have mypy or pytype installed, skip them if so. 
+# Note: you can install both via `$ pip install mypy pytype` +necessary_deps : Dict[str, str] = { + 'pytype': 'pytype', + 'mypy': 'mypy', +} + +def main(argv: Sequence[str]) -> None: + if argv: + raise RuntimeError('This script takes no arguments') + + future_to_name: Dict[futures.Future, str] = {} + executor = futures.ThreadPoolExecutor(max_workers=len(commands)) + for name, argv in commands.items(): + if name in necessary_deps and shutil.which(necessary_deps[name]) is None: + print(f'{name}: SKIPPED, {necessary_deps[name]} not in $PATH') + continue + f = executor.submit(run_cmd, argv) + future_to_name[f] = name + + has_failures = False + print(f'Waiting on {len(future_to_name)} checks ({", ".join(future_to_name.values())})...') + for f in futures.as_completed(future_to_name.keys()): + name = future_to_name[f] + ex = f.exception() + if not ex: + print(f'{name}: PASSED') + continue + + has_failures = True + if isinstance(ex, subprocess.TimeoutExpired): + print(f'{name}: TIMED OUT') + elif isinstance(ex, subprocess.CalledProcessError): + print(f'{name}: FAILED') + else: + print(f'{name}: unexpected exception: {ex}') + continue + + output = ex.output + if output: + print(textwrap.indent(output.decode(), '> ')) + executor.shutdown() + + if has_failures: + sys.exit(1) + + +def run_cmd(argv: Sequence[str]): + subprocess.check_output(argv, stderr=subprocess.STDOUT, cwd=ABS_TOOL_PATH, timeout=TIMEOUT) + + +if __name__ == '__main__': + main(sys.argv[1:]) diff --git a/tools/testing/kunit/test_data/test_is_test_passed-no_tests_no_plan.log b/tools/testing/kunit/test_data/test_is_test_passed-no_tests_no_plan.log new file mode 100644 index 000000000000..dd873c981108 --- /dev/null +++ b/tools/testing/kunit/test_data/test_is_test_passed-no_tests_no_plan.log @@ -0,0 +1,7 @@ +TAP version 14 +1..1 + # Subtest: suite + 1..1 + # Subtest: case + ok 1 - case # SKIP +ok 1 - suite diff --git a/tools/testing/selftests/arm64/Makefile b/tools/testing/selftests/arm64/Makefile index ced910fb4019..1e8d9a8f59df 100644 --- a/tools/testing/selftests/arm64/Makefile +++ b/tools/testing/selftests/arm64/Makefile @@ -4,7 +4,7 @@ ARCH ?= $(shell uname -m 2>/dev/null || echo not) ifneq (,$(filter $(ARCH),aarch64 arm64)) -ARM64_SUBTARGETS ?= tags signal pauth fp mte bti +ARM64_SUBTARGETS ?= tags signal pauth fp mte bti abi else ARM64_SUBTARGETS := endif diff --git a/tools/testing/selftests/arm64/abi/.gitignore b/tools/testing/selftests/arm64/abi/.gitignore new file mode 100644 index 000000000000..b79cf5814c23 --- /dev/null +++ b/tools/testing/selftests/arm64/abi/.gitignore @@ -0,0 +1 @@ +syscall-abi diff --git a/tools/testing/selftests/arm64/abi/Makefile b/tools/testing/selftests/arm64/abi/Makefile new file mode 100644 index 000000000000..96eba974ac8d --- /dev/null +++ b/tools/testing/selftests/arm64/abi/Makefile @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (C) 2021 ARM Limited + +TEST_GEN_PROGS := syscall-abi + +include ../../lib.mk + +$(OUTPUT)/syscall-abi: syscall-abi.c syscall-abi-asm.S diff --git a/tools/testing/selftests/arm64/abi/syscall-abi-asm.S b/tools/testing/selftests/arm64/abi/syscall-abi-asm.S new file mode 100644 index 000000000000..983467cfcee0 --- /dev/null +++ b/tools/testing/selftests/arm64/abi/syscall-abi-asm.S @@ -0,0 +1,240 @@ +// SPDX-License-Identifier: GPL-2.0-only +// Copyright (C) 2021 ARM Limited. +// +// Assembly portion of the syscall ABI test + +// +// Load values from memory into registers, invoke a syscall and save the +// register values back to memory for later checking. 
The syscall to be +// invoked is configured in x8 of the input GPR data. +// +// x0: SVE VL, 0 for FP only +// +// GPRs: gpr_in, gpr_out +// FPRs: fpr_in, fpr_out +// Zn: z_in, z_out +// Pn: p_in, p_out +// FFR: ffr_in, ffr_out + +.arch_extension sve + +.globl do_syscall +do_syscall: + // Store callee saved registers x19-x29 (80 bytes) plus x0 and x1 + stp x29, x30, [sp, #-112]! + mov x29, sp + stp x0, x1, [sp, #16] + stp x19, x20, [sp, #32] + stp x21, x22, [sp, #48] + stp x23, x24, [sp, #64] + stp x25, x26, [sp, #80] + stp x27, x28, [sp, #96] + + // Load GPRs x8-x28, and save our SP/FP for later comparison + ldr x2, =gpr_in + add x2, x2, #64 + ldp x8, x9, [x2], #16 + ldp x10, x11, [x2], #16 + ldp x12, x13, [x2], #16 + ldp x14, x15, [x2], #16 + ldp x16, x17, [x2], #16 + ldp x18, x19, [x2], #16 + ldp x20, x21, [x2], #16 + ldp x22, x23, [x2], #16 + ldp x24, x25, [x2], #16 + ldp x26, x27, [x2], #16 + ldr x28, [x2], #8 + str x29, [x2], #8 // FP + str x30, [x2], #8 // LR + + // Load FPRs if we're not doing SVE + cbnz x0, 1f + ldr x2, =fpr_in + ldp q0, q1, [x2] + ldp q2, q3, [x2, #16 * 2] + ldp q4, q5, [x2, #16 * 4] + ldp q6, q7, [x2, #16 * 6] + ldp q8, q9, [x2, #16 * 8] + ldp q10, q11, [x2, #16 * 10] + ldp q12, q13, [x2, #16 * 12] + ldp q14, q15, [x2, #16 * 14] + ldp q16, q17, [x2, #16 * 16] + ldp q18, q19, [x2, #16 * 18] + ldp q20, q21, [x2, #16 * 20] + ldp q22, q23, [x2, #16 * 22] + ldp q24, q25, [x2, #16 * 24] + ldp q26, q27, [x2, #16 * 26] + ldp q28, q29, [x2, #16 * 28] + ldp q30, q31, [x2, #16 * 30] +1: + + // Load the SVE registers if we're doing SVE + cbz x0, 1f + + ldr x2, =z_in + ldr z0, [x2, #0, MUL VL] + ldr z1, [x2, #1, MUL VL] + ldr z2, [x2, #2, MUL VL] + ldr z3, [x2, #3, MUL VL] + ldr z4, [x2, #4, MUL VL] + ldr z5, [x2, #5, MUL VL] + ldr z6, [x2, #6, MUL VL] + ldr z7, [x2, #7, MUL VL] + ldr z8, [x2, #8, MUL VL] + ldr z9, [x2, #9, MUL VL] + ldr z10, [x2, #10, MUL VL] + ldr z11, [x2, #11, MUL VL] + ldr z12, [x2, #12, MUL VL] + ldr z13, [x2, #13, MUL VL] + ldr z14, [x2, #14, MUL VL] + ldr z15, [x2, #15, MUL VL] + ldr z16, [x2, #16, MUL VL] + ldr z17, [x2, #17, MUL VL] + ldr z18, [x2, #18, MUL VL] + ldr z19, [x2, #19, MUL VL] + ldr z20, [x2, #20, MUL VL] + ldr z21, [x2, #21, MUL VL] + ldr z22, [x2, #22, MUL VL] + ldr z23, [x2, #23, MUL VL] + ldr z24, [x2, #24, MUL VL] + ldr z25, [x2, #25, MUL VL] + ldr z26, [x2, #26, MUL VL] + ldr z27, [x2, #27, MUL VL] + ldr z28, [x2, #28, MUL VL] + ldr z29, [x2, #29, MUL VL] + ldr z30, [x2, #30, MUL VL] + ldr z31, [x2, #31, MUL VL] + + ldr x2, =ffr_in + ldr p0, [x2, #0] + wrffr p0.b + + ldr x2, =p_in + ldr p0, [x2, #0, MUL VL] + ldr p1, [x2, #1, MUL VL] + ldr p2, [x2, #2, MUL VL] + ldr p3, [x2, #3, MUL VL] + ldr p4, [x2, #4, MUL VL] + ldr p5, [x2, #5, MUL VL] + ldr p6, [x2, #6, MUL VL] + ldr p7, [x2, #7, MUL VL] + ldr p8, [x2, #8, MUL VL] + ldr p9, [x2, #9, MUL VL] + ldr p10, [x2, #10, MUL VL] + ldr p11, [x2, #11, MUL VL] + ldr p12, [x2, #12, MUL VL] + ldr p13, [x2, #13, MUL VL] + ldr p14, [x2, #14, MUL VL] + ldr p15, [x2, #15, MUL VL] +1: + + // Do the syscall + svc #0 + + // Save GPRs x8-x30 + ldr x2, =gpr_out + add x2, x2, #64 + stp x8, x9, [x2], #16 + stp x10, x11, [x2], #16 + stp x12, x13, [x2], #16 + stp x14, x15, [x2], #16 + stp x16, x17, [x2], #16 + stp x18, x19, [x2], #16 + stp x20, x21, [x2], #16 + stp x22, x23, [x2], #16 + stp x24, x25, [x2], #16 + stp x26, x27, [x2], #16 + stp x28, x29, [x2], #16 + str x30, [x2] + + // Restore x0 and x1 for feature checks + ldp x0, x1, [sp, #16] + + // Save FPSIMD state + ldr x2, =fpr_out + stp q0, q1, 
[x2] + stp q2, q3, [x2, #16 * 2] + stp q4, q5, [x2, #16 * 4] + stp q6, q7, [x2, #16 * 6] + stp q8, q9, [x2, #16 * 8] + stp q10, q11, [x2, #16 * 10] + stp q12, q13, [x2, #16 * 12] + stp q14, q15, [x2, #16 * 14] + stp q16, q17, [x2, #16 * 16] + stp q18, q19, [x2, #16 * 18] + stp q20, q21, [x2, #16 * 20] + stp q22, q23, [x2, #16 * 22] + stp q24, q25, [x2, #16 * 24] + stp q26, q27, [x2, #16 * 26] + stp q28, q29, [x2, #16 * 28] + stp q30, q31, [x2, #16 * 30] + + // Save the SVE state if we have some + cbz x0, 1f + + ldr x2, =z_out + str z0, [x2, #0, MUL VL] + str z1, [x2, #1, MUL VL] + str z2, [x2, #2, MUL VL] + str z3, [x2, #3, MUL VL] + str z4, [x2, #4, MUL VL] + str z5, [x2, #5, MUL VL] + str z6, [x2, #6, MUL VL] + str z7, [x2, #7, MUL VL] + str z8, [x2, #8, MUL VL] + str z9, [x2, #9, MUL VL] + str z10, [x2, #10, MUL VL] + str z11, [x2, #11, MUL VL] + str z12, [x2, #12, MUL VL] + str z13, [x2, #13, MUL VL] + str z14, [x2, #14, MUL VL] + str z15, [x2, #15, MUL VL] + str z16, [x2, #16, MUL VL] + str z17, [x2, #17, MUL VL] + str z18, [x2, #18, MUL VL] + str z19, [x2, #19, MUL VL] + str z20, [x2, #20, MUL VL] + str z21, [x2, #21, MUL VL] + str z22, [x2, #22, MUL VL] + str z23, [x2, #23, MUL VL] + str z24, [x2, #24, MUL VL] + str z25, [x2, #25, MUL VL] + str z26, [x2, #26, MUL VL] + str z27, [x2, #27, MUL VL] + str z28, [x2, #28, MUL VL] + str z29, [x2, #29, MUL VL] + str z30, [x2, #30, MUL VL] + str z31, [x2, #31, MUL VL] + + ldr x2, =p_out + str p0, [x2, #0, MUL VL] + str p1, [x2, #1, MUL VL] + str p2, [x2, #2, MUL VL] + str p3, [x2, #3, MUL VL] + str p4, [x2, #4, MUL VL] + str p5, [x2, #5, MUL VL] + str p6, [x2, #6, MUL VL] + str p7, [x2, #7, MUL VL] + str p8, [x2, #8, MUL VL] + str p9, [x2, #9, MUL VL] + str p10, [x2, #10, MUL VL] + str p11, [x2, #11, MUL VL] + str p12, [x2, #12, MUL VL] + str p13, [x2, #13, MUL VL] + str p14, [x2, #14, MUL VL] + str p15, [x2, #15, MUL VL] + + ldr x2, =ffr_out + rdffr p0.b + str p0, [x2, #0] +1: + + // Restore callee saved registers x19-x30 + ldp x19, x20, [sp, #32] + ldp x21, x22, [sp, #48] + ldp x23, x24, [sp, #64] + ldp x25, x26, [sp, #80] + ldp x27, x28, [sp, #96] + ldp x29, x30, [sp], #112 + + ret diff --git a/tools/testing/selftests/arm64/abi/syscall-abi.c b/tools/testing/selftests/arm64/abi/syscall-abi.c new file mode 100644 index 000000000000..d8eeeafb50dc --- /dev/null +++ b/tools/testing/selftests/arm64/abi/syscall-abi.c @@ -0,0 +1,318 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2021 ARM Limited. + */ + +#include <errno.h> +#include <stdbool.h> +#include <stddef.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <unistd.h> +#include <sys/auxv.h> +#include <sys/prctl.h> +#include <asm/hwcap.h> +#include <asm/sigcontext.h> +#include <asm/unistd.h> + +#include "../../kselftest.h" + +#define ARRAY_SIZE(a) (sizeof(a) / sizeof(a[0])) +#define NUM_VL ((SVE_VQ_MAX - SVE_VQ_MIN) + 1) + +extern void do_syscall(int sve_vl); + +static void fill_random(void *buf, size_t size) +{ + int i; + uint32_t *lbuf = buf; + + /* random() returns a 32 bit number regardless of the size of long */ + for (i = 0; i < size / sizeof(uint32_t); i++) + lbuf[i] = random(); +} + +/* + * We also repeat the test for several syscalls to try to expose different + * behaviour. 
+ */ +static struct syscall_cfg { + int syscall_nr; + const char *name; +} syscalls[] = { + { __NR_getpid, "getpid()" }, + { __NR_sched_yield, "sched_yield()" }, +}; + +#define NUM_GPR 31 +uint64_t gpr_in[NUM_GPR]; +uint64_t gpr_out[NUM_GPR]; + +static void setup_gpr(struct syscall_cfg *cfg, int sve_vl) +{ + fill_random(gpr_in, sizeof(gpr_in)); + gpr_in[8] = cfg->syscall_nr; + memset(gpr_out, 0, sizeof(gpr_out)); +} + +static int check_gpr(struct syscall_cfg *cfg, int sve_vl) +{ + int errors = 0; + int i; + + /* + * GPR x0-x7 may be clobbered, and all others should be preserved. + */ + for (i = 9; i < ARRAY_SIZE(gpr_in); i++) { + if (gpr_in[i] != gpr_out[i]) { + ksft_print_msg("%s SVE VL %d mismatch in GPR %d: %llx != %llx\n", + cfg->name, sve_vl, i, + gpr_in[i], gpr_out[i]); + errors++; + } + } + + return errors; +} + +#define NUM_FPR 32 +uint64_t fpr_in[NUM_FPR * 2]; +uint64_t fpr_out[NUM_FPR * 2]; + +static void setup_fpr(struct syscall_cfg *cfg, int sve_vl) +{ + fill_random(fpr_in, sizeof(fpr_in)); + memset(fpr_out, 0, sizeof(fpr_out)); +} + +static int check_fpr(struct syscall_cfg *cfg, int sve_vl) +{ + int errors = 0; + int i; + + if (!sve_vl) { + for (i = 0; i < ARRAY_SIZE(fpr_in); i++) { + if (fpr_in[i] != fpr_out[i]) { + ksft_print_msg("%s Q%d/%d mismatch %llx != %llx\n", + cfg->name, + i / 2, i % 2, + fpr_in[i], fpr_out[i]); + errors++; + } + } + } + + return errors; +} + +static uint8_t z_zero[__SVE_ZREG_SIZE(SVE_VQ_MAX)]; +uint8_t z_in[SVE_NUM_PREGS * __SVE_ZREG_SIZE(SVE_VQ_MAX)]; +uint8_t z_out[SVE_NUM_PREGS * __SVE_ZREG_SIZE(SVE_VQ_MAX)]; + +static void setup_z(struct syscall_cfg *cfg, int sve_vl) +{ + fill_random(z_in, sizeof(z_in)); + fill_random(z_out, sizeof(z_out)); +} + +static int check_z(struct syscall_cfg *cfg, int sve_vl) +{ + size_t reg_size = sve_vl; + int errors = 0; + int i; + + if (!sve_vl) + return 0; + + /* + * After a syscall the low 128 bits of the Z registers should + * be preserved and the rest be zeroed or preserved. + */ + for (i = 0; i < SVE_NUM_ZREGS; i++) { + void *in = &z_in[reg_size * i]; + void *out = &z_out[reg_size * i]; + + if (memcmp(in, out, SVE_VQ_BYTES) != 0) { + ksft_print_msg("%s SVE VL %d Z%d low 128 bits changed\n", + cfg->name, sve_vl, i); + errors++; + } + } + + return errors; +} + +uint8_t p_in[SVE_NUM_PREGS * __SVE_PREG_SIZE(SVE_VQ_MAX)]; +uint8_t p_out[SVE_NUM_PREGS * __SVE_PREG_SIZE(SVE_VQ_MAX)]; + +static void setup_p(struct syscall_cfg *cfg, int sve_vl) +{ + fill_random(p_in, sizeof(p_in)); + fill_random(p_out, sizeof(p_out)); +} + +static int check_p(struct syscall_cfg *cfg, int sve_vl) +{ + size_t reg_size = sve_vq_from_vl(sve_vl) * 2; /* 1 bit per VL byte */ + + int errors = 0; + int i; + + if (!sve_vl) + return 0; + + /* After a syscall the P registers should be preserved or zeroed */ + for (i = 0; i < SVE_NUM_PREGS * reg_size; i++) + if (p_out[i] && (p_in[i] != p_out[i])) + errors++; + if (errors) + ksft_print_msg("%s SVE VL %d predicate registers non-zero\n", + cfg->name, sve_vl); + + return errors; +} + +uint8_t ffr_in[__SVE_PREG_SIZE(SVE_VQ_MAX)]; +uint8_t ffr_out[__SVE_PREG_SIZE(SVE_VQ_MAX)]; + +static void setup_ffr(struct syscall_cfg *cfg, int sve_vl) +{ + /* + * It is only valid to set a contiguous set of bits starting + * at 0. For now since we're expecting this to be cleared by + * a syscall just set all bits. 
+ */ + memset(ffr_in, 0xff, sizeof(ffr_in)); + fill_random(ffr_out, sizeof(ffr_out)); +} + +static int check_ffr(struct syscall_cfg *cfg, int sve_vl) +{ + size_t reg_size = sve_vq_from_vl(sve_vl) * 2; /* 1 bit per VL byte */ + int errors = 0; + int i; + + if (!sve_vl) + return 0; + + /* After a syscall the P registers should be preserved or zeroed */ + for (i = 0; i < reg_size; i++) + if (ffr_out[i] && (ffr_in[i] != ffr_out[i])) + errors++; + if (errors) + ksft_print_msg("%s SVE VL %d FFR non-zero\n", + cfg->name, sve_vl); + + return errors; +} + +typedef void (*setup_fn)(struct syscall_cfg *cfg, int sve_vl); +typedef int (*check_fn)(struct syscall_cfg *cfg, int sve_vl); + +/* + * Each set of registers has a setup function which is called before + * the syscall to fill values in a global variable for loading by the + * test code and a check function which validates that the results are + * as expected. Vector lengths are passed everywhere, a vector length + * of 0 should be treated as do not test. + */ +static struct { + setup_fn setup; + check_fn check; +} regset[] = { + { setup_gpr, check_gpr }, + { setup_fpr, check_fpr }, + { setup_z, check_z }, + { setup_p, check_p }, + { setup_ffr, check_ffr }, +}; + +static bool do_test(struct syscall_cfg *cfg, int sve_vl) +{ + int errors = 0; + int i; + + for (i = 0; i < ARRAY_SIZE(regset); i++) + regset[i].setup(cfg, sve_vl); + + do_syscall(sve_vl); + + for (i = 0; i < ARRAY_SIZE(regset); i++) + errors += regset[i].check(cfg, sve_vl); + + return errors == 0; +} + +static void test_one_syscall(struct syscall_cfg *cfg) +{ + int sve_vq, sve_vl; + + /* FPSIMD only case */ + ksft_test_result(do_test(cfg, 0), + "%s FPSIMD\n", cfg->name); + + if (!(getauxval(AT_HWCAP) & HWCAP_SVE)) + return; + + for (sve_vq = SVE_VQ_MAX; sve_vq > 0; --sve_vq) { + sve_vl = prctl(PR_SVE_SET_VL, sve_vq * 16); + if (sve_vl == -1) + ksft_exit_fail_msg("PR_SVE_SET_VL failed: %s (%d)\n", + strerror(errno), errno); + + sve_vl &= PR_SVE_VL_LEN_MASK; + + if (sve_vq != sve_vq_from_vl(sve_vl)) + sve_vq = sve_vq_from_vl(sve_vl); + + ksft_test_result(do_test(cfg, sve_vl), + "%s SVE VL %d\n", cfg->name, sve_vl); + } +} + +int sve_count_vls(void) +{ + unsigned int vq; + int vl_count = 0; + int vl; + + if (!(getauxval(AT_HWCAP) & HWCAP_SVE)) + return 0; + + /* + * Enumerate up to SVE_VQ_MAX vector lengths + */ + for (vq = SVE_VQ_MAX; vq > 0; --vq) { + vl = prctl(PR_SVE_SET_VL, vq * 16); + if (vl == -1) + ksft_exit_fail_msg("PR_SVE_SET_VL failed: %s (%d)\n", + strerror(errno), errno); + + vl &= PR_SVE_VL_LEN_MASK; + + if (vq != sve_vq_from_vl(vl)) + vq = sve_vq_from_vl(vl); + + vl_count++; + } + + return vl_count; +} + +int main(void) +{ + int i; + + srandom(getpid()); + + ksft_print_header(); + ksft_set_plan(ARRAY_SIZE(syscalls) * (sve_count_vls() + 1)); + + for (i = 0; i < ARRAY_SIZE(syscalls); i++) + test_one_syscall(&syscalls[i]); + + ksft_print_cnts(); + + return 0; +} diff --git a/tools/testing/selftests/arm64/fp/.gitignore b/tools/testing/selftests/arm64/fp/.gitignore index b67395903b9b..c50d86331ed2 100644 --- a/tools/testing/selftests/arm64/fp/.gitignore +++ b/tools/testing/selftests/arm64/fp/.gitignore @@ -1,3 +1,4 @@ +fp-pidbench fpsimd-test rdvl-sve sve-probe-vls diff --git a/tools/testing/selftests/arm64/fp/Makefile b/tools/testing/selftests/arm64/fp/Makefile index ba1488c7c315..95f0b877a060 100644 --- a/tools/testing/selftests/arm64/fp/Makefile +++ b/tools/testing/selftests/arm64/fp/Makefile @@ -2,13 +2,15 @@ CFLAGS += -I../../../../../usr/include/ TEST_GEN_PROGS := 
sve-ptrace sve-probe-vls vec-syscfg -TEST_PROGS_EXTENDED := fpsimd-test fpsimd-stress \ +TEST_PROGS_EXTENDED := fp-pidbench fpsimd-test fpsimd-stress \ rdvl-sve \ sve-test sve-stress \ vlset all: $(TEST_GEN_PROGS) $(TEST_PROGS_EXTENDED) +fp-pidbench: fp-pidbench.S asm-utils.o + $(CC) -nostdlib $^ -o $@ fpsimd-test: fpsimd-test.o asm-utils.o $(CC) -nostdlib $^ -o $@ rdvl-sve: rdvl-sve.o rdvl.o diff --git a/tools/testing/selftests/arm64/fp/fp-pidbench.S b/tools/testing/selftests/arm64/fp/fp-pidbench.S new file mode 100644 index 000000000000..16a436389bfc --- /dev/null +++ b/tools/testing/selftests/arm64/fp/fp-pidbench.S @@ -0,0 +1,71 @@ +// SPDX-License-Identifier: GPL-2.0-only +// Copyright (C) 2021 ARM Limited. +// Original author: Mark Brown <broonie@kernel.org> +// +// Trivial syscall overhead benchmark. +// +// This is implemented in asm to ensure that we don't have any issues with +// system libraries using instructions that disrupt the test. + +#include <asm/unistd.h> +#include "assembler.h" + +.arch_extension sve + +.macro test_loop per_loop + mov x10, x20 + mov x8, #__NR_getpid + mrs x11, CNTVCT_EL0 +1: + \per_loop + svc #0 + sub x10, x10, #1 + cbnz x10, 1b + + mrs x12, CNTVCT_EL0 + sub x0, x12, x11 + bl putdec + puts "\n" +.endm + +// Main program entry point +.globl _start +function _start +_start: + puts "Iterations per test: " + mov x20, #10000 + lsl x20, x20, #8 + mov x0, x20 + bl putdec + puts "\n" + + // Test having never used SVE + puts "No SVE: " + test_loop + + // Check for SVE support - should use hwcap but that's hard in asm + mrs x0, ID_AA64PFR0_EL1 + ubfx x0, x0, #32, #4 + cbnz x0, 1f + puts "System does not support SVE\n" + b out +1: + + // Execute a SVE instruction + puts "SVE VL: " + rdvl x0, #8 + bl putdec + puts "\n" + + puts "SVE used once: " + test_loop + + // Use SVE per syscall + puts "SVE used per syscall: " + test_loop "rdvl x0, #8" + + // And we're done +out: + mov x0, #0 + mov x8, #__NR_exit + svc #0 diff --git a/tools/testing/selftests/arm64/fp/sve-ptrace.c b/tools/testing/selftests/arm64/fp/sve-ptrace.c index c4417bc48d4f..af798b9d232c 100644 --- a/tools/testing/selftests/arm64/fp/sve-ptrace.c +++ b/tools/testing/selftests/arm64/fp/sve-ptrace.c @@ -21,16 +21,37 @@ #include "../../kselftest.h" -#define VL_TESTS (((SVE_VQ_MAX - SVE_VQ_MIN) + 1) * 3) -#define FPSIMD_TESTS 5 - -#define EXPECTED_TESTS (VL_TESTS + FPSIMD_TESTS) +#define ARRAY_SIZE(a) (sizeof(a) / sizeof(a[0])) /* <linux/elf.h> and <sys/auxv.h> don't like each other, so: */ #ifndef NT_ARM_SVE #define NT_ARM_SVE 0x405 #endif +struct vec_type { + const char *name; + unsigned long hwcap_type; + unsigned long hwcap; + int regset; + int prctl_set; +}; + +static const struct vec_type vec_types[] = { + { + .name = "SVE", + .hwcap_type = AT_HWCAP, + .hwcap = HWCAP_SVE, + .regset = NT_ARM_SVE, + .prctl_set = PR_SVE_SET_VL, + }, +}; + +#define VL_TESTS (((SVE_VQ_MAX - SVE_VQ_MIN) + 1) * 3) +#define FLAG_TESTS 2 +#define FPSIMD_TESTS 3 + +#define EXPECTED_TESTS ((VL_TESTS + FLAG_TESTS + FPSIMD_TESTS) * ARRAY_SIZE(vec_types)) + static void fill_buf(char *buf, size_t size) { int i; @@ -59,7 +80,8 @@ static int get_fpsimd(pid_t pid, struct user_fpsimd_state *fpsimd) return ptrace(PTRACE_GETREGSET, pid, NT_PRFPREG, &iov); } -static struct user_sve_header *get_sve(pid_t pid, void **buf, size_t *size) +static struct user_sve_header *get_sve(pid_t pid, const struct vec_type *type, + void **buf, size_t *size) { struct user_sve_header *sve; void *p; @@ -80,7 +102,7 @@ static struct user_sve_header *get_sve(pid_t 
pid, void **buf, size_t *size) iov.iov_base = *buf; iov.iov_len = sz; - if (ptrace(PTRACE_GETREGSET, pid, NT_ARM_SVE, &iov)) + if (ptrace(PTRACE_GETREGSET, pid, type->regset, &iov)) goto error; sve = *buf; @@ -96,17 +118,18 @@ error: return NULL; } -static int set_sve(pid_t pid, const struct user_sve_header *sve) +static int set_sve(pid_t pid, const struct vec_type *type, + const struct user_sve_header *sve) { struct iovec iov; iov.iov_base = (void *)sve; iov.iov_len = sve->size; - return ptrace(PTRACE_SETREGSET, pid, NT_ARM_SVE, &iov); + return ptrace(PTRACE_SETREGSET, pid, type->regset, &iov); } /* Validate setting and getting the inherit flag */ -static void ptrace_set_get_inherit(pid_t child) +static void ptrace_set_get_inherit(pid_t child, const struct vec_type *type) { struct user_sve_header sve; struct user_sve_header *new_sve = NULL; @@ -118,9 +141,10 @@ static void ptrace_set_get_inherit(pid_t child) sve.size = sizeof(sve); sve.vl = sve_vl_from_vq(SVE_VQ_MIN); sve.flags = SVE_PT_VL_INHERIT; - ret = set_sve(child, &sve); + ret = set_sve(child, type, &sve); if (ret != 0) { - ksft_test_result_fail("Failed to set SVE_PT_VL_INHERIT\n"); + ksft_test_result_fail("Failed to set %s SVE_PT_VL_INHERIT\n", + type->name); return; } @@ -128,35 +152,39 @@ static void ptrace_set_get_inherit(pid_t child) * Read back the new register state and verify that we have * set the flags we expected. */ - if (!get_sve(child, (void **)&new_sve, &new_sve_size)) { - ksft_test_result_fail("Failed to read SVE flags\n"); + if (!get_sve(child, type, (void **)&new_sve, &new_sve_size)) { + ksft_test_result_fail("Failed to read %s SVE flags\n", + type->name); return; } ksft_test_result(new_sve->flags & SVE_PT_VL_INHERIT, - "SVE_PT_VL_INHERIT set\n"); + "%s SVE_PT_VL_INHERIT set\n", type->name); /* Now clear */ sve.flags &= ~SVE_PT_VL_INHERIT; - ret = set_sve(child, &sve); + ret = set_sve(child, type, &sve); if (ret != 0) { - ksft_test_result_fail("Failed to clear SVE_PT_VL_INHERIT\n"); + ksft_test_result_fail("Failed to clear %s SVE_PT_VL_INHERIT\n", + type->name); return; } - if (!get_sve(child, (void **)&new_sve, &new_sve_size)) { - ksft_test_result_fail("Failed to read SVE flags\n"); + if (!get_sve(child, type, (void **)&new_sve, &new_sve_size)) { + ksft_test_result_fail("Failed to read %s SVE flags\n", + type->name); return; } ksft_test_result(!(new_sve->flags & SVE_PT_VL_INHERIT), - "SVE_PT_VL_INHERIT cleared\n"); + "%s SVE_PT_VL_INHERIT cleared\n", type->name); free(new_sve); } /* Validate attempting to set the specfied VL via ptrace */ -static void ptrace_set_get_vl(pid_t child, unsigned int vl, bool *supported) +static void ptrace_set_get_vl(pid_t child, const struct vec_type *type, + unsigned int vl, bool *supported) { struct user_sve_header sve; struct user_sve_header *new_sve = NULL; @@ -166,10 +194,10 @@ static void ptrace_set_get_vl(pid_t child, unsigned int vl, bool *supported) *supported = false; /* Check if the VL is supported in this process */ - prctl_vl = prctl(PR_SVE_SET_VL, vl); + prctl_vl = prctl(type->prctl_set, vl); if (prctl_vl == -1) - ksft_exit_fail_msg("prctl(PR_SVE_SET_VL) failed: %s (%d)\n", - strerror(errno), errno); + ksft_exit_fail_msg("prctl(PR_%s_SET_VL) failed: %s (%d)\n", + type->name, strerror(errno), errno); /* If the VL is not supported then a supported VL will be returned */ *supported = (prctl_vl == vl); @@ -178,9 +206,10 @@ static void ptrace_set_get_vl(pid_t child, unsigned int vl, bool *supported) memset(&sve, 0, sizeof(sve)); sve.size = sizeof(sve); sve.vl = vl; - ret = 
set_sve(child, &sve); + ret = set_sve(child, type, &sve); if (ret != 0) { - ksft_test_result_fail("Failed to set VL %u\n", vl); + ksft_test_result_fail("Failed to set %s VL %u\n", + type->name, vl); return; } @@ -188,12 +217,14 @@ static void ptrace_set_get_vl(pid_t child, unsigned int vl, bool *supported) * Read back the new register state and verify that we have the * same VL that we got from prctl() on ourselves. */ - if (!get_sve(child, (void **)&new_sve, &new_sve_size)) { - ksft_test_result_fail("Failed to read VL %u\n", vl); + if (!get_sve(child, type, (void **)&new_sve, &new_sve_size)) { + ksft_test_result_fail("Failed to read %s VL %u\n", + type->name, vl); return; } - ksft_test_result(new_sve->vl = prctl_vl, "Set VL %u\n", vl); + ksft_test_result(new_sve->vl = prctl_vl, "Set %s VL %u\n", + type->name, vl); free(new_sve); } @@ -209,7 +240,7 @@ static void check_u32(unsigned int vl, const char *reg, } /* Access the FPSIMD registers via the SVE regset */ -static void ptrace_sve_fpsimd(pid_t child) +static void ptrace_sve_fpsimd(pid_t child, const struct vec_type *type) { void *svebuf = NULL; size_t svebufsz = 0; @@ -219,17 +250,18 @@ static void ptrace_sve_fpsimd(pid_t child) unsigned char *p; /* New process should start with FPSIMD registers only */ - sve = get_sve(child, &svebuf, &svebufsz); + sve = get_sve(child, type, &svebuf, &svebufsz); if (!sve) { - ksft_test_result_fail("get_sve: %s\n", strerror(errno)); + ksft_test_result_fail("get_sve(%s): %s\n", + type->name, strerror(errno)); return; } else { - ksft_test_result_pass("get_sve(FPSIMD)\n"); + ksft_test_result_pass("get_sve(%s FPSIMD)\n", type->name); } ksft_test_result((sve->flags & SVE_PT_REGS_MASK) == SVE_PT_REGS_FPSIMD, - "Set FPSIMD registers\n"); + "Set FPSIMD registers via %s\n", type->name); if ((sve->flags & SVE_PT_REGS_MASK) != SVE_PT_REGS_FPSIMD) goto out; @@ -243,9 +275,9 @@ static void ptrace_sve_fpsimd(pid_t child) p[j] = j; } - if (set_sve(child, sve)) { - ksft_test_result_fail("set_sve(FPSIMD): %s\n", - strerror(errno)); + if (set_sve(child, type, sve)) { + ksft_test_result_fail("set_sve(%s FPSIMD): %s\n", + type->name, strerror(errno)); goto out; } @@ -257,16 +289,20 @@ static void ptrace_sve_fpsimd(pid_t child) goto out; } if (memcmp(fpsimd, &new_fpsimd, sizeof(*fpsimd)) == 0) - ksft_test_result_pass("get_fpsimd() gave same state\n"); + ksft_test_result_pass("%s get_fpsimd() gave same state\n", + type->name); else - ksft_test_result_fail("get_fpsimd() gave different state\n"); + ksft_test_result_fail("%s get_fpsimd() gave different state\n", + type->name); out: free(svebuf); } /* Validate attempting to set SVE data and read SVE data */ -static void ptrace_set_sve_get_sve_data(pid_t child, unsigned int vl) +static void ptrace_set_sve_get_sve_data(pid_t child, + const struct vec_type *type, + unsigned int vl) { void *write_buf; void *read_buf = NULL; @@ -281,8 +317,8 @@ static void ptrace_set_sve_get_sve_data(pid_t child, unsigned int vl) data_size = SVE_PT_SVE_OFFSET + SVE_PT_SVE_SIZE(vq, SVE_PT_REGS_SVE); write_buf = malloc(data_size); if (!write_buf) { - ksft_test_result_fail("Error allocating %d byte buffer for VL %u\n", - data_size, vl); + ksft_test_result_fail("Error allocating %d byte buffer for %s VL %u\n", + data_size, type->name, vl); return; } write_sve = write_buf; @@ -306,23 +342,26 @@ static void ptrace_set_sve_get_sve_data(pid_t child, unsigned int vl) /* TODO: Generate a valid FFR pattern */ - ret = set_sve(child, write_sve); + ret = set_sve(child, type, write_sve); if (ret != 0) { - 
ksft_test_result_fail("Failed to set VL %u data\n", vl); + ksft_test_result_fail("Failed to set %s VL %u data\n", + type->name, vl); goto out; } /* Read the data back */ - if (!get_sve(child, (void **)&read_buf, &read_sve_size)) { - ksft_test_result_fail("Failed to read VL %u data\n", vl); + if (!get_sve(child, type, (void **)&read_buf, &read_sve_size)) { + ksft_test_result_fail("Failed to read %s VL %u data\n", + type->name, vl); goto out; } read_sve = read_buf; /* We might read more data if there's extensions we don't know */ if (read_sve->size < write_sve->size) { - ksft_test_result_fail("Wrote %d bytes, only read %d\n", - write_sve->size, read_sve->size); + ksft_test_result_fail("%s wrote %d bytes, only read %d\n", + type->name, write_sve->size, + read_sve->size); goto out_read; } @@ -349,7 +388,8 @@ static void ptrace_set_sve_get_sve_data(pid_t child, unsigned int vl) check_u32(vl, "FPCR", write_buf + SVE_PT_SVE_FPCR_OFFSET(vq), read_buf + SVE_PT_SVE_FPCR_OFFSET(vq), &errors); - ksft_test_result(errors == 0, "Set and get SVE data for VL %u\n", vl); + ksft_test_result(errors == 0, "Set and get %s data for VL %u\n", + type->name, vl); out_read: free(read_buf); @@ -358,7 +398,9 @@ out: } /* Validate attempting to set SVE data and read SVE data */ -static void ptrace_set_sve_get_fpsimd_data(pid_t child, unsigned int vl) +static void ptrace_set_sve_get_fpsimd_data(pid_t child, + const struct vec_type *type, + unsigned int vl) { void *write_buf; struct user_sve_header *write_sve; @@ -376,8 +418,8 @@ static void ptrace_set_sve_get_fpsimd_data(pid_t child, unsigned int vl) data_size = SVE_PT_SVE_OFFSET + SVE_PT_SVE_SIZE(vq, SVE_PT_REGS_SVE); write_buf = malloc(data_size); if (!write_buf) { - ksft_test_result_fail("Error allocating %d byte buffer for VL %u\n", - data_size, vl); + ksft_test_result_fail("Error allocating %d byte buffer for %s VL %u\n", + data_size, type->name, vl); return; } write_sve = write_buf; @@ -395,16 +437,17 @@ static void ptrace_set_sve_get_fpsimd_data(pid_t child, unsigned int vl) fill_buf(write_buf + SVE_PT_SVE_FPSR_OFFSET(vq), SVE_PT_SVE_FPSR_SIZE); fill_buf(write_buf + SVE_PT_SVE_FPCR_OFFSET(vq), SVE_PT_SVE_FPCR_SIZE); - ret = set_sve(child, write_sve); + ret = set_sve(child, type, write_sve); if (ret != 0) { - ksft_test_result_fail("Failed to set VL %u data\n", vl); + ksft_test_result_fail("Failed to set %s VL %u data\n", + type->name, vl); goto out; } /* Read the data back */ if (get_fpsimd(child, &fpsimd_state)) { - ksft_test_result_fail("Failed to read VL %u FPSIMD data\n", - vl); + ksft_test_result_fail("Failed to read %s VL %u FPSIMD data\n", + type->name, vl); goto out; } @@ -419,7 +462,8 @@ static void ptrace_set_sve_get_fpsimd_data(pid_t child, unsigned int vl) sizeof(tmp)); if (tmp != fpsimd_state.vregs[i]) { - printf("# Mismatch in FPSIMD for VL %u Z%d\n", vl, i); + printf("# Mismatch in FPSIMD for %s VL %u Z%d\n", + type->name, vl, i); errors++; } } @@ -429,8 +473,8 @@ static void ptrace_set_sve_get_fpsimd_data(pid_t child, unsigned int vl) check_u32(vl, "FPCR", write_buf + SVE_PT_SVE_FPCR_OFFSET(vq), &fpsimd_state.fpcr, &errors); - ksft_test_result(errors == 0, "Set and get FPSIMD data for VL %u\n", - vl); + ksft_test_result(errors == 0, "Set and get FPSIMD data for %s VL %u\n", + type->name, vl); out: free(write_buf); @@ -440,7 +484,7 @@ static int do_parent(pid_t child) { int ret = EXIT_FAILURE; pid_t pid; - int status; + int status, i; siginfo_t si; unsigned int vq, vl; bool vl_supported; @@ -499,26 +543,47 @@ static int do_parent(pid_t child) } } - 
/* FPSIMD via SVE regset */ - ptrace_sve_fpsimd(child); - - /* prctl() flags */ - ptrace_set_get_inherit(child); - - /* Step through every possible VQ */ - for (vq = SVE_VQ_MIN; vq <= SVE_VQ_MAX; vq++) { - vl = sve_vl_from_vq(vq); + for (i = 0; i < ARRAY_SIZE(vec_types); i++) { + /* FPSIMD via SVE regset */ + if (getauxval(vec_types[i].hwcap_type) & vec_types[i].hwcap) { + ptrace_sve_fpsimd(child, &vec_types[i]); + } else { + ksft_test_result_skip("%s FPSIMD get via SVE\n", + vec_types[i].name); + ksft_test_result_skip("%s FPSIMD set via SVE\n", + vec_types[i].name); + ksft_test_result_skip("%s set read via FPSIMD\n", + vec_types[i].name); + } - /* First, try to set this vector length */ - ptrace_set_get_vl(child, vl, &vl_supported); + /* prctl() flags */ + ptrace_set_get_inherit(child, &vec_types[i]); + + /* Step through every possible VQ */ + for (vq = SVE_VQ_MIN; vq <= SVE_VQ_MAX; vq++) { + vl = sve_vl_from_vq(vq); + + /* First, try to set this vector length */ + if (getauxval(vec_types[i].hwcap_type) & + vec_types[i].hwcap) { + ptrace_set_get_vl(child, &vec_types[i], vl, + &vl_supported); + } else { + ksft_test_result_skip("%s get/set VL %d\n", + vec_types[i].name, vl); + vl_supported = false; + } - /* If the VL is supported validate data set/get */ - if (vl_supported) { - ptrace_set_sve_get_sve_data(child, vl); - ptrace_set_sve_get_fpsimd_data(child, vl); - } else { - ksft_test_result_skip("set SVE get SVE for VL %d\n", vl); - ksft_test_result_skip("set SVE get FPSIMD for VL %d\n", vl); + /* If the VL is supported validate data set/get */ + if (vl_supported) { + ptrace_set_sve_get_sve_data(child, &vec_types[i], vl); + ptrace_set_sve_get_fpsimd_data(child, &vec_types[i], vl); + } else { + ksft_test_result_skip("%s set SVE get SVE for VL %d\n", + vec_types[i].name, vl); + ksft_test_result_skip("%s set SVE get FPSIMD for VL %d\n", + vec_types[i].name, vl); + } } } diff --git a/tools/testing/selftests/arm64/fp/vec-syscfg.c b/tools/testing/selftests/arm64/fp/vec-syscfg.c index 272b888e018e..c90658811a83 100644 --- a/tools/testing/selftests/arm64/fp/vec-syscfg.c +++ b/tools/testing/selftests/arm64/fp/vec-syscfg.c @@ -21,8 +21,6 @@ #include "../../kselftest.h" #include "rdvl.h" -#define ARRAY_SIZE(a) (sizeof(a) / sizeof(a[0])) - #define ARCH_MIN_VL SVE_VL_MIN struct vec_data { diff --git a/tools/testing/selftests/arm64/signal/test_signals_utils.c b/tools/testing/selftests/arm64/signal/test_signals_utils.c index 22722abc9dfa..2f8c23af3b5e 100644 --- a/tools/testing/selftests/arm64/signal/test_signals_utils.c +++ b/tools/testing/selftests/arm64/signal/test_signals_utils.c @@ -310,14 +310,12 @@ int test_setup(struct tdescr *td) int test_run(struct tdescr *td) { - if (td->sig_trig) { - if (td->trigger) - return td->trigger(td); - else - return default_trigger(td); - } else { + if (td->trigger) + return td->trigger(td); + else if (td->sig_trig) + return default_trigger(td); + else return td->run(td, NULL, NULL); - } } void test_result(struct tdescr *td) diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c index 5d52ea2768df..df3b292a8ffe 100644 --- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c +++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c @@ -33,6 +33,22 @@ noinline int bpf_testmod_loop_test(int n) return sum; } +__weak noinline struct file *bpf_testmod_return_ptr(int arg) +{ + static struct file f = {}; + + switch (arg) { + case 1: return (void *)EINVAL; /* user addr */ + case 2: return (void 
*)0xcafe4a11; /* user addr */ + case 3: return (void *)-EINVAL; /* canonical, but invalid */ + case 4: return (void *)(1ull << 60); /* non-canonical and invalid */ + case 5: return (void *)~(1ull << 30); /* trigger extable */ + case 6: return &f; /* valid addr */ + case 7: return (void *)((long)&f | 1); /* kernel tricks */ + default: return NULL; + } +} + noinline ssize_t bpf_testmod_test_read(struct file *file, struct kobject *kobj, struct bin_attribute *bin_attr, @@ -43,6 +59,10 @@ bpf_testmod_test_read(struct file *file, struct kobject *kobj, .off = off, .len = len, }; + int i = 1; + + while (bpf_testmod_return_ptr(i)) + i++; /* This is always true. Use the check to make sure the compiler * doesn't remove bpf_testmod_loop_test. diff --git a/tools/testing/selftests/bpf/prog_tests/btf_skc_cls_ingress.c b/tools/testing/selftests/bpf/prog_tests/btf_skc_cls_ingress.c index 762f6a9da8b5..664ffc0364f4 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf_skc_cls_ingress.c +++ b/tools/testing/selftests/bpf/prog_tests/btf_skc_cls_ingress.c @@ -90,7 +90,7 @@ static void print_err_line(void) static void test_conn(void) { - int listen_fd = -1, cli_fd = -1, err; + int listen_fd = -1, cli_fd = -1, srv_fd = -1, err; socklen_t addrlen = sizeof(srv_sa6); int srv_port; @@ -112,6 +112,10 @@ static void test_conn(void) if (CHECK_FAIL(cli_fd == -1)) goto done; + srv_fd = accept(listen_fd, NULL, NULL); + if (CHECK_FAIL(srv_fd == -1)) + goto done; + if (CHECK(skel->bss->listen_tp_sport != srv_port || skel->bss->req_sk_sport != srv_port, "Unexpected sk src port", @@ -134,11 +138,13 @@ done: close(listen_fd); if (cli_fd != -1) close(cli_fd); + if (srv_fd != -1) + close(srv_fd); } static void test_syncookie(void) { - int listen_fd = -1, cli_fd = -1, err; + int listen_fd = -1, cli_fd = -1, srv_fd = -1, err; socklen_t addrlen = sizeof(srv_sa6); int srv_port; @@ -161,6 +167,10 @@ static void test_syncookie(void) if (CHECK_FAIL(cli_fd == -1)) goto done; + srv_fd = accept(listen_fd, NULL, NULL); + if (CHECK_FAIL(srv_fd == -1)) + goto done; + if (CHECK(skel->bss->listen_tp_sport != srv_port, "Unexpected tp src port", "listen_tp_sport:%u expected:%u\n", @@ -188,6 +198,8 @@ done: close(listen_fd); if (cli_fd != -1) close(cli_fd); + if (srv_fd != -1) + close(srv_fd); } struct test { diff --git a/tools/testing/selftests/bpf/progs/test_module_attach.c b/tools/testing/selftests/bpf/progs/test_module_attach.c index b36857093f71..50ce16d02da7 100644 --- a/tools/testing/selftests/bpf/progs/test_module_attach.c +++ b/tools/testing/selftests/bpf/progs/test_module_attach.c @@ -87,6 +87,18 @@ int BPF_PROG(handle_fexit, return 0; } +SEC("fexit/bpf_testmod_return_ptr") +int BPF_PROG(handle_fexit_ret, int arg, struct file *ret) +{ + long buf = 0; + + bpf_probe_read_kernel(&buf, 8, ret); + bpf_probe_read_kernel(&buf, 8, (char *)ret + 256); + *(volatile long long *)ret; + *(volatile int *)&ret->f_mode; + return 0; +} + __u32 fmod_ret_read_sz = 0; SEC("fmod_ret/bpf_testmod_test_read") diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index 465ef3f112c0..d3bf83d5c6cf 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c @@ -54,7 +54,7 @@ #define MAX_INSNS BPF_MAXINSNS #define MAX_TEST_INSNS 1000000 #define MAX_FIXUPS 8 -#define MAX_NR_MAPS 21 +#define MAX_NR_MAPS 22 #define MAX_TEST_RUNS 8 #define POINTER_VALUE 0xcafe4all #define TEST_DATA_LEN 64 diff --git a/tools/testing/selftests/bpf/verifier/atomic_cmpxchg.c 
b/tools/testing/selftests/bpf/verifier/atomic_cmpxchg.c index c22dc83a41fd..b39665f33524 100644 --- a/tools/testing/selftests/bpf/verifier/atomic_cmpxchg.c +++ b/tools/testing/selftests/bpf/verifier/atomic_cmpxchg.c @@ -138,6 +138,8 @@ BPF_EXIT_INSN(), }, .result = ACCEPT, + .result_unpriv = REJECT, + .errstr_unpriv = "R0 leaks addr into mem", }, { "Dest pointer in r0 - succeed", @@ -156,4 +158,88 @@ BPF_EXIT_INSN(), }, .result = ACCEPT, + .result_unpriv = REJECT, + .errstr_unpriv = "R0 leaks addr into mem", +}, +{ + "Dest pointer in r0 - succeed, check 2", + .insns = { + /* r0 = &val */ + BPF_MOV64_REG(BPF_REG_0, BPF_REG_10), + /* val = r0; */ + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), + /* r5 = &val */ + BPF_MOV64_REG(BPF_REG_5, BPF_REG_10), + /* r0 = atomic_cmpxchg(&val, r0, r5); */ + BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_10, BPF_REG_5, -8), + /* r1 = *r0 */ + BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8), + /* exit(0); */ + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .result_unpriv = REJECT, + .errstr_unpriv = "R0 leaks addr into mem", +}, +{ + "Dest pointer in r0 - succeed, check 3", + .insns = { + /* r0 = &val */ + BPF_MOV64_REG(BPF_REG_0, BPF_REG_10), + /* val = r0; */ + BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), + /* r5 = &val */ + BPF_MOV64_REG(BPF_REG_5, BPF_REG_10), + /* r0 = atomic_cmpxchg(&val, r0, r5); */ + BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, BPF_REG_10, BPF_REG_5, -8), + /* exit(0); */ + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "invalid size of register fill", + .errstr_unpriv = "R0 leaks addr into mem", +}, +{ + "Dest pointer in r0 - succeed, check 4", + .insns = { + /* r0 = &val */ + BPF_MOV32_REG(BPF_REG_0, BPF_REG_10), + /* val = r0; */ + BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -8), + /* r5 = &val */ + BPF_MOV32_REG(BPF_REG_5, BPF_REG_10), + /* r0 = atomic_cmpxchg(&val, r0, r5); */ + BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, BPF_REG_10, BPF_REG_5, -8), + /* r1 = *r10 */ + BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_10, -8), + /* exit(0); */ + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .result_unpriv = REJECT, + .errstr_unpriv = "R10 partial copy of pointer", +}, +{ + "Dest pointer in r0 - succeed, check 5", + .insns = { + /* r0 = &val */ + BPF_MOV32_REG(BPF_REG_0, BPF_REG_10), + /* val = r0; */ + BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -8), + /* r5 = &val */ + BPF_MOV32_REG(BPF_REG_5, BPF_REG_10), + /* r0 = atomic_cmpxchg(&val, r0, r5); */ + BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, BPF_REG_10, BPF_REG_5, -8), + /* r1 = *r0 */ + BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, -8), + /* exit(0); */ + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "R0 invalid mem access", + .errstr_unpriv = "R10 partial copy of pointer", }, diff --git a/tools/testing/selftests/bpf/verifier/atomic_fetch.c b/tools/testing/selftests/bpf/verifier/atomic_fetch.c index 3bc9ff7a860b..5bf03fb4fa2b 100644 --- a/tools/testing/selftests/bpf/verifier/atomic_fetch.c +++ b/tools/testing/selftests/bpf/verifier/atomic_fetch.c @@ -1,3 +1,97 @@ +{ + "atomic dw/fetch and address leakage of (map ptr & -1) via stack slot", + .insns = { + BPF_LD_IMM64(BPF_REG_1, -1), + BPF_LD_MAP_FD(BPF_REG_8, 0), + BPF_LD_MAP_FD(BPF_REG_9, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_9, 0), + BPF_ATOMIC_OP(BPF_DW, BPF_AND | BPF_FETCH, BPF_REG_2, BPF_REG_1, 0), + BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_2, 0), + 
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_8), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), + BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_9, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_array_48b = { 2, 4 }, + .result = ACCEPT, + .result_unpriv = REJECT, + .errstr_unpriv = "leaking pointer from stack off -8", +}, +{ + "atomic dw/fetch and address leakage of (map ptr & -1) via returned value", + .insns = { + BPF_LD_IMM64(BPF_REG_1, -1), + BPF_LD_MAP_FD(BPF_REG_8, 0), + BPF_LD_MAP_FD(BPF_REG_9, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_9, 0), + BPF_ATOMIC_OP(BPF_DW, BPF_AND | BPF_FETCH, BPF_REG_2, BPF_REG_1, 0), + BPF_MOV64_REG(BPF_REG_9, BPF_REG_1), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_8), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), + BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_9, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_array_48b = { 2, 4 }, + .result = ACCEPT, + .result_unpriv = REJECT, + .errstr_unpriv = "leaking pointer from stack off -8", +}, +{ + "atomic w/fetch and address leakage of (map ptr & -1) via stack slot", + .insns = { + BPF_LD_IMM64(BPF_REG_1, -1), + BPF_LD_MAP_FD(BPF_REG_8, 0), + BPF_LD_MAP_FD(BPF_REG_9, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_9, 0), + BPF_ATOMIC_OP(BPF_W, BPF_AND | BPF_FETCH, BPF_REG_2, BPF_REG_1, 0), + BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_2, 0), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_8), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), + BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_9, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_array_48b = { 2, 4 }, + .result = REJECT, + .errstr = "invalid size of register fill", +}, +{ + "atomic w/fetch and address leakage of (map ptr & -1) via returned value", + .insns = { + BPF_LD_IMM64(BPF_REG_1, -1), + BPF_LD_MAP_FD(BPF_REG_8, 0), + BPF_LD_MAP_FD(BPF_REG_9, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_9, 0), + BPF_ATOMIC_OP(BPF_W, BPF_AND | BPF_FETCH, BPF_REG_2, BPF_REG_1, 0), + BPF_MOV64_REG(BPF_REG_9, BPF_REG_1), + BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_8), + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), + BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_9, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_array_48b = { 2, 4 }, + .result = REJECT, + .errstr = "invalid size of register fill", +}, #define __ATOMIC_FETCH_OP_TEST(src_reg, dst_reg, operand1, op, operand2, expect) \ { \ "atomic fetch " #op ", src=" #dst_reg " dst=" #dst_reg, \ diff --git a/tools/testing/selftests/bpf/verifier/search_pruning.c b/tools/testing/selftests/bpf/verifier/search_pruning.c index 7e50cb80873a..682519769fe3 100644 --- a/tools/testing/selftests/bpf/verifier/search_pruning.c +++ b/tools/testing/selftests/bpf/verifier/search_pruning.c @@ -133,6 +133,77 @@ .prog_type = BPF_PROG_TYPE_TRACEPOINT, }, { + "precision tracking for u32 spill/fill", + .insns = { + BPF_MOV64_REG(BPF_REG_7, BPF_REG_1), + BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32), + BPF_MOV32_IMM(BPF_REG_6, 32), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), + 
BPF_MOV32_IMM(BPF_REG_6, 4), + /* Additional insns to introduce a pruning point. */ + BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_MOV64_IMM(BPF_REG_3, 0), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), + BPF_MOV64_IMM(BPF_REG_3, 0), + /* u32 spill/fill */ + BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_6, -8), + BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_10, -8), + /* out-of-bound map value access for r6=32 */ + BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8), + BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_hash_8b = { 15 }, + .result = REJECT, + .errstr = "R0 min value is outside of the allowed memory range", + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ + "precision tracking for u32 spills, u64 fill", + .insns = { + BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32), + BPF_MOV64_REG(BPF_REG_6, BPF_REG_0), + BPF_MOV32_IMM(BPF_REG_7, 0xffffffff), + /* Additional insns to introduce a pruning point. */ + BPF_MOV64_IMM(BPF_REG_3, 1), + BPF_MOV64_IMM(BPF_REG_3, 1), + BPF_MOV64_IMM(BPF_REG_3, 1), + BPF_MOV64_IMM(BPF_REG_3, 1), + BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32), + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), + BPF_MOV64_IMM(BPF_REG_3, 1), + BPF_ALU32_IMM(BPF_DIV, BPF_REG_3, 0), + /* u32 spills, u64 fill */ + BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_6, -4), + BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7, -8), + BPF_LDX_MEM(BPF_DW, BPF_REG_8, BPF_REG_10, -8), + /* if r8 != X goto pc+1 r8 known in fallthrough branch */ + BPF_JMP_IMM(BPF_JNE, BPF_REG_8, 0xffffffff, 1), + BPF_MOV64_IMM(BPF_REG_3, 1), + /* if r8 == X goto pc+1 condition always true on first + * traversal, so starts backtracking to mark r8 as requiring + * precision. r7 marked as needing precision. r6 not marked + * since it's not tracked. + */ + BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 0xffffffff, 1), + /* fails if r8 correctly marked unknown after fill. */ + BPF_ALU32_IMM(BPF_DIV, BPF_REG_3, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "div by zero", + .prog_type = BPF_PROG_TYPE_TRACEPOINT, +}, +{ "allocated_stack", .insns = { BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1), diff --git a/tools/testing/selftests/bpf/verifier/spill_fill.c b/tools/testing/selftests/bpf/verifier/spill_fill.c index 7ab3de108761..6c907144311f 100644 --- a/tools/testing/selftests/bpf/verifier/spill_fill.c +++ b/tools/testing/selftests/bpf/verifier/spill_fill.c @@ -176,6 +176,38 @@ .prog_type = BPF_PROG_TYPE_SCHED_CLS, }, { + "Spill u32 const scalars. Refill as u64. 
Offset to skb->data", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct __sk_buff, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct __sk_buff, data_end)), + /* r6 = 0 */ + BPF_MOV32_IMM(BPF_REG_6, 0), + /* r7 = 20 */ + BPF_MOV32_IMM(BPF_REG_7, 20), + /* *(u32 *)(r10 -4) = r6 */ + BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_6, -4), + /* *(u32 *)(r10 -8) = r7 */ + BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7, -8), + /* r4 = *(u64 *)(r10 -8) */ + BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_10, -8), + /* r0 = r2 */ + BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), + /* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=inv,umax=65535 */ + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4), + /* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=inv,umax=65535 */ + BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1), + /* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=inv20 */ + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = REJECT, + .errstr = "invalid access to packet", + .prog_type = BPF_PROG_TYPE_SCHED_CLS, +}, +{ "Spill a u32 const scalar. Refill as u16 from fp-6. Offset to skb->data", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, diff --git a/tools/testing/selftests/bpf/verifier/value_ptr_arith.c b/tools/testing/selftests/bpf/verifier/value_ptr_arith.c index 2debba4e8a3a..359f3e8f8b60 100644 --- a/tools/testing/selftests/bpf/verifier/value_ptr_arith.c +++ b/tools/testing/selftests/bpf/verifier/value_ptr_arith.c @@ -1078,6 +1078,29 @@ .errstr_unpriv = "R0 pointer -= pointer prohibited", }, { + "map access: trying to leak tainted dst reg", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), + BPF_LD_MAP_FD(BPF_REG_1, 0), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), + BPF_EXIT_INSN(), + BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), + BPF_MOV32_IMM(BPF_REG_1, 0xFFFFFFFF), + BPF_MOV32_REG(BPF_REG_1, BPF_REG_1), + BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1), + BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_array_48b = { 4 }, + .result = REJECT, + .errstr = "math between map_value pointer and 4294967295 is not allowed", +}, +{ "32bit pkt_ptr -= scalar", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1, diff --git a/tools/testing/selftests/bpf/verifier/xdp_direct_packet_access.c b/tools/testing/selftests/bpf/verifier/xdp_direct_packet_access.c index bfb97383e6b5..b4ec228eb95d 100644 --- a/tools/testing/selftests/bpf/verifier/xdp_direct_packet_access.c +++ b/tools/testing/selftests/bpf/verifier/xdp_direct_packet_access.c @@ -35,7 +35,7 @@ .prog_type = BPF_PROG_TYPE_XDP, }, { - "XDP pkt read, pkt_data' > pkt_end, good access", + "XDP pkt read, pkt_data' > pkt_end, corner case, good access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, @@ -88,6 +88,41 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { + "XDP pkt read, pkt_data' > pkt_end, corner case +1, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9), + BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9), + 
BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_data' > pkt_end, corner case -1, bad access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), + BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R1 offset is outside of the packet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ "XDP pkt read, pkt_end > pkt_data', good access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), @@ -106,16 +141,16 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { - "XDP pkt read, pkt_end > pkt_data', bad access 1", + "XDP pkt read, pkt_end > pkt_data', corner case -1, bad access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data_end)), BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6), BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1), BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6), BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }, @@ -143,6 +178,42 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { + "XDP pkt read, pkt_end > pkt_data', corner case, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), + BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_end > pkt_data', corner case +1, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ "XDP pkt read, pkt_data' < pkt_end, good access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), @@ -161,16 +232,16 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { - "XDP pkt read, pkt_data' < pkt_end, bad access 1", + "XDP pkt read, pkt_data' < pkt_end, corner case -1, bad access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data_end)), BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6), BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1), 
BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6), BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }, @@ -198,7 +269,43 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { - "XDP pkt read, pkt_end < pkt_data', good access", + "XDP pkt read, pkt_data' < pkt_end, corner case, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), + BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_data' < pkt_end, corner case +1, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_end < pkt_data', corner case, good access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, @@ -251,6 +358,41 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { + "XDP pkt read, pkt_end < pkt_data', corner case +1, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9), + BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_end < pkt_data', corner case -1, bad access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), + BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R1 offset is outside of the packet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ "XDP pkt read, pkt_data' >= pkt_end, good access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), @@ -268,15 +410,15 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { - "XDP pkt read, pkt_data' >= pkt_end, bad access 1", + "XDP pkt read, pkt_data' >= pkt_end, corner case -1, bad access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data_end)), BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + 
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6), BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6), BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }, @@ -304,7 +446,41 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { - "XDP pkt read, pkt_end >= pkt_data', good access", + "XDP pkt read, pkt_data' >= pkt_end, corner case, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), + BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_data' >= pkt_end, corner case +1, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_end >= pkt_data', corner case, good access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, @@ -359,7 +535,44 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { - "XDP pkt read, pkt_data' <= pkt_end, good access", + "XDP pkt read, pkt_end >= pkt_data', corner case +1, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9), + BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_end >= pkt_data', corner case -1, bad access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), + BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R1 offset is outside of the packet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_data' <= pkt_end, corner case, good access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, @@ -414,6 +627,43 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { + "XDP pkt read, pkt_data' <= pkt_end, corner case +1, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 
+ offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9), + BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_data' <= pkt_end, corner case -1, bad access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), + BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R1 offset is outside of the packet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ "XDP pkt read, pkt_end <= pkt_data', good access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), @@ -431,15 +681,15 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { - "XDP pkt read, pkt_end <= pkt_data', bad access 1", + "XDP pkt read, pkt_end <= pkt_data', corner case -1, bad access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data_end)), BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6), BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6), BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }, @@ -467,7 +717,41 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { - "XDP pkt read, pkt_meta' > pkt_data, good access", + "XDP pkt read, pkt_end <= pkt_data', corner case, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), + BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_end <= pkt_data', corner case +1, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, + offsetof(struct xdp_md, data_end)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_meta' > pkt_data, corner case, good access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data_meta)), @@ -520,6 +804,41 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { + "XDP pkt read, pkt_meta' > pkt_data, corner case +1, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + 
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9), + BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_meta' > pkt_data, corner case -1, bad access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), + BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R1 offset is outside of the packet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ "XDP pkt read, pkt_data > pkt_meta', good access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, @@ -538,16 +857,16 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { - "XDP pkt read, pkt_data > pkt_meta', bad access 1", + "XDP pkt read, pkt_data > pkt_meta', corner case -1, bad access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data_meta)), BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6), BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1), BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6), BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }, @@ -575,6 +894,42 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { + "XDP pkt read, pkt_data > pkt_meta', corner case, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), + BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_data > pkt_meta', corner case +1, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ "XDP pkt read, pkt_meta' < pkt_data, good access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, @@ -593,16 +948,16 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { - "XDP pkt read, pkt_meta' < pkt_data, bad access 1", + "XDP pkt read, pkt_meta' < pkt_data, corner case -1, bad access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data_meta)), BPF_LDX_MEM(BPF_W, BPF_REG_3, 
BPF_REG_1, offsetof(struct xdp_md, data)), BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6), BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1), BPF_JMP_IMM(BPF_JA, 0, 0, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6), BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }, @@ -630,7 +985,43 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { - "XDP pkt read, pkt_data < pkt_meta', good access", + "XDP pkt read, pkt_meta' < pkt_data, corner case, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), + BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_meta' < pkt_data, corner case +1, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_data < pkt_meta', corner case, good access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data_meta)), @@ -683,6 +1074,41 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { + "XDP pkt read, pkt_data < pkt_meta', corner case +1, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9), + BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_data < pkt_meta', corner case -1, bad access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), + BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R1 offset is outside of the packet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ "XDP pkt read, pkt_meta' >= pkt_data, good access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, @@ -700,15 +1126,15 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { - "XDP pkt read, pkt_meta' >= pkt_data, bad access 1", + "XDP pkt read, pkt_meta' >= pkt_data, corner case -1, bad access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data_meta)), 
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6), BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6), BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }, @@ -736,7 +1162,41 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { - "XDP pkt read, pkt_data >= pkt_meta', good access", + "XDP pkt read, pkt_meta' >= pkt_data, corner case, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), + BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_meta' >= pkt_data, corner case +1, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_data >= pkt_meta', corner case, good access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data_meta)), @@ -791,7 +1251,44 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { - "XDP pkt read, pkt_meta' <= pkt_data, good access", + "XDP pkt read, pkt_data >= pkt_meta', corner case +1, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9), + BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_data >= pkt_meta', corner case -1, bad access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), + BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R1 offset is outside of the packet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_meta' <= pkt_data, corner case, good access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data_meta)), @@ -846,6 +1343,43 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { + "XDP pkt read, pkt_meta' <= pkt_data, corner case +1, good access", + .insns = { + BPF_LDX_MEM(BPF_W, 
BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 9), + BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -9), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_meta' <= pkt_data, corner case -1, bad access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), + BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1), + BPF_JMP_IMM(BPF_JA, 0, 0, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .errstr = "R1 offset is outside of the packet", + .result = REJECT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ "XDP pkt read, pkt_data <= pkt_meta', good access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, @@ -863,15 +1397,15 @@ .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, { - "XDP pkt read, pkt_data <= pkt_meta', bad access 1", + "XDP pkt read, pkt_data <= pkt_meta', corner case -1, bad access", .insns = { BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, offsetof(struct xdp_md, data_meta)), BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), - BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6), BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1), - BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -6), BPF_MOV64_IMM(BPF_REG_0, 0), BPF_EXIT_INSN(), }, @@ -898,3 +1432,37 @@ .prog_type = BPF_PROG_TYPE_XDP, .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, }, +{ + "XDP pkt read, pkt_data <= pkt_meta', corner case, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 7), + BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -7), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, +{ + "XDP pkt read, pkt_data <= pkt_meta', corner case +1, good access", + .insns = { + BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, + offsetof(struct xdp_md, data_meta)), + BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, offsetof(struct xdp_md, data)), + BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), + BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1), + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_EXIT_INSN(), + }, + .result = ACCEPT, + .prog_type = BPF_PROG_TYPE_XDP, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, +}, diff --git a/tools/testing/selftests/cgroup/Makefile b/tools/testing/selftests/cgroup/Makefile index 59e222460581..745fe25fa0b9 100644 --- a/tools/testing/selftests/cgroup/Makefile +++ b/tools/testing/selftests/cgroup/Makefile @@ -11,10 +11,12 @@ TEST_GEN_PROGS += test_core TEST_GEN_PROGS += test_freezer TEST_GEN_PROGS += test_kill +LOCAL_HDRS += 
$(selfdir)/clone3/clone3_selftests.h $(selfdir)/pidfd/pidfd.h + include ../lib.mk -$(OUTPUT)/test_memcontrol: cgroup_util.c ../clone3/clone3_selftests.h -$(OUTPUT)/test_kmem: cgroup_util.c ../clone3/clone3_selftests.h -$(OUTPUT)/test_core: cgroup_util.c ../clone3/clone3_selftests.h -$(OUTPUT)/test_freezer: cgroup_util.c ../clone3/clone3_selftests.h -$(OUTPUT)/test_kill: cgroup_util.c ../clone3/clone3_selftests.h ../pidfd/pidfd.h +$(OUTPUT)/test_memcontrol: cgroup_util.c +$(OUTPUT)/test_kmem: cgroup_util.c +$(OUTPUT)/test_core: cgroup_util.c +$(OUTPUT)/test_freezer: cgroup_util.c +$(OUTPUT)/test_kill: cgroup_util.c diff --git a/tools/testing/selftests/cgroup/cgroup_util.c b/tools/testing/selftests/cgroup/cgroup_util.c index 623cec04ad42..0cf7e90c0052 100644 --- a/tools/testing/selftests/cgroup/cgroup_util.c +++ b/tools/testing/selftests/cgroup/cgroup_util.c @@ -221,7 +221,7 @@ int cg_find_unified_root(char *root, size_t len) int cg_create(const char *cgroup) { - return mkdir(cgroup, 0644); + return mkdir(cgroup, 0755); } int cg_wait_for_proc_count(const char *cgroup, int count) diff --git a/tools/testing/selftests/cgroup/cgroup_util.h b/tools/testing/selftests/cgroup/cgroup_util.h index 82e59cdf16e7..4f66d10626d2 100644 --- a/tools/testing/selftests/cgroup/cgroup_util.h +++ b/tools/testing/selftests/cgroup/cgroup_util.h @@ -2,9 +2,9 @@ #include <stdbool.h> #include <stdlib.h> -#define PAGE_SIZE 4096 +#include "../kselftest.h" -#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0])) +#define PAGE_SIZE 4096 #define MB(x) (x << 20) diff --git a/tools/testing/selftests/cgroup/test_core.c b/tools/testing/selftests/cgroup/test_core.c index 3df648c37876..600123503063 100644 --- a/tools/testing/selftests/cgroup/test_core.c +++ b/tools/testing/selftests/cgroup/test_core.c @@ -1,11 +1,14 @@ /* SPDX-License-Identifier: GPL-2.0 */ +#define _GNU_SOURCE #include <linux/limits.h> +#include <linux/sched.h> #include <sys/types.h> #include <sys/mman.h> #include <sys/wait.h> #include <unistd.h> #include <fcntl.h> +#include <sched.h> #include <stdio.h> #include <errno.h> #include <signal.h> @@ -674,6 +677,166 @@ cleanup: return ret; } +/* + * cgroup migration permission check should be performed based on the + * credentials at the time of open instead of write. 
+ */ +static int test_cgcore_lesser_euid_open(const char *root) +{ + const uid_t test_euid = 65534; /* usually nobody, any !root is fine */ + int ret = KSFT_FAIL; + char *cg_test_a = NULL, *cg_test_b = NULL; + char *cg_test_a_procs = NULL, *cg_test_b_procs = NULL; + int cg_test_b_procs_fd = -1; + uid_t saved_uid; + + cg_test_a = cg_name(root, "cg_test_a"); + cg_test_b = cg_name(root, "cg_test_b"); + + if (!cg_test_a || !cg_test_b) + goto cleanup; + + cg_test_a_procs = cg_name(cg_test_a, "cgroup.procs"); + cg_test_b_procs = cg_name(cg_test_b, "cgroup.procs"); + + if (!cg_test_a_procs || !cg_test_b_procs) + goto cleanup; + + if (cg_create(cg_test_a) || cg_create(cg_test_b)) + goto cleanup; + + if (cg_enter_current(cg_test_a)) + goto cleanup; + + if (chown(cg_test_a_procs, test_euid, -1) || + chown(cg_test_b_procs, test_euid, -1)) + goto cleanup; + + saved_uid = geteuid(); + if (seteuid(test_euid)) + goto cleanup; + + cg_test_b_procs_fd = open(cg_test_b_procs, O_RDWR); + + if (seteuid(saved_uid)) + goto cleanup; + + if (cg_test_b_procs_fd < 0) + goto cleanup; + + if (write(cg_test_b_procs_fd, "0", 1) >= 0 || errno != EACCES) + goto cleanup; + + ret = KSFT_PASS; + +cleanup: + cg_enter_current(root); + if (cg_test_b_procs_fd >= 0) + close(cg_test_b_procs_fd); + if (cg_test_b) + cg_destroy(cg_test_b); + if (cg_test_a) + cg_destroy(cg_test_a); + free(cg_test_b_procs); + free(cg_test_a_procs); + free(cg_test_b); + free(cg_test_a); + return ret; +} + +struct lesser_ns_open_thread_arg { + const char *path; + int fd; + int err; +}; + +static int lesser_ns_open_thread_fn(void *arg) +{ + struct lesser_ns_open_thread_arg *targ = arg; + + targ->fd = open(targ->path, O_RDWR); + targ->err = errno; + return 0; +} + +/* + * cgroup migration permission check should be performed based on the cgroup + * namespace at the time of open instead of write. 
+ */ +static int test_cgcore_lesser_ns_open(const char *root) +{ + static char stack[65536]; + const uid_t test_euid = 65534; /* usually nobody, any !root is fine */ + int ret = KSFT_FAIL; + char *cg_test_a = NULL, *cg_test_b = NULL; + char *cg_test_a_procs = NULL, *cg_test_b_procs = NULL; + int cg_test_b_procs_fd = -1; + struct lesser_ns_open_thread_arg targ = { .fd = -1 }; + pid_t pid; + int status; + + cg_test_a = cg_name(root, "cg_test_a"); + cg_test_b = cg_name(root, "cg_test_b"); + + if (!cg_test_a || !cg_test_b) + goto cleanup; + + cg_test_a_procs = cg_name(cg_test_a, "cgroup.procs"); + cg_test_b_procs = cg_name(cg_test_b, "cgroup.procs"); + + if (!cg_test_a_procs || !cg_test_b_procs) + goto cleanup; + + if (cg_create(cg_test_a) || cg_create(cg_test_b)) + goto cleanup; + + if (cg_enter_current(cg_test_b)) + goto cleanup; + + if (chown(cg_test_a_procs, test_euid, -1) || + chown(cg_test_b_procs, test_euid, -1)) + goto cleanup; + + targ.path = cg_test_b_procs; + pid = clone(lesser_ns_open_thread_fn, stack + sizeof(stack), + CLONE_NEWCGROUP | CLONE_FILES | CLONE_VM | SIGCHLD, + &targ); + if (pid < 0) + goto cleanup; + + if (waitpid(pid, &status, 0) < 0) + goto cleanup; + + if (!WIFEXITED(status)) + goto cleanup; + + cg_test_b_procs_fd = targ.fd; + if (cg_test_b_procs_fd < 0) + goto cleanup; + + if (cg_enter_current(cg_test_a)) + goto cleanup; + + if ((status = write(cg_test_b_procs_fd, "0", 1)) >= 0 || errno != ENOENT) + goto cleanup; + + ret = KSFT_PASS; + +cleanup: + cg_enter_current(root); + if (cg_test_b_procs_fd >= 0) + close(cg_test_b_procs_fd); + if (cg_test_b) + cg_destroy(cg_test_b); + if (cg_test_a) + cg_destroy(cg_test_a); + free(cg_test_b_procs); + free(cg_test_a_procs); + free(cg_test_b); + free(cg_test_a); + return ret; +} + #define T(x) { x, #x } struct corecg_test { int (*fn)(const char *root); @@ -689,6 +852,8 @@ struct corecg_test { T(test_cgcore_proc_migration), T(test_cgcore_thread_migration), T(test_cgcore_destroy), + T(test_cgcore_lesser_euid_open), + T(test_cgcore_lesser_ns_open), }; #undef T diff --git a/tools/testing/selftests/clone3/clone3.c b/tools/testing/selftests/clone3/clone3.c index 42be3b925830..076cf4325f78 100644 --- a/tools/testing/selftests/clone3/clone3.c +++ b/tools/testing/selftests/clone3/clone3.c @@ -52,6 +52,12 @@ static int call_clone3(uint64_t flags, size_t size, enum test_mode test_mode) size = sizeof(struct __clone_args); switch (test_mode) { + case CLONE3_ARGS_NO_TEST: + /* + * Uses default 'flags' and 'SIGCHLD' + * assignment. 
+ */ + break; case CLONE3_ARGS_ALL_0: args.flags = 0; args.exit_signal = 0; diff --git a/tools/testing/selftests/core/close_range_test.c b/tools/testing/selftests/core/close_range_test.c index aa7d13d91963..749239930ca8 100644 --- a/tools/testing/selftests/core/close_range_test.c +++ b/tools/testing/selftests/core/close_range_test.c @@ -50,10 +50,6 @@ static inline int sys_close_range(unsigned int fd, unsigned int max_fd, return syscall(__NR_close_range, fd, max_fd, flags); } -#ifndef ARRAY_SIZE -#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) -#endif - TEST(core_close_range) { int i, ret; diff --git a/tools/testing/selftests/damon/.gitignore b/tools/testing/selftests/damon/.gitignore new file mode 100644 index 000000000000..c6c2965a6607 --- /dev/null +++ b/tools/testing/selftests/damon/.gitignore @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only +huge_count_read_write diff --git a/tools/testing/selftests/damon/Makefile b/tools/testing/selftests/damon/Makefile index 8a3f2cd9fec0..937d36ae9a69 100644 --- a/tools/testing/selftests/damon/Makefile +++ b/tools/testing/selftests/damon/Makefile @@ -1,7 +1,10 @@ # SPDX-License-Identifier: GPL-2.0 # Makefile for damon selftests -TEST_FILES = _chk_dependency.sh -TEST_PROGS = debugfs_attrs.sh +TEST_GEN_FILES += huge_count_read_write + +TEST_FILES = _chk_dependency.sh _debugfs_common.sh +TEST_PROGS = debugfs_attrs.sh debugfs_schemes.sh debugfs_target_ids.sh +TEST_PROGS += debugfs_empty_targets.sh debugfs_huge_count_read_write.sh include ../lib.mk diff --git a/tools/testing/selftests/damon/_debugfs_common.sh b/tools/testing/selftests/damon/_debugfs_common.sh new file mode 100644 index 000000000000..48989d4813ae --- /dev/null +++ b/tools/testing/selftests/damon/_debugfs_common.sh @@ -0,0 +1,52 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +test_write_result() { + file=$1 + content=$2 + orig_content=$3 + expect_reason=$4 + expected=$5 + + echo "$content" > "$file" + if [ $? -ne "$expected" ] + then + echo "writing $content to $file doesn't return $expected" + echo "expected because: $expect_reason" + echo "$orig_content" > "$file" + exit 1 + fi +} + +test_write_succ() { + test_write_result "$1" "$2" "$3" "$4" 0 +} + +test_write_fail() { + test_write_result "$1" "$2" "$3" "$4" 1 +} + +test_content() { + file=$1 + orig_content=$2 + expected=$3 + expect_reason=$4 + + content=$(cat "$file") + if [ "$content" != "$expected" ] + then + echo "reading $file expected $expected but $content" + echo "expected because: $expect_reason" + echo "$orig_content" > "$file" + exit 1 + fi +} + +source ./_chk_dependency.sh + +damon_onoff="$DBGFS/monitor_on" +if [ $(cat "$damon_onoff") = "on" ] +then + echo "monitoring is on" + exit $ksft_skip +fi diff --git a/tools/testing/selftests/damon/debugfs_attrs.sh b/tools/testing/selftests/damon/debugfs_attrs.sh index 196b6640bf37..902e312bca89 100644 --- a/tools/testing/selftests/damon/debugfs_attrs.sh +++ b/tools/testing/selftests/damon/debugfs_attrs.sh @@ -1,48 +1,7 @@ #!/bin/bash # SPDX-License-Identifier: GPL-2.0 -test_write_result() { - file=$1 - content=$2 - orig_content=$3 - expect_reason=$4 - expected=$5 - - echo "$content" > "$file" - if [ $? 
-ne "$expected" ] - then - echo "writing $content to $file doesn't return $expected" - echo "expected because: $expect_reason" - echo "$orig_content" > "$file" - exit 1 - fi -} - -test_write_succ() { - test_write_result "$1" "$2" "$3" "$4" 0 -} - -test_write_fail() { - test_write_result "$1" "$2" "$3" "$4" 1 -} - -test_content() { - file=$1 - orig_content=$2 - expected=$3 - expect_reason=$4 - - content=$(cat "$file") - if [ "$content" != "$expected" ] - then - echo "reading $file expected $expected but $content" - echo "expected because: $expect_reason" - echo "$orig_content" > "$file" - exit 1 - fi -} - -source ./_chk_dependency.sh +source _debugfs_common.sh # Test attrs file # =============== @@ -56,33 +15,3 @@ test_write_fail "$file" "1 2 3 5 4" "$orig_content" \ "min_nr_regions > max_nr_regions" test_content "$file" "$orig_content" "1 2 3 4 5" "successfully written" echo "$orig_content" > "$file" - -# Test schemes file -# ================= - -file="$DBGFS/schemes" -orig_content=$(cat "$file") - -test_write_succ "$file" "1 2 3 4 5 6 4 0 0 0 1 2 3 1 100 3 2 1" \ - "$orig_content" "valid input" -test_write_fail "$file" "1 2 -3 4 5 6 3 0 0 0 1 2 3 1 100 3 2 1" "$orig_content" "multi lines" -test_write_succ "$file" "" "$orig_content" "disabling" -echo "$orig_content" > "$file" - -# Test target_ids file -# ==================== - -file="$DBGFS/target_ids" -orig_content=$(cat "$file") - -test_write_succ "$file" "1 2 3 4" "$orig_content" "valid input" -test_write_succ "$file" "1 2 abc 4" "$orig_content" "still valid input" -test_content "$file" "$orig_content" "1 2" "non-integer was there" -test_write_succ "$file" "abc 2 3" "$orig_content" "the file allows wrong input" -test_content "$file" "$orig_content" "" "wrong input written" -test_write_succ "$file" "" "$orig_content" "empty input" -test_content "$file" "$orig_content" "" "empty input written" -echo "$orig_content" > "$file" - -echo "PASS" diff --git a/tools/testing/selftests/damon/debugfs_empty_targets.sh b/tools/testing/selftests/damon/debugfs_empty_targets.sh new file mode 100644 index 000000000000..87aff8083822 --- /dev/null +++ b/tools/testing/selftests/damon/debugfs_empty_targets.sh @@ -0,0 +1,13 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +source _debugfs_common.sh + +# Test empty targets case +# ======================= + +orig_target_ids=$(cat "$DBGFS/target_ids") +echo "" > "$DBGFS/target_ids" +orig_monitor_on=$(cat "$DBGFS/monitor_on") +test_write_fail "$DBGFS/monitor_on" "on" "orig_monitor_on" "empty target ids" +echo "$orig_target_ids" > "$DBGFS/target_ids" diff --git a/tools/testing/selftests/damon/debugfs_huge_count_read_write.sh b/tools/testing/selftests/damon/debugfs_huge_count_read_write.sh new file mode 100644 index 000000000000..922cadac2950 --- /dev/null +++ b/tools/testing/selftests/damon/debugfs_huge_count_read_write.sh @@ -0,0 +1,22 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +source _debugfs_common.sh + +# Test huge count read write +# ========================== + +dmesg -C + +for file in "$DBGFS/"* +do + ./huge_count_read_write "$file" +done + +if dmesg | grep -q WARNING +then + dmesg + exit 1 +else + exit 0 +fi diff --git a/tools/testing/selftests/damon/debugfs_schemes.sh b/tools/testing/selftests/damon/debugfs_schemes.sh new file mode 100644 index 000000000000..5b39ab44731c --- /dev/null +++ b/tools/testing/selftests/damon/debugfs_schemes.sh @@ -0,0 +1,19 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +source _debugfs_common.sh + +# Test schemes file +# ================= + 
+file="$DBGFS/schemes" +orig_content=$(cat "$file") + +test_write_succ "$file" "1 2 3 4 5 6 4 0 0 0 1 2 3 1 100 3 2 1" \ + "$orig_content" "valid input" +test_write_fail "$file" "1 2 +3 4 5 6 3 0 0 0 1 2 3 1 100 3 2 1" "$orig_content" "multi lines" +test_write_succ "$file" "" "$orig_content" "disabling" +test_write_fail "$file" "2 1 2 1 10 1 3 10 1 1 1 1 1 1 1 1 2 3" \ + "$orig_content" "wrong condition ranges" +echo "$orig_content" > "$file" diff --git a/tools/testing/selftests/damon/debugfs_target_ids.sh b/tools/testing/selftests/damon/debugfs_target_ids.sh new file mode 100644 index 000000000000..49aeabdb0aae --- /dev/null +++ b/tools/testing/selftests/damon/debugfs_target_ids.sh @@ -0,0 +1,19 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +source _debugfs_common.sh + +# Test target_ids file +# ==================== + +file="$DBGFS/target_ids" +orig_content=$(cat "$file") + +test_write_succ "$file" "1 2 3 4" "$orig_content" "valid input" +test_write_succ "$file" "1 2 abc 4" "$orig_content" "still valid input" +test_content "$file" "$orig_content" "1 2" "non-integer was there" +test_write_succ "$file" "abc 2 3" "$orig_content" "the file allows wrong input" +test_content "$file" "$orig_content" "" "wrong input written" +test_write_succ "$file" "" "$orig_content" "empty input" +test_content "$file" "$orig_content" "" "empty input written" +echo "$orig_content" > "$file" diff --git a/tools/testing/selftests/damon/huge_count_read_write.c b/tools/testing/selftests/damon/huge_count_read_write.c new file mode 100644 index 000000000000..ad7a6b4cf338 --- /dev/null +++ b/tools/testing/selftests/damon/huge_count_read_write.c @@ -0,0 +1,39 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Author: SeongJae Park <sj@kernel.org> + */ + +#include <fcntl.h> +#include <stdlib.h> +#include <unistd.h> +#include <stdio.h> + +void write_read_with_huge_count(char *file) +{ + int filedesc = open(file, O_RDWR); + char buf[25]; + int ret; + + printf("%s %s\n", __func__, file); + if (filedesc < 0) { + fprintf(stderr, "failed opening %s\n", file); + exit(1); + } + + write(filedesc, "", 0xfffffffful); + perror("after write: "); + ret = read(filedesc, buf, 0xfffffffful); + perror("after read: "); + close(filedesc); +} + +int main(int argc, char *argv[]) +{ + if (argc != 2) { + fprintf(stderr, "Usage: %s <file>\n", argv[0]); + exit(1); + } + write_read_with_huge_count(argv[1]); + + return 0; +} diff --git a/tools/testing/selftests/drivers/net/mlxsw/rif_mac_profiles_occ.sh b/tools/testing/selftests/drivers/net/mlxsw/rif_mac_profiles_occ.sh index b513f64d9092..026a126f584d 100755 --- a/tools/testing/selftests/drivers/net/mlxsw/rif_mac_profiles_occ.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/rif_mac_profiles_occ.sh @@ -72,6 +72,35 @@ rif_mac_profile_replacement_test() ip link set $h1.10 address $h1_10_mac } +rif_mac_profile_consolidation_test() +{ + local count=$1; shift + local h1_20_mac + + RET=0 + + if [[ $count -eq 1 ]]; then + return + fi + + h1_20_mac=$(mac_get $h1.20) + + # Set the MAC of $h1.20 to that of $h1.10 and confirm that they are + # using the same MAC profile. + ip link set $h1.20 address 00:11:11:11:11:11 + check_err $? + + occ=$(devlink -j resource show $DEVLINK_DEV \ + | jq '.[][][] | select(.name=="rif_mac_profiles") |.["occ"]') + + [[ $occ -eq $((count - 1)) ]] + check_err $? 
"MAC profile occupancy did not decrease" + + log_test "RIF MAC profile consolidation" + + ip link set $h1.20 address $h1_20_mac +} + rif_mac_profile_shared_replacement_test() { local count=$1; shift @@ -104,6 +133,7 @@ rif_mac_profile_edit_test() create_max_rif_mac_profiles $count rif_mac_profile_replacement_test + rif_mac_profile_consolidation_test $count rif_mac_profile_shared_replacement_test $count } diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/profile.tc b/tools/testing/selftests/ftrace/test.d/kprobe/profile.tc index 98166fa3eb91..34fb89b0c61f 100644 --- a/tools/testing/selftests/ftrace/test.d/kprobe/profile.tc +++ b/tools/testing/selftests/ftrace/test.d/kprobe/profile.tc @@ -1,6 +1,6 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 -# description: Kprobe dynamic event - adding and removing +# description: Kprobe profile # requires: kprobe_events ! grep -q 'myevent' kprobe_profile diff --git a/tools/testing/selftests/ir/ir_loopback.c b/tools/testing/selftests/ir/ir_loopback.c index af7f9c7d59bc..06256c96df12 100644 --- a/tools/testing/selftests/ir/ir_loopback.c +++ b/tools/testing/selftests/ir/ir_loopback.c @@ -26,7 +26,6 @@ #include "../kselftest.h" #define TEST_SCANCODES 10 -#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0])) #define SYSFS_PATH_MAX 256 #define DNAME_PATH_MAX 256 diff --git a/tools/testing/selftests/kselftest.h b/tools/testing/selftests/kselftest.h index 8d50483fe204..f1180987492c 100644 --- a/tools/testing/selftests/kselftest.h +++ b/tools/testing/selftests/kselftest.h @@ -48,6 +48,10 @@ #include <stdarg.h> #include <stdio.h> +#ifndef ARRAY_SIZE +#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0])) +#endif + /* define kselftest exit codes */ #define KSFT_PASS 0 #define KSFT_FAIL 1 diff --git a/tools/testing/selftests/kselftest_harness.h b/tools/testing/selftests/kselftest_harness.h index ae0f0f33b2a6..471eaa7b3a3f 100644 --- a/tools/testing/selftests/kselftest_harness.h +++ b/tools/testing/selftests/kselftest_harness.h @@ -671,7 +671,9 @@ #define EXPECT_STRNE(expected, seen) \ __EXPECT_STR(expected, seen, !=, 0) +#ifndef ARRAY_SIZE #define ARRAY_SIZE(a) (sizeof(a) / sizeof(a[0])) +#endif /* Support an optional handler after and ASSERT_* or EXPECT_*. The approach is * not thread-safe, but it should be fine in most sane test scenarios. 
@@ -969,7 +971,7 @@ void __run_test(struct __fixture_metadata *f, t->passed = 1; t->skip = 0; t->trigger = 0; - t->step = 0; + t->step = 1; t->no_print = 0; memset(t->results->reason, 0, sizeof(t->results->reason)); diff --git a/tools/testing/selftests/kvm/.gitignore b/tools/testing/selftests/kvm/.gitignore index 3763105029fb..3cb5ac5da087 100644 --- a/tools/testing/selftests/kvm/.gitignore +++ b/tools/testing/selftests/kvm/.gitignore @@ -30,10 +30,12 @@ /x86_64/svm_int_ctl_test /x86_64/sync_regs_test /x86_64/tsc_msrs_test +/x86_64/userspace_io_test /x86_64/userspace_msr_exit_test /x86_64/vmx_apic_access_test /x86_64/vmx_close_while_nested_test /x86_64/vmx_dirty_log_test +/x86_64/vmx_invalid_nested_guest_state /x86_64/vmx_preemption_timer_test /x86_64/vmx_set_nested_state_test /x86_64/vmx_tsc_adjust_test diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index c4e34717826a..17342b575e85 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile @@ -59,10 +59,12 @@ TEST_GEN_PROGS_x86_64 += x86_64/vmx_preemption_timer_test TEST_GEN_PROGS_x86_64 += x86_64/svm_vmcall_test TEST_GEN_PROGS_x86_64 += x86_64/svm_int_ctl_test TEST_GEN_PROGS_x86_64 += x86_64/sync_regs_test +TEST_GEN_PROGS_x86_64 += x86_64/userspace_io_test TEST_GEN_PROGS_x86_64 += x86_64/userspace_msr_exit_test TEST_GEN_PROGS_x86_64 += x86_64/vmx_apic_access_test TEST_GEN_PROGS_x86_64 += x86_64/vmx_close_while_nested_test TEST_GEN_PROGS_x86_64 += x86_64/vmx_dirty_log_test +TEST_GEN_PROGS_x86_64 += x86_64/vmx_invalid_nested_guest_state TEST_GEN_PROGS_x86_64 += x86_64/vmx_set_nested_state_test TEST_GEN_PROGS_x86_64 += x86_64/vmx_tsc_adjust_test TEST_GEN_PROGS_x86_64 += x86_64/vmx_nested_tsc_scaling_test diff --git a/tools/testing/selftests/kvm/include/kvm_util.h b/tools/testing/selftests/kvm/include/kvm_util.h index 6a1a37f30494..2d62edc49d67 100644 --- a/tools/testing/selftests/kvm/include/kvm_util.h +++ b/tools/testing/selftests/kvm/include/kvm_util.h @@ -321,6 +321,7 @@ bool vm_is_unrestricted_guest(struct kvm_vm *vm); unsigned int vm_get_page_size(struct kvm_vm *vm); unsigned int vm_get_page_shift(struct kvm_vm *vm); +unsigned long vm_compute_max_gfn(struct kvm_vm *vm); uint64_t vm_get_max_gfn(struct kvm_vm *vm); int vm_get_fd(struct kvm_vm *vm); diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c index 8f2e0bb1ef96..53d2b5d04b82 100644 --- a/tools/testing/selftests/kvm/lib/kvm_util.c +++ b/tools/testing/selftests/kvm/lib/kvm_util.c @@ -302,7 +302,7 @@ struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm) (1ULL << (vm->va_bits - 1)) >> vm->page_shift); /* Limit physical addresses to PA-bits. */ - vm->max_gfn = ((1ULL << vm->pa_bits) >> vm->page_shift) - 1; + vm->max_gfn = vm_compute_max_gfn(vm); /* Allocate and setup memory for guest. 
*/ vm->vpages_mapped = sparsebit_alloc(); @@ -2328,6 +2328,11 @@ unsigned int vm_get_page_shift(struct kvm_vm *vm) return vm->page_shift; } +unsigned long __attribute__((weak)) vm_compute_max_gfn(struct kvm_vm *vm) +{ + return ((1ULL << vm->pa_bits) >> vm->page_shift) - 1; +} + uint64_t vm_get_max_gfn(struct kvm_vm *vm) { return vm->max_gfn; diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c index 82c39db91369..eef7b34756d5 100644 --- a/tools/testing/selftests/kvm/lib/x86_64/processor.c +++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c @@ -1431,3 +1431,71 @@ struct kvm_cpuid2 *vcpu_get_supported_hv_cpuid(struct kvm_vm *vm, uint32_t vcpui return cpuid; } + +#define X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx 0x68747541 +#define X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx 0x444d4163 +#define X86EMUL_CPUID_VENDOR_AuthenticAMD_edx 0x69746e65 + +static inline unsigned x86_family(unsigned int eax) +{ + unsigned int x86; + + x86 = (eax >> 8) & 0xf; + + if (x86 == 0xf) + x86 += (eax >> 20) & 0xff; + + return x86; +} + +unsigned long vm_compute_max_gfn(struct kvm_vm *vm) +{ + const unsigned long num_ht_pages = 12 << (30 - vm->page_shift); /* 12 GiB */ + unsigned long ht_gfn, max_gfn, max_pfn; + uint32_t eax, ebx, ecx, edx, max_ext_leaf; + + max_gfn = (1ULL << (vm->pa_bits - vm->page_shift)) - 1; + + /* Avoid reserved HyperTransport region on AMD processors. */ + eax = ecx = 0; + cpuid(&eax, &ebx, &ecx, &edx); + if (ebx != X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx || + ecx != X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx || + edx != X86EMUL_CPUID_VENDOR_AuthenticAMD_edx) + return max_gfn; + + /* On parts with <40 physical address bits, the area is fully hidden */ + if (vm->pa_bits < 40) + return max_gfn; + + /* Before family 17h, the HyperTransport area is just below 1T. */ + ht_gfn = (1 << 28) - num_ht_pages; + eax = 1; + cpuid(&eax, &ebx, &ecx, &edx); + if (x86_family(eax) < 0x17) + goto done; + + /* + * Otherwise it's at the top of the physical address space, possibly + * reduced due to SME by bits 11:6 of CPUID[0x8000001f].EBX. Use + * the old conservative value if MAXPHYADDR is not enumerated. 
+ */ + eax = 0x80000000; + cpuid(&eax, &ebx, &ecx, &edx); + max_ext_leaf = eax; + if (max_ext_leaf < 0x80000008) + goto done; + + eax = 0x80000008; + cpuid(&eax, &ebx, &ecx, &edx); + max_pfn = (1ULL << ((eax & 0xff) - vm->page_shift)) - 1; + if (max_ext_leaf >= 0x8000001f) { + eax = 0x8000001f; + cpuid(&eax, &ebx, &ecx, &edx); + max_pfn >>= (ebx >> 6) & 0x3f; + } + + ht_gfn = max_pfn - num_ht_pages; +done: + return min(max_gfn, ht_gfn - 1); +} diff --git a/tools/testing/selftests/kvm/x86_64/svm_int_ctl_test.c b/tools/testing/selftests/kvm/x86_64/svm_int_ctl_test.c index df04f56ce859..30a81038df46 100644 --- a/tools/testing/selftests/kvm/x86_64/svm_int_ctl_test.c +++ b/tools/testing/selftests/kvm/x86_64/svm_int_ctl_test.c @@ -75,7 +75,7 @@ static void l1_guest_code(struct svm_test_data *svm) vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK; /* No intercepts for real and virtual interrupts */ - vmcb->control.intercept &= ~(1ULL << INTERCEPT_INTR | INTERCEPT_VINTR); + vmcb->control.intercept &= ~(BIT(INTERCEPT_INTR) | BIT(INTERCEPT_VINTR)); /* Make a virtual interrupt VINTR_IRQ_NUMBER pending */ vmcb->control.int_ctl |= V_IRQ_MASK | (0x1 << V_INTR_PRIO_SHIFT); diff --git a/tools/testing/selftests/kvm/x86_64/userspace_io_test.c b/tools/testing/selftests/kvm/x86_64/userspace_io_test.c new file mode 100644 index 000000000000..e4bef2e05686 --- /dev/null +++ b/tools/testing/selftests/kvm/x86_64/userspace_io_test.c @@ -0,0 +1,114 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <fcntl.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <sys/ioctl.h> + +#include "test_util.h" + +#include "kvm_util.h" +#include "processor.h" + +#define VCPU_ID 1 + +static void guest_ins_port80(uint8_t *buffer, unsigned int count) +{ + unsigned long end; + + if (count == 2) + end = (unsigned long)buffer + 1; + else + end = (unsigned long)buffer + 8192; + + asm volatile("cld; rep; insb" : "+D"(buffer), "+c"(count) : "d"(0x80) : "memory"); + GUEST_ASSERT_1(count == 0, count); + GUEST_ASSERT_2((unsigned long)buffer == end, buffer, end); +} + +static void guest_code(void) +{ + uint8_t buffer[8192]; + int i; + + /* + * Special case tests. main() will adjust RCX 2 => 1 and 3 => 8192 to + * test that KVM doesn't explode when userspace modifies the "count" on + * a userspace I/O exit. KVM isn't required to play nice with the I/O + * itself as KVM doesn't support manipulating the count, it just needs + * to not explode or overflow a buffer. + */ + guest_ins_port80(buffer, 2); + guest_ins_port80(buffer, 3); + + /* Verify KVM fills the buffer correctly when not stuffing RCX. 
*/ + memset(buffer, 0, sizeof(buffer)); + guest_ins_port80(buffer, 8192); + for (i = 0; i < 8192; i++) + GUEST_ASSERT_2(buffer[i] == 0xaa, i, buffer[i]); + + GUEST_DONE(); +} + +int main(int argc, char *argv[]) +{ + struct kvm_regs regs; + struct kvm_run *run; + struct kvm_vm *vm; + struct ucall uc; + int rc; + + /* Tell stdout not to buffer its content */ + setbuf(stdout, NULL); + + /* Create VM */ + vm = vm_create_default(VCPU_ID, 0, guest_code); + run = vcpu_state(vm, VCPU_ID); + + memset(®s, 0, sizeof(regs)); + + while (1) { + rc = _vcpu_run(vm, VCPU_ID); + + TEST_ASSERT(rc == 0, "vcpu_run failed: %d\n", rc); + TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, + "Unexpected exit reason: %u (%s),\n", + run->exit_reason, + exit_reason_str(run->exit_reason)); + + if (get_ucall(vm, VCPU_ID, &uc)) + break; + + TEST_ASSERT(run->io.port == 0x80, + "Expected I/O at port 0x80, got port 0x%x\n", run->io.port); + + /* + * Modify the rep string count in RCX: 2 => 1 and 3 => 8192. + * Note, this abuses KVM's batching of rep string I/O to avoid + * getting stuck in an infinite loop. That behavior isn't in + * scope from a testing perspective as it's not ABI in any way, + * i.e. it really is abusing internal KVM knowledge. + */ + vcpu_regs_get(vm, VCPU_ID, ®s); + if (regs.rcx == 2) + regs.rcx = 1; + if (regs.rcx == 3) + regs.rcx = 8192; + memset((void *)run + run->io.data_offset, 0xaa, 4096); + vcpu_regs_set(vm, VCPU_ID, ®s); + } + + switch (uc.cmd) { + case UCALL_DONE: + break; + case UCALL_ABORT: + TEST_FAIL("%s at %s:%ld : argN+1 = 0x%lx, argN+2 = 0x%lx", + (const char *)uc.args[0], __FILE__, uc.args[1], + uc.args[2], uc.args[3]); + default: + TEST_FAIL("Unknown ucall %lu", uc.cmd); + } + + kvm_vm_free(vm); + return 0; +} diff --git a/tools/testing/selftests/kvm/x86_64/vmx_invalid_nested_guest_state.c b/tools/testing/selftests/kvm/x86_64/vmx_invalid_nested_guest_state.c new file mode 100644 index 000000000000..489fbed4ca6f --- /dev/null +++ b/tools/testing/selftests/kvm/x86_64/vmx_invalid_nested_guest_state.c @@ -0,0 +1,105 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include "test_util.h" +#include "kvm_util.h" +#include "processor.h" +#include "vmx.h" + +#include <string.h> +#include <sys/ioctl.h> + +#include "kselftest.h" + +#define VCPU_ID 0 +#define ARBITRARY_IO_PORT 0x2000 + +static struct kvm_vm *vm; + +static void l2_guest_code(void) +{ + /* + * Generate an exit to L0 userspace, i.e. main(), via I/O to an + * arbitrary port. + */ + asm volatile("inb %%dx, %%al" + : : [port] "d" (ARBITRARY_IO_PORT) : "rax"); +} + +static void l1_guest_code(struct vmx_pages *vmx_pages) +{ +#define L2_GUEST_STACK_SIZE 64 + unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE]; + + GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages)); + GUEST_ASSERT(load_vmcs(vmx_pages)); + + /* Prepare the VMCS for L2 execution. */ + prepare_vmcs(vmx_pages, l2_guest_code, + &l2_guest_stack[L2_GUEST_STACK_SIZE]); + + /* + * L2 must be run without unrestricted guest, verify that the selftests + * library hasn't enabled it. Because KVM selftests jump directly to + * 64-bit mode, unrestricted guest support isn't required. + */ + GUEST_ASSERT(!(vmreadz(CPU_BASED_VM_EXEC_CONTROL) & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) || + !(vmreadz(SECONDARY_VM_EXEC_CONTROL) & SECONDARY_EXEC_UNRESTRICTED_GUEST)); + + GUEST_ASSERT(!vmlaunch()); + + /* L2 should triple fault after main() stuffs invalid guest state. 
*/ + GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_TRIPLE_FAULT); + GUEST_DONE(); +} + +int main(int argc, char *argv[]) +{ + vm_vaddr_t vmx_pages_gva; + struct kvm_sregs sregs; + struct kvm_run *run; + struct ucall uc; + + nested_vmx_check_supported(); + + vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code); + + /* Allocate VMX pages and shared descriptors (vmx_pages). */ + vcpu_alloc_vmx(vm, &vmx_pages_gva); + vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva); + + vcpu_run(vm, VCPU_ID); + + run = vcpu_state(vm, VCPU_ID); + + /* + * The first exit to L0 userspace should be an I/O access from L2. + * Running L1 should launch L2 without triggering an exit to userspace. + */ + TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, + "Expected KVM_EXIT_IO, got: %u (%s)\n", + run->exit_reason, exit_reason_str(run->exit_reason)); + + TEST_ASSERT(run->io.port == ARBITRARY_IO_PORT, + "Expected IN from port %d from L2, got port %d", + ARBITRARY_IO_PORT, run->io.port); + + /* + * Stuff invalid guest state for L2 by making TR unusuable. The next + * KVM_RUN should induce a TRIPLE_FAULT in L2 as KVM doesn't support + * emulating invalid guest state for L2. + */ + memset(&sregs, 0, sizeof(sregs)); + vcpu_sregs_get(vm, VCPU_ID, &sregs); + sregs.tr.unusable = 1; + vcpu_sregs_set(vm, VCPU_ID, &sregs); + + vcpu_run(vm, VCPU_ID); + + switch (get_ucall(vm, VCPU_ID, &uc)) { + case UCALL_DONE: + break; + case UCALL_ABORT: + TEST_FAIL("%s", (const char *)uc.args[0]); + default: + TEST_FAIL("Unexpected ucall: %lu", uc.cmd); + } +} diff --git a/tools/testing/selftests/kvm/x86_64/vmx_pmu_msrs_test.c b/tools/testing/selftests/kvm/x86_64/vmx_pmu_msrs_test.c index 23051d84b907..2454a1f2ca0c 100644 --- a/tools/testing/selftests/kvm/x86_64/vmx_pmu_msrs_test.c +++ b/tools/testing/selftests/kvm/x86_64/vmx_pmu_msrs_test.c @@ -110,22 +110,5 @@ int main(int argc, char *argv[]) ret = _vcpu_set_msr(vm, 0, MSR_IA32_PERF_CAPABILITIES, PMU_CAP_LBR_FMT); TEST_ASSERT(ret == 0, "Bad PERF_CAPABILITIES didn't fail."); - /* testcase 4, set capabilities when we don't have PDCM bit */ - entry_1_0->ecx &= ~X86_FEATURE_PDCM; - vcpu_set_cpuid(vm, VCPU_ID, cpuid); - ret = _vcpu_set_msr(vm, 0, MSR_IA32_PERF_CAPABILITIES, host_cap.capabilities); - TEST_ASSERT(ret == 0, "Bad PERF_CAPABILITIES didn't fail."); - - /* testcase 5, set capabilities when we don't have PMU version bits */ - entry_1_0->ecx |= X86_FEATURE_PDCM; - eax.split.version_id = 0; - entry_1_0->ecx = eax.full; - vcpu_set_cpuid(vm, VCPU_ID, cpuid); - ret = _vcpu_set_msr(vm, 0, MSR_IA32_PERF_CAPABILITIES, PMU_CAP_FW_WRITES); - TEST_ASSERT(ret == 0, "Bad PERF_CAPABILITIES didn't fail."); - - vcpu_set_msr(vm, 0, MSR_IA32_PERF_CAPABILITIES, 0); - ASSERT_EQ(vcpu_get_msr(vm, VCPU_ID, MSR_IA32_PERF_CAPABILITIES), 0); - kvm_vm_free(vm); } diff --git a/tools/testing/selftests/landlock/common.h b/tools/testing/selftests/landlock/common.h index 20e2a9286d71..183b7e8e1b95 100644 --- a/tools/testing/selftests/landlock/common.h +++ b/tools/testing/selftests/landlock/common.h @@ -17,10 +17,6 @@ #include "../kselftest_harness.h" -#ifndef ARRAY_SIZE -#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) -#endif - /* * TEST_F_FORK() is useful when a test drop privileges but the corresponding * FIXTURE_TEARDOWN() requires them (e.g. 
to remove files from a directory diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk index fe7ee2b0f29c..a40add31a2e3 100644 --- a/tools/testing/selftests/lib.mk +++ b/tools/testing/selftests/lib.mk @@ -141,7 +141,7 @@ endif # Selftest makefiles can override those targets by setting # OVERRIDE_TARGETS = 1. ifeq ($(OVERRIDE_TARGETS),) -LOCAL_HDRS := $(selfdir)/kselftest_harness.h $(selfdir)/kselftest.h +LOCAL_HDRS += $(selfdir)/kselftest_harness.h $(selfdir)/kselftest.h $(OUTPUT)/%:%.c $(LOCAL_HDRS) $(LINK.c) $(filter-out $(LOCAL_HDRS),$^) $(LDLIBS) -o $@ diff --git a/tools/testing/selftests/mount/unprivileged-remount-test.c b/tools/testing/selftests/mount/unprivileged-remount-test.c index 584dc6bc3b06..d2917054fe3a 100644 --- a/tools/testing/selftests/mount/unprivileged-remount-test.c +++ b/tools/testing/selftests/mount/unprivileged-remount-test.c @@ -204,7 +204,7 @@ bool test_unpriv_remount(const char *fstype, const char *mount_options, if (!WIFEXITED(status)) { die("child did not terminate cleanly\n"); } - return WEXITSTATUS(status) == EXIT_SUCCESS ? true : false; + return WEXITSTATUS(status) == EXIT_SUCCESS; } create_and_enter_userns(); @@ -282,7 +282,7 @@ static bool test_priv_mount_unpriv_remount(void) if (!WIFEXITED(status)) { die("child did not terminate cleanly\n"); } - return WEXITSTATUS(status) == EXIT_SUCCESS ? true : false; + return WEXITSTATUS(status) == EXIT_SUCCESS; } orig_mnt_flags = read_mnt_flags(orig_path); diff --git a/tools/testing/selftests/move_mount_set_group/move_mount_set_group_test.c b/tools/testing/selftests/move_mount_set_group/move_mount_set_group_test.c index 860198f83a53..50ed5d475dd1 100644 --- a/tools/testing/selftests/move_mount_set_group/move_mount_set_group_test.c +++ b/tools/testing/selftests/move_mount_set_group/move_mount_set_group_test.c @@ -191,7 +191,7 @@ static bool is_shared_mount(const char *path) #define SET_GROUP_FROM "/tmp/move_mount_set_group_supported_from" #define SET_GROUP_TO "/tmp/move_mount_set_group_supported_to" -static int move_mount_set_group_supported(void) +static bool move_mount_set_group_supported(void) { int ret; @@ -222,7 +222,7 @@ static int move_mount_set_group_supported(void) AT_FDCWD, SET_GROUP_TO, MOVE_MOUNT_SET_GROUP); umount2("/tmp", MNT_DETACH); - return ret < 0 ? 
false : true; + return ret >= 0; } FIXTURE(move_mount_set_group) { @@ -232,7 +232,7 @@ FIXTURE(move_mount_set_group) { FIXTURE_SETUP(move_mount_set_group) { - int ret; + bool ret; ASSERT_EQ(prepare_unpriv_mountns(), 0); @@ -254,7 +254,7 @@ FIXTURE_SETUP(move_mount_set_group) FIXTURE_TEARDOWN(move_mount_set_group) { - int ret; + bool ret; ret = move_mount_set_group_supported(); ASSERT_GE(ret, 0); @@ -348,7 +348,7 @@ TEST_F(move_mount_set_group, complex_sharing_copying) .shared = false, }; pid_t pid; - int ret; + bool ret; ret = move_mount_set_group_supported(); ASSERT_GE(ret, 0); diff --git a/tools/testing/selftests/net/amt.sh b/tools/testing/selftests/net/amt.sh index 75528788cb95..75528788cb95 100644..100755 --- a/tools/testing/selftests/net/amt.sh +++ b/tools/testing/selftests/net/amt.sh diff --git a/tools/testing/selftests/net/fcnal-test.sh b/tools/testing/selftests/net/fcnal-test.sh index 7f5b265fcb90..ad2982b72e02 100755 --- a/tools/testing/selftests/net/fcnal-test.sh +++ b/tools/testing/selftests/net/fcnal-test.sh @@ -455,6 +455,22 @@ cleanup() ip netns del ${NSC} >/dev/null 2>&1 } +cleanup_vrf_dup() +{ + ip link del ${NSA_DEV2} >/dev/null 2>&1 + ip netns pids ${NSC} | xargs kill 2>/dev/null + ip netns del ${NSC} >/dev/null 2>&1 +} + +setup_vrf_dup() +{ + # some VRF tests use ns-C which has the same config as + # ns-B but for a device NOT in the VRF + create_ns ${NSC} "-" "-" + connect_ns ${NSA} ${NSA_DEV2} ${NSA_IP}/24 ${NSA_IP6}/64 \ + ${NSC} ${NSC_DEV} ${NSB_IP}/24 ${NSB_IP6}/64 +} + setup() { local with_vrf=${1} @@ -484,12 +500,6 @@ setup() ip -netns ${NSB} ro add ${VRF_IP}/32 via ${NSA_IP} dev ${NSB_DEV} ip -netns ${NSB} -6 ro add ${VRF_IP6}/128 via ${NSA_IP6} dev ${NSB_DEV} - - # some VRF tests use ns-C which has the same config as - # ns-B but for a device NOT in the VRF - create_ns ${NSC} "-" "-" - connect_ns ${NSA} ${NSA_DEV2} ${NSA_IP}/24 ${NSA_IP6}/64 \ - ${NSC} ${NSC_DEV} ${NSB_IP}/24 ${NSB_IP6}/64 else ip -netns ${NSA} ro add ${NSB_LO_IP}/32 via ${NSB_IP} dev ${NSA_DEV} ip -netns ${NSA} ro add ${NSB_LO_IP6}/128 via ${NSB_IP6} dev ${NSA_DEV} @@ -1240,7 +1250,9 @@ ipv4_tcp_vrf() log_test_addr ${a} $? 1 "Global server, local connection" # run MD5 tests + setup_vrf_dup ipv4_tcp_md5 + cleanup_vrf_dup # # enable VRF global server @@ -1798,8 +1810,9 @@ ipv4_addr_bind_vrf() for a in ${NSA_IP} ${VRF_IP} do log_start + show_hint "Socket not bound to VRF, but address is in VRF" run_cmd nettest -s -R -P icmp -l ${a} -b - log_test_addr ${a} $? 0 "Raw socket bind to local address" + log_test_addr ${a} $? 1 "Raw socket bind to local address" log_start run_cmd nettest -s -R -P icmp -l ${a} -I ${NSA_DEV} -b @@ -2191,7 +2204,7 @@ ipv6_ping_vrf() log_start show_hint "Fails since VRF device does not support linklocal or multicast" run_cmd ${ping6} -c1 -w1 ${a} - log_test_addr ${a} $? 2 "ping out, VRF bind" + log_test_addr ${a} $? 1 "ping out, VRF bind" done for a in ${NSB_IP6} ${NSB_LO_IP6} ${NSB_LINKIP6}%${NSA_DEV} ${MCAST}%${NSA_DEV} @@ -2719,7 +2732,9 @@ ipv6_tcp_vrf() log_test_addr ${a} $? 1 "Global server, local connection" # run MD5 tests + setup_vrf_dup ipv6_tcp_md5 + cleanup_vrf_dup # # enable VRF global server @@ -3414,11 +3429,14 @@ ipv6_addr_bind_novrf() run_cmd nettest -6 -s -l ${a} -I ${NSA_DEV} -t1 -b log_test_addr ${a} $? 0 "TCP socket bind to local address after device bind" + # Sadly, the kernel allows binding a socket to a device and then + # binding to an address not on the device. 
So this test passes + # when it really should not a=${NSA_LO_IP6} log_start - show_hint "Should fail with 'Cannot assign requested address'" + show_hint "Tecnically should fail since address is not on device but kernel allows" run_cmd nettest -6 -s -l ${a} -I ${NSA_DEV} -t1 -b - log_test_addr ${a} $? 1 "TCP socket bind to out of scope local address" + log_test_addr ${a} $? 0 "TCP socket bind to out of scope local address" } ipv6_addr_bind_vrf() @@ -3459,10 +3477,15 @@ ipv6_addr_bind_vrf() run_cmd nettest -6 -s -l ${a} -I ${NSA_DEV} -t1 -b log_test_addr ${a} $? 0 "TCP socket bind to local address with device bind" + # Sadly, the kernel allows binding a socket to a device and then + # binding to an address not on the device. The only restriction + # is that the address is valid in the L3 domain. So this test + # passes when it really should not a=${VRF_IP6} log_start + show_hint "Tecnically should fail since address is not on device but kernel allows" run_cmd nettest -6 -s -l ${a} -I ${NSA_DEV} -t1 -b - log_test_addr ${a} $? 1 "TCP socket bind to VRF address with device bind" + log_test_addr ${a} $? 0 "TCP socket bind to VRF address with device bind" a=${NSA_LO_IP6} log_start @@ -4077,3 +4100,11 @@ cleanup 2>/dev/null printf "\nTests passed: %3d\n" ${nsuccess} printf "Tests failed: %3d\n" ${nfail} + +if [ $nfail -ne 0 ]; then + exit 1 # KSFT_FAIL +elif [ $nsuccess -eq 0 ]; then + exit $ksft_skip +fi + +exit 0 # KSFT_PASS diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh index 5abe92d55b69..996af1ae3d3d 100755 --- a/tools/testing/selftests/net/fib_tests.sh +++ b/tools/testing/selftests/net/fib_tests.sh @@ -444,24 +444,63 @@ fib_rp_filter_test() setup set -e + ip netns add ns2 + ip netns set ns2 auto + + ip -netns ns2 link set dev lo up + + $IP link add name veth1 type veth peer name veth2 + $IP link set dev veth2 netns ns2 + $IP address add 192.0.2.1/24 dev veth1 + ip -netns ns2 address add 192.0.2.1/24 dev veth2 + $IP link set dev veth1 up + ip -netns ns2 link set dev veth2 up + $IP link set dev lo address 52:54:00:6a:c7:5e - $IP link set dummy0 address 52:54:00:6a:c7:5e - $IP link add dummy1 type dummy - $IP link set dummy1 address 52:54:00:6a:c7:5e - $IP link set dev dummy1 up + $IP link set dev veth1 address 52:54:00:6a:c7:5e + ip -netns ns2 link set dev lo address 52:54:00:6a:c7:5e + ip -netns ns2 link set dev veth2 address 52:54:00:6a:c7:5e + + # 1. (ns2) redirect lo's egress to veth2's egress + ip netns exec ns2 tc qdisc add dev lo parent root handle 1: fq_codel + ip netns exec ns2 tc filter add dev lo parent 1: protocol arp basic \ + action mirred egress redirect dev veth2 + ip netns exec ns2 tc filter add dev lo parent 1: protocol ip basic \ + action mirred egress redirect dev veth2 + + # 2. (ns1) redirect veth1's ingress to lo's ingress + $NS_EXEC tc qdisc add dev veth1 ingress + $NS_EXEC tc filter add dev veth1 ingress protocol arp basic \ + action mirred ingress redirect dev lo + $NS_EXEC tc filter add dev veth1 ingress protocol ip basic \ + action mirred ingress redirect dev lo + + # 3. (ns1) redirect lo's egress to veth1's egress + $NS_EXEC tc qdisc add dev lo parent root handle 1: fq_codel + $NS_EXEC tc filter add dev lo parent 1: protocol arp basic \ + action mirred egress redirect dev veth1 + $NS_EXEC tc filter add dev lo parent 1: protocol ip basic \ + action mirred egress redirect dev veth1 + + # 4. 
(ns2) redirect veth2's ingress to lo's ingress + ip netns exec ns2 tc qdisc add dev veth2 ingress + ip netns exec ns2 tc filter add dev veth2 ingress protocol arp basic \ + action mirred ingress redirect dev lo + ip netns exec ns2 tc filter add dev veth2 ingress protocol ip basic \ + action mirred ingress redirect dev lo + $NS_EXEC sysctl -qw net.ipv4.conf.all.rp_filter=1 $NS_EXEC sysctl -qw net.ipv4.conf.all.accept_local=1 $NS_EXEC sysctl -qw net.ipv4.conf.all.route_localnet=1 - - $NS_EXEC tc qd add dev dummy1 parent root handle 1: fq_codel - $NS_EXEC tc filter add dev dummy1 parent 1: protocol arp basic action mirred egress redirect dev lo - $NS_EXEC tc filter add dev dummy1 parent 1: protocol ip basic action mirred egress redirect dev lo + ip netns exec ns2 sysctl -qw net.ipv4.conf.all.rp_filter=1 + ip netns exec ns2 sysctl -qw net.ipv4.conf.all.accept_local=1 + ip netns exec ns2 sysctl -qw net.ipv4.conf.all.route_localnet=1 set +e - run_cmd "ip netns exec ns1 ping -I dummy1 -w1 -c1 198.51.100.1" + run_cmd "ip netns exec ns2 ping -w1 -c1 192.0.2.1" log_test $? 0 "rp_filter passes local packets" - run_cmd "ip netns exec ns1 ping -I dummy1 -w1 -c1 127.0.0.1" + run_cmd "ip netns exec ns2 ping -w1 -c1 127.0.0.1" log_test $? 0 "rp_filter passes loopback packets" cleanup diff --git a/tools/testing/selftests/net/forwarding/forwarding.config.sample b/tools/testing/selftests/net/forwarding/forwarding.config.sample index bf17e485684f..b0980a2efa31 100644 --- a/tools/testing/selftests/net/forwarding/forwarding.config.sample +++ b/tools/testing/selftests/net/forwarding/forwarding.config.sample @@ -13,6 +13,8 @@ NETIFS[p5]=veth4 NETIFS[p6]=veth5 NETIFS[p7]=veth6 NETIFS[p8]=veth7 +NETIFS[p9]=veth8 +NETIFS[p10]=veth9 # Port that does not have a cable connected. NETIF_NO_CABLE=eth8 diff --git a/tools/testing/selftests/net/gro.c b/tools/testing/selftests/net/gro.c index cf37ce86b0fd..221525ccbe1d 100644 --- a/tools/testing/selftests/net/gro.c +++ b/tools/testing/selftests/net/gro.c @@ -57,10 +57,11 @@ #include <string.h> #include <unistd.h> +#include "../kselftest.h" + #define DPORT 8000 #define SPORT 1500 #define PAYLOAD_LEN 100 -#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0])) #define NUM_PACKETS 4 #define START_SEQ 100 #define START_ACK 100 diff --git a/tools/testing/selftests/net/icmp_redirect.sh b/tools/testing/selftests/net/icmp_redirect.sh index ecbf57f264ed..7b9d6e31b8e7 100755 --- a/tools/testing/selftests/net/icmp_redirect.sh +++ b/tools/testing/selftests/net/icmp_redirect.sh @@ -311,7 +311,7 @@ check_exception() ip -netns h1 ro get ${H1_VRF_ARG} ${H2_N2_IP} | \ grep -E -v 'mtu|redirected' | grep -q "cache" fi - log_test $? 0 "IPv4: ${desc}" + log_test $? 0 "IPv4: ${desc}" 0 # No PMTU info for test "redirect" and "mtu exception plus redirect" if [ "$with_redirect" = "yes" ] && [ "$desc" != "redirect exception plus mtu" ]; then diff --git a/tools/testing/selftests/net/ipsec.c b/tools/testing/selftests/net/ipsec.c index 3d7dde2c321b..cc10c10c5ed9 100644 --- a/tools/testing/selftests/net/ipsec.c +++ b/tools/testing/selftests/net/ipsec.c @@ -41,7 +41,6 @@ #define pr_err(fmt, ...) 
printk(fmt ": %m", ##__VA_ARGS__) -#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0])) #define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) #define IPV4_STR_SZ 16 /* xxx.xxx.xxx.xxx is longest + \0 */ diff --git a/tools/testing/selftests/net/mptcp/config b/tools/testing/selftests/net/mptcp/config index 0faaccd21447..2b82628decb1 100644 --- a/tools/testing/selftests/net/mptcp/config +++ b/tools/testing/selftests/net/mptcp/config @@ -9,7 +9,6 @@ CONFIG_NETFILTER=y CONFIG_NETFILTER_ADVANCED=y CONFIG_NETFILTER_NETLINK=m CONFIG_NF_TABLES=m -CONFIG_NFT_COUNTER=m CONFIG_NFT_COMPAT=m CONFIG_NETFILTER_XTABLES=m CONFIG_NETFILTER_XT_MATCH_BPF=m diff --git a/tools/testing/selftests/net/reuseport_bpf.c b/tools/testing/selftests/net/reuseport_bpf.c index b5277106df1f..072d709c96b4 100644 --- a/tools/testing/selftests/net/reuseport_bpf.c +++ b/tools/testing/selftests/net/reuseport_bpf.c @@ -24,9 +24,7 @@ #include <sys/resource.h> #include <unistd.h> -#ifndef ARRAY_SIZE -#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0])) -#endif +#include "../kselftest.h" struct test_params { int recv_family; diff --git a/tools/testing/selftests/net/rxtimestamp.c b/tools/testing/selftests/net/rxtimestamp.c index e4613ce4ed69..9eb42570294d 100644 --- a/tools/testing/selftests/net/rxtimestamp.c +++ b/tools/testing/selftests/net/rxtimestamp.c @@ -18,7 +18,7 @@ #include <linux/net_tstamp.h> #include <linux/errqueue.h> -#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0])) +#include "../kselftest.h" struct options { int so_timestamp; diff --git a/tools/testing/selftests/net/socket.c b/tools/testing/selftests/net/socket.c index afca1ead677f..db1aeb8c5d1e 100644 --- a/tools/testing/selftests/net/socket.c +++ b/tools/testing/selftests/net/socket.c @@ -7,6 +7,8 @@ #include <sys/socket.h> #include <netinet/in.h> +#include "../kselftest.h" + struct socket_testcase { int domain; int type; @@ -31,7 +33,6 @@ static struct socket_testcase tests[] = { { AF_INET, SOCK_STREAM, IPPROTO_UDP, -EPROTONOSUPPORT, 1 }, }; -#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0])) #define ERR_STRING_SZ 64 static int run_tests(void) diff --git a/tools/testing/selftests/net/tcp_fastopen_backup_key.c b/tools/testing/selftests/net/tcp_fastopen_backup_key.c index 9c55ec44fc43..c1cb0c75156a 100644 --- a/tools/testing/selftests/net/tcp_fastopen_backup_key.c +++ b/tools/testing/selftests/net/tcp_fastopen_backup_key.c @@ -26,6 +26,8 @@ #include <fcntl.h> #include <time.h> +#include "../kselftest.h" + #ifndef TCP_FASTOPEN_KEY #define TCP_FASTOPEN_KEY 33 #endif @@ -34,10 +36,6 @@ #define PROC_FASTOPEN_KEY "/proc/sys/net/ipv4/tcp_fastopen_key" #define KEY_LENGTH 16 -#ifndef ARRAY_SIZE -#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0])) -#endif - static bool do_ipv6; static bool do_sockopt; static bool do_rotate; diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c index 8a22db0cca49..6e468e0f42f7 100644 --- a/tools/testing/selftests/net/tls.c +++ b/tools/testing/selftests/net/tls.c @@ -31,6 +31,8 @@ struct tls_crypto_info_keys { struct tls12_crypto_info_chacha20_poly1305 chacha20; struct tls12_crypto_info_sm4_gcm sm4gcm; struct tls12_crypto_info_sm4_ccm sm4ccm; + struct tls12_crypto_info_aes_ccm_128 aesccm128; + struct tls12_crypto_info_aes_gcm_256 aesgcm256; }; size_t len; }; @@ -61,6 +63,16 @@ static void tls_crypto_info_init(uint16_t tls_version, uint16_t cipher_type, tls12->sm4ccm.info.version = tls_version; tls12->sm4ccm.info.cipher_type = cipher_type; break; + case 
TLS_CIPHER_AES_CCM_128: + tls12->len = sizeof(struct tls12_crypto_info_aes_ccm_128); + tls12->aesccm128.info.version = tls_version; + tls12->aesccm128.info.cipher_type = cipher_type; + break; + case TLS_CIPHER_AES_GCM_256: + tls12->len = sizeof(struct tls12_crypto_info_aes_gcm_256); + tls12->aesgcm256.info.version = tls_version; + tls12->aesgcm256.info.cipher_type = cipher_type; + break; default: break; } @@ -261,6 +273,30 @@ FIXTURE_VARIANT_ADD(tls, 13_sm4_ccm) .cipher_type = TLS_CIPHER_SM4_CCM, }; +FIXTURE_VARIANT_ADD(tls, 12_aes_ccm) +{ + .tls_version = TLS_1_2_VERSION, + .cipher_type = TLS_CIPHER_AES_CCM_128, +}; + +FIXTURE_VARIANT_ADD(tls, 13_aes_ccm) +{ + .tls_version = TLS_1_3_VERSION, + .cipher_type = TLS_CIPHER_AES_CCM_128, +}; + +FIXTURE_VARIANT_ADD(tls, 12_aes_gcm_256) +{ + .tls_version = TLS_1_2_VERSION, + .cipher_type = TLS_CIPHER_AES_GCM_256, +}; + +FIXTURE_VARIANT_ADD(tls, 13_aes_gcm_256) +{ + .tls_version = TLS_1_3_VERSION, + .cipher_type = TLS_CIPHER_AES_GCM_256, +}; + FIXTURE_SETUP(tls) { struct tls_crypto_info_keys tls12; diff --git a/tools/testing/selftests/net/toeplitz.c b/tools/testing/selftests/net/toeplitz.c index 710ac956bdb3..c5489341cfb8 100644 --- a/tools/testing/selftests/net/toeplitz.c +++ b/tools/testing/selftests/net/toeplitz.c @@ -498,7 +498,7 @@ static void parse_opts(int argc, char **argv) bool have_toeplitz = false; int index, c; - while ((c = getopt_long(argc, argv, "46C:d:i:k:r:stT:u:v", long_options, &index)) != -1) { + while ((c = getopt_long(argc, argv, "46C:d:i:k:r:stT:uv", long_options, &index)) != -1) { switch (c) { case '4': cfg_family = AF_INET; diff --git a/tools/testing/selftests/net/udpgro_fwd.sh b/tools/testing/selftests/net/udpgro_fwd.sh index 7f26591f236b..6f05e06f6761 100755 --- a/tools/testing/selftests/net/udpgro_fwd.sh +++ b/tools/testing/selftests/net/udpgro_fwd.sh @@ -132,7 +132,7 @@ run_test() { local rcv=`ip netns exec $NS_DST $ipt"-save" -c | grep 'dport 8000' | \ sed -e 's/\[//' -e 's/:.*//'` if [ $rcv != $pkts ]; then - echo " fail - received $rvs packets, expected $pkts" + echo " fail - received $rcv packets, expected $pkts" ret=1 return fi @@ -185,6 +185,7 @@ for family in 4 6; do IPT=iptables SUFFIX=24 VXDEV=vxlan + PING=ping if [ $family = 6 ]; then BM_NET=$BM_NET_V6 @@ -192,6 +193,8 @@ for family in 4 6; do SUFFIX="64 nodad" VXDEV=vxlan6 IPT=ip6tables + # Use ping6 on systems where ping doesn't handle IPv6 + ping -w 1 -c 1 ::1 > /dev/null 2>&1 || PING="ping6" fi echo "IPv$family" @@ -237,7 +240,7 @@ for family in 4 6; do # load arp cache before running the test to reduce the amount of # stray traffic on top of the UDP tunnel - ip netns exec $NS_SRC ping -q -c 1 $OL_NET$DST_NAT >/dev/null + ip netns exec $NS_SRC $PING -q -c 1 $OL_NET$DST_NAT >/dev/null run_test "GRO fwd over UDP tunnel" $OL_NET$DST_NAT 1 1 $OL_NET$DST cleanup diff --git a/tools/testing/selftests/net/udpgso.c b/tools/testing/selftests/net/udpgso.c index c66da6ffd6d8..7badaf215de2 100644 --- a/tools/testing/selftests/net/udpgso.c +++ b/tools/testing/selftests/net/udpgso.c @@ -156,13 +156,13 @@ struct testcase testcases_v4[] = { }, { /* send max number of min sized segments */ - .tlen = UDP_MAX_SEGMENTS - CONST_HDRLEN_V4, + .tlen = UDP_MAX_SEGMENTS, .gso_len = 1, - .r_num_mss = UDP_MAX_SEGMENTS - CONST_HDRLEN_V4, + .r_num_mss = UDP_MAX_SEGMENTS, }, { /* send max number + 1 of min sized segments: fail */ - .tlen = UDP_MAX_SEGMENTS - CONST_HDRLEN_V4 + 1, + .tlen = UDP_MAX_SEGMENTS + 1, .gso_len = 1, .tfail = true, }, @@ -259,13 +259,13 @@ struct testcase 
testcases_v6[] = { }, { /* send max number of min sized segments */ - .tlen = UDP_MAX_SEGMENTS - CONST_HDRLEN_V6, + .tlen = UDP_MAX_SEGMENTS, .gso_len = 1, - .r_num_mss = UDP_MAX_SEGMENTS - CONST_HDRLEN_V6, + .r_num_mss = UDP_MAX_SEGMENTS, }, { /* send max number + 1 of min sized segments: fail */ - .tlen = UDP_MAX_SEGMENTS - CONST_HDRLEN_V6 + 1, + .tlen = UDP_MAX_SEGMENTS + 1, .gso_len = 1, .tfail = true, }, diff --git a/tools/testing/selftests/net/udpgso_bench_tx.c b/tools/testing/selftests/net/udpgso_bench_tx.c index 17512a43885e..f1fdaa270291 100644 --- a/tools/testing/selftests/net/udpgso_bench_tx.c +++ b/tools/testing/selftests/net/udpgso_bench_tx.c @@ -419,6 +419,7 @@ static void usage(const char *filepath) static void parse_opts(int argc, char **argv) { + const char *bind_addr = NULL; int max_len, hdrlen; int c; @@ -446,7 +447,7 @@ static void parse_opts(int argc, char **argv) cfg_cpu = strtol(optarg, NULL, 0); break; case 'D': - setup_sockaddr(cfg_family, optarg, &cfg_dst_addr); + bind_addr = optarg; break; case 'l': cfg_runtime_ms = strtoul(optarg, NULL, 10) * 1000; @@ -492,6 +493,11 @@ static void parse_opts(int argc, char **argv) } } + if (!bind_addr) + bind_addr = cfg_family == PF_INET6 ? "::" : "0.0.0.0"; + + setup_sockaddr(cfg_family, bind_addr, &cfg_dst_addr); + if (optind != argc) usage(argv[0]); diff --git a/tools/testing/selftests/netfilter/conntrack_vrf.sh b/tools/testing/selftests/netfilter/conntrack_vrf.sh index 91f3ef0f1192..8b5ea9234588 100755 --- a/tools/testing/selftests/netfilter/conntrack_vrf.sh +++ b/tools/testing/selftests/netfilter/conntrack_vrf.sh @@ -150,11 +150,27 @@ EOF # oifname is the vrf device. test_masquerade_vrf() { + local qdisc=$1 + + if [ "$qdisc" != "default" ]; then + tc -net $ns0 qdisc add dev tvrf root $qdisc + fi + ip netns exec $ns0 conntrack -F 2>/dev/null ip netns exec $ns0 nft -f - <<EOF flush ruleset table ip nat { + chain rawout { + type filter hook output priority raw; + + oif tvrf ct state untracked counter + } + chain postrouting2 { + type filter hook postrouting priority mangle; + + oif tvrf ct state untracked counter + } chain postrouting { type nat hook postrouting priority 0; # NB: masquerade should always be combined with 'oif(name) bla', @@ -171,13 +187,18 @@ EOF fi # must also check that nat table was evaluated on second (lower device) iteration. - ip netns exec $ns0 nft list table ip nat |grep -q 'counter packets 2' + ip netns exec $ns0 nft list table ip nat |grep -q 'counter packets 2' && + ip netns exec $ns0 nft list table ip nat |grep -q 'untracked counter packets [1-9]' if [ $? -eq 0 ]; then - echo "PASS: iperf3 connect with masquerade + sport rewrite on vrf device" + echo "PASS: iperf3 connect with masquerade + sport rewrite on vrf device ($qdisc qdisc)" else - echo "FAIL: vrf masq rule has unexpected counter value" + echo "FAIL: vrf rules have unexpected counter value" ret=1 fi + + if [ "$qdisc" != "default" ]; then + tc -net $ns0 qdisc del dev tvrf root + fi } # add masq rule that gets evaluated w. outif set to veth device. 
@@ -213,7 +234,8 @@ EOF } test_ct_zone_in -test_masquerade_vrf +test_masquerade_vrf "default" +test_masquerade_vrf "pfifo" test_masquerade_veth exit $ret diff --git a/tools/testing/selftests/netfilter/nft_concat_range.sh b/tools/testing/selftests/netfilter/nft_concat_range.sh index 5a4938d6dcf2..ed61f6cab60f 100755 --- a/tools/testing/selftests/netfilter/nft_concat_range.sh +++ b/tools/testing/selftests/netfilter/nft_concat_range.sh @@ -23,8 +23,8 @@ TESTS="reported_issues correctness concurrency timeout" # Set types, defined by TYPE_ variables below TYPES="net_port port_net net6_port port_proto net6_port_mac net6_port_mac_proto - net_port_net net_mac net_mac_icmp net6_mac_icmp net6_port_net6_port - net_port_mac_proto_net" + net_port_net net_mac mac_net net_mac_icmp net6_mac_icmp + net6_port_net6_port net_port_mac_proto_net" # Reported bugs, also described by TYPE_ variables below BUGS="flush_remove_add" @@ -277,6 +277,23 @@ perf_entries 1000 perf_proto ipv4 " +TYPE_mac_net=" +display mac,net +type_spec ether_addr . ipv4_addr +chain_spec ether saddr . ip saddr +dst +src mac addr4 +start 1 +count 5 +src_delta 2000 +tools sendip nc bash +proto udp + +race_repeat 0 + +perf_duration 0 +" + TYPE_net_mac_icmp=" display net,mac - ICMP type_spec ipv4_addr . ether_addr @@ -984,7 +1001,8 @@ format() { fi done for f in ${src}; do - __expr="${__expr} . " + [ "${__expr}" != "{ " ] && __expr="${__expr} . " + __start="$(eval format_"${f}" "${srcstart}")" __end="$(eval format_"${f}" "${srcend}")" diff --git a/tools/testing/selftests/netfilter/nft_zones_many.sh b/tools/testing/selftests/netfilter/nft_zones_many.sh index ac646376eb01..04633119b29a 100755 --- a/tools/testing/selftests/netfilter/nft_zones_many.sh +++ b/tools/testing/selftests/netfilter/nft_zones_many.sh @@ -18,11 +18,17 @@ cleanup() ip netns del $ns } -ip netns add $ns -if [ $? -ne 0 ];then - echo "SKIP: Could not create net namespace $gw" - exit $ksft_skip -fi +checktool (){ + if ! $1 > /dev/null 2>&1; then + echo "SKIP: Could not $2" + exit $ksft_skip + fi +} + +checktool "nft --version" "run test without nft tool" +checktool "ip -Version" "run test without ip tool" +checktool "socat -V" "run test without socat tool" +checktool "ip netns add $ns" "create net namespace" trap cleanup EXIT @@ -71,7 +77,8 @@ EOF local start=$(date +%s%3N) i=$((i + 10000)) j=$((j + 1)) - dd if=/dev/zero of=/dev/stdout bs=8k count=10000 2>/dev/null | ip netns exec "$ns" nc -w 1 -q 1 -u -p 12345 127.0.0.1 12345 > /dev/null + # nft rule in output places each packet in a different zone. + dd if=/dev/zero of=/dev/stdout bs=8k count=10000 2>/dev/null | ip netns exec "$ns" socat STDIN UDP:127.0.0.1:12345,sourceport=12345 if [ $? 
-ne 0 ] ;then ret=1 break diff --git a/tools/testing/selftests/rseq/basic_percpu_ops_test.c b/tools/testing/selftests/rseq/basic_percpu_ops_test.c index eb3f6db36d36..b953a52ff706 100644 --- a/tools/testing/selftests/rseq/basic_percpu_ops_test.c +++ b/tools/testing/selftests/rseq/basic_percpu_ops_test.c @@ -9,10 +9,9 @@ #include <string.h> #include <stddef.h> +#include "../kselftest.h" #include "rseq.h" -#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0])) - struct percpu_lock_entry { intptr_t v; } __attribute__((aligned(128))); diff --git a/tools/testing/selftests/rseq/rseq.c b/tools/testing/selftests/rseq/rseq.c index 7159eb777fd3..fb440dfca158 100644 --- a/tools/testing/selftests/rseq/rseq.c +++ b/tools/testing/selftests/rseq/rseq.c @@ -27,10 +27,9 @@ #include <signal.h> #include <limits.h> +#include "../kselftest.h" #include "rseq.h" -#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0])) - __thread volatile struct rseq __rseq_abi = { .cpu_id = RSEQ_CPU_ID_UNINITIALIZED, }; diff --git a/tools/testing/selftests/seccomp/seccomp_benchmark.c b/tools/testing/selftests/seccomp/seccomp_benchmark.c index 6e5102a7d7c9..5b5c9d558dee 100644 --- a/tools/testing/selftests/seccomp/seccomp_benchmark.c +++ b/tools/testing/selftests/seccomp/seccomp_benchmark.c @@ -18,7 +18,7 @@ #include <sys/syscall.h> #include <sys/types.h> -#define ARRAY_SIZE(a) (sizeof(a) / sizeof(a[0])) +#include "../kselftest.h" unsigned long long timing(clockid_t clk_id, unsigned long long samples) { diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c index d425688cf59c..9d126d7fabdb 100644 --- a/tools/testing/selftests/seccomp/seccomp_bpf.c +++ b/tools/testing/selftests/seccomp/seccomp_bpf.c @@ -1487,7 +1487,7 @@ TEST_F(precedence, log_is_fifth_in_any_order) #define PTRACE_EVENT_SECCOMP 7 #endif -#define IS_SECCOMP_EVENT(status) ((status >> 16) == PTRACE_EVENT_SECCOMP) +#define PTRACE_EVENT_MASK(status) ((status) >> 16) bool tracer_running; void tracer_stop(int sig) { @@ -1539,12 +1539,22 @@ void start_tracer(struct __test_metadata *_metadata, int fd, pid_t tracee, if (wait(&status) != tracee) continue; - if (WIFSIGNALED(status) || WIFEXITED(status)) - /* Child is dead. Time to go. */ + + if (WIFSIGNALED(status)) { + /* Child caught a fatal signal. */ + return; + } + if (WIFEXITED(status)) { + /* Child exited with code. */ return; + } - /* Check if this is a seccomp event. */ - ASSERT_EQ(!ptrace_syscall, IS_SECCOMP_EVENT(status)); + /* Check if we got an expected event. */ + ASSERT_EQ(WIFCONTINUED(status), false); + ASSERT_EQ(WIFSTOPPED(status), true); + ASSERT_EQ(WSTOPSIG(status) & SIGTRAP, SIGTRAP) { + TH_LOG("Unexpected WSTOPSIG: %d", WSTOPSIG(status)); + } tracer_func(_metadata, tracee, status, args); @@ -1961,6 +1971,11 @@ void tracer_seccomp(struct __test_metadata *_metadata, pid_t tracee, int ret; unsigned long msg; + EXPECT_EQ(PTRACE_EVENT_MASK(status), PTRACE_EVENT_SECCOMP) { + TH_LOG("Unexpected ptrace event: %d", PTRACE_EVENT_MASK(status)); + return; + } + /* Make sure we got the right message. */ ret = ptrace(PTRACE_GETEVENTMSG, tracee, NULL, &msg); EXPECT_EQ(0, ret); @@ -2011,6 +2026,11 @@ void tracer_ptrace(struct __test_metadata *_metadata, pid_t tracee, long *syscall_nr = NULL, *syscall_ret = NULL; FIXTURE_DATA(TRACE_syscall) *self = args; + EXPECT_EQ(WSTOPSIG(status) & 0x80, 0x80) { + TH_LOG("Unexpected WSTOPSIG: %d", WSTOPSIG(status)); + return; + } + /* * The traditional way to tell PTRACE_SYSCALL entry/exit * is by counting. 
@@ -2128,6 +2148,7 @@ FIXTURE_SETUP(TRACE_syscall) ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); + /* Do not install seccomp rewrite filters, as we'll use ptrace instead. */ if (variant->use_ptrace) return; @@ -2186,6 +2207,29 @@ TEST_F(TRACE_syscall, syscall_faked) EXPECT_SYSCALL_RETURN(45000, syscall(__NR_gettid)); } +TEST_F_SIGNAL(TRACE_syscall, kill_immediate, SIGSYS) +{ + struct sock_filter filter[] = { + BPF_STMT(BPF_LD|BPF_W|BPF_ABS, + offsetof(struct seccomp_data, nr)), + BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_mknodat, 0, 1), + BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL_THREAD), + BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), + }; + struct sock_fprog prog = { + .len = (unsigned short)ARRAY_SIZE(filter), + .filter = filter, + }; + long ret; + + /* Install "kill on mknodat" filter. */ + ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0); + ASSERT_EQ(0, ret); + + /* This should immediately die with SIGSYS, regardless of tracer. */ + EXPECT_EQ(-1, syscall(__NR_mknodat, -1, NULL, 0, 0)); +} + TEST_F(TRACE_syscall, skip_after) { struct sock_filter filter[] = { @@ -4087,7 +4131,7 @@ TEST(user_notification_addfd) * lowest available fd to be assigned here. */ EXPECT_EQ(fd, nextfd++); - EXPECT_EQ(filecmp(getpid(), pid, memfd, fd), 0); + ASSERT_EQ(filecmp(getpid(), pid, memfd, fd), 0); /* * This sets the ID of the ADD FD to the last request plus 1. The diff --git a/tools/testing/selftests/sgx/Makefile b/tools/testing/selftests/sgx/Makefile index 7f12d55b97f8..2956584e1e37 100644 --- a/tools/testing/selftests/sgx/Makefile +++ b/tools/testing/selftests/sgx/Makefile @@ -45,7 +45,7 @@ $(OUTPUT)/sign_key.o: sign_key.S $(CC) $(HOST_CFLAGS) -c $< -o $@ $(OUTPUT)/test_encl.elf: test_encl.lds test_encl.c test_encl_bootstrap.S - $(CC) $(ENCL_CFLAGS) -T $^ -o $@ + $(CC) $(ENCL_CFLAGS) -T $^ -o $@ -Wl,--build-id=none EXTRA_CLEAN := \ $(OUTPUT)/test_encl.elf \ diff --git a/tools/testing/selftests/sgx/defines.h b/tools/testing/selftests/sgx/defines.h index f88562afcaa0..02d775789ea7 100644 --- a/tools/testing/selftests/sgx/defines.h +++ b/tools/testing/selftests/sgx/defines.h @@ -19,13 +19,38 @@ #include "../../../../arch/x86/include/uapi/asm/sgx.h" enum encl_op_type { - ENCL_OP_PUT, - ENCL_OP_GET, + ENCL_OP_PUT_TO_BUFFER, + ENCL_OP_GET_FROM_BUFFER, + ENCL_OP_PUT_TO_ADDRESS, + ENCL_OP_GET_FROM_ADDRESS, + ENCL_OP_NOP, + ENCL_OP_MAX, }; -struct encl_op { +struct encl_op_header { uint64_t type; - uint64_t buffer; +}; + +struct encl_op_put_to_buf { + struct encl_op_header header; + uint64_t value; +}; + +struct encl_op_get_from_buf { + struct encl_op_header header; + uint64_t value; +}; + +struct encl_op_put_to_addr { + struct encl_op_header header; + uint64_t value; + uint64_t addr; +}; + +struct encl_op_get_from_addr { + struct encl_op_header header; + uint64_t value; + uint64_t addr; }; #endif /* DEFINES_H */ diff --git a/tools/testing/selftests/sgx/load.c b/tools/testing/selftests/sgx/load.c index 3ebe5d1fe337..9d4322c946e2 100644 --- a/tools/testing/selftests/sgx/load.c +++ b/tools/testing/selftests/sgx/load.c @@ -21,6 +21,8 @@ void encl_delete(struct encl *encl) { + struct encl_segment *heap_seg = &encl->segment_tbl[encl->nr_segments - 1]; + if (encl->encl_base) munmap((void *)encl->encl_base, encl->encl_size); @@ -30,6 +32,8 @@ void encl_delete(struct encl *encl) if (encl->fd) close(encl->fd); + munmap(heap_seg->src, heap_seg->size); + if (encl->segment_tbl) free(encl->segment_tbl); @@ -107,11 +111,14 @@ static bool encl_ioc_add_pages(struct encl *encl, struct encl_segment *seg) 
memset(&secinfo, 0, sizeof(secinfo)); secinfo.flags = seg->flags; - ioc.src = (uint64_t)encl->src + seg->offset; + ioc.src = (uint64_t)seg->src; ioc.offset = seg->offset; ioc.length = seg->size; ioc.secinfo = (unsigned long)&secinfo; - ioc.flags = SGX_PAGE_MEASURE; + if (seg->measure) + ioc.flags = SGX_PAGE_MEASURE; + else + ioc.flags = 0; rc = ioctl(encl->fd, SGX_IOC_ENCLAVE_ADD_PAGES, &ioc); if (rc < 0) { @@ -122,11 +129,10 @@ static bool encl_ioc_add_pages(struct encl *encl, struct encl_segment *seg) return true; } - - -bool encl_load(const char *path, struct encl *encl) +bool encl_load(const char *path, struct encl *encl, unsigned long heap_size) { const char device_path[] = "/dev/sgx_enclave"; + struct encl_segment *seg; Elf64_Phdr *phdr_tbl; off_t src_offset; Elf64_Ehdr *ehdr; @@ -178,6 +184,8 @@ bool encl_load(const char *path, struct encl *encl) ehdr = encl->bin; phdr_tbl = encl->bin + ehdr->e_phoff; + encl->nr_segments = 1; /* one for the heap */ + for (i = 0; i < ehdr->e_phnum; i++) { Elf64_Phdr *phdr = &phdr_tbl[i]; @@ -193,7 +201,6 @@ bool encl_load(const char *path, struct encl *encl) for (i = 0, j = 0; i < ehdr->e_phnum; i++) { Elf64_Phdr *phdr = &phdr_tbl[i]; unsigned int flags = phdr->p_flags; - struct encl_segment *seg; if (phdr->p_type != PT_LOAD) continue; @@ -216,6 +223,7 @@ bool encl_load(const char *path, struct encl *encl) if (j == 0) { src_offset = phdr->p_offset & PAGE_MASK; + encl->src = encl->bin + src_offset; seg->prot = PROT_READ | PROT_WRITE; seg->flags = SGX_PAGE_TYPE_TCS << 8; @@ -228,15 +236,27 @@ bool encl_load(const char *path, struct encl *encl) seg->offset = (phdr->p_offset & PAGE_MASK) - src_offset; seg->size = (phdr->p_filesz + PAGE_SIZE - 1) & PAGE_MASK; + seg->src = encl->src + seg->offset; + seg->measure = true; j++; } - assert(j == encl->nr_segments); + assert(j == encl->nr_segments - 1); + + seg = &encl->segment_tbl[j]; + seg->offset = encl->segment_tbl[j - 1].offset + encl->segment_tbl[j - 1].size; + seg->size = heap_size; + seg->src = mmap(NULL, heap_size, PROT_READ | PROT_WRITE, + MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); + seg->prot = PROT_READ | PROT_WRITE; + seg->flags = (SGX_PAGE_TYPE_REG << 8) | seg->prot; + seg->measure = false; + + if (seg->src == MAP_FAILED) + goto err; - encl->src = encl->bin + src_offset; - encl->src_size = encl->segment_tbl[j - 1].offset + - encl->segment_tbl[j - 1].size; + encl->src_size = encl->segment_tbl[j].offset + encl->segment_tbl[j].size; for (encl->encl_size = 4096; encl->encl_size < encl->src_size; ) encl->encl_size <<= 1; diff --git a/tools/testing/selftests/sgx/main.c b/tools/testing/selftests/sgx/main.c index e252015e0c15..370c4995f7c4 100644 --- a/tools/testing/selftests/sgx/main.c +++ b/tools/testing/selftests/sgx/main.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright(c) 2016-20 Intel Corporation. */ +#include <cpuid.h> #include <elf.h> #include <errno.h> #include <fcntl.h> @@ -21,6 +22,7 @@ #include "main.h" static const uint64_t MAGIC = 0x1122334455667788ULL; +static const uint64_t MAGIC2 = 0x8877665544332211ULL; vdso_sgx_enter_enclave_t vdso_sgx_enter_enclave; struct vdso_symtab { @@ -107,12 +109,32 @@ static Elf64_Sym *vdso_symtab_get(struct vdso_symtab *symtab, const char *name) return NULL; } +/* + * Return the offset in the enclave where the data segment can be found. + * The first RW segment loaded is the TCS, skip that to get info on the + * data segment. 
+ */ +static off_t encl_get_data_offset(struct encl *encl) +{ + int i; + + for (i = 1; i < encl->nr_segments; i++) { + struct encl_segment *seg = &encl->segment_tbl[i]; + + if (seg->prot == (PROT_READ | PROT_WRITE)) + return seg->offset; + } + + return -1; +} + FIXTURE(enclave) { struct encl encl; struct sgx_enclave_run run; }; -FIXTURE_SETUP(enclave) +static bool setup_test_encl(unsigned long heap_size, struct encl *encl, + struct __test_metadata *_metadata) { Elf64_Sym *sgx_enter_enclave_sym = NULL; struct vdso_symtab symtab; @@ -122,31 +144,25 @@ FIXTURE_SETUP(enclave) unsigned int i; void *addr; - if (!encl_load("test_encl.elf", &self->encl)) { - encl_delete(&self->encl); - ksft_exit_skip("cannot load enclaves\n"); + if (!encl_load("test_encl.elf", encl, heap_size)) { + encl_delete(encl); + TH_LOG("Failed to load the test enclave.\n"); } - for (i = 0; i < self->encl.nr_segments; i++) { - seg = &self->encl.segment_tbl[i]; - - TH_LOG("0x%016lx 0x%016lx 0x%02x", seg->offset, seg->size, seg->prot); - } - - if (!encl_measure(&self->encl)) + if (!encl_measure(encl)) goto err; - if (!encl_build(&self->encl)) + if (!encl_build(encl)) goto err; /* * An enclave consumer only must do this. */ - for (i = 0; i < self->encl.nr_segments; i++) { - struct encl_segment *seg = &self->encl.segment_tbl[i]; + for (i = 0; i < encl->nr_segments; i++) { + struct encl_segment *seg = &encl->segment_tbl[i]; - addr = mmap((void *)self->encl.encl_base + seg->offset, seg->size, - seg->prot, MAP_SHARED | MAP_FIXED, self->encl.fd, 0); + addr = mmap((void *)encl->encl_base + seg->offset, seg->size, + seg->prot, MAP_SHARED | MAP_FIXED, encl->fd, 0); EXPECT_NE(addr, MAP_FAILED); if (addr == MAP_FAILED) goto err; @@ -166,8 +182,16 @@ FIXTURE_SETUP(enclave) vdso_sgx_enter_enclave = addr + sgx_enter_enclave_sym->st_value; - memset(&self->run, 0, sizeof(self->run)); - self->run.tcs = self->encl.encl_base; + return true; + +err: + encl_delete(encl); + + for (i = 0; i < encl->nr_segments; i++) { + seg = &encl->segment_tbl[i]; + + TH_LOG("0x%016lx 0x%016lx 0x%02x", seg->offset, seg->size, seg->prot); + } maps_file = fopen("/proc/self/maps", "r"); if (maps_file != NULL) { @@ -181,11 +205,13 @@ FIXTURE_SETUP(enclave) fclose(maps_file); } -err: - if (!sgx_enter_enclave_sym) - encl_delete(&self->encl); + TH_LOG("Failed to initialize the test enclave.\n"); + + return false; +} - ASSERT_NE(sgx_enter_enclave_sym, NULL); +FIXTURE_SETUP(enclave) +{ } FIXTURE_TEARDOWN(enclave) @@ -215,44 +241,130 @@ FIXTURE_TEARDOWN(enclave) TEST_F(enclave, unclobbered_vdso) { - struct encl_op op; + struct encl_op_get_from_buf get_op; + struct encl_op_put_to_buf put_op; + + ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata)); + + memset(&self->run, 0, sizeof(self->run)); + self->run.tcs = self->encl.encl_base; + + put_op.header.type = ENCL_OP_PUT_TO_BUFFER; + put_op.value = MAGIC; + + EXPECT_EQ(ENCL_CALL(&put_op, &self->run, false), 0); + + EXPECT_EEXIT(&self->run); + EXPECT_EQ(self->run.user_data, 0); + + get_op.header.type = ENCL_OP_GET_FROM_BUFFER; + get_op.value = 0; + + EXPECT_EQ(ENCL_CALL(&get_op, &self->run, false), 0); + + EXPECT_EQ(get_op.value, MAGIC); + EXPECT_EEXIT(&self->run); + EXPECT_EQ(self->run.user_data, 0); +} + +/* + * A section metric is concatenated in a way that @low bits 12-31 define the + * bits 12-31 of the metric and @high bits 0-19 define the bits 32-51 of the + * metric. 
+ */ +static unsigned long sgx_calc_section_metric(unsigned int low, + unsigned int high) +{ + return (low & GENMASK_ULL(31, 12)) + + ((high & GENMASK_ULL(19, 0)) << 32); +} + +/* + * Sum total available physical SGX memory across all EPC sections + * + * Return: total available physical SGX memory available on system + */ +static unsigned long get_total_epc_mem(void) +{ + unsigned int eax, ebx, ecx, edx; + unsigned long total_size = 0; + unsigned int type; + int section = 0; + + while (true) { + __cpuid_count(SGX_CPUID, section + SGX_CPUID_EPC, eax, ebx, ecx, edx); + + type = eax & SGX_CPUID_EPC_MASK; + if (type == SGX_CPUID_EPC_INVALID) + break; - op.type = ENCL_OP_PUT; - op.buffer = MAGIC; + if (type != SGX_CPUID_EPC_SECTION) + break; - EXPECT_EQ(ENCL_CALL(&op, &self->run, false), 0); + total_size += sgx_calc_section_metric(ecx, edx); + + section++; + } + + return total_size; +} + +TEST_F(enclave, unclobbered_vdso_oversubscribed) +{ + struct encl_op_get_from_buf get_op; + struct encl_op_put_to_buf put_op; + unsigned long total_mem; + + total_mem = get_total_epc_mem(); + ASSERT_NE(total_mem, 0); + ASSERT_TRUE(setup_test_encl(total_mem, &self->encl, _metadata)); + + memset(&self->run, 0, sizeof(self->run)); + self->run.tcs = self->encl.encl_base; + + put_op.header.type = ENCL_OP_PUT_TO_BUFFER; + put_op.value = MAGIC; + + EXPECT_EQ(ENCL_CALL(&put_op, &self->run, false), 0); EXPECT_EEXIT(&self->run); EXPECT_EQ(self->run.user_data, 0); - op.type = ENCL_OP_GET; - op.buffer = 0; + get_op.header.type = ENCL_OP_GET_FROM_BUFFER; + get_op.value = 0; - EXPECT_EQ(ENCL_CALL(&op, &self->run, false), 0); + EXPECT_EQ(ENCL_CALL(&get_op, &self->run, false), 0); - EXPECT_EQ(op.buffer, MAGIC); + EXPECT_EQ(get_op.value, MAGIC); EXPECT_EEXIT(&self->run); EXPECT_EQ(self->run.user_data, 0); + } TEST_F(enclave, clobbered_vdso) { - struct encl_op op; + struct encl_op_get_from_buf get_op; + struct encl_op_put_to_buf put_op; - op.type = ENCL_OP_PUT; - op.buffer = MAGIC; + ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata)); - EXPECT_EQ(ENCL_CALL(&op, &self->run, true), 0); + memset(&self->run, 0, sizeof(self->run)); + self->run.tcs = self->encl.encl_base; + + put_op.header.type = ENCL_OP_PUT_TO_BUFFER; + put_op.value = MAGIC; + + EXPECT_EQ(ENCL_CALL(&put_op, &self->run, true), 0); EXPECT_EEXIT(&self->run); EXPECT_EQ(self->run.user_data, 0); - op.type = ENCL_OP_GET; - op.buffer = 0; + get_op.header.type = ENCL_OP_GET_FROM_BUFFER; + get_op.value = 0; - EXPECT_EQ(ENCL_CALL(&op, &self->run, true), 0); + EXPECT_EQ(ENCL_CALL(&get_op, &self->run, true), 0); - EXPECT_EQ(op.buffer, MAGIC); + EXPECT_EQ(get_op.value, MAGIC); EXPECT_EEXIT(&self->run); EXPECT_EQ(self->run.user_data, 0); } @@ -267,27 +379,179 @@ static int test_handler(long rdi, long rsi, long rdx, long ursp, long r8, long r TEST_F(enclave, clobbered_vdso_and_user_function) { - struct encl_op op; + struct encl_op_get_from_buf get_op; + struct encl_op_put_to_buf put_op; + + ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata)); + + memset(&self->run, 0, sizeof(self->run)); + self->run.tcs = self->encl.encl_base; self->run.user_handler = (__u64)test_handler; self->run.user_data = 0xdeadbeef; - op.type = ENCL_OP_PUT; - op.buffer = MAGIC; + put_op.header.type = ENCL_OP_PUT_TO_BUFFER; + put_op.value = MAGIC; - EXPECT_EQ(ENCL_CALL(&op, &self->run, true), 0); + EXPECT_EQ(ENCL_CALL(&put_op, &self->run, true), 0); EXPECT_EEXIT(&self->run); EXPECT_EQ(self->run.user_data, 0); - op.type = ENCL_OP_GET; - op.buffer = 0; + 
get_op.header.type = ENCL_OP_GET_FROM_BUFFER; + get_op.value = 0; - EXPECT_EQ(ENCL_CALL(&op, &self->run, true), 0); + EXPECT_EQ(ENCL_CALL(&get_op, &self->run, true), 0); - EXPECT_EQ(op.buffer, MAGIC); + EXPECT_EQ(get_op.value, MAGIC); EXPECT_EEXIT(&self->run); EXPECT_EQ(self->run.user_data, 0); } +/* + * Sanity check that it is possible to enter either of the two hardcoded TCS + */ +TEST_F(enclave, tcs_entry) +{ + struct encl_op_header op; + + ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata)); + + memset(&self->run, 0, sizeof(self->run)); + self->run.tcs = self->encl.encl_base; + + op.type = ENCL_OP_NOP; + + EXPECT_EQ(ENCL_CALL(&op, &self->run, true), 0); + + EXPECT_EEXIT(&self->run); + EXPECT_EQ(self->run.exception_vector, 0); + EXPECT_EQ(self->run.exception_error_code, 0); + EXPECT_EQ(self->run.exception_addr, 0); + + /* Move to the next TCS. */ + self->run.tcs = self->encl.encl_base + PAGE_SIZE; + + EXPECT_EQ(ENCL_CALL(&op, &self->run, true), 0); + + EXPECT_EEXIT(&self->run); + EXPECT_EQ(self->run.exception_vector, 0); + EXPECT_EQ(self->run.exception_error_code, 0); + EXPECT_EQ(self->run.exception_addr, 0); +} + +/* + * Second page of .data segment is used to test changing PTE permissions. + * This spans the local encl_buffer within the test enclave. + * + * 1) Start with a sanity check: a value is written to the target page within + * the enclave and read back to ensure target page can be written to. + * 2) Change PTE permissions (RW -> RO) of target page within enclave. + * 3) Repeat (1) - this time expecting a regular #PF communicated via the + * vDSO. + * 4) Change PTE permissions of target page within enclave back to be RW. + * 5) Repeat (1) by resuming enclave, now expected to be possible to write to + * and read from target page within enclave. + */ +TEST_F(enclave, pte_permissions) +{ + struct encl_op_get_from_addr get_addr_op; + struct encl_op_put_to_addr put_addr_op; + unsigned long data_start; + int ret; + + ASSERT_TRUE(setup_test_encl(ENCL_HEAP_SIZE_DEFAULT, &self->encl, _metadata)); + + memset(&self->run, 0, sizeof(self->run)); + self->run.tcs = self->encl.encl_base; + + data_start = self->encl.encl_base + + encl_get_data_offset(&self->encl) + + PAGE_SIZE; + + /* + * Sanity check to ensure it is possible to write to page that will + * have its permissions manipulated. + */ + + /* Write MAGIC to page */ + put_addr_op.value = MAGIC; + put_addr_op.addr = data_start; + put_addr_op.header.type = ENCL_OP_PUT_TO_ADDRESS; + + EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0); + + EXPECT_EEXIT(&self->run); + EXPECT_EQ(self->run.exception_vector, 0); + EXPECT_EQ(self->run.exception_error_code, 0); + EXPECT_EQ(self->run.exception_addr, 0); + + /* + * Read memory that was just written to, confirming that it is the + * value previously written (MAGIC). 
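+	 * A successful read leaves the exception_vector, exception_error_code
+	 * and exception_addr fields of struct sgx_enclave_run at zero, which
+	 * the checks below verify.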
+ */ + get_addr_op.value = 0; + get_addr_op.addr = data_start; + get_addr_op.header.type = ENCL_OP_GET_FROM_ADDRESS; + + EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0); + + EXPECT_EQ(get_addr_op.value, MAGIC); + EXPECT_EEXIT(&self->run); + EXPECT_EQ(self->run.exception_vector, 0); + EXPECT_EQ(self->run.exception_error_code, 0); + EXPECT_EQ(self->run.exception_addr, 0); + + /* Change PTE permissions of target page within the enclave */ + ret = mprotect((void *)data_start, PAGE_SIZE, PROT_READ); + if (ret) + perror("mprotect"); + + /* + * PTE permissions of target page changed to read-only, EPCM + * permissions unchanged (EPCM permissions are RW), attempt to + * write to the page, expecting a regular #PF. + */ + + put_addr_op.value = MAGIC2; + + EXPECT_EQ(ENCL_CALL(&put_addr_op, &self->run, true), 0); + + EXPECT_EQ(self->run.exception_vector, 14); + EXPECT_EQ(self->run.exception_error_code, 0x7); + EXPECT_EQ(self->run.exception_addr, data_start); + + self->run.exception_vector = 0; + self->run.exception_error_code = 0; + self->run.exception_addr = 0; + + /* + * Change PTE permissions back to enable enclave to write to the + * target page and resume enclave - do not expect any exceptions this + * time. + */ + ret = mprotect((void *)data_start, PAGE_SIZE, PROT_READ | PROT_WRITE); + if (ret) + perror("mprotect"); + + EXPECT_EQ(vdso_sgx_enter_enclave((unsigned long)&put_addr_op, 0, + 0, ERESUME, 0, 0, &self->run), + 0); + + EXPECT_EEXIT(&self->run); + EXPECT_EQ(self->run.exception_vector, 0); + EXPECT_EQ(self->run.exception_error_code, 0); + EXPECT_EQ(self->run.exception_addr, 0); + + get_addr_op.value = 0; + + EXPECT_EQ(ENCL_CALL(&get_addr_op, &self->run, true), 0); + + EXPECT_EQ(get_addr_op.value, MAGIC2); + EXPECT_EEXIT(&self->run); + EXPECT_EQ(self->run.exception_vector, 0); + EXPECT_EQ(self->run.exception_error_code, 0); + EXPECT_EQ(self->run.exception_addr, 0); +} + TEST_HARNESS_MAIN diff --git a/tools/testing/selftests/sgx/main.h b/tools/testing/selftests/sgx/main.h index 68672fd86cf9..b45c52ec7ab3 100644 --- a/tools/testing/selftests/sgx/main.h +++ b/tools/testing/selftests/sgx/main.h @@ -6,11 +6,15 @@ #ifndef MAIN_H #define MAIN_H +#define ENCL_HEAP_SIZE_DEFAULT 4096 + struct encl_segment { + void *src; off_t offset; size_t size; unsigned int prot; unsigned int flags; + bool measure; }; struct encl { @@ -31,7 +35,7 @@ extern unsigned char sign_key[]; extern unsigned char sign_key_end[]; void encl_delete(struct encl *ctx); -bool encl_load(const char *path, struct encl *encl); +bool encl_load(const char *path, struct encl *encl, unsigned long heap_size); bool encl_measure(struct encl *encl); bool encl_build(struct encl *encl); diff --git a/tools/testing/selftests/sgx/sigstruct.c b/tools/testing/selftests/sgx/sigstruct.c index 92bbc5a15c39..50c5ab1aa6fa 100644 --- a/tools/testing/selftests/sgx/sigstruct.c +++ b/tools/testing/selftests/sgx/sigstruct.c @@ -289,15 +289,17 @@ static bool mrenclave_eextend(EVP_MD_CTX *ctx, uint64_t offset, static bool mrenclave_segment(EVP_MD_CTX *ctx, struct encl *encl, struct encl_segment *seg) { - uint64_t end = seg->offset + seg->size; + uint64_t end = seg->size; uint64_t offset; - for (offset = seg->offset; offset < end; offset += PAGE_SIZE) { - if (!mrenclave_eadd(ctx, offset, seg->flags)) + for (offset = 0; offset < end; offset += PAGE_SIZE) { + if (!mrenclave_eadd(ctx, seg->offset + offset, seg->flags)) return false; - if (!mrenclave_eextend(ctx, offset, encl->src + offset)) - return false; + if (seg->measure) { + if (!mrenclave_eextend(ctx, 
seg->offset + offset, seg->src + offset)) + return false; + } } return true; diff --git a/tools/testing/selftests/sgx/test_encl.c b/tools/testing/selftests/sgx/test_encl.c index 734ea52f9924..4fca01cfd898 100644 --- a/tools/testing/selftests/sgx/test_encl.c +++ b/tools/testing/selftests/sgx/test_encl.c @@ -4,6 +4,11 @@ #include <stddef.h> #include "defines.h" +/* + * Data buffer spanning two pages that will be placed first in .data + * segment. Even if not used internally the second page is needed by + * external test manipulating page permissions. + */ static uint8_t encl_buffer[8192] = { 1 }; static void *memcpy(void *dest, const void *src, size_t n) @@ -16,20 +21,51 @@ static void *memcpy(void *dest, const void *src, size_t n) return dest; } -void encl_body(void *rdi, void *rsi) +static void do_encl_op_put_to_buf(void *op) +{ + struct encl_op_put_to_buf *op2 = op; + + memcpy(&encl_buffer[0], &op2->value, 8); +} + +static void do_encl_op_get_from_buf(void *op) { - struct encl_op *op = (struct encl_op *)rdi; + struct encl_op_get_from_buf *op2 = op; + + memcpy(&op2->value, &encl_buffer[0], 8); +} + +static void do_encl_op_put_to_addr(void *_op) +{ + struct encl_op_put_to_addr *op = _op; + + memcpy((void *)op->addr, &op->value, 8); +} - switch (op->type) { - case ENCL_OP_PUT: - memcpy(&encl_buffer[0], &op->buffer, 8); - break; +static void do_encl_op_get_from_addr(void *_op) +{ + struct encl_op_get_from_addr *op = _op; + + memcpy(&op->value, (void *)op->addr, 8); +} + +static void do_encl_op_nop(void *_op) +{ + +} + +void encl_body(void *rdi, void *rsi) +{ + const void (*encl_op_array[ENCL_OP_MAX])(void *) = { + do_encl_op_put_to_buf, + do_encl_op_get_from_buf, + do_encl_op_put_to_addr, + do_encl_op_get_from_addr, + do_encl_op_nop, + }; - case ENCL_OP_GET: - memcpy(&op->buffer, &encl_buffer[0], 8); - break; + struct encl_op_header *op = (struct encl_op_header *)rdi; - default: - break; - } + if (op->type < ENCL_OP_MAX) + (*encl_op_array[op->type])(op); } diff --git a/tools/testing/selftests/sgx/test_encl_bootstrap.S b/tools/testing/selftests/sgx/test_encl_bootstrap.S index 5d5680d4ea39..82fb0dfcbd23 100644 --- a/tools/testing/selftests/sgx/test_encl_bootstrap.S +++ b/tools/testing/selftests/sgx/test_encl_bootstrap.S @@ -12,7 +12,7 @@ .fill 1, 8, 0 # STATE (set by CPU) .fill 1, 8, 0 # FLAGS - .quad encl_ssa # OSSA + .quad encl_ssa_tcs1 # OSSA .fill 1, 4, 0 # CSSA (set by CPU) .fill 1, 4, 1 # NSSA .quad encl_entry # OENTRY @@ -23,10 +23,10 @@ .fill 1, 4, 0xFFFFFFFF # GSLIMIT .fill 4024, 1, 0 # Reserved - # Identical to the previous TCS. + # TCS2 .fill 1, 8, 0 # STATE (set by CPU) .fill 1, 8, 0 # FLAGS - .quad encl_ssa # OSSA + .quad encl_ssa_tcs2 # OSSA .fill 1, 4, 0 # CSSA (set by CPU) .fill 1, 4, 1 # NSSA .quad encl_entry # OENTRY @@ -40,8 +40,9 @@ .text encl_entry: - # RBX contains the base address for TCS, which is also the first address - # inside the enclave. By adding the value of le_stack_end to it, we get + # RBX contains the base address for TCS, which is the first address + # inside the enclave for TCS #1 and one page into the enclave for + # TCS #2. By adding the value of encl_stack to it, we get # the absolute address for the stack. 
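+	# For TCS #1 (%rbx == encl_base) that is the address of encl_stack
+	# itself; for TCS #2 (%rbx == encl_base + 4096) it is one page higher,
+	# matching the per-TCS stack pages laid out in the .data section below.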
lea (encl_stack)(%rbx), %rax xchg %rsp, %rax @@ -81,9 +82,15 @@ encl_entry: .section ".data", "aw" -encl_ssa: +encl_ssa_tcs1: + .space 4096 +encl_ssa_tcs2: .space 4096 .balign 4096 - .space 8192 + # Stack of TCS #1 + .space 4096 encl_stack: + .balign 4096 + # Stack of TCS #2 + .space 4096 diff --git a/tools/testing/selftests/sparc64/drivers/adi-test.c b/tools/testing/selftests/sparc64/drivers/adi-test.c index 95d93c6a88a5..84e5d9fd20b0 100644 --- a/tools/testing/selftests/sparc64/drivers/adi-test.c +++ b/tools/testing/selftests/sparc64/drivers/adi-test.c @@ -24,10 +24,6 @@ #define DEBUG_LEVEL_4_BIT (0x0008) #define DEBUG_TIMING_BIT (0x1000) -#ifndef ARRAY_SIZE -# define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) -#endif - /* bit mask of enabled bits to print */ #define DEBUG 0x0001 diff --git a/tools/testing/selftests/tc-testing/config b/tools/testing/selftests/tc-testing/config index b71828df5a6d..a3239d5e40c7 100644 --- a/tools/testing/selftests/tc-testing/config +++ b/tools/testing/selftests/tc-testing/config @@ -60,6 +60,8 @@ CONFIG_NET_IFE_SKBTCINDEX=m CONFIG_NET_SCH_FIFO=y CONFIG_NET_SCH_ETS=m CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_FQ_PIE=m +CONFIG_NETDEVSIM=m # ## Network testing diff --git a/tools/testing/selftests/tc-testing/tdc.py b/tools/testing/selftests/tc-testing/tdc.py index a3e43189d940..ee22e3447ec7 100755 --- a/tools/testing/selftests/tc-testing/tdc.py +++ b/tools/testing/selftests/tc-testing/tdc.py @@ -716,6 +716,7 @@ def set_operation_mode(pm, parser, args, remaining): list_test_cases(alltests) exit(0) + exit_code = 0 # KSFT_PASS if len(alltests): req_plugins = pm.get_required_plugins(alltests) try: @@ -724,6 +725,8 @@ def set_operation_mode(pm, parser, args, remaining): print('The following plugins were not found:') print('{}'.format(pde.missing_pg)) catresults = test_runner(pm, args, alltests) + if catresults.count_failures() != 0: + exit_code = 1 # KSFT_FAIL if args.format == 'none': print('Test results output suppression requested\n') else: @@ -748,6 +751,8 @@ def set_operation_mode(pm, parser, args, remaining): gid=int(os.getenv('SUDO_GID'))) else: print('No tests found\n') + exit_code = 4 # KSFT_SKIP + exit(exit_code) def main(): """ @@ -767,8 +772,5 @@ def main(): set_operation_mode(pm, parser, args, remaining) - exit(0) - - if __name__ == "__main__": main() diff --git a/tools/testing/selftests/tc-testing/tdc.sh b/tools/testing/selftests/tc-testing/tdc.sh index 7fe38c76db44..afb0cd86fa3d 100755 --- a/tools/testing/selftests/tc-testing/tdc.sh +++ b/tools/testing/selftests/tc-testing/tdc.sh @@ -1,5 +1,6 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0 +modprobe netdevsim ./tdc.py -c actions --nobuildebpf ./tdc.py -c qdisc diff --git a/tools/testing/selftests/timens/procfs.c b/tools/testing/selftests/timens/procfs.c index f2519154208a..1833ca97eb24 100644 --- a/tools/testing/selftests/timens/procfs.c +++ b/tools/testing/selftests/timens/procfs.c @@ -24,8 +24,6 @@ #define DAY_IN_SEC (60*60*24) #define TEN_DAYS_IN_SEC (10*DAY_IN_SEC) -#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0])) - static int child_ns, parent_ns; static int switch_ns(int fd) diff --git a/tools/testing/selftests/timens/timens.c b/tools/testing/selftests/timens/timens.c index 52b6a1185f52..387220791a05 100644 --- a/tools/testing/selftests/timens/timens.c +++ b/tools/testing/selftests/timens/timens.c @@ -22,8 +22,6 @@ #define DAY_IN_SEC (60*60*24) #define TEN_DAYS_IN_SEC (10*DAY_IN_SEC) -#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0])) - struct test_clock { clockid_t id; char *name; diff 
--git a/tools/testing/selftests/timers/alarmtimer-suspend.c b/tools/testing/selftests/timers/alarmtimer-suspend.c index 4da09dbf83ba..54da4b088f4c 100644 --- a/tools/testing/selftests/timers/alarmtimer-suspend.c +++ b/tools/testing/selftests/timers/alarmtimer-suspend.c @@ -79,7 +79,7 @@ char *clockstring(int clockid) return "CLOCK_BOOTTIME_ALARM"; case CLOCK_TAI: return "CLOCK_TAI"; - }; + } return "UNKNOWN_CLOCKID"; } diff --git a/tools/testing/selftests/timers/inconsistency-check.c b/tools/testing/selftests/timers/inconsistency-check.c index 022d3ffe3fbf..e6756d9c60a7 100644 --- a/tools/testing/selftests/timers/inconsistency-check.c +++ b/tools/testing/selftests/timers/inconsistency-check.c @@ -72,7 +72,7 @@ char *clockstring(int clockid) return "CLOCK_BOOTTIME_ALARM"; case CLOCK_TAI: return "CLOCK_TAI"; - }; + } return "UNKNOWN_CLOCKID"; } diff --git a/tools/testing/selftests/vm/mremap_test.c b/tools/testing/selftests/vm/mremap_test.c index 0624d1bd71b5..7c0b0617b9f8 100644 --- a/tools/testing/selftests/vm/mremap_test.c +++ b/tools/testing/selftests/vm/mremap_test.c @@ -20,7 +20,6 @@ #define VALIDATION_DEFAULT_THRESHOLD 4 /* 4MB */ #define VALIDATION_NO_THRESHOLD 0 /* Verify the entire region */ -#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) #define MIN(X, Y) ((X) < (Y) ? (X) : (Y)) struct config { diff --git a/tools/testing/selftests/vm/pkey-helpers.h b/tools/testing/selftests/vm/pkey-helpers.h index 622a85848f61..92f3be3dd8e5 100644 --- a/tools/testing/selftests/vm/pkey-helpers.h +++ b/tools/testing/selftests/vm/pkey-helpers.h @@ -13,6 +13,8 @@ #include <ucontext.h> #include <sys/mman.h> +#include "../kselftest.h" + /* Define some kernel-like types */ #define u8 __u8 #define u16 __u16 @@ -175,7 +177,6 @@ static inline void __pkey_write_allow(int pkey, int do_allow_write) dprintf4("pkey_reg now: %016llx\n", read_pkey_reg()); } -#define ARRAY_SIZE(x) (sizeof(x) / sizeof(*(x))) #define ALIGN_UP(x, align_to) (((x) + ((align_to)-1)) & ~((align_to)-1)) #define ALIGN_DOWN(x, align_to) ((x) & ~((align_to)-1)) #define ALIGN_PTR_UP(p, ptr_align_to) \ diff --git a/tools/testing/selftests/vm/userfaultfd.c b/tools/testing/selftests/vm/userfaultfd.c index 8a09057d2f22..9354a5e0321c 100644 --- a/tools/testing/selftests/vm/userfaultfd.c +++ b/tools/testing/selftests/vm/userfaultfd.c @@ -87,7 +87,7 @@ static bool test_uffdio_minor = false; static bool map_shared; static int shm_fd; -static int huge_fd; +static int huge_fd = -1; /* only used for hugetlb_shared test */ static char *huge_fd_off0; static unsigned long long *count_verify; static int uffd = -1; @@ -223,6 +223,9 @@ static void noop_alias_mapping(__u64 *start, size_t len, unsigned long offset) static void hugetlb_release_pages(char *rel_area) { + if (huge_fd == -1) + return; + if (fallocate(huge_fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, rel_area == huge_fd_off0 ? 0 : nr_pages * page_size, nr_pages * page_size)) @@ -235,16 +238,17 @@ static void hugetlb_allocate_area(void **alloc_area) char **alloc_area_alias; *alloc_area = mmap(NULL, nr_pages * page_size, PROT_READ | PROT_WRITE, - (map_shared ? MAP_SHARED : MAP_PRIVATE) | - MAP_HUGETLB, - huge_fd, *alloc_area == area_src ? 0 : - nr_pages * page_size); + map_shared ? MAP_SHARED : + MAP_PRIVATE | MAP_HUGETLB | + (*alloc_area == area_src ? 0 : MAP_NORESERVE), + huge_fd, + *alloc_area == area_src ? 
0 : nr_pages * page_size); if (*alloc_area == MAP_FAILED) err("mmap of hugetlbfs file failed"); if (map_shared) { area_alias = mmap(NULL, nr_pages * page_size, PROT_READ | PROT_WRITE, - MAP_SHARED | MAP_HUGETLB, + MAP_SHARED, huge_fd, *alloc_area == area_src ? 0 : nr_pages * page_size); if (area_alias == MAP_FAILED) diff --git a/tools/testing/selftests/vm/va_128TBswitch.c b/tools/testing/selftests/vm/va_128TBswitch.c index 83acdff26a13..da6ec3b53ea8 100644 --- a/tools/testing/selftests/vm/va_128TBswitch.c +++ b/tools/testing/selftests/vm/va_128TBswitch.c @@ -9,7 +9,7 @@ #include <sys/mman.h> #include <string.h> -#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0])) +#include "../kselftest.h" #ifdef __powerpc64__ #define PAGE_SIZE (64 << 10) |