Diffstat (limited to 'tools')
-rw-r--r--  tools/arch/arm64/include/asm/cputype.h | 2
-rw-r--r--  tools/arch/x86/include/asm/msr-index.h | 34
-rw-r--r--  tools/arch/x86/include/uapi/asm/kvm.h | 1
-rw-r--r--  tools/arch/x86/include/uapi/asm/unistd_32.h | 3
-rw-r--r--  tools/arch/x86/include/uapi/asm/unistd_64.h | 3
-rw-r--r--  tools/include/linux/bits.h | 15
-rw-r--r--  tools/include/linux/unaligned.h | 11
-rw-r--r--  tools/include/uapi/linux/bits.h | 3
-rw-r--r--  tools/include/uapi/linux/bpf.h | 3
-rw-r--r--  tools/include/uapi/linux/const.h | 17
-rw-r--r--  tools/include/vdso/unaligned.h | 15
-rw-r--r--  tools/perf/Makefile.config | 4
-rw-r--r--  tools/perf/builtin-trace.c | 2
-rwxr-xr-x  tools/perf/check-headers.sh | 1
-rwxr-xr-x  tools/perf/tests/shell/base_probe/test_adding_blacklisted.sh | 69
-rw-r--r--  tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c | 22
-rw-r--r--  tools/perf/util/cap.c | 10
-rw-r--r--  tools/perf/util/python.c | 3
-rw-r--r--  tools/perf/util/syscalltbl.c | 10
-rw-r--r--  tools/sched_ext/include/scx/common.bpf.h | 2
-rw-r--r--  tools/testing/selftests/bpf/map_tests/lpm_trie_map_get_next_key.c | 109
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/verifier.c | 19
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_bits_iter.c | 61
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_bpf_fastcall.c | 55
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_const.c | 31
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_mtu.c | 18
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_search_pruning.c | 23
-rw-r--r--  tools/testing/selftests/bpf/veristat.cfg | 1
-rw-r--r--  tools/testing/selftests/mm/uffd-common.c | 5
-rw-r--r--  tools/testing/selftests/mm/uffd-common.h | 3
-rw-r--r--  tools/testing/selftests/mm/uffd-unit-tests.c | 24
-rw-r--r--  tools/testing/selftests/sched_ext/Makefile | 2
-rw-r--r--  tools/testing/selftests/sched_ext/create_dsq.bpf.c | 6
-rw-r--r--  tools/testing/selftests/sched_ext/ddsp_bogus_dsq_fail.bpf.c | 4
-rw-r--r--  tools/testing/selftests/sched_ext/ddsp_vtimelocal_fail.bpf.c | 4
-rw-r--r--  tools/testing/selftests/sched_ext/dsp_local_on.bpf.c | 8
-rw-r--r--  tools/testing/selftests/sched_ext/enq_last_no_enq_fails.bpf.c | 8
-rw-r--r--  tools/testing/selftests/sched_ext/enq_last_no_enq_fails.c | 10
-rw-r--r--  tools/testing/selftests/sched_ext/enq_select_cpu_fails.bpf.c | 4
-rw-r--r--  tools/testing/selftests/sched_ext/exit.bpf.c | 22
-rw-r--r--  tools/testing/selftests/sched_ext/hotplug.bpf.c | 8
-rw-r--r--  tools/testing/selftests/sched_ext/init_enable_count.bpf.c | 8
-rw-r--r--  tools/testing/selftests/sched_ext/maximal.bpf.c | 58
-rw-r--r--  tools/testing/selftests/sched_ext/maybe_null.bpf.c | 6
-rw-r--r--  tools/testing/selftests/sched_ext/maybe_null_fail_dsp.bpf.c | 4
-rw-r--r--  tools/testing/selftests/sched_ext/maybe_null_fail_yld.bpf.c | 4
-rw-r--r--  tools/testing/selftests/sched_ext/prog_run.bpf.c | 2
-rw-r--r--  tools/testing/selftests/sched_ext/select_cpu_dfl.bpf.c | 2
-rw-r--r--  tools/testing/selftests/sched_ext/select_cpu_dfl_nodispatch.bpf.c | 6
-rw-r--r--  tools/testing/selftests/sched_ext/select_cpu_dispatch.bpf.c | 2
-rw-r--r--  tools/testing/selftests/sched_ext/select_cpu_dispatch_bad_dsq.bpf.c | 4
-rw-r--r--  tools/testing/selftests/sched_ext/select_cpu_dispatch_dbl_dsp.bpf.c | 4
-rw-r--r--  tools/testing/selftests/sched_ext/select_cpu_vtime.bpf.c | 12
-rw-r--r--  tools/testing/vma/vma.c | 40
54 files changed, 591 insertions(+), 216 deletions(-)
diff --git a/tools/arch/arm64/include/asm/cputype.h b/tools/arch/arm64/include/asm/cputype.h
index 5a7dfeb8e8eb..488f8e751349 100644
--- a/tools/arch/arm64/include/asm/cputype.h
+++ b/tools/arch/arm64/include/asm/cputype.h
@@ -94,6 +94,7 @@
#define ARM_CPU_PART_NEOVERSE_V3 0xD84
#define ARM_CPU_PART_CORTEX_X925 0xD85
#define ARM_CPU_PART_CORTEX_A725 0xD87
+#define ARM_CPU_PART_NEOVERSE_N3 0xD8E
#define APM_CPU_PART_XGENE 0x000
#define APM_CPU_VAR_POTENZA 0x00
@@ -176,6 +177,7 @@
#define MIDR_NEOVERSE_V3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V3)
#define MIDR_CORTEX_X925 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X925)
#define MIDR_CORTEX_A725 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A725)
+#define MIDR_NEOVERSE_N3 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N3)
#define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
#define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
#define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
diff --git a/tools/arch/x86/include/asm/msr-index.h b/tools/arch/x86/include/asm/msr-index.h
index a7c06a46fb76..3ae84c3b8e6d 100644
--- a/tools/arch/x86/include/asm/msr-index.h
+++ b/tools/arch/x86/include/asm/msr-index.h
@@ -36,6 +36,20 @@
#define EFER_FFXSR (1<<_EFER_FFXSR)
#define EFER_AUTOIBRS (1<<_EFER_AUTOIBRS)
+/*
+ * Architectural memory types that are common to MTRRs, PAT, VMX MSRs, etc.
+ * Most MSRs support/allow only a subset of memory types, but the values
+ * themselves are common across all relevant MSRs.
+ */
+#define X86_MEMTYPE_UC 0ull /* Uncacheable, a.k.a. Strong Uncacheable */
+#define X86_MEMTYPE_WC 1ull /* Write Combining */
+/* RESERVED 2 */
+/* RESERVED 3 */
+#define X86_MEMTYPE_WT 4ull /* Write Through */
+#define X86_MEMTYPE_WP 5ull /* Write Protected */
+#define X86_MEMTYPE_WB 6ull /* Write Back */
+#define X86_MEMTYPE_UC_MINUS 7ull /* Weak Uncacheable (PAT only) */
+
/* FRED MSRs */
#define MSR_IA32_FRED_RSP0 0x1cc /* Level 0 stack pointer */
#define MSR_IA32_FRED_RSP1 0x1cd /* Level 1 stack pointer */
@@ -365,6 +379,12 @@
#define MSR_IA32_CR_PAT 0x00000277
+#define PAT_VALUE(p0, p1, p2, p3, p4, p5, p6, p7) \
+ ((X86_MEMTYPE_ ## p0) | (X86_MEMTYPE_ ## p1 << 8) | \
+ (X86_MEMTYPE_ ## p2 << 16) | (X86_MEMTYPE_ ## p3 << 24) | \
+ (X86_MEMTYPE_ ## p4 << 32) | (X86_MEMTYPE_ ## p5 << 40) | \
+ (X86_MEMTYPE_ ## p6 << 48) | (X86_MEMTYPE_ ## p7 << 56))
+
#define MSR_IA32_DEBUGCTLMSR 0x000001d9
#define MSR_IA32_LASTBRANCHFROMIP 0x000001db
#define MSR_IA32_LASTBRANCHTOIP 0x000001dc
@@ -1159,15 +1179,6 @@
#define MSR_IA32_VMX_VMFUNC 0x00000491
#define MSR_IA32_VMX_PROCBASED_CTLS3 0x00000492
-/* VMX_BASIC bits and bitmasks */
-#define VMX_BASIC_VMCS_SIZE_SHIFT 32
-#define VMX_BASIC_TRUE_CTLS (1ULL << 55)
-#define VMX_BASIC_64 0x0001000000000000LLU
-#define VMX_BASIC_MEM_TYPE_SHIFT 50
-#define VMX_BASIC_MEM_TYPE_MASK 0x003c000000000000LLU
-#define VMX_BASIC_MEM_TYPE_WB 6LLU
-#define VMX_BASIC_INOUT 0x0040000000000000LLU
-
/* Resctrl MSRs: */
/* - Intel: */
#define MSR_IA32_L3_QOS_CFG 0xc81
@@ -1185,11 +1196,6 @@
#define MSR_IA32_SMBA_BW_BASE 0xc0000280
#define MSR_IA32_EVT_CFG_BASE 0xc0000400
-/* MSR_IA32_VMX_MISC bits */
-#define MSR_IA32_VMX_MISC_INTEL_PT (1ULL << 14)
-#define MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS (1ULL << 29)
-#define MSR_IA32_VMX_MISC_PREEMPTION_TIMER_SCALE 0x1F
-
/* AMD-V MSRs */
#define MSR_VM_CR 0xc0010114
#define MSR_VM_IGNNE 0xc0010115
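
The new PAT_VALUE() packs one architectural memory type per byte: PA0 in bits 0-7 up through PA7 in bits 56-63. A minimal stand-alone sketch of the layout (hypothetical user-space code, not part of this patch), checked against the well-known power-on default of IA32_PAT:

/* Hypothetical check of the PAT_VALUE() layout, mirroring the macros
 * added above. The power-on default PAT is WB, WT, UC-, UC repeated,
 * i.e. 0x0007040600070406 per the Intel SDM.
 */
#define X86_MEMTYPE_UC		0ull
#define X86_MEMTYPE_WT		4ull
#define X86_MEMTYPE_WB		6ull
#define X86_MEMTYPE_UC_MINUS	7ull

#define PAT_VALUE(p0, p1, p2, p3, p4, p5, p6, p7)			\
	((X86_MEMTYPE_ ## p0) | (X86_MEMTYPE_ ## p1 << 8) |		\
	(X86_MEMTYPE_ ## p2 << 16) | (X86_MEMTYPE_ ## p3 << 24) |	\
	(X86_MEMTYPE_ ## p4 << 32) | (X86_MEMTYPE_ ## p5 << 40) |	\
	(X86_MEMTYPE_ ## p6 << 48) | (X86_MEMTYPE_ ## p7 << 56))

_Static_assert(PAT_VALUE(WB, WT, UC_MINUS, UC, WB, WT, UC_MINUS, UC) ==
	       0x0007040600070406ULL, "default PAT layout");
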
diff --git a/tools/arch/x86/include/uapi/asm/kvm.h b/tools/arch/x86/include/uapi/asm/kvm.h
index bf57a824f722..a8debbf2f702 100644
--- a/tools/arch/x86/include/uapi/asm/kvm.h
+++ b/tools/arch/x86/include/uapi/asm/kvm.h
@@ -439,6 +439,7 @@ struct kvm_sync_regs {
#define KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT (1 << 4)
#define KVM_X86_QUIRK_FIX_HYPERCALL_INSN (1 << 5)
#define KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS (1 << 6)
+#define KVM_X86_QUIRK_SLOT_ZAP_ALL (1 << 7)
#define KVM_STATE_NESTED_FORMAT_VMX 0
#define KVM_STATE_NESTED_FORMAT_SVM 1
diff --git a/tools/arch/x86/include/uapi/asm/unistd_32.h b/tools/arch/x86/include/uapi/asm/unistd_32.h
index 9de35df1afc3..63182a023e9d 100644
--- a/tools/arch/x86/include/uapi/asm/unistd_32.h
+++ b/tools/arch/x86/include/uapi/asm/unistd_32.h
@@ -11,6 +11,9 @@
#ifndef __NR_getpgid
#define __NR_getpgid 132
#endif
+#ifndef __NR_capget
+#define __NR_capget 184
+#endif
#ifndef __NR_gettid
#define __NR_gettid 224
#endif
diff --git a/tools/arch/x86/include/uapi/asm/unistd_64.h b/tools/arch/x86/include/uapi/asm/unistd_64.h
index d0f2043d7132..77311e8d1b5d 100644
--- a/tools/arch/x86/include/uapi/asm/unistd_64.h
+++ b/tools/arch/x86/include/uapi/asm/unistd_64.h
@@ -11,6 +11,9 @@
#ifndef __NR_getpgid
#define __NR_getpgid 121
#endif
+#ifndef __NR_capget
+#define __NR_capget 125
+#endif
#ifndef __NR_gettid
#define __NR_gettid 186
#endif
diff --git a/tools/include/linux/bits.h b/tools/include/linux/bits.h
index 0eb24d21aac2..60044b608817 100644
--- a/tools/include/linux/bits.h
+++ b/tools/include/linux/bits.h
@@ -36,4 +36,19 @@
#define GENMASK_ULL(h, l) \
(GENMASK_INPUT_CHECK(h, l) + __GENMASK_ULL(h, l))
+#if !defined(__ASSEMBLY__)
+/*
+ * Missing asm support
+ *
+ * __GENMASK_U128() depends on _BIT128(), which would not work
+ * in asm code, as it shifts an 'unsigned __int128' data type
+ * rather than using a direct representation of 128 bit constants
+ * such as long and unsigned long. The fundamental problem is
+ * that a 128 bit constant would get silently truncated by the
+ * gcc compiler.
+ */
+#define GENMASK_U128(h, l) \
+ (GENMASK_INPUT_CHECK(h, l) + __GENMASK_U128(h, l))
+#endif
+
#endif /* __LINUX_BITS_H */
diff --git a/tools/include/linux/unaligned.h b/tools/include/linux/unaligned.h
index bc0633bc4650..395a4464fe73 100644
--- a/tools/include/linux/unaligned.h
+++ b/tools/include/linux/unaligned.h
@@ -9,16 +9,7 @@
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wpacked"
#pragma GCC diagnostic ignored "-Wattributes"
-
-#define __get_unaligned_t(type, ptr) ({ \
- const struct { type x; } __packed *__pptr = (typeof(__pptr))(ptr); \
- __pptr->x; \
-})
-
-#define __put_unaligned_t(type, val, ptr) do { \
- struct { type x; } __packed *__pptr = (typeof(__pptr))(ptr); \
- __pptr->x = (val); \
-} while (0)
+#include <vdso/unaligned.h>
#define get_unaligned(ptr) __get_unaligned_t(typeof(*(ptr)), (ptr))
#define put_unaligned(val, ptr) __put_unaligned_t(typeof(*(ptr)), (val), (ptr))
diff --git a/tools/include/uapi/linux/bits.h b/tools/include/uapi/linux/bits.h
index 3c2a101986a3..5ee30f882736 100644
--- a/tools/include/uapi/linux/bits.h
+++ b/tools/include/uapi/linux/bits.h
@@ -12,4 +12,7 @@
(((~_ULL(0)) - (_ULL(1) << (l)) + 1) & \
(~_ULL(0) >> (__BITS_PER_LONG_LONG - 1 - (h))))
+#define __GENMASK_U128(h, l) \
+ ((_BIT128((h)) << 1) - (_BIT128(l)))
+
#endif /* _UAPI_LINUX_BITS_H */
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index e8241b320c6d..4a939c90dc2e 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -1121,6 +1121,9 @@ enum bpf_attach_type {
#define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE
+/* Add BPF_LINK_TYPE(type, name) in bpf_types.h to keep bpf_link_type_strs[]
+ * in sync with the definitions below.
+ */
enum bpf_link_type {
BPF_LINK_TYPE_UNSPEC = 0,
BPF_LINK_TYPE_RAW_TRACEPOINT = 1,
diff --git a/tools/include/uapi/linux/const.h b/tools/include/uapi/linux/const.h
index a429381e7ca5..e16be0d37746 100644
--- a/tools/include/uapi/linux/const.h
+++ b/tools/include/uapi/linux/const.h
@@ -28,6 +28,23 @@
#define _BITUL(x) (_UL(1) << (x))
#define _BITULL(x) (_ULL(1) << (x))
+#if !defined(__ASSEMBLY__)
+/*
+ * Missing asm support
+ *
+ * __BIT128() would not work in asm code, as it shifts an
+ * 'unsigned __int128' data type; a direct representation of
+ * 128 bit constants is not supported by the gcc compiler, as
+ * they would get silently truncated.
+ *
+ * TODO: Please revisit this implementation once the gcc compiler
+ * can represent 128 bit constants directly, like long and
+ * unsigned long. Then drop the corresponding comment for
+ * GENMASK_U128(), which would start supporting asm code.
+ */
+#define _BIT128(x) ((unsigned __int128)(1) << (x))
+#endif
+
#define __ALIGN_KERNEL(x, a) __ALIGN_KERNEL_MASK(x, (__typeof__(x))(a) - 1)
#define __ALIGN_KERNEL_MASK(x, mask) (((x) + (mask)) & ~(mask))
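
Taken together, the two hunks above implement GENMASK_U128(h, l) as ((_BIT128(h) << 1) - _BIT128(l)), i.e. all bits h..l inclusive. A small stand-alone sketch (hypothetical, assuming a gcc/clang host where 'unsigned __int128' is available):

/* Hypothetical check of the 128-bit mask arithmetic: arithmetic on
 * unsigned __int128 works fine, it is only 128-bit *literals* that
 * the compiler cannot represent.
 */
#define _BIT128(x)		((unsigned __int128)(1) << (x))
#define __GENMASK_U128(h, l)	((_BIT128(h) << 1) - (_BIT128(l)))

_Static_assert(__GENMASK_U128(3, 1) == 0xe, "bits 3..1 set");
_Static_assert((unsigned long long)(__GENMASK_U128(127, 64) >> 64) == ~0ULL,
	       "bits 127..64 set");
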
diff --git a/tools/include/vdso/unaligned.h b/tools/include/vdso/unaligned.h
new file mode 100644
index 000000000000..eee3d2a4dbe4
--- /dev/null
+++ b/tools/include/vdso/unaligned.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __VDSO_UNALIGNED_H
+#define __VDSO_UNALIGNED_H
+
+#define __get_unaligned_t(type, ptr) ({ \
+ const struct { type x; } __packed *__pptr = (typeof(__pptr))(ptr); \
+ __pptr->x; \
+})
+
+#define __put_unaligned_t(type, val, ptr) do { \
+ struct { type x; } __packed *__pptr = (typeof(__pptr))(ptr); \
+ __pptr->x = (val); \
+} while (0)
+
+#endif /* __VDSO_UNALIGNED_H */
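
A short usage sketch (hypothetical, not part of this patch) of why the __packed wrapper matters: it makes the compiler emit a safe, possibly byte-wise, load instead of a plain dereference that could fault on strict-alignment architectures:

/* Hypothetical caller of the helper moved into vdso/unaligned.h. */
#include <stddef.h>
#include <stdint.h>

#define __packed __attribute__((__packed__))

#define __get_unaligned_t(type, ptr) ({					\
	const struct { type x; } __packed *__pptr = (__typeof__(__pptr))(ptr); \
	__pptr->x;							\
})

static uint32_t read_u32(const unsigned char *buf, size_t off)
{
	return __get_unaligned_t(uint32_t, buf + off);	/* off may be odd */
}
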
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index 4ddb27a48eed..d4332675babb 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -704,8 +704,8 @@ ifeq ($(BUILD_BPF_SKEL),1)
BUILD_BPF_SKEL := 0
else
CLANG_VERSION := $(shell $(CLANG) --version | head -1 | sed 's/.*clang version \([[:digit:]]\+.[[:digit:]]\+.[[:digit:]]\+\).*/\1/g')
- ifeq ($(call version-lt3,$(CLANG_VERSION),16.0.6),1)
- $(warning Warning: Disabled BPF skeletons as at least $(CLANG) version 16.0.6 is reported to be a working setup with the current of BPF based perf features)
+ ifeq ($(call version-lt3,$(CLANG_VERSION),12.0.1),1)
+ $(warning Warning: Disabled BPF skeletons as reliable BTF generation needs at least $(CLANG) version 12.0.1)
BUILD_BPF_SKEL := 0
endif
endif
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index f6e847529073..d3f11b90d025 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -1399,7 +1399,7 @@ static const struct syscall_fmt syscall_fmts[] = {
.arg = { [2] = { .scnprintf = SCA_WAITID_OPTIONS, /* options */ }, }, },
{ .name = "waitid", .errpid = true,
.arg = { [3] = { .scnprintf = SCA_WAITID_OPTIONS, /* options */ }, }, },
- { .name = "write", .errpid = true,
+ { .name = "write",
.arg = { [1] = { .scnprintf = SCA_BUF /* buf */, .from_user = true, }, }, },
};
diff --git a/tools/perf/check-headers.sh b/tools/perf/check-headers.sh
index 29adbb423327..a05c1c105c51 100755
--- a/tools/perf/check-headers.sh
+++ b/tools/perf/check-headers.sh
@@ -22,6 +22,7 @@ FILES=(
"include/vdso/bits.h"
"include/linux/const.h"
"include/vdso/const.h"
+ "include/vdso/unaligned.h"
"include/linux/hash.h"
"include/linux/list-sort.h"
"include/uapi/linux/hw_breakpoint.h"
diff --git a/tools/perf/tests/shell/base_probe/test_adding_blacklisted.sh b/tools/perf/tests/shell/base_probe/test_adding_blacklisted.sh
index b5dc10b2a738..bead723e34af 100755
--- a/tools/perf/tests/shell/base_probe/test_adding_blacklisted.sh
+++ b/tools/perf/tests/shell/base_probe/test_adding_blacklisted.sh
@@ -19,35 +19,74 @@
TEST_RESULT=0
# skip if not supported
-BLACKFUNC=`head -n 1 /sys/kernel/debug/kprobes/blacklist 2> /dev/null | cut -f2`
-if [ -z "$BLACKFUNC" ]; then
+BLACKFUNC_LIST=`head -n 5 /sys/kernel/debug/kprobes/blacklist 2> /dev/null | cut -f2`
+if [ -z "$BLACKFUNC_LIST" ]; then
print_overall_skipped
exit 0
fi
+# try to find vmlinux with DWARF debug info
+VMLINUX_FILE=$(perf probe -v random_probe |& grep "Using.*for symbols" | sed -r 's/^Using (.*) for symbols$/\1/')
+
# remove all previously added probes
clear_all_probes
### adding blacklisted function
-
-# functions from blacklist should be skipped by perf probe
-! $CMD_PERF probe $BLACKFUNC > $LOGS_DIR/adding_blacklisted.log 2> $LOGS_DIR/adding_blacklisted.err
-PERF_EXIT_CODE=$?
-
REGEX_SCOPE_FAIL="Failed to find scope of probe point"
REGEX_SKIP_MESSAGE=" is blacklisted function, skip it\."
-REGEX_NOT_FOUND_MESSAGE="Probe point \'$BLACKFUNC\' not found."
+REGEX_NOT_FOUND_MESSAGE="Probe point \'$RE_EVENT\' not found."
REGEX_ERROR_MESSAGE="Error: Failed to add events."
REGEX_INVALID_ARGUMENT="Failed to write event: Invalid argument"
REGEX_SYMBOL_FAIL="Failed to find symbol at $RE_ADDRESS"
-REGEX_OUT_SECTION="$BLACKFUNC is out of \.\w+, skip it"
-../common/check_all_lines_matched.pl "$REGEX_SKIP_MESSAGE" "$REGEX_NOT_FOUND_MESSAGE" "$REGEX_ERROR_MESSAGE" "$REGEX_SCOPE_FAIL" "$REGEX_INVALID_ARGUMENT" "$REGEX_SYMBOL_FAIL" "$REGEX_OUT_SECTION" < $LOGS_DIR/adding_blacklisted.err
-CHECK_EXIT_CODE=$?
-
-print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "adding blacklisted function $BLACKFUNC"
-(( TEST_RESULT += $? ))
-
+REGEX_OUT_SECTION="$RE_EVENT is out of \.\w+, skip it"
+REGEX_MISSING_DECL_LINE="A function DIE doesn't have decl_line. Maybe broken DWARF?"
+
+BLACKFUNC=""
+SKIP_DWARF=0
+
+for BLACKFUNC in $BLACKFUNC_LIST; do
+ echo "Probing $BLACKFUNC"
+
+ # functions from blacklist should be skipped by perf probe
+ ! $CMD_PERF probe $BLACKFUNC > $LOGS_DIR/adding_blacklisted.log 2> $LOGS_DIR/adding_blacklisted.err
+ PERF_EXIT_CODE=$?
+
+ # check for bad DWARF polluting the result
+ ../common/check_all_patterns_found.pl "$REGEX_MISSING_DECL_LINE" >/dev/null < $LOGS_DIR/adding_blacklisted.err
+
+ if [ $? -eq 0 ]; then
+ SKIP_DWARF=1
+ echo "Result polluted by broken DWARF, trying another probe"
+
+ # confirm that the broken DWARF comes from assembler
+ if [ -n "$VMLINUX_FILE" ]; then
+ readelf -wi "$VMLINUX_FILE" |
+ awk -v probe="$BLACKFUNC" '/DW_AT_language/ { comp_lang = $0 }
+ $0 ~ probe { if (comp_lang) { print comp_lang }; exit }' |
+ grep -q "MIPS assembler"
+
+ CHECK_EXIT_CODE=$?
+ if [ $CHECK_EXIT_CODE -ne 0 ]; then
+				SKIP_DWARF=0 # broken DWARF despite vmlinux being available
+ break
+ fi
+ fi
+ else
+ ../common/check_all_lines_matched.pl "$REGEX_SKIP_MESSAGE" "$REGEX_NOT_FOUND_MESSAGE" "$REGEX_ERROR_MESSAGE" "$REGEX_SCOPE_FAIL" "$REGEX_INVALID_ARGUMENT" "$REGEX_SYMBOL_FAIL" "$REGEX_OUT_SECTION" < $LOGS_DIR/adding_blacklisted.err
+ CHECK_EXIT_CODE=$?
+
+ SKIP_DWARF=0
+ break
+ fi
+done
+
+if [ $SKIP_DWARF -eq 1 ]; then
+ print_testcase_skipped "adding blacklisted function $BLACKFUNC"
+else
+ print_results $PERF_EXIT_CODE $CHECK_EXIT_CODE "adding blacklisted function $BLACKFUNC"
+ (( TEST_RESULT += $? ))
+fi
### listing not-added probe
diff --git a/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c b/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c
index b2f17cca014b..4a62ed593e84 100644
--- a/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c
+++ b/tools/perf/util/bpf_skel/augmented_raw_syscalls.bpf.c
@@ -288,6 +288,10 @@ int sys_enter_rename(struct syscall_enter_args *args)
augmented_args->arg.size = PERF_ALIGN(oldpath_len + 1, sizeof(u64));
len += augmented_args->arg.size;
+ /* Every read from userspace is limited to value size */
+ if (augmented_args->arg.size > sizeof(augmented_args->arg.value))
+ return 1; /* Failure: don't filter */
+
struct augmented_arg *arg2 = (void *)&augmented_args->arg.value + augmented_args->arg.size;
newpath_len = augmented_arg__read_str(arg2, newpath_arg, sizeof(augmented_args->arg.value));
@@ -315,6 +319,10 @@ int sys_enter_renameat2(struct syscall_enter_args *args)
augmented_args->arg.size = PERF_ALIGN(oldpath_len + 1, sizeof(u64));
len += augmented_args->arg.size;
+ /* Every read from userspace is limited to value size */
+ if (augmented_args->arg.size > sizeof(augmented_args->arg.value))
+ return 1; /* Failure: don't filter */
+
struct augmented_arg *arg2 = (void *)&augmented_args->arg.value + augmented_args->arg.size;
newpath_len = augmented_arg__read_str(arg2, newpath_arg, sizeof(augmented_args->arg.value));
@@ -423,8 +431,9 @@ static bool pid_filter__has(struct pids_filtered *pids, pid_t pid)
static int augment_sys_enter(void *ctx, struct syscall_enter_args *args)
{
bool augmented, do_output = false;
- int zero = 0, size, aug_size, index, output = 0,
+ int zero = 0, size, aug_size, index,
value_size = sizeof(struct augmented_arg) - offsetof(struct augmented_arg, value);
+ u64 output = 0; /* has to be u64, otherwise it won't pass the verifier */
unsigned int nr, *beauty_map;
struct beauty_payload_enter *payload;
void *arg, *payload_offset;
@@ -477,6 +486,8 @@ static int augment_sys_enter(void *ctx, struct syscall_enter_args *args)
augmented = true;
} else if (size < 0 && size >= -6) { /* buffer */
index = -(size + 1);
+ barrier_var(index); // Prevent clang (noticed with v18) from removing the &= 7 trick.
+ index &= 7; // Satisfy the bounds checking with the verifier in some kernels.
aug_size = args->args[index];
if (aug_size > TRACE_AUG_MAX_BUF)
@@ -488,10 +499,17 @@ static int augment_sys_enter(void *ctx, struct syscall_enter_args *args)
}
}
+ /* Augmented data size is limited to sizeof(augmented_arg->unnamed union with value field) */
+ if (aug_size > value_size)
+ aug_size = value_size;
+
/* write data to payload */
if (augmented) {
int written = offsetof(struct augmented_arg, value) + aug_size;
+ if (written < 0 || written > sizeof(struct augmented_arg))
+ return 1;
+
((struct augmented_arg *)payload_offset)->size = aug_size;
output += written;
payload_offset += written;
@@ -499,7 +517,7 @@ static int augment_sys_enter(void *ctx, struct syscall_enter_args *args)
}
}
- if (!do_output)
+ if (!do_output || (sizeof(struct syscall_enter_args) + output) > sizeof(struct beauty_payload_enter))
return 1;
return augmented__beauty_output(ctx, payload, sizeof(struct syscall_enter_args) + output);
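
The barrier_var()/'index &= 7' sequence above is a standard way to hand the verifier a provable bound. A reduced sketch of the pattern (hypothetical helper name; barrier_var() and struct syscall_enter_args come from the surrounding program):

/* 'size' encodes a buffer argument as a value in [-6, -1], so 'index'
 * is really in [0, 5]; the mask only exists to make a [0, 7] bound
 * provable, and barrier_var() stops clang from folding it away before
 * the verifier can see it.
 */
static __always_inline unsigned long bounded_arg(struct syscall_enter_args *args, int size)
{
	int index = -(size + 1);

	barrier_var(index);
	index &= 7;
	return args->args[index];
}
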
diff --git a/tools/perf/util/cap.c b/tools/perf/util/cap.c
index 7574a67651bc..69d9a2bcd40b 100644
--- a/tools/perf/util/cap.c
+++ b/tools/perf/util/cap.c
@@ -7,13 +7,9 @@
#include "debug.h"
#include <errno.h>
#include <string.h>
-#include <unistd.h>
#include <linux/capability.h>
#include <sys/syscall.h>
-
-#ifndef SYS_capget
-#define SYS_capget 90
-#endif
+#include <unistd.h>
#define MAX_LINUX_CAPABILITY_U32S _LINUX_CAPABILITY_U32S_3
@@ -21,9 +17,9 @@ bool perf_cap__capable(int cap, bool *used_root)
{
struct __user_cap_header_struct header = {
.version = _LINUX_CAPABILITY_VERSION_3,
- .pid = getpid(),
+ .pid = 0,
};
- struct __user_cap_data_struct data[MAX_LINUX_CAPABILITY_U32S];
+ struct __user_cap_data_struct data[MAX_LINUX_CAPABILITY_U32S] = {};
__u32 cap_val;
*used_root = false;
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
index 31a223eaf8e6..ee3d43a7ba45 100644
--- a/tools/perf/util/python.c
+++ b/tools/perf/util/python.c
@@ -19,6 +19,7 @@
#include "util/bpf-filter.h"
#include "util/env.h"
#include "util/kvm-stat.h"
+#include "util/stat.h"
#include "util/kwork.h"
#include "util/sample.h"
#include "util/lock-contention.h"
@@ -1355,6 +1356,7 @@ error:
unsigned int scripting_max_stack = PERF_MAX_STACK_DEPTH;
+#ifdef HAVE_KVM_STAT_SUPPORT
bool kvm_entry_event(struct evsel *evsel __maybe_unused)
{
return false;
@@ -1384,6 +1386,7 @@ void exit_event_decode_key(struct perf_kvm_stat *kvm __maybe_unused,
char *decode __maybe_unused)
{
}
+#endif // HAVE_KVM_STAT_SUPPORT
int find_scripts(char **scripts_array __maybe_unused, char **scripts_path_array __maybe_unused,
int num __maybe_unused, int pathlen __maybe_unused)
diff --git a/tools/perf/util/syscalltbl.c b/tools/perf/util/syscalltbl.c
index 7c15dec6900d..6c45ded922b6 100644
--- a/tools/perf/util/syscalltbl.c
+++ b/tools/perf/util/syscalltbl.c
@@ -46,6 +46,11 @@ static const char *const *syscalltbl_native = syscalltbl_mips_n64;
#include <asm/syscalls.c>
const int syscalltbl_native_max_id = SYSCALLTBL_LOONGARCH_MAX_ID;
static const char *const *syscalltbl_native = syscalltbl_loongarch;
+#else
+const int syscalltbl_native_max_id = 0;
+static const char *const syscalltbl_native[] = {
+ [0] = "unknown",
+};
#endif
struct syscall {
@@ -182,6 +187,11 @@ int syscalltbl__id(struct syscalltbl *tbl, const char *name)
return audit_name_to_syscall(name, tbl->audit_machine);
}
+int syscalltbl__id_at_idx(struct syscalltbl *tbl __maybe_unused, int idx)
+{
+ return idx;
+}
+
int syscalltbl__strglobmatch_next(struct syscalltbl *tbl __maybe_unused,
const char *syscall_glob __maybe_unused, int *idx __maybe_unused)
{
diff --git a/tools/sched_ext/include/scx/common.bpf.h b/tools/sched_ext/include/scx/common.bpf.h
index 27749c51c3ec..248ab790d143 100644
--- a/tools/sched_ext/include/scx/common.bpf.h
+++ b/tools/sched_ext/include/scx/common.bpf.h
@@ -320,7 +320,7 @@ u32 bpf_cpumask_weight(const struct cpumask *cpumask) __ksym;
/*
* Access a cpumask in read-only mode (typically to check bits).
*/
-const struct cpumask *cast_mask(struct bpf_cpumask *mask)
+static __always_inline const struct cpumask *cast_mask(struct bpf_cpumask *mask)
{
return (const struct cpumask *)mask;
}
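
A usage sketch (hypothetical caller; bpf_cpumask_test_cpu() is a kfunc declared in this same header): read-only cpumask kfuncs take a const struct cpumask *, so a trusted struct bpf_cpumask * has to pass through cast_mask() first, and making the helper static __always_inline keeps a stray global function out of every object that includes the header:

/* Hypothetical caller of cast_mask(). */
static __always_inline bool cpu_is_allowed(struct bpf_cpumask *mask, u32 cpu)
{
	const struct cpumask *ro = cast_mask(mask);

	return ro && bpf_cpumask_test_cpu(cpu, ro);
}
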
diff --git a/tools/testing/selftests/bpf/map_tests/lpm_trie_map_get_next_key.c b/tools/testing/selftests/bpf/map_tests/lpm_trie_map_get_next_key.c
new file mode 100644
index 000000000000..0ba015686492
--- /dev/null
+++ b/tools/testing/selftests/bpf/map_tests/lpm_trie_map_get_next_key.c
@@ -0,0 +1,109 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define _GNU_SOURCE
+#include <linux/bpf.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <unistd.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#include <pthread.h>
+
+#include <bpf/bpf.h>
+#include <bpf/libbpf.h>
+
+#include <test_maps.h>
+
+struct test_lpm_key {
+ __u32 prefix;
+ __u32 data;
+};
+
+struct get_next_key_ctx {
+ struct test_lpm_key key;
+ bool start;
+ bool stop;
+ int map_fd;
+ int loop;
+};
+
+static void *get_next_key_fn(void *arg)
+{
+ struct get_next_key_ctx *ctx = arg;
+ struct test_lpm_key next_key;
+ int i = 0;
+
+ while (!ctx->start)
+ usleep(1);
+
+ while (!ctx->stop && i++ < ctx->loop)
+ bpf_map_get_next_key(ctx->map_fd, &ctx->key, &next_key);
+
+ return NULL;
+}
+
+static void abort_get_next_key(struct get_next_key_ctx *ctx, pthread_t *tids,
+ unsigned int nr)
+{
+ unsigned int i;
+
+ ctx->stop = true;
+ ctx->start = true;
+ for (i = 0; i < nr; i++)
+ pthread_join(tids[i], NULL);
+}
+
+/* This test aims to prevent future regressions. As long as the kernel
+ * does not panic, the test is considered a success.
+ */
+void test_lpm_trie_map_get_next_key(void)
+{
+#define MAX_NR_THREADS 8
+ LIBBPF_OPTS(bpf_map_create_opts, create_opts,
+ .map_flags = BPF_F_NO_PREALLOC);
+ struct test_lpm_key key = {};
+ __u32 val = 0;
+ int map_fd;
+ const __u32 max_prefixlen = 8 * (sizeof(key) - sizeof(key.prefix));
+ const __u32 max_entries = max_prefixlen + 1;
+ unsigned int i, nr = MAX_NR_THREADS, loop = 65536;
+ pthread_t tids[MAX_NR_THREADS];
+ struct get_next_key_ctx ctx;
+ int err;
+
+ map_fd = bpf_map_create(BPF_MAP_TYPE_LPM_TRIE, "lpm_trie_map",
+ sizeof(struct test_lpm_key), sizeof(__u32),
+ max_entries, &create_opts);
+ CHECK(map_fd == -1, "bpf_map_create()", "error:%s\n",
+ strerror(errno));
+
+ for (i = 0; i <= max_prefixlen; i++) {
+ key.prefix = i;
+ err = bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);
+ CHECK(err, "bpf_map_update_elem()", "error:%s\n",
+ strerror(errno));
+ }
+
+ ctx.start = false;
+ ctx.stop = false;
+ ctx.map_fd = map_fd;
+ ctx.loop = loop;
+ memcpy(&ctx.key, &key, sizeof(key));
+
+ for (i = 0; i < nr; i++) {
+ err = pthread_create(&tids[i], NULL, get_next_key_fn, &ctx);
+ if (err) {
+ abort_get_next_key(&ctx, tids, i);
+ CHECK(err, "pthread_create", "error %d\n", err);
+ }
+ }
+
+ ctx.start = true;
+ for (i = 0; i < nr; i++)
+ pthread_join(tids[i], NULL);
+
+ printf("%s:PASS\n", __func__);
+
+ close(map_fd);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c
index 5356f26bbb3f..75f7a2ce334b 100644
--- a/tools/testing/selftests/bpf/prog_tests/verifier.c
+++ b/tools/testing/selftests/bpf/prog_tests/verifier.c
@@ -54,6 +54,7 @@
#include "verifier_masking.skel.h"
#include "verifier_meta_access.skel.h"
#include "verifier_movsx.skel.h"
+#include "verifier_mtu.skel.h"
#include "verifier_netfilter_ctx.skel.h"
#include "verifier_netfilter_retcode.skel.h"
#include "verifier_bpf_fastcall.skel.h"
@@ -223,6 +224,24 @@ void test_verifier_xdp_direct_packet_access(void) { RUN(verifier_xdp_direct_pack
void test_verifier_bits_iter(void) { RUN(verifier_bits_iter); }
void test_verifier_lsm(void) { RUN(verifier_lsm); }
+void test_verifier_mtu(void)
+{
+ __u64 caps = 0;
+ int ret;
+
+	/* In case CAP_BPF and CAP_NET_ADMIN are not set */
+ ret = cap_enable_effective(1ULL << CAP_BPF | 1ULL << CAP_NET_ADMIN, &caps);
+ if (!ASSERT_OK(ret, "set_cap_bpf_cap_net_admin"))
+ return;
+ ret = cap_disable_effective(1ULL << CAP_SYS_ADMIN | 1ULL << CAP_PERFMON, NULL);
+ if (!ASSERT_OK(ret, "disable_cap_sys_admin"))
+ goto restore_cap;
+ RUN(verifier_mtu);
+restore_cap:
+ if (caps)
+ cap_enable_effective(caps, NULL);
+}
+
static int init_test_val_map(struct bpf_object *obj, char *map_name)
{
struct test_val value = {
diff --git a/tools/testing/selftests/bpf/progs/verifier_bits_iter.c b/tools/testing/selftests/bpf/progs/verifier_bits_iter.c
index f4da4d508ddb..156cc278e2fc 100644
--- a/tools/testing/selftests/bpf/progs/verifier_bits_iter.c
+++ b/tools/testing/selftests/bpf/progs/verifier_bits_iter.c
@@ -15,6 +15,8 @@ int bpf_iter_bits_new(struct bpf_iter_bits *it, const u64 *unsafe_ptr__ign,
int *bpf_iter_bits_next(struct bpf_iter_bits *it) __ksym __weak;
void bpf_iter_bits_destroy(struct bpf_iter_bits *it) __ksym __weak;
+u64 bits_array[511] = {};
+
SEC("iter.s/cgroup")
__description("bits iter without destroy")
__failure __msg("Unreleased reference")
@@ -110,16 +112,16 @@ int bit_index(void)
}
SEC("syscall")
-__description("bits nomem")
+__description("bits too big")
__success __retval(0)
-int bits_nomem(void)
+int bits_too_big(void)
{
u64 data[4];
int nr = 0;
int *bit;
__builtin_memset(&data, 0xff, sizeof(data));
- bpf_for_each(bits, bit, &data[0], 513) /* Be greater than 512 */
+ bpf_for_each(bits, bit, &data[0], 512) /* Be greater than 511 */
nr++;
return nr;
}
@@ -151,3 +153,56 @@ int zero_words(void)
nr++;
return nr;
}
+
+SEC("syscall")
+__description("huge words")
+__success __retval(0)
+int huge_words(void)
+{
+ u64 data[8] = {0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1};
+ int nr = 0;
+ int *bit;
+
+ bpf_for_each(bits, bit, &data[0], 67108865)
+ nr++;
+ return nr;
+}
+
+SEC("syscall")
+__description("max words")
+__success __retval(4)
+int max_words(void)
+{
+ volatile int nr = 0;
+ int *bit;
+
+ bits_array[0] = (1ULL << 63) | 1U;
+ bits_array[510] = (1ULL << 33) | (1ULL << 32);
+
+ bpf_for_each(bits, bit, bits_array, 511) {
+ if (nr == 0 && *bit != 0)
+ break;
+ if (nr == 2 && *bit != 32672)
+ break;
+ nr++;
+ }
+ return nr;
+}
+
+SEC("syscall")
+__description("bad words")
+__success __retval(0)
+int bad_words(void)
+{
+ void *bad_addr = (void *)(3UL << 30);
+ int nr = 0;
+ int *bit;
+
+ bpf_for_each(bits, bit, bad_addr, 1)
+ nr++;
+
+ bpf_for_each(bits, bit, bad_addr, 4)
+ nr++;
+
+ return nr;
+}
diff --git a/tools/testing/selftests/bpf/progs/verifier_bpf_fastcall.c b/tools/testing/selftests/bpf/progs/verifier_bpf_fastcall.c
index 9da97d2efcd9..5094c288cfd7 100644
--- a/tools/testing/selftests/bpf/progs/verifier_bpf_fastcall.c
+++ b/tools/testing/selftests/bpf/progs/verifier_bpf_fastcall.c
@@ -790,61 +790,6 @@ __naked static void cumulative_stack_depth_subprog(void)
:: __imm(bpf_get_smp_processor_id) : __clobber_all);
}
-SEC("raw_tp")
-__arch_x86_64
-__log_level(4)
-__msg("stack depth 512")
-__xlated("0: r1 = 42")
-__xlated("1: *(u64 *)(r10 -512) = r1")
-__xlated("2: w0 = ")
-__xlated("3: r0 = &(void __percpu *)(r0)")
-__xlated("4: r0 = *(u32 *)(r0 +0)")
-__xlated("5: exit")
-__success
-__naked int bpf_fastcall_max_stack_ok(void)
-{
- asm volatile(
- "r1 = 42;"
- "*(u64 *)(r10 - %[max_bpf_stack]) = r1;"
- "*(u64 *)(r10 - %[max_bpf_stack_8]) = r1;"
- "call %[bpf_get_smp_processor_id];"
- "r1 = *(u64 *)(r10 - %[max_bpf_stack_8]);"
- "exit;"
- :
- : __imm_const(max_bpf_stack, MAX_BPF_STACK),
- __imm_const(max_bpf_stack_8, MAX_BPF_STACK + 8),
- __imm(bpf_get_smp_processor_id)
- : __clobber_all
- );
-}
-
-SEC("raw_tp")
-__arch_x86_64
-__log_level(4)
-__msg("stack depth 520")
-__failure
-__naked int bpf_fastcall_max_stack_fail(void)
-{
- asm volatile(
- "r1 = 42;"
- "*(u64 *)(r10 - %[max_bpf_stack]) = r1;"
- "*(u64 *)(r10 - %[max_bpf_stack_8]) = r1;"
- "call %[bpf_get_smp_processor_id];"
- "r1 = *(u64 *)(r10 - %[max_bpf_stack_8]);"
- /* call to prandom blocks bpf_fastcall rewrite */
- "*(u64 *)(r10 - %[max_bpf_stack_8]) = r1;"
- "call %[bpf_get_prandom_u32];"
- "r1 = *(u64 *)(r10 - %[max_bpf_stack_8]);"
- "exit;"
- :
- : __imm_const(max_bpf_stack, MAX_BPF_STACK),
- __imm_const(max_bpf_stack_8, MAX_BPF_STACK + 8),
- __imm(bpf_get_smp_processor_id),
- __imm(bpf_get_prandom_u32)
- : __clobber_all
- );
-}
-
SEC("cgroup/getsockname_unix")
__xlated("0: r2 = 1")
/* bpf_cast_to_kern_ctx is replaced by a single assignment */
diff --git a/tools/testing/selftests/bpf/progs/verifier_const.c b/tools/testing/selftests/bpf/progs/verifier_const.c
index 2e533d7eec2f..e118dbb768bf 100644
--- a/tools/testing/selftests/bpf/progs/verifier_const.c
+++ b/tools/testing/selftests/bpf/progs/verifier_const.c
@@ -1,8 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2024 Isovalent */
-#include <linux/bpf.h>
+#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
#include "bpf_misc.h"
const volatile long foo = 42;
@@ -66,4 +67,32 @@ int tcx6(struct __sk_buff *skb)
return TCX_PASS;
}
+static inline void write_fixed(volatile void *p, __u32 val)
+{
+ *(volatile __u32 *)p = val;
+}
+
+static inline void write_dyn(void *p, void *val, int len)
+{
+ bpf_copy_from_user(p, len, val);
+}
+
+SEC("tc/ingress")
+__description("rodata/mark: write with unknown reg rejected")
+__failure __msg("write into map forbidden")
+int tcx7(struct __sk_buff *skb)
+{
+ write_fixed((void *)&foo, skb->mark);
+ return TCX_PASS;
+}
+
+SEC("lsm.s/bprm_committed_creds")
+__description("rodata/mark: write with unknown reg rejected")
+__failure __msg("write into map forbidden")
+int BPF_PROG(bprm, struct linux_binprm *bprm)
+{
+ write_dyn((void *)&foo, &bart, bpf_get_prandom_u32() & 3);
+ return 0;
+}
+
char LICENSE[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_mtu.c b/tools/testing/selftests/bpf/progs/verifier_mtu.c
new file mode 100644
index 000000000000..70c7600a26a0
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/verifier_mtu.c
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+SEC("tc/ingress")
+__description("uninit/mtu: write rejected")
+__failure __msg("invalid indirect read from stack")
+int tc_uninit_mtu(struct __sk_buff *ctx)
+{
+ __u32 mtu;
+
+ bpf_check_mtu(ctx, 0, &mtu, 0, 0);
+ return TCX_PASS;
+}
+
+char LICENSE[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/verifier_search_pruning.c b/tools/testing/selftests/bpf/progs/verifier_search_pruning.c
index 5a14498d352f..f40e57251e94 100644
--- a/tools/testing/selftests/bpf/progs/verifier_search_pruning.c
+++ b/tools/testing/selftests/bpf/progs/verifier_search_pruning.c
@@ -2,6 +2,7 @@
/* Converted from tools/testing/selftests/bpf/verifier/search_pruning.c */
#include <linux/bpf.h>
+#include <../../../include/linux/filter.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
@@ -336,4 +337,26 @@ l0_%=: r1 = 42; \
: __clobber_all);
}
+/* Without a checkpoint forcibly inserted at the back-edge of a loop,
+ * this test would take a very long time to verify.
+ */
+SEC("kprobe")
+__failure __log_level(4)
+__msg("BPF program is too large.")
+__naked void short_loop1(void)
+{
+ asm volatile (
+ " r7 = *(u16 *)(r1 +0);"
+ "1: r7 += 0x1ab064b9;"
+ " .8byte %[jset];" /* same as 'if r7 & 0x702000 goto 1b;' */
+ " r7 &= 0x1ee60e;"
+ " r7 += r1;"
+ " if r7 s> 0x37d2 goto +0;"
+ " r0 = 0;"
+ " exit;"
+ :
+ : __imm_insn(jset, BPF_JMP_IMM(BPF_JSET, BPF_REG_7, 0x702000, -2))
+ : __clobber_all);
+}
+
char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/veristat.cfg b/tools/testing/selftests/bpf/veristat.cfg
index 1a385061618d..e661ffdcaadf 100644
--- a/tools/testing/selftests/bpf/veristat.cfg
+++ b/tools/testing/selftests/bpf/veristat.cfg
@@ -15,3 +15,4 @@ test_usdt*
test_verif_scale*
test_xdp_noinline*
xdp_synproxy*
+verifier_search_pruning*
diff --git a/tools/testing/selftests/mm/uffd-common.c b/tools/testing/selftests/mm/uffd-common.c
index 852e7281026e..717539eddf98 100644
--- a/tools/testing/selftests/mm/uffd-common.c
+++ b/tools/testing/selftests/mm/uffd-common.c
@@ -18,7 +18,7 @@ bool test_uffdio_wp = true;
unsigned long long *count_verify;
uffd_test_ops_t *uffd_test_ops;
uffd_test_case_ops_t *uffd_test_case_ops;
-pthread_barrier_t ready_for_fork;
+atomic_bool ready_for_fork;
static int uffd_mem_fd_create(off_t mem_size, bool hugetlb)
{
@@ -519,8 +519,7 @@ void *uffd_poll_thread(void *arg)
pollfd[1].fd = pipefd[cpu*2];
pollfd[1].events = POLLIN;
- /* Ready for parent thread to fork */
- pthread_barrier_wait(&ready_for_fork);
+ ready_for_fork = true;
for (;;) {
ret = poll(pollfd, 2, -1);
diff --git a/tools/testing/selftests/mm/uffd-common.h b/tools/testing/selftests/mm/uffd-common.h
index 3e6228d8e0dc..a70ae10b5f62 100644
--- a/tools/testing/selftests/mm/uffd-common.h
+++ b/tools/testing/selftests/mm/uffd-common.h
@@ -33,6 +33,7 @@
#include <inttypes.h>
#include <stdint.h>
#include <sys/random.h>
+#include <stdatomic.h>
#include "../kselftest.h"
#include "vm_util.h"
@@ -104,7 +105,7 @@ extern bool map_shared;
extern bool test_uffdio_wp;
extern unsigned long long *count_verify;
extern volatile bool test_uffdio_copy_eexist;
-extern pthread_barrier_t ready_for_fork;
+extern atomic_bool ready_for_fork;
extern uffd_test_ops_t anon_uffd_test_ops;
extern uffd_test_ops_t shmem_uffd_test_ops;
diff --git a/tools/testing/selftests/mm/uffd-unit-tests.c b/tools/testing/selftests/mm/uffd-unit-tests.c
index c8a3b1c7edff..a2e71b1636e7 100644
--- a/tools/testing/selftests/mm/uffd-unit-tests.c
+++ b/tools/testing/selftests/mm/uffd-unit-tests.c
@@ -241,8 +241,7 @@ static void *fork_event_consumer(void *data)
fork_event_args *args = data;
struct uffd_msg msg = { 0 };
- /* Ready for parent thread to fork */
- pthread_barrier_wait(&ready_for_fork);
+ ready_for_fork = true;
/* Read until a full msg received */
while (uffd_read_msg(args->parent_uffd, &msg));
@@ -311,12 +310,11 @@ static int pagemap_test_fork(int uffd, bool with_event, bool test_pin)
/* Prepare a thread to resolve EVENT_FORK */
if (with_event) {
- pthread_barrier_init(&ready_for_fork, NULL, 2);
+ ready_for_fork = false;
if (pthread_create(&thread, NULL, fork_event_consumer, &args))
err("pthread_create()");
- /* Wait for child thread to start before forking */
- pthread_barrier_wait(&ready_for_fork);
- pthread_barrier_destroy(&ready_for_fork);
+ while (!ready_for_fork)
+		; /* Wait for fork_event_consumer to start before forking */
}
child = fork();
@@ -781,7 +779,7 @@ static void uffd_sigbus_test_common(bool wp)
char c;
struct uffd_args args = { 0 };
- pthread_barrier_init(&ready_for_fork, NULL, 2);
+ ready_for_fork = false;
fcntl(uffd, F_SETFL, uffd_flags | O_NONBLOCK);
@@ -798,9 +796,8 @@ static void uffd_sigbus_test_common(bool wp)
if (pthread_create(&uffd_mon, NULL, uffd_poll_thread, &args))
err("uffd_poll_thread create");
- /* Wait for child thread to start before forking */
- pthread_barrier_wait(&ready_for_fork);
- pthread_barrier_destroy(&ready_for_fork);
+ while (!ready_for_fork)
+ ; /* Wait for the poll_thread to start executing before forking */
pid = fork();
if (pid < 0)
@@ -841,7 +838,7 @@ static void uffd_events_test_common(bool wp)
char c;
struct uffd_args args = { 0 };
- pthread_barrier_init(&ready_for_fork, NULL, 2);
+ ready_for_fork = false;
fcntl(uffd, F_SETFL, uffd_flags | O_NONBLOCK);
if (uffd_register(uffd, area_dst, nr_pages * page_size,
@@ -852,9 +849,8 @@ static void uffd_events_test_common(bool wp)
if (pthread_create(&uffd_mon, NULL, uffd_poll_thread, &args))
err("uffd_poll_thread create");
- /* Wait for child thread to start before forking */
- pthread_barrier_wait(&ready_for_fork);
- pthread_barrier_destroy(&ready_for_fork);
+ while (!ready_for_fork)
+ ; /* Wait for the poll_thread to start executing before forking */
pid = fork();
if (pid < 0)
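
The barrier-to-flag conversion repeated in the hunks above reduces to a simple one-shot handshake; a minimal stand-alone sketch (hypothetical, C11 <stdatomic.h>), with no pthread_barrier_t left around to interact with the later fork():

/* Hypothetical reduction of the handshake: the worker publishes that
 * it has started, the parent spins until then and only then forks.
 */
#include <pthread.h>
#include <stdatomic.h>

static atomic_bool ready_for_fork;

static void *worker(void *arg)
{
	ready_for_fork = true;		/* stands in for uffd_poll_thread() */
	return NULL;
}

int main(void)
{
	pthread_t tid;

	ready_for_fork = false;
	if (pthread_create(&tid, NULL, worker, NULL))
		return 1;
	while (!ready_for_fork)
		;			/* busy-wait; now it is safe to fork() */
	/* fork() would go here */
	pthread_join(tid, NULL);
	return 0;
}
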
diff --git a/tools/testing/selftests/sched_ext/Makefile b/tools/testing/selftests/sched_ext/Makefile
index 06ae9c107049..011762224600 100644
--- a/tools/testing/selftests/sched_ext/Makefile
+++ b/tools/testing/selftests/sched_ext/Makefile
@@ -184,7 +184,7 @@ auto-test-targets := \
testcase-targets := $(addsuffix .o,$(addprefix $(SCXOBJ_DIR)/,$(auto-test-targets)))
-$(SCXOBJ_DIR)/runner.o: runner.c | $(SCXOBJ_DIR)
+$(SCXOBJ_DIR)/runner.o: runner.c | $(SCXOBJ_DIR) $(BPFOBJ)
$(CC) $(CFLAGS) -c $< -o $@
# Create all of the test targets object files, whose testcase objects will be
diff --git a/tools/testing/selftests/sched_ext/create_dsq.bpf.c b/tools/testing/selftests/sched_ext/create_dsq.bpf.c
index 23f79ed343f0..2cfc4ffd60e2 100644
--- a/tools/testing/selftests/sched_ext/create_dsq.bpf.c
+++ b/tools/testing/selftests/sched_ext/create_dsq.bpf.c
@@ -51,8 +51,8 @@ s32 BPF_STRUCT_OPS_SLEEPABLE(create_dsq_init)
SEC(".struct_ops.link")
struct sched_ext_ops create_dsq_ops = {
- .init_task = create_dsq_init_task,
- .exit_task = create_dsq_exit_task,
- .init = create_dsq_init,
+ .init_task = (void *) create_dsq_init_task,
+ .exit_task = (void *) create_dsq_exit_task,
+ .init = (void *) create_dsq_init,
.name = "create_dsq",
};
diff --git a/tools/testing/selftests/sched_ext/ddsp_bogus_dsq_fail.bpf.c b/tools/testing/selftests/sched_ext/ddsp_bogus_dsq_fail.bpf.c
index e97ad41d354a..37d9bf6fb745 100644
--- a/tools/testing/selftests/sched_ext/ddsp_bogus_dsq_fail.bpf.c
+++ b/tools/testing/selftests/sched_ext/ddsp_bogus_dsq_fail.bpf.c
@@ -35,8 +35,8 @@ void BPF_STRUCT_OPS(ddsp_bogus_dsq_fail_exit, struct scx_exit_info *ei)
SEC(".struct_ops.link")
struct sched_ext_ops ddsp_bogus_dsq_fail_ops = {
- .select_cpu = ddsp_bogus_dsq_fail_select_cpu,
- .exit = ddsp_bogus_dsq_fail_exit,
+ .select_cpu = (void *) ddsp_bogus_dsq_fail_select_cpu,
+ .exit = (void *) ddsp_bogus_dsq_fail_exit,
.name = "ddsp_bogus_dsq_fail",
.timeout_ms = 1000U,
};
diff --git a/tools/testing/selftests/sched_ext/ddsp_vtimelocal_fail.bpf.c b/tools/testing/selftests/sched_ext/ddsp_vtimelocal_fail.bpf.c
index dde7e7dafbfb..dffc97d9cdf1 100644
--- a/tools/testing/selftests/sched_ext/ddsp_vtimelocal_fail.bpf.c
+++ b/tools/testing/selftests/sched_ext/ddsp_vtimelocal_fail.bpf.c
@@ -32,8 +32,8 @@ void BPF_STRUCT_OPS(ddsp_vtimelocal_fail_exit, struct scx_exit_info *ei)
SEC(".struct_ops.link")
struct sched_ext_ops ddsp_vtimelocal_fail_ops = {
- .select_cpu = ddsp_vtimelocal_fail_select_cpu,
- .exit = ddsp_vtimelocal_fail_exit,
+ .select_cpu = (void *) ddsp_vtimelocal_fail_select_cpu,
+ .exit = (void *) ddsp_vtimelocal_fail_exit,
.name = "ddsp_vtimelocal_fail",
.timeout_ms = 1000U,
};
diff --git a/tools/testing/selftests/sched_ext/dsp_local_on.bpf.c b/tools/testing/selftests/sched_ext/dsp_local_on.bpf.c
index efb4672decb4..6a7db1502c29 100644
--- a/tools/testing/selftests/sched_ext/dsp_local_on.bpf.c
+++ b/tools/testing/selftests/sched_ext/dsp_local_on.bpf.c
@@ -56,10 +56,10 @@ void BPF_STRUCT_OPS(dsp_local_on_exit, struct scx_exit_info *ei)
SEC(".struct_ops.link")
struct sched_ext_ops dsp_local_on_ops = {
- .select_cpu = dsp_local_on_select_cpu,
- .enqueue = dsp_local_on_enqueue,
- .dispatch = dsp_local_on_dispatch,
- .exit = dsp_local_on_exit,
+ .select_cpu = (void *) dsp_local_on_select_cpu,
+ .enqueue = (void *) dsp_local_on_enqueue,
+ .dispatch = (void *) dsp_local_on_dispatch,
+ .exit = (void *) dsp_local_on_exit,
.name = "dsp_local_on",
.timeout_ms = 1000U,
};
diff --git a/tools/testing/selftests/sched_ext/enq_last_no_enq_fails.bpf.c b/tools/testing/selftests/sched_ext/enq_last_no_enq_fails.bpf.c
index b0b99531d5d5..e1bd13e48889 100644
--- a/tools/testing/selftests/sched_ext/enq_last_no_enq_fails.bpf.c
+++ b/tools/testing/selftests/sched_ext/enq_last_no_enq_fails.bpf.c
@@ -12,10 +12,18 @@
char _license[] SEC("license") = "GPL";
+u32 exit_kind;
+
+void BPF_STRUCT_OPS_SLEEPABLE(enq_last_no_enq_fails_exit, struct scx_exit_info *info)
+{
+ exit_kind = info->kind;
+}
+
SEC(".struct_ops.link")
struct sched_ext_ops enq_last_no_enq_fails_ops = {
.name = "enq_last_no_enq_fails",
/* Need to define ops.enqueue() with SCX_OPS_ENQ_LAST */
.flags = SCX_OPS_ENQ_LAST,
+ .exit = (void *) enq_last_no_enq_fails_exit,
.timeout_ms = 1000U,
};
diff --git a/tools/testing/selftests/sched_ext/enq_last_no_enq_fails.c b/tools/testing/selftests/sched_ext/enq_last_no_enq_fails.c
index 2a3eda5e2c0b..73e679953e27 100644
--- a/tools/testing/selftests/sched_ext/enq_last_no_enq_fails.c
+++ b/tools/testing/selftests/sched_ext/enq_last_no_enq_fails.c
@@ -31,8 +31,12 @@ static enum scx_test_status run(void *ctx)
struct bpf_link *link;
link = bpf_map__attach_struct_ops(skel->maps.enq_last_no_enq_fails_ops);
- if (link) {
- SCX_ERR("Incorrectly succeeded in to attaching scheduler");
+ if (!link) {
+		SCX_ERR("Incorrectly failed to attach scheduler");
+ return SCX_TEST_FAIL;
+ }
+ if (!skel->bss->exit_kind) {
+ SCX_ERR("Incorrectly stayed loaded");
return SCX_TEST_FAIL;
}
@@ -50,7 +54,7 @@ static void cleanup(void *ctx)
struct scx_test enq_last_no_enq_fails = {
.name = "enq_last_no_enq_fails",
- .description = "Verify we fail to load a scheduler if we specify "
+ .description = "Verify we eject a scheduler if we specify "
"the SCX_OPS_ENQ_LAST flag without defining "
"ops.enqueue()",
.setup = setup,
diff --git a/tools/testing/selftests/sched_ext/enq_select_cpu_fails.bpf.c b/tools/testing/selftests/sched_ext/enq_select_cpu_fails.bpf.c
index b3dfc1033cd6..1efb50d61040 100644
--- a/tools/testing/selftests/sched_ext/enq_select_cpu_fails.bpf.c
+++ b/tools/testing/selftests/sched_ext/enq_select_cpu_fails.bpf.c
@@ -36,8 +36,8 @@ void BPF_STRUCT_OPS(enq_select_cpu_fails_enqueue, struct task_struct *p,
SEC(".struct_ops.link")
struct sched_ext_ops enq_select_cpu_fails_ops = {
- .select_cpu = enq_select_cpu_fails_select_cpu,
- .enqueue = enq_select_cpu_fails_enqueue,
+ .select_cpu = (void *) enq_select_cpu_fails_select_cpu,
+ .enqueue = (void *) enq_select_cpu_fails_enqueue,
.name = "enq_select_cpu_fails",
.timeout_ms = 1000U,
};
diff --git a/tools/testing/selftests/sched_ext/exit.bpf.c b/tools/testing/selftests/sched_ext/exit.bpf.c
index ae12ddaac921..d75d4faf07f6 100644
--- a/tools/testing/selftests/sched_ext/exit.bpf.c
+++ b/tools/testing/selftests/sched_ext/exit.bpf.c
@@ -15,6 +15,8 @@ UEI_DEFINE(uei);
#define EXIT_CLEANLY() scx_bpf_exit(exit_point, "%d", exit_point)
+#define DSQ_ID 0
+
s32 BPF_STRUCT_OPS(exit_select_cpu, struct task_struct *p,
s32 prev_cpu, u64 wake_flags)
{
@@ -31,7 +33,7 @@ void BPF_STRUCT_OPS(exit_enqueue, struct task_struct *p, u64 enq_flags)
if (exit_point == EXIT_ENQUEUE)
EXIT_CLEANLY();
- scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
+ scx_bpf_dispatch(p, DSQ_ID, SCX_SLICE_DFL, enq_flags);
}
void BPF_STRUCT_OPS(exit_dispatch, s32 cpu, struct task_struct *p)
@@ -39,7 +41,7 @@ void BPF_STRUCT_OPS(exit_dispatch, s32 cpu, struct task_struct *p)
if (exit_point == EXIT_DISPATCH)
EXIT_CLEANLY();
- scx_bpf_consume(SCX_DSQ_GLOBAL);
+ scx_bpf_consume(DSQ_ID);
}
void BPF_STRUCT_OPS(exit_enable, struct task_struct *p)
@@ -67,18 +69,18 @@ s32 BPF_STRUCT_OPS_SLEEPABLE(exit_init)
if (exit_point == EXIT_INIT)
EXIT_CLEANLY();
- return 0;
+ return scx_bpf_create_dsq(DSQ_ID, -1);
}
SEC(".struct_ops.link")
struct sched_ext_ops exit_ops = {
- .select_cpu = exit_select_cpu,
- .enqueue = exit_enqueue,
- .dispatch = exit_dispatch,
- .init_task = exit_init_task,
- .enable = exit_enable,
- .exit = exit_exit,
- .init = exit_init,
+ .select_cpu = (void *) exit_select_cpu,
+ .enqueue = (void *) exit_enqueue,
+ .dispatch = (void *) exit_dispatch,
+ .init_task = (void *) exit_init_task,
+ .enable = (void *) exit_enable,
+ .exit = (void *) exit_exit,
+ .init = (void *) exit_init,
.name = "exit",
.timeout_ms = 1000U,
};
diff --git a/tools/testing/selftests/sched_ext/hotplug.bpf.c b/tools/testing/selftests/sched_ext/hotplug.bpf.c
index 8f2601db39f3..6c9f25c9bf53 100644
--- a/tools/testing/selftests/sched_ext/hotplug.bpf.c
+++ b/tools/testing/selftests/sched_ext/hotplug.bpf.c
@@ -46,16 +46,16 @@ void BPF_STRUCT_OPS_SLEEPABLE(hotplug_cpu_offline, s32 cpu)
SEC(".struct_ops.link")
struct sched_ext_ops hotplug_cb_ops = {
- .cpu_online = hotplug_cpu_online,
- .cpu_offline = hotplug_cpu_offline,
- .exit = hotplug_exit,
+ .cpu_online = (void *) hotplug_cpu_online,
+ .cpu_offline = (void *) hotplug_cpu_offline,
+ .exit = (void *) hotplug_exit,
.name = "hotplug_cbs",
.timeout_ms = 1000U,
};
SEC(".struct_ops.link")
struct sched_ext_ops hotplug_nocb_ops = {
- .exit = hotplug_exit,
+ .exit = (void *) hotplug_exit,
.name = "hotplug_nocbs",
.timeout_ms = 1000U,
};
diff --git a/tools/testing/selftests/sched_ext/init_enable_count.bpf.c b/tools/testing/selftests/sched_ext/init_enable_count.bpf.c
index 47ea89a626c3..5eb9edb1837d 100644
--- a/tools/testing/selftests/sched_ext/init_enable_count.bpf.c
+++ b/tools/testing/selftests/sched_ext/init_enable_count.bpf.c
@@ -45,9 +45,9 @@ void BPF_STRUCT_OPS(cnt_disable, struct task_struct *p)
SEC(".struct_ops.link")
struct sched_ext_ops init_enable_count_ops = {
- .init_task = cnt_init_task,
- .exit_task = cnt_exit_task,
- .enable = cnt_enable,
- .disable = cnt_disable,
+ .init_task = (void *) cnt_init_task,
+ .exit_task = (void *) cnt_exit_task,
+ .enable = (void *) cnt_enable,
+ .disable = (void *) cnt_disable,
.name = "init_enable_count",
};
diff --git a/tools/testing/selftests/sched_ext/maximal.bpf.c b/tools/testing/selftests/sched_ext/maximal.bpf.c
index 00bfa9cb95d3..4d4cd8d966db 100644
--- a/tools/testing/selftests/sched_ext/maximal.bpf.c
+++ b/tools/testing/selftests/sched_ext/maximal.bpf.c
@@ -131,34 +131,34 @@ void BPF_STRUCT_OPS(maximal_exit, struct scx_exit_info *info)
SEC(".struct_ops.link")
struct sched_ext_ops maximal_ops = {
- .select_cpu = maximal_select_cpu,
- .enqueue = maximal_enqueue,
- .dequeue = maximal_dequeue,
- .dispatch = maximal_dispatch,
- .runnable = maximal_runnable,
- .running = maximal_running,
- .stopping = maximal_stopping,
- .quiescent = maximal_quiescent,
- .yield = maximal_yield,
- .core_sched_before = maximal_core_sched_before,
- .set_weight = maximal_set_weight,
- .set_cpumask = maximal_set_cpumask,
- .update_idle = maximal_update_idle,
- .cpu_acquire = maximal_cpu_acquire,
- .cpu_release = maximal_cpu_release,
- .cpu_online = maximal_cpu_online,
- .cpu_offline = maximal_cpu_offline,
- .init_task = maximal_init_task,
- .enable = maximal_enable,
- .exit_task = maximal_exit_task,
- .disable = maximal_disable,
- .cgroup_init = maximal_cgroup_init,
- .cgroup_exit = maximal_cgroup_exit,
- .cgroup_prep_move = maximal_cgroup_prep_move,
- .cgroup_move = maximal_cgroup_move,
- .cgroup_cancel_move = maximal_cgroup_cancel_move,
- .cgroup_set_weight = maximal_cgroup_set_weight,
- .init = maximal_init,
- .exit = maximal_exit,
+ .select_cpu = (void *) maximal_select_cpu,
+ .enqueue = (void *) maximal_enqueue,
+ .dequeue = (void *) maximal_dequeue,
+ .dispatch = (void *) maximal_dispatch,
+ .runnable = (void *) maximal_runnable,
+ .running = (void *) maximal_running,
+ .stopping = (void *) maximal_stopping,
+ .quiescent = (void *) maximal_quiescent,
+ .yield = (void *) maximal_yield,
+ .core_sched_before = (void *) maximal_core_sched_before,
+ .set_weight = (void *) maximal_set_weight,
+ .set_cpumask = (void *) maximal_set_cpumask,
+ .update_idle = (void *) maximal_update_idle,
+ .cpu_acquire = (void *) maximal_cpu_acquire,
+ .cpu_release = (void *) maximal_cpu_release,
+ .cpu_online = (void *) maximal_cpu_online,
+ .cpu_offline = (void *) maximal_cpu_offline,
+ .init_task = (void *) maximal_init_task,
+ .enable = (void *) maximal_enable,
+ .exit_task = (void *) maximal_exit_task,
+ .disable = (void *) maximal_disable,
+ .cgroup_init = (void *) maximal_cgroup_init,
+ .cgroup_exit = (void *) maximal_cgroup_exit,
+ .cgroup_prep_move = (void *) maximal_cgroup_prep_move,
+ .cgroup_move = (void *) maximal_cgroup_move,
+ .cgroup_cancel_move = (void *) maximal_cgroup_cancel_move,
+ .cgroup_set_weight = (void *) maximal_cgroup_set_weight,
+ .init = (void *) maximal_init,
+ .exit = (void *) maximal_exit,
.name = "maximal",
};
diff --git a/tools/testing/selftests/sched_ext/maybe_null.bpf.c b/tools/testing/selftests/sched_ext/maybe_null.bpf.c
index 27d0f386acfb..cf4ae870cd4e 100644
--- a/tools/testing/selftests/sched_ext/maybe_null.bpf.c
+++ b/tools/testing/selftests/sched_ext/maybe_null.bpf.c
@@ -29,8 +29,8 @@ bool BPF_STRUCT_OPS(maybe_null_success_yield, struct task_struct *from,
SEC(".struct_ops.link")
struct sched_ext_ops maybe_null_success = {
- .dispatch = maybe_null_success_dispatch,
- .yield = maybe_null_success_yield,
- .enable = maybe_null_running,
+ .dispatch = (void *) maybe_null_success_dispatch,
+ .yield = (void *) maybe_null_success_yield,
+ .enable = (void *) maybe_null_running,
.name = "minimal",
};
diff --git a/tools/testing/selftests/sched_ext/maybe_null_fail_dsp.bpf.c b/tools/testing/selftests/sched_ext/maybe_null_fail_dsp.bpf.c
index c0641050271d..ec724d7b33d1 100644
--- a/tools/testing/selftests/sched_ext/maybe_null_fail_dsp.bpf.c
+++ b/tools/testing/selftests/sched_ext/maybe_null_fail_dsp.bpf.c
@@ -19,7 +19,7 @@ void BPF_STRUCT_OPS(maybe_null_fail_dispatch, s32 cpu, struct task_struct *p)
SEC(".struct_ops.link")
struct sched_ext_ops maybe_null_fail = {
- .dispatch = maybe_null_fail_dispatch,
- .enable = maybe_null_running,
+ .dispatch = (void *) maybe_null_fail_dispatch,
+ .enable = (void *) maybe_null_running,
.name = "maybe_null_fail_dispatch",
};
diff --git a/tools/testing/selftests/sched_ext/maybe_null_fail_yld.bpf.c b/tools/testing/selftests/sched_ext/maybe_null_fail_yld.bpf.c
index 3c1740028e3b..e6552cace020 100644
--- a/tools/testing/selftests/sched_ext/maybe_null_fail_yld.bpf.c
+++ b/tools/testing/selftests/sched_ext/maybe_null_fail_yld.bpf.c
@@ -22,7 +22,7 @@ bool BPF_STRUCT_OPS(maybe_null_fail_yield, struct task_struct *from,
SEC(".struct_ops.link")
struct sched_ext_ops maybe_null_fail = {
- .yield = maybe_null_fail_yield,
- .enable = maybe_null_running,
+ .yield = (void *) maybe_null_fail_yield,
+ .enable = (void *) maybe_null_running,
.name = "maybe_null_fail_yield",
};
diff --git a/tools/testing/selftests/sched_ext/prog_run.bpf.c b/tools/testing/selftests/sched_ext/prog_run.bpf.c
index 6a4d7c48e3f2..00c267626a68 100644
--- a/tools/testing/selftests/sched_ext/prog_run.bpf.c
+++ b/tools/testing/selftests/sched_ext/prog_run.bpf.c
@@ -28,6 +28,6 @@ void BPF_STRUCT_OPS(prog_run_exit, struct scx_exit_info *ei)
SEC(".struct_ops.link")
struct sched_ext_ops prog_run_ops = {
- .exit = prog_run_exit,
+ .exit = (void *) prog_run_exit,
.name = "prog_run",
};
diff --git a/tools/testing/selftests/sched_ext/select_cpu_dfl.bpf.c b/tools/testing/selftests/sched_ext/select_cpu_dfl.bpf.c
index 2ed2991afafe..f171ac470970 100644
--- a/tools/testing/selftests/sched_ext/select_cpu_dfl.bpf.c
+++ b/tools/testing/selftests/sched_ext/select_cpu_dfl.bpf.c
@@ -35,6 +35,6 @@ void BPF_STRUCT_OPS(select_cpu_dfl_enqueue, struct task_struct *p,
SEC(".struct_ops.link")
struct sched_ext_ops select_cpu_dfl_ops = {
- .enqueue = select_cpu_dfl_enqueue,
+ .enqueue = (void *) select_cpu_dfl_enqueue,
.name = "select_cpu_dfl",
};
diff --git a/tools/testing/selftests/sched_ext/select_cpu_dfl_nodispatch.bpf.c b/tools/testing/selftests/sched_ext/select_cpu_dfl_nodispatch.bpf.c
index 4bb5abb2d369..9efdbb7da928 100644
--- a/tools/testing/selftests/sched_ext/select_cpu_dfl_nodispatch.bpf.c
+++ b/tools/testing/selftests/sched_ext/select_cpu_dfl_nodispatch.bpf.c
@@ -82,8 +82,8 @@ s32 BPF_STRUCT_OPS(select_cpu_dfl_nodispatch_init_task,
SEC(".struct_ops.link")
struct sched_ext_ops select_cpu_dfl_nodispatch_ops = {
- .select_cpu = select_cpu_dfl_nodispatch_select_cpu,
- .enqueue = select_cpu_dfl_nodispatch_enqueue,
- .init_task = select_cpu_dfl_nodispatch_init_task,
+ .select_cpu = (void *) select_cpu_dfl_nodispatch_select_cpu,
+ .enqueue = (void *) select_cpu_dfl_nodispatch_enqueue,
+ .init_task = (void *) select_cpu_dfl_nodispatch_init_task,
.name = "select_cpu_dfl_nodispatch",
};
diff --git a/tools/testing/selftests/sched_ext/select_cpu_dispatch.bpf.c b/tools/testing/selftests/sched_ext/select_cpu_dispatch.bpf.c
index f0b96a4a04b2..59bfc4f36167 100644
--- a/tools/testing/selftests/sched_ext/select_cpu_dispatch.bpf.c
+++ b/tools/testing/selftests/sched_ext/select_cpu_dispatch.bpf.c
@@ -35,7 +35,7 @@ dispatch:
SEC(".struct_ops.link")
struct sched_ext_ops select_cpu_dispatch_ops = {
- .select_cpu = select_cpu_dispatch_select_cpu,
+ .select_cpu = (void *) select_cpu_dispatch_select_cpu,
.name = "select_cpu_dispatch",
.timeout_ms = 1000U,
};
diff --git a/tools/testing/selftests/sched_ext/select_cpu_dispatch_bad_dsq.bpf.c b/tools/testing/selftests/sched_ext/select_cpu_dispatch_bad_dsq.bpf.c
index 7b42ddce0f56..3bbd5fcdfb18 100644
--- a/tools/testing/selftests/sched_ext/select_cpu_dispatch_bad_dsq.bpf.c
+++ b/tools/testing/selftests/sched_ext/select_cpu_dispatch_bad_dsq.bpf.c
@@ -30,8 +30,8 @@ void BPF_STRUCT_OPS(select_cpu_dispatch_bad_dsq_exit, struct scx_exit_info *ei)
SEC(".struct_ops.link")
struct sched_ext_ops select_cpu_dispatch_bad_dsq_ops = {
- .select_cpu = select_cpu_dispatch_bad_dsq_select_cpu,
- .exit = select_cpu_dispatch_bad_dsq_exit,
+ .select_cpu = (void *) select_cpu_dispatch_bad_dsq_select_cpu,
+ .exit = (void *) select_cpu_dispatch_bad_dsq_exit,
.name = "select_cpu_dispatch_bad_dsq",
.timeout_ms = 1000U,
};
diff --git a/tools/testing/selftests/sched_ext/select_cpu_dispatch_dbl_dsp.bpf.c b/tools/testing/selftests/sched_ext/select_cpu_dispatch_dbl_dsp.bpf.c
index 653e3dc0b4dc..0fda57fe0ecf 100644
--- a/tools/testing/selftests/sched_ext/select_cpu_dispatch_dbl_dsp.bpf.c
+++ b/tools/testing/selftests/sched_ext/select_cpu_dispatch_dbl_dsp.bpf.c
@@ -31,8 +31,8 @@ void BPF_STRUCT_OPS(select_cpu_dispatch_dbl_dsp_exit, struct scx_exit_info *ei)
SEC(".struct_ops.link")
struct sched_ext_ops select_cpu_dispatch_dbl_dsp_ops = {
- .select_cpu = select_cpu_dispatch_dbl_dsp_select_cpu,
- .exit = select_cpu_dispatch_dbl_dsp_exit,
+ .select_cpu = (void *) select_cpu_dispatch_dbl_dsp_select_cpu,
+ .exit = (void *) select_cpu_dispatch_dbl_dsp_exit,
.name = "select_cpu_dispatch_dbl_dsp",
.timeout_ms = 1000U,
};
diff --git a/tools/testing/selftests/sched_ext/select_cpu_vtime.bpf.c b/tools/testing/selftests/sched_ext/select_cpu_vtime.bpf.c
index 7f3ebf4fc2ea..e6c67bcf5e6e 100644
--- a/tools/testing/selftests/sched_ext/select_cpu_vtime.bpf.c
+++ b/tools/testing/selftests/sched_ext/select_cpu_vtime.bpf.c
@@ -81,12 +81,12 @@ s32 BPF_STRUCT_OPS_SLEEPABLE(select_cpu_vtime_init)
SEC(".struct_ops.link")
struct sched_ext_ops select_cpu_vtime_ops = {
- .select_cpu = select_cpu_vtime_select_cpu,
- .dispatch = select_cpu_vtime_dispatch,
- .running = select_cpu_vtime_running,
- .stopping = select_cpu_vtime_stopping,
- .enable = select_cpu_vtime_enable,
- .init = select_cpu_vtime_init,
+ .select_cpu = (void *) select_cpu_vtime_select_cpu,
+ .dispatch = (void *) select_cpu_vtime_dispatch,
+ .running = (void *) select_cpu_vtime_running,
+ .stopping = (void *) select_cpu_vtime_stopping,
+ .enable = (void *) select_cpu_vtime_enable,
+ .init = (void *) select_cpu_vtime_init,
.name = "select_cpu_vtime",
.timeout_ms = 1000U,
};
diff --git a/tools/testing/vma/vma.c b/tools/testing/vma/vma.c
index c53f220eb6cc..b33b47342d41 100644
--- a/tools/testing/vma/vma.c
+++ b/tools/testing/vma/vma.c
@@ -1522,6 +1522,45 @@ static bool test_copy_vma(void)
return true;
}
+static bool test_expand_only_mode(void)
+{
+ unsigned long flags = VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE;
+ struct mm_struct mm = {};
+ VMA_ITERATOR(vmi, &mm, 0);
+ struct vm_area_struct *vma_prev, *vma;
+ VMG_STATE(vmg, &mm, &vmi, 0x5000, 0x9000, flags, 5);
+
+ /*
+ * Place a VMA prior to the one we're expanding so we assert that we do
+ * not erroneously try to traverse to the previous VMA even though we
+ * have, through the use of VMG_FLAG_JUST_EXPAND, indicated we do not
+ * need to do so.
+ */
+ alloc_and_link_vma(&mm, 0, 0x2000, 0, flags);
+
+ /*
+ * We will be positioned at the prev VMA, but looking to expand to
+ * 0x9000.
+ */
+ vma_iter_set(&vmi, 0x3000);
+ vma_prev = alloc_and_link_vma(&mm, 0x3000, 0x5000, 3, flags);
+ vmg.prev = vma_prev;
+ vmg.merge_flags = VMG_FLAG_JUST_EXPAND;
+
+ vma = vma_merge_new_range(&vmg);
+ ASSERT_NE(vma, NULL);
+ ASSERT_EQ(vma, vma_prev);
+ ASSERT_EQ(vmg.state, VMA_MERGE_SUCCESS);
+ ASSERT_EQ(vma->vm_start, 0x3000);
+ ASSERT_EQ(vma->vm_end, 0x9000);
+ ASSERT_EQ(vma->vm_pgoff, 3);
+ ASSERT_TRUE(vma_write_started(vma));
+ ASSERT_EQ(vma_iter_addr(&vmi), 0x3000);
+
+ cleanup_mm(&mm, &vmi);
+ return true;
+}
+
int main(void)
{
int num_tests = 0, num_fail = 0;
@@ -1553,6 +1592,7 @@ int main(void)
TEST(vmi_prealloc_fail);
TEST(merge_extend);
TEST(copy_vma);
+ TEST(expand_only_mode);
#undef TEST