author		2025-05-27 21:12:50 -0700
committer	2025-05-27 21:12:50 -0700
commit		feacb1774bd5eac6382990d0f6d1378dc01dd78f (patch)
tree		f295cca0845b2caeed14efdef776ecb4f649339d /tools
parent		Merge tag 'cgroup-for-6.16' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup (diff)
parent		sched_ext: Call ops.update_idle() after updating builtin idle bits (diff)
Merge tag 'sched_ext-for-6.16' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/sched_ext
Pull sched_ext updates from Tejun Heo:
- More in-kernel idle CPU selection improvements. Expand topology
  awareness coverage and add scx_bpf_select_cpu_and() to allow more
  flexibility. The idle CPU selection kfuncs can now be called from
  unlocked contexts too (see the first sketch after this list).
- A bunch of reorganization changes to lay the foundation for multiple
  hierarchical scheduler support. This isn't ready yet and the included
  changes don't meaningfully change behavior. One notable change is
  replacing some static_key tests with dynamic tests, as the test
  results may differ depending on the scheduler instance (see the
  second sketch after this list). This isn't expected to cause a
  meaningful performance difference.
- Other minor and doc updates.
- There were multiple patches in for-6.15-fixes that conflicted with
  changes in for-6.16. for-6.15-fixes was pulled three times into
  for-6.16 to resolve the conflicts.
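A rough sketch of how the new kfunc is used from ops.select_cpu(), modeled on the allowed_cpus selftest added below (the scheduler and cpumask names here are hypothetical, not part of this pull):

	/* Hypothetical cpumask kptr, assumed to be populated in ops.init(). */
	private(SKETCH) struct bpf_cpumask __kptr *sketch_cpumask;

	s32 BPF_STRUCT_OPS(sketch_select_cpu, struct task_struct *p,
			   s32 prev_cpu, u64 wake_flags)
	{
		const struct cpumask *allowed = cast_mask(sketch_cpumask);
		s32 cpu;

		if (!allowed)
			return prev_cpu;

		/* Pick an idle CPU in both @allowed and p->cpus_ptr, or < 0. */
		cpu = scx_bpf_select_cpu_and(p, prev_cpu, wake_flags, allowed, 0);
		if (cpu >= 0) {
			/* Idle CPU found: dispatch directly to its local DSQ. */
			scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
			return cpu;
		}
		return prev_cpu;
	}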
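The static_key point in the second bullet amounts to changes of roughly this shape (an illustrative sketch with hypothetical names, not code from this series):

	/* Before: one global scheduler instance, so a static branch suffices. */
	if (static_branch_likely(&scx_sketch_key))
		apply_feature();

	/* After: the answer depends on which scx_sched instance is asked,
	 * so it becomes an ordinary dynamic test on that instance.
	 */
	if (sch->sketch_enabled)
		apply_feature();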
* tag 'sched_ext-for-6.16' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/sched_ext: (49 commits)
sched_ext: Call ops.update_idle() after updating builtin idle bits
sched_ext, docs: convert mentions of "CFS" to "fair-class scheduler"
selftests/sched_ext: Update test enq_select_cpu_fails
sched_ext: idle: Consolidate default idle CPU selection kfuncs
selftests/sched_ext: Add test for scx_bpf_select_cpu_and() via test_run
sched_ext: idle: Allow scx_bpf_select_cpu_and() from unlocked context
sched_ext: idle: Validate locking correctness in scx_bpf_select_cpu_and()
sched_ext: Make scx_kf_allowed_if_unlocked() available outside ext.c
sched_ext, docs: add label
sched_ext: Explain the temporary situation around scx_root dereferences
sched_ext: Add @sch to SCX_CALL_OP*()
sched_ext: Cleanup [__]scx_exit/error*()
sched_ext: Clean up scx_root usages
Documentation: scheduler: Changed lowercase acronyms to uppercase
sched_ext: Avoid NULL scx_root deref in __scx_exit()
sched_ext: Add RCU protection to scx_root in DSQ iterator
sched_ext: Clean up SCX_EXIT_NONE handling in scx_disable_workfn()
sched_ext: Move disable machinery into scx_sched
sched_ext: Move event_stats_cpu into scx_sched
...
Diffstat (limited to 'tools')
-rw-r--r--	tools/sched_ext/Makefile					|  23
-rw-r--r--	tools/sched_ext/include/scx/common.bpf.h			|   2
-rw-r--r--	tools/sched_ext/scx_qmap.bpf.c					|   4
-rw-r--r--	tools/sched_ext/scx_show_state.py				|  14
-rw-r--r--	tools/testing/selftests/sched_ext/Makefile			|   3
-rw-r--r--	tools/testing/selftests/sched_ext/allowed_cpus.bpf.c		| 144
-rw-r--r--	tools/testing/selftests/sched_ext/allowed_cpus.c		|  84
-rw-r--r--	tools/testing/selftests/sched_ext/enq_select_cpu.bpf.c		|  74
-rw-r--r--	tools/testing/selftests/sched_ext/enq_select_cpu.c		|  88
-rw-r--r--	tools/testing/selftests/sched_ext/enq_select_cpu_fails.bpf.c	|  43
-rw-r--r--	tools/testing/selftests/sched_ext/enq_select_cpu_fails.c	|  61
11 files changed, 420 insertions, 120 deletions
diff --git a/tools/sched_ext/Makefile b/tools/sched_ext/Makefile
index ca3815e572d8..d68780e2e03d 100644
--- a/tools/sched_ext/Makefile
+++ b/tools/sched_ext/Makefile
@@ -61,8 +61,8 @@ SCXOBJ_DIR := $(OBJ_DIR)/sched_ext
 BINDIR := $(OUTPUT_DIR)/bin
 BPFOBJ := $(BPFOBJ_DIR)/libbpf.a
 ifneq ($(CROSS_COMPILE),)
-HOST_BUILD_DIR := $(OBJ_DIR)/host
-HOST_OUTPUT_DIR := host-tools
+HOST_BUILD_DIR := $(OBJ_DIR)/host/obj
+HOST_OUTPUT_DIR := $(OBJ_DIR)/host
 HOST_INCLUDE_DIR := $(HOST_OUTPUT_DIR)/include
 else
 HOST_BUILD_DIR := $(OBJ_DIR)
@@ -98,7 +98,7 @@ ifneq ($(LLVM),)
 CFLAGS += -Wno-unused-command-line-argument
 endif
 
-LDFLAGS = -lelf -lz -lpthread
+LDFLAGS += -lelf -lz -lpthread
 
 IS_LITTLE_ENDIAN = $(shell $(CC) -dM -E - </dev/null | \
			grep 'define __BYTE_ORDER__ __ORDER_LITTLE_ENDIAN__')
@@ -136,14 +136,25 @@ $(MAKE_DIRS):
 $(BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile) \
	   $(APIDIR)/linux/bpf.h \
	   | $(OBJ_DIR)/libbpf
-	$(Q)$(MAKE) $(submake_extras) -C $(BPFDIR) OUTPUT=$(OBJ_DIR)/libbpf/ \
+	$(Q)$(MAKE) $(submake_extras) CROSS_COMPILE=$(CROSS_COMPILE) \
+		    -C $(BPFDIR) OUTPUT=$(OBJ_DIR)/libbpf/ \
		    EXTRA_CFLAGS='-g -O0 -fPIC' \
+		    LDFLAGS="$(LDFLAGS)" \
		    DESTDIR=$(OUTPUT_DIR) prefix= all install_headers
 
+$(HOST_BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile) \
+	   $(APIDIR)/linux/bpf.h \
+	   | $(HOST_BUILD_DIR)/libbpf
+	$(Q)$(MAKE) $(submake_extras) -C $(BPFDIR) \
+		    OUTPUT=$(HOST_BUILD_DIR)/libbpf/ \
+		    ARCH= CROSS_COMPILE= CC="$(HOSTCC)" LD=$(HOSTLD) \
+		    EXTRA_CFLAGS='-g -O0 -fPIC' \
+		    DESTDIR=$(HOST_OUTPUT_DIR) prefix= all install_headers
+
 $(DEFAULT_BPFTOOL): $(wildcard $(BPFTOOLDIR)/*.[ch] $(BPFTOOLDIR)/Makefile) \
		    $(HOST_BPFOBJ) | $(HOST_BUILD_DIR)/bpftool
	$(Q)$(MAKE) $(submake_extras) -C $(BPFTOOLDIR) \
-		    ARCH= CROSS_COMPILE= CC=$(HOSTCC) LD=$(HOSTLD) \
+		    ARCH= CROSS_COMPILE= CC="$(HOSTCC)" LD=$(HOSTLD) \
		    EXTRA_CFLAGS='-g -O0' \
		    OUTPUT=$(HOST_BUILD_DIR)/bpftool/ \
		    LIBBPF_OUTPUT=$(HOST_BUILD_DIR)/libbpf/ \
@@ -185,7 +196,7 @@ $(addprefix $(BINDIR)/,$(c-sched-targets)): \
	$(SCX_COMMON_DEPS)
	$(eval sched=$(notdir $@))
	$(CC) $(CFLAGS) -c $(sched).c -o $(SCXOBJ_DIR)/$(sched).o
-	$(CC) -o $@ $(SCXOBJ_DIR)/$(sched).o $(HOST_BPFOBJ) $(LDFLAGS)
+	$(CC) -o $@ $(SCXOBJ_DIR)/$(sched).o $(BPFOBJ) $(LDFLAGS)
 
 $(c-sched-targets): %: $(BINDIR)/%
diff --git a/tools/sched_ext/include/scx/common.bpf.h b/tools/sched_ext/include/scx/common.bpf.h
index 8787048c6762..d4e21558e982 100644
--- a/tools/sched_ext/include/scx/common.bpf.h
+++ b/tools/sched_ext/include/scx/common.bpf.h
@@ -48,6 +48,8 @@ static inline void ___vmlinux_h_sanity_check___(void)
 s32 scx_bpf_create_dsq(u64 dsq_id, s32 node) __ksym;
 s32 scx_bpf_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, bool *is_idle) __ksym;
+s32 scx_bpf_select_cpu_and(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
+			   const struct cpumask *cpus_allowed, u64 flags) __ksym __weak;
 void scx_bpf_dsq_insert(struct task_struct *p, u64 dsq_id, u64 slice, u64 enq_flags) __ksym __weak;
 void scx_bpf_dsq_insert_vtime(struct task_struct *p, u64 dsq_id, u64 slice, u64 vtime, u64 enq_flags) __ksym __weak;
 u32 scx_bpf_dispatch_nr_slots(void) __ksym;
diff --git a/tools/sched_ext/scx_qmap.bpf.c b/tools/sched_ext/scx_qmap.bpf.c
index 26c40ca4f36c..c3cd9a17d48e 100644
--- a/tools/sched_ext/scx_qmap.bpf.c
+++ b/tools/sched_ext/scx_qmap.bpf.c
@@ -784,8 +784,8 @@ static int monitor_timerfn(void *map, int *key, struct bpf_timer *timer)
		   scx_read_event(&events, SCX_EV_DISPATCH_KEEP_LAST));
	bpf_printk("%35s: %lld", "SCX_EV_ENQ_SKIP_EXITING",
		   scx_read_event(&events, SCX_EV_ENQ_SKIP_EXITING));
-	bpf_printk("%35s: %lld", "SCX_EV_ENQ_SLICE_DFL",
-		   scx_read_event(&events, SCX_EV_ENQ_SLICE_DFL));
+	bpf_printk("%35s: %lld", "SCX_EV_REFILL_SLICE_DFL",
+		   scx_read_event(&events, SCX_EV_REFILL_SLICE_DFL));
	bpf_printk("%35s: %lld", "SCX_EV_BYPASS_DURATION",
		   scx_read_event(&events, SCX_EV_BYPASS_DURATION));
	bpf_printk("%35s: %lld", "SCX_EV_BYPASS_DISPATCH",
diff --git a/tools/sched_ext/scx_show_state.py b/tools/sched_ext/scx_show_state.py
index b800d4f5f2e9..7cdcc6729ea4 100644
--- a/tools/sched_ext/scx_show_state.py
+++ b/tools/sched_ext/scx_show_state.py
@@ -24,19 +24,19 @@ def read_atomic(name):
 def read_static_key(name):
     return prog[name].key.enabled.counter.value_()
 
-def ops_state_str(state):
-    return prog['scx_ops_enable_state_str'][state].string_().decode()
+def state_str(state):
+    return prog['scx_enable_state_str'][state].string_().decode()
 
 ops = prog['scx_ops']
-enable_state = read_atomic("scx_ops_enable_state_var")
+enable_state = read_atomic("scx_enable_state_var")
 
 print(f'ops           : {ops.name.string_().decode()}')
-print(f'enabled       : {read_static_key("__scx_ops_enabled")}')
+print(f'enabled       : {read_static_key("__scx_enabled")}')
 print(f'switching_all : {read_int("scx_switching_all")}')
 print(f'switched_all  : {read_static_key("__scx_switched_all")}')
-print(f'enable_state  : {ops_state_str(enable_state)} ({enable_state})')
+print(f'enable_state  : {state_str(enable_state)} ({enable_state})')
 print(f'in_softlockup : {prog["scx_in_softlockup"].value_()}')
-print(f'breather_depth: {read_atomic("scx_ops_breather_depth")}')
-print(f'bypass_depth  : {prog["scx_ops_bypass_depth"].value_()}')
+print(f'breather_depth: {read_atomic("scx_breather_depth")}')
+print(f'bypass_depth  : {prog["scx_bypass_depth"].value_()}')
 print(f'nr_rejected   : {read_atomic("scx_nr_rejected")}')
 print(f'enable_seq    : {read_atomic("scx_enable_seq")}')
diff --git a/tools/testing/selftests/sched_ext/Makefile b/tools/testing/selftests/sched_ext/Makefile
index f4531327b8e7..9d9d6b4c38b0 100644
--- a/tools/testing/selftests/sched_ext/Makefile
+++ b/tools/testing/selftests/sched_ext/Makefile
@@ -162,10 +162,10 @@ all_test_bpfprogs := $(foreach prog,$(wildcard *.bpf.c),$(INCLUDE_DIR)/$(patsubs
 auto-test-targets :=			\
	create_dsq			\
	enq_last_no_enq_fails		\
-	enq_select_cpu_fails		\
	ddsp_bogus_dsq_fail		\
	ddsp_vtimelocal_fail		\
	dsp_local_on			\
+	enq_select_cpu			\
	exit				\
	hotplug				\
	init_enable_count		\
@@ -173,6 +173,7 @@ auto-test-targets :=			\
	maybe_null			\
	minimal				\
	numa				\
+	allowed_cpus			\
	prog_run			\
	reload_loop			\
	select_cpu_dfl			\
diff --git a/tools/testing/selftests/sched_ext/allowed_cpus.bpf.c b/tools/testing/selftests/sched_ext/allowed_cpus.bpf.c
new file mode 100644
index 000000000000..35923e74a2ec
--- /dev/null
+++ b/tools/testing/selftests/sched_ext/allowed_cpus.bpf.c
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * A scheduler that validates the behavior of scx_bpf_select_cpu_and() by
+ * selecting idle CPUs strictly within a subset of allowed CPUs.
+ *
+ * Copyright (c) 2025 Andrea Righi <arighi@nvidia.com>
+ */
+
+#include <scx/common.bpf.h>
+
+char _license[] SEC("license") = "GPL";
+
+UEI_DEFINE(uei);
+
+private(PREF_CPUS) struct bpf_cpumask __kptr * allowed_cpumask;
+
+static void
+validate_idle_cpu(const struct task_struct *p, const struct cpumask *allowed, s32 cpu)
+{
+	if (scx_bpf_test_and_clear_cpu_idle(cpu))
+		scx_bpf_error("CPU %d should be marked as busy", cpu);
+
+	if (bpf_cpumask_subset(allowed, p->cpus_ptr) &&
+	    !bpf_cpumask_test_cpu(cpu, allowed))
+		scx_bpf_error("CPU %d not in the allowed domain for %d (%s)",
+			      cpu, p->pid, p->comm);
+}
+
+s32 BPF_STRUCT_OPS(allowed_cpus_select_cpu,
+		   struct task_struct *p, s32 prev_cpu, u64 wake_flags)
+{
+	const struct cpumask *allowed;
+	s32 cpu;
+
+	allowed = cast_mask(allowed_cpumask);
+	if (!allowed) {
+		scx_bpf_error("allowed domain not initialized");
+		return -EINVAL;
+	}
+
+	/*
+	 * Select an idle CPU strictly within the allowed domain.
+	 */
+	cpu = scx_bpf_select_cpu_and(p, prev_cpu, wake_flags, allowed, 0);
+	if (cpu >= 0) {
+		validate_idle_cpu(p, allowed, cpu);
+		scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
+
+		return cpu;
+	}
+
+	return prev_cpu;
+}
+
+void BPF_STRUCT_OPS(allowed_cpus_enqueue, struct task_struct *p, u64 enq_flags)
+{
+	const struct cpumask *allowed;
+	s32 prev_cpu = scx_bpf_task_cpu(p), cpu;
+
+	scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, 0);
+
+	allowed = cast_mask(allowed_cpumask);
+	if (!allowed) {
+		scx_bpf_error("allowed domain not initialized");
+		return;
+	}
+
+	/*
+	 * Use scx_bpf_select_cpu_and() to proactively kick an idle CPU
+	 * within @allowed_cpumask, usable by @p.
+	 */
+	cpu = scx_bpf_select_cpu_and(p, prev_cpu, 0, allowed, 0);
+	if (cpu >= 0) {
+		validate_idle_cpu(p, allowed, cpu);
+		scx_bpf_kick_cpu(cpu, SCX_KICK_IDLE);
+	}
+}
+
+s32 BPF_STRUCT_OPS_SLEEPABLE(allowed_cpus_init)
+{
+	struct bpf_cpumask *mask;
+
+	mask = bpf_cpumask_create();
+	if (!mask)
+		return -ENOMEM;
+
+	mask = bpf_kptr_xchg(&allowed_cpumask, mask);
+	if (mask)
+		bpf_cpumask_release(mask);
+
+	bpf_rcu_read_lock();
+
+	/*
+	 * Assign the first online CPU to the allowed domain.
+	 */
+	mask = allowed_cpumask;
+	if (mask) {
+		const struct cpumask *online = scx_bpf_get_online_cpumask();
+
+		bpf_cpumask_set_cpu(bpf_cpumask_first(online), mask);
+		scx_bpf_put_cpumask(online);
+	}
+
+	bpf_rcu_read_unlock();
+
+	return 0;
+}
+
+void BPF_STRUCT_OPS(allowed_cpus_exit, struct scx_exit_info *ei)
+{
+	UEI_RECORD(uei, ei);
+}
+
+struct task_cpu_arg {
+	pid_t pid;
+};
+
+SEC("syscall")
+int select_cpu_from_user(struct task_cpu_arg *input)
+{
+	struct task_struct *p;
+	int cpu;
+
+	p = bpf_task_from_pid(input->pid);
+	if (!p)
+		return -EINVAL;
+
+	bpf_rcu_read_lock();
+	cpu = scx_bpf_select_cpu_and(p, bpf_get_smp_processor_id(), 0, p->cpus_ptr, 0);
+	bpf_rcu_read_unlock();
+
+	bpf_task_release(p);
+
+	return cpu;
+}
+
+SEC(".struct_ops.link")
+struct sched_ext_ops allowed_cpus_ops = {
+	.select_cpu		= (void *)allowed_cpus_select_cpu,
+	.enqueue		= (void *)allowed_cpus_enqueue,
+	.init			= (void *)allowed_cpus_init,
+	.exit			= (void *)allowed_cpus_exit,
+	.name			= "allowed_cpus",
+};
diff --git a/tools/testing/selftests/sched_ext/allowed_cpus.c b/tools/testing/selftests/sched_ext/allowed_cpus.c
new file mode 100644
index 000000000000..093f285ab4ba
--- /dev/null
+++ b/tools/testing/selftests/sched_ext/allowed_cpus.c
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2025 Andrea Righi <arighi@nvidia.com>
+ */
+#include <bpf/bpf.h>
+#include <scx/common.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include "allowed_cpus.bpf.skel.h"
+#include "scx_test.h"
+
+static enum scx_test_status setup(void **ctx)
+{
+	struct allowed_cpus *skel;
+
+	skel = allowed_cpus__open();
+	SCX_FAIL_IF(!skel, "Failed to open");
+	SCX_ENUM_INIT(skel);
+	SCX_FAIL_IF(allowed_cpus__load(skel), "Failed to load skel");
+
+	*ctx = skel;
+
+	return SCX_TEST_PASS;
+}
+
+static int test_select_cpu_from_user(const struct allowed_cpus *skel)
+{
+	int fd, ret;
+	__u64 args[1];
+
+	LIBBPF_OPTS(bpf_test_run_opts, attr,
+		.ctx_in = args,
+		.ctx_size_in = sizeof(args),
+	);
+
+	args[0] = getpid();
+	fd = bpf_program__fd(skel->progs.select_cpu_from_user);
+	if (fd < 0)
+		return fd;
+
+	ret = bpf_prog_test_run_opts(fd, &attr);
+	if (ret < 0)
+		return ret;
+
+	fprintf(stderr, "%s: CPU %d\n", __func__, attr.retval);
+
+	return 0;
+}
+
+static enum scx_test_status run(void *ctx)
+{
+	struct allowed_cpus *skel = ctx;
+	struct bpf_link *link;
+
+	link = bpf_map__attach_struct_ops(skel->maps.allowed_cpus_ops);
+	SCX_FAIL_IF(!link, "Failed to attach scheduler");
+
+	/* Pick an idle CPU from user-space */
+	SCX_FAIL_IF(test_select_cpu_from_user(skel), "Failed to pick idle CPU");
+
+	/* Just sleeping is fine, plenty of scheduling events happening */
+	sleep(1);
+
+	SCX_EQ(skel->data->uei.kind, EXIT_KIND(SCX_EXIT_NONE));
+	bpf_link__destroy(link);
+
+	return SCX_TEST_PASS;
+}
+
+static void cleanup(void *ctx)
+{
+	struct allowed_cpus *skel = ctx;
+
+	allowed_cpus__destroy(skel);
+}
+
+struct scx_test allowed_cpus = {
+	.name = "allowed_cpus",
+	.description = "Verify scx_bpf_select_cpu_and()",
+	.setup = setup,
+	.run = run,
+	.cleanup = cleanup,
+};
+REGISTER_SCX_TEST(&allowed_cpus)
diff --git a/tools/testing/selftests/sched_ext/enq_select_cpu.bpf.c b/tools/testing/selftests/sched_ext/enq_select_cpu.bpf.c
new file mode 100644
index 000000000000..ee2c9b89716e
--- /dev/null
+++ b/tools/testing/selftests/sched_ext/enq_select_cpu.bpf.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2023 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2023 David Vernet <dvernet@meta.com>
+ * Copyright (c) 2023 Tejun Heo <tj@kernel.org>
+ */
+
+#include <scx/common.bpf.h>
+
+char _license[] SEC("license") = "GPL";
+
+UEI_DEFINE(uei);
+
+s32 BPF_STRUCT_OPS(enq_select_cpu_select_cpu, struct task_struct *p,
+		   s32 prev_cpu, u64 wake_flags)
+{
+	/* Bounce all tasks to ops.enqueue() */
+	return prev_cpu;
+}
+
+void BPF_STRUCT_OPS(enq_select_cpu_enqueue, struct task_struct *p,
+		    u64 enq_flags)
+{
+	s32 cpu, prev_cpu = scx_bpf_task_cpu(p);
+	bool found = false;
+
+	cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, 0, &found);
+	if (found) {
+		scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL_ON | cpu, SCX_SLICE_DFL, enq_flags);
+		return;
+	}
+
+	scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
+}
+
+void BPF_STRUCT_OPS(enq_select_cpu_exit, struct scx_exit_info *ei)
+{
+	UEI_RECORD(uei, ei);
+}
+
+struct task_cpu_arg {
+	pid_t pid;
+};
+
+SEC("syscall")
+int select_cpu_from_user(struct task_cpu_arg *input)
+{
+	struct task_struct *p;
+	bool found = false;
+	s32 cpu;
+
+	p = bpf_task_from_pid(input->pid);
+	if (!p)
+		return -EINVAL;
+
+	bpf_rcu_read_lock();
+	cpu = scx_bpf_select_cpu_dfl(p, bpf_get_smp_processor_id(), 0, &found);
+	if (!found)
+		cpu = -EBUSY;
+	bpf_rcu_read_unlock();
+
+	bpf_task_release(p);
+
+	return cpu;
+}
+
+SEC(".struct_ops.link")
+struct sched_ext_ops enq_select_cpu_ops = {
+	.select_cpu		= (void *)enq_select_cpu_select_cpu,
+	.enqueue		= (void *)enq_select_cpu_enqueue,
+	.exit			= (void *)enq_select_cpu_exit,
+	.name			= "enq_select_cpu",
+	.timeout_ms		= 1000U,
+};
diff --git a/tools/testing/selftests/sched_ext/enq_select_cpu.c b/tools/testing/selftests/sched_ext/enq_select_cpu.c
new file mode 100644
index 000000000000..340c6f8b86da
--- /dev/null
+++ b/tools/testing/selftests/sched_ext/enq_select_cpu.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2023 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2023 David Vernet <dvernet@meta.com>
+ * Copyright (c) 2023 Tejun Heo <tj@kernel.org>
+ */
+#include <bpf/bpf.h>
+#include <scx/common.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include "enq_select_cpu.bpf.skel.h"
+#include "scx_test.h"
+
+static enum scx_test_status setup(void **ctx)
+{
+	struct enq_select_cpu *skel;
+
+	skel = enq_select_cpu__open();
+	SCX_FAIL_IF(!skel, "Failed to open");
+	SCX_ENUM_INIT(skel);
+	SCX_FAIL_IF(enq_select_cpu__load(skel), "Failed to load skel");
+
+	*ctx = skel;
+
+	return SCX_TEST_PASS;
+}
+
+static int test_select_cpu_from_user(const struct enq_select_cpu *skel)
+{
+	int fd, ret;
+	__u64 args[1];
+
+	LIBBPF_OPTS(bpf_test_run_opts, attr,
+		.ctx_in = args,
+		.ctx_size_in = sizeof(args),
+	);
+
+	args[0] = getpid();
+	fd = bpf_program__fd(skel->progs.select_cpu_from_user);
+	if (fd < 0)
+		return fd;
+
+	ret = bpf_prog_test_run_opts(fd, &attr);
+	if (ret < 0)
+		return ret;
+
+	fprintf(stderr, "%s: CPU %d\n", __func__, attr.retval);
+
+	return 0;
+}
+
+static enum scx_test_status run(void *ctx)
+{
+	struct enq_select_cpu *skel = ctx;
+	struct bpf_link *link;
+
+	link = bpf_map__attach_struct_ops(skel->maps.enq_select_cpu_ops);
+	if (!link) {
+		SCX_ERR("Failed to attach scheduler");
+		return SCX_TEST_FAIL;
+	}
+
+	/* Pick an idle CPU from user-space */
+	SCX_FAIL_IF(test_select_cpu_from_user(skel), "Failed to pick idle CPU");
+
+	sleep(1);
+
+	SCX_EQ(skel->data->uei.kind, EXIT_KIND(SCX_EXIT_NONE));
+	bpf_link__destroy(link);
+
+	return SCX_TEST_PASS;
+}
+
+static void cleanup(void *ctx)
+{
+	struct enq_select_cpu *skel = ctx;
+
+	enq_select_cpu__destroy(skel);
+}
+
+struct scx_test enq_select_cpu = {
+	.name = "enq_select_cpu",
+	.description = "Verify scx_bpf_select_cpu_dfl() from multiple contexts",
+	.setup = setup,
+	.run = run,
+	.cleanup = cleanup,
+};
+REGISTER_SCX_TEST(&enq_select_cpu)
diff --git a/tools/testing/selftests/sched_ext/enq_select_cpu_fails.bpf.c b/tools/testing/selftests/sched_ext/enq_select_cpu_fails.bpf.c
deleted file mode 100644
index a7cf868d5e31..000000000000
--- a/tools/testing/selftests/sched_ext/enq_select_cpu_fails.bpf.c
+++ /dev/null
@@ -1,43 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (c) 2023 Meta Platforms, Inc. and affiliates.
- * Copyright (c) 2023 David Vernet <dvernet@meta.com>
- * Copyright (c) 2023 Tejun Heo <tj@kernel.org>
- */
-
-#include <scx/common.bpf.h>
-
-char _license[] SEC("license") = "GPL";
-
-/* Manually specify the signature until the kfunc is added to the scx repo. */
-s32 scx_bpf_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
-			   bool *found) __ksym;
-
-s32 BPF_STRUCT_OPS(enq_select_cpu_fails_select_cpu, struct task_struct *p,
-		   s32 prev_cpu, u64 wake_flags)
-{
-	return prev_cpu;
-}
-
-void BPF_STRUCT_OPS(enq_select_cpu_fails_enqueue, struct task_struct *p,
-		    u64 enq_flags)
-{
-	/*
-	 * Need to initialize the variable or the verifier will fail to load.
-	 * Improving these semantics is actively being worked on.
-	 */
-	bool found = false;
-
-	/* Can only call from ops.select_cpu() */
-	scx_bpf_select_cpu_dfl(p, 0, 0, &found);
-
-	scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
-}
-
-SEC(".struct_ops.link")
-struct sched_ext_ops enq_select_cpu_fails_ops = {
-	.select_cpu		= (void *) enq_select_cpu_fails_select_cpu,
-	.enqueue		= (void *) enq_select_cpu_fails_enqueue,
-	.name			= "enq_select_cpu_fails",
-	.timeout_ms		= 1000U,
-};
diff --git a/tools/testing/selftests/sched_ext/enq_select_cpu_fails.c b/tools/testing/selftests/sched_ext/enq_select_cpu_fails.c
deleted file mode 100644
index a80e3a3b3698..000000000000
--- a/tools/testing/selftests/sched_ext/enq_select_cpu_fails.c
+++ /dev/null
@@ -1,61 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (c) 2023 Meta Platforms, Inc. and affiliates.
- * Copyright (c) 2023 David Vernet <dvernet@meta.com>
- * Copyright (c) 2023 Tejun Heo <tj@kernel.org>
- */
-#include <bpf/bpf.h>
-#include <scx/common.h>
-#include <sys/wait.h>
-#include <unistd.h>
-#include "enq_select_cpu_fails.bpf.skel.h"
-#include "scx_test.h"
-
-static enum scx_test_status setup(void **ctx)
-{
-	struct enq_select_cpu_fails *skel;
-
-	skel = enq_select_cpu_fails__open();
-	SCX_FAIL_IF(!skel, "Failed to open");
-	SCX_ENUM_INIT(skel);
-	SCX_FAIL_IF(enq_select_cpu_fails__load(skel), "Failed to load skel");
-
-	*ctx = skel;
-
-	return SCX_TEST_PASS;
-}
-
-static enum scx_test_status run(void *ctx)
-{
-	struct enq_select_cpu_fails *skel = ctx;
-	struct bpf_link *link;
-
-	link = bpf_map__attach_struct_ops(skel->maps.enq_select_cpu_fails_ops);
-	if (!link) {
-		SCX_ERR("Failed to attach scheduler");
-		return SCX_TEST_FAIL;
-	}
-
-	sleep(1);
-
-	bpf_link__destroy(link);
-
-	return SCX_TEST_PASS;
-}
-
-static void cleanup(void *ctx)
-{
-	struct enq_select_cpu_fails *skel = ctx;
-
-	enq_select_cpu_fails__destroy(skel);
-}
-
-struct scx_test enq_select_cpu_fails = {
-	.name = "enq_select_cpu_fails",
-	.description = "Verify we fail to call scx_bpf_select_cpu_dfl() "
-		       "from ops.enqueue()",
-	.setup = setup,
-	.run = run,
-	.cleanup = cleanup,
-};
-REGISTER_SCX_TEST(&enq_select_cpu_fails)