Diffstat (limited to 'tools')
 tools/sched_ext/include/scx/common.bpf.h |  2 +-
 tools/sched_ext/include/scx/compat.bpf.h | 11 +++++++++++
 tools/sched_ext/scx_central.bpf.c        |  4 ++--
 tools/sched_ext/scx_flatcg.bpf.c         |  6 +++---
 tools/sched_ext/scx_qmap.bpf.c           |  4 ++--
 tools/sched_ext/scx_simple.bpf.c         |  2 +-
 6 files changed, 20 insertions(+), 9 deletions(-)
diff --git a/tools/sched_ext/include/scx/common.bpf.h b/tools/sched_ext/include/scx/common.bpf.h
index 16c7100c69e4..535377649a22 100644
--- a/tools/sched_ext/include/scx/common.bpf.h
+++ b/tools/sched_ext/include/scx/common.bpf.h
@@ -40,7 +40,7 @@ void scx_bpf_dsq_insert(struct task_struct *p, u64 dsq_id, u64 slice, u64 enq_fl
void scx_bpf_dsq_insert_vtime(struct task_struct *p, u64 dsq_id, u64 slice, u64 vtime, u64 enq_flags) __ksym __weak;
u32 scx_bpf_dispatch_nr_slots(void) __ksym;
void scx_bpf_dispatch_cancel(void) __ksym;
-bool scx_bpf_consume(u64 dsq_id) __ksym;
+bool scx_bpf_dsq_move_to_local(u64 dsq_id) __ksym;
void scx_bpf_dispatch_from_dsq_set_slice(struct bpf_iter_scx_dsq *it__iter, u64 slice) __ksym __weak;
void scx_bpf_dispatch_from_dsq_set_vtime(struct bpf_iter_scx_dsq *it__iter, u64 vtime) __ksym __weak;
bool scx_bpf_dispatch_from_dsq(struct bpf_iter_scx_dsq *it__iter, struct task_struct *p, u64 dsq_id, u64 enq_flags) __ksym __weak;
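The common.bpf.h hunk is the whole API change: scx_bpf_consume() is renamed
to scx_bpf_dsq_move_to_local() with an unchanged signature. The kfunc still
takes a DSQ id and returns true iff it moved a task from that DSQ onto the
current CPU's local DSQ. A minimal dispatch callback under the new name
might look like the sketch below (MY_SHARED_DSQ is an illustrative id for a
DSQ created during ops.init()):

	#define MY_SHARED_DSQ 0

	void BPF_STRUCT_OPS(my_dispatch, s32 cpu, struct task_struct *prev)
	{
		/*
		 * Move one task from the shared DSQ onto this CPU's local
		 * DSQ. A false return means the DSQ was empty and the CPU
		 * goes (or stays) idle.
		 */
		scx_bpf_dsq_move_to_local(MY_SHARED_DSQ);
	}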
diff --git a/tools/sched_ext/include/scx/compat.bpf.h b/tools/sched_ext/include/scx/compat.bpf.h
index 183aa4c6f674..c00ed24a8721 100644
--- a/tools/sched_ext/include/scx/compat.bpf.h
+++ b/tools/sched_ext/include/scx/compat.bpf.h
@@ -43,6 +43,7 @@
*/
void scx_bpf_dispatch___compat(struct task_struct *p, u64 dsq_id, u64 slice, u64 enq_flags) __ksym __weak;
void scx_bpf_dispatch_vtime___compat(struct task_struct *p, u64 dsq_id, u64 slice, u64 vtime, u64 enq_flags) __ksym __weak;
+bool scx_bpf_consume___compat(u64 dsq_id) __ksym __weak;
#define scx_bpf_dsq_insert(p, dsq_id, slice, enq_flags) \
(bpf_ksym_exists(scx_bpf_dsq_insert) ? \
@@ -54,12 +55,22 @@ void scx_bpf_dispatch_vtime___compat(struct task_struct *p, u64 dsq_id, u64 slic
scx_bpf_dsq_insert_vtime((p), (dsq_id), (slice), (vtime), (enq_flags)) : \
scx_bpf_dispatch_vtime___compat((p), (dsq_id), (slice), (vtime), (enq_flags)))
+#define scx_bpf_dsq_move_to_local(dsq_id) \
+ (bpf_ksym_exists(scx_bpf_dsq_move_to_local) ? \
+ scx_bpf_dsq_move_to_local((dsq_id)) : \
+ scx_bpf_consume___compat((dsq_id)))
+
#define scx_bpf_dispatch(p, dsq_id, slice, enq_flags) \
_Static_assert(false, "scx_bpf_dispatch() renamed to scx_bpf_dsq_insert()")
#define scx_bpf_dispatch_vtime(p, dsq_id, slice, vtime, enq_flags) \
_Static_assert(false, "scx_bpf_dispatch_vtime() renamed to scx_bpf_dsq_insert_vtime()")
+#define scx_bpf_consume(dsq_id) ({ \
+ _Static_assert(false, "scx_bpf_consume() renamed to scx_bpf_dsq_move_to_local()"); \
+ false; \
+})
+
/*
* Define sched_ext_ops. This may be expanded to define multiple variants for
* backward compatibility. See compat.h::SCX_OPS_LOAD/ATTACH().
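The compat shim above follows the pattern already used for
scx_bpf_dsq_insert(): both the new kfunc and the old one are declared
__ksym __weak, and bpf_ksym_exists() picks whichever the running kernel
actually provides, so call sites are written against the new name only.
The poisoned scx_bpf_consume() macro wraps its _Static_assert in a
statement expression yielding false purely so a stray call still parses in
boolean context and the assertion message is the only error reported. A
sketch of the same shim for a hypothetical future rename (scx_bpf_new_op
and scx_bpf_old_op are made-up names):

	/* Both declared weak so the program loads on kernels that only
	 * provide one of them. */
	bool scx_bpf_new_op(u64 arg) __ksym __weak;
	bool scx_bpf_old_op___compat(u64 arg) __ksym __weak;

	/*
	 * Shadow the new kfunc with a macro of the same name: macro
	 * recursion is suppressed, so the inner use refers to the kfunc
	 * itself. Call sites just use scx_bpf_new_op() and transparently
	 * fall back on older kernels.
	 */
	#define scx_bpf_new_op(arg)				\
		(bpf_ksym_exists(scx_bpf_new_op) ?		\
		 scx_bpf_new_op((arg)) :			\
		 scx_bpf_old_op___compat((arg)))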
diff --git a/tools/sched_ext/scx_central.bpf.c b/tools/sched_ext/scx_central.bpf.c
index 153d9656c75f..e6fad6211f6c 100644
--- a/tools/sched_ext/scx_central.bpf.c
+++ b/tools/sched_ext/scx_central.bpf.c
@@ -219,13 +219,13 @@ void BPF_STRUCT_OPS(central_dispatch, s32 cpu, struct task_struct *prev)
}
/* look for a task to run on the central CPU */
- if (scx_bpf_consume(FALLBACK_DSQ_ID))
+ if (scx_bpf_dsq_move_to_local(FALLBACK_DSQ_ID))
return;
dispatch_to_cpu(central_cpu);
} else {
bool *gimme;
- if (scx_bpf_consume(FALLBACK_DSQ_ID))
+ if (scx_bpf_dsq_move_to_local(FALLBACK_DSQ_ID))
return;
gimme = ARRAY_ELEM_PTR(cpu_gimme_task, cpu, nr_cpu_ids);
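The scx_central conversion is mechanical: the return value keeps its
meaning, true when a task was moved to the local DSQ. The prevailing idiom,
visible in both branches above, is to try one DSQ and fall through to other
work when it comes up empty; a condensed sketch with illustrative DSQ ids:

	#define PRIMARY_DSQ	0
	#define FALLBACK_DSQ	1

	void BPF_STRUCT_OPS(demo_dispatch, s32 cpu, struct task_struct *prev)
	{
		/* Prefer the primary DSQ; if it is empty, fall back. */
		if (scx_bpf_dsq_move_to_local(PRIMARY_DSQ))
			return;
		scx_bpf_dsq_move_to_local(FALLBACK_DSQ);
	}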
diff --git a/tools/sched_ext/scx_flatcg.bpf.c b/tools/sched_ext/scx_flatcg.bpf.c
index 4dcb6c747b46..4e3afcd260bf 100644
--- a/tools/sched_ext/scx_flatcg.bpf.c
+++ b/tools/sched_ext/scx_flatcg.bpf.c
@@ -665,7 +665,7 @@ static bool try_pick_next_cgroup(u64 *cgidp)
goto out_free;
}
- if (!scx_bpf_consume(cgid)) {
+ if (!scx_bpf_dsq_move_to_local(cgid)) {
bpf_cgroup_release(cgrp);
stat_inc(FCG_STAT_PNC_EMPTY);
goto out_stash;
@@ -745,7 +745,7 @@ void BPF_STRUCT_OPS(fcg_dispatch, s32 cpu, struct task_struct *prev)
goto pick_next_cgroup;
if (vtime_before(now, cpuc->cur_at + cgrp_slice_ns)) {
- if (scx_bpf_consume(cpuc->cur_cgid)) {
+ if (scx_bpf_dsq_move_to_local(cpuc->cur_cgid)) {
stat_inc(FCG_STAT_CNS_KEEP);
return;
}
@@ -785,7 +785,7 @@ void BPF_STRUCT_OPS(fcg_dispatch, s32 cpu, struct task_struct *prev)
pick_next_cgroup:
cpuc->cur_at = now;
- if (scx_bpf_consume(FALLBACK_DSQ)) {
+ if (scx_bpf_dsq_move_to_local(FALLBACK_DSQ)) {
cpuc->cur_cgid = 0;
return;
}
diff --git a/tools/sched_ext/scx_qmap.bpf.c b/tools/sched_ext/scx_qmap.bpf.c
index 65c52cc4fffb..08f121252c56 100644
--- a/tools/sched_ext/scx_qmap.bpf.c
+++ b/tools/sched_ext/scx_qmap.bpf.c
@@ -374,7 +374,7 @@ void BPF_STRUCT_OPS(qmap_dispatch, s32 cpu, struct task_struct *prev)
if (dispatch_highpri(false))
return;
- if (!nr_highpri_queued && scx_bpf_consume(SHARED_DSQ))
+ if (!nr_highpri_queued && scx_bpf_dsq_move_to_local(SHARED_DSQ))
return;
if (dsp_inf_loop_after && nr_dispatched > dsp_inf_loop_after) {
@@ -439,7 +439,7 @@ void BPF_STRUCT_OPS(qmap_dispatch, s32 cpu, struct task_struct *prev)
if (!batch || !scx_bpf_dispatch_nr_slots()) {
if (dispatch_highpri(false))
return;
- scx_bpf_consume(SHARED_DSQ);
+ scx_bpf_dsq_move_to_local(SHARED_DSQ);
return;
}
if (!cpuc->dsp_cnt)
diff --git a/tools/sched_ext/scx_simple.bpf.c b/tools/sched_ext/scx_simple.bpf.c
index ce86a4a690b5..31f915b286c6 100644
--- a/tools/sched_ext/scx_simple.bpf.c
+++ b/tools/sched_ext/scx_simple.bpf.c
@@ -94,7 +94,7 @@ void BPF_STRUCT_OPS(simple_enqueue, struct task_struct *p, u64 enq_flags)
void BPF_STRUCT_OPS(simple_dispatch, s32 cpu, struct task_struct *prev)
{
- scx_bpf_consume(SHARED_DSQ);
+ scx_bpf_dsq_move_to_local(SHARED_DSQ);
}
void BPF_STRUCT_OPS(simple_running, struct task_struct *p)
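Taken together with scx_simple's enqueue path, the renamed pair reads as
insert-then-move: ops.enqueue() places tasks into a DSQ with
scx_bpf_dsq_insert() and ops.dispatch() pulls them onto the local DSQ with
scx_bpf_dsq_move_to_local(). An end-to-end sketch under the new naming,
assuming the helpers and macros from common.bpf.h/compat.bpf.h (the DSQ id
and the "demo" scheduler name are made up):

	#include <scx/common.bpf.h>

	char _license[] SEC("license") = "GPL";

	#define DEMO_DSQ 0

	s32 BPF_STRUCT_OPS_SLEEPABLE(demo_init)
	{
		/* Create the shared DSQ; -1 means any NUMA node. */
		return scx_bpf_create_dsq(DEMO_DSQ, -1);
	}

	void BPF_STRUCT_OPS(demo_enqueue, struct task_struct *p, u64 enq_flags)
	{
		/* Queue every task on the shared DSQ with the default slice. */
		scx_bpf_dsq_insert(p, DEMO_DSQ, SCX_SLICE_DFL, enq_flags);
	}

	void BPF_STRUCT_OPS(demo_dispatch, s32 cpu, struct task_struct *prev)
	{
		/* Pull the next queued task onto this CPU's local DSQ. */
		scx_bpf_dsq_move_to_local(DEMO_DSQ);
	}

	SCX_OPS_DEFINE(demo_ops,
		       .enqueue		= (void *)demo_enqueue,
		       .dispatch	= (void *)demo_dispatch,
		       .init		= (void *)demo_init,
		       .name		= "demo");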