 kernel/sched/ext.c | 28 ++++++++++++++--------------
 1 file changed, 14 insertions(+), 14 deletions(-)
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index f16d72d72635..0fabfe19327c 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -5628,8 +5628,10 @@ __bpf_kfunc_start_defs();
  */
 __bpf_kfunc u32 scx_bpf_reenqueue_local(void)
 {
-	u32 nr_enqueued, i;
+	LIST_HEAD(tasks);
+	u32 nr_enqueued = 0;
 	struct rq *rq;
+	struct task_struct *p, *n;
 
 	if (!scx_kf_allowed(SCX_KF_CPU_RELEASE))
 		return 0;
@@ -5638,22 +5640,20 @@ __bpf_kfunc u32 scx_bpf_reenqueue_local(void)
 	lockdep_assert_rq_held(rq);
 
 	/*
-	 * Get the number of tasks on the local DSQ before iterating over it to
-	 * pull off tasks. The enqueue callback below can signal that it wants
-	 * the task to stay on the local DSQ, and we want to prevent the BPF
-	 * scheduler from causing us to loop indefinitely.
+	 * The BPF scheduler may choose to dispatch tasks back to
+	 * @rq->scx.local_dsq. Move all candidate tasks off to a private list
+	 * first to avoid processing the same tasks repeatedly.
 	 */
-	nr_enqueued = rq->scx.local_dsq.nr;
-	for (i = 0; i < nr_enqueued; i++) {
-		struct task_struct *p;
-
-		p = first_local_task(rq);
-		WARN_ON_ONCE(atomic_long_read(&p->scx.ops_state) !=
-			     SCX_OPSS_NONE);
-		WARN_ON_ONCE(!(p->scx.flags & SCX_TASK_QUEUED));
-		WARN_ON_ONCE(p->scx.holding_cpu != -1);
+	list_for_each_entry_safe(p, n, &rq->scx.local_dsq.list,
+				 scx.dsq_list.node) {
 		dispatch_dequeue(rq, p);
+		list_add_tail(&p->scx.dsq_list.node, &tasks);
+	}
+
+	list_for_each_entry_safe(p, n, &tasks, scx.dsq_list.node) {
+		list_del_init(&p->scx.dsq_list.node);
 		do_enqueue_task(rq, p, SCX_ENQ_REENQ, -1);
+		nr_enqueued++;
 	}
 
 	return nr_enqueued;
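
The rewrite is a two-pass drain: every task currently on @rq->scx.local_dsq is first dequeued onto a private on-stack list, and only that snapshot is then fed to do_enqueue_task() with SCX_ENQ_REENQ. If the BPF scheduler's enqueue path puts a task straight back on the local DSQ, it lands on the source list, not the snapshot, so a single call can no longer process the same task twice. The standalone userspace sketch below illustrates the same pattern with purely illustrative names (item, local_dsq, reenqueue() are stand-ins, not kernel symbols) and the BSD <sys/queue.h> macros in place of the kernel's list_head:

/*
 * Illustration of the drain-to-private-list pattern used above.
 * Names and scenario are illustrative only; this is not kernel code.
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/queue.h>

struct item {
	int id;
	TAILQ_ENTRY(item) link;
};

TAILQ_HEAD(item_list, item);

static struct item_list local_dsq = TAILQ_HEAD_INITIALIZER(local_dsq);

/* Stand-in for do_enqueue_task(): may put the item straight back. */
static void reenqueue(struct item *it)
{
	printf("re-enqueuing item %d\n", it->id);
	TAILQ_INSERT_TAIL(&local_dsq, it, link);
}

int main(void)
{
	struct item_list tasks = TAILQ_HEAD_INITIALIZER(tasks);
	struct item *it;
	int i, nr = 0;

	for (i = 0; i < 4; i++) {
		it = calloc(1, sizeof(*it));
		it->id = i;
		TAILQ_INSERT_TAIL(&local_dsq, it, link);
	}

	/*
	 * Pass 1: move everything currently queued onto a private list.
	 * Anything reenqueue() adds back to local_dsq is not visited in
	 * this invocation, so the second loop is bounded.
	 */
	while ((it = TAILQ_FIRST(&local_dsq)) != NULL) {
		TAILQ_REMOVE(&local_dsq, it, link);
		TAILQ_INSERT_TAIL(&tasks, it, link);
	}

	/* Pass 2: process only the snapshot. */
	while ((it = TAILQ_FIRST(&tasks)) != NULL) {
		TAILQ_REMOVE(&tasks, it, link);
		reenqueue(it);
		nr++;
	}

	printf("processed %d items\n", nr);
	return 0;
}

In the kernel version, tasks that the BPF scheduler pushes back onto the local DSQ during the second pass simply stay there and are handled by the next scheduling event rather than being re-examined within the same scx_bpf_reenqueue_local() call.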